-rw-r--r--  Documentation/ABI/testing/devlink-resource-mlxsw | 33
-rw-r--r--  Documentation/ABI/testing/sysfs-class-net | 24
-rw-r--r--  Documentation/bpf/bpf_devel_QA.txt | 519
-rw-r--r--  Documentation/devicetree/bindings/net/brcm,bcm7445-switch-v4.0.txt | 5
-rw-r--r--  Documentation/devicetree/bindings/net/can/can-transceiver.txt | 24
-rw-r--r--  Documentation/devicetree/bindings/net/can/fsl-flexcan.txt | 6
-rw-r--r--  Documentation/devicetree/bindings/net/can/m_can.txt | 9
-rw-r--r--  Documentation/devicetree/bindings/net/can/rcar_can.txt | 7
-rw-r--r--  Documentation/devicetree/bindings/net/cortina,gemini-ethernet.txt | 92
-rw-r--r--  Documentation/devicetree/bindings/net/fsl-fec.txt | 4
-rw-r--r--  Documentation/devicetree/bindings/net/ieee802154/adf7242.txt | 2
-rw-r--r--  Documentation/devicetree/bindings/net/mediatek-net.txt | 2
-rw-r--r--  Documentation/devicetree/bindings/net/phy.txt | 12
-rw-r--r--  Documentation/devicetree/bindings/net/sff,sfp.txt | 10
-rw-r--r--  Documentation/devicetree/bindings/net/socionext,uniphier-ave4.txt | 48
-rw-r--r--  Documentation/devicetree/bindings/net/socionext-netsec.txt | 53
-rw-r--r--  Documentation/devicetree/bindings/net/ti-bluetooth.txt (renamed from Documentation/devicetree/bindings/net/ti,wilink-st.txt) | 18
-rw-r--r--  Documentation/devicetree/bindings/net/wireless/mediatek,mt76.txt | 32
-rw-r--r--  Documentation/devicetree/bindings/net/wireless/qcom,ath10k.txt | 3
-rw-r--r--  Documentation/devicetree/bindings/powerpc/fsl/mpc5200.txt | 2
-rw-r--r--  Documentation/fault-injection/fault-injection.txt | 68
-rw-r--r--  Documentation/networking/00-INDEX | 4
-rw-r--r--  Documentation/networking/batman-adv.rst | 2
-rw-r--r--  Documentation/networking/can.rst | 1437
-rw-r--r--  Documentation/networking/can.txt | 1308
-rw-r--r--  Documentation/networking/dsa/dsa.txt | 5
-rw-r--r--  Documentation/networking/filter.txt | 2
-rw-r--r--  Documentation/networking/ieee802154.txt | 40
-rw-r--r--  Documentation/networking/index.rst | 1
-rw-r--r--  Documentation/networking/ip-sysctl.txt | 1
-rw-r--r--  Documentation/networking/kapi.rst | 24
-rw-r--r--  Documentation/networking/netdev-features.txt | 9
-rw-r--r--  Documentation/networking/pktgen.txt | 19
-rw-r--r--  Documentation/networking/xfrm_device.txt | 135
-rw-r--r--  Documentation/networking/xfrm_proc.txt | 20
-rw-r--r--  Documentation/sysctl/net.txt | 4
-rw-r--r--  MAINTAINERS | 45
-rw-r--r--  arch/Kconfig | 3
-rw-r--r--  arch/arm/boot/dts/imx25.dtsi | 4
-rw-r--r--  arch/arm/boot/dts/imx28.dtsi | 4
-rw-r--r--  arch/arm/boot/dts/imx35.dtsi | 4
-rw-r--r--  arch/arm/boot/dts/imx53.dtsi | 4
-rw-r--r--  arch/arm/boot/dts/imx6q-b450v3.dts | 52
-rw-r--r--  arch/arm/boot/dts/imx6q-b650v3.dts | 52
-rw-r--r--  arch/arm/boot/dts/imx6q-b850v3.dts | 75
-rw-r--r--  arch/arm/boot/dts/imx6q-bx50v3.dtsi | 62
-rw-r--r--  arch/arm/boot/dts/ls1021a-qds.dts | 16
-rw-r--r--  arch/arm/boot/dts/ls1021a-twr.dts | 16
-rw-r--r--  arch/arm/boot/dts/ls1021a.dtsi | 36
-rw-r--r--  arch/arm/net/bpf_jit_32.c | 12
-rw-r--r--  arch/arm64/net/bpf_jit_comp.c | 86
-rw-r--r--  arch/mips/bcm63xx/dev-enet.c | 8
-rw-r--r--  arch/mips/include/asm/mach-bcm63xx/bcm63xx_dev_enet.h | 4
-rw-r--r--  arch/mips/net/bpf_jit.c | 2
-rw-r--r--  arch/mips/net/ebpf_jit.c | 33
-rw-r--r--  arch/powerpc/boot/dts/fsl/p1010si-post.dtsi | 2
-rw-r--r--  arch/powerpc/net/bpf_jit_comp.c | 2
-rw-r--r--  arch/powerpc/net/bpf_jit_comp64.c | 12
-rw-r--r--  arch/s390/include/asm/diag.h | 44
-rw-r--r--  arch/s390/net/bpf_jit_comp.c | 14
-rw-r--r--  arch/sh/boards/board-espt.c | 1
-rw-r--r--  arch/sh/boards/board-sh7757lcr.c | 4
-rw-r--r--  arch/sh/boards/mach-ecovec24/setup.c | 1
-rw-r--r--  arch/sh/boards/mach-se/7724/setup.c | 1
-rw-r--r--  arch/sh/boards/mach-sh7763rdp/setup.c | 1
-rw-r--r--  arch/sh/kernel/cpu/sh2/setup-sh7619.c | 1
-rw-r--r--  arch/sparc/net/bpf_jit_comp_32.c | 2
-rw-r--r--  arch/sparc/net/bpf_jit_comp_64.c | 66
-rw-r--r--  arch/x86/Kconfig | 1
-rw-r--r--  arch/x86/include/asm/error-injection.h | 13
-rw-r--r--  arch/x86/include/asm/kprobes.h | 2
-rw-r--r--  arch/x86/include/asm/ptrace.h | 5
-rw-r--r--  arch/x86/lib/Makefile | 1
-rw-r--r--  arch/x86/lib/error-inject.c | 19
-rw-r--r--  arch/x86/net/bpf_jit_comp.c | 106
-rw-r--r--  drivers/atm/eni.c | 18
-rw-r--r--  drivers/base/property.c | 104
-rw-r--r--  drivers/bcma/driver_pcie2.c | 3
-rw-r--r--  drivers/bluetooth/Kconfig | 14
-rw-r--r--  drivers/bluetooth/bluecard_cs.c | 8
-rw-r--r--  drivers/bluetooth/bpa10x.c | 2
-rw-r--r--  drivers/bluetooth/btbcm.c | 1
-rw-r--r--  drivers/bluetooth/btbcm.h | 2
-rw-r--r--  drivers/bluetooth/btintel.c | 157
-rw-r--r--  drivers/bluetooth/btintel.h | 33
-rw-r--r--  drivers/bluetooth/btqcomsmd.c | 3
-rw-r--r--  drivers/bluetooth/btsdio.c | 9
-rw-r--r--  drivers/bluetooth/btusb.c | 162
-rw-r--r--  drivers/bluetooth/hci_bcm.c | 243
-rw-r--r--  drivers/bluetooth/hci_intel.c | 186
-rw-r--r--  drivers/bluetooth/hci_ll.c | 107
-rw-r--r--  drivers/bluetooth/hci_qca.c | 3
-rw-r--r--  drivers/bluetooth/hci_serdev.c | 1
-rw-r--r--  drivers/hv/ring_buffer.c | 23
-rw-r--r--  drivers/infiniband/hw/qedr/main.c | 2
-rw-r--r--  drivers/infiniband/hw/qedr/qedr_hsi_rdma.h | 125
-rw-r--r--  drivers/net/Kconfig | 11
-rw-r--r--  drivers/net/Makefile | 1
-rw-r--r--  drivers/net/bonding/bond_options.c | 6
-rw-r--r--  drivers/net/caif/caif_hsi.c | 1
-rw-r--r--  drivers/net/can/c_can/c_can_pci.c | 4
-rw-r--r--  drivers/net/can/dev.c | 47
-rw-r--r--  drivers/net/can/flexcan.c | 241
-rw-r--r--  drivers/net/can/m_can/m_can.c | 183
-rw-r--r--  drivers/net/can/rx-offload.c | 2
-rw-r--r--  drivers/net/can/slcan.c | 4
-rw-r--r--  drivers/net/can/spi/mcp251x.c | 7
-rw-r--r--  drivers/net/can/usb/gs_usb.c | 4
-rw-r--r--  drivers/net/can/usb/peak_usb/pcan_usb.c | 9
-rw-r--r--  drivers/net/can/usb/peak_usb/pcan_usb_core.c | 55
-rw-r--r--  drivers/net/can/usb/peak_usb/pcan_usb_core.h | 7
-rw-r--r--  drivers/net/can/usb/peak_usb/pcan_usb_fd.c | 9
-rw-r--r--  drivers/net/can/usb/peak_usb/pcan_usb_pro.c | 9
-rw-r--r--  drivers/net/can/vcan.c | 2
-rw-r--r--  drivers/net/can/vxcan.c | 16
-rw-r--r--  drivers/net/dsa/Kconfig | 2
-rw-r--r--  drivers/net/dsa/b53/b53_common.c | 10
-rw-r--r--  drivers/net/dsa/b53/b53_priv.h | 7
-rw-r--r--  drivers/net/dsa/bcm_sf2.c | 11
-rw-r--r--  drivers/net/dsa/dsa_loop.c | 9
-rw-r--r--  drivers/net/dsa/lan9303-core.c | 138
-rw-r--r--  drivers/net/dsa/microchip/ksz_common.c | 12
-rw-r--r--  drivers/net/dsa/mt7530.c | 288
-rw-r--r--  drivers/net/dsa/mt7530.h | 83
-rw-r--r--  drivers/net/dsa/mv88e6xxx/chip.c | 94
-rw-r--r--  drivers/net/dsa/mv88e6xxx/chip.h | 2
-rw-r--r--  drivers/net/dsa/mv88e6xxx/global1.h | 16
-rw-r--r--  drivers/net/dsa/mv88e6xxx/global1_atu.c | 87
-rw-r--r--  drivers/net/dsa/mv88e6xxx/global1_vtu.c | 74
-rw-r--r--  drivers/net/dummy.c | 215
-rw-r--r--  drivers/net/ethernet/Kconfig | 2
-rw-r--r--  drivers/net/ethernet/Makefile | 2
-rw-r--r--  drivers/net/ethernet/alteon/acenic.c | 14
-rw-r--r--  drivers/net/ethernet/amazon/ena/ena_eth_com.c | 11
-rw-r--r--  drivers/net/ethernet/amazon/ena/ena_eth_com.h | 2
-rw-r--r--  drivers/net/ethernet/amazon/ena/ena_netdev.c | 68
-rw-r--r--  drivers/net/ethernet/amazon/ena/ena_netdev.h | 6
-rw-r--r--  drivers/net/ethernet/amazon/ena/ena_regs_defs.h | 2
-rw-r--r--  drivers/net/ethernet/amd/xgbe/xgbe-drv.c | 24
-rw-r--r--  drivers/net/ethernet/aquantia/atlantic/Makefile | 1
-rw-r--r--  drivers/net/ethernet/aquantia/atlantic/aq_cfg.h | 8
-rw-r--r--  drivers/net/ethernet/aquantia/atlantic/aq_common.h | 39
-rw-r--r--  drivers/net/ethernet/aquantia/atlantic/aq_hw.h | 79
-rw-r--r--  drivers/net/ethernet/aquantia/atlantic/aq_hw_utils.c | 11
-rw-r--r--  drivers/net/ethernet/aquantia/atlantic/aq_hw_utils.h | 3
-rw-r--r--  drivers/net/ethernet/aquantia/atlantic/aq_main.c | 110
-rw-r--r--  drivers/net/ethernet/aquantia/atlantic/aq_main.h | 2
-rw-r--r--  drivers/net/ethernet/aquantia/atlantic/aq_nic.c | 290
-rw-r--r--  drivers/net/ethernet/aquantia/atlantic/aq_nic.h | 62
-rw-r--r--  drivers/net/ethernet/aquantia/atlantic/aq_nic_internal.h | 45
-rw-r--r--  drivers/net/ethernet/aquantia/atlantic/aq_pci_func.c | 413
-rw-r--r--  drivers/net/ethernet/aquantia/atlantic/aq_pci_func.h | 26
-rw-r--r--  drivers/net/ethernet/aquantia/atlantic/aq_ring.c | 4
-rw-r--r--  drivers/net/ethernet/aquantia/atlantic/aq_ring.h | 2
-rw-r--r--  drivers/net/ethernet/aquantia/atlantic/aq_utils.h | 6
-rw-r--r--  drivers/net/ethernet/aquantia/atlantic/aq_vec.c | 5
-rw-r--r--  drivers/net/ethernet/aquantia/atlantic/aq_vec.h | 4
-rw-r--r--  drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_a0.c | 472
-rw-r--r--  drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_a0.h | 18
-rw-r--r--  drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_a0_internal.h | 65
-rw-r--r--  drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c | 506
-rw-r--r--  drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.h | 28
-rw-r--r--  drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0_internal.h | 64
-rw-r--r--  drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh.c | 1326
-rw-r--r--  drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh.h | 544
-rw-r--r--  drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh_internal.h | 1521
-rw-r--r--  drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.c | 346
-rw-r--r--  drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.h | 133
-rw-r--r--  drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils_fw2x.c | 184
-rw-r--r--  drivers/net/ethernet/aquantia/atlantic/ver.h | 6
-rw-r--r--  drivers/net/ethernet/broadcom/bcm63xx_enet.c | 27
-rw-r--r--  drivers/net/ethernet/broadcom/bcm63xx_enet.h | 3
-rw-r--r--  drivers/net/ethernet/broadcom/bcmsysport.c | 12
-rw-r--r--  drivers/net/ethernet/broadcom/bgmac.c | 15
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2.c | 4
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c | 29
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c | 8
-rw-r--r--  drivers/net/ethernet/broadcom/bnxt/Makefile | 2
-rw-r--r--  drivers/net/ethernet/broadcom/bnxt/bnxt.c | 814
-rw-r--r--  drivers/net/ethernet/broadcom/bnxt/bnxt.h | 89
-rw-r--r--  drivers/net/ethernet/broadcom/bnxt/bnxt_dcb.c | 5
-rw-r--r--  drivers/net/ethernet/broadcom/bnxt/bnxt_dim.c | 32
-rw-r--r--  drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c | 23
-rw-r--r--  drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.h | 1
-rw-r--r--  drivers/net/ethernet/broadcom/bnxt/bnxt_hsi.h | 11936
-rw-r--r--  drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c | 221
-rw-r--r--  drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c | 22
-rw-r--r--  drivers/net/ethernet/broadcom/bnxt/bnxt_vfr.c | 33
-rw-r--r--  drivers/net/ethernet/broadcom/bnxt/bnxt_vfr.h | 6
-rw-r--r--  drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c | 1
-rw-r--r--  drivers/net/ethernet/broadcom/genet/bcmgenet.c | 7
-rw-r--r--  drivers/net/ethernet/broadcom/tg3.c | 21
-rw-r--r--  drivers/net/ethernet/broadcom/tg3.h | 14
-rw-r--r--  drivers/net/ethernet/cadence/macb.h | 170
-rw-r--r--  drivers/net/ethernet/cadence/macb_main.c | 758
-rw-r--r--  drivers/net/ethernet/cavium/Kconfig | 13
-rw-r--r--  drivers/net/ethernet/cavium/Makefile | 1
-rw-r--r--  drivers/net/ethernet/cavium/common/Makefile | 1
-rw-r--r--  drivers/net/ethernet/cavium/common/cavium_ptp.c | 353
-rw-r--r--  drivers/net/ethernet/cavium/common/cavium_ptp.h | 70
-rw-r--r--  drivers/net/ethernet/cavium/liquidio/octeon_device.c | 7
-rw-r--r--  drivers/net/ethernet/cavium/thunder/nic.h | 36
-rw-r--r--  drivers/net/ethernet/cavium/thunder/nic_main.c | 58
-rw-r--r--  drivers/net/ethernet/cavium/thunder/nic_reg.h | 1
-rw-r--r--  drivers/net/ethernet/cavium/thunder/nicvf_ethtool.c | 31
-rw-r--r--  drivers/net/ethernet/cavium/thunder/nicvf_main.c | 297
-rw-r--r--  drivers/net/ethernet/cavium/thunder/nicvf_queues.c | 41
-rw-r--r--  drivers/net/ethernet/cavium/thunder/nicvf_queues.h | 6
-rw-r--r--  drivers/net/ethernet/cavium/thunder/thunder_bgx.c | 31
-rw-r--r--  drivers/net/ethernet/cavium/thunder/thunder_bgx.h | 4
-rw-r--r--  drivers/net/ethernet/cavium/thunder/thunder_xcv.c | 2
-rw-r--r--  drivers/net/ethernet/chelsio/Kconfig | 1
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c | 1
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/Makefile | 2
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/cudbg_common.c | 24
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/cudbg_entity.h | 156
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/cudbg_if.h | 10
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.c | 1173
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.h | 18
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/cudbg_lib_common.h | 8
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/cudbg_zlib.c | 82
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/cudbg_zlib.h | 43
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/cxgb4.h | 46
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/cxgb4_cudbg.c | 117
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/cxgb4_cudbg.h | 4
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c | 304
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c | 107
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.c | 140
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c | 605
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.c | 21
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_u32.c | 8
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/sge.c | 221
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/t4_hw.c | 269
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/t4_hw.h | 45
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/t4_msg.h | 164
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/t4_pci_id_tbl.h | 2
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/t4_regs.h | 35
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h | 45
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4vf/adapter.h | 1
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c | 12
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4vf/sge.c | 38
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4vf/t4vf_common.h | 1
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4vf/t4vf_hw.c | 28
-rw-r--r--  drivers/net/ethernet/cisco/enic/enic.h | 2
-rw-r--r--  drivers/net/ethernet/cisco/enic/enic_ethtool.c | 12
-rw-r--r--  drivers/net/ethernet/cisco/enic/enic_main.c | 5
-rw-r--r--  drivers/net/ethernet/cortina/Kconfig | 23
-rw-r--r--  drivers/net/ethernet/cortina/Makefile | 4
-rw-r--r--  drivers/net/ethernet/cortina/gemini.c | 2593
-rw-r--r--  drivers/net/ethernet/cortina/gemini.h | 958
-rw-r--r--  drivers/net/ethernet/emulex/benet/be_cmds.c | 2
-rw-r--r--  drivers/net/ethernet/emulex/benet/be_main.c | 3
-rw-r--r--  drivers/net/ethernet/freescale/fec.h | 5
-rw-r--r--  drivers/net/ethernet/freescale/fec_main.c | 28
-rw-r--r--  drivers/net/ethernet/freescale/gianfar.c | 9
-rw-r--r--  drivers/net/ethernet/hisilicon/Kconfig | 28
-rw-r--r--  drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.c | 2
-rw-r--r--  drivers/net/ethernet/hisilicon/hns/hns_dsaf_misc.c | 109
-rw-r--r--  drivers/net/ethernet/hisilicon/hns/hns_dsaf_reg.h | 7
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/Makefile | 7
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hclge_mbx.h | 88
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hnae3.c | 14
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hnae3.h | 32
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3_dcbnl.c (renamed from drivers/net/ethernet/hisilicon/hns3/hns3pf/hns3_dcbnl.c) | 2
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3_enet.c (renamed from drivers/net/ethernet/hisilicon/hns3/hns3pf/hns3_enet.c) | 405
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3_enet.h (renamed from drivers/net/ethernet/hisilicon/hns3/hns3pf/hns3_enet.h) | 28
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c (renamed from drivers/net/ethernet/hisilicon/hns3/hns3pf/hns3_ethtool.c) | 347
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3pf/Makefile | 8
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h | 106
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c | 1588
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h | 129
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c | 418
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mdio.c | 5
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c | 64
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.h | 12
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3vf/Makefile | 9
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_cmd.c | 342
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_cmd.h | 248
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c | 1505
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.h | 164
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_mbx.c | 181
-rw-r--r--  drivers/net/ethernet/ibm/emac/core.c | 20
-rw-r--r--  drivers/net/ethernet/ibm/emac/emac.h | 13
-rw-r--r--  drivers/net/ethernet/ibm/emac/phy.c | 10
-rw-r--r--  drivers/net/ethernet/ibm/emac/rgmii.c | 43
-rw-r--r--  drivers/net/ethernet/ibm/emac/zmii.c | 38
-rw-r--r--  drivers/net/ethernet/ibm/ibmvnic.c | 36
-rw-r--r--  drivers/net/ethernet/ibm/ibmvnic.h | 2
-rw-r--r--  drivers/net/ethernet/intel/e1000/e1000_ethtool.c | 4
-rw-r--r--  drivers/net/ethernet/intel/e1000e/netdev.c | 4
-rw-r--r--  drivers/net/ethernet/intel/fm10k/fm10k_iov.c | 4
-rw-r--r--  drivers/net/ethernet/intel/fm10k/fm10k_main.c | 14
-rw-r--r--  drivers/net/ethernet/intel/fm10k/fm10k_netdev.c | 54
-rw-r--r--  drivers/net/ethernet/intel/fm10k/fm10k_pf.c | 2
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e.h | 67
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_adminq.c | 17
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h | 30
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_client.c | 36
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_client.h | 2
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_common.c | 74
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_ethtool.c | 192
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_main.c | 220
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_nvm.c | 143
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_prototype.h | 51
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_status.h | 1
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_txrx.c | 84
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_txrx.h | 7
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_type.h | 54
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c | 5
-rw-r--r--  drivers/net/ethernet/intel/i40evf/i40e_adminq.c | 15
-rw-r--r--  drivers/net/ethernet/intel/i40evf/i40e_adminq_cmd.h | 30
-rw-r--r--  drivers/net/ethernet/intel/i40evf/i40e_common.c | 31
-rw-r--r--  drivers/net/ethernet/intel/i40evf/i40e_prototype.h | 10
-rw-r--r--  drivers/net/ethernet/intel/i40evf/i40e_status.h | 1
-rw-r--r--  drivers/net/ethernet/intel/i40evf/i40e_txrx.c | 66
-rw-r--r--  drivers/net/ethernet/intel/i40evf/i40e_txrx.h | 4
-rw-r--r--  drivers/net/ethernet/intel/i40evf/i40e_type.h | 54
-rw-r--r--  drivers/net/ethernet/intel/i40evf/i40evf.h | 9
-rw-r--r--  drivers/net/ethernet/intel/i40evf/i40evf_ethtool.c | 28
-rw-r--r--  drivers/net/ethernet/intel/i40evf/i40evf_main.c | 228
-rw-r--r--  drivers/net/ethernet/intel/i40evf/i40evf_virtchnl.c | 83
-rw-r--r--  drivers/net/ethernet/intel/igb/igb.h | 1
-rw-r--r--  drivers/net/ethernet/intel/igb/igb_ethtool.c | 32
-rw-r--r--  drivers/net/ethernet/intel/igb/igb_main.c | 72
-rw-r--r--  drivers/net/ethernet/intel/igb/igb_ptp.c | 9
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/Makefile | 1
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe.h | 46
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_82598.c | 3
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c | 11
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_common.c | 121
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_common.h | 6
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_dcb.c | 10
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82598.c | 22
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82599.c | 5
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_nl.c | 2
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_debugfs.c | 2
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c | 56
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.c | 10
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_ipsec.c | 941
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_ipsec.h | 93
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c | 76
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_main.c | 589
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c | 15
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c | 8
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c | 63
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_type.h | 61
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c | 21
-rw-r--r--  drivers/net/ethernet/intel/ixgbevf/ethtool.c | 3
-rw-r--r--  drivers/net/ethernet/intel/ixgbevf/ixgbevf.h | 16
-rw-r--r--  drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c | 372
-rw-r--r--  drivers/net/ethernet/intel/ixgbevf/vf.c | 34
-rw-r--r--  drivers/net/ethernet/marvell/Kconfig | 2
-rw-r--r--  drivers/net/ethernet/marvell/mvneta.c | 687
-rw-r--r--  drivers/net/ethernet/marvell/mvpp2.c | 253
-rw-r--r--  drivers/net/ethernet/marvell/sky2.c | 2
-rw-r--r--  drivers/net/ethernet/mediatek/mtk_eth_soc.c | 35
-rw-r--r--  drivers/net/ethernet/mediatek/mtk_eth_soc.h | 3
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/en_dcb_nl.c | 5
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/en_ethtool.c | 17
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/en_netdev.c | 10
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/en_rx.c | 69
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/mlx4_en.h | 5
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/mr.c | 40
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/Makefile | 2
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en.h | 85
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_dim.c | 48
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c | 23
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_fs.c | 110
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_main.c | 291
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_rep.c | 154
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_rep.h | 14
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_rx.c | 19
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_rx_am.c | 347
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_stats.c | 282
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_stats.h | 12
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_tc.c | 548
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c | 10
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/eq.c | 18
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/eswitch.c | 147
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/eswitch.h | 52
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c | 216
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/fs_core.c | 150
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/fs_core.h | 6
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/fs_counters.c | 6
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/ipoib/ethtool.c | 1
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c | 27
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.h | 3
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib_vlan.c | 7
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h | 1
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/transobj.c | 214
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/core.c | 92
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/core.h | 16
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.c | 255
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.h | 13
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/i2c.c | 5
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/item.h | 2
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/pci.c | 98
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/reg.h | 37
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/spectrum.c | 602
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/spectrum.h | 101
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/spectrum_acl.c | 359
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_flex_actions.c | 66
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.c | 44
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/spectrum_dpipe.c | 81
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/spectrum_flower.c | 49
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/spectrum_kvdl.c | 26
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/spectrum_mr_tcam.c | 3
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/spectrum_qdisc.c | 551
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c | 280
-rw-r--r--  drivers/net/ethernet/micrel/ks8851.c | 13
-rw-r--r--  drivers/net/ethernet/netronome/nfp/Makefile | 3
-rw-r--r--  drivers/net/ethernet/netronome/nfp/bpf/cmsg.c | 453
-rw-r--r--  drivers/net/ethernet/netronome/nfp/bpf/fw.h | 157
-rw-r--r--  drivers/net/ethernet/netronome/nfp/bpf/jit.c | 988
-rw-r--r--  drivers/net/ethernet/netronome/nfp/bpf/main.c | 248
-rw-r--r--  drivers/net/ethernet/netronome/nfp/bpf/main.h | 207
-rw-r--r--  drivers/net/ethernet/netronome/nfp/bpf/offload.c | 198
-rw-r--r--  drivers/net/ethernet/netronome/nfp/bpf/verifier.c | 159
-rw-r--r--  drivers/net/ethernet/netronome/nfp/flower/action.c | 79
-rw-r--r--  drivers/net/ethernet/netronome/nfp/flower/cmsg.c | 75
-rw-r--r--  drivers/net/ethernet/netronome/nfp/flower/cmsg.h | 71
-rw-r--r--  drivers/net/ethernet/netronome/nfp/flower/main.c | 156
-rw-r--r--  drivers/net/ethernet/netronome/nfp/flower/main.h | 16
-rw-r--r--  drivers/net/ethernet/netronome/nfp/flower/match.c | 127
-rw-r--r--  drivers/net/ethernet/netronome/nfp/flower/offload.c | 57
-rw-r--r--  drivers/net/ethernet/netronome/nfp/nfp_app.c | 14
-rw-r--r--  drivers/net/ethernet/netronome/nfp/nfp_app.h | 84
-rw-r--r--  drivers/net/ethernet/netronome/nfp/nfp_asm.c | 95
-rw-r--r--  drivers/net/ethernet/netronome/nfp/nfp_asm.h | 28
-rw-r--r--  drivers/net/ethernet/netronome/nfp/nfp_devlink.c | 12
-rw-r--r--  drivers/net/ethernet/netronome/nfp/nfp_main.c | 24
-rw-r--r--  drivers/net/ethernet/netronome/nfp/nfp_main.h | 29
-rw-r--r--  drivers/net/ethernet/netronome/nfp/nfp_net.h | 22
-rw-r--r--  drivers/net/ethernet/netronome/nfp/nfp_net_common.c | 111
-rw-r--r--  drivers/net/ethernet/netronome/nfp/nfp_net_ctrl.c | 135
-rw-r--r--  drivers/net/ethernet/netronome/nfp/nfp_net_ctrl.h | 210
-rw-r--r--  drivers/net/ethernet/netronome/nfp/nfp_net_debugdump.c | 811
-rw-r--r--  drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c | 109
-rw-r--r--  drivers/net/ethernet/netronome/nfp/nfp_net_main.c | 76
-rw-r--r--  drivers/net/ethernet/netronome/nfp/nfp_net_repr.c | 89
-rw-r--r--  drivers/net/ethernet/netronome/nfp/nfp_net_repr.h | 16
-rw-r--r--  drivers/net/ethernet/netronome/nfp/nfp_netvf_main.c | 6
-rw-r--r--  drivers/net/ethernet/netronome/nfp/nfpcore/nfp.h | 2
-rw-r--r--  drivers/net/ethernet/netronome/nfp/nfpcore/nfp_cpp.h | 1
-rw-r--r--  drivers/net/ethernet/netronome/nfp/nfpcore/nfp_cppcore.c | 62
-rw-r--r--  drivers/net/ethernet/netronome/nfp/nfpcore/nfp_cpplib.c | 38
-rw-r--r--  drivers/net/ethernet/netronome/nfp/nfpcore/nfp_hwinfo.c | 10
-rw-r--r--  drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp.c | 72
-rw-r--r--  drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp.h | 1
-rw-r--r--  drivers/net/ethernet/netronome/nfp/nfpcore/nfp_rtsym.c | 4
-rw-r--r--  drivers/net/ethernet/nvidia/forcedeth.c | 94
-rw-r--r--  drivers/net/ethernet/pasemi/pasemi_mac.c | 4
-rw-r--r--  drivers/net/ethernet/qlogic/Kconfig | 1
-rw-r--r--  drivers/net/ethernet/qlogic/netxen/netxen_nic_hw.c | 4
-rw-r--r--  drivers/net/ethernet/qlogic/qed/qed.h | 12
-rw-r--r--  drivers/net/ethernet/qlogic/qed/qed_cxt.c | 49
-rw-r--r--  drivers/net/ethernet/qlogic/qed/qed_cxt.h | 4
-rw-r--r--  drivers/net/ethernet/qlogic/qed/qed_dcbx.c | 4
-rw-r--r--  drivers/net/ethernet/qlogic/qed/qed_debug.c | 1167
-rw-r--r--  drivers/net/ethernet/qlogic/qed/qed_dev.c | 15
-rw-r--r--  drivers/net/ethernet/qlogic/qed/qed_fcoe.c | 10
-rw-r--r--  drivers/net/ethernet/qlogic/qed/qed_hsi.h | 10603
-rw-r--r--  drivers/net/ethernet/qlogic/qed/qed_hw.c | 68
-rw-r--r--  drivers/net/ethernet/qlogic/qed/qed_hw.h | 4
-rw-r--r--  drivers/net/ethernet/qlogic/qed/qed_init_fw_funcs.c | 1091
-rw-r--r--  drivers/net/ethernet/qlogic/qed/qed_init_ops.c | 22
-rw-r--r--  drivers/net/ethernet/qlogic/qed/qed_int.c | 8
-rw-r--r--  drivers/net/ethernet/qlogic/qed/qed_int.h | 2
-rw-r--r--  drivers/net/ethernet/qlogic/qed/qed_iscsi.c | 58
-rw-r--r--  drivers/net/ethernet/qlogic/qed/qed_iwarp.c | 33
-rw-r--r--  drivers/net/ethernet/qlogic/qed/qed_iwarp.h | 3
-rw-r--r--  drivers/net/ethernet/qlogic/qed/qed_l2.c | 119
-rw-r--r--  drivers/net/ethernet/qlogic/qed/qed_l2.h | 33
-rw-r--r--  drivers/net/ethernet/qlogic/qed/qed_ll2.c | 28
-rw-r--r--  drivers/net/ethernet/qlogic/qed/qed_mcp.c | 2
-rw-r--r--  drivers/net/ethernet/qlogic/qed/qed_rdma.c | 12
-rw-r--r--  drivers/net/ethernet/qlogic/qed/qed_reg_addr.h | 190
-rw-r--r--  drivers/net/ethernet/qlogic/qed/qed_sp_commands.c | 8
-rw-r--r--  drivers/net/ethernet/qlogic/qed/qed_spq.c | 8
-rw-r--r--  drivers/net/ethernet/qlogic/qed/qed_sriov.c | 22
-rw-r--r--  drivers/net/ethernet/qlogic/qed/qed_sriov.h | 24
-rw-r--r--  drivers/net/ethernet/qlogic/qede/qede.h | 10
-rw-r--r--  drivers/net/ethernet/qlogic/qede/qede_ethtool.c | 3
-rw-r--r--  drivers/net/ethernet/qlogic/qede/qede_filter.c | 47
-rw-r--r--  drivers/net/ethernet/qlogic/qede/qede_fp.c | 1
-rw-r--r--  drivers/net/ethernet/qlogic/qede/qede_main.c | 33
-rw-r--r--  drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c | 18
-rw-r--r--  drivers/net/ethernet/qualcomm/emac/emac-mac.h | 3
-rw-r--r--  drivers/net/ethernet/qualcomm/emac/emac.c | 7
-rw-r--r--  drivers/net/ethernet/qualcomm/rmnet/rmnet_config.c | 64
-rw-r--r--  drivers/net/ethernet/qualcomm/rmnet/rmnet_config.h | 3
-rw-r--r--  drivers/net/ethernet/qualcomm/rmnet/rmnet_handlers.c | 70
-rw-r--r--  drivers/net/ethernet/qualcomm/rmnet/rmnet_map.h | 32
-rw-r--r--  drivers/net/ethernet/qualcomm/rmnet/rmnet_map_command.c | 17
-rw-r--r--  drivers/net/ethernet/qualcomm/rmnet/rmnet_map_data.c | 309
-rw-r--r--  drivers/net/ethernet/qualcomm/rmnet/rmnet_private.h | 12
-rw-r--r--  drivers/net/ethernet/qualcomm/rmnet/rmnet_vnd.c | 7
-rw-r--r--  drivers/net/ethernet/realtek/8139cp.c | 4
-rw-r--r--  drivers/net/ethernet/realtek/r8169.c | 133
-rw-r--r--  drivers/net/ethernet/renesas/ravb_main.c | 3
-rw-r--r--  drivers/net/ethernet/renesas/sh_eth.c | 26
-rw-r--r--  drivers/net/ethernet/rocker/rocker_ofdpa.c | 1
-rw-r--r--  drivers/net/ethernet/sfc/ef10.c | 363
-rw-r--r--  drivers/net/ethernet/sfc/ef10_regs.h | 46
-rw-r--r--  drivers/net/ethernet/sfc/efx.c | 66
-rw-r--r--  drivers/net/ethernet/sfc/efx.h | 9
-rw-r--r--  drivers/net/ethernet/sfc/ethtool.c | 6
-rw-r--r--  drivers/net/ethernet/sfc/farch.c | 26
-rw-r--r--  drivers/net/ethernet/sfc/io.h | 19
-rw-r--r--  drivers/net/ethernet/sfc/mcdi.h | 3
-rw-r--r--  drivers/net/ethernet/sfc/mcdi_pcol.h | 2453
-rw-r--r--  drivers/net/ethernet/sfc/mcdi_port.c | 168
-rw-r--r--  drivers/net/ethernet/sfc/net_driver.h | 33
-rw-r--r--  drivers/net/ethernet/sfc/nic.h | 28
-rw-r--r--  drivers/net/ethernet/sfc/ptp.c | 370
-rw-r--r--  drivers/net/ethernet/sfc/siena.c | 12
-rw-r--r--  drivers/net/ethernet/sfc/tx.c | 21
-rw-r--r--  drivers/net/ethernet/socionext/Kconfig | 34
-rw-r--r--  drivers/net/ethernet/socionext/Makefile | 6
-rw-r--r--  drivers/net/ethernet/socionext/netsec.c | 1777
-rw-r--r--  drivers/net/ethernet/socionext/sni_ave.c | 1736
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/common.h | 2
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/dwmac-meson8b.c | 113
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c | 3
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c | 12
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/dwmac100_core.c | 15
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c | 12
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.c | 6
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/enh_desc.c | 4
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/norm_desc.c | 2
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/stmmac_main.c | 61
-rw-r--r--  drivers/net/ethernet/synopsys/dwc-xlgmac-common.c | 24
-rw-r--r--  drivers/net/ethernet/ti/cpsw.c | 96
-rw-r--r--  drivers/net/ethernet/ti/cpsw.h | 23
-rw-r--r--  drivers/net/ethernet/ti/cpsw_ale.c | 109
-rw-r--r--  drivers/net/ethernet/ti/cpsw_ale.h | 1
-rw-r--r--  drivers/net/ethernet/ti/davinci_cpdma.c | 2
-rw-r--r--  drivers/net/ethernet/ti/davinci_emac.c | 20
-rw-r--r--  drivers/net/ethernet/ti/netcp_ethss.c | 6
-rw-r--r--  drivers/net/geneve.c | 24
-rw-r--r--  drivers/net/hyperv/hyperv_net.h | 60
-rw-r--r--  drivers/net/hyperv/netvsc.c | 73
-rw-r--r--  drivers/net/hyperv/netvsc_drv.c | 111
-rw-r--r--  drivers/net/hyperv/rndis_filter.c | 44
-rw-r--r--  drivers/net/ieee802154/adf7242.c | 90
-rw-r--r--  drivers/net/ipvlan/ipvlan_core.c | 17
-rw-r--r--  drivers/net/ipvlan/ipvlan_main.c | 33
-rw-r--r--  drivers/net/macsec.c | 68
-rw-r--r--  drivers/net/netdevsim/Makefile | 11
-rw-r--r--  drivers/net/netdevsim/bpf.c | 643
-rw-r--r--  drivers/net/netdevsim/netdev.c | 504
-rw-r--r--  drivers/net/netdevsim/netdevsim.h | 109
-rw-r--r--  drivers/net/phy/amd.c | 2
-rw-r--r--  drivers/net/phy/at803x.c | 44
-rw-r--r--  drivers/net/phy/bcm-cygnus.c | 2
-rw-r--r--  drivers/net/phy/bcm63xx.c | 4
-rw-r--r--  drivers/net/phy/bcm7xxx.c | 6
-rw-r--r--  drivers/net/phy/broadcom.c | 69
-rw-r--r--  drivers/net/phy/cicada.c | 4
-rw-r--r--  drivers/net/phy/davicom.c | 5
-rw-r--r--  drivers/net/phy/dp83640.c | 2
-rw-r--r--  drivers/net/phy/dp83822.c | 2
-rw-r--r--  drivers/net/phy/dp83848.c | 2
-rw-r--r--  drivers/net/phy/dp83867.c | 2
-rw-r--r--  drivers/net/phy/fixed_phy.c | 31
-rw-r--r--  drivers/net/phy/icplus.c | 4
-rw-r--r--  drivers/net/phy/intel-xway.c | 12
-rw-r--r--  drivers/net/phy/lxt.c | 5
-rw-r--r--  drivers/net/phy/marvell.c | 600
-rw-r--r--  drivers/net/phy/marvell10g.c | 111
-rw-r--r--  drivers/net/phy/mdio-bcm-unimac.c | 2
-rw-r--r--  drivers/net/phy/mdio_bus.c | 99
-rw-r--r--  drivers/net/phy/mdio_device.c | 34
-rw-r--r--  drivers/net/phy/meson-gxl.c | 186
-rw-r--r--  drivers/net/phy/micrel.c | 24
-rw-r--r--  drivers/net/phy/microchip.c | 1
-rw-r--r--  drivers/net/phy/national.c | 2
-rw-r--r--  drivers/net/phy/phy-c45.c | 33
-rw-r--r--  drivers/net/phy/phy-core.c | 258
-rw-r--r--  drivers/net/phy/phy.c | 56
-rw-r--r--  drivers/net/phy/phy_device.c | 108
-rw-r--r--  drivers/net/phy/phylink.c | 448
-rw-r--r--  drivers/net/phy/qsemi.c | 2
-rw-r--r--  drivers/net/phy/realtek.c | 105
-rw-r--r--  drivers/net/phy/rockchip.c | 1
-rw-r--r--  drivers/net/phy/sfp-bus.c | 221
-rw-r--r--  drivers/net/phy/sfp.c | 119
-rw-r--r--  drivers/net/phy/smsc.c | 11
-rw-r--r--  drivers/net/phy/ste10Xp.c | 4
-rw-r--r--  drivers/net/phy/uPD60620.c | 1
-rw-r--r--  drivers/net/phy/vitesse.c | 12
-rw-r--r--  drivers/net/slip/slip.c | 4
-rw-r--r--  drivers/net/tap.c | 42
-rw-r--r--  drivers/net/tun.c | 459
-rw-r--r--  drivers/net/usb/qmi_wwan.c | 5
-rw-r--r--  drivers/net/veth.c | 3
-rw-r--r--  drivers/net/virtio_net.c | 235
-rw-r--r--  drivers/net/vmxnet3/vmxnet3_int.h | 8
-rw-r--r--  drivers/net/vxlan.c | 26
-rw-r--r--  drivers/net/wireless/ath/ath10k/Kconfig | 9
-rw-r--r--  drivers/net/wireless/ath/ath10k/Makefile | 3
-rw-r--r--  drivers/net/wireless/ath/ath10k/ahb.c | 2
-rw-r--r--  drivers/net/wireless/ath/ath10k/bmi.c | 2
-rw-r--r--  drivers/net/wireless/ath/ath10k/bmi.h | 2
-rw-r--r--  drivers/net/wireless/ath/ath10k/ce.c | 636
-rw-r--r--  drivers/net/wireless/ath/ath10k/ce.h | 61
-rw-r--r--  drivers/net/wireless/ath/ath10k/core.c | 342
-rw-r--r--  drivers/net/wireless/ath/ath10k/core.h | 36
-rw-r--r--  drivers/net/wireless/ath/ath10k/coredump.c | 993
-rw-r--r--  drivers/net/wireless/ath/ath10k/coredump.h | 225
-rw-r--r--  drivers/net/wireless/ath/ath10k/debug.c | 293
-rw-r--r--  drivers/net/wireless/ath/ath10k/debug.h | 40
-rw-r--r--  drivers/net/wireless/ath/ath10k/debugfs_sta.c | 29
-rw-r--r--  drivers/net/wireless/ath/ath10k/hif.h | 2
-rw-r--r--  drivers/net/wireless/ath/ath10k/htc.c | 2
-rw-r--r--  drivers/net/wireless/ath/ath10k/htc.h | 2
-rw-r--r--  drivers/net/wireless/ath/ath10k/htt.c | 9
-rw-r--r--  drivers/net/wireless/ath/ath10k/htt.h | 165
-rw-r--r--  drivers/net/wireless/ath/ath10k/htt_rx.c | 365
-rw-r--r--  drivers/net/wireless/ath/ath10k/htt_tx.c | 600
-rw-r--r--  drivers/net/wireless/ath/ath10k/hw.c | 4
-rw-r--r--  drivers/net/wireless/ath/ath10k/hw.h | 23
-rw-r--r--  drivers/net/wireless/ath/ath10k/mac.c | 72
-rw-r--r--  drivers/net/wireless/ath/ath10k/mac.h | 2
-rw-r--r--  drivers/net/wireless/ath/ath10k/pci.c | 230
-rw-r--r--  drivers/net/wireless/ath/ath10k/pci.h | 2
-rw-r--r--  drivers/net/wireless/ath/ath10k/rx_desc.h | 52
-rw-r--r--  drivers/net/wireless/ath/ath10k/spectral.c | 2
-rw-r--r--  drivers/net/wireless/ath/ath10k/spectral.h | 6
-rw-r--r--  drivers/net/wireless/ath/ath10k/swap.c | 2
-rw-r--r--  drivers/net/wireless/ath/ath10k/swap.h | 2
-rw-r--r--  drivers/net/wireless/ath/ath10k/targaddrs.h | 2
-rw-r--r--  drivers/net/wireless/ath/ath10k/testmode.c | 2
-rw-r--r--  drivers/net/wireless/ath/ath10k/testmode_i.h | 2
-rw-r--r--  drivers/net/wireless/ath/ath10k/thermal.c | 2
-rw-r--r--  drivers/net/wireless/ath/ath10k/thermal.h | 2
-rw-r--r--  drivers/net/wireless/ath/ath10k/trace.h | 2
-rw-r--r--  drivers/net/wireless/ath/ath10k/txrx.c | 2
-rw-r--r--  drivers/net/wireless/ath/ath10k/txrx.h | 2
-rw-r--r--  drivers/net/wireless/ath/ath10k/wmi-ops.h | 11
-rw-r--r--  drivers/net/wireless/ath/ath10k/wmi-tlv.c | 146
-rw-r--r--  drivers/net/wireless/ath/ath10k/wmi-tlv.h | 115
-rw-r--r--  drivers/net/wireless/ath/ath10k/wmi.c | 74
-rw-r--r--  drivers/net/wireless/ath/ath10k/wmi.h | 14
-rw-r--r--  drivers/net/wireless/ath/ath10k/wow.c | 2
-rw-r--r--  drivers/net/wireless/ath/ath10k/wow.h | 2
-rw-r--r--  drivers/net/wireless/ath/ath6kl/cfg80211.c | 2
-rw-r--r--  drivers/net/wireless/ath/ath6kl/txrx.c | 2
-rw-r--r--  drivers/net/wireless/ath/ath9k/Kconfig | 14
-rw-r--r--  drivers/net/wireless/ath/ath9k/Makefile | 4
-rw-r--r--  drivers/net/wireless/ath/ath9k/ar9003_eeprom.c | 133
-rw-r--r--  drivers/net/wireless/ath/ath9k/ar9003_eeprom.h | 10
-rw-r--r--  drivers/net/wireless/ath/ath9k/calib.c | 38
-rw-r--r--  drivers/net/wireless/ath/ath9k/common-spectral.h | 4
-rw-r--r--  drivers/net/wireless/ath/ath9k/dfs.c | 8
-rw-r--r--  drivers/net/wireless/ath/ath9k/htc_drv_main.c | 4
-rw-r--r--  drivers/net/wireless/ath/ath9k/hw.c | 33
-rw-r--r--  drivers/net/wireless/ath/ath9k/hw.h | 5
-rw-r--r--  drivers/net/wireless/ath/ath9k/init.c | 57
-rw-r--r--  drivers/net/wireless/ath/ath9k/mac.c | 47
-rw-r--r--  drivers/net/wireless/ath/ath9k/pci.c | 21
-rw-r--r--  drivers/net/wireless/ath/ath9k/recv.c | 4
-rw-r--r--  drivers/net/wireless/ath/ath9k/reg.h | 15
-rw-r--r--  drivers/net/wireless/ath/wcn36xx/dxe.c | 46
-rw-r--r--  drivers/net/wireless/ath/wcn36xx/hal.h | 107
-rw-r--r--  drivers/net/wireless/ath/wcn36xx/main.c | 16
-rw-r--r--  drivers/net/wireless/ath/wcn36xx/smd.c | 126
-rw-r--r--  drivers/net/wireless/ath/wcn36xx/smd.h | 3
-rw-r--r--  drivers/net/wireless/ath/wil6210/boot_loader.h | 9
-rw-r--r--  drivers/net/wireless/ath/wil6210/cfg80211.c | 85
-rw-r--r--  drivers/net/wireless/ath/wil6210/debugfs.c | 151
-rw-r--r--  drivers/net/wireless/ath/wil6210/ethtool.c | 15
-rw-r--r--  drivers/net/wireless/ath/wil6210/fw.h | 18
-rw-r--r--  drivers/net/wireless/ath/wil6210/fw_inc.c | 234
-rw-r--r--  drivers/net/wireless/ath/wil6210/interrupt.c | 30
-rw-r--r--  drivers/net/wireless/ath/wil6210/main.c | 304
-rw-r--r--  drivers/net/wireless/ath/wil6210/netdev.c | 20
-rw-r--r--  drivers/net/wireless/ath/wil6210/pcie_bus.c | 192
-rw-r--r--  drivers/net/wireless/ath/wil6210/pm.c | 121
-rw-r--r--  drivers/net/wireless/ath/wil6210/pmc.c | 11
-rw-r--r--  drivers/net/wireless/ath/wil6210/txrx.c | 14
-rw-r--r--  drivers/net/wireless/ath/wil6210/wil6210.h | 112
-rw-r--r--  drivers/net/wireless/ath/wil6210/wil_crash_dump.c | 11
-rw-r--r--  drivers/net/wireless/ath/wil6210/wil_platform.h | 15
-rw-r--r--  drivers/net/wireless/ath/wil6210/wmi.c | 635
-rw-r--r--  drivers/net/wireless/ath/wil6210/wmi.h | 117
-rw-r--r--  drivers/net/wireless/broadcom/b43/phy_n.c | 2
-rw-r--r--  drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcdc.c | 17
-rw-r--r--  drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcmsdh.c | 531
-rw-r--r--  drivers/net/wireless/broadcom/brcm80211/brcmfmac/chip.c | 1
-rw-r--r--  drivers/net/wireless/broadcom/brcm80211/brcmfmac/feature.c | 12
-rw-r--r--  drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwil.c | 24
-rw-r--r--  drivers/net/wireless/broadcom/brcm80211/brcmfmac/msgbuf.c | 10
-rw-r--r--  drivers/net/wireless/broadcom/brcm80211/brcmfmac/pcie.c | 20
-rw-r--r--  drivers/net/wireless/broadcom/brcm80211/brcmfmac/proto.h | 14
-rw-r--r--  drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c | 487
-rw-r--r--  drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.h | 73
-rw-r--r--  drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_n.c | 3
-rw-r--r--  drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phytbl_n.c | 462
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/Makefile | 3
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/cfg/22000.c | 216
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/cfg/8000.c | 4
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/cfg/9000.c | 2
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/cfg/a000.c | 216
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/fw/api/alive.h | 4
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/fw/api/coex.h | 30
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/fw/api/commands.h | 2
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/fw/api/datapath.h | 15
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/fw/api/debug.h | 4
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/fw/api/mac-cfg.h | 19
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/fw/api/rs.h | 262
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/fw/api/tx.h | 4
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/fw/dbg.c | 15
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/fw/debugfs.c | 195
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/fw/debugfs.h | 87
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/fw/file.h | 38
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/fw/img.h | 2
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/fw/init.c | 13
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/fw/runtime.h | 14
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/fw/smem.c | 8
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/iwl-config.h | 20
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/iwl-devtrace-iwlwifi.h | 7
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/iwl-drv.c | 110
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/iwl-fh.h | 12
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/iwl-trans.h | 11
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/mvm/Makefile | 2
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/mvm/coex.c | 33
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/mvm/d3.c | 232
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c | 93
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/mvm/fw.c | 14
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c | 45
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/mvm/mvm.h | 22
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/mvm/ops.c | 15
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/mvm/quota.c | 4
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/mvm/rs-fw.c | 314
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/mvm/rs.c | 234
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/mvm/rs.h | 48
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/mvm/rx.c | 9
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c | 10
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/mvm/scan.c | 35
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/mvm/sta.c | 28
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/mvm/sta.h | 7
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/mvm/time-event.c | 15
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/mvm/tx.c | 24
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/mvm/utils.c | 40
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/pcie/drv.c | 38
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/pcie/internal.h | 7
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/pcie/trans-gen2.c | 4
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/pcie/trans.c | 81
-rw-r--r--  drivers/net/wireless/mac80211_hwsim.c | 111
-rw-r--r--  drivers/net/wireless/mac80211_hwsim.h | 68
-rw-r--r--  drivers/net/wireless/marvell/mwifiex/cfg80211.c | 7
-rw-r--r--  drivers/net/wireless/marvell/mwifiex/cmdevt.c | 11
-rw-r--r--  drivers/net/wireless/marvell/mwifiex/debugfs.c | 13
-rw-r--r--  drivers/net/wireless/marvell/mwifiex/fw.h | 11
-rw-r--r--  drivers/net/wireless/marvell/mwifiex/init.c | 10
-rw-r--r--  drivers/net/wireless/marvell/mwifiex/main.c | 97
-rw-r--r--  drivers/net/wireless/marvell/mwifiex/main.h | 13
-rw-r--r--  drivers/net/wireless/marvell/mwifiex/pcie.c | 18
-rw-r--r--  drivers/net/wireless/marvell/mwifiex/sdio.c | 14
-rw-r--r--  drivers/net/wireless/marvell/mwifiex/sta_cmd.c | 4
-rw-r--r--  drivers/net/wireless/marvell/mwifiex/sta_event.c | 61
-rw-r--r--  drivers/net/wireless/marvell/mwl8k.c | 7
-rw-r--r--  drivers/net/wireless/mediatek/Kconfig | 1
-rw-r--r--  drivers/net/wireless/mediatek/Makefile | 1
-rw-r--r--  drivers/net/wireless/mediatek/mt76/Kconfig | 10
-rw-r--r--  drivers/net/wireless/mediatek/mt76/Makefile | 15
-rw-r--r--  drivers/net/wireless/mediatek/mt76/agg-rx.c | 258
-rw-r--r--  drivers/net/wireless/mediatek/mt76/debugfs.c | 78
-rw-r--r--  drivers/net/wireless/mediatek/mt76/dma.c | 459
-rw-r--r--  drivers/net/wireless/mediatek/mt76/dma.h | 38
-rw-r--r--  drivers/net/wireless/mediatek/mt76/eeprom.c | 112
-rw-r--r--  drivers/net/wireless/mediatek/mt76/mac80211.c | 505
-rw-r--r--  drivers/net/wireless/mediatek/mt76/mmio.c | 61
-rw-r--r--  drivers/net/wireless/mediatek/mt76/mt76.h | 432
-rw-r--r--  drivers/net/wireless/mediatek/mt76/mt76x2.h | 228
-rw-r--r--  drivers/net/wireless/mediatek/mt76/mt76x2_core.c | 88
-rw-r--r--  drivers/net/wireless/mediatek/mt76/mt76x2_debugfs.c | 133
-rw-r--r--  drivers/net/wireless/mediatek/mt76/mt76x2_dfs.c | 506
-rw-r--r--  drivers/net/wireless/mediatek/mt76/mt76x2_dfs.h | 82
-rw-r--r--  drivers/net/wireless/mediatek/mt76/mt76x2_dma.c | 184
-rw-r--r--  drivers/net/wireless/mediatek/mt76/mt76x2_dma.h | 68
-rw-r--r--  drivers/net/wireless/mediatek/mt76/mt76x2_eeprom.c | 664
-rw-r--r--  drivers/net/wireless/mediatek/mt76/mt76x2_eeprom.h | 185
-rw-r--r--  drivers/net/wireless/mediatek/mt76/mt76x2_init.c | 875
-rw-r--r--  drivers/net/wireless/mediatek/mt76/mt76x2_mac.c | 839
-rw-r--r--  drivers/net/wireless/mediatek/mt76/mt76x2_mac.h | 190
-rw-r--r--  drivers/net/wireless/mediatek/mt76/mt76x2_main.c | 577
-rw-r--r--  drivers/net/wireless/mediatek/mt76/mt76x2_mcu.c | 453
-rw-r--r--  drivers/net/wireless/mediatek/mt76/mt76x2_mcu.h | 155
-rw-r--r--  drivers/net/wireless/mediatek/mt76/mt76x2_pci.c | 110
-rw-r--r--  drivers/net/wireless/mediatek/mt76/mt76x2_phy.c | 740
-rw-r--r--  drivers/net/wireless/mediatek/mt76/mt76x2_regs.h | 587
-rw-r--r--  drivers/net/wireless/mediatek/mt76/mt76x2_trace.c | 23
-rw-r--r--  drivers/net/wireless/mediatek/mt76/mt76x2_trace.h | 144
-rw-r--r--  drivers/net/wireless/mediatek/mt76/mt76x2_tx.c | 260
-rw-r--r--  drivers/net/wireless/mediatek/mt76/trace.c | 23
-rw-r--r--  drivers/net/wireless/mediatek/mt76/trace.h | 71
-rw-r--r--  drivers/net/wireless/mediatek/mt76/tx.c | 511
-rw-r--r--  drivers/net/wireless/mediatek/mt76/util.c | 78
-rw-r--r--  drivers/net/wireless/mediatek/mt76/util.h | 44
-rw-r--r--  drivers/net/wireless/quantenna/qtnfmac/cfg80211.c | 191
-rw-r--r--  drivers/net/wireless/quantenna/qtnfmac/cfg80211.h | 19
-rw-r--r--  drivers/net/wireless/quantenna/qtnfmac/commands.c | 581
-rw-r--r--  drivers/net/wireless/quantenna/qtnfmac/commands.h | 5
-rw-r--r--  drivers/net/wireless/quantenna/qtnfmac/core.c | 152
-rw-r--r--  drivers/net/wireless/quantenna/qtnfmac/core.h | 23
-rw-r--r--  drivers/net/wireless/quantenna/qtnfmac/event.c | 80
-rw-r--r--  drivers/net/wireless/quantenna/qtnfmac/pearl/pcie.c | 7
-rw-r--r--  drivers/net/wireless/quantenna/qtnfmac/qlink.h | 351
-rw-r--r--  drivers/net/wireless/quantenna/qtnfmac/qlink_util.c | 78
-rw-r--r--  drivers/net/wireless/quantenna/qtnfmac/qlink_util.h | 4
-rw-r--r--  drivers/net/wireless/quantenna/qtnfmac/util.c | 8
-rw-r--r--  drivers/net/wireless/quantenna/qtnfmac/util.h | 4
-rw-r--r--  drivers/net/wireless/ralink/rt2x00/rt2800lib.c | 2
-rw-r--r--  drivers/net/wireless/ralink/rt2x00/rt2x00mac.c | 32
-rw-r--r--  drivers/net/wireless/ralink/rt2x00/rt2x00queue.c | 12
-rw-r--r--  drivers/net/wireless/realtek/rtl818x/rtl8187/rtl8225.c | 5
-rw-r--r--  drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.h | 2
-rw-r--r--  drivers/net/wireless/realtek/rtlwifi/base.c | 164
-rw-r--r--  drivers/net/wireless/realtek/rtlwifi/base.h | 2
-rw-r--r--  drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8192e2ant.c | 203
-rw-r--r--  drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8192e2ant.h | 3
-rw-r--r--  drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8723b1ant.c | 269
-rw-r--r--  drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8723b1ant.h | 3
-rw-r--r--  drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8723b2ant.c | 206
-rw-r--r--  drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8723b2ant.h | 3
-rw-r--r--  drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8821a1ant.c | 285
-rw-r--r--  drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8821a1ant.h | 3
-rw-r--r--  drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8821a2ant.c | 177
-rw-r--r--  drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8821a2ant.h | 3
-rw-r--r--  drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtcoutsrc.c | 663
-rw-r--r--  drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtcoutsrc.h | 252
-rw-r--r--  drivers/net/wireless/realtek/rtlwifi/btcoexist/rtl_btc.c | 325
-rw-r--r--  drivers/net/wireless/realtek/rtlwifi/btcoexist/rtl_btc.h | 12
-rw-r--r--  drivers/net/wireless/realtek/rtlwifi/core.c | 52
-rw-r--r--  drivers/net/wireless/realtek/rtlwifi/debug.c | 483
-rw-r--r--  drivers/net/wireless/realtek/rtlwifi/debug.h | 12
-rw-r--r--  drivers/net/wireless/realtek/rtlwifi/efuse.c | 4
-rw-r--r--  drivers/net/wireless/realtek/rtlwifi/pci.c | 94
-rw-r--r--  drivers/net/wireless/realtek/rtlwifi/pci.h | 5
-rw-r--r--  drivers/net/wireless/realtek/rtlwifi/ps.c | 29
-rw-r--r--  drivers/net/wireless/realtek/rtlwifi/rc.c | 16
-rw-r--r--  drivers/net/wireless/realtek/rtlwifi/rtl8188ee/fw.c | 2
-rw-r--r--  drivers/net/wireless/realtek/rtlwifi/rtl8188ee/hw.c | 11
-rw-r--r--  drivers/net/wireless/realtek/rtlwifi/rtl8188ee/hw.h | 3
-rw-r--r--  drivers/net/wireless/realtek/rtlwifi/rtl8192ce/hw.c | 12
-rw-r--r--  drivers/net/wireless/realtek/rtlwifi/rtl8192ce/hw.h | 3
-rw-r--r--  drivers/net/wireless/realtek/rtlwifi/rtl8192de/hw.c | 12
-rw-r--r--  drivers/net/wireless/realtek/rtlwifi/rtl8192de/hw.h | 3
-rw-r--r--  drivers/net/wireless/realtek/rtlwifi/rtl8192ee/hw.c | 11
-rw-r--r--  drivers/net/wireless/realtek/rtlwifi/rtl8192ee/hw.h | 3
-rw-r--r--  drivers/net/wireless/realtek/rtlwifi/rtl8192ee/trx.c | 57
-rw-r--r--  drivers/net/wireless/realtek/rtlwifi/rtl8192se/hw.c | 12
-rw-r--r--  drivers/net/wireless/realtek/rtlwifi/rtl8192se/hw.h | 3
-rw-r--r--  drivers/net/wireless/realtek/rtlwifi/rtl8723ae/hw.c | 7
-rw-r--r--  drivers/net/wireless/realtek/rtlwifi/rtl8723ae/hw.h | 3
-rw-r--r--  drivers/net/wireless/realtek/rtlwifi/rtl8723be/hw.c | 22
-rw-r--r--  drivers/net/wireless/realtek/rtlwifi/rtl8723be/hw.h | 3
-rw-r--r--  drivers/net/wireless/realtek/rtlwifi/rtl8723be/phy.c | 12
-rw-r--r--  drivers/net/wireless/realtek/rtlwifi/rtl8723be/table.c | 14
-rw-r--r--  drivers/net/wireless/realtek/rtlwifi/rtl8723com/fw_common.c | 4
-rw-r--r--  drivers/net/wireless/realtek/rtlwifi/rtl8821ae/hw.c | 11
-rw-r--r--  drivers/net/wireless/realtek/rtlwifi/rtl8821ae/hw.h | 3
-rw-r--r--  drivers/net/wireless/realtek/rtlwifi/rtl8821ae/table.c | 34
-rw-r--r--  drivers/net/wireless/realtek/rtlwifi/usb.c | 1
-rw-r--r--  drivers/net/wireless/realtek/rtlwifi/wifi.h | 68
-rw-r--r--  drivers/net/wireless/ti/wl1251/init.c | 4
-rw-r--r--  drivers/net/wireless/ti/wl1251/main.c | 3
-rw-r--r--  drivers/net/wireless/ti/wlcore/acx.c | 2
-rw-r--r--  drivers/net/wireless/ti/wlcore/acx.h | 2
-rw-r--r--  drivers/net/wireless/ti/wlcore/main.c | 41
-rw-r--r--  drivers/net/wireless/ti/wlcore/tx.c | 4
-rw-r--r--  drivers/of/of_mdio.c | 5
-rw-r--r--  drivers/pci/pci.c | 25
-rw-r--r--  drivers/s390/net/Kconfig | 3
-rw-r--r--  drivers/s390/net/lcs.c | 10
-rw-r--r--  drivers/s390/net/lcs.h | 3
-rw-r--r--  drivers/s390/net/qeth_core.h | 42
-rw-r--r--  drivers/s390/net/qeth_core_main.c | 105
-rw-r--r--  drivers/s390/net/qeth_core_mpc.h | 13
-rw-r--r--  drivers/s390/net/qeth_l2.h | 3
-rw-r--r--  drivers/s390/net/qeth_l2_main.c | 92
-rw-r--r--  drivers/s390/net/qeth_l3.h | 15
-rw-r--r--  drivers/s390/net/qeth_l3_main.c | 510
-rw-r--r--  drivers/s390/net/qeth_l3_sys.c | 114
-rw-r--r--  drivers/scsi/qedf/drv_fcoe_fw_funcs.c | 35
-rw-r--r--  drivers/scsi/qedf/drv_fcoe_fw_funcs.h | 2
-rw-r--r--  drivers/scsi/qedf/qedf.h | 4
-rw-r--r--  drivers/scsi/qedf/qedf_els.c | 2
-rw-r--r--  drivers/scsi/qedf/qedf_hsi.h | 68
-rw-r--r--  drivers/scsi/qedf/qedf_io.c | 35
-rw-r--r--  drivers/scsi/qedf/qedf_main.c | 12
-rw-r--r--  drivers/scsi/qedf/qedf_version.h | 8
-rw-r--r--  drivers/scsi/qedi/qedi_debugfs.c | 4
-rw-r--r--  drivers/scsi/qedi/qedi_fw.c | 59
-rw-r--r--  drivers/scsi/qedi/qedi_fw_api.c | 139
-rw-r--r--  drivers/scsi/qedi/qedi_fw_iscsi.h | 2
-rw-r--r--  drivers/scsi/qedi/qedi_gbl.h | 5
-rw-r--r--  drivers/scsi/qedi/qedi_iscsi.c | 9
-rw-r--r--  drivers/scsi/qedi/qedi_iscsi.h | 2
-rw-r--r--  drivers/scsi/qedi/qedi_main.c | 29
-rw-r--r--  drivers/scsi/qedi/qedi_version.h | 8
-rw-r--r--  drivers/ssb/Kconfig | 9
-rw-r--r--  drivers/tty/serdev/core.c | 12
-rw-r--r--  drivers/tty/serdev/serdev-ttyport.c | 24
-rw-r--r--  drivers/vhost/net.c | 68
-rw-r--r--  fs/btrfs/disk-io.c | 2
-rw-r--r--  fs/btrfs/free-space-cache.c | 2
-rw-r--r--  fs/gfs2/glock.c | 7
-rw-r--r--  fs/nsfs.c | 29
-rw-r--r--  include/asm-generic/error-injection.h | 35
-rw-r--r--  include/asm-generic/vmlinux.lds.h | 10
-rw-r--r--  include/linux/acpi.h | 3
-rw-r--r--  include/linux/bpf.h | 94
-rw-r--r--  include/linux/bpf_types.h | 2
-rw-r--r--  include/linux/bpf_verifier.h | 63
-rw-r--r--  include/linux/brcmphy.h | 1
-rw-r--r--  include/linux/can/dev.h | 7
-rw-r--r--  include/linux/dsa/lan9303.h | 3
-rw-r--r--  include/linux/error-injection.h | 27
-rw-r--r--  include/linux/filter.h | 40
-rw-r--r--  include/linux/hyperv.h | 22
-rw-r--r--  include/linux/if_link.h | 2
-rw-r--r--  include/linux/if_macvlan.h | 2
-rw-r--r--  include/linux/if_tap.h | 6
-rw-r--r--  include/linux/if_tun.h | 21
-rw-r--r--  include/linux/inetdevice.h | 2
-rw-r--r--  include/linux/mdio.h | 8
-rw-r--r--  include/linux/mlx5/fs.h | 4
-rw-r--r--  include/linux/mlx5/mlx5_ifc.h | 41
-rw-r--r--  include/linux/mlx5/transobj.h | 23
-rw-r--r--  include/linux/module.h | 6
-rw-r--r--  include/linux/net.h | 1
-rw-r--r--  include/linux/net_dim.h | 380
-rw-r--r--  include/linux/netdev_features.h | 3
-rw-r--r--  include/linux/netdevice.h | 54
-rw-r--r--  include/linux/netfilter.h | 116
-rw-r--r--  include/linux/netfilter/ipset/ip_set.h | 6
-rw-r--r--  include/linux/netfilter/ipset/ip_set_counter.h | 25
-rw-r--r--  include/linux/netfilter/x_tables.h | 2
-rw-r--r--  include/linux/netfilter_defs.h | 12
-rw-r--r--  include/linux/netfilter_ipv4.h | 46
-rw-r--r--  include/linux/netfilter_ipv6.h | 19
-rw-r--r--  include/linux/pci.h | 1
-rw-r--r--  include/linux/phy.h | 145
-rw-r--r--  include/linux/phy_fixed.h | 9
-rw-r--r--  include/linux/phylink.h | 201
-rw-r--r--  include/linux/proc_ns.h | 3
-rw-r--r--  include/linux/property.h | 11
-rw-r--r--  include/linux/ptr_ring.h | 79
-rw-r--r--  include/linux/qed/common_hsi.h | 1264
-rw-r--r--  include/linux/qed/eth_common.h | 396
-rw-r--r--  include/linux/qed/fcoe_common.h | 940
-rw-r--r--  include/linux/qed/iscsi_common.h | 1585
-rw-r--r--  include/linux/qed/iwarp_common.h | 17
-rw-r--r--  include/linux/qed/qed_eth_if.h | 38
-rw-r--r--  include/linux/qed/qed_if.h | 36
-rw-r--r--  include/linux/qed/qed_iscsi_if.h | 2
-rw-r--r--  include/linux/qed/qed_ll2_if.h | 2
-rw-r--r--  include/linux/qed/rdma_common.h | 25
-rw-r--r--  include/linux/qed/roce_common.h | 15
-rw-r--r--  include/linux/qed/storage_common.h | 91
-rw-r--r--  include/linux/qed/tcp_common.h | 165
-rw-r--r--  include/linux/rhashtable.h | 38
-rw-r--r--  include/linux/rtnetlink.h | 15
-rw-r--r--  include/linux/sctp.h | 37
-rw-r--r--  include/linux/serdev.h | 10
-rw-r--r--  include/linux/sfp.h | 94
-rw-r--r--  include/linux/sh_eth.h | 3
-rw-r--r--  include/linux/skb_array.h | 7
-rw-r--r--  include/linux/skbuff.h | 5
-rw-r--r--  include/linux/spinlock.h | 6
-rw-r--r--  include/linux/tcp.h | 13
-rw-r--r--  include/linux/trace_events.h | 7
-rw-r--r--  include/net/act_api.h | 15
-rw-r--r--  include/net/addrconf.h | 2
-rw-r--r--  include/net/caif/cfpkt.h | 27
-rw-r--r--  include/net/cfg80211.h | 17
-rw-r--r--  include/net/devlink.h | 115
-rw-r--r--  include/net/dn_route.h | 1
-rw-r--r--  include/net/dsa.h | 61
-rw-r--r--  include/net/dst.h | 39
-rw-r--r--  include/net/erspan.h | 240
-rw-r--r--  include/net/gen_stats.h | 3
-rw-r--r--  include/net/inet_connection_sock.h | 2
-rw-r--r--  include/net/inet_hashtables.h | 29
-rw-r--r--  include/net/inet_sock.h | 25
-rw-r--r--  include/net/inet_timewait_sock.h | 4
-rw-r--r--  include/net/ip.h | 9
-rw-r--r--  include/net/ip6_fib.h | 20
-rw-r--r--  include/net/ip6_route.h | 11
-rw-r--r--  include/net/ip6_tunnel.h | 4
-rw-r--r--  include/net/ip_tunnels.h | 5
-rw-r--r--  include/net/ip_vs.h | 3
-rw-r--r--  include/net/ipv6.h | 19
-rw-r--r--  include/net/mac80211.h | 10
-rw-r--r--  include/net/net_namespace.h | 10
-rw-r--r--  include/net/netfilter/ipv4/nf_conntrack_ipv4.h | 12
-rw-r--r--  include/net/netfilter/ipv6/nf_conntrack_ipv6.h | 12
-rw-r--r--  include/net/netfilter/nf_conntrack_count.h | 17
-rw-r--r--  include/net/netfilter/nf_conntrack_l4proto.h | 19
-rw-r--r--  include/net/netfilter/nf_flow_table.h | 122
-rw-r--r--  include/net/netfilter/nf_queue.h | 2
-rw-r--r--  include/net/netfilter/nf_tables.h | 129
-rw-r--r--  include/net/netfilter/nf_tables_ipv4.h | 27
-rw-r--r--  include/net/netfilter/nf_tables_ipv6.h | 29
-rw-r--r--  include/net/netns/can.h | 4
-rw-r--r--  include/net/netns/core.h | 5
-rw-r--r--  include/net/netns/netfilter.h | 12
-rw-r--r--  include/net/netns/nftables.h | 8
-rw-r--r--  include/net/netns/sctp.h | 5
-rw-r--r--  include/net/pkt_cls.h | 113
-rw-r--r--  include/net/pkt_sched.h | 17
-rw-r--r--  include/net/route.h | 2
-rw-r--r--  include/net/rtnetlink.h | 4
-rw-r--r--  include/net/sch_generic.h | 150
-rw-r--r--  include/net/sctp/constants.h | 9
-rw-r--r--  include/net/sctp/sctp.h | 6
-rw-r--r--  include/net/sctp/sm.h | 18
-rw-r--r--  include/net/sctp/stream_interleave.h | 61
-rw-r--r--  include/net/sctp/structs.h | 68
-rw-r--r--  include/net/sctp/ulpevent.h | 23
-rw-r--r--  include/net/sctp/ulpqueue.h | 10
-rw-r--r--  include/net/sock.h | 57
-rw-r--r--  include/net/tc_act/tc_csum.h | 16
-rw-r--r--  include/net/tc_act/tc_mirred.h | 6
-rw-r--r--  include/net/tcp.h | 52
-rw-r--r--  include/net/tls.h | 2
-rw-r--r--  include/net/vxlan.h | 2
-rw-r--r--  include/net/wext.h | 4
-rw-r--r--  include/net/xdp.h | 48
-rw-r--r--  include/net/xfrm.h | 79
-rw-r--r--  include/trace/events/bridge.h | 4
-rw-r--r--  include/trace/events/net_probe_common.h | 44
-rw-r--r--  include/trace/events/sctp.h | 99
-rw-r--r--  include/trace/events/sock.h | 117
-rw-r--r--  include/trace/events/tcp.h | 76
-rw-r--r--  include/uapi/linux/batadv_packet.h (renamed from net/batman-adv/packet.h) | 269
-rw-r--r--  include/uapi/linux/batman_adv.h | 27
-rw-r--r--  include/uapi/linux/bpf.h | 116
-rw-r--r--  include/uapi/linux/bpf_common.h | 7
-rw-r--r--  include/uapi/linux/can/netlink.h | 1
-rw-r--r--  include/uapi/linux/devlink.h | 25
-rw-r--r--  include/uapi/linux/erspan.h | 52
-rw-r--r--  include/uapi/linux/ethtool.h | 1
-rw-r--r--  include/uapi/linux/if_ether.h | 1
-rw-r--r--  include/uapi/linux/if_link.h | 5
-rw-r--r--  include/uapi/linux/if_macsec.h | 9
-rw-r--r--  include/uapi/linux/if_tun.h | 2
-rw-r--r--  include/uapi/linux/if_tunnel.h | 3
-rw-r--r--  include/uapi/linux/inet_diag.h | 2
-rw-r--r--  include/uapi/linux/l2tp.h | 6
-rw-r--r--  include/uapi/linux/netfilter/nf_conntrack_common.h | 6
-rw-r--r--  include/uapi/linux/netfilter/nf_tables.h | 76
-rw-r--r--  include/uapi/linux/netfilter/xt_connlimit.h | 2
-rw-r--r--  include/uapi/linux/netfilter_arp.h | 3
-rw-r--r--  include/uapi/linux/netfilter_decnet.h | 4
-rw-r--r--  include/uapi/linux/netfilter_ipv4.h | 1
-rw-r--r--  include/uapi/linux/netfilter_ipv6.h | 1
-rw-r--r--  include/uapi/linux/netfilter_ipv6/ip6t_srh.h | 57
-rw-r--r--  include/uapi/linux/nl80211.h | 4
-rw-r--r--  include/uapi/linux/openvswitch.h | 1
-rw-r--r--  include/uapi/linux/perf_event.h | 22
-rw-r--r--  include/uapi/linux/rtnetlink.h | 12
-rw-r--r--  include/uapi/linux/sctp.h | 3
-rw-r--r--  include/uapi/linux/tipc.h | 7
-rw-r--r--  include/uapi/linux/virtio_net.h | 13
-rw-r--r--  kernel/Makefile | 1
-rw-r--r--  kernel/bpf/Makefile | 2
-rw-r--r--  kernel/bpf/arraymap.c | 49
-rw-r--r--  kernel/bpf/cgroup.c | 15
-rw-r--r--  kernel/bpf/core.c | 409
-rw-r--r--  kernel/bpf/cpumap.c | 31
-rw-r--r--  kernel/bpf/devmap.c | 8
-rw-r--r--  kernel/bpf/disasm.c | 63
-rw-r--r--  kernel/bpf/disasm.h | 29
-rw-r--r--  kernel/bpf/hashtab.c | 103
-rw-r--r--  kernel/bpf/lpm_trie.c | 98
-rw-r--r--  kernel/bpf/offload.c | 430
-rw-r--r--  kernel/bpf/sockmap.c | 16
-rw-r--r--  kernel/bpf/stackmap.c | 34
-rw-r--r--  kernel/bpf/syscall.c | 214
-rw-r--r--  kernel/bpf/verifier.c | 1451
-rw-r--r--  kernel/events/core.c | 10
-rw-r--r--  kernel/fail_function.c | 349
-rw-r--r--  kernel/module.c | 6
-rw-r--r--  kernel/trace/Kconfig | 9
-rw-r--r--  kernel/trace/bpf_trace.c | 59
-rw-r--r--  kernel/trace/trace_kprobe.c | 61
-rw-r--r--  kernel/trace/trace_probe.h | 12
-rw-r--r--  lib/Kconfig.debug | 14
-rw-r--r--  lib/Makefile | 3
-rw-r--r--  lib/bucket_locks.c | 54
-rw-r--r--  lib/error-inject.c | 242
-rw-r--r--  lib/rhashtable.c | 160
-rw-r--r--  lib/test_bpf.c | 112
-rw-r--r--  lib/test_rhashtable.c | 6
-rw-r--r--  net/8021q/vlanproc.c | 2
-rw-r--r--  net/Kconfig | 18
-rw-r--r--  net/appletalk/aarp.c | 1
-rw-r--r--  net/appletalk/atalk_proc.c | 3
-rw-r--r--  net/atm/br2684.c | 1
-rw-r--r--  net/atm/common.c | 2
-rw-r--r--  net/atm/lec.c | 1
-rw-r--r--  net/atm/mpc.c | 9
-rw-r--r--  net/atm/mpoa_caches.c | 48
-rw-r--r--  net/atm/mpoa_caches.h | 9
-rw-r--r--  net/atm/mpoa_proc.c | 16
-rw-r--r--  net/atm/proc.c | 1
-rw-r--r--  net/ax25/af_ax25.c | 1
-rw-r--r--  net/ax25/ax25_route.c | 1
-rw-r--r--  net/ax25/ax25_uid.c | 1
-rw-r--r--  net/batman-adv/Kconfig | 17
-rw-r--r--  net/batman-adv/Makefile | 2
-rw-r--r--  net/batman-adv/bat_algo.c | 35
-rw-r--r--  net/batman-adv/bat_algo.h | 1
-rw-r--r--  net/batman-adv/bat_iv_ogm.c | 107
-rw-r--r--  net/batman-adv/bat_iv_ogm.h | 1
-rw-r--r--  net/batman-adv/bat_v.c | 51
-rw-r--r--  net/batman-adv/bat_v.h | 1
-rw-r--r--  net/batman-adv/bat_v_elp.c | 29
-rw-r--r--  net/batman-adv/bat_v_elp.h | 1
-rw-r--r--  net/batman-adv/bat_v_ogm.c | 39
-rw-r--r--  net/batman-adv/bat_v_ogm.h | 1
-rw-r--r--  net/batman-adv/bitarray.c | 3
-rw-r--r--  net/batman-adv/bitarray.h | 10
-rw-r--r--  net/batman-adv/bridge_loop_avoidance.c | 114
-rw-r--r--  net/batman-adv/bridge_loop_avoidance.h | 5
-rw-r--r--  net/batman-adv/debugfs.c | 30
-rw-r--r--  net/batman-adv/debugfs.h | 1
-rw-r--r--  net/batman-adv/distributed-arp-table.c | 80
-rw-r--r--  net/batman-adv/distributed-arp-table.h | 9
-rw-r--r--  net/batman-adv/fragmentation.c | 25
-rw-r--r--  net/batman-adv/fragmentation.h | 3
-rw-r--r--  net/batman-adv/gateway_client.c | 67
-rw-r--r--  net/batman-adv/gateway_client.h | 1
-rw-r--r--  net/batman-adv/gateway_common.c | 30
-rw-r--r--  net/batman-adv/gateway_common.h | 6
-rw-r--r--  net/batman-adv/hard-interface.c | 67
-rw-r--r--  net/batman-adv/hard-interface.h | 59
-rw-r--r--  net/batman-adv/hash.c | 20
-rw-r--r--  net/batman-adv/hash.h | 28
-rw-r--r--  net/batman-adv/icmp_socket.c | 17
-rw-r--r--  net/batman-adv/icmp_socket.h | 1
-rw-r--r--  net/batman-adv/log.c | 19
-rw-r--r--  net/batman-adv/log.h | 62
-rw-r--r--  net/batman-adv/main.c | 54
-rw-r--r--  net/batman-adv/main.h | 127
-rw-r--r--  net/batman-adv/multicast.c | 83
-rw-r--r--  net/batman-adv/multicast.h | 17
-rw-r--r--  net/batman-adv/netlink.c | 27
-rw-r--r--  net/batman-adv/netlink.h | 1
-rw-r--r--  net/batman-adv/network-coding.c | 126
-rw-r--r--  net/batman-adv/network-coding.h | 1
-rw-r--r--  net/batman-adv/originator.c | 154
-rw-r--r--  net/batman-adv/originator.h | 47
-rw-r--r--  net/batman-adv/routing.c | 56
-rw-r--r--  net/batman-adv/routing.h | 1
-rw-r--r--  net/batman-adv/send.c | 66
-rw-r--r--  net/batman-adv/send.h | 8
-rw-r--r--  net/batman-adv/soft-interface.c | 66
-rw-r--r--  net/batman-adv/soft-interface.h | 1
-rw-r--r--  net/batman-adv/sysfs.c | 58
-rw-r--r--  net/batman-adv/sysfs.h | 14
-rw-r--r--  net/batman-adv/tp_meter.c | 77
-rw-r--r--  net/batman-adv/tp_meter.h | 1
-rw-r--r--  net/batman-adv/translation-table.c | 234
-rw-r--r--  net/batman-adv/translation-table.h | 1
-rw-r--r--  net/batman-adv/tvlv.c | 43
-rw-r--r--  net/batman-adv/tvlv.h | 1
-rw-r--r--  net/batman-adv/types.h | 1996
-rw-r--r--  net/bluetooth/af_bluetooth.c | 38
-rw-r--r--  net/bluetooth/cmtp/capi.c | 1
-rw-r--r--  net/bluetooth/hci_debugfs.c | 201
-rw-r--r--  net/bluetooth/hci_request.c | 64
-rw-r--r--  net/bridge/br_device.c | 10
-rw-r--r--  net/bridge/br_fdb.c | 392
-rw-r--r--  net/bridge/br_mdb.c | 6
-rw-r--r--net/bridge/br_netfilter_hooks.c2
-rw-r--r--net/bridge/br_nf_core.c1
-rw-r--r--net/bridge/br_private.h18
-rw-r--r--net/bridge/br_switchdev.c8
-rw-r--r--net/bridge/br_sysfs_br.c13
-rw-r--r--net/bridge/netfilter/Kconfig2
-rw-r--r--net/bridge/netfilter/ebtables.c2
-rw-r--r--net/bridge/netfilter/nf_tables_bridge.c120
-rw-r--r--net/caif/cfctrl.c50
-rw-r--r--net/caif/cfpkt_skbuff.c1
-rw-r--r--net/caif/chnl_net.c1
-rw-r--r--net/can/Kconfig2
-rw-r--r--net/can/af_can.c20
-rw-r--r--net/can/af_can.h2
-rw-r--r--net/can/bcm.c1
-rw-r--r--net/can/gw.c14
-rw-r--r--net/can/proc.c14
-rw-r--r--net/can/raw.c2
-rw-r--r--net/core/Makefile2
-rw-r--r--net/core/dev.c260
-rw-r--r--net/core/dev_ioctl.c132
-rw-r--r--net/core/devlink.c596
-rw-r--r--net/core/dst.c14
-rw-r--r--net/core/ethtool.c14
-rw-r--r--net/core/filter.c359
-rw-r--r--net/core/flow_dissector.c69
-rw-r--r--net/core/gen_estimator.c4
-rw-r--r--net/core/gen_stats.c9
-rw-r--r--net/core/link_watch.c2
-rw-r--r--net/core/neighbour.c1
-rw-r--r--net/core/net-procfs.c4
-rw-r--r--net/core/net-sysfs.c56
-rw-r--r--net/core/net_namespace.c83
-rw-r--r--net/core/pktgen.c281
-rw-r--r--net/core/rtnetlink.c482
-rw-r--r--net/core/skbuff.c14
-rw-r--r--net/core/sock.c56
-rw-r--r--net/core/sock_reuseport.c4
-rw-r--r--net/core/sysctl_net_core.c60
-rw-r--r--net/core/xdp.c73
-rw-r--r--net/dccp/Kconfig17
-rw-r--r--net/dccp/Makefile5
-rw-r--r--net/dccp/ackvec.c2
-rw-r--r--net/dccp/minisocks.c7
-rw-r--r--net/dccp/probe.c203
-rw-r--r--net/dccp/proto.c7
-rw-r--r--net/dccp/trace.h84
-rw-r--r--net/decnet/af_decnet.c1
-rw-r--r--net/decnet/dn_dev.c10
-rw-r--r--net/decnet/dn_fib.c6
-rw-r--r--net/decnet/dn_neigh.c1
-rw-r--r--net/decnet/dn_route.c43
-rw-r--r--net/dsa/Kconfig9
-rw-r--r--net/dsa/Makefile3
-rw-r--r--net/dsa/dsa2.c9
-rw-r--r--net/dsa/dsa_priv.h13
-rw-r--r--net/dsa/legacy.c24
-rw-r--r--net/dsa/port.c103
-rw-r--r--net/dsa/slave.c25
-rw-r--r--net/dsa/switch.c111
-rw-r--r--net/dsa/tag_brcm.c12
-rw-r--r--net/dsa/tag_mtk.c38
-rw-r--r--net/ipv4/Makefile1
-rw-r--r--net/ipv4/af_inet.c45
-rw-r--r--net/ipv4/arp.c1
-rw-r--r--net/ipv4/devinet.c61
-rw-r--r--net/ipv4/esp4.c36
-rw-r--r--net/ipv4/esp4_offload.c75
-rw-r--r--net/ipv4/fib_frontend.c8
-rw-r--r--net/ipv4/fib_trie.c3
-rw-r--r--net/ipv4/igmp.c2
-rw-r--r--net/ipv4/inet_connection_sock.c8
-rw-r--r--net/ipv4/inet_diag.c8
-rw-r--r--net/ipv4/inet_hashtables.c186
-rw-r--r--net/ipv4/inet_timewait_sock.c31
-rw-r--r--net/ipv4/ip_gre.c169
-rw-r--r--net/ipv4/ip_sockglue.c6
-rw-r--r--net/ipv4/ip_tunnel.c13
-rw-r--r--net/ipv4/ipconfig.c48
-rw-r--r--net/ipv4/ipmr.c4
-rw-r--r--net/ipv4/netfilter.c62
-rw-r--r--net/ipv4/netfilter/Kconfig13
-rw-r--r--net/ipv4/netfilter/Makefile9
-rw-r--r--net/ipv4/netfilter/arp_tables.c27
-rw-r--r--net/ipv4/netfilter/ip_tables.c27
-rw-r--r--net/ipv4/netfilter/ipt_CLUSTERIP.c1
-rw-r--r--net/ipv4/netfilter/iptable_filter.c6
-rw-r--r--net/ipv4/netfilter/iptable_mangle.c5
-rw-r--r--net/ipv4/netfilter/iptable_nat.c4
-rw-r--r--net/ipv4/netfilter/iptable_raw.c37
-rw-r--r--net/ipv4/netfilter/iptable_security.c6
-rw-r--r--net/ipv4/netfilter/nf_conntrack_l3proto_ipv4.c7
-rw-r--r--net/ipv4/netfilter/nf_conntrack_proto_icmp.c4
-rw-r--r--net/ipv4/netfilter/nf_defrag_ipv4.c2
-rw-r--r--net/ipv4/netfilter/nf_flow_table_ipv4.c284
-rw-r--r--net/ipv4/netfilter/nf_nat_l3proto_ipv4.c10
-rw-r--r--net/ipv4/netfilter/nf_nat_snmp_basic.asn1177
-rw-r--r--net/ipv4/netfilter/nf_nat_snmp_basic.c1286
-rw-r--r--net/ipv4/netfilter/nf_nat_snmp_basic_main.c235
-rw-r--r--net/ipv4/netfilter/nf_tables_arp.c62
-rw-r--r--net/ipv4/netfilter/nf_tables_ipv4.c83
-rw-r--r--net/ipv4/netfilter/nft_chain_nat_ipv4.c3
-rw-r--r--net/ipv4/netfilter/nft_chain_route_ipv4.c8
-rw-r--r--net/ipv4/proc.c3
-rw-r--r--net/ipv4/raw.c16
-rw-r--r--net/ipv4/route.c5
-rw-r--r--net/ipv4/tcp.c73
-rw-r--r--net/ipv4/tcp_bbr.c3
-rw-r--r--net/ipv4/tcp_diag.c2
-rw-r--r--net/ipv4/tcp_fastopen.c30
-rw-r--r--net/ipv4/tcp_input.c58
-rw-r--r--net/ipv4/tcp_ipv4.c5
-rw-r--r--net/ipv4/tcp_metrics.c7
-rw-r--r--net/ipv4/tcp_minisocks.c7
-rw-r--r--net/ipv4/tcp_nv.c4
-rw-r--r--net/ipv4/tcp_output.c18
-rw-r--r--net/ipv4/tcp_probe.c301
-rw-r--r--net/ipv4/tcp_timer.c24
-rw-r--r--net/ipv4/udp.c62
-rw-r--r--net/ipv4/udplite.c1
-rw-r--r--net/ipv4/xfrm4_mode_tunnel.c7
-rw-r--r--net/ipv6/addrconf.c90
-rw-r--r--net/ipv6/addrlabel.c25
-rw-r--r--net/ipv6/af_inet6.c11
-rw-r--r--net/ipv6/anycast.c1
-rw-r--r--net/ipv6/datagram.c3
-rw-r--r--net/ipv6/esp6.c36
-rw-r--r--net/ipv6/esp6_offload.c82
-rw-r--r--net/ipv6/ila/ila_xlat.c4
-rw-r--r--net/ipv6/inet6_hashtables.c77
-rw-r--r--net/ipv6/ip6_fib.c66
-rw-r--r--net/ipv6/ip6_flowlabel.c1
-rw-r--r--net/ipv6/ip6_gre.c638
-rw-r--r--net/ipv6/ip6_output.c15
-rw-r--r--net/ipv6/ip6_tunnel.c5
-rw-r--r--net/ipv6/ip6_vti.c20
-rw-r--r--net/ipv6/ip6mr.c12
-rw-r--r--net/ipv6/mcast.c4
-rw-r--r--net/ipv6/ndisc.c5
-rw-r--r--net/ipv6/netfilter.c44
-rw-r--r--net/ipv6/netfilter/Kconfig18
-rw-r--r--net/ipv6/netfilter/Makefile4
-rw-r--r--net/ipv6/netfilter/ip6_tables.c27
-rw-r--r--net/ipv6/netfilter/ip6t_srh.c161
-rw-r--r--net/ipv6/netfilter/ip6table_mangle.c8
-rw-r--r--net/ipv6/netfilter/ip6table_nat.c4
-rw-r--r--net/ipv6/netfilter/ip6table_raw.c31
-rw-r--r--net/ipv6/netfilter/nf_conntrack_l3proto_ipv6.c7
-rw-r--r--net/ipv6/netfilter/nf_conntrack_proto_icmpv6.c4
-rw-r--r--net/ipv6/netfilter/nf_conntrack_reasm.c15
-rw-r--r--net/ipv6/netfilter/nf_defrag_ipv6_hooks.c3
-rw-r--r--net/ipv6/netfilter/nf_flow_table_ipv6.c277
-rw-r--r--net/ipv6/netfilter/nf_nat_l3proto_ipv6.c8
-rw-r--r--net/ipv6/netfilter/nf_tables_ipv6.c82
-rw-r--r--net/ipv6/netfilter/nft_chain_nat_ipv6.c3
-rw-r--r--net/ipv6/netfilter/nft_chain_route_ipv6.c3
-rw-r--r--net/ipv6/netfilter/nft_fib_ipv6.c12
-rw-r--r--net/ipv6/proc.c3
-rw-r--r--net/ipv6/raw.c1
-rw-r--r--net/ipv6/route.c567
-rw-r--r--net/ipv6/seg6.c4
-rw-r--r--net/ipv6/seg6_local.c2
-rw-r--r--net/ipv6/tcp_ipv6.c6
-rw-r--r--net/ipv6/udp.c55
-rw-r--r--net/ipv6/udplite.c1
-rw-r--r--net/ipv6/xfrm6_mode_tunnel.c7
-rw-r--r--net/ipv6/xfrm6_policy.c2
-rw-r--r--net/ipx/ipx_proc.c3
-rw-r--r--net/kcm/kcmproc.c2
-rw-r--r--net/l2tp/l2tp_core.c54
-rw-r--r--net/l2tp/l2tp_core.h16
-rw-r--r--net/l2tp/l2tp_debugfs.c4
-rw-r--r--net/l2tp/l2tp_netlink.c39
-rw-r--r--net/l2tp/l2tp_ppp.c1
-rw-r--r--net/llc/llc_proc.c2
-rw-r--r--net/mac80211/agg-rx.c26
-rw-r--r--net/mac80211/agg-tx.c34
-rw-r--r--net/mac80211/cfg.c31
-rw-r--r--net/mac80211/debugfs.c1
-rw-r--r--net/mac80211/debugfs_sta.c4
-rw-r--r--net/mac80211/driver-ops.h3
-rw-r--r--net/mac80211/ht.c1
-rw-r--r--net/mac80211/ieee80211_i.h4
-rw-r--r--net/mac80211/iface.c4
-rw-r--r--net/mac80211/key.c12
-rw-r--r--net/mac80211/main.c3
-rw-r--r--net/mac80211/mesh.c2
-rw-r--r--net/mac80211/mesh_hwmp.c1
-rw-r--r--net/mac80211/mesh_pathtbl.c34
-rw-r--r--net/mac80211/mesh_plink.c2
-rw-r--r--net/mac80211/mlme.c10
-rw-r--r--net/mac80211/offchannel.c4
-rw-r--r--net/mac80211/rx.c17
-rw-r--r--net/mac80211/tdls.c6
-rw-r--r--net/mac80211/tx.c4
-rw-r--r--net/mac80211/util.c19
-rw-r--r--net/mac80211/wme.c1
-rw-r--r--net/mac80211/wpa.c16
-rw-r--r--net/mpls/af_mpls.c15
-rw-r--r--net/ncsi/ncsi-aen.c35
-rw-r--r--net/netfilter/Kconfig33
-rw-r--r--net/netfilter/Makefile9
-rw-r--r--net/netfilter/core.c263
-rw-r--r--net/netfilter/ipset/ip_set_bitmap_gen.h10
-rw-r--r--net/netfilter/ipset/ip_set_bitmap_ip.c8
-rw-r--r--net/netfilter/ipset/ip_set_bitmap_ipmac.c8
-rw-r--r--net/netfilter/ipset/ip_set_bitmap_port.c8
-rw-r--r--net/netfilter/ipset/ip_set_core.c36
-rw-r--r--net/netfilter/ipset/ip_set_hash_gen.h38
-rw-r--r--net/netfilter/ipset/ip_set_list_set.c21
-rw-r--r--net/netfilter/ipvs/ip_vs_app.c1
-rw-r--r--net/netfilter/ipvs/ip_vs_conn.c4
-rw-r--r--net/netfilter/ipvs/ip_vs_core.c12
-rw-r--r--net/netfilter/ipvs/ip_vs_ctl.c3
-rw-r--r--net/netfilter/ipvs/ip_vs_proto_tcp.c1
-rw-r--r--net/netfilter/ipvs/ip_vs_proto_udp.c1
-rw-r--r--net/netfilter/nf_conncount.c373
-rw-r--r--net/netfilter/nf_conntrack_core.c26
-rw-r--r--net/netfilter/nf_conntrack_expect.c1
-rw-r--r--net/netfilter/nf_conntrack_h323_asn1.c40
-rw-r--r--net/netfilter/nf_conntrack_h323_main.c77
-rw-r--r--net/netfilter/nf_conntrack_netlink.c22
-rw-r--r--net/netfilter/nf_conntrack_proto.c18
-rw-r--r--net/netfilter/nf_conntrack_proto_dccp.c21
-rw-r--r--net/netfilter/nf_conntrack_proto_generic.c4
-rw-r--r--net/netfilter/nf_conntrack_proto_gre.c4
-rw-r--r--net/netfilter/nf_conntrack_proto_sctp.c21
-rw-r--r--net/netfilter/nf_conntrack_proto_tcp.c25
-rw-r--r--net/netfilter/nf_conntrack_proto_udp.c10
-rw-r--r--net/netfilter/nf_conntrack_standalone.c14
-rw-r--r--net/netfilter/nf_flow_table.c429
-rw-r--r--net/netfilter/nf_flow_table_inet.c48
-rw-r--r--net/netfilter/nf_internals.h2
-rw-r--r--net/netfilter/nf_log.c1
-rw-r--r--net/netfilter/nf_queue.c96
-rw-r--r--net/netfilter/nf_synproxy_core.c1
-rw-r--r--net/netfilter/nf_tables_api.c1658
-rw-r--r--net/netfilter/nf_tables_inet.c88
-rw-r--r--net/netfilter/nf_tables_netdev.c87
-rw-r--r--net/netfilter/nfnetlink.c4
-rw-r--r--net/netfilter/nfnetlink_acct.c2
-rw-r--r--net/netfilter/nfnetlink_cttimeout.c2
-rw-r--r--net/netfilter/nfnetlink_log.c1
-rw-r--r--net/netfilter/nfnetlink_queue.c10
-rw-r--r--net/netfilter/nft_cmp.c2
-rw-r--r--net/netfilter/nft_compat.c26
-rw-r--r--net/netfilter/nft_ct.c16
-rw-r--r--net/netfilter/nft_dynset.c4
-rw-r--r--net/netfilter/nft_flow_offload.c264
-rw-r--r--net/netfilter/nft_log.c4
-rw-r--r--net/netfilter/nft_masq.c2
-rw-r--r--net/netfilter/nft_meta.c45
-rw-r--r--net/netfilter/nft_nat.c2
-rw-r--r--net/netfilter/nft_redir.c2
-rw-r--r--net/netfilter/nft_rt.c15
-rw-r--r--net/netfilter/nft_set_hash.c10
-rw-r--r--net/netfilter/utils.c90
-rw-r--r--net/netfilter/x_tables.c41
-rw-r--r--net/netfilter/xt_TCPMSS.c5
-rw-r--r--net/netfilter/xt_addrtype.c15
-rw-r--r--net/netfilter/xt_connlimit.c369
-rw-r--r--net/netfilter/xt_hashlimit.c5
-rw-r--r--net/netfilter/xt_ipcomp.c2
-rw-r--r--net/netfilter/xt_policy.c3
-rw-r--r--net/netfilter/xt_set.c119
-rw-r--r--net/netlink/af_netlink.c72
-rw-r--r--net/netlink/diag.c8
-rw-r--r--net/netrom/af_netrom.c1
-rw-r--r--net/netrom/nr_route.c2
-rw-r--r--net/openvswitch/conntrack.c4
-rw-r--r--net/openvswitch/flow.c6
-rw-r--r--net/openvswitch/flow_netlink.c56
-rw-r--r--net/openvswitch/meter.c2
-rw-r--r--net/openvswitch/vport-internal_dev.c10
-rw-r--r--net/packet/af_packet.c4
-rw-r--r--net/phonet/pn_netlink.c21
-rw-r--r--net/phonet/socket.c2
-rw-r--r--net/qrtr/qrtr.c8
-rw-r--r--net/rds/bind.c1
-rw-r--r--net/rds/cong.c10
-rw-r--r--net/rds/connection.c27
-rw-r--r--net/rds/rds.h10
-rw-r--r--net/rds/send.c37
-rw-r--r--net/rds/tcp.c81
-rw-r--r--net/rds/tcp.h1
-rw-r--r--net/rds/tcp_connect.c2
-rw-r--r--net/rds/tcp_recv.c8
-rw-r--r--net/rds/tcp_send.c5
-rw-r--r--net/rds/threads.c20
-rw-r--r--net/rose/af_rose.c1
-rw-r--r--net/rose/rose_route.c3
-rw-r--r--net/rxrpc/proc.c2
-rw-r--r--net/sched/Kconfig3
-rw-r--r--net/sched/act_api.c2
-rw-r--r--net/sched/act_bpf.c10
-rw-r--r--net/sched/act_connmark.c8
-rw-r--r--net/sched/act_csum.c74
-rw-r--r--net/sched/act_gact.c8
-rw-r--r--net/sched/act_ife.c18
-rw-r--r--net/sched/act_ipt.c18
-rw-r--r--net/sched/act_mirred.c27
-rw-r--r--net/sched/act_nat.c8
-rw-r--r--net/sched/act_pedit.c10
-rw-r--r--net/sched/act_police.c12
-rw-r--r--net/sched/act_sample.c10
-rw-r--r--net/sched/act_simple.c10
-rw-r--r--net/sched/act_skbedit.c8
-rw-r--r--net/sched/act_skbmod.c10
-rw-r--r--net/sched/act_tunnel_key.c10
-rw-r--r--net/sched/act_vlan.c10
-rw-r--r--net/sched/cls_api.c695
-rw-r--r--net/sched/cls_basic.c16
-rw-r--r--net/sched/cls_bpf.c58
-rw-r--r--net/sched/cls_cgroup.c12
-rw-r--r--net/sched/cls_flow.c12
-rw-r--r--net/sched/cls_flower.c54
-rw-r--r--net/sched/cls_fw.c19
-rw-r--r--net/sched/cls_matchall.c35
-rw-r--r--net/sched/cls_route.c16
-rw-r--r--net/sched/cls_rsvp.h9
-rw-r--r--net/sched/cls_tcindex.c17
-rw-r--r--net/sched/cls_u32.c133
-rw-r--r--net/sched/sch_api.c305
-rw-r--r--net/sched/sch_atm.c23
-rw-r--r--net/sched/sch_cbq.c78
-rw-r--r--net/sched/sch_cbs.c31
-rw-r--r--net/sched/sch_choke.c8
-rw-r--r--net/sched/sch_codel.c8
-rw-r--r--net/sched/sch_drr.c40
-rw-r--r--net/sched/sch_dsmark.c19
-rw-r--r--net/sched/sch_fifo.c11
-rw-r--r--net/sched/sch_fq.c8
-rw-r--r--net/sched/sch_fq_codel.c13
-rw-r--r--net/sched/sch_generic.c545
-rw-r--r--net/sched/sch_gred.c13
-rw-r--r--net/sched/sch_hfsc.c28
-rw-r--r--net/sched/sch_hhf.c8
-rw-r--r--net/sched/sch_htb.c29
-rw-r--r--net/sched/sch_ingress.c99
-rw-r--r--net/sched/sch_mq.c42
-rw-r--r--net/sched/sch_mqprio.c76
-rw-r--r--net/sched/sch_multiq.c19
-rw-r--r--net/sched/sch_netem.c10
-rw-r--r--net/sched/sch_pie.c8
-rw-r--r--net/sched/sch_plug.c6
-rw-r--r--net/sched/sch_prio.c81
-rw-r--r--net/sched/sch_qfq.c22
-rw-r--r--net/sched/sch_red.c65
-rw-r--r--net/sched/sch_sfb.c20
-rw-r--r--net/sched/sch_sfq.c8
-rw-r--r--net/sched/sch_tbf.c21
-rw-r--r--net/sched/sch_teql.c3
-rw-r--r--net/sctp/Kconfig12
-rw-r--r--net/sctp/Makefile5
-rw-r--r--net/sctp/associola.c2
-rw-r--r--net/sctp/chunk.c8
-rw-r--r--net/sctp/endpointola.c2
-rw-r--r--net/sctp/output.c5
-rw-r--r--net/sctp/outqueue.c12
-rw-r--r--net/sctp/probe.c244
-rw-r--r--net/sctp/proc.c7
-rw-r--r--net/sctp/sm_make_chunk.c72
-rw-r--r--net/sctp/sm_sideeffect.c51
-rw-r--r--net/sctp/sm_statefuns.c50
-rw-r--r--net/sctp/sm_statetable.c5
-rw-r--r--net/sctp/socket.c160
-rw-r--r--net/sctp/stream.c47
-rw-r--r--net/sctp/stream_interleave.c1334
-rw-r--r--net/sctp/stream_sched.c3
-rw-r--r--net/sctp/sysctl.c7
-rw-r--r--net/sctp/ulpevent.c15
-rw-r--r--net/sctp/ulpqueue.c23
-rw-r--r--net/smc/af_smc.c231
-rw-r--r--net/smc/smc.h5
-rw-r--r--net/smc/smc_cdc.c52
-rw-r--r--net/smc/smc_cdc.h1
-rw-r--r--net/smc/smc_clc.c84
-rw-r--r--net/smc/smc_clc.h34
-rw-r--r--net/smc/smc_close.c208
-rw-r--r--net/smc/smc_close.h2
-rw-r--r--net/smc/smc_core.c17
-rw-r--r--net/smc/smc_diag.c6
-rw-r--r--net/smc/smc_ib.c38
-rw-r--r--net/smc/smc_rx.c5
-rw-r--r--net/smc/smc_tx.c32
-rw-r--r--net/smc/smc_wr.c50
-rw-r--r--net/smc/smc_wr.h2
-rw-r--r--net/socket.c301
-rw-r--r--net/tipc/bcast.c12
-rw-r--r--net/tipc/core.h1
-rw-r--r--net/tipc/group.c371
-rw-r--r--net/tipc/group.h10
-rw-r--r--net/tipc/link.c2
-rw-r--r--net/tipc/msg.c51
-rw-r--r--net/tipc/msg.h3
-rw-r--r--net/tipc/name_table.c57
-rw-r--r--net/tipc/name_table.h9
-rw-r--r--net/tipc/server.c76
-rw-r--r--net/tipc/server.h13
-rw-r--r--net/tipc/socket.c114
-rw-r--r--net/tipc/subscr.c35
-rw-r--r--net/tipc/subscr.h2
-rw-r--r--net/tls/tls_sw.c8
-rw-r--r--net/unix/af_unix.c1
-rw-r--r--net/wireless/ibss.c5
-rw-r--r--net/wireless/mlme.c6
-rw-r--r--net/wireless/nl80211.c68
-rw-r--r--net/wireless/scan.c5
-rw-r--r--net/wireless/trace.h12
-rw-r--r--net/wireless/wext-core.c13
-rw-r--r--net/wireless/wext-proc.c1
-rw-r--r--net/xfrm/xfrm_device.c199
-rw-r--r--net/xfrm/xfrm_input.c1
-rw-r--r--net/xfrm/xfrm_output.c2
-rw-r--r--net/xfrm/xfrm_policy.c137
-rw-r--r--net/xfrm/xfrm_proc.c1
-rw-r--r--net/xfrm/xfrm_replay.c5
-rw-r--r--net/xfrm/xfrm_state.c7
-rw-r--r--samples/bpf/Makefile14
-rw-r--r--samples/bpf/tcbpf2_kern.c170
-rw-r--r--samples/bpf/test_cgrp2_attach2.c36
-rwxr-xr-xsamples/bpf/test_override_return.sh15
-rwxr-xr-xsamples/bpf/test_tunnel_bpf.sh128
-rw-r--r--samples/bpf/tracex7_kern.c16
-rw-r--r--samples/bpf/tracex7_user.c28
-rwxr-xr-xsamples/bpf/xdp2skb_meta.sh220
-rw-r--r--samples/bpf/xdp2skb_meta_kern.c105
-rw-r--r--samples/bpf/xdp_monitor_kern.c96
-rw-r--r--samples/bpf/xdp_monitor_user.c416
-rw-r--r--samples/bpf/xdp_rxq_info_kern.c96
-rw-r--r--samples/bpf/xdp_rxq_info_user.c531
-rw-r--r--samples/sockmap/sockmap_user.c392
-rw-r--r--security/selinux/xfrm.c2
-rw-r--r--tools/bpf/Makefile29
-rw-r--r--tools/bpf/bpf_jit_disasm.c14
-rw-r--r--tools/bpf/bpftool/Documentation/Makefile30
-rw-r--r--tools/bpf/bpftool/Documentation/bpftool-cgroup.rst118
-rw-r--r--tools/bpf/bpftool/Documentation/bpftool-map.rst8
-rw-r--r--tools/bpf/bpftool/Documentation/bpftool-prog.rst16
-rw-r--r--tools/bpf/bpftool/Documentation/bpftool.rst12
-rw-r--r--tools/bpf/bpftool/Makefile90
-rw-r--r--tools/bpf/bpftool/bash-completion/bpftool8
-rw-r--r--tools/bpf/bpftool/cgroup.c308
-rw-r--r--tools/bpf/bpftool/common.c195
-rw-r--r--tools/bpf/bpftool/jit_disasm.c23
-rw-r--r--tools/bpf/bpftool/main.c16
-rw-r--r--tools/bpf/bpftool/main.h9
-rw-r--r--tools/bpf/bpftool/map.c11
-rw-r--r--tools/bpf/bpftool/prog.c229
-rw-r--r--tools/build/feature/Makefile4
-rw-r--r--tools/build/feature/test-disassembler-four-args.c15
-rw-r--r--tools/include/uapi/linux/bpf.h116
-rw-r--r--tools/include/uapi/linux/perf_event.h22
-rw-r--r--tools/lib/bpf/Makefile24
-rw-r--r--tools/lib/bpf/bpf.h2
-rw-r--r--tools/lib/bpf/libbpf.c200
-rw-r--r--tools/scripts/Makefile.include1
-rw-r--r--tools/testing/selftests/bpf/.gitignore7
-rw-r--r--tools/testing/selftests/bpf/Makefile20
-rw-r--r--tools/testing/selftests/bpf/bpf_helpers.h5
-rw-r--r--tools/testing/selftests/bpf/config2
-rw-r--r--tools/testing/selftests/bpf/sample_map_ret0.c34
-rw-r--r--tools/testing/selftests/bpf/sample_ret0.c7
-rwxr-xr-xtools/testing/selftests/bpf/tcp_client.py51
-rwxr-xr-xtools/testing/selftests/bpf/tcp_server.py83
-rw-r--r--tools/testing/selftests/bpf/test_align.c178
-rw-r--r--tools/testing/selftests/bpf/test_dev_cgroup.c11
-rw-r--r--tools/testing/selftests/bpf/test_l4lb_noinline.c473
-rw-r--r--tools/testing/selftests/bpf/test_lpm_map.c217
-rw-r--r--tools/testing/selftests/bpf/test_maps.c32
-rwxr-xr-xtools/testing/selftests/bpf/test_offload.py1085
-rw-r--r--tools/testing/selftests/bpf/test_progs.c355
-rw-r--r--tools/testing/selftests/bpf/test_stacktrace_map.c62
-rw-r--r--tools/testing/selftests/bpf/test_tcpbpf.h16
-rw-r--r--tools/testing/selftests/bpf/test_tcpbpf_kern.c115
-rw-r--r--tools/testing/selftests/bpf/test_tcpbpf_user.c126
-rw-r--r--tools/testing/selftests/bpf/test_tracepoint.c26
-rw-r--r--tools/testing/selftests/bpf/test_verifier.c2334
-rw-r--r--tools/testing/selftests/bpf/test_xdp_noinline.c833
-rw-r--r--tools/testing/selftests/net/Makefile1
-rwxr-xr-xtools/testing/selftests/net/fib_tests.sh429
-rw-r--r--tools/testing/selftests/net/msg_zerocopy.c21
-rwxr-xr-xtools/testing/selftests/net/rtnetlink.sh229
-rw-r--r--tools/virtio/linux/kernel.h2
-rw-r--r--tools/virtio/linux/thread_info.h1
-rw-r--r--tools/virtio/ringtest/main.h59
-rw-r--r--tools/virtio/ringtest/ptr_ring.c2
1666 files changed, 120519 insertions, 45602 deletions
diff --git a/Documentation/ABI/testing/devlink-resource-mlxsw b/Documentation/ABI/testing/devlink-resource-mlxsw
new file mode 100644
index 000000000000..259ed2948ec0
--- /dev/null
+++ b/Documentation/ABI/testing/devlink-resource-mlxsw
@@ -0,0 +1,33 @@
+What: 		/kvd/
+Date:		08-Jan-2018
+KernelVersion:	v4.16
+Contact:	mlxsw@mellanox.com
+Description:	The main database in the Spectrum device is a centralized
+		KVD database used for many of the tables used to configure
+		the chip including L2 FDB, L3 LPM, ECMP and more. The KVD
+		is divided into two sections: the first is a hash-based
+		table and the second is a linear access table. The division
+		between the linear and hash-based sections is static and
+		requires a reload before the changes take effect.
+
+What: 		/kvd/linear
+Date:		08-Jan-2018
+KernelVersion:	v4.16
+Contact:	mlxsw@mellanox.com
+Description:	The linear section of the KVD is managed by software as a
+		flat memory accessed using an index.
+
+What: 		/kvd/hash_single
+Date:		08-Jan-2018
+KernelVersion:	v4.16
+Contact:	mlxsw@mellanox.com
+Description:	The hash based section of the KVD is managed by the switch
+		device. Used in case the key size is smaller than or
+		equal to 64 bits.
+
+What: 		/kvd/hash_double
+Date:		08-Jan-2018
+KernelVersion:	v4.16
+Contact:	mlxsw@mellanox.com
+Description:	The hash based section of the KVD is managed by the switch
+		device. Used in case the key is larger than 64 bits.
diff --git a/Documentation/ABI/testing/sysfs-class-net b/Documentation/ABI/testing/sysfs-class-net
index 6856da99b6f7..2f1788111cd9 100644
--- a/Documentation/ABI/testing/sysfs-class-net
+++ b/Documentation/ABI/testing/sysfs-class-net
@@ -259,3 +259,27 @@ Contact:	netdev@vger.kernel.org
 Description:
 		Symbolic link to the PHY device this network device is attached
 		to.
+
+What:		/sys/class/net/<iface>/carrier_changes
+Date:		Mar 2014
+KernelVersion:	3.15
+Contact:	netdev@vger.kernel.org
+Description:
+		32-bit unsigned integer counting the number of times the link has
+		seen a change from UP to DOWN and vice versa
+
+What:		/sys/class/net/<iface>/carrier_up_count
+Date:		Jan 2018
+KernelVersion:	4.16
+Contact:	netdev@vger.kernel.org
+Description:
+		32-bit unsigned integer counting the number of times the link has
+		been up
+
+What:		/sys/class/net/<iface>/carrier_down_count
+Date:		Jan 2018
+KernelVersion:	4.16
+Contact:	netdev@vger.kernel.org
+Description:
+		32-bit unsigned integer counting the number of times the link has
+		been down
diff --git a/Documentation/bpf/bpf_devel_QA.txt b/Documentation/bpf/bpf_devel_QA.txt
new file mode 100644
index 000000000000..cefef855dea4
--- /dev/null
+++ b/Documentation/bpf/bpf_devel_QA.txt
@@ -0,0 +1,519 @@
+This document provides information for the BPF subsystem about various
+workflows related to reporting bugs, submitting patches, and queueing
+patches for stable kernels.
+
+For general information about submitting patches, please refer to
+Documentation/process/. This document only describes additional specifics
+related to BPF.
+
+Reporting bugs:
+---------------
+
+Q: How do I report bugs for BPF kernel code?
+
+A: Since all BPF kernel development as well as bpftool and iproute2 BPF
+   loader development happens through the netdev kernel mailing list,
+   please report any found issues around BPF to the following mailing
+   list:
+
+     netdev@vger.kernel.org
+
+   This may also include issues related to XDP, BPF tracing, etc.
+
+   Given netdev has a high volume of traffic, please also add the BPF
+   maintainers to Cc (from the kernel MAINTAINERS file):
+
+     Alexei Starovoitov <ast@kernel.org>
+     Daniel Borkmann <daniel@iogearbox.net>
+
+   In case a buggy commit has already been identified, make sure to keep
+   the actual commit authors in Cc as well for the report. They can
+   typically be identified through the kernel's git tree.
+
+   Please do *not* report BPF issues to bugzilla.kernel.org since it
+   is a guarantee that the reported issue will be overlooked.
+
+Submitting patches:
+-------------------
+
+Q: To which mailing list do I need to submit my BPF patches?
+
+A: Please submit your BPF patches to the netdev kernel mailing list:
+
+     netdev@vger.kernel.org
+
+   Historically, BPF came out of networking and has always been maintained
+   by the kernel networking community. Although these days BPF touches
+   many other subsystems as well, the patches are still routed mainly
+   through the networking community.
+
+   In case your patch has changes in multiple subsystems (e.g.
+   tracing, security, etc), make sure to Cc the related kernel mailing
+   lists and maintainers from there as well, so they are able to review
+   the changes and provide their Acked-by's to the patches.
+
+Q: Where can I find patches currently under discussion for BPF subsystem?
+
+A: All patches that are Cc'ed to netdev are queued for review under netdev
+   patchwork project:
+
+     http://patchwork.ozlabs.org/project/netdev/list/
+
+   Those patches which target BPF are assigned to a 'bpf' delegate for
+   further processing by the BPF maintainers. The current queue with
+   patches under review can be found at:
+
+     https://patchwork.ozlabs.org/project/netdev/list/?delegate=77147
+
+   Once the patches have been reviewed by the BPF community as a whole
+   and approved by the BPF maintainers, their status in patchwork will be
+   changed to 'Accepted' and the submitter will be notified by mail. This
+   means that the patches look good from a BPF perspective and have been
+   applied to one of the two BPF kernel trees.
+
+   In case feedback from the community requires a respin of the patches,
+   their status in patchwork will be set to 'Changes Requested', and they
+   will be purged from the current review queue. The same applies to
+   patches that get rejected or that are not applicable to the BPF trees
+   (but were assigned to the 'bpf' delegate).
+
+Q: How do the changes make their way into Linux?
+
+A: There are two BPF kernel trees (git repositories). Once patches have
+   been accepted by the BPF maintainers, they will be applied to one
+   of the two BPF trees:
+
+     https://git.kernel.org/pub/scm/linux/kernel/git/bpf/bpf.git/
+     https://git.kernel.org/pub/scm/linux/kernel/git/bpf/bpf-next.git/
+
+   The bpf tree itself is for fixes only, whereas bpf-next is for
+   features, cleanups or other kinds of improvements ("next-like"
+   content). This is analogous to the net and net-next trees for
+   networking. Both bpf and bpf-next will only have a master branch in
+   order to make it unambiguous which branch patches should be rebased
+   against.
+
+   Accumulated BPF patches in the bpf tree will regularly get pulled
+   into the net kernel tree. Likewise, accumulated BPF patches accepted
+   into the bpf-next tree will make their way into net-next tree. net and
+   net-next are both run by David S. Miller. From there, they will go
+   into the kernel mainline tree run by Linus Torvalds. To read up on the
+   process of net and net-next being merged into the mainline tree, see
+   the netdev FAQ under:
+
+     Documentation/networking/netdev-FAQ.txt
+
+   Occasionally, to prevent merge conflicts, we might send pull requests
+   to other trees (e.g. tracing) with a small subset of the patches, but
+   net and net-next are always the main trees targeted for integration.
+
+   The pull requests will contain a high-level summary of the accumulated
+   patches and can be searched on netdev kernel mailing list through the
+   following subject lines (yyyy-mm-dd is the date of the pull request):
+
+     pull-request: bpf yyyy-mm-dd
+     pull-request: bpf-next yyyy-mm-dd
+
+Q: How do I indicate which tree (bpf vs. bpf-next) my patch should be
+   applied to?
+
+A: The process is the very same as described in the netdev FAQ, so
+   please read up on it. The subject line must indicate whether the
+   patch is a fix or rather "next-like" content in order to let the
+   maintainers know whether it is targeted at bpf or bpf-next.
+
+   For fixes eventually landing in bpf -> net tree, the subject must
+   look like:
+
+     git format-patch --subject-prefix='PATCH bpf' start..finish
+
+   For features/improvements/etc that should eventually land in
+   bpf-next -> net-next, the subject must look like:
+
+     git format-patch --subject-prefix='PATCH bpf-next' start..finish
+
+   If you are unsure whether the patch or patch series should go into
+   bpf or net directly, or into bpf-next or net-next directly, it is
+   not a problem if the subject line says net or net-next as target.
+   It is eventually up to the maintainers to do the delegation of
+   the patches.
+
+   If it is clear that patches should go into bpf or bpf-next tree,
+   please make sure to rebase the patches against those trees in
+   order to reduce potential conflicts.
+
+   In case the patch or patch series has to be reworked and sent out
+   again in a second or later revision, it is also required to add a
+   version number (v2, v3, ...) into the subject prefix:
+
+     git format-patch --subject-prefix='PATCH net-next v2' start..finish
+
+   When changes have been requested to the patch series, always send the
+   whole patch series again with the feedback incorporated (never send
+   individual diffs on top of the old series).
+
+Q: What does it mean when a patch gets applied to bpf or bpf-next tree?
+
+A: It means that the patch looks good for mainline inclusion from
+   a BPF point of view.
+
+   Be aware that this is not a final verdict that the patch will
+   automatically get accepted into net or net-next trees eventually:
+
+   On the netdev kernel mailing list reviews can come in at any point
+   in time. If discussions around a patch conclude that they cannot
+   get included as-is, we will either apply a follow-up fix or drop
+   them from the trees entirely. Therefore, we also reserve the right
+   to rebase the trees when deemed necessary. After all, the purpose of
+   the trees is to i) accumulate and stage BPF patches for integration
+   into trees like net and net-next, and ii) run an extensive BPF test
+   suite and workloads on the patches before they make their way any
+   further.
+
+   Once the BPF pull request has been accepted by David S. Miller, the
+   patches end up in the net or net-next tree, respectively, and
+   make their way from there further into mainline. Again, see the
+   netdev FAQ for additional information e.g. on how often they are
+   merged to mainline.
+
+Q: How long do I need to wait for feedback on my BPF patches?
+
+A: We try to keep the latency low. The usual time to feedback will
+   be around 2 or 3 business days. It may vary depending on the
+   complexity of changes and current patch load.
+
+Q: How often do you send pull requests to major kernel trees like
+   net or net-next?
+
+A: Pull requests will be sent out rather often in order to not
+   accumulate too many patches in bpf or bpf-next.
+
+   As a rule of thumb, expect pull requests for each tree regularly
+   at the end of the week. In some cases pull requests may additionally
+   come in the middle of the week, depending on the current patch
+   load or urgency.
+
+Q: Are patches applied to bpf-next when the merge window is open?
+
+A: While the merge window is open, bpf-next will not be
+   processed. This is roughly analogous to net-next patch processing,
+   so feel free to read up on the netdev FAQ for further details.
+
+   During those two weeks of merge window, we might ask you to resend
+   your patch series once bpf-next is open again. Once Linus has
+   released a v*-rc1 after the merge window, we continue processing
+   bpf-next.
+
+   For non-subscribers to kernel mailing lists, there is also a status
+   page run by David S. Miller on net-next that provides guidance:
+
+     http://vger.kernel.org/~davem/net-next.html
+
+Q: I made a BPF verifier change, do I need to add test cases for
+   BPF kernel selftests?
+
+A: If the patch has changes to the behavior of the verifier, then yes,
+   it is absolutely necessary to add test cases to the BPF kernel
+   selftests suite. If they are not present and we think they are
+   needed, then we might ask for them before accepting any changes.
+
+   In particular, test_verifier.c tracks a high number of BPF test
+   cases, including a lot of corner cases that the LLVM BPF back end may
+   generate out of the restricted C code. Thus, adding test cases is
+   absolutely crucial to make sure future changes do not accidentally
+   affect prior use-cases. In other words, treat those test cases as a
+   contract: verifier behavior that is not tracked in test_verifier.c
+   could potentially be subject to change.
+
+Q: When should I add code to samples/bpf/ and when to BPF kernel
+   selftests?
+
+A: In general, we prefer additions to BPF kernel selftests rather than
+   samples/bpf/. The rationale is very simple: kernel selftests are
+   regularly run by various bots to test for kernel regressions.
+
+   The more test cases we add to BPF selftests, the better the coverage
+   and the less likely it is that those could accidentally break. This
+   is not to say that BPF kernel selftests cannot demonstrate how a
+   specific feature can be used.
+
+   That said, samples/bpf/ may be a good place for people to get started,
+   so simple demos of features could go into samples/bpf/, while advanced
+   functional and corner-case testing should rather go into kernel
+   selftests.
+
+   If your sample looks like a test case, then go for BPF kernel selftests
+   instead!
+
+Q: When should I add code to the bpftool?
+
+A: The main purpose of bpftool (under tools/bpf/bpftool/) is to provide
+   a central user space tool for debugging and introspection of BPF programs
+   and maps that are active in the kernel. If UAPI changes related to BPF
+   enable for dumping additional information of programs or maps, then
+   bpftool should be extended as well to support dumping them.
+
+Q: When should I add code to iproute2's BPF loader?
+
+A: For UAPI changes related to the XDP or tc layer (e.g. cls_bpf), the
+   convention is that those control-path related changes are added to
+   iproute2's BPF loader as well from user space side. This is not only
+   useful to have UAPI changes properly designed to be usable, but also
+   to make those changes available to a wider user base of major
+   downstream distributions.
+
+Q: Do you accept patches as well for iproute2's BPF loader?
+
+A: Patches for iproute2's BPF loader have to be sent to:
+
+     netdev@vger.kernel.org
+
+   While those patches are not processed by the BPF kernel maintainers,
+   please keep them in Cc as well, so they can be reviewed.
+
+   The official git repository for iproute2 is run by Stephen Hemminger
+   and can be found at:
+
+     https://git.kernel.org/pub/scm/linux/kernel/git/shemminger/iproute2.git/
+
+   The patches need to have a subject prefix of '[PATCH iproute2 master]'
+   or '[PATCH iproute2 net-next]'. 'master' or 'net-next' describes the
+   target branch the patch should be applied to. Meaning, if kernel
+   changes went into the net-next kernel tree, then the related iproute2
+   changes need to go into the iproute2 net-next branch, otherwise they
+   can be targeted at master branch. The iproute2 net-next branch will get
+   merged into the master branch after the current iproute2 version from
+   master has been released.
+
+   Like BPF, the patches end up in patchwork under the netdev project and
+   are delegated to 'shemminger' for further processing:
+
+     http://patchwork.ozlabs.org/project/netdev/list/?delegate=389
+
+Q: What is the minimum requirement before I submit my BPF patches?
+
+A: When submitting patches, always take the time and properly test your
+   patches *prior* to submission. Never rush them! If maintainers find
+   that your patches have not been properly tested, it is a good way to
+   get them grumpy. Testing patch submissions is a hard requirement!
+
+   Note, fixes that go to bpf tree *must* have a Fixes: tag included. The
+   same applies to fixes that target bpf-next, where the affected commit
+   is in net-next (or in some cases bpf-next). The Fixes: tag is crucial
+   in order to identify follow-up commits and tremendously helps people
+   who have to do backporting, so it is a must-have!
+
+   We also don't accept patches with an empty commit message. Take your
+   time and properly write up a high quality commit message, it is
+   essential!
+
+   Think about it this way: other developers looking at your code a month
+   from now need to understand *why* a certain change has been done that
+   way, and whether there have been flaws in the analysis or assumptions
+   that the original author made. Thus, providing a proper rationale and
+   describing the use-case for the changes is a must.
+
+   Patch submissions with >1 patch must have a cover letter which includes
+   a high level description of the series. This high level summary will
+   then be placed into the merge commit by the BPF maintainers such that
+   it is also accessible from the git log for future reference.
+
+Q: What do I need to consider when adding a new instruction or feature
+   that would require BPF JIT and/or LLVM integration as well?
+
+A: We try hard to keep all BPF JITs up to date such that the same user
+   experience can be guaranteed when running BPF programs on different
+   architectures without having the program punt to the less efficient
+   interpreter in case the in-kernel BPF JIT is enabled.
+
+   If you are unable to implement or test the required JIT changes for
+   certain architectures, please work together with the related BPF JIT
+   developers in order to get the feature implemented in a timely manner.
+   Please refer to the git log (arch/*/net/) to locate the necessary
+   people for helping out.
+
+   Also always make sure to add BPF test cases (e.g. test_bpf.c and
+   test_verifier.c) for new instructions, so that they can receive
+   broad test coverage and help with run-time testing of the various
+   BPF JITs.
+
+   In case of new BPF instructions, once the changes have been accepted
+   into the Linux kernel, please implement support into LLVM's BPF back
+   end. See LLVM section below for further information.
+
+Stable submission:
+------------------
+
+Q: I need a specific BPF commit in stable kernels. What should I do?
+
+A: In case you need a specific fix in stable kernels, first check whether
+   the commit has already been applied in the related linux-*.y branches:
+
+     https://git.kernel.org/pub/scm/linux/kernel/git/stable/linux-stable.git/
+
+   If that is not the case, drop an email to the BPF maintainers with the
+   netdev kernel mailing list in Cc and ask for the fix to be queued up:
+
+     netdev@vger.kernel.org
+
+   The process in general is the same as on netdev itself; see also the
+   netdev FAQ document.
+
+Q: Do you also backport to kernels not currently maintained as stable?
+
+A: No. If you need a specific BPF commit in kernels that are currently not
+   maintained by the stable maintainers, then you are on your own.
+
+   The current stable and longterm stable kernels are all listed here:
+
+     https://www.kernel.org/
+
+Q: The BPF patch I am about to submit needs to go to stable as well. What
+   should I do?
+
+A: The same rules apply as with netdev patch submissions in general, see
+   netdev FAQ under:
+
+     Documentation/networking/netdev-FAQ.txt
+
+   Never add "Cc: stable@vger.kernel.org" to the patch description, but
+   ask the BPF maintainers to queue the patches instead. This can be done
+   with a note, for example, under the "---" part of the patch which does
+   not go into the git log. Alternatively, this can be done as a simple
+   request by mail.
+
+Q: Where do I find currently queued BPF patches that will be submitted
+   to stable?
+
+A: Once patches that fix critical bugs have been applied to the bpf
+   tree, they
+   are queued up for stable submission under:
+
+     http://patchwork.ozlabs.org/bundle/bpf/stable/?state=*
+
+   They will be on hold there at minimum until the related commit has
+   made its way into the mainline kernel tree.
+
+   After having been under broader exposure, the queued patches will be
+   submitted by the BPF maintainers to the stable maintainers.
+
+Testing patches:
+----------------
+
+Q: Which BPF kernel selftests version should I run my kernel against?
+
+A: If you run a kernel xyz, then always run the BPF kernel selftests from
+   that kernel xyz as well. Do not expect that the BPF selftests from the
+   latest mainline tree will pass all the time.
+
+   In particular, test_bpf.c and test_verifier.c have a large number of
+   test cases and are constantly updated with new BPF test sequences, or
+   existing ones are adapted to verifier changes, e.g. due to the
+   verifier becoming smarter and being able to better track certain
+   things.
+
+LLVM:
+-----
+
+Q: Where do I find LLVM with BPF support?
+
+A: The BPF back end has been upstream in LLVM since version 3.7.1.
+
+   All major distributions these days ship LLVM with BPF back end enabled,
+   so for the majority of use-cases it is not required to compile LLVM by
+   hand anymore, just install the distribution provided package.
+
+   LLVM's static compiler lists the supported targets through
+   'llc --version'; make sure the BPF targets are listed. Example:
+
+     $ llc --version
+     LLVM (http://llvm.org/):
+       LLVM version 6.0.0svn
+       Optimized build.
+       Default target: x86_64-unknown-linux-gnu
+       Host CPU: skylake
+
+       Registered Targets:
+         bpf    - BPF (host endian)
+         bpfeb  - BPF (big endian)
+         bpfel  - BPF (little endian)
+         x86    - 32-bit X86: Pentium-Pro and above
+         x86-64 - 64-bit X86: EM64T and AMD64
+
+   For developers who want to utilize the latest features added to LLVM's
+   BPF back end, it is advisable to run the latest LLVM releases. Support
+   for new BPF kernel features such as additions to the BPF instruction
+   set is often developed together with the kernel changes.
+
+   All LLVM releases can be found at: http://releases.llvm.org/
+
+Q: Got it, so how do I build LLVM manually anyway?
+
+A: You need cmake and gcc-c++ as build requisites for LLVM. Once you have
+   that set up, proceed with building the latest LLVM and clang version
+   from the git repositories:
+
+     $ git clone http://llvm.org/git/llvm.git
+     $ cd llvm/tools
+     $ git clone --depth 1 http://llvm.org/git/clang.git
+     $ cd ..; mkdir build; cd build
+     $ cmake .. -DLLVM_TARGETS_TO_BUILD="BPF;X86" \
+                -DBUILD_SHARED_LIBS=OFF           \
+                -DCMAKE_BUILD_TYPE=Release        \
+                -DLLVM_BUILD_RUNTIME=OFF
+     $ make -j $(getconf _NPROCESSORS_ONLN)
+
+   The built binaries can then be found in the build/bin/ directory, to
+   which you can point your PATH variable.
+
+Q: Should I notify BPF kernel maintainers about issues in LLVM's BPF code
+   generation back end or about LLVM generated code that the verifier
+   refuses to accept?
+
+A: Yes, please do! LLVM's BPF back end is a key piece of the whole BPF
+   infrastructure and it ties deeply into verification of programs from the
+   kernel side. Therefore, any issues on either side need to be investigated
+   and fixed whenever necessary.
+
+   Please make sure to bring them up on the netdev kernel mailing list
+   and Cc the BPF maintainers for LLVM and kernel bits:
+
+     Yonghong Song <yhs@fb.com>
+     Alexei Starovoitov <ast@kernel.org>
+     Daniel Borkmann <daniel@iogearbox.net>
+
+   LLVM also has an issue tracker where BPF related bugs can be found:
+
+     https://bugs.llvm.org/buglist.cgi?quicksearch=bpf
+
+   However, it is better to reach out through the mailing lists with the
+   maintainers in Cc.
+
+Q: I have added a new BPF instruction to the kernel, how can I integrate
+   it into LLVM?
+
+A: LLVM has a -mcpu selector for the BPF back end in order to allow the
+   selection of BPF instruction set extensions. By default the 'generic'
+   processor target is used, which is the base instruction set (v1) of BPF.
+
+   LLVM has an option to select -mcpu=probe where it will probe the host
+   kernel for supported BPF instruction set extensions and select the
+   optimal set automatically.
+
+   For cross-compilation, a specific version can be selected manually as well:
+
+     $ llc -march bpf -mcpu=help
+     Available CPUs for this target:
+
+       generic - Select the generic processor.
+       probe   - Select the probe processor.
+       v1      - Select the v1 processor.
+       v2      - Select the v2 processor.
+     [...]
+
+   Newly added BPF instructions to the Linux kernel need to follow the same
+   scheme: bump the instruction set version and implement probing for the
+   extensions such that -mcpu=probe users can benefit from the optimization
+   transparently when upgrading their kernels.
+
+   If you are unable to implement support for the newly added BPF instruction,
+   please reach out to BPF developers for help.
+
+   By the way, the BPF kernel selftests run with -mcpu=probe for better
+   test coverage.
+
+Happy BPF hacking!
diff --git a/Documentation/devicetree/bindings/net/brcm,bcm7445-switch-v4.0.txt b/Documentation/devicetree/bindings/net/brcm,bcm7445-switch-v4.0.txt
index 9a734d808aa7..b7336b9d6a3c 100644
--- a/Documentation/devicetree/bindings/net/brcm,bcm7445-switch-v4.0.txt
+++ b/Documentation/devicetree/bindings/net/brcm,bcm7445-switch-v4.0.txt
@@ -2,7 +2,10 @@
 
 Required properties:
 
-- compatible: should be "brcm,bcm7445-switch-v4.0" or "brcm,bcm7278-switch-v4.0"
+- compatible: should be one of
+	"brcm,bcm7445-switch-v4.0"
+	"brcm,bcm7278-switch-v4.0"
+	"brcm,bcm7278-switch-v4.8"
 - reg: addresses and length of the register sets for the device, must be 6
   pairs of register addresses and lengths
 - interrupts: interrupts for the devices, must be two interrupts
diff --git a/Documentation/devicetree/bindings/net/can/can-transceiver.txt b/Documentation/devicetree/bindings/net/can/can-transceiver.txt
new file mode 100644
index 000000000000..0011f53ff159
--- /dev/null
+++ b/Documentation/devicetree/bindings/net/can/can-transceiver.txt
@@ -0,0 +1,24 @@
+Generic CAN transceiver Device Tree binding
+-------------------------------------------
+
+A CAN transceiver typically limits the maximum speed in standard CAN and
+CAN FD modes. These limitations are usually static and the transceivers
+themselves provide no way to detect them at runtime. For this situation,
+the "can-transceiver" node can be used.
+
+Required Properties:
+ max-bitrate:	a positive non-zero value that determines the maximum
+		speed at which CAN/CAN-FD can run. Any other value
+		will be ignored.
+
+Examples:
+
+Based on Texas Instruments' TCAN1042HGV CAN Transceiver
+
+m_can0 {
+	....
+	can-transceiver {
+		max-bitrate = <5000000>;
+	};
+	...
+};
diff --git a/Documentation/devicetree/bindings/net/can/fsl-flexcan.txt b/Documentation/devicetree/bindings/net/can/fsl-flexcan.txt
index 56d6cc336e1c..bfc0c433654f 100644
--- a/Documentation/devicetree/bindings/net/can/fsl-flexcan.txt
+++ b/Documentation/devicetree/bindings/net/can/fsl-flexcan.txt
@@ -18,6 +18,12 @@ Optional properties:
 
 - xceiver-supply: Regulator that powers the CAN transceiver
 
+- big-endian: This optional property indicates that the registers of the
+              FlexCAN controller are big endian. If this property is not
+              present in the device tree node, the controller is assumed
+              to be little endian; if it is present, the controller is
+              assumed to be big endian.
+
 Example:
 
 	can@1c000 {
diff --git a/Documentation/devicetree/bindings/net/can/m_can.txt b/Documentation/devicetree/bindings/net/can/m_can.txt
index 63e90421d029..ed614383af9c 100644
--- a/Documentation/devicetree/bindings/net/can/m_can.txt
+++ b/Documentation/devicetree/bindings/net/can/m_can.txt
@@ -43,6 +43,11 @@ Required properties:
 			  Please refer to 2.4.1 Message RAM Configuration in
 			  Bosch M_CAN user manual for details.
 
+Optional Subnode:
+- can-transceiver	: can-transceiver subnode describing the maximum
+			  speed that can be used in CAN/CAN-FD modes. See
+			  Documentation/devicetree/bindings/net/can/can-transceiver.txt
+			  for details.
+
 Example:
 SoC dtsi:
 m_can1: can@20e8000 {
@@ -63,4 +68,8 @@ Board dts:
 	pinctrl-names = "default";
 	pinctrl-0 = <&pinctrl_m_can1>;
 	status = "enabled";
+
+	can-transceiver {
+		max-bitrate = <5000000>;
+	};
 };
diff --git a/Documentation/devicetree/bindings/net/can/rcar_can.txt b/Documentation/devicetree/bindings/net/can/rcar_can.txt
index 06bb7cc334c8..94a7f33ac5e9 100644
--- a/Documentation/devicetree/bindings/net/can/rcar_can.txt
+++ b/Documentation/devicetree/bindings/net/can/rcar_can.txt
@@ -2,7 +2,9 @@ Renesas R-Car CAN controller Device Tree Bindings
 -------------------------------------------------
 
 Required properties:
-- compatible: "renesas,can-r8a7778" if CAN controller is a part of R8A7778 SoC.
+- compatible: "renesas,can-r8a7743" if CAN controller is a part of R8A7743 SoC.
+	      "renesas,can-r8a7745" if CAN controller is a part of R8A7745 SoC.
+	      "renesas,can-r8a7778" if CAN controller is a part of R8A7778 SoC.
 	      "renesas,can-r8a7779" if CAN controller is a part of R8A7779 SoC.
 	      "renesas,can-r8a7790" if CAN controller is a part of R8A7790 SoC.
 	      "renesas,can-r8a7791" if CAN controller is a part of R8A7791 SoC.
@@ -12,7 +14,8 @@ Required properties:
 	      "renesas,can-r8a7795" if CAN controller is a part of R8A7795 SoC.
 	      "renesas,can-r8a7796" if CAN controller is a part of R8A7796 SoC.
 	      "renesas,rcar-gen1-can" for a generic R-Car Gen1 compatible device.
-	      "renesas,rcar-gen2-can" for a generic R-Car Gen2 compatible device.
+	      "renesas,rcar-gen2-can" for a generic R-Car Gen2 or RZ/G1
+	      compatible device.
 	      "renesas,rcar-gen3-can" for a generic R-Car Gen3 compatible device.
 	      When compatible with the generic version, nodes must list the
 	      SoC-specific version corresponding to the platform first
diff --git a/Documentation/devicetree/bindings/net/cortina,gemini-ethernet.txt b/Documentation/devicetree/bindings/net/cortina,gemini-ethernet.txt
new file mode 100644
index 000000000000..6c559981d110
--- /dev/null
+++ b/Documentation/devicetree/bindings/net/cortina,gemini-ethernet.txt
@@ -0,0 +1,92 @@
+Cortina Systems Gemini Ethernet Controller
+==========================================
+
+This ethernet controller is found in the Gemini SoC family:
+StorLink SL3512 and SL3516, also known as Cortina Systems
+CS3512 and CS3516.
+
+Required properties:
+- compatible: must be "cortina,gemini-ethernet"
+- reg: must contain the global registers and the V-bit and A-bit
+  memory areas, in total three register sets.
+- syscon: a phandle to the system controller
+- #address-cells: must be specified, must be <1>
+- #size-cells: must be specified, must be <1>
+- ranges: should be stated like this, giving a 1:1 address translation
+  for the subnodes
+
+The subnodes represent the two ethernet ports in this device.
+They are not independent of each other since they share resources
+in the parent node, and are thus children.
+
+Required subnodes:
+- port0: contains the resources for ethernet port 0
+- port1: contains the resources for ethernet port 1
+
+Required subnode properties:
+- compatible: must be "cortina,gemini-ethernet-port"
+- reg: must contain two register areas: the DMA/TOE memory and
+  the GMAC memory area of the port
+- interrupts: should contain the interrupt line of the port.
+  This is nominally a level interrupt, active high.
+- resets: this must provide an SoC-integrated reset line for
+  the port.
+- clocks: this should contain a phandle to the PCLK clock for
+  clocking the silicon in this port
+- clock-names: must be "PCLK"
+
+Optional subnode properties:
+- phy-mode: see ethernet.txt
+- phy-handle: see ethernet.txt
+
+Example:
+
+mdio-bus {
+	(...)
+	phy0: ethernet-phy@1 {
+		reg = <1>;
+		device_type = "ethernet-phy";
+	};
+	phy1: ethernet-phy@3 {
+		reg = <3>;
+		device_type = "ethernet-phy";
+	};
+};
+
+
+ethernet@60000000 {
+	compatible = "cortina,gemini-ethernet";
+	reg = <0x60000000 0x4000>, /* Global registers, queue */
+	      <0x60004000 0x2000>, /* V-bit */
+	      <0x60006000 0x2000>; /* A-bit */
+	syscon = <&syscon>;
+	#address-cells = <1>;
+	#size-cells = <1>;
+	ranges;
+
+	gmac0: ethernet-port@0 {
+		compatible = "cortina,gemini-ethernet-port";
+		reg = <0x60008000 0x2000>, /* Port 0 DMA/TOE */
+		      <0x6000a000 0x2000>; /* Port 0 GMAC */
+		interrupt-parent = <&intcon>;
+		interrupts = <1 IRQ_TYPE_LEVEL_HIGH>;
+		resets = <&syscon GEMINI_RESET_GMAC0>;
+		clocks = <&syscon GEMINI_CLK_GATE_GMAC0>;
+		clock-names = "PCLK";
+		phy-mode = "rgmii";
+		phy-handle = <&phy0>;
+	};
+
+	gmac1: ethernet-port@1 {
+		compatible = "cortina,gemini-ethernet-port";
+		reg = <0x6000c000 0x2000>, /* Port 1 DMA/TOE */
+		      <0x6000e000 0x2000>; /* Port 1 GMAC */
+		interrupt-parent = <&intcon>;
+		interrupts = <2 IRQ_TYPE_LEVEL_HIGH>;
+		resets = <&syscon GEMINI_RESET_GMAC1>;
+		clocks = <&syscon GEMINI_CLK_GATE_GMAC1>;
+		clock-names = "PCLK";
+		phy-mode = "rgmii";
+		phy-handle = <&phy1>;
+	};
+};
diff --git a/Documentation/devicetree/bindings/net/fsl-fec.txt b/Documentation/devicetree/bindings/net/fsl-fec.txt
index f0dc94409107..2d41fb96ce0a 100644
--- a/Documentation/devicetree/bindings/net/fsl-fec.txt
+++ b/Documentation/devicetree/bindings/net/fsl-fec.txt
@@ -59,7 +59,7 @@ ethernet@83fec000 {
 	reg = <0x83fec000 0x4000>;
 	interrupts = <87>;
 	phy-mode = "mii";
-	phy-reset-gpios = <&gpio2 14 0>; /* GPIO2_14 */
+	phy-reset-gpios = <&gpio2 14 GPIO_ACTIVE_LOW>; /* GPIO2_14 */
 	local-mac-address = [00 04 9F 01 1B B9];
 	phy-supply = <&reg_fec_supply>;
 };
@@ -71,7 +71,7 @@ ethernet@83fec000 {
 	reg = <0x83fec000 0x4000>;
 	interrupts = <87>;
 	phy-mode = "mii";
-	phy-reset-gpios = <&gpio2 14 0>; /* GPIO2_14 */
+	phy-reset-gpios = <&gpio2 14 GPIO_ACTIVE_LOW>; /* GPIO2_14 */
 	local-mac-address = [00 04 9F 01 1B B9];
 	phy-supply = <&reg_fec_supply>;
 	phy-handle = <&ethphy>;
diff --git a/Documentation/devicetree/bindings/net/ieee802154/adf7242.txt b/Documentation/devicetree/bindings/net/ieee802154/adf7242.txt
index dea5124cdc52..d24172cc6d32 100644
--- a/Documentation/devicetree/bindings/net/ieee802154/adf7242.txt
+++ b/Documentation/devicetree/bindings/net/ieee802154/adf7242.txt
@@ -1,7 +1,7 @@
 * ADF7242 IEEE 802.15.4 *
 
 Required properties:
-  - compatible:		should be "adi,adf7242"
+  - compatible:		should be "adi,adf7242", "adi,adf7241"
   - spi-max-frequency:	maximal bus speed (12.5 MHz)
   - reg:		the chipselect index
   - interrupts:		the interrupt generated by the device via pin IRQ1.
diff --git a/Documentation/devicetree/bindings/net/mediatek-net.txt b/Documentation/devicetree/bindings/net/mediatek-net.txt
index 214eaa9a6683..53c13ee384a4 100644
--- a/Documentation/devicetree/bindings/net/mediatek-net.txt
+++ b/Documentation/devicetree/bindings/net/mediatek-net.txt
@@ -28,7 +28,7 @@ Required properties:
 - mediatek,sgmiisys: phandle to the syscon node that handles the SGMII setup
 	which is required for those SoCs equipped with SGMII such as MT7622 SoC.
 - mediatek,pctl: phandle to the syscon node that handles the ports slew rate
-	and driver current
+	and driver current: only for MT2701 and MT7623 SoC
 
 Optional properties:
 - interrupt-parent: Should be the phandle for the interrupt controller
diff --git a/Documentation/devicetree/bindings/net/phy.txt b/Documentation/devicetree/bindings/net/phy.txt
index 77d0b2a61ffa..d2169a56f5e3 100644
--- a/Documentation/devicetree/bindings/net/phy.txt
+++ b/Documentation/devicetree/bindings/net/phy.txt
@@ -53,6 +53,14 @@ Optional Properties:
   to ensure the integrated PHY is used. The absence of this property indicates
   the muxers should be configured so that the external PHY is used.
 
+- reset-gpios: The GPIO phandle and specifier for the PHY reset signal.
+
+- reset-assert-us: Delay after the reset was asserted in microseconds.
+  If this property is missing the delay will be skipped.
+
+- reset-deassert-us: Delay after the reset was deasserted in microseconds.
+  If this property is missing the delay will be skipped.
+
 Example:
 
 ethernet-phy@0 {
@@ -60,4 +68,8 @@ ethernet-phy@0 {
 	interrupt-parent = <&PIC>;
 	interrupts = <35 IRQ_TYPE_EDGE_RISING>;
 	reg = <0>;
+
+	reset-gpios = <&gpio1 4 GPIO_ACTIVE_LOW>;
+	reset-assert-us = <1000>;
+	reset-deassert-us = <2000>;
 };
diff --git a/Documentation/devicetree/bindings/net/sff,sfp.txt b/Documentation/devicetree/bindings/net/sff,sfp.txt
index 60e970ce10ee..f1c441bedf68 100644
--- a/Documentation/devicetree/bindings/net/sff,sfp.txt
+++ b/Documentation/devicetree/bindings/net/sff,sfp.txt
@@ -3,7 +3,9 @@ Transceiver
 
 Required properties:
 
-- compatible : must be "sff,sfp"
+- compatible : must be one of
+  "sff,sfp" for SFP modules
+  "sff,sff" for soldered down SFF modules
 
 Optional Properties:
 
@@ -11,7 +13,8 @@ Optional Properties:
   interface
 
 - mod-def0-gpios : GPIO phandle and a specifier of the MOD-DEF0 (AKA Mod_ABS)
-  module presence input gpio signal, active (module absent) high
+  module presence input gpio signal, active (module absent) high. Must
+  not be present for SFF modules
 
 - los-gpios : GPIO phandle and a specifier of the Receiver Loss of Signal
   Indication input gpio signal, active (signal lost) high
@@ -24,10 +27,11 @@ Optional Properties:
 
 - rate-select0-gpios : GPIO phandle and a specifier of the Rx Signaling Rate
   Select (AKA RS0) output gpio signal, low: low Rx rate, high: high Rx rate
+  Must not be present for SFF modules
 
 - rate-select1-gpios : GPIO phandle and a specifier of the Tx Signaling Rate
   Select (AKA RS1) output gpio signal (SFP+ only), low: low Tx rate, high:
-  high Tx rate
+  high Tx rate. Must not be present for SFF modules
 
 Example #1: Direct serdes to SFP connection
 
diff --git a/Documentation/devicetree/bindings/net/socionext,uniphier-ave4.txt b/Documentation/devicetree/bindings/net/socionext,uniphier-ave4.txt
new file mode 100644
index 000000000000..270ea4efff13
--- /dev/null
+++ b/Documentation/devicetree/bindings/net/socionext,uniphier-ave4.txt
@@ -0,0 +1,48 @@
+* Socionext AVE ethernet controller
+
+This describes the devicetree bindings for AVE ethernet controller
+implemented on Socionext UniPhier SoCs.
+
+Required properties:
+ - compatible: Should be
+	- "socionext,uniphier-pro4-ave4" : for Pro4 SoC
+	- "socionext,uniphier-pxs2-ave4" : for PXs2 SoC
+	- "socionext,uniphier-ld11-ave4" : for LD11 SoC
+	- "socionext,uniphier-ld20-ave4" : for LD20 SoC
+ - reg: Address where registers are mapped and size of region.
+ - interrupts: Should contain the MAC interrupt.
+ - phy-mode: See ethernet.txt in the same directory. Allow to choose
+	"rgmii", "rmii", or "mii" according to the PHY.
+ - phy-handle: Should point to the external phy device.
+	See ethernet.txt file in the same directory.
+ - clocks: A phandle to the clock for the MAC.
+
+Optional properties:
+ - resets: A phandle to the reset control for the MAC.
+ - local-mac-address: See ethernet.txt in the same directory.
+
+Required subnode:
+ - mdio: A container for child nodes representing phy nodes.
+         See phy.txt in the same directory.
+
+Example:
+
+	ether: ethernet@65000000 {
+		compatible = "socionext,uniphier-ld20-ave4";
+		reg = <0x65000000 0x8500>;
+		interrupts = <0 66 4>;
+		phy-mode = "rgmii";
+		phy-handle = <&ethphy>;
+		clocks = <&sys_clk 6>;
+		resets = <&sys_rst 6>;
+		local-mac-address = [00 00 00 00 00 00];
+
+		mdio {
+			#address-cells = <1>;
+			#size-cells = <0>;
+
+			ethphy: ethphy@1 {
+				reg = <1>;
+			};
+		};
+	};
diff --git a/Documentation/devicetree/bindings/net/socionext-netsec.txt b/Documentation/devicetree/bindings/net/socionext-netsec.txt
new file mode 100644
index 000000000000..0cff94fb0433
--- /dev/null
+++ b/Documentation/devicetree/bindings/net/socionext-netsec.txt
@@ -0,0 +1,53 @@
+* Socionext NetSec Ethernet Controller IP
+
+Required properties:
+- compatible: Should be "socionext,synquacer-netsec"
+- reg: Address and length of the control register area, followed by the
+       address and length of the EEPROM holding the MAC address and
+       microengine firmware
+- interrupts: Should contain ethernet controller interrupt
+- clocks: phandle to the PHY reference clock
+- clock-names: Should be "phy_ref_clk"
+- phy-mode: See ethernet.txt file in the same directory
+- phy-handle: See ethernet.txt in the same directory.
+
+- mdio device tree subnode: When the Netsec has a phy connected to its local
+		mdio, there must be device tree subnode with the following
+		required properties:
+
+	- #address-cells: Must be <1>.
+	- #size-cells: Must be <0>.
+
+	For each phy on the mdio bus, there must be a node with the following
+	fields:
+	- compatible: Refer to phy.txt
+	- reg: phy id used to communicate to phy.
+
+Optional properties: (See ethernet.txt file in the same directory)
+- dma-coherent: Boolean property, must only be present if memory
+	accesses performed by the device are cache coherent.
+- local-mac-address: See ethernet.txt in the same directory.
+- mac-address: See ethernet.txt in the same directory.
+- max-speed: See ethernet.txt in the same directory.
+- max-frame-size: See ethernet.txt in the same directory.
+
+Example:
+	eth0: ethernet@522d0000 {
+		compatible = "socionext,synquacer-netsec";
+		reg = <0 0x522d0000 0x0 0x10000>, <0 0x10000000 0x0 0x10000>;
+		interrupts = <GIC_SPI 176 IRQ_TYPE_LEVEL_HIGH>;
+		clocks = <&clk_netsec>;
+		clock-names = "phy_ref_clk";
+		phy-mode = "rgmii";
+		max-speed = <1000>;
+		max-frame-size = <9000>;
+		phy-handle = <&phy1>;
+
+		mdio {
+			#address-cells = <1>;
+			#size-cells = <0>;
+			phy1: ethernet-phy@1 {
+				compatible = "ethernet-phy-ieee802.3-c22";
+				reg = <1>;
+			};
+		};
diff --git a/Documentation/devicetree/bindings/net/ti,wilink-st.txt b/Documentation/devicetree/bindings/net/ti-bluetooth.txt
index 1649c1f66b07..6d03ff8c7068 100644
--- a/Documentation/devicetree/bindings/net/ti,wilink-st.txt
+++ b/Documentation/devicetree/bindings/net/ti-bluetooth.txt
@@ -1,10 +1,18 @@
-TI WiLink 7/8 (wl12xx/wl18xx) Shared Transport BT/FM/GPS devices
+Texas Instruments Bluetooth Chips
+---------------------------------
+
+This documents the binding structure and common properties for serial
+attached TI Bluetooth devices. The following chips are included in this
+binding:
+
+* TI CC256x Bluetooth devices
+* TI WiLink 7/8 (wl12xx/wl18xx) Shared Transport BT/FM/GPS devices
 
 TI WiLink devices have a UART interface for providing Bluetooth, FM radio,
 and GPS over what's called "shared transport". The shared transport is
 standard BT HCI protocol with additional channels for the other functions.
 
-These devices also have a separate WiFi interface as described in
+TI WiLink devices also have a separate WiFi interface as described in
 wireless/ti,wlcore.txt.
 
 This bindings follows the UART slave device binding in
@@ -12,6 +20,7 @@ This bindings follows the UART slave device binding in
 
 Required properties:
  - compatible: should be one of the following:
+    "ti,cc2560"
     "ti,wl1271-st"
     "ti,wl1273-st"
     "ti,wl1281-st"
@@ -32,6 +41,9 @@ Optional properties:
    See ../clocks/clock-bindings.txt for details.
  - clock-names : Must include the following entry:
    "ext_clock" (External clock provided to the TI combo chip).
+ - nvmem-cells: phandle to nvmem data cell that contains a 6 byte BD address
+   with the most significant byte first (big-endian).
+ - nvmem-cell-names: "bd-address" (required when nvmem-cells is specified)
 
 Example:
 
@@ -43,5 +55,7 @@ Example:
 		enable-gpios = <&gpio1 7 GPIO_ACTIVE_HIGH>;
 		clocks = <&clk32k_wl18xx>;
 		clock-names = "ext_clock";
+		nvmem-cells = <&bd_address>;
+		nvmem-cell-names = "bd-address";
 	};
 };
diff --git a/Documentation/devicetree/bindings/net/wireless/mediatek,mt76.txt b/Documentation/devicetree/bindings/net/wireless/mediatek,mt76.txt
new file mode 100644
index 000000000000..0c17a0ec9b7b
--- /dev/null
+++ b/Documentation/devicetree/bindings/net/wireless/mediatek,mt76.txt
@@ -0,0 +1,32 @@
+* MediaTek mt76xx devices
+
+This node provides properties for configuring the MediaTek mt76xx wireless
+device. The node is expected to be specified as a child node of the PCI
+controller to which the wireless chip is connected.
+
+Optional properties:
+
+- mac-address: See ethernet.txt in the parent directory
+- local-mac-address: See ethernet.txt in the parent directory
+- ieee80211-freq-limit: See ieee80211.txt
+- mediatek,mtd-eeprom: Specify a MTD partition + offset containing EEPROM data
+
+Optional nodes:
+- led: Properties for a connected LED
+  Optional properties:
+    - led-sources: See Documentation/devicetree/bindings/leds/common.txt
+
+Example:
+
+&pcie {
+	pcie0 {
+		wifi@0,0 {
+			compatible = "mediatek,mt76";
+			reg = <0x0000 0 0 0 0>;
+			ieee80211-freq-limit = <5000000 6000000>;
+			mediatek,mtd-eeprom = <&factory 0x8000>;
+
+			led {
+				led-sources = <2>;
+			};
+		};
+	};
+};
diff --git a/Documentation/devicetree/bindings/net/wireless/qcom,ath10k.txt b/Documentation/devicetree/bindings/net/wireless/qcom,ath10k.txt
index 74d7f0af209c..3d2a031217da 100644
--- a/Documentation/devicetree/bindings/net/wireless/qcom,ath10k.txt
+++ b/Documentation/devicetree/bindings/net/wireless/qcom,ath10k.txt
@@ -41,6 +41,9 @@ Optional properties:
 - qcom,msi_addr: MSI interrupt address.
 - qcom,msi_base: Base value to add before writing MSI data into
 		MSI address register.
+- qcom,ath10k-calibration-variant: string to search for in the board-2.bin
+				   variant list with the same bus and device
+				   specific ids
 - qcom,ath10k-calibration-data : calibration data + board specific data
 				 as an array, the length can vary between
 				 hw versions.
diff --git a/Documentation/devicetree/bindings/powerpc/fsl/mpc5200.txt b/Documentation/devicetree/bindings/powerpc/fsl/mpc5200.txt
index 4ccb2cd5df94..d096cf461d81 100644
--- a/Documentation/devicetree/bindings/powerpc/fsl/mpc5200.txt
+++ b/Documentation/devicetree/bindings/powerpc/fsl/mpc5200.txt
@@ -195,4 +195,4 @@ External interrupts:
 
 fsl,mpc5200-mscan nodes
 -----------------------
-See file can.txt in this directory.
+See file Documentation/networking/can.rst
diff --git a/Documentation/fault-injection/fault-injection.txt b/Documentation/fault-injection/fault-injection.txt
index 918972babcd8..f4a32463ca48 100644
--- a/Documentation/fault-injection/fault-injection.txt
+++ b/Documentation/fault-injection/fault-injection.txt
@@ -30,6 +30,12 @@ o fail_mmc_request
   injects MMC data errors on devices permitted by setting
   debugfs entries under /sys/kernel/debug/mmc0/fail_mmc_request
 
+o fail_function
+
+  injects error return on specific functions, which are marked by
+  ALLOW_ERROR_INJECTION() macro, by setting debugfs entries
+  under /sys/kernel/debug/fail_function. No boot option supported.
+
 Configure fault-injection capabilities behavior
 -----------------------------------------------
 
@@ -123,6 +129,29 @@ configuration of fault-injection capabilities.
 	default is 'N', setting it to 'Y' will disable failure injections
 	when dealing with private (address space) futexes.
 
+- /sys/kernel/debug/fail_function/inject:
+
+	Format: { 'function-name' | '!function-name' | '' }
+	specifies the target function of error injection by name.
+	If the function name is prefixed with '!', the function is
+	removed from the injection list. If nothing is specified (''),
+	the injection list is cleared.
+
+- /sys/kernel/debug/fail_function/injectable:
+
+	(read only) shows the error injectable functions and which type of
+	error values can be specified. The error type will be one of the
+	following:
+	- NULL:	retval must be 0.
+	- ERRNO: retval must be -1 to -MAX_ERRNO (-4096).
+	- ERR_NULL: retval must be 0 or -1 to -MAX_ERRNO (-4096).
+
+- /sys/kernel/debug/fail_function/<function-name>/retval:
+
+	specifies the "error" return value to inject for the given
+	function. This file is created when the user specifies a new
+	injection entry.
+
 o Boot option
 
 In order to inject faults while debugfs is not available (early boot time),
@@ -268,6 +297,45 @@ trap "echo 0 > /sys/kernel/debug/$FAILTYPE/probability" SIGINT SIGTERM EXIT
 echo "Injecting errors into the module $module... (interrupt to stop)"
 sleep 1000000
 
+------------------------------------------------------------------------------
+
+o Inject open_ctree error during btrfs mount
+
+#!/bin/bash
+
+rm -f testfile.img
+dd if=/dev/zero of=testfile.img bs=1M seek=1000 count=1
+DEVICE=$(losetup --show -f testfile.img)
+mkfs.btrfs -f $DEVICE
+mkdir -p tmpmnt
+
+FAILTYPE=fail_function
+FAILFUNC=open_ctree
+echo $FAILFUNC > /sys/kernel/debug/$FAILTYPE/inject
+echo -12 > /sys/kernel/debug/$FAILTYPE/$FAILFUNC/retval
+echo N > /sys/kernel/debug/$FAILTYPE/task-filter
+echo 100 > /sys/kernel/debug/$FAILTYPE/probability
+echo 0 > /sys/kernel/debug/$FAILTYPE/interval
+echo -1 > /sys/kernel/debug/$FAILTYPE/times
+echo 0 > /sys/kernel/debug/$FAILTYPE/space
+echo 1 > /sys/kernel/debug/$FAILTYPE/verbose
+
+mount -t btrfs $DEVICE tmpmnt
+if [ $? -ne 0 ]
+then
+	echo "SUCCESS!"
+else
+	echo "FAILED!"
+	umount tmpmnt
+fi
+
+echo > /sys/kernel/debug/$FAILTYPE/inject
+
+rmdir tmpmnt
+losetup -d $DEVICE
+rm testfile.img
+
+
 Tool to run command with failslab or fail_page_alloc
 ----------------------------------------------------
 In order to make it easier to accomplish the tasks mentioned above, we can use
diff --git a/Documentation/networking/00-INDEX b/Documentation/networking/00-INDEX
index 7a79b3587dd3..2b89d91b376f 100644
--- a/Documentation/networking/00-INDEX
+++ b/Documentation/networking/00-INDEX
@@ -36,8 +36,6 @@ bonding.txt
 	- Linux Ethernet Bonding Driver HOWTO: link aggregation in Linux.
 bridge.txt
 	- where to get user space programs for ethernet bridging with Linux.
-can.txt
-	- documentation on CAN protocol family.
 cdc_mbim.txt
 	- 3G/LTE USB modem (Mobile Broadband Interface Model)
 checksum-offloads.txt
@@ -228,6 +226,8 @@ x25.txt
 	- general info on X.25 development.
 x25-iface.txt
 	- description of the X.25 Packet Layer to LAPB device interface.
+xfrm_device.txt
+	- description of XFRM offload API
 xfrm_proc.txt
 	- description of the statistics package for XFRM.
 xfrm_sync.txt
diff --git a/Documentation/networking/batman-adv.rst b/Documentation/networking/batman-adv.rst
index a342b2cc3dc6..245fb6c0ab6f 100644
--- a/Documentation/networking/batman-adv.rst
+++ b/Documentation/networking/batman-adv.rst
@@ -1,3 +1,5 @@
+.. SPDX-License-Identifier: GPL-2.0
+
 ==========
 batman-adv
 ==========
diff --git a/Documentation/networking/can.rst b/Documentation/networking/can.rst
new file mode 100644
index 000000000000..d23c51abf8c6
--- /dev/null
+++ b/Documentation/networking/can.rst
@@ -0,0 +1,1437 @@
+===================================
+SocketCAN - Controller Area Network
+===================================
+
+Overview / What is SocketCAN
+============================
+
+The socketcan package is an implementation of CAN protocols
+(Controller Area Network) for Linux.  CAN is a networking technology
+which has widespread use in automation, embedded devices, and
+automotive fields.  While there have been other CAN implementations
+for Linux based on character devices, SocketCAN uses the Berkeley
+socket API, the Linux network stack and implements the CAN device
+drivers as network interfaces.  The CAN socket API has been designed
+to be as similar as possible to the TCP/IP protocols so that
+programmers familiar with network programming can easily learn how to
+use CAN sockets.
+
+
+.. _socketcan-motivation:
+
+Motivation / Why Using the Socket API
+=====================================
+
+There have been CAN implementations for Linux before SocketCAN, so the
+question arises why we started another project.  Most existing
+implementations come as a device driver for some CAN hardware, they
+are based on character devices and provide comparatively little
+functionality.  Usually, there is only a hardware-specific device
+driver which provides a character device interface to send and
+receive raw CAN frames, directly to/from the controller hardware.
+Queueing of frames and higher-level transport protocols like ISO-TP
+have to be implemented in user space applications.  Also, most
+character-device implementations support only one single process to
+open the device at a time, similar to a serial interface.  Exchanging
+the CAN controller requires using another device driver and often
+adapting large parts of the application to the new driver's API.
+
+SocketCAN was designed to overcome all of these limitations.  A new
+protocol family has been implemented which provides a socket interface
+to user space applications and which builds upon the Linux network
+layer, enabling use of all the provided queueing functionality.  A device
+driver for CAN controller hardware registers itself with the Linux
+network layer as a network device, so that CAN frames from the
+controller can be passed up to the network layer and on to the CAN
+protocol family module and also vice-versa.  Also, the protocol family
+module provides an API for transport protocol modules to register, so
+that any number of transport protocols can be loaded or unloaded
+dynamically.  In fact, the can core module alone does not provide any
+protocol and cannot be used without loading at least one additional
+protocol module.  Multiple sockets can be opened at the same time,
+on different or the same protocol module and they can listen/send
+frames on different or the same CAN IDs.  Several sockets listening on
+the same interface for frames with the same CAN ID are all passed the
+same received matching CAN frames.  An application wishing to
+communicate using a specific transport protocol, e.g. ISO-TP, just
+selects that protocol when opening the socket, and then can read and
+write application data byte streams, without having to deal with
+CAN-IDs, frames, etc.
+
+Similar functionality visible from user-space could be provided by a
+character device, too, but this would lead to a technically inelegant
+solution for a couple of reasons:
+
+* **Intricate usage:**  Instead of passing a protocol argument to
+  socket(2) and using bind(2) to select a CAN interface and CAN ID, an
+  application would have to do all these operations using ioctl(2)s.
+
+* **Code duplication:**  A character device cannot make use of the Linux
+  network queueing code, so all that code would have to be duplicated
+  for CAN networking.
+
+* **Abstraction:**  In most existing character-device implementations, the
+  hardware-specific device driver for a CAN controller directly
+  provides the character device for the application to work with.
+  This is at least very unusual in Unix systems for both char and
+  block devices.  For example you don't have a character device for a
+  certain UART of a serial interface, a certain sound chip in your
+  computer, a SCSI or IDE controller providing access to your hard
+  disk or tape streamer device.  Instead, you have abstraction layers
+  which provide a unified character or block device interface to the
+  application on the one hand, and an interface for hardware-specific
+  device drivers on the other hand.  These abstractions are provided
+  by subsystems like the tty layer, the audio subsystem or the SCSI
+  and IDE subsystems for the devices mentioned above.
+
+  The easiest way to implement a CAN device driver is as a character
+  device without such a (complete) abstraction layer, as is done by most
+  existing drivers.  The right way, however, would be to add such a
+  layer with all the functionality like registering for certain CAN
+  IDs, supporting several open file descriptors and (de)multiplexing
+  CAN frames between them, (sophisticated) queueing of CAN frames, and
+  providing an API for device drivers to register with.  However, then
+  it would be no more difficult, or maybe even easier, to use the
+  networking framework provided by the Linux kernel, and this is what
+  SocketCAN does.
+
+The use of the networking framework of the Linux kernel is just the
+natural and most appropriate way to implement CAN for Linux.
+
+
+.. _socketcan-concept:
+
+SocketCAN Concept
+=================
+
+As described in :ref:`socketcan-motivation` the main goal of SocketCAN is to
+provide a socket interface to user space applications which builds
+upon the Linux network layer. In contrast to the commonly known
+TCP/IP and ethernet networking, the CAN bus is a broadcast-only(!)
+medium that has no MAC-layer addressing like ethernet. The CAN-identifier
+(can_id) is used for arbitration on the CAN-bus. Therefore the CAN-IDs
+have to be chosen uniquely on the bus. When designing a CAN-ECU
+network the CAN-IDs are mapped to be sent by a specific ECU.
+For this reason a CAN-ID can be treated best as a kind of source address.
+
+
+.. _socketcan-receive-lists:
+
+Receive Lists
+-------------
+
+The network transparent access of multiple applications leads to the
+problem that different applications may be interested in the same
+CAN-IDs from the same CAN network interface. The SocketCAN core
+module - which implements the protocol family CAN - provides several
+highly efficient receive lists for this reason. If e.g. a user space
+application opens a CAN RAW socket, the raw protocol module itself
+requests the (range of) CAN-IDs from the SocketCAN core that are
+requested by the user. The subscription and unsubscription of
+CAN-IDs can be done for specific CAN interfaces or for all(!) known
+CAN interfaces with the can_rx_(un)register() functions provided to
+CAN protocol modules by the SocketCAN core (see :ref:`socketcan-core-module`).
+To optimize the CPU usage at runtime the receive lists are split up
+into several specific lists per device that match the requested
+filter complexity for a given use-case.
+
+
+.. _socketcan-local-loopback1:
+
+Local Loopback of Sent Frames
+-----------------------------
+
+As known from other networking concepts the data exchanging
+applications may run on the same or different nodes without any
+change (except for the corresponding addressing information):
+
+.. code::
+
+	 ___   ___   ___                   _______   ___
+	| _ | | _ | | _ |                 | _   _ | | _ |
+	||A|| ||B|| ||C||                 ||A| |B|| ||C||
+	|___| |___| |___|                 |_______| |___|
+	  |     |     |                       |       |
+	-----------------(1)- CAN bus -(2)---------------
+
+To ensure that application A receives the same information in the
+example (2) as it would receive in example (1) there is need for
+some kind of local loopback of the sent CAN frames on the appropriate
+node.
+
+The Linux network devices (by default) can just handle the
+transmission and reception of media dependent frames. Due to the
+arbitration on the CAN bus the transmission of a low prio CAN-ID
+may be delayed by the reception of a high prio CAN frame. To
+reflect the correct [*]_ traffic on the node the loopback of the sent
+data has to be performed right after a successful transmission. If
+the CAN network interface is not capable of performing the loopback for
+some reason the SocketCAN core can do this task as a fallback solution.
+See :ref:`socketcan-local-loopback2` for details (recommended).
+
+The loopback functionality is enabled by default to reflect standard
+networking behaviour for CAN applications. Due to some requests from
+the RT-SocketCAN group the loopback optionally may be disabled for each
+separate socket. See sockopts from the CAN RAW sockets in :ref:`socketcan-raw-sockets`.
+
+.. [*] you really like to have this when you're running analyser
+       tools like 'candump' or 'cansniffer' on the (same) node.
+
+
+.. _socketcan-network-problem-notifications:
+
+Network Problem Notifications
+-----------------------------
+
+The use of the CAN bus may lead to several problems on the physical
+and media access control layer. Detecting and logging of these lower
+layer problems is a vital requirement for CAN users to identify
+hardware issues on the physical transceiver layer as well as
+arbitration problems and error frames caused by the different
+ECUs. The occurrence of detected errors is important for diagnosis
+and has to be logged together with the exact timestamp. For this
+reason the CAN interface driver can generate so called Error Message
+Frames that can optionally be passed to the user application in the
+same way as other CAN frames. Whenever an error on the physical layer
+or the MAC layer is detected (e.g. by the CAN controller) the driver
+creates an appropriate error message frame. Error message frames can
+be requested by the user application using the common CAN filter
+mechanisms. Inside this filter definition the types of errors of
+interest may be selected. The reception of error messages is disabled
+by default. The format of the CAN error message frame is briefly
+described in the Linux header file "include/uapi/linux/can/error.h".
+
+
+How to use SocketCAN
+====================
+
+Like TCP/IP, you first need to open a socket for communicating over a
+CAN network. Since SocketCAN implements a new protocol family, you
+need to pass PF_CAN as the first argument to the socket(2) system
+call. Currently, there are two CAN protocols to choose from, the raw
+socket protocol and the broadcast manager (BCM). So to open a socket,
+you would write::
+
+    s = socket(PF_CAN, SOCK_RAW, CAN_RAW);
+
+and::
+
+    s = socket(PF_CAN, SOCK_DGRAM, CAN_BCM);
+
+respectively.  After the successful creation of the socket, you would
+normally use the bind(2) system call to bind the socket to a CAN
+interface (which is different from TCP/IP due to different addressing
+- see :ref:`socketcan-concept`). After binding (CAN_RAW) or connecting (CAN_BCM)
+the socket, you can read(2) and write(2) from/to the socket or use
+send(2), sendto(2), sendmsg(2) and the recv* counterpart operations
+on the socket as usual. There are also CAN specific socket options
+described below.
+
+The basic CAN frame structure and the sockaddr structure are defined
+in include/linux/can.h:
+
+.. code-block:: C
+
+    struct can_frame {
+            canid_t can_id;  /* 32 bit CAN_ID + EFF/RTR/ERR flags */
+            __u8    can_dlc; /* frame payload length in byte (0 .. 8) */
+            __u8    __pad;   /* padding */
+            __u8    __res0;  /* reserved / padding */
+            __u8    __res1;  /* reserved / padding */
+            __u8    data[8] __attribute__((aligned(8)));
+    };
+
+The alignment of the (linear) payload data[] to a 64bit boundary
+allows the user to define their own structs and unions to easily access
+the CAN payload. There is no given byteorder on the CAN bus by
+default. A read(2) system call on a CAN_RAW socket transfers a
+struct can_frame to the user space.
+
+The sockaddr_can structure has an interface index like the
+PF_PACKET socket, that also binds to a specific interface:
+
+.. code-block:: C
+
+    struct sockaddr_can {
+            sa_family_t can_family;
+            int         can_ifindex;
+            union {
+                    /* transport protocol class address info (e.g. ISOTP) */
+                    struct { canid_t rx_id, tx_id; } tp;
+
+                    /* reserved for future CAN protocols address information */
+            } can_addr;
+    };
+
+To determine the interface index an appropriate ioctl() has to
+be used (example for CAN_RAW sockets without error checking):
+
+.. code-block:: C
+
+    int s;
+    struct sockaddr_can addr;
+    struct ifreq ifr;
+
+    s = socket(PF_CAN, SOCK_RAW, CAN_RAW);
+
+    strcpy(ifr.ifr_name, "can0" );
+    ioctl(s, SIOCGIFINDEX, &ifr);
+
+    addr.can_family = AF_CAN;
+    addr.can_ifindex = ifr.ifr_ifindex;
+
+    bind(s, (struct sockaddr *)&addr, sizeof(addr));
+
+    (..)
+
+To bind a socket to all(!) CAN interfaces the interface index must
+be 0 (zero). In this case the socket receives CAN frames from every
+enabled CAN interface. To determine the originating CAN interface
+the system call recvfrom(2) may be used instead of read(2). To send
+on a socket that is bound to 'any' interface sendto(2) is needed to
+specify the outgoing interface.
+
+Reading CAN frames from a bound CAN_RAW socket (see above) consists
+of reading a struct can_frame:
+
+.. code-block:: C
+
+    struct can_frame frame;
+
+    nbytes = read(s, &frame, sizeof(struct can_frame));
+
+    if (nbytes < 0) {
+            perror("can raw socket read");
+            return 1;
+    }
+
+    /* paranoid check ... */
+    if (nbytes < sizeof(struct can_frame)) {
+            fprintf(stderr, "read: incomplete CAN frame\n");
+            return 1;
+    }
+
+    /* do something with the received CAN frame */
+
+Writing CAN frames can be done similarly, with the write(2) system call::
+
+    nbytes = write(s, &frame, sizeof(struct can_frame));
+
+When the CAN interface is bound to 'any' existing CAN interface
+(addr.can_ifindex = 0) it is recommended to use recvfrom(2) if the
+information about the originating CAN interface is needed:
+
+.. code-block:: C
+
+    struct sockaddr_can addr;
+    struct ifreq ifr;
+    socklen_t len = sizeof(addr);
+    struct can_frame frame;
+
+    nbytes = recvfrom(s, &frame, sizeof(struct can_frame),
+                      0, (struct sockaddr*)&addr, &len);
+
+    /* get interface name of the received CAN frame */
+    ifr.ifr_ifindex = addr.can_ifindex;
+    ioctl(s, SIOCGIFNAME, &ifr);
+    printf("Received a CAN frame from interface %s", ifr.ifr_name);
+
+To write CAN frames on sockets bound to 'any' CAN interface the
+outgoing interface has to be defined explicitly:
+
+.. code-block:: C
+
+    strcpy(ifr.ifr_name, "can0");
+    ioctl(s, SIOCGIFINDEX, &ifr);
+    addr.can_ifindex = ifr.ifr_ifindex;
+    addr.can_family  = AF_CAN;
+
+    nbytes = sendto(s, &frame, sizeof(struct can_frame),
+                    0, (struct sockaddr*)&addr, sizeof(addr));
+
+An accurate timestamp can be obtained with an ioctl(2) call after reading
+a message from the socket:
+
+.. code-block:: C
+
+    struct timeval tv;
+    ioctl(s, SIOCGSTAMP, &tv);
+
+The timestamp has a resolution of one microsecond and is set automatically
+at the reception of a CAN frame.
+
+Remark about CAN FD (flexible data rate) support:
+
+Generally the handling of CAN FD is very similar to the formerly described
+examples. The new CAN FD capable CAN controllers support two different
+bitrates for the arbitration phase and the payload phase of the CAN FD frame
+and up to 64 bytes of payload. This extended payload length breaks all the
+kernel interfaces (ABI) which heavily rely on the CAN frame with fixed eight
+bytes of payload (struct can_frame) like the CAN_RAW socket. Therefore e.g.
+the CAN_RAW socket supports a new socket option CAN_RAW_FD_FRAMES that
+switches the socket into a mode that allows the handling of CAN FD frames
+and (legacy) CAN frames simultaneously (see :ref:`socketcan-rawfd`).
+
+The struct canfd_frame is defined in include/linux/can.h:
+
+.. code-block:: C
+
+    struct canfd_frame {
+            canid_t can_id;  /* 32 bit CAN_ID + EFF/RTR/ERR flags */
+            __u8    len;     /* frame payload length in byte (0 .. 64) */
+            __u8    flags;   /* additional flags for CAN FD */
+            __u8    __res0;  /* reserved / padding */
+            __u8    __res1;  /* reserved / padding */
+            __u8    data[64] __attribute__((aligned(8)));
+    };
+
+The struct canfd_frame and the existing struct can_frame have the can_id,
+the payload length and the payload data at the same offset inside their
+structures. This allows one to handle the different structures very similarly.
+When the content of a struct can_frame is copied into a struct canfd_frame
+all structure elements can be used as-is - only the data[] becomes extended.
+
+When introducing the struct canfd_frame it turned out that the data length
+code (DLC) of the struct can_frame was used as length information, as the
+length and the DLC have a 1:1 mapping in the range of 0 .. 8. To preserve
+the easy handling of the length information the canfd_frame.len element
+contains a plain length value from 0 .. 64. So both canfd_frame.len and
+can_frame.can_dlc are equal and contain length information and no DLC.
+For details about the distinction of CAN and CAN FD capable devices and
+the mapping to the bus-relevant data length code (DLC), see :ref:`socketcan-can-fd-driver`.
+
+The lengths of the two CAN(FD) frame structures define the maximum transfer
+unit (MTU) of the CAN(FD) network interface and skbuff data length. Two
+definitions are specified for CAN specific MTUs in include/linux/can.h:
+
+.. code-block:: C
+
+  #define CAN_MTU   (sizeof(struct can_frame))   == 16  => 'legacy' CAN frame
+  #define CANFD_MTU (sizeof(struct canfd_frame)) == 72  => CAN FD frame
+
+
+.. _socketcan-raw-sockets:
+
+RAW Protocol Sockets with can_filters (SOCK_RAW)
+------------------------------------------------
+
+Using CAN_RAW sockets is largely comparable to the commonly
+known access to CAN character devices. To meet the new possibilities
+provided by the multi user SocketCAN approach, some reasonable
+defaults are set at RAW socket binding time:
+
+- The filters are set to exactly one filter receiving everything
+- The socket only receives valid data frames (=> no error message frames)
+- The loopback of sent CAN frames is enabled (see :ref:`socketcan-local-loopback2`)
+- The socket does not receive its own sent frames (in loopback mode)
+
+These default settings may be changed before or after binding the socket.
+To use the referenced definitions of the socket options for CAN_RAW
+sockets, include <linux/can/raw.h>.
+
+
+.. _socketcan-rawfilter:
+
+RAW socket option CAN_RAW_FILTER
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+The reception of CAN frames using CAN_RAW sockets can be controlled
+by defining 0 .. n filters with the CAN_RAW_FILTER socket option.
+
+The CAN filter structure is defined in include/linux/can.h:
+
+.. code-block:: C
+
+    struct can_filter {
+            canid_t can_id;
+            canid_t can_mask;
+    };
+
+A filter matches, when:
+
+.. code-block:: C
+
+    <received_can_id> & mask == can_id & mask
+
+which is analogous to the hardware filter semantics of known CAN controllers.
+The filter can be inverted in this semantic, when the CAN_INV_FILTER
+bit is set in can_id element of the can_filter structure. In
+contrast to CAN controller hardware filters the user may set 0 .. n
+receive filters for each open socket separately:
+
+.. code-block:: C
+
+    struct can_filter rfilter[2];
+
+    rfilter[0].can_id   = 0x123;
+    rfilter[0].can_mask = CAN_SFF_MASK;
+    rfilter[1].can_id   = 0x200;
+    rfilter[1].can_mask = 0x700;
+
+    setsockopt(s, SOL_CAN_RAW, CAN_RAW_FILTER, &rfilter, sizeof(rfilter));
+
+To disable the reception of CAN frames on the selected CAN_RAW socket:
+
+.. code-block:: C
+
+    setsockopt(s, SOL_CAN_RAW, CAN_RAW_FILTER, NULL, 0);
+
+Setting the filters to zero filters may seem pointless, as not reading
+data anyway causes the raw socket to discard the received CAN frames.
+But having this 'send only' use-case we may remove the receive list in
+the kernel to save a little (really a very little!) CPU usage.
+
+CAN Filter Usage Optimisation
+.............................
+
+The CAN filters are processed in per-device filter lists at CAN frame
+reception time. To reduce the number of checks that need to be performed
+while walking through the filter lists the CAN core provides an optimized
+filter handling when the filter subscription focuses on a single CAN ID.
+
+For the possible 2048 SFF CAN identifiers the identifier is used as an index
+to access the corresponding subscription list without any further checks.
+For the 2^29 possible EFF CAN identifiers a 10 bit XOR folding is used as
+hash function to retrieve the EFF table index.
+
+To benefit from the optimized filters for single CAN identifiers the
+CAN_SFF_MASK or CAN_EFF_MASK have to be set into can_filter.mask together
+with set CAN_EFF_FLAG and CAN_RTR_FLAG bits. A set CAN_EFF_FLAG bit in the
+can_filter.mask makes clear that it matters whether an SFF or EFF CAN ID is
+subscribed. E.g. in the example from above:
+
+.. code-block:: C
+
+    rfilter[0].can_id   = 0x123;
+    rfilter[0].can_mask = CAN_SFF_MASK;
+
+both SFF frames with CAN ID 0x123 and EFF frames with 0xXXXXX123 can pass.
+
+To filter for only 0x123 (SFF) and 0x12345678 (EFF) CAN identifiers the
+filter has to be defined in this way to benefit from the optimized filters:
+
+.. code-block:: C
+
+    struct can_filter rfilter[2];
+
+    rfilter[0].can_id   = 0x123;
+    rfilter[0].can_mask = (CAN_EFF_FLAG | CAN_RTR_FLAG | CAN_SFF_MASK);
+    rfilter[1].can_id   = 0x12345678 | CAN_EFF_FLAG;
+    rfilter[1].can_mask = (CAN_EFF_FLAG | CAN_RTR_FLAG | CAN_EFF_MASK);
+
+    setsockopt(s, SOL_CAN_RAW, CAN_RAW_FILTER, &rfilter, sizeof(rfilter));
+
+
+RAW Socket Option CAN_RAW_ERR_FILTER
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+As described in :ref:`socketcan-network-problem-notifications` the CAN interface driver can generate so
+called Error Message Frames that can optionally be passed to the user
+application in the same way as other CAN frames. The possible
+errors are divided into different error classes that may be filtered
+using the appropriate error mask. To register for every possible
+error condition CAN_ERR_MASK can be used as value for the error mask.
+The values for the error mask are defined in linux/can/error.h:
+
+.. code-block:: C
+
+    can_err_mask_t err_mask = ( CAN_ERR_TX_TIMEOUT | CAN_ERR_BUSOFF );
+
+    setsockopt(s, SOL_CAN_RAW, CAN_RAW_ERR_FILTER,
+               &err_mask, sizeof(err_mask));
+
+
+RAW Socket Option CAN_RAW_LOOPBACK
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+To meet multi user needs the local loopback is enabled by default
+(see :ref:`socketcan-local-loopback1` for details). But in some embedded use-cases
+(e.g. when only one application uses the CAN bus) this loopback
+functionality can be disabled (separately for each socket):
+
+.. code-block:: C
+
+    int loopback = 0; /* 0 = disabled, 1 = enabled (default) */
+
+    setsockopt(s, SOL_CAN_RAW, CAN_RAW_LOOPBACK, &loopback, sizeof(loopback));
+
+
+RAW socket option CAN_RAW_RECV_OWN_MSGS
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+When the local loopback is enabled, all the sent CAN frames are
+looped back to the open CAN sockets that registered for the CAN
+frames' CAN-ID on this given interface to meet the multi user
+needs. The reception of the CAN frames on the same socket that was
+sending the CAN frame is assumed to be unwanted and therefore
+disabled by default. This default behaviour may be changed on
+demand:
+
+.. code-block:: C
+
+    int recv_own_msgs = 1; /* 0 = disabled (default), 1 = enabled */
+
+    setsockopt(s, SOL_CAN_RAW, CAN_RAW_RECV_OWN_MSGS,
+               &recv_own_msgs, sizeof(recv_own_msgs));
+
+
+.. _socketcan-rawfd:
+
+RAW Socket Option CAN_RAW_FD_FRAMES
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+CAN FD support in CAN_RAW sockets can be enabled with a new socket option
+CAN_RAW_FD_FRAMES which is off by default. When the new socket option is
+not supported by the CAN_RAW socket (e.g. on older kernels), switching the
+CAN_RAW_FD_FRAMES option returns the error -ENOPROTOOPT.
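+
+A minimal sketch to enable the option on an existing CAN_RAW socket 's'
+(without error checking) could look like this:
+
+.. code-block:: C
+
+    int enable_canfd = 1; /* 0 = disabled (default), 1 = enabled */
+
+    setsockopt(s, SOL_CAN_RAW, CAN_RAW_FD_FRAMES,
+               &enable_canfd, sizeof(enable_canfd));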
+
+Once CAN_RAW_FD_FRAMES is enabled the application can send both CAN frames
+and CAN FD frames. OTOH the application has to handle CAN and CAN FD frames
+when reading from the socket:
+
+.. code-block:: C
+
+    CAN_RAW_FD_FRAMES enabled:  CAN_MTU and CANFD_MTU are allowed
+    CAN_RAW_FD_FRAMES disabled: only CAN_MTU is allowed (default)
+
+Example:
+
+.. code-block:: C
+
+    [ remember: CANFD_MTU == sizeof(struct canfd_frame) ]
+
+    struct canfd_frame cfd;
+
+    nbytes = read(s, &cfd, CANFD_MTU);
+
+    if (nbytes == CANFD_MTU) {
+            printf("got CAN FD frame with length %d\n", cfd.len);
+            /* cfd.flags contains valid data */
+    } else if (nbytes == CAN_MTU) {
+            printf("got legacy CAN frame with length %d\n", cfd.len);
+            /* cfd.flags is undefined */
+    } else {
+            fprintf(stderr, "read: invalid CAN(FD) frame\n");
+            return 1;
+    }
+
+    /* the content can be handled independently from the received MTU size */
+
+    printf("can_id: %X data length: %d data: ", cfd.can_id, cfd.len);
+    for (i = 0; i < cfd.len; i++)
+            printf("%02X ", cfd.data[i]);
+
+When a read(2) with size CANFD_MTU returns only CAN_MTU bytes that have
+been received from the socket, a legacy CAN frame has been read into the
+provided CAN FD structure. Note that the canfd_frame.flags data field is
+not specified in the struct can_frame and therefore it is only valid in
+CANFD_MTU sized CAN FD frames.
+
+Implementation hint for new CAN applications:
+
+To build a CAN FD aware application use struct canfd_frame as basic CAN
+data structure for CAN_RAW based applications. When the application is
+executed on an older Linux kernel and switching the CAN_RAW_FD_FRAMES
+socket option returns an error: No problem. You'll get legacy CAN frames
+or CAN FD frames and can process them the same way.
+
+When sending to CAN devices make sure that the device is capable of handling
+CAN FD frames by checking if the device maximum transfer unit is CANFD_MTU.
+The CAN device MTU can be retrieved e.g. with a SIOCGIFMTU ioctl() syscall.
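+
+For example (assuming an interface named can0, without error checking):
+
+.. code-block:: C
+
+    struct ifreq ifr;
+
+    strcpy(ifr.ifr_name, "can0");
+    ioctl(s, SIOCGIFMTU, &ifr);
+
+    if (ifr.ifr_mtu == CANFD_MTU)
+            printf("%s is CAN FD capable\n", ifr.ifr_name);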
+
+
+RAW socket option CAN_RAW_JOIN_FILTERS
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+The CAN_RAW socket can set multiple CAN identifier specific filters that
+lead to multiple filters in the af_can.c filter processing. These filters
+are independent of each other, which leads to logically OR'ed filters when
+applied (see :ref:`socketcan-rawfilter`).
+
+This socket option joins the given CAN filters in the way that only CAN
+frames are passed to user space that match *all* given CAN filters. The
+semantic for the applied filters is therefore changed to a logical AND.
+
+This is useful especially when the filterset is a combination of filters
+where the CAN_INV_FILTER flag is set in order to notch single CAN IDs or
+CAN ID ranges from the incoming traffic.
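+
+A minimal sketch to switch an existing CAN_RAW socket 's' to the AND
+semantic (without error checking) could look like this:
+
+.. code-block:: C
+
+    int join_filters = 1; /* 0 = OR semantic (default), 1 = AND semantic */
+
+    setsockopt(s, SOL_CAN_RAW, CAN_RAW_JOIN_FILTERS,
+               &join_filters, sizeof(join_filters));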
+
+
+RAW Socket Returned Message Flags
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+When using the recvmsg() call, the msg->msg_flags may contain the following flags:
+
+MSG_DONTROUTE:
+	set when the received frame was created on the local host.
+
+MSG_CONFIRM:
+	set when the frame was sent via the socket it is received on.
+	This flag can be interpreted as a 'transmission confirmation' when the
+	CAN driver supports the echo of frames on driver level, see
+	:ref:`socketcan-local-loopback1` and :ref:`socketcan-local-loopback2`.
+	In order to receive such messages, CAN_RAW_RECV_OWN_MSGS must be set.
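+
+A minimal sketch to check these flags after reception (without error
+checking) could look like this:
+
+.. code-block:: C
+
+    struct can_frame frame;
+    struct iovec iov = {
+            .iov_base = &frame,
+            .iov_len  = sizeof(frame),
+    };
+    struct msghdr msg = {
+            .msg_iov    = &iov,
+            .msg_iovlen = 1,
+    };
+
+    nbytes = recvmsg(s, &msg, 0);
+
+    if (nbytes == sizeof(frame) && (msg.msg_flags & MSG_CONFIRM))
+            printf("transmission confirmation for CAN ID %X\n", frame.can_id);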
+
+
+Broadcast Manager Protocol Sockets (SOCK_DGRAM)
+-----------------------------------------------
+
+The Broadcast Manager protocol provides a command based configuration
+interface to filter and send (e.g. cyclic) CAN messages in kernel space.
+
+Receive filters can be used to down sample frequent messages; detect events
+such as message contents changes, packet length changes, and do time-out
+monitoring of received messages.
+
+Periodic transmission tasks of CAN frames or a sequence of CAN frames can be
+created and modified at runtime; both the message content and the two
+possible transmit intervals can be altered.
+
+A BCM socket is not intended for sending individual CAN frames using the
+struct can_frame as known from the CAN_RAW socket. Instead a special BCM
+configuration message is defined. The basic BCM configuration message used
+to communicate with the broadcast manager and the available operations are
+defined in the linux/can/bcm.h include. The BCM message consists of a
+message header with a command ('opcode') followed by zero or more CAN frames.
+The broadcast manager sends responses to user space in the same form:
+
+.. code-block:: C
+
+    struct bcm_msg_head {
+            __u32 opcode;                   /* command */
+            __u32 flags;                    /* special flags */
+            __u32 count;                    /* run 'count' times with ival1 */
+            struct timeval ival1, ival2;    /* count and subsequent interval */
+            canid_t can_id;                 /* unique can_id for task */
+            __u32 nframes;                  /* number of can_frames following */
+            struct can_frame frames[0];
+    };
+
+The aligned payload 'frames' uses the same basic CAN frame structure defined
+at the beginning of :ref:`socketcan-rawfd` and in the include/linux/can.h include. All
+messages to the broadcast manager from user space have this structure.
+
+Note a CAN_BCM socket must be connected instead of bound after socket
+creation (example without error checking):
+
+.. code-block:: C
+
+    int s;
+    struct sockaddr_can addr;
+    struct ifreq ifr;
+
+    s = socket(PF_CAN, SOCK_DGRAM, CAN_BCM);
+
+    strcpy(ifr.ifr_name, "can0");
+    ioctl(s, SIOCGIFINDEX, &ifr);
+
+    addr.can_family = AF_CAN;
+    addr.can_ifindex = ifr.ifr_ifindex;
+
+    connect(s, (struct sockaddr *)&addr, sizeof(addr));
+
+    (..)
+
+The broadcast manager socket is able to handle any number of in flight
+transmissions or receive filters concurrently. The different RX/TX jobs are
+distinguished by the unique can_id in each BCM message. However additional
+CAN_BCM sockets are recommended to communicate on multiple CAN interfaces.
+When the broadcast manager socket is bound to 'any' CAN interface (=> the
+interface index is set to zero) the configured receive filters apply to any
+CAN interface unless the sendto() syscall is used to overrule the 'any' CAN
+interface index. When using recvfrom() instead of read() to retrieve BCM
+socket messages the originating CAN interface is provided in can_ifindex.
+
+
+Broadcast Manager Operations
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+The opcode defines the operation for the broadcast manager to carry out,
+or details the broadcast manager's response to several events, including
+user requests.
+
+Transmit Operations (user space to broadcast manager):
+
+TX_SETUP:
+	Create (cyclic) transmission task.
+
+TX_DELETE:
+	Remove (cyclic) transmission task, requires only can_id.
+
+TX_READ:
+	Read properties of (cyclic) transmission task for can_id.
+
+TX_SEND:
+	Send one CAN frame.
+
+Transmit Responses (broadcast manager to user space):
+
+TX_STATUS:
+	Reply to TX_READ request (transmission task configuration).
+
+TX_EXPIRED:
+	Notification when counter finishes sending at initial interval
+	'ival1'. Requires the TX_COUNTEVT flag to be set at TX_SETUP.
+
+Receive Operations (user space to broadcast manager):
+
+RX_SETUP:
+	Create RX content filter subscription.
+
+RX_DELETE:
+	Remove RX content filter subscription, requires only can_id.
+
+RX_READ:
+	Read properties of RX content filter subscription for can_id.
+
+Receive Responses (broadcast manager to user space):
+
+RX_STATUS:
+	Reply to RX_READ request (filter task configuration).
+
+RX_TIMEOUT:
+	Cyclic message is detected to be absent (timer ival1 expired).
+
+RX_CHANGED:
+	BCM message with updated CAN frame (detected content change).
+	Sent on first message received or on receipt of revised CAN messages.
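+
+For example, sending a single CAN frame with TX_SEND could look like this
+(a minimal sketch without error checking; the CAN ID 0x42 and the data
+bytes are arbitrary):
+
+.. code-block:: C
+
+    struct {
+            struct bcm_msg_head msg_head;
+            struct can_frame frame;
+    } msg;
+
+    memset(&msg, 0, sizeof(msg));
+
+    msg.msg_head.opcode  = TX_SEND;
+    msg.msg_head.can_id  = 0x42;
+    msg.msg_head.nframes = 1;
+
+    msg.frame.can_id  = 0x42; /* can_id used on the wire */
+    msg.frame.can_dlc = 2;
+    msg.frame.data[0] = 0x12;
+    msg.frame.data[1] = 0x34;
+
+    write(s, &msg, sizeof(msg));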
+
+
+Broadcast Manager Message Flags
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+When sending a message to the broadcast manager the 'flags' element may
+contain the following flag definitions which influence the behaviour:
+
+SETTIMER:
+	Set the values of ival1, ival2 and count
+
+STARTTIMER:
+	Start the timer with the actual values of ival1, ival2
+	and count. Starting the timer also immediately emits a CAN frame.
+
+TX_COUNTEVT:
+	Create the message TX_EXPIRED when count expires
+
+TX_ANNOUNCE:
+	A change of data by the process is emitted immediately.
+
+TX_CP_CAN_ID:
+	Copies the can_id from the message header to each
+	subsequent frame in frames. This is intended as a usage simplification. For
+	TX tasks the unique can_id from the message header may differ from the
+	can_id(s) stored for transmission in the subsequent struct can_frame(s).
+
+RX_FILTER_ID:
+	Filter by can_id alone, no frames required (nframes=0).
+
+RX_CHECK_DLC:
+	A change of the DLC leads to an RX_CHANGED.
+
+RX_NO_AUTOTIMER:
+	Prevent automatically starting the timeout monitor.
+
+RX_ANNOUNCE_RESUME:
+	If passed at RX_SETUP and a receive timeout occurred, an
+	RX_CHANGED message will be generated when the (cyclic) receive restarts.
+
+TX_RESET_MULTI_IDX:
+	Reset the index for the multiple frame transmission.
+
+RX_RTR_FRAME:
+	Send reply for RTR-request (placed in op->frames[0]).
+
+
+Broadcast Manager Transmission Timers
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+Periodic transmission configurations may use up to two interval timers.
+In this case the BCM sends a number of messages ('count') at an interval
+'ival1', then continuing to send at another given interval 'ival2'. When
+only one timer is needed 'count' is set to zero and only 'ival2' is used.
+When the SETTIMER and STARTTIMER flags are set the timers are activated.
+The timer values can be altered at runtime when only SETTIMER is set.
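+
+For example, a cyclic transmission task that sends ten frames at a 10 ms
+interval and then continues at a 500 ms interval could be set up like this
+(a minimal sketch without error checking; the CAN ID and data are
+arbitrary):
+
+.. code-block:: C
+
+    struct {
+            struct bcm_msg_head msg_head;
+            struct can_frame frame;
+    } msg;
+
+    memset(&msg, 0, sizeof(msg));
+
+    msg.msg_head.opcode  = TX_SETUP;
+    msg.msg_head.flags   = SETTIMER | STARTTIMER | TX_CP_CAN_ID;
+    msg.msg_head.can_id  = 0x42;
+    msg.msg_head.count   = 10;
+    msg.msg_head.ival1.tv_usec = 10000;  /* 'count' times every 10 ms ... */
+    msg.msg_head.ival2.tv_usec = 500000; /* ... then every 500 ms */
+    msg.msg_head.nframes = 1;
+
+    msg.frame.can_dlc = 1; /* the can_id is copied due to TX_CP_CAN_ID */
+
+    write(s, &msg, sizeof(msg));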
+
+
+Broadcast Manager message sequence transmission
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+Up to 256 CAN frames can be transmitted in a sequence in the case of a cyclic
+TX task configuration. The number of CAN frames is provided in the 'nframes'
+element of the BCM message head. The defined number of CAN frames are added
+as array to the TX_SETUP BCM configuration message:
+
+.. code-block:: C
+
+    /* create a struct to set up a sequence of four CAN frames */
+    struct {
+            struct bcm_msg_head msg_head;
+            struct can_frame frame[4];
+    } mytxmsg;
+
+    (..)
+    mytxmsg.msg_head.nframes = 4;
+    (..)
+
+    write(s, &mytxmsg, sizeof(mytxmsg));
+
+With every transmission the index in the array of CAN frames is increased
+and set to zero at index overflow.
+
+
+Broadcast Manager Receive Filter Timers
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+The timer values ival1 or ival2 may be set to non-zero values at RX_SETUP.
+When the SETTIMER flag is set the timers are enabled:
+
+ival1:
+	Send RX_TIMEOUT when a received message is not received again within
+	the given time. When START_TIMER is set at RX_SETUP the timeout detection
+	is activated directly - even without a former CAN frame reception.
+
+ival2:
+	Throttle the received message rate down to the value of ival2. This
+	is useful to reduce messages for the application when the signal inside the
+	CAN frame is stateless as state changes within the ival2 period may get
+	lost.
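+
+For example, a cycle time monitor that reports an RX_TIMEOUT when CAN ID
+0x42 is absent for one second could be set up like this (a minimal sketch
+without error checking; RX_FILTER_ID is used so no CAN frames need to be
+attached):
+
+.. code-block:: C
+
+    struct bcm_msg_head msg_head;
+
+    memset(&msg_head, 0, sizeof(msg_head));
+
+    msg_head.opcode  = RX_SETUP;
+    msg_head.flags   = SETTIMER | STARTTIMER | RX_FILTER_ID;
+    msg_head.can_id  = 0x42;
+    msg_head.ival1.tv_sec = 1; /* RX_TIMEOUT after one second */
+    msg_head.nframes = 0;      /* RX_FILTER_ID: no CAN frames attached */
+
+    write(s, &msg_head, sizeof(msg_head));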
+
+Broadcast Manager Multiplex Message Receive Filter
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+To filter for content changes in multiplex message sequences an array of
+more than one CAN frame can be passed in a RX_SETUP configuration message.
+The data bytes of the first CAN frame contain the mask of relevant bits
+that have to match in the subsequent CAN frames with the received CAN
+frame. If one of the subsequent CAN frames matches, the bits in that frame
+data mark the relevant content to be compared with the previously received
+content. Up to 257 CAN frames (multiplex filter bit mask CAN frame plus
+256 CAN filters) can be added as array to the RX_SETUP BCM configuration
+message:
+
+.. code-block:: C
+
+    /* usually used to clear CAN frame data[] - beware of endian problems! */
+    #define U64_DATA(p) (*(unsigned long long*)(p)->data)
+
+    struct {
+            struct bcm_msg_head msg_head;
+            struct can_frame frame[5];
+    } msg;
+
+    msg.msg_head.opcode  = RX_SETUP;
+    msg.msg_head.can_id  = 0x42;
+    msg.msg_head.flags   = 0;
+    msg.msg_head.nframes = 5;
+    U64_DATA(&msg.frame[0]) = 0xFF00000000000000ULL; /* MUX mask */
+    U64_DATA(&msg.frame[1]) = 0x01000000000000FFULL; /* data mask (MUX 0x01) */
+    U64_DATA(&msg.frame[2]) = 0x0200FFFF000000FFULL; /* data mask (MUX 0x02) */
+    U64_DATA(&msg.frame[3]) = 0x330000FFFFFF0003ULL; /* data mask (MUX 0x33) */
+    U64_DATA(&msg.frame[4]) = 0x4F07FC0FF0000000ULL; /* data mask (MUX 0x4F) */
+
+    write(s, &msg, sizeof(msg));
+
+
+Broadcast Manager CAN FD Support
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+The programming API of the CAN_BCM depends on struct can_frame which is
+given as array directly behind the bcm_msg_head structure. To follow this
+schema for the CAN FD frames a new flag 'CAN_FD_FRAME' in the bcm_msg_head
+flags indicates that the concatenated CAN frame structures behind the
+bcm_msg_head are defined as struct canfd_frame:
+
+.. code-block:: C
+
+    struct {
+            struct bcm_msg_head msg_head;
+            struct canfd_frame frame[5];
+    } msg;
+
+    msg.msg_head.opcode  = RX_SETUP;
+    msg.msg_head.can_id  = 0x42;
+    msg.msg_head.flags   = CAN_FD_FRAME;
+    msg.msg_head.nframes = 5;
+    (..)
+
+When using CAN FD frames for multiplex filtering the MUX mask is still
+expected in the first 64 bit of the struct canfd_frame data section.
+
+
+Connected Transport Protocols (SOCK_SEQPACKET)
+----------------------------------------------
+
+(to be written)
+
+
+Unconnected Transport Protocols (SOCK_DGRAM)
+--------------------------------------------
+
+(to be written)
+
+
+.. _socketcan-core-module:
+
+SocketCAN Core Module
+=====================
+
+The SocketCAN core module implements the protocol family
+PF_CAN. CAN protocol modules are loaded by the core module at
+runtime. The core module provides an interface for CAN protocol
+modules to subscribe to needed CAN IDs (see :ref:`socketcan-receive-lists`).
+
+
+can.ko Module Params
+--------------------
+
+- **stats_timer**:
+  To calculate the SocketCAN core statistics
+  (e.g. current/maximum frames per second) this 1 second timer is
+  invoked at can.ko module start time by default. This timer can be
+  disabled by using stats_timer=0 on the module commandline.
+
+- **debug**:
+  (removed since SocketCAN SVN r546)
+
+
+procfs content
+--------------
+
+As described in :ref:`socketcan-receive-lists` the SocketCAN core uses several filter
+lists to deliver received CAN frames to CAN protocol modules. These
+receive lists, their filters and the count of filter matches can be
+checked in the appropriate procfs file. All entries contain the
+device and a protocol module identifier::
+
+    foo@bar:~$ cat /proc/net/can/rcvlist_all
+
+    receive list 'rx_all':
+      (vcan3: no entry)
+      (vcan2: no entry)
+      (vcan1: no entry)
+      device   can_id   can_mask  function  userdata   matches  ident
+       vcan0     000    00000000  f88e6370  f6c6f400         0  raw
+      (any: no entry)
+
+In this example an application requests any CAN traffic from vcan0.
+The following receive list files exist::
+
+    rcvlist_all - list for unfiltered entries (no filter operations)
+    rcvlist_eff - list for single extended frame (EFF) entries
+    rcvlist_err - list for error message frames masks
+    rcvlist_fil - list for mask/value filters
+    rcvlist_inv - list for mask/value filters (inverse semantic)
+    rcvlist_sff - list for single standard frame (SFF) entries
+
+Additional procfs files in /proc/net/can::
+
+    stats       - SocketCAN core statistics (rx/tx frames, match ratios, ...)
+    reset_stats - manual statistic reset
+    version     - prints the SocketCAN core version and the ABI version
+
+
+Writing Own CAN Protocol Modules
+--------------------------------
+
+To implement a new protocol in the protocol family PF_CAN a new
+protocol has to be defined in include/linux/can.h .
+The prototypes and definitions to use the SocketCAN core can be
+accessed by including include/linux/can/core.h .
+In addition to functions that register the CAN protocol and the
+CAN device notifier chain there are functions to subscribe CAN
+frames received by CAN interfaces and to send CAN frames::
+
+    can_rx_register   - subscribe CAN frames from a specific interface
+    can_rx_unregister - unsubscribe CAN frames from a specific interface
+    can_send          - transmit a CAN frame (optional with local loopback)
+
+For details see the kerneldoc documentation in net/can/af_can.c or
+the source code of net/can/raw.c or net/can/bcm.c .
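+
+As a rough sketch (modelled after net/can/raw.c; the protocol value
+CAN_MYPROTO and the ops/proto structures are hypothetical and have to be
+provided by the new module), the registration boils down to:
+
+.. code-block:: C
+
+    static const struct can_proto myproto_can_proto = {
+            .type     = SOCK_DGRAM,
+            .protocol = CAN_MYPROTO,    /* hypothetical new entry in can.h */
+            .ops      = &myproto_ops,   /* the module's struct proto_ops */
+            .prot     = &myproto_proto, /* the module's struct proto */
+    };
+
+    static int __init myproto_init(void)
+    {
+            return can_proto_register(&myproto_can_proto);
+    }
+    module_init(myproto_init);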
+
+
+CAN Network Drivers
+===================
+
+Writing a CAN network device driver is much easier than writing a
+CAN character device driver. Similar to other known network device
+drivers you mainly have to deal with:
+
+- TX: Put the CAN frame from the socket buffer to the CAN controller.
+- RX: Put the CAN frame from the CAN controller to the socket buffer.
+
+See e.g. Documentation/networking/netdevices.txt . The differences
+for writing a CAN network device driver are described below:
+
+
+General Settings
+----------------
+
+.. code-block:: C
+
+    dev->type  = ARPHRD_CAN; /* the netdevice hardware type */
+    dev->flags = IFF_NOARP;  /* CAN has no arp */
+
+    dev->mtu = CAN_MTU; /* sizeof(struct can_frame) -> legacy CAN interface */
+
+or, alternatively, when the controller supports CAN with flexible data rate:
+
+.. code-block:: C
+
+    dev->mtu = CANFD_MTU; /* sizeof(struct canfd_frame) -> CAN FD interface */
+
+The struct can_frame or struct canfd_frame is the payload of each socket
+buffer (skbuff) in the protocol family PF_CAN.
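+
+A rough sketch of the RX path using the can-dev helper alloc_can_skb()
+(allocation failure handling is shortened and statistics updates are
+omitted; see e.g. the SJA1000 driver for a complete version):
+
+.. code-block:: C
+
+    struct can_frame *cf;
+    struct sk_buff *skb;
+
+    skb = alloc_can_skb(dev, &cf);
+    if (!skb)
+            return;
+
+    cf->can_id  = 0x123; /* CAN ID read from the controller */
+    cf->can_dlc = 2;     /* payload length read from the controller */
+    cf->data[0] = 0xAB;
+    cf->data[1] = 0xCD;
+
+    netif_rx(skb);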
+
+
+.. _socketcan-local-loopback2:
+
+Local Loopback of Sent Frames
+-----------------------------
+
+As described in :ref:`socketcan-local-loopback1` the CAN network device driver should
+support a local loopback functionality similar to the local echo
+e.g. of tty devices. In this case the driver flag IFF_ECHO has to be
+set to prevent the PF_CAN core from locally echoing sent frames
+(aka loopback) as a fallback solution::
+
+    dev->flags = (IFF_NOARP | IFF_ECHO);
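+
+A driver that sets IFF_ECHO is expected to perform the echo itself,
+typically with the can-dev echo helpers. A rough sketch (the fixed echo
+index 0 is a simplification; controllers with multiple TX objects use one
+index per object):
+
+.. code-block:: C
+
+    /* in the xmit function: store the skb for the later echo */
+    can_put_echo_skb(skb, dev, 0);
+
+    /* in the TX complete interrupt: perform the local echo */
+    can_get_echo_skb(dev, 0);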
+
+
+CAN Controller Hardware Filters
+-------------------------------
+
+To reduce the interrupt load on deep embedded systems some CAN
+controllers support the filtering of CAN IDs or ranges of CAN IDs.
+These hardware filter capabilities vary from controller to
+controller and are generally not feasible in a multi-user
+networking approach, as a filter on driver level would affect all
+users in the multi-user system. The use of the very controller
+specific hardware filters could make sense in a very dedicated
+use-case. The highly efficient filter sets inside the PF_CAN core
+allow multiple different filters to be set for each socket
+separately. Therefore the use of hardware filters belongs to the
+category 'handmade tuning on deep embedded systems'. The author is
+running an MPC603e @133MHz with four SJA1000 CAN controllers from
+2002 under heavy bus load without any problems ...
+
+
+The Virtual CAN Driver (vcan)
+-----------------------------
+
+Similar to the network loopback devices, vcan offers a virtual local
+CAN interface. A fully qualified address on CAN consists of
+
+- a unique CAN Identifier (CAN ID)
+- the CAN bus this CAN ID is transmitted on (e.g. can0)
+
+so in common use cases more than one virtual CAN interface is needed.
+
+The virtual CAN interfaces allow the transmission and reception of CAN
+frames without real CAN controller hardware. Virtual CAN network
+devices are usually named 'vcanX', like vcan0, vcan1, vcan2, ...
+When compiled as a module the virtual CAN driver module is called vcan.ko.
+
+Since Linux Kernel version 2.6.24 the vcan driver supports the Kernel
+netlink interface to create vcan network devices. The creation and
+removal of vcan network devices can be managed with the ip(8) tool::
+
+  - Create a virtual CAN network interface:
+       $ ip link add type vcan
+
+  - Create a virtual CAN network interface with a specific name 'vcan42':
+       $ ip link add dev vcan42 type vcan
+
+  - Remove a (virtual CAN) network interface 'vcan42':
+       $ ip link del vcan42
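+
+  - Bring a virtual CAN network interface 'vcan0' up (required before use):
+       $ ip link set vcan0 up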
+
+
+The CAN Network Device Driver Interface
+---------------------------------------
+
+The CAN network device driver interface provides a generic interface
+to setup, configure and monitor CAN network devices. The user can then
+configure the CAN device, like setting the bit-timing parameters, via
+the netlink interface using the program "ip" from the "IPROUTE2"
+utility suite. The following chapter describes briefly how to use it.
+Furthermore, the interface uses a common data structure and exports a
+set of common functions, which all real CAN network device drivers
+should use. Please have a look at the SJA1000 or MSCAN driver to
+understand how to use them. The name of the module is can-dev.ko.
+
+
+Netlink interface to set/get devices properties
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+The CAN device must be configured via the netlink interface. The supported
+netlink message types are defined and briefly described in
+"include/linux/can/netlink.h". CAN link support for the program "ip"
+of the IPROUTE2 utility suite is available and it can be used as shown
+below:
+
+Setting CAN device properties::
+
+    $ ip link set can0 type can help
+    Usage: ip link set DEVICE type can
+        [ bitrate BITRATE [ sample-point SAMPLE-POINT] ] |
+        [ tq TQ prop-seg PROP_SEG phase-seg1 PHASE-SEG1
+          phase-seg2 PHASE-SEG2 [ sjw SJW ] ]
+
+        [ dbitrate BITRATE [ dsample-point SAMPLE-POINT] ] |
+        [ dtq TQ dprop-seg PROP_SEG dphase-seg1 PHASE-SEG1
+          dphase-seg2 PHASE-SEG2 [ dsjw SJW ] ]
+
+        [ loopback { on | off } ]
+        [ listen-only { on | off } ]
+        [ triple-sampling { on | off } ]
+        [ one-shot { on | off } ]
+        [ berr-reporting { on | off } ]
+        [ fd { on | off } ]
+        [ fd-non-iso { on | off } ]
+        [ presume-ack { on | off } ]
+
+        [ restart-ms TIME-MS ]
+        [ restart ]
+
+        Where: BITRATE       := { 1..1000000 }
+               SAMPLE-POINT  := { 0.000..0.999 }
+               TQ            := { NUMBER }
+               PROP-SEG      := { 1..8 }
+               PHASE-SEG1    := { 1..8 }
+               PHASE-SEG2    := { 1..8 }
+               SJW           := { 1..4 }
+               RESTART-MS    := { 0 | NUMBER }
+
+Display CAN device details and statistics::
+
+    $ ip -details -statistics link show can0
+    2: can0: <NOARP,UP,LOWER_UP,ECHO> mtu 16 qdisc pfifo_fast state UP qlen 10
+      link/can
+      can <TRIPLE-SAMPLING> state ERROR-ACTIVE restart-ms 100
+      bitrate 125000 sample-point 0.875
+      tq 125 prop-seg 6 phase-seg1 7 phase-seg2 2 sjw 1
+      sja1000: tseg1 1..16 tseg2 1..8 sjw 1..4 brp 1..64 brp-inc 1
+      clock 8000000
+      re-started bus-errors arbit-lost error-warn error-pass bus-off
+      41         17457      0          41         42         41
+      RX: bytes  packets  errors  dropped overrun mcast
+      140859     17608    17457   0       0       0
+      TX: bytes  packets  errors  dropped carrier collsns
+      861        112      0       41      0       0
+
+More info on the above output:
+
+"<TRIPLE-SAMPLING>"
+	Shows the list of selected CAN controller modes: LOOPBACK,
+	LISTEN-ONLY, or TRIPLE-SAMPLING.
+
+"state ERROR-ACTIVE"
+	The current state of the CAN controller: "ERROR-ACTIVE",
+	"ERROR-WARNING", "ERROR-PASSIVE", "BUS-OFF" or "STOPPED"
+
+"restart-ms 100"
+	Automatic restart delay time. If set to a non-zero value, a
+	restart of the CAN controller will be triggered automatically
+	in case of a bus-off condition after the specified delay time
+	in milliseconds. By default it's off.
+
+"bitrate 125000 sample-point 0.875"
+	Shows the real bit-rate in bits/sec and the sample-point in the
+	range 0.000..0.999. If the calculation of bit-timing parameters
+	is enabled in the kernel (CONFIG_CAN_CALC_BITTIMING=y), the
+	bit-timing can be defined by setting the "bitrate" argument.
+	Optionally the "sample-point" can be specified. By default it's
+	0.000 assuming CiA-recommended sample-points.
+
+"tq 125 prop-seg 6 phase-seg1 7 phase-seg2 2 sjw 1"
+	Shows the time quanta in ns, propagation segment, phase buffer
+	segment 1 and 2 and the synchronisation jump width in units of
+	tq. They allow the CAN bit-timing to be defined in a hardware
+	independent format as proposed by the Bosch CAN 2.0 spec (see
+	chapter 8 of http://www.semiconductors.bosch.de/pdf/can2spec.pdf).
+
+"sja1000: tseg1 1..16 tseg2 1..8 sjw 1..4 brp 1..64 brp-inc 1 clock 8000000"
+	Shows the bit-timing constants of the CAN controller, here the
+	"sja1000". The minimum and maximum values of the time segment 1
+	and 2, the synchronisation jump width in units of tq, the
+	bitrate pre-scaler and the CAN system clock frequency in Hz.
+	These constants could be used for user-defined (non-standard)
+	bit-timing calculation algorithms in user-space.
+
+"re-started bus-errors arbit-lost error-warn error-pass bus-off"
+	Shows the number of restarts, bus errors, arbitration lost errors,
+	and the state changes to the error-warning, error-passive and
+	bus-off state. RX overrun errors are listed in the "overrun"
+	field of the standard network statistics.
+
+Setting the CAN Bit-Timing
+~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+The CAN bit-timing parameters can always be defined in a hardware
+independent format as proposed in the Bosch CAN 2.0 specification
+specifying the arguments "tq", "prop_seg", "phase_seg1", "phase_seg2"
+and "sjw"::
+
+    $ ip link set canX type can tq 125 prop-seg 6 \
+				phase-seg1 7 phase-seg2 2 sjw 1
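+
+With these parameters one bit consists of 1 (sync-seg) + 6 + 7 + 2 = 16
+time quanta, so the sample point is located after phase-seg1 at
+(1 + 6 + 7) / 16 = 0.875 of the bit time.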
+
+If the kernel option CONFIG_CAN_CALC_BITTIMING is enabled, CiA
+recommended CAN bit-timing parameters will be calculated if the
+bitrate is specified with the argument "bitrate"::
+
+    $ ip link set canX type can bitrate 125000
+
+Note that this works fine for the most common CAN controllers with
+standard bit-rates but may *fail* for exotic bit-rates or CAN system
+clock frequencies. Disabling CONFIG_CAN_CALC_BITTIMING saves some
+space and allows user-space tools to solely determine and set the
+bit-timing parameters. The CAN controller specific bit-timing
+constants can be used for that purpose. They are listed by the
+following command::
+
+    $ ip -details link show can0
+    ...
+      sja1000: clock 8000000 tseg1 1..16 tseg2 1..8 sjw 1..4 brp 1..64 brp-inc 1
+
+
+Starting and Stopping the CAN Network Device
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+A CAN network device is started or stopped as usual with the command
+"ifconfig canX up/down" or "ip link set canX up/down". Be aware that
+you *must* define proper bit-timing parameters for real CAN devices
+before you can start them to avoid error-prone default settings::
+
+    $ ip link set canX up type can bitrate 125000
+
+A device may enter the "bus-off" state if too many errors occur on
+the CAN bus. Then no more messages are received or sent. An automatic
+bus-off recovery can be enabled by setting the "restart-ms" to a
+non-zero value, e.g.::
+
+    $ ip link set canX type can restart-ms 100
+
+Alternatively, the application may detect the "bus-off" condition
+by monitoring CAN error message frames and trigger a restart when
+appropriate with the command::
+
+    $ ip link set canX type can restart
+
+Note that a restart will also create a CAN error message frame (see
+also :ref:`socketcan-network-problem-notifications`).
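+
+A sketch of how an application might subscribe to these state changes on
+a CAN_RAW socket, using the CAN_RAW_ERR_FILTER socket option described
+earlier:
+
+.. code-block:: C
+
+    can_err_mask_t err_mask = CAN_ERR_BUSOFF | CAN_ERR_RESTARTED;
+
+    setsockopt(s, SOL_CAN_RAW, CAN_RAW_ERR_FILTER,
+               &err_mask, sizeof(err_mask));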
+
+
+.. _socketcan-can-fd-driver:
+
+CAN FD (Flexible Data Rate) Driver Support
+------------------------------------------
+
+CAN FD capable CAN controllers support two different bitrates for the
+arbitration phase and the payload phase of the CAN FD frame. Therefore a
+second bit timing has to be specified in order to enable the CAN FD bitrate.
+
+Additionally CAN FD capable CAN controllers support up to 64 bytes of
+payload. The representation of this length in can_frame.can_dlc and
+canfd_frame.len for userspace applications and inside the Linux network
+layer is a plain value from 0 .. 64 instead of the CAN 'data length code'.
+The data length code was a 1:1 mapping to the payload length in the legacy
+CAN frames anyway. The payload length to the bus-relevant DLC mapping is
+only performed inside the CAN drivers, preferably with the helper
+functions can_dlc2len() and can_len2dlc().
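+
+A short sketch of this mapping inside a CAN FD driver; e.g. the payload
+length 12 corresponds to the bus-relevant DLC 9:
+
+.. code-block:: C
+
+    u8 dlc = can_len2dlc(12);  /* payload length 12 -> DLC 9 */
+    u8 len = can_dlc2len(dlc); /* DLC 9 -> payload length 12 */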
+
+The CAN netdevice driver capabilities can be distinguished by the network
+device's maximum transfer unit (MTU)::
+
+  MTU = 16 (CAN_MTU)   => sizeof(struct can_frame)   => 'legacy' CAN device
+  MTU = 72 (CANFD_MTU) => sizeof(struct canfd_frame) => CAN FD capable device
+
+The CAN device MTU can be retrieved e.g. with a SIOCGIFMTU ioctl() syscall.
+N.B. CAN FD capable devices can also handle and send legacy CAN frames.
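+
+A minimal sketch of such an MTU check from user space, assuming 's' is an
+open CAN socket and error handling is omitted:
+
+.. code-block:: C
+
+    struct ifreq ifr;
+
+    strcpy(ifr.ifr_name, "can0");
+    ioctl(s, SIOCGIFMTU, &ifr);
+
+    if (ifr.ifr_mtu == CANFD_MTU)
+            printf("can0 is CAN FD capable\n");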
+
+When configuring CAN FD capable CAN controllers an additional 'data' bitrate
+has to be set. This bitrate for the data phase of the CAN FD frame has to be
+at least the bitrate which was configured for the arbitration phase. This
+second bitrate is specified analogously to the first bitrate but the bitrate
+setting keywords for the 'data' bitrate start with 'd' e.g. dbitrate,
+dsample-point, dsjw or dtq and similar settings. When a data bitrate is set
+within the configuration process the controller option "fd on" can be
+specified to enable the CAN FD mode in the CAN controller. This controller
+option also switches the device MTU to 72 (CANFD_MTU).
+
+The first CAN FD specification presented as whitepaper at the International
+CAN Conference 2012 needed to be improved for data integrity reasons.
+Therefore two CAN FD implementations have to be distinguished today:
+
+- ISO compliant:     The ISO 11898-1:2015 CAN FD implementation (default)
+- non-ISO compliant: The CAN FD implementation following the 2012 whitepaper
+
+Finally there are three types of CAN FD controllers:
+
+1. ISO compliant (fixed)
+2. non-ISO compliant (fixed, like the M_CAN IP core v3.0.1 in m_can.c)
+3. ISO/non-ISO CAN FD controllers (switchable, like the PEAK PCAN-USB FD)
+
+The current ISO/non-ISO mode is announced by the CAN controller driver via
+netlink and displayed by the 'ip' tool (controller option FD-NON-ISO).
+The ISO/non-ISO-mode can be altered by setting 'fd-non-iso {on|off}' for
+switchable CAN FD controllers only.
+
+Example configuring 500 kbit/s arbitration bitrate and 4 Mbit/s data bitrate::
+
+    $ ip link set can0 up type can bitrate 500000 sample-point 0.75 \
+                                   dbitrate 4000000 dsample-point 0.8 fd on
+    $ ip -details link show can0
+    5: can0: <NOARP,UP,LOWER_UP,ECHO> mtu 72 qdisc pfifo_fast state UNKNOWN \
+             mode DEFAULT group default qlen 10
+    link/can  promiscuity 0
+    can <FD> state ERROR-ACTIVE (berr-counter tx 0 rx 0) restart-ms 0
+          bitrate 500000 sample-point 0.750
+          tq 50 prop-seg 14 phase-seg1 15 phase-seg2 10 sjw 1
+          pcan_usb_pro_fd: tseg1 1..64 tseg2 1..16 sjw 1..16 brp 1..1024 \
+          brp-inc 1
+          dbitrate 4000000 dsample-point 0.800
+          dtq 12 dprop-seg 7 dphase-seg1 8 dphase-seg2 4 dsjw 1
+          pcan_usb_pro_fd: dtseg1 1..16 dtseg2 1..8 dsjw 1..4 dbrp 1..1024 \
+          dbrp-inc 1
+          clock 80000000
+
+Example when 'fd-non-iso on' is added on this switchable CAN FD adapter::
+
+   can <FD,FD-NON-ISO> state ERROR-ACTIVE (berr-counter tx 0 rx 0) restart-ms 0
+
+
+Supported CAN Hardware
+----------------------
+
+Please check the "Kconfig" file in "drivers/net/can" to get an actual
+list of the supported CAN hardware. On the SocketCAN project website
+(see :ref:`socketcan-resources`) there might be further drivers available, also for
+older kernel versions.
+
+
+.. _socketcan-resources:
+
+SocketCAN Resources
+===================
+
+The Linux CAN / SocketCAN project resources (project site / mailing list)
+are referenced in the MAINTAINERS file in the Linux source tree.
+Search for CAN NETWORK [LAYERS|DRIVERS].
+
+Credits
+=======
+
+- Oliver Hartkopp (PF_CAN core, filters, drivers, bcm, SJA1000 driver)
+- Urs Thuermann (PF_CAN core, kernel integration, socket interfaces, raw, vcan)
+- Jan Kizka (RT-SocketCAN core, Socket-API reconciliation)
+- Wolfgang Grandegger (RT-SocketCAN core & drivers, Raw Socket-API reviews, CAN device driver interface, MSCAN driver)
+- Robert Schwebel (design reviews, PTXdist integration)
+- Marc Kleine-Budde (design reviews, Kernel 2.6 cleanups, drivers)
+- Benedikt Spranger (reviews)
+- Thomas Gleixner (LKML reviews, coding style, posting hints)
+- Andrey Volkov (kernel subtree structure, ioctls, MSCAN driver)
+- Matthias Brukner (first SJA1000 CAN netdevice implementation Q2/2003)
+- Klaus Hitschler (PEAK driver integration)
+- Uwe Koppe (CAN netdevices with PF_PACKET approach)
+- Michael Schulze (driver layer loopback requirement, RT CAN drivers review)
+- Pavel Pisa (Bit-timing calculation)
+- Sascha Hauer (SJA1000 platform driver)
+- Sebastian Haas (SJA1000 EMS PCI driver)
+- Markus Plessing (SJA1000 EMS PCI driver)
+- Per Dalen (SJA1000 Kvaser PCI driver)
+- Sam Ravnborg (reviews, coding style, kbuild help)
diff --git a/Documentation/networking/can.txt b/Documentation/networking/can.txt
deleted file mode 100644
index aa15b9ee2e70..000000000000
--- a/Documentation/networking/can.txt
+++ /dev/null
@@ -1,1308 +0,0 @@
-============================================================================
-
-can.txt
-
-Readme file for the Controller Area Network Protocol Family (aka SocketCAN)
-
-This file contains
-
-  1 Overview / What is SocketCAN
-
-  2 Motivation / Why using the socket API
-
-  3 SocketCAN concept
-    3.1 receive lists
-    3.2 local loopback of sent frames
-    3.3 network problem notifications
-
-  4 How to use SocketCAN
-    4.1 RAW protocol sockets with can_filters (SOCK_RAW)
-      4.1.1 RAW socket option CAN_RAW_FILTER
-      4.1.2 RAW socket option CAN_RAW_ERR_FILTER
-      4.1.3 RAW socket option CAN_RAW_LOOPBACK
-      4.1.4 RAW socket option CAN_RAW_RECV_OWN_MSGS
-      4.1.5 RAW socket option CAN_RAW_FD_FRAMES
-      4.1.6 RAW socket option CAN_RAW_JOIN_FILTERS
-      4.1.7 RAW socket returned message flags
-    4.2 Broadcast Manager protocol sockets (SOCK_DGRAM)
-      4.2.1 Broadcast Manager operations
-      4.2.2 Broadcast Manager message flags
-      4.2.3 Broadcast Manager transmission timers
-      4.2.4 Broadcast Manager message sequence transmission
-      4.2.5 Broadcast Manager receive filter timers
-      4.2.6 Broadcast Manager multiplex message receive filter
-      4.2.7 Broadcast Manager CAN FD support
-    4.3 connected transport protocols (SOCK_SEQPACKET)
-    4.4 unconnected transport protocols (SOCK_DGRAM)
-
-  5 SocketCAN core module
-    5.1 can.ko module params
-    5.2 procfs content
-    5.3 writing own CAN protocol modules
-
-  6 CAN network drivers
-    6.1 general settings
-    6.2 local loopback of sent frames
-    6.3 CAN controller hardware filters
-    6.4 The virtual CAN driver (vcan)
-    6.5 The CAN network device driver interface
-      6.5.1 Netlink interface to set/get devices properties
-      6.5.2 Setting the CAN bit-timing
-      6.5.3 Starting and stopping the CAN network device
-    6.6 CAN FD (flexible data rate) driver support
-    6.7 supported CAN hardware
-
-  7 SocketCAN resources
-
-  8 Credits
-
-============================================================================
-
-1. Overview / What is SocketCAN
---------------------------------
-
-The socketcan package is an implementation of CAN protocols
-(Controller Area Network) for Linux.  CAN is a networking technology
-which has widespread use in automation, embedded devices, and
-automotive fields.  While there have been other CAN implementations
-for Linux based on character devices, SocketCAN uses the Berkeley
-socket API, the Linux network stack and implements the CAN device
-drivers as network interfaces.  The CAN socket API has been designed
-as similar as possible to the TCP/IP protocols to allow programmers,
-familiar with network programming, to easily learn how to use CAN
-sockets.
-
-2. Motivation / Why using the socket API
-----------------------------------------
-
-There have been CAN implementations for Linux before SocketCAN so the
-question arises, why we have started another project.  Most existing
-implementations come as a device driver for some CAN hardware, they
-are based on character devices and provide comparatively little
-functionality.  Usually, there is only a hardware-specific device
-driver which provides a character device interface to send and
-receive raw CAN frames, directly to/from the controller hardware.
-Queueing of frames and higher-level transport protocols like ISO-TP
-have to be implemented in user space applications.  Also, most
-character-device implementations support only one single process to
-open the device at a time, similar to a serial interface.  Exchanging
-the CAN controller requires employment of another device driver and
-often the need for adaption of large parts of the application to the
-new driver's API.
-
-SocketCAN was designed to overcome all of these limitations.  A new
-protocol family has been implemented which provides a socket interface
-to user space applications and which builds upon the Linux network
-layer, enabling use all of the provided queueing functionality.  A device
-driver for CAN controller hardware registers itself with the Linux
-network layer as a network device, so that CAN frames from the
-controller can be passed up to the network layer and on to the CAN
-protocol family module and also vice-versa.  Also, the protocol family
-module provides an API for transport protocol modules to register, so
-that any number of transport protocols can be loaded or unloaded
-dynamically.  In fact, the can core module alone does not provide any
-protocol and cannot be used without loading at least one additional
-protocol module.  Multiple sockets can be opened at the same time,
-on different or the same protocol module and they can listen/send
-frames on different or the same CAN IDs.  Several sockets listening on
-the same interface for frames with the same CAN ID are all passed the
-same received matching CAN frames.  An application wishing to
-communicate using a specific transport protocol, e.g. ISO-TP, just
-selects that protocol when opening the socket, and then can read and
-write application data byte streams, without having to deal with
-CAN-IDs, frames, etc.
-
-Similar functionality visible from user-space could be provided by a
-character device, too, but this would lead to a technically inelegant
-solution for a couple of reasons:
-
-* Intricate usage.  Instead of passing a protocol argument to
-  socket(2) and using bind(2) to select a CAN interface and CAN ID, an
-  application would have to do all these operations using ioctl(2)s.
-
-* Code duplication.  A character device cannot make use of the Linux
-  network queueing code, so all that code would have to be duplicated
-  for CAN networking.
-
-* Abstraction.  In most existing character-device implementations, the
-  hardware-specific device driver for a CAN controller directly
-  provides the character device for the application to work with.
-  This is at least very unusual in Unix systems for both, char and
-  block devices.  For example you don't have a character device for a
-  certain UART of a serial interface, a certain sound chip in your
-  computer, a SCSI or IDE controller providing access to your hard
-  disk or tape streamer device.  Instead, you have abstraction layers
-  which provide a unified character or block device interface to the
-  application on the one hand, and a interface for hardware-specific
-  device drivers on the other hand.  These abstractions are provided
-  by subsystems like the tty layer, the audio subsystem or the SCSI
-  and IDE subsystems for the devices mentioned above.
-
-  The easiest way to implement a CAN device driver is as a character
-  device without such a (complete) abstraction layer, as is done by most
-  existing drivers.  The right way, however, would be to add such a
-  layer with all the functionality like registering for certain CAN
-  IDs, supporting several open file descriptors and (de)multiplexing
-  CAN frames between them, (sophisticated) queueing of CAN frames, and
-  providing an API for device drivers to register with.  However, then
-  it would be no more difficult, or may be even easier, to use the
-  networking framework provided by the Linux kernel, and this is what
-  SocketCAN does.
-
-  The use of the networking framework of the Linux kernel is just the
-  natural and most appropriate way to implement CAN for Linux.
-
-3. SocketCAN concept
----------------------
-
-  As described in chapter 2 it is the main goal of SocketCAN to
-  provide a socket interface to user space applications which builds
-  upon the Linux network layer. In contrast to the commonly known
-  TCP/IP and ethernet networking, the CAN bus is a broadcast-only(!)
-  medium that has no MAC-layer addressing like ethernet. The CAN-identifier
-  (can_id) is used for arbitration on the CAN-bus. Therefore the CAN-IDs
-  have to be chosen uniquely on the bus. When designing a CAN-ECU
-  network the CAN-IDs are mapped to be sent by a specific ECU.
-  For this reason a CAN-ID can be treated best as a kind of source address.
-
-  3.1 receive lists
-
-  The network transparent access of multiple applications leads to the
-  problem that different applications may be interested in the same
-  CAN-IDs from the same CAN network interface. The SocketCAN core
-  module - which implements the protocol family CAN - provides several
-  high efficient receive lists for this reason. If e.g. a user space
-  application opens a CAN RAW socket, the raw protocol module itself
-  requests the (range of) CAN-IDs from the SocketCAN core that are
-  requested by the user. The subscription and unsubscription of
-  CAN-IDs can be done for specific CAN interfaces or for all(!) known
-  CAN interfaces with the can_rx_(un)register() functions provided to
-  CAN protocol modules by the SocketCAN core (see chapter 5).
-  To optimize the CPU usage at runtime the receive lists are split up
-  into several specific lists per device that match the requested
-  filter complexity for a given use-case.
-
-  3.2 local loopback of sent frames
-
-  As known from other networking concepts the data exchanging
-  applications may run on the same or different nodes without any
-  change (except for the according addressing information):
-
-         ___   ___   ___                   _______   ___
-        | _ | | _ | | _ |                 | _   _ | | _ |
-        ||A|| ||B|| ||C||                 ||A| |B|| ||C||
-        |___| |___| |___|                 |_______| |___|
-          |     |     |                       |       |
-        -----------------(1)- CAN bus -(2)---------------
-
-  To ensure that application A receives the same information in the
-  example (2) as it would receive in example (1) there is need for
-  some kind of local loopback of the sent CAN frames on the appropriate
-  node.
-
-  The Linux network devices (by default) just can handle the
-  transmission and reception of media dependent frames. Due to the
-  arbitration on the CAN bus the transmission of a low prio CAN-ID
-  may be delayed by the reception of a high prio CAN frame. To
-  reflect the correct* traffic on the node the loopback of the sent
-  data has to be performed right after a successful transmission. If
-  the CAN network interface is not capable of performing the loopback for
-  some reason the SocketCAN core can do this task as a fallback solution.
-  See chapter 6.2 for details (recommended).
-
-  The loopback functionality is enabled by default to reflect standard
-  networking behaviour for CAN applications. Due to some requests from
-  the RT-SocketCAN group the loopback optionally may be disabled for each
-  separate socket. See sockopts from the CAN RAW sockets in chapter 4.1.
-
-  * = you really like to have this when you're running analyser tools
-      like 'candump' or 'cansniffer' on the (same) node.
-
-  3.3 network problem notifications
-
-  The use of the CAN bus may lead to several problems on the physical
-  and media access control layer. Detecting and logging of these lower
-  layer problems is a vital requirement for CAN users to identify
-  hardware issues on the physical transceiver layer as well as
-  arbitration problems and error frames caused by the different
-  ECUs. The occurrence of detected errors are important for diagnosis
-  and have to be logged together with the exact timestamp. For this
-  reason the CAN interface driver can generate so called Error Message
-  Frames that can optionally be passed to the user application in the
-  same way as other CAN frames. Whenever an error on the physical layer
-  or the MAC layer is detected (e.g. by the CAN controller) the driver
-  creates an appropriate error message frame. Error messages frames can
-  be requested by the user application using the common CAN filter
-  mechanisms. Inside this filter definition the (interested) type of
-  errors may be selected. The reception of error messages is disabled
-  by default. The format of the CAN error message frame is briefly
-  described in the Linux header file "include/uapi/linux/can/error.h".
-
-4. How to use SocketCAN
-------------------------
-
-  Like TCP/IP, you first need to open a socket for communicating over a
-  CAN network. Since SocketCAN implements a new protocol family, you
-  need to pass PF_CAN as the first argument to the socket(2) system
-  call. Currently, there are two CAN protocols to choose from, the raw
-  socket protocol and the broadcast manager (BCM). So to open a socket,
-  you would write
-
-    s = socket(PF_CAN, SOCK_RAW, CAN_RAW);
-
-  and
-
-    s = socket(PF_CAN, SOCK_DGRAM, CAN_BCM);
-
-  respectively.  After the successful creation of the socket, you would
-  normally use the bind(2) system call to bind the socket to a CAN
-  interface (which is different from TCP/IP due to different addressing
-  - see chapter 3). After binding (CAN_RAW) or connecting (CAN_BCM)
-  the socket, you can read(2) and write(2) from/to the socket or use
-  send(2), sendto(2), sendmsg(2) and the recv* counterpart operations
-  on the socket as usual. There are also CAN specific socket options
-  described below.
-
-  The basic CAN frame structure and the sockaddr structure are defined
-  in include/linux/can.h:
-
-    struct can_frame {
-            canid_t can_id;  /* 32 bit CAN_ID + EFF/RTR/ERR flags */
-            __u8    can_dlc; /* frame payload length in byte (0 .. 8) */
-            __u8    __pad;   /* padding */
-            __u8    __res0;  /* reserved / padding */
-            __u8    __res1;  /* reserved / padding */
-            __u8    data[8] __attribute__((aligned(8)));
-    };
-
-  The alignment of the (linear) payload data[] to a 64bit boundary
-  allows the user to define their own structs and unions to easily access
-  the CAN payload. There is no given byteorder on the CAN bus by
-  default. A read(2) system call on a CAN_RAW socket transfers a
-  struct can_frame to the user space.
-
-  The sockaddr_can structure has an interface index like the
-  PF_PACKET socket, that also binds to a specific interface:
-
-    struct sockaddr_can {
-            sa_family_t can_family;
-            int         can_ifindex;
-            union {
-                    /* transport protocol class address info (e.g. ISOTP) */
-                    struct { canid_t rx_id, tx_id; } tp;
-
-                    /* reserved for future CAN protocols address information */
-            } can_addr;
-    };
-
-  To determine the interface index an appropriate ioctl() has to
-  be used (example for CAN_RAW sockets without error checking):
-
-    int s;
-    struct sockaddr_can addr;
-    struct ifreq ifr;
-
-    s = socket(PF_CAN, SOCK_RAW, CAN_RAW);
-
-    strcpy(ifr.ifr_name, "can0" );
-    ioctl(s, SIOCGIFINDEX, &ifr);
-
-    addr.can_family = AF_CAN;
-    addr.can_ifindex = ifr.ifr_ifindex;
-
-    bind(s, (struct sockaddr *)&addr, sizeof(addr));
-
-    (..)
-
-  To bind a socket to all(!) CAN interfaces the interface index must
-  be 0 (zero). In this case the socket receives CAN frames from every
-  enabled CAN interface. To determine the originating CAN interface
-  the system call recvfrom(2) may be used instead of read(2). To send
-  on a socket that is bound to 'any' interface sendto(2) is needed to
-  specify the outgoing interface.
-
-  Reading CAN frames from a bound CAN_RAW socket (see above) consists
-  of reading a struct can_frame:
-
-    struct can_frame frame;
-
-    nbytes = read(s, &frame, sizeof(struct can_frame));
-
-    if (nbytes < 0) {
-            perror("can raw socket read");
-            return 1;
-    }
-
-    /* paranoid check ... */
-    if (nbytes < sizeof(struct can_frame)) {
-            fprintf(stderr, "read: incomplete CAN frame\n");
-            return 1;
-    }
-
-    /* do something with the received CAN frame */
-
-  Writing CAN frames can be done similarly, with the write(2) system call:
-
-    nbytes = write(s, &frame, sizeof(struct can_frame));
-
-  When the CAN interface is bound to 'any' existing CAN interface
-  (addr.can_ifindex = 0) it is recommended to use recvfrom(2) if the
-  information about the originating CAN interface is needed:
-
-    struct sockaddr_can addr;
-    struct ifreq ifr;
-    socklen_t len = sizeof(addr);
-    struct can_frame frame;
-
-    nbytes = recvfrom(s, &frame, sizeof(struct can_frame),
-                      0, (struct sockaddr*)&addr, &len);
-
-    /* get interface name of the received CAN frame */
-    ifr.ifr_ifindex = addr.can_ifindex;
-    ioctl(s, SIOCGIFNAME, &ifr);
-    printf("Received a CAN frame from interface %s", ifr.ifr_name);
-
-  To write CAN frames on sockets bound to 'any' CAN interface the
-  outgoing interface has to be defined certainly.
-
-    strcpy(ifr.ifr_name, "can0");
-    ioctl(s, SIOCGIFINDEX, &ifr);
-    addr.can_ifindex = ifr.ifr_ifindex;
-    addr.can_family  = AF_CAN;
-
-    nbytes = sendto(s, &frame, sizeof(struct can_frame),
-                    0, (struct sockaddr*)&addr, sizeof(addr));
-
-  An accurate timestamp can be obtained with an ioctl(2) call after reading
-  a message from the socket:
-
-    struct timeval tv;
-    ioctl(s, SIOCGSTAMP, &tv);
-
-  The timestamp has a resolution of one microsecond and is set automatically
-  at the reception of a CAN frame.
-
-  Remark about CAN FD (flexible data rate) support:
-
-  Generally the handling of CAN FD is very similar to the formerly described
-  examples. The new CAN FD capable CAN controllers support two different
-  bitrates for the arbitration phase and the payload phase of the CAN FD frame
-  and up to 64 bytes of payload. This extended payload length breaks all the
-  kernel interfaces (ABI) which heavily rely on the CAN frame with fixed eight
-  bytes of payload (struct can_frame) like the CAN_RAW socket. Therefore e.g.
-  the CAN_RAW socket supports a new socket option CAN_RAW_FD_FRAMES that
-  switches the socket into a mode that allows the handling of CAN FD frames
-  and (legacy) CAN frames simultaneously (see section 4.1.5).
-
-  The struct canfd_frame is defined in include/linux/can.h:
-
-    struct canfd_frame {
-            canid_t can_id;  /* 32 bit CAN_ID + EFF/RTR/ERR flags */
-            __u8    len;     /* frame payload length in byte (0 .. 64) */
-            __u8    flags;   /* additional flags for CAN FD */
-            __u8    __res0;  /* reserved / padding */
-            __u8    __res1;  /* reserved / padding */
-            __u8    data[64] __attribute__((aligned(8)));
-    };
-
-  The struct canfd_frame and the existing struct can_frame have the can_id,
-  the payload length and the payload data at the same offset inside their
-  structures. This allows to handle the different structures very similar.
-  When the content of a struct can_frame is copied into a struct canfd_frame
-  all structure elements can be used as-is - only the data[] becomes extended.
-
-  When introducing the struct canfd_frame it turned out that the data length
-  code (DLC) of the struct can_frame was used as a length information as the
-  length and the DLC has a 1:1 mapping in the range of 0 .. 8. To preserve
-  the easy handling of the length information the canfd_frame.len element
-  contains a plain length value from 0 .. 64. So both canfd_frame.len and
-  can_frame.can_dlc are equal and contain a length information and no DLC.
-  For details about the distinction of CAN and CAN FD capable devices and
-  the mapping to the bus-relevant data length code (DLC), see chapter 6.6.
-
-  The length of the two CAN(FD) frame structures define the maximum transfer
-  unit (MTU) of the CAN(FD) network interface and skbuff data length. Two
-  definitions are specified for CAN specific MTUs in include/linux/can.h :
-
-  #define CAN_MTU   (sizeof(struct can_frame))   == 16  => 'legacy' CAN frame
-  #define CANFD_MTU (sizeof(struct canfd_frame)) == 72  => CAN FD frame
-
-  4.1 RAW protocol sockets with can_filters (SOCK_RAW)
-
-  Using CAN_RAW sockets is extensively comparable to the commonly
-  known access to CAN character devices. To meet the new possibilities
-  provided by the multi user SocketCAN approach, some reasonable
-  defaults are set at RAW socket binding time:
-
-  - The filters are set to exactly one filter receiving everything
-  - The socket only receives valid data frames (=> no error message frames)
-  - The loopback of sent CAN frames is enabled (see chapter 3.2)
-  - The socket does not receive its own sent frames (in loopback mode)
-
-  These default settings may be changed before or after binding the socket.
-  To use the referenced definitions of the socket options for CAN_RAW
-  sockets, include <linux/can/raw.h>.
-
-  4.1.1 RAW socket option CAN_RAW_FILTER
-
-  The reception of CAN frames using CAN_RAW sockets can be controlled
-  by defining 0 .. n filters with the CAN_RAW_FILTER socket option.
-
-  The CAN filter structure is defined in include/linux/can.h:
-
-    struct can_filter {
-            canid_t can_id;
-            canid_t can_mask;
-    };
-
-  A filter matches, when
-
-    <received_can_id> & mask == can_id & mask
-
-  which is analogous to known CAN controllers hardware filter semantics.
-  The filter can be inverted in this semantic, when the CAN_INV_FILTER
-  bit is set in can_id element of the can_filter structure. In
-  contrast to CAN controller hardware filters the user may set 0 .. n
-  receive filters for each open socket separately:
-
-    struct can_filter rfilter[2];
-
-    rfilter[0].can_id   = 0x123;
-    rfilter[0].can_mask = CAN_SFF_MASK;
-    rfilter[1].can_id   = 0x200;
-    rfilter[1].can_mask = 0x700;
-
-    setsockopt(s, SOL_CAN_RAW, CAN_RAW_FILTER, &rfilter, sizeof(rfilter));
-
-  To disable the reception of CAN frames on the selected CAN_RAW socket:
-
-    setsockopt(s, SOL_CAN_RAW, CAN_RAW_FILTER, NULL, 0);
-
-  To set the filters to zero filters is quite obsolete as to not read
-  data causes the raw socket to discard the received CAN frames. But
-  having this 'send only' use-case we may remove the receive list in the
-  Kernel to save a little (really a very little!) CPU usage.
-
-  4.1.1.1 CAN filter usage optimisation
-
-  The CAN filters are processed in per-device filter lists at CAN frame
-  reception time. To reduce the number of checks that need to be performed
-  while walking through the filter lists the CAN core provides an optimized
-  filter handling when the filter subscription focusses on a single CAN ID.
-
-  For the possible 2048 SFF CAN identifiers the identifier is used as an index
-  to access the corresponding subscription list without any further checks.
-  For the 2^29 possible EFF CAN identifiers a 10 bit XOR folding is used as
-  hash function to retrieve the EFF table index.
-
-  To benefit from the optimized filters for single CAN identifiers the
-  CAN_SFF_MASK or CAN_EFF_MASK have to be set into can_filter.mask together
-  with set CAN_EFF_FLAG and CAN_RTR_FLAG bits. A set CAN_EFF_FLAG bit in the
-  can_filter.mask makes clear that it matters whether a SFF or EFF CAN ID is
-  subscribed. E.g. in the example from above
-
-    rfilter[0].can_id   = 0x123;
-    rfilter[0].can_mask = CAN_SFF_MASK;
-
-  both SFF frames with CAN ID 0x123 and EFF frames with 0xXXXXX123 can pass.
-
-  To filter for only 0x123 (SFF) and 0x12345678 (EFF) CAN identifiers the
-  filter has to be defined in this way to benefit from the optimized filters:
-
-    struct can_filter rfilter[2];
-
-    rfilter[0].can_id   = 0x123;
-    rfilter[0].can_mask = (CAN_EFF_FLAG | CAN_RTR_FLAG | CAN_SFF_MASK);
-    rfilter[1].can_id   = 0x12345678 | CAN_EFF_FLAG;
-    rfilter[1].can_mask = (CAN_EFF_FLAG | CAN_RTR_FLAG | CAN_EFF_MASK);
-
-    setsockopt(s, SOL_CAN_RAW, CAN_RAW_FILTER, &rfilter, sizeof(rfilter));
-
-  4.1.2 RAW socket option CAN_RAW_ERR_FILTER
-
-  As described in chapter 3.3 the CAN interface driver can generate so
-  called Error Message Frames that can optionally be passed to the user
-  application in the same way as other CAN frames. The possible
-  errors are divided into different error classes that may be filtered
-  using the appropriate error mask. To register for every possible
-  error condition CAN_ERR_MASK can be used as value for the error mask.
-  The values for the error mask are defined in linux/can/error.h .
-
-    can_err_mask_t err_mask = ( CAN_ERR_TX_TIMEOUT | CAN_ERR_BUSOFF );
-
-    setsockopt(s, SOL_CAN_RAW, CAN_RAW_ERR_FILTER,
-               &err_mask, sizeof(err_mask));
-
-  4.1.3 RAW socket option CAN_RAW_LOOPBACK
-
-  To meet multi user needs the local loopback is enabled by default
-  (see chapter 3.2 for details). But in some embedded use-cases
-  (e.g. when only one application uses the CAN bus) this loopback
-  functionality can be disabled (separately for each socket):
-
-    int loopback = 0; /* 0 = disabled, 1 = enabled (default) */
-
-    setsockopt(s, SOL_CAN_RAW, CAN_RAW_LOOPBACK, &loopback, sizeof(loopback));
-
-  4.1.4 RAW socket option CAN_RAW_RECV_OWN_MSGS
-
-  When the local loopback is enabled, all the sent CAN frames are
-  looped back to the open CAN sockets that registered for the CAN
-  frames' CAN-ID on this given interface to meet the multi user
-  needs. The reception of the CAN frames on the same socket that was
-  sending the CAN frame is assumed to be unwanted and therefore
-  disabled by default. This default behaviour may be changed on
-  demand:
-
-    int recv_own_msgs = 1; /* 0 = disabled (default), 1 = enabled */
-
-    setsockopt(s, SOL_CAN_RAW, CAN_RAW_RECV_OWN_MSGS,
-               &recv_own_msgs, sizeof(recv_own_msgs));
-
-  4.1.5 RAW socket option CAN_RAW_FD_FRAMES
-
-  CAN FD support in CAN_RAW sockets can be enabled with a new socket option
-  CAN_RAW_FD_FRAMES which is off by default. When the new socket option is
-  not supported by the CAN_RAW socket (e.g. on older kernels), switching the
-  CAN_RAW_FD_FRAMES option returns the error -ENOPROTOOPT.
-
-  Once CAN_RAW_FD_FRAMES is enabled the application can send both CAN frames
-  and CAN FD frames. OTOH the application has to handle CAN and CAN FD frames
-  when reading from the socket.
-
-    CAN_RAW_FD_FRAMES enabled:  CAN_MTU and CANFD_MTU are allowed
-    CAN_RAW_FD_FRAMES disabled: only CAN_MTU is allowed (default)
-
-  Example:
-    [ remember: CANFD_MTU == sizeof(struct canfd_frame) ]
-
-    struct canfd_frame cfd;
-
-    nbytes = read(s, &cfd, CANFD_MTU);
-
-    if (nbytes == CANFD_MTU) {
-            printf("got CAN FD frame with length %d\n", cfd.len);
-	    /* cfd.flags contains valid data */
-    } else if (nbytes == CAN_MTU) {
-            printf("got legacy CAN frame with length %d\n", cfd.len);
-	    /* cfd.flags is undefined */
-    } else {
-            fprintf(stderr, "read: invalid CAN(FD) frame\n");
-            return 1;
-    }
-
-    /* the content can be handled independently from the received MTU size */
-
-    printf("can_id: %X data length: %d data: ", cfd.can_id, cfd.len);
-    for (i = 0; i < cfd.len; i++)
-            printf("%02X ", cfd.data[i]);
-
-  When reading with size CANFD_MTU only returns CAN_MTU bytes that have
-  been received from the socket a legacy CAN frame has been read into the
-  provided CAN FD structure. Note that the canfd_frame.flags data field is
-  not specified in the struct can_frame and therefore it is only valid in
-  CANFD_MTU sized CAN FD frames.
-
-  Implementation hint for new CAN applications:
-
-  To build a CAN FD aware application use struct canfd_frame as basic CAN
-  data structure for CAN_RAW based applications. When the application is
-  executed on an older Linux kernel and switching the CAN_RAW_FD_FRAMES
-  socket option returns an error: No problem. You'll get legacy CAN frames
-  or CAN FD frames and can process them the same way.
-
-  When sending to CAN devices make sure that the device is capable to handle
-  CAN FD frames by checking if the device maximum transfer unit is CANFD_MTU.
-  The CAN device MTU can be retrieved e.g. with a SIOCGIFMTU ioctl() syscall.
-
-  4.1.6 RAW socket option CAN_RAW_JOIN_FILTERS
-
-  The CAN_RAW socket can set multiple CAN identifier specific filters that
-  lead to multiple filters in the af_can.c filter processing. These filters
-  are indenpendent from each other which leads to logical OR'ed filters when
-  applied (see 4.1.1).
-
-  This socket option joines the given CAN filters in the way that only CAN
-  frames are passed to user space that matched *all* given CAN filters. The
-  semantic for the applied filters is therefore changed to a logical AND.
-
-  This is useful especially when the filterset is a combination of filters
-  where the CAN_INV_FILTER flag is set in order to notch single CAN IDs or
-  CAN ID ranges from the incoming traffic.
-
-  4.1.7 RAW socket returned message flags
-
-  When using recvmsg() call, the msg->msg_flags may contain following flags:
-
-    MSG_DONTROUTE: set when the received frame was created on the local host.
-
-    MSG_CONFIRM: set when the frame was sent via the socket it is received on.
-      This flag can be interpreted as a 'transmission confirmation' when the
-      CAN driver supports the echo of frames on driver level, see 3.2 and 6.2.
-      In order to receive such messages, CAN_RAW_RECV_OWN_MSGS must be set.
-
-  4.2 Broadcast Manager protocol sockets (SOCK_DGRAM)
-
-  The Broadcast Manager protocol provides a command based configuration
-  interface to filter and send (e.g. cyclic) CAN messages in kernel space.
-
-  Receive filters can be used to down sample frequent messages; detect events
-  such as message contents changes, packet length changes, and do time-out
-  monitoring of received messages.
-
-  Periodic transmission tasks of CAN frames or a sequence of CAN frames can be
-  created and modified at runtime; both the message content and the two
-  possible transmit intervals can be altered.
-
-  A BCM socket is not intended for sending individual CAN frames using the
-  struct can_frame as known from the CAN_RAW socket. Instead a special BCM
-  configuration message is defined. The basic BCM configuration message used
-  to communicate with the broadcast manager and the available operations are
-  defined in the linux/can/bcm.h include. The BCM message consists of a
-  message header with a command ('opcode') followed by zero or more CAN frames.
-  The broadcast manager sends responses to user space in the same form:
-
-    struct bcm_msg_head {
-            __u32 opcode;                   /* command */
-            __u32 flags;                    /* special flags */
-            __u32 count;                    /* run 'count' times with ival1 */
-            struct timeval ival1, ival2;    /* count and subsequent interval */
-            canid_t can_id;                 /* unique can_id for task */
-            __u32 nframes;                  /* number of can_frames following */
-            struct can_frame frames[0];
-    };
-
-  The aligned payload 'frames' uses the same basic CAN frame structure defined
-  at the beginning of section 4 and in the include/linux/can.h include. All
-  messages to the broadcast manager from user space have this structure.
-
-  Note a CAN_BCM socket must be connected instead of bound after socket
-  creation (example without error checking):
-
-    int s;
-    struct sockaddr_can addr;
-    struct ifreq ifr;
-
-    s = socket(PF_CAN, SOCK_DGRAM, CAN_BCM);
-
-    strcpy(ifr.ifr_name, "can0");
-    ioctl(s, SIOCGIFINDEX, &ifr);
-
-    addr.can_family = AF_CAN;
-    addr.can_ifindex = ifr.ifr_ifindex;
-
-    connect(s, (struct sockaddr *)&addr, sizeof(addr));
-
-    (..)
-
-  The broadcast manager socket is able to handle any number of in flight
-  transmissions or receive filters concurrently. The different RX/TX jobs are
-  distinguished by the unique can_id in each BCM message. However additional
-  CAN_BCM sockets are recommended to communicate on multiple CAN interfaces.
-  When the broadcast manager socket is bound to 'any' CAN interface (=> the
-  interface index is set to zero) the configured receive filters apply to any
-  CAN interface unless the sendto() syscall is used to overrule the 'any' CAN
-  interface index. When using recvfrom() instead of read() to retrieve BCM
-  socket messages the originating CAN interface is provided in can_ifindex.
-
-  4.2.1 Broadcast Manager operations
-
-  The opcode defines the operation for the broadcast manager to carry out,
-  or details the broadcast managers response to several events, including
-  user requests.
-
-  Transmit Operations (user space to broadcast manager):
-
-    TX_SETUP:   Create (cyclic) transmission task.
-
-    TX_DELETE:  Remove (cyclic) transmission task, requires only can_id.
-
-    TX_READ:    Read properties of (cyclic) transmission task for can_id.
-
-    TX_SEND:    Send one CAN frame.
-
-  Transmit Responses (broadcast manager to user space):
-
-    TX_STATUS:  Reply to TX_READ request (transmission task configuration).
-
-    TX_EXPIRED: Notification when counter finishes sending at initial interval
-      'ival1'. Requires the TX_COUNTEVT flag to be set at TX_SETUP.
-
-  Receive Operations (user space to broadcast manager):
-
-    RX_SETUP:   Create RX content filter subscription.
-
-    RX_DELETE:  Remove RX content filter subscription, requires only can_id.
-
-    RX_READ:    Read properties of RX content filter subscription for can_id.
-
-  Receive Responses (broadcast manager to user space):
-
-    RX_STATUS:  Reply to RX_READ request (filter task configuration).
-
-    RX_TIMEOUT: Cyclic message is detected to be absent (timer ival1 expired).
-
-    RX_CHANGED: BCM message with updated CAN frame (detected content change).
-      Sent on first message received or on receipt of revised CAN messages.
-
-  4.2.2 Broadcast Manager message flags
-
-  When sending a message to the broadcast manager the 'flags' element may
-  contain the following flag definitions which influence the behaviour:
-
-    SETTIMER:           Set the values of ival1, ival2 and count
-
-    STARTTIMER:         Start the timer with the actual values of ival1, ival2
-      and count. Starting the timer leads simultaneously to emit a CAN frame.
-
-    TX_COUNTEVT:        Create the message TX_EXPIRED when count expires
-
-    TX_ANNOUNCE:        A change of data by the process is emitted immediately.
-
-    TX_CP_CAN_ID:       Copies the can_id from the message header to each
-      subsequent frame in frames. This is intended as usage simplification. For
-      TX tasks the unique can_id from the message header may differ from the
-      can_id(s) stored for transmission in the subsequent struct can_frame(s).
-
-    RX_FILTER_ID:       Filter by can_id alone, no frames required (nframes=0).
-
-    RX_CHECK_DLC:       A change of the DLC leads to an RX_CHANGED.
-
-    RX_NO_AUTOTIMER:    Prevent automatically starting the timeout monitor.
-
-    RX_ANNOUNCE_RESUME: If passed at RX_SETUP and a receive timeout occurred, a
-      RX_CHANGED message will be generated when the (cyclic) receive restarts.
-
-    TX_RESET_MULTI_IDX: Reset the index for the multiple frame transmission.
-
-    RX_RTR_FRAME:       Send reply for RTR-request (placed in op->frames[0]).
-
-  4.2.3 Broadcast Manager transmission timers
-
-  Periodic transmission configurations may use up to two interval timers.
-  In this case the BCM sends a number of messages ('count') at an interval
-  'ival1', then continuing to send at another given interval 'ival2'. When
-  only one timer is needed 'count' is set to zero and only 'ival2' is used.
-  When SET_TIMER and START_TIMER flag were set the timers are activated.
-  The timer values can be altered at runtime when only SET_TIMER is set.
-
-  4.2.4 Broadcast Manager message sequence transmission
-
-  Up to 256 CAN frames can be transmitted in a sequence in the case of a cyclic
-  TX task configuration. The number of CAN frames is provided in the 'nframes'
-  element of the BCM message head. The defined number of CAN frames are added
-  as array to the TX_SETUP BCM configuration message.
-
-    /* create a struct to set up a sequence of four CAN frames */
-    struct {
-            struct bcm_msg_head msg_head;
-            struct can_frame frame[4];
-    } mytxmsg;
-
-    (..)
-    mytxmsg.msg_head.nframes = 4;
-    (..)
-
-    write(s, &mytxmsg, sizeof(mytxmsg));
-
-  With every transmission the index in the array of CAN frames is increased
-  and set to zero at index overflow.
-
-  4.2.5 Broadcast Manager receive filter timers
-
-  The timer values ival1 or ival2 may be set to non-zero values at RX_SETUP.
-  When the SET_TIMER flag is set the timers are enabled:
-
-  ival1: Send RX_TIMEOUT when a received message is not received again within
-    the given time. When START_TIMER is set at RX_SETUP the timeout detection
-    is activated directly - even without a former CAN frame reception.
-
-  ival2: Throttle the received message rate down to the value of ival2. This
-    is useful to reduce messages for the application when the signal inside the
-    CAN frame is stateless as state changes within the ival2 periode may get
-    lost.
-
-  4.2.6 Broadcast Manager multiplex message receive filter
-
-  To filter for content changes in multiplex message sequences an array of more
-  than one CAN frames can be passed in a RX_SETUP configuration message. The
-  data bytes of the first CAN frame contain the mask of relevant bits that
-  have to match in the subsequent CAN frames with the received CAN frame.
-  If one of the subsequent CAN frames is matching the bits in that frame data
-  mark the relevant content to be compared with the previous received content.
-  Up to 257 CAN frames (multiplex filter bit mask CAN frame plus 256 CAN
-  filters) can be added as array to the TX_SETUP BCM configuration message.
-
-    /* usually used to clear CAN frame data[] - beware of endian problems! */
-    #define U64_DATA(p) (*(unsigned long long*)(p)->data)
-
-    struct {
-            struct bcm_msg_head msg_head;
-            struct can_frame frame[5];
-    } msg;
-
-    msg.msg_head.opcode  = RX_SETUP;
-    msg.msg_head.can_id  = 0x42;
-    msg.msg_head.flags   = 0;
-    msg.msg_head.nframes = 5;
-    U64_DATA(&msg.frame[0]) = 0xFF00000000000000ULL; /* MUX mask */
-    U64_DATA(&msg.frame[1]) = 0x01000000000000FFULL; /* data mask (MUX 0x01) */
-    U64_DATA(&msg.frame[2]) = 0x0200FFFF000000FFULL; /* data mask (MUX 0x02) */
-    U64_DATA(&msg.frame[3]) = 0x330000FFFFFF0003ULL; /* data mask (MUX 0x33) */
-    U64_DATA(&msg.frame[4]) = 0x4F07FC0FF0000000ULL; /* data mask (MUX 0x4F) */
-
-    write(s, &msg, sizeof(msg));
-
-  4.2.7 Broadcast Manager CAN FD support
-
-  The programming API of the CAN_BCM depends on struct can_frame which is
-  given as array directly behind the bcm_msg_head structure. To follow this
-  schema for the CAN FD frames a new flag 'CAN_FD_FRAME' in the bcm_msg_head
-  flags indicates that the concatenated CAN frame structures behind the
-  bcm_msg_head are defined as struct canfd_frame.
-
-    struct {
-            struct bcm_msg_head msg_head;
-            struct canfd_frame frame[5];
-    } msg;
-
-    msg.msg_head.opcode  = RX_SETUP;
-    msg.msg_head.can_id  = 0x42;
-    msg.msg_head.flags   = CAN_FD_FRAME;
-    msg.msg_head.nframes = 5;
-    (..)
-
-  When using CAN FD frames for multiplex filtering the MUX mask is still
-  expected in the first 64 bit of the struct canfd_frame data section.
-
-  4.3 connected transport protocols (SOCK_SEQPACKET)
-  4.4 unconnected transport protocols (SOCK_DGRAM)
-
-
-5. SocketCAN core module
--------------------------
-
-  The SocketCAN core module implements the protocol family
-  PF_CAN. CAN protocol modules are loaded by the core module at
-  runtime. The core module provides an interface for CAN protocol
-  modules to subscribe to needed CAN IDs (see chapter 3.1).
-
-  5.1 can.ko module params
-
-  - stats_timer: To calculate the SocketCAN core statistics
-    (e.g. current/maximum frames per second) this 1 second timer is
-    invoked at can.ko module start time by default. This timer can be
-    disabled by using stats_timer=0 on the module commandline.
-
-  - debug: (removed since SocketCAN SVN r546)
-
-  5.2 procfs content
-
-  As described in chapter 3.1 the SocketCAN core uses several filter
-  lists to deliver received CAN frames to CAN protocol modules. These
-  receive lists, their filters and the count of filter matches can be
-  checked in the appropriate procfs file. All entries contain the
-  device and a protocol module identifier:
-
-    foo@bar:~$ cat /proc/net/can/rcvlist_all
-
-    receive list 'rx_all':
-      (vcan3: no entry)
-      (vcan2: no entry)
-      (vcan1: no entry)
-      device   can_id   can_mask  function  userdata   matches  ident
-       vcan0     000    00000000  f88e6370  f6c6f400         0  raw
-      (any: no entry)
-
-  In this example an application requests any CAN traffic from vcan0.
-
-    rcvlist_all - list for unfiltered entries (no filter operations)
-    rcvlist_eff - list for single extended frame (EFF) entries
-    rcvlist_err - list for error message frame masks
-    rcvlist_fil - list for mask/value filters
-    rcvlist_inv - list for mask/value filters (inverse semantic)
-    rcvlist_sff - list for single standard frame (SFF) entries
-
-  Additional procfs files in /proc/net/can
-
-    stats       - SocketCAN core statistics (rx/tx frames, match ratios, ...)
-    reset_stats - manual statistic reset
-    version     - prints the SocketCAN core version and the ABI version
-
-  5.3 writing own CAN protocol modules
-
-  To implement a new protocol in the protocol family PF_CAN a new
-  protocol has to be defined in include/linux/can.h .
-  The prototypes and definitions to use the SocketCAN core can be
-  accessed by including include/linux/can/core.h .
-  In addition to functions that register the CAN protocol and the
-  CAN device notifier chain there are functions to subscribe CAN
-  frames received by CAN interfaces and to send CAN frames:
-
-    can_rx_register   - subscribe CAN frames from a specific interface
-    can_rx_unregister - unsubscribe CAN frames from a specific interface
-    can_send          - transmit a CAN frame (optional with local loopback)
-
-  For details see the kerneldoc documentation in net/can/af_can.c or
-  the source code of net/can/raw.c or net/can/bcm.c .
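-
-  A rough sketch of the receive side (the foo_* names, 'net' and 'dev' are
-  hypothetical; check include/linux/can/core.h for the exact
-  can_rx_register() signature of your kernel version):
-
-    static void foo_rcv(struct sk_buff *skb, void *data)
-    {
-            /* runs in softirq context for every matching CAN frame */
-            struct can_frame *cf = (struct can_frame *)skb->data;
-            /* ... process cf->can_id / cf->data ... */
-    }
-
-    /* subscribe to SFF CAN ID 0x123 on 'dev' in namespace 'net' */
-    can_rx_register(net, dev, 0x123, CAN_SFF_MASK, foo_rcv, NULL,
-                    "foo", NULL);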
-
-6. CAN network drivers
-----------------------
-
-  Writing a CAN network device driver is much easier than writing a
-  CAN character device driver. Similar to other known network device
-  drivers you mainly have to deal with:
-
-  - TX: Put the CAN frame from the socket buffer to the CAN controller.
-  - RX: Put the CAN frame from the CAN controller to the socket buffer.
-
-  See e.g. Documentation/networking/netdevices.txt . The differences
-  for writing a CAN network device driver are described below:
-
-  6.1 general settings
-
-    dev->type  = ARPHRD_CAN; /* the netdevice hardware type */
-    dev->flags = IFF_NOARP;  /* CAN has no arp */
-
-    dev->mtu = CAN_MTU; /* sizeof(struct can_frame) -> legacy CAN interface */
-
-    or alternatively, when the controller supports CAN with flexible data rate:
-    dev->mtu = CANFD_MTU; /* sizeof(struct canfd_frame) -> CAN FD interface */
-
-  The struct can_frame or struct canfd_frame is the payload of each socket
-  buffer (skbuff) in the protocol family PF_CAN.
-
-  6.2 local loopback of sent frames
-
-  As described in chapter 3.2 the CAN network device driver should
-  support a local loopback functionality similar to the local echo
-  e.g. of tty devices. In this case the driver flag IFF_ECHO has to be
-  set to prevent the PF_CAN core from locally echoing sent frames
-  (aka loopback) as a fallback solution:
-
-    dev->flags = (IFF_NOARP | IFF_ECHO);
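-
-  Drivers based on the can-dev module usually implement this echo with
-  helpers from include/linux/can/dev.h - a sketch of the TX path (the
-  echo skb index 'mbox_idx' handling is driver specific):
-
-    /* in ndo_start_xmit(), before handing the skb to the hardware */
-    can_put_echo_skb(skb, dev, mbox_idx);
-
-    /* in the TX-done interrupt handler, after successful transmission */
-    can_get_echo_skb(dev, mbox_idx);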
-
-  6.3 CAN controller hardware filters
-
-  To reduce the interrupt load on deep embedded systems some CAN
-  controllers support the filtering of CAN IDs or ranges of CAN IDs.
-  These hardware filter capabilities vary from controller to
-  controller and are generally not feasible in a multi-user networking
-  approach. The use of the very controller specific hardware filters
-  could make sense in a very dedicated use-case, as a filter on driver
-  level would affect all users in the multi-user system. The highly
-  efficient filter sets inside the PF_CAN core allow multiple different
-  filters to be set for each socket separately. Therefore the use of
-  hardware filters falls into the category 'handmade tuning on deep
-  embedded systems'. The author is running an MPC603e @133MHz with
-  four SJA1000 CAN controllers from 2002 under heavy bus load without
-  any problems ...
-
-  6.4 The virtual CAN driver (vcan)
-
-  Similar to the network loopback devices, vcan offers a virtual local
-  CAN interface. A fully qualified address on CAN consists of
-
-  - a unique CAN Identifier (CAN ID)
-  - the CAN bus this CAN ID is transmitted on (e.g. can0)
-
-  so in common use cases more than one virtual CAN interface is needed.
-
-  The virtual CAN interfaces allow the transmission and reception of CAN
-  frames without real CAN controller hardware. Virtual CAN network
-  devices are usually named 'vcanX', like vcan0, vcan1, vcan2, etc.
-  When compiled as a module the virtual CAN driver is called vcan.ko.
-
-  Since Linux Kernel version 2.6.24 the vcan driver supports the Kernel
-  netlink interface to create vcan network devices. The creation and
-  removal of vcan network devices can be managed with the ip(8) tool:
-
-  - Create a virtual CAN network interface:
-       $ ip link add type vcan
-
-  - Create a virtual CAN network interface with a specific name 'vcan42':
-       $ ip link add dev vcan42 type vcan
-
-  - Remove a (virtual CAN) network interface 'vcan42':
-       $ ip link del vcan42
-
-  6.5 The CAN network device driver interface
-
-  The CAN network device driver interface provides a generic interface
-  to setup, configure and monitor CAN network devices. The user can then
-  configure the CAN device, like setting the bit-timing parameters, via
-  the netlink interface using the program "ip" from the "IPROUTE2"
-  utility suite. The following chapter describes briefly how to use it.
-  Furthermore, the interface uses a common data structure and exports a
-  set of common functions, which all real CAN network device drivers
-  should use. Please have a look at the SJA1000 or MSCAN driver to
-  understand how to use them. The name of the module is can-dev.ko.
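-
-  A rough sketch of the usual setup with these helpers (the foo_* names
-  and FOO_MAX_TX_ECHO_SKBS are hypothetical):
-
-    struct foo_priv {
-            struct can_priv can; /* must be the first member */
-            /* ... controller specific data ... */
-    };
-
-    dev = alloc_candev(sizeof(struct foo_priv), FOO_MAX_TX_ECHO_SKBS);
-    priv = netdev_priv(dev);
-    priv->can.clock.freq = 8000000; /* CAN system clock in Hz */
-    priv->can.bittiming_const = &foo_bittiming_const;
-    err = register_candev(dev);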
-
-  6.5.1 Netlink interface to set/get devices properties
-
-  The CAN device must be configured via the netlink interface. The supported
-  netlink message types are defined and briefly described in
-  "include/linux/can/netlink.h". CAN link support for the program "ip"
-  of the IPROUTE2 utility suite is available and it can be used as shown
-  below:
-
-  - Setting CAN device properties:
-
-    $ ip link set can0 type can help
-    Usage: ip link set DEVICE type can
-        [ bitrate BITRATE [ sample-point SAMPLE-POINT] ] |
-        [ tq TQ prop-seg PROP_SEG phase-seg1 PHASE-SEG1
-          phase-seg2 PHASE-SEG2 [ sjw SJW ] ]
-
-        [ dbitrate BITRATE [ dsample-point SAMPLE-POINT] ] |
-        [ dtq TQ dprop-seg PROP_SEG dphase-seg1 PHASE-SEG1
-          dphase-seg2 PHASE-SEG2 [ dsjw SJW ] ]
-
-        [ loopback { on | off } ]
-        [ listen-only { on | off } ]
-        [ triple-sampling { on | off } ]
-        [ one-shot { on | off } ]
-        [ berr-reporting { on | off } ]
-        [ fd { on | off } ]
-        [ fd-non-iso { on | off } ]
-        [ presume-ack { on | off } ]
-
-        [ restart-ms TIME-MS ]
-        [ restart ]
-
-        Where: BITRATE       := { 1..1000000 }
-               SAMPLE-POINT  := { 0.000..0.999 }
-               TQ            := { NUMBER }
-               PROP-SEG      := { 1..8 }
-               PHASE-SEG1    := { 1..8 }
-               PHASE-SEG2    := { 1..8 }
-               SJW           := { 1..4 }
-               RESTART-MS    := { 0 | NUMBER }
-
-  - Display CAN device details and statistics:
-
-    $ ip -details -statistics link show can0
-    2: can0: <NOARP,UP,LOWER_UP,ECHO> mtu 16 qdisc pfifo_fast state UP qlen 10
-      link/can
-      can <TRIPLE-SAMPLING> state ERROR-ACTIVE restart-ms 100
-      bitrate 125000 sample_point 0.875
-      tq 125 prop-seg 6 phase-seg1 7 phase-seg2 2 sjw 1
-      sja1000: tseg1 1..16 tseg2 1..8 sjw 1..4 brp 1..64 brp-inc 1
-      clock 8000000
-      re-started bus-errors arbit-lost error-warn error-pass bus-off
-      41         17457      0          41         42         41
-      RX: bytes  packets  errors  dropped overrun mcast
-      140859     17608    17457   0       0       0
-      TX: bytes  packets  errors  dropped carrier collsns
-      861        112      0       41      0       0
-
-  More information on the above output:
-
-    "<TRIPLE-SAMPLING>"
-	Shows the list of selected CAN controller modes: LOOPBACK,
-	LISTEN-ONLY, or TRIPLE-SAMPLING.
-
-    "state ERROR-ACTIVE"
-	The current state of the CAN controller: "ERROR-ACTIVE",
-	"ERROR-WARNING", "ERROR-PASSIVE", "BUS-OFF" or "STOPPED"
-
-    "restart-ms 100"
-	Automatic restart delay time. If set to a non-zero value, a
-	restart of the CAN controller will be triggered automatically
-	in case of a bus-off condition after the specified delay time
-	in milliseconds. By default it's off.
-
-    "bitrate 125000 sample-point 0.875"
-	Shows the real bit-rate in bits/sec and the sample-point in the
-	range 0.000..0.999. If the calculation of bit-timing parameters
-	is enabled in the kernel (CONFIG_CAN_CALC_BITTIMING=y), the
-	bit-timing can be defined by setting the "bitrate" argument.
-	Optionally the "sample-point" can be specified. By default it's
-	0.000 assuming CiA-recommended sample-points.
-
-    "tq 125 prop-seg 6 phase-seg1 7 phase-seg2 2 sjw 1"
-	Shows the time quanta in ns, propagation segment, phase buffer
-	segment 1 and 2 and the synchronisation jump width in units of
-	tq. They allow the CAN bit-timing to be defined in a hardware
-	independent format as proposed by the Bosch CAN 2.0 spec (see
-	chapter 8 of http://www.semiconductors.bosch.de/pdf/can2spec.pdf).
-
-    "sja1000: tseg1 1..16 tseg2 1..8 sjw 1..4 brp 1..64 brp-inc 1
-     clock 8000000"
-	Shows the bit-timing constants of the CAN controller, here the
-	"sja1000". The minimum and maximum values of the time segment 1
-	and 2, the synchronisation jump width in units of tq, the
-	bitrate pre-scaler and the CAN system clock frequency in Hz.
-	These constants could be used for user-defined (non-standard)
-	bit-timing calculation algorithms in user-space.
-
-    "re-started bus-errors arbit-lost error-warn error-pass bus-off"
-	Shows the number of restarts, bus errors, arbitration lost
-	errors, and the state changes to the error-warning, error-passive
-	and bus-off state. RX overrun errors are listed in the "overrun"
-	field of the standard network statistics.
-
-  6.5.2 Setting the CAN bit-timing
-
-  The CAN bit-timing parameters can always be defined in a hardware
-  independent format as proposed in the Bosch CAN 2.0 specification
-  specifying the arguments "tq", "prop_seg", "phase_seg1", "phase_seg2"
-  and "sjw":
-
-    $ ip link set canX type can tq 125 prop-seg 6 \
-				phase-seg1 7 phase-seg2 2 sjw 1
-
-  If the kernel option CONFIG_CAN_CALC_BITTIMING is enabled, CiA
-  recommended CAN bit-timing parameters will be calculated if the bit-
-  rate is specified with the argument "bitrate":
-
-    $ ip link set canX type can bitrate 125000
-
-  Note that this works fine for the most common CAN controllers with
-  standard bit-rates but may *fail* for exotic bit-rates or CAN system
-  clock frequencies. Disabling CONFIG_CAN_CALC_BITTIMING saves some
-  space and allows user-space tools to solely determine and set the
-  bit-timing parameters. The CAN controller specific bit-timing
-  constants can be used for that purpose. They are listed by the
-  following command:
-
-    $ ip -details link show can0
-    ...
-      sja1000: clock 8000000 tseg1 1..16 tseg2 1..8 sjw 1..4 brp 1..64 brp-inc 1
-
-  6.5.3 Starting and stopping the CAN network device
-
-  A CAN network device is started or stopped as usual with the command
-  "ifconfig canX up/down" or "ip link set canX up/down". Be aware that
-  you *must* define proper bit-timing parameters for real CAN devices
-  before starting them to avoid error-prone default settings:
-
-    $ ip link set canX up type can bitrate 125000
-
-  A device may enter the "bus-off" state if too many errors occurred on
-  the CAN bus. Then no more messages are received or sent. An automatic
-  bus-off recovery can be enabled by setting the "restart-ms" to a
-  non-zero value, e.g.:
-
-    $ ip link set canX type can restart-ms 100
-
-  Alternatively, the application may detect the "bus-off" condition
-  by monitoring CAN error message frames and trigger a restart when
-  appropriate with the command:
-
-    $ ip link set canX type can restart
-
-  Note that a restart will also create a CAN error message frame (see
-  also chapter 3.3).
-
-  6.6 CAN FD (flexible data rate) driver support
-
-  CAN FD capable CAN controllers support two different bitrates for the
-  arbitration phase and the payload phase of the CAN FD frame. Therefore a
-  second bit timing has to be specified in order to enable the CAN FD bitrate.
-
-  Additionally CAN FD capable CAN controllers support up to 64 bytes of
-  payload. The representation of this length in can_frame.can_dlc and
-  canfd_frame.len for userspace applications and inside the Linux network
-  layer is a plain value from 0 .. 64 instead of the CAN 'data length code'.
-  The data length code was a 1:1 mapping to the payload length in the legacy
-  CAN frames anyway. The payload length to the bus-relevant DLC mapping is
-  only performed inside the CAN drivers, preferably with the helper
-  functions can_dlc2len() and can_len2dlc().
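-
-  A sketch of this mapping inside a CAN FD driver (see
-  include/linux/can/dev.h for these helpers; 'dlc_from_hw' stands for the
-  value read from the controller):
-
-    struct canfd_frame *cfd = (struct canfd_frame *)skb->data;
-
-    /* TX path: plain length 0..64 -> bus-relevant DLC */
-    u8 dlc = can_len2dlc(cfd->len);
-
-    /* RX path: bus-relevant DLC -> plain length 0..64 */
-    cfd->len = can_dlc2len(dlc_from_hw);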
-
-  The CAN netdevice driver capabilities can be distinguished by the network
-  device's maximum transfer unit (MTU):
-
-  MTU = 16 (CAN_MTU)   => sizeof(struct can_frame)   => 'legacy' CAN device
-  MTU = 72 (CANFD_MTU) => sizeof(struct canfd_frame) => CAN FD capable device
-
-  The CAN device MTU can be retrieved e.g. with a SIOCGIFMTU ioctl() syscall.
-  N.B. CAN FD capable devices can also handle and send legacy CAN frames.
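-
-  A short sketch of retrieving the MTU from userspace ('s' is any open
-  socket):
-
-    struct ifreq ifr;
-
-    strcpy(ifr.ifr_name, "can0");
-    ioctl(s, SIOCGIFMTU, &ifr);
-
-    if (ifr.ifr_mtu == CANFD_MTU)
-            printf("CAN FD capable device\n");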
-
-  When configuring CAN FD capable CAN controllers an additional 'data' bitrate
-  has to be set. This bitrate for the data phase of the CAN FD frame has to be
-  at least as high as the bitrate configured for the arbitration phase. This
-  second bitrate is specified analogously to the first bitrate, but the
-  bitrate setting keywords for the 'data' bitrate start with 'd', e.g.
-  dbitrate, dsample-point, dsjw or dtq and similar settings. When a data
-  bitrate is set within the configuration process the controller option
-  "fd on" can be specified to enable the CAN FD mode in the CAN controller.
-  This controller option also switches the device MTU to 72 (CANFD_MTU).
-
-  The first CAN FD specification, presented as a whitepaper at the
-  International CAN Conference 2012, needed to be improved for data
-  integrity reasons. Therefore two CAN FD implementations have to be
-  distinguished today:
-
-  - ISO compliant:     The ISO 11898-1:2015 CAN FD implementation (default)
-  - non-ISO compliant: The CAN FD implementation following the 2012 whitepaper
-
-  Finally there are three types of CAN FD controllers:
-
-  1. ISO compliant (fixed)
-  2. non-ISO compliant (fixed, like the M_CAN IP core v3.0.1 in m_can.c)
-  3. ISO/non-ISO CAN FD controllers (switchable, like the PEAK PCAN-USB FD)
-
-  The current ISO/non-ISO mode is announced by the CAN controller driver via
-  netlink and displayed by the 'ip' tool (controller option FD-NON-ISO).
-  The ISO/non-ISO-mode can be altered by setting 'fd-non-iso {on|off}' for
-  switchable CAN FD controllers only.
-
-  Example configuring 500 kbit/s arbitration bitrate and 4 Mbit/s data bitrate:
-
-    $ ip link set can0 up type can bitrate 500000 sample-point 0.75 \
-                                   dbitrate 4000000 dsample-point 0.8 fd on
-    $ ip -details link show can0
-    5: can0: <NOARP,UP,LOWER_UP,ECHO> mtu 72 qdisc pfifo_fast state UNKNOWN \
-             mode DEFAULT group default qlen 10
-    link/can  promiscuity 0
-    can <FD> state ERROR-ACTIVE (berr-counter tx 0 rx 0) restart-ms 0
-          bitrate 500000 sample-point 0.750
-          tq 50 prop-seg 14 phase-seg1 15 phase-seg2 10 sjw 1
-          pcan_usb_pro_fd: tseg1 1..64 tseg2 1..16 sjw 1..16 brp 1..1024 \
-          brp-inc 1
-          dbitrate 4000000 dsample-point 0.800
-          dtq 12 dprop-seg 7 dphase-seg1 8 dphase-seg2 4 dsjw 1
-          pcan_usb_pro_fd: dtseg1 1..16 dtseg2 1..8 dsjw 1..4 dbrp 1..1024 \
-          dbrp-inc 1
-          clock 80000000
-
-  Example when 'fd-non-iso on' is added on this switchable CAN FD adapter:
-   can <FD,FD-NON-ISO> state ERROR-ACTIVE (berr-counter tx 0 rx 0) restart-ms 0
-
-  6.7 Supported CAN hardware
-
-  Please check the "Kconfig" file in "drivers/net/can" to get an
-  up-to-date list of the supported CAN hardware. On the SocketCAN project
-  website (see chapter 7) there might be further drivers available, also
-  for older kernel versions.
-
-7. SocketCAN resources
------------------------
-
-  The Linux CAN / SocketCAN project resources (project site / mailing list)
-  are referenced in the MAINTAINERS file in the Linux source tree.
-  Search for CAN NETWORK [LAYERS|DRIVERS].
-
-8. Credits
-----------
-
-  Oliver Hartkopp (PF_CAN core, filters, drivers, bcm, SJA1000 driver)
-  Urs Thuermann (PF_CAN core, kernel integration, socket interfaces, raw, vcan)
-  Jan Kizka (RT-SocketCAN core, Socket-API reconciliation)
-  Wolfgang Grandegger (RT-SocketCAN core & drivers, Raw Socket-API reviews,
-                       CAN device driver interface, MSCAN driver)
-  Robert Schwebel (design reviews, PTXdist integration)
-  Marc Kleine-Budde (design reviews, Kernel 2.6 cleanups, drivers)
-  Benedikt Spranger (reviews)
-  Thomas Gleixner (LKML reviews, coding style, posting hints)
-  Andrey Volkov (kernel subtree structure, ioctls, MSCAN driver)
-  Matthias Brukner (first SJA1000 CAN netdevice implementation Q2/2003)
-  Klaus Hitschler (PEAK driver integration)
-  Uwe Koppe (CAN netdevices with PF_PACKET approach)
-  Michael Schulze (driver layer loopback requirement, RT CAN drivers review)
-  Pavel Pisa (Bit-timing calculation)
-  Sascha Hauer (SJA1000 platform driver)
-  Sebastian Haas (SJA1000 EMS PCI driver)
-  Markus Plessing (SJA1000 EMS PCI driver)
-  Per Dalen (SJA1000 Kvaser PCI driver)
-  Sam Ravnborg (reviews, coding style, kbuild help)
diff --git a/Documentation/networking/dsa/dsa.txt b/Documentation/networking/dsa/dsa.txt
index b8b40753133e..25170ad7d25b 100644
--- a/Documentation/networking/dsa/dsa.txt
+++ b/Documentation/networking/dsa/dsa.txt
@@ -385,11 +385,6 @@ Switch configuration
   avoid relying on what a previous software agent such as a bootloader/firmware
   may have previously configured.
 
-- set_addr: Some switches require the programming of the management interface's
-  Ethernet MAC address, switch drivers can also disable ageing of MAC addresses
-  on the management interface and "hardcode"/"force" this MAC address for the
-  CPU/management interface as an optimization
-
 PHY devices and link management
 -------------------------------
 
diff --git a/Documentation/networking/filter.txt b/Documentation/networking/filter.txt
index 87814859cfc2..a4508ec1816b 100644
--- a/Documentation/networking/filter.txt
+++ b/Documentation/networking/filter.txt
@@ -1134,7 +1134,7 @@ The verifier's knowledge about the variable offset consists of:
 mask and value; no bit should ever be 1 in both.  For example, if a byte is read
 into a register from memory, the register's top 56 bits are known zero, while
 the low 8 are unknown - which is represented as the tnum (0x0; 0xff).  If we
-then OR this with 0x40, we get (0x40; 0xcf), then if we add 1 we get (0x0;
+then OR this with 0x40, we get (0x40; 0xbf), then if we add 1 we get (0x0;
 0x1ff), because of potential carries.
 Besides arithmetic, the register state can also be updated by conditional
 branches.  For instance, if a SCALAR_VALUE is compared > 8, in the 'true' branch
diff --git a/Documentation/networking/ieee802154.txt b/Documentation/networking/ieee802154.txt
index 057e9fdbfac9..e74d8e1da0e2 100644
--- a/Documentation/networking/ieee802154.txt
+++ b/Documentation/networking/ieee802154.txt
@@ -97,6 +97,46 @@ The include/net/mac802154.h defines following functions:
  - void ieee802154_unregister_hw(struct ieee802154_hw *hw):
    freeing registered PHY
 
+ - void ieee802154_rx_irqsafe(struct ieee802154_hw *hw, struct sk_buff *skb,
+                              u8 lqi):
+   telling the 802.15.4 module there is a new received frame in the skb with
+   the RF Link Quality Indicator (LQI) from the hardware device
+
+ - void ieee802154_xmit_complete(struct ieee802154_hw *hw, struct sk_buff *skb,
+                                 bool ifs_handling):
+   telling the 802.15.4 module the frame in the skb has been or is going to
+   be transmitted through the hardware device
+
+The device driver must implement at least the following callbacks in the
+IEEE 802.15.4 operations structure:
+struct ieee802154_ops {
+	...
+	int	(*start)(struct ieee802154_hw *hw);
+	void	(*stop)(struct ieee802154_hw *hw);
+	...
+	int	(*xmit_async)(struct ieee802154_hw *hw, struct sk_buff *skb);
+	int	(*ed)(struct ieee802154_hw *hw, u8 *level);
+	int	(*set_channel)(struct ieee802154_hw *hw, u8 page, u8 channel);
+	...
+};
+
+ - int start(struct ieee802154_hw *hw):
+   handler that the 802.15.4 module calls for the hardware device
+   initialization.
+
+ - void stop(struct ieee802154_hw *hw):
+   handler that the 802.15.4 module calls for the hardware device cleanup.
+
+ - int xmit_async(struct ieee802154_hw *hw, struct sk_buff *skb):
+   handler that the 802.15.4 module calls for each frame in the skb going
+   to be transmitted through the hardware device.
+
+ - int ed(struct ieee802154_hw *hw, u8 *level):
+   handler that the 802.15.4 module calls for Energy Detection from the
+   hardware device.
+
+ - int set_channel(struct ieee802154_hw *hw, u8 page, u8 channel):
+   handler that the 802.15.4 module calls to set the radio listening on a
+   specific channel of the hardware device.
+
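+A minimal ops skeleton might look like this (a sketch; the foo_* handlers
+are hypothetical):
+
+static const struct ieee802154_ops foo_ops = {
+	.owner		= THIS_MODULE,
+	.start		= foo_start,
+	.stop		= foo_stop,
+	.xmit_async	= foo_xmit_async,
+	.ed		= foo_ed,
+	.set_channel	= foo_set_channel,
+};
+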
 Moreover IEEE 802.15.4 device operations structure should be filled.
 
 Fake drivers
diff --git a/Documentation/networking/index.rst b/Documentation/networking/index.rst
index 7d4b15977d61..90966c2692d8 100644
--- a/Documentation/networking/index.rst
+++ b/Documentation/networking/index.rst
@@ -7,6 +7,7 @@ Contents:
    :maxdepth: 2
 
    batman-adv
+   can
    kapi
    z8530book
    msg_zerocopy
diff --git a/Documentation/networking/ip-sysctl.txt b/Documentation/networking/ip-sysctl.txt
index 46c7e1085efc..3f2c40d8e6aa 100644
--- a/Documentation/networking/ip-sysctl.txt
+++ b/Documentation/networking/ip-sysctl.txt
@@ -606,6 +606,7 @@ tcp_fastopen_blackhole_timeout_sec - INTEGER
 	This time period will grow exponentially when more blackhole issues
 	get detected right after Fastopen is re-enabled and will reset to
 	initial value when the blackhole issue goes away.
+	Setting it to 0 disables the blackhole detection.
 	By default, it is set to 1hr.
 
 tcp_syn_retries - INTEGER
diff --git a/Documentation/networking/kapi.rst b/Documentation/networking/kapi.rst
index 580289f345da..f03ae64be8bc 100644
--- a/Documentation/networking/kapi.rst
+++ b/Documentation/networking/kapi.rst
@@ -145,3 +145,27 @@ PHY Support
 
 .. kernel-doc:: drivers/net/phy/mdio_bus.c
    :internal:
+
+PHYLINK
+-------
+
+  PHYLINK interfaces traditional network drivers with PHYLIB, fixed-links,
+  and SFF modules (e.g. hot-pluggable SFP) that may contain PHYs.  PHYLINK
+  provides management of the link state and link modes.
+
+.. kernel-doc:: include/linux/phylink.h
+   :internal:
+
+.. kernel-doc:: drivers/net/phy/phylink.c
+
+SFP support
+-----------
+
+.. kernel-doc:: drivers/net/phy/sfp-bus.c
+   :internal:
+
+.. kernel-doc:: include/linux/sfp.h
+   :internal:
+
+.. kernel-doc:: drivers/net/phy/sfp-bus.c
+   :export:
diff --git a/Documentation/networking/netdev-features.txt b/Documentation/networking/netdev-features.txt
index 7413eb05223b..c77f9d57eb91 100644
--- a/Documentation/networking/netdev-features.txt
+++ b/Documentation/networking/netdev-features.txt
@@ -163,3 +163,12 @@ This requests that the NIC receive all possible frames, including errored
 frames (such as bad FCS, etc).  This can be helpful when sniffing a link with
 bad packets on it.  Some NICs may receive more packets if also put into normal
 PROMISC mode.
+
+*  rx-gro-hw
+
+This requests that the NIC enable Hardware GRO (generic receive offload).
+Hardware GRO is basically the exact reverse of TSO, and is generally
+stricter than Hardware LRO.  A packet stream merged by Hardware GRO must
+be re-segmentable by GSO or TSO back to the exact original packet stream.
+Hardware GRO is dependent on RXCSUM since every packet successfully merged
+by hardware must also have the checksum verified by hardware.
diff --git a/Documentation/networking/pktgen.txt b/Documentation/networking/pktgen.txt
index 2c4e3354e128..d2fd78f85aa4 100644
--- a/Documentation/networking/pktgen.txt
+++ b/Documentation/networking/pktgen.txt
@@ -12,8 +12,8 @@ suitable sample script and configure that.
 On a dual CPU:
 
 ps aux | grep pkt
-root       129  0.3  0.0     0    0 ?        SW    2003 523:20 [pktgen/0]
-root       130  0.3  0.0     0    0 ?        SW    2003 509:50 [pktgen/1]
+root       129  0.3  0.0     0    0 ?        SW    2003 523:20 [kpktgend_0]
+root       130  0.3  0.0     0    0 ?        SW    2003 509:50 [kpktgend_1]
 
 
 For monitoring and control pktgen creates:
@@ -113,9 +113,16 @@ Configuring devices
 ===================
 This is done via the /proc interface, and most easily done via pgset
 as defined in the sample scripts.
+You need to set the PGDEV environment variable to use functions from the
+sample scripts, e.g.:
+export PGDEV=/proc/net/pktgen/eth4@0
+source samples/pktgen/functions.sh
 
 Examples:
 
+ pg_ctrl start           starts injection.
+ pg_ctrl stop            aborts injection. Also, ^C aborts generator.
+
  pgset "clone_skb 1"     sets the number of copies of the same packet
  pgset "clone_skb 0"     use single SKB for all transmits
  pgset "burst 8"         uses xmit_more API to queue 8 copies of the same
@@ -165,8 +172,12 @@ Examples:
                               IPSEC # IPsec encapsulation (needs CONFIG_XFRM)
                               NODE_ALLOC # node specific memory allocation
                               NO_TIMESTAMP # disable timestamping
+ pgset 'flag ![name]'    Clear a flag to determine behaviour.
+                         Note that you might need to use single quotes in
+                         interactive mode, so that your shell doesn't expand
+                         the specified flag as a history command.
 
- pgset spi SPI_VALUE     Set specific SA used to transform packet.
+ pgset "spi [SPI_VALUE]" Set specific SA used to transform packet.
 
  pgset "udp_src_min 9"   set UDP source port min, If < udp_src_max, then
                          cycle through the port range.
@@ -207,8 +218,6 @@ Examples:
  pgset "tos XX"           set former IPv4 TOS field (e.g. "tos 28" for AF11 no ECN, default 00)
  pgset "traffic_class XX" set former IPv6 TRAFFIC CLASS (e.g. "traffic_class B8" for EF no ECN, default 00)
 
- pgset stop    	          aborts injection. Also, ^C aborts generator.
-
  pgset "rate 300M"        set rate to 300 Mb/s
  pgset "ratep 1000000"    set rate to 1Mpps
 
diff --git a/Documentation/networking/xfrm_device.txt b/Documentation/networking/xfrm_device.txt
new file mode 100644
index 000000000000..50c34ca65efe
--- /dev/null
+++ b/Documentation/networking/xfrm_device.txt
@@ -0,0 +1,135 @@
+
+===============================================
+XFRM device - offloading the IPsec computations
+===============================================
+Shannon Nelson <shannon.nelson@oracle.com>
+
+
+Overview
+========
+
+IPsec is a useful feature for securing network traffic, but the
+computational cost is high: a 10Gbps link can easily be brought down
+to under 1Gbps, depending on the traffic and link configuration.
+Luckily, there are NICs that offer a hardware based IPsec offload which
+can radically increase throughput and decrease CPU utilization.  The XFRM
+Device interface allows NIC drivers to offer to the stack access to the
+hardware offload.
+
+Userland access to the offload is typically through a system such as
+libreswan or KAME/raccoon, but the iproute2 'ip xfrm' command set can
+be handy when experimenting.  An example command might look something
+like this:
+
+  ip x s add proto esp dst 14.0.0.70 src 14.0.0.52 spi 0x07 mode transport \
+     reqid 0x07 replay-window 32 \
+     aead 'rfc4106(gcm(aes))' 0x44434241343332312423222114131211f4f3f2f1 128 \
+     sel src 14.0.0.52/24 dst 14.0.0.70/24 proto tcp \
+     offload dev eth4 dir in
+
+Yes, that's ugly, but that's what shell scripts and/or libreswan are for.
+
+
+
+Callbacks to implement
+======================
+
+/* from include/linux/netdevice.h */
+struct xfrmdev_ops {
+	int	(*xdo_dev_state_add) (struct xfrm_state *x);
+	void	(*xdo_dev_state_delete) (struct xfrm_state *x);
+	void	(*xdo_dev_state_free) (struct xfrm_state *x);
+	bool	(*xdo_dev_offload_ok) (struct sk_buff *skb,
+				       struct xfrm_state *x);
+	void    (*xdo_dev_state_advance_esn) (struct xfrm_state *x);
+};
+
+The NIC driver offering IPsec offload will need to implement these
+callbacks to make the offload available to the network stack's
+XFRM subsystem.  Additionally, the feature bits NETIF_F_HW_ESP and
+NETIF_F_HW_ESP_TX_CSUM will signal the availability of the offload.
+
+
+
+Flow
+====
+
+At probe time and before the call to register_netdev(), the driver should
+set up local data structures and XFRM callbacks, and set the feature bits.
+The XFRM code's listener will finish the setup on NETDEV_REGISTER.
+
+		adapter->netdev->xfrmdev_ops = &ixgbe_xfrmdev_ops;
+		adapter->netdev->features |= NETIF_F_HW_ESP;
+		adapter->netdev->hw_enc_features |= NETIF_F_HW_ESP;
+
+When new SAs are set up with a request for the "offload" feature, the
+driver's xdo_dev_state_add() will be given the new SA to be offloaded
+and an indication of whether it is for Rx or Tx.  The driver should
+	- verify the algorithm is supported for offloads
+	- store the SA information (key, salt, target-ip, protocol, etc)
+	- enable the HW offload of the SA
+
+The driver can also set an offload_handle in the SA, an opaque void pointer
+that can be used to convey context into the fast-path offload requests.
+
+		xs->xso.offload_handle = context;
+
+
+When the network stack is preparing an IPsec packet for an SA that has
+been set up for offload, it first calls into xdo_dev_offload_ok() with
+the skb and the intended offload state to ask the driver if the offload
+will be serviceable.  This can check the packet information to be sure the
+offload can be supported (e.g. IPv4 or IPv6, no IPv4 options, etc) and
+return true or false to signify its support.
+
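+A minimal sketch of such a check (the foo_ name and the IPv4-options rule
+are hypothetical examples, not requirements of the API):
+
+		static bool foo_ipsec_offload_ok(struct sk_buff *skb,
+						 struct xfrm_state *xs)
+		{
+			/* e.g. a device that cannot parse IPv4 options */
+			if (xs->props.family == AF_INET &&
+			    ip_hdr(skb)->ihl != 5)
+				return false;
+			return true;
+		}
+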
+When ready to send, the driver needs to inspect the Tx packet for the
+offload information, including the opaque context, and set up the packet
+send accordingly.
+
+		xs = xfrm_input_state(skb);
+		context = xs->xso.offload_handle;
+		set up HW for send
+
+The stack has already inserted the appropriate IPsec headers in the
+packet data, the offload just needs to do the encryption and fix up the
+header values.
+
+
+When a packet is received and the HW has indicated that it offloaded a
+decryption, the driver needs to add a reference to the decoded SA into
+the packet's skb.  At this point the data should be decrypted but the
+IPsec headers are still in the packet data; they are removed later up
+the stack in xfrm_input().
+
+	find and hold the SA that was used for the Rx skb
+		get spi, protocol, and destination IP from packet headers
+		xs = find xs from (spi, protocol, dest_IP)
+		xfrm_state_hold(xs);
+
+	store the state information into the skb
+		skb->sp = secpath_dup(skb->sp);
+		skb->sp->xvec[skb->sp->len++] = xs;
+		skb->sp->olen++;
+
+	indicate the success and/or error status of the offload
+		xo = xfrm_offload(skb);
+		xo->flags = CRYPTO_DONE;
+		xo->status = crypto_status;
+
+	hand the packet to napi_gro_receive() as usual
+
+In ESN mode, xdo_dev_state_advance_esn() is called from xfrm_replay_advance_esn().
+The driver should check the packet sequence number and update the HW ESN state
+machine if needed.
+
+When the SA is removed by the user, the driver's xdo_dev_state_delete()
+is asked to disable the offload.  Later, xdo_dev_state_free() is called
+from a garbage collection routine after all reference counts to the state
+have been removed and any remaining resources can be cleared for the
+offload state.  How these are used by the driver will depend on specific
+hardware needs.
+
+As a netdev is set to DOWN the XFRM stack's netdev listener will call
+xdo_dev_state_delete() and xdo_dev_state_free() on any remaining offloaded
+states.
+
+
diff --git a/Documentation/networking/xfrm_proc.txt b/Documentation/networking/xfrm_proc.txt
index d0d8bafa9016..2eae619ab67b 100644
--- a/Documentation/networking/xfrm_proc.txt
+++ b/Documentation/networking/xfrm_proc.txt
@@ -5,13 +5,15 @@ Masahide NAKAMURA <nakam@linux-ipv6.org>
 
 Transformation Statistics
 -------------------------
-xfrm_proc is a statistics shown factor dropped by transformation
-for developer.
-It is a counter designed from current transformation source code
-and defined like linux private MIB.
 
-Inbound statistics
-~~~~~~~~~~~~~~~~~~
+The xfrm_proc code is a set of statistics showing numbers of packets
+dropped by the transformation code and why.  These counters are defined
+as part of the linux private MIB.  These counters can be viewed in
+/proc/net/xfrm_stat.
+
+
+Inbound errors
+~~~~~~~~~~~~~~
 XfrmInError:
 	All errors which is not matched others
 XfrmInBufferError:
@@ -46,6 +48,10 @@ XfrmInPolBlock:
 	Policy discards
 XfrmInPolError:
 	Policy error
+XfrmAcquireError:
+	State hasn't been fully acquired before use
+XfrmFwdHdrError:
+	Forward routing of a packet is not allowed
 
 Outbound errors
 ~~~~~~~~~~~~~~~
@@ -72,3 +78,5 @@ XfrmOutPolDead:
 	Policy is dead
 XfrmOutPolError:
 	Policy error
+XfrmOutStateInvalid:
+	State is invalid, perhaps expired
diff --git a/Documentation/sysctl/net.txt b/Documentation/sysctl/net.txt
index b67044a2575f..35c62f522754 100644
--- a/Documentation/sysctl/net.txt
+++ b/Documentation/sysctl/net.txt
@@ -95,7 +95,9 @@ dev_weight
 --------------
 
 The maximum number of packets that kernel can handle on a NAPI interrupt,
-it's a Per-CPU variable.
+it's a Per-CPU variable. For drivers that support LRO or GRO_HW, a hardware
+aggregated packet is counted as one packet in this context.
+
 Default: 64
 
 dev_weight_rx_bias
diff --git a/MAINTAINERS b/MAINTAINERS
index e6aa3922a32b..e262e234d76d 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -1348,8 +1348,10 @@ T:	git git://github.com/ulli-kroll/linux.git
 S:	Maintained
 F:	Documentation/devicetree/bindings/arm/gemini.txt
 F:	Documentation/devicetree/bindings/pinctrl/cortina,gemini-pinctrl.txt
+F:	Documentation/devicetree/bindings/net/cortina,gemini-ethernet.txt
 F:	Documentation/devicetree/bindings/rtc/faraday,ftrtc010.txt
 F:	arch/arm/mach-gemini/
+F:	drivers/net/ethernet/cortina/gemini/*
 F:	drivers/pinctrl/pinctrl-gemini.c
 F:	drivers/rtc/rtc-ftrtc010.c
 
@@ -2579,6 +2581,7 @@ S:	Maintained
 F:	Documentation/ABI/testing/sysfs-class-net-batman-adv
 F:	Documentation/ABI/testing/sysfs-class-net-mesh
 F:	Documentation/networking/batman-adv.rst
+F:	include/uapi/linux/batadv_packet.h
 F:	include/uapi/linux/batman_adv.h
 F:	net/batman-adv/
 
@@ -2702,7 +2705,6 @@ F:	drivers/mtd/devices/block2mtd.c
 
 BLUETOOTH DRIVERS
 M:	Marcel Holtmann <marcel@holtmann.org>
-M:	Gustavo Padovan <gustavo@padovan.org>
 M:	Johan Hedberg <johan.hedberg@gmail.com>
 L:	linux-bluetooth@vger.kernel.org
 W:	http://www.bluez.org/
@@ -2713,7 +2715,6 @@ F:	drivers/bluetooth/
 
 BLUETOOTH SUBSYSTEM
 M:	Marcel Holtmann <marcel@holtmann.org>
-M:	Gustavo Padovan <gustavo@padovan.org>
 M:	Johan Hedberg <johan.hedberg@gmail.com>
 L:	linux-bluetooth@vger.kernel.org
 W:	http://www.bluez.org/
@@ -2738,12 +2739,16 @@ M:	Alexei Starovoitov <ast@kernel.org>
 M:	Daniel Borkmann <daniel@iogearbox.net>
 L:	netdev@vger.kernel.org
 L:	linux-kernel@vger.kernel.org
+T:	git git://git.kernel.org/pub/scm/linux/kernel/git/bpf/bpf.git
+T:	git git://git.kernel.org/pub/scm/linux/kernel/git/bpf/bpf-next.git
 S:	Supported
 F:	arch/x86/net/bpf_jit*
 F:	Documentation/networking/filter.txt
 F:	Documentation/bpf/
 F:	include/linux/bpf*
 F:	include/linux/filter.h
+F:	include/trace/events/bpf.h
+F:	include/trace/events/xdp.h
 F:	include/uapi/linux/bpf*
 F:	include/uapi/linux/filter.h
 F:	kernel/bpf/
@@ -3208,7 +3213,7 @@ W:	https://github.com/linux-can
 T:	git git://git.kernel.org/pub/scm/linux/kernel/git/mkl/linux-can.git
 T:	git git://git.kernel.org/pub/scm/linux/kernel/git/mkl/linux-can-next.git
 S:	Maintained
-F:	Documentation/networking/can.txt
+F:	Documentation/networking/can.rst
 F:	net/can/
 F:	include/linux/can/core.h
 F:	include/uapi/linux/can.h
@@ -4956,6 +4961,11 @@ S:	Maintained
 F:	lib/dynamic_debug.c
 F:	include/linux/dynamic_debug.h
 
+DYNAMIC INTERRUPT MODERATION
+M:	Tal Gilboa <talgi@mellanox.com>
+S:	Maintained
+F:	include/linux/net_dim.h
+
 DZ DECSTATION DZ11 SERIAL DRIVER
 M:	"Maciej W. Rozycki" <macro@linux-mips.org>
 S:	Maintained
@@ -8743,6 +8753,13 @@ L:	netdev@vger.kernel.org
 S:	Maintained
 F:	drivers/net/ethernet/mediatek/
 
+MEDIATEK SWITCH DRIVER
+M:	Sean Wang <sean.wang@mediatek.com>
+L:	netdev@vger.kernel.org
+S:	Maintained
+F:	drivers/net/dsa/mt7530.*
+F:	net/dsa/tag_mtk.c
+
 MEDIATEK JPEG DRIVER
 M:	Rick Chang <rick.chang@mediatek.com>
 M:	Bin Liu <bin.liu@mediatek.com>
@@ -9628,6 +9645,11 @@ NETWORKING [WIRELESS]
 L:	linux-wireless@vger.kernel.org
 Q:	http://patchwork.kernel.org/project/linux-wireless/list/
 
+NETDEVSIM
+M:	Jakub Kicinski <jakub.kicinski@netronome.com>
+S:	Maintained
+F:	drivers/net/netdevsim/*
+
 NETXEN (1/10) GbE SUPPORT
 M:	Manish Chopra <manish.chopra@cavium.com>
 M:	Rahul Verma <rahul.verma@cavium.com>
@@ -11792,15 +11814,13 @@ T:	git git://git.kernel.org/pub/scm/linux/kernel/git/linville/wireless-testing.g
 S:	Maintained
 F:	drivers/net/wireless/realtek/rtl818x/rtl8187/
 
-RTL8192CE WIRELESS DRIVER
-M:	Larry Finger <Larry.Finger@lwfinger.net>
-M:	Chaoming Li <chaoming_li@realsil.com.cn>
+REALTEK WIRELESS DRIVER (rtlwifi family)
+M:	Ping-Ke Shih <pkshih@realtek.com>
 L:	linux-wireless@vger.kernel.org
 W:	http://wireless.kernel.org/
 T:	git git://git.kernel.org/pub/scm/linux/kernel/git/linville/wireless-testing.git
 S:	Maintained
 F:	drivers/net/wireless/realtek/rtlwifi/
-F:	drivers/net/wireless/realtek/rtlwifi/rtl8192ce/
 
 RTL8XXXU WIRELESS DRIVER (rtl8xxxu)
 M:	Jes Sorensen <Jes.Sorensen@gmail.com>
@@ -12684,6 +12704,13 @@ F:	drivers/md/raid*
 F:	include/linux/raid/
 F:	include/uapi/linux/raid/
 
+SOCIONEXT (SNI) NETSEC NETWORK DRIVER
+M:	Jassi Brar <jaswinder.singh@linaro.org>
+L:	netdev@vger.kernel.org
+S:	Maintained
+F:	drivers/net/ethernet/socionext/netsec.c
+F:	Documentation/devicetree/bindings/net/socionext-netsec.txt
+
 SONIC NETWORK DRIVER
 M:	Thomas Bogendoerfer <tsbogend@alpha.franken.de>
 L:	netdev@vger.kernel.org
@@ -14822,9 +14849,9 @@ S:	Maintained
 F:	drivers/hid/hid-wiimote*
 
 WILOCITY WIL6210 WIRELESS DRIVER
-M:	Maya Erez <qca_merez@qca.qualcomm.com>
+M:	Maya Erez <merez@codeaurora.org>
 L:	linux-wireless@vger.kernel.org
-L:	wil6210@qca.qualcomm.com
+L:	wil6210@qti.qualcomm.com
 S:	Supported
 W:	http://wireless.kernel.org/en/users/Drivers/wil6210
 F:	drivers/net/wireless/ath/wil6210/
diff --git a/arch/Kconfig b/arch/Kconfig
index 1b560e7f06dc..d007b2a15b22 100644
--- a/arch/Kconfig
+++ b/arch/Kconfig
@@ -196,6 +196,9 @@ config HAVE_OPTPROBES
 config HAVE_KPROBES_ON_FTRACE
 	bool
 
+config HAVE_FUNCTION_ERROR_INJECTION
+	bool
+
 config HAVE_NMI
 	bool
 
diff --git a/arch/arm/boot/dts/imx25.dtsi b/arch/arm/boot/dts/imx25.dtsi
index 09ce8b81fafa..fcaff1c66bcb 100644
--- a/arch/arm/boot/dts/imx25.dtsi
+++ b/arch/arm/boot/dts/imx25.dtsi
@@ -122,7 +122,7 @@
 			};
 
 			can1: can@43f88000 {
-				compatible = "fsl,imx25-flexcan", "fsl,p1010-flexcan";
+				compatible = "fsl,imx25-flexcan";
 				reg = <0x43f88000 0x4000>;
 				interrupts = <43>;
 				clocks = <&clks 75>, <&clks 75>;
@@ -131,7 +131,7 @@
 			};
 
 			can2: can@43f8c000 {
-				compatible = "fsl,imx25-flexcan", "fsl,p1010-flexcan";
+				compatible = "fsl,imx25-flexcan";
 				reg = <0x43f8c000 0x4000>;
 				interrupts = <44>;
 				clocks = <&clks 76>, <&clks 76>;
diff --git a/arch/arm/boot/dts/imx28.dtsi b/arch/arm/boot/dts/imx28.dtsi
index 2f4ebe0318d3..e52e05c0fe56 100644
--- a/arch/arm/boot/dts/imx28.dtsi
+++ b/arch/arm/boot/dts/imx28.dtsi
@@ -1038,7 +1038,7 @@
 			};
 
 			can0: can@80032000 {
-				compatible = "fsl,imx28-flexcan", "fsl,p1010-flexcan";
+				compatible = "fsl,imx28-flexcan";
 				reg = <0x80032000 0x2000>;
 				interrupts = <8>;
 				clocks = <&clks 58>, <&clks 58>;
@@ -1047,7 +1047,7 @@
 			};
 
 			can1: can@80034000 {
-				compatible = "fsl,imx28-flexcan", "fsl,p1010-flexcan";
+				compatible = "fsl,imx28-flexcan";
 				reg = <0x80034000 0x2000>;
 				interrupts = <9>;
 				clocks = <&clks 59>, <&clks 59>;
diff --git a/arch/arm/boot/dts/imx35.dtsi b/arch/arm/boot/dts/imx35.dtsi
index 6d5e6a60bee7..1f0e2203b576 100644
--- a/arch/arm/boot/dts/imx35.dtsi
+++ b/arch/arm/boot/dts/imx35.dtsi
@@ -303,7 +303,7 @@
 			};
 
 			can1: can@53fe4000 {
-				compatible = "fsl,imx35-flexcan", "fsl,p1010-flexcan";
+				compatible = "fsl,imx35-flexcan";
 				reg = <0x53fe4000 0x1000>;
 				clocks = <&clks 33>, <&clks 33>;
 				clock-names = "ipg", "per";
@@ -312,7 +312,7 @@
 			};
 
 			can2: can@53fe8000 {
-				compatible = "fsl,imx35-flexcan", "fsl,p1010-flexcan";
+				compatible = "fsl,imx35-flexcan";
 				reg = <0x53fe8000 0x1000>;
 				clocks = <&clks 34>, <&clks 34>;
 				clock-names = "ipg", "per";
diff --git a/arch/arm/boot/dts/imx53.dtsi b/arch/arm/boot/dts/imx53.dtsi
index 84f17f7abb71..85071ff8c639 100644
--- a/arch/arm/boot/dts/imx53.dtsi
+++ b/arch/arm/boot/dts/imx53.dtsi
@@ -536,7 +536,7 @@
 			};
 
 			can1: can@53fc8000 {
-				compatible = "fsl,imx53-flexcan", "fsl,p1010-flexcan";
+				compatible = "fsl,imx53-flexcan";
 				reg = <0x53fc8000 0x4000>;
 				interrupts = <82>;
 				clocks = <&clks IMX5_CLK_CAN1_IPG_GATE>,
@@ -546,7 +546,7 @@
 			};
 
 			can2: can@53fcc000 {
-				compatible = "fsl,imx53-flexcan", "fsl,p1010-flexcan";
+				compatible = "fsl,imx53-flexcan";
 				reg = <0x53fcc000 0x4000>;
 				interrupts = <83>;
 				clocks = <&clks IMX5_CLK_CAN2_IPG_GATE>,
diff --git a/arch/arm/boot/dts/imx6q-b450v3.dts b/arch/arm/boot/dts/imx6q-b450v3.dts
index 404a93d9596b..3ec58500e9c2 100644
--- a/arch/arm/boot/dts/imx6q-b450v3.dts
+++ b/arch/arm/boot/dts/imx6q-b450v3.dts
@@ -112,3 +112,55 @@
                 line-name = "PCA9539-P07";
         };
 };
+
+&pci_root {
+	/* Intel Corporation I210 Gigabit Network Connection */
+	switch_nic: ethernet@3,0 {
+		compatible = "pci8086,1533";
+		reg = <0x00010000 0 0 0 0>;
+	};
+};
+
+&switch_ports {
+	port@0 {
+		reg = <0>;
+		label = "enacq";
+		phy-handle = <&switchphy0>;
+	};
+
+	port@1 {
+		reg = <1>;
+		label = "eneport1";
+		phy-handle = <&switchphy1>;
+	};
+
+	port@2 {
+		reg = <2>;
+		label = "enix";
+		phy-handle = <&switchphy2>;
+	};
+
+	port@3 {
+		reg = <3>;
+		label = "enid";
+		phy-handle = <&switchphy3>;
+	};
+
+	port@4 {
+		reg = <4>;
+		label = "cpu";
+		ethernet = <&switch_nic>;
+		phy-handle = <&switchphy4>;
+	};
+
+	port@5 {
+		reg = <5>;
+		label = "enembc";
+
+		/* connected to Ethernet MAC of AT91RM9200 in MII mode */
+		fixed-link {
+			speed = <100>;
+			full-duplex;
+		};
+	};
+};
diff --git a/arch/arm/boot/dts/imx6q-b650v3.dts b/arch/arm/boot/dts/imx6q-b650v3.dts
index 7f9f176901d4..5650a9b11091 100644
--- a/arch/arm/boot/dts/imx6q-b650v3.dts
+++ b/arch/arm/boot/dts/imx6q-b650v3.dts
@@ -111,3 +111,55 @@
 	fsl,tx-cal-45-dp-ohms = <55>;
 	fsl,tx-d-cal = <100>;
 };
+
+&pci_root {
+	/* Intel Corporation I210 Gigabit Network Connection */
+	switch_nic: ethernet@3,0 {
+		compatible = "pci8086,1533";
+		reg = <0x00010000 0 0 0 0>;
+	};
+};
+
+&switch_ports {
+	port@0 {
+		reg = <0>;
+		label = "enacq";
+		phy-handle = <&switchphy0>;
+	};
+
+	port@1 {
+		reg = <1>;
+		label = "eneport1";
+		phy-handle = <&switchphy1>;
+	};
+
+	port@2 {
+		reg = <2>;
+		label = "enix";
+		phy-handle = <&switchphy2>;
+	};
+
+	port@3 {
+		reg = <3>;
+		label = "enid";
+		phy-handle = <&switchphy3>;
+	};
+
+	port@4 {
+		reg = <4>;
+		label = "cpu";
+		ethernet = <&switch_nic>;
+		phy-handle = <&switchphy4>;
+	};
+
+	port@5 {
+		reg = <5>;
+		label = "enembc";
+
+		/* connected to Ethernet MAC of AT91RM9200 in MII mode */
+		fixed-link {
+			speed = <100>;
+			full-duplex;
+		};
+	};
+};
diff --git a/arch/arm/boot/dts/imx6q-b850v3.dts b/arch/arm/boot/dts/imx6q-b850v3.dts
index 46bdc6722715..35edbdc7bcd1 100644
--- a/arch/arm/boot/dts/imx6q-b850v3.dts
+++ b/arch/arm/boot/dts/imx6q-b850v3.dts
@@ -212,3 +212,78 @@
 		};
 	};
 };
+
+&pci_root {
+	/* PLX Technology, Inc. PEX 8605 PCI Express 4-port Gen2 Switch */
+	bridge@1,0 {
+		compatible = "pci10b5,8605";
+		reg = <0x00010000 0 0 0 0>;
+
+		#address-cells = <3>;
+		#size-cells = <2>;
+		#interrupt-cells = <1>;
+
+		bridge@2,1 {
+			compatible = "pci10b5,8605";
+			reg = <0x00020800 0 0 0 0>;
+
+			#address-cells = <3>;
+			#size-cells = <2>;
+			#interrupt-cells = <1>;
+
+			/* Intel Corporation I210 Gigabit Network Connection */
+			ethernet@3,0 {
+				compatible = "pci8086,1533";
+				reg = <0x00030000 0 0 0 0>;
+			};
+		};
+
+		bridge@2,2 {
+			compatible = "pci10b5,8605";
+			reg = <0x00021000 0 0 0 0>;
+
+			#address-cells = <3>;
+			#size-cells = <2>;
+			#interrupt-cells = <1>;
+
+			/* Intel Corporation I210 Gigabit Network Connection */
+			switch_nic: ethernet@4,0 {
+				compatible = "pci8086,1533";
+				reg = <0x00040000 0 0 0 0>;
+			};
+		};
+	};
+};
+
+&switch_ports {
+	port@0 {
+		reg = <0>;
+		label = "eneport1";
+		phy-handle = <&switchphy0>;
+	};
+
+	port@1 {
+		reg = <1>;
+		label = "eneport2";
+		phy-handle = <&switchphy1>;
+	};
+
+	port@2 {
+		reg = <2>;
+		label = "enix";
+		phy-handle = <&switchphy2>;
+	};
+
+	port@3 {
+		reg = <3>;
+		label = "enid";
+		phy-handle = <&switchphy3>;
+	};
+
+	port@4 {
+		reg = <4>;
+		label = "cpu";
+		ethernet = <&switch_nic>;
+		phy-handle = <&switchphy4>;
+	};
+};
diff --git a/arch/arm/boot/dts/imx6q-bx50v3.dtsi b/arch/arm/boot/dts/imx6q-bx50v3.dtsi
index b915837bbb5f..916ea94d75ca 100644
--- a/arch/arm/boot/dts/imx6q-bx50v3.dtsi
+++ b/arch/arm/boot/dts/imx6q-bx50v3.dtsi
@@ -92,6 +92,56 @@
 		mux-int-port = <1>;
 		mux-ext-port = <4>;
 	};
+
+	aliases {
+		mdio-gpio0 = &mdio0;
+	};
+
+	mdio0: mdio-gpio {
+		compatible = "virtual,mdio-gpio";
+		gpios = <&gpio2 5 GPIO_ACTIVE_HIGH>, /* mdc */
+			<&gpio2 7 GPIO_ACTIVE_HIGH>; /* mdio */
+
+		#address-cells = <1>;
+		#size-cells = <0>;
+
+		switch@0 {
+			compatible = "marvell,mv88e6085"; /* 88e6240 */
+			#address-cells = <1>;
+			#size-cells = <0>;
+			reg = <0>;
+
+			switch_ports: ports {
+				#address-cells = <1>;
+				#size-cells = <0>;
+			};
+
+			mdio {
+				#address-cells = <1>;
+				#size-cells = <0>;
+
+				switchphy0: switchphy@0 {
+					reg = <0>;
+				};
+
+				switchphy1: switchphy@1 {
+					reg = <1>;
+				};
+
+				switchphy2: switchphy@2 {
+					reg = <2>;
+				};
+
+				switchphy3: switchphy@3 {
+					reg = <3>;
+				};
+
+				switchphy4: switchphy@4 {
+					reg = <4>;
+				};
+			};
+		};
+	};
 };
 
 &ecspi5 {
@@ -326,3 +376,15 @@
 		tcxo-clock-frequency = <26000000>;
 	};
 };
+
+&pcie {
+	/* Synopsys, Inc. Device */
+	pci_root: root@0,0 {
+		compatible = "pci16c3,abcd";
+		reg = <0x00000000 0 0 0 0>;
+
+		#address-cells = <3>;
+		#size-cells = <2>;
+		#interrupt-cells = <1>;
+	};
+};
diff --git a/arch/arm/boot/dts/ls1021a-qds.dts b/arch/arm/boot/dts/ls1021a-qds.dts
index 67b4de0e3439..7bb402d3e9d0 100644
--- a/arch/arm/boot/dts/ls1021a-qds.dts
+++ b/arch/arm/boot/dts/ls1021a-qds.dts
@@ -331,3 +331,19 @@
 &uart1 {
 	status = "okay";
 };
+
+&can0 {
+	status = "okay";
+};
+
+&can1 {
+	status = "okay";
+};
+
+&can2 {
+	status = "disabled";
+};
+
+&can3 {
+	status = "disabled";
+};
diff --git a/arch/arm/boot/dts/ls1021a-twr.dts b/arch/arm/boot/dts/ls1021a-twr.dts
index 44715c8ef756..860b898141f0 100644
--- a/arch/arm/boot/dts/ls1021a-twr.dts
+++ b/arch/arm/boot/dts/ls1021a-twr.dts
@@ -243,3 +243,19 @@
 &uart1 {
 	status = "okay";
 };
+
+&can0 {
+	status = "okay";
+};
+
+&can1 {
+	status = "okay";
+};
+
+&can2 {
+	status = "disabled";
+};
+
+&can3 {
+	status = "disabled";
+};
diff --git a/arch/arm/boot/dts/ls1021a.dtsi b/arch/arm/boot/dts/ls1021a.dtsi
index 9319e1f0f1d8..7789031898b0 100644
--- a/arch/arm/boot/dts/ls1021a.dtsi
+++ b/arch/arm/boot/dts/ls1021a.dtsi
@@ -730,5 +730,41 @@
 					<0000 0 0 3 &gic GIC_SPI 191 IRQ_TYPE_LEVEL_HIGH>,
 					<0000 0 0 4 &gic GIC_SPI 193 IRQ_TYPE_LEVEL_HIGH>;
 		};
+
+		can0: can@2a70000 {
+			compatible = "fsl,ls1021ar2-flexcan";
+			reg = <0x0 0x2a70000 0x0 0x1000>;
+			interrupts = <GIC_SPI 126 IRQ_TYPE_LEVEL_HIGH>;
+			clocks = <&clockgen 4 1>, <&clockgen 4 1>;
+			clock-names = "ipg", "per";
+			big-endian;
+		};
+
+		can1: can@2a80000 {
+			compatible = "fsl,ls1021ar2-flexcan";
+			reg = <0x0 0x2a80000 0x0 0x1000>;
+			interrupts = <GIC_SPI 127 IRQ_TYPE_LEVEL_HIGH>;
+			clocks = <&clockgen 4 1>, <&clockgen 4 1>;
+			clock-names = "ipg", "per";
+			big-endian;
+		};
+
+		can2: can@2a90000 {
+			compatible = "fsl,ls1021ar2-flexcan";
+			reg = <0x0 0x2a90000 0x0 0x1000>;
+			interrupts = <GIC_SPI 128 IRQ_TYPE_LEVEL_HIGH>;
+			clocks = <&clockgen 4 1>, <&clockgen 4 1>;
+			clock-names = "ipg", "per";
+			big-endian;
+		};
+
+		can3: can@2aa0000 {
+			compatible = "fsl,ls1021ar2-flexcan";
+			reg = <0x0 0x2aa0000 0x0 0x1000>;
+			interrupts = <GIC_SPI 129 IRQ_TYPE_LEVEL_HIGH>;
+			clocks = <&clockgen 4 1>, <&clockgen 4 1>;
+			clock-names = "ipg", "per";
+			big-endian;
+		};
 	};
 };
diff --git a/arch/arm/net/bpf_jit_32.c b/arch/arm/net/bpf_jit_32.c
index 323a4df59a6c..b5030e1a41d8 100644
--- a/arch/arm/net/bpf_jit_32.c
+++ b/arch/arm/net/bpf_jit_32.c
@@ -25,8 +25,6 @@
 
 #include "bpf_jit_32.h"
 
-int bpf_jit_enable __read_mostly;
-
 /*
  * eBPF prog stack layout:
  *
@@ -365,15 +363,7 @@ static inline int epilogue_offset(const struct jit_ctx *ctx)
 static inline void emit_udivmod(u8 rd, u8 rm, u8 rn, struct jit_ctx *ctx, u8 op)
 {
 	const u8 *tmp = bpf2a32[TMP_REG_1];
-	s32 jmp_offset;
 
-	/* checks if divisor is zero or not. If it is, then
-	 * exit directly.
-	 */
-	emit(ARM_CMP_I(rn, 0), ctx);
-	_emit(ARM_COND_EQ, ARM_MOV_I(ARM_R0, 0), ctx);
-	jmp_offset = epilogue_offset(ctx);
-	_emit(ARM_COND_EQ, ARM_B(jmp_offset), ctx);
 #if __LINUX_ARM_ARCH__ == 7
 	if (elf_hwcap & HWCAP_IDIVA) {
 		if (op == BPF_DIV)
@@ -1821,7 +1811,7 @@ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog)
 	/* If BPF JIT was not enabled then we must fall back to
 	 * the interpreter.
 	 */
-	if (!bpf_jit_enable)
+	if (!prog->jit_requested)
 		return orig_prog;
 
 	/* If constant blinding was enabled and we failed during blinding
diff --git a/arch/arm64/net/bpf_jit_comp.c b/arch/arm64/net/bpf_jit_comp.c
index bb32f7f6dd0f..1d4f1da7c58f 100644
--- a/arch/arm64/net/bpf_jit_comp.c
+++ b/arch/arm64/net/bpf_jit_comp.c
@@ -31,8 +31,6 @@
 
 #include "bpf_jit.h"
 
-int bpf_jit_enable __read_mostly;
-
 #define TMP_REG_1 (MAX_BPF_JIT_REG + 0)
 #define TMP_REG_2 (MAX_BPF_JIT_REG + 1)
 #define TCALL_CNT (MAX_BPF_JIT_REG + 2)
@@ -99,6 +97,20 @@ static inline void emit_a64_mov_i64(const int reg, const u64 val,
 	}
 }
 
+static inline void emit_addr_mov_i64(const int reg, const u64 val,
+				     struct jit_ctx *ctx)
+{
+	u64 tmp = val;
+	int shift = 0;
+
+	emit(A64_MOVZ(1, reg, tmp & 0xffff, shift), ctx);
+	for (;shift < 48;) {
+		tmp >>= 16;
+		shift += 16;
+		emit(A64_MOVK(1, reg, tmp & 0xffff, shift), ctx);
+	}
+}
+
 static inline void emit_a64_mov_i(const int is64, const int reg,
 				  const s32 val, struct jit_ctx *ctx)
 {
@@ -378,18 +390,6 @@ static int build_insn(const struct bpf_insn *insn, struct jit_ctx *ctx)
 	case BPF_ALU64 | BPF_DIV | BPF_X:
 	case BPF_ALU | BPF_MOD | BPF_X:
 	case BPF_ALU64 | BPF_MOD | BPF_X:
-	{
-		const u8 r0 = bpf2a64[BPF_REG_0];
-
-		/* if (src == 0) return 0 */
-		jmp_offset = 3; /* skip ahead to else path */
-		check_imm19(jmp_offset);
-		emit(A64_CBNZ(is64, src, jmp_offset), ctx);
-		emit(A64_MOVZ(1, r0, 0, 0), ctx);
-		jmp_offset = epilogue_offset(ctx);
-		check_imm26(jmp_offset);
-		emit(A64_B(jmp_offset), ctx);
-		/* else */
 		switch (BPF_OP(code)) {
 		case BPF_DIV:
 			emit(A64_UDIV(is64, dst, dst, src), ctx);
@@ -401,7 +401,6 @@ static int build_insn(const struct bpf_insn *insn, struct jit_ctx *ctx)
 			break;
 		}
 		break;
-	}
 	case BPF_ALU | BPF_LSH | BPF_X:
 	case BPF_ALU64 | BPF_LSH | BPF_X:
 		emit(A64_LSLV(is64, dst, dst, src), ctx);
@@ -605,7 +604,10 @@ emit_cond_jmp:
 		const u8 r0 = bpf2a64[BPF_REG_0];
 		const u64 func = (u64)__bpf_call_base + imm;
 
-		emit_a64_mov_i64(tmp, func, ctx);
+		if (ctx->prog->is_func)
+			emit_addr_mov_i64(tmp, func, ctx);
+		else
+			emit_a64_mov_i64(tmp, func, ctx);
 		emit(A64_BLR(tmp), ctx);
 		emit(A64_MOV(1, r0, A64_R(0)), ctx);
 		break;
@@ -837,16 +839,24 @@ static inline void bpf_flush_icache(void *start, void *end)
 	flush_icache_range((unsigned long)start, (unsigned long)end);
 }
 
+struct arm64_jit_data {
+	struct bpf_binary_header *header;
+	u8 *image;
+	struct jit_ctx ctx;
+};
+
 struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog)
 {
 	struct bpf_prog *tmp, *orig_prog = prog;
 	struct bpf_binary_header *header;
+	struct arm64_jit_data *jit_data;
 	bool tmp_blinded = false;
+	bool extra_pass = false;
 	struct jit_ctx ctx;
 	int image_size;
 	u8 *image_ptr;
 
-	if (!bpf_jit_enable)
+	if (!prog->jit_requested)
 		return orig_prog;
 
 	tmp = bpf_jit_blind_constants(prog);
@@ -860,13 +870,30 @@ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog)
 		prog = tmp;
 	}
 
+	jit_data = prog->aux->jit_data;
+	if (!jit_data) {
+		jit_data = kzalloc(sizeof(*jit_data), GFP_KERNEL);
+		if (!jit_data) {
+			prog = orig_prog;
+			goto out;
+		}
+		prog->aux->jit_data = jit_data;
+	}
+	if (jit_data->ctx.offset) {
+		ctx = jit_data->ctx;
+		image_ptr = jit_data->image;
+		header = jit_data->header;
+		extra_pass = true;
+		image_size = sizeof(u32) * ctx.idx;
+		goto skip_init_ctx;
+	}
 	memset(&ctx, 0, sizeof(ctx));
 	ctx.prog = prog;
 
 	ctx.offset = kcalloc(prog->len, sizeof(int), GFP_KERNEL);
 	if (ctx.offset == NULL) {
 		prog = orig_prog;
-		goto out;
+		goto out_off;
 	}
 
 	/* 1. Initial fake pass to compute ctx->idx. */
@@ -897,6 +924,7 @@ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog)
 	/* 2. Now, the actual pass. */
 
 	ctx.image = (__le32 *)image_ptr;
+skip_init_ctx:
 	ctx.idx = 0;
 
 	build_prologue(&ctx);
@@ -922,13 +950,31 @@ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog)
 
 	bpf_flush_icache(header, ctx.image + ctx.idx);
 
-	bpf_jit_binary_lock_ro(header);
+	if (!prog->is_func || extra_pass) {
+		if (extra_pass && ctx.idx != jit_data->ctx.idx) {
+			pr_err_once("multi-func JIT bug %d != %d\n",
+				    ctx.idx, jit_data->ctx.idx);
+			bpf_jit_binary_free(header);
+			prog->bpf_func = NULL;
+			prog->jited = 0;
+			goto out_off;
+		}
+		bpf_jit_binary_lock_ro(header);
+	} else {
+		jit_data->ctx = ctx;
+		jit_data->image = image_ptr;
+		jit_data->header = header;
+	}
 	prog->bpf_func = (void *)ctx.image;
 	prog->jited = 1;
 	prog->jited_len = image_size;
 
+	if (!prog->is_func || extra_pass) {
 out_off:
-	kfree(ctx.offset);
+		kfree(ctx.offset);
+		kfree(jit_data);
+		prog->aux->jit_data = NULL;
+	}
 out:
 	if (tmp_blinded)
 		bpf_jit_prog_release_other(prog, prog == orig_prog ?
diff --git a/arch/mips/bcm63xx/dev-enet.c b/arch/mips/bcm63xx/dev-enet.c
index e8284771d620..07b4c65a88a4 100644
--- a/arch/mips/bcm63xx/dev-enet.c
+++ b/arch/mips/bcm63xx/dev-enet.c
@@ -265,6 +265,14 @@ int __init bcm63xx_enet_register(int unit,
 		dpd->dma_chan_width = ENETDMA_CHAN_WIDTH;
 	}
 
+	if (unit == 0) {
+		dpd->rx_chan = 0;
+		dpd->tx_chan = 1;
+	} else {
+		dpd->rx_chan = 2;
+		dpd->tx_chan = 3;
+	}
+
 	ret = platform_device_register(pdev);
 	if (ret)
 		return ret;
diff --git a/arch/mips/include/asm/mach-bcm63xx/bcm63xx_dev_enet.h b/arch/mips/include/asm/mach-bcm63xx/bcm63xx_dev_enet.h
index c0bd47444cff..da39e4d326ba 100644
--- a/arch/mips/include/asm/mach-bcm63xx/bcm63xx_dev_enet.h
+++ b/arch/mips/include/asm/mach-bcm63xx/bcm63xx_dev_enet.h
@@ -55,6 +55,10 @@ struct bcm63xx_enet_platform_data {
 
 	/* DMA descriptor shift */
 	unsigned int dma_desc_shift;
+
+	/* dma channel ids */
+	int rx_chan;
+	int tx_chan;
 };
 
 /*
diff --git a/arch/mips/net/bpf_jit.c b/arch/mips/net/bpf_jit.c
index 44b925005dd3..4d8cb9bb8365 100644
--- a/arch/mips/net/bpf_jit.c
+++ b/arch/mips/net/bpf_jit.c
@@ -1207,8 +1207,6 @@ jmp_cmp:
 	return 0;
 }
 
-int bpf_jit_enable __read_mostly;
-
 void bpf_jit_compile(struct bpf_prog *fp)
 {
 	struct jit_ctx ctx;
diff --git a/arch/mips/net/ebpf_jit.c b/arch/mips/net/ebpf_jit.c
index 962b0259b4b6..3e2798bfea4f 100644
--- a/arch/mips/net/ebpf_jit.c
+++ b/arch/mips/net/ebpf_jit.c
@@ -177,8 +177,6 @@ static u32 b_imm(unsigned int tgt, struct jit_ctx *ctx)
 		(ctx->idx * 4) - 4;
 }
 
-int bpf_jit_enable __read_mostly;
-
 enum which_ebpf_reg {
 	src_reg,
 	src_reg_no_fp,
@@ -743,16 +741,11 @@ static int build_one_insn(const struct bpf_insn *insn, struct jit_ctx *ctx,
 		break;
 	case BPF_ALU | BPF_DIV | BPF_K: /* ALU_IMM */
 	case BPF_ALU | BPF_MOD | BPF_K: /* ALU_IMM */
+		if (insn->imm == 0)
+			return -EINVAL;
 		dst = ebpf_to_mips_reg(ctx, insn, dst_reg);
 		if (dst < 0)
 			return dst;
-		if (insn->imm == 0) { /* Div by zero */
-			b_off = b_imm(exit_idx, ctx);
-			if (is_bad_offset(b_off))
-				return -E2BIG;
-			emit_instr(ctx, beq, MIPS_R_ZERO, MIPS_R_ZERO, b_off);
-			emit_instr(ctx, addu, MIPS_R_V0, MIPS_R_ZERO, MIPS_R_ZERO);
-		}
 		td = get_reg_val_type(ctx, this_idx, insn->dst_reg);
 		if (td == REG_64BIT || td == REG_32BIT_ZERO_EX)
 			/* sign extend */
@@ -772,19 +765,13 @@ static int build_one_insn(const struct bpf_insn *insn, struct jit_ctx *ctx,
 		break;
 	case BPF_ALU64 | BPF_DIV | BPF_K: /* ALU_IMM */
 	case BPF_ALU64 | BPF_MOD | BPF_K: /* ALU_IMM */
+		if (insn->imm == 0)
+			return -EINVAL;
 		dst = ebpf_to_mips_reg(ctx, insn, dst_reg);
 		if (dst < 0)
 			return dst;
-		if (insn->imm == 0) { /* Div by zero */
-			b_off = b_imm(exit_idx, ctx);
-			if (is_bad_offset(b_off))
-				return -E2BIG;
-			emit_instr(ctx, beq, MIPS_R_ZERO, MIPS_R_ZERO, b_off);
-			emit_instr(ctx, addu, MIPS_R_V0, MIPS_R_ZERO, MIPS_R_ZERO);
-		}
 		if (get_reg_val_type(ctx, this_idx, insn->dst_reg) == REG_32BIT)
 			emit_instr(ctx, dinsu, dst, MIPS_R_ZERO, 32, 32);
-
 		if (insn->imm == 1) {
 			/* div by 1 is a nop, mod by 1 is zero */
 			if (bpf_op == BPF_MOD)
@@ -862,11 +849,6 @@ static int build_one_insn(const struct bpf_insn *insn, struct jit_ctx *ctx,
 			break;
 		case BPF_DIV:
 		case BPF_MOD:
-			b_off = b_imm(exit_idx, ctx);
-			if (is_bad_offset(b_off))
-				return -E2BIG;
-			emit_instr(ctx, beq, src, MIPS_R_ZERO, b_off);
-			emit_instr(ctx, movz, MIPS_R_V0, MIPS_R_ZERO, src);
 			emit_instr(ctx, ddivu, dst, src);
 			if (bpf_op == BPF_DIV)
 				emit_instr(ctx, mflo, dst);
@@ -945,11 +927,6 @@ static int build_one_insn(const struct bpf_insn *insn, struct jit_ctx *ctx,
 			break;
 		case BPF_DIV:
 		case BPF_MOD:
-			b_off = b_imm(exit_idx, ctx);
-			if (is_bad_offset(b_off))
-				return -E2BIG;
-			emit_instr(ctx, beq, src, MIPS_R_ZERO, b_off);
-			emit_instr(ctx, movz, MIPS_R_V0, MIPS_R_ZERO, src);
 			emit_instr(ctx, divu, dst, src);
 			if (bpf_op == BPF_DIV)
 				emit_instr(ctx, mflo, dst);
@@ -1869,7 +1846,7 @@ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog)
 	unsigned int image_size;
 	u8 *image_ptr;
 
-	if (!bpf_jit_enable || !cpu_has_mips64r2)
+	if (!prog->jit_requested || !cpu_has_mips64r2)
 		return prog;
 
 	tmp = bpf_jit_blind_constants(prog);
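These MIPS hunks, and the matching removals in the powerpc, s390, sparc
and x86 JITs below, lean on the companion verifier change in this
series: a zero immediate divisor is now rejected at load time (hence
the bare -EINVAL), and a runtime BPF_DIV/BPF_MOD by a zero register no
longer aborts the program, so the per-arch zero checks can go. A hedged
toy model of the resulting semantics as I read them (div by 0 yields 0,
mod by 0 leaves dst unchanged; plain C, not kernel code):

	unsigned long long bpf_div64(unsigned long long dst,
				     unsigned long long src)
	{
		return src ? dst / src : 0;	/* div by 0 -> dst = 0 */
	}

	unsigned long long bpf_mod64(unsigned long long dst,
				     unsigned long long src)
	{
		return src ? dst % src : dst;	/* mod by 0 -> dst unchanged */
	}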
diff --git a/arch/powerpc/boot/dts/fsl/p1010si-post.dtsi b/arch/powerpc/boot/dts/fsl/p1010si-post.dtsi
index af12ead88c5f..1b4aafc1f6a2 100644
--- a/arch/powerpc/boot/dts/fsl/p1010si-post.dtsi
+++ b/arch/powerpc/boot/dts/fsl/p1010si-post.dtsi
@@ -137,12 +137,14 @@
 		compatible = "fsl,p1010-flexcan";
 		reg = <0x1c000 0x1000>;
 		interrupts = <48 0x2 0 0>;
+		big-endian;
 	};
 
 	can1: can@1d000 {
 		compatible = "fsl,p1010-flexcan";
 		reg = <0x1d000 0x1000>;
 		interrupts = <61 0x2 0 0>;
+		big-endian;
 	};
 
 	L2: l2-cache-controller@20000 {
diff --git a/arch/powerpc/net/bpf_jit_comp.c b/arch/powerpc/net/bpf_jit_comp.c
index f9941b3b5770..872d1f6dd11e 100644
--- a/arch/powerpc/net/bpf_jit_comp.c
+++ b/arch/powerpc/net/bpf_jit_comp.c
@@ -18,8 +18,6 @@
 
 #include "bpf_jit32.h"
 
-int bpf_jit_enable __read_mostly;
-
 static inline void bpf_flush_icache(void *start, void *end)
 {
 	smp_wmb();
diff --git a/arch/powerpc/net/bpf_jit_comp64.c b/arch/powerpc/net/bpf_jit_comp64.c
index d183b4801bdb..0a34b0cec7b7 100644
--- a/arch/powerpc/net/bpf_jit_comp64.c
+++ b/arch/powerpc/net/bpf_jit_comp64.c
@@ -21,8 +21,6 @@
 
 #include "bpf_jit64.h"
 
-int bpf_jit_enable __read_mostly;
-
 static void bpf_jit_fill_ill_insns(void *area, unsigned int size)
 {
 	memset32(area, BREAKPOINT_INSTRUCTION, size/4);
@@ -383,10 +381,6 @@ static int bpf_jit_build_body(struct bpf_prog *fp, u32 *image,
 			goto bpf_alu32_trunc;
 		case BPF_ALU | BPF_DIV | BPF_X: /* (u32) dst /= (u32) src */
 		case BPF_ALU | BPF_MOD | BPF_X: /* (u32) dst %= (u32) src */
-			PPC_CMPWI(src_reg, 0);
-			PPC_BCC_SHORT(COND_NE, (ctx->idx * 4) + 12);
-			PPC_LI(b2p[BPF_REG_0], 0);
-			PPC_JMP(exit_addr);
 			if (BPF_OP(code) == BPF_MOD) {
 				PPC_DIVWU(b2p[TMP_REG_1], dst_reg, src_reg);
 				PPC_MULW(b2p[TMP_REG_1], src_reg,
@@ -397,10 +391,6 @@ static int bpf_jit_build_body(struct bpf_prog *fp, u32 *image,
 			goto bpf_alu32_trunc;
 		case BPF_ALU64 | BPF_DIV | BPF_X: /* dst /= src */
 		case BPF_ALU64 | BPF_MOD | BPF_X: /* dst %= src */
-			PPC_CMPDI(src_reg, 0);
-			PPC_BCC_SHORT(COND_NE, (ctx->idx * 4) + 12);
-			PPC_LI(b2p[BPF_REG_0], 0);
-			PPC_JMP(exit_addr);
 			if (BPF_OP(code) == BPF_MOD) {
 				PPC_DIVD(b2p[TMP_REG_1], dst_reg, src_reg);
 				PPC_MULD(b2p[TMP_REG_1], src_reg,
@@ -995,7 +985,7 @@ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *fp)
 	struct bpf_prog *tmp_fp;
 	bool bpf_blinded = false;
 
-	if (!bpf_jit_enable)
+	if (!fp->jit_requested)
 		return org_fp;
 
 	tmp_fp = bpf_jit_blind_constants(org_fp);
diff --git a/arch/s390/include/asm/diag.h b/arch/s390/include/asm/diag.h
index 6db78567294c..cdbaad50c7c7 100644
--- a/arch/s390/include/asm/diag.h
+++ b/arch/s390/include/asm/diag.h
@@ -229,13 +229,55 @@ struct diag204_x_phys_block {
 } __packed;
 
 enum diag26c_sc {
+	DIAG26C_PORT_VNIC    = 0x00000024,
 	DIAG26C_MAC_SERVICES = 0x00000030
 };
 
 enum diag26c_version {
-	DIAG26C_VERSION2 = 0x00000002	/* z/VM 5.4.0 */
+	DIAG26C_VERSION2	 = 0x00000002,	/* z/VM 5.4.0 */
+	DIAG26C_VERSION6_VM65918 = 0x00020006	/* z/VM 6.4.0 + VM65918 */
 };
 
+#define DIAG26C_VNIC_INFO	0x0002
+struct diag26c_vnic_req {
+	u32	resp_buf_len;
+	u32	resp_version;
+	u16	req_format;
+	u16	vlan_id;
+	u64	sys_name;
+	u8	res[2];
+	u16	devno;
+} __packed __aligned(8);
+
+#define VNIC_INFO_PROT_L3	1
+#define VNIC_INFO_PROT_L2	2
+/* Note: this is the bare minimum, use it for uninitialized VNICs only. */
+struct diag26c_vnic_resp {
+	u32	version;
+	u32	entry_cnt;
+	/* VNIC info: */
+	u32	next_entry;
+	u64	owner;
+	u16	devno;
+	u8	status;
+	u8	type;
+	u64	lan_owner;
+	u64	lan_name;
+	u64	port_name;
+	u8	port_type;
+	u8	ext_status:6;
+	u8	protocol:2;
+	u16	base_devno;
+	u32	port_num;
+	u32	ifindex;
+	u32	maxinfo;
+	u32	dev_count;
+	/* 3x device info: */
+	u8	dev_info1[28];
+	u8	dev_info2[28];
+	u8	dev_info3[28];
+} __packed __aligned(8);
+
 #define DIAG26C_GET_MAC	0x0000
 struct diag26c_mac_req {
 	u32	resp_buf_len;
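A hedged sketch of how the new VNIC layout above might be queried,
assuming the existing s390 helper declared in this header,
int diag26c(void *req, void *resp, enum diag26c_sc subcode); the
device number and function name are made up for illustration:

	#include <asm/diag.h>
	#include <linux/slab.h>

	static int demo_vnic_query(u16 devno, u8 *protocol)
	{
		struct diag26c_vnic_req *req;
		struct diag26c_vnic_resp *resp;
		int rc = -ENOMEM;

		req = kzalloc(sizeof(*req), GFP_KERNEL);
		resp = kzalloc(sizeof(*resp), GFP_KERNEL);
		if (!req || !resp)
			goto out;

		req->resp_buf_len = sizeof(*resp);
		req->resp_version = DIAG26C_VERSION6_VM65918;
		req->req_format = DIAG26C_VNIC_INFO;
		req->devno = devno;		/* example: 0xf500 */

		rc = diag26c(req, resp, DIAG26C_PORT_VNIC);
		if (!rc)
			*protocol = resp->protocol; /* VNIC_INFO_PROT_L2/L3 */
	out:
		kfree(req);
		kfree(resp);
		return rc;
	}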
diff --git a/arch/s390/net/bpf_jit_comp.c b/arch/s390/net/bpf_jit_comp.c
index 9557d8b516df..78a19c93b380 100644
--- a/arch/s390/net/bpf_jit_comp.c
+++ b/arch/s390/net/bpf_jit_comp.c
@@ -28,8 +28,6 @@
 #include <asm/set_memory.h>
 #include "bpf_jit.h"
 
-int bpf_jit_enable __read_mostly;
-
 struct bpf_jit {
 	u32 seen;		/* Flags to remember seen eBPF instructions */
 	u32 seen_reg[16];	/* Array to remember which registers are used */
@@ -612,11 +610,6 @@ static noinline int bpf_jit_insn(struct bpf_jit *jit, struct bpf_prog *fp, int i
 	{
 		int rc_reg = BPF_OP(insn->code) == BPF_DIV ? REG_W1 : REG_W0;
 
-		jit->seen |= SEEN_RET0;
-		/* ltr %src,%src (if src == 0 goto fail) */
-		EMIT2(0x1200, src_reg, src_reg);
-		/* jz <ret0> */
-		EMIT4_PCREL(0xa7840000, jit->ret0_ip - jit->prg);
 		/* lhi %w0,0 */
 		EMIT4_IMM(0xa7080000, REG_W0, 0);
 		/* lr %w1,%dst */
@@ -632,11 +625,6 @@ static noinline int bpf_jit_insn(struct bpf_jit *jit, struct bpf_prog *fp, int i
 	{
 		int rc_reg = BPF_OP(insn->code) == BPF_DIV ? REG_W1 : REG_W0;
 
-		jit->seen |= SEEN_RET0;
-		/* ltgr %src,%src (if src == 0 goto fail) */
-		EMIT4(0xb9020000, src_reg, src_reg);
-		/* jz <ret0> */
-		EMIT4_PCREL(0xa7840000, jit->ret0_ip - jit->prg);
 		/* lghi %w0,0 */
 		EMIT4_IMM(0xa7090000, REG_W0, 0);
 		/* lgr %w1,%dst */
@@ -1299,7 +1287,7 @@ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *fp)
 	struct bpf_jit jit;
 	int pass;
 
-	if (!bpf_jit_enable)
+	if (!fp->jit_requested)
 		return orig_fp;
 
 	tmp = bpf_jit_blind_constants(fp);
diff --git a/arch/sh/boards/board-espt.c b/arch/sh/boards/board-espt.c
index 7291e2f11a47..4d6be53058d6 100644
--- a/arch/sh/boards/board-espt.c
+++ b/arch/sh/boards/board-espt.c
@@ -79,7 +79,6 @@ static struct resource sh_eth_resources[] = {
 
 static struct sh_eth_plat_data sh7763_eth_pdata = {
 	.phy = 0,
-	.edmac_endian = EDMAC_LITTLE_ENDIAN,
 	.phy_interface = PHY_INTERFACE_MODE_MII,
 };
 
diff --git a/arch/sh/boards/board-sh7757lcr.c b/arch/sh/boards/board-sh7757lcr.c
index 0104c8199c48..1bde08dc067d 100644
--- a/arch/sh/boards/board-sh7757lcr.c
+++ b/arch/sh/boards/board-sh7757lcr.c
@@ -76,7 +76,6 @@ static struct resource sh_eth0_resources[] = {
 
 static struct sh_eth_plat_data sh7757_eth0_pdata = {
 	.phy = 1,
-	.edmac_endian = EDMAC_LITTLE_ENDIAN,
 	.set_mdio_gate = sh7757_eth_set_mdio_gate,
 };
 
@@ -104,7 +103,6 @@ static struct resource sh_eth1_resources[] = {
 
 static struct sh_eth_plat_data sh7757_eth1_pdata = {
 	.phy = 1,
-	.edmac_endian = EDMAC_LITTLE_ENDIAN,
 	.set_mdio_gate = sh7757_eth_set_mdio_gate,
 };
 
@@ -148,7 +146,6 @@ static struct resource sh_eth_giga0_resources[] = {
 
 static struct sh_eth_plat_data sh7757_eth_giga0_pdata = {
 	.phy = 18,
-	.edmac_endian = EDMAC_LITTLE_ENDIAN,
 	.set_mdio_gate = sh7757_eth_giga_set_mdio_gate,
 	.phy_interface = PHY_INTERFACE_MODE_RGMII_ID,
 };
@@ -182,7 +179,6 @@ static struct resource sh_eth_giga1_resources[] = {
 
 static struct sh_eth_plat_data sh7757_eth_giga1_pdata = {
 	.phy = 19,
-	.edmac_endian = EDMAC_LITTLE_ENDIAN,
 	.set_mdio_gate = sh7757_eth_giga_set_mdio_gate,
 	.phy_interface = PHY_INTERFACE_MODE_RGMII_ID,
 };
diff --git a/arch/sh/boards/mach-ecovec24/setup.c b/arch/sh/boards/mach-ecovec24/setup.c
index 1faf6cb93dcb..6f929abe0b50 100644
--- a/arch/sh/boards/mach-ecovec24/setup.c
+++ b/arch/sh/boards/mach-ecovec24/setup.c
@@ -159,7 +159,6 @@ static struct resource sh_eth_resources[] = {
 
 static struct sh_eth_plat_data sh_eth_plat = {
 	.phy = 0x1f, /* SMSC LAN8700 */
-	.edmac_endian = EDMAC_LITTLE_ENDIAN,
 	.phy_interface = PHY_INTERFACE_MODE_MII,
 	.ether_link_active_low = 1
 };
diff --git a/arch/sh/boards/mach-se/7724/setup.c b/arch/sh/boards/mach-se/7724/setup.c
index f1fecd395679..255952555656 100644
--- a/arch/sh/boards/mach-se/7724/setup.c
+++ b/arch/sh/boards/mach-se/7724/setup.c
@@ -374,7 +374,6 @@ static struct resource sh_eth_resources[] = {
 
 static struct sh_eth_plat_data sh_eth_plat = {
 	.phy = 0x1f, /* SMSC LAN8187 */
-	.edmac_endian = EDMAC_LITTLE_ENDIAN,
 	.phy_interface = PHY_INTERFACE_MODE_MII,
 };
 
diff --git a/arch/sh/boards/mach-sh7763rdp/setup.c b/arch/sh/boards/mach-sh7763rdp/setup.c
index 2c8fb04685d4..6e62686b81b1 100644
--- a/arch/sh/boards/mach-sh7763rdp/setup.c
+++ b/arch/sh/boards/mach-sh7763rdp/setup.c
@@ -87,7 +87,6 @@ static struct resource sh_eth_resources[] = {
 
 static struct sh_eth_plat_data sh7763_eth_pdata = {
 	.phy = 1,
-	.edmac_endian = EDMAC_LITTLE_ENDIAN,
 	.phy_interface = PHY_INTERFACE_MODE_MII,
 };
 
diff --git a/arch/sh/kernel/cpu/sh2/setup-sh7619.c b/arch/sh/kernel/cpu/sh2/setup-sh7619.c
index 95796ad00fbe..d08db08dec38 100644
--- a/arch/sh/kernel/cpu/sh2/setup-sh7619.c
+++ b/arch/sh/kernel/cpu/sh2/setup-sh7619.c
@@ -122,7 +122,6 @@ static struct platform_device scif2_device = {
 
 static struct sh_eth_plat_data eth_platform_data = {
 	.phy		= 1,
-	.edmac_endian	= EDMAC_LITTLE_ENDIAN,
 	.phy_interface	= PHY_INTERFACE_MODE_MII,
 };
 
diff --git a/arch/sparc/net/bpf_jit_comp_32.c b/arch/sparc/net/bpf_jit_comp_32.c
index 09e318eb34ee..3bd8ca95e521 100644
--- a/arch/sparc/net/bpf_jit_comp_32.c
+++ b/arch/sparc/net/bpf_jit_comp_32.c
@@ -11,8 +11,6 @@
 
 #include "bpf_jit_32.h"
 
-int bpf_jit_enable __read_mostly;
-
 static inline bool is_simm13(unsigned int value)
 {
 	return value + 0x1000 < 0x2000;
diff --git a/arch/sparc/net/bpf_jit_comp_64.c b/arch/sparc/net/bpf_jit_comp_64.c
index ff5f9cb3039a..48a25869349b 100644
--- a/arch/sparc/net/bpf_jit_comp_64.c
+++ b/arch/sparc/net/bpf_jit_comp_64.c
@@ -12,8 +12,6 @@
 
 #include "bpf_jit_64.h"
 
-int bpf_jit_enable __read_mostly;
-
 static inline bool is_simm13(unsigned int value)
 {
 	return value + 0x1000 < 0x2000;
@@ -969,31 +967,17 @@ static int build_insn(const struct bpf_insn *insn, struct jit_ctx *ctx)
 		emit_alu(MULX, src, dst, ctx);
 		break;
 	case BPF_ALU | BPF_DIV | BPF_X:
-		emit_cmp(src, G0, ctx);
-		emit_branch(BE|ANNUL, ctx->idx, ctx->epilogue_offset, ctx);
-		emit_loadimm(0, bpf2sparc[BPF_REG_0], ctx);
-
 		emit_write_y(G0, ctx);
 		emit_alu(DIV, src, dst, ctx);
 		break;
-
 	case BPF_ALU64 | BPF_DIV | BPF_X:
-		emit_cmp(src, G0, ctx);
-		emit_branch(BE|ANNUL, ctx->idx, ctx->epilogue_offset, ctx);
-		emit_loadimm(0, bpf2sparc[BPF_REG_0], ctx);
-
 		emit_alu(UDIVX, src, dst, ctx);
 		break;
-
 	case BPF_ALU | BPF_MOD | BPF_X: {
 		const u8 tmp = bpf2sparc[TMP_REG_1];
 
 		ctx->tmp_1_used = true;
 
-		emit_cmp(src, G0, ctx);
-		emit_branch(BE|ANNUL, ctx->idx, ctx->epilogue_offset, ctx);
-		emit_loadimm(0, bpf2sparc[BPF_REG_0], ctx);
-
 		emit_write_y(G0, ctx);
 		emit_alu3(DIV, dst, src, tmp, ctx);
 		emit_alu3(MULX, tmp, src, tmp, ctx);
@@ -1005,10 +989,6 @@ static int build_insn(const struct bpf_insn *insn, struct jit_ctx *ctx)
 
 		ctx->tmp_1_used = true;
 
-		emit_cmp(src, G0, ctx);
-		emit_branch(BE|ANNUL, ctx->idx, ctx->epilogue_offset, ctx);
-		emit_loadimm(0, bpf2sparc[BPF_REG_0], ctx);
-
 		emit_alu3(UDIVX, dst, src, tmp, ctx);
 		emit_alu3(MULX, tmp, src, tmp, ctx);
 		emit_alu3(SUB, dst, tmp, dst, ctx);
@@ -1509,17 +1489,25 @@ static void jit_fill_hole(void *area, unsigned int size)
 		*ptr++ = 0x91d02005; /* ta 5 */
 }
 
+struct sparc64_jit_data {
+	struct bpf_binary_header *header;
+	u8 *image;
+	struct jit_ctx ctx;
+};
+
 struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog)
 {
 	struct bpf_prog *tmp, *orig_prog = prog;
+	struct sparc64_jit_data *jit_data;
 	struct bpf_binary_header *header;
 	bool tmp_blinded = false;
+	bool extra_pass = false;
 	struct jit_ctx ctx;
 	u32 image_size;
 	u8 *image_ptr;
 	int pass;
 
-	if (!bpf_jit_enable)
+	if (!prog->jit_requested)
 		return orig_prog;
 
 	tmp = bpf_jit_blind_constants(prog);
@@ -1533,13 +1521,31 @@ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog)
 		prog = tmp;
 	}
 
+	jit_data = prog->aux->jit_data;
+	if (!jit_data) {
+		jit_data = kzalloc(sizeof(*jit_data), GFP_KERNEL);
+		if (!jit_data) {
+			prog = orig_prog;
+			goto out;
+		}
+		prog->aux->jit_data = jit_data;
+	}
+	if (jit_data->ctx.offset) {
+		ctx = jit_data->ctx;
+		image_ptr = jit_data->image;
+		header = jit_data->header;
+		extra_pass = true;
+		image_size = sizeof(u32) * ctx.idx;
+		goto skip_init_ctx;
+	}
+
 	memset(&ctx, 0, sizeof(ctx));
 	ctx.prog = prog;
 
 	ctx.offset = kcalloc(prog->len, sizeof(unsigned int), GFP_KERNEL);
 	if (ctx.offset == NULL) {
 		prog = orig_prog;
-		goto out;
+		goto out_off;
 	}
 
 	/* Fake pass to detect features used, and get an accurate assessment
@@ -1562,7 +1568,7 @@ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog)
 	}
 
 	ctx.image = (u32 *)image_ptr;
-
+skip_init_ctx:
 	for (pass = 1; pass < 3; pass++) {
 		ctx.idx = 0;
 
@@ -1593,14 +1599,24 @@ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog)
 
 	bpf_flush_icache(header, (u8 *)header + (header->pages * PAGE_SIZE));
 
-	bpf_jit_binary_lock_ro(header);
+	if (!prog->is_func || extra_pass) {
+		bpf_jit_binary_lock_ro(header);
+	} else {
+		jit_data->ctx = ctx;
+		jit_data->image = image_ptr;
+		jit_data->header = header;
+	}
 
 	prog->bpf_func = (void *)ctx.image;
 	prog->jited = 1;
 	prog->jited_len = image_size;
 
+	if (!prog->is_func || extra_pass) {
 out_off:
-	kfree(ctx.offset);
+		kfree(ctx.offset);
+		kfree(jit_data);
+		prog->aux->jit_data = NULL;
+	}
 out:
 	if (tmp_blinded)
 		bpf_jit_prog_release_other(prog, prog == orig_prog ?
diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
index dbe5542a6666..fcd3b4d24eea 100644
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
@@ -155,6 +155,7 @@ config X86
 	select HAVE_KERNEL_XZ
 	select HAVE_KPROBES
 	select HAVE_KPROBES_ON_FTRACE
+	select HAVE_FUNCTION_ERROR_INJECTION
 	select HAVE_KRETPROBES
 	select HAVE_KVM
 	select HAVE_LIVEPATCH			if X86_64
diff --git a/arch/x86/include/asm/error-injection.h b/arch/x86/include/asm/error-injection.h
new file mode 100644
index 000000000000..47b7a1296245
--- /dev/null
+++ b/arch/x86/include/asm/error-injection.h
@@ -0,0 +1,13 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_ERROR_INJECTION_H
+#define _ASM_ERROR_INJECTION_H
+
+#include <linux/compiler.h>
+#include <linux/linkage.h>
+#include <asm/ptrace.h>
+#include <asm-generic/error-injection.h>
+
+asmlinkage void just_return_func(void);
+void override_function_with_return(struct pt_regs *regs);
+
+#endif /* _ASM_ERROR_INJECTION_H */
diff --git a/arch/x86/include/asm/kprobes.h b/arch/x86/include/asm/kprobes.h
index 9f2e3102e0bb..367d99cff426 100644
--- a/arch/x86/include/asm/kprobes.h
+++ b/arch/x86/include/asm/kprobes.h
@@ -67,6 +67,8 @@ extern const int kretprobe_blacklist_size;
 void arch_remove_kprobe(struct kprobe *p);
 asmlinkage void kretprobe_trampoline(void);
 
+extern void arch_kprobe_override_function(struct pt_regs *regs);
+
 /* Architecture specific copy of original instruction*/
 struct arch_specific_insn {
 	/* copy of the original instruction */
diff --git a/arch/x86/include/asm/ptrace.h b/arch/x86/include/asm/ptrace.h
index 14131dd06b29..6de1fd3d0097 100644
--- a/arch/x86/include/asm/ptrace.h
+++ b/arch/x86/include/asm/ptrace.h
@@ -109,6 +109,11 @@ static inline unsigned long regs_return_value(struct pt_regs *regs)
 	return regs->ax;
 }
 
+static inline void regs_set_return_value(struct pt_regs *regs, unsigned long rc)
+{
+	regs->ax = rc;
+}
+
 /*
  * user_mode(regs) determines whether a register set came from user
  * mode.  On x86_32, this is true if V8086 mode was enabled OR if the
diff --git a/arch/x86/lib/Makefile b/arch/x86/lib/Makefile
index 69a473919260..91e9700cc6dc 100644
--- a/arch/x86/lib/Makefile
+++ b/arch/x86/lib/Makefile
@@ -26,6 +26,7 @@ lib-y += memcpy_$(BITS).o
 lib-$(CONFIG_RWSEM_XCHGADD_ALGORITHM) += rwsem.o
 lib-$(CONFIG_INSTRUCTION_DECODER) += insn.o inat.o insn-eval.o
 lib-$(CONFIG_RANDOMIZE_BASE) += kaslr.o
+lib-$(CONFIG_FUNCTION_ERROR_INJECTION)	+= error-inject.o
 lib-$(CONFIG_RETPOLINE) += retpoline.o
 OBJECT_FILES_NON_STANDARD_retpoline.o :=y
 
diff --git a/arch/x86/lib/error-inject.c b/arch/x86/lib/error-inject.c
new file mode 100644
index 000000000000..7b881d03d0dd
--- /dev/null
+++ b/arch/x86/lib/error-inject.c
@@ -0,0 +1,19 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include <linux/error-injection.h>
+#include <linux/kprobes.h>
+
+asmlinkage void just_return_func(void);
+
+asm(
+	".type just_return_func, @function\n"
+	"just_return_func:\n"
+	"	ret\n"
+	".size just_return_func, .-just_return_func\n"
+);
+
+void override_function_with_return(struct pt_regs *regs)
+{
+	regs->ip = (unsigned long)&just_return_func;
+}
+NOKPROBE_SYMBOL(override_function_with_return);
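A hedged sketch of how these two helpers are meant to be driven,
modelled on the kprobe-based error injection this series enables; the
probed symbol is purely illustrative:

	#include <linux/errno.h>
	#include <linux/kprobes.h>
	#include <linux/ptrace.h>
	#include <asm/error-injection.h>

	static int force_einval(struct kprobe *kp, struct pt_regs *regs)
	{
		regs_set_return_value(regs, -EINVAL);	/* plant return value */
		override_function_with_return(regs);	/* ip = just_return_func */
		return 1;	/* tell kprobes we changed regs->ip */
	}
	NOKPROBE_SYMBOL(force_einval);

	static struct kprobe demo_kp = {
		.symbol_name = "some_probed_function",	/* hypothetical target */
		.pre_handler = force_einval,
	};

After register_kprobe(&demo_kp), calls to the probed function return
-EINVAL immediately without running their body.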
diff --git a/arch/x86/net/bpf_jit_comp.c b/arch/x86/net/bpf_jit_comp.c
index 0554e8aef4d5..4923d92f918d 100644
--- a/arch/x86/net/bpf_jit_comp.c
+++ b/arch/x86/net/bpf_jit_comp.c
@@ -15,8 +15,6 @@
 #include <asm/set_memory.h>
 #include <linux/bpf.h>
 
-int bpf_jit_enable __read_mostly;
-
 /*
  * assembly code in arch/x86/net/bpf_jit.S
  */
@@ -154,6 +152,11 @@ static bool is_ereg(u32 reg)
 			     BIT(BPF_REG_AX));
 }
 
+static bool is_axreg(u32 reg)
+{
+	return reg == BPF_REG_0;
+}
+
 /* add modifiers if 'reg' maps to x64 registers r8..r15 */
 static u8 add_1mod(u8 byte, u32 reg)
 {
@@ -447,16 +450,36 @@ static int do_jit(struct bpf_prog *bpf_prog, int *addrs, u8 *image,
 			else if (is_ereg(dst_reg))
 				EMIT1(add_1mod(0x40, dst_reg));
 
+			/* b3 holds the 'normal' opcode; the b2 short form is
+			 * only valid when dst is eax/rax.
+			 */
 			switch (BPF_OP(insn->code)) {
-			case BPF_ADD: b3 = 0xC0; break;
-			case BPF_SUB: b3 = 0xE8; break;
-			case BPF_AND: b3 = 0xE0; break;
-			case BPF_OR: b3 = 0xC8; break;
-			case BPF_XOR: b3 = 0xF0; break;
+			case BPF_ADD:
+				b3 = 0xC0;
+				b2 = 0x05;
+				break;
+			case BPF_SUB:
+				b3 = 0xE8;
+				b2 = 0x2D;
+				break;
+			case BPF_AND:
+				b3 = 0xE0;
+				b2 = 0x25;
+				break;
+			case BPF_OR:
+				b3 = 0xC8;
+				b2 = 0x0D;
+				break;
+			case BPF_XOR:
+				b3 = 0xF0;
+				b2 = 0x35;
+				break;
 			}
 
 			if (is_imm8(imm32))
 				EMIT3(0x83, add_1reg(b3, dst_reg), imm32);
+			else if (is_axreg(dst_reg))
+				EMIT1_off32(b2, imm32);
 			else
 				EMIT2_off32(0x81, add_1reg(b3, dst_reg), imm32);
 			break;
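The new b2 byte selects the one-byte "accumulator" form of the x86
ALU-immediate instructions, which has no ModRM byte. Illustrative byte
sequences (not taken from the patch):

	/* BPF_ALU | BPF_ADD | BPF_K, imm32 = 0x11223344, dst = eax:
	 *
	 *   generic form:  81 c0 44 33 22 11	add eax, 0x11223344
	 *   accumulator:   05 44 33 22 11	add eax, 0x11223344
	 *
	 * so EMIT1_off32(b2, imm32) saves one byte whenever dst is
	 * eax/rax and the immediate does not fit in 8 bits.
	 */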
@@ -545,26 +568,6 @@ static int do_jit(struct bpf_prog *bpf_prog, int *addrs, u8 *image,
 			 */
 			EMIT2(0x31, 0xd2);
 
-			if (BPF_SRC(insn->code) == BPF_X) {
-				/* if (src_reg == 0) return 0 */
-
-				/* cmp r11, 0 */
-				EMIT4(0x49, 0x83, 0xFB, 0x00);
-
-				/* jne .+9 (skip over pop, pop, xor and jmp) */
-				EMIT2(X86_JNE, 1 + 1 + 2 + 5);
-				EMIT1(0x5A); /* pop rdx */
-				EMIT1(0x58); /* pop rax */
-				EMIT2(0x31, 0xc0); /* xor eax, eax */
-
-				/* jmp cleanup_addr
-				 * addrs[i] - 11, because there are 11 bytes
-				 * after this insn: div, mov, pop, pop, mov
-				 */
-				jmp_offset = ctx->cleanup_addr - (addrs[i] - 11);
-				EMIT1_off32(0xE9, jmp_offset);
-			}
-
 			if (BPF_CLASS(insn->code) == BPF_ALU64)
 				/* div r11 */
 				EMIT3(0x49, 0xF7, 0xF3);
@@ -1109,19 +1112,29 @@ common_load:
 	return proglen;
 }
 
+struct x64_jit_data {
+	struct bpf_binary_header *header;
+	int *addrs;
+	u8 *image;
+	int proglen;
+	struct jit_context ctx;
+};
+
 struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog)
 {
 	struct bpf_binary_header *header = NULL;
 	struct bpf_prog *tmp, *orig_prog = prog;
+	struct x64_jit_data *jit_data;
 	int proglen, oldproglen = 0;
 	struct jit_context ctx = {};
 	bool tmp_blinded = false;
+	bool extra_pass = false;
 	u8 *image = NULL;
 	int *addrs;
 	int pass;
 	int i;
 
-	if (!bpf_jit_enable)
+	if (!prog->jit_requested)
 		return orig_prog;
 
 	tmp = bpf_jit_blind_constants(prog);
@@ -1135,10 +1148,28 @@ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog)
 		prog = tmp;
 	}
 
+	jit_data = prog->aux->jit_data;
+	if (!jit_data) {
+		jit_data = kzalloc(sizeof(*jit_data), GFP_KERNEL);
+		if (!jit_data) {
+			prog = orig_prog;
+			goto out;
+		}
+		prog->aux->jit_data = jit_data;
+	}
+	addrs = jit_data->addrs;
+	if (addrs) {
+		ctx = jit_data->ctx;
+		oldproglen = jit_data->proglen;
+		image = jit_data->image;
+		header = jit_data->header;
+		extra_pass = true;
+		goto skip_init_addrs;
+	}
 	addrs = kmalloc(prog->len * sizeof(*addrs), GFP_KERNEL);
 	if (!addrs) {
 		prog = orig_prog;
-		goto out;
+		goto out_addrs;
 	}
 
 	/* Before first pass, make a rough estimation of addrs[]
@@ -1149,6 +1180,7 @@ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog)
 		addrs[i] = proglen;
 	}
 	ctx.cleanup_addr = proglen;
+skip_init_addrs:
 
 	/* JITed image shrinks with every pass and the loop iterates
 	 * until the image stops shrinking. Very large bpf programs
@@ -1189,7 +1221,15 @@ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog)
 
 	if (image) {
 		bpf_flush_icache(header, image + proglen);
-		bpf_jit_binary_lock_ro(header);
+		if (!prog->is_func || extra_pass) {
+			bpf_jit_binary_lock_ro(header);
+		} else {
+			jit_data->addrs = addrs;
+			jit_data->ctx = ctx;
+			jit_data->proglen = proglen;
+			jit_data->image = image;
+			jit_data->header = header;
+		}
 		prog->bpf_func = (void *)image;
 		prog->jited = 1;
 		prog->jited_len = proglen;
@@ -1197,8 +1237,12 @@ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog)
 		prog = orig_prog;
 	}
 
+	if (!prog->is_func || extra_pass) {
 out_addrs:
-	kfree(addrs);
+		kfree(addrs);
+		kfree(jit_data);
+		prog->aux->jit_data = NULL;
+	}
 out:
 	if (tmp_blinded)
 		bpf_jit_prog_release_other(prog, prog == orig_prog ?
diff --git a/drivers/atm/eni.c b/drivers/atm/eni.c
index ce47eb17901d..6470e3c4c990 100644
--- a/drivers/atm/eni.c
+++ b/drivers/atm/eni.c
@@ -473,7 +473,7 @@ static int do_rx_dma(struct atm_vcc *vcc,struct sk_buff *skb,
 		ENI_PRV_POS(skb) = eni_vcc->descr+size+1;
 		skb_queue_tail(&eni_dev->rx_queue,skb);
 		eni_vcc->last = skb;
-rx_enqueued++;
+		rx_enqueued++;
 	}
 	eni_vcc->descr = here;
 	eni_out(dma_wr,MID_DMA_WR_RX);
@@ -715,7 +715,7 @@ static void get_service(struct atm_dev *dev)
 			else eni_dev->slow = vcc;
 			eni_dev->last_slow = vcc;
 		}
-putting++;
+		putting++;
 		ENI_VCC(vcc)->servicing++;
 	}
 }
@@ -744,7 +744,7 @@ static void dequeue_rx(struct atm_dev *dev)
 		}
 		EVENT("dequeued (size=%ld,pos=0x%lx)\n",ENI_PRV_SIZE(skb),
 		    ENI_PRV_POS(skb));
-rx_dequeued++;
+		rx_dequeued++;
 		vcc = ATM_SKB(skb)->vcc;
 		eni_vcc = ENI_VCC(vcc);
 		first = 0;
@@ -1174,7 +1174,7 @@ DPRINTK("doing direct send\n"); /* @@@ well, this doesn't work anyway */
 	DPRINTK("dma_wr set to %d, tx_pos is now %ld\n",dma_wr,tx->tx_pos);
 	eni_out(dma_wr,MID_DMA_WR_TX);
 	skb_queue_tail(&eni_dev->tx_queue,skb);
-queued++;
+	queued++;
 	return enq_ok;
 }
 
@@ -1195,7 +1195,7 @@ static void poll_tx(struct atm_dev *dev)
 				if (res == enq_ok) continue;
 				DPRINTK("re-queuing TX PDU\n");
 				skb_queue_head(&tx->backlog,skb);
-requeued++;
+				requeued++;
 				if (res == enq_jam) return;
 				break;
 			}
@@ -1232,7 +1232,7 @@ static void dequeue_tx(struct atm_dev *dev)
 		else dev_kfree_skb_irq(skb);
 		atomic_inc(&vcc->stats->tx);
 		wake_up(&eni_dev->tx_wait);
-dma_complete++;
+		dma_complete++;
 	}
 }
 
@@ -1555,7 +1555,7 @@ static void eni_tasklet(unsigned long data)
 	}
 	if (events & MID_TX_COMPLETE) {
 		EVENT("INT: TX COMPLETE\n",0,0);
-tx_complete++;
+		tx_complete++;
 		wake_up(&eni_dev->tx_wait);
 		/* poll_rx ? */
 	}
@@ -2069,14 +2069,14 @@ static int eni_send(struct atm_vcc *vcc,struct sk_buff *skb)
 		}
 		*(u32 *) skb->data = htonl(*(u32 *) skb->data);
 	}
-submitted++;
+	submitted++;
 	ATM_SKB(skb)->vcc = vcc;
 	tasklet_disable(&ENI_DEV(vcc->dev)->task);
 	res = do_tx(skb);
 	tasklet_enable(&ENI_DEV(vcc->dev)->task);
 	if (res == enq_ok) return 0;
 	skb_queue_tail(&ENI_VCC(vcc)->tx->backlog,skb);
-backlogged++;
+	backlogged++;
 	tasklet_schedule(&ENI_DEV(vcc->dev)->task);
 	return 0;
 }
diff --git a/drivers/base/property.c b/drivers/base/property.c
index 09eaac9400ed..96aa71c93daf 100644
--- a/drivers/base/property.c
+++ b/drivers/base/property.c
@@ -16,6 +16,7 @@
 #include <linux/of.h>
 #include <linux/of_address.h>
 #include <linux/of_graph.h>
+#include <linux/of_irq.h>
 #include <linux/property.h>
 #include <linux/etherdevice.h>
 #include <linux/phy.h>
@@ -997,6 +998,32 @@ fwnode_get_next_child_node(const struct fwnode_handle *fwnode,
 EXPORT_SYMBOL_GPL(fwnode_get_next_child_node);
 
 /**
+ * fwnode_get_next_available_child_node - Return the next
+ * available child node handle for a node
+ * @fwnode: Firmware node to find the next child node for.
+ * @child: Handle to one of the node's child nodes or a %NULL handle.
+ */
+struct fwnode_handle *
+fwnode_get_next_available_child_node(const struct fwnode_handle *fwnode,
+				     struct fwnode_handle *child)
+{
+	struct fwnode_handle *next_child = child;
+
+	if (!fwnode)
+		return NULL;
+
+	do {
+		next_child = fwnode_get_next_child_node(fwnode, next_child);
+
+		if (!next_child || fwnode_device_is_available(next_child))
+			break;
+	} while (next_child);
+
+	return next_child;
+}
+EXPORT_SYMBOL_GPL(fwnode_get_next_available_child_node);
+
+/**
  * device_get_next_child_node - Return the next child node handle for a device
  * @dev: Device to find the next child node for.
  * @child: Handle to one of the device's child nodes or a null handle.
@@ -1126,21 +1153,21 @@ enum dev_dma_attr device_get_dma_attr(struct device *dev)
 EXPORT_SYMBOL_GPL(device_get_dma_attr);
 
 /**
- * device_get_phy_mode - Get phy mode for given device
- * @dev:	Pointer to the given device
+ * fwnode_get_phy_mode - Get phy mode for given firmware node
+ * @fwnode:	Pointer to the given node
  *
  * The function gets the phy interface string from the 'phy-mode' or
  * 'phy-connection-type' property and returns its index in the phy_modes
  * table, or an errno on error.
  */
-int device_get_phy_mode(struct device *dev)
+int fwnode_get_phy_mode(struct fwnode_handle *fwnode)
 {
 	const char *pm;
 	int err, i;
 
-	err = device_property_read_string(dev, "phy-mode", &pm);
+	err = fwnode_property_read_string(fwnode, "phy-mode", &pm);
 	if (err < 0)
-		err = device_property_read_string(dev,
+		err = fwnode_property_read_string(fwnode,
 						  "phy-connection-type", &pm);
 	if (err < 0)
 		return err;
@@ -1151,13 +1178,27 @@ int device_get_phy_mode(struct device *dev)
 
 	return -ENODEV;
 }
+EXPORT_SYMBOL_GPL(fwnode_get_phy_mode);
+
+/**
+ * device_get_phy_mode - Get phy mode for given device
+ * @dev:	Pointer to the given device
+ *
+ * The function gets the phy interface string from the 'phy-mode' or
+ * 'phy-connection-type' property and returns its index in the phy_modes
+ * table, or an errno on error.
+ */
+int device_get_phy_mode(struct device *dev)
+{
+	return fwnode_get_phy_mode(dev_fwnode(dev));
+}
 EXPORT_SYMBOL_GPL(device_get_phy_mode);
 
-static void *device_get_mac_addr(struct device *dev,
+static void *fwnode_get_mac_addr(struct fwnode_handle *fwnode,
 				 const char *name, char *addr,
 				 int alen)
 {
-	int ret = device_property_read_u8_array(dev, name, addr, alen);
+	int ret = fwnode_property_read_u8_array(fwnode, name, addr, alen);
 
 	if (ret == 0 && alen == ETH_ALEN && is_valid_ether_addr(addr))
 		return addr;
@@ -1165,8 +1206,8 @@ static void *device_get_mac_addr(struct device *dev,
 }
 
 /**
- * device_get_mac_address - Get the MAC for a given device
- * @dev:	Pointer to the device
+ * fwnode_get_mac_address - Get the MAC from the firmware node
+ * @fwnode:	Pointer to the firmware node
  * @addr:	Address of buffer to store the MAC in
  * @alen:	Length of the buffer pointed to by addr, should be ETH_ALEN
  *
@@ -1187,23 +1228,60 @@ static void *device_get_mac_addr(struct device *dev,
  * In this case, the real MAC is in 'local-mac-address', and 'mac-address'
  * exists but is all zeros.
 */
-void *device_get_mac_address(struct device *dev, char *addr, int alen)
+void *fwnode_get_mac_address(struct fwnode_handle *fwnode, char *addr, int alen)
 {
 	char *res;
 
-	res = device_get_mac_addr(dev, "mac-address", addr, alen);
+	res = fwnode_get_mac_addr(fwnode, "mac-address", addr, alen);
 	if (res)
 		return res;
 
-	res = device_get_mac_addr(dev, "local-mac-address", addr, alen);
+	res = fwnode_get_mac_addr(fwnode, "local-mac-address", addr, alen);
 	if (res)
 		return res;
 
-	return device_get_mac_addr(dev, "address", addr, alen);
+	return fwnode_get_mac_addr(fwnode, "address", addr, alen);
+}
+EXPORT_SYMBOL(fwnode_get_mac_address);
+
+/**
+ * device_get_mac_address - Get the MAC for a given device
+ * @dev:	Pointer to the device
+ * @addr:	Address of buffer to store the MAC in
+ * @alen:	Length of the buffer pointed to by addr, should be ETH_ALEN
+ */
+void *device_get_mac_address(struct device *dev, char *addr, int alen)
+{
+	return fwnode_get_mac_address(dev_fwnode(dev), addr, alen);
 }
 EXPORT_SYMBOL(device_get_mac_address);
 
 /**
+ * fwnode_irq_get - Get IRQ directly from a fwnode
+ * @fwnode:	Pointer to the firmware node
+ * @index:	Zero-based index of the IRQ
+ *
+ * Returns the Linux IRQ number on success. Other values are determined
+ * according to the acpi_/of_irq_get() operation.
+ */
+int fwnode_irq_get(struct fwnode_handle *fwnode, unsigned int index)
+{
+	struct device_node *of_node = to_of_node(fwnode);
+	struct resource res;
+	int ret;
+
+	if (IS_ENABLED(CONFIG_OF) && of_node)
+		return of_irq_get(of_node, index);
+
+	ret = acpi_irq_get(ACPI_HANDLE_FWNODE(fwnode), index, &res);
+	if (ret)
+		return ret;
+
+	return res.start;
+}
+EXPORT_SYMBOL(fwnode_irq_get);
+
+/**
  * device_graph_get_next_endpoint - Get next endpoint firmware node
  * @fwnode: Pointer to the parent firmware node
  * @prev: Previous endpoint node or %NULL to get the first
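A hedged sketch of a driver probe consuming the new fwnode helpers
exported above (demo_probe and its error handling are made up for
illustration):

	#include <linux/etherdevice.h>
	#include <linux/property.h>

	static int demo_probe(struct device *dev)
	{
		struct fwnode_handle *fwnode = dev_fwnode(dev);
		char mac[ETH_ALEN];
		int irq, phy_mode;

		phy_mode = fwnode_get_phy_mode(fwnode);	/* index into phy_modes */
		if (phy_mode < 0)
			return phy_mode;

		if (!fwnode_get_mac_address(fwnode, mac, ETH_ALEN))
			eth_random_addr((u8 *)mac);	/* no MAC in firmware */

		irq = fwnode_irq_get(fwnode, 0);	/* first interrupt */
		if (irq < 0)
			return irq;

		return 0;
	}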
diff --git a/drivers/bcma/driver_pcie2.c b/drivers/bcma/driver_pcie2.c
index b1a6e327cb23..cf889fc62ac7 100644
--- a/drivers/bcma/driver_pcie2.c
+++ b/drivers/bcma/driver_pcie2.c
@@ -83,7 +83,8 @@ static void bcma_core_pcie2_hw_ltr_war(struct bcma_drv_pcie2 *pcie2)
 		bcma_core_pcie2_set_ltr_vals(pcie2);
 
 		/* TODO:
-		si_core_wrapperreg(pcie2, 3, 0x60, 0x8080, 0); */
+		 * si_core_wrapperreg(pcie2, 3, 0x60, 0x8080, 0);
+		 */
 
 		/* enable the LTR */
 		devstsctr2 |= PCIE2_CAP_DEVSTSCTRL2_LTRENAB;
diff --git a/drivers/bluetooth/Kconfig b/drivers/bluetooth/Kconfig
index 60e1c7d6986d..07e55cd8f8c8 100644
--- a/drivers/bluetooth/Kconfig
+++ b/drivers/bluetooth/Kconfig
@@ -31,6 +31,16 @@ config BT_HCIBTUSB
 	  Say Y here to compile support for Bluetooth USB devices into the
 	  kernel or say M to compile it as module (btusb).
 
+config BT_HCIBTUSB_AUTOSUSPEND
+	bool "Enable USB autosuspend for Bluetooth USB devices by default"
+	depends on BT_HCIBTUSB
+	help
+	  Say Y here to enable USB autosuspend for Bluetooth USB devices by
+	  default.
+
+	  This can be overridden by passing btusb.enable_autosuspend=[y|n]
+	  on the kernel commandline.
+
 config BT_HCIBTUSB_BCM
 	bool "Broadcom protocol support"
 	depends on BT_HCIBTUSB
@@ -67,6 +77,7 @@ config BT_HCIBTSDIO
 config BT_HCIUART
 	tristate "HCI UART driver"
 	depends on SERIAL_DEV_BUS || !SERIAL_DEV_BUS
+	depends on NVMEM || !NVMEM
 	depends on TTY
 	help
 	  Bluetooth HCI UART driver.
@@ -97,6 +108,7 @@ config BT_HCIUART_NOKIA
 	tristate "UART Nokia H4+ protocol support"
 	depends on BT_HCIUART
 	depends on BT_HCIUART_SERDEV
+	depends on GPIOLIB
 	depends on PM
 	select BT_HCIUART_H4
 	select BT_BCM
@@ -158,6 +170,7 @@ config BT_HCIUART_3WIRE
 config BT_HCIUART_INTEL
 	bool "Intel protocol support"
 	depends on BT_HCIUART
+	depends on GPIOLIB
 	select BT_HCIUART_H4
 	select BT_INTEL
 	help
@@ -171,6 +184,7 @@ config BT_HCIUART_BCM
 	depends on BT_HCIUART
 	depends on BT_HCIUART_SERDEV
 	depends on (!ACPI || SERIAL_DEV_CTRL_TTYPORT)
+	depends on GPIOLIB
 	select BT_HCIUART_H4
 	select BT_BCM
 	help
diff --git a/drivers/bluetooth/bluecard_cs.c b/drivers/bluetooth/bluecard_cs.c
index d513ef4743dc..82437a69f99c 100644
--- a/drivers/bluetooth/bluecard_cs.c
+++ b/drivers/bluetooth/bluecard_cs.c
@@ -302,9 +302,7 @@ static void bluecard_write_wakeup(struct bluecard_info *info)
 			}
 
 			/* Wait until the command reaches the baseband */
-			prepare_to_wait(&wq, &wait, TASK_INTERRUPTIBLE);
-			schedule_timeout(HZ/10);
-			finish_wait(&wq, &wait);
+			mdelay(100);
 
 			/* Set baud on baseband */
 			info->ctrl_reg &= ~0x03;
@@ -316,9 +314,7 @@ static void bluecard_write_wakeup(struct bluecard_info *info)
 			outb(info->ctrl_reg, iobase + REG_CONTROL);
 
 			/* Wait before the next HCI packet can be send */
-			prepare_to_wait(&wq, &wait, TASK_INTERRUPTIBLE);
-			schedule_timeout(HZ);
-			finish_wait(&wq, &wait);
+			mdelay(1000);
 		}
 
 		if (len == skb->len) {
diff --git a/drivers/bluetooth/bpa10x.c b/drivers/bluetooth/bpa10x.c
index 7971bfbd4321..801ea4ca65e4 100644
--- a/drivers/bluetooth/bpa10x.c
+++ b/drivers/bluetooth/bpa10x.c
@@ -259,7 +259,7 @@ static int bpa10x_flush(struct hci_dev *hdev)
 
 static int bpa10x_setup(struct hci_dev *hdev)
 {
-	const u8 req[] = { 0x07 };
+	static const u8 req[] = { 0x07 };
 	struct sk_buff *skb;
 
 	BT_DBG("%s", hdev->name);
diff --git a/drivers/bluetooth/btbcm.c b/drivers/bluetooth/btbcm.c
index afa4cb3b16e3..6659f113042c 100644
--- a/drivers/bluetooth/btbcm.c
+++ b/drivers/bluetooth/btbcm.c
@@ -323,6 +323,7 @@ static const struct {
 	{ 0x410e, "BCM43341B0"	},	/* 002.001.014 */
 	{ 0x4406, "BCM4324B3"	},	/* 002.004.006 */
 	{ 0x610c, "BCM4354"	},	/* 003.001.012 */
+	{ 0x2122, "BCM4343A0"	},	/* 001.001.034 */
 	{ 0x2209, "BCM43430A1"  },	/* 001.002.009 */
 	{ 0x6119, "BCM4345C0"	},	/* 003.001.025 */
 	{ 0x230f, "BCM4356A2"	},	/* 001.003.015 */
diff --git a/drivers/bluetooth/btbcm.h b/drivers/bluetooth/btbcm.h
index d9e6b41658e5..cfe6ad4cc621 100644
--- a/drivers/bluetooth/btbcm.h
+++ b/drivers/bluetooth/btbcm.h
@@ -44,8 +44,8 @@ struct bcm_set_sleep_mode {
 	__u8 tristate_control;
 	__u8 usb_auto_sleep;
 	__u8 usb_resume_timeout;
-	__u8 pulsed_host_wake;
 	__u8 break_to_host;
+	__u8 pulsed_host_wake;
 } __packed;
 
 struct bcm_set_pcm_int_params {
diff --git a/drivers/bluetooth/btintel.c b/drivers/bluetooth/btintel.c
index 4459555c9d88..5270d5513201 100644
--- a/drivers/bluetooth/btintel.c
+++ b/drivers/bluetooth/btintel.c
@@ -24,6 +24,7 @@
 #include <linux/module.h>
 #include <linux/firmware.h>
 #include <linux/regmap.h>
+#include <asm/unaligned.h>
 
 #include <net/bluetooth/bluetooth.h>
 #include <net/bluetooth/hci_core.h>
@@ -75,7 +76,7 @@ EXPORT_SYMBOL_GPL(btintel_check_bdaddr);
 
 int btintel_enter_mfg(struct hci_dev *hdev)
 {
-	const u8 param[] = { 0x01, 0x00 };
+	static const u8 param[] = { 0x01, 0x00 };
 	struct sk_buff *skb;
 
 	skb = __hci_cmd_sync(hdev, 0xfc11, 2, param, HCI_CMD_TIMEOUT);
@@ -569,6 +570,160 @@ struct regmap *btintel_regmap_init(struct hci_dev *hdev, u16 opcode_read,
 }
 EXPORT_SYMBOL_GPL(btintel_regmap_init);
 
+int btintel_send_intel_reset(struct hci_dev *hdev, u32 boot_param)
+{
+	struct intel_reset params = { 0x00, 0x01, 0x00, 0x01, 0x00000000 };
+	struct sk_buff *skb;
+
+	params.boot_param = cpu_to_le32(boot_param);
+
+	skb = __hci_cmd_sync(hdev, 0xfc01, sizeof(params), &params,
+			     HCI_INIT_TIMEOUT);
+	if (IS_ERR(skb)) {
+		bt_dev_err(hdev, "Failed to send Intel Reset command");
+		return PTR_ERR(skb);
+	}
+
+	kfree_skb(skb);
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(btintel_send_intel_reset);
+
+int btintel_read_boot_params(struct hci_dev *hdev,
+			     struct intel_boot_params *params)
+{
+	struct sk_buff *skb;
+
+	skb = __hci_cmd_sync(hdev, 0xfc0d, 0, NULL, HCI_INIT_TIMEOUT);
+	if (IS_ERR(skb)) {
+		bt_dev_err(hdev, "Reading Intel boot parameters failed (%ld)",
+			   PTR_ERR(skb));
+		return PTR_ERR(skb);
+	}
+
+	if (skb->len != sizeof(*params)) {
+		bt_dev_err(hdev, "Intel boot parameters size mismatch");
+		kfree_skb(skb);
+		return -EILSEQ;
+	}
+
+	memcpy(params, skb->data, sizeof(*params));
+
+	kfree_skb(skb);
+
+	if (params->status) {
+		bt_dev_err(hdev, "Intel boot parameters command failed (%02x)",
+			   params->status);
+		return -bt_to_errno(params->status);
+	}
+
+	bt_dev_info(hdev, "Device revision is %u",
+		    le16_to_cpu(params->dev_revid));
+
+	bt_dev_info(hdev, "Secure boot is %s",
+		    params->secure_boot ? "enabled" : "disabled");
+
+	bt_dev_info(hdev, "OTP lock is %s",
+		    params->otp_lock ? "enabled" : "disabled");
+
+	bt_dev_info(hdev, "API lock is %s",
+		    params->api_lock ? "enabled" : "disabled");
+
+	bt_dev_info(hdev, "Debug lock is %s",
+		    params->debug_lock ? "enabled" : "disabled");
+
+	bt_dev_info(hdev, "Minimum firmware build %u week %u %u",
+		    params->min_fw_build_nn, params->min_fw_build_cw,
+		    2000 + params->min_fw_build_yy);
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(btintel_read_boot_params);
+
+int btintel_download_firmware(struct hci_dev *hdev, const struct firmware *fw,
+			      u32 *boot_param)
+{
+	int err;
+	const u8 *fw_ptr;
+	u32 frag_len;
+
+	/* Start the firmware download transaction with the Init fragment
+	 * represented by the 128 bytes of CSS header.
+	 */
+	err = btintel_secure_send(hdev, 0x00, 128, fw->data);
+	if (err < 0) {
+		bt_dev_err(hdev, "Failed to send firmware header (%d)", err);
+		goto done;
+	}
+
+	/* Send the 256 bytes of public key information from the firmware
+	 * as the PKey fragment.
+	 */
+	err = btintel_secure_send(hdev, 0x03, 256, fw->data + 128);
+	if (err < 0) {
+		bt_dev_err(hdev, "Failed to send firmware pkey (%d)", err);
+		goto done;
+	}
+
+	/* Send the 256 bytes of signature information from the firmware
+	 * as the Sign fragment.
+	 */
+	err = btintel_secure_send(hdev, 0x02, 256, fw->data + 388);
+	if (err < 0) {
+		bt_dev_err(hdev, "Failed to send firmware signature (%d)", err);
+		goto done;
+	}
+
+	fw_ptr = fw->data + 644;
+	frag_len = 0;
+
+	while (fw_ptr - fw->data < fw->size) {
+		struct hci_command_hdr *cmd = (void *)(fw_ptr + frag_len);
+
+		/* Each SKU has a different reset parameter to use in the
+		 * HCI_Intel_Reset command and it is embedded in the firmware
+		 * data. So, instead of using a static value per SKU, check
+		 * the firmware data and save it for later use.
+		 */
+		if (le16_to_cpu(cmd->opcode) == 0xfc0e) {
+			/* The boot parameter is the first 32-bit value
+			 * and the remaining 3 octets are reserved.
+			 */
+			*boot_param = get_unaligned_le32(fw_ptr + sizeof(*cmd));
+
+			bt_dev_dbg(hdev, "boot_param=0x%x", *boot_param);
+		}
+
+		frag_len += sizeof(*cmd) + cmd->plen;
+
+		/* The parameter length of the secure send command requires
+		 * a 4 byte alignment. It so happens that the firmware file
+		 * contains proper Intel_NOP commands to align the fragments
+		 * as needed.
+		 *
+		 * Send the set of commands with 4 byte alignment from the
+		 * firmware data buffer as a single Data fragment.
+		 */
+		if (!(frag_len % 4)) {
+			err = btintel_secure_send(hdev, 0x01, frag_len, fw_ptr);
+			if (err < 0) {
+				bt_dev_err(hdev,
+					   "Failed to send firmware data (%d)",
+					   err);
+				goto done;
+			}
+
+			fw_ptr += frag_len;
+			frag_len = 0;
+		}
+	}
+
+done:
+	return err;
+}
+EXPORT_SYMBOL_GPL(btintel_download_firmware);
+
 MODULE_AUTHOR("Marcel Holtmann <marcel@holtmann.org>");
 MODULE_DESCRIPTION("Bluetooth support for Intel devices ver " VERSION);
 MODULE_VERSION(VERSION);
diff --git a/drivers/bluetooth/btintel.h b/drivers/bluetooth/btintel.h
index 1e8955aaafed..41c642cc523f 100644
--- a/drivers/bluetooth/btintel.h
+++ b/drivers/bluetooth/btintel.h
@@ -69,6 +69,14 @@ struct intel_secure_send_result {
 	__u8     status;
 } __packed;
 
+struct intel_reset {
+	__u8     reset_type;
+	__u8     patch_enable;
+	__u8     ddc_reload;
+	__u8     boot_option;
+	__le32   boot_param;
+} __packed;
+
 #if IS_ENABLED(CONFIG_BT_INTEL)
 
 int btintel_check_bdaddr(struct hci_dev *hdev);
@@ -89,7 +97,11 @@ int btintel_read_version(struct hci_dev *hdev, struct intel_version *ver);
 
 struct regmap *btintel_regmap_init(struct hci_dev *hdev, u16 opcode_read,
 				   u16 opcode_write);
-
+int btintel_send_intel_reset(struct hci_dev *hdev, u32 boot_param);
+int btintel_read_boot_params(struct hci_dev *hdev,
+			     struct intel_boot_params *params);
+int btintel_download_firmware(struct hci_dev *dev, const struct firmware *fw,
+			      u32 *boot_param);
 #else
 
 static inline int btintel_check_bdaddr(struct hci_dev *hdev)
@@ -165,4 +177,23 @@ static inline struct regmap *btintel_regmap_init(struct hci_dev *hdev,
 {
 	return ERR_PTR(-EINVAL);
 }
+
+static inline int btintel_send_intel_reset(struct hci_dev *hdev,
+					   u32 reset_param)
+{
+	return -EOPNOTSUPP;
+}
+
+static inline int btintel_read_boot_params(struct hci_dev *hdev,
+					   struct intel_boot_params *params)
+{
+	return -EOPNOTSUPP;
+}
+
+static inline int btintel_download_firmware(struct hci_dev *dev,
+					    const struct firmware *fw,
+					    u32 *boot_param)
+{
+	return -EOPNOTSUPP;
+}
 #endif
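Condensed shape of the consumer flow these three helpers enable, with
error paths and the event-driven waits trimmed; the full sequence is in
the btusb hunk further below:

	#include <linux/firmware.h>
	#include "btintel.h"

	static int demo_intel_setup(struct hci_dev *hdev,
				    const struct firmware *fw)
	{
		struct intel_boot_params params;
		u32 boot_param = 0;
		int err;

		err = btintel_read_boot_params(hdev, &params);
		if (err)
			return err;
		/* ... pick and request the .sfi firmware based on params ... */
		err = btintel_download_firmware(hdev, fw, &boot_param);
		if (err < 0)
			return err;
		/* ... wait for the firmware-loaded bootup event ... */
		return btintel_send_intel_reset(hdev, boot_param);
	}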
diff --git a/drivers/bluetooth/btqcomsmd.c b/drivers/bluetooth/btqcomsmd.c
index 663bed63b871..2c9a5fc9137d 100644
--- a/drivers/bluetooth/btqcomsmd.c
+++ b/drivers/bluetooth/btqcomsmd.c
@@ -88,7 +88,8 @@ static int btqcomsmd_send(struct hci_dev *hdev, struct sk_buff *skb)
 		break;
 	}
 
-	kfree_skb(skb);
+	if (!ret)
+		kfree_skb(skb);
 
 	return ret;
 }
diff --git a/drivers/bluetooth/btsdio.c b/drivers/bluetooth/btsdio.c
index c8e945d19ffe..20142bc77554 100644
--- a/drivers/bluetooth/btsdio.c
+++ b/drivers/bluetooth/btsdio.c
@@ -31,6 +31,7 @@
 #include <linux/errno.h>
 #include <linux/skbuff.h>
 
+#include <linux/mmc/host.h>
 #include <linux/mmc/sdio_ids.h>
 #include <linux/mmc/sdio_func.h>
 
@@ -292,6 +293,14 @@ static int btsdio_probe(struct sdio_func *func,
 		tuple = tuple->next;
 	}
 
+	/* BCM43341 devices soldered onto the PCB (non-removable) use a
+	 * UART connection for Bluetooth; ignore the BT SDIO interface.
+	 */
+	if (func->vendor == SDIO_VENDOR_ID_BROADCOM &&
+	    func->device == SDIO_DEVICE_ID_BROADCOM_43341 &&
+	    !mmc_card_is_removable(func->card->host))
+		return -ENODEV;
+
 	data = devm_kzalloc(&func->dev, sizeof(*data), GFP_KERNEL);
 	if (!data)
 		return -ENOMEM;
diff --git a/drivers/bluetooth/btusb.c b/drivers/bluetooth/btusb.c
index f7120c9eb9bd..2a55380ad730 100644
--- a/drivers/bluetooth/btusb.c
+++ b/drivers/bluetooth/btusb.c
@@ -23,6 +23,7 @@
 
 #include <linux/module.h>
 #include <linux/usb.h>
+#include <linux/usb/quirks.h>
 #include <linux/firmware.h>
 #include <linux/of_device.h>
 #include <linux/of_irq.h>
@@ -40,6 +41,7 @@
 
 static bool disable_scofix;
 static bool force_scofix;
+static bool enable_autosuspend = IS_ENABLED(CONFIG_BT_HCIBTUSB_AUTOSUSPEND);
 
 static bool reset = true;
 
@@ -263,6 +265,7 @@ static const struct usb_device_id blacklist_table[] = {
 	/* QCA ROME chipset */
 	{ USB_DEVICE(0x0cf3, 0xe007), .driver_info = BTUSB_QCA_ROME },
 	{ USB_DEVICE(0x0cf3, 0xe009), .driver_info = BTUSB_QCA_ROME },
+	{ USB_DEVICE(0x0cf3, 0xe010), .driver_info = BTUSB_QCA_ROME },
 	{ USB_DEVICE(0x0cf3, 0xe300), .driver_info = BTUSB_QCA_ROME },
 	{ USB_DEVICE(0x0cf3, 0xe301), .driver_info = BTUSB_QCA_ROME },
 	{ USB_DEVICE(0x0cf3, 0xe360), .driver_info = BTUSB_QCA_ROME },
@@ -270,6 +273,7 @@ static const struct usb_device_id blacklist_table[] = {
 	{ USB_DEVICE(0x0489, 0xe09f), .driver_info = BTUSB_QCA_ROME },
 	{ USB_DEVICE(0x0489, 0xe0a2), .driver_info = BTUSB_QCA_ROME },
 	{ USB_DEVICE(0x04ca, 0x3011), .driver_info = BTUSB_QCA_ROME },
+	{ USB_DEVICE(0x04ca, 0x3015), .driver_info = BTUSB_QCA_ROME },
 	{ USB_DEVICE(0x04ca, 0x3016), .driver_info = BTUSB_QCA_ROME },
 
 	/* Broadcom BCM2035 */
@@ -387,9 +391,8 @@ static const struct usb_device_id blacklist_table[] = {
 #define BTUSB_FIRMWARE_LOADED	7
 #define BTUSB_FIRMWARE_FAILED	8
 #define BTUSB_BOOTING		9
-#define BTUSB_RESET_RESUME	10
-#define BTUSB_DIAG_RUNNING	11
-#define BTUSB_OOB_WAKE_ENABLED	12
+#define BTUSB_DIAG_RUNNING	10
+#define BTUSB_OOB_WAKE_ENABLED	11
 
 struct btusb_data {
 	struct hci_dev       *hdev;
@@ -2006,15 +2009,11 @@ static int btusb_send_frame_intel(struct hci_dev *hdev, struct sk_buff *skb)
 
 static int btusb_setup_intel_new(struct hci_dev *hdev)
 {
-	static const u8 reset_param[] = { 0x00, 0x01, 0x00, 0x01,
-					  0x00, 0x08, 0x04, 0x00 };
 	struct btusb_data *data = hci_get_drvdata(hdev);
-	struct sk_buff *skb;
 	struct intel_version ver;
-	struct intel_boot_params *params;
+	struct intel_boot_params params;
 	const struct firmware *fw;
-	const u8 *fw_ptr;
-	u32 frag_len;
+	u32 boot_param;
 	char fwname[64];
 	ktime_t calltime, delta, rettime;
 	unsigned long long duration;
@@ -2022,6 +2021,12 @@ static int btusb_setup_intel_new(struct hci_dev *hdev)
 
 	BT_DBG("%s", hdev->name);
 
+	/* Set the default boot parameter to 0x0. It is updated to the
+	 * SKU-specific boot parameter found in the Intel_Write_Boot_Params
+	 * command embedded in the firmware during download.
+	 */
+	boot_param = 0x00000000;
+
 	calltime = ktime_get();
 
 	/* Read the Intel version information to determine if the device
@@ -2092,55 +2097,24 @@ static int btusb_setup_intel_new(struct hci_dev *hdev)
 	/* Read the secure boot parameters to identify the operating
 	 * details of the bootloader.
 	 */
-	skb = __hci_cmd_sync(hdev, 0xfc0d, 0, NULL, HCI_INIT_TIMEOUT);
-	if (IS_ERR(skb)) {
-		BT_ERR("%s: Reading Intel boot parameters failed (%ld)",
-		       hdev->name, PTR_ERR(skb));
-		return PTR_ERR(skb);
-	}
-
-	if (skb->len != sizeof(*params)) {
-		BT_ERR("%s: Intel boot parameters size mismatch", hdev->name);
-		kfree_skb(skb);
-		return -EILSEQ;
-	}
-
-	params = (struct intel_boot_params *)skb->data;
-
-	bt_dev_info(hdev, "Device revision is %u",
-		    le16_to_cpu(params->dev_revid));
-
-	bt_dev_info(hdev, "Secure boot is %s",
-		    params->secure_boot ? "enabled" : "disabled");
-
-	bt_dev_info(hdev, "OTP lock is %s",
-		    params->otp_lock ? "enabled" : "disabled");
-
-	bt_dev_info(hdev, "API lock is %s",
-		    params->api_lock ? "enabled" : "disabled");
-
-	bt_dev_info(hdev, "Debug lock is %s",
-		    params->debug_lock ? "enabled" : "disabled");
-
-	bt_dev_info(hdev, "Minimum firmware build %u week %u %u",
-		    params->min_fw_build_nn, params->min_fw_build_cw,
-		    2000 + params->min_fw_build_yy);
+	err = btintel_read_boot_params(hdev, &params);
+	if (err)
+		return err;
 
 	/* It is required that every single firmware fragment is acknowledged
 	 * with a command complete event. If the boot parameters indicate
 	 * that this bootloader does not send them, then abort the setup.
 	 */
-	if (params->limited_cce != 0x00) {
+	if (params.limited_cce != 0x00) {
 		BT_ERR("%s: Unsupported Intel firmware loading method (%u)",
-		       hdev->name, params->limited_cce);
-		kfree_skb(skb);
+		       hdev->name, params.limited_cce);
 		return -EINVAL;
 	}
 
 	/* If the OTP has no valid Bluetooth device address, then there will
 	 * also be no valid address for the operational firmware.
 	 */
-	if (!bacmp(&params->otp_bdaddr, BDADDR_ANY)) {
+	if (!bacmp(&params.otp_bdaddr, BDADDR_ANY)) {
 		bt_dev_info(hdev, "No device address configured");
 		set_bit(HCI_QUIRK_INVALID_BDADDR, &hdev->quirks);
 	}
@@ -2171,7 +2145,7 @@ static int btusb_setup_intel_new(struct hci_dev *hdev)
 	case 0x0c:	/* WsP */
 		snprintf(fwname, sizeof(fwname), "intel/ibt-%u-%u.sfi",
 			 le16_to_cpu(ver.hw_variant),
-			 le16_to_cpu(params->dev_revid));
+			 le16_to_cpu(params.dev_revid));
 		break;
 	case 0x11:	/* JfP */
 	case 0x12:	/* ThP */
@@ -2189,7 +2163,6 @@ static int btusb_setup_intel_new(struct hci_dev *hdev)
 	if (err < 0) {
 		BT_ERR("%s: Failed to load Intel firmware file (%d)",
 		       hdev->name, err);
-		kfree_skb(skb);
 		return err;
 	}
 
@@ -2203,7 +2176,7 @@ static int btusb_setup_intel_new(struct hci_dev *hdev)
 	case 0x0c:	/* WsP */
 		snprintf(fwname, sizeof(fwname), "intel/ibt-%u-%u.ddc",
 			 le16_to_cpu(ver.hw_variant),
-			 le16_to_cpu(params->dev_revid));
+			 le16_to_cpu(params.dev_revid));
 		break;
 	case 0x11:	/* JfP */
 	case 0x12:	/* ThP */
@@ -2217,8 +2190,6 @@ static int btusb_setup_intel_new(struct hci_dev *hdev)
 		return -EINVAL;
 	}
 
-	kfree_skb(skb);
-
 	if (fw->size < 644) {
 		BT_ERR("%s: Invalid size of firmware file (%zu)",
 		       hdev->name, fw->size);
@@ -2228,64 +2199,10 @@ static int btusb_setup_intel_new(struct hci_dev *hdev)
 
 	set_bit(BTUSB_DOWNLOADING, &data->flags);
 
-	/* Start the firmware download transaction with the Init fragment
-	 * represented by the 128 bytes of CSS header.
-	 */
-	err = btintel_secure_send(hdev, 0x00, 128, fw->data);
-	if (err < 0) {
-		BT_ERR("%s: Failed to send firmware header (%d)",
-		       hdev->name, err);
-		goto done;
-	}
-
-	/* Send the 256 bytes of public key information from the firmware
-	 * as the PKey fragment.
-	 */
-	err = btintel_secure_send(hdev, 0x03, 256, fw->data + 128);
-	if (err < 0) {
-		BT_ERR("%s: Failed to send firmware public key (%d)",
-		       hdev->name, err);
-		goto done;
-	}
-
-	/* Send the 256 bytes of signature information from the firmware
-	 * as the Sign fragment.
-	 */
-	err = btintel_secure_send(hdev, 0x02, 256, fw->data + 388);
-	if (err < 0) {
-		BT_ERR("%s: Failed to send firmware signature (%d)",
-		       hdev->name, err);
+	/* Start firmware downloading and get boot parameter */
+	err = btintel_download_firmware(hdev, fw, &boot_param);
+	if (err < 0)
 		goto done;
-	}
-
-	fw_ptr = fw->data + 644;
-	frag_len = 0;
-
-	while (fw_ptr - fw->data < fw->size) {
-		struct hci_command_hdr *cmd = (void *)(fw_ptr + frag_len);
-
-		frag_len += sizeof(*cmd) + cmd->plen;
-
-		/* The parameter length of the secure send command requires
-		 * a 4 byte alignment. It happens so that the firmware file
-		 * contains proper Intel_NOP commands to align the fragments
-		 * as needed.
-		 *
-		 * Send set of commands with 4 byte alignment from the
-		 * firmware data buffer as a single Data fragement.
-		 */
-		if (!(frag_len % 4)) {
-			err = btintel_secure_send(hdev, 0x01, frag_len, fw_ptr);
-			if (err < 0) {
-				BT_ERR("%s: Failed to send firmware data (%d)",
-				       hdev->name, err);
-				goto done;
-			}
-
-			fw_ptr += frag_len;
-			frag_len = 0;
-		}
-	}
 
 	set_bit(BTUSB_FIRMWARE_LOADED, &data->flags);
 
@@ -2338,12 +2255,9 @@ done:
 
 	set_bit(BTUSB_BOOTING, &data->flags);
 
-	skb = __hci_cmd_sync(hdev, 0xfc01, sizeof(reset_param), reset_param,
-			     HCI_INIT_TIMEOUT);
-	if (IS_ERR(skb))
-		return PTR_ERR(skb);
-
-	kfree_skb(skb);
+	err = btintel_send_intel_reset(hdev, boot_param);
+	if (err)
+		return err;
 
 	/* The bootloader will not indicate when the device is ready. This
 	 * is done by the operational firmware sending bootup notification.
@@ -3120,9 +3034,9 @@ static int btusb_probe(struct usb_interface *intf,
 
 		/* QCA Rome devices lose their updated firmware over suspend,
 		 * but the USB hub doesn't notice any status change.
 		 * Explicitly request a device reset on resume.
 		 */
-		set_bit(BTUSB_RESET_RESUME, &data->flags);
+		interface_to_usbdev(intf)->quirks |= USB_QUIRK_RESET_RESUME;
 	}
 
 #ifdef CONFIG_BT_HCIBTUSB_RTL
@@ -3133,7 +3047,7 @@ static int btusb_probe(struct usb_interface *intf,
 		 * but the USB hub doesn't notice any status change.
 		 * Explicitly request a device reset on resume.
 		 */
-		set_bit(BTUSB_RESET_RESUME, &data->flags);
+		interface_to_usbdev(intf)->quirks |= USB_QUIRK_RESET_RESUME;
 	}
 #endif
 
@@ -3213,6 +3127,9 @@ static int btusb_probe(struct usb_interface *intf,
 	}
 #endif
 
+	if (enable_autosuspend)
+		usb_enable_autosuspend(data->udev);
+
 	err = hci_register_dev(hdev);
 	if (err < 0)
 		goto out_free_dev;
@@ -3299,14 +3216,6 @@ static int btusb_suspend(struct usb_interface *intf, pm_message_t message)
 		enable_irq(data->oob_wake_irq);
 	}
 
-	/* Optionally request a device reset on resume, but only when
-	 * wakeups are disabled. If wakeups are enabled we assume the
-	 * device will stay powered up throughout suspend.
-	 */
-	if (test_bit(BTUSB_RESET_RESUME, &data->flags) &&
-	    !device_may_wakeup(&data->udev->dev))
-		data->udev->reset_resume = 1;
-
 	return 0;
 }
 
@@ -3425,6 +3334,9 @@ MODULE_PARM_DESC(disable_scofix, "Disable fixup of wrong SCO buffer size");
 module_param(force_scofix, bool, 0644);
 MODULE_PARM_DESC(force_scofix, "Force fixup of wrong SCO buffers size");
 
+module_param(enable_autosuspend, bool, 0644);
+MODULE_PARM_DESC(enable_autosuspend, "Enable USB autosuspend by default");
+
 module_param(reset, bool, 0644);
 MODULE_PARM_DESC(reset, "Send HCI reset command on initialization");
 
diff --git a/drivers/bluetooth/hci_bcm.c b/drivers/bluetooth/hci_bcm.c
index 707c2d1b84c7..0438a64b8185 100644
--- a/drivers/bluetooth/hci_bcm.c
+++ b/drivers/bluetooth/hci_bcm.c
@@ -29,6 +29,7 @@
 #include <linux/acpi.h>
 #include <linux/of.h>
 #include <linux/property.h>
+#include <linux/platform_data/x86/apple.h>
 #include <linux/platform_device.h>
 #include <linux/clk.h>
 #include <linux/gpio/consumer.h>
@@ -52,7 +53,37 @@
 
 #define BCM_AUTOSUSPEND_DELAY	5000 /* default autosleep delay */
 
-/* device driver resources */
+/**
+ * struct bcm_device - device driver resources
+ * @serdev_hu: HCI UART controller struct
+ * @list: bcm_device_list node
+ * @dev: physical UART slave
+ * @name: device name logged by bt_dev_*() functions
+ * @device_wakeup: BT_WAKE pin,
+ *	assert = Bluetooth device must wake up or remain awake,
+ *	deassert = Bluetooth device may sleep when sleep criteria are met
+ * @shutdown: BT_REG_ON pin,
+ *	power up or power down Bluetooth device internal regulators
+ * @set_device_wakeup: callback to toggle BT_WAKE pin
+ *	either by accessing @device_wakeup or by calling @btlp
+ * @set_shutdown: callback to toggle BT_REG_ON pin
+ *	either by accessing @shutdown or by calling @btpu/@btpd
+ * @btlp: Apple ACPI method to toggle BT_WAKE pin ("Bluetooth Low Power")
+ * @btpu: Apple ACPI method to drive BT_REG_ON pin high ("Bluetooth Power Up")
+ * @btpd: Apple ACPI method to drive BT_REG_ON pin low ("Bluetooth Power Down")
+ * @clk: clock used by Bluetooth device
+ * @clk_enabled: whether @clk is prepared and enabled
+ * @init_speed: default baudrate of Bluetooth device;
+ *	the host UART is initially set to this baudrate so that
+ *	it can configure the Bluetooth device for @oper_speed
+ * @oper_speed: preferred baudrate of Bluetooth device;
+ *	set to 0 if @init_speed is already the preferred baudrate
+ * @irq: interrupt triggered by HOST_WAKE_BT pin
+ * @irq_active_low: whether @irq is active low
+ * @hu: pointer to HCI UART controller struct,
+ *	used to disable flow control during runtime suspend and system sleep
+ * @is_suspended: whether flow control is currently disabled
+ */
 struct bcm_device {
 	/* Must be the first member, hci_serdev.c expects this. */
 	struct hci_uart		serdev_hu;
@@ -63,6 +94,11 @@ struct bcm_device {
 	const char		*name;
 	struct gpio_desc	*device_wakeup;
 	struct gpio_desc	*shutdown;
+	int			(*set_device_wakeup)(struct bcm_device *, bool);
+	int			(*set_shutdown)(struct bcm_device *, bool);
+#ifdef CONFIG_ACPI
+	acpi_handle		btlp, btpu, btpd;
+#endif
 
 	struct clk		*clk;
 	bool			clk_enabled;
@@ -74,7 +110,7 @@ struct bcm_device {
 
 #ifdef CONFIG_PM
 	struct hci_uart		*hu;
-	bool			is_suspended; /* suspend/resume flag */
+	bool			is_suspended;
 #endif
 };
 
@@ -170,11 +206,21 @@ static bool bcm_device_exists(struct bcm_device *device)
 
 static int bcm_gpio_set_power(struct bcm_device *dev, bool powered)
 {
-	if (powered && !IS_ERR(dev->clk) && !dev->clk_enabled)
-		clk_prepare_enable(dev->clk);
+	int err;
 
-	gpiod_set_value(dev->shutdown, powered);
-	gpiod_set_value(dev->device_wakeup, powered);
+	if (powered && !IS_ERR(dev->clk) && !dev->clk_enabled) {
+		err = clk_prepare_enable(dev->clk);
+		if (err)
+			return err;
+	}
+
+	err = dev->set_shutdown(dev, powered);
+	if (err)
+		goto err_clk_disable;
+
+	err = dev->set_device_wakeup(dev, powered);
+	if (err)
+		goto err_revert_shutdown;
 
 	if (!powered && !IS_ERR(dev->clk) && dev->clk_enabled)
 		clk_disable_unprepare(dev->clk);
@@ -182,6 +228,13 @@ static int bcm_gpio_set_power(struct bcm_device *dev, bool powered)
 	dev->clk_enabled = powered;
 
 	return 0;
+
+err_revert_shutdown:
+	dev->set_shutdown(dev, !powered);
+err_clk_disable:
+	if (powered && !IS_ERR(dev->clk) && !dev->clk_enabled)
+		clk_disable_unprepare(dev->clk);
+	return err;
 }
 
 #ifdef CONFIG_PM
@@ -191,9 +244,7 @@ static irqreturn_t bcm_host_wake(int irq, void *data)
 
 	bt_dev_dbg(bdev, "Host wake IRQ");
 
-	pm_runtime_get(bdev->dev);
-	pm_runtime_mark_last_busy(bdev->dev);
-	pm_runtime_put_autosuspend(bdev->dev);
+	pm_request_resume(bdev->dev);
 
 	return IRQ_HANDLED;
 }
@@ -218,8 +269,10 @@ static int bcm_request_irq(struct bcm_data *bcm)
 			       bdev->irq_active_low ? IRQF_TRIGGER_FALLING :
 						      IRQF_TRIGGER_RISING,
 			       "host_wake", bdev);
-	if (err)
+	if (err) {
+		bdev->irq = err;
 		goto unlock;
+	}
 
 	device_init_wakeup(bdev->dev, true);
 
@@ -247,8 +300,8 @@ static const struct bcm_set_sleep_mode default_sleep_params = {
 	/* Irrelevant USB flags */
 	.usb_auto_sleep = 0,
 	.usb_resume_timeout = 0,
+	.break_to_host = 0,
 	.pulsed_host_wake = 0,
-	.break_to_host = 0
 };
 
 static int bcm_setup_sleep(struct hci_uart *hu)
@@ -304,6 +357,7 @@ static int bcm_open(struct hci_uart *hu)
 {
 	struct bcm_data *bcm;
 	struct list_head *p;
+	int err;
 
 	bt_dev_dbg(hu->hdev, "hu %p", hu);
 
@@ -318,7 +372,10 @@ static int bcm_open(struct hci_uart *hu)
 	mutex_lock(&bcm_device_lock);
 
 	if (hu->serdev) {
-		serdev_device_open(hu->serdev);
+		err = serdev_device_open(hu->serdev);
+		if (err)
+			goto err_free;
+
 		bcm->dev = serdev_device_get_drvdata(hu->serdev);
 		goto out;
 	}
@@ -346,17 +403,33 @@ out:
 	if (bcm->dev) {
 		hu->init_speed = bcm->dev->init_speed;
 		hu->oper_speed = bcm->dev->oper_speed;
-		bcm_gpio_set_power(bcm->dev, true);
+		err = bcm_gpio_set_power(bcm->dev, true);
+		if (err)
+			goto err_unset_hu;
 	}
 
 	mutex_unlock(&bcm_device_lock);
 	return 0;
+
+err_unset_hu:
+	if (hu->serdev)
+		serdev_device_close(hu->serdev);
+#ifdef CONFIG_PM
+	else
+		bcm->dev->hu = NULL;
+#endif
+err_free:
+	mutex_unlock(&bcm_device_lock);
+	hu->priv = NULL;
+	kfree(bcm);
+	return err;
 }
 
 static int bcm_close(struct hci_uart *hu)
 {
 	struct bcm_data *bcm = hu->priv;
 	struct bcm_device *bdev = NULL;
+	int err;
 
 	bt_dev_dbg(hu->hdev, "hu %p", hu);
 
@@ -374,16 +447,17 @@ static int bcm_close(struct hci_uart *hu)
 	}
 
 	if (bdev) {
-		bcm_gpio_set_power(bdev, false);
-#ifdef CONFIG_PM
-		pm_runtime_disable(bdev->dev);
-		pm_runtime_set_suspended(bdev->dev);
-
-		if (device_can_wakeup(bdev->dev)) {
+		if (IS_ENABLED(CONFIG_PM) && bdev->irq > 0) {
 			devm_free_irq(bdev->dev, bdev->irq, bdev);
 			device_init_wakeup(bdev->dev, false);
+			pm_runtime_disable(bdev->dev);
 		}
-#endif
+
+		err = bcm_gpio_set_power(bdev, false);
+		if (err)
+			bt_dev_err(hu->hdev, "Failed to power down");
+		else
+			pm_runtime_set_suspended(bdev->dev);
 	}
 	mutex_unlock(&bcm_device_lock);
 
@@ -512,11 +586,8 @@ static int bcm_recv(struct hci_uart *hu, const void *data, int count)
 	} else if (!bcm->rx_skb) {
 		/* Delay auto-suspend when receiving completed packet */
 		mutex_lock(&bcm_device_lock);
-		if (bcm->dev && bcm_device_exists(bcm->dev)) {
-			pm_runtime_get(bcm->dev->dev);
-			pm_runtime_mark_last_busy(bcm->dev->dev);
-			pm_runtime_put_autosuspend(bcm->dev->dev);
-		}
+		if (bcm->dev && bcm_device_exists(bcm->dev))
+			pm_request_resume(bcm->dev->dev);
 		mutex_unlock(&bcm_device_lock);
 	}
 
@@ -566,6 +637,7 @@ static struct sk_buff *bcm_dequeue(struct hci_uart *hu)
 static int bcm_suspend_device(struct device *dev)
 {
 	struct bcm_device *bdev = dev_get_drvdata(dev);
+	int err;
 
 	bt_dev_dbg(bdev, "");
 
@@ -577,27 +649,37 @@ static int bcm_suspend_device(struct device *dev)
 	}
 
 	/* Suspend the device */
-	if (bdev->device_wakeup) {
-		gpiod_set_value(bdev->device_wakeup, false);
-		bt_dev_dbg(bdev, "suspend, delaying 15 ms");
-		mdelay(15);
+	err = bdev->set_device_wakeup(bdev, false);
+	if (err) {
+		if (bdev->is_suspended && bdev->hu) {
+			bdev->is_suspended = false;
+			hci_uart_set_flow_control(bdev->hu, false);
+		}
+		return -EBUSY;
 	}
 
+	bt_dev_dbg(bdev, "suspend, delaying 15 ms");
+	msleep(15);
+
 	return 0;
 }
 
 static int bcm_resume_device(struct device *dev)
 {
 	struct bcm_device *bdev = dev_get_drvdata(dev);
+	int err;
 
 	bt_dev_dbg(bdev, "");
 
-	if (bdev->device_wakeup) {
-		gpiod_set_value(bdev->device_wakeup, true);
-		bt_dev_dbg(bdev, "resume, delaying 15 ms");
-		mdelay(15);
+	err = bdev->set_device_wakeup(bdev, true);
+	if (err) {
+		dev_err(dev, "Failed to power up\n");
+		return err;
 	}
 
+	bt_dev_dbg(bdev, "resume, delaying 15 ms");
+	msleep(15);
+
 	/* When this executes, the device has woken up already */
 	if (bdev->is_suspended && bdev->hu) {
 		bdev->is_suspended = false;
@@ -632,7 +714,7 @@ static int bcm_suspend(struct device *dev)
 	if (pm_runtime_active(dev))
 		bcm_suspend_device(dev);
 
-	if (device_may_wakeup(dev)) {
+	if (device_may_wakeup(dev) && bdev->irq > 0) {
 		error = enable_irq_wake(bdev->irq);
 		if (!error)
 			bt_dev_dbg(bdev, "BCM irq: enabled");
@@ -648,6 +730,7 @@ unlock:
 static int bcm_resume(struct device *dev)
 {
 	struct bcm_device *bdev = dev_get_drvdata(dev);
+	int err = 0;
 
 	bt_dev_dbg(bdev, "resume: is_suspended %d", bdev->is_suspended);
 
@@ -662,19 +745,21 @@ static int bcm_resume(struct device *dev)
 	if (!bdev->hu)
 		goto unlock;
 
-	if (device_may_wakeup(dev)) {
+	if (device_may_wakeup(dev) && bdev->irq > 0) {
 		disable_irq_wake(bdev->irq);
 		bt_dev_dbg(bdev, "BCM irq: disabled");
 	}
 
-	bcm_resume_device(dev);
+	err = bcm_resume_device(dev);
 
 unlock:
 	mutex_unlock(&bcm_device_lock);
 
-	pm_runtime_disable(dev);
-	pm_runtime_set_active(dev);
-	pm_runtime_enable(dev);
+	if (!err) {
+		pm_runtime_disable(dev);
+		pm_runtime_set_active(dev);
+		pm_runtime_enable(dev);
+	}
 
 	return 0;
 }
@@ -771,25 +856,84 @@ static int bcm_resource(struct acpi_resource *ares, void *data)
 
 	return 0;
 }
+
+static int bcm_apple_set_device_wakeup(struct bcm_device *dev, bool awake)
+{
+	if (ACPI_FAILURE(acpi_execute_simple_method(dev->btlp, NULL, !awake)))
+		return -EIO;
+
+	return 0;
+}
+
+static int bcm_apple_set_shutdown(struct bcm_device *dev, bool powered)
+{
+	if (ACPI_FAILURE(acpi_evaluate_object(powered ? dev->btpu : dev->btpd,
+					      NULL, NULL, NULL)))
+		return -EIO;
+
+	return 0;
+}
+
+static int bcm_apple_get_resources(struct bcm_device *dev)
+{
+	struct acpi_device *adev = ACPI_COMPANION(dev->dev);
+	const union acpi_object *obj;
+
+	if (!adev ||
+	    ACPI_FAILURE(acpi_get_handle(adev->handle, "BTLP", &dev->btlp)) ||
+	    ACPI_FAILURE(acpi_get_handle(adev->handle, "BTPU", &dev->btpu)) ||
+	    ACPI_FAILURE(acpi_get_handle(adev->handle, "BTPD", &dev->btpd)))
+		return -ENODEV;
+
+	if (!acpi_dev_get_property(adev, "baud", ACPI_TYPE_BUFFER, &obj) &&
+	    obj->buffer.length == 8)
+		dev->init_speed = *(u64 *)obj->buffer.pointer;
+
+	dev->set_device_wakeup = bcm_apple_set_device_wakeup;
+	dev->set_shutdown = bcm_apple_set_shutdown;
+
+	return 0;
+}
+#else
+static inline int bcm_apple_get_resources(struct bcm_device *dev)
+{
+	return -EOPNOTSUPP;
+}
 #endif /* CONFIG_ACPI */
 
+static int bcm_gpio_set_device_wakeup(struct bcm_device *dev, bool awake)
+{
+	gpiod_set_value(dev->device_wakeup, awake);
+	return 0;
+}
+
+static int bcm_gpio_set_shutdown(struct bcm_device *dev, bool powered)
+{
+	gpiod_set_value(dev->shutdown, powered);
+	return 0;
+}
+
 static int bcm_get_resources(struct bcm_device *dev)
 {
 	dev->name = dev_name(dev->dev);
 
+	if (x86_apple_machine && !bcm_apple_get_resources(dev))
+		return 0;
+
 	dev->clk = devm_clk_get(dev->dev, NULL);
 
-	dev->device_wakeup = devm_gpiod_get_optional(dev->dev,
-						     "device-wakeup",
-						     GPIOD_OUT_LOW);
+	dev->device_wakeup = devm_gpiod_get(dev->dev, "device-wakeup",
+					    GPIOD_OUT_LOW);
 	if (IS_ERR(dev->device_wakeup))
 		return PTR_ERR(dev->device_wakeup);
 
-	dev->shutdown = devm_gpiod_get_optional(dev->dev, "shutdown",
-						GPIOD_OUT_LOW);
+	dev->shutdown = devm_gpiod_get(dev->dev, "shutdown", GPIOD_OUT_LOW);
 	if (IS_ERR(dev->shutdown))
 		return PTR_ERR(dev->shutdown);
 
+	dev->set_device_wakeup = bcm_gpio_set_device_wakeup;
+	dev->set_shutdown = bcm_gpio_set_shutdown;
+
 	/* IRQ can be declared in ACPI table as Interrupt or GpioInt */
 	if (dev->irq <= 0) {
 		struct gpio_desc *gpio;
@@ -802,7 +946,7 @@ static int bcm_get_resources(struct bcm_device *dev)
 		dev->irq = gpiod_to_irq(gpio);
 	}
 
-	dev_info(dev->dev, "BCM irq: %d\n", dev->irq);
+	dev_dbg(dev->dev, "BCM irq: %d\n", dev->irq);
 	return 0;
 }
 
@@ -892,7 +1036,9 @@ static int bcm_probe(struct platform_device *pdev)
 	list_add_tail(&dev->list, &bcm_device_list);
 	mutex_unlock(&bcm_device_lock);
 
-	bcm_gpio_set_power(dev, false);
+	ret = bcm_gpio_set_power(dev, false);
+	if (ret)
+		dev_err(&pdev->dev, "Failed to power down\n");
 
 	return 0;
 }
@@ -939,6 +1085,7 @@ static const struct acpi_device_id bcm_acpi_match[] = {
 	{ "BCM2E65", (kernel_ulong_t)&acpi_bcm_int_last_gpios },
 	{ "BCM2E67", (kernel_ulong_t)&acpi_bcm_int_last_gpios },
 	{ "BCM2E71", (kernel_ulong_t)&acpi_bcm_int_last_gpios },
+	{ "BCM2E72", (kernel_ulong_t)&acpi_bcm_int_last_gpios },
 	{ "BCM2E7B", (kernel_ulong_t)&acpi_bcm_int_last_gpios },
 	{ "BCM2E7C", (kernel_ulong_t)&acpi_bcm_int_last_gpios },
 	{ "BCM2E7E", (kernel_ulong_t)&acpi_bcm_int_first_gpios },
@@ -993,7 +1140,9 @@ static int bcm_serdev_probe(struct serdev_device *serdev)
 	if (err)
 		return err;
 
-	bcm_gpio_set_power(bcmdev, false);
+	err = bcm_gpio_set_power(bcmdev, false);
+	if (err)
+		dev_err(&serdev->dev, "Failed to power down\n");
 
 	return hci_uart_register_device(&bcmdev->serdev_hu, &bcm_proto);
 }
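
The hci_bcm rework above routes all BT_WAKE/BT_REG_ON toggling through per-device set_device_wakeup()/set_shutdown() callbacks so that GPIO-backed and Apple-ACPI-backed power control share one error-checked code path. A reduced sketch of that dispatch pattern, assuming a GPIO backend only (the struct and function names are illustrative, not the driver's):

	#include <linux/errno.h>
	#include <linux/gpio/consumer.h>

	struct pwr_dev {
		struct gpio_desc *wakeup;
		int (*set_wakeup)(struct pwr_dev *pd, bool awake);
	};

	static int pwr_gpio_set_wakeup(struct pwr_dev *pd, bool awake)
	{
		gpiod_set_value(pd->wakeup, awake);
		return 0;	/* GPIO writes cannot fail */
	}

	/* An ACPI backend would evaluate a method here instead and could
	 * return -EIO, which is why the callback returns int and callers
	 * now propagate errors (see bcm_gpio_set_power() above).
	 */
	static int pwr_up(struct pwr_dev *pd)
	{
		return pd->set_wakeup(pd, true);
	}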
diff --git a/drivers/bluetooth/hci_intel.c b/drivers/bluetooth/hci_intel.c
index aad07e40ea4f..7c166e3b308b 100644
--- a/drivers/bluetooth/hci_intel.c
+++ b/drivers/bluetooth/hci_intel.c
@@ -540,18 +540,15 @@ static int intel_set_baudrate(struct hci_uart *hu, unsigned int speed)
 
 static int intel_setup(struct hci_uart *hu)
 {
-	static const u8 reset_param[] = { 0x00, 0x01, 0x00, 0x01,
-					  0x00, 0x08, 0x04, 0x00 };
 	struct intel_data *intel = hu->priv;
 	struct hci_dev *hdev = hu->hdev;
 	struct sk_buff *skb;
 	struct intel_version ver;
-	struct intel_boot_params *params;
+	struct intel_boot_params params;
 	struct list_head *p;
 	const struct firmware *fw;
-	const u8 *fw_ptr;
 	char fwname[64];
-	u32 frag_len;
+	u32 boot_param;
 	ktime_t calltime, delta, rettime;
 	unsigned long long duration;
 	unsigned int init_speed, oper_speed;
@@ -563,6 +560,12 @@ static int intel_setup(struct hci_uart *hu)
 	hu->hdev->set_diag = btintel_set_diag;
 	hu->hdev->set_bdaddr = btintel_set_bdaddr;
 
+	/* Set the default boot parameter to 0x0; it is updated to the
+	 * SKU-specific boot parameter read from the Intel_Write_Boot_Params
+	 * command while the firmware is downloaded.
+	 */
+	boot_param = 0x00000000;
+
 	calltime = ktime_get();
 
 	if (hu->init_speed)
@@ -656,85 +659,95 @@ static int intel_setup(struct hci_uart *hu)
 	/* Read the secure boot parameters to identify the operating
 	 * details of the bootloader.
 	 */
-	skb = __hci_cmd_sync(hdev, 0xfc0d, 0, NULL, HCI_CMD_TIMEOUT);
-	if (IS_ERR(skb)) {
-		bt_dev_err(hdev, "Reading Intel boot parameters failed (%ld)",
-			   PTR_ERR(skb));
-		return PTR_ERR(skb);
-	}
-
-	if (skb->len != sizeof(*params)) {
-		bt_dev_err(hdev, "Intel boot parameters size mismatch");
-		kfree_skb(skb);
-		return -EILSEQ;
-	}
-
-	params = (struct intel_boot_params *)skb->data;
-	if (params->status) {
-		bt_dev_err(hdev, "Intel boot parameters command failure (%02x)",
-			   params->status);
-		err = -bt_to_errno(params->status);
-		kfree_skb(skb);
+	err = btintel_read_boot_params(hdev, &params);
+	if (err)
 		return err;
-	}
-
-	bt_dev_info(hdev, "Device revision is %u",
-		    le16_to_cpu(params->dev_revid));
-
-	bt_dev_info(hdev, "Secure boot is %s",
-		    params->secure_boot ? "enabled" : "disabled");
-
-	bt_dev_info(hdev, "Minimum firmware build %u week %u %u",
-		params->min_fw_build_nn, params->min_fw_build_cw,
-		2000 + params->min_fw_build_yy);
 
 	/* It is required that every single firmware fragment is acknowledged
 	 * with a command complete event. If the boot parameters indicate
 	 * that this bootloader does not send them, then abort the setup.
 	 */
-	if (params->limited_cce != 0x00) {
+	if (params.limited_cce != 0x00) {
 		bt_dev_err(hdev, "Unsupported Intel firmware loading method (%u)",
-			   params->limited_cce);
-		kfree_skb(skb);
+			   params.limited_cce);
 		return -EINVAL;
 	}
 
 	/* If the OTP has no valid Bluetooth device address, then there will
 	 * also be no valid address for the operational firmware.
 	 */
-	if (!bacmp(&params->otp_bdaddr, BDADDR_ANY)) {
+	if (!bacmp(&params.otp_bdaddr, BDADDR_ANY)) {
 		bt_dev_info(hdev, "No device address configured");
 		set_bit(HCI_QUIRK_INVALID_BDADDR, &hdev->quirks);
 	}
 
 	/* With this Intel bootloader only the hardware variant and device
-	 * revision information are used to select the right firmware.
+	 * revision information are used to select the right firmware for SfP
+	 * and WsP.
 	 *
 	 * The firmware filename is ibt-<hw_variant>-<dev_revid>.sfi.
 	 *
 	 * Currently the supported hardware variants are:
 	 *   11 (0x0b) for iBT 3.0 (LnP/SfP)
+	 *   12 (0x0c) for iBT 3.5 (WsP)
+	 *
+	 * For ThP/JfP and for future SKUs, the FW name varies based on HW
+	 * variant, HW revision and FW revision, as these are dependent on CNVi
+	 * and RF Combination.
+	 *
+	 *   18 (0x12) for iBT3.5 (ThP/JfP)
+	 *
+	 * The firmware file name for these will be
+	 * ibt-<hw_variant>-<hw_revision>-<fw_revision>.sfi.
+	 *
 	 */
-	snprintf(fwname, sizeof(fwname), "intel/ibt-%u-%u.sfi",
-		le16_to_cpu(ver.hw_variant),
-		le16_to_cpu(params->dev_revid));
+	switch (ver.hw_variant) {
+	case 0x0b:      /* SfP */
+	case 0x0c:      /* WsP */
+		snprintf(fwname, sizeof(fwname), "intel/ibt-%u-%u.sfi",
+			 le16_to_cpu(ver.hw_variant),
+			 le16_to_cpu(params.dev_revid));
+		break;
+	case 0x12:      /* ThP */
+		snprintf(fwname, sizeof(fwname), "intel/ibt-%u-%u-%u.sfi",
+			 le16_to_cpu(ver.hw_variant),
+			 le16_to_cpu(ver.hw_revision),
+			 le16_to_cpu(ver.fw_revision));
+		break;
+	default:
+		bt_dev_err(hdev, "Unsupported Intel hardware variant (%u)",
+			   ver.hw_variant);
+		return -EINVAL;
+	}
 
 	err = request_firmware(&fw, fwname, &hdev->dev);
 	if (err < 0) {
 		bt_dev_err(hdev, "Failed to load Intel firmware file (%d)",
 			   err);
-		kfree_skb(skb);
 		return err;
 	}
 
 	bt_dev_info(hdev, "Found device firmware: %s", fwname);
 
 	/* Save the DDC file name for later */
-	snprintf(fwname, sizeof(fwname), "intel/ibt-%u-%u.ddc",
-		le16_to_cpu(ver.hw_variant),
-		le16_to_cpu(params->dev_revid));
-
-	kfree_skb(skb);
+	switch (ver.hw_variant) {
+	case 0x0b:      /* SfP */
+	case 0x0c:      /* WsP */
+		snprintf(fwname, sizeof(fwname), "intel/ibt-%u-%u.ddc",
+			 le16_to_cpu(ver.hw_variant),
+			 le16_to_cpu(params.dev_revid));
+		break;
+	case 0x12:      /* ThP */
+		snprintf(fwname, sizeof(fwname), "intel/ibt-%u-%u-%u.ddc",
+			 le16_to_cpu(ver.hw_variant),
+			 le16_to_cpu(ver.hw_revision),
+			 le16_to_cpu(ver.fw_revision));
+		break;
+	default:
+		bt_dev_err(hdev, "Unsupported Intel hardware variant (%u)",
+			   ver.hw_variant);
+		return -EINVAL;
+	}
 
 	if (fw->size < 644) {
 		bt_dev_err(hdev, "Invalid size of firmware file (%zu)",
@@ -745,70 +758,10 @@ static int intel_setup(struct hci_uart *hu)
 
 	set_bit(STATE_DOWNLOADING, &intel->flags);
 
-	/* Start the firmware download transaction with the Init fragment
-	 * represented by the 128 bytes of CSS header.
-	 */
-	err = btintel_secure_send(hdev, 0x00, 128, fw->data);
-	if (err < 0) {
-		bt_dev_err(hdev, "Failed to send firmware header (%d)", err);
-		goto done;
-	}
-
-	/* Send the 256 bytes of public key information from the firmware
-	 * as the PKey fragment.
-	 */
-	err = btintel_secure_send(hdev, 0x03, 256, fw->data + 128);
-	if (err < 0) {
-		bt_dev_err(hdev, "Failed to send firmware public key (%d)",
-			   err);
-		goto done;
-	}
-
-	/* Send the 256 bytes of signature information from the firmware
-	 * as the Sign fragment.
-	 */
-	err = btintel_secure_send(hdev, 0x02, 256, fw->data + 388);
-	if (err < 0) {
-		bt_dev_err(hdev, "Failed to send firmware signature (%d)",
-			   err);
+	/* Start firmware downloading and get boot parameter */
+	err = btintel_download_firmware(hdev, fw, &boot_param);
+	if (err < 0)
 		goto done;
-	}
-
-	fw_ptr = fw->data + 644;
-	frag_len = 0;
-
-	while (fw_ptr - fw->data < fw->size) {
-		struct hci_command_hdr *cmd = (void *)(fw_ptr + frag_len);
-
-		frag_len += sizeof(*cmd) + cmd->plen;
-
-		bt_dev_dbg(hdev, "Patching %td/%zu", (fw_ptr - fw->data),
-			   fw->size);
-
-		/* The parameter length of the secure send command requires
-		 * a 4 byte alignment. It happens so that the firmware file
-		 * contains proper Intel_NOP commands to align the fragments
-		 * as needed.
-		 *
-		 * Send set of commands with 4 byte alignment from the
-		 * firmware data buffer as a single Data fragement.
-		 */
-		if (frag_len % 4)
-			continue;
-
-		/* Send each command from the firmware data buffer as
-		 * a single Data fragment.
-		 */
-		err = btintel_secure_send(hdev, 0x01, frag_len, fw_ptr);
-		if (err < 0) {
-			bt_dev_err(hdev, "Failed to send firmware data (%d)",
-				   err);
-			goto done;
-		}
-
-		fw_ptr += frag_len;
-		frag_len = 0;
-	}
 
 	set_bit(STATE_FIRMWARE_LOADED, &intel->flags);
 
@@ -869,12 +822,9 @@ done:
 
 	set_bit(STATE_BOOTING, &intel->flags);
 
-	skb = __hci_cmd_sync(hdev, 0xfc01, sizeof(reset_param), reset_param,
-			     HCI_CMD_TIMEOUT);
-	if (IS_ERR(skb))
-		return PTR_ERR(skb);
-
-	kfree_skb(skb);
+	err = btintel_send_intel_reset(hdev, boot_param);
+	if (err)
+		return err;
 
 	/* The bootloader will not indicate when the device is ready. This
 	 * is done by the operational firmware sending bootup notification.
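
Both the btusb and hci_intel hunks above select the firmware name from the hardware variant: SfP/WsP keep the two-part ibt-<hw_variant>-<dev_revid> scheme, while ThP needs the three-part ibt-<hw_variant>-<hw_revision>-<fw_revision> scheme because its firmware depends on the CNVi and RF combination. A condensed sketch of that selection (the helper name and plain integer parameters are assumptions; the driver reads these fields out of struct intel_version and struct intel_boot_params):

	static int intel_fw_name(char *buf, size_t len, unsigned int hw_variant,
				 unsigned int dev_revid, unsigned int hw_revision,
				 unsigned int fw_revision)
	{
		switch (hw_variant) {
		case 0x0b:	/* SfP */
		case 0x0c:	/* WsP */
			snprintf(buf, len, "intel/ibt-%u-%u.sfi",
				 hw_variant, dev_revid);
			return 0;
		case 0x12:	/* ThP */
			snprintf(buf, len, "intel/ibt-%u-%u-%u.sfi",
				 hw_variant, hw_revision, fw_revision);
			return 0;
		default:
			return -EINVAL;	/* unknown SKU: refuse to guess */
		}
	}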
diff --git a/drivers/bluetooth/hci_ll.c b/drivers/bluetooth/hci_ll.c
index e2c078d61730..1b4417a623a4 100644
--- a/drivers/bluetooth/hci_ll.c
+++ b/drivers/bluetooth/hci_ll.c
@@ -53,9 +53,14 @@
 #include <net/bluetooth/bluetooth.h>
 #include <net/bluetooth/hci_core.h>
 #include <linux/gpio/consumer.h>
+#include <linux/nvmem-consumer.h>
 
 #include "hci_uart.h"
 
+/* Vendor-specific HCI commands */
+#define HCI_VS_WRITE_BD_ADDR			0xfc06
+#define HCI_VS_UPDATE_UART_HCI_BAUDRATE		0xff36
+
 /* HCILL commands */
 #define HCILL_GO_TO_SLEEP_IND	0x30
 #define HCILL_GO_TO_SLEEP_ACK	0x31
@@ -86,6 +91,7 @@ struct ll_device {
 	struct serdev_device *serdev;
 	struct gpio_desc *enable_gpio;
 	struct clk *ext_clk;
+	bdaddr_t bdaddr;
 };
 
 struct ll_struct {
@@ -620,7 +626,7 @@ static int download_firmware(struct ll_device *lldev)
 		case ACTION_SEND_COMMAND:	/* action send */
 			bt_dev_dbg(lldev->hu.hdev, "S");
 			cmd = (struct hci_command *)action_ptr;
-			if (cmd->opcode == 0xff36) {
+			if (cmd->opcode == HCI_VS_UPDATE_UART_HCI_BAUDRATE) {
 				/* ignore remote change
 				 * baud rate HCI VS command
 				 */
@@ -628,11 +634,11 @@ static int download_firmware(struct ll_device *lldev)
 				break;
 			}
 			if (cmd->prefix != 1)
-				bt_dev_dbg(lldev->hu.hdev, "command type %d\n", cmd->prefix);
+				bt_dev_dbg(lldev->hu.hdev, "command type %d", cmd->prefix);
 
 			skb = __hci_cmd_sync(lldev->hu.hdev, cmd->opcode, cmd->plen, &cmd->speed, HCI_INIT_TIMEOUT);
 			if (IS_ERR(skb)) {
-				bt_dev_err(lldev->hu.hdev, "send command failed\n");
+				bt_dev_err(lldev->hu.hdev, "send command failed");
 				err = PTR_ERR(skb);
 				goto out_rel_fw;
 			}
@@ -659,6 +665,24 @@ out_rel_fw:
 	return err;
 }
 
+static int ll_set_bdaddr(struct hci_dev *hdev, const bdaddr_t *bdaddr)
+{
+	bdaddr_t bdaddr_swapped;
+	struct sk_buff *skb;
+
+	/* HCI_VS_WRITE_BD_ADDR (at least on a CC2560A chip) expects the BD
+	 * address to be MSB first, but bdaddr_t has the convention of being
+	 * LSB first.
+	 */
+	baswap(&bdaddr_swapped, bdaddr);
+	skb = __hci_cmd_sync(hdev, HCI_VS_WRITE_BD_ADDR, sizeof(bdaddr_t),
+			     &bdaddr_swapped, HCI_INIT_TIMEOUT);
+	if (!IS_ERR(skb))
+		kfree_skb(skb);
+
+	return PTR_ERR_OR_ZERO(skb);
+}
+
 static int ll_setup(struct hci_uart *hu)
 {
 	int err, retry = 3;
@@ -671,14 +695,20 @@ static int ll_setup(struct hci_uart *hu)
 
 	lldev = serdev_device_get_drvdata(serdev);
 
+	hu->hdev->set_bdaddr = ll_set_bdaddr;
+
 	serdev_device_set_flow_control(serdev, true);
 
 	do {
-		/* Configure BT_EN to HIGH state */
+		/* Reset the Bluetooth device */
 		gpiod_set_value_cansleep(lldev->enable_gpio, 0);
 		msleep(5);
 		gpiod_set_value_cansleep(lldev->enable_gpio, 1);
-		msleep(100);
+		err = serdev_device_wait_for_cts(serdev, true, 200);
+		if (err) {
+			bt_dev_err(hu->hdev, "Failed to get CTS");
+			return err;
+		}
 
 		err = download_firmware(lldev);
 		if (!err)
@@ -691,6 +721,18 @@ static int ll_setup(struct hci_uart *hu)
 	if (err)
 		return err;
 
+	/* Set BD address if one was specified at probe */
+	if (!bacmp(&lldev->bdaddr, BDADDR_NONE)) {
+		/* This means that there was an error getting the BD address
+		 * during probe, so mark the device as having a bad address.
+		 */
+		set_bit(HCI_QUIRK_INVALID_BDADDR, &hu->hdev->quirks);
+	} else if (bacmp(&lldev->bdaddr, BDADDR_ANY)) {
+		err = ll_set_bdaddr(hu->hdev, &lldev->bdaddr);
+		if (err)
+			set_bit(HCI_QUIRK_INVALID_BDADDR, &hu->hdev->quirks);
+	}
+
 	/* Operational speed if any */
 	if (hu->oper_speed)
 		speed = hu->oper_speed;
@@ -700,7 +742,12 @@ static int ll_setup(struct hci_uart *hu)
 		speed = 0;
 
 	if (speed) {
-		struct sk_buff *skb = __hci_cmd_sync(hu->hdev, 0xff36, sizeof(speed), &speed, HCI_INIT_TIMEOUT);
+		__le32 speed_le = cpu_to_le32(speed);
+		struct sk_buff *skb;
+
+		skb = __hci_cmd_sync(hu->hdev, HCI_VS_UPDATE_UART_HCI_BAUDRATE,
+				     sizeof(speed_le), &speed_le,
+				     HCI_INIT_TIMEOUT);
 		if (!IS_ERR(skb)) {
 			kfree_skb(skb);
 			serdev_device_set_baudrate(serdev, speed);
@@ -716,6 +763,7 @@ static int hci_ti_probe(struct serdev_device *serdev)
 {
 	struct hci_uart *hu;
 	struct ll_device *lldev;
+	struct nvmem_cell *bdaddr_cell;
 	u32 max_speed = 3000000;
 
 	lldev = devm_kzalloc(&serdev->dev, sizeof(struct ll_device), GFP_KERNEL);
@@ -737,6 +785,52 @@ static int hci_ti_probe(struct serdev_device *serdev)
 	of_property_read_u32(serdev->dev.of_node, "max-speed", &max_speed);
 	hci_uart_set_speeds(hu, 115200, max_speed);
 
+	/* optional BD address from nvram */
+	bdaddr_cell = nvmem_cell_get(&serdev->dev, "bd-address");
+	if (IS_ERR(bdaddr_cell)) {
+		int err = PTR_ERR(bdaddr_cell);
+
+		if (err == -EPROBE_DEFER)
+			return err;
+
+		/* ENOENT means there is no matching nvmem cell and ENOSYS
+		 * means that nvmem is not enabled in the kernel configuration.
+		 */
+		if (err != -ENOENT && err != -ENOSYS) {
+			/* If there was some other error, give userspace a
+			 * chance to fix the problem instead of failing to load
+			 * the driver. Using BDADDR_NONE as a flag that is
+			 * tested later in the setup function.
+			 */
+			dev_warn(&serdev->dev,
+				 "Failed to get \"bd-address\" nvmem cell (%d)\n",
+				 err);
+			bacpy(&lldev->bdaddr, BDADDR_NONE);
+		}
+	} else {
+		bdaddr_t *bdaddr;
+		size_t len;
+
+		bdaddr = nvmem_cell_read(bdaddr_cell, &len);
+		nvmem_cell_put(bdaddr_cell);
+		if (IS_ERR(bdaddr)) {
+			dev_err(&serdev->dev, "Failed to read nvmem bd-address\n");
+			return PTR_ERR(bdaddr);
+		}
+		if (len != sizeof(bdaddr_t)) {
+			dev_err(&serdev->dev, "Invalid nvmem bd-address length\n");
+			kfree(bdaddr);
+			return -EINVAL;
+		}
+
+		/* As per the device tree bindings, the value from nvmem is
+		 * expected to be MSB first, but in the kernel it is expected
+		 * that bdaddr_t is LSB first.
+		 */
+		baswap(&lldev->bdaddr, bdaddr);
+		kfree(bdaddr);
+	}
+
 	return hci_uart_register_device(hu, &llp);
 }
 
@@ -748,6 +842,7 @@ static void hci_ti_remove(struct serdev_device *serdev)
 }
 
 static const struct of_device_id hci_ti_of_match[] = {
+	{ .compatible = "ti,cc2560" },
 	{ .compatible = "ti,wl1271-st" },
 	{ .compatible = "ti,wl1273-st" },
 	{ .compatible = "ti,wl1281-st" },
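
The probe path above treats the "bd-address" nvmem cell as optional and byte-swaps the value it reads, because the device tree stores the address MSB-first while bdaddr_t is LSB-first. A trimmed sketch of that lookup, using only APIs present in the diff (nvmem_cell_get/read/put, baswap); the optional-cell special cases are collapsed for brevity:

	static int example_read_bdaddr(struct device *dev, bdaddr_t *out)
	{
		struct nvmem_cell *cell;
		bdaddr_t *raw;
		size_t len;

		cell = nvmem_cell_get(dev, "bd-address");
		if (IS_ERR(cell))
			return PTR_ERR(cell);	/* may be -EPROBE_DEFER */

		raw = nvmem_cell_read(cell, &len);
		nvmem_cell_put(cell);
		if (IS_ERR(raw))
			return PTR_ERR(raw);

		if (len != sizeof(bdaddr_t)) {
			kfree(raw);
			return -EINVAL;
		}

		baswap(out, raw);	/* MSB-first nvmem -> LSB-first bdaddr_t */
		kfree(raw);
		return 0;
	}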
diff --git a/drivers/bluetooth/hci_qca.c b/drivers/bluetooth/hci_qca.c
index bbd7db7384e6..05ec530b8a3a 100644
--- a/drivers/bluetooth/hci_qca.c
+++ b/drivers/bluetooth/hci_qca.c
@@ -932,6 +932,9 @@ static int qca_setup(struct hci_uart *hu)
 	if (!ret) {
 		set_bit(STATE_IN_BAND_SLEEP_ENABLED, &qca->flags);
 		qca_debugfs_init(hdev);
+	} else if (ret == -ENOENT) {
+		/* No patch/nvm-config found, run with original fw/config */
+		ret = 0;
 	}
 
 	/* Setup bdaddr */
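
Mapping -ENOENT to success above makes the patch/NVM files optional: a device without them simply runs on its ROM firmware and stock config, while every other failure still aborts setup. As a general sketch of the pattern (the helper name is hypothetical):

	ret = load_optional_config(hdev);
	if (ret == -ENOENT)
		ret = 0;	/* nothing shipped: keep the stock config */
	if (ret)
		return ret;	/* any other error remains fatal */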
diff --git a/drivers/bluetooth/hci_serdev.c b/drivers/bluetooth/hci_serdev.c
index 71664b22ec9d..e0e6461b9200 100644
--- a/drivers/bluetooth/hci_serdev.c
+++ b/drivers/bluetooth/hci_serdev.c
@@ -303,6 +303,7 @@ int hci_uart_register_device(struct hci_uart *hu,
 	hci_set_drvdata(hdev, hu);
 
 	INIT_WORK(&hu->write_work, hci_uart_write_work);
+	percpu_init_rwsem(&hu->proto_lock);
 
 	/* Only when vendor specific setup callback is provided, consider
 	 * the manufacturer information valid. This avoids filling in the
diff --git a/drivers/hv/ring_buffer.c b/drivers/hv/ring_buffer.c
index 12eb8caa4263..50e071444a5c 100644
--- a/drivers/hv/ring_buffer.c
+++ b/drivers/hv/ring_buffer.c
@@ -140,6 +140,29 @@ static u32 hv_copyto_ringbuffer(
 	return start_write_offset;
 }
 
+/*
+ *
+ * hv_get_ringbuffer_availbytes()
+ *
+ * Get number of bytes available to read and to write to
+ * for the specified ring buffer
+ */
+static void
+hv_get_ringbuffer_availbytes(const struct hv_ring_buffer_info *rbi,
+			     u32 *read, u32 *write)
+{
+	u32 read_loc, write_loc, dsize;
+
+	/* Capture the read/write indices before they change */
+	read_loc = READ_ONCE(rbi->ring_buffer->read_index);
+	write_loc = READ_ONCE(rbi->ring_buffer->write_index);
+	dsize = rbi->ring_datasize;
+
+	*write = write_loc >= read_loc ? dsize - (write_loc - read_loc) :
+		read_loc - write_loc;
+	*read = dsize - *write;
+}
+
 /* Get various debug metrics for the specified ring buffer. */
 void hv_ringbuffer_get_debuginfo(const struct hv_ring_buffer_info *ring_info,
 				 struct hv_ring_buffer_debug_info *debug_info)
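
The helper moved above derives free and used space from two snapshot offsets of a circular buffer. Worked example: with dsize = 4096, read_loc = 100 and write_loc = 3000, writable space is 4096 - (3000 - 100) = 1196 bytes and readable space is 4096 - 1196 = 2900; once the writer wraps (write_loc = 100, read_loc = 3000), writable space is simply 3000 - 100 = 2900. The same arithmetic as a standalone sketch:

	#include <linux/types.h>

	/* Free (writable) and used (readable) bytes in a circular buffer
	 * of size dsize, given snapshot read/write offsets.
	 */
	static void avail_bytes(u32 read_loc, u32 write_loc, u32 dsize,
				u32 *read, u32 *write)
	{
		*write = write_loc >= read_loc ?
			 dsize - (write_loc - read_loc) :	/* not wrapped */
			 read_loc - write_loc;			/* wrapped */
		*read = dsize - *write;
	}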
diff --git a/drivers/infiniband/hw/qedr/main.c b/drivers/infiniband/hw/qedr/main.c
index 50812b33291b..a9c3378bca38 100644
--- a/drivers/infiniband/hw/qedr/main.c
+++ b/drivers/infiniband/hw/qedr/main.c
@@ -264,7 +264,7 @@ static int qedr_register_device(struct qedr_dev *dev)
 static int qedr_alloc_mem_sb(struct qedr_dev *dev,
 			     struct qed_sb_info *sb_info, u16 sb_id)
 {
-	struct status_block *sb_virt;
+	struct status_block_e4 *sb_virt;
 	dma_addr_t sb_phys;
 	int rc;
 
diff --git a/drivers/infiniband/hw/qedr/qedr_hsi_rdma.h b/drivers/infiniband/hw/qedr/qedr_hsi_rdma.h
index b7587f10e7de..78b49002fbd2 100644
--- a/drivers/infiniband/hw/qedr/qedr_hsi_rdma.h
+++ b/drivers/infiniband/hw/qedr/qedr_hsi_rdma.h
@@ -164,6 +164,13 @@ struct rdma_srq_sge {
 	__le32 l_key;
 };
 
+/* Rdma doorbell data for flags update */
+struct rdma_pwm_flags_data {
+	__le16 icid; /* internal CID */
+	u8 agg_flags; /* aggregative flags */
+	u8 reserved;
+};
+
 /* Rdma doorbell data for SQ and RQ */
 struct rdma_pwm_val16_data {
 	__le16 icid;
@@ -180,12 +187,16 @@ struct rdma_pwm_val32_data {
 	__le16 icid;
 	u8 agg_flags;
 	u8 params;
-#define RDMA_PWM_VAL32_DATA_AGG_CMD_MASK    0x3
-#define RDMA_PWM_VAL32_DATA_AGG_CMD_SHIFT   0
-#define RDMA_PWM_VAL32_DATA_BYPASS_EN_MASK  0x1
-#define RDMA_PWM_VAL32_DATA_BYPASS_EN_SHIFT 2
-#define RDMA_PWM_VAL32_DATA_RESERVED_MASK   0x1F
-#define RDMA_PWM_VAL32_DATA_RESERVED_SHIFT  3
+#define RDMA_PWM_VAL32_DATA_AGG_CMD_MASK		0x3
+#define RDMA_PWM_VAL32_DATA_AGG_CMD_SHIFT		0
+#define RDMA_PWM_VAL32_DATA_BYPASS_EN_MASK		0x1
+#define RDMA_PWM_VAL32_DATA_BYPASS_EN_SHIFT		2
+#define RDMA_PWM_VAL32_DATA_CONN_TYPE_IS_IWARP_MASK	0x1
+#define RDMA_PWM_VAL32_DATA_CONN_TYPE_IS_IWARP_SHIFT	3
+#define RDMA_PWM_VAL32_DATA_SET_16B_VAL_MASK		0x1
+#define RDMA_PWM_VAL32_DATA_SET_16B_VAL_SHIFT		4
+#define RDMA_PWM_VAL32_DATA_RESERVED_MASK		0x7
+#define RDMA_PWM_VAL32_DATA_RESERVED_SHIFT		5
 	__le32 value;
 };
 
@@ -478,23 +489,25 @@ struct rdma_sq_fmr_wqe {
 	__le16 dif_app_tag_mask;
 	__le16 dif_runt_crc_value;
 	__le16 dif_flags;
-#define RDMA_SQ_FMR_WQE_DIF_IO_DIRECTION_FLG_MASK    0x1
-#define RDMA_SQ_FMR_WQE_DIF_IO_DIRECTION_FLG_SHIFT   0
-#define RDMA_SQ_FMR_WQE_DIF_BLOCK_SIZE_MASK          0x1
-#define RDMA_SQ_FMR_WQE_DIF_BLOCK_SIZE_SHIFT         1
-#define RDMA_SQ_FMR_WQE_DIF_RUNT_VALID_FLG_MASK      0x1
-#define RDMA_SQ_FMR_WQE_DIF_RUNT_VALID_FLG_SHIFT     2
-#define RDMA_SQ_FMR_WQE_DIF_VALIDATE_CRC_GUARD_MASK  0x1
-#define RDMA_SQ_FMR_WQE_DIF_VALIDATE_CRC_GUARD_SHIFT 3
-#define RDMA_SQ_FMR_WQE_DIF_VALIDATE_REF_TAG_MASK    0x1
-#define RDMA_SQ_FMR_WQE_DIF_VALIDATE_REF_TAG_SHIFT   4
-#define RDMA_SQ_FMR_WQE_DIF_VALIDATE_APP_TAG_MASK    0x1
-#define RDMA_SQ_FMR_WQE_DIF_VALIDATE_APP_TAG_SHIFT   5
-#define RDMA_SQ_FMR_WQE_DIF_CRC_SEED_MASK            0x1
-#define RDMA_SQ_FMR_WQE_DIF_CRC_SEED_SHIFT           6
-#define RDMA_SQ_FMR_WQE_RESERVED4_MASK               0x1FF
-#define RDMA_SQ_FMR_WQE_RESERVED4_SHIFT              7
-	__le32 Reserved5;
+#define RDMA_SQ_FMR_WQE_DIF_IO_DIRECTION_FLG_MASK	0x1
+#define RDMA_SQ_FMR_WQE_DIF_IO_DIRECTION_FLG_SHIFT	0
+#define RDMA_SQ_FMR_WQE_DIF_BLOCK_SIZE_MASK		0x1
+#define RDMA_SQ_FMR_WQE_DIF_BLOCK_SIZE_SHIFT		1
+#define RDMA_SQ_FMR_WQE_DIF_RUNT_VALID_FLG_MASK		0x1
+#define RDMA_SQ_FMR_WQE_DIF_RUNT_VALID_FLG_SHIFT	2
+#define RDMA_SQ_FMR_WQE_DIF_VALIDATE_CRC_GUARD_MASK	0x1
+#define RDMA_SQ_FMR_WQE_DIF_VALIDATE_CRC_GUARD_SHIFT	3
+#define RDMA_SQ_FMR_WQE_DIF_VALIDATE_REF_TAG_MASK	0x1
+#define RDMA_SQ_FMR_WQE_DIF_VALIDATE_REF_TAG_SHIFT	4
+#define RDMA_SQ_FMR_WQE_DIF_VALIDATE_APP_TAG_MASK	0x1
+#define RDMA_SQ_FMR_WQE_DIF_VALIDATE_APP_TAG_SHIFT	5
+#define RDMA_SQ_FMR_WQE_DIF_CRC_SEED_MASK		0x1
+#define RDMA_SQ_FMR_WQE_DIF_CRC_SEED_SHIFT		6
+#define RDMA_SQ_FMR_WQE_DIF_RX_REF_TAG_CONST_MASK	0x1
+#define RDMA_SQ_FMR_WQE_DIF_RX_REF_TAG_CONST_SHIFT	7
+#define RDMA_SQ_FMR_WQE_RESERVED4_MASK			0xFF
+#define RDMA_SQ_FMR_WQE_RESERVED4_SHIFT			8
+	__le32 reserved5;
 };
 
 /* First element (16 bytes) of fmr wqe */
@@ -558,23 +571,25 @@ struct rdma_sq_fmr_wqe_3rd {
 	__le16 dif_app_tag_mask;
 	__le16 dif_runt_crc_value;
 	__le16 dif_flags;
-#define RDMA_SQ_FMR_WQE_3RD_DIF_IO_DIRECTION_FLG_MASK    0x1
-#define RDMA_SQ_FMR_WQE_3RD_DIF_IO_DIRECTION_FLG_SHIFT   0
-#define RDMA_SQ_FMR_WQE_3RD_DIF_BLOCK_SIZE_MASK          0x1
-#define RDMA_SQ_FMR_WQE_3RD_DIF_BLOCK_SIZE_SHIFT         1
-#define RDMA_SQ_FMR_WQE_3RD_DIF_RUNT_VALID_FLG_MASK      0x1
-#define RDMA_SQ_FMR_WQE_3RD_DIF_RUNT_VALID_FLG_SHIFT     2
-#define RDMA_SQ_FMR_WQE_3RD_DIF_VALIDATE_CRC_GUARD_MASK  0x1
-#define RDMA_SQ_FMR_WQE_3RD_DIF_VALIDATE_CRC_GUARD_SHIFT 3
-#define RDMA_SQ_FMR_WQE_3RD_DIF_VALIDATE_REF_TAG_MASK    0x1
-#define RDMA_SQ_FMR_WQE_3RD_DIF_VALIDATE_REF_TAG_SHIFT   4
-#define RDMA_SQ_FMR_WQE_3RD_DIF_VALIDATE_APP_TAG_MASK    0x1
-#define RDMA_SQ_FMR_WQE_3RD_DIF_VALIDATE_APP_TAG_SHIFT   5
-#define RDMA_SQ_FMR_WQE_3RD_DIF_CRC_SEED_MASK            0x1
-#define RDMA_SQ_FMR_WQE_3RD_DIF_CRC_SEED_SHIFT           6
-#define RDMA_SQ_FMR_WQE_3RD_RESERVED4_MASK               0x1FF
-#define RDMA_SQ_FMR_WQE_3RD_RESERVED4_SHIFT              7
-	__le32 Reserved5;
+#define RDMA_SQ_FMR_WQE_3RD_DIF_IO_DIRECTION_FLG_MASK		0x1
+#define RDMA_SQ_FMR_WQE_3RD_DIF_IO_DIRECTION_FLG_SHIFT		0
+#define RDMA_SQ_FMR_WQE_3RD_DIF_BLOCK_SIZE_MASK			0x1
+#define RDMA_SQ_FMR_WQE_3RD_DIF_BLOCK_SIZE_SHIFT		1
+#define RDMA_SQ_FMR_WQE_3RD_DIF_RUNT_VALID_FLG_MASK		0x1
+#define RDMA_SQ_FMR_WQE_3RD_DIF_RUNT_VALID_FLG_SHIFT		2
+#define RDMA_SQ_FMR_WQE_3RD_DIF_VALIDATE_CRC_GUARD_MASK		0x1
+#define RDMA_SQ_FMR_WQE_3RD_DIF_VALIDATE_CRC_GUARD_SHIFT	3
+#define RDMA_SQ_FMR_WQE_3RD_DIF_VALIDATE_REF_TAG_MASK		0x1
+#define RDMA_SQ_FMR_WQE_3RD_DIF_VALIDATE_REF_TAG_SHIFT		4
+#define RDMA_SQ_FMR_WQE_3RD_DIF_VALIDATE_APP_TAG_MASK		0x1
+#define RDMA_SQ_FMR_WQE_3RD_DIF_VALIDATE_APP_TAG_SHIFT		5
+#define RDMA_SQ_FMR_WQE_3RD_DIF_CRC_SEED_MASK			0x1
+#define RDMA_SQ_FMR_WQE_3RD_DIF_CRC_SEED_SHIFT			6
+#define RDMA_SQ_FMR_WQE_3RD_DIF_RX_REF_TAG_CONST_MASK		0x1
+#define RDMA_SQ_FMR_WQE_3RD_DIF_RX_REF_TAG_CONST_SHIFT		7
+#define RDMA_SQ_FMR_WQE_3RD_RESERVED4_MASK			0xFF
+#define RDMA_SQ_FMR_WQE_3RD_RESERVED4_SHIFT			8
+	__le32 reserved5;
 };
 
 struct rdma_sq_local_inv_wqe {
@@ -606,20 +621,22 @@ struct rdma_sq_rdma_wqe {
 	__le32 xrc_srq;
 	u8 req_type;
 	u8 flags;
-#define RDMA_SQ_RDMA_WQE_COMP_FLG_MASK                  0x1
-#define RDMA_SQ_RDMA_WQE_COMP_FLG_SHIFT                 0
-#define RDMA_SQ_RDMA_WQE_RD_FENCE_FLG_MASK              0x1
-#define RDMA_SQ_RDMA_WQE_RD_FENCE_FLG_SHIFT             1
-#define RDMA_SQ_RDMA_WQE_INV_FENCE_FLG_MASK             0x1
-#define RDMA_SQ_RDMA_WQE_INV_FENCE_FLG_SHIFT            2
-#define RDMA_SQ_RDMA_WQE_SE_FLG_MASK                    0x1
-#define RDMA_SQ_RDMA_WQE_SE_FLG_SHIFT                   3
-#define RDMA_SQ_RDMA_WQE_INLINE_FLG_MASK                0x1
-#define RDMA_SQ_RDMA_WQE_INLINE_FLG_SHIFT               4
-#define RDMA_SQ_RDMA_WQE_DIF_ON_HOST_FLG_MASK           0x1
-#define RDMA_SQ_RDMA_WQE_DIF_ON_HOST_FLG_SHIFT          5
-#define RDMA_SQ_RDMA_WQE_RESERVED0_MASK                 0x3
-#define RDMA_SQ_RDMA_WQE_RESERVED0_SHIFT                6
+#define RDMA_SQ_RDMA_WQE_COMP_FLG_MASK		0x1
+#define RDMA_SQ_RDMA_WQE_COMP_FLG_SHIFT		0
+#define RDMA_SQ_RDMA_WQE_RD_FENCE_FLG_MASK	0x1
+#define RDMA_SQ_RDMA_WQE_RD_FENCE_FLG_SHIFT	1
+#define RDMA_SQ_RDMA_WQE_INV_FENCE_FLG_MASK	0x1
+#define RDMA_SQ_RDMA_WQE_INV_FENCE_FLG_SHIFT	2
+#define RDMA_SQ_RDMA_WQE_SE_FLG_MASK		0x1
+#define RDMA_SQ_RDMA_WQE_SE_FLG_SHIFT		3
+#define RDMA_SQ_RDMA_WQE_INLINE_FLG_MASK	0x1
+#define RDMA_SQ_RDMA_WQE_INLINE_FLG_SHIFT	4
+#define RDMA_SQ_RDMA_WQE_DIF_ON_HOST_FLG_MASK	0x1
+#define RDMA_SQ_RDMA_WQE_DIF_ON_HOST_FLG_SHIFT	5
+#define RDMA_SQ_RDMA_WQE_READ_INV_FLG_MASK	0x1
+#define RDMA_SQ_RDMA_WQE_READ_INV_FLG_SHIFT	6
+#define RDMA_SQ_RDMA_WQE_RESERVED0_MASK		0x1
+#define RDMA_SQ_RDMA_WQE_RESERVED0_SHIFT	7
 	u8 wqe_size;
 	u8 prev_wqe_size;
 	struct regpair remote_va;
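
The flag fields above follow the qed convention of MASK/SHIFT macro pairs describing sub-byte fields. A minimal sketch of how such pairs are read and written; the SET/GET helpers below are generic reimplementations in the spirit of qed's SET_FIELD()/GET_FIELD(), not the driver's own macros:

	#include <linux/types.h>

	#define EX_SET_FIELD(value, name, field)				\
		((value) = ((value) & ~((name##_MASK) << (name##_SHIFT))) |	\
			   (((field) & (name##_MASK)) << (name##_SHIFT)))
	#define EX_GET_FIELD(value, name)					\
		(((value) >> (name##_SHIFT)) & (name##_MASK))

	#define EX_READ_INV_FLG_MASK	0x1
	#define EX_READ_INV_FLG_SHIFT	6

	static inline u8 mark_read_inv(u8 flags)
	{
		EX_SET_FIELD(flags, EX_READ_INV_FLG, 1);	/* set bit 6 */
		return flags;
	}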
diff --git a/drivers/net/Kconfig b/drivers/net/Kconfig
index 0936da592e12..944ec3c9282c 100644
--- a/drivers/net/Kconfig
+++ b/drivers/net/Kconfig
@@ -497,4 +497,15 @@ config THUNDERBOLT_NET
 
 source "drivers/net/hyperv/Kconfig"
 
+config NETDEVSIM
+	tristate "Simulated networking device"
+	depends on DEBUG_FS
+	help
+	  This driver is a developer testing tool and software model that can
+	  be used to test various control path networking APIs, especially
+	  HW-offload related.
+
+	  To compile this driver as a module, choose M here: the module
+	  will be called netdevsim.
+
 endif # NETDEVICES
diff --git a/drivers/net/Makefile b/drivers/net/Makefile
index 766f62d02a0b..04c3b747812c 100644
--- a/drivers/net/Makefile
+++ b/drivers/net/Makefile
@@ -78,3 +78,4 @@ obj-$(CONFIG_FUJITSU_ES) += fjes/
 
 thunderbolt-net-y += thunderbolt.o
 obj-$(CONFIG_THUNDERBOLT_NET) += thunderbolt-net.o
+obj-$(CONFIG_NETDEVSIM) += netdevsim/
diff --git a/drivers/net/bonding/bond_options.c b/drivers/net/bonding/bond_options.c
index 8a9b085c2a98..58c705f24f96 100644
--- a/drivers/net/bonding/bond_options.c
+++ b/drivers/net/bonding/bond_options.c
@@ -1431,13 +1431,9 @@ static int bond_option_ad_actor_system_set(struct bonding *bond,
 {
 	u8 macaddr[ETH_ALEN];
 	u8 *mac;
-	int i;
 
 	if (newval->string) {
-		i = sscanf(newval->string, "%hhx:%hhx:%hhx:%hhx:%hhx:%hhx",
-			   &macaddr[0], &macaddr[1], &macaddr[2],
-			   &macaddr[3], &macaddr[4], &macaddr[5]);
-		if (i != ETH_ALEN)
+		if (!mac_pton(newval->string, macaddr))
 			goto err;
 		mac = macaddr;
 	} else {
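
mac_pton() parses a full aa:bb:cc:dd:ee:ff string and returns false on anything malformed or truncated, so the element-count check that the removed sscanf() needed disappears. A small usage sketch (the wrapper is hypothetical; mac_pton() itself is the kernel helper from lib/net_utils.c):

	#include <linux/if_ether.h>
	#include <linux/kernel.h>

	static int example_parse_mac(const char *str, u8 mac[ETH_ALEN])
	{
		if (!mac_pton(str, mac))
			return -EINVAL;	/* rejects short or malformed input */
		return 0;
	}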
diff --git a/drivers/net/caif/caif_hsi.c b/drivers/net/caif/caif_hsi.c
index b8029ea03307..433a14b9f731 100644
--- a/drivers/net/caif/caif_hsi.c
+++ b/drivers/net/caif/caif_hsi.c
@@ -264,7 +264,6 @@ static int cfhsi_tx_frm(struct cfhsi_desc *desc, struct cfhsi *cfhsi)
 	}
 
 	/* Create payload CAIF frames. */
-	pfrm = desc->emb_frm + CFHSI_MAX_EMB_FRM_SZ;
 	while (nfrms < CFHSI_MAX_PKTS) {
 		struct caif_payload_info *info;
 		int hpad;
diff --git a/drivers/net/can/c_can/c_can_pci.c b/drivers/net/can/c_can/c_can_pci.c
index d065c0e2d18e..406b4847e5dc 100644
--- a/drivers/net/can/c_can/c_can_pci.c
+++ b/drivers/net/can/c_can/c_can_pci.c
@@ -251,14 +251,14 @@ static void c_can_pci_remove(struct pci_dev *pdev)
 	pci_disable_device(pdev);
 }
 
-static struct c_can_pci_data c_can_sta2x11= {
+static const struct c_can_pci_data c_can_sta2x11 = {
 	.type = BOSCH_C_CAN,
 	.reg_align = C_CAN_REG_ALIGN_32,
 	.freq = 52000000, /* 52 MHz */
 	.bar = 0,
 };
 
-static struct c_can_pci_data c_can_pch = {
+static const struct c_can_pci_data c_can_pch = {
 	.type = BOSCH_C_CAN,
 	.reg_align = C_CAN_REG_32,
 	.freq = 50000000, /* 50 MHz */
diff --git a/drivers/net/can/dev.c b/drivers/net/can/dev.c
index 365a8cc62405..b1779566c5bb 100644
--- a/drivers/net/can/dev.c
+++ b/drivers/net/can/dev.c
@@ -27,6 +27,7 @@
 #include <linux/can/skb.h>
 #include <linux/can/netlink.h>
 #include <linux/can/led.h>
+#include <linux/of.h>
 #include <net/rtnetlink.h>
 
 #define MOD_DESC "CAN device driver interface"
@@ -411,7 +412,7 @@ EXPORT_SYMBOL_GPL(can_change_state);
  * Local echo of CAN messages
  *
  * CAN network devices *should* support a local echo functionality
- * (see Documentation/networking/can.txt). To test the handling of CAN
+ * (see Documentation/networking/can.rst). To test the handling of CAN
  * interfaces that do not support the local echo both driver types are
  * implemented. In the case that the driver does not support the echo
  * the IFF_ECHO remains clear in dev->flags. This causes the PF_CAN core
@@ -814,6 +815,29 @@ int open_candev(struct net_device *dev)
 }
 EXPORT_SYMBOL_GPL(open_candev);
 
+#ifdef CONFIG_OF
+/* Common function that can be used to understand the limitations of
+ * a transceiver when it provides no means to determine these limitations
+ * at runtime.
+ */
+void of_can_transceiver(struct net_device *dev)
+{
+	struct device_node *dn;
+	struct can_priv *priv = netdev_priv(dev);
+	struct device_node *np = dev->dev.parent->of_node;
+	int ret;
+
+	dn = of_get_child_by_name(np, "can-transceiver");
+	if (!dn)
+		return;
+
+	ret = of_property_read_u32(dn, "max-bitrate", &priv->bitrate_max);
+	if ((ret && ret != -EINVAL) || (!ret && !priv->bitrate_max))
+		netdev_warn(dev, "Invalid value for transceiver max bitrate. Ignoring bitrate limit.\n");
+}
+EXPORT_SYMBOL_GPL(of_can_transceiver);
+#endif
+
 /*
  * Common close function for cleanup before the device gets closed.
  *
@@ -913,6 +937,13 @@ static int can_changelink(struct net_device *dev, struct nlattr *tb[],
 					priv->bitrate_const_cnt);
 		if (err)
 			return err;
+
+		if (priv->bitrate_max && bt.bitrate > priv->bitrate_max) {
+			netdev_err(dev, "arbitration bitrate surpasses transceiver capabilities of %d bps\n",
+				   priv->bitrate_max);
+			return -EINVAL;
+		}
+
 		memcpy(&priv->bittiming, &bt, sizeof(bt));
 
 		if (priv->do_set_bittiming) {
@@ -997,6 +1028,13 @@ static int can_changelink(struct net_device *dev, struct nlattr *tb[],
 					priv->data_bitrate_const_cnt);
 		if (err)
 			return err;
+
+		if (priv->bitrate_max && dbt.bitrate > priv->bitrate_max) {
+			netdev_err(dev, "canfd data bitrate surpasses transceiver capabilities of %d bps\n",
+				   priv->bitrate_max);
+			return -EINVAL;
+		}
+
 		memcpy(&priv->data_bittiming, &dbt, sizeof(dbt));
 
 		if (priv->do_set_data_bittiming) {
@@ -1064,6 +1102,7 @@ static size_t can_get_size(const struct net_device *dev)
 	if (priv->data_bitrate_const)				/* IFLA_CAN_DATA_BITRATE_CONST */
 		size += nla_total_size(sizeof(*priv->data_bitrate_const) *
 				       priv->data_bitrate_const_cnt);
+	size += nla_total_size(sizeof(priv->bitrate_max));	/* IFLA_CAN_BITRATE_MAX */
 
 	return size;
 }
@@ -1121,7 +1160,11 @@ static int can_fill_info(struct sk_buff *skb, const struct net_device *dev)
 	     nla_put(skb, IFLA_CAN_DATA_BITRATE_CONST,
 		     sizeof(*priv->data_bitrate_const) *
 		     priv->data_bitrate_const_cnt,
-		     priv->data_bitrate_const))
+		     priv->data_bitrate_const)) ||
+
+	    (nla_put(skb, IFLA_CAN_BITRATE_MAX,
+		     sizeof(priv->bitrate_max),
+		     &priv->bitrate_max))
 	    )
 
 		return -EMSGSIZE;
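
A driver consumes the new of_can_transceiver() helper once during probe; the limit it records in priv->bitrate_max is then enforced centrally by the can_changelink() checks added above. A hedged probe sketch (the device name and the bare use of struct can_priv as the whole private area are illustrative):

	static int example_can_probe(struct platform_device *pdev)
	{
		struct net_device *dev;
		int err;

		dev = alloc_candev(sizeof(struct can_priv), 1);
		if (!dev)
			return -ENOMEM;
		dev->dev.parent = &pdev->dev;

		/* Reads "max-bitrate" from the optional "can-transceiver"
		 * child node into priv->bitrate_max; later netlink bitrate
		 * changes above that limit fail with -EINVAL.
		 */
		of_can_transceiver(dev);

		err = register_candev(dev);
		if (err)
			free_candev(dev);
		return err;
	}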
diff --git a/drivers/net/can/flexcan.c b/drivers/net/can/flexcan.c
index 760d2c07e3a2..634c51e6b8ae 100644
--- a/drivers/net/can/flexcan.c
+++ b/drivers/net/can/flexcan.c
@@ -190,6 +190,7 @@
  *   MX53  FlexCAN2  03.00.00.00    yes        no        no       no        no
  *   MX6s  FlexCAN3  10.00.12.00    yes       yes        no       no       yes
  *   VF610 FlexCAN3  ?               no       yes        no      yes       yes?
+ * LS1021A FlexCAN2  03.00.04.00     no       yes        no       no       yes
  *
  * Some SOCs do not have the RX_WARN & TX_WARN interrupt line connected.
  */
@@ -279,6 +280,10 @@ struct flexcan_priv {
 	struct clk *clk_per;
 	const struct flexcan_devtype_data *devtype_data;
 	struct regulator *reg_xceiver;
+
+	/* Read and Write APIs */
+	u32 (*read)(void __iomem *addr);
+	void (*write)(u32 val, void __iomem *addr);
 };
 
 static const struct flexcan_devtype_data fsl_p1010_devtype_data = {
@@ -301,6 +306,12 @@ static const struct flexcan_devtype_data fsl_vf610_devtype_data = {
 		FLEXCAN_QUIRK_BROKEN_PERR_STATE,
 };
 
+static const struct flexcan_devtype_data fsl_ls1021a_r2_devtype_data = {
+	.quirks = FLEXCAN_QUIRK_DISABLE_RXFG | FLEXCAN_QUIRK_ENABLE_EACEN_RRS |
+		FLEXCAN_QUIRK_DISABLE_MECR | FLEXCAN_QUIRK_BROKEN_PERR_STATE |
+		FLEXCAN_QUIRK_USE_OFF_TIMESTAMP,
+};
+
 static const struct can_bittiming_const flexcan_bittiming_const = {
 	.name = DRV_NAME,
 	.tseg1_min = 4,
@@ -313,39 +324,45 @@ static const struct can_bittiming_const flexcan_bittiming_const = {
 	.brp_inc = 1,
 };
 
-/* Abstract off the read/write for arm versus ppc. This
- * assumes that PPC uses big-endian registers and everything
- * else uses little-endian registers, independent of CPU
- * endianness.
+/* FlexCAN module is essentially modelled as a little-endian IP in most
+ * SoCs, i.e. the registers as well as the message buffer areas are
+ * implemented in a little-endian fashion.
+ *
+ * However there are some SoCs (e.g. LS1021A) which implement the FlexCAN
+ * module in a big-endian fashion (i.e. the registers as well as the
+ * message buffer areas are implemented in a big-endian way).
+ *
+ * In addition, the FlexCAN module can be found on SoCs having ARM or
+ * PPC cores. So, we need to abstract off the register read/write
+ * functions, ensuring that these cater to all the combinations of module
+ * endianness and underlying CPU endianness.
  */
-#if defined(CONFIG_PPC)
-static inline u32 flexcan_read(void __iomem *addr)
+static inline u32 flexcan_read_be(void __iomem *addr)
 {
-	return in_be32(addr);
+	return ioread32be(addr);
 }
 
-static inline void flexcan_write(u32 val, void __iomem *addr)
+static inline void flexcan_write_be(u32 val, void __iomem *addr)
 {
-	out_be32(addr, val);
+	iowrite32be(val, addr);
 }
-#else
-static inline u32 flexcan_read(void __iomem *addr)
+
+static inline u32 flexcan_read_le(void __iomem *addr)
 {
-	return readl(addr);
+	return ioread32(addr);
 }
 
-static inline void flexcan_write(u32 val, void __iomem *addr)
+static inline void flexcan_write_le(u32 val, void __iomem *addr)
 {
-	writel(val, addr);
+	iowrite32(val, addr);
 }
-#endif
 
 static inline void flexcan_error_irq_enable(const struct flexcan_priv *priv)
 {
 	struct flexcan_regs __iomem *regs = priv->regs;
 	u32 reg_ctrl = (priv->reg_ctrl_default | FLEXCAN_CTRL_ERR_MSK);
 
-	flexcan_write(reg_ctrl, &regs->ctrl);
+	priv->write(reg_ctrl, &regs->ctrl);
 }
 
 static inline void flexcan_error_irq_disable(const struct flexcan_priv *priv)
@@ -353,7 +370,7 @@ static inline void flexcan_error_irq_disable(const struct flexcan_priv *priv)
 	struct flexcan_regs __iomem *regs = priv->regs;
 	u32 reg_ctrl = (priv->reg_ctrl_default & ~FLEXCAN_CTRL_ERR_MSK);
 
-	flexcan_write(reg_ctrl, &regs->ctrl);
+	priv->write(reg_ctrl, &regs->ctrl);
 }
 
 static inline int flexcan_transceiver_enable(const struct flexcan_priv *priv)
@@ -378,14 +395,14 @@ static int flexcan_chip_enable(struct flexcan_priv *priv)
 	unsigned int timeout = FLEXCAN_TIMEOUT_US / 10;
 	u32 reg;
 
-	reg = flexcan_read(&regs->mcr);
+	reg = priv->read(&regs->mcr);
 	reg &= ~FLEXCAN_MCR_MDIS;
-	flexcan_write(reg, &regs->mcr);
+	priv->write(reg, &regs->mcr);
 
-	while (timeout-- && (flexcan_read(&regs->mcr) & FLEXCAN_MCR_LPM_ACK))
+	while (timeout-- && (priv->read(&regs->mcr) & FLEXCAN_MCR_LPM_ACK))
 		udelay(10);
 
-	if (flexcan_read(&regs->mcr) & FLEXCAN_MCR_LPM_ACK)
+	if (priv->read(&regs->mcr) & FLEXCAN_MCR_LPM_ACK)
 		return -ETIMEDOUT;
 
 	return 0;
@@ -397,14 +414,14 @@ static int flexcan_chip_disable(struct flexcan_priv *priv)
 	unsigned int timeout = FLEXCAN_TIMEOUT_US / 10;
 	u32 reg;
 
-	reg = flexcan_read(&regs->mcr);
+	reg = priv->read(&regs->mcr);
 	reg |= FLEXCAN_MCR_MDIS;
-	flexcan_write(reg, &regs->mcr);
+	priv->write(reg, &regs->mcr);
 
-	while (timeout-- && !(flexcan_read(&regs->mcr) & FLEXCAN_MCR_LPM_ACK))
+	while (timeout-- && !(priv->read(&regs->mcr) & FLEXCAN_MCR_LPM_ACK))
 		udelay(10);
 
-	if (!(flexcan_read(&regs->mcr) & FLEXCAN_MCR_LPM_ACK))
+	if (!(priv->read(&regs->mcr) & FLEXCAN_MCR_LPM_ACK))
 		return -ETIMEDOUT;
 
 	return 0;
@@ -416,14 +433,14 @@ static int flexcan_chip_freeze(struct flexcan_priv *priv)
 	unsigned int timeout = 1000 * 1000 * 10 / priv->can.bittiming.bitrate;
 	u32 reg;
 
-	reg = flexcan_read(&regs->mcr);
+	reg = priv->read(&regs->mcr);
 	reg |= FLEXCAN_MCR_HALT;
-	flexcan_write(reg, &regs->mcr);
+	priv->write(reg, &regs->mcr);
 
-	while (timeout-- && !(flexcan_read(&regs->mcr) & FLEXCAN_MCR_FRZ_ACK))
+	while (timeout-- && !(priv->read(&regs->mcr) & FLEXCAN_MCR_FRZ_ACK))
 		udelay(100);
 
-	if (!(flexcan_read(&regs->mcr) & FLEXCAN_MCR_FRZ_ACK))
+	if (!(priv->read(&regs->mcr) & FLEXCAN_MCR_FRZ_ACK))
 		return -ETIMEDOUT;
 
 	return 0;
@@ -435,14 +452,14 @@ static int flexcan_chip_unfreeze(struct flexcan_priv *priv)
 	unsigned int timeout = FLEXCAN_TIMEOUT_US / 10;
 	u32 reg;
 
-	reg = flexcan_read(&regs->mcr);
+	reg = priv->read(&regs->mcr);
 	reg &= ~FLEXCAN_MCR_HALT;
-	flexcan_write(reg, &regs->mcr);
+	priv->write(reg, &regs->mcr);
 
-	while (timeout-- && (flexcan_read(&regs->mcr) & FLEXCAN_MCR_FRZ_ACK))
+	while (timeout-- && (priv->read(&regs->mcr) & FLEXCAN_MCR_FRZ_ACK))
 		udelay(10);
 
-	if (flexcan_read(&regs->mcr) & FLEXCAN_MCR_FRZ_ACK)
+	if (priv->read(&regs->mcr) & FLEXCAN_MCR_FRZ_ACK)
 		return -ETIMEDOUT;
 
 	return 0;
@@ -453,11 +470,11 @@ static int flexcan_chip_softreset(struct flexcan_priv *priv)
 	struct flexcan_regs __iomem *regs = priv->regs;
 	unsigned int timeout = FLEXCAN_TIMEOUT_US / 10;
 
-	flexcan_write(FLEXCAN_MCR_SOFTRST, &regs->mcr);
-	while (timeout-- && (flexcan_read(&regs->mcr) & FLEXCAN_MCR_SOFTRST))
+	priv->write(FLEXCAN_MCR_SOFTRST, &regs->mcr);
+	while (timeout-- && (priv->read(&regs->mcr) & FLEXCAN_MCR_SOFTRST))
 		udelay(10);
 
-	if (flexcan_read(&regs->mcr) & FLEXCAN_MCR_SOFTRST)
+	if (priv->read(&regs->mcr) & FLEXCAN_MCR_SOFTRST)
 		return -ETIMEDOUT;
 
 	return 0;
@@ -468,7 +485,7 @@ static int __flexcan_get_berr_counter(const struct net_device *dev,
 {
 	const struct flexcan_priv *priv = netdev_priv(dev);
 	struct flexcan_regs __iomem *regs = priv->regs;
-	u32 reg = flexcan_read(&regs->ecr);
+	u32 reg = priv->read(&regs->ecr);
 
 	bec->txerr = (reg >> 0) & 0xff;
 	bec->rxerr = (reg >> 8) & 0xff;
@@ -524,24 +541,24 @@ static int flexcan_start_xmit(struct sk_buff *skb, struct net_device *dev)
 
 	if (cf->can_dlc > 0) {
 		data = be32_to_cpup((__be32 *)&cf->data[0]);
-		flexcan_write(data, &priv->tx_mb->data[0]);
+		priv->write(data, &priv->tx_mb->data[0]);
 	}
 	if (cf->can_dlc > 4) {
 		data = be32_to_cpup((__be32 *)&cf->data[4]);
-		flexcan_write(data, &priv->tx_mb->data[1]);
+		priv->write(data, &priv->tx_mb->data[1]);
 	}
 
 	can_put_echo_skb(skb, dev, 0);
 
-	flexcan_write(can_id, &priv->tx_mb->can_id);
-	flexcan_write(ctrl, &priv->tx_mb->can_ctrl);
+	priv->write(can_id, &priv->tx_mb->can_id);
+	priv->write(ctrl, &priv->tx_mb->can_ctrl);
 
 	/* Errata ERR005829 step8:
 	 * Write twice INACTIVE(0x8) code to first MB.
 	 */
-	flexcan_write(FLEXCAN_MB_CODE_TX_INACTIVE,
+	priv->write(FLEXCAN_MB_CODE_TX_INACTIVE,
 		      &priv->tx_mb_reserved->can_ctrl);
-	flexcan_write(FLEXCAN_MB_CODE_TX_INACTIVE,
+	priv->write(FLEXCAN_MB_CODE_TX_INACTIVE,
 		      &priv->tx_mb_reserved->can_ctrl);
 
 	return NETDEV_TX_OK;
@@ -660,7 +677,7 @@ static unsigned int flexcan_mailbox_read(struct can_rx_offload *offload,
 		u32 code;
 
 		do {
-			reg_ctrl = flexcan_read(&mb->can_ctrl);
+			reg_ctrl = priv->read(&mb->can_ctrl);
 		} while (reg_ctrl & FLEXCAN_MB_CODE_RX_BUSY_BIT);
 
 		/* is this MB empty? */
@@ -675,17 +692,17 @@ static unsigned int flexcan_mailbox_read(struct can_rx_offload *offload,
 			offload->dev->stats.rx_errors++;
 		}
 	} else {
-		reg_iflag1 = flexcan_read(&regs->iflag1);
+		reg_iflag1 = priv->read(&regs->iflag1);
 		if (!(reg_iflag1 & FLEXCAN_IFLAG_RX_FIFO_AVAILABLE))
 			return 0;
 
-		reg_ctrl = flexcan_read(&mb->can_ctrl);
+		reg_ctrl = priv->read(&mb->can_ctrl);
 	}
 
 	/* increase timestamp to full 32 bit */
 	*timestamp = reg_ctrl << 16;
 
-	reg_id = flexcan_read(&mb->can_id);
+	reg_id = priv->read(&mb->can_id);
 	if (reg_ctrl & FLEXCAN_MB_CNT_IDE)
 		cf->can_id = ((reg_id >> 0) & CAN_EFF_MASK) | CAN_EFF_FLAG;
 	else
@@ -695,19 +712,19 @@ static unsigned int flexcan_mailbox_read(struct can_rx_offload *offload,
 		cf->can_id |= CAN_RTR_FLAG;
 	cf->can_dlc = get_can_dlc((reg_ctrl >> 16) & 0xf);
 
-	*(__be32 *)(cf->data + 0) = cpu_to_be32(flexcan_read(&mb->data[0]));
-	*(__be32 *)(cf->data + 4) = cpu_to_be32(flexcan_read(&mb->data[1]));
+	*(__be32 *)(cf->data + 0) = cpu_to_be32(priv->read(&mb->data[0]));
+	*(__be32 *)(cf->data + 4) = cpu_to_be32(priv->read(&mb->data[1]));
 
 	/* mark as read */
 	if (priv->devtype_data->quirks & FLEXCAN_QUIRK_USE_OFF_TIMESTAMP) {
 		/* Clear IRQ */
 		if (n < 32)
-			flexcan_write(BIT(n), &regs->iflag1);
+			priv->write(BIT(n), &regs->iflag1);
 		else
-			flexcan_write(BIT(n - 32), &regs->iflag2);
+			priv->write(BIT(n - 32), &regs->iflag2);
 	} else {
-		flexcan_write(FLEXCAN_IFLAG_RX_FIFO_AVAILABLE, &regs->iflag1);
-		flexcan_read(&regs->timer);
+		priv->write(FLEXCAN_IFLAG_RX_FIFO_AVAILABLE, &regs->iflag1);
+		priv->read(&regs->timer);
 	}
 
 	return 1;
@@ -719,8 +736,8 @@ static inline u64 flexcan_read_reg_iflag_rx(struct flexcan_priv *priv)
 	struct flexcan_regs __iomem *regs = priv->regs;
 	u32 iflag1, iflag2;
 
-	iflag2 = flexcan_read(&regs->iflag2) & priv->reg_imask2_default;
-	iflag1 = flexcan_read(&regs->iflag1) & priv->reg_imask1_default &
+	iflag2 = priv->read(&regs->iflag2) & priv->reg_imask2_default;
+	iflag1 = priv->read(&regs->iflag1) & priv->reg_imask1_default &
 		~FLEXCAN_IFLAG_MB(priv->tx_mb_idx);
 
 	return (u64)iflag2 << 32 | iflag1;
@@ -736,7 +753,7 @@ static irqreturn_t flexcan_irq(int irq, void *dev_id)
 	u32 reg_iflag1, reg_esr;
 	enum can_state last_state = priv->can.state;
 
-	reg_iflag1 = flexcan_read(&regs->iflag1);
+	reg_iflag1 = priv->read(&regs->iflag1);
 
 	/* reception interrupt */
 	if (priv->devtype_data->quirks & FLEXCAN_QUIRK_USE_OFF_TIMESTAMP) {
@@ -759,7 +776,8 @@ static irqreturn_t flexcan_irq(int irq, void *dev_id)
 		/* FIFO overflow interrupt */
 		if (reg_iflag1 & FLEXCAN_IFLAG_RX_FIFO_OVERFLOW) {
 			handled = IRQ_HANDLED;
-			flexcan_write(FLEXCAN_IFLAG_RX_FIFO_OVERFLOW, &regs->iflag1);
+			priv->write(FLEXCAN_IFLAG_RX_FIFO_OVERFLOW,
+				    &regs->iflag1);
 			dev->stats.rx_over_errors++;
 			dev->stats.rx_errors++;
 		}
@@ -773,18 +791,18 @@ static irqreturn_t flexcan_irq(int irq, void *dev_id)
 		can_led_event(dev, CAN_LED_EVENT_TX);
 
 		/* after sending a RTR frame MB is in RX mode */
-		flexcan_write(FLEXCAN_MB_CODE_TX_INACTIVE,
-			      &priv->tx_mb->can_ctrl);
-		flexcan_write(FLEXCAN_IFLAG_MB(priv->tx_mb_idx), &regs->iflag1);
+		priv->write(FLEXCAN_MB_CODE_TX_INACTIVE,
+			    &priv->tx_mb->can_ctrl);
+		priv->write(FLEXCAN_IFLAG_MB(priv->tx_mb_idx), &regs->iflag1);
 		netif_wake_queue(dev);
 	}
 
-	reg_esr = flexcan_read(&regs->esr);
+	reg_esr = priv->read(&regs->esr);
 
 	/* ACK all bus error and state change IRQ sources */
 	if (reg_esr & FLEXCAN_ESR_ALL_INT) {
 		handled = IRQ_HANDLED;
-		flexcan_write(reg_esr & FLEXCAN_ESR_ALL_INT, &regs->esr);
+		priv->write(reg_esr & FLEXCAN_ESR_ALL_INT, &regs->esr);
 	}
 
 	/* state change interrupt or broken error state quirk fix is enabled */
@@ -846,7 +864,7 @@ static void flexcan_set_bittiming(struct net_device *dev)
 	struct flexcan_regs __iomem *regs = priv->regs;
 	u32 reg;
 
-	reg = flexcan_read(&regs->ctrl);
+	reg = priv->read(&regs->ctrl);
 	reg &= ~(FLEXCAN_CTRL_PRESDIV(0xff) |
 		 FLEXCAN_CTRL_RJW(0x3) |
 		 FLEXCAN_CTRL_PSEG1(0x7) |
@@ -870,11 +888,11 @@ static void flexcan_set_bittiming(struct net_device *dev)
 		reg |= FLEXCAN_CTRL_SMP;
 
 	netdev_dbg(dev, "writing ctrl=0x%08x\n", reg);
-	flexcan_write(reg, &regs->ctrl);
+	priv->write(reg, &regs->ctrl);
 
 	/* print chip status */
 	netdev_dbg(dev, "%s: mcr=0x%08x ctrl=0x%08x\n", __func__,
-		   flexcan_read(&regs->mcr), flexcan_read(&regs->ctrl));
+		   priv->read(&regs->mcr), priv->read(&regs->ctrl));
 }
 
 /* flexcan_chip_start
@@ -913,7 +931,7 @@ static int flexcan_chip_start(struct net_device *dev)
 	 * choose format C
 	 * set max mailbox number
 	 */
-	reg_mcr = flexcan_read(&regs->mcr);
+	reg_mcr = priv->read(&regs->mcr);
 	reg_mcr &= ~FLEXCAN_MCR_MAXMB(0xff);
 	reg_mcr |= FLEXCAN_MCR_FRZ | FLEXCAN_MCR_HALT | FLEXCAN_MCR_SUPV |
 		FLEXCAN_MCR_WRN_EN | FLEXCAN_MCR_SRX_DIS | FLEXCAN_MCR_IRMQ |
@@ -927,7 +945,7 @@ static int flexcan_chip_start(struct net_device *dev)
 			FLEXCAN_MCR_MAXMB(priv->tx_mb_idx);
 	}
 	netdev_dbg(dev, "%s: writing mcr=0x%08x", __func__, reg_mcr);
-	flexcan_write(reg_mcr, &regs->mcr);
+	priv->write(reg_mcr, &regs->mcr);
 
 	/* CTRL
 	 *
@@ -940,7 +958,7 @@ static int flexcan_chip_start(struct net_device *dev)
 	 * enable bus off interrupt
 	 * (== FLEXCAN_CTRL_ERR_STATE)
 	 */
-	reg_ctrl = flexcan_read(&regs->ctrl);
+	reg_ctrl = priv->read(&regs->ctrl);
 	reg_ctrl &= ~FLEXCAN_CTRL_TSYN;
 	reg_ctrl |= FLEXCAN_CTRL_BOFF_REC | FLEXCAN_CTRL_LBUF |
 		FLEXCAN_CTRL_ERR_STATE;
@@ -960,45 +978,45 @@ static int flexcan_chip_start(struct net_device *dev)
 	/* leave interrupts disabled for now */
 	reg_ctrl &= ~FLEXCAN_CTRL_ERR_ALL;
 	netdev_dbg(dev, "%s: writing ctrl=0x%08x", __func__, reg_ctrl);
-	flexcan_write(reg_ctrl, &regs->ctrl);
+	priv->write(reg_ctrl, &regs->ctrl);
 
 	if ((priv->devtype_data->quirks & FLEXCAN_QUIRK_ENABLE_EACEN_RRS)) {
-		reg_ctrl2 = flexcan_read(&regs->ctrl2);
+		reg_ctrl2 = priv->read(&regs->ctrl2);
 		reg_ctrl2 |= FLEXCAN_CTRL2_EACEN | FLEXCAN_CTRL2_RRS;
-		flexcan_write(reg_ctrl2, &regs->ctrl2);
+		priv->write(reg_ctrl2, &regs->ctrl2);
 	}
 
 	/* clear and invalidate all mailboxes first */
 	for (i = priv->tx_mb_idx; i < ARRAY_SIZE(regs->mb); i++) {
-		flexcan_write(FLEXCAN_MB_CODE_RX_INACTIVE,
-			      &regs->mb[i].can_ctrl);
+		priv->write(FLEXCAN_MB_CODE_RX_INACTIVE,
+			    &regs->mb[i].can_ctrl);
 	}
 
 	if (priv->devtype_data->quirks & FLEXCAN_QUIRK_USE_OFF_TIMESTAMP) {
 		for (i = priv->offload.mb_first; i <= priv->offload.mb_last; i++)
-			flexcan_write(FLEXCAN_MB_CODE_RX_EMPTY,
-				      &regs->mb[i].can_ctrl);
+			priv->write(FLEXCAN_MB_CODE_RX_EMPTY,
+				    &regs->mb[i].can_ctrl);
 	}
 
 	/* Errata ERR005829: mark first TX mailbox as INACTIVE */
-	flexcan_write(FLEXCAN_MB_CODE_TX_INACTIVE,
-		      &priv->tx_mb_reserved->can_ctrl);
+	priv->write(FLEXCAN_MB_CODE_TX_INACTIVE,
+		    &priv->tx_mb_reserved->can_ctrl);
 
 	/* mark TX mailbox as INACTIVE */
-	flexcan_write(FLEXCAN_MB_CODE_TX_INACTIVE,
-		      &priv->tx_mb->can_ctrl);
+	priv->write(FLEXCAN_MB_CODE_TX_INACTIVE,
+		    &priv->tx_mb->can_ctrl);
 
 	/* acceptance mask/acceptance code (accept everything) */
-	flexcan_write(0x0, &regs->rxgmask);
-	flexcan_write(0x0, &regs->rx14mask);
-	flexcan_write(0x0, &regs->rx15mask);
+	priv->write(0x0, &regs->rxgmask);
+	priv->write(0x0, &regs->rx14mask);
+	priv->write(0x0, &regs->rx15mask);
 
 	if (priv->devtype_data->quirks & FLEXCAN_QUIRK_DISABLE_RXFG)
-		flexcan_write(0x0, &regs->rxfgmask);
+		priv->write(0x0, &regs->rxfgmask);
 
 	/* clear acceptance filters */
 	for (i = 0; i < ARRAY_SIZE(regs->mb); i++)
-		flexcan_write(0, &regs->rximr[i]);
+		priv->write(0, &regs->rximr[i]);
 
 	/* On Vybrid, disable memory error detection interrupts
 	 * and freeze mode.
@@ -1011,16 +1029,16 @@ static int flexcan_chip_start(struct net_device *dev)
 		 * and Correction of Memory Errors" to write to
 		 * MECR register
 		 */
-		reg_ctrl2 = flexcan_read(&regs->ctrl2);
+		reg_ctrl2 = priv->read(&regs->ctrl2);
 		reg_ctrl2 |= FLEXCAN_CTRL2_ECRWRE;
-		flexcan_write(reg_ctrl2, &regs->ctrl2);
+		priv->write(reg_ctrl2, &regs->ctrl2);
 
-		reg_mecr = flexcan_read(&regs->mecr);
+		reg_mecr = priv->read(&regs->mecr);
 		reg_mecr &= ~FLEXCAN_MECR_ECRWRDIS;
-		flexcan_write(reg_mecr, &regs->mecr);
+		priv->write(reg_mecr, &regs->mecr);
 		reg_mecr &= ~(FLEXCAN_MECR_NCEFAFRZ | FLEXCAN_MECR_HANCEI_MSK |
 			      FLEXCAN_MECR_FANCEI_MSK);
-		flexcan_write(reg_mecr, &regs->mecr);
+		priv->write(reg_mecr, &regs->mecr);
 	}
 
 	err = flexcan_transceiver_enable(priv);
@@ -1036,14 +1054,14 @@ static int flexcan_chip_start(struct net_device *dev)
 
 	/* enable interrupts atomically */
 	disable_irq(dev->irq);
-	flexcan_write(priv->reg_ctrl_default, &regs->ctrl);
-	flexcan_write(priv->reg_imask1_default, &regs->imask1);
-	flexcan_write(priv->reg_imask2_default, &regs->imask2);
+	priv->write(priv->reg_ctrl_default, &regs->ctrl);
+	priv->write(priv->reg_imask1_default, &regs->imask1);
+	priv->write(priv->reg_imask2_default, &regs->imask2);
 	enable_irq(dev->irq);
 
 	/* print chip status */
 	netdev_dbg(dev, "%s: reading mcr=0x%08x ctrl=0x%08x\n", __func__,
-		   flexcan_read(&regs->mcr), flexcan_read(&regs->ctrl));
+		   priv->read(&regs->mcr), priv->read(&regs->ctrl));
 
 	return 0;
 
@@ -1068,10 +1086,10 @@ static void flexcan_chip_stop(struct net_device *dev)
 	flexcan_chip_disable(priv);
 
 	/* Disable all interrupts */
-	flexcan_write(0, &regs->imask2);
-	flexcan_write(0, &regs->imask1);
-	flexcan_write(priv->reg_ctrl_default & ~FLEXCAN_CTRL_ERR_ALL,
-		      &regs->ctrl);
+	priv->write(0, &regs->imask2);
+	priv->write(0, &regs->imask1);
+	priv->write(priv->reg_ctrl_default & ~FLEXCAN_CTRL_ERR_ALL,
+		    &regs->ctrl);
 
 	flexcan_transceiver_disable(priv);
 	priv->can.state = CAN_STATE_STOPPED;
@@ -1186,26 +1204,26 @@ static int register_flexcandev(struct net_device *dev)
 	err = flexcan_chip_disable(priv);
 	if (err)
 		goto out_disable_per;
-	reg = flexcan_read(&regs->ctrl);
+	reg = priv->read(&regs->ctrl);
 	reg |= FLEXCAN_CTRL_CLK_SRC;
-	flexcan_write(reg, &regs->ctrl);
+	priv->write(reg, &regs->ctrl);
 
 	err = flexcan_chip_enable(priv);
 	if (err)
 		goto out_chip_disable;
 
 	/* set freeze, halt and activate FIFO, restrict register access */
-	reg = flexcan_read(&regs->mcr);
+	reg = priv->read(&regs->mcr);
 	reg |= FLEXCAN_MCR_FRZ | FLEXCAN_MCR_HALT |
 		FLEXCAN_MCR_FEN | FLEXCAN_MCR_SUPV;
-	flexcan_write(reg, &regs->mcr);
+	priv->write(reg, &regs->mcr);
 
 	/* Currently we only support newer versions of this core
 	 * featuring an RX hardware FIFO (although this driver doesn't
 	 * make use of it on some cores). Older cores, found on some
 	 * ColdFire derivatives, are not tested.
 	 */
-	reg = flexcan_read(&regs->mcr);
+	reg = priv->read(&regs->mcr);
 	if (!(reg & FLEXCAN_MCR_FEN)) {
 		netdev_err(dev, "Could not enable RX FIFO, unsupported core\n");
 		err = -ENODEV;
@@ -1233,8 +1251,12 @@ static void unregister_flexcandev(struct net_device *dev)
 static const struct of_device_id flexcan_of_match[] = {
 	{ .compatible = "fsl,imx6q-flexcan", .data = &fsl_imx6q_devtype_data, },
 	{ .compatible = "fsl,imx28-flexcan", .data = &fsl_imx28_devtype_data, },
+	{ .compatible = "fsl,imx53-flexcan", .data = &fsl_p1010_devtype_data, },
+	{ .compatible = "fsl,imx35-flexcan", .data = &fsl_p1010_devtype_data, },
+	{ .compatible = "fsl,imx25-flexcan", .data = &fsl_p1010_devtype_data, },
 	{ .compatible = "fsl,p1010-flexcan", .data = &fsl_p1010_devtype_data, },
 	{ .compatible = "fsl,vf610-flexcan", .data = &fsl_vf610_devtype_data, },
+	{ .compatible = "fsl,ls1021ar2-flexcan", .data = &fsl_ls1021a_r2_devtype_data, },
 	{ /* sentinel */ },
 };
 MODULE_DEVICE_TABLE(of, flexcan_of_match);
@@ -1314,6 +1336,21 @@ static int flexcan_probe(struct platform_device *pdev)
 	dev->flags |= IFF_ECHO;
 
 	priv = netdev_priv(dev);
+
+	if (of_property_read_bool(pdev->dev.of_node, "big-endian")) {
+		priv->read = flexcan_read_be;
+		priv->write = flexcan_write_be;
+	} else {
+		if (of_device_is_compatible(pdev->dev.of_node,
+					    "fsl,p1010-flexcan")) {
+			priv->read = flexcan_read_be;
+			priv->write = flexcan_write_be;
+		} else {
+			priv->read = flexcan_read_le;
+			priv->write = flexcan_write_le;
+		}
+	}
+
 	priv->can.clock.freq = clock_freq;
 	priv->can.bittiming_const = &flexcan_bittiming_const;
 	priv->can.do_set_mode = flexcan_set_mode;
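
With this hunk, the probe path binds the register accessors at runtime: a device-tree "big-endian" property or the "fsl,p1010-flexcan" compatible selects the big-endian pair, everything else gets the little-endian pair. The helper bodies are added earlier in this patch and not shown in this excerpt; a plausible sketch of what such a pair looks like:

	/* Hedged sketch of the endian-specific accessors selected above;
	 * the actual definitions live elsewhere in the driver.
	 */
	static u32 flexcan_read_be(void __iomem *addr)
	{
		return ioread32be(addr);
	}

	static void flexcan_write_be(u32 val, void __iomem *addr)
	{
		iowrite32be(val, addr);
	}

	static u32 flexcan_read_le(void __iomem *addr)
	{
		return ioread32(addr);
	}

	static void flexcan_write_le(u32 val, void __iomem *addr)
	{
		iowrite32(val, addr);
	}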
diff --git a/drivers/net/can/m_can/m_can.c b/drivers/net/can/m_can/m_can.c
index f4947a74b65f..2594f7779c6f 100644
--- a/drivers/net/can/m_can/m_can.c
+++ b/drivers/net/can/m_can/m_can.c
@@ -23,6 +23,7 @@
 #include <linux/of.h>
 #include <linux/of_device.h>
 #include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
 #include <linux/iopoll.h>
 #include <linux/can/dev.h>
 
@@ -126,6 +127,12 @@ enum m_can_mram_cfg {
 #define DBTP_DSJW_SHIFT		0
 #define DBTP_DSJW_MASK		(0xf << DBTP_DSJW_SHIFT)
 
+/* Transmitter Delay Compensation Register (TDCR) */
+#define TDCR_TDCO_SHIFT		8
+#define TDCR_TDCO_MASK		(0x7F << TDCR_TDCO_SHIFT)
+#define TDCR_TDCF_SHIFT		0
+#define TDCR_TDCF_MASK		(0x7F << TDCR_TDCF_SHIFT)
+
 /* Test Register (TEST) */
 #define TEST_LBCK		BIT(4)
 
@@ -625,21 +632,16 @@ static int m_can_clk_start(struct m_can_priv *priv)
 {
 	int err;
 
-	err = clk_prepare_enable(priv->hclk);
+	err = pm_runtime_get_sync(priv->device);
 	if (err)
-		return err;
-
-	err = clk_prepare_enable(priv->cclk);
-	if (err)
-		clk_disable_unprepare(priv->hclk);
+		pm_runtime_put_noidle(priv->device);
 
 	return err;
 }
 
 static void m_can_clk_stop(struct m_can_priv *priv)
 {
-	clk_disable_unprepare(priv->cclk);
-	clk_disable_unprepare(priv->hclk);
+	pm_runtime_put_sync(priv->device);
 }
 
 static int m_can_get_berr_counter(const struct net_device *dev,
@@ -987,13 +989,47 @@ static int m_can_set_bittiming(struct net_device *dev)
 	m_can_write(priv, M_CAN_NBTP, reg_btp);
 
 	if (priv->can.ctrlmode & CAN_CTRLMODE_FD) {
+		reg_btp = 0;
 		brp = dbt->brp - 1;
 		sjw = dbt->sjw - 1;
 		tseg1 = dbt->prop_seg + dbt->phase_seg1 - 1;
 		tseg2 = dbt->phase_seg2 - 1;
-		reg_btp = (brp << DBTP_DBRP_SHIFT) | (sjw << DBTP_DSJW_SHIFT) |
-			(tseg1 << DBTP_DTSEG1_SHIFT) |
-			(tseg2 << DBTP_DTSEG2_SHIFT);
+
+		/* TDC is only needed for bitrates beyond 2.5 Mbit/s.
+		 * This is mentioned in the "Bit Time Requirements for CAN FD"
+		 * paper presented at the International CAN Conference 2013.
+		 */
+		if (dbt->bitrate > 2500000) {
+			u32 tdco, ssp;
+
+			/* Use the same value of secondary sampling point
+			 * as the data sampling point
+			 */
+			ssp = dbt->sample_point;
+
+			/* Equation based on Bosch's M_CAN User Manual's
+			 * Transmitter Delay Compensation Section
+			 */
+			tdco = (priv->can.clock.freq / 1000) *
+			       ssp / dbt->bitrate;
+
+			/* Max valid TDCO value is 127 */
+			if (tdco > 127) {
+				netdev_warn(dev, "TDCO value of %u is beyond maximum. Using maximum possible value\n",
+					    tdco);
+				tdco = 127;
+			}
+
+			reg_btp |= DBTP_TDC;
+			m_can_write(priv, M_CAN_TDCR,
+				    tdco << TDCR_TDCO_SHIFT);
+		}
+
+		reg_btp |= (brp << DBTP_DBRP_SHIFT) |
+			   (sjw << DBTP_DSJW_SHIFT) |
+			   (tseg1 << DBTP_DTSEG1_SHIFT) |
+			   (tseg2 << DBTP_DTSEG2_SHIFT);
+
 		m_can_write(priv, M_CAN_DBTP, reg_btp);
 	}
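
The TDCO computation scales the secondary sample point (the CAN framework expresses sample points in tenths of a percent) by the ratio of CAN clock to data bitrate, i.e. tdco = ticks-per-bit * sample-point-fraction. A worked example with assumed values (40 MHz clock, 75.0% sample point, 4 Mbit/s data rate):

	#include <stdint.h>
	#include <stdio.h>

	int main(void)
	{
		uint32_t freq = 40000000;   /* CAN clock in Hz (assumed) */
		uint32_t ssp = 750;         /* sample point, 1/10 percent */
		uint32_t bitrate = 4000000; /* data bitrate in bit/s */

		/* 40000 kHz * 750 / 4000000 = 7 (integer division) */
		uint32_t tdco = (freq / 1000) * ssp / bitrate;

		if (tdco > 127)     /* TDCR.TDCO is a 7-bit field */
			tdco = 127;

		printf("tdco = %u\n", tdco); /* prints 7 */
		return 0;
	}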
 
@@ -1143,11 +1179,6 @@ static int m_can_set_mode(struct net_device *dev, enum can_mode mode)
 	return 0;
 }
 
-static void free_m_can_dev(struct net_device *dev)
-{
-	free_candev(dev);
-}
-
 /* Checks core release number of M_CAN
  * returns 0 if an unsupported device is detected
  * else it returns the release and step coded as:
@@ -1207,31 +1238,20 @@ static bool m_can_niso_supported(const struct m_can_priv *priv)
 	return !niso_timeout;
 }
 
-static struct net_device *alloc_m_can_dev(struct platform_device *pdev,
-					  void __iomem *addr, u32 tx_fifo_size)
+static int m_can_dev_setup(struct platform_device *pdev, struct net_device *dev,
+			   void __iomem *addr)
 {
-	struct net_device *dev;
 	struct m_can_priv *priv;
 	int m_can_version;
-	unsigned int echo_buffer_count;
 
 	m_can_version = m_can_check_core_release(addr);
 	/* return if unsupported version */
 	if (!m_can_version) {
-		dev = NULL;
-		goto return_dev;
+		dev_err(&pdev->dev, "Unsupported version number: %2d",
+			m_can_version);
+		return -EINVAL;
 	}
 
-	/* If version < 3.1.x, then only one echo buffer is used */
-	echo_buffer_count = ((m_can_version == 30)
-				? 1U
-				: (unsigned int)tx_fifo_size);
-
-	dev = alloc_candev(sizeof(*priv), echo_buffer_count);
-	if (!dev) {
-		dev = NULL;
-		goto return_dev;
-	}
 	priv = netdev_priv(dev);
 	netif_napi_add(dev, &priv->napi, m_can_poll, M_CAN_NAPI_WEIGHT);
 
@@ -1273,16 +1293,12 @@ static struct net_device *alloc_m_can_dev(struct platform_device *pdev,
 						: 0);
 		break;
 	default:
-		/* Unsupported device: free candev */
-		free_m_can_dev(dev);
 		dev_err(&pdev->dev, "Unsupported version number: %2d",
 			priv->version);
-		dev = NULL;
-		break;
+		return -EINVAL;
 	}
 
-return_dev:
-	return dev;
+	return 0;
 }
 
 static int m_can_open(struct net_device *dev)
@@ -1574,37 +1590,26 @@ static int m_can_plat_probe(struct platform_device *pdev)
 		goto failed_ret;
 	}
 
-	/* Enable clocks. Necessary to read Core Release in order to determine
-	 * M_CAN version
-	 */
-	ret = clk_prepare_enable(hclk);
-	if (ret)
-		goto disable_hclk_ret;
-
-	ret = clk_prepare_enable(cclk);
-	if (ret)
-		goto disable_cclk_ret;
-
 	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "m_can");
 	addr = devm_ioremap_resource(&pdev->dev, res);
 	irq = platform_get_irq_byname(pdev, "int0");
 
 	if (IS_ERR(addr) || irq < 0) {
 		ret = -EINVAL;
-		goto disable_cclk_ret;
+		goto failed_ret;
 	}
 
 	/* message ram could be shared */
 	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "message_ram");
 	if (!res) {
 		ret = -ENODEV;
-		goto disable_cclk_ret;
+		goto failed_ret;
 	}
 
 	mram_addr = devm_ioremap(&pdev->dev, res->start, resource_size(res));
 	if (!mram_addr) {
 		ret = -ENOMEM;
-		goto disable_cclk_ret;
+		goto failed_ret;
 	}
 
 	/* get message ram configuration */
@@ -1613,7 +1618,7 @@ static int m_can_plat_probe(struct platform_device *pdev)
 					 sizeof(mram_config_vals) / 4);
 	if (ret) {
 		dev_err(&pdev->dev, "Could not get Message RAM configuration.");
-		goto disable_cclk_ret;
+		goto failed_ret;
 	}
 
 	/* Get TX FIFO size
@@ -1622,11 +1627,12 @@ static int m_can_plat_probe(struct platform_device *pdev)
 	tx_fifo_size = mram_config_vals[7];
 
 	/* allocate the m_can device */
-	dev = alloc_m_can_dev(pdev, addr, tx_fifo_size);
+	dev = alloc_candev(sizeof(*priv), tx_fifo_size);
 	if (!dev) {
 		ret = -ENOMEM;
-		goto disable_cclk_ret;
+		goto failed_ret;
 	}
+
 	priv = netdev_priv(dev);
 	dev->irq = irq;
 	priv->device = &pdev->dev;
@@ -1640,30 +1646,42 @@ static int m_can_plat_probe(struct platform_device *pdev)
 	platform_set_drvdata(pdev, dev);
 	SET_NETDEV_DEV(dev, &pdev->dev);
 
+	/* Enable clocks. Necessary to read Core Release in order to determine
+	 * M_CAN version
+	 */
+	pm_runtime_enable(&pdev->dev);
+	ret = m_can_clk_start(priv);
+	if (ret)
+		goto pm_runtime_fail;
+
+	ret = m_can_dev_setup(pdev, dev, addr);
+	if (ret)
+		goto clk_disable;
+
 	ret = register_m_can_dev(dev);
 	if (ret) {
 		dev_err(&pdev->dev, "registering %s failed (err=%d)\n",
 			KBUILD_MODNAME, ret);
-		goto failed_free_dev;
+		goto clk_disable;
 	}
 
 	devm_can_led_init(dev);
 
+	of_can_transceiver(dev);
+
 	dev_info(&pdev->dev, "%s device registered (irq=%d, version=%d)\n",
 		 KBUILD_MODNAME, dev->irq, priv->version);
 
 	/* Probe finished
 	 * Stop clocks. They will be reactivated once the M_CAN device is opened
 	 */
-
-	goto disable_cclk_ret;
-
-failed_free_dev:
-	free_m_can_dev(dev);
-disable_cclk_ret:
-	clk_disable_unprepare(cclk);
-disable_hclk_ret:
-	clk_disable_unprepare(hclk);
+clk_disable:
+	m_can_clk_stop(priv);
+pm_runtime_fail:
+	if (ret) {
+		pm_runtime_disable(&pdev->dev);
+		free_candev(dev);
+	}
 failed_ret:
 	return ret;
 }
@@ -1721,14 +1739,47 @@ static int m_can_plat_remove(struct platform_device *pdev)
 	struct net_device *dev = platform_get_drvdata(pdev);
 
 	unregister_m_can_dev(dev);
+
+	pm_runtime_disable(&pdev->dev);
+
 	platform_set_drvdata(pdev, NULL);
 
-	free_m_can_dev(dev);
+	free_candev(dev);
 
 	return 0;
 }
 
+static int __maybe_unused m_can_runtime_suspend(struct device *dev)
+{
+	struct net_device *ndev = dev_get_drvdata(dev);
+	struct m_can_priv *priv = netdev_priv(ndev);
+
+	clk_disable_unprepare(priv->cclk);
+	clk_disable_unprepare(priv->hclk);
+
+	return 0;
+}
+
+static int __maybe_unused m_can_runtime_resume(struct device *dev)
+{
+	struct net_device *ndev = dev_get_drvdata(dev);
+	struct m_can_priv *priv = netdev_priv(ndev);
+	int err;
+
+	err = clk_prepare_enable(priv->hclk);
+	if (err)
+		return err;
+
+	err = clk_prepare_enable(priv->cclk);
+	if (err)
+		clk_disable_unprepare(priv->hclk);
+
+	return err;
+}
+
 static const struct dev_pm_ops m_can_pmops = {
+	SET_RUNTIME_PM_OPS(m_can_runtime_suspend,
+			   m_can_runtime_resume, NULL)
 	SET_SYSTEM_SLEEP_PM_OPS(m_can_suspend, m_can_resume)
 };
 
diff --git a/drivers/net/can/rx-offload.c b/drivers/net/can/rx-offload.c
index f394f77d7528..d94dae216820 100644
--- a/drivers/net/can/rx-offload.c
+++ b/drivers/net/can/rx-offload.c
@@ -256,7 +256,7 @@ int can_rx_offload_add_timestamp(struct net_device *dev, struct can_rx_offload *
 		weight = offload->mb_first - offload->mb_last;
 	}
 
-	return can_rx_offload_init_queue(dev, offload, weight);;
+	return can_rx_offload_init_queue(dev, offload, weight);
 }
 EXPORT_SYMBOL_GPL(can_rx_offload_add_timestamp);
 
diff --git a/drivers/net/can/slcan.c b/drivers/net/can/slcan.c
index 5d067c1b987f..89d60d8e467c 100644
--- a/drivers/net/can/slcan.c
+++ b/drivers/net/can/slcan.c
@@ -508,7 +508,7 @@ static void slc_sync(void)
 }
 
 /* Find a free SLCAN channel, and link in this `tty' line. */
-static struct slcan *slc_alloc(dev_t line)
+static struct slcan *slc_alloc(void)
 {
 	int i;
 	char name[IFNAMSIZ];
@@ -583,7 +583,7 @@ static int slcan_open(struct tty_struct *tty)
 
 	/* OK.  Find a free SLCAN channel to use. */
 	err = -ENFILE;
-	sl = slc_alloc(tty_devnum(tty));
+	sl = slc_alloc();
 	if (sl == NULL)
 		goto err_exit;
 
diff --git a/drivers/net/can/spi/mcp251x.c b/drivers/net/can/spi/mcp251x.c
index f3f05fea8e1f..98d118b3aaf4 100644
--- a/drivers/net/can/spi/mcp251x.c
+++ b/drivers/net/can/spi/mcp251x.c
@@ -612,8 +612,7 @@ static int mcp251x_do_set_bittiming(struct net_device *net)
 	return 0;
 }
 
-static int mcp251x_setup(struct net_device *net, struct mcp251x_priv *priv,
-			 struct spi_device *spi)
+static int mcp251x_setup(struct net_device *net, struct spi_device *spi)
 {
 	mcp251x_do_set_bittiming(net);
 
@@ -775,7 +774,7 @@ static void mcp251x_restart_work_handler(struct work_struct *ws)
 	mutex_lock(&priv->mcp_lock);
 	if (priv->after_suspend) {
 		mcp251x_hw_reset(spi);
-		mcp251x_setup(net, priv, spi);
+		mcp251x_setup(net, spi);
 		if (priv->after_suspend & AFTER_SUSPEND_RESTART) {
 			mcp251x_set_normal_mode(spi);
 		} else if (priv->after_suspend & AFTER_SUSPEND_UP) {
@@ -971,7 +970,7 @@ static int mcp251x_open(struct net_device *net)
 		mcp251x_open_clean(net);
 		goto open_unlock;
 	}
-	ret = mcp251x_setup(net, priv, spi);
+	ret = mcp251x_setup(net, spi);
 	if (ret) {
 		mcp251x_open_clean(net);
 		goto open_unlock;
diff --git a/drivers/net/can/usb/gs_usb.c b/drivers/net/can/usb/gs_usb.c
index 8bf80ad9dc44..17c21ad3b95e 100644
--- a/drivers/net/can/usb/gs_usb.c
+++ b/drivers/net/can/usb/gs_usb.c
@@ -243,7 +243,7 @@ static struct gs_tx_context *gs_get_tx_context(struct gs_can *dev,
 	return NULL;
 }
 
-static int gs_cmd_reset(struct gs_usb *gsusb, struct gs_can *gsdev)
+static int gs_cmd_reset(struct gs_can *gsdev)
 {
 	struct gs_device_mode *dm;
 	struct usb_interface *intf = gsdev->iface;
@@ -709,7 +709,7 @@ static int gs_can_close(struct net_device *netdev)
 	atomic_set(&dev->active_tx_urbs, 0);
 
 	/* reset the device */
-	rc = gs_cmd_reset(parent, dev);
+	rc = gs_cmd_reset(dev);
 	if (rc < 0)
 		netdev_warn(netdev, "Couldn't shutdown device (err=%d)", rc);
 
diff --git a/drivers/net/can/usb/peak_usb/pcan_usb.c b/drivers/net/can/usb/peak_usb/pcan_usb.c
index 25a9b79cc42d..f530a80f5051 100644
--- a/drivers/net/can/usb/peak_usb/pcan_usb.c
+++ b/drivers/net/can/usb/peak_usb/pcan_usb.c
@@ -408,7 +408,6 @@ static int pcan_usb_decode_error(struct pcan_usb_msg_context *mc, u8 n,
 {
 	struct sk_buff *skb;
 	struct can_frame *cf;
-	struct timeval tv;
 	enum can_state new_state;
 
 	/* ignore this error until 1st ts received */
@@ -525,8 +524,8 @@ static int pcan_usb_decode_error(struct pcan_usb_msg_context *mc, u8 n,
 	if (status_len & PCAN_USB_STATUSLEN_TIMESTAMP) {
 		struct skb_shared_hwtstamps *hwts = skb_hwtstamps(skb);
 
-		peak_usb_get_ts_tv(&mc->pdev->time_ref, mc->ts16, &tv);
-		hwts->hwtstamp = timeval_to_ktime(tv);
+		peak_usb_get_ts_time(&mc->pdev->time_ref, mc->ts16,
+				     &hwts->hwtstamp);
 	}
 
 	mc->netdev->stats.rx_packets++;
@@ -610,7 +609,6 @@ static int pcan_usb_decode_data(struct pcan_usb_msg_context *mc, u8 status_len)
 	u8 rec_len = status_len & PCAN_USB_STATUSLEN_DLC;
 	struct sk_buff *skb;
 	struct can_frame *cf;
-	struct timeval tv;
 	struct skb_shared_hwtstamps *hwts;
 
 	skb = alloc_can_skb(mc->netdev, &cf);
@@ -658,9 +656,8 @@ static int pcan_usb_decode_data(struct pcan_usb_msg_context *mc, u8 status_len)
 	}
 
 	/* convert timestamp into kernel time */
-	peak_usb_get_ts_tv(&mc->pdev->time_ref, mc->ts16, &tv);
 	hwts = skb_hwtstamps(skb);
-	hwts->hwtstamp = timeval_to_ktime(tv);
+	peak_usb_get_ts_time(&mc->pdev->time_ref, mc->ts16, &hwts->hwtstamp);
 
 	/* update statistics */
 	mc->netdev->stats.rx_packets++;
diff --git a/drivers/net/can/usb/peak_usb/pcan_usb_core.c b/drivers/net/can/usb/peak_usb/pcan_usb_core.c
index 1ca76e03e965..50e911428638 100644
--- a/drivers/net/can/usb/peak_usb/pcan_usb_core.c
+++ b/drivers/net/can/usb/peak_usb/pcan_usb_core.c
@@ -80,21 +80,6 @@ void peak_usb_init_time_ref(struct peak_time_ref *time_ref,
 	}
 }
 
-static void peak_usb_add_us(struct timeval *tv, u32 delta_us)
-{
-	/* number of s. to add to final time */
-	u32 delta_s = delta_us / 1000000;
-
-	delta_us -= delta_s * 1000000;
-
-	tv->tv_usec += delta_us;
-	if (tv->tv_usec >= 1000000) {
-		tv->tv_usec -= 1000000;
-		delta_s++;
-	}
-	tv->tv_sec += delta_s;
-}
-
 /*
  * sometimes, another 'now' may be more recent than the current one...
  */
@@ -103,7 +88,7 @@ void peak_usb_update_ts_now(struct peak_time_ref *time_ref, u32 ts_now)
 	time_ref->ts_dev_2 = ts_now;
 
 	/* should wait at least two passes before computing */
-	if (time_ref->tv_host.tv_sec > 0) {
+	if (ktime_to_ns(time_ref->tv_host) > 0) {
 		u32 delta_ts = time_ref->ts_dev_2 - time_ref->ts_dev_1;
 
 		if (time_ref->ts_dev_2 < time_ref->ts_dev_1)
@@ -118,26 +103,26 @@ void peak_usb_update_ts_now(struct peak_time_ref *time_ref, u32 ts_now)
  */
 void peak_usb_set_ts_now(struct peak_time_ref *time_ref, u32 ts_now)
 {
-	if (time_ref->tv_host_0.tv_sec == 0) {
+	if (ktime_to_ns(time_ref->tv_host_0) == 0) {
 		/* use monotonic clock to correctly compute further deltas */
-		time_ref->tv_host_0 = ktime_to_timeval(ktime_get());
-		time_ref->tv_host.tv_sec = 0;
+		time_ref->tv_host_0 = ktime_get();
+		time_ref->tv_host = ktime_set(0, 0);
 	} else {
 		/*
-		 * delta_us should not be >= 2^32 => delta_s should be < 4294
+		 * delta_us should not be >= 2^32 => delta should be < 4294s
 		 * handle 32-bit wrapping here: if the seconds count reaches 4200,
 		 * reset counters and change time base
 		 */
-		if (time_ref->tv_host.tv_sec != 0) {
-			u32 delta_s = time_ref->tv_host.tv_sec
-						- time_ref->tv_host_0.tv_sec;
-			if (delta_s > 4200) {
+		if (ktime_to_ns(time_ref->tv_host)) {
+			ktime_t delta = ktime_sub(time_ref->tv_host,
+						  time_ref->tv_host_0);
+			if (ktime_to_ns(delta) > (4200ull * NSEC_PER_SEC)) {
 				time_ref->tv_host_0 = time_ref->tv_host;
 				time_ref->ts_total = 0;
 			}
 		}
 
-		time_ref->tv_host = ktime_to_timeval(ktime_get());
+		time_ref->tv_host = ktime_get();
 		time_ref->tick_count++;
 	}
 
@@ -146,13 +131,12 @@ void peak_usb_set_ts_now(struct peak_time_ref *time_ref, u32 ts_now)
 }
 
 /*
- * compute timeval according to current ts and time_ref data
+ * compute time according to current ts and time_ref data
  */
-void peak_usb_get_ts_tv(struct peak_time_ref *time_ref, u32 ts,
-			struct timeval *tv)
+void peak_usb_get_ts_time(struct peak_time_ref *time_ref, u32 ts, ktime_t *time)
 {
-	/* protect from getting timeval before setting now */
-	if (time_ref->tv_host.tv_sec > 0) {
+	/* protect from getting time before setting now */
+	if (ktime_to_ns(time_ref->tv_host)) {
 		u64 delta_us;
 
 		delta_us = ts - time_ref->ts_dev_2;
@@ -164,10 +148,9 @@ void peak_usb_get_ts_tv(struct peak_time_ref *time_ref, u32 ts,
 		delta_us *= time_ref->adapter->us_per_ts_scale;
 		delta_us >>= time_ref->adapter->us_per_ts_shift;
 
-		*tv = time_ref->tv_host_0;
-		peak_usb_add_us(tv, (u32)delta_us);
+		*time = ktime_add_us(time_ref->tv_host_0, delta_us);
 	} else {
-		*tv = ktime_to_timeval(ktime_get());
+		*time = ktime_get();
 	}
 }
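
Since ktime_t is a 64-bit nanosecond count, the manual seconds/microseconds carry that peak_usb_add_us() performed collapses into a single ktime_add_us(). A small sketch of the same arithmetic on plain 64-bit nanoseconds, with made-up values:

	#include <stdint.h>
	#include <stdio.h>

	#define NSEC_PER_USEC 1000LL

	static int64_t ktime_add_us_sketch(int64_t kt_ns, uint64_t usec)
	{
		/* no sec/usec carry needed on a flat ns counter */
		return kt_ns + (int64_t)(usec * NSEC_PER_USEC);
	}

	int main(void)
	{
		int64_t host0 = 5LL * 1000000000LL; /* base time, 5 s */
		uint64_t delta_us = 1500000;        /* 1.5 s of dev ticks */

		printf("timestamp = %lld ns\n",
		       (long long)ktime_add_us_sketch(host0, delta_us));
		return 0;
	}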
 
@@ -175,13 +158,11 @@ void peak_usb_get_ts_tv(struct peak_time_ref *time_ref, u32 ts,
  * post received skb after having set any hw timestamp
  */
 int peak_usb_netif_rx(struct sk_buff *skb,
-		      struct peak_time_ref *time_ref, u32 ts_low, u32 ts_high)
+		      struct peak_time_ref *time_ref, u32 ts_low)
 {
 	struct skb_shared_hwtstamps *hwts = skb_hwtstamps(skb);
-	struct timeval tv;
 
-	peak_usb_get_ts_tv(time_ref, ts_low, &tv);
-	hwts->hwtstamp = timeval_to_ktime(tv);
+	peak_usb_get_ts_time(time_ref, ts_low, &hwts->hwtstamp);
 
 	return netif_rx(skb);
 }
diff --git a/drivers/net/can/usb/peak_usb/pcan_usb_core.h b/drivers/net/can/usb/peak_usb/pcan_usb_core.h
index c01316cac354..fb23489e1cb8 100644
--- a/drivers/net/can/usb/peak_usb/pcan_usb_core.h
+++ b/drivers/net/can/usb/peak_usb/pcan_usb_core.h
@@ -96,7 +96,7 @@ extern const struct peak_usb_adapter pcan_usb_pro_fd;
 extern const struct peak_usb_adapter pcan_usb_x6;
 
 struct peak_time_ref {
-	struct timeval tv_host_0, tv_host;
+	ktime_t tv_host_0, tv_host;
 	u32 ts_dev_1, ts_dev_2;
 	u64 ts_total;
 	u32 tick_count;
@@ -151,10 +151,9 @@ void peak_usb_init_time_ref(struct peak_time_ref *time_ref,
 			    const struct peak_usb_adapter *adapter);
 void peak_usb_update_ts_now(struct peak_time_ref *time_ref, u32 ts_now);
 void peak_usb_set_ts_now(struct peak_time_ref *time_ref, u32 ts_now);
-void peak_usb_get_ts_tv(struct peak_time_ref *time_ref, u32 ts,
-			struct timeval *tv);
+void peak_usb_get_ts_time(struct peak_time_ref *time_ref, u32 ts, ktime_t *tv);
 int peak_usb_netif_rx(struct sk_buff *skb,
-		      struct peak_time_ref *time_ref, u32 ts_low, u32 ts_high);
+		      struct peak_time_ref *time_ref, u32 ts_low);
 void peak_usb_async_complete(struct urb *urb);
 void peak_usb_restart_complete(struct peak_usb_device *dev);
 
diff --git a/drivers/net/can/usb/peak_usb/pcan_usb_fd.c b/drivers/net/can/usb/peak_usb/pcan_usb_fd.c
index 53d6bb045e9e..dd161c5eea8e 100644
--- a/drivers/net/can/usb/peak_usb/pcan_usb_fd.c
+++ b/drivers/net/can/usb/peak_usb/pcan_usb_fd.c
@@ -514,8 +514,7 @@ static int pcan_usb_fd_decode_canmsg(struct pcan_usb_fd_if *usb_if,
 	else
 		memcpy(cfd->data, rm->d, cfd->len);
 
-	peak_usb_netif_rx(skb, &usb_if->time_ref,
-			  le32_to_cpu(rm->ts_low), le32_to_cpu(rm->ts_high));
+	peak_usb_netif_rx(skb, &usb_if->time_ref, le32_to_cpu(rm->ts_low));
 
 	netdev->stats.rx_packets++;
 	netdev->stats.rx_bytes += cfd->len;
@@ -575,8 +574,7 @@ static int pcan_usb_fd_decode_status(struct pcan_usb_fd_if *usb_if,
 	if (!skb)
 		return -ENOMEM;
 
-	peak_usb_netif_rx(skb, &usb_if->time_ref,
-			  le32_to_cpu(sm->ts_low), le32_to_cpu(sm->ts_high));
+	peak_usb_netif_rx(skb, &usb_if->time_ref, le32_to_cpu(sm->ts_low));
 
 	netdev->stats.rx_packets++;
 	netdev->stats.rx_bytes += cf->can_dlc;
@@ -618,8 +616,7 @@ static int pcan_usb_fd_decode_overrun(struct pcan_usb_fd_if *usb_if,
 	cf->can_id |= CAN_ERR_CRTL;
 	cf->data[1] |= CAN_ERR_CRTL_RX_OVERFLOW;
 
-	peak_usb_netif_rx(skb, &usb_if->time_ref,
-			  le32_to_cpu(ov->ts_low), le32_to_cpu(ov->ts_high));
+	peak_usb_netif_rx(skb, &usb_if->time_ref, le32_to_cpu(ov->ts_low));
 
 	netdev->stats.rx_over_errors++;
 	netdev->stats.rx_errors++;
diff --git a/drivers/net/can/usb/peak_usb/pcan_usb_pro.c b/drivers/net/can/usb/peak_usb/pcan_usb_pro.c
index bbdd6058cd2f..0105fbfea273 100644
--- a/drivers/net/can/usb/peak_usb/pcan_usb_pro.c
+++ b/drivers/net/can/usb/peak_usb/pcan_usb_pro.c
@@ -531,7 +531,6 @@ static int pcan_usb_pro_handle_canmsg(struct pcan_usb_pro_interface *usb_if,
 	struct net_device *netdev = dev->netdev;
 	struct can_frame *can_frame;
 	struct sk_buff *skb;
-	struct timeval tv;
 	struct skb_shared_hwtstamps *hwts;
 
 	skb = alloc_can_skb(netdev, &can_frame);
@@ -549,9 +548,9 @@ static int pcan_usb_pro_handle_canmsg(struct pcan_usb_pro_interface *usb_if,
 	else
 		memcpy(can_frame->data, rx->data, can_frame->can_dlc);
 
-	peak_usb_get_ts_tv(&usb_if->time_ref, le32_to_cpu(rx->ts32), &tv);
 	hwts = skb_hwtstamps(skb);
-	hwts->hwtstamp = timeval_to_ktime(tv);
+	peak_usb_get_ts_time(&usb_if->time_ref, le32_to_cpu(rx->ts32),
+			     &hwts->hwtstamp);
 
 	netdev->stats.rx_packets++;
 	netdev->stats.rx_bytes += can_frame->can_dlc;
@@ -571,7 +570,6 @@ static int pcan_usb_pro_handle_error(struct pcan_usb_pro_interface *usb_if,
 	enum can_state new_state = CAN_STATE_ERROR_ACTIVE;
 	u8 err_mask = 0;
 	struct sk_buff *skb;
-	struct timeval tv;
 	struct skb_shared_hwtstamps *hwts;
 
 	/* nothing should be sent while in BUS_OFF state */
@@ -667,9 +665,8 @@ static int pcan_usb_pro_handle_error(struct pcan_usb_pro_interface *usb_if,
 
 	dev->can.state = new_state;
 
-	peak_usb_get_ts_tv(&usb_if->time_ref, le32_to_cpu(er->ts32), &tv);
 	hwts = skb_hwtstamps(skb);
-	hwts->hwtstamp = timeval_to_ktime(tv);
+	peak_usb_get_ts_time(&usb_if->time_ref, le32_to_cpu(er->ts32), &hwts->hwtstamp);
 	netdev->stats.rx_packets++;
 	netdev->stats.rx_bytes += can_frame->can_dlc;
 	netif_rx(skb);
diff --git a/drivers/net/can/vcan.c b/drivers/net/can/vcan.c
index a8cb33264ff1..c2b04f505e16 100644
--- a/drivers/net/can/vcan.c
+++ b/drivers/net/can/vcan.c
@@ -61,7 +61,7 @@ MODULE_ALIAS_RTNL_LINK(DRV_NAME);
 /*
  * CAN test feature:
  * Enable the echo on driver level for testing the CAN core echo modes.
- * See Documentation/networking/can.txt for details.
+ * See Documentation/networking/can.rst for details.
  */
 
 static bool echo; /* echo testing. Default: 0 (Off) */
diff --git a/drivers/net/can/vxcan.c b/drivers/net/can/vxcan.c
index b4c4a2c76437..ed6828821fbd 100644
--- a/drivers/net/can/vxcan.c
+++ b/drivers/net/can/vxcan.c
@@ -227,10 +227,8 @@ static int vxcan_newlink(struct net *net, struct net_device *dev,
 	netif_carrier_off(peer);
 
 	err = rtnl_configure_link(peer, ifmp);
-	if (err < 0) {
-		unregister_netdevice(peer);
-		return err;
-	}
+	if (err < 0)
+		goto unregister_network_device;
 
 	/* register first device */
 	if (tb[IFLA_IFNAME])
@@ -239,10 +237,8 @@ static int vxcan_newlink(struct net *net, struct net_device *dev,
 		snprintf(dev->name, IFNAMSIZ, DRV_NAME "%%d");
 
 	err = register_netdevice(dev);
-	if (err < 0) {
-		unregister_netdevice(peer);
-		return err;
-	}
+	if (err < 0)
+		goto unregister_network_device;
 
 	netif_carrier_off(dev);
 
@@ -254,6 +250,10 @@ static int vxcan_newlink(struct net *net, struct net_device *dev,
 	rcu_assign_pointer(priv->peer, dev);
 
 	return 0;
+
+unregister_network_device:
+	unregister_netdevice(peer);
+	return err;
 }
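
The vxcan change folds two identical error paths into a single labeled unwind, so the peer device is unregistered in exactly one place. The shape of the pattern in a self-contained sketch (names are illustrative):

	#include <stdio.h>

	static int do_step(int fail) { return fail ? -1 : 0; }
	static void undo_peer(void)  { printf("peer unregistered\n"); }

	static int setup(int fail_a, int fail_b)
	{
		int err;

		err = do_step(fail_a);      /* rtnl_configure_link() */
		if (err < 0)
			goto unregister_peer;

		err = do_step(fail_b);      /* register_netdevice() */
		if (err < 0)
			goto unregister_peer;

		return 0;

	unregister_peer:
		undo_peer();                /* one unwind site */
		return err;
	}

	int main(void)
	{
		return setup(0, 1) ? 1 : 0;
	}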
 
 static void vxcan_dellink(struct net_device *dev, struct list_head *head)
diff --git a/drivers/net/dsa/Kconfig b/drivers/net/dsa/Kconfig
index 83a9bc892a3b..2b81b97e994f 100644
--- a/drivers/net/dsa/Kconfig
+++ b/drivers/net/dsa/Kconfig
@@ -33,7 +33,7 @@ config NET_DSA_MT7530
 
 config NET_DSA_MV88E6060
 	tristate "Marvell 88E6060 ethernet switch chip support"
-	depends on NET_DSA
+	depends on NET_DSA && NET_DSA_LEGACY
 	select NET_DSA_TAG_TRAILER
 	---help---
 	  This enables support for the Marvell 88E6060 ethernet switch
diff --git a/drivers/net/dsa/b53/b53_common.c b/drivers/net/dsa/b53/b53_common.c
index 4498ab897d94..db830a1141d9 100644
--- a/drivers/net/dsa/b53/b53_common.c
+++ b/drivers/net/dsa/b53/b53_common.c
@@ -1029,8 +1029,7 @@ int b53_vlan_filtering(struct dsa_switch *ds, int port, bool vlan_filtering)
 EXPORT_SYMBOL(b53_vlan_filtering);
 
 int b53_vlan_prepare(struct dsa_switch *ds, int port,
-		     const struct switchdev_obj_port_vlan *vlan,
-		     struct switchdev_trans *trans)
+		     const struct switchdev_obj_port_vlan *vlan)
 {
 	struct b53_device *dev = ds->priv;
 
@@ -1047,8 +1046,7 @@ int b53_vlan_prepare(struct dsa_switch *ds, int port,
 EXPORT_SYMBOL(b53_vlan_prepare);
 
 void b53_vlan_add(struct dsa_switch *ds, int port,
-		  const struct switchdev_obj_port_vlan *vlan,
-		  struct switchdev_trans *trans)
+		  const struct switchdev_obj_port_vlan *vlan)
 {
 	struct b53_device *dev = ds->priv;
 	bool untagged = vlan->flags & BRIDGE_VLAN_INFO_UNTAGGED;
@@ -1495,8 +1493,7 @@ static bool b53_can_enable_brcm_tags(struct dsa_switch *ds, int port)
 	return false;
 }
 
-static enum dsa_tag_protocol b53_get_tag_protocol(struct dsa_switch *ds,
-						  int port)
+enum dsa_tag_protocol b53_get_tag_protocol(struct dsa_switch *ds, int port)
 {
 	struct b53_device *dev = ds->priv;
 
@@ -1517,6 +1514,7 @@ static enum dsa_tag_protocol b53_get_tag_protocol(struct dsa_switch *ds,
 
 	return DSA_TAG_PROTO_BRCM;
 }
+EXPORT_SYMBOL(b53_get_tag_protocol);
 
 int b53_mirror_add(struct dsa_switch *ds, int port,
 		   struct dsa_mall_mirror_tc_entry *mirror, bool ingress)
diff --git a/drivers/net/dsa/b53/b53_priv.h b/drivers/net/dsa/b53/b53_priv.h
index daaaa1ecb996..d954cf36ecd8 100644
--- a/drivers/net/dsa/b53/b53_priv.h
+++ b/drivers/net/dsa/b53/b53_priv.h
@@ -295,11 +295,9 @@ void b53_br_set_stp_state(struct dsa_switch *ds, int port, u8 state);
 void b53_br_fast_age(struct dsa_switch *ds, int port);
 int b53_vlan_filtering(struct dsa_switch *ds, int port, bool vlan_filtering);
 int b53_vlan_prepare(struct dsa_switch *ds, int port,
-		     const struct switchdev_obj_port_vlan *vlan,
-		     struct switchdev_trans *trans);
+		     const struct switchdev_obj_port_vlan *vlan);
 void b53_vlan_add(struct dsa_switch *ds, int port,
-		  const struct switchdev_obj_port_vlan *vlan,
-		  struct switchdev_trans *trans);
+		  const struct switchdev_obj_port_vlan *vlan);
 int b53_vlan_del(struct dsa_switch *ds, int port,
 		 const struct switchdev_obj_port_vlan *vlan);
 int b53_fdb_add(struct dsa_switch *ds, int port,
@@ -310,6 +308,7 @@ int b53_fdb_dump(struct dsa_switch *ds, int port,
 		 dsa_fdb_dump_cb_t *cb, void *data);
 int b53_mirror_add(struct dsa_switch *ds, int port,
 		   struct dsa_mall_mirror_tc_entry *mirror, bool ingress);
+enum dsa_tag_protocol b53_get_tag_protocol(struct dsa_switch *ds, int port);
 void b53_mirror_del(struct dsa_switch *ds, int port,
 		    struct dsa_mall_mirror_tc_entry *mirror);
 int b53_enable_port(struct dsa_switch *ds, int port, struct phy_device *phy);
diff --git a/drivers/net/dsa/bcm_sf2.c b/drivers/net/dsa/bcm_sf2.c
index b62d47210db8..0378eded31f2 100644
--- a/drivers/net/dsa/bcm_sf2.c
+++ b/drivers/net/dsa/bcm_sf2.c
@@ -34,12 +34,6 @@
 #include "b53/b53_priv.h"
 #include "b53/b53_regs.h"
 
-static enum dsa_tag_protocol bcm_sf2_sw_get_tag_protocol(struct dsa_switch *ds,
-							 int port)
-{
-	return DSA_TAG_PROTO_BRCM;
-}
-
 static void bcm_sf2_imp_setup(struct dsa_switch *ds, int port)
 {
 	struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds);
@@ -860,7 +854,7 @@ static const struct b53_io_ops bcm_sf2_io_ops = {
 };
 
 static const struct dsa_switch_ops bcm_sf2_ops = {
-	.get_tag_protocol	= bcm_sf2_sw_get_tag_protocol,
+	.get_tag_protocol	= b53_get_tag_protocol,
 	.setup			= bcm_sf2_sw_setup,
 	.get_strings		= b53_get_strings,
 	.get_ethtool_stats	= b53_get_ethtool_stats,
@@ -954,6 +948,9 @@ static const struct of_device_id bcm_sf2_of_match[] = {
 	{ .compatible = "brcm,bcm7278-switch-v4.0",
 	  .data = &bcm_sf2_7278_data
 	},
+	{ .compatible = "brcm,bcm7278-switch-v4.8",
+	  .data = &bcm_sf2_7278_data
+	},
 	{ /* sentinel */ },
 };
 MODULE_DEVICE_TABLE(of, bcm_sf2_of_match);
diff --git a/drivers/net/dsa/dsa_loop.c b/drivers/net/dsa/dsa_loop.c
index bb71d3d6f65b..7aa84ee4e771 100644
--- a/drivers/net/dsa/dsa_loop.c
+++ b/drivers/net/dsa/dsa_loop.c
@@ -174,9 +174,9 @@ static int dsa_loop_port_vlan_filtering(struct dsa_switch *ds, int port,
 	return 0;
 }
 
-static int dsa_loop_port_vlan_prepare(struct dsa_switch *ds, int port,
-				      const struct switchdev_obj_port_vlan *vlan,
-				      struct switchdev_trans *trans)
+static int
+dsa_loop_port_vlan_prepare(struct dsa_switch *ds, int port,
+			   const struct switchdev_obj_port_vlan *vlan)
 {
 	struct dsa_loop_priv *ps = ds->priv;
 	struct mii_bus *bus = ps->bus;
@@ -193,8 +193,7 @@ static int dsa_loop_port_vlan_prepare(struct dsa_switch *ds, int port,
 }
 
 static void dsa_loop_port_vlan_add(struct dsa_switch *ds, int port,
-				   const struct switchdev_obj_port_vlan *vlan,
-				   struct switchdev_trans *trans)
+				   const struct switchdev_obj_port_vlan *vlan)
 {
 	bool untagged = vlan->flags & BRIDGE_VLAN_INFO_UNTAGGED;
 	bool pvid = vlan->flags & BRIDGE_VLAN_INFO_PVID;
diff --git a/drivers/net/dsa/lan9303-core.c b/drivers/net/dsa/lan9303-core.c
index b24566bb74d2..6171c0853ff1 100644
--- a/drivers/net/dsa/lan9303-core.c
+++ b/drivers/net/dsa/lan9303-core.c
@@ -249,6 +249,28 @@ static int lan9303_read(struct regmap *regmap, unsigned int offset, u32 *reg)
 	return -EIO;
 }
 
+static int lan9303_read_wait(struct lan9303 *chip, int offset, u32 mask)
+{
+	int i;
+
+	for (i = 0; i < 25; i++) {
+		u32 reg;
+		int ret;
+
+		ret = lan9303_read(chip->regmap, offset, &reg);
+		if (ret) {
+			dev_err(chip->dev, "%s failed to read offset %d: %d\n",
+				__func__, offset, ret);
+			return ret;
+		}
+		if (!(reg & mask))
+			return 0;
+		usleep_range(1000, 2000);
+	}
+
+	return -ETIMEDOUT;
+}
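
The new helper folds the two duplicated busy-wait loops into one: poll a register up to 25 times, sleeping 1-2 ms between reads, and report -ETIMEDOUT if the masked bit never clears. A self-contained sketch of that bounded poll against a simulated device:

	#include <stdint.h>
	#include <stdio.h>

	static int polls;

	/* stand-in for the regmap read; the fake busy bit (BIT(0))
	 * clears after two polls
	 */
	static int read_reg(uint32_t *reg)
	{
		*reg = (++polls < 3) ? 0x1 : 0x0;
		return 0;
	}

	static int read_wait(uint32_t mask)
	{
		for (int i = 0; i < 25; i++) {
			uint32_t reg;

			if (read_reg(&reg))
				return -5;   /* -EIO analogue */
			if (!(reg & mask))
				return 0;
			/* usleep_range(1000, 2000) in the driver */
		}
		return -110;                 /* -ETIMEDOUT analogue */
	}

	int main(void)
	{
		printf("read_wait: %d\n", read_wait(0x1)); /* prints 0 */
		return 0;
	}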
+
 static int lan9303_virt_phy_reg_read(struct lan9303 *chip, int regnum)
 {
 	int ret;
@@ -274,22 +296,8 @@ static int lan9303_virt_phy_reg_write(struct lan9303 *chip, int regnum, u16 val)
 
 static int lan9303_indirect_phy_wait_for_completion(struct lan9303 *chip)
 {
-	int ret, i;
-	u32 reg;
-
-	for (i = 0; i < 25; i++) {
-		ret = lan9303_read(chip->regmap, LAN9303_PMI_ACCESS, &reg);
-		if (ret) {
-			dev_err(chip->dev,
-				"Failed to read pmi access status: %d\n", ret);
-			return ret;
-		}
-		if (!(reg & LAN9303_PMI_ACCESS_MII_BUSY))
-			return 0;
-		usleep_range(1000, 2000);
-	}
-
-	return -EIO;
+	return lan9303_read_wait(chip, LAN9303_PMI_ACCESS,
+				 LAN9303_PMI_ACCESS_MII_BUSY);
 }
 
 static int lan9303_indirect_phy_read(struct lan9303 *chip, int addr, int regnum)
@@ -366,22 +374,8 @@ EXPORT_SYMBOL_GPL(lan9303_indirect_phy_ops);
 
 static int lan9303_switch_wait_for_completion(struct lan9303 *chip)
 {
-	int ret, i;
-	u32 reg;
-
-	for (i = 0; i < 25; i++) {
-		ret = lan9303_read(chip->regmap, LAN9303_SWITCH_CSR_CMD, &reg);
-		if (ret) {
-			dev_err(chip->dev,
-				"Failed to read csr command status: %d\n", ret);
-			return ret;
-		}
-		if (!(reg & LAN9303_SWITCH_CSR_CMD_BUSY))
-			return 0;
-		usleep_range(1000, 2000);
-	}
-
-	return -EIO;
+	return lan9303_read_wait(chip, LAN9303_SWITCH_CSR_CMD,
+				 LAN9303_SWITCH_CSR_CMD_BUSY);
 }
 
 static int lan9303_write_switch_reg(struct lan9303 *chip, u16 regnum, u32 val)
@@ -485,7 +479,8 @@ static int lan9303_detect_phy_setup(struct lan9303 *chip)
 {
 	int reg;
 
-	/* depending on the 'phy_addr_sel_strap' setting, the three phys are
+	/* Calculate chip->phy_addr_base:
+	 * Depending on the 'phy_addr_sel_strap' setting, the three phys are
 	 * using IDs 0-1-2 or IDs 1-2-3. We cannot read back the
 	 * 'phy_addr_sel_strap' setting directly, so we need a test to
 	 * determine which configuration is active:
@@ -500,13 +495,10 @@ static int lan9303_detect_phy_setup(struct lan9303 *chip)
 		return reg;
 	}
 
-	if ((reg != 0) && (reg != 0xffff))
-		chip->phy_addr_sel_strap = 1;
-	else
-		chip->phy_addr_sel_strap = 0;
+	chip->phy_addr_base = reg != 0 && reg != 0xffff;
 
 	dev_dbg(chip->dev, "Phy setup '%s' detected\n",
-		chip->phy_addr_sel_strap ? "1-2-3" : "0-1-2");
+		chip->phy_addr_base ? "1-2-3" : "0-1-2");
 
 	return 0;
 }
@@ -546,20 +538,19 @@ lan9303_alr_cache_find_mac(struct lan9303 *chip, const u8 *mac_addr)
 	return NULL;
 }
 
-/* Wait a while until mask & reg == value. Otherwise return timeout. */
-static int lan9303_csr_reg_wait(struct lan9303 *chip, int regno,
-				int mask, char value)
+static int lan9303_csr_reg_wait(struct lan9303 *chip, int regno, u32 mask)
 {
 	int i;
 
-	for (i = 0; i < 0x1000; i++) {
+	for (i = 0; i < 25; i++) {
 		u32 reg;
 
 		lan9303_read_switch_reg(chip, regno, &reg);
-		if ((reg & mask) == value)
+		if (!(reg & mask))
 			return 0;
 		usleep_range(1000, 2000);
 	}
+
 	return -ETIMEDOUT;
 }
 
@@ -569,8 +560,7 @@ static int lan9303_alr_make_entry_raw(struct lan9303 *chip, u32 dat0, u32 dat1)
 	lan9303_write_switch_reg(chip, LAN9303_SWE_ALR_WR_DAT_1, dat1);
 	lan9303_write_switch_reg(chip, LAN9303_SWE_ALR_CMD,
 				 LAN9303_ALR_CMD_MAKE_ENTRY);
-	lan9303_csr_reg_wait(chip, LAN9303_SWE_ALR_CMD_STS, ALR_STS_MAKE_PEND,
-			     0);
+	lan9303_csr_reg_wait(chip, LAN9303_SWE_ALR_CMD_STS, ALR_STS_MAKE_PEND);
 	lan9303_write_switch_reg(chip, LAN9303_SWE_ALR_CMD, 0);
 
 	return 0;
@@ -583,6 +573,7 @@ static void lan9303_alr_loop(struct lan9303 *chip, alr_loop_cb_t *cb, void *ctx)
 {
 	int i;
 
+	mutex_lock(&chip->alr_mutex);
 	lan9303_write_switch_reg(chip, LAN9303_SWE_ALR_CMD,
 				 LAN9303_ALR_CMD_GET_FIRST);
 	lan9303_write_switch_reg(chip, LAN9303_SWE_ALR_CMD, 0);
@@ -606,6 +597,7 @@ static void lan9303_alr_loop(struct lan9303 *chip, alr_loop_cb_t *cb, void *ctx)
 					 LAN9303_ALR_CMD_GET_NEXT);
 		lan9303_write_switch_reg(chip, LAN9303_SWE_ALR_CMD, 0);
 	}
+	mutex_unlock(&chip->alr_mutex);
 }
 
 static void alr_reg_to_mac(u32 dat0, u32 dat1, u8 mac[6])
@@ -694,16 +686,20 @@ static int lan9303_alr_add_port(struct lan9303 *chip, const u8 *mac, int port,
 {
 	struct lan9303_alr_cache_entry *entr;
 
+	mutex_lock(&chip->alr_mutex);
 	entr = lan9303_alr_cache_find_mac(chip, mac);
 	if (!entr) { /* New entry */
 		entr = lan9303_alr_cache_find_free(chip);
-		if (!entr)
+		if (!entr) {
+			mutex_unlock(&chip->alr_mutex);
 			return -ENOSPC;
+		}
 		ether_addr_copy(entr->mac_addr, mac);
 	}
 	entr->port_map |= BIT(port);
 	entr->stp_override = stp_override;
 	lan9303_alr_set_entry(chip, mac, entr->port_map, stp_override);
+	mutex_unlock(&chip->alr_mutex);
 
 	return 0;
 }
@@ -713,15 +709,18 @@ static int lan9303_alr_del_port(struct lan9303 *chip, const u8 *mac, int port)
 {
 	struct lan9303_alr_cache_entry *entr;
 
+	mutex_lock(&chip->alr_mutex);
 	entr = lan9303_alr_cache_find_mac(chip, mac);
 	if (!entr)
-		return 0;  /* no static entry found */
+		goto out;  /* no static entry found */
 
 	entr->port_map &= ~BIT(port);
 	if (entr->port_map == 0) /* zero means it's free again */
 		eth_zero_addr(entr->mac_addr);
 	lan9303_alr_set_entry(chip, mac, entr->port_map, entr->stp_override);
 
+out:
+	mutex_unlock(&chip->alr_mutex);
 	return 0;
 }
 
@@ -818,18 +817,16 @@ static void lan9303_bridge_ports(struct lan9303 *chip)
 	lan9303_alr_add_port(chip, eth_stp_addr, 0, true);
 }
 
-static int lan9303_handle_reset(struct lan9303 *chip)
+static void lan9303_handle_reset(struct lan9303 *chip)
 {
 	if (!chip->reset_gpio)
-		return 0;
+		return;
 
 	if (chip->reset_duration != 0)
 		msleep(chip->reset_duration);
 
 	/* release (deassert) reset and activate the device */
 	gpiod_set_value_cansleep(chip->reset_gpio, 0);
-
-	return 0;
 }
 
 /* stop processing packets for all ports */
@@ -866,7 +863,7 @@ static int lan9303_check_device(struct lan9303 *chip)
 	if ((reg >> 16) != LAN9303_CHIP_ID) {
 		dev_err(chip->dev, "expecting LAN9303 chip, but found: %X\n",
 			reg >> 16);
-		return ret;
+		return -ENODEV;
 	}
 
 	/* The default state of the LAN9303 device is to forward packets between
@@ -1018,7 +1015,7 @@ static int lan9303_get_sset_count(struct dsa_switch *ds)
 static int lan9303_phy_read(struct dsa_switch *ds, int phy, int regnum)
 {
 	struct lan9303 *chip = ds->priv;
-	int phy_base = chip->phy_addr_sel_strap;
+	int phy_base = chip->phy_addr_base;
 
 	if (phy == phy_base)
 		return lan9303_virt_phy_reg_read(chip, regnum);
@@ -1032,7 +1029,7 @@ static int lan9303_phy_write(struct dsa_switch *ds, int phy, int regnum,
 			     u16 val)
 {
 	struct lan9303 *chip = ds->priv;
-	int phy_base = chip->phy_addr_sel_strap;
+	int phy_base = chip->phy_addr_base;
 
 	if (phy == phy_base)
 		return lan9303_virt_phy_reg_write(chip, regnum, val);
@@ -1069,7 +1066,7 @@ static void lan9303_adjust_link(struct dsa_switch *ds, int port,
 
 	res =  lan9303_phy_write(ds, port, MII_BMCR, ctl);
 
-	if (port == chip->phy_addr_sel_strap) {
+	if (port == chip->phy_addr_base) {
 		/* Virtual Phy: Remove Turbo 200Mbit mode */
 		lan9303_read(chip->regmap, LAN9303_VIRT_SPECIAL_CTRL, &ctl);
 
@@ -1093,8 +1090,7 @@ static void lan9303_port_disable(struct dsa_switch *ds, int port,
 	struct lan9303 *chip = ds->priv;
 
 	lan9303_disable_processing_port(chip, port);
-	lan9303_phy_write(ds, chip->phy_addr_sel_strap + port,
-			  MII_BMCR, BMCR_PDOWN);
+	lan9303_phy_write(ds, chip->phy_addr_base + port, MII_BMCR, BMCR_PDOWN);
 }
 
 static int lan9303_port_bridge_join(struct dsa_switch *ds, int port,
@@ -1217,8 +1213,7 @@ static int lan9303_port_fdb_dump(struct dsa_switch *ds, int port,
 }
 
 static int lan9303_port_mdb_prepare(struct dsa_switch *ds, int port,
-				    const struct switchdev_obj_port_mdb *mdb,
-				    struct switchdev_trans *trans)
+				    const struct switchdev_obj_port_mdb *mdb)
 {
 	struct lan9303 *chip = ds->priv;
 
@@ -1235,8 +1230,7 @@ static int lan9303_port_mdb_prepare(struct dsa_switch *ds, int port,
 }
 
 static void lan9303_port_mdb_add(struct dsa_switch *ds, int port,
-				 const struct switchdev_obj_port_mdb *mdb,
-				 struct switchdev_trans *trans)
+				 const struct switchdev_obj_port_mdb *mdb)
 {
 	struct lan9303 *chip = ds->priv;
 
@@ -1284,26 +1278,31 @@ static const struct dsa_switch_ops lan9303_switch_ops = {
 
 static int lan9303_register_switch(struct lan9303 *chip)
 {
+	int base;
+
 	chip->ds = dsa_switch_alloc(chip->dev, LAN9303_NUM_PORTS);
 	if (!chip->ds)
 		return -ENOMEM;
 
 	chip->ds->priv = chip;
 	chip->ds->ops = &lan9303_switch_ops;
-	chip->ds->phys_mii_mask = chip->phy_addr_sel_strap ? 0xe : 0x7;
+	base = chip->phy_addr_base;
+	chip->ds->phys_mii_mask = GENMASK(LAN9303_NUM_PORTS - 1 + base, base);
 
 	return dsa_register_switch(chip->ds);
 }
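
Deriving phys_mii_mask with GENMASK() reproduces the old hard-coded values: with three ports, base 0 yields GENMASK(2, 0) = 0x7 and base 1 yields GENMASK(3, 1) = 0xe. A quick check in user space (32-bit GENMASK reimplemented for the sketch):

	#include <stdio.h>

	/* contiguous bitmask from bit l to bit h, h < 32 */
	#define GENMASK(h, l) \
		(((~0u) << (l)) & (~0u >> (31 - (h))))

	#define NUM_PORTS 3

	int main(void)
	{
		for (int base = 0; base <= 1; base++)
			printf("base=%d mask=0x%x\n", base,
			       GENMASK(NUM_PORTS - 1 + base, base));
		/* prints 0x7 then 0xe, the former hard-coded masks */
		return 0;
	}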
 
-static void lan9303_probe_reset_gpio(struct lan9303 *chip,
+static int lan9303_probe_reset_gpio(struct lan9303 *chip,
 				     struct device_node *np)
 {
 	chip->reset_gpio = devm_gpiod_get_optional(chip->dev, "reset",
 						   GPIOD_OUT_LOW);
+	if (IS_ERR(chip->reset_gpio))
+		return PTR_ERR(chip->reset_gpio);
 
-	if (IS_ERR(chip->reset_gpio)) {
+	if (!chip->reset_gpio) {
 		dev_dbg(chip->dev, "No reset GPIO defined\n");
-		return;
+		return 0;
 	}
 
 	chip->reset_duration = 200;
@@ -1318,6 +1317,8 @@ static void lan9303_probe_reset_gpio(struct lan9303 *chip,
 	/* A sane reset duration should not be longer than 1s */
 	if (chip->reset_duration > 1000)
 		chip->reset_duration = 1000;
+
+	return 0;
 }
 
 int lan9303_probe(struct lan9303 *chip, struct device_node *np)
@@ -1325,13 +1326,14 @@ int lan9303_probe(struct lan9303 *chip, struct device_node *np)
 	int ret;
 
 	mutex_init(&chip->indirect_mutex);
+	mutex_init(&chip->alr_mutex);
 
-	lan9303_probe_reset_gpio(chip, np);
-
-	ret = lan9303_handle_reset(chip);
+	ret = lan9303_probe_reset_gpio(chip, np);
 	if (ret)
 		return ret;
 
+	lan9303_handle_reset(chip);
+
 	ret = lan9303_check_device(chip);
 	if (ret)
 		return ret;
diff --git a/drivers/net/dsa/microchip/ksz_common.c b/drivers/net/dsa/microchip/ksz_common.c
index b5be93a1e0df..663b0d5b982b 100644
--- a/drivers/net/dsa/microchip/ksz_common.c
+++ b/drivers/net/dsa/microchip/ksz_common.c
@@ -559,8 +559,7 @@ static int ksz_port_vlan_filtering(struct dsa_switch *ds, int port, bool flag)
 }
 
 static int ksz_port_vlan_prepare(struct dsa_switch *ds, int port,
-				 const struct switchdev_obj_port_vlan *vlan,
-				 struct switchdev_trans *trans)
+				 const struct switchdev_obj_port_vlan *vlan)
 {
 	/* nothing needed */
 
@@ -568,8 +567,7 @@ static int ksz_port_vlan_prepare(struct dsa_switch *ds, int port,
 }
 
 static void ksz_port_vlan_add(struct dsa_switch *ds, int port,
-			      const struct switchdev_obj_port_vlan *vlan,
-			      struct switchdev_trans *trans)
+			      const struct switchdev_obj_port_vlan *vlan)
 {
 	struct ksz_device *dev = ds->priv;
 	u32 vlan_table[3];
@@ -858,16 +856,14 @@ exit:
 }
 
 static int ksz_port_mdb_prepare(struct dsa_switch *ds, int port,
-				const struct switchdev_obj_port_mdb *mdb,
-				struct switchdev_trans *trans)
+				const struct switchdev_obj_port_mdb *mdb)
 {
 	/* nothing to do */
 	return 0;
 }
 
 static void ksz_port_mdb_add(struct dsa_switch *ds, int port,
-			     const struct switchdev_obj_port_mdb *mdb,
-			     struct switchdev_trans *trans)
+			     const struct switchdev_obj_port_mdb *mdb)
 {
 	struct ksz_device *dev = ds->priv;
 	u32 static_table[4];
diff --git a/drivers/net/dsa/mt7530.c b/drivers/net/dsa/mt7530.c
index 2820d69810b3..8a0bb000d056 100644
--- a/drivers/net/dsa/mt7530.c
+++ b/drivers/net/dsa/mt7530.c
@@ -805,6 +805,69 @@ mt7530_port_bridge_join(struct dsa_switch *ds, int port,
 }
 
 static void
+mt7530_port_set_vlan_unaware(struct dsa_switch *ds, int port)
+{
+	struct mt7530_priv *priv = ds->priv;
+	bool all_user_ports_removed = true;
+	int i;
+
+	/* When a port is removed from the bridge, the port is set back to
+	 * its default state as at initial boot, which is a VLAN-unaware
+	 * port.
+	 */
+	mt7530_rmw(priv, MT7530_PCR_P(port), PCR_PORT_VLAN_MASK,
+		   MT7530_PORT_MATRIX_MODE);
+	mt7530_rmw(priv, MT7530_PVC_P(port), VLAN_ATTR_MASK,
+		   VLAN_ATTR(MT7530_VLAN_TRANSPARENT));
+
+	priv->ports[port].vlan_filtering = false;
+
+	for (i = 0; i < MT7530_NUM_PORTS; i++) {
+		if (dsa_is_user_port(ds, i) &&
+		    priv->ports[i].vlan_filtering) {
+			all_user_ports_removed = false;
+			break;
+		}
+	}
+
+	/* The CPU port is reset the same way once all user ports belonging
+	 * to it have left VLAN filtering mode.
+	 */
+	if (all_user_ports_removed) {
+		mt7530_write(priv, MT7530_PCR_P(MT7530_CPU_PORT),
+			     PCR_MATRIX(dsa_user_ports(priv->ds)));
+		mt7530_write(priv, MT7530_PVC_P(MT7530_CPU_PORT),
+			     PORT_SPEC_TAG);
+	}
+}
+
+static void
+mt7530_port_set_vlan_aware(struct dsa_switch *ds, int port)
+{
+	struct mt7530_priv *priv = ds->priv;
+
+	/* The real fabric path is decided by the port membership in the
+	 * VLAN table entry. Setting PCR_MATRIX to ALL_MEMBERS here means
+	 * a potential VLAN can consist of any subset of all ports.
+	 */
+	mt7530_rmw(priv, MT7530_PCR_P(port),
+		   PCR_MATRIX_MASK, PCR_MATRIX(MT7530_ALL_MEMBERS));
+
+	/* Putting the port into security mode allows packet forwarding
+	 * based on VLAN table lookup.
+	 */
+	mt7530_rmw(priv, MT7530_PCR_P(port), PCR_PORT_VLAN_MASK,
+		   MT7530_PORT_SECURITY_MODE);
+
+	/* Set the port as a user port so that it can recognize the VID of
+	 * incoming packets before fetching the entry from the VLAN table.
+	 */
+	mt7530_rmw(priv, MT7530_PVC_P(port), VLAN_ATTR_MASK,
+		   VLAN_ATTR(MT7530_VLAN_USER));
+}
+
+static void
 mt7530_port_bridge_leave(struct dsa_switch *ds, int port,
 			 struct net_device *bridge)
 {
@@ -817,8 +880,11 @@ mt7530_port_bridge_leave(struct dsa_switch *ds, int port,
 		/* Remove this port from the port matrix of the other ports
 		 * in the same bridge. If the port is disabled, port matrix
 		 * is kept and not set up again until the port becomes enabled.
+		 * The other ports' port matrix must also not be broken while
+		 * those ports are still VLAN-aware.
 		 */
-		if (dsa_is_user_port(ds, i) && i != port) {
+		if (!priv->ports[i].vlan_filtering &&
+		    dsa_is_user_port(ds, i) && i != port) {
 			if (dsa_to_port(ds, i)->bridge_dev != bridge)
 				continue;
 			if (priv->ports[i].enable)
@@ -836,6 +902,8 @@ mt7530_port_bridge_leave(struct dsa_switch *ds, int port,
 			   PCR_MATRIX(BIT(MT7530_CPU_PORT)));
 	priv->ports[port].pm = PCR_MATRIX(BIT(MT7530_CPU_PORT));
 
+	mt7530_port_set_vlan_unaware(ds, port);
+
 	mutex_unlock(&priv->reg_mutex);
 }
 
@@ -906,6 +974,220 @@ err:
 	return 0;
 }
 
+static int
+mt7530_vlan_cmd(struct mt7530_priv *priv, enum mt7530_vlan_cmd cmd, u16 vid)
+{
+	struct mt7530_dummy_poll p;
+	u32 val;
+	int ret;
+
+	val = VTCR_BUSY | VTCR_FUNC(cmd) | vid;
+	mt7530_write(priv, MT7530_VTCR, val);
+
+	INIT_MT7530_DUMMY_POLL(&p, priv, MT7530_VTCR);
+	ret = readx_poll_timeout(_mt7530_read, &p, val,
+				 !(val & VTCR_BUSY), 20, 20000);
+	if (ret < 0) {
+		dev_err(priv->dev, "poll timeout\n");
+		return ret;
+	}
+
+	val = mt7530_read(priv, MT7530_VTCR);
+	if (val & VTCR_INVALID) {
+		dev_err(priv->dev, "read VTCR invalid\n");
+		return -EINVAL;
+	}
+
+	return 0;
+}
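
The VLAN table command handshake is: write VTCR_BUSY together with the function code and VID, poll (every 20 us, for up to 20 ms) until the hardware clears the busy bit, then re-read VTCR and fail if the INVALID flag is set. A toy model of the sequence against a simulated register:

	#include <stdint.h>
	#include <stdio.h>

	#define VTCR_BUSY    (1u << 31)
	#define VTCR_INVALID (1u << 16)

	static uint32_t vtcr;

	/* fake device: completes commands instantly */
	static void hw_write(uint32_t val) { vtcr = val & ~VTCR_BUSY; }
	static uint32_t hw_read(void)      { return vtcr; }

	static int vlan_cmd(unsigned int cmd, uint16_t vid)
	{
		hw_write(VTCR_BUSY | ((cmd & 0xf) << 12) | vid);

		if (hw_read() & VTCR_BUSY)   /* readx_poll_timeout() here */
			return -110;         /* -ETIMEDOUT analogue */
		if (hw_read() & VTCR_INVALID)
			return -22;          /* -EINVAL analogue */
		return 0;
	}

	int main(void)
	{
		printf("vlan_cmd: %d\n", vlan_cmd(0, 100)); /* prints 0 */
		return 0;
	}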
+
+static int
+mt7530_port_vlan_filtering(struct dsa_switch *ds, int port,
+			   bool vlan_filtering)
+{
+	struct mt7530_priv *priv = ds->priv;
+
+	priv->ports[port].vlan_filtering = vlan_filtering;
+
+	if (vlan_filtering) {
+		/* The port is kept VLAN-unaware when the bridge is set up
+		 * without vlan_filtering. Otherwise, the port and the
+		 * corresponding CPU port must be set up to become
+		 * VLAN-aware ports.
+		 */
+		mt7530_port_set_vlan_aware(ds, port);
+		mt7530_port_set_vlan_aware(ds, MT7530_CPU_PORT);
+	}
+
+	return 0;
+}
+
+static int
+mt7530_port_vlan_prepare(struct dsa_switch *ds, int port,
+			 const struct switchdev_obj_port_vlan *vlan)
+{
+	/* nothing needed */
+
+	return 0;
+}
+
+static void
+mt7530_hw_vlan_add(struct mt7530_priv *priv,
+		   struct mt7530_hw_vlan_entry *entry)
+{
+	u8 new_members;
+	u32 val;
+
+	new_members = entry->old_members | BIT(entry->port) |
+		      BIT(MT7530_CPU_PORT);
+
+	/* Validate the entry with independent learning, create an egress tag
+	 * per VLAN, and join the port as one of the port members.
+	 */
+	val = IVL_MAC | VTAG_EN | PORT_MEM(new_members) | VLAN_VALID;
+	mt7530_write(priv, MT7530_VAWD1, val);
+
+	/* Decide whether or not to tag outgoing packets from the port
+	 * inside the VLAN.
+	 */
+	val = entry->untagged ? MT7530_VLAN_EGRESS_UNTAG :
+				MT7530_VLAN_EGRESS_TAG;
+	mt7530_rmw(priv, MT7530_VAWD2,
+		   ETAG_CTRL_P_MASK(entry->port),
+		   ETAG_CTRL_P(entry->port, val));
+
+	/* The CPU port is always treated as a tagged port so it can serve
+	 * more than one VLAN, and uses the stack-mode egress type so that
+	 * VLAN tags are appended after the hardware special tag used as
+	 * the DSA tag.
+	 */
+	mt7530_rmw(priv, MT7530_VAWD2,
+		   ETAG_CTRL_P_MASK(MT7530_CPU_PORT),
+		   ETAG_CTRL_P(MT7530_CPU_PORT,
+			       MT7530_VLAN_EGRESS_STACK));
+}
+
+static void
+mt7530_hw_vlan_del(struct mt7530_priv *priv,
+		   struct mt7530_hw_vlan_entry *entry)
+{
+	u8 new_members;
+	u32 val;
+
+	new_members = entry->old_members & ~BIT(entry->port);
+
+	val = mt7530_read(priv, MT7530_VAWD1);
+	if (!(val & VLAN_VALID)) {
+		dev_err(priv->dev,
+			"Cannot be deleted due to invalid entry\n");
+		return;
+	}
+
+	/* If any member other than the CPU port is still alive in the VLAN,
+	 * the entry is kept valid. Otherwise, the entry is disabled.
+	 */
+	if (new_members && new_members != BIT(MT7530_CPU_PORT)) {
+		val = IVL_MAC | VTAG_EN | PORT_MEM(new_members) |
+		      VLAN_VALID;
+		mt7530_write(priv, MT7530_VAWD1, val);
+	} else {
+		mt7530_write(priv, MT7530_VAWD1, 0);
+		mt7530_write(priv, MT7530_VAWD2, 0);
+	}
+}
+
+static void
+mt7530_hw_vlan_update(struct mt7530_priv *priv, u16 vid,
+		      struct mt7530_hw_vlan_entry *entry,
+		      mt7530_vlan_op vlan_op)
+{
+	u32 val;
+
+	/* Fetch entry */
+	mt7530_vlan_cmd(priv, MT7530_VTCR_RD_VID, vid);
+
+	val = mt7530_read(priv, MT7530_VAWD1);
+
+	entry->old_members = (val >> PORT_MEM_SHFT) & PORT_MEM_MASK;
+
+	/* Manipulate entry */
+	vlan_op(priv, entry);
+
+	/* Flush result to hardware */
+	mt7530_vlan_cmd(priv, MT7530_VTCR_WR_VID, vid);
+}
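
mt7530_hw_vlan_update() implements a read-modify-write cycle driven by a callback: fetch the entry for the VID into the VAWD shadow registers, record the old member set, let the add or delete operation edit it, then flush the entry back. The function-pointer pattern in a self-contained sketch (storage is simulated):

	#include <stdint.h>
	#include <stdio.h>

	struct entry { uint8_t members; };
	typedef void (*vlan_op)(struct entry *e, int port);

	static uint8_t table[4096]; /* member bitmap per VID */

	static void op_add(struct entry *e, int port)
	{
		e->members |= 1u << port;
	}

	static void op_del(struct entry *e, int port)
	{
		e->members &= ~(1u << port);
	}

	static void update(uint16_t vid, int port, vlan_op op)
	{
		struct entry e = { .members = table[vid] }; /* fetch */

		op(&e, port);                               /* manipulate */
		table[vid] = e.members;                     /* flush */
	}

	int main(void)
	{
		update(100, 2, op_add);
		update(100, 5, op_add);
		update(100, 2, op_del);
		printf("vid 100 members = 0x%02x\n", table[100]); /* 0x20 */
		return 0;
	}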
+
+static void
+mt7530_port_vlan_add(struct dsa_switch *ds, int port,
+		     const struct switchdev_obj_port_vlan *vlan)
+{
+	bool untagged = vlan->flags & BRIDGE_VLAN_INFO_UNTAGGED;
+	bool pvid = vlan->flags & BRIDGE_VLAN_INFO_PVID;
+	struct mt7530_hw_vlan_entry new_entry;
+	struct mt7530_priv *priv = ds->priv;
+	u16 vid;
+
+	/* The port is kept VLAN-unaware if the bridge is set up without
+	 * vlan_filtering.
+	 */
+	if (!priv->ports[port].vlan_filtering)
+		return;
+
+	mutex_lock(&priv->reg_mutex);
+
+	for (vid = vlan->vid_begin; vid <= vlan->vid_end; ++vid) {
+		mt7530_hw_vlan_entry_init(&new_entry, port, untagged);
+		mt7530_hw_vlan_update(priv, vid, &new_entry,
+				      mt7530_hw_vlan_add);
+	}
+
+	if (pvid) {
+		mt7530_rmw(priv, MT7530_PPBV1_P(port), G0_PORT_VID_MASK,
+			   G0_PORT_VID(vlan->vid_end));
+		priv->ports[port].pvid = vlan->vid_end;
+	}
+
+	mutex_unlock(&priv->reg_mutex);
+}
+
+static int
+mt7530_port_vlan_del(struct dsa_switch *ds, int port,
+		     const struct switchdev_obj_port_vlan *vlan)
+{
+	struct mt7530_hw_vlan_entry target_entry;
+	struct mt7530_priv *priv = ds->priv;
+	u16 vid, pvid;
+
+	/* The port is kept VLAN-unaware if the bridge is set up without
+	 * vlan_filtering.
+	 */
+	if (!priv->ports[port].vlan_filtering)
+		return 0;
+
+	mutex_lock(&priv->reg_mutex);
+
+	pvid = priv->ports[port].pvid;
+	for (vid = vlan->vid_begin; vid <= vlan->vid_end; ++vid) {
+		mt7530_hw_vlan_entry_init(&target_entry, port, 0);
+		mt7530_hw_vlan_update(priv, vid, &target_entry,
+				      mt7530_hw_vlan_del);
+
+		/* Restore the PVID to the default whenever the PVID
+		 * port is removed from the VLAN.
+		 */
+		if (pvid == vid)
+			pvid = G0_PORT_VID_DEF;
+	}
+
+	mt7530_rmw(priv, MT7530_PPBV1_P(port), G0_PORT_VID_MASK, pvid);
+	priv->ports[port].pvid = pvid;
+
+	mutex_unlock(&priv->reg_mutex);
+
+	return 0;
+}
+
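
A quick sketch of how the new VLAN helpers compose, assuming a valid
struct mt7530_priv *priv (illustration only, not part of the patch):

	/* Hypothetical example: add VID 100 on port 1 as an untagged
	 * member, through the same fetch/modify/flush path used by
	 * mt7530_port_vlan_add(); callers take priv->reg_mutex around
	 * this, as that function does.
	 */
	struct mt7530_hw_vlan_entry e;

	mt7530_hw_vlan_entry_init(&e, 1, true);
	mt7530_hw_vlan_update(priv, 100, &e, mt7530_hw_vlan_add);
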
 static enum dsa_tag_protocol
 mtk_get_tag_protocol(struct dsa_switch *ds, int port)
 {
@@ -1035,6 +1317,10 @@ static const struct dsa_switch_ops mt7530_switch_ops = {
 	.port_fdb_add		= mt7530_port_fdb_add,
 	.port_fdb_del		= mt7530_port_fdb_del,
 	.port_fdb_dump		= mt7530_port_fdb_dump,
+	.port_vlan_filtering	= mt7530_port_vlan_filtering,
+	.port_vlan_prepare	= mt7530_port_vlan_prepare,
+	.port_vlan_add		= mt7530_port_vlan_add,
+	.port_vlan_del		= mt7530_port_vlan_del,
 };
 
 static int
diff --git a/drivers/net/dsa/mt7530.h b/drivers/net/dsa/mt7530.h
index 74db9822eb40..d9b407a22a58 100644
--- a/drivers/net/dsa/mt7530.h
+++ b/drivers/net/dsa/mt7530.h
@@ -17,6 +17,7 @@
 #define MT7530_NUM_PORTS		7
 #define MT7530_CPU_PORT			6
 #define MT7530_NUM_FDB_RECORDS		2048
+#define MT7530_ALL_MEMBERS		0xff
 
 #define	NUM_TRGMII_CTRL			5
 
@@ -88,21 +89,42 @@ enum mt7530_fdb_cmd {
 /* Register for vlan table control */
 #define MT7530_VTCR			0x90
 #define  VTCR_BUSY			BIT(31)
-#define  VTCR_FUNC			(((x) & 0xf) << 12)
-#define  VTCR_FUNC_RD_VID		0x1
-#define  VTCR_FUNC_WR_VID		0x2
-#define  VTCR_FUNC_INV_VID		0x3
-#define  VTCR_FUNC_VAL_VID		0x4
+#define  VTCR_INVALID			BIT(16)
+#define  VTCR_FUNC(x)			(((x) & 0xf) << 12)
 #define  VTCR_VID			((x) & 0xfff)
 
+enum mt7530_vlan_cmd {
+	/* Read or write the VAWD registers for the specified VID
+	 * entry.
+	 */
+	MT7530_VTCR_RD_VID = 0,
+	MT7530_VTCR_WR_VID = 1,
+};
+
 /* Register for setup vlan and acl write data */
 #define MT7530_VAWD1			0x94
 #define  PORT_STAG			BIT(31)
+/* Independent VLAN Learning */
 #define  IVL_MAC			BIT(30)
+/* Per VLAN Egress Tag Control */
+#define  VTAG_EN			BIT(28)
+/* VLAN Member Control */
 #define  PORT_MEM(x)			(((x) & 0xff) << 16)
-#define  VALID				BIT(1)
+/* VLAN Entry Valid */
+#define  VLAN_VALID			BIT(0)
+#define  PORT_MEM_SHFT			16
+#define  PORT_MEM_MASK			0xff
 
 #define MT7530_VAWD2			0x98
+/* Egress Tag Control */
+#define  ETAG_CTRL_P(p, x)		(((x) & 0x3) << ((p) << 1))
+#define  ETAG_CTRL_P_MASK(p)		ETAG_CTRL_P(p, 3)
+
+enum mt7530_vlan_egress_attr {
+	MT7530_VLAN_EGRESS_UNTAG = 0,
+	MT7530_VLAN_EGRESS_TAG = 2,
+	MT7530_VLAN_EGRESS_STACK = 3,
+};
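
The per-port field math behind ETAG_CTRL_P() is worth spelling out
(a worked example, derived only from the macros above):

	/* For port 3, the 2-bit egress-tag field sits at bits 7:6:
	 *   ETAG_CTRL_P(3, MT7530_VLAN_EGRESS_TAG)
	 *     = (2 & 0x3) << (3 << 1) = 0x2 << 6 = 0x80
	 *   ETAG_CTRL_P_MASK(3) = 0x3 << 6 = 0xc0
	 */
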
 
 /* Register for port STP state control */
 #define MT7530_SSP_P(x)			(0x2000 + ((x) * 0x100))
@@ -120,11 +142,23 @@ enum mt7530_stp_state {
 /* Register for port control */
 #define MT7530_PCR_P(x)			(0x2004 + ((x) * 0x100))
 #define  PORT_VLAN(x)			((x) & 0x3)
+
+enum mt7530_port_mode {
+	/* Port Matrix Mode: Frames are forwarded by the PCR_MATRIX members. */
+	MT7530_PORT_MATRIX_MODE = PORT_VLAN(0),
+
+	/* Security Mode: Discard any frame that violates ingress
+	 * membership or whose VID is missing from the VLAN table.
+	 */
+	MT7530_PORT_SECURITY_MODE = PORT_VLAN(3),
+};
+
 #define  PCR_MATRIX(x)			(((x) & 0xff) << 16)
 #define  PORT_PRI(x)			(((x) & 0x7) << 24)
 #define  EG_TAG(x)			(((x) & 0x3) << 28)
 #define  PCR_MATRIX_MASK		PCR_MATRIX(0xff)
 #define  PCR_MATRIX_CLR			PCR_MATRIX(0)
+#define  PCR_PORT_VLAN_MASK		PORT_VLAN(3)
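
A short sketch of how these port-mode values are meant to be applied,
assuming the mt7530_rmw() read-modify-write helper from mt7530.c:

	/* Hypothetical example: move port 1 from matrix mode into
	 * security mode so the VLAN table gates ingress.
	 */
	mt7530_rmw(priv, MT7530_PCR_P(1), PCR_PORT_VLAN_MASK,
		   MT7530_PORT_SECURITY_MODE);
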
 
 /* Register for port security control */
 #define MT7530_PSC_P(x)			(0x200c + ((x) * 0x100))
@@ -134,10 +168,20 @@ enum mt7530_stp_state {
 #define MT7530_PVC_P(x)			(0x2010 + ((x) * 0x100))
 #define  PORT_SPEC_TAG			BIT(5)
 #define  VLAN_ATTR(x)			(((x) & 0x3) << 6)
+#define  VLAN_ATTR_MASK			VLAN_ATTR(3)
+
+enum mt7530_vlan_port_attr {
+	MT7530_VLAN_USER = 0,
+	MT7530_VLAN_TRANSPARENT = 3,
+};
+
 #define  STAG_VPID			(((x) & 0xffff) << 16)
 
 /* Register for port port-and-protocol based vlan 1 control */
 #define MT7530_PPBV1_P(x)		(0x2014 + ((x) * 0x100))
+#define  G0_PORT_VID(x)			(((x) & 0xfff) << 0)
+#define  G0_PORT_VID_MASK		G0_PORT_VID(0xfff)
+#define  G0_PORT_VID_DEF		G0_PORT_VID(1)
 
 /* Register for port MAC control register */
 #define MT7530_PMCR_P(x)		(0x3000 + ((x) * 0x100))
@@ -345,9 +389,20 @@ struct mt7530_fdb {
 	bool noarp;
 };
 
+/* struct mt7530_port -	This is the main data structure for holding the state
+ *			of the port.
+ * @enable:	Whether the port is enabled.
+ * @pm:		The matrix of all connections with the port.
+ * @pvid:	The VLAN treated as the PVID at ingress; any untagged
+ *		frame is assigned to this VLAN.
+ * @vlan_filtering: Whether the port recognizes VLAN-tagged
+ *		    frames.
+ */
 struct mt7530_port {
 	bool enable;
 	u32 pm;
+	u16 pvid;
+	bool vlan_filtering;
 };
 
 /* struct mt7530_priv -	This is the main data structure for holding the state
@@ -382,6 +437,22 @@ struct mt7530_priv {
 	struct mutex reg_mutex;
 };
 
+struct mt7530_hw_vlan_entry {
+	int port;
+	u8  old_members;
+	bool untagged;
+};
+
+static inline void mt7530_hw_vlan_entry_init(struct mt7530_hw_vlan_entry *e,
+					     int port, bool untagged)
+{
+	e->port = port;
+	e->untagged = untagged;
+}
+
+typedef void (*mt7530_vlan_op)(struct mt7530_priv *,
+			       struct mt7530_hw_vlan_entry *);
+
 struct mt7530_hw_stats {
 	const char	*string;
 	u16		reg;
diff --git a/drivers/net/dsa/mv88e6xxx/chip.c b/drivers/net/dsa/mv88e6xxx/chip.c
index 66d33e97cbc5..eb328bade225 100644
--- a/drivers/net/dsa/mv88e6xxx/chip.c
+++ b/drivers/net/dsa/mv88e6xxx/chip.c
@@ -1185,8 +1185,7 @@ static int mv88e6xxx_port_vlan_filtering(struct dsa_switch *ds, int port,
 
 static int
 mv88e6xxx_port_vlan_prepare(struct dsa_switch *ds, int port,
-			    const struct switchdev_obj_port_vlan *vlan,
-			    struct switchdev_trans *trans)
+			    const struct switchdev_obj_port_vlan *vlan)
 {
 	struct mv88e6xxx_chip *chip = ds->priv;
 	int err;
@@ -1295,8 +1294,7 @@ static int _mv88e6xxx_port_vlan_add(struct mv88e6xxx_chip *chip, int port,
 }
 
 static void mv88e6xxx_port_vlan_add(struct dsa_switch *ds, int port,
-				    const struct switchdev_obj_port_vlan *vlan,
-				    struct switchdev_trans *trans)
+				    const struct switchdev_obj_port_vlan *vlan)
 {
 	struct mv88e6xxx_chip *chip = ds->priv;
 	bool untagged = vlan->flags & BRIDGE_VLAN_INFO_UNTAGGED;
@@ -1725,9 +1723,11 @@ static int mv88e6xxx_setup_message_port(struct mv88e6xxx_chip *chip, int port)
 
 static int mv88e6xxx_setup_egress_floods(struct mv88e6xxx_chip *chip, int port)
 {
-	bool flood = port == dsa_upstream_port(chip->ds);
+	struct dsa_switch *ds = chip->ds;
+	bool flood;
 
 	/* Upstream ports flood frames with unknown unicast or multicast DA */
+	flood = dsa_is_cpu_port(ds, port) || dsa_is_dsa_port(ds, port);
 	if (chip->info->ops->port_set_egress_floods)
 		return chip->info->ops->port_set_egress_floods(chip, port,
 							       flood, flood);
@@ -1744,6 +1744,39 @@ static int mv88e6xxx_serdes_power(struct mv88e6xxx_chip *chip, int port,
 	return 0;
 }
 
+static int mv88e6xxx_setup_upstream_port(struct mv88e6xxx_chip *chip, int port)
+{
+	struct dsa_switch *ds = chip->ds;
+	int upstream_port;
+	int err;
+
+	upstream_port = dsa_upstream_port(ds, port);
+	if (chip->info->ops->port_set_upstream_port) {
+		err = chip->info->ops->port_set_upstream_port(chip, port,
+							      upstream_port);
+		if (err)
+			return err;
+	}
+
+	if (port == upstream_port) {
+		if (chip->info->ops->set_cpu_port) {
+			err = chip->info->ops->set_cpu_port(chip,
+							    upstream_port);
+			if (err)
+				return err;
+		}
+
+		if (chip->info->ops->set_egress_port) {
+			err = chip->info->ops->set_egress_port(chip,
+							       upstream_port);
+			if (err)
+				return err;
+		}
+	}
+
+	return 0;
+}
+
 static int mv88e6xxx_setup_port(struct mv88e6xxx_chip *chip, int port)
 {
 	struct dsa_switch *ds = chip->ds;
@@ -1814,13 +1847,9 @@ static int mv88e6xxx_setup_port(struct mv88e6xxx_chip *chip, int port)
 	if (err)
 		return err;
 
-	reg = 0;
-	if (chip->info->ops->port_set_upstream_port) {
-		err = chip->info->ops->port_set_upstream_port(
-			chip, port, dsa_upstream_port(ds));
-		if (err)
-			return err;
-	}
+	err = mv88e6xxx_setup_upstream_port(chip, port);
+	if (err)
+		return err;
 
 	err = mv88e6xxx_port_set_8021q_mode(chip, port,
 				MV88E6XXX_PORT_CTL2_8021Q_MODE_DISABLED);
@@ -1946,21 +1975,8 @@ static int mv88e6xxx_set_ageing_time(struct dsa_switch *ds,
 static int mv88e6xxx_g1_setup(struct mv88e6xxx_chip *chip)
 {
 	struct dsa_switch *ds = chip->ds;
-	u32 upstream_port = dsa_upstream_port(ds);
 	int err;
 
-	if (chip->info->ops->set_cpu_port) {
-		err = chip->info->ops->set_cpu_port(chip, upstream_port);
-		if (err)
-			return err;
-	}
-
-	if (chip->info->ops->set_egress_port) {
-		err = chip->info->ops->set_egress_port(chip, upstream_port);
-		if (err)
-			return err;
-	}
-
 	/* Disable remote management, and set the switch's DSA device number. */
 	err = mv88e6xxx_g1_write(chip, MV88E6XXX_G1_CTL2,
 				 MV88E6XXX_G1_CTL2_MULTIPLE_CASCADE |
@@ -3741,6 +3757,7 @@ static enum dsa_tag_protocol mv88e6xxx_get_tag_protocol(struct dsa_switch *ds,
 	return chip->info->tag_protocol;
 }
 
+#if IS_ENABLED(CONFIG_NET_DSA_LEGACY)
 static const char *mv88e6xxx_drv_probe(struct device *dsa_dev,
 				       struct device *host_dev, int sw_addr,
 				       void **priv)
@@ -3788,10 +3805,10 @@ free:
 
 	return NULL;
 }
+#endif
 
 static int mv88e6xxx_port_mdb_prepare(struct dsa_switch *ds, int port,
-				      const struct switchdev_obj_port_mdb *mdb,
-				      struct switchdev_trans *trans)
+				      const struct switchdev_obj_port_mdb *mdb)
 {
 	/* We don't need any dynamic resource from the kernel (yet),
 	 * so skip the prepare phase.
@@ -3801,8 +3818,7 @@ static int mv88e6xxx_port_mdb_prepare(struct dsa_switch *ds, int port,
 }
 
 static void mv88e6xxx_port_mdb_add(struct dsa_switch *ds, int port,
-				   const struct switchdev_obj_port_mdb *mdb,
-				   struct switchdev_trans *trans)
+				   const struct switchdev_obj_port_mdb *mdb)
 {
 	struct mv88e6xxx_chip *chip = ds->priv;
 
@@ -3829,7 +3845,9 @@ static int mv88e6xxx_port_mdb_del(struct dsa_switch *ds, int port,
 }
 
 static const struct dsa_switch_ops mv88e6xxx_switch_ops = {
+#if IS_ENABLED(CONFIG_NET_DSA_LEGACY)
 	.probe			= mv88e6xxx_drv_probe,
+#endif
 	.get_tag_protocol	= mv88e6xxx_get_tag_protocol,
 	.setup			= mv88e6xxx_setup,
 	.adjust_link		= mv88e6xxx_adjust_link,
@@ -3958,11 +3976,19 @@ static int mv88e6xxx_probe(struct mdio_device *mdiodev)
 			if (err)
 				goto out_g1_irq;
 		}
+
+		err = mv88e6xxx_g1_atu_prob_irq_setup(chip);
+		if (err)
+			goto out_g2_irq;
+
+		err = mv88e6xxx_g1_vtu_prob_irq_setup(chip);
+		if (err)
+			goto out_g1_atu_prob_irq;
 	}
 
 	err = mv88e6xxx_mdios_register(chip, np);
 	if (err)
-		goto out_g2_irq;
+		goto out_g1_vtu_prob_irq;
 
 	err = mv88e6xxx_register_switch(chip);
 	if (err)
@@ -3972,6 +3998,12 @@ static int mv88e6xxx_probe(struct mdio_device *mdiodev)
 
 out_mdio:
 	mv88e6xxx_mdios_unregister(chip);
+out_g1_vtu_prob_irq:
+	if (chip->irq > 0)
+		mv88e6xxx_g1_vtu_prob_irq_free(chip);
+out_g1_atu_prob_irq:
+	if (chip->irq > 0)
+		mv88e6xxx_g1_atu_prob_irq_free(chip);
 out_g2_irq:
 	if (chip->info->g2_irqs > 0 && chip->irq > 0)
 		mv88e6xxx_g2_irq_free(chip);
@@ -3995,6 +4027,8 @@ static void mv88e6xxx_remove(struct mdio_device *mdiodev)
 	mv88e6xxx_mdios_unregister(chip);
 
 	if (chip->irq > 0) {
+		mv88e6xxx_g1_vtu_prob_irq_free(chip);
+		mv88e6xxx_g1_atu_prob_irq_free(chip);
 		if (chip->info->g2_irqs > 0)
 			mv88e6xxx_g2_irq_free(chip);
 		mutex_lock(&chip->reg_lock);
diff --git a/drivers/net/dsa/mv88e6xxx/chip.h b/drivers/net/dsa/mv88e6xxx/chip.h
index 334f6f7544ba..3dba6e90adcf 100644
--- a/drivers/net/dsa/mv88e6xxx/chip.h
+++ b/drivers/net/dsa/mv88e6xxx/chip.h
@@ -207,6 +207,8 @@ struct mv88e6xxx_chip {
 	int irq;
 	int device_irq;
 	int watchdog_irq;
+	int atu_prob_irq;
+	int vtu_prob_irq;
 };
 
 struct mv88e6xxx_bus_ops {
diff --git a/drivers/net/dsa/mv88e6xxx/global1.h b/drivers/net/dsa/mv88e6xxx/global1.h
index b0dc7518b47f..6aee7316fea6 100644
--- a/drivers/net/dsa/mv88e6xxx/global1.h
+++ b/drivers/net/dsa/mv88e6xxx/global1.h
@@ -29,9 +29,9 @@
 #define MV88E6XXX_G1_STS_IRQ_AVB			8
 #define MV88E6XXX_G1_STS_IRQ_DEVICE			7
 #define MV88E6XXX_G1_STS_IRQ_STATS			6
-#define MV88E6XXX_G1_STS_IRQ_VTU_PROBLEM		5
+#define MV88E6XXX_G1_STS_IRQ_VTU_PROB			5
 #define MV88E6XXX_G1_STS_IRQ_VTU_DONE			4
-#define MV88E6XXX_G1_STS_IRQ_ATU_PROBLEM		3
+#define MV88E6XXX_G1_STS_IRQ_ATU_PROB			3
 #define MV88E6XXX_G1_STS_IRQ_ATU_DONE			2
 #define MV88E6XXX_G1_STS_IRQ_TCAM_DONE			1
 #define MV88E6XXX_G1_STS_IRQ_EEPROM_DONE		0
@@ -82,6 +82,10 @@
 #define MV88E6XXX_G1_VTU_OP_VTU_GET_NEXT	0x4000
 #define MV88E6XXX_G1_VTU_OP_STU_LOAD_PURGE	0x5000
 #define MV88E6XXX_G1_VTU_OP_STU_GET_NEXT	0x6000
+#define MV88E6XXX_G1_VTU_OP_GET_CLR_VIOLATION	0x7000
+#define MV88E6XXX_G1_VTU_OP_MEMBER_VIOLATION	BIT(6)
+#define MV88E6XXX_G1_VTU_OP_MISS_VIOLATION	BIT(5)
+#define MV88E6XXX_G1_VTU_OP_SPID_MASK		0xf
 
 /* Offset 0x06: VTU VID Register */
 #define MV88E6XXX_G1_VTU_VID		0x06
@@ -122,6 +126,10 @@
 #define MV88E6XXX_G1_ATU_OP_FLUSH_MOVE_ALL_DB		0x5000
 #define MV88E6XXX_G1_ATU_OP_FLUSH_MOVE_NON_STATIC_DB	0x6000
 #define MV88E6XXX_G1_ATU_OP_GET_CLR_VIOLATION		0x7000
+#define MV88E6XXX_G1_ATU_OP_AGE_OUT_VIOLATION		BIT(7)
+#define MV88E6XXX_G1_ATU_OP_MEMBER_VIOLATION		BIT(6)
+#define  MV88E6XXX_G1_ATU_OP_MISS_VIOLATION		BIT(5)
+#define MV88E6XXX_G1_ATU_OP_FULL_VIOLATION		BIT(4)
 
 /* Offset 0x0C: ATU Data Register */
 #define MV88E6XXX_G1_ATU_DATA				0x0c
@@ -255,6 +263,8 @@ int mv88e6xxx_g1_atu_loadpurge(struct mv88e6xxx_chip *chip, u16 fid,
 int mv88e6xxx_g1_atu_flush(struct mv88e6xxx_chip *chip, u16 fid, bool all);
 int mv88e6xxx_g1_atu_remove(struct mv88e6xxx_chip *chip, u16 fid, int port,
 			    bool all);
+int mv88e6xxx_g1_atu_prob_irq_setup(struct mv88e6xxx_chip *chip);
+void mv88e6xxx_g1_atu_prob_irq_free(struct mv88e6xxx_chip *chip);
 
 int mv88e6185_g1_vtu_getnext(struct mv88e6xxx_chip *chip,
 			     struct mv88e6xxx_vtu_entry *entry);
@@ -269,5 +279,7 @@ int mv88e6390_g1_vtu_getnext(struct mv88e6xxx_chip *chip,
 int mv88e6390_g1_vtu_loadpurge(struct mv88e6xxx_chip *chip,
 			       struct mv88e6xxx_vtu_entry *entry);
 int mv88e6xxx_g1_vtu_flush(struct mv88e6xxx_chip *chip);
+int mv88e6xxx_g1_vtu_prob_irq_setup(struct mv88e6xxx_chip *chip);
+void mv88e6xxx_g1_vtu_prob_irq_free(struct mv88e6xxx_chip *chip);
 
 #endif /* _MV88E6XXX_GLOBAL1_H */
diff --git a/drivers/net/dsa/mv88e6xxx/global1_atu.c b/drivers/net/dsa/mv88e6xxx/global1_atu.c
index efeef4b01442..20d941f4273b 100644
--- a/drivers/net/dsa/mv88e6xxx/global1_atu.c
+++ b/drivers/net/dsa/mv88e6xxx/global1_atu.c
@@ -9,6 +9,8 @@
  * the Free Software Foundation; either version 2 of the License, or
  * (at your option) any later version.
  */
+#include <linux/interrupt.h>
+#include <linux/irqdomain.h>
 
 #include "chip.h"
 #include "global1.h"
@@ -307,3 +309,88 @@ int mv88e6xxx_g1_atu_remove(struct mv88e6xxx_chip *chip, u16 fid, int port,
 
 	return mv88e6xxx_g1_atu_move(chip, fid, from_port, to_port, all);
 }
+
+static irqreturn_t mv88e6xxx_g1_atu_prob_irq_thread_fn(int irq, void *dev_id)
+{
+	struct mv88e6xxx_chip *chip = dev_id;
+	struct mv88e6xxx_atu_entry entry;
+	int err;
+	u16 val;
+
+	mutex_lock(&chip->reg_lock);
+
+	err = mv88e6xxx_g1_atu_op(chip, 0,
+				  MV88E6XXX_G1_ATU_OP_GET_CLR_VIOLATION);
+	if (err)
+		goto out;
+
+	err = mv88e6xxx_g1_read(chip, MV88E6XXX_G1_ATU_OP, &val);
+	if (err)
+		goto out;
+
+	err = mv88e6xxx_g1_atu_data_read(chip, &entry);
+	if (err)
+		goto out;
+
+	err = mv88e6xxx_g1_atu_mac_read(chip, &entry);
+	if (err)
+		goto out;
+
+	mutex_unlock(&chip->reg_lock);
+
+	if (val & MV88E6XXX_G1_ATU_OP_AGE_OUT_VIOLATION) {
+		dev_err_ratelimited(chip->dev,
+				    "ATU age out violation for %pM\n",
+				    entry.mac);
+	}
+
+	if (val & MV88E6XXX_G1_ATU_OP_MEMBER_VIOLATION) {
+		dev_err_ratelimited(chip->dev,
+				    "ATU member violation for %pM portvec %x\n",
+				    entry.mac, entry.portvec);
+	}
+
+	if (val & MV88E6XXX_G1_ATU_OP_MISS_VIOLATION)
+		dev_err_ratelimited(chip->dev,
+				    "ATU miss violation for %pM portvec %x\n",
+				    entry.mac, entry.portvec);
+
+	if (val & MV88E6XXX_G1_ATU_OP_FULL_VIOLATION)
+		dev_err_ratelimited(chip->dev,
+				    "ATU full violation for %pM portvec %x\n",
+				    entry.mac, entry.portvec);
+
+	return IRQ_HANDLED;
+
+out:
+	mutex_unlock(&chip->reg_lock);
+
+	dev_err(chip->dev, "ATU problem: error %d while handling interrupt\n",
+		err);
+	return IRQ_HANDLED;
+}
+
+int mv88e6xxx_g1_atu_prob_irq_setup(struct mv88e6xxx_chip *chip)
+{
+	int err;
+
+	chip->atu_prob_irq = irq_find_mapping(chip->g1_irq.domain,
+					      MV88E6XXX_G1_STS_IRQ_ATU_PROB);
+	if (chip->atu_prob_irq < 0)
+		return chip->atu_prob_irq;
+
+	err = request_threaded_irq(chip->atu_prob_irq, NULL,
+				   mv88e6xxx_g1_atu_prob_irq_thread_fn,
+				   IRQF_ONESHOT, "mv88e6xxx-g1-atu-prob",
+				   chip);
+	if (err)
+		irq_dispose_mapping(chip->atu_prob_irq);
+
+	return err;
+}
+
+void mv88e6xxx_g1_atu_prob_irq_free(struct mv88e6xxx_chip *chip)
+{
+	free_irq(chip->atu_prob_irq, chip);
+	irq_dispose_mapping(chip->atu_prob_irq);
+}
diff --git a/drivers/net/dsa/mv88e6xxx/global1_vtu.c b/drivers/net/dsa/mv88e6xxx/global1_vtu.c
index 8c8a0ec3d6e9..7997961647de 100644
--- a/drivers/net/dsa/mv88e6xxx/global1_vtu.c
+++ b/drivers/net/dsa/mv88e6xxx/global1_vtu.c
@@ -11,6 +11,9 @@
  * (at your option) any later version.
  */
 
+#include <linux/interrupt.h>
+#include <linux/irqdomain.h>
+
 #include "chip.h"
 #include "global1.h"
 
@@ -513,3 +516,74 @@ int mv88e6xxx_g1_vtu_flush(struct mv88e6xxx_chip *chip)
 
 	return mv88e6xxx_g1_vtu_op(chip, MV88E6XXX_G1_VTU_OP_FLUSH_ALL);
 }
+
+static irqreturn_t mv88e6xxx_g1_vtu_prob_irq_thread_fn(int irq, void *dev_id)
+{
+	struct mv88e6xxx_chip *chip = dev_id;
+	struct mv88e6xxx_vtu_entry entry;
+	int spid;
+	int err;
+	u16 val;
+
+	mutex_lock(&chip->reg_lock);
+
+	err = mv88e6xxx_g1_vtu_op(chip, MV88E6XXX_G1_VTU_OP_GET_CLR_VIOLATION);
+	if (err)
+		goto out;
+
+	err = mv88e6xxx_g1_read(chip, MV88E6XXX_G1_VTU_OP, &val);
+	if (err)
+		goto out;
+
+	err = mv88e6xxx_g1_vtu_vid_read(chip, &entry);
+	if (err)
+		goto out;
+
+	mutex_unlock(&chip->reg_lock);
+
+	spid = val & MV88E6XXX_G1_VTU_OP_SPID_MASK;
+
+	if (val & MV88E6XXX_G1_VTU_OP_MEMBER_VIOLATION) {
+		dev_err_ratelimited(chip->dev, "VTU member violation for vid %d, source port %d\n",
+				    entry.vid, spid);
+	}
+
+	if (val & MV88E6XXX_G1_VTU_OP_MISS_VIOLATION)
+		dev_err_ratelimited(chip->dev, "VTU miss violation for vid %d, source port %d\n",
+				    entry.vid, spid);
+
+	return IRQ_HANDLED;
+
+out:
+	mutex_unlock(&chip->reg_lock);
+
+	dev_err(chip->dev, "VTU problem: error %d while handling interrupt\n",
+		err);
+
+	return IRQ_HANDLED;
+}
+
+int mv88e6xxx_g1_vtu_prob_irq_setup(struct mv88e6xxx_chip *chip)
+{
+	int err;
+
+	chip->vtu_prob_irq = irq_find_mapping(chip->g1_irq.domain,
+					      MV88E6XXX_G1_STS_IRQ_VTU_PROB);
+	if (chip->vtu_prob_irq < 0)
+		return chip->vtu_prob_irq;
+
+	err = request_threaded_irq(chip->vtu_prob_irq, NULL,
+				   mv88e6xxx_g1_vtu_prob_irq_thread_fn,
+				   IRQF_ONESHOT, "mv88e6xxx-g1-vtu-prob",
+				   chip);
+	if (err)
+		irq_dispose_mapping(chip->vtu_prob_irq);
+
+	return err;
+}
+
+void mv88e6xxx_g1_vtu_prob_irq_free(struct mv88e6xxx_chip *chip)
+{
+	free_irq(chip->vtu_prob_irq, chip);
+	irq_dispose_mapping(chip->vtu_prob_irq);
+}
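
Both violation handlers rely on the same threaded-IRQ registration
pattern; a condensed sketch using only the kernel's generic IRQ API:

	/* A NULL primary handler plus IRQF_ONESHOT keeps the interrupt
	 * line masked until the threaded handler returns, so the slow
	 * register reads done under reg_lock cannot re-trigger the IRQ
	 * mid-handling.
	 */
	err = request_threaded_irq(irq, NULL, thread_fn, IRQF_ONESHOT,
				   "name", dev_id);
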
diff --git a/drivers/net/dummy.c b/drivers/net/dummy.c
index 58483af80bdb..30b1c8512049 100644
--- a/drivers/net/dummy.c
+++ b/drivers/net/dummy.c
@@ -42,48 +42,7 @@
 #define DRV_NAME	"dummy"
 #define DRV_VERSION	"1.0"
 
-#undef pr_fmt
-#define pr_fmt(fmt) DRV_NAME ": " fmt
-
 static int numdummies = 1;
-static int num_vfs;
-
-struct vf_data_storage {
-	u8	vf_mac[ETH_ALEN];
-	u16	pf_vlan; /* When set, guest VLAN config not allowed. */
-	u16	pf_qos;
-	__be16	vlan_proto;
-	u16	min_tx_rate;
-	u16	max_tx_rate;
-	u8	spoofchk_enabled;
-	bool	rss_query_enabled;
-	u8	trusted;
-	int	link_state;
-};
-
-struct dummy_priv {
-	struct vf_data_storage	*vfinfo;
-};
-
-static int dummy_num_vf(struct device *dev)
-{
-	return num_vfs;
-}
-
-static struct bus_type dummy_bus = {
-	.name	= "dummy",
-	.num_vf	= dummy_num_vf,
-};
-
-static void release_dummy_parent(struct device *dev)
-{
-}
-
-static struct device dummy_parent = {
-	.init_name	= "dummy",
-	.bus		= &dummy_bus,
-	.release	= release_dummy_parent,
-};
 
 /* fake multicast ability */
 static void set_multicast_list(struct net_device *dev)
@@ -133,25 +92,10 @@ static netdev_tx_t dummy_xmit(struct sk_buff *skb, struct net_device *dev)
 
 static int dummy_dev_init(struct net_device *dev)
 {
-	struct dummy_priv *priv = netdev_priv(dev);
-
 	dev->dstats = netdev_alloc_pcpu_stats(struct pcpu_dstats);
 	if (!dev->dstats)
 		return -ENOMEM;
 
-	priv->vfinfo = NULL;
-
-	if (!num_vfs)
-		return 0;
-
-	dev->dev.parent = &dummy_parent;
-	priv->vfinfo = kcalloc(num_vfs, sizeof(struct vf_data_storage),
-			       GFP_KERNEL);
-	if (!priv->vfinfo) {
-		free_percpu(dev->dstats);
-		return -ENOMEM;
-	}
-
 	return 0;
 }
 
@@ -169,117 +113,6 @@ static int dummy_change_carrier(struct net_device *dev, bool new_carrier)
 	return 0;
 }
 
-static int dummy_set_vf_mac(struct net_device *dev, int vf, u8 *mac)
-{
-	struct dummy_priv *priv = netdev_priv(dev);
-
-	if (!is_valid_ether_addr(mac) || (vf >= num_vfs))
-		return -EINVAL;
-
-	memcpy(priv->vfinfo[vf].vf_mac, mac, ETH_ALEN);
-
-	return 0;
-}
-
-static int dummy_set_vf_vlan(struct net_device *dev, int vf,
-			     u16 vlan, u8 qos, __be16 vlan_proto)
-{
-	struct dummy_priv *priv = netdev_priv(dev);
-
-	if ((vf >= num_vfs) || (vlan > 4095) || (qos > 7))
-		return -EINVAL;
-
-	priv->vfinfo[vf].pf_vlan = vlan;
-	priv->vfinfo[vf].pf_qos = qos;
-	priv->vfinfo[vf].vlan_proto = vlan_proto;
-
-	return 0;
-}
-
-static int dummy_set_vf_rate(struct net_device *dev, int vf, int min, int max)
-{
-	struct dummy_priv *priv = netdev_priv(dev);
-
-	if (vf >= num_vfs)
-		return -EINVAL;
-
-	priv->vfinfo[vf].min_tx_rate = min;
-	priv->vfinfo[vf].max_tx_rate = max;
-
-	return 0;
-}
-
-static int dummy_set_vf_spoofchk(struct net_device *dev, int vf, bool val)
-{
-	struct dummy_priv *priv = netdev_priv(dev);
-
-	if (vf >= num_vfs)
-		return -EINVAL;
-
-	priv->vfinfo[vf].spoofchk_enabled = val;
-
-	return 0;
-}
-
-static int dummy_set_vf_rss_query_en(struct net_device *dev, int vf, bool val)
-{
-	struct dummy_priv *priv = netdev_priv(dev);
-
-	if (vf >= num_vfs)
-		return -EINVAL;
-
-	priv->vfinfo[vf].rss_query_enabled = val;
-
-	return 0;
-}
-
-static int dummy_set_vf_trust(struct net_device *dev, int vf, bool val)
-{
-	struct dummy_priv *priv = netdev_priv(dev);
-
-	if (vf >= num_vfs)
-		return -EINVAL;
-
-	priv->vfinfo[vf].trusted = val;
-
-	return 0;
-}
-
-static int dummy_get_vf_config(struct net_device *dev,
-			       int vf, struct ifla_vf_info *ivi)
-{
-	struct dummy_priv *priv = netdev_priv(dev);
-
-	if (vf >= num_vfs)
-		return -EINVAL;
-
-	ivi->vf = vf;
-	memcpy(&ivi->mac, priv->vfinfo[vf].vf_mac, ETH_ALEN);
-	ivi->vlan = priv->vfinfo[vf].pf_vlan;
-	ivi->qos = priv->vfinfo[vf].pf_qos;
-	ivi->spoofchk = priv->vfinfo[vf].spoofchk_enabled;
-	ivi->linkstate = priv->vfinfo[vf].link_state;
-	ivi->min_tx_rate = priv->vfinfo[vf].min_tx_rate;
-	ivi->max_tx_rate = priv->vfinfo[vf].max_tx_rate;
-	ivi->rss_query_en = priv->vfinfo[vf].rss_query_enabled;
-	ivi->trusted = priv->vfinfo[vf].trusted;
-	ivi->vlan_proto = priv->vfinfo[vf].vlan_proto;
-
-	return 0;
-}
-
-static int dummy_set_vf_link_state(struct net_device *dev, int vf, int state)
-{
-	struct dummy_priv *priv = netdev_priv(dev);
-
-	if (vf >= num_vfs)
-		return -EINVAL;
-
-	priv->vfinfo[vf].link_state = state;
-
-	return 0;
-}
-
 static const struct net_device_ops dummy_netdev_ops = {
 	.ndo_init		= dummy_dev_init,
 	.ndo_uninit		= dummy_dev_uninit,
@@ -289,14 +122,6 @@ static const struct net_device_ops dummy_netdev_ops = {
 	.ndo_set_mac_address	= eth_mac_addr,
 	.ndo_get_stats64	= dummy_get_stats64,
 	.ndo_change_carrier	= dummy_change_carrier,
-	.ndo_set_vf_mac		= dummy_set_vf_mac,
-	.ndo_set_vf_vlan	= dummy_set_vf_vlan,
-	.ndo_set_vf_rate	= dummy_set_vf_rate,
-	.ndo_set_vf_spoofchk	= dummy_set_vf_spoofchk,
-	.ndo_set_vf_trust	= dummy_set_vf_trust,
-	.ndo_get_vf_config	= dummy_get_vf_config,
-	.ndo_set_vf_link_state	= dummy_set_vf_link_state,
-	.ndo_set_vf_rss_query_en = dummy_set_vf_rss_query_en,
 };
 
 static void dummy_get_drvinfo(struct net_device *dev,
@@ -323,13 +148,6 @@ static const struct ethtool_ops dummy_ethtool_ops = {
 	.get_ts_info		= dummy_get_ts_info,
 };
 
-static void dummy_free_netdev(struct net_device *dev)
-{
-	struct dummy_priv *priv = netdev_priv(dev);
-
-	kfree(priv->vfinfo);
-}
-
 static void dummy_setup(struct net_device *dev)
 {
 	ether_setup(dev);
@@ -338,7 +156,6 @@ static void dummy_setup(struct net_device *dev)
 	dev->netdev_ops = &dummy_netdev_ops;
 	dev->ethtool_ops = &dummy_ethtool_ops;
 	dev->needs_free_netdev = true;
-	dev->priv_destructor = dummy_free_netdev;
 
 	/* Fill in device structure with ethernet-generic values. */
 	dev->flags |= IFF_NOARP;
@@ -370,7 +187,6 @@ static int dummy_validate(struct nlattr *tb[], struct nlattr *data[],
 
 static struct rtnl_link_ops dummy_link_ops __read_mostly = {
 	.kind		= DRV_NAME,
-	.priv_size	= sizeof(struct dummy_priv),
 	.setup		= dummy_setup,
 	.validate	= dummy_validate,
 };
@@ -379,16 +195,12 @@ static struct rtnl_link_ops dummy_link_ops __read_mostly = {
 module_param(numdummies, int, 0);
 MODULE_PARM_DESC(numdummies, "Number of dummy pseudo devices");
 
-module_param(num_vfs, int, 0);
-MODULE_PARM_DESC(num_vfs, "Number of dummy VFs per dummy device");
-
 static int __init dummy_init_one(void)
 {
 	struct net_device *dev_dummy;
 	int err;
 
-	dev_dummy = alloc_netdev(sizeof(struct dummy_priv),
-				 "dummy%d", NET_NAME_ENUM, dummy_setup);
+	dev_dummy = alloc_netdev(0, "dummy%d", NET_NAME_ENUM, dummy_setup);
 	if (!dev_dummy)
 		return -ENOMEM;
 
@@ -407,21 +219,6 @@ static int __init dummy_init_module(void)
 {
 	int i, err = 0;
 
-	if (num_vfs) {
-		err = bus_register(&dummy_bus);
-		if (err < 0) {
-			pr_err("registering dummy bus failed\n");
-			return err;
-		}
-
-		err = device_register(&dummy_parent);
-		if (err < 0) {
-			pr_err("registering dummy parent device failed\n");
-			bus_unregister(&dummy_bus);
-			return err;
-		}
-	}
-
 	rtnl_lock();
 	err = __rtnl_link_register(&dummy_link_ops);
 	if (err < 0)
@@ -437,22 +234,12 @@ static int __init dummy_init_module(void)
 out:
 	rtnl_unlock();
 
-	if (err && num_vfs) {
-		device_unregister(&dummy_parent);
-		bus_unregister(&dummy_bus);
-	}
-
 	return err;
 }
 
 static void __exit dummy_cleanup_module(void)
 {
 	rtnl_link_unregister(&dummy_link_ops);
-
-	if (num_vfs) {
-		device_unregister(&dummy_parent);
-		bus_unregister(&dummy_bus);
-	}
 }
 
 module_init(dummy_init_module);
diff --git a/drivers/net/ethernet/Kconfig b/drivers/net/ethernet/Kconfig
index c60421339a98..b6cf4b6962f5 100644
--- a/drivers/net/ethernet/Kconfig
+++ b/drivers/net/ethernet/Kconfig
@@ -42,6 +42,7 @@ source "drivers/net/ethernet/cavium/Kconfig"
 source "drivers/net/ethernet/chelsio/Kconfig"
 source "drivers/net/ethernet/cirrus/Kconfig"
 source "drivers/net/ethernet/cisco/Kconfig"
+source "drivers/net/ethernet/cortina/Kconfig"
 
 config CX_ECAT
 	tristate "Beckhoff CX5020 EtherCAT master support"
@@ -170,6 +171,7 @@ source "drivers/net/ethernet/sis/Kconfig"
 source "drivers/net/ethernet/sfc/Kconfig"
 source "drivers/net/ethernet/sgi/Kconfig"
 source "drivers/net/ethernet/smsc/Kconfig"
+source "drivers/net/ethernet/socionext/Kconfig"
 source "drivers/net/ethernet/stmicro/Kconfig"
 source "drivers/net/ethernet/sun/Kconfig"
 source "drivers/net/ethernet/tehuti/Kconfig"
diff --git a/drivers/net/ethernet/Makefile b/drivers/net/ethernet/Makefile
index 39f6273358ed..3cdf01e96e0b 100644
--- a/drivers/net/ethernet/Makefile
+++ b/drivers/net/ethernet/Makefile
@@ -29,6 +29,7 @@ obj-$(CONFIG_NET_VENDOR_CAVIUM) += cavium/
 obj-$(CONFIG_NET_VENDOR_CHELSIO) += chelsio/
 obj-$(CONFIG_NET_VENDOR_CIRRUS) += cirrus/
 obj-$(CONFIG_NET_VENDOR_CISCO) += cisco/
+obj-$(CONFIG_NET_VENDOR_CORTINA) += cortina/
 obj-$(CONFIG_CX_ECAT) += ec_bhf.o
 obj-$(CONFIG_DM9000) += davicom/
 obj-$(CONFIG_DNET) += dnet.o
@@ -82,6 +83,7 @@ obj-$(CONFIG_SFC) += sfc/
 obj-$(CONFIG_SFC_FALCON) += sfc/falcon/
 obj-$(CONFIG_NET_VENDOR_SGI) += sgi/
 obj-$(CONFIG_NET_VENDOR_SMSC) += smsc/
+obj-$(CONFIG_NET_VENDOR_SOCIONEXT) += socionext/
 obj-$(CONFIG_NET_VENDOR_STMICRO) += stmicro/
 obj-$(CONFIG_NET_VENDOR_SUN) += sun/
 obj-$(CONFIG_NET_VENDOR_TEHUTI) += tehuti/
diff --git a/drivers/net/ethernet/alteon/acenic.c b/drivers/net/ethernet/alteon/acenic.c
index a1a52eb53b14..8f71b79b4949 100644
--- a/drivers/net/ethernet/alteon/acenic.c
+++ b/drivers/net/ethernet/alteon/acenic.c
@@ -1436,13 +1436,13 @@ static int ace_init(struct net_device *dev)
 	ace_set_txprd(regs, ap, 0);
 	writel(0, &regs->RxRetCsm);
 
-       /*
-	* Enable DMA engine now.
-	* If we do this sooner, Mckinley box pukes.
-	* I assume it's because Tigon II DMA engine wants to check
-	* *something* even before the CPU is started.
-	*/
-       writel(1, &regs->AssistState);  /* enable DMA */
+	/*
+	 * Enable DMA engine now.
+	 * If we do this sooner, Mckinley box pukes.
+	 * I assume it's because Tigon II DMA engine wants to check
+	 * *something* even before the CPU is started.
+	 */
+	writel(1, &regs->AssistState);  /* enable DMA */
 
 	/*
 	 * Start the NIC CPU
diff --git a/drivers/net/ethernet/amazon/ena/ena_eth_com.c b/drivers/net/ethernet/amazon/ena/ena_eth_com.c
index b11e573ad57a..ea149c134e15 100644
--- a/drivers/net/ethernet/amazon/ena/ena_eth_com.c
+++ b/drivers/net/ethernet/amazon/ena/ena_eth_com.c
@@ -504,3 +504,14 @@ int ena_com_tx_comp_req_id_get(struct ena_com_io_cq *io_cq, u16 *req_id)
 
 	return 0;
 }
+
+bool ena_com_cq_empty(struct ena_com_io_cq *io_cq)
+{
+	struct ena_eth_io_rx_cdesc_base *cdesc;
+
+	cdesc = ena_com_get_next_rx_cdesc(io_cq);
+	return !cdesc;
+}
diff --git a/drivers/net/ethernet/amazon/ena/ena_eth_com.h b/drivers/net/ethernet/amazon/ena/ena_eth_com.h
index bb53c3a4f8e9..2f7657227cfe 100644
--- a/drivers/net/ethernet/amazon/ena/ena_eth_com.h
+++ b/drivers/net/ethernet/amazon/ena/ena_eth_com.h
@@ -88,6 +88,8 @@ int ena_com_add_single_rx_desc(struct ena_com_io_sq *io_sq,
 
 int ena_com_tx_comp_req_id_get(struct ena_com_io_cq *io_cq, u16 *req_id);
 
+bool ena_com_cq_empty(struct ena_com_io_cq *io_cq);
+
 static inline void ena_com_unmask_intr(struct ena_com_io_cq *io_cq,
 				       struct ena_eth_io_intr_reg *intr_reg)
 {
diff --git a/drivers/net/ethernet/amazon/ena/ena_netdev.c b/drivers/net/ethernet/amazon/ena/ena_netdev.c
index fbe21a817bd8..6975150d144e 100644
--- a/drivers/net/ethernet/amazon/ena/ena_netdev.c
+++ b/drivers/net/ethernet/amazon/ena/ena_netdev.c
@@ -161,6 +161,8 @@ static void ena_init_io_rings_common(struct ena_adapter *adapter,
 	ring->per_napi_packets = 0;
 	ring->per_napi_bytes = 0;
 	ring->cpu = 0;
+	ring->first_interrupt = false;
+	ring->no_interrupt_event_cnt = 0;
 	u64_stats_init(&ring->syncp);
 }
 
@@ -1277,6 +1279,9 @@ static irqreturn_t ena_intr_msix_io(int irq, void *data)
 {
 	struct ena_napi *ena_napi = data;
 
+	ena_napi->tx_ring->first_interrupt = true;
+	ena_napi->rx_ring->first_interrupt = true;
+
 	napi_schedule_irqoff(&ena_napi->napi);
 
 	return IRQ_HANDLED;
@@ -2663,8 +2668,32 @@ static void ena_fw_reset_device(struct work_struct *work)
 	rtnl_unlock();
 }
 
-static int check_missing_comp_in_queue(struct ena_adapter *adapter,
-				       struct ena_ring *tx_ring)
+static int check_for_rx_interrupt_queue(struct ena_adapter *adapter,
+					struct ena_ring *rx_ring)
+{
+	if (likely(rx_ring->first_interrupt))
+		return 0;
+
+	if (ena_com_cq_empty(rx_ring->ena_com_io_cq))
+		return 0;
+
+	rx_ring->no_interrupt_event_cnt++;
+
+	if (rx_ring->no_interrupt_event_cnt == ENA_MAX_NO_INTERRUPT_ITERATIONS) {
+		netif_err(adapter, rx_err, adapter->netdev,
+			  "Potential MSIX issue on Rx side Queue = %d. Reset the device\n",
+			  rx_ring->qid);
+		adapter->reset_reason = ENA_REGS_RESET_MISS_INTERRUPT;
+		smp_mb__before_atomic();
+		set_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags);
+		return -EIO;
+	}
+
+	return 0;
+}
+
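The Rx-side check above is a simple consecutive-pass counter; a
condensed trace may help (the period between passes is set by the
driver's service timer and not shown here):

	/* pass 1: CQ non-empty, first_interrupt still false -> cnt = 1
	 * pass 2: same                                      -> cnt = 2
	 * pass 3: cnt reaches ENA_MAX_NO_INTERRUPT_ITERATIONS (3):
	 *         set ENA_FLAG_TRIGGER_RESET with reason
	 *         ENA_REGS_RESET_MISS_INTERRUPT and return -EIO
	 */
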
+static int check_missing_comp_in_tx_queue(struct ena_adapter *adapter,
+					  struct ena_ring *tx_ring)
 {
 	struct ena_tx_buffer *tx_buf;
 	unsigned long last_jiffies;
@@ -2674,8 +2703,27 @@ static int check_missing_comp_in_queue(struct ena_adapter *adapter,
 	for (i = 0; i < tx_ring->ring_size; i++) {
 		tx_buf = &tx_ring->tx_buffer_info[i];
 		last_jiffies = tx_buf->last_jiffies;
-		if (unlikely(last_jiffies &&
-			     time_is_before_jiffies(last_jiffies + adapter->missing_tx_completion_to))) {
+
+		if (last_jiffies == 0)
+			/* no pending Tx at this location */
+			continue;
+
+		if (unlikely(!tx_ring->first_interrupt && time_is_before_jiffies(last_jiffies +
+			     2 * adapter->missing_tx_completion_to))) {
+			/* If the interrupt is still not received after
+			 * the grace period, schedule a reset.
+			 */
+			netif_err(adapter, tx_err, adapter->netdev,
+				  "Potential MSIX issue on Tx side Queue = %d. Reset the device\n",
+				  tx_ring->qid);
+			adapter->reset_reason = ENA_REGS_RESET_MISS_INTERRUPT;
+			smp_mb__before_atomic();
+			set_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags);
+			return -EIO;
+		}
+
+		if (unlikely(time_is_before_jiffies(last_jiffies +
+				adapter->missing_tx_completion_to))) {
 			if (!tx_buf->print_once)
 				netif_notice(adapter, tx_err, adapter->netdev,
 					     "Found a Tx that wasn't completed on time, qid %d, index %d.\n",
@@ -2704,9 +2752,10 @@ static int check_missing_comp_in_queue(struct ena_adapter *adapter,
 	return rc;
 }
 
-static void check_for_missing_tx_completions(struct ena_adapter *adapter)
+static void check_for_missing_completions(struct ena_adapter *adapter)
 {
 	struct ena_ring *tx_ring;
+	struct ena_ring *rx_ring;
 	int i, budget, rc;
 
 	/* Make sure the driver doesn't turn the device in other process */
@@ -2725,8 +2774,13 @@ static void check_for_missing_tx_completions(struct ena_adapter *adapter)
 
 	for (i = adapter->last_monitored_tx_qid; i < adapter->num_queues; i++) {
 		tx_ring = &adapter->tx_ring[i];
+		rx_ring = &adapter->rx_ring[i];
+
+		rc = check_missing_comp_in_tx_queue(adapter, tx_ring);
+		if (unlikely(rc))
+			return;
 
-		rc = check_missing_comp_in_queue(adapter, tx_ring);
+		rc = check_for_rx_interrupt_queue(adapter, rx_ring);
 		if (unlikely(rc))
 			return;
 
@@ -2885,7 +2939,7 @@ static void ena_timer_service(struct timer_list *t)
 
 	check_for_admin_com_state(adapter);
 
-	check_for_missing_tx_completions(adapter);
+	check_for_missing_completions(adapter);
 
 	check_for_empty_rx_ring(adapter);
 
diff --git a/drivers/net/ethernet/amazon/ena/ena_netdev.h b/drivers/net/ethernet/amazon/ena/ena_netdev.h
index 3bbc003871de..f1972b5ab650 100644
--- a/drivers/net/ethernet/amazon/ena/ena_netdev.h
+++ b/drivers/net/ethernet/amazon/ena/ena_netdev.h
@@ -44,7 +44,7 @@
 #include "ena_eth_com.h"
 
 #define DRV_MODULE_VER_MAJOR	1
-#define DRV_MODULE_VER_MINOR	3
+#define DRV_MODULE_VER_MINOR	5
 #define DRV_MODULE_VER_SUBMINOR 0
 
 #define DRV_MODULE_NAME		"ena"
@@ -122,6 +122,7 @@
  * We wait for 6 sec just to be on the safe side.
  */
 #define ENA_DEVICE_KALIVE_TIMEOUT	(6 * HZ)
+#define ENA_MAX_NO_INTERRUPT_ITERATIONS 3
 
 #define ENA_MMIO_DISABLE_REG_READ	BIT(0)
 
@@ -236,6 +237,9 @@ struct ena_ring {
 	/* The maximum header length the device can handle */
 	u8 tx_max_header_size;
 
+	bool first_interrupt;
+	u16 no_interrupt_event_cnt;
+
 	/* cpu for TPH */
 	int cpu;
 	 /* number of tx/rx_buffer_info's entries */
diff --git a/drivers/net/ethernet/amazon/ena/ena_regs_defs.h b/drivers/net/ethernet/amazon/ena/ena_regs_defs.h
index 9aec43c5bba8..48ca97fbe7bc 100644
--- a/drivers/net/ethernet/amazon/ena/ena_regs_defs.h
+++ b/drivers/net/ethernet/amazon/ena/ena_regs_defs.h
@@ -60,6 +60,8 @@ enum ena_regs_reset_reason_types {
 	ENA_REGS_RESET_USER_TRIGGER		= 12,
 
 	ENA_REGS_RESET_GENERIC			= 13,
+
+	ENA_REGS_RESET_MISS_INTERRUPT		= 14,
 };
 
 /* ena_registers offsets */
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-drv.c b/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
index a74a8fbad53a..7a3ebfd236f5 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
@@ -2930,9 +2930,8 @@ void xgbe_dump_rx_desc(struct xgbe_prv_data *pdata, struct xgbe_ring *ring,
 void xgbe_print_pkt(struct net_device *netdev, struct sk_buff *skb, bool tx_rx)
 {
 	struct ethhdr *eth = (struct ethhdr *)skb->data;
-	unsigned char *buf = skb->data;
 	unsigned char buffer[128];
-	unsigned int i, j;
+	unsigned int i;
 
 	netdev_dbg(netdev, "\n************** SKB dump ****************\n");
 
@@ -2943,22 +2942,13 @@ void xgbe_print_pkt(struct net_device *netdev, struct sk_buff *skb, bool tx_rx)
 	netdev_dbg(netdev, "Src MAC addr: %pM\n", eth->h_source);
 	netdev_dbg(netdev, "Protocol: %#06hx\n", ntohs(eth->h_proto));
 
-	for (i = 0, j = 0; i < skb->len;) {
-		j += snprintf(buffer + j, sizeof(buffer) - j, "%02hhx",
-			      buf[i++]);
-
-		if ((i % 32) == 0) {
-			netdev_dbg(netdev, "  %#06x: %s\n", i - 32, buffer);
-			j = 0;
-		} else if ((i % 16) == 0) {
-			buffer[j++] = ' ';
-			buffer[j++] = ' ';
-		} else if ((i % 4) == 0) {
-			buffer[j++] = ' ';
-		}
+	for (i = 0; i < skb->len; i += 32) {
+		unsigned int len = min(skb->len - i, 32U);
+
+		hex_dump_to_buffer(&skb->data[i], len, 32, 1,
+				   buffer, sizeof(buffer), false);
+		netdev_dbg(netdev, "  %#06x: %s\n", i, buffer);
 	}
-	if (i % 32)
-		netdev_dbg(netdev, "  %#06x: %s\n", i - (i % 32), buffer);
 
 	netdev_dbg(netdev, "\n************** SKB dump ****************\n");
 }
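
The rewritten loop leans on the kernel's hex_dump_to_buffer() helper;
a self-contained sketch of the same call shape, with buffer sizes as
in xgbe_print_pkt() and hypothetical data/len inputs:

	char line[128];
	/* data and len are placeholders for the bytes being dumped */
	unsigned int chunk = min(len, 32U);

	/* 32 bytes per row, 1-byte groups, no trailing ASCII column */
	hex_dump_to_buffer(data, chunk, 32, 1, line, sizeof(line), false);
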
diff --git a/drivers/net/ethernet/aquantia/atlantic/Makefile b/drivers/net/ethernet/aquantia/atlantic/Makefile
index e4ae696920ef..686f6d8c9e79 100644
--- a/drivers/net/ethernet/aquantia/atlantic/Makefile
+++ b/drivers/net/ethernet/aquantia/atlantic/Makefile
@@ -39,4 +39,5 @@ atlantic-objs := aq_main.o \
 	hw_atl/hw_atl_a0.o \
 	hw_atl/hw_atl_b0.o \
 	hw_atl/hw_atl_utils.o \
+	hw_atl/hw_atl_utils_fw2x.o \
 	hw_atl/hw_atl_llh.o
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_cfg.h b/drivers/net/ethernet/aquantia/atlantic/aq_cfg.h
index 105fdb958cef..0b49f1aeebd3 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_cfg.h
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_cfg.h
@@ -65,7 +65,13 @@
 
 /*#define AQ_CFG_MAC_ADDR_PERMANENT {0x30, 0x0E, 0xE3, 0x12, 0x34, 0x56}*/
 
-#define AQ_CFG_FC_MODE 3U
+#define AQ_NIC_FC_OFF    0U
+#define AQ_NIC_FC_TX     1U
+#define AQ_NIC_FC_RX     2U
+#define AQ_NIC_FC_FULL   3U
+#define AQ_NIC_FC_AUTO   4U
+
+#define AQ_CFG_FC_MODE AQ_NIC_FC_FULL
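
The flow-control values combine as flags, which is why FULL equals
the two directions together (arithmetic from the defines above):

	/* AQ_NIC_FC_FULL == (AQ_NIC_FC_TX | AQ_NIC_FC_RX), i.e.
	 * 3U == 1U | 2U; AQ_NIC_FC_AUTO (4U) is a separate bit.
	 */
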
 
 #define AQ_CFG_SPEED_MSK  0xFFFFU	/* 0xFFFFU==auto_neg */
 
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_common.h b/drivers/net/ethernet/aquantia/atlantic/aq_common.h
index 9eb5e222a234..d52b088ff8f0 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_common.h
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_common.h
@@ -16,8 +16,45 @@
 #include <linux/pci.h>
 
 #include "ver.h"
-#include "aq_nic.h"
 #include "aq_cfg.h"
 #include "aq_utils.h"
 
+#define PCI_VENDOR_ID_AQUANTIA  0x1D6A
+
+#define AQ_DEVICE_ID_0001	0x0001
+#define AQ_DEVICE_ID_D100	0xD100
+#define AQ_DEVICE_ID_D107	0xD107
+#define AQ_DEVICE_ID_D108	0xD108
+#define AQ_DEVICE_ID_D109	0xD109
+
+#define AQ_DEVICE_ID_AQC100	0x00B1
+#define AQ_DEVICE_ID_AQC107	0x07B1
+#define AQ_DEVICE_ID_AQC108	0x08B1
+#define AQ_DEVICE_ID_AQC109	0x09B1
+#define AQ_DEVICE_ID_AQC111	0x11B1
+#define AQ_DEVICE_ID_AQC112	0x12B1
+
+#define AQ_DEVICE_ID_AQC100S	0x80B1
+#define AQ_DEVICE_ID_AQC107S	0x87B1
+#define AQ_DEVICE_ID_AQC108S	0x88B1
+#define AQ_DEVICE_ID_AQC109S	0x89B1
+#define AQ_DEVICE_ID_AQC111S	0x91B1
+#define AQ_DEVICE_ID_AQC112S	0x92B1
+
+#define AQ_DEVICE_ID_AQC111E	0x51B1
+#define AQ_DEVICE_ID_AQC112E	0x52B1
+
+#define HW_ATL_NIC_NAME "aQuantia AQtion 10Gbit Network Adapter"
+
+#define AQ_HWREV_ANY	0
+#define AQ_HWREV_1	1
+#define AQ_HWREV_2	2
+
+#define AQ_NIC_RATE_10G        BIT(0)
+#define AQ_NIC_RATE_5G         BIT(1)
+#define AQ_NIC_RATE_5GSR       BIT(2)
+#define AQ_NIC_RATE_2GS        BIT(3)
+#define AQ_NIC_RATE_1G         BIT(4)
+#define AQ_NIC_RATE_100M       BIT(5)
+
 #endif /* AQ_COMMON_H */
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_hw.h b/drivers/net/ethernet/aquantia/atlantic/aq_hw.h
index b3825de6cdfb..a2d416b24ffc 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_hw.h
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_hw.h
@@ -7,7 +7,7 @@
  * version 2, as published by the Free Software Foundation.
  */
 
-/* File aq_hw.h: Declaraion of abstract interface for NIC hardware specific
+/* File aq_hw.h: Declaration of abstract interface for NIC hardware specific
  * functions.
  */
 
@@ -15,12 +15,15 @@
 #define AQ_HW_H
 
 #include "aq_common.h"
+#include "aq_rss.h"
+#include "hw_atl/hw_atl_utils.h"
 
 /* NIC H/W capabilities */
 struct aq_hw_caps_s {
 	u64 hw_features;
 	u64 link_speed_msk;
 	unsigned int hw_priv_flags;
+	u32 media_type;
 	u32 rxds;
 	u32 txds;
 	u32 txhwb_alignment;
@@ -28,7 +31,7 @@ struct aq_hw_caps_s {
 	u32 vecs;
 	u32 mtu;
 	u32 mac_regs_count;
-	u8 ports;
+	u32 hw_alive_check_addr;
 	u8 msix_irqs;
 	u8 tcs;
 	u8 rxd_alignment;
@@ -39,7 +42,6 @@ struct aq_hw_caps_s {
 	u8 rx_rings;
 	bool flow_control;
 	bool is_64_dma;
-	u32 fw_ver_expected;
 };
 
 struct aq_hw_link_status_s {
@@ -86,30 +88,43 @@ struct aq_stats_s {
 
 #define AQ_HW_FLAG_ERRORS      (AQ_HW_FLAG_ERR_HW | AQ_HW_FLAG_ERR_UNPLUG)
 
+#define AQ_NIC_FLAGS_IS_NOT_READY (AQ_NIC_FLAG_STOPPING | \
+			AQ_NIC_FLAG_RESETTING | AQ_NIC_FLAG_CLOSING | \
+			AQ_NIC_FLAG_ERR_UNPLUG | AQ_NIC_FLAG_ERR_HW)
+
+#define AQ_NIC_FLAGS_IS_NOT_TX_READY (AQ_NIC_FLAGS_IS_NOT_READY | \
+					AQ_NIC_LINK_DOWN)
+
+#define AQ_HW_MEDIA_TYPE_TP    1U
+#define AQ_HW_MEDIA_TYPE_FIBRE 2U
+
 struct aq_hw_s {
-	struct aq_obj_s header;
+	atomic_t flags;
+	u8 rbl_enabled:1;
 	struct aq_nic_cfg_s *aq_nic_cfg;
-	struct aq_pci_func_s *aq_pci_func;
+	const struct aq_fw_ops *aq_fw_ops;
 	void __iomem *mmio;
-	unsigned int not_ff_addr;
 	struct aq_hw_link_status_s aq_link_status;
+	struct hw_aq_atl_utils_mbox mbox;
+	struct hw_atl_stats_s last_stats;
+	struct aq_stats_s curr_stats;
+	u64 speed;
+	u32 itr_tx;
+	u32 itr_rx;
+	unsigned int chip_features;
+	u32 fw_ver_actual;
+	atomic_t dpc;
+	u32 mbox_addr;
+	u32 rpc_addr;
+	u32 rpc_tid;
+	struct hw_aq_atl_utils_fw_rpc rpc;
 };
 
 struct aq_ring_s;
 struct aq_ring_param_s;
-struct aq_nic_cfg_s;
 struct sk_buff;
 
 struct aq_hw_ops {
-	struct aq_hw_s *(*create)(struct aq_pci_func_s *aq_pci_func,
-				  unsigned int port, struct aq_hw_ops *ops);
-
-	void (*destroy)(struct aq_hw_s *self);
-
-	int (*get_hw_caps)(struct aq_hw_s *self,
-			   struct aq_hw_caps_s *aq_hw_caps,
-			   unsigned short device,
-			   unsigned short subsystem_device);
 
 	int (*hw_ring_tx_xmit)(struct aq_hw_s *self, struct aq_ring_s *aq_ring,
 			       unsigned int frags);
@@ -123,20 +138,11 @@ struct aq_hw_ops {
 	int (*hw_ring_tx_head_update)(struct aq_hw_s *self,
 				      struct aq_ring_s *aq_ring);
 
-	int (*hw_get_mac_permanent)(struct aq_hw_s *self,
-				    struct aq_hw_caps_s *aq_hw_caps,
-				    u8 *mac);
-
 	int (*hw_set_mac_address)(struct aq_hw_s *self, u8 *mac_addr);
 
-	int (*hw_get_link_status)(struct aq_hw_s *self);
-
-	int (*hw_set_link_speed)(struct aq_hw_s *self, u32 speed);
-
 	int (*hw_reset)(struct aq_hw_s *self);
 
-	int (*hw_init)(struct aq_hw_s *self, struct aq_nic_cfg_s *aq_nic_cfg,
-		       u8 *mac_addr);
+	int (*hw_init)(struct aq_hw_s *self, u8 *mac_addr);
 
 	int (*hw_start)(struct aq_hw_s *self);
 
@@ -184,9 +190,8 @@ struct aq_hw_ops {
 			       struct aq_rss_parameters *rss_params);
 
 	int (*hw_get_regs)(struct aq_hw_s *self,
-			   struct aq_hw_caps_s *aq_hw_caps, u32 *regs_buff);
-
-	int (*hw_update_stats)(struct aq_hw_s *self);
+			   const struct aq_hw_caps_s *aq_hw_caps,
+			   u32 *regs_buff);
 
 	struct aq_stats_s *(*hw_get_hw_stats)(struct aq_hw_s *self);
 
@@ -197,4 +202,20 @@ struct aq_hw_ops {
 	int (*hw_set_power)(struct aq_hw_s *self, unsigned int power_state);
 };
 
+struct aq_fw_ops {
+	int (*init)(struct aq_hw_s *self);
+
+	int (*reset)(struct aq_hw_s *self);
+
+	int (*get_mac_permanent)(struct aq_hw_s *self, u8 *mac);
+
+	int (*set_link_speed)(struct aq_hw_s *self, u32 speed);
+
+	int (*set_state)(struct aq_hw_s *self, enum hal_atl_utils_fw_state_e state);
+
+	int (*update_link_status)(struct aq_hw_s *self);
+
+	int (*update_stats)(struct aq_hw_s *self);
+};
+
 #endif /* AQ_HW_H */
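
Firmware interaction now sits behind its own ops table, separate from
aq_hw_ops; a dispatch sketch mirroring calls that aq_nic.c makes later
in this patch:

	/* self is a struct aq_nic_s (sketch; error handling trimmed) */
	err = self->aq_fw_ops->update_link_status(self->aq_hw);
	if (!err && self->aq_fw_ops->update_stats)
		err = self->aq_fw_ops->update_stats(self->aq_hw);
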
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_hw_utils.c b/drivers/net/ethernet/aquantia/atlantic/aq_hw_utils.c
index 5f13465995f6..d526c4f19d34 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_hw_utils.c
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_hw_utils.c
@@ -13,6 +13,7 @@
 
 #include "aq_hw_utils.h"
 #include "aq_hw.h"
+#include "aq_nic.h"
 
 void aq_hw_write_reg_bit(struct aq_hw_s *aq_hw, u32 addr, u32 msk,
 			 u32 shift, u32 val)
@@ -39,8 +40,10 @@ u32 aq_hw_read_reg(struct aq_hw_s *hw, u32 reg)
 {
 	u32 value = readl(hw->mmio + reg);
 
-	if ((~0U) == value && (~0U) == readl(hw->mmio + hw->not_ff_addr))
-		aq_utils_obj_set(&hw->header.flags, AQ_HW_FLAG_ERR_UNPLUG);
+	if ((~0U) == value &&
+	    (~0U) == readl(hw->mmio +
+			   hw->aq_nic_cfg->aq_hw_caps->hw_alive_check_addr))
+		aq_utils_obj_set(&hw->flags, AQ_HW_FLAG_ERR_UNPLUG);
 
 	return value;
 }
@@ -54,11 +57,11 @@ int aq_hw_err_from_flags(struct aq_hw_s *hw)
 {
 	int err = 0;
 
-	if (aq_utils_obj_test(&hw->header.flags, AQ_HW_FLAG_ERR_UNPLUG)) {
+	if (aq_utils_obj_test(&hw->flags, AQ_HW_FLAG_ERR_UNPLUG)) {
 		err = -ENXIO;
 		goto err_exit;
 	}
-	if (aq_utils_obj_test(&hw->header.flags, AQ_HW_FLAG_ERR_HW)) {
+	if (aq_utils_obj_test(&hw->flags, AQ_HW_FLAG_ERR_HW)) {
 		err = -EIO;
 		goto err_exit;
 	}
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_hw_utils.h b/drivers/net/ethernet/aquantia/atlantic/aq_hw_utils.h
index 03b72ddbffb9..dc88a1221f1d 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_hw_utils.h
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_hw_utils.h
@@ -35,6 +35,9 @@ do { \
 	} \
 } while (0)
 
+#define aq_pr_err(...) pr_err(AQ_CFG_DRV_NAME ": " __VA_ARGS__)
+#define aq_pr_trace(...) pr_info(AQ_CFG_DRV_NAME ": " __VA_ARGS__)
+
 struct aq_hw_s;
 
 void aq_hw_write_reg_bit(struct aq_hw_s *aq_hw, u32 addr, u32 msk,
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_main.c b/drivers/net/ethernet/aquantia/atlantic/aq_main.c
index 5d6c40d86775..ba5fe8c4125d 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_main.c
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_main.c
@@ -13,49 +13,39 @@
 #include "aq_nic.h"
 #include "aq_pci_func.h"
 #include "aq_ethtool.h"
-#include "hw_atl/hw_atl_a0.h"
-#include "hw_atl/hw_atl_b0.h"
 
 #include <linux/netdevice.h>
 #include <linux/module.h>
 
-static const struct pci_device_id aq_pci_tbl[] = {
-	{ PCI_VDEVICE(AQUANTIA, HW_ATL_DEVICE_ID_0001), },
-	{ PCI_VDEVICE(AQUANTIA, HW_ATL_DEVICE_ID_D100), },
-	{ PCI_VDEVICE(AQUANTIA, HW_ATL_DEVICE_ID_D107), },
-	{ PCI_VDEVICE(AQUANTIA, HW_ATL_DEVICE_ID_D108), },
-	{ PCI_VDEVICE(AQUANTIA, HW_ATL_DEVICE_ID_D109), },
-	{}
-};
-
-MODULE_DEVICE_TABLE(pci, aq_pci_tbl);
-
 MODULE_LICENSE("GPL v2");
 MODULE_VERSION(AQ_CFG_DRV_VERSION);
 MODULE_AUTHOR(AQ_CFG_DRV_AUTHOR);
 MODULE_DESCRIPTION(AQ_CFG_DRV_DESC);
 
-static struct aq_hw_ops *aq_pci_probe_get_hw_ops_by_id(struct pci_dev *pdev)
+static const struct net_device_ops aq_ndev_ops;
+
+struct net_device *aq_ndev_alloc(void)
 {
-	struct aq_hw_ops *ops = NULL;
+	struct net_device *ndev = NULL;
+	struct aq_nic_s *aq_nic = NULL;
+
+	ndev = alloc_etherdev_mq(sizeof(struct aq_nic_s), AQ_CFG_VECS_MAX);
+	if (!ndev)
+		return NULL;
 
-	ops = hw_atl_a0_get_ops_by_id(pdev);
-	if (!ops)
-		ops = hw_atl_b0_get_ops_by_id(pdev);
+	aq_nic = netdev_priv(ndev);
+	aq_nic->ndev = ndev;
+	ndev->netdev_ops = &aq_ndev_ops;
+	ndev->ethtool_ops = &aq_ethtool_ops;
 
-	return ops;
+	return ndev;
 }
 
 static int aq_ndev_open(struct net_device *ndev)
 {
-	struct aq_nic_s *aq_nic = NULL;
 	int err = 0;
+	struct aq_nic_s *aq_nic = netdev_priv(ndev);
 
-	aq_nic = aq_nic_alloc_hot(ndev);
-	if (!aq_nic) {
-		err = -ENOMEM;
-		goto err_exit;
-	}
 	err = aq_nic_init(aq_nic);
 	if (err < 0)
 		goto err_exit;
@@ -78,7 +68,6 @@ static int aq_ndev_close(struct net_device *ndev)
 	if (err < 0)
 		goto err_exit;
 	aq_nic_deinit(aq_nic);
-	aq_nic_free_hot_resources(aq_nic);
 
 err_exit:
 	return err;
@@ -150,15 +139,13 @@ static void aq_ndev_set_multicast_settings(struct net_device *ndev)
 
 	err = aq_nic_set_packet_filter(aq_nic, ndev->flags);
 	if (err < 0)
-		goto err_exit;
+		return;
 
 	if (netdev_mc_count(ndev)) {
 		err = aq_nic_set_multicast_list(aq_nic, ndev);
 		if (err < 0)
-			goto err_exit;
+			return;
 	}
-
-err_exit:;
 }
 
 static const struct net_device_ops aq_ndev_ops = {
@@ -170,66 +157,3 @@ static const struct net_device_ops aq_ndev_ops = {
 	.ndo_set_mac_address = aq_ndev_set_mac_address,
 	.ndo_set_features = aq_ndev_set_features
 };
-
-static int aq_pci_probe(struct pci_dev *pdev,
-			const struct pci_device_id *pci_id)
-{
-	struct aq_hw_ops *aq_hw_ops = NULL;
-	struct aq_pci_func_s *aq_pci_func = NULL;
-	int err = 0;
-
-	err = pci_enable_device(pdev);
-	if (err < 0)
-		goto err_exit;
-	aq_hw_ops = aq_pci_probe_get_hw_ops_by_id(pdev);
-	aq_pci_func = aq_pci_func_alloc(aq_hw_ops, pdev,
-					&aq_ndev_ops, &aq_ethtool_ops);
-	if (!aq_pci_func) {
-		err = -ENOMEM;
-		goto err_exit;
-	}
-	err = aq_pci_func_init(aq_pci_func);
-	if (err < 0)
-		goto err_exit;
-
-err_exit:
-	if (err < 0) {
-		if (aq_pci_func)
-			aq_pci_func_free(aq_pci_func);
-	}
-	return err;
-}
-
-static void aq_pci_remove(struct pci_dev *pdev)
-{
-	struct aq_pci_func_s *aq_pci_func = pci_get_drvdata(pdev);
-
-	aq_pci_func_deinit(aq_pci_func);
-	aq_pci_func_free(aq_pci_func);
-}
-
-static int aq_pci_suspend(struct pci_dev *pdev, pm_message_t pm_msg)
-{
-	struct aq_pci_func_s *aq_pci_func = pci_get_drvdata(pdev);
-
-	return aq_pci_func_change_pm_state(aq_pci_func, &pm_msg);
-}
-
-static int aq_pci_resume(struct pci_dev *pdev)
-{
-	struct aq_pci_func_s *aq_pci_func = pci_get_drvdata(pdev);
-	pm_message_t pm_msg = PMSG_RESTORE;
-
-	return aq_pci_func_change_pm_state(aq_pci_func, &pm_msg);
-}
-
-static struct pci_driver aq_pci_ops = {
-	.name = AQ_CFG_DRV_NAME,
-	.id_table = aq_pci_tbl,
-	.probe = aq_pci_probe,
-	.remove = aq_pci_remove,
-	.suspend = aq_pci_suspend,
-	.resume = aq_pci_resume,
-};
-
-module_pci_driver(aq_pci_ops);
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_main.h b/drivers/net/ethernet/aquantia/atlantic/aq_main.h
index 9748e7e575e0..ce92152eb43e 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_main.h
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_main.h
@@ -14,4 +14,6 @@
 
 #include "aq_common.h"
 
+struct net_device *aq_ndev_alloc(void);
+
 #endif /* AQ_MAIN_H */
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_nic.c b/drivers/net/ethernet/aquantia/atlantic/aq_nic.c
index 75a894a9251c..ebbaf63eaf47 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_nic.c
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_nic.c
@@ -14,7 +14,6 @@
 #include "aq_vec.h"
 #include "aq_hw.h"
 #include "aq_pci_func.h"
-#include "aq_nic_internal.h"
 
 #include <linux/moduleparam.h>
 #include <linux/netdevice.h>
@@ -61,19 +60,13 @@ static void aq_nic_rss_init(struct aq_nic_s *self, unsigned int num_rss_queues)
 		rss_params->indirection_table[i] = i & (num_rss_queues - 1);
 }
 
-/* Fills aq_nic_cfg with valid defaults */
-static void aq_nic_cfg_init_defaults(struct aq_nic_s *self)
+/* Checks hw_caps and 'corrects' aq_nic_cfg at runtime */
+void aq_nic_cfg_start(struct aq_nic_s *self)
 {
 	struct aq_nic_cfg_s *cfg = &self->aq_nic_cfg;
 
-	cfg->aq_hw_caps = &self->aq_hw_caps;
-
-	cfg->vecs = AQ_CFG_VECS_DEF;
 	cfg->tcs = AQ_CFG_TCS_DEF;
 
-	cfg->rxds = AQ_CFG_RXDS_DEF;
-	cfg->txds = AQ_CFG_TXDS_DEF;
-
 	cfg->is_polling = AQ_CFG_IS_POLLING_DEF;
 
 	cfg->itr = aq_itr;
@@ -94,19 +87,13 @@ static void aq_nic_cfg_init_defaults(struct aq_nic_s *self)
 	cfg->vlan_id = 0U;
 
 	aq_nic_rss_init(self, cfg->num_rss_queues);
-}
-
-/* Checks hw_caps and 'corrects' aq_nic_cfg in runtime */
-int aq_nic_cfg_start(struct aq_nic_s *self)
-{
-	struct aq_nic_cfg_s *cfg = &self->aq_nic_cfg;
 
 	/*descriptors */
-	cfg->rxds = min(cfg->rxds, cfg->aq_hw_caps->rxds);
-	cfg->txds = min(cfg->txds, cfg->aq_hw_caps->txds);
+	cfg->rxds = min(cfg->aq_hw_caps->rxds, AQ_CFG_RXDS_DEF);
+	cfg->txds = min(cfg->aq_hw_caps->txds, AQ_CFG_TXDS_DEF);
 
 	/*rss rings */
-	cfg->vecs = min(cfg->vecs, cfg->aq_hw_caps->vecs);
+	cfg->vecs = min(cfg->aq_hw_caps->vecs, AQ_CFG_VECS_DEF);
 	cfg->vecs = min(cfg->vecs, num_online_cpus());
 	/* cfg->vecs should be power of 2 for RSS */
 	if (cfg->vecs >= 8U)
@@ -120,23 +107,22 @@ int aq_nic_cfg_start(struct aq_nic_s *self)
 
 	cfg->num_rss_queues = min(cfg->vecs, AQ_CFG_NUM_RSS_QUEUES_DEF);
 
-	cfg->irq_type = aq_pci_func_get_irq_type(self->aq_pci_func);
+	cfg->irq_type = aq_pci_func_get_irq_type(self);
 
 	if ((cfg->irq_type == AQ_HW_IRQ_LEGACY) ||
-	    (self->aq_hw_caps.vecs == 1U) ||
+	    (cfg->aq_hw_caps->vecs == 1U) ||
 	    (cfg->vecs == 1U)) {
 		cfg->is_rss = 0U;
 		cfg->vecs = 1U;
 	}
 
-	cfg->link_speed_msk &= self->aq_hw_caps.link_speed_msk;
-	cfg->hw_features = self->aq_hw_caps.hw_features;
-	return 0;
+	cfg->link_speed_msk &= cfg->aq_hw_caps->link_speed_msk;
+	cfg->hw_features = cfg->aq_hw_caps->hw_features;
 }
 
 static int aq_nic_update_link_status(struct aq_nic_s *self)
 {
-	int err = self->aq_hw_ops.hw_get_link_status(self->aq_hw);
+	int err = self->aq_fw_ops->update_link_status(self->aq_hw);
 
 	if (err)
 		return err;
@@ -150,9 +136,9 @@ static int aq_nic_update_link_status(struct aq_nic_s *self)
 
 	self->link_status = self->aq_hw->aq_link_status;
 	if (!netif_carrier_ok(self->ndev) && self->link_status.mbps) {
-		aq_utils_obj_set(&self->header.flags,
+		aq_utils_obj_set(&self->flags,
 				 AQ_NIC_FLAG_STARTED);
-		aq_utils_obj_clear(&self->header.flags,
+		aq_utils_obj_clear(&self->flags,
 				   AQ_NIC_LINK_DOWN);
 		netif_carrier_on(self->ndev);
 		netif_tx_wake_all_queues(self->ndev);
@@ -160,7 +146,7 @@ static int aq_nic_update_link_status(struct aq_nic_s *self)
 	if (netif_carrier_ok(self->ndev) && !self->link_status.mbps) {
 		netif_carrier_off(self->ndev);
 		netif_tx_disable(self->ndev);
-		aq_utils_obj_set(&self->header.flags, AQ_NIC_LINK_DOWN);
+		aq_utils_obj_set(&self->flags, AQ_NIC_LINK_DOWN);
 	}
 	return 0;
 }
@@ -171,15 +157,15 @@ static void aq_nic_service_timer_cb(struct timer_list *t)
 	int ctimer = AQ_CFG_SERVICE_TIMER_INTERVAL;
 	int err = 0;
 
-	if (aq_utils_obj_test(&self->header.flags, AQ_NIC_FLAGS_IS_NOT_READY))
+	if (aq_utils_obj_test(&self->flags, AQ_NIC_FLAGS_IS_NOT_READY))
 		goto err_exit;
 
 	err = aq_nic_update_link_status(self);
 	if (err)
 		goto err_exit;
 
-	if (self->aq_hw_ops.hw_update_stats)
-		self->aq_hw_ops.hw_update_stats(self->aq_hw);
+	if (self->aq_fw_ops->update_stats)
+		self->aq_fw_ops->update_stats(self->aq_hw);
 
 	aq_nic_update_ndev_stats(self);
 
@@ -205,60 +191,6 @@ static void aq_nic_polling_timer_cb(struct timer_list *t)
 		AQ_CFG_POLLING_TIMER_INTERVAL);
 }
 
-static struct net_device *aq_nic_ndev_alloc(void)
-{
-	return alloc_etherdev_mq(sizeof(struct aq_nic_s), AQ_CFG_VECS_MAX);
-}
-
-struct aq_nic_s *aq_nic_alloc_cold(const struct net_device_ops *ndev_ops,
-				   const struct ethtool_ops *et_ops,
-				   struct pci_dev *pdev,
-				   struct aq_pci_func_s *aq_pci_func,
-				   unsigned int port,
-				   const struct aq_hw_ops *aq_hw_ops)
-{
-	struct net_device *ndev = NULL;
-	struct aq_nic_s *self = NULL;
-	int err = 0;
-
-	ndev = aq_nic_ndev_alloc();
-	if (!ndev) {
-		err = -ENOMEM;
-		goto err_exit;
-	}
-
-	self = netdev_priv(ndev);
-
-	ndev->netdev_ops = ndev_ops;
-	ndev->ethtool_ops = et_ops;
-
-	SET_NETDEV_DEV(ndev, &pdev->dev);
-
-	ndev->if_port = port;
-	self->ndev = ndev;
-
-	self->aq_pci_func = aq_pci_func;
-
-	self->aq_hw_ops = *aq_hw_ops;
-	self->port = (u8)port;
-
-	self->aq_hw = self->aq_hw_ops.create(aq_pci_func, self->port,
-						&self->aq_hw_ops);
-	err = self->aq_hw_ops.get_hw_caps(self->aq_hw, &self->aq_hw_caps,
-					  pdev->device, pdev->subsystem_device);
-	if (err < 0)
-		goto err_exit;
-
-	aq_nic_cfg_init_defaults(self);
-
-err_exit:
-	if (err < 0) {
-		aq_nic_free_hot_resources(self);
-		self = NULL;
-	}
-	return self;
-}
-
 int aq_nic_ndev_register(struct aq_nic_s *self)
 {
 	int err = 0;
@@ -267,10 +199,14 @@ int aq_nic_ndev_register(struct aq_nic_s *self)
 		err = -EINVAL;
 		goto err_exit;
 	}
-	err = self->aq_hw_ops.hw_get_mac_permanent(self->aq_hw,
-			    self->aq_nic_cfg.aq_hw_caps,
+
+	err = hw_atl_utils_initfw(self->aq_hw, &self->aq_fw_ops);
+	if (err)
+		goto err_exit;
+
+	err = self->aq_fw_ops->get_mac_permanent(self->aq_hw,
 			    self->ndev->dev_addr);
-	if (err < 0)
+	if (err)
 		goto err_exit;
 
 #if defined(AQ_CFG_MAC_ADDR_PERMANENT)
@@ -281,83 +217,39 @@ int aq_nic_ndev_register(struct aq_nic_s *self)
 	}
 #endif
 
+	for (self->aq_vecs = 0; self->aq_vecs < aq_nic_get_cfg(self)->vecs;
+	     self->aq_vecs++) {
+		self->aq_vec[self->aq_vecs] =
+		    aq_vec_alloc(self, self->aq_vecs, aq_nic_get_cfg(self));
+		if (!self->aq_vec[self->aq_vecs]) {
+			err = -ENOMEM;
+			goto err_exit;
+		}
+	}
+
 	netif_carrier_off(self->ndev);
 
 	netif_tx_disable(self->ndev);
 
 	err = register_netdev(self->ndev);
-	if (err < 0)
+	if (err)
 		goto err_exit;
 
 err_exit:
 	return err;
 }
 
-int aq_nic_ndev_init(struct aq_nic_s *self)
+void aq_nic_ndev_init(struct aq_nic_s *self)
 {
-	struct aq_hw_caps_s *aq_hw_caps = self->aq_nic_cfg.aq_hw_caps;
+	const struct aq_hw_caps_s *aq_hw_caps = self->aq_nic_cfg.aq_hw_caps;
 	struct aq_nic_cfg_s *aq_nic_cfg = &self->aq_nic_cfg;
 
 	self->ndev->hw_features |= aq_hw_caps->hw_features;
 	self->ndev->features = aq_hw_caps->hw_features;
 	self->ndev->priv_flags = aq_hw_caps->hw_priv_flags;
 	self->ndev->mtu = aq_nic_cfg->mtu - ETH_HLEN;
-	self->ndev->max_mtu = self->aq_hw_caps.mtu - ETH_FCS_LEN - ETH_HLEN;
-
-	return 0;
-}
-
-void aq_nic_ndev_free(struct aq_nic_s *self)
-{
-	if (!self->ndev)
-		goto err_exit;
-
-	if (self->ndev->reg_state == NETREG_REGISTERED)
-		unregister_netdev(self->ndev);
-
-	if (self->aq_hw)
-		self->aq_hw_ops.destroy(self->aq_hw);
-
-	free_netdev(self->ndev);
-
-err_exit:;
-}
-
-struct aq_nic_s *aq_nic_alloc_hot(struct net_device *ndev)
-{
-	struct aq_nic_s *self = NULL;
-	int err = 0;
-
-	if (!ndev) {
-		err = -EINVAL;
-		goto err_exit;
-	}
-	self = netdev_priv(ndev);
-
-	if (!self) {
-		err = -EINVAL;
-		goto err_exit;
-	}
-	if (netif_running(ndev))
-		netif_tx_disable(ndev);
-	netif_carrier_off(self->ndev);
-
-	for (self->aq_vecs = 0; self->aq_vecs < self->aq_nic_cfg.vecs;
-		self->aq_vecs++) {
-		self->aq_vec[self->aq_vecs] =
-		    aq_vec_alloc(self, self->aq_vecs, &self->aq_nic_cfg);
-		if (!self->aq_vec[self->aq_vecs]) {
-			err = -ENOMEM;
-			goto err_exit;
-		}
-	}
+	self->ndev->max_mtu = aq_hw_caps->mtu - ETH_FCS_LEN - ETH_HLEN;
 
-err_exit:
-	if (err < 0) {
-		aq_nic_free_hot_resources(self);
-		self = NULL;
-	}
-	return self;
 }
 
 void aq_nic_set_tx_ring(struct aq_nic_s *self, unsigned int idx,
@@ -366,11 +258,6 @@ void aq_nic_set_tx_ring(struct aq_nic_s *self, unsigned int idx,
 	self->aq_ring_tx[idx] = ring;
 }
 
-struct device *aq_nic_get_dev(struct aq_nic_s *self)
-{
-	return self->ndev->dev.parent;
-}
-
 struct net_device *aq_nic_get_ndev(struct aq_nic_s *self)
 {
 	return self->ndev;
@@ -383,18 +270,20 @@ int aq_nic_init(struct aq_nic_s *self)
 	unsigned int i = 0U;
 
 	self->power_state = AQ_HW_POWER_STATE_D0;
-	err = self->aq_hw_ops.hw_reset(self->aq_hw);
+	err = self->aq_hw_ops->hw_reset(self->aq_hw);
 	if (err < 0)
 		goto err_exit;
 
-	err = self->aq_hw_ops.hw_init(self->aq_hw, &self->aq_nic_cfg,
-			    aq_nic_get_ndev(self)->dev_addr);
+	err = self->aq_hw_ops->hw_init(self->aq_hw,
+				       aq_nic_get_ndev(self)->dev_addr);
 	if (err < 0)
 		goto err_exit;
 
 	for (i = 0U, aq_vec = self->aq_vec[0];
 		self->aq_vecs > i; ++i, aq_vec = self->aq_vec[i])
-		aq_vec_init(aq_vec, &self->aq_hw_ops, self->aq_hw);
+		aq_vec_init(aq_vec, self->aq_hw_ops, self->aq_hw);
+
+	netif_carrier_off(self->ndev);
 
 err_exit:
 	return err;
@@ -406,13 +295,13 @@ int aq_nic_start(struct aq_nic_s *self)
 	int err = 0;
 	unsigned int i = 0U;
 
-	err = self->aq_hw_ops.hw_multicast_list_set(self->aq_hw,
+	err = self->aq_hw_ops->hw_multicast_list_set(self->aq_hw,
 						    self->mc_list.ar,
 						    self->mc_list.count);
 	if (err < 0)
 		goto err_exit;
 
-	err = self->aq_hw_ops.hw_packet_filter_set(self->aq_hw,
+	err = self->aq_hw_ops->hw_packet_filter_set(self->aq_hw,
 						   self->packet_filter);
 	if (err < 0)
 		goto err_exit;
@@ -424,7 +313,7 @@ int aq_nic_start(struct aq_nic_s *self)
 			goto err_exit;
 	}
 
-	err = self->aq_hw_ops.hw_start(self->aq_hw);
+	err = self->aq_hw_ops->hw_start(self->aq_hw);
 	if (err < 0)
 		goto err_exit;
 
@@ -442,14 +331,14 @@ int aq_nic_start(struct aq_nic_s *self)
 	} else {
 		for (i = 0U, aq_vec = self->aq_vec[0];
 			self->aq_vecs > i; ++i, aq_vec = self->aq_vec[i]) {
-			err = aq_pci_func_alloc_irq(self->aq_pci_func, i,
+			err = aq_pci_func_alloc_irq(self, i,
 						    self->ndev->name, aq_vec,
-					aq_vec_get_affinity_mask(aq_vec));
+						    aq_vec_get_affinity_mask(aq_vec));
 			if (err < 0)
 				goto err_exit;
 		}
 
-		err = self->aq_hw_ops.hw_irq_enable(self->aq_hw,
+		err = self->aq_hw_ops->hw_irq_enable(self->aq_hw,
 				    AQ_CFG_IRQ_MASK);
 		if (err < 0)
 			goto err_exit;
@@ -634,9 +523,8 @@ int aq_nic_xmit(struct aq_nic_s *self, struct sk_buff *skb)
 	frags = aq_nic_map_skb(self, skb, ring);
 
 	if (likely(frags)) {
-		err = self->aq_hw_ops.hw_ring_tx_xmit(self->aq_hw,
-						      ring,
-						      frags);
+		err = self->aq_hw_ops->hw_ring_tx_xmit(self->aq_hw,
+						       ring, frags);
 		if (err >= 0) {
 			++ring->stats.tx.packets;
 			ring->stats.tx.bytes += skb->len;
@@ -651,14 +539,14 @@ err_exit:
 
 int aq_nic_update_interrupt_moderation_settings(struct aq_nic_s *self)
 {
-	return self->aq_hw_ops.hw_interrupt_moderation_set(self->aq_hw);
+	return self->aq_hw_ops->hw_interrupt_moderation_set(self->aq_hw);
 }
 
 int aq_nic_set_packet_filter(struct aq_nic_s *self, unsigned int flags)
 {
 	int err = 0;
 
-	err = self->aq_hw_ops.hw_packet_filter_set(self->aq_hw, flags);
+	err = self->aq_hw_ops->hw_packet_filter_set(self->aq_hw, flags);
 	if (err < 0)
 		goto err_exit;
 
@@ -690,11 +578,11 @@ int aq_nic_set_multicast_list(struct aq_nic_s *self, struct net_device *ndev)
 		 * multicast mask
 		 */
 		self->packet_filter |= IFF_ALLMULTI;
-		self->aq_hw->aq_nic_cfg->mc_list_count = 0;
-		return self->aq_hw_ops.hw_packet_filter_set(self->aq_hw,
-							self->packet_filter);
+		self->aq_nic_cfg.mc_list_count = 0;
+		return self->aq_hw_ops->hw_packet_filter_set(self->aq_hw,
+							     self->packet_filter);
 	} else {
-		return self->aq_hw_ops.hw_multicast_list_set(self->aq_hw,
+		return self->aq_hw_ops->hw_multicast_list_set(self->aq_hw,
 						    self->mc_list.ar,
 						    self->mc_list.count);
 	}
@@ -709,7 +597,7 @@ int aq_nic_set_mtu(struct aq_nic_s *self, int new_mtu)
 
 int aq_nic_set_mac(struct aq_nic_s *self, struct net_device *ndev)
 {
-	return self->aq_hw_ops.hw_set_mac_address(self->aq_hw, ndev->dev_addr);
+	return self->aq_hw_ops->hw_set_mac_address(self->aq_hw, ndev->dev_addr);
 }
 
 unsigned int aq_nic_get_link_speed(struct aq_nic_s *self)
@@ -724,8 +612,9 @@ int aq_nic_get_regs(struct aq_nic_s *self, struct ethtool_regs *regs, void *p)
 
 	regs->version = 1;
 
-	err = self->aq_hw_ops.hw_get_regs(self->aq_hw,
-					  &self->aq_hw_caps, regs_buff);
+	err = self->aq_hw_ops->hw_get_regs(self->aq_hw,
+					   self->aq_nic_cfg.aq_hw_caps,
+					   regs_buff);
 	if (err < 0)
 		goto err_exit;
 
@@ -735,7 +624,7 @@ err_exit:
 
 int aq_nic_get_regs_count(struct aq_nic_s *self)
 {
-	return self->aq_hw_caps.mac_regs_count;
+	return self->aq_nic_cfg.aq_hw_caps->mac_regs_count;
 }
 
 void aq_nic_get_stats(struct aq_nic_s *self, u64 *data)
@@ -743,7 +632,7 @@ void aq_nic_get_stats(struct aq_nic_s *self, u64 *data)
 	unsigned int i = 0U;
 	unsigned int count = 0U;
 	struct aq_vec_s *aq_vec = NULL;
-	struct aq_stats_s *stats = self->aq_hw_ops.hw_get_hw_stats(self->aq_hw);
+	struct aq_stats_s *stats = self->aq_hw_ops->hw_get_hw_stats(self->aq_hw);
 
 	if (!stats)
 		goto err_exit;
@@ -774,7 +663,6 @@ void aq_nic_get_stats(struct aq_nic_s *self, u64 *data)
 	i++;
 
 	data += i;
-	count = 0U;
 
 	for (i = 0U, aq_vec = self->aq_vec[0];
 		aq_vec && self->aq_vecs > i; ++i, aq_vec = self->aq_vec[i]) {
@@ -788,7 +676,7 @@ err_exit:;
 static void aq_nic_update_ndev_stats(struct aq_nic_s *self)
 {
 	struct net_device *ndev = self->ndev;
-	struct aq_stats_s *stats = self->aq_hw_ops.hw_get_hw_stats(self->aq_hw);
+	struct aq_stats_s *stats = self->aq_hw_ops->hw_get_hw_stats(self->aq_hw);
 
 	ndev->stats.rx_packets = stats->uprc + stats->mprc + stats->bprc;
 	ndev->stats.rx_bytes = stats->ubrc + stats->mbrc + stats->bbrc;
@@ -802,39 +690,46 @@ static void aq_nic_update_ndev_stats(struct aq_nic_s *self)
 void aq_nic_get_link_ksettings(struct aq_nic_s *self,
 			       struct ethtool_link_ksettings *cmd)
 {
-	cmd->base.port = PORT_TP;
+	if (self->aq_nic_cfg.aq_hw_caps->media_type == AQ_HW_MEDIA_TYPE_FIBRE)
+		cmd->base.port = PORT_FIBRE;
+	else
+		cmd->base.port = PORT_TP;
 	/* This driver supports only 10G capable adapters, so DUPLEX_FULL */
 	cmd->base.duplex = DUPLEX_FULL;
 	cmd->base.autoneg = self->aq_nic_cfg.is_autoneg;
 
 	ethtool_link_ksettings_zero_link_mode(cmd, supported);
 
-	if (self->aq_hw_caps.link_speed_msk & AQ_NIC_RATE_10G)
+	if (self->aq_nic_cfg.aq_hw_caps->link_speed_msk & AQ_NIC_RATE_10G)
 		ethtool_link_ksettings_add_link_mode(cmd, supported,
 						     10000baseT_Full);
 
-	if (self->aq_hw_caps.link_speed_msk & AQ_NIC_RATE_5G)
+	if (self->aq_nic_cfg.aq_hw_caps->link_speed_msk & AQ_NIC_RATE_5G)
 		ethtool_link_ksettings_add_link_mode(cmd, supported,
 						     5000baseT_Full);
 
-	if (self->aq_hw_caps.link_speed_msk & AQ_NIC_RATE_2GS)
+	if (self->aq_nic_cfg.aq_hw_caps->link_speed_msk & AQ_NIC_RATE_2GS)
 		ethtool_link_ksettings_add_link_mode(cmd, supported,
 						     2500baseT_Full);
 
-	if (self->aq_hw_caps.link_speed_msk & AQ_NIC_RATE_1G)
+	if (self->aq_nic_cfg.aq_hw_caps->link_speed_msk & AQ_NIC_RATE_1G)
 		ethtool_link_ksettings_add_link_mode(cmd, supported,
 						     1000baseT_Full);
 
-	if (self->aq_hw_caps.link_speed_msk & AQ_NIC_RATE_100M)
+	if (self->aq_nic_cfg.aq_hw_caps->link_speed_msk & AQ_NIC_RATE_100M)
 		ethtool_link_ksettings_add_link_mode(cmd, supported,
 						     100baseT_Full);
 
-	if (self->aq_hw_caps.flow_control)
+	if (self->aq_nic_cfg.aq_hw_caps->flow_control)
 		ethtool_link_ksettings_add_link_mode(cmd, supported,
 						     Pause);
 
 	ethtool_link_ksettings_add_link_mode(cmd, supported, Autoneg);
-	ethtool_link_ksettings_add_link_mode(cmd, supported, TP);
+
+	if (self->aq_nic_cfg.aq_hw_caps->media_type == AQ_HW_MEDIA_TYPE_FIBRE)
+		ethtool_link_ksettings_add_link_mode(cmd, supported, FIBRE);
+	else
+		ethtool_link_ksettings_add_link_mode(cmd, supported, TP);
 
 	ethtool_link_ksettings_zero_link_mode(cmd, advertising);
 
@@ -865,7 +760,10 @@ void aq_nic_get_link_ksettings(struct aq_nic_s *self,
 		ethtool_link_ksettings_add_link_mode(cmd, advertising,
 						     Pause);
 
-	ethtool_link_ksettings_add_link_mode(cmd, advertising, TP);
+	if (self->aq_nic_cfg.aq_hw_caps->media_type == AQ_HW_MEDIA_TYPE_FIBRE)
+		ethtool_link_ksettings_add_link_mode(cmd, advertising, FIBRE);
+	else
+		ethtool_link_ksettings_add_link_mode(cmd, advertising, TP);
 }
 
 int aq_nic_set_link_ksettings(struct aq_nic_s *self,
@@ -876,7 +774,7 @@ int aq_nic_set_link_ksettings(struct aq_nic_s *self,
 	int err = 0;
 
 	if (cmd->base.autoneg == AUTONEG_ENABLE) {
-		rate = self->aq_hw_caps.link_speed_msk;
+		rate = self->aq_nic_cfg.aq_hw_caps->link_speed_msk;
 		self->aq_nic_cfg.is_autoneg = true;
 	} else {
 		speed = cmd->base.speed;
@@ -907,7 +805,7 @@ int aq_nic_set_link_ksettings(struct aq_nic_s *self,
 			goto err_exit;
 		break;
 		}
-		if (!(self->aq_hw_caps.link_speed_msk & rate)) {
+		if (!(self->aq_nic_cfg.aq_hw_caps->link_speed_msk & rate)) {
 			err = -1;
 			goto err_exit;
 		}
@@ -915,7 +813,7 @@ int aq_nic_set_link_ksettings(struct aq_nic_s *self,
 		self->aq_nic_cfg.is_autoneg = false;
 	}
 
-	err = self->aq_hw_ops.hw_set_link_speed(self->aq_hw, rate);
+	err = self->aq_fw_ops->set_link_speed(self->aq_hw, rate);
 	if (err < 0)
 		goto err_exit;
 
@@ -934,7 +832,7 @@ u32 aq_nic_get_fw_version(struct aq_nic_s *self)
 {
 	u32 fw_version = 0U;
 
-	self->aq_hw_ops.hw_get_fw_version(self->aq_hw, &fw_version);
+	self->aq_hw_ops->hw_get_fw_version(self->aq_hw, &fw_version);
 
 	return fw_version;
 }
@@ -949,18 +847,18 @@ int aq_nic_stop(struct aq_nic_s *self)
 
 	del_timer_sync(&self->service_timer);
 
-	self->aq_hw_ops.hw_irq_disable(self->aq_hw, AQ_CFG_IRQ_MASK);
+	self->aq_hw_ops->hw_irq_disable(self->aq_hw, AQ_CFG_IRQ_MASK);
 
 	if (self->aq_nic_cfg.is_polling)
 		del_timer_sync(&self->polling_timer);
 	else
-		aq_pci_func_free_irqs(self->aq_pci_func);
+		aq_pci_func_free_irqs(self);
 
 	for (i = 0U, aq_vec = self->aq_vec[0];
 		self->aq_vecs > i; ++i, aq_vec = self->aq_vec[i])
 		aq_vec_stop(aq_vec);
 
-	return self->aq_hw_ops.hw_stop(self->aq_hw);
+	return self->aq_hw_ops->hw_stop(self->aq_hw);
 }
 
 void aq_nic_deinit(struct aq_nic_s *self)
@@ -976,23 +874,23 @@ void aq_nic_deinit(struct aq_nic_s *self)
 		aq_vec_deinit(aq_vec);
 
 	if (self->power_state == AQ_HW_POWER_STATE_D0) {
-		(void)self->aq_hw_ops.hw_deinit(self->aq_hw);
+		(void)self->aq_hw_ops->hw_deinit(self->aq_hw);
 	} else {
-		(void)self->aq_hw_ops.hw_set_power(self->aq_hw,
+		(void)self->aq_hw_ops->hw_set_power(self->aq_hw,
 						   self->power_state);
 	}
 
 err_exit:;
 }
 
-void aq_nic_free_hot_resources(struct aq_nic_s *self)
+void aq_nic_free_vectors(struct aq_nic_s *self)
 {
 	unsigned int i = 0U;
 
 	if (!self)
 		goto err_exit;
 
-	for (i = AQ_DIMOF(self->aq_vec); i--;) {
+	for (i = ARRAY_SIZE(self->aq_vec); i--;) {
 		if (self->aq_vec[i]) {
 			aq_vec_free(self->aq_vec[i]);
 			self->aq_vec[i] = NULL;
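
A note on the pattern in the aq_nic.c hunks above: the nic object used to embed a by-value copy of struct aq_hw_ops (hence the old '.' call sites), and this refactor repoints every call site through a shared const ops table, with the firmware-mailbox services split out into a separate aq_fw_ops. A minimal sketch of that shape, with illustrative names rather than the driver's full structs:

	/* Sketch only; field and function names here are illustrative. */
	struct hw_ops {
		int (*hw_reset)(void *hw);
	};

	struct fw_ops {
		int (*update_stats)(void *hw);	/* firmware-dependent services */
	};

	struct nic {
		const struct hw_ops *hw_ops;	/* one shared read-only table */
		const struct fw_ops *fw_ops;	/* selected when fw is detected */
	};

	static int nic_reset(struct nic *n, void *hw)
	{
		return n->hw_ops->hw_reset(hw);	/* '->' replaces the old '.' */
	}

Keeping the tables const and pointed-to, rather than copied per device, lets them live in rodata and rules out one instance corrupting another's callbacks.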
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_nic.h b/drivers/net/ethernet/aquantia/atlantic/aq_nic.h
index 3c9f8db03d5f..d16b0f1a95aa 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_nic.h
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_nic.h
@@ -14,26 +14,15 @@
 
 #include "aq_common.h"
 #include "aq_rss.h"
+#include "aq_hw.h"
 
 struct aq_ring_s;
-struct aq_pci_func_s;
 struct aq_hw_ops;
-
-#define AQ_NIC_FC_OFF    0U
-#define AQ_NIC_FC_TX     1U
-#define AQ_NIC_FC_RX     2U
-#define AQ_NIC_FC_FULL   3U
-#define AQ_NIC_FC_AUTO   4U
-
-#define AQ_NIC_RATE_10G        BIT(0)
-#define AQ_NIC_RATE_5G         BIT(1)
-#define AQ_NIC_RATE_5GSR       BIT(2)
-#define AQ_NIC_RATE_2GS        BIT(3)
-#define AQ_NIC_RATE_1G         BIT(4)
-#define AQ_NIC_RATE_100M       BIT(5)
+struct aq_fw_s;
+struct aq_vec_s;
 
 struct aq_nic_cfg_s {
-	struct aq_hw_caps_s *aq_hw_caps;
+	const struct aq_hw_caps_s *aq_hw_caps;
 	u64 hw_features;
 	u32 rxds;		/* rx ring size, descriptors # */
 	u32 txds;		/* tx ring size, descriptors # */
@@ -44,7 +33,6 @@ struct aq_nic_cfg_s {
 	u16 tx_itr;
 	u32 num_rss_queues;
 	u32 mtu;
-	u32 ucp_0x364;
 	u32 flow_control;
 	u32 link_speed_msk;
 	u32 vlan_id;
@@ -69,20 +57,43 @@ struct aq_nic_cfg_s {
 #define AQ_NIC_TCVEC2RING(_NIC_, _TC_, _VEC_) \
 	((_TC_) * AQ_CFG_TCS_MAX + (_VEC_))
 
-struct aq_nic_s *aq_nic_alloc_cold(const struct net_device_ops *ndev_ops,
-				   const struct ethtool_ops *et_ops,
-				   struct pci_dev *pdev,
-				   struct aq_pci_func_s *aq_pci_func,
-				   unsigned int port,
-				   const struct aq_hw_ops *aq_hw_ops);
-int aq_nic_ndev_init(struct aq_nic_s *self);
+struct aq_nic_s {
+	atomic_t flags;
+	struct aq_vec_s *aq_vec[AQ_CFG_VECS_MAX];
+	struct aq_ring_s *aq_ring_tx[AQ_CFG_VECS_MAX * AQ_CFG_TCS_MAX];
+	struct aq_hw_s *aq_hw;
+	struct net_device *ndev;
+	unsigned int aq_vecs;
+	unsigned int packet_filter;
+	unsigned int power_state;
+	u8 port;
+	const struct aq_hw_ops *aq_hw_ops;
+	const struct aq_fw_ops *aq_fw_ops;
+	struct aq_nic_cfg_s aq_nic_cfg;
+	struct timer_list service_timer;
+	struct timer_list polling_timer;
+	struct aq_hw_link_status_s link_status;
+	struct {
+		u32 count;
+		u8 ar[AQ_CFG_MULTICAST_ADDRESS_MAX][ETH_ALEN];
+	} mc_list;
+
+	struct pci_dev *pdev;
+	unsigned int msix_entry_mask;
+};
+
+static inline struct device *aq_nic_get_dev(struct aq_nic_s *self)
+{
+	return self->ndev->dev.parent;
+}
+
+void aq_nic_ndev_init(struct aq_nic_s *self);
 struct aq_nic_s *aq_nic_alloc_hot(struct net_device *ndev);
 void aq_nic_set_tx_ring(struct aq_nic_s *self, unsigned int idx,
 			struct aq_ring_s *ring);
-struct device *aq_nic_get_dev(struct aq_nic_s *self);
 struct net_device *aq_nic_get_ndev(struct aq_nic_s *self);
 int aq_nic_init(struct aq_nic_s *self);
-int aq_nic_cfg_start(struct aq_nic_s *self);
+void aq_nic_cfg_start(struct aq_nic_s *self);
 int aq_nic_ndev_register(struct aq_nic_s *self);
 void aq_nic_ndev_free(struct aq_nic_s *self);
 int aq_nic_start(struct aq_nic_s *self);
@@ -93,6 +104,7 @@ void aq_nic_get_stats(struct aq_nic_s *self, u64 *data);
 int aq_nic_stop(struct aq_nic_s *self);
 void aq_nic_deinit(struct aq_nic_s *self);
 void aq_nic_free_hot_resources(struct aq_nic_s *self);
+void aq_nic_free_vectors(struct aq_nic_s *self);
 int aq_nic_set_mtu(struct aq_nic_s *self, int new_mtu);
 int aq_nic_set_mac(struct aq_nic_s *self, struct net_device *ndev);
 int aq_nic_set_packet_filter(struct aq_nic_s *self, unsigned int flags);
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_nic_internal.h b/drivers/net/ethernet/aquantia/atlantic/aq_nic_internal.h
deleted file mode 100644
index e7d2711dc165..000000000000
--- a/drivers/net/ethernet/aquantia/atlantic/aq_nic_internal.h
+++ /dev/null
@@ -1,45 +0,0 @@
-/*
- * aQuantia Corporation Network Driver
- * Copyright (C) 2014-2017 aQuantia Corporation. All rights reserved
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- */
-
-/* File aq_nic_internal.h: Definition of private object structure. */
-
-#ifndef AQ_NIC_INTERNAL_H
-#define AQ_NIC_INTERNAL_H
-
-struct aq_nic_s {
-	struct aq_obj_s header;
-	struct aq_vec_s *aq_vec[AQ_CFG_VECS_MAX];
-	struct aq_ring_s *aq_ring_tx[AQ_CFG_VECS_MAX * AQ_CFG_TCS_MAX];
-	struct aq_hw_s *aq_hw;
-	struct net_device *ndev;
-	struct aq_pci_func_s *aq_pci_func;
-	unsigned int aq_vecs;
-	unsigned int packet_filter;
-	unsigned int power_state;
-	u8 port;
-	struct aq_hw_ops aq_hw_ops;
-	struct aq_hw_caps_s aq_hw_caps;
-	struct aq_nic_cfg_s aq_nic_cfg;
-	struct timer_list service_timer;
-	struct timer_list polling_timer;
-	struct aq_hw_link_status_s link_status;
-	struct {
-		u32 count;
-		u8 ar[AQ_CFG_MULTICAST_ADDRESS_MAX][ETH_ALEN];
-	} mc_list;
-};
-
-#define AQ_NIC_FLAGS_IS_NOT_READY (AQ_NIC_FLAG_STOPPING | \
-			AQ_NIC_FLAG_RESETTING | AQ_NIC_FLAG_CLOSING | \
-			AQ_NIC_FLAG_ERR_UNPLUG | AQ_NIC_FLAG_ERR_HW)
-
-#define AQ_NIC_FLAGS_IS_NOT_TX_READY (AQ_NIC_FLAGS_IS_NOT_READY | \
-					AQ_NIC_LINK_DOWN)
-
-#endif /* AQ_NIC_INTERNAL_H */
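
The deleted header also documents what moved where: struct aq_nic_s now lives in aq_nic.h, and the struct aq_obj_s wrapper around the flags word is flattened to a bare atomic_t, which is why the aq_utils_obj_* helpers now take the atomic directly. Those helpers follow the usual lock-free read-modify-write over atomic_cmpxchg(); a sketch of that pattern (the helpers' exact bodies are outside this hunk):

	/* Illustrative only: OR bits into an atomic flags word without a
	 * lock by retrying until no other CPU raced the update. */
	static inline void obj_flags_set(atomic_t *flags, u32 mask)
	{
		int old, new;

		do {
			old = atomic_read(flags);
			new = old | mask;
		} while (atomic_cmpxchg(flags, old, new) != old);
	}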
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_pci_func.c b/drivers/net/ethernet/aquantia/atlantic/aq_pci_func.c
index 58c29d04b186..22889fc158f2 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_pci_func.c
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_pci_func.c
@@ -9,181 +9,135 @@
 
 /* File aq_pci_func.c: Definition of PCI functions. */
 
-#include "aq_pci_func.h"
+#include <linux/interrupt.h>
+#include <linux/module.h>
+
+#include "aq_main.h"
 #include "aq_nic.h"
 #include "aq_vec.h"
 #include "aq_hw.h"
-#include <linux/interrupt.h>
-
-struct aq_pci_func_s {
-	struct pci_dev *pdev;
-	struct aq_nic_s *port[AQ_CFG_PCI_FUNC_PORTS];
-	void __iomem *mmio;
-	void *aq_vec[AQ_CFG_PCI_FUNC_MSIX_IRQS];
-	resource_size_t mmio_pa;
-	unsigned int msix_entry_mask;
-	unsigned int ports;
-	bool is_pci_enabled;
-	bool is_regions;
-	bool is_pci_using_dac;
-	struct aq_hw_caps_s aq_hw_caps;
+#include "aq_pci_func.h"
+#include "hw_atl/hw_atl_a0.h"
+#include "hw_atl/hw_atl_b0.h"
+
+static const struct pci_device_id aq_pci_tbl[] = {
+	{ PCI_VDEVICE(AQUANTIA, AQ_DEVICE_ID_0001), },
+	{ PCI_VDEVICE(AQUANTIA, AQ_DEVICE_ID_D100), },
+	{ PCI_VDEVICE(AQUANTIA, AQ_DEVICE_ID_D107), },
+	{ PCI_VDEVICE(AQUANTIA, AQ_DEVICE_ID_D108), },
+	{ PCI_VDEVICE(AQUANTIA, AQ_DEVICE_ID_D109), },
+
+	{ PCI_VDEVICE(AQUANTIA, AQ_DEVICE_ID_AQC100), },
+	{ PCI_VDEVICE(AQUANTIA, AQ_DEVICE_ID_AQC107), },
+	{ PCI_VDEVICE(AQUANTIA, AQ_DEVICE_ID_AQC108), },
+	{ PCI_VDEVICE(AQUANTIA, AQ_DEVICE_ID_AQC109), },
+	{ PCI_VDEVICE(AQUANTIA, AQ_DEVICE_ID_AQC111), },
+	{ PCI_VDEVICE(AQUANTIA, AQ_DEVICE_ID_AQC112), },
+
+	{ PCI_VDEVICE(AQUANTIA, AQ_DEVICE_ID_AQC100S), },
+	{ PCI_VDEVICE(AQUANTIA, AQ_DEVICE_ID_AQC107S), },
+	{ PCI_VDEVICE(AQUANTIA, AQ_DEVICE_ID_AQC108S), },
+	{ PCI_VDEVICE(AQUANTIA, AQ_DEVICE_ID_AQC109S), },
+	{ PCI_VDEVICE(AQUANTIA, AQ_DEVICE_ID_AQC111S), },
+	{ PCI_VDEVICE(AQUANTIA, AQ_DEVICE_ID_AQC112S), },
+
+	{ PCI_VDEVICE(AQUANTIA, AQ_DEVICE_ID_AQC111E), },
+	{ PCI_VDEVICE(AQUANTIA, AQ_DEVICE_ID_AQC112E), },
+
+	{}
 };
 
-struct aq_pci_func_s *aq_pci_func_alloc(struct aq_hw_ops *aq_hw_ops,
-					struct pci_dev *pdev,
-					const struct net_device_ops *ndev_ops,
-					const struct ethtool_ops *eth_ops)
-{
-	struct aq_pci_func_s *self = NULL;
-	int err = 0;
-	unsigned int port = 0U;
-
-	if (!aq_hw_ops) {
-		err = -EFAULT;
-		goto err_exit;
-	}
-	self = kzalloc(sizeof(*self), GFP_KERNEL);
-	if (!self) {
-		err = -ENOMEM;
-		goto err_exit;
-	}
-
-	pci_set_drvdata(pdev, self);
-	self->pdev = pdev;
+static const struct aq_board_revision_s hw_atl_boards[] = {
+	{ AQ_DEVICE_ID_0001,	AQ_HWREV_1,	&hw_atl_ops_a0, &hw_atl_a0_caps_aqc107, },
+	{ AQ_DEVICE_ID_D100,	AQ_HWREV_1,	&hw_atl_ops_a0, &hw_atl_a0_caps_aqc100, },
+	{ AQ_DEVICE_ID_D107,	AQ_HWREV_1,	&hw_atl_ops_a0, &hw_atl_a0_caps_aqc107, },
+	{ AQ_DEVICE_ID_D108,	AQ_HWREV_1,	&hw_atl_ops_a0, &hw_atl_a0_caps_aqc108, },
+	{ AQ_DEVICE_ID_D109,	AQ_HWREV_1,	&hw_atl_ops_a0, &hw_atl_a0_caps_aqc109, },
+
+	{ AQ_DEVICE_ID_0001,	AQ_HWREV_2,	&hw_atl_ops_b0, &hw_atl_b0_caps_aqc107, },
+	{ AQ_DEVICE_ID_D100,	AQ_HWREV_2,	&hw_atl_ops_b0, &hw_atl_b0_caps_aqc100, },
+	{ AQ_DEVICE_ID_D107,	AQ_HWREV_2,	&hw_atl_ops_b0, &hw_atl_b0_caps_aqc107, },
+	{ AQ_DEVICE_ID_D108,	AQ_HWREV_2,	&hw_atl_ops_b0, &hw_atl_b0_caps_aqc108, },
+	{ AQ_DEVICE_ID_D109,	AQ_HWREV_2,	&hw_atl_ops_b0, &hw_atl_b0_caps_aqc109, },
+
+	{ AQ_DEVICE_ID_AQC100,	AQ_HWREV_ANY,	&hw_atl_ops_b1, &hw_atl_b0_caps_aqc107, },
+	{ AQ_DEVICE_ID_AQC107,	AQ_HWREV_ANY,	&hw_atl_ops_b1, &hw_atl_b0_caps_aqc107, },
+	{ AQ_DEVICE_ID_AQC108,	AQ_HWREV_ANY,	&hw_atl_ops_b1, &hw_atl_b0_caps_aqc108, },
+	{ AQ_DEVICE_ID_AQC109,	AQ_HWREV_ANY,	&hw_atl_ops_b1, &hw_atl_b0_caps_aqc109, },
+	{ AQ_DEVICE_ID_AQC111,	AQ_HWREV_ANY,	&hw_atl_ops_b1, &hw_atl_b0_caps_aqc111, },
+	{ AQ_DEVICE_ID_AQC112,	AQ_HWREV_ANY,	&hw_atl_ops_b1, &hw_atl_b0_caps_aqc112, },
+
+	{ AQ_DEVICE_ID_AQC100S,	AQ_HWREV_ANY,	&hw_atl_ops_b1, &hw_atl_b0_caps_aqc100s, },
+	{ AQ_DEVICE_ID_AQC107S,	AQ_HWREV_ANY,	&hw_atl_ops_b1, &hw_atl_b0_caps_aqc107s, },
+	{ AQ_DEVICE_ID_AQC108S,	AQ_HWREV_ANY,	&hw_atl_ops_b1, &hw_atl_b0_caps_aqc108s, },
+	{ AQ_DEVICE_ID_AQC109S,	AQ_HWREV_ANY,	&hw_atl_ops_b1, &hw_atl_b0_caps_aqc109s, },
+	{ AQ_DEVICE_ID_AQC111S,	AQ_HWREV_ANY,	&hw_atl_ops_b1, &hw_atl_b0_caps_aqc111s, },
+	{ AQ_DEVICE_ID_AQC112S,	AQ_HWREV_ANY,	&hw_atl_ops_b1, &hw_atl_b0_caps_aqc112s, },
+
+	{ AQ_DEVICE_ID_AQC111E,	AQ_HWREV_ANY,	&hw_atl_ops_b1, &hw_atl_b0_caps_aqc111e, },
+	{ AQ_DEVICE_ID_AQC112E,	AQ_HWREV_ANY,	&hw_atl_ops_b1, &hw_atl_b0_caps_aqc112e, },
+};
 
-	err = aq_hw_ops->get_hw_caps(NULL, &self->aq_hw_caps, pdev->device,
-				     pdev->subsystem_device);
-	if (err < 0)
-		goto err_exit;
+MODULE_DEVICE_TABLE(pci, aq_pci_tbl);
 
-	self->ports = self->aq_hw_caps.ports;
+static int aq_pci_probe_get_hw_by_id(struct pci_dev *pdev,
+				     const struct aq_hw_ops **ops,
+				     const struct aq_hw_caps_s **caps)
+{
+	int i = 0;
 
-	for (port = 0; port < self->ports; ++port) {
-		struct aq_nic_s *aq_nic = aq_nic_alloc_cold(ndev_ops, eth_ops,
-							    pdev, self,
-							    port, aq_hw_ops);
+	if (pdev->vendor != PCI_VENDOR_ID_AQUANTIA)
+		return -EINVAL;
 
-		if (!aq_nic) {
-			err = -ENOMEM;
-			goto err_exit;
+	for (i = 0; i < ARRAY_SIZE(hw_atl_boards); i++) {
+		if (hw_atl_boards[i].devid == pdev->device &&
+		    (hw_atl_boards[i].revision == AQ_HWREV_ANY ||
+		     hw_atl_boards[i].revision == pdev->revision)) {
+			*ops = hw_atl_boards[i].ops;
+			*caps = hw_atl_boards[i].caps;
+			break;
 		}
-		self->port[port] = aq_nic;
 	}
 
-err_exit:
-	if (err < 0) {
-		if (self)
-			aq_pci_func_free(self);
-		self = NULL;
-	}
+	if (i == ARRAY_SIZE(hw_atl_boards))
+		return -EINVAL;
 
-	(void)err;
-	return self;
+	return 0;
 }
 
-int aq_pci_func_init(struct aq_pci_func_s *self)
+int aq_pci_func_init(struct pci_dev *pdev)
 {
 	int err = 0;
-	unsigned int bar = 0U;
-	unsigned int port = 0U;
-	unsigned int numvecs = 0U;
-
-	err = pci_enable_device(self->pdev);
-	if (err < 0)
-		goto err_exit;
 
-	self->is_pci_enabled = true;
-
-	err = pci_set_dma_mask(self->pdev, DMA_BIT_MASK(64));
+	err = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
 	if (!err) {
-		err = pci_set_consistent_dma_mask(self->pdev, DMA_BIT_MASK(64));
-		self->is_pci_using_dac = 1;
+		err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
 	}
 	if (err) {
-		err = pci_set_dma_mask(self->pdev, DMA_BIT_MASK(32));
+		err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
 		if (!err)
-			err = pci_set_consistent_dma_mask(self->pdev,
+			err = pci_set_consistent_dma_mask(pdev,
 							  DMA_BIT_MASK(32));
-		self->is_pci_using_dac = 0;
 	}
 	if (err != 0) {
 		err = -ENOSR;
 		goto err_exit;
 	}
 
-	err = pci_request_regions(self->pdev, AQ_CFG_DRV_NAME "_mmio");
+	err = pci_request_regions(pdev, AQ_CFG_DRV_NAME "_mmio");
 	if (err < 0)
 		goto err_exit;
 
-	self->is_regions = true;
+	pci_set_master(pdev);
 
-	pci_set_master(self->pdev);
-
-	for (bar = 0; bar < 4; ++bar) {
-		if (IORESOURCE_MEM & pci_resource_flags(self->pdev, bar)) {
-			resource_size_t reg_sz;
-
-			self->mmio_pa = pci_resource_start(self->pdev, bar);
-			if (self->mmio_pa == 0U) {
-				err = -EIO;
-				goto err_exit;
-			}
-
-			reg_sz = pci_resource_len(self->pdev, bar);
-			if ((reg_sz <= 24 /*ATL_REGS_SIZE*/)) {
-				err = -EIO;
-				goto err_exit;
-			}
-
-			self->mmio = ioremap_nocache(self->mmio_pa, reg_sz);
-			if (!self->mmio) {
-				err = -EIO;
-				goto err_exit;
-			}
-			break;
-		}
-	}
-
-	numvecs = min((u8)AQ_CFG_VECS_DEF, self->aq_hw_caps.msix_irqs);
-	numvecs = min(numvecs, num_online_cpus());
-
-	/* enable interrupts */
-#if !AQ_CFG_FORCE_LEGACY_INT
-	err = pci_alloc_irq_vectors(self->pdev, numvecs, numvecs, PCI_IRQ_MSIX);
-
-	if (err < 0) {
-		err = pci_alloc_irq_vectors(self->pdev, 1, 1,
-				PCI_IRQ_MSI | PCI_IRQ_LEGACY);
-		if (err < 0)
-			goto err_exit;
-	}
-#endif /* AQ_CFG_FORCE_LEGACY_INT */
-
-	/* net device init */
-	for (port = 0; port < self->ports; ++port) {
-		if (!self->port[port])
-			continue;
-
-		err = aq_nic_cfg_start(self->port[port]);
-		if (err < 0)
-			goto err_exit;
-
-		err = aq_nic_ndev_init(self->port[port]);
-		if (err < 0)
-			goto err_exit;
-
-		err = aq_nic_ndev_register(self->port[port]);
-		if (err < 0)
-			goto err_exit;
-	}
+	return 0;
 
 err_exit:
-	if (err < 0)
-		aq_pci_func_deinit(self);
 	return err;
 }
 
-int aq_pci_func_alloc_irq(struct aq_pci_func_s *self, unsigned int i,
+int aq_pci_func_alloc_irq(struct aq_nic_s *self, unsigned int i,
 			  char *name, void *aq_vec, cpumask_t *affinity_mask)
 {
 	struct pci_dev *pdev = self->pdev;
@@ -204,11 +158,10 @@ int aq_pci_func_alloc_irq(struct aq_pci_func_s *self, unsigned int i,
 			irq_set_affinity_hint(pci_irq_vector(pdev, i),
 					      affinity_mask);
 	}
-
 	return err;
 }
 
-void aq_pci_func_free_irqs(struct aq_pci_func_s *self)
+void aq_pci_func_free_irqs(struct aq_nic_s *self)
 {
 	struct pci_dev *pdev = self->pdev;
 	unsigned int i = 0U;
@@ -224,12 +177,7 @@ void aq_pci_func_free_irqs(struct aq_pci_func_s *self)
 	}
 }
 
-void __iomem *aq_pci_func_get_mmio(struct aq_pci_func_s *self)
-{
-	return self->mmio;
-}
-
-unsigned int aq_pci_func_get_irq_type(struct aq_pci_func_s *self)
+unsigned int aq_pci_func_get_irq_type(struct aq_nic_s *self)
 {
 	if (self->pdev->msix_enabled)
 		return AQ_HW_IRQ_MSIX;
@@ -238,62 +186,159 @@ unsigned int aq_pci_func_get_irq_type(struct aq_pci_func_s *self)
 	return AQ_HW_IRQ_LEGACY;
 }
 
-void aq_pci_func_deinit(struct aq_pci_func_s *self)
+static void aq_pci_free_irq_vectors(struct aq_nic_s *self)
 {
-	if (!self)
-		goto err_exit;
-
-	aq_pci_func_free_irqs(self);
 	pci_free_irq_vectors(self->pdev);
-
-	if (self->is_regions)
-		pci_release_regions(self->pdev);
-
-	if (self->is_pci_enabled)
-		pci_disable_device(self->pdev);
-
-err_exit:;
 }
 
-void aq_pci_func_free(struct aq_pci_func_s *self)
+static int aq_pci_probe(struct pci_dev *pdev,
+			const struct pci_device_id *pci_id)
 {
-	unsigned int port = 0U;
+	struct aq_nic_s *self = NULL;
+	int err = 0;
+	struct net_device *ndev;
+	resource_size_t mmio_pa;
+	u32 bar;
+	u32 numvecs;
 
-	if (!self)
-		goto err_exit;
+	err = pci_enable_device(pdev);
+	if (err)
+		return err;
 
-	for (port = 0; port < self->ports; ++port) {
-		if (!self->port[port])
-			continue;
+	err = aq_pci_func_init(pdev);
+	if (err)
+		goto err_pci_func;
 
-		aq_nic_ndev_free(self->port[port]);
+	ndev = aq_ndev_alloc();
+	if (!ndev) {
+		err = -ENOMEM;
+		goto err_ndev;
 	}
 
-	if (self->mmio)
-		iounmap(self->mmio);
+	self = netdev_priv(ndev);
+	self->pdev = pdev;
+	SET_NETDEV_DEV(ndev, &pdev->dev);
+	pci_set_drvdata(pdev, self);
 
-	kfree(self);
+	err = aq_pci_probe_get_hw_by_id(pdev, &self->aq_hw_ops,
+					&aq_nic_get_cfg(self)->aq_hw_caps);
+	if (err)
+		goto err_ioremap;
 
-err_exit:;
-}
+	self->aq_hw = kzalloc(sizeof(*self->aq_hw), GFP_KERNEL);
+	if (!self->aq_hw) {
+		err = -ENOMEM;
+		goto err_ioremap;
+	}
+	self->aq_hw->aq_nic_cfg = aq_nic_get_cfg(self);
 
-int aq_pci_func_change_pm_state(struct aq_pci_func_s *self,
-				pm_message_t *pm_msg)
-{
-	int err = 0;
-	unsigned int port = 0U;
+	for (bar = 0; bar < 4; ++bar) {
+		if (IORESOURCE_MEM & pci_resource_flags(pdev, bar)) {
+			resource_size_t reg_sz;
 
-	if (!self) {
-		err = -EFAULT;
-		goto err_exit;
+			mmio_pa = pci_resource_start(pdev, bar);
+			if (mmio_pa == 0U) {
+				err = -EIO;
+				goto err_ioremap;
+			}
+
+			reg_sz = pci_resource_len(pdev, bar);
+			if (reg_sz <= 24 /*ATL_REGS_SIZE*/) {
+				err = -EIO;
+				goto err_ioremap;
+			}
+
+			self->aq_hw->mmio = ioremap_nocache(mmio_pa, reg_sz);
+			if (!self->aq_hw->mmio) {
+				err = -EIO;
+				goto err_ioremap;
+			}
+			break;
+		}
 	}
-	for (port = 0; port < self->ports; ++port) {
-		if (!self->port[port])
-			continue;
 
-		(void)aq_nic_change_pm_state(self->port[port], pm_msg);
+	if (bar == 4) {
+		err = -EIO;
+		goto err_ioremap;
 	}
 
-err_exit:
+	numvecs = min((u8)AQ_CFG_VECS_DEF,
+		      aq_nic_get_cfg(self)->aq_hw_caps->msix_irqs);
+	numvecs = min(numvecs, num_online_cpus());
+	/* enable interrupts */
+#if !AQ_CFG_FORCE_LEGACY_INT
+	err = pci_alloc_irq_vectors(self->pdev, numvecs, numvecs,
+				    PCI_IRQ_MSIX);
+
+	if (err < 0) {
+		err = pci_alloc_irq_vectors(self->pdev, 1, 1,
+					    PCI_IRQ_MSI | PCI_IRQ_LEGACY);
+		if (err < 0)
+			goto err_hwinit;
+	}
+#endif
+
+	/* net device init */
+	aq_nic_cfg_start(self);
+
+	aq_nic_ndev_init(self);
+
+	err = aq_nic_ndev_register(self);
+	if (err < 0)
+		goto err_register;
+
+	return 0;
+
+err_register:
+	aq_nic_free_vectors(self);
+	aq_pci_free_irq_vectors(self);
+err_hwinit:
+	iounmap(self->aq_hw->mmio);
+err_ioremap:
+	free_netdev(ndev);
+err_pci_func:
+	pci_release_regions(pdev);
+err_ndev:
+	pci_disable_device(pdev);
 	return err;
 }
+
+static void aq_pci_remove(struct pci_dev *pdev)
+{
+	struct aq_nic_s *self = pci_get_drvdata(pdev);
+
+	if (self->ndev) {
+		if (self->ndev->reg_state == NETREG_REGISTERED)
+			unregister_netdev(self->ndev);
+		aq_nic_free_vectors(self);
+		aq_pci_free_irq_vectors(self);
+		iounmap(self->aq_hw->mmio);
+		kfree(self->aq_hw);
+		pci_release_regions(pdev);
+		free_netdev(self->ndev);
+	}
+
+	pci_disable_device(pdev);
+}
+
+static int aq_pci_suspend(struct pci_dev *pdev, pm_message_t pm_msg)
+{
+	struct aq_nic_s *self = pci_get_drvdata(pdev);
+
+	return aq_nic_change_pm_state(self, &pm_msg);
+}
+
+static int aq_pci_resume(struct pci_dev *pdev)
+{
+	struct aq_nic_s *self = pci_get_drvdata(pdev);
+	pm_message_t pm_msg = PMSG_RESTORE;
+
+	return aq_nic_change_pm_state(self, &pm_msg);
+}
+
+static struct pci_driver aq_pci_ops = {
+	.name = AQ_CFG_DRV_NAME,
+	.id_table = aq_pci_tbl,
+	.probe = aq_pci_probe,
+	.remove = aq_pci_remove,
+	.suspend = aq_pci_suspend,
+	.resume = aq_pci_resume,
+};
+
+module_pci_driver(aq_pci_ops);
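
With the multi-port aq_pci_func_s indirection gone, the PCI function is the nic, and the driver registers through module_pci_driver(), which generates the init/exit boilerplate. Approximately what that macro expands to, for orientation:

	static int __init aq_pci_ops_init(void)
	{
		return pci_register_driver(&aq_pci_ops);
	}
	module_init(aq_pci_ops_init);

	static void __exit aq_pci_ops_exit(void)
	{
		pci_unregister_driver(&aq_pci_ops);
	}
	module_exit(aq_pci_ops_exit);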
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_pci_func.h b/drivers/net/ethernet/aquantia/atlantic/aq_pci_func.h
index ecb033791203..aeee67bf69fa 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_pci_func.h
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_pci_func.h
@@ -13,22 +13,20 @@
 #define AQ_PCI_FUNC_H
 
 #include "aq_common.h"
+#include "aq_nic.h"
 
-struct aq_pci_func_s *aq_pci_func_alloc(struct aq_hw_ops *hw_ops,
-					struct pci_dev *pdev,
-					const struct net_device_ops *ndev_ops,
-					const struct ethtool_ops *eth_ops);
-int aq_pci_func_init(struct aq_pci_func_s *self);
-int aq_pci_func_alloc_irq(struct aq_pci_func_s *self, unsigned int i,
+struct aq_board_revision_s {
+	unsigned short devid;
+	unsigned short revision;
+	const struct aq_hw_ops *ops;
+	const struct aq_hw_caps_s *caps;
+};
+
+int aq_pci_func_init(struct pci_dev *pdev);
+int aq_pci_func_alloc_irq(struct aq_nic_s *self, unsigned int i,
 			  char *name, void *aq_vec,
 			  cpumask_t *affinity_mask);
-void aq_pci_func_free_irqs(struct aq_pci_func_s *self);
-int aq_pci_func_start(struct aq_pci_func_s *self);
-void __iomem *aq_pci_func_get_mmio(struct aq_pci_func_s *self);
-unsigned int aq_pci_func_get_irq_type(struct aq_pci_func_s *self);
-void aq_pci_func_deinit(struct aq_pci_func_s *self);
-void aq_pci_func_free(struct aq_pci_func_s *self);
-int aq_pci_func_change_pm_state(struct aq_pci_func_s *self,
-				pm_message_t *pm_msg);
+void aq_pci_func_free_irqs(struct aq_nic_s *self);
+unsigned int aq_pci_func_get_irq_type(struct aq_nic_s *self);
 
 #endif /* AQ_PCI_FUNC_H */
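
The aq_board_revision_s rows declared here are matched by aq_pci_probe_get_hw_by_id() in table order: the legacy D10x device ids appear twice, once per silicon revision, while the AQC ids use the AQ_HWREV_ANY wildcard. The match rule reduces to the following sketch, with the struct trimmed to its two keys:

	/* First row whose devid matches and whose revision is either the
	 * wildcard or the exact PCI revision wins. */
	static bool board_matches(unsigned short entry_devid,
				  unsigned short entry_rev,
				  unsigned short devid, unsigned char rev)
	{
		return entry_devid == devid &&
		       (entry_rev == AQ_HWREV_ANY || entry_rev == rev);
	}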
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_ring.c b/drivers/net/ethernet/aquantia/atlantic/aq_ring.c
index 519ca6534b85..0be6a11370bb 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_ring.c
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_ring.c
@@ -279,10 +279,10 @@ int aq_ring_rx_clean(struct aq_ring_s *self,
 
 		skb_record_rx_queue(skb, self->idx);
 
-		napi_gro_receive(napi, skb);
-
 		++self->stats.rx.packets;
 		self->stats.rx.bytes += skb->len;
+
+		napi_gro_receive(napi, skb);
 	}
 
 err_exit:
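
The aq_ring.c reorder above is a correctness fix rather than churn: napi_gro_receive() consumes the skb, so the byte counter's read of skb->len must happen while the driver still owns the buffer. The new ordering, annotated (assuming the standard NAPI ownership rule):

	++self->stats.rx.packets;
	self->stats.rx.bytes += skb->len;	/* read len before handing off */

	napi_gro_receive(napi, skb);		/* skb may be freed from here on */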
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_ring.h b/drivers/net/ethernet/aquantia/atlantic/aq_ring.h
index 5844078764bd..965fae0fb6e0 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_ring.h
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_ring.h
@@ -15,6 +15,7 @@
 #include "aq_common.h"
 
 struct page;
+struct aq_nic_cfg_s;
 
 /*           TxC       SOP        DX         EOP
  *         +----------+----------+----------+-----------
@@ -105,7 +106,6 @@ union aq_ring_stats_s {
 };
 
 struct aq_ring_s {
-	struct aq_obj_s header;
 	struct aq_ring_buff_s *buff_ring;
 	u8 *dx_ring;		/* descriptors ring, dma shared mem */
 	struct aq_nic_s *aq_nic;
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_utils.h b/drivers/net/ethernet/aquantia/atlantic/aq_utils.h
index e12bcdfb874a..786ea8187c69 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_utils.h
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_utils.h
@@ -14,12 +14,6 @@
 
 #include "aq_common.h"
 
-#define AQ_DIMOF(_ARY_)  ARRAY_SIZE(_ARY_)
-
-struct aq_obj_s {
-	atomic_t flags;
-};
-
 static inline void aq_utils_obj_set(atomic_t *flags, u32 mask)
 {
 	unsigned long flags_old, flags_new;
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_vec.c b/drivers/net/ethernet/aquantia/atlantic/aq_vec.c
index 5fecc9a099ef..f890b8a5a862 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_vec.c
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_vec.c
@@ -19,8 +19,7 @@
 #include <linux/netdevice.h>
 
 struct aq_vec_s {
-	struct aq_obj_s header;
-	struct aq_hw_ops *aq_hw_ops;
+	const struct aq_hw_ops *aq_hw_ops;
 	struct aq_hw_s *aq_hw;
 	struct aq_nic_s *aq_nic;
 	unsigned int tx_rings;
@@ -166,7 +165,7 @@ err_exit:
 	return self;
 }
 
-int aq_vec_init(struct aq_vec_s *self, struct aq_hw_ops *aq_hw_ops,
+int aq_vec_init(struct aq_vec_s *self, const struct aq_hw_ops *aq_hw_ops,
 		struct aq_hw_s *aq_hw)
 {
 	struct aq_ring_s *ring = NULL;
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_vec.h b/drivers/net/ethernet/aquantia/atlantic/aq_vec.h
index 6c68b184236c..8bdf60bb3f63 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_vec.h
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_vec.h
@@ -19,6 +19,8 @@
 
 struct aq_hw_s;
 struct aq_hw_ops;
+struct aq_nic_s;
+struct aq_nic_cfg_s;
 struct aq_ring_stats_rx_s;
 struct aq_ring_stats_tx_s;
 
@@ -26,7 +28,7 @@ irqreturn_t aq_vec_isr(int irq, void *private);
 irqreturn_t aq_vec_isr_legacy(int irq, void *private);
 struct aq_vec_s *aq_vec_alloc(struct aq_nic_s *aq_nic, unsigned int idx,
 			      struct aq_nic_cfg_s *aq_nic_cfg);
-int aq_vec_init(struct aq_vec_s *self, struct aq_hw_ops *aq_hw_ops,
+int aq_vec_init(struct aq_vec_s *self, const struct aq_hw_ops *aq_hw_ops,
 		struct aq_hw_s *aq_hw);
 void aq_vec_deinit(struct aq_vec_s *self);
 void aq_vec_free(struct aq_vec_s *self);
diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_a0.c b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_a0.c
index f18dce14c93c..67e2f9fb9402 100644
--- a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_a0.c
+++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_a0.c
@@ -12,78 +12,100 @@
 #include "../aq_hw.h"
 #include "../aq_hw_utils.h"
 #include "../aq_ring.h"
+#include "../aq_nic.h"
 #include "hw_atl_a0.h"
 #include "hw_atl_utils.h"
 #include "hw_atl_llh.h"
 #include "hw_atl_a0_internal.h"
 
-static int hw_atl_a0_get_hw_caps(struct aq_hw_s *self,
-				 struct aq_hw_caps_s *aq_hw_caps,
-				 unsigned short device,
-				 unsigned short subsystem_device)
-{
-	memcpy(aq_hw_caps, &hw_atl_a0_hw_caps_, sizeof(*aq_hw_caps));
-
-	if (device == HW_ATL_DEVICE_ID_D108 && subsystem_device == 0x0001)
-		aq_hw_caps->link_speed_msk &= ~HW_ATL_A0_RATE_10G;
-
-	if (device == HW_ATL_DEVICE_ID_D109 && subsystem_device == 0x0001) {
-		aq_hw_caps->link_speed_msk &= ~HW_ATL_A0_RATE_10G;
-		aq_hw_caps->link_speed_msk &= ~HW_ATL_A0_RATE_5G;
-	}
-
-	return 0;
-}
-
-static struct aq_hw_s *hw_atl_a0_create(struct aq_pci_func_s *aq_pci_func,
-					unsigned int port,
-					struct aq_hw_ops *ops)
-{
-	struct hw_atl_s *self = NULL;
-
-	self = kzalloc(sizeof(*self), GFP_KERNEL);
-	if (!self)
-		goto err_exit;
-
-	self->base.aq_pci_func = aq_pci_func;
+#define DEFAULT_A0_BOARD_BASIC_CAPABILITIES \
+	.is_64_dma = true, \
+	.msix_irqs = 4U, \
+	.irq_mask = ~0U, \
+	.vecs = HW_ATL_A0_RSS_MAX, \
+	.tcs = HW_ATL_A0_TC_MAX, \
+	.rxd_alignment = 1U, \
+	.rxd_size = HW_ATL_A0_RXD_SIZE, \
+	.rxds = 248U, \
+	.txd_alignment = 1U, \
+	.txd_size = HW_ATL_A0_TXD_SIZE, \
+	.txds = 8U * 1024U, \
+	.txhwb_alignment = 4096U, \
+	.tx_rings = HW_ATL_A0_TX_RINGS, \
+	.rx_rings = HW_ATL_A0_RX_RINGS, \
+	.hw_features = NETIF_F_HW_CSUM | \
+			NETIF_F_RXHASH | \
+			NETIF_F_RXCSUM | \
+			NETIF_F_SG | \
+			NETIF_F_TSO, \
+	.hw_priv_flags = IFF_UNICAST_FLT, \
+	.flow_control = true, \
+	.mtu = HW_ATL_A0_MTU_JUMBO, \
+	.mac_regs_count = 88, \
+	.hw_alive_check_addr = 0x10U
+
+const struct aq_hw_caps_s hw_atl_a0_caps_aqc100 = {
+	DEFAULT_A0_BOARD_BASIC_CAPABILITIES,
+	.media_type = AQ_HW_MEDIA_TYPE_FIBRE,
+	.link_speed_msk = HW_ATL_A0_RATE_5G  |
+			  HW_ATL_A0_RATE_2G5 |
+			  HW_ATL_A0_RATE_1G  |
+			  HW_ATL_A0_RATE_100M,
+};
 
-	self->base.not_ff_addr = 0x10U;
+const struct aq_hw_caps_s hw_atl_a0_caps_aqc107 = {
+	DEFAULT_A0_BOARD_BASIC_CAPABILITIES,
+	.media_type = AQ_HW_MEDIA_TYPE_TP,
+	.link_speed_msk = HW_ATL_A0_RATE_10G |
+			  HW_ATL_A0_RATE_5G  |
+			  HW_ATL_A0_RATE_2G5 |
+			  HW_ATL_A0_RATE_1G  |
+			  HW_ATL_A0_RATE_100M,
+};
 
-err_exit:
-	return (struct aq_hw_s *)self;
-}
+const struct aq_hw_caps_s hw_atl_a0_caps_aqc108 = {
+	DEFAULT_A0_BOARD_BASIC_CAPABILITIES,
+	.media_type = AQ_HW_MEDIA_TYPE_TP,
+	.link_speed_msk = HW_ATL_A0_RATE_5G  |
+			  HW_ATL_A0_RATE_2G5 |
+			  HW_ATL_A0_RATE_1G  |
+			  HW_ATL_A0_RATE_100M,
+};
 
-static void hw_atl_a0_destroy(struct aq_hw_s *self)
-{
-	kfree(self);
-}
+const struct aq_hw_caps_s hw_atl_a0_caps_aqc109 = {
+	DEFAULT_A0_BOARD_BASIC_CAPABILITIES,
+	.media_type = AQ_HW_MEDIA_TYPE_TP,
+	.link_speed_msk = HW_ATL_A0_RATE_2G5 |
+			  HW_ATL_A0_RATE_1G  |
+			  HW_ATL_A0_RATE_100M,
+};
 
 static int hw_atl_a0_hw_reset(struct aq_hw_s *self)
 {
 	int err = 0;
 
-	glb_glb_reg_res_dis_set(self, 1U);
-	pci_pci_reg_res_dis_set(self, 0U);
-	rx_rx_reg_res_dis_set(self, 0U);
-	tx_tx_reg_res_dis_set(self, 0U);
+	hw_atl_glb_glb_reg_res_dis_set(self, 1U);
+	hw_atl_pci_pci_reg_res_dis_set(self, 0U);
+	hw_atl_rx_rx_reg_res_dis_set(self, 0U);
+	hw_atl_tx_tx_reg_res_dis_set(self, 0U);
 
 	HW_ATL_FLUSH();
-	glb_soft_res_set(self, 1);
+	hw_atl_glb_soft_res_set(self, 1);
 
 	/* check 10 times by 1ms */
-	AQ_HW_WAIT_FOR(glb_soft_res_get(self) == 0, 1000U, 10U);
+	AQ_HW_WAIT_FOR(hw_atl_glb_soft_res_get(self) == 0, 1000U, 10U);
 	if (err < 0)
 		goto err_exit;
 
-	itr_irq_reg_res_dis_set(self, 0U);
-	itr_res_irq_set(self, 1U);
+	hw_atl_itr_irq_reg_res_dis_set(self, 0U);
+	hw_atl_itr_res_irq_set(self, 1U);
 
 	/* check 10 times by 1ms */
-	AQ_HW_WAIT_FOR(itr_res_irq_get(self) == 0, 1000U, 10U);
+	AQ_HW_WAIT_FOR(hw_atl_itr_res_irq_get(self) == 0, 1000U, 10U);
 	if (err < 0)
 		goto err_exit;
 
-	hw_atl_utils_mpi_set(self, MPI_RESET, 0x0U);
+	self->aq_fw_ops->set_state(self, MPI_RESET);
 
 	err = aq_hw_err_from_flags(self);
 
@@ -99,51 +121,53 @@ static int hw_atl_a0_hw_qos_set(struct aq_hw_s *self)
 	bool is_rx_flow_control = false;
 
 	/* TPS Descriptor rate init */
-	tps_tx_pkt_shed_desc_rate_curr_time_res_set(self, 0x0U);
-	tps_tx_pkt_shed_desc_rate_lim_set(self, 0xA);
+	hw_atl_tps_tx_pkt_shed_desc_rate_curr_time_res_set(self, 0x0U);
+	hw_atl_tps_tx_pkt_shed_desc_rate_lim_set(self, 0xA);
 
 	/* TPS VM init */
-	tps_tx_pkt_shed_desc_vm_arb_mode_set(self, 0U);
+	hw_atl_tps_tx_pkt_shed_desc_vm_arb_mode_set(self, 0U);
 
 	/* TPS TC credits init */
-	tps_tx_pkt_shed_desc_tc_arb_mode_set(self, 0U);
-	tps_tx_pkt_shed_data_arb_mode_set(self, 0U);
+	hw_atl_tps_tx_pkt_shed_desc_tc_arb_mode_set(self, 0U);
+	hw_atl_tps_tx_pkt_shed_data_arb_mode_set(self, 0U);
 
-	tps_tx_pkt_shed_tc_data_max_credit_set(self, 0xFFF, 0U);
-	tps_tx_pkt_shed_tc_data_weight_set(self, 0x64, 0U);
-	tps_tx_pkt_shed_desc_tc_max_credit_set(self, 0x50, 0U);
-	tps_tx_pkt_shed_desc_tc_weight_set(self, 0x1E, 0U);
+	hw_atl_tps_tx_pkt_shed_tc_data_max_credit_set(self, 0xFFF, 0U);
+	hw_atl_tps_tx_pkt_shed_tc_data_weight_set(self, 0x64, 0U);
+	hw_atl_tps_tx_pkt_shed_desc_tc_max_credit_set(self, 0x50, 0U);
+	hw_atl_tps_tx_pkt_shed_desc_tc_weight_set(self, 0x1E, 0U);
 
 	/* Tx buf size */
 	buff_size = HW_ATL_A0_TXBUF_MAX;
 
-	tpb_tx_pkt_buff_size_per_tc_set(self, buff_size, tc);
-	tpb_tx_buff_hi_threshold_per_tc_set(self,
-					    (buff_size * (1024 / 32U) * 66U) /
-					    100U, tc);
-	tpb_tx_buff_lo_threshold_per_tc_set(self,
-					    (buff_size * (1024 / 32U) * 50U) /
-					    100U, tc);
+	hw_atl_tpb_tx_pkt_buff_size_per_tc_set(self, buff_size, tc);
+	hw_atl_tpb_tx_buff_hi_threshold_per_tc_set(self,
+						   (buff_size *
+						   (1024 / 32U) * 66U) /
+						   100U, tc);
+	hw_atl_tpb_tx_buff_lo_threshold_per_tc_set(self,
+						   (buff_size *
+						   (1024 / 32U) * 50U) /
+						   100U, tc);
 
 	/* QoS Rx buf size per TC */
 	tc = 0;
 	is_rx_flow_control = (AQ_NIC_FC_RX & self->aq_nic_cfg->flow_control);
 	buff_size = HW_ATL_A0_RXBUF_MAX;
 
-	rpb_rx_pkt_buff_size_per_tc_set(self, buff_size, tc);
-	rpb_rx_buff_hi_threshold_per_tc_set(self,
-					    (buff_size *
-					    (1024U / 32U) * 66U) /
-					    100U, tc);
-	rpb_rx_buff_lo_threshold_per_tc_set(self,
-					    (buff_size *
-					    (1024U / 32U) * 50U) /
-					    100U, tc);
-	rpb_rx_xoff_en_per_tc_set(self, is_rx_flow_control ? 1U : 0U, tc);
+	hw_atl_rpb_rx_pkt_buff_size_per_tc_set(self, buff_size, tc);
+	hw_atl_rpb_rx_buff_hi_threshold_per_tc_set(self,
+						   (buff_size *
+						   (1024U / 32U) * 66U) /
+						   100U, tc);
+	hw_atl_rpb_rx_buff_lo_threshold_per_tc_set(self,
+						   (buff_size *
+						   (1024U / 32U) * 50U) /
+						   100U, tc);
+	hw_atl_rpb_rx_xoff_en_per_tc_set(self, is_rx_flow_control ? 1U : 0U, tc);
 
 	/* QoS 802.1p priority -> TC mapping */
 	for (i_priority = 8U; i_priority--;)
-		rpf_rpb_user_priority_tc_map_set(self, i_priority, 0U);
+		hw_atl_rpf_rpb_user_priority_tc_map_set(self, i_priority, 0U);
 
 	return aq_hw_err_from_flags(self);
 }
@@ -151,20 +175,19 @@ static int hw_atl_a0_hw_qos_set(struct aq_hw_s *self)
 static int hw_atl_a0_hw_rss_hash_set(struct aq_hw_s *self,
 				     struct aq_rss_parameters *rss_params)
 {
-	struct aq_nic_cfg_s *cfg = NULL;
+	struct aq_nic_cfg_s *cfg = self->aq_nic_cfg;
 	int err = 0;
 	unsigned int i = 0U;
 	unsigned int addr = 0U;
 
-	cfg = self->aq_nic_cfg;
-
 	for (i = 10, addr = 0U; i--; ++addr) {
 		u32 key_data = cfg->is_rss ?
 			__swab32(rss_params->hash_secret_key[i]) : 0U;
-		rpf_rss_key_wr_data_set(self, key_data);
-		rpf_rss_key_addr_set(self, addr);
-		rpf_rss_key_wr_en_set(self, 1U);
-		AQ_HW_WAIT_FOR(rpf_rss_key_wr_en_get(self) == 0, 1000U, 10U);
+		hw_atl_rpf_rss_key_wr_data_set(self, key_data);
+		hw_atl_rpf_rss_key_addr_set(self, addr);
+		hw_atl_rpf_rss_key_wr_en_set(self, 1U);
+		AQ_HW_WAIT_FOR(hw_atl_rpf_rss_key_wr_en_get(self) == 0,
+			       1000U, 10U);
 		if (err < 0)
 			goto err_exit;
 	}
@@ -193,11 +216,12 @@ static int hw_atl_a0_hw_rss_set(struct aq_hw_s *self,
 			((i * 3U) & 0xFU));
 	}
 
-	for (i = AQ_DIMOF(bitary); i--;) {
-		rpf_rss_redir_tbl_wr_data_set(self, bitary[i]);
-		rpf_rss_redir_tbl_addr_set(self, i);
-		rpf_rss_redir_wr_en_set(self, 1U);
-		AQ_HW_WAIT_FOR(rpf_rss_redir_wr_en_get(self) == 0, 1000U, 10U);
+	for (i = ARRAY_SIZE(bitary); i--;) {
+		hw_atl_rpf_rss_redir_tbl_wr_data_set(self, bitary[i]);
+		hw_atl_rpf_rss_redir_tbl_addr_set(self, i);
+		hw_atl_rpf_rss_redir_wr_en_set(self, 1U);
+		AQ_HW_WAIT_FOR(hw_atl_rpf_rss_redir_wr_en_get(self) == 0,
+			       1000U, 10U);
 		if (err < 0)
 			goto err_exit;
 	}
@@ -212,35 +236,35 @@ static int hw_atl_a0_hw_offload_set(struct aq_hw_s *self,
 				    struct aq_nic_cfg_s *aq_nic_cfg)
 {
 	/* TX checksums offloads*/
-	tpo_ipv4header_crc_offload_en_set(self, 1);
-	tpo_tcp_udp_crc_offload_en_set(self, 1);
+	hw_atl_tpo_ipv4header_crc_offload_en_set(self, 1);
+	hw_atl_tpo_tcp_udp_crc_offload_en_set(self, 1);
 
 	/* RX checksums offloads*/
-	rpo_ipv4header_crc_offload_en_set(self, 1);
-	rpo_tcp_udp_crc_offload_en_set(self, 1);
+	hw_atl_rpo_ipv4header_crc_offload_en_set(self, 1);
+	hw_atl_rpo_tcp_udp_crc_offload_en_set(self, 1);
 
 	/* LSO offloads*/
-	tdm_large_send_offload_en_set(self, 0xFFFFFFFFU);
+	hw_atl_tdm_large_send_offload_en_set(self, 0xFFFFFFFFU);
 
 	return aq_hw_err_from_flags(self);
 }
 
 static int hw_atl_a0_hw_init_tx_path(struct aq_hw_s *self)
 {
-	thm_lso_tcp_flag_of_first_pkt_set(self, 0x0FF6U);
-	thm_lso_tcp_flag_of_middle_pkt_set(self, 0x0FF6U);
-	thm_lso_tcp_flag_of_last_pkt_set(self, 0x0F7FU);
+	hw_atl_thm_lso_tcp_flag_of_first_pkt_set(self, 0x0FF6U);
+	hw_atl_thm_lso_tcp_flag_of_middle_pkt_set(self, 0x0FF6U);
+	hw_atl_thm_lso_tcp_flag_of_last_pkt_set(self, 0x0F7FU);
 
 	/* Tx interrupts */
-	tdm_tx_desc_wr_wb_irq_en_set(self, 1U);
+	hw_atl_tdm_tx_desc_wr_wb_irq_en_set(self, 1U);
 
 	/* misc */
 	aq_hw_write_reg(self, 0x00007040U, IS_CHIP_FEATURE(TPO2) ?
 			0x00010000U : 0x00000000U);
-	tdm_tx_dca_en_set(self, 0U);
-	tdm_tx_dca_mode_set(self, 0U);
+	hw_atl_tdm_tx_dca_en_set(self, 0U);
+	hw_atl_tdm_tx_dca_mode_set(self, 0U);
 
-	tpb_tx_path_scp_ins_en_set(self, 1U);
+	hw_atl_tpb_tx_path_scp_ins_en_set(self, 1U);
 
 	return aq_hw_err_from_flags(self);
 }
@@ -251,38 +275,38 @@ static int hw_atl_a0_hw_init_rx_path(struct aq_hw_s *self)
 	int i;
 
 	/* Rx TC/RSS number config */
-	rpb_rpf_rx_traf_class_mode_set(self, 1U);
+	hw_atl_rpb_rpf_rx_traf_class_mode_set(self, 1U);
 
 	/* Rx flow control */
-	rpb_rx_flow_ctl_mode_set(self, 1U);
+	hw_atl_rpb_rx_flow_ctl_mode_set(self, 1U);
 
 	/* RSS Ring selection */
-	reg_rx_flr_rss_control1set(self, cfg->is_rss ?
+	hw_atl_reg_rx_flr_rss_control1set(self, cfg->is_rss ?
 					0xB3333333U : 0x00000000U);
 
 	/* Multicast filters */
 	for (i = HW_ATL_A0_MAC_MAX; i--;) {
-		rpfl2_uc_flr_en_set(self, (i == 0U) ? 1U : 0U, i);
-		rpfl2unicast_flr_act_set(self, 1U, i);
+		hw_atl_rpfl2_uc_flr_en_set(self, (i == 0U) ? 1U : 0U, i);
+		hw_atl_rpfl2unicast_flr_act_set(self, 1U, i);
 	}
 
-	reg_rx_flr_mcst_flr_msk_set(self, 0x00000000U);
-	reg_rx_flr_mcst_flr_set(self, 0x00010FFFU, 0U);
+	hw_atl_reg_rx_flr_mcst_flr_msk_set(self, 0x00000000U);
+	hw_atl_reg_rx_flr_mcst_flr_set(self, 0x00010FFFU, 0U);
 
 	/* Vlan filters */
-	rpf_vlan_outer_etht_set(self, 0x88A8U);
-	rpf_vlan_inner_etht_set(self, 0x8100U);
-	rpf_vlan_prom_mode_en_set(self, 1);
+	hw_atl_rpf_vlan_outer_etht_set(self, 0x88A8U);
+	hw_atl_rpf_vlan_inner_etht_set(self, 0x8100U);
+	hw_atl_rpf_vlan_prom_mode_en_set(self, 1);
 
 	/* Rx Interrupts */
-	rdm_rx_desc_wr_wb_irq_en_set(self, 1U);
+	hw_atl_rdm_rx_desc_wr_wb_irq_en_set(self, 1U);
 
 	/* misc */
-	rpfl2broadcast_flr_act_set(self, 1U);
-	rpfl2broadcast_count_threshold_set(self, 0xFFFFU & (~0U / 256U));
+	hw_atl_rpfl2broadcast_flr_act_set(self, 1U);
+	hw_atl_rpfl2broadcast_count_threshold_set(self, 0xFFFFU & (~0U / 256U));
 
-	rdm_rx_dca_en_set(self, 0U);
-	rdm_rx_dca_mode_set(self, 0U);
+	hw_atl_rdm_rx_dca_en_set(self, 0U);
+	hw_atl_rdm_rx_dca_mode_set(self, 0U);
 
 	return aq_hw_err_from_flags(self);
 }
@@ -301,10 +325,10 @@ static int hw_atl_a0_hw_mac_addr_set(struct aq_hw_s *self, u8 *mac_addr)
 	l = (mac_addr[2] << 24) | (mac_addr[3] << 16) |
 		(mac_addr[4] << 8) | mac_addr[5];
 
-	rpfl2_uc_flr_en_set(self, 0U, HW_ATL_A0_MAC);
-	rpfl2unicast_dest_addresslsw_set(self, l, HW_ATL_A0_MAC);
-	rpfl2unicast_dest_addressmsw_set(self, h, HW_ATL_A0_MAC);
-	rpfl2_uc_flr_en_set(self, 1U, HW_ATL_A0_MAC);
+	hw_atl_rpfl2_uc_flr_en_set(self, 0U, HW_ATL_A0_MAC);
+	hw_atl_rpfl2unicast_dest_addresslsw_set(self, l, HW_ATL_A0_MAC);
+	hw_atl_rpfl2unicast_dest_addressmsw_set(self, h, HW_ATL_A0_MAC);
+	hw_atl_rpfl2_uc_flr_en_set(self, 1U, HW_ATL_A0_MAC);
 
 	err = aq_hw_err_from_flags(self);
 
@@ -312,9 +336,7 @@ err_exit:
 	return err;
 }
 
-static int hw_atl_a0_hw_init(struct aq_hw_s *self,
-			     struct aq_nic_cfg_s *aq_nic_cfg,
-			     u8 *mac_addr)
+static int hw_atl_a0_hw_init(struct aq_hw_s *self, u8 *mac_addr)
 {
 	static u32 aq_hw_atl_igcr_table_[4][2] = {
 		{ 0x20000000U, 0x20000000U }, /* AQ_IRQ_INVALID */
@@ -325,20 +347,18 @@ static int hw_atl_a0_hw_init(struct aq_hw_s *self,
 
 	int err = 0;
 
-	self->aq_nic_cfg = aq_nic_cfg;
-
-	hw_atl_utils_hw_chip_features_init(self,
-					   &PHAL_ATLANTIC_A0->chip_features);
+	struct aq_nic_cfg_s *aq_nic_cfg = self->aq_nic_cfg;
 
 	hw_atl_a0_hw_init_tx_path(self);
 	hw_atl_a0_hw_init_rx_path(self);
 
 	hw_atl_a0_hw_mac_addr_set(self, mac_addr);
 
-	hw_atl_utils_mpi_set(self, MPI_INIT, aq_nic_cfg->link_speed_msk);
+	self->aq_fw_ops->set_link_speed(self, aq_nic_cfg->link_speed_msk);
+	self->aq_fw_ops->set_state(self, MPI_INIT);
 
-	reg_tx_dma_debug_ctl_set(self, 0x800000b8U);
-	reg_tx_dma_debug_ctl_set(self, 0x000000b8U);
+	hw_atl_reg_tx_dma_debug_ctl_set(self, 0x800000b8U);
+	hw_atl_reg_tx_dma_debug_ctl_set(self, 0x000000b8U);
 
 	hw_atl_a0_hw_qos_set(self);
 	hw_atl_a0_hw_rss_set(self, &aq_nic_cfg->aq_rss);
@@ -346,26 +366,25 @@ static int hw_atl_a0_hw_init(struct aq_hw_s *self,
 
 	/* Reset link status and read out initial hardware counters */
 	self->aq_link_status.mbps = 0;
-	hw_atl_utils_update_stats(self);
+	self->aq_fw_ops->update_stats(self);
 
 	err = aq_hw_err_from_flags(self);
 	if (err < 0)
 		goto err_exit;
 
 	/* Interrupts */
-	reg_irq_glb_ctl_set(self,
-			    aq_hw_atl_igcr_table_[aq_nic_cfg->irq_type]
-						 [(aq_nic_cfg->vecs > 1U) ?
-						 1 : 0]);
+	hw_atl_reg_irq_glb_ctl_set(self,
+				   aq_hw_atl_igcr_table_[aq_nic_cfg->irq_type]
+					[(aq_nic_cfg->vecs > 1U) ? 1 : 0]);
 
-	itr_irq_auto_masklsw_set(self, aq_nic_cfg->aq_hw_caps->irq_mask);
+	hw_atl_itr_irq_auto_masklsw_set(self, aq_nic_cfg->aq_hw_caps->irq_mask);
 
 	/* Interrupts */
-	reg_gen_irq_map_set(self,
-			    ((HW_ATL_A0_ERR_INT << 0x18) | (1U << 0x1F)) |
-			    ((HW_ATL_A0_ERR_INT << 0x10) | (1U << 0x17)) |
-			    ((HW_ATL_A0_ERR_INT << 8) | (1U << 0xF)) |
-			    ((HW_ATL_A0_ERR_INT) | (1U << 0x7)), 0U);
+	hw_atl_reg_gen_irq_map_set(self,
+				   ((HW_ATL_A0_ERR_INT << 0x18) | (1U << 0x1F)) |
+				   ((HW_ATL_A0_ERR_INT << 0x10) | (1U << 0x17)) |
+				   ((HW_ATL_A0_ERR_INT << 8) | (1U << 0xF)) |
+				   ((HW_ATL_A0_ERR_INT) | (1U << 0x7)), 0U);
 
 	hw_atl_a0_hw_offload_set(self, aq_nic_cfg);
 
@@ -376,28 +395,28 @@ err_exit:
 static int hw_atl_a0_hw_ring_tx_start(struct aq_hw_s *self,
 				      struct aq_ring_s *ring)
 {
-	tdm_tx_desc_en_set(self, 1, ring->idx);
+	hw_atl_tdm_tx_desc_en_set(self, 1, ring->idx);
 	return aq_hw_err_from_flags(self);
 }
 
 static int hw_atl_a0_hw_ring_rx_start(struct aq_hw_s *self,
 				      struct aq_ring_s *ring)
 {
-	rdm_rx_desc_en_set(self, 1, ring->idx);
+	hw_atl_rdm_rx_desc_en_set(self, 1, ring->idx);
 	return aq_hw_err_from_flags(self);
 }
 
 static int hw_atl_a0_hw_start(struct aq_hw_s *self)
 {
-	tpb_tx_buff_en_set(self, 1);
-	rpb_rx_buff_en_set(self, 1);
+	hw_atl_tpb_tx_buff_en_set(self, 1);
+	hw_atl_rpb_rx_buff_en_set(self, 1);
 	return aq_hw_err_from_flags(self);
 }
 
 static int hw_atl_a0_hw_tx_ring_tail_update(struct aq_hw_s *self,
 					    struct aq_ring_s *ring)
 {
-	reg_tx_dma_desc_tail_ptr_set(self, ring->sw_tail, ring->idx);
+	hw_atl_reg_tx_dma_desc_tail_ptr_set(self, ring->sw_tail, ring->idx);
 	return 0;
 }
 
@@ -483,36 +502,37 @@ static int hw_atl_a0_hw_ring_rx_init(struct aq_hw_s *self,
 	u32 dma_desc_addr_lsw = (u32)aq_ring->dx_ring_pa;
 	u32 dma_desc_addr_msw = (u32)(((u64)aq_ring->dx_ring_pa) >> 32);
 
-	rdm_rx_desc_en_set(self, false, aq_ring->idx);
+	hw_atl_rdm_rx_desc_en_set(self, false, aq_ring->idx);
 
-	rdm_rx_desc_head_splitting_set(self, 0U, aq_ring->idx);
+	hw_atl_rdm_rx_desc_head_splitting_set(self, 0U, aq_ring->idx);
 
-	reg_rx_dma_desc_base_addresslswset(self, dma_desc_addr_lsw,
-					   aq_ring->idx);
+	hw_atl_reg_rx_dma_desc_base_addresslswset(self, dma_desc_addr_lsw,
+						  aq_ring->idx);
 
-	reg_rx_dma_desc_base_addressmswset(self,
-					   dma_desc_addr_msw, aq_ring->idx);
+	hw_atl_reg_rx_dma_desc_base_addressmswset(self,
+						  dma_desc_addr_msw,
+						  aq_ring->idx);
 
-	rdm_rx_desc_len_set(self, aq_ring->size / 8U, aq_ring->idx);
+	hw_atl_rdm_rx_desc_len_set(self, aq_ring->size / 8U, aq_ring->idx);
 
-	rdm_rx_desc_data_buff_size_set(self,
-				       AQ_CFG_RX_FRAME_MAX / 1024U,
+	hw_atl_rdm_rx_desc_data_buff_size_set(self,
+					      AQ_CFG_RX_FRAME_MAX / 1024U,
 				       aq_ring->idx);
 
-	rdm_rx_desc_head_buff_size_set(self, 0U, aq_ring->idx);
-	rdm_rx_desc_head_splitting_set(self, 0U, aq_ring->idx);
-	rpo_rx_desc_vlan_stripping_set(self, 0U, aq_ring->idx);
+	hw_atl_rdm_rx_desc_head_buff_size_set(self, 0U, aq_ring->idx);
+	hw_atl_rdm_rx_desc_head_splitting_set(self, 0U, aq_ring->idx);
+	hw_atl_rpo_rx_desc_vlan_stripping_set(self, 0U, aq_ring->idx);
 
 	/* Rx ring set mode */
 
 	/* Mapping interrupt vector */
-	itr_irq_map_rx_set(self, aq_ring_param->vec_idx, aq_ring->idx);
-	itr_irq_map_en_rx_set(self, true, aq_ring->idx);
+	hw_atl_itr_irq_map_rx_set(self, aq_ring_param->vec_idx, aq_ring->idx);
+	hw_atl_itr_irq_map_en_rx_set(self, true, aq_ring->idx);
 
-	rdm_cpu_id_set(self, aq_ring_param->cpu, aq_ring->idx);
-	rdm_rx_desc_dca_en_set(self, 0U, aq_ring->idx);
-	rdm_rx_head_dca_en_set(self, 0U, aq_ring->idx);
-	rdm_rx_pld_dca_en_set(self, 0U, aq_ring->idx);
+	hw_atl_rdm_cpu_id_set(self, aq_ring_param->cpu, aq_ring->idx);
+	hw_atl_rdm_rx_desc_dca_en_set(self, 0U, aq_ring->idx);
+	hw_atl_rdm_rx_head_dca_en_set(self, 0U, aq_ring->idx);
+	hw_atl_rdm_rx_pld_dca_en_set(self, 0U, aq_ring->idx);
 
 	return aq_hw_err_from_flags(self);
 }
@@ -524,25 +544,25 @@ static int hw_atl_a0_hw_ring_tx_init(struct aq_hw_s *self,
 	u32 dma_desc_lsw_addr = (u32)aq_ring->dx_ring_pa;
 	u32 dma_desc_msw_addr = (u32)(((u64)aq_ring->dx_ring_pa) >> 32);
 
-	reg_tx_dma_desc_base_addresslswset(self, dma_desc_lsw_addr,
-					   aq_ring->idx);
+	hw_atl_reg_tx_dma_desc_base_addresslswset(self, dma_desc_lsw_addr,
+						  aq_ring->idx);
 
-	reg_tx_dma_desc_base_addressmswset(self, dma_desc_msw_addr,
-					   aq_ring->idx);
+	hw_atl_reg_tx_dma_desc_base_addressmswset(self, dma_desc_msw_addr,
+						  aq_ring->idx);
 
-	tdm_tx_desc_len_set(self, aq_ring->size / 8U, aq_ring->idx);
+	hw_atl_tdm_tx_desc_len_set(self, aq_ring->size / 8U, aq_ring->idx);
 
 	hw_atl_a0_hw_tx_ring_tail_update(self, aq_ring);
 
 	/* Set Tx threshold */
-	tdm_tx_desc_wr_wb_threshold_set(self, 0U, aq_ring->idx);
+	hw_atl_tdm_tx_desc_wr_wb_threshold_set(self, 0U, aq_ring->idx);
 
 	/* Mapping interrupt vector */
-	itr_irq_map_tx_set(self, aq_ring_param->vec_idx, aq_ring->idx);
-	itr_irq_map_en_tx_set(self, true, aq_ring->idx);
+	hw_atl_itr_irq_map_tx_set(self, aq_ring_param->vec_idx, aq_ring->idx);
+	hw_atl_itr_irq_map_en_tx_set(self, true, aq_ring->idx);
 
-	tdm_cpu_id_set(self, aq_ring_param->cpu, aq_ring->idx);
-	tdm_tx_desc_dca_en_set(self, 0U, aq_ring->idx);
+	hw_atl_tdm_cpu_id_set(self, aq_ring_param->cpu, aq_ring->idx);
+	hw_atl_tdm_tx_desc_dca_en_set(self, 0U, aq_ring->idx);
 
 	return aq_hw_err_from_flags(self);
 }
@@ -563,7 +583,7 @@ static int hw_atl_a0_hw_ring_rx_fill(struct aq_hw_s *self,
 		rxd->hdr_addr = 0U;
 	}
 
-	reg_rx_dma_desc_tail_ptr_set(self, sw_tail_old, ring->idx);
+	hw_atl_reg_rx_dma_desc_tail_ptr_set(self, sw_tail_old, ring->idx);
 
 	return aq_hw_err_from_flags(self);
 }
@@ -572,13 +592,13 @@ static int hw_atl_a0_hw_ring_tx_head_update(struct aq_hw_s *self,
 					    struct aq_ring_s *ring)
 {
 	int err = 0;
-	unsigned int hw_head_ = tdm_tx_desc_head_ptr_get(self, ring->idx);
+	unsigned int hw_head = hw_atl_tdm_tx_desc_head_ptr_get(self, ring->idx);
 
-	if (aq_utils_obj_test(&self->header.flags, AQ_HW_FLAG_ERR_UNPLUG)) {
+	if (aq_utils_obj_test(&self->flags, AQ_HW_FLAG_ERR_UNPLUG)) {
 		err = -ENXIO;
 		goto err_exit;
 	}
-	ring->hw_head = hw_head_;
+	ring->hw_head = hw_head;
 	err = aq_hw_err_from_flags(self);
 
 err_exit:
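
Both head-update paths now test AQ_HW_FLAG_ERR_UNPLUG on self->flags directly instead of the old self->header.flags, aborting with -ENXIO when the adapter has gone away. How the flag gets latched is not visible in this hunk; a surprise-removed PCI device reads back all-ones on MMIO, so a read helper along these lines is one plausible mechanism (illustrative only):

	/* Sketch (assumed mechanism): latch AQ_HW_FLAG_ERR_UNPLUG when
	 * a register read returns the all-ones pattern of an unplugged
	 * device, so callers like the head-update code can bail out.
	 */
	static u32 read_reg_checked(struct aq_hw_s *self, u32 reg)
	{
		u32 value = aq_hw_read_reg(self, reg);

		if (value == 0xFFFFFFFFU)
			aq_utils_obj_set(&self->flags, AQ_HW_FLAG_ERR_UNPLUG);

		return value;
	}
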
@@ -602,15 +622,16 @@ static int hw_atl_a0_hw_ring_rx_receive(struct aq_hw_s *self,
 
 		if (!(rxd_wb->status & 0x5U)) { /* RxD is not done */
 			if ((1U << 4) &
-				reg_rx_dma_desc_status_get(self, ring->idx)) {
-			rdm_rx_desc_en_set(self, false, ring->idx);
-			rdm_rx_desc_res_set(self, true, ring->idx);
-			rdm_rx_desc_res_set(self, false, ring->idx);
-			rdm_rx_desc_en_set(self, true, ring->idx);
+			hw_atl_reg_rx_dma_desc_status_get(self, ring->idx)) {
+				hw_atl_rdm_rx_desc_en_set(self, false, ring->idx);
+				hw_atl_rdm_rx_desc_res_set(self, true, ring->idx);
+				hw_atl_rdm_rx_desc_res_set(self, false, ring->idx);
+				hw_atl_rdm_rx_desc_en_set(self, true, ring->idx);
 			}
 
 			if (ring->hw_head ||
-			    (rdm_rx_desc_head_ptr_get(self, ring->idx) < 2U)) {
+			    (hw_atl_rdm_rx_desc_head_ptr_get(self,
+							     ring->idx) < 2U)) {
 				break;
 			} else if (!(rxd_wb->status & 0x1U)) {
 				struct hw_atl_rxd_wb_s *rxd_wb1 =
@@ -693,26 +714,25 @@ static int hw_atl_a0_hw_ring_rx_receive(struct aq_hw_s *self,
 
 static int hw_atl_a0_hw_irq_enable(struct aq_hw_s *self, u64 mask)
 {
-	itr_irq_msk_setlsw_set(self, LODWORD(mask) |
+	hw_atl_itr_irq_msk_setlsw_set(self, LODWORD(mask) |
 			       (1U << HW_ATL_A0_ERR_INT));
 	return aq_hw_err_from_flags(self);
 }
 
 static int hw_atl_a0_hw_irq_disable(struct aq_hw_s *self, u64 mask)
 {
-	itr_irq_msk_clearlsw_set(self, LODWORD(mask));
-	itr_irq_status_clearlsw_set(self, LODWORD(mask));
+	hw_atl_itr_irq_msk_clearlsw_set(self, LODWORD(mask));
+	hw_atl_itr_irq_status_clearlsw_set(self, LODWORD(mask));
 
-	if ((1U << 16) & reg_gen_irq_status_get(self))
-
-		atomic_inc(&PHAL_ATLANTIC_A0->dpc);
+	if ((1U << 16) & hw_atl_reg_gen_irq_status_get(self))
+		atomic_inc(&self->dpc);
 
 	return aq_hw_err_from_flags(self);
 }
 
 static int hw_atl_a0_hw_irq_read(struct aq_hw_s *self, u64 *mask)
 {
-	*mask = itr_irq_statuslsw_get(self);
+	*mask = hw_atl_itr_irq_statuslsw_get(self);
 	return aq_hw_err_from_flags(self);
 }
 
@@ -723,18 +743,20 @@ static int hw_atl_a0_hw_packet_filter_set(struct aq_hw_s *self,
 {
 	unsigned int i = 0U;
 
-	rpfl2promiscuous_mode_en_set(self, IS_FILTER_ENABLED(IFF_PROMISC));
-	rpfl2multicast_flr_en_set(self, IS_FILTER_ENABLED(IFF_MULTICAST), 0);
-	rpfl2broadcast_en_set(self, IS_FILTER_ENABLED(IFF_BROADCAST));
+	hw_atl_rpfl2promiscuous_mode_en_set(self,
+					    IS_FILTER_ENABLED(IFF_PROMISC));
+	hw_atl_rpfl2multicast_flr_en_set(self,
+					 IS_FILTER_ENABLED(IFF_MULTICAST), 0);
+	hw_atl_rpfl2broadcast_en_set(self, IS_FILTER_ENABLED(IFF_BROADCAST));
 
 	self->aq_nic_cfg->is_mc_list_enabled =
 			IS_FILTER_ENABLED(IFF_MULTICAST);
 
 	for (i = HW_ATL_A0_MAC_MIN; i < HW_ATL_A0_MAC_MAX; ++i)
-		rpfl2_uc_flr_en_set(self,
-				    (self->aq_nic_cfg->is_mc_list_enabled &&
-				    (i <= self->aq_nic_cfg->mc_list_count)) ?
-				    1U : 0U, i);
+		hw_atl_rpfl2_uc_flr_en_set(self,
+					   (self->aq_nic_cfg->is_mc_list_enabled &&
+					   (i <= self->aq_nic_cfg->mc_list_count)) ?
+					    1U : 0U, i);
 
 	return aq_hw_err_from_flags(self);
 }
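
IS_FILTER_ENABLED() is used here but defined outside the hunk; given the IFF_* arguments it can only be a test against the packet-filter word passed into this callback. Probable shape (an assumption, though a conventional one):

	/* Sketch: reduce a netdev flag test to the 1U/0U value the
	 * register setters above expect.
	 */
	#define IS_FILTER_ENABLED(_F_) ((packet_filter & (_F_)) ? 1U : 0U)
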
@@ -761,17 +783,19 @@ static int hw_atl_a0_hw_multicast_list_set(struct aq_hw_s *self,
 		u32 l = (ar_mac[i][2] << 24) | (ar_mac[i][3] << 16) |
 					(ar_mac[i][4] << 8) | ar_mac[i][5];
 
-		rpfl2_uc_flr_en_set(self, 0U, HW_ATL_A0_MAC_MIN + i);
+		hw_atl_rpfl2_uc_flr_en_set(self, 0U, HW_ATL_A0_MAC_MIN + i);
 
-		rpfl2unicast_dest_addresslsw_set(self,
-						 l, HW_ATL_A0_MAC_MIN + i);
+		hw_atl_rpfl2unicast_dest_addresslsw_set(self,
+							l,
+							HW_ATL_A0_MAC_MIN + i);
 
-		rpfl2unicast_dest_addressmsw_set(self,
-						 h, HW_ATL_A0_MAC_MIN + i);
+		hw_atl_rpfl2unicast_dest_addressmsw_set(self,
+							h,
+							HW_ATL_A0_MAC_MIN + i);
 
-		rpfl2_uc_flr_en_set(self,
-				    (self->aq_nic_cfg->is_mc_list_enabled),
-				    HW_ATL_A0_MAC_MIN + i);
+		hw_atl_rpfl2_uc_flr_en_set(self,
+					   (self->aq_nic_cfg->is_mc_list_enabled),
+					   HW_ATL_A0_MAC_MIN + i);
 	}
 
 	err = aq_hw_err_from_flags(self);
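
Each filter slot is programmed as a 32-bit LSW covering MAC bytes 2..5 (computed above) plus an MSW for bytes 0..1; the h computation sits outside the hunk. A sketch of the full packing, where the MSW layout is inferred by symmetry and should be treated as an assumption:

	/* Sketch: pack a 6-byte MAC into the MSW/LSW filter words used
	 * above. The LSW expression matches the hunk; the MSW half is
	 * inferred.
	 */
	static void mac_to_filter_words(const u8 *mac, u32 *h, u32 *l)
	{
		*h = ((u32)mac[0] << 8) | mac[1];
		*l = ((u32)mac[2] << 24) | ((u32)mac[3] << 16) |
		     ((u32)mac[4] << 8)  | mac[5];
	}
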
@@ -823,7 +847,7 @@ static int hw_atl_a0_hw_interrupt_moderation_set(struct aq_hw_s *self)
 	}
 
 	for (i = HW_ATL_A0_RINGS_MAX; i--;)
-		reg_irq_thr_set(self, itr_rx, i);
+		hw_atl_reg_irq_thr_set(self, itr_rx, i);
 
 	return aq_hw_err_from_flags(self);
 }
@@ -837,38 +861,19 @@ static int hw_atl_a0_hw_stop(struct aq_hw_s *self)
 static int hw_atl_a0_hw_ring_tx_stop(struct aq_hw_s *self,
 				     struct aq_ring_s *ring)
 {
-	tdm_tx_desc_en_set(self, 0U, ring->idx);
+	hw_atl_tdm_tx_desc_en_set(self, 0U, ring->idx);
 	return aq_hw_err_from_flags(self);
 }
 
 static int hw_atl_a0_hw_ring_rx_stop(struct aq_hw_s *self,
 				     struct aq_ring_s *ring)
 {
-	rdm_rx_desc_en_set(self, 0U, ring->idx);
+	hw_atl_rdm_rx_desc_en_set(self, 0U, ring->idx);
 	return aq_hw_err_from_flags(self);
 }
 
-static int hw_atl_a0_hw_set_speed(struct aq_hw_s *self, u32 speed)
-{
-	int err = 0;
-
-	err = hw_atl_utils_mpi_set_speed(self, speed, MPI_INIT);
-	if (err < 0)
-		goto err_exit;
-
-err_exit:
-	return err;
-}
-
-static struct aq_hw_ops hw_atl_ops_ = {
-	.create               = hw_atl_a0_create,
-	.destroy              = hw_atl_a0_destroy,
-	.get_hw_caps          = hw_atl_a0_get_hw_caps,
-
-	.hw_get_mac_permanent = hw_atl_utils_get_mac_permanent,
+const struct aq_hw_ops hw_atl_ops_a0 = {
 	.hw_set_mac_address   = hw_atl_a0_hw_mac_addr_set,
-	.hw_get_link_status   = hw_atl_utils_mpi_get_link_status,
-	.hw_set_link_speed    = hw_atl_a0_hw_set_speed,
 	.hw_init              = hw_atl_a0_hw_init,
 	.hw_deinit            = hw_atl_utils_hw_deinit,
 	.hw_set_power         = hw_atl_utils_hw_set_power,
@@ -898,21 +903,6 @@ static struct aq_hw_ops hw_atl_ops_ = {
 	.hw_rss_set                  = hw_atl_a0_hw_rss_set,
 	.hw_rss_hash_set             = hw_atl_a0_hw_rss_hash_set,
 	.hw_get_regs                 = hw_atl_utils_hw_get_regs,
-	.hw_update_stats             = hw_atl_utils_update_stats,
 	.hw_get_hw_stats             = hw_atl_utils_get_hw_stats,
 	.hw_get_fw_version           = hw_atl_utils_get_fw_version,
 };
-
-struct aq_hw_ops *hw_atl_a0_get_ops_by_id(struct pci_dev *pdev)
-{
-	bool is_vid_ok = (pdev->vendor == PCI_VENDOR_ID_AQUANTIA);
-	bool is_did_ok = ((pdev->device == HW_ATL_DEVICE_ID_0001) ||
-			(pdev->device == HW_ATL_DEVICE_ID_D100) ||
-			(pdev->device == HW_ATL_DEVICE_ID_D107) ||
-			(pdev->device == HW_ATL_DEVICE_ID_D108) ||
-			(pdev->device == HW_ATL_DEVICE_ID_D109));
-
-	bool is_rev_ok = (pdev->revision == 1U);
-
-	return (is_vid_ok && is_did_ok && is_rev_ok) ? &hw_atl_ops_ : NULL;
-}
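
With hw_atl_a0_get_ops_by_id() removed, the hw layer no longer matches PCI vendor/device/revision itself (the deleted code required revision 1 for A0); exporting the const hw_atl_ops_a0 table leaves that decision to the probe path. A conventional way to express it there, shown purely as an illustration with hypothetical names, is a board descriptor hung off the PCI ID table's driver_data:

	/* Sketch, not from this patch: per-device board descriptor
	 * selected by the PCI core. Struct and table names are
	 * hypothetical; revision dispatch (A0 vs B0) would still be
	 * needed at probe time.
	 */
	struct aq_board_revision {
		const struct aq_hw_ops		*ops;
		const struct aq_hw_caps_s	*caps;
	};

	static const struct aq_board_revision aq_board_a0_aqc107 = {
		.ops	= &hw_atl_ops_a0,
		.caps	= &hw_atl_a0_caps_aqc107,
	};

	static const struct pci_device_id aq_pci_tbl[] = {
		{ PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, 0xD107),
		  .driver_data = (kernel_ulong_t)&aq_board_a0_aqc107 },
		{}
	};
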
diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_a0.h b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_a0.h
index 6e1d527954c9..25fe954def03 100644
--- a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_a0.h
+++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_a0.h
@@ -16,19 +16,11 @@
 
 #include "../aq_common.h"
 
-#ifndef PCI_VENDOR_ID_AQUANTIA
+extern const struct aq_hw_caps_s hw_atl_a0_caps_aqc100;
+extern const struct aq_hw_caps_s hw_atl_a0_caps_aqc107;
+extern const struct aq_hw_caps_s hw_atl_a0_caps_aqc108;
+extern const struct aq_hw_caps_s hw_atl_a0_caps_aqc109;
 
-#define PCI_VENDOR_ID_AQUANTIA  0x1D6A
-#define HW_ATL_DEVICE_ID_0001   0x0001
-#define HW_ATL_DEVICE_ID_D100   0xD100
-#define HW_ATL_DEVICE_ID_D107   0xD107
-#define HW_ATL_DEVICE_ID_D108   0xD108
-#define HW_ATL_DEVICE_ID_D109   0xD109
-
-#define HW_ATL_NIC_NAME "aQuantia AQtion 5Gbit Network Adapter"
-
-#endif
-
-struct aq_hw_ops *hw_atl_a0_get_ops_by_id(struct pci_dev *pdev);
+extern const struct aq_hw_ops hw_atl_ops_a0;
 
 #endif /* HW_ATL_A0_H */
diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_a0_internal.h b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_a0_internal.h
index 0592a0330cf0..1d8855558d74 100644
--- a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_a0_internal.h
+++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_a0_internal.h
@@ -88,69 +88,4 @@
 
 #define HW_ATL_A0_FW_VER_EXPECTED 0x01050006U
 
-/* Hardware tx descriptor */
-struct __packed hw_atl_txd_s {
-	u64 buf_addr;
-	u32 ctl;
-	u32 ctl2; /* 63..46 - payload length, 45 - ctx enable, 44 - ctx index */
-};
-
-/* Hardware tx context descriptor */
-struct __packed hw_atl_txc_s {
-	u32 rsvd;
-	u32 len;
-	u32 ctl;
-	u32 len2;
-};
-
-/* Hardware rx descriptor */
-struct __packed hw_atl_rxd_s {
-	u64 buf_addr;
-	u64 hdr_addr;
-};
-
-/* Hardware rx descriptor writeback */
-struct __packed hw_atl_rxd_wb_s {
-	u32 type;
-	u32 rss_hash;
-	u16 status;
-	u16 pkt_len;
-	u16 next_desc_ptr;
-	u16 vlan;
-};
-
-/* HW layer capabilities */
-static struct aq_hw_caps_s hw_atl_a0_hw_caps_ = {
-	.ports = 1U,
-	.is_64_dma = true,
-	.msix_irqs = 4U,
-	.irq_mask = ~0U,
-	.vecs = HW_ATL_A0_RSS_MAX,
-	.tcs = HW_ATL_A0_TC_MAX,
-	.rxd_alignment = 1U,
-	.rxd_size = HW_ATL_A0_RXD_SIZE,
-	.rxds = 248U,
-	.txd_alignment = 1U,
-	.txd_size = HW_ATL_A0_TXD_SIZE,
-	.txds = 8U * 1024U,
-	.txhwb_alignment = 4096U,
-	.tx_rings = HW_ATL_A0_TX_RINGS,
-	.rx_rings = HW_ATL_A0_RX_RINGS,
-	.hw_features = NETIF_F_HW_CSUM |
-			NETIF_F_RXCSUM |
-			NETIF_F_RXHASH |
-			NETIF_F_SG |
-			NETIF_F_TSO,
-	.hw_priv_flags = IFF_UNICAST_FLT,
-	.link_speed_msk = (HW_ATL_A0_RATE_10G |
-			HW_ATL_A0_RATE_5G |
-			HW_ATL_A0_RATE_2G5 |
-			HW_ATL_A0_RATE_1G |
-			HW_ATL_A0_RATE_100M),
-	.flow_control = true,
-	.mtu = HW_ATL_A0_MTU_JUMBO,
-	.mac_regs_count = 88,
-	.fw_ver_expected = HW_ATL_A0_FW_VER_EXPECTED,
-};
-
 #endif /* HW_ATL_A0_INTERNAL_H */
diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c
index e4a22ce7bf09..819f6bcf9b4e 100644
--- a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c
+++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c
@@ -12,83 +12,89 @@
 #include "../aq_hw.h"
 #include "../aq_hw_utils.h"
 #include "../aq_ring.h"
+#include "../aq_nic.h"
 #include "hw_atl_b0.h"
 #include "hw_atl_utils.h"
 #include "hw_atl_llh.h"
 #include "hw_atl_b0_internal.h"
 #include "hw_atl_llh_internal.h"
 
-static int hw_atl_b0_get_hw_caps(struct aq_hw_s *self,
-				 struct aq_hw_caps_s *aq_hw_caps,
-				 unsigned short device,
-				 unsigned short subsystem_device)
-{
-	memcpy(aq_hw_caps, &hw_atl_b0_hw_caps_, sizeof(*aq_hw_caps));
-
-	if (device == HW_ATL_DEVICE_ID_D108 && subsystem_device == 0x0001)
-		aq_hw_caps->link_speed_msk &= ~HW_ATL_B0_RATE_10G;
-
-	if (device == HW_ATL_DEVICE_ID_D109 && subsystem_device == 0x0001) {
-		aq_hw_caps->link_speed_msk &= ~HW_ATL_B0_RATE_10G;
-		aq_hw_caps->link_speed_msk &= ~HW_ATL_B0_RATE_5G;
-	}
-
-	return 0;
-}
-
-static struct aq_hw_s *hw_atl_b0_create(struct aq_pci_func_s *aq_pci_func,
-					unsigned int port,
-					struct aq_hw_ops *ops)
-{
-	struct hw_atl_s *self = NULL;
-
-	self = kzalloc(sizeof(*self), GFP_KERNEL);
-	if (!self)
-		goto err_exit;
-
-	self->base.aq_pci_func = aq_pci_func;
+#define DEFAULT_B0_BOARD_BASIC_CAPABILITIES \
+	.is_64_dma = true,	\
+	.msix_irqs = 4U,	\
+	.irq_mask = ~0U,	\
+	.vecs = HW_ATL_B0_RSS_MAX,	\
+	.tcs = HW_ATL_B0_TC_MAX,	\
+	.rxd_alignment = 1U,		\
+	.rxd_size = HW_ATL_B0_RXD_SIZE, \
+	.rxds = 4U * 1024U,		\
+	.txd_alignment = 1U,		\
+	.txd_size = HW_ATL_B0_TXD_SIZE, \
+	.txds = 8U * 1024U,		\
+	.txhwb_alignment = 4096U,	\
+	.tx_rings = HW_ATL_B0_TX_RINGS, \
+	.rx_rings = HW_ATL_B0_RX_RINGS, \
+	.hw_features = NETIF_F_HW_CSUM | \
+			NETIF_F_RXCSUM | \
+			NETIF_F_RXHASH | \
+			NETIF_F_SG |  \
+			NETIF_F_TSO | \
+			NETIF_F_LRO,  \
+	.hw_priv_flags = IFF_UNICAST_FLT,   \
+	.flow_control = true,		\
+	.mtu = HW_ATL_B0_MTU_JUMBO,	\
+	.mac_regs_count = 88,		\
+	.hw_alive_check_addr = 0x10U
+
+const struct aq_hw_caps_s hw_atl_b0_caps_aqc100 = {
+	DEFAULT_B0_BOARD_BASIC_CAPABILITIES,
+	.media_type = AQ_HW_MEDIA_TYPE_FIBRE,
+	.link_speed_msk = HW_ATL_B0_RATE_10G |
+			  HW_ATL_B0_RATE_5G  |
+			  HW_ATL_B0_RATE_2G5 |
+			  HW_ATL_B0_RATE_1G  |
+			  HW_ATL_B0_RATE_100M,
+};
 
-	self->base.not_ff_addr = 0x10U;
+const struct aq_hw_caps_s hw_atl_b0_caps_aqc107 = {
+	DEFAULT_B0_BOARD_BASIC_CAPABILITIES,
+	.media_type = AQ_HW_MEDIA_TYPE_TP,
+	.link_speed_msk = HW_ATL_B0_RATE_10G |
+			  HW_ATL_B0_RATE_5G  |
+			  HW_ATL_B0_RATE_2G5 |
+			  HW_ATL_B0_RATE_1G  |
+			  HW_ATL_B0_RATE_100M,
+};
 
-err_exit:
-	return (struct aq_hw_s *)self;
-}
+const struct aq_hw_caps_s hw_atl_b0_caps_aqc108 = {
+	DEFAULT_B0_BOARD_BASIC_CAPABILITIES,
+	.media_type = AQ_HW_MEDIA_TYPE_TP,
+	.link_speed_msk = HW_ATL_B0_RATE_5G  |
+			  HW_ATL_B0_RATE_2G5 |
+			  HW_ATL_B0_RATE_1G  |
+			  HW_ATL_B0_RATE_100M,
+};
 
-static void hw_atl_b0_destroy(struct aq_hw_s *self)
-{
-	kfree(self);
-}
+const struct aq_hw_caps_s hw_atl_b0_caps_aqc109 = {
+	DEFAULT_B0_BOARD_BASIC_CAPABILITIES,
+	.media_type = AQ_HW_MEDIA_TYPE_TP,
+	.link_speed_msk = HW_ATL_B0_RATE_2G5 |
+			  HW_ATL_B0_RATE_1G  |
+			  HW_ATL_B0_RATE_100M,
+};
 
 static int hw_atl_b0_hw_reset(struct aq_hw_s *self)
 {
 	int err = 0;
 
-	glb_glb_reg_res_dis_set(self, 1U);
-	pci_pci_reg_res_dis_set(self, 0U);
-	rx_rx_reg_res_dis_set(self, 0U);
-	tx_tx_reg_res_dis_set(self, 0U);
-
-	HW_ATL_FLUSH();
-	glb_soft_res_set(self, 1);
-
-	/* check 10 times by 1ms */
-	AQ_HW_WAIT_FOR(glb_soft_res_get(self) == 0, 1000U, 10U);
-	if (err < 0)
-		goto err_exit;
-
-	itr_irq_reg_res_dis_set(self, 0U);
-	itr_res_irq_set(self, 1U);
-
-	/* check 10 times by 1ms */
-	AQ_HW_WAIT_FOR(itr_res_irq_get(self) == 0, 1000U, 10U);
-	if (err < 0)
-		goto err_exit;
+	err = hw_atl_utils_soft_reset(self);
+	if (err)
+		return err;
 
-	hw_atl_utils_mpi_set(self, MPI_RESET, 0x0U);
+	self->aq_fw_ops->set_state(self, MPI_RESET);
 
 	err = aq_hw_err_from_flags(self);
 
-err_exit:
 	return err;
 }
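
DEFAULT_B0_BOARD_BASIC_CAPABILITIES collects every field the B0 boards share, so each per-board caps struct only adds its media_type and link_speed_msk. Since these are designated initializers, a board could also override a macro-supplied default inline, because C lets a later initializer for the same member prevail; a hedged sketch with a hypothetical board:

	/* Sketch: repeated designated initializers are legal C and the
	 * last one wins, so a board can narrow a macro default. The
	 * board below is hypothetical.
	 */
	static const struct aq_hw_caps_s hw_atl_b0_caps_example = {
		DEFAULT_B0_BOARD_BASIC_CAPABILITIES,
		.msix_irqs = 1U,	/* overrides the macro's 4U */
		.media_type = AQ_HW_MEDIA_TYPE_TP,
		.link_speed_msk = HW_ATL_B0_RATE_100M,
	};
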
 
@@ -100,51 +106,53 @@ static int hw_atl_b0_hw_qos_set(struct aq_hw_s *self)
 	bool is_rx_flow_control = false;
 
 	/* TPS Descriptor rate init */
-	tps_tx_pkt_shed_desc_rate_curr_time_res_set(self, 0x0U);
-	tps_tx_pkt_shed_desc_rate_lim_set(self, 0xA);
+	hw_atl_tps_tx_pkt_shed_desc_rate_curr_time_res_set(self, 0x0U);
+	hw_atl_tps_tx_pkt_shed_desc_rate_lim_set(self, 0xA);
 
 	/* TPS VM init */
-	tps_tx_pkt_shed_desc_vm_arb_mode_set(self, 0U);
+	hw_atl_tps_tx_pkt_shed_desc_vm_arb_mode_set(self, 0U);
 
 	/* TPS TC credits init */
-	tps_tx_pkt_shed_desc_tc_arb_mode_set(self, 0U);
-	tps_tx_pkt_shed_data_arb_mode_set(self, 0U);
+	hw_atl_tps_tx_pkt_shed_desc_tc_arb_mode_set(self, 0U);
+	hw_atl_tps_tx_pkt_shed_data_arb_mode_set(self, 0U);
 
-	tps_tx_pkt_shed_tc_data_max_credit_set(self, 0xFFF, 0U);
-	tps_tx_pkt_shed_tc_data_weight_set(self, 0x64, 0U);
-	tps_tx_pkt_shed_desc_tc_max_credit_set(self, 0x50, 0U);
-	tps_tx_pkt_shed_desc_tc_weight_set(self, 0x1E, 0U);
+	hw_atl_tps_tx_pkt_shed_tc_data_max_credit_set(self, 0xFFF, 0U);
+	hw_atl_tps_tx_pkt_shed_tc_data_weight_set(self, 0x64, 0U);
+	hw_atl_tps_tx_pkt_shed_desc_tc_max_credit_set(self, 0x50, 0U);
+	hw_atl_tps_tx_pkt_shed_desc_tc_weight_set(self, 0x1E, 0U);
 
 	/* Tx buf size */
 	buff_size = HW_ATL_B0_TXBUF_MAX;
 
-	tpb_tx_pkt_buff_size_per_tc_set(self, buff_size, tc);
-	tpb_tx_buff_hi_threshold_per_tc_set(self,
-					    (buff_size * (1024 / 32U) * 66U) /
-					    100U, tc);
-	tpb_tx_buff_lo_threshold_per_tc_set(self,
-					    (buff_size * (1024 / 32U) * 50U) /
-					    100U, tc);
+	hw_atl_tpb_tx_pkt_buff_size_per_tc_set(self, buff_size, tc);
+	hw_atl_tpb_tx_buff_hi_threshold_per_tc_set(self,
+						   (buff_size *
+						   (1024 / 32U) * 66U) /
+						   100U, tc);
+	hw_atl_tpb_tx_buff_lo_threshold_per_tc_set(self,
+						   (buff_size *
+						   (1024 / 32U) * 50U) /
+						   100U, tc);
 
 	/* QoS Rx buf size per TC */
 	tc = 0;
 	is_rx_flow_control = (AQ_NIC_FC_RX & self->aq_nic_cfg->flow_control);
 	buff_size = HW_ATL_B0_RXBUF_MAX;
 
-	rpb_rx_pkt_buff_size_per_tc_set(self, buff_size, tc);
-	rpb_rx_buff_hi_threshold_per_tc_set(self,
-					    (buff_size *
-					    (1024U / 32U) * 66U) /
-					    100U, tc);
-	rpb_rx_buff_lo_threshold_per_tc_set(self,
-					    (buff_size *
-					    (1024U / 32U) * 50U) /
-					    100U, tc);
-	rpb_rx_xoff_en_per_tc_set(self, is_rx_flow_control ? 1U : 0U, tc);
+	hw_atl_rpb_rx_pkt_buff_size_per_tc_set(self, buff_size, tc);
+	hw_atl_rpb_rx_buff_hi_threshold_per_tc_set(self,
+						   (buff_size *
+						   (1024U / 32U) * 66U) /
+						   100U, tc);
+	hw_atl_rpb_rx_buff_lo_threshold_per_tc_set(self,
+						   (buff_size *
+						   (1024U / 32U) * 50U) /
+						   100U, tc);
+	hw_atl_rpb_rx_xoff_en_per_tc_set(self, is_rx_flow_control ? 1U : 0U, tc);
 
 	/* QoS 802.1p priority -> TC mapping */
 	for (i_priority = 8U; i_priority--;)
-		rpf_rpb_user_priority_tc_map_set(self, i_priority, 0U);
+		hw_atl_rpf_rpb_user_priority_tc_map_set(self, i_priority, 0U);
 
 	return aq_hw_err_from_flags(self);
 }
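
The QoS path sets the per-TC flow-control watermarks at 66% (high) and 50% (low) of the packet buffer; buff_size is in kilobytes and the threshold registers take 32-byte units, hence the (1024 / 32U) factor. The same arithmetic, factored out:

	/* Sketch: the watermark math used above, as one helper. */
	static u32 buff_threshold(u32 buff_size_kb, u32 percent)
	{
		return (buff_size_kb * (1024U / 32U) * percent) / 100U;
	}
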
@@ -152,20 +160,19 @@ static int hw_atl_b0_hw_qos_set(struct aq_hw_s *self)
 static int hw_atl_b0_hw_rss_hash_set(struct aq_hw_s *self,
 				     struct aq_rss_parameters *rss_params)
 {
-	struct aq_nic_cfg_s *cfg = NULL;
+	struct aq_nic_cfg_s *cfg = self->aq_nic_cfg;
 	int err = 0;
 	unsigned int i = 0U;
 	unsigned int addr = 0U;
 
-	cfg = self->aq_nic_cfg;
-
 	for (i = 10, addr = 0U; i--; ++addr) {
 		u32 key_data = cfg->is_rss ?
 			__swab32(rss_params->hash_secret_key[i]) : 0U;
-		rpf_rss_key_wr_data_set(self, key_data);
-		rpf_rss_key_addr_set(self, addr);
-		rpf_rss_key_wr_en_set(self, 1U);
-		AQ_HW_WAIT_FOR(rpf_rss_key_wr_en_get(self) == 0, 1000U, 10U);
+		hw_atl_rpf_rss_key_wr_data_set(self, key_data);
+		hw_atl_rpf_rss_key_addr_set(self, addr);
+		hw_atl_rpf_rss_key_wr_en_set(self, 1U);
+		AQ_HW_WAIT_FOR(hw_atl_rpf_rss_key_wr_en_get(self) == 0,
+			       1000U, 10U);
 		if (err < 0)
 			goto err_exit;
 	}
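
AQ_HW_WAIT_FOR(cond, 1000U, 10U) matches the "check 10 times by 1ms" comments in the reset path: poll cond with a delay between attempts and set the caller's local err on timeout, which is why each use is followed by a bare if (err < 0). A plausible expansion, written as an assumption rather than the driver's exact macro:

	/* Sketch of the polling contract implied by the call sites;
	 * note it updates 'err' in the caller's scope.
	 */
	#define AQ_HW_WAIT_FOR_SKETCH(cond, delay_us, tries)	\
	do {							\
		unsigned int try_ = (tries);			\
		err = 0;					\
		while (!(cond)) {				\
			if (!try_--) {				\
				err = -ETIME;			\
				break;				\
			}					\
			udelay(delay_us);			\
		}						\
	} while (0)
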
@@ -194,11 +201,12 @@ static int hw_atl_b0_hw_rss_set(struct aq_hw_s *self,
 			((i * 3U) & 0xFU));
 	}
 
-	for (i = AQ_DIMOF(bitary); i--;) {
-		rpf_rss_redir_tbl_wr_data_set(self, bitary[i]);
-		rpf_rss_redir_tbl_addr_set(self, i);
-		rpf_rss_redir_wr_en_set(self, 1U);
-		AQ_HW_WAIT_FOR(rpf_rss_redir_wr_en_get(self) == 0, 1000U, 10U);
+	for (i = ARRAY_SIZE(bitary); i--;) {
+		hw_atl_rpf_rss_redir_tbl_wr_data_set(self, bitary[i]);
+		hw_atl_rpf_rss_redir_tbl_addr_set(self, i);
+		hw_atl_rpf_rss_redir_wr_en_set(self, 1U);
+		AQ_HW_WAIT_FOR(hw_atl_rpf_rss_redir_wr_en_get(self) == 0,
+			       1000U, 10U);
 		if (err < 0)
 			goto err_exit;
 	}
@@ -215,15 +223,15 @@ static int hw_atl_b0_hw_offload_set(struct aq_hw_s *self,
 	unsigned int i;
 
 	/* TX checksums offloads*/
-	tpo_ipv4header_crc_offload_en_set(self, 1);
-	tpo_tcp_udp_crc_offload_en_set(self, 1);
+	hw_atl_tpo_ipv4header_crc_offload_en_set(self, 1);
+	hw_atl_tpo_tcp_udp_crc_offload_en_set(self, 1);
 
 	/* RX checksums offloads*/
-	rpo_ipv4header_crc_offload_en_set(self, 1);
-	rpo_tcp_udp_crc_offload_en_set(self, 1);
+	hw_atl_rpo_ipv4header_crc_offload_en_set(self, 1);
+	hw_atl_rpo_tcp_udp_crc_offload_en_set(self, 1);
 
 	/* LSO offloads*/
-	tdm_large_send_offload_en_set(self, 0xFFFFFFFFU);
+	hw_atl_tdm_large_send_offload_en_set(self, 0xFFFFFFFFU);
 
 /* LRO offloads */
 	{
@@ -232,43 +240,44 @@ static int hw_atl_b0_hw_offload_set(struct aq_hw_s *self,
 			((2U < HW_ATL_B0_LRO_RXD_MAX) ? 0x1U : 0x0));
 
 		for (i = 0; i < HW_ATL_B0_RINGS_MAX; i++)
-			rpo_lro_max_num_of_descriptors_set(self, val, i);
+			hw_atl_rpo_lro_max_num_of_descriptors_set(self, val, i);
 
-		rpo_lro_time_base_divider_set(self, 0x61AU);
-		rpo_lro_inactive_interval_set(self, 0);
-		rpo_lro_max_coalescing_interval_set(self, 2);
+		hw_atl_rpo_lro_time_base_divider_set(self, 0x61AU);
+		hw_atl_rpo_lro_inactive_interval_set(self, 0);
+		hw_atl_rpo_lro_max_coalescing_interval_set(self, 2);
 
-		rpo_lro_qsessions_lim_set(self, 1U);
+		hw_atl_rpo_lro_qsessions_lim_set(self, 1U);
 
-		rpo_lro_total_desc_lim_set(self, 2U);
+		hw_atl_rpo_lro_total_desc_lim_set(self, 2U);
 
-		rpo_lro_patch_optimization_en_set(self, 0U);
+		hw_atl_rpo_lro_patch_optimization_en_set(self, 0U);
 
-		rpo_lro_min_pay_of_first_pkt_set(self, 10U);
+		hw_atl_rpo_lro_min_pay_of_first_pkt_set(self, 10U);
 
-		rpo_lro_pkt_lim_set(self, 1U);
+		hw_atl_rpo_lro_pkt_lim_set(self, 1U);
 
-		rpo_lro_en_set(self, aq_nic_cfg->is_lro ? 0xFFFFFFFFU : 0U);
+		hw_atl_rpo_lro_en_set(self,
+				      aq_nic_cfg->is_lro ? 0xFFFFFFFFU : 0U);
 	}
 	return aq_hw_err_from_flags(self);
 }
 
 static int hw_atl_b0_hw_init_tx_path(struct aq_hw_s *self)
 {
-	thm_lso_tcp_flag_of_first_pkt_set(self, 0x0FF6U);
-	thm_lso_tcp_flag_of_middle_pkt_set(self, 0x0FF6U);
-	thm_lso_tcp_flag_of_last_pkt_set(self, 0x0F7FU);
+	hw_atl_thm_lso_tcp_flag_of_first_pkt_set(self, 0x0FF6U);
+	hw_atl_thm_lso_tcp_flag_of_middle_pkt_set(self, 0x0FF6U);
+	hw_atl_thm_lso_tcp_flag_of_last_pkt_set(self, 0x0F7FU);
 
 	/* Tx interrupts */
-	tdm_tx_desc_wr_wb_irq_en_set(self, 1U);
+	hw_atl_tdm_tx_desc_wr_wb_irq_en_set(self, 1U);
 
 	/* misc */
 	aq_hw_write_reg(self, 0x00007040U, IS_CHIP_FEATURE(TPO2) ?
 			0x00010000U : 0x00000000U);
-	tdm_tx_dca_en_set(self, 0U);
-	tdm_tx_dca_mode_set(self, 0U);
+	hw_atl_tdm_tx_dca_en_set(self, 0U);
+	hw_atl_tdm_tx_dca_mode_set(self, 0U);
 
-	tpb_tx_path_scp_ins_en_set(self, 1U);
+	hw_atl_tpb_tx_path_scp_ins_en_set(self, 1U);
 
 	return aq_hw_err_from_flags(self);
 }
@@ -279,55 +288,55 @@ static int hw_atl_b0_hw_init_rx_path(struct aq_hw_s *self)
 	int i;
 
 	/* Rx TC/RSS number config */
-	rpb_rpf_rx_traf_class_mode_set(self, 1U);
+	hw_atl_rpb_rpf_rx_traf_class_mode_set(self, 1U);
 
 	/* Rx flow control */
-	rpb_rx_flow_ctl_mode_set(self, 1U);
+	hw_atl_rpb_rx_flow_ctl_mode_set(self, 1U);
 
 	/* RSS Ring selection */
-	reg_rx_flr_rss_control1set(self, cfg->is_rss ?
+	hw_atl_reg_rx_flr_rss_control1set(self, cfg->is_rss ?
 					0xB3333333U : 0x00000000U);
 
 	/* Multicast filters */
 	for (i = HW_ATL_B0_MAC_MAX; i--;) {
-		rpfl2_uc_flr_en_set(self, (i == 0U) ? 1U : 0U, i);
-		rpfl2unicast_flr_act_set(self, 1U, i);
+		hw_atl_rpfl2_uc_flr_en_set(self, (i == 0U) ? 1U : 0U, i);
+		hw_atl_rpfl2unicast_flr_act_set(self, 1U, i);
 	}
 
-	reg_rx_flr_mcst_flr_msk_set(self, 0x00000000U);
-	reg_rx_flr_mcst_flr_set(self, 0x00010FFFU, 0U);
+	hw_atl_reg_rx_flr_mcst_flr_msk_set(self, 0x00000000U);
+	hw_atl_reg_rx_flr_mcst_flr_set(self, 0x00010FFFU, 0U);
 
 	/* Vlan filters */
-	rpf_vlan_outer_etht_set(self, 0x88A8U);
-	rpf_vlan_inner_etht_set(self, 0x8100U);
+	hw_atl_rpf_vlan_outer_etht_set(self, 0x88A8U);
+	hw_atl_rpf_vlan_inner_etht_set(self, 0x8100U);
 
 	if (cfg->vlan_id) {
-		rpf_vlan_flr_act_set(self, 1U, 0U);
-		rpf_vlan_id_flr_set(self, 0U, 0U);
-		rpf_vlan_flr_en_set(self, 0U, 0U);
+		hw_atl_rpf_vlan_flr_act_set(self, 1U, 0U);
+		hw_atl_rpf_vlan_id_flr_set(self, 0U, 0U);
+		hw_atl_rpf_vlan_flr_en_set(self, 0U, 0U);
 
-		rpf_vlan_accept_untagged_packets_set(self, 1U);
-		rpf_vlan_untagged_act_set(self, 1U);
+		hw_atl_rpf_vlan_accept_untagged_packets_set(self, 1U);
+		hw_atl_rpf_vlan_untagged_act_set(self, 1U);
 
-		rpf_vlan_flr_act_set(self, 1U, 1U);
-		rpf_vlan_id_flr_set(self, cfg->vlan_id, 0U);
-		rpf_vlan_flr_en_set(self, 1U, 1U);
+		hw_atl_rpf_vlan_flr_act_set(self, 1U, 1U);
+		hw_atl_rpf_vlan_id_flr_set(self, cfg->vlan_id, 0U);
+		hw_atl_rpf_vlan_flr_en_set(self, 1U, 1U);
 	} else {
-		rpf_vlan_prom_mode_en_set(self, 1);
+		hw_atl_rpf_vlan_prom_mode_en_set(self, 1);
 	}
 
 	/* Rx Interrupts */
-	rdm_rx_desc_wr_wb_irq_en_set(self, 1U);
+	hw_atl_rdm_rx_desc_wr_wb_irq_en_set(self, 1U);
 
 	/* misc */
 	aq_hw_write_reg(self, 0x00005040U,
 			IS_CHIP_FEATURE(RPF2) ? 0x000F0000U : 0x00000000U);
 
-	rpfl2broadcast_flr_act_set(self, 1U);
-	rpfl2broadcast_count_threshold_set(self, 0xFFFFU & (~0U / 256U));
+	hw_atl_rpfl2broadcast_flr_act_set(self, 1U);
+	hw_atl_rpfl2broadcast_count_threshold_set(self, 0xFFFFU & (~0U / 256U));
 
-	rdm_rx_dca_en_set(self, 0U);
-	rdm_rx_dca_mode_set(self, 0U);
+	hw_atl_rdm_rx_dca_en_set(self, 0U);
+	hw_atl_rdm_rx_dca_mode_set(self, 0U);
 
 	return aq_hw_err_from_flags(self);
 }
@@ -346,10 +355,10 @@ static int hw_atl_b0_hw_mac_addr_set(struct aq_hw_s *self, u8 *mac_addr)
 	l = (mac_addr[2] << 24) | (mac_addr[3] << 16) |
 		(mac_addr[4] << 8) | mac_addr[5];
 
-	rpfl2_uc_flr_en_set(self, 0U, HW_ATL_B0_MAC);
-	rpfl2unicast_dest_addresslsw_set(self, l, HW_ATL_B0_MAC);
-	rpfl2unicast_dest_addressmsw_set(self, h, HW_ATL_B0_MAC);
-	rpfl2_uc_flr_en_set(self, 1U, HW_ATL_B0_MAC);
+	hw_atl_rpfl2_uc_flr_en_set(self, 0U, HW_ATL_B0_MAC);
+	hw_atl_rpfl2unicast_dest_addresslsw_set(self, l, HW_ATL_B0_MAC);
+	hw_atl_rpfl2unicast_dest_addressmsw_set(self, h, HW_ATL_B0_MAC);
+	hw_atl_rpfl2_uc_flr_en_set(self, 1U, HW_ATL_B0_MAC);
 
 	err = aq_hw_err_from_flags(self);
 
@@ -357,9 +366,7 @@ err_exit:
 	return err;
 }
 
-static int hw_atl_b0_hw_init(struct aq_hw_s *self,
-			     struct aq_nic_cfg_s *aq_nic_cfg,
-			     u8 *mac_addr)
+static int hw_atl_b0_hw_init(struct aq_hw_s *self, u8 *mac_addr)
 {
 	static u32 aq_hw_atl_igcr_table_[4][2] = {
 		{ 0x20000000U, 0x20000000U }, /* AQ_IRQ_INVALID */
@@ -371,51 +378,50 @@ static int hw_atl_b0_hw_init(struct aq_hw_s *self,
 	int err = 0;
 	u32 val;
 
-	self->aq_nic_cfg = aq_nic_cfg;
-
-	hw_atl_utils_hw_chip_features_init(self,
-					   &PHAL_ATLANTIC_B0->chip_features);
+	struct aq_nic_cfg_s *aq_nic_cfg = self->aq_nic_cfg;
 
 	hw_atl_b0_hw_init_tx_path(self);
 	hw_atl_b0_hw_init_rx_path(self);
 
 	hw_atl_b0_hw_mac_addr_set(self, mac_addr);
 
-	hw_atl_utils_mpi_set(self, MPI_INIT, aq_nic_cfg->link_speed_msk);
+	self->aq_fw_ops->set_link_speed(self, aq_nic_cfg->link_speed_msk);
+	self->aq_fw_ops->set_state(self, MPI_INIT);
 
 	hw_atl_b0_hw_qos_set(self);
 	hw_atl_b0_hw_rss_set(self, &aq_nic_cfg->aq_rss);
 	hw_atl_b0_hw_rss_hash_set(self, &aq_nic_cfg->aq_rss);
 
 	/* Force limit MRRS on RDM/TDM to 2K */
-	val = aq_hw_read_reg(self, pci_reg_control6_adr);
-	aq_hw_write_reg(self, pci_reg_control6_adr, (val & ~0x707) | 0x404);
+	val = aq_hw_read_reg(self, HW_ATL_PCI_REG_CONTROL6_ADR);
+	aq_hw_write_reg(self, HW_ATL_PCI_REG_CONTROL6_ADR,
+			(val & ~0x707) | 0x404);
 
 	/* TX DMA total request limit. B0 hardware is not capable to
 	 * handle more than (8K-MRRS) incoming DMA data.
 	 * Value 24 in 256byte units
 	 */
-	aq_hw_write_reg(self, tx_dma_total_req_limit_adr, 24);
+	aq_hw_write_reg(self, HW_ATL_TX_DMA_TOTAL_REQ_LIMIT_ADR, 24);
 
 	/* Reset link status and read out initial hardware counters */
 	self->aq_link_status.mbps = 0;
-	hw_atl_utils_update_stats(self);
+	self->aq_fw_ops->update_stats(self);
 
 	err = aq_hw_err_from_flags(self);
 	if (err < 0)
 		goto err_exit;
 
 	/* Interrupts */
-	reg_irq_glb_ctl_set(self,
-			    aq_hw_atl_igcr_table_[aq_nic_cfg->irq_type]
+	hw_atl_reg_irq_glb_ctl_set(self,
+				   aq_hw_atl_igcr_table_[aq_nic_cfg->irq_type]
 						 [(aq_nic_cfg->vecs > 1U) ?
 						 1 : 0]);
 
-	itr_irq_auto_masklsw_set(self, aq_nic_cfg->aq_hw_caps->irq_mask);
+	hw_atl_itr_irq_auto_masklsw_set(self, aq_nic_cfg->aq_hw_caps->irq_mask);
 
 	/* Interrupts */
-	reg_gen_irq_map_set(self,
-			    ((HW_ATL_B0_ERR_INT << 0x18) | (1U << 0x1F)) |
+	hw_atl_reg_gen_irq_map_set(self,
+				   ((HW_ATL_B0_ERR_INT << 0x18) | (1U << 0x1F)) |
 			    ((HW_ATL_B0_ERR_INT << 0x10) | (1U << 0x17)), 0U);
 
 	hw_atl_b0_hw_offload_set(self, aq_nic_cfg);
@@ -427,28 +433,28 @@ err_exit:
 static int hw_atl_b0_hw_ring_tx_start(struct aq_hw_s *self,
 				      struct aq_ring_s *ring)
 {
-	tdm_tx_desc_en_set(self, 1, ring->idx);
+	hw_atl_tdm_tx_desc_en_set(self, 1, ring->idx);
 	return aq_hw_err_from_flags(self);
 }
 
 static int hw_atl_b0_hw_ring_rx_start(struct aq_hw_s *self,
 				      struct aq_ring_s *ring)
 {
-	rdm_rx_desc_en_set(self, 1, ring->idx);
+	hw_atl_rdm_rx_desc_en_set(self, 1, ring->idx);
 	return aq_hw_err_from_flags(self);
 }
 
 static int hw_atl_b0_hw_start(struct aq_hw_s *self)
 {
-	tpb_tx_buff_en_set(self, 1);
-	rpb_rx_buff_en_set(self, 1);
+	hw_atl_tpb_tx_buff_en_set(self, 1);
+	hw_atl_rpb_rx_buff_en_set(self, 1);
 	return aq_hw_err_from_flags(self);
 }
 
 static int hw_atl_b0_hw_tx_ring_tail_update(struct aq_hw_s *self,
 					    struct aq_ring_s *ring)
 {
-	reg_tx_dma_desc_tail_ptr_set(self, ring->sw_tail, ring->idx);
+	hw_atl_reg_tx_dma_desc_tail_ptr_set(self, ring->sw_tail, ring->idx);
 	return 0;
 }
 
@@ -534,36 +540,36 @@ static int hw_atl_b0_hw_ring_rx_init(struct aq_hw_s *self,
 	u32 dma_desc_addr_lsw = (u32)aq_ring->dx_ring_pa;
 	u32 dma_desc_addr_msw = (u32)(((u64)aq_ring->dx_ring_pa) >> 32);
 
-	rdm_rx_desc_en_set(self, false, aq_ring->idx);
+	hw_atl_rdm_rx_desc_en_set(self, false, aq_ring->idx);
 
-	rdm_rx_desc_head_splitting_set(self, 0U, aq_ring->idx);
+	hw_atl_rdm_rx_desc_head_splitting_set(self, 0U, aq_ring->idx);
 
-	reg_rx_dma_desc_base_addresslswset(self, dma_desc_addr_lsw,
-					   aq_ring->idx);
+	hw_atl_reg_rx_dma_desc_base_addresslswset(self, dma_desc_addr_lsw,
+						  aq_ring->idx);
 
-	reg_rx_dma_desc_base_addressmswset(self,
-					   dma_desc_addr_msw, aq_ring->idx);
+	hw_atl_reg_rx_dma_desc_base_addressmswset(self,
+						  dma_desc_addr_msw, aq_ring->idx);
 
-	rdm_rx_desc_len_set(self, aq_ring->size / 8U, aq_ring->idx);
+	hw_atl_rdm_rx_desc_len_set(self, aq_ring->size / 8U, aq_ring->idx);
 
-	rdm_rx_desc_data_buff_size_set(self,
-				       AQ_CFG_RX_FRAME_MAX / 1024U,
+	hw_atl_rdm_rx_desc_data_buff_size_set(self,
+					      AQ_CFG_RX_FRAME_MAX / 1024U,
 				       aq_ring->idx);
 
-	rdm_rx_desc_head_buff_size_set(self, 0U, aq_ring->idx);
-	rdm_rx_desc_head_splitting_set(self, 0U, aq_ring->idx);
-	rpo_rx_desc_vlan_stripping_set(self, 0U, aq_ring->idx);
+	hw_atl_rdm_rx_desc_head_buff_size_set(self, 0U, aq_ring->idx);
+	hw_atl_rdm_rx_desc_head_splitting_set(self, 0U, aq_ring->idx);
+	hw_atl_rpo_rx_desc_vlan_stripping_set(self, 0U, aq_ring->idx);
 
 	/* Rx ring set mode */
 
 	/* Mapping interrupt vector */
-	itr_irq_map_rx_set(self, aq_ring_param->vec_idx, aq_ring->idx);
-	itr_irq_map_en_rx_set(self, true, aq_ring->idx);
+	hw_atl_itr_irq_map_rx_set(self, aq_ring_param->vec_idx, aq_ring->idx);
+	hw_atl_itr_irq_map_en_rx_set(self, true, aq_ring->idx);
 
-	rdm_cpu_id_set(self, aq_ring_param->cpu, aq_ring->idx);
-	rdm_rx_desc_dca_en_set(self, 0U, aq_ring->idx);
-	rdm_rx_head_dca_en_set(self, 0U, aq_ring->idx);
-	rdm_rx_pld_dca_en_set(self, 0U, aq_ring->idx);
+	hw_atl_rdm_cpu_id_set(self, aq_ring_param->cpu, aq_ring->idx);
+	hw_atl_rdm_rx_desc_dca_en_set(self, 0U, aq_ring->idx);
+	hw_atl_rdm_rx_head_dca_en_set(self, 0U, aq_ring->idx);
+	hw_atl_rdm_rx_pld_dca_en_set(self, 0U, aq_ring->idx);
 
 	return aq_hw_err_from_flags(self);
 }
@@ -575,25 +581,25 @@ static int hw_atl_b0_hw_ring_tx_init(struct aq_hw_s *self,
 	u32 dma_desc_lsw_addr = (u32)aq_ring->dx_ring_pa;
 	u32 dma_desc_msw_addr = (u32)(((u64)aq_ring->dx_ring_pa) >> 32);
 
-	reg_tx_dma_desc_base_addresslswset(self, dma_desc_lsw_addr,
-					   aq_ring->idx);
+	hw_atl_reg_tx_dma_desc_base_addresslswset(self, dma_desc_lsw_addr,
+						  aq_ring->idx);
 
-	reg_tx_dma_desc_base_addressmswset(self, dma_desc_msw_addr,
-					   aq_ring->idx);
+	hw_atl_reg_tx_dma_desc_base_addressmswset(self, dma_desc_msw_addr,
+						  aq_ring->idx);
 
-	tdm_tx_desc_len_set(self, aq_ring->size / 8U, aq_ring->idx);
+	hw_atl_tdm_tx_desc_len_set(self, aq_ring->size / 8U, aq_ring->idx);
 
 	hw_atl_b0_hw_tx_ring_tail_update(self, aq_ring);
 
 	/* Set Tx threshold */
-	tdm_tx_desc_wr_wb_threshold_set(self, 0U, aq_ring->idx);
+	hw_atl_tdm_tx_desc_wr_wb_threshold_set(self, 0U, aq_ring->idx);
 
 	/* Mapping interrupt vector */
-	itr_irq_map_tx_set(self, aq_ring_param->vec_idx, aq_ring->idx);
-	itr_irq_map_en_tx_set(self, true, aq_ring->idx);
+	hw_atl_itr_irq_map_tx_set(self, aq_ring_param->vec_idx, aq_ring->idx);
+	hw_atl_itr_irq_map_en_tx_set(self, true, aq_ring->idx);
 
-	tdm_cpu_id_set(self, aq_ring_param->cpu, aq_ring->idx);
-	tdm_tx_desc_dca_en_set(self, 0U, aq_ring->idx);
+	hw_atl_tdm_cpu_id_set(self, aq_ring_param->cpu, aq_ring->idx);
+	hw_atl_tdm_tx_desc_dca_en_set(self, 0U, aq_ring->idx);
 
 	return aq_hw_err_from_flags(self);
 }
@@ -614,7 +620,7 @@ static int hw_atl_b0_hw_ring_rx_fill(struct aq_hw_s *self,
 		rxd->hdr_addr = 0U;
 	}
 
-	reg_rx_dma_desc_tail_ptr_set(self, sw_tail_old, ring->idx);
+	hw_atl_reg_rx_dma_desc_tail_ptr_set(self, sw_tail_old, ring->idx);
 
 	return aq_hw_err_from_flags(self);
 }
@@ -623,9 +629,9 @@ static int hw_atl_b0_hw_ring_tx_head_update(struct aq_hw_s *self,
 					    struct aq_ring_s *ring)
 {
 	int err = 0;
-	unsigned int hw_head_ = tdm_tx_desc_head_ptr_get(self, ring->idx);
+	unsigned int hw_head_ = hw_atl_tdm_tx_desc_head_ptr_get(self, ring->idx);
 
-	if (aq_utils_obj_test(&self->header.flags, AQ_HW_FLAG_ERR_UNPLUG)) {
+	if (aq_utils_obj_test(&self->flags, AQ_HW_FLAG_ERR_UNPLUG)) {
 		err = -ENXIO;
 		goto err_exit;
 	}
@@ -728,22 +734,22 @@ static int hw_atl_b0_hw_ring_rx_receive(struct aq_hw_s *self,
 
 static int hw_atl_b0_hw_irq_enable(struct aq_hw_s *self, u64 mask)
 {
-	itr_irq_msk_setlsw_set(self, LODWORD(mask));
+	hw_atl_itr_irq_msk_setlsw_set(self, LODWORD(mask));
 	return aq_hw_err_from_flags(self);
 }
 
 static int hw_atl_b0_hw_irq_disable(struct aq_hw_s *self, u64 mask)
 {
-	itr_irq_msk_clearlsw_set(self, LODWORD(mask));
-	itr_irq_status_clearlsw_set(self, LODWORD(mask));
+	hw_atl_itr_irq_msk_clearlsw_set(self, LODWORD(mask));
+	hw_atl_itr_irq_status_clearlsw_set(self, LODWORD(mask));
 
-	atomic_inc(&PHAL_ATLANTIC_B0->dpc);
+	atomic_inc(&self->dpc);
 	return aq_hw_err_from_flags(self);
 }
 
 static int hw_atl_b0_hw_irq_read(struct aq_hw_s *self, u64 *mask)
 {
-	*mask = itr_irq_statuslsw_get(self);
+	*mask = hw_atl_itr_irq_statuslsw_get(self);
 	return aq_hw_err_from_flags(self);
 }
 
@@ -754,20 +760,20 @@ static int hw_atl_b0_hw_packet_filter_set(struct aq_hw_s *self,
 {
 	unsigned int i = 0U;
 
-	rpfl2promiscuous_mode_en_set(self, IS_FILTER_ENABLED(IFF_PROMISC));
-	rpfl2multicast_flr_en_set(self,
-				  IS_FILTER_ENABLED(IFF_MULTICAST), 0);
+	hw_atl_rpfl2promiscuous_mode_en_set(self, IS_FILTER_ENABLED(IFF_PROMISC));
+	hw_atl_rpfl2multicast_flr_en_set(self,
+					 IS_FILTER_ENABLED(IFF_MULTICAST), 0);
 
-	rpfl2_accept_all_mc_packets_set(self,
-					IS_FILTER_ENABLED(IFF_ALLMULTI));
+	hw_atl_rpfl2_accept_all_mc_packets_set(self,
+					       IS_FILTER_ENABLED(IFF_ALLMULTI));
 
-	rpfl2broadcast_en_set(self, IS_FILTER_ENABLED(IFF_BROADCAST));
+	hw_atl_rpfl2broadcast_en_set(self, IS_FILTER_ENABLED(IFF_BROADCAST));
 
 	self->aq_nic_cfg->is_mc_list_enabled = IS_FILTER_ENABLED(IFF_MULTICAST);
 
 	for (i = HW_ATL_B0_MAC_MIN; i < HW_ATL_B0_MAC_MAX; ++i)
-		rpfl2_uc_flr_en_set(self,
-				    (self->aq_nic_cfg->is_mc_list_enabled &&
+		hw_atl_rpfl2_uc_flr_en_set(self,
+					   (self->aq_nic_cfg->is_mc_list_enabled &&
 				    (i <= self->aq_nic_cfg->mc_list_count)) ?
 				    1U : 0U, i);
 
@@ -796,16 +802,16 @@ static int hw_atl_b0_hw_multicast_list_set(struct aq_hw_s *self,
 		u32 l = (ar_mac[i][2] << 24) | (ar_mac[i][3] << 16) |
 					(ar_mac[i][4] << 8) | ar_mac[i][5];
 
-		rpfl2_uc_flr_en_set(self, 0U, HW_ATL_B0_MAC_MIN + i);
+		hw_atl_rpfl2_uc_flr_en_set(self, 0U, HW_ATL_B0_MAC_MIN + i);
 
-		rpfl2unicast_dest_addresslsw_set(self,
-						 l, HW_ATL_B0_MAC_MIN + i);
+		hw_atl_rpfl2unicast_dest_addresslsw_set(self,
+							l, HW_ATL_B0_MAC_MIN + i);
 
-		rpfl2unicast_dest_addressmsw_set(self,
-						 h, HW_ATL_B0_MAC_MIN + i);
+		hw_atl_rpfl2unicast_dest_addressmsw_set(self,
+							h, HW_ATL_B0_MAC_MIN + i);
 
-		rpfl2_uc_flr_en_set(self,
-				    (self->aq_nic_cfg->is_mc_list_enabled),
+		hw_atl_rpfl2_uc_flr_en_set(self,
+					   (self->aq_nic_cfg->is_mc_list_enabled),
 				    HW_ATL_B0_MAC_MIN + i);
 	}
 
@@ -824,10 +830,10 @@ static int hw_atl_b0_hw_interrupt_moderation_set(struct aq_hw_s *self)
 	switch (self->aq_nic_cfg->itr) {
 	case  AQ_CFG_INTERRUPT_MODERATION_ON:
 	case  AQ_CFG_INTERRUPT_MODERATION_AUTO:
-		tdm_tx_desc_wr_wb_irq_en_set(self, 0U);
-		tdm_tdm_intr_moder_en_set(self, 1U);
-		rdm_rx_desc_wr_wb_irq_en_set(self, 0U);
-		rdm_rdm_intr_moder_en_set(self, 1U);
+		hw_atl_tdm_tx_desc_wr_wb_irq_en_set(self, 0U);
+		hw_atl_tdm_tdm_intr_moder_en_set(self, 1U);
+		hw_atl_rdm_rx_desc_wr_wb_irq_en_set(self, 0U);
+		hw_atl_rdm_rdm_intr_moder_en_set(self, 1U);
 
 		if (self->aq_nic_cfg->itr == AQ_CFG_INTERRUPT_MODERATION_ON) {
 			/* HW timers are in 2us units */
@@ -887,18 +893,18 @@ static int hw_atl_b0_hw_interrupt_moderation_set(struct aq_hw_s *self)
 		}
 		break;
 	case AQ_CFG_INTERRUPT_MODERATION_OFF:
-		tdm_tx_desc_wr_wb_irq_en_set(self, 1U);
-		tdm_tdm_intr_moder_en_set(self, 0U);
-		rdm_rx_desc_wr_wb_irq_en_set(self, 1U);
-		rdm_rdm_intr_moder_en_set(self, 0U);
+		hw_atl_tdm_tx_desc_wr_wb_irq_en_set(self, 1U);
+		hw_atl_tdm_tdm_intr_moder_en_set(self, 0U);
+		hw_atl_rdm_rx_desc_wr_wb_irq_en_set(self, 1U);
+		hw_atl_rdm_rdm_intr_moder_en_set(self, 0U);
 		itr_tx = 0U;
 		itr_rx = 0U;
 		break;
 	}
 
 	for (i = HW_ATL_B0_RINGS_MAX; i--;) {
-		reg_tx_intr_moder_ctrl_set(self, itr_tx, i);
-		reg_rx_intr_moder_ctrl_set(self, itr_rx, i);
+		hw_atl_reg_tx_intr_moder_ctrl_set(self, itr_tx, i);
+		hw_atl_reg_rx_intr_moder_ctrl_set(self, itr_rx, i);
 	}
 
 	return aq_hw_err_from_flags(self);
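
When moderation is enabled, the comment above notes that the hardware timers tick in 2us units; the computed itr_tx/itr_rx values are then written per ring. A hedged conversion helper using the HW_ATL_INTR_MODER_MIN/MAX bounds from hw_atl_b0_internal.h (the driver's actual auto-tuning is outside this hunk):

	/* Sketch: microseconds -> 2us hardware timer units, clamped to
	 * the moderation limits. Illustrative only.
	 */
	static u32 itr_usecs_to_hw(u32 usecs)
	{
		return clamp_val(usecs / 2U, HW_ATL_INTR_MODER_MIN,
				 HW_ATL_INTR_MODER_MAX);
	}
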
@@ -913,38 +919,19 @@ static int hw_atl_b0_hw_stop(struct aq_hw_s *self)
 static int hw_atl_b0_hw_ring_tx_stop(struct aq_hw_s *self,
 				     struct aq_ring_s *ring)
 {
-	tdm_tx_desc_en_set(self, 0U, ring->idx);
+	hw_atl_tdm_tx_desc_en_set(self, 0U, ring->idx);
 	return aq_hw_err_from_flags(self);
 }
 
 static int hw_atl_b0_hw_ring_rx_stop(struct aq_hw_s *self,
 				     struct aq_ring_s *ring)
 {
-	rdm_rx_desc_en_set(self, 0U, ring->idx);
+	hw_atl_rdm_rx_desc_en_set(self, 0U, ring->idx);
 	return aq_hw_err_from_flags(self);
 }
 
-static int hw_atl_b0_hw_set_speed(struct aq_hw_s *self, u32 speed)
-{
-	int err = 0;
-
-	err = hw_atl_utils_mpi_set_speed(self, speed, MPI_INIT);
-	if (err < 0)
-		goto err_exit;
-
-err_exit:
-	return err;
-}
-
-static struct aq_hw_ops hw_atl_ops_ = {
-	.create               = hw_atl_b0_create,
-	.destroy              = hw_atl_b0_destroy,
-	.get_hw_caps          = hw_atl_b0_get_hw_caps,
-
-	.hw_get_mac_permanent = hw_atl_utils_get_mac_permanent,
+const struct aq_hw_ops hw_atl_ops_b0 = {
 	.hw_set_mac_address   = hw_atl_b0_hw_mac_addr_set,
-	.hw_get_link_status   = hw_atl_utils_mpi_get_link_status,
-	.hw_set_link_speed    = hw_atl_b0_hw_set_speed,
 	.hw_init              = hw_atl_b0_hw_init,
 	.hw_deinit            = hw_atl_utils_hw_deinit,
 	.hw_set_power         = hw_atl_utils_hw_set_power,
@@ -974,21 +961,6 @@ static struct aq_hw_ops hw_atl_ops_ = {
 	.hw_rss_set                  = hw_atl_b0_hw_rss_set,
 	.hw_rss_hash_set             = hw_atl_b0_hw_rss_hash_set,
 	.hw_get_regs                 = hw_atl_utils_hw_get_regs,
-	.hw_update_stats             = hw_atl_utils_update_stats,
 	.hw_get_hw_stats             = hw_atl_utils_get_hw_stats,
 	.hw_get_fw_version           = hw_atl_utils_get_fw_version,
 };
-
-struct aq_hw_ops *hw_atl_b0_get_ops_by_id(struct pci_dev *pdev)
-{
-	bool is_vid_ok = (pdev->vendor == PCI_VENDOR_ID_AQUANTIA);
-	bool is_did_ok = ((pdev->device == HW_ATL_DEVICE_ID_0001) ||
-			(pdev->device == HW_ATL_DEVICE_ID_D100) ||
-			(pdev->device == HW_ATL_DEVICE_ID_D107) ||
-			(pdev->device == HW_ATL_DEVICE_ID_D108) ||
-			(pdev->device == HW_ATL_DEVICE_ID_D109));
-
-	bool is_rev_ok = (pdev->revision == 2U);
-
-	return (is_vid_ok && is_did_ok && is_rev_ok) ? &hw_atl_ops_ : NULL;
-}
diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.h b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.h
index a1e1bce6c1f3..2cc8dacfdc27 100644
--- a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.h
+++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.h
@@ -16,19 +16,27 @@
 
 #include "../aq_common.h"
 
-#ifndef PCI_VENDOR_ID_AQUANTIA
+extern const struct aq_hw_caps_s hw_atl_b0_caps_aqc100;
+extern const struct aq_hw_caps_s hw_atl_b0_caps_aqc107;
+extern const struct aq_hw_caps_s hw_atl_b0_caps_aqc108;
+extern const struct aq_hw_caps_s hw_atl_b0_caps_aqc109;
 
-#define PCI_VENDOR_ID_AQUANTIA  0x1D6A
-#define HW_ATL_DEVICE_ID_0001   0x0001
-#define HW_ATL_DEVICE_ID_D100   0xD100
-#define HW_ATL_DEVICE_ID_D107   0xD107
-#define HW_ATL_DEVICE_ID_D108   0xD108
-#define HW_ATL_DEVICE_ID_D109   0xD109
+#define hw_atl_b0_caps_aqc111 hw_atl_b0_caps_aqc108
+#define hw_atl_b0_caps_aqc112 hw_atl_b0_caps_aqc109
 
-#define HW_ATL_NIC_NAME "aQuantia AQtion 5Gbit Network Adapter"
+#define hw_atl_b0_caps_aqc100s hw_atl_b0_caps_aqc100
+#define hw_atl_b0_caps_aqc107s hw_atl_b0_caps_aqc107
+#define hw_atl_b0_caps_aqc108s hw_atl_b0_caps_aqc108
+#define hw_atl_b0_caps_aqc109s hw_atl_b0_caps_aqc109
 
-#endif
+#define hw_atl_b0_caps_aqc111s hw_atl_b0_caps_aqc108
+#define hw_atl_b0_caps_aqc112s hw_atl_b0_caps_aqc109
 
-struct aq_hw_ops *hw_atl_b0_get_ops_by_id(struct pci_dev *pdev);
+#define hw_atl_b0_caps_aqc111e hw_atl_b0_caps_aqc108
+#define hw_atl_b0_caps_aqc112e hw_atl_b0_caps_aqc109
+
+extern const struct aq_hw_ops hw_atl_ops_b0;
+
+#define hw_atl_ops_b1 hw_atl_ops_b0
 
 #endif /* HW_ATL_B0_H */
diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0_internal.h b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0_internal.h
index 9aa2c6edfca2..405d1455c222 100644
--- a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0_internal.h
+++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0_internal.h
@@ -142,70 +142,6 @@
 #define HW_ATL_INTR_MODER_MAX  0x1FF
 #define HW_ATL_INTR_MODER_MIN  0xFF
 
-/* Hardware tx descriptor */
-struct __packed hw_atl_txd_s {
-	u64 buf_addr;
-	u32 ctl;
-	u32 ctl2; /* 63..46 - payload length, 45 - ctx enable, 44 - ctx index */
-};
-
-/* Hardware tx context descriptor */
-struct __packed hw_atl_txc_s {
-	u32 rsvd;
-	u32 len;
-	u32 ctl;
-	u32 len2;
-};
-
-/* Hardware rx descriptor */
-struct __packed hw_atl_rxd_s {
-	u64 buf_addr;
-	u64 hdr_addr;
-};
-
-/* Hardware rx descriptor writeback */
-struct __packed hw_atl_rxd_wb_s {
-	u32 type;
-	u32 rss_hash;
-	u16 status;
-	u16 pkt_len;
-	u16 next_desc_ptr;
-	u16 vlan;
-};
-
 /* HW layer capabilities */
-static struct aq_hw_caps_s hw_atl_b0_hw_caps_ = {
-	.ports = 1U,
-	.is_64_dma = true,
-	.msix_irqs = 4U,
-	.irq_mask = ~0U,
-	.vecs = HW_ATL_B0_RSS_MAX,
-	.tcs = HW_ATL_B0_TC_MAX,
-	.rxd_alignment = 1U,
-	.rxd_size = HW_ATL_B0_RXD_SIZE,
-	.rxds = 8U * 1024U,
-	.txd_alignment = 1U,
-	.txd_size = HW_ATL_B0_TXD_SIZE,
-	.txds = 8U * 1024U,
-	.txhwb_alignment = 4096U,
-	.tx_rings = HW_ATL_B0_TX_RINGS,
-	.rx_rings = HW_ATL_B0_RX_RINGS,
-	.hw_features = NETIF_F_HW_CSUM |
-			NETIF_F_RXCSUM |
-			NETIF_F_RXHASH |
-			NETIF_F_SG |
-			NETIF_F_TSO |
-			NETIF_F_LRO,
-	.hw_priv_flags = IFF_UNICAST_FLT,
-	.link_speed_msk = (HW_ATL_B0_RATE_10G |
-			HW_ATL_B0_RATE_5G |
-			HW_ATL_B0_RATE_2G5 |
-			HW_ATL_B0_RATE_1G |
-			HW_ATL_B0_RATE_100M),
-	.flow_control = true,
-	.mtu = HW_ATL_B0_MTU_JUMBO,
-	.mac_regs_count = 88,
-	.fw_ver_expected = HW_ATL_B0_FW_VER_EXPECTED,
-};
 
 #endif /* HW_ATL_B0_INTERNAL_H */
diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh.c b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh.c
index 3de651afa8c7..10ba035dadb1 100644
--- a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh.c
+++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh.c
@@ -16,111 +16,115 @@
 #include "../aq_hw_utils.h"
 
 /* global */
-void reg_glb_cpu_sem_set(struct aq_hw_s *aq_hw, u32 glb_cpu_sem, u32 semaphore)
+void hw_atl_reg_glb_cpu_sem_set(struct aq_hw_s *aq_hw, u32 glb_cpu_sem,
+				u32 semaphore)
 {
-	aq_hw_write_reg(aq_hw, glb_cpu_sem_adr(semaphore), glb_cpu_sem);
+	aq_hw_write_reg(aq_hw, HW_ATL_GLB_CPU_SEM_ADR(semaphore), glb_cpu_sem);
 }
 
-u32 reg_glb_cpu_sem_get(struct aq_hw_s *aq_hw, u32 semaphore)
+u32 hw_atl_reg_glb_cpu_sem_get(struct aq_hw_s *aq_hw, u32 semaphore)
 {
-	return aq_hw_read_reg(aq_hw, glb_cpu_sem_adr(semaphore));
+	return aq_hw_read_reg(aq_hw, HW_ATL_GLB_CPU_SEM_ADR(semaphore));
 }
 
-void glb_glb_reg_res_dis_set(struct aq_hw_s *aq_hw, u32 glb_reg_res_dis)
+void hw_atl_glb_glb_reg_res_dis_set(struct aq_hw_s *aq_hw, u32 glb_reg_res_dis)
 {
-	aq_hw_write_reg_bit(aq_hw, glb_reg_res_dis_adr,
-			    glb_reg_res_dis_msk,
-			    glb_reg_res_dis_shift,
+	aq_hw_write_reg_bit(aq_hw, HW_ATL_GLB_REG_RES_DIS_ADR,
+			    HW_ATL_GLB_REG_RES_DIS_MSK,
+			    HW_ATL_GLB_REG_RES_DIS_SHIFT,
 			    glb_reg_res_dis);
 }
 
-void glb_soft_res_set(struct aq_hw_s *aq_hw, u32 soft_res)
+void hw_atl_glb_soft_res_set(struct aq_hw_s *aq_hw, u32 soft_res)
 {
-	aq_hw_write_reg_bit(aq_hw, glb_soft_res_adr, glb_soft_res_msk,
-			    glb_soft_res_shift, soft_res);
+	aq_hw_write_reg_bit(aq_hw, HW_ATL_GLB_SOFT_RES_ADR,
+			    HW_ATL_GLB_SOFT_RES_MSK,
+			    HW_ATL_GLB_SOFT_RES_SHIFT, soft_res);
 }
 
-u32 glb_soft_res_get(struct aq_hw_s *aq_hw)
+u32 hw_atl_glb_soft_res_get(struct aq_hw_s *aq_hw)
 {
-	return aq_hw_read_reg_bit(aq_hw, glb_soft_res_adr,
-				  glb_soft_res_msk,
-				  glb_soft_res_shift);
+	return aq_hw_read_reg_bit(aq_hw, HW_ATL_GLB_SOFT_RES_ADR,
+				  HW_ATL_GLB_SOFT_RES_MSK,
+				  HW_ATL_GLB_SOFT_RES_SHIFT);
 }
 
-u32 reg_rx_dma_stat_counter7get(struct aq_hw_s *aq_hw)
+u32 hw_atl_reg_rx_dma_stat_counter7get(struct aq_hw_s *aq_hw)
 {
-	return aq_hw_read_reg(aq_hw, rx_dma_stat_counter7_adr);
+	return aq_hw_read_reg(aq_hw, HW_ATL_RX_DMA_STAT_COUNTER7_ADR);
 }
 
-u32 reg_glb_mif_id_get(struct aq_hw_s *aq_hw)
+u32 hw_atl_reg_glb_mif_id_get(struct aq_hw_s *aq_hw)
 {
-	return aq_hw_read_reg(aq_hw, glb_mif_id_adr);
+	return aq_hw_read_reg(aq_hw, HW_ATL_GLB_MIF_ID_ADR);
 }
 
 /* stats */
-u32 rpb_rx_dma_drop_pkt_cnt_get(struct aq_hw_s *aq_hw)
+u32 hw_atl_rpb_rx_dma_drop_pkt_cnt_get(struct aq_hw_s *aq_hw)
 {
-	return aq_hw_read_reg(aq_hw, rpb_rx_dma_drop_pkt_cnt_adr);
+	return aq_hw_read_reg(aq_hw, HW_ATL_RPB_RX_DMA_DROP_PKT_CNT_ADR);
 }
 
-u32 stats_rx_dma_good_octet_counterlsw_get(struct aq_hw_s *aq_hw)
+u32 hw_atl_stats_rx_dma_good_octet_counterlsw_get(struct aq_hw_s *aq_hw)
 {
-	return aq_hw_read_reg(aq_hw, stats_rx_dma_good_octet_counterlsw__adr);
+	return aq_hw_read_reg(aq_hw, HW_ATL_STATS_RX_DMA_GOOD_OCTET_COUNTERLSW);
 }
 
-u32 stats_rx_dma_good_pkt_counterlsw_get(struct aq_hw_s *aq_hw)
+u32 hw_atl_stats_rx_dma_good_pkt_counterlsw_get(struct aq_hw_s *aq_hw)
 {
-	return aq_hw_read_reg(aq_hw, stats_rx_dma_good_pkt_counterlsw__adr);
+	return aq_hw_read_reg(aq_hw, HW_ATL_STATS_RX_DMA_GOOD_PKT_COUNTERLSW);
 }
 
-u32 stats_tx_dma_good_octet_counterlsw_get(struct aq_hw_s *aq_hw)
+u32 hw_atl_stats_tx_dma_good_octet_counterlsw_get(struct aq_hw_s *aq_hw)
 {
-	return aq_hw_read_reg(aq_hw, stats_tx_dma_good_octet_counterlsw__adr);
+	return aq_hw_read_reg(aq_hw, HW_ATL_STATS_TX_DMA_GOOD_OCTET_COUNTERLSW);
 }
 
-u32 stats_tx_dma_good_pkt_counterlsw_get(struct aq_hw_s *aq_hw)
+u32 hw_atl_stats_tx_dma_good_pkt_counterlsw_get(struct aq_hw_s *aq_hw)
 {
-	return aq_hw_read_reg(aq_hw, stats_tx_dma_good_pkt_counterlsw__adr);
+	return aq_hw_read_reg(aq_hw, HW_ATL_STATS_TX_DMA_GOOD_PKT_COUNTERLSW);
 }
 
-u32 stats_rx_dma_good_octet_countermsw_get(struct aq_hw_s *aq_hw)
+u32 hw_atl_stats_rx_dma_good_octet_countermsw_get(struct aq_hw_s *aq_hw)
 {
-	return aq_hw_read_reg(aq_hw, stats_rx_dma_good_octet_countermsw__adr);
+	return aq_hw_read_reg(aq_hw, HW_ATL_STATS_RX_DMA_GOOD_OCTET_COUNTERMSW);
 }
 
-u32 stats_rx_dma_good_pkt_countermsw_get(struct aq_hw_s *aq_hw)
+u32 hw_atl_stats_rx_dma_good_pkt_countermsw_get(struct aq_hw_s *aq_hw)
 {
-	return aq_hw_read_reg(aq_hw, stats_rx_dma_good_pkt_countermsw__adr);
+	return aq_hw_read_reg(aq_hw, HW_ATL_STATS_RX_DMA_GOOD_PKT_COUNTERMSW);
 }
 
-u32 stats_tx_dma_good_octet_countermsw_get(struct aq_hw_s *aq_hw)
+u32 hw_atl_stats_tx_dma_good_octet_countermsw_get(struct aq_hw_s *aq_hw)
 {
-	return aq_hw_read_reg(aq_hw, stats_tx_dma_good_octet_countermsw__adr);
+	return aq_hw_read_reg(aq_hw, HW_ATL_STATS_TX_DMA_GOOD_OCTET_COUNTERMSW);
 }
 
-u32 stats_tx_dma_good_pkt_countermsw_get(struct aq_hw_s *aq_hw)
+u32 hw_atl_stats_tx_dma_good_pkt_countermsw_get(struct aq_hw_s *aq_hw)
 {
-	return aq_hw_read_reg(aq_hw, stats_tx_dma_good_pkt_countermsw__adr);
+	return aq_hw_read_reg(aq_hw, HW_ATL_STATS_TX_DMA_GOOD_PKT_COUNTERMSW);
 }
 
 /* interrupt */
-void itr_irq_auto_masklsw_set(struct aq_hw_s *aq_hw, u32 irq_auto_masklsw)
+void hw_atl_itr_irq_auto_masklsw_set(struct aq_hw_s *aq_hw,
+				     u32 irq_auto_masklsw)
 {
-	aq_hw_write_reg(aq_hw, itr_iamrlsw_adr, irq_auto_masklsw);
+	aq_hw_write_reg(aq_hw, HW_ATL_ITR_IAMRLSW_ADR, irq_auto_masklsw);
 }
 
-void itr_irq_map_en_rx_set(struct aq_hw_s *aq_hw, u32 irq_map_en_rx, u32 rx)
+void hw_atl_itr_irq_map_en_rx_set(struct aq_hw_s *aq_hw, u32 irq_map_en_rx,
+				  u32 rx)
 {
 /* register address for bitfield imr_rx{r}_en */
 	static u32 itr_imr_rxren_adr[32] = {
 			0x00002100U, 0x00002100U, 0x00002104U, 0x00002104U,
-			0x00002108U, 0x00002108U, 0x0000210cU, 0x0000210cU,
+			0x00002108U, 0x00002108U, 0x0000210CU, 0x0000210CU,
 			0x00002110U, 0x00002110U, 0x00002114U, 0x00002114U,
-			0x00002118U, 0x00002118U, 0x0000211cU, 0x0000211cU,
+			0x00002118U, 0x00002118U, 0x0000211CU, 0x0000211CU,
 			0x00002120U, 0x00002120U, 0x00002124U, 0x00002124U,
-			0x00002128U, 0x00002128U, 0x0000212cU, 0x0000212cU,
+			0x00002128U, 0x00002128U, 0x0000212CU, 0x0000212CU,
 			0x00002130U, 0x00002130U, 0x00002134U, 0x00002134U,
-			0x00002138U, 0x00002138U, 0x0000213cU, 0x0000213cU
+			0x00002138U, 0x00002138U, 0x0000213CU, 0x0000213CU
 		};
 
 /* bitmask for bitfield imr_rx{r}_en */
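
Every accessor in hw_atl_llh.c boils down to aq_hw_write_reg_bit()/aq_hw_read_reg_bit() driven by an (address, mask, shift) triple, now spelled in HW_ATL_*_ADR/_MSK/_SHIFT form. The helper's signature is fixed by these calls; its body is not shown in this diff, but the standard read-modify-write it implies looks like this (an assumption about the implementation):

	/* Sketch: the read-modify-write a bitfield write helper with
	 * this signature would perform.
	 */
	static void write_reg_bit_sketch(struct aq_hw_s *aq_hw, u32 adr,
					 u32 msk, u32 shift, u32 val)
	{
		u32 reg = aq_hw_read_reg(aq_hw, adr);

		reg &= ~msk;			/* clear the field  */
		reg |= (val << shift) & msk;	/* insert new value */
		aq_hw_write_reg(aq_hw, adr, reg);
	}
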
@@ -149,18 +153,19 @@ void itr_irq_map_en_rx_set(struct aq_hw_s *aq_hw, u32 irq_map_en_rx, u32 rx)
 			    irq_map_en_rx);
 }
 
-void itr_irq_map_en_tx_set(struct aq_hw_s *aq_hw, u32 irq_map_en_tx, u32 tx)
+void hw_atl_itr_irq_map_en_tx_set(struct aq_hw_s *aq_hw, u32 irq_map_en_tx,
+				  u32 tx)
 {
 /* register address for bitfield imr_tx{t}_en */
 	static u32 itr_imr_txten_adr[32] = {
 			0x00002100U, 0x00002100U, 0x00002104U, 0x00002104U,
-			0x00002108U, 0x00002108U, 0x0000210cU, 0x0000210cU,
+			0x00002108U, 0x00002108U, 0x0000210CU, 0x0000210CU,
 			0x00002110U, 0x00002110U, 0x00002114U, 0x00002114U,
-			0x00002118U, 0x00002118U, 0x0000211cU, 0x0000211cU,
+			0x00002118U, 0x00002118U, 0x0000211CU, 0x0000211CU,
 			0x00002120U, 0x00002120U, 0x00002124U, 0x00002124U,
-			0x00002128U, 0x00002128U, 0x0000212cU, 0x0000212cU,
+			0x00002128U, 0x00002128U, 0x0000212CU, 0x0000212CU,
 			0x00002130U, 0x00002130U, 0x00002134U, 0x00002134U,
-			0x00002138U, 0x00002138U, 0x0000213cU, 0x0000213cU
+			0x00002138U, 0x00002138U, 0x0000213CU, 0x0000213CU
 		};
 
 /* bitmask for bitfield imr_tx{t}_en */
@@ -189,30 +194,30 @@ void itr_irq_map_en_tx_set(struct aq_hw_s *aq_hw, u32 irq_map_en_tx, u32 tx)
 			    irq_map_en_tx);
 }
 
-void itr_irq_map_rx_set(struct aq_hw_s *aq_hw, u32 irq_map_rx, u32 rx)
+void hw_atl_itr_irq_map_rx_set(struct aq_hw_s *aq_hw, u32 irq_map_rx, u32 rx)
 {
 /* register address for bitfield imr_rx{r}[4:0] */
 	static u32 itr_imr_rxr_adr[32] = {
 			0x00002100U, 0x00002100U, 0x00002104U, 0x00002104U,
-			0x00002108U, 0x00002108U, 0x0000210cU, 0x0000210cU,
+			0x00002108U, 0x00002108U, 0x0000210CU, 0x0000210CU,
 			0x00002110U, 0x00002110U, 0x00002114U, 0x00002114U,
-			0x00002118U, 0x00002118U, 0x0000211cU, 0x0000211cU,
+			0x00002118U, 0x00002118U, 0x0000211CU, 0x0000211CU,
 			0x00002120U, 0x00002120U, 0x00002124U, 0x00002124U,
-			0x00002128U, 0x00002128U, 0x0000212cU, 0x0000212cU,
+			0x00002128U, 0x00002128U, 0x0000212CU, 0x0000212CU,
 			0x00002130U, 0x00002130U, 0x00002134U, 0x00002134U,
-			0x00002138U, 0x00002138U, 0x0000213cU, 0x0000213cU
+			0x00002138U, 0x00002138U, 0x0000213CU, 0x0000213CU
 		};
 
 /* bitmask for bitfield imr_rx{r}[4:0] */
 	static u32 itr_imr_rxr_msk[32] = {
-			0x00001f00U, 0x0000001fU, 0x00001f00U, 0x0000001fU,
-			0x00001f00U, 0x0000001fU, 0x00001f00U, 0x0000001fU,
-			0x00001f00U, 0x0000001fU, 0x00001f00U, 0x0000001fU,
-			0x00001f00U, 0x0000001fU, 0x00001f00U, 0x0000001fU,
-			0x00001f00U, 0x0000001fU, 0x00001f00U, 0x0000001fU,
-			0x00001f00U, 0x0000001fU, 0x00001f00U, 0x0000001fU,
-			0x00001f00U, 0x0000001fU, 0x00001f00U, 0x0000001fU,
-			0x00001f00U, 0x0000001fU, 0x00001f00U, 0x0000001fU
+			0x00001f00U, 0x0000001FU, 0x00001F00U, 0x0000001FU,
+			0x00001f00U, 0x0000001FU, 0x00001F00U, 0x0000001FU,
+			0x00001f00U, 0x0000001FU, 0x00001F00U, 0x0000001FU,
+			0x00001f00U, 0x0000001FU, 0x00001F00U, 0x0000001FU,
+			0x00001f00U, 0x0000001FU, 0x00001F00U, 0x0000001FU,
+			0x00001f00U, 0x0000001FU, 0x00001F00U, 0x0000001FU,
+			0x00001f00U, 0x0000001FU, 0x00001F00U, 0x0000001FU,
+			0x00001f00U, 0x0000001FU, 0x00001F00U, 0x0000001FU
 		};
 
 /* lower bit position of bitfield imr_rx{r}[4:0] */
@@ -229,30 +234,30 @@ void itr_irq_map_rx_set(struct aq_hw_s *aq_hw, u32 irq_map_rx, u32 rx)
 			    irq_map_rx);
 }
 
-void itr_irq_map_tx_set(struct aq_hw_s *aq_hw, u32 irq_map_tx, u32 tx)
+void hw_atl_itr_irq_map_tx_set(struct aq_hw_s *aq_hw, u32 irq_map_tx, u32 tx)
 {
 /* register address for bitfield imr_tx{t}[4:0] */
 	static u32 itr_imr_txt_adr[32] = {
 			0x00002100U, 0x00002100U, 0x00002104U, 0x00002104U,
-			0x00002108U, 0x00002108U, 0x0000210cU, 0x0000210cU,
+			0x00002108U, 0x00002108U, 0x0000210CU, 0x0000210CU,
 			0x00002110U, 0x00002110U, 0x00002114U, 0x00002114U,
-			0x00002118U, 0x00002118U, 0x0000211cU, 0x0000211cU,
+			0x00002118U, 0x00002118U, 0x0000211CU, 0x0000211CU,
 			0x00002120U, 0x00002120U, 0x00002124U, 0x00002124U,
-			0x00002128U, 0x00002128U, 0x0000212cU, 0x0000212cU,
+			0x00002128U, 0x00002128U, 0x0000212CU, 0x0000212CU,
 			0x00002130U, 0x00002130U, 0x00002134U, 0x00002134U,
-			0x00002138U, 0x00002138U, 0x0000213cU, 0x0000213cU
+			0x00002138U, 0x00002138U, 0x0000213CU, 0x0000213CU
 		};
 
 /* bitmask for bitfield imr_tx{t}[4:0] */
 	static u32 itr_imr_txt_msk[32] = {
-			0x1f000000U, 0x001f0000U, 0x1f000000U, 0x001f0000U,
-			0x1f000000U, 0x001f0000U, 0x1f000000U, 0x001f0000U,
-			0x1f000000U, 0x001f0000U, 0x1f000000U, 0x001f0000U,
-			0x1f000000U, 0x001f0000U, 0x1f000000U, 0x001f0000U,
-			0x1f000000U, 0x001f0000U, 0x1f000000U, 0x001f0000U,
-			0x1f000000U, 0x001f0000U, 0x1f000000U, 0x001f0000U,
-			0x1f000000U, 0x001f0000U, 0x1f000000U, 0x001f0000U,
-			0x1f000000U, 0x001f0000U, 0x1f000000U, 0x001f0000U
+			0x1f000000U, 0x001F0000U, 0x1F000000U, 0x001F0000U,
+			0x1f000000U, 0x001F0000U, 0x1F000000U, 0x001F0000U,
+			0x1f000000U, 0x001F0000U, 0x1F000000U, 0x001F0000U,
+			0x1f000000U, 0x001F0000U, 0x1F000000U, 0x001F0000U,
+			0x1f000000U, 0x001F0000U, 0x1F000000U, 0x001F0000U,
+			0x1f000000U, 0x001F0000U, 0x1F000000U, 0x001F0000U,
+			0x1f000000U, 0x001F0000U, 0x1F000000U, 0x001F0000U,
+			0x1f000000U, 0x001F0000U, 0x1F000000U, 0x001F0000U
 		};
 
 /* lower bit position of bitfield imr_tx{t}[4:0] */
@@ -269,429 +274,463 @@ void itr_irq_map_tx_set(struct aq_hw_s *aq_hw, u32 irq_map_tx, u32 tx)
 			    irq_map_tx);
 }
 
-void itr_irq_msk_clearlsw_set(struct aq_hw_s *aq_hw, u32 irq_msk_clearlsw)
+void hw_atl_itr_irq_msk_clearlsw_set(struct aq_hw_s *aq_hw,
+				     u32 irq_msk_clearlsw)
 {
-	aq_hw_write_reg(aq_hw, itr_imcrlsw_adr, irq_msk_clearlsw);
+	aq_hw_write_reg(aq_hw, HW_ATL_ITR_IMCRLSW_ADR, irq_msk_clearlsw);
 }
 
-void itr_irq_msk_setlsw_set(struct aq_hw_s *aq_hw, u32 irq_msk_setlsw)
+void hw_atl_itr_irq_msk_setlsw_set(struct aq_hw_s *aq_hw, u32 irq_msk_setlsw)
 {
-	aq_hw_write_reg(aq_hw, itr_imsrlsw_adr, irq_msk_setlsw);
+	aq_hw_write_reg(aq_hw, HW_ATL_ITR_IMSRLSW_ADR, irq_msk_setlsw);
 }
 
-void itr_irq_reg_res_dis_set(struct aq_hw_s *aq_hw, u32 irq_reg_res_dis)
+void hw_atl_itr_irq_reg_res_dis_set(struct aq_hw_s *aq_hw, u32 irq_reg_res_dis)
 {
-	aq_hw_write_reg_bit(aq_hw, itr_reg_res_dsbl_adr,
-			    itr_reg_res_dsbl_msk,
-			    itr_reg_res_dsbl_shift, irq_reg_res_dis);
+	aq_hw_write_reg_bit(aq_hw, HW_ATL_ITR_REG_RES_DSBL_ADR,
+			    HW_ATL_ITR_REG_RES_DSBL_MSK,
+			    HW_ATL_ITR_REG_RES_DSBL_SHIFT, irq_reg_res_dis);
 }
 
-void itr_irq_status_clearlsw_set(struct aq_hw_s *aq_hw,
-				 u32 irq_status_clearlsw)
+void hw_atl_itr_irq_status_clearlsw_set(struct aq_hw_s *aq_hw,
+					u32 irq_status_clearlsw)
 {
-	aq_hw_write_reg(aq_hw, itr_iscrlsw_adr, irq_status_clearlsw);
+	aq_hw_write_reg(aq_hw, HW_ATL_ITR_ISCRLSW_ADR, irq_status_clearlsw);
 }
 
-u32 itr_irq_statuslsw_get(struct aq_hw_s *aq_hw)
+u32 hw_atl_itr_irq_statuslsw_get(struct aq_hw_s *aq_hw)
 {
-	return aq_hw_read_reg(aq_hw, itr_isrlsw_adr);
+	return aq_hw_read_reg(aq_hw, HW_ATL_ITR_ISRLSW_ADR);
 }
 
-u32 itr_res_irq_get(struct aq_hw_s *aq_hw)
+u32 hw_atl_itr_res_irq_get(struct aq_hw_s *aq_hw)
 {
-	return aq_hw_read_reg_bit(aq_hw, itr_res_adr, itr_res_msk,
-				  itr_res_shift);
+	return aq_hw_read_reg_bit(aq_hw, HW_ATL_ITR_RES_ADR, HW_ATL_ITR_RES_MSK,
+				  HW_ATL_ITR_RES_SHIFT);
 }
 
-void itr_res_irq_set(struct aq_hw_s *aq_hw, u32 res_irq)
+void hw_atl_itr_res_irq_set(struct aq_hw_s *aq_hw, u32 res_irq)
 {
-	aq_hw_write_reg_bit(aq_hw, itr_res_adr, itr_res_msk,
-			    itr_res_shift, res_irq);
+	aq_hw_write_reg_bit(aq_hw, HW_ATL_ITR_RES_ADR, HW_ATL_ITR_RES_MSK,
+			    HW_ATL_ITR_RES_SHIFT, res_irq);
 }
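
/*
 * Context sketch: every accessor in this file funnels into
 * aq_hw_write_reg_bit() or aq_hw_read_reg_bit(). A minimal sketch of the
 * write helper, lightly simplified from the driver's aq_hw_utils.c, shows
 * the read-modify-write it performs on the masked field:
 */
void aq_hw_write_reg_bit(struct aq_hw_s *aq_hw, u32 addr, u32 msk,
			 u32 shift, u32 val)
{
	if (msk) {
		u32 reg_old, reg_new;

		/* only the field selected by msk is rewritten */
		reg_old = aq_hw_read_reg(aq_hw, addr);
		reg_new = (reg_old & ~msk) | (val << shift);

		if (reg_old != reg_new)
			aq_hw_write_reg(aq_hw, addr, reg_new);
	} else {
		aq_hw_write_reg(aq_hw, addr, val);
	}
}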
 
 /* rdm */
-void rdm_cpu_id_set(struct aq_hw_s *aq_hw, u32 cpuid, u32 dca)
+void hw_atl_rdm_cpu_id_set(struct aq_hw_s *aq_hw, u32 cpuid, u32 dca)
 {
-	aq_hw_write_reg_bit(aq_hw, rdm_dcadcpuid_adr(dca),
-			    rdm_dcadcpuid_msk,
-			    rdm_dcadcpuid_shift, cpuid);
+	aq_hw_write_reg_bit(aq_hw, HW_ATL_RDM_DCADCPUID_ADR(dca),
+			    HW_ATL_RDM_DCADCPUID_MSK,
+			    HW_ATL_RDM_DCADCPUID_SHIFT, cpuid);
 }
 
-void rdm_rx_dca_en_set(struct aq_hw_s *aq_hw, u32 rx_dca_en)
+void hw_atl_rdm_rx_dca_en_set(struct aq_hw_s *aq_hw, u32 rx_dca_en)
 {
-	aq_hw_write_reg_bit(aq_hw, rdm_dca_en_adr, rdm_dca_en_msk,
-			    rdm_dca_en_shift, rx_dca_en);
+	aq_hw_write_reg_bit(aq_hw, HW_ATL_RDM_DCA_EN_ADR, HW_ATL_RDM_DCA_EN_MSK,
+			    HW_ATL_RDM_DCA_EN_SHIFT, rx_dca_en);
 }
 
-void rdm_rx_dca_mode_set(struct aq_hw_s *aq_hw, u32 rx_dca_mode)
+void hw_atl_rdm_rx_dca_mode_set(struct aq_hw_s *aq_hw, u32 rx_dca_mode)
 {
-	aq_hw_write_reg_bit(aq_hw, rdm_dca_mode_adr, rdm_dca_mode_msk,
-			    rdm_dca_mode_shift, rx_dca_mode);
+	aq_hw_write_reg_bit(aq_hw, HW_ATL_RDM_DCA_MODE_ADR,
+			    HW_ATL_RDM_DCA_MODE_MSK,
+			    HW_ATL_RDM_DCA_MODE_SHIFT, rx_dca_mode);
 }
 
-void rdm_rx_desc_data_buff_size_set(struct aq_hw_s *aq_hw,
-				    u32 rx_desc_data_buff_size, u32 descriptor)
+void hw_atl_rdm_rx_desc_data_buff_size_set(struct aq_hw_s *aq_hw,
+					   u32 rx_desc_data_buff_size,
+					   u32 descriptor)
 {
-	aq_hw_write_reg_bit(aq_hw, rdm_descddata_size_adr(descriptor),
-			    rdm_descddata_size_msk,
-			    rdm_descddata_size_shift,
+	aq_hw_write_reg_bit(aq_hw, HW_ATL_RDM_DESCDDATA_SIZE_ADR(descriptor),
+			    HW_ATL_RDM_DESCDDATA_SIZE_MSK,
+			    HW_ATL_RDM_DESCDDATA_SIZE_SHIFT,
 			    rx_desc_data_buff_size);
 }
 
-void rdm_rx_desc_dca_en_set(struct aq_hw_s *aq_hw, u32 rx_desc_dca_en, u32 dca)
+void hw_atl_rdm_rx_desc_dca_en_set(struct aq_hw_s *aq_hw, u32 rx_desc_dca_en,
+				   u32 dca)
 {
-	aq_hw_write_reg_bit(aq_hw, rdm_dcaddesc_en_adr(dca),
-			    rdm_dcaddesc_en_msk,
-			    rdm_dcaddesc_en_shift,
+	aq_hw_write_reg_bit(aq_hw, HW_ATL_RDM_DCADDESC_EN_ADR(dca),
+			    HW_ATL_RDM_DCADDESC_EN_MSK,
+			    HW_ATL_RDM_DCADDESC_EN_SHIFT,
 			    rx_desc_dca_en);
 }
 
-void rdm_rx_desc_en_set(struct aq_hw_s *aq_hw, u32 rx_desc_en, u32 descriptor)
+void hw_atl_rdm_rx_desc_en_set(struct aq_hw_s *aq_hw, u32 rx_desc_en,
+			       u32 descriptor)
 {
-	aq_hw_write_reg_bit(aq_hw, rdm_descden_adr(descriptor),
-			    rdm_descden_msk,
-			    rdm_descden_shift,
+	aq_hw_write_reg_bit(aq_hw, HW_ATL_RDM_DESCDEN_ADR(descriptor),
+			    HW_ATL_RDM_DESCDEN_MSK,
+			    HW_ATL_RDM_DESCDEN_SHIFT,
 			    rx_desc_en);
 }
 
-void rdm_rx_desc_head_buff_size_set(struct aq_hw_s *aq_hw,
-				    u32 rx_desc_head_buff_size, u32 descriptor)
+void hw_atl_rdm_rx_desc_head_buff_size_set(struct aq_hw_s *aq_hw,
+					   u32 rx_desc_head_buff_size,
+					   u32 descriptor)
 {
-	aq_hw_write_reg_bit(aq_hw, rdm_descdhdr_size_adr(descriptor),
-			    rdm_descdhdr_size_msk,
-			    rdm_descdhdr_size_shift,
+	aq_hw_write_reg_bit(aq_hw, HW_ATL_RDM_DESCDHDR_SIZE_ADR(descriptor),
+			    HW_ATL_RDM_DESCDHDR_SIZE_MSK,
+			    HW_ATL_RDM_DESCDHDR_SIZE_SHIFT,
 			    rx_desc_head_buff_size);
 }
 
-void rdm_rx_desc_head_splitting_set(struct aq_hw_s *aq_hw,
-				    u32 rx_desc_head_splitting, u32 descriptor)
+void hw_atl_rdm_rx_desc_head_splitting_set(struct aq_hw_s *aq_hw,
+					   u32 rx_desc_head_splitting,
+					   u32 descriptor)
 {
-	aq_hw_write_reg_bit(aq_hw, rdm_descdhdr_split_adr(descriptor),
-			    rdm_descdhdr_split_msk,
-			    rdm_descdhdr_split_shift,
+	aq_hw_write_reg_bit(aq_hw, HW_ATL_RDM_DESCDHDR_SPLIT_ADR(descriptor),
+			    HW_ATL_RDM_DESCDHDR_SPLIT_MSK,
+			    HW_ATL_RDM_DESCDHDR_SPLIT_SHIFT,
 			    rx_desc_head_splitting);
 }
 
-u32 rdm_rx_desc_head_ptr_get(struct aq_hw_s *aq_hw, u32 descriptor)
+u32 hw_atl_rdm_rx_desc_head_ptr_get(struct aq_hw_s *aq_hw, u32 descriptor)
 {
-	return aq_hw_read_reg_bit(aq_hw, rdm_descdhd_adr(descriptor),
-				  rdm_descdhd_msk, rdm_descdhd_shift);
+	return aq_hw_read_reg_bit(aq_hw, HW_ATL_RDM_DESCDHD_ADR(descriptor),
+				  HW_ATL_RDM_DESCDHD_MSK,
+				  HW_ATL_RDM_DESCDHD_SHIFT);
 }
 
-void rdm_rx_desc_len_set(struct aq_hw_s *aq_hw, u32 rx_desc_len, u32 descriptor)
+void hw_atl_rdm_rx_desc_len_set(struct aq_hw_s *aq_hw, u32 rx_desc_len,
+				u32 descriptor)
 {
-	aq_hw_write_reg_bit(aq_hw, rdm_descdlen_adr(descriptor),
-			    rdm_descdlen_msk, rdm_descdlen_shift,
+	aq_hw_write_reg_bit(aq_hw, HW_ATL_RDM_DESCDLEN_ADR(descriptor),
+			    HW_ATL_RDM_DESCDLEN_MSK, HW_ATL_RDM_DESCDLEN_SHIFT,
 			    rx_desc_len);
 }
 
-void rdm_rx_desc_res_set(struct aq_hw_s *aq_hw, u32 rx_desc_res, u32 descriptor)
+void hw_atl_rdm_rx_desc_res_set(struct aq_hw_s *aq_hw, u32 rx_desc_res,
+				u32 descriptor)
 {
-	aq_hw_write_reg_bit(aq_hw, rdm_descdreset_adr(descriptor),
-			    rdm_descdreset_msk, rdm_descdreset_shift,
+	aq_hw_write_reg_bit(aq_hw, HW_ATL_RDM_DESCDRESET_ADR(descriptor),
+			    HW_ATL_RDM_DESCDRESET_MSK,
+			    HW_ATL_RDM_DESCDRESET_SHIFT,
 			    rx_desc_res);
 }
 
-void rdm_rx_desc_wr_wb_irq_en_set(struct aq_hw_s *aq_hw,
-				  u32 rx_desc_wr_wb_irq_en)
+void hw_atl_rdm_rx_desc_wr_wb_irq_en_set(struct aq_hw_s *aq_hw,
+					 u32 rx_desc_wr_wb_irq_en)
 {
-	aq_hw_write_reg_bit(aq_hw, rdm_int_desc_wrb_en_adr,
-			    rdm_int_desc_wrb_en_msk,
-			    rdm_int_desc_wrb_en_shift,
+	aq_hw_write_reg_bit(aq_hw, HW_ATL_RDM_INT_DESC_WRB_EN_ADR,
+			    HW_ATL_RDM_INT_DESC_WRB_EN_MSK,
+			    HW_ATL_RDM_INT_DESC_WRB_EN_SHIFT,
 			    rx_desc_wr_wb_irq_en);
 }
 
-void rdm_rx_head_dca_en_set(struct aq_hw_s *aq_hw, u32 rx_head_dca_en, u32 dca)
+void hw_atl_rdm_rx_head_dca_en_set(struct aq_hw_s *aq_hw, u32 rx_head_dca_en,
+				   u32 dca)
 {
-	aq_hw_write_reg_bit(aq_hw, rdm_dcadhdr_en_adr(dca),
-			    rdm_dcadhdr_en_msk,
-			    rdm_dcadhdr_en_shift,
+	aq_hw_write_reg_bit(aq_hw, HW_ATL_RDM_DCADHDR_EN_ADR(dca),
+			    HW_ATL_RDM_DCADHDR_EN_MSK,
+			    HW_ATL_RDM_DCADHDR_EN_SHIFT,
 			    rx_head_dca_en);
 }
 
-void rdm_rx_pld_dca_en_set(struct aq_hw_s *aq_hw, u32 rx_pld_dca_en, u32 dca)
+void hw_atl_rdm_rx_pld_dca_en_set(struct aq_hw_s *aq_hw, u32 rx_pld_dca_en,
+				  u32 dca)
 {
-	aq_hw_write_reg_bit(aq_hw, rdm_dcadpay_en_adr(dca),
-			    rdm_dcadpay_en_msk, rdm_dcadpay_en_shift,
+	aq_hw_write_reg_bit(aq_hw, HW_ATL_RDM_DCADPAY_EN_ADR(dca),
+			    HW_ATL_RDM_DCADPAY_EN_MSK,
+			    HW_ATL_RDM_DCADPAY_EN_SHIFT,
 			    rx_pld_dca_en);
 }
 
-void rdm_rdm_intr_moder_en_set(struct aq_hw_s *aq_hw, u32 rdm_intr_moder_en)
+void hw_atl_rdm_rdm_intr_moder_en_set(struct aq_hw_s *aq_hw,
+				      u32 rdm_intr_moder_en)
 {
-	aq_hw_write_reg_bit(aq_hw, rdm_int_rim_en_adr,
-			    rdm_int_rim_en_msk,
-			    rdm_int_rim_en_shift,
+	aq_hw_write_reg_bit(aq_hw, HW_ATL_RDM_INT_RIM_EN_ADR,
+			    HW_ATL_RDM_INT_RIM_EN_MSK,
+			    HW_ATL_RDM_INT_RIM_EN_SHIFT,
 			    rdm_intr_moder_en);
 }
 
 /* reg */
-void reg_gen_irq_map_set(struct aq_hw_s *aq_hw, u32 gen_intr_map, u32 regidx)
+void hw_atl_reg_gen_irq_map_set(struct aq_hw_s *aq_hw, u32 gen_intr_map,
+				u32 regidx)
 {
-	aq_hw_write_reg(aq_hw, gen_intr_map_adr(regidx), gen_intr_map);
+	aq_hw_write_reg(aq_hw, HW_ATL_GEN_INTR_MAP_ADR(regidx), gen_intr_map);
 }
 
-u32 reg_gen_irq_status_get(struct aq_hw_s *aq_hw)
+u32 hw_atl_reg_gen_irq_status_get(struct aq_hw_s *aq_hw)
 {
-	return aq_hw_read_reg(aq_hw, gen_intr_stat_adr);
+	return aq_hw_read_reg(aq_hw, HW_ATL_GEN_INTR_STAT_ADR);
 }
 
-void reg_irq_glb_ctl_set(struct aq_hw_s *aq_hw, u32 intr_glb_ctl)
+void hw_atl_reg_irq_glb_ctl_set(struct aq_hw_s *aq_hw, u32 intr_glb_ctl)
 {
-	aq_hw_write_reg(aq_hw, intr_glb_ctl_adr, intr_glb_ctl);
+	aq_hw_write_reg(aq_hw, HW_ATL_INTR_GLB_CTL_ADR, intr_glb_ctl);
 }
 
-void reg_irq_thr_set(struct aq_hw_s *aq_hw, u32 intr_thr, u32 throttle)
+void hw_atl_reg_irq_thr_set(struct aq_hw_s *aq_hw, u32 intr_thr, u32 throttle)
 {
-	aq_hw_write_reg(aq_hw, intr_thr_adr(throttle), intr_thr);
+	aq_hw_write_reg(aq_hw, HW_ATL_INTR_THR_ADR(throttle), intr_thr);
 }
 
-void reg_rx_dma_desc_base_addresslswset(struct aq_hw_s *aq_hw,
-					u32 rx_dma_desc_base_addrlsw,
-					u32 descriptor)
+void hw_atl_reg_rx_dma_desc_base_addresslswset(struct aq_hw_s *aq_hw,
+					       u32 rx_dma_desc_base_addrlsw,
+					       u32 descriptor)
 {
-	aq_hw_write_reg(aq_hw, rx_dma_desc_base_addrlsw_adr(descriptor),
+	aq_hw_write_reg(aq_hw, HW_ATL_RX_DMA_DESC_BASE_ADDRLSW_ADR(descriptor),
 			rx_dma_desc_base_addrlsw);
 }
 
-void reg_rx_dma_desc_base_addressmswset(struct aq_hw_s *aq_hw,
-					u32 rx_dma_desc_base_addrmsw,
-					u32 descriptor)
+void hw_atl_reg_rx_dma_desc_base_addressmswset(struct aq_hw_s *aq_hw,
+					       u32 rx_dma_desc_base_addrmsw,
+					       u32 descriptor)
 {
-	aq_hw_write_reg(aq_hw, rx_dma_desc_base_addrmsw_adr(descriptor),
+	aq_hw_write_reg(aq_hw, HW_ATL_RX_DMA_DESC_BASE_ADDRMSW_ADR(descriptor),
 			rx_dma_desc_base_addrmsw);
 }
 
-u32 reg_rx_dma_desc_status_get(struct aq_hw_s *aq_hw, u32 descriptor)
+u32 hw_atl_reg_rx_dma_desc_status_get(struct aq_hw_s *aq_hw, u32 descriptor)
 {
-	return aq_hw_read_reg(aq_hw, rx_dma_desc_stat_adr(descriptor));
+	return aq_hw_read_reg(aq_hw, HW_ATL_RX_DMA_DESC_STAT_ADR(descriptor));
 }
 
-void reg_rx_dma_desc_tail_ptr_set(struct aq_hw_s *aq_hw,
-				  u32 rx_dma_desc_tail_ptr, u32 descriptor)
+void hw_atl_reg_rx_dma_desc_tail_ptr_set(struct aq_hw_s *aq_hw,
+					 u32 rx_dma_desc_tail_ptr,
+					 u32 descriptor)
 {
-	aq_hw_write_reg(aq_hw, rx_dma_desc_tail_ptr_adr(descriptor),
+	aq_hw_write_reg(aq_hw, HW_ATL_RX_DMA_DESC_TAIL_PTR_ADR(descriptor),
 			rx_dma_desc_tail_ptr);
 }
 
-void reg_rx_flr_mcst_flr_msk_set(struct aq_hw_s *aq_hw, u32 rx_flr_mcst_flr_msk)
+void hw_atl_reg_rx_flr_mcst_flr_msk_set(struct aq_hw_s *aq_hw,
+					u32 rx_flr_mcst_flr_msk)
 {
-	aq_hw_write_reg(aq_hw, rx_flr_mcst_flr_msk_adr, rx_flr_mcst_flr_msk);
+	aq_hw_write_reg(aq_hw, HW_ATL_RX_FLR_MCST_FLR_MSK_ADR,
+			rx_flr_mcst_flr_msk);
 }
 
-void reg_rx_flr_mcst_flr_set(struct aq_hw_s *aq_hw, u32 rx_flr_mcst_flr,
-			     u32 filter)
+void hw_atl_reg_rx_flr_mcst_flr_set(struct aq_hw_s *aq_hw, u32 rx_flr_mcst_flr,
+				    u32 filter)
 {
-	aq_hw_write_reg(aq_hw, rx_flr_mcst_flr_adr(filter), rx_flr_mcst_flr);
+	aq_hw_write_reg(aq_hw, HW_ATL_RX_FLR_MCST_FLR_ADR(filter),
+			rx_flr_mcst_flr);
 }
 
-void reg_rx_flr_rss_control1set(struct aq_hw_s *aq_hw, u32 rx_flr_rss_control1)
+void hw_atl_reg_rx_flr_rss_control1set(struct aq_hw_s *aq_hw,
+				       u32 rx_flr_rss_control1)
 {
-	aq_hw_write_reg(aq_hw, rx_flr_rss_control1_adr, rx_flr_rss_control1);
+	aq_hw_write_reg(aq_hw, HW_ATL_RX_FLR_RSS_CONTROL1_ADR,
+			rx_flr_rss_control1);
 }
 
-void reg_rx_flr_control2_set(struct aq_hw_s *aq_hw, u32 rx_filter_control2)
+void hw_atl_reg_rx_flr_control2_set(struct aq_hw_s *aq_hw,
+				    u32 rx_filter_control2)
 {
-	aq_hw_write_reg(aq_hw, rx_flr_control2_adr, rx_filter_control2);
+	aq_hw_write_reg(aq_hw, HW_ATL_RX_FLR_CONTROL2_ADR, rx_filter_control2);
 }
 
-void reg_rx_intr_moder_ctrl_set(struct aq_hw_s *aq_hw,
-				u32 rx_intr_moderation_ctl,
-				u32 queue)
+void hw_atl_reg_rx_intr_moder_ctrl_set(struct aq_hw_s *aq_hw,
+				       u32 rx_intr_moderation_ctl,
+				       u32 queue)
 {
-	aq_hw_write_reg(aq_hw, rx_intr_moderation_ctl_adr(queue),
+	aq_hw_write_reg(aq_hw, HW_ATL_RX_INTR_MODERATION_CTL_ADR(queue),
 			rx_intr_moderation_ctl);
 }
 
-void reg_tx_dma_debug_ctl_set(struct aq_hw_s *aq_hw, u32 tx_dma_debug_ctl)
+void hw_atl_reg_tx_dma_debug_ctl_set(struct aq_hw_s *aq_hw,
+				     u32 tx_dma_debug_ctl)
 {
-	aq_hw_write_reg(aq_hw, tx_dma_debug_ctl_adr, tx_dma_debug_ctl);
+	aq_hw_write_reg(aq_hw, HW_ATL_TX_DMA_DEBUG_CTL_ADR, tx_dma_debug_ctl);
 }
 
-void reg_tx_dma_desc_base_addresslswset(struct aq_hw_s *aq_hw,
-					u32 tx_dma_desc_base_addrlsw,
-					u32 descriptor)
+void hw_atl_reg_tx_dma_desc_base_addresslswset(struct aq_hw_s *aq_hw,
+					       u32 tx_dma_desc_base_addrlsw,
+					       u32 descriptor)
 {
-	aq_hw_write_reg(aq_hw, tx_dma_desc_base_addrlsw_adr(descriptor),
+	aq_hw_write_reg(aq_hw, HW_ATL_TX_DMA_DESC_BASE_ADDRLSW_ADR(descriptor),
 			tx_dma_desc_base_addrlsw);
 }
 
-void reg_tx_dma_desc_base_addressmswset(struct aq_hw_s *aq_hw,
-					u32 tx_dma_desc_base_addrmsw,
-					u32 descriptor)
+void hw_atl_reg_tx_dma_desc_base_addressmswset(struct aq_hw_s *aq_hw,
+					       u32 tx_dma_desc_base_addrmsw,
+					       u32 descriptor)
 {
-	aq_hw_write_reg(aq_hw, tx_dma_desc_base_addrmsw_adr(descriptor),
+	aq_hw_write_reg(aq_hw, HW_ATL_TX_DMA_DESC_BASE_ADDRMSW_ADR(descriptor),
 			tx_dma_desc_base_addrmsw);
 }
 
-void reg_tx_dma_desc_tail_ptr_set(struct aq_hw_s *aq_hw,
-				  u32 tx_dma_desc_tail_ptr, u32 descriptor)
+void hw_atl_reg_tx_dma_desc_tail_ptr_set(struct aq_hw_s *aq_hw,
+					 u32 tx_dma_desc_tail_ptr,
+					 u32 descriptor)
 {
-	aq_hw_write_reg(aq_hw, tx_dma_desc_tail_ptr_adr(descriptor),
+	aq_hw_write_reg(aq_hw, HW_ATL_TX_DMA_DESC_TAIL_PTR_ADR(descriptor),
 			tx_dma_desc_tail_ptr);
 }
 
-void reg_tx_intr_moder_ctrl_set(struct aq_hw_s *aq_hw,
-				u32 tx_intr_moderation_ctl,
-				u32 queue)
+void hw_atl_reg_tx_intr_moder_ctrl_set(struct aq_hw_s *aq_hw,
+				       u32 tx_intr_moderation_ctl,
+				       u32 queue)
 {
-	aq_hw_write_reg(aq_hw, tx_intr_moderation_ctl_adr(queue),
+	aq_hw_write_reg(aq_hw, HW_ATL_TX_INTR_MODERATION_CTL_ADR(queue),
 			tx_intr_moderation_ctl);
 }
 
 /* RPB: rx packet buffer */
-void rpb_dma_sys_lbk_set(struct aq_hw_s *aq_hw, u32 dma_sys_lbk)
+void hw_atl_rpb_dma_sys_lbk_set(struct aq_hw_s *aq_hw, u32 dma_sys_lbk)
 {
-	aq_hw_write_reg_bit(aq_hw, rpb_dma_sys_lbk_adr,
-			    rpb_dma_sys_lbk_msk,
-			    rpb_dma_sys_lbk_shift, dma_sys_lbk);
+	aq_hw_write_reg_bit(aq_hw, HW_ATL_RPB_DMA_SYS_LBK_ADR,
+			    HW_ATL_RPB_DMA_SYS_LBK_MSK,
+			    HW_ATL_RPB_DMA_SYS_LBK_SHIFT, dma_sys_lbk);
 }
 
-void rpb_rpf_rx_traf_class_mode_set(struct aq_hw_s *aq_hw,
-				    u32 rx_traf_class_mode)
+void hw_atl_rpb_rpf_rx_traf_class_mode_set(struct aq_hw_s *aq_hw,
+					   u32 rx_traf_class_mode)
 {
-	aq_hw_write_reg_bit(aq_hw, rpb_rpf_rx_tc_mode_adr,
-			    rpb_rpf_rx_tc_mode_msk,
-			    rpb_rpf_rx_tc_mode_shift,
+	aq_hw_write_reg_bit(aq_hw, HW_ATL_RPB_RPF_RX_TC_MODE_ADR,
+			    HW_ATL_RPB_RPF_RX_TC_MODE_MSK,
+			    HW_ATL_RPB_RPF_RX_TC_MODE_SHIFT,
 			    rx_traf_class_mode);
 }
 
-void rpb_rx_buff_en_set(struct aq_hw_s *aq_hw, u32 rx_buff_en)
+void hw_atl_rpb_rx_buff_en_set(struct aq_hw_s *aq_hw, u32 rx_buff_en)
 {
-	aq_hw_write_reg_bit(aq_hw, rpb_rx_buf_en_adr, rpb_rx_buf_en_msk,
-			    rpb_rx_buf_en_shift, rx_buff_en);
+	aq_hw_write_reg_bit(aq_hw, HW_ATL_RPB_RX_BUF_EN_ADR,
+			    HW_ATL_RPB_RX_BUF_EN_MSK,
+			    HW_ATL_RPB_RX_BUF_EN_SHIFT, rx_buff_en);
 }
 
-void rpb_rx_buff_hi_threshold_per_tc_set(struct aq_hw_s *aq_hw,
-					 u32 rx_buff_hi_threshold_per_tc,
-					 u32 buffer)
+void hw_atl_rpb_rx_buff_hi_threshold_per_tc_set(struct aq_hw_s *aq_hw,
+						u32 rx_buff_hi_threshold_per_tc,
+						u32 buffer)
 {
-	aq_hw_write_reg_bit(aq_hw, rpb_rxbhi_thresh_adr(buffer),
-			    rpb_rxbhi_thresh_msk, rpb_rxbhi_thresh_shift,
+	aq_hw_write_reg_bit(aq_hw, HW_ATL_RPB_RXBHI_THRESH_ADR(buffer),
+			    HW_ATL_RPB_RXBHI_THRESH_MSK,
+			    HW_ATL_RPB_RXBHI_THRESH_SHIFT,
 			    rx_buff_hi_threshold_per_tc);
 }
 
-void rpb_rx_buff_lo_threshold_per_tc_set(struct aq_hw_s *aq_hw,
-					 u32 rx_buff_lo_threshold_per_tc,
-					 u32 buffer)
+void hw_atl_rpb_rx_buff_lo_threshold_per_tc_set(struct aq_hw_s *aq_hw,
+						u32 rx_buff_lo_threshold_per_tc,
+						u32 buffer)
 {
-	aq_hw_write_reg_bit(aq_hw, rpb_rxblo_thresh_adr(buffer),
-			    rpb_rxblo_thresh_msk,
-			    rpb_rxblo_thresh_shift,
+	aq_hw_write_reg_bit(aq_hw, HW_ATL_RPB_RXBLO_THRESH_ADR(buffer),
+			    HW_ATL_RPB_RXBLO_THRESH_MSK,
+			    HW_ATL_RPB_RXBLO_THRESH_SHIFT,
 			    rx_buff_lo_threshold_per_tc);
 }
 
-void rpb_rx_flow_ctl_mode_set(struct aq_hw_s *aq_hw, u32 rx_flow_ctl_mode)
+void hw_atl_rpb_rx_flow_ctl_mode_set(struct aq_hw_s *aq_hw,
+				     u32 rx_flow_ctl_mode)
 {
-	aq_hw_write_reg_bit(aq_hw, rpb_rx_fc_mode_adr,
-			    rpb_rx_fc_mode_msk,
-			    rpb_rx_fc_mode_shift, rx_flow_ctl_mode);
+	aq_hw_write_reg_bit(aq_hw, HW_ATL_RPB_RX_FC_MODE_ADR,
+			    HW_ATL_RPB_RX_FC_MODE_MSK,
+			    HW_ATL_RPB_RX_FC_MODE_SHIFT, rx_flow_ctl_mode);
 }
 
-void rpb_rx_pkt_buff_size_per_tc_set(struct aq_hw_s *aq_hw,
-				     u32 rx_pkt_buff_size_per_tc, u32 buffer)
+void hw_atl_rpb_rx_pkt_buff_size_per_tc_set(struct aq_hw_s *aq_hw,
+					    u32 rx_pkt_buff_size_per_tc,
+					    u32 buffer)
 {
-	aq_hw_write_reg_bit(aq_hw, rpb_rxbbuf_size_adr(buffer),
-			    rpb_rxbbuf_size_msk, rpb_rxbbuf_size_shift,
+	aq_hw_write_reg_bit(aq_hw, HW_ATL_RPB_RXBBUF_SIZE_ADR(buffer),
+			    HW_ATL_RPB_RXBBUF_SIZE_MSK,
+			    HW_ATL_RPB_RXBBUF_SIZE_SHIFT,
 			    rx_pkt_buff_size_per_tc);
 }
 
-void rpb_rx_xoff_en_per_tc_set(struct aq_hw_s *aq_hw, u32 rx_xoff_en_per_tc,
-			       u32 buffer)
+void hw_atl_rpb_rx_xoff_en_per_tc_set(struct aq_hw_s *aq_hw,
+				      u32 rx_xoff_en_per_tc,
+				      u32 buffer)
 {
-	aq_hw_write_reg_bit(aq_hw, rpb_rxbxoff_en_adr(buffer),
-			    rpb_rxbxoff_en_msk, rpb_rxbxoff_en_shift,
+	aq_hw_write_reg_bit(aq_hw, HW_ATL_RPB_RXBXOFF_EN_ADR(buffer),
+			    HW_ATL_RPB_RXBXOFF_EN_MSK,
+			    HW_ATL_RPB_RXBXOFF_EN_SHIFT,
 			    rx_xoff_en_per_tc);
 }
 
 /* rpf */
 
-void rpfl2broadcast_count_threshold_set(struct aq_hw_s *aq_hw,
-					u32 l2broadcast_count_threshold)
+void hw_atl_rpfl2broadcast_count_threshold_set(struct aq_hw_s *aq_hw,
+					       u32 l2broadcast_count_threshold)
 {
-	aq_hw_write_reg_bit(aq_hw, rpfl2bc_thresh_adr,
-			    rpfl2bc_thresh_msk,
-			    rpfl2bc_thresh_shift,
+	aq_hw_write_reg_bit(aq_hw, HW_ATL_RPFL2BC_THRESH_ADR,
+			    HW_ATL_RPFL2BC_THRESH_MSK,
+			    HW_ATL_RPFL2BC_THRESH_SHIFT,
 			    l2broadcast_count_threshold);
 }
 
-void rpfl2broadcast_en_set(struct aq_hw_s *aq_hw, u32 l2broadcast_en)
+void hw_atl_rpfl2broadcast_en_set(struct aq_hw_s *aq_hw, u32 l2broadcast_en)
 {
-	aq_hw_write_reg_bit(aq_hw, rpfl2bc_en_adr, rpfl2bc_en_msk,
-			    rpfl2bc_en_shift, l2broadcast_en);
+	aq_hw_write_reg_bit(aq_hw, HW_ATL_RPFL2BC_EN_ADR, HW_ATL_RPFL2BC_EN_MSK,
+			    HW_ATL_RPFL2BC_EN_SHIFT, l2broadcast_en);
 }
 
-void rpfl2broadcast_flr_act_set(struct aq_hw_s *aq_hw, u32 l2broadcast_flr_act)
+void hw_atl_rpfl2broadcast_flr_act_set(struct aq_hw_s *aq_hw,
+				       u32 l2broadcast_flr_act)
 {
-	aq_hw_write_reg_bit(aq_hw, rpfl2bc_act_adr, rpfl2bc_act_msk,
-			    rpfl2bc_act_shift, l2broadcast_flr_act);
+	aq_hw_write_reg_bit(aq_hw, HW_ATL_RPFL2BC_ACT_ADR,
+			    HW_ATL_RPFL2BC_ACT_MSK,
+			    HW_ATL_RPFL2BC_ACT_SHIFT, l2broadcast_flr_act);
 }
 
-void rpfl2multicast_flr_en_set(struct aq_hw_s *aq_hw, u32 l2multicast_flr_en,
-			       u32 filter)
+void hw_atl_rpfl2multicast_flr_en_set(struct aq_hw_s *aq_hw,
+				      u32 l2multicast_flr_en,
+				      u32 filter)
 {
-	aq_hw_write_reg_bit(aq_hw, rpfl2mc_enf_adr(filter),
-			    rpfl2mc_enf_msk,
-			    rpfl2mc_enf_shift, l2multicast_flr_en);
+	aq_hw_write_reg_bit(aq_hw, HW_ATL_RPFL2MC_ENF_ADR(filter),
+			    HW_ATL_RPFL2MC_ENF_MSK,
+			    HW_ATL_RPFL2MC_ENF_SHIFT, l2multicast_flr_en);
 }
 
-void rpfl2promiscuous_mode_en_set(struct aq_hw_s *aq_hw,
-				  u32 l2promiscuous_mode_en)
+void hw_atl_rpfl2promiscuous_mode_en_set(struct aq_hw_s *aq_hw,
+					 u32 l2promiscuous_mode_en)
 {
-	aq_hw_write_reg_bit(aq_hw, rpfl2promis_mode_adr,
-			    rpfl2promis_mode_msk,
-			    rpfl2promis_mode_shift,
+	aq_hw_write_reg_bit(aq_hw, HW_ATL_RPFL2PROMIS_MODE_ADR,
+			    HW_ATL_RPFL2PROMIS_MODE_MSK,
+			    HW_ATL_RPFL2PROMIS_MODE_SHIFT,
 			    l2promiscuous_mode_en);
 }
 
-void rpfl2unicast_flr_act_set(struct aq_hw_s *aq_hw, u32 l2unicast_flr_act,
-			      u32 filter)
+void hw_atl_rpfl2unicast_flr_act_set(struct aq_hw_s *aq_hw,
+				     u32 l2unicast_flr_act,
+				     u32 filter)
 {
-	aq_hw_write_reg_bit(aq_hw, rpfl2uc_actf_adr(filter),
-			    rpfl2uc_actf_msk, rpfl2uc_actf_shift,
+	aq_hw_write_reg_bit(aq_hw, HW_ATL_RPFL2UC_ACTF_ADR(filter),
+			    HW_ATL_RPFL2UC_ACTF_MSK, HW_ATL_RPFL2UC_ACTF_SHIFT,
 			    l2unicast_flr_act);
 }
 
-void rpfl2_uc_flr_en_set(struct aq_hw_s *aq_hw, u32 l2unicast_flr_en,
-			 u32 filter)
+void hw_atl_rpfl2_uc_flr_en_set(struct aq_hw_s *aq_hw, u32 l2unicast_flr_en,
+				u32 filter)
 {
-	aq_hw_write_reg_bit(aq_hw, rpfl2uc_enf_adr(filter),
-			    rpfl2uc_enf_msk,
-			    rpfl2uc_enf_shift, l2unicast_flr_en);
+	aq_hw_write_reg_bit(aq_hw, HW_ATL_RPFL2UC_ENF_ADR(filter),
+			    HW_ATL_RPFL2UC_ENF_MSK,
+			    HW_ATL_RPFL2UC_ENF_SHIFT, l2unicast_flr_en);
 }
 
-void rpfl2unicast_dest_addresslsw_set(struct aq_hw_s *aq_hw,
-				      u32 l2unicast_dest_addresslsw,
-				      u32 filter)
+void hw_atl_rpfl2unicast_dest_addresslsw_set(struct aq_hw_s *aq_hw,
+					     u32 l2unicast_dest_addresslsw,
+					     u32 filter)
 {
-	aq_hw_write_reg(aq_hw, rpfl2uc_daflsw_adr(filter),
+	aq_hw_write_reg(aq_hw, HW_ATL_RPFL2UC_DAFLSW_ADR(filter),
 			l2unicast_dest_addresslsw);
 }
 
-void rpfl2unicast_dest_addressmsw_set(struct aq_hw_s *aq_hw,
-				      u32 l2unicast_dest_addressmsw,
-				      u32 filter)
+void hw_atl_rpfl2unicast_dest_addressmsw_set(struct aq_hw_s *aq_hw,
+					     u32 l2unicast_dest_addressmsw,
+					     u32 filter)
 {
-	aq_hw_write_reg_bit(aq_hw, rpfl2uc_dafmsw_adr(filter),
-			    rpfl2uc_dafmsw_msk, rpfl2uc_dafmsw_shift,
+	aq_hw_write_reg_bit(aq_hw, HW_ATL_RPFL2UC_DAFMSW_ADR(filter),
+			    HW_ATL_RPFL2UC_DAFMSW_MSK,
+			    HW_ATL_RPFL2UC_DAFMSW_SHIFT,
 			    l2unicast_dest_addressmsw);
 }
 
-void rpfl2_accept_all_mc_packets_set(struct aq_hw_s *aq_hw,
-				     u32 l2_accept_all_mc_packets)
+void hw_atl_rpfl2_accept_all_mc_packets_set(struct aq_hw_s *aq_hw,
+					    u32 l2_accept_all_mc_packets)
 {
-	aq_hw_write_reg_bit(aq_hw, rpfl2mc_accept_all_adr,
-			    rpfl2mc_accept_all_msk,
-			    rpfl2mc_accept_all_shift,
+	aq_hw_write_reg_bit(aq_hw, HW_ATL_RPFL2MC_ACCEPT_ALL_ADR,
+			    HW_ATL_RPFL2MC_ACCEPT_ALL_MSK,
+			    HW_ATL_RPFL2MC_ACCEPT_ALL_SHIFT,
 			    l2_accept_all_mc_packets);
 }
 
-void rpf_rpb_user_priority_tc_map_set(struct aq_hw_s *aq_hw,
-				      u32 user_priority_tc_map, u32 tc)
+void hw_atl_rpf_rpb_user_priority_tc_map_set(struct aq_hw_s *aq_hw,
+					     u32 user_priority_tc_map, u32 tc)
 {
 /* register address for bitfield rx_tc_up{t}[2:0] */
 	static u32 rpf_rpb_rx_tc_upt_adr[8] = {
-			0x000054c4U, 0x000054c4U, 0x000054c4U, 0x000054c4U,
-			0x000054c4U, 0x000054c4U, 0x000054c4U, 0x000054c4U
+			0x000054C4U, 0x000054C4U, 0x000054C4U, 0x000054C4U,
+			0x000054C4U, 0x000054C4U, 0x000054C4U, 0x000054C4U
 		};
 
 /* bitmask for bitfield rx_tc_up{t}[2:0] */
@@ -711,273 +750,290 @@ void rpf_rpb_user_priority_tc_map_set(struct aq_hw_s *aq_hw,
 			    user_priority_tc_map);
 }
 
-void rpf_rss_key_addr_set(struct aq_hw_s *aq_hw, u32 rss_key_addr)
+void hw_atl_rpf_rss_key_addr_set(struct aq_hw_s *aq_hw, u32 rss_key_addr)
 {
-	aq_hw_write_reg_bit(aq_hw, rpf_rss_key_addr_adr,
-			    rpf_rss_key_addr_msk,
-			    rpf_rss_key_addr_shift,
+	aq_hw_write_reg_bit(aq_hw, HW_ATL_RPF_RSS_KEY_ADDR_ADR,
+			    HW_ATL_RPF_RSS_KEY_ADDR_MSK,
+			    HW_ATL_RPF_RSS_KEY_ADDR_SHIFT,
 			    rss_key_addr);
 }
 
-void rpf_rss_key_wr_data_set(struct aq_hw_s *aq_hw, u32 rss_key_wr_data)
+void hw_atl_rpf_rss_key_wr_data_set(struct aq_hw_s *aq_hw, u32 rss_key_wr_data)
 {
-	aq_hw_write_reg(aq_hw, rpf_rss_key_wr_data_adr,
+	aq_hw_write_reg(aq_hw, HW_ATL_RPF_RSS_KEY_WR_DATA_ADR,
 			rss_key_wr_data);
 }
 
-u32 rpf_rss_key_wr_en_get(struct aq_hw_s *aq_hw)
+u32 hw_atl_rpf_rss_key_wr_en_get(struct aq_hw_s *aq_hw)
 {
-	return aq_hw_read_reg_bit(aq_hw, rpf_rss_key_wr_eni_adr,
-				  rpf_rss_key_wr_eni_msk,
-				  rpf_rss_key_wr_eni_shift);
+	return aq_hw_read_reg_bit(aq_hw, HW_ATL_RPF_RSS_KEY_WR_ENI_ADR,
+				  HW_ATL_RPF_RSS_KEY_WR_ENI_MSK,
+				  HW_ATL_RPF_RSS_KEY_WR_ENI_SHIFT);
 }
 
-void rpf_rss_key_wr_en_set(struct aq_hw_s *aq_hw, u32 rss_key_wr_en)
+void hw_atl_rpf_rss_key_wr_en_set(struct aq_hw_s *aq_hw, u32 rss_key_wr_en)
 {
-	aq_hw_write_reg_bit(aq_hw, rpf_rss_key_wr_eni_adr,
-			    rpf_rss_key_wr_eni_msk,
-			    rpf_rss_key_wr_eni_shift,
+	aq_hw_write_reg_bit(aq_hw, HW_ATL_RPF_RSS_KEY_WR_ENI_ADR,
+			    HW_ATL_RPF_RSS_KEY_WR_ENI_MSK,
+			    HW_ATL_RPF_RSS_KEY_WR_ENI_SHIFT,
 			    rss_key_wr_en);
 }
 
-void rpf_rss_redir_tbl_addr_set(struct aq_hw_s *aq_hw, u32 rss_redir_tbl_addr)
+void hw_atl_rpf_rss_redir_tbl_addr_set(struct aq_hw_s *aq_hw,
+				       u32 rss_redir_tbl_addr)
 {
-	aq_hw_write_reg_bit(aq_hw, rpf_rss_redir_addr_adr,
-			    rpf_rss_redir_addr_msk,
-			    rpf_rss_redir_addr_shift, rss_redir_tbl_addr);
+	aq_hw_write_reg_bit(aq_hw, HW_ATL_RPF_RSS_REDIR_ADDR_ADR,
+			    HW_ATL_RPF_RSS_REDIR_ADDR_MSK,
+			    HW_ATL_RPF_RSS_REDIR_ADDR_SHIFT,
+			    rss_redir_tbl_addr);
 }
 
-void rpf_rss_redir_tbl_wr_data_set(struct aq_hw_s *aq_hw,
-				   u32 rss_redir_tbl_wr_data)
+void hw_atl_rpf_rss_redir_tbl_wr_data_set(struct aq_hw_s *aq_hw,
+					  u32 rss_redir_tbl_wr_data)
 {
-	aq_hw_write_reg_bit(aq_hw, rpf_rss_redir_wr_data_adr,
-			    rpf_rss_redir_wr_data_msk,
-			    rpf_rss_redir_wr_data_shift,
+	aq_hw_write_reg_bit(aq_hw, HW_ATL_RPF_RSS_REDIR_WR_DATA_ADR,
+			    HW_ATL_RPF_RSS_REDIR_WR_DATA_MSK,
+			    HW_ATL_RPF_RSS_REDIR_WR_DATA_SHIFT,
 			    rss_redir_tbl_wr_data);
 }
 
-u32 rpf_rss_redir_wr_en_get(struct aq_hw_s *aq_hw)
+u32 hw_atl_rpf_rss_redir_wr_en_get(struct aq_hw_s *aq_hw)
 {
-	return aq_hw_read_reg_bit(aq_hw, rpf_rss_redir_wr_eni_adr,
-				  rpf_rss_redir_wr_eni_msk,
-				  rpf_rss_redir_wr_eni_shift);
+	return aq_hw_read_reg_bit(aq_hw, HW_ATL_RPF_RSS_REDIR_WR_ENI_ADR,
+				  HW_ATL_RPF_RSS_REDIR_WR_ENI_MSK,
+				  HW_ATL_RPF_RSS_REDIR_WR_ENI_SHIFT);
 }
 
-void rpf_rss_redir_wr_en_set(struct aq_hw_s *aq_hw, u32 rss_redir_wr_en)
+void hw_atl_rpf_rss_redir_wr_en_set(struct aq_hw_s *aq_hw, u32 rss_redir_wr_en)
 {
-	aq_hw_write_reg_bit(aq_hw, rpf_rss_redir_wr_eni_adr,
-			    rpf_rss_redir_wr_eni_msk,
-			    rpf_rss_redir_wr_eni_shift, rss_redir_wr_en);
+	aq_hw_write_reg_bit(aq_hw, HW_ATL_RPF_RSS_REDIR_WR_ENI_ADR,
+			    HW_ATL_RPF_RSS_REDIR_WR_ENI_MSK,
+			    HW_ATL_RPF_RSS_REDIR_WR_ENI_SHIFT, rss_redir_wr_en);
 }
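
/*
 * Usage sketch (an assumption, not taken from this patch): callers such as
 * the B0 RSS setup typically combine the redirection-table accessors above
 * into a stage/latch/strobe/poll sequence, roughly:
 */
	hw_atl_rpf_rss_redir_tbl_wr_data_set(aq_hw, data);
	hw_atl_rpf_rss_redir_tbl_addr_set(aq_hw, addr);
	hw_atl_rpf_rss_redir_wr_en_set(aq_hw, 1U);
	while (hw_atl_rpf_rss_redir_wr_en_get(aq_hw))
		;	/* the real driver bounds this poll with a timeout */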
 
-void rpf_tpo_to_rpf_sys_lbk_set(struct aq_hw_s *aq_hw, u32 tpo_to_rpf_sys_lbk)
+void hw_atl_rpf_tpo_to_rpf_sys_lbk_set(struct aq_hw_s *aq_hw,
+				       u32 tpo_to_rpf_sys_lbk)
 {
-	aq_hw_write_reg_bit(aq_hw, rpf_tpo_rpf_sys_lbk_adr,
-			    rpf_tpo_rpf_sys_lbk_msk,
-			    rpf_tpo_rpf_sys_lbk_shift,
+	aq_hw_write_reg_bit(aq_hw, HW_ATL_RPF_TPO_RPF_SYS_LBK_ADR,
+			    HW_ATL_RPF_TPO_RPF_SYS_LBK_MSK,
+			    HW_ATL_RPF_TPO_RPF_SYS_LBK_SHIFT,
 			    tpo_to_rpf_sys_lbk);
 }
 
-void rpf_vlan_inner_etht_set(struct aq_hw_s *aq_hw, u32 vlan_inner_etht)
+void hw_atl_rpf_vlan_inner_etht_set(struct aq_hw_s *aq_hw, u32 vlan_inner_etht)
 {
-	aq_hw_write_reg_bit(aq_hw, rpf_vl_inner_tpid_adr,
-			    rpf_vl_inner_tpid_msk,
-			    rpf_vl_inner_tpid_shift,
+	aq_hw_write_reg_bit(aq_hw, HW_ATL_RPF_VL_INNER_TPID_ADR,
+			    HW_ATL_RPF_VL_INNER_TPID_MSK,
+			    HW_ATL_RPF_VL_INNER_TPID_SHIFT,
 			    vlan_inner_etht);
 }
 
-void rpf_vlan_outer_etht_set(struct aq_hw_s *aq_hw, u32 vlan_outer_etht)
+void hw_atl_rpf_vlan_outer_etht_set(struct aq_hw_s *aq_hw, u32 vlan_outer_etht)
 {
-	aq_hw_write_reg_bit(aq_hw, rpf_vl_outer_tpid_adr,
-			    rpf_vl_outer_tpid_msk,
-			    rpf_vl_outer_tpid_shift,
+	aq_hw_write_reg_bit(aq_hw, HW_ATL_RPF_VL_OUTER_TPID_ADR,
+			    HW_ATL_RPF_VL_OUTER_TPID_MSK,
+			    HW_ATL_RPF_VL_OUTER_TPID_SHIFT,
 			    vlan_outer_etht);
 }
 
-void rpf_vlan_prom_mode_en_set(struct aq_hw_s *aq_hw, u32 vlan_prom_mode_en)
+void hw_atl_rpf_vlan_prom_mode_en_set(struct aq_hw_s *aq_hw,
+				      u32 vlan_prom_mode_en)
 {
-	aq_hw_write_reg_bit(aq_hw, rpf_vl_promis_mode_adr,
-			    rpf_vl_promis_mode_msk,
-			    rpf_vl_promis_mode_shift,
+	aq_hw_write_reg_bit(aq_hw, HW_ATL_RPF_VL_PROMIS_MODE_ADR,
+			    HW_ATL_RPF_VL_PROMIS_MODE_MSK,
+			    HW_ATL_RPF_VL_PROMIS_MODE_SHIFT,
 			    vlan_prom_mode_en);
 }
 
-void rpf_vlan_accept_untagged_packets_set(struct aq_hw_s *aq_hw,
-					  u32 vlan_accept_untagged_packets)
+void hw_atl_rpf_vlan_accept_untagged_packets_set(struct aq_hw_s *aq_hw,
+						 u32 vlan_acc_untagged_packets)
 {
-	aq_hw_write_reg_bit(aq_hw, rpf_vl_accept_untagged_mode_adr,
-			    rpf_vl_accept_untagged_mode_msk,
-			    rpf_vl_accept_untagged_mode_shift,
-			    vlan_accept_untagged_packets);
+	aq_hw_write_reg_bit(aq_hw, HW_ATL_RPF_VL_ACCEPT_UNTAGGED_MODE_ADR,
+			    HW_ATL_RPF_VL_ACCEPT_UNTAGGED_MODE_MSK,
+			    HW_ATL_RPF_VL_ACCEPT_UNTAGGED_MODE_SHIFT,
+			    vlan_acc_untagged_packets);
 }
 
-void rpf_vlan_untagged_act_set(struct aq_hw_s *aq_hw, u32 vlan_untagged_act)
+void hw_atl_rpf_vlan_untagged_act_set(struct aq_hw_s *aq_hw,
+				      u32 vlan_untagged_act)
 {
-	aq_hw_write_reg_bit(aq_hw, rpf_vl_untagged_act_adr,
-			    rpf_vl_untagged_act_msk,
-			    rpf_vl_untagged_act_shift,
+	aq_hw_write_reg_bit(aq_hw, HW_ATL_RPF_VL_UNTAGGED_ACT_ADR,
+			    HW_ATL_RPF_VL_UNTAGGED_ACT_MSK,
+			    HW_ATL_RPF_VL_UNTAGGED_ACT_SHIFT,
 			    vlan_untagged_act);
 }
 
-void rpf_vlan_flr_en_set(struct aq_hw_s *aq_hw, u32 vlan_flr_en, u32 filter)
+void hw_atl_rpf_vlan_flr_en_set(struct aq_hw_s *aq_hw, u32 vlan_flr_en,
+				u32 filter)
 {
-	aq_hw_write_reg_bit(aq_hw, rpf_vl_en_f_adr(filter),
-			    rpf_vl_en_f_msk,
-			    rpf_vl_en_f_shift,
+	aq_hw_write_reg_bit(aq_hw, HW_ATL_RPF_VL_EN_F_ADR(filter),
+			    HW_ATL_RPF_VL_EN_F_MSK,
+			    HW_ATL_RPF_VL_EN_F_SHIFT,
 			    vlan_flr_en);
 }
 
-void rpf_vlan_flr_act_set(struct aq_hw_s *aq_hw, u32 vlan_flr_act, u32 filter)
+void hw_atl_rpf_vlan_flr_act_set(struct aq_hw_s *aq_hw, u32 vlan_flr_act,
+				 u32 filter)
 {
-	aq_hw_write_reg_bit(aq_hw, rpf_vl_act_f_adr(filter),
-			    rpf_vl_act_f_msk,
-			    rpf_vl_act_f_shift,
+	aq_hw_write_reg_bit(aq_hw, HW_ATL_RPF_VL_ACT_F_ADR(filter),
+			    HW_ATL_RPF_VL_ACT_F_MSK,
+			    HW_ATL_RPF_VL_ACT_F_SHIFT,
 			    vlan_flr_act);
 }
 
-void rpf_vlan_id_flr_set(struct aq_hw_s *aq_hw, u32 vlan_id_flr, u32 filter)
+void hw_atl_rpf_vlan_id_flr_set(struct aq_hw_s *aq_hw, u32 vlan_id_flr,
+				u32 filter)
 {
-	aq_hw_write_reg_bit(aq_hw, rpf_vl_id_f_adr(filter),
-			    rpf_vl_id_f_msk,
-			    rpf_vl_id_f_shift,
+	aq_hw_write_reg_bit(aq_hw, HW_ATL_RPF_VL_ID_F_ADR(filter),
+			    HW_ATL_RPF_VL_ID_F_MSK,
+			    HW_ATL_RPF_VL_ID_F_SHIFT,
 			    vlan_id_flr);
 }
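
/*
 * Illustrative assumption: the parameterized HW_ATL_*_ADR(x) macros used
 * throughout expand to a base address plus a fixed per-instance stride,
 * e.g. (base and stride invented here purely for illustration; see
 * hw_atl_llh_internal.h for the real values):
 *
 *	#define HW_ATL_RPF_VL_ID_F_ADR(filter) \
 *		(0x00005290U + (filter) * 0x4U)
 */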
 
-void rpf_etht_flr_en_set(struct aq_hw_s *aq_hw, u32 etht_flr_en, u32 filter)
+void hw_atl_rpf_etht_flr_en_set(struct aq_hw_s *aq_hw, u32 etht_flr_en,
+				u32 filter)
 {
-	aq_hw_write_reg_bit(aq_hw, rpf_et_enf_adr(filter),
-			    rpf_et_enf_msk,
-			    rpf_et_enf_shift, etht_flr_en);
+	aq_hw_write_reg_bit(aq_hw, HW_ATL_RPF_ET_ENF_ADR(filter),
+			    HW_ATL_RPF_ET_ENF_MSK,
+			    HW_ATL_RPF_ET_ENF_SHIFT, etht_flr_en);
 }
 
-void rpf_etht_user_priority_en_set(struct aq_hw_s *aq_hw,
-				   u32 etht_user_priority_en, u32 filter)
+void hw_atl_rpf_etht_user_priority_en_set(struct aq_hw_s *aq_hw,
+					  u32 etht_user_priority_en, u32 filter)
 {
-	aq_hw_write_reg_bit(aq_hw, rpf_et_upfen_adr(filter),
-			    rpf_et_upfen_msk, rpf_et_upfen_shift,
+	aq_hw_write_reg_bit(aq_hw, HW_ATL_RPF_ET_UPFEN_ADR(filter),
+			    HW_ATL_RPF_ET_UPFEN_MSK, HW_ATL_RPF_ET_UPFEN_SHIFT,
 			    etht_user_priority_en);
 }
 
-void rpf_etht_rx_queue_en_set(struct aq_hw_s *aq_hw, u32 etht_rx_queue_en,
-			      u32 filter)
+void hw_atl_rpf_etht_rx_queue_en_set(struct aq_hw_s *aq_hw,
+				     u32 etht_rx_queue_en,
+				     u32 filter)
 {
-	aq_hw_write_reg_bit(aq_hw, rpf_et_rxqfen_adr(filter),
-			    rpf_et_rxqfen_msk, rpf_et_rxqfen_shift,
+	aq_hw_write_reg_bit(aq_hw, HW_ATL_RPF_ET_RXQFEN_ADR(filter),
+			    HW_ATL_RPF_ET_RXQFEN_MSK,
+			    HW_ATL_RPF_ET_RXQFEN_SHIFT,
 			    etht_rx_queue_en);
 }
 
-void rpf_etht_user_priority_set(struct aq_hw_s *aq_hw, u32 etht_user_priority,
-				u32 filter)
+void hw_atl_rpf_etht_user_priority_set(struct aq_hw_s *aq_hw,
+				       u32 etht_user_priority,
+				       u32 filter)
 {
-	aq_hw_write_reg_bit(aq_hw, rpf_et_upf_adr(filter),
-			    rpf_et_upf_msk,
-			    rpf_et_upf_shift, etht_user_priority);
+	aq_hw_write_reg_bit(aq_hw, HW_ATL_RPF_ET_UPF_ADR(filter),
+			    HW_ATL_RPF_ET_UPF_MSK,
+			    HW_ATL_RPF_ET_UPF_SHIFT, etht_user_priority);
 }
 
-void rpf_etht_rx_queue_set(struct aq_hw_s *aq_hw, u32 etht_rx_queue,
-			   u32 filter)
+void hw_atl_rpf_etht_rx_queue_set(struct aq_hw_s *aq_hw, u32 etht_rx_queue,
+				  u32 filter)
 {
-	aq_hw_write_reg_bit(aq_hw, rpf_et_rxqf_adr(filter),
-			    rpf_et_rxqf_msk,
-			    rpf_et_rxqf_shift, etht_rx_queue);
+	aq_hw_write_reg_bit(aq_hw, HW_ATL_RPF_ET_RXQF_ADR(filter),
+			    HW_ATL_RPF_ET_RXQF_MSK,
+			    HW_ATL_RPF_ET_RXQF_SHIFT, etht_rx_queue);
 }
 
-void rpf_etht_mgt_queue_set(struct aq_hw_s *aq_hw, u32 etht_mgt_queue,
-			    u32 filter)
+void hw_atl_rpf_etht_mgt_queue_set(struct aq_hw_s *aq_hw, u32 etht_mgt_queue,
+				   u32 filter)
 {
-	aq_hw_write_reg_bit(aq_hw, rpf_et_mng_rxqf_adr(filter),
-			    rpf_et_mng_rxqf_msk, rpf_et_mng_rxqf_shift,
+	aq_hw_write_reg_bit(aq_hw, HW_ATL_RPF_ET_MNG_RXQF_ADR(filter),
+			    HW_ATL_RPF_ET_MNG_RXQF_MSK,
+			    HW_ATL_RPF_ET_MNG_RXQF_SHIFT,
 			    etht_mgt_queue);
 }
 
-void rpf_etht_flr_act_set(struct aq_hw_s *aq_hw, u32 etht_flr_act, u32 filter)
+void hw_atl_rpf_etht_flr_act_set(struct aq_hw_s *aq_hw, u32 etht_flr_act,
+				 u32 filter)
 {
-	aq_hw_write_reg_bit(aq_hw, rpf_et_actf_adr(filter),
-			    rpf_et_actf_msk,
-			    rpf_et_actf_shift, etht_flr_act);
+	aq_hw_write_reg_bit(aq_hw, HW_ATL_RPF_ET_ACTF_ADR(filter),
+			    HW_ATL_RPF_ET_ACTF_MSK,
+			    HW_ATL_RPF_ET_ACTF_SHIFT, etht_flr_act);
 }
 
-void rpf_etht_flr_set(struct aq_hw_s *aq_hw, u32 etht_flr, u32 filter)
+void hw_atl_rpf_etht_flr_set(struct aq_hw_s *aq_hw, u32 etht_flr, u32 filter)
 {
-	aq_hw_write_reg_bit(aq_hw, rpf_et_valf_adr(filter),
-			    rpf_et_valf_msk,
-			    rpf_et_valf_shift, etht_flr);
+	aq_hw_write_reg_bit(aq_hw, HW_ATL_RPF_ET_VALF_ADR(filter),
+			    HW_ATL_RPF_ET_VALF_MSK,
+			    HW_ATL_RPF_ET_VALF_SHIFT, etht_flr);
 }
 
 /* RPO: rx packet offload */
-void rpo_ipv4header_crc_offload_en_set(struct aq_hw_s *aq_hw,
-				       u32 ipv4header_crc_offload_en)
+void hw_atl_rpo_ipv4header_crc_offload_en_set(struct aq_hw_s *aq_hw,
+					      u32 ipv4header_crc_offload_en)
 {
-	aq_hw_write_reg_bit(aq_hw, rpo_ipv4chk_en_adr,
-			    rpo_ipv4chk_en_msk,
-			    rpo_ipv4chk_en_shift,
+	aq_hw_write_reg_bit(aq_hw, HW_ATL_RPO_IPV4CHK_EN_ADR,
+			    HW_ATL_RPO_IPV4CHK_EN_MSK,
+			    HW_ATL_RPO_IPV4CHK_EN_SHIFT,
 			    ipv4header_crc_offload_en);
 }
 
-void rpo_rx_desc_vlan_stripping_set(struct aq_hw_s *aq_hw,
-				    u32 rx_desc_vlan_stripping, u32 descriptor)
+void hw_atl_rpo_rx_desc_vlan_stripping_set(struct aq_hw_s *aq_hw,
+					   u32 rx_desc_vlan_stripping,
+					   u32 descriptor)
 {
-	aq_hw_write_reg_bit(aq_hw, rpo_descdvl_strip_adr(descriptor),
-			    rpo_descdvl_strip_msk,
-			    rpo_descdvl_strip_shift,
+	aq_hw_write_reg_bit(aq_hw, HW_ATL_RPO_DESCDVL_STRIP_ADR(descriptor),
+			    HW_ATL_RPO_DESCDVL_STRIP_MSK,
+			    HW_ATL_RPO_DESCDVL_STRIP_SHIFT,
 			    rx_desc_vlan_stripping);
 }
 
-void rpo_tcp_udp_crc_offload_en_set(struct aq_hw_s *aq_hw,
-				    u32 tcp_udp_crc_offload_en)
+void hw_atl_rpo_tcp_udp_crc_offload_en_set(struct aq_hw_s *aq_hw,
+					   u32 tcp_udp_crc_offload_en)
 {
-	aq_hw_write_reg_bit(aq_hw, rpol4chk_en_adr, rpol4chk_en_msk,
-			    rpol4chk_en_shift, tcp_udp_crc_offload_en);
+	aq_hw_write_reg_bit(aq_hw, HW_ATL_RPOL4CHK_EN_ADR,
+			    HW_ATL_RPOL4CHK_EN_MSK,
+			    HW_ATL_RPOL4CHK_EN_SHIFT, tcp_udp_crc_offload_en);
 }
 
-void rpo_lro_en_set(struct aq_hw_s *aq_hw, u32 lro_en)
+void hw_atl_rpo_lro_en_set(struct aq_hw_s *aq_hw, u32 lro_en)
 {
-	aq_hw_write_reg(aq_hw, rpo_lro_en_adr, lro_en);
+	aq_hw_write_reg(aq_hw, HW_ATL_RPO_LRO_EN_ADR, lro_en);
 }
 
-void rpo_lro_patch_optimization_en_set(struct aq_hw_s *aq_hw,
-				       u32 lro_patch_optimization_en)
+void hw_atl_rpo_lro_patch_optimization_en_set(struct aq_hw_s *aq_hw,
+					      u32 lro_patch_optimization_en)
 {
-	aq_hw_write_reg_bit(aq_hw, rpo_lro_ptopt_en_adr,
-			    rpo_lro_ptopt_en_msk,
-			    rpo_lro_ptopt_en_shift,
+	aq_hw_write_reg_bit(aq_hw, HW_ATL_RPO_LRO_PTOPT_EN_ADR,
+			    HW_ATL_RPO_LRO_PTOPT_EN_MSK,
+			    HW_ATL_RPO_LRO_PTOPT_EN_SHIFT,
 			    lro_patch_optimization_en);
 }
 
-void rpo_lro_qsessions_lim_set(struct aq_hw_s *aq_hw,
-			       u32 lro_qsessions_lim)
+void hw_atl_rpo_lro_qsessions_lim_set(struct aq_hw_s *aq_hw,
+				      u32 lro_qsessions_lim)
 {
-	aq_hw_write_reg_bit(aq_hw, rpo_lro_qses_lmt_adr,
-			    rpo_lro_qses_lmt_msk,
-			    rpo_lro_qses_lmt_shift,
+	aq_hw_write_reg_bit(aq_hw, HW_ATL_RPO_LRO_QSES_LMT_ADR,
+			    HW_ATL_RPO_LRO_QSES_LMT_MSK,
+			    HW_ATL_RPO_LRO_QSES_LMT_SHIFT,
 			    lro_qsessions_lim);
 }
 
-void rpo_lro_total_desc_lim_set(struct aq_hw_s *aq_hw, u32 lro_total_desc_lim)
+void hw_atl_rpo_lro_total_desc_lim_set(struct aq_hw_s *aq_hw,
+				       u32 lro_total_desc_lim)
 {
-	aq_hw_write_reg_bit(aq_hw, rpo_lro_tot_dsc_lmt_adr,
-			    rpo_lro_tot_dsc_lmt_msk,
-			    rpo_lro_tot_dsc_lmt_shift,
+	aq_hw_write_reg_bit(aq_hw, HW_ATL_RPO_LRO_TOT_DSC_LMT_ADR,
+			    HW_ATL_RPO_LRO_TOT_DSC_LMT_MSK,
+			    HW_ATL_RPO_LRO_TOT_DSC_LMT_SHIFT,
 			    lro_total_desc_lim);
 }
 
-void rpo_lro_min_pay_of_first_pkt_set(struct aq_hw_s *aq_hw,
-				      u32 lro_min_pld_of_first_pkt)
+void hw_atl_rpo_lro_min_pay_of_first_pkt_set(struct aq_hw_s *aq_hw,
+					     u32 lro_min_pld_of_first_pkt)
 {
-	aq_hw_write_reg_bit(aq_hw, rpo_lro_pkt_min_adr,
-			    rpo_lro_pkt_min_msk,
-			    rpo_lro_pkt_min_shift,
+	aq_hw_write_reg_bit(aq_hw, HW_ATL_RPO_LRO_PKT_MIN_ADR,
+			    HW_ATL_RPO_LRO_PKT_MIN_MSK,
+			    HW_ATL_RPO_LRO_PKT_MIN_SHIFT,
 			    lro_min_pld_of_first_pkt);
 }
 
-void rpo_lro_pkt_lim_set(struct aq_hw_s *aq_hw, u32 lro_pkt_lim)
+void hw_atl_rpo_lro_pkt_lim_set(struct aq_hw_s *aq_hw, u32 lro_pkt_lim)
 {
-	aq_hw_write_reg(aq_hw, rpo_lro_rsc_max_adr, lro_pkt_lim);
+	aq_hw_write_reg(aq_hw, HW_ATL_RPO_LRO_RSC_MAX_ADR, lro_pkt_lim);
 }
 
-void rpo_lro_max_num_of_descriptors_set(struct aq_hw_s *aq_hw,
-					u32 lro_max_number_of_descriptors,
-					u32 lro)
+void hw_atl_rpo_lro_max_num_of_descriptors_set(struct aq_hw_s *aq_hw,
+					       u32 lro_max_number_of_descriptors,
+					       u32 lro)
 {
 /* Register address for bitfield lro{L}_des_max[1:0] */
 	static u32 rpo_lro_ldes_max_adr[32] = {
@@ -1017,378 +1073,390 @@ void rpo_lro_max_num_of_descriptors_set(struct aq_hw_s *aq_hw,
 			    lro_max_number_of_descriptors);
 }
 
-void rpo_lro_time_base_divider_set(struct aq_hw_s *aq_hw,
-				   u32 lro_time_base_divider)
+void hw_atl_rpo_lro_time_base_divider_set(struct aq_hw_s *aq_hw,
+					  u32 lro_time_base_divider)
 {
-	aq_hw_write_reg_bit(aq_hw, rpo_lro_tb_div_adr,
-			    rpo_lro_tb_div_msk,
-			    rpo_lro_tb_div_shift,
+	aq_hw_write_reg_bit(aq_hw, HW_ATL_RPO_LRO_TB_DIV_ADR,
+			    HW_ATL_RPO_LRO_TB_DIV_MSK,
+			    HW_ATL_RPO_LRO_TB_DIV_SHIFT,
 			    lro_time_base_divider);
 }
 
-void rpo_lro_inactive_interval_set(struct aq_hw_s *aq_hw,
-				   u32 lro_inactive_interval)
+void hw_atl_rpo_lro_inactive_interval_set(struct aq_hw_s *aq_hw,
+					  u32 lro_inactive_interval)
 {
-	aq_hw_write_reg_bit(aq_hw, rpo_lro_ina_ival_adr,
-			    rpo_lro_ina_ival_msk,
-			    rpo_lro_ina_ival_shift,
+	aq_hw_write_reg_bit(aq_hw, HW_ATL_RPO_LRO_INA_IVAL_ADR,
+			    HW_ATL_RPO_LRO_INA_IVAL_MSK,
+			    HW_ATL_RPO_LRO_INA_IVAL_SHIFT,
 			    lro_inactive_interval);
 }
 
-void rpo_lro_max_coalescing_interval_set(struct aq_hw_s *aq_hw,
-					 u32 lro_max_coalescing_interval)
+void hw_atl_rpo_lro_max_coalescing_interval_set(struct aq_hw_s *aq_hw,
+						u32 lro_max_coal_interval)
 {
-	aq_hw_write_reg_bit(aq_hw, rpo_lro_max_ival_adr,
-			    rpo_lro_max_ival_msk,
-			    rpo_lro_max_ival_shift,
-			    lro_max_coalescing_interval);
+	aq_hw_write_reg_bit(aq_hw, HW_ATL_RPO_LRO_MAX_IVAL_ADR,
+			    HW_ATL_RPO_LRO_MAX_IVAL_MSK,
+			    HW_ATL_RPO_LRO_MAX_IVAL_SHIFT,
+			    lro_max_coal_interval);
 }
 
 /* rx */
-void rx_rx_reg_res_dis_set(struct aq_hw_s *aq_hw, u32 rx_reg_res_dis)
+void hw_atl_rx_rx_reg_res_dis_set(struct aq_hw_s *aq_hw, u32 rx_reg_res_dis)
 {
-	aq_hw_write_reg_bit(aq_hw, rx_reg_res_dsbl_adr,
-			    rx_reg_res_dsbl_msk,
-			    rx_reg_res_dsbl_shift,
+	aq_hw_write_reg_bit(aq_hw, HW_ATL_RX_REG_RES_DSBL_ADR,
+			    HW_ATL_RX_REG_RES_DSBL_MSK,
+			    HW_ATL_RX_REG_RES_DSBL_SHIFT,
 			    rx_reg_res_dis);
 }
 
 /* tdm */
-void tdm_cpu_id_set(struct aq_hw_s *aq_hw, u32 cpuid, u32 dca)
+void hw_atl_tdm_cpu_id_set(struct aq_hw_s *aq_hw, u32 cpuid, u32 dca)
 {
-	aq_hw_write_reg_bit(aq_hw, tdm_dcadcpuid_adr(dca),
-			    tdm_dcadcpuid_msk,
-			    tdm_dcadcpuid_shift, cpuid);
+	aq_hw_write_reg_bit(aq_hw, HW_ATL_TDM_DCADCPUID_ADR(dca),
+			    HW_ATL_TDM_DCADCPUID_MSK,
+			    HW_ATL_TDM_DCADCPUID_SHIFT, cpuid);
 }
 
-void tdm_large_send_offload_en_set(struct aq_hw_s *aq_hw,
-				   u32 large_send_offload_en)
+void hw_atl_tdm_large_send_offload_en_set(struct aq_hw_s *aq_hw,
+					  u32 large_send_offload_en)
 {
-	aq_hw_write_reg(aq_hw, tdm_lso_en_adr, large_send_offload_en);
+	aq_hw_write_reg(aq_hw, HW_ATL_TDM_LSO_EN_ADR, large_send_offload_en);
 }
 
-void tdm_tx_dca_en_set(struct aq_hw_s *aq_hw, u32 tx_dca_en)
+void hw_atl_tdm_tx_dca_en_set(struct aq_hw_s *aq_hw, u32 tx_dca_en)
 {
-	aq_hw_write_reg_bit(aq_hw, tdm_dca_en_adr, tdm_dca_en_msk,
-			    tdm_dca_en_shift, tx_dca_en);
+	aq_hw_write_reg_bit(aq_hw, HW_ATL_TDM_DCA_EN_ADR, HW_ATL_TDM_DCA_EN_MSK,
+			    HW_ATL_TDM_DCA_EN_SHIFT, tx_dca_en);
 }
 
-void tdm_tx_dca_mode_set(struct aq_hw_s *aq_hw, u32 tx_dca_mode)
+void hw_atl_tdm_tx_dca_mode_set(struct aq_hw_s *aq_hw, u32 tx_dca_mode)
 {
-	aq_hw_write_reg_bit(aq_hw, tdm_dca_mode_adr, tdm_dca_mode_msk,
-			    tdm_dca_mode_shift, tx_dca_mode);
+	aq_hw_write_reg_bit(aq_hw, HW_ATL_TDM_DCA_MODE_ADR,
+			    HW_ATL_TDM_DCA_MODE_MSK,
+			    HW_ATL_TDM_DCA_MODE_SHIFT, tx_dca_mode);
 }
 
-void tdm_tx_desc_dca_en_set(struct aq_hw_s *aq_hw, u32 tx_desc_dca_en, u32 dca)
+void hw_atl_tdm_tx_desc_dca_en_set(struct aq_hw_s *aq_hw, u32 tx_desc_dca_en,
+				   u32 dca)
 {
-	aq_hw_write_reg_bit(aq_hw, tdm_dcaddesc_en_adr(dca),
-			    tdm_dcaddesc_en_msk, tdm_dcaddesc_en_shift,
+	aq_hw_write_reg_bit(aq_hw, HW_ATL_TDM_DCADDESC_EN_ADR(dca),
+			    HW_ATL_TDM_DCADDESC_EN_MSK,
+			    HW_ATL_TDM_DCADDESC_EN_SHIFT,
 			    tx_desc_dca_en);
 }
 
-void tdm_tx_desc_en_set(struct aq_hw_s *aq_hw, u32 tx_desc_en, u32 descriptor)
+void hw_atl_tdm_tx_desc_en_set(struct aq_hw_s *aq_hw, u32 tx_desc_en,
+			       u32 descriptor)
 {
-	aq_hw_write_reg_bit(aq_hw, tdm_descden_adr(descriptor),
-			    tdm_descden_msk,
-			    tdm_descden_shift,
+	aq_hw_write_reg_bit(aq_hw, HW_ATL_TDM_DESCDEN_ADR(descriptor),
+			    HW_ATL_TDM_DESCDEN_MSK,
+			    HW_ATL_TDM_DESCDEN_SHIFT,
 			    tx_desc_en);
 }
 
-u32 tdm_tx_desc_head_ptr_get(struct aq_hw_s *aq_hw, u32 descriptor)
+u32 hw_atl_tdm_tx_desc_head_ptr_get(struct aq_hw_s *aq_hw, u32 descriptor)
 {
-	return aq_hw_read_reg_bit(aq_hw, tdm_descdhd_adr(descriptor),
-				  tdm_descdhd_msk, tdm_descdhd_shift);
+	return aq_hw_read_reg_bit(aq_hw, HW_ATL_TDM_DESCDHD_ADR(descriptor),
+				  HW_ATL_TDM_DESCDHD_MSK,
+				  HW_ATL_TDM_DESCDHD_SHIFT);
 }
 
-void tdm_tx_desc_len_set(struct aq_hw_s *aq_hw, u32 tx_desc_len,
-			 u32 descriptor)
+void hw_atl_tdm_tx_desc_len_set(struct aq_hw_s *aq_hw, u32 tx_desc_len,
+				u32 descriptor)
 {
-	aq_hw_write_reg_bit(aq_hw, tdm_descdlen_adr(descriptor),
-			    tdm_descdlen_msk,
-			    tdm_descdlen_shift,
+	aq_hw_write_reg_bit(aq_hw, HW_ATL_TDM_DESCDLEN_ADR(descriptor),
+			    HW_ATL_TDM_DESCDLEN_MSK,
+			    HW_ATL_TDM_DESCDLEN_SHIFT,
 			    tx_desc_len);
 }
 
-void tdm_tx_desc_wr_wb_irq_en_set(struct aq_hw_s *aq_hw,
-				  u32 tx_desc_wr_wb_irq_en)
+void hw_atl_tdm_tx_desc_wr_wb_irq_en_set(struct aq_hw_s *aq_hw,
+					 u32 tx_desc_wr_wb_irq_en)
 {
-	aq_hw_write_reg_bit(aq_hw, tdm_int_desc_wrb_en_adr,
-			    tdm_int_desc_wrb_en_msk,
-			    tdm_int_desc_wrb_en_shift,
+	aq_hw_write_reg_bit(aq_hw, HW_ATL_TDM_INT_DESC_WRB_EN_ADR,
+			    HW_ATL_TDM_INT_DESC_WRB_EN_MSK,
+			    HW_ATL_TDM_INT_DESC_WRB_EN_SHIFT,
 			    tx_desc_wr_wb_irq_en);
 }
 
-void tdm_tx_desc_wr_wb_threshold_set(struct aq_hw_s *aq_hw,
-				     u32 tx_desc_wr_wb_threshold,
-				     u32 descriptor)
+void hw_atl_tdm_tx_desc_wr_wb_threshold_set(struct aq_hw_s *aq_hw,
+					    u32 tx_desc_wr_wb_threshold,
+					    u32 descriptor)
 {
-	aq_hw_write_reg_bit(aq_hw, tdm_descdwrb_thresh_adr(descriptor),
-			    tdm_descdwrb_thresh_msk,
-			    tdm_descdwrb_thresh_shift,
+	aq_hw_write_reg_bit(aq_hw, HW_ATL_TDM_DESCDWRB_THRESH_ADR(descriptor),
+			    HW_ATL_TDM_DESCDWRB_THRESH_MSK,
+			    HW_ATL_TDM_DESCDWRB_THRESH_SHIFT,
 			    tx_desc_wr_wb_threshold);
 }
 
-void tdm_tdm_intr_moder_en_set(struct aq_hw_s *aq_hw,
-			       u32 tdm_irq_moderation_en)
+void hw_atl_tdm_tdm_intr_moder_en_set(struct aq_hw_s *aq_hw,
+				      u32 tdm_irq_moderation_en)
 {
-	aq_hw_write_reg_bit(aq_hw, tdm_int_mod_en_adr,
-			    tdm_int_mod_en_msk,
-			    tdm_int_mod_en_shift,
+	aq_hw_write_reg_bit(aq_hw, HW_ATL_TDM_INT_MOD_EN_ADR,
+			    HW_ATL_TDM_INT_MOD_EN_MSK,
+			    HW_ATL_TDM_INT_MOD_EN_SHIFT,
 			    tdm_irq_moderation_en);
 }
 
 /* thm */
-void thm_lso_tcp_flag_of_first_pkt_set(struct aq_hw_s *aq_hw,
-				       u32 lso_tcp_flag_of_first_pkt)
+void hw_atl_thm_lso_tcp_flag_of_first_pkt_set(struct aq_hw_s *aq_hw,
+					      u32 lso_tcp_flag_of_first_pkt)
 {
-	aq_hw_write_reg_bit(aq_hw, thm_lso_tcp_flag_first_adr,
-			    thm_lso_tcp_flag_first_msk,
-			    thm_lso_tcp_flag_first_shift,
+	aq_hw_write_reg_bit(aq_hw, HW_ATL_THM_LSO_TCP_FLAG_FIRST_ADR,
+			    HW_ATL_THM_LSO_TCP_FLAG_FIRST_MSK,
+			    HW_ATL_THM_LSO_TCP_FLAG_FIRST_SHIFT,
 			    lso_tcp_flag_of_first_pkt);
 }
 
-void thm_lso_tcp_flag_of_last_pkt_set(struct aq_hw_s *aq_hw,
-				      u32 lso_tcp_flag_of_last_pkt)
+void hw_atl_thm_lso_tcp_flag_of_last_pkt_set(struct aq_hw_s *aq_hw,
+					     u32 lso_tcp_flag_of_last_pkt)
 {
-	aq_hw_write_reg_bit(aq_hw, thm_lso_tcp_flag_last_adr,
-			    thm_lso_tcp_flag_last_msk,
-			    thm_lso_tcp_flag_last_shift,
+	aq_hw_write_reg_bit(aq_hw, HW_ATL_THM_LSO_TCP_FLAG_LAST_ADR,
+			    HW_ATL_THM_LSO_TCP_FLAG_LAST_MSK,
+			    HW_ATL_THM_LSO_TCP_FLAG_LAST_SHIFT,
 			    lso_tcp_flag_of_last_pkt);
 }
 
-void thm_lso_tcp_flag_of_middle_pkt_set(struct aq_hw_s *aq_hw,
-					u32 lso_tcp_flag_of_middle_pkt)
+void hw_atl_thm_lso_tcp_flag_of_middle_pkt_set(struct aq_hw_s *aq_hw,
+					       u32 lso_tcp_flag_of_middle_pkt)
 {
-	aq_hw_write_reg_bit(aq_hw, thm_lso_tcp_flag_mid_adr,
-			    thm_lso_tcp_flag_mid_msk,
-			    thm_lso_tcp_flag_mid_shift,
+	aq_hw_write_reg_bit(aq_hw, HW_ATL_THM_LSO_TCP_FLAG_MID_ADR,
+			    HW_ATL_THM_LSO_TCP_FLAG_MID_MSK,
+			    HW_ATL_THM_LSO_TCP_FLAG_MID_SHIFT,
 			    lso_tcp_flag_of_middle_pkt);
 }
 
 /* TPB: tx packet buffer */
-void tpb_tx_buff_en_set(struct aq_hw_s *aq_hw, u32 tx_buff_en)
+void hw_atl_tpb_tx_buff_en_set(struct aq_hw_s *aq_hw, u32 tx_buff_en)
 {
-	aq_hw_write_reg_bit(aq_hw, tpb_tx_buf_en_adr, tpb_tx_buf_en_msk,
-			    tpb_tx_buf_en_shift, tx_buff_en);
+	aq_hw_write_reg_bit(aq_hw, HW_ATL_TPB_TX_BUF_EN_ADR,
+			    HW_ATL_TPB_TX_BUF_EN_MSK,
+			    HW_ATL_TPB_TX_BUF_EN_SHIFT, tx_buff_en);
 }
 
-void tpb_tx_buff_hi_threshold_per_tc_set(struct aq_hw_s *aq_hw,
-					 u32 tx_buff_hi_threshold_per_tc,
+void hw_atl_tpb_tx_buff_hi_threshold_per_tc_set(struct aq_hw_s *aq_hw,
+						u32 tx_buff_hi_threshold_per_tc,
 					 u32 buffer)
 {
-	aq_hw_write_reg_bit(aq_hw, tpb_txbhi_thresh_adr(buffer),
-			    tpb_txbhi_thresh_msk, tpb_txbhi_thresh_shift,
+	aq_hw_write_reg_bit(aq_hw, HW_ATL_TPB_TXBHI_THRESH_ADR(buffer),
+			    HW_ATL_TPB_TXBHI_THRESH_MSK,
+			    HW_ATL_TPB_TXBHI_THRESH_SHIFT,
 			    tx_buff_hi_threshold_per_tc);
 }
 
-void tpb_tx_buff_lo_threshold_per_tc_set(struct aq_hw_s *aq_hw,
-					 u32 tx_buff_lo_threshold_per_tc,
+void hw_atl_tpb_tx_buff_lo_threshold_per_tc_set(struct aq_hw_s *aq_hw,
+						u32 tx_buff_lo_threshold_per_tc,
 					 u32 buffer)
 {
-	aq_hw_write_reg_bit(aq_hw, tpb_txblo_thresh_adr(buffer),
-			    tpb_txblo_thresh_msk, tpb_txblo_thresh_shift,
+	aq_hw_write_reg_bit(aq_hw, HW_ATL_TPB_TXBLO_THRESH_ADR(buffer),
+			    HW_ATL_TPB_TXBLO_THRESH_MSK,
+			    HW_ATL_TPB_TXBLO_THRESH_SHIFT,
 			    tx_buff_lo_threshold_per_tc);
 }
 
-void tpb_tx_dma_sys_lbk_en_set(struct aq_hw_s *aq_hw, u32 tx_dma_sys_lbk_en)
+void hw_atl_tpb_tx_dma_sys_lbk_en_set(struct aq_hw_s *aq_hw,
+				      u32 tx_dma_sys_lbk_en)
 {
-	aq_hw_write_reg_bit(aq_hw, tpb_dma_sys_lbk_adr,
-			    tpb_dma_sys_lbk_msk,
-			    tpb_dma_sys_lbk_shift,
+	aq_hw_write_reg_bit(aq_hw, HW_ATL_TPB_DMA_SYS_LBK_ADR,
+			    HW_ATL_TPB_DMA_SYS_LBK_MSK,
+			    HW_ATL_TPB_DMA_SYS_LBK_SHIFT,
 			    tx_dma_sys_lbk_en);
 }
 
-void tpb_tx_pkt_buff_size_per_tc_set(struct aq_hw_s *aq_hw,
-				     u32 tx_pkt_buff_size_per_tc, u32 buffer)
+void hw_atl_tpb_tx_pkt_buff_size_per_tc_set(struct aq_hw_s *aq_hw,
+					    u32 tx_pkt_buff_size_per_tc,
+					    u32 buffer)
 {
-	aq_hw_write_reg_bit(aq_hw, tpb_txbbuf_size_adr(buffer),
-			    tpb_txbbuf_size_msk,
-			    tpb_txbbuf_size_shift,
+	aq_hw_write_reg_bit(aq_hw, HW_ATL_TPB_TXBBUF_SIZE_ADR(buffer),
+			    HW_ATL_TPB_TXBBUF_SIZE_MSK,
+			    HW_ATL_TPB_TXBBUF_SIZE_SHIFT,
 			    tx_pkt_buff_size_per_tc);
 }
 
-void tpb_tx_path_scp_ins_en_set(struct aq_hw_s *aq_hw, u32 tx_path_scp_ins_en)
+void hw_atl_tpb_tx_path_scp_ins_en_set(struct aq_hw_s *aq_hw,
+				       u32 tx_path_scp_ins_en)
 {
-	aq_hw_write_reg_bit(aq_hw, tpb_tx_scp_ins_en_adr,
-			    tpb_tx_scp_ins_en_msk,
-			    tpb_tx_scp_ins_en_shift,
+	aq_hw_write_reg_bit(aq_hw, HW_ATL_TPB_TX_SCP_INS_EN_ADR,
+			    HW_ATL_TPB_TX_SCP_INS_EN_MSK,
+			    HW_ATL_TPB_TX_SCP_INS_EN_SHIFT,
 			    tx_path_scp_ins_en);
 }
 
 /* TPO: tx packet offload */
-void tpo_ipv4header_crc_offload_en_set(struct aq_hw_s *aq_hw,
-				       u32 ipv4header_crc_offload_en)
+void hw_atl_tpo_ipv4header_crc_offload_en_set(struct aq_hw_s *aq_hw,
+					      u32 ipv4header_crc_offload_en)
 {
-	aq_hw_write_reg_bit(aq_hw, tpo_ipv4chk_en_adr,
-			    tpo_ipv4chk_en_msk,
-			    tpo_ipv4chk_en_shift,
+	aq_hw_write_reg_bit(aq_hw, HW_ATL_TPO_IPV4CHK_EN_ADR,
+			    HW_ATL_TPO_IPV4CHK_EN_MSK,
+			    HW_ATL_TPO_IPV4CHK_EN_SHIFT,
 			    ipv4header_crc_offload_en);
 }
 
-void tpo_tcp_udp_crc_offload_en_set(struct aq_hw_s *aq_hw,
-				    u32 tcp_udp_crc_offload_en)
+void hw_atl_tpo_tcp_udp_crc_offload_en_set(struct aq_hw_s *aq_hw,
+					   u32 tcp_udp_crc_offload_en)
 {
-	aq_hw_write_reg_bit(aq_hw, tpol4chk_en_adr,
-			    tpol4chk_en_msk,
-			    tpol4chk_en_shift,
+	aq_hw_write_reg_bit(aq_hw, HW_ATL_TPOL4CHK_EN_ADR,
+			    HW_ATL_TPOL4CHK_EN_MSK,
+			    HW_ATL_TPOL4CHK_EN_SHIFT,
 			    tcp_udp_crc_offload_en);
 }
 
-void tpo_tx_pkt_sys_lbk_en_set(struct aq_hw_s *aq_hw, u32 tx_pkt_sys_lbk_en)
+void hw_atl_tpo_tx_pkt_sys_lbk_en_set(struct aq_hw_s *aq_hw,
+				      u32 tx_pkt_sys_lbk_en)
 {
-	aq_hw_write_reg_bit(aq_hw, tpo_pkt_sys_lbk_adr,
-			    tpo_pkt_sys_lbk_msk,
-			    tpo_pkt_sys_lbk_shift,
+	aq_hw_write_reg_bit(aq_hw, HW_ATL_TPO_PKT_SYS_LBK_ADR,
+			    HW_ATL_TPO_PKT_SYS_LBK_MSK,
+			    HW_ATL_TPO_PKT_SYS_LBK_SHIFT,
 			    tx_pkt_sys_lbk_en);
 }
 
 /* TPS: tx packet scheduler */
-void tps_tx_pkt_shed_data_arb_mode_set(struct aq_hw_s *aq_hw,
-				       u32 tx_pkt_shed_data_arb_mode)
+void hw_atl_tps_tx_pkt_shed_data_arb_mode_set(struct aq_hw_s *aq_hw,
+					      u32 tx_pkt_shed_data_arb_mode)
 {
-	aq_hw_write_reg_bit(aq_hw, tps_data_tc_arb_mode_adr,
-			    tps_data_tc_arb_mode_msk,
-			    tps_data_tc_arb_mode_shift,
+	aq_hw_write_reg_bit(aq_hw, HW_ATL_TPS_DATA_TC_ARB_MODE_ADR,
+			    HW_ATL_TPS_DATA_TC_ARB_MODE_MSK,
+			    HW_ATL_TPS_DATA_TC_ARB_MODE_SHIFT,
 			    tx_pkt_shed_data_arb_mode);
 }
 
-void tps_tx_pkt_shed_desc_rate_curr_time_res_set(struct aq_hw_s *aq_hw,
-						 u32 curr_time_res)
+void hw_atl_tps_tx_pkt_shed_desc_rate_curr_time_res_set(struct aq_hw_s *aq_hw,
+							u32 curr_time_res)
 {
-	aq_hw_write_reg_bit(aq_hw, tps_desc_rate_ta_rst_adr,
-			    tps_desc_rate_ta_rst_msk,
-			    tps_desc_rate_ta_rst_shift,
+	aq_hw_write_reg_bit(aq_hw, HW_ATL_TPS_DESC_RATE_TA_RST_ADR,
+			    HW_ATL_TPS_DESC_RATE_TA_RST_MSK,
+			    HW_ATL_TPS_DESC_RATE_TA_RST_SHIFT,
 			    curr_time_res);
 }
 
-void tps_tx_pkt_shed_desc_rate_lim_set(struct aq_hw_s *aq_hw,
-				       u32 tx_pkt_shed_desc_rate_lim)
+void hw_atl_tps_tx_pkt_shed_desc_rate_lim_set(struct aq_hw_s *aq_hw,
+					      u32 tx_pkt_shed_desc_rate_lim)
 {
-	aq_hw_write_reg_bit(aq_hw, tps_desc_rate_lim_adr,
-			    tps_desc_rate_lim_msk,
-			    tps_desc_rate_lim_shift,
+	aq_hw_write_reg_bit(aq_hw, HW_ATL_TPS_DESC_RATE_LIM_ADR,
+			    HW_ATL_TPS_DESC_RATE_LIM_MSK,
+			    HW_ATL_TPS_DESC_RATE_LIM_SHIFT,
 			    tx_pkt_shed_desc_rate_lim);
 }
 
-void tps_tx_pkt_shed_desc_tc_arb_mode_set(struct aq_hw_s *aq_hw,
-					  u32 tx_pkt_shed_desc_tc_arb_mode)
+void hw_atl_tps_tx_pkt_shed_desc_tc_arb_mode_set(struct aq_hw_s *aq_hw,
+						 u32 arb_mode)
 {
-	aq_hw_write_reg_bit(aq_hw, tps_desc_tc_arb_mode_adr,
-			    tps_desc_tc_arb_mode_msk,
-			    tps_desc_tc_arb_mode_shift,
-			    tx_pkt_shed_desc_tc_arb_mode);
+	aq_hw_write_reg_bit(aq_hw, HW_ATL_TPS_DESC_TC_ARB_MODE_ADR,
+			    HW_ATL_TPS_DESC_TC_ARB_MODE_MSK,
+			    HW_ATL_TPS_DESC_TC_ARB_MODE_SHIFT,
+			    arb_mode);
 }
 
-void tps_tx_pkt_shed_desc_tc_max_credit_set(struct aq_hw_s *aq_hw,
-					    u32 tx_pkt_shed_desc_tc_max_credit,
-					    u32 tc)
+void hw_atl_tps_tx_pkt_shed_desc_tc_max_credit_set(struct aq_hw_s *aq_hw,
+						   u32 max_credit,
+						   u32 tc)
 {
-	aq_hw_write_reg_bit(aq_hw, tps_desc_tctcredit_max_adr(tc),
-			    tps_desc_tctcredit_max_msk,
-			    tps_desc_tctcredit_max_shift,
-			    tx_pkt_shed_desc_tc_max_credit);
+	aq_hw_write_reg_bit(aq_hw, HW_ATL_TPS_DESC_TCTCREDIT_MAX_ADR(tc),
+			    HW_ATL_TPS_DESC_TCTCREDIT_MAX_MSK,
+			    HW_ATL_TPS_DESC_TCTCREDIT_MAX_SHIFT,
+			    max_credit);
 }
 
-void tps_tx_pkt_shed_desc_tc_weight_set(struct aq_hw_s *aq_hw,
-					u32 tx_pkt_shed_desc_tc_weight, u32 tc)
+void hw_atl_tps_tx_pkt_shed_desc_tc_weight_set(struct aq_hw_s *aq_hw,
+					       u32 tx_pkt_shed_desc_tc_weight,
+					       u32 tc)
 {
-	aq_hw_write_reg_bit(aq_hw, tps_desc_tctweight_adr(tc),
-			    tps_desc_tctweight_msk,
-			    tps_desc_tctweight_shift,
+	aq_hw_write_reg_bit(aq_hw, HW_ATL_TPS_DESC_TCTWEIGHT_ADR(tc),
+			    HW_ATL_TPS_DESC_TCTWEIGHT_MSK,
+			    HW_ATL_TPS_DESC_TCTWEIGHT_SHIFT,
 			    tx_pkt_shed_desc_tc_weight);
 }
 
-void tps_tx_pkt_shed_desc_vm_arb_mode_set(struct aq_hw_s *aq_hw,
-					  u32 tx_pkt_shed_desc_vm_arb_mode)
+void hw_atl_tps_tx_pkt_shed_desc_vm_arb_mode_set(struct aq_hw_s *aq_hw,
+						 u32 arb_mode)
 {
-	aq_hw_write_reg_bit(aq_hw, tps_desc_vm_arb_mode_adr,
-			    tps_desc_vm_arb_mode_msk,
-			    tps_desc_vm_arb_mode_shift,
-			    tx_pkt_shed_desc_vm_arb_mode);
+	aq_hw_write_reg_bit(aq_hw, HW_ATL_TPS_DESC_VM_ARB_MODE_ADR,
+			    HW_ATL_TPS_DESC_VM_ARB_MODE_MSK,
+			    HW_ATL_TPS_DESC_VM_ARB_MODE_SHIFT,
+			    arb_mode);
 }
 
-void tps_tx_pkt_shed_tc_data_max_credit_set(struct aq_hw_s *aq_hw,
-					    u32 tx_pkt_shed_tc_data_max_credit,
-					    u32 tc)
+void hw_atl_tps_tx_pkt_shed_tc_data_max_credit_set(struct aq_hw_s *aq_hw,
+						   u32 max_credit,
+						   u32 tc)
 {
-	aq_hw_write_reg_bit(aq_hw, tps_data_tctcredit_max_adr(tc),
-			    tps_data_tctcredit_max_msk,
-			    tps_data_tctcredit_max_shift,
-			    tx_pkt_shed_tc_data_max_credit);
+	aq_hw_write_reg_bit(aq_hw, HW_ATL_TPS_DATA_TCTCREDIT_MAX_ADR(tc),
+			    HW_ATL_TPS_DATA_TCTCREDIT_MAX_MSK,
+			    HW_ATL_TPS_DATA_TCTCREDIT_MAX_SHIFT,
+			    max_credit);
 }
 
-void tps_tx_pkt_shed_tc_data_weight_set(struct aq_hw_s *aq_hw,
-					u32 tx_pkt_shed_tc_data_weight, u32 tc)
+void hw_atl_tps_tx_pkt_shed_tc_data_weight_set(struct aq_hw_s *aq_hw,
+					       u32 tx_pkt_shed_tc_data_weight,
+					       u32 tc)
 {
-	aq_hw_write_reg_bit(aq_hw, tps_data_tctweight_adr(tc),
-			    tps_data_tctweight_msk,
-			    tps_data_tctweight_shift,
+	aq_hw_write_reg_bit(aq_hw, HW_ATL_TPS_DATA_TCTWEIGHT_ADR(tc),
+			    HW_ATL_TPS_DATA_TCTWEIGHT_MSK,
+			    HW_ATL_TPS_DATA_TCTWEIGHT_SHIFT,
 			    tx_pkt_shed_tc_data_weight);
 }
 
 /* tx */
-void tx_tx_reg_res_dis_set(struct aq_hw_s *aq_hw, u32 tx_reg_res_dis)
+void hw_atl_tx_tx_reg_res_dis_set(struct aq_hw_s *aq_hw, u32 tx_reg_res_dis)
 {
-	aq_hw_write_reg_bit(aq_hw, tx_reg_res_dsbl_adr,
-			    tx_reg_res_dsbl_msk,
-			    tx_reg_res_dsbl_shift, tx_reg_res_dis);
+	aq_hw_write_reg_bit(aq_hw, HW_ATL_TX_REG_RES_DSBL_ADR,
+			    HW_ATL_TX_REG_RES_DSBL_MSK,
+			    HW_ATL_TX_REG_RES_DSBL_SHIFT, tx_reg_res_dis);
 }
 
 /* msm */
-u32 msm_reg_access_status_get(struct aq_hw_s *aq_hw)
+u32 hw_atl_msm_reg_access_status_get(struct aq_hw_s *aq_hw)
 {
-	return aq_hw_read_reg_bit(aq_hw, msm_reg_access_busy_adr,
-				  msm_reg_access_busy_msk,
-				  msm_reg_access_busy_shift);
+	return aq_hw_read_reg_bit(aq_hw, HW_ATL_MSM_REG_ACCESS_BUSY_ADR,
+				  HW_ATL_MSM_REG_ACCESS_BUSY_MSK,
+				  HW_ATL_MSM_REG_ACCESS_BUSY_SHIFT);
 }
 
-void msm_reg_addr_for_indirect_addr_set(struct aq_hw_s *aq_hw,
-					u32 reg_addr_for_indirect_addr)
+void hw_atl_msm_reg_addr_for_indirect_addr_set(struct aq_hw_s *aq_hw,
+					       u32 reg_addr_for_indirect_addr)
 {
-	aq_hw_write_reg_bit(aq_hw, msm_reg_addr_adr,
-			    msm_reg_addr_msk,
-			    msm_reg_addr_shift,
+	aq_hw_write_reg_bit(aq_hw, HW_ATL_MSM_REG_ADDR_ADR,
+			    HW_ATL_MSM_REG_ADDR_MSK,
+			    HW_ATL_MSM_REG_ADDR_SHIFT,
 			    reg_addr_for_indirect_addr);
 }
 
-void msm_reg_rd_strobe_set(struct aq_hw_s *aq_hw, u32 reg_rd_strobe)
+void hw_atl_msm_reg_rd_strobe_set(struct aq_hw_s *aq_hw, u32 reg_rd_strobe)
 {
-	aq_hw_write_reg_bit(aq_hw, msm_reg_rd_strobe_adr,
-			    msm_reg_rd_strobe_msk,
-			    msm_reg_rd_strobe_shift,
+	aq_hw_write_reg_bit(aq_hw, HW_ATL_MSM_REG_RD_STROBE_ADR,
+			    HW_ATL_MSM_REG_RD_STROBE_MSK,
+			    HW_ATL_MSM_REG_RD_STROBE_SHIFT,
 			    reg_rd_strobe);
 }
 
-u32 msm_reg_rd_data_get(struct aq_hw_s *aq_hw)
+u32 hw_atl_msm_reg_rd_data_get(struct aq_hw_s *aq_hw)
 {
-	return aq_hw_read_reg(aq_hw, msm_reg_rd_data_adr);
+	return aq_hw_read_reg(aq_hw, HW_ATL_MSM_REG_RD_DATA_ADR);
 }
 
-void msm_reg_wr_data_set(struct aq_hw_s *aq_hw, u32 reg_wr_data)
+void hw_atl_msm_reg_wr_data_set(struct aq_hw_s *aq_hw, u32 reg_wr_data)
 {
-	aq_hw_write_reg(aq_hw, msm_reg_wr_data_adr, reg_wr_data);
+	aq_hw_write_reg(aq_hw, HW_ATL_MSM_REG_WR_DATA_ADR, reg_wr_data);
 }
 
-void msm_reg_wr_strobe_set(struct aq_hw_s *aq_hw, u32 reg_wr_strobe)
+void hw_atl_msm_reg_wr_strobe_set(struct aq_hw_s *aq_hw, u32 reg_wr_strobe)
 {
-	aq_hw_write_reg_bit(aq_hw, msm_reg_wr_strobe_adr,
-			    msm_reg_wr_strobe_msk,
-			    msm_reg_wr_strobe_shift,
+	aq_hw_write_reg_bit(aq_hw, HW_ATL_MSM_REG_WR_STROBE_ADR,
+			    HW_ATL_MSM_REG_WR_STROBE_MSK,
+			    HW_ATL_MSM_REG_WR_STROBE_SHIFT,
 			    reg_wr_strobe);
 }
 
 /* pci */
-void pci_pci_reg_res_dis_set(struct aq_hw_s *aq_hw, u32 pci_reg_res_dis)
+void hw_atl_pci_pci_reg_res_dis_set(struct aq_hw_s *aq_hw, u32 pci_reg_res_dis)
 {
-	aq_hw_write_reg_bit(aq_hw, pci_reg_res_dsbl_adr,
-			    pci_reg_res_dsbl_msk,
-			    pci_reg_res_dsbl_shift,
+	aq_hw_write_reg_bit(aq_hw, HW_ATL_PCI_REG_RES_DSBL_ADR,
+			    HW_ATL_PCI_REG_RES_DSBL_MSK,
+			    HW_ATL_PCI_REG_RES_DSBL_SHIFT,
 			    pci_reg_res_dis);
 }
 
-void reg_glb_cpu_scratch_scp_set(struct aq_hw_s *aq_hw, u32 glb_cpu_scratch_scp,
-				 u32 scratch_scp)
+void hw_atl_reg_glb_cpu_scratch_scp_set(struct aq_hw_s *aq_hw,
+					u32 glb_cpu_scratch_scp,
+					u32 scratch_scp)
 {
-	aq_hw_write_reg(aq_hw, glb_cpu_scratch_scp_adr(scratch_scp),
+	aq_hw_write_reg(aq_hw, HW_ATL_GLB_CPU_SCRATCH_SCP_ADR(scratch_scp),
 			glb_cpu_scratch_scp);
 }
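
Every setter in the file above follows one pattern: a bitfield is described by an _ADR/_MSK/_SHIFT macro triplet, and aq_hw_write_reg_bit() performs a read-modify-write against that triplet. The rename changes no behavior; each hunk swaps a lowercase identifier for its HW_ATL_-prefixed equivalent with the same address, mask, and shift. Below is a minimal, self-contained sketch of the pattern; the stub register file, helper names, and example macro values are hypothetical stand-ins, not the driver's own code.

#include <stdint.h>
#include <stdio.h>

typedef uint32_t u32;

/* Hypothetical stand-in for MMIO: the driver reads/writes the mapped
 * BAR via aq_hw_read_reg()/aq_hw_write_reg(), not a flat array.
 */
static u32 fake_regs[0x10000 / 4];

static u32 reg_read(u32 addr)            { return fake_regs[addr / 4]; }
static void reg_write(u32 addr, u32 val) { fake_regs[addr / 4] = val; }

/* Read-modify-write of one bitfield, mirroring what
 * aq_hw_write_reg_bit() does with an ADR/MSK/SHIFT triplet:
 * clear the field via the mask, then OR in the shifted value.
 */
static void write_reg_bit(u32 addr, u32 msk, u32 shift, u32 val)
{
	u32 reg = reg_read(addr);

	reg = (reg & ~msk) | ((val << shift) & msk);
	reg_write(addr, reg);
}

/* Illustrative triplet in the renamed HW_ATL_* style (values made up) */
#define HW_ATL_EXAMPLE_FIELD_ADR   0x00006180u
#define HW_ATL_EXAMPLE_FIELD_MSK   0x0000000fu
#define HW_ATL_EXAMPLE_FIELD_SHIFT 0

int main(void)
{
	write_reg_bit(HW_ATL_EXAMPLE_FIELD_ADR, HW_ATL_EXAMPLE_FIELD_MSK,
		      HW_ATL_EXAMPLE_FIELD_SHIFT, 0x3);
	printf("reg = 0x%08x\n", reg_read(HW_ATL_EXAMPLE_FIELD_ADR));
	return 0;
}
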
diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh.h b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh.h
index ed1085b95adb..dfb426f2dc2c 100644
--- a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh.h
+++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh.h
@@ -21,657 +21,681 @@ struct aq_hw_s;
 /* global */
 
 /* set global microprocessor semaphore */
-void reg_glb_cpu_sem_set(struct aq_hw_s *aq_hw,	u32 glb_cpu_sem,
-			 u32 semaphore);
+void hw_atl_reg_glb_cpu_sem_set(struct aq_hw_s *aq_hw,	u32 glb_cpu_sem,
+				u32 semaphore);
 
 /* get global microprocessor semaphore */
-u32 reg_glb_cpu_sem_get(struct aq_hw_s *aq_hw, u32 semaphore);
+u32 hw_atl_reg_glb_cpu_sem_get(struct aq_hw_s *aq_hw, u32 semaphore);
 
 /* set global register reset disable */
-void glb_glb_reg_res_dis_set(struct aq_hw_s *aq_hw, u32 glb_reg_res_dis);
+void hw_atl_glb_glb_reg_res_dis_set(struct aq_hw_s *aq_hw, u32 glb_reg_res_dis);
 
 /* set soft reset */
-void glb_soft_res_set(struct aq_hw_s *aq_hw, u32 soft_res);
+void hw_atl_glb_soft_res_set(struct aq_hw_s *aq_hw, u32 soft_res);
 
 /* get soft reset */
-u32 glb_soft_res_get(struct aq_hw_s *aq_hw);
+u32 hw_atl_glb_soft_res_get(struct aq_hw_s *aq_hw);
 
 /* stats */
 
-u32 rpb_rx_dma_drop_pkt_cnt_get(struct aq_hw_s *aq_hw);
+u32 hw_atl_rpb_rx_dma_drop_pkt_cnt_get(struct aq_hw_s *aq_hw);
 
 /* get rx dma good octet counter lsw */
-u32 stats_rx_dma_good_octet_counterlsw_get(struct aq_hw_s *aq_hw);
+u32 hw_atl_stats_rx_dma_good_octet_counterlsw_get(struct aq_hw_s *aq_hw);
 
 /* get rx dma good packet counter lsw */
-u32 stats_rx_dma_good_pkt_counterlsw_get(struct aq_hw_s *aq_hw);
+u32 hw_atl_stats_rx_dma_good_pkt_counterlsw_get(struct aq_hw_s *aq_hw);
 
 /* get tx dma good octet counter lsw */
-u32 stats_tx_dma_good_octet_counterlsw_get(struct aq_hw_s *aq_hw);
+u32 hw_atl_stats_tx_dma_good_octet_counterlsw_get(struct aq_hw_s *aq_hw);
 
 /* get tx dma good packet counter lsw */
-u32 stats_tx_dma_good_pkt_counterlsw_get(struct aq_hw_s *aq_hw);
+u32 hw_atl_stats_tx_dma_good_pkt_counterlsw_get(struct aq_hw_s *aq_hw);
 
 /* get rx dma good octet counter msw */
-u32 stats_rx_dma_good_octet_countermsw_get(struct aq_hw_s *aq_hw);
+u32 hw_atl_stats_rx_dma_good_octet_countermsw_get(struct aq_hw_s *aq_hw);
 
 /* get rx dma good packet counter msw */
-u32 stats_rx_dma_good_pkt_countermsw_get(struct aq_hw_s *aq_hw);
+u32 hw_atl_stats_rx_dma_good_pkt_countermsw_get(struct aq_hw_s *aq_hw);
 
 /* get tx dma good octet counter msw */
-u32 stats_tx_dma_good_octet_countermsw_get(struct aq_hw_s *aq_hw);
+u32 hw_atl_stats_tx_dma_good_octet_countermsw_get(struct aq_hw_s *aq_hw);
 
 /* get tx dma good packet counter msw */
-u32 stats_tx_dma_good_pkt_countermsw_get(struct aq_hw_s *aq_hw);
+u32 hw_atl_stats_tx_dma_good_pkt_countermsw_get(struct aq_hw_s *aq_hw);
 
 /* get msm rx errors counter register */
-u32 reg_mac_msm_rx_errs_cnt_get(struct aq_hw_s *aq_hw);
+u32 hw_atl_reg_mac_msm_rx_errs_cnt_get(struct aq_hw_s *aq_hw);
 
 /* get msm rx unicast frames counter register */
-u32 reg_mac_msm_rx_ucst_frm_cnt_get(struct aq_hw_s *aq_hw);
+u32 hw_atl_reg_mac_msm_rx_ucst_frm_cnt_get(struct aq_hw_s *aq_hw);
 
 /* get msm rx multicast frames counter register */
-u32 reg_mac_msm_rx_mcst_frm_cnt_get(struct aq_hw_s *aq_hw);
+u32 hw_atl_reg_mac_msm_rx_mcst_frm_cnt_get(struct aq_hw_s *aq_hw);
 
 /* get msm rx broadcast frames counter register */
-u32 reg_mac_msm_rx_bcst_frm_cnt_get(struct aq_hw_s *aq_hw);
+u32 hw_atl_reg_mac_msm_rx_bcst_frm_cnt_get(struct aq_hw_s *aq_hw);
 
 /* get msm rx broadcast octets counter register 1 */
-u32 reg_mac_msm_rx_bcst_octets_counter1get(struct aq_hw_s *aq_hw);
+u32 hw_atl_reg_mac_msm_rx_bcst_octets_counter1get(struct aq_hw_s *aq_hw);
 
 /* get msm rx unicast octets counter register 0 */
-u32 reg_mac_msm_rx_ucst_octets_counter0get(struct aq_hw_s *aq_hw);
+u32 hw_atl_reg_mac_msm_rx_ucst_octets_counter0get(struct aq_hw_s *aq_hw);
 
 /* get rx dma statistics counter 7 */
-u32 reg_rx_dma_stat_counter7get(struct aq_hw_s *aq_hw);
+u32 hw_atl_reg_rx_dma_stat_counter7get(struct aq_hw_s *aq_hw);
 
 /* get msm tx errors counter register */
-u32 reg_mac_msm_tx_errs_cnt_get(struct aq_hw_s *aq_hw);
+u32 hw_atl_reg_mac_msm_tx_errs_cnt_get(struct aq_hw_s *aq_hw);
 
 /* get msm tx unicast frames counter register */
-u32 reg_mac_msm_tx_ucst_frm_cnt_get(struct aq_hw_s *aq_hw);
+u32 hw_atl_reg_mac_msm_tx_ucst_frm_cnt_get(struct aq_hw_s *aq_hw);
 
 /* get msm tx multicast frames counter register */
-u32 reg_mac_msm_tx_mcst_frm_cnt_get(struct aq_hw_s *aq_hw);
+u32 hw_atl_reg_mac_msm_tx_mcst_frm_cnt_get(struct aq_hw_s *aq_hw);
 
 /* get msm tx broadcast frames counter register */
-u32 reg_mac_msm_tx_bcst_frm_cnt_get(struct aq_hw_s *aq_hw);
+u32 hw_atl_reg_mac_msm_tx_bcst_frm_cnt_get(struct aq_hw_s *aq_hw);
 
 /* get msm tx multicast octets counter register 1 */
-u32 reg_mac_msm_tx_mcst_octets_counter1get(struct aq_hw_s *aq_hw);
+u32 hw_atl_reg_mac_msm_tx_mcst_octets_counter1get(struct aq_hw_s *aq_hw);
 
 /* get msm tx broadcast octets counter register 1 */
-u32 reg_mac_msm_tx_bcst_octets_counter1get(struct aq_hw_s *aq_hw);
+u32 hw_atl_reg_mac_msm_tx_bcst_octets_counter1get(struct aq_hw_s *aq_hw);
 
 /* get msm tx unicast octets counter register 0 */
-u32 reg_mac_msm_tx_ucst_octets_counter0get(struct aq_hw_s *aq_hw);
+u32 hw_atl_reg_mac_msm_tx_ucst_octets_counter0get(struct aq_hw_s *aq_hw);
 
 /* get global mif identification */
-u32 reg_glb_mif_id_get(struct aq_hw_s *aq_hw);
+u32 hw_atl_reg_glb_mif_id_get(struct aq_hw_s *aq_hw);
 
 /* interrupt */
 
 /* set interrupt auto mask lsw */
-void itr_irq_auto_masklsw_set(struct aq_hw_s *aq_hw, u32 irq_auto_masklsw);
+void hw_atl_itr_irq_auto_masklsw_set(struct aq_hw_s *aq_hw,
+				     u32 irq_auto_masklsw);
 
 /* set interrupt mapping enable rx */
-void itr_irq_map_en_rx_set(struct aq_hw_s *aq_hw, u32 irq_map_en_rx, u32 rx);
+void hw_atl_itr_irq_map_en_rx_set(struct aq_hw_s *aq_hw, u32 irq_map_en_rx,
+				  u32 rx);
 
 /* set interrupt mapping enable tx */
-void itr_irq_map_en_tx_set(struct aq_hw_s *aq_hw, u32 irq_map_en_tx, u32 tx);
+void hw_atl_itr_irq_map_en_tx_set(struct aq_hw_s *aq_hw, u32 irq_map_en_tx,
+				  u32 tx);
 
 /* set interrupt mapping rx */
-void itr_irq_map_rx_set(struct aq_hw_s *aq_hw, u32 irq_map_rx, u32 rx);
+void hw_atl_itr_irq_map_rx_set(struct aq_hw_s *aq_hw, u32 irq_map_rx, u32 rx);
 
 /* set interrupt mapping tx */
-void itr_irq_map_tx_set(struct aq_hw_s *aq_hw, u32 irq_map_tx, u32 tx);
+void hw_atl_itr_irq_map_tx_set(struct aq_hw_s *aq_hw, u32 irq_map_tx, u32 tx);
 
 /* set interrupt mask clear lsw */
-void itr_irq_msk_clearlsw_set(struct aq_hw_s *aq_hw, u32 irq_msk_clearlsw);
+void hw_atl_itr_irq_msk_clearlsw_set(struct aq_hw_s *aq_hw,
+				     u32 irq_msk_clearlsw);
 
 /* set interrupt mask set lsw */
-void itr_irq_msk_setlsw_set(struct aq_hw_s *aq_hw, u32 irq_msk_setlsw);
+void hw_atl_itr_irq_msk_setlsw_set(struct aq_hw_s *aq_hw, u32 irq_msk_setlsw);
 
 /* set interrupt register reset disable */
-void itr_irq_reg_res_dis_set(struct aq_hw_s *aq_hw, u32 irq_reg_res_dis);
+void hw_atl_itr_irq_reg_res_dis_set(struct aq_hw_s *aq_hw, u32 irq_reg_res_dis);
 
 /* set interrupt status clear lsw */
-void itr_irq_status_clearlsw_set(struct aq_hw_s *aq_hw,
-				 u32 irq_status_clearlsw);
+void hw_atl_itr_irq_status_clearlsw_set(struct aq_hw_s *aq_hw,
+					u32 irq_status_clearlsw);
 
 /* get interrupt status lsw */
-u32 itr_irq_statuslsw_get(struct aq_hw_s *aq_hw);
+u32 hw_atl_itr_irq_statuslsw_get(struct aq_hw_s *aq_hw);
 
 /* get reset interrupt */
-u32 itr_res_irq_get(struct aq_hw_s *aq_hw);
+u32 hw_atl_itr_res_irq_get(struct aq_hw_s *aq_hw);
 
 /* set reset interrupt */
-void itr_res_irq_set(struct aq_hw_s *aq_hw, u32 res_irq);
+void hw_atl_itr_res_irq_set(struct aq_hw_s *aq_hw, u32 res_irq);
 
 /* rdm */
 
 /* set cpu id */
-void rdm_cpu_id_set(struct aq_hw_s *aq_hw, u32 cpuid, u32 dca);
+void hw_atl_rdm_cpu_id_set(struct aq_hw_s *aq_hw, u32 cpuid, u32 dca);
 
 /* set rx dca enable */
-void rdm_rx_dca_en_set(struct aq_hw_s *aq_hw, u32 rx_dca_en);
+void hw_atl_rdm_rx_dca_en_set(struct aq_hw_s *aq_hw, u32 rx_dca_en);
 
 /* set rx dca mode */
-void rdm_rx_dca_mode_set(struct aq_hw_s *aq_hw, u32 rx_dca_mode);
+void hw_atl_rdm_rx_dca_mode_set(struct aq_hw_s *aq_hw, u32 rx_dca_mode);
 
 /* set rx descriptor data buffer size */
-void rdm_rx_desc_data_buff_size_set(struct aq_hw_s *aq_hw,
-				    u32 rx_desc_data_buff_size,
+void hw_atl_rdm_rx_desc_data_buff_size_set(struct aq_hw_s *aq_hw,
+					   u32 rx_desc_data_buff_size,
 				    u32 descriptor);
 
 /* set rx descriptor dca enable */
-void rdm_rx_desc_dca_en_set(struct aq_hw_s *aq_hw, u32 rx_desc_dca_en,
-			    u32 dca);
+void hw_atl_rdm_rx_desc_dca_en_set(struct aq_hw_s *aq_hw, u32 rx_desc_dca_en,
+				   u32 dca);
 
 /* set rx descriptor enable */
-void rdm_rx_desc_en_set(struct aq_hw_s *aq_hw, u32 rx_desc_en,
-			u32 descriptor);
+void hw_atl_rdm_rx_desc_en_set(struct aq_hw_s *aq_hw, u32 rx_desc_en,
+			       u32 descriptor);
 
 /* set rx descriptor header splitting */
-void rdm_rx_desc_head_splitting_set(struct aq_hw_s *aq_hw,
-				    u32 rx_desc_head_splitting,
+void hw_atl_rdm_rx_desc_head_splitting_set(struct aq_hw_s *aq_hw,
+					   u32 rx_desc_head_splitting,
 				    u32 descriptor);
 
 /* get rx descriptor head pointer */
-u32 rdm_rx_desc_head_ptr_get(struct aq_hw_s *aq_hw, u32 descriptor);
+u32 hw_atl_rdm_rx_desc_head_ptr_get(struct aq_hw_s *aq_hw, u32 descriptor);
 
 /* set rx descriptor length */
-void rdm_rx_desc_len_set(struct aq_hw_s *aq_hw, u32 rx_desc_len,
-			 u32 descriptor);
+void hw_atl_rdm_rx_desc_len_set(struct aq_hw_s *aq_hw, u32 rx_desc_len,
+				u32 descriptor);
 
 /* set rx descriptor write-back interrupt enable */
-void rdm_rx_desc_wr_wb_irq_en_set(struct aq_hw_s *aq_hw,
-				  u32 rx_desc_wr_wb_irq_en);
+void hw_atl_rdm_rx_desc_wr_wb_irq_en_set(struct aq_hw_s *aq_hw,
+					 u32 rx_desc_wr_wb_irq_en);
 
 /* set rx header dca enable */
-void rdm_rx_head_dca_en_set(struct aq_hw_s *aq_hw, u32 rx_head_dca_en,
-			    u32 dca);
+void hw_atl_rdm_rx_head_dca_en_set(struct aq_hw_s *aq_hw, u32 rx_head_dca_en,
+				   u32 dca);
 
 /* set rx payload dca enable */
-void rdm_rx_pld_dca_en_set(struct aq_hw_s *aq_hw, u32 rx_pld_dca_en, u32 dca);
+void hw_atl_rdm_rx_pld_dca_en_set(struct aq_hw_s *aq_hw, u32 rx_pld_dca_en,
+				  u32 dca);
 
 /* set rx descriptor header buffer size */
-void rdm_rx_desc_head_buff_size_set(struct aq_hw_s *aq_hw,
-				    u32 rx_desc_head_buff_size,
-				    u32 descriptor);
+void hw_atl_rdm_rx_desc_head_buff_size_set(struct aq_hw_s *aq_hw,
+					   u32 rx_desc_head_buff_size,
+					   u32 descriptor);
 
 /* set rx descriptor reset */
-void rdm_rx_desc_res_set(struct aq_hw_s *aq_hw, u32 rx_desc_res,
-			 u32 descriptor);
+void hw_atl_rdm_rx_desc_res_set(struct aq_hw_s *aq_hw, u32 rx_desc_res,
+				u32 descriptor);
 
 /* Set RDM Interrupt Moderation Enable */
-void rdm_rdm_intr_moder_en_set(struct aq_hw_s *aq_hw, u32 rdm_intr_moder_en);
+void hw_atl_rdm_rdm_intr_moder_en_set(struct aq_hw_s *aq_hw,
+				      u32 rdm_intr_moder_en);
 
 /* reg */
 
 /* set general interrupt mapping register */
-void reg_gen_irq_map_set(struct aq_hw_s *aq_hw, u32 gen_intr_map, u32 regidx);
+void hw_atl_reg_gen_irq_map_set(struct aq_hw_s *aq_hw, u32 gen_intr_map,
+				u32 regidx);
 
 /* get general interrupt status register */
-u32 reg_gen_irq_status_get(struct aq_hw_s *aq_hw);
+u32 hw_atl_reg_gen_irq_status_get(struct aq_hw_s *aq_hw);
 
 /* set interrupt global control register */
-void reg_irq_glb_ctl_set(struct aq_hw_s *aq_hw, u32 intr_glb_ctl);
+void hw_atl_reg_irq_glb_ctl_set(struct aq_hw_s *aq_hw, u32 intr_glb_ctl);
 
 /* set interrupt throttle register */
-void reg_irq_thr_set(struct aq_hw_s *aq_hw, u32 intr_thr, u32 throttle);
+void hw_atl_reg_irq_thr_set(struct aq_hw_s *aq_hw, u32 intr_thr, u32 throttle);
 
 /* set rx dma descriptor base address lsw */
-void reg_rx_dma_desc_base_addresslswset(struct aq_hw_s *aq_hw,
-					u32 rx_dma_desc_base_addrlsw,
+void hw_atl_reg_rx_dma_desc_base_addresslswset(struct aq_hw_s *aq_hw,
+					       u32 rx_dma_desc_base_addrlsw,
 					u32 descriptor);
 
 /* set rx dma descriptor base address msw */
-void reg_rx_dma_desc_base_addressmswset(struct aq_hw_s *aq_hw,
-					u32 rx_dma_desc_base_addrmsw,
+void hw_atl_reg_rx_dma_desc_base_addressmswset(struct aq_hw_s *aq_hw,
+					       u32 rx_dma_desc_base_addrmsw,
 					u32 descriptor);
 
 /* get rx dma descriptor status register */
-u32 reg_rx_dma_desc_status_get(struct aq_hw_s *aq_hw, u32 descriptor);
+u32 hw_atl_reg_rx_dma_desc_status_get(struct aq_hw_s *aq_hw, u32 descriptor);
 
 /* set rx dma descriptor tail pointer register */
-void reg_rx_dma_desc_tail_ptr_set(struct aq_hw_s *aq_hw,
-				  u32 rx_dma_desc_tail_ptr,
+void hw_atl_reg_rx_dma_desc_tail_ptr_set(struct aq_hw_s *aq_hw,
+					 u32 rx_dma_desc_tail_ptr,
 				  u32 descriptor);
 
 /* set rx filter multicast filter mask register */
-void reg_rx_flr_mcst_flr_msk_set(struct aq_hw_s *aq_hw,
-				 u32 rx_flr_mcst_flr_msk);
+void hw_atl_reg_rx_flr_mcst_flr_msk_set(struct aq_hw_s *aq_hw,
+					u32 rx_flr_mcst_flr_msk);
 
 /* set rx filter multicast filter register */
-void reg_rx_flr_mcst_flr_set(struct aq_hw_s *aq_hw, u32 rx_flr_mcst_flr,
-			     u32 filter);
+void hw_atl_reg_rx_flr_mcst_flr_set(struct aq_hw_s *aq_hw, u32 rx_flr_mcst_flr,
+				    u32 filter);
 
 /* set rx filter rss control register 1 */
-void reg_rx_flr_rss_control1set(struct aq_hw_s *aq_hw,
-				u32 rx_flr_rss_control1);
+void hw_atl_reg_rx_flr_rss_control1set(struct aq_hw_s *aq_hw,
+				       u32 rx_flr_rss_control1);
 
 /* Set RX Filter Control Register 2 */
-void reg_rx_flr_control2_set(struct aq_hw_s *aq_hw, u32 rx_flr_control2);
+void hw_atl_reg_rx_flr_control2_set(struct aq_hw_s *aq_hw, u32 rx_flr_control2);
 
 /* Set RX Interrupt Moderation Control Register */
-void reg_rx_intr_moder_ctrl_set(struct aq_hw_s *aq_hw,
-				u32 rx_intr_moderation_ctl,
+void hw_atl_reg_rx_intr_moder_ctrl_set(struct aq_hw_s *aq_hw,
+				       u32 rx_intr_moderation_ctl,
 				u32 queue);
 
 /* set tx dma debug control */
-void reg_tx_dma_debug_ctl_set(struct aq_hw_s *aq_hw, u32 tx_dma_debug_ctl);
+void hw_atl_reg_tx_dma_debug_ctl_set(struct aq_hw_s *aq_hw,
+				     u32 tx_dma_debug_ctl);
 
 /* set tx dma descriptor base address lsw */
-void reg_tx_dma_desc_base_addresslswset(struct aq_hw_s *aq_hw,
-					u32 tx_dma_desc_base_addrlsw,
+void hw_atl_reg_tx_dma_desc_base_addresslswset(struct aq_hw_s *aq_hw,
+					       u32 tx_dma_desc_base_addrlsw,
 					u32 descriptor);
 
 /* set tx dma descriptor base address msw */
-void reg_tx_dma_desc_base_addressmswset(struct aq_hw_s *aq_hw,
-					u32 tx_dma_desc_base_addrmsw,
+void hw_atl_reg_tx_dma_desc_base_addressmswset(struct aq_hw_s *aq_hw,
+					       u32 tx_dma_desc_base_addrmsw,
 					u32 descriptor);
 
 /* set tx dma descriptor tail pointer register */
-void reg_tx_dma_desc_tail_ptr_set(struct aq_hw_s *aq_hw,
-				  u32 tx_dma_desc_tail_ptr,
-				  u32 descriptor);
+void hw_atl_reg_tx_dma_desc_tail_ptr_set(struct aq_hw_s *aq_hw,
+					 u32 tx_dma_desc_tail_ptr,
+					 u32 descriptor);
 
 /* Set TX Interrupt Moderation Control Register */
-void reg_tx_intr_moder_ctrl_set(struct aq_hw_s *aq_hw,
-				u32 tx_intr_moderation_ctl,
-				u32 queue);
+void hw_atl_reg_tx_intr_moder_ctrl_set(struct aq_hw_s *aq_hw,
+				       u32 tx_intr_moderation_ctl,
+				       u32 queue);
 
 /* set global microprocessor scratch pad */
-void reg_glb_cpu_scratch_scp_set(struct aq_hw_s *aq_hw,
-				 u32 glb_cpu_scratch_scp, u32 scratch_scp);
+void hw_atl_reg_glb_cpu_scratch_scp_set(struct aq_hw_s *aq_hw,
+					u32 glb_cpu_scratch_scp,
+					u32 scratch_scp);
 
 /* rpb */
 
 /* set dma system loopback */
-void rpb_dma_sys_lbk_set(struct aq_hw_s *aq_hw, u32 dma_sys_lbk);
+void hw_atl_rpb_dma_sys_lbk_set(struct aq_hw_s *aq_hw, u32 dma_sys_lbk);
 
 /* set rx traffic class mode */
-void rpb_rpf_rx_traf_class_mode_set(struct aq_hw_s *aq_hw,
-				    u32 rx_traf_class_mode);
+void hw_atl_rpb_rpf_rx_traf_class_mode_set(struct aq_hw_s *aq_hw,
+					   u32 rx_traf_class_mode);
 
 /* set rx buffer enable */
-void rpb_rx_buff_en_set(struct aq_hw_s *aq_hw, u32 rx_buff_en);
+void hw_atl_rpb_rx_buff_en_set(struct aq_hw_s *aq_hw, u32 rx_buff_en);
 
 /* set rx buffer high threshold (per tc) */
-void rpb_rx_buff_hi_threshold_per_tc_set(struct aq_hw_s *aq_hw,
-					 u32 rx_buff_hi_threshold_per_tc,
-					 u32 buffer);
+void hw_atl_rpb_rx_buff_hi_threshold_per_tc_set(struct aq_hw_s *aq_hw,
+						u32 rx_buff_hi_threshold_per_tc,
+						u32 buffer);
 
 /* set rx buffer low threshold (per tc) */
-void rpb_rx_buff_lo_threshold_per_tc_set(struct aq_hw_s *aq_hw,
-					 u32 rx_buff_lo_threshold_per_tc,
+void hw_atl_rpb_rx_buff_lo_threshold_per_tc_set(struct aq_hw_s *aq_hw,
+						u32 rx_buff_lo_threshold_per_tc,
 					 u32 buffer);
 
 /* set rx flow control mode */
-void rpb_rx_flow_ctl_mode_set(struct aq_hw_s *aq_hw, u32 rx_flow_ctl_mode);
+void hw_atl_rpb_rx_flow_ctl_mode_set(struct aq_hw_s *aq_hw, u32 rx_flow_ctl_mode);
 
 /* set rx packet buffer size (per tc) */
-void rpb_rx_pkt_buff_size_per_tc_set(struct aq_hw_s *aq_hw,
-				     u32 rx_pkt_buff_size_per_tc,
-				     u32 buffer);
+void hw_atl_rpb_rx_pkt_buff_size_per_tc_set(struct aq_hw_s *aq_hw,
+					    u32 rx_pkt_buff_size_per_tc,
+					    u32 buffer);
 
 /* set rx xoff enable (per tc) */
-void rpb_rx_xoff_en_per_tc_set(struct aq_hw_s *aq_hw, u32 rx_xoff_en_per_tc,
-			       u32 buffer);
+void hw_atl_rpb_rx_xoff_en_per_tc_set(struct aq_hw_s *aq_hw, u32 rx_xoff_en_per_tc,
+				      u32 buffer);
 
 /* rpf */
 
 /* set l2 broadcast count threshold */
-void rpfl2broadcast_count_threshold_set(struct aq_hw_s *aq_hw,
-					u32 l2broadcast_count_threshold);
+void hw_atl_rpfl2broadcast_count_threshold_set(struct aq_hw_s *aq_hw,
+					       u32 l2broadcast_count_threshold);
 
 /* set l2 broadcast enable */
-void rpfl2broadcast_en_set(struct aq_hw_s *aq_hw, u32 l2broadcast_en);
+void hw_atl_rpfl2broadcast_en_set(struct aq_hw_s *aq_hw, u32 l2broadcast_en);
 
 /* set l2 broadcast filter action */
-void rpfl2broadcast_flr_act_set(struct aq_hw_s *aq_hw,
-				u32 l2broadcast_flr_act);
+void hw_atl_rpfl2broadcast_flr_act_set(struct aq_hw_s *aq_hw,
+				       u32 l2broadcast_flr_act);
 
 /* set l2 multicast filter enable */
-void rpfl2multicast_flr_en_set(struct aq_hw_s *aq_hw, u32 l2multicast_flr_en,
-			       u32 filter);
+void hw_atl_rpfl2multicast_flr_en_set(struct aq_hw_s *aq_hw,
+				      u32 l2multicast_flr_en,
+				      u32 filter);
 
 /* set l2 promiscuous mode enable */
-void rpfl2promiscuous_mode_en_set(struct aq_hw_s *aq_hw,
-				  u32 l2promiscuous_mode_en);
+void hw_atl_rpfl2promiscuous_mode_en_set(struct aq_hw_s *aq_hw,
+					 u32 l2promiscuous_mode_en);
 
 /* set l2 unicast filter action */
-void rpfl2unicast_flr_act_set(struct aq_hw_s *aq_hw, u32 l2unicast_flr_act,
-			      u32 filter);
+void hw_atl_rpfl2unicast_flr_act_set(struct aq_hw_s *aq_hw,
+				     u32 l2unicast_flr_act,
+				     u32 filter);
 
 /* set l2 unicast filter enable */
-void rpfl2_uc_flr_en_set(struct aq_hw_s *aq_hw, u32 l2unicast_flr_en,
-			 u32 filter);
+void hw_atl_rpfl2_uc_flr_en_set(struct aq_hw_s *aq_hw, u32 l2unicast_flr_en,
+				u32 filter);
 
 /* set l2 unicast destination address lsw */
-void rpfl2unicast_dest_addresslsw_set(struct aq_hw_s *aq_hw,
-				      u32 l2unicast_dest_addresslsw,
+void hw_atl_rpfl2unicast_dest_addresslsw_set(struct aq_hw_s *aq_hw,
+					     u32 l2unicast_dest_addresslsw,
 				      u32 filter);
 
 /* set l2 unicast destination address msw */
-void rpfl2unicast_dest_addressmsw_set(struct aq_hw_s *aq_hw,
-				      u32 l2unicast_dest_addressmsw,
+void hw_atl_rpfl2unicast_dest_addressmsw_set(struct aq_hw_s *aq_hw,
+					     u32 l2unicast_dest_addressmsw,
 				      u32 filter);
 
 /* Set L2 Accept all Multicast packets */
-void rpfl2_accept_all_mc_packets_set(struct aq_hw_s *aq_hw,
-				     u32 l2_accept_all_mc_packets);
+void hw_atl_rpfl2_accept_all_mc_packets_set(struct aq_hw_s *aq_hw,
+					    u32 l2_accept_all_mc_packets);
 
 /* set user-priority tc mapping */
-void rpf_rpb_user_priority_tc_map_set(struct aq_hw_s *aq_hw,
-				      u32 user_priority_tc_map, u32 tc);
+void hw_atl_rpf_rpb_user_priority_tc_map_set(struct aq_hw_s *aq_hw,
+					     u32 user_priority_tc_map, u32 tc);
 
 /* set rss key address */
-void rpf_rss_key_addr_set(struct aq_hw_s *aq_hw, u32 rss_key_addr);
+void hw_atl_rpf_rss_key_addr_set(struct aq_hw_s *aq_hw, u32 rss_key_addr);
 
 /* set rss key write data */
-void rpf_rss_key_wr_data_set(struct aq_hw_s *aq_hw, u32 rss_key_wr_data);
+void hw_atl_rpf_rss_key_wr_data_set(struct aq_hw_s *aq_hw, u32 rss_key_wr_data);
 
 /* get rss key write enable */
-u32 rpf_rss_key_wr_en_get(struct aq_hw_s *aq_hw);
+u32 hw_atl_rpf_rss_key_wr_en_get(struct aq_hw_s *aq_hw);
 
 /* set rss key write enable */
-void rpf_rss_key_wr_en_set(struct aq_hw_s *aq_hw, u32 rss_key_wr_en);
+void hw_atl_rpf_rss_key_wr_en_set(struct aq_hw_s *aq_hw, u32 rss_key_wr_en);
 
 /* set rss redirection table address */
-void rpf_rss_redir_tbl_addr_set(struct aq_hw_s *aq_hw,
-				u32 rss_redir_tbl_addr);
+void hw_atl_rpf_rss_redir_tbl_addr_set(struct aq_hw_s *aq_hw,
+				       u32 rss_redir_tbl_addr);
 
 /* set rss redirection table write data */
-void rpf_rss_redir_tbl_wr_data_set(struct aq_hw_s *aq_hw,
-				   u32 rss_redir_tbl_wr_data);
+void hw_atl_rpf_rss_redir_tbl_wr_data_set(struct aq_hw_s *aq_hw,
+					  u32 rss_redir_tbl_wr_data);
 
 /* get rss redirection write enable */
-u32 rpf_rss_redir_wr_en_get(struct aq_hw_s *aq_hw);
+u32 hw_atl_rpf_rss_redir_wr_en_get(struct aq_hw_s *aq_hw);
 
 /* set rss redirection write enable */
-void rpf_rss_redir_wr_en_set(struct aq_hw_s *aq_hw, u32 rss_redir_wr_en);
+void hw_atl_rpf_rss_redir_wr_en_set(struct aq_hw_s *aq_hw, u32 rss_redir_wr_en);
 
 /* set tpo to rpf system loopback */
-void rpf_tpo_to_rpf_sys_lbk_set(struct aq_hw_s *aq_hw,
-				u32 tpo_to_rpf_sys_lbk);
+void hw_atl_rpf_tpo_to_rpf_sys_lbk_set(struct aq_hw_s *aq_hw,
+				       u32 tpo_to_rpf_sys_lbk);
 
 /* set vlan inner ethertype */
-void rpf_vlan_inner_etht_set(struct aq_hw_s *aq_hw, u32 vlan_inner_etht);
+void hw_atl_rpf_vlan_inner_etht_set(struct aq_hw_s *aq_hw, u32 vlan_inner_etht);
 
 /* set vlan outer ethertype */
-void rpf_vlan_outer_etht_set(struct aq_hw_s *aq_hw, u32 vlan_outer_etht);
+void hw_atl_rpf_vlan_outer_etht_set(struct aq_hw_s *aq_hw, u32 vlan_outer_etht);
 
 /* set vlan promiscuous mode enable */
-void rpf_vlan_prom_mode_en_set(struct aq_hw_s *aq_hw, u32 vlan_prom_mode_en);
+void hw_atl_rpf_vlan_prom_mode_en_set(struct aq_hw_s *aq_hw,
+				      u32 vlan_prom_mode_en);
 
 /* Set VLAN untagged action */
-void rpf_vlan_untagged_act_set(struct aq_hw_s *aq_hw, u32 vlan_untagged_act);
+void hw_atl_rpf_vlan_untagged_act_set(struct aq_hw_s *aq_hw,
+				      u32 vlan_untagged_act);
 
 /* Set VLAN accept untagged packets */
-void rpf_vlan_accept_untagged_packets_set(struct aq_hw_s *aq_hw,
-					  u32 vlan_accept_untagged_packets);
+void hw_atl_rpf_vlan_accept_untagged_packets_set(struct aq_hw_s *aq_hw,
+						 u32 vlan_acc_untagged_packets);
 
 /* Set VLAN filter enable */
-void rpf_vlan_flr_en_set(struct aq_hw_s *aq_hw, u32 vlan_flr_en, u32 filter);
+void hw_atl_rpf_vlan_flr_en_set(struct aq_hw_s *aq_hw, u32 vlan_flr_en,
+				u32 filter);
 
 /* Set VLAN Filter Action */
-void rpf_vlan_flr_act_set(struct aq_hw_s *aq_hw, u32 vlan_filter_act,
-			  u32 filter);
+void hw_atl_rpf_vlan_flr_act_set(struct aq_hw_s *aq_hw, u32 vlan_filter_act,
+				 u32 filter);
 
 /* Set VLAN ID Filter */
-void rpf_vlan_id_flr_set(struct aq_hw_s *aq_hw, u32 vlan_id_flr, u32 filter);
+void hw_atl_rpf_vlan_id_flr_set(struct aq_hw_s *aq_hw, u32 vlan_id_flr,
+				u32 filter);
 
 /* set ethertype filter enable */
-void rpf_etht_flr_en_set(struct aq_hw_s *aq_hw, u32 etht_flr_en, u32 filter);
+void hw_atl_rpf_etht_flr_en_set(struct aq_hw_s *aq_hw, u32 etht_flr_en,
+				u32 filter);
 
 /* set  ethertype user-priority enable */
-void rpf_etht_user_priority_en_set(struct aq_hw_s *aq_hw,
-				   u32 etht_user_priority_en, u32 filter);
+void hw_atl_rpf_etht_user_priority_en_set(struct aq_hw_s *aq_hw,
+					  u32 etht_user_priority_en,
+					  u32 filter);
 
 /* set  ethertype rx queue enable */
-void rpf_etht_rx_queue_en_set(struct aq_hw_s *aq_hw, u32 etht_rx_queue_en,
-			      u32 filter);
+void hw_atl_rpf_etht_rx_queue_en_set(struct aq_hw_s *aq_hw,
+				     u32 etht_rx_queue_en,
+				     u32 filter);
 
 /* set ethertype rx queue */
-void rpf_etht_rx_queue_set(struct aq_hw_s *aq_hw, u32 etht_rx_queue,
-			   u32 filter);
+void hw_atl_rpf_etht_rx_queue_set(struct aq_hw_s *aq_hw, u32 etht_rx_queue,
+				  u32 filter);
 
 /* set ethertype user-priority */
-void rpf_etht_user_priority_set(struct aq_hw_s *aq_hw, u32 etht_user_priority,
-				u32 filter);
+void hw_atl_rpf_etht_user_priority_set(struct aq_hw_s *aq_hw,
+				       u32 etht_user_priority,
+				       u32 filter);
 
 /* set ethertype management queue */
-void rpf_etht_mgt_queue_set(struct aq_hw_s *aq_hw, u32 etht_mgt_queue,
-			    u32 filter);
+void hw_atl_rpf_etht_mgt_queue_set(struct aq_hw_s *aq_hw, u32 etht_mgt_queue,
+				   u32 filter);
 
 /* set ethertype filter action */
-void rpf_etht_flr_act_set(struct aq_hw_s *aq_hw, u32 etht_flr_act,
-			  u32 filter);
+void hw_atl_rpf_etht_flr_act_set(struct aq_hw_s *aq_hw, u32 etht_flr_act,
+				 u32 filter);
 
 /* set ethertype filter */
-void rpf_etht_flr_set(struct aq_hw_s *aq_hw, u32 etht_flr, u32 filter);
+void hw_atl_rpf_etht_flr_set(struct aq_hw_s *aq_hw, u32 etht_flr, u32 filter);
 
 /* rpo */
 
 /* set ipv4 header checksum offload enable */
-void rpo_ipv4header_crc_offload_en_set(struct aq_hw_s *aq_hw,
-				       u32 ipv4header_crc_offload_en);
+void hw_atl_rpo_ipv4header_crc_offload_en_set(struct aq_hw_s *aq_hw,
+					      u32 ipv4header_crc_offload_en);
 
 /* set rx descriptor vlan stripping */
-void rpo_rx_desc_vlan_stripping_set(struct aq_hw_s *aq_hw,
-				    u32 rx_desc_vlan_stripping,
-				    u32 descriptor);
+void hw_atl_rpo_rx_desc_vlan_stripping_set(struct aq_hw_s *aq_hw,
+					   u32 rx_desc_vlan_stripping,
+					   u32 descriptor);
 
 /* set tcp/udp checksum offload enable */
-void rpo_tcp_udp_crc_offload_en_set(struct aq_hw_s *aq_hw,
-				    u32 tcp_udp_crc_offload_en);
+void hw_atl_rpo_tcp_udp_crc_offload_en_set(struct aq_hw_s *aq_hw,
+					   u32 tcp_udp_crc_offload_en);
 
 /* Set LRO Patch Optimization Enable. */
-void rpo_lro_patch_optimization_en_set(struct aq_hw_s *aq_hw,
-				       u32 lro_patch_optimization_en);
+void hw_atl_rpo_lro_patch_optimization_en_set(struct aq_hw_s *aq_hw,
+					      u32 lro_patch_optimization_en);
 
 /* Set Large Receive Offload Enable */
-void rpo_lro_en_set(struct aq_hw_s *aq_hw, u32 lro_en);
+void hw_atl_rpo_lro_en_set(struct aq_hw_s *aq_hw, u32 lro_en);
 
 /* Set LRO Q Sessions Limit */
-void rpo_lro_qsessions_lim_set(struct aq_hw_s *aq_hw, u32 lro_qsessions_lim);
+void hw_atl_rpo_lro_qsessions_lim_set(struct aq_hw_s *aq_hw,
+				      u32 lro_qsessions_lim);
 
 /* Set LRO Total Descriptor Limit */
-void rpo_lro_total_desc_lim_set(struct aq_hw_s *aq_hw, u32 lro_total_desc_lim);
+void hw_atl_rpo_lro_total_desc_lim_set(struct aq_hw_s *aq_hw,
+				       u32 lro_total_desc_lim);
 
 /* Set LRO Min Payload of First Packet */
-void rpo_lro_min_pay_of_first_pkt_set(struct aq_hw_s *aq_hw,
-				      u32 lro_min_pld_of_first_pkt);
+void hw_atl_rpo_lro_min_pay_of_first_pkt_set(struct aq_hw_s *aq_hw,
+					     u32 lro_min_pld_of_first_pkt);
 
 /* Set LRO Packet Limit */
-void rpo_lro_pkt_lim_set(struct aq_hw_s *aq_hw, u32 lro_packet_lim);
+void hw_atl_rpo_lro_pkt_lim_set(struct aq_hw_s *aq_hw, u32 lro_packet_lim);
 
 /* Set LRO Max Number of Descriptors */
-void rpo_lro_max_num_of_descriptors_set(struct aq_hw_s *aq_hw,
-					u32 lro_max_desc_num, u32 lro);
+void hw_atl_rpo_lro_max_num_of_descriptors_set(struct aq_hw_s *aq_hw,
+					       u32 lro_max_desc_num, u32 lro);
 
 /* Set LRO Time Base Divider */
-void rpo_lro_time_base_divider_set(struct aq_hw_s *aq_hw,
-				   u32 lro_time_base_divider);
+void hw_atl_rpo_lro_time_base_divider_set(struct aq_hw_s *aq_hw,
+					  u32 lro_time_base_divider);
 
 /*Set LRO Inactive Interval */
-void rpo_lro_inactive_interval_set(struct aq_hw_s *aq_hw,
-				   u32 lro_inactive_interval);
+void hw_atl_rpo_lro_inactive_interval_set(struct aq_hw_s *aq_hw,
+					  u32 lro_inactive_interval);
 
 /*Set LRO Max Coalescing Interval */
-void rpo_lro_max_coalescing_interval_set(struct aq_hw_s *aq_hw,
-					 u32 lro_max_coalescing_interval);
+void hw_atl_rpo_lro_max_coalescing_interval_set(struct aq_hw_s *aq_hw,
+						u32 lro_max_coal_interval);
 
 /* rx */
 
 /* set rx register reset disable */
-void rx_rx_reg_res_dis_set(struct aq_hw_s *aq_hw, u32 rx_reg_res_dis);
+void hw_atl_rx_rx_reg_res_dis_set(struct aq_hw_s *aq_hw, u32 rx_reg_res_dis);
 
 /* tdm */
 
 /* set cpu id */
-void tdm_cpu_id_set(struct aq_hw_s *aq_hw, u32 cpuid, u32 dca);
+void hw_atl_tdm_cpu_id_set(struct aq_hw_s *aq_hw, u32 cpuid, u32 dca);
 
 /* set large send offload enable */
-void tdm_large_send_offload_en_set(struct aq_hw_s *aq_hw,
-				   u32 large_send_offload_en);
+void hw_atl_tdm_large_send_offload_en_set(struct aq_hw_s *aq_hw,
+					  u32 large_send_offload_en);
 
 /* set tx descriptor enable */
-void tdm_tx_desc_en_set(struct aq_hw_s *aq_hw, u32 tx_desc_en, u32 descriptor);
+void hw_atl_tdm_tx_desc_en_set(struct aq_hw_s *aq_hw, u32 tx_desc_en,
+			       u32 descriptor);
 
 /* set tx dca enable */
-void tdm_tx_dca_en_set(struct aq_hw_s *aq_hw, u32 tx_dca_en);
+void hw_atl_tdm_tx_dca_en_set(struct aq_hw_s *aq_hw, u32 tx_dca_en);
 
 /* set tx dca mode */
-void tdm_tx_dca_mode_set(struct aq_hw_s *aq_hw, u32 tx_dca_mode);
+void hw_atl_tdm_tx_dca_mode_set(struct aq_hw_s *aq_hw, u32 tx_dca_mode);
 
 /* set tx descriptor dca enable */
-void tdm_tx_desc_dca_en_set(struct aq_hw_s *aq_hw, u32 tx_desc_dca_en, u32 dca);
+void hw_atl_tdm_tx_desc_dca_en_set(struct aq_hw_s *aq_hw, u32 tx_desc_dca_en,
+				   u32 dca);
 
 /* get tx descriptor head pointer */
-u32 tdm_tx_desc_head_ptr_get(struct aq_hw_s *aq_hw, u32 descriptor);
+u32 hw_atl_tdm_tx_desc_head_ptr_get(struct aq_hw_s *aq_hw, u32 descriptor);
 
 /* set tx descriptor length */
-void tdm_tx_desc_len_set(struct aq_hw_s *aq_hw, u32 tx_desc_len,
-			 u32 descriptor);
+void hw_atl_tdm_tx_desc_len_set(struct aq_hw_s *aq_hw, u32 tx_desc_len,
+				u32 descriptor);
 
 /* set tx descriptor write-back interrupt enable */
-void tdm_tx_desc_wr_wb_irq_en_set(struct aq_hw_s *aq_hw,
-				  u32 tx_desc_wr_wb_irq_en);
+void hw_atl_tdm_tx_desc_wr_wb_irq_en_set(struct aq_hw_s *aq_hw,
+					 u32 tx_desc_wr_wb_irq_en);
 
 /* set tx descriptor write-back threshold */
-void tdm_tx_desc_wr_wb_threshold_set(struct aq_hw_s *aq_hw,
-				     u32 tx_desc_wr_wb_threshold,
+void hw_atl_tdm_tx_desc_wr_wb_threshold_set(struct aq_hw_s *aq_hw,
+					    u32 tx_desc_wr_wb_threshold,
 				     u32 descriptor);
 
 /* Set TDM Interrupt Moderation Enable */
-void tdm_tdm_intr_moder_en_set(struct aq_hw_s *aq_hw,
-			       u32 tdm_irq_moderation_en);
+void hw_atl_tdm_tdm_intr_moder_en_set(struct aq_hw_s *aq_hw,
+				      u32 tdm_irq_moderation_en);
 /* thm */
 
 /* set lso tcp flag of first packet */
-void thm_lso_tcp_flag_of_first_pkt_set(struct aq_hw_s *aq_hw,
-				       u32 lso_tcp_flag_of_first_pkt);
+void hw_atl_thm_lso_tcp_flag_of_first_pkt_set(struct aq_hw_s *aq_hw,
+					      u32 lso_tcp_flag_of_first_pkt);
 
 /* set lso tcp flag of last packet */
-void thm_lso_tcp_flag_of_last_pkt_set(struct aq_hw_s *aq_hw,
-				      u32 lso_tcp_flag_of_last_pkt);
+void hw_atl_thm_lso_tcp_flag_of_last_pkt_set(struct aq_hw_s *aq_hw,
+					     u32 lso_tcp_flag_of_last_pkt);
 
 /* set lso tcp flag of middle packet */
-void thm_lso_tcp_flag_of_middle_pkt_set(struct aq_hw_s *aq_hw,
-					u32 lso_tcp_flag_of_middle_pkt);
+void hw_atl_thm_lso_tcp_flag_of_middle_pkt_set(struct aq_hw_s *aq_hw,
+					       u32 lso_tcp_flag_of_middle_pkt);
 
 /* tpb */
 
 /* set tx buffer enable */
-void tpb_tx_buff_en_set(struct aq_hw_s *aq_hw, u32 tx_buff_en);
+void hw_atl_tpb_tx_buff_en_set(struct aq_hw_s *aq_hw, u32 tx_buff_en);
 
 /* set tx buffer high threshold (per tc) */
-void tpb_tx_buff_hi_threshold_per_tc_set(struct aq_hw_s *aq_hw,
-					 u32 tx_buff_hi_threshold_per_tc,
+void hw_atl_tpb_tx_buff_hi_threshold_per_tc_set(struct aq_hw_s *aq_hw,
+						u32 tx_buff_hi_threshold_per_tc,
 					 u32 buffer);
 
 /* set tx buffer low threshold (per tc) */
-void tpb_tx_buff_lo_threshold_per_tc_set(struct aq_hw_s *aq_hw,
-					 u32 tx_buff_lo_threshold_per_tc,
+void hw_atl_tpb_tx_buff_lo_threshold_per_tc_set(struct aq_hw_s *aq_hw,
+						u32 tx_buff_lo_threshold_per_tc,
 					 u32 buffer);
 
 /* set tx dma system loopback enable */
-void tpb_tx_dma_sys_lbk_en_set(struct aq_hw_s *aq_hw, u32 tx_dma_sys_lbk_en);
+void hw_atl_tpb_tx_dma_sys_lbk_en_set(struct aq_hw_s *aq_hw, u32 tx_dma_sys_lbk_en);
 
 /* set tx packet buffer size (per tc) */
-void tpb_tx_pkt_buff_size_per_tc_set(struct aq_hw_s *aq_hw,
-				     u32 tx_pkt_buff_size_per_tc, u32 buffer);
+void hw_atl_tpb_tx_pkt_buff_size_per_tc_set(struct aq_hw_s *aq_hw,
+					    u32 tx_pkt_buff_size_per_tc, u32 buffer);
 
 /* set tx path pad insert enable */
-void tpb_tx_path_scp_ins_en_set(struct aq_hw_s *aq_hw, u32 tx_path_scp_ins_en);
+void hw_atl_tpb_tx_path_scp_ins_en_set(struct aq_hw_s *aq_hw, u32 tx_path_scp_ins_en);
 
 /* tpo */
 
 /* set ipv4 header checksum offload enable */
-void tpo_ipv4header_crc_offload_en_set(struct aq_hw_s *aq_hw,
-				       u32 ipv4header_crc_offload_en);
+void hw_atl_tpo_ipv4header_crc_offload_en_set(struct aq_hw_s *aq_hw,
+					      u32 ipv4header_crc_offload_en);
 
 /* set tcp/udp checksum offload enable */
-void tpo_tcp_udp_crc_offload_en_set(struct aq_hw_s *aq_hw,
-				    u32 tcp_udp_crc_offload_en);
+void hw_atl_tpo_tcp_udp_crc_offload_en_set(struct aq_hw_s *aq_hw,
+					   u32 tcp_udp_crc_offload_en);
 
 /* set tx pkt system loopback enable */
-void tpo_tx_pkt_sys_lbk_en_set(struct aq_hw_s *aq_hw, u32 tx_pkt_sys_lbk_en);
+void hw_atl_tpo_tx_pkt_sys_lbk_en_set(struct aq_hw_s *aq_hw,
+				      u32 tx_pkt_sys_lbk_en);
 
 /* tps */
 
 /* set tx packet scheduler data arbitration mode */
-void tps_tx_pkt_shed_data_arb_mode_set(struct aq_hw_s *aq_hw,
-				       u32 tx_pkt_shed_data_arb_mode);
+void hw_atl_tps_tx_pkt_shed_data_arb_mode_set(struct aq_hw_s *aq_hw,
+					      u32 tx_pkt_shed_data_arb_mode);
 
 /* set tx packet scheduler descriptor rate current time reset */
-void tps_tx_pkt_shed_desc_rate_curr_time_res_set(struct aq_hw_s *aq_hw,
-						 u32 curr_time_res);
+void hw_atl_tps_tx_pkt_shed_desc_rate_curr_time_res_set(struct aq_hw_s *aq_hw,
+							u32 curr_time_res);
 
 /* set tx packet scheduler descriptor rate limit */
-void tps_tx_pkt_shed_desc_rate_lim_set(struct aq_hw_s *aq_hw,
-				       u32 tx_pkt_shed_desc_rate_lim);
+void hw_atl_tps_tx_pkt_shed_desc_rate_lim_set(struct aq_hw_s *aq_hw,
+					      u32 tx_pkt_shed_desc_rate_lim);
 
 /* set tx packet scheduler descriptor tc arbitration mode */
-void tps_tx_pkt_shed_desc_tc_arb_mode_set(struct aq_hw_s *aq_hw,
-					  u32 tx_pkt_shed_desc_tc_arb_mode);
+void hw_atl_tps_tx_pkt_shed_desc_tc_arb_mode_set(struct aq_hw_s *aq_hw,
+						 u32 arb_mode);
 
 /* set tx packet scheduler descriptor tc max credit */
-void tps_tx_pkt_shed_desc_tc_max_credit_set(struct aq_hw_s *aq_hw,
-					    u32 tx_pkt_shed_desc_tc_max_credit,
+void hw_atl_tps_tx_pkt_shed_desc_tc_max_credit_set(struct aq_hw_s *aq_hw,
+						   u32 max_credit,
 					    u32 tc);
 
 /* set tx packet scheduler descriptor tc weight */
-void tps_tx_pkt_shed_desc_tc_weight_set(struct aq_hw_s *aq_hw,
-					u32 tx_pkt_shed_desc_tc_weight,
+void hw_atl_tps_tx_pkt_shed_desc_tc_weight_set(struct aq_hw_s *aq_hw,
+					       u32 tx_pkt_shed_desc_tc_weight,
 					u32 tc);
 
 /* set tx packet scheduler descriptor vm arbitration mode */
-void tps_tx_pkt_shed_desc_vm_arb_mode_set(struct aq_hw_s *aq_hw,
-					  u32 tx_pkt_shed_desc_vm_arb_mode);
+void hw_atl_tps_tx_pkt_shed_desc_vm_arb_mode_set(struct aq_hw_s *aq_hw,
+						 u32 arb_mode);
 
 /* set tx packet scheduler tc data max credit */
-void tps_tx_pkt_shed_tc_data_max_credit_set(struct aq_hw_s *aq_hw,
-					    u32 tx_pkt_shed_tc_data_max_credit,
+void hw_atl_tps_tx_pkt_shed_tc_data_max_credit_set(struct aq_hw_s *aq_hw,
+						   u32 max_credit,
 					    u32 tc);
 
 /* set tx packet scheduler tc data weight */
-void tps_tx_pkt_shed_tc_data_weight_set(struct aq_hw_s *aq_hw,
-					u32 tx_pkt_shed_tc_data_weight,
+void hw_atl_tps_tx_pkt_shed_tc_data_weight_set(struct aq_hw_s *aq_hw,
+					       u32 tx_pkt_shed_tc_data_weight,
 					u32 tc);
 
 /* tx */
 
 /* set tx register reset disable */
-void tx_tx_reg_res_dis_set(struct aq_hw_s *aq_hw, u32 tx_reg_res_dis);
+void hw_atl_tx_tx_reg_res_dis_set(struct aq_hw_s *aq_hw, u32 tx_reg_res_dis);
 
 /* msm */
 
 /* get register access status */
-u32 msm_reg_access_status_get(struct aq_hw_s *aq_hw);
+u32 hw_atl_msm_reg_access_status_get(struct aq_hw_s *aq_hw);
 
 /* set  register address for indirect address */
-void msm_reg_addr_for_indirect_addr_set(struct aq_hw_s *aq_hw,
-					u32 reg_addr_for_indirect_addr);
+void hw_atl_msm_reg_addr_for_indirect_addr_set(struct aq_hw_s *aq_hw,
+					       u32 reg_addr_for_indirect_addr);
 
 /* set register read strobe */
-void msm_reg_rd_strobe_set(struct aq_hw_s *aq_hw, u32 reg_rd_strobe);
+void hw_atl_msm_reg_rd_strobe_set(struct aq_hw_s *aq_hw, u32 reg_rd_strobe);
 
 /* get  register read data */
-u32 msm_reg_rd_data_get(struct aq_hw_s *aq_hw);
+u32 hw_atl_msm_reg_rd_data_get(struct aq_hw_s *aq_hw);
 
 /* set  register write data */
-void msm_reg_wr_data_set(struct aq_hw_s *aq_hw, u32 reg_wr_data);
+void hw_atl_msm_reg_wr_data_set(struct aq_hw_s *aq_hw, u32 reg_wr_data);
 
 /* set register write strobe */
-void msm_reg_wr_strobe_set(struct aq_hw_s *aq_hw, u32 reg_wr_strobe);
+void hw_atl_msm_reg_wr_strobe_set(struct aq_hw_s *aq_hw, u32 reg_wr_strobe);
 
 /* pci */
 
 /* set pci register reset disable */
-void pci_pci_reg_res_dis_set(struct aq_hw_s *aq_hw, u32 pci_reg_res_dis);
+void hw_atl_pci_pci_reg_res_dis_set(struct aq_hw_s *aq_hw, u32 pci_reg_res_dis);
 
 #endif /* HW_ATL_LLH_H */
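
The msm accessors declared above are the building blocks of an indirect register window: software writes the target address, pulses a strobe, polls the busy flag, then reads (or writes) the data word. The following is a hypothetical sketch of a read sequence built from these helpers; the retry bound and error code are illustrative, and the driver's real callers may sequence this differently.

/* Hypothetical helper: indirect read of one MSM register via the
 * accessors declared above. Assumes <linux/errno.h> for -ETIME;
 * the retry count is illustrative, not taken from the driver.
 */
static int msm_indirect_read(struct aq_hw_s *aq_hw, u32 msm_addr, u32 *val)
{
	int retries = 100;

	hw_atl_msm_reg_addr_for_indirect_addr_set(aq_hw, msm_addr);
	hw_atl_msm_reg_rd_strobe_set(aq_hw, 1U);

	/* wait for the busy flag to clear before fetching the data */
	while (hw_atl_msm_reg_access_status_get(aq_hw)) {
		if (--retries == 0)
			return -ETIME;
	}

	*val = hw_atl_msm_reg_rd_data_get(aq_hw);
	return 0;
}
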
diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh_internal.h b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh_internal.h
index 93450ec930e8..e0cf70120f1d 100644
--- a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh_internal.h
+++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh_internal.h
@@ -18,91 +18,91 @@
  * base address: 0x000003a0
  * parameter: semaphore {s} | stride size 0x4 | range [0, 15]
  */
-#define glb_cpu_sem_adr(semaphore)  (0x000003a0u + (semaphore) * 0x4)
+#define HW_ATL_GLB_CPU_SEM_ADR(semaphore)  (0x000003a0u + (semaphore) * 0x4)
 /* register address for bitfield rx dma good octet counter lsw [1f:0] */
-#define stats_rx_dma_good_octet_counterlsw__adr 0x00006808
+#define HW_ATL_STATS_RX_DMA_GOOD_OCTET_COUNTERLSW 0x00006808
 /* register address for bitfield rx dma good packet counter lsw [1f:0] */
-#define stats_rx_dma_good_pkt_counterlsw__adr 0x00006800
+#define HW_ATL_STATS_RX_DMA_GOOD_PKT_COUNTERLSW 0x00006800
 /* register address for bitfield tx dma good octet counter lsw [1f:0] */
-#define stats_tx_dma_good_octet_counterlsw__adr 0x00008808
+#define HW_ATL_STATS_TX_DMA_GOOD_OCTET_COUNTERLSW 0x00008808
 /* register address for bitfield tx dma good packet counter lsw [1f:0] */
-#define stats_tx_dma_good_pkt_counterlsw__adr 0x00008800
+#define HW_ATL_STATS_TX_DMA_GOOD_PKT_COUNTERLSW 0x00008800
 
 /* register address for bitfield rx dma good octet counter msw [3f:20] */
-#define stats_rx_dma_good_octet_countermsw__adr 0x0000680c
+#define HW_ATL_STATS_RX_DMA_GOOD_OCTET_COUNTERMSW 0x0000680c
 /* register address for bitfield rx dma good packet counter msw [3f:20] */
-#define stats_rx_dma_good_pkt_countermsw__adr 0x00006804
+#define HW_ATL_STATS_RX_DMA_GOOD_PKT_COUNTERMSW 0x00006804
 /* register address for bitfield tx dma good octet counter msw [3f:20] */
-#define stats_tx_dma_good_octet_countermsw__adr 0x0000880c
+#define HW_ATL_STATS_TX_DMA_GOOD_OCTET_COUNTERMSW 0x0000880c
 /* register address for bitfield tx dma good packet counter msw [3f:20] */
-#define stats_tx_dma_good_pkt_countermsw__adr 0x00008804
+#define HW_ATL_STATS_TX_DMA_GOOD_PKT_COUNTERMSW 0x00008804
 
 /* preprocessor definitions for msm rx errors counter register */
-#define mac_msm_rx_errs_cnt_adr 0x00000120u
+#define HW_ATL_MAC_MSM_RX_ERRS_CNT_ADR 0x00000120u
 
 /* preprocessor definitions for msm rx unicast frames counter register */
-#define mac_msm_rx_ucst_frm_cnt_adr 0x000000e0u
+#define HW_ATL_MAC_MSM_RX_UCST_FRM_CNT_ADR 0x000000e0u
 
 /* preprocessor definitions for msm rx multicast frames counter register */
-#define mac_msm_rx_mcst_frm_cnt_adr 0x000000e8u
+#define HW_ATL_MAC_MSM_RX_MCST_FRM_CNT_ADR 0x000000e8u
 
 /* preprocessor definitions for msm rx broadcast frames counter register */
-#define mac_msm_rx_bcst_frm_cnt_adr 0x000000f0u
+#define HW_ATL_MAC_MSM_RX_BCST_FRM_CNT_ADR 0x000000f0u
 
 /* preprocessor definitions for msm rx broadcast octets counter register 1 */
-#define mac_msm_rx_bcst_octets_counter1_adr 0x000001b0u
+#define HW_ATL_MAC_MSM_RX_BCST_OCTETS_COUNTER1_ADR 0x000001b0u
 
 /* preprocessor definitions for msm rx broadcast octets counter register 2 */
-#define mac_msm_rx_bcst_octets_counter2_adr 0x000001b4u
+#define HW_ATL_MAC_MSM_RX_BCST_OCTETS_COUNTER2_ADR 0x000001b4u
 
 /* preprocessor definitions for msm rx unicast octets counter register 0 */
-#define mac_msm_rx_ucst_octets_counter0_adr 0x000001b8u
+#define HW_ATL_MAC_MSM_RX_UCST_OCTETS_COUNTER0_ADR 0x000001b8u
 
 /* preprocessor definitions for rx dma statistics counter 7 */
-#define rx_dma_stat_counter7_adr 0x00006818u
+#define HW_ATL_RX_DMA_STAT_COUNTER7_ADR 0x00006818u
 
 /* preprocessor definitions for msm tx unicast frames counter register */
-#define mac_msm_tx_ucst_frm_cnt_adr 0x00000108u
+#define HW_ATL_MAC_MSM_TX_UCST_FRM_CNT_ADR 0x00000108u
 
 /* preprocessor definitions for msm tx multicast frames counter register */
-#define mac_msm_tx_mcst_frm_cnt_adr 0x00000110u
+#define HW_ATL_MAC_MSM_TX_MCST_FRM_CNT_ADR 0x00000110u
 
 /* preprocessor definitions for global mif identification */
-#define glb_mif_id_adr 0x0000001cu
+#define HW_ATL_GLB_MIF_ID_ADR 0x0000001cu
 
 /* register address for bitfield iamr_lsw[1f:0] */
-#define itr_iamrlsw_adr 0x00002090
+#define HW_ATL_ITR_IAMRLSW_ADR 0x00002090
 /* register address for bitfield rx dma drop packet counter [1f:0] */
-#define rpb_rx_dma_drop_pkt_cnt_adr 0x00006818
+#define HW_ATL_RPB_RX_DMA_DROP_PKT_CNT_ADR 0x00006818
 
 /* register address for bitfield imcr_lsw[1f:0] */
-#define itr_imcrlsw_adr 0x00002070
+#define HW_ATL_ITR_IMCRLSW_ADR 0x00002070
 /* register address for bitfield imsr_lsw[1f:0] */
-#define itr_imsrlsw_adr 0x00002060
+#define HW_ATL_ITR_IMSRLSW_ADR 0x00002060
 /* register address for bitfield itr_reg_res_dsbl */
-#define itr_reg_res_dsbl_adr 0x00002300
+#define HW_ATL_ITR_REG_RES_DSBL_ADR 0x00002300
 /* bitmask for bitfield itr_reg_res_dsbl */
-#define itr_reg_res_dsbl_msk 0x20000000
+#define HW_ATL_ITR_REG_RES_DSBL_MSK 0x20000000
 /* lower bit position of bitfield itr_reg_res_dsbl */
-#define itr_reg_res_dsbl_shift 29
+#define HW_ATL_ITR_REG_RES_DSBL_SHIFT 29
 /* register address for bitfield iscr_lsw[1f:0] */
-#define itr_iscrlsw_adr 0x00002050
+#define HW_ATL_ITR_ISCRLSW_ADR 0x00002050
 /* register address for bitfield isr_lsw[1f:0] */
-#define itr_isrlsw_adr 0x00002000
+#define HW_ATL_ITR_ISRLSW_ADR 0x00002000
 /* register address for bitfield itr_reset */
-#define itr_res_adr 0x00002300
+#define HW_ATL_ITR_RES_ADR 0x00002300
 /* bitmask for bitfield itr_reset */
-#define itr_res_msk 0x80000000
+#define HW_ATL_ITR_RES_MSK 0x80000000
 /* lower bit position of bitfield itr_reset */
-#define itr_res_shift 31
+#define HW_ATL_ITR_RES_SHIFT 31
 /* register address for bitfield dca{d}_cpuid[7:0] */
-#define rdm_dcadcpuid_adr(dca) (0x00006100 + (dca) * 0x4)
+#define HW_ATL_RDM_DCADCPUID_ADR(dca) (0x00006100 + (dca) * 0x4)
 /* bitmask for bitfield dca{d}_cpuid[7:0] */
-#define rdm_dcadcpuid_msk 0x000000ff
+#define HW_ATL_RDM_DCADCPUID_MSK 0x000000ff
 /* lower bit position of bitfield dca{d}_cpuid[7:0] */
-#define rdm_dcadcpuid_shift 0
+#define HW_ATL_RDM_DCADCPUID_SHIFT 0
 /* register address for bitfield dca_en */
-#define rdm_dca_en_adr 0x00006180
+#define HW_ATL_RDM_DCA_EN_ADR 0x00006180
 
 /* rx dca_en bitfield definitions
  * preprocessor definitions for the bitfield "dca_en".
@@ -110,17 +110,17 @@
  */
 
 /* register address for bitfield dca_en */
-#define rdm_dca_en_adr 0x00006180
+#define HW_ATL_RDM_DCA_EN_ADR 0x00006180
 /* bitmask for bitfield dca_en */
-#define rdm_dca_en_msk 0x80000000
+#define HW_ATL_RDM_DCA_EN_MSK 0x80000000
 /* inverted bitmask for bitfield dca_en */
-#define rdm_dca_en_mskn 0x7fffffff
+#define HW_ATL_RDM_DCA_EN_MSKN 0x7fffffff
 /* lower bit position of bitfield dca_en */
-#define rdm_dca_en_shift 31
+#define HW_ATL_RDM_DCA_EN_SHIFT 31
 /* width of bitfield dca_en */
-#define rdm_dca_en_width 1
+#define HW_ATL_RDM_DCA_EN_WIDTH 1
 /* default value of bitfield dca_en */
-#define rdm_dca_en_default 0x1
+#define HW_ATL_RDM_DCA_EN_DEFAULT 0x1
 
 /* rx dca_mode[3:0] bitfield definitions
  * preprocessor definitions for the bitfield "dca_mode[3:0]".
@@ -128,17 +128,17 @@
  */
 
 /* register address for bitfield dca_mode[3:0] */
-#define rdm_dca_mode_adr 0x00006180
+#define HW_ATL_RDM_DCA_MODE_ADR 0x00006180
 /* bitmask for bitfield dca_mode[3:0] */
-#define rdm_dca_mode_msk 0x0000000f
+#define HW_ATL_RDM_DCA_MODE_MSK 0x0000000f
 /* inverted bitmask for bitfield dca_mode[3:0] */
-#define rdm_dca_mode_mskn 0xfffffff0
+#define HW_ATL_RDM_DCA_MODE_MSKN 0xfffffff0
 /* lower bit position of bitfield dca_mode[3:0] */
-#define rdm_dca_mode_shift 0
+#define HW_ATL_RDM_DCA_MODE_SHIFT 0
 /* width of bitfield dca_mode[3:0] */
-#define rdm_dca_mode_width 4
+#define HW_ATL_RDM_DCA_MODE_WIDTH 4
 /* default value of bitfield dca_mode[3:0] */
-#define rdm_dca_mode_default 0x0
+#define HW_ATL_RDM_DCA_MODE_DEFAULT 0x0
 
 /* rx desc{d}_data_size[4:0] bitfield definitions
  * preprocessor definitions for the bitfield "desc{d}_data_size[4:0]".
@@ -147,17 +147,18 @@
  */
 
 /* register address for bitfield desc{d}_data_size[4:0] */
-#define rdm_descddata_size_adr(descriptor) (0x00005b18 + (descriptor) * 0x20)
+#define HW_ATL_RDM_DESCDDATA_SIZE_ADR(descriptor) \
+	(0x00005b18 + (descriptor) * 0x20)
 /* bitmask for bitfield desc{d}_data_size[4:0] */
-#define rdm_descddata_size_msk 0x0000001f
+#define HW_ATL_RDM_DESCDDATA_SIZE_MSK 0x0000001f
 /* inverted bitmask for bitfield desc{d}_data_size[4:0] */
-#define rdm_descddata_size_mskn 0xffffffe0
+#define HW_ATL_RDM_DESCDDATA_SIZE_MSKN 0xffffffe0
 /* lower bit position of bitfield desc{d}_data_size[4:0] */
-#define rdm_descddata_size_shift 0
+#define HW_ATL_RDM_DESCDDATA_SIZE_SHIFT 0
 /* width of bitfield desc{d}_data_size[4:0] */
-#define rdm_descddata_size_width 5
+#define HW_ATL_RDM_DESCDDATA_SIZE_WIDTH 5
 /* default value of bitfield desc{d}_data_size[4:0] */
-#define rdm_descddata_size_default 0x0
+#define HW_ATL_RDM_DESCDDATA_SIZE_DEFAULT 0x0
 
 /* rx dca{d}_desc_en bitfield definitions
  * preprocessor definitions for the bitfield "dca{d}_desc_en".
@@ -166,17 +167,17 @@
  */
 
 /* register address for bitfield dca{d}_desc_en */
-#define rdm_dcaddesc_en_adr(dca) (0x00006100 + (dca) * 0x4)
+#define HW_ATL_RDM_DCADDESC_EN_ADR(dca) (0x00006100 + (dca) * 0x4)
 /* bitmask for bitfield dca{d}_desc_en */
-#define rdm_dcaddesc_en_msk 0x80000000
+#define HW_ATL_RDM_DCADDESC_EN_MSK 0x80000000
 /* inverted bitmask for bitfield dca{d}_desc_en */
-#define rdm_dcaddesc_en_mskn 0x7fffffff
+#define HW_ATL_RDM_DCADDESC_EN_MSKN 0x7fffffff
 /* lower bit position of bitfield dca{d}_desc_en */
-#define rdm_dcaddesc_en_shift 31
+#define HW_ATL_RDM_DCADDESC_EN_SHIFT 31
 /* width of bitfield dca{d}_desc_en */
-#define rdm_dcaddesc_en_width 1
+#define HW_ATL_RDM_DCADDESC_EN_WIDTH 1
 /* default value of bitfield dca{d}_desc_en */
-#define rdm_dcaddesc_en_default 0x0
+#define HW_ATL_RDM_DCADDESC_EN_DEFAULT 0x0
 
 /* rx desc{d}_en bitfield definitions
  * preprocessor definitions for the bitfield "desc{d}_en".
@@ -185,17 +186,17 @@
  */
 
 /* register address for bitfield desc{d}_en */
-#define rdm_descden_adr(descriptor) (0x00005b08 + (descriptor) * 0x20)
+#define HW_ATL_RDM_DESCDEN_ADR(descriptor) (0x00005b08 + (descriptor) * 0x20)
 /* bitmask for bitfield desc{d}_en */
-#define rdm_descden_msk 0x80000000
+#define HW_ATL_RDM_DESCDEN_MSK 0x80000000
 /* inverted bitmask for bitfield desc{d}_en */
-#define rdm_descden_mskn 0x7fffffff
+#define HW_ATL_RDM_DESCDEN_MSKN 0x7fffffff
 /* lower bit position of bitfield desc{d}_en */
-#define rdm_descden_shift 31
+#define HW_ATL_RDM_DESCDEN_SHIFT 31
 /* width of bitfield desc{d}_en */
-#define rdm_descden_width 1
+#define HW_ATL_RDM_DESCDEN_WIDTH 1
 /* default value of bitfield desc{d}_en */
-#define rdm_descden_default 0x0
+#define HW_ATL_RDM_DESCDEN_DEFAULT 0x0
 
 /* rx desc{d}_hdr_size[4:0] bitfield definitions
  * preprocessor definitions for the bitfield "desc{d}_hdr_size[4:0]".
@@ -204,17 +205,18 @@
  */
 
 /* register address for bitfield desc{d}_hdr_size[4:0] */
-#define rdm_descdhdr_size_adr(descriptor) (0x00005b18 + (descriptor) * 0x20)
+#define HW_ATL_RDM_DESCDHDR_SIZE_ADR(descriptor) \
+	(0x00005b18 + (descriptor) * 0x20)
 /* bitmask for bitfield desc{d}_hdr_size[4:0] */
-#define rdm_descdhdr_size_msk 0x00001f00
+#define HW_ATL_RDM_DESCDHDR_SIZE_MSK 0x00001f00
 /* inverted bitmask for bitfield desc{d}_hdr_size[4:0] */
-#define rdm_descdhdr_size_mskn 0xffffe0ff
+#define HW_ATL_RDM_DESCDHDR_SIZE_MSKN 0xffffe0ff
 /* lower bit position of bitfield desc{d}_hdr_size[4:0] */
-#define rdm_descdhdr_size_shift 8
+#define HW_ATL_RDM_DESCDHDR_SIZE_SHIFT 8
 /* width of bitfield desc{d}_hdr_size[4:0] */
-#define rdm_descdhdr_size_width 5
+#define HW_ATL_RDM_DESCDHDR_SIZE_WIDTH 5
 /* default value of bitfield desc{d}_hdr_size[4:0] */
-#define rdm_descdhdr_size_default 0x0
+#define HW_ATL_RDM_DESCDHDR_SIZE_DEFAULT 0x0
 
 /* rx desc{d}_hdr_split bitfield definitions
  * preprocessor definitions for the bitfield "desc{d}_hdr_split".
@@ -223,17 +225,18 @@
  */
 
 /* register address for bitfield desc{d}_hdr_split */
-#define rdm_descdhdr_split_adr(descriptor) (0x00005b08 + (descriptor) * 0x20)
+#define HW_ATL_RDM_DESCDHDR_SPLIT_ADR(descriptor) \
+	(0x00005b08 + (descriptor) * 0x20)
 /* bitmask for bitfield desc{d}_hdr_split */
-#define rdm_descdhdr_split_msk 0x10000000
+#define HW_ATL_RDM_DESCDHDR_SPLIT_MSK 0x10000000
 /* inverted bitmask for bitfield desc{d}_hdr_split */
-#define rdm_descdhdr_split_mskn 0xefffffff
+#define HW_ATL_RDM_DESCDHDR_SPLIT_MSKN 0xefffffff
 /* lower bit position of bitfield desc{d}_hdr_split */
-#define rdm_descdhdr_split_shift 28
+#define HW_ATL_RDM_DESCDHDR_SPLIT_SHIFT 28
 /* width of bitfield desc{d}_hdr_split */
-#define rdm_descdhdr_split_width 1
+#define HW_ATL_RDM_DESCDHDR_SPLIT_WIDTH 1
 /* default value of bitfield desc{d}_hdr_split */
-#define rdm_descdhdr_split_default 0x0
+#define HW_ATL_RDM_DESCDHDR_SPLIT_DEFAULT 0x0
 
 /* rx desc{d}_hd[c:0] bitfield definitions
  * preprocessor definitions for the bitfield "desc{d}_hd[c:0]".
@@ -242,15 +245,15 @@
  */
 
 /* register address for bitfield desc{d}_hd[c:0] */
-#define rdm_descdhd_adr(descriptor) (0x00005b0c + (descriptor) * 0x20)
+#define HW_ATL_RDM_DESCDHD_ADR(descriptor) (0x00005b0c + (descriptor) * 0x20)
 /* bitmask for bitfield desc{d}_hd[c:0] */
-#define rdm_descdhd_msk 0x00001fff
+#define HW_ATL_RDM_DESCDHD_MSK 0x00001fff
 /* inverted bitmask for bitfield desc{d}_hd[c:0] */
-#define rdm_descdhd_mskn 0xffffe000
+#define HW_ATL_RDM_DESCDHD_MSKN 0xffffe000
 /* lower bit position of bitfield desc{d}_hd[c:0] */
-#define rdm_descdhd_shift 0
+#define HW_ATL_RDM_DESCDHD_SHIFT 0
 /* width of bitfield desc{d}_hd[c:0] */
-#define rdm_descdhd_width 13
+#define HW_ATL_RDM_DESCDHD_WIDTH 13
 
 /* rx desc{d}_len[9:0] bitfield definitions
  * preprocessor definitions for the bitfield "desc{d}_len[9:0]".
@@ -259,17 +262,17 @@
  */
 
 /* register address for bitfield desc{d}_len[9:0] */
-#define rdm_descdlen_adr(descriptor) (0x00005b08 + (descriptor) * 0x20)
+#define HW_ATL_RDM_DESCDLEN_ADR(descriptor) (0x00005b08 + (descriptor) * 0x20)
 /* bitmask for bitfield desc{d}_len[9:0] */
-#define rdm_descdlen_msk 0x00001ff8
+#define HW_ATL_RDM_DESCDLEN_MSK 0x00001ff8
 /* inverted bitmask for bitfield desc{d}_len[9:0] */
-#define rdm_descdlen_mskn 0xffffe007
+#define HW_ATL_RDM_DESCDLEN_MSKN 0xffffe007
 /* lower bit position of bitfield desc{d}_len[9:0] */
-#define rdm_descdlen_shift 3
+#define HW_ATL_RDM_DESCDLEN_SHIFT 3
 /* width of bitfield desc{d}_len[9:0] */
-#define rdm_descdlen_width 10
+#define HW_ATL_RDM_DESCDLEN_WIDTH 10
 /* default value of bitfield desc{d}_len[9:0] */
-#define rdm_descdlen_default 0x0
+#define HW_ATL_RDM_DESCDLEN_DEFAULT 0x0
 
 /* rx desc{d}_reset bitfield definitions
  * preprocessor definitions for the bitfield "desc{d}_reset".
@@ -278,17 +281,17 @@
  */
 
 /* register address for bitfield desc{d}_reset */
-#define rdm_descdreset_adr(descriptor) (0x00005b08 + (descriptor) * 0x20)
+#define HW_ATL_RDM_DESCDRESET_ADR(descriptor) (0x00005b08 + (descriptor) * 0x20)
 /* bitmask for bitfield desc{d}_reset */
-#define rdm_descdreset_msk 0x02000000
+#define HW_ATL_RDM_DESCDRESET_MSK 0x02000000
 /* inverted bitmask for bitfield desc{d}_reset */
-#define rdm_descdreset_mskn 0xfdffffff
+#define HW_ATL_RDM_DESCDRESET_MSKN 0xfdffffff
 /* lower bit position of bitfield desc{d}_reset */
-#define rdm_descdreset_shift 25
+#define HW_ATL_RDM_DESCDRESET_SHIFT 25
 /* width of bitfield desc{d}_reset */
-#define rdm_descdreset_width 1
+#define HW_ATL_RDM_DESCDRESET_WIDTH 1
 /* default value of bitfield desc{d}_reset */
-#define rdm_descdreset_default 0x0
+#define HW_ATL_RDM_DESCDRESET_DEFAULT 0x0
 
 /* rx int_desc_wrb_en bitfield definitions
  * preprocessor definitions for the bitfield "int_desc_wrb_en".
@@ -296,17 +299,17 @@
  */
 
 /* register address for bitfield int_desc_wrb_en */
-#define rdm_int_desc_wrb_en_adr 0x00005a30
+#define HW_ATL_RDM_INT_DESC_WRB_EN_ADR 0x00005a30
 /* bitmask for bitfield int_desc_wrb_en */
-#define rdm_int_desc_wrb_en_msk 0x00000004
+#define HW_ATL_RDM_INT_DESC_WRB_EN_MSK 0x00000004
 /* inverted bitmask for bitfield int_desc_wrb_en */
-#define rdm_int_desc_wrb_en_mskn 0xfffffffb
+#define HW_ATL_RDM_INT_DESC_WRB_EN_MSKN 0xfffffffb
 /* lower bit position of bitfield int_desc_wrb_en */
-#define rdm_int_desc_wrb_en_shift 2
+#define HW_ATL_RDM_INT_DESC_WRB_EN_SHIFT 2
 /* width of bitfield int_desc_wrb_en */
-#define rdm_int_desc_wrb_en_width 1
+#define HW_ATL_RDM_INT_DESC_WRB_EN_WIDTH 1
 /* default value of bitfield int_desc_wrb_en */
-#define rdm_int_desc_wrb_en_default 0x0
+#define HW_ATL_RDM_INT_DESC_WRB_EN_DEFAULT 0x0
 
 /* rx dca{d}_hdr_en bitfield definitions
  * preprocessor definitions for the bitfield "dca{d}_hdr_en".
@@ -315,17 +318,17 @@
  */
 
 /* register address for bitfield dca{d}_hdr_en */
-#define rdm_dcadhdr_en_adr(dca) (0x00006100 + (dca) * 0x4)
+#define HW_ATL_RDM_DCADHDR_EN_ADR(dca) (0x00006100 + (dca) * 0x4)
 /* bitmask for bitfield dca{d}_hdr_en */
-#define rdm_dcadhdr_en_msk 0x40000000
+#define HW_ATL_RDM_DCADHDR_EN_MSK 0x40000000
 /* inverted bitmask for bitfield dca{d}_hdr_en */
-#define rdm_dcadhdr_en_mskn 0xbfffffff
+#define HW_ATL_RDM_DCADHDR_EN_MSKN 0xbfffffff
 /* lower bit position of bitfield dca{d}_hdr_en */
-#define rdm_dcadhdr_en_shift 30
+#define HW_ATL_RDM_DCADHDR_EN_SHIFT 30
 /* width of bitfield dca{d}_hdr_en */
-#define rdm_dcadhdr_en_width 1
+#define HW_ATL_RDM_DCADHDR_EN_WIDTH 1
 /* default value of bitfield dca{d}_hdr_en */
-#define rdm_dcadhdr_en_default 0x0
+#define HW_ATL_RDM_DCADHDR_EN_DEFAULT 0x0
 
 /* rx dca{d}_pay_en bitfield definitions
  * preprocessor definitions for the bitfield "dca{d}_pay_en".
@@ -334,17 +337,17 @@
  */
 
 /* register address for bitfield dca{d}_pay_en */
-#define rdm_dcadpay_en_adr(dca) (0x00006100 + (dca) * 0x4)
+#define HW_ATL_RDM_DCADPAY_EN_ADR(dca) (0x00006100 + (dca) * 0x4)
 /* bitmask for bitfield dca{d}_pay_en */
-#define rdm_dcadpay_en_msk 0x20000000
+#define HW_ATL_RDM_DCADPAY_EN_MSK 0x20000000
 /* inverted bitmask for bitfield dca{d}_pay_en */
-#define rdm_dcadpay_en_mskn 0xdfffffff
+#define HW_ATL_RDM_DCADPAY_EN_MSKN 0xdfffffff
 /* lower bit position of bitfield dca{d}_pay_en */
-#define rdm_dcadpay_en_shift 29
+#define HW_ATL_RDM_DCADPAY_EN_SHIFT 29
 /* width of bitfield dca{d}_pay_en */
-#define rdm_dcadpay_en_width 1
+#define HW_ATL_RDM_DCADPAY_EN_WIDTH 1
 /* default value of bitfield dca{d}_pay_en */
-#define rdm_dcadpay_en_default 0x0
+#define HW_ATL_RDM_DCADPAY_EN_DEFAULT 0x0
 
 /* RX rdm_int_rim_en Bitfield Definitions
  * Preprocessor definitions for the bitfield "rdm_int_rim_en".
@@ -352,51 +355,51 @@
  */
 
 /* Register address for bitfield rdm_int_rim_en */
-#define rdm_int_rim_en_adr 0x00005A30
+#define HW_ATL_RDM_INT_RIM_EN_ADR 0x00005A30
 /* Bitmask for bitfield rdm_int_rim_en */
-#define rdm_int_rim_en_msk 0x00000008
+#define HW_ATL_RDM_INT_RIM_EN_MSK 0x00000008
 /* Inverted bitmask for bitfield rdm_int_rim_en */
-#define rdm_int_rim_en_mskn 0xFFFFFFF7
+#define HW_ATL_RDM_INT_RIM_EN_MSKN 0xFFFFFFF7
 /* Lower bit position of bitfield rdm_int_rim_en */
-#define rdm_int_rim_en_shift 3
+#define HW_ATL_RDM_INT_RIM_EN_SHIFT 3
 /* Width of bitfield rdm_int_rim_en */
-#define rdm_int_rim_en_width 1
+#define HW_ATL_RDM_INT_RIM_EN_WIDTH 1
 /* Default value of bitfield rdm_int_rim_en */
-#define rdm_int_rim_en_default 0x0
+#define HW_ATL_RDM_INT_RIM_EN_DEFAULT 0x0
 
 /* general interrupt mapping register definitions
  * preprocessor definitions for general interrupt mapping register
  * base address: 0x00002180
  * parameter: regidx {f} | stride size 0x4 | range [0, 3]
  */
-#define gen_intr_map_adr(regidx) (0x00002180u + (regidx) * 0x4)
+#define HW_ATL_GEN_INTR_MAP_ADR(regidx) (0x00002180u + (regidx) * 0x4)
 
 /* general interrupt status register definitions
  * preprocessor definitions for general interrupt status register
 * address: 0x000021A4
  */
 
-#define gen_intr_stat_adr 0x000021A4U
+#define HW_ATL_GEN_INTR_STAT_ADR 0x000021A4U
 
 /* interrupt global control register definitions
  * preprocessor definitions for interrupt global control register
  * address: 0x00002300
  */
-#define intr_glb_ctl_adr 0x00002300u
+#define HW_ATL_INTR_GLB_CTL_ADR 0x00002300u
 
 /* interrupt throttle register definitions
  * preprocessor definitions for interrupt throttle register
  * base address: 0x00002800
  * parameter: throttle {t} | stride size 0x4 | range [0, 31]
  */
-#define intr_thr_adr(throttle) (0x00002800u + (throttle) * 0x4)
+#define HW_ATL_INTR_THR_ADR(throttle) (0x00002800u + (throttle) * 0x4)
 
 /* rx dma descriptor base address lsw definitions
  * preprocessor definitions for rx dma descriptor base address lsw
  * base address: 0x00005b00
  * parameter: descriptor {d} | stride size 0x20 | range [0, 31]
  */
-#define rx_dma_desc_base_addrlsw_adr(descriptor) \
+#define HW_ATL_RX_DMA_DESC_BASE_ADDRLSW_ADR(descriptor) \
 (0x00005b00u + (descriptor) * 0x20)
 
 /* rx dma descriptor base address msw definitions
@@ -404,7 +407,7 @@
  * base address: 0x00005b04
  * parameter: descriptor {d} | stride size 0x20 | range [0, 31]
  */
-#define rx_dma_desc_base_addrmsw_adr(descriptor) \
+#define HW_ATL_RX_DMA_DESC_BASE_ADDRMSW_ADR(descriptor) \
 (0x00005b04u + (descriptor) * 0x20)
 
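The LSW/MSW pair above takes a 64-bit descriptor ring base split across two 32-bit writes, one register pair per ring at a 0x20 stride. A sketch of the split, assuming the documented range [0, 31]; write_reg() merely logs the access and stands in for the driver's real MMIO accessor.

#include <stdint.h>
#include <stdio.h>

#define HW_ATL_RX_DMA_DESC_BASE_ADDRLSW_ADR(descriptor) \
	(0x00005b00u + (descriptor) * 0x20)
#define HW_ATL_RX_DMA_DESC_BASE_ADDRMSW_ADR(descriptor) \
	(0x00005b04u + (descriptor) * 0x20)

/* Stand-in for an MMIO write; just logs the access. */
static void write_reg(uint32_t addr, uint32_t val)
{
	printf("reg[0x%08x] <= 0x%08x\n", addr, val);
}

static void set_rx_ring_base(unsigned int descriptor, uint64_t dma_addr)
{
	write_reg(HW_ATL_RX_DMA_DESC_BASE_ADDRLSW_ADR(descriptor),
		  (uint32_t)dma_addr);
	write_reg(HW_ATL_RX_DMA_DESC_BASE_ADDRMSW_ADR(descriptor),
		  (uint32_t)(dma_addr >> 32));
}

int main(void)
{
	/* ring 3: the 0x20 stride puts LSW at 0x5b60 and MSW at 0x5b64 */
	set_rx_ring_base(3, 0x0000000123456000ull);
	return 0;
}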
 /* rx dma descriptor status register definitions
@@ -412,46 +415,48 @@
  * base address: 0x00005b14
  * parameter: descriptor {d} | stride size 0x20 | range [0, 31]
  */
-#define rx_dma_desc_stat_adr(descriptor) (0x00005b14u + (descriptor) * 0x20)
+#define HW_ATL_RX_DMA_DESC_STAT_ADR(descriptor) \
+	(0x00005b14u + (descriptor) * 0x20)
 
 /* rx dma descriptor tail pointer register definitions
  * preprocessor definitions for rx dma descriptor tail pointer register
  * base address: 0x00005b10
  * parameter: descriptor {d} | stride size 0x20 | range [0, 31]
  */
-#define rx_dma_desc_tail_ptr_adr(descriptor) (0x00005b10u + (descriptor) * 0x20)
+#define HW_ATL_RX_DMA_DESC_TAIL_PTR_ADR(descriptor) \
+	(0x00005b10u + (descriptor) * 0x20)
 
 /* rx interrupt moderation control register definitions
  * Preprocessor definitions for RX Interrupt Moderation Control Register
  * Base Address: 0x00005A40
  * Parameter: RIM {R} | stride size 0x4 | range [0, 31]
  */
-#define rx_intr_moderation_ctl_adr(rim) (0x00005A40u + (rim) * 0x4)
+#define HW_ATL_RX_INTR_MODERATION_CTL_ADR(rim) (0x00005A40u + (rim) * 0x4)
 
 /* rx filter multicast filter mask register definitions
  * preprocessor definitions for rx filter multicast filter mask register
  * address: 0x00005270
  */
-#define rx_flr_mcst_flr_msk_adr 0x00005270u
+#define HW_ATL_RX_FLR_MCST_FLR_MSK_ADR 0x00005270u
 
 /* rx filter multicast filter register definitions
  * preprocessor definitions for rx filter multicast filter register
  * base address: 0x00005250
  * parameter: filter {f} | stride size 0x4 | range [0, 7]
  */
-#define rx_flr_mcst_flr_adr(filter) (0x00005250u + (filter) * 0x4)
+#define HW_ATL_RX_FLR_MCST_FLR_ADR(filter) (0x00005250u + (filter) * 0x4)
 
 /* RX Filter RSS Control Register 1 Definitions
  * Preprocessor definitions for RX Filter RSS Control Register 1
  * Address: 0x000054C0
  */
-#define rx_flr_rss_control1_adr 0x000054C0u
+#define HW_ATL_RX_FLR_RSS_CONTROL1_ADR 0x000054C0u
 
 /* RX Filter Control Register 2 Definitions
  * Preprocessor definitions for RX Filter Control Register 2
  * Address: 0x00005104
  */
-#define rx_flr_control2_adr 0x00005104u
+#define HW_ATL_RX_FLR_CONTROL2_ADR 0x00005104u
 
 /* tx tx dma debug control [1f:0] bitfield definitions
  * preprocessor definitions for the bitfield "tx dma debug control [1f:0]".
@@ -459,24 +464,24 @@
  */
 
 /* register address for bitfield tx dma debug control [1f:0] */
-#define tdm_tx_dma_debug_ctl_adr 0x00008920
+#define HW_ATL_TDM_TX_DMA_DEBUG_CTL_ADR 0x00008920
 /* bitmask for bitfield tx dma debug control [1f:0] */
-#define tdm_tx_dma_debug_ctl_msk 0xffffffff
+#define HW_ATL_TDM_TX_DMA_DEBUG_CTL_MSK 0xffffffff
 /* inverted bitmask for bitfield tx dma debug control [1f:0] */
-#define tdm_tx_dma_debug_ctl_mskn 0x00000000
+#define HW_ATL_TDM_TX_DMA_DEBUG_CTL_MSKN 0x00000000
 /* lower bit position of bitfield tx dma debug control [1f:0] */
-#define tdm_tx_dma_debug_ctl_shift 0
+#define HW_ATL_TDM_TX_DMA_DEBUG_CTL_SHIFT 0
 /* width of bitfield tx dma debug control [1f:0] */
-#define tdm_tx_dma_debug_ctl_width 32
+#define HW_ATL_TDM_TX_DMA_DEBUG_CTL_WIDTH 32
 /* default value of bitfield tx dma debug control [1f:0] */
-#define tdm_tx_dma_debug_ctl_default 0x0
+#define HW_ATL_TDM_TX_DMA_DEBUG_CTL_DEFAULT 0x0
 
 /* tx dma descriptor base address lsw definitions
  * preprocessor definitions for tx dma descriptor base address lsw
  * base address: 0x00007c00
  * parameter: descriptor {d} | stride size 0x40 | range [0, 31]
  */
-#define tx_dma_desc_base_addrlsw_adr(descriptor) \
+#define HW_ATL_TX_DMA_DESC_BASE_ADDRLSW_ADR(descriptor) \
 	(0x00007c00u + (descriptor) * 0x40)
 
 /* tx dma descriptor tail pointer register definitions
@@ -484,7 +489,8 @@
  * base address: 0x00007c10
 * parameter: descriptor {d} | stride size 0x40 | range [0, 31]
  */
-#define tx_dma_desc_tail_ptr_adr(descriptor) (0x00007c10u + (descriptor) * 0x40)
+#define HW_ATL_TX_DMA_DESC_TAIL_PTR_ADR(descriptor) \
+	(0x00007c10u + (descriptor) * 0x40)
 
 /* rx dma_sys_loopback bitfield definitions
  * preprocessor definitions for the bitfield "dma_sys_loopback".
@@ -492,17 +498,17 @@
  */
 
 /* register address for bitfield dma_sys_loopback */
-#define rpb_dma_sys_lbk_adr 0x00005000
+#define HW_ATL_RPB_DMA_SYS_LBK_ADR 0x00005000
 /* bitmask for bitfield dma_sys_loopback */
-#define rpb_dma_sys_lbk_msk 0x00000040
+#define HW_ATL_RPB_DMA_SYS_LBK_MSK 0x00000040
 /* inverted bitmask for bitfield dma_sys_loopback */
-#define rpb_dma_sys_lbk_mskn 0xffffffbf
+#define HW_ATL_RPB_DMA_SYS_LBK_MSKN 0xffffffbf
 /* lower bit position of bitfield dma_sys_loopback */
-#define rpb_dma_sys_lbk_shift 6
+#define HW_ATL_RPB_DMA_SYS_LBK_SHIFT 6
 /* width of bitfield dma_sys_loopback */
-#define rpb_dma_sys_lbk_width 1
+#define HW_ATL_RPB_DMA_SYS_LBK_WIDTH 1
 /* default value of bitfield dma_sys_loopback */
-#define rpb_dma_sys_lbk_default 0x0
+#define HW_ATL_RPB_DMA_SYS_LBK_DEFAULT 0x0
 
 /* rx rx_tc_mode bitfield definitions
  * preprocessor definitions for the bitfield "rx_tc_mode".
@@ -510,17 +516,17 @@
  */
 
 /* register address for bitfield rx_tc_mode */
-#define rpb_rpf_rx_tc_mode_adr 0x00005700
+#define HW_ATL_RPB_RPF_RX_TC_MODE_ADR 0x00005700
 /* bitmask for bitfield rx_tc_mode */
-#define rpb_rpf_rx_tc_mode_msk 0x00000100
+#define HW_ATL_RPB_RPF_RX_TC_MODE_MSK 0x00000100
 /* inverted bitmask for bitfield rx_tc_mode */
-#define rpb_rpf_rx_tc_mode_mskn 0xfffffeff
+#define HW_ATL_RPB_RPF_RX_TC_MODE_MSKN 0xfffffeff
 /* lower bit position of bitfield rx_tc_mode */
-#define rpb_rpf_rx_tc_mode_shift 8
+#define HW_ATL_RPB_RPF_RX_TC_MODE_SHIFT 8
 /* width of bitfield rx_tc_mode */
-#define rpb_rpf_rx_tc_mode_width 1
+#define HW_ATL_RPB_RPF_RX_TC_MODE_WIDTH 1
 /* default value of bitfield rx_tc_mode */
-#define rpb_rpf_rx_tc_mode_default 0x0
+#define HW_ATL_RPB_RPF_RX_TC_MODE_DEFAULT 0x0
 
 /* rx rx_buf_en bitfield definitions
  * preprocessor definitions for the bitfield "rx_buf_en".
@@ -528,17 +534,17 @@
  */
 
 /* register address for bitfield rx_buf_en */
-#define rpb_rx_buf_en_adr 0x00005700
+#define HW_ATL_RPB_RX_BUF_EN_ADR 0x00005700
 /* bitmask for bitfield rx_buf_en */
-#define rpb_rx_buf_en_msk 0x00000001
+#define HW_ATL_RPB_RX_BUF_EN_MSK 0x00000001
 /* inverted bitmask for bitfield rx_buf_en */
-#define rpb_rx_buf_en_mskn 0xfffffffe
+#define HW_ATL_RPB_RX_BUF_EN_MSKN 0xfffffffe
 /* lower bit position of bitfield rx_buf_en */
-#define rpb_rx_buf_en_shift 0
+#define HW_ATL_RPB_RX_BUF_EN_SHIFT 0
 /* width of bitfield rx_buf_en */
-#define rpb_rx_buf_en_width 1
+#define HW_ATL_RPB_RX_BUF_EN_WIDTH 1
 /* default value of bitfield rx_buf_en */
-#define rpb_rx_buf_en_default 0x0
+#define HW_ATL_RPB_RX_BUF_EN_DEFAULT 0x0
 
 /* rx rx{b}_hi_thresh[d:0] bitfield definitions
  * preprocessor definitions for the bitfield "rx{b}_hi_thresh[d:0]".
@@ -547,17 +553,17 @@
  */
 
 /* register address for bitfield rx{b}_hi_thresh[d:0] */
-#define rpb_rxbhi_thresh_adr(buffer) (0x00005714 + (buffer) * 0x10)
+#define HW_ATL_RPB_RXBHI_THRESH_ADR(buffer) (0x00005714 + (buffer) * 0x10)
 /* bitmask for bitfield rx{b}_hi_thresh[d:0] */
-#define rpb_rxbhi_thresh_msk 0x3fff0000
+#define HW_ATL_RPB_RXBHI_THRESH_MSK 0x3fff0000
 /* inverted bitmask for bitfield rx{b}_hi_thresh[d:0] */
-#define rpb_rxbhi_thresh_mskn 0xc000ffff
+#define HW_ATL_RPB_RXBHI_THRESH_MSKN 0xc000ffff
 /* lower bit position of bitfield rx{b}_hi_thresh[d:0] */
-#define rpb_rxbhi_thresh_shift 16
+#define HW_ATL_RPB_RXBHI_THRESH_SHIFT 16
 /* width of bitfield rx{b}_hi_thresh[d:0] */
-#define rpb_rxbhi_thresh_width 14
+#define HW_ATL_RPB_RXBHI_THRESH_WIDTH 14
 /* default value of bitfield rx{b}_hi_thresh[d:0] */
-#define rpb_rxbhi_thresh_default 0x0
+#define HW_ATL_RPB_RXBHI_THRESH_DEFAULT 0x0
 
 /* rx rx{b}_lo_thresh[d:0] bitfield definitions
  * preprocessor definitions for the bitfield "rx{b}_lo_thresh[d:0]".
@@ -566,17 +572,17 @@
  */
 
 /* register address for bitfield rx{b}_lo_thresh[d:0] */
-#define rpb_rxblo_thresh_adr(buffer) (0x00005714 + (buffer) * 0x10)
+#define HW_ATL_RPB_RXBLO_THRESH_ADR(buffer) (0x00005714 + (buffer) * 0x10)
 /* bitmask for bitfield rx{b}_lo_thresh[d:0] */
-#define rpb_rxblo_thresh_msk 0x00003fff
+#define HW_ATL_RPB_RXBLO_THRESH_MSK 0x00003fff
 /* inverted bitmask for bitfield rx{b}_lo_thresh[d:0] */
-#define rpb_rxblo_thresh_mskn 0xffffc000
+#define HW_ATL_RPB_RXBLO_THRESH_MSKN 0xffffc000
 /* lower bit position of bitfield rx{b}_lo_thresh[d:0] */
-#define rpb_rxblo_thresh_shift 0
+#define HW_ATL_RPB_RXBLO_THRESH_SHIFT 0
 /* width of bitfield rx{b}_lo_thresh[d:0] */
-#define rpb_rxblo_thresh_width 14
+#define HW_ATL_RPB_RXBLO_THRESH_WIDTH 14
 /* default value of bitfield rx{b}_lo_thresh[d:0] */
-#define rpb_rxblo_thresh_default 0x0
+#define HW_ATL_RPB_RXBLO_THRESH_DEFAULT 0x0
 
 /* rx rx_fc_mode[1:0] bitfield definitions
  * preprocessor definitions for the bitfield "rx_fc_mode[1:0]".
@@ -584,17 +590,17 @@
  */
 
 /* register address for bitfield rx_fc_mode[1:0] */
-#define rpb_rx_fc_mode_adr 0x00005700
+#define HW_ATL_RPB_RX_FC_MODE_ADR 0x00005700
 /* bitmask for bitfield rx_fc_mode[1:0] */
-#define rpb_rx_fc_mode_msk 0x00000030
+#define HW_ATL_RPB_RX_FC_MODE_MSK 0x00000030
 /* inverted bitmask for bitfield rx_fc_mode[1:0] */
-#define rpb_rx_fc_mode_mskn 0xffffffcf
+#define HW_ATL_RPB_RX_FC_MODE_MSKN 0xffffffcf
 /* lower bit position of bitfield rx_fc_mode[1:0] */
-#define rpb_rx_fc_mode_shift 4
+#define HW_ATL_RPB_RX_FC_MODE_SHIFT 4
 /* width of bitfield rx_fc_mode[1:0] */
-#define rpb_rx_fc_mode_width 2
+#define HW_ATL_RPB_RX_FC_MODE_WIDTH 2
 /* default value of bitfield rx_fc_mode[1:0] */
-#define rpb_rx_fc_mode_default 0x0
+#define HW_ATL_RPB_RX_FC_MODE_DEFAULT 0x0
 
 /* rx rx{b}_buf_size[8:0] bitfield definitions
  * preprocessor definitions for the bitfield "rx{b}_buf_size[8:0]".
@@ -603,17 +609,17 @@
  */
 
 /* register address for bitfield rx{b}_buf_size[8:0] */
-#define rpb_rxbbuf_size_adr(buffer) (0x00005710 + (buffer) * 0x10)
+#define HW_ATL_RPB_RXBBUF_SIZE_ADR(buffer) (0x00005710 + (buffer) * 0x10)
 /* bitmask for bitfield rx{b}_buf_size[8:0] */
-#define rpb_rxbbuf_size_msk 0x000001ff
+#define HW_ATL_RPB_RXBBUF_SIZE_MSK 0x000001ff
 /* inverted bitmask for bitfield rx{b}_buf_size[8:0] */
-#define rpb_rxbbuf_size_mskn 0xfffffe00
+#define HW_ATL_RPB_RXBBUF_SIZE_MSKN 0xfffffe00
 /* lower bit position of bitfield rx{b}_buf_size[8:0] */
-#define rpb_rxbbuf_size_shift 0
+#define HW_ATL_RPB_RXBBUF_SIZE_SHIFT 0
 /* width of bitfield rx{b}_buf_size[8:0] */
-#define rpb_rxbbuf_size_width 9
+#define HW_ATL_RPB_RXBBUF_SIZE_WIDTH 9
 /* default value of bitfield rx{b}_buf_size[8:0] */
-#define rpb_rxbbuf_size_default 0x0
+#define HW_ATL_RPB_RXBBUF_SIZE_DEFAULT 0x0
 
 /* rx rx{b}_xoff_en bitfield definitions
  * preprocessor definitions for the bitfield "rx{b}_xoff_en".
@@ -622,17 +628,17 @@
  */
 
 /* register address for bitfield rx{b}_xoff_en */
-#define rpb_rxbxoff_en_adr(buffer) (0x00005714 + (buffer) * 0x10)
+#define HW_ATL_RPB_RXBXOFF_EN_ADR(buffer) (0x00005714 + (buffer) * 0x10)
 /* bitmask for bitfield rx{b}_xoff_en */
-#define rpb_rxbxoff_en_msk 0x80000000
+#define HW_ATL_RPB_RXBXOFF_EN_MSK 0x80000000
 /* inverted bitmask for bitfield rx{b}_xoff_en */
-#define rpb_rxbxoff_en_mskn 0x7fffffff
+#define HW_ATL_RPB_RXBXOFF_EN_MSKN 0x7fffffff
 /* lower bit position of bitfield rx{b}_xoff_en */
-#define rpb_rxbxoff_en_shift 31
+#define HW_ATL_RPB_RXBXOFF_EN_SHIFT 31
 /* width of bitfield rx{b}_xoff_en */
-#define rpb_rxbxoff_en_width 1
+#define HW_ATL_RPB_RXBXOFF_EN_WIDTH 1
 /* default value of bitfield rx{b}_xoff_en */
-#define rpb_rxbxoff_en_default 0x0
+#define HW_ATL_RPB_RXBXOFF_EN_DEFAULT 0x0
 
 /* rx l2_bc_thresh[f:0] bitfield definitions
  * preprocessor definitions for the bitfield "l2_bc_thresh[f:0]".
@@ -640,17 +646,17 @@
  */
 
 /* register address for bitfield l2_bc_thresh[f:0] */
-#define rpfl2bc_thresh_adr 0x00005100
+#define HW_ATL_RPFL2BC_THRESH_ADR 0x00005100
 /* bitmask for bitfield l2_bc_thresh[f:0] */
-#define rpfl2bc_thresh_msk 0xffff0000
+#define HW_ATL_RPFL2BC_THRESH_MSK 0xffff0000
 /* inverted bitmask for bitfield l2_bc_thresh[f:0] */
-#define rpfl2bc_thresh_mskn 0x0000ffff
+#define HW_ATL_RPFL2BC_THRESH_MSKN 0x0000ffff
 /* lower bit position of bitfield l2_bc_thresh[f:0] */
-#define rpfl2bc_thresh_shift 16
+#define HW_ATL_RPFL2BC_THRESH_SHIFT 16
 /* width of bitfield l2_bc_thresh[f:0] */
-#define rpfl2bc_thresh_width 16
+#define HW_ATL_RPFL2BC_THRESH_WIDTH 16
 /* default value of bitfield l2_bc_thresh[f:0] */
-#define rpfl2bc_thresh_default 0x0
+#define HW_ATL_RPFL2BC_THRESH_DEFAULT 0x0
 
 /* rx l2_bc_en bitfield definitions
  * preprocessor definitions for the bitfield "l2_bc_en".
@@ -658,17 +664,17 @@
  */
 
 /* register address for bitfield l2_bc_en */
-#define rpfl2bc_en_adr 0x00005100
+#define HW_ATL_RPFL2BC_EN_ADR 0x00005100
 /* bitmask for bitfield l2_bc_en */
-#define rpfl2bc_en_msk 0x00000001
+#define HW_ATL_RPFL2BC_EN_MSK 0x00000001
 /* inverted bitmask for bitfield l2_bc_en */
-#define rpfl2bc_en_mskn 0xfffffffe
+#define HW_ATL_RPFL2BC_EN_MSKN 0xfffffffe
 /* lower bit position of bitfield l2_bc_en */
-#define rpfl2bc_en_shift 0
+#define HW_ATL_RPFL2BC_EN_SHIFT 0
 /* width of bitfield l2_bc_en */
-#define rpfl2bc_en_width 1
+#define HW_ATL_RPFL2BC_EN_WIDTH 1
 /* default value of bitfield l2_bc_en */
-#define rpfl2bc_en_default 0x0
+#define HW_ATL_RPFL2BC_EN_DEFAULT 0x0
 
 /* rx l2_bc_act[2:0] bitfield definitions
  * preprocessor definitions for the bitfield "l2_bc_act[2:0]".
@@ -676,17 +682,17 @@
  */
 
 /* register address for bitfield l2_bc_act[2:0] */
-#define rpfl2bc_act_adr 0x00005100
+#define HW_ATL_RPFL2BC_ACT_ADR 0x00005100
 /* bitmask for bitfield l2_bc_act[2:0] */
-#define rpfl2bc_act_msk 0x00007000
+#define HW_ATL_RPFL2BC_ACT_MSK 0x00007000
 /* inverted bitmask for bitfield l2_bc_act[2:0] */
-#define rpfl2bc_act_mskn 0xffff8fff
+#define HW_ATL_RPFL2BC_ACT_MSKN 0xffff8fff
 /* lower bit position of bitfield l2_bc_act[2:0] */
-#define rpfl2bc_act_shift 12
+#define HW_ATL_RPFL2BC_ACT_SHIFT 12
 /* width of bitfield l2_bc_act[2:0] */
-#define rpfl2bc_act_width 3
+#define HW_ATL_RPFL2BC_ACT_WIDTH 3
 /* default value of bitfield l2_bc_act[2:0] */
-#define rpfl2bc_act_default 0x0
+#define HW_ATL_RPFL2BC_ACT_DEFAULT 0x0
 
 /* rx l2_mc_en{f} bitfield definitions
  * preprocessor definitions for the bitfield "l2_mc_en{f}".
@@ -695,17 +701,17 @@
  */
 
 /* register address for bitfield l2_mc_en{f} */
-#define rpfl2mc_enf_adr(filter) (0x00005250 + (filter) * 0x4)
+#define HW_ATL_RPFL2MC_ENF_ADR(filter) (0x00005250 + (filter) * 0x4)
 /* bitmask for bitfield l2_mc_en{f} */
-#define rpfl2mc_enf_msk 0x80000000
+#define HW_ATL_RPFL2MC_ENF_MSK 0x80000000
 /* inverted bitmask for bitfield l2_mc_en{f} */
-#define rpfl2mc_enf_mskn 0x7fffffff
+#define HW_ATL_RPFL2MC_ENF_MSKN 0x7fffffff
 /* lower bit position of bitfield l2_mc_en{f} */
-#define rpfl2mc_enf_shift 31
+#define HW_ATL_RPFL2MC_ENF_SHIFT 31
 /* width of bitfield l2_mc_en{f} */
-#define rpfl2mc_enf_width 1
+#define HW_ATL_RPFL2MC_ENF_WIDTH 1
 /* default value of bitfield l2_mc_en{f} */
-#define rpfl2mc_enf_default 0x0
+#define HW_ATL_RPFL2MC_ENF_DEFAULT 0x0
 
 /* rx l2_promis_mode bitfield definitions
  * preprocessor definitions for the bitfield "l2_promis_mode".
@@ -713,17 +719,17 @@
  */
 
 /* register address for bitfield l2_promis_mode */
-#define rpfl2promis_mode_adr 0x00005100
+#define HW_ATL_RPFL2PROMIS_MODE_ADR 0x00005100
 /* bitmask for bitfield l2_promis_mode */
-#define rpfl2promis_mode_msk 0x00000008
+#define HW_ATL_RPFL2PROMIS_MODE_MSK 0x00000008
 /* inverted bitmask for bitfield l2_promis_mode */
-#define rpfl2promis_mode_mskn 0xfffffff7
+#define HW_ATL_RPFL2PROMIS_MODE_MSKN 0xfffffff7
 /* lower bit position of bitfield l2_promis_mode */
-#define rpfl2promis_mode_shift 3
+#define HW_ATL_RPFL2PROMIS_MODE_SHIFT 3
 /* width of bitfield l2_promis_mode */
-#define rpfl2promis_mode_width 1
+#define HW_ATL_RPFL2PROMIS_MODE_WIDTH 1
 /* default value of bitfield l2_promis_mode */
-#define rpfl2promis_mode_default 0x0
+#define HW_ATL_RPFL2PROMIS_MODE_DEFAULT 0x0
 
 /* rx l2_uc_act{f}[2:0] bitfield definitions
  * preprocessor definitions for the bitfield "l2_uc_act{f}[2:0]".
@@ -732,17 +738,17 @@
  */
 
 /* register address for bitfield l2_uc_act{f}[2:0] */
-#define rpfl2uc_actf_adr(filter) (0x00005114 + (filter) * 0x8)
+#define HW_ATL_RPFL2UC_ACTF_ADR(filter) (0x00005114 + (filter) * 0x8)
 /* bitmask for bitfield l2_uc_act{f}[2:0] */
-#define rpfl2uc_actf_msk 0x00070000
+#define HW_ATL_RPFL2UC_ACTF_MSK 0x00070000
 /* inverted bitmask for bitfield l2_uc_act{f}[2:0] */
-#define rpfl2uc_actf_mskn 0xfff8ffff
+#define HW_ATL_RPFL2UC_ACTF_MSKN 0xfff8ffff
 /* lower bit position of bitfield l2_uc_act{f}[2:0] */
-#define rpfl2uc_actf_shift 16
+#define HW_ATL_RPFL2UC_ACTF_SHIFT 16
 /* width of bitfield l2_uc_act{f}[2:0] */
-#define rpfl2uc_actf_width 3
+#define HW_ATL_RPFL2UC_ACTF_WIDTH 3
 /* default value of bitfield l2_uc_act{f}[2:0] */
-#define rpfl2uc_actf_default 0x0
+#define HW_ATL_RPFL2UC_ACTF_DEFAULT 0x0
 
 /* rx l2_uc_en{f} bitfield definitions
  * preprocessor definitions for the bitfield "l2_uc_en{f}".
@@ -751,26 +757,26 @@
  */
 
 /* register address for bitfield l2_uc_en{f} */
-#define rpfl2uc_enf_adr(filter) (0x00005114 + (filter) * 0x8)
+#define HW_ATL_RPFL2UC_ENF_ADR(filter) (0x00005114 + (filter) * 0x8)
 /* bitmask for bitfield l2_uc_en{f} */
-#define rpfl2uc_enf_msk 0x80000000
+#define HW_ATL_RPFL2UC_ENF_MSK 0x80000000
 /* inverted bitmask for bitfield l2_uc_en{f} */
-#define rpfl2uc_enf_mskn 0x7fffffff
+#define HW_ATL_RPFL2UC_ENF_MSKN 0x7fffffff
 /* lower bit position of bitfield l2_uc_en{f} */
-#define rpfl2uc_enf_shift 31
+#define HW_ATL_RPFL2UC_ENF_SHIFT 31
 /* width of bitfield l2_uc_en{f} */
-#define rpfl2uc_enf_width 1
+#define HW_ATL_RPFL2UC_ENF_WIDTH 1
 /* default value of bitfield l2_uc_en{f} */
-#define rpfl2uc_enf_default 0x0
+#define HW_ATL_RPFL2UC_ENF_DEFAULT 0x0
 
 /* register address for bitfield l2_uc_da{f}_lsw[1f:0] */
-#define rpfl2uc_daflsw_adr(filter) (0x00005110 + (filter) * 0x8)
+#define HW_ATL_RPFL2UC_DAFLSW_ADR(filter) (0x00005110 + (filter) * 0x8)
 /* register address for bitfield l2_uc_da{f}_msw[f:0] */
-#define rpfl2uc_dafmsw_adr(filter) (0x00005114 + (filter) * 0x8)
+#define HW_ATL_RPFL2UC_DAFMSW_ADR(filter) (0x00005114 + (filter) * 0x8)
 /* bitmask for bitfield l2_uc_da{f}_msw[f:0] */
-#define rpfl2uc_dafmsw_msk 0x0000ffff
+#define HW_ATL_RPFL2UC_DAFMSW_MSK 0x0000ffff
 /* lower bit position of bitfield l2_uc_da{f}_msw[f:0] */
-#define rpfl2uc_dafmsw_shift 0
+#define HW_ATL_RPFL2UC_DAFMSW_SHIFT 0
 
 /* rx l2_mc_accept_all bitfield definitions
  * Preprocessor definitions for the bitfield "l2_mc_accept_all".
@@ -778,22 +784,22 @@
  */
 
 /* Register address for bitfield l2_mc_accept_all */
-#define rpfl2mc_accept_all_adr 0x00005270
+#define HW_ATL_RPFL2MC_ACCEPT_ALL_ADR 0x00005270
 /* Bitmask for bitfield l2_mc_accept_all */
-#define rpfl2mc_accept_all_msk 0x00004000
+#define HW_ATL_RPFL2MC_ACCEPT_ALL_MSK 0x00004000
 /* Inverted bitmask for bitfield l2_mc_accept_all */
-#define rpfl2mc_accept_all_mskn 0xFFFFBFFF
+#define HW_ATL_RPFL2MC_ACCEPT_ALL_MSKN 0xFFFFBFFF
 /* Lower bit position of bitfield l2_mc_accept_all */
-#define rpfl2mc_accept_all_shift 14
+#define HW_ATL_RPFL2MC_ACCEPT_ALL_SHIFT 14
 /* Width of bitfield l2_mc_accept_all */
-#define rpfl2mc_accept_all_width 1
+#define HW_ATL_RPFL2MC_ACCEPT_ALL_WIDTH 1
 /* Default value of bitfield l2_mc_accept_all */
-#define rpfl2mc_accept_all_default 0x0
+#define HW_ATL_RPFL2MC_ACCEPT_ALL_DEFAULT 0x0
 
 /* width of bitfield rx_tc_up{t}[2:0] */
-#define rpf_rpb_rx_tc_upt_width 3
+#define HW_ATL_RPF_RPB_RX_TC_UPT_WIDTH 3
 /* default value of bitfield rx_tc_up{t}[2:0] */
-#define rpf_rpb_rx_tc_upt_default 0x0
+#define HW_ATL_RPF_RPB_RX_TC_UPT_DEFAULT 0x0
 
 /* rx rss_key_addr[4:0] bitfield definitions
  * preprocessor definitions for the bitfield "rss_key_addr[4:0]".
@@ -801,17 +807,17 @@
  */
 
 /* register address for bitfield rss_key_addr[4:0] */
-#define rpf_rss_key_addr_adr 0x000054d0
+#define HW_ATL_RPF_RSS_KEY_ADDR_ADR 0x000054d0
 /* bitmask for bitfield rss_key_addr[4:0] */
-#define rpf_rss_key_addr_msk 0x0000001f
+#define HW_ATL_RPF_RSS_KEY_ADDR_MSK 0x0000001f
 /* inverted bitmask for bitfield rss_key_addr[4:0] */
-#define rpf_rss_key_addr_mskn 0xffffffe0
+#define HW_ATL_RPF_RSS_KEY_ADDR_MSKN 0xffffffe0
 /* lower bit position of bitfield rss_key_addr[4:0] */
-#define rpf_rss_key_addr_shift 0
+#define HW_ATL_RPF_RSS_KEY_ADDR_SHIFT 0
 /* width of bitfield rss_key_addr[4:0] */
-#define rpf_rss_key_addr_width 5
+#define HW_ATL_RPF_RSS_KEY_ADDR_WIDTH 5
 /* default value of bitfield rss_key_addr[4:0] */
-#define rpf_rss_key_addr_default 0x0
+#define HW_ATL_RPF_RSS_KEY_ADDR_DEFAULT 0x0
 
 /* rx rss_key_wr_data[1f:0] bitfield definitions
  * preprocessor definitions for the bitfield "rss_key_wr_data[1f:0]".
@@ -819,17 +825,17 @@
  */
 
 /* register address for bitfield rss_key_wr_data[1f:0] */
-#define rpf_rss_key_wr_data_adr 0x000054d4
+#define HW_ATL_RPF_RSS_KEY_WR_DATA_ADR 0x000054d4
 /* bitmask for bitfield rss_key_wr_data[1f:0] */
-#define rpf_rss_key_wr_data_msk 0xffffffff
+#define HW_ATL_RPF_RSS_KEY_WR_DATA_MSK 0xffffffff
 /* inverted bitmask for bitfield rss_key_wr_data[1f:0] */
-#define rpf_rss_key_wr_data_mskn 0x00000000
+#define HW_ATL_RPF_RSS_KEY_WR_DATA_MSKN 0x00000000
 /* lower bit position of bitfield rss_key_wr_data[1f:0] */
-#define rpf_rss_key_wr_data_shift 0
+#define HW_ATL_RPF_RSS_KEY_WR_DATA_SHIFT 0
 /* width of bitfield rss_key_wr_data[1f:0] */
-#define rpf_rss_key_wr_data_width 32
+#define HW_ATL_RPF_RSS_KEY_WR_DATA_WIDTH 32
 /* default value of bitfield rss_key_wr_data[1f:0] */
-#define rpf_rss_key_wr_data_default 0x0
+#define HW_ATL_RPF_RSS_KEY_WR_DATA_DEFAULT 0x0
 
 /* rx rss_key_wr_en_i bitfield definitions
  * preprocessor definitions for the bitfield "rss_key_wr_en_i".
@@ -837,17 +843,17 @@
  */
 
 /* register address for bitfield rss_key_wr_en_i */
-#define rpf_rss_key_wr_eni_adr 0x000054d0
+#define HW_ATL_RPF_RSS_KEY_WR_ENI_ADR 0x000054d0
 /* bitmask for bitfield rss_key_wr_en_i */
-#define rpf_rss_key_wr_eni_msk 0x00000020
+#define HW_ATL_RPF_RSS_KEY_WR_ENI_MSK 0x00000020
 /* inverted bitmask for bitfield rss_key_wr_en_i */
-#define rpf_rss_key_wr_eni_mskn 0xffffffdf
+#define HW_ATL_RPF_RSS_KEY_WR_ENI_MSKN 0xffffffdf
 /* lower bit position of bitfield rss_key_wr_en_i */
-#define rpf_rss_key_wr_eni_shift 5
+#define HW_ATL_RPF_RSS_KEY_WR_ENI_SHIFT 5
 /* width of bitfield rss_key_wr_en_i */
-#define rpf_rss_key_wr_eni_width 1
+#define HW_ATL_RPF_RSS_KEY_WR_ENI_WIDTH 1
 /* default value of bitfield rss_key_wr_en_i */
-#define rpf_rss_key_wr_eni_default 0x0
+#define HW_ATL_RPF_RSS_KEY_WR_ENI_DEFAULT 0x0
 
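Taken together, the three RSS key fields above suggest an indirect write interface: the 32-bit key word is staged in rss_key_wr_data, and the target slot plus the wr_en strobe go into the register at 0x54d0. The sequence below is inferred from the field names, not lifted from the driver, and write_reg() is again a logging stand-in.

#include <stdint.h>
#include <stdio.h>

/* Values copied from the renamed RSS key definitions above. */
#define HW_ATL_RPF_RSS_KEY_ADDR_ADR     0x000054d0u
#define HW_ATL_RPF_RSS_KEY_ADDR_MSK     0x0000001fu
#define HW_ATL_RPF_RSS_KEY_WR_DATA_ADR  0x000054d4u
#define HW_ATL_RPF_RSS_KEY_WR_ENI_MSK   0x00000020u

static void write_reg(uint32_t addr, uint32_t val)
{
	printf("reg[0x%08x] <= 0x%08x\n", addr, val);
}

/* Plausible indirect-write sequence for one 32-bit key word:
 * stage the data, then strobe the address register with wr_en set. */
static void rss_key_write_word(uint32_t slot, uint32_t key_word)
{
	write_reg(HW_ATL_RPF_RSS_KEY_WR_DATA_ADR, key_word);
	write_reg(HW_ATL_RPF_RSS_KEY_ADDR_ADR,
		  (slot & HW_ATL_RPF_RSS_KEY_ADDR_MSK) |
		  HW_ATL_RPF_RSS_KEY_WR_ENI_MSK);
}

int main(void)
{
	rss_key_write_word(0, 0x6d5a56dau);	/* first word of a toy key */
	return 0;
}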
 /* rx rss_redir_addr[3:0] bitfield definitions
  * preprocessor definitions for the bitfield "rss_redir_addr[3:0]".
@@ -855,17 +861,17 @@
  */
 
 /* register address for bitfield rss_redir_addr[3:0] */
-#define rpf_rss_redir_addr_adr 0x000054e0
+#define HW_ATL_RPF_RSS_REDIR_ADDR_ADR 0x000054e0
 /* bitmask for bitfield rss_redir_addr[3:0] */
-#define rpf_rss_redir_addr_msk 0x0000000f
+#define HW_ATL_RPF_RSS_REDIR_ADDR_MSK 0x0000000f
 /* inverted bitmask for bitfield rss_redir_addr[3:0] */
-#define rpf_rss_redir_addr_mskn 0xfffffff0
+#define HW_ATL_RPF_RSS_REDIR_ADDR_MSKN 0xfffffff0
 /* lower bit position of bitfield rss_redir_addr[3:0] */
-#define rpf_rss_redir_addr_shift 0
+#define HW_ATL_RPF_RSS_REDIR_ADDR_SHIFT 0
 /* width of bitfield rss_redir_addr[3:0] */
-#define rpf_rss_redir_addr_width 4
+#define HW_ATL_RPF_RSS_REDIR_ADDR_WIDTH 4
 /* default value of bitfield rss_redir_addr[3:0] */
-#define rpf_rss_redir_addr_default 0x0
+#define HW_ATL_RPF_RSS_REDIR_ADDR_DEFAULT 0x0
 
 /* rx rss_redir_wr_data[f:0] bitfield definitions
  * preprocessor definitions for the bitfield "rss_redir_wr_data[f:0]".
@@ -873,17 +879,17 @@
  */
 
 /* register address for bitfield rss_redir_wr_data[f:0] */
-#define rpf_rss_redir_wr_data_adr 0x000054e4
+#define HW_ATL_RPF_RSS_REDIR_WR_DATA_ADR 0x000054e4
 /* bitmask for bitfield rss_redir_wr_data[f:0] */
-#define rpf_rss_redir_wr_data_msk 0x0000ffff
+#define HW_ATL_RPF_RSS_REDIR_WR_DATA_MSK 0x0000ffff
 /* inverted bitmask for bitfield rss_redir_wr_data[f:0] */
-#define rpf_rss_redir_wr_data_mskn 0xffff0000
+#define HW_ATL_RPF_RSS_REDIR_WR_DATA_MSKN 0xffff0000
 /* lower bit position of bitfield rss_redir_wr_data[f:0] */
-#define rpf_rss_redir_wr_data_shift 0
+#define HW_ATL_RPF_RSS_REDIR_WR_DATA_SHIFT 0
 /* width of bitfield rss_redir_wr_data[f:0] */
-#define rpf_rss_redir_wr_data_width 16
+#define HW_ATL_RPF_RSS_REDIR_WR_DATA_WIDTH 16
 /* default value of bitfield rss_redir_wr_data[f:0] */
-#define rpf_rss_redir_wr_data_default 0x0
+#define HW_ATL_RPF_RSS_REDIR_WR_DATA_DEFAULT 0x0
 
 /* rx rss_redir_wr_en_i bitfield definitions
  * preprocessor definitions for the bitfield "rss_redir_wr_en_i".
@@ -891,17 +897,17 @@
  */
 
 /* register address for bitfield rss_redir_wr_en_i */
-#define rpf_rss_redir_wr_eni_adr 0x000054e0
+#define HW_ATL_RPF_RSS_REDIR_WR_ENI_ADR 0x000054e0
 /* bitmask for bitfield rss_redir_wr_en_i */
-#define rpf_rss_redir_wr_eni_msk 0x00000010
+#define HW_ATL_RPF_RSS_REDIR_WR_ENI_MSK 0x00000010
 /* inverted bitmask for bitfield rss_redir_wr_en_i */
-#define rpf_rss_redir_wr_eni_mskn 0xffffffef
+#define HW_ATL_RPF_RSS_REDIR_WR_ENI_MSKN 0xffffffef
 /* lower bit position of bitfield rss_redir_wr_en_i */
-#define rpf_rss_redir_wr_eni_shift 4
+#define HW_ATL_RPF_RSS_REDIR_WR_ENI_SHIFT 4
 /* width of bitfield rss_redir_wr_en_i */
-#define rpf_rss_redir_wr_eni_width 1
+#define HW_ATL_RPF_RSS_REDIR_WR_ENI_WIDTH 1
 /* default value of bitfield rss_redir_wr_en_i */
-#define rpf_rss_redir_wr_eni_default 0x0
+#define HW_ATL_RPF_RSS_REDIR_WR_ENI_DEFAULT 0x0
 
 /* rx tpo_rpf_sys_loopback bitfield definitions
  * preprocessor definitions for the bitfield "tpo_rpf_sys_loopback".
@@ -909,17 +915,17 @@
  */
 
 /* register address for bitfield tpo_rpf_sys_loopback */
-#define rpf_tpo_rpf_sys_lbk_adr 0x00005000
+#define HW_ATL_RPF_TPO_RPF_SYS_LBK_ADR 0x00005000
 /* bitmask for bitfield tpo_rpf_sys_loopback */
-#define rpf_tpo_rpf_sys_lbk_msk 0x00000100
+#define HW_ATL_RPF_TPO_RPF_SYS_LBK_MSK 0x00000100
 /* inverted bitmask for bitfield tpo_rpf_sys_loopback */
-#define rpf_tpo_rpf_sys_lbk_mskn 0xfffffeff
+#define HW_ATL_RPF_TPO_RPF_SYS_LBK_MSKN 0xfffffeff
 /* lower bit position of bitfield tpo_rpf_sys_loopback */
-#define rpf_tpo_rpf_sys_lbk_shift 8
+#define HW_ATL_RPF_TPO_RPF_SYS_LBK_SHIFT 8
 /* width of bitfield tpo_rpf_sys_loopback */
-#define rpf_tpo_rpf_sys_lbk_width 1
+#define HW_ATL_RPF_TPO_RPF_SYS_LBK_WIDTH 1
 /* default value of bitfield tpo_rpf_sys_loopback */
-#define rpf_tpo_rpf_sys_lbk_default 0x0
+#define HW_ATL_RPF_TPO_RPF_SYS_LBK_DEFAULT 0x0
 
 /* rx vl_inner_tpid[f:0] bitfield definitions
  * preprocessor definitions for the bitfield "vl_inner_tpid[f:0]".
@@ -927,17 +933,17 @@
  */
 
 /* register address for bitfield vl_inner_tpid[f:0] */
-#define rpf_vl_inner_tpid_adr 0x00005284
+#define HW_ATL_RPF_VL_INNER_TPID_ADR 0x00005284
 /* bitmask for bitfield vl_inner_tpid[f:0] */
-#define rpf_vl_inner_tpid_msk 0x0000ffff
+#define HW_ATL_RPF_VL_INNER_TPID_MSK 0x0000ffff
 /* inverted bitmask for bitfield vl_inner_tpid[f:0] */
-#define rpf_vl_inner_tpid_mskn 0xffff0000
+#define HW_ATL_RPF_VL_INNER_TPID_MSKN 0xffff0000
 /* lower bit position of bitfield vl_inner_tpid[f:0] */
-#define rpf_vl_inner_tpid_shift 0
+#define HW_ATL_RPF_VL_INNER_TPID_SHIFT 0
 /* width of bitfield vl_inner_tpid[f:0] */
-#define rpf_vl_inner_tpid_width 16
+#define HW_ATL_RPF_VL_INNER_TPID_WIDTH 16
 /* default value of bitfield vl_inner_tpid[f:0] */
-#define rpf_vl_inner_tpid_default 0x8100
+#define HW_ATL_RPF_VL_INNER_TPID_DEFAULT 0x8100
 
 /* rx vl_outer_tpid[f:0] bitfield definitions
  * preprocessor definitions for the bitfield "vl_outer_tpid[f:0]".
@@ -945,17 +951,17 @@
  */
 
 /* register address for bitfield vl_outer_tpid[f:0] */
-#define rpf_vl_outer_tpid_adr 0x00005284
+#define HW_ATL_RPF_VL_OUTER_TPID_ADR 0x00005284
 /* bitmask for bitfield vl_outer_tpid[f:0] */
-#define rpf_vl_outer_tpid_msk 0xffff0000
+#define HW_ATL_RPF_VL_OUTER_TPID_MSK 0xffff0000
 /* inverted bitmask for bitfield vl_outer_tpid[f:0] */
-#define rpf_vl_outer_tpid_mskn 0x0000ffff
+#define HW_ATL_RPF_VL_OUTER_TPID_MSKN 0x0000ffff
 /* lower bit position of bitfield vl_outer_tpid[f:0] */
-#define rpf_vl_outer_tpid_shift 16
+#define HW_ATL_RPF_VL_OUTER_TPID_SHIFT 16
 /* width of bitfield vl_outer_tpid[f:0] */
-#define rpf_vl_outer_tpid_width 16
+#define HW_ATL_RPF_VL_OUTER_TPID_WIDTH 16
 /* default value of bitfield vl_outer_tpid[f:0] */
-#define rpf_vl_outer_tpid_default 0x88a8
+#define HW_ATL_RPF_VL_OUTER_TPID_DEFAULT 0x88a8
 
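The two TPID defaults are not arbitrary: 0x8100 is the IEEE 802.1Q TPID for a customer tag and 0x88a8 is the 802.1ad TPID for a service (QinQ outer) tag, matching ETH_P_8021Q and ETH_P_8021AD in the kernel's headers. A compile-time check makes the intent explicit; the macros are repeated locally so the sketch stands alone.

#include <assert.h>

/* Values copied from the renamed TPID definitions above. */
#define HW_ATL_RPF_VL_INNER_TPID_DEFAULT 0x8100
#define HW_ATL_RPF_VL_OUTER_TPID_DEFAULT 0x88a8

#define TPID_8021Q  0x8100	/* customer VLAN tag (C-tag) */
#define TPID_8021AD 0x88a8	/* service VLAN tag (S-tag, QinQ) */

static_assert(HW_ATL_RPF_VL_INNER_TPID_DEFAULT == TPID_8021Q,
	      "inner TPID default must be 802.1Q");
static_assert(HW_ATL_RPF_VL_OUTER_TPID_DEFAULT == TPID_8021AD,
	      "outer TPID default must be 802.1ad");

int main(void) { return 0; }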
 /* rx vl_promis_mode bitfield definitions
  * preprocessor definitions for the bitfield "vl_promis_mode".
@@ -963,17 +969,17 @@
  */
 
 /* register address for bitfield vl_promis_mode */
-#define rpf_vl_promis_mode_adr 0x00005280
+#define HW_ATL_RPF_VL_PROMIS_MODE_ADR 0x00005280
 /* bitmask for bitfield vl_promis_mode */
-#define rpf_vl_promis_mode_msk 0x00000002
+#define HW_ATL_RPF_VL_PROMIS_MODE_MSK 0x00000002
 /* inverted bitmask for bitfield vl_promis_mode */
-#define rpf_vl_promis_mode_mskn 0xfffffffd
+#define HW_ATL_RPF_VL_PROMIS_MODE_MSKN 0xfffffffd
 /* lower bit position of bitfield vl_promis_mode */
-#define rpf_vl_promis_mode_shift 1
+#define HW_ATL_RPF_VL_PROMIS_MODE_SHIFT 1
 /* width of bitfield vl_promis_mode */
-#define rpf_vl_promis_mode_width 1
+#define HW_ATL_RPF_VL_PROMIS_MODE_WIDTH 1
 /* default value of bitfield vl_promis_mode */
-#define rpf_vl_promis_mode_default 0x0
+#define HW_ATL_RPF_VL_PROMIS_MODE_DEFAULT 0x0
 
 /* RX vl_accept_untagged_mode Bitfield Definitions
  * Preprocessor definitions for the bitfield "vl_accept_untagged_mode".
@@ -981,17 +987,17 @@
  */
 
 /* Register address for bitfield vl_accept_untagged_mode */
-#define rpf_vl_accept_untagged_mode_adr 0x00005280
+#define HW_ATL_RPF_VL_ACCEPT_UNTAGGED_MODE_ADR 0x00005280
 /* Bitmask for bitfield vl_accept_untagged_mode */
-#define rpf_vl_accept_untagged_mode_msk 0x00000004
+#define HW_ATL_RPF_VL_ACCEPT_UNTAGGED_MODE_MSK 0x00000004
 /* Inverted bitmask for bitfield vl_accept_untagged_mode */
-#define rpf_vl_accept_untagged_mode_mskn 0xFFFFFFFB
+#define HW_ATL_RPF_VL_ACCEPT_UNTAGGED_MODE_MSKN 0xFFFFFFFB
 /* Lower bit position of bitfield vl_accept_untagged_mode */
-#define rpf_vl_accept_untagged_mode_shift 2
+#define HW_ATL_RPF_VL_ACCEPT_UNTAGGED_MODE_SHIFT 2
 /* Width of bitfield vl_accept_untagged_mode */
-#define rpf_vl_accept_untagged_mode_width 1
+#define HW_ATL_RPF_VL_ACCEPT_UNTAGGED_MODE_WIDTH 1
 /* Default value of bitfield vl_accept_untagged_mode */
-#define rpf_vl_accept_untagged_mode_default 0x0
+#define HW_ATL_RPF_VL_ACCEPT_UNTAGGED_MODE_DEFAULT 0x0
 
 /* RX vl_untagged_act[2:0] Bitfield Definitions
  * Preprocessor definitions for the bitfield "vl_untagged_act[2:0]".
@@ -999,17 +1005,17 @@
  */
 
 /* Register address for bitfield vl_untagged_act[2:0] */
-#define rpf_vl_untagged_act_adr 0x00005280
+#define HW_ATL_RPF_VL_UNTAGGED_ACT_ADR 0x00005280
 /* Bitmask for bitfield vl_untagged_act[2:0] */
-#define rpf_vl_untagged_act_msk 0x00000038
+#define HW_ATL_RPF_VL_UNTAGGED_ACT_MSK 0x00000038
 /* Inverted bitmask for bitfield vl_untagged_act[2:0] */
-#define rpf_vl_untagged_act_mskn 0xFFFFFFC7
+#define HW_ATL_RPF_VL_UNTAGGED_ACT_MSKN 0xFFFFFFC7
 /* Lower bit position of bitfield vl_untagged_act[2:0] */
-#define rpf_vl_untagged_act_shift 3
+#define HW_ATL_RPF_VL_UNTAGGED_ACT_SHIFT 3
 /* Width of bitfield vl_untagged_act[2:0] */
-#define rpf_vl_untagged_act_width 3
+#define HW_ATL_RPF_VL_UNTAGGED_ACT_WIDTH 3
 /* Default value of bitfield vl_untagged_act[2:0] */
-#define rpf_vl_untagged_act_default 0x0
+#define HW_ATL_RPF_VL_UNTAGGED_ACT_DEFAULT 0x0
 
 /* RX vl_en{F} Bitfield Definitions
  * Preprocessor definitions for the bitfield "vl_en{F}".
@@ -1018,17 +1024,17 @@
  */
 
 /* Register address for bitfield vl_en{F} */
-#define rpf_vl_en_f_adr(filter) (0x00005290 + (filter) * 0x4)
+#define HW_ATL_RPF_VL_EN_F_ADR(filter) (0x00005290 + (filter) * 0x4)
 /* Bitmask for bitfield vl_en{F} */
-#define rpf_vl_en_f_msk 0x80000000
+#define HW_ATL_RPF_VL_EN_F_MSK 0x80000000
 /* Inverted bitmask for bitfield vl_en{F} */
-#define rpf_vl_en_f_mskn 0x7FFFFFFF
+#define HW_ATL_RPF_VL_EN_F_MSKN 0x7FFFFFFF
 /* Lower bit position of bitfield vl_en{F} */
-#define rpf_vl_en_f_shift 31
+#define HW_ATL_RPF_VL_EN_F_SHIFT 31
 /* Width of bitfield vl_en{F} */
-#define rpf_vl_en_f_width 1
+#define HW_ATL_RPF_VL_EN_F_WIDTH 1
 /* Default value of bitfield vl_en{F} */
-#define rpf_vl_en_f_default 0x0
+#define HW_ATL_RPF_VL_EN_F_DEFAULT 0x0
 
 /* RX vl_act{F}[2:0] Bitfield Definitions
  * Preprocessor definitions for the bitfield "vl_act{F}[2:0]".
@@ -1037,17 +1043,17 @@
  */
 
 /* Register address for bitfield vl_act{F}[2:0] */
-#define rpf_vl_act_f_adr(filter) (0x00005290 + (filter) * 0x4)
+#define HW_ATL_RPF_VL_ACT_F_ADR(filter) (0x00005290 + (filter) * 0x4)
 /* Bitmask for bitfield vl_act{F}[2:0] */
-#define rpf_vl_act_f_msk 0x00070000
+#define HW_ATL_RPF_VL_ACT_F_MSK 0x00070000
 /* Inverted bitmask for bitfield vl_act{F}[2:0] */
-#define rpf_vl_act_f_mskn 0xFFF8FFFF
+#define HW_ATL_RPF_VL_ACT_F_MSKN 0xFFF8FFFF
 /* Lower bit position of bitfield vl_act{F}[2:0] */
-#define rpf_vl_act_f_shift 16
+#define HW_ATL_RPF_VL_ACT_F_SHIFT 16
 /* Width of bitfield vl_act{F}[2:0] */
-#define rpf_vl_act_f_width 3
+#define HW_ATL_RPF_VL_ACT_F_WIDTH 3
 /* Default value of bitfield vl_act{F}[2:0] */
-#define rpf_vl_act_f_default 0x0
+#define HW_ATL_RPF_VL_ACT_F_DEFAULT 0x0
 
 /* RX vl_id{F}[B:0] Bitfield Definitions
  * Preprocessor definitions for the bitfield "vl_id{F}[B:0]".
@@ -1056,17 +1062,17 @@
  */
 
 /* Register address for bitfield vl_id{F}[B:0] */
-#define rpf_vl_id_f_adr(filter) (0x00005290 + (filter) * 0x4)
+#define HW_ATL_RPF_VL_ID_F_ADR(filter) (0x00005290 + (filter) * 0x4)
 /* Bitmask for bitfield vl_id{F}[B:0] */
-#define rpf_vl_id_f_msk 0x00000FFF
+#define HW_ATL_RPF_VL_ID_F_MSK 0x00000FFF
 /* Inverted bitmask for bitfield vl_id{F}[B:0] */
-#define rpf_vl_id_f_mskn 0xFFFFF000
+#define HW_ATL_RPF_VL_ID_F_MSKN 0xFFFFF000
 /* Lower bit position of bitfield vl_id{F}[B:0] */
-#define rpf_vl_id_f_shift 0
+#define HW_ATL_RPF_VL_ID_F_SHIFT 0
 /* Width of bitfield vl_id{F}[B:0] */
-#define rpf_vl_id_f_width 12
+#define HW_ATL_RPF_VL_ID_F_WIDTH 12
 /* Default value of bitfield vl_id{F}[B:0] */
-#define rpf_vl_id_f_default 0x0
+#define HW_ATL_RPF_VL_ID_F_DEFAULT 0x0
 
 /* RX et_en{F} Bitfield Definitions
  * Preprocessor definitions for the bitfield "et_en{F}".
@@ -1075,17 +1081,17 @@
  */
 
 /* Register address for bitfield et_en{F} */
-#define rpf_et_en_f_adr(filter) (0x00005300 + (filter) * 0x4)
+#define HW_ATL_RPF_ET_EN_F_ADR(filter) (0x00005300 + (filter) * 0x4)
 /* Bitmask for bitfield et_en{F} */
-#define rpf_et_en_f_msk 0x80000000
+#define HW_ATL_RPF_ET_EN_F_MSK 0x80000000
 /* Inverted bitmask for bitfield et_en{F} */
-#define rpf_et_en_f_mskn 0x7FFFFFFF
+#define HW_ATL_RPF_ET_EN_F_MSKN 0x7FFFFFFF
 /* Lower bit position of bitfield et_en{F} */
-#define rpf_et_en_f_shift 31
+#define HW_ATL_RPF_ET_EN_F_SHIFT 31
 /* Width of bitfield et_en{F} */
-#define rpf_et_en_f_width 1
+#define HW_ATL_RPF_ET_EN_F_WIDTH 1
 /* Default value of bitfield et_en{F} */
-#define rpf_et_en_f_default 0x0
+#define HW_ATL_RPF_ET_EN_F_DEFAULT 0x0
 
 /* rx et_en{f} bitfield definitions
  * preprocessor definitions for the bitfield "et_en{f}".
@@ -1094,17 +1100,17 @@
  */
 
 /* register address for bitfield et_en{f} */
-#define rpf_et_enf_adr(filter) (0x00005300 + (filter) * 0x4)
+#define HW_ATL_RPF_ET_ENF_ADR(filter) (0x00005300 + (filter) * 0x4)
 /* bitmask for bitfield et_en{f} */
-#define rpf_et_enf_msk 0x80000000
+#define HW_ATL_RPF_ET_ENF_MSK 0x80000000
 /* inverted bitmask for bitfield et_en{f} */
-#define rpf_et_enf_mskn 0x7fffffff
+#define HW_ATL_RPF_ET_ENF_MSKN 0x7fffffff
 /* lower bit position of bitfield et_en{f} */
-#define rpf_et_enf_shift 31
+#define HW_ATL_RPF_ET_ENF_SHIFT 31
 /* width of bitfield et_en{f} */
-#define rpf_et_enf_width 1
+#define HW_ATL_RPF_ET_ENF_WIDTH 1
 /* default value of bitfield et_en{f} */
-#define rpf_et_enf_default 0x0
+#define HW_ATL_RPF_ET_ENF_DEFAULT 0x0
 
 /* rx et_up{f}_en bitfield definitions
  * preprocessor definitions for the bitfield "et_up{f}_en".
@@ -1113,17 +1119,17 @@
  */
 
 /* register address for bitfield et_up{f}_en */
-#define rpf_et_upfen_adr(filter) (0x00005300 + (filter) * 0x4)
+#define HW_ATL_RPF_ET_UPFEN_ADR(filter) (0x00005300 + (filter) * 0x4)
 /* bitmask for bitfield et_up{f}_en */
-#define rpf_et_upfen_msk 0x40000000
+#define HW_ATL_RPF_ET_UPFEN_MSK 0x40000000
 /* inverted bitmask for bitfield et_up{f}_en */
-#define rpf_et_upfen_mskn 0xbfffffff
+#define HW_ATL_RPF_ET_UPFEN_MSKN 0xbfffffff
 /* lower bit position of bitfield et_up{f}_en */
-#define rpf_et_upfen_shift 30
+#define HW_ATL_RPF_ET_UPFEN_SHIFT 30
 /* width of bitfield et_up{f}_en */
-#define rpf_et_upfen_width 1
+#define HW_ATL_RPF_ET_UPFEN_WIDTH 1
 /* default value of bitfield et_up{f}_en */
-#define rpf_et_upfen_default 0x0
+#define HW_ATL_RPF_ET_UPFEN_DEFAULT 0x0
 
 /* rx et_rxq{f}_en bitfield definitions
  * preprocessor definitions for the bitfield "et_rxq{f}_en".
@@ -1132,17 +1138,17 @@
  */
 
 /* register address for bitfield et_rxq{f}_en */
-#define rpf_et_rxqfen_adr(filter) (0x00005300 + (filter) * 0x4)
+#define HW_ATL_RPF_ET_RXQFEN_ADR(filter) (0x00005300 + (filter) * 0x4)
 /* bitmask for bitfield et_rxq{f}_en */
-#define rpf_et_rxqfen_msk 0x20000000
+#define HW_ATL_RPF_ET_RXQFEN_MSK 0x20000000
 /* inverted bitmask for bitfield et_rxq{f}_en */
-#define rpf_et_rxqfen_mskn 0xdfffffff
+#define HW_ATL_RPF_ET_RXQFEN_MSKN 0xdfffffff
 /* lower bit position of bitfield et_rxq{f}_en */
-#define rpf_et_rxqfen_shift 29
+#define HW_ATL_RPF_ET_RXQFEN_SHIFT 29
 /* width of bitfield et_rxq{f}_en */
-#define rpf_et_rxqfen_width 1
+#define HW_ATL_RPF_ET_RXQFEN_WIDTH 1
 /* default value of bitfield et_rxq{f}_en */
-#define rpf_et_rxqfen_default 0x0
+#define HW_ATL_RPF_ET_RXQFEN_DEFAULT 0x0
 
 /* rx et_up{f}[2:0] bitfield definitions
  * preprocessor definitions for the bitfield "et_up{f}[2:0]".
@@ -1151,17 +1157,17 @@
  */
 
 /* register address for bitfield et_up{f}[2:0] */
-#define rpf_et_upf_adr(filter) (0x00005300 + (filter) * 0x4)
+#define HW_ATL_RPF_ET_UPF_ADR(filter) (0x00005300 + (filter) * 0x4)
 /* bitmask for bitfield et_up{f}[2:0] */
-#define rpf_et_upf_msk 0x1c000000
+#define HW_ATL_RPF_ET_UPF_MSK 0x1c000000
 /* inverted bitmask for bitfield et_up{f}[2:0] */
-#define rpf_et_upf_mskn 0xe3ffffff
+#define HW_ATL_RPF_ET_UPF_MSKN 0xe3ffffff
 /* lower bit position of bitfield et_up{f}[2:0] */
-#define rpf_et_upf_shift 26
+#define HW_ATL_RPF_ET_UPF_SHIFT 26
 /* width of bitfield et_up{f}[2:0] */
-#define rpf_et_upf_width 3
+#define HW_ATL_RPF_ET_UPF_WIDTH 3
 /* default value of bitfield et_up{f}[2:0] */
-#define rpf_et_upf_default 0x0
+#define HW_ATL_RPF_ET_UPF_DEFAULT 0x0
 
 /* rx et_rxq{f}[4:0] bitfield definitions
  * preprocessor definitions for the bitfield "et_rxq{f}[4:0]".
@@ -1170,17 +1176,17 @@
  */
 
 /* register address for bitfield et_rxq{f}[4:0] */
-#define rpf_et_rxqf_adr(filter) (0x00005300 + (filter) * 0x4)
+#define HW_ATL_RPF_ET_RXQF_ADR(filter) (0x00005300 + (filter) * 0x4)
 /* bitmask for bitfield et_rxq{f}[4:0] */
-#define rpf_et_rxqf_msk 0x01f00000
+#define HW_ATL_RPF_ET_RXQF_MSK 0x01f00000
 /* inverted bitmask for bitfield et_rxq{f}[4:0] */
-#define rpf_et_rxqf_mskn 0xfe0fffff
+#define HW_ATL_RPF_ET_RXQF_MSKN 0xfe0fffff
 /* lower bit position of bitfield et_rxq{f}[4:0] */
-#define rpf_et_rxqf_shift 20
+#define HW_ATL_RPF_ET_RXQF_SHIFT 20
 /* width of bitfield et_rxq{f}[4:0] */
-#define rpf_et_rxqf_width 5
+#define HW_ATL_RPF_ET_RXQF_WIDTH 5
 /* default value of bitfield et_rxq{f}[4:0] */
-#define rpf_et_rxqf_default 0x0
+#define HW_ATL_RPF_ET_RXQF_DEFAULT 0x0
 
 /* rx et_mng_rxq{f} bitfield definitions
  * preprocessor definitions for the bitfield "et_mng_rxq{f}".
@@ -1189,17 +1195,17 @@
  */
 
 /* register address for bitfield et_mng_rxq{f} */
-#define rpf_et_mng_rxqf_adr(filter) (0x00005300 + (filter) * 0x4)
+#define HW_ATL_RPF_ET_MNG_RXQF_ADR(filter) (0x00005300 + (filter) * 0x4)
 /* bitmask for bitfield et_mng_rxq{f} */
-#define rpf_et_mng_rxqf_msk 0x00080000
+#define HW_ATL_RPF_ET_MNG_RXQF_MSK 0x00080000
 /* inverted bitmask for bitfield et_mng_rxq{f} */
-#define rpf_et_mng_rxqf_mskn 0xfff7ffff
+#define HW_ATL_RPF_ET_MNG_RXQF_MSKN 0xfff7ffff
 /* lower bit position of bitfield et_mng_rxq{f} */
-#define rpf_et_mng_rxqf_shift 19
+#define HW_ATL_RPF_ET_MNG_RXQF_SHIFT 19
 /* width of bitfield et_mng_rxq{f} */
-#define rpf_et_mng_rxqf_width 1
+#define HW_ATL_RPF_ET_MNG_RXQF_WIDTH 1
 /* default value of bitfield et_mng_rxq{f} */
-#define rpf_et_mng_rxqf_default 0x0
+#define HW_ATL_RPF_ET_MNG_RXQF_DEFAULT 0x0
 
 /* rx et_act{f}[2:0] bitfield definitions
  * preprocessor definitions for the bitfield "et_act{f}[2:0]".
@@ -1208,17 +1214,17 @@
  */
 
 /* register address for bitfield et_act{f}[2:0] */
-#define rpf_et_actf_adr(filter) (0x00005300 + (filter) * 0x4)
+#define HW_ATL_RPF_ET_ACTF_ADR(filter) (0x00005300 + (filter) * 0x4)
 /* bitmask for bitfield et_act{f}[2:0] */
-#define rpf_et_actf_msk 0x00070000
+#define HW_ATL_RPF_ET_ACTF_MSK 0x00070000
 /* inverted bitmask for bitfield et_act{f}[2:0] */
-#define rpf_et_actf_mskn 0xfff8ffff
+#define HW_ATL_RPF_ET_ACTF_MSKN 0xfff8ffff
 /* lower bit position of bitfield et_act{f}[2:0] */
-#define rpf_et_actf_shift 16
+#define HW_ATL_RPF_ET_ACTF_SHIFT 16
 /* width of bitfield et_act{f}[2:0] */
-#define rpf_et_actf_width 3
+#define HW_ATL_RPF_ET_ACTF_WIDTH 3
 /* default value of bitfield et_act{f}[2:0] */
-#define rpf_et_actf_default 0x0
+#define HW_ATL_RPF_ET_ACTF_DEFAULT 0x0
 
 /* rx et_val{f}[f:0] bitfield definitions
  * preprocessor definitions for the bitfield "et_val{f}[f:0]".
@@ -1227,17 +1233,17 @@
  */
 
 /* register address for bitfield et_val{f}[f:0] */
-#define rpf_et_valf_adr(filter) (0x00005300 + (filter) * 0x4)
+#define HW_ATL_RPF_ET_VALF_ADR(filter) (0x00005300 + (filter) * 0x4)
 /* bitmask for bitfield et_val{f}[f:0] */
-#define rpf_et_valf_msk 0x0000ffff
+#define HW_ATL_RPF_ET_VALF_MSK 0x0000ffff
 /* inverted bitmask for bitfield et_val{f}[f:0] */
-#define rpf_et_valf_mskn 0xffff0000
+#define HW_ATL_RPF_ET_VALF_MSKN 0xffff0000
 /* lower bit position of bitfield et_val{f}[f:0] */
-#define rpf_et_valf_shift 0
+#define HW_ATL_RPF_ET_VALF_SHIFT 0
 /* width of bitfield et_val{f}[f:0] */
-#define rpf_et_valf_width 16
+#define HW_ATL_RPF_ET_VALF_WIDTH 16
 /* default value of bitfield et_val{f}[f:0] */
-#define rpf_et_valf_default 0x0
+#define HW_ATL_RPF_ET_VALF_DEFAULT 0x0
 
 /* rx ipv4_chk_en bitfield definitions
  * preprocessor definitions for the bitfield "ipv4_chk_en".
@@ -1245,17 +1251,17 @@
  */
 
 /* register address for bitfield ipv4_chk_en */
-#define rpo_ipv4chk_en_adr 0x00005580
+#define HW_ATL_RPO_IPV4CHK_EN_ADR 0x00005580
 /* bitmask for bitfield ipv4_chk_en */
-#define rpo_ipv4chk_en_msk 0x00000002
+#define HW_ATL_RPO_IPV4CHK_EN_MSK 0x00000002
 /* inverted bitmask for bitfield ipv4_chk_en */
-#define rpo_ipv4chk_en_mskn 0xfffffffd
+#define HW_ATL_RPO_IPV4CHK_EN_MSKN 0xfffffffd
 /* lower bit position of bitfield ipv4_chk_en */
-#define rpo_ipv4chk_en_shift 1
+#define HW_ATL_RPO_IPV4CHK_EN_SHIFT 1
 /* width of bitfield ipv4_chk_en */
-#define rpo_ipv4chk_en_width 1
+#define HW_ATL_RPO_IPV4CHK_EN_WIDTH 1
 /* default value of bitfield ipv4_chk_en */
-#define rpo_ipv4chk_en_default 0x0
+#define HW_ATL_RPO_IPV4CHK_EN_DEFAULT 0x0
 
 /* rx desc{d}_vl_strip bitfield definitions
  * preprocessor definitions for the bitfield "desc{d}_vl_strip".
@@ -1264,17 +1270,18 @@
  */
 
 /* register address for bitfield desc{d}_vl_strip */
-#define rpo_descdvl_strip_adr(descriptor) (0x00005b08 + (descriptor) * 0x20)
+#define HW_ATL_RPO_DESCDVL_STRIP_ADR(descriptor) \
+	(0x00005b08 + (descriptor) * 0x20)
 /* bitmask for bitfield desc{d}_vl_strip */
-#define rpo_descdvl_strip_msk 0x20000000
+#define HW_ATL_RPO_DESCDVL_STRIP_MSK 0x20000000
 /* inverted bitmask for bitfield desc{d}_vl_strip */
-#define rpo_descdvl_strip_mskn 0xdfffffff
+#define HW_ATL_RPO_DESCDVL_STRIP_MSKN 0xdfffffff
 /* lower bit position of bitfield desc{d}_vl_strip */
-#define rpo_descdvl_strip_shift 29
+#define HW_ATL_RPO_DESCDVL_STRIP_SHIFT 29
 /* width of bitfield desc{d}_vl_strip */
-#define rpo_descdvl_strip_width 1
+#define HW_ATL_RPO_DESCDVL_STRIP_WIDTH 1
 /* default value of bitfield desc{d}_vl_strip */
-#define rpo_descdvl_strip_default 0x0
+#define HW_ATL_RPO_DESCDVL_STRIP_DEFAULT 0x0
 
 /* rx l4_chk_en bitfield definitions
  * preprocessor definitions for the bitfield "l4_chk_en".
@@ -1282,17 +1289,17 @@
  */
 
 /* register address for bitfield l4_chk_en */
-#define rpol4chk_en_adr 0x00005580
+#define HW_ATL_RPOL4CHK_EN_ADR 0x00005580
 /* bitmask for bitfield l4_chk_en */
-#define rpol4chk_en_msk 0x00000001
+#define HW_ATL_RPOL4CHK_EN_MSK 0x00000001
 /* inverted bitmask for bitfield l4_chk_en */
-#define rpol4chk_en_mskn 0xfffffffe
+#define HW_ATL_RPOL4CHK_EN_MSKN 0xfffffffe
 /* lower bit position of bitfield l4_chk_en */
-#define rpol4chk_en_shift 0
+#define HW_ATL_RPOL4CHK_EN_SHIFT 0
 /* width of bitfield l4_chk_en */
-#define rpol4chk_en_width 1
+#define HW_ATL_RPOL4CHK_EN_WIDTH 1
 /* default value of bitfield l4_chk_en */
-#define rpol4chk_en_default 0x0
+#define HW_ATL_RPOL4CHK_EN_DEFAULT 0x0
 
 /* rx reg_res_dsbl bitfield definitions
  * preprocessor definitions for the bitfield "reg_res_dsbl".
@@ -1300,17 +1307,17 @@
  */
 
 /* register address for bitfield reg_res_dsbl */
-#define rx_reg_res_dsbl_adr 0x00005000
+#define HW_ATL_RX_REG_RES_DSBL_ADR 0x00005000
 /* bitmask for bitfield reg_res_dsbl */
-#define rx_reg_res_dsbl_msk 0x20000000
+#define HW_ATL_RX_REG_RES_DSBL_MSK 0x20000000
 /* inverted bitmask for bitfield reg_res_dsbl */
-#define rx_reg_res_dsbl_mskn 0xdfffffff
+#define HW_ATL_RX_REG_RES_DSBL_MSKN 0xdfffffff
 /* lower bit position of bitfield reg_res_dsbl */
-#define rx_reg_res_dsbl_shift 29
+#define HW_ATL_RX_REG_RES_DSBL_SHIFT 29
 /* width of bitfield reg_res_dsbl */
-#define rx_reg_res_dsbl_width 1
+#define HW_ATL_RX_REG_RES_DSBL_WIDTH 1
 /* default value of bitfield reg_res_dsbl */
-#define rx_reg_res_dsbl_default 0x1
+#define HW_ATL_RX_REG_RES_DSBL_DEFAULT 0x1
 
 /* tx dca{d}_cpuid[7:0] bitfield definitions
  * preprocessor definitions for the bitfield "dca{d}_cpuid[7:0]".
@@ -1319,17 +1326,17 @@
  */
 
 /* register address for bitfield dca{d}_cpuid[7:0] */
-#define tdm_dcadcpuid_adr(dca) (0x00008400 + (dca) * 0x4)
+#define HW_ATL_TDM_DCADCPUID_ADR(dca) (0x00008400 + (dca) * 0x4)
 /* bitmask for bitfield dca{d}_cpuid[7:0] */
-#define tdm_dcadcpuid_msk 0x000000ff
+#define HW_ATL_TDM_DCADCPUID_MSK 0x000000ff
 /* inverted bitmask for bitfield dca{d}_cpuid[7:0] */
-#define tdm_dcadcpuid_mskn 0xffffff00
+#define HW_ATL_TDM_DCADCPUID_MSKN 0xffffff00
 /* lower bit position of bitfield dca{d}_cpuid[7:0] */
-#define tdm_dcadcpuid_shift 0
+#define HW_ATL_TDM_DCADCPUID_SHIFT 0
 /* width of bitfield dca{d}_cpuid[7:0] */
-#define tdm_dcadcpuid_width 8
+#define HW_ATL_TDM_DCADCPUID_WIDTH 8
 /* default value of bitfield dca{d}_cpuid[7:0] */
-#define tdm_dcadcpuid_default 0x0
+#define HW_ATL_TDM_DCADCPUID_DEFAULT 0x0
 
 /* tx lso_en[1f:0] bitfield definitions
  * preprocessor definitions for the bitfield "lso_en[1f:0]".
@@ -1337,17 +1344,17 @@
  */
 
 /* register address for bitfield lso_en[1f:0] */
-#define tdm_lso_en_adr 0x00007810
+#define HW_ATL_TDM_LSO_EN_ADR 0x00007810
 /* bitmask for bitfield lso_en[1f:0] */
-#define tdm_lso_en_msk 0xffffffff
+#define HW_ATL_TDM_LSO_EN_MSK 0xffffffff
 /* inverted bitmask for bitfield lso_en[1f:0] */
-#define tdm_lso_en_mskn 0x00000000
+#define HW_ATL_TDM_LSO_EN_MSKN 0x00000000
 /* lower bit position of bitfield lso_en[1f:0] */
-#define tdm_lso_en_shift 0
+#define HW_ATL_TDM_LSO_EN_SHIFT 0
 /* width of bitfield lso_en[1f:0] */
-#define tdm_lso_en_width 32
+#define HW_ATL_TDM_LSO_EN_WIDTH 32
 /* default value of bitfield lso_en[1f:0] */
-#define tdm_lso_en_default 0x0
+#define HW_ATL_TDM_LSO_EN_DEFAULT 0x0
 
 /* tx dca_en bitfield definitions
  * preprocessor definitions for the bitfield "dca_en".
@@ -1355,17 +1362,17 @@
  */
 
 /* register address for bitfield dca_en */
-#define tdm_dca_en_adr 0x00008480
+#define HW_ATL_TDM_DCA_EN_ADR 0x00008480
 /* bitmask for bitfield dca_en */
-#define tdm_dca_en_msk 0x80000000
+#define HW_ATL_TDM_DCA_EN_MSK 0x80000000
 /* inverted bitmask for bitfield dca_en */
-#define tdm_dca_en_mskn 0x7fffffff
+#define HW_ATL_TDM_DCA_EN_MSKN 0x7fffffff
 /* lower bit position of bitfield dca_en */
-#define tdm_dca_en_shift 31
+#define HW_ATL_TDM_DCA_EN_SHIFT 31
 /* width of bitfield dca_en */
-#define tdm_dca_en_width 1
+#define HW_ATL_TDM_DCA_EN_WIDTH 1
 /* default value of bitfield dca_en */
-#define tdm_dca_en_default 0x1
+#define HW_ATL_TDM_DCA_EN_DEFAULT 0x1
 
 /* tx dca_mode[3:0] bitfield definitions
  * preprocessor definitions for the bitfield "dca_mode[3:0]".
@@ -1373,17 +1380,17 @@
  */
 
 /* register address for bitfield dca_mode[3:0] */
-#define tdm_dca_mode_adr 0x00008480
+#define HW_ATL_TDM_DCA_MODE_ADR 0x00008480
 /* bitmask for bitfield dca_mode[3:0] */
-#define tdm_dca_mode_msk 0x0000000f
+#define HW_ATL_TDM_DCA_MODE_MSK 0x0000000f
 /* inverted bitmask for bitfield dca_mode[3:0] */
-#define tdm_dca_mode_mskn 0xfffffff0
+#define HW_ATL_TDM_DCA_MODE_MSKN 0xfffffff0
 /* lower bit position of bitfield dca_mode[3:0] */
-#define tdm_dca_mode_shift 0
+#define HW_ATL_TDM_DCA_MODE_SHIFT 0
 /* width of bitfield dca_mode[3:0] */
-#define tdm_dca_mode_width 4
+#define HW_ATL_TDM_DCA_MODE_WIDTH 4
 /* default value of bitfield dca_mode[3:0] */
-#define tdm_dca_mode_default 0x0
+#define HW_ATL_TDM_DCA_MODE_DEFAULT 0x0
 
 /* tx dca{d}_desc_en bitfield definitions
  * preprocessor definitions for the bitfield "dca{d}_desc_en".
@@ -1392,17 +1399,17 @@
  */
 
 /* register address for bitfield dca{d}_desc_en */
-#define tdm_dcaddesc_en_adr(dca) (0x00008400 + (dca) * 0x4)
+#define HW_ATL_TDM_DCADDESC_EN_ADR(dca) (0x00008400 + (dca) * 0x4)
 /* bitmask for bitfield dca{d}_desc_en */
-#define tdm_dcaddesc_en_msk 0x80000000
+#define HW_ATL_TDM_DCADDESC_EN_MSK 0x80000000
 /* inverted bitmask for bitfield dca{d}_desc_en */
-#define tdm_dcaddesc_en_mskn 0x7fffffff
+#define HW_ATL_TDM_DCADDESC_EN_MSKN 0x7fffffff
 /* lower bit position of bitfield dca{d}_desc_en */
-#define tdm_dcaddesc_en_shift 31
+#define HW_ATL_TDM_DCADDESC_EN_SHIFT 31
 /* width of bitfield dca{d}_desc_en */
-#define tdm_dcaddesc_en_width 1
+#define HW_ATL_TDM_DCADDESC_EN_WIDTH 1
 /* default value of bitfield dca{d}_desc_en */
-#define tdm_dcaddesc_en_default 0x0
+#define HW_ATL_TDM_DCADDESC_EN_DEFAULT 0x0
 
 /* tx desc{d}_en bitfield definitions
  * preprocessor definitions for the bitfield "desc{d}_en".
@@ -1411,17 +1418,17 @@
  */
 
 /* register address for bitfield desc{d}_en */
-#define tdm_descden_adr(descriptor) (0x00007c08 + (descriptor) * 0x40)
+#define HW_ATL_TDM_DESCDEN_ADR(descriptor) (0x00007c08 + (descriptor) * 0x40)
 /* bitmask for bitfield desc{d}_en */
-#define tdm_descden_msk 0x80000000
+#define HW_ATL_TDM_DESCDEN_MSK 0x80000000
 /* inverted bitmask for bitfield desc{d}_en */
-#define tdm_descden_mskn 0x7fffffff
+#define HW_ATL_TDM_DESCDEN_MSKN 0x7fffffff
 /* lower bit position of bitfield desc{d}_en */
-#define tdm_descden_shift 31
+#define HW_ATL_TDM_DESCDEN_SHIFT 31
 /* width of bitfield desc{d}_en */
-#define tdm_descden_width 1
+#define HW_ATL_TDM_DESCDEN_WIDTH 1
 /* default value of bitfield desc{d}_en */
-#define tdm_descden_default 0x0
+#define HW_ATL_TDM_DESCDEN_DEFAULT 0x0
 
 /* tx desc{d}_hd[c:0] bitfield definitions
  * preprocessor definitions for the bitfield "desc{d}_hd[c:0]".
@@ -1430,15 +1437,15 @@
  */
 
 /* register address for bitfield desc{d}_hd[c:0] */
-#define tdm_descdhd_adr(descriptor) (0x00007c0c + (descriptor) * 0x40)
+#define HW_ATL_TDM_DESCDHD_ADR(descriptor) (0x00007c0c + (descriptor) * 0x40)
 /* bitmask for bitfield desc{d}_hd[c:0] */
-#define tdm_descdhd_msk 0x00001fff
+#define HW_ATL_TDM_DESCDHD_MSK 0x00001fff
 /* inverted bitmask for bitfield desc{d}_hd[c:0] */
-#define tdm_descdhd_mskn 0xffffe000
+#define HW_ATL_TDM_DESCDHD_MSKN 0xffffe000
 /* lower bit position of bitfield desc{d}_hd[c:0] */
-#define tdm_descdhd_shift 0
+#define HW_ATL_TDM_DESCDHD_SHIFT 0
 /* width of bitfield desc{d}_hd[c:0] */
-#define tdm_descdhd_width 13
+#define HW_ATL_TDM_DESCDHD_WIDTH 13
 
 /* tx desc{d}_len[9:0] bitfield definitions
  * preprocessor definitions for the bitfield "desc{d}_len[9:0]".
@@ -1447,17 +1454,17 @@
  */
 
 /* register address for bitfield desc{d}_len[9:0] */
-#define tdm_descdlen_adr(descriptor) (0x00007c08 + (descriptor) * 0x40)
+#define HW_ATL_TDM_DESCDLEN_ADR(descriptor) (0x00007c08 + (descriptor) * 0x40)
 /* bitmask for bitfield desc{d}_len[9:0] */
-#define tdm_descdlen_msk 0x00001ff8
+#define HW_ATL_TDM_DESCDLEN_MSK 0x00001ff8
 /* inverted bitmask for bitfield desc{d}_len[9:0] */
-#define tdm_descdlen_mskn 0xffffe007
+#define HW_ATL_TDM_DESCDLEN_MSKN 0xffffe007
 /* lower bit position of bitfield desc{d}_len[9:0] */
-#define tdm_descdlen_shift 3
+#define HW_ATL_TDM_DESCDLEN_SHIFT 3
 /* width of bitfield desc{d}_len[9:0] */
-#define tdm_descdlen_width 10
+#define HW_ATL_TDM_DESCDLEN_WIDTH 10
 /* default value of bitfield desc{d}_len[9:0] */
-#define tdm_descdlen_default 0x0
+#define HW_ATL_TDM_DESCDLEN_DEFAULT 0x0
 
 /* tx int_desc_wrb_en bitfield definitions
  * preprocessor definitions for the bitfield "int_desc_wrb_en".
@@ -1465,17 +1472,17 @@
  */
 
 /* register address for bitfield int_desc_wrb_en */
-#define tdm_int_desc_wrb_en_adr 0x00007b40
+#define HW_ATL_TDM_INT_DESC_WRB_EN_ADR 0x00007b40
 /* bitmask for bitfield int_desc_wrb_en */
-#define tdm_int_desc_wrb_en_msk 0x00000002
+#define HW_ATL_TDM_INT_DESC_WRB_EN_MSK 0x00000002
 /* inverted bitmask for bitfield int_desc_wrb_en */
-#define tdm_int_desc_wrb_en_mskn 0xfffffffd
+#define HW_ATL_TDM_INT_DESC_WRB_EN_MSKN 0xfffffffd
 /* lower bit position of bitfield int_desc_wrb_en */
-#define tdm_int_desc_wrb_en_shift 1
+#define HW_ATL_TDM_INT_DESC_WRB_EN_SHIFT 1
 /* width of bitfield int_desc_wrb_en */
-#define tdm_int_desc_wrb_en_width 1
+#define HW_ATL_TDM_INT_DESC_WRB_EN_WIDTH 1
 /* default value of bitfield int_desc_wrb_en */
-#define tdm_int_desc_wrb_en_default 0x0
+#define HW_ATL_TDM_INT_DESC_WRB_EN_DEFAULT 0x0
 
 /* tx desc{d}_wrb_thresh[6:0] bitfield definitions
  * preprocessor definitions for the bitfield "desc{d}_wrb_thresh[6:0]".
@@ -1484,17 +1491,18 @@
  */
 
 /* register address for bitfield desc{d}_wrb_thresh[6:0] */
-#define tdm_descdwrb_thresh_adr(descriptor) (0x00007c18 + (descriptor) * 0x40)
+#define HW_ATL_TDM_DESCDWRB_THRESH_ADR(descriptor) \
+	(0x00007c18 + (descriptor) * 0x40)
 /* bitmask for bitfield desc{d}_wrb_thresh[6:0] */
-#define tdm_descdwrb_thresh_msk 0x00007f00
+#define HW_ATL_TDM_DESCDWRB_THRESH_MSK 0x00007f00
 /* inverted bitmask for bitfield desc{d}_wrb_thresh[6:0] */
-#define tdm_descdwrb_thresh_mskn 0xffff80ff
+#define HW_ATL_TDM_DESCDWRB_THRESH_MSKN 0xffff80ff
 /* lower bit position of bitfield desc{d}_wrb_thresh[6:0] */
-#define tdm_descdwrb_thresh_shift 8
+#define HW_ATL_TDM_DESCDWRB_THRESH_SHIFT 8
 /* width of bitfield desc{d}_wrb_thresh[6:0] */
-#define tdm_descdwrb_thresh_width 7
+#define HW_ATL_TDM_DESCDWRB_THRESH_WIDTH 7
 /* default value of bitfield desc{d}_wrb_thresh[6:0] */
-#define tdm_descdwrb_thresh_default 0x0
+#define HW_ATL_TDM_DESCDWRB_THRESH_DEFAULT 0x0
 
 /* tx lso_tcp_flag_first[b:0] bitfield definitions
  * preprocessor definitions for the bitfield "lso_tcp_flag_first[b:0]".
@@ -1502,17 +1510,17 @@
  */
 
 /* register address for bitfield lso_tcp_flag_first[b:0] */
-#define thm_lso_tcp_flag_first_adr 0x00007820
+#define HW_ATL_THM_LSO_TCP_FLAG_FIRST_ADR 0x00007820
 /* bitmask for bitfield lso_tcp_flag_first[b:0] */
-#define thm_lso_tcp_flag_first_msk 0x00000fff
+#define HW_ATL_THM_LSO_TCP_FLAG_FIRST_MSK 0x00000fff
 /* inverted bitmask for bitfield lso_tcp_flag_first[b:0] */
-#define thm_lso_tcp_flag_first_mskn 0xfffff000
+#define HW_ATL_THM_LSO_TCP_FLAG_FIRST_MSKN 0xfffff000
 /* lower bit position of bitfield lso_tcp_flag_first[b:0] */
-#define thm_lso_tcp_flag_first_shift 0
+#define HW_ATL_THM_LSO_TCP_FLAG_FIRST_SHIFT 0
 /* width of bitfield lso_tcp_flag_first[b:0] */
-#define thm_lso_tcp_flag_first_width 12
+#define HW_ATL_THM_LSO_TCP_FLAG_FIRST_WIDTH 12
 /* default value of bitfield lso_tcp_flag_first[b:0] */
-#define thm_lso_tcp_flag_first_default 0x0
+#define HW_ATL_THM_LSO_TCP_FLAG_FIRST_DEFAULT 0x0
 
 /* tx lso_tcp_flag_last[b:0] bitfield definitions
  * preprocessor definitions for the bitfield "lso_tcp_flag_last[b:0]".
@@ -1520,17 +1528,17 @@
  */
 
 /* register address for bitfield lso_tcp_flag_last[b:0] */
-#define thm_lso_tcp_flag_last_adr 0x00007824
+#define HW_ATL_THM_LSO_TCP_FLAG_LAST_ADR 0x00007824
 /* bitmask for bitfield lso_tcp_flag_last[b:0] */
-#define thm_lso_tcp_flag_last_msk 0x00000fff
+#define HW_ATL_THM_LSO_TCP_FLAG_LAST_MSK 0x00000fff
 /* inverted bitmask for bitfield lso_tcp_flag_last[b:0] */
-#define thm_lso_tcp_flag_last_mskn 0xfffff000
+#define HW_ATL_THM_LSO_TCP_FLAG_LAST_MSKN 0xfffff000
 /* lower bit position of bitfield lso_tcp_flag_last[b:0] */
-#define thm_lso_tcp_flag_last_shift 0
+#define HW_ATL_THM_LSO_TCP_FLAG_LAST_SHIFT 0
 /* width of bitfield lso_tcp_flag_last[b:0] */
-#define thm_lso_tcp_flag_last_width 12
+#define HW_ATL_THM_LSO_TCP_FLAG_LAST_WIDTH 12
 /* default value of bitfield lso_tcp_flag_last[b:0] */
-#define thm_lso_tcp_flag_last_default 0x0
+#define HW_ATL_THM_LSO_TCP_FLAG_LAST_DEFAULT 0x0
 
 /* RX lro_rsc_max[1F:0] Bitfield Definitions
  * Preprocessor definitions for the bitfield "lro_rsc_max[1F:0]".
@@ -1538,17 +1546,17 @@
  */
 
 /* Register address for bitfield lro_rsc_max[1F:0] */
-#define rpo_lro_rsc_max_adr 0x00005598
+#define HW_ATL_RPO_LRO_RSC_MAX_ADR 0x00005598
 /* Bitmask for bitfield lro_rsc_max[1F:0] */
-#define rpo_lro_rsc_max_msk 0xFFFFFFFF
+#define HW_ATL_RPO_LRO_RSC_MAX_MSK 0xFFFFFFFF
 /* Inverted bitmask for bitfield lro_rsc_max[1F:0] */
-#define rpo_lro_rsc_max_mskn 0x00000000
+#define HW_ATL_RPO_LRO_RSC_MAX_MSKN 0x00000000
 /* Lower bit position of bitfield lro_rsc_max[1F:0] */
-#define rpo_lro_rsc_max_shift 0
+#define HW_ATL_RPO_LRO_RSC_MAX_SHIFT 0
 /* Width of bitfield lro_rsc_max[1F:0] */
-#define rpo_lro_rsc_max_width 32
+#define HW_ATL_RPO_LRO_RSC_MAX_WIDTH 32
 /* Default value of bitfield lro_rsc_max[1F:0] */
-#define rpo_lro_rsc_max_default 0x0
+#define HW_ATL_RPO_LRO_RSC_MAX_DEFAULT 0x0
 
 /* RX lro_en[1F:0] Bitfield Definitions
  * Preprocessor definitions for the bitfield "lro_en[1F:0]".
@@ -1556,17 +1564,17 @@
  */
 
 /* Register address for bitfield lro_en[1F:0] */
-#define rpo_lro_en_adr 0x00005590
+#define HW_ATL_RPO_LRO_EN_ADR 0x00005590
 /* Bitmask for bitfield lro_en[1F:0] */
-#define rpo_lro_en_msk 0xFFFFFFFF
+#define HW_ATL_RPO_LRO_EN_MSK 0xFFFFFFFF
 /* Inverted bitmask for bitfield lro_en[1F:0] */
-#define rpo_lro_en_mskn 0x00000000
+#define HW_ATL_RPO_LRO_EN_MSKN 0x00000000
 /* Lower bit position of bitfield lro_en[1F:0] */
-#define rpo_lro_en_shift 0
+#define HW_ATL_RPO_LRO_EN_SHIFT 0
 /* Width of bitfield lro_en[1F:0] */
-#define rpo_lro_en_width 32
+#define HW_ATL_RPO_LRO_EN_WIDTH 32
 /* Default value of bitfield lro_en[1F:0] */
-#define rpo_lro_en_default 0x0
+#define HW_ATL_RPO_LRO_EN_DEFAULT 0x0
 
 /* RX lro_ptopt_en Bitfield Definitions
  * Preprocessor definitions for the bitfield "lro_ptopt_en".
@@ -1574,17 +1582,17 @@
  */
 
 /* Register address for bitfield lro_ptopt_en */
-#define rpo_lro_ptopt_en_adr 0x00005594
+#define HW_ATL_RPO_LRO_PTOPT_EN_ADR 0x00005594
 /* Bitmask for bitfield lro_ptopt_en */
-#define rpo_lro_ptopt_en_msk 0x00008000
+#define HW_ATL_RPO_LRO_PTOPT_EN_MSK 0x00008000
 /* Inverted bitmask for bitfield lro_ptopt_en */
-#define rpo_lro_ptopt_en_mskn 0xFFFF7FFF
+#define HW_ATL_RPO_LRO_PTOPT_EN_MSKN 0xFFFF7FFF
 /* Lower bit position of bitfield lro_ptopt_en */
-#define rpo_lro_ptopt_en_shift 15
+#define HW_ATL_RPO_LRO_PTOPT_EN_SHIFT 15
 /* Width of bitfield lro_ptopt_en */
-#define rpo_lro_ptopt_en_width 1
+#define HW_ATL_RPO_LRO_PTOPT_EN_WIDTH 1
 /* Default value of bitfield lro_ptopt_en */
-#define rpo_lro_ptopt_en_defalt 0x1
+#define HW_ATL_RPO_LRO_PTOPT_EN_DEFAULT 0x1
 
 /* RX lro_q_ses_lmt Bitfield Definitions
  * Preprocessor definitions for the bitfield "lro_q_ses_lmt".
@@ -1592,17 +1600,17 @@
  */
 
 /* Register address for bitfield lro_q_ses_lmt */
-#define rpo_lro_qses_lmt_adr 0x00005594
+#define HW_ATL_RPO_LRO_QSES_LMT_ADR 0x00005594
 /* Bitmask for bitfield lro_q_ses_lmt */
-#define rpo_lro_qses_lmt_msk 0x00003000
+#define HW_ATL_RPO_LRO_QSES_LMT_MSK 0x00003000
 /* Inverted bitmask for bitfield lro_q_ses_lmt */
-#define rpo_lro_qses_lmt_mskn 0xFFFFCFFF
+#define HW_ATL_RPO_LRO_QSES_LMT_MSKN 0xFFFFCFFF
 /* Lower bit position of bitfield lro_q_ses_lmt */
-#define rpo_lro_qses_lmt_shift 12
+#define HW_ATL_RPO_LRO_QSES_LMT_SHIFT 12
 /* Width of bitfield lro_q_ses_lmt */
-#define rpo_lro_qses_lmt_width 2
+#define HW_ATL_RPO_LRO_QSES_LMT_WIDTH 2
 /* Default value of bitfield lro_q_ses_lmt */
-#define rpo_lro_qses_lmt_default 0x1
+#define HW_ATL_RPO_LRO_QSES_LMT_DEFAULT 0x1
 
 /* RX lro_tot_dsc_lmt[1:0] Bitfield Definitions
  * Preprocessor definitions for the bitfield "lro_tot_dsc_lmt[1:0]".
@@ -1610,17 +1618,17 @@
  */
 
 /* Register address for bitfield lro_tot_dsc_lmt[1:0] */
-#define rpo_lro_tot_dsc_lmt_adr 0x00005594
+#define HW_ATL_RPO_LRO_TOT_DSC_LMT_ADR 0x00005594
 /* Bitmask for bitfield lro_tot_dsc_lmt[1:0] */
-#define rpo_lro_tot_dsc_lmt_msk 0x00000060
+#define HW_ATL_RPO_LRO_TOT_DSC_LMT_MSK 0x00000060
 /* Inverted bitmask for bitfield lro_tot_dsc_lmt[1:0] */
-#define rpo_lro_tot_dsc_lmt_mskn 0xFFFFFF9F
+#define HW_ATL_RPO_LRO_TOT_DSC_LMT_MSKN 0xFFFFFF9F
 /* Lower bit position of bitfield lro_tot_dsc_lmt[1:0] */
-#define rpo_lro_tot_dsc_lmt_shift 5
+#define HW_ATL_RPO_LRO_TOT_DSC_LMT_SHIFT 5
 /* Width of bitfield lro_tot_dsc_lmt[1:0] */
-#define rpo_lro_tot_dsc_lmt_width 2
+#define HW_ATL_RPO_LRO_TOT_DSC_LMT_WIDTH 2
 /* Default value of bitfield lro_tot_dsc_lmt[1:0] */
-#define rpo_lro_tot_dsc_lmt_defalt 0x1
+#define HW_ATL_RPO_LRO_TOT_DSC_LMT_DEFAULT 0x1
 
 /* RX lro_pkt_min[4:0] Bitfield Definitions
  * Preprocessor definitions for the bitfield "lro_pkt_min[4:0]".
@@ -1628,22 +1636,22 @@
  */
 
 /* Register address for bitfield lro_pkt_min[4:0] */
-#define rpo_lro_pkt_min_adr 0x00005594
+#define HW_ATL_RPO_LRO_PKT_MIN_ADR 0x00005594
 /* Bitmask for bitfield lro_pkt_min[4:0] */
-#define rpo_lro_pkt_min_msk 0x0000001F
+#define HW_ATL_RPO_LRO_PKT_MIN_MSK 0x0000001F
 /* Inverted bitmask for bitfield lro_pkt_min[4:0] */
-#define rpo_lro_pkt_min_mskn 0xFFFFFFE0
+#define HW_ATL_RPO_LRO_PKT_MIN_MSKN 0xFFFFFFE0
 /* Lower bit position of bitfield lro_pkt_min[4:0] */
-#define rpo_lro_pkt_min_shift 0
+#define HW_ATL_RPO_LRO_PKT_MIN_SHIFT 0
 /* Width of bitfield lro_pkt_min[4:0] */
-#define rpo_lro_pkt_min_width 5
+#define HW_ATL_RPO_LRO_PKT_MIN_WIDTH 5
 /* Default value of bitfield lro_pkt_min[4:0] */
-#define rpo_lro_pkt_min_default 0x8
+#define HW_ATL_RPO_LRO_PKT_MIN_DEFAULT 0x8
 
 /* Width of bitfield lro{L}_des_max[1:0] */
-#define rpo_lro_ldes_max_width 2
+#define HW_ATL_RPO_LRO_LDES_MAX_WIDTH 2
 /* Default value of bitfield lro{L}_des_max[1:0] */
-#define rpo_lro_ldes_max_default 0x0
+#define HW_ATL_RPO_LRO_LDES_MAX_DEFAULT 0x0
 
 /* RX lro_tb_div[11:0] Bitfield Definitions
  * Preprocessor definitions for the bitfield "lro_tb_div[11:0]".
@@ -1651,17 +1659,17 @@
  */
 
 /* Register address for bitfield lro_tb_div[11:0] */
-#define rpo_lro_tb_div_adr 0x00005620
+#define HW_ATL_RPO_LRO_TB_DIV_ADR 0x00005620
 /* Bitmask for bitfield lro_tb_div[11:0] */
-#define rpo_lro_tb_div_msk 0xFFF00000
+#define HW_ATL_RPO_LRO_TB_DIV_MSK 0xFFF00000
 /* Inverted bitmask for bitfield lro_tb_div[11:0] */
-#define rpo_lro_tb_div_mskn 0x000FFFFF
+#define HW_ATL_RPO_LRO_TB_DIV_MSKN 0x000FFFFF
 /* Lower bit position of bitfield lro_tb_div[11:0] */
-#define rpo_lro_tb_div_shift 20
+#define HW_ATL_RPO_LRO_TB_DIV_SHIFT 20
 /* Width of bitfield lro_tb_div[11:0] */
-#define rpo_lro_tb_div_width 12
+#define HW_ATL_RPO_LRO_TB_DIV_WIDTH 12
 /* Default value of bitfield lro_tb_div[11:0] */
-#define rpo_lro_tb_div_default 0xC35
+#define HW_ATL_RPO_LRO_TB_DIV_DEFAULT 0xC35
 
 /* RX lro_ina_ival[9:0] Bitfield Definitions
  *   Preprocessor definitions for the bitfield "lro_ina_ival[9:0]".
@@ -1669,17 +1677,17 @@
  */
 
 /* Register address for bitfield lro_ina_ival[9:0] */
-#define rpo_lro_ina_ival_adr 0x00005620
+#define HW_ATL_RPO_LRO_INA_IVAL_ADR 0x00005620
 /* Bitmask for bitfield lro_ina_ival[9:0] */
-#define rpo_lro_ina_ival_msk 0x000FFC00
+#define HW_ATL_RPO_LRO_INA_IVAL_MSK 0x000FFC00
 /* Inverted bitmask for bitfield lro_ina_ival[9:0] */
-#define rpo_lro_ina_ival_mskn 0xFFF003FF
+#define HW_ATL_RPO_LRO_INA_IVAL_MSKN 0xFFF003FF
 /* Lower bit position of bitfield lro_ina_ival[9:0] */
-#define rpo_lro_ina_ival_shift 10
+#define HW_ATL_RPO_LRO_INA_IVAL_SHIFT 10
 /* Width of bitfield lro_ina_ival[9:0] */
-#define rpo_lro_ina_ival_width 10
+#define HW_ATL_RPO_LRO_INA_IVAL_WIDTH 10
 /* Default value of bitfield lro_ina_ival[9:0] */
-#define rpo_lro_ina_ival_default 0xA
+#define HW_ATL_RPO_LRO_INA_IVAL_DEFAULT 0xA
 
 /* RX lro_max_ival[9:0] Bitfield Definitions
  * Preprocessor definitions for the bitfield "lro_max_ival[9:0]".
@@ -1687,17 +1695,17 @@
  */
 
 /* Register address for bitfield lro_max_ival[9:0] */
-#define rpo_lro_max_ival_adr 0x00005620
+#define HW_ATL_RPO_LRO_MAX_IVAL_ADR 0x00005620
 /* Bitmask for bitfield lro_max_ival[9:0] */
-#define rpo_lro_max_ival_msk 0x000003FF
+#define HW_ATL_RPO_LRO_MAX_IVAL_MSK 0x000003FF
 /* Inverted bitmask for bitfield lro_max_ival[9:0] */
-#define rpo_lro_max_ival_mskn 0xFFFFFC00
+#define HW_ATL_RPO_LRO_MAX_IVAL_MSKN 0xFFFFFC00
 /* Lower bit position of bitfield lro_max_ival[9:0] */
-#define rpo_lro_max_ival_shift 0
+#define HW_ATL_RPO_LRO_MAX_IVAL_SHIFT 0
 /* Width of bitfield lro_max_ival[9:0] */
-#define rpo_lro_max_ival_width 10
+#define HW_ATL_RPO_LRO_MAX_IVAL_WIDTH 10
 /* Default value of bitfield lro_max_ival[9:0] */
-#define rpo_lro_max_ival_default 0x19
+#define HW_ATL_RPO_LRO_MAX_IVAL_DEFAULT 0x19
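
The RPO LRO fields above are programmed as a group when LRO is brought
up. A hedged sketch, writing only the documented DEFAULT values (the
interval fields are in units derived from lro_tb_div, default 0xC35)
and treating lro_en[1F:0] as one enable bit per RX queue; the function
name is hypothetical and the values are not recommendations:

static void hw_atl_rpo_lro_defaults_sketch(struct aq_hw_s *self)
{
	aq_hw_write_reg_bit(self, HW_ATL_RPO_LRO_TB_DIV_ADR,
			    HW_ATL_RPO_LRO_TB_DIV_MSK,
			    HW_ATL_RPO_LRO_TB_DIV_SHIFT,
			    HW_ATL_RPO_LRO_TB_DIV_DEFAULT);
	aq_hw_write_reg_bit(self, HW_ATL_RPO_LRO_MAX_IVAL_ADR,
			    HW_ATL_RPO_LRO_MAX_IVAL_MSK,
			    HW_ATL_RPO_LRO_MAX_IVAL_SHIFT,
			    HW_ATL_RPO_LRO_MAX_IVAL_DEFAULT);
	/* lro_en[1F:0]: enable LRO on all 32 RX queues */
	aq_hw_write_reg(self, HW_ATL_RPO_LRO_EN_ADR, HW_ATL_RPO_LRO_EN_MSK);
}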
 
 /* TX dca{D}_cpuid[7:0] Bitfield Definitions
  * Preprocessor definitions for the bitfield "dca{D}_cpuid[7:0]".
@@ -1706,17 +1714,17 @@
  */
 
 /* Register address for bitfield dca{D}_cpuid[7:0] */
-#define tdm_dca_dcpuid_adr(dca) (0x00008400 + (dca) * 0x4)
+#define HW_ATL_TDM_DCA_DCPUID_ADR(dca) (0x00008400 + (dca) * 0x4)
 /* Bitmask for bitfield dca{D}_cpuid[7:0] */
-#define tdm_dca_dcpuid_msk 0x000000FF
+#define HW_ATL_TDM_DCA_DCPUID_MSK 0x000000FF
 /* Inverted bitmask for bitfield dca{D}_cpuid[7:0] */
-#define tdm_dca_dcpuid_mskn 0xFFFFFF00
+#define HW_ATL_TDM_DCA_DCPUID_MSKN 0xFFFFFF00
 /* Lower bit position of bitfield dca{D}_cpuid[7:0] */
-#define tdm_dca_dcpuid_shift 0
+#define HW_ATL_TDM_DCA_DCPUID_SHIFT 0
 /* Width of bitfield dca{D}_cpuid[7:0] */
-#define tdm_dca_dcpuid_width 8
+#define HW_ATL_TDM_DCA_DCPUID_WIDTH 8
 /* Default value of bitfield dca{D}_cpuid[7:0] */
-#define tdm_dca_dcpuid_default 0x0
+#define HW_ATL_TDM_DCA_DCPUID_DEFAULT 0x0
 
 /* TX dca{D}_desc_en Bitfield Definitions
  * Preprocessor definitions for the bitfield "dca{D}_desc_en".
@@ -1725,17 +1733,17 @@
  */
 
 /* Register address for bitfield dca{D}_desc_en */
-#define tdm_dca_ddesc_en_adr(dca) (0x00008400 + (dca) * 0x4)
+#define HW_ATL_TDM_DCA_DDESC_EN_ADR(dca) (0x00008400 + (dca) * 0x4)
 /* Bitmask for bitfield dca{D}_desc_en */
-#define tdm_dca_ddesc_en_msk 0x80000000
+#define HW_ATL_TDM_DCA_DDESC_EN_MSK 0x80000000
 /* Inverted bitmask for bitfield dca{D}_desc_en */
-#define tdm_dca_ddesc_en_mskn 0x7FFFFFFF
+#define HW_ATL_TDM_DCA_DDESC_EN_MSKN 0x7FFFFFFF
 /* Lower bit position of bitfield dca{D}_desc_en */
-#define tdm_dca_ddesc_en_shift 31
+#define HW_ATL_TDM_DCA_DDESC_EN_SHIFT 31
 /* Width of bitfield dca{D}_desc_en */
-#define tdm_dca_ddesc_en_width 1
+#define HW_ATL_TDM_DCA_DDESC_EN_WIDTH 1
 /* Default value of bitfield dca{D}_desc_en */
-#define tdm_dca_ddesc_en_default 0x0
+#define HW_ATL_TDM_DCA_DDESC_EN_DEFAULT 0x0
 
 /* TX desc{D}_en Bitfield Definitions
  * Preprocessor definitions for the bitfield "desc{D}_en".
@@ -1744,17 +1752,17 @@
  */
 
 /* Register address for bitfield desc{D}_en */
-#define tdm_desc_den_adr(descriptor) (0x00007C08 + (descriptor) * 0x40)
+#define HW_ATL_TDM_DESC_DEN_ADR(descriptor) (0x00007C08 + (descriptor) * 0x40)
 /* Bitmask for bitfield desc{D}_en */
-#define tdm_desc_den_msk 0x80000000
+#define HW_ATL_TDM_DESC_DEN_MSK 0x80000000
 /* Inverted bitmask for bitfield desc{D}_en */
-#define tdm_desc_den_mskn 0x7FFFFFFF
+#define HW_ATL_TDM_DESC_DEN_MSKN 0x7FFFFFFF
 /* Lower bit position of bitfield desc{D}_en */
-#define tdm_desc_den_shift 31
+#define HW_ATL_TDM_DESC_DEN_SHIFT 31
 /* Width of bitfield desc{D}_en */
-#define tdm_desc_den_width 1
+#define HW_ATL_TDM_DESC_DEN_WIDTH 1
 /* Default value of bitfield desc{D}_en */
-#define tdm_desc_den_default 0x0
+#define HW_ATL_TDM_DESC_DEN_DEFAULT 0x0
 
 /* TX desc{D}_hd[C:0] Bitfield Definitions
  * Preprocessor definitions for the bitfield "desc{D}_hd[C:0]".
@@ -1763,15 +1771,15 @@
  */
 
 /* Register address for bitfield desc{D}_hd[C:0] */
-#define tdm_desc_dhd_adr(descriptor) (0x00007C0C + (descriptor) * 0x40)
+#define HW_ATL_TDM_DESC_DHD_ADR(descriptor) (0x00007C0C + (descriptor) * 0x40)
 /* Bitmask for bitfield desc{D}_hd[C:0] */
-#define tdm_desc_dhd_msk 0x00001FFF
+#define HW_ATL_TDM_DESC_DHD_MSK 0x00001FFF
 /* Inverted bitmask for bitfield desc{D}_hd[C:0] */
-#define tdm_desc_dhd_mskn 0xFFFFE000
+#define HW_ATL_TDM_DESC_DHD_MSKN 0xFFFFE000
 /* Lower bit position of bitfield desc{D}_hd[C:0] */
-#define tdm_desc_dhd_shift 0
+#define HW_ATL_TDM_DESC_DHD_SHIFT 0
 /* Width of bitfield desc{D}_hd[C:0] */
-#define tdm_desc_dhd_width 13
+#define HW_ATL_TDM_DESC_DHD_WIDTH 13
 
 /* TX desc{D}_len[9:0] Bitfield Definitions
  * Preprocessor definitions for the bitfield "desc{D}_len[9:0]".
@@ -1780,17 +1788,17 @@
  */
 
 /* Register address for bitfield desc{D}_len[9:0] */
-#define tdm_desc_dlen_adr(descriptor) (0x00007C08 + (descriptor) * 0x40)
+#define HW_ATL_TDM_DESC_DLEN_ADR(descriptor) (0x00007C08 + (descriptor) * 0x40)
 /* Bitmask for bitfield desc{D}_len[9:0] */
-#define tdm_desc_dlen_msk 0x00001FF8
+#define HW_ATL_TDM_DESC_DLEN_MSK 0x00001FF8
 /* Inverted bitmask for bitfield desc{D}_len[9:0] */
-#define tdm_desc_dlen_mskn 0xFFFFE007
+#define HW_ATL_TDM_DESC_DLEN_MSKN 0xFFFFE007
 /* Lower bit position of bitfield desc{D}_len[9:0] */
-#define tdm_desc_dlen_shift 3
+#define HW_ATL_TDM_DESC_DLEN_SHIFT 3
 /* Width of bitfield desc{D}_len[9:0] */
-#define tdm_desc_dlen_width 10
+#define HW_ATL_TDM_DESC_DLEN_WIDTH 10
 /* Default value of bitfield desc{D}_len[9:0] */
-#define tdm_desc_dlen_default 0x0
+#define HW_ATL_TDM_DESC_DLEN_DEFAULT 0x0
 
 /* TX desc{D}_wrb_thresh[6:0] Bitfield Definitions
  * Preprocessor definitions for the bitfield "desc{D}_wrb_thresh[6:0]".
@@ -1799,18 +1807,18 @@
  */
 
 /* Register address for bitfield desc{D}_wrb_thresh[6:0] */
-#define tdm_desc_dwrb_thresh_adr(descriptor) \
+#define HW_ATL_TDM_DESC_DWRB_THRESH_ADR(descriptor) \
 	(0x00007C18 + (descriptor) * 0x40)
 /* Bitmask for bitfield desc{D}_wrb_thresh[6:0] */
-#define tdm_desc_dwrb_thresh_msk 0x00007F00
+#define HW_ATL_TDM_DESC_DWRB_THRESH_MSK 0x00007F00
 /* Inverted bitmask for bitfield desc{D}_wrb_thresh[6:0] */
-#define tdm_desc_dwrb_thresh_mskn 0xFFFF80FF
+#define HW_ATL_TDM_DESC_DWRB_THRESH_MSKN 0xFFFF80FF
 /* Lower bit position of bitfield desc{D}_wrb_thresh[6:0] */
-#define tdm_desc_dwrb_thresh_shift 8
+#define HW_ATL_TDM_DESC_DWRB_THRESH_SHIFT 8
 /* Width of bitfield desc{D}_wrb_thresh[6:0] */
-#define tdm_desc_dwrb_thresh_width 7
+#define HW_ATL_TDM_DESC_DWRB_THRESH_WIDTH 7
 /* Default value of bitfield desc{D}_wrb_thresh[6:0] */
-#define tdm_desc_dwrb_thresh_default 0x0
+#define HW_ATL_TDM_DESC_DWRB_THRESH_DEFAULT 0x0
 
 /* TX tdm_int_mod_en Bitfield Definitions
  * Preprocessor definitions for the bitfield "tdm_int_mod_en".
@@ -1818,34 +1826,34 @@
  */
 
 /* Register address for bitfield tdm_int_mod_en */
-#define tdm_int_mod_en_adr 0x00007B40
+#define HW_ATL_TDM_INT_MOD_EN_ADR 0x00007B40
 /* Bitmask for bitfield tdm_int_mod_en */
-#define tdm_int_mod_en_msk 0x00000010
+#define HW_ATL_TDM_INT_MOD_EN_MSK 0x00000010
 /* Inverted bitmask for bitfield tdm_int_mod_en */
-#define tdm_int_mod_en_mskn 0xFFFFFFEF
+#define HW_ATL_TDM_INT_MOD_EN_MSKN 0xFFFFFFEF
 /* Lower bit position of bitfield tdm_int_mod_en */
-#define tdm_int_mod_en_shift 4
+#define HW_ATL_TDM_INT_MOD_EN_SHIFT 4
 /* Width of bitfield tdm_int_mod_en */
-#define tdm_int_mod_en_width 1
+#define HW_ATL_TDM_INT_MOD_EN_WIDTH 1
 /* Default value of bitfield tdm_int_mod_en */
-#define tdm_int_mod_en_default 0x0
+#define HW_ATL_TDM_INT_MOD_EN_DEFAULT 0x0
 
 /* TX lso_tcp_flag_mid[B:0] Bitfield Definitions
  * Preprocessor definitions for the bitfield "lso_tcp_flag_mid[B:0]".
  * PORT="pif_thm_lso_tcp_flag_mid_i[11:0]"
  */
 /* register address for bitfield lso_tcp_flag_mid[b:0] */
-#define thm_lso_tcp_flag_mid_adr 0x00007820
+#define HW_ATL_THM_LSO_TCP_FLAG_MID_ADR 0x00007820
 /* bitmask for bitfield lso_tcp_flag_mid[b:0] */
-#define thm_lso_tcp_flag_mid_msk 0x0fff0000
+#define HW_ATL_THM_LSO_TCP_FLAG_MID_MSK 0x0fff0000
 /* inverted bitmask for bitfield lso_tcp_flag_mid[b:0] */
-#define thm_lso_tcp_flag_mid_mskn 0xf000ffff
+#define HW_ATL_THM_LSO_TCP_FLAG_MID_MSKN 0xf000ffff
 /* lower bit position of bitfield lso_tcp_flag_mid[b:0] */
-#define thm_lso_tcp_flag_mid_shift 16
+#define HW_ATL_THM_LSO_TCP_FLAG_MID_SHIFT 16
 /* width of bitfield lso_tcp_flag_mid[b:0] */
-#define thm_lso_tcp_flag_mid_width 12
+#define HW_ATL_THM_LSO_TCP_FLAG_MID_WIDTH 12
 /* default value of bitfield lso_tcp_flag_mid[b:0] */
-#define thm_lso_tcp_flag_mid_default 0x0
+#define HW_ATL_THM_LSO_TCP_FLAG_MID_DEFAULT 0x0
 
 /* tx tx_buf_en bitfield definitions
  * preprocessor definitions for the bitfield "tx_buf_en".
@@ -1853,17 +1861,17 @@
  */
 
 /* register address for bitfield tx_buf_en */
-#define tpb_tx_buf_en_adr 0x00007900
+#define HW_ATL_TPB_TX_BUF_EN_ADR 0x00007900
 /* bitmask for bitfield tx_buf_en */
-#define tpb_tx_buf_en_msk 0x00000001
+#define HW_ATL_TPB_TX_BUF_EN_MSK 0x00000001
 /* inverted bitmask for bitfield tx_buf_en */
-#define tpb_tx_buf_en_mskn 0xfffffffe
+#define HW_ATL_TPB_TX_BUF_EN_MSKN 0xfffffffe
 /* lower bit position of bitfield tx_buf_en */
-#define tpb_tx_buf_en_shift 0
+#define HW_ATL_TPB_TX_BUF_EN_SHIFT 0
 /* width of bitfield tx_buf_en */
-#define tpb_tx_buf_en_width 1
+#define HW_ATL_TPB_TX_BUF_EN_WIDTH 1
 /* default value of bitfield tx_buf_en */
-#define tpb_tx_buf_en_default 0x0
+#define HW_ATL_TPB_TX_BUF_EN_DEFAULT 0x0
 
 /* tx tx{b}_hi_thresh[c:0] bitfield definitions
  * preprocessor definitions for the bitfield "tx{b}_hi_thresh[c:0]".
@@ -1872,17 +1880,17 @@
  */
 
 /* register address for bitfield tx{b}_hi_thresh[c:0] */
-#define tpb_txbhi_thresh_adr(buffer) (0x00007914 + (buffer) * 0x10)
+#define HW_ATL_TPB_TXBHI_THRESH_ADR(buffer) (0x00007914 + (buffer) * 0x10)
 /* bitmask for bitfield tx{b}_hi_thresh[c:0] */
-#define tpb_txbhi_thresh_msk 0x1fff0000
+#define HW_ATL_TPB_TXBHI_THRESH_MSK 0x1fff0000
 /* inverted bitmask for bitfield tx{b}_hi_thresh[c:0] */
-#define tpb_txbhi_thresh_mskn 0xe000ffff
+#define HW_ATL_TPB_TXBHI_THRESH_MSKN 0xe000ffff
 /* lower bit position of bitfield tx{b}_hi_thresh[c:0] */
-#define tpb_txbhi_thresh_shift 16
+#define HW_ATL_TPB_TXBHI_THRESH_SHIFT 16
 /* width of bitfield tx{b}_hi_thresh[c:0] */
-#define tpb_txbhi_thresh_width 13
+#define HW_ATL_TPB_TXBHI_THRESH_WIDTH 13
 /* default value of bitfield tx{b}_hi_thresh[c:0] */
-#define tpb_txbhi_thresh_default 0x0
+#define HW_ATL_TPB_TXBHI_THRESH_DEFAULT 0x0
 
 /* tx tx{b}_lo_thresh[c:0] bitfield definitions
  * preprocessor definitions for the bitfield "tx{b}_lo_thresh[c:0]".
@@ -1891,17 +1899,17 @@
  */
 
 /* register address for bitfield tx{b}_lo_thresh[c:0] */
-#define tpb_txblo_thresh_adr(buffer) (0x00007914 + (buffer) * 0x10)
+#define HW_ATL_TPB_TXBLO_THRESH_ADR(buffer) (0x00007914 + (buffer) * 0x10)
 /* bitmask for bitfield tx{b}_lo_thresh[c:0] */
-#define tpb_txblo_thresh_msk 0x00001fff
+#define HW_ATL_TPB_TXBLO_THRESH_MSK 0x00001fff
 /* inverted bitmask for bitfield tx{b}_lo_thresh[c:0] */
-#define tpb_txblo_thresh_mskn 0xffffe000
+#define HW_ATL_TPB_TXBLO_THRESH_MSKN 0xffffe000
 /* lower bit position of bitfield tx{b}_lo_thresh[c:0] */
-#define tpb_txblo_thresh_shift 0
+#define HW_ATL_TPB_TXBLO_THRESH_SHIFT 0
 /* width of bitfield tx{b}_lo_thresh[c:0] */
-#define tpb_txblo_thresh_width 13
+#define HW_ATL_TPB_TXBLO_THRESH_WIDTH 13
 /* default value of bitfield tx{b}_lo_thresh[c:0] */
-#define tpb_txblo_thresh_default 0x0
+#define HW_ATL_TPB_TXBLO_THRESH_DEFAULT 0x0
 
 /* tx dma_sys_loopback bitfield definitions
  * preprocessor definitions for the bitfield "dma_sys_loopback".
@@ -1909,17 +1917,17 @@
  */
 
 /* register address for bitfield dma_sys_loopback */
-#define tpb_dma_sys_lbk_adr 0x00007000
+#define HW_ATL_TPB_DMA_SYS_LBK_ADR 0x00007000
 /* bitmask for bitfield dma_sys_loopback */
-#define tpb_dma_sys_lbk_msk 0x00000040
+#define HW_ATL_TPB_DMA_SYS_LBK_MSK 0x00000040
 /* inverted bitmask for bitfield dma_sys_loopback */
-#define tpb_dma_sys_lbk_mskn 0xffffffbf
+#define HW_ATL_TPB_DMA_SYS_LBK_MSKN 0xffffffbf
 /* lower bit position of bitfield dma_sys_loopback */
-#define tpb_dma_sys_lbk_shift 6
+#define HW_ATL_TPB_DMA_SYS_LBK_SHIFT 6
 /* width of bitfield dma_sys_loopback */
-#define tpb_dma_sys_lbk_width 1
+#define HW_ATL_TPB_DMA_SYS_LBK_WIDTH 1
 /* default value of bitfield dma_sys_loopback */
-#define tpb_dma_sys_lbk_default 0x0
+#define HW_ATL_TPB_DMA_SYS_LBK_DEFAULT 0x0
 
 /* tx tx{b}_buf_size[7:0] bitfield definitions
  * preprocessor definitions for the bitfield "tx{b}_buf_size[7:0]".
@@ -1928,17 +1936,17 @@
  */
 
 /* register address for bitfield tx{b}_buf_size[7:0] */
-#define tpb_txbbuf_size_adr(buffer) (0x00007910 + (buffer) * 0x10)
+#define HW_ATL_TPB_TXBBUF_SIZE_ADR(buffer) (0x00007910 + (buffer) * 0x10)
 /* bitmask for bitfield tx{b}_buf_size[7:0] */
-#define tpb_txbbuf_size_msk 0x000000ff
+#define HW_ATL_TPB_TXBBUF_SIZE_MSK 0x000000ff
 /* inverted bitmask for bitfield tx{b}_buf_size[7:0] */
-#define tpb_txbbuf_size_mskn 0xffffff00
+#define HW_ATL_TPB_TXBBUF_SIZE_MSKN 0xffffff00
 /* lower bit position of bitfield tx{b}_buf_size[7:0] */
-#define tpb_txbbuf_size_shift 0
+#define HW_ATL_TPB_TXBBUF_SIZE_SHIFT 0
 /* width of bitfield tx{b}_buf_size[7:0] */
-#define tpb_txbbuf_size_width 8
+#define HW_ATL_TPB_TXBBUF_SIZE_WIDTH 8
 /* default value of bitfield tx{b}_buf_size[7:0] */
-#define tpb_txbbuf_size_default 0x0
+#define HW_ATL_TPB_TXBBUF_SIZE_DEFAULT 0x0
 
 /* tx tx_scp_ins_en bitfield definitions
  * preprocessor definitions for the bitfield "tx_scp_ins_en".
@@ -1946,17 +1954,17 @@
  */
 
 /* register address for bitfield tx_scp_ins_en */
-#define tpb_tx_scp_ins_en_adr 0x00007900
+#define HW_ATL_TPB_TX_SCP_INS_EN_ADR 0x00007900
 /* bitmask for bitfield tx_scp_ins_en */
-#define tpb_tx_scp_ins_en_msk 0x00000004
+#define HW_ATL_TPB_TX_SCP_INS_EN_MSK 0x00000004
 /* inverted bitmask for bitfield tx_scp_ins_en */
-#define tpb_tx_scp_ins_en_mskn 0xfffffffb
+#define HW_ATL_TPB_TX_SCP_INS_EN_MSKN 0xfffffffb
 /* lower bit position of bitfield tx_scp_ins_en */
-#define tpb_tx_scp_ins_en_shift 2
+#define HW_ATL_TPB_TX_SCP_INS_EN_SHIFT 2
 /* width of bitfield tx_scp_ins_en */
-#define tpb_tx_scp_ins_en_width 1
+#define HW_ATL_TPB_TX_SCP_INS_EN_WIDTH 1
 /* default value of bitfield tx_scp_ins_en */
-#define tpb_tx_scp_ins_en_default 0x0
+#define HW_ATL_TPB_TX_SCP_INS_EN_DEFAULT 0x0
 
 /* tx ipv4_chk_en bitfield definitions
  * preprocessor definitions for the bitfield "ipv4_chk_en".
@@ -1964,17 +1972,17 @@
  */
 
 /* register address for bitfield ipv4_chk_en */
-#define tpo_ipv4chk_en_adr 0x00007800
+#define HW_ATL_TPO_IPV4CHK_EN_ADR 0x00007800
 /* bitmask for bitfield ipv4_chk_en */
-#define tpo_ipv4chk_en_msk 0x00000002
+#define HW_ATL_TPO_IPV4CHK_EN_MSK 0x00000002
 /* inverted bitmask for bitfield ipv4_chk_en */
-#define tpo_ipv4chk_en_mskn 0xfffffffd
+#define HW_ATL_TPO_IPV4CHK_EN_MSKN 0xfffffffd
 /* lower bit position of bitfield ipv4_chk_en */
-#define tpo_ipv4chk_en_shift 1
+#define HW_ATL_TPO_IPV4CHK_EN_SHIFT 1
 /* width of bitfield ipv4_chk_en */
-#define tpo_ipv4chk_en_width 1
+#define HW_ATL_TPO_IPV4CHK_EN_WIDTH 1
 /* default value of bitfield ipv4_chk_en */
-#define tpo_ipv4chk_en_default 0x0
+#define HW_ATL_TPO_IPV4CHK_EN_DEFAULT 0x0
 
 /* tx l4_chk_en bitfield definitions
  * preprocessor definitions for the bitfield "l4_chk_en".
@@ -1982,17 +1990,17 @@
  */
 
 /* register address for bitfield l4_chk_en */
-#define tpol4chk_en_adr 0x00007800
+#define HW_ATL_TPOL4CHK_EN_ADR 0x00007800
 /* bitmask for bitfield l4_chk_en */
-#define tpol4chk_en_msk 0x00000001
+#define HW_ATL_TPOL4CHK_EN_MSK 0x00000001
 /* inverted bitmask for bitfield l4_chk_en */
-#define tpol4chk_en_mskn 0xfffffffe
+#define HW_ATL_TPOL4CHK_EN_MSKN 0xfffffffe
 /* lower bit position of bitfield l4_chk_en */
-#define tpol4chk_en_shift 0
+#define HW_ATL_TPOL4CHK_EN_SHIFT 0
 /* width of bitfield l4_chk_en */
-#define tpol4chk_en_width 1
+#define HW_ATL_TPOL4CHK_EN_WIDTH 1
 /* default value of bitfield l4_chk_en */
-#define tpol4chk_en_default 0x0
+#define HW_ATL_TPOL4CHK_EN_DEFAULT 0x0
 
 /* tx pkt_sys_loopback bitfield definitions
  * preprocessor definitions for the bitfield "pkt_sys_loopback".
@@ -2000,17 +2008,17 @@
  */
 
 /* register address for bitfield pkt_sys_loopback */
-#define tpo_pkt_sys_lbk_adr 0x00007000
+#define HW_ATL_TPO_PKT_SYS_LBK_ADR 0x00007000
 /* bitmask for bitfield pkt_sys_loopback */
-#define tpo_pkt_sys_lbk_msk 0x00000080
+#define HW_ATL_TPO_PKT_SYS_LBK_MSK 0x00000080
 /* inverted bitmask for bitfield pkt_sys_loopback */
-#define tpo_pkt_sys_lbk_mskn 0xffffff7f
+#define HW_ATL_TPO_PKT_SYS_LBK_MSKN 0xffffff7f
 /* lower bit position of bitfield pkt_sys_loopback */
-#define tpo_pkt_sys_lbk_shift 7
+#define HW_ATL_TPO_PKT_SYS_LBK_SHIFT 7
 /* width of bitfield pkt_sys_loopback */
-#define tpo_pkt_sys_lbk_width 1
+#define HW_ATL_TPO_PKT_SYS_LBK_WIDTH 1
 /* default value of bitfield pkt_sys_loopback */
-#define tpo_pkt_sys_lbk_default 0x0
+#define HW_ATL_TPO_PKT_SYS_LBK_DEFAULT 0x0
 
 /* tx data_tc_arb_mode bitfield definitions
  * preprocessor definitions for the bitfield "data_tc_arb_mode".
@@ -2018,17 +2026,17 @@
  */
 
 /* register address for bitfield data_tc_arb_mode */
-#define tps_data_tc_arb_mode_adr 0x00007100
+#define HW_ATL_TPS_DATA_TC_ARB_MODE_ADR 0x00007100
 /* bitmask for bitfield data_tc_arb_mode */
-#define tps_data_tc_arb_mode_msk 0x00000001
+#define HW_ATL_TPS_DATA_TC_ARB_MODE_MSK 0x00000001
 /* inverted bitmask for bitfield data_tc_arb_mode */
-#define tps_data_tc_arb_mode_mskn 0xfffffffe
+#define HW_ATL_TPS_DATA_TC_ARB_MODE_MSKN 0xfffffffe
 /* lower bit position of bitfield data_tc_arb_mode */
-#define tps_data_tc_arb_mode_shift 0
+#define HW_ATL_TPS_DATA_TC_ARB_MODE_SHIFT 0
 /* width of bitfield data_tc_arb_mode */
-#define tps_data_tc_arb_mode_width 1
+#define HW_ATL_TPS_DATA_TC_ARB_MODE_WIDTH 1
 /* default value of bitfield data_tc_arb_mode */
-#define tps_data_tc_arb_mode_default 0x0
+#define HW_ATL_TPS_DATA_TC_ARB_MODE_DEFAULT 0x0
 
 /* tx desc_rate_ta_rst bitfield definitions
  * preprocessor definitions for the bitfield "desc_rate_ta_rst".
@@ -2036,17 +2044,17 @@
  */
 
 /* register address for bitfield desc_rate_ta_rst */
-#define tps_desc_rate_ta_rst_adr 0x00007310
+#define HW_ATL_TPS_DESC_RATE_TA_RST_ADR 0x00007310
 /* bitmask for bitfield desc_rate_ta_rst */
-#define tps_desc_rate_ta_rst_msk 0x80000000
+#define HW_ATL_TPS_DESC_RATE_TA_RST_MSK 0x80000000
 /* inverted bitmask for bitfield desc_rate_ta_rst */
-#define tps_desc_rate_ta_rst_mskn 0x7fffffff
+#define HW_ATL_TPS_DESC_RATE_TA_RST_MSKN 0x7fffffff
 /* lower bit position of bitfield desc_rate_ta_rst */
-#define tps_desc_rate_ta_rst_shift 31
+#define HW_ATL_TPS_DESC_RATE_TA_RST_SHIFT 31
 /* width of bitfield desc_rate_ta_rst */
-#define tps_desc_rate_ta_rst_width 1
+#define HW_ATL_TPS_DESC_RATE_TA_RST_WIDTH 1
 /* default value of bitfield desc_rate_ta_rst */
-#define tps_desc_rate_ta_rst_default 0x0
+#define HW_ATL_TPS_DESC_RATE_TA_RST_DEFAULT 0x0
 
 /* tx desc_rate_limit[a:0] bitfield definitions
  * preprocessor definitions for the bitfield "desc_rate_limit[a:0]".
@@ -2054,17 +2062,17 @@
  */
 
 /* register address for bitfield desc_rate_limit[a:0] */
-#define tps_desc_rate_lim_adr 0x00007310
+#define HW_ATL_TPS_DESC_RATE_LIM_ADR 0x00007310
 /* bitmask for bitfield desc_rate_limit[a:0] */
-#define tps_desc_rate_lim_msk 0x000007ff
+#define HW_ATL_TPS_DESC_RATE_LIM_MSK 0x000007ff
 /* inverted bitmask for bitfield desc_rate_limit[a:0] */
-#define tps_desc_rate_lim_mskn 0xfffff800
+#define HW_ATL_TPS_DESC_RATE_LIM_MSKN 0xfffff800
 /* lower bit position of bitfield desc_rate_limit[a:0] */
-#define tps_desc_rate_lim_shift 0
+#define HW_ATL_TPS_DESC_RATE_LIM_SHIFT 0
 /* width of bitfield desc_rate_limit[a:0] */
-#define tps_desc_rate_lim_width 11
+#define HW_ATL_TPS_DESC_RATE_LIM_WIDTH 11
 /* default value of bitfield desc_rate_limit[a:0] */
-#define tps_desc_rate_lim_default 0x0
+#define HW_ATL_TPS_DESC_RATE_LIM_DEFAULT 0x0
 
 /* tx desc_tc_arb_mode[1:0] bitfield definitions
  * preprocessor definitions for the bitfield "desc_tc_arb_mode[1:0]".
@@ -2072,17 +2080,17 @@
  */
 
 /* register address for bitfield desc_tc_arb_mode[1:0] */
-#define tps_desc_tc_arb_mode_adr 0x00007200
+#define HW_ATL_TPS_DESC_TC_ARB_MODE_ADR 0x00007200
 /* bitmask for bitfield desc_tc_arb_mode[1:0] */
-#define tps_desc_tc_arb_mode_msk 0x00000003
+#define HW_ATL_TPS_DESC_TC_ARB_MODE_MSK 0x00000003
 /* inverted bitmask for bitfield desc_tc_arb_mode[1:0] */
-#define tps_desc_tc_arb_mode_mskn 0xfffffffc
+#define HW_ATL_TPS_DESC_TC_ARB_MODE_MSKN 0xfffffffc
 /* lower bit position of bitfield desc_tc_arb_mode[1:0] */
-#define tps_desc_tc_arb_mode_shift 0
+#define HW_ATL_TPS_DESC_TC_ARB_MODE_SHIFT 0
 /* width of bitfield desc_tc_arb_mode[1:0] */
-#define tps_desc_tc_arb_mode_width 2
+#define HW_ATL_TPS_DESC_TC_ARB_MODE_WIDTH 2
 /* default value of bitfield desc_tc_arb_mode[1:0] */
-#define tps_desc_tc_arb_mode_default 0x0
+#define HW_ATL_TPS_DESC_TC_ARB_MODE_DEFAULT 0x0
 
 /* tx desc_tc{t}_credit_max[b:0] bitfield definitions
  * preprocessor definitions for the bitfield "desc_tc{t}_credit_max[b:0]".
@@ -2091,17 +2099,17 @@
  */
 
 /* register address for bitfield desc_tc{t}_credit_max[b:0] */
-#define tps_desc_tctcredit_max_adr(tc) (0x00007210 + (tc) * 0x4)
+#define HW_ATL_TPS_DESC_TCTCREDIT_MAX_ADR(tc) (0x00007210 + (tc) * 0x4)
 /* bitmask for bitfield desc_tc{t}_credit_max[b:0] */
-#define tps_desc_tctcredit_max_msk 0x0fff0000
+#define HW_ATL_TPS_DESC_TCTCREDIT_MAX_MSK 0x0fff0000
 /* inverted bitmask for bitfield desc_tc{t}_credit_max[b:0] */
-#define tps_desc_tctcredit_max_mskn 0xf000ffff
+#define HW_ATL_TPS_DESC_TCTCREDIT_MAX_MSKN 0xf000ffff
 /* lower bit position of bitfield desc_tc{t}_credit_max[b:0] */
-#define tps_desc_tctcredit_max_shift 16
+#define HW_ATL_TPS_DESC_TCTCREDIT_MAX_SHIFT 16
 /* width of bitfield desc_tc{t}_credit_max[b:0] */
-#define tps_desc_tctcredit_max_width 12
+#define HW_ATL_TPS_DESC_TCTCREDIT_MAX_WIDTH 12
 /* default value of bitfield desc_tc{t}_credit_max[b:0] */
-#define tps_desc_tctcredit_max_default 0x0
+#define HW_ATL_TPS_DESC_TCTCREDIT_MAX_DEFAULT 0x0
 
 /* tx desc_tc{t}_weight[8:0] bitfield definitions
  * preprocessor definitions for the bitfield "desc_tc{t}_weight[8:0]".
@@ -2110,17 +2118,17 @@
  */
 
 /* register address for bitfield desc_tc{t}_weight[8:0] */
-#define tps_desc_tctweight_adr(tc) (0x00007210 + (tc) * 0x4)
+#define HW_ATL_TPS_DESC_TCTWEIGHT_ADR(tc) (0x00007210 + (tc) * 0x4)
 /* bitmask for bitfield desc_tc{t}_weight[8:0] */
-#define tps_desc_tctweight_msk 0x000001ff
+#define HW_ATL_TPS_DESC_TCTWEIGHT_MSK 0x000001ff
 /* inverted bitmask for bitfield desc_tc{t}_weight[8:0] */
-#define tps_desc_tctweight_mskn 0xfffffe00
+#define HW_ATL_TPS_DESC_TCTWEIGHT_MSKN 0xfffffe00
 /* lower bit position of bitfield desc_tc{t}_weight[8:0] */
-#define tps_desc_tctweight_shift 0
+#define HW_ATL_TPS_DESC_TCTWEIGHT_SHIFT 0
 /* width of bitfield desc_tc{t}_weight[8:0] */
-#define tps_desc_tctweight_width 9
+#define HW_ATL_TPS_DESC_TCTWEIGHT_WIDTH 9
 /* default value of bitfield desc_tc{t}_weight[8:0] */
-#define tps_desc_tctweight_default 0x0
+#define HW_ATL_TPS_DESC_TCTWEIGHT_DEFAULT 0x0
 
 /* tx desc_vm_arb_mode bitfield definitions
  * preprocessor definitions for the bitfield "desc_vm_arb_mode".
@@ -2128,17 +2136,17 @@
  */
 
 /* register address for bitfield desc_vm_arb_mode */
-#define tps_desc_vm_arb_mode_adr 0x00007300
+#define HW_ATL_TPS_DESC_VM_ARB_MODE_ADR 0x00007300
 /* bitmask for bitfield desc_vm_arb_mode */
-#define tps_desc_vm_arb_mode_msk 0x00000001
+#define HW_ATL_TPS_DESC_VM_ARB_MODE_MSK 0x00000001
 /* inverted bitmask for bitfield desc_vm_arb_mode */
-#define tps_desc_vm_arb_mode_mskn 0xfffffffe
+#define HW_ATL_TPS_DESC_VM_ARB_MODE_MSKN 0xfffffffe
 /* lower bit position of bitfield desc_vm_arb_mode */
-#define tps_desc_vm_arb_mode_shift 0
+#define HW_ATL_TPS_DESC_VM_ARB_MODE_SHIFT 0
 /* width of bitfield desc_vm_arb_mode */
-#define tps_desc_vm_arb_mode_width 1
+#define HW_ATL_TPS_DESC_VM_ARB_MODE_WIDTH 1
 /* default value of bitfield desc_vm_arb_mode */
-#define tps_desc_vm_arb_mode_default 0x0
+#define HW_ATL_TPS_DESC_VM_ARB_MODE_DEFAULT 0x0
 
 /* tx data_tc{t}_credit_max[b:0] bitfield definitions
  * preprocessor definitions for the bitfield "data_tc{t}_credit_max[b:0]".
@@ -2147,17 +2155,17 @@
  */
 
 /* register address for bitfield data_tc{t}_credit_max[b:0] */
-#define tps_data_tctcredit_max_adr(tc) (0x00007110 + (tc) * 0x4)
+#define HW_ATL_TPS_DATA_TCTCREDIT_MAX_ADR(tc) (0x00007110 + (tc) * 0x4)
 /* bitmask for bitfield data_tc{t}_credit_max[b:0] */
-#define tps_data_tctcredit_max_msk 0x0fff0000
+#define HW_ATL_TPS_DATA_TCTCREDIT_MAX_MSK 0x0fff0000
 /* inverted bitmask for bitfield data_tc{t}_credit_max[b:0] */
-#define tps_data_tctcredit_max_mskn 0xf000ffff
+#define HW_ATL_TPS_DATA_TCTCREDIT_MAX_MSKN 0xf000ffff
 /* lower bit position of bitfield data_tc{t}_credit_max[b:0] */
-#define tps_data_tctcredit_max_shift 16
+#define HW_ATL_TPS_DATA_TCTCREDIT_MAX_SHIFT 16
 /* width of bitfield data_tc{t}_credit_max[b:0] */
-#define tps_data_tctcredit_max_width 12
+#define HW_ATL_TPS_DATA_TCTCREDIT_MAX_WIDTH 12
 /* default value of bitfield data_tc{t}_credit_max[b:0] */
-#define tps_data_tctcredit_max_default 0x0
+#define HW_ATL_TPS_DATA_TCTCREDIT_MAX_DEFAULT 0x0
 
 /* tx data_tc{t}_weight[8:0] bitfield definitions
  * preprocessor definitions for the bitfield "data_tc{t}_weight[8:0]".
@@ -2166,17 +2174,17 @@
  */
 
 /* register address for bitfield data_tc{t}_weight[8:0] */
-#define tps_data_tctweight_adr(tc) (0x00007110 + (tc) * 0x4)
+#define HW_ATL_TPS_DATA_TCTWEIGHT_ADR(tc) (0x00007110 + (tc) * 0x4)
 /* bitmask for bitfield data_tc{t}_weight[8:0] */
-#define tps_data_tctweight_msk 0x000001ff
+#define HW_ATL_TPS_DATA_TCTWEIGHT_MSK 0x000001ff
 /* inverted bitmask for bitfield data_tc{t}_weight[8:0] */
-#define tps_data_tctweight_mskn 0xfffffe00
+#define HW_ATL_TPS_DATA_TCTWEIGHT_MSKN 0xfffffe00
 /* lower bit position of bitfield data_tc{t}_weight[8:0] */
-#define tps_data_tctweight_shift 0
+#define HW_ATL_TPS_DATA_TCTWEIGHT_SHIFT 0
 /* width of bitfield data_tc{t}_weight[8:0] */
-#define tps_data_tctweight_width 9
+#define HW_ATL_TPS_DATA_TCTWEIGHT_WIDTH 9
 /* default value of bitfield data_tc{t}_weight[8:0] */
-#define tps_data_tctweight_default 0x0
+#define HW_ATL_TPS_DATA_TCTWEIGHT_DEFAULT 0x0
 
 /* tx reg_res_dsbl bitfield definitions
  * preprocessor definitions for the bitfield "reg_res_dsbl".
@@ -2184,17 +2192,17 @@
  */
 
 /* register address for bitfield reg_res_dsbl */
-#define tx_reg_res_dsbl_adr 0x00007000
+#define HW_ATL_TX_REG_RES_DSBL_ADR 0x00007000
 /* bitmask for bitfield reg_res_dsbl */
-#define tx_reg_res_dsbl_msk 0x20000000
+#define HW_ATL_TX_REG_RES_DSBL_MSK 0x20000000
 /* inverted bitmask for bitfield reg_res_dsbl */
-#define tx_reg_res_dsbl_mskn 0xdfffffff
+#define HW_ATL_TX_REG_RES_DSBL_MSKN 0xdfffffff
 /* lower bit position of bitfield reg_res_dsbl */
-#define tx_reg_res_dsbl_shift 29
+#define HW_ATL_TX_REG_RES_DSBL_SHIFT 29
 /* width of bitfield reg_res_dsbl */
-#define tx_reg_res_dsbl_width 1
+#define HW_ATL_TX_REG_RES_DSBL_WIDTH 1
 /* default value of bitfield reg_res_dsbl */
-#define tx_reg_res_dsbl_default 0x1
+#define HW_ATL_TX_REG_RES_DSBL_DEFAULT 0x1
 
 /* mac_phy register access busy bitfield definitions
  * preprocessor definitions for the bitfield "register access busy".
@@ -2202,15 +2210,15 @@
  */
 
 /* register address for bitfield register access busy */
-#define msm_reg_access_busy_adr 0x00004400
+#define HW_ATL_MSM_REG_ACCESS_BUSY_ADR 0x00004400
 /* bitmask for bitfield register access busy */
-#define msm_reg_access_busy_msk 0x00001000
+#define HW_ATL_MSM_REG_ACCESS_BUSY_MSK 0x00001000
 /* inverted bitmask for bitfield register access busy */
-#define msm_reg_access_busy_mskn 0xffffefff
+#define HW_ATL_MSM_REG_ACCESS_BUSY_MSKN 0xffffefff
 /* lower bit position of bitfield register access busy */
-#define msm_reg_access_busy_shift 12
+#define HW_ATL_MSM_REG_ACCESS_BUSY_SHIFT 12
 /* width of bitfield register access busy */
-#define msm_reg_access_busy_width 1
+#define HW_ATL_MSM_REG_ACCESS_BUSY_WIDTH 1
 
 /* mac_phy msm register address[7:0] bitfield definitions
  * preprocessor definitions for the bitfield "msm register address[7:0]".
@@ -2218,17 +2226,17 @@
  */
 
 /* register address for bitfield msm register address[7:0] */
-#define msm_reg_addr_adr 0x00004400
+#define HW_ATL_MSM_REG_ADDR_ADR 0x00004400
 /* bitmask for bitfield msm register address[7:0] */
-#define msm_reg_addr_msk 0x000000ff
+#define HW_ATL_MSM_REG_ADDR_MSK 0x000000ff
 /* inverted bitmask for bitfield msm register address[7:0] */
-#define msm_reg_addr_mskn 0xffffff00
+#define HW_ATL_MSM_REG_ADDR_MSKN 0xffffff00
 /* lower bit position of bitfield msm register address[7:0] */
-#define msm_reg_addr_shift 0
+#define HW_ATL_MSM_REG_ADDR_SHIFT 0
 /* width of bitfield msm register address[7:0] */
-#define msm_reg_addr_width 8
+#define HW_ATL_MSM_REG_ADDR_WIDTH 8
 /* default value of bitfield msm register address[7:0] */
-#define msm_reg_addr_default 0x0
+#define HW_ATL_MSM_REG_ADDR_DEFAULT 0x0
 
 /* mac_phy register read strobe bitfield definitions
  * preprocessor definitions for the bitfield "register read strobe".
@@ -2236,17 +2244,17 @@
  */
 
 /* register address for bitfield register read strobe */
-#define msm_reg_rd_strobe_adr 0x00004400
+#define HW_ATL_MSM_REG_RD_STROBE_ADR 0x00004400
 /* bitmask for bitfield register read strobe */
-#define msm_reg_rd_strobe_msk 0x00000200
+#define HW_ATL_MSM_REG_RD_STROBE_MSK 0x00000200
 /* inverted bitmask for bitfield register read strobe */
-#define msm_reg_rd_strobe_mskn 0xfffffdff
+#define HW_ATL_MSM_REG_RD_STROBE_MSKN 0xfffffdff
 /* lower bit position of bitfield register read strobe */
-#define msm_reg_rd_strobe_shift 9
+#define HW_ATL_MSM_REG_RD_STROBE_SHIFT 9
 /* width of bitfield register read strobe */
-#define msm_reg_rd_strobe_width 1
+#define HW_ATL_MSM_REG_RD_STROBE_WIDTH 1
 /* default value of bitfield register read strobe */
-#define msm_reg_rd_strobe_default 0x0
+#define HW_ATL_MSM_REG_RD_STROBE_DEFAULT 0x0
 
 /* mac_phy msm register read data[31:0] bitfield definitions
  * preprocessor definitions for the bitfield "msm register read data[31:0]".
@@ -2254,15 +2262,15 @@
  */
 
 /* register address for bitfield msm register read data[31:0] */
-#define msm_reg_rd_data_adr 0x00004408
+#define HW_ATL_MSM_REG_RD_DATA_ADR 0x00004408
 /* bitmask for bitfield msm register read data[31:0] */
-#define msm_reg_rd_data_msk 0xffffffff
+#define HW_ATL_MSM_REG_RD_DATA_MSK 0xffffffff
 /* inverted bitmask for bitfield msm register read data[31:0] */
-#define msm_reg_rd_data_mskn 0x00000000
+#define HW_ATL_MSM_REG_RD_DATA_MSKN 0x00000000
 /* lower bit position of bitfield msm register read data[31:0] */
-#define msm_reg_rd_data_shift 0
+#define HW_ATL_MSM_REG_RD_DATA_SHIFT 0
 /* width of bitfield msm register read data[31:0] */
-#define msm_reg_rd_data_width 32
+#define HW_ATL_MSM_REG_RD_DATA_WIDTH 32
 
 /* mac_phy msm register write data[31:0] bitfield definitions
  * preprocessor definitions for the bitfield "msm register write data[31:0]".
@@ -2270,17 +2278,17 @@
  */
 
 /* register address for bitfield msm register write data[31:0] */
-#define msm_reg_wr_data_adr 0x00004404
+#define HW_ATL_MSM_REG_WR_DATA_ADR 0x00004404
 /* bitmask for bitfield msm register write data[31:0] */
-#define msm_reg_wr_data_msk 0xffffffff
+#define HW_ATL_MSM_REG_WR_DATA_MSK 0xffffffff
 /* inverted bitmask for bitfield msm register write data[31:0] */
-#define msm_reg_wr_data_mskn 0x00000000
+#define HW_ATL_MSM_REG_WR_DATA_MSKN 0x00000000
 /* lower bit position of bitfield msm register write data[31:0] */
-#define msm_reg_wr_data_shift 0
+#define HW_ATL_MSM_REG_WR_DATA_SHIFT 0
 /* width of bitfield msm register write data[31:0] */
-#define msm_reg_wr_data_width 32
+#define HW_ATL_MSM_REG_WR_DATA_WIDTH 32
 /* default value of bitfield msm register write data[31:0] */
-#define msm_reg_wr_data_default 0x0
+#define HW_ATL_MSM_REG_WR_DATA_DEFAULT 0x0
 
 /* mac_phy register write strobe bitfield definitions
  * preprocessor definitions for the bitfield "register write strobe".
@@ -2288,17 +2296,17 @@
  */
 
 /* register address for bitfield register write strobe */
-#define msm_reg_wr_strobe_adr 0x00004400
+#define HW_ATL_MSM_REG_WR_STROBE_ADR 0x00004400
 /* bitmask for bitfield register write strobe */
-#define msm_reg_wr_strobe_msk 0x00000100
+#define HW_ATL_MSM_REG_WR_STROBE_MSK 0x00000100
 /* inverted bitmask for bitfield register write strobe */
-#define msm_reg_wr_strobe_mskn 0xfffffeff
+#define HW_ATL_MSM_REG_WR_STROBE_MSKN 0xfffffeff
 /* lower bit position of bitfield register write strobe */
-#define msm_reg_wr_strobe_shift 8
+#define HW_ATL_MSM_REG_WR_STROBE_SHIFT 8
 /* width of bitfield register write strobe */
-#define msm_reg_wr_strobe_width 1
+#define HW_ATL_MSM_REG_WR_STROBE_WIDTH 1
 /* default value of bitfield register write strobe */
-#define msm_reg_wr_strobe_default 0x0
+#define HW_ATL_MSM_REG_WR_STROBE_DEFAULT 0x0
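
Taken together, the MSM bitfields above describe an indirect access
window: latch an 8-bit MSM register address, pulse the read (or write)
strobe, wait for the busy flag to drop, then read the data word at
0x4408 (for writes, 0x4404 is loaded before the strobe). A hedged
sketch of the read side under that assumed sequencing; the helper
name, poll bound and sleep granularity are illustrative only:

static int hw_atl_msm_read_sketch(struct aq_hw_s *self, u32 addr, u32 *val)
{
	int i;

	aq_hw_write_reg_bit(self, HW_ATL_MSM_REG_ADDR_ADR,
			    HW_ATL_MSM_REG_ADDR_MSK,
			    HW_ATL_MSM_REG_ADDR_SHIFT, addr);
	aq_hw_write_reg_bit(self, HW_ATL_MSM_REG_RD_STROBE_ADR,
			    HW_ATL_MSM_REG_RD_STROBE_MSK,
			    HW_ATL_MSM_REG_RD_STROBE_SHIFT, 1U);

	for (i = 0; i < 100; i++) {
		if (!(aq_hw_read_reg(self, HW_ATL_MSM_REG_ACCESS_BUSY_ADR) &
		      HW_ATL_MSM_REG_ACCESS_BUSY_MSK)) {
			*val = aq_hw_read_reg(self,
					      HW_ATL_MSM_REG_RD_DATA_ADR);
			return 0;
		}
		AQ_HW_SLEEP(1);
	}

	return -ETIME;
}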
 
 /* mif soft reset bitfield definitions
  * preprocessor definitions for the bitfield "soft reset".
@@ -2306,17 +2314,17 @@
  */
 
 /* register address for bitfield soft reset */
-#define glb_soft_res_adr 0x00000000
+#define HW_ATL_GLB_SOFT_RES_ADR 0x00000000
 /* bitmask for bitfield soft reset */
-#define glb_soft_res_msk 0x00008000
+#define HW_ATL_GLB_SOFT_RES_MSK 0x00008000
 /* inverted bitmask for bitfield soft reset */
-#define glb_soft_res_mskn 0xffff7fff
+#define HW_ATL_GLB_SOFT_RES_MSKN 0xffff7fff
 /* lower bit position of bitfield soft reset */
-#define glb_soft_res_shift 15
+#define HW_ATL_GLB_SOFT_RES_SHIFT 15
 /* width of bitfield soft reset */
-#define glb_soft_res_width 1
+#define HW_ATL_GLB_SOFT_RES_WIDTH 1
 /* default value of bitfield soft reset */
-#define glb_soft_res_default 0x0
+#define HW_ATL_GLB_SOFT_RES_DEFAULT 0x0
 
 /* mif register reset disable bitfield definitions
  * preprocessor definitions for the bitfield "register reset disable".
@@ -2324,27 +2332,27 @@
  */
 
 /* register address for bitfield register reset disable */
-#define glb_reg_res_dis_adr 0x00000000
+#define HW_ATL_GLB_REG_RES_DIS_ADR 0x00000000
 /* bitmask for bitfield register reset disable */
-#define glb_reg_res_dis_msk 0x00004000
+#define HW_ATL_GLB_REG_RES_DIS_MSK 0x00004000
 /* inverted bitmask for bitfield register reset disable */
-#define glb_reg_res_dis_mskn 0xffffbfff
+#define HW_ATL_GLB_REG_RES_DIS_MSKN 0xffffbfff
 /* lower bit position of bitfield register reset disable */
-#define glb_reg_res_dis_shift 14
+#define HW_ATL_GLB_REG_RES_DIS_SHIFT 14
 /* width of bitfield register reset disable */
-#define glb_reg_res_dis_width 1
+#define HW_ATL_GLB_REG_RES_DIS_WIDTH 1
 /* default value of bitfield register reset disable */
-#define glb_reg_res_dis_default 0x1
+#define HW_ATL_GLB_REG_RES_DIS_DEFAULT 0x1
 
 /* tx dma debug control definitions */
-#define tx_dma_debug_ctl_adr 0x00008920u
+#define HW_ATL_TX_DMA_DEBUG_CTL_ADR 0x00008920u
 
 /* tx dma descriptor base address msw definitions */
-#define tx_dma_desc_base_addrmsw_adr(descriptor) \
+#define HW_ATL_TX_DMA_DESC_BASE_ADDRMSW_ADR(descriptor) \
 			(0x00007c04u + (descriptor) * 0x40)
 
 /* tx dma total request limit */
-#define tx_dma_total_req_limit_adr 0x00007b20u
+#define HW_ATL_TX_DMA_TOTAL_REQ_LIMIT_ADR 0x00007b20u
 
 /* tx interrupt moderation control register definitions
  * Preprocessor definitions for TX Interrupt Moderation Control Register
@@ -2352,7 +2360,7 @@
  * Parameter: queue {Q} | stride size 0x4 | range [0, 31]
  */
 
-#define tx_intr_moderation_ctl_adr(queue) (0x00008980u + (queue) * 0x4)
+#define HW_ATL_TX_INTR_MODERATION_CTL_ADR(queue) (0x00008980u + (queue) * 0x4)
 
 /* pcie reg_res_dsbl bitfield definitions
  * preprocessor definitions for the bitfield "reg_res_dsbl".
@@ -2360,22 +2368,23 @@
  */
 
 /* register address for bitfield reg_res_dsbl */
-#define pci_reg_res_dsbl_adr 0x00001000
+#define HW_ATL_PCI_REG_RES_DSBL_ADR 0x00001000
 /* bitmask for bitfield reg_res_dsbl */
-#define pci_reg_res_dsbl_msk 0x20000000
+#define HW_ATL_PCI_REG_RES_DSBL_MSK 0x20000000
 /* inverted bitmask for bitfield reg_res_dsbl */
-#define pci_reg_res_dsbl_mskn 0xdfffffff
+#define HW_ATL_PCI_REG_RES_DSBL_MSKN 0xdfffffff
 /* lower bit position of bitfield reg_res_dsbl */
-#define pci_reg_res_dsbl_shift 29
+#define HW_ATL_PCI_REG_RES_DSBL_SHIFT 29
 /* width of bitfield reg_res_dsbl */
-#define pci_reg_res_dsbl_width 1
+#define HW_ATL_PCI_REG_RES_DSBL_WIDTH 1
 /* default value of bitfield reg_res_dsbl */
-#define pci_reg_res_dsbl_default 0x1
+#define HW_ATL_PCI_REG_RES_DSBL_DEFAULT 0x1
 
 /* PCI core control register */
-#define pci_reg_control6_adr 0x1014u
+#define HW_ATL_PCI_REG_CONTROL6_ADR 0x1014u
 
 /* global microprocessor scratch pad definitions */
-#define glb_cpu_scratch_scp_adr(scratch_scp) (0x00000300u + (scratch_scp) * 0x4)
+#define HW_ATL_GLB_CPU_SCRATCH_SCP_ADR(scratch_scp) \
+	(0x00000300u + (scratch_scp) * 0x4)
 
 #endif /* HW_ATL_LLH_INTERNAL_H */
diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.c b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.c
index f2ce12ed4218..967f0fd07fcf 100644
--- a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.c
+++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.c
@@ -11,41 +11,244 @@
  * abstraction layer.
  */
 
-#include "../aq_hw.h"
+#include "../aq_nic.h"
 #include "../aq_hw_utils.h"
-#include "../aq_pci_func.h"
-#include "../aq_ring.h"
-#include "../aq_vec.h"
 #include "hw_atl_utils.h"
 #include "hw_atl_llh.h"
+#include "hw_atl_llh_internal.h"
 
 #include <linux/random.h>
 
 #define HW_ATL_UCP_0X370_REG    0x0370U
 
 #define HW_ATL_FW_SM_RAM        0x2U
+#define HW_ATL_MPI_FW_VERSION	0x18
 #define HW_ATL_MPI_CONTROL_ADR  0x0368U
 #define HW_ATL_MPI_STATE_ADR    0x036CU
 
 #define HW_ATL_MPI_STATE_MSK    0x00FFU
 #define HW_ATL_MPI_STATE_SHIFT  0U
-#define HW_ATL_MPI_SPEED_MSK    0xFFFFU
+#define HW_ATL_MPI_SPEED_MSK    0xFFFF0000U
 #define HW_ATL_MPI_SPEED_SHIFT  16U
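
The mask fix above matters: with the old value 0xFFFFU and a shift of
16, masking and then shifting always produced zero. 0xFFFF0000U makes
the mask agree with the shift, so state and speed can be unpacked from
one MPI word, e.g. (helper name is hypothetical):

static void hw_atl_mpi_unpack_sketch(u32 mpi, u32 *state, u32 *speed)
{
	*state = (mpi & HW_ATL_MPI_STATE_MSK) >> HW_ATL_MPI_STATE_SHIFT;
	*speed = (mpi & HW_ATL_MPI_SPEED_MSK) >> HW_ATL_MPI_SPEED_SHIFT;
}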
 
-static int hw_atl_utils_fw_downld_dwords(struct aq_hw_s *self, u32 a,
-					 u32 *p, u32 cnt)
+#define HW_ATL_MPI_DAISY_CHAIN_STATUS	0x704
+#define HW_ATL_MPI_BOOT_EXIT_CODE	0x388
+
+#define HW_ATL_MAC_PHY_CONTROL	0x4000
+#define HW_ATL_MAC_PHY_MPI_RESET_BIT 0x1D
+
+#define HW_ATL_FW_VER_1X 0x01050006U
+#define HW_ATL_FW_VER_2X 0x02000000U
+#define HW_ATL_FW_VER_3X 0x03000000U
+
+#define FORCE_FLASHLESS 0
+
+static int hw_atl_utils_ver_match(u32 ver_expected, u32 ver_actual);
+
+int hw_atl_utils_initfw(struct aq_hw_s *self, const struct aq_fw_ops **fw_ops)
 {
 	int err = 0;
 
-	AQ_HW_WAIT_FOR(reg_glb_cpu_sem_get(self,
-					   HW_ATL_FW_SM_RAM) == 1U,
-					   1U, 10000U);
+	err = hw_atl_utils_soft_reset(self);
+	if (err)
+		return err;
+
+	hw_atl_utils_hw_chip_features_init(self,
+					   &self->chip_features);
+
+	hw_atl_utils_get_fw_version(self, &self->fw_ver_actual);
+
+	if (hw_atl_utils_ver_match(HW_ATL_FW_VER_1X,
+				   self->fw_ver_actual) == 0) {
+		*fw_ops = &aq_fw_1x_ops;
+	} else if (hw_atl_utils_ver_match(HW_ATL_FW_VER_2X,
+					self->fw_ver_actual) == 0) {
+		*fw_ops = &aq_fw_2x_ops;
+	} else if (hw_atl_utils_ver_match(HW_ATL_FW_VER_3X,
+					self->fw_ver_actual) == 0) {
+		*fw_ops = &aq_fw_2x_ops;
+	} else {
+		aq_pr_err("Bad FW version detected: %x\n",
+			  self->fw_ver_actual);
+		return -EOPNOTSUPP;
+	}
+	self->aq_fw_ops = *fw_ops;
+	err = self->aq_fw_ops->init(self);
+	return err;
+}
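
hw_atl_utils_ver_match() is only forward-declared in this hunk; its
body is outside the visible diff. Given how it is called (zero means a
match), a plausible reading is a major-version compare. This is an
assumption, not the driver's actual implementation:

static int hw_atl_utils_ver_match_sketch(u32 ver_expected, u32 ver_actual)
{
	/* Assumed semantics: 0 when the FW major versions agree. */
	if ((ver_expected & 0xFF000000U) == (ver_actual & 0xFF000000U))
		return 0;
	return -EOPNOTSUPP;
}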
+
+static int hw_atl_utils_soft_reset_flb(struct aq_hw_s *self)
+{
+	int k = 0;
+	u32 gsr;
+
+	aq_hw_write_reg(self, 0x404, 0x40e1);
+	AQ_HW_SLEEP(50);
+
+	/* Cleanup SPI */
+	aq_hw_write_reg(self, 0x534, 0xA0);
+	aq_hw_write_reg(self, 0x100, 0x9F);
+	aq_hw_write_reg(self, 0x100, 0x809F);
+
+	gsr = aq_hw_read_reg(self, HW_ATL_GLB_SOFT_RES_ADR);
+	aq_hw_write_reg(self, HW_ATL_GLB_SOFT_RES_ADR, (gsr & 0xBFFF) | 0x8000);
+
+	/* Kickstart MAC */
+	aq_hw_write_reg(self, 0x404, 0x80e0);
+	aq_hw_write_reg(self, 0x32a8, 0x0);
+	aq_hw_write_reg(self, 0x520, 0x1);
+	AQ_HW_SLEEP(10);
+	aq_hw_write_reg(self, 0x404, 0x180e0);
+
+	for (k = 0; k < 1000; k++) {
+		u32 flb_status = aq_hw_read_reg(self,
+						HW_ATL_MPI_DAISY_CHAIN_STATUS);
+
+		flb_status = flb_status & 0x10;
+		if (flb_status)
+			break;
+		AQ_HW_SLEEP(10);
+	}
+	if (k == 1000) {
+		aq_pr_err("MAC kickstart failed\n");
+		return -EIO;
+	}
+
+	/* FW reset */
+	aq_hw_write_reg(self, 0x404, 0x80e0);
+	AQ_HW_SLEEP(50);
+	aq_hw_write_reg(self, 0x3a0, 0x1);
+
+	/* Kickstart PHY - skipped */
+
+	/* Global software reset */
+	hw_atl_rx_rx_reg_res_dis_set(self, 0U);
+	hw_atl_tx_tx_reg_res_dis_set(self, 0U);
+	aq_hw_write_reg_bit(self, HW_ATL_MAC_PHY_CONTROL,
+			    BIT(HW_ATL_MAC_PHY_MPI_RESET_BIT),
+			    HW_ATL_MAC_PHY_MPI_RESET_BIT, 0x0);
+	gsr = aq_hw_read_reg(self, HW_ATL_GLB_SOFT_RES_ADR);
+	aq_hw_write_reg(self, HW_ATL_GLB_SOFT_RES_ADR, (gsr & 0xBFFF) | 0x8000);
+
+	for (k = 0; k < 1000; k++) {
+		u32 fw_state = aq_hw_read_reg(self, HW_ATL_MPI_FW_VERSION);
+
+		if (fw_state)
+			break;
+		AQ_HW_SLEEP(10);
+	}
+	if (k == 1000) {
+		aq_pr_err("FW kickstart failed\n");
+		return -EIO;
+	}
+
+	return 0;
+}
+
+static int hw_atl_utils_soft_reset_rbl(struct aq_hw_s *self)
+{
+	u32 gsr, rbl_status;
+	int k;
+
+	aq_hw_write_reg(self, 0x404, 0x40e1);
+	aq_hw_write_reg(self, 0x3a0, 0x1);
+	aq_hw_write_reg(self, 0x32a8, 0x0);
+
+	/* Alter RBL status */
+	aq_hw_write_reg(self, 0x388, 0xDEAD);
+
+	/* Global software reset */
+	hw_atl_rx_rx_reg_res_dis_set(self, 0U);
+	hw_atl_tx_tx_reg_res_dis_set(self, 0U);
+	aq_hw_write_reg_bit(self, HW_ATL_MAC_PHY_CONTROL,
+			    BIT(HW_ATL_MAC_PHY_MPI_RESET_BIT),
+			    HW_ATL_MAC_PHY_MPI_RESET_BIT, 0x0);
+	gsr = aq_hw_read_reg(self, HW_ATL_GLB_SOFT_RES_ADR);
+	aq_hw_write_reg(self, HW_ATL_GLB_SOFT_RES_ADR,
+			(gsr & 0xFFFFBFFF) | 0x8000);
+
+	if (FORCE_FLASHLESS)
+		aq_hw_write_reg(self, 0x534, 0x0);
+
+	aq_hw_write_reg(self, 0x404, 0x40e0);
+
+	/* Wait for RBL boot */
+	for (k = 0; k < 1000; k++) {
+		rbl_status = aq_hw_read_reg(self, 0x388) & 0xFFFF;
+		if (rbl_status && rbl_status != 0xDEAD)
+			break;
+		AQ_HW_SLEEP(10);
+	}
+	if (!rbl_status || rbl_status == 0xDEAD) {
+		aq_pr_err("RBL restart failed\n");
+		return -EIO;
+	}
+
+	/* Restore NVR */
+	if (FORCE_FLASHLESS)
+		aq_hw_write_reg(self, 0x534, 0xA0);
+
+	if (rbl_status == 0xF1A7) {
+		aq_pr_err("No FW detected. Dynamic FW load not implemented\n");
+		return -EOPNOTSUPP;
+	}
+
+	for (k = 0; k < 1000; k++) {
+		u32 fw_state = aq_hw_read_reg(self, HW_ATL_MPI_FW_VERSION);
+
+		if (fw_state)
+			break;
+		AQ_HW_SLEEP(10);
+	}
+	if (k == 1000) {
+		aq_pr_err("FW kickstart failed\n");
+		return -EIO;
+	}
+
+	return 0;
+}
+
+int hw_atl_utils_soft_reset(struct aq_hw_s *self)
+{
+	int k;
+	u32 boot_exit_code = 0;
+
+	for (k = 0; k < 1000; ++k) {
+		u32 flb_status = aq_hw_read_reg(self,
+						HW_ATL_MPI_DAISY_CHAIN_STATUS);
+		boot_exit_code = aq_hw_read_reg(self,
+						HW_ATL_MPI_BOOT_EXIT_CODE);
+		if (flb_status != 0x06000000 || boot_exit_code != 0)
+			break;
+	}
+
+	if (k == 1000) {
+		aq_pr_err("Neither RBL nor FLB firmware started\n");
+		return -EOPNOTSUPP;
+	}
+
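+	/* A nonzero boot exit code indicates the chip booted via RBL;
+	 * otherwise the FLB path is in use.
+	 */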
+	self->rbl_enabled = (boot_exit_code != 0);
+
+	if (self->rbl_enabled)
+		return hw_atl_utils_soft_reset_rbl(self);
+	else
+		return hw_atl_utils_soft_reset_flb(self);
+}
+
+int hw_atl_utils_fw_downld_dwords(struct aq_hw_s *self, u32 a,
+				  u32 *p, u32 cnt)
+{
+	int err = 0;
+
+	AQ_HW_WAIT_FOR(hw_atl_reg_glb_cpu_sem_get(self,
+						  HW_ATL_FW_SM_RAM) == 1U,
+						  1U, 10000U);
 
 	if (err < 0) {
 		bool is_locked;
 
-		reg_glb_cpu_sem_set(self, 1U, HW_ATL_FW_SM_RAM);
-		is_locked = reg_glb_cpu_sem_get(self, HW_ATL_FW_SM_RAM);
+		hw_atl_reg_glb_cpu_sem_set(self, 1U, HW_ATL_FW_SM_RAM);
+		is_locked = hw_atl_reg_glb_cpu_sem_get(self, HW_ATL_FW_SM_RAM);
 		if (!is_locked) {
 			err = -ETIME;
 			goto err_exit;
@@ -66,7 +269,7 @@ static int hw_atl_utils_fw_downld_dwords(struct aq_hw_s *self, u32 a,
 		*(p++) = aq_hw_read_reg(self, 0x0000020CU);
 	}
 
-	reg_glb_cpu_sem_set(self, 1U, HW_ATL_FW_SM_RAM);
+	hw_atl_reg_glb_cpu_sem_set(self, 1U, HW_ATL_FW_SM_RAM);
 
 err_exit:
 	return err;
@@ -78,7 +281,7 @@ static int hw_atl_utils_fw_upload_dwords(struct aq_hw_s *self, u32 a, u32 *p,
 	int err = 0;
 	bool is_locked;
 
-	is_locked = reg_glb_cpu_sem_get(self, HW_ATL_FW_SM_RAM);
+	is_locked = hw_atl_reg_glb_cpu_sem_get(self, HW_ATL_FW_SM_RAM);
 	if (!is_locked) {
 		err = -ETIME;
 		goto err_exit;
@@ -97,7 +300,7 @@ static int hw_atl_utils_fw_upload_dwords(struct aq_hw_s *self, u32 a, u32 *p,
 		}
 	}
 
-	reg_glb_cpu_sem_set(self, 1U, HW_ATL_FW_SM_RAM);
+	hw_atl_reg_glb_cpu_sem_set(self, 1U, HW_ATL_FW_SM_RAM);
 
 err_exit:
 	return err;
@@ -119,7 +322,7 @@ err_exit:
 }
 
 static int hw_atl_utils_init_ucp(struct aq_hw_s *self,
-				 struct aq_hw_caps_s *aq_hw_caps)
+				 const struct aq_hw_caps_s *aq_hw_caps)
 {
 	int err = 0;
 
@@ -133,20 +336,12 @@ static int hw_atl_utils_init_ucp(struct aq_hw_s *self,
 		aq_hw_write_reg(self, HW_ATL_UCP_0X370_REG, ucp_0x370);
 	}
 
-	reg_glb_cpu_scratch_scp_set(self, 0x00000000U, 25U);
+	hw_atl_reg_glb_cpu_scratch_scp_set(self, 0x00000000U, 25U);
 
 	/* check 10 times by 1ms */
-	AQ_HW_WAIT_FOR(0U != (PHAL_ATLANTIC_A0->mbox_addr =
+	AQ_HW_WAIT_FOR(0U != (self->mbox_addr =
 			aq_hw_read_reg(self, 0x360U)), 1000U, 10U);
 
-	err = hw_atl_utils_ver_match(aq_hw_caps->fw_ver_expected,
-				     aq_hw_read_reg(self, 0x18U));
-
-	if (err < 0)
-		pr_err("%s: Bad FW version detected: expected=%x, actual=%x\n",
-		       AQ_CFG_DRV_NAME,
-		       aq_hw_caps->fw_ver_expected,
-		       aq_hw_read_reg(self, 0x18U));
 	return err;
 }
 
@@ -174,14 +369,14 @@ static int hw_atl_utils_fw_rpc_call(struct aq_hw_s *self, unsigned int rpc_size)
 		err = -1;
 		goto err_exit;
 	}
-	err = hw_atl_utils_fw_upload_dwords(self, PHAL_ATLANTIC->rpc_addr,
-					    (u32 *)(void *)&PHAL_ATLANTIC->rpc,
+	err = hw_atl_utils_fw_upload_dwords(self, self->rpc_addr,
+					    (u32 *)(void *)&self->rpc,
 					    (rpc_size + sizeof(u32) -
 					    sizeof(u8)) / sizeof(u32));
 	if (err < 0)
 		goto err_exit;
 
-	sw.tid = 0xFFFFU & (++PHAL_ATLANTIC->rpc_tid);
+	sw.tid = 0xFFFFU & (++self->rpc_tid);
 	sw.len = (u16)rpc_size;
 	aq_hw_write_reg(self, HW_ATL_RPC_CONTROL_ADR, sw.val);
 
@@ -199,7 +394,7 @@ static int hw_atl_utils_fw_rpc_wait(struct aq_hw_s *self,
 	do {
 		sw.val = aq_hw_read_reg(self, HW_ATL_RPC_CONTROL_ADR);
 
-		PHAL_ATLANTIC->rpc_tid = sw.tid;
+		self->rpc_tid = sw.tid;
 
 		AQ_HW_WAIT_FOR(sw.tid ==
 				(fw.val =
@@ -221,9 +416,9 @@ static int hw_atl_utils_fw_rpc_wait(struct aq_hw_s *self,
 		if (fw.len) {
 			err =
 			hw_atl_utils_fw_downld_dwords(self,
-						      PHAL_ATLANTIC->rpc_addr,
+						      self->rpc_addr,
 						      (u32 *)(void *)
-						      &PHAL_ATLANTIC->rpc,
+						      &self->rpc,
 						      (fw.len + sizeof(u32) -
 						      sizeof(u8)) /
 						      sizeof(u32));
@@ -231,19 +426,18 @@ static int hw_atl_utils_fw_rpc_wait(struct aq_hw_s *self,
 				goto err_exit;
 		}
 
-		*rpc = &PHAL_ATLANTIC->rpc;
+		*rpc = &self->rpc;
 	}
 
 err_exit:
 	return err;
 }
 
-static int hw_atl_utils_mpi_create(struct aq_hw_s *self,
-				   struct aq_hw_caps_s *aq_hw_caps)
+static int hw_atl_utils_mpi_create(struct aq_hw_s *self)
 {
 	int err = 0;
 
-	err = hw_atl_utils_init_ucp(self, aq_hw_caps);
+	err = hw_atl_utils_init_ucp(self, self->aq_nic_cfg->aq_hw_caps);
 	if (err < 0)
 		goto err_exit;
 
@@ -259,7 +453,7 @@ int hw_atl_utils_mpi_read_mbox(struct aq_hw_s *self,
 			       struct hw_aq_atl_utils_mbox_header *pmbox)
 {
 	return hw_atl_utils_fw_downld_dwords(self,
-				      PHAL_ATLANTIC->mbox_addr,
+				      self->mbox_addr,
 				      (u32 *)(void *)pmbox,
 				      sizeof(*pmbox) / sizeof(u32));
 }
@@ -270,7 +464,7 @@ void hw_atl_utils_mpi_read_stats(struct aq_hw_s *self,
 	int err = 0;
 
 	err = hw_atl_utils_fw_downld_dwords(self,
-					    PHAL_ATLANTIC->mbox_addr,
+					    self->mbox_addr,
 					    (u32 *)(void *)pmbox,
 					    sizeof(*pmbox) / sizeof(u32));
 	if (err < 0)
@@ -281,27 +475,27 @@ void hw_atl_utils_mpi_read_stats(struct aq_hw_s *self,
 					self->aq_nic_cfg->mtu : 1514U;
 		pmbox->stats.ubrc = pmbox->stats.uprc * mtu;
 		pmbox->stats.ubtc = pmbox->stats.uptc * mtu;
-		pmbox->stats.dpc = atomic_read(&PHAL_ATLANTIC_A0->dpc);
+		pmbox->stats.dpc = atomic_read(&self->dpc);
 	} else {
-		pmbox->stats.dpc = reg_rx_dma_stat_counter7get(self);
+		pmbox->stats.dpc = hw_atl_reg_rx_dma_stat_counter7get(self);
 	}
 
 err_exit:;
 }
 
-int hw_atl_utils_mpi_set_speed(struct aq_hw_s *self, u32 speed,
-			       enum hal_atl_utils_fw_state_e state)
+int hw_atl_utils_mpi_set_speed(struct aq_hw_s *self, u32 speed)
 {
-	u32 ucp_0x368 = 0;
+	u32 val = aq_hw_read_reg(self, HW_ATL_MPI_CONTROL_ADR);
 
-	ucp_0x368 = (speed << HW_ATL_MPI_SPEED_SHIFT) | state;
-	aq_hw_write_reg(self, HW_ATL_MPI_CONTROL_ADR, ucp_0x368);
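+	/* Read-modify-write: keep the FW state (low byte) and update only
+	 * the speed bits (upper 16 bits) of the MPI control register.
+	 */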
+	val = (val & HW_ATL_MPI_STATE_MSK) | (speed << HW_ATL_MPI_SPEED_SHIFT);
+	aq_hw_write_reg(self, HW_ATL_MPI_CONTROL_ADR, val);
 
 	return 0;
 }
 
 void hw_atl_utils_mpi_set(struct aq_hw_s *self,
-			  enum hal_atl_utils_fw_state_e state, u32 speed)
+			  enum hal_atl_utils_fw_state_e state,
+			  u32 speed)
 {
 	int err = 0;
 	u32 transaction_id = 0;
@@ -320,11 +514,22 @@ void hw_atl_utils_mpi_set(struct aq_hw_s *self,
 			goto err_exit;
 	}
 
-	err = hw_atl_utils_mpi_set_speed(self, speed, state);
+	aq_hw_write_reg(self, HW_ATL_MPI_CONTROL_ADR,
+			(speed << HW_ATL_MPI_SPEED_SHIFT) | state);
 
 err_exit:;
 }
 
+static int hw_atl_utils_mpi_set_state(struct aq_hw_s *self,
+				      enum hal_atl_utils_fw_state_e state)
+{
+	u32 val = aq_hw_read_reg(self, HW_ATL_MPI_CONTROL_ADR);
+
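+	/* Counterpart of hw_atl_utils_mpi_set_speed(): keep the speed bits,
+	 * replace the FW state.
+	 */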
+	val = state | (val & HW_ATL_MPI_SPEED_MSK);
+	aq_hw_write_reg(self, HW_ATL_MPI_CONTROL_ADR, val);
+	return 0;
+}
+
 int hw_atl_utils_mpi_get_link_status(struct aq_hw_s *self)
 {
 	u32 cp0x036C = aq_hw_read_reg(self, HW_ATL_MPI_STATE_ADR);
@@ -365,7 +570,6 @@ int hw_atl_utils_mpi_get_link_status(struct aq_hw_s *self)
 }
 
 int hw_atl_utils_get_mac_permanent(struct aq_hw_s *self,
-				   struct aq_hw_caps_s *aq_hw_caps,
 				   u8 *mac)
 {
 	int err = 0;
@@ -373,15 +577,6 @@ int hw_atl_utils_get_mac_permanent(struct aq_hw_s *self,
 	u32 l = 0U;
 	u32 mac_addr[2];
 
-	self->mmio = aq_pci_func_get_mmio(self->aq_pci_func);
-
-	hw_atl_utils_hw_chip_features_init(self,
-					   &PHAL_ATLANTIC_A0->chip_features);
-
-	err = hw_atl_utils_mpi_create(self, aq_hw_caps);
-	if (err < 0)
-		goto err_exit;
-
 	if (!aq_hw_read_reg(self, HW_ATL_UCP_0X370_REG)) {
 		unsigned int rnd = 0;
 		unsigned int ucp_0x370 = 0;
@@ -396,7 +591,7 @@ int hw_atl_utils_get_mac_permanent(struct aq_hw_s *self,
 					    aq_hw_read_reg(self, 0x00000374U) +
 					    (40U * 4U),
 					    mac_addr,
-					    AQ_DIMOF(mac_addr));
+					    ARRAY_SIZE(mac_addr));
 	if (err < 0) {
 		mac_addr[0] = 0U;
 		mac_addr[1] = 0U;
@@ -427,7 +622,6 @@ int hw_atl_utils_get_mac_permanent(struct aq_hw_s *self,
 		mac[0] = (u8)(0xFFU & h);
 	}
 
-err_exit:
 	return err;
 }
 
@@ -465,7 +659,7 @@ unsigned int hw_atl_utils_mbps_2_speed_index(unsigned int mbps)
 void hw_atl_utils_hw_chip_features_init(struct aq_hw_s *self, u32 *p)
 {
 	u32 chip_features = 0U;
-	u32 val = reg_glb_mif_id_get(self);
+	u32 val = hw_atl_reg_glb_mif_id_get(self);
 	u32 mif_rev = val & 0xFFU;
 
 	if ((3U & mif_rev) == 1U) {
@@ -500,13 +694,13 @@ int hw_atl_utils_hw_set_power(struct aq_hw_s *self,
 
 int hw_atl_utils_update_stats(struct aq_hw_s *self)
 {
-	struct hw_atl_s *hw_self = PHAL_ATLANTIC;
 	struct hw_aq_atl_utils_mbox mbox;
 
 	hw_atl_utils_mpi_read_stats(self, &mbox);
 
-#define AQ_SDELTA(_N_) (hw_self->curr_stats._N_ += \
-			mbox.stats._N_ - hw_self->last_stats._N_)
+#define AQ_SDELTA(_N_) (self->curr_stats._N_ += \
+			mbox.stats._N_ - self->last_stats._N_)
+
 	if (self->aq_link_status.mbps) {
 		AQ_SDELTA(uprc);
 		AQ_SDELTA(mprc);
@@ -527,19 +721,19 @@ int hw_atl_utils_update_stats(struct aq_hw_s *self)
 		AQ_SDELTA(dpc);
 	}
 #undef AQ_SDELTA
-	hw_self->curr_stats.dma_pkt_rc = stats_rx_dma_good_pkt_counterlsw_get(self);
-	hw_self->curr_stats.dma_pkt_tc = stats_tx_dma_good_pkt_counterlsw_get(self);
-	hw_self->curr_stats.dma_oct_rc = stats_rx_dma_good_octet_counterlsw_get(self);
-	hw_self->curr_stats.dma_oct_tc = stats_tx_dma_good_octet_counterlsw_get(self);
+	self->curr_stats.dma_pkt_rc = hw_atl_stats_rx_dma_good_pkt_counterlsw_get(self);
+	self->curr_stats.dma_pkt_tc = hw_atl_stats_tx_dma_good_pkt_counterlsw_get(self);
+	self->curr_stats.dma_oct_rc = hw_atl_stats_rx_dma_good_octet_counterlsw_get(self);
+	self->curr_stats.dma_oct_tc = hw_atl_stats_tx_dma_good_octet_counterlsw_get(self);
 
-	memcpy(&hw_self->last_stats, &mbox.stats, sizeof(mbox.stats));
+	memcpy(&self->last_stats, &mbox.stats, sizeof(mbox.stats));
 
 	return 0;
 }
 
 struct aq_stats_s *hw_atl_utils_get_hw_stats(struct aq_hw_s *self)
 {
-	return &PHAL_ATLANTIC->curr_stats;
+	return &self->curr_stats;
 }
 
 static const u32 hw_atl_utils_hw_mac_regs[] = {
@@ -568,14 +762,14 @@ static const u32 hw_atl_utils_hw_mac_regs[] = {
 };
 
 int hw_atl_utils_hw_get_regs(struct aq_hw_s *self,
-			     struct aq_hw_caps_s *aq_hw_caps,
+			     const struct aq_hw_caps_s *aq_hw_caps,
 			     u32 *regs_buff)
 {
 	unsigned int i = 0U;
 
 	for (i = 0; i < aq_hw_caps->mac_regs_count; i++)
 		regs_buff[i] = aq_hw_read_reg(self,
-			hw_atl_utils_hw_mac_regs[i]);
+					      hw_atl_utils_hw_mac_regs[i]);
 	return 0;
 }
 
@@ -584,3 +778,13 @@ int hw_atl_utils_get_fw_version(struct aq_hw_s *self, u32 *fw_version)
 	*fw_version = aq_hw_read_reg(self, 0x18U);
 	return 0;
 }
+
+const struct aq_fw_ops aq_fw_1x_ops = {
+	.init = hw_atl_utils_mpi_create,
+	.reset = NULL,
+	.get_mac_permanent = hw_atl_utils_get_mac_permanent,
+	.set_link_speed = hw_atl_utils_mpi_set_speed,
+	.set_state = hw_atl_utils_mpi_set_state,
+	.update_link_status = hw_atl_utils_mpi_get_link_status,
+	.update_stats = hw_atl_utils_update_stats,
+};
diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.h b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.h
index 21aeca6908d3..2c690947910a 100644
--- a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.h
+++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.h
@@ -14,10 +14,39 @@
 #ifndef HW_ATL_UTILS_H
 #define HW_ATL_UTILS_H
 
-#include "../aq_common.h"
-
 #define HW_ATL_FLUSH() { (void)aq_hw_read_reg(self, 0x10); }
 
+/* Hardware tx descriptor */
+struct __packed hw_atl_txd_s {
+	u64 buf_addr;
+	u32 ctl;
+	u32 ctl2; /* 63..46 - payload length, 45 - ctx enable, 44 - ctx index */
+};
+
+/* Hardware tx context descriptor */
+struct __packed hw_atl_txc_s {
+	u32 rsvd;
+	u32 len;
+	u32 ctl;
+	u32 len2;
+};
+
+/* Hardware rx descriptor */
+struct __packed hw_atl_rxd_s {
+	u64 buf_addr;
+	u64 hdr_addr;
+};
+
+/* Hardware rx descriptor writeback */
+struct __packed hw_atl_rxd_wb_s {
+	u32 type;
+	u32 rss_hash;
+	u16 status;
+	u16 pkt_len;
+	u16 next_desc_ptr;
+	u16 vlan;
+};
+
 struct __packed hw_atl_stats_s {
 	u32 uprc;
 	u32 mprc;
@@ -126,26 +155,6 @@ struct __packed hw_aq_atl_utils_mbox {
 	struct hw_atl_stats_s stats;
 };
 
-struct __packed hw_atl_s {
-	struct aq_hw_s base;
-	struct hw_atl_stats_s last_stats;
-	struct aq_stats_s curr_stats;
-	u64 speed;
-	unsigned int chip_features;
-	u32 fw_ver_actual;
-	atomic_t dpc;
-	u32 mbox_addr;
-	u32 rpc_addr;
-	u32 rpc_tid;
-	struct hw_aq_atl_utils_fw_rpc rpc;
-};
-
-#define SELF ((struct hw_atl_s *)self)
-
-#define PHAL_ATLANTIC ((struct hw_atl_s *)((void *)(self)))
-#define PHAL_ATLANTIC_A0 ((struct hw_atl_s *)((void *)(self)))
-#define PHAL_ATLANTIC_B0 ((struct hw_atl_s *)((void *)(self)))
-
 #define HAL_ATLANTIC_UTILS_CHIP_MIPS         0x00000001U
 #define HAL_ATLANTIC_UTILS_CHIP_TPO2         0x00000002U
 #define HAL_ATLANTIC_UTILS_CHIP_RPF2         0x00000004U
@@ -154,7 +163,7 @@ struct __packed hw_atl_s {
 #define HAL_ATLANTIC_UTILS_CHIP_REVISION_B0  0x02000000U
 
 #define IS_CHIP_FEATURE(_F_) (HAL_ATLANTIC_UTILS_CHIP_##_F_ & \
-				PHAL_ATLANTIC->chip_features)
+	self->chip_features)
 
 enum hal_atl_utils_fw_state_e {
 	MPI_DEINIT = 0,
@@ -171,6 +180,73 @@ enum hal_atl_utils_fw_state_e {
 #define HAL_ATLANTIC_RATE_100M       BIT(5)
 #define HAL_ATLANTIC_RATE_INVALID    BIT(6)
 
+enum hw_atl_fw2x_rate {
+	FW2X_RATE_100M    = 0x20,
+	FW2X_RATE_1G      = 0x100,
+	FW2X_RATE_2G5     = 0x200,
+	FW2X_RATE_5G      = 0x400,
+	FW2X_RATE_10G     = 0x800,
+};
+
+enum hw_atl_fw2x_caps_lo {
+	CAPS_LO_10BASET_HD = 0x00,
+	CAPS_LO_10BASET_FD,
+	CAPS_LO_100BASETX_HD,
+	CAPS_LO_100BASET4_HD,
+	CAPS_LO_100BASET2_HD,
+	CAPS_LO_100BASETX_FD,
+	CAPS_LO_100BASET2_FD,
+	CAPS_LO_1000BASET_HD,
+	CAPS_LO_1000BASET_FD,
+	CAPS_LO_2P5GBASET_FD,
+	CAPS_LO_5GBASET_FD,
+	CAPS_LO_10GBASET_FD,
+};
+
+enum hw_atl_fw2x_caps_hi {
+	CAPS_HI_RESERVED1 = 0x00,
+	CAPS_HI_10BASET_EEE,
+	CAPS_HI_RESERVED2,
+	CAPS_HI_PAUSE,
+	CAPS_HI_ASYMMETRIC_PAUSE,
+	CAPS_HI_100BASETX_EEE,
+	CAPS_HI_RESERVED3,
+	CAPS_HI_RESERVED4,
+	CAPS_HI_1000BASET_FD_EEE,
+	CAPS_HI_2P5GBASET_FD_EEE,
+	CAPS_HI_5GBASET_FD_EEE,
+	CAPS_HI_10GBASET_FD_EEE,
+	CAPS_HI_RESERVED5,
+	CAPS_HI_RESERVED6,
+	CAPS_HI_RESERVED7,
+	CAPS_HI_RESERVED8,
+	CAPS_HI_RESERVED9,
+	CAPS_HI_CABLE_DIAG,
+	CAPS_HI_TEMPERATURE,
+	CAPS_HI_DOWNSHIFT,
+	CAPS_HI_PTP_AVB_EN,
+	CAPS_HI_MEDIA_DETECT,
+	CAPS_HI_LINK_DROP,
+	CAPS_HI_SLEEP_PROXY,
+	CAPS_HI_WOL,
+	CAPS_HI_MAC_STOP,
+	CAPS_HI_EXT_LOOPBACK,
+	CAPS_HI_INT_LOOPBACK,
+	CAPS_HI_EFUSE_AGENT,
+	CAPS_HI_WOL_TIMER,
+	CAPS_HI_STATISTICS,
+	CAPS_HI_TRANSACTION_ID,
+};
+
+struct aq_hw_s;
+struct aq_fw_ops;
+struct aq_hw_caps_s;
+struct aq_hw_link_status_s;
+
+int hw_atl_utils_initfw(struct aq_hw_s *self, const struct aq_fw_ops **fw_ops);
+
+int hw_atl_utils_soft_reset(struct aq_hw_s *self);
+
 void hw_atl_utils_hw_chip_features_init(struct aq_hw_s *self, u32 *p);
 
 int hw_atl_utils_mpi_read_mbox(struct aq_hw_s *self,
@@ -183,19 +259,15 @@ void hw_atl_utils_mpi_set(struct aq_hw_s *self,
 			  enum hal_atl_utils_fw_state_e state,
 			  u32 speed);
 
-int hw_atl_utils_mpi_set_speed(struct aq_hw_s *self, u32 speed,
-			       enum hal_atl_utils_fw_state_e state);
-
 int hw_atl_utils_mpi_get_link_status(struct aq_hw_s *self);
 
 int hw_atl_utils_get_mac_permanent(struct aq_hw_s *self,
-				   struct aq_hw_caps_s *aq_hw_caps,
 				   u8 *mac);
 
 unsigned int hw_atl_utils_mbps_2_speed_index(unsigned int mbps);
 
 int hw_atl_utils_hw_get_regs(struct aq_hw_s *self,
-			     struct aq_hw_caps_s *aq_hw_caps,
+			     const struct aq_hw_caps_s *aq_hw_caps,
 			     u32 *regs_buff);
 
 int hw_atl_utils_hw_set_power(struct aq_hw_s *self,
@@ -208,5 +280,10 @@ int hw_atl_utils_get_fw_version(struct aq_hw_s *self, u32 *fw_version);
 int hw_atl_utils_update_stats(struct aq_hw_s *self);
 
 struct aq_stats_s *hw_atl_utils_get_hw_stats(struct aq_hw_s *self);
+int hw_atl_utils_fw_downld_dwords(struct aq_hw_s *self, u32 a,
+				  u32 *p, u32 cnt);
+
+extern const struct aq_fw_ops aq_fw_1x_ops;
+extern const struct aq_fw_ops aq_fw_2x_ops;
 
 #endif /* HW_ATL_UTILS_H */
diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils_fw2x.c b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils_fw2x.c
new file mode 100644
index 000000000000..8cfce95c82fc
--- /dev/null
+++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils_fw2x.c
@@ -0,0 +1,184 @@
+/*
+ * aQuantia Corporation Network Driver
+ * Copyright (C) 2014-2017 aQuantia Corporation. All rights reserved
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ */
+
+/* File hw_atl_utils_fw2x.c: Definition of firmware 2.x functions for
+ * Atlantic hardware abstraction layer.
+ */
+
+#include "../aq_hw.h"
+#include "../aq_hw_utils.h"
+#include "../aq_pci_func.h"
+#include "../aq_ring.h"
+#include "../aq_vec.h"
+#include "hw_atl_utils.h"
+#include "hw_atl_llh.h"
+
+#define HW_ATL_FW2X_MPI_EFUSE_ADDR	0x364
+#define HW_ATL_FW2X_MPI_MBOX_ADDR	0x360
+
+#define HW_ATL_FW2X_MPI_CONTROL_ADDR	0x368
+#define HW_ATL_FW2X_MPI_CONTROL2_ADDR	0x36C
+
+#define HW_ATL_FW2X_MPI_STATE_ADDR	0x370
+#define HW_ATL_FW2X_MPI_STATE2_ADDR	0x374
+
+static int aq_fw2x_init(struct aq_hw_s *self)
+{
+	int err = 0;
+
+	/* check 10 times by 1ms */
+	AQ_HW_WAIT_FOR(0U != (self->mbox_addr =
+			aq_hw_read_reg(self, HW_ATL_FW2X_MPI_MBOX_ADDR)),
+		       1000U, 10U);
+	return err;
+}
+
+static enum hw_atl_fw2x_rate link_speed_mask_2fw2x_ratemask(u32 speed)
+{
+	enum hw_atl_fw2x_rate rate = 0;
+
+	if (speed & AQ_NIC_RATE_10G)
+		rate |= FW2X_RATE_10G;
+
+	if (speed & AQ_NIC_RATE_5G)
+		rate |= FW2X_RATE_5G;
+
+	if (speed & AQ_NIC_RATE_5GSR)
+		rate |= FW2X_RATE_5G;
+
+	if (speed & AQ_NIC_RATE_2GS)
+		rate |= FW2X_RATE_2G5;
+
+	if (speed & AQ_NIC_RATE_1G)
+		rate |= FW2X_RATE_1G;
+
+	if (speed & AQ_NIC_RATE_100M)
+		rate |= FW2X_RATE_100M;
+
+	return rate;
+}
+
+static int aq_fw2x_set_link_speed(struct aq_hw_s *self, u32 speed)
+{
+	u32 val = link_speed_mask_2fw2x_ratemask(speed);
+
+	aq_hw_write_reg(self, HW_ATL_FW2X_MPI_CONTROL_ADDR, val);
+
+	return 0;
+}
+
+static int aq_fw2x_set_state(struct aq_hw_s *self,
+			     enum hal_atl_utils_fw_state_e state)
+{
+	/* No explicit state in 2x fw */
+	return 0;
+}
+
+static int aq_fw2x_update_link_status(struct aq_hw_s *self)
+{
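+	/* FW 2.x reports the negotiated rate in the STATE register as a
+	 * FW2X_RATE_* bitmask; map the highest rate bit set to mbps.
+	 */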
+	u32 mpi_state = aq_hw_read_reg(self, HW_ATL_FW2X_MPI_STATE_ADDR);
+	u32 speed = mpi_state & (FW2X_RATE_100M | FW2X_RATE_1G |
+				FW2X_RATE_2G5 | FW2X_RATE_5G | FW2X_RATE_10G);
+	struct aq_hw_link_status_s *link_status = &self->aq_link_status;
+
+	if (speed) {
+		if (speed & FW2X_RATE_10G)
+			link_status->mbps = 10000;
+		else if (speed & FW2X_RATE_5G)
+			link_status->mbps = 5000;
+		else if (speed & FW2X_RATE_2G5)
+			link_status->mbps = 2500;
+		else if (speed & FW2X_RATE_1G)
+			link_status->mbps = 1000;
+		else if (speed & FW2X_RATE_100M)
+			link_status->mbps = 100;
+		else
+			link_status->mbps = 10000;
+	} else {
+		link_status->mbps = 0;
+	}
+
+	return 0;
+}
+
+static int aq_fw2x_get_mac_permanent(struct aq_hw_s *self, u8 *mac)
+{
+	int err = 0;
+	u32 h = 0U;
+	u32 l = 0U;
+	u32 mac_addr[2] = { 0 };
+	u32 efuse_addr = aq_hw_read_reg(self, HW_ATL_FW2X_MPI_EFUSE_ADDR);
+
+	if (efuse_addr != 0) {
+		err = hw_atl_utils_fw_downld_dwords(self,
+						    efuse_addr + (40U * 4U),
+						    mac_addr,
+						    ARRAY_SIZE(mac_addr));
+		if (err)
+			return err;
+		mac_addr[0] = __swab32(mac_addr[0]);
+		mac_addr[1] = __swab32(mac_addr[1]);
+	}
+
+	ether_addr_copy(mac, (u8 *)mac_addr);
+
+	if ((mac[0] & 0x01U) || ((mac[0] | mac[1] | mac[2]) == 0x00U)) {
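+		/* Multicast bit set or empty OUI: the efuse MAC is unusable,
+		 * fall back to a randomized address.
+		 */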
+		unsigned int rnd = 0;
+
+		get_random_bytes(&rnd, sizeof(unsigned int));
+
+		l = 0xE3000000U
+			| (0xFFFFU & rnd)
+			| (0x00 << 16);
+		h = 0x8001300EU;
+
+		mac[5] = (u8)(0xFFU & l);
+		l >>= 8;
+		mac[4] = (u8)(0xFFU & l);
+		l >>= 8;
+		mac[3] = (u8)(0xFFU & l);
+		l >>= 8;
+		mac[2] = (u8)(0xFFU & l);
+		mac[1] = (u8)(0xFFU & h);
+		h >>= 8;
+		mac[0] = (u8)(0xFFU & h);
+	}
+	return err;
+}
+
+static int aq_fw2x_update_stats(struct aq_hw_s *self)
+{
+	int err = 0;
+	u32 mpi_opts = aq_hw_read_reg(self, HW_ATL_FW2X_MPI_CONTROL2_ADDR);
+	u32 orig_stats_val = mpi_opts & BIT(CAPS_HI_STATISTICS);
+
+	/* Toggle statistics bit for FW to update */
+	mpi_opts = mpi_opts ^ BIT(CAPS_HI_STATISTICS);
+	aq_hw_write_reg(self, HW_ATL_FW2X_MPI_CONTROL2_ADDR, mpi_opts);
+
+	/* Wait for the FW to report back */
+	AQ_HW_WAIT_FOR(orig_stats_val !=
+		       (aq_hw_read_reg(self, HW_ATL_FW2X_MPI_STATE2_ADDR) &
+				       BIT(CAPS_HI_STATISTICS)),
+		       1U, 10000U);
+	if (err)
+		return err;
+
+	return hw_atl_utils_update_stats(self);
+}
+
+const struct aq_fw_ops aq_fw_2x_ops = {
+	.init = aq_fw2x_init,
+	.reset = NULL,
+	.get_mac_permanent = aq_fw2x_get_mac_permanent,
+	.set_link_speed = aq_fw2x_set_link_speed,
+	.set_state = aq_fw2x_set_state,
+	.update_link_status = aq_fw2x_update_link_status,
+	.update_stats = aq_fw2x_update_stats,
+};
diff --git a/drivers/net/ethernet/aquantia/atlantic/ver.h b/drivers/net/ethernet/aquantia/atlantic/ver.h
index 9009f2651e70..5265b937677b 100644
--- a/drivers/net/ethernet/aquantia/atlantic/ver.h
+++ b/drivers/net/ethernet/aquantia/atlantic/ver.h
@@ -10,9 +10,9 @@
 #ifndef VER_H
 #define VER_H
 
-#define NIC_MAJOR_DRIVER_VERSION           1
-#define NIC_MINOR_DRIVER_VERSION           6
-#define NIC_BUILD_DRIVER_VERSION           13
+#define NIC_MAJOR_DRIVER_VERSION           2
+#define NIC_MINOR_DRIVER_VERSION           0
+#define NIC_BUILD_DRIVER_VERSION           2
 #define NIC_REVISION_DRIVER_VERSION        0
 
 #define AQ_CFG_DRV_VERSION_SUFFIX "-kern"
diff --git a/drivers/net/ethernet/broadcom/bcm63xx_enet.c b/drivers/net/ethernet/broadcom/bcm63xx_enet.c
index d9346e2ac720..14a59e51db67 100644
--- a/drivers/net/ethernet/broadcom/bcm63xx_enet.c
+++ b/drivers/net/ethernet/broadcom/bcm63xx_enet.c
@@ -1716,7 +1716,6 @@ static int bcm_enet_probe(struct platform_device *pdev)
 	struct bcm63xx_enet_platform_data *pd;
 	struct resource *res_mem, *res_irq, *res_irq_rx, *res_irq_tx;
 	struct mii_bus *bus;
-	const char *clk_name;
 	int i, ret;
 
 	if (!bcm_enet_shared_base[0])
@@ -1751,20 +1750,8 @@ static int bcm_enet_probe(struct platform_device *pdev)
 	dev->irq = priv->irq = res_irq->start;
 	priv->irq_rx = res_irq_rx->start;
 	priv->irq_tx = res_irq_tx->start;
-	priv->mac_id = pdev->id;
 
-	/* get rx & tx dma channel id for this mac */
-	if (priv->mac_id == 0) {
-		priv->rx_chan = 0;
-		priv->tx_chan = 1;
-		clk_name = "enet0";
-	} else {
-		priv->rx_chan = 2;
-		priv->tx_chan = 3;
-		clk_name = "enet1";
-	}
-
-	priv->mac_clk = devm_clk_get(&pdev->dev, clk_name);
+	priv->mac_clk = devm_clk_get(&pdev->dev, "enet");
 	if (IS_ERR(priv->mac_clk)) {
 		ret = PTR_ERR(priv->mac_clk);
 		goto out;
@@ -1795,9 +1782,11 @@ static int bcm_enet_probe(struct platform_device *pdev)
 		priv->dma_chan_width = pd->dma_chan_width;
 		priv->dma_has_sram = pd->dma_has_sram;
 		priv->dma_desc_shift = pd->dma_desc_shift;
+		priv->rx_chan = pd->rx_chan;
+		priv->tx_chan = pd->tx_chan;
 	}
 
-	if (priv->mac_id == 0 && priv->has_phy && !priv->use_external_mii) {
+	if (priv->has_phy && !priv->use_external_mii) {
 		/* using internal PHY, enable clock */
 		priv->phy_clk = devm_clk_get(&pdev->dev, "ephy");
 		if (IS_ERR(priv->phy_clk)) {
@@ -1828,7 +1817,7 @@ static int bcm_enet_probe(struct platform_device *pdev)
 		bus->priv = priv;
 		bus->read = bcm_enet_mdio_read_phylib;
 		bus->write = bcm_enet_mdio_write_phylib;
-		sprintf(bus->id, "%s-%d", pdev->name, priv->mac_id);
+		sprintf(bus->id, "%s-%d", pdev->name, pdev->id);
 
 		/* only probe bus where we think the PHY is, because
 		 * the mdio read operation return 0 instead of 0xffff
@@ -2139,27 +2128,25 @@ static int bcm_enetsw_open(struct net_device *dev)
 
 	/* allocate rx dma ring */
 	size = priv->rx_ring_size * sizeof(struct bcm_enet_desc);
-	p = dma_alloc_coherent(kdev, size, &priv->rx_desc_dma, GFP_KERNEL);
+	p = dma_zalloc_coherent(kdev, size, &priv->rx_desc_dma, GFP_KERNEL);
 	if (!p) {
 		dev_err(kdev, "cannot allocate rx ring %u\n", size);
 		ret = -ENOMEM;
 		goto out_freeirq_tx;
 	}
 
-	memset(p, 0, size);
 	priv->rx_desc_alloc_size = size;
 	priv->rx_desc_cpu = p;
 
 	/* allocate tx dma ring */
 	size = priv->tx_ring_size * sizeof(struct bcm_enet_desc);
-	p = dma_alloc_coherent(kdev, size, &priv->tx_desc_dma, GFP_KERNEL);
+	p = dma_zalloc_coherent(kdev, size, &priv->tx_desc_dma, GFP_KERNEL);
 	if (!p) {
 		dev_err(kdev, "cannot allocate tx ring\n");
 		ret = -ENOMEM;
 		goto out_free_rx_ring;
 	}
 
-	memset(p, 0, size);
 	priv->tx_desc_alloc_size = size;
 	priv->tx_desc_cpu = p;
 
diff --git a/drivers/net/ethernet/broadcom/bcm63xx_enet.h b/drivers/net/ethernet/broadcom/bcm63xx_enet.h
index 5a66728d4776..1d3c917eb830 100644
--- a/drivers/net/ethernet/broadcom/bcm63xx_enet.h
+++ b/drivers/net/ethernet/broadcom/bcm63xx_enet.h
@@ -193,9 +193,6 @@ struct bcm_enet_mib_counters {
 
 struct bcm_enet_priv {
 
-	/* mac id (from platform device id) */
-	int mac_id;
-
 	/* base remapped address of device */
 	void __iomem *base;
 
diff --git a/drivers/net/ethernet/broadcom/bcmsysport.c b/drivers/net/ethernet/broadcom/bcmsysport.c
index 087f01b4dc3a..f15a8fc6dfc9 100644
--- a/drivers/net/ethernet/broadcom/bcmsysport.c
+++ b/drivers/net/ethernet/broadcom/bcmsysport.c
@@ -1216,18 +1216,6 @@ static netdev_tx_t bcm_sysport_xmit(struct sk_buff *skb,
 		goto out;
 	}
 
-	/* The Ethernet switch we are interfaced with needs packets to be at
-	 * least 64 bytes (including FCS) otherwise they will be discarded when
-	 * they enter the switch port logic. When Broadcom tags are enabled, we
-	 * need to make sure that packets are at least 68 bytes
-	 * (including FCS and tag) because the length verification is done after
-	 * the Broadcom tag is stripped off the ingress packet.
-	 */
-	if (skb_put_padto(skb, ETH_ZLEN + ENET_BRCM_TAG_LEN)) {
-		ret = NETDEV_TX_OK;
-		goto out;
-	}
-
 	/* Insert TSB and checksum infos */
 	if (priv->tsb_en) {
 		skb = bcm_sysport_insert_tsb(skb, dev);
diff --git a/drivers/net/ethernet/broadcom/bgmac.c b/drivers/net/ethernet/broadcom/bgmac.c
index 1d96cd594ade..8eef9fb6b1fe 100644
--- a/drivers/net/ethernet/broadcom/bgmac.c
+++ b/drivers/net/ethernet/broadcom/bgmac.c
@@ -128,8 +128,6 @@ bgmac_dma_tx_add_buf(struct bgmac *bgmac, struct bgmac_dma_ring *ring,
 	dma_desc->ctl1 = cpu_to_le32(ctl1);
 }
 
-#define ENET_BRCM_TAG_LEN	4
-
 static netdev_tx_t bgmac_dma_tx_add(struct bgmac *bgmac,
 				    struct bgmac_dma_ring *ring,
 				    struct sk_buff *skb)
@@ -142,18 +140,6 @@ static netdev_tx_t bgmac_dma_tx_add(struct bgmac *bgmac,
 	u32 flags;
 	int i;
 
-	/* The Ethernet switch we are interfaced with needs packets to be at
-	 * least 64 bytes (including FCS) otherwise they will be discarded when
-	 * they enter the switch port logic. When Broadcom tags are enabled, we
-	 * need to make sure that packets are at least 68 bytes
-	 * (including FCS and tag) because the length verification is done after
-	 * the Broadcom tag is stripped off the ingress packet.
-	 */
-	if (netdev_uses_dsa(net_dev)) {
-		if (skb_put_padto(skb, ETH_ZLEN + ENET_BRCM_TAG_LEN))
-			goto err_stats;
-	}
-
 	if (skb->len > BGMAC_DESC_CTL1_LEN) {
 		netdev_err(bgmac->net_dev, "Too long skb (%d)\n", skb->len);
 		goto err_drop;
@@ -240,7 +226,6 @@ err_dma_head:
 
 err_drop:
 	dev_kfree_skb(skb);
-err_stats:
 	net_dev->stats.tx_dropped++;
 	net_dev->stats.tx_errors++;
 	return NETDEV_TX_OK;
diff --git a/drivers/net/ethernet/broadcom/bnx2.c b/drivers/net/ethernet/broadcom/bnx2.c
index 7919f6112ecf..5e34b34f7740 100644
--- a/drivers/net/ethernet/broadcom/bnx2.c
+++ b/drivers/net/ethernet/broadcom/bnx2.c
@@ -5818,8 +5818,8 @@ bnx2_run_loopback(struct bnx2 *bp, int loopback_mode)
 	struct l2_fhdr *rx_hdr;
 	int ret = -ENODEV;
 	struct bnx2_napi *bnapi = &bp->bnx2_napi[0], *tx_napi;
-	struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;
-	struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
+	struct bnx2_tx_ring_info *txr;
+	struct bnx2_rx_ring_info *rxr;
 
 	tx_napi = bnapi;
 
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
index 8ae269ec17a1..d7c98e807ca8 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
@@ -738,8 +738,9 @@ static void bnx2x_gro_receive(struct bnx2x *bp, struct bnx2x_fastpath *fp,
 			bnx2x_gro_csum(bp, skb, bnx2x_gro_ipv6_csum);
 			break;
 		default:
-			WARN_ONCE(1, "Error: FW GRO supports only IPv4/IPv6, not 0x%04x\n",
-				  be16_to_cpu(skb->protocol));
+			netdev_WARN_ONCE(bp->dev,
+					 "Error: FW GRO supports only IPv4/IPv6, not 0x%04x\n",
+					 be16_to_cpu(skb->protocol));
 		}
 	}
 #endif
@@ -2482,8 +2483,7 @@ static void bnx2x_bz_fp(struct bnx2x *bp, int index)
 	 */
 	if (bp->dev->features & NETIF_F_LRO)
 		fp->mode = TPA_MODE_LRO;
-	else if (bp->dev->features & NETIF_F_GRO &&
-		 bnx2x_mtu_allows_gro(bp->dev->mtu))
+	else if (bp->dev->features & NETIF_F_GRO_HW)
 		fp->mode = TPA_MODE_GRO;
 	else
 		fp->mode = TPA_MODE_DISABLED;
@@ -4874,6 +4874,9 @@ int bnx2x_change_mtu(struct net_device *dev, int new_mtu)
 	 */
 	dev->mtu = new_mtu;
 
+	if (!bnx2x_mtu_allows_gro(new_mtu))
+		dev->features &= ~NETIF_F_GRO_HW;
+
 	if (IS_PF(bp) && SHMEM2_HAS(bp, curr_cfg))
 		SHMEM2_WR(bp, curr_cfg, CURR_CFG_MET_OS);
 
@@ -4903,10 +4906,13 @@ netdev_features_t bnx2x_fix_features(struct net_device *dev,
 	}
 
 	/* TPA requires Rx CSUM offloading */
-	if (!(features & NETIF_F_RXCSUM)) {
+	if (!(features & NETIF_F_RXCSUM))
+		features &= ~NETIF_F_LRO;
+
+	if (!(features & NETIF_F_GRO) || !bnx2x_mtu_allows_gro(dev->mtu))
+		features &= ~NETIF_F_GRO_HW;
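+	/* LRO and hardware GRO are mutually exclusive */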
+	if (features & NETIF_F_GRO_HW)
 		features &= ~NETIF_F_LRO;
-		features &= ~NETIF_F_GRO;
-	}
 
 	return features;
 }
@@ -4933,13 +4939,8 @@ int bnx2x_set_features(struct net_device *dev, netdev_features_t features)
 		}
 	}
 
-	/* if GRO is changed while LRO is enabled, don't force a reload */
-	if ((changes & NETIF_F_GRO) && (features & NETIF_F_LRO))
-		changes &= ~NETIF_F_GRO;
-
-	/* if GRO is changed while HW TPA is off, don't force a reload */
-	if ((changes & NETIF_F_GRO) && bp->disable_tpa)
-		changes &= ~NETIF_F_GRO;
+	/* Don't care about GRO changes */
+	changes &= ~NETIF_F_GRO;
 
 	if (changes)
 		bnx2x_reload = true;
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
index ddd5d3ebd201..7b08323e3f3d 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
@@ -12409,8 +12409,8 @@ static int bnx2x_init_bp(struct bnx2x *bp)
 
 	/* Set TPA flags */
 	if (bp->disable_tpa) {
-		bp->dev->hw_features &= ~NETIF_F_LRO;
-		bp->dev->features &= ~NETIF_F_LRO;
+		bp->dev->hw_features &= ~(NETIF_F_LRO | NETIF_F_GRO_HW);
+		bp->dev->features &= ~(NETIF_F_LRO | NETIF_F_GRO_HW);
 	}
 
 	if (CHIP_IS_E1(bp))
@@ -13282,7 +13282,7 @@ static int bnx2x_init_dev(struct bnx2x *bp, struct pci_dev *pdev,
 
 	dev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
 		NETIF_F_TSO | NETIF_F_TSO_ECN | NETIF_F_TSO6 |
-		NETIF_F_RXCSUM | NETIF_F_LRO | NETIF_F_GRO |
+		NETIF_F_RXCSUM | NETIF_F_LRO | NETIF_F_GRO | NETIF_F_GRO_HW |
 		NETIF_F_RXHASH | NETIF_F_HW_VLAN_CTAG_TX;
 	if (!chip_is_e1x) {
 		dev->hw_features |= NETIF_F_GSO_GRE | NETIF_F_GSO_GRE_CSUM |
@@ -13318,6 +13318,8 @@ static int bnx2x_init_dev(struct bnx2x *bp, struct pci_dev *pdev,
 
 	dev->features |= dev->hw_features | NETIF_F_HW_VLAN_CTAG_RX;
 	dev->features |= NETIF_F_HIGHDMA;
+	if (dev->features & NETIF_F_LRO)
+		dev->features &= ~NETIF_F_GRO_HW;
 
 	/* Add Loopback capability to the device */
 	dev->hw_features |= NETIF_F_LOOPBACK;
diff --git a/drivers/net/ethernet/broadcom/bnxt/Makefile b/drivers/net/ethernet/broadcom/bnxt/Makefile
index 59c8ec9c1cad..7c560d545c03 100644
--- a/drivers/net/ethernet/broadcom/bnxt/Makefile
+++ b/drivers/net/ethernet/broadcom/bnxt/Makefile
@@ -1,4 +1,4 @@
 obj-$(CONFIG_BNXT) += bnxt_en.o
 
-bnxt_en-y := bnxt.o bnxt_sriov.o bnxt_ethtool.o bnxt_dcb.o bnxt_ulp.o bnxt_xdp.o bnxt_vfr.o bnxt_devlink.o
+bnxt_en-y := bnxt.o bnxt_sriov.o bnxt_ethtool.o bnxt_dcb.o bnxt_ulp.o bnxt_xdp.o bnxt_vfr.o bnxt_devlink.o bnxt_dim.o
 bnxt_en-$(CONFIG_BNXT_FLOWER_OFFLOAD) += bnxt_tc.o
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
index 61ca4eb7c6fa..1500243b9886 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
@@ -1,7 +1,7 @@
 /* Broadcom NetXtreme-C/E network driver.
  *
  * Copyright (c) 2014-2016 Broadcom Corporation
- * Copyright (c) 2016-2017 Broadcom Limited
+ * Copyright (c) 2016-2018 Broadcom Limited
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License as published by
@@ -107,6 +107,7 @@ enum board_idx {
 	BCM57416_NPAR,
 	BCM57452,
 	BCM57454,
+	BCM5745x_NPAR,
 	BCM58802,
 	BCM58804,
 	BCM58808,
@@ -147,6 +148,7 @@ static const struct {
 	[BCM57416_NPAR] = { "Broadcom BCM57416 NetXtreme-E Ethernet Partition" },
 	[BCM57452] = { "Broadcom BCM57452 NetXtreme-E 10Gb/25Gb/40Gb/50Gb Ethernet" },
 	[BCM57454] = { "Broadcom BCM57454 NetXtreme-E 10Gb/25Gb/40Gb/50Gb/100Gb Ethernet" },
+	[BCM5745x_NPAR] = { "Broadcom BCM5745x NetXtreme-E Ethernet Partition" },
 	[BCM58802] = { "Broadcom BCM58802 NetXtreme-S 10Gb/25Gb/40Gb/50Gb Ethernet" },
 	[BCM58804] = { "Broadcom BCM58804 NetXtreme-S 10Gb/25Gb/40Gb/50Gb/100Gb Ethernet" },
 	[BCM58808] = { "Broadcom BCM58808 NetXtreme-S 10Gb/25Gb/40Gb/50Gb/100Gb Ethernet" },
@@ -156,6 +158,8 @@ static const struct {
 };
 
 static const struct pci_device_id bnxt_pci_tbl[] = {
+	{ PCI_VDEVICE(BROADCOM, 0x1604), .driver_data = BCM5745x_NPAR },
+	{ PCI_VDEVICE(BROADCOM, 0x1605), .driver_data = BCM5745x_NPAR },
 	{ PCI_VDEVICE(BROADCOM, 0x1614), .driver_data = BCM57454 },
 	{ PCI_VDEVICE(BROADCOM, 0x16c0), .driver_data = BCM57417_NPAR },
 	{ PCI_VDEVICE(BROADCOM, 0x16c8), .driver_data = BCM57301 },
@@ -209,6 +213,7 @@ MODULE_DEVICE_TABLE(pci, bnxt_pci_tbl);
 
 static const u16 bnxt_vf_req_snif[] = {
 	HWRM_FUNC_CFG,
+	HWRM_FUNC_VF_CFG,
 	HWRM_PORT_PHY_QCFG,
 	HWRM_CFA_L2_FILTER_ALLOC,
 };
@@ -1510,7 +1515,7 @@ static int bnxt_rx_pkt(struct bnxt *bp, struct bnxt_napi *bnapi, u32 *raw_cons,
 			       (struct rx_tpa_start_cmp_ext *)rxcmp1);
 
 		*event |= BNXT_RX_EVENT;
-		goto next_rx_no_prod;
+		goto next_rx_no_prod_no_len;
 
 	} else if (cmp_type == CMP_TYPE_RX_L2_TPA_END_CMP) {
 		skb = bnxt_tpa_end(bp, bnapi, &tmp_raw_cons,
@@ -1526,7 +1531,7 @@ static int bnxt_rx_pkt(struct bnxt *bp, struct bnxt_napi *bnapi, u32 *raw_cons,
 			rc = 1;
 		}
 		*event |= BNXT_RX_EVENT;
-		goto next_rx_no_prod;
+		goto next_rx_no_prod_no_len;
 	}
 
 	cons = rxcmp->rx_cmp_opaque;
@@ -1644,7 +1649,10 @@ next_rx:
 	rxr->rx_prod = NEXT_RX(prod);
 	rxr->rx_next_cons = NEXT_RX(cons);
 
-next_rx_no_prod:
+	cpr->rx_packets += 1;
+	cpr->rx_bytes += len;
+
+next_rx_no_prod_no_len:
 	*raw_cons = tmp_raw_cons;
 
 	return rc;
@@ -1706,12 +1714,16 @@ static int bnxt_async_event_process(struct bnxt *bp,
 
 		if (BNXT_VF(bp))
 			goto async_event_process_exit;
-		if (data1 & 0x20000) {
+
+		/* print unsupported speed warning in forced speed mode only */
+		if (!(link_info->autoneg & BNXT_AUTONEG_SPEED) &&
+		    (data1 & 0x20000)) {
 			u16 fw_speed = link_info->force_link_speed;
 			u32 speed = bnxt_fw_to_ethtool_speed(fw_speed);
 
-			netdev_warn(bp->dev, "Link speed %d no longer supported\n",
-				    speed);
+			if (speed != SPEED_UNKNOWN)
+				netdev_warn(bp->dev, "Link speed %d no longer supported\n",
+					    speed);
 		}
 		set_bit(BNXT_LINK_SPEED_CHNG_SP_EVENT, &bp->sp_event);
 		/* fall thru */
@@ -1798,6 +1810,7 @@ static irqreturn_t bnxt_msix(int irq, void *dev_instance)
 	struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
 	u32 cons = RING_CMP(cpr->cp_raw_cons);
 
+	cpr->event_ctr++;
 	prefetch(&cpr->cp_desc_ring[CP_RING(cons)][CP_IDX(cons)]);
 	napi_schedule(&bnapi->napi);
 	return IRQ_HANDLED;
@@ -2021,6 +2034,15 @@ static int bnxt_poll(struct napi_struct *napi, int budget)
 			break;
 		}
 	}
+	if (bp->flags & BNXT_FLAG_DIM) {
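+		/* Feed per-ring event/packet/byte counters to the generic
+		 * dynamic interrupt moderation (net_dim) algorithm.
+		 */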
+		struct net_dim_sample dim_sample;
+
+		net_dim_sample(cpr->event_ctr,
+			       cpr->rx_packets,
+			       cpr->rx_bytes,
+			       &dim_sample);
+		net_dim(&cpr->dim, dim_sample);
+	}
 	mmiowb();
 	return work_done;
 }
@@ -2243,6 +2265,9 @@ static void bnxt_free_rx_rings(struct bnxt *bp)
 		if (rxr->xdp_prog)
 			bpf_prog_put(rxr->xdp_prog);
 
+		if (xdp_rxq_info_is_reg(&rxr->xdp_rxq))
+			xdp_rxq_info_unreg(&rxr->xdp_rxq);
+
 		kfree(rxr->rx_tpa);
 		rxr->rx_tpa = NULL;
 
@@ -2276,6 +2301,10 @@ static int bnxt_alloc_rx_rings(struct bnxt *bp)
 
 		ring = &rxr->rx_ring_struct;
 
+		rc = xdp_rxq_info_reg(&rxr->xdp_rxq, bp->dev, i);
+		if (rc < 0)
+			return rc;
+
 		rc = bnxt_alloc_ring(bp, ring);
 		if (rc)
 			return rc;
@@ -2606,6 +2635,8 @@ static void bnxt_init_cp_rings(struct bnxt *bp)
 		struct bnxt_ring_struct *ring = &cpr->cp_ring_struct;
 
 		ring->fw_ring_id = INVALID_HW_RING_ID;
+		cpr->rx_ring_coal.coal_ticks = bp->rx_coal.coal_ticks;
+		cpr->rx_ring_coal.coal_bufs = bp->rx_coal.coal_bufs;
 	}
 }
 
@@ -2751,7 +2782,7 @@ void bnxt_set_tpa_flags(struct bnxt *bp)
 		return;
 	if (bp->dev->features & NETIF_F_LRO)
 		bp->flags |= BNXT_FLAG_LRO;
-	if (bp->dev->features & NETIF_F_GRO)
+	else if (bp->dev->features & NETIF_F_GRO_HW)
 		bp->flags |= BNXT_FLAG_GRO;
 }
 
@@ -2830,6 +2861,9 @@ void bnxt_set_ring_params(struct bnxt *bp)
 	bp->cp_ring_mask = bp->cp_bit - 1;
 }
 
+/* Changing allocation mode of RX rings.
+ * TODO: Update when extending xdp_rxq_info to support allocation modes.
+ */
 int bnxt_set_rx_skb_mode(struct bnxt *bp, bool page_mode)
 {
 	if (page_mode) {
@@ -2839,10 +2873,10 @@ int bnxt_set_rx_skb_mode(struct bnxt *bp, bool page_mode)
 			min_t(u16, bp->max_mtu, BNXT_MAX_PAGE_MODE_MTU);
 		bp->flags &= ~BNXT_FLAG_AGG_RINGS;
 		bp->flags |= BNXT_FLAG_NO_AGG_RINGS | BNXT_FLAG_RX_PAGE_MODE;
-		bp->dev->hw_features &= ~NETIF_F_LRO;
-		bp->dev->features &= ~NETIF_F_LRO;
 		bp->rx_dir = DMA_BIDIRECTIONAL;
 		bp->rx_skb_func = bnxt_rx_page_skb;
+		/* Disable LRO or GRO_HW */
+		netdev_update_features(bp->dev);
 	} else {
 		bp->dev->max_mtu = bp->max_mtu;
 		bp->flags &= ~BNXT_FLAG_RX_PAGE_MODE;
@@ -4469,6 +4503,42 @@ static void bnxt_hwrm_ring_free(struct bnxt *bp, bool close_path)
 	}
 }
 
+static int bnxt_hwrm_get_rings(struct bnxt *bp)
+{
+	struct hwrm_func_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
+	struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
+	struct hwrm_func_qcfg_input req = {0};
+	int rc;
+
+	if (bp->hwrm_spec_code < 0x10601)
+		return 0;
+
+	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_QCFG, -1, -1);
+	req.fid = cpu_to_le16(0xffff);
+	mutex_lock(&bp->hwrm_cmd_lock);
+	rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+	if (rc) {
+		mutex_unlock(&bp->hwrm_cmd_lock);
+		return -EIO;
+	}
+
+	hw_resc->resv_tx_rings = le16_to_cpu(resp->alloc_tx_rings);
+	if (bp->flags & BNXT_FLAG_NEW_RM) {
+		u16 cp, stats;
+
+		hw_resc->resv_rx_rings = le16_to_cpu(resp->alloc_rx_rings);
+		hw_resc->resv_hw_ring_grps =
+			le32_to_cpu(resp->alloc_hw_ring_grps);
+		hw_resc->resv_vnics = le16_to_cpu(resp->alloc_vnics);
+		cp = le16_to_cpu(resp->alloc_cmpl_rings);
+		stats = le16_to_cpu(resp->alloc_stat_ctx);
+		cp = min_t(u16, cp, stats);
+		hw_resc->resv_cp_rings = cp;
+	}
+	mutex_unlock(&bp->hwrm_cmd_lock);
+	return 0;
+}
+
 /* Caller must hold bp->hwrm_cmd_lock */
 int __bnxt_hwrm_get_tx_rings(struct bnxt *bp, u16 fid, int *tx_rings)
 {
@@ -4488,55 +4558,283 @@ int __bnxt_hwrm_get_tx_rings(struct bnxt *bp, u16 fid, int *tx_rings)
 	return rc;
 }
 
-static int bnxt_hwrm_reserve_tx_rings(struct bnxt *bp, int *tx_rings)
+static int
+bnxt_hwrm_reserve_pf_rings(struct bnxt *bp, int tx_rings, int rx_rings,
+			   int ring_grps, int cp_rings, int vnics)
 {
 	struct hwrm_func_cfg_input req = {0};
+	u32 enables = 0;
 	int rc;
 
-	if (bp->hwrm_spec_code < 0x10601)
+	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_CFG, -1, -1);
+	req.fid = cpu_to_le16(0xffff);
+	enables |= tx_rings ? FUNC_CFG_REQ_ENABLES_NUM_TX_RINGS : 0;
+	req.num_tx_rings = cpu_to_le16(tx_rings);
+	if (bp->flags & BNXT_FLAG_NEW_RM) {
+		enables |= rx_rings ? FUNC_CFG_REQ_ENABLES_NUM_RX_RINGS : 0;
+		enables |= cp_rings ? FUNC_CFG_REQ_ENABLES_NUM_CMPL_RINGS |
+				      FUNC_CFG_REQ_ENABLES_NUM_STAT_CTXS : 0;
+		enables |= ring_grps ?
+			   FUNC_CFG_REQ_ENABLES_NUM_HW_RING_GRPS : 0;
+		enables |= vnics ? FUNC_VF_CFG_REQ_ENABLES_NUM_VNICS : 0;
+
+		req.num_rx_rings = cpu_to_le16(rx_rings);
+		req.num_hw_ring_grps = cpu_to_le16(ring_grps);
+		req.num_cmpl_rings = cpu_to_le16(cp_rings);
+		req.num_stat_ctxs = req.num_cmpl_rings;
+		req.num_vnics = cpu_to_le16(vnics);
+	}
+	if (!enables)
 		return 0;
 
-	if (BNXT_VF(bp))
+	req.enables = cpu_to_le32(enables);
+	rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+	if (rc)
+		return -ENOMEM;
+
+	if (bp->hwrm_spec_code < 0x10601)
+		bp->hw_resc.resv_tx_rings = tx_rings;
+
+	rc = bnxt_hwrm_get_rings(bp);
+	return rc;
+}
+
+static int
+bnxt_hwrm_reserve_vf_rings(struct bnxt *bp, int tx_rings, int rx_rings,
+			   int ring_grps, int cp_rings, int vnics)
+{
+	struct hwrm_func_vf_cfg_input req = {0};
+	u32 enables = 0;
+	int rc;
+
+	if (!(bp->flags & BNXT_FLAG_NEW_RM)) {
+		bp->hw_resc.resv_tx_rings = tx_rings;
 		return 0;
+	}
 
-	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_CFG, -1, -1);
-	req.fid = cpu_to_le16(0xffff);
-	req.enables = cpu_to_le32(FUNC_CFG_REQ_ENABLES_NUM_TX_RINGS);
-	req.num_tx_rings = cpu_to_le16(*tx_rings);
+	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_VF_CFG, -1, -1);
+	enables |= tx_rings ? FUNC_VF_CFG_REQ_ENABLES_NUM_TX_RINGS : 0;
+	enables |= rx_rings ? FUNC_VF_CFG_REQ_ENABLES_NUM_RX_RINGS : 0;
+	enables |= cp_rings ? FUNC_VF_CFG_REQ_ENABLES_NUM_CMPL_RINGS |
+			      FUNC_VF_CFG_REQ_ENABLES_NUM_STAT_CTXS : 0;
+	enables |= ring_grps ? FUNC_VF_CFG_REQ_ENABLES_NUM_HW_RING_GRPS : 0;
+	enables |= vnics ? FUNC_VF_CFG_REQ_ENABLES_NUM_VNICS : 0;
+
+	req.num_tx_rings = cpu_to_le16(tx_rings);
+	req.num_rx_rings = cpu_to_le16(rx_rings);
+	req.num_hw_ring_grps = cpu_to_le16(ring_grps);
+	req.num_cmpl_rings = cpu_to_le16(cp_rings);
+	req.num_stat_ctxs = req.num_cmpl_rings;
+	req.num_vnics = cpu_to_le16(vnics);
+
+	req.enables = cpu_to_le32(enables);
 	rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
 	if (rc)
+		return -ENOMEM;
+
+	rc = bnxt_hwrm_get_rings(bp);
+	return rc;
+}
+
+static int bnxt_hwrm_reserve_rings(struct bnxt *bp, int tx, int rx, int grp,
+				   int cp, int vnic)
+{
+	if (BNXT_PF(bp))
+		return bnxt_hwrm_reserve_pf_rings(bp, tx, rx, grp, cp, vnic);
+	else
+		return bnxt_hwrm_reserve_vf_rings(bp, tx, rx, grp, cp, vnic);
+}
+
+static int bnxt_trim_rings(struct bnxt *bp, int *rx, int *tx, int max,
+			   bool shared);
+
+static int __bnxt_reserve_rings(struct bnxt *bp)
+{
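+	/* Reserve rings with the FW, then trim the driver's ring counts to
+	 * what the FW actually granted.
+	 */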
+	struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
+	int tx = bp->tx_nr_rings;
+	int rx = bp->rx_nr_rings;
+	int cp = bp->cp_nr_rings;
+	int grp, rx_rings, rc;
+	bool sh = false;
+	int vnic = 1;
+
+	if (bp->hwrm_spec_code < 0x10601)
+		return 0;
+
+	if (bp->flags & BNXT_FLAG_SHARED_RINGS)
+		sh = true;
+	if (bp->flags & BNXT_FLAG_RFS)
+		vnic = rx + 1;
+	if (bp->flags & BNXT_FLAG_AGG_RINGS)
+		rx <<= 1;
+
+	grp = bp->rx_nr_rings;
+	if (tx == hw_resc->resv_tx_rings &&
+	    (!(bp->flags & BNXT_FLAG_NEW_RM) ||
+	      (rx == hw_resc->resv_rx_rings &&
+	       grp == hw_resc->resv_hw_ring_grps &&
+	       cp == hw_resc->resv_cp_rings && vnic == hw_resc->resv_vnics)))
+		return 0;
+
+	rc = bnxt_hwrm_reserve_rings(bp, tx, rx, grp, cp, vnic);
+	if (rc)
 		return rc;
 
-	mutex_lock(&bp->hwrm_cmd_lock);
-	rc = __bnxt_hwrm_get_tx_rings(bp, 0xffff, tx_rings);
-	mutex_unlock(&bp->hwrm_cmd_lock);
-	if (!rc)
-		bp->tx_reserved_rings = *tx_rings;
+	tx = hw_resc->resv_tx_rings;
+	if (bp->flags & BNXT_FLAG_NEW_RM) {
+		rx = hw_resc->resv_rx_rings;
+		cp = hw_resc->resv_cp_rings;
+		grp = hw_resc->resv_hw_ring_grps;
+		vnic = hw_resc->resv_vnics;
+	}
+
+	rx_rings = rx;
+	if (bp->flags & BNXT_FLAG_AGG_RINGS) {
+		if (rx >= 2) {
+			rx_rings = rx >> 1;
+		} else {
+			if (netif_running(bp->dev))
+				return -ENOMEM;
+
+			bp->flags &= ~BNXT_FLAG_AGG_RINGS;
+			bp->flags |= BNXT_FLAG_NO_AGG_RINGS;
+			bp->dev->hw_features &= ~NETIF_F_LRO;
+			bp->dev->features &= ~NETIF_F_LRO;
+			bnxt_set_ring_params(bp);
+		}
+	}
+	rx_rings = min_t(int, rx_rings, grp);
+	rc = bnxt_trim_rings(bp, &rx_rings, &tx, cp, sh);
+	if (bp->flags & BNXT_FLAG_AGG_RINGS)
+		rx = rx_rings << 1;
+	cp = sh ? max_t(int, tx, rx_rings) : tx + rx_rings;
+	bp->tx_nr_rings = tx;
+	bp->rx_nr_rings = rx_rings;
+	bp->cp_nr_rings = cp;
+
+	if (!tx || !rx || !cp || !grp || !vnic)
+		return -ENOMEM;
+
 	return rc;
 }
 
-static int bnxt_hwrm_check_tx_rings(struct bnxt *bp, int tx_rings)
+static bool bnxt_need_reserve_rings(struct bnxt *bp)
 {
-	struct hwrm_func_cfg_input req = {0};
+	struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
+	int rx = bp->rx_nr_rings;
+	int vnic = 1;
+
+	if (bp->hwrm_spec_code < 0x10601)
+		return false;
+
+	if (hw_resc->resv_tx_rings != bp->tx_nr_rings)
+		return true;
+
+	if (bp->flags & BNXT_FLAG_RFS)
+		vnic = rx + 1;
+	if (bp->flags & BNXT_FLAG_AGG_RINGS)
+		rx <<= 1;
+	if ((bp->flags & BNXT_FLAG_NEW_RM) &&
+	    (hw_resc->resv_rx_rings != rx ||
+	     hw_resc->resv_cp_rings != bp->cp_nr_rings ||
+	     hw_resc->resv_vnics != vnic))
+		return true;
+	return false;
+}
+
+static int bnxt_hwrm_check_vf_rings(struct bnxt *bp, int tx_rings, int rx_rings,
+				    int ring_grps, int cp_rings)
+{
+	struct hwrm_func_vf_cfg_input req = {0};
+	u32 flags, enables;
 	int rc;
 
-	if (bp->hwrm_spec_code < 0x10801)
+	if (!(bp->flags & BNXT_FLAG_NEW_RM))
 		return 0;
 
-	if (BNXT_VF(bp))
-		return 0;
+	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_VF_CFG, -1, -1);
+	flags = FUNC_VF_CFG_REQ_FLAGS_TX_ASSETS_TEST |
+		FUNC_VF_CFG_REQ_FLAGS_RX_ASSETS_TEST |
+		FUNC_VF_CFG_REQ_FLAGS_CMPL_ASSETS_TEST |
+		FUNC_VF_CFG_REQ_FLAGS_RING_GRP_ASSETS_TEST |
+		FUNC_VF_CFG_REQ_FLAGS_STAT_CTX_ASSETS_TEST |
+		FUNC_VF_CFG_REQ_FLAGS_VNIC_ASSETS_TEST;
+	enables = FUNC_VF_CFG_REQ_ENABLES_NUM_TX_RINGS |
+		  FUNC_VF_CFG_REQ_ENABLES_NUM_RX_RINGS |
+		  FUNC_VF_CFG_REQ_ENABLES_NUM_CMPL_RINGS |
+		  FUNC_VF_CFG_REQ_ENABLES_NUM_HW_RING_GRPS |
+		  FUNC_VF_CFG_REQ_ENABLES_NUM_STAT_CTXS |
+		  FUNC_VF_CFG_REQ_ENABLES_NUM_VNICS;
+
+	req.flags = cpu_to_le32(flags);
+	req.enables = cpu_to_le32(enables);
+	req.num_tx_rings = cpu_to_le16(tx_rings);
+	req.num_rx_rings = cpu_to_le16(rx_rings);
+	req.num_cmpl_rings = cpu_to_le16(cp_rings);
+	req.num_hw_ring_grps = cpu_to_le16(ring_grps);
+	req.num_stat_ctxs = cpu_to_le16(cp_rings);
+	req.num_vnics = cpu_to_le16(1);
+	if (bp->flags & BNXT_FLAG_RFS)
+		req.num_vnics = cpu_to_le16(rx_rings + 1);
+	rc = hwrm_send_message_silent(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+	if (rc)
+		return -ENOMEM;
+	return 0;
+}
+
+static int bnxt_hwrm_check_pf_rings(struct bnxt *bp, int tx_rings, int rx_rings,
+				    int ring_grps, int cp_rings)
+{
+	struct hwrm_func_cfg_input req = {0};
+	u32 flags, enables;
+	int rc;
 
 	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_CFG, -1, -1);
 	req.fid = cpu_to_le16(0xffff);
-	req.flags = cpu_to_le32(FUNC_CFG_REQ_FLAGS_TX_ASSETS_TEST);
-	req.enables = cpu_to_le32(FUNC_CFG_REQ_ENABLES_NUM_TX_RINGS);
+	flags = FUNC_CFG_REQ_FLAGS_TX_ASSETS_TEST;
+	enables = FUNC_CFG_REQ_ENABLES_NUM_TX_RINGS;
 	req.num_tx_rings = cpu_to_le16(tx_rings);
+	if (bp->flags & BNXT_FLAG_NEW_RM) {
+		flags |= FUNC_CFG_REQ_FLAGS_RX_ASSETS_TEST |
+			 FUNC_CFG_REQ_FLAGS_CMPL_ASSETS_TEST |
+			 FUNC_CFG_REQ_FLAGS_RING_GRP_ASSETS_TEST |
+			 FUNC_CFG_REQ_FLAGS_STAT_CTX_ASSETS_TEST |
+			 FUNC_CFG_REQ_FLAGS_VNIC_ASSETS_TEST;
+		enables |= FUNC_CFG_REQ_ENABLES_NUM_RX_RINGS |
+			   FUNC_CFG_REQ_ENABLES_NUM_CMPL_RINGS |
+			   FUNC_CFG_REQ_ENABLES_NUM_HW_RING_GRPS |
+			   FUNC_CFG_REQ_ENABLES_NUM_STAT_CTXS |
+			   FUNC_CFG_REQ_ENABLES_NUM_VNICS;
+		req.num_rx_rings = cpu_to_le16(rx_rings);
+		req.num_cmpl_rings = cpu_to_le16(cp_rings);
+		req.num_hw_ring_grps = cpu_to_le16(ring_grps);
+		req.num_stat_ctxs = cpu_to_le16(cp_rings);
+		req.num_vnics = cpu_to_le16(1);
+		if (bp->flags & BNXT_FLAG_RFS)
+			req.num_vnics = cpu_to_le16(rx_rings + 1);
+	}
+	req.flags = cpu_to_le32(flags);
+	req.enables = cpu_to_le32(enables);
 	rc = hwrm_send_message_silent(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
 	if (rc)
 		return -ENOMEM;
 	return 0;
 }
 
+static int bnxt_hwrm_check_rings(struct bnxt *bp, int tx_rings, int rx_rings,
+				 int ring_grps, int cp_rings)
+{
+	if (bp->hwrm_spec_code < 0x10801)
+		return 0;
+
+	if (BNXT_PF(bp))
+		return bnxt_hwrm_check_pf_rings(bp, tx_rings, rx_rings,
+						ring_grps, cp_rings);
+
+	return bnxt_hwrm_check_vf_rings(bp, tx_rings, rx_rings, ring_grps,
+					cp_rings);
+}
+
 static void bnxt_hwrm_set_coal_params(struct bnxt_coal *hw_coal,
 	struct hwrm_ring_cmpl_ring_cfg_aggint_params_input *req)
 {
@@ -4579,6 +4877,36 @@ static void bnxt_hwrm_set_coal_params(struct bnxt_coal *hw_coal,
 	req->flags = cpu_to_le16(flags);
 }
 
+int bnxt_hwrm_set_ring_coal(struct bnxt *bp, struct bnxt_napi *bnapi)
+{
+	struct hwrm_ring_cmpl_ring_cfg_aggint_params_input req_rx = {0};
+	struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
+	struct bnxt_coal coal;
+	unsigned int grp_idx;
+
+	/* Tick values in microseconds.
+	 * 1 coal_buf x bufs_per_record = 1 completion record.
+	 */
+	memcpy(&coal, &bp->rx_coal, sizeof(struct bnxt_coal));
+
+	coal.coal_ticks = cpr->rx_ring_coal.coal_ticks;
+	coal.coal_bufs = cpr->rx_ring_coal.coal_bufs;
+
+	if (!bnapi->rx_ring)
+		return -ENODEV;
+
+	bnxt_hwrm_cmd_hdr_init(bp, &req_rx,
+			       HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS, -1, -1);
+
+	bnxt_hwrm_set_coal_params(&coal, &req_rx);
+
+	grp_idx = bnapi->index;
+	req_rx.ring_id = cpu_to_le16(bp->grp_info[grp_idx].cp_fw_ring_id);
+
+	return hwrm_send_message(bp, &req_rx, sizeof(req_rx),
+				 HWRM_CMD_TIMEOUT);
+}
+
 int bnxt_hwrm_set_coal(struct bnxt *bp)
 {
 	int i, rc = 0;
@@ -4732,11 +5060,60 @@ func_qcfg_exit:
 	return rc;
 }
 
-static int bnxt_hwrm_func_qcaps(struct bnxt *bp)
+static int bnxt_hwrm_func_resc_qcaps(struct bnxt *bp)
+{
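+	/* Query the min/max resource ranges managed by the newer FW resource
+	 * manager (HWRM spec >= 0x10803).
+	 */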
+	struct hwrm_func_resource_qcaps_output *resp = bp->hwrm_cmd_resp_addr;
+	struct hwrm_func_resource_qcaps_input req = {0};
+	struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
+	int rc;
+
+	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_RESOURCE_QCAPS, -1, -1);
+	req.fid = cpu_to_le16(0xffff);
+
+	mutex_lock(&bp->hwrm_cmd_lock);
+	rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+	if (rc) {
+		rc = -EIO;
+		goto hwrm_func_resc_qcaps_exit;
+	}
+
+	hw_resc->min_rsscos_ctxs = le16_to_cpu(resp->min_rsscos_ctx);
+	hw_resc->max_rsscos_ctxs = le16_to_cpu(resp->max_rsscos_ctx);
+	hw_resc->min_cp_rings = le16_to_cpu(resp->min_cmpl_rings);
+	hw_resc->max_cp_rings = le16_to_cpu(resp->max_cmpl_rings);
+	hw_resc->min_tx_rings = le16_to_cpu(resp->min_tx_rings);
+	hw_resc->max_tx_rings = le16_to_cpu(resp->max_tx_rings);
+	hw_resc->min_rx_rings = le16_to_cpu(resp->min_rx_rings);
+	hw_resc->max_rx_rings = le16_to_cpu(resp->max_rx_rings);
+	hw_resc->min_hw_ring_grps = le16_to_cpu(resp->min_hw_ring_grps);
+	hw_resc->max_hw_ring_grps = le16_to_cpu(resp->max_hw_ring_grps);
+	hw_resc->min_l2_ctxs = le16_to_cpu(resp->min_l2_ctxs);
+	hw_resc->max_l2_ctxs = le16_to_cpu(resp->max_l2_ctxs);
+	hw_resc->min_vnics = le16_to_cpu(resp->min_vnics);
+	hw_resc->max_vnics = le16_to_cpu(resp->max_vnics);
+	hw_resc->min_stat_ctxs = le16_to_cpu(resp->min_stat_ctx);
+	hw_resc->max_stat_ctxs = le16_to_cpu(resp->max_stat_ctx);
+
+	if (BNXT_PF(bp)) {
+		struct bnxt_pf_info *pf = &bp->pf;
+
+		pf->vf_resv_strategy =
+			le16_to_cpu(resp->vf_reservation_strategy);
+		if (pf->vf_resv_strategy > BNXT_VF_RESV_STRATEGY_MINIMAL)
+			pf->vf_resv_strategy = BNXT_VF_RESV_STRATEGY_MAXIMAL;
+	}
+hwrm_func_resc_qcaps_exit:
+	mutex_unlock(&bp->hwrm_cmd_lock);
+	return rc;
+}
+
+static int __bnxt_hwrm_func_qcaps(struct bnxt *bp)
 {
 	int rc = 0;
 	struct hwrm_func_qcaps_input req = {0};
 	struct hwrm_func_qcaps_output *resp = bp->hwrm_cmd_resp_addr;
+	struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
+	u32 flags;
 
 	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_QCAPS, -1, -1);
 	req.fid = cpu_to_le16(0xffff);
@@ -4746,16 +5123,27 @@ static int bnxt_hwrm_func_qcaps(struct bnxt *bp)
 	if (rc)
 		goto hwrm_func_qcaps_exit;
 
-	if (resp->flags & cpu_to_le32(FUNC_QCAPS_RESP_FLAGS_ROCE_V1_SUPPORTED))
+	flags = le32_to_cpu(resp->flags);
+	if (flags & FUNC_QCAPS_RESP_FLAGS_ROCE_V1_SUPPORTED)
 		bp->flags |= BNXT_FLAG_ROCEV1_CAP;
-	if (resp->flags & cpu_to_le32(FUNC_QCAPS_RESP_FLAGS_ROCE_V2_SUPPORTED))
+	if (flags & FUNC_QCAPS_RESP_FLAGS_ROCE_V2_SUPPORTED)
 		bp->flags |= BNXT_FLAG_ROCEV2_CAP;
 
 	bp->tx_push_thresh = 0;
-	if (resp->flags &
-	    cpu_to_le32(FUNC_QCAPS_RESP_FLAGS_PUSH_MODE_SUPPORTED))
+	if (flags & FUNC_QCAPS_RESP_FLAGS_PUSH_MODE_SUPPORTED)
 		bp->tx_push_thresh = BNXT_TX_PUSH_THRESH;
 
+	hw_resc->max_rsscos_ctxs = le16_to_cpu(resp->max_rsscos_ctx);
+	hw_resc->max_cp_rings = le16_to_cpu(resp->max_cmpl_rings);
+	hw_resc->max_tx_rings = le16_to_cpu(resp->max_tx_rings);
+	hw_resc->max_rx_rings = le16_to_cpu(resp->max_rx_rings);
+	hw_resc->max_hw_ring_grps = le32_to_cpu(resp->max_hw_ring_grps);
+	if (!hw_resc->max_hw_ring_grps)
+		hw_resc->max_hw_ring_grps = hw_resc->max_tx_rings;
+	hw_resc->max_l2_ctxs = le16_to_cpu(resp->max_l2_ctxs);
+	hw_resc->max_vnics = le16_to_cpu(resp->max_vnics);
+	hw_resc->max_stat_ctxs = le16_to_cpu(resp->max_stat_ctx);
+
 	if (BNXT_PF(bp)) {
 		struct bnxt_pf_info *pf = &bp->pf;
 
@@ -4763,16 +5151,6 @@ static int bnxt_hwrm_func_qcaps(struct bnxt *bp)
 		pf->port_id = le16_to_cpu(resp->port_id);
 		bp->dev->dev_port = pf->port_id;
 		memcpy(pf->mac_addr, resp->mac_address, ETH_ALEN);
-		pf->max_rsscos_ctxs = le16_to_cpu(resp->max_rsscos_ctx);
-		pf->max_cp_rings = le16_to_cpu(resp->max_cmpl_rings);
-		pf->max_tx_rings = le16_to_cpu(resp->max_tx_rings);
-		pf->max_rx_rings = le16_to_cpu(resp->max_rx_rings);
-		pf->max_hw_ring_grps = le32_to_cpu(resp->max_hw_ring_grps);
-		if (!pf->max_hw_ring_grps)
-			pf->max_hw_ring_grps = pf->max_tx_rings;
-		pf->max_l2_ctxs = le16_to_cpu(resp->max_l2_ctxs);
-		pf->max_vnics = le16_to_cpu(resp->max_vnics);
-		pf->max_stat_ctxs = le16_to_cpu(resp->max_stat_ctx);
 		pf->first_vf_id = le16_to_cpu(resp->first_vf_id);
 		pf->max_vfs = le16_to_cpu(resp->max_vfs);
 		pf->max_encap_records = le32_to_cpu(resp->max_encap_records);
@@ -4781,26 +5159,13 @@ static int bnxt_hwrm_func_qcaps(struct bnxt *bp)
 		pf->max_tx_wm_flows = le32_to_cpu(resp->max_tx_wm_flows);
 		pf->max_rx_em_flows = le32_to_cpu(resp->max_rx_em_flows);
 		pf->max_rx_wm_flows = le32_to_cpu(resp->max_rx_wm_flows);
-		if (resp->flags &
-		    cpu_to_le32(FUNC_QCAPS_RESP_FLAGS_WOL_MAGICPKT_SUPPORTED))
+		if (flags & FUNC_QCAPS_RESP_FLAGS_WOL_MAGICPKT_SUPPORTED)
 			bp->flags |= BNXT_FLAG_WOL_CAP;
 	} else {
 #ifdef CONFIG_BNXT_SRIOV
 		struct bnxt_vf_info *vf = &bp->vf;
 
 		vf->fw_fid = le16_to_cpu(resp->fid);
-
-		vf->max_rsscos_ctxs = le16_to_cpu(resp->max_rsscos_ctx);
-		vf->max_cp_rings = le16_to_cpu(resp->max_cmpl_rings);
-		vf->max_tx_rings = le16_to_cpu(resp->max_tx_rings);
-		vf->max_rx_rings = le16_to_cpu(resp->max_rx_rings);
-		vf->max_hw_ring_grps = le32_to_cpu(resp->max_hw_ring_grps);
-		if (!vf->max_hw_ring_grps)
-			vf->max_hw_ring_grps = vf->max_tx_rings;
-		vf->max_l2_ctxs = le16_to_cpu(resp->max_l2_ctxs);
-		vf->max_vnics = le16_to_cpu(resp->max_vnics);
-		vf->max_stat_ctxs = le16_to_cpu(resp->max_stat_ctx);
-
 		memcpy(vf->mac_addr, resp->mac_address, ETH_ALEN);
 #endif
 	}
@@ -4810,6 +5175,21 @@ hwrm_func_qcaps_exit:
 	return rc;
 }
 
+static int bnxt_hwrm_func_qcaps(struct bnxt *bp)
+{
+	int rc;
+
+	rc = __bnxt_hwrm_func_qcaps(bp);
+	if (rc)
+		return rc;
+	if (bp->hwrm_spec_code >= 0x10803) {
+		rc = bnxt_hwrm_func_resc_qcaps(bp);
+		if (!rc)
+			bp->flags |= BNXT_FLAG_NEW_RM;
+	}
+	return 0;
+}
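
The 0x10803 gate is easier to read once you see how bnxt_hwrm_ver_get()
below packs the interface version into bp->hwrm_spec_code: maj << 16 |
min << 8 | upd, so 0x10803 is spec 1.8.3, which this patch treats as the
first interface with HWRM_FUNC_RESOURCE_QCAPS. A self-contained sketch:

#include <stdio.h>

/* Pack an interface version the way bp->hwrm_spec_code is built */
static unsigned int spec_code(unsigned char maj, unsigned char min,
			      unsigned char upd)
{
	return (unsigned int)maj << 16 | min << 8 | upd;
}

int main(void)
{
	printf("1.8.3 -> 0x%x, new_rm on 1.9.0: %d\n", spec_code(1, 8, 3),
	       spec_code(1, 9, 0) >= 0x10803);
	return 0;
}
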
+
 static int bnxt_hwrm_func_reset(struct bnxt *bp)
 {
 	struct hwrm_func_reset_input req = {0};
@@ -4879,23 +5259,24 @@ static int bnxt_hwrm_ver_get(struct bnxt *bp)
 
 	memcpy(&bp->ver_resp, resp, sizeof(struct hwrm_ver_get_output));
 
-	bp->hwrm_spec_code = resp->hwrm_intf_maj << 16 |
-			     resp->hwrm_intf_min << 8 | resp->hwrm_intf_upd;
-	if (resp->hwrm_intf_maj < 1) {
+	bp->hwrm_spec_code = resp->hwrm_intf_maj_8b << 16 |
+			     resp->hwrm_intf_min_8b << 8 |
+			     resp->hwrm_intf_upd_8b;
+	if (resp->hwrm_intf_maj_8b < 1) {
 		netdev_warn(bp->dev, "HWRM interface %d.%d.%d is older than 1.0.0.\n",
-			    resp->hwrm_intf_maj, resp->hwrm_intf_min,
-			    resp->hwrm_intf_upd);
+			    resp->hwrm_intf_maj_8b, resp->hwrm_intf_min_8b,
+			    resp->hwrm_intf_upd_8b);
 		netdev_warn(bp->dev, "Please update firmware with HWRM interface 1.0.0 or newer.\n");
 	}
 	snprintf(bp->fw_ver_str, BC_HWRM_STR_LEN, "%d.%d.%d.%d",
-		 resp->hwrm_fw_maj, resp->hwrm_fw_min, resp->hwrm_fw_bld,
-		 resp->hwrm_fw_rsvd);
+		 resp->hwrm_fw_maj_8b, resp->hwrm_fw_min_8b,
+		 resp->hwrm_fw_bld_8b, resp->hwrm_fw_rsvd_8b);
 
 	bp->hwrm_cmd_timeout = le16_to_cpu(resp->def_req_timeout);
 	if (!bp->hwrm_cmd_timeout)
 		bp->hwrm_cmd_timeout = DFLT_HWRM_CMD_TIMEOUT;
 
-	if (resp->hwrm_intf_maj >= 1)
+	if (resp->hwrm_intf_maj_8b >= 1)
 		bp->hwrm_max_req_len = le16_to_cpu(resp->max_req_win_len);
 
 	bp->chip_num = le16_to_cpu(resp->chip_num);
@@ -5031,6 +5412,28 @@ static int bnxt_hwrm_set_br_mode(struct bnxt *bp, u16 br_mode)
 	return rc;
 }
 
+static int bnxt_hwrm_set_cache_line_size(struct bnxt *bp, int size)
+{
+	struct hwrm_func_cfg_input req = {0};
+	int rc;
+
+	if (BNXT_VF(bp) || bp->hwrm_spec_code < 0x10803)
+		return 0;
+
+	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_CFG, -1, -1);
+	req.fid = cpu_to_le16(0xffff);
+	req.enables = cpu_to_le32(FUNC_CFG_REQ_ENABLES_CACHE_LINESIZE);
+	req.cache_linesize = FUNC_QCFG_RESP_CACHE_LINESIZE_CACHE_LINESIZE_64;
+	if (size == 128)
+		req.cache_linesize =
+			FUNC_QCFG_RESP_CACHE_LINESIZE_CACHE_LINESIZE_128;
+
+	rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+	if (rc)
+		rc = -EIO;
+	return rc;
+}
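
A small sketch of the selection above: the device is told a 64-byte cache
line unless the host reports exactly 128 bytes. The enum values are
stand-ins for the FUNC_CFG cache_linesize constants:

#include <stdio.h>

enum { SKETCH_LINESIZE_64, SKETCH_LINESIZE_128 };	/* stand-in values */

/* Mirror the 64/128 choice made in bnxt_hwrm_set_cache_line_size() */
static int pick_linesize(int cpu_cache_line_size)
{
	return cpu_cache_line_size == 128 ? SKETCH_LINESIZE_128
					  : SKETCH_LINESIZE_64;
}

int main(void)
{
	printf("%d %d %d\n", pick_linesize(32), pick_linesize(64),
	       pick_linesize(128));	/* 0 0 1 */
	return 0;
}
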
+
 static int bnxt_setup_vnic(struct bnxt *bp, u16 vnic_id)
 {
 	struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id];
@@ -5166,15 +5569,6 @@ static int bnxt_init_chip(struct bnxt *bp, bool irq_re_init)
 				   rc);
 			goto err_out;
 		}
-		if (bp->tx_reserved_rings != bp->tx_nr_rings) {
-			int tx = bp->tx_nr_rings;
-
-			if (bnxt_hwrm_reserve_tx_rings(bp, &tx) ||
-			    tx < bp->tx_nr_rings) {
-				rc = -ENOMEM;
-				goto err_out;
-			}
-		}
 	}
 
 	rc = bnxt_hwrm_ring_alloc(bp);
@@ -5394,79 +5788,45 @@ static int bnxt_setup_int_mode(struct bnxt *bp)
 #ifdef CONFIG_RFS_ACCEL
 static unsigned int bnxt_get_max_func_rss_ctxs(struct bnxt *bp)
 {
-#if defined(CONFIG_BNXT_SRIOV)
-	if (BNXT_VF(bp))
-		return bp->vf.max_rsscos_ctxs;
-#endif
-	return bp->pf.max_rsscos_ctxs;
+	return bp->hw_resc.max_rsscos_ctxs;
 }
 
 static unsigned int bnxt_get_max_func_vnics(struct bnxt *bp)
 {
-#if defined(CONFIG_BNXT_SRIOV)
-	if (BNXT_VF(bp))
-		return bp->vf.max_vnics;
-#endif
-	return bp->pf.max_vnics;
+	return bp->hw_resc.max_vnics;
 }
 #endif
 
 unsigned int bnxt_get_max_func_stat_ctxs(struct bnxt *bp)
 {
-#if defined(CONFIG_BNXT_SRIOV)
-	if (BNXT_VF(bp))
-		return bp->vf.max_stat_ctxs;
-#endif
-	return bp->pf.max_stat_ctxs;
+	return bp->hw_resc.max_stat_ctxs;
 }
 
 void bnxt_set_max_func_stat_ctxs(struct bnxt *bp, unsigned int max)
 {
-#if defined(CONFIG_BNXT_SRIOV)
-	if (BNXT_VF(bp))
-		bp->vf.max_stat_ctxs = max;
-	else
-#endif
-		bp->pf.max_stat_ctxs = max;
+	bp->hw_resc.max_stat_ctxs = max;
 }
 
 unsigned int bnxt_get_max_func_cp_rings(struct bnxt *bp)
 {
-#if defined(CONFIG_BNXT_SRIOV)
-	if (BNXT_VF(bp))
-		return bp->vf.max_cp_rings;
-#endif
-	return bp->pf.max_cp_rings;
+	return bp->hw_resc.max_cp_rings;
 }
 
 void bnxt_set_max_func_cp_rings(struct bnxt *bp, unsigned int max)
 {
-#if defined(CONFIG_BNXT_SRIOV)
-	if (BNXT_VF(bp))
-		bp->vf.max_cp_rings = max;
-	else
-#endif
-		bp->pf.max_cp_rings = max;
+	bp->hw_resc.max_cp_rings = max;
 }
 
 static unsigned int bnxt_get_max_func_irqs(struct bnxt *bp)
 {
-#if defined(CONFIG_BNXT_SRIOV)
-	if (BNXT_VF(bp))
-		return min_t(unsigned int, bp->vf.max_irqs,
-			     bp->vf.max_cp_rings);
-#endif
-	return min_t(unsigned int, bp->pf.max_irqs, bp->pf.max_cp_rings);
+	struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
+
+	return min_t(unsigned int, hw_resc->max_irqs, hw_resc->max_cp_rings);
 }
 
 void bnxt_set_max_func_irqs(struct bnxt *bp, unsigned int max_irqs)
 {
-#if defined(CONFIG_BNXT_SRIOV)
-	if (BNXT_VF(bp))
-		bp->vf.max_irqs = max_irqs;
-	else
-#endif
-		bp->pf.max_irqs = max_irqs;
+	bp->hw_resc.max_irqs = max_irqs;
 }
 
 static int bnxt_init_msix(struct bnxt *bp)
@@ -5567,6 +5927,36 @@ static void bnxt_clear_int_mode(struct bnxt *bp)
 	bp->flags &= ~BNXT_FLAG_USING_MSIX;
 }
 
+static int bnxt_reserve_rings(struct bnxt *bp)
+{
+	int orig_cp = bp->hw_resc.resv_cp_rings;
+	int tcs = netdev_get_num_tc(bp->dev);
+	int rc;
+
+	if (!bnxt_need_reserve_rings(bp))
+		return 0;
+
+	rc = __bnxt_reserve_rings(bp);
+	if (rc) {
+		netdev_err(bp->dev, "ring reservation failure rc: %d\n", rc);
+		return rc;
+	}
+	if ((bp->flags & BNXT_FLAG_NEW_RM) && bp->cp_nr_rings > orig_cp) {
+		bnxt_clear_int_mode(bp);
+		rc = bnxt_init_int_mode(bp);
+		if (rc)
+			return rc;
+	}
+	if (tcs && (bp->tx_nr_rings_per_tc * tcs != bp->tx_nr_rings)) {
+		netdev_err(bp->dev, "tx ring reservation failure\n");
+		netdev_reset_tc(bp->dev);
+		bp->tx_nr_rings_per_tc = bp->tx_nr_rings;
+		return -ENOMEM;
+	}
+	bp->num_stat_ctxs = bp->cp_nr_rings;
+	return 0;
+}
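
The TC check above is a divisibility test: after reservation, the TX rings
must still split evenly across the configured traffic classes, or the TC
setup is backed out. A sketch under that assumed semantics:

#include <stdbool.h>
#include <stdio.h>

/* Assumed rule from bnxt_reserve_rings(): with tcs classes, the rings
 * granted by firmware must equal per_tc * tcs.
 */
static bool tc_rings_ok(int per_tc, int tcs, int total)
{
	return !tcs || per_tc * tcs == total;
}

int main(void)
{
	printf("%d\n", tc_rings_ok(4, 2, 8));	/* 1: 2 TCs x 4 rings each */
	printf("%d\n", tc_rings_ok(4, 2, 6));	/* 0: firmware trimmed rings */
	return 0;
}
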
+
 static void bnxt_free_irq(struct bnxt *bp)
 {
 	struct bnxt_irq *irq;
@@ -5692,8 +6082,14 @@ static void bnxt_disable_napi(struct bnxt *bp)
 	if (!bp->bnapi)
 		return;
 
-	for (i = 0; i < bp->cp_nr_rings; i++)
+	for (i = 0; i < bp->cp_nr_rings; i++) {
+		struct bnxt_cp_ring_info *cpr = &bp->bnapi[i]->cp_ring;
+
+		if (bp->bnapi[i]->rx_ring)
+			cancel_work_sync(&cpr->dim.work);
+
 		napi_disable(&bp->bnapi[i]->napi);
+	}
 }
 
 static void bnxt_enable_napi(struct bnxt *bp)
@@ -5701,7 +6097,13 @@ static void bnxt_enable_napi(struct bnxt *bp)
 	int i;
 
 	for (i = 0; i < bp->cp_nr_rings; i++) {
+		struct bnxt_cp_ring_info *cpr = &bp->bnapi[i]->cp_ring;
 		bp->bnapi[i]->in_reset = false;
+
+		if (bp->bnapi[i]->rx_ring) {
+			INIT_WORK(&cpr->dim.work, bnxt_dim_work);
+			cpr->dim.mode = NET_DIM_CQ_PERIOD_MODE_START_FROM_EQE;
+		}
 		napi_enable(&bp->bnapi[i]->napi);
 	}
 }
@@ -6311,6 +6713,10 @@ static int __bnxt_open_nic(struct bnxt *bp, bool irq_re_init, bool link_re_init)
 	bnxt_preset_reg_win(bp);
 	netif_carrier_off(bp->dev);
 	if (irq_re_init) {
+		rc = bnxt_reserve_rings(bp);
+		if (rc)
+			return rc;
+
 		rc = bnxt_setup_int_mode(bp);
 		if (rc) {
 			netdev_err(bp->dev, "bnxt_setup_int_mode err: %x\n",
@@ -6446,23 +6852,13 @@ static bool bnxt_drv_busy(struct bnxt *bp)
 		test_bit(BNXT_STATE_READ_STATS, &bp->state));
 }
 
-int bnxt_close_nic(struct bnxt *bp, bool irq_re_init, bool link_re_init)
+static void __bnxt_close_nic(struct bnxt *bp, bool irq_re_init,
+			     bool link_re_init)
 {
-	int rc = 0;
-
-#ifdef CONFIG_BNXT_SRIOV
-	if (bp->sriov_cfg) {
-		rc = wait_event_interruptible_timeout(bp->sriov_cfg_wait,
-						      !bp->sriov_cfg,
-						      BNXT_SRIOV_CFG_WAIT_TMO);
-		if (rc)
-			netdev_warn(bp->dev, "timeout waiting for SRIOV config operation to complete!\n");
-	}
-
 	/* Close the VF-reps before closing PF */
 	if (BNXT_PF(bp))
 		bnxt_vf_reps_close(bp);
-#endif
+
 	/* Change device state to avoid TX queue wake up's */
 	bnxt_tx_disable(bp);
 
@@ -6485,6 +6881,22 @@ int bnxt_close_nic(struct bnxt *bp, bool irq_re_init, bool link_re_init)
 		bnxt_del_napi(bp);
 	}
 	bnxt_free_mem(bp, irq_re_init);
+}
+
+int bnxt_close_nic(struct bnxt *bp, bool irq_re_init, bool link_re_init)
+{
+	int rc = 0;
+
+#ifdef CONFIG_BNXT_SRIOV
+	if (bp->sriov_cfg) {
+		rc = wait_event_interruptible_timeout(bp->sriov_cfg_wait,
+						      !bp->sriov_cfg,
+						      BNXT_SRIOV_CFG_WAIT_TMO);
+		if (rc)
+			netdev_warn(bp->dev, "timeout waiting for SRIOV config operation to complete!\n");
+	}
+#endif
+	__bnxt_close_nic(bp, irq_re_init, link_re_init);
 	return rc;
 }
 
@@ -6764,13 +7176,26 @@ static bool bnxt_rfs_capable(struct bnxt *bp)
 	if (bp->flags & BNXT_FLAG_NEW_RSS_CAP)
 		max_rss_ctxs = max_vnics;
 	if (vnics > max_vnics || vnics > max_rss_ctxs) {
-		netdev_warn(bp->dev,
-			    "Not enough resources to support NTUPLE filters, enough resources for up to %d rx rings\n",
-			    min(max_rss_ctxs - 1, max_vnics - 1));
+		if (bp->rx_nr_rings > 1)
+			netdev_warn(bp->dev,
+				    "Not enough resources to support NTUPLE filters, enough resources for up to %d rx rings\n",
+				    min(max_rss_ctxs - 1, max_vnics - 1));
 		return false;
 	}
 
-	return true;
+	if (!(bp->flags & BNXT_FLAG_NEW_RM))
+		return true;
+
+	if (vnics == bp->hw_resc.resv_vnics)
+		return true;
+
+	bnxt_hwrm_reserve_rings(bp, 0, 0, 0, 0, vnics);
+	if (vnics <= bp->hw_resc.resv_vnics)
+		return true;
+
+	netdev_warn(bp->dev, "Unable to reserve resources to support NTUPLE filters.\n");
+	bnxt_hwrm_reserve_rings(bp, 0, 0, 0, 0, 1);
+	return false;
 #else
 	return false;
 #endif
@@ -6784,6 +7209,15 @@ static netdev_features_t bnxt_fix_features(struct net_device *dev,
 	if ((features & NETIF_F_NTUPLE) && !bnxt_rfs_capable(bp))
 		features &= ~NETIF_F_NTUPLE;
 
+	if (bp->flags & BNXT_FLAG_NO_AGG_RINGS)
+		features &= ~(NETIF_F_LRO | NETIF_F_GRO_HW);
+
+	if (!(features & NETIF_F_GRO))
+		features &= ~NETIF_F_GRO_HW;
+
+	if (features & NETIF_F_GRO_HW)
+		features &= ~NETIF_F_LRO;
+
 	/* Both CTAG and STAG VLAN acceleration on the RX side have to be
 	 * turned on or off together.
 	 */
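
The three checks added to bnxt_fix_features() encode a dependency chain:
hardware GRO requires software GRO, and when hardware GRO is on it
supersedes LRO (the aggregation-ring restriction is left out here). A
compact sketch with stand-in flag values, not the real NETIF_F_* bits:

#include <stdio.h>

#define F_GRO    0x1u
#define F_GRO_HW 0x2u
#define F_LRO    0x4u

static unsigned int sketch_fix_features(unsigned int f)
{
	if (!(f & F_GRO))
		f &= ~F_GRO_HW;	/* GRO_HW depends on GRO */
	if (f & F_GRO_HW)
		f &= ~F_LRO;	/* GRO_HW and LRO are mutually exclusive */
	return f;
}

int main(void)
{
	printf("0x%x\n", sketch_fix_features(F_GRO_HW | F_LRO));	 /* 0x4 */
	printf("0x%x\n", sketch_fix_features(F_GRO | F_GRO_HW | F_LRO)); /* 0x3 */
	return 0;
}
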
@@ -6817,9 +7251,9 @@ static int bnxt_set_features(struct net_device *dev, netdev_features_t features)
 	bool update_tpa = false;
 
 	flags &= ~BNXT_FLAG_ALL_CONFIG_FEATS;
-	if ((features & NETIF_F_GRO) && !BNXT_CHIP_TYPE_NITRO_A0(bp))
+	if (features & NETIF_F_GRO_HW)
 		flags |= BNXT_FLAG_GRO;
-	if (features & NETIF_F_LRO)
+	else if (features & NETIF_F_LRO)
 		flags |= BNXT_FLAG_LRO;
 
 	if (bp->flags & BNXT_FLAG_NO_AGG_RINGS)
@@ -7096,7 +7530,8 @@ int bnxt_check_rings(struct bnxt *bp, int tx, int rx, bool sh, int tcs,
 {
 	int max_rx, max_tx, tx_sets = 1;
 	int tx_rings_needed;
-	int rc;
+	int rx_rings = rx;
+	int cp, rc;
 
 	if (tcs)
 		tx_sets = tcs;
@@ -7112,7 +7547,10 @@ int bnxt_check_rings(struct bnxt *bp, int tx, int rx, bool sh, int tcs,
 	if (max_tx < tx_rings_needed)
 		return -ENOMEM;
 
-	return bnxt_hwrm_check_tx_rings(bp, tx_rings_needed);
+	if (bp->flags & BNXT_FLAG_AGG_RINGS)
+		rx_rings <<= 1;
+	cp = sh ? max_t(int, tx_rings_needed, rx) : tx_rings_needed + rx;
+	return bnxt_hwrm_check_rings(bp, tx_rings_needed, rx_rings, rx, cp);
 }
 
 static void bnxt_unmap_bars(struct bnxt *bp, struct pci_dev *pdev)
@@ -7346,7 +7784,8 @@ static int bnxt_setup_tc_block_cb(enum tc_setup_type type, void *type_data,
 {
 	struct bnxt *bp = cb_priv;
 
-	if (!bnxt_tc_flower_enabled(bp) || !tc_can_offload(bp->dev))
+	if (!bnxt_tc_flower_enabled(bp) ||
+	    !tc_cls_can_offload_and_chain0(bp->dev, type_data))
 		return -EOPNOTSUPP;
 
 	switch (type) {
@@ -7717,12 +8156,8 @@ int bnxt_port_attr_get(struct bnxt *bp, struct switchdev_attr *attr)
 
 	switch (attr->id) {
 	case SWITCHDEV_ATTR_ID_PORT_PARENT_ID:
-		/* In SRIOV each PF-pool (PF + child VFs) serves as a
-		 * switching domain, the PF's perm mac-addr can be used
-		 * as the unique parent-id
-		 */
-		attr->u.ppid.id_len = ETH_ALEN;
-		ether_addr_copy(attr->u.ppid.id, bp->pf.mac_addr);
+		attr->u.ppid.id_len = sizeof(bp->switch_id);
+		memcpy(attr->u.ppid.id, bp->switch_id, attr->u.ppid.id_len);
 		break;
 	default:
 		return -EOPNOTSUPP;
@@ -7800,8 +8235,6 @@ static void bnxt_remove_one(struct pci_dev *pdev)
 	bnxt_dcb_free(bp);
 	kfree(bp->edev);
 	bp->edev = NULL;
-	if (bp->xdp_prog)
-		bpf_prog_put(bp->xdp_prog);
 	bnxt_cleanup_pci(bp);
 	free_netdev(dev);
 }
@@ -7869,24 +8302,14 @@ static int bnxt_get_max_irq(struct pci_dev *pdev)
 static void _bnxt_get_max_rings(struct bnxt *bp, int *max_rx, int *max_tx,
 				int *max_cp)
 {
+	struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
 	int max_ring_grps = 0;
 
-#ifdef CONFIG_BNXT_SRIOV
-	if (!BNXT_PF(bp)) {
-		*max_tx = bp->vf.max_tx_rings;
-		*max_rx = bp->vf.max_rx_rings;
-		*max_cp = min_t(int, bp->vf.max_irqs, bp->vf.max_cp_rings);
-		*max_cp = min_t(int, *max_cp, bp->vf.max_stat_ctxs);
-		max_ring_grps = bp->vf.max_hw_ring_grps;
-	} else
-#endif
-	{
-		*max_tx = bp->pf.max_tx_rings;
-		*max_rx = bp->pf.max_rx_rings;
-		*max_cp = min_t(int, bp->pf.max_irqs, bp->pf.max_cp_rings);
-		*max_cp = min_t(int, *max_cp, bp->pf.max_stat_ctxs);
-		max_ring_grps = bp->pf.max_hw_ring_grps;
-	}
+	*max_tx = hw_resc->max_tx_rings;
+	*max_rx = hw_resc->max_rx_rings;
+	*max_cp = min_t(int, hw_resc->max_irqs, hw_resc->max_cp_rings);
+	*max_cp = min_t(int, *max_cp, hw_resc->max_stat_ctxs);
+	max_ring_grps = hw_resc->max_hw_ring_grps;
 	if (BNXT_CHIP_TYPE_NITRO_A0(bp) && BNXT_PF(bp)) {
 		*max_cp -= 1;
 		*max_rx -= 2;
@@ -7922,8 +8345,8 @@ static int bnxt_get_dflt_rings(struct bnxt *bp, int *max_rx, int *max_tx,
 		if (rc)
 			return rc;
 		bp->flags |= BNXT_FLAG_NO_AGG_RINGS;
-		bp->dev->hw_features &= ~NETIF_F_LRO;
-		bp->dev->features &= ~NETIF_F_LRO;
+		bp->dev->hw_features &= ~(NETIF_F_LRO | NETIF_F_GRO_HW);
+		bp->dev->features &= ~(NETIF_F_LRO | NETIF_F_GRO_HW);
 		bnxt_set_ring_params(bp);
 	}
 
@@ -7951,6 +8374,17 @@ static int bnxt_get_dflt_rings(struct bnxt *bp, int *max_rx, int *max_tx,
 	return rc;
 }
 
+/* In the initial default shared ring setting, each shared ring must have
+ * an RX/TX ring pair.
+ */
+static void bnxt_trim_dflt_sh_rings(struct bnxt *bp)
+{
+	bp->cp_nr_rings = min_t(int, bp->tx_nr_rings_per_tc, bp->rx_nr_rings);
+	bp->rx_nr_rings = bp->cp_nr_rings;
+	bp->tx_nr_rings_per_tc = bp->cp_nr_rings;
+	bp->tx_nr_rings = bp->tx_nr_rings_per_tc;
+}
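
A worked example of the trim: with 8 default RX rings but only 4 TX rings
per TC, everything collapses to the smaller count so every shared
completion ring keeps a full RX/TX pair. The struct below is a plain-int
stand-in for the bnxt fields:

#include <stdio.h>

struct rings { int cp, rx, tx_per_tc, tx; };

/* Mirror bnxt_trim_dflt_sh_rings() on plain ints */
static void trim_shared(struct rings *r)
{
	r->cp = r->tx_per_tc < r->rx ? r->tx_per_tc : r->rx;
	r->rx = r->cp;
	r->tx_per_tc = r->cp;
	r->tx = r->cp;
}

int main(void)
{
	struct rings r = { .rx = 8, .tx_per_tc = 4 };

	trim_shared(&r);
	printf("cp=%d rx=%d tx=%d\n", r.cp, r.rx, r.tx);	/* 4 4 4 */
	return 0;
}
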
+
 static int bnxt_set_dflt_rings(struct bnxt *bp, bool sh)
 {
 	int dflt_rings, max_rx_rings, max_tx_rings, rc;
@@ -7966,14 +8400,26 @@ static int bnxt_set_dflt_rings(struct bnxt *bp, bool sh)
 		return rc;
 	bp->rx_nr_rings = min_t(int, dflt_rings, max_rx_rings);
 	bp->tx_nr_rings_per_tc = min_t(int, dflt_rings, max_tx_rings);
+	if (sh)
+		bnxt_trim_dflt_sh_rings(bp);
+	else
+		bp->cp_nr_rings = bp->tx_nr_rings_per_tc + bp->rx_nr_rings;
+	bp->tx_nr_rings = bp->tx_nr_rings_per_tc;
 
-	rc = bnxt_hwrm_reserve_tx_rings(bp, &bp->tx_nr_rings_per_tc);
+	rc = __bnxt_reserve_rings(bp);
 	if (rc)
 		netdev_warn(bp->dev, "Unable to reserve tx rings\n");
+	bp->tx_nr_rings_per_tc = bp->tx_nr_rings;
+	if (sh)
+		bnxt_trim_dflt_sh_rings(bp);
 
-	bp->tx_nr_rings = bp->tx_nr_rings_per_tc;
-	bp->cp_nr_rings = sh ? max_t(int, bp->tx_nr_rings, bp->rx_nr_rings) :
-			       bp->tx_nr_rings + bp->rx_nr_rings;
+	/* Rings may have been trimmed, re-reserve the trimmed rings. */
+	if (bnxt_need_reserve_rings(bp)) {
+		rc = __bnxt_reserve_rings(bp);
+		if (rc)
+			netdev_warn(bp->dev, "2nd rings reservation failed.\n");
+		bp->tx_nr_rings_per_tc = bp->tx_nr_rings;
+	}
 	bp->num_stat_ctxs = bp->cp_nr_rings;
 	if (BNXT_CHIP_TYPE_NITRO_A0(bp)) {
 		bp->rx_nr_rings++;
@@ -7982,11 +8428,23 @@ static int bnxt_set_dflt_rings(struct bnxt *bp, bool sh)
 	return rc;
 }
 
-void bnxt_restore_pf_fw_resources(struct bnxt *bp)
+int bnxt_restore_pf_fw_resources(struct bnxt *bp)
 {
+	int rc;
+
 	ASSERT_RTNL();
+	if (bnxt_ulp_registered(bp->edev, BNXT_ROCE_ULP))
+		return 0;
+
 	bnxt_hwrm_func_qcaps(bp);
-	bnxt_subtract_ulp_resources(bp, BNXT_ROCE_ULP);
+	__bnxt_close_nic(bp, true, false);
+	bnxt_clear_int_mode(bp);
+	rc = bnxt_init_int_mode(bp);
+	if (rc)
+		dev_close(bp->dev);
+	else
+		rc = bnxt_open_nic(bp, true, false);
+	return rc;
 }
 
 static int bnxt_init_mac_addr(struct bnxt *bp)
@@ -8000,7 +8458,7 @@ static int bnxt_init_mac_addr(struct bnxt *bp)
 		struct bnxt_vf_info *vf = &bp->vf;
 
 		if (is_valid_ether_addr(vf->mac_addr)) {
-			/* overwrite netdev dev_adr with admin VF MAC */
+			/* overwrite netdev dev_addr with admin VF MAC */
 			memcpy(bp->dev->dev_addr, vf->mac_addr, ETH_ALEN);
 		} else {
 			eth_hw_addr_random(bp->dev);
@@ -8106,7 +8564,11 @@ static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
 	dev->vlan_features = dev->hw_features | NETIF_F_HIGHDMA;
 	dev->hw_features |= NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_CTAG_TX |
 			    NETIF_F_HW_VLAN_STAG_RX | NETIF_F_HW_VLAN_STAG_TX;
+	if (!BNXT_CHIP_TYPE_NITRO_A0(bp))
+		dev->hw_features |= NETIF_F_GRO_HW;
 	dev->features |= dev->hw_features | NETIF_F_HIGHDMA;
+	if (dev->features & NETIF_F_GRO_HW)
+		dev->features &= ~NETIF_F_LRO;
 	dev->priv_flags |= IFF_UNICAST_FLT;
 
 #ifdef CONFIG_BNXT_SRIOV
@@ -8208,6 +8670,8 @@ static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
 	else
 		device_set_wakeup_capable(&pdev->dev, false);
 
+	bnxt_hwrm_set_cache_line_size(bp, cache_line_size());
+
 	if (BNXT_PF(bp)) {
 		if (!bnxt_pf_wq) {
 			bnxt_pf_wq =
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.h b/drivers/net/ethernet/broadcom/bnxt/bnxt.h
index 5359a1f0045f..1989c470172c 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.h
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.h
@@ -1,7 +1,7 @@
 /* Broadcom NetXtreme-C/E network driver.
  *
  * Copyright (c) 2014-2016 Broadcom Corporation
- * Copyright (c) 2016-2017 Broadcom Limited
+ * Copyright (c) 2016-2018 Broadcom Limited
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License as published by
@@ -12,10 +12,10 @@
 #define BNXT_H
 
 #define DRV_MODULE_NAME		"bnxt_en"
-#define DRV_MODULE_VERSION	"1.8.0"
+#define DRV_MODULE_VERSION	"1.9.0"
 
 #define DRV_VER_MAJ	1
-#define DRV_VER_MIN	8
+#define DRV_VER_MIN	9
 #define DRV_VER_UPD	0
 
 #include <linux/interrupt.h>
@@ -23,6 +23,8 @@
 #include <net/devlink.h>
 #include <net/dst_metadata.h>
 #include <net/switchdev.h>
+#include <net/xdp.h>
+#include <linux/net_dim.h>
 
 struct tx_bd {
 	__le32 tx_bd_len_flags_type;
@@ -607,6 +609,17 @@ struct bnxt_tx_ring_info {
 	struct bnxt_ring_struct	tx_ring_struct;
 };
 
+struct bnxt_coal {
+	u16			coal_ticks;
+	u16			coal_ticks_irq;
+	u16			coal_bufs;
+	u16			coal_bufs_irq;
+			/* RING_IDLE enabled when coal ticks < idle_thresh  */
+	u16			idle_thresh;
+	u8			bufs_per_record;
+	u8			budget;
+};
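
The RING_IDLE note above is a simple threshold rule; a one-line sketch of
the assumed semantics:

#include <stdio.h>

/* Assumed reading of the struct bnxt_coal comment */
static int ring_idle(unsigned int coal_ticks, unsigned int idle_thresh)
{
	return coal_ticks < idle_thresh;
}

int main(void)
{
	printf("%d %d\n", ring_idle(10, 25), ring_idle(50, 25));	/* 1 0 */
	return 0;
}
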
+
 struct bnxt_tpa_info {
 	void			*data;
 	u8			*data_ptr;
@@ -664,12 +677,20 @@ struct bnxt_rx_ring_info {
 
 	struct bnxt_ring_struct	rx_ring_struct;
 	struct bnxt_ring_struct	rx_agg_ring_struct;
+	struct xdp_rxq_info	xdp_rxq;
 };
 
 struct bnxt_cp_ring_info {
 	u32			cp_raw_cons;
 	void __iomem		*cp_doorbell;
 
+	struct bnxt_coal	rx_ring_coal;
+	u64			rx_packets;
+	u64			rx_bytes;
+	u64			event_ctr;
+
+	struct net_dim		dim;
+
 	struct tx_cmp		*cp_desc_ring[MAX_CP_PAGES];
 
 	dma_addr_t		cp_desc_mapping[MAX_CP_PAGES];
@@ -755,19 +776,38 @@ struct bnxt_vnic_info {
 #define BNXT_VNIC_RFS_NEW_RSS_FLAG	0x10
 };
 
-#if defined(CONFIG_BNXT_SRIOV)
-struct bnxt_vf_info {
-	u16	fw_fid;
-	u8	mac_addr[ETH_ALEN];
+struct bnxt_hw_resc {
+	u16	min_rsscos_ctxs;
 	u16	max_rsscos_ctxs;
+	u16	min_cp_rings;
 	u16	max_cp_rings;
+	u16	resv_cp_rings;
+	u16	min_tx_rings;
 	u16	max_tx_rings;
+	u16	resv_tx_rings;
+	u16	min_rx_rings;
 	u16	max_rx_rings;
+	u16	resv_rx_rings;
+	u16	min_hw_ring_grps;
 	u16	max_hw_ring_grps;
+	u16	resv_hw_ring_grps;
+	u16	min_l2_ctxs;
 	u16	max_l2_ctxs;
-	u16	max_irqs;
+	u16	min_vnics;
 	u16	max_vnics;
+	u16	resv_vnics;
+	u16	min_stat_ctxs;
 	u16	max_stat_ctxs;
+	u16	max_irqs;
+};
+
+#if defined(CONFIG_BNXT_SRIOV)
+struct bnxt_vf_info {
+	u16	fw_fid;
+	u8	mac_addr[ETH_ALEN];	/* PF assigned MAC Address */
+	u8	vf_mac_addr[ETH_ALEN];	/* VF assigned MAC address, only
+					 * stored by PF.
+					 */
 	u16	vlan;
 	u32	flags;
 #define BNXT_VF_QOS		0x1
@@ -788,15 +828,6 @@ struct bnxt_pf_info {
 	u16	fw_fid;
 	u16	port_id;
 	u8	mac_addr[ETH_ALEN];
-	u16	max_rsscos_ctxs;
-	u16	max_cp_rings;
-	u16	max_tx_rings; /* HW assigned max tx rings for this PF */
-	u16	max_rx_rings; /* HW assigned max rx rings for this PF */
-	u16	max_hw_ring_grps;
-	u16	max_irqs;
-	u16	max_l2_ctxs;
-	u16	max_vnics;
-	u16	max_stat_ctxs;
 	u32	first_vf_id;
 	u16	active_vfs;
 	u16	max_vfs;
@@ -808,6 +839,9 @@ struct bnxt_pf_info {
 	u32	max_rx_wm_flows;
 	unsigned long	*vf_event_bmap;
 	u16	hwrm_cmd_req_pages;
+	u8	vf_resv_strategy;
+#define BNXT_VF_RESV_STRATEGY_MAXIMAL	0
+#define BNXT_VF_RESV_STRATEGY_MINIMAL	1
 	void			*hwrm_cmd_req_addr[4];
 	dma_addr_t		hwrm_cmd_req_dma_addr[4];
 	struct bnxt_vf_info	*vf;
@@ -944,17 +978,6 @@ struct bnxt_test_info {
 #define BNXT_CAG_REG_LEGACY_INT_STATUS	0x4014
 #define BNXT_CAG_REG_BASE		0x300000
 
-struct bnxt_coal {
-	u16			coal_ticks;
-	u16			coal_ticks_irq;
-	u16			coal_bufs;
-	u16			coal_bufs_irq;
-			/* RING_IDLE enabled when coal ticks < idle_thresh  */
-	u16			idle_thresh;
-	u8			bufs_per_record;
-	u8			budget;
-};
-
 struct bnxt_tc_flow_stats {
 	u64		packets;
 	u64		bytes;
@@ -1126,6 +1149,8 @@ struct bnxt {
 	#define BNXT_FLAG_DOUBLE_DB	0x400000
 	#define BNXT_FLAG_FW_DCBX_AGENT	0x800000
 	#define BNXT_FLAG_CHIP_NITRO_A0	0x1000000
+	#define BNXT_FLAG_DIM		0x2000000
+	#define BNXT_FLAG_NEW_RM	0x8000000
 
 	#define BNXT_FLAG_ALL_CONFIG_FEATS (BNXT_FLAG_TPA |		\
 					    BNXT_FLAG_RFS |		\
@@ -1185,7 +1210,6 @@ struct bnxt {
 	int			tx_nr_rings;
 	int			tx_nr_rings_per_tc;
 	int			tx_nr_rings_xdp;
-	int			tx_reserved_rings;
 
 	int			tx_wake_thresh;
 	int			tx_push_thresh;
@@ -1297,6 +1321,7 @@ struct bnxt {
 #define BNXT_LINK_SPEED_CHNG_SP_EVENT	14
 #define BNXT_FLOW_STATS_SP_EVENT	15
 
+	struct bnxt_hw_resc	hw_resc;
 	struct bnxt_pf_info	pf;
 #ifdef CONFIG_BNXT_SRIOV
 	int			nr_vfs;
@@ -1346,6 +1371,7 @@ struct bnxt {
 	enum devlink_eswitch_mode eswitch_mode;
 	struct bnxt_vf_rep	**vf_reps; /* array of vf-rep ptrs */
 	u16			*cfa_code_map; /* cfa_code -> vf_idx map */
+	u8			switch_id[8];
 	struct bnxt_tc_info	*tc_info;
 };
 
@@ -1421,6 +1447,9 @@ int bnxt_check_rings(struct bnxt *bp, int tx, int rx, bool sh, int tcs,
 		     int tx_xdp);
 int bnxt_setup_mq_tc(struct net_device *dev, u8 tc);
 int bnxt_get_max_rings(struct bnxt *, int *, int *, bool);
-void bnxt_restore_pf_fw_resources(struct bnxt *bp);
+int bnxt_restore_pf_fw_resources(struct bnxt *bp);
 int bnxt_port_attr_get(struct bnxt *bp, struct switchdev_attr *attr);
+void bnxt_dim_work(struct work_struct *work);
+int bnxt_hwrm_set_ring_coal(struct bnxt *bp, struct bnxt_napi *bnapi);
+
 #endif
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_dcb.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_dcb.c
index fed37cd9ae1d..3c746f2d9ed8 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_dcb.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_dcb.c
@@ -278,12 +278,11 @@ static int bnxt_hwrm_set_dcbx_app(struct bnxt *bp, struct dcb_app *app,
 
 	n = IEEE_8021QAZ_MAX_TCS;
 	data_len = sizeof(*data) + sizeof(*fw_app) * n;
-	data = dma_alloc_coherent(&bp->pdev->dev, data_len, &mapping,
-				  GFP_KERNEL);
+	data = dma_zalloc_coherent(&bp->pdev->dev, data_len, &mapping,
+				   GFP_KERNEL);
 	if (!data)
 		return -ENOMEM;
 
-	memset(data, 0, data_len);
 	bnxt_hwrm_cmd_hdr_init(bp, &get, HWRM_FW_GET_STRUCTURED_DATA, -1, -1);
 	get.dest_data_addr = cpu_to_le64(mapping);
 	get.structure_id = cpu_to_le16(STRUCT_HDR_STRUCT_ID_DCBX_APP);
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_dim.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_dim.c
new file mode 100644
index 000000000000..408dd190331e
--- /dev/null
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_dim.c
@@ -0,0 +1,32 @@
+/* Broadcom NetXtreme-C/E network driver.
+ *
+ * Copyright (c) 2017-2018 Broadcom Limited
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation.
+ */
+
+#include <linux/net_dim.h>
+#include "bnxt_hsi.h"
+#include "bnxt.h"
+
+void bnxt_dim_work(struct work_struct *work)
+{
+	struct net_dim *dim = container_of(work, struct net_dim,
+					   work);
+	struct bnxt_cp_ring_info *cpr = container_of(dim,
+						     struct bnxt_cp_ring_info,
+						     dim);
+	struct bnxt_napi *bnapi = container_of(cpr,
+					       struct bnxt_napi,
+					       cp_ring);
+	struct net_dim_cq_moder cur_profile = net_dim_get_profile(dim->mode,
+								  dim->profile_ix);
+
+	cpr->rx_ring_coal.coal_ticks = cur_profile.usec;
+	cpr->rx_ring_coal.coal_bufs = cur_profile.pkts;
+
+	bnxt_hwrm_set_ring_coal(bnapi->bp, bnapi);
+	dim->state = NET_DIM_START_MEASURE;
+}
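
bnxt_dim_work() recovers its ring and NAPI contexts by walking back from
the embedded net_dim with container_of(). A self-contained illustration of
that pattern; the structures here are stand-ins, not the driver's:

#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct dim { int profile_ix; };
struct cp_ring { int ring_id; struct dim dim; };

int main(void)
{
	struct cp_ring cpr = { .ring_id = 3 };
	struct dim *d = &cpr.dim;

	/* Subtract the member offset to get back to the container */
	printf("ring_id = %d\n",
	       container_of(d, struct cp_ring, dim)->ring_id);
	return 0;
}
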
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
index b13ce5ebde8d..1801582076be 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
@@ -49,6 +49,8 @@ static int bnxt_get_coalesce(struct net_device *dev,
 
 	memset(coal, 0, sizeof(*coal));
 
+	coal->use_adaptive_rx_coalesce = bp->flags & BNXT_FLAG_DIM;
+
 	hw_coal = &bp->rx_coal;
 	mult = hw_coal->bufs_per_record;
 	coal->rx_coalesce_usecs = hw_coal->coal_ticks;
@@ -77,6 +79,15 @@ static int bnxt_set_coalesce(struct net_device *dev,
 	int rc = 0;
 	u16 mult;
 
+	if (coal->use_adaptive_rx_coalesce) {
+		bp->flags |= BNXT_FLAG_DIM;
+	} else {
+		if (bp->flags & BNXT_FLAG_DIM) {
+			bp->flags &= ~(BNXT_FLAG_DIM);
+			goto reset_coalesce;
+		}
+	}
+
 	hw_coal = &bp->rx_coal;
 	mult = hw_coal->bufs_per_record;
 	hw_coal->coal_ticks = coal->rx_coalesce_usecs;
@@ -104,6 +115,7 @@ static int bnxt_set_coalesce(struct net_device *dev,
 		update_stats = true;
 	}
 
+reset_coalesce:
 	if (netif_running(dev)) {
 		if (update_stats) {
 			rc = bnxt_close_nic(bp, true, false);
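
The adaptive-RX knob maps straight onto a driver flag: enabling it sets
BNXT_FLAG_DIM, disabling it clears the flag and jumps to reset_coalesce so
the static values are pushed back to firmware. From userspace this
corresponds to something like "ethtool -C <dev> adaptive-rx on". A sketch
of the toggle, with a stand-in flag value:

#include <stdio.h>

#define FLAG_DIM 0x2000000u	/* stand-in for BNXT_FLAG_DIM */

/* Returns nonzero when static coalescing must be re-applied */
static int set_adaptive_rx(unsigned int *flags, int on)
{
	if (on) {
		*flags |= FLAG_DIM;
		return 0;
	}
	if (*flags & FLAG_DIM) {
		*flags &= ~FLAG_DIM;
		return 1;
	}
	return 0;
}

int main(void)
{
	unsigned int flags = 0;
	int r1 = set_adaptive_rx(&flags, 1);	/* 0: DIM turned on */
	int r2 = set_adaptive_rx(&flags, 0);	/* 1: re-apply static values */

	printf("%d %d\n", r1, r2);
	return 0;
}
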
@@ -1376,6 +1388,9 @@ static int bnxt_firmware_reset(struct net_device *dev,
 		req.embedded_proc_type = FW_RESET_REQ_EMBEDDED_PROC_TYPE_CHIP;
 		req.selfrst_status = FW_RESET_REQ_SELFRST_STATUS_SELFRSTASAP;
 		break;
+	case BNXT_FW_RESET_AP:
+		req.embedded_proc_type = FW_RESET_REQ_EMBEDDED_PROC_TYPE_AP;
+		break;
 	default:
 		return -EINVAL;
 	}
@@ -2522,6 +2537,14 @@ static int bnxt_reset(struct net_device *dev, u32 *flags)
 		rc = bnxt_firmware_reset(dev, BNXT_FW_RESET_CHIP);
 		if (!rc)
 			netdev_info(dev, "Reset request successful. Reload driver to complete reset\n");
+	} else if (*flags == ETH_RESET_AP) {
+		/* This feature is not supported in older firmware versions */
+		if (bp->hwrm_spec_code < 0x10803)
+			return -EOPNOTSUPP;
+
+		rc = bnxt_firmware_reset(dev, BNXT_FW_RESET_AP);
+		if (!rc)
+			netdev_info(dev, "Reset Application Processor request successful.\n");
 	} else {
 		rc = -EINVAL;
 	}
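
The new branch takes effect only when ETH_RESET_AP is the sole requested
component and firmware is new enough. A sketch of that dispatch; the flag
value is copied from the uapi ethtool header as an assumption here:

#include <stdbool.h>
#include <stdio.h>

#define SKETCH_ETH_RESET_AP	(1U << 8)	/* assumed uapi value */

static bool can_reset_ap(unsigned int flags, unsigned int hwrm_spec_code)
{
	return flags == SKETCH_ETH_RESET_AP && hwrm_spec_code >= 0x10803;
}

int main(void)
{
	printf("%d\n", can_reset_ap(SKETCH_ETH_RESET_AP, 0x10900)); /* 1 */
	printf("%d\n", can_reset_ap(SKETCH_ETH_RESET_AP, 0x10800)); /* 0 */
	return 0;
}
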
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.h b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.h
index ff601b42fcc8..836ef682f24c 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.h
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.h
@@ -34,6 +34,7 @@ struct bnxt_led_cfg {
 #define BNXT_LED_DFLT_ENABLES(x)			\
 	cpu_to_le32(BNXT_LED_DFLT_ENA << (BNXT_LED_DFLT_ENA_SHIFT * (x)))
 
+#define BNXT_FW_RESET_AP	0xfffe
 #define BNXT_FW_RESET_CHIP	0xffff
 
 extern const struct ethtool_ops bnxt_ethtool_ops;
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_hsi.h b/drivers/net/ethernet/broadcom/bnxt/bnxt_hsi.h
index c99f4d0880e4..82d17f8cc0db 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_hsi.h
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_hsi.h
@@ -1,2437 +1,2700 @@
 /* Broadcom NetXtreme-C/E network driver.
  *
  * Copyright (c) 2014-2016 Broadcom Corporation
- * Copyright (c) 2016-2017 Broadcom Limited
+ * Copyright (c) 2016-2018 Broadcom Limited
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License as published by
  * the Free Software Foundation.
+ *
+ * DO NOT MODIFY!!! This file is automatically generated.
  */
 
-#ifndef BNXT_HSI_H
-#define BNXT_HSI_H
+#ifndef _BNXT_HSI_H_
+#define _BNXT_HSI_H_
+
+/* hwrm_cmd_hdr (size:128b/16B) */
+struct hwrm_cmd_hdr {
+	__le16	req_type;
+	__le16	cmpl_ring;
+	__le16	seq_id;
+	__le16	target_id;
+	__le64	resp_addr;
+};
+
+/* hwrm_resp_hdr (size:64b/8B) */
+struct hwrm_resp_hdr {
+	__le16	error_code;
+	__le16	req_type;
+	__le16	seq_id;
+	__le16	resp_len;
+};
+
+#define CMD_DISCR_TLV_ENCAP 0x8000UL
+#define CMD_DISCR_LAST     CMD_DISCR_TLV_ENCAP
+
+
+#define TLV_TYPE_HWRM_REQUEST                    0x1UL
+#define TLV_TYPE_HWRM_RESPONSE                   0x2UL
+#define TLV_TYPE_ROCE_SP_COMMAND                 0x3UL
+#define TLV_TYPE_ENGINE_CKV_DEVICE_SERIAL_NUMBER 0x8001UL
+#define TLV_TYPE_ENGINE_CKV_NONCE                0x8002UL
+#define TLV_TYPE_ENGINE_CKV_IV                   0x8003UL
+#define TLV_TYPE_ENGINE_CKV_AUTH_TAG             0x8004UL
+#define TLV_TYPE_ENGINE_CKV_CIPHERTEXT           0x8005UL
+#define TLV_TYPE_ENGINE_CKV_ALGORITHMS           0x8006UL
+#define TLV_TYPE_ENGINE_CKV_ECC_PUBLIC_KEY       0x8007UL
+#define TLV_TYPE_ENGINE_CKV_ECDSA_SIGNATURE      0x8008UL
+#define TLV_TYPE_LAST                           TLV_TYPE_ENGINE_CKV_ECDSA_SIGNATURE
+
+
+/* tlv (size:64b/8B) */
+struct tlv {
+	__le16	cmd_discr;
+	u8	reserved_8b;
+	u8	flags;
+	#define TLV_FLAGS_MORE         0x1UL
+	#define TLV_FLAGS_MORE_LAST      0x0UL
+	#define TLV_FLAGS_MORE_NOT_LAST  0x1UL
+	#define TLV_FLAGS_REQUIRED     0x2UL
+	#define TLV_FLAGS_REQUIRED_NO    (0x0UL << 1)
+	#define TLV_FLAGS_REQUIRED_YES   (0x1UL << 1)
+	#define TLV_FLAGS_REQUIRED_LAST TLV_FLAGS_REQUIRED_YES
+	__le16	tlv_type;
+	__le16	length;
+};
+
+/* input (size:128b/16B) */
+struct input {
+	__le16	req_type;
+	__le16	cmpl_ring;
+	__le16	seq_id;
+	__le16	target_id;
+	__le64	resp_addr;
+};
 
-/* HSI and HWRM Specification 1.8.3 */
-#define HWRM_VERSION_MAJOR	1
-#define HWRM_VERSION_MINOR	8
-#define HWRM_VERSION_UPDATE	3
+/* output (size:64b/8B) */
+struct output {
+	__le16	error_code;
+	__le16	req_type;
+	__le16	seq_id;
+	__le16	resp_len;
+};
 
-#define HWRM_VERSION_RSVD	1 /* non-zero means beta version */
+/* hwrm_short_input (size:128b/16B) */
+struct hwrm_short_input {
+	__le16	req_type;
+	__le16	signature;
+	#define SHORT_REQ_SIGNATURE_SHORT_CMD 0x4321UL
+	#define SHORT_REQ_SIGNATURE_LAST     SHORT_REQ_SIGNATURE_SHORT_CMD
+	__le16	unused_0;
+	__le16	size;
+	__le64	req_addr;
+};
 
-#define HWRM_VERSION_STR	"1.8.3.1"
-/*
- * Following is the signature for HWRM message field that indicates not
- * applicable (All F's). Need to cast it the size of the field if needed.
- */
-#define HWRM_NA_SIGNATURE	((__le32)(-1))
-#define HWRM_MAX_REQ_LEN    (128)  /* hwrm_func_buf_rgtr */
-#define HWRM_MAX_RESP_LEN    (280)  /* hwrm_selftest_qlist */
-#define HW_HASH_INDEX_SIZE      0x80    /* 7 bit indirection table index. */
-#define HW_HASH_KEY_SIZE	40
-#define HWRM_RESP_VALID_KEY      1 /* valid key for HWRM response */
-
-/* Statistics Ejection Buffer Completion Record (16 bytes) */
+/* cmd_nums (size:64b/8B) */
+struct cmd_nums {
+	__le16	req_type;
+	#define HWRM_VER_GET                              0x0UL
+	#define HWRM_FUNC_BUF_UNRGTR                      0xeUL
+	#define HWRM_FUNC_VF_CFG                          0xfUL
+	#define HWRM_RESERVED1                            0x10UL
+	#define HWRM_FUNC_RESET                           0x11UL
+	#define HWRM_FUNC_GETFID                          0x12UL
+	#define HWRM_FUNC_VF_ALLOC                        0x13UL
+	#define HWRM_FUNC_VF_FREE                         0x14UL
+	#define HWRM_FUNC_QCAPS                           0x15UL
+	#define HWRM_FUNC_QCFG                            0x16UL
+	#define HWRM_FUNC_CFG                             0x17UL
+	#define HWRM_FUNC_QSTATS                          0x18UL
+	#define HWRM_FUNC_CLR_STATS                       0x19UL
+	#define HWRM_FUNC_DRV_UNRGTR                      0x1aUL
+	#define HWRM_FUNC_VF_RESC_FREE                    0x1bUL
+	#define HWRM_FUNC_VF_VNIC_IDS_QUERY               0x1cUL
+	#define HWRM_FUNC_DRV_RGTR                        0x1dUL
+	#define HWRM_FUNC_DRV_QVER                        0x1eUL
+	#define HWRM_FUNC_BUF_RGTR                        0x1fUL
+	#define HWRM_PORT_PHY_CFG                         0x20UL
+	#define HWRM_PORT_MAC_CFG                         0x21UL
+	#define HWRM_PORT_TS_QUERY                        0x22UL
+	#define HWRM_PORT_QSTATS                          0x23UL
+	#define HWRM_PORT_LPBK_QSTATS                     0x24UL
+	#define HWRM_PORT_CLR_STATS                       0x25UL
+	#define HWRM_PORT_LPBK_CLR_STATS                  0x26UL
+	#define HWRM_PORT_PHY_QCFG                        0x27UL
+	#define HWRM_PORT_MAC_QCFG                        0x28UL
+	#define HWRM_PORT_MAC_PTP_QCFG                    0x29UL
+	#define HWRM_PORT_PHY_QCAPS                       0x2aUL
+	#define HWRM_PORT_PHY_I2C_WRITE                   0x2bUL
+	#define HWRM_PORT_PHY_I2C_READ                    0x2cUL
+	#define HWRM_PORT_LED_CFG                         0x2dUL
+	#define HWRM_PORT_LED_QCFG                        0x2eUL
+	#define HWRM_PORT_LED_QCAPS                       0x2fUL
+	#define HWRM_QUEUE_QPORTCFG                       0x30UL
+	#define HWRM_QUEUE_QCFG                           0x31UL
+	#define HWRM_QUEUE_CFG                            0x32UL
+	#define HWRM_FUNC_VLAN_CFG                        0x33UL
+	#define HWRM_FUNC_VLAN_QCFG                       0x34UL
+	#define HWRM_QUEUE_PFCENABLE_QCFG                 0x35UL
+	#define HWRM_QUEUE_PFCENABLE_CFG                  0x36UL
+	#define HWRM_QUEUE_PRI2COS_QCFG                   0x37UL
+	#define HWRM_QUEUE_PRI2COS_CFG                    0x38UL
+	#define HWRM_QUEUE_COS2BW_QCFG                    0x39UL
+	#define HWRM_QUEUE_COS2BW_CFG                     0x3aUL
+	#define HWRM_QUEUE_DSCP_QCAPS                     0x3bUL
+	#define HWRM_QUEUE_DSCP2PRI_QCFG                  0x3cUL
+	#define HWRM_QUEUE_DSCP2PRI_CFG                   0x3dUL
+	#define HWRM_VNIC_ALLOC                           0x40UL
+	#define HWRM_VNIC_FREE                            0x41UL
+	#define HWRM_VNIC_CFG                             0x42UL
+	#define HWRM_VNIC_QCFG                            0x43UL
+	#define HWRM_VNIC_TPA_CFG                         0x44UL
+	#define HWRM_VNIC_TPA_QCFG                        0x45UL
+	#define HWRM_VNIC_RSS_CFG                         0x46UL
+	#define HWRM_VNIC_RSS_QCFG                        0x47UL
+	#define HWRM_VNIC_PLCMODES_CFG                    0x48UL
+	#define HWRM_VNIC_PLCMODES_QCFG                   0x49UL
+	#define HWRM_VNIC_QCAPS                           0x4aUL
+	#define HWRM_RING_ALLOC                           0x50UL
+	#define HWRM_RING_FREE                            0x51UL
+	#define HWRM_RING_CMPL_RING_QAGGINT_PARAMS        0x52UL
+	#define HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS     0x53UL
+	#define HWRM_RING_RESET                           0x5eUL
+	#define HWRM_RING_GRP_ALLOC                       0x60UL
+	#define HWRM_RING_GRP_FREE                        0x61UL
+	#define HWRM_RESERVED5                            0x64UL
+	#define HWRM_RESERVED6                            0x65UL
+	#define HWRM_VNIC_RSS_COS_LB_CTX_ALLOC            0x70UL
+	#define HWRM_VNIC_RSS_COS_LB_CTX_FREE             0x71UL
+	#define HWRM_CFA_L2_FILTER_ALLOC                  0x90UL
+	#define HWRM_CFA_L2_FILTER_FREE                   0x91UL
+	#define HWRM_CFA_L2_FILTER_CFG                    0x92UL
+	#define HWRM_CFA_L2_SET_RX_MASK                   0x93UL
+	#define HWRM_CFA_VLAN_ANTISPOOF_CFG               0x94UL
+	#define HWRM_CFA_TUNNEL_FILTER_ALLOC              0x95UL
+	#define HWRM_CFA_TUNNEL_FILTER_FREE               0x96UL
+	#define HWRM_CFA_ENCAP_RECORD_ALLOC               0x97UL
+	#define HWRM_CFA_ENCAP_RECORD_FREE                0x98UL
+	#define HWRM_CFA_NTUPLE_FILTER_ALLOC              0x99UL
+	#define HWRM_CFA_NTUPLE_FILTER_FREE               0x9aUL
+	#define HWRM_CFA_NTUPLE_FILTER_CFG                0x9bUL
+	#define HWRM_CFA_EM_FLOW_ALLOC                    0x9cUL
+	#define HWRM_CFA_EM_FLOW_FREE                     0x9dUL
+	#define HWRM_CFA_EM_FLOW_CFG                      0x9eUL
+	#define HWRM_TUNNEL_DST_PORT_QUERY                0xa0UL
+	#define HWRM_TUNNEL_DST_PORT_ALLOC                0xa1UL
+	#define HWRM_TUNNEL_DST_PORT_FREE                 0xa2UL
+	#define HWRM_STAT_CTX_ALLOC                       0xb0UL
+	#define HWRM_STAT_CTX_FREE                        0xb1UL
+	#define HWRM_STAT_CTX_QUERY                       0xb2UL
+	#define HWRM_STAT_CTX_CLR_STATS                   0xb3UL
+	#define HWRM_FW_RESET                             0xc0UL
+	#define HWRM_FW_QSTATUS                           0xc1UL
+	#define HWRM_FW_SET_TIME                          0xc8UL
+	#define HWRM_FW_GET_TIME                          0xc9UL
+	#define HWRM_FW_SET_STRUCTURED_DATA               0xcaUL
+	#define HWRM_FW_GET_STRUCTURED_DATA               0xcbUL
+	#define HWRM_FW_IPC_MAILBOX                       0xccUL
+	#define HWRM_EXEC_FWD_RESP                        0xd0UL
+	#define HWRM_REJECT_FWD_RESP                      0xd1UL
+	#define HWRM_FWD_RESP                             0xd2UL
+	#define HWRM_FWD_ASYNC_EVENT_CMPL                 0xd3UL
+	#define HWRM_TEMP_MONITOR_QUERY                   0xe0UL
+	#define HWRM_WOL_FILTER_ALLOC                     0xf0UL
+	#define HWRM_WOL_FILTER_FREE                      0xf1UL
+	#define HWRM_WOL_FILTER_QCFG                      0xf2UL
+	#define HWRM_WOL_REASON_QCFG                      0xf3UL
+	#define HWRM_CFA_METER_PROFILE_ALLOC              0xf5UL
+	#define HWRM_CFA_METER_PROFILE_FREE               0xf6UL
+	#define HWRM_CFA_METER_PROFILE_CFG                0xf7UL
+	#define HWRM_CFA_METER_INSTANCE_ALLOC             0xf8UL
+	#define HWRM_CFA_METER_INSTANCE_FREE              0xf9UL
+	#define HWRM_CFA_VFR_ALLOC                        0xfdUL
+	#define HWRM_CFA_VFR_FREE                         0xfeUL
+	#define HWRM_CFA_VF_PAIR_ALLOC                    0x100UL
+	#define HWRM_CFA_VF_PAIR_FREE                     0x101UL
+	#define HWRM_CFA_VF_PAIR_INFO                     0x102UL
+	#define HWRM_CFA_FLOW_ALLOC                       0x103UL
+	#define HWRM_CFA_FLOW_FREE                        0x104UL
+	#define HWRM_CFA_FLOW_FLUSH                       0x105UL
+	#define HWRM_CFA_FLOW_STATS                       0x106UL
+	#define HWRM_CFA_FLOW_INFO                        0x107UL
+	#define HWRM_CFA_DECAP_FILTER_ALLOC               0x108UL
+	#define HWRM_CFA_DECAP_FILTER_FREE                0x109UL
+	#define HWRM_CFA_VLAN_ANTISPOOF_QCFG              0x10aUL
+	#define HWRM_CFA_REDIRECT_TUNNEL_TYPE_ALLOC       0x10bUL
+	#define HWRM_CFA_REDIRECT_TUNNEL_TYPE_FREE        0x10cUL
+	#define HWRM_CFA_PAIR_ALLOC                       0x10dUL
+	#define HWRM_CFA_PAIR_FREE                        0x10eUL
+	#define HWRM_CFA_PAIR_INFO                        0x10fUL
+	#define HWRM_FW_IPC_MSG                           0x110UL
+	#define HWRM_CFA_REDIRECT_TUNNEL_TYPE_INFO        0x111UL
+	#define HWRM_ENGINE_CKV_HELLO                     0x12dUL
+	#define HWRM_ENGINE_CKV_STATUS                    0x12eUL
+	#define HWRM_ENGINE_CKV_CKEK_ADD                  0x12fUL
+	#define HWRM_ENGINE_CKV_CKEK_DELETE               0x130UL
+	#define HWRM_ENGINE_CKV_KEY_ADD                   0x131UL
+	#define HWRM_ENGINE_CKV_KEY_DELETE                0x132UL
+	#define HWRM_ENGINE_CKV_FLUSH                     0x133UL
+	#define HWRM_ENGINE_CKV_RNG_GET                   0x134UL
+	#define HWRM_ENGINE_CKV_KEY_GEN                   0x135UL
+	#define HWRM_ENGINE_QG_CONFIG_QUERY               0x13cUL
+	#define HWRM_ENGINE_QG_QUERY                      0x13dUL
+	#define HWRM_ENGINE_QG_METER_PROFILE_CONFIG_QUERY 0x13eUL
+	#define HWRM_ENGINE_QG_METER_PROFILE_QUERY        0x13fUL
+	#define HWRM_ENGINE_QG_METER_PROFILE_ALLOC        0x140UL
+	#define HWRM_ENGINE_QG_METER_PROFILE_FREE         0x141UL
+	#define HWRM_ENGINE_QG_METER_QUERY                0x142UL
+	#define HWRM_ENGINE_QG_METER_BIND                 0x143UL
+	#define HWRM_ENGINE_QG_METER_UNBIND               0x144UL
+	#define HWRM_ENGINE_QG_FUNC_BIND                  0x145UL
+	#define HWRM_ENGINE_SG_CONFIG_QUERY               0x146UL
+	#define HWRM_ENGINE_SG_QUERY                      0x147UL
+	#define HWRM_ENGINE_SG_METER_QUERY                0x148UL
+	#define HWRM_ENGINE_SG_METER_CONFIG               0x149UL
+	#define HWRM_ENGINE_SG_QG_BIND                    0x14aUL
+	#define HWRM_ENGINE_QG_SG_UNBIND                  0x14bUL
+	#define HWRM_ENGINE_CONFIG_QUERY                  0x154UL
+	#define HWRM_ENGINE_STATS_CONFIG                  0x155UL
+	#define HWRM_ENGINE_STATS_CLEAR                   0x156UL
+	#define HWRM_ENGINE_STATS_QUERY                   0x157UL
+	#define HWRM_ENGINE_RQ_ALLOC                      0x15eUL
+	#define HWRM_ENGINE_RQ_FREE                       0x15fUL
+	#define HWRM_ENGINE_CQ_ALLOC                      0x160UL
+	#define HWRM_ENGINE_CQ_FREE                       0x161UL
+	#define HWRM_ENGINE_NQ_ALLOC                      0x162UL
+	#define HWRM_ENGINE_NQ_FREE                       0x163UL
+	#define HWRM_ENGINE_ON_DIE_RQE_CREDITS            0x164UL
+	#define HWRM_FUNC_RESOURCE_QCAPS                  0x190UL
+	#define HWRM_FUNC_VF_RESOURCE_CFG                 0x191UL
+	#define HWRM_SELFTEST_QLIST                       0x200UL
+	#define HWRM_SELFTEST_EXEC                        0x201UL
+	#define HWRM_SELFTEST_IRQ                         0x202UL
+	#define HWRM_SELFTEST_RETRIEVE_SERDES_DATA        0x203UL
+	#define HWRM_DBG_READ_DIRECT                      0xff10UL
+	#define HWRM_DBG_READ_INDIRECT                    0xff11UL
+	#define HWRM_DBG_WRITE_DIRECT                     0xff12UL
+	#define HWRM_DBG_WRITE_INDIRECT                   0xff13UL
+	#define HWRM_DBG_DUMP                             0xff14UL
+	#define HWRM_DBG_ERASE_NVM                        0xff15UL
+	#define HWRM_DBG_CFG                              0xff16UL
+	#define HWRM_DBG_COREDUMP_LIST                    0xff17UL
+	#define HWRM_DBG_COREDUMP_INITIATE                0xff18UL
+	#define HWRM_DBG_COREDUMP_RETRIEVE                0xff19UL
+	#define HWRM_NVM_FACTORY_DEFAULTS                 0xffeeUL
+	#define HWRM_NVM_VALIDATE_OPTION                  0xffefUL
+	#define HWRM_NVM_FLUSH                            0xfff0UL
+	#define HWRM_NVM_GET_VARIABLE                     0xfff1UL
+	#define HWRM_NVM_SET_VARIABLE                     0xfff2UL
+	#define HWRM_NVM_INSTALL_UPDATE                   0xfff3UL
+	#define HWRM_NVM_MODIFY                           0xfff4UL
+	#define HWRM_NVM_VERIFY_UPDATE                    0xfff5UL
+	#define HWRM_NVM_GET_DEV_INFO                     0xfff6UL
+	#define HWRM_NVM_ERASE_DIR_ENTRY                  0xfff7UL
+	#define HWRM_NVM_MOD_DIR_ENTRY                    0xfff8UL
+	#define HWRM_NVM_FIND_DIR_ENTRY                   0xfff9UL
+	#define HWRM_NVM_GET_DIR_ENTRIES                  0xfffaUL
+	#define HWRM_NVM_GET_DIR_INFO                     0xfffbUL
+	#define HWRM_NVM_RAW_DUMP                         0xfffcUL
+	#define HWRM_NVM_READ                             0xfffdUL
+	#define HWRM_NVM_WRITE                            0xfffeUL
+	#define HWRM_NVM_RAW_WRITE_BLK                    0xffffUL
+	#define HWRM_LAST                                HWRM_NVM_RAW_WRITE_BLK
+	__le16	unused_0[3];
+};
+
+/* ret_codes (size:64b/8B) */
+struct ret_codes {
+	__le16	error_code;
+	#define HWRM_ERR_CODE_SUCCESS                0x0UL
+	#define HWRM_ERR_CODE_FAIL                   0x1UL
+	#define HWRM_ERR_CODE_INVALID_PARAMS         0x2UL
+	#define HWRM_ERR_CODE_RESOURCE_ACCESS_DENIED 0x3UL
+	#define HWRM_ERR_CODE_RESOURCE_ALLOC_ERROR   0x4UL
+	#define HWRM_ERR_CODE_INVALID_FLAGS          0x5UL
+	#define HWRM_ERR_CODE_INVALID_ENABLES        0x6UL
+	#define HWRM_ERR_CODE_UNSUPPORTED_TLV        0x7UL
+	#define HWRM_ERR_CODE_NO_BUFFER              0x8UL
+	#define HWRM_ERR_CODE_HWRM_ERROR             0xfUL
+	#define HWRM_ERR_CODE_UNKNOWN_ERR            0xfffeUL
+	#define HWRM_ERR_CODE_CMD_NOT_SUPPORTED      0xffffUL
+	#define HWRM_ERR_CODE_LAST                  HWRM_ERR_CODE_CMD_NOT_SUPPORTED
+	__le16	unused_0[3];
+};
+
+/* hwrm_err_output (size:128b/16B) */
+struct hwrm_err_output {
+	__le16	error_code;
+	__le16	req_type;
+	__le16	seq_id;
+	__le16	resp_len;
+	__le32	opaque_0;
+	__le16	opaque_1;
+	u8	cmd_err;
+	u8	valid;
+};
+#define HWRM_NA_SIGNATURE ((__le32)(-1))
+#define HWRM_MAX_REQ_LEN 128
+#define HWRM_MAX_RESP_LEN 280
+#define HW_HASH_INDEX_SIZE 0x80
+#define HW_HASH_KEY_SIZE 40
+#define HWRM_RESP_VALID_KEY 1
+#define HWRM_VERSION_MAJOR 1
+#define HWRM_VERSION_MINOR 9
+#define HWRM_VERSION_UPDATE 0
+#define HWRM_VERSION_RSVD 0
+#define HWRM_VERSION_STR "1.9.0.0"
+
+/* hwrm_ver_get_input (size:192b/24B) */
+struct hwrm_ver_get_input {
+	__le16	req_type;
+	__le16	cmpl_ring;
+	__le16	seq_id;
+	__le16	target_id;
+	__le64	resp_addr;
+	u8	hwrm_intf_maj;
+	u8	hwrm_intf_min;
+	u8	hwrm_intf_upd;
+	u8	unused_0[5];
+};
+
+/* hwrm_ver_get_output (size:1408b/176B) */
+struct hwrm_ver_get_output {
+	__le16	error_code;
+	__le16	req_type;
+	__le16	seq_id;
+	__le16	resp_len;
+	u8	hwrm_intf_maj_8b;
+	u8	hwrm_intf_min_8b;
+	u8	hwrm_intf_upd_8b;
+	u8	hwrm_intf_rsvd_8b;
+	u8	hwrm_fw_maj_8b;
+	u8	hwrm_fw_min_8b;
+	u8	hwrm_fw_bld_8b;
+	u8	hwrm_fw_rsvd_8b;
+	u8	mgmt_fw_maj_8b;
+	u8	mgmt_fw_min_8b;
+	u8	mgmt_fw_bld_8b;
+	u8	mgmt_fw_rsvd_8b;
+	u8	netctrl_fw_maj_8b;
+	u8	netctrl_fw_min_8b;
+	u8	netctrl_fw_bld_8b;
+	u8	netctrl_fw_rsvd_8b;
+	__le32	dev_caps_cfg;
+	#define VER_GET_RESP_DEV_CAPS_CFG_SECURE_FW_UPD_SUPPORTED     0x1UL
+	#define VER_GET_RESP_DEV_CAPS_CFG_FW_DCBX_AGENT_SUPPORTED     0x2UL
+	#define VER_GET_RESP_DEV_CAPS_CFG_SHORT_CMD_SUPPORTED         0x4UL
+	#define VER_GET_RESP_DEV_CAPS_CFG_SHORT_CMD_REQUIRED          0x8UL
+	u8	roce_fw_maj_8b;
+	u8	roce_fw_min_8b;
+	u8	roce_fw_bld_8b;
+	u8	roce_fw_rsvd_8b;
+	char	hwrm_fw_name[16];
+	char	mgmt_fw_name[16];
+	char	netctrl_fw_name[16];
+	u8	reserved2[16];
+	char	roce_fw_name[16];
+	__le16	chip_num;
+	u8	chip_rev;
+	u8	chip_metal;
+	u8	chip_bond_id;
+	u8	chip_platform_type;
+	#define VER_GET_RESP_CHIP_PLATFORM_TYPE_ASIC      0x0UL
+	#define VER_GET_RESP_CHIP_PLATFORM_TYPE_FPGA      0x1UL
+	#define VER_GET_RESP_CHIP_PLATFORM_TYPE_PALLADIUM 0x2UL
+	#define VER_GET_RESP_CHIP_PLATFORM_TYPE_LAST     VER_GET_RESP_CHIP_PLATFORM_TYPE_PALLADIUM
+	__le16	max_req_win_len;
+	__le16	max_resp_len;
+	__le16	def_req_timeout;
+	u8	flags;
+	#define VER_GET_RESP_FLAGS_DEV_NOT_RDY       0x1UL
+	#define VER_GET_RESP_FLAGS_EXT_VER_AVAIL     0x2UL
+	u8	unused_0[2];
+	u8	always_1;
+	__le16	hwrm_intf_major;
+	__le16	hwrm_intf_minor;
+	__le16	hwrm_intf_build;
+	__le16	hwrm_intf_patch;
+	__le16	hwrm_fw_major;
+	__le16	hwrm_fw_minor;
+	__le16	hwrm_fw_build;
+	__le16	hwrm_fw_patch;
+	__le16	mgmt_fw_major;
+	__le16	mgmt_fw_minor;
+	__le16	mgmt_fw_build;
+	__le16	mgmt_fw_patch;
+	__le16	netctrl_fw_major;
+	__le16	netctrl_fw_minor;
+	__le16	netctrl_fw_build;
+	__le16	netctrl_fw_patch;
+	__le16	roce_fw_major;
+	__le16	roce_fw_minor;
+	__le16	roce_fw_build;
+	__le16	roce_fw_patch;
+	__le16	max_ext_req_len;
+	u8	unused_1[5];
+	u8	valid;
+};
+
+/* eject_cmpl (size:128b/16B) */
 struct eject_cmpl {
-	__le16 type;
-	#define EJECT_CMPL_TYPE_MASK				    0x3fUL
-	#define EJECT_CMPL_TYPE_SFT				    0
-	#define EJECT_CMPL_TYPE_STAT_EJECT			   0x1aUL
-	__le16 len;
-	__le32 opaque;
-	__le32 v;
-	#define EJECT_CMPL_V					    0x1UL
-	__le32 unused_2;
-};
-
-/* HWRM Completion Record (16 bytes) */
+	__le16	type;
+	#define EJECT_CMPL_TYPE_MASK      0x3fUL
+	#define EJECT_CMPL_TYPE_SFT       0
+	#define EJECT_CMPL_TYPE_STAT_EJECT  0x1aUL
+	#define EJECT_CMPL_TYPE_LAST       EJECT_CMPL_TYPE_STAT_EJECT
+	__le16	len;
+	__le32	opaque;
+	__le32	v;
+	#define EJECT_CMPL_V     0x1UL
+	__le32	unused_2;
+};
+
+/* hwrm_cmpl (size:128b/16B) */
 struct hwrm_cmpl {
-	__le16 type;
-	#define CMPL_TYPE_MASK					    0x3fUL
-	#define CMPL_TYPE_SFT					    0
-	#define CMPL_TYPE_HWRM_DONE				   0x20UL
-	__le16 sequence_id;
-	__le32 unused_1;
-	__le32 v;
-	#define CMPL_V						    0x1UL
-	__le32 unused_3;
-};
-
-/* HWRM Forwarded Request (16 bytes) */
+	__le16	type;
+	#define CMPL_TYPE_MASK     0x3fUL
+	#define CMPL_TYPE_SFT      0
+	#define CMPL_TYPE_HWRM_DONE  0x20UL
+	#define CMPL_TYPE_LAST      CMPL_TYPE_HWRM_DONE
+	__le16	sequence_id;
+	__le32	unused_1;
+	__le32	v;
+	#define CMPL_V     0x1UL
+	__le32	unused_3;
+};
+
+/* hwrm_fwd_req_cmpl (size:128b/16B) */
 struct hwrm_fwd_req_cmpl {
-	__le16 req_len_type;
-	#define FWD_REQ_CMPL_TYPE_MASK				    0x3fUL
-	#define FWD_REQ_CMPL_TYPE_SFT				    0
-	#define FWD_REQ_CMPL_TYPE_HWRM_FWD_REQ			   0x22UL
-	#define FWD_REQ_CMPL_REQ_LEN_MASK			    0xffc0UL
-	#define FWD_REQ_CMPL_REQ_LEN_SFT			    6
-	__le16 source_id;
-	__le32 unused_0;
-	__le32 req_buf_addr_v[2];
-	#define FWD_REQ_CMPL_V					    0x1UL
-	#define FWD_REQ_CMPL_REQ_BUF_ADDR_MASK			    0xfffffffeUL
-	#define FWD_REQ_CMPL_REQ_BUF_ADDR_SFT			    1
-};
-
-/* HWRM Forwarded Response (16 bytes) */
+	__le16	req_len_type;
+	#define FWD_REQ_CMPL_TYPE_MASK        0x3fUL
+	#define FWD_REQ_CMPL_TYPE_SFT         0
+	#define FWD_REQ_CMPL_TYPE_HWRM_FWD_REQ  0x22UL
+	#define FWD_REQ_CMPL_TYPE_LAST         FWD_REQ_CMPL_TYPE_HWRM_FWD_REQ
+	#define FWD_REQ_CMPL_REQ_LEN_MASK     0xffc0UL
+	#define FWD_REQ_CMPL_REQ_LEN_SFT      6
+	__le16	source_id;
+	__le32	unused0;
+	__le32	req_buf_addr_v[2];
+	#define FWD_REQ_CMPL_V                0x1UL
+	#define FWD_REQ_CMPL_REQ_BUF_ADDR_MASK 0xfffffffeUL
+	#define FWD_REQ_CMPL_REQ_BUF_ADDR_SFT 1
+};
+
+/* hwrm_fwd_resp_cmpl (size:128b/16B) */
 struct hwrm_fwd_resp_cmpl {
-	__le16 type;
-	#define FWD_RESP_CMPL_TYPE_MASK			    0x3fUL
-	#define FWD_RESP_CMPL_TYPE_SFT				    0
-	#define FWD_RESP_CMPL_TYPE_HWRM_FWD_RESP		   0x24UL
-	__le16 source_id;
-	__le16 resp_len;
-	__le16 unused_1;
-	__le32 resp_buf_addr_v[2];
-	#define FWD_RESP_CMPL_V				    0x1UL
-	#define FWD_RESP_CMPL_RESP_BUF_ADDR_MASK		    0xfffffffeUL
-	#define FWD_RESP_CMPL_RESP_BUF_ADDR_SFT		    1
-};
-
-/* HWRM Asynchronous Event Completion Record (16 bytes) */
+	__le16	type;
+	#define FWD_RESP_CMPL_TYPE_MASK         0x3fUL
+	#define FWD_RESP_CMPL_TYPE_SFT          0
+	#define FWD_RESP_CMPL_TYPE_HWRM_FWD_RESP  0x24UL
+	#define FWD_RESP_CMPL_TYPE_LAST          FWD_RESP_CMPL_TYPE_HWRM_FWD_RESP
+	__le16	source_id;
+	__le16	resp_len;
+	__le16	unused_1;
+	__le32	resp_buf_addr_v[2];
+	#define FWD_RESP_CMPL_V                 0x1UL
+	#define FWD_RESP_CMPL_RESP_BUF_ADDR_MASK 0xfffffffeUL
+	#define FWD_RESP_CMPL_RESP_BUF_ADDR_SFT 1
+};
+
+/* hwrm_async_event_cmpl (size:128b/16B) */
 struct hwrm_async_event_cmpl {
-	__le16 type;
-	#define ASYNC_EVENT_CMPL_TYPE_MASK			    0x3fUL
-	#define ASYNC_EVENT_CMPL_TYPE_SFT			    0
-	#define ASYNC_EVENT_CMPL_TYPE_HWRM_ASYNC_EVENT		   0x2eUL
-	__le16 event_id;
-	#define ASYNC_EVENT_CMPL_EVENT_ID_LINK_STATUS_CHANGE      0x0UL
-	#define ASYNC_EVENT_CMPL_EVENT_ID_LINK_MTU_CHANGE	   0x1UL
-	#define ASYNC_EVENT_CMPL_EVENT_ID_LINK_SPEED_CHANGE       0x2UL
-	#define ASYNC_EVENT_CMPL_EVENT_ID_DCB_CONFIG_CHANGE       0x3UL
-	#define ASYNC_EVENT_CMPL_EVENT_ID_PORT_CONN_NOT_ALLOWED   0x4UL
+	__le16	type;
+	#define ASYNC_EVENT_CMPL_TYPE_MASK            0x3fUL
+	#define ASYNC_EVENT_CMPL_TYPE_SFT             0
+	#define ASYNC_EVENT_CMPL_TYPE_HWRM_ASYNC_EVENT  0x2eUL
+	#define ASYNC_EVENT_CMPL_TYPE_LAST             ASYNC_EVENT_CMPL_TYPE_HWRM_ASYNC_EVENT
+	__le16	event_id;
+	#define ASYNC_EVENT_CMPL_EVENT_ID_LINK_STATUS_CHANGE         0x0UL
+	#define ASYNC_EVENT_CMPL_EVENT_ID_LINK_MTU_CHANGE            0x1UL
+	#define ASYNC_EVENT_CMPL_EVENT_ID_LINK_SPEED_CHANGE          0x2UL
+	#define ASYNC_EVENT_CMPL_EVENT_ID_DCB_CONFIG_CHANGE          0x3UL
+	#define ASYNC_EVENT_CMPL_EVENT_ID_PORT_CONN_NOT_ALLOWED      0x4UL
 	#define ASYNC_EVENT_CMPL_EVENT_ID_LINK_SPEED_CFG_NOT_ALLOWED 0x5UL
-	#define ASYNC_EVENT_CMPL_EVENT_ID_LINK_SPEED_CFG_CHANGE   0x6UL
-	#define ASYNC_EVENT_CMPL_EVENT_ID_PORT_PHY_CFG_CHANGE     0x7UL
-	#define ASYNC_EVENT_CMPL_EVENT_ID_FUNC_DRVR_UNLOAD	   0x10UL
-	#define ASYNC_EVENT_CMPL_EVENT_ID_FUNC_DRVR_LOAD	   0x11UL
-	#define ASYNC_EVENT_CMPL_EVENT_ID_FUNC_FLR_PROC_CMPLT     0x12UL
-	#define ASYNC_EVENT_CMPL_EVENT_ID_PF_DRVR_UNLOAD	   0x20UL
-	#define ASYNC_EVENT_CMPL_EVENT_ID_PF_DRVR_LOAD		   0x21UL
-	#define ASYNC_EVENT_CMPL_EVENT_ID_VF_FLR		   0x30UL
-	#define ASYNC_EVENT_CMPL_EVENT_ID_VF_MAC_ADDR_CHANGE      0x31UL
-	#define ASYNC_EVENT_CMPL_EVENT_ID_PF_VF_COMM_STATUS_CHANGE 0x32UL
-	#define ASYNC_EVENT_CMPL_EVENT_ID_VF_CFG_CHANGE	   0x33UL
-	#define ASYNC_EVENT_CMPL_EVENT_ID_LLFC_PFC_CHANGE	   0x34UL
-	#define ASYNC_EVENT_CMPL_EVENT_ID_HWRM_ERROR		   0xffUL
-	__le32 event_data2;
-	u8 opaque_v;
-	#define ASYNC_EVENT_CMPL_V				    0x1UL
-	#define ASYNC_EVENT_CMPL_OPAQUE_MASK			    0xfeUL
-	#define ASYNC_EVENT_CMPL_OPAQUE_SFT			    1
-	u8 timestamp_lo;
-	__le16 timestamp_hi;
-	__le32 event_data1;
-};
-
-/* HWRM Asynchronous Event Completion Record for link status change (16 bytes) */
+	#define ASYNC_EVENT_CMPL_EVENT_ID_LINK_SPEED_CFG_CHANGE      0x6UL
+	#define ASYNC_EVENT_CMPL_EVENT_ID_PORT_PHY_CFG_CHANGE        0x7UL
+	#define ASYNC_EVENT_CMPL_EVENT_ID_FUNC_DRVR_UNLOAD           0x10UL
+	#define ASYNC_EVENT_CMPL_EVENT_ID_FUNC_DRVR_LOAD             0x11UL
+	#define ASYNC_EVENT_CMPL_EVENT_ID_FUNC_FLR_PROC_CMPLT        0x12UL
+	#define ASYNC_EVENT_CMPL_EVENT_ID_PF_DRVR_UNLOAD             0x20UL
+	#define ASYNC_EVENT_CMPL_EVENT_ID_PF_DRVR_LOAD               0x21UL
+	#define ASYNC_EVENT_CMPL_EVENT_ID_VF_FLR                     0x30UL
+	#define ASYNC_EVENT_CMPL_EVENT_ID_VF_MAC_ADDR_CHANGE         0x31UL
+	#define ASYNC_EVENT_CMPL_EVENT_ID_PF_VF_COMM_STATUS_CHANGE   0x32UL
+	#define ASYNC_EVENT_CMPL_EVENT_ID_VF_CFG_CHANGE              0x33UL
+	#define ASYNC_EVENT_CMPL_EVENT_ID_LLFC_PFC_CHANGE            0x34UL
+	#define ASYNC_EVENT_CMPL_EVENT_ID_HWRM_ERROR                 0xffUL
+	#define ASYNC_EVENT_CMPL_EVENT_ID_LAST                      ASYNC_EVENT_CMPL_EVENT_ID_HWRM_ERROR
+	__le32	event_data2;
+	u8	opaque_v;
+	#define ASYNC_EVENT_CMPL_V          0x1UL
+	#define ASYNC_EVENT_CMPL_OPAQUE_MASK 0xfeUL
+	#define ASYNC_EVENT_CMPL_OPAQUE_SFT 1
+	u8	timestamp_lo;
+	__le16	timestamp_hi;
+	__le32	event_data1;
+};
+
+/* hwrm_async_event_cmpl_link_status_change (size:128b/16B) */
 struct hwrm_async_event_cmpl_link_status_change {
-	__le16 type;
-	#define ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_TYPE_MASK      0x3fUL
-	#define ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_TYPE_SFT       0
-	#define ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_TYPE_HWRM_ASYNC_EVENT 0x2eUL
-	__le16 event_id;
+	__le16	type;
+	#define ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_TYPE_MASK            0x3fUL
+	#define ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_TYPE_SFT             0
+	#define ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_TYPE_HWRM_ASYNC_EVENT  0x2eUL
+	#define ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_TYPE_LAST             ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_TYPE_HWRM_ASYNC_EVENT
+	__le16	event_id;
 	#define ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_EVENT_ID_LINK_STATUS_CHANGE 0x0UL
-	__le32 event_data2;
-	u8 opaque_v;
-	#define ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_V		    0x1UL
-	#define ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_OPAQUE_MASK    0xfeUL
-	#define ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_OPAQUE_SFT     1
-	u8 timestamp_lo;
-	__le16 timestamp_hi;
-	__le32 event_data1;
-	#define ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_EVENT_DATA1_LINK_CHANGE 0x1UL
-	#define ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_EVENT_DATA1_LINK_CHANGE_DOWN (0x0UL << 0)
-	#define ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_EVENT_DATA1_LINK_CHANGE_UP (0x1UL << 0)
-	#define ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_EVENT_DATA1_LINK_CHANGE_LAST    ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_EVENT_DATA1_LINK_CHANGE_UP
-	#define ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_EVENT_DATA1_PORT_MASK 0xeUL
-	#define ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_EVENT_DATA1_PORT_SFT 1
-	#define ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_EVENT_DATA1_PORT_ID_MASK 0xffff0UL
-	#define ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_EVENT_DATA1_PORT_ID_SFT 4
-};
-
-/* HWRM Asynchronous Event Completion Record for link MTU change (16 bytes) */
-struct hwrm_async_event_cmpl_link_mtu_change {
-	__le16 type;
-	#define ASYNC_EVENT_CMPL_LINK_MTU_CHANGE_TYPE_MASK	    0x3fUL
-	#define ASYNC_EVENT_CMPL_LINK_MTU_CHANGE_TYPE_SFT	    0
-	#define ASYNC_EVENT_CMPL_LINK_MTU_CHANGE_TYPE_HWRM_ASYNC_EVENT 0x2eUL
-	__le16 event_id;
-	#define ASYNC_EVENT_CMPL_LINK_MTU_CHANGE_EVENT_ID_LINK_MTU_CHANGE 0x1UL
-	__le32 event_data2;
-	u8 opaque_v;
-	#define ASYNC_EVENT_CMPL_LINK_MTU_CHANGE_V		    0x1UL
-	#define ASYNC_EVENT_CMPL_LINK_MTU_CHANGE_OPAQUE_MASK       0xfeUL
-	#define ASYNC_EVENT_CMPL_LINK_MTU_CHANGE_OPAQUE_SFT	    1
-	u8 timestamp_lo;
-	__le16 timestamp_hi;
-	__le32 event_data1;
-	#define ASYNC_EVENT_CMPL_LINK_MTU_CHANGE_EVENT_DATA1_NEW_MTU_MASK 0xffffUL
-	#define ASYNC_EVENT_CMPL_LINK_MTU_CHANGE_EVENT_DATA1_NEW_MTU_SFT 0
-};
-
-/* HWRM Asynchronous Event Completion Record for link speed change (16 bytes) */
-struct hwrm_async_event_cmpl_link_speed_change {
-	__le16 type;
-	#define ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_TYPE_MASK       0x3fUL
-	#define ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_TYPE_SFT	    0
-	#define ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_TYPE_HWRM_ASYNC_EVENT 0x2eUL
-	__le16 event_id;
-	#define ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_EVENT_ID_LINK_SPEED_CHANGE 0x2UL
-	__le32 event_data2;
-	u8 opaque_v;
-	#define ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_V		    0x1UL
-	#define ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_OPAQUE_MASK     0xfeUL
-	#define ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_OPAQUE_SFT      1
-	u8 timestamp_lo;
-	__le16 timestamp_hi;
-	__le32 event_data1;
-	#define ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_EVENT_DATA1_FORCE 0x1UL
-	#define ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_EVENT_DATA1_NEW_LINK_SPEED_100MBPS_MASK 0xfffeUL
-	#define ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_EVENT_DATA1_NEW_LINK_SPEED_100MBPS_SFT 1
-	#define ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_EVENT_DATA1_NEW_LINK_SPEED_100MBPS_100MB (0x1UL << 1)
-	#define ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_EVENT_DATA1_NEW_LINK_SPEED_100MBPS_1GB (0xaUL << 1)
-	#define ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_EVENT_DATA1_NEW_LINK_SPEED_100MBPS_2GB (0x14UL << 1)
-	#define ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_EVENT_DATA1_NEW_LINK_SPEED_100MBPS_2_5GB (0x19UL << 1)
-	#define ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_EVENT_DATA1_NEW_LINK_SPEED_100MBPS_10GB (0x64UL << 1)
-	#define ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_EVENT_DATA1_NEW_LINK_SPEED_100MBPS_20GB (0xc8UL << 1)
-	#define ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_EVENT_DATA1_NEW_LINK_SPEED_100MBPS_25GB (0xfaUL << 1)
-	#define ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_EVENT_DATA1_NEW_LINK_SPEED_100MBPS_40GB (0x190UL << 1)
-	#define ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_EVENT_DATA1_NEW_LINK_SPEED_100MBPS_50GB (0x1f4UL << 1)
-	#define ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_EVENT_DATA1_NEW_LINK_SPEED_100MBPS_100GB (0x3e8UL << 1)
-	#define ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_EVENT_DATA1_NEW_LINK_SPEED_100MBPS_LAST    ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_EVENT_DATA1_NEW_LINK_SPEED_100MBPS_100GB
-	#define ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_EVENT_DATA1_PORT_ID_MASK 0xffff0000UL
-	#define ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_EVENT_DATA1_PORT_ID_SFT 16
-};
-
-/* HWRM Asynchronous Event Completion Record for DCB Config change (16 bytes) */
-struct hwrm_async_event_cmpl_dcb_config_change {
-	__le16 type;
-	#define ASYNC_EVENT_CMPL_DCB_CONFIG_CHANGE_TYPE_MASK       0x3fUL
-	#define ASYNC_EVENT_CMPL_DCB_CONFIG_CHANGE_TYPE_SFT	    0
-	#define ASYNC_EVENT_CMPL_DCB_CONFIG_CHANGE_TYPE_HWRM_ASYNC_EVENT 0x2eUL
-	__le16 event_id;
-	#define ASYNC_EVENT_CMPL_DCB_CONFIG_CHANGE_EVENT_ID_DCB_CONFIG_CHANGE 0x3UL
-	__le32 event_data2;
-	#define ASYNC_EVENT_CMPL_DCB_CONFIG_CHANGE_EVENT_DATA2_ETS 0x1UL
-	#define ASYNC_EVENT_CMPL_DCB_CONFIG_CHANGE_EVENT_DATA2_PFC 0x2UL
-	#define ASYNC_EVENT_CMPL_DCB_CONFIG_CHANGE_EVENT_DATA2_APP 0x4UL
-	u8 opaque_v;
-	#define ASYNC_EVENT_CMPL_DCB_CONFIG_CHANGE_V		    0x1UL
-	#define ASYNC_EVENT_CMPL_DCB_CONFIG_CHANGE_OPAQUE_MASK     0xfeUL
-	#define ASYNC_EVENT_CMPL_DCB_CONFIG_CHANGE_OPAQUE_SFT      1
-	u8 timestamp_lo;
-	__le16 timestamp_hi;
-	__le32 event_data1;
-	#define ASYNC_EVENT_CMPL_DCB_CONFIG_CHANGE_EVENT_DATA1_PORT_ID_MASK 0xffffUL
-	#define ASYNC_EVENT_CMPL_DCB_CONFIG_CHANGE_EVENT_DATA1_PORT_ID_SFT 0
-	#define ASYNC_EVENT_CMPL_DCB_CONFIG_CHANGE_EVENT_DATA1_RECOMMEND_ROCE_PRIORITY_MASK 0xff0000UL
-	#define ASYNC_EVENT_CMPL_DCB_CONFIG_CHANGE_EVENT_DATA1_RECOMMEND_ROCE_PRIORITY_SFT 16
-	#define ASYNC_EVENT_CMPL_DCB_CONFIG_CHANGE_EVENT_DATA1_RECOMMEND_ROCE_PRIORITY_NONE (0xffUL << 16)
-	#define ASYNC_EVENT_CMPL_DCB_CONFIG_CHANGE_EVENT_DATA1_RECOMMEND_ROCE_PRIORITY_LAST    ASYNC_EVENT_CMPL_DCB_CONFIG_CHANGE_EVENT_DATA1_RECOMMEND_ROCE_PRIORITY_NONE
-	#define ASYNC_EVENT_CMPL_DCB_CONFIG_CHANGE_EVENT_DATA1_RECOMMEND_L2_PRIORITY_MASK 0xff000000UL
-	#define ASYNC_EVENT_CMPL_DCB_CONFIG_CHANGE_EVENT_DATA1_RECOMMEND_L2_PRIORITY_SFT 24
-	#define ASYNC_EVENT_CMPL_DCB_CONFIG_CHANGE_EVENT_DATA1_RECOMMEND_L2_PRIORITY_NONE (0xffUL << 24)
-	#define ASYNC_EVENT_CMPL_DCB_CONFIG_CHANGE_EVENT_DATA1_RECOMMEND_L2_PRIORITY_LAST    ASYNC_EVENT_CMPL_DCB_CONFIG_CHANGE_EVENT_DATA1_RECOMMEND_L2_PRIORITY_NONE
-};
-
-/* HWRM Asynchronous Event Completion Record for port connection not allowed (16 bytes) */
+	#define ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_EVENT_ID_LAST              ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_EVENT_ID_LINK_STATUS_CHANGE
+	__le32	event_data2;
+	u8	opaque_v;
+	#define ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_V          0x1UL
+	#define ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_OPAQUE_MASK 0xfeUL
+	#define ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_OPAQUE_SFT 1
+	u8	timestamp_lo;
+	__le16	timestamp_hi;
+	__le32	event_data1;
+	#define ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_EVENT_DATA1_LINK_CHANGE     0x1UL
+	#define ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_EVENT_DATA1_LINK_CHANGE_DOWN  0x0UL
+	#define ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_EVENT_DATA1_LINK_CHANGE_UP    0x1UL
+	#define ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_EVENT_DATA1_LINK_CHANGE_LAST ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_EVENT_DATA1_LINK_CHANGE_UP
+	#define ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_EVENT_DATA1_PORT_MASK       0xeUL
+	#define ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_EVENT_DATA1_PORT_SFT        1
+	#define ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_EVENT_DATA1_PORT_ID_MASK    0xffff0UL
+	#define ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_EVENT_DATA1_PORT_ID_SFT     4
+};
+
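[Editorial aside: the event_data1 word of a link status change event carries the up/down bit and the port identifiers in the bitfields defined above. A hedged sketch of the decode; the local macro names are shortened copies of the ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_EVENT_DATA1_* definitions, and the sample word is made up.]

	#include <stdint.h>
	#include <stdio.h>

	#define LINK_CHANGE_UP 0x1UL      /* bit 0: 1 = link up, 0 = down */
	#define PORT_ID_MASK   0xffff0UL  /* bits 4..19: port id */
	#define PORT_ID_SFT    4

	int main(void)
	{
		uint32_t event_data1 = 0x51;	/* hypothetical: link up, port 5 */
		int up      = event_data1 & LINK_CHANGE_UP;
		int port_id = (int)((event_data1 & PORT_ID_MASK) >> PORT_ID_SFT);

		printf("link %s on port %d\n", up ? "up" : "down", port_id);
		return 0;
	}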
+/* hwrm_async_event_cmpl_port_conn_not_allowed (size:128b/16B) */
 struct hwrm_async_event_cmpl_port_conn_not_allowed {
-	__le16 type;
-	#define ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_TYPE_MASK   0x3fUL
-	#define ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_TYPE_SFT    0
-	#define ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_TYPE_HWRM_ASYNC_EVENT 0x2eUL
-	__le16 event_id;
+	__le16	type;
+	#define ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_TYPE_MASK            0x3fUL
+	#define ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_TYPE_SFT             0
+	#define ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_TYPE_HWRM_ASYNC_EVENT  0x2eUL
+	#define ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_TYPE_LAST             ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_TYPE_HWRM_ASYNC_EVENT
+	__le16	event_id;
 	#define ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_EVENT_ID_PORT_CONN_NOT_ALLOWED 0x4UL
-	__le32 event_data2;
-	u8 opaque_v;
-	#define ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_V	    0x1UL
+	#define ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_EVENT_ID_LAST                 ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_EVENT_ID_PORT_CONN_NOT_ALLOWED
+	__le32	event_data2;
+	u8	opaque_v;
+	#define ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_V          0x1UL
 	#define ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_OPAQUE_MASK 0xfeUL
-	#define ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_OPAQUE_SFT  1
-	u8 timestamp_lo;
-	__le16 timestamp_hi;
-	__le32 event_data1;
-	#define ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_EVENT_DATA1_PORT_ID_MASK 0xffffUL
-	#define ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_EVENT_DATA1_PORT_ID_SFT 0
-	#define ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_EVENT_DATA1_ENFORCEMENT_POLICY_MASK 0xff0000UL
-	#define ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_EVENT_DATA1_ENFORCEMENT_POLICY_SFT 16
-	#define ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_EVENT_DATA1_ENFORCEMENT_POLICY_NONE (0x0UL << 16)
-	#define ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_EVENT_DATA1_ENFORCEMENT_POLICY_DISABLETX (0x1UL << 16)
-	#define ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_EVENT_DATA1_ENFORCEMENT_POLICY_WARNINGMSG (0x2UL << 16)
-	#define ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_EVENT_DATA1_ENFORCEMENT_POLICY_PWRDOWN (0x3UL << 16)
-	#define ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_EVENT_DATA1_ENFORCEMENT_POLICY_LAST    ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_EVENT_DATA1_ENFORCEMENT_POLICY_PWRDOWN
-};
-
-/* HWRM Asynchronous Event Completion Record for link speed config not allowed (16 bytes) */
-struct hwrm_async_event_cmpl_link_speed_cfg_not_allowed {
-	__le16 type;
-	#define ASYNC_EVENT_CMPL_LINK_SPEED_CFG_NOT_ALLOWED_TYPE_MASK 0x3fUL
-	#define ASYNC_EVENT_CMPL_LINK_SPEED_CFG_NOT_ALLOWED_TYPE_SFT 0
-	#define ASYNC_EVENT_CMPL_LINK_SPEED_CFG_NOT_ALLOWED_TYPE_HWRM_ASYNC_EVENT 0x2eUL
-	__le16 event_id;
-	#define ASYNC_EVENT_CMPL_LINK_SPEED_CFG_NOT_ALLOWED_EVENT_ID_LINK_SPEED_CFG_NOT_ALLOWED 0x5UL
-	__le32 event_data2;
-	u8 opaque_v;
-	#define ASYNC_EVENT_CMPL_LINK_SPEED_CFG_NOT_ALLOWED_V      0x1UL
-	#define ASYNC_EVENT_CMPL_LINK_SPEED_CFG_NOT_ALLOWED_OPAQUE_MASK 0xfeUL
-	#define ASYNC_EVENT_CMPL_LINK_SPEED_CFG_NOT_ALLOWED_OPAQUE_SFT 1
-	u8 timestamp_lo;
-	__le16 timestamp_hi;
-	__le32 event_data1;
-	#define ASYNC_EVENT_CMPL_LINK_SPEED_CFG_NOT_ALLOWED_EVENT_DATA1_PORT_ID_MASK 0xffffUL
-	#define ASYNC_EVENT_CMPL_LINK_SPEED_CFG_NOT_ALLOWED_EVENT_DATA1_PORT_ID_SFT 0
-};
-
-/* HWRM Asynchronous Event Completion Record for link speed configuration change (16 bytes) */
+	#define ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_OPAQUE_SFT 1
+	u8	timestamp_lo;
+	__le16	timestamp_hi;
+	__le32	event_data1;
+	#define ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_EVENT_DATA1_PORT_ID_MASK                 0xffffUL
+	#define ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_EVENT_DATA1_PORT_ID_SFT                  0
+	#define ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_EVENT_DATA1_ENFORCEMENT_POLICY_MASK      0xff0000UL
+	#define ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_EVENT_DATA1_ENFORCEMENT_POLICY_SFT       16
+	#define ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_EVENT_DATA1_ENFORCEMENT_POLICY_NONE        (0x0UL << 16)
+	#define ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_EVENT_DATA1_ENFORCEMENT_POLICY_DISABLETX   (0x1UL << 16)
+	#define ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_EVENT_DATA1_ENFORCEMENT_POLICY_WARNINGMSG  (0x2UL << 16)
+	#define ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_EVENT_DATA1_ENFORCEMENT_POLICY_PWRDOWN     (0x3UL << 16)
+	#define ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_EVENT_DATA1_ENFORCEMENT_POLICY_LAST       ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_EVENT_DATA1_ENFORCEMENT_POLICY_PWRDOWN
+};
+
+/* hwrm_async_event_cmpl_link_speed_cfg_change (size:128b/16B) */
 struct hwrm_async_event_cmpl_link_speed_cfg_change {
-	__le16 type;
-	#define ASYNC_EVENT_CMPL_LINK_SPEED_CFG_CHANGE_TYPE_MASK   0x3fUL
-	#define ASYNC_EVENT_CMPL_LINK_SPEED_CFG_CHANGE_TYPE_SFT    0
-	#define ASYNC_EVENT_CMPL_LINK_SPEED_CFG_CHANGE_TYPE_HWRM_ASYNC_EVENT 0x2eUL
-	__le16 event_id;
+	__le16	type;
+	#define ASYNC_EVENT_CMPL_LINK_SPEED_CFG_CHANGE_TYPE_MASK            0x3fUL
+	#define ASYNC_EVENT_CMPL_LINK_SPEED_CFG_CHANGE_TYPE_SFT             0
+	#define ASYNC_EVENT_CMPL_LINK_SPEED_CFG_CHANGE_TYPE_HWRM_ASYNC_EVENT  0x2eUL
+	#define ASYNC_EVENT_CMPL_LINK_SPEED_CFG_CHANGE_TYPE_LAST             ASYNC_EVENT_CMPL_LINK_SPEED_CFG_CHANGE_TYPE_HWRM_ASYNC_EVENT
+	__le16	event_id;
 	#define ASYNC_EVENT_CMPL_LINK_SPEED_CFG_CHANGE_EVENT_ID_LINK_SPEED_CFG_CHANGE 0x6UL
-	__le32 event_data2;
-	u8 opaque_v;
-	#define ASYNC_EVENT_CMPL_LINK_SPEED_CFG_CHANGE_V	    0x1UL
+	#define ASYNC_EVENT_CMPL_LINK_SPEED_CFG_CHANGE_EVENT_ID_LAST                 ASYNC_EVENT_CMPL_LINK_SPEED_CFG_CHANGE_EVENT_ID_LINK_SPEED_CFG_CHANGE
+	__le32	event_data2;
+	u8	opaque_v;
+	#define ASYNC_EVENT_CMPL_LINK_SPEED_CFG_CHANGE_V          0x1UL
 	#define ASYNC_EVENT_CMPL_LINK_SPEED_CFG_CHANGE_OPAQUE_MASK 0xfeUL
-	#define ASYNC_EVENT_CMPL_LINK_SPEED_CFG_CHANGE_OPAQUE_SFT  1
-	u8 timestamp_lo;
-	__le16 timestamp_hi;
-	__le32 event_data1;
-	#define ASYNC_EVENT_CMPL_LINK_SPEED_CFG_CHANGE_EVENT_DATA1_PORT_ID_MASK 0xffffUL
-	#define ASYNC_EVENT_CMPL_LINK_SPEED_CFG_CHANGE_EVENT_DATA1_PORT_ID_SFT 0
-	#define ASYNC_EVENT_CMPL_LINK_SPEED_CFG_CHANGE_EVENT_DATA1_SUPPORTED_LINK_SPEEDS_CHANGE 0x10000UL
-	#define ASYNC_EVENT_CMPL_LINK_SPEED_CFG_CHANGE_EVENT_DATA1_ILLEGAL_LINK_SPEED_CFG 0x20000UL
-};
-
-/* HWRM Asynchronous Event Completion Record for Function Driver Unload (16 bytes) */
-struct hwrm_async_event_cmpl_func_drvr_unload {
-	__le16 type;
-	#define ASYNC_EVENT_CMPL_FUNC_DRVR_UNLOAD_TYPE_MASK	    0x3fUL
-	#define ASYNC_EVENT_CMPL_FUNC_DRVR_UNLOAD_TYPE_SFT	    0
-	#define ASYNC_EVENT_CMPL_FUNC_DRVR_UNLOAD_TYPE_HWRM_ASYNC_EVENT 0x2eUL
-	__le16 event_id;
-	#define ASYNC_EVENT_CMPL_FUNC_DRVR_UNLOAD_EVENT_ID_FUNC_DRVR_UNLOAD 0x10UL
-	__le32 event_data2;
-	u8 opaque_v;
-	#define ASYNC_EVENT_CMPL_FUNC_DRVR_UNLOAD_V		    0x1UL
-	#define ASYNC_EVENT_CMPL_FUNC_DRVR_UNLOAD_OPAQUE_MASK      0xfeUL
-	#define ASYNC_EVENT_CMPL_FUNC_DRVR_UNLOAD_OPAQUE_SFT       1
-	u8 timestamp_lo;
-	__le16 timestamp_hi;
-	__le32 event_data1;
-	#define ASYNC_EVENT_CMPL_FUNC_DRVR_UNLOAD_EVENT_DATA1_FUNC_ID_MASK 0xffffUL
-	#define ASYNC_EVENT_CMPL_FUNC_DRVR_UNLOAD_EVENT_DATA1_FUNC_ID_SFT 0
-};
-
-/* HWRM Asynchronous Event Completion Record for Function Driver load (16 bytes) */
-struct hwrm_async_event_cmpl_func_drvr_load {
-	__le16 type;
-	#define ASYNC_EVENT_CMPL_FUNC_DRVR_LOAD_TYPE_MASK	    0x3fUL
-	#define ASYNC_EVENT_CMPL_FUNC_DRVR_LOAD_TYPE_SFT	    0
-	#define ASYNC_EVENT_CMPL_FUNC_DRVR_LOAD_TYPE_HWRM_ASYNC_EVENT 0x2eUL
-	__le16 event_id;
-	#define ASYNC_EVENT_CMPL_FUNC_DRVR_LOAD_EVENT_ID_FUNC_DRVR_LOAD 0x11UL
-	__le32 event_data2;
-	u8 opaque_v;
-	#define ASYNC_EVENT_CMPL_FUNC_DRVR_LOAD_V		    0x1UL
-	#define ASYNC_EVENT_CMPL_FUNC_DRVR_LOAD_OPAQUE_MASK	    0xfeUL
-	#define ASYNC_EVENT_CMPL_FUNC_DRVR_LOAD_OPAQUE_SFT	    1
-	u8 timestamp_lo;
-	__le16 timestamp_hi;
-	__le32 event_data1;
-	#define ASYNC_EVENT_CMPL_FUNC_DRVR_LOAD_EVENT_DATA1_FUNC_ID_MASK 0xffffUL
-	#define ASYNC_EVENT_CMPL_FUNC_DRVR_LOAD_EVENT_DATA1_FUNC_ID_SFT 0
-};
-
-/* HWRM Asynchronous Event Completion Record to indicate completion of FLR related processing (16 bytes) */
-struct hwrm_async_event_cmpl_func_flr_proc_cmplt {
-	__le16 type;
-	#define ASYNC_EVENT_CMPL_FUNC_FLR_PROC_CMPLT_TYPE_MASK     0x3fUL
-	#define ASYNC_EVENT_CMPL_FUNC_FLR_PROC_CMPLT_TYPE_SFT      0
-	#define ASYNC_EVENT_CMPL_FUNC_FLR_PROC_CMPLT_TYPE_HWRM_ASYNC_EVENT 0x2eUL
-	__le16 event_id;
-	#define ASYNC_EVENT_CMPL_FUNC_FLR_PROC_CMPLT_EVENT_ID_FUNC_FLR_PROC_CMPLT 0x12UL
-	__le32 event_data2;
-	u8 opaque_v;
-	#define ASYNC_EVENT_CMPL_FUNC_FLR_PROC_CMPLT_V		    0x1UL
-	#define ASYNC_EVENT_CMPL_FUNC_FLR_PROC_CMPLT_OPAQUE_MASK   0xfeUL
-	#define ASYNC_EVENT_CMPL_FUNC_FLR_PROC_CMPLT_OPAQUE_SFT    1
-	u8 timestamp_lo;
-	__le16 timestamp_hi;
-	__le32 event_data1;
-	#define ASYNC_EVENT_CMPL_FUNC_FLR_PROC_CMPLT_EVENT_DATA1_FUNC_ID_MASK 0xffffUL
-	#define ASYNC_EVENT_CMPL_FUNC_FLR_PROC_CMPLT_EVENT_DATA1_FUNC_ID_SFT 0
-};
-
-/* HWRM Asynchronous Event Completion Record for PF Driver Unload (16 bytes) */
+	#define ASYNC_EVENT_CMPL_LINK_SPEED_CFG_CHANGE_OPAQUE_SFT 1
+	u8	timestamp_lo;
+	__le16	timestamp_hi;
+	__le32	event_data1;
+	#define ASYNC_EVENT_CMPL_LINK_SPEED_CFG_CHANGE_EVENT_DATA1_PORT_ID_MASK                     0xffffUL
+	#define ASYNC_EVENT_CMPL_LINK_SPEED_CFG_CHANGE_EVENT_DATA1_PORT_ID_SFT                      0
+	#define ASYNC_EVENT_CMPL_LINK_SPEED_CFG_CHANGE_EVENT_DATA1_SUPPORTED_LINK_SPEEDS_CHANGE     0x10000UL
+	#define ASYNC_EVENT_CMPL_LINK_SPEED_CFG_CHANGE_EVENT_DATA1_ILLEGAL_LINK_SPEED_CFG           0x20000UL
+};
+
+/* hwrm_async_event_cmpl_pf_drvr_unload (size:128b/16B) */
 struct hwrm_async_event_cmpl_pf_drvr_unload {
-	__le16 type;
-	#define ASYNC_EVENT_CMPL_PF_DRVR_UNLOAD_TYPE_MASK	    0x3fUL
-	#define ASYNC_EVENT_CMPL_PF_DRVR_UNLOAD_TYPE_SFT	    0
-	#define ASYNC_EVENT_CMPL_PF_DRVR_UNLOAD_TYPE_HWRM_ASYNC_EVENT 0x2eUL
-	__le16 event_id;
+	__le16	type;
+	#define ASYNC_EVENT_CMPL_PF_DRVR_UNLOAD_TYPE_MASK            0x3fUL
+	#define ASYNC_EVENT_CMPL_PF_DRVR_UNLOAD_TYPE_SFT             0
+	#define ASYNC_EVENT_CMPL_PF_DRVR_UNLOAD_TYPE_HWRM_ASYNC_EVENT  0x2eUL
+	#define ASYNC_EVENT_CMPL_PF_DRVR_UNLOAD_TYPE_LAST             ASYNC_EVENT_CMPL_PF_DRVR_UNLOAD_TYPE_HWRM_ASYNC_EVENT
+	__le16	event_id;
 	#define ASYNC_EVENT_CMPL_PF_DRVR_UNLOAD_EVENT_ID_PF_DRVR_UNLOAD 0x20UL
-	__le32 event_data2;
-	u8 opaque_v;
-	#define ASYNC_EVENT_CMPL_PF_DRVR_UNLOAD_V		    0x1UL
-	#define ASYNC_EVENT_CMPL_PF_DRVR_UNLOAD_OPAQUE_MASK	    0xfeUL
-	#define ASYNC_EVENT_CMPL_PF_DRVR_UNLOAD_OPAQUE_SFT	    1
-	u8 timestamp_lo;
-	__le16 timestamp_hi;
-	__le32 event_data1;
+	#define ASYNC_EVENT_CMPL_PF_DRVR_UNLOAD_EVENT_ID_LAST          ASYNC_EVENT_CMPL_PF_DRVR_UNLOAD_EVENT_ID_PF_DRVR_UNLOAD
+	__le32	event_data2;
+	u8	opaque_v;
+	#define ASYNC_EVENT_CMPL_PF_DRVR_UNLOAD_V          0x1UL
+	#define ASYNC_EVENT_CMPL_PF_DRVR_UNLOAD_OPAQUE_MASK 0xfeUL
+	#define ASYNC_EVENT_CMPL_PF_DRVR_UNLOAD_OPAQUE_SFT 1
+	u8	timestamp_lo;
+	__le16	timestamp_hi;
+	__le32	event_data1;
 	#define ASYNC_EVENT_CMPL_PF_DRVR_UNLOAD_EVENT_DATA1_FUNC_ID_MASK 0xffffUL
 	#define ASYNC_EVENT_CMPL_PF_DRVR_UNLOAD_EVENT_DATA1_FUNC_ID_SFT 0
-	#define ASYNC_EVENT_CMPL_PF_DRVR_UNLOAD_EVENT_DATA1_PORT_MASK 0x70000UL
-	#define ASYNC_EVENT_CMPL_PF_DRVR_UNLOAD_EVENT_DATA1_PORT_SFT 16
-};
-
-/* HWRM Asynchronous Event Completion Record for PF Driver load (16 bytes) */
-struct hwrm_async_event_cmpl_pf_drvr_load {
-	__le16 type;
-	#define ASYNC_EVENT_CMPL_PF_DRVR_LOAD_TYPE_MASK	    0x3fUL
-	#define ASYNC_EVENT_CMPL_PF_DRVR_LOAD_TYPE_SFT		    0
-	#define ASYNC_EVENT_CMPL_PF_DRVR_LOAD_TYPE_HWRM_ASYNC_EVENT 0x2eUL
-	__le16 event_id;
-	#define ASYNC_EVENT_CMPL_PF_DRVR_LOAD_EVENT_ID_PF_DRVR_LOAD 0x21UL
-	__le32 event_data2;
-	u8 opaque_v;
-	#define ASYNC_EVENT_CMPL_PF_DRVR_LOAD_V		    0x1UL
-	#define ASYNC_EVENT_CMPL_PF_DRVR_LOAD_OPAQUE_MASK	    0xfeUL
-	#define ASYNC_EVENT_CMPL_PF_DRVR_LOAD_OPAQUE_SFT	    1
-	u8 timestamp_lo;
-	__le16 timestamp_hi;
-	__le32 event_data1;
-	#define ASYNC_EVENT_CMPL_PF_DRVR_LOAD_EVENT_DATA1_FUNC_ID_MASK 0xffffUL
-	#define ASYNC_EVENT_CMPL_PF_DRVR_LOAD_EVENT_DATA1_FUNC_ID_SFT 0
-	#define ASYNC_EVENT_CMPL_PF_DRVR_LOAD_EVENT_DATA1_PORT_MASK 0x70000UL
-	#define ASYNC_EVENT_CMPL_PF_DRVR_LOAD_EVENT_DATA1_PORT_SFT 16
-};
-
-/* HWRM Asynchronous Event Completion Record for VF FLR (16 bytes) */
-struct hwrm_async_event_cmpl_vf_flr {
-	__le16 type;
-	#define ASYNC_EVENT_CMPL_VF_FLR_TYPE_MASK		    0x3fUL
-	#define ASYNC_EVENT_CMPL_VF_FLR_TYPE_SFT		    0
-	#define ASYNC_EVENT_CMPL_VF_FLR_TYPE_HWRM_ASYNC_EVENT     0x2eUL
-	__le16 event_id;
-	#define ASYNC_EVENT_CMPL_VF_FLR_EVENT_ID_VF_FLR	   0x30UL
-	__le32 event_data2;
-	u8 opaque_v;
-	#define ASYNC_EVENT_CMPL_VF_FLR_V			    0x1UL
-	#define ASYNC_EVENT_CMPL_VF_FLR_OPAQUE_MASK		    0xfeUL
-	#define ASYNC_EVENT_CMPL_VF_FLR_OPAQUE_SFT		    1
-	u8 timestamp_lo;
-	__le16 timestamp_hi;
-	__le32 event_data1;
-	#define ASYNC_EVENT_CMPL_VF_FLR_EVENT_DATA1_VF_ID_MASK     0xffffUL
-	#define ASYNC_EVENT_CMPL_VF_FLR_EVENT_DATA1_VF_ID_SFT      0
-};
-
-/* HWRM Asynchronous Event Completion Record for VF MAC Addr change (16 bytes) */
-struct hwrm_async_event_cmpl_vf_mac_addr_change {
-	__le16 type;
-	#define ASYNC_EVENT_CMPL_VF_MAC_ADDR_CHANGE_TYPE_MASK      0x3fUL
-	#define ASYNC_EVENT_CMPL_VF_MAC_ADDR_CHANGE_TYPE_SFT       0
-	#define ASYNC_EVENT_CMPL_VF_MAC_ADDR_CHANGE_TYPE_HWRM_ASYNC_EVENT 0x2eUL
-	__le16 event_id;
-	#define ASYNC_EVENT_CMPL_VF_MAC_ADDR_CHANGE_EVENT_ID_VF_MAC_ADDR_CHANGE 0x31UL
-	__le32 event_data2;
-	u8 opaque_v;
-	#define ASYNC_EVENT_CMPL_VF_MAC_ADDR_CHANGE_V		    0x1UL
-	#define ASYNC_EVENT_CMPL_VF_MAC_ADDR_CHANGE_OPAQUE_MASK    0xfeUL
-	#define ASYNC_EVENT_CMPL_VF_MAC_ADDR_CHANGE_OPAQUE_SFT     1
-	u8 timestamp_lo;
-	__le16 timestamp_hi;
-	__le32 event_data1;
-	#define ASYNC_EVENT_CMPL_VF_MAC_ADDR_CHANGE_EVENT_DATA1_VF_ID_MASK 0xffffUL
-	#define ASYNC_EVENT_CMPL_VF_MAC_ADDR_CHANGE_EVENT_DATA1_VF_ID_SFT 0
-};
-
-/* HWRM Asynchronous Event Completion Record for PF-VF communication status change (16 bytes) */
-struct hwrm_async_event_cmpl_pf_vf_comm_status_change {
-	__le16 type;
-	#define ASYNC_EVENT_CMPL_PF_VF_COMM_STATUS_CHANGE_TYPE_MASK 0x3fUL
-	#define ASYNC_EVENT_CMPL_PF_VF_COMM_STATUS_CHANGE_TYPE_SFT 0
-	#define ASYNC_EVENT_CMPL_PF_VF_COMM_STATUS_CHANGE_TYPE_HWRM_ASYNC_EVENT 0x2eUL
-	__le16 event_id;
-	#define ASYNC_EVENT_CMPL_PF_VF_COMM_STATUS_CHANGE_EVENT_ID_PF_VF_COMM_STATUS_CHANGE 0x32UL
-	__le32 event_data2;
-	u8 opaque_v;
-	#define ASYNC_EVENT_CMPL_PF_VF_COMM_STATUS_CHANGE_V	    0x1UL
-	#define ASYNC_EVENT_CMPL_PF_VF_COMM_STATUS_CHANGE_OPAQUE_MASK 0xfeUL
-	#define ASYNC_EVENT_CMPL_PF_VF_COMM_STATUS_CHANGE_OPAQUE_SFT 1
-	u8 timestamp_lo;
-	__le16 timestamp_hi;
-	__le32 event_data1;
-	#define ASYNC_EVENT_CMPL_PF_VF_COMM_STATUS_CHANGE_EVENT_DATA1_COMM_ESTABLISHED 0x1UL
-};
-
-/* HWRM Asynchronous Event Completion Record for VF configuration change (16 bytes) */
+	#define ASYNC_EVENT_CMPL_PF_DRVR_UNLOAD_EVENT_DATA1_PORT_MASK   0x70000UL
+	#define ASYNC_EVENT_CMPL_PF_DRVR_UNLOAD_EVENT_DATA1_PORT_SFT    16
+};
+
+/* hwrm_async_event_cmpl_vf_cfg_change (size:128b/16B) */
 struct hwrm_async_event_cmpl_vf_cfg_change {
-	__le16 type;
-	#define ASYNC_EVENT_CMPL_VF_CFG_CHANGE_TYPE_MASK	    0x3fUL
-	#define ASYNC_EVENT_CMPL_VF_CFG_CHANGE_TYPE_SFT	    0
-	#define ASYNC_EVENT_CMPL_VF_CFG_CHANGE_TYPE_HWRM_ASYNC_EVENT 0x2eUL
-	__le16 event_id;
+	__le16	type;
+	#define ASYNC_EVENT_CMPL_VF_CFG_CHANGE_TYPE_MASK            0x3fUL
+	#define ASYNC_EVENT_CMPL_VF_CFG_CHANGE_TYPE_SFT             0
+	#define ASYNC_EVENT_CMPL_VF_CFG_CHANGE_TYPE_HWRM_ASYNC_EVENT  0x2eUL
+	#define ASYNC_EVENT_CMPL_VF_CFG_CHANGE_TYPE_LAST             ASYNC_EVENT_CMPL_VF_CFG_CHANGE_TYPE_HWRM_ASYNC_EVENT
+	__le16	event_id;
 	#define ASYNC_EVENT_CMPL_VF_CFG_CHANGE_EVENT_ID_VF_CFG_CHANGE 0x33UL
-	__le32 event_data2;
-	u8 opaque_v;
-	#define ASYNC_EVENT_CMPL_VF_CFG_CHANGE_V		    0x1UL
-	#define ASYNC_EVENT_CMPL_VF_CFG_CHANGE_OPAQUE_MASK	    0xfeUL
-	#define ASYNC_EVENT_CMPL_VF_CFG_CHANGE_OPAQUE_SFT	    1
-	u8 timestamp_lo;
-	__le16 timestamp_hi;
-	__le32 event_data1;
-	#define ASYNC_EVENT_CMPL_VF_CFG_CHANGE_EVENT_DATA1_MTU_CHANGE 0x1UL
-	#define ASYNC_EVENT_CMPL_VF_CFG_CHANGE_EVENT_DATA1_MRU_CHANGE 0x2UL
-	#define ASYNC_EVENT_CMPL_VF_CFG_CHANGE_EVENT_DATA1_DFLT_MAC_ADDR_CHANGE 0x4UL
-	#define ASYNC_EVENT_CMPL_VF_CFG_CHANGE_EVENT_DATA1_DFLT_VLAN_CHANGE 0x8UL
-};
-
-/* HWRM Asynchronous Event Completion Record for HWRM Error (16 bytes) */
-struct hwrm_async_event_cmpl_hwrm_error {
-	__le16 type;
-	#define ASYNC_EVENT_CMPL_HWRM_ERROR_TYPE_MASK		    0x3fUL
-	#define ASYNC_EVENT_CMPL_HWRM_ERROR_TYPE_SFT		    0
-	#define ASYNC_EVENT_CMPL_HWRM_ERROR_TYPE_HWRM_ASYNC_EVENT 0x2eUL
-	__le16 event_id;
-	#define ASYNC_EVENT_CMPL_HWRM_ERROR_EVENT_ID_HWRM_ERROR   0xffUL
-	__le32 event_data2;
-	#define ASYNC_EVENT_CMPL_HWRM_ERROR_EVENT_DATA2_SEVERITY_MASK 0xffUL
-	#define ASYNC_EVENT_CMPL_HWRM_ERROR_EVENT_DATA2_SEVERITY_SFT 0
-	#define ASYNC_EVENT_CMPL_HWRM_ERROR_EVENT_DATA2_SEVERITY_WARNING 0x0UL
-	#define ASYNC_EVENT_CMPL_HWRM_ERROR_EVENT_DATA2_SEVERITY_NONFATAL 0x1UL
-	#define ASYNC_EVENT_CMPL_HWRM_ERROR_EVENT_DATA2_SEVERITY_FATAL 0x2UL
-	#define ASYNC_EVENT_CMPL_HWRM_ERROR_EVENT_DATA2_SEVERITY_LAST    ASYNC_EVENT_CMPL_HWRM_ERROR_EVENT_DATA2_SEVERITY_FATAL
-	u8 opaque_v;
-	#define ASYNC_EVENT_CMPL_HWRM_ERROR_V			    0x1UL
-	#define ASYNC_EVENT_CMPL_HWRM_ERROR_OPAQUE_MASK	    0xfeUL
-	#define ASYNC_EVENT_CMPL_HWRM_ERROR_OPAQUE_SFT		    1
-	u8 timestamp_lo;
-	__le16 timestamp_hi;
-	__le32 event_data1;
-	#define ASYNC_EVENT_CMPL_HWRM_ERROR_EVENT_DATA1_TIMESTAMP  0x1UL
-};
-
-/* hwrm_ver_get */
-/* Input (24 bytes) */
-struct hwrm_ver_get_input {
-	__le16 req_type;
-	__le16 cmpl_ring;
-	__le16 seq_id;
-	__le16 target_id;
-	__le64 resp_addr;
-	u8 hwrm_intf_maj;
-	u8 hwrm_intf_min;
-	u8 hwrm_intf_upd;
-	u8 unused_0[5];
-};
-
-/* Output (128 bytes) */
-struct hwrm_ver_get_output {
-	__le16 error_code;
-	__le16 req_type;
-	__le16 seq_id;
-	__le16 resp_len;
-	u8 hwrm_intf_maj;
-	u8 hwrm_intf_min;
-	u8 hwrm_intf_upd;
-	u8 hwrm_intf_rsvd;
-	u8 hwrm_fw_maj;
-	u8 hwrm_fw_min;
-	u8 hwrm_fw_bld;
-	u8 hwrm_fw_rsvd;
-	u8 mgmt_fw_maj;
-	u8 mgmt_fw_min;
-	u8 mgmt_fw_bld;
-	u8 mgmt_fw_rsvd;
-	u8 netctrl_fw_maj;
-	u8 netctrl_fw_min;
-	u8 netctrl_fw_bld;
-	u8 netctrl_fw_rsvd;
-	__le32 dev_caps_cfg;
-	#define VER_GET_RESP_DEV_CAPS_CFG_SECURE_FW_UPD_SUPPORTED  0x1UL
-	#define VER_GET_RESP_DEV_CAPS_CFG_FW_DCBX_AGENT_SUPPORTED  0x2UL
-	#define VER_GET_RESP_DEV_CAPS_CFG_SHORT_CMD_SUPPORTED      0x4UL
-	#define VER_GET_RESP_DEV_CAPS_CFG_SHORT_CMD_REQUIRED       0x8UL
-	u8 roce_fw_maj;
-	u8 roce_fw_min;
-	u8 roce_fw_bld;
-	u8 roce_fw_rsvd;
-	char hwrm_fw_name[16];
-	char mgmt_fw_name[16];
-	char netctrl_fw_name[16];
-	__le32 reserved2[4];
-	char roce_fw_name[16];
-	__le16 chip_num;
-	u8 chip_rev;
-	u8 chip_metal;
-	u8 chip_bond_id;
-	u8 chip_platform_type;
-	#define VER_GET_RESP_CHIP_PLATFORM_TYPE_ASIC		   0x0UL
-	#define VER_GET_RESP_CHIP_PLATFORM_TYPE_FPGA		   0x1UL
-	#define VER_GET_RESP_CHIP_PLATFORM_TYPE_PALLADIUM	   0x2UL
-	__le16 max_req_win_len;
-	__le16 max_resp_len;
-	__le16 def_req_timeout;
-	u8 init_pending;
-	#define VER_GET_RESP_INIT_PENDING_DEV_NOT_RDY		    0x1UL
-	u8 unused_0;
-	u8 unused_1;
-	u8 valid;
-};
-
-/* hwrm_func_reset */
-/* Input (24 bytes) */
+	#define ASYNC_EVENT_CMPL_VF_CFG_CHANGE_EVENT_ID_LAST         ASYNC_EVENT_CMPL_VF_CFG_CHANGE_EVENT_ID_VF_CFG_CHANGE
+	__le32	event_data2;
+	u8	opaque_v;
+	#define ASYNC_EVENT_CMPL_VF_CFG_CHANGE_V          0x1UL
+	#define ASYNC_EVENT_CMPL_VF_CFG_CHANGE_OPAQUE_MASK 0xfeUL
+	#define ASYNC_EVENT_CMPL_VF_CFG_CHANGE_OPAQUE_SFT 1
+	u8	timestamp_lo;
+	__le16	timestamp_hi;
+	__le32	event_data1;
+	#define ASYNC_EVENT_CMPL_VF_CFG_CHANGE_EVENT_DATA1_MTU_CHANGE               0x1UL
+	#define ASYNC_EVENT_CMPL_VF_CFG_CHANGE_EVENT_DATA1_MRU_CHANGE               0x2UL
+	#define ASYNC_EVENT_CMPL_VF_CFG_CHANGE_EVENT_DATA1_DFLT_MAC_ADDR_CHANGE     0x4UL
+	#define ASYNC_EVENT_CMPL_VF_CFG_CHANGE_EVENT_DATA1_DFLT_VLAN_CHANGE         0x8UL
+};
+
+/* hwrm_func_reset_input (size:192b/24B) */
 struct hwrm_func_reset_input {
-	__le16 req_type;
-	__le16 cmpl_ring;
-	__le16 seq_id;
-	__le16 target_id;
-	__le64 resp_addr;
-	__le32 enables;
-	#define FUNC_RESET_REQ_ENABLES_VF_ID_VALID		    0x1UL
-	__le16 vf_id;
-	u8 func_reset_level;
-	#define FUNC_RESET_REQ_FUNC_RESET_LEVEL_RESETALL	   0x0UL
-	#define FUNC_RESET_REQ_FUNC_RESET_LEVEL_RESETME	   0x1UL
-	#define FUNC_RESET_REQ_FUNC_RESET_LEVEL_RESETCHILDREN     0x2UL
-	#define FUNC_RESET_REQ_FUNC_RESET_LEVEL_RESETVF	   0x3UL
-	u8 unused_0;
-};
-
-/* Output (16 bytes) */
+	__le16	req_type;
+	__le16	cmpl_ring;
+	__le16	seq_id;
+	__le16	target_id;
+	__le64	resp_addr;
+	__le32	enables;
+	#define FUNC_RESET_REQ_ENABLES_VF_ID_VALID     0x1UL
+	__le16	vf_id;
+	u8	func_reset_level;
+	#define FUNC_RESET_REQ_FUNC_RESET_LEVEL_RESETALL      0x0UL
+	#define FUNC_RESET_REQ_FUNC_RESET_LEVEL_RESETME       0x1UL
+	#define FUNC_RESET_REQ_FUNC_RESET_LEVEL_RESETCHILDREN 0x2UL
+	#define FUNC_RESET_REQ_FUNC_RESET_LEVEL_RESETVF       0x3UL
+	#define FUNC_RESET_REQ_FUNC_RESET_LEVEL_LAST         FUNC_RESET_REQ_FUNC_RESET_LEVEL_RESETVF
+	u8	unused_0;
+};
+
+/* hwrm_func_reset_output (size:128b/16B) */
 struct hwrm_func_reset_output {
-	__le16 error_code;
-	__le16 req_type;
-	__le16 seq_id;
-	__le16 resp_len;
-	__le32 unused_0;
-	u8 unused_1;
-	u8 unused_2;
-	u8 unused_3;
-	u8 valid;
-};
-
-/* hwrm_func_getfid */
-/* Input (24 bytes) */
+	__le16	error_code;
+	__le16	req_type;
+	__le16	seq_id;
+	__le16	resp_len;
+	u8	unused_0[7];
+	u8	valid;
+};
+
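[Editorial aside: every HWRM request begins with the same header fields (req_type, cmpl_ring, seq_id, target_id, resp_addr) followed by command-specific fields, as the two structs above illustrate. A sketch of filling a "reset one VF" request, using local stand-ins for the kernel types; the req_type opcode is a placeholder not taken from this header, and a real driver would convert multi-byte fields with cpu_to_le16()/cpu_to_le32().]

	#include <stdint.h>
	#include <string.h>

	typedef uint16_t __le16;	/* stand-ins for the kernel types */
	typedef uint32_t __le32;
	typedef uint64_t __le64;
	typedef uint8_t  u8;

	struct hwrm_func_reset_input {
		__le16	req_type;
		__le16	cmpl_ring;
		__le16	seq_id;
		__le16	target_id;
		__le64	resp_addr;
		__le32	enables;
		__le16	vf_id;
		u8	func_reset_level;
		u8	unused_0;
	};

	#define FUNC_RESET_REQ_ENABLES_VF_ID_VALID	0x1UL
	#define FUNC_RESET_REQ_FUNC_RESET_LEVEL_RESETVF	0x3UL

	static void build_reset_vf(struct hwrm_func_reset_input *req,
				   uint16_t vf_id)
	{
		memset(req, 0, sizeof(*req));
		req->req_type = 0x11;	/* hypothetical opcode for FUNC_RESET */
		req->enables  = FUNC_RESET_REQ_ENABLES_VF_ID_VALID;
		req->vf_id    = vf_id;
		req->func_reset_level =
			FUNC_RESET_REQ_FUNC_RESET_LEVEL_RESETVF;
	}

	int main(void)
	{
		struct hwrm_func_reset_input req;

		build_reset_vf(&req, 5);
		return req.func_reset_level == 0x3 ? 0 : 1;
	}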
+/* hwrm_func_getfid_input (size:192b/24B) */
 struct hwrm_func_getfid_input {
-	__le16 req_type;
-	__le16 cmpl_ring;
-	__le16 seq_id;
-	__le16 target_id;
-	__le64 resp_addr;
-	__le32 enables;
-	#define FUNC_GETFID_REQ_ENABLES_PCI_ID			    0x1UL
-	__le16 pci_id;
-	__le16 unused_0;
-};
-
-/* Output (16 bytes) */
+	__le16	req_type;
+	__le16	cmpl_ring;
+	__le16	seq_id;
+	__le16	target_id;
+	__le64	resp_addr;
+	__le32	enables;
+	#define FUNC_GETFID_REQ_ENABLES_PCI_ID     0x1UL
+	__le16	pci_id;
+	u8	unused_0[2];
+};
+
+/* hwrm_func_getfid_output (size:128b/16B) */
 struct hwrm_func_getfid_output {
-	__le16 error_code;
-	__le16 req_type;
-	__le16 seq_id;
-	__le16 resp_len;
-	__le16 fid;
-	u8 unused_0;
-	u8 unused_1;
-	u8 unused_2;
-	u8 unused_3;
-	u8 unused_4;
-	u8 valid;
-};
-
-/* hwrm_func_vf_alloc */
-/* Input (24 bytes) */
+	__le16	error_code;
+	__le16	req_type;
+	__le16	seq_id;
+	__le16	resp_len;
+	__le16	fid;
+	u8	unused_0[5];
+	u8	valid;
+};
+
+/* hwrm_func_vf_alloc_input (size:192b/24B) */
 struct hwrm_func_vf_alloc_input {
-	__le16 req_type;
-	__le16 cmpl_ring;
-	__le16 seq_id;
-	__le16 target_id;
-	__le64 resp_addr;
-	__le32 enables;
-	#define FUNC_VF_ALLOC_REQ_ENABLES_FIRST_VF_ID		    0x1UL
-	__le16 first_vf_id;
-	__le16 num_vfs;
-};
-
-/* Output (16 bytes) */
+	__le16	req_type;
+	__le16	cmpl_ring;
+	__le16	seq_id;
+	__le16	target_id;
+	__le64	resp_addr;
+	__le32	enables;
+	#define FUNC_VF_ALLOC_REQ_ENABLES_FIRST_VF_ID     0x1UL
+	__le16	first_vf_id;
+	__le16	num_vfs;
+};
+
+/* hwrm_func_vf_alloc_output (size:128b/16B) */
 struct hwrm_func_vf_alloc_output {
-	__le16 error_code;
-	__le16 req_type;
-	__le16 seq_id;
-	__le16 resp_len;
-	__le16 first_vf_id;
-	u8 unused_0;
-	u8 unused_1;
-	u8 unused_2;
-	u8 unused_3;
-	u8 unused_4;
-	u8 valid;
-};
-
-/* hwrm_func_vf_free */
-/* Input (24 bytes) */
+	__le16	error_code;
+	__le16	req_type;
+	__le16	seq_id;
+	__le16	resp_len;
+	__le16	first_vf_id;
+	u8	unused_0[5];
+	u8	valid;
+};
+
+/* hwrm_func_vf_free_input (size:192b/24B) */
 struct hwrm_func_vf_free_input {
-	__le16 req_type;
-	__le16 cmpl_ring;
-	__le16 seq_id;
-	__le16 target_id;
-	__le64 resp_addr;
-	__le32 enables;
-	#define FUNC_VF_FREE_REQ_ENABLES_FIRST_VF_ID		    0x1UL
-	__le16 first_vf_id;
-	__le16 num_vfs;
-};
-
-/* Output (16 bytes) */
+	__le16	req_type;
+	__le16	cmpl_ring;
+	__le16	seq_id;
+	__le16	target_id;
+	__le64	resp_addr;
+	__le32	enables;
+	#define FUNC_VF_FREE_REQ_ENABLES_FIRST_VF_ID     0x1UL
+	__le16	first_vf_id;
+	__le16	num_vfs;
+};
+
+/* hwrm_func_vf_free_output (size:128b/16B) */
 struct hwrm_func_vf_free_output {
-	__le16 error_code;
-	__le16 req_type;
-	__le16 seq_id;
-	__le16 resp_len;
-	__le32 unused_0;
-	u8 unused_1;
-	u8 unused_2;
-	u8 unused_3;
-	u8 valid;
-};
-
-/* hwrm_func_vf_cfg */
-/* Input (32 bytes) */
+	__le16	error_code;
+	__le16	req_type;
+	__le16	seq_id;
+	__le16	resp_len;
+	u8	unused_0[7];
+	u8	valid;
+};
+
+/* hwrm_func_vf_cfg_input (size:448b/56B) */
 struct hwrm_func_vf_cfg_input {
-	__le16 req_type;
-	__le16 cmpl_ring;
-	__le16 seq_id;
-	__le16 target_id;
-	__le64 resp_addr;
-	__le32 enables;
-	#define FUNC_VF_CFG_REQ_ENABLES_MTU			    0x1UL
-	#define FUNC_VF_CFG_REQ_ENABLES_GUEST_VLAN		    0x2UL
-	#define FUNC_VF_CFG_REQ_ENABLES_ASYNC_EVENT_CR		    0x4UL
-	#define FUNC_VF_CFG_REQ_ENABLES_DFLT_MAC_ADDR		    0x8UL
-	__le16 mtu;
-	__le16 guest_vlan;
-	__le16 async_event_cr;
-	u8 dflt_mac_addr[6];
-};
-
-/* Output (16 bytes) */
+	__le16	req_type;
+	__le16	cmpl_ring;
+	__le16	seq_id;
+	__le16	target_id;
+	__le64	resp_addr;
+	__le32	enables;
+	#define FUNC_VF_CFG_REQ_ENABLES_MTU                  0x1UL
+	#define FUNC_VF_CFG_REQ_ENABLES_GUEST_VLAN           0x2UL
+	#define FUNC_VF_CFG_REQ_ENABLES_ASYNC_EVENT_CR       0x4UL
+	#define FUNC_VF_CFG_REQ_ENABLES_DFLT_MAC_ADDR        0x8UL
+	#define FUNC_VF_CFG_REQ_ENABLES_NUM_RSSCOS_CTXS      0x10UL
+	#define FUNC_VF_CFG_REQ_ENABLES_NUM_CMPL_RINGS       0x20UL
+	#define FUNC_VF_CFG_REQ_ENABLES_NUM_TX_RINGS         0x40UL
+	#define FUNC_VF_CFG_REQ_ENABLES_NUM_RX_RINGS         0x80UL
+	#define FUNC_VF_CFG_REQ_ENABLES_NUM_L2_CTXS          0x100UL
+	#define FUNC_VF_CFG_REQ_ENABLES_NUM_VNICS            0x200UL
+	#define FUNC_VF_CFG_REQ_ENABLES_NUM_STAT_CTXS        0x400UL
+	#define FUNC_VF_CFG_REQ_ENABLES_NUM_HW_RING_GRPS     0x800UL
+	__le16	mtu;
+	__le16	guest_vlan;
+	__le16	async_event_cr;
+	u8	dflt_mac_addr[6];
+	__le32	flags;
+	#define FUNC_VF_CFG_REQ_FLAGS_TX_ASSETS_TEST             0x1UL
+	#define FUNC_VF_CFG_REQ_FLAGS_RX_ASSETS_TEST             0x2UL
+	#define FUNC_VF_CFG_REQ_FLAGS_CMPL_ASSETS_TEST           0x4UL
+	#define FUNC_VF_CFG_REQ_FLAGS_RSSCOS_CTX_ASSETS_TEST     0x8UL
+	#define FUNC_VF_CFG_REQ_FLAGS_RING_GRP_ASSETS_TEST       0x10UL
+	#define FUNC_VF_CFG_REQ_FLAGS_STAT_CTX_ASSETS_TEST       0x20UL
+	#define FUNC_VF_CFG_REQ_FLAGS_VNIC_ASSETS_TEST           0x40UL
+	#define FUNC_VF_CFG_REQ_FLAGS_L2_CTX_ASSETS_TEST         0x80UL
+	__le16	num_rsscos_ctxs;
+	__le16	num_cmpl_rings;
+	__le16	num_tx_rings;
+	__le16	num_rx_rings;
+	__le16	num_l2_ctxs;
+	__le16	num_vnics;
+	__le16	num_stat_ctxs;
+	__le16	num_hw_ring_grps;
+	u8	unused_0[4];
+};
+
+/* hwrm_func_vf_cfg_output (size:128b/16B) */
 struct hwrm_func_vf_cfg_output {
-	__le16 error_code;
-	__le16 req_type;
-	__le16 seq_id;
-	__le16 resp_len;
-	__le32 unused_0;
-	u8 unused_1;
-	u8 unused_2;
-	u8 unused_3;
-	u8 valid;
-};
-
-/* hwrm_func_qcaps */
-/* Input (24 bytes) */
+	__le16	error_code;
+	__le16	req_type;
+	__le16	seq_id;
+	__le16	resp_len;
+	u8	unused_0[7];
+	u8	valid;
+};
+
+/* hwrm_func_qcaps_input (size:192b/24B) */
 struct hwrm_func_qcaps_input {
-	__le16 req_type;
-	__le16 cmpl_ring;
-	__le16 seq_id;
-	__le16 target_id;
-	__le64 resp_addr;
-	__le16 fid;
-	__le16 unused_0[3];
+	__le16	req_type;
+	__le16	cmpl_ring;
+	__le16	seq_id;
+	__le16	target_id;
+	__le64	resp_addr;
+	__le16	fid;
+	u8	unused_0[6];
 };
 
-/* Output (80 bytes) */
+/* hwrm_func_qcaps_output (size:640b/80B) */
 struct hwrm_func_qcaps_output {
-	__le16 error_code;
-	__le16 req_type;
-	__le16 seq_id;
-	__le16 resp_len;
-	__le16 fid;
-	__le16 port_id;
-	__le32 flags;
-	#define FUNC_QCAPS_RESP_FLAGS_PUSH_MODE_SUPPORTED	    0x1UL
-	#define FUNC_QCAPS_RESP_FLAGS_GLOBAL_MSIX_AUTOMASKING      0x2UL
-	#define FUNC_QCAPS_RESP_FLAGS_PTP_SUPPORTED		    0x4UL
-	#define FUNC_QCAPS_RESP_FLAGS_ROCE_V1_SUPPORTED	    0x8UL
-	#define FUNC_QCAPS_RESP_FLAGS_ROCE_V2_SUPPORTED	    0x10UL
-	#define FUNC_QCAPS_RESP_FLAGS_WOL_MAGICPKT_SUPPORTED       0x20UL
-	#define FUNC_QCAPS_RESP_FLAGS_WOL_BMP_SUPPORTED	    0x40UL
-	#define FUNC_QCAPS_RESP_FLAGS_TX_RING_RL_SUPPORTED	    0x80UL
-	#define FUNC_QCAPS_RESP_FLAGS_TX_BW_CFG_SUPPORTED	    0x100UL
-	#define FUNC_QCAPS_RESP_FLAGS_VF_TX_RING_RL_SUPPORTED      0x200UL
-	#define FUNC_QCAPS_RESP_FLAGS_VF_BW_CFG_SUPPORTED	    0x400UL
-	#define FUNC_QCAPS_RESP_FLAGS_STD_TX_RING_MODE_SUPPORTED   0x800UL
-	u8 mac_address[6];
-	__le16 max_rsscos_ctx;
-	__le16 max_cmpl_rings;
-	__le16 max_tx_rings;
-	__le16 max_rx_rings;
-	__le16 max_l2_ctxs;
-	__le16 max_vnics;
-	__le16 first_vf_id;
-	__le16 max_vfs;
-	__le16 max_stat_ctx;
-	__le32 max_encap_records;
-	__le32 max_decap_records;
-	__le32 max_tx_em_flows;
-	__le32 max_tx_wm_flows;
-	__le32 max_rx_em_flows;
-	__le32 max_rx_wm_flows;
-	__le32 max_mcast_filters;
-	__le32 max_flow_id;
-	__le32 max_hw_ring_grps;
-	__le16 max_sp_tx_rings;
-	u8 unused_0;
-	u8 valid;
-};
-
-/* hwrm_func_qcfg */
-/* Input (24 bytes) */
+	__le16	error_code;
+	__le16	req_type;
+	__le16	seq_id;
+	__le16	resp_len;
+	__le16	fid;
+	__le16	port_id;
+	__le32	flags;
+	#define FUNC_QCAPS_RESP_FLAGS_PUSH_MODE_SUPPORTED            0x1UL
+	#define FUNC_QCAPS_RESP_FLAGS_GLOBAL_MSIX_AUTOMASKING        0x2UL
+	#define FUNC_QCAPS_RESP_FLAGS_PTP_SUPPORTED                  0x4UL
+	#define FUNC_QCAPS_RESP_FLAGS_ROCE_V1_SUPPORTED              0x8UL
+	#define FUNC_QCAPS_RESP_FLAGS_ROCE_V2_SUPPORTED              0x10UL
+	#define FUNC_QCAPS_RESP_FLAGS_WOL_MAGICPKT_SUPPORTED         0x20UL
+	#define FUNC_QCAPS_RESP_FLAGS_WOL_BMP_SUPPORTED              0x40UL
+	#define FUNC_QCAPS_RESP_FLAGS_TX_RING_RL_SUPPORTED           0x80UL
+	#define FUNC_QCAPS_RESP_FLAGS_TX_BW_CFG_SUPPORTED            0x100UL
+	#define FUNC_QCAPS_RESP_FLAGS_VF_TX_RING_RL_SUPPORTED        0x200UL
+	#define FUNC_QCAPS_RESP_FLAGS_VF_BW_CFG_SUPPORTED            0x400UL
+	#define FUNC_QCAPS_RESP_FLAGS_STD_TX_RING_MODE_SUPPORTED     0x800UL
+	#define FUNC_QCAPS_RESP_FLAGS_GENEVE_TUN_FLAGS_SUPPORTED     0x1000UL
+	#define FUNC_QCAPS_RESP_FLAGS_NVGRE_TUN_FLAGS_SUPPORTED      0x2000UL
+	#define FUNC_QCAPS_RESP_FLAGS_GRE_TUN_FLAGS_SUPPORTED        0x4000UL
+	#define FUNC_QCAPS_RESP_FLAGS_MPLS_TUN_FLAGS_SUPPORTED       0x8000UL
+	u8	mac_address[6];
+	__le16	max_rsscos_ctx;
+	__le16	max_cmpl_rings;
+	__le16	max_tx_rings;
+	__le16	max_rx_rings;
+	__le16	max_l2_ctxs;
+	__le16	max_vnics;
+	__le16	first_vf_id;
+	__le16	max_vfs;
+	__le16	max_stat_ctx;
+	__le32	max_encap_records;
+	__le32	max_decap_records;
+	__le32	max_tx_em_flows;
+	__le32	max_tx_wm_flows;
+	__le32	max_rx_em_flows;
+	__le32	max_rx_wm_flows;
+	__le32	max_mcast_filters;
+	__le32	max_flow_id;
+	__le32	max_hw_ring_grps;
+	__le16	max_sp_tx_rings;
+	u8	unused_0;
+	u8	valid;
+};
+
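[Editorial aside: the flags word in the qcaps response advertises per-function capabilities one bit at a time, and a driver would typically test the bits it cares about before enabling a feature. A standalone sketch under that assumption, with two flag values copied from the FUNC_QCAPS_RESP_FLAGS_* definitions above and an invented sample word.]

	#include <stdint.h>
	#include <stdio.h>

	#define FLAGS_PTP_SUPPORTED          0x4UL   /* values copied from above */
	#define FLAGS_WOL_MAGICPKT_SUPPORTED 0x20UL

	int main(void)
	{
		uint32_t flags = 0x24;	/* hypothetical qcaps response word */

		if (flags & FLAGS_PTP_SUPPORTED)
			printf("PTP supported\n");
		if (flags & FLAGS_WOL_MAGICPKT_SUPPORTED)
			printf("magic-packet WoL supported\n");
		return 0;
	}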
+/* hwrm_func_qcfg_input (size:192b/24B) */
 struct hwrm_func_qcfg_input {
-	__le16 req_type;
-	__le16 cmpl_ring;
-	__le16 seq_id;
-	__le16 target_id;
-	__le64 resp_addr;
-	__le16 fid;
-	__le16 unused_0[3];
+	__le16	req_type;
+	__le16	cmpl_ring;
+	__le16	seq_id;
+	__le16	target_id;
+	__le64	resp_addr;
+	__le16	fid;
+	u8	unused_0[6];
 };
 
-/* Output (72 bytes) */
+/* hwrm_func_qcfg_output (size:640b/80B) */
 struct hwrm_func_qcfg_output {
-	__le16 error_code;
-	__le16 req_type;
-	__le16 seq_id;
-	__le16 resp_len;
-	__le16 fid;
-	__le16 port_id;
-	__le16 vlan;
-	__le16 flags;
-	#define FUNC_QCFG_RESP_FLAGS_OOB_WOL_MAGICPKT_ENABLED      0x1UL
-	#define FUNC_QCFG_RESP_FLAGS_OOB_WOL_BMP_ENABLED	    0x2UL
-	#define FUNC_QCFG_RESP_FLAGS_FW_DCBX_AGENT_ENABLED	    0x4UL
-	#define FUNC_QCFG_RESP_FLAGS_STD_TX_RING_MODE_ENABLED      0x8UL
-	#define FUNC_QCFG_RESP_FLAGS_FW_LLDP_AGENT_ENABLED	    0x10UL
-	#define FUNC_QCFG_RESP_FLAGS_MULTI_HOST		    0x20UL
-	u8 mac_address[6];
-	__le16 pci_id;
-	__le16 alloc_rsscos_ctx;
-	__le16 alloc_cmpl_rings;
-	__le16 alloc_tx_rings;
-	__le16 alloc_rx_rings;
-	__le16 alloc_l2_ctx;
-	__le16 alloc_vnics;
-	__le16 mtu;
-	__le16 mru;
-	__le16 stat_ctx_id;
-	u8 port_partition_type;
-	#define FUNC_QCFG_RESP_PORT_PARTITION_TYPE_SPF		   0x0UL
-	#define FUNC_QCFG_RESP_PORT_PARTITION_TYPE_MPFS	   0x1UL
-	#define FUNC_QCFG_RESP_PORT_PARTITION_TYPE_NPAR1_0	   0x2UL
-	#define FUNC_QCFG_RESP_PORT_PARTITION_TYPE_NPAR1_5	   0x3UL
-	#define FUNC_QCFG_RESP_PORT_PARTITION_TYPE_NPAR2_0	   0x4UL
-	#define FUNC_QCFG_RESP_PORT_PARTITION_TYPE_UNKNOWN	   0xffUL
-	u8 port_pf_cnt;
-	#define FUNC_QCFG_RESP_PORT_PF_CNT_UNAVAIL		   0x0UL
-	__le16 dflt_vnic_id;
-	__le16 max_mtu_configured;
-	__le32 min_bw;
-	#define FUNC_QCFG_RESP_MIN_BW_BW_VALUE_MASK		    0xfffffffUL
-	#define FUNC_QCFG_RESP_MIN_BW_BW_VALUE_SFT		    0
-	#define FUNC_QCFG_RESP_MIN_BW_SCALE			    0x10000000UL
-	#define FUNC_QCFG_RESP_MIN_BW_SCALE_BITS		   (0x0UL << 28)
-	#define FUNC_QCFG_RESP_MIN_BW_SCALE_BYTES		   (0x1UL << 28)
-	#define FUNC_QCFG_RESP_MIN_BW_SCALE_LAST    FUNC_QCFG_RESP_MIN_BW_SCALE_BYTES
-	#define FUNC_QCFG_RESP_MIN_BW_BW_VALUE_UNIT_MASK	    0xe0000000UL
-	#define FUNC_QCFG_RESP_MIN_BW_BW_VALUE_UNIT_SFT	    29
-	#define FUNC_QCFG_RESP_MIN_BW_BW_VALUE_UNIT_MEGA	   (0x0UL << 29)
-	#define FUNC_QCFG_RESP_MIN_BW_BW_VALUE_UNIT_KILO	   (0x2UL << 29)
-	#define FUNC_QCFG_RESP_MIN_BW_BW_VALUE_UNIT_BASE	   (0x4UL << 29)
-	#define FUNC_QCFG_RESP_MIN_BW_BW_VALUE_UNIT_GIGA	   (0x6UL << 29)
+	__le16	error_code;
+	__le16	req_type;
+	__le16	seq_id;
+	__le16	resp_len;
+	__le16	fid;
+	__le16	port_id;
+	__le16	vlan;
+	__le16	flags;
+	#define FUNC_QCFG_RESP_FLAGS_OOB_WOL_MAGICPKT_ENABLED     0x1UL
+	#define FUNC_QCFG_RESP_FLAGS_OOB_WOL_BMP_ENABLED          0x2UL
+	#define FUNC_QCFG_RESP_FLAGS_FW_DCBX_AGENT_ENABLED        0x4UL
+	#define FUNC_QCFG_RESP_FLAGS_STD_TX_RING_MODE_ENABLED     0x8UL
+	#define FUNC_QCFG_RESP_FLAGS_FW_LLDP_AGENT_ENABLED        0x10UL
+	#define FUNC_QCFG_RESP_FLAGS_MULTI_HOST                   0x20UL
+	u8	mac_address[6];
+	__le16	pci_id;
+	__le16	alloc_rsscos_ctx;
+	__le16	alloc_cmpl_rings;
+	__le16	alloc_tx_rings;
+	__le16	alloc_rx_rings;
+	__le16	alloc_l2_ctx;
+	__le16	alloc_vnics;
+	__le16	mtu;
+	__le16	mru;
+	__le16	stat_ctx_id;
+	u8	port_partition_type;
+	#define FUNC_QCFG_RESP_PORT_PARTITION_TYPE_SPF     0x0UL
+	#define FUNC_QCFG_RESP_PORT_PARTITION_TYPE_MPFS    0x1UL
+	#define FUNC_QCFG_RESP_PORT_PARTITION_TYPE_NPAR1_0 0x2UL
+	#define FUNC_QCFG_RESP_PORT_PARTITION_TYPE_NPAR1_5 0x3UL
+	#define FUNC_QCFG_RESP_PORT_PARTITION_TYPE_NPAR2_0 0x4UL
+	#define FUNC_QCFG_RESP_PORT_PARTITION_TYPE_UNKNOWN 0xffUL
+	#define FUNC_QCFG_RESP_PORT_PARTITION_TYPE_LAST   FUNC_QCFG_RESP_PORT_PARTITION_TYPE_UNKNOWN
+	u8	port_pf_cnt;
+	#define FUNC_QCFG_RESP_PORT_PF_CNT_UNAVAIL 0x0UL
+	#define FUNC_QCFG_RESP_PORT_PF_CNT_LAST   FUNC_QCFG_RESP_PORT_PF_CNT_UNAVAIL
+	__le16	dflt_vnic_id;
+	__le16	max_mtu_configured;
+	__le32	min_bw;
+	#define FUNC_QCFG_RESP_MIN_BW_BW_VALUE_MASK             0xfffffffUL
+	#define FUNC_QCFG_RESP_MIN_BW_BW_VALUE_SFT              0
+	#define FUNC_QCFG_RESP_MIN_BW_SCALE                     0x10000000UL
+	#define FUNC_QCFG_RESP_MIN_BW_SCALE_BITS                  (0x0UL << 28)
+	#define FUNC_QCFG_RESP_MIN_BW_SCALE_BYTES                 (0x1UL << 28)
+	#define FUNC_QCFG_RESP_MIN_BW_SCALE_LAST                 FUNC_QCFG_RESP_MIN_BW_SCALE_BYTES
+	#define FUNC_QCFG_RESP_MIN_BW_BW_VALUE_UNIT_MASK        0xe0000000UL
+	#define FUNC_QCFG_RESP_MIN_BW_BW_VALUE_UNIT_SFT         29
+	#define FUNC_QCFG_RESP_MIN_BW_BW_VALUE_UNIT_MEGA          (0x0UL << 29)
+	#define FUNC_QCFG_RESP_MIN_BW_BW_VALUE_UNIT_KILO          (0x2UL << 29)
+	#define FUNC_QCFG_RESP_MIN_BW_BW_VALUE_UNIT_BASE          (0x4UL << 29)
+	#define FUNC_QCFG_RESP_MIN_BW_BW_VALUE_UNIT_GIGA          (0x6UL << 29)
 	#define FUNC_QCFG_RESP_MIN_BW_BW_VALUE_UNIT_PERCENT1_100  (0x1UL << 29)
 	#define FUNC_QCFG_RESP_MIN_BW_BW_VALUE_UNIT_INVALID       (0x7UL << 29)
-	#define FUNC_QCFG_RESP_MIN_BW_BW_VALUE_UNIT_LAST    FUNC_QCFG_RESP_MIN_BW_BW_VALUE_UNIT_INVALID
-	__le32 max_bw;
-	#define FUNC_QCFG_RESP_MAX_BW_BW_VALUE_MASK		    0xfffffffUL
-	#define FUNC_QCFG_RESP_MAX_BW_BW_VALUE_SFT		    0
-	#define FUNC_QCFG_RESP_MAX_BW_SCALE			    0x10000000UL
-	#define FUNC_QCFG_RESP_MAX_BW_SCALE_BITS		   (0x0UL << 28)
-	#define FUNC_QCFG_RESP_MAX_BW_SCALE_BYTES		   (0x1UL << 28)
-	#define FUNC_QCFG_RESP_MAX_BW_SCALE_LAST    FUNC_QCFG_RESP_MAX_BW_SCALE_BYTES
-	#define FUNC_QCFG_RESP_MAX_BW_BW_VALUE_UNIT_MASK	    0xe0000000UL
-	#define FUNC_QCFG_RESP_MAX_BW_BW_VALUE_UNIT_SFT	    29
-	#define FUNC_QCFG_RESP_MAX_BW_BW_VALUE_UNIT_MEGA	   (0x0UL << 29)
-	#define FUNC_QCFG_RESP_MAX_BW_BW_VALUE_UNIT_KILO	   (0x2UL << 29)
-	#define FUNC_QCFG_RESP_MAX_BW_BW_VALUE_UNIT_BASE	   (0x4UL << 29)
-	#define FUNC_QCFG_RESP_MAX_BW_BW_VALUE_UNIT_GIGA	   (0x6UL << 29)
+	#define FUNC_QCFG_RESP_MIN_BW_BW_VALUE_UNIT_LAST         FUNC_QCFG_RESP_MIN_BW_BW_VALUE_UNIT_INVALID
+	__le32	max_bw;
+	#define FUNC_QCFG_RESP_MAX_BW_BW_VALUE_MASK             0xfffffffUL
+	#define FUNC_QCFG_RESP_MAX_BW_BW_VALUE_SFT              0
+	#define FUNC_QCFG_RESP_MAX_BW_SCALE                     0x10000000UL
+	#define FUNC_QCFG_RESP_MAX_BW_SCALE_BITS                  (0x0UL << 28)
+	#define FUNC_QCFG_RESP_MAX_BW_SCALE_BYTES                 (0x1UL << 28)
+	#define FUNC_QCFG_RESP_MAX_BW_SCALE_LAST                 FUNC_QCFG_RESP_MAX_BW_SCALE_BYTES
+	#define FUNC_QCFG_RESP_MAX_BW_BW_VALUE_UNIT_MASK        0xe0000000UL
+	#define FUNC_QCFG_RESP_MAX_BW_BW_VALUE_UNIT_SFT         29
+	#define FUNC_QCFG_RESP_MAX_BW_BW_VALUE_UNIT_MEGA          (0x0UL << 29)
+	#define FUNC_QCFG_RESP_MAX_BW_BW_VALUE_UNIT_KILO          (0x2UL << 29)
+	#define FUNC_QCFG_RESP_MAX_BW_BW_VALUE_UNIT_BASE          (0x4UL << 29)
+	#define FUNC_QCFG_RESP_MAX_BW_BW_VALUE_UNIT_GIGA          (0x6UL << 29)
 	#define FUNC_QCFG_RESP_MAX_BW_BW_VALUE_UNIT_PERCENT1_100  (0x1UL << 29)
 	#define FUNC_QCFG_RESP_MAX_BW_BW_VALUE_UNIT_INVALID       (0x7UL << 29)
-	#define FUNC_QCFG_RESP_MAX_BW_BW_VALUE_UNIT_LAST    FUNC_QCFG_RESP_MAX_BW_BW_VALUE_UNIT_INVALID
-	u8 evb_mode;
-	#define FUNC_QCFG_RESP_EVB_MODE_NO_EVB			   0x0UL
-	#define FUNC_QCFG_RESP_EVB_MODE_VEB			   0x1UL
-	#define FUNC_QCFG_RESP_EVB_MODE_VEPA			   0x2UL
-	u8 unused_0;
-	__le16 alloc_vfs;
-	__le32 alloc_mcast_filters;
-	__le32 alloc_hw_ring_grps;
-	__le16 alloc_sp_tx_rings;
-	u8 unused_1;
-	u8 valid;
-};
-
-/* hwrm_func_vlan_cfg */
-/* Input (48 bytes) */
+	#define FUNC_QCFG_RESP_MAX_BW_BW_VALUE_UNIT_LAST         FUNC_QCFG_RESP_MAX_BW_BW_VALUE_UNIT_INVALID
+	u8	evb_mode;
+	#define FUNC_QCFG_RESP_EVB_MODE_NO_EVB 0x0UL
+	#define FUNC_QCFG_RESP_EVB_MODE_VEB    0x1UL
+	#define FUNC_QCFG_RESP_EVB_MODE_VEPA   0x2UL
+	#define FUNC_QCFG_RESP_EVB_MODE_LAST  FUNC_QCFG_RESP_EVB_MODE_VEPA
+	u8	cache_linesize;
+	#define FUNC_QCFG_RESP_CACHE_LINESIZE_CACHE_LINESIZE_64  0x0UL
+	#define FUNC_QCFG_RESP_CACHE_LINESIZE_CACHE_LINESIZE_128 0x1UL
+	#define FUNC_QCFG_RESP_CACHE_LINESIZE_LAST              FUNC_QCFG_RESP_CACHE_LINESIZE_CACHE_LINESIZE_128
+	__le16	alloc_vfs;
+	__le32	alloc_mcast_filters;
+	__le32	alloc_hw_ring_grps;
+	__le16	alloc_sp_tx_rings;
+	__le16	alloc_stat_ctx;
+	u8	unused_2[7];
+	u8	valid;
+};
+
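[Editorial aside: min_bw and max_bw pack a 28-bit bandwidth value, a scale bit (bits vs. bytes) and a 3-bit unit code into one 32-bit word, per the masks above. A sketch of the decode with the macro values copied locally; the sample word is illustrative only.]

	#include <stdint.h>
	#include <stdio.h>

	#define BW_VALUE_MASK      0xfffffffUL   /* bits 0..27: value */
	#define BW_SCALE_BYTES     0x10000000UL  /* bit 28: 0 = bits, 1 = bytes */
	#define BW_VALUE_UNIT_MASK 0xe0000000UL  /* bits 29..31: unit code */
	#define BW_VALUE_UNIT_SFT  29

	int main(void)
	{
		uint32_t max_bw = (0x6UL << 29) | 1000; /* 1000, GIGA unit code */

		uint32_t value = max_bw & BW_VALUE_MASK;
		int is_bytes   = !!(max_bw & BW_SCALE_BYTES);
		int unit       = (int)((max_bw & BW_VALUE_UNIT_MASK) >>
				       BW_VALUE_UNIT_SFT);

		printf("bw=%u scale=%s unit_code=0x%x\n",
		       value, is_bytes ? "bytes" : "bits", unit);
		return 0;
	}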
+/* hwrm_func_vlan_cfg_input (size:384b/48B) */
 struct hwrm_func_vlan_cfg_input {
-	__le16 req_type;
-	__le16 cmpl_ring;
-	__le16 seq_id;
-	__le16 target_id;
-	__le64 resp_addr;
-	__le16 fid;
-	u8 unused_0;
-	u8 unused_1;
-	__le32 enables;
-	#define FUNC_VLAN_CFG_REQ_ENABLES_STAG_VID		    0x1UL
-	#define FUNC_VLAN_CFG_REQ_ENABLES_CTAG_VID		    0x2UL
-	#define FUNC_VLAN_CFG_REQ_ENABLES_STAG_PCP		    0x4UL
-	#define FUNC_VLAN_CFG_REQ_ENABLES_CTAG_PCP		    0x8UL
-	#define FUNC_VLAN_CFG_REQ_ENABLES_STAG_TPID		    0x10UL
-	#define FUNC_VLAN_CFG_REQ_ENABLES_CTAG_TPID		    0x20UL
-	__le16 stag_vid;
-	u8 stag_pcp;
-	u8 unused_2;
-	__be16 stag_tpid;
-	__le16 ctag_vid;
-	u8 ctag_pcp;
-	u8 unused_3;
-	__be16 ctag_tpid;
-	__le32 rsvd1;
-	__le32 rsvd2;
-	__le32 unused_4;
-};
-
-/* Output (16 bytes) */
+	__le16	req_type;
+	__le16	cmpl_ring;
+	__le16	seq_id;
+	__le16	target_id;
+	__le64	resp_addr;
+	__le16	fid;
+	u8	unused_0[2];
+	__le32	enables;
+	#define FUNC_VLAN_CFG_REQ_ENABLES_STAG_VID      0x1UL
+	#define FUNC_VLAN_CFG_REQ_ENABLES_CTAG_VID      0x2UL
+	#define FUNC_VLAN_CFG_REQ_ENABLES_STAG_PCP      0x4UL
+	#define FUNC_VLAN_CFG_REQ_ENABLES_CTAG_PCP      0x8UL
+	#define FUNC_VLAN_CFG_REQ_ENABLES_STAG_TPID     0x10UL
+	#define FUNC_VLAN_CFG_REQ_ENABLES_CTAG_TPID     0x20UL
+	__le16	stag_vid;
+	u8	stag_pcp;
+	u8	unused_1;
+	__be16	stag_tpid;
+	__le16	ctag_vid;
+	u8	ctag_pcp;
+	u8	unused_2;
+	__be16	ctag_tpid;
+	__le32	rsvd1;
+	__le32	rsvd2;
+	u8	unused_3[4];
+};
+
+/* hwrm_func_vlan_cfg_output (size:128b/16B) */
 struct hwrm_func_vlan_cfg_output {
-	__le16 error_code;
-	__le16 req_type;
-	__le16 seq_id;
-	__le16 resp_len;
-	__le32 unused_0;
-	u8 unused_1;
-	u8 unused_2;
-	u8 unused_3;
-	u8 valid;
-};
-
-/* hwrm_func_cfg */
-/* Input (88 bytes) */
+	__le16	error_code;
+	__le16	req_type;
+	__le16	seq_id;
+	__le16	resp_len;
+	u8	unused_0[7];
+	u8	valid;
+};
+
+/* hwrm_func_cfg_input (size:704b/88B) */
 struct hwrm_func_cfg_input {
-	__le16 req_type;
-	__le16 cmpl_ring;
-	__le16 seq_id;
-	__le16 target_id;
-	__le64 resp_addr;
-	__le16 fid;
-	u8 unused_0;
-	u8 unused_1;
-	__le32 flags;
-	#define FUNC_CFG_REQ_FLAGS_SRC_MAC_ADDR_CHECK_DISABLE      0x1UL
-	#define FUNC_CFG_REQ_FLAGS_SRC_MAC_ADDR_CHECK_ENABLE       0x2UL
-	#define FUNC_CFG_REQ_FLAGS_RSVD_MASK			    0x1fcUL
-	#define FUNC_CFG_REQ_FLAGS_RSVD_SFT			    2
-	#define FUNC_CFG_REQ_FLAGS_STD_TX_RING_MODE_ENABLE	    0x200UL
-	#define FUNC_CFG_REQ_FLAGS_STD_TX_RING_MODE_DISABLE	    0x400UL
-	#define FUNC_CFG_REQ_FLAGS_VIRT_MAC_PERSIST		    0x800UL
-	#define FUNC_CFG_REQ_FLAGS_NO_AUTOCLEAR_STATISTIC	    0x1000UL
-	#define FUNC_CFG_REQ_FLAGS_TX_ASSETS_TEST		    0x2000UL
-	__le32 enables;
-	#define FUNC_CFG_REQ_ENABLES_MTU			    0x1UL
-	#define FUNC_CFG_REQ_ENABLES_MRU			    0x2UL
-	#define FUNC_CFG_REQ_ENABLES_NUM_RSSCOS_CTXS		    0x4UL
-	#define FUNC_CFG_REQ_ENABLES_NUM_CMPL_RINGS		    0x8UL
-	#define FUNC_CFG_REQ_ENABLES_NUM_TX_RINGS		    0x10UL
-	#define FUNC_CFG_REQ_ENABLES_NUM_RX_RINGS		    0x20UL
-	#define FUNC_CFG_REQ_ENABLES_NUM_L2_CTXS		    0x40UL
-	#define FUNC_CFG_REQ_ENABLES_NUM_VNICS			    0x80UL
-	#define FUNC_CFG_REQ_ENABLES_NUM_STAT_CTXS		    0x100UL
-	#define FUNC_CFG_REQ_ENABLES_DFLT_MAC_ADDR		    0x200UL
-	#define FUNC_CFG_REQ_ENABLES_DFLT_VLAN			    0x400UL
-	#define FUNC_CFG_REQ_ENABLES_DFLT_IP_ADDR		    0x800UL
-	#define FUNC_CFG_REQ_ENABLES_MIN_BW			    0x1000UL
-	#define FUNC_CFG_REQ_ENABLES_MAX_BW			    0x2000UL
-	#define FUNC_CFG_REQ_ENABLES_ASYNC_EVENT_CR		    0x4000UL
-	#define FUNC_CFG_REQ_ENABLES_VLAN_ANTISPOOF_MODE	    0x8000UL
-	#define FUNC_CFG_REQ_ENABLES_ALLOWED_VLAN_PRIS		    0x10000UL
-	#define FUNC_CFG_REQ_ENABLES_EVB_MODE			    0x20000UL
-	#define FUNC_CFG_REQ_ENABLES_NUM_MCAST_FILTERS		    0x40000UL
-	#define FUNC_CFG_REQ_ENABLES_NUM_HW_RING_GRPS		    0x80000UL
-	__le16 mtu;
-	__le16 mru;
-	__le16 num_rsscos_ctxs;
-	__le16 num_cmpl_rings;
-	__le16 num_tx_rings;
-	__le16 num_rx_rings;
-	__le16 num_l2_ctxs;
-	__le16 num_vnics;
-	__le16 num_stat_ctxs;
-	__le16 num_hw_ring_grps;
-	u8 dflt_mac_addr[6];
-	__le16 dflt_vlan;
-	__be32 dflt_ip_addr[4];
-	__le32 min_bw;
-	#define FUNC_CFG_REQ_MIN_BW_BW_VALUE_MASK		    0xfffffffUL
-	#define FUNC_CFG_REQ_MIN_BW_BW_VALUE_SFT		    0
-	#define FUNC_CFG_REQ_MIN_BW_SCALE			    0x10000000UL
-	#define FUNC_CFG_REQ_MIN_BW_SCALE_BITS			   (0x0UL << 28)
-	#define FUNC_CFG_REQ_MIN_BW_SCALE_BYTES		   (0x1UL << 28)
-	#define FUNC_CFG_REQ_MIN_BW_SCALE_LAST    FUNC_CFG_REQ_MIN_BW_SCALE_BYTES
-	#define FUNC_CFG_REQ_MIN_BW_BW_VALUE_UNIT_MASK		    0xe0000000UL
-	#define FUNC_CFG_REQ_MIN_BW_BW_VALUE_UNIT_SFT		    29
-	#define FUNC_CFG_REQ_MIN_BW_BW_VALUE_UNIT_MEGA		   (0x0UL << 29)
-	#define FUNC_CFG_REQ_MIN_BW_BW_VALUE_UNIT_KILO		   (0x2UL << 29)
-	#define FUNC_CFG_REQ_MIN_BW_BW_VALUE_UNIT_BASE		   (0x4UL << 29)
-	#define FUNC_CFG_REQ_MIN_BW_BW_VALUE_UNIT_GIGA		   (0x6UL << 29)
-	#define FUNC_CFG_REQ_MIN_BW_BW_VALUE_UNIT_PERCENT1_100    (0x1UL << 29)
-	#define FUNC_CFG_REQ_MIN_BW_BW_VALUE_UNIT_INVALID	   (0x7UL << 29)
-	#define FUNC_CFG_REQ_MIN_BW_BW_VALUE_UNIT_LAST    FUNC_CFG_REQ_MIN_BW_BW_VALUE_UNIT_INVALID
-	__le32 max_bw;
-	#define FUNC_CFG_REQ_MAX_BW_BW_VALUE_MASK		    0xfffffffUL
-	#define FUNC_CFG_REQ_MAX_BW_BW_VALUE_SFT		    0
-	#define FUNC_CFG_REQ_MAX_BW_SCALE			    0x10000000UL
-	#define FUNC_CFG_REQ_MAX_BW_SCALE_BITS			   (0x0UL << 28)
-	#define FUNC_CFG_REQ_MAX_BW_SCALE_BYTES		   (0x1UL << 28)
-	#define FUNC_CFG_REQ_MAX_BW_SCALE_LAST    FUNC_CFG_REQ_MAX_BW_SCALE_BYTES
-	#define FUNC_CFG_REQ_MAX_BW_BW_VALUE_UNIT_MASK		    0xe0000000UL
-	#define FUNC_CFG_REQ_MAX_BW_BW_VALUE_UNIT_SFT		    29
-	#define FUNC_CFG_REQ_MAX_BW_BW_VALUE_UNIT_MEGA		   (0x0UL << 29)
-	#define FUNC_CFG_REQ_MAX_BW_BW_VALUE_UNIT_KILO		   (0x2UL << 29)
-	#define FUNC_CFG_REQ_MAX_BW_BW_VALUE_UNIT_BASE		   (0x4UL << 29)
-	#define FUNC_CFG_REQ_MAX_BW_BW_VALUE_UNIT_GIGA		   (0x6UL << 29)
-	#define FUNC_CFG_REQ_MAX_BW_BW_VALUE_UNIT_PERCENT1_100    (0x1UL << 29)
-	#define FUNC_CFG_REQ_MAX_BW_BW_VALUE_UNIT_INVALID	   (0x7UL << 29)
-	#define FUNC_CFG_REQ_MAX_BW_BW_VALUE_UNIT_LAST    FUNC_CFG_REQ_MAX_BW_BW_VALUE_UNIT_INVALID
-	__le16 async_event_cr;
-	u8 vlan_antispoof_mode;
-	#define FUNC_CFG_REQ_VLAN_ANTISPOOF_MODE_NOCHECK	   0x0UL
-	#define FUNC_CFG_REQ_VLAN_ANTISPOOF_MODE_VALIDATE_VLAN    0x1UL
-	#define FUNC_CFG_REQ_VLAN_ANTISPOOF_MODE_INSERT_IF_VLANDNE 0x2UL
+	__le16	req_type;
+	__le16	cmpl_ring;
+	__le16	seq_id;
+	__le16	target_id;
+	__le64	resp_addr;
+	__le16	fid;
+	u8	unused_0[2];
+	__le32	flags;
+	#define FUNC_CFG_REQ_FLAGS_SRC_MAC_ADDR_CHECK_DISABLE     0x1UL
+	#define FUNC_CFG_REQ_FLAGS_SRC_MAC_ADDR_CHECK_ENABLE      0x2UL
+	#define FUNC_CFG_REQ_FLAGS_RSVD_MASK                      0x1fcUL
+	#define FUNC_CFG_REQ_FLAGS_RSVD_SFT                       2
+	#define FUNC_CFG_REQ_FLAGS_STD_TX_RING_MODE_ENABLE        0x200UL
+	#define FUNC_CFG_REQ_FLAGS_STD_TX_RING_MODE_DISABLE       0x400UL
+	#define FUNC_CFG_REQ_FLAGS_VIRT_MAC_PERSIST               0x800UL
+	#define FUNC_CFG_REQ_FLAGS_NO_AUTOCLEAR_STATISTIC         0x1000UL
+	#define FUNC_CFG_REQ_FLAGS_TX_ASSETS_TEST                 0x2000UL
+	#define FUNC_CFG_REQ_FLAGS_RX_ASSETS_TEST                 0x4000UL
+	#define FUNC_CFG_REQ_FLAGS_CMPL_ASSETS_TEST               0x8000UL
+	#define FUNC_CFG_REQ_FLAGS_RSSCOS_CTX_ASSETS_TEST         0x10000UL
+	#define FUNC_CFG_REQ_FLAGS_RING_GRP_ASSETS_TEST           0x20000UL
+	#define FUNC_CFG_REQ_FLAGS_STAT_CTX_ASSETS_TEST           0x40000UL
+	#define FUNC_CFG_REQ_FLAGS_VNIC_ASSETS_TEST               0x80000UL
+	#define FUNC_CFG_REQ_FLAGS_L2_CTX_ASSETS_TEST             0x100000UL
+	__le32	enables;
+	#define FUNC_CFG_REQ_ENABLES_MTU                     0x1UL
+	#define FUNC_CFG_REQ_ENABLES_MRU                     0x2UL
+	#define FUNC_CFG_REQ_ENABLES_NUM_RSSCOS_CTXS         0x4UL
+	#define FUNC_CFG_REQ_ENABLES_NUM_CMPL_RINGS          0x8UL
+	#define FUNC_CFG_REQ_ENABLES_NUM_TX_RINGS            0x10UL
+	#define FUNC_CFG_REQ_ENABLES_NUM_RX_RINGS            0x20UL
+	#define FUNC_CFG_REQ_ENABLES_NUM_L2_CTXS             0x40UL
+	#define FUNC_CFG_REQ_ENABLES_NUM_VNICS               0x80UL
+	#define FUNC_CFG_REQ_ENABLES_NUM_STAT_CTXS           0x100UL
+	#define FUNC_CFG_REQ_ENABLES_DFLT_MAC_ADDR           0x200UL
+	#define FUNC_CFG_REQ_ENABLES_DFLT_VLAN               0x400UL
+	#define FUNC_CFG_REQ_ENABLES_DFLT_IP_ADDR            0x800UL
+	#define FUNC_CFG_REQ_ENABLES_MIN_BW                  0x1000UL
+	#define FUNC_CFG_REQ_ENABLES_MAX_BW                  0x2000UL
+	#define FUNC_CFG_REQ_ENABLES_ASYNC_EVENT_CR          0x4000UL
+	#define FUNC_CFG_REQ_ENABLES_VLAN_ANTISPOOF_MODE     0x8000UL
+	#define FUNC_CFG_REQ_ENABLES_ALLOWED_VLAN_PRIS       0x10000UL
+	#define FUNC_CFG_REQ_ENABLES_EVB_MODE                0x20000UL
+	#define FUNC_CFG_REQ_ENABLES_NUM_MCAST_FILTERS       0x40000UL
+	#define FUNC_CFG_REQ_ENABLES_NUM_HW_RING_GRPS        0x80000UL
+	#define FUNC_CFG_REQ_ENABLES_CACHE_LINESIZE          0x100000UL
+	__le16	mtu;
+	__le16	mru;
+	__le16	num_rsscos_ctxs;
+	__le16	num_cmpl_rings;
+	__le16	num_tx_rings;
+	__le16	num_rx_rings;
+	__le16	num_l2_ctxs;
+	__le16	num_vnics;
+	__le16	num_stat_ctxs;
+	__le16	num_hw_ring_grps;
+	u8	dflt_mac_addr[6];
+	__le16	dflt_vlan;
+	__be32	dflt_ip_addr[4];
+	__le32	min_bw;
+	#define FUNC_CFG_REQ_MIN_BW_BW_VALUE_MASK             0xfffffffUL
+	#define FUNC_CFG_REQ_MIN_BW_BW_VALUE_SFT              0
+	#define FUNC_CFG_REQ_MIN_BW_SCALE                     0x10000000UL
+	#define FUNC_CFG_REQ_MIN_BW_SCALE_BITS                  (0x0UL << 28)
+	#define FUNC_CFG_REQ_MIN_BW_SCALE_BYTES                 (0x1UL << 28)
+	#define FUNC_CFG_REQ_MIN_BW_SCALE_LAST                 FUNC_CFG_REQ_MIN_BW_SCALE_BYTES
+	#define FUNC_CFG_REQ_MIN_BW_BW_VALUE_UNIT_MASK        0xe0000000UL
+	#define FUNC_CFG_REQ_MIN_BW_BW_VALUE_UNIT_SFT         29
+	#define FUNC_CFG_REQ_MIN_BW_BW_VALUE_UNIT_MEGA          (0x0UL << 29)
+	#define FUNC_CFG_REQ_MIN_BW_BW_VALUE_UNIT_KILO          (0x2UL << 29)
+	#define FUNC_CFG_REQ_MIN_BW_BW_VALUE_UNIT_BASE          (0x4UL << 29)
+	#define FUNC_CFG_REQ_MIN_BW_BW_VALUE_UNIT_GIGA          (0x6UL << 29)
+	#define FUNC_CFG_REQ_MIN_BW_BW_VALUE_UNIT_PERCENT1_100  (0x1UL << 29)
+	#define FUNC_CFG_REQ_MIN_BW_BW_VALUE_UNIT_INVALID       (0x7UL << 29)
+	#define FUNC_CFG_REQ_MIN_BW_BW_VALUE_UNIT_LAST         FUNC_CFG_REQ_MIN_BW_BW_VALUE_UNIT_INVALID
+	__le32	max_bw;
+	#define FUNC_CFG_REQ_MAX_BW_BW_VALUE_MASK             0xfffffffUL
+	#define FUNC_CFG_REQ_MAX_BW_BW_VALUE_SFT              0
+	#define FUNC_CFG_REQ_MAX_BW_SCALE                     0x10000000UL
+	#define FUNC_CFG_REQ_MAX_BW_SCALE_BITS                  (0x0UL << 28)
+	#define FUNC_CFG_REQ_MAX_BW_SCALE_BYTES                 (0x1UL << 28)
+	#define FUNC_CFG_REQ_MAX_BW_SCALE_LAST                 FUNC_CFG_REQ_MAX_BW_SCALE_BYTES
+	#define FUNC_CFG_REQ_MAX_BW_BW_VALUE_UNIT_MASK        0xe0000000UL
+	#define FUNC_CFG_REQ_MAX_BW_BW_VALUE_UNIT_SFT         29
+	#define FUNC_CFG_REQ_MAX_BW_BW_VALUE_UNIT_MEGA          (0x0UL << 29)
+	#define FUNC_CFG_REQ_MAX_BW_BW_VALUE_UNIT_KILO          (0x2UL << 29)
+	#define FUNC_CFG_REQ_MAX_BW_BW_VALUE_UNIT_BASE          (0x4UL << 29)
+	#define FUNC_CFG_REQ_MAX_BW_BW_VALUE_UNIT_GIGA          (0x6UL << 29)
+	#define FUNC_CFG_REQ_MAX_BW_BW_VALUE_UNIT_PERCENT1_100  (0x1UL << 29)
+	#define FUNC_CFG_REQ_MAX_BW_BW_VALUE_UNIT_INVALID       (0x7UL << 29)
+	#define FUNC_CFG_REQ_MAX_BW_BW_VALUE_UNIT_LAST         FUNC_CFG_REQ_MAX_BW_BW_VALUE_UNIT_INVALID
+	__le16	async_event_cr;
+	u8	vlan_antispoof_mode;
+	#define FUNC_CFG_REQ_VLAN_ANTISPOOF_MODE_NOCHECK                 0x0UL
+	#define FUNC_CFG_REQ_VLAN_ANTISPOOF_MODE_VALIDATE_VLAN           0x1UL
+	#define FUNC_CFG_REQ_VLAN_ANTISPOOF_MODE_INSERT_IF_VLANDNE       0x2UL
 	#define FUNC_CFG_REQ_VLAN_ANTISPOOF_MODE_INSERT_OR_OVERRIDE_VLAN 0x3UL
-	u8 allowed_vlan_pris;
-	u8 evb_mode;
-	#define FUNC_CFG_REQ_EVB_MODE_NO_EVB			   0x0UL
-	#define FUNC_CFG_REQ_EVB_MODE_VEB			   0x1UL
-	#define FUNC_CFG_REQ_EVB_MODE_VEPA			   0x2UL
-	u8 unused_2;
-	__le16 num_mcast_filters;
+	#define FUNC_CFG_REQ_VLAN_ANTISPOOF_MODE_LAST                   FUNC_CFG_REQ_VLAN_ANTISPOOF_MODE_INSERT_OR_OVERRIDE_VLAN
+	u8	allowed_vlan_pris;
+	u8	evb_mode;
+	#define FUNC_CFG_REQ_EVB_MODE_NO_EVB 0x0UL
+	#define FUNC_CFG_REQ_EVB_MODE_VEB    0x1UL
+	#define FUNC_CFG_REQ_EVB_MODE_VEPA   0x2UL
+	#define FUNC_CFG_REQ_EVB_MODE_LAST  FUNC_CFG_REQ_EVB_MODE_VEPA
+	u8	cache_linesize;
+	#define FUNC_CFG_REQ_CACHE_LINESIZE_CACHE_LINESIZE_64  0x0UL
+	#define FUNC_CFG_REQ_CACHE_LINESIZE_CACHE_LINESIZE_128 0x1UL
+	#define FUNC_CFG_REQ_CACHE_LINESIZE_LAST              FUNC_CFG_REQ_CACHE_LINESIZE_CACHE_LINESIZE_128
+	__le16	num_mcast_filters;
+};
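
The min_bw/max_bw words above pack three fields into one __le32: a 28-bit value, a bits-vs-bytes scale bit, and a 3-bit unit selector. As a minimal sketch (not part of the patch; the helper name is hypothetical), encoding a 10 Gbps minimum in megabit units could look like:

/* Hypothetical helper: encode a rate given in Mbps into the min_bw format. */
static u32 bnxt_encode_min_bw_mbps(u32 mbps)
{
	u32 bw;

	bw = (mbps << FUNC_CFG_REQ_MIN_BW_BW_VALUE_SFT) &
	     FUNC_CFG_REQ_MIN_BW_BW_VALUE_MASK;
	bw |= FUNC_CFG_REQ_MIN_BW_SCALE_BITS |	/* value counts bits, not bytes */
	      FUNC_CFG_REQ_MIN_BW_BW_VALUE_UNIT_MEGA;
	return bw;
}

/* Usage sketch: req.min_bw = cpu_to_le32(bnxt_encode_min_bw_mbps(10000));
 *               req.enables |= cpu_to_le32(FUNC_CFG_REQ_ENABLES_MIN_BW);
 */
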
+
+/* hwrm_func_cfg_output (size:128b/16B) */
+struct hwrm_func_cfg_output {
+	__le16	error_code;
+	__le16	req_type;
+	__le16	seq_id;
+	__le16	resp_len;
+	u8	unused_0[7];
+	u8	valid;
 };
 
-/* Output (16 bytes) */
-struct hwrm_func_cfg_output {
-	__le16 error_code;
-	__le16 req_type;
-	__le16 seq_id;
-	__le16 resp_len;
-	__le32 unused_0;
-	u8 unused_1;
-	u8 unused_2;
-	u8 unused_3;
-	u8 valid;
-};
-
-/* hwrm_func_qstats */
-/* Input (24 bytes) */
+/* hwrm_func_qstats_input (size:192b/24B) */
 struct hwrm_func_qstats_input {
-	__le16 req_type;
-	__le16 cmpl_ring;
-	__le16 seq_id;
-	__le16 target_id;
-	__le64 resp_addr;
-	__le16 fid;
-	__le16 unused_0[3];
+	__le16	req_type;
+	__le16	cmpl_ring;
+	__le16	seq_id;
+	__le16	target_id;
+	__le64	resp_addr;
+	__le16	fid;
+	u8	unused_0[6];
 };
 
-/* Output (176 bytes) */
+/* hwrm_func_qstats_output (size:1408b/176B) */
 struct hwrm_func_qstats_output {
-	__le16 error_code;
-	__le16 req_type;
-	__le16 seq_id;
-	__le16 resp_len;
-	__le64 tx_ucast_pkts;
-	__le64 tx_mcast_pkts;
-	__le64 tx_bcast_pkts;
-	__le64 tx_discard_pkts;
-	__le64 tx_drop_pkts;
-	__le64 tx_ucast_bytes;
-	__le64 tx_mcast_bytes;
-	__le64 tx_bcast_bytes;
-	__le64 rx_ucast_pkts;
-	__le64 rx_mcast_pkts;
-	__le64 rx_bcast_pkts;
-	__le64 rx_discard_pkts;
-	__le64 rx_drop_pkts;
-	__le64 rx_ucast_bytes;
-	__le64 rx_mcast_bytes;
-	__le64 rx_bcast_bytes;
-	__le64 rx_agg_pkts;
-	__le64 rx_agg_bytes;
-	__le64 rx_agg_events;
-	__le64 rx_agg_aborts;
-	__le32 unused_0;
-	u8 unused_1;
-	u8 unused_2;
-	u8 unused_3;
-	u8 valid;
-};
-
-/* hwrm_func_clr_stats */
-/* Input (24 bytes) */
+	__le16	error_code;
+	__le16	req_type;
+	__le16	seq_id;
+	__le16	resp_len;
+	__le64	tx_ucast_pkts;
+	__le64	tx_mcast_pkts;
+	__le64	tx_bcast_pkts;
+	__le64	tx_discard_pkts;
+	__le64	tx_drop_pkts;
+	__le64	tx_ucast_bytes;
+	__le64	tx_mcast_bytes;
+	__le64	tx_bcast_bytes;
+	__le64	rx_ucast_pkts;
+	__le64	rx_mcast_pkts;
+	__le64	rx_bcast_pkts;
+	__le64	rx_discard_pkts;
+	__le64	rx_drop_pkts;
+	__le64	rx_ucast_bytes;
+	__le64	rx_mcast_bytes;
+	__le64	rx_bcast_bytes;
+	__le64	rx_agg_pkts;
+	__le64	rx_agg_bytes;
+	__le64	rx_agg_events;
+	__le64	rx_agg_aborts;
+	u8	unused_0[7];
+	u8	valid;
+};
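
Every command in this header follows the same request/response discipline: the driver fills the 16-byte header, points resp_addr at a DMA response buffer, and trusts the response only once the trailing valid byte is set. A sketch of querying the per-function counters, assuming the bnxt driver's internal helpers (bnxt_hwrm_cmd_hdr_init(), hwrm_send_message()) and its preallocated response buffer:

static u64 bnxt_query_rx_drops(struct bnxt *bp)
{
	struct hwrm_func_qstats_input req = {0};
	struct hwrm_func_qstats_output *resp = bp->hwrm_cmd_resp_addr;
	u64 drops = 0;

	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_QSTATS, -1, -1);
	req.fid = cpu_to_le16(0xffff);	/* 0xffff targets the calling function */

	mutex_lock(&bp->hwrm_cmd_lock);
	if (!hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT))
		drops = le64_to_cpu(resp->rx_drop_pkts);
	mutex_unlock(&bp->hwrm_cmd_lock);
	return drops;
}
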
+
+/* hwrm_func_clr_stats_input (size:192b/24B) */
 struct hwrm_func_clr_stats_input {
-	__le16 req_type;
-	__le16 cmpl_ring;
-	__le16 seq_id;
-	__le16 target_id;
-	__le64 resp_addr;
-	__le16 fid;
-	__le16 unused_0[3];
+	__le16	req_type;
+	__le16	cmpl_ring;
+	__le16	seq_id;
+	__le16	target_id;
+	__le64	resp_addr;
+	__le16	fid;
+	u8	unused_0[6];
 };
 
-/* Output (16 bytes) */
+/* hwrm_func_clr_stats_output (size:128b/16B) */
 struct hwrm_func_clr_stats_output {
-	__le16 error_code;
-	__le16 req_type;
-	__le16 seq_id;
-	__le16 resp_len;
-	__le32 unused_0;
-	u8 unused_1;
-	u8 unused_2;
-	u8 unused_3;
-	u8 valid;
-};
-
-/* hwrm_func_vf_resc_free */
-/* Input (24 bytes) */
+	__le16	error_code;
+	__le16	req_type;
+	__le16	seq_id;
+	__le16	resp_len;
+	u8	unused_0[7];
+	u8	valid;
+};
+
+/* hwrm_func_vf_resc_free_input (size:192b/24B) */
 struct hwrm_func_vf_resc_free_input {
-	__le16 req_type;
-	__le16 cmpl_ring;
-	__le16 seq_id;
-	__le16 target_id;
-	__le64 resp_addr;
-	__le16 vf_id;
-	__le16 unused_0[3];
+	__le16	req_type;
+	__le16	cmpl_ring;
+	__le16	seq_id;
+	__le16	target_id;
+	__le64	resp_addr;
+	__le16	vf_id;
+	u8	unused_0[6];
 };
 
-/* Output (16 bytes) */
+/* hwrm_func_vf_resc_free_output (size:128b/16B) */
 struct hwrm_func_vf_resc_free_output {
-	__le16 error_code;
-	__le16 req_type;
-	__le16 seq_id;
-	__le16 resp_len;
-	__le32 unused_0;
-	u8 unused_1;
-	u8 unused_2;
-	u8 unused_3;
-	u8 valid;
-};
-
-/* hwrm_func_vf_vnic_ids_query */
-/* Input (32 bytes) */
+	__le16	error_code;
+	__le16	req_type;
+	__le16	seq_id;
+	__le16	resp_len;
+	u8	unused_0[7];
+	u8	valid;
+};
+
+/* hwrm_func_vf_vnic_ids_query_input (size:256b/32B) */
 struct hwrm_func_vf_vnic_ids_query_input {
-	__le16 req_type;
-	__le16 cmpl_ring;
-	__le16 seq_id;
-	__le16 target_id;
-	__le64 resp_addr;
-	__le16 vf_id;
-	u8 unused_0;
-	u8 unused_1;
-	__le32 max_vnic_id_cnt;
-	__le64 vnic_id_tbl_addr;
-};
-
-/* Output (16 bytes) */
+	__le16	req_type;
+	__le16	cmpl_ring;
+	__le16	seq_id;
+	__le16	target_id;
+	__le64	resp_addr;
+	__le16	vf_id;
+	u8	unused_0[2];
+	__le32	max_vnic_id_cnt;
+	__le64	vnic_id_tbl_addr;
+};
+
+/* hwrm_func_vf_vnic_ids_query_output (size:128b/16B) */
 struct hwrm_func_vf_vnic_ids_query_output {
-	__le16 error_code;
-	__le16 req_type;
-	__le16 seq_id;
-	__le16 resp_len;
-	__le32 vnic_id_cnt;
-	u8 unused_0;
-	u8 unused_1;
-	u8 unused_2;
-	u8 valid;
-};
-
-/* hwrm_func_drv_rgtr */
-/* Input (80 bytes) */
+	__le16	error_code;
+	__le16	req_type;
+	__le16	seq_id;
+	__le16	resp_len;
+	__le32	vnic_id_cnt;
+	u8	unused_0[3];
+	u8	valid;
+};
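
hwrm_func_vf_vnic_ids_query is an indirect-response command: the caller hands firmware a DMA buffer and its capacity, firmware writes up to max_vnic_id_cnt 32-bit VNIC IDs into it, and the response reports how many entries are valid in vnic_id_cnt. A sketch of filling the request (buffer allocation and the actual send work as for any other HWRM command; the helper name is hypothetical):

/* Sketch: 'mapping' is the bus address of a buffer holding 'max' __le32 IDs. */
static void vf_vnic_ids_prep(struct hwrm_func_vf_vnic_ids_query_input *req,
			     u16 vf_id, u32 max, dma_addr_t mapping)
{
	req->vf_id = cpu_to_le16(vf_id);
	req->max_vnic_id_cnt = cpu_to_le32(max);
	req->vnic_id_tbl_addr = cpu_to_le64(mapping);
}
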
+
+/* hwrm_func_drv_rgtr_input (size:832b/104B) */
 struct hwrm_func_drv_rgtr_input {
-	__le16 req_type;
-	__le16 cmpl_ring;
-	__le16 seq_id;
-	__le16 target_id;
-	__le64 resp_addr;
-	__le32 flags;
-	#define FUNC_DRV_RGTR_REQ_FLAGS_FWD_ALL_MODE		    0x1UL
-	#define FUNC_DRV_RGTR_REQ_FLAGS_FWD_NONE_MODE		    0x2UL
-	__le32 enables;
-	#define FUNC_DRV_RGTR_REQ_ENABLES_OS_TYPE		    0x1UL
-	#define FUNC_DRV_RGTR_REQ_ENABLES_VER			    0x2UL
-	#define FUNC_DRV_RGTR_REQ_ENABLES_TIMESTAMP		    0x4UL
-	#define FUNC_DRV_RGTR_REQ_ENABLES_VF_REQ_FWD		    0x8UL
-	#define FUNC_DRV_RGTR_REQ_ENABLES_ASYNC_EVENT_FWD	    0x10UL
-	__le16 os_type;
-	#define FUNC_DRV_RGTR_REQ_OS_TYPE_UNKNOWN		   0x0UL
-	#define FUNC_DRV_RGTR_REQ_OS_TYPE_OTHER		   0x1UL
-	#define FUNC_DRV_RGTR_REQ_OS_TYPE_MSDOS		   0xeUL
-	#define FUNC_DRV_RGTR_REQ_OS_TYPE_WINDOWS		   0x12UL
-	#define FUNC_DRV_RGTR_REQ_OS_TYPE_SOLARIS		   0x1dUL
-	#define FUNC_DRV_RGTR_REQ_OS_TYPE_LINUX		   0x24UL
-	#define FUNC_DRV_RGTR_REQ_OS_TYPE_FREEBSD		   0x2aUL
-	#define FUNC_DRV_RGTR_REQ_OS_TYPE_ESXI			   0x68UL
-	#define FUNC_DRV_RGTR_REQ_OS_TYPE_WIN864		   0x73UL
-	#define FUNC_DRV_RGTR_REQ_OS_TYPE_WIN2012R2		   0x74UL
-	#define FUNC_DRV_RGTR_REQ_OS_TYPE_UEFI			   0x8000UL
-	u8 ver_maj;
-	u8 ver_min;
-	u8 ver_upd;
-	u8 unused_0;
-	__le16 unused_1;
-	__le32 timestamp;
-	__le32 unused_2;
-	__le32 vf_req_fwd[8];
-	__le32 async_event_fwd[8];
-};
-
-/* Output (16 bytes) */
+	__le16	req_type;
+	__le16	cmpl_ring;
+	__le16	seq_id;
+	__le16	target_id;
+	__le64	resp_addr;
+	__le32	flags;
+	#define FUNC_DRV_RGTR_REQ_FLAGS_FWD_ALL_MODE      0x1UL
+	#define FUNC_DRV_RGTR_REQ_FLAGS_FWD_NONE_MODE     0x2UL
+	__le32	enables;
+	#define FUNC_DRV_RGTR_REQ_ENABLES_OS_TYPE             0x1UL
+	#define FUNC_DRV_RGTR_REQ_ENABLES_VER                 0x2UL
+	#define FUNC_DRV_RGTR_REQ_ENABLES_TIMESTAMP           0x4UL
+	#define FUNC_DRV_RGTR_REQ_ENABLES_VF_REQ_FWD          0x8UL
+	#define FUNC_DRV_RGTR_REQ_ENABLES_ASYNC_EVENT_FWD     0x10UL
+	__le16	os_type;
+	#define FUNC_DRV_RGTR_REQ_OS_TYPE_UNKNOWN   0x0UL
+	#define FUNC_DRV_RGTR_REQ_OS_TYPE_OTHER     0x1UL
+	#define FUNC_DRV_RGTR_REQ_OS_TYPE_MSDOS     0xeUL
+	#define FUNC_DRV_RGTR_REQ_OS_TYPE_WINDOWS   0x12UL
+	#define FUNC_DRV_RGTR_REQ_OS_TYPE_SOLARIS   0x1dUL
+	#define FUNC_DRV_RGTR_REQ_OS_TYPE_LINUX     0x24UL
+	#define FUNC_DRV_RGTR_REQ_OS_TYPE_FREEBSD   0x2aUL
+	#define FUNC_DRV_RGTR_REQ_OS_TYPE_ESXI      0x68UL
+	#define FUNC_DRV_RGTR_REQ_OS_TYPE_WIN864    0x73UL
+	#define FUNC_DRV_RGTR_REQ_OS_TYPE_WIN2012R2 0x74UL
+	#define FUNC_DRV_RGTR_REQ_OS_TYPE_UEFI      0x8000UL
+	#define FUNC_DRV_RGTR_REQ_OS_TYPE_LAST     FUNC_DRV_RGTR_REQ_OS_TYPE_UEFI
+	u8	ver_maj;
+	u8	ver_min;
+	u8	ver_upd;
+	u8	unused_0[3];
+	__le32	timestamp;
+	u8	unused_1[4];
+	__le32	vf_req_fwd[8];
+	__le32	async_event_fwd[8];
+};
+
+/* hwrm_func_drv_rgtr_output (size:128b/16B) */
 struct hwrm_func_drv_rgtr_output {
-	__le16 error_code;
-	__le16 req_type;
-	__le16 seq_id;
-	__le16 resp_len;
-	__le32 unused_0;
-	u8 unused_1;
-	u8 unused_2;
-	u8 unused_3;
-	u8 valid;
-};
-
-/* hwrm_func_drv_unrgtr */
-/* Input (24 bytes) */
+	__le16	error_code;
+	__le16	req_type;
+	__le16	seq_id;
+	__le16	resp_len;
+	u8	unused_0[7];
+	u8	valid;
+};
+
+/* hwrm_func_drv_unrgtr_input (size:192b/24B) */
 struct hwrm_func_drv_unrgtr_input {
-	__le16 req_type;
-	__le16 cmpl_ring;
-	__le16 seq_id;
-	__le16 target_id;
-	__le64 resp_addr;
-	__le32 flags;
+	__le16	req_type;
+	__le16	cmpl_ring;
+	__le16	seq_id;
+	__le16	target_id;
+	__le64	resp_addr;
+	__le32	flags;
 	#define FUNC_DRV_UNRGTR_REQ_FLAGS_PREPARE_FOR_SHUTDOWN     0x1UL
-	__le32 unused_0;
+	u8	unused_0[4];
 };
 
-/* Output (16 bytes) */
+/* hwrm_func_drv_unrgtr_output (size:128b/16B) */
 struct hwrm_func_drv_unrgtr_output {
-	__le16 error_code;
-	__le16 req_type;
-	__le16 seq_id;
-	__le16 resp_len;
-	__le32 unused_0;
-	u8 unused_1;
-	u8 unused_2;
-	u8 unused_3;
-	u8 valid;
-};
-
-/* hwrm_func_buf_rgtr */
-/* Input (128 bytes) */
+	__le16	error_code;
+	__le16	req_type;
+	__le16	seq_id;
+	__le16	resp_len;
+	u8	unused_0[7];
+	u8	valid;
+};
+
+/* hwrm_func_buf_rgtr_input (size:1024b/128B) */
 struct hwrm_func_buf_rgtr_input {
-	__le16 req_type;
-	__le16 cmpl_ring;
-	__le16 seq_id;
-	__le16 target_id;
-	__le64 resp_addr;
-	__le32 enables;
-	#define FUNC_BUF_RGTR_REQ_ENABLES_VF_ID		    0x1UL
-	#define FUNC_BUF_RGTR_REQ_ENABLES_ERR_BUF_ADDR		    0x2UL
-	__le16 vf_id;
-	__le16 req_buf_num_pages;
-	__le16 req_buf_page_size;
-	#define FUNC_BUF_RGTR_REQ_REQ_BUF_PAGE_SIZE_16B	   0x4UL
-	#define FUNC_BUF_RGTR_REQ_REQ_BUF_PAGE_SIZE_4K		   0xcUL
-	#define FUNC_BUF_RGTR_REQ_REQ_BUF_PAGE_SIZE_8K		   0xdUL
-	#define FUNC_BUF_RGTR_REQ_REQ_BUF_PAGE_SIZE_64K	   0x10UL
-	#define FUNC_BUF_RGTR_REQ_REQ_BUF_PAGE_SIZE_2M		   0x15UL
-	#define FUNC_BUF_RGTR_REQ_REQ_BUF_PAGE_SIZE_4M		   0x16UL
-	#define FUNC_BUF_RGTR_REQ_REQ_BUF_PAGE_SIZE_1G		   0x1eUL
-	__le16 req_buf_len;
-	__le16 resp_buf_len;
-	u8 unused_0;
-	u8 unused_1;
-	__le64 req_buf_page_addr0;
-	__le64 req_buf_page_addr1;
-	__le64 req_buf_page_addr2;
-	__le64 req_buf_page_addr3;
-	__le64 req_buf_page_addr4;
-	__le64 req_buf_page_addr5;
-	__le64 req_buf_page_addr6;
-	__le64 req_buf_page_addr7;
-	__le64 req_buf_page_addr8;
-	__le64 req_buf_page_addr9;
-	__le64 error_buf_addr;
-	__le64 resp_buf_addr;
-};
-
-/* Output (16 bytes) */
+	__le16	req_type;
+	__le16	cmpl_ring;
+	__le16	seq_id;
+	__le16	target_id;
+	__le64	resp_addr;
+	__le32	enables;
+	#define FUNC_BUF_RGTR_REQ_ENABLES_VF_ID            0x1UL
+	#define FUNC_BUF_RGTR_REQ_ENABLES_ERR_BUF_ADDR     0x2UL
+	__le16	vf_id;
+	__le16	req_buf_num_pages;
+	__le16	req_buf_page_size;
+	#define FUNC_BUF_RGTR_REQ_REQ_BUF_PAGE_SIZE_16B 0x4UL
+	#define FUNC_BUF_RGTR_REQ_REQ_BUF_PAGE_SIZE_4K  0xcUL
+	#define FUNC_BUF_RGTR_REQ_REQ_BUF_PAGE_SIZE_8K  0xdUL
+	#define FUNC_BUF_RGTR_REQ_REQ_BUF_PAGE_SIZE_64K 0x10UL
+	#define FUNC_BUF_RGTR_REQ_REQ_BUF_PAGE_SIZE_2M  0x15UL
+	#define FUNC_BUF_RGTR_REQ_REQ_BUF_PAGE_SIZE_4M  0x16UL
+	#define FUNC_BUF_RGTR_REQ_REQ_BUF_PAGE_SIZE_1G  0x1eUL
+	#define FUNC_BUF_RGTR_REQ_REQ_BUF_PAGE_SIZE_LAST FUNC_BUF_RGTR_REQ_REQ_BUF_PAGE_SIZE_1G
+	__le16	req_buf_len;
+	__le16	resp_buf_len;
+	u8	unused_0[2];
+	__le64	req_buf_page_addr0;
+	__le64	req_buf_page_addr1;
+	__le64	req_buf_page_addr2;
+	__le64	req_buf_page_addr3;
+	__le64	req_buf_page_addr4;
+	__le64	req_buf_page_addr5;
+	__le64	req_buf_page_addr6;
+	__le64	req_buf_page_addr7;
+	__le64	req_buf_page_addr8;
+	__le64	req_buf_page_addr9;
+	__le64	error_buf_addr;
+	__le64	resp_buf_addr;
+};
+
+/* hwrm_func_buf_rgtr_output (size:128b/16B) */
 struct hwrm_func_buf_rgtr_output {
-	__le16 error_code;
-	__le16 req_type;
-	__le16 seq_id;
-	__le16 resp_len;
-	__le32 unused_0;
-	u8 unused_1;
-	u8 unused_2;
-	u8 unused_3;
-	u8 valid;
-};
-
-/* hwrm_func_drv_qver */
-/* Input (24 bytes) */
+	__le16	error_code;
+	__le16	req_type;
+	__le16	seq_id;
+	__le16	resp_len;
+	u8	unused_0[7];
+	u8	valid;
+};
+
+/* hwrm_func_drv_qver_input (size:192b/24B) */
 struct hwrm_func_drv_qver_input {
-	__le16 req_type;
-	__le16 cmpl_ring;
-	__le16 seq_id;
-	__le16 target_id;
-	__le64 resp_addr;
-	__le32 reserved;
-	__le16 fid;
-	__le16 unused_0;
+	__le16	req_type;
+	__le16	cmpl_ring;
+	__le16	seq_id;
+	__le16	target_id;
+	__le64	resp_addr;
+	__le32	reserved;
+	__le16	fid;
+	u8	unused_0[2];
 };
 
-/* Output (16 bytes) */
+/* hwrm_func_drv_qver_output (size:128b/16B) */
 struct hwrm_func_drv_qver_output {
-	__le16 error_code;
-	__le16 req_type;
-	__le16 seq_id;
-	__le16 resp_len;
-	__le16 os_type;
-	#define FUNC_DRV_QVER_RESP_OS_TYPE_UNKNOWN		   0x0UL
-	#define FUNC_DRV_QVER_RESP_OS_TYPE_OTHER		   0x1UL
-	#define FUNC_DRV_QVER_RESP_OS_TYPE_MSDOS		   0xeUL
-	#define FUNC_DRV_QVER_RESP_OS_TYPE_WINDOWS		   0x12UL
-	#define FUNC_DRV_QVER_RESP_OS_TYPE_SOLARIS		   0x1dUL
-	#define FUNC_DRV_QVER_RESP_OS_TYPE_LINUX		   0x24UL
-	#define FUNC_DRV_QVER_RESP_OS_TYPE_FREEBSD		   0x2aUL
-	#define FUNC_DRV_QVER_RESP_OS_TYPE_ESXI		   0x68UL
-	#define FUNC_DRV_QVER_RESP_OS_TYPE_WIN864		   0x73UL
-	#define FUNC_DRV_QVER_RESP_OS_TYPE_WIN2012R2		   0x74UL
-	#define FUNC_DRV_QVER_RESP_OS_TYPE_UEFI		   0x8000UL
-	u8 ver_maj;
-	u8 ver_min;
-	u8 ver_upd;
-	u8 unused_0;
-	u8 unused_1;
-	u8 valid;
-};
-
-/* hwrm_port_phy_cfg */
-/* Input (56 bytes) */
+	__le16	error_code;
+	__le16	req_type;
+	__le16	seq_id;
+	__le16	resp_len;
+	__le16	os_type;
+	#define FUNC_DRV_QVER_RESP_OS_TYPE_UNKNOWN   0x0UL
+	#define FUNC_DRV_QVER_RESP_OS_TYPE_OTHER     0x1UL
+	#define FUNC_DRV_QVER_RESP_OS_TYPE_MSDOS     0xeUL
+	#define FUNC_DRV_QVER_RESP_OS_TYPE_WINDOWS   0x12UL
+	#define FUNC_DRV_QVER_RESP_OS_TYPE_SOLARIS   0x1dUL
+	#define FUNC_DRV_QVER_RESP_OS_TYPE_LINUX     0x24UL
+	#define FUNC_DRV_QVER_RESP_OS_TYPE_FREEBSD   0x2aUL
+	#define FUNC_DRV_QVER_RESP_OS_TYPE_ESXI      0x68UL
+	#define FUNC_DRV_QVER_RESP_OS_TYPE_WIN864    0x73UL
+	#define FUNC_DRV_QVER_RESP_OS_TYPE_WIN2012R2 0x74UL
+	#define FUNC_DRV_QVER_RESP_OS_TYPE_UEFI      0x8000UL
+	#define FUNC_DRV_QVER_RESP_OS_TYPE_LAST     FUNC_DRV_QVER_RESP_OS_TYPE_UEFI
+	u8	ver_maj;
+	u8	ver_min;
+	u8	ver_upd;
+	u8	unused_0[2];
+	u8	valid;
+};
+
+/* hwrm_func_resource_qcaps_input (size:192b/24B) */
+struct hwrm_func_resource_qcaps_input {
+	__le16	req_type;
+	__le16	cmpl_ring;
+	__le16	seq_id;
+	__le16	target_id;
+	__le64	resp_addr;
+	__le16	fid;
+	u8	unused_0[6];
+};
+
+/* hwrm_func_resource_qcaps_output (size:384b/48B) */
+struct hwrm_func_resource_qcaps_output {
+	__le16	error_code;
+	__le16	req_type;
+	__le16	seq_id;
+	__le16	resp_len;
+	__le16	max_vfs;
+	__le16	max_msix;
+	__le16	vf_reservation_strategy;
+	#define FUNC_RESOURCE_QCAPS_RESP_VF_RESERVATION_STRATEGY_MAXIMAL 0x0UL
+	#define FUNC_RESOURCE_QCAPS_RESP_VF_RESERVATION_STRATEGY_MINIMAL 0x1UL
+	#define FUNC_RESOURCE_QCAPS_RESP_VF_RESERVATION_STRATEGY_LAST   FUNC_RESOURCE_QCAPS_RESP_VF_RESERVATION_STRATEGY_MINIMAL
+	__le16	min_rsscos_ctx;
+	__le16	max_rsscos_ctx;
+	__le16	min_cmpl_rings;
+	__le16	max_cmpl_rings;
+	__le16	min_tx_rings;
+	__le16	max_tx_rings;
+	__le16	min_rx_rings;
+	__le16	max_rx_rings;
+	__le16	min_l2_ctxs;
+	__le16	max_l2_ctxs;
+	__le16	min_vnics;
+	__le16	max_vnics;
+	__le16	min_stat_ctx;
+	__le16	max_stat_ctx;
+	__le16	min_hw_ring_grps;
+	__le16	max_hw_ring_grps;
+	u8	unused_0;
+	u8	valid;
+};
+
+/* hwrm_func_vf_resource_cfg_input (size:448b/56B) */
+struct hwrm_func_vf_resource_cfg_input {
+	__le16	req_type;
+	__le16	cmpl_ring;
+	__le16	seq_id;
+	__le16	target_id;
+	__le64	resp_addr;
+	__le16	vf_id;
+	__le16	max_msix;
+	__le16	min_rsscos_ctx;
+	__le16	max_rsscos_ctx;
+	__le16	min_cmpl_rings;
+	__le16	max_cmpl_rings;
+	__le16	min_tx_rings;
+	__le16	max_tx_rings;
+	__le16	min_rx_rings;
+	__le16	max_rx_rings;
+	__le16	min_l2_ctxs;
+	__le16	max_l2_ctxs;
+	__le16	min_vnics;
+	__le16	max_vnics;
+	__le16	min_stat_ctx;
+	__le16	max_stat_ctx;
+	__le16	min_hw_ring_grps;
+	__le16	max_hw_ring_grps;
+	u8	unused_0[4];
+};
+
+/* hwrm_func_vf_resource_cfg_output (size:256b/32B) */
+struct hwrm_func_vf_resource_cfg_output {
+	__le16	error_code;
+	__le16	req_type;
+	__le16	seq_id;
+	__le16	resp_len;
+	__le16	reserved_rsscos_ctx;
+	__le16	reserved_cmpl_rings;
+	__le16	reserved_tx_rings;
+	__le16	reserved_rx_rings;
+	__le16	reserved_l2_ctxs;
+	__le16	reserved_vnics;
+	__le16	reserved_stat_ctx;
+	__le16	reserved_hw_ring_grps;
+	u8	unused_0[7];
+	u8	valid;
+};
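
The new resource_qcaps/vf_resource_cfg pair replaces fixed per-VF allocation with a guaranteed-minimum/allowed-maximum range per resource type; the response echoes what firmware actually reserved. Setting min equal to max pins the reservation. A hedged sketch (helper name hypothetical):

/* Pin a VF to exactly n_tx/n_rx rings by collapsing the min/max range. */
static void vf_res_pin_rings(struct hwrm_func_vf_resource_cfg_input *req,
			     u16 vf_id, u16 n_tx, u16 n_rx)
{
	req->vf_id = cpu_to_le16(vf_id);
	req->min_tx_rings = cpu_to_le16(n_tx);
	req->max_tx_rings = cpu_to_le16(n_tx);
	req->min_rx_rings = cpu_to_le16(n_rx);
	req->max_rx_rings = cpu_to_le16(n_rx);
}
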
+
+/* hwrm_port_phy_cfg_input (size:448b/56B) */
 struct hwrm_port_phy_cfg_input {
-	__le16 req_type;
-	__le16 cmpl_ring;
-	__le16 seq_id;
-	__le16 target_id;
-	__le64 resp_addr;
-	__le32 flags;
-	#define PORT_PHY_CFG_REQ_FLAGS_RESET_PHY		    0x1UL
-	#define PORT_PHY_CFG_REQ_FLAGS_DEPRECATED		    0x2UL
-	#define PORT_PHY_CFG_REQ_FLAGS_FORCE			    0x4UL
-	#define PORT_PHY_CFG_REQ_FLAGS_RESTART_AUTONEG		    0x8UL
-	#define PORT_PHY_CFG_REQ_FLAGS_EEE_ENABLE		    0x10UL
-	#define PORT_PHY_CFG_REQ_FLAGS_EEE_DISABLE		    0x20UL
-	#define PORT_PHY_CFG_REQ_FLAGS_EEE_TX_LPI_ENABLE	    0x40UL
-	#define PORT_PHY_CFG_REQ_FLAGS_EEE_TX_LPI_DISABLE	    0x80UL
-	#define PORT_PHY_CFG_REQ_FLAGS_FEC_AUTONEG_ENABLE	    0x100UL
-	#define PORT_PHY_CFG_REQ_FLAGS_FEC_AUTONEG_DISABLE	    0x200UL
-	#define PORT_PHY_CFG_REQ_FLAGS_FEC_CLAUSE74_ENABLE	    0x400UL
-	#define PORT_PHY_CFG_REQ_FLAGS_FEC_CLAUSE74_DISABLE	    0x800UL
-	#define PORT_PHY_CFG_REQ_FLAGS_FEC_CLAUSE91_ENABLE	    0x1000UL
-	#define PORT_PHY_CFG_REQ_FLAGS_FEC_CLAUSE91_DISABLE	    0x2000UL
-	#define PORT_PHY_CFG_REQ_FLAGS_FORCE_LINK_DWN		    0x4000UL
-	__le32 enables;
-	#define PORT_PHY_CFG_REQ_ENABLES_AUTO_MODE		    0x1UL
-	#define PORT_PHY_CFG_REQ_ENABLES_AUTO_DUPLEX		    0x2UL
-	#define PORT_PHY_CFG_REQ_ENABLES_AUTO_PAUSE		    0x4UL
-	#define PORT_PHY_CFG_REQ_ENABLES_AUTO_LINK_SPEED	    0x8UL
-	#define PORT_PHY_CFG_REQ_ENABLES_AUTO_LINK_SPEED_MASK      0x10UL
-	#define PORT_PHY_CFG_REQ_ENABLES_WIRESPEED		    0x20UL
-	#define PORT_PHY_CFG_REQ_ENABLES_LPBK			    0x40UL
-	#define PORT_PHY_CFG_REQ_ENABLES_PREEMPHASIS		    0x80UL
-	#define PORT_PHY_CFG_REQ_ENABLES_FORCE_PAUSE		    0x100UL
-	#define PORT_PHY_CFG_REQ_ENABLES_EEE_LINK_SPEED_MASK       0x200UL
-	#define PORT_PHY_CFG_REQ_ENABLES_TX_LPI_TIMER		    0x400UL
-	__le16 port_id;
-	__le16 force_link_speed;
-	#define PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_100MB	   0x1UL
-	#define PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_1GB		   0xaUL
-	#define PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_2GB		   0x14UL
-	#define PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_2_5GB	   0x19UL
-	#define PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_10GB		   0x64UL
-	#define PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_20GB		   0xc8UL
-	#define PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_25GB		   0xfaUL
-	#define PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_40GB		   0x190UL
-	#define PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_50GB		   0x1f4UL
-	#define PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_100GB	   0x3e8UL
-	#define PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_10MB		   0xffffUL
-	u8 auto_mode;
-	#define PORT_PHY_CFG_REQ_AUTO_MODE_NONE		   0x0UL
-	#define PORT_PHY_CFG_REQ_AUTO_MODE_ALL_SPEEDS		   0x1UL
-	#define PORT_PHY_CFG_REQ_AUTO_MODE_ONE_SPEED		   0x2UL
-	#define PORT_PHY_CFG_REQ_AUTO_MODE_ONE_OR_BELOW	   0x3UL
-	#define PORT_PHY_CFG_REQ_AUTO_MODE_SPEED_MASK		   0x4UL
-	u8 auto_duplex;
-	#define PORT_PHY_CFG_REQ_AUTO_DUPLEX_HALF		   0x0UL
-	#define PORT_PHY_CFG_REQ_AUTO_DUPLEX_FULL		   0x1UL
-	#define PORT_PHY_CFG_REQ_AUTO_DUPLEX_BOTH		   0x2UL
-	u8 auto_pause;
-	#define PORT_PHY_CFG_REQ_AUTO_PAUSE_TX			    0x1UL
-	#define PORT_PHY_CFG_REQ_AUTO_PAUSE_RX			    0x2UL
-	#define PORT_PHY_CFG_REQ_AUTO_PAUSE_AUTONEG_PAUSE	    0x4UL
-	u8 unused_0;
-	__le16 auto_link_speed;
-	#define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_100MB		   0x1UL
-	#define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_1GB		   0xaUL
-	#define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_2GB		   0x14UL
-	#define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_2_5GB		   0x19UL
-	#define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_10GB		   0x64UL
-	#define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_20GB		   0xc8UL
-	#define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_25GB		   0xfaUL
-	#define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_40GB		   0x190UL
-	#define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_50GB		   0x1f4UL
-	#define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_100GB		   0x3e8UL
-	#define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_10MB		   0xffffUL
-	__le16 auto_link_speed_mask;
-	#define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_MASK_100MBHD      0x1UL
-	#define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_MASK_100MB	    0x2UL
-	#define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_MASK_1GBHD	    0x4UL
-	#define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_MASK_1GB	    0x8UL
-	#define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_MASK_2GB	    0x10UL
-	#define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_MASK_2_5GB	    0x20UL
-	#define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_MASK_10GB	    0x40UL
-	#define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_MASK_20GB	    0x80UL
-	#define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_MASK_25GB	    0x100UL
-	#define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_MASK_40GB	    0x200UL
-	#define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_MASK_50GB	    0x400UL
-	#define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_MASK_100GB	    0x800UL
-	#define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_MASK_10MBHD       0x1000UL
-	#define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_MASK_10MB	    0x2000UL
-	u8 wirespeed;
-	#define PORT_PHY_CFG_REQ_WIRESPEED_OFF			   0x0UL
-	#define PORT_PHY_CFG_REQ_WIRESPEED_ON			   0x1UL
-	u8 lpbk;
-	#define PORT_PHY_CFG_REQ_LPBK_NONE			   0x0UL
-	#define PORT_PHY_CFG_REQ_LPBK_LOCAL			   0x1UL
-	#define PORT_PHY_CFG_REQ_LPBK_REMOTE			   0x2UL
-	u8 force_pause;
-	#define PORT_PHY_CFG_REQ_FORCE_PAUSE_TX		    0x1UL
-	#define PORT_PHY_CFG_REQ_FORCE_PAUSE_RX		    0x2UL
-	u8 unused_1;
-	__le32 preemphasis;
-	__le16 eee_link_speed_mask;
-	#define PORT_PHY_CFG_REQ_EEE_LINK_SPEED_MASK_RSVD1	    0x1UL
-	#define PORT_PHY_CFG_REQ_EEE_LINK_SPEED_MASK_100MB	    0x2UL
-	#define PORT_PHY_CFG_REQ_EEE_LINK_SPEED_MASK_RSVD2	    0x4UL
-	#define PORT_PHY_CFG_REQ_EEE_LINK_SPEED_MASK_1GB	    0x8UL
-	#define PORT_PHY_CFG_REQ_EEE_LINK_SPEED_MASK_RSVD3	    0x10UL
-	#define PORT_PHY_CFG_REQ_EEE_LINK_SPEED_MASK_RSVD4	    0x20UL
-	#define PORT_PHY_CFG_REQ_EEE_LINK_SPEED_MASK_10GB	    0x40UL
-	u8 unused_2;
-	u8 unused_3;
-	__le32 tx_lpi_timer;
-	__le32 unused_4;
-	#define PORT_PHY_CFG_REQ_TX_LPI_TIMER_MASK		    0xffffffUL
-	#define PORT_PHY_CFG_REQ_TX_LPI_TIMER_SFT		    0
-};
-
-/* Output (16 bytes) */
+	__le16	req_type;
+	__le16	cmpl_ring;
+	__le16	seq_id;
+	__le16	target_id;
+	__le64	resp_addr;
+	__le32	flags;
+	#define PORT_PHY_CFG_REQ_FLAGS_RESET_PHY                0x1UL
+	#define PORT_PHY_CFG_REQ_FLAGS_DEPRECATED               0x2UL
+	#define PORT_PHY_CFG_REQ_FLAGS_FORCE                    0x4UL
+	#define PORT_PHY_CFG_REQ_FLAGS_RESTART_AUTONEG          0x8UL
+	#define PORT_PHY_CFG_REQ_FLAGS_EEE_ENABLE               0x10UL
+	#define PORT_PHY_CFG_REQ_FLAGS_EEE_DISABLE              0x20UL
+	#define PORT_PHY_CFG_REQ_FLAGS_EEE_TX_LPI_ENABLE        0x40UL
+	#define PORT_PHY_CFG_REQ_FLAGS_EEE_TX_LPI_DISABLE       0x80UL
+	#define PORT_PHY_CFG_REQ_FLAGS_FEC_AUTONEG_ENABLE       0x100UL
+	#define PORT_PHY_CFG_REQ_FLAGS_FEC_AUTONEG_DISABLE      0x200UL
+	#define PORT_PHY_CFG_REQ_FLAGS_FEC_CLAUSE74_ENABLE      0x400UL
+	#define PORT_PHY_CFG_REQ_FLAGS_FEC_CLAUSE74_DISABLE     0x800UL
+	#define PORT_PHY_CFG_REQ_FLAGS_FEC_CLAUSE91_ENABLE      0x1000UL
+	#define PORT_PHY_CFG_REQ_FLAGS_FEC_CLAUSE91_DISABLE     0x2000UL
+	#define PORT_PHY_CFG_REQ_FLAGS_FORCE_LINK_DWN           0x4000UL
+	__le32	enables;
+	#define PORT_PHY_CFG_REQ_ENABLES_AUTO_MODE                0x1UL
+	#define PORT_PHY_CFG_REQ_ENABLES_AUTO_DUPLEX              0x2UL
+	#define PORT_PHY_CFG_REQ_ENABLES_AUTO_PAUSE               0x4UL
+	#define PORT_PHY_CFG_REQ_ENABLES_AUTO_LINK_SPEED          0x8UL
+	#define PORT_PHY_CFG_REQ_ENABLES_AUTO_LINK_SPEED_MASK     0x10UL
+	#define PORT_PHY_CFG_REQ_ENABLES_WIRESPEED                0x20UL
+	#define PORT_PHY_CFG_REQ_ENABLES_LPBK                     0x40UL
+	#define PORT_PHY_CFG_REQ_ENABLES_PREEMPHASIS              0x80UL
+	#define PORT_PHY_CFG_REQ_ENABLES_FORCE_PAUSE              0x100UL
+	#define PORT_PHY_CFG_REQ_ENABLES_EEE_LINK_SPEED_MASK      0x200UL
+	#define PORT_PHY_CFG_REQ_ENABLES_TX_LPI_TIMER             0x400UL
+	__le16	port_id;
+	__le16	force_link_speed;
+	#define PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_100MB 0x1UL
+	#define PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_1GB   0xaUL
+	#define PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_2GB   0x14UL
+	#define PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_2_5GB 0x19UL
+	#define PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_10GB  0x64UL
+	#define PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_20GB  0xc8UL
+	#define PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_25GB  0xfaUL
+	#define PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_40GB  0x190UL
+	#define PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_50GB  0x1f4UL
+	#define PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_100GB 0x3e8UL
+	#define PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_10MB  0xffffUL
+	#define PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_LAST PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_10MB
+	u8	auto_mode;
+	#define PORT_PHY_CFG_REQ_AUTO_MODE_NONE         0x0UL
+	#define PORT_PHY_CFG_REQ_AUTO_MODE_ALL_SPEEDS   0x1UL
+	#define PORT_PHY_CFG_REQ_AUTO_MODE_ONE_SPEED    0x2UL
+	#define PORT_PHY_CFG_REQ_AUTO_MODE_ONE_OR_BELOW 0x3UL
+	#define PORT_PHY_CFG_REQ_AUTO_MODE_SPEED_MASK   0x4UL
+	#define PORT_PHY_CFG_REQ_AUTO_MODE_LAST        PORT_PHY_CFG_REQ_AUTO_MODE_SPEED_MASK
+	u8	auto_duplex;
+	#define PORT_PHY_CFG_REQ_AUTO_DUPLEX_HALF 0x0UL
+	#define PORT_PHY_CFG_REQ_AUTO_DUPLEX_FULL 0x1UL
+	#define PORT_PHY_CFG_REQ_AUTO_DUPLEX_BOTH 0x2UL
+	#define PORT_PHY_CFG_REQ_AUTO_DUPLEX_LAST PORT_PHY_CFG_REQ_AUTO_DUPLEX_BOTH
+	u8	auto_pause;
+	#define PORT_PHY_CFG_REQ_AUTO_PAUSE_TX                0x1UL
+	#define PORT_PHY_CFG_REQ_AUTO_PAUSE_RX                0x2UL
+	#define PORT_PHY_CFG_REQ_AUTO_PAUSE_AUTONEG_PAUSE     0x4UL
+	u8	unused_0;
+	__le16	auto_link_speed;
+	#define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_100MB 0x1UL
+	#define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_1GB   0xaUL
+	#define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_2GB   0x14UL
+	#define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_2_5GB 0x19UL
+	#define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_10GB  0x64UL
+	#define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_20GB  0xc8UL
+	#define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_25GB  0xfaUL
+	#define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_40GB  0x190UL
+	#define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_50GB  0x1f4UL
+	#define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_100GB 0x3e8UL
+	#define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_10MB  0xffffUL
+	#define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_LAST PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_10MB
+	__le16	auto_link_speed_mask;
+	#define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_MASK_100MBHD     0x1UL
+	#define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_MASK_100MB       0x2UL
+	#define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_MASK_1GBHD       0x4UL
+	#define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_MASK_1GB         0x8UL
+	#define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_MASK_2GB         0x10UL
+	#define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_MASK_2_5GB       0x20UL
+	#define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_MASK_10GB        0x40UL
+	#define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_MASK_20GB        0x80UL
+	#define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_MASK_25GB        0x100UL
+	#define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_MASK_40GB        0x200UL
+	#define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_MASK_50GB        0x400UL
+	#define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_MASK_100GB       0x800UL
+	#define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_MASK_10MBHD      0x1000UL
+	#define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_MASK_10MB        0x2000UL
+	u8	wirespeed;
+	#define PORT_PHY_CFG_REQ_WIRESPEED_OFF 0x0UL
+	#define PORT_PHY_CFG_REQ_WIRESPEED_ON  0x1UL
+	#define PORT_PHY_CFG_REQ_WIRESPEED_LAST PORT_PHY_CFG_REQ_WIRESPEED_ON
+	u8	lpbk;
+	#define PORT_PHY_CFG_REQ_LPBK_NONE   0x0UL
+	#define PORT_PHY_CFG_REQ_LPBK_LOCAL  0x1UL
+	#define PORT_PHY_CFG_REQ_LPBK_REMOTE 0x2UL
+	#define PORT_PHY_CFG_REQ_LPBK_LAST  PORT_PHY_CFG_REQ_LPBK_REMOTE
+	u8	force_pause;
+	#define PORT_PHY_CFG_REQ_FORCE_PAUSE_TX     0x1UL
+	#define PORT_PHY_CFG_REQ_FORCE_PAUSE_RX     0x2UL
+	u8	unused_1;
+	__le32	preemphasis;
+	__le16	eee_link_speed_mask;
+	#define PORT_PHY_CFG_REQ_EEE_LINK_SPEED_MASK_RSVD1     0x1UL
+	#define PORT_PHY_CFG_REQ_EEE_LINK_SPEED_MASK_100MB     0x2UL
+	#define PORT_PHY_CFG_REQ_EEE_LINK_SPEED_MASK_RSVD2     0x4UL
+	#define PORT_PHY_CFG_REQ_EEE_LINK_SPEED_MASK_1GB       0x8UL
+	#define PORT_PHY_CFG_REQ_EEE_LINK_SPEED_MASK_RSVD3     0x10UL
+	#define PORT_PHY_CFG_REQ_EEE_LINK_SPEED_MASK_RSVD4     0x20UL
+	#define PORT_PHY_CFG_REQ_EEE_LINK_SPEED_MASK_10GB      0x40UL
+	u8	unused_2[2];
+	__le32	tx_lpi_timer;
+	#define PORT_PHY_CFG_REQ_TX_LPI_TIMER_MASK 0xffffffUL
+	#define PORT_PHY_CFG_REQ_TX_LPI_TIMER_SFT 0
+	__le32	unused_3;
+};
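
Forced speed and autonegotiation are mutually exclusive paths through this request: FLAGS_FORCE plus force_link_speed on one side, the ENABLES_* bits plus the auto_* fields plus FLAGS_RESTART_AUTONEG on the other. A sketch of the autoneg path, advertising only 10G and 25G (field fills only; header setup and send omitted, helper name hypothetical):

static void phy_cfg_autoneg_10_25g(struct hwrm_port_phy_cfg_input *req)
{
	req->enables = cpu_to_le32(PORT_PHY_CFG_REQ_ENABLES_AUTO_MODE |
				   PORT_PHY_CFG_REQ_ENABLES_AUTO_LINK_SPEED_MASK);
	req->auto_mode = PORT_PHY_CFG_REQ_AUTO_MODE_SPEED_MASK;
	req->auto_link_speed_mask =
		cpu_to_le16(PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_MASK_10GB |
			    PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_MASK_25GB);
	req->flags = cpu_to_le32(PORT_PHY_CFG_REQ_FLAGS_RESTART_AUTONEG);
}
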
+
+/* hwrm_port_phy_cfg_output (size:128b/16B) */
 struct hwrm_port_phy_cfg_output {
-	__le16 error_code;
-	__le16 req_type;
-	__le16 seq_id;
-	__le16 resp_len;
-	__le32 unused_0;
-	u8 unused_1;
-	u8 unused_2;
-	u8 unused_3;
-	u8 valid;
-};
-
-/* hwrm_port_phy_qcfg */
-/* Input (24 bytes) */
+	__le16	error_code;
+	__le16	req_type;
+	__le16	seq_id;
+	__le16	resp_len;
+	u8	unused_0[7];
+	u8	valid;
+};
+
+/* hwrm_port_phy_qcfg_input (size:192b/24B) */
 struct hwrm_port_phy_qcfg_input {
-	__le16 req_type;
-	__le16 cmpl_ring;
-	__le16 seq_id;
-	__le16 target_id;
-	__le64 resp_addr;
-	__le16 port_id;
-	__le16 unused_0[3];
+	__le16	req_type;
+	__le16	cmpl_ring;
+	__le16	seq_id;
+	__le16	target_id;
+	__le64	resp_addr;
+	__le16	port_id;
+	u8	unused_0[6];
 };
 
-/* Output (96 bytes) */
+/* hwrm_port_phy_qcfg_output (size:768b/96B) */
 struct hwrm_port_phy_qcfg_output {
-	__le16 error_code;
-	__le16 req_type;
-	__le16 seq_id;
-	__le16 resp_len;
-	u8 link;
-	#define PORT_PHY_QCFG_RESP_LINK_NO_LINK		   0x0UL
-	#define PORT_PHY_QCFG_RESP_LINK_SIGNAL			   0x1UL
-	#define PORT_PHY_QCFG_RESP_LINK_LINK			   0x2UL
-	u8 unused_0;
-	__le16 link_speed;
-	#define PORT_PHY_QCFG_RESP_LINK_SPEED_100MB		   0x1UL
-	#define PORT_PHY_QCFG_RESP_LINK_SPEED_1GB		   0xaUL
-	#define PORT_PHY_QCFG_RESP_LINK_SPEED_2GB		   0x14UL
-	#define PORT_PHY_QCFG_RESP_LINK_SPEED_2_5GB		   0x19UL
-	#define PORT_PHY_QCFG_RESP_LINK_SPEED_10GB		   0x64UL
-	#define PORT_PHY_QCFG_RESP_LINK_SPEED_20GB		   0xc8UL
-	#define PORT_PHY_QCFG_RESP_LINK_SPEED_25GB		   0xfaUL
-	#define PORT_PHY_QCFG_RESP_LINK_SPEED_40GB		   0x190UL
-	#define PORT_PHY_QCFG_RESP_LINK_SPEED_50GB		   0x1f4UL
-	#define PORT_PHY_QCFG_RESP_LINK_SPEED_100GB		   0x3e8UL
-	#define PORT_PHY_QCFG_RESP_LINK_SPEED_10MB		   0xffffUL
-	u8 duplex_cfg;
-	#define PORT_PHY_QCFG_RESP_DUPLEX_CFG_HALF		   0x0UL
-	#define PORT_PHY_QCFG_RESP_DUPLEX_CFG_FULL		   0x1UL
-	u8 pause;
-	#define PORT_PHY_QCFG_RESP_PAUSE_TX			    0x1UL
-	#define PORT_PHY_QCFG_RESP_PAUSE_RX			    0x2UL
-	__le16 support_speeds;
-	#define PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS_100MBHD	    0x1UL
-	#define PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS_100MB	    0x2UL
-	#define PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS_1GBHD	    0x4UL
-	#define PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS_1GB		    0x8UL
-	#define PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS_2GB		    0x10UL
-	#define PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS_2_5GB	    0x20UL
-	#define PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS_10GB		    0x40UL
-	#define PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS_20GB		    0x80UL
-	#define PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS_25GB		    0x100UL
-	#define PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS_40GB		    0x200UL
-	#define PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS_50GB		    0x400UL
-	#define PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS_100GB	    0x800UL
-	#define PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS_10MBHD	    0x1000UL
-	#define PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS_10MB		    0x2000UL
-	__le16 force_link_speed;
-	#define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEED_100MB	   0x1UL
-	#define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEED_1GB	   0xaUL
-	#define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEED_2GB	   0x14UL
-	#define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEED_2_5GB	   0x19UL
-	#define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEED_10GB	   0x64UL
-	#define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEED_20GB	   0xc8UL
-	#define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEED_25GB	   0xfaUL
-	#define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEED_40GB	   0x190UL
-	#define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEED_50GB	   0x1f4UL
-	#define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEED_100GB	   0x3e8UL
-	#define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEED_10MB	   0xffffUL
-	u8 auto_mode;
-	#define PORT_PHY_QCFG_RESP_AUTO_MODE_NONE		   0x0UL
-	#define PORT_PHY_QCFG_RESP_AUTO_MODE_ALL_SPEEDS	   0x1UL
-	#define PORT_PHY_QCFG_RESP_AUTO_MODE_ONE_SPEED		   0x2UL
-	#define PORT_PHY_QCFG_RESP_AUTO_MODE_ONE_OR_BELOW	   0x3UL
-	#define PORT_PHY_QCFG_RESP_AUTO_MODE_SPEED_MASK	   0x4UL
-	u8 auto_pause;
-	#define PORT_PHY_QCFG_RESP_AUTO_PAUSE_TX		    0x1UL
-	#define PORT_PHY_QCFG_RESP_AUTO_PAUSE_RX		    0x2UL
-	#define PORT_PHY_QCFG_RESP_AUTO_PAUSE_AUTONEG_PAUSE	    0x4UL
-	__le16 auto_link_speed;
-	#define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_100MB	   0x1UL
-	#define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_1GB		   0xaUL
-	#define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_2GB		   0x14UL
-	#define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_2_5GB	   0x19UL
-	#define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_10GB	   0x64UL
-	#define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_20GB	   0xc8UL
-	#define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_25GB	   0xfaUL
-	#define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_40GB	   0x190UL
-	#define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_50GB	   0x1f4UL
-	#define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_100GB	   0x3e8UL
-	#define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_10MB	   0xffffUL
-	__le16 auto_link_speed_mask;
-	#define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_MASK_100MBHD    0x1UL
-	#define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_MASK_100MB      0x2UL
-	#define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_MASK_1GBHD      0x4UL
-	#define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_MASK_1GB	    0x8UL
-	#define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_MASK_2GB	    0x10UL
-	#define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_MASK_2_5GB      0x20UL
-	#define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_MASK_10GB       0x40UL
-	#define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_MASK_20GB       0x80UL
-	#define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_MASK_25GB       0x100UL
-	#define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_MASK_40GB       0x200UL
-	#define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_MASK_50GB       0x400UL
-	#define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_MASK_100GB      0x800UL
-	#define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_MASK_10MBHD     0x1000UL
-	#define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_MASK_10MB       0x2000UL
-	u8 wirespeed;
-	#define PORT_PHY_QCFG_RESP_WIRESPEED_OFF		   0x0UL
-	#define PORT_PHY_QCFG_RESP_WIRESPEED_ON		   0x1UL
-	u8 lpbk;
-	#define PORT_PHY_QCFG_RESP_LPBK_NONE			   0x0UL
-	#define PORT_PHY_QCFG_RESP_LPBK_LOCAL			   0x1UL
-	#define PORT_PHY_QCFG_RESP_LPBK_REMOTE			   0x2UL
-	u8 force_pause;
-	#define PORT_PHY_QCFG_RESP_FORCE_PAUSE_TX		    0x1UL
-	#define PORT_PHY_QCFG_RESP_FORCE_PAUSE_RX		    0x2UL
-	u8 module_status;
-	#define PORT_PHY_QCFG_RESP_MODULE_STATUS_NONE		   0x0UL
-	#define PORT_PHY_QCFG_RESP_MODULE_STATUS_DISABLETX	   0x1UL
-	#define PORT_PHY_QCFG_RESP_MODULE_STATUS_WARNINGMSG       0x2UL
-	#define PORT_PHY_QCFG_RESP_MODULE_STATUS_PWRDOWN	   0x3UL
-	#define PORT_PHY_QCFG_RESP_MODULE_STATUS_NOTINSERTED      0x4UL
-	#define PORT_PHY_QCFG_RESP_MODULE_STATUS_NOTAPPLICABLE    0xffUL
-	__le32 preemphasis;
-	u8 phy_maj;
-	u8 phy_min;
-	u8 phy_bld;
-	u8 phy_type;
-	#define PORT_PHY_QCFG_RESP_PHY_TYPE_UNKNOWN		   0x0UL
-	#define PORT_PHY_QCFG_RESP_PHY_TYPE_BASECR		   0x1UL
-	#define PORT_PHY_QCFG_RESP_PHY_TYPE_BASEKR4		   0x2UL
-	#define PORT_PHY_QCFG_RESP_PHY_TYPE_BASELR		   0x3UL
-	#define PORT_PHY_QCFG_RESP_PHY_TYPE_BASESR		   0x4UL
-	#define PORT_PHY_QCFG_RESP_PHY_TYPE_BASEKR2		   0x5UL
-	#define PORT_PHY_QCFG_RESP_PHY_TYPE_BASEKX		   0x6UL
-	#define PORT_PHY_QCFG_RESP_PHY_TYPE_BASEKR		   0x7UL
-	#define PORT_PHY_QCFG_RESP_PHY_TYPE_BASET		   0x8UL
-	#define PORT_PHY_QCFG_RESP_PHY_TYPE_BASETE		   0x9UL
-	#define PORT_PHY_QCFG_RESP_PHY_TYPE_SGMIIEXTPHY	   0xaUL
-	#define PORT_PHY_QCFG_RESP_PHY_TYPE_25G_BASECR_CA_L       0xbUL
-	#define PORT_PHY_QCFG_RESP_PHY_TYPE_25G_BASECR_CA_S       0xcUL
-	#define PORT_PHY_QCFG_RESP_PHY_TYPE_25G_BASECR_CA_N       0xdUL
-	#define PORT_PHY_QCFG_RESP_PHY_TYPE_25G_BASESR		   0xeUL
-	#define PORT_PHY_QCFG_RESP_PHY_TYPE_100G_BASECR4	   0xfUL
-	#define PORT_PHY_QCFG_RESP_PHY_TYPE_100G_BASESR4	   0x10UL
-	#define PORT_PHY_QCFG_RESP_PHY_TYPE_100G_BASELR4	   0x11UL
-	#define PORT_PHY_QCFG_RESP_PHY_TYPE_100G_BASEER4	   0x12UL
-	#define PORT_PHY_QCFG_RESP_PHY_TYPE_100G_BASESR10	   0x13UL
-	#define PORT_PHY_QCFG_RESP_PHY_TYPE_40G_BASECR4	   0x14UL
-	#define PORT_PHY_QCFG_RESP_PHY_TYPE_40G_BASESR4	   0x15UL
-	#define PORT_PHY_QCFG_RESP_PHY_TYPE_40G_BASELR4	   0x16UL
-	#define PORT_PHY_QCFG_RESP_PHY_TYPE_40G_BASEER4	   0x17UL
-	#define PORT_PHY_QCFG_RESP_PHY_TYPE_40G_ACTIVE_CABLE      0x18UL
-	#define PORT_PHY_QCFG_RESP_PHY_TYPE_1G_BASET		   0x19UL
-	#define PORT_PHY_QCFG_RESP_PHY_TYPE_1G_BASESX		   0x1aUL
-	#define PORT_PHY_QCFG_RESP_PHY_TYPE_1G_BASECX		   0x1bUL
-	u8 media_type;
-	#define PORT_PHY_QCFG_RESP_MEDIA_TYPE_UNKNOWN		   0x0UL
-	#define PORT_PHY_QCFG_RESP_MEDIA_TYPE_TP		   0x1UL
-	#define PORT_PHY_QCFG_RESP_MEDIA_TYPE_DAC		   0x2UL
-	#define PORT_PHY_QCFG_RESP_MEDIA_TYPE_FIBRE		   0x3UL
-	u8 xcvr_pkg_type;
-	#define PORT_PHY_QCFG_RESP_XCVR_PKG_TYPE_XCVR_INTERNAL    0x1UL
-	#define PORT_PHY_QCFG_RESP_XCVR_PKG_TYPE_XCVR_EXTERNAL    0x2UL
-	u8 eee_config_phy_addr;
-	#define PORT_PHY_QCFG_RESP_PHY_ADDR_MASK		    0x1fUL
-	#define PORT_PHY_QCFG_RESP_PHY_ADDR_SFT		    0
-	#define PORT_PHY_QCFG_RESP_EEE_CONFIG_EEE_ENABLED	    0x20UL
-	#define PORT_PHY_QCFG_RESP_EEE_CONFIG_EEE_ACTIVE	    0x40UL
-	#define PORT_PHY_QCFG_RESP_EEE_CONFIG_EEE_TX_LPI	    0x80UL
-	#define PORT_PHY_QCFG_RESP_EEE_CONFIG_MASK		    0xe0UL
-	#define PORT_PHY_QCFG_RESP_EEE_CONFIG_SFT		    5
-	u8 parallel_detect;
-	#define PORT_PHY_QCFG_RESP_PARALLEL_DETECT		    0x1UL
-	#define PORT_PHY_QCFG_RESP_RESERVED_MASK		    0xfeUL
-	#define PORT_PHY_QCFG_RESP_RESERVED_SFT		    1
-	__le16 link_partner_adv_speeds;
-	#define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_SPEEDS_100MBHD 0x1UL
-	#define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_SPEEDS_100MB   0x2UL
-	#define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_SPEEDS_1GBHD   0x4UL
-	#define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_SPEEDS_1GB     0x8UL
-	#define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_SPEEDS_2GB     0x10UL
-	#define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_SPEEDS_2_5GB   0x20UL
-	#define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_SPEEDS_10GB    0x40UL
-	#define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_SPEEDS_20GB    0x80UL
-	#define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_SPEEDS_25GB    0x100UL
-	#define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_SPEEDS_40GB    0x200UL
-	#define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_SPEEDS_50GB    0x400UL
-	#define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_SPEEDS_100GB   0x800UL
-	#define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_SPEEDS_10MBHD  0x1000UL
-	#define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_SPEEDS_10MB    0x2000UL
-	u8 link_partner_adv_auto_mode;
-	#define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_AUTO_MODE_NONE 0x0UL
-	#define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_AUTO_MODE_ALL_SPEEDS 0x1UL
-	#define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_AUTO_MODE_ONE_SPEED 0x2UL
+	__le16	error_code;
+	__le16	req_type;
+	__le16	seq_id;
+	__le16	resp_len;
+	u8	link;
+	#define PORT_PHY_QCFG_RESP_LINK_NO_LINK 0x0UL
+	#define PORT_PHY_QCFG_RESP_LINK_SIGNAL  0x1UL
+	#define PORT_PHY_QCFG_RESP_LINK_LINK    0x2UL
+	#define PORT_PHY_QCFG_RESP_LINK_LAST   PORT_PHY_QCFG_RESP_LINK_LINK
+	u8	unused_0;
+	__le16	link_speed;
+	#define PORT_PHY_QCFG_RESP_LINK_SPEED_100MB 0x1UL
+	#define PORT_PHY_QCFG_RESP_LINK_SPEED_1GB   0xaUL
+	#define PORT_PHY_QCFG_RESP_LINK_SPEED_2GB   0x14UL
+	#define PORT_PHY_QCFG_RESP_LINK_SPEED_2_5GB 0x19UL
+	#define PORT_PHY_QCFG_RESP_LINK_SPEED_10GB  0x64UL
+	#define PORT_PHY_QCFG_RESP_LINK_SPEED_20GB  0xc8UL
+	#define PORT_PHY_QCFG_RESP_LINK_SPEED_25GB  0xfaUL
+	#define PORT_PHY_QCFG_RESP_LINK_SPEED_40GB  0x190UL
+	#define PORT_PHY_QCFG_RESP_LINK_SPEED_50GB  0x1f4UL
+	#define PORT_PHY_QCFG_RESP_LINK_SPEED_100GB 0x3e8UL
+	#define PORT_PHY_QCFG_RESP_LINK_SPEED_10MB  0xffffUL
+	#define PORT_PHY_QCFG_RESP_LINK_SPEED_LAST PORT_PHY_QCFG_RESP_LINK_SPEED_10MB
+	u8	duplex_cfg;
+	#define PORT_PHY_QCFG_RESP_DUPLEX_CFG_HALF 0x0UL
+	#define PORT_PHY_QCFG_RESP_DUPLEX_CFG_FULL 0x1UL
+	#define PORT_PHY_QCFG_RESP_DUPLEX_CFG_LAST PORT_PHY_QCFG_RESP_DUPLEX_CFG_FULL
+	u8	pause;
+	#define PORT_PHY_QCFG_RESP_PAUSE_TX     0x1UL
+	#define PORT_PHY_QCFG_RESP_PAUSE_RX     0x2UL
+	__le16	support_speeds;
+	#define PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS_100MBHD     0x1UL
+	#define PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS_100MB       0x2UL
+	#define PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS_1GBHD       0x4UL
+	#define PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS_1GB         0x8UL
+	#define PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS_2GB         0x10UL
+	#define PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS_2_5GB       0x20UL
+	#define PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS_10GB        0x40UL
+	#define PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS_20GB        0x80UL
+	#define PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS_25GB        0x100UL
+	#define PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS_40GB        0x200UL
+	#define PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS_50GB        0x400UL
+	#define PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS_100GB       0x800UL
+	#define PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS_10MBHD      0x1000UL
+	#define PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS_10MB        0x2000UL
+	__le16	force_link_speed;
+	#define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEED_100MB 0x1UL
+	#define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEED_1GB   0xaUL
+	#define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEED_2GB   0x14UL
+	#define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEED_2_5GB 0x19UL
+	#define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEED_10GB  0x64UL
+	#define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEED_20GB  0xc8UL
+	#define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEED_25GB  0xfaUL
+	#define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEED_40GB  0x190UL
+	#define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEED_50GB  0x1f4UL
+	#define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEED_100GB 0x3e8UL
+	#define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEED_10MB  0xffffUL
+	#define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEED_LAST PORT_PHY_QCFG_RESP_FORCE_LINK_SPEED_10MB
+	u8	auto_mode;
+	#define PORT_PHY_QCFG_RESP_AUTO_MODE_NONE         0x0UL
+	#define PORT_PHY_QCFG_RESP_AUTO_MODE_ALL_SPEEDS   0x1UL
+	#define PORT_PHY_QCFG_RESP_AUTO_MODE_ONE_SPEED    0x2UL
+	#define PORT_PHY_QCFG_RESP_AUTO_MODE_ONE_OR_BELOW 0x3UL
+	#define PORT_PHY_QCFG_RESP_AUTO_MODE_SPEED_MASK   0x4UL
+	#define PORT_PHY_QCFG_RESP_AUTO_MODE_LAST        PORT_PHY_QCFG_RESP_AUTO_MODE_SPEED_MASK
+	u8	auto_pause;
+	#define PORT_PHY_QCFG_RESP_AUTO_PAUSE_TX                0x1UL
+	#define PORT_PHY_QCFG_RESP_AUTO_PAUSE_RX                0x2UL
+	#define PORT_PHY_QCFG_RESP_AUTO_PAUSE_AUTONEG_PAUSE     0x4UL
+	__le16	auto_link_speed;
+	#define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_100MB 0x1UL
+	#define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_1GB   0xaUL
+	#define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_2GB   0x14UL
+	#define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_2_5GB 0x19UL
+	#define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_10GB  0x64UL
+	#define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_20GB  0xc8UL
+	#define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_25GB  0xfaUL
+	#define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_40GB  0x190UL
+	#define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_50GB  0x1f4UL
+	#define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_100GB 0x3e8UL
+	#define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_10MB  0xffffUL
+	#define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_LAST PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_10MB
+	__le16	auto_link_speed_mask;
+	#define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_MASK_100MBHD     0x1UL
+	#define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_MASK_100MB       0x2UL
+	#define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_MASK_1GBHD       0x4UL
+	#define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_MASK_1GB         0x8UL
+	#define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_MASK_2GB         0x10UL
+	#define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_MASK_2_5GB       0x20UL
+	#define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_MASK_10GB        0x40UL
+	#define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_MASK_20GB        0x80UL
+	#define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_MASK_25GB        0x100UL
+	#define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_MASK_40GB        0x200UL
+	#define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_MASK_50GB        0x400UL
+	#define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_MASK_100GB       0x800UL
+	#define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_MASK_10MBHD      0x1000UL
+	#define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_MASK_10MB        0x2000UL
+	u8	wirespeed;
+	#define PORT_PHY_QCFG_RESP_WIRESPEED_OFF 0x0UL
+	#define PORT_PHY_QCFG_RESP_WIRESPEED_ON  0x1UL
+	#define PORT_PHY_QCFG_RESP_WIRESPEED_LAST PORT_PHY_QCFG_RESP_WIRESPEED_ON
+	u8	lpbk;
+	#define PORT_PHY_QCFG_RESP_LPBK_NONE   0x0UL
+	#define PORT_PHY_QCFG_RESP_LPBK_LOCAL  0x1UL
+	#define PORT_PHY_QCFG_RESP_LPBK_REMOTE 0x2UL
+	#define PORT_PHY_QCFG_RESP_LPBK_LAST  PORT_PHY_QCFG_RESP_LPBK_REMOTE
+	u8	force_pause;
+	#define PORT_PHY_QCFG_RESP_FORCE_PAUSE_TX     0x1UL
+	#define PORT_PHY_QCFG_RESP_FORCE_PAUSE_RX     0x2UL
+	u8	module_status;
+	#define PORT_PHY_QCFG_RESP_MODULE_STATUS_NONE          0x0UL
+	#define PORT_PHY_QCFG_RESP_MODULE_STATUS_DISABLETX     0x1UL
+	#define PORT_PHY_QCFG_RESP_MODULE_STATUS_WARNINGMSG    0x2UL
+	#define PORT_PHY_QCFG_RESP_MODULE_STATUS_PWRDOWN       0x3UL
+	#define PORT_PHY_QCFG_RESP_MODULE_STATUS_NOTINSERTED   0x4UL
+	#define PORT_PHY_QCFG_RESP_MODULE_STATUS_NOTAPPLICABLE 0xffUL
+	#define PORT_PHY_QCFG_RESP_MODULE_STATUS_LAST         PORT_PHY_QCFG_RESP_MODULE_STATUS_NOTAPPLICABLE
+	__le32	preemphasis;
+	u8	phy_maj;
+	u8	phy_min;
+	u8	phy_bld;
+	u8	phy_type;
+	#define PORT_PHY_QCFG_RESP_PHY_TYPE_UNKNOWN          0x0UL
+	#define PORT_PHY_QCFG_RESP_PHY_TYPE_BASECR           0x1UL
+	#define PORT_PHY_QCFG_RESP_PHY_TYPE_BASEKR4          0x2UL
+	#define PORT_PHY_QCFG_RESP_PHY_TYPE_BASELR           0x3UL
+	#define PORT_PHY_QCFG_RESP_PHY_TYPE_BASESR           0x4UL
+	#define PORT_PHY_QCFG_RESP_PHY_TYPE_BASEKR2          0x5UL
+	#define PORT_PHY_QCFG_RESP_PHY_TYPE_BASEKX           0x6UL
+	#define PORT_PHY_QCFG_RESP_PHY_TYPE_BASEKR           0x7UL
+	#define PORT_PHY_QCFG_RESP_PHY_TYPE_BASET            0x8UL
+	#define PORT_PHY_QCFG_RESP_PHY_TYPE_BASETE           0x9UL
+	#define PORT_PHY_QCFG_RESP_PHY_TYPE_SGMIIEXTPHY      0xaUL
+	#define PORT_PHY_QCFG_RESP_PHY_TYPE_25G_BASECR_CA_L  0xbUL
+	#define PORT_PHY_QCFG_RESP_PHY_TYPE_25G_BASECR_CA_S  0xcUL
+	#define PORT_PHY_QCFG_RESP_PHY_TYPE_25G_BASECR_CA_N  0xdUL
+	#define PORT_PHY_QCFG_RESP_PHY_TYPE_25G_BASESR       0xeUL
+	#define PORT_PHY_QCFG_RESP_PHY_TYPE_100G_BASECR4     0xfUL
+	#define PORT_PHY_QCFG_RESP_PHY_TYPE_100G_BASESR4     0x10UL
+	#define PORT_PHY_QCFG_RESP_PHY_TYPE_100G_BASELR4     0x11UL
+	#define PORT_PHY_QCFG_RESP_PHY_TYPE_100G_BASEER4     0x12UL
+	#define PORT_PHY_QCFG_RESP_PHY_TYPE_100G_BASESR10    0x13UL
+	#define PORT_PHY_QCFG_RESP_PHY_TYPE_40G_BASECR4      0x14UL
+	#define PORT_PHY_QCFG_RESP_PHY_TYPE_40G_BASESR4      0x15UL
+	#define PORT_PHY_QCFG_RESP_PHY_TYPE_40G_BASELR4      0x16UL
+	#define PORT_PHY_QCFG_RESP_PHY_TYPE_40G_BASEER4      0x17UL
+	#define PORT_PHY_QCFG_RESP_PHY_TYPE_40G_ACTIVE_CABLE 0x18UL
+	#define PORT_PHY_QCFG_RESP_PHY_TYPE_1G_BASET         0x19UL
+	#define PORT_PHY_QCFG_RESP_PHY_TYPE_1G_BASESX        0x1aUL
+	#define PORT_PHY_QCFG_RESP_PHY_TYPE_1G_BASECX        0x1bUL
+	#define PORT_PHY_QCFG_RESP_PHY_TYPE_LAST            PORT_PHY_QCFG_RESP_PHY_TYPE_1G_BASECX
+	u8	media_type;
+	#define PORT_PHY_QCFG_RESP_MEDIA_TYPE_UNKNOWN 0x0UL
+	#define PORT_PHY_QCFG_RESP_MEDIA_TYPE_TP      0x1UL
+	#define PORT_PHY_QCFG_RESP_MEDIA_TYPE_DAC     0x2UL
+	#define PORT_PHY_QCFG_RESP_MEDIA_TYPE_FIBRE   0x3UL
+	#define PORT_PHY_QCFG_RESP_MEDIA_TYPE_LAST   PORT_PHY_QCFG_RESP_MEDIA_TYPE_FIBRE
+	u8	xcvr_pkg_type;
+	#define PORT_PHY_QCFG_RESP_XCVR_PKG_TYPE_XCVR_INTERNAL 0x1UL
+	#define PORT_PHY_QCFG_RESP_XCVR_PKG_TYPE_XCVR_EXTERNAL 0x2UL
+	#define PORT_PHY_QCFG_RESP_XCVR_PKG_TYPE_LAST         PORT_PHY_QCFG_RESP_XCVR_PKG_TYPE_XCVR_EXTERNAL
+	u8	eee_config_phy_addr;
+	#define PORT_PHY_QCFG_RESP_PHY_ADDR_MASK              0x1fUL
+	#define PORT_PHY_QCFG_RESP_PHY_ADDR_SFT               0
+	#define PORT_PHY_QCFG_RESP_EEE_CONFIG_MASK            0xe0UL
+	#define PORT_PHY_QCFG_RESP_EEE_CONFIG_SFT             5
+	#define PORT_PHY_QCFG_RESP_EEE_CONFIG_EEE_ENABLED      0x20UL
+	#define PORT_PHY_QCFG_RESP_EEE_CONFIG_EEE_ACTIVE       0x40UL
+	#define PORT_PHY_QCFG_RESP_EEE_CONFIG_EEE_TX_LPI       0x80UL
+	u8	parallel_detect;
+	#define PORT_PHY_QCFG_RESP_PARALLEL_DETECT     0x1UL
+	__le16	link_partner_adv_speeds;
+	#define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_SPEEDS_100MBHD     0x1UL
+	#define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_SPEEDS_100MB       0x2UL
+	#define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_SPEEDS_1GBHD       0x4UL
+	#define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_SPEEDS_1GB         0x8UL
+	#define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_SPEEDS_2GB         0x10UL
+	#define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_SPEEDS_2_5GB       0x20UL
+	#define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_SPEEDS_10GB        0x40UL
+	#define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_SPEEDS_20GB        0x80UL
+	#define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_SPEEDS_25GB        0x100UL
+	#define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_SPEEDS_40GB        0x200UL
+	#define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_SPEEDS_50GB        0x400UL
+	#define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_SPEEDS_100GB       0x800UL
+	#define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_SPEEDS_10MBHD      0x1000UL
+	#define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_SPEEDS_10MB        0x2000UL
+	u8	link_partner_adv_auto_mode;
+	#define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_AUTO_MODE_NONE         0x0UL
+	#define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_AUTO_MODE_ALL_SPEEDS   0x1UL
+	#define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_AUTO_MODE_ONE_SPEED    0x2UL
 	#define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_AUTO_MODE_ONE_OR_BELOW 0x3UL
-	#define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_AUTO_MODE_SPEED_MASK 0x4UL
-	u8 link_partner_adv_pause;
-	#define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_PAUSE_TX       0x1UL
-	#define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_PAUSE_RX       0x2UL
-	__le16 adv_eee_link_speed_mask;
-	#define PORT_PHY_QCFG_RESP_ADV_EEE_LINK_SPEED_MASK_RSVD1   0x1UL
-	#define PORT_PHY_QCFG_RESP_ADV_EEE_LINK_SPEED_MASK_100MB   0x2UL
-	#define PORT_PHY_QCFG_RESP_ADV_EEE_LINK_SPEED_MASK_RSVD2   0x4UL
-	#define PORT_PHY_QCFG_RESP_ADV_EEE_LINK_SPEED_MASK_1GB     0x8UL
-	#define PORT_PHY_QCFG_RESP_ADV_EEE_LINK_SPEED_MASK_RSVD3   0x10UL
-	#define PORT_PHY_QCFG_RESP_ADV_EEE_LINK_SPEED_MASK_RSVD4   0x20UL
-	#define PORT_PHY_QCFG_RESP_ADV_EEE_LINK_SPEED_MASK_10GB    0x40UL
-	__le16 link_partner_adv_eee_link_speed_mask;
-	#define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_EEE_LINK_SPEED_MASK_RSVD1 0x1UL
-	#define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_EEE_LINK_SPEED_MASK_100MB 0x2UL
-	#define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_EEE_LINK_SPEED_MASK_RSVD2 0x4UL
-	#define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_EEE_LINK_SPEED_MASK_1GB 0x8UL
-	#define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_EEE_LINK_SPEED_MASK_RSVD3 0x10UL
-	#define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_EEE_LINK_SPEED_MASK_RSVD4 0x20UL
-	#define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_EEE_LINK_SPEED_MASK_10GB 0x40UL
-	__le32 xcvr_identifier_type_tx_lpi_timer;
-	#define PORT_PHY_QCFG_RESP_TX_LPI_TIMER_MASK		    0xffffffUL
-	#define PORT_PHY_QCFG_RESP_TX_LPI_TIMER_SFT		    0
-	#define PORT_PHY_QCFG_RESP_XCVR_IDENTIFIER_TYPE_MASK       0xff000000UL
-	#define PORT_PHY_QCFG_RESP_XCVR_IDENTIFIER_TYPE_SFT	    24
+	#define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_AUTO_MODE_SPEED_MASK   0x4UL
+	#define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_AUTO_MODE_LAST        PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_AUTO_MODE_SPEED_MASK
+	u8	link_partner_adv_pause;
+	#define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_PAUSE_TX     0x1UL
+	#define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_PAUSE_RX     0x2UL
+	__le16	adv_eee_link_speed_mask;
+	#define PORT_PHY_QCFG_RESP_ADV_EEE_LINK_SPEED_MASK_RSVD1     0x1UL
+	#define PORT_PHY_QCFG_RESP_ADV_EEE_LINK_SPEED_MASK_100MB     0x2UL
+	#define PORT_PHY_QCFG_RESP_ADV_EEE_LINK_SPEED_MASK_RSVD2     0x4UL
+	#define PORT_PHY_QCFG_RESP_ADV_EEE_LINK_SPEED_MASK_1GB       0x8UL
+	#define PORT_PHY_QCFG_RESP_ADV_EEE_LINK_SPEED_MASK_RSVD3     0x10UL
+	#define PORT_PHY_QCFG_RESP_ADV_EEE_LINK_SPEED_MASK_RSVD4     0x20UL
+	#define PORT_PHY_QCFG_RESP_ADV_EEE_LINK_SPEED_MASK_10GB      0x40UL
+	__le16	link_partner_adv_eee_link_speed_mask;
+	#define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_EEE_LINK_SPEED_MASK_RSVD1     0x1UL
+	#define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_EEE_LINK_SPEED_MASK_100MB     0x2UL
+	#define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_EEE_LINK_SPEED_MASK_RSVD2     0x4UL
+	#define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_EEE_LINK_SPEED_MASK_1GB       0x8UL
+	#define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_EEE_LINK_SPEED_MASK_RSVD3     0x10UL
+	#define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_EEE_LINK_SPEED_MASK_RSVD4     0x20UL
+	#define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_EEE_LINK_SPEED_MASK_10GB      0x40UL
+	__le32	xcvr_identifier_type_tx_lpi_timer;
+	#define PORT_PHY_QCFG_RESP_TX_LPI_TIMER_MASK            0xffffffUL
+	#define PORT_PHY_QCFG_RESP_TX_LPI_TIMER_SFT             0
+	#define PORT_PHY_QCFG_RESP_XCVR_IDENTIFIER_TYPE_MASK    0xff000000UL
+	#define PORT_PHY_QCFG_RESP_XCVR_IDENTIFIER_TYPE_SFT     24
 	#define PORT_PHY_QCFG_RESP_XCVR_IDENTIFIER_TYPE_UNKNOWN   (0x0UL << 24)
 	#define PORT_PHY_QCFG_RESP_XCVR_IDENTIFIER_TYPE_SFP       (0x3UL << 24)
 	#define PORT_PHY_QCFG_RESP_XCVR_IDENTIFIER_TYPE_QSFP      (0xcUL << 24)
 	#define PORT_PHY_QCFG_RESP_XCVR_IDENTIFIER_TYPE_QSFPPLUS  (0xdUL << 24)
 	#define PORT_PHY_QCFG_RESP_XCVR_IDENTIFIER_TYPE_QSFP28    (0x11UL << 24)
-	__le16 fec_cfg;
-	#define PORT_PHY_QCFG_RESP_FEC_CFG_FEC_NONE_SUPPORTED      0x1UL
-	#define PORT_PHY_QCFG_RESP_FEC_CFG_FEC_AUTONEG_SUPPORTED   0x2UL
-	#define PORT_PHY_QCFG_RESP_FEC_CFG_FEC_AUTONEG_ENABLED     0x4UL
-	#define PORT_PHY_QCFG_RESP_FEC_CFG_FEC_CLAUSE74_SUPPORTED  0x8UL
-	#define PORT_PHY_QCFG_RESP_FEC_CFG_FEC_CLAUSE74_ENABLED    0x10UL
-	#define PORT_PHY_QCFG_RESP_FEC_CFG_FEC_CLAUSE91_SUPPORTED  0x20UL
-	#define PORT_PHY_QCFG_RESP_FEC_CFG_FEC_CLAUSE91_ENABLED    0x40UL
-	u8 duplex_state;
-	#define PORT_PHY_QCFG_RESP_DUPLEX_STATE_HALF		   0x0UL
-	#define PORT_PHY_QCFG_RESP_DUPLEX_STATE_FULL		   0x1UL
-	u8 unused_1;
-	char phy_vendor_name[16];
-	char phy_vendor_partnumber[16];
-	__le32 unused_2;
-	u8 unused_3;
-	u8 unused_4;
-	u8 unused_5;
-	u8 valid;
-};
-
-/* hwrm_port_mac_cfg */
-/* Input (40 bytes) */
+	#define PORT_PHY_QCFG_RESP_XCVR_IDENTIFIER_TYPE_LAST     PORT_PHY_QCFG_RESP_XCVR_IDENTIFIER_TYPE_QSFP28
+	__le16	fec_cfg;
+	#define PORT_PHY_QCFG_RESP_FEC_CFG_FEC_NONE_SUPPORTED         0x1UL
+	#define PORT_PHY_QCFG_RESP_FEC_CFG_FEC_AUTONEG_SUPPORTED      0x2UL
+	#define PORT_PHY_QCFG_RESP_FEC_CFG_FEC_AUTONEG_ENABLED        0x4UL
+	#define PORT_PHY_QCFG_RESP_FEC_CFG_FEC_CLAUSE74_SUPPORTED     0x8UL
+	#define PORT_PHY_QCFG_RESP_FEC_CFG_FEC_CLAUSE74_ENABLED       0x10UL
+	#define PORT_PHY_QCFG_RESP_FEC_CFG_FEC_CLAUSE91_SUPPORTED     0x20UL
+	#define PORT_PHY_QCFG_RESP_FEC_CFG_FEC_CLAUSE91_ENABLED       0x40UL
+	u8	duplex_state;
+	#define PORT_PHY_QCFG_RESP_DUPLEX_STATE_HALF 0x0UL
+	#define PORT_PHY_QCFG_RESP_DUPLEX_STATE_FULL 0x1UL
+	#define PORT_PHY_QCFG_RESP_DUPLEX_STATE_LAST PORT_PHY_QCFG_RESP_DUPLEX_STATE_FULL
+	u8	option_flags;
+	#define PORT_PHY_QCFG_RESP_OPTION_FLAGS_MEDIA_AUTO_DETECT     0x1UL
+	char	phy_vendor_name[16];
+	char	phy_vendor_partnumber[16];
+	u8	unused_2[7];
+	u8	valid;
+};
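
An aside for readers of this hunk: xcvr_identifier_type_tx_lpi_timer packs a 24-bit EEE TX LPI timer and an 8-bit transceiver identifier into one little-endian word. A minimal decoding sketch using only the MASK/SFT/type macros above (the helper name is illustrative, not part of the driver):

	static void bnxt_unpack_xcvr_lpi(const struct hwrm_port_phy_qcfg_output *resp,
					 u32 *tx_lpi_timer, bool *is_sfp)
	{
		u32 v = le32_to_cpu(resp->xcvr_identifier_type_tx_lpi_timer);

		/* low 24 bits: EEE TX LPI timer */
		*tx_lpi_timer = (v & PORT_PHY_QCFG_RESP_TX_LPI_TIMER_MASK) >>
				PORT_PHY_QCFG_RESP_TX_LPI_TIMER_SFT;
		/* high 8 bits: transceiver type; the *_TYPE_* values are
		 * pre-shifted (0xNUL << 24), so compare the masked word
		 * against them directly instead of shifting down first. */
		*is_sfp = (v & PORT_PHY_QCFG_RESP_XCVR_IDENTIFIER_TYPE_MASK) ==
			  PORT_PHY_QCFG_RESP_XCVR_IDENTIFIER_TYPE_SFP;
	}
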
+
+/* hwrm_port_mac_cfg_input (size:320b/40B) */
 struct hwrm_port_mac_cfg_input {
-	__le16 req_type;
-	__le16 cmpl_ring;
-	__le16 seq_id;
-	__le16 target_id;
-	__le64 resp_addr;
-	__le32 flags;
-	#define PORT_MAC_CFG_REQ_FLAGS_MATCH_LINK		    0x1UL
-	#define PORT_MAC_CFG_REQ_FLAGS_VLAN_PRI2COS_ENABLE	    0x2UL
-	#define PORT_MAC_CFG_REQ_FLAGS_TUNNEL_PRI2COS_ENABLE       0x4UL
-	#define PORT_MAC_CFG_REQ_FLAGS_IP_DSCP2COS_ENABLE	    0x8UL
-	#define PORT_MAC_CFG_REQ_FLAGS_PTP_RX_TS_CAPTURE_ENABLE    0x10UL
-	#define PORT_MAC_CFG_REQ_FLAGS_PTP_RX_TS_CAPTURE_DISABLE   0x20UL
-	#define PORT_MAC_CFG_REQ_FLAGS_PTP_TX_TS_CAPTURE_ENABLE    0x40UL
-	#define PORT_MAC_CFG_REQ_FLAGS_PTP_TX_TS_CAPTURE_DISABLE   0x80UL
-	#define PORT_MAC_CFG_REQ_FLAGS_OOB_WOL_ENABLE		    0x100UL
-	#define PORT_MAC_CFG_REQ_FLAGS_OOB_WOL_DISABLE		    0x200UL
-	#define PORT_MAC_CFG_REQ_FLAGS_VLAN_PRI2COS_DISABLE	    0x400UL
-	#define PORT_MAC_CFG_REQ_FLAGS_TUNNEL_PRI2COS_DISABLE      0x800UL
-	#define PORT_MAC_CFG_REQ_FLAGS_IP_DSCP2COS_DISABLE	    0x1000UL
-	__le32 enables;
-	#define PORT_MAC_CFG_REQ_ENABLES_IPG			    0x1UL
-	#define PORT_MAC_CFG_REQ_ENABLES_LPBK			    0x2UL
-	#define PORT_MAC_CFG_REQ_ENABLES_VLAN_PRI2COS_MAP_PRI      0x4UL
-	#define PORT_MAC_CFG_REQ_ENABLES_RESERVED1		    0x8UL
-	#define PORT_MAC_CFG_REQ_ENABLES_TUNNEL_PRI2COS_MAP_PRI    0x10UL
-	#define PORT_MAC_CFG_REQ_ENABLES_DSCP2COS_MAP_PRI	    0x20UL
-	#define PORT_MAC_CFG_REQ_ENABLES_RX_TS_CAPTURE_PTP_MSG_TYPE 0x40UL
-	#define PORT_MAC_CFG_REQ_ENABLES_TX_TS_CAPTURE_PTP_MSG_TYPE 0x80UL
-	#define PORT_MAC_CFG_REQ_ENABLES_COS_FIELD_CFG		    0x100UL
-	__le16 port_id;
-	u8 ipg;
-	u8 lpbk;
-	#define PORT_MAC_CFG_REQ_LPBK_NONE			   0x0UL
-	#define PORT_MAC_CFG_REQ_LPBK_LOCAL			   0x1UL
-	#define PORT_MAC_CFG_REQ_LPBK_REMOTE			   0x2UL
-	u8 vlan_pri2cos_map_pri;
-	u8 reserved1;
-	u8 tunnel_pri2cos_map_pri;
-	u8 dscp2pri_map_pri;
-	__le16 rx_ts_capture_ptp_msg_type;
-	__le16 tx_ts_capture_ptp_msg_type;
-	u8 cos_field_cfg;
-	#define PORT_MAC_CFG_REQ_COS_FIELD_CFG_RSVD1		    0x1UL
-	#define PORT_MAC_CFG_REQ_COS_FIELD_CFG_VLAN_PRI_SEL_MASK   0x6UL
-	#define PORT_MAC_CFG_REQ_COS_FIELD_CFG_VLAN_PRI_SEL_SFT    1
-	#define PORT_MAC_CFG_REQ_COS_FIELD_CFG_VLAN_PRI_SEL_INNERMOST (0x0UL << 1)
-	#define PORT_MAC_CFG_REQ_COS_FIELD_CFG_VLAN_PRI_SEL_OUTER (0x1UL << 1)
-	#define PORT_MAC_CFG_REQ_COS_FIELD_CFG_VLAN_PRI_SEL_OUTERMOST (0x2UL << 1)
-	#define PORT_MAC_CFG_REQ_COS_FIELD_CFG_VLAN_PRI_SEL_UNSPECIFIED (0x3UL << 1)
-	#define PORT_MAC_CFG_REQ_COS_FIELD_CFG_VLAN_PRI_SEL_LAST    PORT_MAC_CFG_REQ_COS_FIELD_CFG_VLAN_PRI_SEL_UNSPECIFIED
-	#define PORT_MAC_CFG_REQ_COS_FIELD_CFG_T_VLAN_PRI_SEL_MASK 0x18UL
-	#define PORT_MAC_CFG_REQ_COS_FIELD_CFG_T_VLAN_PRI_SEL_SFT  3
-	#define PORT_MAC_CFG_REQ_COS_FIELD_CFG_T_VLAN_PRI_SEL_INNERMOST (0x0UL << 3)
-	#define PORT_MAC_CFG_REQ_COS_FIELD_CFG_T_VLAN_PRI_SEL_OUTER (0x1UL << 3)
-	#define PORT_MAC_CFG_REQ_COS_FIELD_CFG_T_VLAN_PRI_SEL_OUTERMOST (0x2UL << 3)
-	#define PORT_MAC_CFG_REQ_COS_FIELD_CFG_T_VLAN_PRI_SEL_UNSPECIFIED (0x3UL << 3)
-	#define PORT_MAC_CFG_REQ_COS_FIELD_CFG_T_VLAN_PRI_SEL_LAST    PORT_MAC_CFG_REQ_COS_FIELD_CFG_T_VLAN_PRI_SEL_UNSPECIFIED
-	#define PORT_MAC_CFG_REQ_COS_FIELD_CFG_DEFAULT_COS_MASK    0xe0UL
-	#define PORT_MAC_CFG_REQ_COS_FIELD_CFG_DEFAULT_COS_SFT     5
-	u8 unused_0[3];
-};
-
-/* Output (16 bytes) */
+	__le16	req_type;
+	__le16	cmpl_ring;
+	__le16	seq_id;
+	__le16	target_id;
+	__le64	resp_addr;
+	__le32	flags;
+	#define PORT_MAC_CFG_REQ_FLAGS_MATCH_LINK                    0x1UL
+	#define PORT_MAC_CFG_REQ_FLAGS_VLAN_PRI2COS_ENABLE           0x2UL
+	#define PORT_MAC_CFG_REQ_FLAGS_TUNNEL_PRI2COS_ENABLE         0x4UL
+	#define PORT_MAC_CFG_REQ_FLAGS_IP_DSCP2COS_ENABLE            0x8UL
+	#define PORT_MAC_CFG_REQ_FLAGS_PTP_RX_TS_CAPTURE_ENABLE      0x10UL
+	#define PORT_MAC_CFG_REQ_FLAGS_PTP_RX_TS_CAPTURE_DISABLE     0x20UL
+	#define PORT_MAC_CFG_REQ_FLAGS_PTP_TX_TS_CAPTURE_ENABLE      0x40UL
+	#define PORT_MAC_CFG_REQ_FLAGS_PTP_TX_TS_CAPTURE_DISABLE     0x80UL
+	#define PORT_MAC_CFG_REQ_FLAGS_OOB_WOL_ENABLE                0x100UL
+	#define PORT_MAC_CFG_REQ_FLAGS_OOB_WOL_DISABLE               0x200UL
+	#define PORT_MAC_CFG_REQ_FLAGS_VLAN_PRI2COS_DISABLE          0x400UL
+	#define PORT_MAC_CFG_REQ_FLAGS_TUNNEL_PRI2COS_DISABLE        0x800UL
+	#define PORT_MAC_CFG_REQ_FLAGS_IP_DSCP2COS_DISABLE           0x1000UL
+	__le32	enables;
+	#define PORT_MAC_CFG_REQ_ENABLES_IPG                            0x1UL
+	#define PORT_MAC_CFG_REQ_ENABLES_LPBK                           0x2UL
+	#define PORT_MAC_CFG_REQ_ENABLES_VLAN_PRI2COS_MAP_PRI           0x4UL
+	#define PORT_MAC_CFG_REQ_ENABLES_TUNNEL_PRI2COS_MAP_PRI         0x10UL
+	#define PORT_MAC_CFG_REQ_ENABLES_DSCP2COS_MAP_PRI               0x20UL
+	#define PORT_MAC_CFG_REQ_ENABLES_RX_TS_CAPTURE_PTP_MSG_TYPE     0x40UL
+	#define PORT_MAC_CFG_REQ_ENABLES_TX_TS_CAPTURE_PTP_MSG_TYPE     0x80UL
+	#define PORT_MAC_CFG_REQ_ENABLES_COS_FIELD_CFG                  0x100UL
+	__le16	port_id;
+	u8	ipg;
+	u8	lpbk;
+	#define PORT_MAC_CFG_REQ_LPBK_NONE   0x0UL
+	#define PORT_MAC_CFG_REQ_LPBK_LOCAL  0x1UL
+	#define PORT_MAC_CFG_REQ_LPBK_REMOTE 0x2UL
+	#define PORT_MAC_CFG_REQ_LPBK_LAST  PORT_MAC_CFG_REQ_LPBK_REMOTE
+	u8	vlan_pri2cos_map_pri;
+	u8	reserved1;
+	u8	tunnel_pri2cos_map_pri;
+	u8	dscp2pri_map_pri;
+	__le16	rx_ts_capture_ptp_msg_type;
+	__le16	tx_ts_capture_ptp_msg_type;
+	u8	cos_field_cfg;
+	#define PORT_MAC_CFG_REQ_COS_FIELD_CFG_RSVD1                     0x1UL
+	#define PORT_MAC_CFG_REQ_COS_FIELD_CFG_VLAN_PRI_SEL_MASK         0x6UL
+	#define PORT_MAC_CFG_REQ_COS_FIELD_CFG_VLAN_PRI_SEL_SFT          1
+	#define PORT_MAC_CFG_REQ_COS_FIELD_CFG_VLAN_PRI_SEL_INNERMOST      (0x0UL << 1)
+	#define PORT_MAC_CFG_REQ_COS_FIELD_CFG_VLAN_PRI_SEL_OUTER          (0x1UL << 1)
+	#define PORT_MAC_CFG_REQ_COS_FIELD_CFG_VLAN_PRI_SEL_OUTERMOST      (0x2UL << 1)
+	#define PORT_MAC_CFG_REQ_COS_FIELD_CFG_VLAN_PRI_SEL_UNSPECIFIED    (0x3UL << 1)
+	#define PORT_MAC_CFG_REQ_COS_FIELD_CFG_VLAN_PRI_SEL_LAST          PORT_MAC_CFG_REQ_COS_FIELD_CFG_VLAN_PRI_SEL_UNSPECIFIED
+	#define PORT_MAC_CFG_REQ_COS_FIELD_CFG_T_VLAN_PRI_SEL_MASK       0x18UL
+	#define PORT_MAC_CFG_REQ_COS_FIELD_CFG_T_VLAN_PRI_SEL_SFT        3
+	#define PORT_MAC_CFG_REQ_COS_FIELD_CFG_T_VLAN_PRI_SEL_INNERMOST    (0x0UL << 3)
+	#define PORT_MAC_CFG_REQ_COS_FIELD_CFG_T_VLAN_PRI_SEL_OUTER        (0x1UL << 3)
+	#define PORT_MAC_CFG_REQ_COS_FIELD_CFG_T_VLAN_PRI_SEL_OUTERMOST    (0x2UL << 3)
+	#define PORT_MAC_CFG_REQ_COS_FIELD_CFG_T_VLAN_PRI_SEL_UNSPECIFIED  (0x3UL << 3)
+	#define PORT_MAC_CFG_REQ_COS_FIELD_CFG_T_VLAN_PRI_SEL_LAST        PORT_MAC_CFG_REQ_COS_FIELD_CFG_T_VLAN_PRI_SEL_UNSPECIFIED
+	#define PORT_MAC_CFG_REQ_COS_FIELD_CFG_DEFAULT_COS_MASK          0xe0UL
+	#define PORT_MAC_CFG_REQ_COS_FIELD_CFG_DEFAULT_COS_SFT           5
+	u8	unused_0[3];
+};
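
One convention worth noting in hwrm_port_mac_cfg_input: firmware only honors a field whose bit is set in enables. A hedged sketch that selects local MAC loopback (field setup only; it assumes req was zeroed and leaves the driver's actual request init/send path out):

	static void fill_mac_lpbk_req(struct hwrm_port_mac_cfg_input *req,
				      u16 port_id)
	{
		/* only LPBK is enabled, so firmware ignores every other field */
		req->enables = cpu_to_le32(PORT_MAC_CFG_REQ_ENABLES_LPBK);
		req->port_id = cpu_to_le16(port_id);
		req->lpbk = PORT_MAC_CFG_REQ_LPBK_LOCAL;
	}
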
+
+/* hwrm_port_mac_cfg_output (size:128b/16B) */
 struct hwrm_port_mac_cfg_output {
-	__le16 error_code;
-	__le16 req_type;
-	__le16 seq_id;
-	__le16 resp_len;
-	__le16 mru;
-	__le16 mtu;
-	u8 ipg;
-	u8 lpbk;
-	#define PORT_MAC_CFG_RESP_LPBK_NONE			   0x0UL
-	#define PORT_MAC_CFG_RESP_LPBK_LOCAL			   0x1UL
-	#define PORT_MAC_CFG_RESP_LPBK_REMOTE			   0x2UL
-	u8 unused_0;
-	u8 valid;
-};
-
-/* hwrm_port_mac_ptp_qcfg */
-/* Input (24 bytes) */
+	__le16	error_code;
+	__le16	req_type;
+	__le16	seq_id;
+	__le16	resp_len;
+	__le16	mru;
+	__le16	mtu;
+	u8	ipg;
+	u8	lpbk;
+	#define PORT_MAC_CFG_RESP_LPBK_NONE   0x0UL
+	#define PORT_MAC_CFG_RESP_LPBK_LOCAL  0x1UL
+	#define PORT_MAC_CFG_RESP_LPBK_REMOTE 0x2UL
+	#define PORT_MAC_CFG_RESP_LPBK_LAST  PORT_MAC_CFG_RESP_LPBK_REMOTE
+	u8	unused_0;
+	u8	valid;
+};
+
+/* hwrm_port_mac_ptp_qcfg_input (size:192b/24B) */
 struct hwrm_port_mac_ptp_qcfg_input {
-	__le16 req_type;
-	__le16 cmpl_ring;
-	__le16 seq_id;
-	__le16 target_id;
-	__le64 resp_addr;
-	__le16 port_id;
-	__le16 unused_0[3];
+	__le16	req_type;
+	__le16	cmpl_ring;
+	__le16	seq_id;
+	__le16	target_id;
+	__le64	resp_addr;
+	__le16	port_id;
+	u8	unused_0[6];
 };
 
-/* Output (80 bytes) */
+/* hwrm_port_mac_ptp_qcfg_output (size:640b/80B) */
 struct hwrm_port_mac_ptp_qcfg_output {
-	__le16 error_code;
-	__le16 req_type;
-	__le16 seq_id;
-	__le16 resp_len;
-	u8 flags;
-	#define PORT_MAC_PTP_QCFG_RESP_FLAGS_DIRECT_ACCESS	    0x1UL
-	#define PORT_MAC_PTP_QCFG_RESP_FLAGS_HWRM_ACCESS	    0x2UL
-	u8 unused_0;
-	__le16 unused_1;
-	__le32 rx_ts_reg_off_lower;
-	__le32 rx_ts_reg_off_upper;
-	__le32 rx_ts_reg_off_seq_id;
-	__le32 rx_ts_reg_off_src_id_0;
-	__le32 rx_ts_reg_off_src_id_1;
-	__le32 rx_ts_reg_off_src_id_2;
-	__le32 rx_ts_reg_off_domain_id;
-	__le32 rx_ts_reg_off_fifo;
-	__le32 rx_ts_reg_off_fifo_adv;
-	__le32 rx_ts_reg_off_granularity;
-	__le32 tx_ts_reg_off_lower;
-	__le32 tx_ts_reg_off_upper;
-	__le32 tx_ts_reg_off_seq_id;
-	__le32 tx_ts_reg_off_fifo;
-	__le32 tx_ts_reg_off_granularity;
-	__le32 unused_2;
-	u8 unused_3;
-	u8 unused_4;
-	u8 unused_5;
-	u8 valid;
-};
-
-/* hwrm_port_qstats */
-/* Input (40 bytes) */
+	__le16	error_code;
+	__le16	req_type;
+	__le16	seq_id;
+	__le16	resp_len;
+	u8	flags;
+	#define PORT_MAC_PTP_QCFG_RESP_FLAGS_DIRECT_ACCESS     0x1UL
+	#define PORT_MAC_PTP_QCFG_RESP_FLAGS_HWRM_ACCESS       0x2UL
+	u8	unused_0[3];
+	__le32	rx_ts_reg_off_lower;
+	__le32	rx_ts_reg_off_upper;
+	__le32	rx_ts_reg_off_seq_id;
+	__le32	rx_ts_reg_off_src_id_0;
+	__le32	rx_ts_reg_off_src_id_1;
+	__le32	rx_ts_reg_off_src_id_2;
+	__le32	rx_ts_reg_off_domain_id;
+	__le32	rx_ts_reg_off_fifo;
+	__le32	rx_ts_reg_off_fifo_adv;
+	__le32	rx_ts_reg_off_granularity;
+	__le32	tx_ts_reg_off_lower;
+	__le32	tx_ts_reg_off_upper;
+	__le32	tx_ts_reg_off_seq_id;
+	__le32	tx_ts_reg_off_fifo;
+	__le32	tx_ts_reg_off_granularity;
+	u8	unused_1[7];
+	u8	valid;
+};
+
+/* hwrm_port_qstats_input (size:320b/40B) */
 struct hwrm_port_qstats_input {
-	__le16 req_type;
-	__le16 cmpl_ring;
-	__le16 seq_id;
-	__le16 target_id;
-	__le64 resp_addr;
-	__le16 port_id;
-	u8 unused_0;
-	u8 unused_1;
-	u8 unused_2[3];
-	u8 unused_3;
-	__le64 tx_stat_host_addr;
-	__le64 rx_stat_host_addr;
-};
-
-/* Output (16 bytes) */
+	__le16	req_type;
+	__le16	cmpl_ring;
+	__le16	seq_id;
+	__le16	target_id;
+	__le64	resp_addr;
+	__le16	port_id;
+	u8	unused_0[6];
+	__le64	tx_stat_host_addr;
+	__le64	rx_stat_host_addr;
+};
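
The two *_stat_host_addr fields carry DMA addresses of host buffers that firmware fills with the port statistics. A sketch of the setup, where BUF_SIZE is a placeholder (the driver sizes these buffers from its port-stats structures) and cleanup on partial failure is elided for brevity:

	#define BUF_SIZE 512	/* placeholder, see note above */

	static int fill_qstats_req(struct device *dev,
				   struct hwrm_port_qstats_input *req,
				   dma_addr_t *tx_map, dma_addr_t *rx_map)
	{
		void *tx_buf = dma_alloc_coherent(dev, BUF_SIZE, tx_map, GFP_KERNEL);
		void *rx_buf = dma_alloc_coherent(dev, BUF_SIZE, rx_map, GFP_KERNEL);

		if (!tx_buf || !rx_buf)
			return -ENOMEM;	/* a successful allocation leaks here */
		req->tx_stat_host_addr = cpu_to_le64(*tx_map);
		req->rx_stat_host_addr = cpu_to_le64(*rx_map);
		return 0;
	}
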
+
+/* hwrm_port_qstats_output (size:128b/16B) */
 struct hwrm_port_qstats_output {
-	__le16 error_code;
-	__le16 req_type;
-	__le16 seq_id;
-	__le16 resp_len;
-	__le16 tx_stat_size;
-	__le16 rx_stat_size;
-	u8 unused_0;
-	u8 unused_1;
-	u8 unused_2;
-	u8 valid;
-};
-
-/* hwrm_port_lpbk_qstats */
-/* Input (16 bytes) */
+	__le16	error_code;
+	__le16	req_type;
+	__le16	seq_id;
+	__le16	resp_len;
+	__le16	tx_stat_size;
+	__le16	rx_stat_size;
+	u8	unused_0[3];
+	u8	valid;
+};
+
+/* hwrm_port_lpbk_qstats_input (size:128b/16B) */
 struct hwrm_port_lpbk_qstats_input {
-	__le16 req_type;
-	__le16 cmpl_ring;
-	__le16 seq_id;
-	__le16 target_id;
-	__le64 resp_addr;
+	__le16	req_type;
+	__le16	cmpl_ring;
+	__le16	seq_id;
+	__le16	target_id;
+	__le64	resp_addr;
 };
 
-/* Output (96 bytes) */
+/* hwrm_port_lpbk_qstats_output (size:768b/96B) */
 struct hwrm_port_lpbk_qstats_output {
-	__le16 error_code;
-	__le16 req_type;
-	__le16 seq_id;
-	__le16 resp_len;
-	__le64 lpbk_ucast_frames;
-	__le64 lpbk_mcast_frames;
-	__le64 lpbk_bcast_frames;
-	__le64 lpbk_ucast_bytes;
-	__le64 lpbk_mcast_bytes;
-	__le64 lpbk_bcast_bytes;
-	__le64 tx_stat_discard;
-	__le64 tx_stat_error;
-	__le64 rx_stat_discard;
-	__le64 rx_stat_error;
-	__le32 unused_0;
-	u8 unused_1;
-	u8 unused_2;
-	u8 unused_3;
-	u8 valid;
-};
-
-/* hwrm_port_clr_stats */
-/* Input (24 bytes) */
+	__le16	error_code;
+	__le16	req_type;
+	__le16	seq_id;
+	__le16	resp_len;
+	__le64	lpbk_ucast_frames;
+	__le64	lpbk_mcast_frames;
+	__le64	lpbk_bcast_frames;
+	__le64	lpbk_ucast_bytes;
+	__le64	lpbk_mcast_bytes;
+	__le64	lpbk_bcast_bytes;
+	__le64	tx_stat_discard;
+	__le64	tx_stat_error;
+	__le64	rx_stat_discard;
+	__le64	rx_stat_error;
+	u8	unused_0[7];
+	u8	valid;
+};
+
+/* hwrm_port_clr_stats_input (size:192b/24B) */
 struct hwrm_port_clr_stats_input {
-	__le16 req_type;
-	__le16 cmpl_ring;
-	__le16 seq_id;
-	__le16 target_id;
-	__le64 resp_addr;
-	__le16 port_id;
-	__le16 unused_0[3];
+	__le16	req_type;
+	__le16	cmpl_ring;
+	__le16	seq_id;
+	__le16	target_id;
+	__le64	resp_addr;
+	__le16	port_id;
+	u8	unused_0[6];
 };
 
-/* Output (16 bytes) */
+/* hwrm_port_clr_stats_output (size:128b/16B) */
 struct hwrm_port_clr_stats_output {
-	__le16 error_code;
-	__le16 req_type;
-	__le16 seq_id;
-	__le16 resp_len;
-	__le32 unused_0;
-	u8 unused_1;
-	u8 unused_2;
-	u8 unused_3;
-	u8 valid;
-};
-
-/* hwrm_port_lpbk_clr_stats */
-/* Input (16 bytes) */
+	__le16	error_code;
+	__le16	req_type;
+	__le16	seq_id;
+	__le16	resp_len;
+	u8	unused_0[7];
+	u8	valid;
+};
+
+/* hwrm_port_lpbk_clr_stats_input (size:128b/16B) */
 struct hwrm_port_lpbk_clr_stats_input {
-	__le16 req_type;
-	__le16 cmpl_ring;
-	__le16 seq_id;
-	__le16 target_id;
-	__le64 resp_addr;
+	__le16	req_type;
+	__le16	cmpl_ring;
+	__le16	seq_id;
+	__le16	target_id;
+	__le64	resp_addr;
 };
 
-/* Output (16 bytes) */
+/* hwrm_port_lpbk_clr_stats_output (size:128b/16B) */
 struct hwrm_port_lpbk_clr_stats_output {
-	__le16 error_code;
-	__le16 req_type;
-	__le16 seq_id;
-	__le16 resp_len;
-	__le32 unused_0;
-	u8 unused_1;
-	u8 unused_2;
-	u8 unused_3;
-	u8 valid;
-};
-
-/* hwrm_port_phy_qcaps */
-/* Input (24 bytes) */
+	__le16	error_code;
+	__le16	req_type;
+	__le16	seq_id;
+	__le16	resp_len;
+	u8	unused_0[7];
+	u8	valid;
+};
+
+/* hwrm_port_phy_qcaps_input (size:192b/24B) */
 struct hwrm_port_phy_qcaps_input {
-	__le16 req_type;
-	__le16 cmpl_ring;
-	__le16 seq_id;
-	__le16 target_id;
-	__le64 resp_addr;
-	__le16 port_id;
-	__le16 unused_0[3];
+	__le16	req_type;
+	__le16	cmpl_ring;
+	__le16	seq_id;
+	__le16	target_id;
+	__le64	resp_addr;
+	__le16	port_id;
+	u8	unused_0[6];
 };
 
-/* Output (24 bytes) */
+/* hwrm_port_phy_qcaps_output (size:192b/24B) */
 struct hwrm_port_phy_qcaps_output {
-	__le16 error_code;
-	__le16 req_type;
-	__le16 seq_id;
-	__le16 resp_len;
-	u8 flags;
-	#define PORT_PHY_QCAPS_RESP_FLAGS_EEE_SUPPORTED	    0x1UL
-	#define PORT_PHY_QCAPS_RESP_FLAGS_RSVD1_MASK		    0xfeUL
-	#define PORT_PHY_QCAPS_RESP_FLAGS_RSVD1_SFT		    1
-	u8 port_cnt;
-	#define PORT_PHY_QCAPS_RESP_PORT_CNT_UNKNOWN		   0x0UL
-	#define PORT_PHY_QCAPS_RESP_PORT_CNT_1			   0x1UL
-	#define PORT_PHY_QCAPS_RESP_PORT_CNT_2			   0x2UL
-	#define PORT_PHY_QCAPS_RESP_PORT_CNT_3			   0x3UL
-	#define PORT_PHY_QCAPS_RESP_PORT_CNT_4			   0x4UL
-	__le16 supported_speeds_force_mode;
-	#define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_FORCE_MODE_100MBHD 0x1UL
-	#define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_FORCE_MODE_100MB 0x2UL
-	#define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_FORCE_MODE_1GBHD 0x4UL
-	#define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_FORCE_MODE_1GB 0x8UL
-	#define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_FORCE_MODE_2GB 0x10UL
-	#define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_FORCE_MODE_2_5GB 0x20UL
-	#define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_FORCE_MODE_10GB 0x40UL
-	#define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_FORCE_MODE_20GB 0x80UL
-	#define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_FORCE_MODE_25GB 0x100UL
-	#define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_FORCE_MODE_40GB 0x200UL
-	#define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_FORCE_MODE_50GB 0x400UL
-	#define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_FORCE_MODE_100GB 0x800UL
-	#define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_FORCE_MODE_10MBHD 0x1000UL
-	#define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_FORCE_MODE_10MB 0x2000UL
-	__le16 supported_speeds_auto_mode;
-	#define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_AUTO_MODE_100MBHD 0x1UL
-	#define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_AUTO_MODE_100MB 0x2UL
-	#define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_AUTO_MODE_1GBHD 0x4UL
-	#define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_AUTO_MODE_1GB 0x8UL
-	#define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_AUTO_MODE_2GB 0x10UL
-	#define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_AUTO_MODE_2_5GB 0x20UL
-	#define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_AUTO_MODE_10GB 0x40UL
-	#define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_AUTO_MODE_20GB 0x80UL
-	#define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_AUTO_MODE_25GB 0x100UL
-	#define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_AUTO_MODE_40GB 0x200UL
-	#define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_AUTO_MODE_50GB 0x400UL
-	#define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_AUTO_MODE_100GB 0x800UL
-	#define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_AUTO_MODE_10MBHD 0x1000UL
-	#define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_AUTO_MODE_10MB 0x2000UL
-	__le16 supported_speeds_eee_mode;
-	#define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_EEE_MODE_RSVD1 0x1UL
-	#define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_EEE_MODE_100MB 0x2UL
-	#define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_EEE_MODE_RSVD2 0x4UL
-	#define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_EEE_MODE_1GB  0x8UL
-	#define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_EEE_MODE_RSVD3 0x10UL
-	#define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_EEE_MODE_RSVD4 0x20UL
-	#define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_EEE_MODE_10GB 0x40UL
-	__le32 tx_lpi_timer_low;
-	#define PORT_PHY_QCAPS_RESP_TX_LPI_TIMER_LOW_MASK	    0xffffffUL
-	#define PORT_PHY_QCAPS_RESP_TX_LPI_TIMER_LOW_SFT	    0
-	#define PORT_PHY_QCAPS_RESP_RSVD2_MASK			    0xff000000UL
-	#define PORT_PHY_QCAPS_RESP_RSVD2_SFT			    24
-	__le32 valid_tx_lpi_timer_high;
-	#define PORT_PHY_QCAPS_RESP_TX_LPI_TIMER_HIGH_MASK	    0xffffffUL
-	#define PORT_PHY_QCAPS_RESP_TX_LPI_TIMER_HIGH_SFT	    0
-	#define PORT_PHY_QCAPS_RESP_VALID_MASK			    0xff000000UL
-	#define PORT_PHY_QCAPS_RESP_VALID_SFT			    24
-};
-
-/* hwrm_port_phy_i2c_read */
-/* Input (40 bytes) */
+	__le16	error_code;
+	__le16	req_type;
+	__le16	seq_id;
+	__le16	resp_len;
+	u8	flags;
+	#define PORT_PHY_QCAPS_RESP_FLAGS_EEE_SUPPORTED     0x1UL
+	#define PORT_PHY_QCAPS_RESP_FLAGS_RSVD1_MASK        0xfeUL
+	#define PORT_PHY_QCAPS_RESP_FLAGS_RSVD1_SFT         1
+	u8	port_cnt;
+	#define PORT_PHY_QCAPS_RESP_PORT_CNT_UNKNOWN 0x0UL
+	#define PORT_PHY_QCAPS_RESP_PORT_CNT_1       0x1UL
+	#define PORT_PHY_QCAPS_RESP_PORT_CNT_2       0x2UL
+	#define PORT_PHY_QCAPS_RESP_PORT_CNT_3       0x3UL
+	#define PORT_PHY_QCAPS_RESP_PORT_CNT_4       0x4UL
+	#define PORT_PHY_QCAPS_RESP_PORT_CNT_LAST   PORT_PHY_QCAPS_RESP_PORT_CNT_4
+	__le16	supported_speeds_force_mode;
+	#define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_FORCE_MODE_100MBHD     0x1UL
+	#define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_FORCE_MODE_100MB       0x2UL
+	#define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_FORCE_MODE_1GBHD       0x4UL
+	#define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_FORCE_MODE_1GB         0x8UL
+	#define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_FORCE_MODE_2GB         0x10UL
+	#define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_FORCE_MODE_2_5GB       0x20UL
+	#define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_FORCE_MODE_10GB        0x40UL
+	#define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_FORCE_MODE_20GB        0x80UL
+	#define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_FORCE_MODE_25GB        0x100UL
+	#define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_FORCE_MODE_40GB        0x200UL
+	#define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_FORCE_MODE_50GB        0x400UL
+	#define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_FORCE_MODE_100GB       0x800UL
+	#define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_FORCE_MODE_10MBHD      0x1000UL
+	#define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_FORCE_MODE_10MB        0x2000UL
+	__le16	supported_speeds_auto_mode;
+	#define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_AUTO_MODE_100MBHD     0x1UL
+	#define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_AUTO_MODE_100MB       0x2UL
+	#define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_AUTO_MODE_1GBHD       0x4UL
+	#define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_AUTO_MODE_1GB         0x8UL
+	#define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_AUTO_MODE_2GB         0x10UL
+	#define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_AUTO_MODE_2_5GB       0x20UL
+	#define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_AUTO_MODE_10GB        0x40UL
+	#define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_AUTO_MODE_20GB        0x80UL
+	#define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_AUTO_MODE_25GB        0x100UL
+	#define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_AUTO_MODE_40GB        0x200UL
+	#define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_AUTO_MODE_50GB        0x400UL
+	#define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_AUTO_MODE_100GB       0x800UL
+	#define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_AUTO_MODE_10MBHD      0x1000UL
+	#define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_AUTO_MODE_10MB        0x2000UL
+	__le16	supported_speeds_eee_mode;
+	#define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_EEE_MODE_RSVD1     0x1UL
+	#define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_EEE_MODE_100MB     0x2UL
+	#define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_EEE_MODE_RSVD2     0x4UL
+	#define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_EEE_MODE_1GB       0x8UL
+	#define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_EEE_MODE_RSVD3     0x10UL
+	#define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_EEE_MODE_RSVD4     0x20UL
+	#define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_EEE_MODE_10GB      0x40UL
+	__le32	tx_lpi_timer_low;
+	#define PORT_PHY_QCAPS_RESP_TX_LPI_TIMER_LOW_MASK 0xffffffUL
+	#define PORT_PHY_QCAPS_RESP_TX_LPI_TIMER_LOW_SFT 0
+	#define PORT_PHY_QCAPS_RESP_RSVD2_MASK           0xff000000UL
+	#define PORT_PHY_QCAPS_RESP_RSVD2_SFT            24
+	__le32	valid_tx_lpi_timer_high;
+	#define PORT_PHY_QCAPS_RESP_TX_LPI_TIMER_HIGH_MASK 0xffffffUL
+	#define PORT_PHY_QCAPS_RESP_TX_LPI_TIMER_HIGH_SFT 0
+	#define PORT_PHY_QCAPS_RESP_VALID_MASK            0xff000000UL
+	#define PORT_PHY_QCAPS_RESP_VALID_SFT             24
+};
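
hwrm_port_phy_qcaps_output has no standalone valid byte; it rides in the top 8 bits of valid_tx_lpi_timer_high. A sketch, assuming the usual HWRM convention that a nonzero valid byte marks a completed response:

	static bool phy_qcaps_resp_valid(const struct hwrm_port_phy_qcaps_output *resp)
	{
		u32 v = le32_to_cpu(resp->valid_tx_lpi_timer_high);

		/* top byte == valid indicator, low 24 bits == LPI timer high */
		return !!(v & PORT_PHY_QCAPS_RESP_VALID_MASK);
	}
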
+
+/* hwrm_port_phy_i2c_read_input (size:320b/40B) */
 struct hwrm_port_phy_i2c_read_input {
-	__le16 req_type;
-	__le16 cmpl_ring;
-	__le16 seq_id;
-	__le16 target_id;
-	__le64 resp_addr;
-	__le32 flags;
-	__le32 enables;
-	#define PORT_PHY_I2C_READ_REQ_ENABLES_PAGE_OFFSET	    0x1UL
-	__le16 port_id;
-	u8 i2c_slave_addr;
-	u8 unused_0;
-	__le16 page_number;
-	__le16 page_offset;
-	u8 data_length;
-	u8 unused_1[7];
-};
-
-/* Output (80 bytes) */
+	__le16	req_type;
+	__le16	cmpl_ring;
+	__le16	seq_id;
+	__le16	target_id;
+	__le64	resp_addr;
+	__le32	flags;
+	__le32	enables;
+	#define PORT_PHY_I2C_READ_REQ_ENABLES_PAGE_OFFSET     0x1UL
+	__le16	port_id;
+	u8	i2c_slave_addr;
+	u8	unused_0;
+	__le16	page_number;
+	__le16	page_offset;
+	u8	data_length;
+	u8	unused_1[7];
+};
+
+/* hwrm_port_phy_i2c_read_output (size:640b/80B) */
 struct hwrm_port_phy_i2c_read_output {
-	__le16 error_code;
-	__le16 req_type;
-	__le16 seq_id;
-	__le16 resp_len;
-	__le32 data[16];
-	__le32 unused_0;
-	u8 unused_1;
-	u8 unused_2;
-	u8 unused_3;
-	u8 valid;
-};
-
-/* hwrm_port_led_cfg */
-/* Input (64 bytes) */
+	__le16	error_code;
+	__le16	req_type;
+	__le16	seq_id;
+	__le16	resp_len;
+	__le32	data[16];
+	u8	unused_0[7];
+	u8	valid;
+};
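
This command is used, for example, to dump SFP/QSFP module EEPROM. An illustrative request fill, where the I2C address 0xa0 (the conventional SFP A0h page) and the 16-byte read length are example values, not mandated by the interface; page_offset only takes effect because its enables bit is set:

	static void fill_i2c_read_req(struct hwrm_port_phy_i2c_read_input *req,
				      u16 port_id)
	{
		req->port_id = cpu_to_le16(port_id);
		req->i2c_slave_addr = 0xa0;
		req->page_number = cpu_to_le16(0);
		req->enables = cpu_to_le32(PORT_PHY_I2C_READ_REQ_ENABLES_PAGE_OFFSET);
		req->page_offset = cpu_to_le16(0);
		req->data_length = 16;	/* response returns it in data[] */
	}
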
+
+/* hwrm_port_led_cfg_input (size:512b/64B) */
 struct hwrm_port_led_cfg_input {
-	__le16 req_type;
-	__le16 cmpl_ring;
-	__le16 seq_id;
-	__le16 target_id;
-	__le64 resp_addr;
-	__le32 enables;
-	#define PORT_LED_CFG_REQ_ENABLES_LED0_ID		    0x1UL
-	#define PORT_LED_CFG_REQ_ENABLES_LED0_STATE		    0x2UL
-	#define PORT_LED_CFG_REQ_ENABLES_LED0_COLOR		    0x4UL
-	#define PORT_LED_CFG_REQ_ENABLES_LED0_BLINK_ON		    0x8UL
-	#define PORT_LED_CFG_REQ_ENABLES_LED0_BLINK_OFF	    0x10UL
-	#define PORT_LED_CFG_REQ_ENABLES_LED0_GROUP_ID		    0x20UL
-	#define PORT_LED_CFG_REQ_ENABLES_LED1_ID		    0x40UL
-	#define PORT_LED_CFG_REQ_ENABLES_LED1_STATE		    0x80UL
-	#define PORT_LED_CFG_REQ_ENABLES_LED1_COLOR		    0x100UL
-	#define PORT_LED_CFG_REQ_ENABLES_LED1_BLINK_ON		    0x200UL
-	#define PORT_LED_CFG_REQ_ENABLES_LED1_BLINK_OFF	    0x400UL
-	#define PORT_LED_CFG_REQ_ENABLES_LED1_GROUP_ID		    0x800UL
-	#define PORT_LED_CFG_REQ_ENABLES_LED2_ID		    0x1000UL
-	#define PORT_LED_CFG_REQ_ENABLES_LED2_STATE		    0x2000UL
-	#define PORT_LED_CFG_REQ_ENABLES_LED2_COLOR		    0x4000UL
-	#define PORT_LED_CFG_REQ_ENABLES_LED2_BLINK_ON		    0x8000UL
-	#define PORT_LED_CFG_REQ_ENABLES_LED2_BLINK_OFF	    0x10000UL
-	#define PORT_LED_CFG_REQ_ENABLES_LED2_GROUP_ID		    0x20000UL
-	#define PORT_LED_CFG_REQ_ENABLES_LED3_ID		    0x40000UL
-	#define PORT_LED_CFG_REQ_ENABLES_LED3_STATE		    0x80000UL
-	#define PORT_LED_CFG_REQ_ENABLES_LED3_COLOR		    0x100000UL
-	#define PORT_LED_CFG_REQ_ENABLES_LED3_BLINK_ON		    0x200000UL
-	#define PORT_LED_CFG_REQ_ENABLES_LED3_BLINK_OFF	    0x400000UL
-	#define PORT_LED_CFG_REQ_ENABLES_LED3_GROUP_ID		    0x800000UL
-	__le16 port_id;
-	u8 num_leds;
-	u8 rsvd;
-	u8 led0_id;
-	u8 led0_state;
-	#define PORT_LED_CFG_REQ_LED0_STATE_DEFAULT		   0x0UL
-	#define PORT_LED_CFG_REQ_LED0_STATE_OFF		   0x1UL
-	#define PORT_LED_CFG_REQ_LED0_STATE_ON			   0x2UL
-	#define PORT_LED_CFG_REQ_LED0_STATE_BLINK		   0x3UL
-	#define PORT_LED_CFG_REQ_LED0_STATE_BLINKALT		   0x4UL
-	u8 led0_color;
-	#define PORT_LED_CFG_REQ_LED0_COLOR_DEFAULT		   0x0UL
-	#define PORT_LED_CFG_REQ_LED0_COLOR_AMBER		   0x1UL
-	#define PORT_LED_CFG_REQ_LED0_COLOR_GREEN		   0x2UL
-	#define PORT_LED_CFG_REQ_LED0_COLOR_GREENAMBER		   0x3UL
-	u8 unused_0;
-	__le16 led0_blink_on;
-	__le16 led0_blink_off;
-	u8 led0_group_id;
-	u8 rsvd0;
-	u8 led1_id;
-	u8 led1_state;
-	#define PORT_LED_CFG_REQ_LED1_STATE_DEFAULT		   0x0UL
-	#define PORT_LED_CFG_REQ_LED1_STATE_OFF		   0x1UL
-	#define PORT_LED_CFG_REQ_LED1_STATE_ON			   0x2UL
-	#define PORT_LED_CFG_REQ_LED1_STATE_BLINK		   0x3UL
-	#define PORT_LED_CFG_REQ_LED1_STATE_BLINKALT		   0x4UL
-	u8 led1_color;
-	#define PORT_LED_CFG_REQ_LED1_COLOR_DEFAULT		   0x0UL
-	#define PORT_LED_CFG_REQ_LED1_COLOR_AMBER		   0x1UL
-	#define PORT_LED_CFG_REQ_LED1_COLOR_GREEN		   0x2UL
-	#define PORT_LED_CFG_REQ_LED1_COLOR_GREENAMBER		   0x3UL
-	u8 unused_1;
-	__le16 led1_blink_on;
-	__le16 led1_blink_off;
-	u8 led1_group_id;
-	u8 rsvd1;
-	u8 led2_id;
-	u8 led2_state;
-	#define PORT_LED_CFG_REQ_LED2_STATE_DEFAULT		   0x0UL
-	#define PORT_LED_CFG_REQ_LED2_STATE_OFF		   0x1UL
-	#define PORT_LED_CFG_REQ_LED2_STATE_ON			   0x2UL
-	#define PORT_LED_CFG_REQ_LED2_STATE_BLINK		   0x3UL
-	#define PORT_LED_CFG_REQ_LED2_STATE_BLINKALT		   0x4UL
-	u8 led2_color;
-	#define PORT_LED_CFG_REQ_LED2_COLOR_DEFAULT		   0x0UL
-	#define PORT_LED_CFG_REQ_LED2_COLOR_AMBER		   0x1UL
-	#define PORT_LED_CFG_REQ_LED2_COLOR_GREEN		   0x2UL
-	#define PORT_LED_CFG_REQ_LED2_COLOR_GREENAMBER		   0x3UL
-	u8 unused_2;
-	__le16 led2_blink_on;
-	__le16 led2_blink_off;
-	u8 led2_group_id;
-	u8 rsvd2;
-	u8 led3_id;
-	u8 led3_state;
-	#define PORT_LED_CFG_REQ_LED3_STATE_DEFAULT		   0x0UL
-	#define PORT_LED_CFG_REQ_LED3_STATE_OFF		   0x1UL
-	#define PORT_LED_CFG_REQ_LED3_STATE_ON			   0x2UL
-	#define PORT_LED_CFG_REQ_LED3_STATE_BLINK		   0x3UL
-	#define PORT_LED_CFG_REQ_LED3_STATE_BLINKALT		   0x4UL
-	u8 led3_color;
-	#define PORT_LED_CFG_REQ_LED3_COLOR_DEFAULT		   0x0UL
-	#define PORT_LED_CFG_REQ_LED3_COLOR_AMBER		   0x1UL
-	#define PORT_LED_CFG_REQ_LED3_COLOR_GREEN		   0x2UL
-	#define PORT_LED_CFG_REQ_LED3_COLOR_GREENAMBER		   0x3UL
-	u8 unused_3;
-	__le16 led3_blink_on;
-	__le16 led3_blink_off;
-	u8 led3_group_id;
-	u8 rsvd3;
-};
-
-/* Output (16 bytes) */
+	__le16	req_type;
+	__le16	cmpl_ring;
+	__le16	seq_id;
+	__le16	target_id;
+	__le64	resp_addr;
+	__le32	enables;
+	#define PORT_LED_CFG_REQ_ENABLES_LED0_ID            0x1UL
+	#define PORT_LED_CFG_REQ_ENABLES_LED0_STATE         0x2UL
+	#define PORT_LED_CFG_REQ_ENABLES_LED0_COLOR         0x4UL
+	#define PORT_LED_CFG_REQ_ENABLES_LED0_BLINK_ON      0x8UL
+	#define PORT_LED_CFG_REQ_ENABLES_LED0_BLINK_OFF     0x10UL
+	#define PORT_LED_CFG_REQ_ENABLES_LED0_GROUP_ID      0x20UL
+	#define PORT_LED_CFG_REQ_ENABLES_LED1_ID            0x40UL
+	#define PORT_LED_CFG_REQ_ENABLES_LED1_STATE         0x80UL
+	#define PORT_LED_CFG_REQ_ENABLES_LED1_COLOR         0x100UL
+	#define PORT_LED_CFG_REQ_ENABLES_LED1_BLINK_ON      0x200UL
+	#define PORT_LED_CFG_REQ_ENABLES_LED1_BLINK_OFF     0x400UL
+	#define PORT_LED_CFG_REQ_ENABLES_LED1_GROUP_ID      0x800UL
+	#define PORT_LED_CFG_REQ_ENABLES_LED2_ID            0x1000UL
+	#define PORT_LED_CFG_REQ_ENABLES_LED2_STATE         0x2000UL
+	#define PORT_LED_CFG_REQ_ENABLES_LED2_COLOR         0x4000UL
+	#define PORT_LED_CFG_REQ_ENABLES_LED2_BLINK_ON      0x8000UL
+	#define PORT_LED_CFG_REQ_ENABLES_LED2_BLINK_OFF     0x10000UL
+	#define PORT_LED_CFG_REQ_ENABLES_LED2_GROUP_ID      0x20000UL
+	#define PORT_LED_CFG_REQ_ENABLES_LED3_ID            0x40000UL
+	#define PORT_LED_CFG_REQ_ENABLES_LED3_STATE         0x80000UL
+	#define PORT_LED_CFG_REQ_ENABLES_LED3_COLOR         0x100000UL
+	#define PORT_LED_CFG_REQ_ENABLES_LED3_BLINK_ON      0x200000UL
+	#define PORT_LED_CFG_REQ_ENABLES_LED3_BLINK_OFF     0x400000UL
+	#define PORT_LED_CFG_REQ_ENABLES_LED3_GROUP_ID      0x800000UL
+	__le16	port_id;
+	u8	num_leds;
+	u8	rsvd;
+	u8	led0_id;
+	u8	led0_state;
+	#define PORT_LED_CFG_REQ_LED0_STATE_DEFAULT  0x0UL
+	#define PORT_LED_CFG_REQ_LED0_STATE_OFF      0x1UL
+	#define PORT_LED_CFG_REQ_LED0_STATE_ON       0x2UL
+	#define PORT_LED_CFG_REQ_LED0_STATE_BLINK    0x3UL
+	#define PORT_LED_CFG_REQ_LED0_STATE_BLINKALT 0x4UL
+	#define PORT_LED_CFG_REQ_LED0_STATE_LAST    PORT_LED_CFG_REQ_LED0_STATE_BLINKALT
+	u8	led0_color;
+	#define PORT_LED_CFG_REQ_LED0_COLOR_DEFAULT    0x0UL
+	#define PORT_LED_CFG_REQ_LED0_COLOR_AMBER      0x1UL
+	#define PORT_LED_CFG_REQ_LED0_COLOR_GREEN      0x2UL
+	#define PORT_LED_CFG_REQ_LED0_COLOR_GREENAMBER 0x3UL
+	#define PORT_LED_CFG_REQ_LED0_COLOR_LAST      PORT_LED_CFG_REQ_LED0_COLOR_GREENAMBER
+	u8	unused_0;
+	__le16	led0_blink_on;
+	__le16	led0_blink_off;
+	u8	led0_group_id;
+	u8	rsvd0;
+	u8	led1_id;
+	u8	led1_state;
+	#define PORT_LED_CFG_REQ_LED1_STATE_DEFAULT  0x0UL
+	#define PORT_LED_CFG_REQ_LED1_STATE_OFF      0x1UL
+	#define PORT_LED_CFG_REQ_LED1_STATE_ON       0x2UL
+	#define PORT_LED_CFG_REQ_LED1_STATE_BLINK    0x3UL
+	#define PORT_LED_CFG_REQ_LED1_STATE_BLINKALT 0x4UL
+	#define PORT_LED_CFG_REQ_LED1_STATE_LAST    PORT_LED_CFG_REQ_LED1_STATE_BLINKALT
+	u8	led1_color;
+	#define PORT_LED_CFG_REQ_LED1_COLOR_DEFAULT    0x0UL
+	#define PORT_LED_CFG_REQ_LED1_COLOR_AMBER      0x1UL
+	#define PORT_LED_CFG_REQ_LED1_COLOR_GREEN      0x2UL
+	#define PORT_LED_CFG_REQ_LED1_COLOR_GREENAMBER 0x3UL
+	#define PORT_LED_CFG_REQ_LED1_COLOR_LAST      PORT_LED_CFG_REQ_LED1_COLOR_GREENAMBER
+	u8	unused_1;
+	__le16	led1_blink_on;
+	__le16	led1_blink_off;
+	u8	led1_group_id;
+	u8	rsvd1;
+	u8	led2_id;
+	u8	led2_state;
+	#define PORT_LED_CFG_REQ_LED2_STATE_DEFAULT  0x0UL
+	#define PORT_LED_CFG_REQ_LED2_STATE_OFF      0x1UL
+	#define PORT_LED_CFG_REQ_LED2_STATE_ON       0x2UL
+	#define PORT_LED_CFG_REQ_LED2_STATE_BLINK    0x3UL
+	#define PORT_LED_CFG_REQ_LED2_STATE_BLINKALT 0x4UL
+	#define PORT_LED_CFG_REQ_LED2_STATE_LAST    PORT_LED_CFG_REQ_LED2_STATE_BLINKALT
+	u8	led2_color;
+	#define PORT_LED_CFG_REQ_LED2_COLOR_DEFAULT    0x0UL
+	#define PORT_LED_CFG_REQ_LED2_COLOR_AMBER      0x1UL
+	#define PORT_LED_CFG_REQ_LED2_COLOR_GREEN      0x2UL
+	#define PORT_LED_CFG_REQ_LED2_COLOR_GREENAMBER 0x3UL
+	#define PORT_LED_CFG_REQ_LED2_COLOR_LAST      PORT_LED_CFG_REQ_LED2_COLOR_GREENAMBER
+	u8	unused_2;
+	__le16	led2_blink_on;
+	__le16	led2_blink_off;
+	u8	led2_group_id;
+	u8	rsvd2;
+	u8	led3_id;
+	u8	led3_state;
+	#define PORT_LED_CFG_REQ_LED3_STATE_DEFAULT  0x0UL
+	#define PORT_LED_CFG_REQ_LED3_STATE_OFF      0x1UL
+	#define PORT_LED_CFG_REQ_LED3_STATE_ON       0x2UL
+	#define PORT_LED_CFG_REQ_LED3_STATE_BLINK    0x3UL
+	#define PORT_LED_CFG_REQ_LED3_STATE_BLINKALT 0x4UL
+	#define PORT_LED_CFG_REQ_LED3_STATE_LAST    PORT_LED_CFG_REQ_LED3_STATE_BLINKALT
+	u8	led3_color;
+	#define PORT_LED_CFG_REQ_LED3_COLOR_DEFAULT    0x0UL
+	#define PORT_LED_CFG_REQ_LED3_COLOR_AMBER      0x1UL
+	#define PORT_LED_CFG_REQ_LED3_COLOR_GREEN      0x2UL
+	#define PORT_LED_CFG_REQ_LED3_COLOR_GREENAMBER 0x3UL
+	#define PORT_LED_CFG_REQ_LED3_COLOR_LAST      PORT_LED_CFG_REQ_LED3_COLOR_GREENAMBER
+	u8	unused_3;
+	__le16	led3_blink_on;
+	__le16	led3_blink_off;
+	u8	led3_group_id;
+	u8	rsvd3;
+};
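
Each LED in hwrm_port_led_cfg_input is an id/state/color/blink tuple gated by per-field enables bits. A sketch that blinks LED0 green, roughly what an ethtool identify operation needs; the blink durations are in the firmware's time units and 500 here is illustrative:

	static void fill_led0_blink_req(struct hwrm_port_led_cfg_input *req,
					u16 port_id)
	{
		req->port_id = cpu_to_le16(port_id);
		req->num_leds = 1;
		/* enable exactly the LED0 fields being configured */
		req->enables = cpu_to_le32(PORT_LED_CFG_REQ_ENABLES_LED0_ID |
					   PORT_LED_CFG_REQ_ENABLES_LED0_STATE |
					   PORT_LED_CFG_REQ_ENABLES_LED0_COLOR |
					   PORT_LED_CFG_REQ_ENABLES_LED0_BLINK_ON |
					   PORT_LED_CFG_REQ_ENABLES_LED0_BLINK_OFF);
		req->led0_id = 0;
		req->led0_state = PORT_LED_CFG_REQ_LED0_STATE_BLINK;
		req->led0_color = PORT_LED_CFG_REQ_LED0_COLOR_GREEN;
		req->led0_blink_on = cpu_to_le16(500);
		req->led0_blink_off = cpu_to_le16(500);
	}
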
+
+/* hwrm_port_led_cfg_output (size:128b/16B) */
 struct hwrm_port_led_cfg_output {
-	__le16 error_code;
-	__le16 req_type;
-	__le16 seq_id;
-	__le16 resp_len;
-	__le32 unused_0;
-	u8 unused_1;
-	u8 unused_2;
-	u8 unused_3;
-	u8 valid;
-};
-
-/* hwrm_port_led_qcaps */
-/* Input (24 bytes) */
+	__le16	error_code;
+	__le16	req_type;
+	__le16	seq_id;
+	__le16	resp_len;
+	u8	unused_0[7];
+	u8	valid;
+};
+
+/* hwrm_port_led_qcfg_input (size:192b/24B) */
+struct hwrm_port_led_qcfg_input {
+	__le16	req_type;
+	__le16	cmpl_ring;
+	__le16	seq_id;
+	__le16	target_id;
+	__le64	resp_addr;
+	__le16	port_id;
+	u8	unused_0[6];
+};
+
+/* hwrm_port_led_qcfg_output (size:448b/56B) */
+struct hwrm_port_led_qcfg_output {
+	__le16	error_code;
+	__le16	req_type;
+	__le16	seq_id;
+	__le16	resp_len;
+	u8	num_leds;
+	u8	led0_id;
+	u8	led0_type;
+	#define PORT_LED_QCFG_RESP_LED0_TYPE_SPEED    0x0UL
+	#define PORT_LED_QCFG_RESP_LED0_TYPE_ACTIVITY 0x1UL
+	#define PORT_LED_QCFG_RESP_LED0_TYPE_INVALID  0xffUL
+	#define PORT_LED_QCFG_RESP_LED0_TYPE_LAST    PORT_LED_QCFG_RESP_LED0_TYPE_INVALID
+	u8	led0_state;
+	#define PORT_LED_QCFG_RESP_LED0_STATE_DEFAULT  0x0UL
+	#define PORT_LED_QCFG_RESP_LED0_STATE_OFF      0x1UL
+	#define PORT_LED_QCFG_RESP_LED0_STATE_ON       0x2UL
+	#define PORT_LED_QCFG_RESP_LED0_STATE_BLINK    0x3UL
+	#define PORT_LED_QCFG_RESP_LED0_STATE_BLINKALT 0x4UL
+	#define PORT_LED_QCFG_RESP_LED0_STATE_LAST    PORT_LED_QCFG_RESP_LED0_STATE_BLINKALT
+	u8	led0_color;
+	#define PORT_LED_QCFG_RESP_LED0_COLOR_DEFAULT    0x0UL
+	#define PORT_LED_QCFG_RESP_LED0_COLOR_AMBER      0x1UL
+	#define PORT_LED_QCFG_RESP_LED0_COLOR_GREEN      0x2UL
+	#define PORT_LED_QCFG_RESP_LED0_COLOR_GREENAMBER 0x3UL
+	#define PORT_LED_QCFG_RESP_LED0_COLOR_LAST      PORT_LED_QCFG_RESP_LED0_COLOR_GREENAMBER
+	u8	unused_0;
+	__le16	led0_blink_on;
+	__le16	led0_blink_off;
+	u8	led0_group_id;
+	u8	led1_id;
+	u8	led1_type;
+	#define PORT_LED_QCFG_RESP_LED1_TYPE_SPEED    0x0UL
+	#define PORT_LED_QCFG_RESP_LED1_TYPE_ACTIVITY 0x1UL
+	#define PORT_LED_QCFG_RESP_LED1_TYPE_INVALID  0xffUL
+	#define PORT_LED_QCFG_RESP_LED1_TYPE_LAST    PORT_LED_QCFG_RESP_LED1_TYPE_INVALID
+	u8	led1_state;
+	#define PORT_LED_QCFG_RESP_LED1_STATE_DEFAULT  0x0UL
+	#define PORT_LED_QCFG_RESP_LED1_STATE_OFF      0x1UL
+	#define PORT_LED_QCFG_RESP_LED1_STATE_ON       0x2UL
+	#define PORT_LED_QCFG_RESP_LED1_STATE_BLINK    0x3UL
+	#define PORT_LED_QCFG_RESP_LED1_STATE_BLINKALT 0x4UL
+	#define PORT_LED_QCFG_RESP_LED1_STATE_LAST    PORT_LED_QCFG_RESP_LED1_STATE_BLINKALT
+	u8	led1_color;
+	#define PORT_LED_QCFG_RESP_LED1_COLOR_DEFAULT    0x0UL
+	#define PORT_LED_QCFG_RESP_LED1_COLOR_AMBER      0x1UL
+	#define PORT_LED_QCFG_RESP_LED1_COLOR_GREEN      0x2UL
+	#define PORT_LED_QCFG_RESP_LED1_COLOR_GREENAMBER 0x3UL
+	#define PORT_LED_QCFG_RESP_LED1_COLOR_LAST      PORT_LED_QCFG_RESP_LED1_COLOR_GREENAMBER
+	u8	unused_1;
+	__le16	led1_blink_on;
+	__le16	led1_blink_off;
+	u8	led1_group_id;
+	u8	led2_id;
+	u8	led2_type;
+	#define PORT_LED_QCFG_RESP_LED2_TYPE_SPEED    0x0UL
+	#define PORT_LED_QCFG_RESP_LED2_TYPE_ACTIVITY 0x1UL
+	#define PORT_LED_QCFG_RESP_LED2_TYPE_INVALID  0xffUL
+	#define PORT_LED_QCFG_RESP_LED2_TYPE_LAST    PORT_LED_QCFG_RESP_LED2_TYPE_INVALID
+	u8	led2_state;
+	#define PORT_LED_QCFG_RESP_LED2_STATE_DEFAULT  0x0UL
+	#define PORT_LED_QCFG_RESP_LED2_STATE_OFF      0x1UL
+	#define PORT_LED_QCFG_RESP_LED2_STATE_ON       0x2UL
+	#define PORT_LED_QCFG_RESP_LED2_STATE_BLINK    0x3UL
+	#define PORT_LED_QCFG_RESP_LED2_STATE_BLINKALT 0x4UL
+	#define PORT_LED_QCFG_RESP_LED2_STATE_LAST    PORT_LED_QCFG_RESP_LED2_STATE_BLINKALT
+	u8	led2_color;
+	#define PORT_LED_QCFG_RESP_LED2_COLOR_DEFAULT    0x0UL
+	#define PORT_LED_QCFG_RESP_LED2_COLOR_AMBER      0x1UL
+	#define PORT_LED_QCFG_RESP_LED2_COLOR_GREEN      0x2UL
+	#define PORT_LED_QCFG_RESP_LED2_COLOR_GREENAMBER 0x3UL
+	#define PORT_LED_QCFG_RESP_LED2_COLOR_LAST      PORT_LED_QCFG_RESP_LED2_COLOR_GREENAMBER
+	u8	unused_2;
+	__le16	led2_blink_on;
+	__le16	led2_blink_off;
+	u8	led2_group_id;
+	u8	led3_id;
+	u8	led3_type;
+	#define PORT_LED_QCFG_RESP_LED3_TYPE_SPEED    0x0UL
+	#define PORT_LED_QCFG_RESP_LED3_TYPE_ACTIVITY 0x1UL
+	#define PORT_LED_QCFG_RESP_LED3_TYPE_INVALID  0xffUL
+	#define PORT_LED_QCFG_RESP_LED3_TYPE_LAST    PORT_LED_QCFG_RESP_LED3_TYPE_INVALID
+	u8	led3_state;
+	#define PORT_LED_QCFG_RESP_LED3_STATE_DEFAULT  0x0UL
+	#define PORT_LED_QCFG_RESP_LED3_STATE_OFF      0x1UL
+	#define PORT_LED_QCFG_RESP_LED3_STATE_ON       0x2UL
+	#define PORT_LED_QCFG_RESP_LED3_STATE_BLINK    0x3UL
+	#define PORT_LED_QCFG_RESP_LED3_STATE_BLINKALT 0x4UL
+	#define PORT_LED_QCFG_RESP_LED3_STATE_LAST    PORT_LED_QCFG_RESP_LED3_STATE_BLINKALT
+	u8	led3_color;
+	#define PORT_LED_QCFG_RESP_LED3_COLOR_DEFAULT    0x0UL
+	#define PORT_LED_QCFG_RESP_LED3_COLOR_AMBER      0x1UL
+	#define PORT_LED_QCFG_RESP_LED3_COLOR_GREEN      0x2UL
+	#define PORT_LED_QCFG_RESP_LED3_COLOR_GREENAMBER 0x3UL
+	#define PORT_LED_QCFG_RESP_LED3_COLOR_LAST      PORT_LED_QCFG_RESP_LED3_COLOR_GREENAMBER
+	u8	unused_3;
+	__le16	led3_blink_on;
+	__le16	led3_blink_off;
+	u8	led3_group_id;
+	u8	unused_4[6];
+	u8	valid;
+};
+
+/* hwrm_port_led_qcaps_input (size:192b/24B) */
 struct hwrm_port_led_qcaps_input {
-	__le16 req_type;
-	__le16 cmpl_ring;
-	__le16 seq_id;
-	__le16 target_id;
-	__le64 resp_addr;
-	__le16 port_id;
-	__le16 unused_0[3];
+	__le16	req_type;
+	__le16	cmpl_ring;
+	__le16	seq_id;
+	__le16	target_id;
+	__le64	resp_addr;
+	__le16	port_id;
+	u8	unused_0[6];
 };
 
-/* Output (48 bytes) */
+/* hwrm_port_led_qcaps_output (size:384b/48B) */
 struct hwrm_port_led_qcaps_output {
-	__le16 error_code;
-	__le16 req_type;
-	__le16 seq_id;
-	__le16 resp_len;
-	u8 num_leds;
-	u8 unused_0[3];
-	u8 led0_id;
-	u8 led0_type;
-	#define PORT_LED_QCAPS_RESP_LED0_TYPE_SPEED		   0x0UL
-	#define PORT_LED_QCAPS_RESP_LED0_TYPE_ACTIVITY		   0x1UL
-	#define PORT_LED_QCAPS_RESP_LED0_TYPE_INVALID		   0xffUL
-	u8 led0_group_id;
-	u8 unused_1;
-	__le16 led0_state_caps;
-	#define PORT_LED_QCAPS_RESP_LED0_STATE_CAPS_ENABLED	    0x1UL
-	#define PORT_LED_QCAPS_RESP_LED0_STATE_CAPS_OFF_SUPPORTED  0x2UL
-	#define PORT_LED_QCAPS_RESP_LED0_STATE_CAPS_ON_SUPPORTED   0x4UL
-	#define PORT_LED_QCAPS_RESP_LED0_STATE_CAPS_BLINK_SUPPORTED 0x8UL
-	#define PORT_LED_QCAPS_RESP_LED0_STATE_CAPS_BLINK_ALT_SUPPORTED 0x10UL
-	__le16 led0_color_caps;
-	#define PORT_LED_QCAPS_RESP_LED0_COLOR_CAPS_RSVD	    0x1UL
-	#define PORT_LED_QCAPS_RESP_LED0_COLOR_CAPS_AMBER_SUPPORTED 0x2UL
-	#define PORT_LED_QCAPS_RESP_LED0_COLOR_CAPS_GREEN_SUPPORTED 0x4UL
-	u8 led1_id;
-	u8 led1_type;
-	#define PORT_LED_QCAPS_RESP_LED1_TYPE_SPEED		   0x0UL
-	#define PORT_LED_QCAPS_RESP_LED1_TYPE_ACTIVITY		   0x1UL
-	#define PORT_LED_QCAPS_RESP_LED1_TYPE_INVALID		   0xffUL
-	u8 led1_group_id;
-	u8 unused_2;
-	__le16 led1_state_caps;
-	#define PORT_LED_QCAPS_RESP_LED1_STATE_CAPS_ENABLED	    0x1UL
-	#define PORT_LED_QCAPS_RESP_LED1_STATE_CAPS_OFF_SUPPORTED  0x2UL
-	#define PORT_LED_QCAPS_RESP_LED1_STATE_CAPS_ON_SUPPORTED   0x4UL
-	#define PORT_LED_QCAPS_RESP_LED1_STATE_CAPS_BLINK_SUPPORTED 0x8UL
-	#define PORT_LED_QCAPS_RESP_LED1_STATE_CAPS_BLINK_ALT_SUPPORTED 0x10UL
-	__le16 led1_color_caps;
-	#define PORT_LED_QCAPS_RESP_LED1_COLOR_CAPS_RSVD	    0x1UL
-	#define PORT_LED_QCAPS_RESP_LED1_COLOR_CAPS_AMBER_SUPPORTED 0x2UL
-	#define PORT_LED_QCAPS_RESP_LED1_COLOR_CAPS_GREEN_SUPPORTED 0x4UL
-	u8 led2_id;
-	u8 led2_type;
-	#define PORT_LED_QCAPS_RESP_LED2_TYPE_SPEED		   0x0UL
-	#define PORT_LED_QCAPS_RESP_LED2_TYPE_ACTIVITY		   0x1UL
-	#define PORT_LED_QCAPS_RESP_LED2_TYPE_INVALID		   0xffUL
-	u8 led2_group_id;
-	u8 unused_3;
-	__le16 led2_state_caps;
-	#define PORT_LED_QCAPS_RESP_LED2_STATE_CAPS_ENABLED	    0x1UL
-	#define PORT_LED_QCAPS_RESP_LED2_STATE_CAPS_OFF_SUPPORTED  0x2UL
-	#define PORT_LED_QCAPS_RESP_LED2_STATE_CAPS_ON_SUPPORTED   0x4UL
-	#define PORT_LED_QCAPS_RESP_LED2_STATE_CAPS_BLINK_SUPPORTED 0x8UL
-	#define PORT_LED_QCAPS_RESP_LED2_STATE_CAPS_BLINK_ALT_SUPPORTED 0x10UL
-	__le16 led2_color_caps;
-	#define PORT_LED_QCAPS_RESP_LED2_COLOR_CAPS_RSVD	    0x1UL
-	#define PORT_LED_QCAPS_RESP_LED2_COLOR_CAPS_AMBER_SUPPORTED 0x2UL
-	#define PORT_LED_QCAPS_RESP_LED2_COLOR_CAPS_GREEN_SUPPORTED 0x4UL
-	u8 led3_id;
-	u8 led3_type;
-	#define PORT_LED_QCAPS_RESP_LED3_TYPE_SPEED		   0x0UL
-	#define PORT_LED_QCAPS_RESP_LED3_TYPE_ACTIVITY		   0x1UL
-	#define PORT_LED_QCAPS_RESP_LED3_TYPE_INVALID		   0xffUL
-	u8 led3_group_id;
-	u8 unused_4;
-	__le16 led3_state_caps;
-	#define PORT_LED_QCAPS_RESP_LED3_STATE_CAPS_ENABLED	    0x1UL
-	#define PORT_LED_QCAPS_RESP_LED3_STATE_CAPS_OFF_SUPPORTED  0x2UL
-	#define PORT_LED_QCAPS_RESP_LED3_STATE_CAPS_ON_SUPPORTED   0x4UL
-	#define PORT_LED_QCAPS_RESP_LED3_STATE_CAPS_BLINK_SUPPORTED 0x8UL
-	#define PORT_LED_QCAPS_RESP_LED3_STATE_CAPS_BLINK_ALT_SUPPORTED 0x10UL
-	__le16 led3_color_caps;
-	#define PORT_LED_QCAPS_RESP_LED3_COLOR_CAPS_RSVD	    0x1UL
-	#define PORT_LED_QCAPS_RESP_LED3_COLOR_CAPS_AMBER_SUPPORTED 0x2UL
-	#define PORT_LED_QCAPS_RESP_LED3_COLOR_CAPS_GREEN_SUPPORTED 0x4UL
-	u8 unused_5;
-	u8 unused_6;
-	u8 unused_7;
-	u8 valid;
-};
-
-/* hwrm_queue_qportcfg */
-/* Input (24 bytes) */
+	__le16	error_code;
+	__le16	req_type;
+	__le16	seq_id;
+	__le16	resp_len;
+	u8	num_leds;
+	u8	unused[3];
+	u8	led0_id;
+	u8	led0_type;
+	#define PORT_LED_QCAPS_RESP_LED0_TYPE_SPEED    0x0UL
+	#define PORT_LED_QCAPS_RESP_LED0_TYPE_ACTIVITY 0x1UL
+	#define PORT_LED_QCAPS_RESP_LED0_TYPE_INVALID  0xffUL
+	#define PORT_LED_QCAPS_RESP_LED0_TYPE_LAST    PORT_LED_QCAPS_RESP_LED0_TYPE_INVALID
+	u8	led0_group_id;
+	u8	unused_0;
+	__le16	led0_state_caps;
+	#define PORT_LED_QCAPS_RESP_LED0_STATE_CAPS_ENABLED                 0x1UL
+	#define PORT_LED_QCAPS_RESP_LED0_STATE_CAPS_OFF_SUPPORTED           0x2UL
+	#define PORT_LED_QCAPS_RESP_LED0_STATE_CAPS_ON_SUPPORTED            0x4UL
+	#define PORT_LED_QCAPS_RESP_LED0_STATE_CAPS_BLINK_SUPPORTED         0x8UL
+	#define PORT_LED_QCAPS_RESP_LED0_STATE_CAPS_BLINK_ALT_SUPPORTED     0x10UL
+	__le16	led0_color_caps;
+	#define PORT_LED_QCAPS_RESP_LED0_COLOR_CAPS_RSVD                0x1UL
+	#define PORT_LED_QCAPS_RESP_LED0_COLOR_CAPS_AMBER_SUPPORTED     0x2UL
+	#define PORT_LED_QCAPS_RESP_LED0_COLOR_CAPS_GREEN_SUPPORTED     0x4UL
+	u8	led1_id;
+	u8	led1_type;
+	#define PORT_LED_QCAPS_RESP_LED1_TYPE_SPEED    0x0UL
+	#define PORT_LED_QCAPS_RESP_LED1_TYPE_ACTIVITY 0x1UL
+	#define PORT_LED_QCAPS_RESP_LED1_TYPE_INVALID  0xffUL
+	#define PORT_LED_QCAPS_RESP_LED1_TYPE_LAST    PORT_LED_QCAPS_RESP_LED1_TYPE_INVALID
+	u8	led1_group_id;
+	u8	unused_1;
+	__le16	led1_state_caps;
+	#define PORT_LED_QCAPS_RESP_LED1_STATE_CAPS_ENABLED                 0x1UL
+	#define PORT_LED_QCAPS_RESP_LED1_STATE_CAPS_OFF_SUPPORTED           0x2UL
+	#define PORT_LED_QCAPS_RESP_LED1_STATE_CAPS_ON_SUPPORTED            0x4UL
+	#define PORT_LED_QCAPS_RESP_LED1_STATE_CAPS_BLINK_SUPPORTED         0x8UL
+	#define PORT_LED_QCAPS_RESP_LED1_STATE_CAPS_BLINK_ALT_SUPPORTED     0x10UL
+	__le16	led1_color_caps;
+	#define PORT_LED_QCAPS_RESP_LED1_COLOR_CAPS_RSVD                0x1UL
+	#define PORT_LED_QCAPS_RESP_LED1_COLOR_CAPS_AMBER_SUPPORTED     0x2UL
+	#define PORT_LED_QCAPS_RESP_LED1_COLOR_CAPS_GREEN_SUPPORTED     0x4UL
+	u8	led2_id;
+	u8	led2_type;
+	#define PORT_LED_QCAPS_RESP_LED2_TYPE_SPEED    0x0UL
+	#define PORT_LED_QCAPS_RESP_LED2_TYPE_ACTIVITY 0x1UL
+	#define PORT_LED_QCAPS_RESP_LED2_TYPE_INVALID  0xffUL
+	#define PORT_LED_QCAPS_RESP_LED2_TYPE_LAST    PORT_LED_QCAPS_RESP_LED2_TYPE_INVALID
+	u8	led2_group_id;
+	u8	unused_2;
+	__le16	led2_state_caps;
+	#define PORT_LED_QCAPS_RESP_LED2_STATE_CAPS_ENABLED                 0x1UL
+	#define PORT_LED_QCAPS_RESP_LED2_STATE_CAPS_OFF_SUPPORTED           0x2UL
+	#define PORT_LED_QCAPS_RESP_LED2_STATE_CAPS_ON_SUPPORTED            0x4UL
+	#define PORT_LED_QCAPS_RESP_LED2_STATE_CAPS_BLINK_SUPPORTED         0x8UL
+	#define PORT_LED_QCAPS_RESP_LED2_STATE_CAPS_BLINK_ALT_SUPPORTED     0x10UL
+	__le16	led2_color_caps;
+	#define PORT_LED_QCAPS_RESP_LED2_COLOR_CAPS_RSVD                0x1UL
+	#define PORT_LED_QCAPS_RESP_LED2_COLOR_CAPS_AMBER_SUPPORTED     0x2UL
+	#define PORT_LED_QCAPS_RESP_LED2_COLOR_CAPS_GREEN_SUPPORTED     0x4UL
+	u8	led3_id;
+	u8	led3_type;
+	#define PORT_LED_QCAPS_RESP_LED3_TYPE_SPEED    0x0UL
+	#define PORT_LED_QCAPS_RESP_LED3_TYPE_ACTIVITY 0x1UL
+	#define PORT_LED_QCAPS_RESP_LED3_TYPE_INVALID  0xffUL
+	#define PORT_LED_QCAPS_RESP_LED3_TYPE_LAST    PORT_LED_QCAPS_RESP_LED3_TYPE_INVALID
+	u8	led3_group_id;
+	u8	unused_3;
+	__le16	led3_state_caps;
+	#define PORT_LED_QCAPS_RESP_LED3_STATE_CAPS_ENABLED                 0x1UL
+	#define PORT_LED_QCAPS_RESP_LED3_STATE_CAPS_OFF_SUPPORTED           0x2UL
+	#define PORT_LED_QCAPS_RESP_LED3_STATE_CAPS_ON_SUPPORTED            0x4UL
+	#define PORT_LED_QCAPS_RESP_LED3_STATE_CAPS_BLINK_SUPPORTED         0x8UL
+	#define PORT_LED_QCAPS_RESP_LED3_STATE_CAPS_BLINK_ALT_SUPPORTED     0x10UL
+	__le16	led3_color_caps;
+	#define PORT_LED_QCAPS_RESP_LED3_COLOR_CAPS_RSVD                0x1UL
+	#define PORT_LED_QCAPS_RESP_LED3_COLOR_CAPS_AMBER_SUPPORTED     0x2UL
+	#define PORT_LED_QCAPS_RESP_LED3_COLOR_CAPS_GREEN_SUPPORTED     0x4UL
+	u8	unused_4[3];
+	u8	valid;
+};
+
+/* hwrm_queue_qportcfg_input (size:192b/24B) */
 struct hwrm_queue_qportcfg_input {
-	__le16 req_type;
-	__le16 cmpl_ring;
-	__le16 seq_id;
-	__le16 target_id;
-	__le64 resp_addr;
-	__le32 flags;
-	#define QUEUE_QPORTCFG_REQ_FLAGS_PATH			    0x1UL
-	#define QUEUE_QPORTCFG_REQ_FLAGS_PATH_TX		   0x0UL
-	#define QUEUE_QPORTCFG_REQ_FLAGS_PATH_RX		   0x1UL
-	#define QUEUE_QPORTCFG_REQ_FLAGS_PATH_LAST    QUEUE_QPORTCFG_REQ_FLAGS_PATH_RX
-	__le16 port_id;
-	__le16 unused_0;
-};
-
-/* Output (32 bytes) */
+	__le16	req_type;
+	__le16	cmpl_ring;
+	__le16	seq_id;
+	__le16	target_id;
+	__le64	resp_addr;
+	__le32	flags;
+	#define QUEUE_QPORTCFG_REQ_FLAGS_PATH     0x1UL
+	#define QUEUE_QPORTCFG_REQ_FLAGS_PATH_TX    0x0UL
+	#define QUEUE_QPORTCFG_REQ_FLAGS_PATH_RX    0x1UL
+	#define QUEUE_QPORTCFG_REQ_FLAGS_PATH_LAST QUEUE_QPORTCFG_REQ_FLAGS_PATH_RX
+	__le16	port_id;
+	u8	unused_0[2];
+};
+
+/* hwrm_queue_qportcfg_output (size:256b/32B) */
 struct hwrm_queue_qportcfg_output {
-	__le16 error_code;
-	__le16 req_type;
-	__le16 seq_id;
-	__le16 resp_len;
-	u8 max_configurable_queues;
-	u8 max_configurable_lossless_queues;
-	u8 queue_cfg_allowed;
-	u8 queue_cfg_info;
-	#define QUEUE_QPORTCFG_RESP_QUEUE_CFG_INFO_ASYM_CFG	    0x1UL
-	u8 queue_pfcenable_cfg_allowed;
-	u8 queue_pri2cos_cfg_allowed;
-	u8 queue_cos2bw_cfg_allowed;
-	u8 queue_id0;
-	u8 queue_id0_service_profile;
-	#define QUEUE_QPORTCFG_RESP_QUEUE_ID0_SERVICE_PROFILE_LOSSY 0x0UL
+	__le16	error_code;
+	__le16	req_type;
+	__le16	seq_id;
+	__le16	resp_len;
+	u8	max_configurable_queues;
+	u8	max_configurable_lossless_queues;
+	u8	queue_cfg_allowed;
+	u8	queue_cfg_info;
+	#define QUEUE_QPORTCFG_RESP_QUEUE_CFG_INFO_ASYM_CFG     0x1UL
+	u8	queue_pfcenable_cfg_allowed;
+	u8	queue_pri2cos_cfg_allowed;
+	u8	queue_cos2bw_cfg_allowed;
+	u8	queue_id0;
+	u8	queue_id0_service_profile;
+	#define QUEUE_QPORTCFG_RESP_QUEUE_ID0_SERVICE_PROFILE_LOSSY    0x0UL
 	#define QUEUE_QPORTCFG_RESP_QUEUE_ID0_SERVICE_PROFILE_LOSSLESS 0x1UL
-	#define QUEUE_QPORTCFG_RESP_QUEUE_ID0_SERVICE_PROFILE_UNKNOWN 0xffUL
-	u8 queue_id1;
-	u8 queue_id1_service_profile;
-	#define QUEUE_QPORTCFG_RESP_QUEUE_ID1_SERVICE_PROFILE_LOSSY 0x0UL
+	#define QUEUE_QPORTCFG_RESP_QUEUE_ID0_SERVICE_PROFILE_UNKNOWN  0xffUL
+	#define QUEUE_QPORTCFG_RESP_QUEUE_ID0_SERVICE_PROFILE_LAST    QUEUE_QPORTCFG_RESP_QUEUE_ID0_SERVICE_PROFILE_UNKNOWN
+	u8	queue_id1;
+	u8	queue_id1_service_profile;
+	#define QUEUE_QPORTCFG_RESP_QUEUE_ID1_SERVICE_PROFILE_LOSSY    0x0UL
 	#define QUEUE_QPORTCFG_RESP_QUEUE_ID1_SERVICE_PROFILE_LOSSLESS 0x1UL
-	#define QUEUE_QPORTCFG_RESP_QUEUE_ID1_SERVICE_PROFILE_UNKNOWN 0xffUL
-	u8 queue_id2;
-	u8 queue_id2_service_profile;
-	#define QUEUE_QPORTCFG_RESP_QUEUE_ID2_SERVICE_PROFILE_LOSSY 0x0UL
+	#define QUEUE_QPORTCFG_RESP_QUEUE_ID1_SERVICE_PROFILE_UNKNOWN  0xffUL
+	#define QUEUE_QPORTCFG_RESP_QUEUE_ID1_SERVICE_PROFILE_LAST    QUEUE_QPORTCFG_RESP_QUEUE_ID1_SERVICE_PROFILE_UNKNOWN
+	u8	queue_id2;
+	u8	queue_id2_service_profile;
+	#define QUEUE_QPORTCFG_RESP_QUEUE_ID2_SERVICE_PROFILE_LOSSY    0x0UL
 	#define QUEUE_QPORTCFG_RESP_QUEUE_ID2_SERVICE_PROFILE_LOSSLESS 0x1UL
-	#define QUEUE_QPORTCFG_RESP_QUEUE_ID2_SERVICE_PROFILE_UNKNOWN 0xffUL
-	u8 queue_id3;
-	u8 queue_id3_service_profile;
-	#define QUEUE_QPORTCFG_RESP_QUEUE_ID3_SERVICE_PROFILE_LOSSY 0x0UL
+	#define QUEUE_QPORTCFG_RESP_QUEUE_ID2_SERVICE_PROFILE_UNKNOWN  0xffUL
+	#define QUEUE_QPORTCFG_RESP_QUEUE_ID2_SERVICE_PROFILE_LAST    QUEUE_QPORTCFG_RESP_QUEUE_ID2_SERVICE_PROFILE_UNKNOWN
+	u8	queue_id3;
+	u8	queue_id3_service_profile;
+	#define QUEUE_QPORTCFG_RESP_QUEUE_ID3_SERVICE_PROFILE_LOSSY    0x0UL
 	#define QUEUE_QPORTCFG_RESP_QUEUE_ID3_SERVICE_PROFILE_LOSSLESS 0x1UL
-	#define QUEUE_QPORTCFG_RESP_QUEUE_ID3_SERVICE_PROFILE_UNKNOWN 0xffUL
-	u8 queue_id4;
-	u8 queue_id4_service_profile;
-	#define QUEUE_QPORTCFG_RESP_QUEUE_ID4_SERVICE_PROFILE_LOSSY 0x0UL
+	#define QUEUE_QPORTCFG_RESP_QUEUE_ID3_SERVICE_PROFILE_UNKNOWN  0xffUL
+	#define QUEUE_QPORTCFG_RESP_QUEUE_ID3_SERVICE_PROFILE_LAST    QUEUE_QPORTCFG_RESP_QUEUE_ID3_SERVICE_PROFILE_UNKNOWN
+	u8	queue_id4;
+	u8	queue_id4_service_profile;
+	#define QUEUE_QPORTCFG_RESP_QUEUE_ID4_SERVICE_PROFILE_LOSSY    0x0UL
 	#define QUEUE_QPORTCFG_RESP_QUEUE_ID4_SERVICE_PROFILE_LOSSLESS 0x1UL
-	#define QUEUE_QPORTCFG_RESP_QUEUE_ID4_SERVICE_PROFILE_UNKNOWN 0xffUL
-	u8 queue_id5;
-	u8 queue_id5_service_profile;
-	#define QUEUE_QPORTCFG_RESP_QUEUE_ID5_SERVICE_PROFILE_LOSSY 0x0UL
+	#define QUEUE_QPORTCFG_RESP_QUEUE_ID4_SERVICE_PROFILE_UNKNOWN  0xffUL
+	#define QUEUE_QPORTCFG_RESP_QUEUE_ID4_SERVICE_PROFILE_LAST    QUEUE_QPORTCFG_RESP_QUEUE_ID4_SERVICE_PROFILE_UNKNOWN
+	u8	queue_id5;
+	u8	queue_id5_service_profile;
+	#define QUEUE_QPORTCFG_RESP_QUEUE_ID5_SERVICE_PROFILE_LOSSY    0x0UL
 	#define QUEUE_QPORTCFG_RESP_QUEUE_ID5_SERVICE_PROFILE_LOSSLESS 0x1UL
-	#define QUEUE_QPORTCFG_RESP_QUEUE_ID5_SERVICE_PROFILE_UNKNOWN 0xffUL
-	u8 queue_id6;
-	u8 queue_id6_service_profile;
-	#define QUEUE_QPORTCFG_RESP_QUEUE_ID6_SERVICE_PROFILE_LOSSY 0x0UL
+	#define QUEUE_QPORTCFG_RESP_QUEUE_ID5_SERVICE_PROFILE_UNKNOWN  0xffUL
+	#define QUEUE_QPORTCFG_RESP_QUEUE_ID5_SERVICE_PROFILE_LAST    QUEUE_QPORTCFG_RESP_QUEUE_ID5_SERVICE_PROFILE_UNKNOWN
+	u8	queue_id6;
+	u8	queue_id6_service_profile;
+	#define QUEUE_QPORTCFG_RESP_QUEUE_ID6_SERVICE_PROFILE_LOSSY    0x0UL
 	#define QUEUE_QPORTCFG_RESP_QUEUE_ID6_SERVICE_PROFILE_LOSSLESS 0x1UL
-	#define QUEUE_QPORTCFG_RESP_QUEUE_ID6_SERVICE_PROFILE_UNKNOWN 0xffUL
-	u8 queue_id7;
-	u8 queue_id7_service_profile;
-	#define QUEUE_QPORTCFG_RESP_QUEUE_ID7_SERVICE_PROFILE_LOSSY 0x0UL
+	#define QUEUE_QPORTCFG_RESP_QUEUE_ID6_SERVICE_PROFILE_UNKNOWN  0xffUL
+	#define QUEUE_QPORTCFG_RESP_QUEUE_ID6_SERVICE_PROFILE_LAST    QUEUE_QPORTCFG_RESP_QUEUE_ID6_SERVICE_PROFILE_UNKNOWN
+	u8	queue_id7;
+	u8	queue_id7_service_profile;
+	#define QUEUE_QPORTCFG_RESP_QUEUE_ID7_SERVICE_PROFILE_LOSSY    0x0UL
 	#define QUEUE_QPORTCFG_RESP_QUEUE_ID7_SERVICE_PROFILE_LOSSLESS 0x1UL
-	#define QUEUE_QPORTCFG_RESP_QUEUE_ID7_SERVICE_PROFILE_UNKNOWN 0xffUL
-	u8 valid;
+	#define QUEUE_QPORTCFG_RESP_QUEUE_ID7_SERVICE_PROFILE_UNKNOWN  0xffUL
+	#define QUEUE_QPORTCFG_RESP_QUEUE_ID7_SERVICE_PROFILE_LAST    QUEUE_QPORTCFG_RESP_QUEUE_ID7_SERVICE_PROFILE_UNKNOWN
+	u8	valid;
 };
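
The queue_idN/queue_idN_service_profile fields above are laid out as consecutive u8 pairs starting at queue_id0, so a byte pointer can walk them. A sketch of that iteration, bounded by max_configurable_queues and capped at the 8 pairs the response defines:

	static void walk_qportcfg(const struct hwrm_queue_qportcfg_output *resp)
	{
		const u8 *qptr = &resp->queue_id0;
		int i, n = min_t(int, resp->max_configurable_queues, 8);

		for (i = 0; i < n; i++) {
			u8 id = *qptr++;
			u8 profile = *qptr++;

			/* profile: LOSSY (0x0), LOSSLESS (0x1), UNKNOWN (0xff) */
			pr_debug("queue id %u, service profile 0x%x\n",
				 id, profile);
		}
	}
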
 
-/* hwrm_queue_cfg */
-/* Input (40 bytes) */
+/* hwrm_queue_cfg_input (size:320b/40B) */
 struct hwrm_queue_cfg_input {
-	__le16 req_type;
-	__le16 cmpl_ring;
-	__le16 seq_id;
-	__le16 target_id;
-	__le64 resp_addr;
-	__le32 flags;
-	#define QUEUE_CFG_REQ_FLAGS_PATH_MASK			    0x3UL
-	#define QUEUE_CFG_REQ_FLAGS_PATH_SFT			    0
-	#define QUEUE_CFG_REQ_FLAGS_PATH_TX			   0x0UL
-	#define QUEUE_CFG_REQ_FLAGS_PATH_RX			   0x1UL
-	#define QUEUE_CFG_REQ_FLAGS_PATH_BIDIR			   0x2UL
-	#define QUEUE_CFG_REQ_FLAGS_PATH_LAST    QUEUE_CFG_REQ_FLAGS_PATH_BIDIR
-	__le32 enables;
-	#define QUEUE_CFG_REQ_ENABLES_DFLT_LEN			    0x1UL
-	#define QUEUE_CFG_REQ_ENABLES_SERVICE_PROFILE		    0x2UL
-	__le32 queue_id;
-	__le32 dflt_len;
-	u8 service_profile;
-	#define QUEUE_CFG_REQ_SERVICE_PROFILE_LOSSY		   0x0UL
-	#define QUEUE_CFG_REQ_SERVICE_PROFILE_LOSSLESS		   0x1UL
-	#define QUEUE_CFG_REQ_SERVICE_PROFILE_UNKNOWN		   0xffUL
-	u8 unused_0[7];
-};
-
-/* Output (16 bytes) */
+	__le16	req_type;
+	__le16	cmpl_ring;
+	__le16	seq_id;
+	__le16	target_id;
+	__le64	resp_addr;
+	__le32	flags;
+	#define QUEUE_CFG_REQ_FLAGS_PATH_MASK 0x3UL
+	#define QUEUE_CFG_REQ_FLAGS_PATH_SFT  0
+	#define QUEUE_CFG_REQ_FLAGS_PATH_TX     0x0UL
+	#define QUEUE_CFG_REQ_FLAGS_PATH_RX     0x1UL
+	#define QUEUE_CFG_REQ_FLAGS_PATH_BIDIR  0x2UL
+	#define QUEUE_CFG_REQ_FLAGS_PATH_LAST  QUEUE_CFG_REQ_FLAGS_PATH_BIDIR
+	__le32	enables;
+	#define QUEUE_CFG_REQ_ENABLES_DFLT_LEN            0x1UL
+	#define QUEUE_CFG_REQ_ENABLES_SERVICE_PROFILE     0x2UL
+	__le32	queue_id;
+	__le32	dflt_len;
+	u8	service_profile;
+	#define QUEUE_CFG_REQ_SERVICE_PROFILE_LOSSY    0x0UL
+	#define QUEUE_CFG_REQ_SERVICE_PROFILE_LOSSLESS 0x1UL
+	#define QUEUE_CFG_REQ_SERVICE_PROFILE_UNKNOWN  0xffUL
+	#define QUEUE_CFG_REQ_SERVICE_PROFILE_LAST    QUEUE_CFG_REQ_SERVICE_PROFILE_UNKNOWN
+	u8	unused_0[7];
+};
+
+/* hwrm_queue_cfg_output (size:128b/16B) */
 struct hwrm_queue_cfg_output {
-	__le16 error_code;
-	__le16 req_type;
-	__le16 seq_id;
-	__le16 resp_len;
-	__le32 unused_0;
-	u8 unused_1;
-	u8 unused_2;
-	u8 unused_3;
-	u8 valid;
-};
-
-/* hwrm_queue_pfcenable_qcfg */
-/* Input (24 bytes) */
+	__le16	error_code;
+	__le16	req_type;
+	__le16	seq_id;
+	__le16	resp_len;
+	u8	unused_0[7];
+	u8	valid;
+};
+
+/* hwrm_queue_pfcenable_qcfg_input (size:192b/24B) */
 struct hwrm_queue_pfcenable_qcfg_input {
-	__le16 req_type;
-	__le16 cmpl_ring;
-	__le16 seq_id;
-	__le16 target_id;
-	__le64 resp_addr;
-	__le16 port_id;
-	__le16 unused_0[3];
+	__le16	req_type;
+	__le16	cmpl_ring;
+	__le16	seq_id;
+	__le16	target_id;
+	__le64	resp_addr;
+	__le16	port_id;
+	u8	unused_0[6];
 };
 
-/* Output (16 bytes) */
+/* hwrm_queue_pfcenable_qcfg_output (size:128b/16B) */
 struct hwrm_queue_pfcenable_qcfg_output {
-	__le16 error_code;
-	__le16 req_type;
-	__le16 seq_id;
-	__le16 resp_len;
-	__le32 flags;
-	#define QUEUE_PFCENABLE_QCFG_RESP_FLAGS_PRI0_PFC_ENABLED   0x1UL
-	#define QUEUE_PFCENABLE_QCFG_RESP_FLAGS_PRI1_PFC_ENABLED   0x2UL
-	#define QUEUE_PFCENABLE_QCFG_RESP_FLAGS_PRI2_PFC_ENABLED   0x4UL
-	#define QUEUE_PFCENABLE_QCFG_RESP_FLAGS_PRI3_PFC_ENABLED   0x8UL
-	#define QUEUE_PFCENABLE_QCFG_RESP_FLAGS_PRI4_PFC_ENABLED   0x10UL
-	#define QUEUE_PFCENABLE_QCFG_RESP_FLAGS_PRI5_PFC_ENABLED   0x20UL
-	#define QUEUE_PFCENABLE_QCFG_RESP_FLAGS_PRI6_PFC_ENABLED   0x40UL
-	#define QUEUE_PFCENABLE_QCFG_RESP_FLAGS_PRI7_PFC_ENABLED   0x80UL
-	u8 unused_0;
-	u8 unused_1;
-	u8 unused_2;
-	u8 valid;
-};
-
-/* hwrm_queue_pfcenable_cfg */
-/* Input (24 bytes) */
+	__le16	error_code;
+	__le16	req_type;
+	__le16	seq_id;
+	__le16	resp_len;
+	__le32	flags;
+	#define QUEUE_PFCENABLE_QCFG_RESP_FLAGS_PRI0_PFC_ENABLED     0x1UL
+	#define QUEUE_PFCENABLE_QCFG_RESP_FLAGS_PRI1_PFC_ENABLED     0x2UL
+	#define QUEUE_PFCENABLE_QCFG_RESP_FLAGS_PRI2_PFC_ENABLED     0x4UL
+	#define QUEUE_PFCENABLE_QCFG_RESP_FLAGS_PRI3_PFC_ENABLED     0x8UL
+	#define QUEUE_PFCENABLE_QCFG_RESP_FLAGS_PRI4_PFC_ENABLED     0x10UL
+	#define QUEUE_PFCENABLE_QCFG_RESP_FLAGS_PRI5_PFC_ENABLED     0x20UL
+	#define QUEUE_PFCENABLE_QCFG_RESP_FLAGS_PRI6_PFC_ENABLED     0x40UL
+	#define QUEUE_PFCENABLE_QCFG_RESP_FLAGS_PRI7_PFC_ENABLED     0x80UL
+	u8	unused_0[3];
+	u8	valid;
+};
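
Since the PRIn_PFC_ENABLED flags above are defined as 1 << n, the per-priority PFC state collapses into the low byte of flags. A one-line decoding sketch:

	/* bit n set in the return value == PFC enabled on priority n */
	static u8 pfc_enabled_mask(const struct hwrm_queue_pfcenable_qcfg_output *resp)
	{
		return le32_to_cpu(resp->flags) & 0xff;
	}
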
+
+/* hwrm_queue_pfcenable_cfg_input (size:192b/24B) */
 struct hwrm_queue_pfcenable_cfg_input {
-	__le16 req_type;
-	__le16 cmpl_ring;
-	__le16 seq_id;
-	__le16 target_id;
-	__le64 resp_addr;
-	__le32 flags;
+	__le16	req_type;
+	__le16	cmpl_ring;
+	__le16	seq_id;
+	__le16	target_id;
+	__le64	resp_addr;
+	__le32	flags;
 	#define QUEUE_PFCENABLE_CFG_REQ_FLAGS_PRI0_PFC_ENABLED     0x1UL
 	#define QUEUE_PFCENABLE_CFG_REQ_FLAGS_PRI1_PFC_ENABLED     0x2UL
 	#define QUEUE_PFCENABLE_CFG_REQ_FLAGS_PRI2_PFC_ENABLED     0x4UL
@@ -2440,1729 +2703,1664 @@ struct hwrm_queue_pfcenable_cfg_input {
 	#define QUEUE_PFCENABLE_CFG_REQ_FLAGS_PRI5_PFC_ENABLED     0x20UL
 	#define QUEUE_PFCENABLE_CFG_REQ_FLAGS_PRI6_PFC_ENABLED     0x40UL
 	#define QUEUE_PFCENABLE_CFG_REQ_FLAGS_PRI7_PFC_ENABLED     0x80UL
-	__le16 port_id;
-	__le16 unused_0;
+	__le16	port_id;
+	u8	unused_0[2];
 };
 
-/* Output (16 bytes) */
+/* hwrm_queue_pfcenable_cfg_output (size:128b/16B) */
 struct hwrm_queue_pfcenable_cfg_output {
-	__le16 error_code;
-	__le16 req_type;
-	__le16 seq_id;
-	__le16 resp_len;
-	__le32 unused_0;
-	u8 unused_1;
-	u8 unused_2;
-	u8 unused_3;
-	u8 valid;
-};
-
-/* hwrm_queue_pri2cos_qcfg */
-/* Input (24 bytes) */
+	__le16	error_code;
+	__le16	req_type;
+	__le16	seq_id;
+	__le16	resp_len;
+	u8	unused_0[7];
+	u8	valid;
+};
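/*
 * Annotation (not part of the generated header): like every HWRM
 * completion, this response ends in a one-byte 'valid' field that
 * firmware writes last into the DMA'd buffer.  A hypothetical polling
 * sketch; the real driver wraps this in timeouts and ordering:
 */
static inline bool bnxt_hwrm_resp_landed(const struct hwrm_queue_pfcenable_cfg_output *resp)
{
	/* READ_ONCE() forces a fresh load of the DMA-written byte */
	return READ_ONCE(resp->valid) != 0;
}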
+
+/* hwrm_queue_pri2cos_qcfg_input (size:192b/24B) */
 struct hwrm_queue_pri2cos_qcfg_input {
-	__le16 req_type;
-	__le16 cmpl_ring;
-	__le16 seq_id;
-	__le16 target_id;
-	__le64 resp_addr;
-	__le32 flags;
-	#define QUEUE_PRI2COS_QCFG_REQ_FLAGS_PATH		    0x1UL
-	#define QUEUE_PRI2COS_QCFG_REQ_FLAGS_PATH_TX		   (0x0UL << 0)
-	#define QUEUE_PRI2COS_QCFG_REQ_FLAGS_PATH_RX		   (0x1UL << 0)
-	#define QUEUE_PRI2COS_QCFG_REQ_FLAGS_PATH_LAST    QUEUE_PRI2COS_QCFG_REQ_FLAGS_PATH_RX
-	#define QUEUE_PRI2COS_QCFG_REQ_FLAGS_IVLAN		    0x2UL
-	u8 port_id;
-	u8 unused_0[3];
-};
-
-/* Output (24 bytes) */
+	__le16	req_type;
+	__le16	cmpl_ring;
+	__le16	seq_id;
+	__le16	target_id;
+	__le64	resp_addr;
+	__le32	flags;
+	#define QUEUE_PRI2COS_QCFG_REQ_FLAGS_PATH      0x1UL
+	#define QUEUE_PRI2COS_QCFG_REQ_FLAGS_PATH_TX     0x0UL
+	#define QUEUE_PRI2COS_QCFG_REQ_FLAGS_PATH_RX     0x1UL
+	#define QUEUE_PRI2COS_QCFG_REQ_FLAGS_PATH_LAST  QUEUE_PRI2COS_QCFG_REQ_FLAGS_PATH_RX
+	#define QUEUE_PRI2COS_QCFG_REQ_FLAGS_IVLAN     0x2UL
+	u8	port_id;
+	u8	unused_0[3];
+};
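/*
 * Annotation (not part of the generated header): PATH is a one-bit
 * selector, which is why the regenerated header emits TX/RX as plain
 * 0x0UL/0x1UL instead of the old shifted forms.  Selecting the RX
 * direction is therefore just setting that bit:
 */
static inline void bnxt_pri2cos_query_rx(struct hwrm_queue_pri2cos_qcfg_input *req)
{
	/* PATH_RX (0x1UL) is the PATH bit itself, so OR-ing it in is enough */
	req->flags |= cpu_to_le32(QUEUE_PRI2COS_QCFG_REQ_FLAGS_PATH_RX);
}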
+
+/* hwrm_queue_pri2cos_qcfg_output (size:192b/24B) */
 struct hwrm_queue_pri2cos_qcfg_output {
-	__le16 error_code;
-	__le16 req_type;
-	__le16 seq_id;
-	__le16 resp_len;
-	u8 pri0_cos_queue_id;
-	u8 pri1_cos_queue_id;
-	u8 pri2_cos_queue_id;
-	u8 pri3_cos_queue_id;
-	u8 pri4_cos_queue_id;
-	u8 pri5_cos_queue_id;
-	u8 pri6_cos_queue_id;
-	u8 pri7_cos_queue_id;
-	u8 queue_cfg_info;
-	#define QUEUE_PRI2COS_QCFG_RESP_QUEUE_CFG_INFO_ASYM_CFG    0x1UL
-	u8 unused_0;
-	__le16 unused_1;
-	u8 unused_2;
-	u8 unused_3;
-	u8 unused_4;
-	u8 valid;
-};
-
-/* hwrm_queue_pri2cos_cfg */
-/* Input (40 bytes) */
+	__le16	error_code;
+	__le16	req_type;
+	__le16	seq_id;
+	__le16	resp_len;
+	u8	pri0_cos_queue_id;
+	u8	pri1_cos_queue_id;
+	u8	pri2_cos_queue_id;
+	u8	pri3_cos_queue_id;
+	u8	pri4_cos_queue_id;
+	u8	pri5_cos_queue_id;
+	u8	pri6_cos_queue_id;
+	u8	pri7_cos_queue_id;
+	u8	queue_cfg_info;
+	#define QUEUE_PRI2COS_QCFG_RESP_QUEUE_CFG_INFO_ASYM_CFG     0x1UL
+	u8	unused_0[6];
+	u8	valid;
+};
+
+/* hwrm_queue_pri2cos_cfg_input (size:320b/40B) */
 struct hwrm_queue_pri2cos_cfg_input {
-	__le16 req_type;
-	__le16 cmpl_ring;
-	__le16 seq_id;
-	__le16 target_id;
-	__le64 resp_addr;
-	__le32 flags;
-	#define QUEUE_PRI2COS_CFG_REQ_FLAGS_PATH_MASK		    0x3UL
-	#define QUEUE_PRI2COS_CFG_REQ_FLAGS_PATH_SFT		    0
-	#define QUEUE_PRI2COS_CFG_REQ_FLAGS_PATH_TX		   (0x0UL << 0)
-	#define QUEUE_PRI2COS_CFG_REQ_FLAGS_PATH_RX		   (0x1UL << 0)
-	#define QUEUE_PRI2COS_CFG_REQ_FLAGS_PATH_BIDIR		   (0x2UL << 0)
-	#define QUEUE_PRI2COS_CFG_REQ_FLAGS_PATH_LAST    QUEUE_PRI2COS_CFG_REQ_FLAGS_PATH_BIDIR
-	#define QUEUE_PRI2COS_CFG_REQ_FLAGS_IVLAN		    0x4UL
-	__le32 enables;
-	#define QUEUE_PRI2COS_CFG_REQ_ENABLES_PRI0_COS_QUEUE_ID    0x1UL
-	#define QUEUE_PRI2COS_CFG_REQ_ENABLES_PRI1_COS_QUEUE_ID    0x2UL
-	#define QUEUE_PRI2COS_CFG_REQ_ENABLES_PRI2_COS_QUEUE_ID    0x4UL
-	#define QUEUE_PRI2COS_CFG_REQ_ENABLES_PRI3_COS_QUEUE_ID    0x8UL
-	#define QUEUE_PRI2COS_CFG_REQ_ENABLES_PRI4_COS_QUEUE_ID    0x10UL
-	#define QUEUE_PRI2COS_CFG_REQ_ENABLES_PRI5_COS_QUEUE_ID    0x20UL
-	#define QUEUE_PRI2COS_CFG_REQ_ENABLES_PRI6_COS_QUEUE_ID    0x40UL
-	#define QUEUE_PRI2COS_CFG_REQ_ENABLES_PRI7_COS_QUEUE_ID    0x80UL
-	u8 port_id;
-	u8 pri0_cos_queue_id;
-	u8 pri1_cos_queue_id;
-	u8 pri2_cos_queue_id;
-	u8 pri3_cos_queue_id;
-	u8 pri4_cos_queue_id;
-	u8 pri5_cos_queue_id;
-	u8 pri6_cos_queue_id;
-	u8 pri7_cos_queue_id;
-	u8 unused_0[7];
-};
-
-/* Output (16 bytes) */
+	__le16	req_type;
+	__le16	cmpl_ring;
+	__le16	seq_id;
+	__le16	target_id;
+	__le64	resp_addr;
+	__le32	flags;
+	#define QUEUE_PRI2COS_CFG_REQ_FLAGS_PATH_MASK 0x3UL
+	#define QUEUE_PRI2COS_CFG_REQ_FLAGS_PATH_SFT  0
+	#define QUEUE_PRI2COS_CFG_REQ_FLAGS_PATH_TX     0x0UL
+	#define QUEUE_PRI2COS_CFG_REQ_FLAGS_PATH_RX     0x1UL
+	#define QUEUE_PRI2COS_CFG_REQ_FLAGS_PATH_BIDIR  0x2UL
+	#define QUEUE_PRI2COS_CFG_REQ_FLAGS_PATH_LAST  QUEUE_PRI2COS_CFG_REQ_FLAGS_PATH_BIDIR
+	#define QUEUE_PRI2COS_CFG_REQ_FLAGS_IVLAN     0x4UL
+	__le32	enables;
+	#define QUEUE_PRI2COS_CFG_REQ_ENABLES_PRI0_COS_QUEUE_ID     0x1UL
+	#define QUEUE_PRI2COS_CFG_REQ_ENABLES_PRI1_COS_QUEUE_ID     0x2UL
+	#define QUEUE_PRI2COS_CFG_REQ_ENABLES_PRI2_COS_QUEUE_ID     0x4UL
+	#define QUEUE_PRI2COS_CFG_REQ_ENABLES_PRI3_COS_QUEUE_ID     0x8UL
+	#define QUEUE_PRI2COS_CFG_REQ_ENABLES_PRI4_COS_QUEUE_ID     0x10UL
+	#define QUEUE_PRI2COS_CFG_REQ_ENABLES_PRI5_COS_QUEUE_ID     0x20UL
+	#define QUEUE_PRI2COS_CFG_REQ_ENABLES_PRI6_COS_QUEUE_ID     0x40UL
+	#define QUEUE_PRI2COS_CFG_REQ_ENABLES_PRI7_COS_QUEUE_ID     0x80UL
+	u8	port_id;
+	u8	pri0_cos_queue_id;
+	u8	pri1_cos_queue_id;
+	u8	pri2_cos_queue_id;
+	u8	pri3_cos_queue_id;
+	u8	pri4_cos_queue_id;
+	u8	pri5_cos_queue_id;
+	u8	pri6_cos_queue_id;
+	u8	pri7_cos_queue_id;
+	u8	unused_0[7];
+};
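/*
 * Annotation (not part of the generated header): programming one
 * priority->CoS entry means setting the matching ENABLES bit plus the
 * per-priority queue id; per the usual HWRM convention, fields whose
 * enables bit is clear are ignored.  Sketch for priority 0, assuming a
 * zeroed request:
 */
static inline void bnxt_set_pri0_cos(struct hwrm_queue_pri2cos_cfg_input *req,
				     u8 cos_queue_id)
{
	req->enables |= cpu_to_le32(QUEUE_PRI2COS_CFG_REQ_ENABLES_PRI0_COS_QUEUE_ID);
	req->pri0_cos_queue_id = cos_queue_id;
}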
+
+/* hwrm_queue_pri2cos_cfg_output (size:128b/16B) */
 struct hwrm_queue_pri2cos_cfg_output {
-	__le16 error_code;
-	__le16 req_type;
-	__le16 seq_id;
-	__le16 resp_len;
-	__le32 unused_0;
-	u8 unused_1;
-	u8 unused_2;
-	u8 unused_3;
-	u8 valid;
-};
-
-/* hwrm_queue_cos2bw_qcfg */
-/* Input (24 bytes) */
+	__le16	error_code;
+	__le16	req_type;
+	__le16	seq_id;
+	__le16	resp_len;
+	u8	unused_0[7];
+	u8	valid;
+};
+
+/* hwrm_queue_cos2bw_qcfg_input (size:192b/24B) */
 struct hwrm_queue_cos2bw_qcfg_input {
-	__le16 req_type;
-	__le16 cmpl_ring;
-	__le16 seq_id;
-	__le16 target_id;
-	__le64 resp_addr;
-	__le16 port_id;
-	__le16 unused_0[3];
+	__le16	req_type;
+	__le16	cmpl_ring;
+	__le16	seq_id;
+	__le16	target_id;
+	__le64	resp_addr;
+	__le16	port_id;
+	u8	unused_0[6];
 };
 
-/* Output (112 bytes) */
+/* hwrm_queue_cos2bw_qcfg_output (size:896b/112B) */
 struct hwrm_queue_cos2bw_qcfg_output {
-	__le16 error_code;
-	__le16 req_type;
-	__le16 seq_id;
-	__le16 resp_len;
-	u8 queue_id0;
-	u8 unused_0;
-	__le16 unused_1;
-	__le32 queue_id0_min_bw;
-	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MIN_BW_BW_VALUE_MASK 0xfffffffUL
-	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MIN_BW_BW_VALUE_SFT 0
-	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MIN_BW_SCALE      0x10000000UL
-	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MIN_BW_SCALE_BITS (0x0UL << 28)
-	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MIN_BW_SCALE_BYTES (0x1UL << 28)
-	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MIN_BW_SCALE_LAST    QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MIN_BW_SCALE_BYTES
-	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
-	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_SFT 29
-	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29)
-	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_KILO (0x2UL << 29)
-	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_BASE (0x4UL << 29)
-	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29)
-	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
-	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
-	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_LAST    QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_INVALID
-	__le32 queue_id0_max_bw;
-	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MAX_BW_BW_VALUE_MASK 0xfffffffUL
-	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MAX_BW_BW_VALUE_SFT 0
-	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MAX_BW_SCALE      0x10000000UL
-	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MAX_BW_SCALE_BITS (0x0UL << 28)
-	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MAX_BW_SCALE_BYTES (0x1UL << 28)
-	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MAX_BW_SCALE_LAST    QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MAX_BW_SCALE_BYTES
-	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
-	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_SFT 29
-	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29)
-	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_KILO (0x2UL << 29)
-	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_BASE (0x4UL << 29)
-	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29)
-	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
-	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
-	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_LAST    QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_INVALID
-	u8 queue_id0_tsa_assign;
-	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_TSA_ASSIGN_SP    0x0UL
-	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_TSA_ASSIGN_ETS   0x1UL
+	__le16	error_code;
+	__le16	req_type;
+	__le16	seq_id;
+	__le16	resp_len;
+	u8	queue_id0;
+	u8	unused_0;
+	__le16	unused_1;
+	__le32	queue_id0_min_bw;
+	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MIN_BW_BW_VALUE_MASK             0xfffffffUL
+	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MIN_BW_BW_VALUE_SFT              0
+	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MIN_BW_SCALE                     0x10000000UL
+	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MIN_BW_SCALE_BITS                  (0x0UL << 28)
+	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MIN_BW_SCALE_BYTES                 (0x1UL << 28)
+	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MIN_BW_SCALE_LAST                 QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MIN_BW_SCALE_BYTES
+	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_MASK        0xe0000000UL
+	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_SFT         29
+	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_MEGA          (0x0UL << 29)
+	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_KILO          (0x2UL << 29)
+	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_BASE          (0x4UL << 29)
+	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_GIGA          (0x6UL << 29)
+	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_PERCENT1_100  (0x1UL << 29)
+	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_INVALID       (0x7UL << 29)
+	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_LAST         QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_INVALID
+	__le32	queue_id0_max_bw;
+	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MAX_BW_BW_VALUE_MASK             0xfffffffUL
+	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MAX_BW_BW_VALUE_SFT              0
+	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MAX_BW_SCALE                     0x10000000UL
+	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MAX_BW_SCALE_BITS                  (0x0UL << 28)
+	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MAX_BW_SCALE_BYTES                 (0x1UL << 28)
+	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MAX_BW_SCALE_LAST                 QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MAX_BW_SCALE_BYTES
+	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_MASK        0xe0000000UL
+	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_SFT         29
+	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_MEGA          (0x0UL << 29)
+	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_KILO          (0x2UL << 29)
+	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_BASE          (0x4UL << 29)
+	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_GIGA          (0x6UL << 29)
+	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_PERCENT1_100  (0x1UL << 29)
+	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_INVALID       (0x7UL << 29)
+	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_LAST         QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_INVALID
+	u8	queue_id0_tsa_assign;
+	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_TSA_ASSIGN_SP             0x0UL
+	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_TSA_ASSIGN_ETS            0x1UL
 	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_TSA_ASSIGN_RESERVED_FIRST 0x2UL
-	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_TSA_ASSIGN_RESERVED_LAST 0xffUL
-	u8 queue_id0_pri_lvl;
-	u8 queue_id0_bw_weight;
-	u8 queue_id1;
-	__le32 queue_id1_min_bw;
-	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MIN_BW_BW_VALUE_MASK 0xfffffffUL
-	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MIN_BW_BW_VALUE_SFT 0
-	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MIN_BW_SCALE      0x10000000UL
-	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MIN_BW_SCALE_BITS (0x0UL << 28)
-	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MIN_BW_SCALE_BYTES (0x1UL << 28)
-	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MIN_BW_SCALE_LAST    QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MIN_BW_SCALE_BYTES
-	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MIN_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
-	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MIN_BW_BW_VALUE_UNIT_SFT 29
-	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MIN_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29)
-	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MIN_BW_BW_VALUE_UNIT_KILO (0x2UL << 29)
-	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MIN_BW_BW_VALUE_UNIT_BASE (0x4UL << 29)
-	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MIN_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29)
-	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MIN_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
-	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MIN_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
-	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MIN_BW_BW_VALUE_UNIT_LAST    QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MIN_BW_BW_VALUE_UNIT_INVALID
-	__le32 queue_id1_max_bw;
-	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MAX_BW_BW_VALUE_MASK 0xfffffffUL
-	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MAX_BW_BW_VALUE_SFT 0
-	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MAX_BW_SCALE      0x10000000UL
-	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MAX_BW_SCALE_BITS (0x0UL << 28)
-	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MAX_BW_SCALE_BYTES (0x1UL << 28)
-	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MAX_BW_SCALE_LAST    QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MAX_BW_SCALE_BYTES
-	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MAX_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
-	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MAX_BW_BW_VALUE_UNIT_SFT 29
-	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MAX_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29)
-	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MAX_BW_BW_VALUE_UNIT_KILO (0x2UL << 29)
-	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MAX_BW_BW_VALUE_UNIT_BASE (0x4UL << 29)
-	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MAX_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29)
-	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MAX_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
-	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MAX_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
-	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MAX_BW_BW_VALUE_UNIT_LAST    QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MAX_BW_BW_VALUE_UNIT_INVALID
-	u8 queue_id1_tsa_assign;
-	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_TSA_ASSIGN_SP    0x0UL
-	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_TSA_ASSIGN_ETS   0x1UL
+	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_TSA_ASSIGN_RESERVED_LAST  0xffUL
+	u8	queue_id0_pri_lvl;
+	u8	queue_id0_bw_weight;
+	u8	queue_id1;
+	__le32	queue_id1_min_bw;
+	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MIN_BW_BW_VALUE_MASK             0xfffffffUL
+	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MIN_BW_BW_VALUE_SFT              0
+	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MIN_BW_SCALE                     0x10000000UL
+	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MIN_BW_SCALE_BITS                  (0x0UL << 28)
+	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MIN_BW_SCALE_BYTES                 (0x1UL << 28)
+	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MIN_BW_SCALE_LAST                 QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MIN_BW_SCALE_BYTES
+	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MIN_BW_BW_VALUE_UNIT_MASK        0xe0000000UL
+	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MIN_BW_BW_VALUE_UNIT_SFT         29
+	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MIN_BW_BW_VALUE_UNIT_MEGA          (0x0UL << 29)
+	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MIN_BW_BW_VALUE_UNIT_KILO          (0x2UL << 29)
+	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MIN_BW_BW_VALUE_UNIT_BASE          (0x4UL << 29)
+	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MIN_BW_BW_VALUE_UNIT_GIGA          (0x6UL << 29)
+	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MIN_BW_BW_VALUE_UNIT_PERCENT1_100  (0x1UL << 29)
+	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MIN_BW_BW_VALUE_UNIT_INVALID       (0x7UL << 29)
+	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MIN_BW_BW_VALUE_UNIT_LAST         QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MIN_BW_BW_VALUE_UNIT_INVALID
+	__le32	queue_id1_max_bw;
+	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MAX_BW_BW_VALUE_MASK             0xfffffffUL
+	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MAX_BW_BW_VALUE_SFT              0
+	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MAX_BW_SCALE                     0x10000000UL
+	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MAX_BW_SCALE_BITS                  (0x0UL << 28)
+	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MAX_BW_SCALE_BYTES                 (0x1UL << 28)
+	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MAX_BW_SCALE_LAST                 QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MAX_BW_SCALE_BYTES
+	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MAX_BW_BW_VALUE_UNIT_MASK        0xe0000000UL
+	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MAX_BW_BW_VALUE_UNIT_SFT         29
+	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MAX_BW_BW_VALUE_UNIT_MEGA          (0x0UL << 29)
+	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MAX_BW_BW_VALUE_UNIT_KILO          (0x2UL << 29)
+	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MAX_BW_BW_VALUE_UNIT_BASE          (0x4UL << 29)
+	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MAX_BW_BW_VALUE_UNIT_GIGA          (0x6UL << 29)
+	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MAX_BW_BW_VALUE_UNIT_PERCENT1_100  (0x1UL << 29)
+	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MAX_BW_BW_VALUE_UNIT_INVALID       (0x7UL << 29)
+	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MAX_BW_BW_VALUE_UNIT_LAST         QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MAX_BW_BW_VALUE_UNIT_INVALID
+	u8	queue_id1_tsa_assign;
+	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_TSA_ASSIGN_SP             0x0UL
+	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_TSA_ASSIGN_ETS            0x1UL
 	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_TSA_ASSIGN_RESERVED_FIRST 0x2UL
-	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_TSA_ASSIGN_RESERVED_LAST 0xffUL
-	u8 queue_id1_pri_lvl;
-	u8 queue_id1_bw_weight;
-	u8 queue_id2;
-	__le32 queue_id2_min_bw;
-	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MIN_BW_BW_VALUE_MASK 0xfffffffUL
-	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MIN_BW_BW_VALUE_SFT 0
-	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MIN_BW_SCALE      0x10000000UL
-	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MIN_BW_SCALE_BITS (0x0UL << 28)
-	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MIN_BW_SCALE_BYTES (0x1UL << 28)
-	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MIN_BW_SCALE_LAST    QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MIN_BW_SCALE_BYTES
-	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MIN_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
-	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MIN_BW_BW_VALUE_UNIT_SFT 29
-	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MIN_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29)
-	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MIN_BW_BW_VALUE_UNIT_KILO (0x2UL << 29)
-	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MIN_BW_BW_VALUE_UNIT_BASE (0x4UL << 29)
-	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MIN_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29)
-	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MIN_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
-	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MIN_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
-	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MIN_BW_BW_VALUE_UNIT_LAST    QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MIN_BW_BW_VALUE_UNIT_INVALID
-	__le32 queue_id2_max_bw;
-	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MAX_BW_BW_VALUE_MASK 0xfffffffUL
-	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MAX_BW_BW_VALUE_SFT 0
-	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MAX_BW_SCALE      0x10000000UL
-	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MAX_BW_SCALE_BITS (0x0UL << 28)
-	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MAX_BW_SCALE_BYTES (0x1UL << 28)
-	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MAX_BW_SCALE_LAST    QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MAX_BW_SCALE_BYTES
-	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MAX_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
-	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MAX_BW_BW_VALUE_UNIT_SFT 29
-	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MAX_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29)
-	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MAX_BW_BW_VALUE_UNIT_KILO (0x2UL << 29)
-	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MAX_BW_BW_VALUE_UNIT_BASE (0x4UL << 29)
-	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MAX_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29)
-	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MAX_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
-	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MAX_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
-	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MAX_BW_BW_VALUE_UNIT_LAST    QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MAX_BW_BW_VALUE_UNIT_INVALID
-	u8 queue_id2_tsa_assign;
-	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_TSA_ASSIGN_SP    0x0UL
-	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_TSA_ASSIGN_ETS   0x1UL
+	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_TSA_ASSIGN_RESERVED_LAST  0xffUL
+	u8	queue_id1_pri_lvl;
+	u8	queue_id1_bw_weight;
+	u8	queue_id2;
+	__le32	queue_id2_min_bw;
+	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MIN_BW_BW_VALUE_MASK             0xfffffffUL
+	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MIN_BW_BW_VALUE_SFT              0
+	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MIN_BW_SCALE                     0x10000000UL
+	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MIN_BW_SCALE_BITS                  (0x0UL << 28)
+	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MIN_BW_SCALE_BYTES                 (0x1UL << 28)
+	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MIN_BW_SCALE_LAST                 QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MIN_BW_SCALE_BYTES
+	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MIN_BW_BW_VALUE_UNIT_MASK        0xe0000000UL
+	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MIN_BW_BW_VALUE_UNIT_SFT         29
+	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MIN_BW_BW_VALUE_UNIT_MEGA          (0x0UL << 29)
+	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MIN_BW_BW_VALUE_UNIT_KILO          (0x2UL << 29)
+	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MIN_BW_BW_VALUE_UNIT_BASE          (0x4UL << 29)
+	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MIN_BW_BW_VALUE_UNIT_GIGA          (0x6UL << 29)
+	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MIN_BW_BW_VALUE_UNIT_PERCENT1_100  (0x1UL << 29)
+	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MIN_BW_BW_VALUE_UNIT_INVALID       (0x7UL << 29)
+	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MIN_BW_BW_VALUE_UNIT_LAST         QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MIN_BW_BW_VALUE_UNIT_INVALID
+	__le32	queue_id2_max_bw;
+	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MAX_BW_BW_VALUE_MASK             0xfffffffUL
+	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MAX_BW_BW_VALUE_SFT              0
+	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MAX_BW_SCALE                     0x10000000UL
+	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MAX_BW_SCALE_BITS                  (0x0UL << 28)
+	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MAX_BW_SCALE_BYTES                 (0x1UL << 28)
+	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MAX_BW_SCALE_LAST                 QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MAX_BW_SCALE_BYTES
+	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MAX_BW_BW_VALUE_UNIT_MASK        0xe0000000UL
+	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MAX_BW_BW_VALUE_UNIT_SFT         29
+	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MAX_BW_BW_VALUE_UNIT_MEGA          (0x0UL << 29)
+	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MAX_BW_BW_VALUE_UNIT_KILO          (0x2UL << 29)
+	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MAX_BW_BW_VALUE_UNIT_BASE          (0x4UL << 29)
+	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MAX_BW_BW_VALUE_UNIT_GIGA          (0x6UL << 29)
+	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MAX_BW_BW_VALUE_UNIT_PERCENT1_100  (0x1UL << 29)
+	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MAX_BW_BW_VALUE_UNIT_INVALID       (0x7UL << 29)
+	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MAX_BW_BW_VALUE_UNIT_LAST         QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MAX_BW_BW_VALUE_UNIT_INVALID
+	u8	queue_id2_tsa_assign;
+	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_TSA_ASSIGN_SP             0x0UL
+	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_TSA_ASSIGN_ETS            0x1UL
 	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_TSA_ASSIGN_RESERVED_FIRST 0x2UL
-	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_TSA_ASSIGN_RESERVED_LAST 0xffUL
-	u8 queue_id2_pri_lvl;
-	u8 queue_id2_bw_weight;
-	u8 queue_id3;
-	__le32 queue_id3_min_bw;
-	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MIN_BW_BW_VALUE_MASK 0xfffffffUL
-	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MIN_BW_BW_VALUE_SFT 0
-	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MIN_BW_SCALE      0x10000000UL
-	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MIN_BW_SCALE_BITS (0x0UL << 28)
-	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MIN_BW_SCALE_BYTES (0x1UL << 28)
-	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MIN_BW_SCALE_LAST    QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MIN_BW_SCALE_BYTES
-	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MIN_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
-	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MIN_BW_BW_VALUE_UNIT_SFT 29
-	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MIN_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29)
-	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MIN_BW_BW_VALUE_UNIT_KILO (0x2UL << 29)
-	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MIN_BW_BW_VALUE_UNIT_BASE (0x4UL << 29)
-	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MIN_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29)
-	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MIN_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
-	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MIN_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
-	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MIN_BW_BW_VALUE_UNIT_LAST    QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MIN_BW_BW_VALUE_UNIT_INVALID
-	__le32 queue_id3_max_bw;
-	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MAX_BW_BW_VALUE_MASK 0xfffffffUL
-	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MAX_BW_BW_VALUE_SFT 0
-	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MAX_BW_SCALE      0x10000000UL
-	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MAX_BW_SCALE_BITS (0x0UL << 28)
-	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MAX_BW_SCALE_BYTES (0x1UL << 28)
-	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MAX_BW_SCALE_LAST    QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MAX_BW_SCALE_BYTES
-	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MAX_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
-	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MAX_BW_BW_VALUE_UNIT_SFT 29
-	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MAX_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29)
-	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MAX_BW_BW_VALUE_UNIT_KILO (0x2UL << 29)
-	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MAX_BW_BW_VALUE_UNIT_BASE (0x4UL << 29)
-	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MAX_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29)
-	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MAX_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
-	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MAX_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
-	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MAX_BW_BW_VALUE_UNIT_LAST    QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MAX_BW_BW_VALUE_UNIT_INVALID
-	u8 queue_id3_tsa_assign;
-	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_TSA_ASSIGN_SP    0x0UL
-	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_TSA_ASSIGN_ETS   0x1UL
+	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_TSA_ASSIGN_RESERVED_LAST  0xffUL
+	u8	queue_id2_pri_lvl;
+	u8	queue_id2_bw_weight;
+	u8	queue_id3;
+	__le32	queue_id3_min_bw;
+	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MIN_BW_BW_VALUE_MASK             0xfffffffUL
+	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MIN_BW_BW_VALUE_SFT              0
+	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MIN_BW_SCALE                     0x10000000UL
+	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MIN_BW_SCALE_BITS                  (0x0UL << 28)
+	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MIN_BW_SCALE_BYTES                 (0x1UL << 28)
+	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MIN_BW_SCALE_LAST                 QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MIN_BW_SCALE_BYTES
+	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MIN_BW_BW_VALUE_UNIT_MASK        0xe0000000UL
+	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MIN_BW_BW_VALUE_UNIT_SFT         29
+	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MIN_BW_BW_VALUE_UNIT_MEGA          (0x0UL << 29)
+	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MIN_BW_BW_VALUE_UNIT_KILO          (0x2UL << 29)
+	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MIN_BW_BW_VALUE_UNIT_BASE          (0x4UL << 29)
+	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MIN_BW_BW_VALUE_UNIT_GIGA          (0x6UL << 29)
+	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MIN_BW_BW_VALUE_UNIT_PERCENT1_100  (0x1UL << 29)
+	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MIN_BW_BW_VALUE_UNIT_INVALID       (0x7UL << 29)
+	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MIN_BW_BW_VALUE_UNIT_LAST         QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MIN_BW_BW_VALUE_UNIT_INVALID
+	__le32	queue_id3_max_bw;
+	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MAX_BW_BW_VALUE_MASK             0xfffffffUL
+	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MAX_BW_BW_VALUE_SFT              0
+	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MAX_BW_SCALE                     0x10000000UL
+	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MAX_BW_SCALE_BITS                  (0x0UL << 28)
+	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MAX_BW_SCALE_BYTES                 (0x1UL << 28)
+	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MAX_BW_SCALE_LAST                 QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MAX_BW_SCALE_BYTES
+	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MAX_BW_BW_VALUE_UNIT_MASK        0xe0000000UL
+	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MAX_BW_BW_VALUE_UNIT_SFT         29
+	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MAX_BW_BW_VALUE_UNIT_MEGA          (0x0UL << 29)
+	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MAX_BW_BW_VALUE_UNIT_KILO          (0x2UL << 29)
+	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MAX_BW_BW_VALUE_UNIT_BASE          (0x4UL << 29)
+	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MAX_BW_BW_VALUE_UNIT_GIGA          (0x6UL << 29)
+	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MAX_BW_BW_VALUE_UNIT_PERCENT1_100  (0x1UL << 29)
+	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MAX_BW_BW_VALUE_UNIT_INVALID       (0x7UL << 29)
+	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MAX_BW_BW_VALUE_UNIT_LAST         QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MAX_BW_BW_VALUE_UNIT_INVALID
+	u8	queue_id3_tsa_assign;
+	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_TSA_ASSIGN_SP             0x0UL
+	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_TSA_ASSIGN_ETS            0x1UL
 	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_TSA_ASSIGN_RESERVED_FIRST 0x2UL
-	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_TSA_ASSIGN_RESERVED_LAST 0xffUL
-	u8 queue_id3_pri_lvl;
-	u8 queue_id3_bw_weight;
-	u8 queue_id4;
-	__le32 queue_id4_min_bw;
-	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MIN_BW_BW_VALUE_MASK 0xfffffffUL
-	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MIN_BW_BW_VALUE_SFT 0
-	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MIN_BW_SCALE      0x10000000UL
-	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MIN_BW_SCALE_BITS (0x0UL << 28)
-	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MIN_BW_SCALE_BYTES (0x1UL << 28)
-	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MIN_BW_SCALE_LAST    QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MIN_BW_SCALE_BYTES
-	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MIN_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
-	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MIN_BW_BW_VALUE_UNIT_SFT 29
-	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MIN_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29)
-	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MIN_BW_BW_VALUE_UNIT_KILO (0x2UL << 29)
-	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MIN_BW_BW_VALUE_UNIT_BASE (0x4UL << 29)
-	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MIN_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29)
-	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MIN_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
-	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MIN_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
-	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MIN_BW_BW_VALUE_UNIT_LAST    QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MIN_BW_BW_VALUE_UNIT_INVALID
-	__le32 queue_id4_max_bw;
-	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MAX_BW_BW_VALUE_MASK 0xfffffffUL
-	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MAX_BW_BW_VALUE_SFT 0
-	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MAX_BW_SCALE      0x10000000UL
-	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MAX_BW_SCALE_BITS (0x0UL << 28)
-	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MAX_BW_SCALE_BYTES (0x1UL << 28)
-	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MAX_BW_SCALE_LAST    QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MAX_BW_SCALE_BYTES
-	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MAX_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
-	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MAX_BW_BW_VALUE_UNIT_SFT 29
-	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MAX_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29)
-	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MAX_BW_BW_VALUE_UNIT_KILO (0x2UL << 29)
-	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MAX_BW_BW_VALUE_UNIT_BASE (0x4UL << 29)
-	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MAX_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29)
-	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MAX_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
-	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MAX_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
-	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MAX_BW_BW_VALUE_UNIT_LAST    QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MAX_BW_BW_VALUE_UNIT_INVALID
-	u8 queue_id4_tsa_assign;
-	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_TSA_ASSIGN_SP    0x0UL
-	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_TSA_ASSIGN_ETS   0x1UL
+	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_TSA_ASSIGN_RESERVED_LAST  0xffUL
+	u8	queue_id3_pri_lvl;
+	u8	queue_id3_bw_weight;
+	u8	queue_id4;
+	__le32	queue_id4_min_bw;
+	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MIN_BW_BW_VALUE_MASK             0xfffffffUL
+	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MIN_BW_BW_VALUE_SFT              0
+	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MIN_BW_SCALE                     0x10000000UL
+	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MIN_BW_SCALE_BITS                  (0x0UL << 28)
+	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MIN_BW_SCALE_BYTES                 (0x1UL << 28)
+	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MIN_BW_SCALE_LAST                 QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MIN_BW_SCALE_BYTES
+	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MIN_BW_BW_VALUE_UNIT_MASK        0xe0000000UL
+	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MIN_BW_BW_VALUE_UNIT_SFT         29
+	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MIN_BW_BW_VALUE_UNIT_MEGA          (0x0UL << 29)
+	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MIN_BW_BW_VALUE_UNIT_KILO          (0x2UL << 29)
+	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MIN_BW_BW_VALUE_UNIT_BASE          (0x4UL << 29)
+	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MIN_BW_BW_VALUE_UNIT_GIGA          (0x6UL << 29)
+	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MIN_BW_BW_VALUE_UNIT_PERCENT1_100  (0x1UL << 29)
+	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MIN_BW_BW_VALUE_UNIT_INVALID       (0x7UL << 29)
+	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MIN_BW_BW_VALUE_UNIT_LAST         QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MIN_BW_BW_VALUE_UNIT_INVALID
+	__le32	queue_id4_max_bw;
+	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MAX_BW_BW_VALUE_MASK             0xfffffffUL
+	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MAX_BW_BW_VALUE_SFT              0
+	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MAX_BW_SCALE                     0x10000000UL
+	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MAX_BW_SCALE_BITS                  (0x0UL << 28)
+	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MAX_BW_SCALE_BYTES                 (0x1UL << 28)
+	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MAX_BW_SCALE_LAST                 QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MAX_BW_SCALE_BYTES
+	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MAX_BW_BW_VALUE_UNIT_MASK        0xe0000000UL
+	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MAX_BW_BW_VALUE_UNIT_SFT         29
+	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MAX_BW_BW_VALUE_UNIT_MEGA          (0x0UL << 29)
+	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MAX_BW_BW_VALUE_UNIT_KILO          (0x2UL << 29)
+	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MAX_BW_BW_VALUE_UNIT_BASE          (0x4UL << 29)
+	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MAX_BW_BW_VALUE_UNIT_GIGA          (0x6UL << 29)
+	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MAX_BW_BW_VALUE_UNIT_PERCENT1_100  (0x1UL << 29)
+	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MAX_BW_BW_VALUE_UNIT_INVALID       (0x7UL << 29)
+	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MAX_BW_BW_VALUE_UNIT_LAST         QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MAX_BW_BW_VALUE_UNIT_INVALID
+	u8	queue_id4_tsa_assign;
+	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_TSA_ASSIGN_SP             0x0UL
+	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_TSA_ASSIGN_ETS            0x1UL
 	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_TSA_ASSIGN_RESERVED_FIRST 0x2UL
-	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_TSA_ASSIGN_RESERVED_LAST 0xffUL
-	u8 queue_id4_pri_lvl;
-	u8 queue_id4_bw_weight;
-	u8 queue_id5;
-	__le32 queue_id5_min_bw;
-	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MIN_BW_BW_VALUE_MASK 0xfffffffUL
-	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MIN_BW_BW_VALUE_SFT 0
-	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MIN_BW_SCALE      0x10000000UL
-	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MIN_BW_SCALE_BITS (0x0UL << 28)
-	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MIN_BW_SCALE_BYTES (0x1UL << 28)
-	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MIN_BW_SCALE_LAST    QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MIN_BW_SCALE_BYTES
-	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MIN_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
-	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MIN_BW_BW_VALUE_UNIT_SFT 29
-	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MIN_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29)
-	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MIN_BW_BW_VALUE_UNIT_KILO (0x2UL << 29)
-	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MIN_BW_BW_VALUE_UNIT_BASE (0x4UL << 29)
-	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MIN_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29)
-	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MIN_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
-	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MIN_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
-	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MIN_BW_BW_VALUE_UNIT_LAST    QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MIN_BW_BW_VALUE_UNIT_INVALID
-	__le32 queue_id5_max_bw;
-	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MAX_BW_BW_VALUE_MASK 0xfffffffUL
-	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MAX_BW_BW_VALUE_SFT 0
-	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MAX_BW_SCALE      0x10000000UL
-	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MAX_BW_SCALE_BITS (0x0UL << 28)
-	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MAX_BW_SCALE_BYTES (0x1UL << 28)
-	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MAX_BW_SCALE_LAST    QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MAX_BW_SCALE_BYTES
-	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MAX_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
-	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MAX_BW_BW_VALUE_UNIT_SFT 29
-	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MAX_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29)
-	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MAX_BW_BW_VALUE_UNIT_KILO (0x2UL << 29)
-	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MAX_BW_BW_VALUE_UNIT_BASE (0x4UL << 29)
-	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MAX_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29)
-	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MAX_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
-	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MAX_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
-	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MAX_BW_BW_VALUE_UNIT_LAST    QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MAX_BW_BW_VALUE_UNIT_INVALID
-	u8 queue_id5_tsa_assign;
-	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_TSA_ASSIGN_SP    0x0UL
-	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_TSA_ASSIGN_ETS   0x1UL
+	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_TSA_ASSIGN_RESERVED_LAST  0xffUL
+	u8	queue_id4_pri_lvl;
+	u8	queue_id4_bw_weight;
+	u8	queue_id5;
+	__le32	queue_id5_min_bw;
+	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MIN_BW_BW_VALUE_MASK             0xfffffffUL
+	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MIN_BW_BW_VALUE_SFT              0
+	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MIN_BW_SCALE                     0x10000000UL
+	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MIN_BW_SCALE_BITS                  (0x0UL << 28)
+	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MIN_BW_SCALE_BYTES                 (0x1UL << 28)
+	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MIN_BW_SCALE_LAST                 QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MIN_BW_SCALE_BYTES
+	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MIN_BW_BW_VALUE_UNIT_MASK        0xe0000000UL
+	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MIN_BW_BW_VALUE_UNIT_SFT         29
+	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MIN_BW_BW_VALUE_UNIT_MEGA          (0x0UL << 29)
+	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MIN_BW_BW_VALUE_UNIT_KILO          (0x2UL << 29)
+	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MIN_BW_BW_VALUE_UNIT_BASE          (0x4UL << 29)
+	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MIN_BW_BW_VALUE_UNIT_GIGA          (0x6UL << 29)
+	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MIN_BW_BW_VALUE_UNIT_PERCENT1_100  (0x1UL << 29)
+	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MIN_BW_BW_VALUE_UNIT_INVALID       (0x7UL << 29)
+	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MIN_BW_BW_VALUE_UNIT_LAST         QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MIN_BW_BW_VALUE_UNIT_INVALID
+	__le32	queue_id5_max_bw;
+	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MAX_BW_BW_VALUE_MASK             0xfffffffUL
+	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MAX_BW_BW_VALUE_SFT              0
+	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MAX_BW_SCALE                     0x10000000UL
+	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MAX_BW_SCALE_BITS                  (0x0UL << 28)
+	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MAX_BW_SCALE_BYTES                 (0x1UL << 28)
+	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MAX_BW_SCALE_LAST                 QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MAX_BW_SCALE_BYTES
+	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MAX_BW_BW_VALUE_UNIT_MASK        0xe0000000UL
+	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MAX_BW_BW_VALUE_UNIT_SFT         29
+	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MAX_BW_BW_VALUE_UNIT_MEGA          (0x0UL << 29)
+	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MAX_BW_BW_VALUE_UNIT_KILO          (0x2UL << 29)
+	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MAX_BW_BW_VALUE_UNIT_BASE          (0x4UL << 29)
+	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MAX_BW_BW_VALUE_UNIT_GIGA          (0x6UL << 29)
+	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MAX_BW_BW_VALUE_UNIT_PERCENT1_100  (0x1UL << 29)
+	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MAX_BW_BW_VALUE_UNIT_INVALID       (0x7UL << 29)
+	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MAX_BW_BW_VALUE_UNIT_LAST         QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MAX_BW_BW_VALUE_UNIT_INVALID
+	u8	queue_id5_tsa_assign;
+	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_TSA_ASSIGN_SP             0x0UL
+	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_TSA_ASSIGN_ETS            0x1UL
 	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_TSA_ASSIGN_RESERVED_FIRST 0x2UL
-	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_TSA_ASSIGN_RESERVED_LAST 0xffUL
-	u8 queue_id5_pri_lvl;
-	u8 queue_id5_bw_weight;
-	u8 queue_id6;
-	__le32 queue_id6_min_bw;
-	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MIN_BW_BW_VALUE_MASK 0xfffffffUL
-	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MIN_BW_BW_VALUE_SFT 0
-	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MIN_BW_SCALE      0x10000000UL
-	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MIN_BW_SCALE_BITS (0x0UL << 28)
-	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MIN_BW_SCALE_BYTES (0x1UL << 28)
-	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MIN_BW_SCALE_LAST    QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MIN_BW_SCALE_BYTES
-	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MIN_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
-	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MIN_BW_BW_VALUE_UNIT_SFT 29
-	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MIN_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29)
-	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MIN_BW_BW_VALUE_UNIT_KILO (0x2UL << 29)
-	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MIN_BW_BW_VALUE_UNIT_BASE (0x4UL << 29)
-	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MIN_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29)
-	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MIN_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
-	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MIN_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
-	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MIN_BW_BW_VALUE_UNIT_LAST    QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MIN_BW_BW_VALUE_UNIT_INVALID
-	__le32 queue_id6_max_bw;
-	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MAX_BW_BW_VALUE_MASK 0xfffffffUL
-	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MAX_BW_BW_VALUE_SFT 0
-	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MAX_BW_SCALE      0x10000000UL
-	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MAX_BW_SCALE_BITS (0x0UL << 28)
-	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MAX_BW_SCALE_BYTES (0x1UL << 28)
-	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MAX_BW_SCALE_LAST    QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MAX_BW_SCALE_BYTES
-	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MAX_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
-	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MAX_BW_BW_VALUE_UNIT_SFT 29
-	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MAX_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29)
-	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MAX_BW_BW_VALUE_UNIT_KILO (0x2UL << 29)
-	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MAX_BW_BW_VALUE_UNIT_BASE (0x4UL << 29)
-	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MAX_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29)
-	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MAX_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
-	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MAX_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
-	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MAX_BW_BW_VALUE_UNIT_LAST    QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MAX_BW_BW_VALUE_UNIT_INVALID
-	u8 queue_id6_tsa_assign;
-	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_TSA_ASSIGN_SP    0x0UL
-	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_TSA_ASSIGN_ETS   0x1UL
+	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_TSA_ASSIGN_RESERVED_LAST  0xffUL
+	u8	queue_id5_pri_lvl;
+	u8	queue_id5_bw_weight;
+	u8	queue_id6;
+	__le32	queue_id6_min_bw;
+	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MIN_BW_BW_VALUE_MASK             0xfffffffUL
+	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MIN_BW_BW_VALUE_SFT              0
+	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MIN_BW_SCALE                     0x10000000UL
+	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MIN_BW_SCALE_BITS                  (0x0UL << 28)
+	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MIN_BW_SCALE_BYTES                 (0x1UL << 28)
+	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MIN_BW_SCALE_LAST                 QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MIN_BW_SCALE_BYTES
+	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MIN_BW_BW_VALUE_UNIT_MASK        0xe0000000UL
+	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MIN_BW_BW_VALUE_UNIT_SFT         29
+	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MIN_BW_BW_VALUE_UNIT_MEGA          (0x0UL << 29)
+	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MIN_BW_BW_VALUE_UNIT_KILO          (0x2UL << 29)
+	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MIN_BW_BW_VALUE_UNIT_BASE          (0x4UL << 29)
+	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MIN_BW_BW_VALUE_UNIT_GIGA          (0x6UL << 29)
+	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MIN_BW_BW_VALUE_UNIT_PERCENT1_100  (0x1UL << 29)
+	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MIN_BW_BW_VALUE_UNIT_INVALID       (0x7UL << 29)
+	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MIN_BW_BW_VALUE_UNIT_LAST         QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MIN_BW_BW_VALUE_UNIT_INVALID
+	__le32	queue_id6_max_bw;
+	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MAX_BW_BW_VALUE_MASK             0xfffffffUL
+	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MAX_BW_BW_VALUE_SFT              0
+	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MAX_BW_SCALE                     0x10000000UL
+	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MAX_BW_SCALE_BITS                  (0x0UL << 28)
+	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MAX_BW_SCALE_BYTES                 (0x1UL << 28)
+	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MAX_BW_SCALE_LAST                 QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MAX_BW_SCALE_BYTES
+	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MAX_BW_BW_VALUE_UNIT_MASK        0xe0000000UL
+	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MAX_BW_BW_VALUE_UNIT_SFT         29
+	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MAX_BW_BW_VALUE_UNIT_MEGA          (0x0UL << 29)
+	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MAX_BW_BW_VALUE_UNIT_KILO          (0x2UL << 29)
+	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MAX_BW_BW_VALUE_UNIT_BASE          (0x4UL << 29)
+	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MAX_BW_BW_VALUE_UNIT_GIGA          (0x6UL << 29)
+	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MAX_BW_BW_VALUE_UNIT_PERCENT1_100  (0x1UL << 29)
+	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MAX_BW_BW_VALUE_UNIT_INVALID       (0x7UL << 29)
+	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MAX_BW_BW_VALUE_UNIT_LAST         QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MAX_BW_BW_VALUE_UNIT_INVALID
+	u8	queue_id6_tsa_assign;
+	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_TSA_ASSIGN_SP             0x0UL
+	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_TSA_ASSIGN_ETS            0x1UL
 	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_TSA_ASSIGN_RESERVED_FIRST 0x2UL
-	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_TSA_ASSIGN_RESERVED_LAST 0xffUL
-	u8 queue_id6_pri_lvl;
-	u8 queue_id6_bw_weight;
-	u8 queue_id7;
-	__le32 queue_id7_min_bw;
-	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MIN_BW_BW_VALUE_MASK 0xfffffffUL
-	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MIN_BW_BW_VALUE_SFT 0
-	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MIN_BW_SCALE      0x10000000UL
-	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MIN_BW_SCALE_BITS (0x0UL << 28)
-	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MIN_BW_SCALE_BYTES (0x1UL << 28)
-	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MIN_BW_SCALE_LAST    QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MIN_BW_SCALE_BYTES
-	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MIN_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
-	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MIN_BW_BW_VALUE_UNIT_SFT 29
-	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MIN_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29)
-	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MIN_BW_BW_VALUE_UNIT_KILO (0x2UL << 29)
-	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MIN_BW_BW_VALUE_UNIT_BASE (0x4UL << 29)
-	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MIN_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29)
-	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MIN_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
-	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MIN_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
-	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MIN_BW_BW_VALUE_UNIT_LAST    QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MIN_BW_BW_VALUE_UNIT_INVALID
-	__le32 queue_id7_max_bw;
-	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MAX_BW_BW_VALUE_MASK 0xfffffffUL
-	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MAX_BW_BW_VALUE_SFT 0
-	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MAX_BW_SCALE      0x10000000UL
-	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MAX_BW_SCALE_BITS (0x0UL << 28)
-	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MAX_BW_SCALE_BYTES (0x1UL << 28)
-	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MAX_BW_SCALE_LAST    QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MAX_BW_SCALE_BYTES
-	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MAX_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
-	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MAX_BW_BW_VALUE_UNIT_SFT 29
-	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MAX_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29)
-	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MAX_BW_BW_VALUE_UNIT_KILO (0x2UL << 29)
-	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MAX_BW_BW_VALUE_UNIT_BASE (0x4UL << 29)
-	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MAX_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29)
-	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MAX_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
-	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MAX_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
-	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MAX_BW_BW_VALUE_UNIT_LAST    QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MAX_BW_BW_VALUE_UNIT_INVALID
-	u8 queue_id7_tsa_assign;
-	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_TSA_ASSIGN_SP    0x0UL
-	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_TSA_ASSIGN_ETS   0x1UL
+	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_TSA_ASSIGN_RESERVED_LAST  0xffUL
+	u8	queue_id6_pri_lvl;
+	u8	queue_id6_bw_weight;
+	u8	queue_id7;
+	__le32	queue_id7_min_bw;
+	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MIN_BW_BW_VALUE_MASK             0xfffffffUL
+	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MIN_BW_BW_VALUE_SFT              0
+	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MIN_BW_SCALE                     0x10000000UL
+	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MIN_BW_SCALE_BITS                  (0x0UL << 28)
+	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MIN_BW_SCALE_BYTES                 (0x1UL << 28)
+	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MIN_BW_SCALE_LAST                 QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MIN_BW_SCALE_BYTES
+	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MIN_BW_BW_VALUE_UNIT_MASK        0xe0000000UL
+	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MIN_BW_BW_VALUE_UNIT_SFT         29
+	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MIN_BW_BW_VALUE_UNIT_MEGA          (0x0UL << 29)
+	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MIN_BW_BW_VALUE_UNIT_KILO          (0x2UL << 29)
+	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MIN_BW_BW_VALUE_UNIT_BASE          (0x4UL << 29)
+	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MIN_BW_BW_VALUE_UNIT_GIGA          (0x6UL << 29)
+	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MIN_BW_BW_VALUE_UNIT_PERCENT1_100  (0x1UL << 29)
+	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MIN_BW_BW_VALUE_UNIT_INVALID       (0x7UL << 29)
+	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MIN_BW_BW_VALUE_UNIT_LAST         QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MIN_BW_BW_VALUE_UNIT_INVALID
+	__le32	queue_id7_max_bw;
+	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MAX_BW_BW_VALUE_MASK             0xfffffffUL
+	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MAX_BW_BW_VALUE_SFT              0
+	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MAX_BW_SCALE                     0x10000000UL
+	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MAX_BW_SCALE_BITS                  (0x0UL << 28)
+	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MAX_BW_SCALE_BYTES                 (0x1UL << 28)
+	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MAX_BW_SCALE_LAST                 QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MAX_BW_SCALE_BYTES
+	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MAX_BW_BW_VALUE_UNIT_MASK        0xe0000000UL
+	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MAX_BW_BW_VALUE_UNIT_SFT         29
+	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MAX_BW_BW_VALUE_UNIT_MEGA          (0x0UL << 29)
+	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MAX_BW_BW_VALUE_UNIT_KILO          (0x2UL << 29)
+	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MAX_BW_BW_VALUE_UNIT_BASE          (0x4UL << 29)
+	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MAX_BW_BW_VALUE_UNIT_GIGA          (0x6UL << 29)
+	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MAX_BW_BW_VALUE_UNIT_PERCENT1_100  (0x1UL << 29)
+	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MAX_BW_BW_VALUE_UNIT_INVALID       (0x7UL << 29)
+	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MAX_BW_BW_VALUE_UNIT_LAST         QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MAX_BW_BW_VALUE_UNIT_INVALID
+	u8	queue_id7_tsa_assign;
+	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_TSA_ASSIGN_SP             0x0UL
+	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_TSA_ASSIGN_ETS            0x1UL
 	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_TSA_ASSIGN_RESERVED_FIRST 0x2UL
-	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_TSA_ASSIGN_RESERVED_LAST 0xffUL
-	u8 queue_id7_pri_lvl;
-	u8 queue_id7_bw_weight;
-	u8 unused_2;
-	u8 unused_3;
-	u8 unused_4;
-	u8 unused_5;
-	u8 valid;
-};
-
-/* hwrm_queue_cos2bw_cfg */
-/* Input (128 bytes) */
+	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_TSA_ASSIGN_RESERVED_LAST  0xffUL
+	u8	queue_id7_pri_lvl;
+	u8	queue_id7_bw_weight;
+	u8	unused_2[4];
+	u8	valid;
+};
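/*
 * Annotation (not part of the generated header): every *_min_bw /
 * *_max_bw word above packs the bandwidth value in bits 0-27, a
 * bits-vs-bytes scale flag in bit 28 and the unit in bits 29-31.  The
 * decode sketch below uses the ID0 constants; the ID1..ID7 variants
 * are laid out identically.
 */
static inline u32 bnxt_bw_value(__le32 bw)
{
	return (le32_to_cpu(bw) &
		QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MIN_BW_BW_VALUE_MASK) >>
	       QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MIN_BW_BW_VALUE_SFT;
}

static inline u32 bnxt_bw_unit(__le32 bw)
{
	return (le32_to_cpu(bw) &
		QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_MASK) >>
	       QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_SFT;
}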
+
+/* hwrm_queue_cos2bw_cfg_input (size:1024b/128B) */
 struct hwrm_queue_cos2bw_cfg_input {
-	__le16 req_type;
-	__le16 cmpl_ring;
-	__le16 seq_id;
-	__le16 target_id;
-	__le64 resp_addr;
-	__le32 flags;
-	__le32 enables;
-	#define QUEUE_COS2BW_CFG_REQ_ENABLES_COS_QUEUE_ID0_VALID   0x1UL
-	#define QUEUE_COS2BW_CFG_REQ_ENABLES_COS_QUEUE_ID1_VALID   0x2UL
-	#define QUEUE_COS2BW_CFG_REQ_ENABLES_COS_QUEUE_ID2_VALID   0x4UL
-	#define QUEUE_COS2BW_CFG_REQ_ENABLES_COS_QUEUE_ID3_VALID   0x8UL
-	#define QUEUE_COS2BW_CFG_REQ_ENABLES_COS_QUEUE_ID4_VALID   0x10UL
-	#define QUEUE_COS2BW_CFG_REQ_ENABLES_COS_QUEUE_ID5_VALID   0x20UL
-	#define QUEUE_COS2BW_CFG_REQ_ENABLES_COS_QUEUE_ID6_VALID   0x40UL
-	#define QUEUE_COS2BW_CFG_REQ_ENABLES_COS_QUEUE_ID7_VALID   0x80UL
-	__le16 port_id;
-	u8 queue_id0;
-	u8 unused_0;
-	__le32 queue_id0_min_bw;
-	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MIN_BW_BW_VALUE_MASK 0xfffffffUL
-	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MIN_BW_BW_VALUE_SFT 0
-	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MIN_BW_SCALE	    0x10000000UL
-	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MIN_BW_SCALE_BITS  (0x0UL << 28)
-	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MIN_BW_SCALE_BYTES (0x1UL << 28)
-	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MIN_BW_SCALE_LAST    QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MIN_BW_SCALE_BYTES
-	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
-	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_SFT 29
-	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29)
-	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_KILO (0x2UL << 29)
-	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_BASE (0x4UL << 29)
-	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29)
-	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
-	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
-	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_LAST    QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_INVALID
-	__le32 queue_id0_max_bw;
-	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MAX_BW_BW_VALUE_MASK 0xfffffffUL
-	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MAX_BW_BW_VALUE_SFT 0
-	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MAX_BW_SCALE	    0x10000000UL
-	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MAX_BW_SCALE_BITS  (0x0UL << 28)
-	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MAX_BW_SCALE_BYTES (0x1UL << 28)
-	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MAX_BW_SCALE_LAST    QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MAX_BW_SCALE_BYTES
-	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
-	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_SFT 29
-	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29)
-	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_KILO (0x2UL << 29)
-	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_BASE (0x4UL << 29)
-	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29)
-	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
-	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
-	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_LAST    QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_INVALID
-	u8 queue_id0_tsa_assign;
-	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_TSA_ASSIGN_SP      0x0UL
-	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_TSA_ASSIGN_ETS     0x1UL
+	__le16	req_type;
+	__le16	cmpl_ring;
+	__le16	seq_id;
+	__le16	target_id;
+	__le64	resp_addr;
+	__le32	flags;
+	__le32	enables;
+	#define QUEUE_COS2BW_CFG_REQ_ENABLES_COS_QUEUE_ID0_VALID     0x1UL
+	#define QUEUE_COS2BW_CFG_REQ_ENABLES_COS_QUEUE_ID1_VALID     0x2UL
+	#define QUEUE_COS2BW_CFG_REQ_ENABLES_COS_QUEUE_ID2_VALID     0x4UL
+	#define QUEUE_COS2BW_CFG_REQ_ENABLES_COS_QUEUE_ID3_VALID     0x8UL
+	#define QUEUE_COS2BW_CFG_REQ_ENABLES_COS_QUEUE_ID4_VALID     0x10UL
+	#define QUEUE_COS2BW_CFG_REQ_ENABLES_COS_QUEUE_ID5_VALID     0x20UL
+	#define QUEUE_COS2BW_CFG_REQ_ENABLES_COS_QUEUE_ID6_VALID     0x40UL
+	#define QUEUE_COS2BW_CFG_REQ_ENABLES_COS_QUEUE_ID7_VALID     0x80UL
+	__le16	port_id;
+	u8	queue_id0;
+	u8	unused_0;
+	__le32	queue_id0_min_bw;
+	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MIN_BW_BW_VALUE_MASK             0xfffffffUL
+	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MIN_BW_BW_VALUE_SFT              0
+	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MIN_BW_SCALE                     0x10000000UL
+	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MIN_BW_SCALE_BITS                  (0x0UL << 28)
+	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MIN_BW_SCALE_BYTES                 (0x1UL << 28)
+	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MIN_BW_SCALE_LAST                 QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MIN_BW_SCALE_BYTES
+	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_MASK        0xe0000000UL
+	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_SFT         29
+	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_MEGA          (0x0UL << 29)
+	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_KILO          (0x2UL << 29)
+	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_BASE          (0x4UL << 29)
+	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_GIGA          (0x6UL << 29)
+	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_PERCENT1_100  (0x1UL << 29)
+	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_INVALID       (0x7UL << 29)
+	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_LAST         QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_INVALID
+	__le32	queue_id0_max_bw;
+	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MAX_BW_BW_VALUE_MASK             0xfffffffUL
+	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MAX_BW_BW_VALUE_SFT              0
+	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MAX_BW_SCALE                     0x10000000UL
+	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MAX_BW_SCALE_BITS                  (0x0UL << 28)
+	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MAX_BW_SCALE_BYTES                 (0x1UL << 28)
+	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MAX_BW_SCALE_LAST                 QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MAX_BW_SCALE_BYTES
+	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_MASK        0xe0000000UL
+	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_SFT         29
+	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_MEGA          (0x0UL << 29)
+	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_KILO          (0x2UL << 29)
+	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_BASE          (0x4UL << 29)
+	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_GIGA          (0x6UL << 29)
+	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_PERCENT1_100  (0x1UL << 29)
+	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_INVALID       (0x7UL << 29)
+	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_LAST         QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_INVALID
+	u8	queue_id0_tsa_assign;
+	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_TSA_ASSIGN_SP             0x0UL
+	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_TSA_ASSIGN_ETS            0x1UL
 	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_TSA_ASSIGN_RESERVED_FIRST 0x2UL
-	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_TSA_ASSIGN_RESERVED_LAST 0xffUL
-	u8 queue_id0_pri_lvl;
-	u8 queue_id0_bw_weight;
-	u8 queue_id1;
-	__le32 queue_id1_min_bw;
-	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MIN_BW_BW_VALUE_MASK 0xfffffffUL
-	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MIN_BW_BW_VALUE_SFT 0
-	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MIN_BW_SCALE	    0x10000000UL
-	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MIN_BW_SCALE_BITS  (0x0UL << 28)
-	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MIN_BW_SCALE_BYTES (0x1UL << 28)
-	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MIN_BW_SCALE_LAST    QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MIN_BW_SCALE_BYTES
-	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MIN_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
-	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MIN_BW_BW_VALUE_UNIT_SFT 29
-	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MIN_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29)
-	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MIN_BW_BW_VALUE_UNIT_KILO (0x2UL << 29)
-	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MIN_BW_BW_VALUE_UNIT_BASE (0x4UL << 29)
-	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MIN_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29)
-	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MIN_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
-	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MIN_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
-	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MIN_BW_BW_VALUE_UNIT_LAST    QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MIN_BW_BW_VALUE_UNIT_INVALID
-	__le32 queue_id1_max_bw;
-	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MAX_BW_BW_VALUE_MASK 0xfffffffUL
-	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MAX_BW_BW_VALUE_SFT 0
-	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MAX_BW_SCALE	    0x10000000UL
-	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MAX_BW_SCALE_BITS  (0x0UL << 28)
-	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MAX_BW_SCALE_BYTES (0x1UL << 28)
-	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MAX_BW_SCALE_LAST    QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MAX_BW_SCALE_BYTES
-	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MAX_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
-	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MAX_BW_BW_VALUE_UNIT_SFT 29
-	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MAX_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29)
-	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MAX_BW_BW_VALUE_UNIT_KILO (0x2UL << 29)
-	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MAX_BW_BW_VALUE_UNIT_BASE (0x4UL << 29)
-	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MAX_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29)
-	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MAX_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
-	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MAX_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
-	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MAX_BW_BW_VALUE_UNIT_LAST    QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MAX_BW_BW_VALUE_UNIT_INVALID
-	u8 queue_id1_tsa_assign;
-	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_TSA_ASSIGN_SP      0x0UL
-	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_TSA_ASSIGN_ETS     0x1UL
+	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_TSA_ASSIGN_RESERVED_LAST  0xffUL
+	u8	queue_id0_pri_lvl;
+	u8	queue_id0_bw_weight;
+	u8	queue_id1;
+	__le32	queue_id1_min_bw;
+	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MIN_BW_BW_VALUE_MASK             0xfffffffUL
+	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MIN_BW_BW_VALUE_SFT              0
+	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MIN_BW_SCALE                     0x10000000UL
+	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MIN_BW_SCALE_BITS                  (0x0UL << 28)
+	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MIN_BW_SCALE_BYTES                 (0x1UL << 28)
+	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MIN_BW_SCALE_LAST                 QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MIN_BW_SCALE_BYTES
+	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MIN_BW_BW_VALUE_UNIT_MASK        0xe0000000UL
+	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MIN_BW_BW_VALUE_UNIT_SFT         29
+	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MIN_BW_BW_VALUE_UNIT_MEGA          (0x0UL << 29)
+	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MIN_BW_BW_VALUE_UNIT_KILO          (0x2UL << 29)
+	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MIN_BW_BW_VALUE_UNIT_BASE          (0x4UL << 29)
+	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MIN_BW_BW_VALUE_UNIT_GIGA          (0x6UL << 29)
+	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MIN_BW_BW_VALUE_UNIT_PERCENT1_100  (0x1UL << 29)
+	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MIN_BW_BW_VALUE_UNIT_INVALID       (0x7UL << 29)
+	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MIN_BW_BW_VALUE_UNIT_LAST         QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MIN_BW_BW_VALUE_UNIT_INVALID
+	__le32	queue_id1_max_bw;
+	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MAX_BW_BW_VALUE_MASK             0xfffffffUL
+	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MAX_BW_BW_VALUE_SFT              0
+	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MAX_BW_SCALE                     0x10000000UL
+	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MAX_BW_SCALE_BITS                  (0x0UL << 28)
+	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MAX_BW_SCALE_BYTES                 (0x1UL << 28)
+	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MAX_BW_SCALE_LAST                 QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MAX_BW_SCALE_BYTES
+	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MAX_BW_BW_VALUE_UNIT_MASK        0xe0000000UL
+	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MAX_BW_BW_VALUE_UNIT_SFT         29
+	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MAX_BW_BW_VALUE_UNIT_MEGA          (0x0UL << 29)
+	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MAX_BW_BW_VALUE_UNIT_KILO          (0x2UL << 29)
+	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MAX_BW_BW_VALUE_UNIT_BASE          (0x4UL << 29)
+	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MAX_BW_BW_VALUE_UNIT_GIGA          (0x6UL << 29)
+	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MAX_BW_BW_VALUE_UNIT_PERCENT1_100  (0x1UL << 29)
+	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MAX_BW_BW_VALUE_UNIT_INVALID       (0x7UL << 29)
+	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MAX_BW_BW_VALUE_UNIT_LAST         QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MAX_BW_BW_VALUE_UNIT_INVALID
+	u8	queue_id1_tsa_assign;
+	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_TSA_ASSIGN_SP             0x0UL
+	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_TSA_ASSIGN_ETS            0x1UL
 	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_TSA_ASSIGN_RESERVED_FIRST 0x2UL
-	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_TSA_ASSIGN_RESERVED_LAST 0xffUL
-	u8 queue_id1_pri_lvl;
-	u8 queue_id1_bw_weight;
-	u8 queue_id2;
-	__le32 queue_id2_min_bw;
-	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MIN_BW_BW_VALUE_MASK 0xfffffffUL
-	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MIN_BW_BW_VALUE_SFT 0
-	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MIN_BW_SCALE	    0x10000000UL
-	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MIN_BW_SCALE_BITS  (0x0UL << 28)
-	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MIN_BW_SCALE_BYTES (0x1UL << 28)
-	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MIN_BW_SCALE_LAST    QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MIN_BW_SCALE_BYTES
-	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MIN_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
-	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MIN_BW_BW_VALUE_UNIT_SFT 29
-	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MIN_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29)
-	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MIN_BW_BW_VALUE_UNIT_KILO (0x2UL << 29)
-	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MIN_BW_BW_VALUE_UNIT_BASE (0x4UL << 29)
-	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MIN_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29)
-	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MIN_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
-	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MIN_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
-	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MIN_BW_BW_VALUE_UNIT_LAST    QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MIN_BW_BW_VALUE_UNIT_INVALID
-	__le32 queue_id2_max_bw;
-	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MAX_BW_BW_VALUE_MASK 0xfffffffUL
-	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MAX_BW_BW_VALUE_SFT 0
-	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MAX_BW_SCALE	    0x10000000UL
-	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MAX_BW_SCALE_BITS  (0x0UL << 28)
-	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MAX_BW_SCALE_BYTES (0x1UL << 28)
-	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MAX_BW_SCALE_LAST    QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MAX_BW_SCALE_BYTES
-	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MAX_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
-	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MAX_BW_BW_VALUE_UNIT_SFT 29
-	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MAX_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29)
-	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MAX_BW_BW_VALUE_UNIT_KILO (0x2UL << 29)
-	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MAX_BW_BW_VALUE_UNIT_BASE (0x4UL << 29)
-	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MAX_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29)
-	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MAX_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
-	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MAX_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
-	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MAX_BW_BW_VALUE_UNIT_LAST    QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MAX_BW_BW_VALUE_UNIT_INVALID
-	u8 queue_id2_tsa_assign;
-	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_TSA_ASSIGN_SP      0x0UL
-	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_TSA_ASSIGN_ETS     0x1UL
+	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_TSA_ASSIGN_RESERVED_LAST  0xffUL
+	u8	queue_id1_pri_lvl;
+	u8	queue_id1_bw_weight;
+	u8	queue_id2;
+	__le32	queue_id2_min_bw;
+	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MIN_BW_BW_VALUE_MASK             0xfffffffUL
+	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MIN_BW_BW_VALUE_SFT              0
+	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MIN_BW_SCALE                     0x10000000UL
+	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MIN_BW_SCALE_BITS                  (0x0UL << 28)
+	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MIN_BW_SCALE_BYTES                 (0x1UL << 28)
+	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MIN_BW_SCALE_LAST                 QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MIN_BW_SCALE_BYTES
+	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MIN_BW_BW_VALUE_UNIT_MASK        0xe0000000UL
+	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MIN_BW_BW_VALUE_UNIT_SFT         29
+	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MIN_BW_BW_VALUE_UNIT_MEGA          (0x0UL << 29)
+	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MIN_BW_BW_VALUE_UNIT_KILO          (0x2UL << 29)
+	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MIN_BW_BW_VALUE_UNIT_BASE          (0x4UL << 29)
+	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MIN_BW_BW_VALUE_UNIT_GIGA          (0x6UL << 29)
+	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MIN_BW_BW_VALUE_UNIT_PERCENT1_100  (0x1UL << 29)
+	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MIN_BW_BW_VALUE_UNIT_INVALID       (0x7UL << 29)
+	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MIN_BW_BW_VALUE_UNIT_LAST         QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MIN_BW_BW_VALUE_UNIT_INVALID
+	__le32	queue_id2_max_bw;
+	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MAX_BW_BW_VALUE_MASK             0xfffffffUL
+	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MAX_BW_BW_VALUE_SFT              0
+	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MAX_BW_SCALE                     0x10000000UL
+	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MAX_BW_SCALE_BITS                  (0x0UL << 28)
+	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MAX_BW_SCALE_BYTES                 (0x1UL << 28)
+	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MAX_BW_SCALE_LAST                 QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MAX_BW_SCALE_BYTES
+	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MAX_BW_BW_VALUE_UNIT_MASK        0xe0000000UL
+	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MAX_BW_BW_VALUE_UNIT_SFT         29
+	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MAX_BW_BW_VALUE_UNIT_MEGA          (0x0UL << 29)
+	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MAX_BW_BW_VALUE_UNIT_KILO          (0x2UL << 29)
+	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MAX_BW_BW_VALUE_UNIT_BASE          (0x4UL << 29)
+	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MAX_BW_BW_VALUE_UNIT_GIGA          (0x6UL << 29)
+	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MAX_BW_BW_VALUE_UNIT_PERCENT1_100  (0x1UL << 29)
+	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MAX_BW_BW_VALUE_UNIT_INVALID       (0x7UL << 29)
+	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MAX_BW_BW_VALUE_UNIT_LAST         QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MAX_BW_BW_VALUE_UNIT_INVALID
+	u8	queue_id2_tsa_assign;
+	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_TSA_ASSIGN_SP             0x0UL
+	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_TSA_ASSIGN_ETS            0x1UL
 	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_TSA_ASSIGN_RESERVED_FIRST 0x2UL
-	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_TSA_ASSIGN_RESERVED_LAST 0xffUL
-	u8 queue_id2_pri_lvl;
-	u8 queue_id2_bw_weight;
-	u8 queue_id3;
-	__le32 queue_id3_min_bw;
-	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MIN_BW_BW_VALUE_MASK 0xfffffffUL
-	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MIN_BW_BW_VALUE_SFT 0
-	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MIN_BW_SCALE	    0x10000000UL
-	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MIN_BW_SCALE_BITS  (0x0UL << 28)
-	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MIN_BW_SCALE_BYTES (0x1UL << 28)
-	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MIN_BW_SCALE_LAST    QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MIN_BW_SCALE_BYTES
-	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MIN_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
-	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MIN_BW_BW_VALUE_UNIT_SFT 29
-	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MIN_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29)
-	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MIN_BW_BW_VALUE_UNIT_KILO (0x2UL << 29)
-	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MIN_BW_BW_VALUE_UNIT_BASE (0x4UL << 29)
-	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MIN_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29)
-	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MIN_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
-	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MIN_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
-	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MIN_BW_BW_VALUE_UNIT_LAST    QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MIN_BW_BW_VALUE_UNIT_INVALID
-	__le32 queue_id3_max_bw;
-	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MAX_BW_BW_VALUE_MASK 0xfffffffUL
-	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MAX_BW_BW_VALUE_SFT 0
-	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MAX_BW_SCALE	    0x10000000UL
-	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MAX_BW_SCALE_BITS  (0x0UL << 28)
-	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MAX_BW_SCALE_BYTES (0x1UL << 28)
-	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MAX_BW_SCALE_LAST    QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MAX_BW_SCALE_BYTES
-	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MAX_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
-	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MAX_BW_BW_VALUE_UNIT_SFT 29
-	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MAX_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29)
-	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MAX_BW_BW_VALUE_UNIT_KILO (0x2UL << 29)
-	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MAX_BW_BW_VALUE_UNIT_BASE (0x4UL << 29)
-	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MAX_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29)
-	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MAX_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
-	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MAX_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
-	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MAX_BW_BW_VALUE_UNIT_LAST    QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MAX_BW_BW_VALUE_UNIT_INVALID
-	u8 queue_id3_tsa_assign;
-	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_TSA_ASSIGN_SP      0x0UL
-	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_TSA_ASSIGN_ETS     0x1UL
+	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_TSA_ASSIGN_RESERVED_LAST  0xffUL
+	u8	queue_id2_pri_lvl;
+	u8	queue_id2_bw_weight;
+	u8	queue_id3;
+	__le32	queue_id3_min_bw;
+	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MIN_BW_BW_VALUE_MASK             0xfffffffUL
+	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MIN_BW_BW_VALUE_SFT              0
+	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MIN_BW_SCALE                     0x10000000UL
+	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MIN_BW_SCALE_BITS                  (0x0UL << 28)
+	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MIN_BW_SCALE_BYTES                 (0x1UL << 28)
+	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MIN_BW_SCALE_LAST                 QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MIN_BW_SCALE_BYTES
+	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MIN_BW_BW_VALUE_UNIT_MASK        0xe0000000UL
+	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MIN_BW_BW_VALUE_UNIT_SFT         29
+	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MIN_BW_BW_VALUE_UNIT_MEGA          (0x0UL << 29)
+	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MIN_BW_BW_VALUE_UNIT_KILO          (0x2UL << 29)
+	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MIN_BW_BW_VALUE_UNIT_BASE          (0x4UL << 29)
+	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MIN_BW_BW_VALUE_UNIT_GIGA          (0x6UL << 29)
+	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MIN_BW_BW_VALUE_UNIT_PERCENT1_100  (0x1UL << 29)
+	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MIN_BW_BW_VALUE_UNIT_INVALID       (0x7UL << 29)
+	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MIN_BW_BW_VALUE_UNIT_LAST         QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MIN_BW_BW_VALUE_UNIT_INVALID
+	__le32	queue_id3_max_bw;
+	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MAX_BW_BW_VALUE_MASK             0xfffffffUL
+	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MAX_BW_BW_VALUE_SFT              0
+	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MAX_BW_SCALE                     0x10000000UL
+	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MAX_BW_SCALE_BITS                  (0x0UL << 28)
+	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MAX_BW_SCALE_BYTES                 (0x1UL << 28)
+	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MAX_BW_SCALE_LAST                 QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MAX_BW_SCALE_BYTES
+	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MAX_BW_BW_VALUE_UNIT_MASK        0xe0000000UL
+	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MAX_BW_BW_VALUE_UNIT_SFT         29
+	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MAX_BW_BW_VALUE_UNIT_MEGA          (0x0UL << 29)
+	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MAX_BW_BW_VALUE_UNIT_KILO          (0x2UL << 29)
+	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MAX_BW_BW_VALUE_UNIT_BASE          (0x4UL << 29)
+	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MAX_BW_BW_VALUE_UNIT_GIGA          (0x6UL << 29)
+	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MAX_BW_BW_VALUE_UNIT_PERCENT1_100  (0x1UL << 29)
+	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MAX_BW_BW_VALUE_UNIT_INVALID       (0x7UL << 29)
+	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MAX_BW_BW_VALUE_UNIT_LAST         QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MAX_BW_BW_VALUE_UNIT_INVALID
+	u8	queue_id3_tsa_assign;
+	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_TSA_ASSIGN_SP             0x0UL
+	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_TSA_ASSIGN_ETS            0x1UL
 	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_TSA_ASSIGN_RESERVED_FIRST 0x2UL
-	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_TSA_ASSIGN_RESERVED_LAST 0xffUL
-	u8 queue_id3_pri_lvl;
-	u8 queue_id3_bw_weight;
-	u8 queue_id4;
-	__le32 queue_id4_min_bw;
-	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MIN_BW_BW_VALUE_MASK 0xfffffffUL
-	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MIN_BW_BW_VALUE_SFT 0
-	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MIN_BW_SCALE	    0x10000000UL
-	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MIN_BW_SCALE_BITS  (0x0UL << 28)
-	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MIN_BW_SCALE_BYTES (0x1UL << 28)
-	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MIN_BW_SCALE_LAST    QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MIN_BW_SCALE_BYTES
-	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MIN_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
-	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MIN_BW_BW_VALUE_UNIT_SFT 29
-	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MIN_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29)
-	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MIN_BW_BW_VALUE_UNIT_KILO (0x2UL << 29)
-	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MIN_BW_BW_VALUE_UNIT_BASE (0x4UL << 29)
-	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MIN_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29)
-	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MIN_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
-	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MIN_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
-	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MIN_BW_BW_VALUE_UNIT_LAST    QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MIN_BW_BW_VALUE_UNIT_INVALID
-	__le32 queue_id4_max_bw;
-	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MAX_BW_BW_VALUE_MASK 0xfffffffUL
-	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MAX_BW_BW_VALUE_SFT 0
-	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MAX_BW_SCALE	    0x10000000UL
-	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MAX_BW_SCALE_BITS  (0x0UL << 28)
-	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MAX_BW_SCALE_BYTES (0x1UL << 28)
-	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MAX_BW_SCALE_LAST    QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MAX_BW_SCALE_BYTES
-	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MAX_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
-	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MAX_BW_BW_VALUE_UNIT_SFT 29
-	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MAX_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29)
-	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MAX_BW_BW_VALUE_UNIT_KILO (0x2UL << 29)
-	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MAX_BW_BW_VALUE_UNIT_BASE (0x4UL << 29)
-	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MAX_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29)
-	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MAX_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
-	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MAX_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
-	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MAX_BW_BW_VALUE_UNIT_LAST    QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MAX_BW_BW_VALUE_UNIT_INVALID
-	u8 queue_id4_tsa_assign;
-	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_TSA_ASSIGN_SP      0x0UL
-	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_TSA_ASSIGN_ETS     0x1UL
+	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_TSA_ASSIGN_RESERVED_LAST  0xffUL
+	u8	queue_id3_pri_lvl;
+	u8	queue_id3_bw_weight;
+	u8	queue_id4;
+	__le32	queue_id4_min_bw;
+	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MIN_BW_BW_VALUE_MASK             0xfffffffUL
+	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MIN_BW_BW_VALUE_SFT              0
+	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MIN_BW_SCALE                     0x10000000UL
+	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MIN_BW_SCALE_BITS                  (0x0UL << 28)
+	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MIN_BW_SCALE_BYTES                 (0x1UL << 28)
+	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MIN_BW_SCALE_LAST                 QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MIN_BW_SCALE_BYTES
+	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MIN_BW_BW_VALUE_UNIT_MASK        0xe0000000UL
+	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MIN_BW_BW_VALUE_UNIT_SFT         29
+	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MIN_BW_BW_VALUE_UNIT_MEGA          (0x0UL << 29)
+	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MIN_BW_BW_VALUE_UNIT_KILO          (0x2UL << 29)
+	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MIN_BW_BW_VALUE_UNIT_BASE          (0x4UL << 29)
+	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MIN_BW_BW_VALUE_UNIT_GIGA          (0x6UL << 29)
+	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MIN_BW_BW_VALUE_UNIT_PERCENT1_100  (0x1UL << 29)
+	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MIN_BW_BW_VALUE_UNIT_INVALID       (0x7UL << 29)
+	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MIN_BW_BW_VALUE_UNIT_LAST         QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MIN_BW_BW_VALUE_UNIT_INVALID
+	__le32	queue_id4_max_bw;
+	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MAX_BW_BW_VALUE_MASK             0xfffffffUL
+	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MAX_BW_BW_VALUE_SFT              0
+	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MAX_BW_SCALE                     0x10000000UL
+	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MAX_BW_SCALE_BITS                  (0x0UL << 28)
+	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MAX_BW_SCALE_BYTES                 (0x1UL << 28)
+	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MAX_BW_SCALE_LAST                 QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MAX_BW_SCALE_BYTES
+	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MAX_BW_BW_VALUE_UNIT_MASK        0xe0000000UL
+	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MAX_BW_BW_VALUE_UNIT_SFT         29
+	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MAX_BW_BW_VALUE_UNIT_MEGA          (0x0UL << 29)
+	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MAX_BW_BW_VALUE_UNIT_KILO          (0x2UL << 29)
+	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MAX_BW_BW_VALUE_UNIT_BASE          (0x4UL << 29)
+	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MAX_BW_BW_VALUE_UNIT_GIGA          (0x6UL << 29)
+	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MAX_BW_BW_VALUE_UNIT_PERCENT1_100  (0x1UL << 29)
+	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MAX_BW_BW_VALUE_UNIT_INVALID       (0x7UL << 29)
+	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MAX_BW_BW_VALUE_UNIT_LAST         QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MAX_BW_BW_VALUE_UNIT_INVALID
+	u8	queue_id4_tsa_assign;
+	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_TSA_ASSIGN_SP             0x0UL
+	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_TSA_ASSIGN_ETS            0x1UL
 	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_TSA_ASSIGN_RESERVED_FIRST 0x2UL
-	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_TSA_ASSIGN_RESERVED_LAST 0xffUL
-	u8 queue_id4_pri_lvl;
-	u8 queue_id4_bw_weight;
-	u8 queue_id5;
-	__le32 queue_id5_min_bw;
-	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MIN_BW_BW_VALUE_MASK 0xfffffffUL
-	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MIN_BW_BW_VALUE_SFT 0
-	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MIN_BW_SCALE	    0x10000000UL
-	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MIN_BW_SCALE_BITS  (0x0UL << 28)
-	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MIN_BW_SCALE_BYTES (0x1UL << 28)
-	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MIN_BW_SCALE_LAST    QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MIN_BW_SCALE_BYTES
-	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MIN_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
-	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MIN_BW_BW_VALUE_UNIT_SFT 29
-	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MIN_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29)
-	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MIN_BW_BW_VALUE_UNIT_KILO (0x2UL << 29)
-	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MIN_BW_BW_VALUE_UNIT_BASE (0x4UL << 29)
-	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MIN_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29)
-	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MIN_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
-	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MIN_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
-	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MIN_BW_BW_VALUE_UNIT_LAST    QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MIN_BW_BW_VALUE_UNIT_INVALID
-	__le32 queue_id5_max_bw;
-	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MAX_BW_BW_VALUE_MASK 0xfffffffUL
-	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MAX_BW_BW_VALUE_SFT 0
-	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MAX_BW_SCALE	    0x10000000UL
-	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MAX_BW_SCALE_BITS  (0x0UL << 28)
-	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MAX_BW_SCALE_BYTES (0x1UL << 28)
-	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MAX_BW_SCALE_LAST    QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MAX_BW_SCALE_BYTES
-	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MAX_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
-	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MAX_BW_BW_VALUE_UNIT_SFT 29
-	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MAX_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29)
-	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MAX_BW_BW_VALUE_UNIT_KILO (0x2UL << 29)
-	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MAX_BW_BW_VALUE_UNIT_BASE (0x4UL << 29)
-	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MAX_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29)
-	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MAX_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
-	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MAX_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
-	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MAX_BW_BW_VALUE_UNIT_LAST    QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MAX_BW_BW_VALUE_UNIT_INVALID
-	u8 queue_id5_tsa_assign;
-	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_TSA_ASSIGN_SP      0x0UL
-	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_TSA_ASSIGN_ETS     0x1UL
+	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_TSA_ASSIGN_RESERVED_LAST  0xffUL
+	u8	queue_id4_pri_lvl;
+	u8	queue_id4_bw_weight;
+	u8	queue_id5;
+	__le32	queue_id5_min_bw;
+	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MIN_BW_BW_VALUE_MASK             0xfffffffUL
+	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MIN_BW_BW_VALUE_SFT              0
+	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MIN_BW_SCALE                     0x10000000UL
+	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MIN_BW_SCALE_BITS                  (0x0UL << 28)
+	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MIN_BW_SCALE_BYTES                 (0x1UL << 28)
+	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MIN_BW_SCALE_LAST                 QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MIN_BW_SCALE_BYTES
+	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MIN_BW_BW_VALUE_UNIT_MASK        0xe0000000UL
+	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MIN_BW_BW_VALUE_UNIT_SFT         29
+	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MIN_BW_BW_VALUE_UNIT_MEGA          (0x0UL << 29)
+	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MIN_BW_BW_VALUE_UNIT_KILO          (0x2UL << 29)
+	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MIN_BW_BW_VALUE_UNIT_BASE          (0x4UL << 29)
+	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MIN_BW_BW_VALUE_UNIT_GIGA          (0x6UL << 29)
+	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MIN_BW_BW_VALUE_UNIT_PERCENT1_100  (0x1UL << 29)
+	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MIN_BW_BW_VALUE_UNIT_INVALID       (0x7UL << 29)
+	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MIN_BW_BW_VALUE_UNIT_LAST         QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MIN_BW_BW_VALUE_UNIT_INVALID
+	__le32	queue_id5_max_bw;
+	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MAX_BW_BW_VALUE_MASK             0xfffffffUL
+	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MAX_BW_BW_VALUE_SFT              0
+	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MAX_BW_SCALE                     0x10000000UL
+	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MAX_BW_SCALE_BITS                  (0x0UL << 28)
+	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MAX_BW_SCALE_BYTES                 (0x1UL << 28)
+	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MAX_BW_SCALE_LAST                 QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MAX_BW_SCALE_BYTES
+	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MAX_BW_BW_VALUE_UNIT_MASK        0xe0000000UL
+	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MAX_BW_BW_VALUE_UNIT_SFT         29
+	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MAX_BW_BW_VALUE_UNIT_MEGA          (0x0UL << 29)
+	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MAX_BW_BW_VALUE_UNIT_KILO          (0x2UL << 29)
+	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MAX_BW_BW_VALUE_UNIT_BASE          (0x4UL << 29)
+	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MAX_BW_BW_VALUE_UNIT_GIGA          (0x6UL << 29)
+	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MAX_BW_BW_VALUE_UNIT_PERCENT1_100  (0x1UL << 29)
+	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MAX_BW_BW_VALUE_UNIT_INVALID       (0x7UL << 29)
+	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MAX_BW_BW_VALUE_UNIT_LAST         QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MAX_BW_BW_VALUE_UNIT_INVALID
+	u8	queue_id5_tsa_assign;
+	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_TSA_ASSIGN_SP             0x0UL
+	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_TSA_ASSIGN_ETS            0x1UL
 	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_TSA_ASSIGN_RESERVED_FIRST 0x2UL
-	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_TSA_ASSIGN_RESERVED_LAST 0xffUL
-	u8 queue_id5_pri_lvl;
-	u8 queue_id5_bw_weight;
-	u8 queue_id6;
-	__le32 queue_id6_min_bw;
-	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MIN_BW_BW_VALUE_MASK 0xfffffffUL
-	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MIN_BW_BW_VALUE_SFT 0
-	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MIN_BW_SCALE	    0x10000000UL
-	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MIN_BW_SCALE_BITS  (0x0UL << 28)
-	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MIN_BW_SCALE_BYTES (0x1UL << 28)
-	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MIN_BW_SCALE_LAST    QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MIN_BW_SCALE_BYTES
-	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MIN_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
-	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MIN_BW_BW_VALUE_UNIT_SFT 29
-	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MIN_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29)
-	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MIN_BW_BW_VALUE_UNIT_KILO (0x2UL << 29)
-	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MIN_BW_BW_VALUE_UNIT_BASE (0x4UL << 29)
-	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MIN_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29)
-	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MIN_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
-	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MIN_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
-	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MIN_BW_BW_VALUE_UNIT_LAST    QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MIN_BW_BW_VALUE_UNIT_INVALID
-	__le32 queue_id6_max_bw;
-	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MAX_BW_BW_VALUE_MASK 0xfffffffUL
-	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MAX_BW_BW_VALUE_SFT 0
-	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MAX_BW_SCALE	    0x10000000UL
-	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MAX_BW_SCALE_BITS  (0x0UL << 28)
-	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MAX_BW_SCALE_BYTES (0x1UL << 28)
-	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MAX_BW_SCALE_LAST    QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MAX_BW_SCALE_BYTES
-	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MAX_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
-	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MAX_BW_BW_VALUE_UNIT_SFT 29
-	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MAX_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29)
-	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MAX_BW_BW_VALUE_UNIT_KILO (0x2UL << 29)
-	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MAX_BW_BW_VALUE_UNIT_BASE (0x4UL << 29)
-	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MAX_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29)
-	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MAX_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
-	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MAX_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
-	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MAX_BW_BW_VALUE_UNIT_LAST    QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MAX_BW_BW_VALUE_UNIT_INVALID
-	u8 queue_id6_tsa_assign;
-	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_TSA_ASSIGN_SP      0x0UL
-	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_TSA_ASSIGN_ETS     0x1UL
+	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_TSA_ASSIGN_RESERVED_LAST  0xffUL
+	u8	queue_id5_pri_lvl;
+	u8	queue_id5_bw_weight;
+	u8	queue_id6;
+	__le32	queue_id6_min_bw;
+	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MIN_BW_BW_VALUE_MASK             0xfffffffUL
+	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MIN_BW_BW_VALUE_SFT              0
+	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MIN_BW_SCALE                     0x10000000UL
+	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MIN_BW_SCALE_BITS                  (0x0UL << 28)
+	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MIN_BW_SCALE_BYTES                 (0x1UL << 28)
+	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MIN_BW_SCALE_LAST                 QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MIN_BW_SCALE_BYTES
+	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MIN_BW_BW_VALUE_UNIT_MASK        0xe0000000UL
+	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MIN_BW_BW_VALUE_UNIT_SFT         29
+	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MIN_BW_BW_VALUE_UNIT_MEGA          (0x0UL << 29)
+	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MIN_BW_BW_VALUE_UNIT_KILO          (0x2UL << 29)
+	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MIN_BW_BW_VALUE_UNIT_BASE          (0x4UL << 29)
+	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MIN_BW_BW_VALUE_UNIT_GIGA          (0x6UL << 29)
+	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MIN_BW_BW_VALUE_UNIT_PERCENT1_100  (0x1UL << 29)
+	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MIN_BW_BW_VALUE_UNIT_INVALID       (0x7UL << 29)
+	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MIN_BW_BW_VALUE_UNIT_LAST         QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MIN_BW_BW_VALUE_UNIT_INVALID
+	__le32	queue_id6_max_bw;
+	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MAX_BW_BW_VALUE_MASK             0xfffffffUL
+	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MAX_BW_BW_VALUE_SFT              0
+	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MAX_BW_SCALE                     0x10000000UL
+	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MAX_BW_SCALE_BITS                  (0x0UL << 28)
+	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MAX_BW_SCALE_BYTES                 (0x1UL << 28)
+	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MAX_BW_SCALE_LAST                 QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MAX_BW_SCALE_BYTES
+	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MAX_BW_BW_VALUE_UNIT_MASK        0xe0000000UL
+	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MAX_BW_BW_VALUE_UNIT_SFT         29
+	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MAX_BW_BW_VALUE_UNIT_MEGA          (0x0UL << 29)
+	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MAX_BW_BW_VALUE_UNIT_KILO          (0x2UL << 29)
+	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MAX_BW_BW_VALUE_UNIT_BASE          (0x4UL << 29)
+	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MAX_BW_BW_VALUE_UNIT_GIGA          (0x6UL << 29)
+	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MAX_BW_BW_VALUE_UNIT_PERCENT1_100  (0x1UL << 29)
+	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MAX_BW_BW_VALUE_UNIT_INVALID       (0x7UL << 29)
+	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MAX_BW_BW_VALUE_UNIT_LAST         QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MAX_BW_BW_VALUE_UNIT_INVALID
+	u8	queue_id6_tsa_assign;
+	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_TSA_ASSIGN_SP             0x0UL
+	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_TSA_ASSIGN_ETS            0x1UL
 	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_TSA_ASSIGN_RESERVED_FIRST 0x2UL
-	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_TSA_ASSIGN_RESERVED_LAST 0xffUL
-	u8 queue_id6_pri_lvl;
-	u8 queue_id6_bw_weight;
-	u8 queue_id7;
-	__le32 queue_id7_min_bw;
-	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MIN_BW_BW_VALUE_MASK 0xfffffffUL
-	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MIN_BW_BW_VALUE_SFT 0
-	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MIN_BW_SCALE	    0x10000000UL
-	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MIN_BW_SCALE_BITS  (0x0UL << 28)
-	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MIN_BW_SCALE_BYTES (0x1UL << 28)
-	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MIN_BW_SCALE_LAST    QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MIN_BW_SCALE_BYTES
-	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MIN_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
-	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MIN_BW_BW_VALUE_UNIT_SFT 29
-	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MIN_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29)
-	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MIN_BW_BW_VALUE_UNIT_KILO (0x2UL << 29)
-	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MIN_BW_BW_VALUE_UNIT_BASE (0x4UL << 29)
-	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MIN_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29)
-	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MIN_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
-	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MIN_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
-	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MIN_BW_BW_VALUE_UNIT_LAST    QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MIN_BW_BW_VALUE_UNIT_INVALID
-	__le32 queue_id7_max_bw;
-	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MAX_BW_BW_VALUE_MASK 0xfffffffUL
-	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MAX_BW_BW_VALUE_SFT 0
-	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MAX_BW_SCALE	    0x10000000UL
-	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MAX_BW_SCALE_BITS  (0x0UL << 28)
-	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MAX_BW_SCALE_BYTES (0x1UL << 28)
-	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MAX_BW_SCALE_LAST    QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MAX_BW_SCALE_BYTES
-	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MAX_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
-	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MAX_BW_BW_VALUE_UNIT_SFT 29
-	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MAX_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29)
-	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MAX_BW_BW_VALUE_UNIT_KILO (0x2UL << 29)
-	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MAX_BW_BW_VALUE_UNIT_BASE (0x4UL << 29)
-	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MAX_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29)
-	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MAX_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
-	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MAX_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
-	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MAX_BW_BW_VALUE_UNIT_LAST    QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MAX_BW_BW_VALUE_UNIT_INVALID
-	u8 queue_id7_tsa_assign;
-	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_TSA_ASSIGN_SP      0x0UL
-	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_TSA_ASSIGN_ETS     0x1UL
+	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_TSA_ASSIGN_RESERVED_LAST  0xffUL
+	u8	queue_id6_pri_lvl;
+	u8	queue_id6_bw_weight;
+	u8	queue_id7;
+	__le32	queue_id7_min_bw;
+	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MIN_BW_BW_VALUE_MASK             0xfffffffUL
+	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MIN_BW_BW_VALUE_SFT              0
+	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MIN_BW_SCALE                     0x10000000UL
+	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MIN_BW_SCALE_BITS                  (0x0UL << 28)
+	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MIN_BW_SCALE_BYTES                 (0x1UL << 28)
+	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MIN_BW_SCALE_LAST                 QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MIN_BW_SCALE_BYTES
+	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MIN_BW_BW_VALUE_UNIT_MASK        0xe0000000UL
+	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MIN_BW_BW_VALUE_UNIT_SFT         29
+	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MIN_BW_BW_VALUE_UNIT_MEGA          (0x0UL << 29)
+	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MIN_BW_BW_VALUE_UNIT_KILO          (0x2UL << 29)
+	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MIN_BW_BW_VALUE_UNIT_BASE          (0x4UL << 29)
+	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MIN_BW_BW_VALUE_UNIT_GIGA          (0x6UL << 29)
+	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MIN_BW_BW_VALUE_UNIT_PERCENT1_100  (0x1UL << 29)
+	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MIN_BW_BW_VALUE_UNIT_INVALID       (0x7UL << 29)
+	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MIN_BW_BW_VALUE_UNIT_LAST         QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MIN_BW_BW_VALUE_UNIT_INVALID
+	__le32	queue_id7_max_bw;
+	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MAX_BW_BW_VALUE_MASK             0xfffffffUL
+	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MAX_BW_BW_VALUE_SFT              0
+	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MAX_BW_SCALE                     0x10000000UL
+	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MAX_BW_SCALE_BITS                  (0x0UL << 28)
+	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MAX_BW_SCALE_BYTES                 (0x1UL << 28)
+	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MAX_BW_SCALE_LAST                 QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MAX_BW_SCALE_BYTES
+	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MAX_BW_BW_VALUE_UNIT_MASK        0xe0000000UL
+	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MAX_BW_BW_VALUE_UNIT_SFT         29
+	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MAX_BW_BW_VALUE_UNIT_MEGA          (0x0UL << 29)
+	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MAX_BW_BW_VALUE_UNIT_KILO          (0x2UL << 29)
+	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MAX_BW_BW_VALUE_UNIT_BASE          (0x4UL << 29)
+	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MAX_BW_BW_VALUE_UNIT_GIGA          (0x6UL << 29)
+	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MAX_BW_BW_VALUE_UNIT_PERCENT1_100  (0x1UL << 29)
+	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MAX_BW_BW_VALUE_UNIT_INVALID       (0x7UL << 29)
+	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MAX_BW_BW_VALUE_UNIT_LAST         QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MAX_BW_BW_VALUE_UNIT_INVALID
+	u8	queue_id7_tsa_assign;
+	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_TSA_ASSIGN_SP             0x0UL
+	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_TSA_ASSIGN_ETS            0x1UL
 	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_TSA_ASSIGN_RESERVED_FIRST 0x2UL
-	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_TSA_ASSIGN_RESERVED_LAST 0xffUL
-	u8 queue_id7_pri_lvl;
-	u8 queue_id7_bw_weight;
-	u8 unused_1[5];
+	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_TSA_ASSIGN_RESERVED_LAST  0xffUL
+	u8	queue_id7_pri_lvl;
+	u8	queue_id7_bw_weight;
+	u8	unused_1[5];
 };
 
-/* Output (16 bytes) */
+/* hwrm_queue_cos2bw_cfg_output (size:128b/16B) */
 struct hwrm_queue_cos2bw_cfg_output {
-	__le16 error_code;
-	__le16 req_type;
-	__le16 seq_id;
-	__le16 resp_len;
-	__le32 unused_0;
-	u8 unused_1;
-	u8 unused_2;
-	u8 unused_3;
-	u8 valid;
-};
-
-/* hwrm_queue_dscp_qcaps */
-/* Input (24 bytes) */
+	__le16	error_code;
+	__le16	req_type;
+	__le16	seq_id;
+	__le16	resp_len;
+	u8	unused_0[7];
+	u8	valid;
+};
+
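Every HWRM response in this file ends with padding followed by a single valid byte. Firmware DMAs the response into host memory, so a driver can poll that final byte to know the whole structure has landed before reading any earlier field. A hedged sketch of that pattern (the helper name and the 1 ms poll budget are illustrative, not the in-tree implementation):

    static bool hwrm_resp_ready(struct hwrm_queue_cos2bw_cfg_output *resp)
    {
        int i;

        /* 'valid' is the last byte the firmware writes */
        for (i = 0; i < 1000; i++) {
            if (READ_ONCE(resp->valid))
                return true;
            udelay(1);
        }
        return false;
    }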
+/* hwrm_queue_dscp_qcaps_input (size:192b/24B) */
 struct hwrm_queue_dscp_qcaps_input {
-	__le16 req_type;
-	__le16 cmpl_ring;
-	__le16 seq_id;
-	__le16 target_id;
-	__le64 resp_addr;
-	u8 port_id;
-	u8 unused_0[7];
+	__le16	req_type;
+	__le16	cmpl_ring;
+	__le16	seq_id;
+	__le16	target_id;
+	__le64	resp_addr;
+	u8	port_id;
+	u8	unused_0[7];
 };
 
-/* Output (16 bytes) */
+/* hwrm_queue_dscp_qcaps_output (size:128b/16B) */
 struct hwrm_queue_dscp_qcaps_output {
-	__le16 error_code;
-	__le16 req_type;
-	__le16 seq_id;
-	__le16 resp_len;
-	u8 num_dscp_bits;
-	u8 unused_0;
-	__le16 max_entries;
-	u8 unused_1;
-	u8 unused_2;
-	u8 unused_3;
-	u8 valid;
-};
-
-/* hwrm_queue_dscp2pri_qcfg */
-/* Input (32 bytes) */
+	__le16	error_code;
+	__le16	req_type;
+	__le16	seq_id;
+	__le16	resp_len;
+	u8	num_dscp_bits;
+	u8	unused_0;
+	__le16	max_entries;
+	u8	unused_1[3];
+	u8	valid;
+};
+
+/* hwrm_queue_dscp2pri_qcfg_input (size:256b/32B) */
 struct hwrm_queue_dscp2pri_qcfg_input {
-	__le16 req_type;
-	__le16 cmpl_ring;
-	__le16 seq_id;
-	__le16 target_id;
-	__le64 resp_addr;
-	__le64 dest_data_addr;
-	u8 port_id;
-	u8 unused_0;
-	__le16 dest_data_buffer_size;
-	__le32 unused_1;
-};
-
-/* Output (16 bytes) */
+	__le16	req_type;
+	__le16	cmpl_ring;
+	__le16	seq_id;
+	__le16	target_id;
+	__le64	resp_addr;
+	__le64	dest_data_addr;
+	u8	port_id;
+	u8	unused_0;
+	__le16	dest_data_buffer_size;
+	u8	unused_1[4];
+};
+
+/* hwrm_queue_dscp2pri_qcfg_output (size:128b/16B) */
 struct hwrm_queue_dscp2pri_qcfg_output {
-	__le16 error_code;
-	__le16 req_type;
-	__le16 seq_id;
-	__le16 resp_len;
-	__le16 entry_cnt;
-	u8 default_pri;
-	u8 unused_0;
-	u8 unused_1;
-	u8 unused_2;
-	u8 unused_3;
-	u8 valid;
-};
-
-/* hwrm_queue_dscp2pri_cfg */
-/* Input (40 bytes) */
+	__le16	error_code;
+	__le16	req_type;
+	__le16	seq_id;
+	__le16	resp_len;
+	__le16	entry_cnt;
+	u8	default_pri;
+	u8	unused_0[4];
+	u8	valid;
+};
+
+/* hwrm_queue_dscp2pri_cfg_input (size:320b/40B) */
 struct hwrm_queue_dscp2pri_cfg_input {
-	__le16 req_type;
-	__le16 cmpl_ring;
-	__le16 seq_id;
-	__le16 target_id;
-	__le64 resp_addr;
-	__le64 src_data_addr;
-	__le32 flags;
-	#define QUEUE_DSCP2PRI_CFG_REQ_FLAGS_USE_HW_DEFAULT_PRI    0x1UL
-	__le32 enables;
-	#define QUEUE_DSCP2PRI_CFG_REQ_ENABLES_DEFAULT_PRI	    0x1UL
-	u8 port_id;
-	u8 default_pri;
-	__le16 entry_cnt;
-	__le32 unused_0;
-};
-
-/* Output (16 bytes) */
+	__le16	req_type;
+	__le16	cmpl_ring;
+	__le16	seq_id;
+	__le16	target_id;
+	__le64	resp_addr;
+	__le64	src_data_addr;
+	__le32	flags;
+	#define QUEUE_DSCP2PRI_CFG_REQ_FLAGS_USE_HW_DEFAULT_PRI     0x1UL
+	__le32	enables;
+	#define QUEUE_DSCP2PRI_CFG_REQ_ENABLES_DEFAULT_PRI     0x1UL
+	u8	port_id;
+	u8	default_pri;
+	__le16	entry_cnt;
+	u8	unused_0[4];
+};
+
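The dscp2pri_cfg request carries its mapping table indirectly: src_data_addr points at a host buffer holding entry_cnt DSCP-to-priority entries, while the enables word gates the inline default_pri field. An illustrative fill that sets only the default priority (the helper name and the choice of entry_cnt = 0 to mean "no table entries" are assumptions, not taken from the driver):

    static void dscp2pri_set_default(struct hwrm_queue_dscp2pri_cfg_input *req,
                                     u8 port_id, u8 pri)
    {
        req->port_id = port_id;
        req->enables = cpu_to_le32(QUEUE_DSCP2PRI_CFG_REQ_ENABLES_DEFAULT_PRI);
        req->default_pri = pri;
        req->entry_cnt = 0;  /* assumption: no DMA'd table entries */
    }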
+/* hwrm_queue_dscp2pri_cfg_output (size:128b/16B) */
 struct hwrm_queue_dscp2pri_cfg_output {
-	__le16 error_code;
-	__le16 req_type;
-	__le16 seq_id;
-	__le16 resp_len;
-	__le32 unused_0;
-	u8 unused_1;
-	u8 unused_2;
-	u8 unused_3;
-	u8 valid;
-};
-
-/* hwrm_vnic_alloc */
-/* Input (24 bytes) */
+	__le16	error_code;
+	__le16	req_type;
+	__le16	seq_id;
+	__le16	resp_len;
+	u8	unused_0[7];
+	u8	valid;
+};
+
+/* hwrm_vnic_alloc_input (size:192b/24B) */
 struct hwrm_vnic_alloc_input {
-	__le16 req_type;
-	__le16 cmpl_ring;
-	__le16 seq_id;
-	__le16 target_id;
-	__le64 resp_addr;
-	__le32 flags;
-	#define VNIC_ALLOC_REQ_FLAGS_DEFAULT			    0x1UL
-	__le32 unused_0;
+	__le16	req_type;
+	__le16	cmpl_ring;
+	__le16	seq_id;
+	__le16	target_id;
+	__le64	resp_addr;
+	__le32	flags;
+	#define VNIC_ALLOC_REQ_FLAGS_DEFAULT     0x1UL
+	u8	unused_0[4];
 };
 
-/* Output (16 bytes) */
+/* hwrm_vnic_alloc_output (size:128b/16B) */
 struct hwrm_vnic_alloc_output {
-	__le16 error_code;
-	__le16 req_type;
-	__le16 seq_id;
-	__le16 resp_len;
-	__le32 vnic_id;
-	u8 unused_0;
-	u8 unused_1;
-	u8 unused_2;
-	u8 valid;
-};
-
-/* hwrm_vnic_free */
-/* Input (24 bytes) */
+	__le16	error_code;
+	__le16	req_type;
+	__le16	seq_id;
+	__le16	resp_len;
+	__le32	vnic_id;
+	u8	unused_0[3];
+	u8	valid;
+};
+
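The vnic_id returned by hwrm_vnic_alloc is the handle later quoted in hwrm_vnic_cfg and hwrm_vnic_free. Both fields are little-endian __le32, so the handle can be carried over without byte-swapping; a small pairing sketch (helper name hypothetical):

    static void vnic_free_fill(struct hwrm_vnic_free_input *req,
                               const struct hwrm_vnic_alloc_output *resp)
    {
        req->vnic_id = resp->vnic_id;  /* already little-endian on the wire */
    }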
+/* hwrm_vnic_free_input (size:192b/24B) */
 struct hwrm_vnic_free_input {
-	__le16 req_type;
-	__le16 cmpl_ring;
-	__le16 seq_id;
-	__le16 target_id;
-	__le64 resp_addr;
-	__le32 vnic_id;
-	__le32 unused_0;
+	__le16	req_type;
+	__le16	cmpl_ring;
+	__le16	seq_id;
+	__le16	target_id;
+	__le64	resp_addr;
+	__le32	vnic_id;
+	u8	unused_0[4];
 };
 
-/* Output (16 bytes) */
+/* hwrm_vnic_free_output (size:128b/16B) */
 struct hwrm_vnic_free_output {
-	__le16 error_code;
-	__le16 req_type;
-	__le16 seq_id;
-	__le16 resp_len;
-	__le32 unused_0;
-	u8 unused_1;
-	u8 unused_2;
-	u8 unused_3;
-	u8 valid;
-};
-
-/* hwrm_vnic_cfg */
-/* Input (40 bytes) */
+	__le16	error_code;
+	__le16	req_type;
+	__le16	seq_id;
+	__le16	resp_len;
+	u8	unused_0[7];
+	u8	valid;
+};
+
+/* hwrm_vnic_cfg_input (size:320b/40B) */
 struct hwrm_vnic_cfg_input {
-	__le16 req_type;
-	__le16 cmpl_ring;
-	__le16 seq_id;
-	__le16 target_id;
-	__le64 resp_addr;
-	__le32 flags;
-	#define VNIC_CFG_REQ_FLAGS_DEFAULT			    0x1UL
-	#define VNIC_CFG_REQ_FLAGS_VLAN_STRIP_MODE		    0x2UL
-	#define VNIC_CFG_REQ_FLAGS_BD_STALL_MODE		    0x4UL
-	#define VNIC_CFG_REQ_FLAGS_ROCE_DUAL_VNIC_MODE		    0x8UL
-	#define VNIC_CFG_REQ_FLAGS_ROCE_ONLY_VNIC_MODE		    0x10UL
-	#define VNIC_CFG_REQ_FLAGS_RSS_DFLT_CR_MODE		    0x20UL
-	#define VNIC_CFG_REQ_FLAGS_ROCE_MIRRORING_CAPABLE_VNIC_MODE 0x40UL
-	__le32 enables;
-	#define VNIC_CFG_REQ_ENABLES_DFLT_RING_GRP		    0x1UL
-	#define VNIC_CFG_REQ_ENABLES_RSS_RULE			    0x2UL
-	#define VNIC_CFG_REQ_ENABLES_COS_RULE			    0x4UL
-	#define VNIC_CFG_REQ_ENABLES_LB_RULE			    0x8UL
-	#define VNIC_CFG_REQ_ENABLES_MRU			    0x10UL
-	__le16 vnic_id;
-	__le16 dflt_ring_grp;
-	__le16 rss_rule;
-	__le16 cos_rule;
-	__le16 lb_rule;
-	__le16 mru;
-	__le32 unused_0;
-};
-
-/* Output (16 bytes) */
+	__le16	req_type;
+	__le16	cmpl_ring;
+	__le16	seq_id;
+	__le16	target_id;
+	__le64	resp_addr;
+	__le32	flags;
+	#define VNIC_CFG_REQ_FLAGS_DEFAULT                              0x1UL
+	#define VNIC_CFG_REQ_FLAGS_VLAN_STRIP_MODE                      0x2UL
+	#define VNIC_CFG_REQ_FLAGS_BD_STALL_MODE                        0x4UL
+	#define VNIC_CFG_REQ_FLAGS_ROCE_DUAL_VNIC_MODE                  0x8UL
+	#define VNIC_CFG_REQ_FLAGS_ROCE_ONLY_VNIC_MODE                  0x10UL
+	#define VNIC_CFG_REQ_FLAGS_RSS_DFLT_CR_MODE                     0x20UL
+	#define VNIC_CFG_REQ_FLAGS_ROCE_MIRRORING_CAPABLE_VNIC_MODE     0x40UL
+	__le32	enables;
+	#define VNIC_CFG_REQ_ENABLES_DFLT_RING_GRP     0x1UL
+	#define VNIC_CFG_REQ_ENABLES_RSS_RULE          0x2UL
+	#define VNIC_CFG_REQ_ENABLES_COS_RULE          0x4UL
+	#define VNIC_CFG_REQ_ENABLES_LB_RULE           0x8UL
+	#define VNIC_CFG_REQ_ENABLES_MRU               0x10UL
+	__le16	vnic_id;
+	__le16	dflt_ring_grp;
+	__le16	rss_rule;
+	__le16	cos_rule;
+	__le16	lb_rule;
+	__le16	mru;
+	u8	unused_0[4];
+};
+
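In hwrm_vnic_cfg_input the enables bitmap tells firmware which of the trailing fields to honor; fields not flagged are ignored. A sketch that updates only the MRU of an existing VNIC (the helper name is hypothetical):

    static void vnic_cfg_set_mru(struct hwrm_vnic_cfg_input *req,
                                 u16 vnic_id, u16 mru)
    {
        req->vnic_id = cpu_to_le16(vnic_id);
        req->enables = cpu_to_le32(VNIC_CFG_REQ_ENABLES_MRU);
        req->mru = cpu_to_le16(mru);
    }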
+/* hwrm_vnic_cfg_output (size:128b/16B) */
 struct hwrm_vnic_cfg_output {
-	__le16 error_code;
-	__le16 req_type;
-	__le16 seq_id;
-	__le16 resp_len;
-	__le32 unused_0;
-	u8 unused_1;
-	u8 unused_2;
-	u8 unused_3;
-	u8 valid;
-};
-
-/* hwrm_vnic_qcaps */
-/* Input (24 bytes) */
+	__le16	error_code;
+	__le16	req_type;
+	__le16	seq_id;
+	__le16	resp_len;
+	u8	unused_0[7];
+	u8	valid;
+};
+
+/* hwrm_vnic_qcaps_input (size:192b/24B) */
 struct hwrm_vnic_qcaps_input {
-	__le16 req_type;
-	__le16 cmpl_ring;
-	__le16 seq_id;
-	__le16 target_id;
-	__le64 resp_addr;
-	__le32 enables;
-	__le32 unused_0;
+	__le16	req_type;
+	__le16	cmpl_ring;
+	__le16	seq_id;
+	__le16	target_id;
+	__le64	resp_addr;
+	__le32	enables;
+	u8	unused_0[4];
 };
 
-/* Output (24 bytes) */
+/* hwrm_vnic_qcaps_output (size:192b/24B) */
 struct hwrm_vnic_qcaps_output {
-	__le16 error_code;
-	__le16 req_type;
-	__le16 seq_id;
-	__le16 resp_len;
-	__le16 mru;
-	u8 unused_0;
-	u8 unused_1;
-	__le32 flags;
-	#define VNIC_QCAPS_RESP_FLAGS_UNUSED			    0x1UL
-	#define VNIC_QCAPS_RESP_FLAGS_VLAN_STRIP_CAP		    0x2UL
-	#define VNIC_QCAPS_RESP_FLAGS_BD_STALL_CAP		    0x4UL
-	#define VNIC_QCAPS_RESP_FLAGS_ROCE_DUAL_VNIC_CAP	    0x8UL
-	#define VNIC_QCAPS_RESP_FLAGS_ROCE_ONLY_VNIC_CAP	    0x10UL
-	#define VNIC_QCAPS_RESP_FLAGS_RSS_DFLT_CR_CAP		    0x20UL
-	#define VNIC_QCAPS_RESP_FLAGS_ROCE_MIRROING_CAPABLE_VNIC_CAP 0x40UL
-	__le32 unused_2;
-	u8 unused_3;
-	u8 unused_4;
-	u8 unused_5;
-	u8 valid;
-};
-
-/* hwrm_vnic_tpa_cfg */
-/* Input (40 bytes) */
+	__le16	error_code;
+	__le16	req_type;
+	__le16	seq_id;
+	__le16	resp_len;
+	__le16	mru;
+	u8	unused_0[2];
+	__le32	flags;
+	#define VNIC_QCAPS_RESP_FLAGS_UNUSED                              0x1UL
+	#define VNIC_QCAPS_RESP_FLAGS_VLAN_STRIP_CAP                      0x2UL
+	#define VNIC_QCAPS_RESP_FLAGS_BD_STALL_CAP                        0x4UL
+	#define VNIC_QCAPS_RESP_FLAGS_ROCE_DUAL_VNIC_CAP                  0x8UL
+	#define VNIC_QCAPS_RESP_FLAGS_ROCE_ONLY_VNIC_CAP                  0x10UL
+	#define VNIC_QCAPS_RESP_FLAGS_RSS_DFLT_CR_CAP                     0x20UL
+	#define VNIC_QCAPS_RESP_FLAGS_ROCE_MIRRORING_CAPABLE_VNIC_CAP     0x40UL
+	u8	unused_1[7];
+	u8	valid;
+};
+
+/* hwrm_vnic_tpa_cfg_input (size:320b/40B) */
 struct hwrm_vnic_tpa_cfg_input {
-	__le16 req_type;
-	__le16 cmpl_ring;
-	__le16 seq_id;
-	__le16 target_id;
-	__le64 resp_addr;
-	__le32 flags;
-	#define VNIC_TPA_CFG_REQ_FLAGS_TPA			    0x1UL
-	#define VNIC_TPA_CFG_REQ_FLAGS_ENCAP_TPA		    0x2UL
-	#define VNIC_TPA_CFG_REQ_FLAGS_RSC_WND_UPDATE		    0x4UL
-	#define VNIC_TPA_CFG_REQ_FLAGS_GRO			    0x8UL
-	#define VNIC_TPA_CFG_REQ_FLAGS_AGG_WITH_ECN		    0x10UL
-	#define VNIC_TPA_CFG_REQ_FLAGS_AGG_WITH_SAME_GRE_SEQ       0x20UL
-	#define VNIC_TPA_CFG_REQ_FLAGS_GRO_IPID_CHECK		    0x40UL
-	#define VNIC_TPA_CFG_REQ_FLAGS_GRO_TTL_CHECK		    0x80UL
-	__le32 enables;
-	#define VNIC_TPA_CFG_REQ_ENABLES_MAX_AGG_SEGS		    0x1UL
-	#define VNIC_TPA_CFG_REQ_ENABLES_MAX_AGGS		    0x2UL
-	#define VNIC_TPA_CFG_REQ_ENABLES_MAX_AGG_TIMER		    0x4UL
-	#define VNIC_TPA_CFG_REQ_ENABLES_MIN_AGG_LEN		    0x8UL
-	__le16 vnic_id;
-	__le16 max_agg_segs;
-	#define VNIC_TPA_CFG_REQ_MAX_AGG_SEGS_1		   0x0UL
-	#define VNIC_TPA_CFG_REQ_MAX_AGG_SEGS_2		   0x1UL
-	#define VNIC_TPA_CFG_REQ_MAX_AGG_SEGS_4		   0x2UL
-	#define VNIC_TPA_CFG_REQ_MAX_AGG_SEGS_8		   0x3UL
-	#define VNIC_TPA_CFG_REQ_MAX_AGG_SEGS_MAX		   0x1fUL
-	__le16 max_aggs;
-	#define VNIC_TPA_CFG_REQ_MAX_AGGS_1			   0x0UL
-	#define VNIC_TPA_CFG_REQ_MAX_AGGS_2			   0x1UL
-	#define VNIC_TPA_CFG_REQ_MAX_AGGS_4			   0x2UL
-	#define VNIC_TPA_CFG_REQ_MAX_AGGS_8			   0x3UL
-	#define VNIC_TPA_CFG_REQ_MAX_AGGS_16			   0x4UL
-	#define VNIC_TPA_CFG_REQ_MAX_AGGS_MAX			   0x7UL
-	u8 unused_0;
-	u8 unused_1;
-	__le32 max_agg_timer;
-	__le32 min_agg_len;
-};
-
-/* Output (16 bytes) */
+	__le16	req_type;
+	__le16	cmpl_ring;
+	__le16	seq_id;
+	__le16	target_id;
+	__le64	resp_addr;
+	__le32	flags;
+	#define VNIC_TPA_CFG_REQ_FLAGS_TPA                       0x1UL
+	#define VNIC_TPA_CFG_REQ_FLAGS_ENCAP_TPA                 0x2UL
+	#define VNIC_TPA_CFG_REQ_FLAGS_RSC_WND_UPDATE            0x4UL
+	#define VNIC_TPA_CFG_REQ_FLAGS_GRO                       0x8UL
+	#define VNIC_TPA_CFG_REQ_FLAGS_AGG_WITH_ECN              0x10UL
+	#define VNIC_TPA_CFG_REQ_FLAGS_AGG_WITH_SAME_GRE_SEQ     0x20UL
+	#define VNIC_TPA_CFG_REQ_FLAGS_GRO_IPID_CHECK            0x40UL
+	#define VNIC_TPA_CFG_REQ_FLAGS_GRO_TTL_CHECK             0x80UL
+	__le32	enables;
+	#define VNIC_TPA_CFG_REQ_ENABLES_MAX_AGG_SEGS      0x1UL
+	#define VNIC_TPA_CFG_REQ_ENABLES_MAX_AGGS          0x2UL
+	#define VNIC_TPA_CFG_REQ_ENABLES_MAX_AGG_TIMER     0x4UL
+	#define VNIC_TPA_CFG_REQ_ENABLES_MIN_AGG_LEN       0x8UL
+	__le16	vnic_id;
+	__le16	max_agg_segs;
+	#define VNIC_TPA_CFG_REQ_MAX_AGG_SEGS_1   0x0UL
+	#define VNIC_TPA_CFG_REQ_MAX_AGG_SEGS_2   0x1UL
+	#define VNIC_TPA_CFG_REQ_MAX_AGG_SEGS_4   0x2UL
+	#define VNIC_TPA_CFG_REQ_MAX_AGG_SEGS_8   0x3UL
+	#define VNIC_TPA_CFG_REQ_MAX_AGG_SEGS_MAX 0x1fUL
+	#define VNIC_TPA_CFG_REQ_MAX_AGG_SEGS_LAST VNIC_TPA_CFG_REQ_MAX_AGG_SEGS_MAX
+	__le16	max_aggs;
+	#define VNIC_TPA_CFG_REQ_MAX_AGGS_1   0x0UL
+	#define VNIC_TPA_CFG_REQ_MAX_AGGS_2   0x1UL
+	#define VNIC_TPA_CFG_REQ_MAX_AGGS_4   0x2UL
+	#define VNIC_TPA_CFG_REQ_MAX_AGGS_8   0x3UL
+	#define VNIC_TPA_CFG_REQ_MAX_AGGS_16  0x4UL
+	#define VNIC_TPA_CFG_REQ_MAX_AGGS_MAX 0x7UL
+	#define VNIC_TPA_CFG_REQ_MAX_AGGS_LAST VNIC_TPA_CFG_REQ_MAX_AGGS_MAX
+	u8	unused_0[2];
+	__le32	max_agg_timer;
+	__le32	min_agg_len;
+};
+
+/* hwrm_vnic_tpa_cfg_output (size:128b/16B) */
 struct hwrm_vnic_tpa_cfg_output {
-	__le16 error_code;
-	__le16 req_type;
-	__le16 seq_id;
-	__le16 resp_len;
-	__le32 unused_0;
-	u8 unused_1;
-	u8 unused_2;
-	u8 unused_3;
-	u8 valid;
-};
-
-/* hwrm_vnic_rss_cfg */
-/* Input (48 bytes) */
+	__le16	error_code;
+	__le16	req_type;
+	__le16	seq_id;
+	__le16	resp_len;
+	u8	unused_0[7];
+	u8	valid;
+};
+
+/* hwrm_vnic_tpa_qcfg_input (size:192b/24B) */
+struct hwrm_vnic_tpa_qcfg_input {
+	__le16	req_type;
+	__le16	cmpl_ring;
+	__le16	seq_id;
+	__le16	target_id;
+	__le64	resp_addr;
+	__le16	vnic_id;
+	u8	unused_0[6];
+};
+
+/* hwrm_vnic_tpa_qcfg_output (size:256b/32B) */
+struct hwrm_vnic_tpa_qcfg_output {
+	__le16	error_code;
+	__le16	req_type;
+	__le16	seq_id;
+	__le16	resp_len;
+	__le32	flags;
+	#define VNIC_TPA_QCFG_RESP_FLAGS_TPA                       0x1UL
+	#define VNIC_TPA_QCFG_RESP_FLAGS_ENCAP_TPA                 0x2UL
+	#define VNIC_TPA_QCFG_RESP_FLAGS_RSC_WND_UPDATE            0x4UL
+	#define VNIC_TPA_QCFG_RESP_FLAGS_GRO                       0x8UL
+	#define VNIC_TPA_QCFG_RESP_FLAGS_AGG_WITH_ECN              0x10UL
+	#define VNIC_TPA_QCFG_RESP_FLAGS_AGG_WITH_SAME_GRE_SEQ     0x20UL
+	#define VNIC_TPA_QCFG_RESP_FLAGS_GRO_IPID_CHECK            0x40UL
+	#define VNIC_TPA_QCFG_RESP_FLAGS_GRO_TTL_CHECK             0x80UL
+	__le16	max_agg_segs;
+	#define VNIC_TPA_QCFG_RESP_MAX_AGG_SEGS_1   0x0UL
+	#define VNIC_TPA_QCFG_RESP_MAX_AGG_SEGS_2   0x1UL
+	#define VNIC_TPA_QCFG_RESP_MAX_AGG_SEGS_4   0x2UL
+	#define VNIC_TPA_QCFG_RESP_MAX_AGG_SEGS_8   0x3UL
+	#define VNIC_TPA_QCFG_RESP_MAX_AGG_SEGS_MAX 0x1fUL
+	#define VNIC_TPA_QCFG_RESP_MAX_AGG_SEGS_LAST VNIC_TPA_QCFG_RESP_MAX_AGG_SEGS_MAX
+	__le16	max_aggs;
+	#define VNIC_TPA_QCFG_RESP_MAX_AGGS_1   0x0UL
+	#define VNIC_TPA_QCFG_RESP_MAX_AGGS_2   0x1UL
+	#define VNIC_TPA_QCFG_RESP_MAX_AGGS_4   0x2UL
+	#define VNIC_TPA_QCFG_RESP_MAX_AGGS_8   0x3UL
+	#define VNIC_TPA_QCFG_RESP_MAX_AGGS_16  0x4UL
+	#define VNIC_TPA_QCFG_RESP_MAX_AGGS_MAX 0x7UL
+	#define VNIC_TPA_QCFG_RESP_MAX_AGGS_LAST VNIC_TPA_QCFG_RESP_MAX_AGGS_MAX
+	__le32	max_agg_timer;
+	__le32	min_agg_len;
+	u8	unused_0[7];
+	u8	valid;
+};
+
+/* hwrm_vnic_rss_cfg_input (size:384b/48B) */
 struct hwrm_vnic_rss_cfg_input {
-	__le16 req_type;
-	__le16 cmpl_ring;
-	__le16 seq_id;
-	__le16 target_id;
-	__le64 resp_addr;
-	__le32 hash_type;
-	#define VNIC_RSS_CFG_REQ_HASH_TYPE_IPV4		    0x1UL
-	#define VNIC_RSS_CFG_REQ_HASH_TYPE_TCP_IPV4		    0x2UL
-	#define VNIC_RSS_CFG_REQ_HASH_TYPE_UDP_IPV4		    0x4UL
-	#define VNIC_RSS_CFG_REQ_HASH_TYPE_IPV6		    0x8UL
-	#define VNIC_RSS_CFG_REQ_HASH_TYPE_TCP_IPV6		    0x10UL
-	#define VNIC_RSS_CFG_REQ_HASH_TYPE_UDP_IPV6		    0x20UL
-	__le32 unused_0;
-	__le64 ring_grp_tbl_addr;
-	__le64 hash_key_tbl_addr;
-	__le16 rss_ctx_idx;
-	__le16 unused_1[3];
-};
-
-/* Output (16 bytes) */
+	__le16	req_type;
+	__le16	cmpl_ring;
+	__le16	seq_id;
+	__le16	target_id;
+	__le64	resp_addr;
+	__le32	hash_type;
+	#define VNIC_RSS_CFG_REQ_HASH_TYPE_IPV4         0x1UL
+	#define VNIC_RSS_CFG_REQ_HASH_TYPE_TCP_IPV4     0x2UL
+	#define VNIC_RSS_CFG_REQ_HASH_TYPE_UDP_IPV4     0x4UL
+	#define VNIC_RSS_CFG_REQ_HASH_TYPE_IPV6         0x8UL
+	#define VNIC_RSS_CFG_REQ_HASH_TYPE_TCP_IPV6     0x10UL
+	#define VNIC_RSS_CFG_REQ_HASH_TYPE_UDP_IPV6     0x20UL
+	u8	unused_0[4];
+	__le64	ring_grp_tbl_addr;
+	__le64	hash_key_tbl_addr;
+	__le16	rss_ctx_idx;
+	u8	unused_1[6];
+};
+
+/* hwrm_vnic_rss_cfg_output (size:128b/16B) */
 struct hwrm_vnic_rss_cfg_output {
-	__le16 error_code;
-	__le16 req_type;
-	__le16 seq_id;
-	__le16 resp_len;
-	__le32 unused_0;
-	u8 unused_1;
-	u8 unused_2;
-	u8 unused_3;
-	u8 valid;
-};
-
-/* hwrm_vnic_plcmodes_cfg */
-/* Input (40 bytes) */
+	__le16	error_code;
+	__le16	req_type;
+	__le16	seq_id;
+	__le16	resp_len;
+	u8	unused_0[7];
+	u8	valid;
+};
+
+/* hwrm_vnic_plcmodes_cfg_input (size:320b/40B) */
 struct hwrm_vnic_plcmodes_cfg_input {
-	__le16 req_type;
-	__le16 cmpl_ring;
-	__le16 seq_id;
-	__le16 target_id;
-	__le64 resp_addr;
-	__le32 flags;
-	#define VNIC_PLCMODES_CFG_REQ_FLAGS_REGULAR_PLACEMENT      0x1UL
-	#define VNIC_PLCMODES_CFG_REQ_FLAGS_JUMBO_PLACEMENT	    0x2UL
-	#define VNIC_PLCMODES_CFG_REQ_FLAGS_HDS_IPV4		    0x4UL
-	#define VNIC_PLCMODES_CFG_REQ_FLAGS_HDS_IPV6		    0x8UL
-	#define VNIC_PLCMODES_CFG_REQ_FLAGS_HDS_FCOE		    0x10UL
-	#define VNIC_PLCMODES_CFG_REQ_FLAGS_HDS_ROCE		    0x20UL
-	__le32 enables;
-	#define VNIC_PLCMODES_CFG_REQ_ENABLES_JUMBO_THRESH_VALID   0x1UL
-	#define VNIC_PLCMODES_CFG_REQ_ENABLES_HDS_OFFSET_VALID     0x2UL
-	#define VNIC_PLCMODES_CFG_REQ_ENABLES_HDS_THRESHOLD_VALID  0x4UL
-	__le32 vnic_id;
-	__le16 jumbo_thresh;
-	__le16 hds_offset;
-	__le16 hds_threshold;
-	__le16 unused_0[3];
-};
-
-/* Output (16 bytes) */
+	__le16	req_type;
+	__le16	cmpl_ring;
+	__le16	seq_id;
+	__le16	target_id;
+	__le64	resp_addr;
+	__le32	flags;
+	#define VNIC_PLCMODES_CFG_REQ_FLAGS_REGULAR_PLACEMENT     0x1UL
+	#define VNIC_PLCMODES_CFG_REQ_FLAGS_JUMBO_PLACEMENT       0x2UL
+	#define VNIC_PLCMODES_CFG_REQ_FLAGS_HDS_IPV4              0x4UL
+	#define VNIC_PLCMODES_CFG_REQ_FLAGS_HDS_IPV6              0x8UL
+	#define VNIC_PLCMODES_CFG_REQ_FLAGS_HDS_FCOE              0x10UL
+	#define VNIC_PLCMODES_CFG_REQ_FLAGS_HDS_ROCE              0x20UL
+	__le32	enables;
+	#define VNIC_PLCMODES_CFG_REQ_ENABLES_JUMBO_THRESH_VALID      0x1UL
+	#define VNIC_PLCMODES_CFG_REQ_ENABLES_HDS_OFFSET_VALID        0x2UL
+	#define VNIC_PLCMODES_CFG_REQ_ENABLES_HDS_THRESHOLD_VALID     0x4UL
+	__le32	vnic_id;
+	__le16	jumbo_thresh;
+	__le16	hds_offset;
+	__le16	hds_threshold;
+	u8	unused_0[6];
+};
+
+/* hwrm_vnic_plcmodes_cfg_output (size:128b/16B) */
 struct hwrm_vnic_plcmodes_cfg_output {
-	__le16 error_code;
-	__le16 req_type;
-	__le16 seq_id;
-	__le16 resp_len;
-	__le32 unused_0;
-	u8 unused_1;
-	u8 unused_2;
-	u8 unused_3;
-	u8 valid;
-};
-
-/* hwrm_vnic_rss_cos_lb_ctx_alloc */
-/* Input (16 bytes) */
+	__le16	error_code;
+	__le16	req_type;
+	__le16	seq_id;
+	__le16	resp_len;
+	u8	unused_0[7];
+	u8	valid;
+};
+
+/* hwrm_vnic_rss_cos_lb_ctx_alloc_input (size:128b/16B) */
 struct hwrm_vnic_rss_cos_lb_ctx_alloc_input {
-	__le16 req_type;
-	__le16 cmpl_ring;
-	__le16 seq_id;
-	__le16 target_id;
-	__le64 resp_addr;
+	__le16	req_type;
+	__le16	cmpl_ring;
+	__le16	seq_id;
+	__le16	target_id;
+	__le64	resp_addr;
 };
 
-/* Output (16 bytes) */
+/* hwrm_vnic_rss_cos_lb_ctx_alloc_output (size:128b/16B) */
 struct hwrm_vnic_rss_cos_lb_ctx_alloc_output {
-	__le16 error_code;
-	__le16 req_type;
-	__le16 seq_id;
-	__le16 resp_len;
-	__le16 rss_cos_lb_ctx_id;
-	u8 unused_0;
-	u8 unused_1;
-	u8 unused_2;
-	u8 unused_3;
-	u8 unused_4;
-	u8 valid;
-};
-
-/* hwrm_vnic_rss_cos_lb_ctx_free */
-/* Input (24 bytes) */
+	__le16	error_code;
+	__le16	req_type;
+	__le16	seq_id;
+	__le16	resp_len;
+	__le16	rss_cos_lb_ctx_id;
+	u8	unused_0[5];
+	u8	valid;
+};
+
+/* hwrm_vnic_rss_cos_lb_ctx_free_input (size:192b/24B) */
 struct hwrm_vnic_rss_cos_lb_ctx_free_input {
-	__le16 req_type;
-	__le16 cmpl_ring;
-	__le16 seq_id;
-	__le16 target_id;
-	__le64 resp_addr;
-	__le16 rss_cos_lb_ctx_id;
-	__le16 unused_0[3];
+	__le16	req_type;
+	__le16	cmpl_ring;
+	__le16	seq_id;
+	__le16	target_id;
+	__le64	resp_addr;
+	__le16	rss_cos_lb_ctx_id;
+	u8	unused_0[6];
 };
 
-/* Output (16 bytes) */
+/* hwrm_vnic_rss_cos_lb_ctx_free_output (size:128b/16B) */
 struct hwrm_vnic_rss_cos_lb_ctx_free_output {
-	__le16 error_code;
-	__le16 req_type;
-	__le16 seq_id;
-	__le16 resp_len;
-	__le32 unused_0;
-	u8 unused_1;
-	u8 unused_2;
-	u8 unused_3;
-	u8 valid;
-};
-
-/* hwrm_ring_alloc */
-/* Input (80 bytes) */
+	__le16	error_code;
+	__le16	req_type;
+	__le16	seq_id;
+	__le16	resp_len;
+	u8	unused_0[7];
+	u8	valid;
+};
+
+/* hwrm_ring_alloc_input (size:640b/80B) */
 struct hwrm_ring_alloc_input {
-	__le16 req_type;
-	__le16 cmpl_ring;
-	__le16 seq_id;
-	__le16 target_id;
-	__le64 resp_addr;
-	__le32 enables;
-	#define RING_ALLOC_REQ_ENABLES_RESERVED1		    0x1UL
-	#define RING_ALLOC_REQ_ENABLES_RING_ARB_CFG		    0x2UL
-	#define RING_ALLOC_REQ_ENABLES_RESERVED3		    0x4UL
-	#define RING_ALLOC_REQ_ENABLES_STAT_CTX_ID_VALID	    0x8UL
-	#define RING_ALLOC_REQ_ENABLES_RESERVED4		    0x10UL
-	#define RING_ALLOC_REQ_ENABLES_MAX_BW_VALID		    0x20UL
-	u8 ring_type;
-	#define RING_ALLOC_REQ_RING_TYPE_L2_CMPL		   0x0UL
-	#define RING_ALLOC_REQ_RING_TYPE_TX			   0x1UL
-	#define RING_ALLOC_REQ_RING_TYPE_RX			   0x2UL
-	#define RING_ALLOC_REQ_RING_TYPE_ROCE_CMPL		   0x3UL
-	u8 unused_0;
-	__le16 unused_1;
-	__le64 page_tbl_addr;
-	__le32 fbo;
-	u8 page_size;
-	u8 page_tbl_depth;
-	u8 unused_2;
-	u8 unused_3;
-	__le32 length;
-	__le16 logical_id;
-	__le16 cmpl_ring_id;
-	__le16 queue_id;
-	u8 unused_4;
-	u8 unused_5;
-	__le32 reserved1;
-	__le16 ring_arb_cfg;
-	#define RING_ALLOC_REQ_RING_ARB_CFG_ARB_POLICY_MASK	    0xfUL
-	#define RING_ALLOC_REQ_RING_ARB_CFG_ARB_POLICY_SFT	    0
-	#define RING_ALLOC_REQ_RING_ARB_CFG_ARB_POLICY_SP	   (0x1UL << 0)
-	#define RING_ALLOC_REQ_RING_ARB_CFG_ARB_POLICY_WFQ	   (0x2UL << 0)
-	#define RING_ALLOC_REQ_RING_ARB_CFG_ARB_POLICY_LAST    RING_ALLOC_REQ_RING_ARB_CFG_ARB_POLICY_WFQ
-	#define RING_ALLOC_REQ_RING_ARB_CFG_RSVD_MASK		    0xf0UL
-	#define RING_ALLOC_REQ_RING_ARB_CFG_RSVD_SFT		    4
-	#define RING_ALLOC_REQ_RING_ARB_CFG_ARB_POLICY_PARAM_MASK  0xff00UL
-	#define RING_ALLOC_REQ_RING_ARB_CFG_ARB_POLICY_PARAM_SFT   8
-	u8 unused_6;
-	u8 unused_7;
-	__le32 reserved3;
-	__le32 stat_ctx_id;
-	__le32 reserved4;
-	__le32 max_bw;
-	#define RING_ALLOC_REQ_MAX_BW_BW_VALUE_MASK		    0xfffffffUL
-	#define RING_ALLOC_REQ_MAX_BW_BW_VALUE_SFT		    0
-	#define RING_ALLOC_REQ_MAX_BW_SCALE			    0x10000000UL
-	#define RING_ALLOC_REQ_MAX_BW_SCALE_BITS		   (0x0UL << 28)
-	#define RING_ALLOC_REQ_MAX_BW_SCALE_BYTES		   (0x1UL << 28)
-	#define RING_ALLOC_REQ_MAX_BW_SCALE_LAST    RING_ALLOC_REQ_MAX_BW_SCALE_BYTES
-	#define RING_ALLOC_REQ_MAX_BW_BW_VALUE_UNIT_MASK	    0xe0000000UL
-	#define RING_ALLOC_REQ_MAX_BW_BW_VALUE_UNIT_SFT	    29
-	#define RING_ALLOC_REQ_MAX_BW_BW_VALUE_UNIT_MEGA	   (0x0UL << 29)
-	#define RING_ALLOC_REQ_MAX_BW_BW_VALUE_UNIT_KILO	   (0x2UL << 29)
-	#define RING_ALLOC_REQ_MAX_BW_BW_VALUE_UNIT_BASE	   (0x4UL << 29)
-	#define RING_ALLOC_REQ_MAX_BW_BW_VALUE_UNIT_GIGA	   (0x6UL << 29)
+	__le16	req_type;
+	__le16	cmpl_ring;
+	__le16	seq_id;
+	__le16	target_id;
+	__le64	resp_addr;
+	__le32	enables;
+	#define RING_ALLOC_REQ_ENABLES_RING_ARB_CFG          0x2UL
+	#define RING_ALLOC_REQ_ENABLES_STAT_CTX_ID_VALID     0x8UL
+	#define RING_ALLOC_REQ_ENABLES_MAX_BW_VALID          0x20UL
+	u8	ring_type;
+	#define RING_ALLOC_REQ_RING_TYPE_L2_CMPL   0x0UL
+	#define RING_ALLOC_REQ_RING_TYPE_TX        0x1UL
+	#define RING_ALLOC_REQ_RING_TYPE_RX        0x2UL
+	#define RING_ALLOC_REQ_RING_TYPE_ROCE_CMPL 0x3UL
+	#define RING_ALLOC_REQ_RING_TYPE_LAST     RING_ALLOC_REQ_RING_TYPE_ROCE_CMPL
+	u8	unused_0[3];
+	__le64	page_tbl_addr;
+	__le32	fbo;
+	u8	page_size;
+	u8	page_tbl_depth;
+	u8	unused_1[2];
+	__le32	length;
+	__le16	logical_id;
+	__le16	cmpl_ring_id;
+	__le16	queue_id;
+	u8	unused_2[2];
+	__le32	reserved1;
+	__le16	ring_arb_cfg;
+	#define RING_ALLOC_REQ_RING_ARB_CFG_ARB_POLICY_MASK      0xfUL
+	#define RING_ALLOC_REQ_RING_ARB_CFG_ARB_POLICY_SFT       0
+	#define RING_ALLOC_REQ_RING_ARB_CFG_ARB_POLICY_SP          0x1UL
+	#define RING_ALLOC_REQ_RING_ARB_CFG_ARB_POLICY_WFQ         0x2UL
+	#define RING_ALLOC_REQ_RING_ARB_CFG_ARB_POLICY_LAST       RING_ALLOC_REQ_RING_ARB_CFG_ARB_POLICY_WFQ
+	#define RING_ALLOC_REQ_RING_ARB_CFG_RSVD_MASK            0xf0UL
+	#define RING_ALLOC_REQ_RING_ARB_CFG_RSVD_SFT             4
+	#define RING_ALLOC_REQ_RING_ARB_CFG_ARB_POLICY_PARAM_MASK 0xff00UL
+	#define RING_ALLOC_REQ_RING_ARB_CFG_ARB_POLICY_PARAM_SFT 8
+	__le16	unused_3;
+	__le32	reserved3;
+	__le32	stat_ctx_id;
+	__le32	reserved4;
+	__le32	max_bw;
+	#define RING_ALLOC_REQ_MAX_BW_BW_VALUE_MASK             0xfffffffUL
+	#define RING_ALLOC_REQ_MAX_BW_BW_VALUE_SFT              0
+	#define RING_ALLOC_REQ_MAX_BW_SCALE                     0x10000000UL
+	#define RING_ALLOC_REQ_MAX_BW_SCALE_BITS                  (0x0UL << 28)
+	#define RING_ALLOC_REQ_MAX_BW_SCALE_BYTES                 (0x1UL << 28)
+	#define RING_ALLOC_REQ_MAX_BW_SCALE_LAST                 RING_ALLOC_REQ_MAX_BW_SCALE_BYTES
+	#define RING_ALLOC_REQ_MAX_BW_BW_VALUE_UNIT_MASK        0xe0000000UL
+	#define RING_ALLOC_REQ_MAX_BW_BW_VALUE_UNIT_SFT         29
+	#define RING_ALLOC_REQ_MAX_BW_BW_VALUE_UNIT_MEGA          (0x0UL << 29)
+	#define RING_ALLOC_REQ_MAX_BW_BW_VALUE_UNIT_KILO          (0x2UL << 29)
+	#define RING_ALLOC_REQ_MAX_BW_BW_VALUE_UNIT_BASE          (0x4UL << 29)
+	#define RING_ALLOC_REQ_MAX_BW_BW_VALUE_UNIT_GIGA          (0x6UL << 29)
 	#define RING_ALLOC_REQ_MAX_BW_BW_VALUE_UNIT_PERCENT1_100  (0x1UL << 29)
 	#define RING_ALLOC_REQ_MAX_BW_BW_VALUE_UNIT_INVALID       (0x7UL << 29)
-	#define RING_ALLOC_REQ_MAX_BW_BW_VALUE_UNIT_LAST    RING_ALLOC_REQ_MAX_BW_BW_VALUE_UNIT_INVALID
-	u8 int_mode;
-	#define RING_ALLOC_REQ_INT_MODE_LEGACY			   0x0UL
-	#define RING_ALLOC_REQ_INT_MODE_RSVD			   0x1UL
-	#define RING_ALLOC_REQ_INT_MODE_MSIX			   0x2UL
-	#define RING_ALLOC_REQ_INT_MODE_POLL			   0x3UL
-	u8 unused_8[3];
+	#define RING_ALLOC_REQ_MAX_BW_BW_VALUE_UNIT_LAST         RING_ALLOC_REQ_MAX_BW_BW_VALUE_UNIT_INVALID
+	u8	int_mode;
+	#define RING_ALLOC_REQ_INT_MODE_LEGACY 0x0UL
+	#define RING_ALLOC_REQ_INT_MODE_RSVD   0x1UL
+	#define RING_ALLOC_REQ_INT_MODE_MSIX   0x2UL
+	#define RING_ALLOC_REQ_INT_MODE_POLL   0x3UL
+	#define RING_ALLOC_REQ_INT_MODE_LAST  RING_ALLOC_REQ_INT_MODE_POLL
+	u8	unused_4[3];
 };
 
-/* Output (16 bytes) */
+/* hwrm_ring_alloc_output (size:128b/16B) */
 struct hwrm_ring_alloc_output {
-	__le16 error_code;
-	__le16 req_type;
-	__le16 seq_id;
-	__le16 resp_len;
-	__le16 ring_id;
-	__le16 logical_ring_id;
-	u8 unused_0;
-	u8 unused_1;
-	u8 unused_2;
-	u8 valid;
-};
-
-/* hwrm_ring_free */
-/* Input (24 bytes) */
+	__le16	error_code;
+	__le16	req_type;
+	__le16	seq_id;
+	__le16	resp_len;
+	__le16	ring_id;
+	__le16	logical_ring_id;
+	u8	unused_0[3];
+	u8	valid;
+};
+
+/* hwrm_ring_free_input (size:192b/24B) */
 struct hwrm_ring_free_input {
-	__le16 req_type;
-	__le16 cmpl_ring;
-	__le16 seq_id;
-	__le16 target_id;
-	__le64 resp_addr;
-	u8 ring_type;
-	#define RING_FREE_REQ_RING_TYPE_L2_CMPL		   0x0UL
-	#define RING_FREE_REQ_RING_TYPE_TX			   0x1UL
-	#define RING_FREE_REQ_RING_TYPE_RX			   0x2UL
-	#define RING_FREE_REQ_RING_TYPE_ROCE_CMPL		   0x3UL
-	u8 unused_0;
-	__le16 ring_id;
-	__le32 unused_1;
-};
-
-/* Output (16 bytes) */
+	__le16	req_type;
+	__le16	cmpl_ring;
+	__le16	seq_id;
+	__le16	target_id;
+	__le64	resp_addr;
+	u8	ring_type;
+	#define RING_FREE_REQ_RING_TYPE_L2_CMPL   0x0UL
+	#define RING_FREE_REQ_RING_TYPE_TX        0x1UL
+	#define RING_FREE_REQ_RING_TYPE_RX        0x2UL
+	#define RING_FREE_REQ_RING_TYPE_ROCE_CMPL 0x3UL
+	#define RING_FREE_REQ_RING_TYPE_LAST     RING_FREE_REQ_RING_TYPE_ROCE_CMPL
+	u8	unused_0;
+	__le16	ring_id;
+	u8	unused_1[4];
+};
+
+/* hwrm_ring_free_output (size:128b/16B) */
 struct hwrm_ring_free_output {
-	__le16 error_code;
-	__le16 req_type;
-	__le16 seq_id;
-	__le16 resp_len;
-	__le32 unused_0;
-	u8 unused_1;
-	u8 unused_2;
-	u8 unused_3;
-	u8 valid;
-};
-
-/* hwrm_ring_cmpl_ring_qaggint_params */
-/* Input (24 bytes) */
+	__le16	error_code;
+	__le16	req_type;
+	__le16	seq_id;
+	__le16	resp_len;
+	u8	unused_0[7];
+	u8	valid;
+};
+
+/* hwrm_ring_cmpl_ring_qaggint_params_input (size:192b/24B) */
 struct hwrm_ring_cmpl_ring_qaggint_params_input {
-	__le16 req_type;
-	__le16 cmpl_ring;
-	__le16 seq_id;
-	__le16 target_id;
-	__le64 resp_addr;
-	__le16 ring_id;
-	__le16 unused_0[3];
+	__le16	req_type;
+	__le16	cmpl_ring;
+	__le16	seq_id;
+	__le16	target_id;
+	__le64	resp_addr;
+	__le16	ring_id;
+	u8	unused_0[6];
 };
 
-/* Output (32 bytes) */
+/* hwrm_ring_cmpl_ring_qaggint_params_output (size:256b/32B) */
 struct hwrm_ring_cmpl_ring_qaggint_params_output {
-	__le16 error_code;
-	__le16 req_type;
-	__le16 seq_id;
-	__le16 resp_len;
-	__le16 flags;
-	#define RING_CMPL_RING_QAGGINT_PARAMS_RESP_FLAGS_TIMER_RESET 0x1UL
-	#define RING_CMPL_RING_QAGGINT_PARAMS_RESP_FLAGS_RING_IDLE 0x2UL
-	__le16 num_cmpl_dma_aggr;
-	__le16 num_cmpl_dma_aggr_during_int;
-	__le16 cmpl_aggr_dma_tmr;
-	__le16 cmpl_aggr_dma_tmr_during_int;
-	__le16 int_lat_tmr_min;
-	__le16 int_lat_tmr_max;
-	__le16 num_cmpl_aggr_int;
-	__le32 unused_0;
-	u8 unused_1;
-	u8 unused_2;
-	u8 unused_3;
-	u8 valid;
-};
-
-/* hwrm_ring_cmpl_ring_cfg_aggint_params */
-/* Input (40 bytes) */
+	__le16	error_code;
+	__le16	req_type;
+	__le16	seq_id;
+	__le16	resp_len;
+	__le16	flags;
+	#define RING_CMPL_RING_QAGGINT_PARAMS_RESP_FLAGS_TIMER_RESET     0x1UL
+	#define RING_CMPL_RING_QAGGINT_PARAMS_RESP_FLAGS_RING_IDLE       0x2UL
+	__le16	num_cmpl_dma_aggr;
+	__le16	num_cmpl_dma_aggr_during_int;
+	__le16	cmpl_aggr_dma_tmr;
+	__le16	cmpl_aggr_dma_tmr_during_int;
+	__le16	int_lat_tmr_min;
+	__le16	int_lat_tmr_max;
+	__le16	num_cmpl_aggr_int;
+	u8	unused_0[7];
+	u8	valid;
+};
+
+/* hwrm_ring_cmpl_ring_cfg_aggint_params_input (size:320b/40B) */
 struct hwrm_ring_cmpl_ring_cfg_aggint_params_input {
-	__le16 req_type;
-	__le16 cmpl_ring;
-	__le16 seq_id;
-	__le16 target_id;
-	__le64 resp_addr;
-	__le16 ring_id;
-	__le16 flags;
-	#define RING_CMPL_RING_CFG_AGGINT_PARAMS_REQ_FLAGS_TIMER_RESET 0x1UL
-	#define RING_CMPL_RING_CFG_AGGINT_PARAMS_REQ_FLAGS_RING_IDLE 0x2UL
-	__le16 num_cmpl_dma_aggr;
-	__le16 num_cmpl_dma_aggr_during_int;
-	__le16 cmpl_aggr_dma_tmr;
-	__le16 cmpl_aggr_dma_tmr_during_int;
-	__le16 int_lat_tmr_min;
-	__le16 int_lat_tmr_max;
-	__le16 num_cmpl_aggr_int;
-	__le16 unused_0[3];
-};
-
-/* Output (16 bytes) */
+	__le16	req_type;
+	__le16	cmpl_ring;
+	__le16	seq_id;
+	__le16	target_id;
+	__le64	resp_addr;
+	__le16	ring_id;
+	__le16	flags;
+	#define RING_CMPL_RING_CFG_AGGINT_PARAMS_REQ_FLAGS_TIMER_RESET     0x1UL
+	#define RING_CMPL_RING_CFG_AGGINT_PARAMS_REQ_FLAGS_RING_IDLE       0x2UL
+	__le16	num_cmpl_dma_aggr;
+	__le16	num_cmpl_dma_aggr_during_int;
+	__le16	cmpl_aggr_dma_tmr;
+	__le16	cmpl_aggr_dma_tmr_during_int;
+	__le16	int_lat_tmr_min;
+	__le16	int_lat_tmr_max;
+	__le16	num_cmpl_aggr_int;
+	u8	unused_0[6];
+};
+
+/* hwrm_ring_cmpl_ring_cfg_aggint_params_output (size:128b/16B) */
 struct hwrm_ring_cmpl_ring_cfg_aggint_params_output {
-	__le16 error_code;
-	__le16 req_type;
-	__le16 seq_id;
-	__le16 resp_len;
-	__le32 unused_0;
-	u8 unused_1;
-	u8 unused_2;
-	u8 unused_3;
-	u8 valid;
-};
-
-/* hwrm_ring_reset */
-/* Input (24 bytes) */
+	__le16	error_code;
+	__le16	req_type;
+	__le16	seq_id;
+	__le16	resp_len;
+	u8	unused_0[7];
+	u8	valid;
+};
+
+/* hwrm_ring_reset_input (size:192b/24B) */
 struct hwrm_ring_reset_input {
-	__le16 req_type;
-	__le16 cmpl_ring;
-	__le16 seq_id;
-	__le16 target_id;
-	__le64 resp_addr;
-	u8 ring_type;
-	#define RING_RESET_REQ_RING_TYPE_L2_CMPL		   0x0UL
-	#define RING_RESET_REQ_RING_TYPE_TX			   0x1UL
-	#define RING_RESET_REQ_RING_TYPE_RX			   0x2UL
-	#define RING_RESET_REQ_RING_TYPE_ROCE_CMPL		   0x3UL
-	u8 unused_0;
-	__le16 ring_id;
-	__le32 unused_1;
-};
-
-/* Output (16 bytes) */
+	__le16	req_type;
+	__le16	cmpl_ring;
+	__le16	seq_id;
+	__le16	target_id;
+	__le64	resp_addr;
+	u8	ring_type;
+	#define RING_RESET_REQ_RING_TYPE_L2_CMPL   0x0UL
+	#define RING_RESET_REQ_RING_TYPE_TX        0x1UL
+	#define RING_RESET_REQ_RING_TYPE_RX        0x2UL
+	#define RING_RESET_REQ_RING_TYPE_ROCE_CMPL 0x3UL
+	#define RING_RESET_REQ_RING_TYPE_LAST     RING_RESET_REQ_RING_TYPE_ROCE_CMPL
+	u8	unused_0;
+	__le16	ring_id;
+	u8	unused_1[4];
+};
+
+/* hwrm_ring_reset_output (size:128b/16B) */
 struct hwrm_ring_reset_output {
-	__le16 error_code;
-	__le16 req_type;
-	__le16 seq_id;
-	__le16 resp_len;
-	__le32 unused_0;
-	u8 unused_1;
-	u8 unused_2;
-	u8 unused_3;
-	u8 valid;
-};
-
-/* hwrm_ring_grp_alloc */
-/* Input (24 bytes) */
+	__le16	error_code;
+	__le16	req_type;
+	__le16	seq_id;
+	__le16	resp_len;
+	u8	unused_0[7];
+	u8	valid;
+};
+
+/* hwrm_ring_grp_alloc_input (size:192b/24B) */
 struct hwrm_ring_grp_alloc_input {
-	__le16 req_type;
-	__le16 cmpl_ring;
-	__le16 seq_id;
-	__le16 target_id;
-	__le64 resp_addr;
-	__le16 cr;
-	__le16 rr;
-	__le16 ar;
-	__le16 sc;
-};
-
-/* Output (16 bytes) */
+	__le16	req_type;
+	__le16	cmpl_ring;
+	__le16	seq_id;
+	__le16	target_id;
+	__le64	resp_addr;
+	__le16	cr;
+	__le16	rr;
+	__le16	ar;
+	__le16	sc;
+};
+
+/* hwrm_ring_grp_alloc_output (size:128b/16B) */
 struct hwrm_ring_grp_alloc_output {
-	__le16 error_code;
-	__le16 req_type;
-	__le16 seq_id;
-	__le16 resp_len;
-	__le32 ring_group_id;
-	u8 unused_0;
-	u8 unused_1;
-	u8 unused_2;
-	u8 valid;
-};
-
-/* hwrm_ring_grp_free */
-/* Input (24 bytes) */
+	__le16	error_code;
+	__le16	req_type;
+	__le16	seq_id;
+	__le16	resp_len;
+	__le32	ring_group_id;
+	u8	unused_0[3];
+	u8	valid;
+};
+
+/* hwrm_ring_grp_free_input (size:192b/24B) */
 struct hwrm_ring_grp_free_input {
-	__le16 req_type;
-	__le16 cmpl_ring;
-	__le16 seq_id;
-	__le16 target_id;
-	__le64 resp_addr;
-	__le32 ring_group_id;
-	__le32 unused_0;
+	__le16	req_type;
+	__le16	cmpl_ring;
+	__le16	seq_id;
+	__le16	target_id;
+	__le64	resp_addr;
+	__le32	ring_group_id;
+	u8	unused_0[4];
 };
 
-/* Output (16 bytes) */
+/* hwrm_ring_grp_free_output (size:128b/16B) */
 struct hwrm_ring_grp_free_output {
-	__le16 error_code;
-	__le16 req_type;
-	__le16 seq_id;
-	__le16 resp_len;
-	__le32 unused_0;
-	u8 unused_1;
-	u8 unused_2;
-	u8 unused_3;
-	u8 valid;
-};
-
-/* hwrm_cfa_l2_filter_alloc */
-/* Input (96 bytes) */
+	__le16	error_code;
+	__le16	req_type;
+	__le16	seq_id;
+	__le16	resp_len;
+	u8	unused_0[7];
+	u8	valid;
+};
+
+/* hwrm_cfa_l2_filter_alloc_input (size:768b/96B) */
 struct hwrm_cfa_l2_filter_alloc_input {
-	__le16 req_type;
-	__le16 cmpl_ring;
-	__le16 seq_id;
-	__le16 target_id;
-	__le64 resp_addr;
-	__le32 flags;
-	#define CFA_L2_FILTER_ALLOC_REQ_FLAGS_PATH		    0x1UL
-	#define CFA_L2_FILTER_ALLOC_REQ_FLAGS_PATH_TX		   (0x0UL << 0)
-	#define CFA_L2_FILTER_ALLOC_REQ_FLAGS_PATH_RX		   (0x1UL << 0)
-	#define CFA_L2_FILTER_ALLOC_REQ_FLAGS_PATH_LAST    CFA_L2_FILTER_ALLOC_REQ_FLAGS_PATH_RX
-	#define CFA_L2_FILTER_ALLOC_REQ_FLAGS_LOOPBACK		    0x2UL
-	#define CFA_L2_FILTER_ALLOC_REQ_FLAGS_DROP		    0x4UL
-	#define CFA_L2_FILTER_ALLOC_REQ_FLAGS_OUTERMOST	    0x8UL
-	__le32 enables;
-	#define CFA_L2_FILTER_ALLOC_REQ_ENABLES_L2_ADDR	    0x1UL
-	#define CFA_L2_FILTER_ALLOC_REQ_ENABLES_L2_ADDR_MASK       0x2UL
-	#define CFA_L2_FILTER_ALLOC_REQ_ENABLES_L2_OVLAN	    0x4UL
-	#define CFA_L2_FILTER_ALLOC_REQ_ENABLES_L2_OVLAN_MASK      0x8UL
-	#define CFA_L2_FILTER_ALLOC_REQ_ENABLES_L2_IVLAN	    0x10UL
-	#define CFA_L2_FILTER_ALLOC_REQ_ENABLES_L2_IVLAN_MASK      0x20UL
-	#define CFA_L2_FILTER_ALLOC_REQ_ENABLES_T_L2_ADDR	    0x40UL
-	#define CFA_L2_FILTER_ALLOC_REQ_ENABLES_T_L2_ADDR_MASK     0x80UL
-	#define CFA_L2_FILTER_ALLOC_REQ_ENABLES_T_L2_OVLAN	    0x100UL
-	#define CFA_L2_FILTER_ALLOC_REQ_ENABLES_T_L2_OVLAN_MASK    0x200UL
-	#define CFA_L2_FILTER_ALLOC_REQ_ENABLES_T_L2_IVLAN	    0x400UL
-	#define CFA_L2_FILTER_ALLOC_REQ_ENABLES_T_L2_IVLAN_MASK    0x800UL
-	#define CFA_L2_FILTER_ALLOC_REQ_ENABLES_SRC_TYPE	    0x1000UL
-	#define CFA_L2_FILTER_ALLOC_REQ_ENABLES_SRC_ID		    0x2000UL
-	#define CFA_L2_FILTER_ALLOC_REQ_ENABLES_TUNNEL_TYPE	    0x4000UL
-	#define CFA_L2_FILTER_ALLOC_REQ_ENABLES_DST_ID		    0x8000UL
-	#define CFA_L2_FILTER_ALLOC_REQ_ENABLES_MIRROR_VNIC_ID     0x10000UL
-	u8 l2_addr[6];
-	u8 unused_0;
-	u8 unused_1;
-	u8 l2_addr_mask[6];
-	__le16 l2_ovlan;
-	__le16 l2_ovlan_mask;
-	__le16 l2_ivlan;
-	__le16 l2_ivlan_mask;
-	u8 unused_2;
-	u8 unused_3;
-	u8 t_l2_addr[6];
-	u8 unused_4;
-	u8 unused_5;
-	u8 t_l2_addr_mask[6];
-	__le16 t_l2_ovlan;
-	__le16 t_l2_ovlan_mask;
-	__le16 t_l2_ivlan;
-	__le16 t_l2_ivlan_mask;
-	u8 src_type;
-	#define CFA_L2_FILTER_ALLOC_REQ_SRC_TYPE_NPORT		   0x0UL
-	#define CFA_L2_FILTER_ALLOC_REQ_SRC_TYPE_PF		   0x1UL
-	#define CFA_L2_FILTER_ALLOC_REQ_SRC_TYPE_VF		   0x2UL
-	#define CFA_L2_FILTER_ALLOC_REQ_SRC_TYPE_VNIC		   0x3UL
-	#define CFA_L2_FILTER_ALLOC_REQ_SRC_TYPE_KONG		   0x4UL
-	#define CFA_L2_FILTER_ALLOC_REQ_SRC_TYPE_APE		   0x5UL
-	#define CFA_L2_FILTER_ALLOC_REQ_SRC_TYPE_BONO		   0x6UL
-	#define CFA_L2_FILTER_ALLOC_REQ_SRC_TYPE_TANG		   0x7UL
-	u8 unused_6;
-	__le32 src_id;
-	u8 tunnel_type;
-	#define CFA_L2_FILTER_ALLOC_REQ_TUNNEL_TYPE_NONTUNNEL     0x0UL
-	#define CFA_L2_FILTER_ALLOC_REQ_TUNNEL_TYPE_VXLAN	   0x1UL
-	#define CFA_L2_FILTER_ALLOC_REQ_TUNNEL_TYPE_NVGRE	   0x2UL
-	#define CFA_L2_FILTER_ALLOC_REQ_TUNNEL_TYPE_L2GRE	   0x3UL
-	#define CFA_L2_FILTER_ALLOC_REQ_TUNNEL_TYPE_IPIP	   0x4UL
-	#define CFA_L2_FILTER_ALLOC_REQ_TUNNEL_TYPE_GENEVE	   0x5UL
-	#define CFA_L2_FILTER_ALLOC_REQ_TUNNEL_TYPE_MPLS	   0x6UL
-	#define CFA_L2_FILTER_ALLOC_REQ_TUNNEL_TYPE_STT	   0x7UL
-	#define CFA_L2_FILTER_ALLOC_REQ_TUNNEL_TYPE_IPGRE	   0x8UL
-	#define CFA_L2_FILTER_ALLOC_REQ_TUNNEL_TYPE_VXLAN_V4      0x9UL
-	#define CFA_L2_FILTER_ALLOC_REQ_TUNNEL_TYPE_ANYTUNNEL     0xffUL
-	u8 unused_7;
-	__le16 dst_id;
-	__le16 mirror_vnic_id;
-	u8 pri_hint;
-	#define CFA_L2_FILTER_ALLOC_REQ_PRI_HINT_NO_PREFER	   0x0UL
-	#define CFA_L2_FILTER_ALLOC_REQ_PRI_HINT_ABOVE_FILTER     0x1UL
-	#define CFA_L2_FILTER_ALLOC_REQ_PRI_HINT_BELOW_FILTER     0x2UL
-	#define CFA_L2_FILTER_ALLOC_REQ_PRI_HINT_MAX		   0x3UL
-	#define CFA_L2_FILTER_ALLOC_REQ_PRI_HINT_MIN		   0x4UL
-	u8 unused_8;
-	__le32 unused_9;
-	__le64 l2_filter_id_hint;
-};
-
-/* Output (24 bytes) */
+	__le16	req_type;
+	__le16	cmpl_ring;
+	__le16	seq_id;
+	__le16	target_id;
+	__le64	resp_addr;
+	__le32	flags;
+	#define CFA_L2_FILTER_ALLOC_REQ_FLAGS_PATH          0x1UL
+	#define CFA_L2_FILTER_ALLOC_REQ_FLAGS_PATH_TX         0x0UL
+	#define CFA_L2_FILTER_ALLOC_REQ_FLAGS_PATH_RX         0x1UL
+	#define CFA_L2_FILTER_ALLOC_REQ_FLAGS_PATH_LAST      CFA_L2_FILTER_ALLOC_REQ_FLAGS_PATH_RX
+	#define CFA_L2_FILTER_ALLOC_REQ_FLAGS_LOOPBACK      0x2UL
+	#define CFA_L2_FILTER_ALLOC_REQ_FLAGS_DROP          0x4UL
+	#define CFA_L2_FILTER_ALLOC_REQ_FLAGS_OUTERMOST     0x8UL
+	__le32	enables;
+	#define CFA_L2_FILTER_ALLOC_REQ_ENABLES_L2_ADDR             0x1UL
+	#define CFA_L2_FILTER_ALLOC_REQ_ENABLES_L2_ADDR_MASK        0x2UL
+	#define CFA_L2_FILTER_ALLOC_REQ_ENABLES_L2_OVLAN            0x4UL
+	#define CFA_L2_FILTER_ALLOC_REQ_ENABLES_L2_OVLAN_MASK       0x8UL
+	#define CFA_L2_FILTER_ALLOC_REQ_ENABLES_L2_IVLAN            0x10UL
+	#define CFA_L2_FILTER_ALLOC_REQ_ENABLES_L2_IVLAN_MASK       0x20UL
+	#define CFA_L2_FILTER_ALLOC_REQ_ENABLES_T_L2_ADDR           0x40UL
+	#define CFA_L2_FILTER_ALLOC_REQ_ENABLES_T_L2_ADDR_MASK      0x80UL
+	#define CFA_L2_FILTER_ALLOC_REQ_ENABLES_T_L2_OVLAN          0x100UL
+	#define CFA_L2_FILTER_ALLOC_REQ_ENABLES_T_L2_OVLAN_MASK     0x200UL
+	#define CFA_L2_FILTER_ALLOC_REQ_ENABLES_T_L2_IVLAN          0x400UL
+	#define CFA_L2_FILTER_ALLOC_REQ_ENABLES_T_L2_IVLAN_MASK     0x800UL
+	#define CFA_L2_FILTER_ALLOC_REQ_ENABLES_SRC_TYPE            0x1000UL
+	#define CFA_L2_FILTER_ALLOC_REQ_ENABLES_SRC_ID              0x2000UL
+	#define CFA_L2_FILTER_ALLOC_REQ_ENABLES_TUNNEL_TYPE         0x4000UL
+	#define CFA_L2_FILTER_ALLOC_REQ_ENABLES_DST_ID              0x8000UL
+	#define CFA_L2_FILTER_ALLOC_REQ_ENABLES_MIRROR_VNIC_ID      0x10000UL
+	u8	l2_addr[6];
+	u8	unused_0[2];
+	u8	l2_addr_mask[6];
+	__le16	l2_ovlan;
+	__le16	l2_ovlan_mask;
+	__le16	l2_ivlan;
+	__le16	l2_ivlan_mask;
+	u8	unused_1[2];
+	u8	t_l2_addr[6];
+	u8	unused_2[2];
+	u8	t_l2_addr_mask[6];
+	__le16	t_l2_ovlan;
+	__le16	t_l2_ovlan_mask;
+	__le16	t_l2_ivlan;
+	__le16	t_l2_ivlan_mask;
+	u8	src_type;
+	#define CFA_L2_FILTER_ALLOC_REQ_SRC_TYPE_NPORT 0x0UL
+	#define CFA_L2_FILTER_ALLOC_REQ_SRC_TYPE_PF    0x1UL
+	#define CFA_L2_FILTER_ALLOC_REQ_SRC_TYPE_VF    0x2UL
+	#define CFA_L2_FILTER_ALLOC_REQ_SRC_TYPE_VNIC  0x3UL
+	#define CFA_L2_FILTER_ALLOC_REQ_SRC_TYPE_KONG  0x4UL
+	#define CFA_L2_FILTER_ALLOC_REQ_SRC_TYPE_APE   0x5UL
+	#define CFA_L2_FILTER_ALLOC_REQ_SRC_TYPE_BONO  0x6UL
+	#define CFA_L2_FILTER_ALLOC_REQ_SRC_TYPE_TANG  0x7UL
+	#define CFA_L2_FILTER_ALLOC_REQ_SRC_TYPE_LAST CFA_L2_FILTER_ALLOC_REQ_SRC_TYPE_TANG
+	u8	unused_3;
+	__le32	src_id;
+	u8	tunnel_type;
+	#define CFA_L2_FILTER_ALLOC_REQ_TUNNEL_TYPE_NONTUNNEL 0x0UL
+	#define CFA_L2_FILTER_ALLOC_REQ_TUNNEL_TYPE_VXLAN     0x1UL
+	#define CFA_L2_FILTER_ALLOC_REQ_TUNNEL_TYPE_NVGRE     0x2UL
+	#define CFA_L2_FILTER_ALLOC_REQ_TUNNEL_TYPE_L2GRE     0x3UL
+	#define CFA_L2_FILTER_ALLOC_REQ_TUNNEL_TYPE_IPIP      0x4UL
+	#define CFA_L2_FILTER_ALLOC_REQ_TUNNEL_TYPE_GENEVE    0x5UL
+	#define CFA_L2_FILTER_ALLOC_REQ_TUNNEL_TYPE_MPLS      0x6UL
+	#define CFA_L2_FILTER_ALLOC_REQ_TUNNEL_TYPE_STT       0x7UL
+	#define CFA_L2_FILTER_ALLOC_REQ_TUNNEL_TYPE_IPGRE     0x8UL
+	#define CFA_L2_FILTER_ALLOC_REQ_TUNNEL_TYPE_VXLAN_V4  0x9UL
+	#define CFA_L2_FILTER_ALLOC_REQ_TUNNEL_TYPE_ANYTUNNEL 0xffUL
+	#define CFA_L2_FILTER_ALLOC_REQ_TUNNEL_TYPE_LAST     CFA_L2_FILTER_ALLOC_REQ_TUNNEL_TYPE_ANYTUNNEL
+	u8	unused_4;
+	__le16	dst_id;
+	__le16	mirror_vnic_id;
+	u8	pri_hint;
+	#define CFA_L2_FILTER_ALLOC_REQ_PRI_HINT_NO_PREFER    0x0UL
+	#define CFA_L2_FILTER_ALLOC_REQ_PRI_HINT_ABOVE_FILTER 0x1UL
+	#define CFA_L2_FILTER_ALLOC_REQ_PRI_HINT_BELOW_FILTER 0x2UL
+	#define CFA_L2_FILTER_ALLOC_REQ_PRI_HINT_MAX          0x3UL
+	#define CFA_L2_FILTER_ALLOC_REQ_PRI_HINT_MIN          0x4UL
+	#define CFA_L2_FILTER_ALLOC_REQ_PRI_HINT_LAST        CFA_L2_FILTER_ALLOC_REQ_PRI_HINT_MIN
+	u8	unused_5;
+	__le32	unused_6;
+	__le64	l2_filter_id_hint;
+};
+
+/* hwrm_cfa_l2_filter_alloc_output (size:192b/24B) */
 struct hwrm_cfa_l2_filter_alloc_output {
-	__le16 error_code;
-	__le16 req_type;
-	__le16 seq_id;
-	__le16 resp_len;
-	__le64 l2_filter_id;
-	__le32 flow_id;
-	u8 unused_0;
-	u8 unused_1;
-	u8 unused_2;
-	u8 valid;
-};
-
-/* hwrm_cfa_l2_filter_free */
-/* Input (24 bytes) */
+	__le16	error_code;
+	__le16	req_type;
+	__le16	seq_id;
+	__le16	resp_len;
+	__le64	l2_filter_id;
+	__le32	flow_id;
+	u8	unused_0[3];
+	u8	valid;
+};
+
+/* hwrm_cfa_l2_filter_free_input (size:192b/24B) */
 struct hwrm_cfa_l2_filter_free_input {
-	__le16 req_type;
-	__le16 cmpl_ring;
-	__le16 seq_id;
-	__le16 target_id;
-	__le64 resp_addr;
-	__le64 l2_filter_id;
+	__le16	req_type;
+	__le16	cmpl_ring;
+	__le16	seq_id;
+	__le16	target_id;
+	__le64	resp_addr;
+	__le64	l2_filter_id;
 };
 
-/* Output (16 bytes) */
+/* hwrm_cfa_l2_filter_free_output (size:128b/16B) */
 struct hwrm_cfa_l2_filter_free_output {
-	__le16 error_code;
-	__le16 req_type;
-	__le16 seq_id;
-	__le16 resp_len;
-	__le32 unused_0;
-	u8 unused_1;
-	u8 unused_2;
-	u8 unused_3;
-	u8 valid;
-};
-
-/* hwrm_cfa_l2_filter_cfg */
-/* Input (40 bytes) */
+	__le16	error_code;
+	__le16	req_type;
+	__le16	seq_id;
+	__le16	resp_len;
+	u8	unused_0[7];
+	u8	valid;
+};
+
+/* hwrm_cfa_l2_filter_cfg_input (size:320b/40B) */
 struct hwrm_cfa_l2_filter_cfg_input {
-	__le16 req_type;
-	__le16 cmpl_ring;
-	__le16 seq_id;
-	__le16 target_id;
-	__le64 resp_addr;
-	__le32 flags;
-	#define CFA_L2_FILTER_CFG_REQ_FLAGS_PATH		    0x1UL
-	#define CFA_L2_FILTER_CFG_REQ_FLAGS_PATH_TX		   (0x0UL << 0)
-	#define CFA_L2_FILTER_CFG_REQ_FLAGS_PATH_RX		   (0x1UL << 0)
-	#define CFA_L2_FILTER_CFG_REQ_FLAGS_PATH_LAST    CFA_L2_FILTER_CFG_REQ_FLAGS_PATH_RX
-	#define CFA_L2_FILTER_CFG_REQ_FLAGS_DROP		    0x2UL
-	__le32 enables;
-	#define CFA_L2_FILTER_CFG_REQ_ENABLES_DST_ID		    0x1UL
-	#define CFA_L2_FILTER_CFG_REQ_ENABLES_NEW_MIRROR_VNIC_ID   0x2UL
-	__le64 l2_filter_id;
-	__le32 dst_id;
-	__le32 new_mirror_vnic_id;
-};
-
-/* Output (16 bytes) */
+	__le16	req_type;
+	__le16	cmpl_ring;
+	__le16	seq_id;
+	__le16	target_id;
+	__le64	resp_addr;
+	__le32	flags;
+	#define CFA_L2_FILTER_CFG_REQ_FLAGS_PATH     0x1UL
+	#define CFA_L2_FILTER_CFG_REQ_FLAGS_PATH_TX    0x0UL
+	#define CFA_L2_FILTER_CFG_REQ_FLAGS_PATH_RX    0x1UL
+	#define CFA_L2_FILTER_CFG_REQ_FLAGS_PATH_LAST CFA_L2_FILTER_CFG_REQ_FLAGS_PATH_RX
+	#define CFA_L2_FILTER_CFG_REQ_FLAGS_DROP     0x2UL
+	__le32	enables;
+	#define CFA_L2_FILTER_CFG_REQ_ENABLES_DST_ID                 0x1UL
+	#define CFA_L2_FILTER_CFG_REQ_ENABLES_NEW_MIRROR_VNIC_ID     0x2UL
+	__le64	l2_filter_id;
+	__le32	dst_id;
+	__le32	new_mirror_vnic_id;
+};
+
+/* hwrm_cfa_l2_filter_cfg_output (size:128b/16B) */
 struct hwrm_cfa_l2_filter_cfg_output {
-	__le16 error_code;
-	__le16 req_type;
-	__le16 seq_id;
-	__le16 resp_len;
-	__le32 unused_0;
-	u8 unused_1;
-	u8 unused_2;
-	u8 unused_3;
-	u8 valid;
-};
-
-/* hwrm_cfa_l2_set_rx_mask */
-/* Input (56 bytes) */
+	__le16	error_code;
+	__le16	req_type;
+	__le16	seq_id;
+	__le16	resp_len;
+	u8	unused_0[7];
+	u8	valid;
+};
+
+/* hwrm_cfa_l2_set_rx_mask_input (size:448b/56B) */
 struct hwrm_cfa_l2_set_rx_mask_input {
-	__le16 req_type;
-	__le16 cmpl_ring;
-	__le16 seq_id;
-	__le16 target_id;
-	__le64 resp_addr;
-	__le32 vnic_id;
-	__le32 mask;
-	#define CFA_L2_SET_RX_MASK_REQ_MASK_RESERVED		    0x1UL
-	#define CFA_L2_SET_RX_MASK_REQ_MASK_MCAST		    0x2UL
-	#define CFA_L2_SET_RX_MASK_REQ_MASK_ALL_MCAST		    0x4UL
-	#define CFA_L2_SET_RX_MASK_REQ_MASK_BCAST		    0x8UL
-	#define CFA_L2_SET_RX_MASK_REQ_MASK_PROMISCUOUS	    0x10UL
-	#define CFA_L2_SET_RX_MASK_REQ_MASK_OUTERMOST		    0x20UL
-	#define CFA_L2_SET_RX_MASK_REQ_MASK_VLANONLY		    0x40UL
-	#define CFA_L2_SET_RX_MASK_REQ_MASK_VLAN_NONVLAN	    0x80UL
-	#define CFA_L2_SET_RX_MASK_REQ_MASK_ANYVLAN_NONVLAN	    0x100UL
-	__le64 mc_tbl_addr;
-	__le32 num_mc_entries;
-	__le32 unused_0;
-	__le64 vlan_tag_tbl_addr;
-	__le32 num_vlan_tags;
-	__le32 unused_1;
-};
-
-/* Output (16 bytes) */
+	__le16	req_type;
+	__le16	cmpl_ring;
+	__le16	seq_id;
+	__le16	target_id;
+	__le64	resp_addr;
+	__le32	vnic_id;
+	__le32	mask;
+	#define CFA_L2_SET_RX_MASK_REQ_MASK_MCAST               0x2UL
+	#define CFA_L2_SET_RX_MASK_REQ_MASK_ALL_MCAST           0x4UL
+	#define CFA_L2_SET_RX_MASK_REQ_MASK_BCAST               0x8UL
+	#define CFA_L2_SET_RX_MASK_REQ_MASK_PROMISCUOUS         0x10UL
+	#define CFA_L2_SET_RX_MASK_REQ_MASK_OUTERMOST           0x20UL
+	#define CFA_L2_SET_RX_MASK_REQ_MASK_VLANONLY            0x40UL
+	#define CFA_L2_SET_RX_MASK_REQ_MASK_VLAN_NONVLAN        0x80UL
+	#define CFA_L2_SET_RX_MASK_REQ_MASK_ANYVLAN_NONVLAN     0x100UL
+	__le64	mc_tbl_addr;
+	__le32	num_mc_entries;
+	u8	unused_0[4];
+	__le64	vlan_tag_tbl_addr;
+	__le32	num_vlan_tags;
+	u8	unused_1[4];
+};
+
+/* hwrm_cfa_l2_set_rx_mask_output (size:128b/16B) */
 struct hwrm_cfa_l2_set_rx_mask_output {
-	__le16 error_code;
-	__le16 req_type;
-	__le16 seq_id;
-	__le16 resp_len;
-	__le32 unused_0;
-	u8 unused_1;
-	u8 unused_2;
-	u8 unused_3;
-	u8 valid;
-};
-
-/* Command specific Error Codes (8 bytes) */
+	__le16	error_code;
+	__le16	req_type;
+	__le16	seq_id;
+	__le16	resp_len;
+	u8	unused_0[7];
+	u8	valid;
+};
+
+/* hwrm_cfa_l2_set_rx_mask_cmd_err (size:64b/8B) */
 struct hwrm_cfa_l2_set_rx_mask_cmd_err {
-	u8 code;
-	#define CFA_L2_SET_RX_MASK_CMD_ERR_CODE_UNKNOWN	   0x0UL
+	u8	code;
+	#define CFA_L2_SET_RX_MASK_CMD_ERR_CODE_UNKNOWN                    0x0UL
 	#define CFA_L2_SET_RX_MASK_CMD_ERR_CODE_NTUPLE_FILTER_CONFLICT_ERR 0x1UL
-	u8 unused_0[7];
+	#define CFA_L2_SET_RX_MASK_CMD_ERR_CODE_LAST                      CFA_L2_SET_RX_MASK_CMD_ERR_CODE_NTUPLE_FILTER_CONFLICT_ERR
+	u8	unused_0[7];
 };
 
-/* hwrm_cfa_tunnel_filter_alloc */
-/* Input (88 bytes) */
+/* hwrm_cfa_tunnel_filter_alloc_input (size:704b/88B) */
 struct hwrm_cfa_tunnel_filter_alloc_input {
-	__le16 req_type;
-	__le16 cmpl_ring;
-	__le16 seq_id;
-	__le16 target_id;
-	__le64 resp_addr;
-	__le32 flags;
-	#define CFA_TUNNEL_FILTER_ALLOC_REQ_FLAGS_LOOPBACK	    0x1UL
-	__le32 enables;
-	#define CFA_TUNNEL_FILTER_ALLOC_REQ_ENABLES_L2_FILTER_ID   0x1UL
-	#define CFA_TUNNEL_FILTER_ALLOC_REQ_ENABLES_L2_ADDR	    0x2UL
-	#define CFA_TUNNEL_FILTER_ALLOC_REQ_ENABLES_L2_IVLAN       0x4UL
-	#define CFA_TUNNEL_FILTER_ALLOC_REQ_ENABLES_L3_ADDR	    0x8UL
-	#define CFA_TUNNEL_FILTER_ALLOC_REQ_ENABLES_L3_ADDR_TYPE   0x10UL
-	#define CFA_TUNNEL_FILTER_ALLOC_REQ_ENABLES_T_L3_ADDR_TYPE 0x20UL
-	#define CFA_TUNNEL_FILTER_ALLOC_REQ_ENABLES_T_L3_ADDR      0x40UL
-	#define CFA_TUNNEL_FILTER_ALLOC_REQ_ENABLES_TUNNEL_TYPE    0x80UL
-	#define CFA_TUNNEL_FILTER_ALLOC_REQ_ENABLES_VNI	    0x100UL
-	#define CFA_TUNNEL_FILTER_ALLOC_REQ_ENABLES_DST_VNIC_ID    0x200UL
-	#define CFA_TUNNEL_FILTER_ALLOC_REQ_ENABLES_MIRROR_VNIC_ID 0x400UL
-	__le64 l2_filter_id;
-	u8 l2_addr[6];
-	__le16 l2_ivlan;
-	__le32 l3_addr[4];
-	__le32 t_l3_addr[4];
-	u8 l3_addr_type;
-	u8 t_l3_addr_type;
-	u8 tunnel_type;
+	__le16	req_type;
+	__le16	cmpl_ring;
+	__le16	seq_id;
+	__le16	target_id;
+	__le64	resp_addr;
+	__le32	flags;
+	#define CFA_TUNNEL_FILTER_ALLOC_REQ_FLAGS_LOOPBACK     0x1UL
+	__le32	enables;
+	#define CFA_TUNNEL_FILTER_ALLOC_REQ_ENABLES_L2_FILTER_ID       0x1UL
+	#define CFA_TUNNEL_FILTER_ALLOC_REQ_ENABLES_L2_ADDR            0x2UL
+	#define CFA_TUNNEL_FILTER_ALLOC_REQ_ENABLES_L2_IVLAN           0x4UL
+	#define CFA_TUNNEL_FILTER_ALLOC_REQ_ENABLES_L3_ADDR            0x8UL
+	#define CFA_TUNNEL_FILTER_ALLOC_REQ_ENABLES_L3_ADDR_TYPE       0x10UL
+	#define CFA_TUNNEL_FILTER_ALLOC_REQ_ENABLES_T_L3_ADDR_TYPE     0x20UL
+	#define CFA_TUNNEL_FILTER_ALLOC_REQ_ENABLES_T_L3_ADDR          0x40UL
+	#define CFA_TUNNEL_FILTER_ALLOC_REQ_ENABLES_TUNNEL_TYPE        0x80UL
+	#define CFA_TUNNEL_FILTER_ALLOC_REQ_ENABLES_VNI                0x100UL
+	#define CFA_TUNNEL_FILTER_ALLOC_REQ_ENABLES_DST_VNIC_ID        0x200UL
+	#define CFA_TUNNEL_FILTER_ALLOC_REQ_ENABLES_MIRROR_VNIC_ID     0x400UL
+	__le64	l2_filter_id;
+	u8	l2_addr[6];
+	__le16	l2_ivlan;
+	__le32	l3_addr[4];
+	__le32	t_l3_addr[4];
+	u8	l3_addr_type;
+	u8	t_l3_addr_type;
+	u8	tunnel_type;
 	#define CFA_TUNNEL_FILTER_ALLOC_REQ_TUNNEL_TYPE_NONTUNNEL 0x0UL
 	#define CFA_TUNNEL_FILTER_ALLOC_REQ_TUNNEL_TYPE_VXLAN     0x1UL
 	#define CFA_TUNNEL_FILTER_ALLOC_REQ_TUNNEL_TYPE_NVGRE     0x2UL
@@ -4174,158 +4372,204 @@ struct hwrm_cfa_tunnel_filter_alloc_input {
 	#define CFA_TUNNEL_FILTER_ALLOC_REQ_TUNNEL_TYPE_IPGRE     0x8UL
 	#define CFA_TUNNEL_FILTER_ALLOC_REQ_TUNNEL_TYPE_VXLAN_V4  0x9UL
 	#define CFA_TUNNEL_FILTER_ALLOC_REQ_TUNNEL_TYPE_ANYTUNNEL 0xffUL
-	u8 unused_0;
-	__le32 vni;
-	__le32 dst_vnic_id;
-	__le32 mirror_vnic_id;
+	#define CFA_TUNNEL_FILTER_ALLOC_REQ_TUNNEL_TYPE_LAST     CFA_TUNNEL_FILTER_ALLOC_REQ_TUNNEL_TYPE_ANYTUNNEL
+	u8	tunnel_flags;
+	#define CFA_TUNNEL_FILTER_ALLOC_REQ_TUNNEL_FLAGS_TUN_FLAGS_OAM_CHECKSUM_EXPLHDR     0x1UL
+	#define CFA_TUNNEL_FILTER_ALLOC_REQ_TUNNEL_FLAGS_TUN_FLAGS_CRITICAL_OPT_S1          0x2UL
+	#define CFA_TUNNEL_FILTER_ALLOC_REQ_TUNNEL_FLAGS_TUN_FLAGS_EXTHDR_SEQNUM_S0         0x4UL
+	__le32	vni;
+	__le32	dst_vnic_id;
+	__le32	mirror_vnic_id;
 };
 
-/* Output (24 bytes) */
+/* hwrm_cfa_tunnel_filter_alloc_output (size:192b/24B) */
 struct hwrm_cfa_tunnel_filter_alloc_output {
-	__le16 error_code;
-	__le16 req_type;
-	__le16 seq_id;
-	__le16 resp_len;
-	__le64 tunnel_filter_id;
-	__le32 flow_id;
-	u8 unused_0;
-	u8 unused_1;
-	u8 unused_2;
-	u8 valid;
-};
-
-/* hwrm_cfa_tunnel_filter_free */
-/* Input (24 bytes) */
+	__le16	error_code;
+	__le16	req_type;
+	__le16	seq_id;
+	__le16	resp_len;
+	__le64	tunnel_filter_id;
+	__le32	flow_id;
+	u8	unused_0[3];
+	u8	valid;
+};
+
+/* hwrm_cfa_tunnel_filter_free_input (size:192b/24B) */
 struct hwrm_cfa_tunnel_filter_free_input {
-	__le16 req_type;
-	__le16 cmpl_ring;
-	__le16 seq_id;
-	__le16 target_id;
-	__le64 resp_addr;
-	__le64 tunnel_filter_id;
+	__le16	req_type;
+	__le16	cmpl_ring;
+	__le16	seq_id;
+	__le16	target_id;
+	__le64	resp_addr;
+	__le64	tunnel_filter_id;
 };
 
-/* Output (16 bytes) */
+/* hwrm_cfa_tunnel_filter_free_output (size:128b/16B) */
 struct hwrm_cfa_tunnel_filter_free_output {
-	__le16 error_code;
-	__le16 req_type;
-	__le16 seq_id;
-	__le16 resp_len;
-	__le32 unused_0;
-	u8 unused_1;
-	u8 unused_2;
-	u8 unused_3;
-	u8 valid;
-};
-
-/* hwrm_cfa_encap_record_alloc */
-/* Input (32 bytes) */
+	__le16	error_code;
+	__le16	req_type;
+	__le16	seq_id;
+	__le16	resp_len;
+	u8	unused_0[7];
+	u8	valid;
+};
+
+/* hwrm_vxlan_ipv4_hdr (size:128b/16B) */
+struct hwrm_vxlan_ipv4_hdr {
+	u8	ver_hlen;
+	#define VXLAN_IPV4_HDR_VER_HLEN_HEADER_LENGTH_MASK 0xfUL
+	#define VXLAN_IPV4_HDR_VER_HLEN_HEADER_LENGTH_SFT 0
+	#define VXLAN_IPV4_HDR_VER_HLEN_VERSION_MASK      0xf0UL
+	#define VXLAN_IPV4_HDR_VER_HLEN_VERSION_SFT       4
+	u8	tos;
+	__be16	ip_id;
+	__be16	flags_frag_offset;
+	u8	ttl;
+	u8	protocol;
+	__be32	src_ip_addr;
+	__be32	dest_ip_addr;
+};
+
+/* hwrm_vxlan_ipv6_hdr (size:320b/40B) */
+struct hwrm_vxlan_ipv6_hdr {
+	__be32	ver_tc_flow_label;
+	#define VXLAN_IPV6_HDR_VER_TC_FLOW_LABEL_VER_SFT         0x1cUL
+	#define VXLAN_IPV6_HDR_VER_TC_FLOW_LABEL_VER_MASK        0xf0000000UL
+	#define VXLAN_IPV6_HDR_VER_TC_FLOW_LABEL_TC_SFT          0x14UL
+	#define VXLAN_IPV6_HDR_VER_TC_FLOW_LABEL_TC_MASK         0xff00000UL
+	#define VXLAN_IPV6_HDR_VER_TC_FLOW_LABEL_FLOW_LABEL_SFT  0x0UL
+	#define VXLAN_IPV6_HDR_VER_TC_FLOW_LABEL_FLOW_LABEL_MASK 0xfffffUL
+	#define VXLAN_IPV6_HDR_VER_TC_FLOW_LABEL_LAST           VXLAN_IPV6_HDR_VER_TC_FLOW_LABEL_FLOW_LABEL_MASK
+	__be16	payload_len;
+	u8	next_hdr;
+	u8	ttl;
+	__be32	src_ip_addr[4];
+	__be32	dest_ip_addr[4];
+};
+
+/* hwrm_cfa_encap_data_vxlan (size:576b/72B) */
+struct hwrm_cfa_encap_data_vxlan {
+	u8	src_mac_addr[6];
+	__le16	unused_0;
+	u8	dst_mac_addr[6];
+	u8	num_vlan_tags;
+	u8	unused_1;
+	__be16	ovlan_tpid;
+	__be16	ovlan_tci;
+	__be16	ivlan_tpid;
+	__be16	ivlan_tci;
+	__le32	l3[10];
+	#define CFA_ENCAP_DATA_VXLAN_L3_VER_MASK 0xfUL
+	#define CFA_ENCAP_DATA_VXLAN_L3_VER_IPV4 0x4UL
+	#define CFA_ENCAP_DATA_VXLAN_L3_VER_IPV6 0x6UL
+	#define CFA_ENCAP_DATA_VXLAN_L3_LAST    CFA_ENCAP_DATA_VXLAN_L3_VER_IPV6
+	__be16	src_port;
+	__be16	dst_port;
+	__be32	vni;
+};
+
+/* hwrm_cfa_encap_record_alloc_input (size:832b/104B) */
 struct hwrm_cfa_encap_record_alloc_input {
-	__le16 req_type;
-	__le16 cmpl_ring;
-	__le16 seq_id;
-	__le16 target_id;
-	__le64 resp_addr;
-	__le32 flags;
-	#define CFA_ENCAP_RECORD_ALLOC_REQ_FLAGS_LOOPBACK	    0x1UL
-	u8 encap_type;
-	#define CFA_ENCAP_RECORD_ALLOC_REQ_ENCAP_TYPE_VXLAN       0x1UL
-	#define CFA_ENCAP_RECORD_ALLOC_REQ_ENCAP_TYPE_NVGRE       0x2UL
-	#define CFA_ENCAP_RECORD_ALLOC_REQ_ENCAP_TYPE_L2GRE       0x3UL
-	#define CFA_ENCAP_RECORD_ALLOC_REQ_ENCAP_TYPE_IPIP	   0x4UL
-	#define CFA_ENCAP_RECORD_ALLOC_REQ_ENCAP_TYPE_GENEVE      0x5UL
-	#define CFA_ENCAP_RECORD_ALLOC_REQ_ENCAP_TYPE_MPLS	   0x6UL
-	#define CFA_ENCAP_RECORD_ALLOC_REQ_ENCAP_TYPE_VLAN	   0x7UL
-	#define CFA_ENCAP_RECORD_ALLOC_REQ_ENCAP_TYPE_IPGRE       0x8UL
-	u8 unused_0;
-	__le16 unused_1;
-	__le32 encap_data[20];
-};
-
-/* Output (16 bytes) */
+	__le16	req_type;
+	__le16	cmpl_ring;
+	__le16	seq_id;
+	__le16	target_id;
+	__le64	resp_addr;
+	__le32	flags;
+	#define CFA_ENCAP_RECORD_ALLOC_REQ_FLAGS_LOOPBACK     0x1UL
+	u8	encap_type;
+	#define CFA_ENCAP_RECORD_ALLOC_REQ_ENCAP_TYPE_VXLAN  0x1UL
+	#define CFA_ENCAP_RECORD_ALLOC_REQ_ENCAP_TYPE_NVGRE  0x2UL
+	#define CFA_ENCAP_RECORD_ALLOC_REQ_ENCAP_TYPE_L2GRE  0x3UL
+	#define CFA_ENCAP_RECORD_ALLOC_REQ_ENCAP_TYPE_IPIP   0x4UL
+	#define CFA_ENCAP_RECORD_ALLOC_REQ_ENCAP_TYPE_GENEVE 0x5UL
+	#define CFA_ENCAP_RECORD_ALLOC_REQ_ENCAP_TYPE_MPLS   0x6UL
+	#define CFA_ENCAP_RECORD_ALLOC_REQ_ENCAP_TYPE_VLAN   0x7UL
+	#define CFA_ENCAP_RECORD_ALLOC_REQ_ENCAP_TYPE_IPGRE  0x8UL
+	#define CFA_ENCAP_RECORD_ALLOC_REQ_ENCAP_TYPE_LAST  CFA_ENCAP_RECORD_ALLOC_REQ_ENCAP_TYPE_IPGRE
+	u8	unused_0[3];
+	__le32	encap_data[20];
+};
+
+/* hwrm_cfa_encap_record_alloc_output (size:128b/16B) */
 struct hwrm_cfa_encap_record_alloc_output {
-	__le16 error_code;
-	__le16 req_type;
-	__le16 seq_id;
-	__le16 resp_len;
-	__le32 encap_record_id;
-	u8 unused_0;
-	u8 unused_1;
-	u8 unused_2;
-	u8 valid;
-};
-
-/* hwrm_cfa_encap_record_free */
-/* Input (24 bytes) */
+	__le16	error_code;
+	__le16	req_type;
+	__le16	seq_id;
+	__le16	resp_len;
+	__le32	encap_record_id;
+	u8	unused_0[3];
+	u8	valid;
+};
+
+/* hwrm_cfa_encap_record_free_input (size:192b/24B) */
 struct hwrm_cfa_encap_record_free_input {
-	__le16 req_type;
-	__le16 cmpl_ring;
-	__le16 seq_id;
-	__le16 target_id;
-	__le64 resp_addr;
-	__le32 encap_record_id;
-	__le32 unused_0;
+	__le16	req_type;
+	__le16	cmpl_ring;
+	__le16	seq_id;
+	__le16	target_id;
+	__le64	resp_addr;
+	__le32	encap_record_id;
+	u8	unused_0[4];
 };
 
-/* Output (16 bytes) */
+/* hwrm_cfa_encap_record_free_output (size:128b/16B) */
 struct hwrm_cfa_encap_record_free_output {
-	__le16 error_code;
-	__le16 req_type;
-	__le16 seq_id;
-	__le16 resp_len;
-	__le32 unused_0;
-	u8 unused_1;
-	u8 unused_2;
-	u8 unused_3;
-	u8 valid;
-};
-
-/* hwrm_cfa_ntuple_filter_alloc */
-/* Input (128 bytes) */
+	__le16	error_code;
+	__le16	req_type;
+	__le16	seq_id;
+	__le16	resp_len;
+	u8	unused_0[7];
+	u8	valid;
+};
+
+/* hwrm_cfa_ntuple_filter_alloc_input (size:1024b/128B) */
 struct hwrm_cfa_ntuple_filter_alloc_input {
-	__le16 req_type;
-	__le16 cmpl_ring;
-	__le16 seq_id;
-	__le16 target_id;
-	__le64 resp_addr;
-	__le32 flags;
-	#define CFA_NTUPLE_FILTER_ALLOC_REQ_FLAGS_LOOPBACK	    0x1UL
-	#define CFA_NTUPLE_FILTER_ALLOC_REQ_FLAGS_DROP		    0x2UL
-	#define CFA_NTUPLE_FILTER_ALLOC_REQ_FLAGS_METER	    0x4UL
-	__le32 enables;
-	#define CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_L2_FILTER_ID   0x1UL
-	#define CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_ETHERTYPE      0x2UL
-	#define CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_TUNNEL_TYPE    0x4UL
-	#define CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_SRC_MACADDR    0x8UL
-	#define CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_IPADDR_TYPE    0x10UL
-	#define CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_SRC_IPADDR     0x20UL
-	#define CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_SRC_IPADDR_MASK 0x40UL
-	#define CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_IPADDR     0x80UL
-	#define CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_IPADDR_MASK 0x100UL
-	#define CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_IP_PROTOCOL    0x200UL
-	#define CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_SRC_PORT       0x400UL
-	#define CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_SRC_PORT_MASK  0x800UL
-	#define CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_PORT       0x1000UL
-	#define CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_PORT_MASK  0x2000UL
-	#define CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_PRI_HINT       0x4000UL
-	#define CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_NTUPLE_FILTER_ID 0x8000UL
-	#define CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_ID	    0x10000UL
-	#define CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_MIRROR_VNIC_ID 0x20000UL
-	#define CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_MACADDR    0x40000UL
-	__le64 l2_filter_id;
-	u8 src_macaddr[6];
-	__be16 ethertype;
-	u8 ip_addr_type;
-	#define CFA_NTUPLE_FILTER_ALLOC_REQ_IP_ADDR_TYPE_UNKNOWN  0x0UL
-	#define CFA_NTUPLE_FILTER_ALLOC_REQ_IP_ADDR_TYPE_IPV4     0x4UL
-	#define CFA_NTUPLE_FILTER_ALLOC_REQ_IP_ADDR_TYPE_IPV6     0x6UL
-	u8 ip_protocol;
-	#define CFA_NTUPLE_FILTER_ALLOC_REQ_IP_PROTOCOL_UNKNOWN   0x0UL
-	#define CFA_NTUPLE_FILTER_ALLOC_REQ_IP_PROTOCOL_TCP       0x6UL
-	#define CFA_NTUPLE_FILTER_ALLOC_REQ_IP_PROTOCOL_UDP       0x11UL
-	__le16 dst_id;
-	__le16 mirror_vnic_id;
-	u8 tunnel_type;
+	__le16	req_type;
+	__le16	cmpl_ring;
+	__le16	seq_id;
+	__le16	target_id;
+	__le64	resp_addr;
+	__le32	flags;
+	#define CFA_NTUPLE_FILTER_ALLOC_REQ_FLAGS_LOOPBACK     0x1UL
+	#define CFA_NTUPLE_FILTER_ALLOC_REQ_FLAGS_DROP         0x2UL
+	#define CFA_NTUPLE_FILTER_ALLOC_REQ_FLAGS_METER        0x4UL
+	__le32	enables;
+	#define CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_L2_FILTER_ID         0x1UL
+	#define CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_ETHERTYPE            0x2UL
+	#define CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_TUNNEL_TYPE          0x4UL
+	#define CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_SRC_MACADDR          0x8UL
+	#define CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_IPADDR_TYPE          0x10UL
+	#define CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_SRC_IPADDR           0x20UL
+	#define CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_SRC_IPADDR_MASK      0x40UL
+	#define CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_IPADDR           0x80UL
+	#define CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_IPADDR_MASK      0x100UL
+	#define CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_IP_PROTOCOL          0x200UL
+	#define CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_SRC_PORT             0x400UL
+	#define CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_SRC_PORT_MASK        0x800UL
+	#define CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_PORT             0x1000UL
+	#define CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_PORT_MASK        0x2000UL
+	#define CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_PRI_HINT             0x4000UL
+	#define CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_NTUPLE_FILTER_ID     0x8000UL
+	#define CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_ID               0x10000UL
+	#define CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_MIRROR_VNIC_ID       0x20000UL
+	#define CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_MACADDR          0x40000UL
+	__le64	l2_filter_id;
+	u8	src_macaddr[6];
+	__be16	ethertype;
+	u8	ip_addr_type;
+	#define CFA_NTUPLE_FILTER_ALLOC_REQ_IP_ADDR_TYPE_UNKNOWN 0x0UL
+	#define CFA_NTUPLE_FILTER_ALLOC_REQ_IP_ADDR_TYPE_IPV4    0x4UL
+	#define CFA_NTUPLE_FILTER_ALLOC_REQ_IP_ADDR_TYPE_IPV6    0x6UL
+	#define CFA_NTUPLE_FILTER_ALLOC_REQ_IP_ADDR_TYPE_LAST   CFA_NTUPLE_FILTER_ALLOC_REQ_IP_ADDR_TYPE_IPV6
+	u8	ip_protocol;
+	#define CFA_NTUPLE_FILTER_ALLOC_REQ_IP_PROTOCOL_UNKNOWN 0x0UL
+	#define CFA_NTUPLE_FILTER_ALLOC_REQ_IP_PROTOCOL_TCP     0x6UL
+	#define CFA_NTUPLE_FILTER_ALLOC_REQ_IP_PROTOCOL_UDP     0x11UL
+	#define CFA_NTUPLE_FILTER_ALLOC_REQ_IP_PROTOCOL_LAST   CFA_NTUPLE_FILTER_ALLOC_REQ_IP_PROTOCOL_UDP
+	__le16	dst_id;
+	__le16	mirror_vnic_id;
+	u8	tunnel_type;
 	#define CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_NONTUNNEL 0x0UL
 	#define CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_VXLAN     0x1UL
 	#define CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_NVGRE     0x2UL
@@ -4337,2221 +4581,1723 @@ struct hwrm_cfa_ntuple_filter_alloc_input {
 	#define CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_IPGRE     0x8UL
 	#define CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_VXLAN_V4  0x9UL
 	#define CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_ANYTUNNEL 0xffUL
-	u8 pri_hint;
-	#define CFA_NTUPLE_FILTER_ALLOC_REQ_PRI_HINT_NO_PREFER    0x0UL
-	#define CFA_NTUPLE_FILTER_ALLOC_REQ_PRI_HINT_ABOVE	   0x1UL
-	#define CFA_NTUPLE_FILTER_ALLOC_REQ_PRI_HINT_BELOW	   0x2UL
-	#define CFA_NTUPLE_FILTER_ALLOC_REQ_PRI_HINT_HIGHEST      0x3UL
-	#define CFA_NTUPLE_FILTER_ALLOC_REQ_PRI_HINT_LOWEST       0x4UL
-	__be32 src_ipaddr[4];
-	__be32 src_ipaddr_mask[4];
-	__be32 dst_ipaddr[4];
-	__be32 dst_ipaddr_mask[4];
-	__be16 src_port;
-	__be16 src_port_mask;
-	__be16 dst_port;
-	__be16 dst_port_mask;
-	__le64 ntuple_filter_id_hint;
-};
-
-/* Output (24 bytes) */
+	#define CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_LAST     CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_ANYTUNNEL
+	u8	pri_hint;
+	#define CFA_NTUPLE_FILTER_ALLOC_REQ_PRI_HINT_NO_PREFER 0x0UL
+	#define CFA_NTUPLE_FILTER_ALLOC_REQ_PRI_HINT_ABOVE     0x1UL
+	#define CFA_NTUPLE_FILTER_ALLOC_REQ_PRI_HINT_BELOW     0x2UL
+	#define CFA_NTUPLE_FILTER_ALLOC_REQ_PRI_HINT_HIGHEST   0x3UL
+	#define CFA_NTUPLE_FILTER_ALLOC_REQ_PRI_HINT_LOWEST    0x4UL
+	#define CFA_NTUPLE_FILTER_ALLOC_REQ_PRI_HINT_LAST     CFA_NTUPLE_FILTER_ALLOC_REQ_PRI_HINT_LOWEST
+	__be32	src_ipaddr[4];
+	__be32	src_ipaddr_mask[4];
+	__be32	dst_ipaddr[4];
+	__be32	dst_ipaddr_mask[4];
+	__be16	src_port;
+	__be16	src_port_mask;
+	__be16	dst_port;
+	__be16	dst_port_mask;
+	__le64	ntuple_filter_id_hint;
+};
+
+/* hwrm_cfa_ntuple_filter_alloc_output (size:192b/24B) */
 struct hwrm_cfa_ntuple_filter_alloc_output {
-	__le16 error_code;
-	__le16 req_type;
-	__le16 seq_id;
-	__le16 resp_len;
-	__le64 ntuple_filter_id;
-	__le32 flow_id;
-	u8 unused_0;
-	u8 unused_1;
-	u8 unused_2;
-	u8 valid;
-};
-
-/* Command specific Error Codes (8 bytes) */
+	__le16	error_code;
+	__le16	req_type;
+	__le16	seq_id;
+	__le16	resp_len;
+	__le64	ntuple_filter_id;
+	__le32	flow_id;
+	u8	unused_0[3];
+	u8	valid;
+};
+
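
An illustrative aside, not part of the diff: a minimal sketch of filling this request, assuming normal kernel context (<linux/types.h>, <linux/if_ether.h>) and the enables bits defined just above this hunk (L2_FILTER_ID, ETHERTYPE, IPADDR_TYPE, IP_PROTOCOL). The trap to notice is the mixed endianness: HWRM control fields are little-endian (__le*), while packet-match fields are wire-order (__be*), so each assignment needs the matching conversion.

/* Sketch only -- the request header (req_type etc.) is assumed to be
 * filled by the driver's usual HWRM send path.
 */
static void fill_ntuple_tcp_dport(struct hwrm_cfa_ntuple_filter_alloc_input *req,
				  __le64 l2_filter_id, u16 dst_vnic, u16 dport)
{
	req->l2_filter_id = l2_filter_id;		/* already __le64 */
	req->ethertype = cpu_to_be16(ETH_P_IP);		/* wire order */
	req->ip_addr_type = CFA_NTUPLE_FILTER_ALLOC_REQ_IP_ADDR_TYPE_IPV4;
	req->ip_protocol = CFA_NTUPLE_FILTER_ALLOC_REQ_IP_PROTOCOL_TCP;
	req->dst_port = cpu_to_be16(dport);		/* wire order */
	req->dst_port_mask = cpu_to_be16(0xffff);	/* exact match */
	req->dst_id = cpu_to_le16(dst_vnic);		/* HWRM (LE) order */
	req->enables =
		cpu_to_le32(CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_L2_FILTER_ID |
			    CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_ETHERTYPE |
			    CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_IPADDR_TYPE |
			    CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_IP_PROTOCOL |
			    CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_PORT |
			    CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_PORT_MASK |
			    CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_ID);
}

On success, the response's ntuple_filter_id is the handle later passed to hwrm_cfa_ntuple_filter_free_input.
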
+/* hwrm_cfa_ntuple_filter_alloc_cmd_err (size:64b/8B) */
 struct hwrm_cfa_ntuple_filter_alloc_cmd_err {
-	u8 code;
-	#define CFA_NTUPLE_FILTER_ALLOC_CMD_ERR_CODE_UNKNOWN      0x0UL
+	u8	code;
+	#define CFA_NTUPLE_FILTER_ALLOC_CMD_ERR_CODE_UNKNOWN                   0x0UL
 	#define CFA_NTUPLE_FILTER_ALLOC_CMD_ERR_CODE_RX_MASK_VLAN_CONFLICT_ERR 0x1UL
-	u8 unused_0[7];
+	#define CFA_NTUPLE_FILTER_ALLOC_CMD_ERR_CODE_LAST                     CFA_NTUPLE_FILTER_ALLOC_CMD_ERR_CODE_RX_MASK_VLAN_CONFLICT_ERR
+	u8	unused_0[7];
 };
 
-/* hwrm_cfa_ntuple_filter_free */
-/* Input (24 bytes) */
+/* hwrm_cfa_ntuple_filter_free_input (size:192b/24B) */
 struct hwrm_cfa_ntuple_filter_free_input {
-	__le16 req_type;
-	__le16 cmpl_ring;
-	__le16 seq_id;
-	__le16 target_id;
-	__le64 resp_addr;
-	__le64 ntuple_filter_id;
+	__le16	req_type;
+	__le16	cmpl_ring;
+	__le16	seq_id;
+	__le16	target_id;
+	__le64	resp_addr;
+	__le64	ntuple_filter_id;
 };
 
-/* Output (16 bytes) */
+/* hwrm_cfa_ntuple_filter_free_output (size:128b/16B) */
 struct hwrm_cfa_ntuple_filter_free_output {
-	__le16 error_code;
-	__le16 req_type;
-	__le16 seq_id;
-	__le16 resp_len;
-	__le32 unused_0;
-	u8 unused_1;
-	u8 unused_2;
-	u8 unused_3;
-	u8 valid;
-};
-
-/* hwrm_cfa_ntuple_filter_cfg */
-/* Input (48 bytes) */
+	__le16	error_code;
+	__le16	req_type;
+	__le16	seq_id;
+	__le16	resp_len;
+	u8	unused_0[7];
+	u8	valid;
+};
+
+/* hwrm_cfa_ntuple_filter_cfg_input (size:384b/48B) */
 struct hwrm_cfa_ntuple_filter_cfg_input {
-	__le16 req_type;
-	__le16 cmpl_ring;
-	__le16 seq_id;
-	__le16 target_id;
-	__le64 resp_addr;
-	__le32 enables;
-	#define CFA_NTUPLE_FILTER_CFG_REQ_ENABLES_NEW_DST_ID       0x1UL
-	#define CFA_NTUPLE_FILTER_CFG_REQ_ENABLES_NEW_MIRROR_VNIC_ID 0x2UL
-	#define CFA_NTUPLE_FILTER_CFG_REQ_ENABLES_NEW_METER_INSTANCE_ID 0x4UL
-	__le32 unused_0;
-	__le64 ntuple_filter_id;
-	__le32 new_dst_id;
-	__le32 new_mirror_vnic_id;
-	__le16 new_meter_instance_id;
+	__le16	req_type;
+	__le16	cmpl_ring;
+	__le16	seq_id;
+	__le16	target_id;
+	__le64	resp_addr;
+	__le32	enables;
+	#define CFA_NTUPLE_FILTER_CFG_REQ_ENABLES_NEW_DST_ID                0x1UL
+	#define CFA_NTUPLE_FILTER_CFG_REQ_ENABLES_NEW_MIRROR_VNIC_ID        0x2UL
+	#define CFA_NTUPLE_FILTER_CFG_REQ_ENABLES_NEW_METER_INSTANCE_ID     0x4UL
+	u8	unused_0[4];
+	__le64	ntuple_filter_id;
+	__le32	new_dst_id;
+	__le32	new_mirror_vnic_id;
+	__le16	new_meter_instance_id;
 	#define CFA_NTUPLE_FILTER_CFG_REQ_NEW_METER_INSTANCE_ID_INVALID 0xffffUL
-	__le16 unused_1[3];
+	#define CFA_NTUPLE_FILTER_CFG_REQ_NEW_METER_INSTANCE_ID_LAST   CFA_NTUPLE_FILTER_CFG_REQ_NEW_METER_INSTANCE_ID_INVALID
+	u8	unused_1[6];
 };
 
-/* Output (16 bytes) */
+/* hwrm_cfa_ntuple_filter_cfg_output (size:128b/16B) */
 struct hwrm_cfa_ntuple_filter_cfg_output {
-	__le16 error_code;
-	__le16 req_type;
-	__le16 seq_id;
-	__le16 resp_len;
-	__le32 unused_0;
-	u8 unused_1;
-	u8 unused_2;
-	u8 unused_3;
-	u8 valid;
-};
-
-/* hwrm_cfa_decap_filter_alloc */
-/* Input (104 bytes) */
+	__le16	error_code;
+	__le16	req_type;
+	__le16	seq_id;
+	__le16	resp_len;
+	u8	unused_0[7];
+	u8	valid;
+};
+
+/* hwrm_cfa_decap_filter_alloc_input (size:832b/104B) */
 struct hwrm_cfa_decap_filter_alloc_input {
-	__le16 req_type;
-	__le16 cmpl_ring;
-	__le16 seq_id;
-	__le16 target_id;
-	__le64 resp_addr;
-	__le32 flags;
-	#define CFA_DECAP_FILTER_ALLOC_REQ_FLAGS_OVS_TUNNEL	    0x1UL
-	__le32 enables;
-	#define CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_TUNNEL_TYPE     0x1UL
-	#define CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_TUNNEL_ID       0x2UL
-	#define CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_SRC_MACADDR     0x4UL
-	#define CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_DST_MACADDR     0x8UL
-	#define CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_OVLAN_VID       0x10UL
-	#define CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_IVLAN_VID       0x20UL
-	#define CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_T_OVLAN_VID     0x40UL
-	#define CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_T_IVLAN_VID     0x80UL
-	#define CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_ETHERTYPE       0x100UL
-	#define CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_SRC_IPADDR      0x200UL
-	#define CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_DST_IPADDR      0x400UL
-	#define CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_IPADDR_TYPE     0x800UL
-	#define CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_IP_PROTOCOL     0x1000UL
-	#define CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_SRC_PORT	    0x2000UL
-	#define CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_DST_PORT	    0x4000UL
-	#define CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_DST_ID	    0x8000UL
-	#define CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_MIRROR_VNIC_ID  0x10000UL
-	__be32 tunnel_id;
-	u8 tunnel_type;
-	#define CFA_DECAP_FILTER_ALLOC_REQ_TUNNEL_TYPE_NONTUNNEL  0x0UL
-	#define CFA_DECAP_FILTER_ALLOC_REQ_TUNNEL_TYPE_VXLAN      0x1UL
-	#define CFA_DECAP_FILTER_ALLOC_REQ_TUNNEL_TYPE_NVGRE      0x2UL
-	#define CFA_DECAP_FILTER_ALLOC_REQ_TUNNEL_TYPE_L2GRE      0x3UL
-	#define CFA_DECAP_FILTER_ALLOC_REQ_TUNNEL_TYPE_IPIP       0x4UL
-	#define CFA_DECAP_FILTER_ALLOC_REQ_TUNNEL_TYPE_GENEVE     0x5UL
-	#define CFA_DECAP_FILTER_ALLOC_REQ_TUNNEL_TYPE_MPLS       0x6UL
-	#define CFA_DECAP_FILTER_ALLOC_REQ_TUNNEL_TYPE_STT	   0x7UL
-	#define CFA_DECAP_FILTER_ALLOC_REQ_TUNNEL_TYPE_IPGRE      0x8UL
-	#define CFA_DECAP_FILTER_ALLOC_REQ_TUNNEL_TYPE_VXLAN_V4   0x9UL
-	#define CFA_DECAP_FILTER_ALLOC_REQ_TUNNEL_TYPE_ANYTUNNEL  0xffUL
-	u8 unused_0;
-	__le16 unused_1;
-	u8 src_macaddr[6];
-	u8 unused_2;
-	u8 unused_3;
-	u8 dst_macaddr[6];
-	__be16 ovlan_vid;
-	__be16 ivlan_vid;
-	__be16 t_ovlan_vid;
-	__be16 t_ivlan_vid;
-	__be16 ethertype;
-	u8 ip_addr_type;
-	#define CFA_DECAP_FILTER_ALLOC_REQ_IP_ADDR_TYPE_UNKNOWN   0x0UL
-	#define CFA_DECAP_FILTER_ALLOC_REQ_IP_ADDR_TYPE_IPV4      0x4UL
-	#define CFA_DECAP_FILTER_ALLOC_REQ_IP_ADDR_TYPE_IPV6      0x6UL
-	u8 ip_protocol;
-	#define CFA_DECAP_FILTER_ALLOC_REQ_IP_PROTOCOL_UNKNOWN    0x0UL
-	#define CFA_DECAP_FILTER_ALLOC_REQ_IP_PROTOCOL_TCP	   0x6UL
-	#define CFA_DECAP_FILTER_ALLOC_REQ_IP_PROTOCOL_UDP	   0x11UL
-	u8 unused_4;
-	u8 unused_5;
-	u8 unused_6[3];
-	u8 unused_7;
-	__be32 src_ipaddr[4];
-	__be32 dst_ipaddr[4];
-	__be16 src_port;
-	__be16 dst_port;
-	__le16 dst_id;
-	__le16 l2_ctxt_ref_id;
-};
-
-/* Output (16 bytes) */
+	__le16	req_type;
+	__le16	cmpl_ring;
+	__le16	seq_id;
+	__le16	target_id;
+	__le64	resp_addr;
+	__le32	flags;
+	#define CFA_DECAP_FILTER_ALLOC_REQ_FLAGS_OVS_TUNNEL     0x1UL
+	__le32	enables;
+	#define CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_TUNNEL_TYPE        0x1UL
+	#define CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_TUNNEL_ID          0x2UL
+	#define CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_SRC_MACADDR        0x4UL
+	#define CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_DST_MACADDR        0x8UL
+	#define CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_OVLAN_VID          0x10UL
+	#define CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_IVLAN_VID          0x20UL
+	#define CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_T_OVLAN_VID        0x40UL
+	#define CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_T_IVLAN_VID        0x80UL
+	#define CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_ETHERTYPE          0x100UL
+	#define CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_SRC_IPADDR         0x200UL
+	#define CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_DST_IPADDR         0x400UL
+	#define CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_IPADDR_TYPE        0x800UL
+	#define CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_IP_PROTOCOL        0x1000UL
+	#define CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_SRC_PORT           0x2000UL
+	#define CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_DST_PORT           0x4000UL
+	#define CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_DST_ID             0x8000UL
+	#define CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_MIRROR_VNIC_ID     0x10000UL
+	__be32	tunnel_id;
+	u8	tunnel_type;
+	#define CFA_DECAP_FILTER_ALLOC_REQ_TUNNEL_TYPE_NONTUNNEL 0x0UL
+	#define CFA_DECAP_FILTER_ALLOC_REQ_TUNNEL_TYPE_VXLAN     0x1UL
+	#define CFA_DECAP_FILTER_ALLOC_REQ_TUNNEL_TYPE_NVGRE     0x2UL
+	#define CFA_DECAP_FILTER_ALLOC_REQ_TUNNEL_TYPE_L2GRE     0x3UL
+	#define CFA_DECAP_FILTER_ALLOC_REQ_TUNNEL_TYPE_IPIP      0x4UL
+	#define CFA_DECAP_FILTER_ALLOC_REQ_TUNNEL_TYPE_GENEVE    0x5UL
+	#define CFA_DECAP_FILTER_ALLOC_REQ_TUNNEL_TYPE_MPLS      0x6UL
+	#define CFA_DECAP_FILTER_ALLOC_REQ_TUNNEL_TYPE_STT       0x7UL
+	#define CFA_DECAP_FILTER_ALLOC_REQ_TUNNEL_TYPE_IPGRE     0x8UL
+	#define CFA_DECAP_FILTER_ALLOC_REQ_TUNNEL_TYPE_VXLAN_V4  0x9UL
+	#define CFA_DECAP_FILTER_ALLOC_REQ_TUNNEL_TYPE_ANYTUNNEL 0xffUL
+	#define CFA_DECAP_FILTER_ALLOC_REQ_TUNNEL_TYPE_LAST     CFA_DECAP_FILTER_ALLOC_REQ_TUNNEL_TYPE_ANYTUNNEL
+	u8	unused_0;
+	__le16	unused_1;
+	u8	src_macaddr[6];
+	u8	unused_2[2];
+	u8	dst_macaddr[6];
+	__be16	ovlan_vid;
+	__be16	ivlan_vid;
+	__be16	t_ovlan_vid;
+	__be16	t_ivlan_vid;
+	__be16	ethertype;
+	u8	ip_addr_type;
+	#define CFA_DECAP_FILTER_ALLOC_REQ_IP_ADDR_TYPE_UNKNOWN 0x0UL
+	#define CFA_DECAP_FILTER_ALLOC_REQ_IP_ADDR_TYPE_IPV4    0x4UL
+	#define CFA_DECAP_FILTER_ALLOC_REQ_IP_ADDR_TYPE_IPV6    0x6UL
+	#define CFA_DECAP_FILTER_ALLOC_REQ_IP_ADDR_TYPE_LAST   CFA_DECAP_FILTER_ALLOC_REQ_IP_ADDR_TYPE_IPV6
+	u8	ip_protocol;
+	#define CFA_DECAP_FILTER_ALLOC_REQ_IP_PROTOCOL_UNKNOWN 0x0UL
+	#define CFA_DECAP_FILTER_ALLOC_REQ_IP_PROTOCOL_TCP     0x6UL
+	#define CFA_DECAP_FILTER_ALLOC_REQ_IP_PROTOCOL_UDP     0x11UL
+	#define CFA_DECAP_FILTER_ALLOC_REQ_IP_PROTOCOL_LAST   CFA_DECAP_FILTER_ALLOC_REQ_IP_PROTOCOL_UDP
+	__le16	unused_3;
+	__le32	unused_4;
+	__be32	src_ipaddr[4];
+	__be32	dst_ipaddr[4];
+	__be16	src_port;
+	__be16	dst_port;
+	__le16	dst_id;
+	__le16	l2_ctxt_ref_id;
+};
+
+/* hwrm_cfa_decap_filter_alloc_output (size:128b/16B) */
 struct hwrm_cfa_decap_filter_alloc_output {
-	__le16 error_code;
-	__le16 req_type;
-	__le16 seq_id;
-	__le16 resp_len;
-	__le32 decap_filter_id;
-	u8 unused_0;
-	u8 unused_1;
-	u8 unused_2;
-	u8 valid;
-};
-
-/* hwrm_cfa_decap_filter_free */
-/* Input (24 bytes) */
+	__le16	error_code;
+	__le16	req_type;
+	__le16	seq_id;
+	__le16	resp_len;
+	__le32	decap_filter_id;
+	u8	unused_0[3];
+	u8	valid;
+};
+
+/* hwrm_cfa_decap_filter_free_input (size:192b/24B) */
 struct hwrm_cfa_decap_filter_free_input {
-	__le16 req_type;
-	__le16 cmpl_ring;
-	__le16 seq_id;
-	__le16 target_id;
-	__le64 resp_addr;
-	__le32 decap_filter_id;
-	__le32 unused_0;
+	__le16	req_type;
+	__le16	cmpl_ring;
+	__le16	seq_id;
+	__le16	target_id;
+	__le64	resp_addr;
+	__le32	decap_filter_id;
+	u8	unused_0[4];
 };
 
-/* Output (16 bytes) */
+/* hwrm_cfa_decap_filter_free_output (size:128b/16B) */
 struct hwrm_cfa_decap_filter_free_output {
-	__le16 error_code;
-	__le16 req_type;
-	__le16 seq_id;
-	__le16 resp_len;
-	__le32 unused_0;
-	u8 unused_1;
-	u8 unused_2;
-	u8 unused_3;
-	u8 valid;
-};
-
-/* hwrm_cfa_flow_alloc */
-/* Input (128 bytes) */
+	__le16	error_code;
+	__le16	req_type;
+	__le16	seq_id;
+	__le16	resp_len;
+	u8	unused_0[7];
+	u8	valid;
+};
+
+/* hwrm_cfa_flow_alloc_input (size:1024b/128B) */
 struct hwrm_cfa_flow_alloc_input {
-	__le16 req_type;
-	__le16 cmpl_ring;
-	__le16 seq_id;
-	__le16 target_id;
-	__le64 resp_addr;
-	__le16 flags;
-	#define CFA_FLOW_ALLOC_REQ_FLAGS_TUNNEL		    0x1UL
-	#define CFA_FLOW_ALLOC_REQ_FLAGS_NUM_VLAN_MASK		    0x6UL
-	#define CFA_FLOW_ALLOC_REQ_FLAGS_NUM_VLAN_SFT		    1
-	#define CFA_FLOW_ALLOC_REQ_FLAGS_NUM_VLAN_NONE		   (0x0UL << 1)
-	#define CFA_FLOW_ALLOC_REQ_FLAGS_NUM_VLAN_ONE		   (0x1UL << 1)
-	#define CFA_FLOW_ALLOC_REQ_FLAGS_NUM_VLAN_TWO		   (0x2UL << 1)
-	#define CFA_FLOW_ALLOC_REQ_FLAGS_NUM_VLAN_LAST    CFA_FLOW_ALLOC_REQ_FLAGS_NUM_VLAN_TWO
-	#define CFA_FLOW_ALLOC_REQ_FLAGS_FLOWTYPE_MASK		    0x38UL
-	#define CFA_FLOW_ALLOC_REQ_FLAGS_FLOWTYPE_SFT		    3
-	#define CFA_FLOW_ALLOC_REQ_FLAGS_FLOWTYPE_L2		   (0x0UL << 3)
-	#define CFA_FLOW_ALLOC_REQ_FLAGS_FLOWTYPE_IPV4		   (0x1UL << 3)
-	#define CFA_FLOW_ALLOC_REQ_FLAGS_FLOWTYPE_IPV6		   (0x2UL << 3)
-	#define CFA_FLOW_ALLOC_REQ_FLAGS_FLOWTYPE_LAST    CFA_FLOW_ALLOC_REQ_FLAGS_FLOWTYPE_IPV6
-	__le16 src_fid;
-	__le32 tunnel_handle;
-	__le16 action_flags;
-	#define CFA_FLOW_ALLOC_REQ_ACTION_FLAGS_FWD		    0x1UL
-	#define CFA_FLOW_ALLOC_REQ_ACTION_FLAGS_RECYCLE	    0x2UL
-	#define CFA_FLOW_ALLOC_REQ_ACTION_FLAGS_DROP		    0x4UL
-	#define CFA_FLOW_ALLOC_REQ_ACTION_FLAGS_METER		    0x8UL
-	#define CFA_FLOW_ALLOC_REQ_ACTION_FLAGS_TUNNEL		    0x10UL
-	#define CFA_FLOW_ALLOC_REQ_ACTION_FLAGS_NAT_SRC	    0x20UL
-	#define CFA_FLOW_ALLOC_REQ_ACTION_FLAGS_NAT_DEST	    0x40UL
-	#define CFA_FLOW_ALLOC_REQ_ACTION_FLAGS_NAT_IPV4_ADDRESS   0x80UL
-	#define CFA_FLOW_ALLOC_REQ_ACTION_FLAGS_L2_HEADER_REWRITE  0x100UL
-	#define CFA_FLOW_ALLOC_REQ_ACTION_FLAGS_TTL_DECREMENT      0x200UL
-	__le16 dst_fid;
-	__be16 l2_rewrite_vlan_tpid;
-	__be16 l2_rewrite_vlan_tci;
-	__le16 act_meter_id;
-	__le16 ref_flow_handle;
-	__be16 ethertype;
-	__be16 outer_vlan_tci;
-	__be16 dmac[3];
-	__be16 inner_vlan_tci;
-	__be16 smac[3];
-	u8 ip_dst_mask_len;
-	u8 ip_src_mask_len;
-	__be32 ip_dst[4];
-	__be32 ip_src[4];
-	__be16 l4_src_port;
-	__be16 l4_src_port_mask;
-	__be16 l4_dst_port;
-	__be16 l4_dst_port_mask;
-	__be32 nat_ip_address[4];
-	__be16 l2_rewrite_dmac[3];
-	__be16 nat_port;
-	__be16 l2_rewrite_smac[3];
-	u8 ip_proto;
-	u8 unused_0;
-};
-
-/* Output (16 bytes) */
+	__le16	req_type;
+	__le16	cmpl_ring;
+	__le16	seq_id;
+	__le16	target_id;
+	__le64	resp_addr;
+	__le16	flags;
+	#define CFA_FLOW_ALLOC_REQ_FLAGS_TUNNEL       0x1UL
+	#define CFA_FLOW_ALLOC_REQ_FLAGS_NUM_VLAN_MASK 0x6UL
+	#define CFA_FLOW_ALLOC_REQ_FLAGS_NUM_VLAN_SFT 1
+	#define CFA_FLOW_ALLOC_REQ_FLAGS_NUM_VLAN_NONE  (0x0UL << 1)
+	#define CFA_FLOW_ALLOC_REQ_FLAGS_NUM_VLAN_ONE   (0x1UL << 1)
+	#define CFA_FLOW_ALLOC_REQ_FLAGS_NUM_VLAN_TWO   (0x2UL << 1)
+	#define CFA_FLOW_ALLOC_REQ_FLAGS_NUM_VLAN_LAST CFA_FLOW_ALLOC_REQ_FLAGS_NUM_VLAN_TWO
+	#define CFA_FLOW_ALLOC_REQ_FLAGS_FLOWTYPE_MASK 0x38UL
+	#define CFA_FLOW_ALLOC_REQ_FLAGS_FLOWTYPE_SFT 3
+	#define CFA_FLOW_ALLOC_REQ_FLAGS_FLOWTYPE_L2    (0x0UL << 3)
+	#define CFA_FLOW_ALLOC_REQ_FLAGS_FLOWTYPE_IPV4  (0x1UL << 3)
+	#define CFA_FLOW_ALLOC_REQ_FLAGS_FLOWTYPE_IPV6  (0x2UL << 3)
+	#define CFA_FLOW_ALLOC_REQ_FLAGS_FLOWTYPE_LAST CFA_FLOW_ALLOC_REQ_FLAGS_FLOWTYPE_IPV6
+	__le16	src_fid;
+	__le32	tunnel_handle;
+	__le16	action_flags;
+	#define CFA_FLOW_ALLOC_REQ_ACTION_FLAGS_FWD                   0x1UL
+	#define CFA_FLOW_ALLOC_REQ_ACTION_FLAGS_RECYCLE               0x2UL
+	#define CFA_FLOW_ALLOC_REQ_ACTION_FLAGS_DROP                  0x4UL
+	#define CFA_FLOW_ALLOC_REQ_ACTION_FLAGS_METER                 0x8UL
+	#define CFA_FLOW_ALLOC_REQ_ACTION_FLAGS_TUNNEL                0x10UL
+	#define CFA_FLOW_ALLOC_REQ_ACTION_FLAGS_NAT_SRC               0x20UL
+	#define CFA_FLOW_ALLOC_REQ_ACTION_FLAGS_NAT_DEST              0x40UL
+	#define CFA_FLOW_ALLOC_REQ_ACTION_FLAGS_NAT_IPV4_ADDRESS      0x80UL
+	#define CFA_FLOW_ALLOC_REQ_ACTION_FLAGS_L2_HEADER_REWRITE     0x100UL
+	#define CFA_FLOW_ALLOC_REQ_ACTION_FLAGS_TTL_DECREMENT         0x200UL
+	__le16	dst_fid;
+	__be16	l2_rewrite_vlan_tpid;
+	__be16	l2_rewrite_vlan_tci;
+	__le16	act_meter_id;
+	__le16	ref_flow_handle;
+	__be16	ethertype;
+	__be16	outer_vlan_tci;
+	__be16	dmac[3];
+	__be16	inner_vlan_tci;
+	__be16	smac[3];
+	u8	ip_dst_mask_len;
+	u8	ip_src_mask_len;
+	__be32	ip_dst[4];
+	__be32	ip_src[4];
+	__be16	l4_src_port;
+	__be16	l4_src_port_mask;
+	__be16	l4_dst_port;
+	__be16	l4_dst_port_mask;
+	__be32	nat_ip_address[4];
+	__be16	l2_rewrite_dmac[3];
+	__be16	nat_port;
+	__be16	l2_rewrite_smac[3];
+	u8	ip_proto;
+	u8	unused_0;
+};
+
+/* hwrm_cfa_flow_alloc_output (size:128b/16B) */
 struct hwrm_cfa_flow_alloc_output {
-	__le16 error_code;
-	__le16 req_type;
-	__le16 seq_id;
-	__le16 resp_len;
-	__le16 flow_handle;
-	u8 unused_0;
-	u8 unused_1;
-	u8 unused_2;
-	u8 unused_3;
-	u8 unused_4;
-	u8 valid;
-};
-
-/* hwrm_cfa_flow_free */
-/* Input (24 bytes) */
+	__le16	error_code;
+	__le16	req_type;
+	__le16	seq_id;
+	__le16	resp_len;
+	__le16	flow_handle;
+	u8	unused_0[5];
+	u8	valid;
+};
+
+/* hwrm_cfa_flow_free_input (size:192b/24B) */
 struct hwrm_cfa_flow_free_input {
-	__le16 req_type;
-	__le16 cmpl_ring;
-	__le16 seq_id;
-	__le16 target_id;
-	__le64 resp_addr;
-	__le16 flow_handle;
-	__le16 unused_0[3];
+	__le16	req_type;
+	__le16	cmpl_ring;
+	__le16	seq_id;
+	__le16	target_id;
+	__le64	resp_addr;
+	__le16	flow_handle;
+	u8	unused_0[6];
 };
 
-/* Output (32 bytes) */
+/* hwrm_cfa_flow_free_output (size:256b/32B) */
 struct hwrm_cfa_flow_free_output {
-	__le16 error_code;
-	__le16 req_type;
-	__le16 seq_id;
-	__le16 resp_len;
-	__le64 packet;
-	__le64 byte;
-	__le32 unused_0;
-	u8 unused_1;
-	u8 unused_2;
-	u8 unused_3;
-	u8 valid;
-};
-
-/* hwrm_cfa_flow_stats */
-/* Input (40 bytes) */
+	__le16	error_code;
+	__le16	req_type;
+	__le16	seq_id;
+	__le16	resp_len;
+	__le64	packet;
+	__le64	byte;
+	u8	unused_0[7];
+	u8	valid;
+};
+
+/* hwrm_cfa_flow_stats_input (size:320b/40B) */
 struct hwrm_cfa_flow_stats_input {
-	__le16 req_type;
-	__le16 cmpl_ring;
-	__le16 seq_id;
-	__le16 target_id;
-	__le64 resp_addr;
-	__le16 num_flows;
-	__le16 flow_handle_0;
-	__le16 flow_handle_1;
-	__le16 flow_handle_2;
-	__le16 flow_handle_3;
-	__le16 flow_handle_4;
-	__le16 flow_handle_5;
-	__le16 flow_handle_6;
-	__le16 flow_handle_7;
-	__le16 flow_handle_8;
-	__le16 flow_handle_9;
-	__le16 unused_0;
-};
-
-/* Output (176 bytes) */
+	__le16	req_type;
+	__le16	cmpl_ring;
+	__le16	seq_id;
+	__le16	target_id;
+	__le64	resp_addr;
+	__le16	num_flows;
+	__le16	flow_handle_0;
+	__le16	flow_handle_1;
+	__le16	flow_handle_2;
+	__le16	flow_handle_3;
+	__le16	flow_handle_4;
+	__le16	flow_handle_5;
+	__le16	flow_handle_6;
+	__le16	flow_handle_7;
+	__le16	flow_handle_8;
+	__le16	flow_handle_9;
+	u8	unused_0[2];
+};
+
+/* hwrm_cfa_flow_stats_output (size:1408b/176B) */
 struct hwrm_cfa_flow_stats_output {
-	__le16 error_code;
-	__le16 req_type;
-	__le16 seq_id;
-	__le16 resp_len;
-	__le64 packet_0;
-	__le64 packet_1;
-	__le64 packet_2;
-	__le64 packet_3;
-	__le64 packet_4;
-	__le64 packet_5;
-	__le64 packet_6;
-	__le64 packet_7;
-	__le64 packet_8;
-	__le64 packet_9;
-	__le64 byte_0;
-	__le64 byte_1;
-	__le64 byte_2;
-	__le64 byte_3;
-	__le64 byte_4;
-	__le64 byte_5;
-	__le64 byte_6;
-	__le64 byte_7;
-	__le64 byte_8;
-	__le64 byte_9;
-	__le32 unused_0;
-	u8 unused_1;
-	u8 unused_2;
-	u8 unused_3;
-	u8 valid;
-};
-
-/* hwrm_cfa_vfr_alloc */
-/* Input (32 bytes) */
+	__le16	error_code;
+	__le16	req_type;
+	__le16	seq_id;
+	__le16	resp_len;
+	__le64	packet_0;
+	__le64	packet_1;
+	__le64	packet_2;
+	__le64	packet_3;
+	__le64	packet_4;
+	__le64	packet_5;
+	__le64	packet_6;
+	__le64	packet_7;
+	__le64	packet_8;
+	__le64	packet_9;
+	__le64	byte_0;
+	__le64	byte_1;
+	__le64	byte_2;
+	__le64	byte_3;
+	__le64	byte_4;
+	__le64	byte_5;
+	__le64	byte_6;
+	__le64	byte_7;
+	__le64	byte_8;
+	__le64	byte_9;
+	u8	unused_0[7];
+	u8	valid;
+};
+
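
An illustrative aside, not part of the diff: HWRM_CFA_FLOW_STATS batches up to ten handles per call, and the output pairs packet_N/byte_N with flow_handle_N by index. A sketch, relying only on the fact that the ten __le16 handle slots (and the ten __le64 counter slots) are laid out contiguously, as shown above:

/* Sketch only: treat the consecutive slots as arrays. */
static void fill_flow_stats_req(struct hwrm_cfa_flow_stats_input *req,
				const u16 *handles, int n)
{
	__le16 *slot = &req->flow_handle_0;
	int i;

	if (n > 10)
		n = 10;				/* batch limit of this command */
	req->num_flows = cpu_to_le16(n);
	for (i = 0; i < n; i++)
		slot[i] = cpu_to_le16(handles[i]);
}

static u64 flow_stats_pkts(struct hwrm_cfa_flow_stats_output *resp, int i)
{
	return le64_to_cpu((&resp->packet_0)[i]);	/* packet_0..packet_9 */
}
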
+/* hwrm_cfa_vfr_alloc_input (size:448b/56B) */
 struct hwrm_cfa_vfr_alloc_input {
-	__le16 req_type;
-	__le16 cmpl_ring;
-	__le16 seq_id;
-	__le16 target_id;
-	__le64 resp_addr;
-	__le16 vf_id;
-	__le16 reserved;
-	__le32 unused_0;
-	char vfr_name[32];
-};
-
-/* Output (16 bytes) */
+	__le16	req_type;
+	__le16	cmpl_ring;
+	__le16	seq_id;
+	__le16	target_id;
+	__le64	resp_addr;
+	__le16	vf_id;
+	__le16	reserved;
+	u8	unused_0[4];
+	char	vfr_name[32];
+};
+
+/* hwrm_cfa_vfr_alloc_output (size:128b/16B) */
 struct hwrm_cfa_vfr_alloc_output {
-	__le16 error_code;
-	__le16 req_type;
-	__le16 seq_id;
-	__le16 resp_len;
-	__le16 rx_cfa_code;
-	__le16 tx_cfa_action;
-	u8 unused_0;
-	u8 unused_1;
-	u8 unused_2;
-	u8 valid;
-};
-
-/* hwrm_cfa_vfr_free */
-/* Input (24 bytes) */
+	__le16	error_code;
+	__le16	req_type;
+	__le16	seq_id;
+	__le16	resp_len;
+	__le16	rx_cfa_code;
+	__le16	tx_cfa_action;
+	u8	unused_0[3];
+	u8	valid;
+};
+
+/* hwrm_cfa_vfr_free_input (size:384b/48B) */
 struct hwrm_cfa_vfr_free_input {
-	__le16 req_type;
-	__le16 cmpl_ring;
-	__le16 seq_id;
-	__le16 target_id;
-	__le64 resp_addr;
-	char vfr_name[32];
+	__le16	req_type;
+	__le16	cmpl_ring;
+	__le16	seq_id;
+	__le16	target_id;
+	__le64	resp_addr;
+	char	vfr_name[32];
 };
 
-/* Output (16 bytes) */
+/* hwrm_cfa_vfr_free_output (size:128b/16B) */
 struct hwrm_cfa_vfr_free_output {
-	__le16 error_code;
-	__le16 req_type;
-	__le16 seq_id;
-	__le16 resp_len;
-	__le32 unused_0;
-	u8 unused_1;
-	u8 unused_2;
-	u8 unused_3;
-	u8 valid;
-};
-
-/* hwrm_tunnel_dst_port_query */
-/* Input (24 bytes) */
+	__le16	error_code;
+	__le16	req_type;
+	__le16	seq_id;
+	__le16	resp_len;
+	u8	unused_0[7];
+	u8	valid;
+};
+
+/* hwrm_tunnel_dst_port_query_input (size:192b/24B) */
 struct hwrm_tunnel_dst_port_query_input {
-	__le16 req_type;
-	__le16 cmpl_ring;
-	__le16 seq_id;
-	__le16 target_id;
-	__le64 resp_addr;
-	u8 tunnel_type;
-	#define TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_VXLAN       0x1UL
-	#define TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_GENEVE      0x5UL
-	#define TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_VXLAN_V4    0x9UL
-	u8 unused_0[7];
-};
-
-/* Output (16 bytes) */
+	__le16	req_type;
+	__le16	cmpl_ring;
+	__le16	seq_id;
+	__le16	target_id;
+	__le64	resp_addr;
+	u8	tunnel_type;
+	#define TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_VXLAN    0x1UL
+	#define TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_GENEVE   0x5UL
+	#define TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_VXLAN_V4 0x9UL
+	#define TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_LAST    TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_VXLAN_V4
+	u8	unused_0[7];
+};
+
+/* hwrm_tunnel_dst_port_query_output (size:128b/16B) */
 struct hwrm_tunnel_dst_port_query_output {
-	__le16 error_code;
-	__le16 req_type;
-	__le16 seq_id;
-	__le16 resp_len;
-	__le16 tunnel_dst_port_id;
-	__be16 tunnel_dst_port_val;
-	u8 unused_0;
-	u8 unused_1;
-	u8 unused_2;
-	u8 valid;
-};
-
-/* hwrm_tunnel_dst_port_alloc */
-/* Input (24 bytes) */
+	__le16	error_code;
+	__le16	req_type;
+	__le16	seq_id;
+	__le16	resp_len;
+	__le16	tunnel_dst_port_id;
+	__be16	tunnel_dst_port_val;
+	u8	unused_0[3];
+	u8	valid;
+};
+
+/* hwrm_tunnel_dst_port_alloc_input (size:192b/24B) */
 struct hwrm_tunnel_dst_port_alloc_input {
-	__le16 req_type;
-	__le16 cmpl_ring;
-	__le16 seq_id;
-	__le16 target_id;
-	__le64 resp_addr;
-	u8 tunnel_type;
-	#define TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_VXLAN       0x1UL
-	#define TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_GENEVE      0x5UL
-	#define TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_VXLAN_V4    0x9UL
-	u8 unused_0;
-	__be16 tunnel_dst_port_val;
-	__be32 unused_1;
-};
-
-/* Output (16 bytes) */
+	__le16	req_type;
+	__le16	cmpl_ring;
+	__le16	seq_id;
+	__le16	target_id;
+	__le64	resp_addr;
+	u8	tunnel_type;
+	#define TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_VXLAN    0x1UL
+	#define TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_GENEVE   0x5UL
+	#define TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_VXLAN_V4 0x9UL
+	#define TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_LAST    TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_VXLAN_V4
+	u8	unused_0;
+	__be16	tunnel_dst_port_val;
+	u8	unused_1[4];
+};
+
+/* hwrm_tunnel_dst_port_alloc_output (size:128b/16B) */
 struct hwrm_tunnel_dst_port_alloc_output {
-	__le16 error_code;
-	__le16 req_type;
-	__le16 seq_id;
-	__le16 resp_len;
-	__le16 tunnel_dst_port_id;
-	u8 unused_0;
-	u8 unused_1;
-	u8 unused_2;
-	u8 unused_3;
-	u8 unused_4;
-	u8 valid;
-};
-
-/* hwrm_tunnel_dst_port_free */
-/* Input (24 bytes) */
+	__le16	error_code;
+	__le16	req_type;
+	__le16	seq_id;
+	__le16	resp_len;
+	__le16	tunnel_dst_port_id;
+	u8	unused_0[5];
+	u8	valid;
+};
+
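
An illustrative aside, not part of the diff: the tunnel port commands mix byte orders deliberately. The requested UDP port travels in wire order (__be16), while the identifier the firmware returns, which the _free request defined just below takes back, is ordinary little-endian HWRM data.

/* Sketch only: program the IANA VXLAN port. */
static void program_vxlan_port(struct hwrm_tunnel_dst_port_alloc_input *req)
{
	req->tunnel_type = TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_VXLAN;
	req->tunnel_dst_port_val = cpu_to_be16(4789);	/* wire order */
}

/* Later, the free call passes back the LE identifier from the alloc
 * response, not the port number:
 *	free_req->tunnel_dst_port_id = resp->tunnel_dst_port_id;
 */
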
+/* hwrm_tunnel_dst_port_free_input (size:192b/24B) */
 struct hwrm_tunnel_dst_port_free_input {
-	__le16 req_type;
-	__le16 cmpl_ring;
-	__le16 seq_id;
-	__le16 target_id;
-	__le64 resp_addr;
-	u8 tunnel_type;
-	#define TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN	   0x1UL
-	#define TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_GENEVE       0x5UL
-	#define TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN_V4     0x9UL
-	u8 unused_0;
-	__le16 tunnel_dst_port_id;
-	__le32 unused_1;
-};
-
-/* Output (16 bytes) */
+	__le16	req_type;
+	__le16	cmpl_ring;
+	__le16	seq_id;
+	__le16	target_id;
+	__le64	resp_addr;
+	u8	tunnel_type;
+	#define TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN    0x1UL
+	#define TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_GENEVE   0x5UL
+	#define TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN_V4 0x9UL
+	#define TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_LAST    TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN_V4
+	u8	unused_0;
+	__le16	tunnel_dst_port_id;
+	u8	unused_1[4];
+};
+
+/* hwrm_tunnel_dst_port_free_output (size:128b/16B) */
 struct hwrm_tunnel_dst_port_free_output {
-	__le16 error_code;
-	__le16 req_type;
-	__le16 seq_id;
-	__le16 resp_len;
-	__le32 unused_0;
-	u8 unused_1;
-	u8 unused_2;
-	u8 unused_3;
-	u8 valid;
-};
-
-/* hwrm_stat_ctx_alloc */
-/* Input (32 bytes) */
+	__le16	error_code;
+	__le16	req_type;
+	__le16	seq_id;
+	__le16	resp_len;
+	u8	unused_1[7];
+	u8	valid;
+};
+
+/* ctx_hw_stats (size:1280b/160B) */
+struct ctx_hw_stats {
+	__le64	rx_ucast_pkts;
+	__le64	rx_mcast_pkts;
+	__le64	rx_bcast_pkts;
+	__le64	rx_discard_pkts;
+	__le64	rx_drop_pkts;
+	__le64	rx_ucast_bytes;
+	__le64	rx_mcast_bytes;
+	__le64	rx_bcast_bytes;
+	__le64	tx_ucast_pkts;
+	__le64	tx_mcast_pkts;
+	__le64	tx_bcast_pkts;
+	__le64	tx_discard_pkts;
+	__le64	tx_drop_pkts;
+	__le64	tx_ucast_bytes;
+	__le64	tx_mcast_bytes;
+	__le64	tx_bcast_bytes;
+	__le64	tpa_pkts;
+	__le64	tpa_bytes;
+	__le64	tpa_events;
+	__le64	tpa_aborts;
+};
+
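
An illustrative aside, not part of the diff: ctx_hw_stats is not a command. It is the DMA layout the firmware periodically writes at the address handed over in stats_dma_addr below, so all counters are little-endian 64-bit values read straight from host memory. A sketch, assuming a coherent DMA buffer (<linux/dma-mapping.h>):

/* Sketch only: allocate the stats buffer and point the firmware at it. */
static struct ctx_hw_stats *alloc_stats_buf(struct device *dev,
					    struct hwrm_stat_ctx_alloc_input *req,
					    dma_addr_t *mapping)
{
	struct ctx_hw_stats *stats;

	stats = dma_alloc_coherent(dev, sizeof(*stats), mapping, GFP_KERNEL);
	if (!stats)
		return NULL;
	req->stats_dma_addr = cpu_to_le64(*mapping);
	req->update_period_ms = cpu_to_le32(1000);  /* firmware refresh period */
	return stats;
}

/* Counters are then read directly, e.g.:
 *	u64 drops = le64_to_cpu(stats->rx_drop_pkts);
 */
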
+/* hwrm_stat_ctx_alloc_input (size:256b/32B) */
 struct hwrm_stat_ctx_alloc_input {
-	__le16 req_type;
-	__le16 cmpl_ring;
-	__le16 seq_id;
-	__le16 target_id;
-	__le64 resp_addr;
-	__le64 stats_dma_addr;
-	__le32 update_period_ms;
-	u8 stat_ctx_flags;
-	#define STAT_CTX_ALLOC_REQ_STAT_CTX_FLAGS_ROCE		    0x1UL
-	u8 unused_0[3];
-};
-
-/* Output (16 bytes) */
+	__le16	req_type;
+	__le16	cmpl_ring;
+	__le16	seq_id;
+	__le16	target_id;
+	__le64	resp_addr;
+	__le64	stats_dma_addr;
+	__le32	update_period_ms;
+	u8	stat_ctx_flags;
+	#define STAT_CTX_ALLOC_REQ_STAT_CTX_FLAGS_ROCE     0x1UL
+	u8	unused_0[3];
+};
+
+/* hwrm_stat_ctx_alloc_output (size:128b/16B) */
 struct hwrm_stat_ctx_alloc_output {
-	__le16 error_code;
-	__le16 req_type;
-	__le16 seq_id;
-	__le16 resp_len;
-	__le32 stat_ctx_id;
-	u8 unused_0;
-	u8 unused_1;
-	u8 unused_2;
-	u8 valid;
-};
-
-/* hwrm_stat_ctx_free */
-/* Input (24 bytes) */
+	__le16	error_code;
+	__le16	req_type;
+	__le16	seq_id;
+	__le16	resp_len;
+	__le32	stat_ctx_id;
+	u8	unused_0[3];
+	u8	valid;
+};
+
+/* hwrm_stat_ctx_free_input (size:192b/24B) */
 struct hwrm_stat_ctx_free_input {
-	__le16 req_type;
-	__le16 cmpl_ring;
-	__le16 seq_id;
-	__le16 target_id;
-	__le64 resp_addr;
-	__le32 stat_ctx_id;
-	__le32 unused_0;
+	__le16	req_type;
+	__le16	cmpl_ring;
+	__le16	seq_id;
+	__le16	target_id;
+	__le64	resp_addr;
+	__le32	stat_ctx_id;
+	u8	unused_0[4];
 };
 
-/* Output (16 bytes) */
+/* hwrm_stat_ctx_free_output (size:128b/16B) */
 struct hwrm_stat_ctx_free_output {
-	__le16 error_code;
-	__le16 req_type;
-	__le16 seq_id;
-	__le16 resp_len;
-	__le32 stat_ctx_id;
-	u8 unused_0;
-	u8 unused_1;
-	u8 unused_2;
-	u8 valid;
-};
-
-/* hwrm_stat_ctx_query */
-/* Input (24 bytes) */
+	__le16	error_code;
+	__le16	req_type;
+	__le16	seq_id;
+	__le16	resp_len;
+	__le32	stat_ctx_id;
+	u8	unused_0[3];
+	u8	valid;
+};
+
+/* hwrm_stat_ctx_query_input (size:192b/24B) */
 struct hwrm_stat_ctx_query_input {
-	__le16 req_type;
-	__le16 cmpl_ring;
-	__le16 seq_id;
-	__le16 target_id;
-	__le64 resp_addr;
-	__le32 stat_ctx_id;
-	__le32 unused_0;
+	__le16	req_type;
+	__le16	cmpl_ring;
+	__le16	seq_id;
+	__le16	target_id;
+	__le64	resp_addr;
+	__le32	stat_ctx_id;
+	u8	unused_0[4];
 };
 
-/* Output (176 bytes) */
+/* hwrm_stat_ctx_query_output (size:1408b/176B) */
 struct hwrm_stat_ctx_query_output {
-	__le16 error_code;
-	__le16 req_type;
-	__le16 seq_id;
-	__le16 resp_len;
-	__le64 tx_ucast_pkts;
-	__le64 tx_mcast_pkts;
-	__le64 tx_bcast_pkts;
-	__le64 tx_err_pkts;
-	__le64 tx_drop_pkts;
-	__le64 tx_ucast_bytes;
-	__le64 tx_mcast_bytes;
-	__le64 tx_bcast_bytes;
-	__le64 rx_ucast_pkts;
-	__le64 rx_mcast_pkts;
-	__le64 rx_bcast_pkts;
-	__le64 rx_err_pkts;
-	__le64 rx_drop_pkts;
-	__le64 rx_ucast_bytes;
-	__le64 rx_mcast_bytes;
-	__le64 rx_bcast_bytes;
-	__le64 rx_agg_pkts;
-	__le64 rx_agg_bytes;
-	__le64 rx_agg_events;
-	__le64 rx_agg_aborts;
-	__le32 unused_0;
-	u8 unused_1;
-	u8 unused_2;
-	u8 unused_3;
-	u8 valid;
-};
-
-/* hwrm_stat_ctx_clr_stats */
-/* Input (24 bytes) */
+	__le16	error_code;
+	__le16	req_type;
+	__le16	seq_id;
+	__le16	resp_len;
+	__le64	tx_ucast_pkts;
+	__le64	tx_mcast_pkts;
+	__le64	tx_bcast_pkts;
+	__le64	tx_err_pkts;
+	__le64	tx_drop_pkts;
+	__le64	tx_ucast_bytes;
+	__le64	tx_mcast_bytes;
+	__le64	tx_bcast_bytes;
+	__le64	rx_ucast_pkts;
+	__le64	rx_mcast_pkts;
+	__le64	rx_bcast_pkts;
+	__le64	rx_err_pkts;
+	__le64	rx_drop_pkts;
+	__le64	rx_ucast_bytes;
+	__le64	rx_mcast_bytes;
+	__le64	rx_bcast_bytes;
+	__le64	rx_agg_pkts;
+	__le64	rx_agg_bytes;
+	__le64	rx_agg_events;
+	__le64	rx_agg_aborts;
+	u8	unused_0[7];
+	u8	valid;
+};
+
+/* hwrm_stat_ctx_clr_stats_input (size:192b/24B) */
 struct hwrm_stat_ctx_clr_stats_input {
-	__le16 req_type;
-	__le16 cmpl_ring;
-	__le16 seq_id;
-	__le16 target_id;
-	__le64 resp_addr;
-	__le32 stat_ctx_id;
-	__le32 unused_0;
+	__le16	req_type;
+	__le16	cmpl_ring;
+	__le16	seq_id;
+	__le16	target_id;
+	__le64	resp_addr;
+	__le32	stat_ctx_id;
+	u8	unused_0[4];
 };
 
-/* Output (16 bytes) */
+/* hwrm_stat_ctx_clr_stats_output (size:128b/16B) */
 struct hwrm_stat_ctx_clr_stats_output {
-	__le16 error_code;
-	__le16 req_type;
-	__le16 seq_id;
-	__le16 resp_len;
-	__le32 unused_0;
-	u8 unused_1;
-	u8 unused_2;
-	u8 unused_3;
-	u8 valid;
-};
-
-/* hwrm_fw_reset */
-/* Input (24 bytes) */
+	__le16	error_code;
+	__le16	req_type;
+	__le16	seq_id;
+	__le16	resp_len;
+	u8	unused_0[7];
+	u8	valid;
+};
+
+/* tx_port_stats (size:3264b/408B) */
+struct tx_port_stats {
+	__le64	tx_64b_frames;
+	__le64	tx_65b_127b_frames;
+	__le64	tx_128b_255b_frames;
+	__le64	tx_256b_511b_frames;
+	__le64	tx_512b_1023b_frames;
+	__le64	tx_1024b_1518_frames;
+	__le64	tx_good_vlan_frames;
+	__le64	tx_1519b_2047_frames;
+	__le64	tx_2048b_4095b_frames;
+	__le64	tx_4096b_9216b_frames;
+	__le64	tx_9217b_16383b_frames;
+	__le64	tx_good_frames;
+	__le64	tx_total_frames;
+	__le64	tx_ucast_frames;
+	__le64	tx_mcast_frames;
+	__le64	tx_bcast_frames;
+	__le64	tx_pause_frames;
+	__le64	tx_pfc_frames;
+	__le64	tx_jabber_frames;
+	__le64	tx_fcs_err_frames;
+	__le64	tx_control_frames;
+	__le64	tx_oversz_frames;
+	__le64	tx_single_dfrl_frames;
+	__le64	tx_multi_dfrl_frames;
+	__le64	tx_single_coll_frames;
+	__le64	tx_multi_coll_frames;
+	__le64	tx_late_coll_frames;
+	__le64	tx_excessive_coll_frames;
+	__le64	tx_frag_frames;
+	__le64	tx_err;
+	__le64	tx_tagged_frames;
+	__le64	tx_dbl_tagged_frames;
+	__le64	tx_runt_frames;
+	__le64	tx_fifo_underruns;
+	__le64	tx_pfc_ena_frames_pri0;
+	__le64	tx_pfc_ena_frames_pri1;
+	__le64	tx_pfc_ena_frames_pri2;
+	__le64	tx_pfc_ena_frames_pri3;
+	__le64	tx_pfc_ena_frames_pri4;
+	__le64	tx_pfc_ena_frames_pri5;
+	__le64	tx_pfc_ena_frames_pri6;
+	__le64	tx_pfc_ena_frames_pri7;
+	__le64	tx_eee_lpi_events;
+	__le64	tx_eee_lpi_duration;
+	__le64	tx_llfc_logical_msgs;
+	__le64	tx_hcfc_msgs;
+	__le64	tx_total_collisions;
+	__le64	tx_bytes;
+	__le64	tx_xthol_frames;
+	__le64	tx_stat_discard;
+	__le64	tx_stat_error;
+};
+
+/* rx_port_stats (size:4224b/528B) */
+struct rx_port_stats {
+	__le64	rx_64b_frames;
+	__le64	rx_65b_127b_frames;
+	__le64	rx_128b_255b_frames;
+	__le64	rx_256b_511b_frames;
+	__le64	rx_512b_1023b_frames;
+	__le64	rx_1024b_1518_frames;
+	__le64	rx_good_vlan_frames;
+	__le64	rx_1519b_2047b_frames;
+	__le64	rx_2048b_4095b_frames;
+	__le64	rx_4096b_9216b_frames;
+	__le64	rx_9217b_16383b_frames;
+	__le64	rx_total_frames;
+	__le64	rx_ucast_frames;
+	__le64	rx_mcast_frames;
+	__le64	rx_bcast_frames;
+	__le64	rx_fcs_err_frames;
+	__le64	rx_ctrl_frames;
+	__le64	rx_pause_frames;
+	__le64	rx_pfc_frames;
+	__le64	rx_unsupported_opcode_frames;
+	__le64	rx_unsupported_da_pausepfc_frames;
+	__le64	rx_wrong_sa_frames;
+	__le64	rx_align_err_frames;
+	__le64	rx_oor_len_frames;
+	__le64	rx_code_err_frames;
+	__le64	rx_false_carrier_frames;
+	__le64	rx_ovrsz_frames;
+	__le64	rx_jbr_frames;
+	__le64	rx_mtu_err_frames;
+	__le64	rx_match_crc_frames;
+	__le64	rx_promiscuous_frames;
+	__le64	rx_tagged_frames;
+	__le64	rx_double_tagged_frames;
+	__le64	rx_trunc_frames;
+	__le64	rx_good_frames;
+	__le64	rx_pfc_xon2xoff_frames_pri0;
+	__le64	rx_pfc_xon2xoff_frames_pri1;
+	__le64	rx_pfc_xon2xoff_frames_pri2;
+	__le64	rx_pfc_xon2xoff_frames_pri3;
+	__le64	rx_pfc_xon2xoff_frames_pri4;
+	__le64	rx_pfc_xon2xoff_frames_pri5;
+	__le64	rx_pfc_xon2xoff_frames_pri6;
+	__le64	rx_pfc_xon2xoff_frames_pri7;
+	__le64	rx_pfc_ena_frames_pri0;
+	__le64	rx_pfc_ena_frames_pri1;
+	__le64	rx_pfc_ena_frames_pri2;
+	__le64	rx_pfc_ena_frames_pri3;
+	__le64	rx_pfc_ena_frames_pri4;
+	__le64	rx_pfc_ena_frames_pri5;
+	__le64	rx_pfc_ena_frames_pri6;
+	__le64	rx_pfc_ena_frames_pri7;
+	__le64	rx_sch_crc_err_frames;
+	__le64	rx_undrsz_frames;
+	__le64	rx_frag_frames;
+	__le64	rx_eee_lpi_events;
+	__le64	rx_eee_lpi_duration;
+	__le64	rx_llfc_physical_msgs;
+	__le64	rx_llfc_logical_msgs;
+	__le64	rx_llfc_msgs_with_crc_err;
+	__le64	rx_hcfc_msgs;
+	__le64	rx_hcfc_msgs_with_crc_err;
+	__le64	rx_bytes;
+	__le64	rx_runt_bytes;
+	__le64	rx_runt_frames;
+	__le64	rx_stat_discard;
+	__le64	rx_stat_err;
+};
+
+/* hwrm_fw_reset_input (size:192b/24B) */
 struct hwrm_fw_reset_input {
-	__le16 req_type;
-	__le16 cmpl_ring;
-	__le16 seq_id;
-	__le16 target_id;
-	__le64 resp_addr;
-	u8 embedded_proc_type;
-	#define FW_RESET_REQ_EMBEDDED_PROC_TYPE_BOOT		   0x0UL
-	#define FW_RESET_REQ_EMBEDDED_PROC_TYPE_MGMT		   0x1UL
-	#define FW_RESET_REQ_EMBEDDED_PROC_TYPE_NETCTRL	   0x2UL
-	#define FW_RESET_REQ_EMBEDDED_PROC_TYPE_ROCE		   0x3UL
-	#define FW_RESET_REQ_EMBEDDED_PROC_TYPE_HOST		   0x4UL
-	#define FW_RESET_REQ_EMBEDDED_PROC_TYPE_AP		   0x5UL
-	#define FW_RESET_REQ_EMBEDDED_PROC_TYPE_CHIP		   0x6UL
-	u8 selfrst_status;
-	#define FW_RESET_REQ_SELFRST_STATUS_SELFRSTNONE	   0x0UL
-	#define FW_RESET_REQ_SELFRST_STATUS_SELFRSTASAP	   0x1UL
-	#define FW_RESET_REQ_SELFRST_STATUS_SELFRSTPCIERST	   0x2UL
-	u8 host_idx;
-	u8 unused_0[5];
-};
-
-/* Output (16 bytes) */
+	__le16	req_type;
+	__le16	cmpl_ring;
+	__le16	seq_id;
+	__le16	target_id;
+	__le64	resp_addr;
+	u8	embedded_proc_type;
+	#define FW_RESET_REQ_EMBEDDED_PROC_TYPE_BOOT    0x0UL
+	#define FW_RESET_REQ_EMBEDDED_PROC_TYPE_MGMT    0x1UL
+	#define FW_RESET_REQ_EMBEDDED_PROC_TYPE_NETCTRL 0x2UL
+	#define FW_RESET_REQ_EMBEDDED_PROC_TYPE_ROCE    0x3UL
+	#define FW_RESET_REQ_EMBEDDED_PROC_TYPE_HOST    0x4UL
+	#define FW_RESET_REQ_EMBEDDED_PROC_TYPE_AP      0x5UL
+	#define FW_RESET_REQ_EMBEDDED_PROC_TYPE_CHIP    0x6UL
+	#define FW_RESET_REQ_EMBEDDED_PROC_TYPE_LAST   FW_RESET_REQ_EMBEDDED_PROC_TYPE_CHIP
+	u8	selfrst_status;
+	#define FW_RESET_REQ_SELFRST_STATUS_SELFRSTNONE    0x0UL
+	#define FW_RESET_REQ_SELFRST_STATUS_SELFRSTASAP    0x1UL
+	#define FW_RESET_REQ_SELFRST_STATUS_SELFRSTPCIERST 0x2UL
+	#define FW_RESET_REQ_SELFRST_STATUS_LAST          FW_RESET_REQ_SELFRST_STATUS_SELFRSTPCIERST
+	u8	host_idx;
+	u8	unused_0[5];
+};
+
+/* hwrm_fw_reset_output (size:128b/16B) */
 struct hwrm_fw_reset_output {
-	__le16 error_code;
-	__le16 req_type;
-	__le16 seq_id;
-	__le16 resp_len;
-	u8 selfrst_status;
-	#define FW_RESET_RESP_SELFRST_STATUS_SELFRSTNONE	   0x0UL
-	#define FW_RESET_RESP_SELFRST_STATUS_SELFRSTASAP	   0x1UL
-	#define FW_RESET_RESP_SELFRST_STATUS_SELFRSTPCIERST       0x2UL
-	u8 unused_0;
-	__le16 unused_1;
-	u8 unused_2;
-	u8 unused_3;
-	u8 unused_4;
-	u8 valid;
-};
-
-/* hwrm_fw_qstatus */
-/* Input (24 bytes) */
+	__le16	error_code;
+	__le16	req_type;
+	__le16	seq_id;
+	__le16	resp_len;
+	u8	selfrst_status;
+	#define FW_RESET_RESP_SELFRST_STATUS_SELFRSTNONE    0x0UL
+	#define FW_RESET_RESP_SELFRST_STATUS_SELFRSTASAP    0x1UL
+	#define FW_RESET_RESP_SELFRST_STATUS_SELFRSTPCIERST 0x2UL
+	#define FW_RESET_RESP_SELFRST_STATUS_LAST          FW_RESET_RESP_SELFRST_STATUS_SELFRSTPCIERST
+	u8	unused_0[6];
+	u8	valid;
+};
+
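
An illustrative aside, not part of the diff: the response's selfrst_status tells the host how the requested reset will actually take effect (none needed, firmware self-resets shortly, or only on the next PCIe reset). A sketch of acting on it:

/* Sketch only: decide whether more action is needed after HWRM_FW_RESET. */
static bool fw_reset_needs_pcie_reset(struct hwrm_fw_reset_output *resp)
{
	/* SELFRSTNONE: nothing further; SELFRSTASAP: firmware resets
	 * itself soon; SELFRSTPCIERST: firmware self-resets only when
	 * the function next goes through a PCIe reset.
	 */
	return resp->selfrst_status ==
	       FW_RESET_RESP_SELFRST_STATUS_SELFRSTPCIERST;
}
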
+/* hwrm_fw_qstatus_input (size:192b/24B) */
 struct hwrm_fw_qstatus_input {
-	__le16 req_type;
-	__le16 cmpl_ring;
-	__le16 seq_id;
-	__le16 target_id;
-	__le64 resp_addr;
-	u8 embedded_proc_type;
-	#define FW_QSTATUS_REQ_EMBEDDED_PROC_TYPE_BOOT		   0x0UL
-	#define FW_QSTATUS_REQ_EMBEDDED_PROC_TYPE_MGMT		   0x1UL
-	#define FW_QSTATUS_REQ_EMBEDDED_PROC_TYPE_NETCTRL	   0x2UL
-	#define FW_QSTATUS_REQ_EMBEDDED_PROC_TYPE_ROCE		   0x3UL
-	#define FW_QSTATUS_REQ_EMBEDDED_PROC_TYPE_HOST		   0x4UL
-	#define FW_QSTATUS_REQ_EMBEDDED_PROC_TYPE_AP		   0x5UL
-	#define FW_QSTATUS_REQ_EMBEDDED_PROC_TYPE_CHIP		   0x6UL
-	u8 unused_0[7];
-};
-
-/* Output (16 bytes) */
+	__le16	req_type;
+	__le16	cmpl_ring;
+	__le16	seq_id;
+	__le16	target_id;
+	__le64	resp_addr;
+	u8	embedded_proc_type;
+	#define FW_QSTATUS_REQ_EMBEDDED_PROC_TYPE_BOOT    0x0UL
+	#define FW_QSTATUS_REQ_EMBEDDED_PROC_TYPE_MGMT    0x1UL
+	#define FW_QSTATUS_REQ_EMBEDDED_PROC_TYPE_NETCTRL 0x2UL
+	#define FW_QSTATUS_REQ_EMBEDDED_PROC_TYPE_ROCE    0x3UL
+	#define FW_QSTATUS_REQ_EMBEDDED_PROC_TYPE_HOST    0x4UL
+	#define FW_QSTATUS_REQ_EMBEDDED_PROC_TYPE_AP      0x5UL
+	#define FW_QSTATUS_REQ_EMBEDDED_PROC_TYPE_CHIP    0x6UL
+	#define FW_QSTATUS_REQ_EMBEDDED_PROC_TYPE_LAST   FW_QSTATUS_REQ_EMBEDDED_PROC_TYPE_CHIP
+	u8	unused_0[7];
+};
+
+/* hwrm_fw_qstatus_output (size:128b/16B) */
 struct hwrm_fw_qstatus_output {
-	__le16 error_code;
-	__le16 req_type;
-	__le16 seq_id;
-	__le16 resp_len;
-	u8 selfrst_status;
-	#define FW_QSTATUS_RESP_SELFRST_STATUS_SELFRSTNONE	   0x0UL
-	#define FW_QSTATUS_RESP_SELFRST_STATUS_SELFRSTASAP	   0x1UL
-	#define FW_QSTATUS_RESP_SELFRST_STATUS_SELFRSTPCIERST     0x2UL
-	u8 unused_0;
-	__le16 unused_1;
-	u8 unused_2;
-	u8 unused_3;
-	u8 unused_4;
-	u8 valid;
-};
-
-/* hwrm_fw_set_time */
-/* Input (32 bytes) */
+	__le16	error_code;
+	__le16	req_type;
+	__le16	seq_id;
+	__le16	resp_len;
+	u8	selfrst_status;
+	#define FW_QSTATUS_RESP_SELFRST_STATUS_SELFRSTNONE    0x0UL
+	#define FW_QSTATUS_RESP_SELFRST_STATUS_SELFRSTASAP    0x1UL
+	#define FW_QSTATUS_RESP_SELFRST_STATUS_SELFRSTPCIERST 0x2UL
+	#define FW_QSTATUS_RESP_SELFRST_STATUS_LAST          FW_QSTATUS_RESP_SELFRST_STATUS_SELFRSTPCIERST
+	u8	unused_0[6];
+	u8	valid;
+};
+
+/* hwrm_fw_set_time_input (size:256b/32B) */
 struct hwrm_fw_set_time_input {
-	__le16 req_type;
-	__le16 cmpl_ring;
-	__le16 seq_id;
-	__le16 target_id;
-	__le64 resp_addr;
-	__le16 year;
-	#define FW_SET_TIME_REQ_YEAR_UNKNOWN			   0x0UL
-	u8 month;
-	u8 day;
-	u8 hour;
-	u8 minute;
-	u8 second;
-	u8 unused_0;
-	__le16 millisecond;
-	__le16 zone;
-	#define FW_SET_TIME_REQ_ZONE_UTC			   0x0UL
-	#define FW_SET_TIME_REQ_ZONE_UNKNOWN			   0xffffUL
-	__le32 unused_1;
-};
-
-/* Output (16 bytes) */
+	__le16	req_type;
+	__le16	cmpl_ring;
+	__le16	seq_id;
+	__le16	target_id;
+	__le64	resp_addr;
+	__le16	year;
+	#define FW_SET_TIME_REQ_YEAR_UNKNOWN 0x0UL
+	#define FW_SET_TIME_REQ_YEAR_LAST   FW_SET_TIME_REQ_YEAR_UNKNOWN
+	u8	month;
+	u8	day;
+	u8	hour;
+	u8	minute;
+	u8	second;
+	u8	unused_0;
+	__le16	millisecond;
+	__le16	zone;
+	#define FW_SET_TIME_REQ_ZONE_UTC     0x0UL
+	#define FW_SET_TIME_REQ_ZONE_UNKNOWN 0xffffUL
+	#define FW_SET_TIME_REQ_ZONE_LAST   FW_SET_TIME_REQ_ZONE_UNKNOWN
+	u8	unused_1[4];
+};
+
+/* hwrm_fw_set_time_output (size:128b/16B) */
 struct hwrm_fw_set_time_output {
-	__le16 error_code;
-	__le16 req_type;
-	__le16 seq_id;
-	__le16 resp_len;
-	__le32 unused_0;
-	u8 unused_1;
-	u8 unused_2;
-	u8 unused_3;
-	u8 valid;
-};
-
-/* hwrm_fw_set_structured_data */
-/* Input (32 bytes) */
+	__le16	error_code;
+	__le16	req_type;
+	__le16	seq_id;
+	__le16	resp_len;
+	u8	unused_0[7];
+	u8	valid;
+};
+
+/* hwrm_struct_hdr (size:128b/16B) */
+struct hwrm_struct_hdr {
+	__le16	struct_id;
+	#define STRUCT_HDR_STRUCT_ID_LLDP_CFG           0x41bUL
+	#define STRUCT_HDR_STRUCT_ID_DCBX_ETS           0x41dUL
+	#define STRUCT_HDR_STRUCT_ID_DCBX_PFC           0x41fUL
+	#define STRUCT_HDR_STRUCT_ID_DCBX_APP           0x421UL
+	#define STRUCT_HDR_STRUCT_ID_DCBX_FEATURE_STATE 0x422UL
+	#define STRUCT_HDR_STRUCT_ID_LLDP_GENERIC       0x424UL
+	#define STRUCT_HDR_STRUCT_ID_LLDP_DEVICE        0x426UL
+	#define STRUCT_HDR_STRUCT_ID_AFM_OPAQUE         0x1UL
+	#define STRUCT_HDR_STRUCT_ID_PORT_DESCRIPTION   0xaUL
+	#define STRUCT_HDR_STRUCT_ID_RSS_V2             0x64UL
+	#define STRUCT_HDR_STRUCT_ID_LAST              STRUCT_HDR_STRUCT_ID_RSS_V2
+	__le16	len;
+	u8	version;
+	u8	count;
+	__le16	subtype;
+	__le16	next_offset;
+	#define STRUCT_HDR_NEXT_OFFSET_LAST 0x0UL
+	u8	unused_0[6];
+};
+
+/* hwrm_struct_data_dcbx_app (size:64b/8B) */
+struct hwrm_struct_data_dcbx_app {
+	__be16	protocol_id;
+	u8	protocol_selector;
+	#define STRUCT_DATA_DCBX_APP_PROTOCOL_SELECTOR_ETHER_TYPE   0x1UL
+	#define STRUCT_DATA_DCBX_APP_PROTOCOL_SELECTOR_TCP_PORT     0x2UL
+	#define STRUCT_DATA_DCBX_APP_PROTOCOL_SELECTOR_UDP_PORT     0x3UL
+	#define STRUCT_DATA_DCBX_APP_PROTOCOL_SELECTOR_TCP_UDP_PORT 0x4UL
+	#define STRUCT_DATA_DCBX_APP_PROTOCOL_SELECTOR_LAST        STRUCT_DATA_DCBX_APP_PROTOCOL_SELECTOR_TCP_UDP_PORT
+	u8	priority;
+	u8	valid;
+	u8	unused_0[3];
+};
+
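
An illustrative aside, not part of the diff: hwrm_struct_hdr fronts every record in the structured-data buffers used by the get/set commands below, and records chain through next_offset. A sketch of walking the chain, on the assumption (suggested by the field name and the STRUCT_HDR_NEXT_OFFSET_LAST sentinel, not spelled out in this hunk) that next_offset counts bytes from the start of the current header and 0 terminates:

/* Sketch only: locate a record by struct_id in a structured-data buffer. */
static struct hwrm_struct_hdr *find_struct_rec(void *buf, u16 want_id)
{
	struct hwrm_struct_hdr *hdr = buf;

	for (;;) {
		u16 next;

		if (le16_to_cpu(hdr->struct_id) == want_id)
			return hdr;
		next = le16_to_cpu(hdr->next_offset);
		if (next == STRUCT_HDR_NEXT_OFFSET_LAST)
			return NULL;
		hdr = (void *)hdr + next;  /* assumed: offset from this header */
	}
}
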
+/* hwrm_fw_set_structured_data_input (size:256b/32B) */
 struct hwrm_fw_set_structured_data_input {
-	__le16 req_type;
-	__le16 cmpl_ring;
-	__le16 seq_id;
-	__le16 target_id;
-	__le64 resp_addr;
-	__le64 src_data_addr;
-	__le16 data_len;
-	u8 hdr_cnt;
-	u8 unused_0[5];
-};
-
-/* Output (16 bytes) */
+	__le16	req_type;
+	__le16	cmpl_ring;
+	__le16	seq_id;
+	__le16	target_id;
+	__le64	resp_addr;
+	__le64	src_data_addr;
+	__le16	data_len;
+	u8	hdr_cnt;
+	u8	unused_0[5];
+};
+
+/* hwrm_fw_set_structured_data_output (size:128b/16B) */
 struct hwrm_fw_set_structured_data_output {
-	__le16 error_code;
-	__le16 req_type;
-	__le16 seq_id;
-	__le16 resp_len;
-	__le32 unused_0;
-	u8 unused_1;
-	u8 unused_2;
-	u8 unused_3;
-	u8 valid;
-};
-
-/* Command specific Error Codes (8 bytes) */
+	__le16	error_code;
+	__le16	req_type;
+	__le16	seq_id;
+	__le16	resp_len;
+	u8	unused_0[7];
+	u8	valid;
+};
+
+/* hwrm_fw_set_structured_data_cmd_err (size:64b/8B) */
 struct hwrm_fw_set_structured_data_cmd_err {
-	u8 code;
-	#define FW_SET_STRUCTURED_DATA_CMD_ERR_CODE_UNKNOWN       0x0UL
-	#define FW_SET_STRUCTURED_DATA_CMD_ERR_CODE_BAD_HDR_CNT   0x1UL
-	#define FW_SET_STRUCTURED_DATA_CMD_ERR_CODE_BAD_FMT       0x2UL
-	#define FW_SET_STRUCTURED_DATA_CMD_ERR_CODE_BAD_ID	   0x3UL
-	u8 unused_0[7];
+	u8	code;
+	#define FW_SET_STRUCTURED_DATA_CMD_ERR_CODE_UNKNOWN     0x0UL
+	#define FW_SET_STRUCTURED_DATA_CMD_ERR_CODE_BAD_HDR_CNT 0x1UL
+	#define FW_SET_STRUCTURED_DATA_CMD_ERR_CODE_BAD_FMT     0x2UL
+	#define FW_SET_STRUCTURED_DATA_CMD_ERR_CODE_BAD_ID      0x3UL
+	#define FW_SET_STRUCTURED_DATA_CMD_ERR_CODE_LAST       FW_SET_STRUCTURED_DATA_CMD_ERR_CODE_BAD_ID
+	u8	unused_0[7];
 };
 
-/* hwrm_fw_get_structured_data */
-/* Input (32 bytes) */
+/* hwrm_fw_get_structured_data_input (size:256b/32B) */
 struct hwrm_fw_get_structured_data_input {
-	__le16 req_type;
-	__le16 cmpl_ring;
-	__le16 seq_id;
-	__le16 target_id;
-	__le64 resp_addr;
-	__le64 dest_data_addr;
-	__le16 data_len;
-	__le16 structure_id;
-	__le16 subtype;
-	#define FW_GET_STRUCTURED_DATA_REQ_SUBTYPE_ALL		   0xffffUL
-	#define FW_GET_STRUCTURED_DATA_REQ_SUBTYPE_NEAR_BRIDGE_ADMIN 0x100UL
-	#define FW_GET_STRUCTURED_DATA_REQ_SUBTYPE_NEAR_BRIDGE_PEER 0x101UL
+	__le16	req_type;
+	__le16	cmpl_ring;
+	__le16	seq_id;
+	__le16	target_id;
+	__le64	resp_addr;
+	__le64	dest_data_addr;
+	__le16	data_len;
+	__le16	structure_id;
+	__le16	subtype;
+	#define FW_GET_STRUCTURED_DATA_REQ_SUBTYPE_UNUSED                  0x0UL
+	#define FW_GET_STRUCTURED_DATA_REQ_SUBTYPE_ALL                     0xffffUL
+	#define FW_GET_STRUCTURED_DATA_REQ_SUBTYPE_NEAR_BRIDGE_ADMIN       0x100UL
+	#define FW_GET_STRUCTURED_DATA_REQ_SUBTYPE_NEAR_BRIDGE_PEER        0x101UL
 	#define FW_GET_STRUCTURED_DATA_REQ_SUBTYPE_NEAR_BRIDGE_OPERATIONAL 0x102UL
-	#define FW_GET_STRUCTURED_DATA_REQ_SUBTYPE_NON_TPMR_ADMIN 0x200UL
-	#define FW_GET_STRUCTURED_DATA_REQ_SUBTYPE_NON_TPMR_PEER  0x201UL
-	#define FW_GET_STRUCTURED_DATA_REQ_SUBTYPE_NON_TPMR_OPERATIONAL 0x202UL
-	#define FW_GET_STRUCTURED_DATA_REQ_SUBTYPE_HOST_OPERATIONAL 0x300UL
-	u8 count;
-	u8 unused_0;
+	#define FW_GET_STRUCTURED_DATA_REQ_SUBTYPE_NON_TPMR_ADMIN          0x200UL
+	#define FW_GET_STRUCTURED_DATA_REQ_SUBTYPE_NON_TPMR_PEER           0x201UL
+	#define FW_GET_STRUCTURED_DATA_REQ_SUBTYPE_NON_TPMR_OPERATIONAL    0x202UL
+	#define FW_GET_STRUCTURED_DATA_REQ_SUBTYPE_HOST_OPERATIONAL        0x300UL
+	#define FW_GET_STRUCTURED_DATA_REQ_SUBTYPE_LAST                   FW_GET_STRUCTURED_DATA_REQ_SUBTYPE_HOST_OPERATIONAL
+	u8	count;
+	u8	unused_0;
 };
 
-/* Output (16 bytes) */
+/* hwrm_fw_get_structured_data_output (size:128b/16B) */
 struct hwrm_fw_get_structured_data_output {
-	__le16 error_code;
-	__le16 req_type;
-	__le16 seq_id;
-	__le16 resp_len;
-	u8 hdr_cnt;
-	u8 unused_0;
-	__le16 unused_1;
-	u8 unused_2;
-	u8 unused_3;
-	u8 unused_4;
-	u8 valid;
-};
-
-/* Command specific Error Codes (8 bytes) */
+	__le16	error_code;
+	__le16	req_type;
+	__le16	seq_id;
+	__le16	resp_len;
+	u8	hdr_cnt;
+	u8	unused_0[6];
+	u8	valid;
+};
+
+/* hwrm_fw_get_structured_data_cmd_err (size:64b/8B) */
 struct hwrm_fw_get_structured_data_cmd_err {
-	u8 code;
-	#define FW_GET_STRUCTURED_DATA_CMD_ERR_CODE_UNKNOWN       0x0UL
-	#define FW_GET_STRUCTURED_DATA_CMD_ERR_CODE_BAD_ID	   0x3UL
-	u8 unused_0[7];
+	u8	code;
+	#define FW_GET_STRUCTURED_DATA_CMD_ERR_CODE_UNKNOWN 0x0UL
+	#define FW_GET_STRUCTURED_DATA_CMD_ERR_CODE_BAD_ID  0x3UL
+	#define FW_GET_STRUCTURED_DATA_CMD_ERR_CODE_LAST   FW_GET_STRUCTURED_DATA_CMD_ERR_CODE_BAD_ID
+	u8	unused_0[7];
 };
 
-/* hwrm_exec_fwd_resp */
-/* Input (128 bytes) */
+/* hwrm_exec_fwd_resp_input (size:1024b/128B) */
 struct hwrm_exec_fwd_resp_input {
-	__le16 req_type;
-	__le16 cmpl_ring;
-	__le16 seq_id;
-	__le16 target_id;
-	__le64 resp_addr;
-	__le32 encap_request[26];
-	__le16 encap_resp_target_id;
-	__le16 unused_0[3];
+	__le16	req_type;
+	__le16	cmpl_ring;
+	__le16	seq_id;
+	__le16	target_id;
+	__le64	resp_addr;
+	__le32	encap_request[26];
+	__le16	encap_resp_target_id;
+	u8	unused_0[6];
 };
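
An illustrative aside, not part of the diff: exec_fwd_resp (and reject_fwd_resp below) carry a complete encapsulated HWRM message on behalf of another function, typically a VF request trapped by the PF, and the firmware delivers the eventual response to encap_resp_target_id. A sketch of the forwarding side, where vf_msg and vf_fid are hypothetical inputs:

/* Sketch only: forward a trapped VF command for execution. */
static void fill_exec_fwd(struct hwrm_exec_fwd_resp_input *req,
			  const void *vf_msg, size_t msg_len, u16 vf_fid)
{
	memcpy(req->encap_request, vf_msg,
	       min(msg_len, sizeof(req->encap_request)));
	req->encap_resp_target_id = cpu_to_le16(vf_fid);
}
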
 
-/* Output (16 bytes) */
+/* hwrm_exec_fwd_resp_output (size:128b/16B) */
 struct hwrm_exec_fwd_resp_output {
-	__le16 error_code;
-	__le16 req_type;
-	__le16 seq_id;
-	__le16 resp_len;
-	__le32 unused_0;
-	u8 unused_1;
-	u8 unused_2;
-	u8 unused_3;
-	u8 valid;
-};
-
-/* hwrm_reject_fwd_resp */
-/* Input (128 bytes) */
+	__le16	error_code;
+	__le16	req_type;
+	__le16	seq_id;
+	__le16	resp_len;
+	u8	unused_0[7];
+	u8	valid;
+};
+
+/* hwrm_reject_fwd_resp_input (size:1024b/128B) */
 struct hwrm_reject_fwd_resp_input {
-	__le16 req_type;
-	__le16 cmpl_ring;
-	__le16 seq_id;
-	__le16 target_id;
-	__le64 resp_addr;
-	__le32 encap_request[26];
-	__le16 encap_resp_target_id;
-	__le16 unused_0[3];
+	__le16	req_type;
+	__le16	cmpl_ring;
+	__le16	seq_id;
+	__le16	target_id;
+	__le64	resp_addr;
+	__le32	encap_request[26];
+	__le16	encap_resp_target_id;
+	u8	unused_0[6];
 };
 
-/* Output (16 bytes) */
+/* hwrm_reject_fwd_resp_output (size:128b/16B) */
 struct hwrm_reject_fwd_resp_output {
-	__le16 error_code;
-	__le16 req_type;
-	__le16 seq_id;
-	__le16 resp_len;
-	__le32 unused_0;
-	u8 unused_1;
-	u8 unused_2;
-	u8 unused_3;
-	u8 valid;
-};
-
-/* hwrm_fwd_resp */
-/* Input (40 bytes) */
+	__le16	error_code;
+	__le16	req_type;
+	__le16	seq_id;
+	__le16	resp_len;
+	u8	unused_0[7];
+	u8	valid;
+};
+
+/* hwrm_fwd_resp_input (size:1024b/128B) */
 struct hwrm_fwd_resp_input {
-	__le16 req_type;
-	__le16 cmpl_ring;
-	__le16 seq_id;
-	__le16 target_id;
-	__le64 resp_addr;
-	__le16 encap_resp_target_id;
-	__le16 encap_resp_cmpl_ring;
-	__le16 encap_resp_len;
-	u8 unused_0;
-	u8 unused_1;
-	__le64 encap_resp_addr;
-	__le32 encap_resp[24];
-};
-
-/* Output (16 bytes) */
+	__le16	req_type;
+	__le16	cmpl_ring;
+	__le16	seq_id;
+	__le16	target_id;
+	__le64	resp_addr;
+	__le16	encap_resp_target_id;
+	__le16	encap_resp_cmpl_ring;
+	__le16	encap_resp_len;
+	u8	unused_0;
+	u8	unused_1;
+	__le64	encap_resp_addr;
+	__le32	encap_resp[24];
+};
+
+/* hwrm_fwd_resp_output (size:128b/16B) */
 struct hwrm_fwd_resp_output {
-	__le16 error_code;
-	__le16 req_type;
-	__le16 seq_id;
-	__le16 resp_len;
-	__le32 unused_0;
-	u8 unused_1;
-	u8 unused_2;
-	u8 unused_3;
-	u8 valid;
-};
-
-/* hwrm_fwd_async_event_cmpl */
-/* Input (32 bytes) */
+	__le16	error_code;
+	__le16	req_type;
+	__le16	seq_id;
+	__le16	resp_len;
+	u8	unused_0[7];
+	u8	valid;
+};
+
+/* hwrm_fwd_async_event_cmpl_input (size:320b/40B) */
 struct hwrm_fwd_async_event_cmpl_input {
-	__le16 req_type;
-	__le16 cmpl_ring;
-	__le16 seq_id;
-	__le16 target_id;
-	__le64 resp_addr;
-	__le16 encap_async_event_target_id;
-	u8 unused_0;
-	u8 unused_1;
-	u8 unused_2[3];
-	u8 unused_3;
-	__le32 encap_async_event_cmpl[4];
-};
-
-/* Output (16 bytes) */
+	__le16	req_type;
+	__le16	cmpl_ring;
+	__le16	seq_id;
+	__le16	target_id;
+	__le64	resp_addr;
+	__le16	encap_async_event_target_id;
+	u8	unused_0[6];
+	__le32	encap_async_event_cmpl[4];
+};
+
+/* hwrm_fwd_async_event_cmpl_output (size:128b/16B) */
 struct hwrm_fwd_async_event_cmpl_output {
-	__le16 error_code;
-	__le16 req_type;
-	__le16 seq_id;
-	__le16 resp_len;
-	__le32 unused_0;
-	u8 unused_1;
-	u8 unused_2;
-	u8 unused_3;
-	u8 valid;
-};
-
-/* hwrm_temp_monitor_query */
-/* Input (16 bytes) */
+	__le16	error_code;
+	__le16	req_type;
+	__le16	seq_id;
+	__le16	resp_len;
+	u8	unused_0[7];
+	u8	valid;
+};
+
+/* hwrm_temp_monitor_query_input (size:128b/16B) */
 struct hwrm_temp_monitor_query_input {
-	__le16 req_type;
-	__le16 cmpl_ring;
-	__le16 seq_id;
-	__le16 target_id;
-	__le64 resp_addr;
+	__le16	req_type;
+	__le16	cmpl_ring;
+	__le16	seq_id;
+	__le16	target_id;
+	__le64	resp_addr;
 };
 
-/* Output (16 bytes) */
+/* hwrm_temp_monitor_query_output (size:128b/16B) */
 struct hwrm_temp_monitor_query_output {
-	__le16 error_code;
-	__le16 req_type;
-	__le16 seq_id;
-	__le16 resp_len;
-	u8 temp;
-	u8 unused_0;
-	__le16 unused_1;
-	u8 unused_2;
-	u8 unused_3;
-	u8 unused_4;
-	u8 valid;
-};
-
-/* hwrm_wol_filter_alloc */
-/* Input (64 bytes) */
+	__le16	error_code;
+	__le16	req_type;
+	__le16	seq_id;
+	__le16	resp_len;
+	u8	temp;
+	u8	unused_0[6];
+	u8	valid;
+};
+
+/* hwrm_wol_filter_alloc_input (size:512b/64B) */
 struct hwrm_wol_filter_alloc_input {
-	__le16 req_type;
-	__le16 cmpl_ring;
-	__le16 seq_id;
-	__le16 target_id;
-	__le64 resp_addr;
-	__le32 flags;
-	__le32 enables;
-	#define WOL_FILTER_ALLOC_REQ_ENABLES_MAC_ADDRESS	    0x1UL
-	#define WOL_FILTER_ALLOC_REQ_ENABLES_PATTERN_OFFSET	    0x2UL
+	__le16	req_type;
+	__le16	cmpl_ring;
+	__le16	seq_id;
+	__le16	target_id;
+	__le64	resp_addr;
+	__le32	flags;
+	__le32	enables;
+	#define WOL_FILTER_ALLOC_REQ_ENABLES_MAC_ADDRESS           0x1UL
+	#define WOL_FILTER_ALLOC_REQ_ENABLES_PATTERN_OFFSET        0x2UL
 	#define WOL_FILTER_ALLOC_REQ_ENABLES_PATTERN_BUF_SIZE      0x4UL
 	#define WOL_FILTER_ALLOC_REQ_ENABLES_PATTERN_BUF_ADDR      0x8UL
 	#define WOL_FILTER_ALLOC_REQ_ENABLES_PATTERN_MASK_ADDR     0x10UL
 	#define WOL_FILTER_ALLOC_REQ_ENABLES_PATTERN_MASK_SIZE     0x20UL
-	__le16 port_id;
-	u8 wol_type;
-	#define WOL_FILTER_ALLOC_REQ_WOL_TYPE_MAGICPKT		   0x0UL
-	#define WOL_FILTER_ALLOC_REQ_WOL_TYPE_BMP		   0x1UL
-	#define WOL_FILTER_ALLOC_REQ_WOL_TYPE_INVALID		   0xffUL
-	u8 unused_0;
-	__le32 unused_1;
-	u8 mac_address[6];
-	__le16 pattern_offset;
-	__le16 pattern_buf_size;
-	__le16 pattern_mask_size;
-	__le32 unused_2;
-	__le64 pattern_buf_addr;
-	__le64 pattern_mask_addr;
-};
-
-/* Output (16 bytes) */
+	__le16	port_id;
+	u8	wol_type;
+	#define WOL_FILTER_ALLOC_REQ_WOL_TYPE_MAGICPKT 0x0UL
+	#define WOL_FILTER_ALLOC_REQ_WOL_TYPE_BMP      0x1UL
+	#define WOL_FILTER_ALLOC_REQ_WOL_TYPE_INVALID  0xffUL
+	#define WOL_FILTER_ALLOC_REQ_WOL_TYPE_LAST    WOL_FILTER_ALLOC_REQ_WOL_TYPE_INVALID
+	u8	unused_0[5];
+	u8	mac_address[6];
+	__le16	pattern_offset;
+	__le16	pattern_buf_size;
+	__le16	pattern_mask_size;
+	u8	unused_1[4];
+	__le64	pattern_buf_addr;
+	__le64	pattern_mask_addr;
+};
+
+/* hwrm_wol_filter_alloc_output (size:128b/16B) */
 struct hwrm_wol_filter_alloc_output {
-	__le16 error_code;
-	__le16 req_type;
-	__le16 seq_id;
-	__le16 resp_len;
-	u8 wol_filter_id;
-	u8 unused_0;
-	__le16 unused_1;
-	u8 unused_2;
-	u8 unused_3;
-	u8 unused_4;
-	u8 valid;
-};
-
-/* hwrm_wol_filter_free */
-/* Input (32 bytes) */
+	__le16	error_code;
+	__le16	req_type;
+	__le16	seq_id;
+	__le16	resp_len;
+	u8	wol_filter_id;
+	u8	unused_0[6];
+	u8	valid;
+};
+
+/* hwrm_wol_filter_free_input (size:256b/32B) */
 struct hwrm_wol_filter_free_input {
-	__le16 req_type;
-	__le16 cmpl_ring;
-	__le16 seq_id;
-	__le16 target_id;
-	__le64 resp_addr;
-	__le32 flags;
+	__le16	req_type;
+	__le16	cmpl_ring;
+	__le16	seq_id;
+	__le16	target_id;
+	__le64	resp_addr;
+	__le32	flags;
 	#define WOL_FILTER_FREE_REQ_FLAGS_FREE_ALL_WOL_FILTERS     0x1UL
-	__le32 enables;
-	#define WOL_FILTER_FREE_REQ_ENABLES_WOL_FILTER_ID	    0x1UL
-	__le16 port_id;
-	u8 wol_filter_id;
-	u8 unused_0[5];
+	__le32	enables;
+	#define WOL_FILTER_FREE_REQ_ENABLES_WOL_FILTER_ID     0x1UL
+	__le16	port_id;
+	u8	wol_filter_id;
+	u8	unused_0[5];
 };
 
-/* Output (16 bytes) */
+/* hwrm_wol_filter_free_output (size:128b/16B) */
 struct hwrm_wol_filter_free_output {
-	__le16 error_code;
-	__le16 req_type;
-	__le16 seq_id;
-	__le16 resp_len;
-	__le32 unused_0;
-	u8 unused_1;
-	u8 unused_2;
-	u8 unused_3;
-	u8 valid;
-};
-
-/* hwrm_wol_filter_qcfg */
-/* Input (56 bytes) */
+	__le16	error_code;
+	__le16	req_type;
+	__le16	seq_id;
+	__le16	resp_len;
+	u8	unused_0[7];
+	u8	valid;
+};
+
+/* hwrm_wol_filter_qcfg_input (size:448b/56B) */
 struct hwrm_wol_filter_qcfg_input {
-	__le16 req_type;
-	__le16 cmpl_ring;
-	__le16 seq_id;
-	__le16 target_id;
-	__le64 resp_addr;
-	__le16 port_id;
-	__le16 handle;
-	__le32 unused_0;
-	__le64 pattern_buf_addr;
-	__le16 pattern_buf_size;
-	u8 unused_1;
-	u8 unused_2;
-	u8 unused_3[3];
-	u8 unused_4;
-	__le64 pattern_mask_addr;
-	__le16 pattern_mask_size;
-	__le16 unused_5[3];
-};
-
-/* Output (32 bytes) */
+	__le16	req_type;
+	__le16	cmpl_ring;
+	__le16	seq_id;
+	__le16	target_id;
+	__le64	resp_addr;
+	__le16	port_id;
+	__le16	handle;
+	u8	unused_0[4];
+	__le64	pattern_buf_addr;
+	__le16	pattern_buf_size;
+	u8	unused_1[6];
+	__le64	pattern_mask_addr;
+	__le16	pattern_mask_size;
+	u8	unused_2[6];
+};
+
+/* hwrm_wol_filter_qcfg_output (size:256b/32B) */
 struct hwrm_wol_filter_qcfg_output {
-	__le16 error_code;
-	__le16 req_type;
-	__le16 seq_id;
-	__le16 resp_len;
-	__le16 next_handle;
-	u8 wol_filter_id;
-	u8 wol_type;
-	#define WOL_FILTER_QCFG_RESP_WOL_TYPE_MAGICPKT		   0x0UL
-	#define WOL_FILTER_QCFG_RESP_WOL_TYPE_BMP		   0x1UL
-	#define WOL_FILTER_QCFG_RESP_WOL_TYPE_INVALID		   0xffUL
-	__le32 unused_0;
-	u8 mac_address[6];
-	__le16 pattern_offset;
-	__le16 pattern_size;
-	__le16 pattern_mask_size;
-	u8 unused_1;
-	u8 unused_2;
-	u8 unused_3;
-	u8 valid;
-};
-
-/* hwrm_wol_reason_qcfg */
-/* Input (40 bytes) */
+	__le16	error_code;
+	__le16	req_type;
+	__le16	seq_id;
+	__le16	resp_len;
+	__le16	next_handle;
+	u8	wol_filter_id;
+	u8	wol_type;
+	#define WOL_FILTER_QCFG_RESP_WOL_TYPE_MAGICPKT 0x0UL
+	#define WOL_FILTER_QCFG_RESP_WOL_TYPE_BMP      0x1UL
+	#define WOL_FILTER_QCFG_RESP_WOL_TYPE_INVALID  0xffUL
+	#define WOL_FILTER_QCFG_RESP_WOL_TYPE_LAST    WOL_FILTER_QCFG_RESP_WOL_TYPE_INVALID
+	__le32	unused_0;
+	u8	mac_address[6];
+	__le16	pattern_offset;
+	__le16	pattern_size;
+	__le16	pattern_mask_size;
+	u8	unused_1[3];
+	u8	valid;
+};
+
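
An illustrative aside, not part of the diff: wol_filter_qcfg is a cursor-style interface, with each response's next_handle seeding the next query. A sketch of the walk, modeled on how the bnxt driver consumes it; treating 0 and 0xffff as end markers is an assumption drawn from the driver, not stated in this header:

/* Sketch only: enumerate a port's WoL filters.  send_wol_qcfg() is a
 * hypothetical helper standing in for the driver's HWRM send path; it
 * fills @resp from @req.
 */
static void walk_wol_filters(u16 port_id,
			     struct hwrm_wol_filter_qcfg_input *req,
			     struct hwrm_wol_filter_qcfg_output *resp)
{
	u16 handle = 0;

	do {
		req->port_id = cpu_to_le16(port_id);
		req->handle = cpu_to_le16(handle);
		send_wol_qcfg(req, resp);		/* hypothetical */
		handle = le16_to_cpu(resp->next_handle);
		if (resp->wol_type == WOL_FILTER_QCFG_RESP_WOL_TYPE_MAGICPKT)
			pr_info("magic WoL filter %u on %pM\n",
				resp->wol_filter_id, resp->mac_address);
	} while (handle && handle != 0xffff);
}
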
+/* hwrm_wol_reason_qcfg_input (size:320b/40B) */
 struct hwrm_wol_reason_qcfg_input {
-	__le16 req_type;
-	__le16 cmpl_ring;
-	__le16 seq_id;
-	__le16 target_id;
-	__le64 resp_addr;
-	__le16 port_id;
-	u8 unused_0;
-	u8 unused_1;
-	u8 unused_2[3];
-	u8 unused_3;
-	__le64 wol_pkt_buf_addr;
-	__le16 wol_pkt_buf_size;
-	__le16 unused_4[3];
-};
-
-/* Output (16 bytes) */
+	__le16	req_type;
+	__le16	cmpl_ring;
+	__le16	seq_id;
+	__le16	target_id;
+	__le64	resp_addr;
+	__le16	port_id;
+	u8	unused_0[6];
+	__le64	wol_pkt_buf_addr;
+	__le16	wol_pkt_buf_size;
+	u8	unused_1[6];
+};
+
+/* hwrm_wol_reason_qcfg_output (size:128b/16B) */
 struct hwrm_wol_reason_qcfg_output {
-	__le16 error_code;
-	__le16 req_type;
-	__le16 seq_id;
-	__le16 resp_len;
-	u8 wol_filter_id;
-	u8 wol_reason;
-	#define WOL_REASON_QCFG_RESP_WOL_REASON_MAGICPKT	   0x0UL
-	#define WOL_REASON_QCFG_RESP_WOL_REASON_BMP		   0x1UL
-	#define WOL_REASON_QCFG_RESP_WOL_REASON_INVALID	   0xffUL
-	u8 wol_pkt_len;
-	u8 unused_0;
-	u8 unused_1;
-	u8 unused_2;
-	u8 unused_3;
-	u8 valid;
-};
-
-/* hwrm_dbg_read_direct */
-/* Input (32 bytes) */
-struct hwrm_dbg_read_direct_input {
-	__le16 req_type;
-	__le16 cmpl_ring;
-	__le16 seq_id;
-	__le16 target_id;
-	__le64 resp_addr;
-	__le64 host_dest_addr;
-	__le32 read_addr;
-	__le32 read_len32;
-};
-
-/* Output (16 bytes) */
-struct hwrm_dbg_read_direct_output {
-	__le16 error_code;
-	__le16 req_type;
-	__le16 seq_id;
-	__le16 resp_len;
-	__le32 unused_0;
-	u8 unused_1;
-	u8 unused_2;
-	u8 unused_3;
-	u8 valid;
-};
-
-/* hwrm_nvm_read */
-/* Input (40 bytes) */
+	__le16	error_code;
+	__le16	req_type;
+	__le16	seq_id;
+	__le16	resp_len;
+	u8	wol_filter_id;
+	u8	wol_reason;
+	#define WOL_REASON_QCFG_RESP_WOL_REASON_MAGICPKT 0x0UL
+	#define WOL_REASON_QCFG_RESP_WOL_REASON_BMP      0x1UL
+	#define WOL_REASON_QCFG_RESP_WOL_REASON_INVALID  0xffUL
+	#define WOL_REASON_QCFG_RESP_WOL_REASON_LAST    WOL_REASON_QCFG_RESP_WOL_REASON_INVALID
+	u8	wol_pkt_len;
+	u8	unused_0[4];
+	u8	valid;
+};
+
+/* hwrm_nvm_read_input (size:320b/40B) */
 struct hwrm_nvm_read_input {
-	__le16 req_type;
-	__le16 cmpl_ring;
-	__le16 seq_id;
-	__le16 target_id;
-	__le64 resp_addr;
-	__le64 host_dest_addr;
-	__le16 dir_idx;
-	u8 unused_0;
-	u8 unused_1;
-	__le32 offset;
-	__le32 len;
-	__le32 unused_2;
-};
-
-/* Output (16 bytes) */
+	__le16	req_type;
+	__le16	cmpl_ring;
+	__le16	seq_id;
+	__le16	target_id;
+	__le64	resp_addr;
+	__le64	host_dest_addr;
+	__le16	dir_idx;
+	u8	unused_0[2];
+	__le32	offset;
+	__le32	len;
+	u8	unused_1[4];
+};
+
+/* hwrm_nvm_read_output (size:128b/16B) */
 struct hwrm_nvm_read_output {
-	__le16 error_code;
-	__le16 req_type;
-	__le16 seq_id;
-	__le16 resp_len;
-	__le32 unused_0;
-	u8 unused_1;
-	u8 unused_2;
-	u8 unused_3;
-	u8 valid;
-};
-
-/* hwrm_nvm_get_dir_entries */
-/* Input (24 bytes) */
+	__le16	error_code;
+	__le16	req_type;
+	__le16	seq_id;
+	__le16	resp_len;
+	u8	unused_0[7];
+	u8	valid;
+};
+
+/* hwrm_nvm_get_dir_entries_input (size:192b/24B) */
 struct hwrm_nvm_get_dir_entries_input {
-	__le16 req_type;
-	__le16 cmpl_ring;
-	__le16 seq_id;
-	__le16 target_id;
-	__le64 resp_addr;
-	__le64 host_dest_addr;
+	__le16	req_type;
+	__le16	cmpl_ring;
+	__le16	seq_id;
+	__le16	target_id;
+	__le64	resp_addr;
+	__le64	host_dest_addr;
 };
 
-/* Output (16 bytes) */
+/* hwrm_nvm_get_dir_entries_output (size:128b/16B) */
 struct hwrm_nvm_get_dir_entries_output {
-	__le16 error_code;
-	__le16 req_type;
-	__le16 seq_id;
-	__le16 resp_len;
-	__le32 unused_0;
-	u8 unused_1;
-	u8 unused_2;
-	u8 unused_3;
-	u8 valid;
-};
-
-/* hwrm_nvm_get_dir_info */
-/* Input (16 bytes) */
+	__le16	error_code;
+	__le16	req_type;
+	__le16	seq_id;
+	__le16	resp_len;
+	u8	unused_0[7];
+	u8	valid;
+};
+
+/* hwrm_nvm_get_dir_info_input (size:128b/16B) */
 struct hwrm_nvm_get_dir_info_input {
-	__le16 req_type;
-	__le16 cmpl_ring;
-	__le16 seq_id;
-	__le16 target_id;
-	__le64 resp_addr;
+	__le16	req_type;
+	__le16	cmpl_ring;
+	__le16	seq_id;
+	__le16	target_id;
+	__le64	resp_addr;
 };
 
-/* Output (24 bytes) */
+/* hwrm_nvm_get_dir_info_output (size:192b/24B) */
 struct hwrm_nvm_get_dir_info_output {
-	__le16 error_code;
-	__le16 req_type;
-	__le16 seq_id;
-	__le16 resp_len;
-	__le32 entries;
-	__le32 entry_length;
-	__le32 unused_0;
-	u8 unused_1;
-	u8 unused_2;
-	u8 unused_3;
-	u8 valid;
-};
-
-/* hwrm_nvm_write */
-/* Input (48 bytes) */
+	__le16	error_code;
+	__le16	req_type;
+	__le16	seq_id;
+	__le16	resp_len;
+	__le32	entries;
+	__le32	entry_length;
+	u8	unused_0[7];
+	u8	valid;
+};
+
+/* hwrm_nvm_write_input (size:384b/48B) */
 struct hwrm_nvm_write_input {
-	__le16 req_type;
-	__le16 cmpl_ring;
-	__le16 seq_id;
-	__le16 target_id;
-	__le64 resp_addr;
-	__le64 host_src_addr;
-	__le16 dir_type;
-	__le16 dir_ordinal;
-	__le16 dir_ext;
-	__le16 dir_attr;
-	__le32 dir_data_length;
-	__le16 option;
-	__le16 flags;
-	#define NVM_WRITE_REQ_FLAGS_KEEP_ORIG_ACTIVE_IMG	    0x1UL
-	__le32 dir_item_length;
-	__le32 unused_0;
-};
-
-/* Output (16 bytes) */
+	__le16	req_type;
+	__le16	cmpl_ring;
+	__le16	seq_id;
+	__le16	target_id;
+	__le64	resp_addr;
+	__le64	host_src_addr;
+	__le16	dir_type;
+	__le16	dir_ordinal;
+	__le16	dir_ext;
+	__le16	dir_attr;
+	__le32	dir_data_length;
+	__le16	option;
+	__le16	flags;
+	#define NVM_WRITE_REQ_FLAGS_KEEP_ORIG_ACTIVE_IMG     0x1UL
+	__le32	dir_item_length;
+	__le32	unused_0;
+};
+
+/* hwrm_nvm_write_output (size:128b/16B) */
 struct hwrm_nvm_write_output {
-	__le16 error_code;
-	__le16 req_type;
-	__le16 seq_id;
-	__le16 resp_len;
-	__le32 dir_item_length;
-	__le16 dir_idx;
-	u8 unused_0;
-	u8 valid;
+	__le16	error_code;
+	__le16	req_type;
+	__le16	seq_id;
+	__le16	resp_len;
+	__le32	dir_item_length;
+	__le16	dir_idx;
+	u8	unused_0;
+	u8	valid;
 };
 
-/* Command specific Error Codes (8 bytes) */
+/* hwrm_nvm_write_cmd_err (size:64b/8B) */
 struct hwrm_nvm_write_cmd_err {
-	u8 code;
-	#define NVM_WRITE_CMD_ERR_CODE_UNKNOWN			   0x0UL
-	#define NVM_WRITE_CMD_ERR_CODE_FRAG_ERR		   0x1UL
-	#define NVM_WRITE_CMD_ERR_CODE_NO_SPACE		   0x2UL
-	u8 unused_0[7];
+	u8	code;
+	#define NVM_WRITE_CMD_ERR_CODE_UNKNOWN  0x0UL
+	#define NVM_WRITE_CMD_ERR_CODE_FRAG_ERR 0x1UL
+	#define NVM_WRITE_CMD_ERR_CODE_NO_SPACE 0x2UL
+	#define NVM_WRITE_CMD_ERR_CODE_LAST    NVM_WRITE_CMD_ERR_CODE_NO_SPACE
+	u8	unused_0[7];
 };
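Each enumerated field in the regenerated header also gains a *_LAST define aliasing its highest defined value. A hedged sketch of the range check such a sentinel enables, using the error codes directly above (the helper is illustrative, not driver code):

	/* Codes 0x0..0x2 are defined, so anything above the _LAST
	 * sentinel is unknown to this header revision. */
	static bool nvm_write_err_known(u8 code)
	{
		return code <= NVM_WRITE_CMD_ERR_CODE_LAST;
	}
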
 
-/* hwrm_nvm_modify */
-/* Input (40 bytes) */
+/* hwrm_nvm_modify_input (size:320b/40B) */
 struct hwrm_nvm_modify_input {
-	__le16 req_type;
-	__le16 cmpl_ring;
-	__le16 seq_id;
-	__le16 target_id;
-	__le64 resp_addr;
-	__le64 host_src_addr;
-	__le16 dir_idx;
-	u8 unused_0;
-	u8 unused_1;
-	__le32 offset;
-	__le32 len;
-	__le32 unused_2;
-};
-
-/* Output (16 bytes) */
+	__le16	req_type;
+	__le16	cmpl_ring;
+	__le16	seq_id;
+	__le16	target_id;
+	__le64	resp_addr;
+	__le64	host_src_addr;
+	__le16	dir_idx;
+	u8	unused_0[2];
+	__le32	offset;
+	__le32	len;
+	u8	unused_1[4];
+};
+
+/* hwrm_nvm_modify_output (size:128b/16B) */
 struct hwrm_nvm_modify_output {
-	__le16 error_code;
-	__le16 req_type;
-	__le16 seq_id;
-	__le16 resp_len;
-	__le32 unused_0;
-	u8 unused_1;
-	u8 unused_2;
-	u8 unused_3;
-	u8 valid;
-};
-
-/* hwrm_nvm_find_dir_entry */
-/* Input (32 bytes) */
+	__le16	error_code;
+	__le16	req_type;
+	__le16	seq_id;
+	__le16	resp_len;
+	u8	unused_0[7];
+	u8	valid;
+};
+
+/* hwrm_nvm_find_dir_entry_input (size:256b/32B) */
 struct hwrm_nvm_find_dir_entry_input {
-	__le16 req_type;
-	__le16 cmpl_ring;
-	__le16 seq_id;
-	__le16 target_id;
-	__le64 resp_addr;
-	__le32 enables;
-	#define NVM_FIND_DIR_ENTRY_REQ_ENABLES_DIR_IDX_VALID       0x1UL
-	__le16 dir_idx;
-	__le16 dir_type;
-	__le16 dir_ordinal;
-	__le16 dir_ext;
-	u8 opt_ordinal;
-	#define NVM_FIND_DIR_ENTRY_REQ_OPT_ORDINAL_MASK	    0x3UL
-	#define NVM_FIND_DIR_ENTRY_REQ_OPT_ORDINAL_SFT		    0
-	#define NVM_FIND_DIR_ENTRY_REQ_OPT_ORDINAL_EQ		   0x0UL
-	#define NVM_FIND_DIR_ENTRY_REQ_OPT_ORDINAL_GE		   0x1UL
-	#define NVM_FIND_DIR_ENTRY_REQ_OPT_ORDINAL_GT		   0x2UL
-	u8 unused_1[3];
-};
-
-/* Output (32 bytes) */
+	__le16	req_type;
+	__le16	cmpl_ring;
+	__le16	seq_id;
+	__le16	target_id;
+	__le64	resp_addr;
+	__le32	enables;
+	#define NVM_FIND_DIR_ENTRY_REQ_ENABLES_DIR_IDX_VALID     0x1UL
+	__le16	dir_idx;
+	__le16	dir_type;
+	__le16	dir_ordinal;
+	__le16	dir_ext;
+	u8	opt_ordinal;
+	#define NVM_FIND_DIR_ENTRY_REQ_OPT_ORDINAL_MASK 0x3UL
+	#define NVM_FIND_DIR_ENTRY_REQ_OPT_ORDINAL_SFT 0
+	#define NVM_FIND_DIR_ENTRY_REQ_OPT_ORDINAL_EQ    0x0UL
+	#define NVM_FIND_DIR_ENTRY_REQ_OPT_ORDINAL_GE    0x1UL
+	#define NVM_FIND_DIR_ENTRY_REQ_OPT_ORDINAL_GT    0x2UL
+	#define NVM_FIND_DIR_ENTRY_REQ_OPT_ORDINAL_LAST NVM_FIND_DIR_ENTRY_REQ_OPT_ORDINAL_GT
+	u8	unused_0[3];
+};
+
+/* hwrm_nvm_find_dir_entry_output (size:256b/32B) */
 struct hwrm_nvm_find_dir_entry_output {
-	__le16 error_code;
-	__le16 req_type;
-	__le16 seq_id;
-	__le16 resp_len;
-	__le32 dir_item_length;
-	__le32 dir_data_length;
-	__le32 fw_ver;
-	__le16 dir_ordinal;
-	__le16 dir_idx;
-	__le32 unused_0;
-	u8 unused_1;
-	u8 unused_2;
-	u8 unused_3;
-	u8 valid;
-};
-
-/* hwrm_nvm_erase_dir_entry */
-/* Input (24 bytes) */
+	__le16	error_code;
+	__le16	req_type;
+	__le16	seq_id;
+	__le16	resp_len;
+	__le32	dir_item_length;
+	__le32	dir_data_length;
+	__le32	fw_ver;
+	__le16	dir_ordinal;
+	__le16	dir_idx;
+	u8	unused_0[7];
+	u8	valid;
+};
+
+/* hwrm_nvm_erase_dir_entry_input (size:192b/24B) */
 struct hwrm_nvm_erase_dir_entry_input {
-	__le16 req_type;
-	__le16 cmpl_ring;
-	__le16 seq_id;
-	__le16 target_id;
-	__le64 resp_addr;
-	__le16 dir_idx;
-	__le16 unused_0[3];
+	__le16	req_type;
+	__le16	cmpl_ring;
+	__le16	seq_id;
+	__le16	target_id;
+	__le64	resp_addr;
+	__le16	dir_idx;
+	u8	unused_0[6];
 };
 
-/* Output (16 bytes) */
+/* hwrm_nvm_erase_dir_entry_output (size:128b/16B) */
 struct hwrm_nvm_erase_dir_entry_output {
-	__le16 error_code;
-	__le16 req_type;
-	__le16 seq_id;
-	__le16 resp_len;
-	__le32 unused_0;
-	u8 unused_1;
-	u8 unused_2;
-	u8 unused_3;
-	u8 valid;
-};
-
-/* hwrm_nvm_get_dev_info */
-/* Input (16 bytes) */
+	__le16	error_code;
+	__le16	req_type;
+	__le16	seq_id;
+	__le16	resp_len;
+	u8	unused_0[7];
+	u8	valid;
+};
+
+/* hwrm_nvm_get_dev_info_input (size:128b/16B) */
 struct hwrm_nvm_get_dev_info_input {
-	__le16 req_type;
-	__le16 cmpl_ring;
-	__le16 seq_id;
-	__le16 target_id;
-	__le64 resp_addr;
+	__le16	req_type;
+	__le16	cmpl_ring;
+	__le16	seq_id;
+	__le16	target_id;
+	__le64	resp_addr;
 };
 
-/* Output (32 bytes) */
+/* hwrm_nvm_get_dev_info_output (size:256b/32B) */
 struct hwrm_nvm_get_dev_info_output {
-	__le16 error_code;
-	__le16 req_type;
-	__le16 seq_id;
-	__le16 resp_len;
-	__le16 manufacturer_id;
-	__le16 device_id;
-	__le32 sector_size;
-	__le32 nvram_size;
-	__le32 reserved_size;
-	__le32 available_size;
-	u8 unused_0;
-	u8 unused_1;
-	u8 unused_2;
-	u8 valid;
-};
-
-/* hwrm_nvm_mod_dir_entry */
-/* Input (32 bytes) */
+	__le16	error_code;
+	__le16	req_type;
+	__le16	seq_id;
+	__le16	resp_len;
+	__le16	manufacturer_id;
+	__le16	device_id;
+	__le32	sector_size;
+	__le32	nvram_size;
+	__le32	reserved_size;
+	__le32	available_size;
+	u8	unused_0[3];
+	u8	valid;
+};
+
+/* hwrm_nvm_mod_dir_entry_input (size:256b/32B) */
 struct hwrm_nvm_mod_dir_entry_input {
-	__le16 req_type;
-	__le16 cmpl_ring;
-	__le16 seq_id;
-	__le16 target_id;
-	__le64 resp_addr;
-	__le32 enables;
-	#define NVM_MOD_DIR_ENTRY_REQ_ENABLES_CHECKSUM		    0x1UL
-	__le16 dir_idx;
-	__le16 dir_ordinal;
-	__le16 dir_ext;
-	__le16 dir_attr;
-	__le32 checksum;
-};
-
-/* Output (16 bytes) */
+	__le16	req_type;
+	__le16	cmpl_ring;
+	__le16	seq_id;
+	__le16	target_id;
+	__le64	resp_addr;
+	__le32	enables;
+	#define NVM_MOD_DIR_ENTRY_REQ_ENABLES_CHECKSUM     0x1UL
+	__le16	dir_idx;
+	__le16	dir_ordinal;
+	__le16	dir_ext;
+	__le16	dir_attr;
+	__le32	checksum;
+};
+
+/* hwrm_nvm_mod_dir_entry_output (size:128b/16B) */
 struct hwrm_nvm_mod_dir_entry_output {
-	__le16 error_code;
-	__le16 req_type;
-	__le16 seq_id;
-	__le16 resp_len;
-	__le32 unused_0;
-	u8 unused_1;
-	u8 unused_2;
-	u8 unused_3;
-	u8 valid;
-};
-
-/* hwrm_nvm_verify_update */
-/* Input (24 bytes) */
+	__le16	error_code;
+	__le16	req_type;
+	__le16	seq_id;
+	__le16	resp_len;
+	u8	unused_0[7];
+	u8	valid;
+};
+
+/* hwrm_nvm_verify_update_input (size:192b/24B) */
 struct hwrm_nvm_verify_update_input {
-	__le16 req_type;
-	__le16 cmpl_ring;
-	__le16 seq_id;
-	__le16 target_id;
-	__le64 resp_addr;
-	__le16 dir_type;
-	__le16 dir_ordinal;
-	__le16 dir_ext;
-	__le16 unused_0;
-};
-
-/* Output (16 bytes) */
+	__le16	req_type;
+	__le16	cmpl_ring;
+	__le16	seq_id;
+	__le16	target_id;
+	__le64	resp_addr;
+	__le16	dir_type;
+	__le16	dir_ordinal;
+	__le16	dir_ext;
+	u8	unused_0[2];
+};
+
+/* hwrm_nvm_verify_update_output (size:128b/16B) */
 struct hwrm_nvm_verify_update_output {
-	__le16 error_code;
-	__le16 req_type;
-	__le16 seq_id;
-	__le16 resp_len;
-	__le32 unused_0;
-	u8 unused_1;
-	u8 unused_2;
-	u8 unused_3;
-	u8 valid;
-};
-
-/* hwrm_nvm_install_update */
-/* Input (24 bytes) */
+	__le16	error_code;
+	__le16	req_type;
+	__le16	seq_id;
+	__le16	resp_len;
+	u8	unused_0[7];
+	u8	valid;
+};
+
+/* hwrm_nvm_install_update_input (size:192b/24B) */
 struct hwrm_nvm_install_update_input {
-	__le16 req_type;
-	__le16 cmpl_ring;
-	__le16 seq_id;
-	__le16 target_id;
-	__le64 resp_addr;
-	__le32 install_type;
-	#define NVM_INSTALL_UPDATE_REQ_INSTALL_TYPE_NORMAL	   0x0UL
-	#define NVM_INSTALL_UPDATE_REQ_INSTALL_TYPE_ALL	   0xffffffffUL
-	__le16 flags;
-	#define NVM_INSTALL_UPDATE_REQ_FLAGS_ERASE_UNUSED_SPACE    0x1UL
-	#define NVM_INSTALL_UPDATE_REQ_FLAGS_REMOVE_UNUSED_PKG     0x2UL
-	#define NVM_INSTALL_UPDATE_REQ_FLAGS_ALLOWED_TO_DEFRAG     0x4UL
-	__le16 unused_0;
-};
-
-/* Output (24 bytes) */
+	__le16	req_type;
+	__le16	cmpl_ring;
+	__le16	seq_id;
+	__le16	target_id;
+	__le64	resp_addr;
+	__le32	install_type;
+	#define NVM_INSTALL_UPDATE_REQ_INSTALL_TYPE_NORMAL 0x0UL
+	#define NVM_INSTALL_UPDATE_REQ_INSTALL_TYPE_ALL    0xffffffffUL
+	#define NVM_INSTALL_UPDATE_REQ_INSTALL_TYPE_LAST  NVM_INSTALL_UPDATE_REQ_INSTALL_TYPE_ALL
+	__le16	flags;
+	#define NVM_INSTALL_UPDATE_REQ_FLAGS_ERASE_UNUSED_SPACE     0x1UL
+	#define NVM_INSTALL_UPDATE_REQ_FLAGS_REMOVE_UNUSED_PKG      0x2UL
+	#define NVM_INSTALL_UPDATE_REQ_FLAGS_ALLOWED_TO_DEFRAG      0x4UL
+	u8	unused_0[2];
+};
+
+/* hwrm_nvm_install_update_output (size:192b/24B) */
 struct hwrm_nvm_install_update_output {
-	__le16 error_code;
-	__le16 req_type;
-	__le16 seq_id;
-	__le16 resp_len;
-	__le64 installed_items;
-	u8 result;
-	#define NVM_INSTALL_UPDATE_RESP_RESULT_SUCCESS		   0x0UL
-	u8 problem_item;
-	#define NVM_INSTALL_UPDATE_RESP_PROBLEM_ITEM_NONE	   0x0UL
-	#define NVM_INSTALL_UPDATE_RESP_PROBLEM_ITEM_PACKAGE      0xffUL
-	u8 reset_required;
-	#define NVM_INSTALL_UPDATE_RESP_RESET_REQUIRED_NONE       0x0UL
-	#define NVM_INSTALL_UPDATE_RESP_RESET_REQUIRED_PCI	   0x1UL
-	#define NVM_INSTALL_UPDATE_RESP_RESET_REQUIRED_POWER      0x2UL
-	u8 unused_0;
-	u8 unused_1;
-	u8 unused_2;
-	u8 unused_3;
-	u8 valid;
-};
-
-/* Command specific Error Codes (8 bytes) */
+	__le16	error_code;
+	__le16	req_type;
+	__le16	seq_id;
+	__le16	resp_len;
+	__le64	installed_items;
+	u8	result;
+	#define NVM_INSTALL_UPDATE_RESP_RESULT_SUCCESS 0x0UL
+	#define NVM_INSTALL_UPDATE_RESP_RESULT_LAST   NVM_INSTALL_UPDATE_RESP_RESULT_SUCCESS
+	u8	problem_item;
+	#define NVM_INSTALL_UPDATE_RESP_PROBLEM_ITEM_NONE    0x0UL
+	#define NVM_INSTALL_UPDATE_RESP_PROBLEM_ITEM_PACKAGE 0xffUL
+	#define NVM_INSTALL_UPDATE_RESP_PROBLEM_ITEM_LAST   NVM_INSTALL_UPDATE_RESP_PROBLEM_ITEM_PACKAGE
+	u8	reset_required;
+	#define NVM_INSTALL_UPDATE_RESP_RESET_REQUIRED_NONE  0x0UL
+	#define NVM_INSTALL_UPDATE_RESP_RESET_REQUIRED_PCI   0x1UL
+	#define NVM_INSTALL_UPDATE_RESP_RESET_REQUIRED_POWER 0x2UL
+	#define NVM_INSTALL_UPDATE_RESP_RESET_REQUIRED_LAST NVM_INSTALL_UPDATE_RESP_RESET_REQUIRED_POWER
+	u8	unused_0[4];
+	u8	valid;
+};
+
+/* hwrm_nvm_install_update_cmd_err (size:64b/8B) */
 struct hwrm_nvm_install_update_cmd_err {
-	u8 code;
-	#define NVM_INSTALL_UPDATE_CMD_ERR_CODE_UNKNOWN	   0x0UL
-	#define NVM_INSTALL_UPDATE_CMD_ERR_CODE_FRAG_ERR	   0x1UL
-	#define NVM_INSTALL_UPDATE_CMD_ERR_CODE_NO_SPACE	   0x2UL
-	u8 unused_0[7];
+	u8	code;
+	#define NVM_INSTALL_UPDATE_CMD_ERR_CODE_UNKNOWN  0x0UL
+	#define NVM_INSTALL_UPDATE_CMD_ERR_CODE_FRAG_ERR 0x1UL
+	#define NVM_INSTALL_UPDATE_CMD_ERR_CODE_NO_SPACE 0x2UL
+	#define NVM_INSTALL_UPDATE_CMD_ERR_CODE_LAST    NVM_INSTALL_UPDATE_CMD_ERR_CODE_NO_SPACE
+	u8	unused_0[7];
 };
 
-/* hwrm_nvm_get_variable */
-/* Input (40 bytes) */
+/* hwrm_nvm_get_variable_input (size:320b/40B) */
 struct hwrm_nvm_get_variable_input {
-	__le16 req_type;
-	__le16 cmpl_ring;
-	__le16 seq_id;
-	__le16 target_id;
-	__le64 resp_addr;
-	__le64 dest_data_addr;
-	__le16 data_len;
-	__le16 option_num;
-	#define NVM_GET_VARIABLE_REQ_OPTION_NUM_RSVD_0		   0x0UL
-	#define NVM_GET_VARIABLE_REQ_OPTION_NUM_RSVD_FFFF	   0xffffUL
-	__le16 dimensions;
-	__le16 index_0;
-	__le16 index_1;
-	__le16 index_2;
-	__le16 index_3;
-	u8 flags;
-	#define NVM_GET_VARIABLE_REQ_FLAGS_FACTORY_DFLT	    0x1UL
-	u8 unused_0;
-};
-
-/* Output (16 bytes) */
+	__le16	req_type;
+	__le16	cmpl_ring;
+	__le16	seq_id;
+	__le16	target_id;
+	__le64	resp_addr;
+	__le64	dest_data_addr;
+	__le16	data_len;
+	__le16	option_num;
+	#define NVM_GET_VARIABLE_REQ_OPTION_NUM_RSVD_0    0x0UL
+	#define NVM_GET_VARIABLE_REQ_OPTION_NUM_RSVD_FFFF 0xffffUL
+	#define NVM_GET_VARIABLE_REQ_OPTION_NUM_LAST     NVM_GET_VARIABLE_REQ_OPTION_NUM_RSVD_FFFF
+	__le16	dimensions;
+	__le16	index_0;
+	__le16	index_1;
+	__le16	index_2;
+	__le16	index_3;
+	u8	flags;
+	#define NVM_GET_VARIABLE_REQ_FLAGS_FACTORY_DFLT     0x1UL
+	u8	unused_0;
+};
+
+/* hwrm_nvm_get_variable_output (size:128b/16B) */
 struct hwrm_nvm_get_variable_output {
-	__le16 error_code;
-	__le16 req_type;
-	__le16 seq_id;
-	__le16 resp_len;
-	__le16 data_len;
-	__le16 option_num;
-	#define NVM_GET_VARIABLE_RESP_OPTION_NUM_RSVD_0	   0x0UL
-	#define NVM_GET_VARIABLE_RESP_OPTION_NUM_RSVD_FFFF	   0xffffUL
-	u8 unused_0;
-	u8 unused_1;
-	u8 unused_2;
-	u8 valid;
-};
-
-/* Command specific Error Codes (8 bytes) */
+	__le16	error_code;
+	__le16	req_type;
+	__le16	seq_id;
+	__le16	resp_len;
+	__le16	data_len;
+	__le16	option_num;
+	#define NVM_GET_VARIABLE_RESP_OPTION_NUM_RSVD_0    0x0UL
+	#define NVM_GET_VARIABLE_RESP_OPTION_NUM_RSVD_FFFF 0xffffUL
+	#define NVM_GET_VARIABLE_RESP_OPTION_NUM_LAST     NVM_GET_VARIABLE_RESP_OPTION_NUM_RSVD_FFFF
+	u8	unused_0[3];
+	u8	valid;
+};
+
+/* hwrm_nvm_get_variable_cmd_err (size:64b/8B) */
 struct hwrm_nvm_get_variable_cmd_err {
-	u8 code;
-	#define NVM_GET_VARIABLE_CMD_ERR_CODE_UNKNOWN		   0x0UL
-	#define NVM_GET_VARIABLE_CMD_ERR_CODE_VAR_NOT_EXIST       0x1UL
-	#define NVM_GET_VARIABLE_CMD_ERR_CODE_CORRUPT_VAR	   0x2UL
-	#define NVM_GET_VARIABLE_CMD_ERR_CODE_LEN_TOO_SHORT       0x3UL
-	u8 unused_0[7];
+	u8	code;
+	#define NVM_GET_VARIABLE_CMD_ERR_CODE_UNKNOWN       0x0UL
+	#define NVM_GET_VARIABLE_CMD_ERR_CODE_VAR_NOT_EXIST 0x1UL
+	#define NVM_GET_VARIABLE_CMD_ERR_CODE_CORRUPT_VAR   0x2UL
+	#define NVM_GET_VARIABLE_CMD_ERR_CODE_LEN_TOO_SHORT 0x3UL
+	#define NVM_GET_VARIABLE_CMD_ERR_CODE_LAST         NVM_GET_VARIABLE_CMD_ERR_CODE_LEN_TOO_SHORT
+	u8	unused_0[7];
 };
 
-/* hwrm_nvm_set_variable */
-/* Input (40 bytes) */
+/* hwrm_nvm_set_variable_input (size:320b/40B) */
 struct hwrm_nvm_set_variable_input {
-	__le16 req_type;
-	__le16 cmpl_ring;
-	__le16 seq_id;
-	__le16 target_id;
-	__le64 resp_addr;
-	__le64 src_data_addr;
-	__le16 data_len;
-	__le16 option_num;
-	#define NVM_SET_VARIABLE_REQ_OPTION_NUM_RSVD_0		   0x0UL
-	#define NVM_SET_VARIABLE_REQ_OPTION_NUM_RSVD_FFFF	   0xffffUL
-	__le16 dimensions;
-	__le16 index_0;
-	__le16 index_1;
-	__le16 index_2;
-	__le16 index_3;
-	u8 flags;
-	#define NVM_SET_VARIABLE_REQ_FLAGS_FORCE_FLUSH		    0x1UL
-	#define NVM_SET_VARIABLE_REQ_FLAGS_ENCRYPT_MODE_MASK       0xeUL
-	#define NVM_SET_VARIABLE_REQ_FLAGS_ENCRYPT_MODE_SFT	    1
-	#define NVM_SET_VARIABLE_REQ_FLAGS_ENCRYPT_MODE_NONE      (0x0UL << 1)
-	#define NVM_SET_VARIABLE_REQ_FLAGS_ENCRYPT_MODE_HMAC_SHA1 (0x1UL << 1)
-	#define NVM_SET_VARIABLE_REQ_FLAGS_ENCRYPT_MODE_LAST    NVM_SET_VARIABLE_REQ_FLAGS_ENCRYPT_MODE_HMAC_SHA1
-	u8 unused_0;
-};
-
-/* Output (16 bytes) */
+	__le16	req_type;
+	__le16	cmpl_ring;
+	__le16	seq_id;
+	__le16	target_id;
+	__le64	resp_addr;
+	__le64	src_data_addr;
+	__le16	data_len;
+	__le16	option_num;
+	#define NVM_SET_VARIABLE_REQ_OPTION_NUM_RSVD_0    0x0UL
+	#define NVM_SET_VARIABLE_REQ_OPTION_NUM_RSVD_FFFF 0xffffUL
+	#define NVM_SET_VARIABLE_REQ_OPTION_NUM_LAST     NVM_SET_VARIABLE_REQ_OPTION_NUM_RSVD_FFFF
+	__le16	dimensions;
+	__le16	index_0;
+	__le16	index_1;
+	__le16	index_2;
+	__le16	index_3;
+	u8	flags;
+	#define NVM_SET_VARIABLE_REQ_FLAGS_FORCE_FLUSH           0x1UL
+	#define NVM_SET_VARIABLE_REQ_FLAGS_ENCRYPT_MODE_MASK     0xeUL
+	#define NVM_SET_VARIABLE_REQ_FLAGS_ENCRYPT_MODE_SFT      1
+	#define NVM_SET_VARIABLE_REQ_FLAGS_ENCRYPT_MODE_NONE       (0x0UL << 1)
+	#define NVM_SET_VARIABLE_REQ_FLAGS_ENCRYPT_MODE_HMAC_SHA1  (0x1UL << 1)
+	#define NVM_SET_VARIABLE_REQ_FLAGS_ENCRYPT_MODE_LAST      NVM_SET_VARIABLE_REQ_FLAGS_ENCRYPT_MODE_HMAC_SHA1
+	u8	unused_0;
+};
+
+/* hwrm_nvm_set_variable_output (size:128b/16B) */
 struct hwrm_nvm_set_variable_output {
-	__le16 error_code;
-	__le16 req_type;
-	__le16 seq_id;
-	__le16 resp_len;
-	__le32 unused_0;
-	u8 unused_1;
-	u8 unused_2;
-	u8 unused_3;
-	u8 valid;
-};
-
-/* Command specific Error Codes (8 bytes) */
+	__le16	error_code;
+	__le16	req_type;
+	__le16	seq_id;
+	__le16	resp_len;
+	u8	unused_0[7];
+	u8	valid;
+};
+
+/* hwrm_nvm_set_variable_cmd_err (size:64b/8B) */
 struct hwrm_nvm_set_variable_cmd_err {
-	u8 code;
-	#define NVM_SET_VARIABLE_CMD_ERR_CODE_UNKNOWN		   0x0UL
-	#define NVM_SET_VARIABLE_CMD_ERR_CODE_VAR_NOT_EXIST       0x1UL
-	#define NVM_SET_VARIABLE_CMD_ERR_CODE_CORRUPT_VAR	   0x2UL
-	u8 unused_0[7];
+	u8	code;
+	#define NVM_SET_VARIABLE_CMD_ERR_CODE_UNKNOWN       0x0UL
+	#define NVM_SET_VARIABLE_CMD_ERR_CODE_VAR_NOT_EXIST 0x1UL
+	#define NVM_SET_VARIABLE_CMD_ERR_CODE_CORRUPT_VAR   0x2UL
+	#define NVM_SET_VARIABLE_CMD_ERR_CODE_LAST         NVM_SET_VARIABLE_CMD_ERR_CODE_CORRUPT_VAR
+	u8	unused_0[7];
 };
 
-/* hwrm_selftest_qlist */
-/* Input (16 bytes) */
+/* hwrm_selftest_qlist_input (size:128b/16B) */
 struct hwrm_selftest_qlist_input {
-	__le16 req_type;
-	__le16 cmpl_ring;
-	__le16 seq_id;
-	__le16 target_id;
-	__le64 resp_addr;
+	__le16	req_type;
+	__le16	cmpl_ring;
+	__le16	seq_id;
+	__le16	target_id;
+	__le64	resp_addr;
 };
 
-/* Output (280 bytes) */
+/* hwrm_selftest_qlist_output (size:2240b/280B) */
 struct hwrm_selftest_qlist_output {
-	__le16 error_code;
-	__le16 req_type;
-	__le16 seq_id;
-	__le16 resp_len;
-	u8 num_tests;
-	u8 available_tests;
-	#define SELFTEST_QLIST_RESP_AVAILABLE_TESTS_NVM_TEST       0x1UL
-	#define SELFTEST_QLIST_RESP_AVAILABLE_TESTS_LINK_TEST      0x2UL
-	#define SELFTEST_QLIST_RESP_AVAILABLE_TESTS_REGISTER_TEST  0x4UL
-	#define SELFTEST_QLIST_RESP_AVAILABLE_TESTS_MEMORY_TEST    0x8UL
-	#define SELFTEST_QLIST_RESP_AVAILABLE_TESTS_PCIE_SERDES_TEST 0x10UL
-	#define SELFTEST_QLIST_RESP_AVAILABLE_TESTS_ETHERNET_SERDES_TEST 0x20UL
-	u8 offline_tests;
-	#define SELFTEST_QLIST_RESP_OFFLINE_TESTS_NVM_TEST	    0x1UL
-	#define SELFTEST_QLIST_RESP_OFFLINE_TESTS_LINK_TEST	    0x2UL
-	#define SELFTEST_QLIST_RESP_OFFLINE_TESTS_REGISTER_TEST    0x4UL
-	#define SELFTEST_QLIST_RESP_OFFLINE_TESTS_MEMORY_TEST      0x8UL
-	#define SELFTEST_QLIST_RESP_OFFLINE_TESTS_PCIE_SERDES_TEST 0x10UL
-	#define SELFTEST_QLIST_RESP_OFFLINE_TESTS_ETHERNET_SERDES_TEST 0x20UL
-	u8 unused_0;
-	__le16 test_timeout;
-	u8 unused_1;
-	u8 unused_2;
-	char test0_name[32];
-	char test1_name[32];
-	char test2_name[32];
-	char test3_name[32];
-	char test4_name[32];
-	char test5_name[32];
-	char test6_name[32];
-	char test7_name[32];
-	__le32 unused_3;
-	u8 unused_4;
-	u8 unused_5;
-	u8 unused_6;
-	u8 valid;
-};
-
-/* hwrm_selftest_exec */
-/* Input (24 bytes) */
+	__le16	error_code;
+	__le16	req_type;
+	__le16	seq_id;
+	__le16	resp_len;
+	u8	num_tests;
+	u8	available_tests;
+	#define SELFTEST_QLIST_RESP_AVAILABLE_TESTS_NVM_TEST                 0x1UL
+	#define SELFTEST_QLIST_RESP_AVAILABLE_TESTS_LINK_TEST                0x2UL
+	#define SELFTEST_QLIST_RESP_AVAILABLE_TESTS_REGISTER_TEST            0x4UL
+	#define SELFTEST_QLIST_RESP_AVAILABLE_TESTS_MEMORY_TEST              0x8UL
+	#define SELFTEST_QLIST_RESP_AVAILABLE_TESTS_PCIE_SERDES_TEST         0x10UL
+	#define SELFTEST_QLIST_RESP_AVAILABLE_TESTS_ETHERNET_SERDES_TEST     0x20UL
+	u8	offline_tests;
+	#define SELFTEST_QLIST_RESP_OFFLINE_TESTS_NVM_TEST                 0x1UL
+	#define SELFTEST_QLIST_RESP_OFFLINE_TESTS_LINK_TEST                0x2UL
+	#define SELFTEST_QLIST_RESP_OFFLINE_TESTS_REGISTER_TEST            0x4UL
+	#define SELFTEST_QLIST_RESP_OFFLINE_TESTS_MEMORY_TEST              0x8UL
+	#define SELFTEST_QLIST_RESP_OFFLINE_TESTS_PCIE_SERDES_TEST         0x10UL
+	#define SELFTEST_QLIST_RESP_OFFLINE_TESTS_ETHERNET_SERDES_TEST     0x20UL
+	u8	unused_0;
+	__le16	test_timeout;
+	u8	unused_1[2];
+	char	test0_name[32];
+	char	test1_name[32];
+	char	test2_name[32];
+	char	test3_name[32];
+	char	test4_name[32];
+	char	test5_name[32];
+	char	test6_name[32];
+	char	test7_name[32];
+	u8	unused_2[7];
+	u8	valid;
+};
+
+/* hwrm_selftest_exec_input (size:192b/24B) */
 struct hwrm_selftest_exec_input {
-	__le16 req_type;
-	__le16 cmpl_ring;
-	__le16 seq_id;
-	__le16 target_id;
-	__le64 resp_addr;
-	u8 flags;
-	#define SELFTEST_EXEC_REQ_FLAGS_NVM_TEST		    0x1UL
-	#define SELFTEST_EXEC_REQ_FLAGS_LINK_TEST		    0x2UL
-	#define SELFTEST_EXEC_REQ_FLAGS_REGISTER_TEST		    0x4UL
-	#define SELFTEST_EXEC_REQ_FLAGS_MEMORY_TEST		    0x8UL
-	#define SELFTEST_EXEC_REQ_FLAGS_PCIE_SERDES_TEST	    0x10UL
-	#define SELFTEST_EXEC_REQ_FLAGS_ETHERNET_SERDES_TEST       0x20UL
-	u8 unused_0[7];
-};
-
-/* Output (16 bytes) */
+	__le16	req_type;
+	__le16	cmpl_ring;
+	__le16	seq_id;
+	__le16	target_id;
+	__le64	resp_addr;
+	u8	flags;
+	#define SELFTEST_EXEC_REQ_FLAGS_NVM_TEST                 0x1UL
+	#define SELFTEST_EXEC_REQ_FLAGS_LINK_TEST                0x2UL
+	#define SELFTEST_EXEC_REQ_FLAGS_REGISTER_TEST            0x4UL
+	#define SELFTEST_EXEC_REQ_FLAGS_MEMORY_TEST              0x8UL
+	#define SELFTEST_EXEC_REQ_FLAGS_PCIE_SERDES_TEST         0x10UL
+	#define SELFTEST_EXEC_REQ_FLAGS_ETHERNET_SERDES_TEST     0x20UL
+	u8	pcie_lane_num;
+	u8	unused_0[6];
+};
+
+/* hwrm_selftest_exec_output (size:128b/16B) */
 struct hwrm_selftest_exec_output {
-	__le16 error_code;
-	__le16 req_type;
-	__le16 seq_id;
-	__le16 resp_len;
-	u8 requested_tests;
-	#define SELFTEST_EXEC_RESP_REQUESTED_TESTS_NVM_TEST	    0x1UL
-	#define SELFTEST_EXEC_RESP_REQUESTED_TESTS_LINK_TEST       0x2UL
-	#define SELFTEST_EXEC_RESP_REQUESTED_TESTS_REGISTER_TEST   0x4UL
-	#define SELFTEST_EXEC_RESP_REQUESTED_TESTS_MEMORY_TEST     0x8UL
-	#define SELFTEST_EXEC_RESP_REQUESTED_TESTS_PCIE_SERDES_TEST 0x10UL
-	#define SELFTEST_EXEC_RESP_REQUESTED_TESTS_ETHERNET_SERDES_TEST 0x20UL
-	u8 test_success;
-	#define SELFTEST_EXEC_RESP_TEST_SUCCESS_NVM_TEST	    0x1UL
-	#define SELFTEST_EXEC_RESP_TEST_SUCCESS_LINK_TEST	    0x2UL
-	#define SELFTEST_EXEC_RESP_TEST_SUCCESS_REGISTER_TEST      0x4UL
-	#define SELFTEST_EXEC_RESP_TEST_SUCCESS_MEMORY_TEST	    0x8UL
-	#define SELFTEST_EXEC_RESP_TEST_SUCCESS_PCIE_SERDES_TEST   0x10UL
-	#define SELFTEST_EXEC_RESP_TEST_SUCCESS_ETHERNET_SERDES_TEST 0x20UL
-	u8 unused_0;
-	u8 unused_1;
-	u8 unused_2;
-	u8 unused_3;
-	u8 unused_4;
-	u8 valid;
-};
-
-/* hwrm_selftest_irq */
-/* Input (16 bytes) */
+	__le16	error_code;
+	__le16	req_type;
+	__le16	seq_id;
+	__le16	resp_len;
+	u8	requested_tests;
+	#define SELFTEST_EXEC_RESP_REQUESTED_TESTS_NVM_TEST                 0x1UL
+	#define SELFTEST_EXEC_RESP_REQUESTED_TESTS_LINK_TEST                0x2UL
+	#define SELFTEST_EXEC_RESP_REQUESTED_TESTS_REGISTER_TEST            0x4UL
+	#define SELFTEST_EXEC_RESP_REQUESTED_TESTS_MEMORY_TEST              0x8UL
+	#define SELFTEST_EXEC_RESP_REQUESTED_TESTS_PCIE_SERDES_TEST         0x10UL
+	#define SELFTEST_EXEC_RESP_REQUESTED_TESTS_ETHERNET_SERDES_TEST     0x20UL
+	u8	test_success;
+	#define SELFTEST_EXEC_RESP_TEST_SUCCESS_NVM_TEST                 0x1UL
+	#define SELFTEST_EXEC_RESP_TEST_SUCCESS_LINK_TEST                0x2UL
+	#define SELFTEST_EXEC_RESP_TEST_SUCCESS_REGISTER_TEST            0x4UL
+	#define SELFTEST_EXEC_RESP_TEST_SUCCESS_MEMORY_TEST              0x8UL
+	#define SELFTEST_EXEC_RESP_TEST_SUCCESS_PCIE_SERDES_TEST         0x10UL
+	#define SELFTEST_EXEC_RESP_TEST_SUCCESS_ETHERNET_SERDES_TEST     0x20UL
+	u8	unused_0[5];
+	u8	valid;
+};
+
+/* hwrm_selftest_irq_input (size:128b/16B) */
 struct hwrm_selftest_irq_input {
-	__le16 req_type;
-	__le16 cmpl_ring;
-	__le16 seq_id;
-	__le16 target_id;
-	__le64 resp_addr;
+	__le16	req_type;
+	__le16	cmpl_ring;
+	__le16	seq_id;
+	__le16	target_id;
+	__le64	resp_addr;
 };
 
-/* Output (16 bytes) */
+/* hwrm_selftest_irq_output (size:128b/16B) */
 struct hwrm_selftest_irq_output {
-	__le16 error_code;
-	__le16 req_type;
-	__le16 seq_id;
-	__le16 resp_len;
-	__le32 unused_0;
-	u8 unused_1;
-	u8 unused_2;
-	u8 unused_3;
-	u8 valid;
-};
-
-/* hwrm_selftest_retrieve_serdes_data */
-/* Input (32 bytes) */
-struct hwrm_selftest_retrieve_serdes_data_input {
-	__le16 req_type;
-	__le16 cmpl_ring;
-	__le16 seq_id;
-	__le16 target_id;
-	__le64 resp_addr;
-	__le64 resp_data_addr;
-	__le32 resp_data_offset;
-	__le16 data_len;
-	u8 flags;
-	#define SELFTEST_RETRIEVE_SERDES_DATA_REQ_FLAGS_UNUSED_TEST_MASK 0xfUL
-	#define SELFTEST_RETRIEVE_SERDES_DATA_REQ_FLAGS_UNUSED_TEST_SFT 0
-	#define SELFTEST_RETRIEVE_SERDES_DATA_REQ_FLAGS_PCIE_SERDES_TEST 0x10UL
-	#define SELFTEST_RETRIEVE_SERDES_DATA_REQ_FLAGS_ETHERNET_SERDES_TEST 0x20UL
-	u8 unused_0;
-};
-
-/* Output (16 bytes) */
-struct hwrm_selftest_retrieve_serdes_data_output {
-	__le16 error_code;
-	__le16 req_type;
-	__le16 seq_id;
-	__le16 resp_len;
-	__le16 total_data_len;
-	__le16 copied_data_len;
-	u8 unused_0;
-	u8 unused_1;
-	u8 unused_2;
-	u8 valid;
-};
-
-/* Hardware Resource Manager Specification */
-/* Input (16 bytes) */
-struct input {
-	__le16 req_type;
-	__le16 cmpl_ring;
-	__le16 seq_id;
-	__le16 target_id;
-	__le64 resp_addr;
-};
-
-/* Output (8 bytes) */
-struct output {
-	__le16 error_code;
-	__le16 req_type;
-	__le16 seq_id;
-	__le16 resp_len;
-};
-
-/* Short Command Structure (16 bytes) */
-struct hwrm_short_input {
-	__le16 req_type;
-	__le16 signature;
-	#define SHORT_REQ_SIGNATURE_SHORT_CMD			   0x4321UL
-	__le16 unused_0;
-	__le16 size;
-	__le64 req_addr;
-};
-
-/* Command numbering (8 bytes) */
-struct cmd_nums {
-	__le16 req_type;
-	#define HWRM_VER_GET					   (0x0UL)
-	#define HWRM_FUNC_BUF_UNRGTR				   (0xeUL)
-	#define HWRM_FUNC_VF_CFG				   (0xfUL)
-	#define RESERVED1					   (0x10UL)
-	#define HWRM_FUNC_RESET				   (0x11UL)
-	#define HWRM_FUNC_GETFID				   (0x12UL)
-	#define HWRM_FUNC_VF_ALLOC				   (0x13UL)
-	#define HWRM_FUNC_VF_FREE				   (0x14UL)
-	#define HWRM_FUNC_QCAPS				   (0x15UL)
-	#define HWRM_FUNC_QCFG					   (0x16UL)
-	#define HWRM_FUNC_CFG					   (0x17UL)
-	#define HWRM_FUNC_QSTATS				   (0x18UL)
-	#define HWRM_FUNC_CLR_STATS				   (0x19UL)
-	#define HWRM_FUNC_DRV_UNRGTR				   (0x1aUL)
-	#define HWRM_FUNC_VF_RESC_FREE				   (0x1bUL)
-	#define HWRM_FUNC_VF_VNIC_IDS_QUERY			   (0x1cUL)
-	#define HWRM_FUNC_DRV_RGTR				   (0x1dUL)
-	#define HWRM_FUNC_DRV_QVER				   (0x1eUL)
-	#define HWRM_FUNC_BUF_RGTR				   (0x1fUL)
-	#define HWRM_PORT_PHY_CFG				   (0x20UL)
-	#define HWRM_PORT_MAC_CFG				   (0x21UL)
-	#define HWRM_PORT_TS_QUERY				   (0x22UL)
-	#define HWRM_PORT_QSTATS				   (0x23UL)
-	#define HWRM_PORT_LPBK_QSTATS				   (0x24UL)
-	#define HWRM_PORT_CLR_STATS				   (0x25UL)
-	#define HWRM_PORT_LPBK_CLR_STATS			   (0x26UL)
-	#define HWRM_PORT_PHY_QCFG				   (0x27UL)
-	#define HWRM_PORT_MAC_QCFG				   (0x28UL)
-	#define HWRM_PORT_MAC_PTP_QCFG				   (0x29UL)
-	#define HWRM_PORT_PHY_QCAPS				   (0x2aUL)
-	#define HWRM_PORT_PHY_I2C_WRITE			   (0x2bUL)
-	#define HWRM_PORT_PHY_I2C_READ				   (0x2cUL)
-	#define HWRM_PORT_LED_CFG				   (0x2dUL)
-	#define HWRM_PORT_LED_QCFG				   (0x2eUL)
-	#define HWRM_PORT_LED_QCAPS				   (0x2fUL)
-	#define HWRM_QUEUE_QPORTCFG				   (0x30UL)
-	#define HWRM_QUEUE_QCFG				   (0x31UL)
-	#define HWRM_QUEUE_CFG					   (0x32UL)
-	#define HWRM_FUNC_VLAN_CFG				   (0x33UL)
-	#define HWRM_FUNC_VLAN_QCFG				   (0x34UL)
-	#define HWRM_QUEUE_PFCENABLE_QCFG			   (0x35UL)
-	#define HWRM_QUEUE_PFCENABLE_CFG			   (0x36UL)
-	#define HWRM_QUEUE_PRI2COS_QCFG			   (0x37UL)
-	#define HWRM_QUEUE_PRI2COS_CFG				   (0x38UL)
-	#define HWRM_QUEUE_COS2BW_QCFG				   (0x39UL)
-	#define HWRM_QUEUE_COS2BW_CFG				   (0x3aUL)
-	#define HWRM_QUEUE_DSCP_QCAPS				   (0x3bUL)
-	#define HWRM_QUEUE_DSCP2PRI_QCFG			   (0x3cUL)
-	#define HWRM_QUEUE_DSCP2PRI_CFG			   (0x3dUL)
-	#define HWRM_VNIC_ALLOC				   (0x40UL)
-	#define HWRM_VNIC_FREE					   (0x41UL)
-	#define HWRM_VNIC_CFG					   (0x42UL)
-	#define HWRM_VNIC_QCFG					   (0x43UL)
-	#define HWRM_VNIC_TPA_CFG				   (0x44UL)
-	#define HWRM_VNIC_TPA_QCFG				   (0x45UL)
-	#define HWRM_VNIC_RSS_CFG				   (0x46UL)
-	#define HWRM_VNIC_RSS_QCFG				   (0x47UL)
-	#define HWRM_VNIC_PLCMODES_CFG				   (0x48UL)
-	#define HWRM_VNIC_PLCMODES_QCFG			   (0x49UL)
-	#define HWRM_VNIC_QCAPS				   (0x4aUL)
-	#define HWRM_RING_ALLOC				   (0x50UL)
-	#define HWRM_RING_FREE					   (0x51UL)
-	#define HWRM_RING_CMPL_RING_QAGGINT_PARAMS		   (0x52UL)
-	#define HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS		   (0x53UL)
-	#define HWRM_RING_RESET				   (0x5eUL)
-	#define HWRM_RING_GRP_ALLOC				   (0x60UL)
-	#define HWRM_RING_GRP_FREE				   (0x61UL)
-	#define RESERVED5					   (0x64UL)
-	#define RESERVED6					   (0x65UL)
-	#define HWRM_VNIC_RSS_COS_LB_CTX_ALLOC			   (0x70UL)
-	#define HWRM_VNIC_RSS_COS_LB_CTX_FREE			   (0x71UL)
-	#define HWRM_CFA_L2_FILTER_ALLOC			   (0x90UL)
-	#define HWRM_CFA_L2_FILTER_FREE			   (0x91UL)
-	#define HWRM_CFA_L2_FILTER_CFG				   (0x92UL)
-	#define HWRM_CFA_L2_SET_RX_MASK			   (0x93UL)
-	#define HWRM_CFA_VLAN_ANTISPOOF_CFG			   (0x94UL)
-	#define HWRM_CFA_TUNNEL_FILTER_ALLOC			   (0x95UL)
-	#define HWRM_CFA_TUNNEL_FILTER_FREE			   (0x96UL)
-	#define HWRM_CFA_ENCAP_RECORD_ALLOC			   (0x97UL)
-	#define HWRM_CFA_ENCAP_RECORD_FREE			   (0x98UL)
-	#define HWRM_CFA_NTUPLE_FILTER_ALLOC			   (0x99UL)
-	#define HWRM_CFA_NTUPLE_FILTER_FREE			   (0x9aUL)
-	#define HWRM_CFA_NTUPLE_FILTER_CFG			   (0x9bUL)
-	#define HWRM_CFA_EM_FLOW_ALLOC				   (0x9cUL)
-	#define HWRM_CFA_EM_FLOW_FREE				   (0x9dUL)
-	#define HWRM_CFA_EM_FLOW_CFG				   (0x9eUL)
-	#define HWRM_TUNNEL_DST_PORT_QUERY			   (0xa0UL)
-	#define HWRM_TUNNEL_DST_PORT_ALLOC			   (0xa1UL)
-	#define HWRM_TUNNEL_DST_PORT_FREE			   (0xa2UL)
-	#define HWRM_STAT_CTX_ALLOC				   (0xb0UL)
-	#define HWRM_STAT_CTX_FREE				   (0xb1UL)
-	#define HWRM_STAT_CTX_QUERY				   (0xb2UL)
-	#define HWRM_STAT_CTX_CLR_STATS			   (0xb3UL)
-	#define HWRM_FW_RESET					   (0xc0UL)
-	#define HWRM_FW_QSTATUS				   (0xc1UL)
-	#define HWRM_FW_SET_TIME				   (0xc8UL)
-	#define HWRM_FW_GET_TIME				   (0xc9UL)
-	#define HWRM_FW_SET_STRUCTURED_DATA			   (0xcaUL)
-	#define HWRM_FW_GET_STRUCTURED_DATA			   (0xcbUL)
-	#define HWRM_FW_IPC_MAILBOX				   (0xccUL)
-	#define HWRM_EXEC_FWD_RESP				   (0xd0UL)
-	#define HWRM_REJECT_FWD_RESP				   (0xd1UL)
-	#define HWRM_FWD_RESP					   (0xd2UL)
-	#define HWRM_FWD_ASYNC_EVENT_CMPL			   (0xd3UL)
-	#define HWRM_TEMP_MONITOR_QUERY			   (0xe0UL)
-	#define HWRM_WOL_FILTER_ALLOC				   (0xf0UL)
-	#define HWRM_WOL_FILTER_FREE				   (0xf1UL)
-	#define HWRM_WOL_FILTER_QCFG				   (0xf2UL)
-	#define HWRM_WOL_REASON_QCFG				   (0xf3UL)
-	#define HWRM_CFA_METER_PROFILE_ALLOC			   (0xf5UL)
-	#define HWRM_CFA_METER_PROFILE_FREE			   (0xf6UL)
-	#define HWRM_CFA_METER_PROFILE_CFG			   (0xf7UL)
-	#define HWRM_CFA_METER_INSTANCE_ALLOC			   (0xf8UL)
-	#define HWRM_CFA_METER_INSTANCE_FREE			   (0xf9UL)
-	#define HWRM_CFA_VFR_ALLOC				   (0xfdUL)
-	#define HWRM_CFA_VFR_FREE				   (0xfeUL)
-	#define HWRM_CFA_VF_PAIR_ALLOC				   (0x100UL)
-	#define HWRM_CFA_VF_PAIR_FREE				   (0x101UL)
-	#define HWRM_CFA_VF_PAIR_INFO				   (0x102UL)
-	#define HWRM_CFA_FLOW_ALLOC				   (0x103UL)
-	#define HWRM_CFA_FLOW_FREE				   (0x104UL)
-	#define HWRM_CFA_FLOW_FLUSH				   (0x105UL)
-	#define HWRM_CFA_FLOW_STATS				   (0x106UL)
-	#define HWRM_CFA_FLOW_INFO				   (0x107UL)
-	#define HWRM_CFA_DECAP_FILTER_ALLOC			   (0x108UL)
-	#define HWRM_CFA_DECAP_FILTER_FREE			   (0x109UL)
-	#define HWRM_CFA_VLAN_ANTISPOOF_QCFG			   (0x10aUL)
-	#define HWRM_CFA_REDIRECT_TUNNEL_TYPE_ALLOC		   (0x10bUL)
-	#define HWRM_CFA_REDIRECT_TUNNEL_TYPE_FREE		   (0x10cUL)
-	#define HWRM_CFA_PAIR_ALLOC				   (0x10dUL)
-	#define HWRM_CFA_PAIR_FREE				   (0x10eUL)
-	#define HWRM_CFA_PAIR_INFO				   (0x10fUL)
-	#define HWRM_FW_IPC_MSG				   (0x110UL)
-	#define HWRM_SELFTEST_QLIST				   (0x200UL)
-	#define HWRM_SELFTEST_EXEC				   (0x201UL)
-	#define HWRM_SELFTEST_IRQ				   (0x202UL)
-	#define HWRM_SELFTEST_RETRIEVE_SERDES_DATA		   (0x203UL)
-	#define HWRM_DBG_READ_DIRECT				   (0xff10UL)
-	#define HWRM_DBG_READ_INDIRECT				   (0xff11UL)
-	#define HWRM_DBG_WRITE_DIRECT				   (0xff12UL)
-	#define HWRM_DBG_WRITE_INDIRECT			   (0xff13UL)
-	#define HWRM_DBG_DUMP					   (0xff14UL)
-	#define HWRM_DBG_ERASE_NVM				   (0xff15UL)
-	#define HWRM_DBG_CFG					   (0xff16UL)
-	#define HWRM_DBG_COREDUMP_LIST				   (0xff17UL)
-	#define HWRM_DBG_COREDUMP_INITIATE			   (0xff18UL)
-	#define HWRM_DBG_COREDUMP_RETRIEVE			   (0xff19UL)
-	#define HWRM_NVM_FACTORY_DEFAULTS			   (0xffeeUL)
-	#define HWRM_NVM_VALIDATE_OPTION			   (0xffefUL)
-	#define HWRM_NVM_FLUSH					   (0xfff0UL)
-	#define HWRM_NVM_GET_VARIABLE				   (0xfff1UL)
-	#define HWRM_NVM_SET_VARIABLE				   (0xfff2UL)
-	#define HWRM_NVM_INSTALL_UPDATE			   (0xfff3UL)
-	#define HWRM_NVM_MODIFY				   (0xfff4UL)
-	#define HWRM_NVM_VERIFY_UPDATE				   (0xfff5UL)
-	#define HWRM_NVM_GET_DEV_INFO				   (0xfff6UL)
-	#define HWRM_NVM_ERASE_DIR_ENTRY			   (0xfff7UL)
-	#define HWRM_NVM_MOD_DIR_ENTRY				   (0xfff8UL)
-	#define HWRM_NVM_FIND_DIR_ENTRY			   (0xfff9UL)
-	#define HWRM_NVM_GET_DIR_ENTRIES			   (0xfffaUL)
-	#define HWRM_NVM_GET_DIR_INFO				   (0xfffbUL)
-	#define HWRM_NVM_RAW_DUMP				   (0xfffcUL)
-	#define HWRM_NVM_READ					   (0xfffdUL)
-	#define HWRM_NVM_WRITE					   (0xfffeUL)
-	#define HWRM_NVM_RAW_WRITE_BLK				   (0xffffUL)
-	__le16 unused_0[3];
-};
-
-/* Return Codes (8 bytes) */
-struct ret_codes {
-	__le16 error_code;
-	#define HWRM_ERR_CODE_SUCCESS				   (0x0UL)
-	#define HWRM_ERR_CODE_FAIL				   (0x1UL)
-	#define HWRM_ERR_CODE_INVALID_PARAMS			   (0x2UL)
-	#define HWRM_ERR_CODE_RESOURCE_ACCESS_DENIED		   (0x3UL)
-	#define HWRM_ERR_CODE_RESOURCE_ALLOC_ERROR		   (0x4UL)
-	#define HWRM_ERR_CODE_INVALID_FLAGS			   (0x5UL)
-	#define HWRM_ERR_CODE_INVALID_ENABLES			   (0x6UL)
-	#define HWRM_ERR_CODE_HWRM_ERROR			   (0xfUL)
-	#define HWRM_ERR_CODE_UNKNOWN_ERR			   (0xfffeUL)
-	#define HWRM_ERR_CODE_CMD_NOT_SUPPORTED		   (0xffffUL)
-	__le16 unused_0[3];
-};
-
-/* Output (16 bytes) */
-struct hwrm_err_output {
-	__le16 error_code;
-	__le16 req_type;
-	__le16 seq_id;
-	__le16 resp_len;
-	__le32 opaque_0;
-	__le16 opaque_1;
-	u8 cmd_err;
-	u8 valid;
-};
-
-/* Port Tx Statistics Formats (408 bytes) */
-struct tx_port_stats {
-	__le64 tx_64b_frames;
-	__le64 tx_65b_127b_frames;
-	__le64 tx_128b_255b_frames;
-	__le64 tx_256b_511b_frames;
-	__le64 tx_512b_1023b_frames;
-	__le64 tx_1024b_1518_frames;
-	__le64 tx_good_vlan_frames;
-	__le64 tx_1519b_2047_frames;
-	__le64 tx_2048b_4095b_frames;
-	__le64 tx_4096b_9216b_frames;
-	__le64 tx_9217b_16383b_frames;
-	__le64 tx_good_frames;
-	__le64 tx_total_frames;
-	__le64 tx_ucast_frames;
-	__le64 tx_mcast_frames;
-	__le64 tx_bcast_frames;
-	__le64 tx_pause_frames;
-	__le64 tx_pfc_frames;
-	__le64 tx_jabber_frames;
-	__le64 tx_fcs_err_frames;
-	__le64 tx_control_frames;
-	__le64 tx_oversz_frames;
-	__le64 tx_single_dfrl_frames;
-	__le64 tx_multi_dfrl_frames;
-	__le64 tx_single_coll_frames;
-	__le64 tx_multi_coll_frames;
-	__le64 tx_late_coll_frames;
-	__le64 tx_excessive_coll_frames;
-	__le64 tx_frag_frames;
-	__le64 tx_err;
-	__le64 tx_tagged_frames;
-	__le64 tx_dbl_tagged_frames;
-	__le64 tx_runt_frames;
-	__le64 tx_fifo_underruns;
-	__le64 tx_pfc_ena_frames_pri0;
-	__le64 tx_pfc_ena_frames_pri1;
-	__le64 tx_pfc_ena_frames_pri2;
-	__le64 tx_pfc_ena_frames_pri3;
-	__le64 tx_pfc_ena_frames_pri4;
-	__le64 tx_pfc_ena_frames_pri5;
-	__le64 tx_pfc_ena_frames_pri6;
-	__le64 tx_pfc_ena_frames_pri7;
-	__le64 tx_eee_lpi_events;
-	__le64 tx_eee_lpi_duration;
-	__le64 tx_llfc_logical_msgs;
-	__le64 tx_hcfc_msgs;
-	__le64 tx_total_collisions;
-	__le64 tx_bytes;
-	__le64 tx_xthol_frames;
-	__le64 tx_stat_discard;
-	__le64 tx_stat_error;
-};
-
-/* Port Rx Statistics Formats (528 bytes) */
-struct rx_port_stats {
-	__le64 rx_64b_frames;
-	__le64 rx_65b_127b_frames;
-	__le64 rx_128b_255b_frames;
-	__le64 rx_256b_511b_frames;
-	__le64 rx_512b_1023b_frames;
-	__le64 rx_1024b_1518_frames;
-	__le64 rx_good_vlan_frames;
-	__le64 rx_1519b_2047b_frames;
-	__le64 rx_2048b_4095b_frames;
-	__le64 rx_4096b_9216b_frames;
-	__le64 rx_9217b_16383b_frames;
-	__le64 rx_total_frames;
-	__le64 rx_ucast_frames;
-	__le64 rx_mcast_frames;
-	__le64 rx_bcast_frames;
-	__le64 rx_fcs_err_frames;
-	__le64 rx_ctrl_frames;
-	__le64 rx_pause_frames;
-	__le64 rx_pfc_frames;
-	__le64 rx_unsupported_opcode_frames;
-	__le64 rx_unsupported_da_pausepfc_frames;
-	__le64 rx_wrong_sa_frames;
-	__le64 rx_align_err_frames;
-	__le64 rx_oor_len_frames;
-	__le64 rx_code_err_frames;
-	__le64 rx_false_carrier_frames;
-	__le64 rx_ovrsz_frames;
-	__le64 rx_jbr_frames;
-	__le64 rx_mtu_err_frames;
-	__le64 rx_match_crc_frames;
-	__le64 rx_promiscuous_frames;
-	__le64 rx_tagged_frames;
-	__le64 rx_double_tagged_frames;
-	__le64 rx_trunc_frames;
-	__le64 rx_good_frames;
-	__le64 rx_pfc_xon2xoff_frames_pri0;
-	__le64 rx_pfc_xon2xoff_frames_pri1;
-	__le64 rx_pfc_xon2xoff_frames_pri2;
-	__le64 rx_pfc_xon2xoff_frames_pri3;
-	__le64 rx_pfc_xon2xoff_frames_pri4;
-	__le64 rx_pfc_xon2xoff_frames_pri5;
-	__le64 rx_pfc_xon2xoff_frames_pri6;
-	__le64 rx_pfc_xon2xoff_frames_pri7;
-	__le64 rx_pfc_ena_frames_pri0;
-	__le64 rx_pfc_ena_frames_pri1;
-	__le64 rx_pfc_ena_frames_pri2;
-	__le64 rx_pfc_ena_frames_pri3;
-	__le64 rx_pfc_ena_frames_pri4;
-	__le64 rx_pfc_ena_frames_pri5;
-	__le64 rx_pfc_ena_frames_pri6;
-	__le64 rx_pfc_ena_frames_pri7;
-	__le64 rx_sch_crc_err_frames;
-	__le64 rx_undrsz_frames;
-	__le64 rx_frag_frames;
-	__le64 rx_eee_lpi_events;
-	__le64 rx_eee_lpi_duration;
-	__le64 rx_llfc_physical_msgs;
-	__le64 rx_llfc_logical_msgs;
-	__le64 rx_llfc_msgs_with_crc_err;
-	__le64 rx_hcfc_msgs;
-	__le64 rx_hcfc_msgs_with_crc_err;
-	__le64 rx_bytes;
-	__le64 rx_runt_bytes;
-	__le64 rx_runt_frames;
-	__le64 rx_stat_discard;
-	__le64 rx_stat_err;
-};
-
-/* VXLAN IPv4 encapsulation structure (16 bytes) */
-struct hwrm_vxlan_ipv4_hdr {
-	u8 ver_hlen;
-	#define VXLAN_IPV4_HDR_VER_HLEN_HEADER_LENGTH_MASK	    0xfUL
-	#define VXLAN_IPV4_HDR_VER_HLEN_HEADER_LENGTH_SFT	    0
-	#define VXLAN_IPV4_HDR_VER_HLEN_VERSION_MASK		    0xf0UL
-	#define VXLAN_IPV4_HDR_VER_HLEN_VERSION_SFT		    4
-	u8 tos;
-	__be16 ip_id;
-	__be16 flags_frag_offset;
-	u8 ttl;
-	u8 protocol;
-	__be32 src_ip_addr;
-	__be32 dest_ip_addr;
-};
-
-/* VXLAN IPv6 encapsulation structure (32 bytes) */
-struct hwrm_vxlan_ipv6_hdr {
-	__be32 ver_tc_flow_label;
-	#define VXLAN_IPV6_HDR_VER_TC_FLOW_LABEL_VER_SFT	   0x1cUL
-	#define VXLAN_IPV6_HDR_VER_TC_FLOW_LABEL_VER_MASK	   0xf0000000UL
-	#define VXLAN_IPV6_HDR_VER_TC_FLOW_LABEL_TC_SFT	   0x14UL
-	#define VXLAN_IPV6_HDR_VER_TC_FLOW_LABEL_TC_MASK	   0xff00000UL
-	#define VXLAN_IPV6_HDR_VER_TC_FLOW_LABEL_FLOW_LABEL_SFT   0x0UL
-	#define VXLAN_IPV6_HDR_VER_TC_FLOW_LABEL_FLOW_LABEL_MASK  0xfffffUL
-	__be16 payload_len;
-	u8 next_hdr;
-	u8 ttl;
-	__be32 src_ip_addr[4];
-	__be32 dest_ip_addr[4];
-};
-
-/* VXLAN encapsulation structure (72 bytes) */
-struct hwrm_cfa_encap_data_vxlan {
-	u8 src_mac_addr[6];
-	__le16 unused_0;
-	u8 dst_mac_addr[6];
-	u8 num_vlan_tags;
-	u8 unused_1;
-	__be16 ovlan_tpid;
-	__be16 ovlan_tci;
-	__be16 ivlan_tpid;
-	__be16 ivlan_tci;
-	__le32 l3[10];
-	#define CFA_ENCAP_DATA_VXLAN_L3_VER_MASK		   0xfUL
-	#define CFA_ENCAP_DATA_VXLAN_L3_VER_IPV4		   0x4UL
-	#define CFA_ENCAP_DATA_VXLAN_L3_VER_IPV6		   0x6UL
-	__be16 src_port;
-	__be16 dst_port;
-	__be32 vni;
-};
-
-/* Periodic Statistics Context DMA to host (160 bytes) */
-struct ctx_hw_stats {
-	__le64 rx_ucast_pkts;
-	__le64 rx_mcast_pkts;
-	__le64 rx_bcast_pkts;
-	__le64 rx_discard_pkts;
-	__le64 rx_drop_pkts;
-	__le64 rx_ucast_bytes;
-	__le64 rx_mcast_bytes;
-	__le64 rx_bcast_bytes;
-	__le64 tx_ucast_pkts;
-	__le64 tx_mcast_pkts;
-	__le64 tx_bcast_pkts;
-	__le64 tx_discard_pkts;
-	__le64 tx_drop_pkts;
-	__le64 tx_ucast_bytes;
-	__le64 tx_mcast_bytes;
-	__le64 tx_bcast_bytes;
-	__le64 tpa_pkts;
-	__le64 tpa_bytes;
-	__le64 tpa_events;
-	__le64 tpa_aborts;
-};
-
-/* Structure data header (16 bytes) */
-struct hwrm_struct_hdr {
-	__le16 struct_id;
-	#define STRUCT_HDR_STRUCT_ID_LLDP_CFG			   0x41bUL
-	#define STRUCT_HDR_STRUCT_ID_DCBX_ETS			   0x41dUL
-	#define STRUCT_HDR_STRUCT_ID_DCBX_PFC			   0x41fUL
-	#define STRUCT_HDR_STRUCT_ID_DCBX_APP			   0x421UL
-	#define STRUCT_HDR_STRUCT_ID_DCBX_FEATURE_STATE	   0x422UL
-	#define STRUCT_HDR_STRUCT_ID_LLDP_GENERIC		   0x424UL
-	#define STRUCT_HDR_STRUCT_ID_LLDP_DEVICE		   0x426UL
-	#define STRUCT_HDR_STRUCT_ID_AFM_OPAQUE		   0x1UL
-	#define STRUCT_HDR_STRUCT_ID_PORT_DESCRIPTION		   0xaUL
-	#define STRUCT_HDR_STRUCT_ID_RSS_V2			   0x64UL
-	__le16 len;
-	u8 version;
-	u8 count;
-	__le16 subtype;
-	__le16 next_offset;
-	#define STRUCT_HDR_NEXT_OFFSET_LAST			   0x0UL
-	__le16 unused_0[3];
-};
-
-/* DCBX Application configuration structure (1057) (8 bytes) */
-struct hwrm_struct_data_dcbx_app {
-	__be16 protocol_id;
-	u8 protocol_selector;
-	#define STRUCT_DATA_DCBX_APP_PROTOCOL_SELECTOR_ETHER_TYPE 0x1UL
-	#define STRUCT_DATA_DCBX_APP_PROTOCOL_SELECTOR_TCP_PORT   0x2UL
-	#define STRUCT_DATA_DCBX_APP_PROTOCOL_SELECTOR_UDP_PORT   0x3UL
-	#define STRUCT_DATA_DCBX_APP_PROTOCOL_SELECTOR_TCP_UDP_PORT 0x4UL
-	u8 priority;
-	u8 valid;
-	u8 unused_0[3];
+	__le16	error_code;
+	__le16	req_type;
+	__le16	seq_id;
+	__le16	resp_len;
+	u8	unused_0[7];
+	u8	valid;
 };
 
-#endif
+#endif /* _BNXT_HSI_H_ */
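Every HWRM response structure above ends with u8 valid as its final byte; firmware DMAs the response into host memory and writes that byte last, so the driver can poll it to learn when the whole reply has landed. A simplified sketch of the convention (the helper is illustrative; the real driver bounds its poll with HWRM_CMD_TIMEOUT):

	/* READ_ONCE keeps the compiler from caching the DMA'd byte
	 * across iterations of a polling loop. */
	static bool hwrm_resp_ready(const struct hwrm_nvm_read_output *resp)
	{
		return READ_ONCE(resp->valid) != 0;
	}
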
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c
index c9617675f934..d87faad901fe 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c
@@ -135,7 +135,10 @@ int bnxt_get_vf_config(struct net_device *dev, int vf_id,
 	ivi->vf = vf_id;
 	vf = &bp->pf.vf[vf_id];
 
-	memcpy(&ivi->mac, vf->mac_addr, ETH_ALEN);
+	if (is_valid_ether_addr(vf->mac_addr))
+		memcpy(&ivi->mac, vf->mac_addr, ETH_ALEN);
+	else
+		memcpy(&ivi->mac, vf->vf_mac_addr, ETH_ALEN);
 	ivi->max_tx_rate = vf->max_tx_rate;
 	ivi->min_tx_rate = vf->min_tx_rate;
 	ivi->vlan = vf->vlan;
@@ -416,29 +419,126 @@ static int bnxt_hwrm_func_buf_rgtr(struct bnxt *bp)
 	return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
 }
 
-/* only call by PF to reserve resources for VF */
+/* Only called by the PF to reserve resources for VFs; returns the actual
+ * number of VFs configured, or < 0 on error.
+ */
+static int bnxt_hwrm_func_vf_resc_cfg(struct bnxt *bp, int num_vfs)
+{
+	struct hwrm_func_vf_resource_cfg_input req = {0};
+	struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
+	u16 vf_tx_rings, vf_rx_rings, vf_cp_rings;
+	u16 vf_stat_ctx, vf_vnics, vf_ring_grps;
+	struct bnxt_pf_info *pf = &bp->pf;
+	int i, rc = 0;
+
+	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_VF_RESOURCE_CFG, -1, -1);
+
+	vf_cp_rings = hw_resc->max_cp_rings - bp->cp_nr_rings;
+	vf_stat_ctx = hw_resc->max_stat_ctxs - bp->num_stat_ctxs;
+	if (bp->flags & BNXT_FLAG_AGG_RINGS)
+		vf_rx_rings = hw_resc->max_rx_rings - bp->rx_nr_rings * 2;
+	else
+		vf_rx_rings = hw_resc->max_rx_rings - bp->rx_nr_rings;
+	vf_ring_grps = hw_resc->max_hw_ring_grps - bp->rx_nr_rings;
+	vf_tx_rings = hw_resc->max_tx_rings - bp->tx_nr_rings;
+	vf_vnics = hw_resc->max_vnics - bp->nr_vnics;
+	vf_vnics = min_t(u16, vf_vnics, vf_rx_rings);
+
+	req.min_rsscos_ctx = cpu_to_le16(1);
+	req.max_rsscos_ctx = cpu_to_le16(1);
+	if (pf->vf_resv_strategy == BNXT_VF_RESV_STRATEGY_MINIMAL) {
+		req.min_cmpl_rings = cpu_to_le16(1);
+		req.min_tx_rings = cpu_to_le16(1);
+		req.min_rx_rings = cpu_to_le16(1);
+		req.min_l2_ctxs = cpu_to_le16(1);
+		req.min_vnics = cpu_to_le16(1);
+		req.min_stat_ctx = cpu_to_le16(1);
+		req.min_hw_ring_grps = cpu_to_le16(1);
+	} else {
+		vf_cp_rings /= num_vfs;
+		vf_tx_rings /= num_vfs;
+		vf_rx_rings /= num_vfs;
+		vf_vnics /= num_vfs;
+		vf_stat_ctx /= num_vfs;
+		vf_ring_grps /= num_vfs;
+
+		req.min_cmpl_rings = cpu_to_le16(vf_cp_rings);
+		req.min_tx_rings = cpu_to_le16(vf_tx_rings);
+		req.min_rx_rings = cpu_to_le16(vf_rx_rings);
+		req.min_l2_ctxs = cpu_to_le16(4);
+		req.min_vnics = cpu_to_le16(vf_vnics);
+		req.min_stat_ctx = cpu_to_le16(vf_stat_ctx);
+		req.min_hw_ring_grps = cpu_to_le16(vf_ring_grps);
+	}
+	req.max_cmpl_rings = cpu_to_le16(vf_cp_rings);
+	req.max_tx_rings = cpu_to_le16(vf_tx_rings);
+	req.max_rx_rings = cpu_to_le16(vf_rx_rings);
+	req.max_l2_ctxs = cpu_to_le16(4);
+	req.max_vnics = cpu_to_le16(vf_vnics);
+	req.max_stat_ctx = cpu_to_le16(vf_stat_ctx);
+	req.max_hw_ring_grps = cpu_to_le16(vf_ring_grps);
+
+	mutex_lock(&bp->hwrm_cmd_lock);
+	for (i = 0; i < num_vfs; i++) {
+		req.vf_id = cpu_to_le16(pf->first_vf_id + i);
+		rc = _hwrm_send_message(bp, &req, sizeof(req),
+					HWRM_CMD_TIMEOUT);
+		if (rc) {
+			rc = -ENOMEM;
+			break;
+		}
+		pf->active_vfs = i + 1;
+		pf->vf[i].fw_fid = pf->first_vf_id + i;
+	}
+	mutex_unlock(&bp->hwrm_cmd_lock);
+	if (pf->active_vfs) {
+		u16 n = 1;
+
+		if (pf->vf_resv_strategy != BNXT_VF_RESV_STRATEGY_MINIMAL)
+			n = pf->active_vfs;
+
+		hw_resc->max_tx_rings -= vf_tx_rings * n;
+		hw_resc->max_rx_rings -= vf_rx_rings * n;
+		hw_resc->max_hw_ring_grps -= vf_ring_grps * n;
+		hw_resc->max_cp_rings -= vf_cp_rings * n;
+		hw_resc->max_rsscos_ctxs -= pf->active_vfs;
+		hw_resc->max_stat_ctxs -= vf_stat_ctx * n;
+		hw_resc->max_vnics -= vf_vnics * n;
+
+		rc = pf->active_vfs;
+	}
+	return rc;
+}
+
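HWRM_FUNC_VF_RESOURCE_CFG hands firmware a min/max range per resource per VF: under BNXT_VF_RESV_STRATEGY_MINIMAL the minima drop to 1 so firmware may overcommit, otherwise min equals max at an equal share of whatever the PF is not using. Illustrative arithmetic only (not driver code), mirroring the split above:

	/* Equal-share split: e.g. 64 spare TX rings across 8 VFs
	 * yields min = max = 8 rings offered to each VF. */
	static u16 vf_equal_share(u16 max_resc, u16 used_by_pf, int num_vfs)
	{
		return (max_resc - used_by_pf) / num_vfs;
	}
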
+/* Only called by the PF to reserve resources for VFs; returns the actual
+ * number of VFs configured, or < 0 on error.
+ */
 static int bnxt_hwrm_func_cfg(struct bnxt *bp, int num_vfs)
 {
 	u32 rc = 0, mtu, i;
 	u16 vf_tx_rings, vf_rx_rings, vf_cp_rings, vf_stat_ctx, vf_vnics;
-	u16 vf_ring_grps;
+	struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
+	u16 vf_ring_grps, max_stat_ctxs;
 	struct hwrm_func_cfg_input req = {0};
 	struct bnxt_pf_info *pf = &bp->pf;
 	int total_vf_tx_rings = 0;
 
 	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_CFG, -1, -1);
 
+	max_stat_ctxs = hw_resc->max_stat_ctxs;
+
 	/* Remaining rings are distributed equally amongs VF's for now */
-	vf_cp_rings = (pf->max_cp_rings - bp->cp_nr_rings) / num_vfs;
-	vf_stat_ctx = (pf->max_stat_ctxs - bp->num_stat_ctxs) / num_vfs;
+	vf_cp_rings = (hw_resc->max_cp_rings - bp->cp_nr_rings) / num_vfs;
+	vf_stat_ctx = (max_stat_ctxs - bp->num_stat_ctxs) / num_vfs;
 	if (bp->flags & BNXT_FLAG_AGG_RINGS)
-		vf_rx_rings = (pf->max_rx_rings - bp->rx_nr_rings * 2) /
+		vf_rx_rings = (hw_resc->max_rx_rings - bp->rx_nr_rings * 2) /
 			      num_vfs;
 	else
-		vf_rx_rings = (pf->max_rx_rings - bp->rx_nr_rings) / num_vfs;
-	vf_ring_grps = (bp->pf.max_hw_ring_grps - bp->rx_nr_rings) / num_vfs;
-	vf_tx_rings = (pf->max_tx_rings - bp->tx_nr_rings) / num_vfs;
-	vf_vnics = (pf->max_vnics - bp->nr_vnics) / num_vfs;
+		vf_rx_rings = (hw_resc->max_rx_rings - bp->rx_nr_rings) /
+			      num_vfs;
+	vf_ring_grps = (hw_resc->max_hw_ring_grps - bp->rx_nr_rings) / num_vfs;
+	vf_tx_rings = (hw_resc->max_tx_rings - bp->tx_nr_rings) / num_vfs;
+	vf_vnics = (hw_resc->max_vnics - bp->nr_vnics) / num_vfs;
 	vf_vnics = min_t(u16, vf_vnics, vf_rx_rings);
 
 	req.enables = cpu_to_le32(FUNC_CFG_REQ_ENABLES_MTU |
@@ -485,22 +585,34 @@ static int bnxt_hwrm_func_cfg(struct bnxt *bp, int num_vfs)
 		total_vf_tx_rings += vf_tx_rsvd;
 	}
 	mutex_unlock(&bp->hwrm_cmd_lock);
-	if (!rc) {
-		pf->max_tx_rings -= total_vf_tx_rings;
-		pf->max_rx_rings -= vf_rx_rings * num_vfs;
-		pf->max_hw_ring_grps -= vf_ring_grps * num_vfs;
-		pf->max_cp_rings -= vf_cp_rings * num_vfs;
-		pf->max_rsscos_ctxs -= num_vfs;
-		pf->max_stat_ctxs -= vf_stat_ctx * num_vfs;
-		pf->max_vnics -= vf_vnics * num_vfs;
+	if (rc)
+		rc = -ENOMEM;
+	if (pf->active_vfs) {
+		hw_resc->max_tx_rings -= total_vf_tx_rings;
+		hw_resc->max_rx_rings -= vf_rx_rings * num_vfs;
+		hw_resc->max_hw_ring_grps -= vf_ring_grps * num_vfs;
+		hw_resc->max_cp_rings -= vf_cp_rings * num_vfs;
+		hw_resc->max_rsscos_ctxs -= num_vfs;
+		hw_resc->max_stat_ctxs -= vf_stat_ctx * num_vfs;
+		hw_resc->max_vnics -= vf_vnics * num_vfs;
+		rc = pf->active_vfs;
 	}
 	return rc;
 }
 
+static int bnxt_func_cfg(struct bnxt *bp, int num_vfs)
+{
+	if (bp->flags & BNXT_FLAG_NEW_RM)
+		return bnxt_hwrm_func_vf_resc_cfg(bp, num_vfs);
+	else
+		return bnxt_hwrm_func_cfg(bp, num_vfs);
+}
+
 static int bnxt_sriov_enable(struct bnxt *bp, int *num_vfs)
 {
 	int rc = 0, vfs_supported;
 	int min_rx_rings, min_tx_rings, min_rss_ctxs;
+	struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
 	int tx_ok = 0, rx_ok = 0, rss_ok = 0;
 	int avail_cp, avail_stat;
 
@@ -510,8 +622,8 @@ static int bnxt_sriov_enable(struct bnxt *bp, int *num_vfs)
 	 */
 	vfs_supported = *num_vfs;
 
-	avail_cp = bp->pf.max_cp_rings - bp->cp_nr_rings;
-	avail_stat = bp->pf.max_stat_ctxs - bp->num_stat_ctxs;
+	avail_cp = hw_resc->max_cp_rings - bp->cp_nr_rings;
+	avail_stat = hw_resc->max_stat_ctxs - bp->num_stat_ctxs;
 	avail_cp = min_t(int, avail_cp, avail_stat);
 
 	while (vfs_supported) {
@@ -520,23 +632,24 @@ static int bnxt_sriov_enable(struct bnxt *bp, int *num_vfs)
 		min_rss_ctxs = vfs_supported;
 
 		if (bp->flags & BNXT_FLAG_AGG_RINGS) {
-			if (bp->pf.max_rx_rings - bp->rx_nr_rings * 2 >=
+			if (hw_resc->max_rx_rings - bp->rx_nr_rings * 2 >=
 			    min_rx_rings)
 				rx_ok = 1;
 		} else {
-			if (bp->pf.max_rx_rings - bp->rx_nr_rings >=
+			if (hw_resc->max_rx_rings - bp->rx_nr_rings >=
 			    min_rx_rings)
 				rx_ok = 1;
 		}
-		if (bp->pf.max_vnics - bp->nr_vnics < min_rx_rings ||
+		if (hw_resc->max_vnics - bp->nr_vnics < min_rx_rings ||
 		    avail_cp < min_rx_rings)
 			rx_ok = 0;
 
-		if (bp->pf.max_tx_rings - bp->tx_nr_rings >= min_tx_rings &&
+		if (hw_resc->max_tx_rings - bp->tx_nr_rings >= min_tx_rings &&
 		    avail_cp >= min_tx_rings)
 			tx_ok = 1;
 
-		if (bp->pf.max_rsscos_ctxs - bp->rsscos_nr_ctxs >= min_rss_ctxs)
+		if (hw_resc->max_rsscos_ctxs - bp->rsscos_nr_ctxs >=
+		    min_rss_ctxs)
 			rss_ok = 1;
 
 		if (tx_ok && rx_ok && rss_ok)
@@ -561,9 +674,16 @@ static int bnxt_sriov_enable(struct bnxt *bp, int *num_vfs)
 		goto err_out1;
 
 	/* Reserve resources for VFs */
-	rc = bnxt_hwrm_func_cfg(bp, *num_vfs);
-	if (rc)
-		goto err_out2;
+	rc = bnxt_func_cfg(bp, *num_vfs);
+	if (rc != *num_vfs) {
+		if (rc <= 0) {
+			netdev_warn(bp->dev, "Unable to reserve resources for SRIOV.\n");
+			*num_vfs = 0;
+			goto err_out2;
+		}
+		netdev_warn(bp->dev, "Only able to reserve resources for %d VFs.\n", rc);
+		*num_vfs = rc;
+	}
 
 	/* Register buffers for VFs */
 	rc = bnxt_hwrm_func_buf_rgtr(bp);
@@ -766,17 +886,51 @@ exec_fwd_resp_exit:
 	return rc;
 }
 
+static int bnxt_vf_store_mac(struct bnxt *bp, struct bnxt_vf_info *vf)
+{
+	u32 msg_size = sizeof(struct hwrm_func_vf_cfg_input);
+	struct hwrm_func_vf_cfg_input *req =
+		(struct hwrm_func_vf_cfg_input *)vf->hwrm_cmd_req_addr;
+
+	/* Only allow VF to set a valid MAC address if the PF assigned MAC
+	 * address is zero
+	 */
+	if (req->enables & cpu_to_le32(FUNC_VF_CFG_REQ_ENABLES_DFLT_MAC_ADDR)) {
+		if (is_valid_ether_addr(req->dflt_mac_addr) &&
+		    !is_valid_ether_addr(vf->mac_addr)) {
+			ether_addr_copy(vf->vf_mac_addr, req->dflt_mac_addr);
+			return bnxt_hwrm_exec_fwd_resp(bp, vf, msg_size);
+		}
+		return bnxt_hwrm_fwd_err_resp(bp, vf, msg_size);
+	}
+	return bnxt_hwrm_exec_fwd_resp(bp, vf, msg_size);
+}
+
 static int bnxt_vf_validate_set_mac(struct bnxt *bp, struct bnxt_vf_info *vf)
 {
 	u32 msg_size = sizeof(struct hwrm_cfa_l2_filter_alloc_input);
 	struct hwrm_cfa_l2_filter_alloc_input *req =
 		(struct hwrm_cfa_l2_filter_alloc_input *)vf->hwrm_cmd_req_addr;
+	bool mac_ok = false;
 
-	if (!is_valid_ether_addr(vf->mac_addr) ||
-	    ether_addr_equal((const u8 *)req->l2_addr, vf->mac_addr))
+	/* VF MAC address must first match PF MAC address, if it is valid.
+	 * Otherwise, it must match the VF MAC address if firmware spec >=
+	 * 1.2.2
+	 */
+	if (is_valid_ether_addr(vf->mac_addr)) {
+		if (ether_addr_equal((const u8 *)req->l2_addr, vf->mac_addr))
+			mac_ok = true;
+	} else if (is_valid_ether_addr(vf->vf_mac_addr)) {
+		if (ether_addr_equal((const u8 *)req->l2_addr, vf->vf_mac_addr))
+			mac_ok = true;
+	} else if (bp->hwrm_spec_code < 0x10202) {
+		mac_ok = true;
+	}
+	if (mac_ok)
 		return bnxt_hwrm_exec_fwd_resp(bp, vf, msg_size);
-	else
-		return bnxt_hwrm_fwd_err_resp(bp, vf, msg_size);
+	return bnxt_hwrm_fwd_err_resp(bp, vf, msg_size);
 }
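Taken together, bnxt_vf_store_mac() and bnxt_vf_validate_set_mac() implement a two-tier policy: a PF-assigned MAC always wins; failing that, the address the VF registered for itself via HWRM_FUNC_VF_CFG must match; and firmware older than spec 1.2.2 never forwards the MAC, so it cannot be checked. A condensed, illustrative restatement of that decision (not a separate driver helper):

	static bool vf_mac_allowed(struct bnxt *bp, struct bnxt_vf_info *vf,
				   const u8 *l2_addr)
	{
		if (is_valid_ether_addr(vf->mac_addr))
			return ether_addr_equal(l2_addr, vf->mac_addr);
		if (is_valid_ether_addr(vf->vf_mac_addr))
			return ether_addr_equal(l2_addr, vf->vf_mac_addr);
		return bp->hwrm_spec_code < 0x10202;
	}
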
 
 static int bnxt_vf_set_link(struct bnxt *bp, struct bnxt_vf_info *vf)
@@ -838,6 +992,9 @@ static int bnxt_vf_req_validate_snd(struct bnxt *bp, struct bnxt_vf_info *vf)
 	u32 req_type = le16_to_cpu(encap_req->req_type);
 
 	switch (req_type) {
+	case HWRM_FUNC_VF_CFG:
+		rc = bnxt_vf_store_mac(bp, vf);
+		break;
 	case HWRM_CFA_L2_FILTER_ALLOC:
 		rc = bnxt_vf_validate_set_mac(bp, vf);
 		break;
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c
index d8fee26cd45e..fbe6e208e17b 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c
@@ -43,7 +43,7 @@ static u16 bnxt_flow_get_dst_fid(struct bnxt *pf_bp, struct net_device *dev)
 	}
 
 	/* Is dev a VF-rep? */
-	if (dev != pf_bp->dev)
+	if (bnxt_dev_is_vf_rep(dev))
 		return bnxt_vf_rep_get_fid(dev);
 
 	bp = netdev_priv(dev);
@@ -54,12 +54,10 @@ static int bnxt_tc_parse_redir(struct bnxt *bp,
 			       struct bnxt_tc_actions *actions,
 			       const struct tc_action *tc_act)
 {
-	int ifindex = tcf_mirred_ifindex(tc_act);
-	struct net_device *dev;
+	struct net_device *dev = tcf_mirred_dev(tc_act);
 
-	dev = __dev_get_by_index(dev_net(bp->dev), ifindex);
 	if (!dev) {
-		netdev_info(bp->dev, "no dev for ifindex=%d", ifindex);
+		netdev_info(bp->dev, "no dev in mirred action");
 		return -EINVAL;
 	}
 
@@ -148,9 +146,6 @@ static int bnxt_tc_parse_actions(struct bnxt *bp,
 		}
 	}
 
-	if (rc)
-		return rc;
-
 	if (actions->flags & BNXT_TC_ACTION_FLAG_FWD) {
 		if (actions->flags & BNXT_TC_ACTION_FLAG_TUNNEL_ENCAP) {
 			/* dst_fid is PF's fid */
@@ -164,7 +159,7 @@ static int bnxt_tc_parse_actions(struct bnxt *bp,
 		}
 	}
 
-	return rc;
+	return 0;
 }
 
 #define GET_KEY(flow_cmd, key_type)					\
@@ -1417,11 +1412,7 @@ bnxt_tc_flow_stats_batch_prep(struct bnxt *bp,
 	void *flow_node;
 	int rc, i;
 
-	rc = rhashtable_walk_start(iter);
-	if (rc && rc != -EAGAIN) {
-		i = 0;
-		goto done;
-	}
+	rhashtable_walk_start(iter);
 
 	rc = 0;
 	for (i = 0; i < BNXT_FLOW_STATS_BATCH_MAX; i++) {
@@ -1483,9 +1474,6 @@ int bnxt_tc_setup_flower(struct bnxt *bp, u16 src_fid,
 {
 	int rc = 0;
 
-	if (cls_flower->common.chain_index)
-		return -EOPNOTSUPP;
-
 	switch (cls_flower->command) {
 	case TC_CLSFLOWER_REPLACE:
 		rc = bnxt_tc_add_flow(bp, src_fid, cls_flower);
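
The dropped return-value check in bnxt_tc_flow_stats_batch_prep() reflects the rhashtable API change in this cycle: rhashtable_walk_start() now returns void, and the -EAGAIN it used to report is delivered through rhashtable_walk_next() instead. A sketch of the resulting walker idiom, in kernel context (ht, iter and obj are illustrative names, not taken from this driver):

	struct rhashtable_iter iter;
	void *obj;

	rhashtable_walk_enter(&ht, &iter);
	rhashtable_walk_start(&iter);		/* void now; cannot fail */

	while ((obj = rhashtable_walk_next(&iter)) != NULL) {
		if (IS_ERR(obj)) {
			if (PTR_ERR(obj) == -EAGAIN)
				continue;	/* table resized under us */
			break;
		}
		/* process obj */
	}

	rhashtable_walk_stop(&iter);
	rhashtable_walk_exit(&iter);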
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_vfr.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_vfr.c
index 69186d188c43..26290403f38f 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_vfr.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_vfr.c
@@ -124,7 +124,8 @@ static int bnxt_vf_rep_setup_tc_block_cb(enum tc_setup_type type,
 	struct bnxt *bp = vf_rep->bp;
 	int vf_fid = bp->pf.vf[vf_rep->vf_idx].fw_fid;
 
-	if (!bnxt_tc_flower_enabled(vf_rep->bp) || !tc_can_offload(bp->dev))
+	if (!bnxt_tc_flower_enabled(vf_rep->bp) ||
+	    !tc_cls_can_offload_and_chain0(bp->dev, type_data))
 		return -EOPNOTSUPP;
 
 	switch (type) {
@@ -241,6 +242,11 @@ static const struct net_device_ops bnxt_vf_rep_netdev_ops = {
 	.ndo_get_phys_port_name = bnxt_vf_rep_get_phys_port_name
 };
 
+bool bnxt_dev_is_vf_rep(struct net_device *dev)
+{
+	return dev->netdev_ops == &bnxt_vf_rep_netdev_ops;
+}
+
 /* Called when the parent PF interface is closed:
  * As the mode transition from SWITCHDEV to LEGACY
  * happens under the rtnl_lock() this routine is safe
@@ -376,6 +382,26 @@ static void bnxt_vf_rep_netdev_init(struct bnxt *bp, struct bnxt_vf_rep *vf_rep,
 	ether_addr_copy(dev->dev_addr, dev->perm_addr);
 }
 
+static int bnxt_pcie_dsn_get(struct bnxt *bp, u8 dsn[])
+{
+	struct pci_dev *pdev = bp->pdev;
+	int pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_DSN);
+	u32 dw;
+
+	if (!pos) {
+		netdev_info(bp->dev, "Unable to read adapter's DSN\n");
+		return -EOPNOTSUPP;
+	}
+
+	/* DSN (two dw) is at an offset of 4 from the cap pos */
+	pos += 4;
+	pci_read_config_dword(pdev, pos, &dw);
+	put_unaligned_le32(dw, &dsn[0]);
+	pci_read_config_dword(pdev, pos + 4, &dw);
+	put_unaligned_le32(dw, &dsn[4]);
+	return 0;
+}
+
 static int bnxt_vf_reps_create(struct bnxt *bp)
 {
 	u16 *cfa_code_map = NULL, num_vfs = pci_num_vf(bp->pdev);
@@ -440,6 +466,11 @@ static int bnxt_vf_reps_create(struct bnxt *bp)
 		}
 	}
 
+	/* Read the adapter's DSN to use as the eswitch switch_id */
+	rc = bnxt_pcie_dsn_get(bp, bp->switch_id);
+	if (rc)
+		goto err;
+
 	/* publish cfa_code_map only after all VF-reps have been initialized */
 	bp->cfa_code_map = cfa_code_map;
 	bp->eswitch_mode = DEVLINK_ESWITCH_MODE_SWITCHDEV;
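
bnxt_pcie_dsn_get() walks the PCIe extended capability list for capability 0x03 (Device Serial Number) and reads the two dwords that follow the header. The same walk can be done from userspace against sysfs config space; a sketch, assuming a placeholder device address and root privileges:

	#include <fcntl.h>
	#include <stdint.h>
	#include <stdio.h>
	#include <unistd.h>

	#define PCI_EXT_CAP_ID_DSN 0x03

	int main(void)
	{
		/* placeholder BDF; reading past offset 0x40 needs root */
		int fd = open("/sys/bus/pci/devices/0000:01:00.0/config", O_RDONLY);
		uint32_t hdr = 0, lo, hi;
		int pos = 0x100;	/* extended capabilities start here */

		if (fd < 0)
			return 1;

		while (pos) {
			if (pread(fd, &hdr, 4, pos) != 4)
				return 1;
			if ((hdr & 0xffff) == PCI_EXT_CAP_ID_DSN)
				break;
			pos = (hdr >> 20) & 0xffc;	/* next capability offset */
		}
		if (!pos)
			return 1;

		/* DSN: two dwords after the capability header */
		pread(fd, &lo, 4, pos + 4);
		pread(fd, &hi, 4, pos + 8);
		printf("DSN: %08x%08x\n", hi, lo);
		close(fd);
		return 0;
	}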
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_vfr.h b/drivers/net/ethernet/broadcom/bnxt/bnxt_vfr.h
index fb06bbe70e42..38b9a75ad724 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_vfr.h
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_vfr.h
@@ -28,6 +28,7 @@ static inline u16 bnxt_vf_rep_get_fid(struct net_device *dev)
 	return bp->pf.vf[vf_rep->vf_idx].fw_fid;
 }
 
+bool bnxt_dev_is_vf_rep(struct net_device *dev);
 int bnxt_dl_eswitch_mode_get(struct devlink *devlink, u16 *mode);
 int bnxt_dl_eswitch_mode_set(struct devlink *devlink, u16 mode);
 
@@ -54,5 +55,10 @@ static inline u16 bnxt_vf_rep_get_fid(struct net_device *dev)
 {
 	return 0;
 }
+
+static inline bool bnxt_dev_is_vf_rep(struct net_device *dev)
+{
+	return false;
+}
 #endif /* CONFIG_BNXT_SRIOV */
 #endif /* BNXT_VFR_H */
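
The #else half of this header supplies a static inline stub for bnxt_dev_is_vf_rep() so callers compile unchanged when CONFIG_BNXT_SRIOV is off. A generic, standalone illustration of that config-gated stub pattern; FEATURE_X and feature_active() are invented for the example:

	#include <stdbool.h>
	#include <stdio.h>

	/* #define FEATURE_X 1 */	/* toggle to compare both builds */

	#ifdef FEATURE_X
	static bool feature_active(int id)
	{
		return id > 0;		/* real implementation lives here */
	}
	#else
	/* stub compiles away; callers need no #ifdef of their own */
	static inline bool feature_active(int id)
	{
		(void)id;
		return false;
	}
	#endif

	int main(void)
	{
		puts(feature_active(42) ? "feature path" : "fallback path");
		return 0;
	}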
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c
index 261e5847557a..1389ab5e05df 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c
@@ -96,6 +96,7 @@ bool bnxt_rx_xdp(struct bnxt *bp, struct bnxt_rx_ring_info *rxr, u16 cons,
 	xdp.data = *data_ptr;
 	xdp_set_data_meta_invalid(&xdp);
 	xdp.data_end = *data_ptr + *len;
+	xdp.rxq = &rxr->xdp_rxq;
 	orig_data = xdp.data;
 	mapping = rx_buf->mapping - bp->rx_dma_offset;
 
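Pointing xdp.rxq at the per-ring xdp_rxq_info exposes the new receive-queue metadata to XDP programs; the structure must have been registered when the ring was set up. A sketch of the pairing, assuming a ring index i (the actual registration happens elsewhere in bnxt.c, outside this hunk):

	/* at RX ring allocation (sketch) */
	rc = xdp_rxq_info_reg(&rxr->xdp_rxq, bp->dev, i);
	if (rc < 0)
		return rc;

	/* at RX ring teardown (sketch) */
	xdp_rxq_info_unreg(&rxr->xdp_rxq);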
diff --git a/drivers/net/ethernet/broadcom/genet/bcmgenet.c b/drivers/net/ethernet/broadcom/genet/bcmgenet.c
index 24b4f4ceceef..b1e35a9accf1 100644
--- a/drivers/net/ethernet/broadcom/genet/bcmgenet.c
+++ b/drivers/net/ethernet/broadcom/genet/bcmgenet.c
@@ -2527,9 +2527,10 @@ static void bcmgenet_irq_task(struct work_struct *work)
 	spin_unlock_irq(&priv->lock);
 
 	/* Link UP/DOWN event */
-	if (status & UMAC_IRQ_LINK_EVENT)
-		phy_mac_interrupt(priv->dev->phydev,
-				  !!(status & UMAC_IRQ_LINK_UP));
+	if (status & UMAC_IRQ_LINK_EVENT) {
+		priv->dev->phydev->link = !!(status & UMAC_IRQ_LINK_UP);
+		phy_mac_interrupt(priv->dev->phydev);
+	}
 }
 
 /* bcmgenet_isr1: handle Rx and Tx priority queues */
diff --git a/drivers/net/ethernet/broadcom/tg3.c b/drivers/net/ethernet/broadcom/tg3.c
index 8995cfefbfcf..a77ee2f8fb8d 100644
--- a/drivers/net/ethernet/broadcom/tg3.c
+++ b/drivers/net/ethernet/broadcom/tg3.c
@@ -3227,7 +3227,7 @@ static int tg3_nvram_read_using_eeprom(struct tg3 *tp,
 	return 0;
 }
 
-#define NVRAM_CMD_TIMEOUT 5000
+#define NVRAM_CMD_TIMEOUT 10000
 
 static int tg3_nvram_exec_cmd(struct tg3 *tp, u32 nvram_cmd)
 {
@@ -14789,7 +14789,7 @@ static void tg3_get_5717_nvram_info(struct tg3 *tp)
 
 static void tg3_get_5720_nvram_info(struct tg3 *tp)
 {
-	u32 nvcfg1, nvmpinstrp;
+	u32 nvcfg1, nvmpinstrp, nv_status;
 
 	nvcfg1 = tr32(NVRAM_CFG1);
 	nvmpinstrp = nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK;
@@ -14801,6 +14801,23 @@ static void tg3_get_5720_nvram_info(struct tg3 *tp)
 		}
 
 		switch (nvmpinstrp) {
+		case FLASH_5762_MX25L_100:
+		case FLASH_5762_MX25L_200:
+		case FLASH_5762_MX25L_400:
+		case FLASH_5762_MX25L_800:
+		case FLASH_5762_MX25L_160_320:
+			tp->nvram_pagesize = 4096;
+			tp->nvram_jedecnum = JEDEC_MACRONIX;
+			tg3_flag_set(tp, NVRAM_BUFFERED);
+			tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
+			tg3_flag_set(tp, FLASH);
+			nv_status = tr32(NVRAM_AUTOSENSE_STATUS);
+			tp->nvram_size =
+				(1 << (nv_status >> AUTOSENSE_DEVID &
+						AUTOSENSE_DEVID_MASK)
+					<< AUTOSENSE_SIZE_IN_MB);
+			return;
+
 		case FLASH_5762_EEPROM_HD:
 			nvmpinstrp = FLASH_5720_EEPROM_HD;
 			break;
diff --git a/drivers/net/ethernet/broadcom/tg3.h b/drivers/net/ethernet/broadcom/tg3.h
index 1f0271fa7c74..47f51cc0566d 100644
--- a/drivers/net/ethernet/broadcom/tg3.h
+++ b/drivers/net/ethernet/broadcom/tg3.h
@@ -1863,7 +1863,7 @@
 #define NVRAM_STAT			0x00007004
 #define NVRAM_WRDATA			0x00007008
 #define NVRAM_ADDR			0x0000700c
-#define  NVRAM_ADDR_MSK			0x00ffffff
+#define  NVRAM_ADDR_MSK			0x07ffffff
 #define NVRAM_RDDATA			0x00007010
 #define NVRAM_CFG1			0x00007014
 #define  NVRAM_CFG1_FLASHIF_ENAB	 0x00000001
@@ -1945,6 +1945,11 @@
 #define  FLASH_5720_EEPROM_LD		 0x00000003
 #define  FLASH_5762_EEPROM_HD		 0x02000001
 #define  FLASH_5762_EEPROM_LD		 0x02000003
+#define  FLASH_5762_MX25L_100           0x00800000
+#define  FLASH_5762_MX25L_200           0x00800002
+#define  FLASH_5762_MX25L_400           0x00800001
+#define  FLASH_5762_MX25L_800           0x00800003
+#define  FLASH_5762_MX25L_160_320       0x03800002
 #define  FLASH_5720VENDOR_M_ATMEL_DB011D 0x01000000
 #define  FLASH_5720VENDOR_M_ATMEL_DB021D 0x01000002
 #define  FLASH_5720VENDOR_M_ATMEL_DB041D 0x01000001
@@ -2009,7 +2014,11 @@
 /* 0x702c unused */
 
 #define NVRAM_ADDR_LOCKOUT		0x00007030
-/* 0x7034 --> 0x7500 unused */
+#define NVRAM_AUTOSENSE_STATUS         0x00007038
+#define AUTOSENSE_DEVID                        0x00000010
+#define AUTOSENSE_DEVID_MASK           0x00000007
+#define AUTOSENSE_SIZE_IN_MB           17
+/* 0x703c --> 0x7500 unused */
 
 #define OTP_MODE			0x00007500
 #define OTP_MODE_OTP_THRU_GRC		 0x00000001
@@ -3378,6 +3387,7 @@ struct tg3 {
 #define JEDEC_ST			0x20
 #define JEDEC_SAIFUN			0x4f
 #define JEDEC_SST			0xbf
+#define JEDEC_MACRONIX                 0xc2
 
 #define ATMEL_AT24C02_CHIP_SIZE		TG3_NVRAM_SIZE_2KB
 #define ATMEL_AT24C02_PAGE_SIZE		(8)
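
For the new Macronix entries, the size expression in tg3_get_5720_nvram_info() reduces to (1 << devid) << 17 bytes: a 128 KiB (1 Mbit) base scaled by the 3-bit device ID autosensed at bits 18:16, and despite the AUTOSENSE_SIZE_IN_MB name the result is a byte count. A standalone check of the exact expression as written in the driver:

	#include <stdint.h>
	#include <stdio.h>

	#define AUTOSENSE_DEVID		0x00000010	/* used as a shift: bit 16 */
	#define AUTOSENSE_DEVID_MASK	0x00000007
	#define AUTOSENSE_SIZE_IN_MB	17		/* 1 << 17 = 128 KiB base */

	int main(void)
	{
		unsigned int devid;

		for (devid = 0; devid <= AUTOSENSE_DEVID_MASK; devid++) {
			uint32_t nv_status = devid << 16;	/* field at bits 18:16 */
			uint32_t size = (1 << (nv_status >> AUTOSENSE_DEVID &
					       AUTOSENSE_DEVID_MASK)
					 << AUTOSENSE_SIZE_IN_MB);

			printf("devid %u -> %5u KiB\n", devid, size >> 10);
		}
		return 0;
	}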
diff --git a/drivers/net/ethernet/cadence/macb.h b/drivers/net/ethernet/cadence/macb.h
index c93f3a2dc6c1..86659823b259 100644
--- a/drivers/net/ethernet/cadence/macb.h
+++ b/drivers/net/ethernet/cadence/macb.h
@@ -13,6 +13,7 @@
 #include <linux/phy.h>
 #include <linux/ptp_clock_kernel.h>
 #include <linux/net_tstamp.h>
+#include <linux/interrupt.h>
 
 #if defined(CONFIG_ARCH_DMA_ADDR_T_64BIT) || defined(CONFIG_MACB_USE_HWSTAMP)
 #define MACB_EXT_DESC
@@ -164,14 +165,38 @@
 #define GEM_DCFG5		0x0290 /* Design Config 5 */
 #define GEM_DCFG6		0x0294 /* Design Config 6 */
 #define GEM_DCFG7		0x0298 /* Design Config 7 */
+#define GEM_DCFG8		0x029C /* Design Config 8 */
 
 #define GEM_TXBDCTRL	0x04cc /* TX Buffer Descriptor control register */
 #define GEM_RXBDCTRL	0x04d0 /* RX Buffer Descriptor control register */
 
+/* Screener Type 2 match registers */
+#define GEM_SCRT2		0x540
+
+/* EtherType registers */
+#define GEM_ETHT		0x06E0
+
+/* Type 2 compare registers */
+#define GEM_T2CMPW0		0x0700
+#define GEM_T2CMPW1		0x0704
+#define T2CMP_OFST(t2idx)	(t2idx * 2)
+
+/* Type 2 compare register allocation:
+ * each filter location uses three consecutive compare regs
+ */
+#define GEM_IP4SRC_CMP(idx)		(idx * 3)
+#define GEM_IP4DST_CMP(idx)		(idx * 3 + 1)
+#define GEM_PORT_CMP(idx)		(idx * 3 + 2)
+
+/* Which screening type 2 EtherType register will be used (0 - 7) */
+#define SCRT2_ETHT		0
+
 #define GEM_ISR(hw_q)		(0x0400 + ((hw_q) << 2))
 #define GEM_TBQP(hw_q)		(0x0440 + ((hw_q) << 2))
 #define GEM_TBQPH(hw_q)		(0x04C8)
 #define GEM_RBQP(hw_q)		(0x0480 + ((hw_q) << 2))
+#define GEM_RBQS(hw_q)		(0x04A0 + ((hw_q) << 2))
+#define GEM_RBQPH(hw_q)		(0x04D4)
 #define GEM_IER(hw_q)		(0x0600 + ((hw_q) << 2))
 #define GEM_IDR(hw_q)		(0x0620 + ((hw_q) << 2))
 #define GEM_IMR(hw_q)		(0x0640 + ((hw_q) << 2))
@@ -455,6 +480,16 @@
 #define GEM_DAW64_OFFSET			23
 #define GEM_DAW64_SIZE				1
 
+/* Bitfields in DCFG8. */
+#define GEM_T1SCR_OFFSET			24
+#define GEM_T1SCR_SIZE				8
+#define GEM_T2SCR_OFFSET			16
+#define GEM_T2SCR_SIZE				8
+#define GEM_SCR2ETH_OFFSET			8
+#define GEM_SCR2ETH_SIZE			8
+#define GEM_SCR2CMP_OFFSET			0
+#define GEM_SCR2CMP_SIZE			8
+
 /* Bitfields in TISUBN */
 #define GEM_SUBNSINCR_OFFSET			0
 #define GEM_SUBNSINCR_SIZE			16
@@ -483,6 +518,66 @@
 #define GEM_RXTSMODE_OFFSET			4 /* RX Descriptor Timestamp Insertion mode */
 #define GEM_RXTSMODE_SIZE			2
 
+/* Bitfields in SCRT2 */
+#define GEM_QUEUE_OFFSET			0 /* Queue Number */
+#define GEM_QUEUE_SIZE				4
+#define GEM_VLANPR_OFFSET			4 /* VLAN Priority */
+#define GEM_VLANPR_SIZE				3
+#define GEM_VLANEN_OFFSET			8 /* VLAN Enable */
+#define GEM_VLANEN_SIZE				1
+#define GEM_ETHT2IDX_OFFSET			9 /* Index to screener type 2 EtherType register */
+#define GEM_ETHT2IDX_SIZE			3
+#define GEM_ETHTEN_OFFSET			12 /* EtherType Enable */
+#define GEM_ETHTEN_SIZE				1
+#define GEM_CMPA_OFFSET				13 /* Compare A - Index to screener type 2 Compare register */
+#define GEM_CMPA_SIZE				5
+#define GEM_CMPAEN_OFFSET			18 /* Compare A Enable */
+#define GEM_CMPAEN_SIZE				1
+#define GEM_CMPB_OFFSET				19 /* Compare B - Index to screener type 2 Compare register */
+#define GEM_CMPB_SIZE				5
+#define GEM_CMPBEN_OFFSET			24 /* Compare B Enable */
+#define GEM_CMPBEN_SIZE				1
+#define GEM_CMPC_OFFSET				25 /* Compare C - Index to screener type 2 Compare register */
+#define GEM_CMPC_SIZE				5
+#define GEM_CMPCEN_OFFSET			30 /* Compare C Enable */
+#define GEM_CMPCEN_SIZE				1
+
+/* Bitfields in ETHT */
+#define GEM_ETHTCMP_OFFSET			0 /* EtherType compare value */
+#define GEM_ETHTCMP_SIZE			16
+
+/* Bitfields in T2CMPW0 */
+#define GEM_T2CMP_OFFSET			16 /* 0xFFFF0000 compare value */
+#define GEM_T2CMP_SIZE				16
+#define GEM_T2MASK_OFFSET			0 /* 0x0000FFFF compare value or mask */
+#define GEM_T2MASK_SIZE				16
+
+/* Bitfields in T2CMPW1 */
+#define GEM_T2DISMSK_OFFSET			9 /* disable mask */
+#define GEM_T2DISMSK_SIZE			1
+#define GEM_T2CMPOFST_OFFSET			7 /* compare offset */
+#define GEM_T2CMPOFST_SIZE			2
+#define GEM_T2OFST_OFFSET			0 /* offset value */
+#define GEM_T2OFST_SIZE				7
+
+/* Offset for screener type 2 compare values (T2CMPOFST).
+ * Note the offset is applied after the specified point,
+ * e.g. GEM_T2COMPOFST_ETYPE denotes the EtherType field, so an offset
+ * of 12 bytes from this would be the source IP address in an IP header
+ */
+#define GEM_T2COMPOFST_SOF		0
+#define GEM_T2COMPOFST_ETYPE	1
+#define GEM_T2COMPOFST_IPHDR	2
+#define GEM_T2COMPOFST_TCPUDP	3
+
+/* offset from EtherType to IP address */
+#define ETYPE_SRCIP_OFFSET			12
+#define ETYPE_DSTIP_OFFSET			16
+
+/* offset from IP header to port */
+#define IPHDR_SRCPORT_OFFSET		0
+#define IPHDR_DSTPORT_OFFSET		2
+
 /* Transmit DMA buffer descriptor Word 1 */
 #define GEM_DMA_TXVALID_OFFSET		23 /* timestamp has been captured in the Buffer Descriptor */
 #define GEM_DMA_TXVALID_SIZE		1
@@ -583,6 +678,8 @@
 #define gem_writel(port, reg, value)	(port)->macb_reg_writel((port), GEM_##reg, (value))
 #define queue_readl(queue, reg)		(queue)->bp->macb_reg_readl((queue)->bp, (queue)->reg)
 #define queue_writel(queue, reg, value)	(queue)->bp->macb_reg_writel((queue)->bp, (queue)->reg, (value))
+#define gem_readl_n(port, reg, idx)		(port)->macb_reg_readl((port), GEM_##reg + idx * 4)
+#define gem_writel_n(port, reg, idx, value)	(port)->macb_reg_writel((port), GEM_##reg + idx * 4, (value))
 
 #define PTP_TS_BUFFER_SIZE		128 /* must be power of 2 */
 
@@ -920,13 +1017,42 @@ static const struct gem_statistic gem_statistics[] = {
 
 #define GEM_STATS_LEN ARRAY_SIZE(gem_statistics)
 
+#define QUEUE_STAT_TITLE(title) {	\
+	.stat_string = title,			\
+}
+
+/* per-queue statistics; every field must be unsigned long */
+struct queue_stats {
+	union {
+		unsigned long first;
+		unsigned long rx_packets;
+	};
+	unsigned long rx_bytes;
+	unsigned long rx_dropped;
+	unsigned long tx_packets;
+	unsigned long tx_bytes;
+	unsigned long tx_dropped;
+};
+
+static const struct gem_statistic queue_statistics[] = {
+		QUEUE_STAT_TITLE("rx_packets"),
+		QUEUE_STAT_TITLE("rx_bytes"),
+		QUEUE_STAT_TITLE("rx_dropped"),
+		QUEUE_STAT_TITLE("tx_packets"),
+		QUEUE_STAT_TITLE("tx_bytes"),
+		QUEUE_STAT_TITLE("tx_dropped"),
+};
+
+#define QUEUE_STATS_LEN ARRAY_SIZE(queue_statistics)
+
 struct macb;
+struct macb_queue;
 
 struct macb_or_gem_ops {
 	int	(*mog_alloc_rx_buffers)(struct macb *bp);
 	void	(*mog_free_rx_buffers)(struct macb *bp);
 	void	(*mog_init_rings)(struct macb *bp);
-	int	(*mog_rx)(struct macb *bp, int budget);
+	int	(*mog_rx)(struct macb_queue *queue, int budget);
 };
 
 /* MACB-PTP interface: adapt to platform needs. */
@@ -968,6 +1094,9 @@ struct macb_queue {
 	unsigned int		IMR;
 	unsigned int		TBQP;
 	unsigned int		TBQPH;
+	unsigned int		RBQS;
+	unsigned int		RBQP;
+	unsigned int		RBQPH;
 
 	unsigned int		tx_head, tx_tail;
 	struct macb_dma_desc	*tx_ring;
@@ -975,6 +1104,16 @@ struct macb_queue {
 	dma_addr_t		tx_ring_dma;
 	struct work_struct	tx_error_task;
 
+	dma_addr_t		rx_ring_dma;
+	dma_addr_t		rx_buffers_dma;
+	unsigned int		rx_tail;
+	unsigned int		rx_prepared_head;
+	struct macb_dma_desc	*rx_ring;
+	struct sk_buff		**rx_skbuff;
+	void			*rx_buffers;
+	struct napi_struct	napi;
+	struct queue_stats stats;
+
 #ifdef CONFIG_MACB_USE_HWSTAMP
 	struct work_struct	tx_ts_task;
 	unsigned int		tx_ts_head, tx_ts_tail;
@@ -982,6 +1121,16 @@ struct macb_queue {
 #endif
 };
 
+struct ethtool_rx_fs_item {
+	struct ethtool_rx_flow_spec fs;
+	struct list_head list;
+};
+
+struct ethtool_rx_fs_list {
+	struct list_head list;
+	unsigned int count;
+};
+
 struct macb {
 	void __iomem		*regs;
 	bool			native_io;
@@ -990,11 +1139,6 @@ struct macb {
 	u32	(*macb_reg_readl)(struct macb *bp, int offset);
 	void	(*macb_reg_writel)(struct macb *bp, int offset, u32 value);
 
-	unsigned int		rx_tail;
-	unsigned int		rx_prepared_head;
-	struct macb_dma_desc	*rx_ring;
-	struct sk_buff		**rx_skbuff;
-	void			*rx_buffers;
 	size_t			rx_buffer_size;
 
 	unsigned int		rx_ring_size;
@@ -1011,15 +1155,11 @@ struct macb {
 	struct clk		*tx_clk;
 	struct clk		*rx_clk;
 	struct net_device	*dev;
-	struct napi_struct	napi;
 	union {
 		struct macb_stats	macb;
 		struct gem_stats	gem;
 	}			hw_stats;
 
-	dma_addr_t		rx_ring_dma;
-	dma_addr_t		rx_buffers_dma;
-
 	struct macb_or_gem_ops	macbgem_ops;
 
 	struct mii_bus		*mii_bus;
@@ -1032,7 +1172,6 @@ struct macb {
 	unsigned int		dma_burst_length;
 
 	phy_interface_t		phy_interface;
-	struct gpio_desc	*reset_gpio;
 
 	/* AT91RM9200 transmit */
 	struct sk_buff *skb;			/* holds skb until xmit interrupt completes */
@@ -1040,7 +1179,7 @@ struct macb {
 	int skb_length;				/* saved skb length for pci_unmap_single */
 	unsigned int		max_tx_length;
 
-	u64			ethtool_stats[GEM_STATS_LEN];
+	u64			ethtool_stats[GEM_STATS_LEN + QUEUE_STATS_LEN * MACB_MAX_QUEUES];
 
 	unsigned int		rx_frm_len_mask;
 	unsigned int		jumbo_max_len;
@@ -1057,6 +1196,13 @@ struct macb {
 	struct ptp_clock_info ptp_clock_info;
 	struct tsu_incr tsu_incr;
 	struct hwtstamp_config tstamp_config;
+
+	/* RX queue filer rule set*/
+	struct ethtool_rx_fs_list rx_fs_list;
+	spinlock_t rx_fs_lock;
+	unsigned int max_tuples;
+
+	struct tasklet_struct	hresp_err_tasklet;
 };
 
 #ifdef CONFIG_MACB_USE_HWSTAMP
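
Each new register field above is described by an _OFFSET/_SIZE pair and manipulated through the driver's GEM_BF/GEM_BFEXT/GEM_BFINS helpers. A standalone illustration of that insert/extract pattern using the SCRT2 QUEUE and VLANPR fields; the macro bodies below follow the macb convention but are restated for the example, not copied from the header:

	#include <stdint.h>
	#include <stdio.h>

	#define GEM_QUEUE_OFFSET	0	/* from the SCRT2 bitfields above */
	#define GEM_QUEUE_SIZE		4
	#define GEM_VLANPR_OFFSET	4
	#define GEM_VLANPR_SIZE		3

	/* build, extract and insert a named field (macb-style token pasting) */
	#define BF(name, v)	(((v) & ((1u << name##_SIZE) - 1)) << name##_OFFSET)
	#define BFEXT(name, w)	(((w) >> name##_OFFSET) & ((1u << name##_SIZE) - 1))
	#define BFINS(name, v, w) \
		(((w) & ~(((1u << name##_SIZE) - 1) << name##_OFFSET)) | BF(name, v))

	int main(void)
	{
		uint32_t scrt2 = 0;

		scrt2 = BFINS(GEM_QUEUE, 3, scrt2);	/* steer matches to queue 3 */
		scrt2 = BFINS(GEM_VLANPR, 5, scrt2);	/* match VLAN priority 5 */
		printf("reg=%#x queue=%u vlanpr=%u\n", scrt2,
		       BFEXT(GEM_QUEUE, scrt2),
		       BFEXT(GEM_VLANPR, scrt2));	/* reg=0x53 queue=3 vlanpr=5 */
		return 0;
	}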
diff --git a/drivers/net/ethernet/cadence/macb_main.c b/drivers/net/ethernet/cadence/macb_main.c
index 72a67f74b97b..e84afcf1ecb5 100644
--- a/drivers/net/ethernet/cadence/macb_main.c
+++ b/drivers/net/ethernet/cadence/macb_main.c
@@ -194,17 +194,17 @@ static unsigned int macb_rx_ring_wrap(struct macb *bp, unsigned int index)
 	return index & (bp->rx_ring_size - 1);
 }
 
-static struct macb_dma_desc *macb_rx_desc(struct macb *bp, unsigned int index)
+static struct macb_dma_desc *macb_rx_desc(struct macb_queue *queue, unsigned int index)
 {
-	index = macb_rx_ring_wrap(bp, index);
-	index = macb_adj_dma_desc_idx(bp, index);
-	return &bp->rx_ring[index];
+	index = macb_rx_ring_wrap(queue->bp, index);
+	index = macb_adj_dma_desc_idx(queue->bp, index);
+	return &queue->rx_ring[index];
 }
 
-static void *macb_rx_buffer(struct macb *bp, unsigned int index)
+static void *macb_rx_buffer(struct macb_queue *queue, unsigned int index)
 {
-	return bp->rx_buffers + bp->rx_buffer_size *
-	       macb_rx_ring_wrap(bp, index);
+	return queue->rx_buffers + queue->bp->rx_buffer_size *
+	       macb_rx_ring_wrap(queue->bp, index);
 }
 
 /* I/O accessors */
@@ -759,7 +759,9 @@ static void macb_tx_error_task(struct work_struct *work)
 					    macb_tx_ring_wrap(bp, tail),
 					    skb->data);
 				bp->dev->stats.tx_packets++;
+				queue->stats.tx_packets++;
 				bp->dev->stats.tx_bytes += skb->len;
+				queue->stats.tx_bytes += skb->len;
 			}
 		} else {
 			/* "Buffers exhausted mid-frame" errors may only happen
@@ -859,7 +861,9 @@ static void macb_tx_interrupt(struct macb_queue *queue)
 					    macb_tx_ring_wrap(bp, tail),
 					    skb->data);
 				bp->dev->stats.tx_packets++;
+				queue->stats.tx_packets++;
 				bp->dev->stats.tx_bytes += skb->len;
+				queue->stats.tx_bytes += skb->len;
 			}
 
 			/* Now we can safely release resources */
@@ -881,24 +885,25 @@ static void macb_tx_interrupt(struct macb_queue *queue)
 		netif_wake_subqueue(bp->dev, queue_index);
 }
 
-static void gem_rx_refill(struct macb *bp)
+static void gem_rx_refill(struct macb_queue *queue)
 {
 	unsigned int		entry;
 	struct sk_buff		*skb;
 	dma_addr_t		paddr;
+	struct macb *bp = queue->bp;
 	struct macb_dma_desc *desc;
 
-	while (CIRC_SPACE(bp->rx_prepared_head, bp->rx_tail,
-			  bp->rx_ring_size) > 0) {
-		entry = macb_rx_ring_wrap(bp, bp->rx_prepared_head);
+	while (CIRC_SPACE(queue->rx_prepared_head, queue->rx_tail,
+			bp->rx_ring_size) > 0) {
+		entry = macb_rx_ring_wrap(bp, queue->rx_prepared_head);
 
 		/* Make hw descriptor updates visible to CPU */
 		rmb();
 
-		bp->rx_prepared_head++;
-		desc = macb_rx_desc(bp, entry);
+		queue->rx_prepared_head++;
+		desc = macb_rx_desc(queue, entry);
 
-		if (!bp->rx_skbuff[entry]) {
+		if (!queue->rx_skbuff[entry]) {
 			/* allocate sk_buff for this free entry in ring */
 			skb = netdev_alloc_skb(bp->dev, bp->rx_buffer_size);
 			if (unlikely(!skb)) {
@@ -916,7 +921,7 @@ static void gem_rx_refill(struct macb *bp)
 				break;
 			}
 
-			bp->rx_skbuff[entry] = skb;
+			queue->rx_skbuff[entry] = skb;
 
 			if (entry == bp->rx_ring_size - 1)
 				paddr |= MACB_BIT(RX_WRAP);
@@ -934,18 +939,18 @@ static void gem_rx_refill(struct macb *bp)
 	/* Make descriptor updates visible to hardware */
 	wmb();
 
-	netdev_vdbg(bp->dev, "rx ring: prepared head %d, tail %d\n",
-		    bp->rx_prepared_head, bp->rx_tail);
+	netdev_vdbg(bp->dev, "rx ring: queue: %p, prepared head %d, tail %d\n",
+			queue, queue->rx_prepared_head, queue->rx_tail);
 }
 
 /* Mark DMA descriptors from begin up to and not including end as unused */
-static void discard_partial_frame(struct macb *bp, unsigned int begin,
+static void discard_partial_frame(struct macb_queue *queue, unsigned int begin,
 				  unsigned int end)
 {
 	unsigned int frag;
 
 	for (frag = begin; frag != end; frag++) {
-		struct macb_dma_desc *desc = macb_rx_desc(bp, frag);
+		struct macb_dma_desc *desc = macb_rx_desc(queue, frag);
 
 		desc->addr &= ~MACB_BIT(RX_USED);
 	}
@@ -959,8 +964,9 @@ static void discard_partial_frame(struct macb *bp, unsigned int begin,
 	 */
 }
 
-static int gem_rx(struct macb *bp, int budget)
+static int gem_rx(struct macb_queue *queue, int budget)
 {
+	struct macb *bp = queue->bp;
 	unsigned int		len;
 	unsigned int		entry;
 	struct sk_buff		*skb;
@@ -972,8 +978,8 @@ static int gem_rx(struct macb *bp, int budget)
 		dma_addr_t addr;
 		bool rxused;
 
-		entry = macb_rx_ring_wrap(bp, bp->rx_tail);
-		desc = macb_rx_desc(bp, entry);
+		entry = macb_rx_ring_wrap(bp, queue->rx_tail);
+		desc = macb_rx_desc(queue, entry);
 
 		/* Make hw descriptor updates visible to CPU */
 		rmb();
@@ -985,24 +991,26 @@ static int gem_rx(struct macb *bp, int budget)
 		if (!rxused)
 			break;
 
-		bp->rx_tail++;
+		queue->rx_tail++;
 		count++;
 
 		if (!(ctrl & MACB_BIT(RX_SOF) && ctrl & MACB_BIT(RX_EOF))) {
 			netdev_err(bp->dev,
 				   "not whole frame pointed by descriptor\n");
 			bp->dev->stats.rx_dropped++;
+			queue->stats.rx_dropped++;
 			break;
 		}
-		skb = bp->rx_skbuff[entry];
+		skb = queue->rx_skbuff[entry];
 		if (unlikely(!skb)) {
 			netdev_err(bp->dev,
 				   "inconsistent Rx descriptor chain\n");
 			bp->dev->stats.rx_dropped++;
+			queue->stats.rx_dropped++;
 			break;
 		}
 		/* now everything is ready for receiving packet */
-		bp->rx_skbuff[entry] = NULL;
+		queue->rx_skbuff[entry] = NULL;
 		len = ctrl & bp->rx_frm_len_mask;
 
 		netdev_vdbg(bp->dev, "gem_rx %u (len %u)\n", entry, len);
@@ -1019,7 +1027,9 @@ static int gem_rx(struct macb *bp, int budget)
 			skb->ip_summed = CHECKSUM_UNNECESSARY;
 
 		bp->dev->stats.rx_packets++;
+		queue->stats.rx_packets++;
 		bp->dev->stats.rx_bytes += skb->len;
+		queue->stats.rx_bytes += skb->len;
 
 		gem_ptp_do_rxstamp(bp, skb, desc);
 
@@ -1035,12 +1045,12 @@ static int gem_rx(struct macb *bp, int budget)
 		netif_receive_skb(skb);
 	}
 
-	gem_rx_refill(bp);
+	gem_rx_refill(queue);
 
 	return count;
 }
 
-static int macb_rx_frame(struct macb *bp, unsigned int first_frag,
+static int macb_rx_frame(struct macb_queue *queue, unsigned int first_frag,
 			 unsigned int last_frag)
 {
 	unsigned int len;
@@ -1048,8 +1058,9 @@ static int macb_rx_frame(struct macb *bp, unsigned int first_frag,
 	unsigned int offset;
 	struct sk_buff *skb;
 	struct macb_dma_desc *desc;
+	struct macb *bp = queue->bp;
 
-	desc = macb_rx_desc(bp, last_frag);
+	desc = macb_rx_desc(queue, last_frag);
 	len = desc->ctrl & bp->rx_frm_len_mask;
 
 	netdev_vdbg(bp->dev, "macb_rx_frame frags %u - %u (len %u)\n",
@@ -1068,7 +1079,7 @@ static int macb_rx_frame(struct macb *bp, unsigned int first_frag,
 	if (!skb) {
 		bp->dev->stats.rx_dropped++;
 		for (frag = first_frag; ; frag++) {
-			desc = macb_rx_desc(bp, frag);
+			desc = macb_rx_desc(queue, frag);
 			desc->addr &= ~MACB_BIT(RX_USED);
 			if (frag == last_frag)
 				break;
@@ -1096,10 +1107,10 @@ static int macb_rx_frame(struct macb *bp, unsigned int first_frag,
 			frag_len = len - offset;
 		}
 		skb_copy_to_linear_data_offset(skb, offset,
-					       macb_rx_buffer(bp, frag),
+					       macb_rx_buffer(queue, frag),
 					       frag_len);
 		offset += bp->rx_buffer_size;
-		desc = macb_rx_desc(bp, frag);
+		desc = macb_rx_desc(queue, frag);
 		desc->addr &= ~MACB_BIT(RX_USED);
 
 		if (frag == last_frag)
@@ -1121,32 +1132,34 @@ static int macb_rx_frame(struct macb *bp, unsigned int first_frag,
 	return 0;
 }
 
-static inline void macb_init_rx_ring(struct macb *bp)
+static inline void macb_init_rx_ring(struct macb_queue *queue)
 {
+	struct macb *bp = queue->bp;
 	dma_addr_t addr;
 	struct macb_dma_desc *desc = NULL;
 	int i;
 
-	addr = bp->rx_buffers_dma;
+	addr = queue->rx_buffers_dma;
 	for (i = 0; i < bp->rx_ring_size; i++) {
-		desc = macb_rx_desc(bp, i);
+		desc = macb_rx_desc(queue, i);
 		macb_set_addr(bp, desc, addr);
 		desc->ctrl = 0;
 		addr += bp->rx_buffer_size;
 	}
 	desc->addr |= MACB_BIT(RX_WRAP);
-	bp->rx_tail = 0;
+	queue->rx_tail = 0;
 }
 
-static int macb_rx(struct macb *bp, int budget)
+static int macb_rx(struct macb_queue *queue, int budget)
 {
+	struct macb *bp = queue->bp;
 	bool reset_rx_queue = false;
 	int received = 0;
 	unsigned int tail;
 	int first_frag = -1;
 
-	for (tail = bp->rx_tail; budget > 0; tail++) {
-		struct macb_dma_desc *desc = macb_rx_desc(bp, tail);
+	for (tail = queue->rx_tail; budget > 0; tail++) {
+		struct macb_dma_desc *desc = macb_rx_desc(queue, tail);
 		u32 ctrl;
 
 		/* Make hw descriptor updates visible to CPU */
@@ -1159,7 +1172,7 @@ static int macb_rx(struct macb *bp, int budget)
 
 		if (ctrl & MACB_BIT(RX_SOF)) {
 			if (first_frag != -1)
-				discard_partial_frame(bp, first_frag, tail);
+				discard_partial_frame(queue, first_frag, tail);
 			first_frag = tail;
 		}
 
@@ -1171,7 +1184,7 @@ static int macb_rx(struct macb *bp, int budget)
 				continue;
 			}
 
-			dropped = macb_rx_frame(bp, first_frag, tail);
+			dropped = macb_rx_frame(queue, first_frag, tail);
 			first_frag = -1;
 			if (unlikely(dropped < 0)) {
 				reset_rx_queue = true;
@@ -1195,8 +1208,8 @@ static int macb_rx(struct macb *bp, int budget)
 		ctrl = macb_readl(bp, NCR);
 		macb_writel(bp, NCR, ctrl & ~MACB_BIT(RE));
 
-		macb_init_rx_ring(bp);
-		macb_writel(bp, RBQP, bp->rx_ring_dma);
+		macb_init_rx_ring(queue);
+		queue_writel(queue, RBQP, queue->rx_ring_dma);
 
 		macb_writel(bp, NCR, ctrl | MACB_BIT(RE));
 
@@ -1205,16 +1218,17 @@ static int macb_rx(struct macb *bp, int budget)
 	}
 
 	if (first_frag != -1)
-		bp->rx_tail = first_frag;
+		queue->rx_tail = first_frag;
 	else
-		bp->rx_tail = tail;
+		queue->rx_tail = tail;
 
 	return received;
 }
 
 static int macb_poll(struct napi_struct *napi, int budget)
 {
-	struct macb *bp = container_of(napi, struct macb, napi);
+	struct macb_queue *queue = container_of(napi, struct macb_queue, napi);
+	struct macb *bp = queue->bp;
 	int work_done;
 	u32 status;
 
@@ -1224,7 +1238,7 @@ static int macb_poll(struct napi_struct *napi, int budget)
 	netdev_vdbg(bp->dev, "poll: status = %08lx, budget = %d\n",
 		    (unsigned long)status, budget);
 
-	work_done = bp->macbgem_ops.mog_rx(bp, budget);
+	work_done = bp->macbgem_ops.mog_rx(queue, budget);
 	if (work_done < budget) {
 		napi_complete_done(napi, work_done);
 
@@ -1232,10 +1246,10 @@ static int macb_poll(struct napi_struct *napi, int budget)
 		status = macb_readl(bp, RSR);
 		if (status) {
 			if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE)
-				macb_writel(bp, ISR, MACB_BIT(RCOMP));
+				queue_writel(queue, ISR, MACB_BIT(RCOMP));
 			napi_reschedule(napi);
 		} else {
-			macb_writel(bp, IER, MACB_RX_INT_FLAGS);
+			queue_writel(queue, IER, MACB_RX_INT_FLAGS);
 		}
 	}
 
@@ -1244,6 +1258,57 @@ static int macb_poll(struct napi_struct *napi, int budget)
 	return work_done;
 }
 
+static void macb_hresp_error_task(unsigned long data)
+{
+	struct macb *bp = (struct macb *)data;
+	struct net_device *dev = bp->dev;
+	struct macb_queue *queue = bp->queues;
+	unsigned int q;
+	u32 ctrl;
+
+	for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) {
+		queue_writel(queue, IDR, MACB_RX_INT_FLAGS |
+					 MACB_TX_INT_FLAGS |
+					 MACB_BIT(HRESP));
+	}
+	ctrl = macb_readl(bp, NCR);
+	ctrl &= ~(MACB_BIT(RE) | MACB_BIT(TE));
+	macb_writel(bp, NCR, ctrl);
+
+	netif_tx_stop_all_queues(dev);
+	netif_carrier_off(dev);
+
+	bp->macbgem_ops.mog_init_rings(bp);
+
+	/* Initialize TX and RX buffers */
+	for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) {
+		queue_writel(queue, RBQP, lower_32_bits(queue->rx_ring_dma));
+#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
+		if (bp->hw_dma_cap & HW_DMA_CAP_64B)
+			queue_writel(queue, RBQPH,
+				     upper_32_bits(queue->rx_ring_dma));
+#endif
+		queue_writel(queue, TBQP, lower_32_bits(queue->tx_ring_dma));
+#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
+		if (bp->hw_dma_cap & HW_DMA_CAP_64B)
+			queue_writel(queue, TBQPH,
+				     upper_32_bits(queue->tx_ring_dma));
+#endif
+
+		/* Enable interrupts */
+		queue_writel(queue, IER,
+			     MACB_RX_INT_FLAGS |
+			     MACB_TX_INT_FLAGS |
+			     MACB_BIT(HRESP));
+	}
+
+	ctrl |= MACB_BIT(RE) | MACB_BIT(TE);
+	macb_writel(bp, NCR, ctrl);
+
+	netif_carrier_on(dev);
+	netif_tx_start_all_queues(dev);
+}
+
 static irqreturn_t macb_interrupt(int irq, void *dev_id)
 {
 	struct macb_queue *queue = dev_id;
@@ -1282,9 +1347,9 @@ static irqreturn_t macb_interrupt(int irq, void *dev_id)
 			if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE)
 				queue_writel(queue, ISR, MACB_BIT(RCOMP));
 
-			if (napi_schedule_prep(&bp->napi)) {
+			if (napi_schedule_prep(&queue->napi)) {
 				netdev_vdbg(bp->dev, "scheduling RX softirq\n");
-				__napi_schedule(&bp->napi);
+				__napi_schedule(&queue->napi);
 			}
 		}
 
@@ -1333,10 +1398,7 @@ static irqreturn_t macb_interrupt(int irq, void *dev_id)
 		}
 
 		if (status & MACB_BIT(HRESP)) {
-			/* TODO: Reset the hardware, and maybe move the
-			 * netdev_err to a lower-priority context as well
-			 * (work queue?)
-			 */
+			tasklet_schedule(&bp->hresp_err_tasklet);
 			netdev_err(dev, "DMA bus error: HRESP not OK\n");
 
 			if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE)
@@ -1708,38 +1770,44 @@ static void gem_free_rx_buffers(struct macb *bp)
 {
 	struct sk_buff		*skb;
 	struct macb_dma_desc	*desc;
+	struct macb_queue *queue;
 	dma_addr_t		addr;
+	unsigned int q;
 	int i;
 
-	if (!bp->rx_skbuff)
-		return;
+	for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) {
+		if (!queue->rx_skbuff)
+			continue;
 
-	for (i = 0; i < bp->rx_ring_size; i++) {
-		skb = bp->rx_skbuff[i];
+		for (i = 0; i < bp->rx_ring_size; i++) {
+			skb = queue->rx_skbuff[i];
 
-		if (!skb)
-			continue;
+			if (!skb)
+				continue;
 
-		desc = macb_rx_desc(bp, i);
-		addr = macb_get_addr(bp, desc);
+			desc = macb_rx_desc(queue, i);
+			addr = macb_get_addr(bp, desc);
 
-		dma_unmap_single(&bp->pdev->dev, addr, bp->rx_buffer_size,
-				 DMA_FROM_DEVICE);
-		dev_kfree_skb_any(skb);
-		skb = NULL;
-	}
+			dma_unmap_single(&bp->pdev->dev, addr, bp->rx_buffer_size,
+					DMA_FROM_DEVICE);
+			dev_kfree_skb_any(skb);
+			skb = NULL;
+		}
 
-	kfree(bp->rx_skbuff);
-	bp->rx_skbuff = NULL;
+		kfree(queue->rx_skbuff);
+		queue->rx_skbuff = NULL;
+	}
 }
 
 static void macb_free_rx_buffers(struct macb *bp)
 {
-	if (bp->rx_buffers) {
+	struct macb_queue *queue = &bp->queues[0];
+
+	if (queue->rx_buffers) {
 		dma_free_coherent(&bp->pdev->dev,
 				  bp->rx_ring_size * bp->rx_buffer_size,
-				  bp->rx_buffers, bp->rx_buffers_dma);
-		bp->rx_buffers = NULL;
+				  queue->rx_buffers, queue->rx_buffers_dma);
+		queue->rx_buffers = NULL;
 	}
 }
 
@@ -1748,11 +1816,12 @@ static void macb_free_consistent(struct macb *bp)
 	struct macb_queue *queue;
 	unsigned int q;
 
+	queue = &bp->queues[0];
 	bp->macbgem_ops.mog_free_rx_buffers(bp);
-	if (bp->rx_ring) {
+	if (queue->rx_ring) {
 		dma_free_coherent(&bp->pdev->dev, RX_RING_BYTES(bp),
-				  bp->rx_ring, bp->rx_ring_dma);
-		bp->rx_ring = NULL;
+				queue->rx_ring, queue->rx_ring_dma);
+		queue->rx_ring = NULL;
 	}
 
 	for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) {
@@ -1768,32 +1837,37 @@ static void macb_free_consistent(struct macb *bp)
 
 static int gem_alloc_rx_buffers(struct macb *bp)
 {
+	struct macb_queue *queue;
+	unsigned int q;
 	int size;
 
-	size = bp->rx_ring_size * sizeof(struct sk_buff *);
-	bp->rx_skbuff = kzalloc(size, GFP_KERNEL);
-	if (!bp->rx_skbuff)
-		return -ENOMEM;
-	else
-		netdev_dbg(bp->dev,
-			   "Allocated %d RX struct sk_buff entries at %p\n",
-			   bp->rx_ring_size, bp->rx_skbuff);
+	for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) {
+		size = bp->rx_ring_size * sizeof(struct sk_buff *);
+		queue->rx_skbuff = kzalloc(size, GFP_KERNEL);
+		if (!queue->rx_skbuff)
+			return -ENOMEM;
+		else
+			netdev_dbg(bp->dev,
+				   "Allocated %d RX struct sk_buff entries at %p\n",
+				   bp->rx_ring_size, queue->rx_skbuff);
+	}
 	return 0;
 }
 
 static int macb_alloc_rx_buffers(struct macb *bp)
 {
+	struct macb_queue *queue = &bp->queues[0];
 	int size;
 
 	size = bp->rx_ring_size * bp->rx_buffer_size;
-	bp->rx_buffers = dma_alloc_coherent(&bp->pdev->dev, size,
-					    &bp->rx_buffers_dma, GFP_KERNEL);
-	if (!bp->rx_buffers)
+	queue->rx_buffers = dma_alloc_coherent(&bp->pdev->dev, size,
+					    &queue->rx_buffers_dma, GFP_KERNEL);
+	if (!queue->rx_buffers)
 		return -ENOMEM;
 
 	netdev_dbg(bp->dev,
 		   "Allocated RX buffers of %d bytes at %08lx (mapped %p)\n",
-		   size, (unsigned long)bp->rx_buffers_dma, bp->rx_buffers);
+		   size, (unsigned long)queue->rx_buffers_dma, queue->rx_buffers);
 	return 0;
 }
 
@@ -1819,17 +1893,16 @@ static int macb_alloc_consistent(struct macb *bp)
 		queue->tx_skb = kmalloc(size, GFP_KERNEL);
 		if (!queue->tx_skb)
 			goto out_err;
-	}
-
-	size = RX_RING_BYTES(bp);
-	bp->rx_ring = dma_alloc_coherent(&bp->pdev->dev, size,
-					 &bp->rx_ring_dma, GFP_KERNEL);
-	if (!bp->rx_ring)
-		goto out_err;
-	netdev_dbg(bp->dev,
-		   "Allocated RX ring of %d bytes at %08lx (mapped %p)\n",
-		   size, (unsigned long)bp->rx_ring_dma, bp->rx_ring);
 
+		size = RX_RING_BYTES(bp);
+		queue->rx_ring = dma_alloc_coherent(&bp->pdev->dev, size,
+						 &queue->rx_ring_dma, GFP_KERNEL);
+		if (!queue->rx_ring)
+			goto out_err;
+		netdev_dbg(bp->dev,
+			   "Allocated RX ring of %d bytes at %08lx (mapped %p)\n",
+			   size, (unsigned long)queue->rx_ring_dma, queue->rx_ring);
+	}
 	if (bp->macbgem_ops.mog_alloc_rx_buffers(bp))
 		goto out_err;
 
@@ -1856,12 +1929,13 @@ static void gem_init_rings(struct macb *bp)
 		desc->ctrl |= MACB_BIT(TX_WRAP);
 		queue->tx_head = 0;
 		queue->tx_tail = 0;
-	}
 
-	bp->rx_tail = 0;
-	bp->rx_prepared_head = 0;
+		queue->rx_tail = 0;
+		queue->rx_prepared_head = 0;
+
+		gem_rx_refill(queue);
+	}
 
-	gem_rx_refill(bp);
 }
 
 static void macb_init_rings(struct macb *bp)
@@ -1869,7 +1943,7 @@ static void macb_init_rings(struct macb *bp)
 	int i;
 	struct macb_dma_desc *desc = NULL;
 
-	macb_init_rx_ring(bp);
+	macb_init_rx_ring(&bp->queues[0]);
 
 	for (i = 0; i < bp->tx_ring_size; i++) {
 		desc = macb_tx_desc(&bp->queues[0], i);
@@ -1978,11 +2052,20 @@ static u32 macb_dbw(struct macb *bp)
  */
 static void macb_configure_dma(struct macb *bp)
 {
+	struct macb_queue *queue;
+	u32 buffer_size;
+	unsigned int q;
 	u32 dmacfg;
 
+	buffer_size = bp->rx_buffer_size / RX_BUFFER_MULTIPLE;
 	if (macb_is_gem(bp)) {
 		dmacfg = gem_readl(bp, DMACFG) & ~GEM_BF(RXBS, -1L);
-		dmacfg |= GEM_BF(RXBS, bp->rx_buffer_size / RX_BUFFER_MULTIPLE);
+		for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) {
+			if (q)
+				queue_writel(queue, RBQS, buffer_size);
+			else
+				dmacfg |= GEM_BF(RXBS, buffer_size);
+		}
 		if (bp->dma_burst_length)
 			dmacfg = GEM_BFINS(FBLDO, bp->dma_burst_length, dmacfg);
 		dmacfg |= GEM_BIT(TXPBMS) | GEM_BF(RXBMS, -1L);
@@ -2051,12 +2134,12 @@ static void macb_init_hw(struct macb *bp)
 	macb_configure_dma(bp);
 
 	/* Initialize TX and RX buffers */
-	macb_writel(bp, RBQP, lower_32_bits(bp->rx_ring_dma));
+	for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) {
+		queue_writel(queue, RBQP, lower_32_bits(queue->rx_ring_dma));
 #ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
-	if (bp->hw_dma_cap & HW_DMA_CAP_64B)
-		macb_writel(bp, RBQPH, upper_32_bits(bp->rx_ring_dma));
+		if (bp->hw_dma_cap & HW_DMA_CAP_64B)
+			queue_writel(queue, RBQPH, upper_32_bits(queue->rx_ring_dma));
 #endif
-	for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) {
 		queue_writel(queue, TBQP, lower_32_bits(queue->tx_ring_dma));
 #ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 		if (bp->hw_dma_cap & HW_DMA_CAP_64B)
@@ -2197,6 +2280,8 @@ static int macb_open(struct net_device *dev)
 {
 	struct macb *bp = netdev_priv(dev);
 	size_t bufsz = dev->mtu + ETH_HLEN + ETH_FCS_LEN + NET_IP_ALIGN;
+	struct macb_queue *queue;
+	unsigned int q;
 	int err;
 
 	netdev_dbg(bp->dev, "open\n");
@@ -2218,11 +2303,12 @@ static int macb_open(struct net_device *dev)
 		return err;
 	}
 
-	napi_enable(&bp->napi);
-
 	bp->macbgem_ops.mog_init_rings(bp);
 	macb_init_hw(bp);
 
+	for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue)
+		napi_enable(&queue->napi);
+
 	/* schedule a link state check */
 	phy_start(dev->phydev);
 
@@ -2237,10 +2323,14 @@ static int macb_open(struct net_device *dev)
 static int macb_close(struct net_device *dev)
 {
 	struct macb *bp = netdev_priv(dev);
+	struct macb_queue *queue;
 	unsigned long flags;
+	unsigned int q;
 
 	netif_tx_stop_all_queues(dev);
-	napi_disable(&bp->napi);
+
+	for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue)
+		napi_disable(&queue->napi);
 
 	if (dev->phydev)
 		phy_stop(dev->phydev);
@@ -2270,7 +2360,10 @@ static int macb_change_mtu(struct net_device *dev, int new_mtu)
 
 static void gem_update_stats(struct macb *bp)
 {
-	unsigned int i;
+	struct macb_queue *queue;
+	unsigned int i, q, idx;
+	unsigned long *stat;
+
 	u32 *p = &bp->hw_stats.gem.tx_octets_31_0;
 
 	for (i = 0; i < GEM_STATS_LEN; ++i, ++p) {
@@ -2287,6 +2380,11 @@ static void gem_update_stats(struct macb *bp)
 			*(++p) += val;
 		}
 	}
+
+	idx = GEM_STATS_LEN;
+	for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue)
+		for (i = 0, stat = &queue->stats.first; i < QUEUE_STATS_LEN; ++i, ++stat)
+			bp->ethtool_stats[idx++] = *stat;
 }
 
 static struct net_device_stats *gem_get_stats(struct macb *bp)
@@ -2334,14 +2432,17 @@ static void gem_get_ethtool_stats(struct net_device *dev,
 
 	bp = netdev_priv(dev);
 	gem_update_stats(bp);
-	memcpy(data, &bp->ethtool_stats, sizeof(u64) * GEM_STATS_LEN);
+	memcpy(data, &bp->ethtool_stats, sizeof(u64)
+			* (GEM_STATS_LEN + QUEUE_STATS_LEN * bp->num_queues));
 }
 
 static int gem_get_sset_count(struct net_device *dev, int sset)
 {
+	struct macb *bp = netdev_priv(dev);
+
 	switch (sset) {
 	case ETH_SS_STATS:
-		return GEM_STATS_LEN;
+		return GEM_STATS_LEN + bp->num_queues * QUEUE_STATS_LEN;
 	default:
 		return -EOPNOTSUPP;
 	}
@@ -2349,13 +2450,25 @@ static int gem_get_sset_count(struct net_device *dev, int sset)
 
 static void gem_get_ethtool_strings(struct net_device *dev, u32 sset, u8 *p)
 {
+	char stat_string[ETH_GSTRING_LEN];
+	struct macb *bp = netdev_priv(dev);
+	struct macb_queue *queue;
 	unsigned int i;
+	unsigned int q;
 
 	switch (sset) {
 	case ETH_SS_STATS:
 		for (i = 0; i < GEM_STATS_LEN; i++, p += ETH_GSTRING_LEN)
 			memcpy(p, gem_statistics[i].stat_string,
 			       ETH_GSTRING_LEN);
+
+		for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) {
+			for (i = 0; i < QUEUE_STATS_LEN; i++, p += ETH_GSTRING_LEN) {
+				snprintf(stat_string, ETH_GSTRING_LEN, "q%d_%s",
+						q, queue_statistics[i].stat_string);
+				memcpy(p, stat_string, ETH_GSTRING_LEN);
+			}
+		}
 		break;
 	}
 }
@@ -2603,6 +2716,307 @@ static int macb_get_ts_info(struct net_device *netdev,
 	return ethtool_op_get_ts_info(netdev, info);
 }
 
+static void gem_enable_flow_filters(struct macb *bp, bool enable)
+{
+	struct ethtool_rx_fs_item *item;
+	u32 t2_scr;
+	int num_t2_scr;
+
+	num_t2_scr = GEM_BFEXT(T2SCR, gem_readl(bp, DCFG8));
+
+	list_for_each_entry(item, &bp->rx_fs_list.list, list) {
+		struct ethtool_rx_flow_spec *fs = &item->fs;
+		struct ethtool_tcpip4_spec *tp4sp_m;
+
+		if (fs->location >= num_t2_scr)
+			continue;
+
+		t2_scr = gem_readl_n(bp, SCRT2, fs->location);
+
+		/* enable/disable screener regs for the flow entry */
+		t2_scr = GEM_BFINS(ETHTEN, enable, t2_scr);
+
+		/* only enable compares for fully-specified fields */
+		tp4sp_m = &(fs->m_u.tcp_ip4_spec);
+
+		if (enable && (tp4sp_m->ip4src == 0xFFFFFFFF))
+			t2_scr = GEM_BFINS(CMPAEN, 1, t2_scr);
+		else
+			t2_scr = GEM_BFINS(CMPAEN, 0, t2_scr);
+
+		if (enable && (tp4sp_m->ip4dst == 0xFFFFFFFF))
+			t2_scr = GEM_BFINS(CMPBEN, 1, t2_scr);
+		else
+			t2_scr = GEM_BFINS(CMPBEN, 0, t2_scr);
+
+		if (enable && ((tp4sp_m->psrc == 0xFFFF) || (tp4sp_m->pdst == 0xFFFF)))
+			t2_scr = GEM_BFINS(CMPCEN, 1, t2_scr);
+		else
+			t2_scr = GEM_BFINS(CMPCEN, 0, t2_scr);
+
+		gem_writel_n(bp, SCRT2, fs->location, t2_scr);
+	}
+}
+
+static void gem_prog_cmp_regs(struct macb *bp, struct ethtool_rx_flow_spec *fs)
+{
+	struct ethtool_tcpip4_spec *tp4sp_v, *tp4sp_m;
+	uint16_t index = fs->location;
+	u32 w0, w1, t2_scr;
+	bool cmp_a = false;
+	bool cmp_b = false;
+	bool cmp_c = false;
+
+	tp4sp_v = &(fs->h_u.tcp_ip4_spec);
+	tp4sp_m = &(fs->m_u.tcp_ip4_spec);
+
+	/* use this field only when fully specified (mask all ones) */
+	if (tp4sp_m->ip4src == 0xFFFFFFFF) {
+		/* 1st compare reg - IP source address */
+		w0 = 0;
+		w1 = 0;
+		w0 = tp4sp_v->ip4src;
+		w1 = GEM_BFINS(T2DISMSK, 1, w1); /* 32-bit compare */
+		w1 = GEM_BFINS(T2CMPOFST, GEM_T2COMPOFST_ETYPE, w1);
+		w1 = GEM_BFINS(T2OFST, ETYPE_SRCIP_OFFSET, w1);
+		gem_writel_n(bp, T2CMPW0, T2CMP_OFST(GEM_IP4SRC_CMP(index)), w0);
+		gem_writel_n(bp, T2CMPW1, T2CMP_OFST(GEM_IP4SRC_CMP(index)), w1);
+		cmp_a = true;
+	}
+
+	/* use this field only when fully specified (mask all ones) */
+	if (tp4sp_m->ip4dst == 0xFFFFFFFF) {
+		/* 2nd compare reg - IP destination address */
+		w0 = 0;
+		w1 = 0;
+		w0 = tp4sp_v->ip4dst;
+		w1 = GEM_BFINS(T2DISMSK, 1, w1); /* 32-bit compare */
+		w1 = GEM_BFINS(T2CMPOFST, GEM_T2COMPOFST_ETYPE, w1);
+		w1 = GEM_BFINS(T2OFST, ETYPE_DSTIP_OFFSET, w1);
+		gem_writel_n(bp, T2CMPW0, T2CMP_OFST(GEM_IP4DST_CMP(index)), w0);
+		gem_writel_n(bp, T2CMPW1, T2CMP_OFST(GEM_IP4DST_CMP(index)), w1);
+		cmp_b = true;
+	}
+
+	/* program the port compare only if at least one port is fully specified */
+	if ((tp4sp_m->psrc == 0xFFFF) || (tp4sp_m->pdst == 0xFFFF)) {
+		/* 3rd compare reg - source port, destination port */
+		w0 = 0;
+		w1 = 0;
+		w1 = GEM_BFINS(T2CMPOFST, GEM_T2COMPOFST_IPHDR, w1);
+		if (tp4sp_m->psrc == tp4sp_m->pdst) {
+			w0 = GEM_BFINS(T2MASK, tp4sp_v->psrc, w0);
+			w0 = GEM_BFINS(T2CMP, tp4sp_v->pdst, w0);
+			w1 = GEM_BFINS(T2DISMSK, 1, w1); /* 32-bit compare */
+			w1 = GEM_BFINS(T2OFST, IPHDR_SRCPORT_OFFSET, w1);
+		} else {
+			/* only one port definition */
+			w1 = GEM_BFINS(T2DISMSK, 0, w1); /* 16-bit compare */
+			w0 = GEM_BFINS(T2MASK, 0xFFFF, w0);
+			if (tp4sp_m->psrc == 0xFFFF) { /* src port */
+				w0 = GEM_BFINS(T2CMP, tp4sp_v->psrc, w0);
+				w1 = GEM_BFINS(T2OFST, IPHDR_SRCPORT_OFFSET, w1);
+			} else { /* dst port */
+				w0 = GEM_BFINS(T2CMP, tp4sp_v->pdst, w0);
+				w1 = GEM_BFINS(T2OFST, IPHDR_DSTPORT_OFFSET, w1);
+			}
+		}
+		gem_writel_n(bp, T2CMPW0, T2CMP_OFST(GEM_PORT_CMP(index)), w0);
+		gem_writel_n(bp, T2CMPW1, T2CMP_OFST(GEM_PORT_CMP(index)), w1);
+		cmp_c = true;
+	}
+
+	t2_scr = 0;
+	t2_scr = GEM_BFINS(QUEUE, (fs->ring_cookie) & 0xFF, t2_scr);
+	t2_scr = GEM_BFINS(ETHT2IDX, SCRT2_ETHT, t2_scr);
+	if (cmp_a)
+		t2_scr = GEM_BFINS(CMPA, GEM_IP4SRC_CMP(index), t2_scr);
+	if (cmp_b)
+		t2_scr = GEM_BFINS(CMPB, GEM_IP4DST_CMP(index), t2_scr);
+	if (cmp_c)
+		t2_scr = GEM_BFINS(CMPC, GEM_PORT_CMP(index), t2_scr);
+	gem_writel_n(bp, SCRT2, index, t2_scr);
+}
+
+static int gem_add_flow_filter(struct net_device *netdev,
+		struct ethtool_rxnfc *cmd)
+{
+	struct macb *bp = netdev_priv(netdev);
+	struct ethtool_rx_flow_spec *fs = &cmd->fs;
+	struct ethtool_rx_fs_item *item, *newfs;
+	unsigned long flags;
+	int ret = -EINVAL;
+	bool added = false;
+
+	newfs = kmalloc(sizeof(*newfs), GFP_KERNEL);
+	if (newfs == NULL)
+		return -ENOMEM;
+	memcpy(&newfs->fs, fs, sizeof(newfs->fs));
+
+	netdev_dbg(netdev,
+			"Adding flow filter entry,type=%u,queue=%u,loc=%u,src=%08X,dst=%08X,ps=%u,pd=%u\n",
+			fs->flow_type, (int)fs->ring_cookie, fs->location,
+			htonl(fs->h_u.tcp_ip4_spec.ip4src),
+			htonl(fs->h_u.tcp_ip4_spec.ip4dst),
+			htons(fs->h_u.tcp_ip4_spec.psrc), htons(fs->h_u.tcp_ip4_spec.pdst));
+
+	spin_lock_irqsave(&bp->rx_fs_lock, flags);
+
+	/* find correct place to add in list */
+	list_for_each_entry(item, &bp->rx_fs_list.list, list) {
+		if (item->fs.location > newfs->fs.location) {
+			list_add_tail(&newfs->list, &item->list);
+			added = true;
+			break;
+		} else if (item->fs.location == fs->location) {
+			netdev_err(netdev, "Rule not added: location %d not free!\n",
+					fs->location);
+			ret = -EBUSY;
+			goto err;
+		}
+	}
+	if (!added)
+		list_add_tail(&newfs->list, &bp->rx_fs_list.list);
+
+	gem_prog_cmp_regs(bp, fs);
+	bp->rx_fs_list.count++;
+	/* enable filtering if NTUPLE on */
+	if (netdev->features & NETIF_F_NTUPLE)
+		gem_enable_flow_filters(bp, 1);
+
+	spin_unlock_irqrestore(&bp->rx_fs_lock, flags);
+	return 0;
+
+err:
+	spin_unlock_irqrestore(&bp->rx_fs_lock, flags);
+	kfree(newfs);
+	return ret;
+}
+
+static int gem_del_flow_filter(struct net_device *netdev,
+		struct ethtool_rxnfc *cmd)
+{
+	struct macb *bp = netdev_priv(netdev);
+	struct ethtool_rx_fs_item *item;
+	struct ethtool_rx_flow_spec *fs;
+	unsigned long flags;
+
+	spin_lock_irqsave(&bp->rx_fs_lock, flags);
+
+	list_for_each_entry(item, &bp->rx_fs_list.list, list) {
+		if (item->fs.location == cmd->fs.location) {
+			/* disable screener regs for the flow entry */
+			fs = &(item->fs);
+			netdev_dbg(netdev,
+					"Deleting flow filter entry,type=%u,queue=%u,loc=%u,src=%08X,dst=%08X,ps=%u,pd=%u\n",
+					fs->flow_type, (int)fs->ring_cookie, fs->location,
+					htonl(fs->h_u.tcp_ip4_spec.ip4src),
+					htonl(fs->h_u.tcp_ip4_spec.ip4dst),
+					htons(fs->h_u.tcp_ip4_spec.psrc),
+					htons(fs->h_u.tcp_ip4_spec.pdst));
+
+			gem_writel_n(bp, SCRT2, fs->location, 0);
+
+			list_del(&item->list);
+			bp->rx_fs_list.count--;
+			spin_unlock_irqrestore(&bp->rx_fs_lock, flags);
+			kfree(item);
+			return 0;
+		}
+	}
+
+	spin_unlock_irqrestore(&bp->rx_fs_lock, flags);
+	return -EINVAL;
+}
+
+static int gem_get_flow_entry(struct net_device *netdev,
+		struct ethtool_rxnfc *cmd)
+{
+	struct macb *bp = netdev_priv(netdev);
+	struct ethtool_rx_fs_item *item;
+
+	list_for_each_entry(item, &bp->rx_fs_list.list, list) {
+		if (item->fs.location == cmd->fs.location) {
+			memcpy(&cmd->fs, &item->fs, sizeof(cmd->fs));
+			return 0;
+		}
+	}
+	return -EINVAL;
+}
+
+static int gem_get_all_flow_entries(struct net_device *netdev,
+		struct ethtool_rxnfc *cmd, u32 *rule_locs)
+{
+	struct macb *bp = netdev_priv(netdev);
+	struct ethtool_rx_fs_item *item;
+	uint32_t cnt = 0;
+
+	list_for_each_entry(item, &bp->rx_fs_list.list, list) {
+		if (cnt == cmd->rule_cnt)
+			return -EMSGSIZE;
+		rule_locs[cnt] = item->fs.location;
+		cnt++;
+	}
+	cmd->data = bp->max_tuples;
+	cmd->rule_cnt = cnt;
+
+	return 0;
+}
+
+static int gem_get_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *cmd,
+		u32 *rule_locs)
+{
+	struct macb *bp = netdev_priv(netdev);
+	int ret = 0;
+
+	switch (cmd->cmd) {
+	case ETHTOOL_GRXRINGS:
+		cmd->data = bp->num_queues;
+		break;
+	case ETHTOOL_GRXCLSRLCNT:
+		cmd->rule_cnt = bp->rx_fs_list.count;
+		break;
+	case ETHTOOL_GRXCLSRULE:
+		ret = gem_get_flow_entry(netdev, cmd);
+		break;
+	case ETHTOOL_GRXCLSRLALL:
+		ret = gem_get_all_flow_entries(netdev, cmd, rule_locs);
+		break;
+	default:
+		netdev_err(netdev,
+			  "Command parameter %d is not supported\n", cmd->cmd);
+		ret = -EOPNOTSUPP;
+	}
+
+	return ret;
+}
+
+static int gem_set_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *cmd)
+{
+	struct macb *bp = netdev_priv(netdev);
+	int ret;
+
+	switch (cmd->cmd) {
+	case ETHTOOL_SRXCLSRLINS:
+		if ((cmd->fs.location >= bp->max_tuples)
+				|| (cmd->fs.ring_cookie >= bp->num_queues)) {
+			ret = -EINVAL;
+			break;
+		}
+		ret = gem_add_flow_filter(netdev, cmd);
+		break;
+	case ETHTOOL_SRXCLSRLDEL:
+		ret = gem_del_flow_filter(netdev, cmd);
+		break;
+	default:
+		netdev_err(netdev,
+			  "Command parameter %d is not supported\n", cmd->cmd);
+		ret = -EOPNOTSUPP;
+	}
+
+	return ret;
+}
+
 static const struct ethtool_ops macb_ethtool_ops = {
 	.get_regs_len		= macb_get_regs_len,
 	.get_regs		= macb_get_regs,
@@ -2628,6 +3042,8 @@ static const struct ethtool_ops gem_ethtool_ops = {
 	.set_link_ksettings     = phy_ethtool_set_link_ksettings,
 	.get_ringparam		= macb_get_ringparam,
 	.set_ringparam		= macb_set_ringparam,
+	.get_rxnfc			= gem_get_rxnfc,
+	.set_rxnfc			= gem_set_rxnfc,
 };
 
 static int macb_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
@@ -2685,6 +3101,12 @@ static int macb_set_features(struct net_device *netdev,
 		gem_writel(bp, NCFGR, netcfg);
 	}
 
+	/* RX Flow Filters */
+	if ((changed & NETIF_F_NTUPLE) && macb_is_gem(bp)) {
+		bool turn_on = features & NETIF_F_NTUPLE;
+
+		gem_enable_flow_filters(bp, turn_on);
+	}
 	return 0;
 }
 
@@ -2850,7 +3272,7 @@ static int macb_init(struct platform_device *pdev)
 	struct macb *bp = netdev_priv(dev);
 	struct macb_queue *queue;
 	int err;
-	u32 val;
+	u32 val, reg;
 
 	bp->tx_ring_size = DEFAULT_TX_RING_SIZE;
 	bp->rx_ring_size = DEFAULT_RX_RING_SIZE;
@@ -2865,15 +3287,20 @@ static int macb_init(struct platform_device *pdev)
 
 		queue = &bp->queues[q];
 		queue->bp = bp;
+		netif_napi_add(dev, &queue->napi, macb_poll, 64);
 		if (hw_q) {
 			queue->ISR  = GEM_ISR(hw_q - 1);
 			queue->IER  = GEM_IER(hw_q - 1);
 			queue->IDR  = GEM_IDR(hw_q - 1);
 			queue->IMR  = GEM_IMR(hw_q - 1);
 			queue->TBQP = GEM_TBQP(hw_q - 1);
+			queue->RBQP = GEM_RBQP(hw_q - 1);
+			queue->RBQS = GEM_RBQS(hw_q - 1);
 #ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
-			if (bp->hw_dma_cap & HW_DMA_CAP_64B)
+			if (bp->hw_dma_cap & HW_DMA_CAP_64B) {
 				queue->TBQPH = GEM_TBQPH(hw_q - 1);
+				queue->RBQPH = GEM_RBQPH(hw_q - 1);
+			}
 #endif
 		} else {
 			/* queue0 uses legacy registers */
@@ -2882,9 +3309,12 @@ static int macb_init(struct platform_device *pdev)
 			queue->IDR  = MACB_IDR;
 			queue->IMR  = MACB_IMR;
 			queue->TBQP = MACB_TBQP;
+			queue->RBQP = MACB_RBQP;
 #ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
-			if (bp->hw_dma_cap & HW_DMA_CAP_64B)
+			if (bp->hw_dma_cap & HW_DMA_CAP_64B) {
 				queue->TBQPH = MACB_TBQPH;
+				queue->RBQPH = MACB_RBQPH;
+			}
 #endif
 		}
 
@@ -2908,7 +3338,6 @@ static int macb_init(struct platform_device *pdev)
 	}
 
 	dev->netdev_ops = &macb_netdev_ops;
-	netif_napi_add(dev, &bp->napi, macb_poll, 64);
 
 	/* setup appropriated routines according to adapter type */
 	if (macb_is_gem(bp)) {
@@ -2941,6 +3370,30 @@ static int macb_init(struct platform_device *pdev)
 		dev->hw_features &= ~NETIF_F_SG;
 	dev->features = dev->hw_features;
 
+	/* Check RX Flow Filters support.
+	 * Max Rx flows set by availability of screeners & compare regs:
+	 * each 4-tuple filter requires 1 T2 screener reg + 3 compare regs
+	 */
+	reg = gem_readl(bp, DCFG8);
+	bp->max_tuples = min((GEM_BFEXT(SCR2CMP, reg) / 3),
+			GEM_BFEXT(T2SCR, reg));
+	if (bp->max_tuples > 0) {
+		/* also needs one ethtype match to check IPv4 */
+		if (GEM_BFEXT(SCR2ETH, reg) > 0) {
+			/* program this reg now */
+			reg = 0;
+			reg = GEM_BFINS(ETHTCMP, (uint16_t)ETH_P_IP, reg);
+			gem_writel_n(bp, ETHT, SCRT2_ETHT, reg);
+			/* hw filtering supported; leave it disabled until NETIF_F_NTUPLE is set */
+			dev->hw_features |= NETIF_F_NTUPLE;
+			/* init Rx flow definitions */
+			INIT_LIST_HEAD(&bp->rx_fs_list.list);
+			bp->rx_fs_list.count = 0;
+			spin_lock_init(&bp->rx_fs_lock);
+		} else
+			bp->max_tuples = 0;
+	}
+
 	if (!(bp->caps & MACB_CAPS_USRIO_DISABLED)) {
 		val = 0;
 		if (bp->phy_interface == PHY_INTERFACE_MODE_RGMII)
@@ -2977,34 +3430,35 @@ static int macb_init(struct platform_device *pdev)
 static int at91ether_start(struct net_device *dev)
 {
 	struct macb *lp = netdev_priv(dev);
+	struct macb_queue *q = &lp->queues[0];
 	struct macb_dma_desc *desc;
 	dma_addr_t addr;
 	u32 ctl;
 	int i;
 
-	lp->rx_ring = dma_alloc_coherent(&lp->pdev->dev,
+	q->rx_ring = dma_alloc_coherent(&lp->pdev->dev,
 					 (AT91ETHER_MAX_RX_DESCR *
 					  macb_dma_desc_get_size(lp)),
-					 &lp->rx_ring_dma, GFP_KERNEL);
-	if (!lp->rx_ring)
+					 &q->rx_ring_dma, GFP_KERNEL);
+	if (!q->rx_ring)
 		return -ENOMEM;
 
-	lp->rx_buffers = dma_alloc_coherent(&lp->pdev->dev,
+	q->rx_buffers = dma_alloc_coherent(&lp->pdev->dev,
 					    AT91ETHER_MAX_RX_DESCR *
 					    AT91ETHER_MAX_RBUFF_SZ,
-					    &lp->rx_buffers_dma, GFP_KERNEL);
-	if (!lp->rx_buffers) {
+					    &q->rx_buffers_dma, GFP_KERNEL);
+	if (!q->rx_buffers) {
 		dma_free_coherent(&lp->pdev->dev,
 				  AT91ETHER_MAX_RX_DESCR *
 				  macb_dma_desc_get_size(lp),
-				  lp->rx_ring, lp->rx_ring_dma);
-		lp->rx_ring = NULL;
+				  q->rx_ring, q->rx_ring_dma);
+		q->rx_ring = NULL;
 		return -ENOMEM;
 	}
 
-	addr = lp->rx_buffers_dma;
+	addr = q->rx_buffers_dma;
 	for (i = 0; i < AT91ETHER_MAX_RX_DESCR; i++) {
-		desc = macb_rx_desc(lp, i);
+		desc = macb_rx_desc(q, i);
 		macb_set_addr(lp, desc, addr);
 		desc->ctrl = 0;
 		addr += AT91ETHER_MAX_RBUFF_SZ;
@@ -3014,10 +3468,10 @@ static int at91ether_start(struct net_device *dev)
 	desc->addr |= MACB_BIT(RX_WRAP);
 
 	/* Reset buffer index */
-	lp->rx_tail = 0;
+	q->rx_tail = 0;
 
 	/* Program address of descriptor list in Rx Buffer Queue register */
-	macb_writel(lp, RBQP, lp->rx_ring_dma);
+	macb_writel(lp, RBQP, q->rx_ring_dma);
 
 	/* Enable Receive and Transmit */
 	ctl = macb_readl(lp, NCR);
@@ -3064,6 +3518,7 @@ static int at91ether_open(struct net_device *dev)
 static int at91ether_close(struct net_device *dev)
 {
 	struct macb *lp = netdev_priv(dev);
+	struct macb_queue *q = &lp->queues[0];
 	u32 ctl;
 
 	/* Disable Receiver and Transmitter */
@@ -3084,13 +3539,13 @@ static int at91ether_close(struct net_device *dev)
 	dma_free_coherent(&lp->pdev->dev,
 			  AT91ETHER_MAX_RX_DESCR *
 			  macb_dma_desc_get_size(lp),
-			  lp->rx_ring, lp->rx_ring_dma);
-	lp->rx_ring = NULL;
+			  q->rx_ring, q->rx_ring_dma);
+	q->rx_ring = NULL;
 
 	dma_free_coherent(&lp->pdev->dev,
 			  AT91ETHER_MAX_RX_DESCR * AT91ETHER_MAX_RBUFF_SZ,
-			  lp->rx_buffers, lp->rx_buffers_dma);
-	lp->rx_buffers = NULL;
+			  q->rx_buffers, q->rx_buffers_dma);
+	q->rx_buffers = NULL;
 
 	return 0;
 }
@@ -3134,14 +3589,15 @@ static int at91ether_start_xmit(struct sk_buff *skb, struct net_device *dev)
 static void at91ether_rx(struct net_device *dev)
 {
 	struct macb *lp = netdev_priv(dev);
+	struct macb_queue *q = &lp->queues[0];
 	struct macb_dma_desc *desc;
 	unsigned char *p_recv;
 	struct sk_buff *skb;
 	unsigned int pktlen;
 
-	desc = macb_rx_desc(lp, lp->rx_tail);
+	desc = macb_rx_desc(q, q->rx_tail);
 	while (desc->addr & MACB_BIT(RX_USED)) {
-		p_recv = lp->rx_buffers + lp->rx_tail * AT91ETHER_MAX_RBUFF_SZ;
+		p_recv = q->rx_buffers + q->rx_tail * AT91ETHER_MAX_RBUFF_SZ;
 		pktlen = MACB_BF(RX_FRMLEN, desc->ctrl);
 		skb = netdev_alloc_skb(dev, pktlen + 2);
 		if (skb) {
@@ -3163,12 +3619,12 @@ static void at91ether_rx(struct net_device *dev)
 		desc->addr &= ~MACB_BIT(RX_USED);
 
 		/* wrap after last buffer */
-		if (lp->rx_tail == AT91ETHER_MAX_RX_DESCR - 1)
-			lp->rx_tail = 0;
+		if (q->rx_tail == AT91ETHER_MAX_RX_DESCR - 1)
+			q->rx_tail = 0;
 		else
-			lp->rx_tail++;
+			q->rx_tail++;
 
-		desc = macb_rx_desc(lp, lp->rx_tail);
+		desc = macb_rx_desc(q, q->rx_tail);
 	}
 }
 
@@ -3394,7 +3850,6 @@ static int macb_probe(struct platform_device *pdev)
 					      = macb_config->clk_init;
 	int (*init)(struct platform_device *) = macb_config->init;
 	struct device_node *np = pdev->dev.of_node;
-	struct device_node *phy_node;
 	struct clk *pclk, *hclk = NULL, *tx_clk = NULL, *rx_clk = NULL;
 	unsigned int queue_mask, num_queues;
 	struct macb_platform_data *pdata;
@@ -3500,18 +3955,6 @@ static int macb_probe(struct platform_device *pdev)
 	else
 		macb_get_hwaddr(bp);
 
-	/* Power up the PHY if there is a GPIO reset */
-	phy_node =  of_get_next_available_child(np, NULL);
-	if (phy_node) {
-		int gpio = of_get_named_gpio(phy_node, "reset-gpios", 0);
-
-		if (gpio_is_valid(gpio)) {
-			bp->reset_gpio = gpio_to_desc(gpio);
-			gpiod_direction_output(bp->reset_gpio, 1);
-		}
-	}
-	of_node_put(phy_node);
-
 	err = of_get_phy_mode(np);
 	if (err < 0) {
 		pdata = dev_get_platdata(&pdev->dev);
@@ -3542,6 +3985,9 @@ static int macb_probe(struct platform_device *pdev)
 		goto err_out_unregister_mdio;
 	}
 
+	tasklet_init(&bp->hresp_err_tasklet, macb_hresp_error_task,
+		     (unsigned long)bp);
+
 	phy_attached_info(phydev);
 
 	netdev_info(dev, "Cadence %s rev 0x%08x at 0x%08lx irq %d (%pM)\n",
@@ -3558,10 +4004,6 @@ err_out_unregister_mdio:
 		of_phy_deregister_fixed_link(np);
 	mdiobus_free(bp->mii_bus);
 
-	/* Shutdown the PHY if there is a GPIO reset */
-	if (bp->reset_gpio)
-		gpiod_set_value(bp->reset_gpio, 0);
-
 err_out_free_netdev:
 	free_netdev(dev);
 
@@ -3592,10 +4034,6 @@ static int macb_remove(struct platform_device *pdev)
 		dev->phydev = NULL;
 		mdiobus_free(bp->mii_bus);
 
-		/* Shutdown the PHY if there is a GPIO reset */
-		if (bp->reset_gpio)
-			gpiod_set_value(bp->reset_gpio, 0);
-
 		unregister_netdev(dev);
 		clk_disable_unprepare(bp->tx_clk);
 		clk_disable_unprepare(bp->hclk);
diff --git a/drivers/net/ethernet/cavium/Kconfig b/drivers/net/ethernet/cavium/Kconfig
index 63be75eb34d2..043e3c11c42b 100644
--- a/drivers/net/ethernet/cavium/Kconfig
+++ b/drivers/net/ethernet/cavium/Kconfig
@@ -27,6 +27,7 @@ config THUNDER_NIC_PF
 
 config THUNDER_NIC_VF
 	tristate "Thunder Virtual function driver"
+	imply CAVIUM_PTP
 	depends on 64BIT
 	---help---
 	  This driver supports Thunder's NIC virtual function
@@ -50,6 +51,18 @@ config	THUNDER_NIC_RGX
 	  This driver supports configuring XCV block of RGX interface
 	  present on CN81XX chip.
 
+config CAVIUM_PTP
+	tristate "Cavium PTP coprocessor as PTP clock"
+	depends on 64BIT
+	imply PTP_1588_CLOCK
+	default y
+	---help---
+	  This driver adds support for the Precision Time Protocol Clocks and
+	  Timestamping coprocessor (PTP) found on Cavium processors.
+	  PTP provides a timestamping mechanism that is suitable for use in
+	  IEEE 1588 Precision Time Protocol or for other purposes.  Timestamps
+	  can be used in BGX, TNS, GTI, and NIC blocks.
+
 config LIQUIDIO
 	tristate "Cavium LiquidIO support"
 	depends on 64BIT
diff --git a/drivers/net/ethernet/cavium/Makefile b/drivers/net/ethernet/cavium/Makefile
index 872da9f7c31a..946bba84e81d 100644
--- a/drivers/net/ethernet/cavium/Makefile
+++ b/drivers/net/ethernet/cavium/Makefile
@@ -1,6 +1,7 @@
 #
 # Makefile for the Cavium ethernet device drivers.
 #
+obj-$(CONFIG_NET_VENDOR_CAVIUM) += common/
 obj-$(CONFIG_NET_VENDOR_CAVIUM) += thunder/
 obj-$(CONFIG_NET_VENDOR_CAVIUM) += liquidio/
 obj-$(CONFIG_NET_VENDOR_CAVIUM) += octeon/
diff --git a/drivers/net/ethernet/cavium/common/Makefile b/drivers/net/ethernet/cavium/common/Makefile
new file mode 100644
index 000000000000..dd8561b8060b
--- /dev/null
+++ b/drivers/net/ethernet/cavium/common/Makefile
@@ -0,0 +1 @@
+obj-$(CONFIG_CAVIUM_PTP) += cavium_ptp.o
diff --git a/drivers/net/ethernet/cavium/common/cavium_ptp.c b/drivers/net/ethernet/cavium/common/cavium_ptp.c
new file mode 100644
index 000000000000..c87c9c684a33
--- /dev/null
+++ b/drivers/net/ethernet/cavium/common/cavium_ptp.c
@@ -0,0 +1,353 @@
+// SPDX-License-Identifier: GPL-2.0
+/* cavium_ptp.c - PTP 1588 clock on Cavium hardware
+ * Copyright (c) 2003-2015, 2017 Cavium, Inc.
+ */
+
+#include <linux/device.h>
+#include <linux/module.h>
+#include <linux/timecounter.h>
+#include <linux/pci.h>
+
+#include "cavium_ptp.h"
+
+#define DRV_NAME	"Cavium PTP Driver"
+
+#define PCI_DEVICE_ID_CAVIUM_PTP	0xA00C
+#define PCI_DEVICE_ID_CAVIUM_RST	0xA00E
+
+#define PCI_PTP_BAR_NO	0
+#define PCI_RST_BAR_NO	0
+
+#define PTP_CLOCK_CFG		0xF00ULL
+#define  PTP_CLOCK_CFG_PTP_EN	BIT(0)
+#define PTP_CLOCK_LO		0xF08ULL
+#define PTP_CLOCK_HI		0xF10ULL
+#define PTP_CLOCK_COMP		0xF18ULL
+
+#define RST_BOOT	0x1600ULL
+#define CLOCK_BASE_RATE	50000000ULL
+
+static u64 ptp_cavium_clock_get(void)
+{
+	struct pci_dev *pdev;
+	void __iomem *base;
+	u64 ret = CLOCK_BASE_RATE * 16;
+
+	pdev = pci_get_device(PCI_VENDOR_ID_CAVIUM,
+			      PCI_DEVICE_ID_CAVIUM_RST, NULL);
+	if (!pdev)
+		goto error;
+
+	base = pci_ioremap_bar(pdev, PCI_RST_BAR_NO);
+	if (!base)
+		goto error_put_pdev;
+
+	ret = CLOCK_BASE_RATE * ((readq(base + RST_BOOT) >> 33) & 0x3f);
+
+	iounmap(base);
+
+error_put_pdev:
+	pci_dev_put(pdev);
+
+error:
+	return ret;
+}
+
+struct cavium_ptp *cavium_ptp_get(void)
+{
+	struct cavium_ptp *ptp;
+	struct pci_dev *pdev;
+
+	pdev = pci_get_device(PCI_VENDOR_ID_CAVIUM,
+			      PCI_DEVICE_ID_CAVIUM_PTP, NULL);
+	if (!pdev)
+		return ERR_PTR(-ENODEV);
+
+	ptp = pci_get_drvdata(pdev);
+	if (!ptp)
+		ptp = ERR_PTR(-EPROBE_DEFER);
+	if (IS_ERR(ptp))
+		pci_dev_put(pdev);
+
+	return ptp;
+}
+EXPORT_SYMBOL(cavium_ptp_get);
+
+void cavium_ptp_put(struct cavium_ptp *ptp)
+{
+	pci_dev_put(ptp->pdev);
+}
+EXPORT_SYMBOL(cavium_ptp_put);
+
+/**
+ * cavium_ptp_adjfine() - Adjust ptp frequency
+ * @ptp_info: PTP clock info
+ * @scaled_ppm: how much to adjust by, in parts per million, but with a
+ *              16 bit binary fractional field
+ */
+static int cavium_ptp_adjfine(struct ptp_clock_info *ptp_info, long scaled_ppm)
+{
+	struct cavium_ptp *clock =
+		container_of(ptp_info, struct cavium_ptp, ptp_info);
+	unsigned long flags;
+	u64 comp;
+	u64 adj;
+	bool neg_adj = false;
+
+	if (scaled_ppm < 0) {
+		neg_adj = true;
+		scaled_ppm = -scaled_ppm;
+	}
+
+	/* The hardware adds the clock compensation value to the PTP clock
+	 * on every coprocessor clock cycle. The typical convention is that
+	 * it represents the number of nanoseconds between each cycle. In
+	 * this convention the compensation value is a 64 bit fixed-point
+	 * representation where the upper 32 bits are the number of
+	 * nanoseconds and the lower 32 bits are fractions of a nanosecond.
+	 * The scaled_ppm represents the ratio in "parts per million" (with
+	 * a 16 bit binary fractional field) by which the compensation value
+	 * should be corrected.
+	 * To calculate the new compensation value we use 64 bit fixed-point
+	 * arithmetic on the following formula
+	 * comp = tbase + tbase * scaled_ppm / (1M * 2^16)
+	 * where tbase is the basic compensation value calculated initially
+	 * in cavium_ptp_init() -> tbase = 1/Hz. Then we use an endian
+	 * independent structure definition to write the data to the PTP
+	 * register.
+	 */
+	comp = ((u64)1000000000ull << 32) / clock->clock_rate;
+	adj = comp * scaled_ppm;
+	adj >>= 16;
+	adj = div_u64(adj, 1000000ull);
+	comp = neg_adj ? comp - adj : comp + adj;
+
+	spin_lock_irqsave(&clock->spin_lock, flags);
+	writeq(comp, clock->reg_base + PTP_CLOCK_COMP);
+	spin_unlock_irqrestore(&clock->spin_lock, flags);
+
+	return 0;
+}
+
+/**
+ * cavium_ptp_adjtime() - Adjust ptp time
+ * @ptp_info: PTP clock info
+ * @delta: how much to adjust by, in nanosecs
+ */
+static int cavium_ptp_adjtime(struct ptp_clock_info *ptp_info, s64 delta)
+{
+	struct cavium_ptp *clock =
+		container_of(ptp_info, struct cavium_ptp, ptp_info);
+	unsigned long flags;
+
+	spin_lock_irqsave(&clock->spin_lock, flags);
+	timecounter_adjtime(&clock->time_counter, delta);
+	spin_unlock_irqrestore(&clock->spin_lock, flags);
+
+	/* Sync, so the network driver gets the latest value */
+	smp_mb();
+
+	return 0;
+}
+
+/**
+ * cavium_ptp_gettime() - Get hardware clock time with adjustment
+ * @ptp_info: PTP clock info
+ * @ts:  timespec
+ */
+static int cavium_ptp_gettime(struct ptp_clock_info *ptp_info,
+			      struct timespec64 *ts)
+{
+	struct cavium_ptp *clock =
+		container_of(ptp_info, struct cavium_ptp, ptp_info);
+	unsigned long flags;
+	u64 nsec;
+
+	spin_lock_irqsave(&clock->spin_lock, flags);
+	nsec = timecounter_read(&clock->time_counter);
+	spin_unlock_irqrestore(&clock->spin_lock, flags);
+
+	*ts = ns_to_timespec64(nsec);
+
+	return 0;
+}
+
+/**
+ * cavium_ptp_settime() - Set hardware clock time. Reset adjustment
+ * @ptp_info: PTP clock info
+ * @ts:  timespec
+ */
+static int cavium_ptp_settime(struct ptp_clock_info *ptp_info,
+			      const struct timespec64 *ts)
+{
+	struct cavium_ptp *clock =
+		container_of(ptp_info, struct cavium_ptp, ptp_info);
+	unsigned long flags;
+	u64 nsec;
+
+	nsec = timespec64_to_ns(ts);
+
+	spin_lock_irqsave(&clock->spin_lock, flags);
+	timecounter_init(&clock->time_counter, &clock->cycle_counter, nsec);
+	spin_unlock_irqrestore(&clock->spin_lock, flags);
+
+	return 0;
+}
+
+/**
+ * cavium_ptp_enable() - Request to enable or disable an ancillary feature.
+ * @ptp_info: PTP clock info
+ * @rq:  request
+ * @on:  is it on
+ */
+static int cavium_ptp_enable(struct ptp_clock_info *ptp_info,
+			     struct ptp_clock_request *rq, int on)
+{
+	return -EOPNOTSUPP;
+}
+
+static u64 cavium_ptp_cc_read(const struct cyclecounter *cc)
+{
+	struct cavium_ptp *clock =
+		container_of(cc, struct cavium_ptp, cycle_counter);
+
+	return readq(clock->reg_base + PTP_CLOCK_HI);
+}
+
+static int cavium_ptp_probe(struct pci_dev *pdev,
+			    const struct pci_device_id *ent)
+{
+	struct device *dev = &pdev->dev;
+	struct cavium_ptp *clock;
+	struct cyclecounter *cc;
+	u64 clock_cfg;
+	u64 clock_comp;
+	int err;
+
+	clock = devm_kzalloc(dev, sizeof(*clock), GFP_KERNEL);
+	if (!clock) {
+		err = -ENOMEM;
+		goto error;
+	}
+
+	clock->pdev = pdev;
+
+	err = pcim_enable_device(pdev);
+	if (err)
+		goto error_free;
+
+	err = pcim_iomap_regions(pdev, 1 << PCI_PTP_BAR_NO, pci_name(pdev));
+	if (err)
+		goto error_free;
+
+	clock->reg_base = pcim_iomap_table(pdev)[PCI_PTP_BAR_NO];
+
+	spin_lock_init(&clock->spin_lock);
+
+	cc = &clock->cycle_counter;
+	cc->read = cavium_ptp_cc_read;
+	cc->mask = CYCLECOUNTER_MASK(64);
+	cc->mult = 1;
+	cc->shift = 0;
+
+	timecounter_init(&clock->time_counter, &clock->cycle_counter,
+			 ktime_to_ns(ktime_get_real()));
+
+	clock->clock_rate = ptp_cavium_clock_get();
+
+	clock->ptp_info = (struct ptp_clock_info) {
+		.owner		= THIS_MODULE,
+		.name		= "ThunderX PTP",
+		.max_adj	= 1000000000ull,
+		.n_ext_ts	= 0,
+		.n_pins		= 0,
+		.pps		= 0,
+		.adjfine	= cavium_ptp_adjfine,
+		.adjtime	= cavium_ptp_adjtime,
+		.gettime64	= cavium_ptp_gettime,
+		.settime64	= cavium_ptp_settime,
+		.enable		= cavium_ptp_enable,
+	};
+
+	clock_cfg = readq(clock->reg_base + PTP_CLOCK_CFG);
+	clock_cfg |= PTP_CLOCK_CFG_PTP_EN;
+	writeq(clock_cfg, clock->reg_base + PTP_CLOCK_CFG);
+
+	clock_comp = ((u64)1000000000ull << 32) / clock->clock_rate;
+	writeq(clock_comp, clock->reg_base + PTP_CLOCK_COMP);
+
+	clock->ptp_clock = ptp_clock_register(&clock->ptp_info, dev);
+	if (!clock->ptp_clock) {
+		err = -ENODEV;
+		goto error_stop;
+	}
+	if (IS_ERR(clock->ptp_clock)) {
+		err = PTR_ERR(clock->ptp_clock);
+		goto error_stop;
+	}
+
+	pci_set_drvdata(pdev, clock);
+	return 0;
+
+error_stop:
+	clock_cfg = readq(clock->reg_base + PTP_CLOCK_CFG);
+	clock_cfg &= ~PTP_CLOCK_CFG_PTP_EN;
+	writeq(clock_cfg, clock->reg_base + PTP_CLOCK_CFG);
+	pcim_iounmap_regions(pdev, 1 << PCI_PTP_BAR_NO);
+
+error_free:
+	devm_kfree(dev, clock);
+
+error:
+	/* For `cavium_ptp_get()` we need to differentiate between the case
+	 * when the core has not tried to probe this device and the case when
+	 * the probe failed.  In the latter case we pretend that the
+	 * initialization was successful and keep the error in
+	 * `dev->driver_data`.
+	 */
+	pci_set_drvdata(pdev, ERR_PTR(err));
+	return 0;
+}
+
+static void cavium_ptp_remove(struct pci_dev *pdev)
+{
+	struct cavium_ptp *clock = pci_get_drvdata(pdev);
+	u64 clock_cfg;
+
+	if (IS_ERR_OR_NULL(clock))
+		return;
+
+	ptp_clock_unregister(clock->ptp_clock);
+
+	clock_cfg = readq(clock->reg_base + PTP_CLOCK_CFG);
+	clock_cfg &= ~PTP_CLOCK_CFG_PTP_EN;
+	writeq(clock_cfg, clock->reg_base + PTP_CLOCK_CFG);
+}
+
+static const struct pci_device_id cavium_ptp_id_table[] = {
+	{ PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, PCI_DEVICE_ID_CAVIUM_PTP) },
+	{ 0, }
+};
+
+static struct pci_driver cavium_ptp_driver = {
+	.name = DRV_NAME,
+	.id_table = cavium_ptp_id_table,
+	.probe = cavium_ptp_probe,
+	.remove = cavium_ptp_remove,
+};
+
+static int __init cavium_ptp_init_module(void)
+{
+	return pci_register_driver(&cavium_ptp_driver);
+}
+
+static void __exit cavium_ptp_cleanup_module(void)
+{
+	pci_unregister_driver(&cavium_ptp_driver);
+}
+
+module_init(cavium_ptp_init_module);
+module_exit(cavium_ptp_cleanup_module);
+
+MODULE_DESCRIPTION(DRV_NAME);
+MODULE_AUTHOR("Cavium Networks <support@cavium.com>");
+MODULE_LICENSE("GPL v2");
+MODULE_DEVICE_TABLE(pci, cavium_ptp_id_table);
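
For reference, a minimal user-space sketch of the fixed-point arithmetic in
cavium_ptp_adjfine() above. The 800 MHz coprocessor clock rate is a
hypothetical example value, not something read from hardware:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint64_t clock_rate = 800000000ull;  /* hypothetical: 800 MHz */
	int64_t scaled_ppm = 65536;          /* +1 ppm, i.e. 1 << 16 */
	int neg = scaled_ppm < 0;
	uint64_t comp, adj;

	if (neg)
		scaled_ppm = -scaled_ppm;

	/* tbase = 1/Hz in 32.32 fixed point: 1.25 ns per cycle here */
	comp = (1000000000ull << 32) / clock_rate;
	adj = (comp * (uint64_t)scaled_ppm) >> 16; /* undo the 2^16 scale */
	adj /= 1000000ull;                         /* undo "per million" */
	comp = neg ? comp - adj : comp + adj;

	printf("comp = %llu ns + %llu/2^32 ns per cycle\n",
	       (unsigned long long)(comp >> 32),
	       (unsigned long long)(comp & 0xffffffffull));
	return 0;
}

For scaled_ppm = 65536 (one ppm) this nudges comp from 1.25 ns per cycle to
1.25 ns + 5368/2^32 ns, matching comp = tbase + tbase / 1000000.
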
diff --git a/drivers/net/ethernet/cavium/common/cavium_ptp.h b/drivers/net/ethernet/cavium/common/cavium_ptp.h
new file mode 100644
index 000000000000..be2bafc7beeb
--- /dev/null
+++ b/drivers/net/ethernet/cavium/common/cavium_ptp.h
@@ -0,0 +1,70 @@
+// SPDX-License-Identifier: GPL-2.0
+/* cavium_ptp.h - PTP 1588 clock on Cavium hardware
+ * Copyright (c) 2003-2015, 2017 Cavium, Inc.
+ */
+
+#ifndef CAVIUM_PTP_H
+#define CAVIUM_PTP_H
+
+#include <linux/ptp_clock_kernel.h>
+#include <linux/timecounter.h>
+
+struct cavium_ptp {
+	struct pci_dev *pdev;
+
+	/* Serialize access to cycle_counter, time_counter and hw_registers */
+	spinlock_t spin_lock;
+	struct cyclecounter cycle_counter;
+	struct timecounter time_counter;
+	void __iomem *reg_base;
+
+	u32 clock_rate;
+
+	struct ptp_clock_info ptp_info;
+	struct ptp_clock *ptp_clock;
+};
+
+#if IS_ENABLED(CONFIG_CAVIUM_PTP)
+
+struct cavium_ptp *cavium_ptp_get(void);
+void cavium_ptp_put(struct cavium_ptp *ptp);
+
+static inline u64 cavium_ptp_tstamp2time(struct cavium_ptp *ptp, u64 tstamp)
+{
+	unsigned long flags;
+	u64 ret;
+
+	spin_lock_irqsave(&ptp->spin_lock, flags);
+	ret = timecounter_cyc2time(&ptp->time_counter, tstamp);
+	spin_unlock_irqrestore(&ptp->spin_lock, flags);
+
+	return ret;
+}
+
+static inline int cavium_ptp_clock_index(struct cavium_ptp *clock)
+{
+	return ptp_clock_index(clock->ptp_clock);
+}
+
+#else
+
+static inline struct cavium_ptp *cavium_ptp_get(void)
+{
+	return ERR_PTR(-ENODEV);
+}
+
+static inline void cavium_ptp_put(struct cavium_ptp *ptp) {}
+
+static inline u64 cavium_ptp_tstamp2time(struct cavium_ptp *ptp, u64 tstamp)
+{
+	return 0;
+}
+
+static inline int cavium_ptp_clock_index(struct cavium_ptp *clock)
+{
+	return -1;
+}
+
+#endif
+
+#endif
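
A minimal sketch of how a consumer driver is expected to bind to the PTP
coprocessor through this header; the error handling mirrors what
nicvf_probe() does below, where -ENODEV means "no PTP device, run without
timestamping" and other errors (e.g. -EPROBE_DEFER) are propagated. The
helper name is illustrative only:

#include <linux/err.h>

#include "cavium_ptp.h"

static struct cavium_ptp *example_bind_ptp(int *err)
{
	struct cavium_ptp *ptp = cavium_ptp_get();

	*err = 0;
	if (IS_ERR(ptp)) {
		if (PTR_ERR(ptp) != -ENODEV)
			*err = PTR_ERR(ptp);	/* e.g. -EPROBE_DEFER */
		return NULL;			/* proceed without PTP */
	}
	return ptp;	/* pass to cavium_ptp_put() when done */
}
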
diff --git a/drivers/net/ethernet/cavium/liquidio/octeon_device.c b/drivers/net/ethernet/cavium/liquidio/octeon_device.c
index 2c615ab09e64..f38abf626412 100644
--- a/drivers/net/ethernet/cavium/liquidio/octeon_device.c
+++ b/drivers/net/ethernet/cavium/liquidio/octeon_device.c
@@ -702,12 +702,10 @@ static struct octeon_device *octeon_allocate_device_mem(u32 pci_id,
 	size = octdevsize + priv_size + configsize +
 		(sizeof(struct octeon_dispatch) * DISPATCH_LIST_SIZE);
 
-	buf = vmalloc(size);
+	buf = vzalloc(size);
 	if (!buf)
 		return NULL;
 
-	memset(buf, 0, size);
-
 	oct = (struct octeon_device *)buf;
 	oct->priv = (void *)(buf + octdevsize);
 	oct->chip = (void *)(buf + octdevsize + priv_size);
@@ -840,10 +838,9 @@ octeon_allocate_ioq_vector(struct octeon_device  *oct)
 
 	size = sizeof(struct octeon_ioq_vector) * num_ioqs;
 
-	oct->ioq_vector = vmalloc(size);
+	oct->ioq_vector = vzalloc(size);
 	if (!oct->ioq_vector)
 		return 1;
-	memset(oct->ioq_vector, 0, size);
 	for (i = 0; i < num_ioqs; i++) {
 		ioq_vector		= &oct->ioq_vector[i];
 		ioq_vector->oct_dev	= oct;
diff --git a/drivers/net/ethernet/cavium/thunder/nic.h b/drivers/net/ethernet/cavium/thunder/nic.h
index 4a02e618e318..4cacce5d2b16 100644
--- a/drivers/net/ethernet/cavium/thunder/nic.h
+++ b/drivers/net/ethernet/cavium/thunder/nic.h
@@ -263,6 +263,8 @@ struct nicvf_drv_stats {
 	struct u64_stats_sync   syncp;
 };
 
+struct cavium_ptp;
+
 struct nicvf {
 	struct nicvf		*pnicvf;
 	struct net_device	*netdev;
@@ -312,6 +314,33 @@ struct nicvf {
 	struct tasklet_struct	qs_err_task;
 	struct work_struct	reset_task;
 
+	/* PTP timestamp */
+	struct cavium_ptp	*ptp_clock;
+	/* Inbound timestamping is on */
+	bool			hw_rx_tstamp;
+	/* When a packet that requires timestamping is sent, the hardware
+	 * inserts two entries into the completion queue.  The first is the
+	 * regular CQE_TYPE_SEND entry that signals that the packet was sent.
+	 * The second is CQE_TYPE_SEND_PTP that contains the actual timestamp
+	 * for that packet.
+	 * `ptp_skb` is initialized in the handler for the CQE_TYPE_SEND
+	 * entry and is used and zeroed in the handler for the CQE_TYPE_SEND_PTP
+	 * entry.
+	 * So `ptp_skb` is used to hold the pointer to the packet between
+	 * the calls to CQE_TYPE_SEND and CQE_TYPE_SEND_PTP handlers.
+	 */
+	struct sk_buff		*ptp_skb;
+	/* `tx_ptp_skbs` is set when the hardware is sending a packet that
+	 * requires timestamping.  Cavium hardware cannot process more than
+	 * one such packet at once, so this is set each time the driver
+	 * submits a packet that requires timestamping to the send queue and
+	 * is cleared each time it receives the entry on the completion queue
+	 * saying that such a packet was sent.
+	 * So `tx_ptp_skbs` prevents the driver from submitting more than one
+	 * packet that requires timestamping to the hardware for transmission.
+	 */
+	atomic_t		tx_ptp_skbs;
+
 	/* Interrupt coalescing settings */
 	u32			cq_coalesce_usecs;
 	u32			msg_enable;
@@ -371,6 +400,7 @@ struct nicvf {
 #define	NIC_MBOX_MSG_LOOPBACK		0x16	/* Set interface in loopback */
 #define	NIC_MBOX_MSG_RESET_STAT_COUNTER 0x17	/* Reset statistics counters */
 #define	NIC_MBOX_MSG_PFC		0x18	/* Pause frame control */
+#define	NIC_MBOX_MSG_PTP_CFG		0x19	/* HW packet timestamp */
 #define	NIC_MBOX_MSG_CFG_DONE		0xF0	/* VF configuration done */
 #define	NIC_MBOX_MSG_SHUTDOWN		0xF1	/* VF is being shutdown */
 
@@ -521,6 +551,11 @@ struct pfc {
 	u8    fc_tx;
 };
 
+struct set_ptp {
+	u8    msg;
+	bool  enable;
+};
+
 /* 128 bit shared memory between PF and each VF */
 union nic_mbx {
 	struct { u8 msg; }	msg;
@@ -540,6 +575,7 @@ union nic_mbx {
 	struct set_loopback	lbk;
 	struct reset_stat_cfg	reset_stat;
 	struct pfc		pfc;
+	struct set_ptp		ptp;
 };
 
 #define NIC_NODE_ID_MASK	0x03
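
A minimal sketch of the tx_ptp_skbs gating described in the comments above
(the real transitions live in nicvf_sq_add_hdr_subdesc() and
nicvf_snd_ptp_handler() further down; the helper names here are
illustrative only):

#include <linux/atomic.h>

#include "nic.h"

/* 0 -> 1 transition claims the single timestamp slot; a second caller
 * sees the value 1 and fails until the CQE_TYPE_SEND_PTP entry arrives.
 */
static bool example_claim_tstamp_slot(struct nicvf *nic)
{
	return atomic_add_unless(&nic->pnicvf->tx_ptp_skbs, 1, 1);
}

static void example_release_tstamp_slot(struct nicvf *nic)
{
	/* done in the CQE_TYPE_SEND_PTP handler once the stamp is read */
	atomic_set(&nic->pnicvf->tx_ptp_skbs, 0);
}
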
diff --git a/drivers/net/ethernet/cavium/thunder/nic_main.c b/drivers/net/ethernet/cavium/thunder/nic_main.c
index 8f1dd55b3e08..7ff66a8194e2 100644
--- a/drivers/net/ethernet/cavium/thunder/nic_main.c
+++ b/drivers/net/ethernet/cavium/thunder/nic_main.c
@@ -18,7 +18,7 @@
 #include "q_struct.h"
 #include "thunder_bgx.h"
 
-#define DRV_NAME	"thunder-nic"
+#define DRV_NAME	"nicpf"
 #define DRV_VERSION	"1.0"
 
 struct hw_info {
@@ -426,13 +426,22 @@ static void nic_init_hw(struct nicpf *nic)
 	/* Enable backpressure */
 	nic_reg_write(nic, NIC_PF_BP_CFG, (1ULL << 6) | 0x03);
 
-	/* TNS and TNS bypass modes are present only on 88xx */
+	/* TNS and TNS bypass modes are present only on 88xx
+	 * Also the offset of this CSR has changed on 81xx and 83xx.
+	 */
 	if (nic->pdev->subsystem_device == PCI_SUBSYS_DEVID_88XX_NIC_PF) {
 		/* Disable TNS mode on both interfaces */
 		nic_reg_write(nic, NIC_PF_INTF_0_1_SEND_CFG,
-			      (NIC_TNS_BYPASS_MODE << 7) | BGX0_BLOCK);
+			      (NIC_TNS_BYPASS_MODE << 7) |
+			      BGX0_BLOCK | (1ULL << 16));
 		nic_reg_write(nic, NIC_PF_INTF_0_1_SEND_CFG | (1 << 8),
-			      (NIC_TNS_BYPASS_MODE << 7) | BGX1_BLOCK);
+			      (NIC_TNS_BYPASS_MODE << 7) |
+			      BGX1_BLOCK | (1ULL << 16));
+	} else {
+		/* Configure timestamp generation timeout to 10us */
+		for (i = 0; i < nic->hw->bgx_cnt; i++)
+			nic_reg_write(nic, NIC_PF_INTFX_SEND_CFG | (i << 3),
+				      (1ULL << 16));
 	}
 
 	nic_reg_write(nic, NIC_PF_INTF_0_1_BP_CFG,
@@ -880,6 +889,44 @@ static void nic_pause_frame(struct nicpf *nic, int vf, struct pfc *cfg)
 	}
 }
 
+/* Enable or disable HW timestamping by BGX for pkts received on a LMAC */
+static void nic_config_timestamp(struct nicpf *nic, int vf, struct set_ptp *ptp)
+{
+	struct pkind_cfg *pkind;
+	u8 lmac, bgx_idx;
+	u64 pkind_val, pkind_idx;
+
+	if (vf >= nic->num_vf_en)
+		return;
+
+	bgx_idx = NIC_GET_BGX_FROM_VF_LMAC_MAP(nic->vf_lmac_map[vf]);
+	lmac = NIC_GET_LMAC_FROM_VF_LMAC_MAP(nic->vf_lmac_map[vf]);
+
+	pkind_idx = lmac + bgx_idx * MAX_LMAC_PER_BGX;
+	pkind_val = nic_reg_read(nic, NIC_PF_PKIND_0_15_CFG | (pkind_idx << 3));
+	pkind = (struct pkind_cfg *)&pkind_val;
+
+	if (ptp->enable && !pkind->hdr_sl) {
+		/* Skip length to exclude the 8-byte timestamp while parsing
+		 * the packet.  If not configured, it will result in L2 errors.
+		 */
+		pkind->hdr_sl = 4;
+		/* Adjust max packet length allowed */
+		pkind->maxlen += (pkind->hdr_sl * 2);
+		bgx_config_timestamping(nic->node, bgx_idx, lmac, true);
+		nic_reg_write(nic, NIC_PF_RX_ETYPE_0_7 | (1 << 3),
+			      (ETYPE_ALG_ENDPARSE << 16) | ETH_P_1588);
+	} else if (!ptp->enable && pkind->hdr_sl) {
+		pkind->maxlen -= (pkind->hdr_sl * 2);
+		pkind->hdr_sl = 0;
+		bgx_config_timestamping(nic->node, bgx_idx, lmac, false);
+		nic_reg_write(nic, NIC_PF_RX_ETYPE_0_7 | (1 << 3),
+			      (ETYPE_ALG_SKIP << 16) | ETH_P_8021Q);
+	}
+
+	nic_reg_write(nic, NIC_PF_PKIND_0_15_CFG | (pkind_idx << 3), pkind_val);
+}
+
 /* Interrupt handler to handle mailbox messages from VFs */
 static void nic_handle_mbx_intr(struct nicpf *nic, int vf)
 {
@@ -1022,6 +1069,9 @@ static void nic_handle_mbx_intr(struct nicpf *nic, int vf)
 	case NIC_MBOX_MSG_PFC:
 		nic_pause_frame(nic, vf, &mbx.pfc);
 		goto unlock;
+	case NIC_MBOX_MSG_PTP_CFG:
+		nic_config_timestamp(nic, vf, &mbx.ptp);
+		break;
 	default:
 		dev_err(&nic->pdev->dev,
 			"Invalid msg from VF%d, msg 0x%x\n", vf, mbx.msg.msg);
diff --git a/drivers/net/ethernet/cavium/thunder/nic_reg.h b/drivers/net/ethernet/cavium/thunder/nic_reg.h
index 80d46337cf29..a16c48a1ebb2 100644
--- a/drivers/net/ethernet/cavium/thunder/nic_reg.h
+++ b/drivers/net/ethernet/cavium/thunder/nic_reg.h
@@ -99,6 +99,7 @@
 #define   NIC_PF_ECC3_DBE_INT_W1S		(0x2708)
 #define   NIC_PF_ECC3_DBE_ENA_W1C		(0x2710)
 #define   NIC_PF_ECC3_DBE_ENA_W1S		(0x2718)
+#define   NIC_PF_INTFX_SEND_CFG			(0x4000)
 #define   NIC_PF_MCAM_0_191_ENA			(0x100000)
 #define   NIC_PF_MCAM_0_191_M_0_5_DATA		(0x110000)
 #define   NIC_PF_MCAM_CTRL			(0x120000)
diff --git a/drivers/net/ethernet/cavium/thunder/nicvf_ethtool.c b/drivers/net/ethernet/cavium/thunder/nicvf_ethtool.c
index b9ece9cbf98b..5603f5ab1fee 100644
--- a/drivers/net/ethernet/cavium/thunder/nicvf_ethtool.c
+++ b/drivers/net/ethernet/cavium/thunder/nicvf_ethtool.c
@@ -9,14 +9,16 @@
 /* ETHTOOL Support for VNIC_VF Device*/
 
 #include <linux/pci.h>
+#include <linux/net_tstamp.h>
 
 #include "nic_reg.h"
 #include "nic.h"
 #include "nicvf_queues.h"
 #include "q_struct.h"
 #include "thunder_bgx.h"
+#include "../common/cavium_ptp.h"
 
-#define DRV_NAME	"thunder-nicvf"
+#define DRV_NAME	"nicvf"
 #define DRV_VERSION     "1.0"
 
 struct nicvf_stat {
@@ -824,6 +826,31 @@ static int nicvf_set_pauseparam(struct net_device *dev,
 	return 0;
 }
 
+static int nicvf_get_ts_info(struct net_device *netdev,
+			     struct ethtool_ts_info *info)
+{
+	struct nicvf *nic = netdev_priv(netdev);
+
+	if (!nic->ptp_clock)
+		return ethtool_op_get_ts_info(netdev, info);
+
+	info->so_timestamping = SOF_TIMESTAMPING_TX_SOFTWARE |
+				SOF_TIMESTAMPING_RX_SOFTWARE |
+				SOF_TIMESTAMPING_SOFTWARE |
+				SOF_TIMESTAMPING_TX_HARDWARE |
+				SOF_TIMESTAMPING_RX_HARDWARE |
+				SOF_TIMESTAMPING_RAW_HARDWARE;
+
+	info->phc_index = cavium_ptp_clock_index(nic->ptp_clock);
+
+	info->tx_types = (1 << HWTSTAMP_TX_OFF) | (1 << HWTSTAMP_TX_ON);
+
+	info->rx_filters = (1 << HWTSTAMP_FILTER_NONE) |
+			   (1 << HWTSTAMP_FILTER_ALL);
+
+	return 0;
+}
+
 static const struct ethtool_ops nicvf_ethtool_ops = {
 	.get_link		= nicvf_get_link,
 	.get_drvinfo		= nicvf_get_drvinfo,
@@ -847,7 +874,7 @@ static const struct ethtool_ops nicvf_ethtool_ops = {
 	.set_channels		= nicvf_set_channels,
 	.get_pauseparam         = nicvf_get_pauseparam,
 	.set_pauseparam         = nicvf_set_pauseparam,
-	.get_ts_info		= ethtool_op_get_ts_info,
+	.get_ts_info		= nicvf_get_ts_info,
 	.get_link_ksettings	= nicvf_get_link_ksettings,
 };
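
A user-space sketch querying the capabilities that nicvf_get_ts_info()
reports, via the ETHTOOL_GET_TS_INFO ioctl (the same path `ethtool -T`
uses); "eth0" is a placeholder interface name:

#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <net/if.h>
#include <linux/sockios.h>
#include <linux/ethtool.h>

int main(void)
{
	struct ethtool_ts_info info = { .cmd = ETHTOOL_GET_TS_INFO };
	struct ifreq ifr;
	int fd = socket(AF_INET, SOCK_DGRAM, 0);

	if (fd < 0)
		return 1;
	memset(&ifr, 0, sizeof(ifr));
	strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);
	ifr.ifr_data = (char *)&info;

	if (ioctl(fd, SIOCETHTOOL, &ifr) == 0)
		printf("phc index: %d\n", info.phc_index);
	close(fd);
	return 0;
}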
 
diff --git a/drivers/net/ethernet/cavium/thunder/nicvf_main.c b/drivers/net/ethernet/cavium/thunder/nicvf_main.c
index a063c36c4c58..b68cde9f17d2 100644
--- a/drivers/net/ethernet/cavium/thunder/nicvf_main.c
+++ b/drivers/net/ethernet/cavium/thunder/nicvf_main.c
@@ -20,13 +20,15 @@
 #include <linux/bpf.h>
 #include <linux/bpf_trace.h>
 #include <linux/filter.h>
+#include <linux/net_tstamp.h>
 
 #include "nic_reg.h"
 #include "nic.h"
 #include "nicvf_queues.h"
 #include "thunder_bgx.h"
+#include "../common/cavium_ptp.h"
 
-#define DRV_NAME	"thunder-nicvf"
+#define DRV_NAME	"nicvf"
 #define DRV_VERSION	"1.0"
 
 /* Supported devices */
@@ -65,6 +67,11 @@ module_param(cpi_alg, int, S_IRUGO);
 MODULE_PARM_DESC(cpi_alg,
 		 "PFC algorithm (0=none, 1=VLAN, 2=VLAN16, 3=IP Diffserv)");
 
+struct nicvf_xdp_tx {
+	u64 dma_addr;
+	u8  qidx;
+};
+
 static inline u8 nicvf_netdev_qidx(struct nicvf *nic, u8 qidx)
 {
 	if (nic->sqs_mode)
@@ -500,14 +507,29 @@ static int nicvf_init_resources(struct nicvf *nic)
 	return 0;
 }
 
+static void nicvf_unmap_page(struct nicvf *nic, struct page *page, u64 dma_addr)
+{
+	/* Check if it's a recycled page; if not, unmap the DMA mapping.
+	 * A recycled page holds an extra reference.
+	 */
+	if (page_ref_count(page) == 1) {
+		dma_addr &= PAGE_MASK;
+		dma_unmap_page_attrs(&nic->pdev->dev, dma_addr,
+				     RCV_FRAG_LEN + XDP_HEADROOM,
+				     DMA_FROM_DEVICE,
+				     DMA_ATTR_SKIP_CPU_SYNC);
+	}
+}
+
 static inline bool nicvf_xdp_rx(struct nicvf *nic, struct bpf_prog *prog,
 				struct cqe_rx_t *cqe_rx, struct snd_queue *sq,
-				struct sk_buff **skb)
+				struct rcv_queue *rq, struct sk_buff **skb)
 {
 	struct xdp_buff xdp;
 	struct page *page;
+	struct nicvf_xdp_tx *xdp_tx = NULL;
 	u32 action;
-	u16 len, offset = 0;
+	u16 len, err, offset = 0;
 	u64 dma_addr, cpu_addr;
 	void *orig_data;
 
@@ -521,10 +543,11 @@ static inline bool nicvf_xdp_rx(struct nicvf *nic, struct bpf_prog *prog,
 	cpu_addr = (u64)phys_to_virt(cpu_addr);
 	page = virt_to_page((void *)cpu_addr);
 
-	xdp.data_hard_start = page_address(page);
+	xdp.data_hard_start = page_address(page) + RCV_BUF_HEADROOM;
 	xdp.data = (void *)cpu_addr;
 	xdp_set_data_meta_invalid(&xdp);
 	xdp.data_end = xdp.data + len;
+	xdp.rxq = &rq->xdp_rxq;
 	orig_data = xdp.data;
 
 	rcu_read_lock();
@@ -540,18 +563,7 @@ static inline bool nicvf_xdp_rx(struct nicvf *nic, struct bpf_prog *prog,
 
 	switch (action) {
 	case XDP_PASS:
-		/* Check if it's a recycled page, if not
-		 * unmap the DMA mapping.
-		 *
-		 * Recycled page holds an extra reference.
-		 */
-		if (page_ref_count(page) == 1) {
-			dma_addr &= PAGE_MASK;
-			dma_unmap_page_attrs(&nic->pdev->dev, dma_addr,
-					     RCV_FRAG_LEN + XDP_PACKET_HEADROOM,
-					     DMA_FROM_DEVICE,
-					     DMA_ATTR_SKIP_CPU_SYNC);
-		}
+		nicvf_unmap_page(nic, page, dma_addr);
 
 		/* Build SKB and pass on packet to network stack */
 		*skb = build_skb(xdp.data,
@@ -564,6 +576,20 @@ static inline bool nicvf_xdp_rx(struct nicvf *nic, struct bpf_prog *prog,
 	case XDP_TX:
 		nicvf_xdp_sq_append_pkt(nic, sq, (u64)xdp.data, dma_addr, len);
 		return true;
+	case XDP_REDIRECT:
+		/* Save DMA address for use while transmitting */
+		xdp_tx = (struct nicvf_xdp_tx *)page_address(page);
+		xdp_tx->dma_addr = dma_addr;
+		xdp_tx->qidx = nicvf_netdev_qidx(nic, cqe_rx->rq_idx);
+
+		err = xdp_do_redirect(nic->pnicvf->netdev, &xdp, prog);
+		if (!err)
+			return true;
+
+		/* Free the page on error */
+		nicvf_unmap_page(nic, page, dma_addr);
+		put_page(page);
+		break;
 	default:
 		bpf_warn_invalid_xdp_action(action);
 		/* fall through */
@@ -571,24 +597,51 @@ static inline bool nicvf_xdp_rx(struct nicvf *nic, struct bpf_prog *prog,
 		trace_xdp_exception(nic->netdev, prog, action);
 		/* fall through */
 	case XDP_DROP:
-		/* Check if it's a recycled page, if not
-		 * unmap the DMA mapping.
-		 *
-		 * Recycled page holds an extra reference.
-		 */
-		if (page_ref_count(page) == 1) {
-			dma_addr &= PAGE_MASK;
-			dma_unmap_page_attrs(&nic->pdev->dev, dma_addr,
-					     RCV_FRAG_LEN + XDP_PACKET_HEADROOM,
-					     DMA_FROM_DEVICE,
-					     DMA_ATTR_SKIP_CPU_SYNC);
-		}
+		nicvf_unmap_page(nic, page, dma_addr);
 		put_page(page);
 		return true;
 	}
 	return false;
 }
 
+static void nicvf_snd_ptp_handler(struct net_device *netdev,
+				  struct cqe_send_t *cqe_tx)
+{
+	struct nicvf *nic = netdev_priv(netdev);
+	struct skb_shared_hwtstamps ts;
+	u64 ns;
+
+	nic = nic->pnicvf;
+
+	/* Sync for 'ptp_skb' */
+	smp_rmb();
+
+	/* New timestamp request can be queued now */
+	atomic_set(&nic->tx_ptp_skbs, 0);
+
+	/* Check for timestamp requested skb */
+	if (!nic->ptp_skb)
+		return;
+
+	/* Check if timestamping timed out; the timeout is configured to 10us */
+	if (cqe_tx->send_status == CQ_TX_ERROP_TSTMP_TIMEOUT ||
+	    cqe_tx->send_status == CQ_TX_ERROP_TSTMP_CONFLICT)
+		goto no_tstamp;
+
+	/* Get the timestamp */
+	memset(&ts, 0, sizeof(ts));
+	ns = cavium_ptp_tstamp2time(nic->ptp_clock, cqe_tx->ptp_timestamp);
+	ts.hwtstamp = ns_to_ktime(ns);
+	skb_tstamp_tx(nic->ptp_skb, &ts);
+
+no_tstamp:
+	/* Free the original skb */
+	dev_kfree_skb_any(nic->ptp_skb);
+	nic->ptp_skb = NULL;
+	/* Sync 'ptp_skb' */
+	smp_wmb();
+}
+
 static void nicvf_snd_pkt_handler(struct net_device *netdev,
 				  struct cqe_send_t *cqe_tx,
 				  int budget, int *subdesc_cnt,
@@ -645,7 +698,12 @@ static void nicvf_snd_pkt_handler(struct net_device *netdev,
 		prefetch(skb);
 		(*tx_pkts)++;
 		*tx_bytes += skb->len;
-		napi_consume_skb(skb, budget);
+		/* If timestamp is requested for this skb, don't free it */
+		if (skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS &&
+		    !nic->pnicvf->ptp_skb)
+			nic->pnicvf->ptp_skb = skb;
+		else
+			napi_consume_skb(skb, budget);
 		sq->skbuff[cqe_tx->sqe_ptr] = (u64)NULL;
 	} else {
 		/* In case of SW TSO on 88xx, only last segment will have
@@ -684,9 +742,25 @@ static inline void nicvf_set_rxhash(struct net_device *netdev,
 	skb_set_hash(skb, hash, hash_type);
 }
 
+static inline void nicvf_set_rxtstamp(struct nicvf *nic, struct sk_buff *skb)
+{
+	u64 ns;
+
+	if (!nic->ptp_clock || !nic->hw_rx_tstamp)
+		return;
+
+	/* The first 8 bytes are the timestamp */
+	ns = cavium_ptp_tstamp2time(nic->ptp_clock,
+				    be64_to_cpu(*(__be64 *)skb->data));
+	skb_hwtstamps(skb)->hwtstamp = ns_to_ktime(ns);
+
+	__skb_pull(skb, 8);
+}
+
 static void nicvf_rcv_pkt_handler(struct net_device *netdev,
 				  struct napi_struct *napi,
-				  struct cqe_rx_t *cqe_rx, struct snd_queue *sq)
+				  struct cqe_rx_t *cqe_rx,
+				  struct snd_queue *sq, struct rcv_queue *rq)
 {
 	struct sk_buff *skb = NULL;
 	struct nicvf *nic = netdev_priv(netdev);
@@ -712,7 +786,7 @@ static void nicvf_rcv_pkt_handler(struct net_device *netdev,
 	/* For XDP, ignore pkts spanning multiple pages */
 	if (nic->xdp_prog && (cqe_rx->rb_cnt == 1)) {
 		/* Packet consumed by XDP */
-		if (nicvf_xdp_rx(snic, nic->xdp_prog, cqe_rx, sq, &skb))
+		if (nicvf_xdp_rx(snic, nic->xdp_prog, cqe_rx, sq, rq, &skb))
 			return;
 	} else {
 		skb = nicvf_get_rcv_skb(snic, cqe_rx,
@@ -734,6 +808,7 @@ static void nicvf_rcv_pkt_handler(struct net_device *netdev,
 		return;
 	}
 
+	nicvf_set_rxtstamp(nic, skb);
 	nicvf_set_rxhash(netdev, cqe_rx, skb);
 
 	skb_record_rx_queue(skb, rq_idx);
@@ -769,6 +844,7 @@ static int nicvf_cq_intr_handler(struct net_device *netdev, u8 cq_idx,
 	struct cqe_rx_t *cq_desc;
 	struct netdev_queue *txq;
 	struct snd_queue *sq = &qs->sq[cq_idx];
+	struct rcv_queue *rq = &qs->rq[cq_idx];
 	unsigned int tx_pkts = 0, tx_bytes = 0, txq_idx;
 
 	spin_lock_bh(&cq->lock);
@@ -799,7 +875,7 @@ loop:
 
 		switch (cq_desc->cqe_type) {
 		case CQE_TYPE_RX:
-			nicvf_rcv_pkt_handler(netdev, napi, cq_desc, sq);
+			nicvf_rcv_pkt_handler(netdev, napi, cq_desc, sq, rq);
 			work_done++;
 		break;
 		case CQE_TYPE_SEND:
@@ -808,10 +884,12 @@ loop:
 					      &tx_pkts, &tx_bytes);
 			tx_done++;
 		break;
+		case CQE_TYPE_SEND_PTP:
+			nicvf_snd_ptp_handler(netdev, (void *)cq_desc);
+		break;
 		case CQE_TYPE_INVALID:
 		case CQE_TYPE_RX_SPLIT:
 		case CQE_TYPE_RX_TCP:
-		case CQE_TYPE_SEND_PTP:
 			/* Ignore for now */
 		break;
 		}
@@ -1307,12 +1385,28 @@ int nicvf_stop(struct net_device *netdev)
 
 	nicvf_free_cq_poll(nic);
 
+	/* Free any pending SKB saved while awaiting its Tx timestamp */
+	if (nic->ptp_skb) {
+		dev_kfree_skb_any(nic->ptp_skb);
+		nic->ptp_skb = NULL;
+	}
+
 	/* Clear multiqset info */
 	nic->pnicvf = nic;
 
 	return 0;
 }
 
+static int nicvf_config_hw_rx_tstamp(struct nicvf *nic, bool enable)
+{
+	union nic_mbx mbx = {};
+
+	mbx.ptp.msg = NIC_MBOX_MSG_PTP_CFG;
+	mbx.ptp.enable = enable;
+
+	return nicvf_send_msg_to_pf(nic, &mbx);
+}
+
 static int nicvf_update_hw_max_frs(struct nicvf *nic, int mtu)
 {
 	union nic_mbx mbx = {};
@@ -1382,6 +1476,12 @@ int nicvf_open(struct net_device *netdev)
 	if (nic->sqs_mode)
 		nicvf_get_primary_vf_struct(nic);
 
+	/* Configure PTP timestamp */
+	if (nic->ptp_clock)
+		nicvf_config_hw_rx_tstamp(nic, nic->hw_rx_tstamp);
+	atomic_set(&nic->tx_ptp_skbs, 0);
+	nic->ptp_skb = NULL;
+
 	/* Configure receive side scaling and MTU */
 	if (!nic->sqs_mode) {
 		nicvf_rss_init(nic);
@@ -1764,6 +1864,117 @@ static int nicvf_xdp(struct net_device *netdev, struct netdev_bpf *xdp)
 	}
 }
 
+static int nicvf_xdp_xmit(struct net_device *netdev, struct xdp_buff *xdp)
+{
+	struct nicvf *nic = netdev_priv(netdev);
+	struct nicvf *snic = nic;
+	struct nicvf_xdp_tx *xdp_tx;
+	struct snd_queue *sq;
+	struct page *page;
+	int err, qidx;
+
+	if (!netif_running(netdev) || !nic->xdp_prog)
+		return -EINVAL;
+
+	page = virt_to_page(xdp->data);
+	xdp_tx = (struct nicvf_xdp_tx *)page_address(page);
+	qidx = xdp_tx->qidx;
+
+	if (xdp_tx->qidx >= nic->xdp_tx_queues)
+		return -EINVAL;
+
+	/* Get secondary Qset's info */
+	if (xdp_tx->qidx >= MAX_SND_QUEUES_PER_QS) {
+		qidx = xdp_tx->qidx / MAX_SND_QUEUES_PER_QS;
+		snic = (struct nicvf *)nic->snicvf[qidx - 1];
+		if (!snic)
+			return -EINVAL;
+		qidx = xdp_tx->qidx % MAX_SND_QUEUES_PER_QS;
+	}
+
+	sq = &snic->qs->sq[qidx];
+	err = nicvf_xdp_sq_append_pkt(snic, sq, (u64)xdp->data,
+				      xdp_tx->dma_addr,
+				      xdp->data_end - xdp->data);
+	if (err)
+		return -ENOMEM;
+
+	nicvf_xdp_sq_doorbell(snic, sq, qidx);
+	return 0;
+}
+
+static void nicvf_xdp_flush(struct net_device *dev)
+{
+}
+
+static int nicvf_config_hwtstamp(struct net_device *netdev, struct ifreq *ifr)
+{
+	struct hwtstamp_config config;
+	struct nicvf *nic = netdev_priv(netdev);
+
+	if (!nic->ptp_clock)
+		return -ENODEV;
+
+	if (copy_from_user(&config, ifr->ifr_data, sizeof(config)))
+		return -EFAULT;
+
+	/* reserved for future extensions */
+	if (config.flags)
+		return -EINVAL;
+
+	switch (config.tx_type) {
+	case HWTSTAMP_TX_OFF:
+	case HWTSTAMP_TX_ON:
+		break;
+	default:
+		return -ERANGE;
+	}
+
+	switch (config.rx_filter) {
+	case HWTSTAMP_FILTER_NONE:
+		nic->hw_rx_tstamp = false;
+		break;
+	case HWTSTAMP_FILTER_ALL:
+	case HWTSTAMP_FILTER_SOME:
+	case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
+	case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
+	case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
+	case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
+	case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
+	case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
+	case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
+	case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
+	case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
+	case HWTSTAMP_FILTER_PTP_V2_EVENT:
+	case HWTSTAMP_FILTER_PTP_V2_SYNC:
+	case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
+		nic->hw_rx_tstamp = true;
+		config.rx_filter = HWTSTAMP_FILTER_ALL;
+		break;
+	default:
+		return -ERANGE;
+	}
+
+	if (netif_running(netdev))
+		nicvf_config_hw_rx_tstamp(nic, nic->hw_rx_tstamp);
+
+	if (copy_to_user(ifr->ifr_data, &config, sizeof(config)))
+		return -EFAULT;
+
+	return 0;
+}
+
+static int nicvf_ioctl(struct net_device *netdev, struct ifreq *req, int cmd)
+{
+	switch (cmd) {
+	case SIOCSHWTSTAMP:
+		return nicvf_config_hwtstamp(netdev, req);
+	default:
+		return -EOPNOTSUPP;
+	}
+}
+
 static const struct net_device_ops nicvf_netdev_ops = {
 	.ndo_open		= nicvf_open,
 	.ndo_stop		= nicvf_stop,
@@ -1775,6 +1986,9 @@ static const struct net_device_ops nicvf_netdev_ops = {
 	.ndo_fix_features       = nicvf_fix_features,
 	.ndo_set_features       = nicvf_set_features,
 	.ndo_bpf		= nicvf_xdp,
+	.ndo_xdp_xmit		= nicvf_xdp_xmit,
+	.ndo_xdp_flush          = nicvf_xdp_flush,
+	.ndo_do_ioctl           = nicvf_ioctl,
 };
 
 static int nicvf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
@@ -1784,6 +1998,16 @@ static int nicvf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
 	struct nicvf *nic;
 	int    err, qcount;
 	u16    sdevid;
+	struct cavium_ptp *ptp_clock;
+
+	ptp_clock = cavium_ptp_get();
+	if (IS_ERR(ptp_clock)) {
+		if (PTR_ERR(ptp_clock) == -ENODEV)
+			/* In a virtualized environment we proceed without PTP */
+			ptp_clock = NULL;
+		else
+			return PTR_ERR(ptp_clock);
+	}
 
 	err = pci_enable_device(pdev);
 	if (err) {
@@ -1833,6 +2057,12 @@ static int nicvf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
 	nic->pdev = pdev;
 	nic->pnicvf = nic;
 	nic->max_queues = qcount;
+	/* If the number of CPUs is too low, there won't be any queues left
+	 * for XDP_TX, hence double it.
+	 */
+	if (!nic->t88)
+		nic->max_queues *= 2;
+	nic->ptp_clock = ptp_clock;
 
 	/* MAP VF's configuration registers */
 	nic->reg_base = pcim_iomap(pdev, PCI_CFG_REG_BAR_NUM, 0);
@@ -1946,6 +2176,7 @@ static void nicvf_remove(struct pci_dev *pdev)
 	pci_set_drvdata(pdev, NULL);
 	if (nic->drv_stats)
 		free_percpu(nic->drv_stats);
+	cavium_ptp_put(nic->ptp_clock);
 	free_netdev(netdev);
 	pci_release_regions(pdev);
 	pci_disable_device(pdev);
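
And the matching user-space sketch for the SIOCSHWTSTAMP path that
nicvf_ioctl() now handles; this needs CAP_NET_ADMIN, and "eth0" is again a
placeholder interface name:

#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <net/if.h>
#include <linux/sockios.h>
#include <linux/net_tstamp.h>

int main(void)
{
	struct hwtstamp_config cfg = {
		.tx_type   = HWTSTAMP_TX_ON,
		.rx_filter = HWTSTAMP_FILTER_ALL,
	};
	struct ifreq ifr;
	int fd = socket(AF_INET, SOCK_DGRAM, 0);

	if (fd < 0)
		return 1;
	memset(&ifr, 0, sizeof(ifr));
	strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);
	ifr.ifr_data = (char *)&cfg;

	if (ioctl(fd, SIOCSHWTSTAMP, &ifr) < 0)
		perror("SIOCSHWTSTAMP");
	else	/* the driver may upgrade the filter, e.g. to FILTER_ALL */
		printf("granted rx_filter: %d\n", cfg.rx_filter);
	close(fd);
	return 0;
}
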
diff --git a/drivers/net/ethernet/cavium/thunder/nicvf_queues.c b/drivers/net/ethernet/cavium/thunder/nicvf_queues.c
index a3d12dbde95b..3eae9ff9b53a 100644
--- a/drivers/net/ethernet/cavium/thunder/nicvf_queues.c
+++ b/drivers/net/ethernet/cavium/thunder/nicvf_queues.c
@@ -204,7 +204,7 @@ static inline int nicvf_alloc_rcv_buffer(struct nicvf *nic, struct rbdr *rbdr,
 
 	/* Reserve space for header modifications by BPF program */
 	if (rbdr->is_xdp)
-		buf_len += XDP_PACKET_HEADROOM;
+		buf_len += XDP_HEADROOM;
 
 	/* Check if it's recycled */
 	if (pgcache)
@@ -224,8 +224,9 @@ ret:
 			nic->rb_page = NULL;
 			return -ENOMEM;
 		}
+
 		if (pgcache)
-			pgcache->dma_addr = *rbuf + XDP_PACKET_HEADROOM;
+			pgcache->dma_addr = *rbuf + XDP_HEADROOM;
 		nic->rb_page_offset += buf_len;
 	}
 
@@ -759,6 +760,7 @@ static void nicvf_rcv_queue_config(struct nicvf *nic, struct queue_set *qs,
 
 	if (!rq->enable) {
 		nicvf_reclaim_rcv_queue(nic, qs, qidx);
+		xdp_rxq_info_unreg(&rq->xdp_rxq);
 		return;
 	}
 
@@ -771,6 +773,9 @@ static void nicvf_rcv_queue_config(struct nicvf *nic, struct queue_set *qs,
 	/* all writes of RBDR data to be loaded into L2 Cache as well*/
 	rq->caching = 1;
 
+	/* Driver has no proper error path for a failed XDP RX-queue info reg */
+	WARN_ON(xdp_rxq_info_reg(&rq->xdp_rxq, nic->netdev, qidx) < 0);
+
 	/* Send a mailbox msg to PF to config RQ */
 	mbx.rq.msg = NIC_MBOX_MSG_RQ_CFG;
 	mbx.rq.qs_num = qs->vnic_id;
@@ -977,6 +982,9 @@ void nicvf_qset_config(struct nicvf *nic, bool enable)
 		qs_cfg->be = 1;
 #endif
 		qs_cfg->vnic = qs->vnic_id;
+		/* Enable Tx timestamping capability */
+		if (nic->ptp_clock)
+			qs_cfg->send_tstmp_ena = 1;
 	}
 	nicvf_send_msg_to_pf(nic, &mbx);
 }
@@ -1236,7 +1244,7 @@ int nicvf_xdp_sq_append_pkt(struct nicvf *nic, struct snd_queue *sq,
 	int qentry;
 
 	if (subdesc_cnt > sq->xdp_free_cnt)
-		return 0;
+		return -1;
 
 	qentry = nicvf_get_sq_desc(sq, subdesc_cnt);
 
@@ -1247,7 +1255,7 @@ int nicvf_xdp_sq_append_pkt(struct nicvf *nic, struct snd_queue *sq,
 
 	sq->xdp_desc_cnt += subdesc_cnt;
 
-	return 1;
+	return 0;
 }
 
 /* Calculate no of SQ subdescriptors needed to transmit all
@@ -1384,6 +1392,29 @@ nicvf_sq_add_hdr_subdesc(struct nicvf *nic, struct snd_queue *sq, int qentry,
 		hdr->inner_l3_offset = skb_network_offset(skb) - 2;
 		this_cpu_inc(nic->pnicvf->drv_stats->tx_tso);
 	}
+
+	/* Check if timestamp is requested */
+	if (!(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)) {
+		skb_tx_timestamp(skb);
+		return;
+	}
+
+	/* Tx timestamping not supported along with TSO, so ignore request */
+	if (skb_shinfo(skb)->gso_size)
+		return;
+
+	/* HW supports only a single outstanding packet to timestamp */
+	if (!atomic_add_unless(&nic->pnicvf->tx_ptp_skbs, 1, 1))
+		return;
+
+	/* Mark the SKB for later reference */
+	skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
+
+	/* Finally, enable timestamp generation.
+	 * Since 'post_cqe' is also set, two CQEs will be posted
+	 * for this packet, i.e. CQE_TYPE_SEND and CQE_TYPE_SEND_PTP.
+	 */
+	hdr->tstmp = 1;
 }
 
 /* SQ GATHER subdescriptor
@@ -1625,7 +1656,7 @@ static void nicvf_unmap_rcv_buffer(struct nicvf *nic, u64 dma_addr,
 		if (page_ref_count(page) != 1)
 			return;
 
-		len += XDP_PACKET_HEADROOM;
+		len += XDP_HEADROOM;
 		/* Receive buffers in XDP mode are mapped from page start */
 		dma_addr &= PAGE_MASK;
 	}
diff --git a/drivers/net/ethernet/cavium/thunder/nicvf_queues.h b/drivers/net/ethernet/cavium/thunder/nicvf_queues.h
index 67d1a3230773..7d1e4e2aaad0 100644
--- a/drivers/net/ethernet/cavium/thunder/nicvf_queues.h
+++ b/drivers/net/ethernet/cavium/thunder/nicvf_queues.h
@@ -11,6 +11,8 @@
 
 #include <linux/netdevice.h>
 #include <linux/iommu.h>
+#include <linux/bpf.h>
+#include <net/xdp.h>
 #include "q_struct.h"
 
 #define MAX_QUEUE_SET			128
@@ -92,6 +94,9 @@
 #define RCV_FRAG_LEN	 (SKB_DATA_ALIGN(DMA_BUFFER_LEN + NET_SKB_PAD) + \
 			 SKB_DATA_ALIGN(sizeof(struct skb_shared_info)))
 
+#define RCV_BUF_HEADROOM	128 /* To store dma address for XDP redirect */
+#define XDP_HEADROOM		(XDP_PACKET_HEADROOM + RCV_BUF_HEADROOM)
+
 #define MAX_CQES_FOR_TX		((SND_QUEUE_LEN / MIN_SQ_DESC_PER_PKT_XMIT) * \
 				 MAX_CQE_PER_PKT_XMIT)
 
@@ -251,6 +256,7 @@ struct rcv_queue {
 	u8		start_qs_rbdr_idx; /* RBDR idx in the above QS */
 	u8		caching;
 	struct		rx_tx_queue_stats stats;
+	struct xdp_rxq_info xdp_rxq;
 } ____cacheline_aligned_in_smp;
 
 struct cmp_queue {
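
A small sketch of the receive-buffer layout implied by RCV_BUF_HEADROOM and
XDP_HEADROOM above, assuming the in-tree XDP_PACKET_HEADROOM of 256 bytes:
the first 128 bytes of the page hold the struct nicvf_xdp_tx cookie (DMA
address and queue index) that nicvf_xdp_xmit() reads back on XDP_REDIRECT,
and the BPF program only sees headroom starting at page + RCV_BUF_HEADROOM:

#include <stdio.h>

#define RCV_BUF_HEADROOM	128
#define XDP_PACKET_HEADROOM	256	/* from include/uapi/linux/bpf.h */
#define XDP_HEADROOM		(XDP_PACKET_HEADROOM + RCV_BUF_HEADROOM)

int main(void)
{
	/* page + 0   .. 127 : nicvf_xdp_tx cookie (dma_addr, qidx)
	 * page + 128 .. 383 : headroom usable by the BPF program
	 * page + 384 ..     : packet data (xdp.data)
	 */
	printf("cookie at 0, bpf headroom at %d, data at %d\n",
	       RCV_BUF_HEADROOM, XDP_HEADROOM);
	return 0;
}
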
diff --git a/drivers/net/ethernet/cavium/thunder/thunder_bgx.c b/drivers/net/ethernet/cavium/thunder/thunder_bgx.c
index 5e5c4d7796b8..91d34ea40e2c 100644
--- a/drivers/net/ethernet/cavium/thunder/thunder_bgx.c
+++ b/drivers/net/ethernet/cavium/thunder/thunder_bgx.c
@@ -21,7 +21,7 @@
 #include "nic.h"
 #include "thunder_bgx.h"
 
-#define DRV_NAME	"thunder-BGX"
+#define DRV_NAME	"thunder_bgx"
 #define DRV_VERSION	"1.0"
 
 struct lmac {
@@ -245,6 +245,35 @@ void bgx_lmac_rx_tx_enable(int node, int bgx_idx, int lmacid, bool enable)
 }
 EXPORT_SYMBOL(bgx_lmac_rx_tx_enable);
 
+/* Enables or disables timestamp insertion by BGX for Rx packets */
+void bgx_config_timestamping(int node, int bgx_idx, int lmacid, bool enable)
+{
+	struct bgx *bgx = get_bgx(node, bgx_idx);
+	struct lmac *lmac;
+	u64 csr_offset, cfg;
+
+	if (!bgx)
+		return;
+
+	lmac = &bgx->lmac[lmacid];
+
+	if (lmac->lmac_type == BGX_MODE_SGMII ||
+	    lmac->lmac_type == BGX_MODE_QSGMII ||
+	    lmac->lmac_type == BGX_MODE_RGMII)
+		csr_offset = BGX_GMP_GMI_RXX_FRM_CTL;
+	else
+		csr_offset = BGX_SMUX_RX_FRM_CTL;
+
+	cfg = bgx_reg_read(bgx, lmacid, csr_offset);
+
+	if (enable)
+		cfg |= BGX_PKT_RX_PTP_EN;
+	else
+		cfg &= ~BGX_PKT_RX_PTP_EN;
+	bgx_reg_write(bgx, lmacid, csr_offset, cfg);
+}
+EXPORT_SYMBOL(bgx_config_timestamping);
+
 void bgx_lmac_get_pfc(int node, int bgx_idx, int lmacid, void *pause)
 {
 	struct pfc *pfc = (struct pfc *)pause;
diff --git a/drivers/net/ethernet/cavium/thunder/thunder_bgx.h b/drivers/net/ethernet/cavium/thunder/thunder_bgx.h
index 23acdc5ab896..5a7567d31138 100644
--- a/drivers/net/ethernet/cavium/thunder/thunder_bgx.h
+++ b/drivers/net/ethernet/cavium/thunder/thunder_bgx.h
@@ -122,6 +122,8 @@
 #define  SPU_DBG_CTL_AN_NONCE_MCT_DIS		BIT_ULL(29)
 
 #define BGX_SMUX_RX_INT			0x20000
+#define BGX_SMUX_RX_FRM_CTL		0x20020
+#define  BGX_PKT_RX_PTP_EN			BIT_ULL(12)
 #define BGX_SMUX_RX_JABBER		0x20030
 #define BGX_SMUX_RX_CTL			0x20048
 #define  SMU_RX_CTL_STATUS			(3ull << 0)
@@ -172,6 +174,7 @@
 #define  GMI_PORT_CFG_SPEED_MSB			BIT_ULL(8)
 #define  GMI_PORT_CFG_RX_IDLE			BIT_ULL(12)
 #define  GMI_PORT_CFG_TX_IDLE			BIT_ULL(13)
+#define BGX_GMP_GMI_RXX_FRM_CTL		0x38028
 #define BGX_GMP_GMI_RXX_JABBER		0x38038
 #define BGX_GMP_GMI_TXX_THRESH		0x38210
 #define BGX_GMP_GMI_TXX_APPEND		0x38218
@@ -223,6 +226,7 @@ void bgx_set_lmac_mac(int node, int bgx_idx, int lmacid, const u8 *mac);
 void bgx_get_lmac_link_state(int node, int bgx_idx, int lmacid, void *status);
 void bgx_lmac_internal_loopback(int node, int bgx_idx,
 				int lmac_idx, bool enable);
+void bgx_config_timestamping(int node, int bgx_idx, int lmacid, bool enable);
 void bgx_lmac_get_pfc(int node, int bgx_idx, int lmacid, void *pause);
 void bgx_lmac_set_pfc(int node, int bgx_idx, int lmacid, void *pause);
 
diff --git a/drivers/net/ethernet/cavium/thunder/thunder_xcv.c b/drivers/net/ethernet/cavium/thunder/thunder_xcv.c
index 578c7f8f11bf..2d5e8dab1f70 100644
--- a/drivers/net/ethernet/cavium/thunder/thunder_xcv.c
+++ b/drivers/net/ethernet/cavium/thunder/thunder_xcv.c
@@ -20,7 +20,7 @@
 #include "nic.h"
 #include "thunder_bgx.h"
 
-#define DRV_NAME	"thunder-xcv"
+#define DRV_NAME	"thunder_xcv"
 #define DRV_VERSION	"1.0"
 
 /* Register offsets */
diff --git a/drivers/net/ethernet/chelsio/Kconfig b/drivers/net/ethernet/chelsio/Kconfig
index 5713e83be08c..e2cdfa75673f 100644
--- a/drivers/net/ethernet/chelsio/Kconfig
+++ b/drivers/net/ethernet/chelsio/Kconfig
@@ -69,6 +69,7 @@ config CHELSIO_T4
 	depends on PCI && (IPV6 || IPV6=n)
 	select FW_LOADER
 	select MDIO
+	select ZLIB_DEFLATE
 	---help---
 	  This driver supports Chelsio T4, T5 & T6 based gigabit, 10Gb Ethernet
 	  adapter and T5/T6 based 40Gb and T6 based 25Gb, 50Gb and 100Gb
diff --git a/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c b/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c
index 6a015362c340..185fe8df7628 100644
--- a/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c
+++ b/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c
@@ -3304,6 +3304,7 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
 		netdev->ethtool_ops = &cxgb_ethtool_ops;
 		netdev->min_mtu = 81;
 		netdev->max_mtu = ETH_MAX_MTU;
+		netdev->dev_port = pi->port_id;
 	}
 
 	pci_set_drvdata(pdev, adapter);
diff --git a/drivers/net/ethernet/chelsio/cxgb4/Makefile b/drivers/net/ethernet/chelsio/cxgb4/Makefile
index 8c9c6b0d2e5d..53b6a02c778e 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/Makefile
+++ b/drivers/net/ethernet/chelsio/cxgb4/Makefile
@@ -8,7 +8,7 @@ obj-$(CONFIG_CHELSIO_T4) += cxgb4.o
 cxgb4-objs := cxgb4_main.o l2t.o smt.o t4_hw.o sge.o clip_tbl.o cxgb4_ethtool.o \
 	      cxgb4_uld.o sched.o cxgb4_filter.o cxgb4_tc_u32.o \
 	      cxgb4_ptp.o cxgb4_tc_flower.o cxgb4_cudbg.o \
-	      cudbg_common.o cudbg_lib.o
+	      cudbg_common.o cudbg_lib.o cudbg_zlib.o
 cxgb4-$(CONFIG_CHELSIO_T4_DCB) +=  cxgb4_dcb.o
 cxgb4-$(CONFIG_CHELSIO_T4_FCOE) +=  cxgb4_fcoe.o
 cxgb4-$(CONFIG_DEBUG_FS) += cxgb4_debugfs.o
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cudbg_common.c b/drivers/net/ethernet/chelsio/cxgb4/cudbg_common.c
index f78ba1743b5a..8edc49827af0 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cudbg_common.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cudbg_common.c
@@ -19,7 +19,8 @@
 #include "cudbg_if.h"
 #include "cudbg_lib_common.h"
 
-int cudbg_get_buff(struct cudbg_buffer *pdbg_buff, u32 size,
+int cudbg_get_buff(struct cudbg_init *pdbg_init,
+		   struct cudbg_buffer *pdbg_buff, u32 size,
 		   struct cudbg_buffer *pin_buff)
 {
 	u32 offset;
@@ -28,17 +29,30 @@ int cudbg_get_buff(struct cudbg_buffer *pdbg_buff, u32 size,
 	if (offset + size > pdbg_buff->size)
 		return CUDBG_STATUS_NO_MEM;
 
+	if (pdbg_init->compress_type != CUDBG_COMPRESSION_NONE) {
+		if (size > pdbg_init->compress_buff_size)
+			return CUDBG_STATUS_NO_MEM;
+
+		pin_buff->data = (char *)pdbg_init->compress_buff;
+		pin_buff->offset = 0;
+		pin_buff->size = size;
+		return 0;
+	}
+
 	pin_buff->data = (char *)pdbg_buff->data + offset;
 	pin_buff->offset = offset;
 	pin_buff->size = size;
-	pdbg_buff->size -= size;
 	return 0;
 }
 
-void cudbg_put_buff(struct cudbg_buffer *pin_buff,
-		    struct cudbg_buffer *pdbg_buff)
+void cudbg_put_buff(struct cudbg_init *pdbg_init,
+		    struct cudbg_buffer *pin_buff)
 {
-	pdbg_buff->size += pin_buff->size;
+	/* Clear compression buffer for re-use */
+	if (pdbg_init->compress_type != CUDBG_COMPRESSION_NONE)
+		memset(pdbg_init->compress_buff, 0,
+		       pdbg_init->compress_buff_size);
+
 	pin_buff->data = NULL;
 	pin_buff->offset = 0;
 	pin_buff->size = 0;
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cudbg_entity.h b/drivers/net/ethernet/chelsio/cxgb4/cudbg_entity.h
index 605689957496..b57acb8dc35b 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cudbg_entity.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/cudbg_entity.h
@@ -18,17 +18,15 @@
 #ifndef __CUDBG_ENTITY_H__
 #define __CUDBG_ENTITY_H__
 
-#define EDC0_FLAG 3
-#define EDC1_FLAG 4
+#define EDC0_FLAG 0
+#define EDC1_FLAG 1
+#define MC_FLAG 2
+#define MC0_FLAG 3
+#define MC1_FLAG 4
+#define HMA_FLAG 5
 
 #define CUDBG_ENTITY_SIGNATURE 0xCCEDB001
 
-struct card_mem {
-	u16 size_edc0;
-	u16 size_edc1;
-	u16 mem_flag;
-};
-
 struct cudbg_mbox_log {
 	struct mbox_cmd entry;
 	u32 hi[MBOX_LEN / 8];
@@ -87,6 +85,48 @@ struct cudbg_tp_la {
 	u8 data[0];
 };
 
+static const char * const cudbg_region[] = {
+	"DBQ contexts:", "IMSG contexts:", "FLM cache:", "TCBs:",
+	"Pstructs:", "Timers:", "Rx FL:", "Tx FL:", "Pstruct FL:",
+	"Tx payload:", "Rx payload:", "LE hash:", "iSCSI region:",
+	"TDDP region:", "TPT region:", "STAG region:", "RQ region:",
+	"RQUDP region:", "PBL region:", "TXPBL region:",
+	"DBVFIFO region:", "ULPRX state:", "ULPTX state:",
+	"On-chip queues:"
+};
+
+/* Memory region info relative to current memory (i.e. wrt 0). */
+struct cudbg_region_info {
+	bool exist; /* Does the region exist in current memory? */
+	u32 start;  /* Start wrt 0 */
+	u32 end;    /* End wrt 0 */
+};
+
+struct cudbg_mem_desc {
+	u32 base;
+	u32 limit;
+	u32 idx;
+};
+
+struct cudbg_meminfo {
+	struct cudbg_mem_desc avail[4];
+	struct cudbg_mem_desc mem[ARRAY_SIZE(cudbg_region) + 3];
+	u32 avail_c;
+	u32 mem_c;
+	u32 up_ram_lo;
+	u32 up_ram_hi;
+	u32 up_extmem2_lo;
+	u32 up_extmem2_hi;
+	u32 rx_pages_data[3];
+	u32 tx_pages_data[4];
+	u32 p_structs;
+	u32 reserved[12];
+	u32 port_used[4];
+	u32 port_alloc[4];
+	u32 loopback_used[NCHAN];
+	u32 loopback_alloc[NCHAN];
+};
+
 struct cudbg_cim_pif_la {
 	int size;
 	u8 data[0];
@@ -145,6 +185,7 @@ struct cudbg_tid_info_region_rev1 {
 	u32 reserved[16];
 };
 
+#define CUDBG_LOWMEM_MAX_CTXT_QIDS 256
 #define CUDBG_MAX_FL_QIDS 1024
 
 struct cudbg_ch_cntxt {
@@ -334,6 +375,25 @@ static const u32 t5_pm_tx_array[][IREG_NUM_ELEM] = {
 	{0x8FF0, 0x8FF4, 0x10021, 0x1D}, /* t5_pm_tx_regs_10021_to_1003c */
 };
 
+#define CUDBG_NUM_PCIE_CONFIG_REGS 0x61
+
+static const u32 t5_pcie_config_array[][2] = {
+	{0x0, 0x34},
+	{0x3c, 0x40},
+	{0x50, 0x64},
+	{0x70, 0x80},
+	{0x94, 0xa0},
+	{0xb0, 0xb8},
+	{0xd0, 0xd4},
+	{0x100, 0x128},
+	{0x140, 0x148},
+	{0x150, 0x164},
+	{0x170, 0x178},
+	{0x180, 0x194},
+	{0x1a0, 0x1b8},
+	{0x1c0, 0x208},
+};
+
 static const u32 t6_ma_ireg_array[][IREG_NUM_ELEM] = {
 	{0x78f8, 0x78fc, 0xa000, 23}, /* t6_ma_regs_a000_to_a016 */
 	{0x78f8, 0x78fc, 0xa400, 30}, /* t6_ma_regs_a400_to_a41e */
@@ -345,37 +405,55 @@ static const u32 t6_ma_ireg_array2[][IREG_NUM_ELEM] = {
 	{0x78f8, 0x78fc, 0xe640, 13} /* t6_ma_regs_e640_to_e7c0 */
 };
 
-static const u32 t6_up_cim_reg_array[][IREG_NUM_ELEM] = {
-	{0x7b50, 0x7b54, 0x2000, 0x20}, /* up_cim_2000_to_207c */
-	{0x7b50, 0x7b54, 0x2080, 0x1d}, /* up_cim_2080_to_20fc */
-	{0x7b50, 0x7b54, 0x00, 0x20}, /* up_cim_00_to_7c */
-	{0x7b50, 0x7b54, 0x80, 0x20}, /* up_cim_80_to_fc */
-	{0x7b50, 0x7b54, 0x100, 0x11}, /* up_cim_100_to_14c */
-	{0x7b50, 0x7b54, 0x200, 0x10}, /* up_cim_200_to_23c */
-	{0x7b50, 0x7b54, 0x240, 0x2}, /* up_cim_240_to_244 */
-	{0x7b50, 0x7b54, 0x250, 0x2}, /* up_cim_250_to_254 */
-	{0x7b50, 0x7b54, 0x260, 0x2}, /* up_cim_260_to_264 */
-	{0x7b50, 0x7b54, 0x270, 0x2}, /* up_cim_270_to_274 */
-	{0x7b50, 0x7b54, 0x280, 0x20}, /* up_cim_280_to_2fc */
-	{0x7b50, 0x7b54, 0x300, 0x20}, /* up_cim_300_to_37c */
-	{0x7b50, 0x7b54, 0x380, 0x14}, /* up_cim_380_to_3cc */
-
-};
-
-static const u32 t5_up_cim_reg_array[][IREG_NUM_ELEM] = {
-	{0x7b50, 0x7b54, 0x2000, 0x20}, /* up_cim_2000_to_207c */
-	{0x7b50, 0x7b54, 0x2080, 0x19}, /* up_cim_2080_to_20ec */
-	{0x7b50, 0x7b54, 0x00, 0x20}, /* up_cim_00_to_7c */
-	{0x7b50, 0x7b54, 0x80, 0x20}, /* up_cim_80_to_fc */
-	{0x7b50, 0x7b54, 0x100, 0x11}, /* up_cim_100_to_14c */
-	{0x7b50, 0x7b54, 0x200, 0x10}, /* up_cim_200_to_23c */
-	{0x7b50, 0x7b54, 0x240, 0x2}, /* up_cim_240_to_244 */
-	{0x7b50, 0x7b54, 0x250, 0x2}, /* up_cim_250_to_254 */
-	{0x7b50, 0x7b54, 0x260, 0x2}, /* up_cim_260_to_264 */
-	{0x7b50, 0x7b54, 0x270, 0x2}, /* up_cim_270_to_274 */
-	{0x7b50, 0x7b54, 0x280, 0x20}, /* up_cim_280_to_2fc */
-	{0x7b50, 0x7b54, 0x300, 0x20}, /* up_cim_300_to_37c */
-	{0x7b50, 0x7b54, 0x380, 0x14}, /* up_cim_380_to_3cc */
+static const u32 t6_up_cim_reg_array[][IREG_NUM_ELEM + 1] = {
+	{0x7b50, 0x7b54, 0x2000, 0x20, 0}, /* up_cim_2000_to_207c */
+	{0x7b50, 0x7b54, 0x2080, 0x1d, 0}, /* up_cim_2080_to_20fc */
+	{0x7b50, 0x7b54, 0x00, 0x20, 0}, /* up_cim_00_to_7c */
+	{0x7b50, 0x7b54, 0x80, 0x20, 0}, /* up_cim_80_to_fc */
+	{0x7b50, 0x7b54, 0x100, 0x11, 0}, /* up_cim_100_to_14c */
+	{0x7b50, 0x7b54, 0x200, 0x10, 0}, /* up_cim_200_to_23c */
+	{0x7b50, 0x7b54, 0x240, 0x2, 0}, /* up_cim_240_to_244 */
+	{0x7b50, 0x7b54, 0x250, 0x2, 0}, /* up_cim_250_to_254 */
+	{0x7b50, 0x7b54, 0x260, 0x2, 0}, /* up_cim_260_to_264 */
+	{0x7b50, 0x7b54, 0x270, 0x2, 0}, /* up_cim_270_to_274 */
+	{0x7b50, 0x7b54, 0x280, 0x20, 0}, /* up_cim_280_to_2fc */
+	{0x7b50, 0x7b54, 0x300, 0x20, 0}, /* up_cim_300_to_37c */
+	{0x7b50, 0x7b54, 0x380, 0x14, 0}, /* up_cim_380_to_3cc */
+	{0x7b50, 0x7b54, 0x2900, 0x4, 0x4}, /* up_cim_2900_to_3d40 */
+	{0x7b50, 0x7b54, 0x2904, 0x4, 0x4}, /* up_cim_2904_to_3d44 */
+	{0x7b50, 0x7b54, 0x2908, 0x4, 0x4}, /* up_cim_2908_to_3d48 */
+	{0x7b50, 0x7b54, 0x2910, 0x4, 0x4}, /* up_cim_2910_to_3d4c */
+	{0x7b50, 0x7b54, 0x2914, 0x4, 0x4}, /* up_cim_2914_to_3d50 */
+	{0x7b50, 0x7b54, 0x2920, 0x10, 0x10}, /* up_cim_2920_to_2a10 */
+	{0x7b50, 0x7b54, 0x2924, 0x10, 0x10}, /* up_cim_2924_to_2a14 */
+	{0x7b50, 0x7b54, 0x2928, 0x10, 0x10}, /* up_cim_2928_to_2a18 */
+	{0x7b50, 0x7b54, 0x292c, 0x10, 0x10}, /* up_cim_292c_to_2a1c */
+};
+
+static const u32 t5_up_cim_reg_array[][IREG_NUM_ELEM + 1] = {
+	{0x7b50, 0x7b54, 0x2000, 0x20, 0}, /* up_cim_2000_to_207c */
+	{0x7b50, 0x7b54, 0x2080, 0x19, 0}, /* up_cim_2080_to_20ec */
+	{0x7b50, 0x7b54, 0x00, 0x20, 0}, /* up_cim_00_to_7c */
+	{0x7b50, 0x7b54, 0x80, 0x20, 0}, /* up_cim_80_to_fc */
+	{0x7b50, 0x7b54, 0x100, 0x11, 0}, /* up_cim_100_to_14c */
+	{0x7b50, 0x7b54, 0x200, 0x10, 0}, /* up_cim_200_to_23c */
+	{0x7b50, 0x7b54, 0x240, 0x2, 0}, /* up_cim_240_to_244 */
+	{0x7b50, 0x7b54, 0x250, 0x2, 0}, /* up_cim_250_to_254 */
+	{0x7b50, 0x7b54, 0x260, 0x2, 0}, /* up_cim_260_to_264 */
+	{0x7b50, 0x7b54, 0x270, 0x2, 0}, /* up_cim_270_to_274 */
+	{0x7b50, 0x7b54, 0x280, 0x20, 0}, /* up_cim_280_to_2fc */
+	{0x7b50, 0x7b54, 0x300, 0x20, 0}, /* up_cim_300_to_37c */
+	{0x7b50, 0x7b54, 0x380, 0x14, 0}, /* up_cim_380_to_3cc */
+	{0x7b50, 0x7b54, 0x2900, 0x4, 0x4}, /* up_cim_2900_to_3d40 */
+	{0x7b50, 0x7b54, 0x2904, 0x4, 0x4}, /* up_cim_2904_to_3d44 */
+	{0x7b50, 0x7b54, 0x2908, 0x4, 0x4}, /* up_cim_2908_to_3d48 */
+	{0x7b50, 0x7b54, 0x2910, 0x4, 0x4}, /* up_cim_2910_to_3d4c */
+	{0x7b50, 0x7b54, 0x2914, 0x4, 0x4}, /* up_cim_2914_to_3d50 */
+	{0x7b50, 0x7b54, 0x2918, 0x4, 0x4}, /* up_cim_2918_to_3d54 */
+	{0x7b50, 0x7b54, 0x291c, 0x4, 0x4}, /* up_cim_291c_to_3d58 */
+	{0x7b50, 0x7b54, 0x2924, 0x10, 0x10}, /* up_cim_2924_to_2914 */
+	{0x7b50, 0x7b54, 0x2928, 0x10, 0x10}, /* up_cim_2928_to_2a18 */
+	{0x7b50, 0x7b54, 0x292c, 0x10, 0x10}, /* up_cim_292c_to_2a1c */
 };
 
 static const u32 t6_hma_ireg_array[][IREG_NUM_ELEM] = {
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cudbg_if.h b/drivers/net/ethernet/chelsio/cxgb4/cudbg_if.h
index e10ff1ee62c5..8568a51f6414 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cudbg_if.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/cudbg_if.h
@@ -21,6 +21,7 @@
 /* Error codes */
 #define CUDBG_STATUS_NO_MEM -19
 #define CUDBG_STATUS_ENTITY_NOT_FOUND -24
+#define CUDBG_STATUS_NOT_IMPLEMENTED -28
 #define CUDBG_SYSTEM_ERROR -29
 #define CUDBG_STATUS_CCLK_NOT_DEFINED -32
 
@@ -47,6 +48,8 @@ enum cudbg_dbg_entity_type {
 	CUDBG_CIM_OBQ_NCSI = 17,
 	CUDBG_EDC0 = 18,
 	CUDBG_EDC1 = 19,
+	CUDBG_MC0 = 20,
+	CUDBG_MC1 = 21,
 	CUDBG_RSS = 22,
 	CUDBG_RSS_VF_CONF = 25,
 	CUDBG_PATH_MTU = 27,
@@ -56,6 +59,7 @@ enum cudbg_dbg_entity_type {
 	CUDBG_SGE_INDIRECT = 37,
 	CUDBG_ULPRX_LA = 41,
 	CUDBG_TP_LA = 43,
+	CUDBG_MEMINFO = 44,
 	CUDBG_CIM_PIF_LA = 45,
 	CUDBG_CLK = 46,
 	CUDBG_CIM_OBQ_RXQ0 = 47,
@@ -63,6 +67,7 @@ enum cudbg_dbg_entity_type {
 	CUDBG_PCIE_INDIRECT = 50,
 	CUDBG_PM_INDIRECT = 51,
 	CUDBG_TID_INFO = 54,
+	CUDBG_PCIE_CONFIG = 55,
 	CUDBG_DUMP_CONTEXT = 56,
 	CUDBG_MPS_TCAM = 57,
 	CUDBG_VPD_DATA = 58,
@@ -74,6 +79,7 @@ enum cudbg_dbg_entity_type {
 	CUDBG_PBT_TABLE = 65,
 	CUDBG_MBOX_LOG = 66,
 	CUDBG_HMA_INDIRECT = 67,
+	CUDBG_HMA = 68,
 	CUDBG_MAX_ENTITY = 70,
 };
 
@@ -81,6 +87,10 @@ struct cudbg_init {
 	struct adapter *adap; /* Pointer to adapter structure */
 	void *outbuf; /* Output buffer */
 	u32 outbuf_size;  /* Output buffer size */
+	u8 compress_type; /* Type of compression to use */
+	void *compress_buff; /* Compression buffer */
+	u32 compress_buff_size; /* Compression buffer size */
+	void *workspace; /* Workspace for zlib */
 };
 
 static inline unsigned int cudbg_mbytes_to_bytes(unsigned int size)
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.c b/drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.c
index d699bf88d18f..557fd8bfd54e 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.c
@@ -15,18 +15,65 @@
  *
  */
 
+#include <linux/sort.h>
+
 #include "t4_regs.h"
 #include "cxgb4.h"
 #include "cudbg_if.h"
 #include "cudbg_lib_common.h"
-#include "cudbg_lib.h"
 #include "cudbg_entity.h"
+#include "cudbg_lib.h"
+#include "cudbg_zlib.h"
 
-static void cudbg_write_and_release_buff(struct cudbg_buffer *pin_buff,
-					 struct cudbg_buffer *dbg_buff)
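+/* Compress @pin_buff into @dbg_buff in CUDBG_CHUNK_SIZE pieces and store
+ * the resulting compressed size back in @pin_buff->size.
+ */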
+static int cudbg_do_compression(struct cudbg_init *pdbg_init,
+				struct cudbg_buffer *pin_buff,
+				struct cudbg_buffer *dbg_buff)
 {
-	cudbg_update_buff(pin_buff, dbg_buff);
-	cudbg_put_buff(pin_buff, dbg_buff);
+	struct cudbg_buffer temp_in_buff = { 0 };
+	int bytes_left, bytes_read, bytes;
+	u32 offset = dbg_buff->offset;
+	int rc;
+
+	temp_in_buff.offset = pin_buff->offset;
+	temp_in_buff.data = pin_buff->data;
+	temp_in_buff.size = pin_buff->size;
+
+	bytes_left = pin_buff->size;
+	bytes_read = 0;
+	while (bytes_left > 0) {
+		/* Do compression in smaller chunks */
+		bytes = min_t(unsigned long, bytes_left,
+			      (unsigned long)CUDBG_CHUNK_SIZE);
+		temp_in_buff.data = (char *)pin_buff->data + bytes_read;
+		temp_in_buff.size = bytes;
+		rc = cudbg_compress_buff(pdbg_init, &temp_in_buff, dbg_buff);
+		if (rc)
+			return rc;
+		bytes_left -= bytes;
+		bytes_read += bytes;
+	}
+
+	pin_buff->size = dbg_buff->offset - offset;
+	return 0;
+}
+
+static int cudbg_write_and_release_buff(struct cudbg_init *pdbg_init,
+					struct cudbg_buffer *pin_buff,
+					struct cudbg_buffer *dbg_buff)
+{
+	int rc = 0;
+
+	if (pdbg_init->compress_type == CUDBG_COMPRESSION_NONE)
+		cudbg_update_buff(pin_buff, dbg_buff);
+	else
+		rc = cudbg_do_compression(pdbg_init, pin_buff, dbg_buff);
+
+	cudbg_put_buff(pdbg_init, pin_buff);
+	return rc;
 }
 
 static int is_fw_attached(struct cudbg_init *pdbg_init)
@@ -84,6 +131,277 @@ static int cudbg_read_vpd_reg(struct adapter *padap, u32 addr, u32 len,
 	return 0;
 }
 
+static int cudbg_mem_desc_cmp(const void *a, const void *b)
+{
+	return ((const struct cudbg_mem_desc *)a)->base -
+	       ((const struct cudbg_mem_desc *)b)->base;
+}
+
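+/* Build a sorted list of the memories available on the adapter (EDC, MC,
+ * HMA) and of the regions carved out of them.
+ */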
+int cudbg_fill_meminfo(struct adapter *padap,
+		       struct cudbg_meminfo *meminfo_buff)
+{
+	struct cudbg_mem_desc *md;
+	u32 lo, hi, used, alloc;
+	int n, i;
+
+	memset(meminfo_buff->avail, 0,
+	       ARRAY_SIZE(meminfo_buff->avail) *
+	       sizeof(struct cudbg_mem_desc));
+	memset(meminfo_buff->mem, 0,
+	       (ARRAY_SIZE(cudbg_region) + 3) * sizeof(struct cudbg_mem_desc));
+	md  = meminfo_buff->mem;
+
+	for (i = 0; i < ARRAY_SIZE(meminfo_buff->mem); i++) {
+		meminfo_buff->mem[i].limit = 0;
+		meminfo_buff->mem[i].idx = i;
+	}
+
+	/* Find and sort the populated memory ranges */
+	i = 0;
+	lo = t4_read_reg(padap, MA_TARGET_MEM_ENABLE_A);
+	if (lo & EDRAM0_ENABLE_F) {
+		hi = t4_read_reg(padap, MA_EDRAM0_BAR_A);
+		meminfo_buff->avail[i].base =
+			cudbg_mbytes_to_bytes(EDRAM0_BASE_G(hi));
+		meminfo_buff->avail[i].limit =
+			meminfo_buff->avail[i].base +
+			cudbg_mbytes_to_bytes(EDRAM0_SIZE_G(hi));
+		meminfo_buff->avail[i].idx = 0;
+		i++;
+	}
+
+	if (lo & EDRAM1_ENABLE_F) {
+		hi =  t4_read_reg(padap, MA_EDRAM1_BAR_A);
+		meminfo_buff->avail[i].base =
+			cudbg_mbytes_to_bytes(EDRAM1_BASE_G(hi));
+		meminfo_buff->avail[i].limit =
+			meminfo_buff->avail[i].base +
+			cudbg_mbytes_to_bytes(EDRAM1_SIZE_G(hi));
+		meminfo_buff->avail[i].idx = 1;
+		i++;
+	}
+
+	if (is_t5(padap->params.chip)) {
+		if (lo & EXT_MEM0_ENABLE_F) {
+			hi = t4_read_reg(padap, MA_EXT_MEMORY0_BAR_A);
+			meminfo_buff->avail[i].base =
+				cudbg_mbytes_to_bytes(EXT_MEM_BASE_G(hi));
+			meminfo_buff->avail[i].limit =
+				meminfo_buff->avail[i].base +
+				cudbg_mbytes_to_bytes(EXT_MEM_SIZE_G(hi));
+			meminfo_buff->avail[i].idx = 3;
+			i++;
+		}
+
+		if (lo & EXT_MEM1_ENABLE_F) {
+			hi = t4_read_reg(padap, MA_EXT_MEMORY1_BAR_A);
+			meminfo_buff->avail[i].base =
+				cudbg_mbytes_to_bytes(EXT_MEM1_BASE_G(hi));
+			meminfo_buff->avail[i].limit =
+				meminfo_buff->avail[i].base +
+				cudbg_mbytes_to_bytes(EXT_MEM1_SIZE_G(hi));
+			meminfo_buff->avail[i].idx = 4;
+			i++;
+		}
+	} else {
+		if (lo & EXT_MEM_ENABLE_F) {
+			hi = t4_read_reg(padap, MA_EXT_MEMORY_BAR_A);
+			meminfo_buff->avail[i].base =
+				cudbg_mbytes_to_bytes(EXT_MEM_BASE_G(hi));
+			meminfo_buff->avail[i].limit =
+				meminfo_buff->avail[i].base +
+				cudbg_mbytes_to_bytes(EXT_MEM_SIZE_G(hi));
+			meminfo_buff->avail[i].idx = 2;
+			i++;
+		}
+
+		if (lo & HMA_MUX_F) {
+			hi = t4_read_reg(padap, MA_EXT_MEMORY1_BAR_A);
+			meminfo_buff->avail[i].base =
+				cudbg_mbytes_to_bytes(EXT_MEM1_BASE_G(hi));
+			meminfo_buff->avail[i].limit =
+				meminfo_buff->avail[i].base +
+				cudbg_mbytes_to_bytes(EXT_MEM1_SIZE_G(hi));
+			meminfo_buff->avail[i].idx = 5;
+			i++;
+		}
+	}
+
+	if (!i) /* no memory available */
+		return CUDBG_STATUS_ENTITY_NOT_FOUND;
+
+	meminfo_buff->avail_c = i;
+	sort(meminfo_buff->avail, i, sizeof(struct cudbg_mem_desc),
+	     cudbg_mem_desc_cmp, NULL);
+	(md++)->base = t4_read_reg(padap, SGE_DBQ_CTXT_BADDR_A);
+	(md++)->base = t4_read_reg(padap, SGE_IMSG_CTXT_BADDR_A);
+	(md++)->base = t4_read_reg(padap, SGE_FLM_CACHE_BADDR_A);
+	(md++)->base = t4_read_reg(padap, TP_CMM_TCB_BASE_A);
+	(md++)->base = t4_read_reg(padap, TP_CMM_MM_BASE_A);
+	(md++)->base = t4_read_reg(padap, TP_CMM_TIMER_BASE_A);
+	(md++)->base = t4_read_reg(padap, TP_CMM_MM_RX_FLST_BASE_A);
+	(md++)->base = t4_read_reg(padap, TP_CMM_MM_TX_FLST_BASE_A);
+	(md++)->base = t4_read_reg(padap, TP_CMM_MM_PS_FLST_BASE_A);
+
+	/* the next few have explicit upper bounds */
+	md->base = t4_read_reg(padap, TP_PMM_TX_BASE_A);
+	md->limit = md->base - 1 +
+		    t4_read_reg(padap, TP_PMM_TX_PAGE_SIZE_A) *
+		    PMTXMAXPAGE_G(t4_read_reg(padap, TP_PMM_TX_MAX_PAGE_A));
+	md++;
+
+	md->base = t4_read_reg(padap, TP_PMM_RX_BASE_A);
+	md->limit = md->base - 1 +
+		    t4_read_reg(padap, TP_PMM_RX_PAGE_SIZE_A) *
+		    PMRXMAXPAGE_G(t4_read_reg(padap, TP_PMM_RX_MAX_PAGE_A));
+	md++;
+
+	if (t4_read_reg(padap, LE_DB_CONFIG_A) & HASHEN_F) {
+		if (CHELSIO_CHIP_VERSION(padap->params.chip) <= CHELSIO_T5) {
+			hi = t4_read_reg(padap, LE_DB_TID_HASHBASE_A) / 4;
+			md->base = t4_read_reg(padap, LE_DB_HASH_TID_BASE_A);
+		} else {
+			hi = t4_read_reg(padap, LE_DB_HASH_TID_BASE_A);
+			md->base = t4_read_reg(padap,
+					       LE_DB_HASH_TBL_BASE_ADDR_A);
+		}
+		md->limit = 0;
+	} else {
+		md->base = 0;
+		md->idx = ARRAY_SIZE(cudbg_region);  /* hide it */
+	}
+	md++;
+
+#define ulp_region(reg) do { \
+	md->base = t4_read_reg(padap, ULP_ ## reg ## _LLIMIT_A);\
+	(md++)->limit = t4_read_reg(padap, ULP_ ## reg ## _ULIMIT_A);\
+} while (0)
+
+	ulp_region(RX_ISCSI);
+	ulp_region(RX_TDDP);
+	ulp_region(TX_TPT);
+	ulp_region(RX_STAG);
+	ulp_region(RX_RQ);
+	ulp_region(RX_RQUDP);
+	ulp_region(RX_PBL);
+	ulp_region(TX_PBL);
+#undef ulp_region
+	md->base = 0;
+	md->idx = ARRAY_SIZE(cudbg_region);
+	if (!is_t4(padap->params.chip)) {
+		u32 fifo_size = t4_read_reg(padap, SGE_DBVFIFO_SIZE_A);
+		u32 sge_ctrl = t4_read_reg(padap, SGE_CONTROL2_A);
+		u32 size = 0;
+
+		if (is_t5(padap->params.chip)) {
+			if (sge_ctrl & VFIFO_ENABLE_F)
+				size = DBVFIFO_SIZE_G(fifo_size);
+		} else {
+			size = T6_DBVFIFO_SIZE_G(fifo_size);
+		}
+
+		if (size) {
+			md->base = BASEADDR_G(t4_read_reg(padap,
+							  SGE_DBVFIFO_BADDR_A));
+			md->limit = md->base + (size << 2) - 1;
+		}
+	}
+
+	md++;
+
+	md->base = t4_read_reg(padap, ULP_RX_CTX_BASE_A);
+	md->limit = 0;
+	md++;
+	md->base = t4_read_reg(padap, ULP_TX_ERR_TABLE_BASE_A);
+	md->limit = 0;
+	md++;
+
+	md->base = padap->vres.ocq.start;
+	if (padap->vres.ocq.size)
+		md->limit = md->base + padap->vres.ocq.size - 1;
+	else
+		md->idx = ARRAY_SIZE(cudbg_region);  /* hide it */
+	md++;
+
+	/* add any address-space holes; there can be up to 3 */
+	for (n = 0; n < i - 1; n++)
+		if (meminfo_buff->avail[n].limit <
+		    meminfo_buff->avail[n + 1].base)
+			(md++)->base = meminfo_buff->avail[n].limit;
+
+	if (meminfo_buff->avail[n].limit)
+		(md++)->base = meminfo_buff->avail[n].limit;
+
+	n = md - meminfo_buff->mem;
+	meminfo_buff->mem_c = n;
+
+	sort(meminfo_buff->mem, n, sizeof(struct cudbg_mem_desc),
+	     cudbg_mem_desc_cmp, NULL);
+
+	lo = t4_read_reg(padap, CIM_SDRAM_BASE_ADDR_A);
+	hi = t4_read_reg(padap, CIM_SDRAM_ADDR_SIZE_A) + lo - 1;
+	meminfo_buff->up_ram_lo = lo;
+	meminfo_buff->up_ram_hi = hi;
+
+	lo = t4_read_reg(padap, CIM_EXTMEM2_BASE_ADDR_A);
+	hi = t4_read_reg(padap, CIM_EXTMEM2_ADDR_SIZE_A) + lo - 1;
+	meminfo_buff->up_extmem2_lo = lo;
+	meminfo_buff->up_extmem2_hi = hi;
+
+	lo = t4_read_reg(padap, TP_PMM_RX_MAX_PAGE_A);
+	meminfo_buff->rx_pages_data[0] =  PMRXMAXPAGE_G(lo);
+	meminfo_buff->rx_pages_data[1] =
+		t4_read_reg(padap, TP_PMM_RX_PAGE_SIZE_A) >> 10;
+	meminfo_buff->rx_pages_data[2] = (lo & PMRXNUMCHN_F) ? 2 : 1;
+
+	lo = t4_read_reg(padap, TP_PMM_TX_MAX_PAGE_A);
+	hi = t4_read_reg(padap, TP_PMM_TX_PAGE_SIZE_A);
+	meminfo_buff->tx_pages_data[0] = PMTXMAXPAGE_G(lo);
+	meminfo_buff->tx_pages_data[1] =
+		hi >= (1 << 20) ? (hi >> 20) : (hi >> 10);
+	meminfo_buff->tx_pages_data[2] =
+		hi >= (1 << 20) ? 'M' : 'K';
+	meminfo_buff->tx_pages_data[3] = 1 << PMTXNUMCHN_G(lo);
+
+	meminfo_buff->p_structs = t4_read_reg(padap, TP_CMM_MM_MAX_PSTRUCT_A);
+
+	for (i = 0; i < 4; i++) {
+		if (CHELSIO_CHIP_VERSION(padap->params.chip) > CHELSIO_T5)
+			lo = t4_read_reg(padap,
+					 MPS_RX_MAC_BG_PG_CNT0_A + i * 4);
+		else
+			lo = t4_read_reg(padap, MPS_RX_PG_RSV0_A + i * 4);
+		if (is_t5(padap->params.chip)) {
+			used = T5_USED_G(lo);
+			alloc = T5_ALLOC_G(lo);
+		} else {
+			used = USED_G(lo);
+			alloc = ALLOC_G(lo);
+		}
+		meminfo_buff->port_used[i] = used;
+		meminfo_buff->port_alloc[i] = alloc;
+	}
+
+	for (i = 0; i < padap->params.arch.nchan; i++) {
+		if (CHELSIO_CHIP_VERSION(padap->params.chip) > CHELSIO_T5)
+			lo = t4_read_reg(padap,
+					 MPS_RX_LPBK_BG_PG_CNT0_A + i * 4);
+		else
+			lo = t4_read_reg(padap, MPS_RX_PG_RSV4_A + i * 4);
+		if (is_t5(padap->params.chip)) {
+			used = T5_USED_G(lo);
+			alloc = T5_ALLOC_G(lo);
+		} else {
+			used = USED_G(lo);
+			alloc = ALLOC_G(lo);
+		}
+		meminfo_buff->loopback_used[i] = used;
+		meminfo_buff->loopback_alloc[i] = alloc;
+	}
+
+	return 0;
+}
+
 int cudbg_collect_reg_dump(struct cudbg_init *pdbg_init,
 			   struct cudbg_buffer *dbg_buff,
 			   struct cudbg_error *cudbg_err)
@@ -98,12 +416,11 @@ int cudbg_collect_reg_dump(struct cudbg_init *pdbg_init,
 	else if (is_t5(padap->params.chip) || is_t6(padap->params.chip))
 		buf_size = T5_REGMAP_SIZE;
 
-	rc = cudbg_get_buff(dbg_buff, buf_size, &temp_buff);
+	rc = cudbg_get_buff(pdbg_init, dbg_buff, buf_size, &temp_buff);
 	if (rc)
 		return rc;
 	t4_get_regs(padap, (void *)temp_buff.data, temp_buff.size);
-	cudbg_write_and_release_buff(&temp_buff, dbg_buff);
-	return rc;
+	return cudbg_write_and_release_buff(pdbg_init, &temp_buff, dbg_buff);
 }
 
 int cudbg_collect_fw_devlog(struct cudbg_init *pdbg_init,
@@ -122,7 +439,7 @@ int cudbg_collect_fw_devlog(struct cudbg_init *pdbg_init,
 	}
 
 	dparams = &padap->params.devlog;
-	rc = cudbg_get_buff(dbg_buff, dparams->size, &temp_buff);
+	rc = cudbg_get_buff(pdbg_init, dbg_buff, dparams->size, &temp_buff);
 	if (rc)
 		return rc;
 
@@ -137,12 +454,11 @@ int cudbg_collect_fw_devlog(struct cudbg_init *pdbg_init,
 		spin_unlock(&padap->win0_lock);
 		if (rc) {
 			cudbg_err->sys_err = rc;
-			cudbg_put_buff(&temp_buff, dbg_buff);
+			cudbg_put_buff(pdbg_init, &temp_buff);
 			return rc;
 		}
 	}
-	cudbg_write_and_release_buff(&temp_buff, dbg_buff);
-	return rc;
+	return cudbg_write_and_release_buff(pdbg_init, &temp_buff, dbg_buff);
 }
 
 int cudbg_collect_cim_la(struct cudbg_init *pdbg_init,
@@ -163,14 +479,14 @@ int cudbg_collect_cim_la(struct cudbg_init *pdbg_init,
 	}
 
 	size += sizeof(cfg);
-	rc = cudbg_get_buff(dbg_buff, size, &temp_buff);
+	rc = cudbg_get_buff(pdbg_init, dbg_buff, size, &temp_buff);
 	if (rc)
 		return rc;
 
 	rc = t4_cim_read(padap, UP_UP_DBG_LA_CFG_A, 1, &cfg);
 	if (rc) {
 		cudbg_err->sys_err = rc;
-		cudbg_put_buff(&temp_buff, dbg_buff);
+		cudbg_put_buff(pdbg_init, &temp_buff);
 		return rc;
 	}
 
@@ -180,11 +496,10 @@ int cudbg_collect_cim_la(struct cudbg_init *pdbg_init,
 			    NULL);
 	if (rc < 0) {
 		cudbg_err->sys_err = rc;
-		cudbg_put_buff(&temp_buff, dbg_buff);
+		cudbg_put_buff(pdbg_init, &temp_buff);
 		return rc;
 	}
-	cudbg_write_and_release_buff(&temp_buff, dbg_buff);
-	return rc;
+	return cudbg_write_and_release_buff(pdbg_init, &temp_buff, dbg_buff);
 }
 
 int cudbg_collect_cim_ma_la(struct cudbg_init *pdbg_init,
@@ -196,7 +511,7 @@ int cudbg_collect_cim_ma_la(struct cudbg_init *pdbg_init,
 	int size, rc;
 
 	size = 2 * CIM_MALA_SIZE * 5 * sizeof(u32);
-	rc = cudbg_get_buff(dbg_buff, size, &temp_buff);
+	rc = cudbg_get_buff(pdbg_init, dbg_buff, size, &temp_buff);
 	if (rc)
 		return rc;
 
@@ -204,8 +519,7 @@ int cudbg_collect_cim_ma_la(struct cudbg_init *pdbg_init,
 			  (u32 *)temp_buff.data,
 			  (u32 *)((char *)temp_buff.data +
 				  5 * CIM_MALA_SIZE));
-	cudbg_write_and_release_buff(&temp_buff, dbg_buff);
-	return rc;
+	return cudbg_write_and_release_buff(pdbg_init, &temp_buff, dbg_buff);
 }
 
 int cudbg_collect_cim_qcfg(struct cudbg_init *pdbg_init,
@@ -217,7 +531,7 @@ int cudbg_collect_cim_qcfg(struct cudbg_init *pdbg_init,
 	struct cudbg_cim_qcfg *cim_qcfg_data;
 	int rc;
 
-	rc = cudbg_get_buff(dbg_buff, sizeof(struct cudbg_cim_qcfg),
+	rc = cudbg_get_buff(pdbg_init, dbg_buff, sizeof(struct cudbg_cim_qcfg),
 			    &temp_buff);
 	if (rc)
 		return rc;
@@ -228,7 +542,7 @@ int cudbg_collect_cim_qcfg(struct cudbg_init *pdbg_init,
 			 ARRAY_SIZE(cim_qcfg_data->stat), cim_qcfg_data->stat);
 	if (rc) {
 		cudbg_err->sys_err = rc;
-		cudbg_put_buff(&temp_buff, dbg_buff);
+		cudbg_put_buff(pdbg_init, &temp_buff);
 		return rc;
 	}
 
@@ -237,14 +551,13 @@ int cudbg_collect_cim_qcfg(struct cudbg_init *pdbg_init,
 			 cim_qcfg_data->obq_wr);
 	if (rc) {
 		cudbg_err->sys_err = rc;
-		cudbg_put_buff(&temp_buff, dbg_buff);
+		cudbg_put_buff(pdbg_init, &temp_buff);
 		return rc;
 	}
 
 	t4_read_cimq_cfg(padap, cim_qcfg_data->base, cim_qcfg_data->size,
 			 cim_qcfg_data->thres);
-	cudbg_write_and_release_buff(&temp_buff, dbg_buff);
-	return rc;
+	return cudbg_write_and_release_buff(pdbg_init, &temp_buff, dbg_buff);
 }
 
 static int cudbg_read_cim_ibq(struct cudbg_init *pdbg_init,
@@ -258,7 +571,7 @@ static int cudbg_read_cim_ibq(struct cudbg_init *pdbg_init,
 
 	/* collect CIM IBQ */
 	qsize = CIM_IBQ_SIZE * 4 * sizeof(u32);
-	rc = cudbg_get_buff(dbg_buff, qsize, &temp_buff);
+	rc = cudbg_get_buff(pdbg_init, dbg_buff, qsize, &temp_buff);
 	if (rc)
 		return rc;
 
@@ -272,11 +585,10 @@ static int cudbg_read_cim_ibq(struct cudbg_init *pdbg_init,
 		else
 			rc = no_of_read_words;
 		cudbg_err->sys_err = rc;
-		cudbg_put_buff(&temp_buff, dbg_buff);
+		cudbg_put_buff(pdbg_init, &temp_buff);
 		return rc;
 	}
-	cudbg_write_and_release_buff(&temp_buff, dbg_buff);
-	return rc;
+	return cudbg_write_and_release_buff(pdbg_init, &temp_buff, dbg_buff);
 }
 
 int cudbg_collect_cim_ibq_tp0(struct cudbg_init *pdbg_init,
@@ -343,7 +655,7 @@ static int cudbg_read_cim_obq(struct cudbg_init *pdbg_init,
 
 	/* collect CIM OBQ */
 	qsize =  cudbg_cim_obq_size(padap, qid);
-	rc = cudbg_get_buff(dbg_buff, qsize, &temp_buff);
+	rc = cudbg_get_buff(pdbg_init, dbg_buff, qsize, &temp_buff);
 	if (rc)
 		return rc;
 
@@ -357,11 +669,10 @@ static int cudbg_read_cim_obq(struct cudbg_init *pdbg_init,
 		else
 			rc = no_of_read_words;
 		cudbg_err->sys_err = rc;
-		cudbg_put_buff(&temp_buff, dbg_buff);
+		cudbg_put_buff(pdbg_init, &temp_buff);
 		return rc;
 	}
-	cudbg_write_and_release_buff(&temp_buff, dbg_buff);
-	return rc;
+	return cudbg_write_and_release_buff(pdbg_init, &temp_buff, dbg_buff);
 }
 
 int cudbg_collect_cim_obq_ulp0(struct cudbg_init *pdbg_init,
@@ -420,23 +731,211 @@ int cudbg_collect_obq_sge_rx_q1(struct cudbg_init *pdbg_init,
 	return cudbg_read_cim_obq(pdbg_init, dbg_buff, cudbg_err, 7);
 }
 
+static int cudbg_meminfo_get_mem_index(struct adapter *padap,
+				       struct cudbg_meminfo *mem_info,
+				       u8 mem_type, u8 *idx)
+{
+	u8 i, flag;
+
+	switch (mem_type) {
+	case MEM_EDC0:
+		flag = EDC0_FLAG;
+		break;
+	case MEM_EDC1:
+		flag = EDC1_FLAG;
+		break;
+	case MEM_MC0:
+		/* Some T5 cards have both MC0 and MC1. */
+		flag = is_t5(padap->params.chip) ? MC0_FLAG : MC_FLAG;
+		break;
+	case MEM_MC1:
+		flag = MC1_FLAG;
+		break;
+	case MEM_HMA:
+		flag = HMA_FLAG;
+		break;
+	default:
+		return CUDBG_STATUS_ENTITY_NOT_FOUND;
+	}
+
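+	/* avail[].idx holds the memory-type flag assigned in
+	 * cudbg_fill_meminfo().
+	 */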
+	for (i = 0; i < mem_info->avail_c; i++) {
+		if (mem_info->avail[i].idx == flag) {
+			*idx = i;
+			return 0;
+		}
+	}
+
+	return CUDBG_STATUS_ENTITY_NOT_FOUND;
+}
+
+/* Fetch the start and end of @region_name from @meminfo. */
+static int cudbg_get_mem_region(struct adapter *padap,
+				struct cudbg_meminfo *meminfo,
+				u8 mem_type, const char *region_name,
+				struct cudbg_mem_desc *mem_desc)
+{
+	u8 mc, found = 0;
+	u32 i, idx = 0;
+	int rc;
+
+	rc = cudbg_meminfo_get_mem_index(padap, meminfo, mem_type, &mc);
+	if (rc)
+		return rc;
+
+	for (i = 0; i < ARRAY_SIZE(cudbg_region); i++) {
+		if (!strcmp(cudbg_region[i], region_name)) {
+			found = 1;
+			idx = i;
+			break;
+		}
+	}
+	if (!found)
+		return -EINVAL;
+
+	found = 0;
+	for (i = 0; i < meminfo->mem_c; i++) {
+		if (meminfo->mem[i].idx >= ARRAY_SIZE(cudbg_region))
+			continue; /* Skip holes */
+
+		if (!(meminfo->mem[i].limit))
+			meminfo->mem[i].limit =
+				i < meminfo->mem_c - 1 ?
+				meminfo->mem[i + 1].base - 1 : ~0;
+
+		if (meminfo->mem[i].idx == idx) {
+			/* Check if the region exists in @mem_type memory */
+			if (meminfo->mem[i].base < meminfo->avail[mc].base &&
+			    meminfo->mem[i].limit < meminfo->avail[mc].base)
+				return -EINVAL;
+
+			if (meminfo->mem[i].base > meminfo->avail[mc].limit)
+				return -EINVAL;
+
+			memcpy(mem_desc, &meminfo->mem[i],
+			       sizeof(struct cudbg_mem_desc));
+			found = 1;
+			break;
+		}
+	}
+	if (!found)
+		return -EINVAL;
+
+	return 0;
+}
+
+/* Rebase the start and end of the requested memory region so that they
+ * are relative to offset 0 of the corresponding EDC/MC/HMA.
+ */
+static int cudbg_get_mem_relative(struct adapter *padap,
+				  struct cudbg_meminfo *meminfo,
+				  u8 mem_type, u32 *out_base, u32 *out_end)
+{
+	u8 mc_idx;
+	int rc;
+
+	rc = cudbg_meminfo_get_mem_index(padap, meminfo, mem_type, &mc_idx);
+	if (rc)
+		return rc;
+
+	if (*out_base < meminfo->avail[mc_idx].base)
+		*out_base = 0;
+	else
+		*out_base -= meminfo->avail[mc_idx].base;
+
+	if (*out_end > meminfo->avail[mc_idx].limit)
+		*out_end = meminfo->avail[mc_idx].limit;
+	else
+		*out_end -= meminfo->avail[mc_idx].base;
+
+	return 0;
+}
+
+/* Get TX and RX Payload region */
+static int cudbg_get_payload_range(struct adapter *padap, u8 mem_type,
+				   const char *region_name,
+				   struct cudbg_region_info *payload)
+{
+	struct cudbg_mem_desc mem_desc = { 0 };
+	struct cudbg_meminfo meminfo;
+	int rc;
+
+	rc = cudbg_fill_meminfo(padap, &meminfo);
+	if (rc)
+		return rc;
+
+	rc = cudbg_get_mem_region(padap, &meminfo, mem_type, region_name,
+				  &mem_desc);
+	if (rc) {
+		payload->exist = false;
+		return 0;
+	}
+
+	payload->exist = true;
+	payload->start = mem_desc.base;
+	payload->end = mem_desc.limit;
+
+	return cudbg_get_mem_relative(padap, &meminfo, mem_type,
+				      &payload->start, &payload->end);
+}
+
+#define CUDBG_YIELD_ITERATION 256
+
 static int cudbg_read_fw_mem(struct cudbg_init *pdbg_init,
 			     struct cudbg_buffer *dbg_buff, u8 mem_type,
 			     unsigned long tot_len,
 			     struct cudbg_error *cudbg_err)
 {
+	static const char * const region_name[] = { "Tx payload:",
+						    "Rx payload:" };
 	unsigned long bytes, bytes_left, bytes_read = 0;
 	struct adapter *padap = pdbg_init->adap;
 	struct cudbg_buffer temp_buff = { 0 };
+	struct cudbg_region_info payload[2];
+	u32 yield_count = 0;
 	int rc = 0;
+	u8 i;
+
+	/* Get TX/RX Payload region range if they exist */
+	memset(payload, 0, sizeof(payload));
+	for (i = 0; i < ARRAY_SIZE(region_name); i++) {
+		rc = cudbg_get_payload_range(padap, mem_type, region_name[i],
+					     &payload[i]);
+		if (rc)
+			return rc;
+
+		if (payload[i].exist) {
+			/* Align start and end to avoid wrap around */
+			payload[i].start = roundup(payload[i].start,
+						   CUDBG_CHUNK_SIZE);
+			payload[i].end = rounddown(payload[i].end,
+						   CUDBG_CHUNK_SIZE);
+		}
+	}
 
 	bytes_left = tot_len;
 	while (bytes_left > 0) {
+		/* MC is huge and is read through slow PIO accesses, so this
+		 * loop can hold the CPU for a long time. The OS may decide
+		 * the process is hung and emit CPU stall traces, so yield
+		 * the CPU regularly.
+		 */
+		yield_count++;
+		if (!(yield_count % CUDBG_YIELD_ITERATION))
+			schedule();
+
 		bytes = min_t(unsigned long, bytes_left,
 			      (unsigned long)CUDBG_CHUNK_SIZE);
-		rc = cudbg_get_buff(dbg_buff, bytes, &temp_buff);
+		rc = cudbg_get_buff(pdbg_init, dbg_buff, bytes, &temp_buff);
 		if (rc)
 			return rc;
+
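+		/* Chunks lying entirely within a payload region are
+		 * skipped instead of being read from hardware.
+		 */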
+		for (i = 0; i < ARRAY_SIZE(payload); i++)
+			if (payload[i].exist &&
+			    bytes_read >= payload[i].start &&
+			    bytes_read + bytes <= payload[i].end)
+				/* TX and RX Payload regions can't overlap */
+				goto skip_read;
+
 		spin_lock(&padap->win0_lock);
 		rc = t4_memory_rw(padap, MEMWIN_NIC, mem_type,
 				  bytes_read, bytes,
@@ -445,37 +944,23 @@ static int cudbg_read_fw_mem(struct cudbg_init *pdbg_init,
 		spin_unlock(&padap->win0_lock);
 		if (rc) {
 			cudbg_err->sys_err = rc;
-			cudbg_put_buff(&temp_buff, dbg_buff);
+			cudbg_put_buff(pdbg_init, &temp_buff);
 			return rc;
 		}
+
+skip_read:
 		bytes_left -= bytes;
 		bytes_read += bytes;
-		cudbg_write_and_release_buff(&temp_buff, dbg_buff);
+		rc = cudbg_write_and_release_buff(pdbg_init, &temp_buff,
+						  dbg_buff);
+		if (rc) {
+			cudbg_put_buff(pdbg_init, &temp_buff);
+			return rc;
+		}
 	}
 	return rc;
 }
 
-static void cudbg_collect_mem_info(struct cudbg_init *pdbg_init,
-				   struct card_mem *mem_info)
-{
-	struct adapter *padap = pdbg_init->adap;
-	u32 value;
-
-	value = t4_read_reg(padap, MA_EDRAM0_BAR_A);
-	value = EDRAM0_SIZE_G(value);
-	mem_info->size_edc0 = (u16)value;
-
-	value = t4_read_reg(padap, MA_EDRAM1_BAR_A);
-	value = EDRAM1_SIZE_G(value);
-	mem_info->size_edc1 = (u16)value;
-
-	value = t4_read_reg(padap, MA_TARGET_MEM_ENABLE_A);
-	if (value & EDRAM0_ENABLE_F)
-		mem_info->mem_flag |= (1 << EDC0_FLAG);
-	if (value & EDRAM1_ENABLE_F)
-		mem_info->mem_flag |= (1 << EDC1_FLAG);
-}
-
 static void cudbg_t4_fwcache(struct cudbg_init *pdbg_init,
 			     struct cudbg_error *cudbg_err)
 {
@@ -495,37 +980,25 @@ static int cudbg_collect_mem_region(struct cudbg_init *pdbg_init,
 				    struct cudbg_error *cudbg_err,
 				    u8 mem_type)
 {
-	struct card_mem mem_info = {0};
-	unsigned long flag, size;
+	struct adapter *padap = pdbg_init->adap;
+	struct cudbg_meminfo mem_info;
+	unsigned long size;
+	u8 mc_idx;
 	int rc;
 
+	memset(&mem_info, 0, sizeof(struct cudbg_meminfo));
+	rc = cudbg_fill_meminfo(padap, &mem_info);
+	if (rc)
+		return rc;
+
 	cudbg_t4_fwcache(pdbg_init, cudbg_err);
-	cudbg_collect_mem_info(pdbg_init, &mem_info);
-	switch (mem_type) {
-	case MEM_EDC0:
-		flag = (1 << EDC0_FLAG);
-		size = cudbg_mbytes_to_bytes(mem_info.size_edc0);
-		break;
-	case MEM_EDC1:
-		flag = (1 << EDC1_FLAG);
-		size = cudbg_mbytes_to_bytes(mem_info.size_edc1);
-		break;
-	default:
-		rc = CUDBG_STATUS_ENTITY_NOT_FOUND;
-		goto err;
-	}
+	rc = cudbg_meminfo_get_mem_index(padap, &mem_info, mem_type, &mc_idx);
+	if (rc)
+		return rc;
 
-	if (mem_info.mem_flag & flag) {
-		rc = cudbg_read_fw_mem(pdbg_init, dbg_buff, mem_type,
-				       size, cudbg_err);
-		if (rc)
-			goto err;
-	} else {
-		rc = CUDBG_STATUS_ENTITY_NOT_FOUND;
-		goto err;
-	}
-err:
-	return rc;
+	size = mem_info.avail[mc_idx].limit - mem_info.avail[mc_idx].base;
+	return cudbg_read_fw_mem(pdbg_init, dbg_buff, mem_type, size,
+				 cudbg_err);
 }
 
 int cudbg_collect_edc0_meminfo(struct cudbg_init *pdbg_init,
@@ -544,26 +1017,51 @@ int cudbg_collect_edc1_meminfo(struct cudbg_init *pdbg_init,
 					MEM_EDC1);
 }
 
+int cudbg_collect_mc0_meminfo(struct cudbg_init *pdbg_init,
+			      struct cudbg_buffer *dbg_buff,
+			      struct cudbg_error *cudbg_err)
+{
+	return cudbg_collect_mem_region(pdbg_init, dbg_buff, cudbg_err,
+					MEM_MC0);
+}
+
+int cudbg_collect_mc1_meminfo(struct cudbg_init *pdbg_init,
+			      struct cudbg_buffer *dbg_buff,
+			      struct cudbg_error *cudbg_err)
+{
+	return cudbg_collect_mem_region(pdbg_init, dbg_buff, cudbg_err,
+					MEM_MC1);
+}
+
+int cudbg_collect_hma_meminfo(struct cudbg_init *pdbg_init,
+			      struct cudbg_buffer *dbg_buff,
+			      struct cudbg_error *cudbg_err)
+{
+	return cudbg_collect_mem_region(pdbg_init, dbg_buff, cudbg_err,
+					MEM_HMA);
+}
+
 int cudbg_collect_rss(struct cudbg_init *pdbg_init,
 		      struct cudbg_buffer *dbg_buff,
 		      struct cudbg_error *cudbg_err)
 {
 	struct adapter *padap = pdbg_init->adap;
 	struct cudbg_buffer temp_buff = { 0 };
-	int rc;
+	int rc, nentries;
 
-	rc = cudbg_get_buff(dbg_buff, RSS_NENTRIES * sizeof(u16), &temp_buff);
+	nentries = t4_chip_rss_size(padap);
+	rc = cudbg_get_buff(pdbg_init, dbg_buff, nentries * sizeof(u16),
+			    &temp_buff);
 	if (rc)
 		return rc;
 
 	rc = t4_read_rss(padap, (u16 *)temp_buff.data);
 	if (rc) {
 		cudbg_err->sys_err = rc;
-		cudbg_put_buff(&temp_buff, dbg_buff);
+		cudbg_put_buff(pdbg_init, &temp_buff);
 		return rc;
 	}
-	cudbg_write_and_release_buff(&temp_buff, dbg_buff);
-	return rc;
+	return cudbg_write_and_release_buff(pdbg_init, &temp_buff, dbg_buff);
 }
 
 int cudbg_collect_rss_vf_config(struct cudbg_init *pdbg_init,
@@ -576,7 +1074,7 @@ int cudbg_collect_rss_vf_config(struct cudbg_init *pdbg_init,
 	int vf, rc, vf_count;
 
 	vf_count = padap->params.arch.vfcount;
-	rc = cudbg_get_buff(dbg_buff,
+	rc = cudbg_get_buff(pdbg_init, dbg_buff,
 			    vf_count * sizeof(struct cudbg_rss_vf_conf),
 			    &temp_buff);
 	if (rc)
@@ -586,8 +1084,7 @@ int cudbg_collect_rss_vf_config(struct cudbg_init *pdbg_init,
 	for (vf = 0; vf < vf_count; vf++)
 		t4_read_rss_vf_config(padap, vf, &vfconf[vf].rss_vf_vfl,
 				      &vfconf[vf].rss_vf_vfh, true);
-	cudbg_write_and_release_buff(&temp_buff, dbg_buff);
-	return rc;
+	return cudbg_write_and_release_buff(pdbg_init, &temp_buff, dbg_buff);
 }
 
 int cudbg_collect_path_mtu(struct cudbg_init *pdbg_init,
@@ -598,13 +1095,13 @@ int cudbg_collect_path_mtu(struct cudbg_init *pdbg_init,
 	struct cudbg_buffer temp_buff = { 0 };
 	int rc;
 
-	rc = cudbg_get_buff(dbg_buff, NMTUS * sizeof(u16), &temp_buff);
+	rc = cudbg_get_buff(pdbg_init, dbg_buff, NMTUS * sizeof(u16),
+			    &temp_buff);
 	if (rc)
 		return rc;
 
 	t4_read_mtu_tbl(padap, (u16 *)temp_buff.data, NULL);
-	cudbg_write_and_release_buff(&temp_buff, dbg_buff);
-	return rc;
+	return cudbg_write_and_release_buff(pdbg_init, &temp_buff, dbg_buff);
 }
 
 int cudbg_collect_pm_stats(struct cudbg_init *pdbg_init,
@@ -616,7 +1113,7 @@ int cudbg_collect_pm_stats(struct cudbg_init *pdbg_init,
 	struct cudbg_pm_stats *pm_stats_buff;
 	int rc;
 
-	rc = cudbg_get_buff(dbg_buff, sizeof(struct cudbg_pm_stats),
+	rc = cudbg_get_buff(pdbg_init, dbg_buff, sizeof(struct cudbg_pm_stats),
 			    &temp_buff);
 	if (rc)
 		return rc;
@@ -624,8 +1121,7 @@ int cudbg_collect_pm_stats(struct cudbg_init *pdbg_init,
 	pm_stats_buff = (struct cudbg_pm_stats *)temp_buff.data;
 	t4_pmtx_get_stats(padap, pm_stats_buff->tx_cnt, pm_stats_buff->tx_cyc);
 	t4_pmrx_get_stats(padap, pm_stats_buff->rx_cnt, pm_stats_buff->rx_cyc);
-	cudbg_write_and_release_buff(&temp_buff, dbg_buff);
-	return rc;
+	return cudbg_write_and_release_buff(pdbg_init, &temp_buff, dbg_buff);
 }
 
 int cudbg_collect_hw_sched(struct cudbg_init *pdbg_init,
@@ -640,7 +1136,7 @@ int cudbg_collect_hw_sched(struct cudbg_init *pdbg_init,
 	if (!padap->params.vpd.cclk)
 		return CUDBG_STATUS_CCLK_NOT_DEFINED;
 
-	rc = cudbg_get_buff(dbg_buff, sizeof(struct cudbg_hw_sched),
+	rc = cudbg_get_buff(pdbg_init, dbg_buff, sizeof(struct cudbg_hw_sched),
 			    &temp_buff);
 	hw_sched_buff = (struct cudbg_hw_sched *)temp_buff.data;
 	hw_sched_buff->map = t4_read_reg(padap, TP_TX_MOD_QUEUE_REQ_MAP_A);
@@ -649,8 +1145,7 @@ int cudbg_collect_hw_sched(struct cudbg_init *pdbg_init,
 	for (i = 0; i < NTX_SCHED; ++i)
 		t4_get_tx_sched(padap, i, &hw_sched_buff->kbps[i],
 				&hw_sched_buff->ipg[i], true);
-	cudbg_write_and_release_buff(&temp_buff, dbg_buff);
-	return rc;
+	return cudbg_write_and_release_buff(pdbg_init, &temp_buff, dbg_buff);
 }
 
 int cudbg_collect_tp_indirect(struct cudbg_init *pdbg_init,
@@ -674,7 +1169,7 @@ int cudbg_collect_tp_indirect(struct cudbg_init *pdbg_init,
 
 	n = n / (IREG_NUM_ELEM * sizeof(u32));
 	size = sizeof(struct ireg_buf) * n;
-	rc = cudbg_get_buff(dbg_buff, size, &temp_buff);
+	rc = cudbg_get_buff(pdbg_init, dbg_buff, size, &temp_buff);
 	if (rc)
 		return rc;
 
@@ -763,8 +1258,7 @@ int cudbg_collect_tp_indirect(struct cudbg_init *pdbg_init,
 			       tp_pio->ireg_local_offset, true);
 		ch_tp_pio++;
 	}
-	cudbg_write_and_release_buff(&temp_buff, dbg_buff);
-	return rc;
+	return cudbg_write_and_release_buff(pdbg_init, &temp_buff, dbg_buff);
 }
 
 int cudbg_collect_sge_indirect(struct cudbg_init *pdbg_init,
@@ -776,7 +1270,8 @@ int cudbg_collect_sge_indirect(struct cudbg_init *pdbg_init,
 	struct ireg_buf *ch_sge_dbg;
 	int i, rc;
 
-	rc = cudbg_get_buff(dbg_buff, sizeof(*ch_sge_dbg) * 2, &temp_buff);
+	rc = cudbg_get_buff(pdbg_init, dbg_buff, sizeof(*ch_sge_dbg) * 2,
+			    &temp_buff);
 	if (rc)
 		return rc;
 
@@ -797,8 +1292,7 @@ int cudbg_collect_sge_indirect(struct cudbg_init *pdbg_init,
 				 sge_pio->ireg_local_offset);
 		ch_sge_dbg++;
 	}
-	cudbg_write_and_release_buff(&temp_buff, dbg_buff);
-	return rc;
+	return cudbg_write_and_release_buff(pdbg_init, &temp_buff, dbg_buff);
 }
 
 int cudbg_collect_ulprx_la(struct cudbg_init *pdbg_init,
@@ -810,7 +1304,7 @@ int cudbg_collect_ulprx_la(struct cudbg_init *pdbg_init,
 	struct cudbg_ulprx_la *ulprx_la_buff;
 	int rc;
 
-	rc = cudbg_get_buff(dbg_buff, sizeof(struct cudbg_ulprx_la),
+	rc = cudbg_get_buff(pdbg_init, dbg_buff, sizeof(struct cudbg_ulprx_la),
 			    &temp_buff);
 	if (rc)
 		return rc;
@@ -818,8 +1312,7 @@ int cudbg_collect_ulprx_la(struct cudbg_init *pdbg_init,
 	ulprx_la_buff = (struct cudbg_ulprx_la *)temp_buff.data;
 	t4_ulprx_read_la(padap, (u32 *)ulprx_la_buff->data);
 	ulprx_la_buff->size = ULPRX_LA_SIZE;
-	cudbg_write_and_release_buff(&temp_buff, dbg_buff);
-	return rc;
+	return cudbg_write_and_release_buff(pdbg_init, &temp_buff, dbg_buff);
 }
 
 int cudbg_collect_tp_la(struct cudbg_init *pdbg_init,
@@ -832,15 +1325,39 @@ int cudbg_collect_tp_la(struct cudbg_init *pdbg_init,
 	int size, rc;
 
 	size = sizeof(struct cudbg_tp_la) + TPLA_SIZE *  sizeof(u64);
-	rc = cudbg_get_buff(dbg_buff, size, &temp_buff);
+	rc = cudbg_get_buff(pdbg_init, dbg_buff, size, &temp_buff);
 	if (rc)
 		return rc;
 
 	tp_la_buff = (struct cudbg_tp_la *)temp_buff.data;
 	tp_la_buff->mode = DBGLAMODE_G(t4_read_reg(padap, TP_DBG_LA_CONFIG_A));
 	t4_tp_read_la(padap, (u64 *)tp_la_buff->data, NULL);
-	cudbg_write_and_release_buff(&temp_buff, dbg_buff);
-	return rc;
+	return cudbg_write_and_release_buff(pdbg_init, &temp_buff, dbg_buff);
+}
+
+int cudbg_collect_meminfo(struct cudbg_init *pdbg_init,
+			  struct cudbg_buffer *dbg_buff,
+			  struct cudbg_error *cudbg_err)
+{
+	struct adapter *padap = pdbg_init->adap;
+	struct cudbg_buffer temp_buff = { 0 };
+	struct cudbg_meminfo *meminfo_buff;
+	int rc;
+
+	rc = cudbg_get_buff(pdbg_init, dbg_buff, sizeof(struct cudbg_meminfo),
+			    &temp_buff);
+	if (rc)
+		return rc;
+
+	meminfo_buff = (struct cudbg_meminfo *)temp_buff.data;
+	rc = cudbg_fill_meminfo(padap, meminfo_buff);
+	if (rc) {
+		cudbg_err->sys_err = rc;
+		cudbg_put_buff(pdbg_init, &temp_buff);
+		return rc;
+	}
+
+	return cudbg_write_and_release_buff(pdbg_init, &temp_buff, dbg_buff);
 }
 
 int cudbg_collect_cim_pif_la(struct cudbg_init *pdbg_init,
@@ -854,7 +1371,7 @@ int cudbg_collect_cim_pif_la(struct cudbg_init *pdbg_init,
 
 	size = sizeof(struct cudbg_cim_pif_la) +
 	       2 * CIM_PIFLA_SIZE * 6 * sizeof(u32);
-	rc = cudbg_get_buff(dbg_buff, size, &temp_buff);
+	rc = cudbg_get_buff(pdbg_init, dbg_buff, size, &temp_buff);
 	if (rc)
 		return rc;
 
@@ -863,8 +1380,7 @@ int cudbg_collect_cim_pif_la(struct cudbg_init *pdbg_init,
 	t4_cim_read_pif_la(padap, (u32 *)cim_pif_la_buff->data,
 			   (u32 *)cim_pif_la_buff->data + 6 * CIM_PIFLA_SIZE,
 			   NULL, NULL);
-	cudbg_write_and_release_buff(&temp_buff, dbg_buff);
-	return rc;
+	return cudbg_write_and_release_buff(pdbg_init, &temp_buff, dbg_buff);
 }
 
 int cudbg_collect_clk_info(struct cudbg_init *pdbg_init,
@@ -880,7 +1396,7 @@ int cudbg_collect_clk_info(struct cudbg_init *pdbg_init,
 	if (!padap->params.vpd.cclk)
 		return CUDBG_STATUS_CCLK_NOT_DEFINED;
 
-	rc = cudbg_get_buff(dbg_buff, sizeof(struct cudbg_clk_info),
+	rc = cudbg_get_buff(pdbg_init, dbg_buff, sizeof(struct cudbg_clk_info),
 			    &temp_buff);
 	if (rc)
 		return rc;
@@ -912,8 +1428,7 @@ int cudbg_collect_clk_info(struct cudbg_init *pdbg_init,
 	clk_info_buff->finwait2_timer =
 		tp_tick_us * t4_read_reg(padap, TP_FINWAIT2_TIMER_A);
 
-	cudbg_write_and_release_buff(&temp_buff, dbg_buff);
-	return rc;
+	return cudbg_write_and_release_buff(pdbg_init, &temp_buff, dbg_buff);
 }
 
 int cudbg_collect_pcie_indirect(struct cudbg_init *pdbg_init,
@@ -928,7 +1443,7 @@ int cudbg_collect_pcie_indirect(struct cudbg_init *pdbg_init,
 
 	n = sizeof(t5_pcie_pdbg_array) / (IREG_NUM_ELEM * sizeof(u32));
 	size = sizeof(struct ireg_buf) * n * 2;
-	rc = cudbg_get_buff(dbg_buff, size, &temp_buff);
+	rc = cudbg_get_buff(pdbg_init, dbg_buff, size, &temp_buff);
 	if (rc)
 		return rc;
 
@@ -969,8 +1484,7 @@ int cudbg_collect_pcie_indirect(struct cudbg_init *pdbg_init,
 				 pcie_pio->ireg_local_offset);
 		ch_pcie++;
 	}
-	cudbg_write_and_release_buff(&temp_buff, dbg_buff);
-	return rc;
+	return cudbg_write_and_release_buff(pdbg_init, &temp_buff, dbg_buff);
 }
 
 int cudbg_collect_pm_indirect(struct cudbg_init *pdbg_init,
@@ -985,7 +1499,7 @@ int cudbg_collect_pm_indirect(struct cudbg_init *pdbg_init,
 
 	n = sizeof(t5_pm_rx_array) / (IREG_NUM_ELEM * sizeof(u32));
 	size = sizeof(struct ireg_buf) * n * 2;
-	rc = cudbg_get_buff(dbg_buff, size, &temp_buff);
+	rc = cudbg_get_buff(pdbg_init, dbg_buff, size, &temp_buff);
 	if (rc)
 		return rc;
 
@@ -1026,8 +1540,7 @@ int cudbg_collect_pm_indirect(struct cudbg_init *pdbg_init,
 				 pm_pio->ireg_local_offset);
 		ch_pm++;
 	}
-	cudbg_write_and_release_buff(&temp_buff, dbg_buff);
-	return rc;
+	return cudbg_write_and_release_buff(pdbg_init, &temp_buff, dbg_buff);
 }
 
 int cudbg_collect_tid(struct cudbg_init *pdbg_init,
@@ -1041,7 +1554,8 @@ int cudbg_collect_tid(struct cudbg_init *pdbg_init,
 	u32 para[2], val[2];
 	int rc;
 
-	rc = cudbg_get_buff(dbg_buff, sizeof(struct cudbg_tid_info_region_rev1),
+	rc = cudbg_get_buff(pdbg_init, dbg_buff,
+			    sizeof(struct cudbg_tid_info_region_rev1),
 			    &temp_buff);
 	if (rc)
 		return rc;
@@ -1053,6 +1567,12 @@ int cudbg_collect_tid(struct cudbg_init *pdbg_init,
 	tid1->ver_hdr.size = sizeof(struct cudbg_tid_info_region_rev1) -
 			     sizeof(struct cudbg_ver_hdr);
 
+	/* If firmware is not attached/alive, use backdoor register
+	 * access to collect the dump.
+	 */
+	if (!is_fw_attached(pdbg_init))
+		goto fill_tid;
+
 #define FW_PARAM_PFVF_A(param) \
 	(FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_PFVF) | \
 	 FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_PFVF_##param) | \
@@ -1064,7 +1584,7 @@ int cudbg_collect_tid(struct cudbg_init *pdbg_init,
 	rc = t4_query_params(padap, padap->mbox, padap->pf, 0, 2, para, val);
 	if (rc <  0) {
 		cudbg_err->sys_err = rc;
-		cudbg_put_buff(&temp_buff, dbg_buff);
+		cudbg_put_buff(pdbg_init, &temp_buff);
 		return rc;
 	}
 	tid->uotid_base = val[0];
@@ -1083,13 +1603,16 @@ int cudbg_collect_tid(struct cudbg_init *pdbg_init,
 				     para, val);
 		if (rc < 0) {
 			cudbg_err->sys_err = rc;
-			cudbg_put_buff(&temp_buff, dbg_buff);
+			cudbg_put_buff(pdbg_init, &temp_buff);
 			return rc;
 		}
 		tid->hpftid_base = val[0];
 		tid->nhpftids = val[1] - val[0] + 1;
 	}
 
+#undef FW_PARAM_PFVF_A
+
+fill_tid:
 	tid->ntids = padap->tids.ntids;
 	tid->nstids = padap->tids.nstids;
 	tid->stid_base = padap->tids.stid_base;
@@ -1109,28 +1632,137 @@ int cudbg_collect_tid(struct cudbg_init *pdbg_init,
 	tid->ip_users = t4_read_reg(padap, LE_DB_ACT_CNT_IPV4_A);
 	tid->ipv6_users = t4_read_reg(padap, LE_DB_ACT_CNT_IPV6_A);
 
-#undef FW_PARAM_PFVF_A
+	return cudbg_write_and_release_buff(pdbg_init, &temp_buff, dbg_buff);
+}
 
-	cudbg_write_and_release_buff(&temp_buff, dbg_buff);
-	return rc;
+int cudbg_collect_pcie_config(struct cudbg_init *pdbg_init,
+			      struct cudbg_buffer *dbg_buff,
+			      struct cudbg_error *cudbg_err)
+{
+	struct adapter *padap = pdbg_init->adap;
+	struct cudbg_buffer temp_buff = { 0 };
+	u32 size, *value, j;
+	int i, rc, n;
+
+	size = sizeof(u32) * CUDBG_NUM_PCIE_CONFIG_REGS;
+	n = sizeof(t5_pcie_config_array) / (2 * sizeof(u32));
+	rc = cudbg_get_buff(pdbg_init, dbg_buff, size, &temp_buff);
+	if (rc)
+		return rc;
+
+	value = (u32 *)temp_buff.data;
+	for (i = 0; i < n; i++) {
+		for (j = t5_pcie_config_array[i][0];
+		     j <= t5_pcie_config_array[i][1]; j += 4) {
+			t4_hw_pci_read_cfg4(padap, j, value);
+			value++;
+		}
+	}
+	return cudbg_write_and_release_buff(pdbg_init, &temp_buff, dbg_buff);
 }
 
-int cudbg_dump_context_size(struct adapter *padap)
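+/* Test the "valid" bit in a raw SGE context image; the bit position
+ * depends on the context type.
+ */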
+static int cudbg_sge_ctxt_check_valid(u32 *buf, int type)
+{
+	int index, bit, bit_pos = 0;
+
+	switch (type) {
+	case CTXT_EGRESS:
+		bit_pos = 176;
+		break;
+	case CTXT_INGRESS:
+		bit_pos = 141;
+		break;
+	case CTXT_FLM:
+		bit_pos = 89;
+		break;
+	}
+	index = bit_pos / 32;
+	bit =  bit_pos % 32;
+	return buf[index] & (1U << bit);
+}
+
+static int cudbg_get_ctxt_region_info(struct adapter *padap,
+				      struct cudbg_region_info *ctx_info,
+				      u8 *mem_type)
 {
-	u32 value, size;
+	struct cudbg_mem_desc mem_desc;
+	struct cudbg_meminfo meminfo;
+	u32 i, j, value, found;
 	u8 flq;
+	int rc;
+
+	rc = cudbg_fill_meminfo(padap, &meminfo);
+	if (rc)
+		return rc;
+
+	/* Get EGRESS and INGRESS context region size */
+	for (i = CTXT_EGRESS; i <= CTXT_INGRESS; i++) {
+		found = 0;
+		memset(&mem_desc, 0, sizeof(struct cudbg_mem_desc));
+		for (j = 0; j < ARRAY_SIZE(meminfo.avail); j++) {
+			rc = cudbg_get_mem_region(padap, &meminfo, j,
+						  cudbg_region[i],
+						  &mem_desc);
+			if (!rc) {
+				found = 1;
+				rc = cudbg_get_mem_relative(padap, &meminfo, j,
+							    &mem_desc.base,
+							    &mem_desc.limit);
+				if (rc) {
+					ctx_info[i].exist = false;
+					break;
+				}
+				ctx_info[i].exist = true;
+				ctx_info[i].start = mem_desc.base;
+				ctx_info[i].end = mem_desc.limit;
+				mem_type[i] = j;
+				break;
+			}
+		}
+		if (!found)
+			ctx_info[i].exist = false;
+	}
 
+	/* Get FLM and CNM max qid. */
 	value = t4_read_reg(padap, SGE_FLM_CFG_A);
 
 	/* Get number of data freelist queues */
 	flq = HDRSTARTFLQ_G(value);
-	size = CUDBG_MAX_FL_QIDS >> flq;
+	ctx_info[CTXT_FLM].exist = true;
+	ctx_info[CTXT_FLM].end = (CUDBG_MAX_FL_QIDS >> flq) * SGE_CTXT_SIZE;
 
-	/* Add extra space for congestion manager contexts.
-	 * The number of CONM contexts are same as number of freelist
+	/* The number of CONM contexts is the same as the number of freelist
 	 * queues.
 	 */
-	size += size;
+	ctx_info[CTXT_CNM].exist = true;
+	ctx_info[CTXT_CNM].end = ctx_info[CTXT_FLM].end;
+
+	return 0;
+}
+
+int cudbg_dump_context_size(struct adapter *padap)
+{
+	struct cudbg_region_info region_info[CTXT_CNM + 1] = { {0} };
+	u8 mem_type[CTXT_INGRESS + 1] = { 0 };
+	u32 i, size = 0;
+	int rc;
+
+	/* Get max valid qid for each type of queue */
+	rc = cudbg_get_ctxt_region_info(padap, region_info, mem_type);
+	if (rc)
+		return rc;
+
+	for (i = 0; i < CTXT_CNM; i++) {
+		if (!region_info[i].exist) {
+			if (i == CTXT_EGRESS || i == CTXT_INGRESS)
+				size += CUDBG_LOWMEM_MAX_CTXT_QIDS *
+					SGE_CTXT_SIZE;
+			continue;
+		}
+
+		size += (region_info[i].end - region_info[i].start + 1) /
+			SGE_CTXT_SIZE;
+	}
 	return size * sizeof(struct cudbg_ch_cntxt);
 }
 
@@ -1153,44 +1785,144 @@ static void cudbg_read_sge_ctxt(struct cudbg_init *pdbg_init, u32 cid,
 		t4_sge_ctxt_rd_bd(padap, cid, ctype, data);
 }
 
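+/* Read contexts one qid at a time via FW (or backdoor access), keeping
+ * only those whose valid bit is set. For FLM contexts, the paired CNM
+ * context is fetched as well.
+ */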
+static void cudbg_get_sge_ctxt_fw(struct cudbg_init *pdbg_init, u32 max_qid,
+				  u8 ctxt_type,
+				  struct cudbg_ch_cntxt **out_buff)
+{
+	struct cudbg_ch_cntxt *buff = *out_buff;
+	int rc;
+	u32 j;
+
+	for (j = 0; j < max_qid; j++) {
+		cudbg_read_sge_ctxt(pdbg_init, j, ctxt_type, buff->data);
+		rc = cudbg_sge_ctxt_check_valid(buff->data, ctxt_type);
+		if (!rc)
+			continue;
+
+		buff->cntxt_type = ctxt_type;
+		buff->cntxt_id = j;
+		buff++;
+		if (ctxt_type == CTXT_FLM) {
+			cudbg_read_sge_ctxt(pdbg_init, j, CTXT_CNM, buff->data);
+			buff->cntxt_type = CTXT_CNM;
+			buff->cntxt_id = j;
+			buff++;
+		}
+	}
+
+	*out_buff = buff;
+}
+
 int cudbg_collect_dump_context(struct cudbg_init *pdbg_init,
 			       struct cudbg_buffer *dbg_buff,
 			       struct cudbg_error *cudbg_err)
 {
+	struct cudbg_region_info region_info[CTXT_CNM + 1] = { {0} };
 	struct adapter *padap = pdbg_init->adap;
+	u32 j, size, max_ctx_size, max_ctx_qid;
+	u8 mem_type[CTXT_INGRESS + 1] = { 0 };
 	struct cudbg_buffer temp_buff = { 0 };
 	struct cudbg_ch_cntxt *buff;
-	u32 size, i = 0;
+	u64 *dst_off, *src_off;
+	u8 *ctx_buf;
+	u8 i, k;
 	int rc;
 
+	/* Get max valid qid for each type of queue */
+	rc = cudbg_get_ctxt_region_info(padap, region_info, mem_type);
+	if (rc)
+		return rc;
+
 	rc = cudbg_dump_context_size(padap);
 	if (rc <= 0)
 		return CUDBG_STATUS_ENTITY_NOT_FOUND;
 
 	size = rc;
-	rc = cudbg_get_buff(dbg_buff, size, &temp_buff);
+	rc = cudbg_get_buff(pdbg_init, dbg_buff, size, &temp_buff);
 	if (rc)
 		return rc;
 
+	/* Allocate a buffer with enough space to read the biggest context
+	 * region in memory.
+	 */
+	max_ctx_size = max(region_info[CTXT_EGRESS].end -
+			   region_info[CTXT_EGRESS].start + 1,
+			   region_info[CTXT_INGRESS].end -
+			   region_info[CTXT_INGRESS].start + 1);
+
+	ctx_buf = kvzalloc(max_ctx_size, GFP_KERNEL);
+	if (!ctx_buf) {
+		cudbg_put_buff(pdbg_init, &temp_buff);
+		return -ENOMEM;
+	}
+
 	buff = (struct cudbg_ch_cntxt *)temp_buff.data;
-	while (size > 0) {
-		buff->cntxt_type = CTXT_FLM;
-		buff->cntxt_id = i;
-		cudbg_read_sge_ctxt(pdbg_init, i, CTXT_FLM, buff->data);
-		buff++;
-		size -= sizeof(struct cudbg_ch_cntxt);
 
-		buff->cntxt_type = CTXT_CNM;
-		buff->cntxt_id = i;
-		cudbg_read_sge_ctxt(pdbg_init, i, CTXT_CNM, buff->data);
-		buff++;
-		size -= sizeof(struct cudbg_ch_cntxt);
+	/* Collect EGRESS and INGRESS context data.
+	 * In case of failure, fall back to collecting via FW or
+	 * backdoor access.
+	 */
+	for (i = CTXT_EGRESS; i <= CTXT_INGRESS; i++) {
+		if (!region_info[i].exist) {
+			max_ctx_qid = CUDBG_LOWMEM_MAX_CTXT_QIDS;
+			cudbg_get_sge_ctxt_fw(pdbg_init, max_ctx_qid, i,
+					      &buff);
+			continue;
+		}
 
-		i++;
+		max_ctx_size = region_info[i].end - region_info[i].start + 1;
+		max_ctx_qid = max_ctx_size / SGE_CTXT_SIZE;
+
+		/* If firmware is not attached/alive, use backdoor register
+		 * access to collect the dump.
+		 */
+		if (is_fw_attached(pdbg_init)) {
+			t4_sge_ctxt_flush(padap, padap->mbox, i);
+
+			rc = t4_memory_rw(padap, MEMWIN_NIC, mem_type[i],
+					  region_info[i].start, max_ctx_size,
+					  (__be32 *)ctx_buf, 1);
+		}
+
+		if (rc || !is_fw_attached(pdbg_init)) {
+			max_ctx_qid = CUDBG_LOWMEM_MAX_CTXT_QIDS;
+			cudbg_get_sge_ctxt_fw(pdbg_init, max_ctx_qid, i,
+					      &buff);
+			continue;
+		}
+
+		for (j = 0; j < max_ctx_qid; j++) {
+			src_off = (u64 *)(ctx_buf + j * SGE_CTXT_SIZE);
+			dst_off = (u64 *)buff->data;
+
+			/* The data is stored in 64-bit CPU order. Convert it
+			 * to big-endian before parsing.
+			 */
+			for (k = 0; k < SGE_CTXT_SIZE / sizeof(u64); k++)
+				dst_off[k] = cpu_to_be64(src_off[k]);
+
+			rc = cudbg_sge_ctxt_check_valid(buff->data, i);
+			if (!rc)
+				continue;
+
+			buff->cntxt_type = i;
+			buff->cntxt_id = j;
+			buff++;
+		}
 	}
 
-	cudbg_write_and_release_buff(&temp_buff, dbg_buff);
-	return rc;
+	kvfree(ctx_buf);
+
+	/* Collect FREELIST and CONGESTION MANAGER contexts */
+	max_ctx_size = region_info[CTXT_FLM].end -
+		       region_info[CTXT_FLM].start + 1;
+	max_ctx_qid = max_ctx_size / SGE_CTXT_SIZE;
+	/* Since FLM and CONM contexts are mapped 1-to-1, the call below
+	 * fetches both FLM and CONM contexts.
+	 */
+	cudbg_get_sge_ctxt_fw(pdbg_init, max_ctx_qid, CTXT_FLM, &buff);
+
+	return cudbg_write_and_release_buff(pdbg_init, &temp_buff, dbg_buff);
 }
 
 static inline void cudbg_tcamxy2valmask(u64 x, u64 y, u8 *addr, u64 *mask)
@@ -1228,9 +1960,10 @@ static void cudbg_mps_rpl_backdoor(struct adapter *padap,
 	mps_rplc->rplc31_0 = htonl(t4_read_reg(padap, MPS_VF_RPLCT_MAP0_A));
 }
 
-static int cudbg_collect_tcam_index(struct adapter *padap,
+static int cudbg_collect_tcam_index(struct cudbg_init *pdbg_init,
 				    struct cudbg_mps_tcam *tcam, u32 idx)
 {
+	struct adapter *padap = pdbg_init->adap;
 	u64 tcamy, tcamx, val;
 	u32 ctl, data2;
 	int rc = 0;
@@ -1315,12 +2048,22 @@ static int cudbg_collect_tcam_index(struct adapter *padap,
 			htons(FW_LDST_CMD_FID_V(FW_LDST_MPS_RPLC) |
 			      FW_LDST_CMD_IDX_V(idx));
 
-		rc = t4_wr_mbox(padap, padap->mbox, &ldst_cmd, sizeof(ldst_cmd),
-				&ldst_cmd);
-		if (rc)
+		/* If firmware is not attached/alive, use backdoor register
+		 * access to collect the dump.
+		 */
+		if (is_fw_attached(pdbg_init))
+			rc = t4_wr_mbox(padap, padap->mbox, &ldst_cmd,
+					sizeof(ldst_cmd), &ldst_cmd);
+
+		if (rc || !is_fw_attached(pdbg_init)) {
 			cudbg_mps_rpl_backdoor(padap, &mps_rplc);
-		else
+			/* Ignore the error since we collected the data
+			 * directly by reading registers.
+			 */
+			rc = 0;
+		} else {
 			mps_rplc = ldst_cmd.u.mps.rplc;
+		}
 
 		tcam->rplc[0] = ntohl(mps_rplc.rplc31_0);
 		tcam->rplc[1] = ntohl(mps_rplc.rplc63_32);
@@ -1351,16 +2094,16 @@ int cudbg_collect_mps_tcam(struct cudbg_init *pdbg_init,
 
 	n = padap->params.arch.mps_tcam_size;
 	size = sizeof(struct cudbg_mps_tcam) * n;
-	rc = cudbg_get_buff(dbg_buff, size, &temp_buff);
+	rc = cudbg_get_buff(pdbg_init, dbg_buff, size, &temp_buff);
 	if (rc)
 		return rc;
 
 	tcam = (struct cudbg_mps_tcam *)temp_buff.data;
 	for (i = 0; i < n; i++) {
-		rc = cudbg_collect_tcam_index(padap, tcam, i);
+		rc = cudbg_collect_tcam_index(pdbg_init, tcam, i);
 		if (rc) {
 			cudbg_err->sys_err = rc;
-			cudbg_put_buff(&temp_buff, dbg_buff);
+			cudbg_put_buff(pdbg_init, &temp_buff);
 			return rc;
 		}
 		total_size += sizeof(struct cudbg_mps_tcam);
@@ -1370,11 +2113,10 @@ int cudbg_collect_mps_tcam(struct cudbg_init *pdbg_init,
 	if (!total_size) {
 		rc = CUDBG_SYSTEM_ERROR;
 		cudbg_err->sys_err = rc;
-		cudbg_put_buff(&temp_buff, dbg_buff);
+		cudbg_put_buff(pdbg_init, &temp_buff);
 		return rc;
 	}
-	cudbg_write_and_release_buff(&temp_buff, dbg_buff);
-	return rc;
+	return cudbg_write_and_release_buff(pdbg_init, &temp_buff, dbg_buff);
 }
 
 int cudbg_collect_vpd_data(struct cudbg_init *pdbg_init,
@@ -1425,7 +2167,7 @@ int cudbg_collect_vpd_data(struct cudbg_init *pdbg_init,
 	if (rc)
 		return rc;
 
-	rc = cudbg_get_buff(dbg_buff, sizeof(struct cudbg_vpd_data),
+	rc = cudbg_get_buff(pdbg_init, dbg_buff, sizeof(struct cudbg_vpd_data),
 			    &temp_buff);
 	if (rc)
 		return rc;
@@ -1441,8 +2183,7 @@ int cudbg_collect_vpd_data(struct cudbg_init *pdbg_init,
 	vpd_data->fw_minor = FW_HDR_FW_VER_MINOR_G(fw_vers);
 	vpd_data->fw_micro = FW_HDR_FW_VER_MICRO_G(fw_vers);
 	vpd_data->fw_build = FW_HDR_FW_VER_BUILD_G(fw_vers);
-	cudbg_write_and_release_buff(&temp_buff, dbg_buff);
-	return rc;
+	return cudbg_write_and_release_buff(pdbg_init, &temp_buff, dbg_buff);
 }
 
 static int cudbg_read_tid(struct cudbg_init *pdbg_init, u32 tid,
@@ -1593,7 +2334,7 @@ int cudbg_collect_le_tcam(struct cudbg_init *pdbg_init,
 
 	size = sizeof(struct cudbg_tid_data) * tcam_region.max_tid;
 	size += sizeof(struct cudbg_tcam);
-	rc = cudbg_get_buff(dbg_buff, size, &temp_buff);
+	rc = cudbg_get_buff(pdbg_init, dbg_buff, size, &temp_buff);
 	if (rc)
 		return rc;
 
@@ -1605,7 +2346,7 @@ int cudbg_collect_le_tcam(struct cudbg_init *pdbg_init,
 		rc = cudbg_read_tid(pdbg_init, i, tid_data);
 		if (rc) {
 			cudbg_err->sys_err = rc;
-			cudbg_put_buff(&temp_buff, dbg_buff);
+			cudbg_put_buff(pdbg_init, &temp_buff);
 			return rc;
 		}
 
@@ -1616,8 +2357,7 @@ int cudbg_collect_le_tcam(struct cudbg_init *pdbg_init,
 		bytes += sizeof(struct cudbg_tid_data);
 	}
 
-	cudbg_write_and_release_buff(&temp_buff, dbg_buff);
-	return rc;
+	return cudbg_write_and_release_buff(pdbg_init, &temp_buff, dbg_buff);
 }
 
 int cudbg_collect_cctrl(struct cudbg_init *pdbg_init,
@@ -1630,13 +2370,12 @@ int cudbg_collect_cctrl(struct cudbg_init *pdbg_init,
 	int rc;
 
 	size = sizeof(u16) * NMTUS * NCCTRL_WIN;
-	rc = cudbg_get_buff(dbg_buff, size, &temp_buff);
+	rc = cudbg_get_buff(pdbg_init, dbg_buff, size, &temp_buff);
 	if (rc)
 		return rc;
 
 	t4_read_cong_tbl(padap, (void *)temp_buff.data);
-	cudbg_write_and_release_buff(&temp_buff, dbg_buff);
-	return rc;
+	return cudbg_write_and_release_buff(pdbg_init, &temp_buff, dbg_buff);
 }
 
 int cudbg_collect_ma_indirect(struct cudbg_init *pdbg_init,
@@ -1654,7 +2393,7 @@ int cudbg_collect_ma_indirect(struct cudbg_init *pdbg_init,
 
 	n = sizeof(t6_ma_ireg_array) / (IREG_NUM_ELEM * sizeof(u32));
 	size = sizeof(struct ireg_buf) * n * 2;
-	rc = cudbg_get_buff(dbg_buff, size, &temp_buff);
+	rc = cudbg_get_buff(pdbg_init, dbg_buff, size, &temp_buff);
 	if (rc)
 		return rc;
 
@@ -1690,8 +2429,7 @@ int cudbg_collect_ma_indirect(struct cudbg_init *pdbg_init,
 		}
 		ma_indr++;
 	}
-	cudbg_write_and_release_buff(&temp_buff, dbg_buff);
-	return rc;
+	return cudbg_write_and_release_buff(pdbg_init, &temp_buff, dbg_buff);
 }
 
 int cudbg_collect_ulptx_la(struct cudbg_init *pdbg_init,
@@ -1704,7 +2442,7 @@ int cudbg_collect_ulptx_la(struct cudbg_init *pdbg_init,
 	u32 i, j;
 	int rc;
 
-	rc = cudbg_get_buff(dbg_buff, sizeof(struct cudbg_ulptx_la),
+	rc = cudbg_get_buff(pdbg_init, dbg_buff, sizeof(struct cudbg_ulptx_la),
 			    &temp_buff);
 	if (rc)
 		return rc;
@@ -1725,8 +2463,7 @@ int cudbg_collect_ulptx_la(struct cudbg_init *pdbg_init,
 				t4_read_reg(padap,
 					    ULP_TX_LA_RDDATA_0_A + 0x10 * i);
 	}
-	cudbg_write_and_release_buff(&temp_buff, dbg_buff);
-	return rc;
+	return cudbg_write_and_release_buff(pdbg_init, &temp_buff, dbg_buff);
 }
 
 int cudbg_collect_up_cim_indirect(struct cudbg_init *pdbg_init,
@@ -1735,13 +2472,23 @@ int cudbg_collect_up_cim_indirect(struct cudbg_init *pdbg_init,
 {
 	struct adapter *padap = pdbg_init->adap;
 	struct cudbg_buffer temp_buff = { 0 };
+	u32 local_offset, local_range;
 	struct ireg_buf *up_cim;
+	u32 size, j, iter;
+	u32 instance = 0;
 	int i, rc, n;
-	u32 size;
 
-	n = sizeof(t5_up_cim_reg_array) / (IREG_NUM_ELEM * sizeof(u32));
+	if (is_t5(padap->params.chip))
+		n = sizeof(t5_up_cim_reg_array) /
+		    ((IREG_NUM_ELEM + 1) * sizeof(u32));
+	else if (is_t6(padap->params.chip))
+		n = sizeof(t6_up_cim_reg_array) /
+		    ((IREG_NUM_ELEM + 1) * sizeof(u32));
+	else
+		return CUDBG_STATUS_NOT_IMPLEMENTED;
+
 	size = sizeof(struct ireg_buf) * n;
-	rc = cudbg_get_buff(dbg_buff, size, &temp_buff);
+	rc = cudbg_get_buff(pdbg_init, dbg_buff, size, &temp_buff);
 	if (rc)
 		return rc;
 
@@ -1757,6 +2504,7 @@ int cudbg_collect_up_cim_indirect(struct cudbg_init *pdbg_init,
 						t5_up_cim_reg_array[i][2];
 			up_cim_reg->ireg_offset_range =
 						t5_up_cim_reg_array[i][3];
+			instance = t5_up_cim_reg_array[i][4];
 		} else if (is_t6(padap->params.chip)) {
 			up_cim_reg->ireg_addr = t6_up_cim_reg_array[i][0];
 			up_cim_reg->ireg_data = t6_up_cim_reg_array[i][1];
@@ -1764,18 +2512,39 @@ int cudbg_collect_up_cim_indirect(struct cudbg_init *pdbg_init,
 						t6_up_cim_reg_array[i][2];
 			up_cim_reg->ireg_offset_range =
 						t6_up_cim_reg_array[i][3];
+			instance = t6_up_cim_reg_array[i][4];
 		}
 
-		rc = t4_cim_read(padap, up_cim_reg->ireg_local_offset,
-				 up_cim_reg->ireg_offset_range, buff);
-		if (rc) {
-			cudbg_put_buff(&temp_buff, dbg_buff);
-			return rc;
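+		/* TSCH channel and TSCH class registers repeat at fixed
+		 * strides, so read them one instance at a time instead of
+		 * as one contiguous range.
+		 */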
+		switch (instance) {
+		case NUM_CIM_CTL_TSCH_CHANNEL_INSTANCES:
+			iter = up_cim_reg->ireg_offset_range;
+			local_offset = 0x120;
+			local_range = 1;
+			break;
+		case NUM_CIM_CTL_TSCH_CHANNEL_TSCH_CLASS_INSTANCES:
+			iter = up_cim_reg->ireg_offset_range;
+			local_offset = 0x10;
+			local_range = 1;
+			break;
+		default:
+			iter = 1;
+			local_offset = 0;
+			local_range = up_cim_reg->ireg_offset_range;
+			break;
+		}
+
+		for (j = 0; j < iter; j++, buff++) {
+			rc = t4_cim_read(padap,
+					 up_cim_reg->ireg_local_offset +
+					 (j * local_offset), local_range, buff);
+			if (rc) {
+				cudbg_put_buff(pdbg_init, &temp_buff);
+				return rc;
+			}
 		}
 		up_cim++;
 	}
-	cudbg_write_and_release_buff(&temp_buff, dbg_buff);
-	return rc;
+	return cudbg_write_and_release_buff(pdbg_init, &temp_buff, dbg_buff);
 }
 
 int cudbg_collect_pbt_tables(struct cudbg_init *pdbg_init,
@@ -1788,7 +2557,8 @@ int cudbg_collect_pbt_tables(struct cudbg_init *pdbg_init,
 	int i, rc;
 	u32 addr;
 
-	rc = cudbg_get_buff(dbg_buff, sizeof(struct cudbg_pbt_tables),
+	rc = cudbg_get_buff(pdbg_init, dbg_buff,
+			    sizeof(struct cudbg_pbt_tables),
 			    &temp_buff);
 	if (rc)
 		return rc;
@@ -1801,7 +2571,7 @@ int cudbg_collect_pbt_tables(struct cudbg_init *pdbg_init,
 				 &pbt->pbt_dynamic[i]);
 		if (rc) {
 			cudbg_err->sys_err = rc;
-			cudbg_put_buff(&temp_buff, dbg_buff);
+			cudbg_put_buff(pdbg_init, &temp_buff);
 			return rc;
 		}
 	}
@@ -1814,7 +2584,7 @@ int cudbg_collect_pbt_tables(struct cudbg_init *pdbg_init,
 				 &pbt->pbt_static[i]);
 		if (rc) {
 			cudbg_err->sys_err = rc;
-			cudbg_put_buff(&temp_buff, dbg_buff);
+			cudbg_put_buff(pdbg_init, &temp_buff);
 			return rc;
 		}
 	}
@@ -1826,7 +2596,7 @@ int cudbg_collect_pbt_tables(struct cudbg_init *pdbg_init,
 				 &pbt->lrf_table[i]);
 		if (rc) {
 			cudbg_err->sys_err = rc;
-			cudbg_put_buff(&temp_buff, dbg_buff);
+			cudbg_put_buff(pdbg_init, &temp_buff);
 			return rc;
 		}
 	}
@@ -1838,12 +2608,11 @@ int cudbg_collect_pbt_tables(struct cudbg_init *pdbg_init,
 				 &pbt->pbt_data[i]);
 		if (rc) {
 			cudbg_err->sys_err = rc;
-			cudbg_put_buff(&temp_buff, dbg_buff);
+			cudbg_put_buff(pdbg_init, &temp_buff);
 			return rc;
 		}
 	}
-	cudbg_write_and_release_buff(&temp_buff, dbg_buff);
-	return rc;
+	return cudbg_write_and_release_buff(pdbg_init, &temp_buff, dbg_buff);
 }
 
 int cudbg_collect_mbox_log(struct cudbg_init *pdbg_init,
@@ -1864,7 +2633,7 @@ int cudbg_collect_mbox_log(struct cudbg_init *pdbg_init,
 	log = padap->mbox_log;
 	mbox_cmds = padap->mbox_log->size;
 	size = sizeof(struct cudbg_mbox_log) * mbox_cmds;
-	rc = cudbg_get_buff(dbg_buff, size, &temp_buff);
+	rc = cudbg_get_buff(pdbg_init, dbg_buff, size, &temp_buff);
 	if (rc)
 		return rc;
 
@@ -1887,8 +2656,7 @@ int cudbg_collect_mbox_log(struct cudbg_init *pdbg_init,
 		}
 		mboxlog++;
 	}
-	cudbg_write_and_release_buff(&temp_buff, dbg_buff);
-	return rc;
+	return cudbg_write_and_release_buff(pdbg_init, &temp_buff, dbg_buff);
 }
 
 int cudbg_collect_hma_indirect(struct cudbg_init *pdbg_init,
@@ -1906,7 +2674,7 @@ int cudbg_collect_hma_indirect(struct cudbg_init *pdbg_init,
 
 	n = sizeof(t6_hma_ireg_array) / (IREG_NUM_ELEM * sizeof(u32));
 	size = sizeof(struct ireg_buf) * n;
-	rc = cudbg_get_buff(dbg_buff, size, &temp_buff);
+	rc = cudbg_get_buff(pdbg_init, dbg_buff, size, &temp_buff);
 	if (rc)
 		return rc;
 
@@ -1924,6 +2692,5 @@ int cudbg_collect_hma_indirect(struct cudbg_init *pdbg_init,
 				 hma_fli->ireg_local_offset);
 		hma_indr++;
 	}
-	cudbg_write_and_release_buff(&temp_buff, dbg_buff);
-	return rc;
+	return cudbg_write_and_release_buff(pdbg_init, &temp_buff, dbg_buff);
 }
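
The strided-read switch above turns one contiguous t4_cim_read() into a per-instance walk: for the TSCH channel and class instances the loop issues iter single-register reads spaced local_offset apart, while the default case keeps the old behaviour of one read covering ireg_offset_range registers. A standalone sketch of that access pattern follows, with read_one() as a hypothetical stand-in for t4_cim_read() (the 0x120 and 0x10 strides are the values from the hunk above):

	/* Sketch of the strided-read pattern in cudbg_collect_up_cim_indirect().
	 * read_one() is a hypothetical stand-in for t4_cim_read(); it fakes
	 * register contents so the walk is observable.
	 */
	#include <stdint.h>

	static int read_one(uint32_t addr, uint32_t nregs, uint32_t *out)
	{
		uint32_t k;

		for (k = 0; k < nregs; k++)
			out[k] = addr + k;	/* pretend value == address */
		return 0;
	}

	static int read_block(uint32_t base, uint32_t range, int per_instance,
			      uint32_t stride, uint32_t *buff)
	{
		uint32_t iter  = per_instance ? range : 1; /* reads to issue */
		uint32_t nregs = per_instance ? 1 : range; /* regs per read */
		uint32_t j;
		int rc;

		for (j = 0; j < iter; j++, buff++) {
			rc = read_one(base + j * stride, nregs, buff);
			if (rc)
				return rc;
		}
		return 0;
	}
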
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.h b/drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.h
index caeee8e33e86..eebefe7cd18e 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.h
@@ -75,6 +75,12 @@ int cudbg_collect_edc0_meminfo(struct cudbg_init *pdbg_init,
 int cudbg_collect_edc1_meminfo(struct cudbg_init *pdbg_init,
 			       struct cudbg_buffer *dbg_buff,
 			       struct cudbg_error *cudbg_err);
+int cudbg_collect_mc0_meminfo(struct cudbg_init *pdbg_init,
+			      struct cudbg_buffer *dbg_buff,
+			      struct cudbg_error *cudbg_err);
+int cudbg_collect_mc1_meminfo(struct cudbg_init *pdbg_init,
+			      struct cudbg_buffer *dbg_buff,
+			      struct cudbg_error *cudbg_err);
 int cudbg_collect_rss(struct cudbg_init *pdbg_init,
 		      struct cudbg_buffer *dbg_buff,
 		      struct cudbg_error *cudbg_err);
@@ -102,6 +108,9 @@ int cudbg_collect_ulprx_la(struct cudbg_init *pdbg_init,
 int cudbg_collect_tp_la(struct cudbg_init *pdbg_init,
 			struct cudbg_buffer *dbg_buff,
 			struct cudbg_error *cudbg_err);
+int cudbg_collect_meminfo(struct cudbg_init *pdbg_init,
+			  struct cudbg_buffer *dbg_buff,
+			  struct cudbg_error *cudbg_err);
 int cudbg_collect_cim_pif_la(struct cudbg_init *pdbg_init,
 			     struct cudbg_buffer *dbg_buff,
 			     struct cudbg_error *cudbg_err);
@@ -123,6 +132,9 @@ int cudbg_collect_pm_indirect(struct cudbg_init *pdbg_init,
 int cudbg_collect_tid(struct cudbg_init *pdbg_init,
 		      struct cudbg_buffer *dbg_buff,
 		      struct cudbg_error *cudbg_err);
+int cudbg_collect_pcie_config(struct cudbg_init *pdbg_init,
+			      struct cudbg_buffer *dbg_buff,
+			      struct cudbg_error *cudbg_err);
 int cudbg_collect_dump_context(struct cudbg_init *pdbg_init,
 			       struct cudbg_buffer *dbg_buff,
 			       struct cudbg_error *cudbg_err);
@@ -156,6 +168,9 @@ int cudbg_collect_mbox_log(struct cudbg_init *pdbg_init,
 int cudbg_collect_hma_indirect(struct cudbg_init *pdbg_init,
 			       struct cudbg_buffer *dbg_buff,
 			       struct cudbg_error *cudbg_err);
+int cudbg_collect_hma_meminfo(struct cudbg_init *pdbg_init,
+			      struct cudbg_buffer *dbg_buff,
+			      struct cudbg_error *cudbg_err);
 
 struct cudbg_entity_hdr *cudbg_get_entity_hdr(void *outbuf, int i);
 void cudbg_align_debug_buffer(struct cudbg_buffer *dbg_buff,
@@ -163,7 +178,8 @@ void cudbg_align_debug_buffer(struct cudbg_buffer *dbg_buff,
 u32 cudbg_cim_obq_size(struct adapter *padap, int qid);
 int cudbg_dump_context_size(struct adapter *padap);
 
-struct cudbg_tcam;
+int cudbg_fill_meminfo(struct adapter *padap,
+		       struct cudbg_meminfo *meminfo_buff);
 void cudbg_fill_le_tcam_info(struct adapter *padap,
 			     struct cudbg_tcam *tcam_region);
 #endif /* __CUDBG_LIB_H__ */
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cudbg_lib_common.h b/drivers/net/ethernet/chelsio/cxgb4/cudbg_lib_common.h
index 24b33f28e548..8150ea85d6a5 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cudbg_lib_common.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/cudbg_lib_common.h
@@ -26,6 +26,7 @@ enum cudbg_dump_type {
 
 enum cudbg_compression_type {
 	CUDBG_COMPRESSION_NONE = 1,
+	CUDBG_COMPRESSION_ZLIB,
 };
 
 struct cudbg_hdr {
@@ -78,10 +79,11 @@ struct cudbg_error {
 #define CDUMP_MAX_COMP_BUF_SIZE ((64 * 1024) - 1)
 #define CUDBG_CHUNK_SIZE ((CDUMP_MAX_COMP_BUF_SIZE / 1024) * 1024)
 
-int cudbg_get_buff(struct cudbg_buffer *pdbg_buff, u32 size,
+int cudbg_get_buff(struct cudbg_init *pdbg_init,
+		   struct cudbg_buffer *pdbg_buff, u32 size,
 		   struct cudbg_buffer *pin_buff);
-void cudbg_put_buff(struct cudbg_buffer *pin_buff,
-		    struct cudbg_buffer *pdbg_buff);
+void cudbg_put_buff(struct cudbg_init *pdbg_init,
+		    struct cudbg_buffer *pin_buff);
 void cudbg_update_buff(struct cudbg_buffer *pin_buff,
 		       struct cudbg_buffer *pout_buff);
 #endif /* __CUDBG_LIB_COMMON_H__ */
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cudbg_zlib.c b/drivers/net/ethernet/chelsio/cxgb4/cudbg_zlib.c
new file mode 100644
index 000000000000..25cc06d75cff
--- /dev/null
+++ b/drivers/net/ethernet/chelsio/cxgb4/cudbg_zlib.c
@@ -0,0 +1,82 @@
+/*
+ *  Copyright (C) 2018 Chelsio Communications.  All rights reserved.
+ *
+ *  This program is free software; you can redistribute it and/or modify it
+ *  under the terms and conditions of the GNU General Public License,
+ *  version 2, as published by the Free Software Foundation.
+ *
+ *  This program is distributed in the hope it will be useful, but WITHOUT
+ *  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ *  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ *  more details.
+ *
+ *  The full GNU General Public License is included in this distribution in
+ *  the file called "COPYING".
+ *
+ */
+
+#include <linux/zlib.h>
+
+#include "cxgb4.h"
+#include "cudbg_if.h"
+#include "cudbg_lib_common.h"
+#include "cudbg_zlib.h"
+
+static int cudbg_get_compress_hdr(struct cudbg_buffer *pdbg_buff,
+				  struct cudbg_buffer *pin_buff)
+{
+	if (pdbg_buff->offset + sizeof(struct cudbg_compress_hdr) >
+	    pdbg_buff->size)
+		return CUDBG_STATUS_NO_MEM;
+
+	pin_buff->data = (char *)pdbg_buff->data + pdbg_buff->offset;
+	pin_buff->offset = 0;
+	pin_buff->size = sizeof(struct cudbg_compress_hdr);
+	pdbg_buff->offset += sizeof(struct cudbg_compress_hdr);
+	return 0;
+}
+
+int cudbg_compress_buff(struct cudbg_init *pdbg_init,
+			struct cudbg_buffer *pin_buff,
+			struct cudbg_buffer *pout_buff)
+{
+	struct cudbg_buffer temp_buff = { 0 };
+	struct z_stream_s compress_stream;
+	struct cudbg_compress_hdr *c_hdr;
+	int rc;
+
+	/* Write compression header to output buffer before compression */
+	rc = cudbg_get_compress_hdr(pout_buff, &temp_buff);
+	if (rc)
+		return rc;
+
+	c_hdr = (struct cudbg_compress_hdr *)temp_buff.data;
+	c_hdr->compress_id = CUDBG_ZLIB_COMPRESS_ID;
+
+	memset(&compress_stream, 0, sizeof(struct z_stream_s));
+	compress_stream.workspace = pdbg_init->workspace;
+	rc = zlib_deflateInit2(&compress_stream, Z_DEFAULT_COMPRESSION,
+			       Z_DEFLATED, CUDBG_ZLIB_WIN_BITS,
+			       CUDBG_ZLIB_MEM_LVL, Z_DEFAULT_STRATEGY);
+	if (rc != Z_OK)
+		return CUDBG_SYSTEM_ERROR;
+
+	compress_stream.next_in = pin_buff->data;
+	compress_stream.avail_in = pin_buff->size;
+	compress_stream.next_out = pout_buff->data + pout_buff->offset;
+	compress_stream.avail_out = pout_buff->size - pout_buff->offset;
+
+	rc = zlib_deflate(&compress_stream, Z_FINISH);
+	if (rc != Z_STREAM_END)
+		return CUDBG_SYSTEM_ERROR;
+
+	rc = zlib_deflateEnd(&compress_stream);
+	if (rc != Z_OK)
+		return CUDBG_SYSTEM_ERROR;
+
+	c_hdr->compress_size = compress_stream.total_out;
+	c_hdr->decompress_size = pin_buff->size;
+	pout_buff->offset += compress_stream.total_out;
+
+	return 0;
+}
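
cudbg_compress_buff() prefixes every deflated chunk with a cudbg_compress_hdr, so a dump can be unpacked chunk by chunk. Below is a minimal userspace sketch of the matching inflate side, assuming standard zlib and the same header layout and natural alignment as the kernel struct; the reader-side tool itself is not part of this patch:

	/* Userspace sketch: inflate one cudbg chunk.  Assumes standard zlib;
	 * inflate's windowBits must be >= the 12 used by deflateInit2() above.
	 * The struct mirrors cudbg_compress_hdr, assuming natural alignment.
	 */
	#include <stdint.h>
	#include <string.h>
	#include <zlib.h>

	struct compress_hdr {
		uint32_t compress_id;	/* expect CUDBG_ZLIB_COMPRESS_ID (17) */
		uint64_t decompress_size;
		uint64_t compress_size;
		uint64_t rsvd[32];
	};

	static int decompress_chunk(const uint8_t *in, uint8_t *out,
				    size_t out_len)
	{
		struct compress_hdr hdr;
		z_stream s;

		memcpy(&hdr, in, sizeof(hdr));
		if (hdr.compress_id != 17 || hdr.decompress_size > out_len)
			return -1;

		memset(&s, 0, sizeof(s));
		if (inflateInit2(&s, 12) != Z_OK) /* match CUDBG_ZLIB_WIN_BITS */
			return -1;

		s.next_in = (Bytef *)(in + sizeof(hdr)); /* cast for zlib API */
		s.avail_in = (uInt)hdr.compress_size;
		s.next_out = out;
		s.avail_out = (uInt)hdr.decompress_size;

		if (inflate(&s, Z_FINISH) != Z_STREAM_END) {
			inflateEnd(&s);
			return -1;
		}
		return inflateEnd(&s) == Z_OK ? 0 : -1;
	}
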
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cudbg_zlib.h b/drivers/net/ethernet/chelsio/cxgb4/cudbg_zlib.h
new file mode 100644
index 000000000000..60d23805dfc3
--- /dev/null
+++ b/drivers/net/ethernet/chelsio/cxgb4/cudbg_zlib.h
@@ -0,0 +1,43 @@
+/*
+ *  Copyright (C) 2018 Chelsio Communications.  All rights reserved.
+ *
+ *  This program is free software; you can redistribute it and/or modify it
+ *  under the terms and conditions of the GNU General Public License,
+ *  version 2, as published by the Free Software Foundation.
+ *
+ *  This program is distributed in the hope it will be useful, but WITHOUT
+ *  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ *  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ *  more details.
+ *
+ *  The full GNU General Public License is included in this distribution in
+ *  the file called "COPYING".
+ *
+ */
+
+#ifndef __CUDBG_ZLIB_H__
+#define __CUDBG_ZLIB_H__
+
+#include <linux/zlib.h>
+
+#define CUDBG_ZLIB_COMPRESS_ID 17
+#define CUDBG_ZLIB_WIN_BITS 12
+#define CUDBG_ZLIB_MEM_LVL 4
+
+struct cudbg_compress_hdr {
+	u32 compress_id;
+	u64 decompress_size;
+	u64 compress_size;
+	u64 rsvd[32];
+};
+
+static inline int cudbg_get_workspace_size(void)
+{
+	return zlib_deflate_workspacesize(CUDBG_ZLIB_WIN_BITS,
+					  CUDBG_ZLIB_MEM_LVL);
+}
+
+int cudbg_compress_buff(struct cudbg_init *pdbg_init,
+			struct cudbg_buffer *pin_buff,
+			struct cudbg_buffer *pout_buff);
+#endif /* __CUDBG_ZLIB_H__ */
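
With natural alignment, cudbg_compress_hdr works out to 280 bytes: the u32 compress_id is padded to an 8-byte boundary, then come two u64 counters and 32 reserved u64s (4 + 4 + 8 + 8 + 32 * 8). A compile-time check making that assumption explicit might look like the sketch below; the driver itself carries no such assertion:

	/* Sketch: pin down the header size the dump format implies.
	 * Assumes natural alignment; the driver does not assert this itself.
	 */
	#include <linux/build_bug.h>

	static inline void cudbg_check_compress_hdr_layout(void)
	{
		BUILD_BUG_ON(sizeof(struct cudbg_compress_hdr) !=
			     4 + 4 /* pad */ + 8 + 8 + 32 * 8); /* 280 bytes */
	}
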
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h b/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
index 351f4bf37ca9..9040e13ce4b7 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
@@ -84,7 +84,8 @@ enum {
 	MEM_EDC1,
 	MEM_MC,
 	MEM_MC0 = MEM_MC,
-	MEM_MC1
+	MEM_MC1,
+	MEM_HMA,
 };
 
 enum {
@@ -318,6 +319,7 @@ struct vpd_params {
 };
 
 struct pci_params {
+	unsigned int vpd_cap_addr;
 	unsigned char speed;
 	unsigned char width;
 };
@@ -826,12 +828,17 @@ struct vf_info {
 	unsigned char vf_mac_addr[ETH_ALEN];
 	unsigned int tx_rate;
 	bool pf_set_mac;
+	u16 vlan;
 };
 
 struct mbox_list {
 	struct list_head list;
 };
 
+struct mps_encap_entry {
+	atomic_t refcnt;
+};
+
 struct adapter {
 	void __iomem *regs;
 	void __iomem *bar2;
@@ -846,6 +853,10 @@ struct adapter {
 	enum chip_type chip;
 
 	int msg_enable;
+	__be16 vxlan_port;
+	u8 vxlan_port_cnt;
+	__be16 geneve_port;
+	u8 geneve_port_cnt;
 
 	struct adapter_params params;
 	struct cxgb4_virt_res vres;
@@ -875,7 +886,10 @@ struct adapter {
 	unsigned int clipt_start;
 	unsigned int clipt_end;
 	struct clip_tbl *clipt;
+	unsigned int rawf_start;
+	unsigned int rawf_cnt;
 	struct smt_data *smt;
+	struct mps_encap_entry *mps_encap;
 	struct cxgb4_uld_info *uld;
 	void *uld_handle[CXGB4_ULD_MAX];
 	unsigned int num_uld;
@@ -1317,6 +1331,7 @@ void t4_sge_start(struct adapter *adap);
 void t4_sge_stop(struct adapter *adap);
 void cxgb4_set_ethtool_ops(struct net_device *netdev);
 int cxgb4_write_rss(const struct port_info *pi, const u16 *queues);
+enum cpl_tx_tnl_lso_type cxgb_encap_offload_supported(struct sk_buff *skb);
 extern int dbfifo_int_thresh;
 
 #define for_each_port(adapter, iter) \
@@ -1435,6 +1450,21 @@ static inline void init_rspq(struct adapter *adap, struct sge_rspq *q,
 	q->size = size;
 }
 
+/**
+ *     t4_is_inserted_mod_type - is this a plugged-in Firmware Module Type?
+ *     @fw_mod_type: the Firmware Module Type
+ *
+ *     Return whether the Firmware Module Type represents a real Transceiver
+ *     Module/Cable Module Type which has been inserted.
+ */
+static inline bool t4_is_inserted_mod_type(unsigned int fw_mod_type)
+{
+	return (fw_mod_type != FW_PORT_MOD_TYPE_NONE &&
+		fw_mod_type != FW_PORT_MOD_TYPE_NOTSUPPORTED &&
+		fw_mod_type != FW_PORT_MOD_TYPE_UNKNOWN &&
+		fw_mod_type != FW_PORT_MOD_TYPE_ERROR);
+}
+
 void t4_write_indirect(struct adapter *adap, unsigned int addr_reg,
 		       unsigned int data_reg, const u32 *vals,
 		       unsigned int nregs, unsigned int start_idx);
@@ -1524,6 +1554,7 @@ int t4_init_portinfo(struct port_info *pi, int mbox,
 		     int port, int pf, int vf, u8 mac[]);
 int t4_port_init(struct adapter *adap, int mbox, int pf, int vf);
 void t4_fatal_err(struct adapter *adapter);
+unsigned int t4_chip_rss_size(struct adapter *adapter);
 int t4_config_rss_range(struct adapter *adapter, int mbox, unsigned int viid,
 			int start, int n, const u16 *rspq, unsigned int nrspq);
 int t4_config_glbl_rss(struct adapter *adapter, int mbox, unsigned int mode,
@@ -1633,6 +1664,12 @@ int t4_free_vi(struct adapter *adap, unsigned int mbox,
 int t4_set_rxmode(struct adapter *adap, unsigned int mbox, unsigned int viid,
 		int mtu, int promisc, int all_multi, int bcast, int vlanex,
 		bool sleep_ok);
+int t4_free_raw_mac_filt(struct adapter *adap, unsigned int viid,
+			 const u8 *addr, const u8 *mask, unsigned int idx,
+			 u8 lookup_type, u8 port_id, bool sleep_ok);
+int t4_alloc_raw_mac_filt(struct adapter *adap, unsigned int viid,
+			  const u8 *addr, const u8 *mask, unsigned int idx,
+			  u8 lookup_type, u8 port_id, bool sleep_ok);
 int t4_alloc_mac_filt(struct adapter *adap, unsigned int mbox,
 		      unsigned int viid, bool free, unsigned int naddr,
 		      const u8 **addr, u16 *idx, u64 *hash, bool sleep_ok);
@@ -1665,7 +1702,7 @@ int t4_ctrl_eq_free(struct adapter *adap, unsigned int mbox, unsigned int pf,
 		    unsigned int vf, unsigned int eqid);
 int t4_ofld_eq_free(struct adapter *adap, unsigned int mbox, unsigned int pf,
 		    unsigned int vf, unsigned int eqid);
-int t4_sge_ctxt_flush(struct adapter *adap, unsigned int mbox);
+int t4_sge_ctxt_flush(struct adapter *adap, unsigned int mbox, int ctxt_type);
 void t4_handle_get_port_info(struct port_info *pi, const __be64 *rpl);
 int t4_update_port_info(struct port_info *pi);
 int t4_get_link_params(struct port_info *pi, unsigned int *link_okp,
@@ -1708,6 +1745,9 @@ void t4_uld_mem_free(struct adapter *adap);
 int t4_uld_mem_alloc(struct adapter *adap);
 void t4_uld_clean_up(struct adapter *adap);
 void t4_register_netevent_notifier(void);
+int t4_i2c_rd(struct adapter *adap, unsigned int mbox, int port,
+	      unsigned int devid, unsigned int offset,
+	      unsigned int len, u8 *buf);
 void free_rspq_fl(struct adapter *adap, struct sge_rspq *rq, struct sge_fl *fl);
 void free_tx_desc(struct adapter *adap, struct sge_txq *q,
 		  unsigned int n, bool unmap);
@@ -1722,4 +1762,6 @@ void cxgb4_write_sgl(const struct sk_buff *skb, struct sge_txq *q,
 		     struct ulptx_sgl *sgl, u64 *end, unsigned int start,
 		     const dma_addr_t *addr);
 void cxgb4_ring_tx_db(struct adapter *adap, struct sge_txq *q, int n);
+int t4_set_vlan_acl(struct adapter *adap, unsigned int mbox, unsigned int vf,
+		    u16 vlan);
 #endif /* __CXGB4_H__ */
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_cudbg.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_cudbg.c
index 29cc625e9833..30485f9a598f 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_cudbg.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_cudbg.c
@@ -18,11 +18,14 @@
 #include "t4_regs.h"
 #include "cxgb4.h"
 #include "cxgb4_cudbg.h"
-#include "cudbg_entity.h"
+#include "cudbg_zlib.h"
 
 static const struct cxgb4_collect_entity cxgb4_collect_mem_dump[] = {
 	{ CUDBG_EDC0, cudbg_collect_edc0_meminfo },
 	{ CUDBG_EDC1, cudbg_collect_edc1_meminfo },
+	{ CUDBG_MC0, cudbg_collect_mc0_meminfo },
+	{ CUDBG_MC1, cudbg_collect_mc1_meminfo },
+	{ CUDBG_HMA, cudbg_collect_hma_meminfo },
 };
 
 static const struct cxgb4_collect_entity cxgb4_collect_hw_dump[] = {
@@ -53,6 +56,7 @@ static const struct cxgb4_collect_entity cxgb4_collect_hw_dump[] = {
 	{ CUDBG_SGE_INDIRECT, cudbg_collect_sge_indirect },
 	{ CUDBG_ULPRX_LA, cudbg_collect_ulprx_la },
 	{ CUDBG_TP_LA, cudbg_collect_tp_la },
+	{ CUDBG_MEMINFO, cudbg_collect_meminfo },
 	{ CUDBG_CIM_PIF_LA, cudbg_collect_cim_pif_la },
 	{ CUDBG_CLK, cudbg_collect_clk_info },
 	{ CUDBG_CIM_OBQ_RXQ0, cudbg_collect_obq_sge_rx_q0 },
@@ -60,6 +64,7 @@ static const struct cxgb4_collect_entity cxgb4_collect_hw_dump[] = {
 	{ CUDBG_PCIE_INDIRECT, cudbg_collect_pcie_indirect },
 	{ CUDBG_PM_INDIRECT, cudbg_collect_pm_indirect },
 	{ CUDBG_TID_INFO, cudbg_collect_tid },
+	{ CUDBG_PCIE_CONFIG, cudbg_collect_pcie_config },
 	{ CUDBG_DUMP_CONTEXT, cudbg_collect_dump_context },
 	{ CUDBG_MPS_TCAM, cudbg_collect_mps_tcam },
 	{ CUDBG_VPD_DATA, cudbg_collect_vpd_data },
@@ -158,8 +163,24 @@ static u32 cxgb4_get_entity_length(struct adapter *adap, u32 entity)
 		}
 		len = cudbg_mbytes_to_bytes(len);
 		break;
+	case CUDBG_MC0:
+		value = t4_read_reg(adap, MA_TARGET_MEM_ENABLE_A);
+		if (value & EXT_MEM0_ENABLE_F) {
+			value = t4_read_reg(adap, MA_EXT_MEMORY0_BAR_A);
+			len = EXT_MEM0_SIZE_G(value);
+		}
+		len = cudbg_mbytes_to_bytes(len);
+		break;
+	case CUDBG_MC1:
+		value = t4_read_reg(adap, MA_TARGET_MEM_ENABLE_A);
+		if (value & EXT_MEM1_ENABLE_F) {
+			value = t4_read_reg(adap, MA_EXT_MEMORY1_BAR_A);
+			len = EXT_MEM1_SIZE_G(value);
+		}
+		len = cudbg_mbytes_to_bytes(len);
+		break;
 	case CUDBG_RSS:
-		len = RSS_NENTRIES * sizeof(u16);
+		len = t4_chip_rss_size(adap) * sizeof(u16);
 		break;
 	case CUDBG_RSS_VF_CONF:
 		len = adap->params.arch.vfcount *
@@ -201,6 +222,9 @@ static u32 cxgb4_get_entity_length(struct adapter *adap, u32 entity)
 	case CUDBG_TP_LA:
 		len = sizeof(struct cudbg_tp_la) + TPLA_SIZE * sizeof(u64);
 		break;
+	case CUDBG_MEMINFO:
+		len = sizeof(struct cudbg_meminfo);
+		break;
 	case CUDBG_CIM_PIF_LA:
 		len = sizeof(struct cudbg_cim_pif_la);
 		len += 2 * CIM_PIFLA_SIZE * 6 * sizeof(u32);
@@ -219,6 +243,9 @@ static u32 cxgb4_get_entity_length(struct adapter *adap, u32 entity)
 	case CUDBG_TID_INFO:
 		len = sizeof(struct cudbg_tid_info_region_rev1);
 		break;
+	case CUDBG_PCIE_CONFIG:
+		len = sizeof(u32) * CUDBG_NUM_PCIE_CONFIG_REGS;
+		break;
 	case CUDBG_DUMP_CONTEXT:
 		len = cudbg_dump_context_size(adap);
 		break;
@@ -248,7 +275,13 @@ static u32 cxgb4_get_entity_length(struct adapter *adap, u32 entity)
 		len = sizeof(struct cudbg_ulptx_la);
 		break;
 	case CUDBG_UP_CIM_INDIRECT:
-		n = sizeof(t5_up_cim_reg_array) / (IREG_NUM_ELEM * sizeof(u32));
+		n = 0;
+		if (is_t5(adap->params.chip))
+			n = sizeof(t5_up_cim_reg_array) /
+			    ((IREG_NUM_ELEM + 1) * sizeof(u32));
+		else if (is_t6(adap->params.chip))
+			n = sizeof(t6_up_cim_reg_array) /
+			    ((IREG_NUM_ELEM + 1) * sizeof(u32));
 		len = sizeof(struct ireg_buf) * n;
 		break;
 	case CUDBG_PBT_TABLE:
@@ -264,6 +297,17 @@ static u32 cxgb4_get_entity_length(struct adapter *adap, u32 entity)
 			len = sizeof(struct ireg_buf) * n;
 		}
 		break;
+	case CUDBG_HMA:
+		value = t4_read_reg(adap, MA_TARGET_MEM_ENABLE_A);
+		if (value & HMA_MUX_F) {
+			/* In T6, there's no MC1.  So, HMA shares MC1
+			 * address space.
+			 */
+			value = t4_read_reg(adap, MA_EXT_MEMORY1_BAR_A);
+			len = EXT_MEM1_SIZE_G(value);
+		}
+		len = cudbg_mbytes_to_bytes(len);
+		break;
 	default:
 		break;
 	}
@@ -275,6 +319,7 @@ u32 cxgb4_get_dump_length(struct adapter *adap, u32 flag)
 {
 	u32 i, entity;
 	u32 len = 0;
+	u32 wsize;
 
 	if (flag & CXGB4_ETH_DUMP_HW) {
 		for (i = 0; i < ARRAY_SIZE(cxgb4_collect_hw_dump); i++) {
@@ -290,6 +335,11 @@ u32 cxgb4_get_dump_length(struct adapter *adap, u32 flag)
 		}
 	}
 
+	/* If compression is enabled, a smaller destination buffer is enough */
+	wsize = cudbg_get_workspace_size();
+	if (wsize && len > CUDBG_DUMP_BUFF_SIZE)
+		len = CUDBG_DUMP_BUFF_SIZE;
+
 	return len;
 }
 
@@ -298,22 +348,14 @@ static void cxgb4_cudbg_collect_entity(struct cudbg_init *pdbg_init,
 				       const struct cxgb4_collect_entity *e_arr,
 				       u32 arr_size, void *buf, u32 *tot_size)
 {
-	struct adapter *adap = pdbg_init->adap;
 	struct cudbg_error cudbg_err = { 0 };
 	struct cudbg_entity_hdr *entity_hdr;
-	u32 entity_size, i;
-	u32 total_size = 0;
+	u32 i, total_size = 0;
 	int ret;
 
 	for (i = 0; i < arr_size; i++) {
 		const struct cxgb4_collect_entity *e = &e_arr[i];
 
-		/* Skip entities that won't fit in output buffer */
-		entity_size = cxgb4_get_entity_length(adap, e->entity);
-		if (entity_size >
-		    pdbg_init->outbuf_size - *tot_size - total_size)
-			continue;
-
 		entity_hdr = cudbg_get_entity_hdr(buf, e->entity);
 		entity_hdr->entity_type = e->entity;
 		entity_hdr->start_offset = dbg_buff->offset;
@@ -339,16 +381,40 @@ static void cxgb4_cudbg_collect_entity(struct cudbg_init *pdbg_init,
 	*tot_size += total_size;
 }
 
+static int cudbg_alloc_compress_buff(struct cudbg_init *pdbg_init)
+{
+	u32 workspace_size;
+
+	workspace_size = cudbg_get_workspace_size();
+	pdbg_init->compress_buff = vzalloc(CUDBG_COMPRESS_BUFF_SIZE +
+					   workspace_size);
+	if (!pdbg_init->compress_buff)
+		return -ENOMEM;
+
+	pdbg_init->compress_buff_size = CUDBG_COMPRESS_BUFF_SIZE;
+	pdbg_init->workspace = (u8 *)pdbg_init->compress_buff +
+			       CUDBG_COMPRESS_BUFF_SIZE - workspace_size;
+	return 0;
+}
+
+static void cudbg_free_compress_buff(struct cudbg_init *pdbg_init)
+{
+	if (pdbg_init->compress_buff)
+		vfree(pdbg_init->compress_buff);
+}
+
 int cxgb4_cudbg_collect(struct adapter *adap, void *buf, u32 *buf_size,
 			u32 flag)
 {
-	struct cudbg_init cudbg_init = { 0 };
 	struct cudbg_buffer dbg_buff = { 0 };
 	u32 size, min_size, total_size = 0;
+	struct cudbg_init cudbg_init;
 	struct cudbg_hdr *cudbg_hdr;
+	int rc;
 
 	size = *buf_size;
 
+	memset(&cudbg_init, 0, sizeof(struct cudbg_init));
 	cudbg_init.adap = adap;
 	cudbg_init.outbuf = buf;
 	cudbg_init.outbuf_size = size;
@@ -365,7 +431,6 @@ int cxgb4_cudbg_collect(struct adapter *adap, void *buf, u32 *buf_size,
 	cudbg_hdr->max_entities = CUDBG_MAX_ENTITY;
 	cudbg_hdr->chip_ver = adap->params.chip;
 	cudbg_hdr->dump_type = CUDBG_DUMP_TYPE_MINI;
-	cudbg_hdr->compress_type = CUDBG_COMPRESSION_NONE;
 
 	min_size = sizeof(struct cudbg_hdr) +
 		   sizeof(struct cudbg_entity_hdr) *
@@ -373,6 +438,24 @@ int cxgb4_cudbg_collect(struct adapter *adap, void *buf, u32 *buf_size,
 	if (size < min_size)
 		return -ENOMEM;
 
+	rc = cudbg_get_workspace_size();
+	if (rc) {
+		/* Zlib available.  So, use zlib deflate */
+		cudbg_init.compress_type = CUDBG_COMPRESSION_ZLIB;
+		rc = cudbg_alloc_compress_buff(&cudbg_init);
+		if (rc) {
+			/* Ignore error and continue without compression. */
+			dev_warn(adap->pdev_dev,
+				 "Failed to allocate compression buffer, ret: %d.  Continuing without compression.\n",
+				 rc);
+			cudbg_init.compress_type = CUDBG_COMPRESSION_NONE;
+			rc = 0;
+		}
+	} else {
+		cudbg_init.compress_type = CUDBG_COMPRESSION_NONE;
+	}
+
+	cudbg_hdr->compress_type = cudbg_init.compress_type;
 	dbg_buff.offset += min_size;
 	total_size = dbg_buff.offset;
 
@@ -390,8 +473,12 @@ int cxgb4_cudbg_collect(struct adapter *adap, void *buf, u32 *buf_size,
 					   buf,
 					   &total_size);
 
+	cudbg_free_compress_buff(&cudbg_init);
 	cudbg_hdr->data_len = total_size;
-	*buf_size = total_size;
+	if (cudbg_init.compress_type != CUDBG_COMPRESSION_NONE)
+		*buf_size = size;
+	else
+		*buf_size = total_size;
 	return 0;
 }
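
cudbg_alloc_compress_buff() carves a single vzalloc() region into a staging buffer with the zlib deflate workspace at its tail; that staging step is what lets cxgb4_get_dump_length() cap the destination at CUDBG_DUMP_BUFF_SIZE once compression is available. A sketch of the one-allocation carving, with illustrative sizes (the driver uses 4 MB plus zlib_deflate_workspacesize(12, 4)) and the simple non-overlapping split where the workspace starts exactly where staging ends:

	/* Sketch of the single-allocation split: one vzalloc() region holds
	 * the staging buffer followed by the deflate workspace.  Sizes are
	 * illustrative; names are stand-ins, not the driver's own.
	 */
	#include <linux/types.h>
	#include <linux/vmalloc.h>
	#include <linux/errno.h>

	struct compress_area {
		void *base;		/* whole vzalloc() region */
		void *staging;		/* uncompressed entity data */
		void *workspace;	/* zlib scratch space */
	};

	static int alloc_compress_area(struct compress_area *a,
				       size_t staging_size, size_t wsize)
	{
		a->base = vzalloc(staging_size + wsize);
		if (!a->base)
			return -ENOMEM;

		a->staging = a->base;
		/* workspace begins where the staging buffer ends */
		a->workspace = (u8 *)a->base + staging_size;
		return 0;
	}

	static void free_compress_area(struct compress_area *a)
	{
		vfree(a->base);		/* vfree(NULL) is a no-op */
	}
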
 
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_cudbg.h b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_cudbg.h
index c099b5aa2214..ce1ac9a1c878 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_cudbg.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_cudbg.h
@@ -20,8 +20,12 @@
 
 #include "cudbg_if.h"
 #include "cudbg_lib_common.h"
+#include "cudbg_entity.h"
 #include "cudbg_lib.h"
 
+#define CUDBG_DUMP_BUFF_SIZE (32 * 1024 * 1024) /* 32 MB */
+#define CUDBG_COMPRESS_BUFF_SIZE (4 * 1024 * 1024) /* 4 MB */
+
 typedef int (*cudbg_collect_callback_t)(struct cudbg_init *pdbg_init,
 					struct cudbg_buffer *dbg_buff,
 					struct cudbg_error *cudbg_err);
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c
index cf471831ee71..2822bbff73e8 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c
@@ -45,6 +45,10 @@
 #include "cxgb4_debugfs.h"
 #include "clip_tbl.h"
 #include "l2t.h"
+#include "cudbg_if.h"
+#include "cudbg_lib_common.h"
+#include "cudbg_entity.h"
+#include "cudbg_lib.h"
 
 /* generic seq_file support for showing a table of size rows x width. */
 static void *seq_tab_get_idx(struct seq_tab *tb, loff_t pos)
@@ -1739,7 +1743,7 @@ static int mps_tcam_show(struct seq_file *seq, void *v)
 			 */
 			if (lookup_type && (lookup_type != DATALKPTYPE_M)) {
 				/* Inner header VNI */
-				vniy = ((data2 & DATAVIDH2_F) << 23) |
+				vniy = (data2 & DATAVIDH2_F) |
 				       (DATAVIDH1_G(data2) << 16) | VIDL_G(val);
 				dip_hit = data2 & DATADIPHIT_F;
 			} else {
@@ -1749,6 +1753,7 @@ static int mps_tcam_show(struct seq_file *seq, void *v)
 			port_num = DATAPORTNUM_G(data2);
 
 			/* Read tcamx. Change the control param */
+			vnix = 0;
 			ctl |= CTLXYBITSEL_V(1);
 			t4_write_reg(adap, MPS_CLS_TCAM_DATA2_CTL_A, ctl);
 			val = t4_read_reg(adap, MPS_CLS_TCAM_DATA1_A);
@@ -1757,7 +1762,7 @@ static int mps_tcam_show(struct seq_file *seq, void *v)
 			data2 = t4_read_reg(adap, MPS_CLS_TCAM_DATA2_CTL_A);
 			if (lookup_type && (lookup_type != DATALKPTYPE_M)) {
 				/* Inner header VNI mask */
-				vnix = ((data2 & DATAVIDH2_F) << 23) |
+				vnix = (data2 & DATAVIDH2_F) |
 				       (DATAVIDH1_G(data2) << 16) | VIDL_G(val);
 			}
 		} else {
@@ -1830,7 +1835,8 @@ static int mps_tcam_show(struct seq_file *seq, void *v)
 					   addr[1], addr[2], addr[3],
 					   addr[4], addr[5],
 					   (unsigned long long)mask,
-					   vniy, vnix, dip_hit ? 'Y' : 'N',
+					   vniy, (vnix | vniy),
+					   dip_hit ? 'Y' : 'N',
 					   port_num,
 					   (cls_lo & T6_SRAM_VLD_F) ? 'Y' : 'N',
 					   PORTMAP_G(cls_hi),
@@ -2017,11 +2023,12 @@ static int rss_show(struct seq_file *seq, void *v, int idx)
 
 static int rss_open(struct inode *inode, struct file *file)
 {
-	int ret;
-	struct seq_tab *p;
 	struct adapter *adap = inode->i_private;
+	int ret, nentries;
+	struct seq_tab *p;
 
-	p = seq_open_tab(file, RSS_NENTRIES / 8, 8 * sizeof(u16), 0, rss_show);
+	nentries = t4_chip_rss_size(adap);
+	p = seq_open_tab(file, nentries / 8, 8 * sizeof(u16), 0, rss_show);
 	if (!p)
 		return -ENOMEM;
 
@@ -2664,10 +2671,14 @@ static const struct file_operations mem_debugfs_fops = {
 
 static int tid_info_show(struct seq_file *seq, void *v)
 {
+	unsigned int tid_start = 0;
 	struct adapter *adap = seq->private;
 	const struct tid_info *t = &adap->tids;
 	enum chip_type chip = CHELSIO_CHIP_VERSION(adap->params.chip);
 
+	if (chip > CHELSIO_T5)
+		tid_start = t4_read_reg(adap, LE_DB_ACTIVE_TABLE_START_INDEX_A);
+
 	if (t4_read_reg(adap, LE_DB_CONFIG_A) & HASHEN_F) {
 		unsigned int sb;
 		seq_printf(seq, "Connections in use: %u\n",
@@ -2679,8 +2690,8 @@ static int tid_info_show(struct seq_file *seq, void *v)
 			sb = t4_read_reg(adap, LE_DB_SRVR_START_INDEX_A);
 
 		if (sb) {
-			seq_printf(seq, "TID range: 0..%u/%u..%u", sb - 1,
-				   adap->tids.hash_base,
+			seq_printf(seq, "TID range: %u..%u/%u..%u", tid_start,
+				   sb - 1, adap->tids.hash_base,
 				   t->ntids - 1);
 			seq_printf(seq, ", in use: %u/%u\n",
 				   atomic_read(&t->tids_in_use),
@@ -2705,7 +2716,8 @@ static int tid_info_show(struct seq_file *seq, void *v)
 		seq_printf(seq, "Connections in use: %u\n",
 			   atomic_read(&t->conns_in_use));
 
-		seq_printf(seq, "TID range: 0..%u", t->ntids - 1);
+		seq_printf(seq, "TID range: %u..%u", tid_start,
+			   tid_start + t->ntids - 1);
 		seq_printf(seq, ", in use: %u\n",
 			   atomic_read(&t->tids_in_use));
 	}
@@ -2794,18 +2806,6 @@ static const struct file_operations blocked_fl_fops = {
 	.llseek  = generic_file_llseek,
 };
 
-struct mem_desc {
-	unsigned int base;
-	unsigned int limit;
-	unsigned int idx;
-};
-
-static int mem_desc_cmp(const void *a, const void *b)
-{
-	return ((const struct mem_desc *)a)->base -
-	       ((const struct mem_desc *)b)->base;
-}
-
 static void mem_region_show(struct seq_file *seq, const char *name,
 			    unsigned int from, unsigned int to)
 {
@@ -2819,250 +2819,60 @@ static void mem_region_show(struct seq_file *seq, const char *name,
 static int meminfo_show(struct seq_file *seq, void *v)
 {
 	static const char * const memory[] = { "EDC0:", "EDC1:", "MC:",
-					"MC0:", "MC1:"};
-	static const char * const region[] = {
-		"DBQ contexts:", "IMSG contexts:", "FLM cache:", "TCBs:",
-		"Pstructs:", "Timers:", "Rx FL:", "Tx FL:", "Pstruct FL:",
-		"Tx payload:", "Rx payload:", "LE hash:", "iSCSI region:",
-		"TDDP region:", "TPT region:", "STAG region:", "RQ region:",
-		"RQUDP region:", "PBL region:", "TXPBL region:",
-		"DBVFIFO region:", "ULPRX state:", "ULPTX state:",
-		"On-chip queues:"
-	};
-
-	int i, n;
-	u32 lo, hi, used, alloc;
-	struct mem_desc avail[4];
-	struct mem_desc mem[ARRAY_SIZE(region) + 3];      /* up to 3 holes */
-	struct mem_desc *md = mem;
+					       "MC0:", "MC1:", "HMA:"};
 	struct adapter *adap = seq->private;
+	struct cudbg_meminfo meminfo;
+	int i, rc;
 
-	for (i = 0; i < ARRAY_SIZE(mem); i++) {
-		mem[i].limit = 0;
-		mem[i].idx = i;
-	}
-
-	/* Find and sort the populated memory ranges */
-	i = 0;
-	lo = t4_read_reg(adap, MA_TARGET_MEM_ENABLE_A);
-	if (lo & EDRAM0_ENABLE_F) {
-		hi = t4_read_reg(adap, MA_EDRAM0_BAR_A);
-		avail[i].base = EDRAM0_BASE_G(hi) << 20;
-		avail[i].limit = avail[i].base + (EDRAM0_SIZE_G(hi) << 20);
-		avail[i].idx = 0;
-		i++;
-	}
-	if (lo & EDRAM1_ENABLE_F) {
-		hi = t4_read_reg(adap, MA_EDRAM1_BAR_A);
-		avail[i].base = EDRAM1_BASE_G(hi) << 20;
-		avail[i].limit = avail[i].base + (EDRAM1_SIZE_G(hi) << 20);
-		avail[i].idx = 1;
-		i++;
-	}
-
-	if (is_t5(adap->params.chip)) {
-		if (lo & EXT_MEM0_ENABLE_F) {
-			hi = t4_read_reg(adap, MA_EXT_MEMORY0_BAR_A);
-			avail[i].base = EXT_MEM0_BASE_G(hi) << 20;
-			avail[i].limit =
-				avail[i].base + (EXT_MEM0_SIZE_G(hi) << 20);
-			avail[i].idx = 3;
-			i++;
-		}
-		if (lo & EXT_MEM1_ENABLE_F) {
-			hi = t4_read_reg(adap, MA_EXT_MEMORY1_BAR_A);
-			avail[i].base = EXT_MEM1_BASE_G(hi) << 20;
-			avail[i].limit =
-				avail[i].base + (EXT_MEM1_SIZE_G(hi) << 20);
-			avail[i].idx = 4;
-			i++;
-		}
-	} else {
-		if (lo & EXT_MEM_ENABLE_F) {
-			hi = t4_read_reg(adap, MA_EXT_MEMORY_BAR_A);
-			avail[i].base = EXT_MEM_BASE_G(hi) << 20;
-			avail[i].limit =
-				avail[i].base + (EXT_MEM_SIZE_G(hi) << 20);
-			avail[i].idx = 2;
-			i++;
-		}
-	}
-	if (!i)                                    /* no memory available */
-		return 0;
-	sort(avail, i, sizeof(struct mem_desc), mem_desc_cmp, NULL);
-
-	(md++)->base = t4_read_reg(adap, SGE_DBQ_CTXT_BADDR_A);
-	(md++)->base = t4_read_reg(adap, SGE_IMSG_CTXT_BADDR_A);
-	(md++)->base = t4_read_reg(adap, SGE_FLM_CACHE_BADDR_A);
-	(md++)->base = t4_read_reg(adap, TP_CMM_TCB_BASE_A);
-	(md++)->base = t4_read_reg(adap, TP_CMM_MM_BASE_A);
-	(md++)->base = t4_read_reg(adap, TP_CMM_TIMER_BASE_A);
-	(md++)->base = t4_read_reg(adap, TP_CMM_MM_RX_FLST_BASE_A);
-	(md++)->base = t4_read_reg(adap, TP_CMM_MM_TX_FLST_BASE_A);
-	(md++)->base = t4_read_reg(adap, TP_CMM_MM_PS_FLST_BASE_A);
-
-	/* the next few have explicit upper bounds */
-	md->base = t4_read_reg(adap, TP_PMM_TX_BASE_A);
-	md->limit = md->base - 1 +
-		    t4_read_reg(adap, TP_PMM_TX_PAGE_SIZE_A) *
-		    PMTXMAXPAGE_G(t4_read_reg(adap, TP_PMM_TX_MAX_PAGE_A));
-	md++;
-
-	md->base = t4_read_reg(adap, TP_PMM_RX_BASE_A);
-	md->limit = md->base - 1 +
-		    t4_read_reg(adap, TP_PMM_RX_PAGE_SIZE_A) *
-		    PMRXMAXPAGE_G(t4_read_reg(adap, TP_PMM_RX_MAX_PAGE_A));
-	md++;
-
-	if (t4_read_reg(adap, LE_DB_CONFIG_A) & HASHEN_F) {
-		if (CHELSIO_CHIP_VERSION(adap->params.chip) <= CHELSIO_T5) {
-			hi = t4_read_reg(adap, LE_DB_TID_HASHBASE_A) / 4;
-			md->base = t4_read_reg(adap, LE_DB_HASH_TID_BASE_A);
-		 } else {
-			hi = t4_read_reg(adap, LE_DB_HASH_TID_BASE_A);
-			md->base = t4_read_reg(adap,
-					       LE_DB_HASH_TBL_BASE_ADDR_A);
-		}
-		md->limit = 0;
-	} else {
-		md->base = 0;
-		md->idx = ARRAY_SIZE(region);  /* hide it */
-	}
-	md++;
-
-#define ulp_region(reg) do { \
-	md->base = t4_read_reg(adap, ULP_ ## reg ## _LLIMIT_A);\
-	(md++)->limit = t4_read_reg(adap, ULP_ ## reg ## _ULIMIT_A); \
-} while (0)
-
-	ulp_region(RX_ISCSI);
-	ulp_region(RX_TDDP);
-	ulp_region(TX_TPT);
-	ulp_region(RX_STAG);
-	ulp_region(RX_RQ);
-	ulp_region(RX_RQUDP);
-	ulp_region(RX_PBL);
-	ulp_region(TX_PBL);
-#undef ulp_region
-	md->base = 0;
-	md->idx = ARRAY_SIZE(region);
-	if (!is_t4(adap->params.chip)) {
-		u32 size = 0;
-		u32 sge_ctrl = t4_read_reg(adap, SGE_CONTROL2_A);
-		u32 fifo_size = t4_read_reg(adap, SGE_DBVFIFO_SIZE_A);
-
-		if (is_t5(adap->params.chip)) {
-			if (sge_ctrl & VFIFO_ENABLE_F)
-				size = DBVFIFO_SIZE_G(fifo_size);
-		} else {
-			size = T6_DBVFIFO_SIZE_G(fifo_size);
-		}
-
-		if (size) {
-			md->base = BASEADDR_G(t4_read_reg(adap,
-					SGE_DBVFIFO_BADDR_A));
-			md->limit = md->base + (size << 2) - 1;
-		}
-	}
-
-	md++;
-
-	md->base = t4_read_reg(adap, ULP_RX_CTX_BASE_A);
-	md->limit = 0;
-	md++;
-	md->base = t4_read_reg(adap, ULP_TX_ERR_TABLE_BASE_A);
-	md->limit = 0;
-	md++;
-
-	md->base = adap->vres.ocq.start;
-	if (adap->vres.ocq.size)
-		md->limit = md->base + adap->vres.ocq.size - 1;
-	else
-		md->idx = ARRAY_SIZE(region);  /* hide it */
-	md++;
-
-	/* add any address-space holes, there can be up to 3 */
-	for (n = 0; n < i - 1; n++)
-		if (avail[n].limit < avail[n + 1].base)
-			(md++)->base = avail[n].limit;
-	if (avail[n].limit)
-		(md++)->base = avail[n].limit;
-
-	n = md - mem;
-	sort(mem, n, sizeof(struct mem_desc), mem_desc_cmp, NULL);
+	memset(&meminfo, 0, sizeof(struct cudbg_meminfo));
+	rc = cudbg_fill_meminfo(adap, &meminfo);
+	if (rc)
+		return -ENXIO;
 
-	for (lo = 0; lo < i; lo++)
-		mem_region_show(seq, memory[avail[lo].idx], avail[lo].base,
-				avail[lo].limit - 1);
+	for (i = 0; i < meminfo.avail_c; i++)
+		mem_region_show(seq, memory[meminfo.avail[i].idx],
+				meminfo.avail[i].base,
+				meminfo.avail[i].limit - 1);
 
 	seq_putc(seq, '\n');
-	for (i = 0; i < n; i++) {
-		if (mem[i].idx >= ARRAY_SIZE(region))
+	for (i = 0; i < meminfo.mem_c; i++) {
+		if (meminfo.mem[i].idx >= ARRAY_SIZE(cudbg_region))
 			continue;                        /* skip holes */
-		if (!mem[i].limit)
-			mem[i].limit = i < n - 1 ? mem[i + 1].base - 1 : ~0;
-		mem_region_show(seq, region[mem[i].idx], mem[i].base,
-				mem[i].limit);
+		if (!meminfo.mem[i].limit)
+			meminfo.mem[i].limit =
+				i < meminfo.mem_c - 1 ?
+				meminfo.mem[i + 1].base - 1 : ~0;
+		mem_region_show(seq, cudbg_region[meminfo.mem[i].idx],
+				meminfo.mem[i].base, meminfo.mem[i].limit);
 	}
 
 	seq_putc(seq, '\n');
-	lo = t4_read_reg(adap, CIM_SDRAM_BASE_ADDR_A);
-	hi = t4_read_reg(adap, CIM_SDRAM_ADDR_SIZE_A) + lo - 1;
-	mem_region_show(seq, "uP RAM:", lo, hi);
-
-	lo = t4_read_reg(adap, CIM_EXTMEM2_BASE_ADDR_A);
-	hi = t4_read_reg(adap, CIM_EXTMEM2_ADDR_SIZE_A) + lo - 1;
-	mem_region_show(seq, "uP Extmem2:", lo, hi);
+	mem_region_show(seq, "uP RAM:", meminfo.up_ram_lo, meminfo.up_ram_hi);
+	mem_region_show(seq, "uP Extmem2:", meminfo.up_extmem2_lo,
+			meminfo.up_extmem2_hi);
 
-	lo = t4_read_reg(adap, TP_PMM_RX_MAX_PAGE_A);
 	seq_printf(seq, "\n%u Rx pages of size %uKiB for %u channels\n",
-		   PMRXMAXPAGE_G(lo),
-		   t4_read_reg(adap, TP_PMM_RX_PAGE_SIZE_A) >> 10,
-		   (lo & PMRXNUMCHN_F) ? 2 : 1);
+		   meminfo.rx_pages_data[0], meminfo.rx_pages_data[1],
+		   meminfo.rx_pages_data[2]);
 
-	lo = t4_read_reg(adap, TP_PMM_TX_MAX_PAGE_A);
-	hi = t4_read_reg(adap, TP_PMM_TX_PAGE_SIZE_A);
 	seq_printf(seq, "%u Tx pages of size %u%ciB for %u channels\n",
-		   PMTXMAXPAGE_G(lo),
-		   hi >= (1 << 20) ? (hi >> 20) : (hi >> 10),
-		   hi >= (1 << 20) ? 'M' : 'K', 1 << PMTXNUMCHN_G(lo));
-	seq_printf(seq, "%u p-structs\n\n",
-		   t4_read_reg(adap, TP_CMM_MM_MAX_PSTRUCT_A));
-
-	for (i = 0; i < 4; i++) {
-		if (CHELSIO_CHIP_VERSION(adap->params.chip) > CHELSIO_T5)
-			lo = t4_read_reg(adap, MPS_RX_MAC_BG_PG_CNT0_A + i * 4);
-		else
-			lo = t4_read_reg(adap, MPS_RX_PG_RSV0_A + i * 4);
-		if (is_t5(adap->params.chip)) {
-			used = T5_USED_G(lo);
-			alloc = T5_ALLOC_G(lo);
-		} else {
-			used = USED_G(lo);
-			alloc = ALLOC_G(lo);
-		}
+		   meminfo.tx_pages_data[0], meminfo.tx_pages_data[1],
+		   meminfo.tx_pages_data[2], meminfo.tx_pages_data[3]);
+
+	seq_printf(seq, "%u p-structs\n\n", meminfo.p_structs);
+
+	for (i = 0; i < 4; i++)
 		/* For T6 these are MAC buffer groups */
 		seq_printf(seq, "Port %d using %u pages out of %u allocated\n",
-			   i, used, alloc);
-	}
-	for (i = 0; i < adap->params.arch.nchan; i++) {
-		if (CHELSIO_CHIP_VERSION(adap->params.chip) > CHELSIO_T5)
-			lo = t4_read_reg(adap,
-					 MPS_RX_LPBK_BG_PG_CNT0_A + i * 4);
-		else
-			lo = t4_read_reg(adap, MPS_RX_PG_RSV4_A + i * 4);
-		if (is_t5(adap->params.chip)) {
-			used = T5_USED_G(lo);
-			alloc = T5_ALLOC_G(lo);
-		} else {
-			used = USED_G(lo);
-			alloc = ALLOC_G(lo);
-		}
+			   i, meminfo.port_used[i], meminfo.port_alloc[i]);
+
+	for (i = 0; i < adap->params.arch.nchan; i++)
 		/* For T6 these are MAC buffer groups */
 		seq_printf(seq,
 			   "Loopback %d using %u pages out of %u allocated\n",
-			   i, used, alloc);
-	}
+			   i, meminfo.loopback_used[i],
+			   meminfo.loopback_alloc[i]);
+
 	return 0;
 }
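
The mps_tcam_show() change stops shifting DATAVIDH2 a second time: the 24-bit VNI is assembled from VIDL in bits 0-15, DATAVIDH1 in bits 16-22, and DATAVIDH2 as bit 23, and a field that is already masked into place must not be shifted again. A small worked sketch of that assembly, with field widths inferred from the masking above rather than taken from a register manual:

	/* Sketch: build a 24-bit VNI from the three TCAM fields.  Widths are
	 * inferred from the masking in mps_tcam_show(): VIDL occupies bits
	 * 0-15, DATAVIDH1 bits 16-22, DATAVIDH2 bit 23.
	 */
	#include <stdint.h>

	static uint32_t vni_assemble(uint32_t vidl, uint32_t vidh1,
				     uint32_t vidh2_masked)
	{
		/* vidh2_masked is (data2 & DATAVIDH2_F), already positioned
		 * at bit 23; the old "<< 23" pushed it out of the 24-bit VNI
		 * entirely.
		 */
		return (vidl & 0xffff) | ((vidh1 & 0x7f) << 16) |
		       (vidh2_masked & (1u << 23));
	}
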
 
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c
index eb338212f5af..7852d98bad75 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c
@@ -517,7 +517,8 @@ static int from_fw_port_mod_type(enum fw_port_type port_type,
 		else
 			return PORT_OTHER;
 	} else if (port_type == FW_PORT_TYPE_KR4_100G ||
-		   port_type == FW_PORT_TYPE_KR_SFP28) {
+		   port_type == FW_PORT_TYPE_KR_SFP28 ||
+		   port_type == FW_PORT_TYPE_KR_XLAUI) {
 		return PORT_NONE;
 	}
 
@@ -645,6 +646,13 @@ static void fw_caps_to_lmm(enum fw_port_type port_type,
 		FW_CAPS_TO_LMM(SPEED_25G, 25000baseKR_Full);
 		break;
 
+	case FW_PORT_TYPE_KR_XLAUI:
+		SET_LMM(Backplane);
+		FW_CAPS_TO_LMM(SPEED_1G, 1000baseKX_Full);
+		FW_CAPS_TO_LMM(SPEED_10G, 10000baseKR_Full);
+		FW_CAPS_TO_LMM(SPEED_40G, 40000baseKR4_Full);
+		break;
+
 	case FW_PORT_TYPE_CR2_QSFP:
 		SET_LMM(FIBRE);
 		SET_LMM(50000baseSR2_Full);
@@ -1396,6 +1404,101 @@ static int get_dump_data(struct net_device *dev, struct ethtool_dump *eth_dump,
 	return 0;
 }
 
+static int cxgb4_get_module_info(struct net_device *dev,
+				 struct ethtool_modinfo *modinfo)
+{
+	struct port_info *pi = netdev_priv(dev);
+	u8 sff8472_comp, sff_diag_type, sff_rev;
+	struct adapter *adapter = pi->adapter;
+	int ret;
+
+	if (!t4_is_inserted_mod_type(pi->mod_type))
+		return -EINVAL;
+
+	switch (pi->port_type) {
+	case FW_PORT_TYPE_SFP:
+	case FW_PORT_TYPE_QSA:
+	case FW_PORT_TYPE_SFP28:
+		ret = t4_i2c_rd(adapter, adapter->mbox, pi->tx_chan,
+				I2C_DEV_ADDR_A0, SFF_8472_COMP_ADDR,
+				SFF_8472_COMP_LEN, &sff8472_comp);
+		if (ret)
+			return ret;
+		ret = t4_i2c_rd(adapter, adapter->mbox, pi->tx_chan,
+				I2C_DEV_ADDR_A0, SFP_DIAG_TYPE_ADDR,
+				SFP_DIAG_TYPE_LEN, &sff_diag_type);
+		if (ret)
+			return ret;
+
+		if (!sff8472_comp || (sff_diag_type & 4)) {
+			modinfo->type = ETH_MODULE_SFF_8079;
+			modinfo->eeprom_len = ETH_MODULE_SFF_8079_LEN;
+		} else {
+			modinfo->type = ETH_MODULE_SFF_8472;
+			modinfo->eeprom_len = ETH_MODULE_SFF_8472_LEN;
+		}
+		break;
+
+	case FW_PORT_TYPE_QSFP:
+	case FW_PORT_TYPE_QSFP_10G:
+	case FW_PORT_TYPE_CR_QSFP:
+	case FW_PORT_TYPE_CR2_QSFP:
+	case FW_PORT_TYPE_CR4_QSFP:
+		ret = t4_i2c_rd(adapter, adapter->mbox, pi->tx_chan,
+				I2C_DEV_ADDR_A0, SFF_REV_ADDR,
+				SFF_REV_LEN, &sff_rev);
+		/* For QSFP type ports, revision value >= 3
+		 * means the module is SFF-8636 compliant.
+		 */
+		if (ret)
+			return ret;
+		if (sff_rev >= 0x3) {
+			modinfo->type = ETH_MODULE_SFF_8636;
+			modinfo->eeprom_len = ETH_MODULE_SFF_8636_LEN;
+		} else {
+			modinfo->type = ETH_MODULE_SFF_8436;
+			modinfo->eeprom_len = ETH_MODULE_SFF_8436_LEN;
+		}
+		break;
+
+	default:
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+static int cxgb4_get_module_eeprom(struct net_device *dev,
+				   struct ethtool_eeprom *eprom, u8 *data)
+{
+	int ret = 0, offset = eprom->offset, len = eprom->len;
+	struct port_info *pi = netdev_priv(dev);
+	struct adapter *adapter = pi->adapter;
+
+	memset(data, 0, eprom->len);
+	if (offset + len <= I2C_PAGE_SIZE)
+		return t4_i2c_rd(adapter, adapter->mbox, pi->tx_chan,
+				 I2C_DEV_ADDR_A0, offset, len, data);
+
+	/* offset + len spans the 0xa0 and 0xa2 pages */
+	if (offset <= I2C_PAGE_SIZE) {
+		/* read 0xa0 page */
+		len = I2C_PAGE_SIZE - offset;
+		ret =  t4_i2c_rd(adapter, adapter->mbox, pi->tx_chan,
+				 I2C_DEV_ADDR_A0, offset, len, data);
+		if (ret)
+			return ret;
+		offset = I2C_PAGE_SIZE;
+		/* Remaining bytes to be read from second page =
+		 * Total length - bytes read from first page
+		 */
+		len = eprom->len - len;
+	}
+	/* Read additional optical diagnostics from page 0xa2 if supported */
+	return t4_i2c_rd(adapter, adapter->mbox, pi->tx_chan, I2C_DEV_ADDR_A2,
+			 offset, len, &data[eprom->len - len]);
+}
+
 static const struct ethtool_ops cxgb_ethtool_ops = {
 	.get_link_ksettings = get_link_ksettings,
 	.set_link_ksettings = set_link_ksettings,
@@ -1430,6 +1533,8 @@ static const struct ethtool_ops cxgb_ethtool_ops = {
 	.set_dump          = set_dump,
 	.get_dump_flag     = get_dump_flag,
 	.get_dump_data     = get_dump_data,
+	.get_module_info   = cxgb4_get_module_info,
+	.get_module_eeprom = cxgb4_get_module_eeprom,
 };
 
 void cxgb4_set_ethtool_ops(struct net_device *netdev)
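
cxgb4_get_module_eeprom() treats the 512-byte SFF-8472 image as two 256-byte I2C devices: bytes below I2C_PAGE_SIZE come from device 0xa0 and the remainder from 0xa2, so a read straddling the boundary is cut into two transactions and the second offset is passed through unchanged. A worked userspace sketch of the split arithmetic, assuming I2C_PAGE_SIZE is 256 as the layout implies:

	/* Sketch of the two-device split, assuming a 256-byte page.  E.g.
	 * offset 200, len 100 becomes a 56-byte read from 0xa0 and a
	 * 44-byte read from 0xa2, matching the arithmetic above.
	 */
	#include <stdio.h>

	#define PAGE_SZ 256U

	static void split_read(unsigned int offset, unsigned int len)
	{
		if (offset + len <= PAGE_SZ) {
			printf("0xa0: offset %u, len %u\n", offset, len);
			return;
		}
		if (offset < PAGE_SZ) {
			unsigned int first = PAGE_SZ - offset;

			printf("0xa0: offset %u, len %u\n", offset, first);
			offset = PAGE_SZ; /* continue on the next device */
			len -= first;
		}
		/* remainder comes from 0xa2; the offset is not rebased,
		 * as in the driver's final t4_i2c_rd() call
		 */
		printf("0xa2: offset %u, len %u\n", offset, len);
	}

	int main(void)
	{
		split_read(200, 100);	/* straddles the page boundary */
		return 0;
	}
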
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.c
index 5980f308a253..3177b0c9bd2d 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.c
@@ -439,19 +439,32 @@ int cxgb4_get_free_ftid(struct net_device *dev, int family)
 		if (ftid >= t->nftids)
 			ftid = -1;
 	} else {
-		ftid = bitmap_find_free_region(t->ftid_bmap, t->nftids, 2);
-		if (ftid < 0)
-			goto out_unlock;
+		if (is_t6(adap->params.chip)) {
+			ftid = bitmap_find_free_region(t->ftid_bmap,
+						       t->nftids, 1);
+			if (ftid < 0)
+				goto out_unlock;
+
+			/* this is only a lookup, keep the found region
+			 * unallocated
+			 */
+			bitmap_release_region(t->ftid_bmap, ftid, 1);
+		} else {
+			ftid = bitmap_find_free_region(t->ftid_bmap,
+						       t->nftids, 2);
+			if (ftid < 0)
+				goto out_unlock;
 
-		/* this is only a lookup, keep the found region unallocated */
-		bitmap_release_region(t->ftid_bmap, ftid, 2);
+			bitmap_release_region(t->ftid_bmap, ftid, 2);
+		}
 	}
 out_unlock:
 	spin_unlock_bh(&t->ftid_lock);
 	return ftid;
 }
 
-static int cxgb4_set_ftid(struct tid_info *t, int fidx, int family)
+static int cxgb4_set_ftid(struct tid_info *t, int fidx, int family,
+			  unsigned int chip_ver)
 {
 	spin_lock_bh(&t->ftid_lock);
 
@@ -460,22 +473,31 @@ static int cxgb4_set_ftid(struct tid_info *t, int fidx, int family)
 		return -EBUSY;
 	}
 
-	if (family == PF_INET)
+	if (family == PF_INET) {
 		__set_bit(fidx, t->ftid_bmap);
-	else
-		bitmap_allocate_region(t->ftid_bmap, fidx, 2);
+	} else {
+		if (chip_ver < CHELSIO_T6)
+			bitmap_allocate_region(t->ftid_bmap, fidx, 2);
+		else
+			bitmap_allocate_region(t->ftid_bmap, fidx, 1);
+	}
 
 	spin_unlock_bh(&t->ftid_lock);
 	return 0;
 }
 
-static void cxgb4_clear_ftid(struct tid_info *t, int fidx, int family)
+static void cxgb4_clear_ftid(struct tid_info *t, int fidx, int family,
+			     unsigned int chip_ver)
 {
 	spin_lock_bh(&t->ftid_lock);
-	if (family == PF_INET)
+	if (family == PF_INET) {
 		__clear_bit(fidx, t->ftid_bmap);
-	else
-		bitmap_release_region(t->ftid_bmap, fidx, 2);
+	} else {
+		if (chip_ver < CHELSIO_T6)
+			bitmap_release_region(t->ftid_bmap, fidx, 2);
+		else
+			bitmap_release_region(t->ftid_bmap, fidx, 1);
+	}
 	spin_unlock_bh(&t->ftid_lock);
 }
 
@@ -694,7 +716,7 @@ void clear_filter(struct adapter *adap, struct filter_entry *f)
 	if (f->smt)
 		cxgb4_smt_release(f->smt);
 
-	if (f->fs.hash && f->fs.type)
+	if ((f->fs.hash || is_t6(adap->params.chip)) && f->fs.type)
 		cxgb4_clip_release(f->dev, (const u32 *)&f->fs.val.lip, 1);
 
 	/* The zeroing of the filter rule below clears the filter valid,
@@ -1189,6 +1211,7 @@ int __cxgb4_set_filter(struct net_device *dev, int filter_id,
 		       struct filter_ctx *ctx)
 {
 	struct adapter *adapter = netdev2adap(dev);
+	unsigned int chip_ver = CHELSIO_CHIP_VERSION(adapter->params.chip);
 	unsigned int max_fidx, fidx;
 	struct filter_entry *f;
 	u32 iconf;
@@ -1225,12 +1248,18 @@ int __cxgb4_set_filter(struct net_device *dev, int filter_id,
 	 * insertion.
 	 */
 	if (fs->type == 0) { /* IPv4 */
-		/* If our IPv4 filter isn't being written to a
-		 * multiple of four filter index and there's an IPv6
-		 * filter at the multiple of 4 base slot, then we
-		 * prevent insertion.
+		/* For T6, if our IPv4 filter isn't being written to a
+		 * multiple-of-two filter index and there's an IPv6
+		 * filter at the multiple-of-2 base slot, then we need
+		 * to delete that IPv6 filter ...
+		 * For adapters below T6, an IPv6 filter occupies 4 entries,
+		 * so the delete must target the multiple-of-4 base slot.
+		 */
-		fidx = filter_id & ~0x3;
+		if (chip_ver < CHELSIO_T6)
+			fidx = filter_id & ~0x3;
+		else
+			fidx = filter_id & ~0x1;
+
 		if (fidx != filter_id &&
 		    adapter->tids.ftid_tab[fidx].fs.type) {
 			f = &adapter->tids.ftid_tab[fidx];
@@ -1242,23 +1271,42 @@ int __cxgb4_set_filter(struct net_device *dev, int filter_id,
 			}
 		}
 	} else { /* IPv6 */
-		/* Ensure that the IPv6 filter is aligned on a
-		 * multiple of 4 boundary.
-		 */
-		if (filter_id & 0x3) {
-			dev_err(adapter->pdev_dev,
-				"Invalid location. IPv6 must be aligned on a 4-slot boundary\n");
-			return -EINVAL;
-		}
+		if (chip_ver < CHELSIO_T6) {
+			/* Ensure that the IPv6 filter is aligned on a
+			 * multiple of 4 boundary.
+			 */
+			if (filter_id & 0x3) {
+				dev_err(adapter->pdev_dev,
+					"Invalid location. IPv6 must be aligned on a 4-slot boundary\n");
+				return -EINVAL;
+			}
 
-		/* Check all except the base overlapping IPv4 filter slots. */
-		for (fidx = filter_id + 1; fidx < filter_id + 4; fidx++) {
+			/* Check all except the base overlapping IPv4 filter
+			 * slots.
+			 */
+			for (fidx = filter_id + 1; fidx < filter_id + 4;
+			     fidx++) {
+				f = &adapter->tids.ftid_tab[fidx];
+				if (f->valid) {
+					dev_err(adapter->pdev_dev,
+						"Invalid location.  IPv6 requires 4 slots and an IPv4 filter exists at %u\n",
+						fidx);
+					return -EBUSY;
+				}
+			}
+		} else {
+			/* For T6, with CLIP enabled, an IPv6 filter
+			 * occupies 2 entries.
+			 */
+			if (filter_id & 0x1)
+				return -EINVAL;
+			/* Check overlapping IPv4 filter slot */
+			fidx = filter_id + 1;
 			f = &adapter->tids.ftid_tab[fidx];
 			if (f->valid) {
-				dev_err(adapter->pdev_dev,
-					"Invalid location.  IPv6 requires 4 slots and an IPv4 filter exists at %u\n",
-					fidx);
-				return -EINVAL;
+				pr_err("%s: IPv6 filter requires 2 indices. IPv4 filter already present at %d. Please remove IPv4 filter first.\n",
+				       __func__, fidx);
+				return -EBUSY;
 			}
 		}
 	}
@@ -1272,16 +1320,18 @@ int __cxgb4_set_filter(struct net_device *dev, int filter_id,
 
 	fidx = filter_id + adapter->tids.ftid_base;
 	ret = cxgb4_set_ftid(&adapter->tids, filter_id,
-			     fs->type ? PF_INET6 : PF_INET);
+			     fs->type ? PF_INET6 : PF_INET,
+			     chip_ver);
 	if (ret)
 		return ret;
 
 	/* Check to make sure the filter requested is writable ... */
 	ret = writable_filter(f);
 	if (ret) {
 		/* Clear the bits we have set above */
 		cxgb4_clear_ftid(&adapter->tids, filter_id,
-				 fs->type ? PF_INET6 : PF_INET);
+				 fs->type ? PF_INET6 : PF_INET,
+				 chip_ver);
 		return ret;
 	}
 
@@ -1291,6 +1341,17 @@ int __cxgb4_set_filter(struct net_device *dev, int filter_id,
 	if (f->valid)
 		clear_filter(adapter, f);
 
+	if (is_t6(adapter->params.chip) && fs->type &&
+	    ipv6_addr_type((const struct in6_addr *)fs->val.lip) !=
+	    IPV6_ADDR_ANY) {
+		ret = cxgb4_clip_get(dev, (const u32 *)&fs->val.lip, 1);
+		if (ret) {
+			cxgb4_clear_ftid(&adapter->tids, filter_id, PF_INET6,
+					 chip_ver);
+			return ret;
+		}
+	}
+
 	/* Convert the filter specification into our internal format.
 	 * We copy the PF/VF specification into the Outer VLAN field
 	 * here so the rest of the code -- including the interface to
@@ -1316,7 +1377,8 @@ int __cxgb4_set_filter(struct net_device *dev, int filter_id,
 	ret = set_filter_wr(adapter, filter_id);
 	if (ret) {
 		cxgb4_clear_ftid(&adapter->tids, filter_id,
-				 fs->type ? PF_INET6 : PF_INET);
+				 fs->type ? PF_INET6 : PF_INET,
+				 chip_ver);
 		clear_filter(adapter, f);
 	}
 
@@ -1394,6 +1456,7 @@ int __cxgb4_del_filter(struct net_device *dev, int filter_id,
 		       struct filter_ctx *ctx)
 {
 	struct adapter *adapter = netdev2adap(dev);
+	unsigned int chip_ver = CHELSIO_CHIP_VERSION(adapter->params.chip);
 	struct filter_entry *f;
 	unsigned int max_fidx;
 	int ret;
@@ -1419,7 +1482,8 @@ int __cxgb4_del_filter(struct net_device *dev, int filter_id,
 	if (f->valid) {
 		f->ctx = ctx;
 		cxgb4_clear_ftid(&adapter->tids, filter_id,
-				 f->fs.type ? PF_INET6 : PF_INET);
+				 f->fs.type ? PF_INET6 : PF_INET,
+				 chip_ver);
 		return del_filter_wr(adapter, filter_id);
 	}
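
The filter-slot rework leans on bitmap_find_free_region() taking an order rather than a count: order 1 reserves 2^1 = 2 aligned slots (T6, where an IPv6 filter with CLIP needs two entries) and order 2 reserves 2^2 = 4 (earlier chips). A self-contained sketch of the probe-then-release idiom from cxgb4_get_free_ftid(), with locking omitted:

	/* Sketch of the probe-then-release idiom: find an aligned free
	 * region of 2^order bits, then hand it straight back because this
	 * path only reports availability.  Locking omitted for brevity.
	 */
	#include <linux/types.h>
	#include <linux/bitmap.h>

	static int probe_free_ftid(unsigned long *bmap, unsigned int nbits,
				   bool is_t6_chip)
	{
		/* 2 slots (order 1) on T6, 4 slots (order 2) before T6 */
		int order = is_t6_chip ? 1 : 2;
		int ftid = bitmap_find_free_region(bmap, nbits, order);

		if (ftid < 0)
			return -1;

		bitmap_release_region(bmap, ftid, order); /* lookup only */
		return ftid;
	}
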
 
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
index 05a4abfd5ec1..1ca2a39ed0f8 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
@@ -65,6 +65,7 @@
 #include <net/addrconf.h>
 #include <linux/uaccess.h>
 #include <linux/crash_dump.h>
+#include <net/udp_tunnel.h>
 
 #include "cxgb4.h"
 #include "cxgb4_filter.h"
@@ -101,7 +102,9 @@ const char cxgb4_driver_version[] = DRV_VERSION;
  */
 #define CH_PCI_DEVICE_ID_TABLE_DEFINE_BEGIN \
 	static const struct pci_device_id cxgb4_pci_tbl[] = {
-#define CH_PCI_DEVICE_ID_FUNCTION 0x4
+#define CXGB4_UNIFIED_PF 0x4
+
+#define CH_PCI_DEVICE_ID_FUNCTION CXGB4_UNIFIED_PF
 
 /* Include PCI Device IDs for both PF4 and PF0-3 so our PCI probe() routine is
  * called for both.
@@ -109,7 +112,7 @@ const char cxgb4_driver_version[] = DRV_VERSION;
 #define CH_PCI_DEVICE_ID_FUNCTION2 0x0
 
 #define CH_PCI_ID_TABLE_ENTRY(devid) \
-		{PCI_VDEVICE(CHELSIO, (devid)), 4}
+		{PCI_VDEVICE(CHELSIO, (devid)), CXGB4_UNIFIED_PF}
 
 #define CH_PCI_DEVICE_ID_TABLE_DEFINE_END \
 		{ 0, } \
@@ -1673,7 +1676,7 @@ int cxgb4_flush_eq_cache(struct net_device *dev)
 {
 	struct adapter *adap = netdev2adap(dev);
 
-	return t4_sge_ctxt_flush(adap, adap->mbox);
+	return t4_sge_ctxt_flush(adap, adap->mbox, CTXT_EGRESS);
 }
 EXPORT_SYMBOL(cxgb4_flush_eq_cache);
 
@@ -2604,7 +2607,7 @@ static int cxgb_change_mtu(struct net_device *dev, int new_mtu)
 }
 
 #ifdef CONFIG_PCI_IOV
-static int dummy_open(struct net_device *dev)
+static int cxgb4_mgmt_open(struct net_device *dev)
 {
 	/* Turn carrier off since we don't have to transmit anything on this
 	 * interface.
@@ -2614,39 +2617,44 @@ static int dummy_open(struct net_device *dev)
 }
 
 /* Fill MAC address that will be assigned by the FW */
-static void fill_vf_station_mac_addr(struct adapter *adap)
+static void cxgb4_mgmt_fill_vf_station_mac_addr(struct adapter *adap)
 {
-	unsigned int i;
 	u8 hw_addr[ETH_ALEN], macaddr[ETH_ALEN];
+	unsigned int i, vf, nvfs;
+	u16 a, b;
 	int err;
 	u8 *na;
-	u16 a, b;
 
+	adap->params.pci.vpd_cap_addr = pci_find_capability(adap->pdev,
+							    PCI_CAP_ID_VPD);
 	err = t4_get_raw_vpd_params(adap, &adap->params.vpd);
-	if (!err) {
-		na = adap->params.vpd.na;
-		for (i = 0; i < ETH_ALEN; i++)
-			hw_addr[i] = (hex2val(na[2 * i + 0]) * 16 +
-				      hex2val(na[2 * i + 1]));
-		a = (hw_addr[0] << 8) | hw_addr[1];
-		b = (hw_addr[1] << 8) | hw_addr[2];
-		a ^= b;
-		a |= 0x0200;    /* locally assigned Ethernet MAC address */
-		a &= ~0x0100;   /* not a multicast Ethernet MAC address */
-		macaddr[0] = a >> 8;
-		macaddr[1] = a & 0xff;
-
-		for (i = 2; i < 5; i++)
-			macaddr[i] = hw_addr[i + 1];
-
-		for (i = 0; i < adap->num_vfs; i++) {
-			macaddr[5] = adap->pf * 16 + i;
-			ether_addr_copy(adap->vfinfo[i].vf_mac_addr, macaddr);
-		}
+	if (err)
+		return;
+
+	na = adap->params.vpd.na;
+	for (i = 0; i < ETH_ALEN; i++)
+		hw_addr[i] = (hex2val(na[2 * i + 0]) * 16 +
+			      hex2val(na[2 * i + 1]));
+
+	a = (hw_addr[0] << 8) | hw_addr[1];
+	b = (hw_addr[1] << 8) | hw_addr[2];
+	a ^= b;
+	a |= 0x0200;    /* locally assigned Ethernet MAC address */
+	a &= ~0x0100;   /* not a multicast Ethernet MAC address */
+	macaddr[0] = a >> 8;
+	macaddr[1] = a & 0xff;
+
+	for (i = 2; i < 5; i++)
+		macaddr[i] = hw_addr[i + 1];
+
+	for (vf = 0, nvfs = pci_sriov_get_totalvfs(adap->pdev);
+		vf < nvfs; vf++) {
+		macaddr[5] = adap->pf * 16 + vf;
+		ether_addr_copy(adap->vfinfo[vf].vf_mac_addr, macaddr);
 	}
 }
 
-static int cxgb_set_vf_mac(struct net_device *dev, int vf, u8 *mac)
+static int cxgb4_mgmt_set_vf_mac(struct net_device *dev, int vf, u8 *mac)
 {
 	struct port_info *pi = netdev_priv(dev);
 	struct adapter *adap = pi->adapter;
@@ -2668,8 +2676,8 @@ static int cxgb_set_vf_mac(struct net_device *dev, int vf, u8 *mac)
 	return ret;
 }
 
-static int cxgb_get_vf_config(struct net_device *dev,
-			      int vf, struct ifla_vf_info *ivi)
+static int cxgb4_mgmt_get_vf_config(struct net_device *dev,
+				    int vf, struct ifla_vf_info *ivi)
 {
 	struct port_info *pi = netdev_priv(dev);
 	struct adapter *adap = pi->adapter;
@@ -2683,8 +2691,8 @@ static int cxgb_get_vf_config(struct net_device *dev,
 	return 0;
 }
 
-static int cxgb_get_phys_port_id(struct net_device *dev,
-				 struct netdev_phys_item_id *ppid)
+static int cxgb4_mgmt_get_phys_port_id(struct net_device *dev,
+				       struct netdev_phys_item_id *ppid)
 {
 	struct port_info *pi = netdev_priv(dev);
 	unsigned int phy_port_id;
@@ -2695,8 +2703,8 @@ static int cxgb_get_phys_port_id(struct net_device *dev,
 	return 0;
 }
 
-static int cxgb_set_vf_rate(struct net_device *dev, int vf, int min_tx_rate,
-			    int max_tx_rate)
+static int cxgb4_mgmt_set_vf_rate(struct net_device *dev, int vf,
+				  int min_tx_rate, int max_tx_rate)
 {
 	struct port_info *pi = netdev_priv(dev);
 	struct adapter *adap = pi->adapter;
@@ -2775,7 +2783,30 @@ static int cxgb_set_vf_rate(struct net_device *dev, int vf, int min_tx_rate,
 	return 0;
 }
 
-#endif
+static int cxgb4_mgmt_set_vf_vlan(struct net_device *dev, int vf,
+				  u16 vlan, u8 qos, __be16 vlan_proto)
+{
+	struct port_info *pi = netdev_priv(dev);
+	struct adapter *adap = pi->adapter;
+	int ret;
+
+	if (vf >= adap->num_vfs || vlan > 4095 || qos > 7)
+		return -EINVAL;
+
+	if (vlan_proto != htons(ETH_P_8021Q) || qos != 0)
+		return -EPROTONOSUPPORT;
+
+	ret = t4_set_vlan_acl(adap, adap->mbox, vf + 1, vlan);
+	if (!ret) {
+		adap->vfinfo[vf].vlan = vlan;
+		return 0;
+	}
+
+	dev_err(adap->pdev_dev, "Err %d %s VLAN ACL for PF/VF %d/%d\n",
+		ret, (vlan ? "setting" : "clearing"), adap->pf, vf);
+	return ret;
+}
+#endif /* CONFIG_PCI_IOV */
 
 static int cxgb_set_mac_addr(struct net_device *dev, void *p)
 {
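
cxgb4_mgmt_fill_vf_station_mac_addr() above derives each VF MAC from the VPD network address: it XORs two 16-bit words of the base address, forces the locally-administered bit (0x02 in the first octet), clears the multicast bit (0x01), and stamps pf * 16 + vf into the last octet so every VF gets a distinct address. A standalone sketch of that derivation; the base address here is hypothetical, where the driver reads it from VPD:

	/* Userspace sketch of the VF MAC derivation.  The base address is
	 * hypothetical; the driver obtains it via t4_get_raw_vpd_params().
	 */
	#include <stdint.h>
	#include <stdio.h>

	static void derive_vf_mac(const uint8_t hw[6], unsigned int pf,
				  unsigned int vf, uint8_t mac[6])
	{
		uint16_t a = (hw[0] << 8) | hw[1];
		uint16_t b = (hw[1] << 8) | hw[2];

		a ^= b;
		a |= 0x0200;	/* set locally-administered bit of octet 0 */
		a &= ~0x0100;	/* clear multicast bit of octet 0 */
		mac[0] = a >> 8;
		mac[1] = a & 0xff;
		mac[2] = hw[3];
		mac[3] = hw[4];
		mac[4] = hw[5];
		mac[5] = pf * 16 + vf;	/* distinct per VF, 16 VFs per PF */
	}

	int main(void)
	{
		const uint8_t base[6] = { 0x00, 0x07, 0x43, 0x12, 0x34, 0x56 };
		uint8_t mac[6];

		derive_vf_mac(base, 4, 0, mac);	/* PF4, VF0 */
		printf("%02x:%02x:%02x:%02x:%02x:%02x\n",
		       mac[0], mac[1], mac[2], mac[3], mac[4], mac[5]);
		return 0;
	}
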
@@ -2897,9 +2928,6 @@ static int cxgb_set_tx_maxrate(struct net_device *dev, int index, u32 rate)
 static int cxgb_setup_tc_flower(struct net_device *dev,
 				struct tc_cls_flower_offload *cls_flower)
 {
-	if (cls_flower->common.chain_index)
-		return -EOPNOTSUPP;
-
 	switch (cls_flower->command) {
 	case TC_CLSFLOWER_REPLACE:
 		return cxgb4_tc_flower_replace(dev, cls_flower);
@@ -2915,9 +2943,6 @@ static int cxgb_setup_tc_flower(struct net_device *dev,
 static int cxgb_setup_tc_cls_u32(struct net_device *dev,
 				 struct tc_cls_u32_offload *cls_u32)
 {
-	if (cls_u32->common.chain_index)
-		return -EOPNOTSUPP;
-
 	switch (cls_u32->command) {
 	case TC_CLSU32_NEW_KNODE:
 	case TC_CLSU32_REPLACE_KNODE:
@@ -2943,7 +2968,7 @@ static int cxgb_setup_tc_block_cb(enum tc_setup_type type, void *type_data,
 		return -EINVAL;
 	}
 
-	if (!tc_can_offload(dev))
+	if (!tc_cls_can_offload_and_chain0(dev, type_data))
 		return -EOPNOTSUPP;
 
 	switch (type) {
@@ -2987,6 +3012,174 @@ static int cxgb_setup_tc(struct net_device *dev, enum tc_setup_type type,
 	}
 }
 
+static void cxgb_del_udp_tunnel(struct net_device *netdev,
+				struct udp_tunnel_info *ti)
+{
+	struct port_info *pi = netdev_priv(netdev);
+	struct adapter *adapter = pi->adapter;
+	unsigned int chip_ver = CHELSIO_CHIP_VERSION(adapter->params.chip);
+	u8 match_all_mac[] = { 0, 0, 0, 0, 0, 0 };
+	int ret = 0, i;
+
+	if (chip_ver < CHELSIO_T6)
+		return;
+
+	switch (ti->type) {
+	case UDP_TUNNEL_TYPE_VXLAN:
+		if (!adapter->vxlan_port_cnt ||
+		    adapter->vxlan_port != ti->port)
+			return; /* Invalid VxLAN destination port */
+
+		adapter->vxlan_port_cnt--;
+		if (adapter->vxlan_port_cnt)
+			return;
+
+		adapter->vxlan_port = 0;
+		t4_write_reg(adapter, MPS_RX_VXLAN_TYPE_A, 0);
+		break;
+	case UDP_TUNNEL_TYPE_GENEVE:
+		if (!adapter->geneve_port_cnt ||
+		    adapter->geneve_port != ti->port)
+			return; /* Invalid GENEVE destination port */
+
+		adapter->geneve_port_cnt--;
+		if (adapter->geneve_port_cnt)
+			return;
+
+		adapter->geneve_port = 0;
+		t4_write_reg(adapter, MPS_RX_GENEVE_TYPE_A, 0);
+		break;
+	default:
+		return;
+	}
+
+	/* Matchall mac entries can be deleted only after all tunnel ports
+	 * are brought down or removed.
+	 */
+	if (!adapter->rawf_cnt)
+		return;
+	for_each_port(adapter, i) {
+		pi = adap2pinfo(adapter, i);
+		ret = t4_free_raw_mac_filt(adapter, pi->viid,
+					   match_all_mac, match_all_mac,
+					   adapter->rawf_start +
+					    pi->port_id,
+					   1, pi->port_id, true);
+		if (ret < 0) {
+			netdev_info(netdev, "Failed to free mac filter entry for port %d\n",
+				    i);
+			return;
+		}
+		atomic_dec(&adapter->mps_encap[adapter->rawf_start +
+			   pi->port_id].refcnt);
+	}
+}
+
+static void cxgb_add_udp_tunnel(struct net_device *netdev,
+				struct udp_tunnel_info *ti)
+{
+	struct port_info *pi = netdev_priv(netdev);
+	struct adapter *adapter = pi->adapter;
+	unsigned int chip_ver = CHELSIO_CHIP_VERSION(adapter->params.chip);
+	u8 match_all_mac[] = { 0, 0, 0, 0, 0, 0 };
+	int i, ret;
+
+	if (chip_ver < CHELSIO_T6 || !adapter->rawf_cnt)
+		return;
+
+	switch (ti->type) {
+	case UDP_TUNNEL_TYPE_VXLAN:
+		/* The callback for adding a VXLAN port can be invoked with
+		 * the same port for both IPv4 and IPv6. Do not disable the
+		 * offload when the same port is added for both protocols
+		 * and later removed for only one of them.
+		 */
+		if (adapter->vxlan_port_cnt &&
+		    adapter->vxlan_port == ti->port) {
+			adapter->vxlan_port_cnt++;
+			return;
+		}
+
+		/* We will support only one VxLAN port */
+		if (adapter->vxlan_port_cnt) {
+			netdev_info(netdev, "UDP port %d already offloaded, not adding port %d\n",
+				    be16_to_cpu(adapter->vxlan_port),
+				    be16_to_cpu(ti->port));
+			return;
+		}
+
+		adapter->vxlan_port = ti->port;
+		adapter->vxlan_port_cnt = 1;
+
+		t4_write_reg(adapter, MPS_RX_VXLAN_TYPE_A,
+			     VXLAN_V(be16_to_cpu(ti->port)) | VXLAN_EN_F);
+		break;
+	case UDP_TUNNEL_TYPE_GENEVE:
+		if (adapter->geneve_port_cnt &&
+		    adapter->geneve_port == ti->port) {
+			adapter->geneve_port_cnt++;
+			return;
+		}
+
+		/* We will support only one GENEVE port */
+		if (adapter->geneve_port_cnt) {
+			netdev_info(netdev, "UDP port %d already offloaded, not adding port %d\n",
+				    be16_to_cpu(adapter->geneve_port),
+				    be16_to_cpu(ti->port));
+			return;
+		}
+
+		adapter->geneve_port = ti->port;
+		adapter->geneve_port_cnt = 1;
+
+		t4_write_reg(adapter, MPS_RX_GENEVE_TYPE_A,
+			     GENEVE_V(be16_to_cpu(ti->port)) | GENEVE_EN_F);
+		break;
+	default:
+		return;
+	}
+
+	/* Create a 'match all' mac filter entry for the inner mac,
+	 * if the raw mac interface is supported. Once the Linux kernel
+	 * provides driver entry points for adding/deleting inner mac
+	 * addresses, we will remove this 'match all' entry and fall back
+	 * to adding exact-match filters.
+	 */
+	for_each_port(adapter, i) {
+		pi = adap2pinfo(adapter, i);
+
+		ret = t4_alloc_raw_mac_filt(adapter, pi->viid,
+					    match_all_mac,
+					    match_all_mac,
+					    adapter->rawf_start +
+					    pi->port_id,
+					    1, pi->port_id, true);
+		if (ret < 0) {
+			netdev_info(netdev, "Failed to allocate a mac filter entry, not adding port %d\n",
+				    be16_to_cpu(ti->port));
+			cxgb_del_udp_tunnel(netdev, ti);
+			return;
+		}
+		atomic_inc(&adapter->mps_encap[ret].refcnt);
+	}
+}
+
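For context, the two NDOs above are driven by the stack's UDP tunnel port notifications: when a VXLAN or GENEVE socket comes up, each device receives an ndo_udp_tunnel_add() call carrying a udp_tunnel_info. A hypothetical invocation (the function name, AF_INET and port 4789 are illustrative only):

	static void example_announce_vxlan_port(struct net_device *netdev)
	{
		struct udp_tunnel_info ti = {
			.type      = UDP_TUNNEL_TYPE_VXLAN,
			.sa_family = AF_INET,
			.port      = htons(4789),	/* already network order */
		};

		netdev->netdev_ops->ndo_udp_tunnel_add(netdev, &ti);
	}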
+static netdev_features_t cxgb_features_check(struct sk_buff *skb,
+					     struct net_device *dev,
+					     netdev_features_t features)
+{
+	struct port_info *pi = netdev_priv(dev);
+	struct adapter *adapter = pi->adapter;
+
+	if (CHELSIO_CHIP_VERSION(adapter->params.chip) < CHELSIO_T6)
+		return features;
+
+	/* Check if hw supports offload for this packet */
+	if (!skb->encapsulation || cxgb_encap_offload_supported(skb))
+		return features;
+
+	/* Offload is not supported for this encapsulated packet */
+	return features & ~(NETIF_F_CSUM_MASK | NETIF_F_GSO_MASK);
+}
+
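cxgb_features_check() above is consulted per packet: netif_skb_features() intersects the device features with whatever the NDO returns, which is how checksum/GSO get stripped for encapsulated packets the hardware cannot offload. Approximately (a sketch of the core-side logic, not part of this patch):

	/* net/core/dev.c, approximately */
	if (dev->netdev_ops->ndo_features_check)
		features &= dev->netdev_ops->ndo_features_check(skb, dev,
								features);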
 static netdev_features_t cxgb_fix_features(struct net_device *dev,
 					   netdev_features_t features)
 {
@@ -3018,20 +3211,25 @@ static const struct net_device_ops cxgb4_netdev_ops = {
 #endif /* CONFIG_CHELSIO_T4_FCOE */
 	.ndo_set_tx_maxrate   = cxgb_set_tx_maxrate,
 	.ndo_setup_tc         = cxgb_setup_tc,
+	.ndo_udp_tunnel_add   = cxgb_add_udp_tunnel,
+	.ndo_udp_tunnel_del   = cxgb_del_udp_tunnel,
+	.ndo_features_check   = cxgb_features_check,
 	.ndo_fix_features     = cxgb_fix_features,
 };
 
 #ifdef CONFIG_PCI_IOV
 static const struct net_device_ops cxgb4_mgmt_netdev_ops = {
-	.ndo_open             = dummy_open,
-	.ndo_set_vf_mac       = cxgb_set_vf_mac,
-	.ndo_get_vf_config    = cxgb_get_vf_config,
-	.ndo_set_vf_rate      = cxgb_set_vf_rate,
-	.ndo_get_phys_port_id = cxgb_get_phys_port_id,
+	.ndo_open             = cxgb4_mgmt_open,
+	.ndo_set_vf_mac       = cxgb4_mgmt_set_vf_mac,
+	.ndo_get_vf_config    = cxgb4_mgmt_get_vf_config,
+	.ndo_set_vf_rate      = cxgb4_mgmt_set_vf_rate,
+	.ndo_get_phys_port_id = cxgb4_mgmt_get_phys_port_id,
+	.ndo_set_vf_vlan      = cxgb4_mgmt_set_vf_vlan,
 };
 #endif
 
-static void get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
+static void cxgb4_mgmt_get_drvinfo(struct net_device *dev,
+				   struct ethtool_drvinfo *info)
 {
 	struct adapter *adapter = netdev2adap(dev);
 
@@ -3043,7 +3241,7 @@ static void get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
 }
 
 static const struct ethtool_ops cxgb4_mgmt_ethtool_ops = {
-	.get_drvinfo       = get_drvinfo,
+	.get_drvinfo       = cxgb4_mgmt_get_drvinfo,
 };
 
 void t4_fatal_err(struct adapter *adap)
@@ -4759,7 +4957,7 @@ static int get_chip_type(struct pci_dev *pdev, u32 pl_rev)
 }
 
 #ifdef CONFIG_PCI_IOV
-static void dummy_setup(struct net_device *dev)
+static void cxgb4_mgmt_setup(struct net_device *dev)
 {
 	dev->type = ARPHRD_NONE;
 	dev->mtu = 0;
@@ -4775,38 +4973,6 @@ static void dummy_setup(struct net_device *dev)
 	dev->needs_free_netdev = true;
 }
 
-static int config_mgmt_dev(struct pci_dev *pdev)
-{
-	struct adapter *adap = pci_get_drvdata(pdev);
-	struct net_device *netdev;
-	struct port_info *pi;
-	char name[IFNAMSIZ];
-	int err;
-
-	snprintf(name, IFNAMSIZ, "mgmtpf%d%d", adap->adap_idx, adap->pf);
-	netdev = alloc_netdev(sizeof(struct port_info), name, NET_NAME_UNKNOWN,
-			      dummy_setup);
-	if (!netdev)
-		return -ENOMEM;
-
-	pi = netdev_priv(netdev);
-	pi->adapter = adap;
-	pi->tx_chan = adap->pf % adap->params.nports;
-	SET_NETDEV_DEV(netdev, &pdev->dev);
-
-	adap->port[0] = netdev;
-	pi->port_id = 0;
-
-	err = register_netdev(adap->port[0]);
-	if (err) {
-		pr_info("Unable to register VF mgmt netdev %s\n", name);
-		free_netdev(adap->port[0]);
-		adap->port[0] = NULL;
-		return err;
-	}
-	return 0;
-}
-
 static int cxgb4_iov_configure(struct pci_dev *pdev, int num_vfs)
 {
 	struct adapter *adap = pci_get_drvdata(pdev);
@@ -4818,7 +4984,7 @@ static int cxgb4_iov_configure(struct pci_dev *pdev, int num_vfs)
 	/* Check if cxgb4 is the MASTER and fw is initialized */
 	if (!(pcie_fw & PCIE_FW_INIT_F) ||
 	    !(pcie_fw & PCIE_FW_MASTER_VLD_F) ||
-	    PCIE_FW_MASTER_G(pcie_fw) != 4) {
+	    PCIE_FW_MASTER_G(pcie_fw) != CXGB4_UNIFIED_PF) {
 		dev_warn(&pdev->dev,
 			 "cxgb4 driver needs to be MASTER to support SRIOV\n");
 		return -EOPNOTSUPP;
@@ -4830,46 +4996,132 @@ static int cxgb4_iov_configure(struct pci_dev *pdev, int num_vfs)
 	if (current_vfs && pci_vfs_assigned(pdev)) {
 		dev_err(&pdev->dev,
 			"Cannot modify SR-IOV while VFs are assigned\n");
-		num_vfs = current_vfs;
-		return num_vfs;
+		return current_vfs;
 	}
-
-	/* Disable SRIOV when zero is passed.
-	 * One needs to disable SRIOV before modifying it, else
-	 * stack throws the below warning:
-	 * " 'n' VFs already enabled. Disable before enabling 'm' VFs."
+	/* Note that the upper-level code ensures that we're never called with
+	 * a non-zero "num_vfs" when we already have VFs instantiated.  But
+	 * it never hurts to code defensively.
 	 */
+	if (num_vfs != 0 && current_vfs != 0)
+		return -EBUSY;
+
+	/* Nothing to do for no change. */
+	if (num_vfs == current_vfs)
+		return num_vfs;
+
+	/* Disable SRIOV when zero is passed. */
 	if (!num_vfs) {
 		pci_disable_sriov(pdev);
-		if (adap->port[0]) {
-			unregister_netdev(adap->port[0]);
-			adap->port[0] = NULL;
-		}
+		/* free VF Management Interface */
+		unregister_netdev(adap->port[0]);
+		free_netdev(adap->port[0]);
+		adap->port[0] = NULL;
+
 		/* free VF resources */
+		adap->num_vfs = 0;
 		kfree(adap->vfinfo);
 		adap->vfinfo = NULL;
-		adap->num_vfs = 0;
-		return num_vfs;
+		return 0;
 	}
 
-	if (num_vfs != current_vfs) {
-		err = pci_enable_sriov(pdev, num_vfs);
+	if (!current_vfs) {
+		struct fw_pfvf_cmd port_cmd, port_rpl;
+		struct net_device *netdev;
+		unsigned int pmask, port;
+		struct pci_dev *pbridge;
+		struct port_info *pi;
+		char name[IFNAMSIZ];
+		u32 devcap2;
+		u16 flags;
+		int pos;
+
+		/* If we want to instantiate Virtual Functions, then our
+		 * parent bridge's PCI-E needs to support Alternative Routing
+		 * ID (ARI) because our VFs will show up at function offset 8
+		 * and above.
+		 */
+		pbridge = pdev->bus->self;
+		pos = pci_find_capability(pbridge, PCI_CAP_ID_EXP);
+		pci_read_config_word(pbridge, pos + PCI_EXP_FLAGS, &flags);
+		pci_read_config_dword(pbridge, pos + PCI_EXP_DEVCAP2, &devcap2);
+
+		if ((flags & PCI_EXP_FLAGS_VERS) < 2 ||
+		    !(devcap2 & PCI_EXP_DEVCAP2_ARI)) {
+			/* Our parent bridge does not support ARI so issue a
+			 * warning and skip instantiating the VFs.  They
+			 * won't be reachable.
+			 */
+			dev_warn(&pdev->dev, "Parent bridge %02x:%02x.%x doesn't support ARI; can't instantiate Virtual Functions\n",
+				 pbridge->bus->number, PCI_SLOT(pbridge->devfn),
+				 PCI_FUNC(pbridge->devfn));
+			return -ENOTSUPP;
+		}
+		memset(&port_cmd, 0, sizeof(port_cmd));
+		port_cmd.op_to_vfn = cpu_to_be32(FW_CMD_OP_V(FW_PFVF_CMD) |
+						 FW_CMD_REQUEST_F |
+						 FW_CMD_READ_F |
+						 FW_PFVF_CMD_PFN_V(adap->pf) |
+						 FW_PFVF_CMD_VFN_V(0));
+		port_cmd.retval_len16 = cpu_to_be32(FW_LEN16(port_cmd));
+		err = t4_wr_mbox(adap, adap->mbox, &port_cmd, sizeof(port_cmd),
+				 &port_rpl);
 		if (err)
 			return err;
+		pmask = FW_PFVF_CMD_PMASK_G(be32_to_cpu(port_rpl.type_to_neq));
+		port = ffs(pmask) - 1;
+		/* Allocate VF Management Interface. */
+		snprintf(name, IFNAMSIZ, "mgmtpf%d,%d", adap->adap_idx,
+			 adap->pf);
+		netdev = alloc_netdev(sizeof(struct port_info),
+				      name, NET_NAME_UNKNOWN, cxgb4_mgmt_setup);
+		if (!netdev)
+			return -ENOMEM;
 
-		adap->num_vfs = num_vfs;
-		err = config_mgmt_dev(pdev);
-		if (err)
+		pi = netdev_priv(netdev);
+		pi->adapter = adap;
+		pi->lport = port;
+		pi->tx_chan = port;
+		SET_NETDEV_DEV(netdev, &pdev->dev);
+
+		adap->port[0] = netdev;
+		pi->port_id = 0;
+
+		err = register_netdev(adap->port[0]);
+		if (err) {
+			pr_info("Unable to register VF mgmt netdev %s\n", name);
+			free_netdev(adap->port[0]);
+			adap->port[0] = NULL;
 			return err;
+		}
+		/* Allocate and set up VF Information. */
+		adap->vfinfo = kcalloc(pci_sriov_get_totalvfs(pdev),
+				       sizeof(struct vf_info), GFP_KERNEL);
+		if (!adap->vfinfo) {
+			unregister_netdev(adap->port[0]);
+			free_netdev(adap->port[0]);
+			adap->port[0] = NULL;
+			return -ENOMEM;
+		}
+		cxgb4_mgmt_fill_vf_station_mac_addr(adap);
+	}
+	/* Instantiate the requested number of VFs. */
+	err = pci_enable_sriov(pdev, num_vfs);
+	if (err) {
+		pr_info("Unable to instantiate %d VFs\n", num_vfs);
+		if (!current_vfs) {
+			unregister_netdev(adap->port[0]);
+			free_netdev(adap->port[0]);
+			adap->port[0] = NULL;
+			kfree(adap->vfinfo);
+			adap->vfinfo = NULL;
+		}
+		return err;
 	}
 
-	adap->vfinfo = kcalloc(adap->num_vfs,
-			       sizeof(struct vf_info), GFP_KERNEL);
-	if (adap->vfinfo)
-		fill_vf_station_mac_addr(adap);
+	adap->num_vfs = num_vfs;
 	return num_vfs;
 }
-#endif
+#endif /* CONFIG_PCI_IOV */
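
The rewritten cxgb4_iov_configure() is the driver's pci_driver.sriov_configure hook, so the whole allocate-management-netdev/enable-VFs sequence is driven from sysfs. A sketch of the wiring (cxgb4's real pci_driver, declared near the end of this file, sets more fields; the device address below is illustrative):

	static struct pci_driver example_driver = {
		.name            = KBUILD_MODNAME,
		.probe           = init_one,
		.remove          = remove_one,
		.sriov_configure = cxgb4_iov_configure,
	};

	/* Userspace then drives it with, e.g.:
	 *   echo 4 > /sys/bus/pci/devices/0000:01:00.4/sriov_numvfs
	 *   echo 0 > /sys/bus/pci/devices/0000:01:00.4/sriov_numvfs
	 */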
 
 static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
 {
@@ -4882,9 +5134,6 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
 	u32 whoami, pl_rev;
 	enum chip_type chip;
 	static int adap_idx = 1;
-#ifdef CONFIG_PCI_IOV
-	u32 v, port_vec;
-#endif
 
 	printk_once(KERN_INFO "%s - version %s\n", DRV_DESC, DRV_VERSION);
 
@@ -4908,6 +5157,13 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
 		goto out_disable_device;
 	}
 
+	adapter = kzalloc(sizeof(*adapter), GFP_KERNEL);
+	if (!adapter) {
+		err = -ENOMEM;
+		goto out_unmap_bar0;
+	}
+
+	adapter->regs = regs;
 	err = t4_wait_dev_ready(regs);
 	if (err < 0)
 		goto out_unmap_bar0;
@@ -4918,13 +5174,29 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
 	chip = get_chip_type(pdev, pl_rev);
 	func = CHELSIO_CHIP_VERSION(chip) <= CHELSIO_T5 ?
 		SOURCEPF_G(whoami) : T6_SOURCEPF_G(whoami);
+
+	adapter->pdev = pdev;
+	adapter->pdev_dev = &pdev->dev;
+	adapter->name = pci_name(pdev);
+	adapter->mbox = func;
+	adapter->pf = func;
+	adapter->msg_enable = DFLT_MSG_ENABLE;
+	adapter->mbox_log = kzalloc(sizeof(*adapter->mbox_log) +
+				    (sizeof(struct mbox_cmd) *
+				     T4_OS_LOG_MBOX_CMDS),
+				    GFP_KERNEL);
+	if (!adapter->mbox_log) {
+		err = -ENOMEM;
+		goto out_free_adapter;
+	}
+	spin_lock_init(&adapter->mbox_lock);
+	INIT_LIST_HEAD(&adapter->mlist.list);
+	pci_set_drvdata(pdev, adapter);
+
 	if (func != ent->driver_data) {
-#ifndef CONFIG_PCI_IOV
-		iounmap(regs);
-#endif
 		pci_disable_device(pdev);
 		pci_save_state(pdev);        /* to restore SR-IOV later */
-		goto sriov;
+		return 0;
 	}
 
 	if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) {
@@ -4933,53 +5205,30 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
 		if (err) {
 			dev_err(&pdev->dev, "unable to obtain 64-bit DMA for "
 				"coherent allocations\n");
-			goto out_unmap_bar0;
+			goto out_free_adapter;
 		}
 	} else {
 		err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
 		if (err) {
 			dev_err(&pdev->dev, "no usable DMA configuration\n");
-			goto out_unmap_bar0;
+			goto out_free_adapter;
 		}
 	}
 
 	pci_enable_pcie_error_reporting(pdev);
 	pci_set_master(pdev);
 	pci_save_state(pdev);
-
-	adapter = kzalloc(sizeof(*adapter), GFP_KERNEL);
-	if (!adapter) {
-		err = -ENOMEM;
-		goto out_unmap_bar0;
-	}
 	adap_idx++;
-
 	adapter->workq = create_singlethread_workqueue("cxgb4");
 	if (!adapter->workq) {
 		err = -ENOMEM;
 		goto out_free_adapter;
 	}
 
-	adapter->mbox_log = kzalloc(sizeof(*adapter->mbox_log) +
-				    (sizeof(struct mbox_cmd) *
-				     T4_OS_LOG_MBOX_CMDS),
-				    GFP_KERNEL);
-	if (!adapter->mbox_log) {
-		err = -ENOMEM;
-		goto out_free_adapter;
-	}
 	adapter->mbox_log->size = T4_OS_LOG_MBOX_CMDS;
 
 	/* PCI device has been enabled */
 	adapter->flags |= DEV_ENABLED;
-
-	adapter->regs = regs;
-	adapter->pdev = pdev;
-	adapter->pdev_dev = &pdev->dev;
-	adapter->name = pci_name(pdev);
-	adapter->mbox = func;
-	adapter->pf = func;
-	adapter->msg_enable = DFLT_MSG_ENABLE;
 	memset(adapter->chan_map, 0xff, sizeof(adapter->chan_map));
 
 	/* If possible, we use PCIe Relaxed Ordering Attribute to deliver
@@ -5002,9 +5251,6 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
 	spin_lock_init(&adapter->stats_lock);
 	spin_lock_init(&adapter->tid_release_lock);
 	spin_lock_init(&adapter->win0_lock);
-	spin_lock_init(&adapter->mbox_lock);
-
-	INIT_LIST_HEAD(&adapter->mlist.list);
 
 	INIT_WORK(&adapter->tid_release_task, process_tid_release_list);
 	INIT_WORK(&adapter->db_full_task, process_db_full);
@@ -5080,6 +5326,10 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
 			NETIF_F_RXCSUM | NETIF_F_RXHASH |
 			NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX |
 			NETIF_F_HW_TC;
+
+		if (CHELSIO_CHIP_VERSION(chip) > CHELSIO_T5)
+			netdev->hw_features |= NETIF_F_GSO_UDP_TUNNEL;
+
 		if (highdma)
 			netdev->hw_features |= NETIF_F_HIGHDMA;
 		netdev->features |= netdev->hw_features;
@@ -5273,58 +5523,6 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
 	setup_fw_sge_queues(adapter);
 	return 0;
 
-sriov:
-#ifdef CONFIG_PCI_IOV
-	adapter = kzalloc(sizeof(*adapter), GFP_KERNEL);
-	if (!adapter) {
-		err = -ENOMEM;
-		goto free_pci_region;
-	}
-
-	adapter->pdev = pdev;
-	adapter->pdev_dev = &pdev->dev;
-	adapter->name = pci_name(pdev);
-	adapter->mbox = func;
-	adapter->pf = func;
-	adapter->regs = regs;
-	adapter->adap_idx = adap_idx;
-	adapter->mbox_log = kzalloc(sizeof(*adapter->mbox_log) +
-				    (sizeof(struct mbox_cmd) *
-				     T4_OS_LOG_MBOX_CMDS),
-				    GFP_KERNEL);
-	if (!adapter->mbox_log) {
-		err = -ENOMEM;
-		goto free_adapter;
-	}
-	spin_lock_init(&adapter->mbox_lock);
-	INIT_LIST_HEAD(&adapter->mlist.list);
-
-	v = FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DEV) |
-	    FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_DEV_PORTVEC);
-	err = t4_query_params(adapter, adapter->mbox, adapter->pf, 0, 1,
-			      &v, &port_vec);
-	if (err < 0) {
-		dev_err(adapter->pdev_dev, "Could not fetch port params\n");
-		goto free_mbox_log;
-	}
-
-	adapter->params.nports = hweight32(port_vec);
-	pci_set_drvdata(pdev, adapter);
-	return 0;
-
-free_mbox_log:
-	kfree(adapter->mbox_log);
- free_adapter:
-	kfree(adapter);
- free_pci_region:
-	iounmap(regs);
-	pci_disable_sriov(pdev);
-	pci_release_regions(pdev);
-	return err;
-#else
-	return 0;
-#endif
-
  out_free_dev:
 	free_some_resources(adapter);
 	if (adapter->flags & USING_MSIX)
@@ -5416,14 +5614,7 @@ static void remove_one(struct pci_dev *pdev)
 	}
 #ifdef CONFIG_PCI_IOV
 	else {
-		if (adapter->port[0])
-			unregister_netdev(adapter->port[0]);
-		iounmap(adapter->regs);
-		kfree(adapter->vfinfo);
-		kfree(adapter->mbox_log);
-		kfree(adapter);
-		pci_disable_sriov(pdev);
-		pci_release_regions(pdev);
+		cxgb4_iov_configure(adapter->pdev, 0);
 	}
 #endif
 }
@@ -5467,18 +5658,6 @@ static void shutdown_one(struct pci_dev *pdev)
 		if (adapter->flags & FW_OK)
 			t4_fw_bye(adapter, adapter->mbox);
 	}
-#ifdef CONFIG_PCI_IOV
-	else {
-		if (adapter->port[0])
-			unregister_netdev(adapter->port[0]);
-		iounmap(adapter->regs);
-		kfree(adapter->vfinfo);
-		kfree(adapter->mbox_log);
-		kfree(adapter);
-		pci_disable_sriov(pdev);
-		pci_release_regions(pdev);
-	}
-#endif
 }
 
 static struct pci_driver cxgb4_driver = {
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.c
index a452d5a1b0f3..36563364bae7 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.c
@@ -43,7 +43,7 @@
 
 #define STATS_CHECK_PERIOD (HZ / 2)
 
-struct ch_tc_pedit_fields pedits[] = {
+static struct ch_tc_pedit_fields pedits[] = {
 	PEDIT_FIELDS(ETH_, DMAC_31_0, 4, dmac, 0),
 	PEDIT_FIELDS(ETH_, DMAC_47_32, 2, dmac, 4),
 	PEDIT_FIELDS(ETH_, SMAC_15_0, 2, smac, 0),
@@ -408,9 +408,7 @@ static void cxgb4_process_flow_actions(struct net_device *in,
 		} else if (is_tcf_gact_shot(a)) {
 			fs->action = FILTER_DROP;
 		} else if (is_tcf_mirred_egress_redirect(a)) {
-			int ifindex = tcf_mirred_ifindex(a);
-			struct net_device *out = __dev_get_by_index(dev_net(in),
-								    ifindex);
+			struct net_device *out = tcf_mirred_dev(a);
 			struct port_info *pi = netdev_priv(out);
 
 			fs->action = FILTER_SWITCH;
@@ -585,14 +583,14 @@ static int cxgb4_validate_flow_actions(struct net_device *dev,
 			/* Do nothing */
 		} else if (is_tcf_mirred_egress_redirect(a)) {
 			struct adapter *adap = netdev2adap(dev);
-			struct net_device *n_dev;
-			unsigned int i, ifindex;
+			struct net_device *n_dev, *target_dev;
+			unsigned int i;
 			bool found = false;
 
-			ifindex = tcf_mirred_ifindex(a);
+			target_dev = tcf_mirred_dev(a);
 			for_each_port(adap, i) {
 				n_dev = adap->port[i];
-				if (ifindex == n_dev->ifindex) {
+				if (target_dev == n_dev) {
 					found = true;
 					break;
 				}
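
This hunk (and the u32 variant further down) switches from resolving the mirred target by ifindex to comparing net_device pointers directly: tcf_mirred_dev() returns the action's cached device, so no __dev_get_by_index() lookup is needed. The resulting pattern, distilled into a sketch:

	/* Is the mirred redirect target one of this adapter's own ports? */
	static bool redirect_target_is_local(struct adapter *adap,
					     const struct tc_action *a)
	{
		struct net_device *target = tcf_mirred_dev(a);
		unsigned int i;

		for_each_port(adap, i)
			if (adap->port[i] == target)
				return true;
		return false;
	}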
@@ -768,9 +766,7 @@ static void ch_flower_stats_handler(struct work_struct *work)
 
 	rhashtable_walk_enter(&adap->flower_tbl, &iter);
 	do {
-		flower_entry = ERR_PTR(rhashtable_walk_start(&iter));
-		if (IS_ERR(flower_entry))
-			goto walk_stop;
+		rhashtable_walk_start(&iter);
 
 		while ((flower_entry = rhashtable_walk_next(&iter)) &&
 		       !IS_ERR(flower_entry)) {
@@ -789,8 +785,9 @@ static void ch_flower_stats_handler(struct work_struct *work)
 				spin_unlock(&flower_entry->lock);
 			}
 		}
-walk_stop:
+
 		rhashtable_walk_stop(&iter);
+
 	} while (flower_entry == ERR_PTR(-EAGAIN));
 	rhashtable_walk_exit(&iter);
 	mod_timer(&adap->flower_stats_timer, jiffies + STATS_CHECK_PERIOD);
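
The stats-handler change tracks the rhashtable API: rhashtable_walk_start() no longer returns an error worth checking, and ERR_PTR(-EAGAIN) from rhashtable_walk_next() only means a concurrent resize interrupted the walk. The canonical loop this hunk converges on:

	rhashtable_walk_enter(&tbl, &iter);
	do {
		rhashtable_walk_start(&iter);
		while ((obj = rhashtable_walk_next(&iter)) && !IS_ERR(obj))
			process(obj);		/* hypothetical consumer */
		rhashtable_walk_stop(&iter);
	} while (obj == ERR_PTR(-EAGAIN));	/* table resized: restart */
	rhashtable_walk_exit(&iter);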
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_u32.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_u32.c
index cd0cd13a964d..ab174bcfbfb0 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_u32.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_u32.c
@@ -114,14 +114,14 @@ static int fill_action_fields(struct adapter *adap,
 
 		/* Re-direct to specified port in hardware. */
 		if (is_tcf_mirred_egress_redirect(a)) {
-			struct net_device *n_dev;
-			unsigned int i, index;
+			struct net_device *n_dev, *target_dev;
 			bool found = false;
+			unsigned int i;
 
-			index = tcf_mirred_ifindex(a);
+			target_dev = tcf_mirred_dev(a);
 			for_each_port(adap, i) {
 				n_dev = adap->port[i];
-				if (index == n_dev->ifindex) {
+				if (target_dev == n_dev) {
 					fs->action = FILTER_SWITCH;
 					fs->eport = i;
 					found = true;
diff --git a/drivers/net/ethernet/chelsio/cxgb4/sge.c b/drivers/net/ethernet/chelsio/cxgb4/sge.c
index 6c7b0ac0b48b..6e310a0da7c9 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/sge.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/sge.c
@@ -761,12 +761,19 @@ static inline unsigned int flits_to_desc(unsigned int n)
  *	Returns whether an Ethernet packet is small enough to fit as
  *	immediate data. Return value corresponds to headroom required.
  */
-static inline int is_eth_imm(const struct sk_buff *skb)
+static inline int is_eth_imm(const struct sk_buff *skb, unsigned int chip_ver)
 {
-	int hdrlen = skb_shinfo(skb)->gso_size ?
-			sizeof(struct cpl_tx_pkt_lso_core) : 0;
+	int hdrlen = 0;
 
-	hdrlen += sizeof(struct cpl_tx_pkt);
+	if (skb->encapsulation && skb_shinfo(skb)->gso_size &&
+	    chip_ver > CHELSIO_T5) {
+		hdrlen = sizeof(struct cpl_tx_tnl_lso);
+		hdrlen += sizeof(struct cpl_tx_pkt_core);
+	} else {
+		hdrlen = skb_shinfo(skb)->gso_size ?
+			 sizeof(struct cpl_tx_pkt_lso_core) : 0;
+		hdrlen += sizeof(struct cpl_tx_pkt);
+	}
 	if (skb->len <= MAX_IMM_TX_PKT_LEN - hdrlen)
 		return hdrlen;
 	return 0;
@@ -779,10 +786,11 @@ static inline int is_eth_imm(const struct sk_buff *skb)
  *	Returns the number of flits needed for a Tx WR for the given Ethernet
  *	packet, including the needed WR and CPL headers.
  */
-static inline unsigned int calc_tx_flits(const struct sk_buff *skb)
+static inline unsigned int calc_tx_flits(const struct sk_buff *skb,
+					 unsigned int chip_ver)
 {
 	unsigned int flits;
-	int hdrlen = is_eth_imm(skb);
+	int hdrlen = is_eth_imm(skb, chip_ver);
 
 	/* If the skb is small enough, we can pump it out as a work request
 	 * with only immediate data.  In that case we just have to have the
@@ -801,13 +809,20 @@ static inline unsigned int calc_tx_flits(const struct sk_buff *skb)
 	 * with an embedded TX Packet Write CPL message.
 	 */
 	flits = sgl_len(skb_shinfo(skb)->nr_frags + 1);
-	if (skb_shinfo(skb)->gso_size)
-		flits += (sizeof(struct fw_eth_tx_pkt_wr) +
-			  sizeof(struct cpl_tx_pkt_lso_core) +
-			  sizeof(struct cpl_tx_pkt_core)) / sizeof(__be64);
-	else
+	if (skb_shinfo(skb)->gso_size) {
+		if (skb->encapsulation && chip_ver > CHELSIO_T5)
+			hdrlen = sizeof(struct fw_eth_tx_pkt_wr) +
+				 sizeof(struct cpl_tx_tnl_lso);
+		else
+			hdrlen = sizeof(struct fw_eth_tx_pkt_wr) +
+				 sizeof(struct cpl_tx_pkt_lso_core);
+
+		hdrlen += sizeof(struct cpl_tx_pkt_core);
+		flits += (hdrlen / sizeof(__be64));
+	} else {
 		flits += (sizeof(struct fw_eth_tx_pkt_wr) +
 			  sizeof(struct cpl_tx_pkt_core)) / sizeof(__be64);
+	}
 	return flits;
 }
 
@@ -818,9 +833,10 @@ static inline unsigned int calc_tx_flits(const struct sk_buff *skb)
  *	Returns the number of Tx descriptors needed for the given Ethernet
  *	packet, including the needed WR and CPL headers.
  */
-static inline unsigned int calc_tx_descs(const struct sk_buff *skb)
+static inline unsigned int calc_tx_descs(const struct sk_buff *skb,
+					 unsigned int chip_ver)
 {
-	return flits_to_desc(calc_tx_flits(skb));
+	return flits_to_desc(calc_tx_flits(skb, chip_ver));
 }
 
 /**
@@ -1148,6 +1164,105 @@ cxgb_fcoe_offload(struct sk_buff *skb, struct adapter *adap,
 }
 #endif /* CONFIG_CHELSIO_T4_FCOE */
 
+/* Returns the tunnel type if the hardware supports offloading it.
+ * Called only on T6 and later chips (the callers check chip_ver > CHELSIO_T5).
+ */
+enum cpl_tx_tnl_lso_type cxgb_encap_offload_supported(struct sk_buff *skb)
+{
+	u8 l4_hdr = 0;
+	enum cpl_tx_tnl_lso_type tnl_type = TX_TNL_TYPE_OPAQUE;
+	struct port_info *pi = netdev_priv(skb->dev);
+	struct adapter *adapter = pi->adapter;
+
+	if (skb->inner_protocol_type != ENCAP_TYPE_ETHER ||
+	    skb->inner_protocol != htons(ETH_P_TEB))
+		return tnl_type;
+
+	switch (vlan_get_protocol(skb)) {
+	case htons(ETH_P_IP):
+		l4_hdr = ip_hdr(skb)->protocol;
+		break;
+	case htons(ETH_P_IPV6):
+		l4_hdr = ipv6_hdr(skb)->nexthdr;
+		break;
+	default:
+		return tnl_type;
+	}
+
+	switch (l4_hdr) {
+	case IPPROTO_UDP:
+		if (adapter->vxlan_port == udp_hdr(skb)->dest)
+			tnl_type = TX_TNL_TYPE_VXLAN;
+		else if (adapter->geneve_port == udp_hdr(skb)->dest)
+			tnl_type = TX_TNL_TYPE_GENEVE;
+		break;
+	default:
+		return tnl_type;
+	}
+
+	return tnl_type;
+}
+
+static inline void t6_fill_tnl_lso(struct sk_buff *skb,
+				   struct cpl_tx_tnl_lso *tnl_lso,
+				   enum cpl_tx_tnl_lso_type tnl_type)
+{
+	u32 val;
+	int in_eth_xtra_len;
+	int l3hdr_len = skb_network_header_len(skb);
+	int eth_xtra_len = skb_network_offset(skb) - ETH_HLEN;
+	const struct skb_shared_info *ssi = skb_shinfo(skb);
+	bool v6 = (ip_hdr(skb)->version == 6);
+
+	val = CPL_TX_TNL_LSO_OPCODE_V(CPL_TX_TNL_LSO) |
+	      CPL_TX_TNL_LSO_FIRST_F |
+	      CPL_TX_TNL_LSO_LAST_F |
+	      (v6 ? CPL_TX_TNL_LSO_IPV6OUT_F : 0) |
+	      CPL_TX_TNL_LSO_ETHHDRLENOUT_V(eth_xtra_len / 4) |
+	      CPL_TX_TNL_LSO_IPHDRLENOUT_V(l3hdr_len / 4) |
+	      (v6 ? 0 : CPL_TX_TNL_LSO_IPHDRCHKOUT_F) |
+	      CPL_TX_TNL_LSO_IPLENSETOUT_F |
+	      (v6 ? 0 : CPL_TX_TNL_LSO_IPIDINCOUT_F);
+	tnl_lso->op_to_IpIdSplitOut = htonl(val);
+
+	tnl_lso->IpIdOffsetOut = 0;
+
+	/* Get the tunnel header length */
+	val = skb_inner_mac_header(skb) - skb_mac_header(skb);
+	in_eth_xtra_len = skb_inner_network_header(skb) -
+			  skb_inner_mac_header(skb) - ETH_HLEN;
+
+	switch (tnl_type) {
+	case TX_TNL_TYPE_VXLAN:
+	case TX_TNL_TYPE_GENEVE:
+		tnl_lso->UdpLenSetOut_to_TnlHdrLen =
+			htons(CPL_TX_TNL_LSO_UDPCHKCLROUT_F |
+			CPL_TX_TNL_LSO_UDPLENSETOUT_F);
+		break;
+	default:
+		tnl_lso->UdpLenSetOut_to_TnlHdrLen = 0;
+		break;
+	}
+
+	tnl_lso->UdpLenSetOut_to_TnlHdrLen |=
+		 htons(CPL_TX_TNL_LSO_TNLHDRLEN_V(val) |
+		       CPL_TX_TNL_LSO_TNLTYPE_V(tnl_type));
+
+	tnl_lso->r1 = 0;
+
+	val = CPL_TX_TNL_LSO_ETHHDRLEN_V(in_eth_xtra_len / 4) |
+	      CPL_TX_TNL_LSO_IPV6_V(inner_ip_hdr(skb)->version == 6) |
+	      CPL_TX_TNL_LSO_IPHDRLEN_V(skb_inner_network_header_len(skb) / 4) |
+	      CPL_TX_TNL_LSO_TCPHDRLEN_V(inner_tcp_hdrlen(skb) / 4);
+	tnl_lso->Flow_to_TcpHdrLen = htonl(val);
+
+	tnl_lso->IpIdOffset = htons(0);
+
+	tnl_lso->IpIdSplit_to_Mss = htons(CPL_TX_TNL_LSO_MSS_V(ssi->gso_size));
+	tnl_lso->TCPSeqOffset = htonl(0);
+	tnl_lso->EthLenOffset_Size = htonl(CPL_TX_TNL_LSO_SIZE_V(skb->len));
+}
+
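A worked example for the tunnel-header length that t6_fill_tnl_lso() derives from skb_inner_mac_header(skb) - skb_mac_header(skb), assuming an untagged IPv4 VXLAN frame with no IP options:

	/* outer Ethernet 14 + outer IPv4 20 + UDP 8 + VXLAN 8 = 50 bytes,
	 * which is the value programmed via CPL_TX_TNL_LSO_TNLHDRLEN_V().
	 */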
 /**
  *	t4_eth_xmit - add a packet to an Ethernet Tx queue
  *	@skb: the packet
@@ -1171,6 +1286,9 @@ netdev_tx_t t4_eth_xmit(struct sk_buff *skb, struct net_device *dev)
 	bool immediate = false;
 	int len, max_pkt_len;
 	bool ptp_enabled = is_ptp_enabled(skb, dev);
+	unsigned int chip_ver;
+	enum cpl_tx_tnl_lso_type tnl_type = TX_TNL_TYPE_OPAQUE;
+
 #ifdef CONFIG_CHELSIO_T4_FCOE
 	int err;
 #endif /* CONFIG_CHELSIO_T4_FCOE */
@@ -1227,7 +1345,8 @@ out_free:	dev_kfree_skb_any(skb);
 	}
 #endif /* CONFIG_CHELSIO_T4_FCOE */
 
-	flits = calc_tx_flits(skb);
+	chip_ver = CHELSIO_CHIP_VERSION(adap->params.chip);
+	flits = calc_tx_flits(skb, chip_ver);
 	ndesc = flits_to_desc(flits);
 	credits = txq_avail(&q->q) - ndesc;
 
@@ -1241,9 +1360,12 @@ out_free:	dev_kfree_skb_any(skb);
 		return NETDEV_TX_BUSY;
 	}
 
-	if (is_eth_imm(skb))
+	if (is_eth_imm(skb, chip_ver))
 		immediate = true;
 
+	if (skb->encapsulation && chip_ver > CHELSIO_T5)
+		tnl_type = cxgb_encap_offload_supported(skb);
+
 	if (!immediate &&
 	    unlikely(cxgb4_map_skb(adap->pdev_dev, skb, addr) < 0)) {
 		q->mapping_err++;
@@ -1269,33 +1391,58 @@ out_free:	dev_kfree_skb_any(skb);
 		bool v6 = (ssi->gso_type & SKB_GSO_TCPV6) != 0;
 		int l3hdr_len = skb_network_header_len(skb);
 		int eth_xtra_len = skb_network_offset(skb) - ETH_HLEN;
+		struct cpl_tx_tnl_lso *tnl_lso = (void *)(wr + 1);
+
+		if (tnl_type)
+			len += sizeof(*tnl_lso);
+		else
+			len += sizeof(*lso);
 
-		len += sizeof(*lso);
 		wr->op_immdlen = htonl(FW_WR_OP_V(FW_ETH_TX_PKT_WR) |
 				       FW_WR_IMMDLEN_V(len));
-		lso->c.lso_ctrl = htonl(LSO_OPCODE_V(CPL_TX_PKT_LSO) |
-					LSO_FIRST_SLICE_F | LSO_LAST_SLICE_F |
-					LSO_IPV6_V(v6) |
-					LSO_ETHHDR_LEN_V(eth_xtra_len / 4) |
-					LSO_IPHDR_LEN_V(l3hdr_len / 4) |
-					LSO_TCPHDR_LEN_V(tcp_hdr(skb)->doff));
-		lso->c.ipid_ofst = htons(0);
-		lso->c.mss = htons(ssi->gso_size);
-		lso->c.seqno_offset = htonl(0);
-		if (is_t4(adap->params.chip))
-			lso->c.len = htonl(skb->len);
-		else
-			lso->c.len = htonl(LSO_T5_XFER_SIZE_V(skb->len));
-		cpl = (void *)(lso + 1);
+		if (tnl_type) {
+			struct iphdr *iph = ip_hdr(skb);
 
-		if (CHELSIO_CHIP_VERSION(adap->params.chip) <= CHELSIO_T5)
-			cntrl =	TXPKT_ETHHDR_LEN_V(eth_xtra_len);
-		else
-			cntrl = T6_TXPKT_ETHHDR_LEN_V(eth_xtra_len);
+			t6_fill_tnl_lso(skb, tnl_lso, tnl_type);
+			cpl = (void *)(tnl_lso + 1);
+			/* Driver is expected to compute partial checksum that
+			 * does not include the IP Total Length.
+			 */
+			if (iph->version == 4) {
+				iph->check = 0;
+				iph->tot_len = 0;
+				iph->check = (u16)(~ip_fast_csum((u8 *)iph,
+								 iph->ihl));
+			}
+			if (skb->ip_summed == CHECKSUM_PARTIAL)
+				cntrl = hwcsum(adap->params.chip, skb);
+		} else {
+			lso->c.lso_ctrl = htonl(LSO_OPCODE_V(CPL_TX_PKT_LSO) |
+					  LSO_FIRST_SLICE_F | LSO_LAST_SLICE_F |
+					  LSO_IPV6_V(v6) |
+					  LSO_ETHHDR_LEN_V(eth_xtra_len / 4) |
+					  LSO_IPHDR_LEN_V(l3hdr_len / 4) |
+					  LSO_TCPHDR_LEN_V(tcp_hdr(skb)->doff));
+			lso->c.ipid_ofst = htons(0);
+			lso->c.mss = htons(ssi->gso_size);
+			lso->c.seqno_offset = htonl(0);
+			if (is_t4(adap->params.chip))
+				lso->c.len = htonl(skb->len);
+			else
+				lso->c.len =
+					htonl(LSO_T5_XFER_SIZE_V(skb->len));
+			cpl = (void *)(lso + 1);
 
-		cntrl |= TXPKT_CSUM_TYPE_V(v6 ?
-					   TX_CSUM_TCPIP6 : TX_CSUM_TCPIP) |
-			 TXPKT_IPHDR_LEN_V(l3hdr_len);
+			if (CHELSIO_CHIP_VERSION(adap->params.chip)
+			    <= CHELSIO_T5)
+				cntrl =	TXPKT_ETHHDR_LEN_V(eth_xtra_len);
+			else
+				cntrl = T6_TXPKT_ETHHDR_LEN_V(eth_xtra_len);
+
+			cntrl |= TXPKT_CSUM_TYPE_V(v6 ?
+				 TX_CSUM_TCPIP6 : TX_CSUM_TCPIP) |
+				 TXPKT_IPHDR_LEN_V(l3hdr_len);
+		}
 		q->tso++;
 		q->tx_cso += ssi->gso_segs;
 	} else {
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
index 375ef86a84da..047609ef0515 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
@@ -195,9 +195,11 @@ static void t4_report_fw_error(struct adapter *adap)
 	u32 pcie_fw;
 
 	pcie_fw = t4_read_reg(adap, PCIE_FW_A);
-	if (pcie_fw & PCIE_FW_ERR_F)
+	if (pcie_fw & PCIE_FW_ERR_F) {
 		dev_err(adap->pdev_dev, "Firmware reports adapter error: %s\n",
 			reason[PCIE_FW_EVAL_G(pcie_fw)]);
+		adap->flags &= ~FW_OK;
+	}
 }
 
 /*
@@ -317,9 +319,9 @@ int t4_wr_mbox_meat_timeout(struct adapter *adap, int mbox, const void *cmd,
 	 * wait [for a while] till we're at the front [or bail out with an
 	 * EBUSY] ...
 	 */
-	spin_lock(&adap->mbox_lock);
+	spin_lock_bh(&adap->mbox_lock);
 	list_add_tail(&entry.list, &adap->mlist.list);
-	spin_unlock(&adap->mbox_lock);
+	spin_unlock_bh(&adap->mbox_lock);
 
 	delay_idx = 0;
 	ms = delay[0];
@@ -332,9 +334,9 @@ int t4_wr_mbox_meat_timeout(struct adapter *adap, int mbox, const void *cmd,
 		 */
 		pcie_fw = t4_read_reg(adap, PCIE_FW_A);
 		if (i > FW_CMD_MAX_TIMEOUT || (pcie_fw & PCIE_FW_ERR_F)) {
-			spin_lock(&adap->mbox_lock);
+			spin_lock_bh(&adap->mbox_lock);
 			list_del(&entry.list);
-			spin_unlock(&adap->mbox_lock);
+			spin_unlock_bh(&adap->mbox_lock);
 			ret = (pcie_fw & PCIE_FW_ERR_F) ? -ENXIO : -EBUSY;
 			t4_record_mbox(adap, cmd, size, access, ret);
 			return ret;
@@ -365,9 +367,9 @@ int t4_wr_mbox_meat_timeout(struct adapter *adap, int mbox, const void *cmd,
 	for (i = 0; v == MBOX_OWNER_NONE && i < 3; i++)
 		v = MBOWNER_G(t4_read_reg(adap, ctl_reg));
 	if (v != MBOX_OWNER_DRV) {
-		spin_lock(&adap->mbox_lock);
+		spin_lock_bh(&adap->mbox_lock);
 		list_del(&entry.list);
-		spin_unlock(&adap->mbox_lock);
+		spin_unlock_bh(&adap->mbox_lock);
 		ret = (v == MBOX_OWNER_FW) ? -EBUSY : -ETIMEDOUT;
 		t4_record_mbox(adap, cmd, size, access, ret);
 		return ret;
@@ -418,9 +420,9 @@ int t4_wr_mbox_meat_timeout(struct adapter *adap, int mbox, const void *cmd,
 			execute = i + ms;
 			t4_record_mbox(adap, cmd_rpl,
 				       MBOX_LEN, access, execute);
-			spin_lock(&adap->mbox_lock);
+			spin_lock_bh(&adap->mbox_lock);
 			list_del(&entry.list);
-			spin_unlock(&adap->mbox_lock);
+			spin_unlock_bh(&adap->mbox_lock);
 			return -FW_CMD_RETVAL_G((int)res);
 		}
 	}
@@ -430,9 +432,9 @@ int t4_wr_mbox_meat_timeout(struct adapter *adap, int mbox, const void *cmd,
 	dev_err(adap->pdev_dev, "command %#x in mailbox %d timed out\n",
 		*(const u8 *)cmd, mbox);
 	t4_report_fw_error(adap);
-	spin_lock(&adap->mbox_lock);
+	spin_lock_bh(&adap->mbox_lock);
 	list_del(&entry.list);
-	spin_unlock(&adap->mbox_lock);
+	spin_unlock_bh(&adap->mbox_lock);
 	t4_fatal_err(adap);
 	return ret;
 }
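
The blanket spin_lock() -> spin_lock_bh() conversion in this function protects the mailbox list once the same lock may be taken from bottom-half context: a process-context holder that leaves BHs enabled can self-deadlock against a softirq on the same CPU. The pattern being enforced, in miniature:

	spin_lock_bh(&adap->mbox_lock);		/* process context, BHs off */
	list_add_tail(&entry.list, &adap->mlist.list);
	spin_unlock_bh(&adap->mbox_lock);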
@@ -524,11 +526,14 @@ int t4_memory_rw(struct adapter *adap, int win, int mtype, u32 addr,
 	 * MEM_EDC1 = 1
 	 * MEM_MC   = 2 -- MEM_MC for chips with only 1 memory controller
 	 * MEM_MC1  = 3 -- for chips with 2 memory controllers (e.g. T5)
+	 * MEM_HMA  = 4
 	 */
 	edc_size  = EDRAM0_SIZE_G(t4_read_reg(adap, MA_EDRAM0_BAR_A));
-	if (mtype != MEM_MC1)
+	if (mtype == MEM_HMA) {
+		memoffset = 2 * (edc_size * 1024 * 1024);
+	} else if (mtype != MEM_MC1) {
 		memoffset = (mtype * (edc_size * 1024 * 1024));
-	else {
+	} else {
 		mc_size = EXT_MEM0_SIZE_G(t4_read_reg(adap,
 						      MA_EXT_MEMORY0_BAR_A));
 		memoffset = (MEM_MC0 * edc_size + mc_size) * 1024 * 1024;
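
The new MEM_HMA branch places host memory at the same MA offset as MC0, consistent with HMA standing in for the external memory controller on memory-free parts. The resulting offset map, with EDC0/EDC1 of edc_size MB each (a summary of the code above, not new behaviour):

	/* MEM_EDC0 (0): 0
	 * MEM_EDC1 (1): edc_size MB
	 * MEM_MC0  (2): 2 * edc_size MB
	 * MEM_MC1  (3): 2 * edc_size MB + mc_size MB
	 * MEM_HMA  (4): 2 * edc_size MB  (same base as MC0)
	 */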
@@ -4923,6 +4928,14 @@ void t4_intr_disable(struct adapter *adapter)
 	t4_set_reg_field(adapter, PL_INT_MAP0_A, 1 << pf, 0);
 }
 
+unsigned int t4_chip_rss_size(struct adapter *adap)
+{
+	if (CHELSIO_CHIP_VERSION(adap->params.chip) <= CHELSIO_T5)
+		return RSS_NENTRIES;
+	else
+		return T6_RSS_NENTRIES;
+}
+
 /**
  *	t4_config_rss_range - configure a portion of the RSS mapping table
  *	@adapter: the adapter
@@ -5061,10 +5074,11 @@ static int rd_rss_row(struct adapter *adap, int row, u32 *val)
  */
 int t4_read_rss(struct adapter *adapter, u16 *map)
 {
+	int i, ret, nentries;
 	u32 val;
-	int i, ret;
 
-	for (i = 0; i < RSS_NENTRIES / 2; ++i) {
+	nentries = t4_chip_rss_size(adapter);
+	for (i = 0; i < nentries / 2; ++i) {
 		ret = rd_rss_row(adapter, i, &val);
 		if (ret)
 			return ret;
@@ -5076,7 +5090,7 @@ int t4_read_rss(struct adapter *adapter, u16 *map)
 
 static unsigned int t4_use_ldst(struct adapter *adap)
 {
-	return (adap->flags & FW_OK) || !adap->use_bd;
+	return (adap->flags & FW_OK) && !adap->use_bd;
 }
 
 /**
@@ -6071,6 +6085,7 @@ const char *t4_get_port_type_description(enum fw_port_type port_type)
 		"CR2_QSFP",
 		"SFP28",
 		"KR_SFP28",
+		"KR_XLAUI"
 	};
 
 	if (port_type < ARRAY_SIZE(port_type_description))
@@ -6526,18 +6541,21 @@ void t4_sge_decode_idma_state(struct adapter *adapter, int state)
  *      t4_sge_ctxt_flush - flush the SGE context cache
  *      @adap: the adapter
  *      @mbox: mailbox to use for the FW command
+ *      @ctxt_type: Egress or Ingress
  *
  *      Issues a FW command through the given mailbox to flush the
  *      SGE context cache.
  */
-int t4_sge_ctxt_flush(struct adapter *adap, unsigned int mbox)
+int t4_sge_ctxt_flush(struct adapter *adap, unsigned int mbox, int ctxt_type)
 {
 	int ret;
 	u32 ldst_addrspace;
 	struct fw_ldst_cmd c;
 
 	memset(&c, 0, sizeof(c));
-	ldst_addrspace = FW_LDST_CMD_ADDRSPACE_V(FW_LDST_ADDRSPC_SGE_EGRC);
+	ldst_addrspace = FW_LDST_CMD_ADDRSPACE_V(ctxt_type == CTXT_EGRESS ?
+						 FW_LDST_ADDRSPC_SGE_EGRC :
+						 FW_LDST_ADDRSPC_SGE_INGC);
 	c.op_to_addrspace = cpu_to_be32(FW_CMD_OP_V(FW_LDST_CMD) |
 					FW_CMD_REQUEST_F | FW_CMD_READ_F |
 					ldst_addrspace);
@@ -7451,6 +7469,112 @@ int t4_set_rxmode(struct adapter *adap, unsigned int mbox, unsigned int viid,
 }
 
 /**
+ *	t4_free_raw_mac_filt - Frees a raw mac entry in mps tcam
+ *	@adap: the adapter
+ *	@viid: the VI id
+ *	@addr: the MAC address
+ *	@mask: the mask
+ *	@idx: index of the entry in mps tcam
+ *	@lookup_type: MAC address for inner (1) or outer (0) header
+ *	@port_id: the port index
+ *	@sleep_ok: call is allowed to sleep
+ *
+ *	Removes the mac entry at the specified index using raw mac interface.
+ *
+ *	Returns a negative error number on failure.
+ */
+int t4_free_raw_mac_filt(struct adapter *adap, unsigned int viid,
+			 const u8 *addr, const u8 *mask, unsigned int idx,
+			 u8 lookup_type, u8 port_id, bool sleep_ok)
+{
+	struct fw_vi_mac_cmd c;
+	struct fw_vi_mac_raw *p = &c.u.raw;
+	u32 val;
+
+	memset(&c, 0, sizeof(c));
+	c.op_to_viid = cpu_to_be32(FW_CMD_OP_V(FW_VI_MAC_CMD) |
+				   FW_CMD_REQUEST_F | FW_CMD_WRITE_F |
+				   FW_CMD_EXEC_V(0) |
+				   FW_VI_MAC_CMD_VIID_V(viid));
+	val = FW_CMD_LEN16_V(1) |
+	      FW_VI_MAC_CMD_ENTRY_TYPE_V(FW_VI_MAC_TYPE_RAW);
+	c.freemacs_to_len16 = cpu_to_be32(FW_VI_MAC_CMD_FREEMACS_V(0) |
+					  FW_CMD_LEN16_V(val));
+
+	p->raw_idx_pkd = cpu_to_be32(FW_VI_MAC_CMD_RAW_IDX_V(idx) |
+				     FW_VI_MAC_ID_BASED_FREE);
+
+	/* Lookup Type. Outer header: 0, Inner header: 1 */
+	p->data0_pkd = cpu_to_be32(DATALKPTYPE_V(lookup_type) |
+				   DATAPORTNUM_V(port_id));
+	/* Lookup mask and port mask */
+	p->data0m_pkd = cpu_to_be64(DATALKPTYPE_V(DATALKPTYPE_M) |
+				    DATAPORTNUM_V(DATAPORTNUM_M));
+
+	/* Copy the address and the mask */
+	memcpy((u8 *)&p->data1[0] + 2, addr, ETH_ALEN);
+	memcpy((u8 *)&p->data1m[0] + 2, mask, ETH_ALEN);
+
+	return t4_wr_mbox_meat(adap, adap->mbox, &c, sizeof(c), &c, sleep_ok);
+}
+
+/**
+ *	t4_alloc_raw_mac_filt - Adds a mac entry in mps tcam
+ *	@adap: the adapter
+ *	@viid: the VI id
+ *	@addr: the MAC address
+ *	@mask: the mask
+ *	@idx: index at which to add this entry
+ *	@lookup_type: MAC address for inner (1) or outer (0) header
+ *	@port_id: the port index
+ *	@sleep_ok: call is allowed to sleep
+ *
+ *	Adds the mac entry at the specified index using raw mac interface.
+ *
+ *	Returns a negative error number or the allocated index for this mac.
+ */
+int t4_alloc_raw_mac_filt(struct adapter *adap, unsigned int viid,
+			  const u8 *addr, const u8 *mask, unsigned int idx,
+			  u8 lookup_type, u8 port_id, bool sleep_ok)
+{
+	int ret = 0;
+	struct fw_vi_mac_cmd c;
+	struct fw_vi_mac_raw *p = &c.u.raw;
+	u32 val;
+
+	memset(&c, 0, sizeof(c));
+	c.op_to_viid = cpu_to_be32(FW_CMD_OP_V(FW_VI_MAC_CMD) |
+				   FW_CMD_REQUEST_F | FW_CMD_WRITE_F |
+				   FW_VI_MAC_CMD_VIID_V(viid));
+	val = FW_CMD_LEN16_V(1) |
+	      FW_VI_MAC_CMD_ENTRY_TYPE_V(FW_VI_MAC_TYPE_RAW);
+	c.freemacs_to_len16 = cpu_to_be32(val);
+
+	/* Specify that this is an inner mac address */
+	p->raw_idx_pkd = cpu_to_be32(FW_VI_MAC_CMD_RAW_IDX_V(idx));
+
+	/* Lookup Type. Outer header: 0, Inner header: 1 */
+	p->data0_pkd = cpu_to_be32(DATALKPTYPE_V(lookup_type) |
+				   DATAPORTNUM_V(port_id));
+	/* Lookup mask and port mask */
+	p->data0m_pkd = cpu_to_be64(DATALKPTYPE_V(DATALKPTYPE_M) |
+				    DATAPORTNUM_V(DATAPORTNUM_M));
+
+	/* Copy the address and the mask */
+	memcpy((u8 *)&p->data1[0] + 2, addr, ETH_ALEN);
+	memcpy((u8 *)&p->data1m[0] + 2, mask, ETH_ALEN);
+
+	ret = t4_wr_mbox_meat(adap, adap->mbox, &c, sizeof(c), &c, sleep_ok);
+	if (ret == 0) {
+		ret = FW_VI_MAC_CMD_RAW_IDX_G(be32_to_cpu(p->raw_idx_pkd));
+		if (ret != idx)
+			ret = -ENOMEM;
+	}
+
+	return ret;
+}
+
+/**
  *	t4_alloc_mac_filt - allocates exact-match filters for MAC addresses
  *	@adap: the adapter
  *	@mbox: mailbox to use for the FW command
@@ -8491,22 +8615,6 @@ found:
 	return 0;
 }
 
-static void set_pcie_completion_timeout(struct adapter *adapter, u8 range)
-{
-	u16 val;
-	u32 pcie_cap;
-
-	pcie_cap = pci_find_capability(adapter->pdev, PCI_CAP_ID_EXP);
-	if (pcie_cap) {
-		pci_read_config_word(adapter->pdev,
-				     pcie_cap + PCI_EXP_DEVCTL2, &val);
-		val &= ~PCI_EXP_DEVCTL2_COMP_TIMEOUT;
-		val |= range;
-		pci_write_config_word(adapter->pdev,
-				      pcie_cap + PCI_EXP_DEVCTL2, val);
-	}
-}
-
 /**
  *	t4_prep_adapter - prepare SW and HW for operation
  *	@adapter: the adapter
@@ -8592,8 +8700,9 @@ int t4_prep_adapter(struct adapter *adapter)
 	adapter->params.portvec = 1;
 	adapter->params.vpd.cclk = 50000;
 
-	/* Set pci completion timeout value to 4 seconds. */
-	set_pcie_completion_timeout(adapter, 0xd);
+	/* Set PCIe completion timeout to 4 seconds. */
+	pcie_capability_clear_and_set_word(adapter->pdev, PCI_EXP_DEVCTL2,
+					   PCI_EXP_DEVCTL2_COMP_TIMEOUT, 0xd);
 	return 0;
 }
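
The removed set_pcie_completion_timeout() is replaced by the PCI core's read-modify-write helper; per the PCIe spec encoding, 0xd selects the 4 s-13 s completion-timeout range, hence the "4 seconds" comment. What the helper does is roughly this (a sketch -- the real pcie_capability_*() accessors also handle devices without the capability):

	static void example_clear_and_set(struct pci_dev *pdev, u16 clear,
					  u16 set)
	{
		int pos = pci_find_capability(pdev, PCI_CAP_ID_EXP);
		u16 val;

		if (!pos)
			return;
		pci_read_config_word(pdev, pos + PCI_EXP_DEVCTL2, &val);
		val = (val & ~clear) | set;
		pci_write_config_word(pdev, pos + PCI_EXP_DEVCTL2, val);
	}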
 
@@ -9736,3 +9845,91 @@ int t4_sched_params(struct adapter *adapter, int type, int level, int mode,
 	return t4_wr_mbox_meat(adapter, adapter->mbox, &cmd, sizeof(cmd),
 			       NULL, 1);
 }
+
+/**
+ *	t4_i2c_rd - read I2C data from adapter
+ *	@adap: the adapter
+ *	@mbox: mailbox to use for the FW command
+ *	@port: Port number if per-port device; <0 if not
+ *	@devid: per-port device ID or absolute device ID
+ *	@offset: byte offset into device I2C space
+ *	@len: byte length of I2C space data
+ *	@buf: buffer in which to return I2C data
+ *
+ *	Reads the I2C data from the indicated device and location.
+ */
+int t4_i2c_rd(struct adapter *adap, unsigned int mbox, int port,
+	      unsigned int devid, unsigned int offset,
+	      unsigned int len, u8 *buf)
+{
+	struct fw_ldst_cmd ldst_cmd, ldst_rpl;
+	unsigned int i2c_max = sizeof(ldst_cmd.u.i2c.data);
+	int ret = 0;
+
+	if (len > I2C_PAGE_SIZE)
+		return -EINVAL;
+
+	/* Don't allow reads that span multiple pages */
+	if (offset < I2C_PAGE_SIZE && offset + len > I2C_PAGE_SIZE)
+		return -EINVAL;
+
+	memset(&ldst_cmd, 0, sizeof(ldst_cmd));
+	ldst_cmd.op_to_addrspace =
+		cpu_to_be32(FW_CMD_OP_V(FW_LDST_CMD) |
+			    FW_CMD_REQUEST_F |
+			    FW_CMD_READ_F |
+			    FW_LDST_CMD_ADDRSPACE_V(FW_LDST_ADDRSPC_I2C));
+	ldst_cmd.cycles_to_len16 = cpu_to_be32(FW_LEN16(ldst_cmd));
+	ldst_cmd.u.i2c.pid = (port < 0 ? 0xff : port);
+	ldst_cmd.u.i2c.did = devid;
+
+	while (len > 0) {
+		unsigned int i2c_len = (len < i2c_max) ? len : i2c_max;
+
+		ldst_cmd.u.i2c.boffset = offset;
+		ldst_cmd.u.i2c.blen = i2c_len;
+
+		ret = t4_wr_mbox(adap, mbox, &ldst_cmd, sizeof(ldst_cmd),
+				 &ldst_rpl);
+		if (ret)
+			break;
+
+		memcpy(buf, ldst_rpl.u.i2c.data, i2c_len);
+		offset += i2c_len;
+		buf += i2c_len;
+		len -= i2c_len;
+	}
+
+	return ret;
+}
+
+/**
+ *      t4_set_vlan_acl - Set a VLAN id for the specified VF
+ *      @adap: the adapter
+ *      @mbox: mailbox to use for the FW command
+ *      @vf: one of the VFs instantiated by the specified PF
+ *      @vlan: the VLAN id to be set
+ */
+int t4_set_vlan_acl(struct adapter *adap, unsigned int mbox, unsigned int vf,
+		    u16 vlan)
+{
+	struct fw_acl_vlan_cmd vlan_cmd;
+	unsigned int enable;
+
+	enable = (vlan ? FW_ACL_VLAN_CMD_EN_F : 0);
+	memset(&vlan_cmd, 0, sizeof(vlan_cmd));
+	vlan_cmd.op_to_vfn = cpu_to_be32(FW_CMD_OP_V(FW_ACL_VLAN_CMD) |
+					 FW_CMD_REQUEST_F |
+					 FW_CMD_WRITE_F |
+					 FW_CMD_EXEC_F |
+					 FW_ACL_VLAN_CMD_PFN_V(adap->pf) |
+					 FW_ACL_VLAN_CMD_VFN_V(vf));
+	vlan_cmd.en_to_len16 = cpu_to_be32(enable | FW_LEN16(vlan_cmd));
+	/* Drop all packets that do not match the VLAN id */
+	vlan_cmd.dropnovlan_fm = FW_ACL_VLAN_CMD_FM_F;
+	if (enable != 0) {
+		vlan_cmd.nvlan = 1;
+		vlan_cmd.vlanid[0] = cpu_to_be16(vlan);
+	}
+
+	return t4_wr_mbox(adap, mbox, &vlan_cmd, sizeof(vlan_cmd), NULL);
+}
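
The new t4_i2c_rd() pairs with the I2C constants added to t4_hw.h in this series; a plausible consumer (a sketch modelled on an ethtool module-EEPROM path, not part of this patch) reads the SFF-8472 compliance byte from the module's A0h address:

	static int example_read_sff8472_comp(struct adapter *adap,
					     struct port_info *pi, u8 *comp)
	{
		return t4_i2c_rd(adap, adap->mbox, pi->port_id,
				 I2C_DEV_ADDR_A0, SFF_8472_COMP_ADDR,
				 SFF_8472_COMP_LEN, comp);
	}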
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.h b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.h
index a964ed184356..361d5032c288 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.h
@@ -38,21 +38,22 @@
 #include <linux/types.h>
 
 enum {
-	NCHAN          = 4,     /* # of HW channels */
-	MAX_MTU        = 9600,  /* max MAC MTU, excluding header + FCS */
-	EEPROMSIZE     = 17408, /* Serial EEPROM physical size */
-	EEPROMVSIZE    = 32768, /* Serial EEPROM virtual address space size */
-	EEPROMPFSIZE   = 1024,  /* EEPROM writable area size for PFn, n>0 */
-	RSS_NENTRIES   = 2048,  /* # of entries in RSS mapping table */
-	TCB_SIZE       = 128,   /* TCB size */
-	NMTUS          = 16,    /* size of MTU table */
-	NCCTRL_WIN     = 32,    /* # of congestion control windows */
-	NTX_SCHED      = 8,     /* # of HW Tx scheduling queues */
-	PM_NSTATS      = 5,     /* # of PM stats */
-	T6_PM_NSTATS   = 7,     /* # of PM stats in T6 */
-	MBOX_LEN       = 64,    /* mailbox size in bytes */
-	TRACE_LEN      = 112,   /* length of trace data and mask */
-	FILTER_OPT_LEN = 36,    /* filter tuple width for optional components */
+	NCHAN           = 4,    /* # of HW channels */
+	MAX_MTU         = 9600, /* max MAC MTU, excluding header + FCS */
+	EEPROMSIZE      = 17408,/* Serial EEPROM physical size */
+	EEPROMVSIZE     = 32768,/* Serial EEPROM virtual address space size */
+	EEPROMPFSIZE    = 1024, /* EEPROM writable area size for PFn, n>0 */
+	RSS_NENTRIES    = 2048, /* # of entries in RSS mapping table */
+	T6_RSS_NENTRIES = 4096, /* # of entries in RSS mapping table */
+	TCB_SIZE        = 128,  /* TCB size */
+	NMTUS           = 16,   /* size of MTU table */
+	NCCTRL_WIN      = 32,   /* # of congestion control windows */
+	NTX_SCHED       = 8,    /* # of HW Tx scheduling queues */
+	PM_NSTATS       = 5,    /* # of PM stats */
+	T6_PM_NSTATS    = 7,    /* # of PM stats in T6 */
+	MBOX_LEN        = 64,   /* mailbox size in bytes */
+	TRACE_LEN       = 112,  /* length of trace data and mask */
+	FILTER_OPT_LEN  = 36,   /* filter tuple width for optional components */
 };
 
 enum {
@@ -70,7 +71,9 @@ enum {
 
 /* SGE context types */
 enum ctxt_type {
-	CTXT_FLM = 2,
+	CTXT_EGRESS,
+	CTXT_INGRESS,
+	CTXT_FLM,
 	CTXT_CNM,
 };
 
@@ -284,4 +287,14 @@ enum {
 #define SGE_TIMESTAMP_V(x) ((__u64)(x) << SGE_TIMESTAMP_S)
 #define SGE_TIMESTAMP_G(x) (((__u64)(x) >> SGE_TIMESTAMP_S) & SGE_TIMESTAMP_M)
 
+#define I2C_DEV_ADDR_A0		0xa0
+#define I2C_DEV_ADDR_A2		0xa2
+#define I2C_PAGE_SIZE		0x100
+#define SFP_DIAG_TYPE_ADDR	0x5c
+#define SFP_DIAG_TYPE_LEN	0x1
+#define SFF_8472_COMP_ADDR	0x5e
+#define SFF_8472_COMP_LEN	0x1
+#define SFF_REV_ADDR		0x1
+#define SFF_REV_LEN		0x1
+
 #endif /* __T4_HW_H */
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_msg.h b/drivers/net/ethernet/chelsio/cxgb4/t4_msg.h
index 7e12f241145b..d0db4427b77e 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/t4_msg.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/t4_msg.h
@@ -107,6 +107,7 @@ enum {
 
 	CPL_FW6_MSG           = 0xE0,
 	CPL_FW6_PLD           = 0xE1,
+	CPL_TX_TNL_LSO        = 0xEC,
 	CPL_TX_PKT_LSO        = 0xED,
 	CPL_TX_PKT_XT         = 0xEE,
 
@@ -1479,6 +1480,169 @@ struct ulp_txpkt {
 #define ULP_TXPKT_RO_V(x) ((x) << ULP_TXPKT_RO_S)
 #define ULP_TXPKT_RO_F ULP_TXPKT_RO_V(1U)
 
+enum cpl_tx_tnl_lso_type {
+	TX_TNL_TYPE_OPAQUE,
+	TX_TNL_TYPE_NVGRE,
+	TX_TNL_TYPE_VXLAN,
+	TX_TNL_TYPE_GENEVE,
+};
+
+struct cpl_tx_tnl_lso {
+	__be32 op_to_IpIdSplitOut;
+	__be16 IpIdOffsetOut;
+	__be16 UdpLenSetOut_to_TnlHdrLen;
+	__be64 r1;
+	__be32 Flow_to_TcpHdrLen;
+	__be16 IpIdOffset;
+	__be16 IpIdSplit_to_Mss;
+	__be32 TCPSeqOffset;
+	__be32 EthLenOffset_Size;
+	/* encapsulated CPL (TX_PKT_XT) follows here */
+};
+
+#define CPL_TX_TNL_LSO_OPCODE_S		24
+#define CPL_TX_TNL_LSO_OPCODE_M		0xff
+#define CPL_TX_TNL_LSO_OPCODE_V(x)      ((x) << CPL_TX_TNL_LSO_OPCODE_S)
+#define CPL_TX_TNL_LSO_OPCODE_G(x)      \
+	(((x) >> CPL_TX_TNL_LSO_OPCODE_S) & CPL_TX_TNL_LSO_OPCODE_M)
+
+#define CPL_TX_TNL_LSO_FIRST_S		23
+#define CPL_TX_TNL_LSO_FIRST_M		0x1
+#define CPL_TX_TNL_LSO_FIRST_V(x)	((x) << CPL_TX_TNL_LSO_FIRST_S)
+#define CPL_TX_TNL_LSO_FIRST_G(x)	\
+	(((x) >> CPL_TX_TNL_LSO_FIRST_S) & CPL_TX_TNL_LSO_FIRST_M)
+#define CPL_TX_TNL_LSO_FIRST_F		CPL_TX_TNL_LSO_FIRST_V(1U)
+
+#define CPL_TX_TNL_LSO_LAST_S		22
+#define CPL_TX_TNL_LSO_LAST_M		0x1
+#define CPL_TX_TNL_LSO_LAST_V(x)	((x) << CPL_TX_TNL_LSO_LAST_S)
+#define CPL_TX_TNL_LSO_LAST_G(x)	\
+	(((x) >> CPL_TX_TNL_LSO_LAST_S) & CPL_TX_TNL_LSO_LAST_M)
+#define CPL_TX_TNL_LSO_LAST_F		CPL_TX_TNL_LSO_LAST_V(1U)
+
+#define CPL_TX_TNL_LSO_ETHHDRLENXOUT_S	21
+#define CPL_TX_TNL_LSO_ETHHDRLENXOUT_M	0x1
+#define CPL_TX_TNL_LSO_ETHHDRLENXOUT_V(x) \
+	((x) << CPL_TX_TNL_LSO_ETHHDRLENXOUT_S)
+#define CPL_TX_TNL_LSO_ETHHDRLENXOUT_G(x) \
+	(((x) >> CPL_TX_TNL_LSO_ETHHDRLENXOUT_S) & \
+	 CPL_TX_TNL_LSO_ETHHDRLENXOUT_M)
+#define CPL_TX_TNL_LSO_ETHHDRLENXOUT_F CPL_TX_TNL_LSO_ETHHDRLENXOUT_V(1U)
+
+#define CPL_TX_TNL_LSO_IPV6OUT_S	20
+#define CPL_TX_TNL_LSO_IPV6OUT_M	0x1
+#define CPL_TX_TNL_LSO_IPV6OUT_V(x)	((x) << CPL_TX_TNL_LSO_IPV6OUT_S)
+#define CPL_TX_TNL_LSO_IPV6OUT_G(x)	\
+	(((x) >> CPL_TX_TNL_LSO_IPV6OUT_S) & CPL_TX_TNL_LSO_IPV6OUT_M)
+#define CPL_TX_TNL_LSO_IPV6OUT_F        CPL_TX_TNL_LSO_IPV6OUT_V(1U)
+
+#define CPL_TX_TNL_LSO_ETHHDRLEN_S	16
+#define CPL_TX_TNL_LSO_ETHHDRLEN_M	0xf
+#define CPL_TX_TNL_LSO_ETHHDRLEN_V(x)	((x) << CPL_TX_TNL_LSO_ETHHDRLEN_S)
+#define CPL_TX_TNL_LSO_ETHHDRLEN_G(x)	\
+	(((x) >> CPL_TX_TNL_LSO_ETHHDRLEN_S) & CPL_TX_TNL_LSO_ETHHDRLEN_M)
+
+#define CPL_TX_TNL_LSO_IPHDRLEN_S	4
+#define CPL_TX_TNL_LSO_IPHDRLEN_M	0xfff
+#define CPL_TX_TNL_LSO_IPHDRLEN_V(x)	((x) << CPL_TX_TNL_LSO_IPHDRLEN_S)
+#define CPL_TX_TNL_LSO_IPHDRLEN_G(x)    \
+	(((x) >> CPL_TX_TNL_LSO_IPHDRLEN_S) & CPL_TX_TNL_LSO_IPHDRLEN_M)
+
+#define CPL_TX_TNL_LSO_TCPHDRLEN_S	0
+#define CPL_TX_TNL_LSO_TCPHDRLEN_M	0xf
+#define CPL_TX_TNL_LSO_TCPHDRLEN_V(x)	((x) << CPL_TX_TNL_LSO_TCPHDRLEN_S)
+#define CPL_TX_TNL_LSO_TCPHDRLEN_G(x)   \
+	(((x) >> CPL_TX_TNL_LSO_TCPHDRLEN_S) & CPL_TX_TNL_LSO_TCPHDRLEN_M)
+
+#define CPL_TX_TNL_LSO_MSS_S            0
+#define CPL_TX_TNL_LSO_MSS_M            0x3fff
+#define CPL_TX_TNL_LSO_MSS_V(x)         ((x) << CPL_TX_TNL_LSO_MSS_S)
+#define CPL_TX_TNL_LSO_MSS_G(x)         \
+	(((x) >> CPL_TX_TNL_LSO_MSS_S) & CPL_TX_TNL_LSO_MSS_M)
+
+#define CPL_TX_TNL_LSO_SIZE_S		0
+#define CPL_TX_TNL_LSO_SIZE_M		0xfffffff
+#define CPL_TX_TNL_LSO_SIZE_V(x)	((x) << CPL_TX_TNL_LSO_SIZE_S)
+#define CPL_TX_TNL_LSO_SIZE_G(x)	\
+	(((x) >> CPL_TX_TNL_LSO_SIZE_S) & CPL_TX_TNL_LSO_SIZE_M)
+
+#define CPL_TX_TNL_LSO_ETHHDRLENOUT_S   16
+#define CPL_TX_TNL_LSO_ETHHDRLENOUT_M   0xf
+#define CPL_TX_TNL_LSO_ETHHDRLENOUT_V(x) \
+	((x) << CPL_TX_TNL_LSO_ETHHDRLENOUT_S)
+#define CPL_TX_TNL_LSO_ETHHDRLENOUT_G(x) \
+	(((x) >> CPL_TX_TNL_LSO_ETHHDRLENOUT_S) & CPL_TX_TNL_LSO_ETHHDRLENOUT_M)
+
+#define CPL_TX_TNL_LSO_IPHDRLENOUT_S    4
+#define CPL_TX_TNL_LSO_IPHDRLENOUT_M    0xfff
+#define CPL_TX_TNL_LSO_IPHDRLENOUT_V(x) ((x) << CPL_TX_TNL_LSO_IPHDRLENOUT_S)
+#define CPL_TX_TNL_LSO_IPHDRLENOUT_G(x) \
+	(((x) >> CPL_TX_TNL_LSO_IPHDRLENOUT_S) & CPL_TX_TNL_LSO_IPHDRLENOUT_M)
+
+#define CPL_TX_TNL_LSO_IPHDRCHKOUT_S    3
+#define CPL_TX_TNL_LSO_IPHDRCHKOUT_M    0x1
+#define CPL_TX_TNL_LSO_IPHDRCHKOUT_V(x) ((x) << CPL_TX_TNL_LSO_IPHDRCHKOUT_S)
+#define CPL_TX_TNL_LSO_IPHDRCHKOUT_G(x) \
+	(((x) >> CPL_TX_TNL_LSO_IPHDRCHKOUT_S) & CPL_TX_TNL_LSO_IPHDRCHKOUT_M)
+#define CPL_TX_TNL_LSO_IPHDRCHKOUT_F    CPL_TX_TNL_LSO_IPHDRCHKOUT_V(1U)
+
+#define CPL_TX_TNL_LSO_IPLENSETOUT_S	2
+#define CPL_TX_TNL_LSO_IPLENSETOUT_M	0x1
+#define CPL_TX_TNL_LSO_IPLENSETOUT_V(x) ((x) << CPL_TX_TNL_LSO_IPLENSETOUT_S)
+#define CPL_TX_TNL_LSO_IPLENSETOUT_G(x) \
+	(((x) >> CPL_TX_TNL_LSO_IPLENSETOUT_S) & CPL_TX_TNL_LSO_IPLENSETOUT_M)
+#define CPL_TX_TNL_LSO_IPLENSETOUT_F	CPL_TX_TNL_LSO_IPLENSETOUT_V(1U)
+
+#define CPL_TX_TNL_LSO_IPIDINCOUT_S	1
+#define CPL_TX_TNL_LSO_IPIDINCOUT_M	0x1
+#define CPL_TX_TNL_LSO_IPIDINCOUT_V(x)  ((x) << CPL_TX_TNL_LSO_IPIDINCOUT_S)
+#define CPL_TX_TNL_LSO_IPIDINCOUT_G(x)  \
+	(((x) >> CPL_TX_TNL_LSO_IPIDINCOUT_S) & CPL_TX_TNL_LSO_IPIDINCOUT_M)
+#define CPL_TX_TNL_LSO_IPIDINCOUT_F     CPL_TX_TNL_LSO_IPIDINCOUT_V(1U)
+
+#define CPL_TX_TNL_LSO_UDPCHKCLROUT_S   14
+#define CPL_TX_TNL_LSO_UDPCHKCLROUT_M   0x1
+#define CPL_TX_TNL_LSO_UDPCHKCLROUT_V(x) \
+	((x) << CPL_TX_TNL_LSO_UDPCHKCLROUT_S)
+#define CPL_TX_TNL_LSO_UDPCHKCLROUT_G(x) \
+	(((x) >> CPL_TX_TNL_LSO_UDPCHKCLROUT_S) & \
+	 CPL_TX_TNL_LSO_UDPCHKCLROUT_M)
+#define CPL_TX_TNL_LSO_UDPCHKCLROUT_F   CPL_TX_TNL_LSO_UDPCHKCLROUT_V(1U)
+
+#define CPL_TX_TNL_LSO_UDPLENSETOUT_S   15
+#define CPL_TX_TNL_LSO_UDPLENSETOUT_M   0x1
+#define CPL_TX_TNL_LSO_UDPLENSETOUT_V(x) \
+	((x) << CPL_TX_TNL_LSO_UDPLENSETOUT_S)
+#define CPL_TX_TNL_LSO_UDPLENSETOUT_G(x) \
+	(((x) >> CPL_TX_TNL_LSO_UDPLENSETOUT_S) & \
+	 CPL_TX_TNL_LSO_UDPLENSETOUT_M)
+#define CPL_TX_TNL_LSO_UDPLENSETOUT_F   CPL_TX_TNL_LSO_UDPLENSETOUT_V(1U)
+
+#define CPL_TX_TNL_LSO_TNLTYPE_S	12
+#define CPL_TX_TNL_LSO_TNLTYPE_M	0x3
+#define CPL_TX_TNL_LSO_TNLTYPE_V(x)	((x) << CPL_TX_TNL_LSO_TNLTYPE_S)
+#define CPL_TX_TNL_LSO_TNLTYPE_G(x)	\
+	(((x) >> CPL_TX_TNL_LSO_TNLTYPE_S) & CPL_TX_TNL_LSO_TNLTYPE_M)
+
+#define S_CPL_TX_TNL_LSO_ETHHDRLEN	16
+#define M_CPL_TX_TNL_LSO_ETHHDRLEN	0xf
+#define V_CPL_TX_TNL_LSO_ETHHDRLEN(x)	((x) << S_CPL_TX_TNL_LSO_ETHHDRLEN)
+#define G_CPL_TX_TNL_LSO_ETHHDRLEN(x)	\
+	(((x) >> S_CPL_TX_TNL_LSO_ETHHDRLEN) & M_CPL_TX_TNL_LSO_ETHHDRLEN)
+
+#define CPL_TX_TNL_LSO_TNLHDRLEN_S      0
+#define CPL_TX_TNL_LSO_TNLHDRLEN_M      0xfff
+#define CPL_TX_TNL_LSO_TNLHDRLEN_V(x)	((x) << CPL_TX_TNL_LSO_TNLHDRLEN_S)
+#define CPL_TX_TNL_LSO_TNLHDRLEN_G(x)   \
+	(((x) >> CPL_TX_TNL_LSO_TNLHDRLEN_S) & CPL_TX_TNL_LSO_TNLHDRLEN_M)
+
+#define CPL_TX_TNL_LSO_IPV6_S		20
+#define CPL_TX_TNL_LSO_IPV6_M		0x1
+#define CPL_TX_TNL_LSO_IPV6_V(x)	((x) << CPL_TX_TNL_LSO_IPV6_S)
+#define CPL_TX_TNL_LSO_IPV6_G(x)	\
+	(((x) >> CPL_TX_TNL_LSO_IPV6_S) & CPL_TX_TNL_LSO_IPV6_M)
+#define CPL_TX_TNL_LSO_IPV6_F		CPL_TX_TNL_LSO_IPV6_V(1U)
+
 #define ULP_TX_SC_MORE_S 23
 #define ULP_TX_SC_MORE_V(x) ((x) << ULP_TX_SC_MORE_S)
 #define ULP_TX_SC_MORE_F  ULP_TX_SC_MORE_V(1U)
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_pci_id_tbl.h b/drivers/net/ethernet/chelsio/cxgb4/t4_pci_id_tbl.h
index 60cf9e02de5d..51b18035d691 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/t4_pci_id_tbl.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/t4_pci_id_tbl.h
@@ -183,6 +183,7 @@ CH_PCI_DEVICE_ID_TABLE_DEFINE_BEGIN
 	CH_PCI_ID_TABLE_FENTRY(0x50a9), /* Custom T580-KR */
 	CH_PCI_ID_TABLE_FENTRY(0x50aa), /* Custom T580-CR */
 	CH_PCI_ID_TABLE_FENTRY(0x50ab), /* Custom T520-CR */
+	CH_PCI_ID_TABLE_FENTRY(0x50ac), /* Custom T540-BT */
 
 	/* T6 adapters:
 	 */
@@ -206,6 +207,7 @@ CH_PCI_DEVICE_ID_TABLE_DEFINE_BEGIN
 	CH_PCI_ID_TABLE_FENTRY(0x6084), /* Custom T64100-CR QSFP28 */
 	CH_PCI_ID_TABLE_FENTRY(0x6085), /* Custom T6240-SO */
 	CH_PCI_ID_TABLE_FENTRY(0x6086), /* Custom T6225-SO-CR */
+	CH_PCI_ID_TABLE_FENTRY(0x6087), /* Custom T6225-CR */
 CH_PCI_DEVICE_ID_TABLE_DEFINE_END;
 
 #endif /* __T4_PCI_ID_TBL_H__ */
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_regs.h b/drivers/net/ethernet/chelsio/cxgb4/t4_regs.h
index a7cfece72828..a6df73398d17 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/t4_regs.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/t4_regs.h
@@ -45,6 +45,9 @@
 #define PF_BASE(idx) (PF0_BASE + (idx) * PF_STRIDE)
 #define PF_REG(idx, reg) (PF_BASE(idx) + (reg))
 
+#define NUM_CIM_CTL_TSCH_CHANNEL_INSTANCES 4
+#define NUM_CIM_CTL_TSCH_CHANNEL_TSCH_CLASS_INSTANCES 16
+
 #define MYPORT_BASE 0x1c000
 #define MYPORT_REG(reg_addr) (MYPORT_BASE + (reg_addr))
 
@@ -961,6 +964,10 @@
 
 #define MA_EXT_MEMORY1_BAR_A 0x7808
 
+#define HMA_MUX_S    5
+#define HMA_MUX_V(x) ((x) << HMA_MUX_S)
+#define HMA_MUX_F    HMA_MUX_V(1U)
+
 #define EXT_MEM1_BASE_S    16
 #define EXT_MEM1_BASE_M    0xfffU
 #define EXT_MEM1_BASE_G(x) (((x) >> EXT_MEM1_BASE_S) & EXT_MEM1_BASE_M)
@@ -2504,6 +2511,28 @@
 #define MPS_RX_MAC_BG_PG_CNT0_A 0x11208
 #define MPS_RX_LPBK_BG_PG_CNT0_A 0x11218
 
+#define MPS_RX_VXLAN_TYPE_A 0x11234
+
+#define VXLAN_EN_S    16
+#define VXLAN_EN_V(x) ((x) << VXLAN_EN_S)
+#define VXLAN_EN_F    VXLAN_EN_V(1U)
+
+#define VXLAN_S    0
+#define VXLAN_M    0xffffU
+#define VXLAN_V(x) ((x) << VXLAN_S)
+#define VXLAN_G(x) (((x) >> VXLAN_S) & VXLAN_M)
+
+#define MPS_RX_GENEVE_TYPE_A 0x11238
+
+#define GENEVE_EN_S    16
+#define GENEVE_EN_V(x) ((x) << GENEVE_EN_S)
+#define GENEVE_EN_F    GENEVE_EN_V(1U)
+
+#define GENEVE_S    0
+#define GENEVE_M    0xffffU
+#define GENEVE_V(x) ((x) << GENEVE_S)
+#define GENEVE_G(x) (((x) >> GENEVE_S) & GENEVE_M)
+
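
The two registers above hold the UDP destination port the MPS parser matches for VXLAN and GENEVE RX classification, with bit 16 acting as the enable. A standalone sketch of composing the register value (4789 is the IANA-assigned VXLAN port; GENEVE's 6081 would be built the same way for MPS_RX_GENEVE_TYPE_A; the register write itself happens elsewhere in the driver, not in this hunk):

	#include <stdio.h>
	#include <assert.h>

	#define VXLAN_EN_S    16
	#define VXLAN_EN_V(x) ((x) << VXLAN_EN_S)
	#define VXLAN_EN_F    VXLAN_EN_V(1U)
	#define VXLAN_S    0
	#define VXLAN_M    0xffffU
	#define VXLAN_V(x) ((x) << VXLAN_S)
	#define VXLAN_G(x) (((x) >> VXLAN_S) & VXLAN_M)

	int main(void)
	{
		/* IANA-assigned VXLAN UDP port, plus the enable bit */
		unsigned int val = VXLAN_V(4789) | VXLAN_EN_F;

		assert(VXLAN_G(val) == 4789);
		assert(val & VXLAN_EN_F);
		printf("MPS_RX_VXLAN_TYPE value: 0x%08x\n", val);
		return 0;
	}
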
 #define MPS_CLS_TCAM_Y_L_A 0xf000
 #define MPS_CLS_TCAM_DATA0_A 0xf000
 #define MPS_CLS_TCAM_DATA1_A 0xf004
@@ -2530,8 +2559,14 @@
 
 #define DATAPORTNUM_S    12
 #define DATAPORTNUM_M    0xfU
+#define DATAPORTNUM_V(x) ((x) << DATAPORTNUM_S)
 #define DATAPORTNUM_G(x) (((x) >> DATAPORTNUM_S) & DATAPORTNUM_M)
 
+#define DATALKPTYPE_S    10
+#define DATALKPTYPE_M    0x3U
+#define DATALKPTYPE_V(x) ((x) << DATALKPTYPE_S)
+#define DATALKPTYPE_G(x) (((x) >> DATALKPTYPE_S) & DATALKPTYPE_M)
+
 #define DATADIPHIT_S    8
 #define DATADIPHIT_V(x) ((x) << DATADIPHIT_S)
 #define DATADIPHIT_F    DATADIPHIT_V(1U)
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h b/drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h
index be3658301832..0d83b4064a78 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h
@@ -835,6 +835,7 @@ enum fw_ldst_addrspc {
 	FW_LDST_ADDRSPC_MPS       = 0x0020,
 	FW_LDST_ADDRSPC_FUNC      = 0x0028,
 	FW_LDST_ADDRSPC_FUNC_PCIE = 0x0029,
+	FW_LDST_ADDRSPC_I2C       = 0x0038,
 };
 
 enum fw_ldst_mps_fid {
@@ -2066,6 +2067,7 @@ struct fw_vi_cmd {
 #define FW_VI_MAC_ADD_MAC		0x3FF
 #define FW_VI_MAC_ADD_PERSIST_MAC	0x3FE
 #define FW_VI_MAC_MAC_BASED_FREE	0x3FD
+#define FW_VI_MAC_ID_BASED_FREE		0x3FC
 #define FW_CLS_TCAM_NUM_ENTRIES		336
 
 enum fw_vi_mac_smac {
@@ -2082,6 +2084,13 @@ enum fw_vi_mac_result {
 	FW_VI_MAC_R_F_ACL_CHECK
 };
 
+enum fw_vi_mac_entry_types {
+	FW_VI_MAC_TYPE_EXACTMAC,
+	FW_VI_MAC_TYPE_HASHVEC,
+	FW_VI_MAC_TYPE_RAW,
+	FW_VI_MAC_TYPE_EXACTMAC_VNI,
+};
+
 struct fw_vi_mac_cmd {
 	__be32 op_to_viid;
 	__be32 freemacs_to_len16;
@@ -2093,6 +2102,13 @@ struct fw_vi_mac_cmd {
 		struct fw_vi_mac_hash {
 			__be64 hashvec;
 		} hash;
+		struct fw_vi_mac_raw {
+			__be32 raw_idx_pkd;
+			__be32 data0_pkd;
+			__be32 data1[2];
+			__be64 data0m_pkd;
+			__be32 data1m[2];
+		} raw;
 	} u;
 };
 
@@ -2102,6 +2118,12 @@ struct fw_vi_mac_cmd {
 #define FW_VI_MAC_CMD_FREEMACS_S	31
 #define FW_VI_MAC_CMD_FREEMACS_V(x)	((x) << FW_VI_MAC_CMD_FREEMACS_S)
 
+#define FW_VI_MAC_CMD_ENTRY_TYPE_S      23
+#define FW_VI_MAC_CMD_ENTRY_TYPE_M      0x7
+#define FW_VI_MAC_CMD_ENTRY_TYPE_V(x)   ((x) << FW_VI_MAC_CMD_ENTRY_TYPE_S)
+#define FW_VI_MAC_CMD_ENTRY_TYPE_G(x)	\
+	(((x) >> FW_VI_MAC_CMD_ENTRY_TYPE_S) & FW_VI_MAC_CMD_ENTRY_TYPE_M)
+
 #define FW_VI_MAC_CMD_HASHVECEN_S	23
 #define FW_VI_MAC_CMD_HASHVECEN_V(x)	((x) << FW_VI_MAC_CMD_HASHVECEN_S)
 #define FW_VI_MAC_CMD_HASHVECEN_F	FW_VI_MAC_CMD_HASHVECEN_V(1U)
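
Note that the new three-bit ENTRY_TYPE field starts at the same bit as the older single-bit HASHVECEN flag, so encoding FW_VI_MAC_TYPE_HASHVEC through the new field yields the same wire value as the legacy flag. A quick standalone check:

	#include <assert.h>

	enum fw_vi_mac_entry_types {
		FW_VI_MAC_TYPE_EXACTMAC,
		FW_VI_MAC_TYPE_HASHVEC,
		FW_VI_MAC_TYPE_RAW,
		FW_VI_MAC_TYPE_EXACTMAC_VNI,
	};

	#define FW_VI_MAC_CMD_ENTRY_TYPE_S	23
	#define FW_VI_MAC_CMD_ENTRY_TYPE_V(x)	((x) << FW_VI_MAC_CMD_ENTRY_TYPE_S)
	#define FW_VI_MAC_CMD_HASHVECEN_S	23
	#define FW_VI_MAC_CMD_HASHVECEN_V(x)	((x) << FW_VI_MAC_CMD_HASHVECEN_S)
	#define FW_VI_MAC_CMD_HASHVECEN_F	FW_VI_MAC_CMD_HASHVECEN_V(1U)

	int main(void)
	{
		assert(FW_VI_MAC_CMD_ENTRY_TYPE_V(FW_VI_MAC_TYPE_HASHVEC) ==
		       FW_VI_MAC_CMD_HASHVECEN_F);
		return 0;
	}
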
@@ -2128,6 +2150,12 @@ struct fw_vi_mac_cmd {
 #define FW_VI_MAC_CMD_IDX_G(x)	\
 	(((x) >> FW_VI_MAC_CMD_IDX_S) & FW_VI_MAC_CMD_IDX_M)
 
+#define FW_VI_MAC_CMD_RAW_IDX_S         16
+#define FW_VI_MAC_CMD_RAW_IDX_M         0xffff
+#define FW_VI_MAC_CMD_RAW_IDX_V(x)      ((x) << FW_VI_MAC_CMD_RAW_IDX_S)
+#define FW_VI_MAC_CMD_RAW_IDX_G(x)      \
+	(((x) >> FW_VI_MAC_CMD_RAW_IDX_S) & FW_VI_MAC_CMD_RAW_IDX_M)
+
 #define FW_RXMODE_MTU_NO_CHG	65535
 
 struct fw_vi_rxmode_cmd {
@@ -2332,14 +2360,22 @@ struct fw_acl_vlan_cmd {
 #define FW_ACL_VLAN_CMD_VFN_S		0
 #define FW_ACL_VLAN_CMD_VFN_V(x)	((x) << FW_ACL_VLAN_CMD_VFN_S)
 
-#define FW_ACL_VLAN_CMD_EN_S	31
-#define FW_ACL_VLAN_CMD_EN_V(x)	((x) << FW_ACL_VLAN_CMD_EN_S)
+#define FW_ACL_VLAN_CMD_EN_S		31
+#define FW_ACL_VLAN_CMD_EN_M		0x1
+#define FW_ACL_VLAN_CMD_EN_V(x)		((x) << FW_ACL_VLAN_CMD_EN_S)
+#define FW_ACL_VLAN_CMD_EN_G(x)         \
+	(((x) >> FW_ACL_VLAN_CMD_EN_S) & FW_ACL_VLAN_CMD_EN_M)
+#define FW_ACL_VLAN_CMD_EN_F            FW_ACL_VLAN_CMD_EN_V(1U)
 
 #define FW_ACL_VLAN_CMD_DROPNOVLAN_S	7
 #define FW_ACL_VLAN_CMD_DROPNOVLAN_V(x)	((x) << FW_ACL_VLAN_CMD_DROPNOVLAN_S)
 
-#define FW_ACL_VLAN_CMD_FM_S	6
-#define FW_ACL_VLAN_CMD_FM_V(x)	((x) << FW_ACL_VLAN_CMD_FM_S)
+#define FW_ACL_VLAN_CMD_FM_S		6
+#define FW_ACL_VLAN_CMD_FM_M		0x1
+#define FW_ACL_VLAN_CMD_FM_V(x)         ((x) << FW_ACL_VLAN_CMD_FM_S)
+#define FW_ACL_VLAN_CMD_FM_G(x)         \
+	(((x) >> FW_ACL_VLAN_CMD_FM_S) & FW_ACL_VLAN_CMD_FM_M)
+#define FW_ACL_VLAN_CMD_FM_F            FW_ACL_VLAN_CMD_FM_V(1U)
 
 /* old 16-bit port capabilities bitmap (fw_port_cap16_t) */
 enum fw_port_cap {
@@ -2835,6 +2871,7 @@ enum fw_port_type {
 	FW_PORT_TYPE_CR2_QSFP,
 	FW_PORT_TYPE_SFP28,
 	FW_PORT_TYPE_KR_SFP28,
+	FW_PORT_TYPE_KR_XLAUI,
 
 	FW_PORT_TYPE_NONE = FW_PORT_CMD_PTYPE_M
 };
diff --git a/drivers/net/ethernet/chelsio/cxgb4vf/adapter.h b/drivers/net/ethernet/chelsio/cxgb4vf/adapter.h
index 08c6ddb84a04..5883f09e3804 100644
--- a/drivers/net/ethernet/chelsio/cxgb4vf/adapter.h
+++ b/drivers/net/ethernet/chelsio/cxgb4vf/adapter.h
@@ -92,6 +92,7 @@ struct sge_rspq;
  */
 struct port_info {
 	struct adapter *adapter;	/* our adapter */
+	u32 vlan_id;			/* vlan id for VST */
 	u16 viid;			/* virtual interface ID */
 	s16 xact_addr_filt;		/* index of our MAC address filter */
 	u16 rss_size;			/* size of VI's RSS table slice */
diff --git a/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c b/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c
index b48361cfdc78..b7e79e64d2ed 100644
--- a/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c
+++ b/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c
@@ -791,6 +791,8 @@ static int cxgb4vf_open(struct net_device *dev)
 	if (err)
 		goto err_unwind;
 
+	pi->vlan_id = t4vf_get_vf_vlan_acl(adapter);
+
 	netif_tx_start_all_queues(dev);
 	set_bit(pi->port_id, &adapter->open_device_map);
 	return 0;
@@ -1229,7 +1231,8 @@ static int from_fw_port_mod_type(enum fw_port_type port_type,
 		else
 			return PORT_OTHER;
 	} else if (port_type == FW_PORT_TYPE_KR4_100G ||
-		   port_type == FW_PORT_TYPE_KR_SFP28) {
+		   port_type == FW_PORT_TYPE_KR_SFP28 ||
+		   port_type == FW_PORT_TYPE_KR_XLAUI) {
 		return PORT_NONE;
 	}
 
@@ -1323,6 +1326,13 @@ static void fw_caps_to_lmm(enum fw_port_type port_type,
 		SET_LMM(25000baseKR_Full);
 		break;
 
+	case FW_PORT_TYPE_KR_XLAUI:
+		SET_LMM(Backplane);
+		FW_CAPS_TO_LMM(SPEED_1G, 1000baseKX_Full);
+		FW_CAPS_TO_LMM(SPEED_10G, 10000baseKR_Full);
+		FW_CAPS_TO_LMM(SPEED_40G, 40000baseKR4_Full);
+		break;
+
 	case FW_PORT_TYPE_CR2_QSFP:
 		SET_LMM(FIBRE);
 		SET_LMM(50000baseSR2_Full);
diff --git a/drivers/net/ethernet/chelsio/cxgb4vf/sge.c b/drivers/net/ethernet/chelsio/cxgb4vf/sge.c
index 14d7e673c656..dfce5df7538e 100644
--- a/drivers/net/ethernet/chelsio/cxgb4vf/sge.c
+++ b/drivers/net/ethernet/chelsio/cxgb4vf/sge.c
@@ -1202,6 +1202,10 @@ int t4vf_eth_xmit(struct sk_buff *skb, struct net_device *dev)
 	BUG_ON(qidx >= pi->nqsets);
 	txq = &adapter->sge.ethtxq[pi->first_qset + qidx];
 
+	if (pi->vlan_id && !skb_vlan_tag_present(skb))
+		__vlan_hwaccel_put_tag(skb, cpu_to_be16(ETH_P_8021Q),
+				       pi->vlan_id);
+
 	/*
 	 * Take this opportunity to reclaim any TX Descriptors whose DMA
 	 * transfers have completed.
@@ -1570,6 +1574,7 @@ static void do_gro(struct sge_eth_rxq *rxq, const struct pkt_gl *gl,
 {
 	struct adapter *adapter = rxq->rspq.adapter;
 	struct sge *s = &adapter->sge;
+	struct port_info *pi;
 	int ret;
 	struct sk_buff *skb;
 
@@ -1586,8 +1591,9 @@ static void do_gro(struct sge_eth_rxq *rxq, const struct pkt_gl *gl,
 	skb->truesize += skb->data_len;
 	skb->ip_summed = CHECKSUM_UNNECESSARY;
 	skb_record_rx_queue(skb, rxq->rspq.idx);
+	pi = netdev_priv(skb->dev);
 
-	if (pkt->vlan_ex) {
+	if (pkt->vlan_ex && !pi->vlan_id) {
 		__vlan_hwaccel_put_tag(skb, cpu_to_be16(ETH_P_8021Q),
 					be16_to_cpu(pkt->vlan));
 		rxq->stats.vlan_ex++;
@@ -1620,6 +1626,7 @@ int t4vf_ethrx_handler(struct sge_rspq *rspq, const __be64 *rsp,
 	struct sge_eth_rxq *rxq = container_of(rspq, struct sge_eth_rxq, rspq);
 	struct adapter *adapter = rspq->adapter;
 	struct sge *s = &adapter->sge;
+	struct port_info *pi;
 
 	/*
 	 * If this is a good TCP packet and we have Generic Receive Offload
@@ -1644,6 +1651,7 @@ int t4vf_ethrx_handler(struct sge_rspq *rspq, const __be64 *rsp,
 	__skb_pull(skb, s->pktshift);
 	skb->protocol = eth_type_trans(skb, rspq->netdev);
 	skb_record_rx_queue(skb, rspq->idx);
+	pi = netdev_priv(skb->dev);
 	rxq->stats.pkts++;
 
 	if (csum_ok && !pkt->err_vec &&
@@ -1660,9 +1668,10 @@ int t4vf_ethrx_handler(struct sge_rspq *rspq, const __be64 *rsp,
 	} else
 		skb_checksum_none_assert(skb);
 
-	if (pkt->vlan_ex) {
+	if (pkt->vlan_ex && !pi->vlan_id) {
 		rxq->stats.vlan_ex++;
-		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), be16_to_cpu(pkt->vlan));
+		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
+				       be16_to_cpu(pkt->vlan));
 	}
 
 	netif_receive_skb(skb);
@@ -2619,8 +2628,8 @@ void t4vf_sge_stop(struct adapter *adapter)
 int t4vf_sge_init(struct adapter *adapter)
 {
 	struct sge_params *sge_params = &adapter->params.sge;
-	u32 fl0 = sge_params->sge_fl_buffer_size[0];
-	u32 fl1 = sge_params->sge_fl_buffer_size[1];
+	u32 fl_small_pg = sge_params->sge_fl_buffer_size[0];
+	u32 fl_large_pg = sge_params->sge_fl_buffer_size[1];
 	struct sge *s = &adapter->sge;
 
 	/*
@@ -2628,9 +2637,20 @@ int t4vf_sge_init(struct adapter *adapter)
 	 * the Physical Function Driver.  Ideally we should be able to deal
 	 * with _any_ configuration.  Practice is different ...
 	 */
-	if (fl0 != PAGE_SIZE || (fl1 != 0 && fl1 <= fl0)) {
+
+	/* We only bother using the Large Page logic if the Large Page Buffer
+	 * is larger than our Page Size Buffer.
+	 */
+	if (fl_large_pg <= fl_small_pg)
+		fl_large_pg = 0;
+
+	/* The Page Size Buffer must be exactly equal to our Page Size and the
+	 * Large Page Size Buffer should be 0 (per above) or a power of 2.
+	 */
+	if (fl_small_pg != PAGE_SIZE ||
+	    (fl_large_pg & (fl_large_pg - 1)) != 0) {
 		dev_err(adapter->pdev_dev, "bad SGE FL buffer sizes [%d, %d]\n",
-			fl0, fl1);
+			fl_small_pg, fl_large_pg);
 		return -EINVAL;
 	}
 	if ((sge_params->sge_control & RXPKTCPLMODE_F) !=
@@ -2642,8 +2662,8 @@ int t4vf_sge_init(struct adapter *adapter)
 	/*
 	 * Now translate the adapter parameters into our internal forms.
 	 */
-	if (fl1)
-		s->fl_pg_order = ilog2(fl1) - PAGE_SHIFT;
+	if (fl_large_pg)
+		s->fl_pg_order = ilog2(fl_large_pg) - PAGE_SHIFT;
 	s->stat_len = ((sge_params->sge_control & EGRSTATUSPAGESIZE_F)
 			? 128 : 64);
 	s->pktshift = PKTSHIFT_G(sge_params->sge_control);
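
Two arithmetic idioms carry this hunk: `(x & (x - 1)) == 0` accepts zero or any power of two, and `ilog2(size) - PAGE_SHIFT` turns a buffer size into a page-allocation order. A standalone sketch assuming 4 KiB pages (ilog2_ul() is a local stand-in for the kernel's ilog2()):

	#include <stdio.h>
	#include <assert.h>

	#define PAGE_SHIFT 12			/* assumes 4 KiB pages */

	/* Same test as in t4vf_sge_init(): true for 0 and for powers of two */
	static int zero_or_pow2(unsigned long x)
	{
		return (x & (x - 1)) == 0;
	}

	/* Stand-in for the kernel's ilog2() */
	static unsigned int ilog2_ul(unsigned long x)
	{
		unsigned int r = 0;

		while (x >>= 1)
			r++;
		return r;
	}

	int main(void)
	{
		unsigned long fl_large_pg = 65536;	/* example 64 KiB buffer */

		assert(zero_or_pow2(fl_large_pg));
		/* 64 KiB with 4 KiB pages -> fl_pg_order = 4 (16 pages) */
		printf("fl_pg_order = %u\n", ilog2_ul(fl_large_pg) - PAGE_SHIFT);
		return 0;
	}
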
diff --git a/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_common.h b/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_common.h
index 9cf9c56b0f73..712e8f0c71b4 100644
--- a/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_common.h
+++ b/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_common.h
@@ -413,5 +413,6 @@ int t4vf_handle_fw_rpl(struct adapter *, const __be64 *);
 int t4vf_prep_adapter(struct adapter *);
 int t4vf_get_vf_mac_acl(struct adapter *adapter, unsigned int pf,
 			unsigned int *naddr, u8 *addr);
+int t4vf_get_vf_vlan_acl(struct adapter *adapter);
 
 #endif /* __T4VF_COMMON_H__ */
diff --git a/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_hw.c b/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_hw.c
index 67aec59a14e6..798695bf8678 100644
--- a/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_hw.c
+++ b/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_hw.c
@@ -2147,3 +2147,31 @@ int t4vf_get_vf_mac_acl(struct adapter *adapter, unsigned int pf,
 
 	return ret;
 }
+
+/**
+ *	t4vf_get_vf_vlan_acl - Get the VLAN ID to be set to
+ *                             the VI of this VF.
+ *	@adapter: The adapter
+ *
+ *	Find the VLAN ID to be set on the VF's VI. The requested VLAN ID
+ *	is provided by the host OS via a callback into the PF driver.
+ */
+int t4vf_get_vf_vlan_acl(struct adapter *adapter)
+{
+	struct fw_acl_vlan_cmd cmd;
+	int vlan = 0;
+	int ret = 0;
+
+	cmd.op_to_vfn = htonl(FW_CMD_OP_V(FW_ACL_VLAN_CMD) |
+			      FW_CMD_REQUEST_F | FW_CMD_READ_F);
+
+	/* Note: Do not enable the ACL */
+	cmd.en_to_len16 = cpu_to_be32((unsigned int)FW_LEN16(cmd));
+
+	ret = t4vf_wr_mbox(adapter, &cmd, sizeof(cmd), &cmd);
+
+	if (!ret)
+		vlan = be16_to_cpu(cmd.vlanid[0]);
+
+	return vlan;
+}
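
Taken together with the sge.c changes above, this implements VST (virtual switch tagging) for the VF: when the PF administrator assigned a VLAN through the ACL, the VF tags every untagged TX frame with it and hides the tag from the stack on RX. A toy model of that policy (the names here are illustrative, not driver API):

	#include <stdio.h>
	#include <stdbool.h>

	struct frame { bool tagged; unsigned short tci; };

	static void vst_tx(struct frame *f, unsigned short vlan_id)
	{
		if (vlan_id && !f->tagged) {	/* mirrors t4vf_eth_xmit() */
			f->tagged = true;
			f->tci = vlan_id;
		}
	}

	static void vst_rx(const struct frame *f, unsigned short vlan_id)
	{
		if (f->tagged && !vlan_id)	/* mirrors t4vf_ethrx_handler() */
			printf("deliver tag %u to the stack\n", f->tci);
		else
			printf("tag hidden from the stack\n");
	}

	int main(void)
	{
		struct frame f = { false, 0 };

		vst_tx(&f, 100);	/* VST VLAN 100 assigned by the PF */
		vst_rx(&f, 100);	/* prints "tag hidden from the stack" */
		return 0;
	}
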
diff --git a/drivers/net/ethernet/cisco/enic/enic.h b/drivers/net/ethernet/cisco/enic/enic.h
index 6a9527004cb1..9b218f0e5a4c 100644
--- a/drivers/net/ethernet/cisco/enic/enic.h
+++ b/drivers/net/ethernet/cisco/enic/enic.h
@@ -43,6 +43,8 @@
 #define ENIC_CQ_MAX		(ENIC_WQ_MAX + ENIC_RQ_MAX)
 #define ENIC_INTR_MAX		(ENIC_CQ_MAX + 2)
 
+#define ENIC_WQ_NAPI_BUDGET	256
+
 #define ENIC_AIC_LARGE_PKT_DIFF	3
 
 struct enic_msix_entry {
diff --git a/drivers/net/ethernet/cisco/enic/enic_ethtool.c b/drivers/net/ethernet/cisco/enic/enic_ethtool.c
index 462d0ce51240..efb9333c7cf8 100644
--- a/drivers/net/ethernet/cisco/enic/enic_ethtool.c
+++ b/drivers/net/ethernet/cisco/enic/enic_ethtool.c
@@ -18,6 +18,7 @@
 
 #include <linux/netdevice.h>
 #include <linux/ethtool.h>
+#include <linux/net_tstamp.h>
 
 #include "enic_res.h"
 #include "enic.h"
@@ -578,6 +579,16 @@ static int enic_set_rxfh(struct net_device *netdev, const u32 *indir,
 	return __enic_set_rsskey(enic);
 }
 
+static int enic_get_ts_info(struct net_device *netdev,
+			    struct ethtool_ts_info *info)
+{
+	info->so_timestamping = SOF_TIMESTAMPING_TX_SOFTWARE |
+				SOF_TIMESTAMPING_RX_SOFTWARE |
+				SOF_TIMESTAMPING_SOFTWARE;
+
+	return 0;
+}
+
 static const struct ethtool_ops enic_ethtool_ops = {
 	.get_drvinfo = enic_get_drvinfo,
 	.get_msglevel = enic_get_msglevel,
@@ -597,6 +608,7 @@ static const struct ethtool_ops enic_ethtool_ops = {
 	.get_rxfh = enic_get_rxfh,
 	.set_rxfh = enic_set_rxfh,
 	.get_link_ksettings = enic_get_ksettings,
+	.get_ts_info = enic_get_ts_info,
 };
 
 void enic_set_ethtool_ops(struct net_device *netdev)
diff --git a/drivers/net/ethernet/cisco/enic/enic_main.c b/drivers/net/ethernet/cisco/enic/enic_main.c
index e130fb757e7b..f202ba72a811 100644
--- a/drivers/net/ethernet/cisco/enic/enic_main.c
+++ b/drivers/net/ethernet/cisco/enic/enic_main.c
@@ -856,6 +856,7 @@ static netdev_tx_t enic_hard_start_xmit(struct sk_buff *skb,
 
 	if (vnic_wq_desc_avail(wq) < MAX_SKB_FRAGS + ENIC_DESC_MAX_SPLITS)
 		netif_tx_stop_queue(txq);
+	skb_tx_timestamp(skb);
 	if (!skb->xmit_more || netif_xmit_stopped(txq))
 		vnic_wq_doorbell(wq);
 
@@ -1499,7 +1500,7 @@ static int enic_poll(struct napi_struct *napi, int budget)
 	unsigned int cq_wq = enic_cq_wq(enic, 0);
 	unsigned int intr = enic_legacy_io_intr();
 	unsigned int rq_work_to_do = budget;
-	unsigned int wq_work_to_do = -1; /* no limit */
+	unsigned int wq_work_to_do = ENIC_WQ_NAPI_BUDGET;
 	unsigned int  work_done, rq_work_done = 0, wq_work_done;
 	int err;
 
@@ -1597,7 +1598,7 @@ static int enic_poll_msix_wq(struct napi_struct *napi, int budget)
 	struct vnic_wq *wq = &enic->wq[wq_index];
 	unsigned int cq;
 	unsigned int intr;
-	unsigned int wq_work_to_do = -1; /* clean all desc possible */
+	unsigned int wq_work_to_do = ENIC_WQ_NAPI_BUDGET;
 	unsigned int wq_work_done;
 	unsigned int wq_irq;
 
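
The old initializer stored -1 in an unsigned int, which is UINT_MAX, i.e. "clean every completed descriptor in one poll"; bounding it to ENIC_WQ_NAPI_BUDGET (256) keeps a single NAPI pass from monopolizing the CPU. A one-liner showing what -1 really meant:

	#include <stdio.h>
	#include <limits.h>

	int main(void)
	{
		unsigned int wq_work_to_do = -1;	/* the old "no limit" */

		printf("old budget: %u (UINT_MAX = %u)\n",
		       wq_work_to_do, UINT_MAX);
		return 0;
	}
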
diff --git a/drivers/net/ethernet/cortina/Kconfig b/drivers/net/ethernet/cortina/Kconfig
new file mode 100644
index 000000000000..89bc4579724d
--- /dev/null
+++ b/drivers/net/ethernet/cortina/Kconfig
@@ -0,0 +1,23 @@
+# SPDX-License-Identifier: GPL-2.0
+# Cortina ethernet devices
+
+config NET_VENDOR_CORTINA
+	bool "Cortina Gemini devices"
+	default y
+	---help---
+	  If you have a network (Ethernet) card belonging to this class, say Y
+	  and read the Ethernet-HOWTO, available from
+	  <http://www.tldp.org/docs.html#howto>.
+
+if NET_VENDOR_CORTINA
+
+config GEMINI_ETHERNET
+	tristate "Gemini Gigabit Ethernet support"
+	depends on OF
+	depends on HAS_IOMEM
+	select PHYLIB
+	select CRC32
+	---help---
+	  This driver supports StorLink SL351x (Gemini) dual Gigabit Ethernet.
+
+endif # NET_VENDOR_CORTINA
diff --git a/drivers/net/ethernet/cortina/Makefile b/drivers/net/ethernet/cortina/Makefile
new file mode 100644
index 000000000000..4e86d398a89c
--- /dev/null
+++ b/drivers/net/ethernet/cortina/Makefile
@@ -0,0 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
+# Makefile for the Cortina Gemini network device drivers.
+
+obj-$(CONFIG_GEMINI_ETHERNET) += gemini.o
diff --git a/drivers/net/ethernet/cortina/gemini.c b/drivers/net/ethernet/cortina/gemini.c
new file mode 100644
index 000000000000..5eb999af2c40
--- /dev/null
+++ b/drivers/net/ethernet/cortina/gemini.c
@@ -0,0 +1,2593 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Ethernet device driver for Cortina Systems Gemini SoC
+ * Also known as the StorLink SL3512 and SL3516 (SL351x) or Lepus
+ * Net Engine and Gigabit Ethernet MAC (GMAC)
+ * This hardware contains a TCP Offload Engine (TOE) but currently the
+ * driver does not make use of it.
+ *
+ * Authors:
+ * Linus Walleij <linus.walleij@linaro.org>
+ * Tobias Waldvogel <tobias.waldvogel@gmail.com> (OpenWRT)
+ * Michał Mirosław <mirq-linux@rere.qmqm.pl>
+ * Paulius Zaleckas <paulius.zaleckas@gmail.com>
+ * Giuseppe De Robertis <Giuseppe.DeRobertis@ba.infn.it>
+ * Gary Chen & Ch Hsu Storlink Semiconductor
+ */
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/spinlock.h>
+#include <linux/slab.h>
+#include <linux/dma-mapping.h>
+#include <linux/cache.h>
+#include <linux/interrupt.h>
+#include <linux/reset.h>
+#include <linux/clk.h>
+#include <linux/of.h>
+#include <linux/of_mdio.h>
+#include <linux/of_net.h>
+#include <linux/of_platform.h>
+#include <linux/etherdevice.h>
+#include <linux/if_vlan.h>
+#include <linux/skbuff.h>
+#include <linux/phy.h>
+#include <linux/crc32.h>
+#include <linux/ethtool.h>
+#include <linux/tcp.h>
+#include <linux/u64_stats_sync.h>
+
+#include <linux/in.h>
+#include <linux/ip.h>
+#include <linux/ipv6.h>
+
+#include "gemini.h"
+
+#define DRV_NAME		"gmac-gemini"
+#define DRV_VERSION		"1.0"
+
+#define HSIZE_8			0x00
+#define HSIZE_16		0x01
+#define HSIZE_32		0x02
+
+#define HBURST_SINGLE		0x00
+#define HBURST_INCR		0x01
+#define HBURST_INCR4		0x02
+#define HBURST_INCR8		0x03
+
+#define HPROT_DATA_CACHE	BIT(0)
+#define HPROT_PRIVILIGED	BIT(1)
+#define HPROT_BUFFERABLE	BIT(2)
+#define HPROT_CACHABLE		BIT(3)
+
+#define DEFAULT_RX_COALESCE_NSECS	0
+#define DEFAULT_GMAC_RXQ_ORDER		9
+#define DEFAULT_GMAC_TXQ_ORDER		8
+#define DEFAULT_RX_BUF_ORDER		11
+#define DEFAULT_NAPI_WEIGHT		64
+#define TX_MAX_FRAGS			16
+#define TX_QUEUE_NUM			1	/* max: 6 */
+#define RX_MAX_ALLOC_ORDER		2
+
+#define GMAC0_IRQ0_2 (GMAC0_TXDERR_INT_BIT | GMAC0_TXPERR_INT_BIT | \
+		      GMAC0_RXDERR_INT_BIT | GMAC0_RXPERR_INT_BIT)
+#define GMAC0_IRQ0_TXQ0_INTS (GMAC0_SWTQ00_EOF_INT_BIT | \
+			      GMAC0_SWTQ00_FIN_INT_BIT)
+#define GMAC0_IRQ4_8 (GMAC0_MIB_INT_BIT | GMAC0_RX_OVERRUN_INT_BIT)
+
+#define GMAC_OFFLOAD_FEATURES (NETIF_F_SG | NETIF_F_IP_CSUM | \
+		NETIF_F_IPV6_CSUM | NETIF_F_RXCSUM | \
+		NETIF_F_TSO | NETIF_F_TSO_ECN | NETIF_F_TSO6)
+
+/**
+ * struct gmac_queue_page - page buffer per-page info
+ */
+struct gmac_queue_page {
+	struct page *page;
+	dma_addr_t mapping;
+};
+
+struct gmac_txq {
+	struct gmac_txdesc *ring;
+	struct sk_buff	**skb;
+	unsigned int	cptr;
+	unsigned int	noirq_packets;
+};
+
+struct gemini_ethernet;
+
+struct gemini_ethernet_port {
+	u8 id; /* 0 or 1 */
+
+	struct gemini_ethernet *geth;
+	struct net_device *netdev;
+	struct device *dev;
+	void __iomem *dma_base;
+	void __iomem *gmac_base;
+	struct clk *pclk;
+	struct reset_control *reset;
+	int irq;
+	__le32 mac_addr[3];
+
+	void __iomem		*rxq_rwptr;
+	struct gmac_rxdesc	*rxq_ring;
+	unsigned int		rxq_order;
+
+	struct napi_struct	napi;
+	struct hrtimer		rx_coalesce_timer;
+	unsigned int		rx_coalesce_nsecs;
+	unsigned int		freeq_refill;
+	struct gmac_txq		txq[TX_QUEUE_NUM];
+	unsigned int		txq_order;
+	unsigned int		irq_every_tx_packets;
+
+	dma_addr_t		rxq_dma_base;
+	dma_addr_t		txq_dma_base;
+
+	unsigned int		msg_enable;
+	spinlock_t		config_lock; /* Locks config register */
+
+	struct u64_stats_sync	tx_stats_syncp;
+	struct u64_stats_sync	rx_stats_syncp;
+	struct u64_stats_sync	ir_stats_syncp;
+
+	struct rtnl_link_stats64 stats;
+	u64			hw_stats[RX_STATS_NUM];
+	u64			rx_stats[RX_STATUS_NUM];
+	u64			rx_csum_stats[RX_CHKSUM_NUM];
+	u64			rx_napi_exits;
+	u64			tx_frag_stats[TX_MAX_FRAGS];
+	u64			tx_frags_linearized;
+	u64			tx_hw_csummed;
+};
+
+struct gemini_ethernet {
+	struct device *dev;
+	void __iomem *base;
+	struct gemini_ethernet_port *port0;
+	struct gemini_ethernet_port *port1;
+
+	spinlock_t	irq_lock; /* Locks IRQ-related registers */
+	unsigned int	freeq_order;
+	unsigned int	freeq_frag_order;
+	struct gmac_rxdesc *freeq_ring;
+	dma_addr_t	freeq_dma_base;
+	struct gmac_queue_page	*freeq_pages;
+	unsigned int	num_freeq_pages;
+	spinlock_t	freeq_lock; /* Locks queue from reentrance */
+};
+
+#define GMAC_STATS_NUM	( \
+	RX_STATS_NUM + RX_STATUS_NUM + RX_CHKSUM_NUM + 1 + \
+	TX_MAX_FRAGS + 2)
+
+static const char gmac_stats_strings[GMAC_STATS_NUM][ETH_GSTRING_LEN] = {
+	"GMAC_IN_DISCARDS",
+	"GMAC_IN_ERRORS",
+	"GMAC_IN_MCAST",
+	"GMAC_IN_BCAST",
+	"GMAC_IN_MAC1",
+	"GMAC_IN_MAC2",
+	"RX_STATUS_GOOD_FRAME",
+	"RX_STATUS_TOO_LONG_GOOD_CRC",
+	"RX_STATUS_RUNT_FRAME",
+	"RX_STATUS_SFD_NOT_FOUND",
+	"RX_STATUS_CRC_ERROR",
+	"RX_STATUS_TOO_LONG_BAD_CRC",
+	"RX_STATUS_ALIGNMENT_ERROR",
+	"RX_STATUS_TOO_LONG_BAD_ALIGN",
+	"RX_STATUS_RX_ERR",
+	"RX_STATUS_DA_FILTERED",
+	"RX_STATUS_BUFFER_FULL",
+	"RX_STATUS_11",
+	"RX_STATUS_12",
+	"RX_STATUS_13",
+	"RX_STATUS_14",
+	"RX_STATUS_15",
+	"RX_CHKSUM_IP_UDP_TCP_OK",
+	"RX_CHKSUM_IP_OK_ONLY",
+	"RX_CHKSUM_NONE",
+	"RX_CHKSUM_3",
+	"RX_CHKSUM_IP_ERR_UNKNOWN",
+	"RX_CHKSUM_IP_ERR",
+	"RX_CHKSUM_TCP_UDP_ERR",
+	"RX_CHKSUM_7",
+	"RX_NAPI_EXITS",
+	"TX_FRAGS[1]",
+	"TX_FRAGS[2]",
+	"TX_FRAGS[3]",
+	"TX_FRAGS[4]",
+	"TX_FRAGS[5]",
+	"TX_FRAGS[6]",
+	"TX_FRAGS[7]",
+	"TX_FRAGS[8]",
+	"TX_FRAGS[9]",
+	"TX_FRAGS[10]",
+	"TX_FRAGS[11]",
+	"TX_FRAGS[12]",
+	"TX_FRAGS[13]",
+	"TX_FRAGS[14]",
+	"TX_FRAGS[15]",
+	"TX_FRAGS[16+]",
+	"TX_FRAGS_LINEARIZED",
+	"TX_HW_CSUMMED",
+};
+
+static void gmac_dump_dma_state(struct net_device *netdev);
+
+static void gmac_update_config0_reg(struct net_device *netdev,
+				    u32 val, u32 vmask)
+{
+	struct gemini_ethernet_port *port = netdev_priv(netdev);
+	unsigned long flags;
+	u32 reg;
+
+	spin_lock_irqsave(&port->config_lock, flags);
+
+	reg = readl(port->gmac_base + GMAC_CONFIG0);
+	reg = (reg & ~vmask) | val;
+	writel(reg, port->gmac_base + GMAC_CONFIG0);
+
+	spin_unlock_irqrestore(&port->config_lock, flags);
+}
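
gmac_update_config0_reg() is a masked read-modify-write: bits inside vmask are replaced by val, everything else is preserved, and the spinlock keeps concurrent updaters from losing each other's bits. The core transform, as a standalone sketch with illustrative values:

	#include <stdio.h>
	#include <assert.h>

	/* Bits inside vmask come from val; all other bits are preserved */
	static unsigned int rmw(unsigned int reg, unsigned int val,
				unsigned int vmask)
	{
		return (reg & ~vmask) | val;
	}

	int main(void)
	{
		unsigned int reg = 0xff00ff00;

		/* replace the low byte with 0x5a, preserve everything else */
		reg = rmw(reg, 0x5a, 0xff);
		assert(reg == 0xff00ff5a);
		printf("reg = 0x%08x\n", reg);
		return 0;
	}
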
+
+static void gmac_enable_tx_rx(struct net_device *netdev)
+{
+	struct gemini_ethernet_port *port = netdev_priv(netdev);
+	unsigned long flags;
+	u32 reg;
+
+	spin_lock_irqsave(&port->config_lock, flags);
+
+	reg = readl(port->gmac_base + GMAC_CONFIG0);
+	reg &= ~CONFIG0_TX_RX_DISABLE;
+	writel(reg, port->gmac_base + GMAC_CONFIG0);
+
+	spin_unlock_irqrestore(&port->config_lock, flags);
+}
+
+static void gmac_disable_tx_rx(struct net_device *netdev)
+{
+	struct gemini_ethernet_port *port = netdev_priv(netdev);
+	unsigned long flags;
+	u32 val;
+
+	spin_lock_irqsave(&port->config_lock, flags);
+
+	val = readl(port->gmac_base + GMAC_CONFIG0);
+	val |= CONFIG0_TX_RX_DISABLE;
+	writel(val, port->gmac_base + GMAC_CONFIG0);
+
+	spin_unlock_irqrestore(&port->config_lock, flags);
+
+	mdelay(10);	/* let GMAC consume packet */
+}
+
+static void gmac_set_flow_control(struct net_device *netdev, bool tx, bool rx)
+{
+	struct gemini_ethernet_port *port = netdev_priv(netdev);
+	unsigned long flags;
+	u32 val;
+
+	spin_lock_irqsave(&port->config_lock, flags);
+
+	val = readl(port->gmac_base + GMAC_CONFIG0);
+	val &= ~CONFIG0_FLOW_CTL;
+	if (tx)
+		val |= CONFIG0_FLOW_TX;
+	if (rx)
+		val |= CONFIG0_FLOW_RX;
+	writel(val, port->gmac_base + GMAC_CONFIG0);
+
+	spin_unlock_irqrestore(&port->config_lock, flags);
+}
+
+static void gmac_speed_set(struct net_device *netdev)
+{
+	struct gemini_ethernet_port *port = netdev_priv(netdev);
+	struct phy_device *phydev = netdev->phydev;
+	union gmac_status status, old_status;
+	int pause_tx = 0;
+	int pause_rx = 0;
+
+	status.bits32 = readl(port->gmac_base + GMAC_STATUS);
+	old_status.bits32 = status.bits32;
+	status.bits.link = phydev->link;
+	status.bits.duplex = phydev->duplex;
+
+	switch (phydev->speed) {
+	case 1000:
+		status.bits.speed = GMAC_SPEED_1000;
+		if (phydev->interface == PHY_INTERFACE_MODE_RGMII)
+			status.bits.mii_rmii = GMAC_PHY_RGMII_1000;
+		netdev_info(netdev, "connect to RGMII @ 1Gbit\n");
+		break;
+	case 100:
+		status.bits.speed = GMAC_SPEED_100;
+		if (phydev->interface == PHY_INTERFACE_MODE_RGMII)
+			status.bits.mii_rmii = GMAC_PHY_RGMII_100_10;
+		netdev_info(netdev, "connect to RGMII @ 100 Mbit\n");
+		break;
+	case 10:
+		status.bits.speed = GMAC_SPEED_10;
+		if (phydev->interface == PHY_INTERFACE_MODE_RGMII)
+			status.bits.mii_rmii = GMAC_PHY_RGMII_100_10;
+		netdev_info(netdev, "connect to RGMII @ 10 Mbit\n");
+		break;
+	default:
+		netdev_warn(netdev, "Not supported PHY speed (%d)\n",
+			    phydev->speed);
+	}
+
+	if (phydev->duplex == DUPLEX_FULL) {
+		u16 lcladv = phy_read(phydev, MII_ADVERTISE);
+		u16 rmtadv = phy_read(phydev, MII_LPA);
+		u8 cap = mii_resolve_flowctrl_fdx(lcladv, rmtadv);
+
+		if (cap & FLOW_CTRL_RX)
+			pause_rx = 1;
+		if (cap & FLOW_CTRL_TX)
+			pause_tx = 1;
+	}
+
+	gmac_set_flow_control(netdev, pause_tx, pause_rx);
+
+	if (old_status.bits32 == status.bits32)
+		return;
+
+	if (netif_msg_link(port)) {
+		phy_print_status(phydev);
+		netdev_info(netdev, "link flow control: %s\n",
+			    phydev->pause
+			    ? (phydev->asym_pause ? "tx" : "both")
+			    : (phydev->asym_pause ? "rx" : "none")
+		);
+	}
+
+	gmac_disable_tx_rx(netdev);
+	writel(status.bits32, port->gmac_base + GMAC_STATUS);
+	gmac_enable_tx_rx(netdev);
+}
+
+static int gmac_setup_phy(struct net_device *netdev)
+{
+	struct gemini_ethernet_port *port = netdev_priv(netdev);
+	union gmac_status status = { .bits32 = 0 };
+	struct device *dev = port->dev;
+	struct phy_device *phy;
+
+	phy = of_phy_get_and_connect(netdev,
+				     dev->of_node,
+				     gmac_speed_set);
+	if (!phy)
+		return -ENODEV;
+	netdev->phydev = phy;
+
+	netdev_info(netdev, "connected to PHY \"%s\"\n",
+		    phydev_name(phy));
+	phy_attached_print(phy, "phy_id=0x%.8lx, phy_mode=%s\n",
+			   (unsigned long)phy->phy_id,
+			   phy_modes(phy->interface));
+
+	phy->supported &= PHY_GBIT_FEATURES;
+	phy->supported |= SUPPORTED_Asym_Pause | SUPPORTED_Pause;
+	phy->advertising = phy->supported;
+
+	/* set PHY interface type */
+	switch (phy->interface) {
+	case PHY_INTERFACE_MODE_MII:
+		netdev_info(netdev, "set GMAC0 to GMII mode, GMAC1 disabled\n");
+		status.bits.mii_rmii = GMAC_PHY_MII;
+		netdev_info(netdev, "connect to MII\n");
+		break;
+	case PHY_INTERFACE_MODE_GMII:
+		netdev_info(netdev, "set GMAC0 to GMII mode, GMAC1 disabled\n");
+		status.bits.mii_rmii = GMAC_PHY_GMII;
+		netdev_info(netdev, "connect to GMII\n");
+		break;
+	case PHY_INTERFACE_MODE_RGMII:
+		dev_info(dev, "set GMAC0 and GMAC1 to MII/RGMII mode\n");
+		status.bits.mii_rmii = GMAC_PHY_RGMII_100_10;
+		netdev_info(netdev, "connect to RGMII\n");
+		break;
+	default:
+		netdev_err(netdev, "Unsupported MII interface\n");
+		phy_disconnect(phy);
+		netdev->phydev = NULL;
+		return -EINVAL;
+	}
+	writel(status.bits32, port->gmac_base + GMAC_STATUS);
+
+	return 0;
+}
+
+static int gmac_pick_rx_max_len(int max_l3_len)
+{
+	/* index = CONFIG_MAXLEN_XXX values */
+	static const int max_len[8] = {
+		1536, 1518, 1522, 1542,
+		9212, 10236, 1518, 1518
+	};
+	int i, n = 5;
+
+	max_l3_len += ETH_HLEN + VLAN_HLEN;
+
+	if (max_l3_len > max_len[n])
+		return -1;
+
+	for (i = 0; i < 5; i++) {
+		if (max_len[i] >= max_l3_len && max_len[i] < max_len[n])
+			n = i;
+	}
+
+	return n;
+}
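
gmac_pick_rx_max_len() maps a maximum L3 payload onto the hardware's fixed CONFIG_MAXLEN table, picking the smallest of the first five entries that still fits the payload plus Ethernet and VLAN headers; index 5 (10236) only serves as the search bound. The same logic, runnable standalone:

	#include <stdio.h>

	#define ETH_HLEN  14
	#define VLAN_HLEN 4

	static int gmac_pick_rx_max_len(int max_l3_len)
	{
		/* index = CONFIG_MAXLEN_XXX values */
		static const int max_len[8] = {
			1536, 1518, 1522, 1542,
			9212, 10236, 1518, 1518
		};
		int i, n = 5;

		max_l3_len += ETH_HLEN + VLAN_HLEN;
		if (max_l3_len > max_len[n])
			return -1;
		for (i = 0; i < 5; i++) {
			if (max_len[i] >= max_l3_len && max_len[i] < max_len[n])
				n = i;
		}
		return n;
	}

	int main(void)
	{
		/* MTU 1500 -> 1518 bytes incl. VLAN header -> index 1 (1518) */
		printf("index for MTU 1500: %d\n", gmac_pick_rx_max_len(1500));
		/* MTU 9000 -> 9018 bytes -> index 4 (9212) */
		printf("index for MTU 9000: %d\n", gmac_pick_rx_max_len(9000));
		return 0;
	}
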
+
+static int gmac_init(struct net_device *netdev)
+{
+	struct gemini_ethernet_port *port = netdev_priv(netdev);
+	union gmac_config0 config0 = { .bits = {
+		.dis_tx = 1,
+		.dis_rx = 1,
+		.ipv4_rx_chksum = 1,
+		.ipv6_rx_chksum = 1,
+		.rx_err_detect = 1,
+		.rgmm_edge = 1,
+		.port0_chk_hwq = 1,
+		.port1_chk_hwq = 1,
+		.port0_chk_toeq = 1,
+		.port1_chk_toeq = 1,
+		.port0_chk_classq = 1,
+		.port1_chk_classq = 1,
+	} };
+	union gmac_ahb_weight ahb_weight = { .bits = {
+		.rx_weight = 1,
+		.tx_weight = 1,
+		.hash_weight = 1,
+		.pre_req = 0x1f,
+		.tq_dv_threshold = 0,
+	} };
+	union gmac_tx_wcr0 hw_weigh = { .bits = {
+		.hw_tq3 = 1,
+		.hw_tq2 = 1,
+		.hw_tq1 = 1,
+		.hw_tq0 = 1,
+	} };
+	union gmac_tx_wcr1 sw_weigh = { .bits = {
+		.sw_tq5 = 1,
+		.sw_tq4 = 1,
+		.sw_tq3 = 1,
+		.sw_tq2 = 1,
+		.sw_tq1 = 1,
+		.sw_tq0 = 1,
+	} };
+	union gmac_config1 config1 = { .bits = {
+		.set_threshold = 16,
+		.rel_threshold = 24,
+	} };
+	union gmac_config2 config2 = { .bits = {
+		.set_threshold = 16,
+		.rel_threshold = 32,
+	} };
+	union gmac_config3 config3 = { .bits = {
+		.set_threshold = 0,
+		.rel_threshold = 0,
+	} };
+	union gmac_config0 tmp;
+	u32 val;
+
+	config0.bits.max_len = gmac_pick_rx_max_len(netdev->mtu);
+	tmp.bits32 = readl(port->gmac_base + GMAC_CONFIG0);
+	config0.bits.reserved = tmp.bits.reserved;
+	writel(config0.bits32, port->gmac_base + GMAC_CONFIG0);
+	writel(config1.bits32, port->gmac_base + GMAC_CONFIG1);
+	writel(config2.bits32, port->gmac_base + GMAC_CONFIG2);
+	writel(config3.bits32, port->gmac_base + GMAC_CONFIG3);
+
+	val = readl(port->dma_base + GMAC_AHB_WEIGHT_REG);
+	writel(ahb_weight.bits32, port->dma_base + GMAC_AHB_WEIGHT_REG);
+
+	writel(hw_weigh.bits32,
+	       port->dma_base + GMAC_TX_WEIGHTING_CTRL_0_REG);
+	writel(sw_weigh.bits32,
+	       port->dma_base + GMAC_TX_WEIGHTING_CTRL_1_REG);
+
+	port->rxq_order = DEFAULT_GMAC_RXQ_ORDER;
+	port->txq_order = DEFAULT_GMAC_TXQ_ORDER;
+	port->rx_coalesce_nsecs = DEFAULT_RX_COALESCE_NSECS;
+
+	/* Mark every quarter of the queue a packet for interrupt
+	 * in order to be able to wake up the queue if it was stopped
+	 */
+	port->irq_every_tx_packets = 1 << (port->txq_order - 2);
+
+	return 0;
+}
+
+static void gmac_uninit(struct net_device *netdev)
+{
+	if (netdev->phydev)
+		phy_disconnect(netdev->phydev);
+}
+
+static int gmac_setup_txqs(struct net_device *netdev)
+{
+	struct gemini_ethernet_port *port = netdev_priv(netdev);
+	unsigned int n_txq = netdev->num_tx_queues;
+	struct gemini_ethernet *geth = port->geth;
+	size_t entries = 1 << port->txq_order;
+	struct gmac_txq *txq = port->txq;
+	struct gmac_txdesc *desc_ring;
+	size_t len = n_txq * entries;
+	struct sk_buff **skb_tab;
+	void __iomem *rwptr_reg;
+	unsigned int r;
+	int i;
+
+	rwptr_reg = port->dma_base + GMAC_SW_TX_QUEUE0_PTR_REG;
+
+	skb_tab = kcalloc(len, sizeof(*skb_tab), GFP_KERNEL);
+	if (!skb_tab)
+		return -ENOMEM;
+
+	desc_ring = dma_alloc_coherent(geth->dev, len * sizeof(*desc_ring),
+				       &port->txq_dma_base, GFP_KERNEL);
+
+	if (!desc_ring) {
+		kfree(skb_tab);
+		return -ENOMEM;
+	}
+
+	if (port->txq_dma_base & ~DMA_Q_BASE_MASK) {
+		dev_warn(geth->dev, "TX queue base it not aligned\n");
+		return -ENOMEM;
+	}
+
+	writel(port->txq_dma_base | port->txq_order,
+	       port->dma_base + GMAC_SW_TX_QUEUE_BASE_REG);
+
+	for (i = 0; i < n_txq; i++) {
+		txq->ring = desc_ring;
+		txq->skb = skb_tab;
+		txq->noirq_packets = 0;
+
+		r = readw(rwptr_reg);
+		rwptr_reg += 2;
+		writew(r, rwptr_reg);
+		rwptr_reg += 2;
+		txq->cptr = r;
+
+		txq++;
+		desc_ring += entries;
+		skb_tab += entries;
+	}
+
+	return 0;
+}
+
+static void gmac_clean_txq(struct net_device *netdev, struct gmac_txq *txq,
+			   unsigned int r)
+{
+	struct gemini_ethernet_port *port = netdev_priv(netdev);
+	unsigned int m = (1 << port->txq_order) - 1;
+	struct gemini_ethernet *geth = port->geth;
+	unsigned int c = txq->cptr;
+	union gmac_txdesc_0 word0;
+	union gmac_txdesc_1 word1;
+	unsigned int hwchksum = 0;
+	unsigned long bytes = 0;
+	struct gmac_txdesc *txd;
+	unsigned short nfrags;
+	unsigned int errs = 0;
+	unsigned int pkts = 0;
+	unsigned int word3;
+	dma_addr_t mapping;
+
+	if (c == r)
+		return;
+
+	while (c != r) {
+		txd = txq->ring + c;
+		word0 = txd->word0;
+		word1 = txd->word1;
+		mapping = txd->word2.buf_adr;
+		word3 = txd->word3.bits32;
+
+		dma_unmap_single(geth->dev, mapping,
+				 word0.bits.buffer_size, DMA_TO_DEVICE);
+
+		if (word3 & EOF_BIT)
+			dev_kfree_skb(txq->skb[c]);
+
+		c++;
+		c &= m;
+
+		if (!(word3 & SOF_BIT))
+			continue;
+
+		if (!word0.bits.status_tx_ok) {
+			errs++;
+			continue;
+		}
+
+		pkts++;
+		bytes += txd->word1.bits.byte_count;
+
+		if (word1.bits32 & TSS_CHECKUM_ENABLE)
+			hwchksum++;
+
+		nfrags = word0.bits.desc_count - 1;
+		if (nfrags) {
+			if (nfrags >= TX_MAX_FRAGS)
+				nfrags = TX_MAX_FRAGS - 1;
+
+			u64_stats_update_begin(&port->tx_stats_syncp);
+			port->tx_frag_stats[nfrags]++;
+			u64_stats_update_end(&port->tx_stats_syncp);
+		}
+	}
+
+	u64_stats_update_begin(&port->ir_stats_syncp);
+	port->stats.tx_errors += errs;
+	port->stats.tx_packets += pkts;
+	port->stats.tx_bytes += bytes;
+	port->tx_hw_csummed += hwchksum;
+	u64_stats_update_end(&port->ir_stats_syncp);
+
+	txq->cptr = c;
+}
+
+static void gmac_cleanup_txqs(struct net_device *netdev)
+{
+	struct gemini_ethernet_port *port = netdev_priv(netdev);
+	unsigned int n_txq = netdev->num_tx_queues;
+	struct gemini_ethernet *geth = port->geth;
+	void __iomem *rwptr_reg;
+	unsigned int r, i;
+
+	rwptr_reg = port->dma_base + GMAC_SW_TX_QUEUE0_PTR_REG;
+
+	for (i = 0; i < n_txq; i++) {
+		r = readw(rwptr_reg);
+		rwptr_reg += 2;
+		writew(r, rwptr_reg);
+		rwptr_reg += 2;
+
+		gmac_clean_txq(netdev, port->txq + i, r);
+	}
+	writel(0, port->dma_base + GMAC_SW_TX_QUEUE_BASE_REG);
+
+	kfree(port->txq->skb);
+	dma_free_coherent(geth->dev,
+			  n_txq * sizeof(*port->txq->ring) << port->txq_order,
+			  port->txq->ring, port->txq_dma_base);
+}
+
+static int gmac_setup_rxq(struct net_device *netdev)
+{
+	struct gemini_ethernet_port *port = netdev_priv(netdev);
+	struct gemini_ethernet *geth = port->geth;
+	struct nontoe_qhdr __iomem *qhdr;
+
+	qhdr = geth->base + TOE_DEFAULT_Q_HDR_BASE(netdev->dev_id);
+	port->rxq_rwptr = &qhdr->word1;
+
+	/* Remap a slew of memory to use for the RX queue */
+	port->rxq_ring = dma_alloc_coherent(geth->dev,
+				sizeof(*port->rxq_ring) << port->rxq_order,
+				&port->rxq_dma_base, GFP_KERNEL);
+	if (!port->rxq_ring)
+		return -ENOMEM;
+	if (port->rxq_dma_base & ~NONTOE_QHDR0_BASE_MASK) {
+		dev_warn(geth->dev, "RX queue base it not aligned\n");
+		return -ENOMEM;
+	}
+
+	writel(port->rxq_dma_base | port->rxq_order, &qhdr->word0);
+	writel(0, port->rxq_rwptr);
+	return 0;
+}
+
+static struct gmac_queue_page *
+gmac_get_queue_page(struct gemini_ethernet *geth,
+		    struct gemini_ethernet_port *port,
+		    dma_addr_t addr)
+{
+	struct gmac_queue_page *gpage;
+	dma_addr_t mapping;
+	int i;
+
+	/* Mask off the offset to get the page-aligned mapping */
+	mapping = addr & PAGE_MASK;
+
+	if (!geth->freeq_pages) {
+		dev_err(geth->dev, "try to get page with no page list\n");
+		return NULL;
+	}
+
+	/* Look up a ring buffer page from virtual mapping */
+	for (i = 0; i < geth->num_freeq_pages; i++) {
+		gpage = &geth->freeq_pages[i];
+		if (gpage->mapping == mapping)
+			return gpage;
+	}
+
+	return NULL;
+}
+
+static void gmac_cleanup_rxq(struct net_device *netdev)
+{
+	struct gemini_ethernet_port *port = netdev_priv(netdev);
+	struct gemini_ethernet *geth = port->geth;
+	struct gmac_rxdesc *rxd = port->rxq_ring;
+	struct gmac_queue_page *gpage;
+	struct nontoe_qhdr __iomem *qhdr;
+	void __iomem *dma_reg;
+	void __iomem *ptr_reg;
+	dma_addr_t mapping;
+	union dma_rwptr rw;
+	unsigned int r, w;
+
+	qhdr = geth->base +
+		TOE_DEFAULT_Q_HDR_BASE(netdev->dev_id);
+	dma_reg = &qhdr->word0;
+	ptr_reg = &qhdr->word1;
+
+	rw.bits32 = readl(ptr_reg);
+	r = rw.bits.rptr;
+	w = rw.bits.wptr;
+	writew(r, ptr_reg + 2);
+
+	writel(0, dma_reg);
+
+	/* Loop from read pointer to write pointer of the RX queue
+	 * and free up all pages held by the queue.
+	 */
+	while (r != w) {
+		mapping = rxd[r].word2.buf_adr;
+		r++;
+		r &= ((1 << port->rxq_order) - 1);
+
+		if (!mapping)
+			continue;
+
+		/* Freeq pointers are one page off */
+		gpage = gmac_get_queue_page(geth, port, mapping + PAGE_SIZE);
+		if (!gpage) {
+			dev_err(geth->dev, "could not find page\n");
+			continue;
+		}
+		/* Release the RX queue reference to the page */
+		put_page(gpage->page);
+	}
+
+	dma_free_coherent(geth->dev, sizeof(*port->rxq_ring) << port->rxq_order,
+			  port->rxq_ring, port->rxq_dma_base);
+}
+
+static struct page *geth_freeq_alloc_map_page(struct gemini_ethernet *geth,
+					      int pn)
+{
+	struct gmac_rxdesc *freeq_entry;
+	struct gmac_queue_page *gpage;
+	unsigned int fpp_order;
+	unsigned int frag_len;
+	dma_addr_t mapping;
+	struct page *page;
+	int i;
+
+	/* First allocate and DMA map a single page */
+	page = alloc_page(GFP_ATOMIC);
+	if (!page)
+		return NULL;
+
+	mapping = dma_map_single(geth->dev, page_address(page),
+				 PAGE_SIZE, DMA_FROM_DEVICE);
+	if (dma_mapping_error(geth->dev, mapping)) {
+		put_page(page);
+		return NULL;
+	}
+
+	/* Then assign the page mapping (physical address) to the buffer address
+	 * in the hardware queue. PAGE_SHIFT on ARM is 12 (1 page is 4096 bytes,
+	 * 4k), and the default RX frag order is 11 (fragments are up to 2048
+	 * bytes, 2k) so fpp_order (fragments per page order) is default 1. Thus
+	 * each page normally needs two entries in the queue.
+	 */
+	frag_len = 1 << geth->freeq_frag_order; /* Usually 2048 */
+	fpp_order = PAGE_SHIFT - geth->freeq_frag_order;
+	freeq_entry = geth->freeq_ring + (pn << fpp_order);
+	dev_dbg(geth->dev, "allocate page %d fragment length %d fragments per page %d, freeq entry %p\n",
+		 pn, frag_len, (1 << fpp_order), freeq_entry);
+	for (i = (1 << fpp_order); i > 0; i--) {
+		freeq_entry->word2.buf_adr = mapping;
+		freeq_entry++;
+		mapping += frag_len;
+	}
+
+	/* If the freeq entry already has a page mapped, then unmap it. */
+	gpage = &geth->freeq_pages[pn];
+	if (gpage->page) {
+		mapping = geth->freeq_ring[pn << fpp_order].word2.buf_adr;
+		dma_unmap_single(geth->dev, mapping, frag_len, DMA_FROM_DEVICE);
+		/* This should be the last reference to the page so it gets
+		 * released
+		 */
+		put_page(gpage->page);
+	}
+
+	/* Then put our new mapping into the page table */
+	dev_dbg(geth->dev, "page %d, DMA addr: %08x, page %p\n",
+		pn, (unsigned int)mapping, page);
+	gpage->mapping = mapping;
+	gpage->page = page;
+
+	return page;
+}
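
The freeq bookkeeping hinges on fpp_order = PAGE_SHIFT - freeq_frag_order: with 4 KiB pages and the default 2 KiB fragments it is 1, so each page backs two queue entries and page n's first entry lives at index n << fpp_order. A standalone check of that arithmetic:

	#include <stdio.h>

	int main(void)
	{
		unsigned int page_shift = 12;		/* 4 KiB pages (ARM default) */
		unsigned int freeq_frag_order = 11;	/* 2 KiB fragments */
		unsigned int fpp_order = page_shift - freeq_frag_order;

		printf("frag_len=%u, fragments per page=%u\n",
		       1u << freeq_frag_order, 1u << fpp_order);
		/* page n's first freeq entry sits at index n << fpp_order */
		printf("page 3 -> freeq entry %u\n", 3u << fpp_order);
		return 0;
	}
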
+
+/**
+ * geth_fill_freeq() - Fill the freeq with empty fragments to use
+ * @geth: the ethernet adapter
+ * @refill: whether to reset the queue by filling in all freeq entries or
+ * just refill it, usually the interrupt to refill the queue happens when
+ * the queue is half empty.
+ */
+static unsigned int geth_fill_freeq(struct gemini_ethernet *geth, bool refill)
+{
+	unsigned int fpp_order = PAGE_SHIFT - geth->freeq_frag_order;
+	unsigned int count = 0;
+	unsigned int pn, epn;
+	unsigned long flags;
+	union dma_rwptr rw;
+	unsigned int m_pn;
+
+	/* Mask for page */
+	m_pn = (1 << (geth->freeq_order - fpp_order)) - 1;
+
+	spin_lock_irqsave(&geth->freeq_lock, flags);
+
+	rw.bits32 = readl(geth->base + GLOBAL_SWFQ_RWPTR_REG);
+	pn = (refill ? rw.bits.wptr : rw.bits.rptr) >> fpp_order;
+	epn = (rw.bits.rptr >> fpp_order) - 1;
+	epn &= m_pn;
+
+	/* Loop over the freeq ring buffer entries */
+	while (pn != epn) {
+		struct gmac_queue_page *gpage;
+		struct page *page;
+
+		gpage = &geth->freeq_pages[pn];
+		page = gpage->page;
+
+		dev_dbg(geth->dev, "fill entry %d page ref count %d add %d refs\n",
+			pn, page_ref_count(page), 1 << fpp_order);
+
+		if (page_ref_count(page) > 1) {
+			unsigned int fl = (pn - epn) & m_pn;
+
+			if (fl > 64 >> fpp_order)
+				break;
+
+			page = geth_freeq_alloc_map_page(geth, pn);
+			if (!page)
+				break;
+		}
+
+		/* Add one reference per fragment in the page */
+		page_ref_add(page, 1 << fpp_order);
+		count += 1 << fpp_order;
+		pn++;
+		pn &= m_pn;
+	}
+
+	writew(pn << fpp_order, geth->base + GLOBAL_SWFQ_RWPTR_REG + 2);
+
+	spin_unlock_irqrestore(&geth->freeq_lock, flags);
+
+	return count;
+}
+
+static int geth_setup_freeq(struct gemini_ethernet *geth)
+{
+	unsigned int fpp_order = PAGE_SHIFT - geth->freeq_frag_order;
+	unsigned int frag_len = 1 << geth->freeq_frag_order;
+	unsigned int len = 1 << geth->freeq_order;
+	unsigned int pages = len >> fpp_order;
+	union queue_threshold qt;
+	union dma_skb_size skbsz;
+	unsigned int filled;
+	unsigned int pn;
+
+	geth->freeq_ring = dma_alloc_coherent(geth->dev,
+		sizeof(*geth->freeq_ring) << geth->freeq_order,
+		&geth->freeq_dma_base, GFP_KERNEL);
+	if (!geth->freeq_ring)
+		return -ENOMEM;
+	if (geth->freeq_dma_base & ~DMA_Q_BASE_MASK) {
+		dev_warn(geth->dev, "queue ring base it not aligned\n");
+		goto err_freeq;
+	}
+
+	/* Allocate a mapping to page look-up index */
+	geth->freeq_pages = kzalloc(pages * sizeof(*geth->freeq_pages),
+				   GFP_KERNEL);
+	if (!geth->freeq_pages)
+		goto err_freeq;
+	geth->num_freeq_pages = pages;
+
+	dev_info(geth->dev, "allocate %d pages for queue\n", pages);
+	for (pn = 0; pn < pages; pn++)
+		if (!geth_freeq_alloc_map_page(geth, pn))
+			goto err_freeq_alloc;
+
+	filled = geth_fill_freeq(geth, false);
+	if (!filled)
+		goto err_freeq_alloc;
+
+	qt.bits32 = readl(geth->base + GLOBAL_QUEUE_THRESHOLD_REG);
+	qt.bits.swfq_empty = 32;
+	writel(qt.bits32, geth->base + GLOBAL_QUEUE_THRESHOLD_REG);
+
+	skbsz.bits.sw_skb_size = 1 << geth->freeq_frag_order;
+	writel(skbsz.bits32, geth->base + GLOBAL_DMA_SKB_SIZE_REG);
+	writel(geth->freeq_dma_base | geth->freeq_order,
+	       geth->base + GLOBAL_SW_FREEQ_BASE_SIZE_REG);
+
+	return 0;
+
+err_freeq_alloc:
+	while (pn > 0) {
+		struct gmac_queue_page *gpage;
+		dma_addr_t mapping;
+
+		--pn;
+		mapping = geth->freeq_ring[pn << fpp_order].word2.buf_adr;
+		dma_unmap_single(geth->dev, mapping, frag_len, DMA_FROM_DEVICE);
+		gpage = &geth->freeq_pages[pn];
+		put_page(gpage->page);
+	}
+
+	kfree(geth->freeq_pages);
+err_freeq:
+	dma_free_coherent(geth->dev,
+			  sizeof(*geth->freeq_ring) << geth->freeq_order,
+			  geth->freeq_ring, geth->freeq_dma_base);
+	geth->freeq_ring = NULL;
+	return -ENOMEM;
+}
+
+/**
+ * geth_cleanup_freeq() - cleanup the DMA mappings and free the queue
+ * @geth: the Gemini global ethernet state
+ */
+static void geth_cleanup_freeq(struct gemini_ethernet *geth)
+{
+	unsigned int fpp_order = PAGE_SHIFT - geth->freeq_frag_order;
+	unsigned int frag_len = 1 << geth->freeq_frag_order;
+	unsigned int len = 1 << geth->freeq_order;
+	unsigned int pages = len >> fpp_order;
+	unsigned int pn;
+
+	writew(readw(geth->base + GLOBAL_SWFQ_RWPTR_REG),
+	       geth->base + GLOBAL_SWFQ_RWPTR_REG + 2);
+	writel(0, geth->base + GLOBAL_SW_FREEQ_BASE_SIZE_REG);
+
+	for (pn = 0; pn < pages; pn++) {
+		struct gmac_queue_page *gpage;
+		dma_addr_t mapping;
+
+		mapping = geth->freeq_ring[pn << fpp_order].word2.buf_adr;
+		dma_unmap_single(geth->dev, mapping, frag_len, DMA_FROM_DEVICE);
+
+		gpage = &geth->freeq_pages[pn];
+		while (page_ref_count(gpage->page) > 0)
+			put_page(gpage->page);
+	}
+
+	kfree(geth->freeq_pages);
+
+	dma_free_coherent(geth->dev,
+			  sizeof(*geth->freeq_ring) << geth->freeq_order,
+			  geth->freeq_ring, geth->freeq_dma_base);
+}
+
+/**
+ * geth_resize_freeq() - resize the software queue depth
+ * @port: the port requesting the change
+ *
+ * This gets called at least once during probe() so the device queue gets
+ * "resized" from the hardware defaults. Since both ports/net devices share
+ * the same hardware queue, some synchronization between the ports is
+ * needed.
+ */
+static int geth_resize_freeq(struct gemini_ethernet_port *port)
+{
+	struct gemini_ethernet *geth = port->geth;
+	struct net_device *netdev = port->netdev;
+	struct gemini_ethernet_port *other_port;
+	struct net_device *other_netdev;
+	unsigned int new_size = 0;
+	unsigned int new_order;
+	unsigned long flags;
+	u32 en;
+	int ret;
+
+	if (netdev->dev_id == 0)
+		other_netdev = geth->port1->netdev;
+	else
+		other_netdev = geth->port0->netdev;
+
+	if (other_netdev && netif_running(other_netdev))
+		return -EBUSY;
+
+	new_size = 1 << (port->rxq_order + 1);
+	netdev_dbg(netdev, "port %d size: %d order %d\n",
+		   netdev->dev_id,
+		   new_size,
+		   port->rxq_order);
+	if (other_netdev) {
+		other_port = netdev_priv(other_netdev);
+		new_size += 1 << (other_port->rxq_order + 1);
+		netdev_dbg(other_netdev, "port %d size: %d order %d\n",
+			   other_netdev->dev_id,
+			   (1 << (other_port->rxq_order + 1)),
+			   other_port->rxq_order);
+	}
+
+	new_order = min(15, ilog2(new_size - 1) + 1);
+	dev_dbg(geth->dev, "set shared queue to size %d order %d\n",
+		new_size, new_order);
+	if (geth->freeq_order == new_order)
+		return 0;
+
+	spin_lock_irqsave(&geth->irq_lock, flags);
+
+	/* Disable the software queue IRQs */
+	en = readl(geth->base + GLOBAL_INTERRUPT_ENABLE_4_REG);
+	en &= ~SWFQ_EMPTY_INT_BIT;
+	writel(en, geth->base + GLOBAL_INTERRUPT_ENABLE_4_REG);
+	spin_unlock_irqrestore(&geth->irq_lock, flags);
+
+	/* Drop the old queue */
+	if (geth->freeq_ring)
+		geth_cleanup_freeq(geth);
+
+	/* Allocate a new queue with the desired order */
+	geth->freeq_order = new_order;
+	ret = geth_setup_freeq(geth);
+
+	/* Restart the interrupts - NOTE if this is the first resize
+	 * after probe(), this is where the interrupts get turned on
+	 * in the first place.
+	 */
+	spin_lock_irqsave(&geth->irq_lock, flags);
+	en |= SWFQ_EMPTY_INT_BIT;
+	writel(en, geth->base + GLOBAL_INTERRUPT_ENABLE_4_REG);
+	spin_unlock_irqrestore(&geth->irq_lock, flags);
+
+	return ret;
+}
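
The resize rounds the combined demand of both ports up to the next power-of-two order, capped at order 15 (32768 entries). A standalone sketch of that computation (ilog2_u() stands in for the kernel's ilog2()):

	#include <stdio.h>

	static unsigned int ilog2_u(unsigned int x)
	{
		unsigned int r = 0;

		while (x >>= 1)
			r++;
		return r;
	}

	int main(void)
	{
		unsigned int new_size = 2048 + 1024;	/* two ports' demands */
		unsigned int new_order = ilog2_u(new_size - 1) + 1;

		if (new_order > 15)
			new_order = 15;
		/* 3072 entries round up to order 12 (4096 entries) */
		printf("freeq order %u (%u entries)\n",
		       new_order, 1u << new_order);
		return 0;
	}
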
+
+static void gmac_tx_irq_enable(struct net_device *netdev,
+			       unsigned int txq, int en)
+{
+	struct gemini_ethernet_port *port = netdev_priv(netdev);
+	struct gemini_ethernet *geth = port->geth;
+	u32 val, mask;
+
+	netdev_dbg(netdev, "%s device %d\n", __func__, netdev->dev_id);
+
+	mask = GMAC0_IRQ0_TXQ0_INTS << (6 * netdev->dev_id + txq);
+
+	if (en)
+		writel(mask, geth->base + GLOBAL_INTERRUPT_STATUS_0_REG);
+
+	val = readl(geth->base + GLOBAL_INTERRUPT_ENABLE_0_REG);
+	val = en ? val | mask : val & ~mask;
+	writel(val, geth->base + GLOBAL_INTERRUPT_ENABLE_0_REG);
+}
+
+static void gmac_tx_irq(struct net_device *netdev, unsigned int txq_num)
+{
+	struct netdev_queue *ntxq = netdev_get_tx_queue(netdev, txq_num);
+
+	gmac_tx_irq_enable(netdev, txq_num, 0);
+	netif_tx_wake_queue(ntxq);
+}
+
+static int gmac_map_tx_bufs(struct net_device *netdev, struct sk_buff *skb,
+			    struct gmac_txq *txq, unsigned short *desc)
+{
+	struct gemini_ethernet_port *port = netdev_priv(netdev);
+	struct skb_shared_info *skb_si =  skb_shinfo(skb);
+	unsigned short m = (1 << port->txq_order) - 1;
+	short frag, last_frag = skb_si->nr_frags - 1;
+	struct gemini_ethernet *geth = port->geth;
+	unsigned int word1, word3, buflen;
+	unsigned short w = *desc;
+	struct gmac_txdesc *txd;
+	skb_frag_t *skb_frag;
+	dma_addr_t mapping;
+	unsigned short mtu;
+	void *buffer;
+
+	mtu  = ETH_HLEN;
+	mtu += netdev->mtu;
+	if (skb->protocol == htons(ETH_P_8021Q))
+		mtu += VLAN_HLEN;
+
+	word1 = skb->len;
+	word3 = SOF_BIT;
+
+	if (word1 > mtu) {
+		word1 |= TSS_MTU_ENABLE_BIT;
+		word3 |= mtu;
+	}
+
+	if (skb->ip_summed != CHECKSUM_NONE) {
+		int tcp = 0;
+
+		if (skb->protocol == htons(ETH_P_IP)) {
+			word1 |= TSS_IP_CHKSUM_BIT;
+			tcp = ip_hdr(skb)->protocol == IPPROTO_TCP;
+		} else { /* IPv6 */
+			word1 |= TSS_IPV6_ENABLE_BIT;
+			tcp = ipv6_hdr(skb)->nexthdr == IPPROTO_TCP;
+		}
+
+		word1 |= tcp ? TSS_TCP_CHKSUM_BIT : TSS_UDP_CHKSUM_BIT;
+	}
+
+	frag = -1;
+	while (frag <= last_frag) {
+		if (frag == -1) {
+			buffer = skb->data;
+			buflen = skb_headlen(skb);
+		} else {
+			skb_frag = skb_si->frags + frag;
+			buffer = page_address(skb_frag_page(skb_frag)) +
+				 skb_frag->page_offset;
+			buflen = skb_frag->size;
+		}
+
+		if (frag == last_frag) {
+			word3 |= EOF_BIT;
+			txq->skb[w] = skb;
+		}
+
+		mapping = dma_map_single(geth->dev, buffer, buflen,
+					 DMA_TO_DEVICE);
+		if (dma_mapping_error(geth->dev, mapping))
+			goto map_error;
+
+		txd = txq->ring + w;
+		txd->word0.bits32 = buflen;
+		txd->word1.bits32 = word1;
+		txd->word2.buf_adr = mapping;
+		txd->word3.bits32 = word3;
+
+		word3 &= MTU_SIZE_BIT_MASK;
+		w++;
+		w &= m;
+		frag++;
+	}
+
+	*desc = w;
+	return 0;
+
+map_error:
+	while (w != *desc) {
+		w--;
+		w &= m;
+
+		dma_unmap_page(geth->dev, txq->ring[w].word2.buf_adr,
+			       txq->ring[w].word0.bits.buffer_size,
+			       DMA_TO_DEVICE);
+	}
+	return -ENOMEM;
+}
+
+static int gmac_start_xmit(struct sk_buff *skb, struct net_device *netdev)
+{
+	struct gemini_ethernet_port *port = netdev_priv(netdev);
+	unsigned short m = (1 << port->txq_order) - 1;
+	struct netdev_queue *ntxq;
+	unsigned short r, w, d;
+	void __iomem *ptr_reg;
+	struct gmac_txq *txq;
+	int txq_num, nfrags;
+	union dma_rwptr rw;
+
+	SKB_FRAG_ASSERT(skb);
+
+	if (skb->len >= 0x10000)
+		goto out_drop_free;
+
+	txq_num = skb_get_queue_mapping(skb);
+	ptr_reg = port->dma_base + GMAC_SW_TX_QUEUE_PTR_REG(txq_num);
+	txq = &port->txq[txq_num];
+	ntxq = netdev_get_tx_queue(netdev, txq_num);
+	nfrags = skb_shinfo(skb)->nr_frags;
+
+	rw.bits32 = readl(ptr_reg);
+	r = rw.bits.rptr;
+	w = rw.bits.wptr;
+
+	d = txq->cptr - w - 1;
+	d &= m;
+
+	if (d < nfrags + 2) {
+		gmac_clean_txq(netdev, txq, r);
+		d = txq->cptr - w - 1;
+		d &= m;
+
+		if (d < nfrags + 2) {
+			netif_tx_stop_queue(ntxq);
+
+			d = txq->cptr + nfrags + 16;
+			d &= m;
+			txq->ring[d].word3.bits.eofie = 1;
+			gmac_tx_irq_enable(netdev, txq_num, 1);
+
+			u64_stats_update_begin(&port->tx_stats_syncp);
+			netdev->stats.tx_fifo_errors++;
+			u64_stats_update_end(&port->tx_stats_syncp);
+			return NETDEV_TX_BUSY;
+		}
+	}
+
+	if (gmac_map_tx_bufs(netdev, skb, txq, &w)) {
+		if (skb_linearize(skb))
+			goto out_drop;
+
+		u64_stats_update_begin(&port->tx_stats_syncp);
+		port->tx_frags_linearized++;
+		u64_stats_update_end(&port->tx_stats_syncp);
+
+		if (gmac_map_tx_bufs(netdev, skb, txq, &w))
+			goto out_drop_free;
+	}
+
+	writew(w, ptr_reg + 2);
+
+	gmac_clean_txq(netdev, txq, r);
+	return NETDEV_TX_OK;
+
+out_drop_free:
+	dev_kfree_skb(skb);
+out_drop:
+	u64_stats_update_begin(&port->tx_stats_syncp);
+	port->stats.tx_dropped++;
+	u64_stats_update_end(&port->tx_stats_syncp);
+	return NETDEV_TX_OK;
+}
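
All ring indices in this driver live on power-of-two rings and wrap with `& m`, where m = size - 1; the free-space computation `d = txq->cptr - w - 1` additionally relies on unsigned wraparound before the mask is applied. A standalone check with the default TX queue order of 8 (256 entries):

	#include <stdio.h>
	#include <assert.h>

	int main(void)
	{
		unsigned int m = (1U << 8) - 1;	/* 256-entry ring, mask 255 */
		unsigned int c = 255;

		c++;
		c &= m;				/* increment wraps to 0 */
		assert(c == 0);

		/* free slots between write ptr 250 and clean ptr 10 */
		unsigned int w = 250, cptr = 10;
		unsigned int d = (cptr - w - 1) & m;

		assert(d == 15);		/* 256 - ((250 - 10) & 255) - 1 */
		printf("wrapped=%u free=%u\n", c, d);
		return 0;
	}
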
+
+static void gmac_tx_timeout(struct net_device *netdev)
+{
+	netdev_err(netdev, "Tx timeout\n");
+	gmac_dump_dma_state(netdev);
+}
+
+static void gmac_enable_irq(struct net_device *netdev, int enable)
+{
+	struct gemini_ethernet_port *port = netdev_priv(netdev);
+	struct gemini_ethernet *geth = port->geth;
+	unsigned long flags;
+	u32 val, mask;
+
+	netdev_info(netdev, "%s device %d %s\n", __func__,
+		    netdev->dev_id, enable ? "enable" : "disable");
+	spin_lock_irqsave(&geth->irq_lock, flags);
+
+	mask = GMAC0_IRQ0_2 << (netdev->dev_id * 2);
+	val = readl(geth->base + GLOBAL_INTERRUPT_ENABLE_0_REG);
+	val = enable ? (val | mask) : (val & ~mask);
+	writel(val, geth->base + GLOBAL_INTERRUPT_ENABLE_0_REG);
+
+	mask = DEFAULT_Q0_INT_BIT << netdev->dev_id;
+	val = readl(geth->base + GLOBAL_INTERRUPT_ENABLE_1_REG);
+	val = enable ? (val | mask) : (val & ~mask);
+	writel(val, geth->base + GLOBAL_INTERRUPT_ENABLE_1_REG);
+
+	mask = GMAC0_IRQ4_8 << (netdev->dev_id * 8);
+	val = readl(geth->base + GLOBAL_INTERRUPT_ENABLE_4_REG);
+	val = enable ? (val | mask) : (val & ~mask);
+	writel(val, geth->base + GLOBAL_INTERRUPT_ENABLE_4_REG);
+
+	spin_unlock_irqrestore(&geth->irq_lock, flags);
+}
+
+static void gmac_enable_rx_irq(struct net_device *netdev, int enable)
+{
+	struct gemini_ethernet_port *port = netdev_priv(netdev);
+	struct gemini_ethernet *geth = port->geth;
+	unsigned long flags;
+	u32 val, mask;
+
+	netdev_dbg(netdev, "%s device %d %s\n", __func__, netdev->dev_id,
+		   enable ? "enable" : "disable");
+	spin_lock_irqsave(&geth->irq_lock, flags);
+	mask = DEFAULT_Q0_INT_BIT << netdev->dev_id;
+
+	val = readl(geth->base + GLOBAL_INTERRUPT_ENABLE_1_REG);
+	val = enable ? (val | mask) : (val & ~mask);
+	writel(val, geth->base + GLOBAL_INTERRUPT_ENABLE_1_REG);
+
+	spin_unlock_irqrestore(&geth->irq_lock, flags);
+}
+
+static struct sk_buff *gmac_skb_if_good_frame(struct gemini_ethernet_port *port,
+					      union gmac_rxdesc_0 word0,
+					      unsigned int frame_len)
+{
+	unsigned int rx_csum = word0.bits.chksum_status;
+	unsigned int rx_status = word0.bits.status;
+	struct sk_buff *skb = NULL;
+
+	port->rx_stats[rx_status]++;
+	port->rx_csum_stats[rx_csum]++;
+
+	if (word0.bits.derr || word0.bits.perr ||
+	    rx_status || frame_len < ETH_ZLEN ||
+	    rx_csum >= RX_CHKSUM_IP_ERR_UNKNOWN) {
+		port->stats.rx_errors++;
+
+		if (frame_len < ETH_ZLEN || RX_ERROR_LENGTH(rx_status))
+			port->stats.rx_length_errors++;
+		if (RX_ERROR_OVER(rx_status))
+			port->stats.rx_over_errors++;
+		if (RX_ERROR_CRC(rx_status))
+			port->stats.rx_crc_errors++;
+		if (RX_ERROR_FRAME(rx_status))
+			port->stats.rx_frame_errors++;
+		return NULL;
+	}
+
+	skb = napi_get_frags(&port->napi);
+	if (!skb)
+		goto update_exit;
+
+	if (rx_csum == RX_CHKSUM_IP_UDP_TCP_OK)
+		skb->ip_summed = CHECKSUM_UNNECESSARY;
+
+update_exit:
+	port->stats.rx_bytes += frame_len;
+	port->stats.rx_packets++;
+	return skb;
+}
+
+static unsigned int gmac_rx(struct net_device *netdev, unsigned int budget)
+{
+	struct gemini_ethernet_port *port = netdev_priv(netdev);
+	unsigned short m = (1 << port->rxq_order) - 1;
+	struct gemini_ethernet *geth = port->geth;
+	void __iomem *ptr_reg = port->rxq_rwptr;
+	unsigned int frame_len, frag_len;
+	struct gmac_rxdesc *rx = NULL;
+	struct gmac_queue_page *gpage;
+	static struct sk_buff *skb;
+	union gmac_rxdesc_0 word0;
+	union gmac_rxdesc_1 word1;
+	union gmac_rxdesc_3 word3;
+	struct page *page = NULL;
+	unsigned int page_offs;
+	unsigned short r, w;
+	union dma_rwptr rw;
+	dma_addr_t mapping;
+	int frag_nr = 0;
+
+	rw.bits32 = readl(ptr_reg);
+	/* Reset interrupt as all packets up to here are taken into account */
+	writel(DEFAULT_Q0_INT_BIT << netdev->dev_id,
+	       geth->base + GLOBAL_INTERRUPT_STATUS_1_REG);
+	r = rw.bits.rptr;
+	w = rw.bits.wptr;
+
+	while (budget && w != r) {
+		rx = port->rxq_ring + r;
+		word0 = rx->word0;
+		word1 = rx->word1;
+		mapping = rx->word2.buf_adr;
+		word3 = rx->word3;
+
+		r++;
+		r &= m;
+
+		frag_len = word0.bits.buffer_size;
+		frame_len = word1.bits.byte_count;
+		page_offs = mapping & ~PAGE_MASK;
+
+		if (!mapping) {
+			netdev_err(netdev,
+				   "rxq[%u]: HW BUG: zero DMA desc\n", r);
+			goto err_drop;
+		}
+
+		/* Freeq pointers are one page off */
+		gpage = gmac_get_queue_page(geth, port, mapping + PAGE_SIZE);
+		if (!gpage) {
+			dev_err(geth->dev, "could not find mapping\n");
+			continue;
+		}
+		page = gpage->page;
+
+		if (word3.bits32 & SOF_BIT) {
+			if (skb) {
+				napi_free_frags(&port->napi);
+				port->stats.rx_dropped++;
+			}
+
+			skb = gmac_skb_if_good_frame(port, word0, frame_len);
+			if (!skb)
+				goto err_drop;
+
+			page_offs += NET_IP_ALIGN;
+			frag_len -= NET_IP_ALIGN;
+			frag_nr = 0;
+
+		} else if (!skb) {
+			put_page(page);
+			continue;
+		}
+
+		if (word3.bits32 & EOF_BIT)
+			frag_len = frame_len - skb->len;
+
+		/* append page frag to skb */
+		if (frag_nr == MAX_SKB_FRAGS)
+			goto err_drop;
+
+		if (frag_len == 0)
+			netdev_err(netdev, "Received fragment with len = 0\n");
+
+		skb_fill_page_desc(skb, frag_nr, page, page_offs, frag_len);
+		skb->len += frag_len;
+		skb->data_len += frag_len;
+		skb->truesize += frag_len;
+		frag_nr++;
+
+		if (word3.bits32 & EOF_BIT) {
+			napi_gro_frags(&port->napi);
+			skb = NULL;
+			--budget;
+		}
+		continue;
+
+err_drop:
+		if (skb) {
+			napi_free_frags(&port->napi);
+			skb = NULL;
+		}
+
+		if (mapping)
+			put_page(page);
+
+		port->stats.rx_dropped++;
+	}
+
+	writew(r, ptr_reg);
+	return budget;
+}
+
+static int gmac_napi_poll(struct napi_struct *napi, int budget)
+{
+	struct gemini_ethernet_port *port = netdev_priv(napi->dev);
+	struct gemini_ethernet *geth = port->geth;
+	unsigned int freeq_threshold;
+	unsigned int received;
+
+	freeq_threshold = 1 << (geth->freeq_order - 1);
+	u64_stats_update_begin(&port->rx_stats_syncp);
+
+	received = gmac_rx(napi->dev, budget);
+	if (received < budget) {
+		napi_gro_flush(napi, false);
+		napi_complete_done(napi, received);
+		gmac_enable_rx_irq(napi->dev, 1);
+		++port->rx_napi_exits;
+	}
+
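+	/* gmac_rx() returns the unused budget, so (budget - received) is the
+	 * number of completed frames; refill the free queue once roughly
+	 * half of it has been consumed.
+	 */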
+	port->freeq_refill += (budget - received);
+	if (port->freeq_refill > freeq_threshold) {
+		port->freeq_refill -= freeq_threshold;
+		geth_fill_freeq(geth, true);
+	}
+
+	u64_stats_update_end(&port->rx_stats_syncp);
+	return received;
+}
+
+static void gmac_dump_dma_state(struct net_device *netdev)
+{
+	struct gemini_ethernet_port *port = netdev_priv(netdev);
+	struct gemini_ethernet *geth = port->geth;
+	void __iomem *ptr_reg;
+	u32 reg[5];
+
+	/* Interrupt status */
+	reg[0] = readl(geth->base + GLOBAL_INTERRUPT_STATUS_0_REG);
+	reg[1] = readl(geth->base + GLOBAL_INTERRUPT_STATUS_1_REG);
+	reg[2] = readl(geth->base + GLOBAL_INTERRUPT_STATUS_2_REG);
+	reg[3] = readl(geth->base + GLOBAL_INTERRUPT_STATUS_3_REG);
+	reg[4] = readl(geth->base + GLOBAL_INTERRUPT_STATUS_4_REG);
+	netdev_err(netdev, "IRQ status: 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x\n",
+		   reg[0], reg[1], reg[2], reg[3], reg[4]);
+
+	/* Interrupt enable */
+	reg[0] = readl(geth->base + GLOBAL_INTERRUPT_ENABLE_0_REG);
+	reg[1] = readl(geth->base + GLOBAL_INTERRUPT_ENABLE_1_REG);
+	reg[2] = readl(geth->base + GLOBAL_INTERRUPT_ENABLE_2_REG);
+	reg[3] = readl(geth->base + GLOBAL_INTERRUPT_ENABLE_3_REG);
+	reg[4] = readl(geth->base + GLOBAL_INTERRUPT_ENABLE_4_REG);
+	netdev_err(netdev, "IRQ enable: 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x\n",
+		   reg[0], reg[1], reg[2], reg[3], reg[4]);
+
+	/* RX DMA status */
+	reg[0] = readl(port->dma_base + GMAC_DMA_RX_FIRST_DESC_REG);
+	reg[1] = readl(port->dma_base + GMAC_DMA_RX_CURR_DESC_REG);
+	reg[2] = GET_RPTR(port->rxq_rwptr);
+	reg[3] = GET_WPTR(port->rxq_rwptr);
+	netdev_err(netdev, "RX DMA regs: 0x%08x 0x%08x, ptr: %u %u\n",
+		   reg[0], reg[1], reg[2], reg[3]);
+
+	reg[0] = readl(port->dma_base + GMAC_DMA_RX_DESC_WORD0_REG);
+	reg[1] = readl(port->dma_base + GMAC_DMA_RX_DESC_WORD1_REG);
+	reg[2] = readl(port->dma_base + GMAC_DMA_RX_DESC_WORD2_REG);
+	reg[3] = readl(port->dma_base + GMAC_DMA_RX_DESC_WORD3_REG);
+	netdev_err(netdev, "RX DMA descriptor: 0x%08x 0x%08x 0x%08x 0x%08x\n",
+		   reg[0], reg[1], reg[2], reg[3]);
+
+	/* TX DMA status */
+	ptr_reg = port->dma_base + GMAC_SW_TX_QUEUE0_PTR_REG;
+
+	reg[0] = readl(port->dma_base + GMAC_DMA_TX_FIRST_DESC_REG);
+	reg[1] = readl(port->dma_base + GMAC_DMA_TX_CURR_DESC_REG);
+	reg[2] = GET_RPTR(ptr_reg);
+	reg[3] = GET_WPTR(ptr_reg);
+	netdev_err(netdev, "TX DMA regs: 0x%08x 0x%08x, ptr: %u %u\n",
+		   reg[0], reg[1], reg[2], reg[3]);
+
+	reg[0] = readl(port->dma_base + GMAC_DMA_TX_DESC_WORD0_REG);
+	reg[1] = readl(port->dma_base + GMAC_DMA_TX_DESC_WORD1_REG);
+	reg[2] = readl(port->dma_base + GMAC_DMA_TX_DESC_WORD2_REG);
+	reg[3] = readl(port->dma_base + GMAC_DMA_TX_DESC_WORD3_REG);
+	netdev_err(netdev, "TX DMA descriptor: 0x%08x 0x%08x 0x%08x 0x%08x\n",
+		   reg[0], reg[1], reg[2], reg[3]);
+
+	/* FREE queues status */
+	ptr_reg = geth->base + GLOBAL_SWFQ_RWPTR_REG;
+
+	reg[0] = GET_RPTR(ptr_reg);
+	reg[1] = GET_WPTR(ptr_reg);
+
+	ptr_reg = geth->base + GLOBAL_HWFQ_RWPTR_REG;
+
+	reg[2] = GET_RPTR(ptr_reg);
+	reg[3] = GET_WPTR(ptr_reg);
+	netdev_err(netdev, "FQ SW ptr: %u %u, HW ptr: %u %u\n",
+		   reg[0], reg[1], reg[2], reg[3]);
+}
+
+static void gmac_update_hw_stats(struct net_device *netdev)
+{
+	struct gemini_ethernet_port *port = netdev_priv(netdev);
+	unsigned int rx_discards, rx_mcast, rx_bcast;
+	struct gemini_ethernet *geth = port->geth;
+	unsigned long flags;
+
+	spin_lock_irqsave(&geth->irq_lock, flags);
+	u64_stats_update_begin(&port->ir_stats_syncp);
+
+	rx_discards = readl(port->gmac_base + GMAC_IN_DISCARDS);
+	port->hw_stats[0] += rx_discards;
+	port->hw_stats[1] += readl(port->gmac_base + GMAC_IN_ERRORS);
+	rx_mcast = readl(port->gmac_base + GMAC_IN_MCAST);
+	port->hw_stats[2] += rx_mcast;
+	rx_bcast = readl(port->gmac_base + GMAC_IN_BCAST);
+	port->hw_stats[3] += rx_bcast;
+	port->hw_stats[4] += readl(port->gmac_base + GMAC_IN_MAC1);
+	port->hw_stats[5] += readl(port->gmac_base + GMAC_IN_MAC2);
+
+	port->stats.rx_missed_errors += rx_discards;
+	port->stats.multicast += rx_mcast;
+	port->stats.multicast += rx_bcast;
+
+	writel(GMAC0_MIB_INT_BIT << (netdev->dev_id * 8),
+	       geth->base + GLOBAL_INTERRUPT_STATUS_4_REG);
+
+	u64_stats_update_end(&port->ir_stats_syncp);
+	spin_unlock_irqrestore(&geth->irq_lock, flags);
+}
+
+/**
+ * gmac_get_intr_flags() - get interrupt status flags for a port
+ * @netdev: the net device for the port to get flags from
+ * @i: the interrupt status register 0..4
+ */
+static u32 gmac_get_intr_flags(struct net_device *netdev, int i)
+{
+	struct gemini_ethernet_port *port = netdev_priv(netdev);
+	struct gemini_ethernet *geth = port->geth;
+	void __iomem *irqif_reg, *irqen_reg;
+	unsigned int offs, val;
+
+	/* Calculate the offset using the stride of the status registers */
+	offs = i * (GLOBAL_INTERRUPT_STATUS_1_REG -
+		    GLOBAL_INTERRUPT_STATUS_0_REG);
+
+	irqif_reg = geth->base + GLOBAL_INTERRUPT_STATUS_0_REG + offs;
+	irqen_reg = geth->base + GLOBAL_INTERRUPT_ENABLE_0_REG + offs;
+
+	val = readl(irqif_reg) & readl(irqen_reg);
+	return val;
+}
+
+static enum hrtimer_restart gmac_coalesce_delay_expired(struct hrtimer *timer)
+{
+	struct gemini_ethernet_port *port =
+		container_of(timer, struct gemini_ethernet_port,
+			     rx_coalesce_timer);
+
+	napi_schedule(&port->napi);
+	return HRTIMER_NORESTART;
+}
+
+static irqreturn_t gmac_irq(int irq, void *data)
+{
+	struct gemini_ethernet_port *port;
+	struct net_device *netdev = data;
+	struct gemini_ethernet *geth;
+	u32 val, orr = 0;
+
+	port = netdev_priv(netdev);
+	geth = port->geth;
+
+	val = gmac_get_intr_flags(netdev, 0);
+	orr |= val;
+
+	if (val & (GMAC0_IRQ0_2 << (netdev->dev_id * 2))) {
+		/* Oh, crap */
+		netdev_err(netdev, "hw failure/sw bug\n");
+		gmac_dump_dma_state(netdev);
+
+		/* don't know how to recover, just reduce losses */
+		gmac_enable_irq(netdev, 0);
+		return IRQ_HANDLED;
+	}
+
+	if (val & (GMAC0_IRQ0_TXQ0_INTS << (netdev->dev_id * 6)))
+		gmac_tx_irq(netdev, 0);
+
+	val = gmac_get_intr_flags(netdev, 1);
+	orr |= val;
+
+	if (val & (DEFAULT_Q0_INT_BIT << netdev->dev_id)) {
+		gmac_enable_rx_irq(netdev, 0);
+
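+		/* If RX interrupt coalescing is configured, defer the NAPI
+		 * poll by rx_coalesce_nsecs via the hrtimer; otherwise
+		 * schedule the poll at once.
+		 */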
+		if (!port->rx_coalesce_nsecs) {
+			napi_schedule(&port->napi);
+		} else {
+			ktime_t ktime;
+
+			ktime = ktime_set(0, port->rx_coalesce_nsecs);
+			hrtimer_start(&port->rx_coalesce_timer, ktime,
+				      HRTIMER_MODE_REL);
+		}
+	}
+
+	val = gmac_get_intr_flags(netdev, 4);
+	orr |= val;
+
+	if (val & (GMAC0_MIB_INT_BIT << (netdev->dev_id * 8)))
+		gmac_update_hw_stats(netdev);
+
+	if (val & (GMAC0_RX_OVERRUN_INT_BIT << (netdev->dev_id * 8))) {
+		writel(GMAC0_RXDERR_INT_BIT << (netdev->dev_id * 8),
+		       geth->base + GLOBAL_INTERRUPT_STATUS_4_REG);
+
+		spin_lock(&geth->irq_lock);
+		u64_stats_update_begin(&port->ir_stats_syncp);
+		++port->stats.rx_fifo_errors;
+		u64_stats_update_end(&port->ir_stats_syncp);
+		spin_unlock(&geth->irq_lock);
+	}
+
+	return orr ? IRQ_HANDLED : IRQ_NONE;
+}
+
+static void gmac_start_dma(struct gemini_ethernet_port *port)
+{
+	void __iomem *dma_ctrl_reg = port->dma_base + GMAC_DMA_CTRL_REG;
+	union gmac_dma_ctrl dma_ctrl;
+
+	dma_ctrl.bits32 = readl(dma_ctrl_reg);
+	dma_ctrl.bits.rd_enable = 1;
+	dma_ctrl.bits.td_enable = 1;
+	dma_ctrl.bits.loopback = 0;
+	dma_ctrl.bits.drop_small_ack = 0;
+	dma_ctrl.bits.rd_insert_bytes = NET_IP_ALIGN;
+	dma_ctrl.bits.rd_prot = HPROT_DATA_CACHE | HPROT_PRIVILIGED;
+	dma_ctrl.bits.rd_burst_size = HBURST_INCR8;
+	dma_ctrl.bits.rd_bus = HSIZE_8;
+	dma_ctrl.bits.td_prot = HPROT_DATA_CACHE;
+	dma_ctrl.bits.td_burst_size = HBURST_INCR8;
+	dma_ctrl.bits.td_bus = HSIZE_8;
+
+	writel(dma_ctrl.bits32, dma_ctrl_reg);
+}
+
+static void gmac_stop_dma(struct gemini_ethernet_port *port)
+{
+	void __iomem *dma_ctrl_reg = port->dma_base + GMAC_DMA_CTRL_REG;
+	union gmac_dma_ctrl dma_ctrl;
+
+	dma_ctrl.bits32 = readl(dma_ctrl_reg);
+	dma_ctrl.bits.rd_enable = 0;
+	dma_ctrl.bits.td_enable = 0;
+	writel(dma_ctrl.bits32, dma_ctrl_reg);
+}
+
+static int gmac_open(struct net_device *netdev)
+{
+	struct gemini_ethernet_port *port = netdev_priv(netdev);
+	int err;
+
+	if (!netdev->phydev) {
+		err = gmac_setup_phy(netdev);
+		if (err) {
+			netif_err(port, ifup, netdev,
+				  "PHY init failed: %d\n", err);
+			return err;
+		}
+	}
+
+	err = request_irq(netdev->irq, gmac_irq,
+			  IRQF_SHARED, netdev->name, netdev);
+	if (err) {
+		netdev_err(netdev, "no IRQ\n");
+		return err;
+	}
+
+	netif_carrier_off(netdev);
+	phy_start(netdev->phydev);
+
+	err = geth_resize_freeq(port);
+	if (err) {
+		netdev_err(netdev, "could not resize freeq\n");
+		goto err_stop_phy;
+	}
+
+	err = gmac_setup_rxq(netdev);
+	if (err) {
+		netdev_err(netdev, "could not setup RXQ\n");
+		goto err_stop_phy;
+	}
+
+	err = gmac_setup_txqs(netdev);
+	if (err) {
+		netdev_err(netdev, "could not setup TXQs\n");
+		gmac_cleanup_rxq(netdev);
+		goto err_stop_phy;
+	}
+
+	napi_enable(&port->napi);
+
+	gmac_start_dma(port);
+	gmac_enable_irq(netdev, 1);
+	gmac_enable_tx_rx(netdev);
+	netif_tx_start_all_queues(netdev);
+
+	hrtimer_init(&port->rx_coalesce_timer, CLOCK_MONOTONIC,
+		     HRTIMER_MODE_REL);
+	port->rx_coalesce_timer.function = &gmac_coalesce_delay_expired;
+
+	netdev_info(netdev, "opened\n");
+
+	return 0;
+
+err_stop_phy:
+	phy_stop(netdev->phydev);
+	free_irq(netdev->irq, netdev);
+	return err;
+}
+
+static int gmac_stop(struct net_device *netdev)
+{
+	struct gemini_ethernet_port *port = netdev_priv(netdev);
+
+	hrtimer_cancel(&port->rx_coalesce_timer);
+	netif_tx_stop_all_queues(netdev);
+	gmac_disable_tx_rx(netdev);
+	gmac_stop_dma(port);
+	napi_disable(&port->napi);
+
+	gmac_enable_irq(netdev, 0);
+	gmac_cleanup_rxq(netdev);
+	gmac_cleanup_txqs(netdev);
+
+	phy_stop(netdev->phydev);
+	free_irq(netdev->irq, netdev);
+
+	gmac_update_hw_stats(netdev);
+	return 0;
+}
+
+static void gmac_set_rx_mode(struct net_device *netdev)
+{
+	struct gemini_ethernet_port *port = netdev_priv(netdev);
+	union gmac_rx_fltr filter = { .bits = {
+		.broadcast = 1,
+		.multicast = 1,
+		.unicast = 1,
+	} };
+	struct netdev_hw_addr *ha;
+	unsigned int bit_nr;
+	u32 mc_filter[2];
+
+	mc_filter[1] = 0;
+	mc_filter[0] = 0;
+
+	if (netdev->flags & IFF_PROMISC) {
+		filter.bits.error = 1;
+		filter.bits.promiscuous = 1;
+		mc_filter[1] = ~0;
+		mc_filter[0] = ~0;
+	} else if (netdev->flags & IFF_ALLMULTI) {
+		mc_filter[1] = ~0;
+		mc_filter[0] = ~0;
+	} else {
+		netdev_for_each_mc_addr(ha, netdev) {
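+			/* Hash on the 6 LSBs of the inverted CRC-32 of the
+			 * address: bit 5 picks the filter word, bits 4:0
+			 * the bit within it.
+			 */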
+			bit_nr = ~crc32_le(~0, ha->addr, ETH_ALEN) & 0x3f;
+			mc_filter[bit_nr >> 5] |= 1 << (bit_nr & 0x1f);
+		}
+	}
+
+	writel(mc_filter[0], port->gmac_base + GMAC_MCAST_FIL0);
+	writel(mc_filter[1], port->gmac_base + GMAC_MCAST_FIL1);
+	writel(filter.bits32, port->gmac_base + GMAC_RX_FLTR);
+}
+
+static void gmac_write_mac_address(struct net_device *netdev)
+{
+	struct gemini_ethernet_port *port = netdev_priv(netdev);
+	__le32 addr[3];
+
+	memset(addr, 0, sizeof(addr));
+	memcpy(addr, netdev->dev_addr, ETH_ALEN);
+
+	writel(le32_to_cpu(addr[0]), port->gmac_base + GMAC_STA_ADD0);
+	writel(le32_to_cpu(addr[1]), port->gmac_base + GMAC_STA_ADD1);
+	writel(le32_to_cpu(addr[2]), port->gmac_base + GMAC_STA_ADD2);
+}
+
+static int gmac_set_mac_address(struct net_device *netdev, void *addr)
+{
+	struct sockaddr *sa = addr;
+
+	memcpy(netdev->dev_addr, sa->sa_data, ETH_ALEN);
+	gmac_write_mac_address(netdev);
+
+	return 0;
+}
+
+static void gmac_clear_hw_stats(struct net_device *netdev)
+{
+	struct gemini_ethernet_port *port = netdev_priv(netdev);
+
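+	/* The hardware MIB counters are clear-on-read; read and discard the
+	 * current values to zero them.
+	 */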
+	readl(port->gmac_base + GMAC_IN_DISCARDS);
+	readl(port->gmac_base + GMAC_IN_ERRORS);
+	readl(port->gmac_base + GMAC_IN_MCAST);
+	readl(port->gmac_base + GMAC_IN_BCAST);
+	readl(port->gmac_base + GMAC_IN_MAC1);
+	readl(port->gmac_base + GMAC_IN_MAC2);
+}
+
+static void gmac_get_stats64(struct net_device *netdev,
+			     struct rtnl_link_stats64 *stats)
+{
+	struct gemini_ethernet_port *port = netdev_priv(netdev);
+	unsigned int start;
+
+	gmac_update_hw_stats(netdev);
+
+	/* Racing with RX NAPI */
+	do {
+		start = u64_stats_fetch_begin(&port->rx_stats_syncp);
+
+		stats->rx_packets = port->stats.rx_packets;
+		stats->rx_bytes = port->stats.rx_bytes;
+		stats->rx_errors = port->stats.rx_errors;
+		stats->rx_dropped = port->stats.rx_dropped;
+
+		stats->rx_length_errors = port->stats.rx_length_errors;
+		stats->rx_over_errors = port->stats.rx_over_errors;
+		stats->rx_crc_errors = port->stats.rx_crc_errors;
+		stats->rx_frame_errors = port->stats.rx_frame_errors;
+
+	} while (u64_stats_fetch_retry(&port->rx_stats_syncp, start));
+
+	/* Racing with MIB and TX completion interrupts */
+	do {
+		start = u64_stats_fetch_begin(&port->ir_stats_syncp);
+
+		stats->tx_errors = port->stats.tx_errors;
+		stats->tx_packets = port->stats.tx_packets;
+		stats->tx_bytes = port->stats.tx_bytes;
+
+		stats->multicast = port->stats.multicast;
+		stats->rx_missed_errors = port->stats.rx_missed_errors;
+		stats->rx_fifo_errors = port->stats.rx_fifo_errors;
+
+	} while (u64_stats_fetch_retry(&port->ir_stats_syncp, start));
+
+	/* Racing with hard_start_xmit */
+	do {
+		start = u64_stats_fetch_begin(&port->tx_stats_syncp);
+
+		stats->tx_dropped = port->stats.tx_dropped;
+
+	} while (u64_stats_fetch_retry(&port->tx_stats_syncp, start));
+
+	stats->rx_dropped += stats->rx_missed_errors;
+}
+
+static int gmac_change_mtu(struct net_device *netdev, int new_mtu)
+{
+	int max_len = gmac_pick_rx_max_len(new_mtu);
+
+	if (max_len < 0)
+		return -EINVAL;
+
+	gmac_disable_tx_rx(netdev);
+
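+	/* max_len is the 3-bit hardware encoding of the frame length limit
+	 * (one of CONFIG0_MAXLEN_*), not a byte count.
+	 */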
+	netdev->mtu = new_mtu;
+	gmac_update_config0_reg(netdev, max_len << CONFIG0_MAXLEN_SHIFT,
+				CONFIG0_MAXLEN_MASK);
+
+	netdev_update_features(netdev);
+
+	gmac_enable_tx_rx(netdev);
+
+	return 0;
+}
+
+static netdev_features_t gmac_fix_features(struct net_device *netdev,
+					   netdev_features_t features)
+{
+	if (netdev->mtu + ETH_HLEN + VLAN_HLEN > MTU_SIZE_BIT_MASK)
+		features &= ~GMAC_OFFLOAD_FEATURES;
+
+	return features;
+}
+
+static int gmac_set_features(struct net_device *netdev,
+			     netdev_features_t features)
+{
+	struct gemini_ethernet_port *port = netdev_priv(netdev);
+	int enable = features & NETIF_F_RXCSUM;
+	unsigned long flags;
+	u32 reg;
+
+	spin_lock_irqsave(&port->config_lock, flags);
+
+	reg = readl(port->gmac_base + GMAC_CONFIG0);
+	reg = enable ? reg | CONFIG0_RX_CHKSUM : reg & ~CONFIG0_RX_CHKSUM;
+	writel(reg, port->gmac_base + GMAC_CONFIG0);
+
+	spin_unlock_irqrestore(&port->config_lock, flags);
+	return 0;
+}
+
+static int gmac_get_sset_count(struct net_device *netdev, int sset)
+{
+	return sset == ETH_SS_STATS ? GMAC_STATS_NUM : 0;
+}
+
+static void gmac_get_strings(struct net_device *netdev, u32 stringset, u8 *data)
+{
+	if (stringset != ETH_SS_STATS)
+		return;
+
+	memcpy(data, gmac_stats_strings, sizeof(gmac_stats_strings));
+}
+
+static void gmac_get_ethtool_stats(struct net_device *netdev,
+				   struct ethtool_stats *estats, u64 *values)
+{
+	struct gemini_ethernet_port *port = netdev_priv(netdev);
+	unsigned int start;
+	u64 *p;
+	int i;
+
+	gmac_update_hw_stats(netdev);
+
+	/* Racing with MIB interrupt */
+	do {
+		p = values;
+		start = u64_stats_fetch_begin(&port->ir_stats_syncp);
+
+		for (i = 0; i < RX_STATS_NUM; i++)
+			*p++ = port->hw_stats[i];
+
+	} while (u64_stats_fetch_retry(&port->ir_stats_syncp, start));
+	values = p;
+
+	/* Racing with RX NAPI */
+	do {
+		p = values;
+		start = u64_stats_fetch_begin(&port->rx_stats_syncp);
+
+		for (i = 0; i < RX_STATUS_NUM; i++)
+			*p++ = port->rx_stats[i];
+		for (i = 0; i < RX_CHKSUM_NUM; i++)
+			*p++ = port->rx_csum_stats[i];
+		*p++ = port->rx_napi_exits;
+
+	} while (u64_stats_fetch_retry(&port->rx_stats_syncp, start));
+	values = p;
+
+	/* Racing with TX start_xmit */
+	do {
+		p = values;
+		start = u64_stats_fetch_begin(&port->tx_stats_syncp);
+
+		for (i = 0; i < TX_MAX_FRAGS; i++) {
+			*values++ = port->tx_frag_stats[i];
+			port->tx_frag_stats[i] = 0;
+		}
+		*values++ = port->tx_frags_linearized;
+		*values++ = port->tx_hw_csummed;
+
+	} while (u64_stats_fetch_retry(&port->tx_stats_syncp, start));
+}
+
+static int gmac_get_ksettings(struct net_device *netdev,
+			      struct ethtool_link_ksettings *cmd)
+{
+	if (!netdev->phydev)
+		return -ENXIO;
+	phy_ethtool_ksettings_get(netdev->phydev, cmd);
+
+	return 0;
+}
+
+static int gmac_set_ksettings(struct net_device *netdev,
+			      const struct ethtool_link_ksettings *cmd)
+{
+	if (!netdev->phydev)
+		return -ENXIO;
+	return phy_ethtool_ksettings_set(netdev->phydev, cmd);
+}
+
+static int gmac_nway_reset(struct net_device *netdev)
+{
+	if (!netdev->phydev)
+		return -ENXIO;
+	return phy_start_aneg(netdev->phydev);
+}
+
+static void gmac_get_pauseparam(struct net_device *netdev,
+				struct ethtool_pauseparam *pparam)
+{
+	struct gemini_ethernet_port *port = netdev_priv(netdev);
+	union gmac_config0 config0;
+
+	config0.bits32 = readl(port->gmac_base + GMAC_CONFIG0);
+
+	pparam->rx_pause = config0.bits.rx_fc_en;
+	pparam->tx_pause = config0.bits.tx_fc_en;
+	pparam->autoneg = true;
+}
+
+static void gmac_get_ringparam(struct net_device *netdev,
+			       struct ethtool_ringparam *rp)
+{
+	struct gemini_ethernet_port *port = netdev_priv(netdev);
+	union gmac_config0 config0;
+
+	config0.bits32 = readl(port->gmac_base + GMAC_CONFIG0);
+
+	rp->rx_max_pending = 1 << 15;
+	rp->rx_mini_max_pending = 0;
+	rp->rx_jumbo_max_pending = 0;
+	rp->tx_max_pending = 1 << 15;
+
+	rp->rx_pending = 1 << port->rxq_order;
+	rp->rx_mini_pending = 0;
+	rp->rx_jumbo_pending = 0;
+	rp->tx_pending = 1 << port->txq_order;
+}
+
+static int gmac_set_ringparam(struct net_device *netdev,
+			      struct ethtool_ringparam *rp)
+{
+	struct gemini_ethernet_port *port = netdev_priv(netdev);
+	int err = 0;
+
+	if (netif_running(netdev))
+		return -EBUSY;
+
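+	/* The rings are sized by power-of-two order: round the requested
+	 * count up to the next power of two, capped at 2^15 entries.
+	 */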
+	if (rp->rx_pending) {
+		port->rxq_order = min(15, ilog2(rp->rx_pending - 1) + 1);
+		err = geth_resize_freeq(port);
+	}
+	if (rp->tx_pending) {
+		port->txq_order = min(15, ilog2(rp->tx_pending - 1) + 1);
+		port->irq_every_tx_packets = 1 << (port->txq_order - 2);
+	}
+
+	return err;
+}
+
+static int gmac_get_coalesce(struct net_device *netdev,
+			     struct ethtool_coalesce *ecmd)
+{
+	struct gemini_ethernet_port *port = netdev_priv(netdev);
+
+	ecmd->rx_max_coalesced_frames = 1;
+	ecmd->tx_max_coalesced_frames = port->irq_every_tx_packets;
+	ecmd->rx_coalesce_usecs = port->rx_coalesce_nsecs / 1000;
+
+	return 0;
+}
+
+static int gmac_set_coalesce(struct net_device *netdev,
+			     struct ethtool_coalesce *ecmd)
+{
+	struct gemini_ethernet_port *port = netdev_priv(netdev);
+
+	if (ecmd->tx_max_coalesced_frames < 1)
+		return -EINVAL;
+	if (ecmd->tx_max_coalesced_frames >= 1 << port->txq_order)
+		return -EINVAL;
+
+	port->irq_every_tx_packets = ecmd->tx_max_coalesced_frames;
+	port->rx_coalesce_nsecs = ecmd->rx_coalesce_usecs * 1000;
+
+	return 0;
+}
+
+static u32 gmac_get_msglevel(struct net_device *netdev)
+{
+	struct gemini_ethernet_port *port = netdev_priv(netdev);
+
+	return port->msg_enable;
+}
+
+static void gmac_set_msglevel(struct net_device *netdev, u32 level)
+{
+	struct gemini_ethernet_port *port = netdev_priv(netdev);
+
+	port->msg_enable = level;
+}
+
+static void gmac_get_drvinfo(struct net_device *netdev,
+			     struct ethtool_drvinfo *info)
+{
+	strcpy(info->driver,  DRV_NAME);
+	strcpy(info->version, DRV_VERSION);
+	strcpy(info->bus_info, netdev->dev_id ? "1" : "0");
+}
+
+static const struct net_device_ops gmac_351x_ops = {
+	.ndo_init		= gmac_init,
+	.ndo_uninit		= gmac_uninit,
+	.ndo_open		= gmac_open,
+	.ndo_stop		= gmac_stop,
+	.ndo_start_xmit		= gmac_start_xmit,
+	.ndo_tx_timeout		= gmac_tx_timeout,
+	.ndo_set_rx_mode	= gmac_set_rx_mode,
+	.ndo_set_mac_address	= gmac_set_mac_address,
+	.ndo_get_stats64	= gmac_get_stats64,
+	.ndo_change_mtu		= gmac_change_mtu,
+	.ndo_fix_features	= gmac_fix_features,
+	.ndo_set_features	= gmac_set_features,
+};
+
+static const struct ethtool_ops gmac_351x_ethtool_ops = {
+	.get_sset_count	= gmac_get_sset_count,
+	.get_strings	= gmac_get_strings,
+	.get_ethtool_stats = gmac_get_ethtool_stats,
+	.get_link	= ethtool_op_get_link,
+	.get_link_ksettings = gmac_get_ksettings,
+	.set_link_ksettings = gmac_set_ksettings,
+	.nway_reset	= gmac_nway_reset,
+	.get_pauseparam	= gmac_get_pauseparam,
+	.get_ringparam	= gmac_get_ringparam,
+	.set_ringparam	= gmac_set_ringparam,
+	.get_coalesce	= gmac_get_coalesce,
+	.set_coalesce	= gmac_set_coalesce,
+	.get_msglevel	= gmac_get_msglevel,
+	.set_msglevel	= gmac_set_msglevel,
+	.get_drvinfo	= gmac_get_drvinfo,
+};
+
+static irqreturn_t gemini_port_irq_thread(int irq, void *data)
+{
+	unsigned long irqmask = SWFQ_EMPTY_INT_BIT;
+	struct gemini_ethernet_port *port = data;
+	struct gemini_ethernet *geth;
+	unsigned long flags;
+
+	geth = port->geth;
+	/* The queue is half empty so refill it */
+	geth_fill_freeq(geth, true);
+
+	spin_lock_irqsave(&geth->irq_lock, flags);
+	/* ACK queue interrupt */
+	writel(irqmask, geth->base + GLOBAL_INTERRUPT_STATUS_4_REG);
+	/* Enable queue interrupt again */
+	irqmask |= readl(geth->base + GLOBAL_INTERRUPT_ENABLE_4_REG);
+	writel(irqmask, geth->base + GLOBAL_INTERRUPT_ENABLE_4_REG);
+	spin_unlock_irqrestore(&geth->irq_lock, flags);
+
+	return IRQ_HANDLED;
+}
+
+static irqreturn_t gemini_port_irq(int irq, void *data)
+{
+	struct gemini_ethernet_port *port = data;
+	struct gemini_ethernet *geth;
+	irqreturn_t ret = IRQ_NONE;
+	u32 val, en;
+
+	geth = port->geth;
+	spin_lock(&geth->irq_lock);
+
+	val = readl(geth->base + GLOBAL_INTERRUPT_STATUS_4_REG);
+	en = readl(geth->base + GLOBAL_INTERRUPT_ENABLE_4_REG);
+
+	if (val & en & SWFQ_EMPTY_INT_BIT) {
+		/* Disable the queue empty interrupt while we work on
+		 * processing the queue. Also disable overrun interrupts
+		 * as there is not much we can do about it here.
+		 */
+		en &= ~(SWFQ_EMPTY_INT_BIT | GMAC0_RX_OVERRUN_INT_BIT
+					   | GMAC1_RX_OVERRUN_INT_BIT);
+		writel(en, geth->base + GLOBAL_INTERRUPT_ENABLE_4_REG);
+		ret = IRQ_WAKE_THREAD;
+	}
+
+	spin_unlock(&geth->irq_lock);
+
+	return ret;
+}
+
+static void gemini_port_remove(struct gemini_ethernet_port *port)
+{
+	if (port->netdev)
+		unregister_netdev(port->netdev);
+	clk_disable_unprepare(port->pclk);
+	geth_cleanup_freeq(port->geth);
+}
+
+static void gemini_ethernet_init(struct gemini_ethernet *geth)
+{
+	writel(0, geth->base + GLOBAL_INTERRUPT_ENABLE_0_REG);
+	writel(0, geth->base + GLOBAL_INTERRUPT_ENABLE_1_REG);
+	writel(0, geth->base + GLOBAL_INTERRUPT_ENABLE_2_REG);
+	writel(0, geth->base + GLOBAL_INTERRUPT_ENABLE_3_REG);
+	writel(0, geth->base + GLOBAL_INTERRUPT_ENABLE_4_REG);
+
+	/* Interrupt config:
+	 *
+	 *	GMAC0 intr bits ------> int0 ----> eth0
+	 *	GMAC1 intr bits ------> int1 ----> eth1
+	 *	TOE intr -------------> int1 ----> eth1
+	 *	Classification Intr --> int0 ----> eth0
+	 *	Default Q0 -----------> int0 ----> eth0
+	 *	Default Q1 -----------> int1 ----> eth1
+	 *	FreeQ intr -----------> int1 ----> eth1
+	 */
+	writel(0xCCFC0FC0, geth->base + GLOBAL_INTERRUPT_SELECT_0_REG);
+	writel(0x00F00002, geth->base + GLOBAL_INTERRUPT_SELECT_1_REG);
+	writel(0xFFFFFFFF, geth->base + GLOBAL_INTERRUPT_SELECT_2_REG);
+	writel(0xFFFFFFFF, geth->base + GLOBAL_INTERRUPT_SELECT_3_REG);
+	writel(0xFF000003, geth->base + GLOBAL_INTERRUPT_SELECT_4_REG);
+
+	/* Edge-triggered interrupt events are latched into the status
+	 * registers; write all-ones to clear any pending bits
+	 */
+	writel(~0, geth->base + GLOBAL_INTERRUPT_STATUS_0_REG);
+	writel(~0, geth->base + GLOBAL_INTERRUPT_STATUS_1_REG);
+	writel(~0, geth->base + GLOBAL_INTERRUPT_STATUS_2_REG);
+	writel(~0, geth->base + GLOBAL_INTERRUPT_STATUS_3_REG);
+	writel(~0, geth->base + GLOBAL_INTERRUPT_STATUS_4_REG);
+
+	/* Set up queue */
+	writel(0, geth->base + GLOBAL_SW_FREEQ_BASE_SIZE_REG);
+	writel(0, geth->base + GLOBAL_HW_FREEQ_BASE_SIZE_REG);
+	writel(0, geth->base + GLOBAL_SWFQ_RWPTR_REG);
+	writel(0, geth->base + GLOBAL_HWFQ_RWPTR_REG);
+
+	geth->freeq_frag_order = DEFAULT_RX_BUF_ORDER;
+	/* This makes the queue resize on probe() so that we
+	 * set up and enable the queue IRQ. FIXME: fragile.
+	 */
+	geth->freeq_order = 1;
+}
+
+static void gemini_port_save_mac_addr(struct gemini_ethernet_port *port)
+{
+	port->mac_addr[0] =
+		cpu_to_le32(readl(port->gmac_base + GMAC_STA_ADD0));
+	port->mac_addr[1] =
+		cpu_to_le32(readl(port->gmac_base + GMAC_STA_ADD1));
+	port->mac_addr[2] =
+		cpu_to_le32(readl(port->gmac_base + GMAC_STA_ADD2));
+}
+
+static int gemini_ethernet_port_probe(struct platform_device *pdev)
+{
+	char *port_names[2] = { "ethernet0", "ethernet1" };
+	struct gemini_ethernet_port *port;
+	struct device *dev = &pdev->dev;
+	struct gemini_ethernet *geth;
+	struct net_device *netdev;
+	struct resource *gmacres;
+	struct resource *dmares;
+	struct device *parent;
+	unsigned int id;
+	int irq;
+	int ret;
+
+	parent = dev->parent;
+	geth = dev_get_drvdata(parent);
+
+	if (!strcmp(dev_name(dev), "60008000.ethernet-port"))
+		id = 0;
+	else if (!strcmp(dev_name(dev), "6000c000.ethernet-port"))
+		id = 1;
+	else
+		return -ENODEV;
+
+	dev_info(dev, "probe %s ID %d\n", dev_name(dev), id);
+
+	netdev = alloc_etherdev_mq(sizeof(*port), TX_QUEUE_NUM);
+	if (!netdev) {
+		dev_err(dev, "Can't allocate ethernet device #%d\n", id);
+		return -ENOMEM;
+	}
+
+	port = netdev_priv(netdev);
+	SET_NETDEV_DEV(netdev, dev);
+	port->netdev = netdev;
+	port->id = id;
+	port->geth = geth;
+	port->dev = dev;
+
+	/* DMA memory */
+	dmares = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	if (!dmares) {
+		dev_err(dev, "no DMA resource\n");
+		return -ENODEV;
+	}
+	port->dma_base = devm_ioremap_resource(dev, dmares);
+	if (IS_ERR(port->dma_base))
+		return PTR_ERR(port->dma_base);
+
+	/* GMAC config memory */
+	gmacres = platform_get_resource(pdev, IORESOURCE_MEM, 1);
+	if (!gmacres) {
+		dev_err(dev, "no GMAC resource\n");
+		return -ENODEV;
+	}
+	port->gmac_base = devm_ioremap_resource(dev, gmacres);
+	if (IS_ERR(port->gmac_base))
+		return PTR_ERR(port->gmac_base);
+
+	/* Interrupt */
+	irq = platform_get_irq(pdev, 0);
+	if (irq <= 0) {
+		dev_err(dev, "no IRQ\n");
+		return irq ? irq : -ENODEV;
+	}
+	port->irq = irq;
+
+	/* Clock the port */
+	port->pclk = devm_clk_get(dev, "PCLK");
+	if (IS_ERR(port->pclk)) {
+		dev_err(dev, "no PCLK\n");
+		return PTR_ERR(port->pclk);
+	}
+	ret = clk_prepare_enable(port->pclk);
+	if (ret)
+		return ret;
+
+	/* Maybe there is a nice ethernet address we should use */
+	gemini_port_save_mac_addr(port);
+
+	/* Reset the port */
+	port->reset = devm_reset_control_get_exclusive(dev, NULL);
+	if (IS_ERR(port->reset)) {
+		dev_err(dev, "no reset\n");
+		return PTR_ERR(port->reset);
+	}
+	reset_control_reset(port->reset);
+	usleep_range(100, 500);
+
+	/* Assign pointer in the main state container */
+	if (!id)
+		geth->port0 = port;
+	else
+		geth->port1 = port;
+	platform_set_drvdata(pdev, port);
+
+	/* Set up and register the netdev */
+	netdev->dev_id = port->id;
+	netdev->irq = irq;
+	netdev->netdev_ops = &gmac_351x_ops;
+	netdev->ethtool_ops = &gmac_351x_ethtool_ops;
+
+	spin_lock_init(&port->config_lock);
+	gmac_clear_hw_stats(netdev);
+
+	netdev->hw_features = GMAC_OFFLOAD_FEATURES;
+	netdev->features |= GMAC_OFFLOAD_FEATURES | NETIF_F_GRO;
+
+	port->freeq_refill = 0;
+	netif_napi_add(netdev, &port->napi, gmac_napi_poll,
+		       DEFAULT_NAPI_WEIGHT);
+
+	if (is_valid_ether_addr((void *)port->mac_addr)) {
+		memcpy(netdev->dev_addr, port->mac_addr, ETH_ALEN);
+	} else {
+		dev_dbg(dev, "ethernet address 0x%08x%08x%08x invalid\n",
+			port->mac_addr[0], port->mac_addr[1],
+			port->mac_addr[2]);
+		dev_info(dev, "using a random ethernet address\n");
+		random_ether_addr(netdev->dev_addr);
+	}
+	gmac_write_mac_address(netdev);
+
+	ret = devm_request_threaded_irq(port->dev,
+					port->irq,
+					gemini_port_irq,
+					gemini_port_irq_thread,
+					IRQF_SHARED,
+					port_names[port->id],
+					port);
+	if (ret)
+		return ret;
+
+	ret = register_netdev(netdev);
+	if (!ret) {
+		netdev_info(netdev,
+			    "irq %d, DMA @ 0x%pap, GMAC @ 0x%pap\n",
+			    port->irq, &dmares->start,
+			    &gmacres->start);
+		ret = gmac_setup_phy(netdev);
+		if (ret)
+			netdev_info(netdev,
+				    "PHY init failed, deferring to ifup time\n");
+		return 0;
+	}
+
+	port->netdev = NULL;
+	free_netdev(netdev);
+	return ret;
+}
+
+static int gemini_ethernet_port_remove(struct platform_device *pdev)
+{
+	struct gemini_ethernet_port *port = platform_get_drvdata(pdev);
+
+	gemini_port_remove(port);
+	return 0;
+}
+
+static const struct of_device_id gemini_ethernet_port_of_match[] = {
+	{
+		.compatible = "cortina,gemini-ethernet-port",
+	},
+	{},
+};
+MODULE_DEVICE_TABLE(of, gemini_ethernet_port_of_match);
+
+static struct platform_driver gemini_ethernet_port_driver = {
+	.driver = {
+		.name = "gemini-ethernet-port",
+		.of_match_table = of_match_ptr(gemini_ethernet_port_of_match),
+	},
+	.probe = gemini_ethernet_port_probe,
+	.remove = gemini_ethernet_port_remove,
+};
+
+static int gemini_ethernet_probe(struct platform_device *pdev)
+{
+	struct device *dev = &pdev->dev;
+	struct gemini_ethernet *geth;
+	unsigned int retry = 5;
+	struct resource *res;
+	u32 val;
+
+	/* Global registers */
+	geth = devm_kzalloc(dev, sizeof(*geth), GFP_KERNEL);
+	if (!geth)
+		return -ENOMEM;
+	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	if (!res)
+		return -ENODEV;
+	geth->base = devm_ioremap_resource(dev, res);
+	if (IS_ERR(geth->base))
+		return PTR_ERR(geth->base);
+	geth->dev = dev;
+
+	/* Wait for ports to stabilize */
+	do {
+		udelay(2);
+		val = readl(geth->base + GLOBAL_TOE_VERSION_REG);
+		barrier();
+	} while (!val && --retry);
+	if (!retry) {
+		dev_err(dev, "failed to reset ethernet\n");
+		return -EIO;
+	}
+	dev_info(dev, "Ethernet device ID: 0x%03x, revision 0x%01x\n",
+		 (val >> 4) & 0xFFFU, val & 0xFU);
+
+	spin_lock_init(&geth->irq_lock);
+	spin_lock_init(&geth->freeq_lock);
+	gemini_ethernet_init(geth);
+
+	/* The children will use this */
+	platform_set_drvdata(pdev, geth);
+
+	/* Spawn child devices for the two ports */
+	return devm_of_platform_populate(dev);
+}
+
+static int gemini_ethernet_remove(struct platform_device *pdev)
+{
+	struct gemini_ethernet *geth = platform_get_drvdata(pdev);
+
+	gemini_ethernet_init(geth);
+	geth_cleanup_freeq(geth);
+
+	return 0;
+}
+
+static const struct of_device_id gemini_ethernet_of_match[] = {
+	{
+		.compatible = "cortina,gemini-ethernet",
+	},
+	{},
+};
+MODULE_DEVICE_TABLE(of, gemini_ethernet_of_match);
+
+static struct platform_driver gemini_ethernet_driver = {
+	.driver = {
+		.name = DRV_NAME,
+		.of_match_table = of_match_ptr(gemini_ethernet_of_match),
+	},
+	.probe = gemini_ethernet_probe,
+	.remove = gemini_ethernet_remove,
+};
+
+static int __init gemini_ethernet_module_init(void)
+{
+	int ret;
+
+	ret = platform_driver_register(&gemini_ethernet_port_driver);
+	if (ret)
+		return ret;
+
+	ret = platform_driver_register(&gemini_ethernet_driver);
+	if (ret) {
+		platform_driver_unregister(&gemini_ethernet_port_driver);
+		return ret;
+	}
+
+	return 0;
+}
+module_init(gemini_ethernet_module_init);
+
+static void __exit gemini_ethernet_module_exit(void)
+{
+	platform_driver_unregister(&gemini_ethernet_driver);
+	platform_driver_unregister(&gemini_ethernet_port_driver);
+}
+module_exit(gemini_ethernet_module_exit);
+
+MODULE_AUTHOR("Linus Walleij <linus.walleij@linaro.org>");
+MODULE_DESCRIPTION("StorLink SL351x (Gemini) ethernet driver");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:" DRV_NAME);
diff --git a/drivers/net/ethernet/cortina/gemini.h b/drivers/net/ethernet/cortina/gemini.h
new file mode 100644
index 000000000000..0b12f89bf89a
--- /dev/null
+++ b/drivers/net/ethernet/cortina/gemini.h
@@ -0,0 +1,958 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Register definitions for Gemini GMAC Ethernet device driver
+ *
+ * Copyright (C) 2006 Storlink, Corp.
+ * Copyright (C) 2008-2009 Paulius Zaleckas <paulius.zaleckas@teltonika.lt>
+ * Copyright (C) 2010 Michał Mirosław <mirq-linux@rere.qmqm.pl>
+ * Copyright (C) 2017 Linus Walleij <linus.walleij@linaro.org>
+ */
+#ifndef _GEMINI_ETHERNET_H
+#define _GEMINI_ETHERNET_H
+
+#include <linux/bitops.h>
+
+/* Base Registers */
+#define TOE_NONTOE_QUE_HDR_BASE		0x2000
+#define TOE_TOE_QUE_HDR_BASE		0x3000
+
+/* Queue ID */
+#define TOE_SW_FREE_QID			0x00
+#define TOE_HW_FREE_QID			0x01
+#define TOE_GMAC0_SW_TXQ0_QID		0x02
+#define TOE_GMAC0_SW_TXQ1_QID		0x03
+#define TOE_GMAC0_SW_TXQ2_QID		0x04
+#define TOE_GMAC0_SW_TXQ3_QID		0x05
+#define TOE_GMAC0_SW_TXQ4_QID		0x06
+#define TOE_GMAC0_SW_TXQ5_QID		0x07
+#define TOE_GMAC0_HW_TXQ0_QID		0x08
+#define TOE_GMAC0_HW_TXQ1_QID		0x09
+#define TOE_GMAC0_HW_TXQ2_QID		0x0A
+#define TOE_GMAC0_HW_TXQ3_QID		0x0B
+#define TOE_GMAC1_SW_TXQ0_QID		0x12
+#define TOE_GMAC1_SW_TXQ1_QID		0x13
+#define TOE_GMAC1_SW_TXQ2_QID		0x14
+#define TOE_GMAC1_SW_TXQ3_QID		0x15
+#define TOE_GMAC1_SW_TXQ4_QID		0x16
+#define TOE_GMAC1_SW_TXQ5_QID		0x17
+#define TOE_GMAC1_HW_TXQ0_QID		0x18
+#define TOE_GMAC1_HW_TXQ1_QID		0x19
+#define TOE_GMAC1_HW_TXQ2_QID		0x1A
+#define TOE_GMAC1_HW_TXQ3_QID		0x1B
+#define TOE_GMAC0_DEFAULT_QID		0x20
+#define TOE_GMAC1_DEFAULT_QID		0x21
+#define TOE_CLASSIFICATION_QID(x)	(0x22 + (x))	/* 0x22 ~ 0x2F */
+#define TOE_TOE_QID(x)			(0x40 + (x))	/* 0x40 ~ 0x7F */
+
+/* TOE DMA Queue Size should be 2^n, n = 6...12
+ * TOE DMA Queues are the following queue types:
+ *		SW Free Queue, HW Free Queue,
+ *		GMAC 0/1 SW TX Q0-5, and GMAC 0/1 HW TX Q0-5
+ * The base address and descriptor number are configured at
+ * DMA Queues Descriptor Ring Base Address/Size Register (offset 0x0004)
+ */
+#define GET_WPTR(addr)			readw((addr) + 2)
+#define GET_RPTR(addr)			readw((addr))
+#define SET_WPTR(addr, data)		writew((data), (addr) + 2)
+#define SET_RPTR(addr, data)		writew((data), (addr))
+#define __RWPTR_NEXT(x, mask)		(((unsigned int)(x) + 1) & (mask))
+#define __RWPTR_PREV(x, mask)		(((unsigned int)(x) - 1) & (mask))
+#define __RWPTR_DISTANCE(r, w, mask)	(((unsigned int)(w) - (r)) & (mask))
+#define __RWPTR_MASK(order)		((1 << (order)) - 1)
+#define RWPTR_NEXT(x, order)		__RWPTR_NEXT((x), __RWPTR_MASK((order)))
+#define RWPTR_PREV(x, order)		__RWPTR_PREV((x), __RWPTR_MASK((order)))
+#define RWPTR_DISTANCE(r, w, order)	__RWPTR_DISTANCE((r), (w), \
+						__RWPTR_MASK((order)))
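+/* Worked example, assuming a queue of order 6 (64 entries):
+ * __RWPTR_MASK(6) = 0x3f, RWPTR_NEXT(63, 6) wraps to 0, and
+ * RWPTR_DISTANCE(60, 3, 6) = (3 - 60) & 0x3f = 7 entries in flight.
+ */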
+
+/* Global registers */
+#define GLOBAL_TOE_VERSION_REG		0x0000
+#define GLOBAL_SW_FREEQ_BASE_SIZE_REG	0x0004
+#define GLOBAL_HW_FREEQ_BASE_SIZE_REG	0x0008
+#define GLOBAL_DMA_SKB_SIZE_REG		0x0010
+#define GLOBAL_SWFQ_RWPTR_REG		0x0014
+#define GLOBAL_HWFQ_RWPTR_REG		0x0018
+#define GLOBAL_INTERRUPT_STATUS_0_REG	0x0020
+#define GLOBAL_INTERRUPT_ENABLE_0_REG	0x0024
+#define GLOBAL_INTERRUPT_SELECT_0_REG	0x0028
+#define GLOBAL_INTERRUPT_STATUS_1_REG	0x0030
+#define GLOBAL_INTERRUPT_ENABLE_1_REG	0x0034
+#define GLOBAL_INTERRUPT_SELECT_1_REG	0x0038
+#define GLOBAL_INTERRUPT_STATUS_2_REG	0x0040
+#define GLOBAL_INTERRUPT_ENABLE_2_REG	0x0044
+#define GLOBAL_INTERRUPT_SELECT_2_REG	0x0048
+#define GLOBAL_INTERRUPT_STATUS_3_REG	0x0050
+#define GLOBAL_INTERRUPT_ENABLE_3_REG	0x0054
+#define GLOBAL_INTERRUPT_SELECT_3_REG	0x0058
+#define GLOBAL_INTERRUPT_STATUS_4_REG	0x0060
+#define GLOBAL_INTERRUPT_ENABLE_4_REG	0x0064
+#define GLOBAL_INTERRUPT_SELECT_4_REG	0x0068
+#define GLOBAL_HASH_TABLE_BASE_REG	0x006C
+#define GLOBAL_QUEUE_THRESHOLD_REG	0x0070
+
+/* GMAC 0/1 DMA/TOE register */
+#define GMAC_DMA_CTRL_REG		0x0000
+#define GMAC_TX_WEIGHTING_CTRL_0_REG	0x0004
+#define GMAC_TX_WEIGHTING_CTRL_1_REG	0x0008
+#define GMAC_SW_TX_QUEUE0_PTR_REG	0x000C
+#define GMAC_SW_TX_QUEUE1_PTR_REG	0x0010
+#define GMAC_SW_TX_QUEUE2_PTR_REG	0x0014
+#define GMAC_SW_TX_QUEUE3_PTR_REG	0x0018
+#define GMAC_SW_TX_QUEUE4_PTR_REG	0x001C
+#define GMAC_SW_TX_QUEUE5_PTR_REG	0x0020
+#define GMAC_SW_TX_QUEUE_PTR_REG(i)	(GMAC_SW_TX_QUEUE0_PTR_REG + 4 * (i))
+#define GMAC_HW_TX_QUEUE0_PTR_REG	0x0024
+#define GMAC_HW_TX_QUEUE1_PTR_REG	0x0028
+#define GMAC_HW_TX_QUEUE2_PTR_REG	0x002C
+#define GMAC_HW_TX_QUEUE3_PTR_REG	0x0030
+#define GMAC_HW_TX_QUEUE_PTR_REG(i)	(GMAC_HW_TX_QUEUE0_PTR_REG + 4 * (i))
+#define GMAC_DMA_TX_FIRST_DESC_REG	0x0038
+#define GMAC_DMA_TX_CURR_DESC_REG	0x003C
+#define GMAC_DMA_TX_DESC_WORD0_REG	0x0040
+#define GMAC_DMA_TX_DESC_WORD1_REG	0x0044
+#define GMAC_DMA_TX_DESC_WORD2_REG	0x0048
+#define GMAC_DMA_TX_DESC_WORD3_REG	0x004C
+#define GMAC_SW_TX_QUEUE_BASE_REG	0x0050
+#define GMAC_HW_TX_QUEUE_BASE_REG	0x0054
+#define GMAC_DMA_RX_FIRST_DESC_REG	0x0058
+#define GMAC_DMA_RX_CURR_DESC_REG	0x005C
+#define GMAC_DMA_RX_DESC_WORD0_REG	0x0060
+#define GMAC_DMA_RX_DESC_WORD1_REG	0x0064
+#define GMAC_DMA_RX_DESC_WORD2_REG	0x0068
+#define GMAC_DMA_RX_DESC_WORD3_REG	0x006C
+#define GMAC_HASH_ENGINE_REG0		0x0070
+#define GMAC_HASH_ENGINE_REG1		0x0074
+/* matching rule 0 Control register 0 */
+#define GMAC_MR0CR0			0x0078
+#define GMAC_MR0CR1			0x007C
+#define GMAC_MR0CR2			0x0080
+#define GMAC_MR1CR0			0x0084
+#define GMAC_MR1CR1			0x0088
+#define GMAC_MR1CR2			0x008C
+#define GMAC_MR2CR0			0x0090
+#define GMAC_MR2CR1			0x0094
+#define GMAC_MR2CR2			0x0098
+#define GMAC_MR3CR0			0x009C
+#define GMAC_MR3CR1			0x00A0
+#define GMAC_MR3CR2			0x00A4
+/* Support Protocol Register 0 */
+#define GMAC_SPR0			0x00A8
+#define GMAC_SPR1			0x00AC
+#define GMAC_SPR2			0x00B0
+#define GMAC_SPR3			0x00B4
+#define GMAC_SPR4			0x00B8
+#define GMAC_SPR5			0x00BC
+#define GMAC_SPR6			0x00C0
+#define GMAC_SPR7			0x00C4
+/* GMAC Hash/Rx/Tx AHB Weighting register */
+#define GMAC_AHB_WEIGHT_REG		0x00C8
+
+/* TOE GMAC 0/1 register */
+#define GMAC_STA_ADD0			0x0000
+#define GMAC_STA_ADD1			0x0004
+#define GMAC_STA_ADD2			0x0008
+#define GMAC_RX_FLTR			0x000c
+#define GMAC_MCAST_FIL0			0x0010
+#define GMAC_MCAST_FIL1			0x0014
+#define GMAC_CONFIG0			0x0018
+#define GMAC_CONFIG1			0x001c
+#define GMAC_CONFIG2			0x0020
+#define GMAC_CONFIG3			0x0024
+#define GMAC_RESERVED			0x0028
+#define GMAC_STATUS			0x002c
+#define GMAC_IN_DISCARDS		0x0030
+#define GMAC_IN_ERRORS			0x0034
+#define GMAC_IN_MCAST			0x0038
+#define GMAC_IN_BCAST			0x003c
+#define GMAC_IN_MAC1			0x0040	/* for STA 1 MAC Address */
+#define GMAC_IN_MAC2			0x0044	/* for STA 2 MAC Address */
+
+#define RX_STATS_NUM	6
+
+/* DMA Queues description Ring Base Address/Size Register (offset 0x0004) */
+union dma_q_base_size {
+	unsigned int bits32;
+	unsigned int base_size;
+};
+
+#define DMA_Q_BASE_MASK		(~0x0f)
+
+/* DMA SKB Buffer register (offset 0x0008) */
+union dma_skb_size {
+	unsigned int bits32;
+	struct bit_0008 {
+		unsigned int sw_skb_size : 16;	/* SW Free pool SKB Size */
+		unsigned int hw_skb_size : 16;	/* HW Free pool SKB Size */
+	} bits;
+};
+
+/* DMA SW Free Queue Read/Write Pointer Register (offset 0x000c) */
+union dma_rwptr {
+	unsigned int bits32;
+	struct bit_000c {
+		unsigned int rptr	: 16;	/* Read Ptr, RO */
+		unsigned int wptr	: 16;	/* Write Ptr, RW */
+	} bits;
+};
+
+/* Interrupt Status Register 0	(offset 0x0020)
+ * Interrupt Mask Register 0	(offset 0x0024)
+ * Interrupt Select Register 0	(offset 0x0028)
+ */
+#define GMAC1_TXDERR_INT_BIT		BIT(31)
+#define GMAC1_TXPERR_INT_BIT		BIT(30)
+#define GMAC0_TXDERR_INT_BIT		BIT(29)
+#define GMAC0_TXPERR_INT_BIT		BIT(28)
+#define GMAC1_RXDERR_INT_BIT		BIT(27)
+#define GMAC1_RXPERR_INT_BIT		BIT(26)
+#define GMAC0_RXDERR_INT_BIT		BIT(25)
+#define GMAC0_RXPERR_INT_BIT		BIT(24)
+#define GMAC1_SWTQ15_FIN_INT_BIT	BIT(23)
+#define GMAC1_SWTQ14_FIN_INT_BIT	BIT(22)
+#define GMAC1_SWTQ13_FIN_INT_BIT	BIT(21)
+#define GMAC1_SWTQ12_FIN_INT_BIT	BIT(20)
+#define GMAC1_SWTQ11_FIN_INT_BIT	BIT(19)
+#define GMAC1_SWTQ10_FIN_INT_BIT	BIT(18)
+#define GMAC0_SWTQ05_FIN_INT_BIT	BIT(17)
+#define GMAC0_SWTQ04_FIN_INT_BIT	BIT(16)
+#define GMAC0_SWTQ03_FIN_INT_BIT	BIT(15)
+#define GMAC0_SWTQ02_FIN_INT_BIT	BIT(14)
+#define GMAC0_SWTQ01_FIN_INT_BIT	BIT(13)
+#define GMAC0_SWTQ00_FIN_INT_BIT	BIT(12)
+#define GMAC1_SWTQ15_EOF_INT_BIT	BIT(11)
+#define GMAC1_SWTQ14_EOF_INT_BIT	BIT(10)
+#define GMAC1_SWTQ13_EOF_INT_BIT	BIT(9)
+#define GMAC1_SWTQ12_EOF_INT_BIT	BIT(8)
+#define GMAC1_SWTQ11_EOF_INT_BIT	BIT(7)
+#define GMAC1_SWTQ10_EOF_INT_BIT	BIT(6)
+#define GMAC0_SWTQ05_EOF_INT_BIT	BIT(5)
+#define GMAC0_SWTQ04_EOF_INT_BIT	BIT(4)
+#define GMAC0_SWTQ03_EOF_INT_BIT	BIT(3)
+#define GMAC0_SWTQ02_EOF_INT_BIT	BIT(2)
+#define GMAC0_SWTQ01_EOF_INT_BIT	BIT(1)
+#define GMAC0_SWTQ00_EOF_INT_BIT	BIT(0)
+
+/* Interrupt Status Register 1	(offset 0x0030)
+ * Interrupt Mask Register 1	(offset 0x0034)
+ * Interrupt Select Register 1	(offset 0x0038)
+ */
+#define TOE_IQ3_FULL_INT_BIT		BIT(31)
+#define TOE_IQ2_FULL_INT_BIT		BIT(30)
+#define TOE_IQ1_FULL_INT_BIT		BIT(29)
+#define TOE_IQ0_FULL_INT_BIT		BIT(28)
+#define TOE_IQ3_INT_BIT			BIT(27)
+#define TOE_IQ2_INT_BIT			BIT(26)
+#define TOE_IQ1_INT_BIT			BIT(25)
+#define TOE_IQ0_INT_BIT			BIT(24)
+#define GMAC1_HWTQ13_EOF_INT_BIT	BIT(23)
+#define GMAC1_HWTQ12_EOF_INT_BIT	BIT(22)
+#define GMAC1_HWTQ11_EOF_INT_BIT	BIT(21)
+#define GMAC1_HWTQ10_EOF_INT_BIT	BIT(20)
+#define GMAC0_HWTQ03_EOF_INT_BIT	BIT(19)
+#define GMAC0_HWTQ02_EOF_INT_BIT	BIT(18)
+#define GMAC0_HWTQ01_EOF_INT_BIT	BIT(17)
+#define GMAC0_HWTQ00_EOF_INT_BIT	BIT(16)
+#define CLASS_RX_INT_BIT(x)		BIT((x + 2))
+#define DEFAULT_Q1_INT_BIT		BIT(1)
+#define DEFAULT_Q0_INT_BIT		BIT(0)
+
+#define TOE_IQ_INT_BITS		(TOE_IQ0_INT_BIT | TOE_IQ1_INT_BIT | \
+				 TOE_IQ2_INT_BIT | TOE_IQ3_INT_BIT)
+#define	TOE_IQ_FULL_BITS	(TOE_IQ0_FULL_INT_BIT | TOE_IQ1_FULL_INT_BIT | \
+				 TOE_IQ2_FULL_INT_BIT | TOE_IQ3_FULL_INT_BIT)
+#define	TOE_IQ_ALL_BITS		(TOE_IQ_INT_BITS | TOE_IQ_FULL_BITS)
+#define TOE_CLASS_RX_INT_BITS	0xfffc
+
+/* Interrupt Status Register 2	(offset 0x0040)
+ * Interrupt Mask Register 2	(offset 0x0044)
+ * Interrupt Select Register 2	(offset 0x0048)
+ */
+#define TOE_QL_FULL_INT_BIT(x)		BIT(x)
+
+/* Interrupt Status Register 3	(offset 0x0050)
+ * Interrupt Mask Register 3	(offset 0x0054)
+ * Interrupt Select Register 3	(offset 0x0058)
+ */
+#define TOE_QH_FULL_INT_BIT(x)		BIT(x - 32)
+
+/* Interrupt Status Register 4	(offset 0x0060)
+ * Interrupt Mask Register 4	(offset 0x0064)
+ * Interrupt Select Register 4	(offset 0x0068)
+ */
+#define GMAC1_RESERVED_INT_BIT		BIT(31)
+#define GMAC1_MIB_INT_BIT		BIT(30)
+#define GMAC1_RX_PAUSE_ON_INT_BIT	BIT(29)
+#define GMAC1_TX_PAUSE_ON_INT_BIT	BIT(28)
+#define GMAC1_RX_PAUSE_OFF_INT_BIT	BIT(27)
+#define GMAC1_TX_PAUSE_OFF_INT_BIT	BIT(26)
+#define GMAC1_RX_OVERRUN_INT_BIT	BIT(25)
+#define GMAC1_STATUS_CHANGE_INT_BIT	BIT(24)
+#define GMAC0_RESERVED_INT_BIT		BIT(23)
+#define GMAC0_MIB_INT_BIT		BIT(22)
+#define GMAC0_RX_PAUSE_ON_INT_BIT	BIT(21)
+#define GMAC0_TX_PAUSE_ON_INT_BIT	BIT(20)
+#define GMAC0_RX_PAUSE_OFF_INT_BIT	BIT(19)
+#define GMAC0_TX_PAUSE_OFF_INT_BIT	BIT(18)
+#define GMAC0_RX_OVERRUN_INT_BIT	BIT(17)
+#define GMAC0_STATUS_CHANGE_INT_BIT	BIT(16)
+#define CLASS_RX_FULL_INT_BIT(x)	BIT(x + 2)
+#define HWFQ_EMPTY_INT_BIT		BIT(1)
+#define SWFQ_EMPTY_INT_BIT		BIT(0)
+
+#define GMAC0_INT_BITS	(GMAC0_RESERVED_INT_BIT | GMAC0_MIB_INT_BIT | \
+			 GMAC0_RX_PAUSE_ON_INT_BIT | \
+			 GMAC0_TX_PAUSE_ON_INT_BIT | \
+			 GMAC0_RX_PAUSE_OFF_INT_BIT | \
+			 GMAC0_TX_PAUSE_OFF_INT_BIT | \
+			 GMAC0_RX_OVERRUN_INT_BIT | \
+			 GMAC0_STATUS_CHANGE_INT_BIT)
+#define GMAC1_INT_BITS	(GMAC1_RESERVED_INT_BIT | GMAC1_MIB_INT_BIT | \
+			 GMAC1_RX_PAUSE_ON_INT_BIT | \
+			 GMAC1_TX_PAUSE_ON_INT_BIT | \
+			 GMAC1_RX_PAUSE_OFF_INT_BIT | \
+			 GMAC1_TX_PAUSE_OFF_INT_BIT | \
+			 GMAC1_RX_OVERRUN_INT_BIT | \
+			 GMAC1_STATUS_CHANGE_INT_BIT)
+
+#define CLASS_RX_FULL_INT_BITS		0xfffc
+
+/* GLOBAL_QUEUE_THRESHOLD_REG	(offset 0x0070) */
+union queue_threshold {
+	unsigned int bits32;
+	struct bit_0070_2 {
+		/*  7:0 Software Free Queue Empty Threshold */
+		unsigned int swfq_empty:8;
+		/* 15:8 Hardware Free Queue Empty Threshold */
+		unsigned int hwfq_empty:8;
+		/* 23:16 */
+		unsigned int intrq:8;
+		/* 31:24 */
+		unsigned int toe_class:8;
+	} bits;
+};
+
+/* GMAC DMA Control Register
+ * GMAC0 offset 0x8000
+ * GMAC1 offset 0xC000
+ */
+union gmac_dma_ctrl {
+	unsigned int bits32;
+	struct bit_8000 {
+		/* bit 1:0 Peripheral Bus Width */
+		unsigned int td_bus:2;
+		/* bit 3:2 TxDMA max burst size for every AHB request */
+		unsigned int td_burst_size:2;
+		/* bit 7:4 TxDMA protection control */
+		unsigned int td_prot:4;
+		/* bit 9:8 Peripheral Bus Width */
+		unsigned int rd_bus:2;
+		/* bit 11:10 DMA max burst size for every AHB request */
+		unsigned int rd_burst_size:2;
+		/* bit 15:12 DMA Protection Control */
+		unsigned int rd_prot:4;
+		/* bit 17:16 */
+		unsigned int rd_insert_bytes:2;
+		/* bit 27:18 */
+		unsigned int reserved:10;
+		/* bit 28 1: Drop, 0: Accept */
+		unsigned int drop_small_ack:1;
+		/* bit 29 Loopback TxDMA to RxDMA */
+		unsigned int loopback:1;
+		/* bit 30 Tx DMA Enable */
+		unsigned int td_enable:1;
+		/* bit 31 Rx DMA Enable */
+		unsigned int rd_enable:1;
+	} bits;
+};
+
+/* GMAC Tx Weighting Control Register 0
+ * GMAC0 offset 0x8004
+ * GMAC1 offset 0xC004
+ */
+union gmac_tx_wcr0 {
+	unsigned int bits32;
+	struct bit_8004 {
+		/* bit 5:0 HW TX Queue 3 */
+		unsigned int hw_tq0:6;
+		/* bit 11:6 HW TX Queue 2 */
+		unsigned int hw_tq1:6;
+		/* bit 17:12 HW TX Queue 1 */
+		unsigned int hw_tq2:6;
+		/* bit 23:18 HW TX Queue 0 */
+		unsigned int hw_tq3:6;
+		/* bit 31:24 */
+		unsigned int reserved:8;
+	} bits;
+};
+
+/* GMAC Tx Weighting Control Register 1
+ * GMAC0 offset 0x8008
+ * GMAC1 offset 0xC008
+ */
+union gmac_tx_wcr1 {
+	unsigned int bits32;
+	struct bit_8008 {
+		/* bit 4:0 SW TX Queue 0 */
+		unsigned int sw_tq0:5;
+		/* bit 9:5 SW TX Queue 1 */
+		unsigned int sw_tq1:5;
+		/* bit 14:10 SW TX Queue 2 */
+		unsigned int sw_tq2:5;
+		/* bit 19:15 SW TX Queue 3 */
+		unsigned int sw_tq3:5;
+		/* bit 24:20 SW TX Queue 4 */
+		unsigned int sw_tq4:5;
+		/* bit 29:25 SW TX Queue 5 */
+		unsigned int sw_tq5:5;
+		/* bit 31:30 */
+		unsigned int reserved:2;
+	} bits;
+};
+
+/* GMAC DMA Tx Description Word 0 Register
+ * GMAC0 offset 0x8040
+ * GMAC1 offset 0xC040
+ */
+union gmac_txdesc_0 {
+	unsigned int bits32;
+	struct bit_8040 {
+		/* bit 15:0 Transfer size */
+		unsigned int buffer_size:16;
+		/* bit 21:16 number of descriptors used for the current frame */
+		unsigned int desc_count:6;
+		/* bit 22 Tx Status, 1: Successful 0: Failed */
+		unsigned int status_tx_ok:1;
+		/* bit 28:23 Tx Status, Reserved bits */
+		unsigned int status_rvd:6;
+		/* bit 29 protocol error during processing this descriptor */
+		unsigned int perr:1;
+		/* bit 30 data error during processing this descriptor */
+		unsigned int derr:1;
+		/* bit 31 */
+		unsigned int reserved:1;
+	} bits;
+};
+
+/* GMAC DMA Tx Description Word 1 Register
+ * GMAC0 offset 0x8044
+ * GMAC1 offset 0xC044
+ */
+union gmac_txdesc_1 {
+	unsigned int bits32;
+	struct txdesc_word1 {
+		/* bit 15: 0 Tx Frame Byte Count */
+		unsigned int byte_count:16;
+		/* bit 16 TSS segmentation use MTU setting */
+		unsigned int mtu_enable:1;
+		/* bit 17 IPV4 Header Checksum Enable */
+		unsigned int ip_chksum:1;
+		/* bit 18 IPV6 Tx Enable */
+		unsigned int ipv6_enable:1;
+		/* bit 19 TCP Checksum Enable */
+		unsigned int tcp_chksum:1;
+		/* bit 20 UDP Checksum Enable */
+		unsigned int udp_chksum:1;
+		/* bit 21 Bypass HW offload engine */
+		unsigned int bypass_tss:1;
+		/* bit 22 Don't update IP length field */
+		unsigned int ip_fixed_len:1;
+		/* bit 31:23 Tx Flag, Reserved */
+		unsigned int reserved:9;
+	} bits;
+};
+
+#define TSS_IP_FIXED_LEN_BIT	BIT(22)
+#define TSS_BYPASS_BIT		BIT(21)
+#define TSS_UDP_CHKSUM_BIT	BIT(20)
+#define TSS_TCP_CHKSUM_BIT	BIT(19)
+#define TSS_IPV6_ENABLE_BIT	BIT(18)
+#define TSS_IP_CHKSUM_BIT	BIT(17)
+#define TSS_MTU_ENABLE_BIT	BIT(16)
+
+#define TSS_CHECKUM_ENABLE	\
+	(TSS_IP_CHKSUM_BIT | TSS_IPV6_ENABLE_BIT | \
+	 TSS_TCP_CHKSUM_BIT | TSS_UDP_CHKSUM_BIT)
+
+/* GMAC DMA Tx Description Word 2 Register
+ * GMAC0 offset 0x8048
+ * GMAC1 offset 0xC048
+ */
+union gmac_txdesc_2 {
+	unsigned int	bits32;
+	unsigned int	buf_adr;
+};
+
+/* GMAC DMA Tx Description Word 3 Register
+ * GMAC0 offset 0x804C
+ * GMAC1 offset 0xC04C
+ */
+union gmac_txdesc_3 {
+	unsigned int bits32;
+	struct txdesc_word3 {
+		/* bit 12:0 MTU size used by the TSS segmentation engine */
+		unsigned int mtu_size:13;
+		/* bit 28:13 */
+		unsigned int reserved:16;
+		/* bit 29 End of frame interrupt enable */
+		unsigned int eofie:1;
+		/* bit 31:30 11: only one, 10: first, 01: last, 00: linking */
+		unsigned int sof_eof:2;
+	} bits;
+};
+
+#define SOF_EOF_BIT_MASK	0x3fffffff
+#define SOF_BIT			0x80000000
+#define EOF_BIT			0x40000000
+#define EOFIE_BIT		BIT(29)
+#define MTU_SIZE_BIT_MASK	0x1fff
+
+/* GMAC Tx Descriptor */
+struct gmac_txdesc {
+	union gmac_txdesc_0 word0;
+	union gmac_txdesc_1 word1;
+	union gmac_txdesc_2 word2;
+	union gmac_txdesc_3 word3;
+};
+
+/* GMAC DMA Rx Description Word 0 Register
+ * GMAC0 offset 0x8060
+ * GMAC1 offset 0xC060
+ */
+union gmac_rxdesc_0 {
+	unsigned int bits32;
+	struct bit_8060 {
+		/* bit 15:0 buffer size of this descriptor */
+		unsigned int buffer_size:16;
+		/* bit 21:16 number of descriptors used for the current frame */
+		unsigned int desc_count:6;
+		/* bit 25:22 Status of rx frame */
+		unsigned int status:4;
+		/* bit 28:26 Check Sum Status */
+		unsigned int chksum_status:3;
+		/* bit 29 protocol error during processing this descriptor */
+		unsigned int perr:1;
+		/* bit 30 data error during processing this descriptor */
+		unsigned int derr:1;
+		/* bit 31 TOE/CIS Queue Full dropped packet to default queue */
+		unsigned int drop:1;
+	} bits;
+};
+
+#define	GMAC_RXDESC_0_T_derr			BIT(30)
+#define	GMAC_RXDESC_0_T_perr			BIT(29)
+#define	GMAC_RXDESC_0_T_chksum_status(x)	BIT(x + 26)
+#define	GMAC_RXDESC_0_T_status(x)		BIT(x + 22)
+#define	GMAC_RXDESC_0_T_desc_count(x)		BIT(x + 16)
+
+#define	RX_CHKSUM_IP_UDP_TCP_OK			0
+#define	RX_CHKSUM_IP_OK_ONLY			1
+#define	RX_CHKSUM_NONE				2
+#define	RX_CHKSUM_IP_ERR_UNKNOWN		4
+#define	RX_CHKSUM_IP_ERR			5
+#define	RX_CHKSUM_TCP_UDP_ERR			6
+#define RX_CHKSUM_NUM				8
+
+#define RX_STATUS_GOOD_FRAME			0
+#define RX_STATUS_TOO_LONG_GOOD_CRC		1
+#define RX_STATUS_RUNT_FRAME			2
+#define RX_STATUS_SFD_NOT_FOUND			3
+#define RX_STATUS_CRC_ERROR			4
+#define RX_STATUS_TOO_LONG_BAD_CRC		5
+#define RX_STATUS_ALIGNMENT_ERROR		6
+#define RX_STATUS_TOO_LONG_BAD_ALIGN		7
+#define RX_STATUS_RX_ERR			8
+#define RX_STATUS_DA_FILTERED			9
+#define RX_STATUS_BUFFER_FULL			10
+#define RX_STATUS_NUM				16
+
+#define RX_ERROR_LENGTH(s) \
+	((s) == RX_STATUS_TOO_LONG_GOOD_CRC || \
+	 (s) == RX_STATUS_TOO_LONG_BAD_CRC || \
+	 (s) == RX_STATUS_TOO_LONG_BAD_ALIGN)
+#define RX_ERROR_OVER(s) \
+	((s) == RX_STATUS_BUFFER_FULL)
+#define RX_ERROR_CRC(s) \
+	((s) == RX_STATUS_CRC_ERROR || \
+	 (s) == RX_STATUS_TOO_LONG_BAD_CRC)
+#define RX_ERROR_FRAME(s) \
+	((s) == RX_STATUS_ALIGNMENT_ERROR || \
+	 (s) == RX_STATUS_TOO_LONG_BAD_ALIGN)
+#define RX_ERROR_FIFO(s) \
+	(0)
+
+/* GMAC DMA Rx Description Word 1 Register
+ * GMAC0 offset 0x8064
+ * GMAC1 offset 0xC064
+ */
+union gmac_rxdesc_1 {
+	unsigned int bits32;
+	struct rxdesc_word1 {
+		/* bit 15: 0 Rx Frame Byte Count */
+		unsigned int byte_count:16;
+		/* bit 31:16 Software ID */
+		unsigned int sw_id:16;
+	} bits;
+};
+
+/* GMAC DMA Rx Description Word 2 Register
+ * GMAC0 offset 0x8068
+ * GMAC1 offset 0xC068
+ */
+union gmac_rxdesc_2 {
+	unsigned int	bits32;
+	unsigned int	buf_adr;
+};
+
+#define RX_INSERT_NONE		0
+#define RX_INSERT_1_BYTE	1
+#define RX_INSERT_2_BYTE	2
+#define RX_INSERT_3_BYTE	3
+
+/* GMAC DMA Rx Description Word 3 Register
+ * GMAC0 offset 0x806C
+ * GMAC1 offset 0xC06C
+ */
+union gmac_rxdesc_3 {
+	unsigned int bits32;
+	struct rxdesc_word3 {
+		/* bit 7: 0 L3 data offset */
+		unsigned int l3_offset:8;
+		/* bit 15: 8 L4 data offset */
+		unsigned int l4_offset:8;
+		/* bit 23: 16 L7 data offset */
+		unsigned int l7_offset:8;
+		/* bit 24 Duplicated ACK detected */
+		unsigned int dup_ack:1;
+		/* bit 25 abnormal case found */
+		unsigned int abnormal:1;
+		/* bit 26 IPV4 option or IPV6 extension header */
+		unsigned int option:1;
+		/* bit 27 Out of Sequence packet */
+		unsigned int out_of_seq:1;
+		/* bit 28 Control Flag is present */
+		unsigned int ctrl_flag:1;
+		/* bit 29 End of frame interrupt enable */
+		unsigned int eofie:1;
+		/* bit 31:30 11: only one, 10: first, 01: last, 00: linking */
+		unsigned int sof_eof:2;
+	} bits;
+};
+
+/* GMAC Rx Descriptor, this is simply fitted over the queue registers */
+struct gmac_rxdesc {
+	union gmac_rxdesc_0 word0;
+	union gmac_rxdesc_1 word1;
+	union gmac_rxdesc_2 word2;
+	union gmac_rxdesc_3 word3;
+};
+
+/* GMAC Matching Rule Control Register 0
+ * GMAC0 offset 0x8078
+ * GMAC1 offset 0xC078
+ */
+#define MR_L2_BIT		BIT(31)
+#define MR_L3_BIT		BIT(30)
+#define MR_L4_BIT		BIT(29)
+#define MR_L7_BIT		BIT(28)
+#define MR_PORT_BIT		BIT(27)
+#define MR_PRIORITY_BIT		BIT(26)
+#define MR_DA_BIT		BIT(23)
+#define MR_SA_BIT		BIT(22)
+#define MR_ETHER_TYPE_BIT	BIT(21)
+#define MR_VLAN_BIT		BIT(20)
+#define MR_PPPOE_BIT		BIT(19)
+#define MR_IP_VER_BIT		BIT(15)
+#define MR_IP_HDR_LEN_BIT	BIT(14)
+#define MR_FLOW_LABLE_BIT	BIT(13)
+#define MR_TOS_TRAFFIC_BIT	BIT(12)
+#define MR_SPR_BIT(x)		BIT(x)
+#define MR_SPR_BITS		0xff
+
+/* GMAC_AHB_WEIGHT registers
+ * GMAC0 offset 0x80C8
+ * GMAC1 offset 0xC0C8
+ */
+union gmac_ahb_weight {
+	unsigned int bits32;
+	struct bit_80C8 {
+		/* 4:0 */
+		unsigned int hash_weight:5;
+		/* 9:5 */
+		unsigned int rx_weight:5;
+		/* 14:10 */
+		unsigned int tx_weight:5;
+		/* 19:15 Rx Data Pre Request FIFO Threshold */
+		unsigned int pre_req:5;
+		/* 24:20 DMA TqCtrl to Start tqDV FIFO Threshold */
+		unsigned int tq_dv_threshold:5;
+		/* 31:25 */
+		unsigned int reserved:7;
+	} bits;
+};
+
+/* GMAC RX FLTR
+ * GMAC0 Offset 0xA00C
+ * GMAC1 Offset 0xE00C
+ */
+union gmac_rx_fltr {
+	unsigned int bits32;
+	struct bit1_000c {
+		/* Enable receive of unicast frames that are sent to STA
+		 * address
+		 */
+		unsigned int unicast:1;
+		/* Enable receive of multicast frames that pass multicast
+		 * filter
+		 */
+		unsigned int multicast:1;
+		/* Enable receive of broadcast frames */
+		unsigned int broadcast:1;
+		/* Enable receive of all frames */
+		unsigned int promiscuous:1;
+		/* Enable receive of all error frames */
+		unsigned int error:1;
+		unsigned int reserved:27;
+	} bits;
+};
+
+/* GMAC Configuration 0
+ * GMAC0 Offset 0xA018
+ * GMAC1 Offset 0xE018
+ */
+union gmac_config0 {
+	unsigned int bits32;
+	struct bit1_0018 {
+		/* 0: disable transmit */
+		unsigned int dis_tx:1;
+		/* 1: disable receive */
+		unsigned int dis_rx:1;
+		/* 2: transmit data loopback enable */
+		unsigned int loop_back:1;
+		/* 3: flow control also triggered by Rx queues */
+		unsigned int flow_ctrl:1;
+		/* 4-7: adjust IFG from 96+/-56 */
+		unsigned int adj_ifg:4;
+		/* 8-10 maximum receive frame length allowed */
+		unsigned int max_len:3;
+		/* 11: disable back-off function */
+		unsigned int dis_bkoff:1;
+		/* 12: disable 16 collisions abort function */
+		unsigned int dis_col:1;
+		/* 13: speed up timers in simulation */
+		unsigned int sim_test:1;
+		/* 14: RX flow control enable */
+		unsigned int rx_fc_en:1;
+		/* 15: TX flow control enable */
+		unsigned int tx_fc_en:1;
+		/* 16: RGMII in-band status enable */
+		unsigned int rgmii_en:1;
+		/* 17: IPv4 RX Checksum enable */
+		unsigned int ipv4_rx_chksum:1;
+		/* 18: IPv6 RX Checksum enable */
+		unsigned int ipv6_rx_chksum:1;
+		/* 19: Remove Rx VLAN tag */
+		unsigned int rx_tag_remove:1;
+		/* 20 */
+		unsigned int rgmm_edge:1;
+		/* 21 */
+		unsigned int rxc_inv:1;
+		/* 22 */
+		unsigned int ipv6_exthdr_order:1;
+		/* 23 */
+		unsigned int rx_err_detect:1;
+		/* 24 */
+		unsigned int port0_chk_hwq:1;
+		/* 25 */
+		unsigned int port1_chk_hwq:1;
+		/* 26 */
+		unsigned int port0_chk_toeq:1;
+		/* 27 */
+		unsigned int port1_chk_toeq:1;
+		/* 28 */
+		unsigned int port0_chk_classq:1;
+		/* 29 */
+		unsigned int port1_chk_classq:1;
+		/* 30, 31 */
+		unsigned int reserved:2;
+	} bits;
+};
+
+#define CONFIG0_TX_RX_DISABLE	(BIT(1) | BIT(0))
+#define CONFIG0_RX_CHKSUM	(BIT(18) | BIT(17))
+#define CONFIG0_FLOW_RX		BIT(14)
+#define CONFIG0_FLOW_TX		BIT(15)
+#define CONFIG0_FLOW_TX_RX	(BIT(14) | BIT(15))
+#define CONFIG0_FLOW_CTL	(BIT(14) | BIT(15))
+
+#define CONFIG0_MAXLEN_SHIFT	8
+#define CONFIG0_MAXLEN_MASK	(7 << CONFIG0_MAXLEN_SHIFT)
+#define  CONFIG0_MAXLEN_1536	0
+#define  CONFIG0_MAXLEN_1518	1
+#define  CONFIG0_MAXLEN_1522	2
+#define  CONFIG0_MAXLEN_1542	3
+#define  CONFIG0_MAXLEN_9k	4	/* 9212 */
+#define  CONFIG0_MAXLEN_10k	5	/* 10236 */
+#define  CONFIG0_MAXLEN_1518__6	6
+#define  CONFIG0_MAXLEN_1518__7	7
+
+/* GMAC Configuration 1
+ * GMAC0 Offset 0xA01C
+ * GMAC1 Offset 0xE01C
+ */
+union gmac_config1 {
+	unsigned int bits32;
+	struct bit1_001c {
+		/* Flow control set threshold */
+		unsigned int set_threshold:8;
+		/* Flow control release threshold */
+		unsigned int rel_threshold:8;
+		unsigned int reserved:16;
+	} bits;
+};
+
+#define GMAC_FLOWCTRL_SET_MAX		32
+#define GMAC_FLOWCTRL_SET_MIN		0
+#define GMAC_FLOWCTRL_RELEASE_MAX	32
+#define GMAC_FLOWCTRL_RELEASE_MIN	0
+
+/* GMAC Configuration 2
+ * GMAC0 Offset 0xA020
+ * GMAC1 Offset 0xE020
+ */
+union gmac_config2 {
+	unsigned int bits32;
+	struct bit1_0020 {
+		/* Flow control set threshold */
+		unsigned int set_threshold:16;
+		/* Flow control release threshold */
+		unsigned int rel_threshold:16;
+	} bits;
+};
+
+/* GMAC Configuration 3
+ * GMAC0 Offset 0xA024
+ * GMAC1 Offset 0xE024
+ */
+union gmac_config3 {
+	unsigned int bits32;
+	struct bit1_0024 {
+		/* Flow control set threshold */
+		unsigned int set_threshold:16;
+		/* Flow control release threshold */
+		unsigned int rel_threshold:16;
+	} bits;
+};
+
+/* GMAC STATUS
+ * GMAC0 Offset 0xA02C
+ * GMAC1 Offset 0xE02C
+ */
+union gmac_status {
+	unsigned int bits32;
+	struct bit1_002c {
+		/* Link status */
+		unsigned int link:1;
+		/* Link speed (00->2.5M 01->25M 10->125M) */
+		unsigned int speed:2;
+		/* Duplex mode */
+		unsigned int duplex:1;
+		unsigned int reserved_1:1;
+		/* PHY interface type */
+		unsigned int mii_rmii:2;
+		unsigned int reserved_2:25;
+	} bits;
+};
+
+#define GMAC_SPEED_10			0
+#define GMAC_SPEED_100			1
+#define GMAC_SPEED_1000			2
+
+#define GMAC_PHY_MII			0
+#define GMAC_PHY_GMII			1
+#define GMAC_PHY_RGMII_100_10		2
+#define GMAC_PHY_RGMII_1000		3
+
+/* Queue Header
+ *	(1) TOE Queue Header
+ *	(2) Non-TOE Queue Header
+ *	(3) Interrupt Queue Header
+ *
+ * memory Layout
+ *	TOE Queue Header
+ *		     0x60003000 +---------------------------+ 0x0000
+ *				|     TOE Queue 0 Header    |
+ *				|         8 * 4 Bytes	    |
+ *				+---------------------------+ 0x0020
+ *				|     TOE Queue 1 Header    |
+ *				|         8 * 4 Bytes	    |
+ *				+---------------------------+ 0x0040
+ *				|          ......           |
+ *				|                           |
+ *				+---------------------------+
+ *
+ *	Non TOE Queue Header
+ *		     0x60002000 +---------------------------+ 0x0000
+ *				|   Default Queue 0 Header  |
+ *				|         2 * 4 Bytes       |
+ *				+---------------------------+ 0x0008
+ *				|   Default Queue 1 Header  |
+ *				|         2 * 4 Bytes       |
+ *				+---------------------------+ 0x0010
+ *				|   Classification Queue 0  |
+ *				|	  2 * 4 Bytes       |
+ *				+---------------------------+
+ *				|   Classification Queue 1  |
+ *				|	  2 * 4 Bytes       |
+ *				+---------------------------+ (n * 8 + 0x10)
+ *				|		...	    |
+ *				|	  2 * 4 Bytes	    |
+ *				+---------------------------+ (13 * 8 + 0x10)
+ *				|   Classification Queue 13 |
+ *				|	  2 * 4 Bytes	    |
+ *				+---------------------------+ 0x80
+ *				|      Interrupt Queue 0    |
+ *				|	  2 * 4 Bytes	    |
+ *				+---------------------------+
+ *				|      Interrupt Queue 1    |
+ *				|	  2 * 4 Bytes	    |
+ *				+---------------------------+
+ *				|      Interrupt Queue 2    |
+ *				|	  2 * 4 Bytes	    |
+ *				+---------------------------+
+ *				|      Interrupt Queue 3    |
+ *				|	  2 * 4 Bytes	    |
+ *				+---------------------------+
+ *
+ */
+#define TOE_QUEUE_HDR_ADDR(n)	(TOE_TOE_QUE_HDR_BASE + (n) * 32)
+#define TOE_Q_HDR_AREA_END	(TOE_QUEUE_HDR_ADDR(TOE_TOE_QUEUE_MAX + 1))
+#define TOE_DEFAULT_Q_HDR_BASE(x) (TOE_NONTOE_QUE_HDR_BASE + 0x08 * (x))
+#define TOE_CLASS_Q_HDR_BASE	(TOE_NONTOE_QUE_HDR_BASE + 0x10)
+#define TOE_INTR_Q_HDR_BASE	(TOE_NONTOE_QUE_HDR_BASE + 0x80)
+#define INTERRUPT_QUEUE_HDR_ADDR(n) (TOE_INTR_Q_HDR_BASE + (n) * 8)
+#define NONTOE_Q_HDR_AREA_END (INTERRUPT_QUEUE_HDR_ADDR(TOE_INTR_QUEUE_MAX + 1))
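
Worked addresses for the macros above, assuming the 0x60003000/0x60002000 bases from the layout diagram (illustration only):

/*   TOE_QUEUE_HDR_ADDR(0)       -> 0x60003000  (8 words per TOE queue)
 *   TOE_QUEUE_HDR_ADDR(1)       -> 0x60003020
 *   TOE_DEFAULT_Q_HDR_BASE(1)   -> 0x60002008  (2 words per queue)
 *   INTERRUPT_QUEUE_HDR_ADDR(2) -> 0x60002090  (0x60002080 + 2 * 8)
 */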
+
+/* NONTOE Queue Header Word 0 */
+union nontoe_qhdr0 {
+	unsigned int bits32;
+	unsigned int base_size;
+};
+
+#define NONTOE_QHDR0_BASE_MASK	(~0x0f)
+
+/* NONTOE Queue Header Word 1 */
+union nontoe_qhdr1 {
+	unsigned int bits32;
+	struct bit_nonqhdr1 {
+		/* bit 15:0 */
+		unsigned int rptr:16;
+		/* bit 31:16 */
+		unsigned int wptr:16;
+	} bits;
+};
+
+/* Non-TOE Queue Header */
+struct nontoe_qhdr {
+	union nontoe_qhdr0 word0;
+	union nontoe_qhdr1 word1;
+};
+
+#endif /* _GEMINI_ETHERNET_H */
diff --git a/drivers/net/ethernet/emulex/benet/be_cmds.c b/drivers/net/ethernet/emulex/benet/be_cmds.c
index 02dd5246dfae..1a49297224ed 100644
--- a/drivers/net/ethernet/emulex/benet/be_cmds.c
+++ b/drivers/net/ethernet/emulex/benet/be_cmds.c
@@ -103,7 +103,7 @@ static struct be_cmd_priv_map cmd_priv_map[] = {
 static bool be_cmd_allowed(struct be_adapter *adapter, u8 opcode, u8 subsystem)
 {
 	int i;
-	int num_entries = sizeof(cmd_priv_map)/sizeof(struct be_cmd_priv_map);
+	int num_entries = ARRAY_SIZE(cmd_priv_map);
 	u32 cmd_privileges = adapter->cmd_privileges;
 
 	for (i = 0; i < num_entries; i++)
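ARRAY_SIZE() is preferred over the open-coded sizeof division because it stays correct if the element type changes and, in the kernel, refuses to compile when handed a pointer rather than an array. A stand-alone approximation of the idea (the in-kernel macro adds a __must_be_array() check):

#include <stdio.h>

#define ARRAY_SIZE(arr) (sizeof(arr) / sizeof((arr)[0]))

int main(void)
{
	int map[] = { 1, 2, 3, 5, 8 };

	printf("%zu entries\n", ARRAY_SIZE(map));	/* prints "5 entries" */
	return 0;
}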
diff --git a/drivers/net/ethernet/emulex/benet/be_main.c b/drivers/net/ethernet/emulex/benet/be_main.c
index e180657a02ef..c36c81959198 100644
--- a/drivers/net/ethernet/emulex/benet/be_main.c
+++ b/drivers/net/ethernet/emulex/benet/be_main.c
@@ -4634,8 +4634,7 @@ int be_update_queues(struct be_adapter *adapter)
 
 	be_schedule_worker(adapter);
 
-	/*
-	 * The IF was destroyed and re-created. We need to clear
+	/* The IF was destroyed and re-created. We need to clear
 	 * all promiscuous flags valid for the destroyed IF.
 	 * Without this promisc mode is not restored during
 	 * be_open() because the driver thinks that it is
diff --git a/drivers/net/ethernet/freescale/fec.h b/drivers/net/ethernet/freescale/fec.h
index 5385074b3b7d..e7381f8ef89d 100644
--- a/drivers/net/ethernet/freescale/fec.h
+++ b/drivers/net/ethernet/freescale/fec.h
@@ -20,7 +20,8 @@
 #include <linux/timecounter.h>
 
 #if defined(CONFIG_M523x) || defined(CONFIG_M527x) || defined(CONFIG_M528x) || \
-    defined(CONFIG_M520x) || defined(CONFIG_M532x) || defined(CONFIG_ARM)
+    defined(CONFIG_M520x) || defined(CONFIG_M532x) || defined(CONFIG_ARM) || \
+    defined(CONFIG_ARM64)
 /*
  *	Just figures, Motorola would have to change the offsets for
  *	registers in the same peripheral device on different models
@@ -195,7 +196,7 @@
  *	Evidently, ARM SoCs have the FEC block generated in a
  *	little endian mode so adjust endianness accordingly.
  */
-#if defined(CONFIG_ARM)
+#if defined(CONFIG_ARM) || defined(CONFIG_ARM64)
 #define fec32_to_cpu le32_to_cpu
 #define fec16_to_cpu le16_to_cpu
 #define cpu_to_fec32 cpu_to_le32
diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c
index a74300a4459c..7a7f3a42b2aa 100644
--- a/drivers/net/ethernet/freescale/fec_main.c
+++ b/drivers/net/ethernet/freescale/fec_main.c
@@ -195,7 +195,8 @@ MODULE_PARM_DESC(macaddr, "FEC Ethernet MAC address");
  * account when setting it.
  */
 #if defined(CONFIG_M523x) || defined(CONFIG_M527x) || defined(CONFIG_M528x) || \
-    defined(CONFIG_M520x) || defined(CONFIG_M532x) || defined(CONFIG_ARM)
+    defined(CONFIG_M520x) || defined(CONFIG_M532x) || defined(CONFIG_ARM) || \
+    defined(CONFIG_ARM64)
 #define	OPT_FRAME_SIZE	(PKT_MAXBUF_SIZE << 16)
 #else
 #define	OPT_FRAME_SIZE	0
@@ -1868,6 +1869,8 @@ static int fec_enet_clk_enable(struct net_device *ndev, bool enable)
 		ret = clk_prepare_enable(fep->clk_ref);
 		if (ret)
 			goto failed_clk_ref;
+
+		phy_reset_after_clk_enable(ndev->phydev);
 	} else {
 		clk_disable_unprepare(fep->clk_ahb);
 		clk_disable_unprepare(fep->clk_enet_out);
@@ -2107,7 +2110,8 @@ static int fec_enet_get_regs_len(struct net_device *ndev)
 
 /* List of registers that can safely be read to dump them with ethtool */
 #if defined(CONFIG_M523x) || defined(CONFIG_M527x) || defined(CONFIG_M528x) || \
-	defined(CONFIG_M520x) || defined(CONFIG_M532x) || defined(CONFIG_ARM)
+	defined(CONFIG_M520x) || defined(CONFIG_M532x) || defined(CONFIG_ARM) || \
+	defined(CONFIG_ARM64)
 static u32 fec_enet_register_offset[] = {
 	FEC_IEVENT, FEC_IMASK, FEC_R_DES_ACTIVE_0, FEC_X_DES_ACTIVE_0,
 	FEC_ECNTRL, FEC_MII_DATA, FEC_MII_SPEED, FEC_MIB_CTRLSTAT, FEC_R_CNTRL,
@@ -2840,6 +2844,7 @@ fec_enet_open(struct net_device *ndev)
 {
 	struct fec_enet_private *fep = netdev_priv(ndev);
 	int ret;
+	bool reset_again;
 
 	ret = pm_runtime_get_sync(&fep->pdev->dev);
 	if (ret < 0)
@@ -2850,6 +2855,17 @@ fec_enet_open(struct net_device *ndev)
 	if (ret)
 		goto clk_enable;
 
+	/* During the first fec_enet_open call the PHY has not been probed
+	 * yet, so the phy_reset_after_clk_enable() call within
+	 * fec_enet_clk_enable() fails. As we need this reset to be sure the
+	 * PHY is working correctly, we check whether we need to reset again
+	 * later, once the PHY has been probed.
+	 */
+	if (ndev->phydev && ndev->phydev->drv)
+		reset_again = false;
+	else
+		reset_again = true;
+
 	/* I should reset the ring buffers here, but I don't yet know
 	 * a simple way to do that.
 	 */
@@ -2866,6 +2882,12 @@ fec_enet_open(struct net_device *ndev)
 	if (ret)
 		goto err_enet_mii_probe;
 
+	/* Call phy_reset_after_clk_enable() again if it failed during
+	 * fec_enet_clk_enable() above because the PHY wasn't probed yet.
+	 */
+	if (reset_again)
+		phy_reset_after_clk_enable(ndev->phydev);
+
 	if (fep->quirks & FEC_QUIRK_ERR006687)
 		imx6q_cpuidle_fec_irqs_used();
 
@@ -3119,7 +3141,7 @@ static int fec_enet_init(struct net_device *ndev)
 	unsigned dsize_log2 = __fls(dsize);
 
 	WARN_ON(dsize != (1 << dsize_log2));
-#if defined(CONFIG_ARM)
+#if defined(CONFIG_ARM) || defined(CONFIG_ARM64)
 	fep->rx_align = 0xf;
 	fep->tx_align = 0xf;
 #else
diff --git a/drivers/net/ethernet/freescale/gianfar.c b/drivers/net/ethernet/freescale/gianfar.c
index 7f837006bb6a..3bdeb295514b 100644
--- a/drivers/net/ethernet/freescale/gianfar.c
+++ b/drivers/net/ethernet/freescale/gianfar.c
@@ -2932,7 +2932,7 @@ static irqreturn_t gfar_transmit(int irq, void *grp_id)
 static bool gfar_add_rx_frag(struct gfar_rx_buff *rxb, u32 lstatus,
 			     struct sk_buff *skb, bool first)
 {
-	unsigned int size = lstatus & BD_LENGTH_MASK;
+	int size = lstatus & BD_LENGTH_MASK;
 	struct page *page = rxb->page;
 	bool last = !!(lstatus & BD_LFLAG(RXBD_LAST));
 
@@ -2947,11 +2947,16 @@ static bool gfar_add_rx_frag(struct gfar_rx_buff *rxb, u32 lstatus,
 		if (last)
 			size -= skb->len;
 
-		/* in case the last fragment consisted only of the FCS */
+		/* Add the last fragment if it contains something other than
+		 * the FCS, otherwise drop it and trim off any part of the FCS
+		 * that was already received.
+		 */
 		if (size > 0)
 			skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page,
 					rxb->page_offset + RXBUF_ALIGNMENT,
 					size, GFAR_RXB_TRUESIZE);
+		else if (size < 0)
+			pskb_trim(skb, skb->len + size);
 	}
 
 	/* try reuse page */
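
The switch from unsigned int to int for size is what makes the new branch possible: once the last buffer holds nothing but (part of) the FCS, the subtraction size -= skb->len can produce a negative result, and with an unsigned type it would instead wrap to a huge value. A stand-alone illustration of the arithmetic, with hypothetical numbers:

#include <stdio.h>

int main(void)
{
	/* The last BD reports a frame length of 1514, but 1516 bytes (two
	 * stray FCS bytes included) were already queued in earlier
	 * fragments.
	 */
	int size = 1514 - 1516;			/* -2: pskb_trim() drops 2 bytes */
	unsigned int wrapped = 1514u - 1516u;	/* wraps: bogus huge fragment */

	printf("signed: %d, unsigned: %u\n", size, wrapped);
	return 0;
}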
diff --git a/drivers/net/ethernet/hisilicon/Kconfig b/drivers/net/ethernet/hisilicon/Kconfig
index 30000b6aa7b8..8bcf470ff5f3 100644
--- a/drivers/net/ethernet/hisilicon/Kconfig
+++ b/drivers/net/ethernet/hisilicon/Kconfig
@@ -94,15 +94,6 @@ config HNS3_HCLGE
 	  compatibility layer. The engine would be used in Hisilicon hip08 family of
 	  SoCs and further upcoming SoCs.
 
-config HNS3_ENET
-	tristate "Hisilicon HNS3 Ethernet Device Support"
-	depends on 64BIT && PCI
-	depends on HNS3 && HNS3_HCLGE
-	---help---
-	  This selects the Ethernet Driver for Hisilicon Network Subsystem 3 for hip08
-	  family of SoCs. This module depends upon HNAE3 driver to access the HNAE3
-	  devices and their associated operations.
-
 config HNS3_DCB
 	bool "Hisilicon HNS3 Data Center Bridge Support"
 	default n
@@ -112,4 +103,23 @@ config HNS3_DCB
 
 	  If unsure, say N.
 
+config HNS3_HCLGEVF
+	tristate "Hisilicon HNS3VF Acceleration Engine & Compatibility Layer Support"
+	depends on PCI_MSI
+	depends on HNS3
+	depends on HNS3_HCLGE
+	---help---
+	  This selects the HNS3 VF driver's network acceleration engine & its hardware
+	  compatibility layer. The engine would be used in the Hisilicon hip08 family of
+	  SoCs and further upcoming SoCs.
+
+config HNS3_ENET
+	tristate "Hisilicon HNS3 Ethernet Device Support"
+	depends on 64BIT && PCI
+	depends on HNS3
+	---help---
+	  This selects the Ethernet Driver for Hisilicon Network Subsystem 3 for the
+	  hip08 family of SoCs. This module depends upon the HNAE3 driver to access
+	  devices and their associated operations.
+
 endif # NET_VENDOR_HISILICON
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.c b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.c
index 8b5cdf490850..cac86e9ae0dd 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.c
+++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.c
@@ -1168,7 +1168,7 @@ void hns_set_led_opt(struct hns_mac_cb *mac_cb)
 int hns_cpld_led_set_id(struct hns_mac_cb *mac_cb,
 			enum hnae_led_state status)
 {
-	if (!mac_cb || !mac_cb->cpld_ctrl)
+	if (!mac_cb)
 		return 0;
 
 	return mac_cb->dsaf_dev->misc_op->cpld_set_led_id(mac_cb, status);
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_misc.c b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_misc.c
index 408b63faf9a8..acf29633ec79 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_misc.c
+++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_misc.c
@@ -18,6 +18,7 @@ enum _dsm_op_index {
 	HNS_OP_LED_SET_FUNC             = 0x3,
 	HNS_OP_GET_PORT_TYPE_FUNC       = 0x4,
 	HNS_OP_GET_SFP_STAT_FUNC        = 0x5,
+	HNS_OP_LOCATE_LED_SET_FUNC      = 0x6,
 };
 
 enum _dsm_rst_type {
@@ -43,12 +44,17 @@ static void dsaf_write_sub(struct dsaf_device *dsaf_dev, u32 reg, u32 val)
 
 static u32 dsaf_read_sub(struct dsaf_device *dsaf_dev, u32 reg)
 {
-	u32 ret;
-
-	if (dsaf_dev->sub_ctrl)
-		ret = dsaf_read_syscon(dsaf_dev->sub_ctrl, reg);
-	else
+	u32 ret = 0;
+	int err;
+
+	if (dsaf_dev->sub_ctrl) {
+		err = dsaf_read_syscon(dsaf_dev->sub_ctrl, reg, &ret);
+		if (err)
+			dev_err(dsaf_dev->dev, "dsaf_read_syscon error %d!\n",
+				err);
+	} else {
 		ret = dsaf_read_reg(dsaf_dev->sc_base, reg);
+	}
 
 	return ret;
 }
@@ -81,6 +87,33 @@ static void hns_dsaf_acpi_ledctrl_by_port(struct hns_mac_cb *mac_cb, u8 op_type,
        ACPI_FREE(obj);
 }
 
+static void hns_dsaf_acpi_locate_ledctrl_by_port(struct hns_mac_cb *mac_cb,
+						 u8 op_type, u32 locate,
+						 u32 port)
+{
+	union acpi_object obj_args[2], argv4;
+	union acpi_object *obj;
+
+	obj_args[0].integer.type = ACPI_TYPE_INTEGER;
+	obj_args[0].integer.value = locate;
+	obj_args[1].integer.type = ACPI_TYPE_INTEGER;
+	obj_args[1].integer.value = port;
+
+	argv4.type = ACPI_TYPE_PACKAGE;
+	argv4.package.count = 2;
+	argv4.package.elements = obj_args;
+
+	obj = acpi_evaluate_dsm(ACPI_HANDLE(mac_cb->dev),
+				&hns_dsaf_acpi_dsm_guid, 0, op_type, &argv4);
+	if (!obj) {
+		dev_err(mac_cb->dev, "ledctrl fail, locate:%d port:%d!\n",
+			locate, port);
+		return;
+	}
+
+	ACPI_FREE(obj);
+}
+
 static void hns_cpld_set_led(struct hns_mac_cb *mac_cb, int link_status,
 			     u16 speed, int data)
 {
@@ -160,15 +193,23 @@ static void cpld_led_reset_acpi(struct hns_mac_cb *mac_cb)
 static int cpld_set_led_id(struct hns_mac_cb *mac_cb,
 			   enum hnae_led_state status)
 {
+	u32 val = 0;
+	int ret;
+
+	if (!mac_cb->cpld_ctrl)
+		return 0;
+
 	switch (status) {
 	case HNAE_LED_ACTIVE:
-		mac_cb->cpld_led_value =
-			dsaf_read_syscon(mac_cb->cpld_ctrl,
-					 mac_cb->cpld_ctrl_reg);
-		dsaf_set_bit(mac_cb->cpld_led_value, DSAF_LED_ANCHOR_B,
-			     CPLD_LED_ON_VALUE);
+		ret = dsaf_read_syscon(mac_cb->cpld_ctrl, mac_cb->cpld_ctrl_reg,
+				       &val);
+		if (ret)
+			return ret;
+
+		dsaf_set_bit(val, DSAF_LED_ANCHOR_B, CPLD_LED_ON_VALUE);
 		dsaf_write_syscon(mac_cb->cpld_ctrl, mac_cb->cpld_ctrl_reg,
-				  mac_cb->cpld_led_value);
+				  val);
+		mac_cb->cpld_led_value = val;
 		break;
 	case HNAE_LED_INACTIVE:
 		dsaf_set_bit(mac_cb->cpld_led_value, DSAF_LED_ANCHOR_B,
@@ -184,6 +225,30 @@ static int cpld_set_led_id(struct hns_mac_cb *mac_cb,
 	return 0;
 }
 
+static int cpld_set_led_id_acpi(struct hns_mac_cb *mac_cb,
+				enum hnae_led_state status)
+{
+	switch (status) {
+	case HNAE_LED_ACTIVE:
+		hns_dsaf_acpi_locate_ledctrl_by_port(mac_cb,
+						     HNS_OP_LOCATE_LED_SET_FUNC,
+						     CPLD_LED_ON_VALUE,
+						     mac_cb->mac_id);
+		break;
+	case HNAE_LED_INACTIVE:
+		hns_dsaf_acpi_locate_ledctrl_by_port(mac_cb,
+						     HNS_OP_LOCATE_LED_SET_FUNC,
+						     CPLD_LED_DEFAULT_VALUE,
+						     mac_cb->mac_id);
+		break;
+	default:
+		dev_err(mac_cb->dev, "invalid led state: %d!", status);
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
 #define RESET_REQ_OR_DREQ 1
 
 static void hns_dsaf_acpi_srst_by_port(struct dsaf_device *dsaf_dev, u8 op_type,
@@ -505,12 +570,19 @@ static phy_interface_t hns_mac_get_phy_if_acpi(struct hns_mac_cb *mac_cb)
 
 int hns_mac_get_sfp_prsnt(struct hns_mac_cb *mac_cb, int *sfp_prsnt)
 {
+	u32 val = 0;
+	int ret;
+
 	if (!mac_cb->cpld_ctrl)
 		return -ENODEV;
 
-	*sfp_prsnt = !dsaf_read_syscon(mac_cb->cpld_ctrl, mac_cb->cpld_ctrl_reg
-					+ MAC_SFP_PORT_OFFSET);
+	ret = dsaf_read_syscon(mac_cb->cpld_ctrl,
+			       mac_cb->cpld_ctrl_reg + MAC_SFP_PORT_OFFSET,
+			       &val);
+	if (ret)
+		return ret;
 
+	*sfp_prsnt = !val;
 	return 0;
 }
 
@@ -560,7 +632,7 @@ static int hns_mac_config_sds_loopback(struct hns_mac_cb *mac_cb, bool en)
 #define RX_CSR(lane, reg) ((0x4080 + (reg) * 0x0002 + (lane) * 0x0200) * 2)
 	u64 reg_offset = RX_CSR(lane_id[mac_cb->mac_id], 0);
 
-	int sfp_prsnt;
+	int sfp_prsnt = 0;
 	int ret = hns_mac_get_sfp_prsnt(mac_cb, &sfp_prsnt);
 
 	if (!mac_cb->phy_dev) {
@@ -572,7 +644,7 @@ static int hns_mac_config_sds_loopback(struct hns_mac_cb *mac_cb, bool en)
 	}
 
 	if (mac_cb->serdes_ctrl) {
-		u32 origin;
+		u32 origin = 0;
 
 		if (!AE_IS_VER1(mac_cb->dsaf_dev->dsaf_ver)) {
 #define HILINK_ACCESS_SEL_CFG		0x40008
@@ -589,7 +661,10 @@ static int hns_mac_config_sds_loopback(struct hns_mac_cb *mac_cb, bool en)
 						  HILINK_ACCESS_SEL_CFG, 3);
 		}
 
-		origin = dsaf_read_syscon(mac_cb->serdes_ctrl, reg_offset);
+		ret = dsaf_read_syscon(mac_cb->serdes_ctrl, reg_offset,
+				       &origin);
+		if (ret)
+			return ret;
 
 		dsaf_set_field(origin, 1ull << 10, 10, en);
 		dsaf_write_syscon(mac_cb->serdes_ctrl, reg_offset, origin);
@@ -660,7 +735,7 @@ struct dsaf_misc_op *hns_misc_op_get(struct dsaf_device *dsaf_dev)
 	} else if (is_acpi_node(dsaf_dev->dev->fwnode)) {
 		misc_op->cpld_set_led = hns_cpld_set_led_acpi;
 		misc_op->cpld_reset_led = cpld_led_reset_acpi;
-		misc_op->cpld_set_led_id = cpld_set_led_id;
+		misc_op->cpld_set_led_id = cpld_set_led_id_acpi;
 
 		misc_op->dsaf_reset = hns_dsaf_rst_acpi;
 		misc_op->xge_srst = hns_dsaf_xge_srst_by_port_acpi;
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_reg.h b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_reg.h
index 46a52d9bb196..886cbbf25761 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_reg.h
+++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_reg.h
@@ -1034,12 +1034,9 @@ static inline void dsaf_write_syscon(struct regmap *base, u32 reg, u32 value)
 	regmap_write(base, reg, value);
 }
 
-static inline u32 dsaf_read_syscon(struct regmap *base, u32 reg)
+static inline int dsaf_read_syscon(struct regmap *base, u32 reg, u32 *val)
 {
-	unsigned int val;
-
-	regmap_read(base, reg, &val);
-	return val;
+	return regmap_read(base, reg, val);
 }
 
 #define dsaf_read_dev(a, reg) \
diff --git a/drivers/net/ethernet/hisilicon/hns3/Makefile b/drivers/net/ethernet/hisilicon/hns3/Makefile
index a9349e1f3e51..002534f12b66 100644
--- a/drivers/net/ethernet/hisilicon/hns3/Makefile
+++ b/drivers/net/ethernet/hisilicon/hns3/Makefile
@@ -1,7 +1,14 @@
+# SPDX-License-Identifier: GPL-2.0+
 #
 # Makefile for the HISILICON network device drivers.
 #
 
 obj-$(CONFIG_HNS3) += hns3pf/
+obj-$(CONFIG_HNS3) += hns3vf/
 
 obj-$(CONFIG_HNS3) += hnae3.o
+
+obj-$(CONFIG_HNS3_ENET) += hns3.o
+hns3-objs = hns3_enet.o hns3_ethtool.o
+
+hns3-$(CONFIG_HNS3_DCB) += hns3_dcbnl.o
diff --git a/drivers/net/ethernet/hisilicon/hns3/hclge_mbx.h b/drivers/net/ethernet/hisilicon/hns3/hclge_mbx.h
new file mode 100644
index 000000000000..3e9203ea42a6
--- /dev/null
+++ b/drivers/net/ethernet/hisilicon/hns3/hclge_mbx.h
@@ -0,0 +1,88 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/* Copyright (c) 2016-2017 Hisilicon Limited. */
+
+#ifndef __HCLGE_MBX_H
+#define __HCLGE_MBX_H
+#include <linux/init.h>
+#include <linux/mutex.h>
+#include <linux/types.h>
+
+#define HCLGE_MBX_VF_MSG_DATA_NUM	16
+
+enum HCLGE_MBX_OPCODE {
+	HCLGE_MBX_RESET = 0x01,		/* (VF -> PF) assert reset */
+	HCLGE_MBX_SET_UNICAST,		/* (VF -> PF) set UC addr */
+	HCLGE_MBX_SET_MULTICAST,	/* (VF -> PF) set MC addr */
+	HCLGE_MBX_SET_VLAN,		/* (VF -> PF) set VLAN */
+	HCLGE_MBX_MAP_RING_TO_VECTOR,	/* (VF -> PF) map ring-to-vector */
+	HCLGE_MBX_UNMAP_RING_TO_VECTOR,	/* (VF -> PF) unmap ring-to-vector */
+	HCLGE_MBX_SET_PROMISC_MODE,	/* (VF -> PF) set promiscuous mode */
+	HCLGE_MBX_SET_MACVLAN,		/* (VF -> PF) set unicast filter */
+	HCLGE_MBX_API_NEGOTIATE,	/* (VF -> PF) negotiate API version */
+	HCLGE_MBX_GET_QINFO,		/* (VF -> PF) get queue config */
+	HCLGE_MBX_GET_TCINFO,		/* (VF -> PF) get TC config */
+	HCLGE_MBX_GET_RETA,		/* (VF -> PF) get RETA */
+	HCLGE_MBX_GET_RSS_KEY,		/* (VF -> PF) get RSS key */
+	HCLGE_MBX_GET_MAC_ADDR,		/* (VF -> PF) get MAC addr */
+	HCLGE_MBX_PF_VF_RESP,		/* (PF -> VF) generate response to VF */
+	HCLGE_MBX_GET_BDNUM,		/* (VF -> PF) get BD num */
+	HCLGE_MBX_GET_BUFSIZE,		/* (VF -> PF) get buffer size */
+	HCLGE_MBX_GET_STREAMID,		/* (VF -> PF) get stream id */
+	HCLGE_MBX_SET_AESTART,		/* (VF -> PF) start ae */
+	HCLGE_MBX_SET_TSOSTATS,		/* (VF -> PF) get tso stats */
+	HCLGE_MBX_LINK_STAT_CHANGE,	/* (PF -> VF) link status has changed */
+	HCLGE_MBX_GET_BASE_CONFIG,	/* (VF -> PF) get config */
+	HCLGE_MBX_BIND_FUNC_QUEUE,	/* (VF -> PF) bind function and queue */
+	HCLGE_MBX_GET_LINK_STATUS,	/* (VF -> PF) get link status */
+	HCLGE_MBX_QUEUE_RESET,		/* (VF -> PF) reset queue */
+};
+
+/* below are per-VF mac-vlan subcodes */
+enum hclge_mbx_mac_vlan_subcode {
+	HCLGE_MBX_MAC_VLAN_UC_MODIFY = 0,	/* modify UC mac addr */
+	HCLGE_MBX_MAC_VLAN_UC_ADD,		/* add a new UC mac addr */
+	HCLGE_MBX_MAC_VLAN_UC_REMOVE,		/* remove a UC mac addr */
+	HCLGE_MBX_MAC_VLAN_MC_MODIFY,		/* modify MC mac addr */
+	HCLGE_MBX_MAC_VLAN_MC_ADD,		/* add new MC mac addr */
+	HCLGE_MBX_MAC_VLAN_MC_REMOVE,		/* remove MC mac addr */
+	HCLGE_MBX_MAC_VLAN_MC_FUNC_MTA_ENABLE,	/* config func MTA enable */
+};
+
+/* below are per-VF vlan cfg subcodes */
+enum hclge_mbx_vlan_cfg_subcode {
+	HCLGE_MBX_VLAN_FILTER = 0,	/* set vlan filter */
+	HCLGE_MBX_VLAN_TX_OFF_CFG,	/* set tx side vlan offload */
+	HCLGE_MBX_VLAN_RX_OFF_CFG,	/* set rx side vlan offload */
+};
+
+#define HCLGE_MBX_MAX_MSG_SIZE	16
+#define HCLGE_MBX_MAX_RESP_DATA_SIZE	8
+
+struct hclgevf_mbx_resp_status {
+	struct mutex mbx_mutex; /* protects against contending sync cmd resp */
+	u32 origin_mbx_msg;
+	bool received_resp;
+	int resp_status;
+	u8 additional_info[HCLGE_MBX_MAX_RESP_DATA_SIZE];
+};
+
+struct hclge_mbx_vf_to_pf_cmd {
+	u8 rsv;
+	u8 mbx_src_vfid; /* Auto filled by IMP */
+	u8 rsv1[2];
+	u8 msg_len;
+	u8 rsv2[3];
+	u8 msg[HCLGE_MBX_MAX_MSG_SIZE];
+};
+
+struct hclge_mbx_pf_to_vf_cmd {
+	u8 dest_vfid;
+	u8 rsv[3];
+	u8 msg_len;
+	u8 rsv1[3];
+	u16 msg[8];
+};
+
+#define hclge_mbx_ring_ptr_move_crq(crq) \
+	(crq->next_to_use = (crq->next_to_use + 1) % crq->desc_num)
+#endif
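
The message buffer convention implied by the enums above puts the opcode in msg[0] and a subcode plus payload in the remaining bytes. A hedged sketch of composing a VF -> PF VLAN-filter request (the payload byte layout is an assumption for illustration, not taken from the firmware spec):

/* Sketch only: build a VF -> PF "set vlan filter" mailbox request. */
static void fill_vlan_filter_req(struct hclge_mbx_vf_to_pf_cmd *req,
				 u16 vlan_id, bool is_kill)
{
	memset(req, 0, sizeof(*req));
	req->msg[0] = HCLGE_MBX_SET_VLAN;	/* opcode */
	req->msg[1] = HCLGE_MBX_VLAN_FILTER;	/* subcode */
	req->msg[2] = is_kill;
	req->msg[3] = vlan_id & 0xff;		/* assumed payload layout */
	req->msg[4] = vlan_id >> 8;
	req->msg_len = 5;
}

On the receive side, hclge_mbx_ring_ptr_move_crq() advances next_to_use modulo desc_num once a CRQ descriptor has been consumed.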
diff --git a/drivers/net/ethernet/hisilicon/hns3/hnae3.c b/drivers/net/ethernet/hisilicon/hns3/hnae3.c
index 5bcb2238acb2..02145f2de820 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hnae3.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hnae3.c
@@ -196,9 +196,18 @@ int hnae3_register_ae_dev(struct hnae3_ae_dev *ae_dev)
 	const struct pci_device_id *id;
 	struct hnae3_ae_algo *ae_algo;
 	struct hnae3_client *client;
-	int ret = 0;
+	int ret = 0, lock_acquired;
+
+	/* We can get deadlocked if SRIOV is being enabled in the probe
+	 * context and probe gets called again in the same context. This can
+	 * happen when pci_enable_sriov() is called to create VFs from the
+	 * PF's probe context. Therefore, for simplicity, uniformly defer
+	 * further probing in all cases where we detect contention.
+	 */
+	lock_acquired = mutex_trylock(&hnae3_common_lock);
+	if (!lock_acquired)
+		return -EPROBE_DEFER;
 
-	mutex_lock(&hnae3_common_lock);
 	list_add_tail(&ae_dev->node, &hnae3_ae_dev_list);
 
 	/* Check if there are matched ae_algo */
@@ -211,6 +220,7 @@ int hnae3_register_ae_dev(struct hnae3_ae_dev *ae_dev)
 
 		if (!ae_dev->ops) {
 			dev_err(&ae_dev->pdev->dev, "ae_dev ops are null\n");
+			ret = -EOPNOTSUPP;
 			goto out_err;
 		}
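
The trylock-then-defer construct above is a general cure for re-entrant probe deadlocks: instead of blocking on a mutex the same context may already hold, the probe backs off and lets the driver core retry it later. The bare pattern, reduced to its essentials (names are hypothetical):

/* Sketch only: defer instead of deadlocking on re-entrant probing. */
static DEFINE_MUTEX(registration_lock);

static int my_register_dev(struct device *dev)
{
	if (!mutex_trylock(&registration_lock))
		return -EPROBE_DEFER;	/* driver core requeues the probe */

	/* ... registration work ... */

	mutex_unlock(&registration_lock);
	return 0;
}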
 
diff --git a/drivers/net/ethernet/hisilicon/hns3/hnae3.h b/drivers/net/ethernet/hisilicon/hns3/hnae3.h
index 67c59e1039f2..fd06bc78c58e 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hnae3.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hnae3.h
@@ -133,11 +133,16 @@ struct hnae3_vector_info {
 #define HNAE3_RING_TYPE_B 0
 #define HNAE3_RING_TYPE_TX 0
 #define HNAE3_RING_TYPE_RX 1
+#define HNAE3_RING_GL_IDX_S 0
+#define HNAE3_RING_GL_IDX_M GENMASK(1, 0)
+#define HNAE3_RING_GL_RX 0
+#define HNAE3_RING_GL_TX 1
 
 struct hnae3_ring_chain_node {
 	struct hnae3_ring_chain_node *next;
 	u32 tqp_index;
 	u32 flag;
+	u32 int_gl_idx;
 };
 
 #define HNAE3_IS_TX_RING(node) \
@@ -274,10 +279,14 @@ struct hnae3_ae_dev {
  *   Get firmware version
  * get_mdix_mode()
  *   Get media type of phy
+ * enable_vlan_filter()
+ *   Enable vlan filter
  * set_vlan_filter()
  *   Set vlan filter config of Ports
  * set_vf_vlan_filter()
  *   Set vlan filter config of vf
+ * enable_hw_strip_rxvtag()
+ *   Enable/disable hardware strip vlan tag of packets received
  */
 struct hnae3_ae_ops {
 	int (*init_ae_dev)(struct hnae3_ae_dev *ae_dev);
@@ -347,7 +356,8 @@ struct hnae3_ae_ops {
 			    u32 stringset, u8 *data);
 	int (*get_sset_count)(struct hnae3_handle *handle, int stringset);
 
-	void (*get_regs)(struct hnae3_handle *handle, void *data);
+	void (*get_regs)(struct hnae3_handle *handle, u32 *version,
+			 void *data);
 	int (*get_regs_len)(struct hnae3_handle *handle);
 
 	u32 (*get_rss_key_size)(struct hnae3_handle *handle);
@@ -380,12 +390,23 @@ struct hnae3_ae_ops {
 	void (*get_mdix_mode)(struct hnae3_handle *handle,
 			      u8 *tp_mdix_ctrl, u8 *tp_mdix);
 
+	void (*enable_vlan_filter)(struct hnae3_handle *handle, bool enable);
 	int (*set_vlan_filter)(struct hnae3_handle *handle, __be16 proto,
 			       u16 vlan_id, bool is_kill);
 	int (*set_vf_vlan_filter)(struct hnae3_handle *handle, int vfid,
 				  u16 vlan, u8 qos, __be16 proto);
+	int (*enable_hw_strip_rxvtag)(struct hnae3_handle *handle, bool enable);
 	void (*reset_event)(struct hnae3_handle *handle,
 			    enum hnae3_reset_type reset);
+	void (*get_channels)(struct hnae3_handle *handle,
+			     struct ethtool_channels *ch);
+	void (*get_tqps_and_rss_info)(struct hnae3_handle *h,
+				      u16 *free_tqps, u16 *max_rss_size);
+	int (*set_channels)(struct hnae3_handle *handle, u32 new_tqps_num);
+	void (*get_flowctrl_adv)(struct hnae3_handle *handle,
+				 u32 *flowctrl_adv);
+	int (*set_led_id)(struct hnae3_handle *handle,
+			  enum ethtool_phys_id_state status);
 };
 
 struct hnae3_dcb_ops {
@@ -435,6 +456,8 @@ struct hnae3_knic_private_info {
 	u16 num_tqps;		  /* total number of TQPs in this handle */
 	struct hnae3_queue **tqp;  /* array base of all TQPs in this instance */
 	const struct hnae3_dcb_ops *dcb_ops;
+
+	u16 int_rl_setting;
 };
 
 struct hnae3_roce_private_info {
@@ -452,9 +475,10 @@ struct hnae3_unic_private_info {
 	struct hnae3_queue **tqp;  /* array base of all TQPs of this instance */
 };
 
-#define HNAE3_SUPPORT_MAC_LOOPBACK    1
-#define HNAE3_SUPPORT_PHY_LOOPBACK    2
-#define HNAE3_SUPPORT_SERDES_LOOPBACK 4
+#define HNAE3_SUPPORT_MAC_LOOPBACK    BIT(0)
+#define HNAE3_SUPPORT_PHY_LOOPBACK    BIT(1)
+#define HNAE3_SUPPORT_SERDES_LOOPBACK BIT(2)
+#define HNAE3_SUPPORT_VF	      BIT(3)
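
Rewriting the literals as BIT() makes it explicit that these are independent capability flags meant to be OR-ed into a mask and tested individually, as the dcbnl change below does with HNAE3_SUPPORT_VF. A small sketch of typical tests (run_loopback_selftest() is a hypothetical helper):

/* Sketch only: capability-bit checks on a hnae3 handle. */
if (handle->flags & HNAE3_SUPPORT_VF)
	return;				/* PF-only feature, skip on VFs */

if (handle->flags & (HNAE3_SUPPORT_MAC_LOOPBACK |
		     HNAE3_SUPPORT_SERDES_LOOPBACK))
	run_loopback_selftest(handle);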
 
 struct hnae3_handle {
 	struct hnae3_client *client;
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hns3_dcbnl.c b/drivers/net/ethernet/hisilicon/hns3/hns3_dcbnl.c
index 925619a7c50a..eb82700da7d0 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hns3_dcbnl.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3_dcbnl.c
@@ -93,7 +93,7 @@ void hns3_dcbnl_setup(struct hnae3_handle *handle)
 {
 	struct net_device *dev = handle->kinfo.netdev;
 
-	if (!handle->kinfo.dcb_ops)
+	if ((!handle->kinfo.dcb_ops) || (handle->flags & HNAE3_SUPPORT_VF))
 		return;
 
 	dev->dcbnl_ops = &hns3_dcbnl_ops;
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hns3_enet.c b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
index 59415090ff0f..601b6295d3f8 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hns3_enet.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
@@ -52,6 +52,8 @@ static const struct pci_device_id hns3_pci_tbl[] = {
 	 HNAE3_DEV_SUPPORT_ROCE_DCB_BITS},
 	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_100G_RDMA_MACSEC),
 	 HNAE3_DEV_SUPPORT_ROCE_DCB_BITS},
+	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_100G_VF), 0},
+	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_100G_RDMA_DCB_PFC_VF), 0},
 	/* required last entry */
 	{0, }
 };
@@ -156,43 +158,68 @@ static void hns3_vector_disable(struct hns3_enet_tqp_vector *tqp_vector)
 	napi_disable(&tqp_vector->napi);
 }
 
-static void hns3_set_vector_coalesc_gl(struct hns3_enet_tqp_vector *tqp_vector,
-				       u32 gl_value)
+void hns3_set_vector_coalesce_rl(struct hns3_enet_tqp_vector *tqp_vector,
+				 u32 rl_value)
 {
-	/* this defines the configuration for GL (Interrupt Gap Limiter)
-	 * GL defines inter interrupt gap.
-	 * GL and RL(Rate Limiter) are 2 ways to acheive interrupt coalescing
-	 */
-	writel(gl_value, tqp_vector->mask_addr + HNS3_VECTOR_GL0_OFFSET);
-	writel(gl_value, tqp_vector->mask_addr + HNS3_VECTOR_GL1_OFFSET);
-	writel(gl_value, tqp_vector->mask_addr + HNS3_VECTOR_GL2_OFFSET);
-}
+	u32 rl_reg = hns3_rl_usec_to_reg(rl_value);
 
-static void hns3_set_vector_coalesc_rl(struct hns3_enet_tqp_vector *tqp_vector,
-				       u32 rl_value)
-{
 	/* this defines the configuration for RL (Interrupt Rate Limiter).
 	 * Rl defines rate of interrupts i.e. number of interrupts-per-second
 	 * GL and RL(Rate Limiter) are 2 ways to achieve interrupt coalescing
 	 */
-	writel(rl_value, tqp_vector->mask_addr + HNS3_VECTOR_RL_OFFSET);
+
+	if (rl_reg > 0 && !tqp_vector->tx_group.gl_adapt_enable &&
+	    !tqp_vector->rx_group.gl_adapt_enable)
+		/* According to the hardware, the range of rl_reg is
+		 * 0-59 and the unit is 4.
+		 */
+		rl_reg |= HNS3_INT_RL_ENABLE_MASK;
+
+	writel(rl_reg, tqp_vector->mask_addr + HNS3_VECTOR_RL_OFFSET);
+}
+
+void hns3_set_vector_coalesce_rx_gl(struct hns3_enet_tqp_vector *tqp_vector,
+				    u32 gl_value)
+{
+	u32 rx_gl_reg = hns3_gl_usec_to_reg(gl_value);
+
+	writel(rx_gl_reg, tqp_vector->mask_addr + HNS3_VECTOR_GL0_OFFSET);
+}
+
+void hns3_set_vector_coalesce_tx_gl(struct hns3_enet_tqp_vector *tqp_vector,
+				    u32 gl_value)
+{
+	u32 tx_gl_reg = hns3_gl_usec_to_reg(gl_value);
+
+	writel(tx_gl_reg, tqp_vector->mask_addr + HNS3_VECTOR_GL1_OFFSET);
 }
 
-static void hns3_vector_gl_rl_init(struct hns3_enet_tqp_vector *tqp_vector)
+static void hns3_vector_gl_rl_init(struct hns3_enet_tqp_vector *tqp_vector,
+				   struct hns3_nic_priv *priv)
 {
+	struct hnae3_handle *h = priv->ae_handle;
+
 	/* initialize the configuration for interrupt coalescing.
 	 * 1. GL (Interrupt Gap Limiter)
 	 * 2. RL (Interrupt Rate Limiter)
 	 */
 
-	/* Default :enable interrupt coalesce */
-	tqp_vector->rx_group.int_gl = HNS3_INT_GL_50K;
+	/* Default: enable interrupt coalescing self-adaptive and GL */
+	tqp_vector->tx_group.gl_adapt_enable = 1;
+	tqp_vector->rx_group.gl_adapt_enable = 1;
+
 	tqp_vector->tx_group.int_gl = HNS3_INT_GL_50K;
-	hns3_set_vector_coalesc_gl(tqp_vector, HNS3_INT_GL_50K);
-	/* for now we are disabling Interrupt RL - we
-	 * will re-enable later
-	 */
-	hns3_set_vector_coalesc_rl(tqp_vector, 0);
+	tqp_vector->rx_group.int_gl = HNS3_INT_GL_50K;
+
+	hns3_set_vector_coalesce_tx_gl(tqp_vector,
+				       tqp_vector->tx_group.int_gl);
+	hns3_set_vector_coalesce_rx_gl(tqp_vector,
+				       tqp_vector->rx_group.int_gl);
+
+	/* Default: disable RL */
+	h->kinfo.int_rl_setting = 0;
+	hns3_set_vector_coalesce_rl(tqp_vector, h->kinfo.int_rl_setting);
+
 	tqp_vector->rx_group.flow_level = HNS3_FLOW_LOW;
 	tqp_vector->tx_group.flow_level = HNS3_FLOW_LOW;
 }
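
The new defaults are expressed in microseconds and converted to hardware units at write time: GL registers count in 2 usec steps and RL registers in 4 usec steps (see the hns3_gl_usec_to_reg()/hns3_rl_usec_to_reg() macros added to hns3_enet.h further down). A quick consistency check of the 50K setting:

/* HNS3_INT_GL_50K is now 0x14 == 20 usec, i.e. a 50000 ints/sec cap.
 * hns3_gl_usec_to_reg(20) == 20 >> 1 == 10 (0x0A), which matches the
 * raw register value the old HNS3_INT_GL_50K define used to hold.
 */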
@@ -245,6 +272,8 @@ static int hns3_nic_net_up(struct net_device *netdev)
 	if (ret)
 		goto out_start_err;
 
+	clear_bit(HNS3_NIC_STATE_DOWN, &priv->state);
+
 	return 0;
 
 out_start_err:
@@ -284,6 +313,9 @@ static void hns3_nic_net_down(struct net_device *netdev)
 	const struct hnae3_ae_ops *ops;
 	int i;
 
+	if (test_and_set_bit(HNS3_NIC_STATE_DOWN, &priv->state))
+		return;
+
 	/* stop ae_dev */
 	ops = priv->ae_handle->ae_algo->ops;
 	if (ops->stop)
@@ -721,6 +753,58 @@ static void hns3_set_txbd_baseinfo(u16 *bdtp_fe_sc_vld_ra_ri, int frag_end)
 	hnae_set_field(*bdtp_fe_sc_vld_ra_ri, HNS3_TXD_SC_M, HNS3_TXD_SC_S, 0);
 }
 
+static int hns3_fill_desc_vtags(struct sk_buff *skb,
+				struct hns3_enet_ring *tx_ring,
+				u32 *inner_vlan_flag,
+				u32 *out_vlan_flag,
+				u16 *inner_vtag,
+				u16 *out_vtag)
+{
+#define HNS3_TX_VLAN_PRIO_SHIFT 13
+
+	if (skb->protocol == htons(ETH_P_8021Q) &&
+	    !(tx_ring->tqp->handle->kinfo.netdev->features &
+	    NETIF_F_HW_VLAN_CTAG_TX)) {
+		/* When HW VLAN acceleration is turned off, and the stack
+		 * sets the protocol to 802.1q, the driver just needs to
+		 * set the protocol to the encapsulated ethertype.
+		 */
+		skb->protocol = vlan_get_protocol(skb);
+		return 0;
+	}
+
+	if (skb_vlan_tag_present(skb)) {
+		u16 vlan_tag;
+
+		vlan_tag = skb_vlan_tag_get(skb);
+		vlan_tag |= (skb->priority & 0x7) << HNS3_TX_VLAN_PRIO_SHIFT;
+
+		/* Based on hw strategy, use out_vtag in the two-layer tag
+		 * case, and inner_vtag in the single-tag case.
+		 */
+		if (skb->protocol == htons(ETH_P_8021Q)) {
+			hnae_set_bit(*out_vlan_flag, HNS3_TXD_OVLAN_B, 1);
+			*out_vtag = vlan_tag;
+		} else {
+			hnae_set_bit(*inner_vlan_flag, HNS3_TXD_VLAN_B, 1);
+			*inner_vtag = vlan_tag;
+		}
+	} else if (skb->protocol == htons(ETH_P_8021Q)) {
+		struct vlan_ethhdr *vhdr;
+		int rc;
+
+		rc = skb_cow_head(skb, 0);
+		if (rc < 0)
+			return rc;
+		vhdr = (struct vlan_ethhdr *)skb->data;
+		vhdr->h_vlan_TCI |= cpu_to_be16((skb->priority & 0x7)
+					<< HNS3_TX_VLAN_PRIO_SHIFT);
+	}
+
+	skb->protocol = vlan_get_protocol(skb);
+	return 0;
+}
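
The value placed in inner_vtag or out_vtag is a complete 802.1Q TCI: the 12-bit VID in the low bits and the 3-bit priority shifted to bits 15:13 via HNS3_TX_VLAN_PRIO_SHIFT. Worked arithmetic with hypothetical values:

/* VID 100, priority 5:
 *   vlan_tag  = 100;                VID in bits 11:0
 *   vlan_tag |= (5 & 0x7) << 13;    PCP in bits 15:13
 *   vlan_tag == 0xA064              value written to the descriptor
 */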
+
 static int hns3_fill_desc(struct hns3_enet_ring *ring, void *priv,
 			  int size, dma_addr_t dma, int frag_end,
 			  enum hns_desc_type type)
@@ -731,6 +815,8 @@ static int hns3_fill_desc(struct hns3_enet_ring *ring, void *priv,
 	u16 bdtp_fe_sc_vld_ra_ri = 0;
 	u32 type_cs_vlan_tso = 0;
 	struct sk_buff *skb;
+	u16 inner_vtag = 0;
+	u16 out_vtag = 0;
 	u32 paylen = 0;
 	u16 mss = 0;
 	__be16 protocol;
@@ -754,15 +840,16 @@ static int hns3_fill_desc(struct hns3_enet_ring *ring, void *priv,
 		skb = (struct sk_buff *)priv;
 		paylen = skb->len;
 
+		ret = hns3_fill_desc_vtags(skb, ring, &type_cs_vlan_tso,
+					   &ol_type_vlan_len_msec,
+					   &inner_vtag, &out_vtag);
+		if (unlikely(ret))
+			return ret;
+
 		if (skb->ip_summed == CHECKSUM_PARTIAL) {
 			skb_reset_mac_len(skb);
 			protocol = skb->protocol;
 
-			/* vlan packet*/
-			if (protocol == htons(ETH_P_8021Q)) {
-				protocol = vlan_get_protocol(skb);
-				skb->protocol = protocol;
-			}
 			ret = hns3_get_l4_protocol(skb, &ol4_proto, &il4_proto);
 			if (ret)
 				return ret;
@@ -788,6 +875,8 @@ static int hns3_fill_desc(struct hns3_enet_ring *ring, void *priv,
 			cpu_to_le32(type_cs_vlan_tso);
 		desc->tx.paylen = cpu_to_le32(paylen);
 		desc->tx.mss = cpu_to_le16(mss);
+		desc->tx.vlan_tag = cpu_to_le16(inner_vtag);
+		desc->tx.outer_vlan_tag = cpu_to_le16(out_vtag);
 	}
 
 	/* move ring pointer to next.*/
@@ -1029,25 +1118,50 @@ static int hns3_nic_net_set_mac_address(struct net_device *netdev, void *p)
 static int hns3_nic_set_features(struct net_device *netdev,
 				 netdev_features_t features)
 {
+	netdev_features_t changed = netdev->features ^ features;
 	struct hns3_nic_priv *priv = netdev_priv(netdev);
+	struct hnae3_handle *h = priv->ae_handle;
+	int ret;
 
-	if (features & (NETIF_F_TSO | NETIF_F_TSO6)) {
-		priv->ops.fill_desc = hns3_fill_desc_tso;
-		priv->ops.maybe_stop_tx = hns3_nic_maybe_stop_tso;
-	} else {
-		priv->ops.fill_desc = hns3_fill_desc;
-		priv->ops.maybe_stop_tx = hns3_nic_maybe_stop_tx;
+	if (changed & (NETIF_F_TSO | NETIF_F_TSO6)) {
+		if (features & (NETIF_F_TSO | NETIF_F_TSO6)) {
+			priv->ops.fill_desc = hns3_fill_desc_tso;
+			priv->ops.maybe_stop_tx = hns3_nic_maybe_stop_tso;
+		} else {
+			priv->ops.fill_desc = hns3_fill_desc;
+			priv->ops.maybe_stop_tx = hns3_nic_maybe_stop_tx;
+		}
+	}
+
+	if ((changed & NETIF_F_HW_VLAN_CTAG_FILTER) &&
+	    h->ae_algo->ops->enable_vlan_filter) {
+		if (features & NETIF_F_HW_VLAN_CTAG_FILTER)
+			h->ae_algo->ops->enable_vlan_filter(h, true);
+		else
+			h->ae_algo->ops->enable_vlan_filter(h, false);
+	}
+
+	if ((changed & NETIF_F_HW_VLAN_CTAG_RX) &&
+	    h->ae_algo->ops->enable_hw_strip_rxvtag) {
+		if (features & NETIF_F_HW_VLAN_CTAG_RX)
+			ret = h->ae_algo->ops->enable_hw_strip_rxvtag(h, true);
+		else
+			ret = h->ae_algo->ops->enable_hw_strip_rxvtag(h, false);
+
+		if (ret)
+			return ret;
 	}
 
 	netdev->features = features;
 	return 0;
 }
 
-static void
-hns3_nic_get_stats64(struct net_device *netdev, struct rtnl_link_stats64 *stats)
+static void hns3_nic_get_stats64(struct net_device *netdev,
+				 struct rtnl_link_stats64 *stats)
 {
 	struct hns3_nic_priv *priv = netdev_priv(netdev);
 	int queue_num = priv->ae_handle->kinfo.num_tqps;
+	struct hnae3_handle *handle = priv->ae_handle;
 	struct hns3_enet_ring *ring;
 	unsigned int start;
 	unsigned int idx;
@@ -1055,6 +1169,13 @@ hns3_nic_get_stats64(struct net_device *netdev, struct rtnl_link_stats64 *stats)
 	u64 rx_bytes = 0;
 	u64 tx_pkts = 0;
 	u64 rx_pkts = 0;
+	u64 tx_drop = 0;
+	u64 rx_drop = 0;
+
+	if (test_bit(HNS3_NIC_STATE_DOWN, &priv->state))
+		return;
+
+	handle->ae_algo->ops->update_stats(handle, &netdev->stats);
 
 	for (idx = 0; idx < queue_num; idx++) {
 		/* fetch the tx stats */
@@ -1063,6 +1184,8 @@ hns3_nic_get_stats64(struct net_device *netdev, struct rtnl_link_stats64 *stats)
 			start = u64_stats_fetch_begin_irq(&ring->syncp);
 			tx_bytes += ring->stats.tx_bytes;
 			tx_pkts += ring->stats.tx_pkts;
+			tx_drop += ring->stats.tx_busy;
+			tx_drop += ring->stats.sw_err_cnt;
 		} while (u64_stats_fetch_retry_irq(&ring->syncp, start));
 
 		/* fetch the rx stats */
@@ -1071,6 +1194,9 @@ hns3_nic_get_stats64(struct net_device *netdev, struct rtnl_link_stats64 *stats)
 			start = u64_stats_fetch_begin_irq(&ring->syncp);
 			rx_bytes += ring->stats.rx_bytes;
 			rx_pkts += ring->stats.rx_pkts;
+			rx_drop += ring->stats.non_vld_descs;
+			rx_drop += ring->stats.err_pkt_len;
+			rx_drop += ring->stats.l2_err;
 		} while (u64_stats_fetch_retry_irq(&ring->syncp, start));
 	}
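
Each per-ring read is wrapped in the u64_stats seqcount idiom: on 32-bit machines the writer updates these 64-bit counters non-atomically, so the reader retries until it sees a consistent snapshot. The bare shape of the loop, assuming a ring with a syncp member as here:

/* Sketch only: take a consistent snapshot of 64-bit ring counters. */
unsigned int start;
u64 bytes, pkts;

do {
	start = u64_stats_fetch_begin_irq(&ring->syncp);
	bytes = ring->stats.tx_bytes;
	pkts  = ring->stats.tx_pkts;
} while (u64_stats_fetch_retry_irq(&ring->syncp, start));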
 
@@ -1086,8 +1212,8 @@ hns3_nic_get_stats64(struct net_device *netdev, struct rtnl_link_stats64 *stats)
 	stats->rx_missed_errors = netdev->stats.rx_missed_errors;
 
 	stats->tx_errors = netdev->stats.tx_errors;
-	stats->rx_dropped = netdev->stats.rx_dropped;
-	stats->tx_dropped = netdev->stats.tx_dropped;
+	stats->rx_dropped = rx_drop + netdev->stats.rx_dropped;
+	stats->tx_dropped = tx_drop + netdev->stats.tx_dropped;
 	stats->collisions = netdev->stats.collisions;
 	stats->rx_over_errors = netdev->stats.rx_over_errors;
 	stats->rx_frame_errors = netdev->stats.rx_frame_errors;
@@ -1317,6 +1443,8 @@ static int hns3_nic_change_mtu(struct net_device *netdev, int new_mtu)
 		return ret;
 	}
 
+	netdev->mtu = new_mtu;
+
 	/* if the netdev was running earlier, bring it up again */
 	if (if_running && hns3_nic_net_open(netdev))
 		ret = -EINVAL;
@@ -1476,6 +1604,8 @@ static struct pci_driver hns3_driver = {
 /* set default feature to hns3 */
 static void hns3_set_default_feature(struct net_device *netdev)
 {
+	struct hnae3_handle *h = hns3_get_handle(netdev);
+
 	netdev->priv_flags |= IFF_UNICAST_FLT;
 
 	netdev->hw_enc_features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
@@ -1490,6 +1620,7 @@ static void hns3_set_default_feature(struct net_device *netdev)
 
 	netdev->features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
 		NETIF_F_HW_VLAN_CTAG_FILTER |
+		NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX |
 		NETIF_F_RXCSUM | NETIF_F_SG | NETIF_F_GSO |
 		NETIF_F_GRO | NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_GSO_GRE |
 		NETIF_F_GSO_GRE_CSUM | NETIF_F_GSO_UDP_TUNNEL |
@@ -1503,11 +1634,15 @@ static void hns3_set_default_feature(struct net_device *netdev)
 		NETIF_F_GSO_UDP_TUNNEL_CSUM;
 
 	netdev->hw_features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
-		NETIF_F_HW_VLAN_CTAG_FILTER |
+		NETIF_F_HW_VLAN_CTAG_TX |
 		NETIF_F_RXCSUM | NETIF_F_SG | NETIF_F_GSO |
 		NETIF_F_GRO | NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_GSO_GRE |
 		NETIF_F_GSO_GRE_CSUM | NETIF_F_GSO_UDP_TUNNEL |
 		NETIF_F_GSO_UDP_TUNNEL_CSUM;
+
+	if (!(h->flags & HNAE3_SUPPORT_VF))
+		netdev->hw_features |=
+			NETIF_F_HW_VLAN_CTAG_FILTER | NETIF_F_HW_VLAN_CTAG_RX;
 }
 
 static int hns3_alloc_buffer(struct hns3_enet_ring *ring,
@@ -2083,6 +2218,22 @@ static int hns3_handle_rx_bd(struct hns3_enet_ring *ring,
 
 	prefetchw(skb->data);
 
+	/* Based on hw strategy, the offloaded tag is stored in ot_vlan_tag
+	 * in the two-layer tag case, and in vlan_tag in the one-layer tag
+	 * case.
+	 */
+	if (netdev->features & NETIF_F_HW_VLAN_CTAG_RX) {
+		u16 vlan_tag;
+
+		vlan_tag = le16_to_cpu(desc->rx.ot_vlan_tag);
+		if (!(vlan_tag & VLAN_VID_MASK))
+			vlan_tag = le16_to_cpu(desc->rx.vlan_tag);
+		if (vlan_tag & VLAN_VID_MASK)
+			__vlan_hwaccel_put_tag(skb,
+					       htons(ETH_P_8021Q),
+					       vlan_tag);
+	}
+
 	bnum = 1;
 	if (length <= HNS3_RX_HEAD_SIZE) {
 		memcpy(__skb_put(skb, length), va, ALIGN(length, sizeof(long)));
@@ -2301,25 +2452,22 @@ static bool hns3_get_new_int_gl(struct hns3_enet_ring_group *ring_group)
 
 static void hns3_update_new_int_gl(struct hns3_enet_tqp_vector *tqp_vector)
 {
-	u16 rx_int_gl, tx_int_gl;
-	bool rx, tx;
-
-	rx = hns3_get_new_int_gl(&tqp_vector->rx_group);
-	tx = hns3_get_new_int_gl(&tqp_vector->tx_group);
-	rx_int_gl = tqp_vector->rx_group.int_gl;
-	tx_int_gl = tqp_vector->tx_group.int_gl;
-	if (rx && tx) {
-		if (rx_int_gl > tx_int_gl) {
-			tqp_vector->tx_group.int_gl = rx_int_gl;
-			tqp_vector->tx_group.flow_level =
-				tqp_vector->rx_group.flow_level;
-			hns3_set_vector_coalesc_gl(tqp_vector, rx_int_gl);
-		} else {
-			tqp_vector->rx_group.int_gl = tx_int_gl;
-			tqp_vector->rx_group.flow_level =
-				tqp_vector->tx_group.flow_level;
-			hns3_set_vector_coalesc_gl(tqp_vector, tx_int_gl);
-		}
+	struct hns3_enet_ring_group *rx_group = &tqp_vector->rx_group;
+	struct hns3_enet_ring_group *tx_group = &tqp_vector->tx_group;
+	bool rx_update, tx_update;
+
+	if (rx_group->gl_adapt_enable) {
+		rx_update = hns3_get_new_int_gl(rx_group);
+		if (rx_update)
+			hns3_set_vector_coalesce_rx_gl(tqp_vector,
+						       rx_group->int_gl);
+	}
+
+	if (tx_group->gl_adapt_enable) {
+		tx_update = hns3_get_new_int_gl(tx_group);
+		if (tx_update)
+			hns3_set_vector_coalesce_tx_gl(tqp_vector,
+						       tx_group->int_gl);
 	}
 }
 
@@ -2380,6 +2528,8 @@ static int hns3_get_vector_ring_chain(struct hns3_enet_tqp_vector *tqp_vector,
 		cur_chain->tqp_index = tx_ring->tqp->tqp_index;
 		hnae_set_bit(cur_chain->flag, HNAE3_RING_TYPE_B,
 			     HNAE3_RING_TYPE_TX);
+		hnae_set_field(cur_chain->int_gl_idx, HNAE3_RING_GL_IDX_M,
+			       HNAE3_RING_GL_IDX_S, HNAE3_RING_GL_TX);
 
 		cur_chain->next = NULL;
 
@@ -2395,6 +2545,10 @@ static int hns3_get_vector_ring_chain(struct hns3_enet_tqp_vector *tqp_vector,
 			chain->tqp_index = tx_ring->tqp->tqp_index;
 			hnae_set_bit(chain->flag, HNAE3_RING_TYPE_B,
 				     HNAE3_RING_TYPE_TX);
+			hnae_set_field(chain->int_gl_idx,
+				       HNAE3_RING_GL_IDX_M,
+				       HNAE3_RING_GL_IDX_S,
+				       HNAE3_RING_GL_TX);
 
 			cur_chain = chain;
 		}
@@ -2406,6 +2560,8 @@ static int hns3_get_vector_ring_chain(struct hns3_enet_tqp_vector *tqp_vector,
 		cur_chain->tqp_index = rx_ring->tqp->tqp_index;
 		hnae_set_bit(cur_chain->flag, HNAE3_RING_TYPE_B,
 			     HNAE3_RING_TYPE_RX);
+		hnae_set_field(cur_chain->int_gl_idx, HNAE3_RING_GL_IDX_M,
+			       HNAE3_RING_GL_IDX_S, HNAE3_RING_GL_RX);
 
 		rx_ring = rx_ring->next;
 	}
@@ -2419,6 +2575,9 @@ static int hns3_get_vector_ring_chain(struct hns3_enet_tqp_vector *tqp_vector,
 		chain->tqp_index = rx_ring->tqp->tqp_index;
 		hnae_set_bit(chain->flag, HNAE3_RING_TYPE_B,
 			     HNAE3_RING_TYPE_RX);
+		hnae_set_field(chain->int_gl_idx, HNAE3_RING_GL_IDX_M,
+			       HNAE3_RING_GL_IDX_S, HNAE3_RING_GL_RX);
+
 		cur_chain = chain;
 
 		rx_ring = rx_ring->next;
@@ -2507,7 +2666,7 @@ static int hns3_nic_init_vector_data(struct hns3_nic_priv *priv)
 		tqp_vector->rx_group.total_packets = 0;
 		tqp_vector->tx_group.total_bytes = 0;
 		tqp_vector->tx_group.total_packets = 0;
-		hns3_vector_gl_rl_init(tqp_vector);
+		hns3_vector_gl_rl_init(tqp_vector, priv);
 		tqp_vector->handle = h;
 
 		ret = hns3_get_vector_ring_chain(tqp_vector,
@@ -2649,6 +2808,19 @@ err:
 	return ret;
 }
 
+static void hns3_put_ring_config(struct hns3_nic_priv *priv)
+{
+	struct hnae3_handle *h = priv->ae_handle;
+	int i;
+
+	for (i = 0; i < h->kinfo.num_tqps; i++) {
+		devm_kfree(priv->dev, priv->ring_data[i].ring);
+		devm_kfree(priv->dev,
+			   priv->ring_data[i + h->kinfo.num_tqps].ring);
+	}
+	devm_kfree(priv->dev, priv->ring_data);
+}
+
 static int hns3_alloc_ring_memory(struct hns3_enet_ring *ring)
 {
 	int ret;
@@ -2785,8 +2957,12 @@ int hns3_uninit_all_ring(struct hns3_nic_priv *priv)
 			h->ae_algo->ops->reset_queue(h, i);
 
 		hns3_fini_ring(priv->ring_data[i].ring);
+		devm_kfree(priv->dev, priv->ring_data[i].ring);
 		hns3_fini_ring(priv->ring_data[i + h->kinfo.num_tqps].ring);
+		devm_kfree(priv->dev,
+			   priv->ring_data[i + h->kinfo.num_tqps].ring);
 	}
+	devm_kfree(priv->dev, priv->ring_data);
 
 	return 0;
 }
@@ -3142,8 +3318,8 @@ static int hns3_reset_notify(struct hnae3_handle *handle,
 
 	switch (type) {
 	case HNAE3_UP_CLIENT:
-                ret = hns3_reset_notify_up_enet(handle);
-                break;
+		ret = hns3_reset_notify_up_enet(handle);
+		break;
 	case HNAE3_DOWN_CLIENT:
 		ret = hns3_reset_notify_down_enet(handle);
 		break;
@@ -3160,6 +3336,115 @@ static int hns3_reset_notify(struct hnae3_handle *handle,
 	return ret;
 }
 
+static u16 hns3_get_max_available_channels(struct net_device *netdev)
+{
+	struct hnae3_handle *h = hns3_get_handle(netdev);
+	u16 free_tqps, max_rss_size, max_tqps;
+
+	h->ae_algo->ops->get_tqps_and_rss_info(h, &free_tqps, &max_rss_size);
+	max_tqps = h->kinfo.num_tc * max_rss_size;
+
+	return min_t(u16, max_tqps, (free_tqps + h->kinfo.num_tqps));
+}
+
+static int hns3_modify_tqp_num(struct net_device *netdev, u16 new_tqp_num)
+{
+	struct hns3_nic_priv *priv = netdev_priv(netdev);
+	struct hnae3_handle *h = hns3_get_handle(netdev);
+	int ret;
+
+	ret = h->ae_algo->ops->set_channels(h, new_tqp_num);
+	if (ret)
+		return ret;
+
+	ret = hns3_get_ring_config(priv);
+	if (ret)
+		return ret;
+
+	ret = hns3_nic_init_vector_data(priv);
+	if (ret)
+		goto err_uninit_vector;
+
+	ret = hns3_init_all_ring(priv);
+	if (ret)
+		goto err_put_ring;
+
+	return 0;
+
+err_put_ring:
+	hns3_put_ring_config(priv);
+err_uninit_vector:
+	hns3_nic_uninit_vector_data(priv);
+	return ret;
+}
+
+static int hns3_adjust_tqps_num(u8 num_tc, u32 new_tqp_num)
+{
+	return (new_tqp_num / num_tc) * num_tc;
+}
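
hns3_adjust_tqps_num() floors the requested queue count to a multiple of the TC count so that every traffic class ends up with the same number of queues. A stand-alone illustration:

#include <stdio.h>

static unsigned int adjust_tqps_num(unsigned int num_tc, unsigned int n)
{
	return (n / num_tc) * num_tc;	/* integer division floors */
}

int main(void)
{
	/* with 4 traffic classes: 10 -> 8, 12 -> 12, 7 -> 4 */
	printf("%u %u %u\n", adjust_tqps_num(4, 10),
	       adjust_tqps_num(4, 12), adjust_tqps_num(4, 7));
	return 0;
}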
+
+int hns3_set_channels(struct net_device *netdev,
+		      struct ethtool_channels *ch)
+{
+	struct hns3_nic_priv *priv = netdev_priv(netdev);
+	struct hnae3_handle *h = hns3_get_handle(netdev);
+	struct hnae3_knic_private_info *kinfo = &h->kinfo;
+	bool if_running = netif_running(netdev);
+	u32 new_tqp_num = ch->combined_count;
+	u16 org_tqp_num;
+	int ret;
+
+	if (ch->rx_count || ch->tx_count)
+		return -EINVAL;
+
+	if (new_tqp_num > hns3_get_max_available_channels(netdev) ||
+	    new_tqp_num < kinfo->num_tc) {
+		dev_err(&netdev->dev,
+			"Change tqps fail, the tqp range is from %d to %d",
+			kinfo->num_tc,
+			hns3_get_max_available_channels(netdev));
+		return -EINVAL;
+	}
+
+	new_tqp_num = hns3_adjust_tqps_num(kinfo->num_tc, new_tqp_num);
+	if (kinfo->num_tqps == new_tqp_num)
+		return 0;
+
+	if (if_running)
+		dev_close(netdev);
+
+	hns3_clear_all_ring(h);
+
+	ret = hns3_nic_uninit_vector_data(priv);
+	if (ret) {
+		dev_err(&netdev->dev,
+			"Unbind vector with tqp fail, nothing is changed");
+		goto open_netdev;
+	}
+
+	hns3_uninit_all_ring(priv);
+
+	org_tqp_num = h->kinfo.num_tqps;
+	ret = hns3_modify_tqp_num(netdev, new_tqp_num);
+	if (ret) {
+		ret = hns3_modify_tqp_num(netdev, org_tqp_num);
+		if (ret) {
+			/* If reverting to the old tqp number fails, it is a fatal error */
+			dev_err(&netdev->dev,
+				"Revert to old tqp num fail, ret=%d", ret);
+			return ret;
+		}
+		dev_info(&netdev->dev,
+			 "Change tqp num fail, Revert to old tqp num");
+	}
+
+open_netdev:
+	if (if_running)
+		dev_open(netdev);
+
+	return ret;
+}
+
 static const struct hnae3_client_ops client_ops = {
 	.init_instance = hns3_client_init,
 	.uninit_instance = hns3_client_uninit,
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hns3_enet.h b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.h
index 8a9de759957b..213f501b30bb 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hns3_enet.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.h
@@ -451,10 +451,14 @@ enum hns3_link_mode_bits {
 	HNS3_LM_COUNT = 15
 };
 
-#define HNS3_INT_GL_50K		0x000A
-#define HNS3_INT_GL_20K		0x0019
-#define HNS3_INT_GL_18K		0x001B
-#define HNS3_INT_GL_8K		0x003E
+#define HNS3_INT_GL_MAX			0x1FE0
+#define HNS3_INT_GL_50K			0x0014
+#define HNS3_INT_GL_20K			0x0032
+#define HNS3_INT_GL_18K			0x0036
+#define HNS3_INT_GL_8K			0x007C
+
+#define HNS3_INT_RL_MAX			0x00EC
+#define HNS3_INT_RL_ENABLE_MASK		0x40
 
 struct hns3_enet_ring_group {
 	/* array of pointers to rings */
@@ -464,6 +468,7 @@ struct hns3_enet_ring_group {
 	u16 count;
 	enum hns3_flow_level_range flow_level;
 	u16 int_gl;
+	u8 gl_adapt_enable;
 };
 
 struct hns3_enet_tqp_vector {
@@ -594,7 +599,15 @@ static inline void hns3_write_reg(void __iomem *base, u32 reg, u32 value)
 #define hns3_get_handle(ndev) \
 	(((struct hns3_nic_priv *)netdev_priv(ndev))->ae_handle)
 
+#define hns3_gl_usec_to_reg(int_gl) (int_gl >> 1)
+#define hns3_gl_round_down(int_gl) round_down(int_gl, 2)
+
+#define hns3_rl_usec_to_reg(int_rl) (int_rl >> 2)
+#define hns3_rl_round_down(int_rl) round_down(int_rl, 4)
+
 void hns3_ethtool_set_ops(struct net_device *netdev);
+int hns3_set_channels(struct net_device *netdev,
+		      struct ethtool_channels *ch);
 
 bool hns3_clean_tx_ring(struct hns3_enet_ring *ring, int budget);
 int hns3_init_all_ring(struct hns3_nic_priv *priv);
@@ -604,6 +617,13 @@ int hns3_clean_rx_ring(
 		struct hns3_enet_ring *ring, int budget,
 		void (*rx_fn)(struct hns3_enet_ring *, struct sk_buff *));
 
+void hns3_set_vector_coalesce_rx_gl(struct hns3_enet_tqp_vector *tqp_vector,
+				    u32 gl_value);
+void hns3_set_vector_coalesce_tx_gl(struct hns3_enet_tqp_vector *tqp_vector,
+				    u32 gl_value);
+void hns3_set_vector_coalesce_rl(struct hns3_enet_tqp_vector *tqp_vector,
+				 u32 rl_value);
+
 #ifdef CONFIG_HNS3_DCB
 void hns3_dcbnl_setup(struct hnae3_handle *handle);
 #else
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hns3_ethtool.c b/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c
index a21470c72da3..b034c7f24eda 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hns3_ethtool.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c
@@ -15,26 +15,25 @@
 
 struct hns3_stats {
 	char stats_string[ETH_GSTRING_LEN];
-	int stats_size;
 	int stats_offset;
 };
 
 /* tqp related stats */
 #define HNS3_TQP_STAT(_string, _member)	{			\
 	.stats_string = _string,				\
-	.stats_size = FIELD_SIZEOF(struct ring_stats, _member),	\
-	.stats_offset = offsetof(struct hns3_enet_ring, stats),	\
-}								\
+	.stats_offset = offsetof(struct hns3_enet_ring, stats) +\
+			offsetof(struct ring_stats, _member),   \
+}
 
 static const struct hns3_stats hns3_txq_stats[] = {
 	/* Tx per-queue statistics */
-	HNS3_TQP_STAT("tx_io_err_cnt", io_err_cnt),
-	HNS3_TQP_STAT("tx_sw_err_cnt", sw_err_cnt),
-	HNS3_TQP_STAT("tx_seg_pkt_cnt", seg_pkt_cnt),
-	HNS3_TQP_STAT("tx_pkts", tx_pkts),
-	HNS3_TQP_STAT("tx_bytes", tx_bytes),
-	HNS3_TQP_STAT("tx_err_cnt", tx_err_cnt),
-	HNS3_TQP_STAT("tx_restart_queue", restart_queue),
+	HNS3_TQP_STAT("io_err_cnt", io_err_cnt),
+	HNS3_TQP_STAT("tx_dropped", sw_err_cnt),
+	HNS3_TQP_STAT("seg_pkt_cnt", seg_pkt_cnt),
+	HNS3_TQP_STAT("packets", tx_pkts),
+	HNS3_TQP_STAT("bytes", tx_bytes),
+	HNS3_TQP_STAT("errors", tx_err_cnt),
+	HNS3_TQP_STAT("tx_wake", restart_queue),
 	HNS3_TQP_STAT("tx_busy", tx_busy),
 };
 
@@ -42,18 +41,18 @@ static const struct hns3_stats hns3_txq_stats[] = {
 
 static const struct hns3_stats hns3_rxq_stats[] = {
 	/* Rx per-queue statistics */
-	HNS3_TQP_STAT("rx_io_err_cnt", io_err_cnt),
-	HNS3_TQP_STAT("rx_sw_err_cnt", sw_err_cnt),
-	HNS3_TQP_STAT("rx_seg_pkt_cnt", seg_pkt_cnt),
-	HNS3_TQP_STAT("rx_pkts", rx_pkts),
-	HNS3_TQP_STAT("rx_bytes", rx_bytes),
-	HNS3_TQP_STAT("rx_err_cnt", rx_err_cnt),
-	HNS3_TQP_STAT("rx_reuse_pg_cnt", reuse_pg_cnt),
-	HNS3_TQP_STAT("rx_err_pkt_len", err_pkt_len),
-	HNS3_TQP_STAT("rx_non_vld_descs", non_vld_descs),
-	HNS3_TQP_STAT("rx_err_bd_num", err_bd_num),
-	HNS3_TQP_STAT("rx_l2_err", l2_err),
-	HNS3_TQP_STAT("rx_l3l4_csum_err", l3l4_csum_err),
+	HNS3_TQP_STAT("io_err_cnt", io_err_cnt),
+	HNS3_TQP_STAT("rx_dropped", sw_err_cnt),
+	HNS3_TQP_STAT("seg_pkt_cnt", seg_pkt_cnt),
+	HNS3_TQP_STAT("packets", rx_pkts),
+	HNS3_TQP_STAT("bytes", rx_bytes),
+	HNS3_TQP_STAT("errors", rx_err_cnt),
+	HNS3_TQP_STAT("reuse_pg_cnt", reuse_pg_cnt),
+	HNS3_TQP_STAT("err_pkt_len", err_pkt_len),
+	HNS3_TQP_STAT("non_vld_descs", non_vld_descs),
+	HNS3_TQP_STAT("err_bd_num", err_bd_num),
+	HNS3_TQP_STAT("l2_err", l2_err),
+	HNS3_TQP_STAT("l3l4_csum_err", l3l4_csum_err),
 };
 
 #define HNS3_RXQ_STATS_COUNT ARRAY_SIZE(hns3_rxq_stats)
@@ -389,9 +388,9 @@ static int hns3_get_sset_count(struct net_device *netdev, int stringset)
 }
 
 static void *hns3_update_strings(u8 *data, const struct hns3_stats *stats,
-				 u32 stat_count, u32 num_tqps)
+		u32 stat_count, u32 num_tqps, const char *prefix)
 {
-#define MAX_PREFIX_SIZE (8 + 4)
+#define MAX_PREFIX_SIZE (6 + 4)
 	u32 size_left;
 	u32 i, j;
 	u32 n1;
@@ -401,7 +400,8 @@ static void *hns3_update_strings(u8 *data, const struct hns3_stats *stats,
 			data[ETH_GSTRING_LEN - 1] = '\0';
 
 			/* first, prepend the prefix string */
-			n1 = snprintf(data, MAX_PREFIX_SIZE, "rcb_q%d_", i);
+			n1 = snprintf(data, MAX_PREFIX_SIZE, "%s#%d_",
+				      prefix, i);
 			n1 = min_t(uint, n1, MAX_PREFIX_SIZE - 1);
 			size_left = (ETH_GSTRING_LEN - 1) - n1;
 
@@ -417,14 +417,16 @@ static void *hns3_update_strings(u8 *data, const struct hns3_stats *stats,
 static u8 *hns3_get_strings_tqps(struct hnae3_handle *handle, u8 *data)
 {
 	struct hnae3_knic_private_info *kinfo = &handle->kinfo;
+	const char tx_prefix[] = "txq";
+	const char rx_prefix[] = "rxq";
 
 	/* get strings for Tx */
 	data = hns3_update_strings(data, hns3_txq_stats, HNS3_TXQ_STATS_COUNT,
-				   kinfo->num_tqps);
+				   kinfo->num_tqps, tx_prefix);
 
 	/* get strings for Rx */
 	data = hns3_update_strings(data, hns3_rxq_stats, HNS3_RXQ_STATS_COUNT,
-				   kinfo->num_tqps);
+				   kinfo->num_tqps, rx_prefix);
 
 	return data;
 }
@@ -455,13 +457,13 @@ static u64 *hns3_get_stats_tqps(struct hnae3_handle *handle, u64 *data)
 	struct hnae3_knic_private_info *kinfo = &handle->kinfo;
 	struct hns3_enet_ring *ring;
 	u8 *stat;
-	u32 i;
+	int i, j;
 
 	/* get stats for Tx */
 	for (i = 0; i < kinfo->num_tqps; i++) {
 		ring = nic_priv->ring_data[i].ring;
-		for (i = 0; i < HNS3_TXQ_STATS_COUNT; i++) {
-			stat = (u8 *)ring + hns3_txq_stats[i].stats_offset;
+		for (j = 0; j < HNS3_TXQ_STATS_COUNT; j++) {
+			stat = (u8 *)ring + hns3_txq_stats[j].stats_offset;
 			*data++ = *(u64 *)stat;
 		}
 	}
@@ -469,8 +471,8 @@ static u64 *hns3_get_stats_tqps(struct hnae3_handle *handle, u64 *data)
 	/* get stats for Rx */
 	for (i = 0; i < kinfo->num_tqps; i++) {
 		ring = nic_priv->ring_data[i + kinfo->num_tqps].ring;
-		for (i = 0; i < HNS3_RXQ_STATS_COUNT; i++) {
-			stat = (u8 *)ring + hns3_rxq_stats[i].stats_offset;
+		for (j = 0; j < HNS3_RXQ_STATS_COUNT; j++) {
+			stat = (u8 *)ring + hns3_rxq_stats[j].stats_offset;
 			*data++ = *(u64 *)stat;
 		}
 	}
@@ -559,10 +561,23 @@ static void hns3_get_pauseparam(struct net_device *netdev,
 			&param->rx_pause, &param->tx_pause);
 }
 
+static int hns3_set_pauseparam(struct net_device *netdev,
+			       struct ethtool_pauseparam *param)
+{
+	struct hnae3_handle *h = hns3_get_handle(netdev);
+
+	if (h->ae_algo->ops->set_pauseparam)
+		return h->ae_algo->ops->set_pauseparam(h, param->autoneg,
+						       param->rx_pause,
+						       param->tx_pause);
+	return -EOPNOTSUPP;
+}
+
 static int hns3_get_link_ksettings(struct net_device *netdev,
 				   struct ethtool_link_ksettings *cmd)
 {
 	struct hnae3_handle *h = hns3_get_handle(netdev);
+	u32 flowctrl_adv = 0;
 	u32 supported_caps;
 	u32 advertised_caps;
 	u8 media_type = HNAE3_MEDIA_TYPE_UNKNOWN;
@@ -638,6 +653,8 @@ static int hns3_get_link_ksettings(struct net_device *netdev,
 		if (!cmd->base.autoneg)
 			advertised_caps &= ~HNS3_LM_AUTONEG_BIT;
 
+		advertised_caps &= ~HNS3_LM_PAUSE_BIT;
+
 		/* now, map driver link modes to ethtool link modes */
 		hns3_driv_to_eth_caps(supported_caps, cmd, false);
 		hns3_driv_to_eth_caps(advertised_caps, cmd, true);
@@ -650,6 +667,18 @@ static int hns3_get_link_ksettings(struct net_device *netdev,
 	/* 4.mdio_support */
 	cmd->base.mdio_support = ETH_MDIO_SUPPORTS_C22;
 
+	/* 5.get flow control settings */
+	if (h->ae_algo->ops->get_flowctrl_adv)
+		h->ae_algo->ops->get_flowctrl_adv(h, &flowctrl_adv);
+
+	if (flowctrl_adv & ADVERTISED_Pause)
+		ethtool_link_ksettings_add_link_mode(cmd, advertising,
+						     Pause);
+
+	if (flowctrl_adv & ADVERTISED_Asym_Pause)
+		ethtool_link_ksettings_add_link_mode(cmd, advertising,
+						     Asym_Pause);
+
 	return 0;
 }
 
@@ -730,7 +759,7 @@ static int hns3_get_rxnfc(struct net_device *netdev,
 
 	switch (cmd->cmd) {
 	case ETHTOOL_GRXRINGS:
-		cmd->data = h->kinfo.num_tc * h->kinfo.rss_size;
+		cmd->data = h->kinfo.rss_size;
 		break;
 	case ETHTOOL_GRXFH:
 		return h->ae_algo->ops->get_rss_tuple(h, cmd);
@@ -849,6 +878,241 @@ static int hns3_nway_reset(struct net_device *netdev)
 	return genphy_restart_aneg(phy);
 }
 
+static void hns3_get_channels(struct net_device *netdev,
+			      struct ethtool_channels *ch)
+{
+	struct hnae3_handle *h = hns3_get_handle(netdev);
+
+	if (h->ae_algo->ops->get_channels)
+		h->ae_algo->ops->get_channels(h, ch);
+}
+
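+/* read the interrupt coalesce settings of a single tx/rx queue pair */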
+static int hns3_get_coalesce_per_queue(struct net_device *netdev, u32 queue,
+				       struct ethtool_coalesce *cmd)
+{
+	struct hns3_enet_tqp_vector *tx_vector, *rx_vector;
+	struct hns3_nic_priv *priv = netdev_priv(netdev);
+	struct hnae3_handle *h = priv->ae_handle;
+	u16 queue_num = h->kinfo.num_tqps;
+
+	if (queue >= queue_num) {
+		netdev_err(netdev,
+			   "Invalid queue value %d! Queue max id=%d\n",
+			   queue, queue_num - 1);
+		return -EINVAL;
+	}
+
+	tx_vector = priv->ring_data[queue].ring->tqp_vector;
+	rx_vector = priv->ring_data[queue_num + queue].ring->tqp_vector;
+
+	cmd->use_adaptive_tx_coalesce = tx_vector->tx_group.gl_adapt_enable;
+	cmd->use_adaptive_rx_coalesce = rx_vector->rx_group.gl_adapt_enable;
+
+	cmd->tx_coalesce_usecs = tx_vector->tx_group.int_gl;
+	cmd->rx_coalesce_usecs = rx_vector->rx_group.int_gl;
+
+	cmd->tx_coalesce_usecs_high = h->kinfo.int_rl_setting;
+	cmd->rx_coalesce_usecs_high = h->kinfo.int_rl_setting;
+
+	return 0;
+}
+
+static int hns3_get_coalesce(struct net_device *netdev,
+			     struct ethtool_coalesce *cmd)
+{
+	return hns3_get_coalesce_per_queue(netdev, 0, cmd);
+}
+
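+/* validate the rx/tx usec values, warning when they will be rounded down */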
+static int hns3_check_gl_coalesce_para(struct net_device *netdev,
+				       struct ethtool_coalesce *cmd)
+{
+	u32 rx_gl, tx_gl;
+
+	if (cmd->rx_coalesce_usecs > HNS3_INT_GL_MAX) {
+		netdev_err(netdev,
+			   "Invalid rx-usecs value, rx-usecs range is 0-%d\n",
+			   HNS3_INT_GL_MAX);
+		return -EINVAL;
+	}
+
+	if (cmd->tx_coalesce_usecs > HNS3_INT_GL_MAX) {
+		netdev_err(netdev,
+			   "Invalid tx-usecs value, tx-usecs range is 0-%d\n",
+			   HNS3_INT_GL_MAX);
+		return -EINVAL;
+	}
+
+	rx_gl = hns3_gl_round_down(cmd->rx_coalesce_usecs);
+	if (rx_gl != cmd->rx_coalesce_usecs) {
+		netdev_info(netdev,
+			    "rx_usecs(%d) rounded down to %d, because it must be multiple of 2.\n",
+			    cmd->rx_coalesce_usecs, rx_gl);
+	}
+
+	tx_gl = hns3_gl_round_down(cmd->tx_coalesce_usecs);
+	if (tx_gl != cmd->tx_coalesce_usecs) {
+		netdev_info(netdev,
+			    "tx_usecs(%d) rounded down to %d, because it must be multiple of 2.\n",
+			    cmd->tx_coalesce_usecs, tx_gl);
+	}
+
+	return 0;
+}
+
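+/* validate the usecs_high values: rx and tx must match and be in range */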
+static int hns3_check_rl_coalesce_para(struct net_device *netdev,
+				       struct ethtool_coalesce *cmd)
+{
+	u32 rl;
+
+	if (cmd->tx_coalesce_usecs_high != cmd->rx_coalesce_usecs_high) {
+		netdev_err(netdev,
+			   "tx_usecs_high must be same as rx_usecs_high.\n");
+		return -EINVAL;
+	}
+
+	if (cmd->rx_coalesce_usecs_high > HNS3_INT_RL_MAX) {
+		netdev_err(netdev,
+			   "Invalid usecs_high value, usecs_high range is 0-%d\n",
+			   HNS3_INT_RL_MAX);
+		return -EINVAL;
+	}
+
+	rl = hns3_rl_round_down(cmd->rx_coalesce_usecs_high);
+	if (rl != cmd->rx_coalesce_usecs_high) {
+		netdev_info(netdev,
+			    "usecs_high(%d) rounded down to %d, because it must be multiple of 4.\n",
+			    cmd->rx_coalesce_usecs_high, rl);
+	}
+
+	return 0;
+}
+
+static int hns3_check_coalesce_para(struct net_device *netdev,
+				    struct ethtool_coalesce *cmd)
+{
+	int ret;
+
+	ret = hns3_check_gl_coalesce_para(netdev, cmd);
+	if (ret) {
+		netdev_err(netdev,
+			   "Check gl coalesce param fail. ret = %d\n", ret);
+		return ret;
+	}
+
+	ret = hns3_check_rl_coalesce_para(netdev, cmd);
+	if (ret) {
+		netdev_err(netdev,
+			   "Check rl coalesce param fail. ret = %d\n", ret);
+		return ret;
+	}
+
+	if (cmd->use_adaptive_tx_coalesce == 1 ||
+	    cmd->use_adaptive_rx_coalesce == 1) {
+		netdev_info(netdev,
+			    "adaptive-tx=%d and adaptive-rx=%d, tx_usecs or rx_usecs will changed dynamically.\n",
+			    cmd->use_adaptive_tx_coalesce,
+			    cmd->use_adaptive_rx_coalesce);
+	}
+
+	return 0;
+}
+
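+/* apply the checked coalesce settings to one tx/rx queue pair */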
+static void hns3_set_coalesce_per_queue(struct net_device *netdev,
+					struct ethtool_coalesce *cmd,
+					u32 queue)
+{
+	struct hns3_enet_tqp_vector *tx_vector, *rx_vector;
+	struct hns3_nic_priv *priv = netdev_priv(netdev);
+	struct hnae3_handle *h = priv->ae_handle;
+	int queue_num = h->kinfo.num_tqps;
+
+	tx_vector = priv->ring_data[queue].ring->tqp_vector;
+	rx_vector = priv->ring_data[queue_num + queue].ring->tqp_vector;
+
+	tx_vector->tx_group.gl_adapt_enable = cmd->use_adaptive_tx_coalesce;
+	rx_vector->rx_group.gl_adapt_enable = cmd->use_adaptive_rx_coalesce;
+
+	tx_vector->tx_group.int_gl = cmd->tx_coalesce_usecs;
+	rx_vector->rx_group.int_gl = cmd->rx_coalesce_usecs;
+
+	hns3_set_vector_coalesce_tx_gl(tx_vector, tx_vector->tx_group.int_gl);
+	hns3_set_vector_coalesce_rx_gl(rx_vector, rx_vector->rx_group.int_gl);
+
+	hns3_set_vector_coalesce_rl(tx_vector, h->kinfo.int_rl_setting);
+	hns3_set_vector_coalesce_rl(rx_vector, h->kinfo.int_rl_setting);
+}
+
+static int hns3_set_coalesce(struct net_device *netdev,
+			     struct ethtool_coalesce *cmd)
+{
+	struct hnae3_handle *h = hns3_get_handle(netdev);
+	u16 queue_num = h->kinfo.num_tqps;
+	int ret;
+	int i;
+
+	ret = hns3_check_coalesce_para(netdev, cmd);
+	if (ret)
+		return ret;
+
+	h->kinfo.int_rl_setting =
+		hns3_rl_round_down(cmd->rx_coalesce_usecs_high);
+
+	for (i = 0; i < queue_num; i++)
+		hns3_set_coalesce_per_queue(netdev, cmd, i);
+
+	return 0;
+}
+
+static int hns3_get_regs_len(struct net_device *netdev)
+{
+	struct hnae3_handle *h = hns3_get_handle(netdev);
+
+	if (!h->ae_algo->ops->get_regs_len)
+		return -EOPNOTSUPP;
+
+	return h->ae_algo->ops->get_regs_len(h);
+}
+
+static void hns3_get_regs(struct net_device *netdev,
+			  struct ethtool_regs *cmd, void *data)
+{
+	struct hnae3_handle *h = hns3_get_handle(netdev);
+
+	if (!h->ae_algo->ops->get_regs)
+		return;
+
+	h->ae_algo->ops->get_regs(h, &cmd->version, data);
+}
+
+static int hns3_set_phys_id(struct net_device *netdev,
+			    enum ethtool_phys_id_state state)
+{
+	struct hnae3_handle *h = hns3_get_handle(netdev);
+
+	if (!h->ae_algo || !h->ae_algo->ops || !h->ae_algo->ops->set_led_id)
+		return -EOPNOTSUPP;
+
+	return h->ae_algo->ops->set_led_id(h, state);
+}
+
+static const struct ethtool_ops hns3vf_ethtool_ops = {
+	.get_drvinfo = hns3_get_drvinfo,
+	.get_ringparam = hns3_get_ringparam,
+	.set_ringparam = hns3_set_ringparam,
+	.get_strings = hns3_get_strings,
+	.get_ethtool_stats = hns3_get_stats,
+	.get_sset_count = hns3_get_sset_count,
+	.get_rxnfc = hns3_get_rxnfc,
+	.get_rxfh_key_size = hns3_get_rss_key_size,
+	.get_rxfh_indir_size = hns3_get_rss_indir_size,
+	.get_rxfh = hns3_get_rss,
+	.set_rxfh = hns3_set_rss,
+	.get_link_ksettings = hns3_get_link_ksettings,
+	.get_channels = hns3_get_channels,
+	.get_coalesce = hns3_get_coalesce,
+	.set_coalesce = hns3_set_coalesce,
+};
+
 static const struct ethtool_ops hns3_ethtool_ops = {
 	.self_test = hns3_self_test,
 	.get_drvinfo = hns3_get_drvinfo,
@@ -856,6 +1120,7 @@ static const struct ethtool_ops hns3_ethtool_ops = {
 	.get_ringparam = hns3_get_ringparam,
 	.set_ringparam = hns3_set_ringparam,
 	.get_pauseparam = hns3_get_pauseparam,
+	.set_pauseparam = hns3_set_pauseparam,
 	.get_strings = hns3_get_strings,
 	.get_ethtool_stats = hns3_get_stats,
 	.get_sset_count = hns3_get_sset_count,
@@ -868,9 +1133,21 @@ static const struct ethtool_ops hns3_ethtool_ops = {
 	.get_link_ksettings = hns3_get_link_ksettings,
 	.set_link_ksettings = hns3_set_link_ksettings,
 	.nway_reset = hns3_nway_reset,
+	.get_channels = hns3_get_channels,
+	.set_channels = hns3_set_channels,
+	.get_coalesce = hns3_get_coalesce,
+	.set_coalesce = hns3_set_coalesce,
+	.get_regs_len = hns3_get_regs_len,
+	.get_regs = hns3_get_regs,
+	.set_phys_id = hns3_set_phys_id,
 };
 
 void hns3_ethtool_set_ops(struct net_device *netdev)
 {
-	netdev->ethtool_ops = &hns3_ethtool_ops;
+	struct hnae3_handle *h = hns3_get_handle(netdev);
+
+	if (h->flags & HNAE3_SUPPORT_VF)
+		netdev->ethtool_ops = &hns3vf_ethtool_ops;
+	else
+		netdev->ethtool_ops = &hns3_ethtool_ops;
 }
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/Makefile b/drivers/net/ethernet/hisilicon/hns3/hns3pf/Makefile
index d2b20d01a58c..cb8ddd043476 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/Makefile
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/Makefile
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0+
 #
 # Makefile for the HISILICON network device drivers.
 #
@@ -5,11 +6,6 @@
 ccflags-y := -Idrivers/net/ethernet/hisilicon/hns3
 
 obj-$(CONFIG_HNS3_HCLGE) += hclge.o
-hclge-objs = hclge_main.o hclge_cmd.o hclge_mdio.o hclge_tm.o
+hclge-objs = hclge_main.o hclge_cmd.o hclge_mdio.o hclge_tm.o hclge_mbx.o
 
 hclge-$(CONFIG_HNS3_DCB) += hclge_dcb.o
-
-obj-$(CONFIG_HNS3_ENET) += hns3.o
-hns3-objs = hns3_enet.o hns3_ethtool.o
-
-hns3-$(CONFIG_HNS3_DCB) += hns3_dcbnl.o
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h
index ce5ed8845042..3fd10a6bec53 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h
@@ -102,6 +102,10 @@ enum hclge_opcode_type {
 	HCLGE_OPC_STATS_64_BIT		= 0x0030,
 	HCLGE_OPC_STATS_32_BIT		= 0x0031,
 	HCLGE_OPC_STATS_MAC		= 0x0032,
+
+	HCLGE_OPC_QUERY_REG_NUM		= 0x0040,
+	HCLGE_OPC_QUERY_32_BIT_REG	= 0x0041,
+	HCLGE_OPC_QUERY_64_BIT_REG	= 0x0042,
 	/* Device management command */
 
 	/* MAC command */
@@ -111,6 +115,7 @@ enum hclge_opcode_type {
 	HCLGE_OPC_QUERY_LINK_STATUS	= 0x0307,
 	HCLGE_OPC_CONFIG_MAX_FRM_SIZE	= 0x0308,
 	HCLGE_OPC_CONFIG_SPEED_DUP	= 0x0309,
+	HCLGE_OPC_STATS_MAC_TRAFFIC	= 0x0314,
 	/* MACSEC command */
 
 	/* PFC/Pause CMD */
@@ -180,6 +185,10 @@ enum hclge_opcode_type {
 	/* Promiscuous mode command */
 	HCLGE_OPC_CFG_PROMISC_MODE	= 0x0E01,
 
+	/* VLAN offload command */
+	HCLGE_OPC_VLAN_PORT_TX_CFG	= 0x0F01,
+	HCLGE_OPC_VLAN_PORT_RX_CFG	= 0x0F02,
+
 	/* Interrupts cmd */
 	HCLGE_OPC_ADD_RING_TO_VECTOR	= 0x1503,
 	HCLGE_OPC_DEL_RING_TO_VECTOR	= 0x1504,
@@ -191,6 +200,7 @@ enum hclge_opcode_type {
 	HCLGE_OPC_MAC_VLAN_INSERT	    = 0x1003,
 	HCLGE_OPC_MAC_ETHTYPE_ADD	    = 0x1010,
 	HCLGE_OPC_MAC_ETHTYPE_REMOVE	= 0x1011,
+	HCLGE_OPC_MAC_VLAN_MASK_SET	= 0x1012,
 
 	/* Multicast linear table cmd */
 	HCLGE_OPC_MTA_MAC_MODE_CFG	    = 0x1020,
@@ -218,6 +228,9 @@ enum hclge_opcode_type {
 
 	/* Mailbox cmd */
 	HCLGEVF_OPC_MBX_PF_TO_VF	= 0x2000,
+
+	/* LED command */
+	HCLGE_OPC_LED_STATUS_CFG	= 0xB000,
 };
 
 #define HCLGE_TQP_REG_OFFSET		0x80000
@@ -399,6 +412,8 @@ struct hclge_pf_res_cmd {
 #define HCLGE_CFG_MAC_ADDR_H_M	GENMASK(15, 0)
 #define HCLGE_CFG_DEFAULT_SPEED_S	16
 #define HCLGE_CFG_DEFAULT_SPEED_M	GENMASK(23, 16)
+#define HCLGE_CFG_RSS_SIZE_S	24
+#define HCLGE_CFG_RSS_SIZE_M	GENMASK(31, 24)
 
 struct hclge_cfg_param_cmd {
 	__le32 offset;
@@ -549,8 +564,6 @@ struct hclge_config_auto_neg_cmd {
 	u8      rsv[20];
 };
 
-#define HCLGE_MAC_MIN_MTU		64
-#define HCLGE_MAC_MAX_MTU		9728
 #define HCLGE_MAC_UPLINK_PORT		0x100
 
 struct hclge_config_max_frm_size_cmd {
@@ -587,6 +600,37 @@ struct hclge_mac_vlan_tbl_entry_cmd {
 	u8      rsv2[6];
 };
 
+#define HCLGE_VLAN_MASK_EN_B		0x0
+struct hclge_mac_vlan_mask_entry_cmd {
+	u8 rsv0[2];
+	u8 vlan_mask;
+	u8 rsv1;
+	u8 mac_mask[6];
+	u8 rsv2[14];
+};
+
+#define HCLGE_MAC_MGR_MASK_VLAN_B		BIT(0)
+#define HCLGE_MAC_MGR_MASK_MAC_B		BIT(1)
+#define HCLGE_MAC_MGR_MASK_ETHERTYPE_B		BIT(2)
+#define HCLGE_MAC_ETHERTYPE_LLDP		0x88cc
+
+struct hclge_mac_mgr_tbl_entry_cmd {
+	u8      flags;
+	u8      resp_code;
+	__le16  vlan_tag;
+	__le32  mac_addr_hi32;
+	__le16  mac_addr_lo16;
+	__le16  rsv1;
+	__le16  ethter_type;
+	__le16  egress_port;
+	__le16  egress_queue;
+	u8      sw_port_id_aware;
+	u8      rsv2;
+	u8      i_port_bitmap;
+	u8      i_port_direction;
+	u8      rsv3[2];
+};
+
 #define HCLGE_CFG_MTA_MAC_SEL_S		0x0
 #define HCLGE_CFG_MTA_MAC_SEL_M		GENMASK(1, 0)
 #define HCLGE_CFG_MTA_MAC_EN_B		0x7
@@ -658,6 +702,47 @@ struct hclge_vlan_filter_vf_cfg_cmd {
 	u8  vf_bitmap[16];
 };
 
+#define HCLGE_ACCEPT_TAG_B		0
+#define HCLGE_ACCEPT_UNTAG_B		1
+#define HCLGE_PORT_INS_TAG1_EN_B	2
+#define HCLGE_PORT_INS_TAG2_EN_B	3
+#define HCLGE_CFG_NIC_ROCE_SEL_B	4
+struct hclge_vport_vtag_tx_cfg_cmd {
+	u8 vport_vlan_cfg;
+	u8 vf_offset;
+	u8 rsv1[2];
+	__le16 def_vlan_tag1;
+	__le16 def_vlan_tag2;
+	u8 vf_bitmap[8];
+	u8 rsv2[8];
+};
+
+#define HCLGE_REM_TAG1_EN_B		0
+#define HCLGE_REM_TAG2_EN_B		1
+#define HCLGE_SHOW_TAG1_EN_B		2
+#define HCLGE_SHOW_TAG2_EN_B		3
+struct hclge_vport_vtag_rx_cfg_cmd {
+	u8 vport_vlan_cfg;
+	u8 vf_offset;
+	u8 rsv1[6];
+	u8 vf_bitmap[8];
+	u8 rsv2[8];
+};
+
+struct hclge_tx_vlan_type_cfg_cmd {
+	__le16 ot_vlan_type;
+	__le16 in_vlan_type;
+	u8 rsv[20];
+};
+
+struct hclge_rx_vlan_type_cfg_cmd {
+	__le16 ot_fst_vlan_type;
+	__le16 ot_sec_vlan_type;
+	__le16 in_fst_vlan_type;
+	__le16 in_sec_vlan_type;
+	u8 rsv[16];
+};
+
 struct hclge_cfg_com_tqp_queue_cmd {
 	__le16 tqp_id;
 	__le16 stream_id;
@@ -726,6 +811,23 @@ struct hclge_reset_cmd {
 #define HCLGE_NIC_CMQ_DESC_NUM		1024
 #define HCLGE_NIC_CMQ_DESC_NUM_S	3
 
+#define HCLGE_LED_PORT_SPEED_STATE_S	0
+#define HCLGE_LED_PORT_SPEED_STATE_M	GENMASK(5, 0)
+#define HCLGE_LED_ACTIVITY_STATE_S	0
+#define HCLGE_LED_ACTIVITY_STATE_M	GENMASK(1, 0)
+#define HCLGE_LED_LINK_STATE_S		0
+#define HCLGE_LED_LINK_STATE_M		GENMASK(1, 0)
+#define HCLGE_LED_LOCATE_STATE_S	0
+#define HCLGE_LED_LOCATE_STATE_M	GENMASK(1, 0)
+
+struct hclge_set_led_state_cmd {
+	u8 port_speed_led_config;
+	u8 link_led_config;
+	u8 activity_led_config;
+	u8 locate_led_config;
+	u8 rsv[20];
+};
+
 int hclge_cmd_init(struct hclge_dev *hdev);
 static inline void hclge_write_reg(void __iomem *base, u32 reg, u32 value)
 {
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
index 59ed806a52c3..32bc6f68e297 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
@@ -17,10 +17,12 @@
 #include <linux/netdevice.h>
 #include <linux/pci.h>
 #include <linux/platform_device.h>
-
+#include <linux/if_vlan.h>
+#include <net/rtnetlink.h>
 #include "hclge_cmd.h"
 #include "hclge_dcb.h"
 #include "hclge_main.h"
+#include "hclge_mbx.h"
 #include "hclge_mdio.h"
 #include "hclge_tm.h"
 #include "hnae3.h"
@@ -34,8 +36,10 @@
 static int hclge_set_mta_filter_mode(struct hclge_dev *hdev,
 				     enum hclge_mta_dmac_sel_type mta_mac_sel,
 				     bool enable);
+static int hclge_set_mtu(struct hnae3_handle *handle, int new_mtu);
 static int hclge_init_vlan_config(struct hclge_dev *hdev);
 static int hclge_reset_ae_dev(struct hnae3_ae_dev *ae_dev);
+static int hclge_update_led_status(struct hclge_dev *hdev);
 
 static struct hnae3_ae_algo ae_algo;
 
@@ -278,8 +282,8 @@ static const struct hclge_comm_stats_str g_mac_stats_string[] = {
 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_broad_pkt_num)},
 	{"mac_tx_undersize_pkt_num",
 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_undersize_pkt_num)},
-	{"mac_tx_overrsize_pkt_num",
-		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_overrsize_pkt_num)},
+	{"mac_tx_oversize_pkt_num",
+		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_oversize_pkt_num)},
 	{"mac_tx_64_oct_pkt_num",
 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_64_oct_pkt_num)},
 	{"mac_tx_65_127_oct_pkt_num",
@@ -292,8 +296,24 @@ static const struct hclge_comm_stats_str g_mac_stats_string[] = {
 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_512_1023_oct_pkt_num)},
 	{"mac_tx_1024_1518_oct_pkt_num",
 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1024_1518_oct_pkt_num)},
-	{"mac_tx_1519_max_oct_pkt_num",
-		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1519_max_oct_pkt_num)},
+	{"mac_tx_1519_2047_oct_pkt_num",
+		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1519_2047_oct_pkt_num)},
+	{"mac_tx_2048_4095_oct_pkt_num",
+		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_2048_4095_oct_pkt_num)},
+	{"mac_tx_4096_8191_oct_pkt_num",
+		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_4096_8191_oct_pkt_num)},
+	{"mac_tx_8192_12287_oct_pkt_num",
+		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_8192_12287_oct_pkt_num)},
+	{"mac_tx_8192_9216_oct_pkt_num",
+		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_8192_9216_oct_pkt_num)},
+	{"mac_tx_9217_12287_oct_pkt_num",
+		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_9217_12287_oct_pkt_num)},
+	{"mac_tx_12288_16383_oct_pkt_num",
+		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_12288_16383_oct_pkt_num)},
+	{"mac_tx_1519_max_good_pkt_num",
+		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1519_max_good_oct_pkt_num)},
+	{"mac_tx_1519_max_bad_pkt_num",
+		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1519_max_bad_oct_pkt_num)},
 	{"mac_rx_total_pkt_num",
 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_total_pkt_num)},
 	{"mac_rx_total_oct_num",
@@ -314,8 +334,8 @@ static const struct hclge_comm_stats_str g_mac_stats_string[] = {
 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_broad_pkt_num)},
 	{"mac_rx_undersize_pkt_num",
 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_undersize_pkt_num)},
-	{"mac_rx_overrsize_pkt_num",
-		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_overrsize_pkt_num)},
+	{"mac_rx_oversize_pkt_num",
+		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_oversize_pkt_num)},
 	{"mac_rx_64_oct_pkt_num",
 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_64_oct_pkt_num)},
 	{"mac_rx_65_127_oct_pkt_num",
@@ -328,33 +348,59 @@ static const struct hclge_comm_stats_str g_mac_stats_string[] = {
 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_512_1023_oct_pkt_num)},
 	{"mac_rx_1024_1518_oct_pkt_num",
 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1024_1518_oct_pkt_num)},
-	{"mac_rx_1519_max_oct_pkt_num",
-		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1519_max_oct_pkt_num)},
-
-	{"mac_trans_fragment_pkt_num",
-		HCLGE_MAC_STATS_FIELD_OFF(mac_trans_fragment_pkt_num)},
-	{"mac_trans_undermin_pkt_num",
-		HCLGE_MAC_STATS_FIELD_OFF(mac_trans_undermin_pkt_num)},
-	{"mac_trans_jabber_pkt_num",
-		HCLGE_MAC_STATS_FIELD_OFF(mac_trans_jabber_pkt_num)},
-	{"mac_trans_err_all_pkt_num",
-		HCLGE_MAC_STATS_FIELD_OFF(mac_trans_err_all_pkt_num)},
-	{"mac_trans_from_app_good_pkt_num",
-		HCLGE_MAC_STATS_FIELD_OFF(mac_trans_from_app_good_pkt_num)},
-	{"mac_trans_from_app_bad_pkt_num",
-		HCLGE_MAC_STATS_FIELD_OFF(mac_trans_from_app_bad_pkt_num)},
-	{"mac_rcv_fragment_pkt_num",
-		HCLGE_MAC_STATS_FIELD_OFF(mac_rcv_fragment_pkt_num)},
-	{"mac_rcv_undermin_pkt_num",
-		HCLGE_MAC_STATS_FIELD_OFF(mac_rcv_undermin_pkt_num)},
-	{"mac_rcv_jabber_pkt_num",
-		HCLGE_MAC_STATS_FIELD_OFF(mac_rcv_jabber_pkt_num)},
-	{"mac_rcv_fcs_err_pkt_num",
-		HCLGE_MAC_STATS_FIELD_OFF(mac_rcv_fcs_err_pkt_num)},
-	{"mac_rcv_send_app_good_pkt_num",
-		HCLGE_MAC_STATS_FIELD_OFF(mac_rcv_send_app_good_pkt_num)},
-	{"mac_rcv_send_app_bad_pkt_num",
-		HCLGE_MAC_STATS_FIELD_OFF(mac_rcv_send_app_bad_pkt_num)}
+	{"mac_rx_1519_2047_oct_pkt_num",
+		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1519_2047_oct_pkt_num)},
+	{"mac_rx_2048_4095_oct_pkt_num",
+		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_2048_4095_oct_pkt_num)},
+	{"mac_rx_4096_8191_oct_pkt_num",
+		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_4096_8191_oct_pkt_num)},
+	{"mac_rx_8192_12287_oct_pkt_num",
+		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_8192_12287_oct_pkt_num)},
+	{"mac_rx_8192_9216_oct_pkt_num",
+		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_8192_9216_oct_pkt_num)},
+	{"mac_rx_9217_12287_oct_pkt_num",
+		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_9217_12287_oct_pkt_num)},
+	{"mac_rx_12288_16383_oct_pkt_num",
+		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_12288_16383_oct_pkt_num)},
+	{"mac_rx_1519_max_good_pkt_num",
+		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1519_max_good_oct_pkt_num)},
+	{"mac_rx_1519_max_bad_pkt_num",
+		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1519_max_bad_oct_pkt_num)},
+
+	{"mac_tx_fragment_pkt_num",
+		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_fragment_pkt_num)},
+	{"mac_tx_undermin_pkt_num",
+		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_undermin_pkt_num)},
+	{"mac_tx_jabber_pkt_num",
+		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_jabber_pkt_num)},
+	{"mac_tx_err_all_pkt_num",
+		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_err_all_pkt_num)},
+	{"mac_tx_from_app_good_pkt_num",
+		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_from_app_good_pkt_num)},
+	{"mac_tx_from_app_bad_pkt_num",
+		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_from_app_bad_pkt_num)},
+	{"mac_rx_fragment_pkt_num",
+		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_fragment_pkt_num)},
+	{"mac_rx_undermin_pkt_num",
+		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_undermin_pkt_num)},
+	{"mac_rx_jabber_pkt_num",
+		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_jabber_pkt_num)},
+	{"mac_rx_fcs_err_pkt_num",
+		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_fcs_err_pkt_num)},
+	{"mac_rx_send_app_good_pkt_num",
+		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_send_app_good_pkt_num)},
+	{"mac_rx_send_app_bad_pkt_num",
+		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_send_app_bad_pkt_num)}
+};
+
+static const struct hclge_mac_mgr_tbl_entry_cmd hclge_mgr_table[] = {
+	{
+		.flags = HCLGE_MAC_MGR_MASK_VLAN_B,
+		.ethter_type = cpu_to_le16(HCLGE_MAC_ETHERTYPE_LLDP),
+		.mac_addr_hi32 = cpu_to_le32(htonl(0x0180C200)),
+		.mac_addr_lo16 = cpu_to_le16(htons(0x000E)),
+		.i_port_bitmap = 0x1,
+	},
 };
 
 static int hclge_64_bit_update_stats(struct hclge_dev *hdev)
@@ -460,9 +506,41 @@ static int hclge_32_bit_update_stats(struct hclge_dev *hdev)
 	return 0;
 }
 
+static int hclge_mac_get_traffic_stats(struct hclge_dev *hdev)
+{
+	struct hclge_mac_stats *mac_stats = &hdev->hw_stats.mac_stats;
+	struct hclge_desc desc;
+	__le64 *desc_data;
+	int ret;
+
+	/* for a fiber port, we need to query the total rx/tx packet
+	 * statistics, used to check that data transfer is working.
+	 */
+	if (hdev->hw.mac.media_type != HNAE3_MEDIA_TYPE_FIBER)
+		return 0;
+
+	if (test_bit(HCLGE_STATE_STATISTICS_UPDATING, &hdev->state))
+		return 0;
+
+	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_STATS_MAC_TRAFFIC, true);
+	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
+	if (ret) {
+		dev_err(&hdev->pdev->dev,
+			"Get MAC total pkt stats fail, ret = %d\n", ret);
+
+		return ret;
+	}
+
+	desc_data = (__le64 *)(&desc.data[0]);
+	mac_stats->mac_tx_total_pkt_num += le64_to_cpu(*desc_data++);
+	mac_stats->mac_rx_total_pkt_num += le64_to_cpu(*desc_data);
+
+	return 0;
+}
+
 static int hclge_mac_update_stats(struct hclge_dev *hdev)
 {
-#define HCLGE_MAC_CMD_NUM 17
+#define HCLGE_MAC_CMD_NUM 21
 #define HCLGE_RTN_DATA_NUM 4
 
 	u64 *data = (u64 *)(&hdev->hw_stats.mac_stats);
@@ -524,7 +602,7 @@ static int hclge_tqps_update_stats(struct hnae3_handle *handle)
 			return ret;
 		}
 		tqp->tqp_stats.rcb_rx_ring_pktnum_rcd +=
-			le32_to_cpu(desc[0].data[4]);
+			le32_to_cpu(desc[0].data[1]);
 	}
 
 	for (i = 0; i < kinfo->num_tqps; i++) {
@@ -544,7 +622,7 @@ static int hclge_tqps_update_stats(struct hnae3_handle *handle)
 			return ret;
 		}
 		tqp->tqp_stats.rcb_tx_ring_pktnum_rcd +=
-			le32_to_cpu(desc[0].data[4]);
+			le32_to_cpu(desc[0].data[1]);
 	}
 
 	return 0;
@@ -586,7 +664,7 @@ static u8 *hclge_tqps_get_strings(struct hnae3_handle *handle, u8 *data)
 	for (i = 0; i < kinfo->num_tqps; i++) {
 		struct hclge_tqp *tqp = container_of(handle->kinfo.tqp[i],
 			struct hclge_tqp, q);
-		snprintf(buff, ETH_GSTRING_LEN, "rcb_q%d_tx_pktnum_rcd",
+		snprintf(buff, ETH_GSTRING_LEN, "txq#%d_pktnum_rcd",
 			 tqp->index);
 		buff = buff + ETH_GSTRING_LEN;
 	}
@@ -594,7 +672,7 @@ static u8 *hclge_tqps_get_strings(struct hnae3_handle *handle, u8 *data)
 	for (i = 0; i < kinfo->num_tqps; i++) {
 		struct hclge_tqp *tqp = container_of(kinfo->tqp[i],
 			struct hclge_tqp, q);
-		snprintf(buff, ETH_GSTRING_LEN, "rcb_q%d_rx_pktnum_rcd",
+		snprintf(buff, ETH_GSTRING_LEN, "rxq#%d_pktnum_rcd",
 			 tqp->index);
 		buff = buff + ETH_GSTRING_LEN;
 	}
@@ -642,23 +720,22 @@ static void hclge_update_netstat(struct hclge_hw_stats *hw_stats,
 	net_stats->rx_dropped += hw_stats->all_32_bit_stats.ppp_key_drop_num;
 	net_stats->rx_dropped += hw_stats->all_32_bit_stats.ssu_key_drop_num;
 
-	net_stats->rx_errors = hw_stats->mac_stats.mac_rx_overrsize_pkt_num;
+	net_stats->rx_errors = hw_stats->mac_stats.mac_rx_oversize_pkt_num;
 	net_stats->rx_errors += hw_stats->mac_stats.mac_rx_undersize_pkt_num;
-	net_stats->rx_errors += hw_stats->all_32_bit_stats.igu_rx_err_pkt;
 	net_stats->rx_errors += hw_stats->all_32_bit_stats.igu_rx_no_eof_pkt;
 	net_stats->rx_errors += hw_stats->all_32_bit_stats.igu_rx_no_sof_pkt;
-	net_stats->rx_errors += hw_stats->mac_stats.mac_rcv_fcs_err_pkt_num;
+	net_stats->rx_errors += hw_stats->mac_stats.mac_rx_fcs_err_pkt_num;
 
 	net_stats->multicast = hw_stats->mac_stats.mac_tx_multi_pkt_num;
 	net_stats->multicast += hw_stats->mac_stats.mac_rx_multi_pkt_num;
 
-	net_stats->rx_crc_errors = hw_stats->mac_stats.mac_rcv_fcs_err_pkt_num;
+	net_stats->rx_crc_errors = hw_stats->mac_stats.mac_rx_fcs_err_pkt_num;
 	net_stats->rx_length_errors =
 		hw_stats->mac_stats.mac_rx_undersize_pkt_num;
 	net_stats->rx_length_errors +=
-		hw_stats->mac_stats.mac_rx_overrsize_pkt_num;
+		hw_stats->mac_stats.mac_rx_oversize_pkt_num;
 	net_stats->rx_over_errors =
-		hw_stats->mac_stats.mac_rx_overrsize_pkt_num;
+		hw_stats->mac_stats.mac_rx_oversize_pkt_num;
 }
 
 static void hclge_update_stats_for_all(struct hclge_dev *hdev)
@@ -698,6 +775,9 @@ static void hclge_update_stats(struct hnae3_handle *handle,
 	struct hclge_hw_stats *hw_stats = &hdev->hw_stats;
 	int status;
 
+	if (test_and_set_bit(HCLGE_STATE_STATISTICS_UPDATING, &hdev->state))
+		return;
+
 	status = hclge_mac_update_stats(hdev);
 	if (status)
 		dev_err(&hdev->pdev->dev,
@@ -723,6 +803,8 @@ static void hclge_update_stats(struct hnae3_handle *handle,
 			status);
 
 	hclge_update_netstat(hw_stats, net_stats);
+
+	clear_bit(HCLGE_STATE_STATISTICS_UPDATING, &hdev->state);
 }
 
 static int hclge_get_sset_count(struct hnae3_handle *handle, int stringset)
@@ -981,6 +1063,10 @@ static void hclge_parse_cfg(struct hclge_cfg *cfg, struct hclge_desc *desc)
 	cfg->default_speed = hnae_get_field(__le32_to_cpu(req->param[3]),
 					    HCLGE_CFG_DEFAULT_SPEED_M,
 					    HCLGE_CFG_DEFAULT_SPEED_S);
+	cfg->rss_size_max = hnae_get_field(__le32_to_cpu(req->param[3]),
+					   HCLGE_CFG_RSS_SIZE_M,
+					   HCLGE_CFG_RSS_SIZE_S);
+
 	for (i = 0; i < ETH_ALEN; i++)
 		cfg->mac_addr[i] = (mac_addr_tmp >> (8 * i)) & 0xff;
 
@@ -1058,7 +1144,7 @@ static int hclge_configure(struct hclge_dev *hdev)
 
 	hdev->num_vmdq_vport = cfg.vmdq_vport_num;
 	hdev->base_tqp_pid = 0;
-	hdev->rss_size_max = 1;
+	hdev->rss_size_max = cfg.rss_size_max;
 	hdev->rx_buf_len = cfg.rx_buf_len;
 	ether_addr_copy(hdev->hw.mac.mac_addr, cfg.mac_addr);
 	hdev->hw.mac.media_type = cfg.media_type;
@@ -1095,10 +1181,7 @@ static int hclge_configure(struct hclge_dev *hdev)
 	for (i = 0; i < hdev->tm_info.num_tc; i++)
 		hnae_set_bit(hdev->hw_tc_map, i, 1);
 
-	if (!hdev->num_vmdq_vport && !hdev->num_req_vfs)
-		hdev->tx_sch_mode = HCLGE_FLAG_TC_BASE_SCH_MODE;
-	else
-		hdev->tx_sch_mode = HCLGE_FLAG_VNET_BASE_SCH_MODE;
+	hdev->tx_sch_mode = HCLGE_FLAG_TC_BASE_SCH_MODE;
 
 	return ret;
 }
@@ -2132,28 +2215,6 @@ static int hclge_query_mac_an_speed_dup(struct hclge_dev *hdev, int *speed,
 	return 0;
 }
 
-static int hclge_query_autoneg_result(struct hclge_dev *hdev)
-{
-	struct hclge_mac *mac = &hdev->hw.mac;
-	struct hclge_query_an_speed_dup_cmd *req;
-	struct hclge_desc desc;
-	int ret;
-
-	req = (struct hclge_query_an_speed_dup_cmd *)desc.data;
-
-	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_AN_RESULT, true);
-	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
-	if (ret) {
-		dev_err(&hdev->pdev->dev,
-			"autoneg result query cmd failed %d.\n", ret);
-		return ret;
-	}
-
-	mac->autoneg = hnae_get_bit(req->an_syn_dup_speed, HCLGE_QUERY_AN_B);
-
-	return 0;
-}
-
 static int hclge_set_autoneg_en(struct hclge_dev *hdev, bool enable)
 {
 	struct hclge_config_auto_neg_cmd *req;
@@ -2189,15 +2250,45 @@ static int hclge_get_autoneg(struct hnae3_handle *handle)
 {
 	struct hclge_vport *vport = hclge_get_vport(handle);
 	struct hclge_dev *hdev = vport->back;
+	struct phy_device *phydev = hdev->hw.mac.phydev;
 
-	hclge_query_autoneg_result(hdev);
+	if (phydev)
+		return phydev->autoneg;
 
 	return hdev->hw.mac.autoneg;
 }
 
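+/* configure the default MAC/VLAN mask used when matching table entries */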
+static int hclge_set_default_mac_vlan_mask(struct hclge_dev *hdev,
+					   bool mask_vlan,
+					   u8 *mac_mask)
+{
+	struct hclge_mac_vlan_mask_entry_cmd *req;
+	struct hclge_desc desc;
+	int status;
+
+	req = (struct hclge_mac_vlan_mask_entry_cmd *)desc.data;
+	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_MASK_SET, false);
+
+	hnae_set_bit(req->vlan_mask, HCLGE_VLAN_MASK_EN_B,
+		     mask_vlan ? 1 : 0);
+	ether_addr_copy(req->mac_mask, mac_mask);
+
+	status = hclge_cmd_send(&hdev->hw, &desc, 1);
+	if (status)
+		dev_err(&hdev->pdev->dev,
+			"Config mac_vlan_mask failed for cmd_send, ret =%d\n",
+			status);
+
+	return status;
+}
+
 static int hclge_mac_init(struct hclge_dev *hdev)
 {
+	struct hnae3_handle *handle = &hdev->vport[0].nic;
+	struct net_device *netdev = handle->kinfo.netdev;
 	struct hclge_mac *mac = &hdev->hw.mac;
+	u8 mac_mask[ETH_ALEN] = {0x00, 0x00, 0x00, 0x00, 0x00, 0x00};
+	int mtu;
 	int ret;
 
 	ret = hclge_cfg_mac_speed_dup(hdev, hdev->hw.mac.speed, HCLGE_MAC_FULL);
@@ -2223,7 +2314,45 @@ static int hclge_mac_init(struct hclge_dev *hdev)
 		return ret;
 	}
 
-	return hclge_cfg_func_mta_filter(hdev, 0, hdev->accept_mta_mc);
+	ret = hclge_cfg_func_mta_filter(hdev, 0, hdev->accept_mta_mc);
+	if (ret) {
+		dev_err(&hdev->pdev->dev,
+			"set mta filter mode fail ret=%d\n", ret);
+		return ret;
+	}
+
+	ret = hclge_set_default_mac_vlan_mask(hdev, true, mac_mask);
+	if (ret) {
+		dev_err(&hdev->pdev->dev,
+			"set default mac_vlan_mask fail ret=%d\n", ret);
+		return ret;
+	}
+
+	if (netdev)
+		mtu = netdev->mtu;
+	else
+		mtu = ETH_DATA_LEN;
+
+	ret = hclge_set_mtu(handle, mtu);
+	if (ret) {
+		dev_err(&hdev->pdev->dev,
+			"set mtu failed ret=%d\n", ret);
+		return ret;
+	}
+
+	return 0;
+}
+
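+/* schedule the mailbox service task unless it is already scheduled */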
+static void hclge_mbx_task_schedule(struct hclge_dev *hdev)
+{
+	if (!test_and_set_bit(HCLGE_STATE_MBX_SERVICE_SCHED, &hdev->state))
+		schedule_work(&hdev->mbx_service_task);
+}
+
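+/* schedule the reset service task unless it is already scheduled */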
+static void hclge_reset_task_schedule(struct hclge_dev *hdev)
+{
+	if (!test_and_set_bit(HCLGE_STATE_RST_SERVICE_SCHED, &hdev->state))
+		schedule_work(&hdev->rst_service_task);
 }
 
 static void hclge_task_schedule(struct hclge_dev *hdev)
@@ -2350,6 +2479,7 @@ static void hclge_service_timer(struct timer_list *t)
 	struct hclge_dev *hdev = from_timer(hdev, t, service_timer);
 
 	mod_timer(&hdev->service_timer, jiffies + HZ);
+	hdev->hw_stats.stats_timer++;
 	hclge_task_schedule(hdev);
 }
 
@@ -2362,6 +2492,64 @@ static void hclge_service_complete(struct hclge_dev *hdev)
 	clear_bit(HCLGE_STATE_SERVICE_SCHED, &hdev->state);
 }
 
+static u32 hclge_check_event_cause(struct hclge_dev *hdev, u32 *clearval)
+{
+	u32 rst_src_reg;
+	u32 cmdq_src_reg;
+
+	/* fetch the events from their corresponding regs */
+	rst_src_reg = hclge_read_dev(&hdev->hw, HCLGE_MISC_RESET_STS_REG);
+	cmdq_src_reg = hclge_read_dev(&hdev->hw, HCLGE_VECTOR0_CMDQ_SRC_REG);
+
+	/* Assumption: if reset and mailbox events happen to be reported
+	 * together, we only process the reset event in this pass and defer
+	 * the mailbox events. Since we have not cleared the RX CMDQ event,
+	 * the hardware will raise another interrupt just for the mailbox.
+	 */
+
+	/* check for vector0 reset event sources */
+	if (BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B) & rst_src_reg) {
+		set_bit(HNAE3_GLOBAL_RESET, &hdev->reset_pending);
+		*clearval = BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B);
+		return HCLGE_VECTOR0_EVENT_RST;
+	}
+
+	if (BIT(HCLGE_VECTOR0_CORERESET_INT_B) & rst_src_reg) {
+		set_bit(HNAE3_CORE_RESET, &hdev->reset_pending);
+		*clearval = BIT(HCLGE_VECTOR0_CORERESET_INT_B);
+		return HCLGE_VECTOR0_EVENT_RST;
+	}
+
+	if (BIT(HCLGE_VECTOR0_IMPRESET_INT_B) & rst_src_reg) {
+		set_bit(HNAE3_IMP_RESET, &hdev->reset_pending);
+		*clearval = BIT(HCLGE_VECTOR0_IMPRESET_INT_B);
+		return HCLGE_VECTOR0_EVENT_RST;
+	}
+
+	/* check for vector0 mailbox(=CMDQ RX) event source */
+	if (BIT(HCLGE_VECTOR0_RX_CMDQ_INT_B) & cmdq_src_reg) {
+		cmdq_src_reg &= ~BIT(HCLGE_VECTOR0_RX_CMDQ_INT_B);
+		*clearval = cmdq_src_reg;
+		return HCLGE_VECTOR0_EVENT_MBX;
+	}
+
+	return HCLGE_VECTOR0_EVENT_OTHER;
+}
+
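+/* acknowledge a handled vector0 event by clearing its source register */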
+static void hclge_clear_event_cause(struct hclge_dev *hdev, u32 event_type,
+				    u32 regclr)
+{
+	switch (event_type) {
+	case HCLGE_VECTOR0_EVENT_RST:
+		hclge_write_dev(&hdev->hw, HCLGE_MISC_RESET_STS_REG, regclr);
+		break;
+	case HCLGE_VECTOR0_EVENT_MBX:
+		hclge_write_dev(&hdev->hw, HCLGE_VECTOR0_CMDQ_SRC_REG, regclr);
+		break;
+	}
+}
+
 static void hclge_enable_vector(struct hclge_misc_vector *vector, bool enable)
 {
 	writel(enable ? 1 : 0, vector->addr);
@@ -2370,10 +2558,38 @@ static void hclge_enable_vector(struct hclge_misc_vector *vector, bool enable)
 static irqreturn_t hclge_misc_irq_handle(int irq, void *data)
 {
 	struct hclge_dev *hdev = data;
+	u32 event_cause;
+	u32 clearval;
 
 	hclge_enable_vector(&hdev->misc_vector, false);
-	if (!test_and_set_bit(HCLGE_STATE_SERVICE_SCHED, &hdev->state))
-		schedule_work(&hdev->service_task);
+	event_cause = hclge_check_event_cause(hdev, &clearval);
+
+	/* vector 0 interrupt is shared with reset and mailbox source events. */
+	switch (event_cause) {
+	case HCLGE_VECTOR0_EVENT_RST:
+		hclge_reset_task_schedule(hdev);
+		break;
+	case HCLGE_VECTOR0_EVENT_MBX:
+		/* If we are here then either:
+		 * 1. we are not handling any mbx task and none is scheduled,
+		 *    or
+		 * 2. we are handling a mbx task but nothing more is scheduled.
+		 * In both cases we should schedule the mbx task, as this
+		 * interrupt reports more mbx messages.
+		 */
+		hclge_mbx_task_schedule(hdev);
+		break;
+
+	default:
+		dev_dbg(&hdev->pdev->dev,
+			"received unknown or unhandled event of vector0\n");
+		break;
+	}
+
+	/* we should clear the source of interrupt */
+	hclge_clear_event_cause(hdev, event_cause, clearval);
+	hclge_enable_vector(&hdev->misc_vector, true);
 
 	return IRQ_HANDLED;
 }
@@ -2404,9 +2620,9 @@ static int hclge_misc_irq_init(struct hclge_dev *hdev)
 
 	hclge_get_misc_vector(hdev);
 
-	ret = devm_request_irq(&hdev->pdev->dev,
-			       hdev->misc_vector.vector_irq,
-			       hclge_misc_irq_handle, 0, "hclge_misc", hdev);
+	/* this IRQ is explicitly freed in hclge_misc_irq_uninit() */
+	ret = request_irq(hdev->misc_vector.vector_irq, hclge_misc_irq_handle,
+			  0, "hclge_misc", hdev);
 	if (ret) {
 		hclge_free_vector(hdev, 0);
 		dev_err(&hdev->pdev->dev, "request misc irq(%d) fail\n",
@@ -2416,6 +2632,12 @@ static int hclge_misc_irq_init(struct hclge_dev *hdev)
 	return ret;
 }
 
+static void hclge_misc_irq_uninit(struct hclge_dev *hdev)
+{
+	free_irq(hdev->misc_vector.vector_irq, hdev);
+	hclge_free_vector(hdev, 0);
+}
+
 static int hclge_notify_client(struct hclge_dev *hdev,
 			       enum hnae3_reset_notify_type type)
 {
@@ -2471,12 +2693,6 @@ static int hclge_reset_wait(struct hclge_dev *hdev)
 		cnt++;
 	}
 
-	/* must clear reset status register to
-	 * prevent driver detect reset interrupt again
-	 */
-	reg = hclge_read_dev(&hdev->hw, HCLGE_MISC_RESET_STS_REG);
-	hclge_write_dev(&hdev->hw, HCLGE_MISC_RESET_STS_REG, reg);
-
 	if (cnt >= HCLGE_RESET_WAIT_CNT) {
 		dev_warn(&hdev->pdev->dev,
 			 "Wait for reset timeout: %d\n", hdev->reset_type);
@@ -2505,12 +2721,12 @@ static int hclge_func_reset_cmd(struct hclge_dev *hdev, int func_id)
 	return ret;
 }
 
-static void hclge_do_reset(struct hclge_dev *hdev, enum hnae3_reset_type type)
+static void hclge_do_reset(struct hclge_dev *hdev)
 {
 	struct pci_dev *pdev = hdev->pdev;
 	u32 val;
 
-	switch (type) {
+	switch (hdev->reset_type) {
 	case HNAE3_GLOBAL_RESET:
 		val = hclge_read_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG);
 		hnae_set_bit(val, HCLGE_GLOBAL_RESET_BIT, 1);
@@ -2526,30 +2742,62 @@ static void hclge_do_reset(struct hclge_dev *hdev, enum hnae3_reset_type type)
 	case HNAE3_FUNC_RESET:
 		dev_info(&pdev->dev, "PF Reset requested\n");
 		hclge_func_reset_cmd(hdev, 0);
+		/* schedule again to check later */
+		set_bit(HNAE3_FUNC_RESET, &hdev->reset_pending);
+		hclge_reset_task_schedule(hdev);
 		break;
 	default:
 		dev_warn(&pdev->dev,
-			 "Unsupported reset type: %d\n", type);
+			 "Unsupported reset type: %d\n", hdev->reset_type);
 		break;
 	}
 }
 
-static enum hnae3_reset_type hclge_detected_reset_event(struct hclge_dev *hdev)
+static enum hnae3_reset_type hclge_get_reset_level(struct hclge_dev *hdev,
+						   unsigned long *addr)
 {
 	enum hnae3_reset_type rst_level = HNAE3_NONE_RESET;
-	u32 rst_reg_val;
 
-	rst_reg_val = hclge_read_dev(&hdev->hw, HCLGE_MISC_RESET_STS_REG);
-	if (BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B) & rst_reg_val)
+	/* return the highest priority reset level amongst all */
+	if (test_bit(HNAE3_GLOBAL_RESET, addr))
 		rst_level = HNAE3_GLOBAL_RESET;
-	else if (BIT(HCLGE_VECTOR0_CORERESET_INT_B) & rst_reg_val)
+	else if (test_bit(HNAE3_CORE_RESET, addr))
 		rst_level = HNAE3_CORE_RESET;
-	else if (BIT(HCLGE_VECTOR0_IMPRESET_INT_B) & rst_reg_val)
+	else if (test_bit(HNAE3_IMP_RESET, addr))
 		rst_level = HNAE3_IMP_RESET;
+	else if (test_bit(HNAE3_FUNC_RESET, addr))
+		rst_level = HNAE3_FUNC_RESET;
+
+	/* now, clear all other resets */
+	clear_bit(HNAE3_GLOBAL_RESET, addr);
+	clear_bit(HNAE3_CORE_RESET, addr);
+	clear_bit(HNAE3_IMP_RESET, addr);
+	clear_bit(HNAE3_FUNC_RESET, addr);
 
 	return rst_level;
 }
 
+static void hclge_reset(struct hclge_dev *hdev)
+{
+	/* perform reset of the stack & ae device for a client */
+
+	hclge_notify_client(hdev, HNAE3_DOWN_CLIENT);
+
+	if (!hclge_reset_wait(hdev)) {
+		rtnl_lock();
+		hclge_notify_client(hdev, HNAE3_UNINIT_CLIENT);
+		hclge_reset_ae_dev(hdev->ae_dev);
+		hclge_notify_client(hdev, HNAE3_INIT_CLIENT);
+		rtnl_unlock();
+	} else {
+		/* schedule again to check pending resets later */
+		set_bit(hdev->reset_type, &hdev->reset_pending);
+		hclge_reset_task_schedule(hdev);
+	}
+
+	hclge_notify_client(hdev, HNAE3_UP_CLIENT);
+}
+
 static void hclge_reset_event(struct hnae3_handle *handle,
 			      enum hnae3_reset_type reset)
 {
@@ -2563,14 +2811,9 @@ static void hclge_reset_event(struct hnae3_handle *handle,
 	case HNAE3_FUNC_RESET:
 	case HNAE3_CORE_RESET:
 	case HNAE3_GLOBAL_RESET:
-		if (test_bit(HCLGE_STATE_RESET_INT, &hdev->state)) {
-			dev_err(&hdev->pdev->dev, "Already in reset state");
-			return;
-		}
-		hdev->reset_type = reset;
-		set_bit(HCLGE_STATE_RESET_INT, &hdev->state);
-		set_bit(HCLGE_STATE_SERVICE_SCHED, &hdev->state);
-		schedule_work(&hdev->service_task);
+		/* request reset & schedule reset task */
+		set_bit(reset, &hdev->reset_request);
+		hclge_reset_task_schedule(hdev);
 		break;
 	default:
 		dev_warn(&hdev->pdev->dev, "Unsupported reset event:%d", reset);
@@ -2580,49 +2823,55 @@ static void hclge_reset_event(struct hnae3_handle *handle,
 
 static void hclge_reset_subtask(struct hclge_dev *hdev)
 {
-	bool do_reset;
+	/* check if there is any ongoing reset in the hardware; this status
+	 * can be read from reset_pending. If there is one, we need to wait
+	 * for the hardware to complete it:
+	 *    a. if we can determine within a reasonable time that the
+	 *       hardware has fully reset, we proceed with the driver and
+	 *       client reset;
+	 *    b. else, we come back later to check this status, so
+	 *       reschedule now.
+	 */
+	hdev->reset_type = hclge_get_reset_level(hdev, &hdev->reset_pending);
+	if (hdev->reset_type != HNAE3_NONE_RESET)
+		hclge_reset(hdev);
 
-	do_reset = hdev->reset_type != HNAE3_NONE_RESET;
+	/* check if we got any *new* reset requests to be honored */
+	hdev->reset_type = hclge_get_reset_level(hdev, &hdev->reset_request);
+	if (hdev->reset_type != HNAE3_NONE_RESET)
+		hclge_do_reset(hdev);
 
-	/* Reset is detected by interrupt */
-	if (hdev->reset_type == HNAE3_NONE_RESET)
-		hdev->reset_type = hclge_detected_reset_event(hdev);
+	hdev->reset_type = HNAE3_NONE_RESET;
+}
 
-	if (hdev->reset_type == HNAE3_NONE_RESET)
+static void hclge_reset_service_task(struct work_struct *work)
+{
+	struct hclge_dev *hdev =
+		container_of(work, struct hclge_dev, rst_service_task);
+
+	if (test_and_set_bit(HCLGE_STATE_RST_HANDLING, &hdev->state))
 		return;
 
-	switch (hdev->reset_type) {
-	case HNAE3_FUNC_RESET:
-	case HNAE3_CORE_RESET:
-	case HNAE3_GLOBAL_RESET:
-	case HNAE3_IMP_RESET:
-		hclge_notify_client(hdev, HNAE3_DOWN_CLIENT);
+	clear_bit(HCLGE_STATE_RST_SERVICE_SCHED, &hdev->state);
 
-		if (do_reset)
-			hclge_do_reset(hdev, hdev->reset_type);
-		else
-			set_bit(HCLGE_STATE_RESET_INT, &hdev->state);
+	hclge_reset_subtask(hdev);
 
-		if (!hclge_reset_wait(hdev)) {
-			hclge_notify_client(hdev, HNAE3_UNINIT_CLIENT);
-			hclge_reset_ae_dev(hdev->ae_dev);
-			hclge_notify_client(hdev, HNAE3_INIT_CLIENT);
-			clear_bit(HCLGE_STATE_RESET_INT, &hdev->state);
-		}
-		hclge_notify_client(hdev, HNAE3_UP_CLIENT);
-		break;
-	default:
-		dev_err(&hdev->pdev->dev, "Unsupported reset type:%d\n",
-			hdev->reset_type);
-		break;
-	}
-	hdev->reset_type = HNAE3_NONE_RESET;
+	clear_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
 }
 
-static void hclge_misc_irq_service_task(struct hclge_dev *hdev)
+static void hclge_mailbox_service_task(struct work_struct *work)
 {
-	hclge_reset_subtask(hdev);
-	hclge_enable_vector(&hdev->misc_vector, true);
+	struct hclge_dev *hdev =
+		container_of(work, struct hclge_dev, mbx_service_task);
+
+	if (test_and_set_bit(HCLGE_STATE_MBX_HANDLING, &hdev->state))
+		return;
+
+	clear_bit(HCLGE_STATE_MBX_SERVICE_SCHED, &hdev->state);
+
+	hclge_mbx_handler(hdev);
+
+	clear_bit(HCLGE_STATE_MBX_HANDLING, &hdev->state);
 }
 
 static void hclge_service_task(struct work_struct *work)
@@ -2630,10 +2879,20 @@ static void hclge_service_task(struct work_struct *work)
 	struct hclge_dev *hdev =
 		container_of(work, struct hclge_dev, service_task);
 
-	hclge_misc_irq_service_task(hdev);
+	/* The total rx/tx packet statistics need to be updated once per
+	 * second. Either hclge_update_stats_for_all() or
+	 * hclge_mac_get_traffic_stats() can do this.
+	 */
+	if (hdev->hw_stats.stats_timer >= HCLGE_STATS_TIMER_INTERVAL) {
+		hclge_update_stats_for_all(hdev);
+		hdev->hw_stats.stats_timer = 0;
+	} else {
+		hclge_mac_get_traffic_stats(hdev);
+	}
+
 	hclge_update_speed_duplex(hdev);
 	hclge_update_link_status(hdev);
-	hclge_update_stats_for_all(hdev);
+	hclge_update_led_status(hdev);
 	hclge_service_complete(hdev);
 }
 
@@ -3174,49 +3433,53 @@ err:
 	return ret;
 }
 
-int hclge_map_vport_ring_to_vector(struct hclge_vport *vport, int vector_id,
-				   struct hnae3_ring_chain_node *ring_chain)
+int hclge_bind_ring_with_vector(struct hclge_vport *vport,
+				int vector_id, bool en,
+				struct hnae3_ring_chain_node *ring_chain)
 {
 	struct hclge_dev *hdev = vport->back;
-	struct hclge_ctrl_vector_chain_cmd *req;
 	struct hnae3_ring_chain_node *node;
 	struct hclge_desc desc;
-	int ret;
+	struct hclge_ctrl_vector_chain_cmd *req
+		= (struct hclge_ctrl_vector_chain_cmd *)desc.data;
+	enum hclge_cmd_status status;
+	enum hclge_opcode_type op;
+	u16 tqp_type_and_id;
 	int i;
 
-	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_ADD_RING_TO_VECTOR, false);
-
-	req = (struct hclge_ctrl_vector_chain_cmd *)desc.data;
+	op = en ? HCLGE_OPC_ADD_RING_TO_VECTOR : HCLGE_OPC_DEL_RING_TO_VECTOR;
+	hclge_cmd_setup_basic_desc(&desc, op, false);
 	req->int_vector_id = vector_id;
 
 	i = 0;
 	for (node = ring_chain; node; node = node->next) {
-		u16 type_and_id = 0;
-
-		hnae_set_field(type_and_id, HCLGE_INT_TYPE_M, HCLGE_INT_TYPE_S,
+		tqp_type_and_id = le16_to_cpu(req->tqp_type_and_id[i]);
+		hnae_set_field(tqp_type_and_id, HCLGE_INT_TYPE_M,
+			       HCLGE_INT_TYPE_S,
 			       hnae_get_bit(node->flag, HNAE3_RING_TYPE_B));
-		hnae_set_field(type_and_id, HCLGE_TQP_ID_M, HCLGE_TQP_ID_S,
-			       node->tqp_index);
-		hnae_set_field(type_and_id, HCLGE_INT_GL_IDX_M,
+		hnae_set_field(tqp_type_and_id, HCLGE_TQP_ID_M,
+			       HCLGE_TQP_ID_S, node->tqp_index);
+		hnae_set_field(tqp_type_and_id, HCLGE_INT_GL_IDX_M,
 			       HCLGE_INT_GL_IDX_S,
-			       hnae_get_bit(node->flag, HNAE3_RING_TYPE_B));
-		req->tqp_type_and_id[i] = cpu_to_le16(type_and_id);
-		req->vfid = vport->vport_id;
-
+			       hnae_get_field(node->int_gl_idx,
+					      HNAE3_RING_GL_IDX_M,
+					      HNAE3_RING_GL_IDX_S));
+		req->tqp_type_and_id[i] = cpu_to_le16(tqp_type_and_id);
 		if (++i >= HCLGE_VECTOR_ELEMENTS_PER_CMD) {
 			req->int_cause_num = HCLGE_VECTOR_ELEMENTS_PER_CMD;
+			req->vfid = vport->vport_id;
 
-			ret = hclge_cmd_send(&hdev->hw, &desc, 1);
-			if (ret) {
+			status = hclge_cmd_send(&hdev->hw, &desc, 1);
+			if (status) {
 				dev_err(&hdev->pdev->dev,
 					"Map TQP fail, status is %d.\n",
-					ret);
-				return ret;
+					status);
+				return -EIO;
 			}
 			i = 0;
 
 			hclge_cmd_setup_basic_desc(&desc,
-						   HCLGE_OPC_ADD_RING_TO_VECTOR,
+						   op,
 						   false);
 			req->int_vector_id = vector_id;
 		}
@@ -3224,21 +3487,21 @@ int hclge_map_vport_ring_to_vector(struct hclge_vport *vport, int vector_id,
 
 	if (i > 0) {
 		req->int_cause_num = i;
-
-		ret = hclge_cmd_send(&hdev->hw, &desc, 1);
-		if (ret) {
+		req->vfid = vport->vport_id;
+		status = hclge_cmd_send(&hdev->hw, &desc, 1);
+		if (status) {
 			dev_err(&hdev->pdev->dev,
-				"Map TQP fail, status is %d.\n", ret);
-			return ret;
+				"Map TQP fail, status is %d.\n", status);
+			return -EIO;
 		}
 	}
 
 	return 0;
 }
 
-static int hclge_map_handle_ring_to_vector(
-		struct hnae3_handle *handle, int vector,
-		struct hnae3_ring_chain_node *ring_chain)
+static int hclge_map_ring_to_vector(struct hnae3_handle *handle,
+				    int vector,
+				    struct hnae3_ring_chain_node *ring_chain)
 {
 	struct hclge_vport *vport = hclge_get_vport(handle);
 	struct hclge_dev *hdev = vport->back;
@@ -3247,24 +3510,20 @@ static int hclge_map_handle_ring_to_vector(
 	vector_id = hclge_get_vector_index(hdev, vector);
 	if (vector_id < 0) {
 		dev_err(&hdev->pdev->dev,
-			"Get vector index fail. ret =%d\n", vector_id);
+			"Get vector index fail. vector_id =%d\n", vector_id);
 		return vector_id;
 	}
 
-	return hclge_map_vport_ring_to_vector(vport, vector_id, ring_chain);
+	return hclge_bind_ring_with_vector(vport, vector_id, true, ring_chain);
 }
 
-static int hclge_unmap_ring_from_vector(
-	struct hnae3_handle *handle, int vector,
-	struct hnae3_ring_chain_node *ring_chain)
+static int hclge_unmap_ring_frm_vector(struct hnae3_handle *handle,
+				       int vector,
+				       struct hnae3_ring_chain_node *ring_chain)
 {
 	struct hclge_vport *vport = hclge_get_vport(handle);
 	struct hclge_dev *hdev = vport->back;
-	struct hclge_ctrl_vector_chain_cmd *req;
-	struct hnae3_ring_chain_node *node;
-	struct hclge_desc desc;
-	int i, vector_id;
-	int ret;
+	int vector_id, ret;
 
 	vector_id = hclge_get_vector_index(hdev, vector);
 	if (vector_id < 0) {
@@ -3273,54 +3532,17 @@ static int hclge_unmap_ring_from_vector(
 		return vector_id;
 	}
 
-	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_DEL_RING_TO_VECTOR, false);
-
-	req = (struct hclge_ctrl_vector_chain_cmd *)desc.data;
-	req->int_vector_id = vector_id;
-
-	i = 0;
-	for (node = ring_chain; node; node = node->next) {
-		u16 type_and_id = 0;
-
-		hnae_set_field(type_and_id, HCLGE_INT_TYPE_M, HCLGE_INT_TYPE_S,
-			       hnae_get_bit(node->flag, HNAE3_RING_TYPE_B));
-		hnae_set_field(type_and_id, HCLGE_TQP_ID_M, HCLGE_TQP_ID_S,
-			       node->tqp_index);
-		hnae_set_field(type_and_id, HCLGE_INT_GL_IDX_M,
-			       HCLGE_INT_GL_IDX_S,
-			       hnae_get_bit(node->flag, HNAE3_RING_TYPE_B));
-
-		req->tqp_type_and_id[i] = cpu_to_le16(type_and_id);
-		req->vfid = vport->vport_id;
-
-		if (++i >= HCLGE_VECTOR_ELEMENTS_PER_CMD) {
-			req->int_cause_num = HCLGE_VECTOR_ELEMENTS_PER_CMD;
-
-			ret = hclge_cmd_send(&hdev->hw, &desc, 1);
-			if (ret) {
-				dev_err(&hdev->pdev->dev,
-					"Unmap TQP fail, status is %d.\n",
-					ret);
-				return ret;
-			}
-			i = 0;
-			hclge_cmd_setup_basic_desc(&desc,
-						   HCLGE_OPC_DEL_RING_TO_VECTOR,
-						   false);
-			req->int_vector_id = vector_id;
-		}
+	ret = hclge_bind_ring_with_vector(vport, vector_id, false, ring_chain);
+	if (ret) {
+		dev_err(&handle->pdev->dev,
+			"Unmap ring from vector fail. vectorid=%d, ret =%d\n",
+			vector_id,
+			ret);
+		return ret;
 	}
 
-	if (i > 0) {
-		req->int_cause_num = i;
-
-		ret = hclge_cmd_send(&hdev->hw, &desc, 1);
-		if (ret) {
-			dev_err(&hdev->pdev->dev,
-				"Unmap TQP fail, status is %d.\n", ret);
-			return ret;
-		}
-	}
+	/* Free this MSIX or MSI vector */
+	hclge_free_vector(hdev, vector_id);
 
 	return 0;
 }
@@ -4077,6 +4299,91 @@ int hclge_rm_mc_addr_common(struct hclge_vport *vport,
 	return status;
 }
 
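+/* translate the firmware response to a MAC ethertype add into an errno */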
+static int hclge_get_mac_ethertype_cmd_status(struct hclge_dev *hdev,
+					      u16 cmdq_resp, u8 resp_code)
+{
+#define HCLGE_ETHERTYPE_SUCCESS_ADD		0
+#define HCLGE_ETHERTYPE_ALREADY_ADD		1
+#define HCLGE_ETHERTYPE_MGR_TBL_OVERFLOW	2
+#define HCLGE_ETHERTYPE_KEY_CONFLICT		3
+
+	int return_status;
+
+	if (cmdq_resp) {
+		dev_err(&hdev->pdev->dev,
+			"cmdq execute failed for get_mac_ethertype_cmd_status, status=%d.\n",
+			cmdq_resp);
+		return -EIO;
+	}
+
+	switch (resp_code) {
+	case HCLGE_ETHERTYPE_SUCCESS_ADD:
+	case HCLGE_ETHERTYPE_ALREADY_ADD:
+		return_status = 0;
+		break;
+	case HCLGE_ETHERTYPE_MGR_TBL_OVERFLOW:
+		dev_err(&hdev->pdev->dev,
+			"add mac ethertype failed for manager table overflow.\n");
+		return_status = -EIO;
+		break;
+	case HCLGE_ETHERTYPE_KEY_CONFLICT:
+		dev_err(&hdev->pdev->dev,
+			"add mac ethertype failed for key conflict.\n");
+		return_status = -EIO;
+		break;
+	default:
+		dev_err(&hdev->pdev->dev,
+			"add mac ethertype failed for undefined, code=%d.\n",
+			resp_code);
+		return_status = -EIO;
+	}
+
+	return return_status;
+}
+
+static int hclge_add_mgr_tbl(struct hclge_dev *hdev,
+			     const struct hclge_mac_mgr_tbl_entry_cmd *req)
+{
+	struct hclge_desc desc;
+	u8 resp_code;
+	u16 retval;
+	int ret;
+
+	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_ETHTYPE_ADD, false);
+	memcpy(desc.data, req, sizeof(struct hclge_mac_mgr_tbl_entry_cmd));
+
+	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
+	if (ret) {
+		dev_err(&hdev->pdev->dev,
+			"add mac ethertype failed for cmd_send, ret =%d.\n",
+			ret);
+		return ret;
+	}
+
+	resp_code = (le32_to_cpu(desc.data[0]) >> 8) & 0xff;
+	retval = le16_to_cpu(desc.retval);
+
+	return hclge_get_mac_ethertype_cmd_status(hdev, retval, resp_code);
+}
+
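+/* install the static MAC manager table entries (currently only LLDP) */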
+static int init_mgr_tbl(struct hclge_dev *hdev)
+{
+	int ret;
+	int i;
+
+	for (i = 0; i < ARRAY_SIZE(hclge_mgr_table); i++) {
+		ret = hclge_add_mgr_tbl(hdev, &hclge_mgr_table[i]);
+		if (ret) {
+			dev_err(&hdev->pdev->dev,
+				"add mac ethertype failed, ret =%d.\n",
+				ret);
+			return ret;
+		}
+	}
+
+	return 0;
+}
+
 static void hclge_get_mac_addr(struct hnae3_handle *handle, u8 *p)
 {
 	struct hclge_vport *vport = hclge_get_vport(handle);
@@ -4090,6 +4397,7 @@ static int hclge_set_mac_addr(struct hnae3_handle *handle, void *p)
 	const unsigned char *new_addr = (const unsigned char *)p;
 	struct hclge_vport *vport = hclge_get_vport(handle);
 	struct hclge_dev *hdev = vport->back;
+	int ret;
 
 	/* mac addr check */
 	if (is_zero_ether_addr(new_addr) ||
@@ -4101,14 +4409,39 @@ static int hclge_set_mac_addr(struct hnae3_handle *handle, void *p)
 		return -EINVAL;
 	}
 
-	hclge_rm_uc_addr(handle, hdev->hw.mac.mac_addr);
+	ret = hclge_rm_uc_addr(handle, hdev->hw.mac.mac_addr);
+	if (ret)
+		dev_warn(&hdev->pdev->dev,
+			 "remove old uc mac address fail, ret =%d.\n",
+			 ret);
 
-	if (!hclge_add_uc_addr(handle, new_addr)) {
-		ether_addr_copy(hdev->hw.mac.mac_addr, new_addr);
-		return 0;
+	ret = hclge_add_uc_addr(handle, new_addr);
+	if (ret) {
+		dev_err(&hdev->pdev->dev,
+			"add uc mac address fail, ret =%d.\n",
+			ret);
+
+		ret = hclge_add_uc_addr(handle, hdev->hw.mac.mac_addr);
+		if (ret) {
+			dev_err(&hdev->pdev->dev,
+				"restore uc mac address fail, ret =%d.\n",
+				ret);
+		}
+
+		return -EIO;
 	}
 
-	return -EIO;
+	ret = hclge_mac_pause_addr_cfg(hdev, new_addr);
+	if (ret) {
+		dev_err(&hdev->pdev->dev,
+			"configure mac pause address fail, ret =%d.\n",
+			ret);
+		return -EIO;
+	}
+
+	ether_addr_copy(hdev->hw.mac.mac_addr, new_addr);
+
+	return 0;
 }
 
 static int hclge_set_vlan_filter_ctrl(struct hclge_dev *hdev, u8 vlan_type,
@@ -4134,6 +4467,17 @@ static int hclge_set_vlan_filter_ctrl(struct hclge_dev *hdev, u8 vlan_type,
 	return 0;
 }
 
+#define HCLGE_FILTER_TYPE_VF		0
+#define HCLGE_FILTER_TYPE_PORT		1
+
+static void hclge_enable_vlan_filter(struct hnae3_handle *handle, bool enable)
+{
+	struct hclge_vport *vport = hclge_get_vport(handle);
+	struct hclge_dev *hdev = vport->back;
+
+	hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF, enable);
+}
+
 int hclge_set_vf_vlan_common(struct hclge_dev *hdev, int vfid,
 			     bool is_kill, u16 vlan, u8 qos, __be16 proto)
 {
@@ -4250,43 +4594,204 @@ static int hclge_set_vf_vlan_filter(struct hnae3_handle *handle, int vfid,
 	return hclge_set_vf_vlan_common(hdev, vfid, false, vlan, qos, proto);
 }
 
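+/* program the per-vport tx VLAN tag insertion configuration */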
+static int hclge_set_vlan_tx_offload_cfg(struct hclge_vport *vport)
+{
+	struct hclge_tx_vtag_cfg *vcfg = &vport->txvlan_cfg;
+	struct hclge_vport_vtag_tx_cfg_cmd *req;
+	struct hclge_dev *hdev = vport->back;
+	struct hclge_desc desc;
+	int status;
+
+	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_PORT_TX_CFG, false);
+
+	req = (struct hclge_vport_vtag_tx_cfg_cmd *)desc.data;
+	req->def_vlan_tag1 = cpu_to_le16(vcfg->default_tag1);
+	req->def_vlan_tag2 = cpu_to_le16(vcfg->default_tag2);
+	hnae_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_TAG_B,
+		     vcfg->accept_tag ? 1 : 0);
+	hnae_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_UNTAG_B,
+		     vcfg->accept_untag ? 1 : 0);
+	hnae_set_bit(req->vport_vlan_cfg, HCLGE_PORT_INS_TAG1_EN_B,
+		     vcfg->insert_tag1_en ? 1 : 0);
+	hnae_set_bit(req->vport_vlan_cfg, HCLGE_PORT_INS_TAG2_EN_B,
+		     vcfg->insert_tag2_en ? 1 : 0);
+	hnae_set_bit(req->vport_vlan_cfg, HCLGE_CFG_NIC_ROCE_SEL_B, 0);
+
+	req->vf_offset = vport->vport_id / HCLGE_VF_NUM_PER_CMD;
+	req->vf_bitmap[req->vf_offset] =
+		1 << (vport->vport_id % HCLGE_VF_NUM_PER_BYTE);
+
+	status = hclge_cmd_send(&hdev->hw, &desc, 1);
+	if (status)
+		dev_err(&hdev->pdev->dev,
+			"Send port txvlan cfg command fail, ret =%d\n",
+			status);
+
+	return status;
+}
+
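+/* program the per-vport rx VLAN tag stripping configuration */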
+static int hclge_set_vlan_rx_offload_cfg(struct hclge_vport *vport)
+{
+	struct hclge_rx_vtag_cfg *vcfg = &vport->rxvlan_cfg;
+	struct hclge_vport_vtag_rx_cfg_cmd *req;
+	struct hclge_dev *hdev = vport->back;
+	struct hclge_desc desc;
+	int status;
+
+	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_PORT_RX_CFG, false);
+
+	req = (struct hclge_vport_vtag_rx_cfg_cmd *)desc.data;
+	hnae_set_bit(req->vport_vlan_cfg, HCLGE_REM_TAG1_EN_B,
+		     vcfg->strip_tag1_en ? 1 : 0);
+	hnae_set_bit(req->vport_vlan_cfg, HCLGE_REM_TAG2_EN_B,
+		     vcfg->strip_tag2_en ? 1 : 0);
+	hnae_set_bit(req->vport_vlan_cfg, HCLGE_SHOW_TAG1_EN_B,
+		     vcfg->vlan1_vlan_prionly ? 1 : 0);
+	hnae_set_bit(req->vport_vlan_cfg, HCLGE_SHOW_TAG2_EN_B,
+		     vcfg->vlan2_vlan_prionly ? 1 : 0);
+
+	req->vf_offset = vport->vport_id / HCLGE_VF_NUM_PER_CMD;
+	req->vf_bitmap[req->vf_offset] =
+		1 << (vport->vport_id % HCLGE_VF_NUM_PER_BYTE);
+
+	status = hclge_cmd_send(&hdev->hw, &desc, 1);
+	if (status)
+		dev_err(&hdev->pdev->dev,
+			"Send port rxvlan cfg command fail, ret = %d\n",
+			status);
+
+	return status;
+}
+
+static int hclge_set_vlan_protocol_type(struct hclge_dev *hdev)
+{
+	struct hclge_rx_vlan_type_cfg_cmd *rx_req;
+	struct hclge_tx_vlan_type_cfg_cmd *tx_req;
+	struct hclge_desc desc;
+	int status;
+
+	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_TYPE_ID, false);
+	rx_req = (struct hclge_rx_vlan_type_cfg_cmd *)desc.data;
+	rx_req->ot_fst_vlan_type =
+		cpu_to_le16(hdev->vlan_type_cfg.rx_ot_fst_vlan_type);
+	rx_req->ot_sec_vlan_type =
+		cpu_to_le16(hdev->vlan_type_cfg.rx_ot_sec_vlan_type);
+	rx_req->in_fst_vlan_type =
+		cpu_to_le16(hdev->vlan_type_cfg.rx_in_fst_vlan_type);
+	rx_req->in_sec_vlan_type =
+		cpu_to_le16(hdev->vlan_type_cfg.rx_in_sec_vlan_type);
+
+	status = hclge_cmd_send(&hdev->hw, &desc, 1);
+	if (status) {
+		dev_err(&hdev->pdev->dev,
+			"Send rxvlan protocol type command fail, ret = %d\n",
+			status);
+		return status;
+	}
+
+	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_INSERT, false);
+
+	tx_req = (struct hclge_tx_vlan_type_cfg_cmd *)&desc.data;
+	tx_req->ot_vlan_type = cpu_to_le16(hdev->vlan_type_cfg.tx_ot_vlan_type);
+	tx_req->in_vlan_type = cpu_to_le16(hdev->vlan_type_cfg.tx_in_vlan_type);
+
+	status = hclge_cmd_send(&hdev->hw, &desc, 1);
+	if (status)
+		dev_err(&hdev->pdev->dev,
+			"Send txvlan protocol type command fail, ret = %d\n",
+			status);
+
+	return status;
+}
+
 static int hclge_init_vlan_config(struct hclge_dev *hdev)
 {
-#define HCLGE_VLAN_TYPE_VF_TABLE   0
-#define HCLGE_VLAN_TYPE_PORT_TABLE 1
+#define HCLGE_DEF_VLAN_TYPE		0x8100
+
 	struct hnae3_handle *handle;
+	struct hclge_vport *vport;
 	int ret;
+	int i;
 
-	ret = hclge_set_vlan_filter_ctrl(hdev, HCLGE_VLAN_TYPE_VF_TABLE,
-					 true);
+	ret = hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF, true);
 	if (ret)
 		return ret;
 
-	ret = hclge_set_vlan_filter_ctrl(hdev, HCLGE_VLAN_TYPE_PORT_TABLE,
-					 true);
+	ret = hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_PORT, true);
 	if (ret)
 		return ret;
 
+	hdev->vlan_type_cfg.rx_in_fst_vlan_type = HCLGE_DEF_VLAN_TYPE;
+	hdev->vlan_type_cfg.rx_in_sec_vlan_type = HCLGE_DEF_VLAN_TYPE;
+	hdev->vlan_type_cfg.rx_ot_fst_vlan_type = HCLGE_DEF_VLAN_TYPE;
+	hdev->vlan_type_cfg.rx_ot_sec_vlan_type = HCLGE_DEF_VLAN_TYPE;
+	hdev->vlan_type_cfg.tx_ot_vlan_type = HCLGE_DEF_VLAN_TYPE;
+	hdev->vlan_type_cfg.tx_in_vlan_type = HCLGE_DEF_VLAN_TYPE;
+
+	ret = hclge_set_vlan_protocol_type(hdev);
+	if (ret)
+		return ret;
+
+	for (i = 0; i < hdev->num_alloc_vport; i++) {
+		vport = &hdev->vport[i];
+		vport->txvlan_cfg.accept_tag = true;
+		vport->txvlan_cfg.accept_untag = true;
+		vport->txvlan_cfg.insert_tag1_en = false;
+		vport->txvlan_cfg.insert_tag2_en = false;
+		vport->txvlan_cfg.default_tag1 = 0;
+		vport->txvlan_cfg.default_tag2 = 0;
+
+		ret = hclge_set_vlan_tx_offload_cfg(vport);
+		if (ret)
+			return ret;
+
+		vport->rxvlan_cfg.strip_tag1_en = false;
+		vport->rxvlan_cfg.strip_tag2_en = true;
+		vport->rxvlan_cfg.vlan1_vlan_prionly = false;
+		vport->rxvlan_cfg.vlan2_vlan_prionly = false;
+
+		ret = hclge_set_vlan_rx_offload_cfg(vport);
+		if (ret)
+			return ret;
+	}
+
 	handle = &hdev->vport[0].nic;
 	return hclge_set_port_vlan_filter(handle, htons(ETH_P_8021Q), 0, false);
 }
 
+static int hclge_en_hw_strip_rxvtag(struct hnae3_handle *handle, bool enable)
+{
+	struct hclge_vport *vport = hclge_get_vport(handle);
+
+	vport->rxvlan_cfg.strip_tag1_en = false;
+	vport->rxvlan_cfg.strip_tag2_en = enable;
+	vport->rxvlan_cfg.vlan1_vlan_prionly = false;
+	vport->rxvlan_cfg.vlan2_vlan_prionly = false;
+
+	return hclge_set_vlan_rx_offload_cfg(vport);
+}
+
 static int hclge_set_mtu(struct hnae3_handle *handle, int new_mtu)
 {
 	struct hclge_vport *vport = hclge_get_vport(handle);
 	struct hclge_config_max_frm_size_cmd *req;
 	struct hclge_dev *hdev = vport->back;
 	struct hclge_desc desc;
+	int max_frm_size;
 	int ret;
 
-	if ((new_mtu < HCLGE_MAC_MIN_MTU) || (new_mtu > HCLGE_MAC_MAX_MTU))
+	max_frm_size = new_mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN;
+
+	if (max_frm_size < HCLGE_MAC_MIN_FRAME ||
+	    max_frm_size > HCLGE_MAC_MAX_FRAME)
 		return -EINVAL;
 
-	hdev->mps = new_mtu;
+	max_frm_size = max(max_frm_size, HCLGE_MAC_DEFAULT_FRAME);
+
 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_MAX_FRM_SIZE, false);
 
 	req = (struct hclge_config_max_frm_size_cmd *)desc.data;
-	req->max_frm_size = cpu_to_le16(new_mtu);
+	req->max_frm_size = cpu_to_le16(max_frm_size);
 
 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
 	if (ret) {
@@ -4294,6 +4799,8 @@ static int hclge_set_mtu(struct hnae3_handle *handle, int new_mtu)
 		return ret;
 	}
 
+	hdev->mps = max_frm_size;
+
 	return 0;
 }
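
The MTU handling above converts the requested MTU into an on-wire maximum frame size before range-checking it: the Ethernet header, the frame check sequence and one VLAN tag are added on top of the MTU. A minimal stand-alone sketch of the same arithmetic (our illustration, not driver code; the constants mirror the kernel's ETH_HLEN, ETH_FCS_LEN and VLAN_HLEN):

/* Illustrative sketch (ours) of the MTU -> max frame size conversion
 * performed by hclge_set_mtu(); not part of the driver.
 */
#define ETH_HLEN	14	/* dst MAC + src MAC + ethertype */
#define ETH_FCS_LEN	4	/* frame check sequence */
#define VLAN_HLEN	4	/* one 802.1Q tag */

static int mtu_to_max_frame(int mtu)
{
	return mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN;
}

/* mtu_to_max_frame(1500) == 1522, comfortably inside the
 * HCLGE_MAC_MIN_FRAME (64) .. HCLGE_MAC_MAX_FRAME (9728) window.
 */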
 
@@ -4341,7 +4848,7 @@ static int hclge_get_reset_status(struct hclge_dev *hdev, u16 queue_id)
 	return hnae_get_bit(req->ready_to_reset, HCLGE_TQP_RESET_B);
 }
 
-static void hclge_reset_tqp(struct hnae3_handle *handle, u16 queue_id)
+void hclge_reset_tqp(struct hnae3_handle *handle, u16 queue_id)
 {
 	struct hclge_vport *vport = hclge_get_vport(handle);
 	struct hclge_dev *hdev = vport->back;
@@ -4392,6 +4899,100 @@ static u32 hclge_get_fw_version(struct hnae3_handle *handle)
 	return hdev->fw_version;
 }
 
+static void hclge_get_flowctrl_adv(struct hnae3_handle *handle,
+				   u32 *flowctrl_adv)
+{
+	struct hclge_vport *vport = hclge_get_vport(handle);
+	struct hclge_dev *hdev = vport->back;
+	struct phy_device *phydev = hdev->hw.mac.phydev;
+
+	if (!phydev)
+		return;
+
+	*flowctrl_adv |= (phydev->advertising & ADVERTISED_Pause) |
+			 (phydev->advertising & ADVERTISED_Asym_Pause);
+}
+
+static void hclge_set_flowctrl_adv(struct hclge_dev *hdev, u32 rx_en, u32 tx_en)
+{
+	struct phy_device *phydev = hdev->hw.mac.phydev;
+
+	if (!phydev)
+		return;
+
+	phydev->advertising &= ~(ADVERTISED_Pause | ADVERTISED_Asym_Pause);
+
+	if (rx_en)
+		phydev->advertising |= ADVERTISED_Pause | ADVERTISED_Asym_Pause;
+
+	if (tx_en)
+		phydev->advertising ^= ADVERTISED_Asym_Pause;
+}
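
The set-then-XOR sequence above encodes the requested rx/tx pause configuration into the two PHY advertisement bits. The resulting truth table, sketched below for illustration (the helper name is ours), matches the conventional 802.3 pause advertisement encoding:

/* Sketch (ours) of what hclge_set_flowctrl_adv() advertises:
 *
 *   rx_en tx_en | advertised bits
 *   ------------+--------------------
 *     0     0   | none
 *     0     1   | Asym_Pause
 *     1     0   | Pause | Asym_Pause
 *     1     1   | Pause
 */
static unsigned int pause_adv_bits(int rx_en, int tx_en,
				   unsigned int pause, unsigned int asym)
{
	unsigned int adv = 0;

	if (rx_en)
		adv |= pause | asym;	/* start from Pause | Asym_Pause */
	if (tx_en)
		adv ^= asym;		/* toggle Asym_Pause */
	return adv;
}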
+
+static int hclge_cfg_pauseparam(struct hclge_dev *hdev, u32 rx_en, u32 tx_en)
+{
+	int ret;
+
+	if (rx_en && tx_en)
+		hdev->fc_mode_last_time = HCLGE_FC_FULL;
+	else if (rx_en && !tx_en)
+		hdev->fc_mode_last_time = HCLGE_FC_RX_PAUSE;
+	else if (!rx_en && tx_en)
+		hdev->fc_mode_last_time = HCLGE_FC_TX_PAUSE;
+	else
+		hdev->fc_mode_last_time = HCLGE_FC_NONE;
+
+	if (hdev->tm_info.fc_mode == HCLGE_FC_PFC)
+		return 0;
+
+	ret = hclge_mac_pause_en_cfg(hdev, tx_en, rx_en);
+	if (ret) {
+		dev_err(&hdev->pdev->dev, "configure pauseparam error, ret = %d.\n",
+			ret);
+		return ret;
+	}
+
+	hdev->tm_info.fc_mode = hdev->fc_mode_last_time;
+
+	return 0;
+}
+
+int hclge_cfg_flowctrl(struct hclge_dev *hdev)
+{
+	struct phy_device *phydev = hdev->hw.mac.phydev;
+	u16 remote_advertising = 0;
+	u16 local_advertising = 0;
+	u32 rx_pause, tx_pause;
+	u8 flowctl;
+
+	if (!phydev->link || !phydev->autoneg)
+		return 0;
+
+	if (phydev->advertising & ADVERTISED_Pause)
+		local_advertising = ADVERTISE_PAUSE_CAP;
+
+	if (phydev->advertising & ADVERTISED_Asym_Pause)
+		local_advertising |= ADVERTISE_PAUSE_ASYM;
+
+	if (phydev->pause)
+		remote_advertising = LPA_PAUSE_CAP;
+
+	if (phydev->asym_pause)
+		remote_advertising |= LPA_PAUSE_ASYM;
+
+	flowctl = mii_resolve_flowctrl_fdx(local_advertising,
+					   remote_advertising);
+	tx_pause = flowctl & FLOW_CTRL_TX;
+	rx_pause = flowctl & FLOW_CTRL_RX;
+
+	if (phydev->duplex == HCLGE_MAC_HALF) {
+		tx_pause = 0;
+		rx_pause = 0;
+	}
+
+	return hclge_cfg_pauseparam(hdev, rx_pause, tx_pause);
+}
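
hclge_cfg_flowctrl() then feeds the local and link-partner advertisements into mii_resolve_flowctrl_fdx() to decide the actual pause directions. Below is an illustrative reimplementation of that standard full-duplex resolution (ours, for reading convenience only; the real helper lives in include/linux/mii.h):

#define FC_TX	0x1	/* mirrors FLOW_CTRL_TX */
#define FC_RX	0x2	/* mirrors FLOW_CTRL_RX */

/* Illustrative sketch (ours) of full-duplex pause resolution */
static unsigned char resolve_fdx_flowctrl(int lcl_cap, int lcl_asym,
					  int rmt_cap, int rmt_asym)
{
	if (lcl_cap && rmt_cap)
		return FC_TX | FC_RX;	/* both sides symmetric */
	if (lcl_asym && rmt_asym) {
		if (lcl_cap)
			return FC_RX;	/* we advertised "RX only" */
		if (rmt_cap)
			return FC_TX;	/* peer advertised "RX only" */
	}
	return 0;			/* no pause in either direction */
}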
+
 static void hclge_get_pauseparam(struct hnae3_handle *handle, u32 *auto_neg,
 				 u32 *rx_en, u32 *tx_en)
 {
@@ -4421,6 +5022,41 @@ static void hclge_get_pauseparam(struct hnae3_handle *handle, u32 *auto_neg,
 	}
 }
 
+static int hclge_set_pauseparam(struct hnae3_handle *handle, u32 auto_neg,
+				u32 rx_en, u32 tx_en)
+{
+	struct hclge_vport *vport = hclge_get_vport(handle);
+	struct hclge_dev *hdev = vport->back;
+	struct phy_device *phydev = hdev->hw.mac.phydev;
+	u32 fc_autoneg;
+
+	/* For now, flow control negotiation is only supported for
+	 * netdevs with a PHY attached.
+	 */
+	if (!phydev)
+		return -EOPNOTSUPP;
+
+	fc_autoneg = hclge_get_autoneg(handle);
+	if (auto_neg != fc_autoneg) {
+		dev_info(&hdev->pdev->dev,
+			 "To change autoneg please use: ethtool -s <dev> autoneg <on|off>\n");
+		return -EOPNOTSUPP;
+	}
+
+	if (hdev->tm_info.fc_mode == HCLGE_FC_PFC) {
+		dev_info(&hdev->pdev->dev,
+			 "Priority flow control enabled. Cannot set link flow control.\n");
+		return -EOPNOTSUPP;
+	}
+
+	hclge_set_flowctrl_adv(hdev, rx_en, tx_en);
+
+	if (!fc_autoneg)
+		return hclge_cfg_pauseparam(hdev, rx_en, tx_en);
+
+	return phy_start_aneg(phydev);
+}
+
 static void hclge_get_ksettings_an_result(struct hnae3_handle *handle,
 					  u8 *auto_neg, u32 *speed, u8 *duplex)
 {
@@ -4661,6 +5297,8 @@ static int hclge_init_ae_dev(struct hnae3_ae_dev *ae_dev)
 	hdev->pdev = pdev;
 	hdev->ae_dev = ae_dev;
 	hdev->reset_type = HNAE3_NONE_RESET;
+	hdev->reset_request = 0;
+	hdev->reset_pending = 0;
 	ae_dev->priv = hdev;
 
 	ret = hclge_pci_init(hdev);
@@ -4768,16 +5406,28 @@ static int hclge_init_ae_dev(struct hnae3_ae_dev *ae_dev)
 		return ret;
 	}
 
+	ret = init_mgr_tbl(hdev);
+	if (ret) {
+		dev_err(&pdev->dev, "manager table init fail, ret = %d\n", ret);
+		return ret;
+	}
+
 	hclge_dcb_ops_set(hdev);
 
 	timer_setup(&hdev->service_timer, hclge_service_timer, 0);
 	INIT_WORK(&hdev->service_task, hclge_service_task);
+	INIT_WORK(&hdev->rst_service_task, hclge_reset_service_task);
+	INIT_WORK(&hdev->mbx_service_task, hclge_mailbox_service_task);
 
 	/* Enable MISC vector(vector0) */
 	hclge_enable_vector(&hdev->misc_vector, true);
 
 	set_bit(HCLGE_STATE_SERVICE_INITED, &hdev->state);
 	set_bit(HCLGE_STATE_DOWN, &hdev->state);
+	clear_bit(HCLGE_STATE_RST_SERVICE_SCHED, &hdev->state);
+	clear_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
+	clear_bit(HCLGE_STATE_MBX_SERVICE_SCHED, &hdev->state);
+	clear_bit(HCLGE_STATE_MBX_HANDLING, &hdev->state);
 
 	pr_info("%s driver initialization finished.\n", HCLGE_DRIVER_NAME);
 	return 0;
@@ -4889,25 +5539,471 @@ static void hclge_uninit_ae_dev(struct hnae3_ae_dev *ae_dev)
 		del_timer_sync(&hdev->service_timer);
 	if (hdev->service_task.func)
 		cancel_work_sync(&hdev->service_task);
+	if (hdev->rst_service_task.func)
+		cancel_work_sync(&hdev->rst_service_task);
+	if (hdev->mbx_service_task.func)
+		cancel_work_sync(&hdev->mbx_service_task);
 
 	if (mac->phydev)
 		mdiobus_unregister(mac->mdio_bus);
 
 	/* Disable MISC vector(vector0) */
 	hclge_enable_vector(&hdev->misc_vector, false);
-	hclge_free_vector(hdev, 0);
 	hclge_destroy_cmd_queue(&hdev->hw);
+	hclge_misc_irq_uninit(hdev);
 	hclge_pci_uninit(hdev);
 	ae_dev->priv = NULL;
 }
 
+static u32 hclge_get_max_channels(struct hnae3_handle *handle)
+{
+	struct hnae3_knic_private_info *kinfo = &handle->kinfo;
+	struct hclge_vport *vport = hclge_get_vport(handle);
+	struct hclge_dev *hdev = vport->back;
+
+	return min_t(u32, hdev->rss_size_max * kinfo->num_tc, hdev->num_tqps);
+}
+
+static void hclge_get_channels(struct hnae3_handle *handle,
+			       struct ethtool_channels *ch)
+{
+	struct hclge_vport *vport = hclge_get_vport(handle);
+
+	ch->max_combined = hclge_get_max_channels(handle);
+	ch->other_count = 1;
+	ch->max_other = 1;
+	ch->combined_count = vport->alloc_tqps;
+}
+
+static void hclge_get_tqps_and_rss_info(struct hnae3_handle *handle,
+					u16 *free_tqps, u16 *max_rss_size)
+{
+	struct hclge_vport *vport = hclge_get_vport(handle);
+	struct hclge_dev *hdev = vport->back;
+	u16 temp_tqps = 0;
+	int i;
+
+	for (i = 0; i < hdev->num_tqps; i++) {
+		if (!hdev->htqp[i].alloced)
+			temp_tqps++;
+	}
+	*free_tqps = temp_tqps;
+	*max_rss_size = hdev->rss_size_max;
+}
+
+static void hclge_release_tqp(struct hclge_vport *vport)
+{
+	struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo;
+	struct hclge_dev *hdev = vport->back;
+	int i;
+
+	for (i = 0; i < kinfo->num_tqps; i++) {
+		struct hclge_tqp *tqp =
+			container_of(kinfo->tqp[i], struct hclge_tqp, q);
+
+		tqp->q.handle = NULL;
+		tqp->q.tqp_index = 0;
+		tqp->alloced = false;
+	}
+
+	devm_kfree(&hdev->pdev->dev, kinfo->tqp);
+	kinfo->tqp = NULL;
+}
+
+static int hclge_set_channels(struct hnae3_handle *handle, u32 new_tqps_num)
+{
+	struct hclge_vport *vport = hclge_get_vport(handle);
+	struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo;
+	struct hclge_dev *hdev = vport->back;
+	int cur_rss_size = kinfo->rss_size;
+	int cur_tqps = kinfo->num_tqps;
+	u16 tc_offset[HCLGE_MAX_TC_NUM];
+	u16 tc_valid[HCLGE_MAX_TC_NUM];
+	u16 tc_size[HCLGE_MAX_TC_NUM];
+	u16 roundup_size;
+	u32 *rss_indir;
+	int ret, i;
+
+	hclge_release_tqp(vport);
+
+	ret = hclge_knic_setup(vport, new_tqps_num);
+	if (ret) {
+		dev_err(&hdev->pdev->dev, "setup nic fail, ret = %d\n", ret);
+		return ret;
+	}
+
+	ret = hclge_map_tqp_to_vport(hdev, vport);
+	if (ret) {
+		dev_err(&hdev->pdev->dev, "map vport tqp fail, ret = %d\n", ret);
+		return ret;
+	}
+
+	ret = hclge_tm_schd_init(hdev);
+	if (ret) {
+		dev_err(&hdev->pdev->dev, "tm schd init fail, ret = %d\n", ret);
+		return ret;
+	}
+
+	roundup_size = roundup_pow_of_two(kinfo->rss_size);
+	roundup_size = ilog2(roundup_size);
+	/* Set the RSS TC mode according to the new RSS size */
+	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
+		tc_valid[i] = 0;
+
+		if (!(hdev->hw_tc_map & BIT(i)))
+			continue;
+
+		tc_valid[i] = 1;
+		tc_size[i] = roundup_size;
+		tc_offset[i] = kinfo->rss_size * i;
+	}
+	ret = hclge_set_rss_tc_mode(hdev, tc_valid, tc_size, tc_offset);
+	if (ret)
+		return ret;
+
+	/* Reinitializes the rss indirect table according to the new RSS size */
+	rss_indir = kcalloc(HCLGE_RSS_IND_TBL_SIZE, sizeof(u32), GFP_KERNEL);
+	if (!rss_indir)
+		return -ENOMEM;
+
+	for (i = 0; i < HCLGE_RSS_IND_TBL_SIZE; i++)
+		rss_indir[i] = i % kinfo->rss_size;
+
+	ret = hclge_set_rss(handle, rss_indir, NULL, 0);
+	if (ret)
+		dev_err(&hdev->pdev->dev, "set rss indir table fail, ret=%d\n",
+			ret);
+
+	kfree(rss_indir);
+
+	if (!ret)
+		dev_info(&hdev->pdev->dev,
+			 "Channels changed, rss_size from %d to %d, tqps from %d to %d\n",
+			 cur_rss_size, kinfo->rss_size,
+			 cur_tqps, kinfo->rss_size * kinfo->num_tc);
+
+	return ret;
+}
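
After a channel-count change the RSS indirection table is simply refilled round-robin over the new queue set, so entry i points at queue i % rss_size. A stand-alone sketch (ours) of that refill:

/* Sketch (ours): refill a HCLGE_RSS_IND_TBL_SIZE-entry indirection
 * table so flows spread evenly over rss_size queues; with
 * rss_size = 4 the entries cycle 0,1,2,3,0,1,2,3,...
 */
static void fill_rss_indir(unsigned int *tbl, int tbl_size, int rss_size)
{
	int i;

	for (i = 0; i < tbl_size; i++)
		tbl[i] = i % rss_size;
}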
+
+static int hclge_get_regs_num(struct hclge_dev *hdev, u32 *regs_num_32_bit,
+			      u32 *regs_num_64_bit)
+{
+	struct hclge_desc desc;
+	u32 total_num;
+	int ret;
+
+	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_REG_NUM, true);
+	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
+	if (ret) {
+		dev_err(&hdev->pdev->dev,
+			"Query register number cmd failed, ret = %d.\n", ret);
+		return ret;
+	}
+
+	*regs_num_32_bit = le32_to_cpu(desc.data[0]);
+	*regs_num_64_bit = le32_to_cpu(desc.data[1]);
+
+	total_num = *regs_num_32_bit + *regs_num_64_bit;
+	if (!total_num)
+		return -EINVAL;
+
+	return 0;
+}
+
+static int hclge_get_32_bit_regs(struct hclge_dev *hdev, u32 regs_num,
+				 void *data)
+{
+#define HCLGE_32_BIT_REG_RTN_DATANUM 8
+
+	struct hclge_desc *desc;
+	u32 *reg_val = data;
+	__le32 *desc_data;
+	int cmd_num;
+	int i, k, n;
+	int ret;
+
+	if (regs_num == 0)
+		return 0;
+
+	cmd_num = DIV_ROUND_UP(regs_num + 2, HCLGE_32_BIT_REG_RTN_DATANUM);
+	desc = kcalloc(cmd_num, sizeof(struct hclge_desc), GFP_KERNEL);
+	if (!desc)
+		return -ENOMEM;
+
+	hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_QUERY_32_BIT_REG, true);
+	ret = hclge_cmd_send(&hdev->hw, desc, cmd_num);
+	if (ret) {
+		dev_err(&hdev->pdev->dev,
+			"Query 32 bit register cmd failed, ret = %d.\n", ret);
+		kfree(desc);
+		return ret;
+	}
+
+	for (i = 0; i < cmd_num; i++) {
+		if (i == 0) {
+			desc_data = (__le32 *)(&desc[i].data[0]);
+			n = HCLGE_32_BIT_REG_RTN_DATANUM - 2;
+		} else {
+			desc_data = (__le32 *)(&desc[i]);
+			n = HCLGE_32_BIT_REG_RTN_DATANUM;
+		}
+		for (k = 0; k < n; k++) {
+			*reg_val++ = le32_to_cpu(*desc_data++);
+
+			regs_num--;
+			if (!regs_num)
+				break;
+		}
+	}
+
+	kfree(desc);
+	return 0;
+}
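
The descriptor-count math above accounts for the reply head occupying two of the eight 32-bit data words in the first descriptor, which is why it rounds up regs_num + 2 rather than regs_num. A quick check (ours):

/* Sketch (ours) of the command count used by hclge_get_32_bit_regs():
 * descriptor 0 carries 8 - 2 = 6 register words, all later descriptors
 * carry the full 8.
 */
#define RTN_DATANUM	8	/* mirrors HCLGE_32_BIT_REG_RTN_DATANUM */

static int regs_to_cmd_num(int regs_num)
{
	return (regs_num + 2 + RTN_DATANUM - 1) / RTN_DATANUM;
}

/* regs_to_cmd_num(100) == 13: 6 words in desc 0, 94 across 12 more */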
+
+static int hclge_get_64_bit_regs(struct hclge_dev *hdev, u32 regs_num,
+				 void *data)
+{
+#define HCLGE_64_BIT_REG_RTN_DATANUM 4
+
+	struct hclge_desc *desc;
+	u64 *reg_val = data;
+	__le64 *desc_data;
+	int cmd_num;
+	int i, k, n;
+	int ret;
+
+	if (regs_num == 0)
+		return 0;
+
+	cmd_num = DIV_ROUND_UP(regs_num + 1, HCLGE_64_BIT_REG_RTN_DATANUM);
+	desc = kcalloc(cmd_num, sizeof(struct hclge_desc), GFP_KERNEL);
+	if (!desc)
+		return -ENOMEM;
+
+	hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_QUERY_64_BIT_REG, true);
+	ret = hclge_cmd_send(&hdev->hw, desc, cmd_num);
+	if (ret) {
+		dev_err(&hdev->pdev->dev,
+			"Query 64 bit register cmd failed, ret = %d.\n", ret);
+		kfree(desc);
+		return ret;
+	}
+
+	for (i = 0; i < cmd_num; i++) {
+		if (i == 0) {
+			desc_data = (__le64 *)(&desc[i].data[0]);
+			n = HCLGE_64_BIT_REG_RTN_DATANUM - 1;
+		} else {
+			desc_data = (__le64 *)(&desc[i]);
+			n = HCLGE_64_BIT_REG_RTN_DATANUM;
+		}
+		for (k = 0; k < n; k++) {
+			*reg_val++ = le64_to_cpu(*desc_data++);
+
+			regs_num--;
+			if (!regs_num)
+				break;
+		}
+	}
+
+	kfree(desc);
+	return 0;
+}
+
+static int hclge_get_regs_len(struct hnae3_handle *handle)
+{
+	struct hclge_vport *vport = hclge_get_vport(handle);
+	struct hclge_dev *hdev = vport->back;
+	u32 regs_num_32_bit, regs_num_64_bit;
+	int ret;
+
+	ret = hclge_get_regs_num(hdev, &regs_num_32_bit, &regs_num_64_bit);
+	if (ret) {
+		dev_err(&hdev->pdev->dev,
+			"Get register number failed, ret = %d.\n", ret);
+		return -EOPNOTSUPP;
+	}
+
+	return regs_num_32_bit * sizeof(u32) + regs_num_64_bit * sizeof(u64);
+}
+
+static void hclge_get_regs(struct hnae3_handle *handle, u32 *version,
+			   void *data)
+{
+	struct hclge_vport *vport = hclge_get_vport(handle);
+	struct hclge_dev *hdev = vport->back;
+	u32 regs_num_32_bit, regs_num_64_bit;
+	int ret;
+
+	*version = hdev->fw_version;
+
+	ret = hclge_get_regs_num(hdev, &regs_num_32_bit, &regs_num_64_bit);
+	if (ret) {
+		dev_err(&hdev->pdev->dev,
+			"Get register number failed, ret = %d.\n", ret);
+		return;
+	}
+
+	ret = hclge_get_32_bit_regs(hdev, regs_num_32_bit, data);
+	if (ret) {
+		dev_err(&hdev->pdev->dev,
+			"Get 32 bit register failed, ret = %d.\n", ret);
+		return;
+	}
+
+	data = (u32 *)data + regs_num_32_bit;
+	ret = hclge_get_64_bit_regs(hdev, regs_num_64_bit,
+				    data);
+	if (ret)
+		dev_err(&hdev->pdev->dev,
+			"Get 64 bit register failed, ret = %d.\n", ret);
+}
+
+static int hclge_set_led_status_sfp(struct hclge_dev *hdev, u8 speed_led_status,
+				    u8 act_led_status, u8 link_led_status,
+				    u8 locate_led_status)
+{
+	struct hclge_set_led_state_cmd *req;
+	struct hclge_desc desc;
+	int ret;
+
+	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_LED_STATUS_CFG, false);
+
+	req = (struct hclge_set_led_state_cmd *)desc.data;
+	hnae_set_field(req->port_speed_led_config, HCLGE_LED_PORT_SPEED_STATE_M,
+		       HCLGE_LED_PORT_SPEED_STATE_S, speed_led_status);
+	hnae_set_field(req->link_led_config, HCLGE_LED_ACTIVITY_STATE_M,
+		       HCLGE_LED_ACTIVITY_STATE_S, act_led_status);
+	hnae_set_field(req->activity_led_config, HCLGE_LED_LINK_STATE_M,
+		       HCLGE_LED_LINK_STATE_S, link_led_status);
+	hnae_set_field(req->locate_led_config, HCLGE_LED_LOCATE_STATE_M,
+		       HCLGE_LED_LOCATE_STATE_S, locate_led_status);
+
+	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
+	if (ret)
+		dev_err(&hdev->pdev->dev,
+			"Send set led state cmd error, ret = %d\n", ret);
+
+	return ret;
+}
+
+enum hclge_led_status {
+	HCLGE_LED_OFF,
+	HCLGE_LED_ON,
+	HCLGE_LED_NO_CHANGE = 0xFF,
+};
+
+static int hclge_set_led_id(struct hnae3_handle *handle,
+			    enum ethtool_phys_id_state status)
+{
+#define BLINK_FREQUENCY		2
+	struct hclge_vport *vport = hclge_get_vport(handle);
+	struct hclge_dev *hdev = vport->back;
+	struct phy_device *phydev = hdev->hw.mac.phydev;
+	int ret = 0;
+
+	if (phydev || hdev->hw.mac.media_type != HNAE3_MEDIA_TYPE_FIBER)
+		return -EOPNOTSUPP;
+
+	switch (status) {
+	case ETHTOOL_ID_ACTIVE:
+		ret = hclge_set_led_status_sfp(hdev,
+					       HCLGE_LED_NO_CHANGE,
+					       HCLGE_LED_NO_CHANGE,
+					       HCLGE_LED_NO_CHANGE,
+					       HCLGE_LED_ON);
+		break;
+	case ETHTOOL_ID_INACTIVE:
+		ret = hclge_set_led_status_sfp(hdev,
+					       HCLGE_LED_NO_CHANGE,
+					       HCLGE_LED_NO_CHANGE,
+					       HCLGE_LED_NO_CHANGE,
+					       HCLGE_LED_OFF);
+		break;
+	default:
+		ret = -EINVAL;
+		break;
+	}
+
+	return ret;
+}
+
+enum hclge_led_port_speed {
+	HCLGE_SPEED_LED_FOR_1G,
+	HCLGE_SPEED_LED_FOR_10G,
+	HCLGE_SPEED_LED_FOR_25G,
+	HCLGE_SPEED_LED_FOR_40G,
+	HCLGE_SPEED_LED_FOR_50G,
+	HCLGE_SPEED_LED_FOR_100G,
+};
+
+static u8 hclge_led_get_speed_status(u32 speed)
+{
+	u8 speed_led;
+
+	switch (speed) {
+	case HCLGE_MAC_SPEED_1G:
+		speed_led = HCLGE_SPEED_LED_FOR_1G;
+		break;
+	case HCLGE_MAC_SPEED_10G:
+		speed_led = HCLGE_SPEED_LED_FOR_10G;
+		break;
+	case HCLGE_MAC_SPEED_25G:
+		speed_led = HCLGE_SPEED_LED_FOR_25G;
+		break;
+	case HCLGE_MAC_SPEED_40G:
+		speed_led = HCLGE_SPEED_LED_FOR_40G;
+		break;
+	case HCLGE_MAC_SPEED_50G:
+		speed_led = HCLGE_SPEED_LED_FOR_50G;
+		break;
+	case HCLGE_MAC_SPEED_100G:
+		speed_led = HCLGE_SPEED_LED_FOR_100G;
+		break;
+	default:
+		speed_led = HCLGE_LED_NO_CHANGE;
+	}
+
+	return speed_led;
+}
+
+static int hclge_update_led_status(struct hclge_dev *hdev)
+{
+	u8 port_speed_status, link_status, activity_status;
+	u64 rx_pkts, tx_pkts;
+
+	if (hdev->hw.mac.media_type != HNAE3_MEDIA_TYPE_FIBER)
+		return 0;
+
+	port_speed_status = hclge_led_get_speed_status(hdev->hw.mac.speed);
+
+	rx_pkts = hdev->hw_stats.mac_stats.mac_rx_total_pkt_num;
+	tx_pkts = hdev->hw_stats.mac_stats.mac_tx_total_pkt_num;
+	if (rx_pkts != hdev->rx_pkts_for_led ||
+	    tx_pkts != hdev->tx_pkts_for_led)
+		activity_status = HCLGE_LED_ON;
+	else
+		activity_status = HCLGE_LED_OFF;
+	hdev->rx_pkts_for_led = rx_pkts;
+	hdev->tx_pkts_for_led = tx_pkts;
+
+	if (hdev->hw.mac.link)
+		link_status = HCLGE_LED_ON;
+	else
+		link_status = HCLGE_LED_OFF;
+
+	return hclge_set_led_status_sfp(hdev, port_speed_status,
+					activity_status, link_status,
+					HCLGE_LED_NO_CHANGE);
+}
+
 static const struct hnae3_ae_ops hclge_ops = {
 	.init_ae_dev = hclge_init_ae_dev,
 	.uninit_ae_dev = hclge_uninit_ae_dev,
 	.init_client_instance = hclge_init_client_instance,
 	.uninit_client_instance = hclge_uninit_client_instance,
-	.map_ring_to_vector = hclge_map_handle_ring_to_vector,
-	.unmap_ring_from_vector = hclge_unmap_ring_from_vector,
+	.map_ring_to_vector = hclge_map_ring_to_vector,
+	.unmap_ring_from_vector = hclge_unmap_ring_frm_vector,
 	.get_vector = hclge_get_vector,
 	.set_promisc_mode = hclge_set_promisc_mode,
 	.set_loopback = hclge_set_loopback,
@@ -4934,6 +6030,7 @@ static const struct hnae3_ae_ops hclge_ops = {
 	.set_autoneg = hclge_set_autoneg,
 	.get_autoneg = hclge_get_autoneg,
 	.get_pauseparam = hclge_get_pauseparam,
+	.set_pauseparam = hclge_set_pauseparam,
 	.set_mtu = hclge_set_mtu,
 	.reset_queue = hclge_reset_tqp,
 	.get_stats = hclge_get_stats,
@@ -4942,9 +6039,18 @@ static const struct hnae3_ae_ops hclge_ops = {
 	.get_sset_count = hclge_get_sset_count,
 	.get_fw_version = hclge_get_fw_version,
 	.get_mdix_mode = hclge_get_mdix_mode,
+	.enable_vlan_filter = hclge_enable_vlan_filter,
 	.set_vlan_filter = hclge_set_port_vlan_filter,
 	.set_vf_vlan_filter = hclge_set_vf_vlan_filter,
+	.enable_hw_strip_rxvtag = hclge_en_hw_strip_rxvtag,
 	.reset_event = hclge_reset_event,
+	.get_tqps_and_rss_info = hclge_get_tqps_and_rss_info,
+	.set_channels = hclge_set_channels,
+	.get_channels = hclge_get_channels,
+	.get_flowctrl_adv = hclge_get_flowctrl_adv,
+	.get_regs_len = hclge_get_regs_len,
+	.get_regs = hclge_get_regs,
+	.set_led_id = hclge_set_led_id,
 };
 
 static struct hnae3_ae_algo ae_algo = {
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h
index 7027814ea5d7..d99a76a9557c 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h
@@ -79,6 +79,10 @@
 #define HCLGE_PHY_MDIX_STATUS_B	(6)
 #define HCLGE_PHY_SPEED_DUP_RESOLVE_B	(11)
 
+/* Factor used to calculate offset and bitmap of VF num */
+#define HCLGE_VF_NUM_PER_CMD           64
+#define HCLGE_VF_NUM_PER_BYTE          8
+
 /* Reset related Registers */
 #define HCLGE_MISC_RESET_STS_REG	0x20700
 #define HCLGE_GLOBAL_RESET_REG		0x20A00
@@ -92,6 +96,16 @@
 #define HCLGE_VECTOR0_CORERESET_INT_B	6
 #define HCLGE_VECTOR0_IMPRESET_INT_B	7
 
+/* Vector0 interrupt CMDQ event source register (RW) */
+#define HCLGE_VECTOR0_CMDQ_SRC_REG	0x27100
+/* CMDQ register bits for RX event (= MBX event) */
+#define HCLGE_VECTOR0_RX_CMDQ_INT_B	1
+
+#define HCLGE_MAC_DEFAULT_FRAME \
+	(ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN + ETH_DATA_LEN)
+#define HCLGE_MAC_MIN_FRAME		64
+#define HCLGE_MAC_MAX_FRAME		9728
+
 enum HCLGE_DEV_STATE {
 	HCLGE_STATE_REINITING,
 	HCLGE_STATE_DOWN,
@@ -99,12 +113,20 @@ enum HCLGE_DEV_STATE {
 	HCLGE_STATE_REMOVING,
 	HCLGE_STATE_SERVICE_INITED,
 	HCLGE_STATE_SERVICE_SCHED,
+	HCLGE_STATE_RST_SERVICE_SCHED,
+	HCLGE_STATE_RST_HANDLING,
+	HCLGE_STATE_MBX_SERVICE_SCHED,
 	HCLGE_STATE_MBX_HANDLING,
-	HCLGE_STATE_MBX_IRQ,
-	HCLGE_STATE_RESET_INT,
+	HCLGE_STATE_STATISTICS_UPDATING,
 	HCLGE_STATE_MAX
 };
 
+enum hclge_evt_cause {
+	HCLGE_VECTOR0_EVENT_RST,
+	HCLGE_VECTOR0_EVENT_MBX,
+	HCLGE_VECTOR0_EVENT_OTHER,
+};
+
 #define HCLGE_MPF_ENBALE 1
 struct hclge_caps {
 	u16 num_tqp;
@@ -208,6 +230,7 @@ struct hclge_cfg {
 	u8 tc_num;
 	u16 tqp_desc_num;
 	u16 rx_buf_len;
+	u16 rss_size_max;
 	u8 phy_addr;
 	u8 media_type;
 	u8 mac_addr[ETH_ALEN];
@@ -364,14 +387,23 @@ struct hclge_mac_stats {
 	u64 mac_tx_multi_pkt_num;
 	u64 mac_tx_broad_pkt_num;
 	u64 mac_tx_undersize_pkt_num;
-	u64 mac_tx_overrsize_pkt_num;
+	u64 mac_tx_oversize_pkt_num;
 	u64 mac_tx_64_oct_pkt_num;
 	u64 mac_tx_65_127_oct_pkt_num;
 	u64 mac_tx_128_255_oct_pkt_num;
 	u64 mac_tx_256_511_oct_pkt_num;
 	u64 mac_tx_512_1023_oct_pkt_num;
 	u64 mac_tx_1024_1518_oct_pkt_num;
-	u64 mac_tx_1519_max_oct_pkt_num;
+	u64 mac_tx_1519_2047_oct_pkt_num;
+	u64 mac_tx_2048_4095_oct_pkt_num;
+	u64 mac_tx_4096_8191_oct_pkt_num;
+	u64 mac_tx_8192_12287_oct_pkt_num; /* valid for GE MAC only */
+	u64 mac_tx_8192_9216_oct_pkt_num; /* valid for LGE & CGE MAC only */
+	u64 mac_tx_9217_12287_oct_pkt_num; /* valid for LGE & CGE MAC only */
+	u64 mac_tx_12288_16383_oct_pkt_num;
+	u64 mac_tx_1519_max_good_oct_pkt_num;
+	u64 mac_tx_1519_max_bad_oct_pkt_num;
+
 	u64 mac_rx_total_pkt_num;
 	u64 mac_rx_total_oct_num;
 	u64 mac_rx_good_pkt_num;
@@ -382,33 +414,52 @@ struct hclge_mac_stats {
 	u64 mac_rx_multi_pkt_num;
 	u64 mac_rx_broad_pkt_num;
 	u64 mac_rx_undersize_pkt_num;
-	u64 mac_rx_overrsize_pkt_num;
+	u64 mac_rx_oversize_pkt_num;
 	u64 mac_rx_64_oct_pkt_num;
 	u64 mac_rx_65_127_oct_pkt_num;
 	u64 mac_rx_128_255_oct_pkt_num;
 	u64 mac_rx_256_511_oct_pkt_num;
 	u64 mac_rx_512_1023_oct_pkt_num;
 	u64 mac_rx_1024_1518_oct_pkt_num;
-	u64 mac_rx_1519_max_oct_pkt_num;
-
-	u64 mac_trans_fragment_pkt_num;
-	u64 mac_trans_undermin_pkt_num;
-	u64 mac_trans_jabber_pkt_num;
-	u64 mac_trans_err_all_pkt_num;
-	u64 mac_trans_from_app_good_pkt_num;
-	u64 mac_trans_from_app_bad_pkt_num;
-	u64 mac_rcv_fragment_pkt_num;
-	u64 mac_rcv_undermin_pkt_num;
-	u64 mac_rcv_jabber_pkt_num;
-	u64 mac_rcv_fcs_err_pkt_num;
-	u64 mac_rcv_send_app_good_pkt_num;
-	u64 mac_rcv_send_app_bad_pkt_num;
+	u64 mac_rx_1519_2047_oct_pkt_num;
+	u64 mac_rx_2048_4095_oct_pkt_num;
+	u64 mac_rx_4096_8191_oct_pkt_num;
+	u64 mac_rx_8192_12287_oct_pkt_num; /* valid for GE MAC only */
+	u64 mac_rx_8192_9216_oct_pkt_num; /* valid for LGE & CGE MAC only */
+	u64 mac_rx_9217_12287_oct_pkt_num; /* valid for LGE & CGE MAC only */
+	u64 mac_rx_12288_16383_oct_pkt_num;
+	u64 mac_rx_1519_max_good_oct_pkt_num;
+	u64 mac_rx_1519_max_bad_oct_pkt_num;
+
+	u64 mac_tx_fragment_pkt_num;
+	u64 mac_tx_undermin_pkt_num;
+	u64 mac_tx_jabber_pkt_num;
+	u64 mac_tx_err_all_pkt_num;
+	u64 mac_tx_from_app_good_pkt_num;
+	u64 mac_tx_from_app_bad_pkt_num;
+	u64 mac_rx_fragment_pkt_num;
+	u64 mac_rx_undermin_pkt_num;
+	u64 mac_rx_jabber_pkt_num;
+	u64 mac_rx_fcs_err_pkt_num;
+	u64 mac_rx_send_app_good_pkt_num;
+	u64 mac_rx_send_app_bad_pkt_num;
 };
 
+#define HCLGE_STATS_TIMER_INTERVAL	(60 * 5)
 struct hclge_hw_stats {
 	struct hclge_mac_stats      mac_stats;
 	struct hclge_64_bit_stats   all_64_bit_stats;
 	struct hclge_32_bit_stats   all_32_bit_stats;
+	u32 stats_timer;
+};
+
+struct hclge_vlan_type_cfg {
+	u16 rx_ot_fst_vlan_type;
+	u16 rx_ot_sec_vlan_type;
+	u16 rx_in_fst_vlan_type;
+	u16 rx_in_sec_vlan_type;
+	u16 tx_ot_vlan_type;
+	u16 tx_in_vlan_type;
 };
 
 struct hclge_dev {
@@ -420,6 +471,8 @@ struct hclge_dev {
 	unsigned long state;
 
 	enum hnae3_reset_type reset_type;
+	unsigned long reset_request;	/* reset has been requested */
+	unsigned long reset_pending;	/* client rst is pending to be served */
 	u32 fw_version;
 	u16 num_vmdq_vport;		/* Num vmdq vport this PF has set up */
 	u16 num_tqps;			/* Num task queue pairs of this PF */
@@ -469,6 +522,8 @@ struct hclge_dev {
 	unsigned long service_timer_previous;
 	struct timer_list service_timer;
 	struct work_struct service_task;
+	struct work_struct rst_service_task;
+	struct work_struct mbx_service_task;
 
 	bool cur_promisc;
 	int num_alloc_vfs;	/* Actual number of VFs allocated */
@@ -493,6 +548,29 @@ struct hclge_dev {
 	enum hclge_mta_dmac_sel_type mta_mac_sel_type;
 	bool enable_mta; /* Multicast filter enable */
 	bool accept_mta_mc; /* Whether accept mta filter multicast */
+
+	struct hclge_vlan_type_cfg vlan_type_cfg;
+
+	u64 rx_pkts_for_led;
+	u64 tx_pkts_for_led;
+};
+
+/* VPort level vlan tag configuration for TX direction */
+struct hclge_tx_vtag_cfg {
+	bool accept_tag;	/* Whether to accept tagged packets from host */
+	bool accept_untag;	/* Whether to accept untagged packets from host */
+	bool insert_tag1_en;	/* Whether to insert the inner vlan tag */
+	bool insert_tag2_en;	/* Whether to insert the outer vlan tag */
+	u16  default_tag1;	/* The default inner vlan tag to insert */
+	u16  default_tag2;	/* The default outer vlan tag to insert */
+};
+
+/* VPort level vlan tag configuration for RX direction */
+struct hclge_rx_vtag_cfg {
+	bool strip_tag1_en;	/* Whether to strip the inner vlan tag */
+	bool strip_tag2_en;	/* Whether to strip the outer vlan tag */
+	bool vlan1_vlan_prionly;/* Inner VLAN tag up to descriptor enable */
+	bool vlan2_vlan_prionly;/* Outer VLAN tag up to descriptor enable */
 };
 
 struct hclge_vport {
@@ -507,6 +585,9 @@ struct hclge_vport {
 	u16 bw_limit;		/* VSI BW Limit (0 = disabled) */
 	u8  dwrr;
 
+	struct hclge_tx_vtag_cfg  txvlan_cfg;
+	struct hclge_rx_vtag_cfg  rxvlan_cfg;
+
 	int vport_id;
 	struct hclge_dev *back;  /* Back reference to associated dev */
 	struct hnae3_handle nic;
@@ -529,8 +610,10 @@ int hclge_cfg_func_mta_filter(struct hclge_dev *hdev,
 			      u8 func_id,
 			      bool enable);
 struct hclge_vport *hclge_get_vport(struct hnae3_handle *handle);
-int hclge_map_vport_ring_to_vector(struct hclge_vport *vport, int vector,
-				   struct hnae3_ring_chain_node *ring_chain);
+int hclge_bind_ring_with_vector(struct hclge_vport *vport,
+				int vector_id, bool en,
+				struct hnae3_ring_chain_node *ring_chain);
+
 static inline int hclge_get_queue_id(struct hnae3_queue *queue)
 {
 	struct hclge_tqp *tqp = container_of(queue, struct hclge_tqp, q);
@@ -544,4 +627,8 @@ int hclge_set_vf_vlan_common(struct hclge_dev *vport, int vfid,
 
 int hclge_buffer_alloc(struct hclge_dev *hdev);
 int hclge_rss_init_hw(struct hclge_dev *hdev);
+
+void hclge_mbx_handler(struct hclge_dev *hdev);
+void hclge_reset_tqp(struct hnae3_handle *handle, u16 queue_id);
+int hclge_cfg_flowctrl(struct hclge_dev *hdev);
 #endif
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c
new file mode 100644
index 000000000000..f38fc5ce9f51
--- /dev/null
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c
@@ -0,0 +1,418 @@
+// SPDX-License-Identifier: GPL-2.0+
+// Copyright (c) 2016-2017 Hisilicon Limited.
+
+#include "hclge_main.h"
+#include "hclge_mbx.h"
+#include "hnae3.h"
+
+/* hclge_gen_resp_to_vf: used to generate a synchronous response to VF when PF
+ * receives a mailbox message from VF.
+ * @vport: pointer to struct hclge_vport
+ * @vf_to_pf_req: pointer to hclge_mbx_vf_to_pf_cmd of the original mailbox
+ *		  message
+ * @resp_status: indicates to VF whether its request succeeded (0) or failed.
+ */
+static int hclge_gen_resp_to_vf(struct hclge_vport *vport,
+				struct hclge_mbx_vf_to_pf_cmd *vf_to_pf_req,
+				int resp_status,
+				u8 *resp_data, u16 resp_data_len)
+{
+	struct hclge_mbx_pf_to_vf_cmd *resp_pf_to_vf;
+	struct hclge_dev *hdev = vport->back;
+	enum hclge_cmd_status status;
+	struct hclge_desc desc;
+
+	resp_pf_to_vf = (struct hclge_mbx_pf_to_vf_cmd *)desc.data;
+
+	if (resp_data_len > HCLGE_MBX_MAX_RESP_DATA_SIZE) {
+		dev_err(&hdev->pdev->dev,
+			"PF failed to gen resp to VF: len %d exceeds max len %d\n",
+			resp_data_len,
+			HCLGE_MBX_MAX_RESP_DATA_SIZE);
+	}
+
+	hclge_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_MBX_PF_TO_VF, false);
+
+	resp_pf_to_vf->dest_vfid = vf_to_pf_req->mbx_src_vfid;
+	resp_pf_to_vf->msg_len = vf_to_pf_req->msg_len;
+
+	resp_pf_to_vf->msg[0] = HCLGE_MBX_PF_VF_RESP;
+	resp_pf_to_vf->msg[1] = vf_to_pf_req->msg[0];
+	resp_pf_to_vf->msg[2] = vf_to_pf_req->msg[1];
+	resp_pf_to_vf->msg[3] = (resp_status == 0) ? 0 : 1;
+
+	if (resp_data && resp_data_len > 0)
+		memcpy(&resp_pf_to_vf->msg[4], resp_data, resp_data_len);
+
+	status = hclge_cmd_send(&hdev->hw, &desc, 1);
+	if (status)
+		dev_err(&hdev->pdev->dev,
+			"PF failed(%d) to send response to VF\n", status);
+
+	return status;
+}
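
For reference, the response words assembled above lay out as follows (our summary derived from the code, not a formal spec):

/* PF -> VF synchronous response layout built by hclge_gen_resp_to_vf():
 *
 *   msg[0]  HCLGE_MBX_PF_VF_RESP             generic "response" opcode
 *   msg[1]  request msg[0]                   echoed VF opcode
 *   msg[2]  request msg[1]                   echoed VF subcode
 *   msg[3]  0 = success, 1 = failure         resp_status folded to a flag
 *   msg[4+] resp_data, resp_data_len bytes   optional payload
 */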
+
+static int hclge_send_mbx_msg(struct hclge_vport *vport, u8 *msg, u16 msg_len,
+			      u16 mbx_opcode, u8 dest_vfid)
+{
+	struct hclge_mbx_pf_to_vf_cmd *resp_pf_to_vf;
+	struct hclge_dev *hdev = vport->back;
+	enum hclge_cmd_status status;
+	struct hclge_desc desc;
+
+	resp_pf_to_vf = (struct hclge_mbx_pf_to_vf_cmd *)desc.data;
+
+	hclge_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_MBX_PF_TO_VF, false);
+
+	resp_pf_to_vf->dest_vfid = dest_vfid;
+	resp_pf_to_vf->msg_len = msg_len;
+	resp_pf_to_vf->msg[0] = mbx_opcode;
+
+	memcpy(&resp_pf_to_vf->msg[1], msg, msg_len);
+
+	status = hclge_cmd_send(&hdev->hw, &desc, 1);
+	if (status)
+		dev_err(&hdev->pdev->dev,
+			"PF failed(%d) to send mailbox message to VF\n",
+			status);
+
+	return status;
+}
+
+static void hclge_free_vector_ring_chain(struct hnae3_ring_chain_node *head)
+{
+	struct hnae3_ring_chain_node *chain_tmp, *chain;
+
+	chain = head->next;
+
+	while (chain) {
+		chain_tmp = chain->next;
+		kzfree(chain);
+		chain = chain_tmp;
+	}
+}
+
+/* hclge_get_ring_chain_from_mbx: get ring type & tqpid from mailbox message
+ * msg[0]: opcode
+ * msg[1]: <not relevant to this function>
+ * msg[2]: ring_num
+ * msg[3]: first ring type (TX|RX)
+ * msg[4]: first tqp id
+ * msg[5] ~ msg[14]: other ring type and tqp id
+ */
+static int hclge_get_ring_chain_from_mbx(
+			struct hclge_mbx_vf_to_pf_cmd *req,
+			struct hnae3_ring_chain_node *ring_chain,
+			struct hclge_vport *vport)
+{
+#define HCLGE_RING_NODE_VARIABLE_NUM		3
+#define HCLGE_RING_MAP_MBX_BASIC_MSG_NUM	3
+	struct hnae3_ring_chain_node *cur_chain, *new_chain;
+	int ring_num;
+	int i;
+
+	ring_num = req->msg[2];
+
+	hnae_set_bit(ring_chain->flag, HNAE3_RING_TYPE_B, req->msg[3]);
+	ring_chain->tqp_index =
+			hclge_get_queue_id(vport->nic.kinfo.tqp[req->msg[4]]);
+	hnae_set_field(ring_chain->int_gl_idx, HCLGE_INT_GL_IDX_M,
+		       HCLGE_INT_GL_IDX_S,
+		       req->msg[5]);
+
+	cur_chain = ring_chain;
+
+	for (i = 1; i < ring_num; i++) {
+		new_chain = kzalloc(sizeof(*new_chain), GFP_KERNEL);
+		if (!new_chain)
+			goto err;
+
+		hnae_set_bit(new_chain->flag, HNAE3_RING_TYPE_B,
+			     req->msg[HCLGE_RING_NODE_VARIABLE_NUM * i +
+			     HCLGE_RING_MAP_MBX_BASIC_MSG_NUM]);
+
+		new_chain->tqp_index =
+		hclge_get_queue_id(vport->nic.kinfo.tqp
+			[req->msg[HCLGE_RING_NODE_VARIABLE_NUM * i +
+			HCLGE_RING_MAP_MBX_BASIC_MSG_NUM + 1]]);
+
+		hnae_set_field(new_chain->int_gl_idx, HCLGE_INT_GL_IDX_M,
+			       HCLGE_INT_GL_IDX_S,
+			       req->msg[HCLGE_RING_NODE_VARIABLE_NUM * i +
+			       HCLGE_RING_MAP_MBX_BASIC_MSG_NUM + 2]);
+
+		cur_chain->next = new_chain;
+		cur_chain = new_chain;
+	}
+
+	return 0;
+err:
+	hclge_free_vector_ring_chain(ring_chain);
+	return -ENOMEM;
+}
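
As an example of the layout the parser above consumes, a VF request mapping one TX and one RX ring to vector 1 would look like this (our illustration; the type values assume HNAE3_RING_TYPE_TX == 0 and HNAE3_RING_TYPE_RX == 1):

/* Example ring-map mailbox message (ours):
 *
 *   msg[0] = HCLGE_MBX_MAP_RING_TO_VECTOR
 *   msg[1] = 1	vector id
 *   msg[2] = 2	ring_num: two (type, tqp, gl idx) triples follow
 *   msg[3] = 0	first ring type (TX, assumed)
 *   msg[4] = 0	first tqp id
 *   msg[5] = 0	first int_gl index
 *   msg[6] = 1	second ring type (RX, assumed)
 *   msg[7] = 0	second tqp id
 *   msg[8] = 1	second int_gl index
 */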
+
+static int hclge_map_unmap_ring_to_vf_vector(struct hclge_vport *vport, bool en,
+					     struct hclge_mbx_vf_to_pf_cmd *req)
+{
+	struct hnae3_ring_chain_node ring_chain;
+	int vector_id = req->msg[1];
+	int ret;
+
+	memset(&ring_chain, 0, sizeof(ring_chain));
+	ret = hclge_get_ring_chain_from_mbx(req, &ring_chain, vport);
+	if (ret)
+		return ret;
+
+	ret = hclge_bind_ring_with_vector(vport, vector_id, en, &ring_chain);
+	if (ret)
+		return ret;
+
+	hclge_free_vector_ring_chain(&ring_chain);
+
+	return 0;
+}
+
+static int hclge_set_vf_promisc_mode(struct hclge_vport *vport,
+				     struct hclge_mbx_vf_to_pf_cmd *req)
+{
+	bool en = req->msg[1] ? true : false;
+	struct hclge_promisc_param param;
+
+	/* always enable broadcast promisc bit */
+	hclge_promisc_param_init(&param, en, en, true, vport->vport_id);
+	return hclge_cmd_set_promisc_mode(vport->back, &param);
+}
+
+static int hclge_set_vf_uc_mac_addr(struct hclge_vport *vport,
+				    struct hclge_mbx_vf_to_pf_cmd *mbx_req,
+				    bool gen_resp)
+{
+	const u8 *mac_addr = (const u8 *)(&mbx_req->msg[2]);
+	struct hclge_dev *hdev = vport->back;
+	int status;
+
+	if (mbx_req->msg[1] == HCLGE_MBX_MAC_VLAN_UC_MODIFY) {
+		const u8 *old_addr = (const u8 *)(&mbx_req->msg[8]);
+
+		hclge_rm_uc_addr_common(vport, old_addr);
+		status = hclge_add_uc_addr_common(vport, mac_addr);
+	} else if (mbx_req->msg[1] == HCLGE_MBX_MAC_VLAN_UC_ADD) {
+		status = hclge_add_uc_addr_common(vport, mac_addr);
+	} else if (mbx_req->msg[1] == HCLGE_MBX_MAC_VLAN_UC_REMOVE) {
+		status = hclge_rm_uc_addr_common(vport, mac_addr);
+	} else {
+		dev_err(&hdev->pdev->dev,
+			"failed to set unicast mac addr, unknown subcode %d\n",
+			mbx_req->msg[1]);
+		return -EIO;
+	}
+
+	if (gen_resp)
+		hclge_gen_resp_to_vf(vport, mbx_req, status, NULL, 0);
+
+	return 0;
+}
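
The unicast MAC message above packs the addresses at fixed offsets; a modify request, for instance, carries both the new and the old address (our summary of the offsets used in the code):

/* HCLGE_MBX_SET_UNICAST message layout as consumed above (our summary):
 *
 *   msg[0]      HCLGE_MBX_SET_UNICAST
 *   msg[1]      subcode: ADD / REMOVE / MODIFY
 *   msg[2..7]   (new) MAC address, 6 bytes
 *   msg[8..13]  old MAC address, 6 bytes (MODIFY only)
 */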
+
+static int hclge_set_vf_mc_mac_addr(struct hclge_vport *vport,
+				    struct hclge_mbx_vf_to_pf_cmd *mbx_req,
+				    bool gen_resp)
+{
+	const u8 *mac_addr = (const u8 *)(&mbx_req->msg[2]);
+	struct hclge_dev *hdev = vport->back;
+	int status;
+
+	if (mbx_req->msg[1] == HCLGE_MBX_MAC_VLAN_MC_ADD) {
+		status = hclge_add_mc_addr_common(vport, mac_addr);
+	} else if (mbx_req->msg[1] == HCLGE_MBX_MAC_VLAN_MC_REMOVE) {
+		status = hclge_rm_mc_addr_common(vport, mac_addr);
+	} else if (mbx_req->msg[1] == HCLGE_MBX_MAC_VLAN_MC_FUNC_MTA_ENABLE) {
+		u8 func_id = vport->vport_id;
+		bool enable = mbx_req->msg[2];
+
+		status = hclge_cfg_func_mta_filter(hdev, func_id, enable);
+	} else {
+		dev_err(&hdev->pdev->dev,
+			"failed to set mcast mac addr, unknown subcode %d\n",
+			mbx_req->msg[1]);
+		return -EIO;
+	}
+
+	if (gen_resp)
+		hclge_gen_resp_to_vf(vport, mbx_req, status, NULL, 0);
+
+	return 0;
+}
+
+static int hclge_set_vf_vlan_cfg(struct hclge_vport *vport,
+				 struct hclge_mbx_vf_to_pf_cmd *mbx_req,
+				 bool gen_resp)
+{
+	struct hclge_dev *hdev = vport->back;
+	int status = 0;
+
+	if (mbx_req->msg[1] == HCLGE_MBX_VLAN_FILTER) {
+		u16 vlan, proto;
+		bool is_kill;
+
+		is_kill = !!mbx_req->msg[2];
+		memcpy(&vlan, &mbx_req->msg[3], sizeof(vlan));
+		memcpy(&proto, &mbx_req->msg[5], sizeof(proto));
+		status = hclge_set_vf_vlan_common(hdev, vport->vport_id,
+						  is_kill, vlan, 0,
+						  cpu_to_be16(proto));
+	}
+
+	if (gen_resp)
+		status = hclge_gen_resp_to_vf(vport, mbx_req, status, NULL, 0);
+
+	return status;
+}
+
+static int hclge_get_vf_tcinfo(struct hclge_vport *vport,
+			       struct hclge_mbx_vf_to_pf_cmd *mbx_req,
+			       bool gen_resp)
+{
+	struct hclge_dev *hdev = vport->back;
+	int ret;
+
+	ret = hclge_gen_resp_to_vf(vport, mbx_req, 0, &hdev->hw_tc_map,
+				   sizeof(u8));
+
+	return ret;
+}
+
+static int hclge_get_vf_queue_info(struct hclge_vport *vport,
+				   struct hclge_mbx_vf_to_pf_cmd *mbx_req,
+				   bool gen_resp)
+{
+#define HCLGE_TQPS_RSS_INFO_LEN		8
+	u8 resp_data[HCLGE_TQPS_RSS_INFO_LEN];
+	struct hclge_dev *hdev = vport->back;
+
+	/* get the queue related info */
+	memcpy(&resp_data[0], &vport->alloc_tqps, sizeof(u16));
+	memcpy(&resp_data[2], &hdev->rss_size_max, sizeof(u16));
+	memcpy(&resp_data[4], &hdev->num_desc, sizeof(u16));
+	memcpy(&resp_data[6], &hdev->rx_buf_len, sizeof(u16));
+
+	return hclge_gen_resp_to_vf(vport, mbx_req, 0, resp_data,
+				    HCLGE_TQPS_RSS_INFO_LEN);
+}
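
The eight bytes of resp_data returned for HCLGE_MBX_GET_QINFO are four native-endian u16 fields packed back to back (our summary of the memcpy offsets above):

/* HCLGE_MBX_GET_QINFO response payload layout (our summary):
 *
 *   resp_data[0..1]  vport->alloc_tqps
 *   resp_data[2..3]  hdev->rss_size_max
 *   resp_data[4..5]  hdev->num_desc
 *   resp_data[6..7]  hdev->rx_buf_len
 */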
+
+static int hclge_get_link_info(struct hclge_vport *vport,
+			       struct hclge_mbx_vf_to_pf_cmd *mbx_req)
+{
+	struct hclge_dev *hdev = vport->back;
+	u16 link_status;
+	u8 msg_data[2];
+	u8 dest_vfid;
+
+	/* mac.link can only be 0 or 1 */
+	link_status = (u16)hdev->hw.mac.link;
+	memcpy(&msg_data[0], &link_status, sizeof(u16));
+	dest_vfid = mbx_req->mbx_src_vfid;
+
+	/* send this requested info to VF */
+	return hclge_send_mbx_msg(vport, msg_data, sizeof(msg_data),
+				  HCLGE_MBX_LINK_STAT_CHANGE, dest_vfid);
+}
+
+static void hclge_reset_vf_queue(struct hclge_vport *vport,
+				 struct hclge_mbx_vf_to_pf_cmd *mbx_req)
+{
+	u16 queue_id;
+
+	memcpy(&queue_id, &mbx_req->msg[2], sizeof(queue_id));
+
+	hclge_reset_tqp(&vport->nic, queue_id);
+}
+
+void hclge_mbx_handler(struct hclge_dev *hdev)
+{
+	struct hclge_cmq_ring *crq = &hdev->hw.cmq.crq;
+	struct hclge_mbx_vf_to_pf_cmd *req;
+	struct hclge_vport *vport;
+	struct hclge_desc *desc;
+	int ret;
+
+	/* handle all the mailbox requests in the queue */
+	while (hnae_get_bit(crq->desc[crq->next_to_use].flag,
+			    HCLGE_CMDQ_RX_OUTVLD_B)) {
+		desc = &crq->desc[crq->next_to_use];
+		req = (struct hclge_mbx_vf_to_pf_cmd *)desc->data;
+
+		vport = &hdev->vport[req->mbx_src_vfid];
+
+		switch (req->msg[0]) {
+		case HCLGE_MBX_MAP_RING_TO_VECTOR:
+			ret = hclge_map_unmap_ring_to_vf_vector(vport, true,
+								req);
+			break;
+		case HCLGE_MBX_UNMAP_RING_TO_VECTOR:
+			ret = hclge_map_unmap_ring_to_vf_vector(vport, false,
+								req);
+			break;
+		case HCLGE_MBX_SET_PROMISC_MODE:
+			ret = hclge_set_vf_promisc_mode(vport, req);
+			if (ret)
+				dev_err(&hdev->pdev->dev,
+					"PF fail(%d) to set VF promisc mode\n",
+					ret);
+			break;
+		case HCLGE_MBX_SET_UNICAST:
+			ret = hclge_set_vf_uc_mac_addr(vport, req, false);
+			if (ret)
+				dev_err(&hdev->pdev->dev,
+					"PF fail(%d) to set VF UC MAC Addr\n",
+					ret);
+			break;
+		case HCLGE_MBX_SET_MULTICAST:
+			ret = hclge_set_vf_mc_mac_addr(vport, req, false);
+			if (ret)
+				dev_err(&hdev->pdev->dev,
+					"PF fail(%d) to set VF MC MAC Addr\n",
+					ret);
+			break;
+		case HCLGE_MBX_SET_VLAN:
+			ret = hclge_set_vf_vlan_cfg(vport, req, false);
+			if (ret)
+				dev_err(&hdev->pdev->dev,
+					"PF failed(%d) to config VF's VLAN\n",
+					ret);
+			break;
+		case HCLGE_MBX_GET_QINFO:
+			ret = hclge_get_vf_queue_info(vport, req, true);
+			if (ret)
+				dev_err(&hdev->pdev->dev,
+					"PF failed(%d) to get Q info for VF\n",
+					ret);
+			break;
+		case HCLGE_MBX_GET_TCINFO:
+			ret = hclge_get_vf_tcinfo(vport, req, true);
+			if (ret)
+				dev_err(&hdev->pdev->dev,
+					"PF failed(%d) to get TC info for VF\n",
+					ret);
+			break;
+		case HCLGE_MBX_GET_LINK_STATUS:
+			ret = hclge_get_link_info(vport, req);
+			if (ret)
+				dev_err(&hdev->pdev->dev,
+					"PF fail(%d) to get link stat for VF\n",
+					ret);
+			break;
+		case HCLGE_MBX_QUEUE_RESET:
+			hclge_reset_vf_queue(vport, req);
+			break;
+		default:
+			dev_err(&hdev->pdev->dev,
+				"un-supported mailbox message, code = %d\n",
+				req->msg[0]);
+			break;
+		}
+		hclge_mbx_ring_ptr_move_crq(crq);
+	}
+
+	/* Write back CMDQ_RQ header pointer, M7 needs this pointer */
+	hclge_write_dev(&hdev->hw, HCLGE_NIC_CRQ_HEAD_REG, crq->next_to_use);
+}
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mdio.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mdio.c
index 7069e9408d7d..c1dea3a47bdd 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mdio.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mdio.c
@@ -17,6 +17,7 @@
 #define HCLGE_PHY_SUPPORTED_FEATURES	(SUPPORTED_Autoneg | \
 					 SUPPORTED_TP | \
 					 SUPPORTED_Pause | \
+					 SUPPORTED_Asym_Pause | \
 					 PHY_10BT_FEATURES | \
 					 PHY_100BT_FEATURES | \
 					 PHY_1000BT_FEATURES)
@@ -183,6 +184,10 @@ static void hclge_mac_adjust_link(struct net_device *netdev)
 	ret = hclge_cfg_mac_speed_dup(hdev, speed, duplex);
 	if (ret)
 		netdev_err(netdev, "failed to adjust link.\n");
+
+	ret = hclge_cfg_flowctrl(hdev);
+	if (ret)
+		netdev_err(netdev, "failed to configure flow control.\n");
 }
 
 int hclge_mac_start_phy(struct hclge_dev *hdev)
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c
index 7bfa2e5497cb..36bd79a77940 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c
@@ -23,8 +23,8 @@ enum hclge_shaper_level {
 	HCLGE_SHAPER_LVL_PF	= 1,
 };
 
-#define HCLGE_SHAPER_BS_U_DEF	1
-#define HCLGE_SHAPER_BS_S_DEF	4
+#define HCLGE_SHAPER_BS_U_DEF	5
+#define HCLGE_SHAPER_BS_S_DEF	20
 
 #define HCLGE_ETHER_MAX_RATE	100000
 
@@ -112,7 +112,7 @@ static int hclge_shaper_para_calc(u32 ir, u8 shaper_level,
 	return 0;
 }
 
-static int hclge_mac_pause_en_cfg(struct hclge_dev *hdev, bool tx, bool rx)
+int hclge_mac_pause_en_cfg(struct hclge_dev *hdev, bool tx, bool rx)
 {
 	struct hclge_desc desc;
 
@@ -138,6 +138,46 @@ static int hclge_pfc_pause_en_cfg(struct hclge_dev *hdev, u8 tx_rx_bitmap,
 	return hclge_cmd_send(&hdev->hw, &desc, 1);
 }
 
+static int hclge_mac_pause_param_cfg(struct hclge_dev *hdev, const u8 *addr,
+				     u8 pause_trans_gap, u16 pause_trans_time)
+{
+	struct hclge_cfg_pause_param_cmd *pause_param;
+	struct hclge_desc desc;
+
+	pause_param = (struct hclge_cfg_pause_param_cmd *)&desc.data;
+
+	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_MAC_PARA, false);
+
+	ether_addr_copy(pause_param->mac_addr, addr);
+	pause_param->pause_trans_gap = pause_trans_gap;
+	pause_param->pause_trans_time = cpu_to_le16(pause_trans_time);
+
+	return hclge_cmd_send(&hdev->hw, &desc, 1);
+}
+
+int hclge_mac_pause_addr_cfg(struct hclge_dev *hdev, const u8 *mac_addr)
+{
+	struct hclge_cfg_pause_param_cmd *pause_param;
+	struct hclge_desc desc;
+	u16 trans_time;
+	u8 trans_gap;
+	int ret;
+
+	pause_param = (struct hclge_cfg_pause_param_cmd *)&desc.data;
+
+	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_MAC_PARA, true);
+
+	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
+	if (ret)
+		return ret;
+
+	trans_gap = pause_param->pause_trans_gap;
+	trans_time = le16_to_cpu(pause_param->pause_trans_time);
+
+	return hclge_mac_pause_param_cfg(hdev, mac_addr, trans_gap,
+					 trans_time);
+}
+
 static int hclge_fill_pri_array(struct hclge_dev *hdev, u8 *pri, u8 pri_id)
 {
 	u8 tc;
@@ -1056,6 +1096,15 @@ static int hclge_tm_schd_setup_hw(struct hclge_dev *hdev)
 	return hclge_tm_schd_mode_hw(hdev);
 }
 
+static int hclge_mac_pause_param_setup_hw(struct hclge_dev *hdev)
+{
+	struct hclge_mac *mac = &hdev->hw.mac;
+
+	return hclge_mac_pause_param_cfg(hdev, mac->mac_addr,
+					 HCLGE_DEFAULT_PAUSE_TRANS_GAP,
+					 HCLGE_DEFAULT_PAUSE_TRANS_TIME);
+}
+
 static int hclge_pfc_setup_hw(struct hclge_dev *hdev)
 {
 	u8 enable_bitmap = 0;
@@ -1102,8 +1151,13 @@ int hclge_pause_setup_hw(struct hclge_dev *hdev)
 	int ret;
 	u8 i;
 
-	if (hdev->tm_info.fc_mode != HCLGE_FC_PFC)
-		return hclge_mac_pause_setup_hw(hdev);
+	if (hdev->tm_info.fc_mode != HCLGE_FC_PFC) {
+		ret = hclge_mac_pause_setup_hw(hdev);
+		if (ret)
+			return ret;
+
+		return hclge_mac_pause_param_setup_hw(hdev);
+	}
 
 	/* Only DCB-supported dev supports qset back pressure and pfc cmd */
 	if (!hnae3_dev_dcb_supported(hdev))
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.h b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.h
index bf59961918ab..5401e7559437 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.h
@@ -18,6 +18,9 @@
 
 #define HCLGE_TM_PORT_BASE_MODE_MSK	BIT(0)
 
+#define HCLGE_DEFAULT_PAUSE_TRANS_GAP	0xFF
+#define HCLGE_DEFAULT_PAUSE_TRANS_TIME	0xFFFF
+
 /* SP or DWRR */
 #define HCLGE_TM_TX_SCHD_DWRR_MSK	BIT(0)
 #define HCLGE_TM_TX_SCHD_SP_MSK		(0xFE)
@@ -99,6 +102,13 @@ struct hclge_pfc_en_cmd {
 	u8 pri_en_bitmap;
 };
 
+struct hclge_cfg_pause_param_cmd {
+	u8 mac_addr[ETH_ALEN];
+	u8 pause_trans_gap;
+	u8 rsvd;
+	__le16 pause_trans_time;
+};
+
 struct hclge_port_shapping_cmd {
 	__le32 port_shapping_para;
 };
@@ -118,4 +128,6 @@ void hclge_tm_schd_info_update(struct hclge_dev *hdev, u8 num_tc);
 int hclge_tm_dwrr_cfg(struct hclge_dev *hdev);
 int hclge_tm_map_cfg(struct hclge_dev *hdev);
 int hclge_tm_init_hw(struct hclge_dev *hdev);
+int hclge_mac_pause_en_cfg(struct hclge_dev *hdev, bool tx, bool rx);
+int hclge_mac_pause_addr_cfg(struct hclge_dev *hdev, const u8 *mac_addr);
 #endif
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/Makefile b/drivers/net/ethernet/hisilicon/hns3/hns3vf/Makefile
new file mode 100644
index 000000000000..fb93bbd35845
--- /dev/null
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/Makefile
@@ -0,0 +1,9 @@
+# SPDX-License-Identifier: GPL-2.0+
+#
+# Makefile for the HISILICON network device drivers.
+#
+
+ccflags-y := -Idrivers/net/ethernet/hisilicon/hns3
+
+obj-$(CONFIG_HNS3_HCLGEVF) += hclgevf.o
+hclgevf-objs = hclgevf_main.o hclgevf_cmd.o hclgevf_mbx.o
\ No newline at end of file
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_cmd.c b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_cmd.c
new file mode 100644
index 000000000000..85985e731311
--- /dev/null
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_cmd.c
@@ -0,0 +1,342 @@
+// SPDX-License-Identifier: GPL-2.0+
+// Copyright (c) 2016-2017 Hisilicon Limited.
+
+#include <linux/device.h>
+#include <linux/dma-direction.h>
+#include <linux/dma-mapping.h>
+#include <linux/err.h>
+#include <linux/pci.h>
+#include <linux/slab.h>
+#include "hclgevf_cmd.h"
+#include "hclgevf_main.h"
+#include "hnae3.h"
+
+#define hclgevf_is_csq(ring) ((ring)->flag & HCLGEVF_TYPE_CSQ)
+#define hclgevf_ring_to_dma_dir(ring) (hclgevf_is_csq(ring) ? \
+					DMA_TO_DEVICE : DMA_FROM_DEVICE)
+#define cmq_ring_to_dev(ring)   (&(ring)->dev->pdev->dev)
+
+static int hclgevf_ring_space(struct hclgevf_cmq_ring *ring)
+{
+	int ntc = ring->next_to_clean;
+	int ntu = ring->next_to_use;
+	int used;
+
+	used = (ntu - ntc + ring->desc_num) % ring->desc_num;
+
+	return ring->desc_num - used - 1;
+}
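
hclgevf_ring_space() is the classic circular-queue free count: one slot is kept permanently unused so that head == tail unambiguously means "empty" rather than "full". A quick stand-alone check (ours):

/* Sketch (ours) of the ring-space arithmetic */
static int ring_space(int ntu, int ntc, int desc_num)
{
	int used = (ntu - ntc + desc_num) % desc_num;

	return desc_num - used - 1;	/* one slot reserved: full != empty */
}

/* ring_space(10, 5, 1024) == 1018; ring_space(5, 10, 1024) == 4 */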
+
+static int hclgevf_cmd_csq_clean(struct hclgevf_hw *hw)
+{
+	struct hclgevf_cmq_ring *csq = &hw->cmq.csq;
+	u16 ntc = csq->next_to_clean;
+	struct hclgevf_desc *desc;
+	int clean = 0;
+	u32 head;
+
+	desc = &csq->desc[ntc];
+	head = hclgevf_read_dev(hw, HCLGEVF_NIC_CSQ_HEAD_REG);
+	while (head != ntc) {
+		memset(desc, 0, sizeof(*desc));
+		ntc++;
+		if (ntc == csq->desc_num)
+			ntc = 0;
+		desc = &csq->desc[ntc];
+		clean++;
+	}
+	csq->next_to_clean = ntc;
+
+	return clean;
+}
+
+static bool hclgevf_cmd_csq_done(struct hclgevf_hw *hw)
+{
+	u32 head;
+
+	head = hclgevf_read_dev(hw, HCLGEVF_NIC_CSQ_HEAD_REG);
+
+	return head == hw->cmq.csq.next_to_use;
+}
+
+static bool hclgevf_is_special_opcode(u16 opcode)
+{
+	u16 spec_opcode[] = {0x30, 0x31, 0x32};
+	int i;
+
+	for (i = 0; i < ARRAY_SIZE(spec_opcode); i++) {
+		if (spec_opcode[i] == opcode)
+			return true;
+	}
+
+	return false;
+}
+
+static int hclgevf_alloc_cmd_desc(struct hclgevf_cmq_ring *ring)
+{
+	int size = ring->desc_num * sizeof(struct hclgevf_desc);
+
+	ring->desc = kzalloc(size, GFP_KERNEL);
+	if (!ring->desc)
+		return -ENOMEM;
+
+	ring->desc_dma_addr = dma_map_single(cmq_ring_to_dev(ring), ring->desc,
+					     size, DMA_BIDIRECTIONAL);
+
+	if (dma_mapping_error(cmq_ring_to_dev(ring), ring->desc_dma_addr)) {
+		ring->desc_dma_addr = 0;
+		kfree(ring->desc);
+		ring->desc = NULL;
+		return -ENOMEM;
+	}
+
+	return 0;
+}
+
+static void hclgevf_free_cmd_desc(struct hclgevf_cmq_ring *ring)
+{
+	dma_unmap_single(cmq_ring_to_dev(ring), ring->desc_dma_addr,
+			 ring->desc_num * sizeof(ring->desc[0]),
+			 hclgevf_ring_to_dma_dir(ring));
+
+	ring->desc_dma_addr = 0;
+	kfree(ring->desc);
+	ring->desc = NULL;
+}
+
+static int hclgevf_init_cmd_queue(struct hclgevf_dev *hdev,
+				  struct hclgevf_cmq_ring *ring)
+{
+	struct hclgevf_hw *hw = &hdev->hw;
+	int ring_type = ring->flag;
+	u32 reg_val;
+	int ret;
+
+	ring->desc_num = HCLGEVF_NIC_CMQ_DESC_NUM;
+	spin_lock_init(&ring->lock);
+	ring->next_to_clean = 0;
+	ring->next_to_use = 0;
+	ring->dev = hdev;
+
+	/* allocate CSQ/CRQ descriptor */
+	ret = hclgevf_alloc_cmd_desc(ring);
+	if (ret) {
+		dev_err(&hdev->pdev->dev, "failed(%d) to alloc %s desc\n", ret,
+			(ring_type == HCLGEVF_TYPE_CSQ) ? "CSQ" : "CRQ");
+		return ret;
+	}
+
+	/* initialize the hardware registers with csq/crq dma-address,
+	 * descriptor number, head & tail pointers
+	 */
+	switch (ring_type) {
+	case HCLGEVF_TYPE_CSQ:
+		reg_val = (u32)ring->desc_dma_addr;
+		hclgevf_write_dev(hw, HCLGEVF_NIC_CSQ_BASEADDR_L_REG, reg_val);
+		reg_val = (u32)((ring->desc_dma_addr >> 31) >> 1);
+		hclgevf_write_dev(hw, HCLGEVF_NIC_CSQ_BASEADDR_H_REG, reg_val);
+
+		reg_val = (ring->desc_num >> HCLGEVF_NIC_CMQ_DESC_NUM_S);
+		reg_val |= HCLGEVF_NIC_CMQ_ENABLE;
+		hclgevf_write_dev(hw, HCLGEVF_NIC_CSQ_DEPTH_REG, reg_val);
+
+		hclgevf_write_dev(hw, HCLGEVF_NIC_CSQ_TAIL_REG, 0);
+		hclgevf_write_dev(hw, HCLGEVF_NIC_CSQ_HEAD_REG, 0);
+		break;
+	case HCLGEVF_TYPE_CRQ:
+		reg_val = (u32)ring->desc_dma_addr;
+		hclgevf_write_dev(hw, HCLGEVF_NIC_CRQ_BASEADDR_L_REG, reg_val);
+		reg_val = (u32)((ring->desc_dma_addr >> 31) >> 1);
+		hclgevf_write_dev(hw, HCLGEVF_NIC_CRQ_BASEADDR_H_REG, reg_val);
+
+		reg_val = (ring->desc_num >> HCLGEVF_NIC_CMQ_DESC_NUM_S);
+		reg_val |= HCLGEVF_NIC_CMQ_ENABLE;
+		hclgevf_write_dev(hw, HCLGEVF_NIC_CRQ_DEPTH_REG, reg_val);
+
+		hclgevf_write_dev(hw, HCLGEVF_NIC_CRQ_TAIL_REG, 0);
+		hclgevf_write_dev(hw, HCLGEVF_NIC_CRQ_HEAD_REG, 0);
+		break;
+	}
+
+	return 0;
+}
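
Note the (addr >> 31) >> 1 when programming the BASEADDR_H registers above: a direct addr >> 32 would be undefined behavior on builds where dma_addr_t is a 32-bit type, while the split shift safely yields zero there. The kernel's upper_32_bits() macro uses the same trick. A minimal sketch (ours):

/* Sketch (ours): split a DMA address into low/high 32-bit register
 * values without ever shifting a 32-bit quantity by 32 bits.
 */
static void split_dma_addr(unsigned long long addr,
			   unsigned int *lo, unsigned int *hi)
{
	*lo = (unsigned int)addr;
	*hi = (unsigned int)((addr >> 31) >> 1);
}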
+
+void hclgevf_cmd_setup_basic_desc(struct hclgevf_desc *desc,
+				  enum hclgevf_opcode_type opcode, bool is_read)
+{
+	memset(desc, 0, sizeof(struct hclgevf_desc));
+	desc->opcode = cpu_to_le16(opcode);
+	desc->flag = cpu_to_le16(HCLGEVF_CMD_FLAG_NO_INTR |
+				 HCLGEVF_CMD_FLAG_IN);
+	if (is_read)
+		desc->flag |= cpu_to_le16(HCLGEVF_CMD_FLAG_WR);
+	else
+		desc->flag &= cpu_to_le16(~HCLGEVF_CMD_FLAG_WR);
+}
+
+/* hclgevf_cmd_send - send command to command queue
+ * @hw: pointer to the hw struct
+ * @desc: prefilled descriptor for describing the command
+ * @num: the number of descriptors to be sent
+ *
+ * This is the main command-send routine for the command queue; it posts
+ * the descriptors to the queue and cleans the completed ones.
+ */
+int hclgevf_cmd_send(struct hclgevf_hw *hw, struct hclgevf_desc *desc, int num)
+{
+	struct hclgevf_dev *hdev = (struct hclgevf_dev *)hw->hdev;
+	struct hclgevf_desc *desc_to_use;
+	bool complete = false;
+	u32 timeout = 0;
+	int handle = 0;
+	int status = 0;
+	u16 retval;
+	u16 opcode;
+	int ntc;
+
+	spin_lock_bh(&hw->cmq.csq.lock);
+
+	if (num > hclgevf_ring_space(&hw->cmq.csq)) {
+		spin_unlock_bh(&hw->cmq.csq.lock);
+		return -EBUSY;
+	}
+
+	/* Record the location of desc in the ring for this time
+	 * which will be used for hardware write back
+	 */
+	ntc = hw->cmq.csq.next_to_use;
+	opcode = le16_to_cpu(desc[0].opcode);
+	while (handle < num) {
+		desc_to_use = &hw->cmq.csq.desc[hw->cmq.csq.next_to_use];
+		*desc_to_use = desc[handle];
+		(hw->cmq.csq.next_to_use)++;
+		if (hw->cmq.csq.next_to_use == hw->cmq.csq.desc_num)
+			hw->cmq.csq.next_to_use = 0;
+		handle++;
+	}
+
+	/* Write to hardware */
+	hclgevf_write_dev(hw, HCLGEVF_NIC_CSQ_TAIL_REG,
+			  hw->cmq.csq.next_to_use);
+
+	/* If the command is sync, wait for the firmware to write back,
+	 * if multiple descriptors are to be sent, use the first one to check
+	 */
+	if (HCLGEVF_SEND_SYNC(le16_to_cpu(desc->flag))) {
+		do {
+			if (hclgevf_cmd_csq_done(hw))
+				break;
+			udelay(1);
+			timeout++;
+		} while (timeout < hw->cmq.tx_timeout);
+	}
+
+	if (hclgevf_cmd_csq_done(hw)) {
+		complete = true;
+		handle = 0;
+
+		while (handle < num) {
+			/* Get the result of hardware write back */
+			desc_to_use = &hw->cmq.csq.desc[ntc];
+			desc[handle] = *desc_to_use;
+
+			if (likely(!hclgevf_is_special_opcode(opcode)))
+				retval = le16_to_cpu(desc[handle].retval);
+			else
+				retval = le16_to_cpu(desc[0].retval);
+
+			if ((enum hclgevf_cmd_return_status)retval ==
+			    HCLGEVF_CMD_EXEC_SUCCESS)
+				status = 0;
+			else
+				status = -EIO;
+			hw->cmq.last_status = (enum hclgevf_cmd_status)retval;
+			ntc++;
+			handle++;
+			if (ntc == hw->cmq.csq.desc_num)
+				ntc = 0;
+		}
+	}
+
+	if (!complete)
+		status = -EAGAIN;
+
+	/* Clean the command send queue */
+	handle = hclgevf_cmd_csq_clean(hw);
+	if (handle != num) {
+		dev_warn(&hdev->pdev->dev,
+			 "cleaned %d, need to clean %d\n", handle, num);
+	}
+
+	spin_unlock_bh(&hw->cmq.csq.lock);
+
+	return status;
+}
+
+static int hclgevf_cmd_query_firmware_version(struct hclgevf_hw *hw,
+					      u32 *version)
+{
+	struct hclgevf_query_version_cmd *resp;
+	struct hclgevf_desc desc;
+	int status;
+
+	resp = (struct hclgevf_query_version_cmd *)desc.data;
+
+	hclgevf_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_QUERY_FW_VER, true);
+	status = hclgevf_cmd_send(hw, &desc, 1);
+	if (!status)
+		*version = le32_to_cpu(resp->firmware);
+
+	return status;
+}
+
+int hclgevf_cmd_init(struct hclgevf_dev *hdev)
+{
+	u32 version;
+	int ret;
+
+	/* setup Tx write back timeout */
+	hdev->hw.cmq.tx_timeout = HCLGEVF_CMDQ_TX_TIMEOUT;
+
+	/* setup queue CSQ/CRQ rings */
+	hdev->hw.cmq.csq.flag = HCLGEVF_TYPE_CSQ;
+	ret = hclgevf_init_cmd_queue(hdev, &hdev->hw.cmq.csq);
+	if (ret) {
+		dev_err(&hdev->pdev->dev,
+			"failed(%d) to initialize CSQ ring\n", ret);
+		return ret;
+	}
+
+	hdev->hw.cmq.crq.flag = HCLGEVF_TYPE_CRQ;
+	ret = hclgevf_init_cmd_queue(hdev, &hdev->hw.cmq.crq);
+	if (ret) {
+		dev_err(&hdev->pdev->dev,
+			"failed(%d) to initialize CRQ ring\n", ret);
+		goto err_csq;
+	}
+
+	/* get firmware version */
+	ret = hclgevf_cmd_query_firmware_version(&hdev->hw, &version);
+	if (ret) {
+		dev_err(&hdev->pdev->dev,
+			"failed(%d) to query firmware version\n", ret);
+		goto err_crq;
+	}
+	hdev->fw_version = version;
+
+	dev_info(&hdev->pdev->dev, "The firmware version is %08x\n", version);
+
+	return 0;
+err_crq:
+	hclgevf_free_cmd_desc(&hdev->hw.cmq.crq);
+err_csq:
+	hclgevf_free_cmd_desc(&hdev->hw.cmq.csq);
+
+	return ret;
+}
+
+void hclgevf_cmd_uninit(struct hclgevf_dev *hdev)
+{
+	hclgevf_free_cmd_desc(&hdev->hw.cmq.csq);
+	hclgevf_free_cmd_desc(&hdev->hw.cmq.crq);
+}
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_cmd.h b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_cmd.h
new file mode 100644
index 000000000000..2caca9317f8c
--- /dev/null
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_cmd.h
@@ -0,0 +1,248 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/* Copyright (c) 2016-2017 Hisilicon Limited. */
+
+#ifndef __HCLGEVF_CMD_H
+#define __HCLGEVF_CMD_H
+#include <linux/io.h>
+#include <linux/types.h>
+#include "hnae3.h"
+
+#define HCLGEVF_CMDQ_TX_TIMEOUT		200
+#define HCLGEVF_CMDQ_RX_INVLD_B		0
+#define HCLGEVF_CMDQ_RX_OUTVLD_B	1
+
+struct hclgevf_hw;
+struct hclgevf_dev;
+
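+/* command descriptor: an 8 byte header (opcode/flag/retval/rsv) followed by
+ * 24 bytes of payload; the per-command structs below overlay desc.data
+ */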
+struct hclgevf_desc {
+	__le16 opcode;
+	__le16 flag;
+	__le16 retval;
+	__le16 rsv;
+	__le32 data[6];
+};
+
+struct hclgevf_desc_cb {
+	dma_addr_t dma;
+	void *va;
+	u32 length;
+};
+
+struct hclgevf_cmq_ring {
+	dma_addr_t desc_dma_addr;
+	struct hclgevf_desc *desc;
+	struct hclgevf_desc_cb *desc_cb;
+	struct hclgevf_dev  *dev;
+	u32 head;
+	u32 tail;
+
+	u16 buf_size;
+	u16 desc_num;
+	int next_to_use;
+	int next_to_clean;
+	u8 flag;
+	spinlock_t lock; /* Command queue lock */
+};
+
+enum hclgevf_cmd_return_status {
+	HCLGEVF_CMD_EXEC_SUCCESS	= 0,
+	HCLGEVF_CMD_NO_AUTH	= 1,
+	HCLGEVF_CMD_NOT_EXEC	= 2,
+	HCLGEVF_CMD_QUEUE_FULL	= 3,
+};
+
+enum hclgevf_cmd_status {
+	HCLGEVF_STATUS_SUCCESS	= 0,
+	HCLGEVF_ERR_CSQ_FULL	= -1,
+	HCLGEVF_ERR_CSQ_TIMEOUT	= -2,
+	HCLGEVF_ERR_CSQ_ERROR	= -3
+};
+
+struct hclgevf_cmq {
+	struct hclgevf_cmq_ring csq;
+	struct hclgevf_cmq_ring crq;
+	u16 tx_timeout; /* Tx timeout */
+	enum hclgevf_cmd_status last_status;
+};
+
+#define HCLGEVF_CMD_FLAG_IN_VALID_SHIFT		0
+#define HCLGEVF_CMD_FLAG_OUT_VALID_SHIFT	1
+#define HCLGEVF_CMD_FLAG_NEXT_SHIFT		2
+#define HCLGEVF_CMD_FLAG_WR_OR_RD_SHIFT		3
+#define HCLGEVF_CMD_FLAG_NO_INTR_SHIFT		4
+#define HCLGEVF_CMD_FLAG_ERR_INTR_SHIFT		5
+
+#define HCLGEVF_CMD_FLAG_IN		BIT(HCLGEVF_CMD_FLAG_IN_VALID_SHIFT)
+#define HCLGEVF_CMD_FLAG_OUT		BIT(HCLGEVF_CMD_FLAG_OUT_VALID_SHIFT)
+#define HCLGEVF_CMD_FLAG_NEXT		BIT(HCLGEVF_CMD_FLAG_NEXT_SHIFT)
+#define HCLGEVF_CMD_FLAG_WR		BIT(HCLGEVF_CMD_FLAG_WR_OR_RD_SHIFT)
+#define HCLGEVF_CMD_FLAG_NO_INTR	BIT(HCLGEVF_CMD_FLAG_NO_INTR_SHIFT)
+#define HCLGEVF_CMD_FLAG_ERR_INTR	BIT(HCLGEVF_CMD_FLAG_ERR_INTR_SHIFT)
+
+enum hclgevf_opcode_type {
+	/* Generic command */
+	HCLGEVF_OPC_QUERY_FW_VER	= 0x0001,
+	/* TQP command */
+	HCLGEVF_OPC_QUERY_TX_STATUS	= 0x0B03,
+	HCLGEVF_OPC_QUERY_RX_STATUS	= 0x0B13,
+	HCLGEVF_OPC_CFG_COM_TQP_QUEUE	= 0x0B20,
+	/* RSS cmd */
+	HCLGEVF_OPC_RSS_GENERIC_CONFIG	= 0x0D01,
+	HCLGEVF_OPC_RSS_INDIR_TABLE	= 0x0D07,
+	HCLGEVF_OPC_RSS_TC_MODE		= 0x0D08,
+	/* Mailbox cmd */
+	HCLGEVF_OPC_MBX_VF_TO_PF	= 0x2001,
+};
+
+#define HCLGEVF_TQP_REG_OFFSET		0x80000
+#define HCLGEVF_TQP_REG_SIZE		0x200
+
+struct hclgevf_tqp_map {
+	__le16 tqp_id;	/* Absolute tqp id in this pf */
+	u8 tqp_vf; /* VF id */
+#define HCLGEVF_TQP_MAP_TYPE_PF		0
+#define HCLGEVF_TQP_MAP_TYPE_VF		1
+#define HCLGEVF_TQP_MAP_TYPE_B		0
+#define HCLGEVF_TQP_MAP_EN_B		1
+	u8 tqp_flag;	/* Indicate it's pf or vf tqp */
+	__le16 tqp_vid; /* Virtual id in this pf/vf */
+	u8 rsv[18];
+};
+
+#define HCLGEVF_VECTOR_ELEMENTS_PER_CMD	10
+
+enum hclgevf_int_type {
+	HCLGEVF_INT_TX = 0,
+	HCLGEVF_INT_RX,
+	HCLGEVF_INT_EVENT,
+};
+
+struct hclgevf_ctrl_vector_chain {
+	u8 int_vector_id;
+	u8 int_cause_num;
+#define HCLGEVF_INT_TYPE_S	0
+#define HCLGEVF_INT_TYPE_M	0x3
+#define HCLGEVF_TQP_ID_S	2
+#define HCLGEVF_TQP_ID_M	(0x3fff << HCLGEVF_TQP_ID_S)
+	__le16 tqp_type_and_id[HCLGEVF_VECTOR_ELEMENTS_PER_CMD];
+	u8 vfid;
+	u8 resv;
+};
+
+struct hclgevf_query_version_cmd {
+	__le32 firmware;
+	__le32 firmware_rsv[5];
+};
+
+#define HCLGEVF_RSS_HASH_KEY_OFFSET	4
+#define HCLGEVF_RSS_HASH_KEY_NUM	16
+struct hclgevf_rss_config_cmd {
+	u8 hash_config;
+	u8 rsv[7];
+	u8 hash_key[HCLGEVF_RSS_HASH_KEY_NUM];
+};
+
+struct hclgevf_rss_input_tuple_cmd {
+	u8 ipv4_tcp_en;
+	u8 ipv4_udp_en;
+	u8 ipv4_stcp_en;
+	u8 ipv4_fragment_en;
+	u8 ipv6_tcp_en;
+	u8 ipv6_udp_en;
+	u8 ipv6_stcp_en;
+	u8 ipv6_fragment_en;
+	u8 rsv[16];
+};
+
+#define HCLGEVF_RSS_CFG_TBL_SIZE	16
+
+struct hclgevf_rss_indirection_table_cmd {
+	u16 start_table_index;
+	u16 rss_set_bitmap;
+	u8 rsv[4];
+	u8 rss_result[HCLGEVF_RSS_CFG_TBL_SIZE];
+};
+
+#define HCLGEVF_RSS_TC_OFFSET_S		0
+#define HCLGEVF_RSS_TC_OFFSET_M		(0x3ff << HCLGEVF_RSS_TC_OFFSET_S)
+#define HCLGEVF_RSS_TC_SIZE_S		12
+#define HCLGEVF_RSS_TC_SIZE_M		(0x7 << HCLGEVF_RSS_TC_SIZE_S)
+#define HCLGEVF_RSS_TC_VALID_B		15
+#define HCLGEVF_MAX_TC_NUM		8
+struct hclgevf_rss_tc_mode_cmd {
+	u16 rss_tc_mode[HCLGEVF_MAX_TC_NUM];
+	u8 rsv[8];
+};
+
+#define HCLGEVF_LINK_STS_B	0
+#define HCLGEVF_LINK_STATUS	BIT(HCLGEVF_LINK_STS_B)
+struct hclgevf_link_status_cmd {
+	u8 status;
+	u8 rsv[23];
+};
+
+#define HCLGEVF_RING_ID_MASK	0x3ff
+#define HCLGEVF_TQP_ENABLE_B	0
+
+struct hclgevf_cfg_com_tqp_queue_cmd {
+	__le16 tqp_id;
+	__le16 stream_id;
+	u8 enable;
+	u8 rsv[19];
+};
+
+struct hclgevf_cfg_tx_queue_pointer_cmd {
+	__le16 tqp_id;
+	__le16 tx_tail;
+	__le16 tx_head;
+	__le16 fbd_num;
+	__le16 ring_offset;
+	u8 rsv[14];
+};
+
+#define HCLGEVF_TYPE_CRQ		0
+#define HCLGEVF_TYPE_CSQ		1
+#define HCLGEVF_NIC_CSQ_BASEADDR_L_REG	0x27000
+#define HCLGEVF_NIC_CSQ_BASEADDR_H_REG	0x27004
+#define HCLGEVF_NIC_CSQ_DEPTH_REG	0x27008
+#define HCLGEVF_NIC_CSQ_TAIL_REG	0x27010
+#define HCLGEVF_NIC_CSQ_HEAD_REG	0x27014
+#define HCLGEVF_NIC_CRQ_BASEADDR_L_REG	0x27018
+#define HCLGEVF_NIC_CRQ_BASEADDR_H_REG	0x2701c
+#define HCLGEVF_NIC_CRQ_DEPTH_REG	0x27020
+#define HCLGEVF_NIC_CRQ_TAIL_REG	0x27024
+#define HCLGEVF_NIC_CRQ_HEAD_REG	0x27028
+#define HCLGEVF_NIC_CMQ_EN_B		16
+#define HCLGEVF_NIC_CMQ_ENABLE		BIT(HCLGEVF_NIC_CMQ_EN_B)
+#define HCLGEVF_NIC_CMQ_DESC_NUM	1024
+#define HCLGEVF_NIC_CMQ_DESC_NUM_S	3
+#define HCLGEVF_NIC_CMDQ_INT_SRC_REG	0x27100
+
+static inline void hclgevf_write_reg(void __iomem *base, u32 reg, u32 value)
+{
+	writel(value, base + reg);
+}
+
+static inline u32 hclgevf_read_reg(u8 __iomem *base, u32 reg)
+{
+	u8 __iomem *reg_addr = READ_ONCE(base);
+
+	return readl(reg_addr + reg);
+}
+
+#define hclgevf_write_dev(a, reg, value) \
+	hclgevf_write_reg((a)->io_base, (reg), (value))
+#define hclgevf_read_dev(a, reg) \
+	hclgevf_read_reg((a)->io_base, (reg))
+
+#define HCLGEVF_SEND_SYNC(flag) \
+	((flag) & HCLGEVF_CMD_FLAG_NO_INTR)
+
+int hclgevf_cmd_init(struct hclgevf_dev *hdev);
+void hclgevf_cmd_uninit(struct hclgevf_dev *hdev);
+
+int hclgevf_cmd_send(struct hclgevf_hw *hw, struct hclgevf_desc *desc, int num);
+void hclgevf_cmd_setup_basic_desc(struct hclgevf_desc *desc,
+				  enum hclgevf_opcode_type opcode,
+				  bool is_read);
+#endif
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c
new file mode 100644
index 000000000000..0d89965f7928
--- /dev/null
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c
@@ -0,0 +1,1505 @@
+// SPDX-License-Identifier: GPL-2.0+
+// Copyright (c) 2016-2017 Hisilicon Limited.
+
+#include <linux/etherdevice.h>
+#include "hclgevf_cmd.h"
+#include "hclgevf_main.h"
+#include "hclge_mbx.h"
+#include "hnae3.h"
+
+#define HCLGEVF_NAME	"hclgevf"
+
+static struct hnae3_ae_algo ae_algovf;
+
+static const struct pci_device_id ae_algovf_pci_tbl[] = {
+	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_100G_VF), 0},
+	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_100G_RDMA_DCB_PFC_VF), 0},
+	/* required last entry */
+	{0, }
+};
+
+static inline struct hclgevf_dev *hclgevf_ae_get_hdev(
+	struct hnae3_handle *handle)
+{
+	return container_of(handle, struct hclgevf_dev, nic);
+}
+
+static int hclgevf_tqps_update_stats(struct hnae3_handle *handle)
+{
+	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
+	struct hnae3_queue *queue;
+	struct hclgevf_desc desc;
+	struct hclgevf_tqp *tqp;
+	int status;
+	int i;
+
+	for (i = 0; i < hdev->num_tqps; i++) {
+		queue = handle->kinfo.tqp[i];
+		tqp = container_of(queue, struct hclgevf_tqp, q);
+		hclgevf_cmd_setup_basic_desc(&desc,
+					     HCLGEVF_OPC_QUERY_RX_STATUS,
+					     true);
+
+		desc.data[0] = cpu_to_le32(tqp->index & 0x1ff);
+		status = hclgevf_cmd_send(&hdev->hw, &desc, 1);
+		if (status) {
+			dev_err(&hdev->pdev->dev,
+				"Query tqp stat fail, status = %d,queue = %d\n",
+				status,	i);
+			return status;
+		}
+		tqp->tqp_stats.rcb_rx_ring_pktnum_rcd +=
+			le32_to_cpu(desc.data[1]);
+
+		hclgevf_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_QUERY_TX_STATUS,
+					     true);
+
+		desc.data[0] = cpu_to_le32(tqp->index & 0x1ff);
+		status = hclgevf_cmd_send(&hdev->hw, &desc, 1);
+		if (status) {
+			dev_err(&hdev->pdev->dev,
+				"Query tqp stat fail, status = %d,queue = %d\n",
+				status, i);
+			return status;
+		}
+		tqp->tqp_stats.rcb_tx_ring_pktnum_rcd +=
+			le32_to_cpu(desc.data[1]);
+	}
+
+	return 0;
+}
+
+static u64 *hclgevf_tqps_get_stats(struct hnae3_handle *handle, u64 *data)
+{
+	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
+	struct hclgevf_tqp *tqp;
+	u64 *buff = data;
+	int i;
+
+	for (i = 0; i < hdev->num_tqps; i++) {
+		tqp = container_of(handle->kinfo.tqp[i], struct hclgevf_tqp, q);
+		*buff++ = tqp->tqp_stats.rcb_tx_ring_pktnum_rcd;
+	}
+	for (i = 0; i < hdev->num_tqps; i++) {
+		tqp = container_of(handle->kinfo.tqp[i], struct hclgevf_tqp, q);
+		*buff++ = tqp->tqp_stats.rcb_rx_ring_pktnum_rcd;
+	}
+
+	return buff;
+}
+
+static int hclgevf_tqps_get_sset_count(struct hnae3_handle *handle, int strset)
+{
+	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
+
+	return hdev->num_tqps * 2;
+}
+
+static u8 *hclgevf_tqps_get_strings(struct hnae3_handle *handle, u8 *data)
+{
+	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
+	u8 *buff = data;
+	int i = 0;
+
+	for (i = 0; i < hdev->num_tqps; i++) {
+		struct hclgevf_tqp *tqp = container_of(handle->kinfo.tqp[i],
+			struct hclgevf_tqp, q);
+		snprintf(buff, ETH_GSTRING_LEN, "txq#%d_pktnum_rcd",
+			 tqp->index);
+		buff += ETH_GSTRING_LEN;
+	}
+
+	for (i = 0; i < hdev->num_tqps; i++) {
+		struct hclgevf_tqp *tqp = container_of(handle->kinfo.tqp[i],
+			struct hclgevf_tqp, q);
+		snprintf(buff, ETH_GSTRING_LEN, "rxq#%d_pktnum_rcd",
+			 tqp->index);
+		buff += ETH_GSTRING_LEN;
+	}
+
+	return buff;
+}
+
+static void hclgevf_update_stats(struct hnae3_handle *handle,
+				 struct net_device_stats *net_stats)
+{
+	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
+	int status;
+
+	status = hclgevf_tqps_update_stats(handle);
+	if (status)
+		dev_err(&hdev->pdev->dev,
+			"VF update of TQPS stats fail, status = %d.\n",
+			status);
+}
+
+static int hclgevf_get_sset_count(struct hnae3_handle *handle, int strset)
+{
+	if (strset == ETH_SS_TEST)
+		return -EOPNOTSUPP;
+	else if (strset == ETH_SS_STATS)
+		return hclgevf_tqps_get_sset_count(handle, strset);
+
+	return 0;
+}
+
+static void hclgevf_get_strings(struct hnae3_handle *handle, u32 strset,
+				u8 *data)
+{
+	u8 *p = data;
+
+	if (strset == ETH_SS_STATS)
+		p = hclgevf_tqps_get_strings(handle, p);
+}
+
+static void hclgevf_get_stats(struct hnae3_handle *handle, u64 *data)
+{
+	hclgevf_tqps_get_stats(handle, data);
+}
+
+static int hclgevf_get_tc_info(struct hclgevf_dev *hdev)
+{
+	u8 resp_msg;
+	int status;
+
+	status = hclgevf_send_mbx_msg(hdev, HCLGE_MBX_GET_TCINFO, 0, NULL, 0,
+				      true, &resp_msg, sizeof(u8));
+	if (status) {
+		dev_err(&hdev->pdev->dev,
+			"VF request to get TC info from PF failed %d",
+			status);
+		return status;
+	}
+
+	hdev->hw_tc_map = resp_msg;
+
+	return 0;
+}
+
+static int hclgevf_get_queue_info(struct hclgevf_dev *hdev)
+{
+#define HCLGEVF_TQPS_RSS_INFO_LEN	8
+	u8 resp_msg[HCLGEVF_TQPS_RSS_INFO_LEN];
+	int status;
+
+	status = hclgevf_send_mbx_msg(hdev, HCLGE_MBX_GET_QINFO, 0, NULL, 0,
+				      true, resp_msg,
+				      HCLGEVF_TQPS_RSS_INFO_LEN);
+	if (status) {
+		dev_err(&hdev->pdev->dev,
+			"VF request to get tqp info from PF failed %d",
+			status);
+		return status;
+	}
+
+	memcpy(&hdev->num_tqps, &resp_msg[0], sizeof(u16));
+	memcpy(&hdev->rss_size_max, &resp_msg[2], sizeof(u16));
+	memcpy(&hdev->num_desc, &resp_msg[4], sizeof(u16));
+	memcpy(&hdev->rx_buf_len, &resp_msg[6], sizeof(u16));
+
+	return 0;
+}
+
+static int hclgevf_alloc_tqps(struct hclgevf_dev *hdev)
+{
+	struct hclgevf_tqp *tqp;
+	int i;
+
+	hdev->htqp = devm_kcalloc(&hdev->pdev->dev, hdev->num_tqps,
+				  sizeof(struct hclgevf_tqp), GFP_KERNEL);
+	if (!hdev->htqp)
+		return -ENOMEM;
+
+	tqp = hdev->htqp;
+
+	for (i = 0; i < hdev->num_tqps; i++) {
+		tqp->dev = &hdev->pdev->dev;
+		tqp->index = i;
+
+		tqp->q.ae_algo = &ae_algovf;
+		tqp->q.buf_size = hdev->rx_buf_len;
+		tqp->q.desc_num = hdev->num_desc;
+		tqp->q.io_base = hdev->hw.io_base + HCLGEVF_TQP_REG_OFFSET +
+			i * HCLGEVF_TQP_REG_SIZE;
+
+		tqp++;
+	}
+
+	return 0;
+}
+
+static int hclgevf_knic_setup(struct hclgevf_dev *hdev)
+{
+	struct hnae3_handle *nic = &hdev->nic;
+	struct hnae3_knic_private_info *kinfo;
+	u16 new_tqps = hdev->num_tqps;
+	int i;
+
+	kinfo = &nic->kinfo;
+	kinfo->num_tc = 0;
+	kinfo->num_desc = hdev->num_desc;
+	kinfo->rx_buf_len = hdev->rx_buf_len;
+	for (i = 0; i < HCLGEVF_MAX_TC_NUM; i++)
+		if (hdev->hw_tc_map & BIT(i))
+			kinfo->num_tc++;
+
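+	/* rss_size is the per-TC queue count, capped by the hardware maximum
+	 * and by an even split of the available TQPs across the TCs
+	 */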
+	kinfo->rss_size
+		= min_t(u16, hdev->rss_size_max, new_tqps / kinfo->num_tc);
+	new_tqps = kinfo->rss_size * kinfo->num_tc;
+	kinfo->num_tqps = min(new_tqps, hdev->num_tqps);
+
+	kinfo->tqp = devm_kcalloc(&hdev->pdev->dev, kinfo->num_tqps,
+				  sizeof(struct hnae3_queue *), GFP_KERNEL);
+	if (!kinfo->tqp)
+		return -ENOMEM;
+
+	for (i = 0; i < kinfo->num_tqps; i++) {
+		hdev->htqp[i].q.handle = &hdev->nic;
+		hdev->htqp[i].q.tqp_index = i;
+		kinfo->tqp[i] = &hdev->htqp[i].q;
+	}
+
+	return 0;
+}
+
+static void hclgevf_request_link_info(struct hclgevf_dev *hdev)
+{
+	int status;
+	u8 resp_msg;
+
+	status = hclgevf_send_mbx_msg(hdev, HCLGE_MBX_GET_LINK_STATUS, 0, NULL,
+				      0, false, &resp_msg, sizeof(u8));
+	if (status)
+		dev_err(&hdev->pdev->dev,
+			"VF failed to fetch link status(%d) from PF", status);
+}
+
+void hclgevf_update_link_status(struct hclgevf_dev *hdev, int link_state)
+{
+	struct hnae3_handle *handle = &hdev->nic;
+	struct hnae3_client *client;
+
+	client = handle->client;
+
+	if (link_state != hdev->hw.mac.link) {
+		client->ops->link_status_change(handle, !!link_state);
+		hdev->hw.mac.link = link_state;
+	}
+}
+
+static int hclgevf_set_handle_info(struct hclgevf_dev *hdev)
+{
+	struct hnae3_handle *nic = &hdev->nic;
+	int ret;
+
+	nic->ae_algo = &ae_algovf;
+	nic->pdev = hdev->pdev;
+	nic->numa_node_mask = hdev->numa_node_mask;
+	nic->flags |= HNAE3_SUPPORT_VF;
+
+	if (hdev->ae_dev->dev_type != HNAE3_DEV_KNIC) {
+		dev_err(&hdev->pdev->dev, "unsupported device type %d\n",
+			hdev->ae_dev->dev_type);
+		return -EINVAL;
+	}
+
+	ret = hclgevf_knic_setup(hdev);
+	if (ret)
+		dev_err(&hdev->pdev->dev, "VF knic setup failed %d\n",
+			ret);
+	return ret;
+}
+
+static void hclgevf_free_vector(struct hclgevf_dev *hdev, int vector_id)
+{
+	hdev->vector_status[vector_id] = HCLGEVF_INVALID_VPORT;
+	hdev->num_msi_left += 1;
+	hdev->num_msi_used -= 1;
+}
+
+static int hclgevf_get_vector(struct hnae3_handle *handle, u16 vector_num,
+			      struct hnae3_vector_info *vector_info)
+{
+	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
+	struct hnae3_vector_info *vector = vector_info;
+	int alloc = 0;
+	int i, j;
+
+	vector_num = min(hdev->num_msi_left, vector_num);
+
+	for (j = 0; j < vector_num; j++) {
+		for (i = HCLGEVF_MISC_VECTOR_NUM + 1; i < hdev->num_msi; i++) {
+			if (hdev->vector_status[i] == HCLGEVF_INVALID_VPORT) {
+				vector->vector = pci_irq_vector(hdev->pdev, i);
+				vector->io_addr = hdev->hw.io_base +
+					HCLGEVF_VECTOR_REG_BASE +
+					(i - 1) * HCLGEVF_VECTOR_REG_OFFSET;
+				hdev->vector_status[i] = 0;
+				hdev->vector_irq[i] = vector->vector;
+
+				vector++;
+				alloc++;
+
+				break;
+			}
+		}
+	}
+	hdev->num_msi_left -= alloc;
+	hdev->num_msi_used += alloc;
+
+	return alloc;
+}
+
+static int hclgevf_get_vector_index(struct hclgevf_dev *hdev, int vector)
+{
+	int i;
+
+	for (i = 0; i < hdev->num_msi; i++)
+		if (vector == hdev->vector_irq[i])
+			return i;
+
+	return -EINVAL;
+}
+
+static u32 hclgevf_get_rss_key_size(struct hnae3_handle *handle)
+{
+	return HCLGEVF_RSS_KEY_SIZE;
+}
+
+static u32 hclgevf_get_rss_indir_size(struct hnae3_handle *handle)
+{
+	return HCLGEVF_RSS_IND_TBL_SIZE;
+}
+
+static int hclgevf_set_rss_indir_table(struct hclgevf_dev *hdev)
+{
+	const u8 *indir = hdev->rss_cfg.rss_indirection_tbl;
+	struct hclgevf_rss_indirection_table_cmd *req;
+	struct hclgevf_desc desc;
+	int status;
+	int i, j;
+
+	req = (struct hclgevf_rss_indirection_table_cmd *)desc.data;
+
+	for (i = 0; i < HCLGEVF_RSS_CFG_TBL_NUM; i++) {
+		hclgevf_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_RSS_INDIR_TABLE,
+					     false);
+		req->start_table_index = i * HCLGEVF_RSS_CFG_TBL_SIZE;
+		req->rss_set_bitmap = HCLGEVF_RSS_SET_BITMAP_MSK;
+		for (j = 0; j < HCLGEVF_RSS_CFG_TBL_SIZE; j++)
+			req->rss_result[j] =
+				indir[i * HCLGEVF_RSS_CFG_TBL_SIZE + j];
+
+		status = hclgevf_cmd_send(&hdev->hw, &desc, 1);
+		if (status) {
+			dev_err(&hdev->pdev->dev,
+				"VF failed(=%d) to set RSS indirection table\n",
+				status);
+			return status;
+		}
+	}
+
+	return 0;
+}
+
+static int hclgevf_set_rss_tc_mode(struct hclgevf_dev *hdev, u16 rss_size)
+{
+	struct hclgevf_rss_tc_mode_cmd *req;
+	u16 tc_offset[HCLGEVF_MAX_TC_NUM];
+	u16 tc_valid[HCLGEVF_MAX_TC_NUM];
+	u16 tc_size[HCLGEVF_MAX_TC_NUM];
+	struct hclgevf_desc desc;
+	u16 roundup_size;
+	int status;
+	int i;
+
+	req = (struct hclgevf_rss_tc_mode_cmd *)desc.data;
+
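+	/* the hardware's tc_size field takes the per-TC queue count as a
+	 * log2 value, so round rss_size up to a power of two first
+	 */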
+	roundup_size = roundup_pow_of_two(rss_size);
+	roundup_size = ilog2(roundup_size);
+
+	for (i = 0; i < HCLGEVF_MAX_TC_NUM; i++) {
+		tc_valid[i] = !!(hdev->hw_tc_map & BIT(i));
+		tc_size[i] = roundup_size;
+		tc_offset[i] = rss_size * i;
+	}
+
+	hclgevf_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_RSS_TC_MODE, false);
+	for (i = 0; i < HCLGEVF_MAX_TC_NUM; i++) {
+		hnae_set_bit(req->rss_tc_mode[i], HCLGEVF_RSS_TC_VALID_B,
+			     (tc_valid[i] & 0x1));
+		hnae_set_field(req->rss_tc_mode[i], HCLGEVF_RSS_TC_SIZE_M,
+			       HCLGEVF_RSS_TC_SIZE_S, tc_size[i]);
+		hnae_set_field(req->rss_tc_mode[i], HCLGEVF_RSS_TC_OFFSET_M,
+			       HCLGEVF_RSS_TC_OFFSET_S, tc_offset[i]);
+	}
+	status = hclgevf_cmd_send(&hdev->hw, &desc, 1);
+	if (status)
+		dev_err(&hdev->pdev->dev,
+			"VF failed(=%d) to set rss tc mode\n", status);
+
+	return status;
+}
+
+static int hclgevf_get_rss_hw_cfg(struct hnae3_handle *handle, u8 *hash,
+				  u8 *key)
+{
+	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
+	struct hclgevf_rss_config_cmd *req;
+	int lkup_times;
+	struct hclgevf_desc desc;
+	int key_offset;
+	int key_size;
+	int status;
+
+	req = (struct hclgevf_rss_config_cmd *)desc.data;
+	lkup_times = key ? 3 : (hash ? 1 : 0);
+
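+	/* the 40 byte hash key spans three descriptors (16 + 16 + 8 bytes),
+	 * so three lookups are needed when the caller wants the key and a
+	 * single one when only the hash algorithm is queried
+	 */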
+	for (key_offset = 0; key_offset < lkup_times; key_offset++) {
+		hclgevf_cmd_setup_basic_desc(&desc,
+					     HCLGEVF_OPC_RSS_GENERIC_CONFIG,
+					     true);
+		req->hash_config |= (key_offset << HCLGEVF_RSS_HASH_KEY_OFFSET);
+
+		status = hclgevf_cmd_send(&hdev->hw, &desc, 1);
+		if (status) {
+			dev_err(&hdev->pdev->dev,
+				"failed to get hardware RSS cfg, status = %d\n",
+				status);
+			return status;
+		}
+
+		if (key_offset == 2)
+			key_size =
+			HCLGEVF_RSS_KEY_SIZE - HCLGEVF_RSS_HASH_KEY_NUM * 2;
+		else
+			key_size = HCLGEVF_RSS_HASH_KEY_NUM;
+
+		if (key)
+			memcpy(key + key_offset * HCLGEVF_RSS_HASH_KEY_NUM,
+			       req->hash_key,
+			       key_size);
+	}
+
+	if (hash) {
+		if ((req->hash_config & 0xf) == HCLGEVF_RSS_HASH_ALGO_TOEPLITZ)
+			*hash = ETH_RSS_HASH_TOP;
+		else
+			*hash = ETH_RSS_HASH_UNKNOWN;
+	}
+
+	return 0;
+}
+
+static int hclgevf_get_rss(struct hnae3_handle *handle, u32 *indir, u8 *key,
+			   u8 *hfunc)
+{
+	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
+	struct hclgevf_rss_cfg *rss_cfg = &hdev->rss_cfg;
+	int i;
+
+	if (indir)
+		for (i = 0; i < HCLGEVF_RSS_IND_TBL_SIZE; i++)
+			indir[i] = rss_cfg->rss_indirection_tbl[i];
+
+	return hclgevf_get_rss_hw_cfg(handle, hfunc, key);
+}
+
+static int hclgevf_set_rss(struct hnae3_handle *handle, const u32 *indir,
+			   const u8 *key, const u8 hfunc)
+{
+	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
+	struct hclgevf_rss_cfg *rss_cfg = &hdev->rss_cfg;
+	int i;
+
+	/* update the shadow RSS table with user specified qids */
+	for (i = 0; i < HCLGEVF_RSS_IND_TBL_SIZE; i++)
+		rss_cfg->rss_indirection_tbl[i] = indir[i];
+
+	/* update the hardware */
+	return hclgevf_set_rss_indir_table(hdev);
+}
+
+static int hclgevf_get_tc_size(struct hnae3_handle *handle)
+{
+	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
+	struct hclgevf_rss_cfg *rss_cfg = &hdev->rss_cfg;
+
+	return rss_cfg->rss_size;
+}
+
+static int hclgevf_bind_ring_to_vector(struct hnae3_handle *handle, bool en,
+				       int vector,
+				       struct hnae3_ring_chain_node *ring_chain)
+{
+#define HCLGEVF_RING_NODE_VARIABLE_NUM		3
+#define HCLGEVF_RING_MAP_MBX_BASIC_MSG_NUM	3
+	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
+	struct hnae3_ring_chain_node *node;
+	struct hclge_mbx_vf_to_pf_cmd *req;
+	struct hclgevf_desc desc;
+	int i, vector_id;
+	int status;
+	u8 type;
+
+	req = (struct hclge_mbx_vf_to_pf_cmd *)desc.data;
+	vector_id = hclgevf_get_vector_index(hdev, vector);
+	if (vector_id < 0) {
+		dev_err(&handle->pdev->dev,
+			"Get vector index fail. ret =%d\n", vector_id);
+		return vector_id;
+	}
+
+	hclgevf_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_MBX_VF_TO_PF, false);
+	type = en ?
+		HCLGE_MBX_MAP_RING_TO_VECTOR : HCLGE_MBX_UNMAP_RING_TO_VECTOR;
+	req->msg[0] = type;
+	req->msg[1] = vector_id; /* vector_id should be id in VF */
+
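+	/* each ring in the chain is described by a triple in the mailbox
+	 * payload: (ring type, tqp index, GL index); msg[2] carries the
+	 * number of triples and the message is flushed whenever it fills up
+	 */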
+	i = 0;
+	for (node = ring_chain; node; node = node->next) {
+		i++;
+		/* msg[2] is cause num */
+		req->msg[HCLGEVF_RING_NODE_VARIABLE_NUM * i] =
+				hnae_get_bit(node->flag, HNAE3_RING_TYPE_B);
+		req->msg[HCLGEVF_RING_NODE_VARIABLE_NUM * i + 1] =
+				node->tqp_index;
+		req->msg[HCLGEVF_RING_NODE_VARIABLE_NUM * i + 2] =
+				hnae_get_field(node->int_gl_idx,
+					       HNAE3_RING_GL_IDX_M,
+					       HNAE3_RING_GL_IDX_S);
+
+		if (i == (HCLGE_MBX_VF_MSG_DATA_NUM -
+		    HCLGEVF_RING_MAP_MBX_BASIC_MSG_NUM) /
+		    HCLGEVF_RING_NODE_VARIABLE_NUM) {
+			req->msg[2] = i;
+
+			status = hclgevf_cmd_send(&hdev->hw, &desc, 1);
+			if (status) {
+				dev_err(&hdev->pdev->dev,
+					"Map TQP fail, status is %d.\n",
+					status);
+				return status;
+			}
+			i = 0;
+			hclgevf_cmd_setup_basic_desc(&desc,
+						     HCLGEVF_OPC_MBX_VF_TO_PF,
+						     false);
+			req->msg[0] = type;
+			req->msg[1] = vector_id;
+		}
+	}
+
+	if (i > 0) {
+		req->msg[2] = i;
+
+		status = hclgevf_cmd_send(&hdev->hw, &desc, 1);
+		if (status) {
+			dev_err(&hdev->pdev->dev,
+				"Map TQP fail, status is %d.\n", status);
+			return status;
+		}
+	}
+
+	return 0;
+}
+
+static int hclgevf_map_ring_to_vector(struct hnae3_handle *handle, int vector,
+				      struct hnae3_ring_chain_node *ring_chain)
+{
+	return hclgevf_bind_ring_to_vector(handle, true, vector, ring_chain);
+}
+
+static int hclgevf_unmap_ring_from_vector(
+				struct hnae3_handle *handle,
+				int vector,
+				struct hnae3_ring_chain_node *ring_chain)
+{
+	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
+	int ret, vector_id;
+
+	vector_id = hclgevf_get_vector_index(hdev, vector);
+	if (vector_id < 0) {
+		dev_err(&handle->pdev->dev,
+			"Get vector index fail. ret =%d\n", vector_id);
+		return vector_id;
+	}
+
+	ret = hclgevf_bind_ring_to_vector(handle, false, vector, ring_chain);
+	if (ret) {
+		dev_err(&handle->pdev->dev,
+			"Unmap ring from vector fail. vector=%d, ret =%d\n",
+			vector_id,
+			ret);
+		return ret;
+	}
+
+	hclgevf_free_vector(hdev, vector);
+
+	return 0;
+}
+
+static int hclgevf_cmd_set_promisc_mode(struct hclgevf_dev *hdev, u32 en)
+{
+	struct hclge_mbx_vf_to_pf_cmd *req;
+	struct hclgevf_desc desc;
+	int status;
+
+	req = (struct hclge_mbx_vf_to_pf_cmd *)desc.data;
+
+	hclgevf_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_MBX_VF_TO_PF, false);
+	req->msg[0] = HCLGE_MBX_SET_PROMISC_MODE;
+	req->msg[1] = en;
+
+	status = hclgevf_cmd_send(&hdev->hw, &desc, 1);
+	if (status)
+		dev_err(&hdev->pdev->dev,
+			"Set promisc mode fail, status is %d.\n", status);
+
+	return status;
+}
+
+static void hclgevf_set_promisc_mode(struct hnae3_handle *handle, u32 en)
+{
+	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
+
+	hclgevf_cmd_set_promisc_mode(hdev, en);
+}
+
+static int hclgevf_tqp_enable(struct hclgevf_dev *hdev, int tqp_id,
+			      int stream_id, bool enable)
+{
+	struct hclgevf_cfg_com_tqp_queue_cmd *req;
+	struct hclgevf_desc desc;
+	int status;
+
+	req = (struct hclgevf_cfg_com_tqp_queue_cmd *)desc.data;
+
+	hclgevf_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_CFG_COM_TQP_QUEUE,
+				     false);
+	req->tqp_id = cpu_to_le16(tqp_id & HCLGEVF_RING_ID_MASK);
+	req->stream_id = cpu_to_le16(stream_id);
+	req->enable |= enable << HCLGEVF_TQP_ENABLE_B;
+
+	status = hclgevf_cmd_send(&hdev->hw, &desc, 1);
+	if (status)
+		dev_err(&hdev->pdev->dev,
+			"TQP enable fail, status =%d.\n", status);
+
+	return status;
+}
+
+static int hclgevf_get_queue_id(struct hnae3_queue *queue)
+{
+	struct hclgevf_tqp *tqp = container_of(queue, struct hclgevf_tqp, q);
+
+	return tqp->index;
+}
+
+static void hclgevf_reset_tqp_stats(struct hnae3_handle *handle)
+{
+	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
+	struct hnae3_queue *queue;
+	struct hclgevf_tqp *tqp;
+	int i;
+
+	for (i = 0; i < hdev->num_tqps; i++) {
+		queue = handle->kinfo.tqp[i];
+		tqp = container_of(queue, struct hclgevf_tqp, q);
+		memset(&tqp->tqp_stats, 0, sizeof(tqp->tqp_stats));
+	}
+}
+
+static int hclgevf_cfg_func_mta_filter(struct hnae3_handle *handle, bool en)
+{
+	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
+	u8 msg[2] = {0};
+
+	msg[0] = en;
+	return hclgevf_send_mbx_msg(hdev, HCLGE_MBX_SET_MULTICAST,
+				    HCLGE_MBX_MAC_VLAN_MC_FUNC_MTA_ENABLE,
+				    msg, 1, false, NULL, 0);
+}
+
+static void hclgevf_get_mac_addr(struct hnae3_handle *handle, u8 *p)
+{
+	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
+
+	ether_addr_copy(p, hdev->hw.mac.mac_addr);
+}
+
+static int hclgevf_set_mac_addr(struct hnae3_handle *handle, void *p)
+{
+	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
+	u8 *old_mac_addr = (u8 *)hdev->hw.mac.mac_addr;
+	u8 *new_mac_addr = (u8 *)p;
+	u8 msg_data[ETH_ALEN * 2];
+	int status;
+
+	ether_addr_copy(msg_data, new_mac_addr);
+	ether_addr_copy(&msg_data[ETH_ALEN], old_mac_addr);
+
+	status = hclgevf_send_mbx_msg(hdev, HCLGE_MBX_SET_UNICAST,
+				      HCLGE_MBX_MAC_VLAN_UC_MODIFY,
+				      msg_data, ETH_ALEN * 2,
+				      false, NULL, 0);
+	if (!status)
+		ether_addr_copy(hdev->hw.mac.mac_addr, new_mac_addr);
+
+	return status;
+}
+
+static int hclgevf_add_uc_addr(struct hnae3_handle *handle,
+			       const unsigned char *addr)
+{
+	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
+
+	return hclgevf_send_mbx_msg(hdev, HCLGE_MBX_SET_UNICAST,
+				    HCLGE_MBX_MAC_VLAN_UC_ADD,
+				    addr, ETH_ALEN, false, NULL, 0);
+}
+
+static int hclgevf_rm_uc_addr(struct hnae3_handle *handle,
+			      const unsigned char *addr)
+{
+	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
+
+	return hclgevf_send_mbx_msg(hdev, HCLGE_MBX_SET_UNICAST,
+				    HCLGE_MBX_MAC_VLAN_UC_REMOVE,
+				    addr, ETH_ALEN, false, NULL, 0);
+}
+
+static int hclgevf_add_mc_addr(struct hnae3_handle *handle,
+			       const unsigned char *addr)
+{
+	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
+
+	return hclgevf_send_mbx_msg(hdev, HCLGE_MBX_SET_MULTICAST,
+				    HCLGE_MBX_MAC_VLAN_MC_ADD,
+				    addr, ETH_ALEN, false, NULL, 0);
+}
+
+static int hclgevf_rm_mc_addr(struct hnae3_handle *handle,
+			      const unsigned char *addr)
+{
+	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
+
+	return hclgevf_send_mbx_msg(hdev, HCLGE_MBX_SET_MULTICAST,
+				    HCLGE_MBX_MAC_VLAN_MC_REMOVE,
+				    addr, ETH_ALEN, false, NULL, 0);
+}
+
+static int hclgevf_set_vlan_filter(struct hnae3_handle *handle,
+				   __be16 proto, u16 vlan_id,
+				   bool is_kill)
+{
+#define HCLGEVF_VLAN_MBX_MSG_LEN 5
+	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
+	u8 msg_data[HCLGEVF_VLAN_MBX_MSG_LEN];
+
+	if (vlan_id > 4095)
+		return -EINVAL;
+
+	if (proto != htons(ETH_P_8021Q))
+		return -EPROTONOSUPPORT;
+
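+	/* mailbox payload layout: byte 0 = kill flag, bytes 1-2 = VLAN id,
+	 * bytes 3-4 = protocol
+	 */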
+	msg_data[0] = is_kill;
+	memcpy(&msg_data[1], &vlan_id, sizeof(vlan_id));
+	memcpy(&msg_data[3], &proto, sizeof(proto));
+	return hclgevf_send_mbx_msg(hdev, HCLGE_MBX_SET_VLAN,
+				    HCLGE_MBX_VLAN_FILTER, msg_data,
+				    HCLGEVF_VLAN_MBX_MSG_LEN, false, NULL, 0);
+}
+
+static void hclgevf_reset_tqp(struct hnae3_handle *handle, u16 queue_id)
+{
+	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
+	u8 msg_data[2];
+
+	memcpy(&msg_data[0], &queue_id, sizeof(queue_id));
+
+	hclgevf_send_mbx_msg(hdev, HCLGE_MBX_QUEUE_RESET, 0, msg_data, 2, false,
+			     NULL, 0);
+}
+
+static u32 hclgevf_get_fw_version(struct hnae3_handle *handle)
+{
+	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
+
+	return hdev->fw_version;
+}
+
+static void hclgevf_get_misc_vector(struct hclgevf_dev *hdev)
+{
+	struct hclgevf_misc_vector *vector = &hdev->misc_vector;
+
+	vector->vector_irq = pci_irq_vector(hdev->pdev,
+					    HCLGEVF_MISC_VECTOR_NUM);
+	vector->addr = hdev->hw.io_base + HCLGEVF_MISC_VECTOR_REG_BASE;
+	/* vector status always valid for Vector 0 */
+	hdev->vector_status[HCLGEVF_MISC_VECTOR_NUM] = 0;
+	hdev->vector_irq[HCLGEVF_MISC_VECTOR_NUM] = vector->vector_irq;
+
+	hdev->num_msi_left -= 1;
+	hdev->num_msi_used += 1;
+}
+
+static void hclgevf_mbx_task_schedule(struct hclgevf_dev *hdev)
+{
+	if (!test_and_set_bit(HCLGEVF_STATE_MBX_SERVICE_SCHED, &hdev->state))
+		schedule_work(&hdev->mbx_service_task);
+}
+
+static void hclgevf_task_schedule(struct hclgevf_dev *hdev)
+{
+	if (!test_bit(HCLGEVF_STATE_DOWN, &hdev->state) &&
+	    !test_and_set_bit(HCLGEVF_STATE_SERVICE_SCHED, &hdev->state))
+		schedule_work(&hdev->service_task);
+}
+
+static void hclgevf_service_timer(struct timer_list *t)
+{
+	struct hclgevf_dev *hdev = from_timer(hdev, t, service_timer);
+
+	mod_timer(&hdev->service_timer, jiffies + 5 * HZ);
+
+	hclgevf_task_schedule(hdev);
+}
+
+static void hclgevf_mailbox_service_task(struct work_struct *work)
+{
+	struct hclgevf_dev *hdev;
+
+	hdev = container_of(work, struct hclgevf_dev, mbx_service_task);
+
+	if (test_and_set_bit(HCLGEVF_STATE_MBX_HANDLING, &hdev->state))
+		return;
+
+	clear_bit(HCLGEVF_STATE_MBX_SERVICE_SCHED, &hdev->state);
+
+	hclgevf_mbx_handler(hdev);
+
+	clear_bit(HCLGEVF_STATE_MBX_HANDLING, &hdev->state);
+}
+
+static void hclgevf_service_task(struct work_struct *work)
+{
+	struct hclgevf_dev *hdev;
+
+	hdev = container_of(work, struct hclgevf_dev, service_task);
+
+	/* request the link status from the PF. PF would be able to tell VF
+	 * about such updates in the future, so we might remove this later
+	 */
+	hclgevf_request_link_info(hdev);
+
+	clear_bit(HCLGEVF_STATE_SERVICE_SCHED, &hdev->state);
+}
+
+static void hclgevf_clear_event_cause(struct hclgevf_dev *hdev, u32 regclr)
+{
+	hclgevf_write_dev(&hdev->hw, HCLGEVF_VECTOR0_CMDQ_SRC_REG, regclr);
+}
+
+static bool hclgevf_check_event_cause(struct hclgevf_dev *hdev, u32 *clearval)
+{
+	u32 cmdq_src_reg;
+
+	/* fetch the events from their corresponding regs */
+	cmdq_src_reg = hclgevf_read_dev(&hdev->hw,
+					HCLGEVF_VECTOR0_CMDQ_SRC_REG);
+
+	/* check for vector0 mailbox(=CMDQ RX) event source */
+	if (BIT(HCLGEVF_VECTOR0_RX_CMDQ_INT_B) & cmdq_src_reg) {
+		cmdq_src_reg &= ~BIT(HCLGEVF_VECTOR0_RX_CMDQ_INT_B);
+		*clearval = cmdq_src_reg;
+		return true;
+	}
+
+	dev_dbg(&hdev->pdev->dev, "vector 0 interrupt from unknown source\n");
+
+	return false;
+}
+
+static void hclgevf_enable_vector(struct hclgevf_misc_vector *vector, bool en)
+{
+	writel(en ? 1 : 0, vector->addr);
+}
+
+static irqreturn_t hclgevf_misc_irq_handle(int irq, void *data)
+{
+	struct hclgevf_dev *hdev = data;
+	u32 clearval;
+
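+	/* mask the misc vector while the event cause is inspected; it is
+	 * re-enabled on both exit paths below
+	 */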
+	hclgevf_enable_vector(&hdev->misc_vector, false);
+	if (!hclgevf_check_event_cause(hdev, &clearval))
+		goto skip_sched;
+
+	/* schedule the VF mailbox service task, if not already scheduled */
+	hclgevf_mbx_task_schedule(hdev);
+
+	hclgevf_clear_event_cause(hdev, clearval);
+
+skip_sched:
+	hclgevf_enable_vector(&hdev->misc_vector, true);
+
+	return IRQ_HANDLED;
+}
+
+static int hclgevf_configure(struct hclgevf_dev *hdev)
+{
+	int ret;
+
+	/* get queue configuration from PF */
+	ret = hclgevf_get_queue_info(hdev);
+	if (ret)
+		return ret;
+	/* get tc configuration from PF */
+	return hclgevf_get_tc_info(hdev);
+}
+
+static int hclgevf_init_roce_base_info(struct hclgevf_dev *hdev)
+{
+	struct hnae3_handle *roce = &hdev->roce;
+	struct hnae3_handle *nic = &hdev->nic;
+
+	roce->rinfo.num_vectors = HCLGEVF_ROCEE_VECTOR_NUM;
+
+	if (hdev->num_msi_left < roce->rinfo.num_vectors ||
+	    hdev->num_msi_left == 0)
+		return -EINVAL;
+
+	roce->rinfo.base_vector =
+		hdev->vector_status[hdev->num_msi_used];
+
+	roce->rinfo.netdev = nic->kinfo.netdev;
+	roce->rinfo.roce_io_base = hdev->hw.io_base;
+
+	roce->pdev = nic->pdev;
+	roce->ae_algo = nic->ae_algo;
+	roce->numa_node_mask = nic->numa_node_mask;
+
+	return 0;
+}
+
+static int hclgevf_rss_init_hw(struct hclgevf_dev *hdev)
+{
+	struct hclgevf_rss_cfg *rss_cfg = &hdev->rss_cfg;
+	int i, ret;
+
+	rss_cfg->rss_size = hdev->rss_size_max;
+
+	/* Initialize RSS indirect table for each vport */
+	for (i = 0; i < HCLGEVF_RSS_IND_TBL_SIZE; i++)
+		rss_cfg->rss_indirection_tbl[i] = i % hdev->rss_size_max;
+
+	ret = hclgevf_set_rss_indir_table(hdev);
+	if (ret)
+		return ret;
+
+	return hclgevf_set_rss_tc_mode(hdev, hdev->rss_size_max);
+}
+
+static int hclgevf_init_vlan_config(struct hclgevf_dev *hdev)
+{
+	/* other VLAN config (e.g. VLAN TX/RX offload) would also be added
+	 * here later
+	 */
+	return hclgevf_set_vlan_filter(&hdev->nic, htons(ETH_P_8021Q), 0,
+				       false);
+}
+
+static int hclgevf_ae_start(struct hnae3_handle *handle)
+{
+	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
+	int i, queue_id;
+
+	for (i = 0; i < handle->kinfo.num_tqps; i++) {
+		/* ring enable */
+		queue_id = hclgevf_get_queue_id(handle->kinfo.tqp[i]);
+		if (queue_id < 0) {
+			dev_warn(&hdev->pdev->dev,
+				 "Get invalid queue id, ignore it\n");
+			continue;
+		}
+
+		hclgevf_tqp_enable(hdev, queue_id, 0, true);
+	}
+
+	/* reset tqp stats */
+	hclgevf_reset_tqp_stats(handle);
+
+	hclgevf_request_link_info(hdev);
+
+	clear_bit(HCLGEVF_STATE_DOWN, &hdev->state);
+	mod_timer(&hdev->service_timer, jiffies + HZ);
+
+	return 0;
+}
+
+static void hclgevf_ae_stop(struct hnae3_handle *handle)
+{
+	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
+	int i, queue_id;
+
+	for (i = 0; i < hdev->num_tqps; i++) {
+		/* Ring disable */
+		queue_id = hclgevf_get_queue_id(handle->kinfo.tqp[i]);
+		if (queue_id < 0) {
+			dev_warn(&hdev->pdev->dev,
+				 "Get invalid queue id, ignore it\n");
+			continue;
+		}
+
+		hclgevf_tqp_enable(hdev, queue_id, 0, false);
+	}
+
+	/* reset tqp stats */
+	hclgevf_reset_tqp_stats(handle);
+}
+
+static void hclgevf_state_init(struct hclgevf_dev *hdev)
+{
+	/* setup tasks for the MBX */
+	INIT_WORK(&hdev->mbx_service_task, hclgevf_mailbox_service_task);
+	clear_bit(HCLGEVF_STATE_MBX_SERVICE_SCHED, &hdev->state);
+	clear_bit(HCLGEVF_STATE_MBX_HANDLING, &hdev->state);
+
+	/* setup tasks for service timer */
+	timer_setup(&hdev->service_timer, hclgevf_service_timer, 0);
+
+	INIT_WORK(&hdev->service_task, hclgevf_service_task);
+	clear_bit(HCLGEVF_STATE_SERVICE_SCHED, &hdev->state);
+
+	mutex_init(&hdev->mbx_resp.mbx_mutex);
+
+	/* bring the device down */
+	set_bit(HCLGEVF_STATE_DOWN, &hdev->state);
+}
+
+static void hclgevf_state_uninit(struct hclgevf_dev *hdev)
+{
+	set_bit(HCLGEVF_STATE_DOWN, &hdev->state);
+
+	if (hdev->service_timer.function)
+		del_timer_sync(&hdev->service_timer);
+	if (hdev->service_task.func)
+		cancel_work_sync(&hdev->service_task);
+	if (hdev->mbx_service_task.func)
+		cancel_work_sync(&hdev->mbx_service_task);
+
+	mutex_destroy(&hdev->mbx_resp.mbx_mutex);
+}
+
+static int hclgevf_init_msi(struct hclgevf_dev *hdev)
+{
+	struct pci_dev *pdev = hdev->pdev;
+	int vectors;
+	int i;
+
+	hdev->num_msi = HCLGEVF_MAX_VF_VECTOR_NUM;
+
+	vectors = pci_alloc_irq_vectors(pdev, 1, hdev->num_msi,
+					PCI_IRQ_MSI | PCI_IRQ_MSIX);
+	if (vectors < 0) {
+		dev_err(&pdev->dev,
+			"failed(%d) to allocate MSI/MSI-X vectors\n",
+			vectors);
+		return vectors;
+	}
+	if (vectors < hdev->num_msi)
+		dev_warn(&hdev->pdev->dev,
+			 "requested %d MSI/MSI-X, but allocated %d MSI/MSI-X\n",
+			 hdev->num_msi, vectors);
+
+	hdev->num_msi = vectors;
+	hdev->num_msi_left = vectors;
+	hdev->base_msi_vector = pdev->irq;
+
+	hdev->vector_status = devm_kcalloc(&pdev->dev, hdev->num_msi,
+					   sizeof(u16), GFP_KERNEL);
+	if (!hdev->vector_status) {
+		pci_free_irq_vectors(pdev);
+		return -ENOMEM;
+	}
+
+	for (i = 0; i < hdev->num_msi; i++)
+		hdev->vector_status[i] = HCLGEVF_INVALID_VPORT;
+
+	hdev->vector_irq = devm_kcalloc(&pdev->dev, hdev->num_msi,
+					sizeof(int), GFP_KERNEL);
+	if (!hdev->vector_irq) {
+		pci_free_irq_vectors(pdev);
+		return -ENOMEM;
+	}
+
+	return 0;
+}
+
+static void hclgevf_uninit_msi(struct hclgevf_dev *hdev)
+{
+	struct pci_dev *pdev = hdev->pdev;
+
+	pci_free_irq_vectors(pdev);
+}
+
+static int hclgevf_misc_irq_init(struct hclgevf_dev *hdev)
+{
+	int ret = 0;
+
+	hclgevf_get_misc_vector(hdev);
+
+	ret = request_irq(hdev->misc_vector.vector_irq, hclgevf_misc_irq_handle,
+			  0, "hclgevf_cmd", hdev);
+	if (ret) {
+		dev_err(&hdev->pdev->dev, "VF failed to request misc irq(%d)\n",
+			hdev->misc_vector.vector_irq);
+		return ret;
+	}
+
+	/* enable misc. vector(vector 0) */
+	hclgevf_enable_vector(&hdev->misc_vector, true);
+
+	return ret;
+}
+
+static void hclgevf_misc_irq_uninit(struct hclgevf_dev *hdev)
+{
+	/* disable misc vector(vector 0) */
+	hclgevf_enable_vector(&hdev->misc_vector, false);
+	free_irq(hdev->misc_vector.vector_irq, hdev);
+	hclgevf_free_vector(hdev, 0);
+}
+
+static int hclgevf_init_instance(struct hclgevf_dev *hdev,
+				 struct hnae3_client *client)
+{
+	int ret;
+
+	switch (client->type) {
+	case HNAE3_CLIENT_KNIC:
+		hdev->nic_client = client;
+		hdev->nic.client = client;
+
+		ret = client->ops->init_instance(&hdev->nic);
+		if (ret)
+			return ret;
+
+		if (hdev->roce_client && hnae3_dev_roce_supported(hdev)) {
+			struct hnae3_client *rc = hdev->roce_client;
+
+			ret = hclgevf_init_roce_base_info(hdev);
+			if (ret)
+				return ret;
+			ret = rc->ops->init_instance(&hdev->roce);
+			if (ret)
+				return ret;
+		}
+		break;
+	case HNAE3_CLIENT_UNIC:
+		hdev->nic_client = client;
+		hdev->nic.client = client;
+
+		ret = client->ops->init_instance(&hdev->nic);
+		if (ret)
+			return ret;
+		break;
+	case HNAE3_CLIENT_ROCE:
+		hdev->roce_client = client;
+		hdev->roce.client = client;
+
+		if (hdev->roce_client && hnae3_dev_roce_supported(hdev)) {
+			ret = hclgevf_init_roce_base_info(hdev);
+			if (ret)
+				return ret;
+
+			ret = client->ops->init_instance(&hdev->roce);
+			if (ret)
+				return ret;
+		}
+	}
+
+	return 0;
+}
+
+static void hclgevf_uninit_instance(struct hclgevf_dev *hdev,
+				    struct hnae3_client *client)
+{
+	/* un-init roce, if it exists */
+	if (hdev->roce_client)
+		hdev->roce_client->ops->uninit_instance(&hdev->roce, 0);
+
+	/* un-init nic/unic, if this was not called by roce client */
+	if ((client->ops->uninit_instance) &&
+	    (client->type != HNAE3_CLIENT_ROCE))
+		client->ops->uninit_instance(&hdev->nic, 0);
+}
+
+static int hclgevf_register_client(struct hnae3_client *client,
+				   struct hnae3_ae_dev *ae_dev)
+{
+	struct hclgevf_dev *hdev = ae_dev->priv;
+
+	return hclgevf_init_instance(hdev, client);
+}
+
+static void hclgevf_unregister_client(struct hnae3_client *client,
+				      struct hnae3_ae_dev *ae_dev)
+{
+	struct hclgevf_dev *hdev = ae_dev->priv;
+
+	hclgevf_uninit_instance(hdev, client);
+}
+
+static int hclgevf_pci_init(struct hclgevf_dev *hdev)
+{
+	struct pci_dev *pdev = hdev->pdev;
+	struct hclgevf_hw *hw;
+	int ret;
+
+	ret = pci_enable_device(pdev);
+	if (ret) {
+		dev_err(&pdev->dev, "failed to enable PCI device\n");
+		goto err_no_drvdata;
+	}
+
+	ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
+	if (ret) {
+		dev_err(&pdev->dev, "can't set consistent PCI DMA, exiting");
+		goto err_disable_device;
+	}
+
+	ret = pci_request_regions(pdev, HCLGEVF_DRIVER_NAME);
+	if (ret) {
+		dev_err(&pdev->dev, "PCI request regions failed %d\n", ret);
+		goto err_disable_device;
+	}
+
+	pci_set_master(pdev);
+	hw = &hdev->hw;
+	hw->hdev = hdev;
+	hw->io_base = pci_iomap(pdev, 2, 0);
+	if (!hw->io_base) {
+		dev_err(&pdev->dev, "can't map configuration register space\n");
+		ret = -ENOMEM;
+		goto err_clr_master;
+	}
+
+	return 0;
+
+err_clr_master:
+	pci_clear_master(pdev);
+	pci_release_regions(pdev);
+err_disable_device:
+	pci_disable_device(pdev);
+err_no_drvdata:
+	pci_set_drvdata(pdev, NULL);
+	return ret;
+}
+
+static void hclgevf_pci_uninit(struct hclgevf_dev *hdev)
+{
+	struct pci_dev *pdev = hdev->pdev;
+
+	pci_iounmap(pdev, hdev->hw.io_base);
+	pci_clear_master(pdev);
+	pci_release_regions(pdev);
+	pci_disable_device(pdev);
+	pci_set_drvdata(pdev, NULL);
+}
+
+static int hclgevf_init_ae_dev(struct hnae3_ae_dev *ae_dev)
+{
+	struct pci_dev *pdev = ae_dev->pdev;
+	struct hclgevf_dev *hdev;
+	int ret;
+
+	hdev = devm_kzalloc(&pdev->dev, sizeof(*hdev), GFP_KERNEL);
+	if (!hdev)
+		return -ENOMEM;
+
+	hdev->pdev = pdev;
+	hdev->ae_dev = ae_dev;
+	ae_dev->priv = hdev;
+
+	ret = hclgevf_pci_init(hdev);
+	if (ret) {
+		dev_err(&pdev->dev, "PCI initialization failed\n");
+		return ret;
+	}
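+	/* scan the vector table, skipping vector 0 (misc/mailbox), and hand
+	 * out any slot still marked HCLGEVF_INVALID_VPORT
+	 */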
+
+	ret = hclgevf_init_msi(hdev);
+	if (ret) {
+		dev_err(&pdev->dev, "failed(%d) to init MSI/MSI-X\n", ret);
+		goto err_irq_init;
+	}
+
+	hclgevf_state_init(hdev);
+
+	ret = hclgevf_misc_irq_init(hdev);
+	if (ret) {
+		dev_err(&pdev->dev, "failed(%d) to init Misc IRQ(vector0)\n",
+			ret);
+		goto err_misc_irq_init;
+	}
+
+	ret = hclgevf_cmd_init(hdev);
+	if (ret)
+		goto err_cmd_init;
+
+	ret = hclgevf_configure(hdev);
+	if (ret) {
+		dev_err(&pdev->dev, "failed(%d) to fetch configuration\n", ret);
+		goto err_config;
+	}
+
+	ret = hclgevf_alloc_tqps(hdev);
+	if (ret) {
+		dev_err(&pdev->dev, "failed(%d) to allocate TQPs\n", ret);
+		goto err_config;
+	}
+
+	ret = hclgevf_set_handle_info(hdev);
+	if (ret) {
+		dev_err(&pdev->dev, "failed(%d) to set handle info\n", ret);
+		goto err_config;
+	}
+
+	/* Initialize VF's MTA */
+	hdev->accept_mta_mc = true;
+	ret = hclgevf_cfg_func_mta_filter(&hdev->nic, hdev->accept_mta_mc);
+	if (ret) {
+		dev_err(&hdev->pdev->dev,
+			"failed(%d) to set mta filter mode\n", ret);
+		goto err_config;
+	}
+
+	/* Initialize RSS for this VF */
+	ret = hclgevf_rss_init_hw(hdev);
+	if (ret) {
+		dev_err(&hdev->pdev->dev,
+			"failed(%d) to initialize RSS\n", ret);
+		goto err_config;
+	}
+
+	ret = hclgevf_init_vlan_config(hdev);
+	if (ret) {
+		dev_err(&hdev->pdev->dev,
+			"failed(%d) to initialize VLAN config\n", ret);
+		goto err_config;
+	}
+
+	pr_info("finished initializing %s driver\n", HCLGEVF_DRIVER_NAME);
+
+	return 0;
+
+err_config:
+	hclgevf_cmd_uninit(hdev);
+err_cmd_init:
+	hclgevf_misc_irq_uninit(hdev);
+err_misc_irq_init:
+	hclgevf_state_uninit(hdev);
+	hclgevf_uninit_msi(hdev);
+err_irq_init:
+	hclgevf_pci_uninit(hdev);
+	return ret;
+}
+
+static void hclgevf_uninit_ae_dev(struct hnae3_ae_dev *ae_dev)
+{
+	struct hclgevf_dev *hdev = ae_dev->priv;
+
+	hclgevf_cmd_uninit(hdev);
+	hclgevf_misc_irq_uninit(hdev);
+	hclgevf_state_uninit(hdev);
+	hclgevf_uninit_msi(hdev);
+	hclgevf_pci_uninit(hdev);
+	ae_dev->priv = NULL;
+}
+
+static u32 hclgevf_get_max_channels(struct hclgevf_dev *hdev)
+{
+	struct hnae3_handle *nic = &hdev->nic;
+	struct hnae3_knic_private_info *kinfo = &nic->kinfo;
+
+	return min_t(u32, hdev->rss_size_max * kinfo->num_tc, hdev->num_tqps);
+}
+
+/**
+ * hclgevf_get_channels - Get the current channels enabled and max supported.
+ * @handle: hardware information for network interface
+ * @ch: ethtool channels structure
+ *
+ * We don't support separate tx and rx queues as channels. The other count
+ * represents how many queues are being used for control. max_combined counts
+ * how many queue pairs we can support. They may not be mapped 1 to 1 with
+ * q_vectors since we support a lot more queue pairs than q_vectors.
+ **/
+static void hclgevf_get_channels(struct hnae3_handle *handle,
+				 struct ethtool_channels *ch)
+{
+	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
+
+	ch->max_combined = hclgevf_get_max_channels(hdev);
+	ch->other_count = 0;
+	ch->max_other = 0;
+	ch->combined_count = hdev->num_tqps;
+}
+
+static const struct hnae3_ae_ops hclgevf_ops = {
+	.init_ae_dev = hclgevf_init_ae_dev,
+	.uninit_ae_dev = hclgevf_uninit_ae_dev,
+	.init_client_instance = hclgevf_register_client,
+	.uninit_client_instance = hclgevf_unregister_client,
+	.start = hclgevf_ae_start,
+	.stop = hclgevf_ae_stop,
+	.map_ring_to_vector = hclgevf_map_ring_to_vector,
+	.unmap_ring_from_vector = hclgevf_unmap_ring_from_vector,
+	.get_vector = hclgevf_get_vector,
+	.reset_queue = hclgevf_reset_tqp,
+	.set_promisc_mode = hclgevf_set_promisc_mode,
+	.get_mac_addr = hclgevf_get_mac_addr,
+	.set_mac_addr = hclgevf_set_mac_addr,
+	.add_uc_addr = hclgevf_add_uc_addr,
+	.rm_uc_addr = hclgevf_rm_uc_addr,
+	.add_mc_addr = hclgevf_add_mc_addr,
+	.rm_mc_addr = hclgevf_rm_mc_addr,
+	.get_stats = hclgevf_get_stats,
+	.update_stats = hclgevf_update_stats,
+	.get_strings = hclgevf_get_strings,
+	.get_sset_count = hclgevf_get_sset_count,
+	.get_rss_key_size = hclgevf_get_rss_key_size,
+	.get_rss_indir_size = hclgevf_get_rss_indir_size,
+	.get_rss = hclgevf_get_rss,
+	.set_rss = hclgevf_set_rss,
+	.get_tc_size = hclgevf_get_tc_size,
+	.get_fw_version = hclgevf_get_fw_version,
+	.set_vlan_filter = hclgevf_set_vlan_filter,
+	.get_channels = hclgevf_get_channels,
+};
+
+static struct hnae3_ae_algo ae_algovf = {
+	.ops = &hclgevf_ops,
+	.name = HCLGEVF_NAME,
+	.pdev_id_table = ae_algovf_pci_tbl,
+};
+
+static int hclgevf_init(void)
+{
+	pr_info("%s is initializing\n", HCLGEVF_NAME);
+
+	return hnae3_register_ae_algo(&ae_algovf);
+}
+
+static void hclgevf_exit(void)
+{
+	hnae3_unregister_ae_algo(&ae_algovf);
+}
+module_init(hclgevf_init);
+module_exit(hclgevf_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Huawei Tech. Co., Ltd.");
+MODULE_DESCRIPTION("HCLGEVF Driver");
+MODULE_VERSION(HCLGEVF_MOD_VERSION);
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.h b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.h
new file mode 100644
index 000000000000..a63bee4a3674
--- /dev/null
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.h
@@ -0,0 +1,164 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/* Copyright (c) 2016-2017 Hisilicon Limited. */
+
+#ifndef __HCLGEVF_MAIN_H
+#define __HCLGEVF_MAIN_H
+#include <linux/fs.h>
+#include <linux/types.h>
+#include "hclge_mbx.h"
+#include "hclgevf_cmd.h"
+#include "hnae3.h"
+
+#define HCLGEVF_MOD_VERSION "v1.0"
+#define HCLGEVF_DRIVER_NAME "hclgevf"
+
+#define HCLGEVF_ROCEE_VECTOR_NUM	0
+#define HCLGEVF_MISC_VECTOR_NUM		0
+
+#define HCLGEVF_INVALID_VPORT		0xffff
+
+/* This number actually depends on the total number of VFs created by the
+ * physical function. The maximum number of vectors per VF is 32 data
+ * vectors plus one misc vector (32 + 1).
+ */
+#define HCLGEVF_MAX_VF_VECTOR_NUM	(32 + 1)
+
+#define HCLGEVF_VECTOR_REG_BASE		0x20000
+#define HCLGEVF_MISC_VECTOR_REG_BASE	0x20400
+#define HCLGEVF_VECTOR_REG_OFFSET	0x4
+#define HCLGEVF_VECTOR_VF_OFFSET		0x100000
+
+/* Vector0 interrupt CMDQ event source register(RW) */
+#define HCLGEVF_VECTOR0_CMDQ_SRC_REG	0x27100
+/* CMDQ register bits for RX event(=MBX event) */
+#define HCLGEVF_VECTOR0_RX_CMDQ_INT_B	1
+
+#define HCLGEVF_TQP_RESET_TRY_TIMES	10
+
+#define HCLGEVF_RSS_IND_TBL_SIZE		512
+#define HCLGEVF_RSS_SET_BITMAP_MSK	0xffff
+#define HCLGEVF_RSS_KEY_SIZE		40
+#define HCLGEVF_RSS_HASH_ALGO_TOEPLITZ	0
+#define HCLGEVF_RSS_HASH_ALGO_SIMPLE	1
+#define HCLGEVF_RSS_HASH_ALGO_SYMMETRIC	2
+#define HCLGEVF_RSS_HASH_ALGO_MASK	0xf
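+/* the 512 entry indirection table is programmed 16 entries per command */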
+#define HCLGEVF_RSS_CFG_TBL_NUM \
+	(HCLGEVF_RSS_IND_TBL_SIZE / HCLGEVF_RSS_CFG_TBL_SIZE)
+
+/* states of hclgevf device & tasks */
+enum hclgevf_states {
+	/* device states */
+	HCLGEVF_STATE_DOWN,
+	HCLGEVF_STATE_DISABLED,
+	/* task states */
+	HCLGEVF_STATE_SERVICE_SCHED,
+	HCLGEVF_STATE_MBX_SERVICE_SCHED,
+	HCLGEVF_STATE_MBX_HANDLING,
+};
+
+#define HCLGEVF_MPF_ENABLE 1
+
+struct hclgevf_mac {
+	u8 mac_addr[ETH_ALEN];
+	int link;
+};
+
+struct hclgevf_hw {
+	void __iomem *io_base;
+	int num_vec;
+	struct hclgevf_cmq cmq;
+	struct hclgevf_mac mac;
+	void *hdev; /* hclgevf device it is part of */
+};
+
+/* TQP stats */
+struct hlcgevf_tqp_stats {
+	/* query_tqp_tx_queue_statistics, opcode id: 0x0B03 */
+	u64 rcb_tx_ring_pktnum_rcd; /* 32bit */
+	/* query_tqp_rx_queue_statistics, opcode id: 0x0B13 */
+	u64 rcb_rx_ring_pktnum_rcd; /* 32bit */
+};
+
+struct hclgevf_tqp {
+	struct device *dev;	/* device for DMA mapping */
+	struct hnae3_queue q;
+	struct hlcgevf_tqp_stats tqp_stats;
+	u16 index;		/* global index in a NIC controller */
+
+	bool alloced;
+};
+
+struct hclgevf_cfg {
+	u8 vmdq_vport_num;
+	u8 tc_num;
+	u16 tqp_desc_num;
+	u16 rx_buf_len;
+	u8 phy_addr;
+	u8 media_type;
+	u8 mac_addr[ETH_ALEN];
+	u32 numa_node_map;
+};
+
+struct hclgevf_rss_cfg {
+	u8  rss_hash_key[HCLGEVF_RSS_KEY_SIZE]; /* user configured hash keys */
+	u32 hash_algo;
+	u32 rss_size;
+	u8 hw_tc_map;
+	u8  rss_indirection_tbl[HCLGEVF_RSS_IND_TBL_SIZE]; /* shadow table */
+};
+
+struct hclgevf_misc_vector {
+	u8 __iomem *addr;
+	int vector_irq;
+};
+
+struct hclgevf_dev {
+	struct pci_dev *pdev;
+	struct hnae3_ae_dev *ae_dev;
+	struct hclgevf_hw hw;
+	struct hclgevf_misc_vector misc_vector;
+	struct hclgevf_rss_cfg rss_cfg;
+	unsigned long state;
+
+	u32 fw_version;
+	u16 num_tqps;		/* num task queue pairs of this VF */
+
+	u16 alloc_rss_size;	/* allocated RSS task queue */
+	u16 rss_size_max;	/* HW defined max RSS task queue */
+
+	u16 num_alloc_vport;	/* num vports this driver supports */
+	u32 numa_node_mask;
+	u16 rx_buf_len;
+	u16 num_desc;
+	u8 hw_tc_map;
+
+	u16 num_msi;
+	u16 num_msi_left;
+	u16 num_msi_used;
+	u32 base_msi_vector;
+	u16 *vector_status;
+	int *vector_irq;
+
+	bool accept_mta_mc; /* whether to accept mta filter multicast */
+	struct hclgevf_mbx_resp_status mbx_resp; /* mailbox response */
+
+	struct timer_list service_timer;
+	struct work_struct service_task;
+	struct work_struct mbx_service_task;
+
+	struct hclgevf_tqp *htqp;
+
+	struct hnae3_handle nic;
+	struct hnae3_handle roce;
+
+	struct hnae3_client *nic_client;
+	struct hnae3_client *roce_client;
+	u32 flag;
+};
+
+int hclgevf_send_mbx_msg(struct hclgevf_dev *hdev, u16 code, u16 subcode,
+			 const u8 *msg_data, u8 msg_len, bool need_resp,
+			 u8 *resp_data, u16 resp_len);
+void hclgevf_mbx_handler(struct hclgevf_dev *hdev);
+void hclgevf_update_link_status(struct hclgevf_dev *hdev, int link_state);
+#endif
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_mbx.c b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_mbx.c
new file mode 100644
index 000000000000..e39cad285fa9
--- /dev/null
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_mbx.c
@@ -0,0 +1,181 @@
+// SPDX-License-Identifier: GPL-2.0+
+// Copyright (c) 2016-2017 Hisilicon Limited.
+
+#include "hclge_mbx.h"
+#include "hclgevf_main.h"
+#include "hnae3.h"
+
+static void hclgevf_reset_mbx_resp_status(struct hclgevf_dev *hdev)
+{
+	/* this function should be called with mbx_resp.mbx_mutex held
+	 * to protect received_resp from race conditions
+	 */
+	hdev->mbx_resp.received_resp  = false;
+	hdev->mbx_resp.origin_mbx_msg = 0;
+	hdev->mbx_resp.resp_status    = 0;
+	memset(hdev->mbx_resp.additional_info, 0, HCLGE_MBX_MAX_RESP_DATA_SIZE);
+}
+
+/* hclgevf_get_mbx_resp: used to get a response from PF after VF sends a
+ * mailbox message to PF.
+ * @hdev: pointer to struct hclgevf_dev
+ * @code0: the message code the VF sent, echoed back by the PF for matching
+ * @code1: the message subcode the VF sent, echoed back by the PF
+ * @resp_data: buffer to store additional response data from the PF
+ * @resp_len: length of the resp_data buffer
+ */
+static int hclgevf_get_mbx_resp(struct hclgevf_dev *hdev, u16 code0, u16 code1,
+				u8 *resp_data, u16 resp_len)
+{
+#define HCLGEVF_MAX_TRY_TIMES	500
+#define HCLGEVF_SLEEP_USECOND	1000
+	struct hclgevf_mbx_resp_status *mbx_resp;
+	u16 r_code0, r_code1;
+	int i = 0;
+
+	if (resp_len > HCLGE_MBX_MAX_RESP_DATA_SIZE) {
+		dev_err(&hdev->pdev->dev,
+			"VF mbx response len(=%d) exceeds maximum(=%d)\n",
+			resp_len,
+			HCLGE_MBX_MAX_RESP_DATA_SIZE);
+		return -EINVAL;
+	}
+
+	while ((!hdev->mbx_resp.received_resp) && (i < HCLGEVF_MAX_TRY_TIMES)) {
+		udelay(HCLGEVF_SLEEP_USECOND);
+		i++;
+	}
+
+	if (i >= HCLGEVF_MAX_TRY_TIMES) {
+		dev_err(&hdev->pdev->dev,
+			"VF could not get mbx resp(=%d) from PF in %d tries\n",
+			hdev->mbx_resp.received_resp, i);
+		return -EIO;
+	}
+
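+	/* origin_mbx_msg packs the code echoed by the PF in the upper 16
+	 * bits and the subcode in the lower bits; both must match what the
+	 * VF sent for the response to be accepted
+	 */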
+	mbx_resp = &hdev->mbx_resp;
+	r_code0 = (u16)(mbx_resp->origin_mbx_msg >> 16);
+	r_code1 = (u16)(mbx_resp->origin_mbx_msg & 0xff);
+	if (resp_data)
+		memcpy(resp_data, &mbx_resp->additional_info[0], resp_len);
+
+	hclgevf_reset_mbx_resp_status(hdev);
+
+	if (!(r_code0 == code0 && r_code1 == code1 && !mbx_resp->resp_status)) {
+		dev_err(&hdev->pdev->dev,
+			"VF could not match resp code(code0=%d,code1=%d), %d",
+			code0, code1, mbx_resp->resp_status);
+		return -EIO;
+	}
+
+	return 0;
+}
+
+int hclgevf_send_mbx_msg(struct hclgevf_dev *hdev, u16 code, u16 subcode,
+			 const u8 *msg_data, u8 msg_len, bool need_resp,
+			 u8 *resp_data, u16 resp_len)
+{
+	struct hclge_mbx_vf_to_pf_cmd *req;
+	struct hclgevf_desc desc;
+	int status;
+
+	req = (struct hclge_mbx_vf_to_pf_cmd *)desc.data;
+
+	/* first two bytes are reserved for code & subcode */
+	if (msg_len > (HCLGE_MBX_MAX_MSG_SIZE - 2)) {
+		dev_err(&hdev->pdev->dev,
+			"VF send mbx msg fail, msg len %d exceeds max len %d\n",
+			msg_len, HCLGE_MBX_MAX_MSG_SIZE);
+		return -EINVAL;
+	}
+
+	hclgevf_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_MBX_VF_TO_PF, false);
+	req->msg[0] = code;
+	req->msg[1] = subcode;
+	memcpy(&req->msg[2], msg_data, msg_len);
+
+	/* synchronous send */
+	if (need_resp) {
+		mutex_lock(&hdev->mbx_resp.mbx_mutex);
+		hclgevf_reset_mbx_resp_status(hdev);
+		status = hclgevf_cmd_send(&hdev->hw, &desc, 1);
+		if (status) {
+			dev_err(&hdev->pdev->dev,
+				"VF failed(=%d) to send mbx message to PF\n",
+				status);
+			mutex_unlock(&hdev->mbx_resp.mbx_mutex);
+			return status;
+		}
+
+		status = hclgevf_get_mbx_resp(hdev, code, subcode, resp_data,
+					      resp_len);
+		mutex_unlock(&hdev->mbx_resp.mbx_mutex);
+	} else {
+		/* asynchronous send */
+		status = hclgevf_cmd_send(&hdev->hw, &desc, 1);
+		if (status) {
+			dev_err(&hdev->pdev->dev,
+				"VF failed(=%d) to send mbx message to PF\n",
+				status);
+			return status;
+		}
+	}
+
+	return status;
+}
+
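+/* hclgevf_mbx_handler: drain and dispatch all pending messages on the CRQ
+ * @hdev: pointer to struct hclgevf_dev
+ *
+ * PF responses are stashed in hdev->mbx_resp for the synchronous waiter in
+ * hclgevf_get_mbx_resp(), link-state notifications are forwarded to the
+ * upper layer, and anything else is logged and dropped.
+ */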
+void hclgevf_mbx_handler(struct hclgevf_dev *hdev)
+{
+	struct hclgevf_mbx_resp_status *resp;
+	struct hclge_mbx_pf_to_vf_cmd *req;
+	struct hclgevf_cmq_ring *crq;
+	struct hclgevf_desc *desc;
+	u16 link_status, flag;
+	u8 *temp;
+	int i;
+
+	resp = &hdev->mbx_resp;
+	crq = &hdev->hw.cmq.crq;
+
+	flag = le16_to_cpu(crq->desc[crq->next_to_use].flag);
+	while (hnae_get_bit(flag, HCLGEVF_CMDQ_RX_OUTVLD_B)) {
+		desc = &crq->desc[crq->next_to_use];
+		req = (struct hclge_mbx_pf_to_vf_cmd *)desc->data;
+
+		switch (req->msg[0]) {
+		case HCLGE_MBX_PF_VF_RESP:
+			if (resp->received_resp)
+				dev_warn(&hdev->pdev->dev,
+					 "VF mbx resp flag not clear(%d)\n",
+					 req->msg[1]);
+			resp->received_resp = true;
+
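+			/* msg[1]/msg[2] echo the code/subcode of the original
+			 * request, msg[3] carries the PF's status and msg[4..]
+			 * any response payload
+			 */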
+			resp->origin_mbx_msg = (req->msg[1] << 16);
+			resp->origin_mbx_msg |= req->msg[2];
+			resp->resp_status = req->msg[3];
+
+			temp = (u8 *)&req->msg[4];
+			for (i = 0; i < HCLGE_MBX_MAX_RESP_DATA_SIZE; i++) {
+				resp->additional_info[i] = *temp;
+				temp++;
+			}
+			break;
+		case HCLGE_MBX_LINK_STAT_CHANGE:
+			link_status = le16_to_cpu(req->msg[1]);
+
+			/* update the upper layer with the new link status */
+			hclgevf_update_link_status(hdev, link_status);
+
+			break;
+		default:
+			dev_err(&hdev->pdev->dev,
+				"VF received unsupported(%d) mbx msg from PF\n",
+				req->msg[0]);
+			break;
+		}
+		hclge_mbx_ring_ptr_move_crq(crq);
+		flag = le16_to_cpu(crq->desc[crq->next_to_use].flag);
+	}
+
+	/* Write back the CMDQ_RQ head pointer; the M7 firmware needs it */
+	hclgevf_write_dev(&hdev->hw, HCLGEVF_NIC_CRQ_HEAD_REG,
+			  crq->next_to_use);
+}
diff --git a/drivers/net/ethernet/ibm/emac/core.c b/drivers/net/ethernet/ibm/emac/core.c
index 241db3199b88..354c0982847b 100644
--- a/drivers/net/ethernet/ibm/emac/core.c
+++ b/drivers/net/ethernet/ibm/emac/core.c
@@ -199,18 +199,18 @@ static void __emac_set_multicast_list(struct emac_instance *dev);
 
 static inline int emac_phy_supports_gige(int phy_mode)
 {
-	return  phy_mode == PHY_MODE_GMII ||
-		phy_mode == PHY_MODE_RGMII ||
-		phy_mode == PHY_MODE_SGMII ||
-		phy_mode == PHY_MODE_TBI ||
-		phy_mode == PHY_MODE_RTBI;
+	return  phy_interface_mode_is_rgmii(phy_mode) ||
+		phy_mode == PHY_INTERFACE_MODE_GMII ||
+		phy_mode == PHY_INTERFACE_MODE_SGMII ||
+		phy_mode == PHY_INTERFACE_MODE_TBI ||
+		phy_mode == PHY_INTERFACE_MODE_RTBI;
 }
 
 static inline int emac_phy_gpcs(int phy_mode)
 {
-	return  phy_mode == PHY_MODE_SGMII ||
-		phy_mode == PHY_MODE_TBI ||
-		phy_mode == PHY_MODE_RTBI;
+	return  phy_mode == PHY_INTERFACE_MODE_SGMII ||
+		phy_mode == PHY_INTERFACE_MODE_TBI ||
+		phy_mode == PHY_INTERFACE_MODE_RTBI;
 }
 
 static inline void emac_tx_enable(struct emac_instance *dev)
@@ -2871,7 +2871,7 @@ static int emac_init_config(struct emac_instance *dev)
 	/* PHY mode needs some decoding */
 	dev->phy_mode = of_get_phy_mode(np);
 	if (dev->phy_mode < 0)
-		dev->phy_mode = PHY_MODE_NA;
+		dev->phy_mode = PHY_INTERFACE_MODE_NA;
 
 	/* Check EMAC version */
 	if (of_device_is_compatible(np, "ibm,emac4sync")) {
@@ -3174,7 +3174,7 @@ static int emac_probe(struct platform_device *ofdev)
 	printk(KERN_INFO "%s: EMAC-%d %pOF, MAC %pM\n",
 	       ndev->name, dev->cell_index, np, ndev->dev_addr);
 
-	if (dev->phy_mode == PHY_MODE_SGMII)
+	if (dev->phy_mode == PHY_INTERFACE_MODE_SGMII)
 		printk(KERN_NOTICE "%s: in SGMII mode\n", ndev->name);
 
 	if (dev->phy.address >= 0)
diff --git a/drivers/net/ethernet/ibm/emac/emac.h b/drivers/net/ethernet/ibm/emac/emac.h
index c26d2631ca30..e2f80cca9bed 100644
--- a/drivers/net/ethernet/ibm/emac/emac.h
+++ b/drivers/net/ethernet/ibm/emac/emac.h
@@ -104,19 +104,6 @@ struct emac_regs {
 	} u1;
 };
 
-/*
- * PHY mode settings (EMAC <-> ZMII/RGMII bridge <-> PHY)
- */
-#define PHY_MODE_NA	PHY_INTERFACE_MODE_NA
-#define PHY_MODE_MII	PHY_INTERFACE_MODE_MII
-#define PHY_MODE_RMII	PHY_INTERFACE_MODE_RMII
-#define PHY_MODE_SMII	PHY_INTERFACE_MODE_SMII
-#define PHY_MODE_RGMII	PHY_INTERFACE_MODE_RGMII
-#define PHY_MODE_TBI	PHY_INTERFACE_MODE_TBI
-#define PHY_MODE_GMII	PHY_INTERFACE_MODE_GMII
-#define PHY_MODE_RTBI	PHY_INTERFACE_MODE_RTBI
-#define PHY_MODE_SGMII	PHY_INTERFACE_MODE_SGMII
-
 /* EMACx_MR0 */
 #define EMAC_MR0_RXI			0x80000000
 #define EMAC_MR0_TXI			0x40000000
diff --git a/drivers/net/ethernet/ibm/emac/phy.c b/drivers/net/ethernet/ibm/emac/phy.c
index 35865d05fccd..aa070c063e48 100644
--- a/drivers/net/ethernet/ibm/emac/phy.c
+++ b/drivers/net/ethernet/ibm/emac/phy.c
@@ -96,7 +96,7 @@ int emac_mii_reset_gpcs(struct mii_phy *phy)
 	if ((val & BMCR_ISOLATE) && limit > 0)
 		gpcs_phy_write(phy, MII_BMCR, val & ~BMCR_ISOLATE);
 
-	if (limit > 0 && phy->mode == PHY_MODE_SGMII) {
+	if (limit > 0 && phy->mode == PHY_INTERFACE_MODE_SGMII) {
 		/* Configure GPCS interface to recommended setting for SGMII */
 		gpcs_phy_write(phy, 0x04, 0x8120); /* AsymPause, FDX */
 		gpcs_phy_write(phy, 0x07, 0x2801); /* msg_pg, toggle */
@@ -313,16 +313,16 @@ static int cis8201_init(struct mii_phy *phy)
 	epcr &= ~EPCR_MODE_MASK;
 
 	switch (phy->mode) {
-	case PHY_MODE_TBI:
+	case PHY_INTERFACE_MODE_TBI:
 		epcr |= EPCR_TBI_MODE;
 		break;
-	case PHY_MODE_RTBI:
+	case PHY_INTERFACE_MODE_RTBI:
 		epcr |= EPCR_RTBI_MODE;
 		break;
-	case PHY_MODE_GMII:
+	case PHY_INTERFACE_MODE_GMII:
 		epcr |= EPCR_GMII_MODE;
 		break;
-	case PHY_MODE_RGMII:
+	case PHY_INTERFACE_MODE_RGMII:
 	default:
 		epcr |= EPCR_RGMII_MODE;
 	}
diff --git a/drivers/net/ethernet/ibm/emac/rgmii.c b/drivers/net/ethernet/ibm/emac/rgmii.c
index c4a1ac38bba8..00f5999de3cf 100644
--- a/drivers/net/ethernet/ibm/emac/rgmii.c
+++ b/drivers/net/ethernet/ibm/emac/rgmii.c
@@ -52,43 +52,28 @@
 /* RGMII bridge supports only GMII/TBI and RGMII/RTBI PHYs */
 static inline int rgmii_valid_mode(int phy_mode)
 {
-	return  phy_mode == PHY_MODE_GMII ||
-		phy_mode == PHY_MODE_MII ||
-		phy_mode == PHY_MODE_RGMII ||
-		phy_mode == PHY_MODE_TBI ||
-		phy_mode == PHY_MODE_RTBI;
-}
-
-static inline const char *rgmii_mode_name(int mode)
-{
-	switch (mode) {
-	case PHY_MODE_RGMII:
-		return "RGMII";
-	case PHY_MODE_TBI:
-		return "TBI";
-	case PHY_MODE_GMII:
-		return "GMII";
-	case PHY_MODE_MII:
-		return "MII";
-	case PHY_MODE_RTBI:
-		return "RTBI";
-	default:
-		BUG();
-	}
+	return  phy_interface_mode_is_rgmii(phy_mode) ||
+		phy_mode == PHY_INTERFACE_MODE_GMII ||
+		phy_mode == PHY_INTERFACE_MODE_MII ||
+		phy_mode == PHY_INTERFACE_MODE_TBI ||
+		phy_mode == PHY_INTERFACE_MODE_RTBI;
 }
 
 static inline u32 rgmii_mode_mask(int mode, int input)
 {
 	switch (mode) {
-	case PHY_MODE_RGMII:
+	case PHY_INTERFACE_MODE_RGMII:
+	case PHY_INTERFACE_MODE_RGMII_ID:
+	case PHY_INTERFACE_MODE_RGMII_RXID:
+	case PHY_INTERFACE_MODE_RGMII_TXID:
 		return RGMII_FER_RGMII(input);
-	case PHY_MODE_TBI:
+	case PHY_INTERFACE_MODE_TBI:
 		return RGMII_FER_TBI(input);
-	case PHY_MODE_GMII:
+	case PHY_INTERFACE_MODE_GMII:
 		return RGMII_FER_GMII(input);
-	case PHY_MODE_MII:
+	case PHY_INTERFACE_MODE_MII:
 		return RGMII_FER_MII(input);
-	case PHY_MODE_RTBI:
+	case PHY_INTERFACE_MODE_RTBI:
 		return RGMII_FER_RTBI(input);
 	default:
 		BUG();
@@ -115,7 +100,7 @@ int rgmii_attach(struct platform_device *ofdev, int input, int mode)
 	out_be32(&p->fer, in_be32(&p->fer) | rgmii_mode_mask(mode, input));
 
 	printk(KERN_NOTICE "%pOF: input %d in %s mode\n",
-	       ofdev->dev.of_node, input, rgmii_mode_name(mode));
+	       ofdev->dev.of_node, input, phy_modes(mode));
 
 	++dev->users;
 
diff --git a/drivers/net/ethernet/ibm/emac/zmii.c b/drivers/net/ethernet/ibm/emac/zmii.c
index 89c42d362292..fdcc734541fe 100644
--- a/drivers/net/ethernet/ibm/emac/zmii.c
+++ b/drivers/net/ethernet/ibm/emac/zmii.c
@@ -49,20 +49,20 @@
  */
 static inline int zmii_valid_mode(int mode)
 {
-	return  mode == PHY_MODE_MII ||
-		mode == PHY_MODE_RMII ||
-		mode == PHY_MODE_SMII ||
-		mode == PHY_MODE_NA;
+	return  mode == PHY_INTERFACE_MODE_MII ||
+		mode == PHY_INTERFACE_MODE_RMII ||
+		mode == PHY_INTERFACE_MODE_SMII ||
+		mode == PHY_INTERFACE_MODE_NA;
 }
 
 static inline const char *zmii_mode_name(int mode)
 {
 	switch (mode) {
-	case PHY_MODE_MII:
+	case PHY_INTERFACE_MODE_MII:
 		return "MII";
-	case PHY_MODE_RMII:
+	case PHY_INTERFACE_MODE_RMII:
 		return "RMII";
-	case PHY_MODE_SMII:
+	case PHY_INTERFACE_MODE_SMII:
 		return "SMII";
 	default:
 		BUG();
@@ -72,11 +72,11 @@ static inline const char *zmii_mode_name(int mode)
 static inline u32 zmii_mode_mask(int mode, int input)
 {
 	switch (mode) {
-	case PHY_MODE_MII:
+	case PHY_INTERFACE_MODE_MII:
 		return ZMII_FER_MII(input);
-	case PHY_MODE_RMII:
+	case PHY_INTERFACE_MODE_RMII:
 		return ZMII_FER_RMII(input);
-	case PHY_MODE_SMII:
+	case PHY_INTERFACE_MODE_SMII:
 		return ZMII_FER_SMII(input);
 	default:
 		return 0;
@@ -106,27 +106,27 @@ int zmii_attach(struct platform_device *ofdev, int input, int *mode)
 	 * Please, always specify PHY mode in your board port to avoid
 	 * any surprises.
 	 */
-	if (dev->mode == PHY_MODE_NA) {
-		if (*mode == PHY_MODE_NA) {
+	if (dev->mode == PHY_INTERFACE_MODE_NA) {
+		if (*mode == PHY_INTERFACE_MODE_NA) {
 			u32 r = dev->fer_save;
 
 			ZMII_DBG(dev, "autodetecting mode, FER = 0x%08x" NL, r);
 
 			if (r & (ZMII_FER_MII(0) | ZMII_FER_MII(1)))
-				dev->mode = PHY_MODE_MII;
+				dev->mode = PHY_INTERFACE_MODE_MII;
 			else if (r & (ZMII_FER_RMII(0) | ZMII_FER_RMII(1)))
-				dev->mode = PHY_MODE_RMII;
+				dev->mode = PHY_INTERFACE_MODE_RMII;
 			else
-				dev->mode = PHY_MODE_SMII;
-		} else
+				dev->mode = PHY_INTERFACE_MODE_SMII;
+		} else {
 			dev->mode = *mode;
-
+		}
 		printk(KERN_NOTICE "%pOF: bridge in %s mode\n",
 		       ofdev->dev.of_node,
 		       zmii_mode_name(dev->mode));
 	} else {
 		/* All inputs must use the same mode */
-		if (*mode != PHY_MODE_NA && *mode != dev->mode) {
+		if (*mode != PHY_INTERFACE_MODE_NA && *mode != dev->mode) {
 			printk(KERN_ERR
 			       "%pOF: invalid mode %d specified for input %d\n",
 			       ofdev->dev.of_node, *mode, input);
@@ -246,7 +246,7 @@ static int zmii_probe(struct platform_device *ofdev)
 
 	mutex_init(&dev->lock);
 	dev->ofdev = ofdev;
-	dev->mode = PHY_MODE_NA;
+	dev->mode = PHY_INTERFACE_MODE_NA;
 
 	rc = -ENXIO;
 	if (of_address_to_resource(np, 0, &regs)) {
diff --git a/drivers/net/ethernet/ibm/ibmvnic.c b/drivers/net/ethernet/ibm/ibmvnic.c
index b65f5f3ac034..8c3058d5d191 100644
--- a/drivers/net/ethernet/ibm/ibmvnic.c
+++ b/drivers/net/ethernet/ibm/ibmvnic.c
@@ -59,6 +59,7 @@
 #include <linux/mm.h>
 #include <linux/ethtool.h>
 #include <linux/proc_fs.h>
+#include <linux/if_arp.h>
 #include <linux/in.h>
 #include <linux/ip.h>
 #include <linux/ipv6.h>
@@ -1177,6 +1178,9 @@ static int build_hdr_data(u8 hdr_field, struct sk_buff *skb,
 			hdr_len[2] = tcp_hdrlen(skb);
 		else if (ipv6_hdr(skb)->nexthdr == IPPROTO_UDP)
 			hdr_len[2] = sizeof(struct udphdr);
+	} else if (skb->protocol == htons(ETH_P_ARP)) {
+		hdr_len[1] = arp_hdr_len(skb->dev);
+		hdr_len[2] = 0;
 	}
 
 	memset(hdr_data, 0, 120);
@@ -1412,7 +1416,8 @@ static int ibmvnic_xmit(struct sk_buff *skb, struct net_device *netdev)
 	/* determine if l2/3/4 headers are sent to firmware */
 	if ((*hdrs >> 7) & 1 &&
 	    (skb->protocol == htons(ETH_P_IP) ||
-	     skb->protocol == htons(ETH_P_IPV6))) {
+	     skb->protocol == htons(ETH_P_IPV6) ||
+	     skb->protocol == htons(ETH_P_ARP))) {
 		build_hdr_descs_arr(tx_buff, &num_entries, *hdrs);
 		tx_crq.v1.n_crq_elem = num_entries;
 		tx_buff->indir_arr[0] = tx_crq;
@@ -1543,15 +1548,19 @@ static int __ibmvnic_set_mac(struct net_device *netdev, struct sockaddr *p)
 	crq.change_mac_addr.first = IBMVNIC_CRQ_CMD;
 	crq.change_mac_addr.cmd = CHANGE_MAC_ADDR;
 	ether_addr_copy(&crq.change_mac_addr.mac_addr[0], addr->sa_data);
+
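+	/* the CHANGE_MAC_ADDR response arrives asynchronously over the CRQ;
+	 * handle_change_mac_rsp() completes fw_done and records the result
+	 * in adapter->fw_done_rc
+	 */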
+	init_completion(&adapter->fw_done);
 	ibmvnic_send_crq(adapter, &crq);
+	wait_for_completion(&adapter->fw_done);
 	/* netdev->dev_addr is changed in handle_change_mac_rsp function */
-	return 0;
+	return adapter->fw_done_rc ? -EIO : 0;
 }
 
 static int ibmvnic_set_mac(struct net_device *netdev, void *p)
 {
 	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
 	struct sockaddr *addr = p;
+	int rc;
 
 	if (adapter->state == VNIC_PROBED) {
 		memcpy(&adapter->desired.mac, addr, sizeof(struct sockaddr));
@@ -1559,9 +1568,9 @@ static int ibmvnic_set_mac(struct net_device *netdev, void *p)
 		return 0;
 	}
 
-	__ibmvnic_set_mac(netdev, addr);
+	rc = __ibmvnic_set_mac(netdev, addr);
 
-	return 0;
+	return rc;
 }
 
 /**
@@ -2484,6 +2493,12 @@ static irqreturn_t ibmvnic_interrupt_rx(int irq, void *instance)
 	struct ibmvnic_sub_crq_queue *scrq = instance;
 	struct ibmvnic_adapter *adapter = scrq->adapter;
 
+	/* When booting a kdump kernel we can hit pending interrupts
+	 * prior to completing driver initialization.
+	 */
+	if (unlikely(adapter->state != VNIC_OPEN))
+		return IRQ_NONE;
+
 	adapter->rx_stats_buffers[scrq->scrq_num].interrupts++;
 
 	if (napi_schedule_prep(&adapter->napi[scrq->scrq_num])) {
@@ -3558,8 +3573,8 @@ static void handle_error_indication(union ibmvnic_crq *crq,
 		ibmvnic_reset(adapter, VNIC_RESET_NON_FATAL);
 }
 
-static void handle_change_mac_rsp(union ibmvnic_crq *crq,
-				  struct ibmvnic_adapter *adapter)
+static int handle_change_mac_rsp(union ibmvnic_crq *crq,
+				 struct ibmvnic_adapter *adapter)
 {
 	struct net_device *netdev = adapter->netdev;
 	struct device *dev = &adapter->vdev->dev;
@@ -3568,10 +3583,13 @@ static void handle_change_mac_rsp(union ibmvnic_crq *crq,
 	rc = crq->change_mac_addr_rsp.rc.code;
 	if (rc) {
 		dev_err(dev, "Error %ld in CHANGE_MAC_ADDR_RSP\n", rc);
-		return;
+		goto out;
 	}
 	memcpy(netdev->dev_addr, &crq->change_mac_addr_rsp.mac_addr[0],
 	       ETH_ALEN);
+out:
+	complete(&adapter->fw_done);
+	return rc;
 }
 
 static void handle_request_cap_rsp(union ibmvnic_crq *crq,
@@ -4031,7 +4049,7 @@ static void ibmvnic_handle_crq(union ibmvnic_crq *crq,
 		break;
 	case CHANGE_MAC_ADDR_RSP:
 		netdev_dbg(netdev, "Got MAC address change Response\n");
-		handle_change_mac_rsp(crq, adapter);
+		adapter->fw_done_rc = handle_change_mac_rsp(crq, adapter);
 		break;
 	case ERROR_INDICATION:
 		netdev_dbg(netdev, "Got Error Indication\n");
@@ -4335,7 +4353,7 @@ static int ibmvnic_probe(struct vio_dev *dev, const struct vio_device_id *id)
 	}
 
 	netdev = alloc_etherdev_mq(sizeof(struct ibmvnic_adapter),
-				   IBMVNIC_MAX_TX_QUEUES);
+				   IBMVNIC_MAX_QUEUES);
 	if (!netdev)
 		return -ENOMEM;
 
diff --git a/drivers/net/ethernet/ibm/ibmvnic.h b/drivers/net/ethernet/ibm/ibmvnic.h
index 3aec42118db2..fe21a6e2ddae 100644
--- a/drivers/net/ethernet/ibm/ibmvnic.h
+++ b/drivers/net/ethernet/ibm/ibmvnic.h
@@ -39,7 +39,7 @@
 #define IBMVNIC_RX_WEIGHT		16
 /* when changing this, update IBMVNIC_IO_ENTITLEMENT_DEFAULT */
 #define IBMVNIC_BUFFS_PER_POOL	100
-#define IBMVNIC_MAX_TX_QUEUES	5
+#define IBMVNIC_MAX_QUEUES	10
 
 #define IBMVNIC_TSO_BUF_SZ	65536
 #define IBMVNIC_TSO_BUFS	64
diff --git a/drivers/net/ethernet/intel/e1000/e1000_ethtool.c b/drivers/net/ethernet/intel/e1000/e1000_ethtool.c
index 3b3983a1ffbb..dc71e87c3260 100644
--- a/drivers/net/ethernet/intel/e1000/e1000_ethtool.c
+++ b/drivers/net/ethernet/intel/e1000/e1000_ethtool.c
@@ -1838,8 +1838,8 @@ static void e1000_get_ethtool_stats(struct net_device *netdev,
 			p = (char *)adapter + stat->stat_offset;
 			break;
 		default:
-			WARN_ONCE(1, "Invalid E1000 stat type: %u index %d\n",
-				  stat->type, i);
+			netdev_WARN_ONCE(netdev, "Invalid E1000 stat type: %u index %d\n",
+					 stat->type, i);
 			continue;
 		}
 
diff --git a/drivers/net/ethernet/intel/e1000e/netdev.c b/drivers/net/ethernet/intel/e1000e/netdev.c
index 9f18d39bdc8f..1298b69f990b 100644
--- a/drivers/net/ethernet/intel/e1000e/netdev.c
+++ b/drivers/net/ethernet/intel/e1000e/netdev.c
@@ -3303,9 +3303,11 @@ static void e1000_configure_rx(struct e1000_adapter *adapter)
 		if (adapter->flags & FLAG_IS_ICH) {
 			u32 rxdctl = er32(RXDCTL(0));
 
-			ew32(RXDCTL(0), rxdctl | 0x3);
+			ew32(RXDCTL(0), rxdctl | 0x3 | BIT(8));
 		}
 
+		dev_info(&adapter->pdev->dev,
+			 "Some CPU C-states have been disabled in order to enable jumbo frames\n");
 		pm_qos_update_request(&adapter->pm_qos_req, lat);
 	} else {
 		pm_qos_update_request(&adapter->pm_qos_req,
diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_iov.c b/drivers/net/ethernet/intel/fm10k/fm10k_iov.c
index ea3ab24265ee..760cfa52d02c 100644
--- a/drivers/net/ethernet/intel/fm10k/fm10k_iov.c
+++ b/drivers/net/ethernet/intel/fm10k/fm10k_iov.c
@@ -353,7 +353,7 @@ int fm10k_iov_resume(struct pci_dev *pdev)
 		struct fm10k_vf_info *vf_info = &iov_data->vf_info[i];
 
 		/* allocate all but the last GLORT to the VFs */
-		if (i == ((~hw->mac.dglort_map) >> FM10K_DGLORTMAP_MASK_SHIFT))
+		if (i == (~hw->mac.dglort_map >> FM10K_DGLORTMAP_MASK_SHIFT))
 			break;
 
 		/* assign GLORT to VF, and restrict it to multicast */
@@ -511,7 +511,7 @@ int fm10k_iov_configure(struct pci_dev *pdev, int num_vfs)
 		return err;
 
 	/* allocate VFs if not already allocated */
-	if (num_vfs && (num_vfs != current_vfs)) {
+	if (num_vfs && num_vfs != current_vfs) {
 		/* Disable completer abort error reporting as
 		 * the VFs can trigger this any time they read a queue
 		 * that they don't own.
diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_main.c b/drivers/net/ethernet/intel/fm10k/fm10k_main.c
index 538b42d5c187..8e12aae065d8 100644
--- a/drivers/net/ethernet/intel/fm10k/fm10k_main.c
+++ b/drivers/net/ethernet/intel/fm10k/fm10k_main.c
@@ -446,13 +446,13 @@ static void fm10k_type_trans(struct fm10k_ring *rx_ring,
 
 	skb->protocol = eth_type_trans(skb, dev);
 
+	/* Record Rx queue, or update macvlan statistics */
 	if (!l2_accel)
-		return;
-
-	/* update MACVLAN statistics */
-	macvlan_count_rx(netdev_priv(dev), skb->len + ETH_HLEN, 1,
-			 !!(rx_desc->w.hdr_info &
-			    cpu_to_le16(FM10K_RXD_HDR_INFO_XC_MASK)));
+		skb_record_rx_queue(skb, rx_ring->queue_index);
+	else
+		macvlan_count_rx(netdev_priv(dev), skb->len + ETH_HLEN, true,
+				 (skb->pkt_type == PACKET_BROADCAST) ||
+				 (skb->pkt_type == PACKET_MULTICAST));
 }
 
 /**
@@ -479,8 +479,6 @@ static unsigned int fm10k_process_skb_fields(struct fm10k_ring *rx_ring,
 
 	FM10K_CB(skb)->fi.w.vlan = rx_desc->w.vlan;
 
-	skb_record_rx_queue(skb, rx_ring->queue_index);
-
 	FM10K_CB(skb)->fi.d.glort = rx_desc->d.glort;
 
 	if (rx_desc->w.vlan) {
diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_netdev.c b/drivers/net/ethernet/intel/fm10k/fm10k_netdev.c
index adc62fb38c49..a38ae5c54da3 100644
--- a/drivers/net/ethernet/intel/fm10k/fm10k_netdev.c
+++ b/drivers/net/ethernet/intel/fm10k/fm10k_netdev.c
@@ -934,8 +934,12 @@ static int fm10k_update_vid(struct net_device *netdev, u16 vid, bool set)
 	if (vid >= VLAN_N_VID)
 		return -EINVAL;
 
-	/* Verify we have permission to add VLANs */
-	if (hw->mac.vlan_override)
+	/* Verify that we have permission to add VLANs. If this is a request
+	 * to remove a VLAN, we still want to allow the user to remove the
+	 * VLAN device. In that case, we need to clear the bit in the
+	 * active_vlans bitmask.
+	 */
+	if (set && hw->mac.vlan_override)
 		return -EACCES;
 
 	/* update active_vlans bitmask */
@@ -954,6 +958,12 @@ static int fm10k_update_vid(struct net_device *netdev, u16 vid, bool set)
 			rx_ring->vid &= ~FM10K_VLAN_CLEAR;
 	}
 
+	/* If our VLAN has been overridden, there is no reason to send VLAN
+	 * removal requests as they will be silently ignored.
+	 */
+	if (hw->mac.vlan_override)
+		return 0;
+
 	/* Do not remove default VLAN ID related entries from VLAN and MAC
 	 * tables
 	 */
@@ -1040,14 +1050,13 @@ static int __fm10k_uc_sync(struct net_device *dev,
 			   const unsigned char *addr, bool sync)
 {
 	struct fm10k_intfc *interface = netdev_priv(dev);
-	struct fm10k_hw *hw = &interface->hw;
 	u16 vid, glort = interface->glort;
 	s32 err;
 
 	if (!is_valid_ether_addr(addr))
 		return -EADDRNOTAVAIL;
 
-	for (vid = hw->mac.default_vid ? fm10k_find_next_vlan(interface, 0) : 1;
+	for (vid = fm10k_find_next_vlan(interface, 0);
 	     vid < VLAN_N_VID;
 	     vid = fm10k_find_next_vlan(interface, vid)) {
 		err = fm10k_queue_mac_request(interface, glort,
@@ -1106,14 +1115,13 @@ static int __fm10k_mc_sync(struct net_device *dev,
 			   const unsigned char *addr, bool sync)
 {
 	struct fm10k_intfc *interface = netdev_priv(dev);
-	struct fm10k_hw *hw = &interface->hw;
 	u16 vid, glort = interface->glort;
 	s32 err;
 
 	if (!is_multicast_ether_addr(addr))
 		return -EADDRNOTAVAIL;
 
-	for (vid = hw->mac.default_vid ? fm10k_find_next_vlan(interface, 0) : 1;
+	for (vid = fm10k_find_next_vlan(interface, 0);
 	     vid < VLAN_N_VID;
 	     vid = fm10k_find_next_vlan(interface, vid)) {
 		err = fm10k_queue_mac_request(interface, glort,
@@ -1157,10 +1165,12 @@ static void fm10k_set_rx_mode(struct net_device *dev)
 
 	/* update xcast mode first, but only if it changed */
 	if (interface->xcast_mode != xcast_mode) {
-		/* update VLAN table */
+		/* update VLAN table when entering promiscuous mode */
 		if (xcast_mode == FM10K_XCAST_MODE_PROMISC)
 			fm10k_queue_vlan_request(interface, FM10K_VLAN_ALL,
 						 0, true);
+
+		/* clear VLAN table when exiting promiscuous mode */
 		if (interface->xcast_mode == FM10K_XCAST_MODE_PROMISC)
 			fm10k_clear_unused_vlans(interface);
 
@@ -1182,9 +1192,10 @@ static void fm10k_set_rx_mode(struct net_device *dev)
 
 void fm10k_restore_rx_state(struct fm10k_intfc *interface)
 {
+	struct fm10k_l2_accel *l2_accel = interface->l2_accel;
 	struct net_device *netdev = interface->netdev;
 	struct fm10k_hw *hw = &interface->hw;
-	int xcast_mode;
+	int xcast_mode, i;
 	u16 vid, glort;
 
 	/* record glort for this interface */
@@ -1211,11 +1222,8 @@ void fm10k_restore_rx_state(struct fm10k_intfc *interface)
 	fm10k_queue_vlan_request(interface, FM10K_VLAN_ALL, 0,
 				 xcast_mode == FM10K_XCAST_MODE_PROMISC);
 
-	/* Add filter for VLAN 0 */
-	fm10k_queue_vlan_request(interface, 0, 0, true);
-
 	/* update table with current entries */
-	for (vid = hw->mac.default_vid ? fm10k_find_next_vlan(interface, 0) : 1;
+	for (vid = fm10k_find_next_vlan(interface, 0);
 	     vid < VLAN_N_VID;
 	     vid = fm10k_find_next_vlan(interface, vid)) {
 		fm10k_queue_vlan_request(interface, vid, 0, true);
@@ -1234,6 +1242,24 @@ void fm10k_restore_rx_state(struct fm10k_intfc *interface)
 	__dev_uc_sync(netdev, fm10k_uc_sync, fm10k_uc_unsync);
 	__dev_mc_sync(netdev, fm10k_mc_sync, fm10k_mc_unsync);
 
+	/* synchronize macvlan addresses */
+	if (l2_accel) {
+		for (i = 0; i < l2_accel->size; i++) {
+			struct net_device *sdev = l2_accel->macvlan[i];
+
+			if (!sdev)
+				continue;
+
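+			/* macvlan glorts are laid out sequentially, starting
+			 * one past the base dglort of this l2_accel context
+			 */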
+			glort = l2_accel->dglort + 1 + i;
+
+			hw->mac.ops.update_xcast_mode(hw, glort,
+						      FM10K_XCAST_MODE_MULTI);
+			fm10k_queue_mac_request(interface, glort,
+						sdev->dev_addr,
+						hw->mac.default_vid, true);
+		}
+	}
+
 	fm10k_mbx_unlock(interface);
 
 	/* record updated xcast mode state */
@@ -1490,7 +1516,7 @@ static void *fm10k_dfwd_add_station(struct net_device *dev,
 		hw->mac.ops.update_xcast_mode(hw, glort,
 					      FM10K_XCAST_MODE_MULTI);
 		fm10k_queue_mac_request(interface, glort, sdev->dev_addr,
-					0, true);
+					hw->mac.default_vid, true);
 	}
 
 	fm10k_mbx_unlock(interface);
@@ -1530,7 +1556,7 @@ static void fm10k_dfwd_del_station(struct net_device *dev, void *priv)
 		hw->mac.ops.update_xcast_mode(hw, glort,
 					      FM10K_XCAST_MODE_NONE);
 		fm10k_queue_mac_request(interface, glort, sdev->dev_addr,
-					0, false);
+					hw->mac.default_vid, false);
 	}
 
 	fm10k_mbx_unlock(interface);
diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_pf.c b/drivers/net/ethernet/intel/fm10k/fm10k_pf.c
index 425d814aed4d..d6406fc31ffb 100644
--- a/drivers/net/ethernet/intel/fm10k/fm10k_pf.c
+++ b/drivers/net/ethernet/intel/fm10k/fm10k_pf.c
@@ -866,7 +866,7 @@ static s32 fm10k_iov_assign_default_mac_vlan_pf(struct fm10k_hw *hw,
 	/* Determine correct default VLAN ID. The FM10K_VLAN_OVERRIDE bit is
 	 * used here to indicate to the VF that it will not have privilege to
 	 * write VLAN_TABLE. All policy is enforced on the PF but this allows
-	 * the VF to correctly report errors to userspace rqeuests.
+	 * the VF to correctly report errors to userspace requests.
 	 */
 	if (vf_info->pf_vid)
 		vf_vid = vf_info->pf_vid | FM10K_VLAN_OVERRIDE;
diff --git a/drivers/net/ethernet/intel/i40e/i40e.h b/drivers/net/ethernet/intel/i40e/i40e.h
index e019baa905c5..46e9f4e0a02c 100644
--- a/drivers/net/ethernet/intel/i40e/i40e.h
+++ b/drivers/net/ethernet/intel/i40e/i40e.h
@@ -508,39 +508,40 @@ struct i40e_pf {
 #define I40E_HW_PORT_ID_VALID			BIT(17)
 #define I40E_HW_RESTART_AUTONEG			BIT(18)
 
-	u32 flags;
-#define I40E_FLAG_RX_CSUM_ENABLED		BIT(0)
-#define I40E_FLAG_MSI_ENABLED			BIT(1)
-#define I40E_FLAG_MSIX_ENABLED			BIT(2)
-#define I40E_FLAG_RSS_ENABLED			BIT(3)
-#define I40E_FLAG_VMDQ_ENABLED			BIT(4)
-#define I40E_FLAG_FILTER_SYNC			BIT(5)
-#define I40E_FLAG_SRIOV_ENABLED			BIT(6)
-#define I40E_FLAG_DCB_CAPABLE			BIT(7)
-#define I40E_FLAG_DCB_ENABLED			BIT(8)
-#define I40E_FLAG_FD_SB_ENABLED			BIT(9)
-#define I40E_FLAG_FD_ATR_ENABLED		BIT(10)
-#define I40E_FLAG_FD_SB_AUTO_DISABLED		BIT(11)
-#define I40E_FLAG_FD_ATR_AUTO_DISABLED		BIT(12)
-#define I40E_FLAG_MFP_ENABLED			BIT(13)
-#define I40E_FLAG_UDP_FILTER_SYNC		BIT(14)
-#define I40E_FLAG_HW_ATR_EVICT_ENABLED		BIT(15)
-#define I40E_FLAG_VEB_MODE_ENABLED		BIT(16)
-#define I40E_FLAG_VEB_STATS_ENABLED		BIT(17)
-#define I40E_FLAG_LINK_POLLING_ENABLED		BIT(18)
-#define I40E_FLAG_TRUE_PROMISC_SUPPORT		BIT(19)
-#define I40E_FLAG_TEMP_LINK_POLLING		BIT(20)
-#define I40E_FLAG_LEGACY_RX			BIT(21)
-#define I40E_FLAG_PTP				BIT(22)
-#define I40E_FLAG_IWARP_ENABLED			BIT(23)
-#define I40E_FLAG_SERVICE_CLIENT_REQUESTED	BIT(24)
-#define I40E_FLAG_CLIENT_L2_CHANGE		BIT(25)
-#define I40E_FLAG_CLIENT_RESET			BIT(26)
-#define I40E_FLAG_LINK_DOWN_ON_CLOSE_ENABLED	BIT(27)
-#define I40E_FLAG_SOURCE_PRUNING_DISABLED	BIT(28)
-#define I40E_FLAG_TC_MQPRIO			BIT(29)
-#define I40E_FLAG_FD_SB_INACTIVE		BIT(30)
-#define I40E_FLAG_FD_SB_TO_CLOUD_FILTER		BIT(31)
+	u64 flags;
+#define I40E_FLAG_RX_CSUM_ENABLED		BIT_ULL(0)
+#define I40E_FLAG_MSI_ENABLED			BIT_ULL(1)
+#define I40E_FLAG_MSIX_ENABLED			BIT_ULL(2)
+#define I40E_FLAG_RSS_ENABLED			BIT_ULL(3)
+#define I40E_FLAG_VMDQ_ENABLED			BIT_ULL(4)
+#define I40E_FLAG_FILTER_SYNC			BIT_ULL(5)
+#define I40E_FLAG_SRIOV_ENABLED			BIT_ULL(6)
+#define I40E_FLAG_DCB_CAPABLE			BIT_ULL(7)
+#define I40E_FLAG_DCB_ENABLED			BIT_ULL(8)
+#define I40E_FLAG_FD_SB_ENABLED			BIT_ULL(9)
+#define I40E_FLAG_FD_ATR_ENABLED		BIT_ULL(10)
+#define I40E_FLAG_FD_SB_AUTO_DISABLED		BIT_ULL(11)
+#define I40E_FLAG_FD_ATR_AUTO_DISABLED		BIT_ULL(12)
+#define I40E_FLAG_MFP_ENABLED			BIT_ULL(13)
+#define I40E_FLAG_UDP_FILTER_SYNC		BIT_ULL(14)
+#define I40E_FLAG_HW_ATR_EVICT_ENABLED		BIT_ULL(15)
+#define I40E_FLAG_VEB_MODE_ENABLED		BIT_ULL(16)
+#define I40E_FLAG_VEB_STATS_ENABLED		BIT_ULL(17)
+#define I40E_FLAG_LINK_POLLING_ENABLED		BIT_ULL(18)
+#define I40E_FLAG_TRUE_PROMISC_SUPPORT		BIT_ULL(19)
+#define I40E_FLAG_TEMP_LINK_POLLING		BIT_ULL(20)
+#define I40E_FLAG_LEGACY_RX			BIT_ULL(21)
+#define I40E_FLAG_PTP				BIT_ULL(22)
+#define I40E_FLAG_IWARP_ENABLED			BIT_ULL(23)
+#define I40E_FLAG_SERVICE_CLIENT_REQUESTED	BIT_ULL(24)
+#define I40E_FLAG_CLIENT_L2_CHANGE		BIT_ULL(25)
+#define I40E_FLAG_CLIENT_RESET			BIT_ULL(26)
+#define I40E_FLAG_LINK_DOWN_ON_CLOSE_ENABLED	BIT_ULL(27)
+#define I40E_FLAG_SOURCE_PRUNING_DISABLED	BIT_ULL(28)
+#define I40E_FLAG_TC_MQPRIO			BIT_ULL(29)
+#define I40E_FLAG_FD_SB_INACTIVE		BIT_ULL(30)
+#define I40E_FLAG_FD_SB_TO_CLOUD_FILTER		BIT_ULL(31)
+#define I40E_FLAG_DISABLE_FW_LLDP		BIT_ULL(32)
 
 	struct i40e_client_instance *cinst;
 	bool stat_offsets_loaded;
diff --git a/drivers/net/ethernet/intel/i40e/i40e_adminq.c b/drivers/net/ethernet/intel/i40e/i40e_adminq.c
index 9af74253c3f7..e78971605e0b 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_adminq.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_adminq.c
@@ -907,10 +907,15 @@ i40e_status i40e_asq_send_command(struct i40e_hw *hw,
 	/* update the error if time out occurred */
 	if ((!cmd_completed) &&
 	    (!details->async && !details->postpone)) {
-		i40e_debug(hw,
-			   I40E_DEBUG_AQ_MESSAGE,
-			   "AQTX: Writeback timeout.\n");
-		status = I40E_ERR_ADMIN_QUEUE_TIMEOUT;
+		if (rd32(hw, hw->aq.asq.len) & I40E_GL_ATQLEN_ATQCRIT_MASK) {
+			i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE,
+				   "AQTX: AQ Critical error.\n");
+			status = I40E_ERR_ADMIN_QUEUE_CRITICAL_ERROR;
+		} else {
+			i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE,
+				   "AQTX: Writeback timeout.\n");
+			status = I40E_ERR_ADMIN_QUEUE_TIMEOUT;
+		}
 	}
 
 asq_send_command_error:
@@ -971,7 +976,7 @@ i40e_status i40e_clean_arq_element(struct i40e_hw *hw,
 	}
 
 	/* set next_to_use to head */
-	ntu = (rd32(hw, hw->aq.arq.head) & I40E_PF_ARQH_ARQH_MASK);
+	ntu = rd32(hw, hw->aq.arq.head) & I40E_PF_ARQH_ARQH_MASK;
 	if (ntu == ntc) {
 		/* nothing to do - shouldn't need to update ring's values */
 		ret_code = I40E_ERR_ADMIN_QUEUE_NO_WORK;
@@ -1027,7 +1032,7 @@ i40e_status i40e_clean_arq_element(struct i40e_hw *hw,
 	hw->aq.arq.next_to_clean = ntc;
 	hw->aq.arq.next_to_use = ntu;
 
-	i40e_nvmupd_check_wait_event(hw, le16_to_cpu(e->desc.opcode));
+	i40e_nvmupd_check_wait_event(hw, le16_to_cpu(e->desc.opcode), &e->desc);
 clean_arq_element_out:
 	/* Set pending if needed, unlock and return */
 	if (pending)
diff --git a/drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h b/drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h
index b0188b8f91ba..a852775d3059 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h
@@ -198,13 +198,14 @@ enum i40e_admin_queue_opc {
 	i40e_aqc_opc_add_mirror_rule	= 0x0260,
 	i40e_aqc_opc_delete_mirror_rule	= 0x0261,
 
-	/* Pipeline Personalization Profile */
+	/* Dynamic Device Personalization */
 	i40e_aqc_opc_write_personalization_profile	= 0x0270,
 	i40e_aqc_opc_get_personalization_profile_list	= 0x0271,
 
 	/* DCB commands */
 	i40e_aqc_opc_dcb_ignore_pfc	= 0x0301,
 	i40e_aqc_opc_dcb_updated	= 0x0302,
+	i40e_aqc_opc_set_dcb_parameters = 0x0303,
 
 	/* TX scheduler */
 	i40e_aqc_opc_configure_vsi_bw_limit		= 0x0400,
@@ -1594,7 +1595,7 @@ struct i40e_aqc_add_delete_mirror_rule_completion {
 
 I40E_CHECK_CMD_LENGTH(i40e_aqc_add_delete_mirror_rule_completion);
 
-/* Pipeline Personalization Profile */
+/* Dynamic Device Personalization */
 struct i40e_aqc_write_personalization_profile {
 	u8      flags;
 	u8      reserved[3];
@@ -1605,7 +1606,7 @@ struct i40e_aqc_write_personalization_profile {
 
 I40E_CHECK_CMD_LENGTH(i40e_aqc_write_personalization_profile);
 
-struct i40e_aqc_write_ppp_resp {
+struct i40e_aqc_write_ddp_resp {
 	__le32 error_offset;
 	__le32 error_info;
 	__le32 addr_high;
@@ -1614,8 +1615,8 @@ struct i40e_aqc_write_ppp_resp {
 
 struct i40e_aqc_get_applied_profiles {
 	u8      flags;
-#define I40E_AQC_GET_PPP_GET_CONF	0x1
-#define I40E_AQC_GET_PPP_GET_RDPU_CONF	0x2
+#define I40E_AQC_GET_DDP_GET_CONF	0x1
+#define I40E_AQC_GET_DDP_GET_RDPU_CONF	0x2
 	u8      rsv[3];
 	__le32  reserved;
 	__le32  addr_high;
@@ -2231,8 +2232,12 @@ I40E_CHECK_CMD_LENGTH(i40e_aqc_phy_register_access);
  */
 struct i40e_aqc_nvm_update {
 	u8	command_flags;
-#define I40E_AQ_NVM_LAST_CMD	0x01
-#define I40E_AQ_NVM_FLASH_ONLY	0x80
+#define I40E_AQ_NVM_LAST_CMD			0x01
+#define I40E_AQ_NVM_FLASH_ONLY			0x80
+#define I40E_AQ_NVM_PRESERVATION_FLAGS_SHIFT	1
+#define I40E_AQ_NVM_PRESERVATION_FLAGS_MASK	0x03
+#define I40E_AQ_NVM_PRESERVATION_FLAGS_SELECTED	0x03
+#define I40E_AQ_NVM_PRESERVATION_FLAGS_ALL	0x01
 	u8	module_pointer;
 	__le16	length;
 	__le32	offset;
@@ -2492,6 +2497,17 @@ struct i40e_aqc_lldp_start {
 
 I40E_CHECK_CMD_LENGTH(i40e_aqc_lldp_start);
 
+/* Set DCB (direct 0x0303) */
+struct i40e_aqc_set_dcb_parameters {
+	u8 command;
+#define I40E_AQ_DCB_SET_AGENT	0x1
+#define I40E_DCB_VALID		0x1
+	u8 valid_flags;
+	u8 reserved[14];
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_set_dcb_parameters);
+
 /* Get CEE DCBX Oper Config (0x0A07)
  * uses the generic descriptor struct
  * returns below as indirect response
diff --git a/drivers/net/ethernet/intel/i40e/i40e_client.c b/drivers/net/ethernet/intel/i40e/i40e_client.c
index 1b1e2acbd07f..0de9610c1d8d 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_client.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_client.c
@@ -378,11 +378,11 @@ void i40e_client_subtask(struct i40e_pf *pf)
 	if (!client || !cdev)
 		return;
 
-	/* Here we handle client opens. If the client is down, but
-	 * the netdev is up, then open the client.
+	/* Here we handle client opens. If the client is down, and
+	 * the netdev is registered, then open the client.
 	 */
 	if (!test_bit(__I40E_CLIENT_INSTANCE_OPENED, &cdev->state)) {
-		if (!test_bit(__I40E_VSI_DOWN, vsi->state) &&
+		if (vsi->netdev_registered &&
 		    client->ops && client->ops->open) {
 			set_bit(__I40E_CLIENT_INSTANCE_OPENED, &cdev->state);
 			ret = client->ops->open(&cdev->lan_info, client);
@@ -393,17 +393,19 @@ void i40e_client_subtask(struct i40e_pf *pf)
 				i40e_client_del_instance(pf);
 			}
 		}
-	} else {
-	/* Likewise for client close. If the client is up, but the netdev
-	 * is down, then close the client.
-	 */
-		if (test_bit(__I40E_VSI_DOWN, vsi->state) &&
-		    client->ops && client->ops->close) {
-			clear_bit(__I40E_CLIENT_INSTANCE_OPENED, &cdev->state);
-			client->ops->close(&cdev->lan_info, client, false);
-			i40e_client_release_qvlist(&cdev->lan_info);
-		}
 	}
+
+	/* enable/disable the PE TCP_ENA flag based on netdev down/up */
+	if (test_bit(__I40E_VSI_DOWN, vsi->state))
+		i40e_client_update_vsi_ctxt(&cdev->lan_info, client,
+					    0, 0, 0,
+					    I40E_CLIENT_VSI_FLAG_TCP_ENABLE);
+	else
+		i40e_client_update_vsi_ctxt(&cdev->lan_info, client,
+					    0, 0,
+					    I40E_CLIENT_VSI_FLAG_TCP_ENABLE,
+					    I40E_CLIENT_VSI_FLAG_TCP_ENABLE);
 }
 
 /**
@@ -717,13 +719,13 @@ static int i40e_client_update_vsi_ctxt(struct i40e_info *ldev,
 		return -ENOENT;
 	}
 
-	if ((valid_flag & I40E_CLIENT_VSI_FLAG_TCP_PACKET_ENABLE) &&
-	    (flag & I40E_CLIENT_VSI_FLAG_TCP_PACKET_ENABLE)) {
+	if ((valid_flag & I40E_CLIENT_VSI_FLAG_TCP_ENABLE) &&
+	    (flag & I40E_CLIENT_VSI_FLAG_TCP_ENABLE)) {
 		ctxt.info.valid_sections =
 			cpu_to_le16(I40E_AQ_VSI_PROP_QUEUE_OPT_VALID);
 		ctxt.info.queueing_opt_flags |= I40E_AQ_VSI_QUE_OPT_TCP_ENA;
-	} else if ((valid_flag & I40E_CLIENT_VSI_FLAG_TCP_PACKET_ENABLE) &&
-		  !(flag & I40E_CLIENT_VSI_FLAG_TCP_PACKET_ENABLE)) {
+	} else if ((valid_flag & I40E_CLIENT_VSI_FLAG_TCP_ENABLE) &&
+		  !(flag & I40E_CLIENT_VSI_FLAG_TCP_ENABLE)) {
 		ctxt.info.valid_sections =
 			cpu_to_le16(I40E_AQ_VSI_PROP_QUEUE_OPT_VALID);
 		ctxt.info.queueing_opt_flags &= ~I40E_AQ_VSI_QUE_OPT_TCP_ENA;
diff --git a/drivers/net/ethernet/intel/i40e/i40e_client.h b/drivers/net/ethernet/intel/i40e/i40e_client.h
index 15b21a5315b5..ba55c889e4c5 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_client.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_client.h
@@ -132,7 +132,7 @@ struct i40e_info {
 
 #define I40E_CLIENT_RESET_LEVEL_PF   1
 #define I40E_CLIENT_RESET_LEVEL_CORE 2
-#define I40E_CLIENT_VSI_FLAG_TCP_PACKET_ENABLE  BIT(1)
+#define I40E_CLIENT_VSI_FLAG_TCP_ENABLE  BIT(1)
 
 struct i40e_ops {
 	/* setup_q_vector_list enables queues with a particular vector */
diff --git a/drivers/net/ethernet/intel/i40e/i40e_common.c b/drivers/net/ethernet/intel/i40e/i40e_common.c
index 095965f268bd..ef5a868aae46 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_common.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_common.c
@@ -278,6 +278,8 @@ const char *i40e_stat_str(struct i40e_hw *hw, i40e_status stat_err)
 		return "I40E_NOT_SUPPORTED";
 	case I40E_ERR_FIRMWARE_API_VERSION:
 		return "I40E_ERR_FIRMWARE_API_VERSION";
+	case I40E_ERR_ADMIN_QUEUE_CRITICAL_ERROR:
+		return "I40E_ERR_ADMIN_QUEUE_CRITICAL_ERROR";
 	}
 
 	snprintf(hw->err_str, sizeof(hw->err_str), "%d", stat_err);
@@ -1486,6 +1488,7 @@ u32 i40e_led_get(struct i40e_hw *hw)
 		case I40E_COMBINED_ACTIVITY:
 		case I40E_FILTER_ACTIVITY:
 		case I40E_MAC_ACTIVITY:
+		case I40E_LINK_ACTIVITY:
 			continue;
 		default:
 			break;
@@ -1534,6 +1537,7 @@ void i40e_led_set(struct i40e_hw *hw, u32 mode, bool blink)
 		case I40E_COMBINED_ACTIVITY:
 		case I40E_FILTER_ACTIVITY:
 		case I40E_MAC_ACTIVITY:
+		case I40E_LINK_ACTIVITY:
 			continue;
 		default:
 			break;
@@ -1544,9 +1548,6 @@ void i40e_led_set(struct i40e_hw *hw, u32 mode, bool blink)
 		gpio_val |= ((mode << I40E_GLGEN_GPIO_CTL_LED_MODE_SHIFT) &
 			     I40E_GLGEN_GPIO_CTL_LED_MODE_MASK);
 
-		if (mode == I40E_LINK_ACTIVITY)
-			blink = false;
-
 		if (blink)
 			gpio_val |= BIT(I40E_GLGEN_GPIO_CTL_LED_BLINK_SHIFT);
 		else
@@ -3465,13 +3466,14 @@ exit:
  * @length: length of the section to be written (in bytes from the offset)
  * @data: command buffer (size [bytes] = length)
  * @last_command: tells if this is the last command in a series
+ * @preservation_flags: Preservation mode flags
  * @cmd_details: pointer to command details structure or NULL
  *
  * Update the NVM using the admin queue commands
  **/
 i40e_status i40e_aq_update_nvm(struct i40e_hw *hw, u8 module_pointer,
 			       u32 offset, u16 length, void *data,
-			       bool last_command,
+			       bool last_command, u8 preservation_flags,
 			       struct i40e_asq_cmd_details *cmd_details)
 {
 	struct i40e_aq_desc desc;
@@ -3490,6 +3492,16 @@ i40e_status i40e_aq_update_nvm(struct i40e_hw *hw, u8 module_pointer,
 	/* If this is the last command in a series, set the proper flag. */
 	if (last_command)
 		cmd->command_flags |= I40E_AQ_NVM_LAST_CMD;
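+	/* preservation flags are only honoured by X722 devices */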
+	if (hw->mac.type == I40E_MAC_X722) {
+		if (preservation_flags == I40E_NVM_PRESERVATION_FLAGS_SELECTED)
+			cmd->command_flags |=
+				(I40E_AQ_NVM_PRESERVATION_FLAGS_SELECTED <<
+				 I40E_AQ_NVM_PRESERVATION_FLAGS_SHIFT);
+		else if (preservation_flags == I40E_NVM_PRESERVATION_FLAGS_ALL)
+			cmd->command_flags |=
+				(I40E_AQ_NVM_PRESERVATION_FLAGS_ALL <<
+				 I40E_AQ_NVM_PRESERVATION_FLAGS_SHIFT);
+	}
 	cmd->module_pointer = module_pointer;
 	cmd->offset = cpu_to_le32(offset);
 	cmd->length = cpu_to_le16(length);
@@ -3629,7 +3641,34 @@ i40e_status i40e_aq_start_lldp(struct i40e_hw *hw,
 	i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_lldp_start);
 
 	cmd->command = I40E_AQ_LLDP_AGENT_START;
+	status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
+
+	return status;
+}
 
+/**
+ * i40e_aq_set_dcb_parameters
+ * @hw: pointer to the hw struct
+ * @dcb_enable: True if DCB configuration needs to be applied
+ * @cmd_details: pointer to command details structure or NULL
+ **/
+enum i40e_status_code
+i40e_aq_set_dcb_parameters(struct i40e_hw *hw, bool dcb_enable,
+			   struct i40e_asq_cmd_details *cmd_details)
+{
+	struct i40e_aq_desc desc;
+	struct i40e_aqc_set_dcb_parameters *cmd =
+		(struct i40e_aqc_set_dcb_parameters *)&desc.params.raw;
+	i40e_status status;
+
+	i40e_fill_default_direct_cmd_desc(&desc,
+					  i40e_aqc_opc_set_dcb_parameters);
+
+	if (dcb_enable) {
+		cmd->valid_flags = I40E_DCB_VALID;
+		cmd->command = I40E_AQ_DCB_SET_AGENT;
+	}
 	status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
 
 	return status;
@@ -5236,7 +5275,7 @@ i40e_status i40e_aq_get_phy_register(struct i40e_hw *hw,
 }
 
 /**
- * i40e_aq_write_ppp - Write pipeline personalization profile (ppp)
+ * i40e_aq_write_ddp - Write dynamic device personalization (ddp)
  * @hw: pointer to the hw struct
  * @buff: command buffer (size in bytes = buff_size)
  * @buff_size: buffer size in bytes
@@ -5246,7 +5285,7 @@ i40e_status i40e_aq_get_phy_register(struct i40e_hw *hw,
  * @cmd_details: pointer to command details structure or NULL
  **/
 enum
-i40e_status_code i40e_aq_write_ppp(struct i40e_hw *hw, void *buff,
+i40e_status_code i40e_aq_write_ddp(struct i40e_hw *hw, void *buff,
 				   u16 buff_size, u32 track_id,
 				   u32 *error_offset, u32 *error_info,
 				   struct i40e_asq_cmd_details *cmd_details)
@@ -5255,7 +5294,7 @@ i40e_status_code i40e_aq_write_ppp(struct i40e_hw *hw, void *buff,
 	struct i40e_aqc_write_personalization_profile *cmd =
 		(struct i40e_aqc_write_personalization_profile *)
 		&desc.params.raw;
-	struct i40e_aqc_write_ppp_resp *resp;
+	struct i40e_aqc_write_ddp_resp *resp;
 	i40e_status status;
 
 	i40e_fill_default_direct_cmd_desc(&desc,
@@ -5271,7 +5310,7 @@ i40e_status_code i40e_aq_write_ppp(struct i40e_hw *hw, void *buff,
 
 	status = i40e_asq_send_command(hw, &desc, buff, buff_size, cmd_details);
 	if (!status) {
-		resp = (struct i40e_aqc_write_ppp_resp *)&desc.params.raw;
+		resp = (struct i40e_aqc_write_ddp_resp *)&desc.params.raw;
 		if (error_offset)
 			*error_offset = le32_to_cpu(resp->error_offset);
 		if (error_info)
@@ -5282,14 +5321,14 @@ i40e_status_code i40e_aq_write_ppp(struct i40e_hw *hw, void *buff,
 }
 
 /**
- * i40e_aq_get_ppp_list - Read pipeline personalization profile (ppp)
+ * i40e_aq_get_ddp_list - Read dynamic device personalization (ddp)
  * @hw: pointer to the hw struct
  * @buff: command buffer (size in bytes = buff_size)
  * @buff_size: buffer size in bytes
  * @cmd_details: pointer to command details structure or NULL
  **/
 enum
-i40e_status_code i40e_aq_get_ppp_list(struct i40e_hw *hw, void *buff,
+i40e_status_code i40e_aq_get_ddp_list(struct i40e_hw *hw, void *buff,
 				      u16 buff_size, u8 flags,
 				      struct i40e_asq_cmd_details *cmd_details)
 {
@@ -5364,11 +5403,6 @@ i40e_write_profile(struct i40e_hw *hw, struct i40e_profile_segment *profile,
 	u32 offset = 0, info = 0;
 	u32 i;
 
-	if (!track_id) {
-		i40e_debug(hw, I40E_DEBUG_PACKAGE, "Track_id can't be 0.");
-		return I40E_NOT_SUPPORTED;
-	}
-
 	dev_cnt = profile->device_table_count;
 
 	for (i = 0; i < dev_cnt; i++) {
@@ -5378,7 +5412,7 @@ i40e_write_profile(struct i40e_hw *hw, struct i40e_profile_segment *profile,
 				break;
 	}
 	if (i == dev_cnt) {
-		i40e_debug(hw, I40E_DEBUG_PACKAGE, "Device doesn't support PPP");
+		i40e_debug(hw, I40E_DEBUG_PACKAGE, "Device doesn't support DDP");
 		return I40E_ERR_DEVICE_NOT_SUPPORTED;
 	}
 
@@ -5397,7 +5431,7 @@ i40e_write_profile(struct i40e_hw *hw, struct i40e_profile_segment *profile,
 			sizeof(struct i40e_profile_section_header);
 
 		/* Write profile */
-		status = i40e_aq_write_ppp(hw, (void *)sec, (u16)section_size,
+		status = i40e_aq_write_ddp(hw, (void *)sec, (u16)section_size,
 					   track_id, &offset, &info, NULL);
 		if (status) {
 			i40e_debug(hw, I40E_DEBUG_PACKAGE,
@@ -5439,10 +5473,10 @@ i40e_add_pinfo_to_list(struct i40e_hw *hw,
 					     sec->section.offset);
 	pinfo->track_id = track_id;
 	pinfo->version = profile->version;
-	pinfo->op = I40E_PPP_ADD_TRACKID;
-	memcpy(pinfo->name, profile->name, I40E_PPP_NAME_SIZE);
+	pinfo->op = I40E_DDP_ADD_TRACKID;
+	memcpy(pinfo->name, profile->name, I40E_DDP_NAME_SIZE);
 
-	status = i40e_aq_write_ppp(hw, (void *)sec, sec->data_end,
+	status = i40e_aq_write_ddp(hw, (void *)sec, sec->data_end,
 				   track_id, &offset, &info, NULL);
 
 	return status;
diff --git a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c
index 5f6cf7212d4f..2f5bee713fef 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c
@@ -126,6 +126,10 @@ static const struct i40e_stats i40e_gstrings_stats[] = {
 	I40E_PF_STAT("link_xoff_rx", stats.link_xoff_rx),
 	I40E_PF_STAT("link_xon_tx", stats.link_xon_tx),
 	I40E_PF_STAT("link_xoff_tx", stats.link_xoff_tx),
+	I40E_PF_STAT("priority_xon_rx", stats.priority_xon_rx),
+	I40E_PF_STAT("priority_xoff_rx", stats.priority_xoff_rx),
+	I40E_PF_STAT("priority_xon_tx", stats.priority_xon_tx),
+	I40E_PF_STAT("priority_xoff_tx", stats.priority_xoff_tx),
 	I40E_PF_STAT("rx_size_64", stats.rx_size_64),
 	I40E_PF_STAT("rx_size_127", stats.rx_size_127),
 	I40E_PF_STAT("rx_size_255", stats.rx_size_255),
@@ -229,6 +233,7 @@ static const struct i40e_priv_flags i40e_gstrings_priv_flags[] = {
 	I40E_PRIV_FLAG("legacy-rx", I40E_FLAG_LEGACY_RX, 0),
 	I40E_PRIV_FLAG("disable-source-pruning",
 		       I40E_FLAG_SOURCE_PRUNING_DISABLED, 0),
+	I40E_PRIV_FLAG("disable-fw-lldp", I40E_FLAG_DISABLE_FW_LLDP, 0),
 };
 
 #define I40E_PRIV_FLAGS_STR_LEN ARRAY_SIZE(i40e_gstrings_priv_flags)
@@ -1585,6 +1590,8 @@ static int i40e_set_ringparam(struct net_device *netdev,
 			 */
 			rx_rings[i].desc = NULL;
 			rx_rings[i].rx_bi = NULL;
+			/* Clear cloned XDP RX-queue info before setup call */
+			memset(&rx_rings[i].xdp_rxq, 0, sizeof(rx_rings[i].xdp_rxq));
 			/* this is to allow wr32 to have something to write to
 			 * during early allocation of Rx buffers
 			 */
@@ -2299,6 +2306,8 @@ static void i40e_set_itr_per_queue(struct i40e_vsi *vsi,
 				   struct ethtool_coalesce *ec,
 				   int queue)
 {
+	struct i40e_ring *rx_ring = vsi->rx_rings[queue];
+	struct i40e_ring *tx_ring = vsi->tx_rings[queue];
 	struct i40e_pf *pf = vsi->back;
 	struct i40e_hw *hw = &pf->hw;
 	struct i40e_q_vector *q_vector;
@@ -2306,26 +2315,26 @@ static void i40e_set_itr_per_queue(struct i40e_vsi *vsi,
 
 	intrl = i40e_intrl_usec_to_reg(vsi->int_rate_limit);
 
-	vsi->rx_rings[queue]->rx_itr_setting = ec->rx_coalesce_usecs;
-	vsi->tx_rings[queue]->tx_itr_setting = ec->tx_coalesce_usecs;
+	rx_ring->rx_itr_setting = ec->rx_coalesce_usecs;
+	tx_ring->tx_itr_setting = ec->tx_coalesce_usecs;
 
 	if (ec->use_adaptive_rx_coalesce)
-		vsi->rx_rings[queue]->rx_itr_setting |= I40E_ITR_DYNAMIC;
+		rx_ring->rx_itr_setting |= I40E_ITR_DYNAMIC;
 	else
-		vsi->rx_rings[queue]->rx_itr_setting &= ~I40E_ITR_DYNAMIC;
+		rx_ring->rx_itr_setting &= ~I40E_ITR_DYNAMIC;
 
 	if (ec->use_adaptive_tx_coalesce)
-		vsi->tx_rings[queue]->tx_itr_setting |= I40E_ITR_DYNAMIC;
+		tx_ring->tx_itr_setting |= I40E_ITR_DYNAMIC;
 	else
-		vsi->tx_rings[queue]->tx_itr_setting &= ~I40E_ITR_DYNAMIC;
+		tx_ring->tx_itr_setting &= ~I40E_ITR_DYNAMIC;
 
-	q_vector = vsi->rx_rings[queue]->q_vector;
-	q_vector->rx.itr = ITR_TO_REG(vsi->rx_rings[queue]->rx_itr_setting);
+	q_vector = rx_ring->q_vector;
+	q_vector->rx.itr = ITR_TO_REG(rx_ring->rx_itr_setting);
 	vector = vsi->base_vector + q_vector->v_idx;
 	wr32(hw, I40E_PFINT_ITRN(I40E_RX_ITR, vector - 1), q_vector->rx.itr);
 
-	q_vector = vsi->tx_rings[queue]->q_vector;
-	q_vector->tx.itr = ITR_TO_REG(vsi->tx_rings[queue]->tx_itr_setting);
+	q_vector = tx_ring->q_vector;
+	q_vector->tx.itr = ITR_TO_REG(tx_ring->tx_itr_setting);
 	vector = vsi->base_vector + q_vector->v_idx;
 	wr32(hw, I40E_PFINT_ITRN(I40E_TX_ITR, vector - 1), q_vector->tx.itr);
 
@@ -2740,16 +2749,16 @@ static int i40e_get_ethtool_fdir_entry(struct i40e_pf *pf,
 
 no_input_set:
 	if (input_set & I40E_L3_SRC_MASK)
-		fsp->m_u.tcp_ip4_spec.ip4src = htonl(0xFFFF);
+		fsp->m_u.tcp_ip4_spec.ip4src = htonl(0xFFFFFFFF);
 
 	if (input_set & I40E_L3_DST_MASK)
-		fsp->m_u.tcp_ip4_spec.ip4dst = htonl(0xFFFF);
+		fsp->m_u.tcp_ip4_spec.ip4dst = htonl(0xFFFFFFFF);
 
 	if (input_set & I40E_L4_SRC_MASK)
-		fsp->m_u.tcp_ip4_spec.psrc = htons(0xFFFFFFFF);
+		fsp->m_u.tcp_ip4_spec.psrc = htons(0xFFFF);
 
 	if (input_set & I40E_L4_DST_MASK)
-		fsp->m_u.tcp_ip4_spec.pdst = htons(0xFFFFFFFF);
+		fsp->m_u.tcp_ip4_spec.pdst = htons(0xFFFF);
 
 	if (rule->dest_ctl == I40E_FILTER_PROGRAM_DESC_DEST_DROP_PACKET)
 		fsp->ring_cookie = RX_CLS_FLOW_DISC;
@@ -3800,6 +3809,16 @@ static int i40e_check_fdir_input_set(struct i40e_vsi *vsi,
 
 	i40e_write_fd_input_set(pf, index, new_mask);
 
+	/* IP_USER_FLOW filters match both IPv4/Other and IPv4/Fragmented
+	 * frames. If we're programming the input set for IPv4/Other, we also
+	 * need to program the IPv4/Fragmented input set. Since we don't have
+	 * separate support, we'll always assume and enforce that the two flow
+	 * types must have matching input sets.
+	 */
+	if (index == I40E_FILTER_PCTYPE_NONF_IPV4_OTHER)
+		i40e_write_fd_input_set(pf, I40E_FILTER_PCTYPE_FRAG_IPV4,
+					new_mask);
+
 	/* Add the new offset and update table, if necessary */
 	if (new_flex_offset) {
 		err = i40e_add_flex_offset(&pf->l4_flex_pit_list, src_offset,
@@ -3822,6 +3841,87 @@ static int i40e_check_fdir_input_set(struct i40e_vsi *vsi,
 }
 
 /**
+ * i40e_match_fdir_filter - Return true if two filters match
+ * @a: pointer to filter struct
+ * @b: pointer to filter struct
+ *
+ * Returns true if the two filters match exactly the same criteria. I.e. they
+ * match the same flow type and have the same parameters. We don't need to
+ * check any input-set since all filters of the same flow type must use the
+ * same input set.
+ **/
+static bool i40e_match_fdir_filter(struct i40e_fdir_filter *a,
+				   struct i40e_fdir_filter *b)
+{
+	/* The filters do not match if any of these criteria differ. */
+	if (a->dst_ip != b->dst_ip ||
+	    a->src_ip != b->src_ip ||
+	    a->dst_port != b->dst_port ||
+	    a->src_port != b->src_port ||
+	    a->flow_type != b->flow_type ||
+	    a->ip4_proto != b->ip4_proto)
+		return false;
+
+	return true;
+}
+
+/**
+ * i40e_disallow_matching_filters - Check that new filters differ
+ * @vsi: pointer to the targeted VSI
+ * @input: new filter to check
+ *
+ * Due to hardware limitations, it is not possible for two filters that match
+ * similar criteria to be programmed at the same time. This is true for a few
+ * reasons:
+ *
+ * (a) all filters matching a particular flow type must use the same input
+ * set, that is they must match the same criteria.
+ * (b) different flow types will never match the same packet, as the flow type
+ * is decided by hardware before checking which rules apply.
+ * (c) hardware has no way to distinguish which order filters apply in.
+ *
+ * Due to this, we can't really support using the location data to order
+ * filters in the hardware parsing. It is technically possible for the user to
+ * request two filters matching the same criteria but which select different
+ * queues. In this case, rather than keep both filters in the list, we reject
+ * the 2nd filter when the user requests adding it.
+ *
+ * This avoids needing to track location for programming the filter to
+ * hardware, and ensures that we avoid some strange scenarios involving
+ * deleting filters which match the same criteria.
+ **/
+static int i40e_disallow_matching_filters(struct i40e_vsi *vsi,
+					  struct i40e_fdir_filter *input)
+{
+	struct i40e_pf *pf = vsi->back;
+	struct i40e_fdir_filter *rule;
+	struct hlist_node *node2;
+
+	/* Loop through every filter, and check that it doesn't match */
+	hlist_for_each_entry_safe(rule, node2,
+				  &pf->fdir_filter_list, fdir_node) {
+		/* Don't check the filters match if they share the same fd_id,
+		 * since the new filter is actually just updating the target
+		 * of the old filter.
+		 */
+		if (rule->fd_id == input->fd_id)
+			continue;
+
+		/* If any filters match, then print a warning message to the
+		 * kernel message buffer and bail out.
+		 */
+		if (i40e_match_fdir_filter(rule, input)) {
+			dev_warn(&pf->pdev->dev,
+				 "Existing user defined filter %d already matches this flow.\n",
+				 rule->fd_id);
+			return -EINVAL;
+		}
+	}
+
+	return 0;
+}
+
+/**
  * i40e_add_fdir_ethtool - Add/Remove Flow Director filters
  * @vsi: pointer to the targeted VSI
  * @cmd: command to get or set RX flow classification rules
@@ -3933,19 +4033,25 @@ static int i40e_add_fdir_ethtool(struct i40e_vsi *vsi,
 		input->flex_offset = userdef.flex_offset;
 	}
 
-	ret = i40e_add_del_fdir(vsi, input, true);
+	/* Avoid programming two filters with identical match criteria. */
+	ret = i40e_disallow_matching_filters(vsi, input);
 	if (ret)
-		goto free_input;
+		goto free_filter_memory;
 
 	/* Add the input filter to the fdir_input_list, possibly replacing
 	 * a previous filter. Do not free the input structure after adding it
 	 * to the list as this would cause a use-after-free bug.
 	 */
 	i40e_update_ethtool_fdir_entry(vsi, input, fsp->location, NULL);
-
+	ret = i40e_add_del_fdir(vsi, input, true);
+	if (ret)
+		goto remove_sw_rule;
 	return 0;
 
-free_input:
+remove_sw_rule:
+	hlist_del(&input->fdir_node);
+	pf->fdir_pf_active_filters--;
+free_filter_memory:
 	kfree(input);
 	return ret;
 }
@@ -4258,7 +4364,7 @@ static int i40e_set_priv_flags(struct net_device *dev, u32 flags)
 	struct i40e_netdev_priv *np = netdev_priv(dev);
 	struct i40e_vsi *vsi = np->vsi;
 	struct i40e_pf *pf = vsi->back;
-	u32 orig_flags, new_flags, changed_flags;
+	u64 orig_flags, new_flags, changed_flags;
 	u32 i, j;
 
 	orig_flags = READ_ONCE(pf->flags);
@@ -4309,13 +4415,32 @@ flags_complete:
 	    !(pf->hw_features & I40E_HW_ATR_EVICT_CAPABLE))
 		return -EOPNOTSUPP;
 
+	/* Disable FW LLDP not supported if NPAR active or if FW
+	 * API version < 1.7
+	 */
+	if (new_flags & I40E_FLAG_DISABLE_FW_LLDP) {
+		if (pf->hw.func_caps.npar_enable) {
+			dev_warn(&pf->pdev->dev,
+				 "Unable to stop FW LLDP if NPAR active\n");
+			return -EOPNOTSUPP;
+		}
+
+		if (pf->hw.aq.api_maj_ver < 1 ||
+		    (pf->hw.aq.api_maj_ver == 1 &&
+		     pf->hw.aq.api_min_ver < 7)) {
+			dev_warn(&pf->pdev->dev,
+				 "FW ver does not support stopping FW LLDP\n");
+			return -EOPNOTSUPP;
+		}
+	}
+
 	/* Compare and exchange the new flags into place. If we failed, that
 	 * is if cmpxchg returns anything but the old value, this means that
 	 * something else has modified the flags variable since we copied it
 	 * originally. We'll just punt with an error and log something in the
 	 * message buffer.
 	 */
-	if (cmpxchg(&pf->flags, orig_flags, new_flags) != orig_flags) {
+	if (cmpxchg64(&pf->flags, orig_flags, new_flags) != orig_flags) {
 		dev_warn(&pf->pdev->dev,
 			 "Unable to update pf->flags as it was modified by another thread...\n");
 		return -EAGAIN;
@@ -4354,12 +4479,37 @@ flags_complete:
 		}
 	}
 
+	if (changed_flags & I40E_FLAG_DISABLE_FW_LLDP) {
+		if (pf->flags & I40E_FLAG_DISABLE_FW_LLDP) {
+			struct i40e_dcbx_config *dcbcfg;
+			int i;
+
+			i40e_aq_stop_lldp(&pf->hw, true, NULL);
+			i40e_aq_set_dcb_parameters(&pf->hw, true, NULL);
+			/* reset local_dcbx_config to default */
+			dcbcfg = &pf->hw.local_dcbx_config;
+			dcbcfg->etscfg.willing = 1;
+			dcbcfg->etscfg.maxtcs = 0;
+			dcbcfg->etscfg.tcbwtable[0] = 100;
+			for (i = 1; i < I40E_MAX_TRAFFIC_CLASS; i++)
+				dcbcfg->etscfg.tcbwtable[i] = 0;
+			for (i = 0; i < I40E_MAX_USER_PRIORITY; i++)
+				dcbcfg->etscfg.prioritytable[i] = 0;
+			dcbcfg->etscfg.tsatable[0] = I40E_IEEE_TSA_ETS;
+			dcbcfg->pfc.willing = 1;
+			dcbcfg->pfc.pfccap = I40E_MAX_TRAFFIC_CLASS;
+		} else {
+			i40e_aq_start_lldp(&pf->hw, NULL);
+		}
+	}
+
 	/* Issue reset to cause things to take effect, as additional bits
 	 * are added we will need to create a mask of bits requiring reset
 	 */
 	if (changed_flags & (I40E_FLAG_VEB_STATS_ENABLED |
 			     I40E_FLAG_LEGACY_RX |
-			     I40E_FLAG_SOURCE_PRUNING_DISABLED))
+			     I40E_FLAG_SOURCE_PRUNING_DISABLED |
+			     I40E_FLAG_DISABLE_FW_LLDP))
 		i40e_do_reset(pf, BIT(__I40E_PF_RESET_REQUESTED), true);
 
 	return 0;
diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c
index af792112a2d3..f95ce9b5e4fb 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_main.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_main.c
@@ -47,8 +47,8 @@ static const char i40e_driver_string[] =
 #define DRV_KERN "-k"
 
 #define DRV_VERSION_MAJOR 2
-#define DRV_VERSION_MINOR 1
-#define DRV_VERSION_BUILD 14
+#define DRV_VERSION_MINOR 3
+#define DRV_VERSION_BUILD 2
 #define DRV_VERSION __stringify(DRV_VERSION_MAJOR) "." \
 	     __stringify(DRV_VERSION_MINOR) "." \
 	     __stringify(DRV_VERSION_BUILD)    DRV_KERN
@@ -1818,6 +1818,10 @@ static void i40e_vsi_setup_queue_map(struct i40e_vsi *vsi,
 	num_tc_qps = qcount / numtc;
 	num_tc_qps = min_t(int, num_tc_qps, i40e_pf_get_max_q_per_tc(pf));
 
+	/* Do not allow using more TC queue pairs than there are MSI-X vectors */
+	if (pf->flags & I40E_FLAG_MSIX_ENABLED)
+		num_tc_qps = min_t(int, num_tc_qps, pf->num_lan_msix);
+
 	/* Setup queue offset/count for all TCs for given VSI */
 	for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
 		/* See if the given TC is enabled for the given VSI */
@@ -4122,6 +4126,7 @@ static void i40e_vsi_map_rings_to_vectors(struct i40e_vsi *vsi)
 		num_ringpairs = DIV_ROUND_UP(qp_remaining, q_vectors - v_start);
 
 		q_vector->num_ringpairs = num_ringpairs;
+		q_vector->reg_idx = q_vector->v_idx + vsi->base_vector - 1;
 
 		q_vector->rx.count = 0;
 		q_vector->tx.count = 0;
@@ -4877,104 +4882,6 @@ static int i40e_pf_wait_queues_disabled(struct i40e_pf *pf)
 #endif
 
 /**
- * i40e_detect_recover_hung_queue - Function to detect and recover hung_queue
- * @q_idx: TX queue number
- * @vsi: Pointer to VSI struct
- *
- * This function checks specified queue for given VSI. Detects hung condition.
- * We proactively detect hung TX queues by checking if interrupts are disabled
- * but there are pending descriptors.  If it appears hung, attempt to recover
- * by triggering a SW interrupt.
- **/
-static void i40e_detect_recover_hung_queue(int q_idx, struct i40e_vsi *vsi)
-{
-	struct i40e_ring *tx_ring = NULL;
-	struct i40e_pf	*pf;
-	u32 val, tx_pending;
-	int i;
-
-	pf = vsi->back;
-
-	/* now that we have an index, find the tx_ring struct */
-	for (i = 0; i < vsi->num_queue_pairs; i++) {
-		if (vsi->tx_rings[i] && vsi->tx_rings[i]->desc) {
-			if (q_idx == vsi->tx_rings[i]->queue_index) {
-				tx_ring = vsi->tx_rings[i];
-				break;
-			}
-		}
-	}
-
-	if (!tx_ring)
-		return;
-
-	/* Read interrupt register */
-	if (pf->flags & I40E_FLAG_MSIX_ENABLED)
-		val = rd32(&pf->hw,
-			   I40E_PFINT_DYN_CTLN(tx_ring->q_vector->v_idx +
-					       tx_ring->vsi->base_vector - 1));
-	else
-		val = rd32(&pf->hw, I40E_PFINT_DYN_CTL0);
-
-	tx_pending = i40e_get_tx_pending(tx_ring);
-
-	/* Interrupts are disabled and TX pending is non-zero,
-	 * trigger the SW interrupt (don't wait). Worst case
-	 * there will be one extra interrupt which may result
-	 * into not cleaning any queues because queues are cleaned.
-	 */
-	if (tx_pending && (!(val & I40E_PFINT_DYN_CTLN_INTENA_MASK)))
-		i40e_force_wb(vsi, tx_ring->q_vector);
-}
-
-/**
- * i40e_detect_recover_hung - Function to detect and recover hung_queues
- * @pf:  pointer to PF struct
- *
- * LAN VSI has netdev and netdev has TX queues. This function is to check
- * each of those TX queues if they are hung, trigger recovery by issuing
- * SW interrupt.
- **/
-static void i40e_detect_recover_hung(struct i40e_pf *pf)
-{
-	struct net_device *netdev;
-	struct i40e_vsi *vsi;
-	unsigned int i;
-
-	/* Only for LAN VSI */
-	vsi = pf->vsi[pf->lan_vsi];
-
-	if (!vsi)
-		return;
-
-	/* Make sure, VSI state is not DOWN/RECOVERY_PENDING */
-	if (test_bit(__I40E_VSI_DOWN, vsi->back->state) ||
-	    test_bit(__I40E_RESET_RECOVERY_PENDING, vsi->back->state))
-		return;
-
-	/* Make sure type is MAIN VSI */
-	if (vsi->type != I40E_VSI_MAIN)
-		return;
-
-	netdev = vsi->netdev;
-	if (!netdev)
-		return;
-
-	/* Bail out if netif_carrier is not OK */
-	if (!netif_carrier_ok(netdev))
-		return;
-
-	/* Go thru' TX queues for netdev */
-	for (i = 0; i < netdev->num_tx_queues; i++) {
-		struct netdev_queue *q;
-
-		q = netdev_get_tx_queue(netdev, i);
-		if (q)
-			i40e_detect_recover_hung_queue(i, vsi);
-	}
-}
-
-/**
  * i40e_get_iscsi_tc_map - Return TC map for iSCSI APP
  * @pf: pointer to PF
  *
@@ -5342,6 +5249,8 @@ static void i40e_vsi_update_queue_map(struct i40e_vsi *vsi,
 static int i40e_vsi_config_tc(struct i40e_vsi *vsi, u8 enabled_tc)
 {
 	u8 bw_share[I40E_MAX_TRAFFIC_CLASS] = {0};
+	struct i40e_pf *pf = vsi->back;
+	struct i40e_hw *hw = &pf->hw;
 	struct i40e_vsi_context ctxt;
 	int ret = 0;
 	int i;
@@ -5359,10 +5268,40 @@ static int i40e_vsi_config_tc(struct i40e_vsi *vsi, u8 enabled_tc)
 
 	ret = i40e_vsi_configure_bw_alloc(vsi, enabled_tc, bw_share);
 	if (ret) {
-		dev_info(&vsi->back->pdev->dev,
+		struct i40e_aqc_query_vsi_bw_config_resp bw_config = {0};
+
+		dev_info(&pf->pdev->dev,
 			 "Failed configuring TC map %d for VSI %d\n",
 			 enabled_tc, vsi->seid);
-		goto out;
+		ret = i40e_aq_query_vsi_bw_config(hw, vsi->seid,
+						  &bw_config, NULL);
+		if (ret) {
+			dev_info(&pf->pdev->dev,
+				 "Failed querying vsi bw info, err %s aq_err %s\n",
+				 i40e_stat_str(hw, ret),
+				 i40e_aq_str(hw, hw->aq.asq_last_status));
+			goto out;
+		}
+		if ((bw_config.tc_valid_bits & enabled_tc) != enabled_tc) {
+			u8 valid_tc = bw_config.tc_valid_bits & enabled_tc;
+
+			if (!valid_tc)
+				valid_tc = bw_config.tc_valid_bits;
+			/* Always enable TC0, no matter what */
+			valid_tc |= 1;
+			dev_info(&pf->pdev->dev,
+				 "Requested tc 0x%x, but FW reports 0x%x as valid. Attempting to use 0x%x.\n",
+				 enabled_tc, bw_config.tc_valid_bits, valid_tc);
+			enabled_tc = valid_tc;
+		}
+
+		ret = i40e_vsi_configure_bw_alloc(vsi, enabled_tc, bw_share);
+		if (ret) {
+			dev_err(&pf->pdev->dev,
+				"Unable to configure TC map %d for VSI %d\n",
+				enabled_tc, vsi->seid);
+			goto out;
+		}
 	}
 
 	/* Update Queue Pairs Mapping for currently enabled UPs */
@@ -5402,13 +5341,12 @@ static int i40e_vsi_config_tc(struct i40e_vsi *vsi, u8 enabled_tc)
 	/* Update the VSI after updating the VSI queue-mapping
 	 * information
 	 */
-	ret = i40e_aq_update_vsi_params(&vsi->back->hw, &ctxt, NULL);
+	ret = i40e_aq_update_vsi_params(hw, &ctxt, NULL);
 	if (ret) {
-		dev_info(&vsi->back->pdev->dev,
+		dev_info(&pf->pdev->dev,
 			 "Update vsi tc config failed, err %s aq_err %s\n",
-			 i40e_stat_str(&vsi->back->hw, ret),
-			 i40e_aq_str(&vsi->back->hw,
-				     vsi->back->hw.aq.asq_last_status));
+			 i40e_stat_str(hw, ret),
+			 i40e_aq_str(hw, hw->aq.asq_last_status));
 		goto out;
 	}
 	/* update the local VSI info with updated queue map */
@@ -5418,11 +5356,10 @@ static int i40e_vsi_config_tc(struct i40e_vsi *vsi, u8 enabled_tc)
 	/* Update current VSI BW information */
 	ret = i40e_vsi_get_bw_info(vsi);
 	if (ret) {
-		dev_info(&vsi->back->pdev->dev,
+		dev_info(&pf->pdev->dev,
 			 "Failed updating vsi bw info, err %s aq_err %s\n",
-			 i40e_stat_str(&vsi->back->hw, ret),
-			 i40e_aq_str(&vsi->back->hw,
-				     vsi->back->hw.aq.asq_last_status));
+			 i40e_stat_str(hw, ret),
+			 i40e_aq_str(hw, hw->aq.asq_last_status));
 		goto out;
 	}
 
@@ -6388,8 +6325,11 @@ static int i40e_init_pf_dcb(struct i40e_pf *pf)
 	struct i40e_hw *hw = &pf->hw;
 	int err = 0;
 
-	/* Do not enable DCB for SW1 and SW2 images even if the FW is capable */
-	if (pf->hw_features & I40E_HW_NO_DCB_SUPPORT)
+	/* Do not enable DCB for SW1 and SW2 images even if the FW is capable.
+	 * Also do not enable DCBX if the FW LLDP agent is disabled.
+	 */
+	if ((pf->hw_features & I40E_HW_NO_DCB_SUPPORT) ||
+	    (pf->flags & I40E_FLAG_DISABLE_FW_LLDP))
 		goto out;
 
 	/* Get the initial DCB configuration */
@@ -6416,6 +6356,9 @@ static int i40e_init_pf_dcb(struct i40e_pf *pf)
 			dev_dbg(&pf->pdev->dev,
 				"DCBX offload is supported for this PF.\n");
 		}
+	} else if (pf->hw.aq.asq_last_status == I40E_AQ_RC_EPERM) {
+		dev_info(&pf->pdev->dev, "FW LLDP disabled for this PF.\n");
+		pf->flags |= I40E_FLAG_DISABLE_FW_LLDP;
 	} else {
 		dev_info(&pf->pdev->dev,
 			 "Query for DCB configuration failed, err %s aq_err %s\n",
@@ -7505,11 +7448,6 @@ static int i40e_setup_tc_cls_flower(struct i40e_netdev_priv *np,
 {
 	struct i40e_vsi *vsi = np->vsi;
 
-	if (!tc_can_offload(vsi->netdev))
-		return -EOPNOTSUPP;
-	if (cls_flower->common.chain_index)
-		return -EOPNOTSUPP;
-
 	switch (cls_flower->command) {
 	case TC_CLSFLOWER_REPLACE:
 		return i40e_configure_clsflower(vsi, cls_flower);
@@ -7527,6 +7465,9 @@ static int i40e_setup_tc_block_cb(enum tc_setup_type type, void *type_data,
 {
 	struct i40e_netdev_priv *np = cb_priv;
 
+	if (!tc_cls_can_offload_and_chain0(np->vsi->netdev, type_data))
+		return -EOPNOTSUPP;
+
 	switch (type) {
 	case TC_SETUP_CLSFLOWER:
 		return i40e_setup_tc_cls_flower(np, type_data);
@@ -7744,6 +7685,9 @@ static void i40e_fdir_filter_exit(struct i40e_pf *pf)
 	/* Reprogram the default input set for Other/IPv4 */
 	i40e_write_fd_input_set(pf, I40E_FILTER_PCTYPE_NONF_IPV4_OTHER,
 				I40E_L3_SRC_MASK | I40E_L3_DST_MASK);
+
+	i40e_write_fd_input_set(pf, I40E_FILTER_PCTYPE_FRAG_IPV4,
+				I40E_L3_SRC_MASK | I40E_L3_DST_MASK);
 }
 
 /**
@@ -9077,6 +9021,17 @@ static int i40e_rebuild_channels(struct i40e_vsi *vsi)
 				 vsi->uplink_seid);
 			return ret;
 		}
+		/* Reconfigure TX queues using QTX_CTL register */
+		ret = i40e_channel_config_tx_ring(vsi->back, vsi, ch);
+		if (ret) {
+			dev_info(&vsi->back->pdev->dev,
+				 "failed to configure TX rings for channel %u\n",
+				 ch->seid);
+			return ret;
+		}
+		/* update 'next_base_queue' */
+		vsi->next_base_queue += ch->num_queue_pairs;
 		if (ch->max_tx_rate) {
 			u64 credits = ch->max_tx_rate;
 
@@ -9280,6 +9235,9 @@ static void i40e_rebuild(struct i40e_pf *pf, bool reinit, bool lock_acquired)
 		goto end_core_reset;
 	}
 
+	/* Enable FW to write a default DCB config on link-up */
+	i40e_aq_set_dcb_parameters(hw, true, NULL);
+
 #ifdef CONFIG_I40E_DCB
 	ret = i40e_init_pf_dcb(pf);
 	if (ret) {
@@ -9697,7 +9655,7 @@ static void i40e_service_task(struct work_struct *work)
 	if (test_and_set_bit(__I40E_SERVICE_SCHED, pf->state))
 		return;
 
-	i40e_detect_recover_hung(pf);
+	i40e_detect_recover_hung(pf->vsi[pf->lan_vsi]);
 	i40e_sync_filters_subtask(pf);
 	i40e_reset_subtask(pf);
 	i40e_handle_mdd_event(pf);
@@ -10464,10 +10422,9 @@ static int i40e_init_interrupt_scheme(struct i40e_pf *pf)
 	/* set up vector assignment tracking */
 	size = sizeof(struct i40e_lump_tracking) + (sizeof(u16) * vectors);
 	pf->irq_pile = kzalloc(size, GFP_KERNEL);
-	if (!pf->irq_pile) {
-		dev_err(&pf->pdev->dev, "error allocating irq_pile memory\n");
+	if (!pf->irq_pile)
 		return -ENOMEM;
-	}
+
 	pf->irq_pile->num_entries = vectors;
 	pf->irq_pile->search_hint = 0;
 
@@ -10785,8 +10742,13 @@ static int i40e_pf_config_rss(struct i40e_pf *pf)
 	/* Determine the RSS size of the VSI */
 	if (!vsi->rss_size) {
 		u16 qcount;
-
-		qcount = vsi->num_queue_pairs / vsi->tc_config.numtc;
+		/* If the firmware does something weird during VSI init, we
+		 * could end up with zero TCs. Check for that to avoid
+		 * divide-by-zero. It probably won't pass traffic, but it also
+		 * won't panic.
+		 */
+		qcount = vsi->num_queue_pairs /
+			 (vsi->tc_config.numtc ? vsi->tc_config.numtc : 1);
 		vsi->rss_size = min_t(int, pf->alloc_rss_size, qcount);
 	}
 	if (!vsi->rss_size)
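
/*
 * The guard above in one standalone line: if firmware misbehaves during
 * VSI init and reports zero TCs, dividing the queue count by numtc
 * would be a divide-by-zero, so the divisor is floored at 1. Sketch
 * only; names are stand-ins for the driver's fields.
 */
#include <stdio.h>

static int rss_qcount(int num_queue_pairs, int numtc)
{
	return num_queue_pairs / (numtc ? numtc : 1);
}

int main(void)
{
	printf("%d\n", rss_qcount(16, 0));	/* 16, not a crash */
	return 0;
}
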
@@ -10974,7 +10936,7 @@ i40e_status i40e_commit_partition_bw_setting(struct i40e_pf *pf)
 	ret = i40e_aq_update_nvm(&pf->hw,
 				 I40E_SR_NVM_CONTROL_WORD,
 				 0x10, sizeof(nvm_word),
-				 &nvm_word, true, NULL);
+				 &nvm_word, true, 0, NULL);
 	/* Save off last admin queue command status before releasing
 	 * the NVM
 	 */
@@ -11115,13 +11077,13 @@ static int i40e_sw_init(struct i40e_pf *pf)
 	    pf->hw.aq.fw_maj_ver >= 6)
 		pf->hw_features |= I40E_HW_PTP_L4_CAPABLE;
 
-	if (pf->hw.func_caps.vmdq) {
+	if (pf->hw.func_caps.vmdq && num_online_cpus() != 1) {
 		pf->num_vmdq_vsis = I40E_DEFAULT_NUM_VMDQ_VSI;
 		pf->flags |= I40E_FLAG_VMDQ_ENABLED;
 		pf->num_vmdq_qps = i40e_default_queues_per_vmdq(pf);
 	}
 
-	if (pf->hw.func_caps.iwarp) {
+	if (pf->hw.func_caps.iwarp && num_online_cpus() != 1) {
 		pf->flags |= I40E_FLAG_IWARP_ENABLED;
 		/* IWARP needs one extra vector for CQP just like MISC.*/
 		pf->num_iwarp_msix = (int)num_online_cpus() + 1;
@@ -13598,6 +13560,10 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
 
 	pci_set_drvdata(pdev, pf);
 	pci_save_state(pdev);
+
+	/* Enable FW to write default DCB config on link-up */
+	i40e_aq_set_dcb_parameters(hw, true, NULL);
+
 #ifdef CONFIG_I40E_DCB
 	err = i40e_init_pf_dcb(pf);
 	if (err) {
diff --git a/drivers/net/ethernet/intel/i40e/i40e_nvm.c b/drivers/net/ethernet/intel/i40e/i40e_nvm.c
index 7689c2ee0d46..76a5cb04e4fe 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_nvm.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_nvm.c
@@ -239,8 +239,9 @@ read_nvm_exit:
  *
  * Reads a buffer of 16-bit words from the Shadow RAM using the admin command.
  **/
-static i40e_status i40e_read_nvm_aq(struct i40e_hw *hw, u8 module_pointer,
-				    u32 offset, u16 words, void *data,
+static i40e_status i40e_read_nvm_aq(struct i40e_hw *hw,
+				    u8 module_pointer, u32 offset,
+				    u16 words, void *data,
 				    bool last_command)
 {
 	i40e_status ret_code = I40E_ERR_NVM;
@@ -389,7 +390,7 @@ static i40e_status i40e_read_nvm_buffer_aq(struct i40e_hw *hw, u16 offset,
 					   u16 *words, u16 *data)
 {
 	i40e_status ret_code;
-	u16 read_size = *words;
+	u16 read_size;
 	bool last_cmd = false;
 	u16 words_read = 0;
 	u16 i = 0;
@@ -496,7 +497,8 @@ static i40e_status i40e_write_nvm_aq(struct i40e_hw *hw, u8 module_pointer,
 		ret_code = i40e_aq_update_nvm(hw, module_pointer,
 					      2 * offset,  /*bytes*/
 					      2 * words,   /*bytes*/
-					      data, last_command, &cmd_details);
+					      data, last_command, 0,
+					      &cmd_details);
 
 	return ret_code;
 }
@@ -677,6 +679,9 @@ static i40e_status i40e_nvmupd_exec_aq(struct i40e_hw *hw,
 static i40e_status i40e_nvmupd_get_aq_result(struct i40e_hw *hw,
 					     struct i40e_nvm_access *cmd,
 					     u8 *bytes, int *perrno);
+static i40e_status i40e_nvmupd_get_aq_event(struct i40e_hw *hw,
+					    struct i40e_nvm_access *cmd,
+					    u8 *bytes, int *perrno);
 static inline u8 i40e_nvmupd_get_module(u32 val)
 {
 	return (u8)(val & I40E_NVM_MOD_PNT_MASK);
@@ -686,6 +691,12 @@ static inline u8 i40e_nvmupd_get_transaction(u32 val)
 	return (u8)((val & I40E_NVM_TRANS_MASK) >> I40E_NVM_TRANS_SHIFT);
 }
 
+static inline u8 i40e_nvmupd_get_preservation_flags(u32 val)
+{
+	return (u8)((val & I40E_NVM_PRESERVATION_FLAGS_MASK) >>
+		    I40E_NVM_PRESERVATION_FLAGS_SHIFT);
+}
+
 static const char * const i40e_nvm_update_state_str[] = {
 	"I40E_NVMUPD_INVALID",
 	"I40E_NVMUPD_READ_CON",
@@ -703,6 +714,7 @@ static const char * const i40e_nvm_update_state_str[] = {
 	"I40E_NVMUPD_STATUS",
 	"I40E_NVMUPD_EXEC_AQ",
 	"I40E_NVMUPD_GET_AQ_RESULT",
+	"I40E_NVMUPD_GET_AQ_EVENT",
 };
 
 /**
@@ -798,9 +810,9 @@ i40e_status i40e_nvmupd_command(struct i40e_hw *hw,
 		 * the wait info and return before doing anything else
 		 */
 		if (cmd->offset == 0xffff) {
-			i40e_nvmupd_check_wait_event(hw, hw->nvm_wait_opcode);
+			i40e_nvmupd_clear_wait_state(hw);
 			status = 0;
-			goto exit;
+			break;
 		}
 
 		status = I40E_ERR_NOT_READY;
@@ -815,7 +827,7 @@ i40e_status i40e_nvmupd_command(struct i40e_hw *hw,
 		*perrno = -ESRCH;
 		break;
 	}
-exit:
+
 	mutex_unlock(&hw->aq.arq_mutex);
 	return status;
 }
@@ -944,6 +956,10 @@ static i40e_status i40e_nvmupd_state_init(struct i40e_hw *hw,
 		status = i40e_nvmupd_get_aq_result(hw, cmd, bytes, perrno);
 		break;
 
+	case I40E_NVMUPD_GET_AQ_EVENT:
+		status = i40e_nvmupd_get_aq_event(hw, cmd, bytes, perrno);
+		break;
+
 	default:
 		i40e_debug(hw, I40E_DEBUG_NVM,
 			   "NVMUPD: bad cmd %s in init state\n",
@@ -1118,38 +1134,53 @@ retry:
 }
 
 /**
- * i40e_nvmupd_check_wait_event - handle NVM update operation events
+ * i40e_nvmupd_clear_wait_state - clear wait state on hw
  * @hw: pointer to the hardware structure
- * @opcode: the event that just happened
  **/
-void i40e_nvmupd_check_wait_event(struct i40e_hw *hw, u16 opcode)
+void i40e_nvmupd_clear_wait_state(struct i40e_hw *hw)
 {
-	if (opcode == hw->nvm_wait_opcode) {
-		i40e_debug(hw, I40E_DEBUG_NVM,
-			   "NVMUPD: clearing wait on opcode 0x%04x\n", opcode);
-		if (hw->nvm_release_on_done) {
-			i40e_release_nvm(hw);
-			hw->nvm_release_on_done = false;
-		}
-		hw->nvm_wait_opcode = 0;
+	i40e_debug(hw, I40E_DEBUG_NVM,
+		   "NVMUPD: clearing wait on opcode 0x%04x\n",
+		   hw->nvm_wait_opcode);
 
-		if (hw->aq.arq_last_status) {
-			hw->nvmupd_state = I40E_NVMUPD_STATE_ERROR;
-			return;
-		}
+	if (hw->nvm_release_on_done) {
+		i40e_release_nvm(hw);
+		hw->nvm_release_on_done = false;
+	}
+	hw->nvm_wait_opcode = 0;
 
-		switch (hw->nvmupd_state) {
-		case I40E_NVMUPD_STATE_INIT_WAIT:
-			hw->nvmupd_state = I40E_NVMUPD_STATE_INIT;
-			break;
+	if (hw->aq.arq_last_status) {
+		hw->nvmupd_state = I40E_NVMUPD_STATE_ERROR;
+		return;
+	}
 
-		case I40E_NVMUPD_STATE_WRITE_WAIT:
-			hw->nvmupd_state = I40E_NVMUPD_STATE_WRITING;
-			break;
+	switch (hw->nvmupd_state) {
+	case I40E_NVMUPD_STATE_INIT_WAIT:
+		hw->nvmupd_state = I40E_NVMUPD_STATE_INIT;
+		break;
 
-		default:
-			break;
-		}
+	case I40E_NVMUPD_STATE_WRITE_WAIT:
+		hw->nvmupd_state = I40E_NVMUPD_STATE_WRITING;
+		break;
+
+	default:
+		break;
+	}
+}
+
+/**
+ * i40e_nvmupd_check_wait_event - handle NVM update operation events
+ * @hw: pointer to the hardware structure
+ * @opcode: the event that just happened
+ **/
+void i40e_nvmupd_check_wait_event(struct i40e_hw *hw, u16 opcode,
+				  struct i40e_aq_desc *desc)
+{
+	u32 aq_desc_len = sizeof(struct i40e_aq_desc);
+
+	if (opcode == hw->nvm_wait_opcode) {
+		memcpy(&hw->nvm_aq_event_desc, desc, aq_desc_len);
+		i40e_nvmupd_clear_wait_state(hw);
 	}
 }
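
/*
 * Standalone sketch of the state machine being factored out above:
 * clearing the wait state steps the NVM update machinery from a *_WAIT
 * state back to its active counterpart, or to ERROR if the last ARQ
 * status was bad. Enum names are stand-ins for the driver's.
 */
#include <stdio.h>

enum upd_state { ST_INIT, ST_INIT_WAIT, ST_WRITING, ST_WRITE_WAIT, ST_ERROR };

static enum upd_state clear_wait(enum upd_state s, int arq_error)
{
	if (arq_error)
		return ST_ERROR;
	switch (s) {
	case ST_INIT_WAIT:
		return ST_INIT;
	case ST_WRITE_WAIT:
		return ST_WRITING;
	default:
		return s;	/* nothing was pending */
	}
}

int main(void)
{
	printf("%d\n", clear_wait(ST_WRITE_WAIT, 0));	/* 2 == ST_WRITING */
	return 0;
}
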
 
@@ -1205,6 +1236,9 @@ static enum i40e_nvmupd_cmd i40e_nvmupd_validate_command(struct i40e_hw *hw,
 			else if (module == 0)
 				upd_cmd = I40E_NVMUPD_GET_AQ_RESULT;
 			break;
+		case I40E_NVM_AQE:
+			upd_cmd = I40E_NVMUPD_GET_AQ_EVENT;
+			break;
 		}
 		break;
 
@@ -1267,6 +1301,9 @@ static i40e_status i40e_nvmupd_exec_aq(struct i40e_hw *hw,
 	u32 aq_data_len;
 
 	i40e_debug(hw, I40E_DEBUG_NVM, "NVMUPD: %s\n", __func__);
+	if (cmd->offset == 0xffff)
+		return 0;
+
 	memset(&cmd_details, 0, sizeof(cmd_details));
 	cmd_details.wb_desc = &hw->nvm_wb_desc;
 
@@ -1302,6 +1339,9 @@ static i40e_status i40e_nvmupd_exec_aq(struct i40e_hw *hw,
 		}
 	}
 
+	if (cmd->offset)
+		memset(&hw->nvm_aq_event_desc, 0, aq_desc_len);
+
 	/* and away we go! */
 	status = i40e_asq_send_command(hw, aq_desc, buff,
 				       buff_size, &cmd_details);
@@ -1311,6 +1351,7 @@ static i40e_status i40e_nvmupd_exec_aq(struct i40e_hw *hw,
 			   i40e_stat_str(hw, status),
 			   i40e_aq_str(hw, hw->aq.asq_last_status));
 		*perrno = i40e_aq_rc_to_posix(status, hw->aq.asq_last_status);
+		return status;
 	}
 
 	/* should we wait for a followup event? */
@@ -1392,6 +1433,40 @@ static i40e_status i40e_nvmupd_get_aq_result(struct i40e_hw *hw,
 }
 
 /**
+ * i40e_nvmupd_get_aq_event - Get the Admin Queue event from previous exec_aq
+ * @hw: pointer to hardware structure
+ * @cmd: pointer to nvm update command buffer
+ * @bytes: pointer to the data buffer
+ * @perrno: pointer to return error code
+ *
+ * cmd structure contains identifiers and data buffer
+ **/
+static i40e_status i40e_nvmupd_get_aq_event(struct i40e_hw *hw,
+					    struct i40e_nvm_access *cmd,
+					    u8 *bytes, int *perrno)
+{
+	u32 aq_total_len;
+	u32 aq_desc_len;
+
+	i40e_debug(hw, I40E_DEBUG_NVM, "NVMUPD: %s\n", __func__);
+
+	aq_desc_len = sizeof(struct i40e_aq_desc);
+	aq_total_len = aq_desc_len + le16_to_cpu(hw->nvm_aq_event_desc.datalen);
+
+	/* check copylength range */
+	if (cmd->data_size > aq_total_len) {
+		i40e_debug(hw, I40E_DEBUG_NVM,
+			   "%s: copy length %d too big, trimming to %d\n",
+			   __func__, cmd->data_size, aq_total_len);
+		cmd->data_size = aq_total_len;
+	}
+
+	memcpy(bytes, &hw->nvm_aq_event_desc, cmd->data_size);
+
+	return 0;
+}
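
/*
 * Minimal sketch of the trim-before-copy rule above: the caller's
 * buffer may claim more bytes than the stored descriptor plus its data
 * actually occupy, so the copy length is clamped first. Types and
 * names are illustrative, not the driver's.
 */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

static uint32_t copy_event(uint8_t *dst, uint32_t data_size,
			   const uint8_t *desc, uint32_t desc_len,
			   uint32_t datalen)
{
	uint32_t total = desc_len + datalen;

	if (data_size > total)
		data_size = total;	/* trim, as the debug message warns */
	memcpy(dst, desc, data_size);
	return data_size;
}

int main(void)
{
	uint8_t desc[32 + 8] = { 0xAA };
	uint8_t out[128];

	printf("copied %u bytes\n",
	       (unsigned)copy_event(out, sizeof(out), desc, 32, 8));
	return 0;
}
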
+
+/**
  * i40e_nvmupd_nvm_read - Read NVM
  * @hw: pointer to hardware structure
  * @cmd: pointer to nvm update command buffer
@@ -1486,18 +1561,20 @@ static i40e_status i40e_nvmupd_nvm_write(struct i40e_hw *hw,
 	i40e_status status = 0;
 	struct i40e_asq_cmd_details cmd_details;
 	u8 module, transaction;
+	u8 preservation_flags;
 	bool last;
 
 	transaction = i40e_nvmupd_get_transaction(cmd->config);
 	module = i40e_nvmupd_get_module(cmd->config);
 	last = (transaction & I40E_NVM_LCB);
+	preservation_flags = i40e_nvmupd_get_preservation_flags(cmd->config);
 
 	memset(&cmd_details, 0, sizeof(cmd_details));
 	cmd_details.wb_desc = &hw->nvm_wb_desc;
 
 	status = i40e_aq_update_nvm(hw, module, cmd->offset,
 				    (u16)cmd->data_size, bytes, last,
-				    &cmd_details);
+				    preservation_flags, &cmd_details);
 	if (status) {
 		i40e_debug(hw, I40E_DEBUG_NVM,
 			   "i40e_nvmupd_nvm_write mod 0x%x off 0x%x len 0x%x\n",
diff --git a/drivers/net/ethernet/intel/i40e/i40e_prototype.h b/drivers/net/ethernet/intel/i40e/i40e_prototype.h
index 3bb6659db822..83798b7841b9 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_prototype.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_prototype.h
@@ -214,7 +214,7 @@ i40e_status i40e_aq_discover_capabilities(struct i40e_hw *hw,
 				struct i40e_asq_cmd_details *cmd_details);
 i40e_status i40e_aq_update_nvm(struct i40e_hw *hw, u8 module_pointer,
 				u32 offset, u16 length, void *data,
-				bool last_command,
+				bool last_command, u8 preservation_flags,
 				struct i40e_asq_cmd_details *cmd_details);
 i40e_status i40e_aq_get_lldp_mib(struct i40e_hw *hw, u8 bridge_type,
 				u8 mib_type, void *buff, u16 buff_size,
@@ -225,6 +225,10 @@ i40e_status i40e_aq_cfg_lldp_mib_change_event(struct i40e_hw *hw,
 				struct i40e_asq_cmd_details *cmd_details);
 i40e_status i40e_aq_stop_lldp(struct i40e_hw *hw, bool shutdown_agent,
 				struct i40e_asq_cmd_details *cmd_details);
+i40e_status i40e_aq_set_dcb_parameters(struct i40e_hw *hw,
+				       bool dcb_enable,
+				       struct i40e_asq_cmd_details
+				       *cmd_details);
 i40e_status i40e_aq_start_lldp(struct i40e_hw *hw,
 				struct i40e_asq_cmd_details *cmd_details);
 i40e_status i40e_aq_get_cee_dcb_config(struct i40e_hw *hw,
@@ -333,7 +337,9 @@ i40e_status i40e_validate_nvm_checksum(struct i40e_hw *hw,
 i40e_status i40e_nvmupd_command(struct i40e_hw *hw,
 				struct i40e_nvm_access *cmd,
 				u8 *bytes, int *);
-void i40e_nvmupd_check_wait_event(struct i40e_hw *hw, u16 opcode);
+void i40e_nvmupd_check_wait_event(struct i40e_hw *hw, u16 opcode,
+				  struct i40e_aq_desc *desc);
+void i40e_nvmupd_clear_wait_state(struct i40e_hw *hw);
 void i40e_set_pci_config_data(struct i40e_hw *hw, u16 link_status);
 
 extern struct i40e_rx_ptype_decoded i40e_ptype_lookup[];
@@ -343,6 +349,37 @@ static inline struct i40e_rx_ptype_decoded decode_rx_desc_ptype(u8 ptype)
 	return i40e_ptype_lookup[ptype];
 }
 
+/**
+ * i40e_virtchnl_link_speed - Convert AdminQ link_speed to virtchnl definition
+ * @link_speed: the speed to convert
+ *
+ * Returns the link_speed in terms of the virtchnl interface, for use in
+ * converting link_speed as reported by the AdminQ into the format used for
+ * talking to virtchnl devices. If we can't represent the link speed properly,
+ * report LINK_SPEED_UNKNOWN.
+ **/
+static inline enum virtchnl_link_speed
+i40e_virtchnl_link_speed(enum i40e_aq_link_speed link_speed)
+{
+	switch (link_speed) {
+	case I40E_LINK_SPEED_100MB:
+		return VIRTCHNL_LINK_SPEED_100MB;
+	case I40E_LINK_SPEED_1GB:
+		return VIRTCHNL_LINK_SPEED_1GB;
+	case I40E_LINK_SPEED_10GB:
+		return VIRTCHNL_LINK_SPEED_10GB;
+	case I40E_LINK_SPEED_40GB:
+		return VIRTCHNL_LINK_SPEED_40GB;
+	case I40E_LINK_SPEED_20GB:
+		return VIRTCHNL_LINK_SPEED_20GB;
+	case I40E_LINK_SPEED_25GB:
+		return VIRTCHNL_LINK_SPEED_25GB;
+	case I40E_LINK_SPEED_UNKNOWN:
+	default:
+		return VIRTCHNL_LINK_SPEED_UNKNOWN;
+	}
+}
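
/*
 * Stand-in enums demonstrating the conversion above: any AdminQ speed
 * with no virtchnl equivalent collapses to UNKNOWN instead of being
 * cast straight through, which is the VF-facing bug this helper exists
 * to fix. Sketch only; the values below are made up.
 */
#include <stdio.h>

enum aq_speed { AQ_100MB = 0x1, AQ_1GB = 0x2, AQ_10GB = 0x4, AQ_ODD = 0x40 };
enum vc_speed { VC_UNKNOWN = 0, VC_100MB, VC_1GB, VC_10GB };

static enum vc_speed to_virtchnl(enum aq_speed s)
{
	switch (s) {
	case AQ_100MB:
		return VC_100MB;
	case AQ_1GB:
		return VC_1GB;
	case AQ_10GB:
		return VC_10GB;
	default:
		return VC_UNKNOWN;	/* no virtchnl equivalent */
	}
}

int main(void)
{
	printf("%d\n", to_virtchnl(AQ_ODD));	/* 0 == VC_UNKNOWN */
	return 0;
}
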
+
 /* prototype for functions used for SW locks */
 
 /* i40e_common for VF drivers*/
@@ -400,13 +437,15 @@ i40e_status i40e_write_phy_register(struct i40e_hw *hw, u8 page, u16 reg,
 u8 i40e_get_phy_address(struct i40e_hw *hw, u8 dev_num);
 i40e_status i40e_blink_phy_link_led(struct i40e_hw *hw,
 				    u32 time, u32 interval);
-i40e_status i40e_aq_write_ppp(struct i40e_hw *hw, void *buff,
+i40e_status i40e_aq_write_ddp(struct i40e_hw *hw, void *buff,
 			      u16 buff_size, u32 track_id,
 			      u32 *error_offset, u32 *error_info,
-			      struct i40e_asq_cmd_details *cmd_details);
-i40e_status i40e_aq_get_ppp_list(struct i40e_hw *hw, void *buff,
+			      struct i40e_asq_cmd_details *
+			      cmd_details);
+i40e_status i40e_aq_get_ddp_list(struct i40e_hw *hw, void *buff,
 				 u16 buff_size, u8 flags,
-				 struct i40e_asq_cmd_details *cmd_details);
+				 struct i40e_asq_cmd_details *
+				 cmd_details);
 struct i40e_generic_seg_header *
 i40e_find_segment_in_package(u32 segment_type,
 			     struct i40e_package_header *pkg_header);
diff --git a/drivers/net/ethernet/intel/i40e/i40e_status.h b/drivers/net/ethernet/intel/i40e/i40e_status.h
index 5f9cac55aa55..afb72e711d43 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_status.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_status.h
@@ -95,6 +95,7 @@ enum i40e_status_code {
 	I40E_ERR_NOT_READY			= -63,
 	I40E_NOT_SUPPORTED			= -64,
 	I40E_ERR_FIRMWARE_API_VERSION		= -65,
+	I40E_ERR_ADMIN_QUEUE_CRITICAL_ERROR	= -66,
 };
 
 #endif /* _I40E_STATUS_H_ */
diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.c b/drivers/net/ethernet/intel/i40e/i40e_txrx.c
index 5bc2748ac468..e554aa6cf070 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_txrx.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.c
@@ -27,6 +27,7 @@
 #include <linux/prefetch.h>
 #include <net/busy_poll.h>
 #include <linux/bpf_trace.h>
+#include <net/xdp.h>
 #include "i40e.h"
 #include "i40e_trace.h"
 #include "i40e_prototype.h"
@@ -725,6 +726,59 @@ u32 i40e_get_tx_pending(struct i40e_ring *ring)
 	return 0;
 }
 
+/**
+ * i40e_detect_recover_hung - Function to detect and recover hung queues
+ * @vsi: pointer to vsi struct with tx queues
+ *
+ * The VSI has a netdev and the netdev has TX queues. This function checks
+ * each of those TX queues for a hang and, if one is detected, triggers
+ * recovery by issuing a SW interrupt.
+ **/
+void i40e_detect_recover_hung(struct i40e_vsi *vsi)
+{
+	struct i40e_ring *tx_ring = NULL;
+	struct net_device *netdev;
+	unsigned int i;
+	int packets;
+
+	if (!vsi)
+		return;
+
+	if (test_bit(__I40E_VSI_DOWN, vsi->state))
+		return;
+
+	netdev = vsi->netdev;
+	if (!netdev)
+		return;
+
+	if (!netif_carrier_ok(netdev))
+		return;
+
+	for (i = 0; i < vsi->num_queue_pairs; i++) {
+		tx_ring = vsi->tx_rings[i];
+		if (tx_ring && tx_ring->desc) {
+			/* If the packet counter has not changed, the queue
+			 * is likely stalled, so force an interrupt for this
+			 * queue.
+			 *
+			 * prev_pkt_ctr would be negative if there was no
+			 * pending work.
+			 */
+			packets = tx_ring->stats.packets & INT_MAX;
+			if (tx_ring->tx_stats.prev_pkt_ctr == packets) {
+				i40e_force_wb(vsi, tx_ring->q_vector);
+				continue;
+			}
+
+			/* Memory barrier between read of packet count and call
+			 * to i40e_get_tx_pending()
+			 */
+			smp_rmb();
+			tx_ring->tx_stats.prev_pkt_ctr =
+			    i40e_get_tx_pending(tx_ring) ? packets : -1;
+		}
+	}
+}
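
/*
 * Standalone sketch of the prev_pkt_ctr heuristic above, assuming a toy
 * ring structure; field names mirror the driver, but nothing here is
 * the driver's API. A queue is suspected hung when its completed-packet
 * counter has not moved since the last service-task pass while work was
 * still pending; prev_pkt_ctr is parked at -1 when the queue was idle.
 */
#include <limits.h>
#include <stdbool.h>
#include <stdio.h>

struct toy_ring {
	unsigned long long packets;	/* completed packets, free-running */
	int prev_pkt_ctr;		/* snapshot from last pass, or -1 */
	int pending;			/* descriptors still on the ring */
};

static bool check_ring(struct toy_ring *r)
{
	int packets = (int)(r->packets & INT_MAX);

	if (r->prev_pkt_ctr == packets)
		return true;		/* stalled: would force a SW irq */

	r->prev_pkt_ctr = r->pending ? packets : -1;
	return false;
}

int main(void)
{
	struct toy_ring r = { .packets = 100, .prev_pkt_ctr = -1, .pending = 5 };

	check_ring(&r);				/* arms the detector */
	printf("hung: %d\n", check_ring(&r));	/* no progress -> 1 */
	return 0;
}
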
+
 #define WB_STRIDE 4
 
 /**
@@ -902,7 +956,7 @@ static void i40e_enable_wb_on_itr(struct i40e_vsi *vsi,
 		      I40E_PFINT_DYN_CTLN_ITR_INDX_MASK; /* set noitr */
 
 		wr32(&vsi->back->hw,
-		     I40E_PFINT_DYN_CTLN(q_vector->v_idx + vsi->base_vector - 1),
+		     I40E_PFINT_DYN_CTLN(q_vector->reg_idx),
 		     val);
 	} else {
 		val = I40E_PFINT_DYN_CTL0_WB_ON_ITR_MASK |
@@ -929,8 +983,7 @@ void i40e_force_wb(struct i40e_vsi *vsi, struct i40e_q_vector *q_vector)
 			  /* allow 00 to be written to the index */
 
 		wr32(&vsi->back->hw,
-		     I40E_PFINT_DYN_CTLN(q_vector->v_idx +
-					 vsi->base_vector - 1), val);
+		     I40E_PFINT_DYN_CTLN(q_vector->reg_idx), val);
 	} else {
 		u32 val = I40E_PFINT_DYN_CTL0_INTENA_MASK |
 			  I40E_PFINT_DYN_CTL0_ITR_INDX_MASK | /* set noitr */
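
/*
 * Sketch of the reg_idx precomputation used above: the interrupt
 * register index used to be recomputed as v_idx + base_vector - 1 at
 * every register write; storing it once at vector-mapping time keeps
 * the off-by-one math out of the hot path. Structures are stand-ins.
 */
#include <stdio.h>

struct q_vector {
	int v_idx;	/* vector's position in the VSI's array */
	int reg_idx;	/* precomputed interrupt register index */
};

static void map_vector(struct q_vector *q, int base_vector)
{
	q->reg_idx = q->v_idx + base_vector - 1;	/* done once, at setup */
}

int main(void)
{
	struct q_vector q = { .v_idx = 3 };

	map_vector(&q, 1);
	printf("INTREG index = %d\n", q.reg_idx);	/* prints 3 */
	return 0;
}
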
@@ -1162,6 +1215,7 @@ int i40e_setup_tx_descriptors(struct i40e_ring *tx_ring)
 
 	tx_ring->next_to_use = 0;
 	tx_ring->next_to_clean = 0;
+	tx_ring->tx_stats.prev_pkt_ctr = -1;
 	return 0;
 
 err:
@@ -1236,6 +1290,8 @@ void i40e_clean_rx_ring(struct i40e_ring *rx_ring)
 void i40e_free_rx_resources(struct i40e_ring *rx_ring)
 {
 	i40e_clean_rx_ring(rx_ring);
+	if (rx_ring->vsi->type == I40E_VSI_MAIN)
+		xdp_rxq_info_unreg(&rx_ring->xdp_rxq);
 	rx_ring->xdp_prog = NULL;
 	kfree(rx_ring->rx_bi);
 	rx_ring->rx_bi = NULL;
@@ -1256,6 +1312,7 @@ void i40e_free_rx_resources(struct i40e_ring *rx_ring)
 int i40e_setup_rx_descriptors(struct i40e_ring *rx_ring)
 {
 	struct device *dev = rx_ring->dev;
+	int err = -ENOMEM;
 	int bi_size;
 
 	/* warn if we are about to overwrite the pointer */
@@ -1283,13 +1340,21 @@ int i40e_setup_rx_descriptors(struct i40e_ring *rx_ring)
 	rx_ring->next_to_clean = 0;
 	rx_ring->next_to_use = 0;
 
+	/* XDP RX-queue info only needed for RX rings exposed to XDP */
+	if (rx_ring->vsi->type == I40E_VSI_MAIN) {
+		err = xdp_rxq_info_reg(&rx_ring->xdp_rxq, rx_ring->netdev,
+				       rx_ring->queue_index);
+		if (err < 0)
+			goto err;
+	}
+
 	rx_ring->xdp_prog = rx_ring->vsi->xdp_prog;
 
 	return 0;
 err:
 	kfree(rx_ring->rx_bi);
 	rx_ring->rx_bi = NULL;
-	return -ENOMEM;
+	return err;
 }
 
 /**
@@ -2068,11 +2133,13 @@ static int i40e_clean_rx_irq(struct i40e_ring *rx_ring, int budget)
 	struct sk_buff *skb = rx_ring->skb;
 	u16 cleaned_count = I40E_DESC_UNUSED(rx_ring);
 	bool failure = false, xdp_xmit = false;
+	struct xdp_buff xdp;
+
+	xdp.rxq = &rx_ring->xdp_rxq;
 
 	while (likely(total_rx_packets < (unsigned int)budget)) {
 		struct i40e_rx_buffer *rx_buffer;
 		union i40e_rx_desc *rx_desc;
-		struct xdp_buff xdp;
 		unsigned int size;
 		u16 vlan_tag;
 		u8 rx_ptype;
@@ -2243,7 +2310,6 @@ static inline void i40e_update_enable_itr(struct i40e_vsi *vsi,
 	struct i40e_hw *hw = &vsi->back->hw;
 	bool rx = false, tx = false;
 	u32 rxval, txval;
-	int vector;
 	int idx = q_vector->v_idx;
 	int rx_itr_setting, tx_itr_setting;
 
@@ -2253,8 +2319,6 @@ static inline void i40e_update_enable_itr(struct i40e_vsi *vsi,
 		return;
 	}
 
-	vector = (q_vector->v_idx + vsi->base_vector);
-
 	/* avoid dynamic calculation if in countdown mode OR if
 	 * all dynamic is disabled
 	 */
@@ -2303,12 +2367,12 @@ static inline void i40e_update_enable_itr(struct i40e_vsi *vsi,
 		 */
 		rxval |= BIT(31);
 		/* don't check _DOWN because interrupt isn't being enabled */
-		wr32(hw, INTREG(vector - 1), rxval);
+		wr32(hw, INTREG(q_vector->reg_idx), rxval);
 	}
 
 enable_int:
 	if (!test_bit(__I40E_VSI_DOWN, vsi->state))
-		wr32(hw, INTREG(vector - 1), txval);
+		wr32(hw, INTREG(q_vector->reg_idx), txval);
 
 	if (q_vector->itr_countdown)
 		q_vector->itr_countdown--;
diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.h b/drivers/net/ethernet/intel/i40e/i40e_txrx.h
index fbae1182e2ea..701b708628b0 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_txrx.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.h
@@ -27,6 +27,8 @@
 #ifndef _I40E_TXRX_H_
 #define _I40E_TXRX_H_
 
+#include <net/xdp.h>
+
 /* Interrupt Throttling and Rate Limiting Goodies */
 
 #define I40E_MAX_ITR               0x0FF0  /* reg uses 2 usec resolution */
@@ -277,7 +279,7 @@ static inline unsigned int i40e_txd_use_count(unsigned int size)
 }
 
 /* Tx Descriptors needed, worst case */
-#define DESC_NEEDED (MAX_SKB_FRAGS + 4)
+#define DESC_NEEDED (MAX_SKB_FRAGS + 6)
 #define I40E_MIN_DESC_PENDING	4
 
 #define I40E_TX_FLAGS_HW_VLAN		BIT(1)
@@ -331,6 +333,7 @@ struct i40e_tx_queue_stats {
 	u64 tx_done_old;
 	u64 tx_linearize;
 	u64 tx_force_wb;
+	int prev_pkt_ctr;
 };
 
 struct i40e_rx_queue_stats {
@@ -428,6 +431,7 @@ struct i40e_ring {
 					 */
 
 	struct i40e_channel *ch;
+	struct xdp_rxq_info xdp_rxq;
 } ____cacheline_internodealigned_in_smp;
 
 static inline bool ring_uses_build_skb(struct i40e_ring *ring)
@@ -498,6 +502,7 @@ void i40e_free_rx_resources(struct i40e_ring *rx_ring);
 int i40e_napi_poll(struct napi_struct *napi, int budget);
 void i40e_force_wb(struct i40e_vsi *vsi, struct i40e_q_vector *q_vector);
 u32 i40e_get_tx_pending(struct i40e_ring *ring);
+void i40e_detect_recover_hung(struct i40e_vsi *vsi);
 int __i40e_maybe_stop_tx(struct i40e_ring *tx_ring, int size);
 bool __i40e_chk_linearize(struct sk_buff *skb);
 
diff --git a/drivers/net/ethernet/intel/i40e/i40e_type.h b/drivers/net/ethernet/intel/i40e/i40e_type.h
index 0e8568719b4e..cd294e6a8587 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_type.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_type.h
@@ -402,6 +402,7 @@ enum i40e_nvmupd_cmd {
 	I40E_NVMUPD_STATUS,
 	I40E_NVMUPD_EXEC_AQ,
 	I40E_NVMUPD_GET_AQ_RESULT,
+	I40E_NVMUPD_GET_AQ_EVENT,
 };
 
 enum i40e_nvmupd_state {
@@ -421,15 +422,21 @@ enum i40e_nvmupd_state {
 
 #define I40E_NVM_MOD_PNT_MASK 0xFF
 
-#define I40E_NVM_TRANS_SHIFT	8
-#define I40E_NVM_TRANS_MASK	(0xf << I40E_NVM_TRANS_SHIFT)
-#define I40E_NVM_CON		0x0
-#define I40E_NVM_SNT		0x1
-#define I40E_NVM_LCB		0x2
-#define I40E_NVM_SA		(I40E_NVM_SNT | I40E_NVM_LCB)
-#define I40E_NVM_ERA		0x4
-#define I40E_NVM_CSUM		0x8
-#define I40E_NVM_EXEC		0xf
+#define I40E_NVM_TRANS_SHIFT			8
+#define I40E_NVM_TRANS_MASK			(0xf << I40E_NVM_TRANS_SHIFT)
+#define I40E_NVM_PRESERVATION_FLAGS_SHIFT	12
+#define I40E_NVM_PRESERVATION_FLAGS_MASK \
+				(0x3 << I40E_NVM_PRESERVATION_FLAGS_SHIFT)
+#define I40E_NVM_PRESERVATION_FLAGS_SELECTED	0x01
+#define I40E_NVM_PRESERVATION_FLAGS_ALL		0x02
+#define I40E_NVM_CON				0x0
+#define I40E_NVM_SNT				0x1
+#define I40E_NVM_LCB				0x2
+#define I40E_NVM_SA				(I40E_NVM_SNT | I40E_NVM_LCB)
+#define I40E_NVM_ERA				0x4
+#define I40E_NVM_CSUM				0x8
+#define I40E_NVM_AQE				0xe
+#define I40E_NVM_EXEC				0xf
 
 #define I40E_NVM_ADAPT_SHIFT	16
 #define I40E_NVM_ADAPT_MASK	(0xffff << I40E_NVM_ADAPT_SHIFT)
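
/*
 * Sketch of how the nvmupd config word is packed, using the mask/shift
 * layout above; the unpacking mirrors the new
 * i40e_nvmupd_get_preservation_flags() helper but is a standalone
 * illustration, not driver code. Bits 0-7 carry the module pointer,
 * bits 8-11 the transaction, bits 12-13 the new preservation flags.
 */
#include <stdint.h>
#include <stdio.h>

#define NVM_MOD_PNT_MASK		0xFFu
#define NVM_TRANS_SHIFT			8
#define NVM_TRANS_MASK			(0xFu << NVM_TRANS_SHIFT)
#define NVM_PRESERVATION_FLAGS_SHIFT	12
#define NVM_PRESERVATION_FLAGS_MASK	(0x3u << NVM_PRESERVATION_FLAGS_SHIFT)

static uint32_t pack_config(uint8_t module, uint8_t trans, uint8_t pflags)
{
	return module |
	       ((uint32_t)trans << NVM_TRANS_SHIFT) |
	       ((uint32_t)pflags << NVM_PRESERVATION_FLAGS_SHIFT);
}

int main(void)
{
	uint32_t cfg = pack_config(0x12, 0x1 /* SNT */, 0x2 /* ALL */);

	printf("module 0x%x trans 0x%x pflags 0x%x\n",
	       (unsigned)(cfg & NVM_MOD_PNT_MASK),
	       (unsigned)((cfg & NVM_TRANS_MASK) >> NVM_TRANS_SHIFT),
	       (unsigned)((cfg & NVM_PRESERVATION_FLAGS_MASK) >>
			  NVM_PRESERVATION_FLAGS_SHIFT));
	return 0;
}
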
@@ -611,6 +618,7 @@ struct i40e_hw {
 	/* state of nvm update process */
 	enum i40e_nvmupd_state nvmupd_state;
 	struct i40e_aq_desc nvm_wb_desc;
+	struct i40e_aq_desc nvm_aq_event_desc;
 	struct i40e_virt_mem nvm_buff;
 	bool nvm_release_on_done;
 	u16 nvm_wait_opcode;
@@ -1502,19 +1510,19 @@ struct i40e_lldp_variables {
 #define I40E_FLEX_57_SHIFT		6
 #define I40E_FLEX_57_MASK		(0x1ULL << I40E_FLEX_57_SHIFT)
 
-/* Version format for PPP */
-struct i40e_ppp_version {
+/* Version format for Dynamic Device Personalization (DDP) */
+struct i40e_ddp_version {
 	u8 major;
 	u8 minor;
 	u8 update;
 	u8 draft;
 };
 
-#define I40E_PPP_NAME_SIZE	32
+#define I40E_DDP_NAME_SIZE	32
 
 /* Package header */
 struct i40e_package_header {
-	struct i40e_ppp_version version;
+	struct i40e_ddp_version version;
 	u32 segment_count;
 	u32 segment_offset[1];
 };
@@ -1526,16 +1534,16 @@ struct i40e_generic_seg_header {
 #define SEGMENT_TYPE_I40E	0x00000011
 #define SEGMENT_TYPE_X722	0x00000012
 	u32 type;
-	struct i40e_ppp_version version;
+	struct i40e_ddp_version version;
 	u32 size;
-	char name[I40E_PPP_NAME_SIZE];
+	char name[I40E_DDP_NAME_SIZE];
 };
 
 struct i40e_metadata_segment {
 	struct i40e_generic_seg_header header;
-	struct i40e_ppp_version version;
+	struct i40e_ddp_version version;
 	u32 track_id;
-	char name[I40E_PPP_NAME_SIZE];
+	char name[I40E_DDP_NAME_SIZE];
 };
 
 struct i40e_device_id_entry {
@@ -1545,8 +1553,8 @@ struct i40e_device_id_entry {
 
 struct i40e_profile_segment {
 	struct i40e_generic_seg_header header;
-	struct i40e_ppp_version version;
-	char name[I40E_PPP_NAME_SIZE];
+	struct i40e_ddp_version version;
+	char name[I40E_DDP_NAME_SIZE];
 	u32 device_table_count;
 	struct i40e_device_id_entry device_table[1];
 };
@@ -1573,11 +1581,11 @@ struct i40e_profile_section_header {
 
 struct i40e_profile_info {
 	u32 track_id;
-	struct i40e_ppp_version version;
+	struct i40e_ddp_version version;
 	u8 op;
-#define I40E_PPP_ADD_TRACKID		0x01
-#define I40E_PPP_REMOVE_TRACKID	0x02
+#define I40E_DDP_ADD_TRACKID		0x01
+#define I40E_DDP_REMOVE_TRACKID	0x02
 	u8 reserved[7];
-	u8 name[I40E_PPP_NAME_SIZE];
+	u8 name[I40E_DDP_NAME_SIZE];
 };
 #endif /* _I40E_TYPE_H_ */
diff --git a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
index 36cb8e068e85..e9309fb9084b 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
@@ -81,12 +81,12 @@ static void i40e_vc_notify_vf_link_state(struct i40e_vf *vf)
 	if (vf->link_forced) {
 		pfe.event_data.link_event.link_status = vf->link_up;
 		pfe.event_data.link_event.link_speed =
-			(vf->link_up ? I40E_LINK_SPEED_40GB : 0);
+			(vf->link_up ? VIRTCHNL_LINK_SPEED_40GB : 0);
 	} else {
 		pfe.event_data.link_event.link_status =
 			ls->link_info & I40E_AQ_LINK_UP;
 		pfe.event_data.link_event.link_speed =
-			(enum virtchnl_link_speed)ls->link_speed;
+			i40e_virtchnl_link_speed(ls->link_speed);
 	}
 	i40e_aq_send_msg_to_vf(hw, abs_vf_id, VIRTCHNL_OP_EVENT,
 			       0, (u8 *)&pfe, sizeof(pfe), NULL);
@@ -2749,6 +2749,7 @@ int i40e_vc_process_vf_msg(struct i40e_pf *pf, s16 vf_id, u32 v_opcode,
 		break;
 	case VIRTCHNL_OP_GET_VF_RESOURCES:
 		ret = i40e_vc_get_vf_resources_msg(vf, msg);
+		i40e_vc_notify_vf_link_state(vf);
 		break;
 	case VIRTCHNL_OP_RESET_VF:
 		i40e_vc_reset_vf_msg(vf);
diff --git a/drivers/net/ethernet/intel/i40evf/i40e_adminq.c b/drivers/net/ethernet/intel/i40evf/i40e_adminq.c
index 8b0d4b255dea..d1aab6b8bfb1 100644
--- a/drivers/net/ethernet/intel/i40evf/i40e_adminq.c
+++ b/drivers/net/ethernet/intel/i40evf/i40e_adminq.c
@@ -837,10 +837,15 @@ i40e_status i40evf_asq_send_command(struct i40e_hw *hw,
 	/* update the error if time out occurred */
 	if ((!cmd_completed) &&
 	    (!details->async && !details->postpone)) {
-		i40e_debug(hw,
-			   I40E_DEBUG_AQ_MESSAGE,
-			   "AQTX: Writeback timeout.\n");
-		status = I40E_ERR_ADMIN_QUEUE_TIMEOUT;
+		if (rd32(hw, hw->aq.asq.len) & I40E_VF_ATQLEN1_ATQCRIT_MASK) {
+			i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE,
+				   "AQTX: AQ Critical error.\n");
+			status = I40E_ERR_ADMIN_QUEUE_CRITICAL_ERROR;
+		} else {
+			i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE,
+				   "AQTX: Writeback timeout.\n");
+			status = I40E_ERR_ADMIN_QUEUE_TIMEOUT;
+		}
 	}
 
 asq_send_command_error:
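
/*
 * Sketch of the timeout classification above, with a stand-in register
 * read: on writeback timeout the driver now peeks at the ATQ length
 * register's critical-error bit to distinguish a firmware-side fault
 * from a plain timeout. Mask value and names are illustrative.
 */
#include <stdbool.h>
#include <stdio.h>

#define ATQCRIT_MASK 0x20000000u	/* stand-in for the HW bit */

enum aq_status { AQ_OK, AQ_TIMEOUT, AQ_CRITICAL };

static enum aq_status classify(bool completed, unsigned int atqlen_reg)
{
	if (completed)
		return AQ_OK;
	if (atqlen_reg & ATQCRIT_MASK)
		return AQ_CRITICAL;	/* firmware flagged a fatal error */
	return AQ_TIMEOUT;
}

int main(void)
{
	printf("%d %d\n", classify(false, ATQCRIT_MASK), classify(false, 0));
	return 0;
}
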
@@ -901,7 +906,7 @@ i40e_status i40evf_clean_arq_element(struct i40e_hw *hw,
 	}
 
 	/* set next_to_use to head */
-	ntu = (rd32(hw, hw->aq.arq.head) & I40E_VF_ARQH1_ARQH_MASK);
+	ntu = rd32(hw, hw->aq.arq.head) & I40E_VF_ARQH1_ARQH_MASK;
 	if (ntu == ntc) {
 		/* nothing to do - shouldn't need to update ring's values */
 		ret_code = I40E_ERR_ADMIN_QUEUE_NO_WORK;
diff --git a/drivers/net/ethernet/intel/i40evf/i40e_adminq_cmd.h b/drivers/net/ethernet/intel/i40evf/i40e_adminq_cmd.h
index 06b04572c518..815de8d9c3fb 100644
--- a/drivers/net/ethernet/intel/i40evf/i40e_adminq_cmd.h
+++ b/drivers/net/ethernet/intel/i40evf/i40e_adminq_cmd.h
@@ -198,13 +198,14 @@ enum i40e_admin_queue_opc {
 	i40e_aqc_opc_add_mirror_rule	= 0x0260,
 	i40e_aqc_opc_delete_mirror_rule	= 0x0261,
 
-	/* Pipeline Personalization Profile */
+	/* Dynamic Device Personalization */
 	i40e_aqc_opc_write_personalization_profile	= 0x0270,
 	i40e_aqc_opc_get_personalization_profile_list	= 0x0271,
 
 	/* DCB commands */
 	i40e_aqc_opc_dcb_ignore_pfc	= 0x0301,
 	i40e_aqc_opc_dcb_updated	= 0x0302,
+	i40e_aqc_opc_set_dcb_parameters = 0x0303,
 
 	/* TX scheduler */
 	i40e_aqc_opc_configure_vsi_bw_limit		= 0x0400,
@@ -1562,7 +1563,7 @@ struct i40e_aqc_add_delete_mirror_rule_completion {
 
 I40E_CHECK_CMD_LENGTH(i40e_aqc_add_delete_mirror_rule_completion);
 
-/* Pipeline Personalization Profile */
+/* Dynamic Device Personalization */
 struct i40e_aqc_write_personalization_profile {
 	u8      flags;
 	u8      reserved[3];
@@ -1573,7 +1574,7 @@ struct i40e_aqc_write_personalization_profile {
 
 I40E_CHECK_CMD_LENGTH(i40e_aqc_write_personalization_profile);
 
-struct i40e_aqc_write_ppp_resp {
+struct i40e_aqc_write_ddp_resp {
 	__le32 error_offset;
 	__le32 error_info;
 	__le32 addr_high;
@@ -1582,8 +1583,8 @@ struct i40e_aqc_write_ppp_resp {
 
 struct i40e_aqc_get_applied_profiles {
 	u8      flags;
-#define I40E_AQC_GET_PPP_GET_CONF	0x1
-#define I40E_AQC_GET_PPP_GET_RDPU_CONF	0x2
+#define I40E_AQC_GET_DDP_GET_CONF	0x1
+#define I40E_AQC_GET_DDP_GET_RDPU_CONF	0x2
 	u8      rsv[3];
 	__le32  reserved;
 	__le32  addr_high;
@@ -2196,8 +2197,12 @@ I40E_CHECK_CMD_LENGTH(i40e_aqc_phy_register_access);
  */
 struct i40e_aqc_nvm_update {
 	u8	command_flags;
-#define I40E_AQ_NVM_LAST_CMD	0x01
-#define I40E_AQ_NVM_FLASH_ONLY	0x80
+#define I40E_AQ_NVM_LAST_CMD			0x01
+#define I40E_AQ_NVM_FLASH_ONLY			0x80
+#define I40E_AQ_NVM_PRESERVATION_FLAGS_SHIFT	1
+#define I40E_AQ_NVM_PRESERVATION_FLAGS_MASK	0x03
+#define I40E_AQ_NVM_PRESERVATION_FLAGS_SELECTED	0x03
+#define I40E_AQ_NVM_PRESERVATION_FLAGS_ALL	0x01
 	u8	module_pointer;
 	__le16	length;
 	__le32	offset;
@@ -2457,6 +2462,17 @@ struct i40e_aqc_lldp_start {
 
 I40E_CHECK_CMD_LENGTH(i40e_aqc_lldp_start);
 
+/* Set DCB (direct 0x0303) */
+struct i40e_aqc_set_dcb_parameters {
+	u8 command;
+#define I40E_AQ_DCB_SET_AGENT	0x1
+#define I40E_DCB_VALID		0x1
+	u8 valid_flags;
+	u8 reserved[14];
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_set_dcb_parameters);
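
/*
 * Sketch of filling the Set DCB Parameters (0x0303) command defined
 * above, using a local copy of the struct; the flag values come from
 * the #defines in the hunk, everything else here is illustrative
 * rather than the driver's actual send path.
 */
#include <stdint.h>
#include <stdio.h>

struct set_dcb_parameters {
	uint8_t command;
#define AQ_DCB_SET_AGENT	0x1
	uint8_t valid_flags;
#define DCB_VALID		0x1
	uint8_t reserved[14];
};

int main(void)
{
	struct set_dcb_parameters cmd = { 0 };

	cmd.command = AQ_DCB_SET_AGENT;	/* let FW write a default config */
	cmd.valid_flags = DCB_VALID;
	printf("cmd=0x%x valid=0x%x\n", cmd.command, cmd.valid_flags);
	return 0;
}
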
+
 /* Apply MIB changes (0x0A07)
  * uses the generic struct as it contains no data
  */
diff --git a/drivers/net/ethernet/intel/i40evf/i40e_common.c b/drivers/net/ethernet/intel/i40evf/i40e_common.c
index 7d70bf69b249..67bf5cebb76f 100644
--- a/drivers/net/ethernet/intel/i40evf/i40e_common.c
+++ b/drivers/net/ethernet/intel/i40evf/i40e_common.c
@@ -284,6 +284,8 @@ const char *i40evf_stat_str(struct i40e_hw *hw, i40e_status stat_err)
 		return "I40E_NOT_SUPPORTED";
 	case I40E_ERR_FIRMWARE_API_VERSION:
 		return "I40E_ERR_FIRMWARE_API_VERSION";
+	case I40E_ERR_ADMIN_QUEUE_CRITICAL_ERROR:
+		return "I40E_ERR_ADMIN_QUEUE_CRITICAL_ERROR";
 	}
 
 	snprintf(hw->err_str, sizeof(hw->err_str), "%d", stat_err);
@@ -1202,7 +1204,7 @@ i40e_status i40e_vf_reset(struct i40e_hw *hw)
 }
 
 /**
- * i40evf_aq_write_ppp - Write pipeline personalization profile (ppp)
+ * i40evf_aq_write_ddp - Write dynamic device personalization (ddp)
  * @hw: pointer to the hw struct
  * @buff: command buffer (size in bytes = buff_size)
  * @buff_size: buffer size in bytes
@@ -1212,7 +1214,7 @@ i40e_status i40e_vf_reset(struct i40e_hw *hw)
  * @cmd_details: pointer to command details structure or NULL
  **/
 enum
-i40e_status_code i40evf_aq_write_ppp(struct i40e_hw *hw, void *buff,
+i40e_status_code i40evf_aq_write_ddp(struct i40e_hw *hw, void *buff,
 				     u16 buff_size, u32 track_id,
 				     u32 *error_offset, u32 *error_info,
 				     struct i40e_asq_cmd_details *cmd_details)
@@ -1221,7 +1223,7 @@ i40e_status_code i40evf_aq_write_ppp(struct i40e_hw *hw, void *buff,
 	struct i40e_aqc_write_personalization_profile *cmd =
 		(struct i40e_aqc_write_personalization_profile *)
 		&desc.params.raw;
-	struct i40e_aqc_write_ppp_resp *resp;
+	struct i40e_aqc_write_ddp_resp *resp;
 	i40e_status status;
 
 	i40evf_fill_default_direct_cmd_desc(&desc,
@@ -1237,7 +1239,7 @@ i40e_status_code i40evf_aq_write_ppp(struct i40e_hw *hw, void *buff,
 
 	status = i40evf_asq_send_command(hw, &desc, buff, buff_size, cmd_details);
 	if (!status) {
-		resp = (struct i40e_aqc_write_ppp_resp *)&desc.params.raw;
+		resp = (struct i40e_aqc_write_ddp_resp *)&desc.params.raw;
 		if (error_offset)
 			*error_offset = le32_to_cpu(resp->error_offset);
 		if (error_info)
@@ -1248,16 +1250,16 @@ i40e_status_code i40evf_aq_write_ppp(struct i40e_hw *hw, void *buff,
 }
 
 /**
- * i40evf_aq_get_ppp_list - Read pipeline personalization profile (ppp)
+ * i40evf_aq_get_ddp_list - Read dynamic device personalization (ddp)
  * @hw: pointer to the hw struct
  * @buff: command buffer (size in bytes = buff_size)
  * @buff_size: buffer size in bytes
  * @cmd_details: pointer to command details structure or NULL
  **/
 enum
-i40e_status_code i40evf_aq_get_ppp_list(struct i40e_hw *hw, void *buff,
+i40e_status_code i40evf_aq_get_ddp_list(struct i40e_hw *hw, void *buff,
 					u16 buff_size, u8 flags,
-				      struct i40e_asq_cmd_details *cmd_details)
+				       struct i40e_asq_cmd_details *cmd_details)
 {
 	struct i40e_aq_desc desc;
 	struct i40e_aqc_get_applied_profiles *cmd =
@@ -1330,11 +1332,6 @@ i40evf_write_profile(struct i40e_hw *hw, struct i40e_profile_segment *profile,
 	u32 offset = 0, info = 0;
 	u32 i;
 
-	if (!track_id) {
-		i40e_debug(hw, I40E_DEBUG_PACKAGE, "Track_id can't be 0.");
-		return I40E_NOT_SUPPORTED;
-	}
-
 	dev_cnt = profile->device_table_count;
 
 	for (i = 0; i < dev_cnt; i++) {
@@ -1344,7 +1341,7 @@ i40evf_write_profile(struct i40e_hw *hw, struct i40e_profile_segment *profile,
 				break;
 	}
 	if (i == dev_cnt) {
-		i40e_debug(hw, I40E_DEBUG_PACKAGE, "Device doesn't support PPP");
+		i40e_debug(hw, I40E_DEBUG_PACKAGE, "Device doesn't support DDP");
 		return I40E_ERR_DEVICE_NOT_SUPPORTED;
 	}
 
@@ -1363,7 +1360,7 @@ i40evf_write_profile(struct i40e_hw *hw, struct i40e_profile_segment *profile,
 			sizeof(struct i40e_profile_section_header);
 
 		/* Write profile */
-		status = i40evf_aq_write_ppp(hw, (void *)sec, (u16)section_size,
+		status = i40evf_aq_write_ddp(hw, (void *)sec, (u16)section_size,
 					     track_id, &offset, &info, NULL);
 		if (status) {
 			i40e_debug(hw, I40E_DEBUG_PACKAGE,
@@ -1405,10 +1402,10 @@ i40evf_add_pinfo_to_list(struct i40e_hw *hw,
 					     sec->section.offset);
 	pinfo->track_id = track_id;
 	pinfo->version = profile->version;
-	pinfo->op = I40E_PPP_ADD_TRACKID;
-	memcpy(pinfo->name, profile->name, I40E_PPP_NAME_SIZE);
+	pinfo->op = I40E_DDP_ADD_TRACKID;
+	memcpy(pinfo->name, profile->name, I40E_DDP_NAME_SIZE);
 
-	status = i40evf_aq_write_ppp(hw, (void *)sec, sec->data_end,
+	status = i40evf_aq_write_ddp(hw, (void *)sec, sec->data_end,
 				     track_id, &offset, &info, NULL);
 	return status;
 }
diff --git a/drivers/net/ethernet/intel/i40evf/i40e_prototype.h b/drivers/net/ethernet/intel/i40evf/i40e_prototype.h
index b624b5994075..47c429931a57 100644
--- a/drivers/net/ethernet/intel/i40evf/i40e_prototype.h
+++ b/drivers/net/ethernet/intel/i40evf/i40e_prototype.h
@@ -131,13 +131,15 @@ i40e_status i40e_write_phy_register(struct i40e_hw *hw, u8 page, u16 reg,
 u8 i40e_get_phy_address(struct i40e_hw *hw, u8 dev_num);
 i40e_status i40e_blink_phy_link_led(struct i40e_hw *hw,
 				    u32 time, u32 interval);
-i40e_status i40evf_aq_write_ppp(struct i40e_hw *hw, void *buff,
+i40e_status i40evf_aq_write_ddp(struct i40e_hw *hw, void *buff,
 				u16 buff_size, u32 track_id,
 				u32 *error_offset, u32 *error_info,
-				struct i40e_asq_cmd_details *cmd_details);
-i40e_status i40evf_aq_get_ppp_list(struct i40e_hw *hw, void *buff,
+				struct i40e_asq_cmd_details *
+				cmd_details);
+i40e_status i40evf_aq_get_ddp_list(struct i40e_hw *hw, void *buff,
 				   u16 buff_size, u8 flags,
-				   struct i40e_asq_cmd_details *cmd_details);
+				   struct i40e_asq_cmd_details *
+				   cmd_details);
 struct i40e_generic_seg_header *
 i40evf_find_segment_in_package(u32 segment_type,
 			       struct i40e_package_header *pkg_header);
diff --git a/drivers/net/ethernet/intel/i40evf/i40e_status.h b/drivers/net/ethernet/intel/i40evf/i40e_status.h
index 7fa7a41915c1..5b222246e08b 100644
--- a/drivers/net/ethernet/intel/i40evf/i40e_status.h
+++ b/drivers/net/ethernet/intel/i40evf/i40e_status.h
@@ -95,6 +95,7 @@ enum i40e_status_code {
 	I40E_ERR_NOT_READY			= -63,
 	I40E_NOT_SUPPORTED			= -64,
 	I40E_ERR_FIRMWARE_API_VERSION		= -65,
+	I40E_ERR_ADMIN_QUEUE_CRITICAL_ERROR	= -66,
 };
 
 #endif /* _I40E_STATUS_H_ */
diff --git a/drivers/net/ethernet/intel/i40evf/i40e_txrx.c b/drivers/net/ethernet/intel/i40evf/i40e_txrx.c
index 1ba29bb85b67..357d6051281f 100644
--- a/drivers/net/ethernet/intel/i40evf/i40e_txrx.c
+++ b/drivers/net/ethernet/intel/i40evf/i40e_txrx.c
@@ -148,6 +148,59 @@ u32 i40evf_get_tx_pending(struct i40e_ring *ring, bool in_sw)
 	return 0;
 }
 
+/**
+ * i40evf_detect_recover_hung - Function to detect and recover hung queues
+ * @vsi: pointer to vsi struct with tx queues
+ *
+ * The VSI has a netdev and the netdev has TX queues. This function checks
+ * each of those TX queues for a hang and, if one is detected, triggers
+ * recovery by issuing a SW interrupt.
+ **/
+void i40evf_detect_recover_hung(struct i40e_vsi *vsi)
+{
+	struct i40e_ring *tx_ring = NULL;
+	struct net_device *netdev;
+	unsigned int i;
+	int packets;
+
+	if (!vsi)
+		return;
+
+	if (test_bit(__I40E_VSI_DOWN, vsi->state))
+		return;
+
+	netdev = vsi->netdev;
+	if (!netdev)
+		return;
+
+	if (!netif_carrier_ok(netdev))
+		return;
+
+	for (i = 0; i < vsi->back->num_active_queues; i++) {
+		tx_ring = &vsi->back->tx_rings[i];
+		if (tx_ring && tx_ring->desc) {
+			/* If the packet counter has not changed, the queue
+			 * is likely stalled, so force an interrupt for this
+			 * queue.
+			 *
+			 * prev_pkt_ctr would be negative if there was no
+			 * pending work.
+			 */
+			packets = tx_ring->stats.packets & INT_MAX;
+			if (tx_ring->tx_stats.prev_pkt_ctr == packets) {
+				i40evf_force_wb(vsi, tx_ring->q_vector);
+				continue;
+			}
+
+			/* Memory barrier between read of packet count and call
+			 * to i40evf_get_tx_pending()
+			 */
+			smp_rmb();
+			tx_ring->tx_stats.prev_pkt_ctr =
+			  i40evf_get_tx_pending(tx_ring, false) ? packets : -1;
+		}
+	}
+}
+
 #define WB_STRIDE 4
 
 /**
@@ -316,8 +369,7 @@ static void i40e_enable_wb_on_itr(struct i40e_vsi *vsi,
 	      I40E_VFINT_DYN_CTLN1_ITR_INDX_MASK; /* set noitr */
 
 	wr32(&vsi->back->hw,
-	     I40E_VFINT_DYN_CTLN1(q_vector->v_idx +
-				  vsi->base_vector - 1), val);
+	     I40E_VFINT_DYN_CTLN1(q_vector->reg_idx), val);
 	q_vector->arm_wb_state = true;
 }
 
@@ -336,7 +388,7 @@ void i40evf_force_wb(struct i40e_vsi *vsi, struct i40e_q_vector *q_vector)
 		  /* allow 00 to be written to the index */;
 
 	wr32(&vsi->back->hw,
-	     I40E_VFINT_DYN_CTLN1(q_vector->v_idx + vsi->base_vector - 1),
+	     I40E_VFINT_DYN_CTLN1(q_vector->reg_idx),
 	     val);
 }
 
@@ -469,6 +521,7 @@ int i40evf_setup_tx_descriptors(struct i40e_ring *tx_ring)
 
 	tx_ring->next_to_use = 0;
 	tx_ring->next_to_clean = 0;
+	tx_ring->tx_stats.prev_pkt_ctr = -1;
 	return 0;
 
 err:
@@ -1444,12 +1497,9 @@ static inline void i40e_update_enable_itr(struct i40e_vsi *vsi,
 	struct i40e_hw *hw = &vsi->back->hw;
 	bool rx = false, tx = false;
 	u32 rxval, txval;
-	int vector;
 	int idx = q_vector->v_idx;
 	int rx_itr_setting, tx_itr_setting;
 
-	vector = (q_vector->v_idx + vsi->base_vector);
-
 	/* avoid dynamic calculation if in countdown mode OR if
 	 * all dynamic is disabled
 	 */
@@ -1498,12 +1548,12 @@ static inline void i40e_update_enable_itr(struct i40e_vsi *vsi,
 		 */
 		rxval |= BIT(31);
 		/* don't check _DOWN because interrupt isn't being enabled */
-		wr32(hw, INTREG(vector - 1), rxval);
+		wr32(hw, INTREG(q_vector->reg_idx), rxval);
 	}
 
 enable_int:
 	if (!test_bit(__I40E_VSI_DOWN, vsi->state))
-		wr32(hw, INTREG(vector - 1), txval);
+		wr32(hw, INTREG(q_vector->reg_idx), txval);
 
 	if (q_vector->itr_countdown)
 		q_vector->itr_countdown--;
diff --git a/drivers/net/ethernet/intel/i40evf/i40e_txrx.h b/drivers/net/ethernet/intel/i40evf/i40e_txrx.h
index 8d26c85d12e1..7798a6645c3f 100644
--- a/drivers/net/ethernet/intel/i40evf/i40e_txrx.h
+++ b/drivers/net/ethernet/intel/i40evf/i40e_txrx.h
@@ -260,7 +260,7 @@ static inline unsigned int i40e_txd_use_count(unsigned int size)
 }
 
 /* Tx Descriptors needed, worst case */
-#define DESC_NEEDED (MAX_SKB_FRAGS + 4)
+#define DESC_NEEDED (MAX_SKB_FRAGS + 6)
 #define I40E_MIN_DESC_PENDING	4
 
 #define I40E_TX_FLAGS_HW_VLAN		BIT(1)
@@ -313,6 +313,7 @@ struct i40e_tx_queue_stats {
 	u64 tx_done_old;
 	u64 tx_linearize;
 	u64 tx_force_wb;
+	int prev_pkt_ctr;
 	u64 tx_lost_interrupt;
 };
 
@@ -467,6 +468,7 @@ void i40evf_free_rx_resources(struct i40e_ring *rx_ring);
 int i40evf_napi_poll(struct napi_struct *napi, int budget);
 void i40evf_force_wb(struct i40e_vsi *vsi, struct i40e_q_vector *q_vector);
 u32 i40evf_get_tx_pending(struct i40e_ring *ring, bool in_sw);
+void i40evf_detect_recover_hung(struct i40e_vsi *vsi);
 int __i40evf_maybe_stop_tx(struct i40e_ring *tx_ring, int size);
 bool __i40evf_chk_linearize(struct sk_buff *skb);
 
diff --git a/drivers/net/ethernet/intel/i40evf/i40e_type.h b/drivers/net/ethernet/intel/i40evf/i40e_type.h
index 213b773dfad6..54951c84a481 100644
--- a/drivers/net/ethernet/intel/i40evf/i40e_type.h
+++ b/drivers/net/ethernet/intel/i40evf/i40e_type.h
@@ -361,6 +361,7 @@ enum i40e_nvmupd_cmd {
 	I40E_NVMUPD_STATUS,
 	I40E_NVMUPD_EXEC_AQ,
 	I40E_NVMUPD_GET_AQ_RESULT,
+	I40E_NVMUPD_GET_AQ_EVENT,
 };
 
 enum i40e_nvmupd_state {
@@ -380,15 +381,21 @@ enum i40e_nvmupd_state {
 
 #define I40E_NVM_MOD_PNT_MASK 0xFF
 
-#define I40E_NVM_TRANS_SHIFT	8
-#define I40E_NVM_TRANS_MASK	(0xf << I40E_NVM_TRANS_SHIFT)
-#define I40E_NVM_CON		0x0
-#define I40E_NVM_SNT		0x1
-#define I40E_NVM_LCB		0x2
-#define I40E_NVM_SA		(I40E_NVM_SNT | I40E_NVM_LCB)
-#define I40E_NVM_ERA		0x4
-#define I40E_NVM_CSUM		0x8
-#define I40E_NVM_EXEC		0xf
+#define I40E_NVM_TRANS_SHIFT			8
+#define I40E_NVM_TRANS_MASK			(0xf << I40E_NVM_TRANS_SHIFT)
+#define I40E_NVM_PRESERVATION_FLAGS_SHIFT	12
+#define I40E_NVM_PRESERVATION_FLAGS_MASK \
+				(0x3 << I40E_NVM_PRESERVATION_FLAGS_SHIFT)
+#define I40E_NVM_PRESERVATION_FLAGS_SELECTED	0x01
+#define I40E_NVM_PRESERVATION_FLAGS_ALL		0x02
+#define I40E_NVM_CON				0x0
+#define I40E_NVM_SNT				0x1
+#define I40E_NVM_LCB				0x2
+#define I40E_NVM_SA				(I40E_NVM_SNT | I40E_NVM_LCB)
+#define I40E_NVM_ERA				0x4
+#define I40E_NVM_CSUM				0x8
+#define I40E_NVM_AQE				0xe
+#define I40E_NVM_EXEC				0xf
 
 #define I40E_NVM_ADAPT_SHIFT	16
 #define I40E_NVM_ADAPT_MASK	(0xffff << I40E_NVM_ADAPT_SHIFT)
@@ -561,6 +568,7 @@ struct i40e_hw {
 	/* state of nvm update process */
 	enum i40e_nvmupd_state nvmupd_state;
 	struct i40e_aq_desc nvm_wb_desc;
+	struct i40e_aq_desc nvm_aq_event_desc;
 	struct i40e_virt_mem nvm_buff;
 	bool nvm_release_on_done;
 	u16 nvm_wait_opcode;
@@ -1422,19 +1430,19 @@ enum i40e_reset_type {
 #define I40E_FD_INSET_FLEX_WORD57_MASK		(0x1ULL << \
 					I40E_FD_INSET_FLEX_WORD57_SHIFT)
 
-/* Version format for PPP */
-struct i40e_ppp_version {
+/* Version format for Dynamic Device Personalization (DDP) */
+struct i40e_ddp_version {
 	u8 major;
 	u8 minor;
 	u8 update;
 	u8 draft;
 };
 
-#define I40E_PPP_NAME_SIZE	32
+#define I40E_DDP_NAME_SIZE	32
 
 /* Package header */
 struct i40e_package_header {
-	struct i40e_ppp_version version;
+	struct i40e_ddp_version version;
 	u32 segment_count;
 	u32 segment_offset[1];
 };
@@ -1446,16 +1454,16 @@ struct i40e_generic_seg_header {
 #define SEGMENT_TYPE_I40E	0x00000011
 #define SEGMENT_TYPE_X722	0x00000012
 	u32 type;
-	struct i40e_ppp_version version;
+	struct i40e_ddp_version version;
 	u32 size;
-	char name[I40E_PPP_NAME_SIZE];
+	char name[I40E_DDP_NAME_SIZE];
 };
 
 struct i40e_metadata_segment {
 	struct i40e_generic_seg_header header;
-	struct i40e_ppp_version version;
+	struct i40e_ddp_version version;
 	u32 track_id;
-	char name[I40E_PPP_NAME_SIZE];
+	char name[I40E_DDP_NAME_SIZE];
 };
 
 struct i40e_device_id_entry {
@@ -1465,8 +1473,8 @@ struct i40e_device_id_entry {
 
 struct i40e_profile_segment {
 	struct i40e_generic_seg_header header;
-	struct i40e_ppp_version version;
-	char name[I40E_PPP_NAME_SIZE];
+	struct i40e_ddp_version version;
+	char name[I40E_DDP_NAME_SIZE];
 	u32 device_table_count;
 	struct i40e_device_id_entry device_table[1];
 };
@@ -1493,11 +1501,11 @@ struct i40e_profile_section_header {
 
 struct i40e_profile_info {
 	u32 track_id;
-	struct i40e_ppp_version version;
+	struct i40e_ddp_version version;
 	u8 op;
-#define I40E_PPP_ADD_TRACKID		0x01
-#define I40E_PPP_REMOVE_TRACKID	0x02
+#define I40E_DDP_ADD_TRACKID		0x01
+#define I40E_DDP_REMOVE_TRACKID	0x02
 	u8 reserved[7];
-	u8 name[I40E_PPP_NAME_SIZE];
+	u8 name[I40E_DDP_NAME_SIZE];
 };
 #endif /* _I40E_TYPE_H_ */
diff --git a/drivers/net/ethernet/intel/i40evf/i40evf.h b/drivers/net/ethernet/intel/i40evf/i40evf.h
index de0af521d602..9690c1ea019e 100644
--- a/drivers/net/ethernet/intel/i40evf/i40evf.h
+++ b/drivers/net/ethernet/intel/i40evf/i40evf.h
@@ -114,14 +114,14 @@ struct i40e_q_vector {
 	struct i40evf_adapter *adapter;
 	struct i40e_vsi *vsi;
 	struct napi_struct napi;
-	unsigned long reg_idx;
 	struct i40e_ring_container rx;
 	struct i40e_ring_container tx;
 	u32 ring_mask;
 	u8 num_ringpairs;	/* total number of ring pairs in vector */
 #define ITR_COUNTDOWN_START 100
 	u8 itr_countdown;	/* when 0 or 1 update ITR */
-	int v_idx;	/* vector index in list */
+	u16 v_idx;		/* index in the vsi->q_vector array. */
+	u16 reg_idx;		/* register index of the interrupt */
 	char name[IFNAMSIZ + 15];
 	bool arm_wb_state;
 	cpumask_t affinity_mask;
@@ -187,6 +187,7 @@ enum i40evf_state_t {
 enum i40evf_critical_section_t {
 	__I40EVF_IN_CRITICAL_TASK,	/* cannot be interrupted */
 	__I40EVF_IN_CLIENT_TASK,
+	__I40EVF_IN_REMOVE_TASK,	/* device being removed */
 };
 
 /* board specific private data structure */
@@ -199,6 +200,9 @@ struct i40evf_adapter {
 	wait_queue_head_t down_waitqueue;
 	struct i40e_q_vector *q_vectors;
 	struct list_head vlan_filter_list;
+	struct list_head mac_filter_list;
+	/* Lock to protect accesses to MAC and VLAN lists */
+	spinlock_t mac_vlan_list_lock;
 	char misc_vector_name[IFNAMSIZ + 9];
 	int num_active_queues;
 	int num_req_queues;
@@ -206,7 +210,6 @@ struct i40evf_adapter {
 	/* TX */
 	struct i40e_ring *tx_rings;
 	u32 tx_timeout_count;
-	struct list_head mac_filter_list;
 	u32 tx_desc_count;
 
 	/* RX */
diff --git a/drivers/net/ethernet/intel/i40evf/i40evf_ethtool.c b/drivers/net/ethernet/intel/i40evf/i40evf_ethtool.c
index da006fa3fec1..e2d8aa19d205 100644
--- a/drivers/net/ethernet/intel/i40evf/i40evf_ethtool.c
+++ b/drivers/net/ethernet/intel/i40evf/i40evf_ethtool.c
@@ -512,31 +512,31 @@ static void i40evf_set_itr_per_queue(struct i40evf_adapter *adapter,
 				     struct ethtool_coalesce *ec,
 				     int queue)
 {
+	struct i40e_ring *rx_ring = &adapter->rx_rings[queue];
+	struct i40e_ring *tx_ring = &adapter->tx_rings[queue];
 	struct i40e_vsi *vsi = &adapter->vsi;
 	struct i40e_hw *hw = &adapter->hw;
 	struct i40e_q_vector *q_vector;
 	u16 vector;
 
-	adapter->rx_rings[queue].rx_itr_setting = ec->rx_coalesce_usecs;
-	adapter->tx_rings[queue].tx_itr_setting = ec->tx_coalesce_usecs;
+	rx_ring->rx_itr_setting = ec->rx_coalesce_usecs;
+	tx_ring->tx_itr_setting = ec->tx_coalesce_usecs;
 
-	if (ec->use_adaptive_rx_coalesce)
-		adapter->rx_rings[queue].rx_itr_setting |= I40E_ITR_DYNAMIC;
-	else
-		adapter->rx_rings[queue].rx_itr_setting &= ~I40E_ITR_DYNAMIC;
+	rx_ring->rx_itr_setting |= I40E_ITR_DYNAMIC;
+	if (!ec->use_adaptive_rx_coalesce)
+		rx_ring->rx_itr_setting ^= I40E_ITR_DYNAMIC;
 
-	if (ec->use_adaptive_tx_coalesce)
-		adapter->tx_rings[queue].tx_itr_setting |= I40E_ITR_DYNAMIC;
-	else
-		adapter->tx_rings[queue].tx_itr_setting &= ~I40E_ITR_DYNAMIC;
+	tx_ring->tx_itr_setting |= I40E_ITR_DYNAMIC;
+	if (!ec->use_adaptive_tx_coalesce)
+		tx_ring->tx_itr_setting ^= I40E_ITR_DYNAMIC;
 
-	q_vector = adapter->rx_rings[queue].q_vector;
-	q_vector->rx.itr = ITR_TO_REG(adapter->rx_rings[queue].rx_itr_setting);
+	q_vector = rx_ring->q_vector;
+	q_vector->rx.itr = ITR_TO_REG(rx_ring->rx_itr_setting);
 	vector = vsi->base_vector + q_vector->v_idx;
 	wr32(hw, I40E_VFINT_ITRN1(I40E_RX_ITR, vector - 1), q_vector->rx.itr);
 
-	q_vector = adapter->tx_rings[queue].q_vector;
-	q_vector->tx.itr = ITR_TO_REG(adapter->tx_rings[queue].tx_itr_setting);
+	q_vector = tx_ring->q_vector;
+	q_vector->tx.itr = ITR_TO_REG(tx_ring->tx_itr_setting);
 	vector = vsi->base_vector + q_vector->v_idx;
 	wr32(hw, I40E_VFINT_ITRN1(I40E_TX_ITR, vector - 1), q_vector->tx.itr);
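
The coalesce update above uses a branch-light idiom: I40E_ITR_DYNAMIC is set unconditionally and then XORed back out when adaptive coalescing is requested off. Because the bit is known to be set at the point of the XOR, this is equivalent to the usual set/clear branches. A standalone self-check, with an illustrative bit value rather than the driver's define:

	#include <assert.h>
	#include <stdint.h>

	#define ITR_DYNAMIC 0x8000u	/* illustrative value only */

	static uint16_t apply_adaptive(uint16_t itr, int adaptive)
	{
		itr |= ITR_DYNAMIC;		/* bit is now known to be set */
		if (!adaptive)
			itr ^= ITR_DYNAMIC;	/* XOR of a set bit clears it */
		return itr;
	}

	int main(void)
	{
		assert(apply_adaptive(0x00C8, 1) == (0x00C8 | ITR_DYNAMIC));
		assert(apply_adaptive(0x00C8 | ITR_DYNAMIC, 0) == 0x00C8);
		return 0;
	}
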
 
diff --git a/drivers/net/ethernet/intel/i40evf/i40evf_main.c b/drivers/net/ethernet/intel/i40evf/i40evf_main.c
index 7b2a4eba92e2..16989ad2ca90 100644
--- a/drivers/net/ethernet/intel/i40evf/i40evf_main.c
+++ b/drivers/net/ethernet/intel/i40evf/i40evf_main.c
@@ -45,8 +45,8 @@ static const char i40evf_driver_string[] =
 #define DRV_KERN "-k"
 
 #define DRV_VERSION_MAJOR 3
-#define DRV_VERSION_MINOR 0
-#define DRV_VERSION_BUILD 1
+#define DRV_VERSION_MINOR 2
+#define DRV_VERSION_BUILD 2
 #define DRV_VERSION __stringify(DRV_VERSION_MAJOR) "." \
 	     __stringify(DRV_VERSION_MINOR) "." \
 	     __stringify(DRV_VERSION_BUILD) \
@@ -276,37 +276,7 @@ void i40evf_irq_enable_queues(struct i40evf_adapter *adapter, u32 mask)
 		if (mask & BIT(i - 1)) {
 			wr32(hw, I40E_VFINT_DYN_CTLN1(i - 1),
 			     I40E_VFINT_DYN_CTLN1_INTENA_MASK |
-			     I40E_VFINT_DYN_CTLN1_ITR_INDX_MASK |
-			     I40E_VFINT_DYN_CTLN1_CLEARPBA_MASK);
-		}
-	}
-}
-
-/**
- * i40evf_fire_sw_int - Generate SW interrupt for specified vectors
- * @adapter: board private structure
- * @mask: bitmap of vectors to trigger
- **/
-static void i40evf_fire_sw_int(struct i40evf_adapter *adapter, u32 mask)
-{
-	struct i40e_hw *hw = &adapter->hw;
-	int i;
-	u32 dyn_ctl;
-
-	if (mask & 1) {
-		dyn_ctl = rd32(hw, I40E_VFINT_DYN_CTL01);
-		dyn_ctl |= I40E_VFINT_DYN_CTLN1_SWINT_TRIG_MASK |
-			   I40E_VFINT_DYN_CTLN1_ITR_INDX_MASK |
-			   I40E_VFINT_DYN_CTLN1_CLEARPBA_MASK;
-		wr32(hw, I40E_VFINT_DYN_CTL01, dyn_ctl);
-	}
-	for (i = 1; i < adapter->num_msix_vectors; i++) {
-		if (mask & BIT(i)) {
-			dyn_ctl = rd32(hw, I40E_VFINT_DYN_CTLN1(i - 1));
-			dyn_ctl |= I40E_VFINT_DYN_CTLN1_SWINT_TRIG_MASK |
-				   I40E_VFINT_DYN_CTLN1_ITR_INDX_MASK |
-				   I40E_VFINT_DYN_CTLN1_CLEARPBA_MASK;
-			wr32(hw, I40E_VFINT_DYN_CTLN1(i - 1), dyn_ctl);
+			     I40E_VFINT_DYN_CTLN1_ITR_INDX_MASK);
 		}
 	}
 }
@@ -337,15 +307,10 @@ static irqreturn_t i40evf_msix_aq(int irq, void *data)
 	struct net_device *netdev = data;
 	struct i40evf_adapter *adapter = netdev_priv(netdev);
 	struct i40e_hw *hw = &adapter->hw;
-	u32 val;
 
 	/* handle non-queue interrupts, these reads clear the registers */
-	val = rd32(hw, I40E_VFINT_ICR01);
-	val = rd32(hw, I40E_VFINT_ICR0_ENA1);
-
-	val = rd32(hw, I40E_VFINT_DYN_CTL01) |
-	      I40E_VFINT_DYN_CTL01_CLEARPBA_MASK;
-	wr32(hw, I40E_VFINT_DYN_CTL01, val);
+	rd32(hw, I40E_VFINT_ICR01);
+	rd32(hw, I40E_VFINT_ICR0_ENA1);
 
 	/* schedule work on the private workqueue */
 	schedule_work(&adapter->adminq_task);
@@ -706,7 +671,8 @@ static void i40evf_configure_rx(struct i40evf_adapter *adapter)
  * @adapter: board private structure
  * @vlan: vlan tag
  *
- * Returns ptr to the filter object or NULL
+ * Returns ptr to the filter object or NULL. Must be called while holding the
+ * mac_vlan_list_lock.
  **/
 static struct
 i40evf_vlan_filter *i40evf_find_vlan(struct i40evf_adapter *adapter, u16 vlan)
@@ -731,14 +697,8 @@ static struct
 i40evf_vlan_filter *i40evf_add_vlan(struct i40evf_adapter *adapter, u16 vlan)
 {
 	struct i40evf_vlan_filter *f = NULL;
-	int count = 50;
 
-	while (test_and_set_bit(__I40EVF_IN_CRITICAL_TASK,
-				&adapter->crit_section)) {
-		udelay(1);
-		if (--count == 0)
-			goto out;
-	}
+	spin_lock_bh(&adapter->mac_vlan_list_lock);
 
 	f = i40evf_find_vlan(adapter, vlan);
 	if (!f) {
@@ -755,8 +715,7 @@ i40evf_vlan_filter *i40evf_add_vlan(struct i40evf_adapter *adapter, u16 vlan)
 	}
 
 clearout:
-	clear_bit(__I40EVF_IN_CRITICAL_TASK, &adapter->crit_section);
-out:
+	spin_unlock_bh(&adapter->mac_vlan_list_lock);
 	return f;
 }
 
@@ -768,21 +727,16 @@ out:
 static void i40evf_del_vlan(struct i40evf_adapter *adapter, u16 vlan)
 {
 	struct i40evf_vlan_filter *f;
-	int count = 50;
 
-	while (test_and_set_bit(__I40EVF_IN_CRITICAL_TASK,
-				&adapter->crit_section)) {
-		udelay(1);
-		if (--count == 0)
-			return;
-	}
+	spin_lock_bh(&adapter->mac_vlan_list_lock);
 
 	f = i40evf_find_vlan(adapter, vlan);
 	if (f) {
 		f->remove = true;
 		adapter->aq_required |= I40EVF_FLAG_AQ_DEL_VLAN_FILTER;
 	}
-	clear_bit(__I40EVF_IN_CRITICAL_TASK, &adapter->crit_section);
+
+	spin_unlock_bh(&adapter->mac_vlan_list_lock);
 }
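
The filter helpers above replace a busy-wait on the __I40EVF_IN_CRITICAL_TASK bit with a dedicated spinlock. A condensed contrast of the two shapes, using hypothetical names; note that the bit-lock variant can time out and silently drop the update, whereas spin_lock_bh() always acquires and also keeps bottom halves from racing the list walk:

	#include <linux/bitops.h>
	#include <linux/delay.h>
	#include <linux/spinlock.h>

	#define LOCK_BIT 0

	struct vf_adapter {
		unsigned long crit_section;
		spinlock_t list_lock;	/* spin_lock_init() at probe time */
	};

	static void old_style(struct vf_adapter *ad)
	{
		int count = 50;

		while (test_and_set_bit(LOCK_BIT, &ad->crit_section)) {
			udelay(1);
			if (--count == 0)
				return;	/* gave up: the update is lost */
		}
		/* ... walk or modify the filter lists ... */
		clear_bit(LOCK_BIT, &ad->crit_section);
	}

	static void new_style(struct vf_adapter *ad)
	{
		spin_lock_bh(&ad->list_lock);
		/* ... walk or modify the filter lists ... */
		spin_unlock_bh(&ad->list_lock);
	}
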
 
 /**
@@ -824,7 +778,8 @@ static int i40evf_vlan_rx_kill_vid(struct net_device *netdev,
  * @adapter: board private structure
  * @macaddr: the MAC address
  *
- * Returns ptr to the filter object or NULL
+ * Returns ptr to the filter object or NULL. Must be called while holding the
+ * mac_vlan_list_lock.
  **/
 static struct
 i40evf_mac_filter *i40evf_find_filter(struct i40evf_adapter *adapter,
@@ -854,26 +809,17 @@ i40evf_mac_filter *i40evf_add_filter(struct i40evf_adapter *adapter,
 				     u8 *macaddr)
 {
 	struct i40evf_mac_filter *f;
-	int count = 50;
 
 	if (!macaddr)
 		return NULL;
 
-	while (test_and_set_bit(__I40EVF_IN_CRITICAL_TASK,
-				&adapter->crit_section)) {
-		udelay(1);
-		if (--count == 0)
-			return NULL;
-	}
+	spin_lock_bh(&adapter->mac_vlan_list_lock);
 
 	f = i40evf_find_filter(adapter, macaddr);
 	if (!f) {
 		f = kzalloc(sizeof(*f), GFP_ATOMIC);
-		if (!f) {
-			clear_bit(__I40EVF_IN_CRITICAL_TASK,
-				  &adapter->crit_section);
-			return NULL;
-		}
+		if (!f)
+			goto clearout;
 
 		ether_addr_copy(f->macaddr, macaddr);
 
@@ -884,7 +830,8 @@ i40evf_mac_filter *i40evf_add_filter(struct i40evf_adapter *adapter,
 		f->remove = false;
 	}
 
-	clear_bit(__I40EVF_IN_CRITICAL_TASK, &adapter->crit_section);
+clearout:
+	spin_unlock_bh(&adapter->mac_vlan_list_lock);
 	return f;
 }
 
@@ -911,12 +858,16 @@ static int i40evf_set_mac(struct net_device *netdev, void *p)
 	if (adapter->flags & I40EVF_FLAG_ADDR_SET_BY_PF)
 		return -EPERM;
 
+	spin_lock_bh(&adapter->mac_vlan_list_lock);
+
 	f = i40evf_find_filter(adapter, hw->mac.addr);
 	if (f) {
 		f->remove = true;
 		adapter->aq_required |= I40EVF_FLAG_AQ_DEL_MAC_FILTER;
 	}
 
+	spin_unlock_bh(&adapter->mac_vlan_list_lock);
+
 	f = i40evf_add_filter(adapter, addr->sa_data);
 	if (f) {
 		ether_addr_copy(hw->mac.addr, addr->sa_data);
@@ -937,7 +888,6 @@ static void i40evf_set_rx_mode(struct net_device *netdev)
 	struct netdev_hw_addr *uca;
 	struct netdev_hw_addr *mca;
 	struct netdev_hw_addr *ha;
-	int count = 50;
 
 	/* add addr if not already in the filter list */
 	netdev_for_each_uc_addr(uca, netdev) {
@@ -947,16 +897,8 @@ static void i40evf_set_rx_mode(struct net_device *netdev)
 		i40evf_add_filter(adapter, mca->addr);
 	}
 
-	while (test_and_set_bit(__I40EVF_IN_CRITICAL_TASK,
-				&adapter->crit_section)) {
-		udelay(1);
-		if (--count == 0) {
-			dev_err(&adapter->pdev->dev,
-				"Failed to get lock in %s\n", __func__);
-			return;
-		}
-	}
-	/* remove filter if not in netdev list */
+	spin_lock_bh(&adapter->mac_vlan_list_lock);
+
 	list_for_each_entry_safe(f, ftmp, &adapter->mac_filter_list, list) {
 		netdev_for_each_mc_addr(mca, netdev)
 			if (ether_addr_equal(mca->addr, f->macaddr))
@@ -995,7 +937,7 @@ bottom_of_search_loop:
 		 adapter->flags & I40EVF_FLAG_ALLMULTI_ON)
 		adapter->aq_required |= I40EVF_FLAG_AQ_RELEASE_ALLMULTI;
 
-	clear_bit(__I40EVF_IN_CRITICAL_TASK, &adapter->crit_section);
+	spin_unlock_bh(&adapter->mac_vlan_list_lock);
 }
 
 /**
@@ -1058,6 +1000,8 @@ static void i40evf_configure(struct i40evf_adapter *adapter)
 /**
  * i40evf_up_complete - Finish the last steps of bringing up a connection
  * @adapter: board private structure
+ *
+ * Expects to be called while holding the __I40EVF_IN_CRITICAL_TASK bit lock.
  **/
 static void i40evf_up_complete(struct i40evf_adapter *adapter)
 {
@@ -1075,6 +1019,8 @@ static void i40evf_up_complete(struct i40evf_adapter *adapter)
 /**
  * i40evf_down - Shutdown the connection processing
  * @adapter: board private structure
+ *
+ * Expects to be called while holding the __I40EVF_IN_CRITICAL_TASK bit lock.
  **/
 void i40evf_down(struct i40evf_adapter *adapter)
 {
@@ -1084,16 +1030,14 @@ void i40evf_down(struct i40evf_adapter *adapter)
 	if (adapter->state <= __I40EVF_DOWN_PENDING)
 		return;
 
-	while (test_and_set_bit(__I40EVF_IN_CRITICAL_TASK,
-				&adapter->crit_section))
-		usleep_range(500, 1000);
-
 	netif_carrier_off(netdev);
 	netif_tx_disable(netdev);
 	adapter->link_up = false;
 	i40evf_napi_disable_all(adapter);
 	i40evf_irq_disable(adapter);
 
+	spin_lock_bh(&adapter->mac_vlan_list_lock);
+
 	/* remove all MAC filters */
 	list_for_each_entry(f, &adapter->mac_filter_list, list) {
 		f->remove = true;
@@ -1102,6 +1046,9 @@ void i40evf_down(struct i40evf_adapter *adapter)
 	list_for_each_entry(f, &adapter->vlan_filter_list, list) {
 		f->remove = true;
 	}
+
+	spin_unlock_bh(&adapter->mac_vlan_list_lock);
+
 	if (!(adapter->flags & I40EVF_FLAG_PF_COMMS_FAILED) &&
 	    adapter->state != __I40EVF_RESETTING) {
 		/* cancel any current operation */
@@ -1115,7 +1062,6 @@ void i40evf_down(struct i40evf_adapter *adapter)
 		adapter->aq_required |= I40EVF_FLAG_AQ_DISABLE_QUEUES;
 	}
 
-	clear_bit(__I40EVF_IN_CRITICAL_TASK, &adapter->crit_section);
 	mod_timer_pending(&adapter->watchdog_timer, jiffies + 1);
 }
 
@@ -1441,6 +1387,7 @@ static int i40evf_alloc_q_vectors(struct i40evf_adapter *adapter)
 		q_vector->adapter = adapter;
 		q_vector->vsi = &adapter->vsi;
 		q_vector->v_idx = q_idx;
+		q_vector->reg_idx = q_idx;
 		cpumask_copy(&q_vector->affinity_mask, cpu_possible_mask);
 		netif_napi_add(adapter->netdev, &q_vector->napi,
 			       i40evf_napi_poll, NAPI_POLL_WEIGHT);
@@ -1770,13 +1717,8 @@ static void i40evf_watchdog_task(struct work_struct *work)
 	if (adapter->state == __I40EVF_RUNNING)
 		i40evf_request_stats(adapter);
 watchdog_done:
-	if (adapter->state == __I40EVF_RUNNING) {
-		i40evf_irq_enable_queues(adapter, ~0);
-		i40evf_fire_sw_int(adapter, 0xFF);
-	} else {
-		i40evf_fire_sw_int(adapter, 0x1);
-	}
-
+	if (adapter->state == __I40EVF_RUNNING)
+		i40evf_detect_recover_hung(&adapter->vsi);
 	clear_bit(__I40EVF_IN_CRITICAL_TASK, &adapter->crit_section);
 restart_watchdog:
 	if (adapter->state == __I40EVF_REMOVE)
@@ -1796,7 +1738,11 @@ static void i40evf_disable_vf(struct i40evf_adapter *adapter)
 
 	adapter->flags |= I40EVF_FLAG_PF_COMMS_FAILED;
 
-	if (netif_running(adapter->netdev)) {
+	/* We don't use netif_running() because it may be true prior to
+	 * ndo_open() returning, so we can't assume it means all our open
+	 * tasks have finished, since we're not holding the rtnl_lock here.
+	 */
+	if (adapter->state == __I40EVF_RUNNING) {
 		set_bit(__I40E_VSI_DOWN, adapter->vsi.state);
 		netif_carrier_off(adapter->netdev);
 		netif_tx_disable(adapter->netdev);
@@ -1808,6 +1754,8 @@ static void i40evf_disable_vf(struct i40evf_adapter *adapter)
 		i40evf_free_all_rx_resources(adapter);
 	}
 
+	spin_lock_bh(&adapter->mac_vlan_list_lock);
+
 	/* Delete all of the filters, both MAC and VLAN. */
 	list_for_each_entry_safe(f, ftmp, &adapter->mac_filter_list, list) {
 		list_del(&f->list);
@@ -1819,6 +1767,8 @@ static void i40evf_disable_vf(struct i40evf_adapter *adapter)
 		kfree(fv);
 	}
 
+	spin_unlock_bh(&adapter->mac_vlan_list_lock);
+
 	i40evf_free_misc_irq(adapter);
 	i40evf_reset_interrupt_capability(adapter);
 	i40evf_free_queues(adapter);
@@ -1854,6 +1804,13 @@ static void i40evf_reset_task(struct work_struct *work)
 	struct i40evf_mac_filter *f;
 	u32 reg_val;
 	int i = 0, err;
+	bool running;
+
+	/* When the device is being removed it doesn't make sense to run the
+	 * reset task, so just return in that case.
+	 */
+	if (test_bit(__I40EVF_IN_REMOVE_TASK, &adapter->crit_section))
+		return;
 
 	while (test_and_set_bit(__I40EVF_IN_CLIENT_TASK,
 				&adapter->crit_section))
@@ -1913,7 +1870,13 @@ static void i40evf_reset_task(struct work_struct *work)
 	}
 
 continue_reset:
-	if (netif_running(netdev)) {
+	/* We don't use netif_running() because it may be true prior to
+	 * ndo_open() returning, so we can't assume it means all our open
+	 * tasks have finished, since we're not holding the rtnl_lock here.
+	 */
+	running = (adapter->state == __I40EVF_RUNNING);
+
+	if (running) {
 		netif_carrier_off(netdev);
 		netif_tx_stop_all_queues(netdev);
 		adapter->link_up = false;
@@ -1948,6 +1911,8 @@ continue_reset:
 	adapter->aq_required |= I40EVF_FLAG_AQ_GET_CONFIG;
 	adapter->aq_required |= I40EVF_FLAG_AQ_MAP_VECTORS;
 
+	spin_lock_bh(&adapter->mac_vlan_list_lock);
+
 	/* re-add all MAC filters */
 	list_for_each_entry(f, &adapter->mac_filter_list, list) {
 		f->add = true;
@@ -1956,15 +1921,19 @@ continue_reset:
 	list_for_each_entry(vlf, &adapter->vlan_filter_list, list) {
 		vlf->add = true;
 	}
+
+	spin_unlock_bh(&adapter->mac_vlan_list_lock);
+
 	adapter->aq_required |= I40EVF_FLAG_AQ_ADD_MAC_FILTER;
 	adapter->aq_required |= I40EVF_FLAG_AQ_ADD_VLAN_FILTER;
-	clear_bit(__I40EVF_IN_CRITICAL_TASK, &adapter->crit_section);
-	clear_bit(__I40EVF_IN_CLIENT_TASK, &adapter->crit_section);
 	i40evf_misc_irq_enable(adapter);
 
 	mod_timer(&adapter->watchdog_timer, jiffies + 2);
 
-	if (netif_running(adapter->netdev)) {
+	/* We were running when the reset started, so we need to restore some
+	 * state here.
+	 */
+	if (running) {
 		/* allocate transmit descriptors */
 		err = i40evf_setup_all_tx_resources(adapter);
 		if (err)
@@ -1993,9 +1962,13 @@ continue_reset:
 		adapter->state = __I40EVF_DOWN;
 		wake_up(&adapter->down_waitqueue);
 	}
+	clear_bit(__I40EVF_IN_CLIENT_TASK, &adapter->crit_section);
+	clear_bit(__I40EVF_IN_CRITICAL_TASK, &adapter->crit_section);
 
 	return;
 reset_err:
+	clear_bit(__I40EVF_IN_CLIENT_TASK, &adapter->crit_section);
+	clear_bit(__I40EVF_IN_CRITICAL_TASK, &adapter->crit_section);
 	dev_err(&adapter->pdev->dev, "failed to allocate resources during reinit\n");
 	i40evf_close(netdev);
 }
@@ -2239,8 +2212,14 @@ static int i40evf_open(struct net_device *netdev)
 		return -EIO;
 	}
 
-	if (adapter->state != __I40EVF_DOWN)
-		return -EBUSY;
+	while (test_and_set_bit(__I40EVF_IN_CRITICAL_TASK,
+				&adapter->crit_section))
+		usleep_range(500, 1000);
+
+	if (adapter->state != __I40EVF_DOWN) {
+		err = -EBUSY;
+		goto err_unlock;
+	}
 
 	/* allocate transmit descriptors */
 	err = i40evf_setup_all_tx_resources(adapter);
@@ -2264,6 +2243,8 @@ static int i40evf_open(struct net_device *netdev)
 
 	i40evf_irq_enable(adapter, true);
 
+	clear_bit(__I40EVF_IN_CRITICAL_TASK, &adapter->crit_section);
+
 	return 0;
 
 err_req_irq:
@@ -2273,6 +2254,8 @@ err_setup_rx:
 	i40evf_free_all_rx_resources(adapter);
 err_setup_tx:
 	i40evf_free_all_tx_resources(adapter);
+err_unlock:
+	clear_bit(__I40EVF_IN_CRITICAL_TASK, &adapter->crit_section);
 
 	return err;
 }
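
With i40evf_open() now taking the critical-section bit before touching adapter state, every exit path has to drop it, which is what the new err_unlock label is for. A reduced sketch of the unwind discipline; the resource helpers are hypothetical:

	#include <linux/bitops.h>
	#include <linux/delay.h>

	#define LOCK_BIT 0

	struct vf_adapter { unsigned long crit_section; };

	int alloc_tx_resources(struct vf_adapter *ad);
	int alloc_rx_resources(struct vf_adapter *ad);
	void free_tx_resources(struct vf_adapter *ad);

	static int open_sketch(struct vf_adapter *ad)
	{
		int err;

		while (test_and_set_bit(LOCK_BIT, &ad->crit_section))
			usleep_range(500, 1000);

		err = alloc_tx_resources(ad);
		if (err)
			goto err_unlock;
		err = alloc_rx_resources(ad);
		if (err)
			goto err_free_tx;

		clear_bit(LOCK_BIT, &ad->crit_section);
		return 0;

	err_free_tx:
		free_tx_resources(ad);
	err_unlock:
		clear_bit(LOCK_BIT, &ad->crit_section);	/* every exit drops the lock */
		return err;
	}
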
@@ -2296,6 +2279,9 @@ static int i40evf_close(struct net_device *netdev)
 	if (adapter->state <= __I40EVF_DOWN_PENDING)
 		return 0;
 
+	while (test_and_set_bit(__I40EVF_IN_CRITICAL_TASK,
+				&adapter->crit_section))
+		usleep_range(500, 1000);
 
 	set_bit(__I40E_VSI_DOWN, adapter->vsi.state);
 	if (CLIENT_ENABLED(adapter))
@@ -2305,6 +2291,8 @@ static int i40evf_close(struct net_device *netdev)
 	adapter->state = __I40EVF_DOWN_PENDING;
 	i40evf_free_traffic_irqs(adapter);
 
+	clear_bit(__I40EVF_IN_CRITICAL_TASK, &adapter->crit_section);
+
 	/* We explicitly don't free resources here because the hardware is
 	 * still active and can DMA into memory. Resources are cleared in
 	 * i40evf_virtchnl_completion() after we get confirmation from the PF
@@ -2357,13 +2345,19 @@ static int i40evf_set_features(struct net_device *netdev,
 {
 	struct i40evf_adapter *adapter = netdev_priv(netdev);
 
-	if (!VLAN_ALLOWED(adapter))
+	/* Don't allow changing the VLAN_RX flag when a VLAN is set for the
+	 * VF, and return an error in that case
+	 */
+	if (VLAN_ALLOWED(adapter)) {
+		if (features & NETIF_F_HW_VLAN_CTAG_RX)
+			adapter->aq_required |=
+				I40EVF_FLAG_AQ_ENABLE_VLAN_STRIPPING;
+		else
+			adapter->aq_required |=
+				I40EVF_FLAG_AQ_DISABLE_VLAN_STRIPPING;
+	} else if ((netdev->features ^ features) & NETIF_F_HW_VLAN_CTAG_RX) {
 		return -EINVAL;
-
-	if (features & NETIF_F_HW_VLAN_CTAG_RX)
-		adapter->aq_required |= I40EVF_FLAG_AQ_ENABLE_VLAN_STRIPPING;
-	else
-		adapter->aq_required |= I40EVF_FLAG_AQ_DISABLE_VLAN_STRIPPING;
+	}
 
 	return 0;
 }
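
The rewritten check relies on a standard feature-flags trick: netdev->features holds the current flags and the features argument the requested ones, so (old ^ new) has a bit set exactly where a flag would change; the request is rejected only if NETIF_F_HW_VLAN_CTAG_RX would actually flip. A standalone illustration with a made-up bit position:

	#include <assert.h>
	#include <stdint.h>

	#define HW_VLAN_CTAG_RX (1u << 8)	/* hypothetical bit position */

	int main(void)
	{
		uint32_t cur = HW_VLAN_CTAG_RX;	/* stripping currently on */
		uint32_t req = 0;		/* request: turn it off */

		/* a change is requested, so a VF without VLAN rights fails */
		assert((cur ^ req) & HW_VLAN_CTAG_RX);

		req = HW_VLAN_CTAG_RX;		/* request leaves it unchanged */
		assert(!((cur ^ req) & HW_VLAN_CTAG_RX));
		return 0;
	}
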
@@ -2943,6 +2937,8 @@ static int i40evf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
 	mutex_init(&hw->aq.asq_mutex);
 	mutex_init(&hw->aq.arq_mutex);
 
+	spin_lock_init(&adapter->mac_vlan_list_lock);
+
 	INIT_LIST_HEAD(&adapter->mac_filter_list);
 	INIT_LIST_HEAD(&adapter->vlan_filter_list);
 
@@ -2985,6 +2981,10 @@ static int i40evf_suspend(struct pci_dev *pdev, pm_message_t state)
 
 	netif_device_detach(netdev);
 
+	while (test_and_set_bit(__I40EVF_IN_CRITICAL_TASK,
+				&adapter->crit_section))
+		usleep_range(500, 1000);
+
 	if (netif_running(netdev)) {
 		rtnl_lock();
 		i40evf_down(adapter);
@@ -2993,6 +2993,8 @@ static int i40evf_suspend(struct pci_dev *pdev, pm_message_t state)
 	i40evf_free_misc_irq(adapter);
 	i40evf_reset_interrupt_capability(adapter);
 
+	clear_bit(__I40EVF_IN_CRITICAL_TASK, &adapter->crit_section);
+
 	retval = pci_save_state(pdev);
 	if (retval)
 		return retval;
@@ -3066,7 +3068,8 @@ static void i40evf_remove(struct pci_dev *pdev)
 	struct i40evf_mac_filter *f, *ftmp;
 	struct i40e_hw *hw = &adapter->hw;
 	int err;
-
+	/* Indicate we are in remove and reset_task should not run */
+	set_bit(__I40EVF_IN_REMOVE_TASK, &adapter->crit_section);
 	cancel_delayed_work_sync(&adapter->init_task);
 	cancel_work_sync(&adapter->reset_task);
 	cancel_delayed_work_sync(&adapter->client_task);
@@ -3101,8 +3104,6 @@ static void i40evf_remove(struct pci_dev *pdev)
 	if (adapter->watchdog_timer.function)
 		del_timer_sync(&adapter->watchdog_timer);
 
-	flush_scheduled_work();
-
 	i40evf_free_rss(adapter);
 
 	if (hw->aq.asq.count)
@@ -3118,6 +3119,7 @@ static void i40evf_remove(struct pci_dev *pdev)
 	i40evf_free_all_rx_resources(adapter);
 	i40evf_free_queues(adapter);
 	kfree(adapter->vf_res);
+	spin_lock_bh(&adapter->mac_vlan_list_lock);
 	/* If we got removed before an up/down sequence, we've got a filter
 	 * hanging out there that we need to get rid of.
 	 */
@@ -3130,6 +3132,8 @@ static void i40evf_remove(struct pci_dev *pdev)
 		kfree(f);
 	}
 
+	spin_unlock_bh(&adapter->mac_vlan_list_lock);
+
 	free_netdev(netdev);
 
 	pci_disable_pcie_error_reporting(pdev);
diff --git a/drivers/net/ethernet/intel/i40evf/i40evf_virtchnl.c b/drivers/net/ethernet/intel/i40evf/i40evf_virtchnl.c
index 46c8b8a3907c..50ce0d6c09ef 100644
--- a/drivers/net/ethernet/intel/i40evf/i40evf_virtchnl.c
+++ b/drivers/net/ethernet/intel/i40evf/i40evf_virtchnl.c
@@ -433,12 +433,16 @@ void i40evf_add_ether_addrs(struct i40evf_adapter *adapter)
 			adapter->current_op);
 		return;
 	}
+
+	spin_lock_bh(&adapter->mac_vlan_list_lock);
+
 	list_for_each_entry(f, &adapter->mac_filter_list, list) {
 		if (f->add)
 			count++;
 	}
 	if (!count) {
 		adapter->aq_required &= ~I40EVF_FLAG_AQ_ADD_MAC_FILTER;
+		spin_unlock_bh(&adapter->mac_vlan_list_lock);
 		return;
 	}
 	adapter->current_op = VIRTCHNL_OP_ADD_ETH_ADDR;
@@ -456,8 +460,10 @@ void i40evf_add_ether_addrs(struct i40evf_adapter *adapter)
 	}
 
 	veal = kzalloc(len, GFP_KERNEL);
-	if (!veal)
+	if (!veal) {
+		spin_unlock_bh(&adapter->mac_vlan_list_lock);
 		return;
+	}
 
 	veal->vsi_id = adapter->vsi_res->vsi_id;
 	veal->num_elements = count;
@@ -472,6 +478,9 @@ void i40evf_add_ether_addrs(struct i40evf_adapter *adapter)
 	}
 	if (!more)
 		adapter->aq_required &= ~I40EVF_FLAG_AQ_ADD_MAC_FILTER;
+
+	spin_unlock_bh(&adapter->mac_vlan_list_lock);
+
 	i40evf_send_pf_msg(adapter, VIRTCHNL_OP_ADD_ETH_ADDR,
 			   (u8 *)veal, len);
 	kfree(veal);
@@ -498,12 +507,16 @@ void i40evf_del_ether_addrs(struct i40evf_adapter *adapter)
 			adapter->current_op);
 		return;
 	}
+
+	spin_lock_bh(&adapter->mac_vlan_list_lock);
+
 	list_for_each_entry(f, &adapter->mac_filter_list, list) {
 		if (f->remove)
 			count++;
 	}
 	if (!count) {
 		adapter->aq_required &= ~I40EVF_FLAG_AQ_DEL_MAC_FILTER;
+		spin_unlock_bh(&adapter->mac_vlan_list_lock);
 		return;
 	}
 	adapter->current_op = VIRTCHNL_OP_DEL_ETH_ADDR;
@@ -520,8 +533,10 @@ void i40evf_del_ether_addrs(struct i40evf_adapter *adapter)
 		more = true;
 	}
 	veal = kzalloc(len, GFP_KERNEL);
-	if (!veal)
+	if (!veal) {
+		spin_unlock_bh(&adapter->mac_vlan_list_lock);
 		return;
+	}
 
 	veal->vsi_id = adapter->vsi_res->vsi_id;
 	veal->num_elements = count;
@@ -537,6 +552,9 @@ void i40evf_del_ether_addrs(struct i40evf_adapter *adapter)
 	}
 	if (!more)
 		adapter->aq_required &= ~I40EVF_FLAG_AQ_DEL_MAC_FILTER;
+
+	spin_unlock_bh(&adapter->mac_vlan_list_lock);
+
 	i40evf_send_pf_msg(adapter, VIRTCHNL_OP_DEL_ETH_ADDR,
 			   (u8 *)veal, len);
 	kfree(veal);
@@ -564,12 +582,15 @@ void i40evf_add_vlans(struct i40evf_adapter *adapter)
 		return;
 	}
 
+	spin_lock_bh(&adapter->mac_vlan_list_lock);
+
 	list_for_each_entry(f, &adapter->vlan_filter_list, list) {
 		if (f->add)
 			count++;
 	}
 	if (!count) {
 		adapter->aq_required &= ~I40EVF_FLAG_AQ_ADD_VLAN_FILTER;
+		spin_unlock_bh(&adapter->mac_vlan_list_lock);
 		return;
 	}
 	adapter->current_op = VIRTCHNL_OP_ADD_VLAN;
@@ -586,8 +607,10 @@ void i40evf_add_vlans(struct i40evf_adapter *adapter)
 		more = true;
 	}
 	vvfl = kzalloc(len, GFP_KERNEL);
-	if (!vvfl)
+	if (!vvfl) {
+		spin_unlock_bh(&adapter->mac_vlan_list_lock);
 		return;
+	}
 
 	vvfl->vsi_id = adapter->vsi_res->vsi_id;
 	vvfl->num_elements = count;
@@ -602,6 +625,9 @@ void i40evf_add_vlans(struct i40evf_adapter *adapter)
 	}
 	if (!more)
 		adapter->aq_required &= ~I40EVF_FLAG_AQ_ADD_VLAN_FILTER;
+
+	spin_unlock_bh(&adapter->mac_vlan_list_lock);
+
 	i40evf_send_pf_msg(adapter, VIRTCHNL_OP_ADD_VLAN, (u8 *)vvfl, len);
 	kfree(vvfl);
 }
@@ -628,12 +654,15 @@ void i40evf_del_vlans(struct i40evf_adapter *adapter)
 		return;
 	}
 
+	spin_lock_bh(&adapter->mac_vlan_list_lock);
+
 	list_for_each_entry(f, &adapter->vlan_filter_list, list) {
 		if (f->remove)
 			count++;
 	}
 	if (!count) {
 		adapter->aq_required &= ~I40EVF_FLAG_AQ_DEL_VLAN_FILTER;
+		spin_unlock_bh(&adapter->mac_vlan_list_lock);
 		return;
 	}
 	adapter->current_op = VIRTCHNL_OP_DEL_VLAN;
@@ -650,8 +679,10 @@ void i40evf_del_vlans(struct i40evf_adapter *adapter)
 		more = true;
 	}
 	vvfl = kzalloc(len, GFP_KERNEL);
-	if (!vvfl)
+	if (!vvfl) {
+		spin_unlock_bh(&adapter->mac_vlan_list_lock);
 		return;
+	}
 
 	vvfl->vsi_id = adapter->vsi_res->vsi_id;
 	vvfl->num_elements = count;
@@ -667,6 +698,9 @@ void i40evf_del_vlans(struct i40evf_adapter *adapter)
 	}
 	if (!more)
 		adapter->aq_required &= ~I40EVF_FLAG_AQ_DEL_VLAN_FILTER;
+
+	spin_unlock_bh(&adapter->mac_vlan_list_lock);
+
 	i40evf_send_pf_msg(adapter, VIRTCHNL_OP_DEL_VLAN, (u8 *)vvfl, len);
 	kfree(vvfl);
 }
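
The virtchnl senders above now share one shape: count the marked entries under mac_vlan_list_lock, drop the lock on every early return (including allocation failure), copy the entries out while still locked, and only unlock before the PF message is sent. One caveat worth noting: GFP_KERNEL may sleep, so an allocation made while a BH-disabling spinlock is held would conventionally use GFP_ATOMIC, as this condensed sketch with hypothetical types does:

	#include <linux/list.h>
	#include <linux/slab.h>
	#include <linux/spinlock.h>

	struct vf_filter {
		struct list_head list;
		bool remove;
	};

	struct vf_adapter {
		spinlock_t mac_vlan_list_lock;
		struct list_head vlan_filter_list;
	};

	static void del_vlans_sketch(struct vf_adapter *ad)
	{
		size_t len = 64;	/* illustrative message size */
		struct vf_filter *f;
		int count = 0;
		u8 *msg;

		spin_lock_bh(&ad->mac_vlan_list_lock);

		list_for_each_entry(f, &ad->vlan_filter_list, list)
			if (f->remove)
				count++;
		if (!count) {
			spin_unlock_bh(&ad->mac_vlan_list_lock);
			return;
		}

		/* GFP_ATOMIC: a BH-disabling spinlock is held */
		msg = kzalloc(len, GFP_ATOMIC);
		if (!msg) {
			spin_unlock_bh(&ad->mac_vlan_list_lock);
			return;
		}

		/* ... copy the marked filters into msg ... */

		spin_unlock_bh(&ad->mac_vlan_list_lock);
		/* ... send msg to the PF ... */
		kfree(msg);
	}
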
@@ -705,8 +739,10 @@ void i40evf_set_promiscuous(struct i40evf_adapter *adapter, int flags)
 	}
 
 	if (!flags) {
-		adapter->flags &= ~I40EVF_FLAG_PROMISC_ON;
-		adapter->aq_required &= ~I40EVF_FLAG_AQ_RELEASE_PROMISC;
+		adapter->flags &= ~(I40EVF_FLAG_PROMISC_ON |
+				    I40EVF_FLAG_ALLMULTI_ON);
+		adapter->aq_required &= ~(I40EVF_FLAG_AQ_RELEASE_PROMISC |
+					  I40EVF_FLAG_AQ_RELEASE_ALLMULTI);
 		dev_info(&adapter->pdev->dev, "Leaving promiscuous mode\n");
 	}
 
@@ -965,23 +1001,34 @@ void i40evf_virtchnl_completion(struct i40evf_adapter *adapter,
 	if (v_opcode == VIRTCHNL_OP_EVENT) {
 		struct virtchnl_pf_event *vpe =
 			(struct virtchnl_pf_event *)msg;
+		bool link_up = vpe->event_data.link_event.link_status;
 		switch (vpe->event) {
 		case VIRTCHNL_EVENT_LINK_CHANGE:
 			adapter->link_speed =
 				vpe->event_data.link_event.link_speed;
-			if (adapter->link_up !=
-			    vpe->event_data.link_event.link_status) {
-				adapter->link_up =
-					vpe->event_data.link_event.link_status;
-				if (adapter->link_up) {
-					netif_tx_start_all_queues(netdev);
-					netif_carrier_on(netdev);
-				} else {
-					netif_tx_stop_all_queues(netdev);
-					netif_carrier_off(netdev);
-				}
-				i40evf_print_link_message(adapter);
+
+			/* we've already got the right link status, bail */
+			if (adapter->link_up == link_up)
+				break;
+
+			/* If we get a link-up message and start queues before
+			 * our queues are configured it will trigger a TX hang.
+			 * In that case, just ignore the link status message;
+			 * we'll get another one after we enable queues and are
+			 * actually prepared to send traffic.
+			 */
+			if (link_up && adapter->state != __I40EVF_RUNNING)
+				break;
+
+			adapter->link_up = link_up;
+			if (link_up) {
+				netif_tx_start_all_queues(netdev);
+				netif_carrier_on(netdev);
+			} else {
+				netif_tx_stop_all_queues(netdev);
+				netif_carrier_off(netdev);
 			}
+			i40evf_print_link_message(adapter);
 			break;
 		case VIRTCHNL_EVENT_RESET_IMPENDING:
 			dev_info(&adapter->pdev->dev, "PF reset warning received\n");
diff --git a/drivers/net/ethernet/intel/igb/igb.h b/drivers/net/ethernet/intel/igb/igb.h
index 92845692087a..1c6b8d9176a8 100644
--- a/drivers/net/ethernet/intel/igb/igb.h
+++ b/drivers/net/ethernet/intel/igb/igb.h
@@ -690,6 +690,7 @@ void igb_ptp_rx_pktstamp(struct igb_q_vector *q_vector, void *va,
 int igb_ptp_set_ts_config(struct net_device *netdev, struct ifreq *ifr);
 int igb_ptp_get_ts_config(struct net_device *netdev, struct ifreq *ifr);
 void igb_set_flag_queue_pairs(struct igb_adapter *, const u32);
+unsigned int igb_get_max_rss_queues(struct igb_adapter *);
 #ifdef CONFIG_IGB_HWMON
 void igb_sysfs_exit(struct igb_adapter *adapter);
 int igb_sysfs_init(struct igb_adapter *adapter);
diff --git a/drivers/net/ethernet/intel/igb/igb_ethtool.c b/drivers/net/ethernet/intel/igb/igb_ethtool.c
index d06a8db514d4..606e6761758f 100644
--- a/drivers/net/ethernet/intel/igb/igb_ethtool.c
+++ b/drivers/net/ethernet/intel/igb/igb_ethtool.c
@@ -3338,37 +3338,7 @@ static int igb_set_rxfh(struct net_device *netdev, const u32 *indir,
 
 static unsigned int igb_max_channels(struct igb_adapter *adapter)
 {
-	struct e1000_hw *hw = &adapter->hw;
-	unsigned int max_combined = 0;
-
-	switch (hw->mac.type) {
-	case e1000_i211:
-		max_combined = IGB_MAX_RX_QUEUES_I211;
-		break;
-	case e1000_82575:
-	case e1000_i210:
-		max_combined = IGB_MAX_RX_QUEUES_82575;
-		break;
-	case e1000_i350:
-		if (!!adapter->vfs_allocated_count) {
-			max_combined = 1;
-			break;
-		}
-		/* fall through */
-	case e1000_82576:
-		if (!!adapter->vfs_allocated_count) {
-			max_combined = 2;
-			break;
-		}
-		/* fall through */
-	case e1000_82580:
-	case e1000_i354:
-	default:
-		max_combined = IGB_MAX_RX_QUEUES;
-		break;
-	}
-
-	return max_combined;
+	return igb_get_max_rss_queues(adapter);
 }
 
 static void igb_get_channels(struct net_device *netdev,
diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c
index c208753ff5b7..b88fae785369 100644
--- a/drivers/net/ethernet/intel/igb/igb_main.c
+++ b/drivers/net/ethernet/intel/igb/igb_main.c
@@ -1744,6 +1744,20 @@ static void igb_configure_cbs(struct igb_adapter *adapter, int queue,
 		 *     value = idleSlope * 61034
 		 *             -----------------                          (E6)
 		 *                  1000000
+		 *
+		 * NOTE: For i210, given the above, we can see that idleslope
+		 *       is represented in 16.38431 kbps units by the value at
+		 *       the TQAVCC register (1Gbps / 61034), which reduces
+		 *       the granularity for idleslope increments.
+		 *       For instance, if you want to configure a 2576kbps
+		 *       idleslope, the value to be written on the register
+		 *       would have to be 157.23. If rounded down, you end
+		 *       up with less bandwidth available than originally
+		 *       required (~2572 kbps). If rounded up, you end up
+		 *       with a higher bandwidth (~2589 kbps). The approach
+		 *       we take below is to always round up the calculated
+		 *       value, so the resulting bandwidth might be slightly
+		 *       higher for some configurations.
 		 */
 		value = DIV_ROUND_UP_ULL(idleslope * 61034ULL, 1000000);
 
@@ -3200,8 +3214,6 @@ static int igb_enable_sriov(struct pci_dev *pdev, int num_vfs)
 	/* if allocation failed then we do not support SR-IOV */
 	if (!adapter->vf_data) {
 		adapter->vfs_allocated_count = 0;
-		dev_err(&pdev->dev,
-			"Unable to allocate memory for VF Data Storage\n");
 		err = -ENOMEM;
 		goto out;
 	}
@@ -3373,10 +3385,10 @@ static void igb_probe_vfs(struct igb_adapter *adapter)
 #endif /* CONFIG_PCI_IOV */
 }
 
-static void igb_init_queue_configuration(struct igb_adapter *adapter)
+unsigned int igb_get_max_rss_queues(struct igb_adapter *adapter)
 {
 	struct e1000_hw *hw = &adapter->hw;
-	u32 max_rss_queues;
+	unsigned int max_rss_queues;
 
 	/* Determine the maximum number of RSS queues supported. */
 	switch (hw->mac.type) {
@@ -3407,6 +3419,14 @@ static void igb_init_queue_configuration(struct igb_adapter *adapter)
 		break;
 	}
 
+	return max_rss_queues;
+}
+
+static void igb_init_queue_configuration(struct igb_adapter *adapter)
+{
+	u32 max_rss_queues;
+
+	max_rss_queues = igb_get_max_rss_queues(adapter);
 	adapter->rss_queues = min_t(u32, max_rss_queues, num_online_cpus());
 
 	igb_set_flag_queue_pairs(adapter, max_rss_queues);
@@ -3676,7 +3696,7 @@ static int __igb_close(struct net_device *netdev, bool suspending)
 
 int igb_close(struct net_device *netdev)
 {
-	if (netif_device_present(netdev))
+	if (netif_device_present(netdev) || netdev->dismantle)
 		return __igb_close(netdev, false);
 	return 0;
 }
@@ -8718,7 +8738,8 @@ static void igb_rar_set_index(struct igb_adapter *adapter, u32 index)
 
 	/* Indicate to hardware the Address is Valid. */
 	if (adapter->mac_table[index].state & IGB_MAC_STATE_IN_USE) {
-		rar_high |= E1000_RAH_AV;
+		if (is_valid_ether_addr(addr))
+			rar_high |= E1000_RAH_AV;
 
 		if (hw->mac.type == e1000_82575)
 			rar_high |= E1000_RAH_POOL_1 *
@@ -8756,17 +8777,36 @@ static int igb_set_vf_mac(struct igb_adapter *adapter,
 static int igb_ndo_set_vf_mac(struct net_device *netdev, int vf, u8 *mac)
 {
 	struct igb_adapter *adapter = netdev_priv(netdev);
-	if (!is_valid_ether_addr(mac) || (vf >= adapter->vfs_allocated_count))
+
+	if (vf >= adapter->vfs_allocated_count)
+		return -EINVAL;
+
+	/* Setting the VF MAC to 0 clears the IGB_VF_FLAG_PF_SET_MAC
+	 * flag and allows the MAC to be overwritten via the VF netdev.
+	 * This is necessary to give libvirt a way to restore the
+	 * original MAC after unbinding vfio-pci and reloading igbvf
+	 * after shutting down a VM.
+	 */
+	if (is_zero_ether_addr(mac)) {
+		adapter->vf_data[vf].flags &= ~IGB_VF_FLAG_PF_SET_MAC;
+		dev_info(&adapter->pdev->dev,
+			 "remove administratively set MAC on VF %d\n",
+			 vf);
+	} else if (is_valid_ether_addr(mac)) {
+		adapter->vf_data[vf].flags |= IGB_VF_FLAG_PF_SET_MAC;
+		dev_info(&adapter->pdev->dev, "setting MAC %pM on VF %d\n",
+			 mac, vf);
+		dev_info(&adapter->pdev->dev,
+			 "Reload the VF driver to make this change effective.");
+		/* Generate additional warning if PF is down */
+		if (test_bit(__IGB_DOWN, &adapter->state)) {
+			dev_warn(&adapter->pdev->dev,
+				 "The VF MAC address has been set, but the PF device is not up.\n");
+			dev_warn(&adapter->pdev->dev,
+				 "Bring the PF device up before attempting to use the VF device.\n");
+		}
+	} else {
 		return -EINVAL;
-	adapter->vf_data[vf].flags |= IGB_VF_FLAG_PF_SET_MAC;
-	dev_info(&adapter->pdev->dev, "setting MAC %pM on VF %d\n", mac, vf);
-	dev_info(&adapter->pdev->dev,
-		 "Reload the VF driver to make this change effective.");
-	if (test_bit(__IGB_DOWN, &adapter->state)) {
-		dev_warn(&adapter->pdev->dev,
-			 "The VF MAC address has been set, but the PF device is not up.\n");
-		dev_warn(&adapter->pdev->dev,
-			 "Bring the PF device up before attempting to use the VF device.\n");
 	}
 	return igb_set_vf_mac(adapter, vf, mac);
 }
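
igb_ndo_set_vf_mac() now classifies the address three ways: all-zero clears the administratively-set flag, a valid unicast address sets it, and anything else is rejected. A userspace sketch of the same classification; is_zero_ether_addr() and is_valid_ether_addr() are kernel helpers, so simplified stand-ins (valid = non-zero and not multicast) are defined here:

	#include <stdbool.h>
	#include <stdint.h>
	#include <stdio.h>
	#include <string.h>

	static bool mac_is_zero(const uint8_t *a)
	{
		static const uint8_t zero[6];

		return memcmp(a, zero, 6) == 0;
	}

	static bool mac_is_valid(const uint8_t *a)
	{
		return !mac_is_zero(a) && !(a[0] & 1);	/* not multicast */
	}

	int main(void)
	{
		const uint8_t mac[6] = { 0x02, 0, 0, 0, 0, 0x01 };

		if (mac_is_zero(mac))
			puts("clear PF_SET_MAC, allow VF to set its own MAC");
		else if (mac_is_valid(mac))
			puts("set PF_SET_MAC and program the MAC");
		else
			puts("reject with -EINVAL");
		return 0;
	}
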
diff --git a/drivers/net/ethernet/intel/igb/igb_ptp.c b/drivers/net/ethernet/intel/igb/igb_ptp.c
index 841c2a083349..0746b19ec6d3 100644
--- a/drivers/net/ethernet/intel/igb/igb_ptp.c
+++ b/drivers/net/ethernet/intel/igb/igb_ptp.c
@@ -643,6 +643,10 @@ static void igb_ptp_tx_work(struct work_struct *work)
 		adapter->ptp_tx_skb = NULL;
 		clear_bit_unlock(__IGB_PTP_TX_IN_PROGRESS, &adapter->state);
 		adapter->tx_hwtstamp_timeouts++;
+		/* Clear the Tx valid bit in the TSYNCTXCTL register to
+		 * enable the interrupt
+		 */
+		rd32(E1000_TXSTMPH);
 		dev_warn(&adapter->pdev->dev, "clearing Tx timestamp hang\n");
 		return;
 	}
@@ -717,6 +721,7 @@ void igb_ptp_rx_hang(struct igb_adapter *adapter)
  */
 void igb_ptp_tx_hang(struct igb_adapter *adapter)
 {
+	struct e1000_hw *hw = &adapter->hw;
 	bool timeout = time_is_before_jiffies(adapter->ptp_tx_start +
 					      IGB_PTP_TX_TIMEOUT);
 
@@ -736,6 +741,10 @@ void igb_ptp_tx_hang(struct igb_adapter *adapter)
 		adapter->ptp_tx_skb = NULL;
 		clear_bit_unlock(__IGB_PTP_TX_IN_PROGRESS, &adapter->state);
 		adapter->tx_hwtstamp_timeouts++;
+		/* Clear the Tx valid bit in the TSYNCTXCTL register to
+		 * enable the interrupt
+		 */
+		rd32(E1000_TXSTMPH);
 		dev_warn(&adapter->pdev->dev, "clearing Tx timestamp hang\n");
 	}
 }
diff --git a/drivers/net/ethernet/intel/ixgbe/Makefile b/drivers/net/ethernet/intel/ixgbe/Makefile
index 35e6fa643c7e..8319465eb38d 100644
--- a/drivers/net/ethernet/intel/ixgbe/Makefile
+++ b/drivers/net/ethernet/intel/ixgbe/Makefile
@@ -42,3 +42,4 @@ ixgbe-$(CONFIG_IXGBE_DCB) +=  ixgbe_dcb.o ixgbe_dcb_82598.o \
 ixgbe-$(CONFIG_IXGBE_HWMON) += ixgbe_sysfs.o
 ixgbe-$(CONFIG_DEBUG_FS) += ixgbe_debugfs.o
 ixgbe-$(CONFIG_FCOE:m=y) += ixgbe_fcoe.o
+ixgbe-$(CONFIG_XFRM_OFFLOAD) += ixgbe_ipsec.o
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe.h b/drivers/net/ethernet/intel/ixgbe/ixgbe.h
index 468c3555a629..c1e3a0039ea5 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe.h
@@ -52,7 +52,9 @@
 #ifdef CONFIG_IXGBE_DCA
 #include <linux/dca.h>
 #endif
+#include "ixgbe_ipsec.h"
 
+#include <net/xdp.h>
 #include <net/busy_poll.h>
 
 /* common prefix used by pr_<> macros */
@@ -170,10 +172,11 @@ enum ixgbe_tx_flags {
 	IXGBE_TX_FLAGS_CC	= 0x08,
 	IXGBE_TX_FLAGS_IPV4	= 0x10,
 	IXGBE_TX_FLAGS_CSUM	= 0x20,
+	IXGBE_TX_FLAGS_IPSEC	= 0x40,
 
 	/* software defined flags */
-	IXGBE_TX_FLAGS_SW_VLAN	= 0x40,
-	IXGBE_TX_FLAGS_FCOE	= 0x80,
+	IXGBE_TX_FLAGS_SW_VLAN	= 0x80,
+	IXGBE_TX_FLAGS_FCOE	= 0x100,
 };
 
 /* VLAN info */
@@ -332,7 +335,6 @@ struct ixgbe_ring {
 	struct net_device *netdev;	/* netdev ring belongs to */
 	struct bpf_prog *xdp_prog;
 	struct device *dev;		/* device for DMA mapping */
-	struct ixgbe_fwd_adapter *l2_accel_priv;
 	void *desc;			/* descriptor ring memory */
 	union {
 		struct ixgbe_tx_buffer *tx_buffer_info;
@@ -371,6 +373,7 @@ struct ixgbe_ring {
 		struct ixgbe_tx_queue_stats tx_stats;
 		struct ixgbe_rx_queue_stats rx_stats;
 	};
+	struct xdp_rxq_info xdp_rxq;
 } ____cacheline_internodealigned_in_smp;
 
 enum ixgbe_ring_f_enum {
@@ -395,8 +398,7 @@ enum ixgbe_ring_f_enum {
 #define MAX_XDP_QUEUES			(IXGBE_MAX_FDIR_INDICES + 1)
 #define IXGBE_MAX_L2A_QUEUES		4
 #define IXGBE_BAD_L2A_QUEUE		3
-#define IXGBE_MAX_MACVLANS		31
-#define IXGBE_MAX_DCBMACVLANS		8
+#define IXGBE_MAX_MACVLANS		63
 
 struct ixgbe_ring_feature {
 	u16 limit;	/* upper limit on feature indices */
@@ -629,15 +631,18 @@ struct ixgbe_adapter {
 #define IXGBE_FLAG2_EEE_CAPABLE			BIT(14)
 #define IXGBE_FLAG2_EEE_ENABLED			BIT(15)
 #define IXGBE_FLAG2_RX_LEGACY			BIT(16)
+#define IXGBE_FLAG2_IPSEC_ENABLED		BIT(17)
 
 	/* Tx fast path data */
 	int num_tx_queues;
 	u16 tx_itr_setting;
 	u16 tx_work_limit;
+	u64 tx_ipsec;
 
 	/* Rx fast path data */
 	int num_rx_queues;
 	u16 rx_itr_setting;
+	u64 rx_ipsec;
 
 	/* Port number used to identify VXLAN traffic */
 	__be16 vxlan_port;
@@ -674,6 +679,7 @@ struct ixgbe_adapter {
 	struct ieee_ets *ixgbe_ieee_ets;
 	struct ixgbe_dcb_config dcb_cfg;
 	struct ixgbe_dcb_config temp_dcb_cfg;
+	u8 hw_tcs;
 	u8 dcb_set_bitmap;
 	u8 dcbx_cap;
 	enum ixgbe_fc_mode last_lfc_mode;
@@ -721,8 +727,7 @@ struct ixgbe_adapter {
 
 	u16 bridge_mode;
 
-	u16 eeprom_verh;
-	u16 eeprom_verl;
+	char eeprom_id[NVM_VER_SIZE];
 	u16 eeprom_cap;
 
 	u32 interrupt_event;
@@ -766,7 +771,8 @@ struct ixgbe_adapter {
 #endif /*CONFIG_DEBUG_FS*/
 
 	u8 default_up;
-	unsigned long fwd_bitmask; /* Bitmask indicating in use pools */
+	/* Bitmask indicating in use pools */
+	DECLARE_BITMAP(fwd_bitmask, IXGBE_MAX_MACVLANS + 1);
 
 #define IXGBE_MAX_LINK_HANDLE 10
 	struct ixgbe_jump_table *jump_tables[IXGBE_MAX_LINK_HANDLE];
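
With IXGBE_MAX_MACVLANS raised to 63, pool indices 0 through 63 need 64 bits of state, which no longer fits portably in the single unsigned long that fwd_bitmask used to be; DECLARE_BITMAP sizes an unsigned-long array from the bit count instead. A kernel-style sketch of claiming a pool from such a bitmap (the helper is hypothetical):

	#include <linux/bitmap.h>
	#include <linux/errno.h>

	#define MAX_MACVLANS 63

	struct pool_state {
		DECLARE_BITMAP(fwd_bitmask, MAX_MACVLANS + 1);	/* bits 0..63 */
	};

	static int claim_pool(struct pool_state *p)
	{
		/* find_first_zero_bit() returns the size when all bits are set */
		int pool = find_first_zero_bit(p->fwd_bitmask, MAX_MACVLANS + 1);

		if (pool > MAX_MACVLANS)
			return -EBUSY;
		set_bit(pool, p->fwd_bitmask);
		return pool;
	}
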
@@ -780,6 +786,10 @@ struct ixgbe_adapter {
 
 #define IXGBE_RSS_KEY_SIZE     40  /* size of RSS Hash Key in bytes */
 	u32 *rss_key;
+
+#ifdef CONFIG_XFRM
+	struct ixgbe_ipsec *ipsec;
+#endif /* CONFIG_XFRM */
 };
 
 static inline u8 ixgbe_max_rss_indices(struct ixgbe_adapter *adapter)
@@ -1010,4 +1020,24 @@ void ixgbe_store_key(struct ixgbe_adapter *adapter);
 void ixgbe_store_reta(struct ixgbe_adapter *adapter);
 s32 ixgbe_negotiate_fc(struct ixgbe_hw *hw, u32 adv_reg, u32 lp_reg,
 		       u32 adv_sym, u32 adv_asm, u32 lp_sym, u32 lp_asm);
+#ifdef CONFIG_XFRM_OFFLOAD
+void ixgbe_init_ipsec_offload(struct ixgbe_adapter *adapter);
+void ixgbe_stop_ipsec_offload(struct ixgbe_adapter *adapter);
+void ixgbe_ipsec_restore(struct ixgbe_adapter *adapter);
+void ixgbe_ipsec_rx(struct ixgbe_ring *rx_ring,
+		    union ixgbe_adv_rx_desc *rx_desc,
+		    struct sk_buff *skb);
+int ixgbe_ipsec_tx(struct ixgbe_ring *tx_ring, struct ixgbe_tx_buffer *first,
+		   struct ixgbe_ipsec_tx_data *itd);
+#else
+static inline void ixgbe_init_ipsec_offload(struct ixgbe_adapter *adapter) { };
+static inline void ixgbe_stop_ipsec_offload(struct ixgbe_adapter *adapter) { };
+static inline void ixgbe_ipsec_restore(struct ixgbe_adapter *adapter) { };
+static inline void ixgbe_ipsec_rx(struct ixgbe_ring *rx_ring,
+				  union ixgbe_adv_rx_desc *rx_desc,
+				  struct sk_buff *skb) { };
+static inline int ixgbe_ipsec_tx(struct ixgbe_ring *tx_ring,
+				 struct ixgbe_tx_buffer *first,
+				 struct ixgbe_ipsec_tx_data *itd) { return 0; };
+#endif /* CONFIG_XFRM_OFFLOAD */
 #endif /* _IXGBE_H_ */
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_82598.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_82598.c
index 8a32eb7d47b9..a0ebd9ecf243 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_82598.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_82598.c
@@ -431,6 +431,7 @@ static s32 ixgbe_fc_enable_82598(struct ixgbe_hw *hw)
 /**
  *  ixgbe_start_mac_link_82598 - Configures MAC link settings
  *  @hw: pointer to hardware structure
+ *  @autoneg_wait_to_complete: true when waiting for completion is needed
  *
  *  Configures link settings based on values in the ixgbe_hw struct.
  *  Restarts the link.  Performs autonegotiation if needed.
@@ -1054,7 +1055,7 @@ static s32 ixgbe_read_i2c_eeprom_82598(struct ixgbe_hw *hw, u8 byte_offset,
  *  ixgbe_read_i2c_sff8472_82598 - Reads 8 bit word over I2C interface.
  *  @hw: pointer to hardware structure
  *  @byte_offset: byte offset at address 0xA2
- *  @eeprom_data: value read
+ *  @sff8472_data: value read
  *
  *  Performs 8 byte read operation to SFP module's SFF-8472 data over I2C
  **/
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c
index d602637ccc40..4dfc81dbee4b 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c
@@ -221,7 +221,7 @@ static s32 prot_autoc_read_82599(struct ixgbe_hw *hw, bool *locked,
 /**
  * prot_autoc_write_82599 - Hides MAC differences needed for AUTOC write
  * @hw: pointer to hardware structure
- * @reg_val: value to write to AUTOC
+ * @autoc: value to write to AUTOC
  * @locked: bool to indicate whether the SW/FW lock was already taken by
  *	     previous prot_autoc_read_82599.
  *
@@ -1310,10 +1310,11 @@ do { \
 
 /**
  *  ixgbe_atr_compute_sig_hash_82599 - Compute the signature hash
- *  @stream: input bitstream to compute the hash on
+ *  @input: input bitstream to compute the hash on
+ *  @common: compressed common input dword
  *
  *  This function is almost identical to the function above but contains
- *  several optomizations such as unwinding all of the loops, letting the
+ *  several optimizations such as unwinding all of the loops, letting the
  *  compiler work out all of the conditional ifs since the keys are static
  *  defines, and computing two keys at once since the hashed dword stream
  *  will be the same for both keys.
@@ -1446,7 +1447,7 @@ do { \
 
 /**
  *  ixgbe_atr_compute_perfect_hash_82599 - Compute the perfect filter hash
- *  @atr_input: input bitstream to compute the hash on
+ *  @input: input bitstream to compute the hash on
  *  @input_mask: mask for the input bitstream
  *
  *  This function serves two main purposes.  First it applies the input_mask
@@ -2078,6 +2079,7 @@ reset_pipeline_out:
  *  ixgbe_read_i2c_byte_82599 - Reads 8 bit word over I2C
  *  @hw: pointer to hardware structure
  *  @byte_offset: byte offset to read
+ *  @dev_addr: address to read from
  *  @data: value read
  *
  *  Performs byte read operation to SFP module's EEPROM over I2C interface at
@@ -2131,6 +2133,7 @@ release_i2c_access:
  *  ixgbe_write_i2c_byte_82599 - Writes 8 bit word over I2C
  *  @hw: pointer to hardware structure
  *  @byte_offset: byte offset to write
+ *  @dev_addr: address to write to
  *  @data: value to write
  *
  *  Performs byte write operation to SFP module's EEPROM over I2C interface at
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c
index 9bef255f6a18..61188f343955 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c
@@ -1613,6 +1613,7 @@ static void ixgbe_shift_out_eeprom_bits(struct ixgbe_hw *hw, u16 data,
 /**
  *  ixgbe_shift_in_eeprom_bits - Shift data bits in from the EEPROM
  *  @hw: pointer to hardware structure
+ *  @count: number of bits to shift
  **/
 static u16 ixgbe_shift_in_eeprom_bits(struct ixgbe_hw *hw, u16 count)
 {
@@ -1667,7 +1668,7 @@ static void ixgbe_raise_eeprom_clk(struct ixgbe_hw *hw, u32 *eec)
 /**
  *  ixgbe_lower_eeprom_clk - Lowers the EEPROM's clock input.
  *  @hw: pointer to hardware structure
- *  @eecd: EECD's current value
+ *  @eec: EEC's current value
  **/
 static void ixgbe_lower_eeprom_clk(struct ixgbe_hw *hw, u32 *eec)
 {
@@ -2037,7 +2038,7 @@ static s32 ixgbe_mta_vector(struct ixgbe_hw *hw, u8 *mc_addr)
 /**
  *  ixgbe_set_mta - Set bit-vector in multicast table
  *  @hw: pointer to hardware structure
- *  @hash_value: Multicast address hash value
+ *  @mc_addr: Multicast address
  *
  *  Sets the bit-vector in the multicast table.
  **/
@@ -3086,6 +3087,8 @@ s32 ixgbe_init_uta_tables_generic(struct ixgbe_hw *hw)
  *  ixgbe_find_vlvf_slot - find the vlanid or the first empty slot
  *  @hw: pointer to hardware structure
  *  @vlan: VLAN id to write to VLAN filter
+ *  @vlvf_bypass: true to find the vlanid only; false also returns the first
+ *		  empty slot if the vlanid is not found
  *
  *  return the VLVF index where this VLAN id should be placed
  *
@@ -3476,7 +3479,7 @@ void ixgbe_set_mac_anti_spoofing(struct ixgbe_hw *hw, bool enable, int vf)
  *  ixgbe_set_vlan_anti_spoofing - Enable/Disable VLAN anti-spoofing
  *  @hw: pointer to hardware structure
  *  @enable: enable or disable switch for VLAN anti-spoofing
- *  @pf: Virtual Function pool - VF Pool to set for VLAN anti-spoofing
+ *  @vf: Virtual Function pool - VF Pool to set for VLAN anti-spoofing
  *
  **/
 void ixgbe_set_vlan_anti_spoofing(struct ixgbe_hw *hw, bool enable, int vf)
@@ -4028,6 +4031,118 @@ s32 ixgbe_init_thermal_sensor_thresh_generic(struct ixgbe_hw *hw)
 	return 0;
 }
 
+/**
+ *  ixgbe_get_orom_version - Return option ROM version from EEPROM
+ *
+ *  @hw: pointer to hardware structure
+ *  @nvm_ver: pointer to output structure
+ *
+ *  If a valid option ROM version is found, nvm_ver->or_valid is set to
+ *  true; otherwise it is false.
+ **/
+void ixgbe_get_orom_version(struct ixgbe_hw *hw,
+			    struct ixgbe_nvm_version *nvm_ver)
+{
+	u16 offset, eeprom_cfg_blkh, eeprom_cfg_blkl;
+
+	nvm_ver->or_valid = false;
+	/* The Option ROM may or may not be present.  Start with the pointer */
+	hw->eeprom.ops.read(hw, NVM_OROM_OFFSET, &offset);
+
+	/* make sure offset is valid */
+	if (offset == 0x0 || offset == NVM_INVALID_PTR)
+		return;
+
+	hw->eeprom.ops.read(hw, offset + NVM_OROM_BLK_HI, &eeprom_cfg_blkh);
+	hw->eeprom.ops.read(hw, offset + NVM_OROM_BLK_LOW, &eeprom_cfg_blkl);
+
+	/* return if the Option ROM block is empty or invalid */
+	if ((eeprom_cfg_blkl | eeprom_cfg_blkh) == 0x0 ||
+	    eeprom_cfg_blkl == NVM_VER_INVALID ||
+	    eeprom_cfg_blkh == NVM_VER_INVALID)
+		return;
+
+	nvm_ver->or_valid = true;
+	nvm_ver->or_major = eeprom_cfg_blkl >> NVM_OROM_SHIFT;
+	nvm_ver->or_build = (eeprom_cfg_blkl << NVM_OROM_SHIFT) |
+			    (eeprom_cfg_blkh >> NVM_OROM_SHIFT);
+	nvm_ver->or_patch = eeprom_cfg_blkh & NVM_OROM_PATCH_MASK;
+}
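
To make the unpacking above concrete, assume for illustration that NVM_OROM_SHIFT is 8 and NVM_OROM_PATCH_MASK is 0xFF (the real values live in the driver headers): the major is then the high byte of the low config word, the build spans the low byte of the low word and the high byte of the high word, and the patch is the low byte of the high word. A self-checking example under those assumptions:

	#include <assert.h>
	#include <stdint.h>

	#define OROM_SHIFT	8	/* assumed for the example */
	#define OROM_PATCH_MASK	0xFFu	/* assumed for the example */

	int main(void)
	{
		uint16_t blkl = 0x0512;	/* low config block word  */
		uint16_t blkh = 0x3407;	/* high config block word */
		uint8_t major = blkl >> OROM_SHIFT;
		uint16_t build = (uint16_t)((blkl << OROM_SHIFT) |
					    (blkh >> OROM_SHIFT));
		uint8_t patch = blkh & OROM_PATCH_MASK;

		assert(major == 0x05);
		assert(build == 0x1234);
		assert(patch == 0x07);
		return 0;
	}
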
+
+/**
+ *  ixgbe_get_oem_prod_version - Return OEM product version from EEPROM
+ *
+ *  @hw: pointer to hardware structure
+ *  @nvm_ver: pointer to output structure
+ *
+ *  If a valid OEM product version is found, nvm_ver->oem_valid is set to
+ *  true; otherwise it is false.
+ **/
+void ixgbe_get_oem_prod_version(struct ixgbe_hw *hw,
+				struct ixgbe_nvm_version *nvm_ver)
+{
+	u16 rel_num, prod_ver, mod_len, cap, offset;
+
+	nvm_ver->oem_valid = false;
+	hw->eeprom.ops.read(hw, NVM_OEM_PROD_VER_PTR, &offset);
+
+	/* Return if the offset to the OEM Product Version block is invalid */
+	if (offset == 0x0 || offset == NVM_INVALID_PTR)
+		return;
+
+	/* Read product version block */
+	hw->eeprom.ops.read(hw, offset, &mod_len);
+	hw->eeprom.ops.read(hw, offset + NVM_OEM_PROD_VER_CAP_OFF, &cap);
+
+	/* Return if OEM product version block is invalid */
+	if (mod_len != NVM_OEM_PROD_VER_MOD_LEN ||
+	    (cap & NVM_OEM_PROD_VER_CAP_MASK) != 0x0)
+		return;
+
+	hw->eeprom.ops.read(hw, offset + NVM_OEM_PROD_VER_OFF_L, &prod_ver);
+	hw->eeprom.ops.read(hw, offset + NVM_OEM_PROD_VER_OFF_H, &rel_num);
+
+	/* Return if version is invalid */
+	if ((rel_num | prod_ver) == 0x0 ||
+	    rel_num == NVM_VER_INVALID || prod_ver == NVM_VER_INVALID)
+		return;
+
+	nvm_ver->oem_major = prod_ver >> NVM_VER_SHIFT;
+	nvm_ver->oem_minor = prod_ver & NVM_VER_MASK;
+	nvm_ver->oem_release = rel_num;
+	nvm_ver->oem_valid = true;
+}
+
+/**
+ *  ixgbe_get_etk_id - Return Etrack ID from EEPROM
+ *
+ *  @hw: pointer to hardware structure
+ *  @nvm_ver: pointer to output structure
+ *
+ *  Word read errors are reported as 0xFFFF.
+ **/
+void ixgbe_get_etk_id(struct ixgbe_hw *hw,
+		      struct ixgbe_nvm_version *nvm_ver)
+{
+	u16 etk_id_l, etk_id_h;
+
+	if (hw->eeprom.ops.read(hw, NVM_ETK_OFF_LOW, &etk_id_l))
+		etk_id_l = NVM_VER_INVALID;
+	if (hw->eeprom.ops.read(hw, NVM_ETK_OFF_HI, &etk_id_h))
+		etk_id_h = NVM_VER_INVALID;
+
+	/* The word order for the version format is determined by high order
+	 * word bit 15.
+	 */
+	if ((etk_id_h & NVM_ETK_VALID) == 0) {
+		nvm_ver->etk_id = etk_id_h;
+		nvm_ver->etk_id |= (etk_id_l << NVM_ETK_SHIFT);
+	} else {
+		nvm_ver->etk_id = etk_id_l;
+		nvm_ver->etk_id |= (etk_id_h << NVM_ETK_SHIFT);
+	}
+}
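
Bit 15 of the high Etrack word selects the word order: when it is clear, the low word carries the upper half of the ID. A small self-check, assuming a 16-bit NVM_ETK_SHIFT and a 0x8000 valid mask for the example:

	#include <assert.h>
	#include <stdint.h>

	#define ETK_SHIFT 16		/* assumed for the example */
	#define ETK_VALID 0x8000u	/* assumed for the example */

	int main(void)
	{
		uint16_t lo = 0x5678;
		uint16_t hi = 0x1234;	/* bit 15 clear */
		uint32_t etk;

		if (!(hi & ETK_VALID))
			etk = ((uint32_t)lo << ETK_SHIFT) | hi;
		else
			etk = ((uint32_t)hi << ETK_SHIFT) | lo;

		assert(etk == 0x56781234);
		return 0;
	}
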
+
 void ixgbe_disable_rx_generic(struct ixgbe_hw *hw)
 {
 	u32 rxctrl;
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_common.h
index a01409e2e06c..4d4c02366cb3 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_common.h
@@ -139,6 +139,12 @@ extern const u32 ixgbe_mvals_8259X[IXGBE_MVALS_IDX_LIMIT];
 
 s32 ixgbe_get_thermal_sensor_data_generic(struct ixgbe_hw *hw);
 s32 ixgbe_init_thermal_sensor_thresh_generic(struct ixgbe_hw *hw);
+void ixgbe_get_etk_id(struct ixgbe_hw *hw,
+		      struct ixgbe_nvm_version *nvm_ver);
+void ixgbe_get_oem_prod_version(struct ixgbe_hw *hw,
+				struct ixgbe_nvm_version *nvm_ver);
+void ixgbe_get_orom_version(struct ixgbe_hw *hw,
+			    struct ixgbe_nvm_version *nvm_ver);
 void ixgbe_disable_rx_generic(struct ixgbe_hw *hw);
 void ixgbe_enable_rx_generic(struct ixgbe_hw *hw);
 s32 ixgbe_setup_mac_link_multispeed_fiber(struct ixgbe_hw *hw,
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb.c
index 072ef3b5fc61..aaea8282bfd2 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb.c
@@ -39,6 +39,10 @@
  * are the smallest unit programmable into the underlying
  * hardware. The IEEE 802.1Qaz specification does not use bandwidth
  * groups so this is much simplified from the CEE case.
+ * @bw: bandwidth index by traffic class
+ * @refill: refill credits index by traffic class
+ * @max: max credits by traffic class
+ * @max_frame: maximum frame size
  */
 static s32 ixgbe_ieee_credits(__u8 *bw, __u16 *refill,
 			      __u16 *max, int max_frame)
@@ -72,8 +76,10 @@ static s32 ixgbe_ieee_credits(__u8 *bw, __u16 *refill,
 
 /**
  * ixgbe_dcb_calculate_tc_credits - Calculates traffic class credits
- * @ixgbe_dcb_config: Struct containing DCB settings.
- * @direction: Configuring either Tx or Rx.
+ * @hw: pointer to hardware structure
+ * @dcb_config: Struct containing DCB settings
+ * @max_frame: Maximum frame size
+ * @direction: Configuring either Tx or Rx
  *
  * This function calculates the credits allocated to each traffic class.
  * It should be called only after the rules are checked by
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82598.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82598.c
index b79e93a5b699..f94c7e82a30b 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82598.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82598.c
@@ -34,7 +34,9 @@
 /**
  * ixgbe_dcb_config_rx_arbiter_82598 - Config Rx data arbiter
  * @hw: pointer to hardware structure
- * @dcb_config: pointer to ixgbe_dcb_config structure
+ * @refill: refill credits index by traffic class
+ * @max: max credits index by traffic class
+ * @prio_type: priority type indexed by traffic class
  *
  * Configure Rx Data Arbiter and credits for each traffic class.
  */
@@ -91,7 +93,10 @@ s32 ixgbe_dcb_config_rx_arbiter_82598(struct ixgbe_hw *hw,
 /**
  * ixgbe_dcb_config_tx_desc_arbiter_82598 - Config Tx Desc. arbiter
  * @hw: pointer to hardware structure
- * @dcb_config: pointer to ixgbe_dcb_config structure
+ * @refill: refill credits index by traffic class
+ * @max: max credits index by traffic class
+ * @bwg_id: bandwidth grouping indexed by traffic class
+ * @prio_type: priority type indexed by traffic class
  *
  * Configure Tx Descriptor Arbiter and credits for each traffic class.
  */
@@ -137,7 +142,10 @@ s32 ixgbe_dcb_config_tx_desc_arbiter_82598(struct ixgbe_hw *hw,
 /**
  * ixgbe_dcb_config_tx_data_arbiter_82598 - Config Tx data arbiter
  * @hw: pointer to hardware structure
- * @dcb_config: pointer to ixgbe_dcb_config structure
+ * @refill: refill credits index by traffic class
+ * @max: max credits index by traffic class
+ * @bwg_id: bandwidth grouping indexed by traffic class
+ * @prio_type: priority type indexed by traffic class
  *
  * Configure Tx Data Arbiter and credits for each traffic class.
  */
@@ -184,7 +192,7 @@ s32 ixgbe_dcb_config_tx_data_arbiter_82598(struct ixgbe_hw *hw,
 /**
  * ixgbe_dcb_config_pfc_82598 - Config priority flow control
  * @hw: pointer to hardware structure
- * @dcb_config: pointer to ixgbe_dcb_config structure
+ * @pfc_en: enabled pfc bitmask
  *
  * Configure Priority Flow Control for each traffic class.
  */
@@ -269,7 +277,11 @@ static s32 ixgbe_dcb_config_tc_stats_82598(struct ixgbe_hw *hw)
 /**
  * ixgbe_dcb_hw_config_82598 - Config and enable DCB
  * @hw: pointer to hardware structure
- * @dcb_config: pointer to ixgbe_dcb_config structure
+ * @pfc_en: enabled pfc bitmask
+ * @refill: refill credits index by traffic class
+ * @max: max credits index by traffic class
+ * @bwg_id: bandwidth grouping indexed by traffic class
+ * @prio_type: priority type indexed by traffic class
  *
  * Configure dcb settings and enable dcb mode.
  */
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82599.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82599.c
index 1011d644978f..1eed6811e914 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82599.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82599.c
@@ -38,6 +38,7 @@
  * @max: max credits index by traffic class
  * @bwg_id: bandwidth grouping indexed by traffic class
  * @prio_type: priority type indexed by traffic class
+ * @prio_tc: priority to tc assignments indexed by priority
  *
  * Configure Rx Packet Arbiter and credits for each traffic class.
  */
@@ -148,6 +149,7 @@ s32 ixgbe_dcb_config_tx_desc_arbiter_82599(struct ixgbe_hw *hw,
  * @max: max credits index by traffic class
  * @bwg_id: bandwidth grouping indexed by traffic class
  * @prio_type: priority type indexed by traffic class
+ * @prio_tc: priority to tc assignments indexed by priority
  *
  * Configure Tx Packet Arbiter and credits for each traffic class.
  */
@@ -344,11 +346,12 @@ static s32 ixgbe_dcb_config_tc_stats_82599(struct ixgbe_hw *hw)
 /**
  * ixgbe_dcb_hw_config_82599 - Configure and enable DCB
  * @hw: pointer to hardware structure
+ * @pfc_en: enabled pfc bitmask
  * @refill: refill credits index by traffic class
  * @max: max credits index by traffic class
  * @bwg_id: bandwidth grouping indexed by traffic class
  * @prio_type: priority type indexed by traffic class
- * @pfc_en: enabled pfc bitmask
+ * @prio_tc: priority to tc assignments indexed by priority
  *
  * Configure dcb settings and enable dcb mode.
  */
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_nl.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_nl.c
index 78c52375acc6..b33f3f87e4b1 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_nl.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_nl.c
@@ -571,7 +571,7 @@ static int ixgbe_dcbnl_ieee_setets(struct net_device *dev,
 	if (max_tc > adapter->dcb_cfg.num_tcs.pg_tcs)
 		return -EINVAL;
 
-	if (max_tc != netdev_get_num_tc(dev)) {
+	if (max_tc != adapter->hw_tcs) {
 		err = ixgbe_setup_tc(dev, max_tc);
 		if (err)
 			return err;
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_debugfs.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_debugfs.c
index 5e2c1e35e517..ad54080488ee 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_debugfs.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_debugfs.c
@@ -249,7 +249,7 @@ void ixgbe_dbg_adapter_init(struct ixgbe_adapter *adapter)
 
 /**
  * ixgbe_dbg_adapter_exit - clear out the adapter's debugfs entries
- * @pf: the pf that is stopping
+ * @adapter: the adapter that is exiting
  **/
 void ixgbe_dbg_adapter_exit(struct ixgbe_adapter *adapter)
 {
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c
index 0aad1c2a3667..221f15803480 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c
@@ -115,6 +115,8 @@ static const struct ixgbe_stats ixgbe_gstrings_stats[] = {
 	{"tx_hwtstamp_timeouts", IXGBE_STAT(tx_hwtstamp_timeouts)},
 	{"tx_hwtstamp_skipped", IXGBE_STAT(tx_hwtstamp_skipped)},
 	{"rx_hwtstamp_cleared", IXGBE_STAT(rx_hwtstamp_cleared)},
+	{"tx_ipsec", IXGBE_STAT(tx_ipsec)},
+	{"rx_ipsec", IXGBE_STAT(rx_ipsec)},
 #ifdef IXGBE_FCOE
 	{"fcoe_bad_fccrc", IXGBE_STAT(stats.fccrc)},
 	{"rx_fcoe_dropped", IXGBE_STAT(stats.fcoerpdc)},
@@ -1014,16 +1016,13 @@ static void ixgbe_get_drvinfo(struct net_device *netdev,
 			      struct ethtool_drvinfo *drvinfo)
 {
 	struct ixgbe_adapter *adapter = netdev_priv(netdev);
-	u32 nvm_track_id;
 
 	strlcpy(drvinfo->driver, ixgbe_driver_name, sizeof(drvinfo->driver));
 	strlcpy(drvinfo->version, ixgbe_driver_version,
 		sizeof(drvinfo->version));
 
-	nvm_track_id = (adapter->eeprom_verh << 16) |
-			adapter->eeprom_verl;
-	snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version), "0x%08x",
-		 nvm_track_id);
+	strlcpy(drvinfo->fw_version, adapter->eeprom_id,
+		sizeof(drvinfo->fw_version));
 
 	strlcpy(drvinfo->bus_info, pci_name(adapter->pdev),
 		sizeof(drvinfo->bus_info));
@@ -1156,6 +1155,10 @@ static int ixgbe_set_ringparam(struct net_device *netdev,
 			memcpy(&temp_ring[i], adapter->rx_ring[i],
 			       sizeof(struct ixgbe_ring));
 
+			/* Clear copied XDP RX-queue info */
+			memset(&temp_ring[i].xdp_rxq, 0,
+			       sizeof(temp_ring[i].xdp_rxq));
+
 			temp_ring[i].count = new_rx_count;
 			err = ixgbe_setup_rx_resources(adapter, &temp_ring[i]);
 			if (err) {
@@ -3082,26 +3085,9 @@ static int ixgbe_get_ts_info(struct net_device *dev,
 	case ixgbe_mac_X550EM_x:
 	case ixgbe_mac_x550em_a:
 		info->rx_filters |= BIT(HWTSTAMP_FILTER_ALL);
-		/* fallthrough */
+		break;
 	case ixgbe_mac_X540:
 	case ixgbe_mac_82599EB:
-		info->so_timestamping =
-			SOF_TIMESTAMPING_TX_SOFTWARE |
-			SOF_TIMESTAMPING_RX_SOFTWARE |
-			SOF_TIMESTAMPING_SOFTWARE |
-			SOF_TIMESTAMPING_TX_HARDWARE |
-			SOF_TIMESTAMPING_RX_HARDWARE |
-			SOF_TIMESTAMPING_RAW_HARDWARE;
-
-		if (adapter->ptp_clock)
-			info->phc_index = ptp_clock_index(adapter->ptp_clock);
-		else
-			info->phc_index = -1;
-
-		info->tx_types =
-			BIT(HWTSTAMP_TX_OFF) |
-			BIT(HWTSTAMP_TX_ON);
-
 		info->rx_filters |=
 			BIT(HWTSTAMP_FILTER_PTP_V1_L4_SYNC) |
 			BIT(HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ) |
@@ -3110,13 +3096,31 @@ static int ixgbe_get_ts_info(struct net_device *dev,
 	default:
 		return ethtool_op_get_ts_info(dev, info);
 	}
+
+	info->so_timestamping =
+		SOF_TIMESTAMPING_TX_SOFTWARE |
+		SOF_TIMESTAMPING_RX_SOFTWARE |
+		SOF_TIMESTAMPING_SOFTWARE |
+		SOF_TIMESTAMPING_TX_HARDWARE |
+		SOF_TIMESTAMPING_RX_HARDWARE |
+		SOF_TIMESTAMPING_RAW_HARDWARE;
+
+	if (adapter->ptp_clock)
+		info->phc_index = ptp_clock_index(adapter->ptp_clock);
+	else
+		info->phc_index = -1;
+
+	info->tx_types =
+		BIT(HWTSTAMP_TX_OFF) |
+		BIT(HWTSTAMP_TX_ON);
+
 	return 0;
 }
 
 static unsigned int ixgbe_max_channels(struct ixgbe_adapter *adapter)
 {
 	unsigned int max_combined;
-	u8 tcs = netdev_get_num_tc(adapter->netdev);
+	u8 tcs = adapter->hw_tcs;
 
 	if (!(adapter->flags & IXGBE_FLAG_MSIX_ENABLED)) {
 		/* We only support one q_vector without MSI-X */
@@ -3173,7 +3177,7 @@ static void ixgbe_get_channels(struct net_device *dev,
 		return;
 
 	/* same thing goes for being DCB enabled */
-	if (netdev_get_num_tc(dev) > 1)
+	if (adapter->hw_tcs > 1)
 		return;
 
 	/* if ATR is disabled we can exit */
@@ -3219,7 +3223,7 @@ static int ixgbe_set_channels(struct net_device *dev,
 
 #endif
 	/* use setup TC to update any traffic class queue mapping */
-	return ixgbe_setup_tc(dev, netdev_get_num_tc(dev));
+	return ixgbe_setup_tc(dev, adapter->hw_tcs);
 }
 
 static int ixgbe_get_module_info(struct net_device *dev,
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.c
index a23c2b5411a0..7a09a40e4472 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.c
@@ -150,6 +150,7 @@ skip_ddpinv:
  * @xid: the exchange id requesting ddp
  * @sgl: the scatter-gather list for this request
  * @sgc: the number of scatter-gather items
+ * @target_mode: 1 to set up target mode, 0 to set up initiator mode
  *
  * Returns : 1 for success and 0 for no ddp
  */
@@ -1034,11 +1035,8 @@ int ixgbe_fcoe_get_hbainfo(struct net_device *netdev,
 		 ixgbe_driver_name,
 		 ixgbe_driver_version);
 	/* Firmware Version */
-	snprintf(info->firmware_version,
-		 sizeof(info->firmware_version),
-		 "0x%08x",
-		 (adapter->eeprom_verh << 16) |
-		  adapter->eeprom_verl);
+	strlcpy(info->firmware_version, adapter->eeprom_id,
+		sizeof(info->firmware_version));
 
 	/* Model */
 	if (hw->mac.type == ixgbe_mac_82599EB) {
@@ -1066,7 +1064,7 @@ int ixgbe_fcoe_get_hbainfo(struct net_device *netdev,
 
 /**
  * ixgbe_fcoe_get_tc - get the current TC that fcoe is mapped to
- * @adapter - pointer to the device adapter structure
+ * @adapter: pointer to the device adapter structure
  *
  * Return : TC that FCoE is mapped to
  */
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_ipsec.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_ipsec.c
new file mode 100644
index 000000000000..93eacddb6704
--- /dev/null
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_ipsec.c
@@ -0,0 +1,941 @@
+/*******************************************************************************
+ *
+ * Intel 10 Gigabit PCI Express Linux driver
+ * Copyright(c) 2017 Oracle and/or its affiliates. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program.  If not, see <http://www.gnu.org/licenses/>.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Contact Information:
+ * Linux NICS <linux.nics@intel.com>
+ * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ ******************************************************************************/
+
+#include "ixgbe.h"
+#include <net/xfrm.h>
+#include <crypto/aead.h>
+
+/**
+ * ixgbe_ipsec_set_tx_sa - set the Tx SA registers
+ * @hw: hw specific details
+ * @idx: register index to write
+ * @key: key byte array
+ * @salt: salt bytes
+ **/
+static void ixgbe_ipsec_set_tx_sa(struct ixgbe_hw *hw, u16 idx,
+				  u32 key[], u32 salt)
+{
+	u32 reg;
+	int i;
+
+	for (i = 0; i < 4; i++)
+		IXGBE_WRITE_REG(hw, IXGBE_IPSTXKEY(i), cpu_to_be32(key[3 - i]));
+	IXGBE_WRITE_REG(hw, IXGBE_IPSTXSALT, cpu_to_be32(salt));
+	IXGBE_WRITE_FLUSH(hw);
+
+	reg = IXGBE_READ_REG(hw, IXGBE_IPSTXIDX);
+	reg &= IXGBE_RXTXIDX_IPS_EN;
+	reg |= idx << IXGBE_RXTXIDX_IDX_SHIFT | IXGBE_RXTXIDX_WRITE;
+	IXGBE_WRITE_REG(hw, IXGBE_IPSTXIDX, reg);
+	IXGBE_WRITE_FLUSH(hw);
+}
+
+/**
+ * ixgbe_ipsec_set_rx_item - set an Rx table item
+ * @hw: hw specific details
+ * @idx: register index to write
+ * @tbl: table selector
+ *
+ * Trigger the device to store into a particular Rx table the
+ * data that has already been loaded into the input register
+ **/
+static void ixgbe_ipsec_set_rx_item(struct ixgbe_hw *hw, u16 idx,
+				    enum ixgbe_ipsec_tbl_sel tbl)
+{
+	u32 reg;
+
+	reg = IXGBE_READ_REG(hw, IXGBE_IPSRXIDX);
+	reg &= IXGBE_RXTXIDX_IPS_EN;
+	reg |= tbl << IXGBE_RXIDX_TBL_SHIFT |
+	       idx << IXGBE_RXTXIDX_IDX_SHIFT |
+	       IXGBE_RXTXIDX_WRITE;
+	IXGBE_WRITE_REG(hw, IXGBE_IPSRXIDX, reg);
+	IXGBE_WRITE_FLUSH(hw);
+}
+
+/**
+ * ixgbe_ipsec_set_rx_sa - set up the register bits to save SA info
+ * @hw: hw specific details
+ * @idx: register index to write
+ * @spi: security parameter index
+ * @key: key byte array
+ * @salt: salt bytes
+ * @mode: rx decrypt control bits
+ * @ip_idx: index into IP table for related IP address
+ **/
+static void ixgbe_ipsec_set_rx_sa(struct ixgbe_hw *hw, u16 idx, __be32 spi,
+				  u32 key[], u32 salt, u32 mode, u32 ip_idx)
+{
+	int i;
+
+	/* store the SPI (in bigendian) and IPidx */
+	IXGBE_WRITE_REG(hw, IXGBE_IPSRXSPI, cpu_to_le32(spi));
+	IXGBE_WRITE_REG(hw, IXGBE_IPSRXIPIDX, ip_idx);
+	IXGBE_WRITE_FLUSH(hw);
+
+	ixgbe_ipsec_set_rx_item(hw, idx, ips_rx_spi_tbl);
+
+	/* store the key, salt, and mode */
+	for (i = 0; i < 4; i++)
+		IXGBE_WRITE_REG(hw, IXGBE_IPSRXKEY(i), cpu_to_be32(key[3 - i]));
+	IXGBE_WRITE_REG(hw, IXGBE_IPSRXSALT, cpu_to_be32(salt));
+	IXGBE_WRITE_REG(hw, IXGBE_IPSRXMOD, mode);
+	IXGBE_WRITE_FLUSH(hw);
+
+	ixgbe_ipsec_set_rx_item(hw, idx, ips_rx_key_tbl);
+}
+
+/**
+ * ixgbe_ipsec_set_rx_ip - set up the register bits to save SA IP addr info
+ * @hw: hw specific details
+ * @idx: register index to write
+ * @addr: IP address byte array
+ **/
+static void ixgbe_ipsec_set_rx_ip(struct ixgbe_hw *hw, u16 idx, __be32 addr[])
+{
+	int i;
+
+	/* store the ip address */
+	for (i = 0; i < 4; i++)
+		IXGBE_WRITE_REG(hw, IXGBE_IPSRXIPADDR(i), cpu_to_le32(addr[i]));
+	IXGBE_WRITE_FLUSH(hw);
+
+	ixgbe_ipsec_set_rx_item(hw, idx, ips_rx_ip_tbl);
+}
+
+/**
+ * ixgbe_ipsec_clear_hw_tables - scrub tables that don't get cleared on reset
+ * @adapter: board private structure
+ **/
+static void ixgbe_ipsec_clear_hw_tables(struct ixgbe_adapter *adapter)
+{
+	struct ixgbe_ipsec *ipsec = adapter->ipsec;
+	struct ixgbe_hw *hw = &adapter->hw;
+	u32 buf[4] = {0, 0, 0, 0};
+	u16 idx;
+
+	/* disable Rx and Tx SA lookup */
+	IXGBE_WRITE_REG(hw, IXGBE_IPSRXIDX, 0);
+	IXGBE_WRITE_REG(hw, IXGBE_IPSTXIDX, 0);
+
+	/* scrub the tables - split the loops for the max of the IP table */
+	for (idx = 0; idx < IXGBE_IPSEC_MAX_RX_IP_COUNT; idx++) {
+		ixgbe_ipsec_set_tx_sa(hw, idx, buf, 0);
+		ixgbe_ipsec_set_rx_sa(hw, idx, 0, buf, 0, 0, 0);
+		ixgbe_ipsec_set_rx_ip(hw, idx, (__be32 *)buf);
+	}
+	for (; idx < IXGBE_IPSEC_MAX_SA_COUNT; idx++) {
+		ixgbe_ipsec_set_tx_sa(hw, idx, buf, 0);
+		ixgbe_ipsec_set_rx_sa(hw, idx, 0, buf, 0, 0, 0);
+	}
+
+	ipsec->num_rx_sa = 0;
+	ipsec->num_tx_sa = 0;
+}
+
+/**
+ * ixgbe_ipsec_stop_data - stop the Rx and Tx data paths
+ * @adapter: board private structure
+ **/
+static void ixgbe_ipsec_stop_data(struct ixgbe_adapter *adapter)
+{
+	struct ixgbe_hw *hw = &adapter->hw;
+	bool link = adapter->link_up;
+	u32 t_rdy, r_rdy;
+	u32 limit;
+	u32 reg;
+
+	/* halt data paths */
+	reg = IXGBE_READ_REG(hw, IXGBE_SECTXCTRL);
+	reg |= IXGBE_SECTXCTRL_TX_DIS;
+	IXGBE_WRITE_REG(hw, IXGBE_SECTXCTRL, reg);
+
+	reg = IXGBE_READ_REG(hw, IXGBE_SECRXCTRL);
+	reg |= IXGBE_SECRXCTRL_RX_DIS;
+	IXGBE_WRITE_REG(hw, IXGBE_SECRXCTRL, reg);
+
+	IXGBE_WRITE_FLUSH(hw);
+
+	/* If the Tx FIFO still has data but the link is down, we
+	 * can't clear the Tx sec block.  Set the MAC loopback
+	 * before clearing the block.
+	 */
+	if (!link) {
+		reg = IXGBE_READ_REG(hw, IXGBE_MACC);
+		reg |= IXGBE_MACC_FLU;
+		IXGBE_WRITE_REG(hw, IXGBE_MACC, reg);
+
+		reg = IXGBE_READ_REG(hw, IXGBE_HLREG0);
+		reg |= IXGBE_HLREG0_LPBK;
+		IXGBE_WRITE_REG(hw, IXGBE_HLREG0, reg);
+
+		IXGBE_WRITE_FLUSH(hw);
+		mdelay(3);
+	}
+
+	/* wait for the paths to empty */
+	limit = 20;
+	do {
+		mdelay(10);
+		t_rdy = IXGBE_READ_REG(hw, IXGBE_SECTXSTAT) &
+			IXGBE_SECTXSTAT_SECTX_RDY;
+		r_rdy = IXGBE_READ_REG(hw, IXGBE_SECRXSTAT) &
+			IXGBE_SECRXSTAT_SECRX_RDY;
+	} while (!t_rdy && !r_rdy && limit--);
+
+	/* undo loopback if we played with it earlier */
+	if (!link) {
+		reg = IXGBE_READ_REG(hw, IXGBE_MACC);
+		reg &= ~IXGBE_MACC_FLU;
+		IXGBE_WRITE_REG(hw, IXGBE_MACC, reg);
+
+		reg = IXGBE_READ_REG(hw, IXGBE_HLREG0);
+		reg &= ~IXGBE_HLREG0_LPBK;
+		IXGBE_WRITE_REG(hw, IXGBE_HLREG0, reg);
+
+		IXGBE_WRITE_FLUSH(hw);
+	}
+}
+
+/**
+ * ixgbe_ipsec_stop_engine - disable the IPsec engines and SA lookup
+ * @adapter: board private structure
+ **/
+static void ixgbe_ipsec_stop_engine(struct ixgbe_adapter *adapter)
+{
+	struct ixgbe_hw *hw = &adapter->hw;
+	u32 reg;
+
+	ixgbe_ipsec_stop_data(adapter);
+
+	/* disable Rx and Tx SA lookup */
+	IXGBE_WRITE_REG(hw, IXGBE_IPSTXIDX, 0);
+	IXGBE_WRITE_REG(hw, IXGBE_IPSRXIDX, 0);
+
+	/* disable the Rx and Tx engines and full packet store-n-forward */
+	reg = IXGBE_READ_REG(hw, IXGBE_SECTXCTRL);
+	reg |= IXGBE_SECTXCTRL_SECTX_DIS;
+	reg &= ~IXGBE_SECTXCTRL_STORE_FORWARD;
+	IXGBE_WRITE_REG(hw, IXGBE_SECTXCTRL, reg);
+
+	reg = IXGBE_READ_REG(hw, IXGBE_SECRXCTRL);
+	reg |= IXGBE_SECRXCTRL_SECRX_DIS;
+	IXGBE_WRITE_REG(hw, IXGBE_SECRXCTRL, reg);
+
+	/* restore the "tx security buffer almost full threshold" to 0x250 */
+	IXGBE_WRITE_REG(hw, IXGBE_SECTXBUFFAF, 0x250);
+
+	/* Set minimum IFG between packets back to the default 0x1 */
+	reg = IXGBE_READ_REG(hw, IXGBE_SECTXMINIFG);
+	reg = (reg & 0xfffffff0) | 0x1;
+	IXGBE_WRITE_REG(hw, IXGBE_SECTXMINIFG, reg);
+
+	/* final set for normal (no ipsec offload) processing */
+	IXGBE_WRITE_REG(hw, IXGBE_SECTXCTRL, IXGBE_SECTXCTRL_SECTX_DIS);
+	IXGBE_WRITE_REG(hw, IXGBE_SECRXCTRL, IXGBE_SECRXCTRL_SECRX_DIS);
+
+	IXGBE_WRITE_FLUSH(hw);
+}
+
+/**
+ * ixgbe_ipsec_start_engine - start the IPsec engines and enable SA lookup
+ * @adapter: board private structure
+ *
+ * NOTE: this increases power consumption whether being used or not
+ **/
+static void ixgbe_ipsec_start_engine(struct ixgbe_adapter *adapter)
+{
+	struct ixgbe_hw *hw = &adapter->hw;
+	u32 reg;
+
+	ixgbe_ipsec_stop_data(adapter);
+
+	/* Set minimum IFG between packets to 3 */
+	reg = IXGBE_READ_REG(hw, IXGBE_SECTXMINIFG);
+	reg = (reg & 0xfffffff0) | 0x3;
+	IXGBE_WRITE_REG(hw, IXGBE_SECTXMINIFG, reg);
+
+	/* Set "tx security buffer almost full threshold" to 0x15 so that the
+	 * almost full indication is generated only after the buffer
+	 * contains at least an entire jumbo packet.
+	 */
+	reg = IXGBE_READ_REG(hw, IXGBE_SECTXBUFFAF);
+	reg = (reg & 0xfffffc00) | 0x15;
+	IXGBE_WRITE_REG(hw, IXGBE_SECTXBUFFAF, reg);
+
+	/* restart the data paths by clearing the DISABLE bits */
+	IXGBE_WRITE_REG(hw, IXGBE_SECRXCTRL, 0);
+	IXGBE_WRITE_REG(hw, IXGBE_SECTXCTRL, IXGBE_SECTXCTRL_STORE_FORWARD);
+
+	/* enable Rx and Tx SA lookup */
+	IXGBE_WRITE_REG(hw, IXGBE_IPSTXIDX, IXGBE_RXTXIDX_IPS_EN);
+	IXGBE_WRITE_REG(hw, IXGBE_IPSRXIDX, IXGBE_RXTXIDX_IPS_EN);
+
+	IXGBE_WRITE_FLUSH(hw);
+}
+
+/**
+ * ixgbe_ipsec_restore - restore the ipsec HW settings after a reset
+ * @adapter: board private structure
+ **/
+void ixgbe_ipsec_restore(struct ixgbe_adapter *adapter)
+{
+	struct ixgbe_ipsec *ipsec = adapter->ipsec;
+	struct ixgbe_hw *hw = &adapter->hw;
+	int i;
+
+	if (!(adapter->flags2 & IXGBE_FLAG2_IPSEC_ENABLED))
+		return;
+
+	/* clean up and restart the engine */
+	ixgbe_ipsec_stop_engine(adapter);
+	ixgbe_ipsec_clear_hw_tables(adapter);
+	ixgbe_ipsec_start_engine(adapter);
+
+	/* reload the IP addrs */
+	for (i = 0; i < IXGBE_IPSEC_MAX_RX_IP_COUNT; i++) {
+		struct rx_ip_sa *ipsa = &ipsec->ip_tbl[i];
+
+		if (ipsa->used)
+			ixgbe_ipsec_set_rx_ip(hw, i, ipsa->ipaddr);
+	}
+
+	/* reload the Rx and Tx keys */
+	for (i = 0; i < IXGBE_IPSEC_MAX_SA_COUNT; i++) {
+		struct rx_sa *rsa = &ipsec->rx_tbl[i];
+		struct tx_sa *tsa = &ipsec->tx_tbl[i];
+
+		if (rsa->used)
+			ixgbe_ipsec_set_rx_sa(hw, i, rsa->xs->id.spi,
+					      rsa->key, rsa->salt,
+					      rsa->mode, rsa->iptbl_ind);
+
+		if (tsa->used)
+			ixgbe_ipsec_set_tx_sa(hw, i, tsa->key, tsa->salt);
+	}
+}
+
+/**
+ * ixgbe_ipsec_find_empty_idx - find the first unused security parameter index
+ * @ipsec: pointer to ipsec struct
+ * @rxtable: true if we need to look in the Rx table
+ *
+ * Returns the first unused index in either the Rx or Tx SA table
+ **/
+static int ixgbe_ipsec_find_empty_idx(struct ixgbe_ipsec *ipsec, bool rxtable)
+{
+	u32 i;
+
+	if (rxtable) {
+		if (ipsec->num_rx_sa == IXGBE_IPSEC_MAX_SA_COUNT)
+			return -ENOSPC;
+
+		/* search rx sa table */
+		for (i = 0; i < IXGBE_IPSEC_MAX_SA_COUNT; i++) {
+			if (!ipsec->rx_tbl[i].used)
+				return i;
+		}
+	} else {
+		if (ipsec->num_tx_sa == IXGBE_IPSEC_MAX_SA_COUNT)
+			return -ENOSPC;
+
+		/* search tx sa table */
+		for (i = 0; i < IXGBE_IPSEC_MAX_SA_COUNT; i++) {
+			if (!ipsec->tx_tbl[i].used)
+				return i;
+		}
+	}
+
+	return -ENOSPC;
+}
+
+/**
+ * ixgbe_ipsec_find_rx_state - find the state that matches
+ * @ipsec: pointer to ipsec struct
+ * @daddr: inbound address to match
+ * @proto: protocol to match
+ * @spi: SPI to match
+ * @ip4: true if using an ipv4 address
+ *
+ * Returns a pointer to the matching SA state information
+ **/
+static struct xfrm_state *ixgbe_ipsec_find_rx_state(struct ixgbe_ipsec *ipsec,
+						    __be32 *daddr, u8 proto,
+						    __be32 spi, bool ip4)
+{
+	struct rx_sa *rsa;
+	struct xfrm_state *ret = NULL;
+
+	rcu_read_lock();
+	hash_for_each_possible_rcu(ipsec->rx_sa_list, rsa, hlist, spi)
+		if (spi == rsa->xs->id.spi &&
+		    ((ip4 && *daddr == rsa->xs->id.daddr.a4) ||
+		      (!ip4 && !memcmp(daddr, &rsa->xs->id.daddr.a6,
+				       sizeof(rsa->xs->id.daddr.a6)))) &&
+		    proto == rsa->xs->id.proto) {
+			ret = rsa->xs;
+			xfrm_state_hold(ret);
+			break;
+		}
+	rcu_read_unlock();
+	return ret;
+}
+
+/**
+ * ixgbe_ipsec_parse_proto_keys - find the key and salt based on the protocol
+ * @xs: pointer to xfrm_state struct
+ * @mykey: pointer to key array to populate
+ * @mysalt: pointer to salt value to populate
+ *
+ * This copies the protocol keys and salt to our own data tables.  The
+ * 82599 family only supports the one algorithm.
+ **/
+static int ixgbe_ipsec_parse_proto_keys(struct xfrm_state *xs,
+					u32 *mykey, u32 *mysalt)
+{
+	struct net_device *dev = xs->xso.dev;
+	unsigned char *key_data;
+	char *alg_name = NULL;
+	const char aes_gcm_name[] = "rfc4106(gcm(aes))";
+	int key_len;
+
+	if (xs->aead) {
+		key_data = &xs->aead->alg_key[0];
+		key_len = xs->aead->alg_key_len;
+		alg_name = xs->aead->alg_name;
+	} else {
+		netdev_err(dev, "Unsupported IPsec algorithm\n");
+		return -EINVAL;
+	}
+
+	if (strcmp(alg_name, aes_gcm_name)) {
+		netdev_err(dev, "Unsupported IPsec algorithm - please use %s\n",
+			   aes_gcm_name);
+		return -EINVAL;
+	}
+
+	/* The key bytes come down in a bigendian array of bytes, so
+	 * we don't need to do any byteswapping.
+	 * A key_len of 160 bits covers a 16 byte key plus a 4 byte salt.
+	 */
+	if (key_len == 160) {
+		*mysalt = ((u32 *)key_data)[4];
+	} else if (key_len != 128) {
+		netdev_err(dev, "IPsec hw offload only supports keys up to 128 bits with a 32 bit salt\n");
+		return -EINVAL;
+	} else {
+		netdev_info(dev, "IPsec hw offload parameters missing 32 bit salt value\n");
+		*mysalt = 0;
+	}
+	memcpy(mykey, key_data, 16);
+
+	return 0;
+}
+
+/**
+ * ixgbe_ipsec_add_sa - program device with a security association
+ * @xs: pointer to transformer state struct
+ **/
+static int ixgbe_ipsec_add_sa(struct xfrm_state *xs)
+{
+	struct net_device *dev = xs->xso.dev;
+	struct ixgbe_adapter *adapter = netdev_priv(dev);
+	struct ixgbe_ipsec *ipsec = adapter->ipsec;
+	struct ixgbe_hw *hw = &adapter->hw;
+	int checked, match, first;
+	u16 sa_idx;
+	int ret;
+	int i;
+
+	if (xs->id.proto != IPPROTO_ESP && xs->id.proto != IPPROTO_AH) {
+		netdev_err(dev, "Unsupported protocol 0x%04x for ipsec offload\n",
+			   xs->id.proto);
+		return -EINVAL;
+	}
+
+	if (xs->xso.flags & XFRM_OFFLOAD_INBOUND) {
+		struct rx_sa rsa;
+
+		if (xs->calg) {
+			netdev_err(dev, "Compression offload not supported\n");
+			return -EINVAL;
+		}
+
+		/* find the first unused index */
+		ret = ixgbe_ipsec_find_empty_idx(ipsec, true);
+		if (ret < 0) {
+			netdev_err(dev, "No space for SA in Rx table!\n");
+			return ret;
+		}
+		sa_idx = (u16)ret;
+
+		memset(&rsa, 0, sizeof(rsa));
+		rsa.used = true;
+		rsa.xs = xs;
+
+		if (rsa.xs->id.proto & IPPROTO_ESP)
+			rsa.decrypt = xs->ealg || xs->aead;
+
+		/* get the key and salt */
+		ret = ixgbe_ipsec_parse_proto_keys(xs, rsa.key, &rsa.salt);
+		if (ret) {
+			netdev_err(dev, "Failed to get key data for Rx SA table\n");
+			return ret;
+		}
+
+		/* get ip for rx sa table */
+		if (xs->props.family == AF_INET6)
+			memcpy(rsa.ipaddr, &xs->id.daddr.a6, 16);
+		else
+			memcpy(&rsa.ipaddr[3], &xs->id.daddr.a4, 4);
+
+		/* The HW does not have a 1:1 mapping from keys to IP addrs, so
+		 * check for a matching IP addr entry in the table.  If the addr
+		 * already exists, use it; else find an unused slot and add the
+		 * addr.  If one does not exist and there are no unused table
+		 * entries, fail the request.
+		 */
+
+		/* Find an existing match or first not used, and stop looking
+		 * after we've checked all we know we have.
+		 */
+		checked = 0;
+		match = -1;
+		first = -1;
+		for (i = 0;
+		     i < IXGBE_IPSEC_MAX_RX_IP_COUNT &&
+		     (checked < ipsec->num_rx_sa || first < 0);
+		     i++) {
+			if (ipsec->ip_tbl[i].used) {
+				if (!memcmp(ipsec->ip_tbl[i].ipaddr,
+					    rsa.ipaddr, sizeof(rsa.ipaddr))) {
+					match = i;
+					break;
+				}
+				checked++;
+			} else if (first < 0) {
+				first = i;  /* track the first empty seen */
+			}
+		}
+
+		if (ipsec->num_rx_sa == 0)
+			first = 0;
+
+		if (match >= 0) {
+			/* addrs are the same, we should use this one */
+			rsa.iptbl_ind = match;
+			ipsec->ip_tbl[match].ref_cnt++;
+
+		} else if (first >= 0) {
+			/* no matches, but here's an empty slot */
+			rsa.iptbl_ind = first;
+
+			memcpy(ipsec->ip_tbl[first].ipaddr,
+			       rsa.ipaddr, sizeof(rsa.ipaddr));
+			ipsec->ip_tbl[first].ref_cnt = 1;
+			ipsec->ip_tbl[first].used = true;
+
+			ixgbe_ipsec_set_rx_ip(hw, rsa.iptbl_ind, rsa.ipaddr);
+
+		} else {
+			/* no match and no empty slot */
+			netdev_err(dev, "No space for SA in Rx IP SA table\n");
+			memset(&rsa, 0, sizeof(rsa));
+			return -ENOSPC;
+		}
+
+		rsa.mode = IXGBE_RXMOD_VALID;
+		if (rsa.xs->id.proto & IPPROTO_ESP)
+			rsa.mode |= IXGBE_RXMOD_PROTO_ESP;
+		if (rsa.decrypt)
+			rsa.mode |= IXGBE_RXMOD_DECRYPT;
+		if (rsa.xs->props.family == AF_INET6)
+			rsa.mode |= IXGBE_RXMOD_IPV6;
+
+		/* the preparations worked, so save the info */
+		memcpy(&ipsec->rx_tbl[sa_idx], &rsa, sizeof(rsa));
+
+		ixgbe_ipsec_set_rx_sa(hw, sa_idx, rsa.xs->id.spi, rsa.key,
+				      rsa.salt, rsa.mode, rsa.iptbl_ind);
+		xs->xso.offload_handle = sa_idx + IXGBE_IPSEC_BASE_RX_INDEX;
+
+		ipsec->num_rx_sa++;
+
+		/* hash the new entry for faster search in Rx path */
+		hash_add_rcu(ipsec->rx_sa_list, &ipsec->rx_tbl[sa_idx].hlist,
+			     rsa.xs->id.spi);
+	} else {
+		struct tx_sa tsa;
+
+		/* find the first unused index */
+		ret = ixgbe_ipsec_find_empty_idx(ipsec, false);
+		if (ret < 0) {
+			netdev_err(dev, "No space for SA in Tx table\n");
+			return ret;
+		}
+		sa_idx = (u16)ret;
+
+		memset(&tsa, 0, sizeof(tsa));
+		tsa.used = true;
+		tsa.xs = xs;
+
+		if (xs->id.proto & IPPROTO_ESP)
+			tsa.encrypt = xs->ealg || xs->aead;
+
+		ret = ixgbe_ipsec_parse_proto_keys(xs, tsa.key, &tsa.salt);
+		if (ret) {
+			netdev_err(dev, "Failed to get key data for Tx SA table\n");
+			memset(&tsa, 0, sizeof(tsa));
+			return ret;
+		}
+
+		/* the preparations worked, so save the info */
+		memcpy(&ipsec->tx_tbl[sa_idx], &tsa, sizeof(tsa));
+
+		ixgbe_ipsec_set_tx_sa(hw, sa_idx, tsa.key, tsa.salt);
+
+		xs->xso.offload_handle = sa_idx + IXGBE_IPSEC_BASE_TX_INDEX;
+
+		ipsec->num_tx_sa++;
+	}
+
+	/* enable the engine if not already warmed up */
+	if (!(adapter->flags2 & IXGBE_FLAG2_IPSEC_ENABLED)) {
+		ixgbe_ipsec_start_engine(adapter);
+		adapter->flags2 |= IXGBE_FLAG2_IPSEC_ENABLED;
+	}
+
+	return 0;
+}
+
+/**
+ * ixgbe_ipsec_del_sa - clear out this specific SA
+ * @xs: pointer to transformer state struct
+ **/
+static void ixgbe_ipsec_del_sa(struct xfrm_state *xs)
+{
+	struct net_device *dev = xs->xso.dev;
+	struct ixgbe_adapter *adapter = netdev_priv(dev);
+	struct ixgbe_ipsec *ipsec = adapter->ipsec;
+	struct ixgbe_hw *hw = &adapter->hw;
+	u32 zerobuf[4] = {0, 0, 0, 0};
+	u16 sa_idx;
+
+	if (xs->xso.flags & XFRM_OFFLOAD_INBOUND) {
+		struct rx_sa *rsa;
+		u8 ipi;
+
+		sa_idx = xs->xso.offload_handle - IXGBE_IPSEC_BASE_RX_INDEX;
+		rsa = &ipsec->rx_tbl[sa_idx];
+
+		if (!rsa->used) {
+			netdev_err(dev, "Invalid Rx SA selected sa_idx=%d offload_handle=%lu\n",
+				   sa_idx, xs->xso.offload_handle);
+			return;
+		}
+
+		ixgbe_ipsec_set_rx_sa(hw, sa_idx, 0, zerobuf, 0, 0, 0);
+		hash_del_rcu(&rsa->hlist);
+
+		/* if the IP table entry is referenced by only this SA,
+		 * i.e. ref_cnt is only 1, clear the IP table entry as well
+		 */
+		ipi = rsa->iptbl_ind;
+		if (ipsec->ip_tbl[ipi].ref_cnt > 0) {
+			ipsec->ip_tbl[ipi].ref_cnt--;
+
+			if (!ipsec->ip_tbl[ipi].ref_cnt) {
+				memset(&ipsec->ip_tbl[ipi], 0,
+				       sizeof(struct rx_ip_sa));
+				ixgbe_ipsec_set_rx_ip(hw, ipi, zerobuf);
+			}
+		}
+
+		memset(rsa, 0, sizeof(struct rx_sa));
+		ipsec->num_rx_sa--;
+	} else {
+		sa_idx = xs->xso.offload_handle - IXGBE_IPSEC_BASE_TX_INDEX;
+
+		if (!ipsec->tx_tbl[sa_idx].used) {
+			netdev_err(dev, "Invalid Tx SA selected sa_idx=%d offload_handle=%lu\n",
+				   sa_idx, xs->xso.offload_handle);
+			return;
+		}
+
+		ixgbe_ipsec_set_tx_sa(hw, sa_idx, zerobuf, 0);
+		memset(&ipsec->tx_tbl[sa_idx], 0, sizeof(struct tx_sa));
+		ipsec->num_tx_sa--;
+	}
+
+	/* if there are no SAs left, stop the engine to save energy */
+	if (ipsec->num_rx_sa == 0 && ipsec->num_tx_sa == 0) {
+		adapter->flags2 &= ~IXGBE_FLAG2_IPSEC_ENABLED;
+		ixgbe_ipsec_stop_engine(adapter);
+	}
+}
+
+/**
+ * ixgbe_ipsec_offload_ok - can this packet use the xfrm hw offload
+ * @skb: current data packet
+ * @xs: pointer to transformer state struct
+ **/
+static bool ixgbe_ipsec_offload_ok(struct sk_buff *skb, struct xfrm_state *xs)
+{
+	if (xs->props.family == AF_INET) {
+		/* Offload with IPv4 options is not supported yet */
+		if (ip_hdr(skb)->ihl != 5)
+			return false;
+	} else {
+		/* Offload with IPv6 extension headers is not supported yet */
+		if (ipv6_ext_hdr(ipv6_hdr(skb)->nexthdr))
+			return false;
+	}
+
+	return true;
+}
+
+/**
+ * ixgbe_ipsec_free - called by xfrm garbage collection
+ * @xs: pointer to transformer state struct
+ *
+ * We don't have any garbage to collect, so we shouldn't bother
+ * implementing this function, but the XFRM code doesn't check for
+ * existence before calling the API callback.
+ **/
+static void ixgbe_ipsec_free(struct xfrm_state *xs)
+{
+}
+
+static const struct xfrmdev_ops ixgbe_xfrmdev_ops = {
+	.xdo_dev_state_add = ixgbe_ipsec_add_sa,
+	.xdo_dev_state_delete = ixgbe_ipsec_del_sa,
+	.xdo_dev_offload_ok = ixgbe_ipsec_offload_ok,
+	.xdo_dev_state_free = ixgbe_ipsec_free,
+};
+
+/**
+ * ixgbe_ipsec_tx - setup Tx flags for ipsec offload
+ * @tx_ring: outgoing context
+ * @first: current data packet
+ * @itd: ipsec Tx data for later use in building context descriptor
+ **/
+int ixgbe_ipsec_tx(struct ixgbe_ring *tx_ring,
+		   struct ixgbe_tx_buffer *first,
+		   struct ixgbe_ipsec_tx_data *itd)
+{
+	struct ixgbe_adapter *adapter = netdev_priv(tx_ring->netdev);
+	struct ixgbe_ipsec *ipsec = adapter->ipsec;
+	struct xfrm_state *xs;
+	struct tx_sa *tsa;
+
+	if (unlikely(!first->skb->sp->len)) {
+		netdev_err(tx_ring->netdev, "%s: no xfrm state len = %d\n",
+			   __func__, first->skb->sp->len);
+		return 0;
+	}
+
+	xs = xfrm_input_state(first->skb);
+	if (unlikely(!xs)) {
+		netdev_err(tx_ring->netdev, "%s: no xfrm_input_state() xs = %p\n",
+			   __func__, xs);
+		return 0;
+	}
+
+	itd->sa_idx = xs->xso.offload_handle - IXGBE_IPSEC_BASE_TX_INDEX;
+	if (unlikely(itd->sa_idx > IXGBE_IPSEC_MAX_SA_COUNT)) {
+		netdev_err(tx_ring->netdev, "%s: bad sa_idx=%d handle=%lu\n",
+			   __func__, itd->sa_idx, xs->xso.offload_handle);
+		return 0;
+	}
+
+	tsa = &ipsec->tx_tbl[itd->sa_idx];
+	if (unlikely(!tsa->used)) {
+		netdev_err(tx_ring->netdev, "%s: unused sa_idx=%d\n",
+			   __func__, itd->sa_idx);
+		return 0;
+	}
+
+	first->tx_flags |= IXGBE_TX_FLAGS_IPSEC | IXGBE_TX_FLAGS_CC;
+
+	itd->flags = 0;
+	if (xs->id.proto == IPPROTO_ESP) {
+		itd->flags |= IXGBE_ADVTXD_TUCMD_IPSEC_TYPE_ESP |
+			      IXGBE_ADVTXD_TUCMD_L4T_TCP;
+		if (first->protocol == htons(ETH_P_IP))
+			itd->flags |= IXGBE_ADVTXD_TUCMD_IPV4;
+		itd->trailer_len = xs->props.trailer_len;
+	}
+	if (tsa->encrypt)
+		itd->flags |= IXGBE_ADVTXD_TUCMD_IPSEC_ENCRYPT_EN;
+
+	return 1;
+}
+
+/**
+ * ixgbe_ipsec_rx - decode ipsec bits from Rx descriptor
+ * @rx_ring: receiving ring
+ * @rx_desc: receive data descriptor
+ * @skb: current data packet
+ *
+ * Determine whether an ipsec encapsulation was noticed, and if so set up
+ * the resulting status for later in the receive stack.
+ **/
+void ixgbe_ipsec_rx(struct ixgbe_ring *rx_ring,
+		    union ixgbe_adv_rx_desc *rx_desc,
+		    struct sk_buff *skb)
+{
+	struct ixgbe_adapter *adapter = netdev_priv(rx_ring->netdev);
+	__le16 pkt_info = rx_desc->wb.lower.lo_dword.hs_rss.pkt_info;
+	__le16 ipsec_pkt_types = cpu_to_le16(IXGBE_RXDADV_PKTTYPE_IPSEC_AH |
+					     IXGBE_RXDADV_PKTTYPE_IPSEC_ESP);
+	struct ixgbe_ipsec *ipsec = adapter->ipsec;
+	struct xfrm_offload *xo = NULL;
+	struct xfrm_state *xs = NULL;
+	struct ipv6hdr *ip6 = NULL;
+	struct iphdr *ip4 = NULL;
+	void *daddr;
+	__be32 spi;
+	u8 *c_hdr;
+	u8 proto;
+
+	/* Find the ip and crypto headers in the data.
+	 * We can assume no vlan header is in the way, because the
+	 * hw won't recognize the IPsec packet and the vlan device
+	 * doesn't currently support xfrm offload anyway.
+	 */
+	if (pkt_info & cpu_to_le16(IXGBE_RXDADV_PKTTYPE_IPV4)) {
+		ip4 = (struct iphdr *)(skb->data + ETH_HLEN);
+		daddr = &ip4->daddr;
+		c_hdr = (u8 *)ip4 + ip4->ihl * 4;
+	} else if (pkt_info & cpu_to_le16(IXGBE_RXDADV_PKTTYPE_IPV6)) {
+		ip6 = (struct ipv6hdr *)(skb->data + ETH_HLEN);
+		daddr = &ip6->daddr;
+		c_hdr = (u8 *)ip6 + sizeof(struct ipv6hdr);
+	} else {
+		return;
+	}
+
+	switch (pkt_info & ipsec_pkt_types) {
+	case cpu_to_le16(IXGBE_RXDADV_PKTTYPE_IPSEC_AH):
+		spi = ((struct ip_auth_hdr *)c_hdr)->spi;
+		proto = IPPROTO_AH;
+		break;
+	case cpu_to_le16(IXGBE_RXDADV_PKTTYPE_IPSEC_ESP):
+		spi = ((struct ip_esp_hdr *)c_hdr)->spi;
+		proto = IPPROTO_ESP;
+		break;
+	default:
+		return;
+	}
+
+	xs = ixgbe_ipsec_find_rx_state(ipsec, daddr, proto, spi, !!ip4);
+	if (unlikely(!xs))
+		return;
+
+	skb->sp = secpath_dup(skb->sp);
+	if (unlikely(!skb->sp))
+		return;
+
+	skb->sp->xvec[skb->sp->len++] = xs;
+	skb->sp->olen++;
+	xo = xfrm_offload(skb);
+	xo->flags = CRYPTO_DONE;
+	xo->status = CRYPTO_SUCCESS;
+
+	adapter->rx_ipsec++;
+}
+
+/**
+ * ixgbe_init_ipsec_offload - initialize security registers for IPSec operation
+ * @adapter: board private structure
+ **/
+void ixgbe_init_ipsec_offload(struct ixgbe_adapter *adapter)
+{
+	struct ixgbe_ipsec *ipsec;
+	size_t size;
+
+	if (adapter->hw.mac.type == ixgbe_mac_82598EB)
+		return;
+
+	ipsec = kzalloc(sizeof(*ipsec), GFP_KERNEL);
+	if (!ipsec)
+		goto err1;
+	hash_init(ipsec->rx_sa_list);
+
+	size = sizeof(struct rx_sa) * IXGBE_IPSEC_MAX_SA_COUNT;
+	ipsec->rx_tbl = kzalloc(size, GFP_KERNEL);
+	if (!ipsec->rx_tbl)
+		goto err2;
+
+	size = sizeof(struct tx_sa) * IXGBE_IPSEC_MAX_SA_COUNT;
+	ipsec->tx_tbl = kzalloc(size, GFP_KERNEL);
+	if (!ipsec->tx_tbl)
+		goto err2;
+
+	size = sizeof(struct rx_ip_sa) * IXGBE_IPSEC_MAX_RX_IP_COUNT;
+	ipsec->ip_tbl = kzalloc(size, GFP_KERNEL);
+	if (!ipsec->ip_tbl)
+		goto err2;
+
+	ipsec->num_rx_sa = 0;
+	ipsec->num_tx_sa = 0;
+
+	adapter->ipsec = ipsec;
+	ixgbe_ipsec_stop_engine(adapter);
+	ixgbe_ipsec_clear_hw_tables(adapter);
+
+	adapter->netdev->xfrmdev_ops = &ixgbe_xfrmdev_ops;
+	adapter->netdev->features |= NETIF_F_HW_ESP;
+	adapter->netdev->hw_enc_features |= NETIF_F_HW_ESP;
+
+	return;
+
+err2:
+	kfree(ipsec->ip_tbl);
+	kfree(ipsec->rx_tbl);
+	kfree(ipsec->tx_tbl);
+err1:
+	kfree(adapter->ipsec);
+	netdev_err(adapter->netdev, "Unable to allocate memory for SA tables\n");
+}
+
+/**
+ * ixgbe_stop_ipsec_offload - tear down the ipsec offload
+ * @adapter: board private structure
+ **/
+void ixgbe_stop_ipsec_offload(struct ixgbe_adapter *adapter)
+{
+	struct ixgbe_ipsec *ipsec = adapter->ipsec;
+
+	adapter->ipsec = NULL;
+	if (ipsec) {
+		kfree(ipsec->ip_tbl);
+		kfree(ipsec->rx_tbl);
+		kfree(ipsec->tx_tbl);
+		kfree(ipsec);
+	}
+}
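
The key handling in ixgbe_ipsec_parse_proto_keys() above is compact: xfrm delivers the rfc4106(gcm(aes)) material as a single big-endian blob whose alg_key_len is in bits, and a 160-bit blob is a 128-bit AES key followed by a 32-bit salt. A minimal standalone sketch of that split, using names of our own invention rather than driver API:

#include <stdint.h>
#include <string.h>

struct demo_sa_key {
	uint32_t key[4];	/* 128-bit cipher key */
	uint32_t salt;		/* trailing 32-bit salt, 0 if absent */
};

/* split an rfc4106 key blob; key_len_bits mirrors alg_key_len */
static int demo_split_key(const uint8_t *blob, int key_len_bits,
			  struct demo_sa_key *out)
{
	if (key_len_bits != 128 && key_len_bits != 160)
		return -1;			/* unsupported layout */

	memcpy(out->key, blob, 16);		/* first 16 bytes: AES key */
	if (key_len_bits == 160)
		memcpy(&out->salt, blob + 16, 4);	/* last 4 bytes */
	else
		out->salt = 0;
	return 0;
}

As in the driver, no byteswapping is needed because the blob already arrives in big-endian order.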
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_ipsec.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_ipsec.h
new file mode 100644
index 000000000000..da3ce7849e85
--- /dev/null
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_ipsec.h
@@ -0,0 +1,93 @@
+/*******************************************************************************
+
+  Intel 10 Gigabit PCI Express Linux driver
+  Copyright(c) 2017 Oracle and/or its affiliates. All rights reserved.
+
+  This program is free software; you can redistribute it and/or modify it
+  under the terms and conditions of the GNU General Public License,
+  version 2, as published by the Free Software Foundation.
+
+  This program is distributed in the hope it will be useful, but WITHOUT
+  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+  more details.
+
+  You should have received a copy of the GNU General Public License along with
+  this program.  If not, see <http://www.gnu.org/licenses/>.
+
+  The full GNU General Public License is included in this distribution in
+  the file called "COPYING".
+
+  Contact Information:
+  Linux NICS <linux.nics@intel.com>
+  e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+
+*******************************************************************************/
+
+#ifndef _IXGBE_IPSEC_H_
+#define _IXGBE_IPSEC_H_
+
+#define IXGBE_IPSEC_MAX_SA_COUNT	1024
+#define IXGBE_IPSEC_MAX_RX_IP_COUNT	128
+#define IXGBE_IPSEC_BASE_RX_INDEX	0
+#define IXGBE_IPSEC_BASE_TX_INDEX	IXGBE_IPSEC_MAX_SA_COUNT
+
+#define IXGBE_RXTXIDX_IPS_EN		0x00000001
+#define IXGBE_RXIDX_TBL_SHIFT		1
+enum ixgbe_ipsec_tbl_sel {
+	ips_rx_ip_tbl	=	0x01,
+	ips_rx_spi_tbl	=	0x02,
+	ips_rx_key_tbl	=	0x03,
+};
+
+#define IXGBE_RXTXIDX_IDX_SHIFT		3
+#define IXGBE_RXTXIDX_READ		0x40000000
+#define IXGBE_RXTXIDX_WRITE		0x80000000
+
+#define IXGBE_RXMOD_VALID		0x00000001
+#define IXGBE_RXMOD_PROTO_ESP		0x00000004
+#define IXGBE_RXMOD_DECRYPT		0x00000008
+#define IXGBE_RXMOD_IPV6		0x00000010
+
+struct rx_sa {
+	struct hlist_node hlist;
+	struct xfrm_state *xs;
+	__be32 ipaddr[4];
+	u32 key[4];
+	u32 salt;
+	u32 mode;
+	u8  iptbl_ind;
+	bool used;
+	bool decrypt;
+};
+
+struct rx_ip_sa {
+	__be32 ipaddr[4];
+	u32 ref_cnt;
+	bool used;
+};
+
+struct tx_sa {
+	struct xfrm_state *xs;
+	u32 key[4];
+	u32 salt;
+	bool encrypt;
+	bool used;
+};
+
+struct ixgbe_ipsec_tx_data {
+	u32 flags;
+	u16 trailer_len;
+	u16 sa_idx;
+};
+
+struct ixgbe_ipsec {
+	u16 num_rx_sa;
+	u16 num_tx_sa;
+	struct rx_ip_sa *ip_tbl;
+	struct rx_sa *rx_tbl;
+	struct tx_sa *tx_tbl;
+	DECLARE_HASHTABLE(rx_sa_list, 10);
+};
+#endif /* _IXGBE_IPSEC_H_ */
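
The constants above imply a flat offload_handle layout: Rx SAs get handles 0 through 1023 and Tx SAs 1024 through 2047, which is how ixgbe_ipsec_del_sa() and ixgbe_ipsec_tx() in the previous file recover a table index from xs->xso.offload_handle. A hedged sketch of that mapping, with demo names rather than the driver's:

#define DEMO_MAX_SA	1024		/* mirrors IXGBE_IPSEC_MAX_SA_COUNT */
#define DEMO_BASE_RX	0		/* Rx handles: [0, 1023] */
#define DEMO_BASE_TX	DEMO_MAX_SA	/* Tx handles: [1024, 2047] */

static inline int demo_handle_is_tx(unsigned long handle)
{
	return handle >= DEMO_BASE_TX;
}

static inline unsigned int demo_handle_to_sa_idx(unsigned long handle)
{
	return handle - (demo_handle_is_tx(handle) ? DEMO_BASE_TX
						   : DEMO_BASE_RX);
}

The driver itself picks the base from the XFRM_OFFLOAD_INBOUND flag rather than from the handle value, but the two views agree because the bases partition the handle space.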
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c
index 8e2a957aca18..4242f0213e46 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c
@@ -46,8 +46,8 @@ static bool ixgbe_cache_ring_dcb_sriov(struct ixgbe_adapter *adapter)
 #endif /* IXGBE_FCOE */
 	struct ixgbe_ring_feature *vmdq = &adapter->ring_feature[RING_F_VMDQ];
 	int i;
-	u16 reg_idx;
-	u8 tcs = netdev_get_num_tc(adapter->netdev);
+	u16 reg_idx, pool;
+	u8 tcs = adapter->hw_tcs;
 
 	/* verify we have DCB queueing enabled before proceeding */
 	if (tcs <= 1)
@@ -58,12 +58,16 @@ static bool ixgbe_cache_ring_dcb_sriov(struct ixgbe_adapter *adapter)
 		return false;
 
 	/* start at VMDq register offset for SR-IOV enabled setups */
+	pool = 0;
 	reg_idx = vmdq->offset * __ALIGN_MASK(1, ~vmdq->mask);
-	for (i = 0; i < adapter->num_rx_queues; i++, reg_idx++) {
+	for (i = 0, pool = 0; i < adapter->num_rx_queues; i++, reg_idx++) {
 		/* If we are greater than indices move to next pool */
-		if ((reg_idx & ~vmdq->mask) >= tcs)
+		if ((reg_idx & ~vmdq->mask) >= tcs) {
+			pool++;
 			reg_idx = __ALIGN_MASK(reg_idx, ~vmdq->mask);
+		}
 		adapter->rx_ring[i]->reg_idx = reg_idx;
+		adapter->rx_ring[i]->netdev = pool ? NULL : adapter->netdev;
 	}
 
 	reg_idx = vmdq->offset * __ALIGN_MASK(1, ~vmdq->mask);
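
The loop above packs ring register indices pool by pool: once the within-pool index (reg_idx & ~vmdq->mask) reaches the number of traffic classes, the walk bumps the new pool counter and realigns reg_idx to the next pool's base with __ALIGN_MASK(). A self-contained sketch of the walk under assumed sizes (4 queue indices per pool, 2 TCs in use; all names ours):

#include <stdio.h>

/* same shape as the kernel's __ALIGN_MASK(): round x up past mask */
#define DEMO_ALIGN_MASK(x, mask)	(((x) + (mask)) & ~(mask))

int main(void)
{
	unsigned int pool_mask = 0x3;	/* 4 queue indices per pool */
	unsigned int tcs = 2;		/* only 2 traffic classes used */
	unsigned int reg_idx = 0, pool = 0;
	int i;

	for (i = 0; i < 8; i++, reg_idx++) {
		if ((reg_idx & pool_mask) >= tcs) {
			pool++;		/* crossed into the next pool */
			reg_idx = DEMO_ALIGN_MASK(reg_idx, pool_mask);
		}
		printf("ring %d -> pool %u, reg_idx %u\n", i, pool, reg_idx);
	}
	return 0;
}

Rings land at register indices 0,1 then 4,5 then 8,9 while pool counts 0,1,2; that pool number is what lets the hunk hand only pool-0 rings the PF's own netdev.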
@@ -92,6 +96,7 @@ static bool ixgbe_cache_ring_dcb_sriov(struct ixgbe_adapter *adapter)
 		for (i = fcoe->offset; i < adapter->num_rx_queues; i++) {
 			reg_idx = __ALIGN_MASK(reg_idx, ~vmdq->mask) + fcoe_tc;
 			adapter->rx_ring[i]->reg_idx = reg_idx;
+			adapter->rx_ring[i]->netdev = adapter->netdev;
 			reg_idx++;
 		}
 
@@ -111,9 +116,8 @@ static bool ixgbe_cache_ring_dcb_sriov(struct ixgbe_adapter *adapter)
 static void ixgbe_get_first_reg_idx(struct ixgbe_adapter *adapter, u8 tc,
 				    unsigned int *tx, unsigned int *rx)
 {
-	struct net_device *dev = adapter->netdev;
 	struct ixgbe_hw *hw = &adapter->hw;
-	u8 num_tcs = netdev_get_num_tc(dev);
+	u8 num_tcs = adapter->hw_tcs;
 
 	*tx = 0;
 	*rx = 0;
@@ -168,10 +172,9 @@ static void ixgbe_get_first_reg_idx(struct ixgbe_adapter *adapter, u8 tc,
  **/
 static bool ixgbe_cache_ring_dcb(struct ixgbe_adapter *adapter)
 {
-	struct net_device *dev = adapter->netdev;
+	u8 num_tcs = adapter->hw_tcs;
 	unsigned int tx_idx, rx_idx;
 	int tc, offset, rss_i, i;
-	u8 num_tcs = netdev_get_num_tc(dev);
 
 	/* verify we have DCB queueing enabled before proceeding */
 	if (num_tcs <= 1)
@@ -184,6 +187,7 @@ static bool ixgbe_cache_ring_dcb(struct ixgbe_adapter *adapter)
 		for (i = 0; i < rss_i; i++, tx_idx++, rx_idx++) {
 			adapter->tx_ring[offset + i]->reg_idx = tx_idx;
 			adapter->rx_ring[offset + i]->reg_idx = rx_idx;
+			adapter->rx_ring[offset + i]->netdev = adapter->netdev;
 			adapter->tx_ring[offset + i]->dcb_tc = tc;
 			adapter->rx_ring[offset + i]->dcb_tc = tc;
 		}
@@ -208,14 +212,15 @@ static bool ixgbe_cache_ring_sriov(struct ixgbe_adapter *adapter)
 #endif /* IXGBE_FCOE */
 	struct ixgbe_ring_feature *vmdq = &adapter->ring_feature[RING_F_VMDQ];
 	struct ixgbe_ring_feature *rss = &adapter->ring_feature[RING_F_RSS];
+	u16 reg_idx, pool;
 	int i;
-	u16 reg_idx;
 
 	/* only proceed if VMDq is enabled */
 	if (!(adapter->flags & IXGBE_FLAG_VMDQ_ENABLED))
 		return false;
 
 	/* start at VMDq register offset for SR-IOV enabled setups */
+	pool = 0;
 	reg_idx = vmdq->offset * __ALIGN_MASK(1, ~vmdq->mask);
 	for (i = 0; i < adapter->num_rx_queues; i++, reg_idx++) {
 #ifdef IXGBE_FCOE
@@ -224,15 +229,20 @@ static bool ixgbe_cache_ring_sriov(struct ixgbe_adapter *adapter)
 			break;
 #endif
 		/* If we are greater than indices move to next pool */
-		if ((reg_idx & ~vmdq->mask) >= rss->indices)
+		if ((reg_idx & ~vmdq->mask) >= rss->indices) {
+			pool++;
 			reg_idx = __ALIGN_MASK(reg_idx, ~vmdq->mask);
+		}
 		adapter->rx_ring[i]->reg_idx = reg_idx;
+		adapter->rx_ring[i]->netdev = pool ? NULL : adapter->netdev;
 	}
 
 #ifdef IXGBE_FCOE
 	/* FCoE uses a linear block of queues so just assigning 1:1 */
-	for (; i < adapter->num_rx_queues; i++, reg_idx++)
+	for (; i < adapter->num_rx_queues; i++, reg_idx++) {
 		adapter->rx_ring[i]->reg_idx = reg_idx;
+		adapter->rx_ring[i]->netdev = adapter->netdev;
+	}
 
 #endif
 	reg_idx = vmdq->offset * __ALIGN_MASK(1, ~vmdq->mask);
@@ -269,8 +279,10 @@ static bool ixgbe_cache_ring_rss(struct ixgbe_adapter *adapter)
 {
 	int i, reg_idx;
 
-	for (i = 0; i < adapter->num_rx_queues; i++)
+	for (i = 0; i < adapter->num_rx_queues; i++) {
 		adapter->rx_ring[i]->reg_idx = i;
+		adapter->rx_ring[i]->netdev = adapter->netdev;
+	}
 	for (i = 0, reg_idx = 0; i < adapter->num_tx_queues; i++, reg_idx++)
 		adapter->tx_ring[i]->reg_idx = reg_idx;
 	for (i = 0; i < adapter->num_xdp_queues; i++, reg_idx++)
@@ -340,7 +352,7 @@ static bool ixgbe_set_dcb_sriov_queues(struct ixgbe_adapter *adapter)
 #ifdef IXGBE_FCOE
 	u16 fcoe_i = 0;
 #endif
-	u8 tcs = netdev_get_num_tc(adapter->netdev);
+	u8 tcs = adapter->hw_tcs;
 
 	/* verify we have DCB queueing enabled before proceeding */
 	if (tcs <= 1)
@@ -350,6 +362,9 @@ static bool ixgbe_set_dcb_sriov_queues(struct ixgbe_adapter *adapter)
 	if (!(adapter->flags & IXGBE_FLAG_SRIOV_ENABLED))
 		return false;
 
+	/* limit VMDq instances on the PF by number of Tx queues */
+	vmdq_i = min_t(u16, vmdq_i, MAX_TX_QUEUES / tcs);
+
 	/* Add starting offset to total pool count */
 	vmdq_i += adapter->ring_feature[RING_F_VMDQ].offset;
 
@@ -437,7 +452,7 @@ static bool ixgbe_set_dcb_queues(struct ixgbe_adapter *adapter)
 	int tcs;
 
 	/* Map queue offset and counts onto allocated tx queues */
-	tcs = netdev_get_num_tc(dev);
+	tcs = adapter->hw_tcs;
 
 	/* verify we have DCB queueing enabled before proceeding */
 	if (tcs <= 1)
@@ -512,12 +527,14 @@ static bool ixgbe_set_sriov_queues(struct ixgbe_adapter *adapter)
 #ifdef IXGBE_FCOE
 	u16 fcoe_i = 0;
 #endif
-	bool pools = (find_first_zero_bit(&adapter->fwd_bitmask, 32) > 1);
 
 	/* only proceed if SR-IOV is enabled */
 	if (!(adapter->flags & IXGBE_FLAG_SRIOV_ENABLED))
 		return false;
 
+	/* limit l2fwd RSS based on total Tx queue limit */
+	rss_i = min_t(u16, rss_i, MAX_TX_QUEUES / vmdq_i);
+
 	/* Add starting offset to total pool count */
 	vmdq_i += adapter->ring_feature[RING_F_VMDQ].offset;
 
@@ -525,7 +542,7 @@ static bool ixgbe_set_sriov_queues(struct ixgbe_adapter *adapter)
 	vmdq_i = min_t(u16, IXGBE_MAX_VMDQ_INDICES, vmdq_i);
 
 	/* 64 pool mode with 2 queues per pool */
-	if ((vmdq_i > 32) || (vmdq_i > 16 && pools)) {
+	if (vmdq_i > 32) {
 		vmdq_m = IXGBE_82599_VMDQ_2Q_MASK;
 		rss_m = IXGBE_RSS_2Q_MASK;
 		rss_i = min_t(u16, rss_i, 2);
@@ -602,6 +619,10 @@ static bool ixgbe_set_sriov_queues(struct ixgbe_adapter *adapter)
 	}
 
 #endif
+	/* populate TC0 for use by pool 0 */
+	netdev_set_tc_queue(adapter->netdev, 0,
+			    adapter->num_rx_queues_per_pool, 0);
+
 	return true;
 }
 
@@ -701,7 +722,7 @@ static void ixgbe_set_num_queues(struct ixgbe_adapter *adapter)
 	adapter->num_rx_queues = 1;
 	adapter->num_tx_queues = 1;
 	adapter->num_xdp_queues = 0;
-	adapter->num_rx_pools = adapter->num_rx_queues;
+	adapter->num_rx_pools = 1;
 	adapter->num_rx_queues_per_pool = 1;
 
 #ifdef CONFIG_IXGBE_DCB
@@ -834,7 +855,7 @@ static int ixgbe_alloc_q_vector(struct ixgbe_adapter *adapter,
 	int node = NUMA_NO_NODE;
 	int cpu = -1;
 	int ring_count, size;
-	u8 tcs = netdev_get_num_tc(adapter->netdev);
+	u8 tcs = adapter->hw_tcs;
 
 	ring_count = txr_count + rxr_count + xdp_count;
 	size = sizeof(struct ixgbe_q_vector) +
@@ -917,11 +938,7 @@ static int ixgbe_alloc_q_vector(struct ixgbe_adapter *adapter,
 
 		/* apply Tx specific ring traits */
 		ring->count = adapter->tx_ring_count;
-		if (adapter->num_rx_pools > 1)
-			ring->queue_index =
-				txr_idx % adapter->num_rx_queues_per_pool;
-		else
-			ring->queue_index = txr_idx;
+		ring->queue_index = txr_idx;
 
 		/* assign ring to adapter */
 		adapter->tx_ring[txr_idx] = ring;
@@ -991,11 +1008,7 @@ static int ixgbe_alloc_q_vector(struct ixgbe_adapter *adapter,
 #endif /* IXGBE_FCOE */
 		/* apply Rx specific ring traits */
 		ring->count = adapter->rx_ring_count;
-		if (adapter->num_rx_pools > 1)
-			ring->queue_index =
-				rxr_idx % adapter->num_rx_queues_per_pool;
-		else
-			ring->queue_index = rxr_idx;
+		ring->queue_index = rxr_idx;
 
 		/* assign ring to adapter */
 		adapter->rx_ring[rxr_idx] = ring;
@@ -1171,7 +1184,7 @@ static void ixgbe_set_interrupt_capability(struct ixgbe_adapter *adapter)
 	 */
 
 	/* Disable DCB unless we only have a single traffic class */
-	if (netdev_get_num_tc(adapter->netdev) > 1) {
+	if (adapter->hw_tcs > 1) {
 		e_dev_warn("Number of DCB TCs exceeds number of available queues. Disabling DCB support.\n");
 		netdev_reset_tc(adapter->netdev);
 
@@ -1183,6 +1196,7 @@ static void ixgbe_set_interrupt_capability(struct ixgbe_adapter *adapter)
 		adapter->dcb_cfg.pfc_mode_enable = false;
 	}
 
+	adapter->hw_tcs = 0;
 	adapter->dcb_cfg.num_tcs.pg_tcs = 1;
 	adapter->dcb_cfg.num_tcs.pfc_tcs = 1;
 
@@ -1268,7 +1282,7 @@ void ixgbe_clear_interrupt_scheme(struct ixgbe_adapter *adapter)
 }
 
 void ixgbe_tx_ctxtdesc(struct ixgbe_ring *tx_ring, u32 vlan_macip_lens,
-		       u32 fcoe_sof_eof, u32 type_tucmd, u32 mss_l4len_idx)
+		       u32 fceof_saidx, u32 type_tucmd, u32 mss_l4len_idx)
 {
 	struct ixgbe_adv_tx_context_desc *context_desc;
 	u16 i = tx_ring->next_to_use;
@@ -1282,7 +1296,7 @@ void ixgbe_tx_ctxtdesc(struct ixgbe_ring *tx_ring, u32 vlan_macip_lens,
 	type_tucmd |= IXGBE_TXD_CMD_DEXT | IXGBE_ADVTXD_DTYP_CTXT;
 
 	context_desc->vlan_macip_lens	= cpu_to_le32(vlan_macip_lens);
-	context_desc->seqnum_seed	= cpu_to_le32(fcoe_sof_eof);
+	context_desc->fceof_saidx	= cpu_to_le32(fceof_saidx);
 	context_desc->type_tucmd_mlhl	= cpu_to_le32(type_tucmd);
 	context_desc->mss_l4len_idx	= cpu_to_le32(mss_l4len_idx);
 }
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
index 62a18914f00f..0da5aa2c8aba 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
@@ -192,6 +192,13 @@ static struct workqueue_struct *ixgbe_wq;
 static bool ixgbe_check_cfg_remove(struct ixgbe_hw *hw, struct pci_dev *pdev);
 static void ixgbe_watchdog_link_is_down(struct ixgbe_adapter *);
 
+static const struct net_device_ops ixgbe_netdev_ops;
+
+static bool netif_is_ixgbe(struct net_device *dev)
+{
+	return dev && (dev->netdev_ops == &ixgbe_netdev_ops);
+}
+
 static int ixgbe_read_pci_cfg_word_parent(struct ixgbe_adapter *adapter,
 					  u32 reg, u16 *value)
 {
@@ -1064,24 +1071,12 @@ static u64 ixgbe_get_tx_completed(struct ixgbe_ring *ring)
 
 static u64 ixgbe_get_tx_pending(struct ixgbe_ring *ring)
 {
-	struct ixgbe_adapter *adapter;
-	struct ixgbe_hw *hw;
-	u32 head, tail;
+	unsigned int head, tail;
 
-	if (ring->l2_accel_priv)
-		adapter = ring->l2_accel_priv->real_adapter;
-	else
-		adapter = netdev_priv(ring->netdev);
+	head = ring->next_to_clean;
+	tail = ring->next_to_use;
 
-	hw = &adapter->hw;
-	head = IXGBE_READ_REG(hw, IXGBE_TDH(ring->reg_idx));
-	tail = IXGBE_READ_REG(hw, IXGBE_TDT(ring->reg_idx));
-
-	if (head != tail)
-		return (head < tail) ?
-			tail - head : (tail + ring->count - head);
-
-	return 0;
+	return ((head <= tail) ? tail : tail + ring->count) - head;
 }
 
 static inline bool ixgbe_check_tx_hang(struct ixgbe_ring *tx_ring)
@@ -1133,6 +1128,9 @@ static void ixgbe_tx_timeout_reset(struct ixgbe_adapter *adapter)
 
 /**
  * ixgbe_tx_maxrate - callback to set the maximum per-queue bitrate
+ * @netdev: network interface device structure
+ * @queue_index: Tx queue to set
+ * @maxrate: desired maximum transmit bitrate
  **/
 static int ixgbe_tx_maxrate(struct net_device *netdev,
 			    int queue_index, u32 maxrate)
@@ -1173,7 +1171,7 @@ static bool ixgbe_clean_tx_irq(struct ixgbe_q_vector *q_vector,
 	struct ixgbe_adapter *adapter = q_vector->adapter;
 	struct ixgbe_tx_buffer *tx_buffer;
 	union ixgbe_adv_tx_desc *tx_desc;
-	unsigned int total_bytes = 0, total_packets = 0;
+	unsigned int total_bytes = 0, total_packets = 0, total_ipsec = 0;
 	unsigned int budget = q_vector->tx.work_limit;
 	unsigned int i = tx_ring->next_to_clean;
 
@@ -1204,6 +1202,8 @@ static bool ixgbe_clean_tx_irq(struct ixgbe_q_vector *q_vector,
 		/* update the statistics for this packet */
 		total_bytes += tx_buffer->bytecount;
 		total_packets += tx_buffer->gso_segs;
+		if (tx_buffer->tx_flags & IXGBE_TX_FLAGS_IPSEC)
+			total_ipsec++;
 
 		/* free the skb */
 		if (ring_is_xdp(tx_ring))
@@ -1266,6 +1266,7 @@ static bool ixgbe_clean_tx_irq(struct ixgbe_q_vector *q_vector,
 	u64_stats_update_end(&tx_ring->syncp);
 	q_vector->tx.total_bytes += total_bytes;
 	q_vector->tx.total_packets += total_packets;
+	adapter->tx_ipsec += total_ipsec;
 
 	if (check_for_tx_hang(tx_ring) && ixgbe_check_tx_hang(tx_ring)) {
 		/* schedule immediate reset if we believe we hung */
@@ -1754,9 +1755,18 @@ static void ixgbe_process_skb_fields(struct ixgbe_ring *rx_ring,
 		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vid);
 	}
 
-	skb_record_rx_queue(skb, rx_ring->queue_index);
+	if (ixgbe_test_staterr(rx_desc, IXGBE_RXDADV_STAT_SECP))
+		ixgbe_ipsec_rx(rx_ring, rx_desc, skb);
 
 	skb->protocol = eth_type_trans(skb, dev);
+
+	/* record Rx queue, or update MACVLAN statistics */
+	if (netif_is_ixgbe(dev))
+		skb_record_rx_queue(skb, rx_ring->queue_index);
+	else
+		macvlan_count_rx(netdev_priv(dev), skb->len + ETH_HLEN, true,
+				 (skb->pkt_type == PACKET_BROADCAST) ||
+				 (skb->pkt_type == PACKET_MULTICAST));
 }
 
 static void ixgbe_rx_skb(struct ixgbe_q_vector *q_vector,
@@ -1921,10 +1931,13 @@ static bool ixgbe_cleanup_headers(struct ixgbe_ring *rx_ring,
 	if (IS_ERR(skb))
 		return true;
 
-	/* verify that the packet does not have any known errors */
-	if (unlikely(ixgbe_test_staterr(rx_desc,
-					IXGBE_RXDADV_ERR_FRAME_ERR_MASK) &&
-	    !(netdev->features & NETIF_F_RXALL))) {
+	/* Verify netdev is present, and that packet does not have any
+	 * errors that would be unacceptable to the netdev.
+	 */
+	if (!netdev ||
+	    (unlikely(ixgbe_test_staterr(rx_desc,
+					 IXGBE_RXDADV_ERR_FRAME_ERR_MASK) &&
+	     !(netdev->features & NETIF_F_RXALL)))) {
 		dev_kfree_skb_any(skb);
 		return true;
 	}
@@ -2021,8 +2034,8 @@ static bool ixgbe_can_reuse_rx_page(struct ixgbe_rx_buffer *rx_buffer)
  * ixgbe_add_rx_frag - Add contents of Rx buffer to sk_buff
  * @rx_ring: rx descriptor ring to transact packets on
  * @rx_buffer: buffer containing page to add
- * @rx_desc: descriptor containing length of buffer written by hardware
  * @skb: sk_buff to place the data into
+ * @size: size of data in rx_buffer
  *
  * This function will add the data contained in rx_buffer->page to the skb.
  * This is done either through a direct copy if the data in the buffer is
@@ -2318,12 +2331,14 @@ static int ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector,
 #endif /* IXGBE_FCOE */
 	u16 cleaned_count = ixgbe_desc_unused(rx_ring);
 	bool xdp_xmit = false;
+	struct xdp_buff xdp;
+
+	xdp.rxq = &rx_ring->xdp_rxq;
 
 	while (likely(total_rx_packets < budget)) {
 		union ixgbe_adv_rx_desc *rx_desc;
 		struct ixgbe_rx_buffer *rx_buffer;
 		struct sk_buff *skb;
-		struct xdp_buff xdp;
 		unsigned int size;
 
 		/* return some buffers to hardware, one at a time is too slow */
@@ -2515,13 +2530,6 @@ static void ixgbe_configure_msix(struct ixgbe_adapter *adapter)
 	IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIAC, mask);
 }
 
-enum latency_range {
-	lowest_latency = 0,
-	low_latency = 1,
-	bulk_latency = 2,
-	latency_invalid = 255
-};
-
 /**
  * ixgbe_update_itr - update the dynamic ITR value based on statistics
  * @q_vector: structure containing interrupt and ring information
@@ -2534,8 +2542,6 @@ enum latency_range {
  *      based on theoretical maximum wire speed and thresholds were set based
  *      on testing data as well as attempting to minimize response time
  *      while increasing bulk throughput.
- *      this functionality is controlled by the InterruptThrottleRate module
- *      parameter (see ixgbe_param.c)
  **/
 static void ixgbe_update_itr(struct ixgbe_q_vector *q_vector,
 			     struct ixgbe_ring_container *ring_container)
@@ -3020,6 +3026,8 @@ static inline void ixgbe_irq_disable_queues(struct ixgbe_adapter *adapter,
 /**
  * ixgbe_irq_enable - Enable default interrupt generation settings
  * @adapter: board private structure
+ * @queues: enable irqs for queues
+ * @flush: flush register write
  **/
 static inline void ixgbe_irq_enable(struct ixgbe_adapter *adapter, bool queues,
 				    bool flush)
@@ -3475,6 +3483,7 @@ static inline void ixgbe_irq_disable(struct ixgbe_adapter *adapter)
 
 /**
  * ixgbe_configure_msi_and_legacy - Initialize PIN (INTA...) and MSI interrupts
+ * @adapter: board private structure
  *
  **/
 static void ixgbe_configure_msi_and_legacy(struct ixgbe_adapter *adapter)
@@ -3586,7 +3595,7 @@ static void ixgbe_setup_mtqc(struct ixgbe_adapter *adapter)
 {
 	struct ixgbe_hw *hw = &adapter->hw;
 	u32 rttdcs, mtqc;
-	u8 tcs = netdev_get_num_tc(adapter->netdev);
+	u8 tcs = adapter->hw_tcs;
 
 	if (hw->mac.type == ixgbe_mac_82598EB)
 		return;
@@ -3853,16 +3862,20 @@ static void ixgbe_store_vfreta(struct ixgbe_adapter *adapter)
 	u32 i, reta_entries = ixgbe_rss_indir_tbl_entries(adapter);
 	struct ixgbe_hw *hw = &adapter->hw;
 	u32 vfreta = 0;
-	unsigned int pf_pool = adapter->num_vfs;
 
 	/* Write redirection table to HW */
 	for (i = 0; i < reta_entries; i++) {
+		u16 pool = adapter->num_rx_pools;
+
 		vfreta |= (u32)adapter->rss_indir_tbl[i] << (i & 0x3) * 8;
-		if ((i & 3) == 3) {
-			IXGBE_WRITE_REG(hw, IXGBE_PFVFRETA(i >> 2, pf_pool),
+		if ((i & 3) != 3)
+			continue;
+
+		while (pool--)
+			IXGBE_WRITE_REG(hw,
+					IXGBE_PFVFRETA(i >> 2, VMDQ_P(pool)),
 					vfreta);
-			vfreta = 0;
-		}
+		vfreta = 0;
 	}
 }
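
The vfreta accumulation above packs four 8-bit redirection-table entries into one 32-bit word and flushes it on every fourth entry, now repeated once per Rx pool. A standalone sketch of the packing, with a printf standing in for the register write and hypothetical names throughout:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint8_t reta[8] = { 0, 1, 2, 3, 0, 1, 2, 3 };
	uint32_t word = 0;
	int i;

	for (i = 0; i < 8; i++) {
		word |= (uint32_t)reta[i] << (i & 0x3) * 8;
		if ((i & 3) != 3)
			continue;
		/* every fourth entry: flush the packed word */
		printf("reg %d = 0x%08x\n", i >> 2, word);
		word = 0;
	}
	return 0;
}

Each printed word is 0x03020100: entry 0 sits in the low byte, entry 3 in the high byte, matching the (i & 0x3) * 8 shift in the hunk.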
 
@@ -3899,13 +3912,17 @@ static void ixgbe_setup_vfreta(struct ixgbe_adapter *adapter)
 {
 	struct ixgbe_hw *hw = &adapter->hw;
 	u16 rss_i = adapter->ring_feature[RING_F_RSS].indices;
-	unsigned int pf_pool = adapter->num_vfs;
 	int i, j;
 
 	/* Fill out hash function seeds */
-	for (i = 0; i < 10; i++)
-		IXGBE_WRITE_REG(hw, IXGBE_PFVFRSSRK(i, pf_pool),
-				*(adapter->rss_key + i));
+	for (i = 0; i < 10; i++) {
+		u16 pool = adapter->num_rx_pools;
+
+		while (pool--)
+			IXGBE_WRITE_REG(hw,
+					IXGBE_PFVFRSSRK(i, VMDQ_P(pool)),
+					*(adapter->rss_key + i));
+	}
 
 	/* Fill out the redirection table */
 	for (i = 0, j = 0; i < 64; i++, j++) {
@@ -3933,7 +3950,7 @@ static void ixgbe_setup_mrqc(struct ixgbe_adapter *adapter)
 		if (adapter->ring_feature[RING_F_RSS].mask)
 			mrqc = IXGBE_MRQC_RSSEN;
 	} else {
-		u8 tcs = netdev_get_num_tc(adapter->netdev);
+		u8 tcs = adapter->hw_tcs;
 
 		if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) {
 			if (tcs > 4)
@@ -3971,7 +3988,7 @@ static void ixgbe_setup_mrqc(struct ixgbe_adapter *adapter)
 
 	if ((hw->mac.type >= ixgbe_mac_X550) &&
 	    (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED)) {
-		unsigned int pf_pool = adapter->num_vfs;
+		u16 pool = adapter->num_rx_pools;
 
 		/* Enable VF RSS mode */
 		mrqc |= IXGBE_MRQC_MULTIPLE_RSS;
@@ -3981,7 +3998,11 @@ static void ixgbe_setup_mrqc(struct ixgbe_adapter *adapter)
 		ixgbe_setup_vfreta(adapter);
 		vfmrqc = IXGBE_MRQC_RSSEN;
 		vfmrqc |= rss_field;
-		IXGBE_WRITE_REG(hw, IXGBE_PFVFMRQC(pf_pool), vfmrqc);
+
+		while (pool--)
+			IXGBE_WRITE_REG(hw,
+					IXGBE_PFVFMRQC(VMDQ_P(pool)),
+					vfmrqc);
 	} else {
 		ixgbe_setup_reta(adapter);
 		mrqc |= rss_field;
@@ -3991,8 +4012,8 @@ static void ixgbe_setup_mrqc(struct ixgbe_adapter *adapter)
 
 /**
  * ixgbe_configure_rscctl - enable RSC for the indicated ring
- * @adapter:    address of board private structure
- * @index:      index of ring to set
+ * @adapter: address of board private structure
+ * @ring: structure containing ring specific data
  **/
 static void ixgbe_configure_rscctl(struct ixgbe_adapter *adapter,
 				   struct ixgbe_ring *ring)
@@ -4112,11 +4133,15 @@ void ixgbe_configure_rx_ring(struct ixgbe_adapter *adapter,
 		rxdctl &= ~0x3FFFFF;
 		rxdctl |=  0x080420;
 #if (PAGE_SIZE < 8192)
-	} else {
+	/* RXDCTL.RLPML does not work on 82599 */
+	} else if (hw->mac.type != ixgbe_mac_82599EB) {
 		rxdctl &= ~(IXGBE_RXDCTL_RLPMLMASK |
 			    IXGBE_RXDCTL_RLPML_EN);
 
-		/* Limit the maximum frame size so we don't overrun the skb */
+		/* Limit the maximum frame size so we don't overrun the skb.
+		 * This can happen in SRIOV mode when the MTU of the VF is
+		 * higher than the MTU of the PF.
+		 */
 		if (ring_uses_build_skb(ring) &&
 		    !test_bit(__IXGBE_RX_3K_BUFFER, &ring->state))
 			rxdctl |= IXGBE_MAX_2K_FRAME_BUILD_SKB |
@@ -4144,7 +4169,7 @@ static void ixgbe_setup_psrtype(struct ixgbe_adapter *adapter)
 {
 	struct ixgbe_hw *hw = &adapter->hw;
 	int rss_i = adapter->ring_feature[RING_F_RSS].indices;
-	u16 pool;
+	u16 pool = adapter->num_rx_pools;
 
 	/* PSRTYPE must be initialized in non 82598 adapters */
 	u32 psrtype = IXGBE_PSRTYPE_TCPHDR |
@@ -4161,7 +4186,7 @@ static void ixgbe_setup_psrtype(struct ixgbe_adapter *adapter)
 	else if (rss_i > 1)
 		psrtype |= 1u << 29;
 
-	for_each_set_bit(pool, &adapter->fwd_bitmask, 32)
+	while (pool--)
 		IXGBE_WRITE_REG(hw, IXGBE_PSRTYPE(VMDQ_P(pool)), psrtype);
 }
 
@@ -4488,8 +4513,9 @@ static void ixgbe_vlan_strip_disable(struct ixgbe_adapter *adapter)
 		for (i = 0; i < adapter->num_rx_queues; i++) {
 			struct ixgbe_ring *ring = adapter->rx_ring[i];
 
-			if (ring->l2_accel_priv)
+			if (!netif_is_ixgbe(ring->netdev))
 				continue;
+
 			j = ring->reg_idx;
 			vlnctrl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(j));
 			vlnctrl &= ~IXGBE_RXDCTL_VME;
@@ -4525,8 +4551,9 @@ static void ixgbe_vlan_strip_enable(struct ixgbe_adapter *adapter)
 		for (i = 0; i < adapter->num_rx_queues; i++) {
 			struct ixgbe_ring *ring = adapter->rx_ring[i];
 
-			if (ring->l2_accel_priv)
+			if (!netif_is_ixgbe(ring->netdev))
 				continue;
+
 			j = ring->reg_idx;
 			vlnctrl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(j));
 			vlnctrl |= IXGBE_RXDCTL_VME;
@@ -4846,9 +4873,11 @@ int ixgbe_del_mac_filter(struct ixgbe_adapter *adapter,
 
 	return -ENOMEM;
 }
+
 /**
  * ixgbe_write_uc_addr_list - write unicast addresses to RAR table
  * @netdev: network interface device structure
+ * @vfn: pool to associate with unicast addresses
  *
  * Writes unicast address list to the RAR table.
  * Returns: -ENOMEM on failure/insufficient address space
@@ -5195,7 +5224,7 @@ static int ixgbe_lpbthresh(struct ixgbe_adapter *adapter, int pb)
 static void ixgbe_pbthresh_setup(struct ixgbe_adapter *adapter)
 {
 	struct ixgbe_hw *hw = &adapter->hw;
-	int num_tc = netdev_get_num_tc(adapter->netdev);
+	int num_tc = adapter->hw_tcs;
 	int i;
 
 	if (!num_tc)
@@ -5218,7 +5247,7 @@ static void ixgbe_configure_pb(struct ixgbe_adapter *adapter)
 {
 	struct ixgbe_hw *hw = &adapter->hw;
 	int hdrm;
-	u8 tc = netdev_get_num_tc(adapter->netdev);
+	u8 tc = adapter->hw_tcs;
 
 	if (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE ||
 	    adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE)
@@ -5277,29 +5306,6 @@ static void ixgbe_macvlan_set_rx_mode(struct net_device *dev, unsigned int pool,
 	IXGBE_WRITE_REG(hw, IXGBE_VMOLR(pool), vmolr);
 }
 
-static void ixgbe_fwd_psrtype(struct ixgbe_fwd_adapter *vadapter)
-{
-	struct ixgbe_adapter *adapter = vadapter->real_adapter;
-	int rss_i = adapter->num_rx_queues_per_pool;
-	struct ixgbe_hw *hw = &adapter->hw;
-	u16 pool = vadapter->pool;
-	u32 psrtype = IXGBE_PSRTYPE_TCPHDR |
-		      IXGBE_PSRTYPE_UDPHDR |
-		      IXGBE_PSRTYPE_IPV4HDR |
-		      IXGBE_PSRTYPE_L2HDR |
-		      IXGBE_PSRTYPE_IPV6HDR;
-
-	if (hw->mac.type == ixgbe_mac_82598EB)
-		return;
-
-	if (rss_i > 3)
-		psrtype |= 2u << 29;
-	else if (rss_i > 1)
-		psrtype |= 1u << 29;
-
-	IXGBE_WRITE_REG(hw, IXGBE_PSRTYPE(VMDQ_P(pool)), psrtype);
-}
-
 /**
  * ixgbe_clean_rx_ring - Free Rx Buffers per Queue
  * @rx_ring: ring to free buffers from
@@ -5352,96 +5358,45 @@ static void ixgbe_clean_rx_ring(struct ixgbe_ring *rx_ring)
 	rx_ring->next_to_use = 0;
 }
 
-static void ixgbe_disable_fwd_ring(struct ixgbe_fwd_adapter *vadapter,
-				   struct ixgbe_ring *rx_ring)
-{
-	struct ixgbe_adapter *adapter = vadapter->real_adapter;
-	int index = rx_ring->queue_index + vadapter->rx_base_queue;
-
-	/* shutdown specific queue receive and wait for dma to settle */
-	ixgbe_disable_rx_queue(adapter, rx_ring);
-	usleep_range(10000, 20000);
-	ixgbe_irq_disable_queues(adapter, BIT_ULL(index));
-	ixgbe_clean_rx_ring(rx_ring);
-	rx_ring->l2_accel_priv = NULL;
-}
-
-static int ixgbe_fwd_ring_down(struct net_device *vdev,
-			       struct ixgbe_fwd_adapter *accel)
-{
-	struct ixgbe_adapter *adapter = accel->real_adapter;
-	unsigned int rxbase = accel->rx_base_queue;
-	unsigned int txbase = accel->tx_base_queue;
-	int i;
-
-	netif_tx_stop_all_queues(vdev);
-
-	for (i = 0; i < adapter->num_rx_queues_per_pool; i++) {
-		ixgbe_disable_fwd_ring(accel, adapter->rx_ring[rxbase + i]);
-		adapter->rx_ring[rxbase + i]->netdev = adapter->netdev;
-	}
-
-	for (i = 0; i < adapter->num_rx_queues_per_pool; i++) {
-		adapter->tx_ring[txbase + i]->l2_accel_priv = NULL;
-		adapter->tx_ring[txbase + i]->netdev = adapter->netdev;
-	}
-
-
-	return 0;
-}
-
 static int ixgbe_fwd_ring_up(struct net_device *vdev,
 			     struct ixgbe_fwd_adapter *accel)
 {
 	struct ixgbe_adapter *adapter = accel->real_adapter;
-	unsigned int rxbase, txbase, queues;
-	int i, baseq, err = 0;
+	int i, baseq, err;
 
-	if (!test_bit(accel->pool, &adapter->fwd_bitmask))
+	if (!test_bit(accel->pool, adapter->fwd_bitmask))
 		return 0;
 
 	baseq = accel->pool * adapter->num_rx_queues_per_pool;
-	netdev_dbg(vdev, "pool %i:%i queues %i:%i VSI bitmask %lx\n",
+	netdev_dbg(vdev, "pool %i:%i queues %i:%i\n",
 		   accel->pool, adapter->num_rx_pools,
-		   baseq, baseq + adapter->num_rx_queues_per_pool,
-		   adapter->fwd_bitmask);
+		   baseq, baseq + adapter->num_rx_queues_per_pool);
 
 	accel->netdev = vdev;
-	accel->rx_base_queue = rxbase = baseq;
-	accel->tx_base_queue = txbase = baseq;
+	accel->rx_base_queue = baseq;
+	accel->tx_base_queue = baseq;
 
 	for (i = 0; i < adapter->num_rx_queues_per_pool; i++)
-		ixgbe_disable_fwd_ring(accel, adapter->rx_ring[rxbase + i]);
+		adapter->rx_ring[baseq + i]->netdev = vdev;
 
-	for (i = 0; i < adapter->num_rx_queues_per_pool; i++) {
-		adapter->rx_ring[rxbase + i]->netdev = vdev;
-		adapter->rx_ring[rxbase + i]->l2_accel_priv = accel;
-		ixgbe_configure_rx_ring(adapter, adapter->rx_ring[rxbase + i]);
-	}
+	/* Guarantee all rings are updated before we update the
+	 * MAC address filter.
+	 */
+	wmb();
 
-	for (i = 0; i < adapter->num_rx_queues_per_pool; i++) {
-		adapter->tx_ring[txbase + i]->netdev = vdev;
-		adapter->tx_ring[txbase + i]->l2_accel_priv = accel;
+	/* ixgbe_add_mac_filter will return an index if it succeeds, so we
+	 * need to only treat it as an error value if it is negative.
+	 */
+	err = ixgbe_add_mac_filter(adapter, vdev->dev_addr,
+				   VMDQ_P(accel->pool));
+	if (err >= 0) {
+		ixgbe_macvlan_set_rx_mode(vdev, accel->pool, adapter);
+		return 0;
 	}
 
-	queues = min_t(unsigned int,
-		       adapter->num_rx_queues_per_pool, vdev->num_tx_queues);
-	err = netif_set_real_num_tx_queues(vdev, queues);
-	if (err)
-		goto fwd_queue_err;
-
-	err = netif_set_real_num_rx_queues(vdev, queues);
-	if (err)
-		goto fwd_queue_err;
-
-	if (is_valid_ether_addr(vdev->dev_addr))
-		ixgbe_add_mac_filter(adapter, vdev->dev_addr, accel->pool);
+	for (i = 0; i < adapter->num_rx_queues_per_pool; i++)
+		adapter->rx_ring[baseq + i]->netdev = NULL;
 
-	ixgbe_fwd_psrtype(accel);
-	ixgbe_macvlan_set_rx_mode(vdev, accel->pool, adapter);
-	return err;
-fwd_queue_err:
-	ixgbe_fwd_ring_down(vdev, accel);
 	return err;
 }
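
Note: the rewritten ixgbe_fwd_ring_up() leans on two conventions: the ring
->netdev pointers are published before the unicast filter is added (the
wmb() orders those writes), and ixgbe_add_mac_filter() returns a
non-negative filter index on success, so only negative values are errors.
A minimal user-space sketch of the same publish-then-enable pattern;
add_filter() and the ring array are hypothetical stand-ins:

    #include <stdatomic.h>

    struct ring { void *netdev; };

    /* hypothetical: returns a filter index >= 0, or -errno on failure */
    extern int add_filter(const unsigned char *addr, int pool);

    static int bring_up(struct ring *rings, int nrings, void *vdev,
                        const unsigned char *addr, int pool)
    {
            int i, err;

            for (i = 0; i < nrings; i++)
                    rings[i].netdev = vdev;

            /* publish ring updates before traffic can match the filter;
             * the kernel uses wmb(), a release fence models it here */
            atomic_thread_fence(memory_order_release);

            err = add_filter(addr, pool);
            if (err >= 0)                   /* an index, not an error */
                    return 0;

            for (i = 0; i < nrings; i++)    /* unwind on failure */
                    rings[i].netdev = NULL;
            return err;
    }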
 
@@ -5480,6 +5435,7 @@ static void ixgbe_configure(struct ixgbe_adapter *adapter)
 
 	ixgbe_set_rx_mode(adapter->netdev);
 	ixgbe_restore_vlan(adapter);
+	ixgbe_ipsec_restore(adapter);
 
 	switch (hw->mac.type) {
 	case ixgbe_mac_82599EB:
@@ -5917,21 +5873,6 @@ static void ixgbe_fdir_filter_exit(struct ixgbe_adapter *adapter)
 	spin_unlock(&adapter->fdir_perfect_lock);
 }
 
-static int ixgbe_disable_macvlan(struct net_device *upper, void *data)
-{
-	if (netif_is_macvlan(upper)) {
-		struct macvlan_dev *vlan = netdev_priv(upper);
-
-		if (vlan->fwd_priv) {
-			netif_tx_stop_all_queues(upper);
-			netif_carrier_off(upper);
-			netif_tx_disable(upper);
-		}
-	}
-
-	return 0;
-}
-
 void ixgbe_down(struct ixgbe_adapter *adapter)
 {
 	struct net_device *netdev = adapter->netdev;
@@ -5961,10 +5902,6 @@ void ixgbe_down(struct ixgbe_adapter *adapter)
 	netif_carrier_off(netdev);
 	netif_tx_disable(netdev);
 
-	/* disable any upper devices */
-	netdev_walk_all_upper_dev_rcu(adapter->netdev,
-				      ixgbe_disable_macvlan, NULL);
-
 	ixgbe_irq_disable(adapter);
 
 	ixgbe_napi_disable_all(adapter);
@@ -6119,6 +6056,7 @@ static void ixgbe_init_dcb(struct ixgbe_adapter *adapter)
 /**
  * ixgbe_sw_init - Initialize general software structures (struct ixgbe_adapter)
  * @adapter: board private structure to initialize
+ * @ii: pointer to ixgbe_info for device
  *
  * ixgbe_sw_init initializes the Adapter private data structure.
  * Fields are initialized based on PCI device information and
@@ -6153,6 +6091,7 @@ static int ixgbe_sw_init(struct ixgbe_adapter *adapter,
 	fdir = min_t(int, IXGBE_MAX_FDIR_INDICES, num_online_cpus());
 	adapter->ring_feature[RING_F_FDIR].limit = fdir;
 	adapter->fdir_pballoc = IXGBE_FDIR_PBALLOC_64K;
+	adapter->ring_feature[RING_F_VMDQ].limit = 1;
 #ifdef CONFIG_IXGBE_DCA
 	adapter->flags |= IXGBE_FLAG_DCA_CAPABLE;
 #endif
@@ -6302,7 +6241,7 @@ static int ixgbe_sw_init(struct ixgbe_adapter *adapter,
 	}
 
 	/* PF holds first pool slot */
-	set_bit(0, &adapter->fwd_bitmask);
+	set_bit(0, adapter->fwd_bitmask);
 	set_bit(__IXGBE_DOWN, &adapter->state);
 
 	return 0;
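
Note: the dropped '&' in set_bit(0, adapter->fwd_bitmask) implies that
fwd_bitmask is no longer a single unsigned long but a real bitmap array
(the find_first_zero_bit()/find_last_bit() callers later in this patch
lose their '&' the same way). A sketch of the two forms; the 64-pool
bound is assumed for illustration:

    /* old: one word, passed by address */
    unsigned long fwd_bitmask;
    set_bit(0, &fwd_bitmask);

    /* new: DECLARE_BITMAP() array, which decays to unsigned long * */
    DECLARE_BITMAP(fwd_bitmask, 64);
    set_bit(0, fwd_bitmask);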
@@ -6402,6 +6341,7 @@ err_setup_tx:
 
 /**
  * ixgbe_setup_rx_resources - allocate Rx resources (Descriptors)
+ * @adapter: pointer to ixgbe_adapter
  * @rx_ring:    rx descriptor ring (for a specific queue) to setup
  *
  * Returns 0 on success, negative on failure
@@ -6444,6 +6384,11 @@ int ixgbe_setup_rx_resources(struct ixgbe_adapter *adapter,
 	rx_ring->next_to_clean = 0;
 	rx_ring->next_to_use = 0;
 
+	/* XDP RX-queue info */
+	if (xdp_rxq_info_reg(&rx_ring->xdp_rxq, adapter->netdev,
+			     rx_ring->queue_index) < 0)
+		goto err;
+
 	rx_ring->xdp_prog = adapter->xdp_prog;
 
 	return 0;
@@ -6541,6 +6486,7 @@ void ixgbe_free_rx_resources(struct ixgbe_ring *rx_ring)
 	ixgbe_clean_rx_ring(rx_ring);
 
 	rx_ring->xdp_prog = NULL;
+	xdp_rxq_info_unreg(&rx_ring->xdp_rxq);
 	vfree(rx_ring->rx_buffer_info);
 	rx_ring->rx_buffer_info = NULL;
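
Note: xdp_rxq_info_reg() and xdp_rxq_info_unreg() must pair over the
ring's lifetime: register in ixgbe_setup_rx_resources() before the ring
can receive, unregister in ixgbe_free_rx_resources() once the ring is
quiesced. A sketch of the symmetry, using the calls as they appear in
the two hunks above:

    /* setup: must succeed before the ring is used for receive */
    if (xdp_rxq_info_reg(&rx_ring->xdp_rxq, adapter->netdev,
                         rx_ring->queue_index) < 0)
            goto err;

    /* teardown: after ixgbe_clean_rx_ring(), before freeing buffers */
    xdp_rxq_info_unreg(&rx_ring->xdp_rxq);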
 
@@ -6646,20 +6592,12 @@ int ixgbe_open(struct net_device *netdev)
 		goto err_req_irq;
 
 	/* Notify the stack of the actual queue counts. */
-	if (adapter->num_rx_pools > 1)
-		queues = adapter->num_rx_queues_per_pool;
-	else
-		queues = adapter->num_tx_queues;
-
+	queues = adapter->num_tx_queues;
 	err = netif_set_real_num_tx_queues(netdev, queues);
 	if (err)
 		goto err_set_queues;
 
-	if (adapter->num_rx_pools > 1 &&
-	    adapter->num_rx_queues > IXGBE_MAX_L2A_QUEUES)
-		queues = IXGBE_MAX_L2A_QUEUES;
-	else
-		queues = adapter->num_rx_queues;
+	queues = adapter->num_rx_queues;
 	err = netif_set_real_num_rx_queues(netdev, queues);
 	if (err)
 		goto err_set_queues;
@@ -6783,7 +6721,7 @@ static int __ixgbe_shutdown(struct pci_dev *pdev, bool *enable_wake)
 	struct ixgbe_adapter *adapter = pci_get_drvdata(pdev);
 	struct net_device *netdev = adapter->netdev;
 	struct ixgbe_hw *hw = &adapter->hw;
-	u32 ctrl, fctrl;
+	u32 ctrl;
 	u32 wufc = adapter->wol;
 #ifdef CONFIG_PM
 	int retval = 0;
@@ -6808,18 +6746,18 @@ static int __ixgbe_shutdown(struct pci_dev *pdev, bool *enable_wake)
 		hw->mac.ops.stop_link_on_d3(hw);
 
 	if (wufc) {
+		u32 fctrl;
+
 		ixgbe_set_rx_mode(netdev);
 
 		/* enable the optics for 82599 SFP+ fiber as we can WoL */
 		if (hw->mac.ops.enable_tx_laser)
 			hw->mac.ops.enable_tx_laser(hw);
 
-		/* turn on all-multi mode if wake on multicast is enabled */
-		if (wufc & IXGBE_WUFC_MC) {
-			fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL);
-			fctrl |= IXGBE_FCTRL_MPE;
-			IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl);
-		}
+		/* enable the reception of multicast packets */
+		fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL);
+		fctrl |= IXGBE_FCTRL_MPE;
+		IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl);
 
 		ctrl = IXGBE_READ_REG(hw, IXGBE_CTRL);
 		ctrl |= IXGBE_CTRL_GIO_DIS;
@@ -7216,7 +7154,6 @@ static void ixgbe_check_hang_subtask(struct ixgbe_adapter *adapter)
 /**
  * ixgbe_watchdog_update_link - update the link status
  * @adapter: pointer to the device adapter structure
- * @link_speed: pointer to a u32 to store the link_speed
  **/
 static void ixgbe_watchdog_update_link(struct ixgbe_adapter *adapter)
 {
@@ -7273,18 +7210,6 @@ static void ixgbe_update_default_up(struct ixgbe_adapter *adapter)
 #endif
 }
 
-static int ixgbe_enable_macvlan(struct net_device *upper, void *data)
-{
-	if (netif_is_macvlan(upper)) {
-		struct macvlan_dev *vlan = netdev_priv(upper);
-
-		if (vlan->fwd_priv)
-			netif_tx_wake_all_queues(upper);
-	}
-
-	return 0;
-}
-
 /**
  * ixgbe_watchdog_link_is_up - update netif_carrier status and
  *                             print link up message
@@ -7338,6 +7263,9 @@ static void ixgbe_watchdog_link_is_up(struct ixgbe_adapter *adapter)
 	case IXGBE_LINK_SPEED_10GB_FULL:
 		speed_str = "10 Gbps";
 		break;
+	case IXGBE_LINK_SPEED_5GB_FULL:
+		speed_str = "5 Gbps";
+		break;
 	case IXGBE_LINK_SPEED_2_5GB_FULL:
 		speed_str = "2.5 Gbps";
 		break;
@@ -7365,12 +7293,6 @@ static void ixgbe_watchdog_link_is_up(struct ixgbe_adapter *adapter)
 	/* enable transmits */
 	netif_tx_wake_all_queues(adapter->netdev);
 
-	/* enable any upper devices */
-	rtnl_lock();
-	netdev_walk_all_upper_dev_rcu(adapter->netdev,
-				      ixgbe_enable_macvlan, NULL);
-	rtnl_unlock();
-
 	/* update the default user priority for VFs */
 	ixgbe_update_default_up(adapter);
 
@@ -7655,6 +7577,7 @@ sfp_out:
 static void ixgbe_sfp_link_config_subtask(struct ixgbe_adapter *adapter)
 {
 	struct ixgbe_hw *hw = &adapter->hw;
+	u32 cap_speed;
 	u32 speed;
 	bool autoneg = false;
 
@@ -7667,16 +7590,14 @@ static void ixgbe_sfp_link_config_subtask(struct ixgbe_adapter *adapter)
 
 	adapter->flags &= ~IXGBE_FLAG_NEED_LINK_CONFIG;
 
-	speed = hw->phy.autoneg_advertised;
-	if ((!speed) && (hw->mac.ops.get_link_capabilities)) {
-		hw->mac.ops.get_link_capabilities(hw, &speed, &autoneg);
+	hw->mac.ops.get_link_capabilities(hw, &cap_speed, &autoneg);
 
-		/* setup the highest link when no autoneg */
-		if (!autoneg) {
-			if (speed & IXGBE_LINK_SPEED_10GB_FULL)
-				speed = IXGBE_LINK_SPEED_10GB_FULL;
-		}
-	}
+	/* advertise highest capable link speed */
+	if (!autoneg && (cap_speed & IXGBE_LINK_SPEED_10GB_FULL))
+		speed = IXGBE_LINK_SPEED_10GB_FULL;
+	else
+		speed = cap_speed & (IXGBE_LINK_SPEED_10GB_FULL |
+				     IXGBE_LINK_SPEED_1GB_FULL);
 
 	if (hw->mac.ops.setup_link)
 		hw->mac.ops.setup_link(hw, speed, true);
@@ -7688,7 +7609,7 @@ static void ixgbe_sfp_link_config_subtask(struct ixgbe_adapter *adapter)
 
 /**
  * ixgbe_service_timer - Timer Call-back
- * @data: pointer to adapter cast into an unsigned long
+ * @t: pointer to timer_list structure
  **/
 static void ixgbe_service_timer(struct timer_list *t)
 {
@@ -7888,10 +7809,12 @@ static inline bool ixgbe_ipv6_csum_is_sctp(struct sk_buff *skb)
 }
 
 static void ixgbe_tx_csum(struct ixgbe_ring *tx_ring,
-			  struct ixgbe_tx_buffer *first)
+			  struct ixgbe_tx_buffer *first,
+			  struct ixgbe_ipsec_tx_data *itd)
 {
 	struct sk_buff *skb = first->skb;
 	u32 vlan_macip_lens = 0;
+	u32 fceof_saidx = 0;
 	u32 type_tucmd = 0;
 
 	if (skb->ip_summed != CHECKSUM_PARTIAL) {
@@ -7932,7 +7855,12 @@ no_csum:
 	vlan_macip_lens |= skb_network_offset(skb) << IXGBE_ADVTXD_MACLEN_SHIFT;
 	vlan_macip_lens |= first->tx_flags & IXGBE_TX_FLAGS_VLAN_MASK;
 
-	ixgbe_tx_ctxtdesc(tx_ring, vlan_macip_lens, 0, type_tucmd, 0);
+	if (first->tx_flags & IXGBE_TX_FLAGS_IPSEC) {
+		fceof_saidx |= itd->sa_idx;
+		type_tucmd |= itd->flags | itd->trailer_len;
+	}
+
+	ixgbe_tx_ctxtdesc(tx_ring, vlan_macip_lens, fceof_saidx, type_tucmd, 0);
 }
 
 #define IXGBE_SET_FLAG(_input, _flag, _result) \
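
Note: IXGBE_SET_FLAG() (truncated above by the hunk boundary) translates
a flag from one bit position to another without branching on the data
path: mask the input, then multiply or divide by the ratio of the two
power-of-two flag values. A compilable model with hypothetical flag
values:

    #include <assert.h>
    #include <stdint.h>

    /* works because FROM and TO are single-bit masks, so the ratio is
     * an exact power-of-two factor */
    #define SET_FLAG(in, from, to)                                  \
            ((to) >= (from) ?                                       \
             (((in) & (from)) * ((to) / (from))) :                  \
             (((in) & (from)) / ((from) / (to))))

    int main(void)
    {
            uint32_t tx_flags = 0x2;        /* hypothetical CSUM flag */

            assert(SET_FLAG(tx_flags, 0x2u, 0x100u) == 0x100);
            return 0;
    }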
@@ -7975,11 +7903,16 @@ static void ixgbe_tx_olinfo_status(union ixgbe_adv_tx_desc *tx_desc,
 					IXGBE_TX_FLAGS_CSUM,
 					IXGBE_ADVTXD_POPTS_TXSM);
 
-	/* enble IPv4 checksum for TSO */
+	/* enable IPv4 checksum for TSO */
 	olinfo_status |= IXGBE_SET_FLAG(tx_flags,
 					IXGBE_TX_FLAGS_IPV4,
 					IXGBE_ADVTXD_POPTS_IXSM);
 
+	/* enable IPsec */
+	olinfo_status |= IXGBE_SET_FLAG(tx_flags,
+					IXGBE_TX_FLAGS_IPSEC,
+					IXGBE_ADVTXD_POPTS_IPSEC);
+
 	/*
 	 * Check Context must be set if Tx switch is enabled, which it
	 * always is for the case where virtual functions are running
@@ -8332,14 +8265,19 @@ static u16 ixgbe_select_queue(struct net_device *dev, struct sk_buff *skb,
 			      void *accel_priv, select_queue_fallback_t fallback)
 {
 	struct ixgbe_fwd_adapter *fwd_adapter = accel_priv;
-#ifdef IXGBE_FCOE
 	struct ixgbe_adapter *adapter;
-	struct ixgbe_ring_feature *f;
 	int txq;
+#ifdef IXGBE_FCOE
+	struct ixgbe_ring_feature *f;
 #endif
 
-	if (fwd_adapter)
-		return skb->queue_mapping + fwd_adapter->tx_base_queue;
+	if (fwd_adapter) {
+		adapter = netdev_priv(dev);
+		txq = reciprocal_scale(skb_get_hash(skb),
+				       adapter->num_rx_queues_per_pool);
+
+		return txq + fwd_adapter->tx_base_queue;
+	}
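
Note: reciprocal_scale() maps a 32-bit hash uniformly onto [0, n) with
a multiply and a shift instead of a modulo, which is why it fits
per-packet queue selection. A user-space model matching the kernel
helper's definition; the queue numbers are made up:

    #include <stdint.h>
    #include <stdio.h>

    static uint32_t reciprocal_scale(uint32_t val, uint32_t ep_ro)
    {
            return (uint32_t)(((uint64_t)val * ep_ro) >> 32);
    }

    int main(void)
    {
            uint32_t hash = 0x9e3779b9;     /* sample skb hash */
            uint32_t qpp = 4;               /* assumed queues per pool */
            uint32_t base = 8;              /* assumed pool base queue */

            printf("txq = %u\n", reciprocal_scale(hash, qpp) + base);
            return 0;
    }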
 
 #ifdef IXGBE_FCOE
 
@@ -8438,6 +8376,7 @@ netdev_tx_t ixgbe_xmit_frame_ring(struct sk_buff *skb,
 	u32 tx_flags = 0;
 	unsigned short f;
 	u16 count = TXD_USE_COUNT(skb_headlen(skb));
+	struct ixgbe_ipsec_tx_data ipsec_tx = { 0 };
 	__be16 protocol = skb->protocol;
 	u8 hdr_len = 0;
 
@@ -8542,11 +8481,16 @@ netdev_tx_t ixgbe_xmit_frame_ring(struct sk_buff *skb,
 	}
 
 #endif /* IXGBE_FCOE */
+
+#ifdef CONFIG_XFRM_OFFLOAD
+	if (skb->sp && !ixgbe_ipsec_tx(tx_ring, first, &ipsec_tx))
+		goto out_drop;
+#endif
 	tso = ixgbe_tso(tx_ring, first, &hdr_len);
 	if (tso < 0)
 		goto out_drop;
 	else if (!tso)
-		ixgbe_tx_csum(tx_ring, first);
+		ixgbe_tx_csum(tx_ring, first, &ipsec_tx);
 
 	/* add the ATR filter if ATR is on */
 	if (test_bit(__IXGBE_TX_FDIR_INIT_DONE, &tx_ring->state))
@@ -8671,7 +8615,7 @@ static int ixgbe_ioctl(struct net_device *netdev, struct ifreq *req, int cmd)
 /**
  * ixgbe_add_sanmac_netdev - Add the SAN MAC address to the corresponding
  * netdev->dev_addrs
- * @netdev: network interface device structure
+ * @dev: network interface device structure
  *
  * Returns non-zero on failure
  **/
@@ -8695,7 +8639,7 @@ static int ixgbe_add_sanmac_netdev(struct net_device *dev)
 /**
  * ixgbe_del_sanmac_netdev - Removes the SAN MAC address to the corresponding
  * netdev->dev_addrs
- * @netdev: network interface device structure
+ * @dev: network interface device structure
  *
  * Returns non-zero on failure
  **/
@@ -8862,14 +8806,13 @@ static void ixgbe_set_prio_tc_map(struct ixgbe_adapter *adapter)
 /**
  * ixgbe_setup_tc - configure net_device for multiple traffic classes
  *
- * @netdev: net device to configure
+ * @dev: net device to configure
  * @tc: number of traffic classes to enable
  */
 int ixgbe_setup_tc(struct net_device *dev, u8 tc)
 {
 	struct ixgbe_adapter *adapter = netdev_priv(dev);
 	struct ixgbe_hw *hw = &adapter->hw;
-	bool pools;
 
 	/* Hardware supports up to 8 traffic classes */
 	if (tc > adapter->dcb_cfg.num_tcs.pg_tcs)
@@ -8878,10 +8821,6 @@ int ixgbe_setup_tc(struct net_device *dev, u8 tc)
 	if (hw->mac.type == ixgbe_mac_82598EB && tc && tc < MAX_TRAFFIC_CLASS)
 		return -EINVAL;
 
-	pools = (find_first_zero_bit(&adapter->fwd_bitmask, 32) > 1);
-	if (tc && pools && adapter->num_rx_pools > IXGBE_MAX_DCBMACVLANS)
-		return -EBUSY;
-
 	/* Hardware has to reinitialize queues and interrupts to
 	 * match packet buffer alignment. Unfortunately, the
 	 * hardware is not flexible enough to do this dynamically.
@@ -8898,6 +8837,7 @@ int ixgbe_setup_tc(struct net_device *dev, u8 tc)
 		netdev_set_num_tc(dev, tc);
 		ixgbe_set_prio_tc_map(adapter);
 
+		adapter->hw_tcs = tc;
 		adapter->flags |= IXGBE_FLAG_DCB_ENABLED;
 
 		if (adapter->hw.mac.type == ixgbe_mac_82598EB) {
@@ -8907,10 +8847,19 @@ int ixgbe_setup_tc(struct net_device *dev, u8 tc)
 	} else {
 		netdev_reset_tc(dev);
 
+		/* To support macvlan offload we have to use num_tc to
+		 * restrict the queues that can be used by the device.
+		 * By doing this we can avoid reporting a false number of
+		 * queues.
+		 */
+		if (!tc && adapter->num_rx_pools > 1)
+			netdev_set_num_tc(dev, 1);
+
 		if (adapter->hw.mac.type == ixgbe_mac_82598EB)
 			adapter->hw.fc.requested_mode = adapter->last_lfc_mode;
 
 		adapter->flags &= ~IXGBE_FLAG_DCB_ENABLED;
+		adapter->hw_tcs = tc;
 
 		adapter->temp_dcb_cfg.pfc_mode_enable = false;
 		adapter->dcb_cfg.pfc_mode_enable = false;
@@ -9044,6 +8993,7 @@ static int get_macvlan_queue(struct net_device *upper, void *_data)
 static int handle_redirect_action(struct ixgbe_adapter *adapter, int ifindex,
 				  u8 *queue, u64 *action)
 {
+	struct ixgbe_ring_feature *vmdq = &adapter->ring_feature[RING_F_VMDQ];
 	unsigned int num_vfs = adapter->num_vfs, vf;
 	struct upper_walk_data data;
 	struct net_device *upper;
@@ -9052,11 +9002,7 @@ static int handle_redirect_action(struct ixgbe_adapter *adapter, int ifindex,
 	for (vf = 0; vf < num_vfs; ++vf) {
 		upper = pci_get_drvdata(adapter->vfinfo[vf].vfdev);
 		if (upper->ifindex == ifindex) {
-			if (adapter->num_rx_pools > 1)
-				*queue = vf * 2;
-			else
-				*queue = vf * adapter->num_rx_queues_per_pool;
-
+			*queue = vf * __ALIGN_MASK(1, ~vmdq->mask);
 			*action = vf + 1;
 			*action <<= ETHTOOL_RX_FLOW_SPEC_RING_VF_OFF;
 			return 0;
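
Note: __ALIGN_MASK(1, ~vmdq->mask) recovers the per-pool queue stride
straight from the VMDq ring-feature mask, replacing the old
num_rx_pools special case. Worked example, assuming a mask of 0x78
(pool index in bits 6:3, i.e. 8 queues per pool):

    #include <assert.h>

    #define __ALIGN_MASK(x, mask) (((x) + (mask)) & ~(mask))

    int main(void)
    {
            unsigned int vmdq_mask = 0x78;  /* assumed: 8-queue pools */
            unsigned int stride = __ALIGN_MASK(1u, ~vmdq_mask);

            assert(stride == 8);            /* vf 3 -> queue 24 */
            return 0;
    }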
@@ -9101,9 +9047,11 @@ static int parse_tc_actions(struct ixgbe_adapter *adapter,
 
 		/* Redirect to a VF or a offloaded macvlan */
 		if (is_tcf_mirred_egress_redirect(a)) {
-			int ifindex = tcf_mirred_ifindex(a);
+			struct net_device *dev = tcf_mirred_dev(a);
 
-			err = handle_redirect_action(adapter, ifindex, queue,
+			if (!dev)
+				return -EINVAL;
+			err = handle_redirect_action(adapter, dev->ifindex, queue,
 						     action);
 			if (err == 0)
 				return err;
@@ -9362,9 +9310,6 @@ free_jump:
 static int ixgbe_setup_tc_cls_u32(struct ixgbe_adapter *adapter,
 				  struct tc_cls_u32_offload *cls_u32)
 {
-	if (cls_u32->common.chain_index)
-		return -EOPNOTSUPP;
-
 	switch (cls_u32->command) {
 	case TC_CLSU32_NEW_KNODE:
 	case TC_CLSU32_REPLACE_KNODE:
@@ -9386,7 +9331,7 @@ static int ixgbe_setup_tc_block_cb(enum tc_setup_type type, void *type_data,
 {
 	struct ixgbe_adapter *adapter = cb_priv;
 
-	if (!tc_can_offload(adapter->netdev))
+	if (!tc_cls_can_offload_and_chain0(adapter->netdev, type_data))
 		return -EOPNOTSUPP;
 
 	switch (type) {
@@ -9444,7 +9389,7 @@ void ixgbe_sriov_reinit(struct ixgbe_adapter *adapter)
 	struct net_device *netdev = adapter->netdev;
 
 	rtnl_lock();
-	ixgbe_setup_tc(netdev, netdev_get_num_tc(netdev));
+	ixgbe_setup_tc(netdev, adapter->hw_tcs);
 	rtnl_unlock();
 }
 
@@ -9520,7 +9465,7 @@ static int ixgbe_set_features(struct net_device *netdev,
 		/* We cannot enable ATR if SR-IOV is enabled */
 		if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED ||
 		    /* We cannot enable ATR if we have 2 or more tcs */
-		    (netdev_get_num_tc(netdev) > 1) ||
+		    (adapter->hw_tcs > 1) ||
 		    /* We cannot enable ATR if RSS is disabled */
 		    (adapter->ring_feature[RING_F_RSS].limit <= 1) ||
 		    /* A sample rate of 0 indicates ATR disabled */
@@ -9695,8 +9640,8 @@ static int ixgbe_ndo_fdb_add(struct ndmsg *ndm, struct nlattr *tb[],
 
 /**
  * ixgbe_configure_bridge_mode - set various bridge modes
- * @adapter - the private structure
- * @mode - requested bridge mode
+ * @adapter: the private structure
+ * @mode: requested bridge mode
  *
 * Configure some settings required for various bridge modes.
  **/
@@ -9821,6 +9766,7 @@ static void *ixgbe_fwd_add(struct net_device *pdev, struct net_device *vdev)
 	struct ixgbe_fwd_adapter *fwd_adapter = NULL;
 	struct ixgbe_adapter *adapter = netdev_priv(pdev);
 	int used_pools = adapter->num_vfs + adapter->num_rx_pools;
+	int tcs = adapter->hw_tcs ? : 1;
 	unsigned int limit;
 	int pool, err;
 
@@ -9831,24 +9777,8 @@ static void *ixgbe_fwd_add(struct net_device *pdev, struct net_device *vdev)
 	if (used_pools >= IXGBE_MAX_VF_FUNCTIONS)
 		return ERR_PTR(-EINVAL);
 
-#ifdef CONFIG_RPS
-	if (vdev->num_rx_queues != vdev->num_tx_queues) {
-		netdev_info(pdev, "%s: Only supports a single queue count for TX and RX\n",
-			    vdev->name);
-		return ERR_PTR(-EINVAL);
-	}
-#endif
-	/* Check for hardware restriction on number of rx/tx queues */
-	if (vdev->num_tx_queues > IXGBE_MAX_L2A_QUEUES ||
-	    vdev->num_tx_queues == IXGBE_BAD_L2A_QUEUE) {
-		netdev_info(pdev,
-			    "%s: Supports RX/TX Queue counts 1,2, and 4\n",
-			    pdev->name);
-		return ERR_PTR(-EINVAL);
-	}
-
 	if (((adapter->flags & IXGBE_FLAG_DCB_ENABLED) &&
-	      adapter->num_rx_pools > IXGBE_MAX_DCBMACVLANS - 1) ||
+	      adapter->num_rx_pools >= (MAX_TX_QUEUES / tcs)) ||
 	    (adapter->num_rx_pools > IXGBE_MAX_MACVLANS))
 		return ERR_PTR(-EBUSY);
 
@@ -9856,53 +9786,68 @@ static void *ixgbe_fwd_add(struct net_device *pdev, struct net_device *vdev)
 	if (!fwd_adapter)
 		return ERR_PTR(-ENOMEM);
 
-	pool = find_first_zero_bit(&adapter->fwd_bitmask, 32);
-	adapter->num_rx_pools++;
-	set_bit(pool, &adapter->fwd_bitmask);
-	limit = find_last_bit(&adapter->fwd_bitmask, 32);
+	pool = find_first_zero_bit(adapter->fwd_bitmask, adapter->num_rx_pools);
+	set_bit(pool, adapter->fwd_bitmask);
+	limit = find_last_bit(adapter->fwd_bitmask, adapter->num_rx_pools + 1);
 
 	/* Enable VMDq flag so device will be set in VM mode */
 	adapter->flags |= IXGBE_FLAG_VMDQ_ENABLED | IXGBE_FLAG_SRIOV_ENABLED;
 	adapter->ring_feature[RING_F_VMDQ].limit = limit + 1;
-	adapter->ring_feature[RING_F_RSS].limit = vdev->num_tx_queues;
 
-	/* Force reinit of ring allocation with VMDQ enabled */
-	err = ixgbe_setup_tc(pdev, netdev_get_num_tc(pdev));
-	if (err)
-		goto fwd_add_err;
 	fwd_adapter->pool = pool;
 	fwd_adapter->real_adapter = adapter;
 
-	if (netif_running(pdev)) {
+	/* Force reinit of ring allocation with VMDQ enabled */
+	err = ixgbe_setup_tc(pdev, adapter->hw_tcs);
+
+	if (!err && netif_running(pdev))
 		err = ixgbe_fwd_ring_up(vdev, fwd_adapter);
-		if (err)
-			goto fwd_add_err;
-		netif_tx_start_all_queues(vdev);
-	}
 
-	return fwd_adapter;
-fwd_add_err:
+	if (!err)
+		return fwd_adapter;
+
 	/* unwind counter and free adapter struct */
 	netdev_info(pdev,
 		    "%s: dfwd hardware acceleration failed\n", vdev->name);
-	clear_bit(pool, &adapter->fwd_bitmask);
-	adapter->num_rx_pools--;
+	clear_bit(pool, adapter->fwd_bitmask);
 	kfree(fwd_adapter);
 	return ERR_PTR(err);
 }
 
 static void ixgbe_fwd_del(struct net_device *pdev, void *priv)
 {
-	struct ixgbe_fwd_adapter *fwd_adapter = priv;
-	struct ixgbe_adapter *adapter = fwd_adapter->real_adapter;
-	unsigned int limit;
+	struct ixgbe_fwd_adapter *accel = priv;
+	struct ixgbe_adapter *adapter = accel->real_adapter;
+	unsigned int rxbase = accel->rx_base_queue;
+	unsigned int limit, i;
+
+	/* delete unicast filter associated with offloaded interface */
+	ixgbe_del_mac_filter(adapter, accel->netdev->dev_addr,
+			     VMDQ_P(accel->pool));
+
+	/* disable ability to receive packets for this pool */
+	IXGBE_WRITE_REG(&adapter->hw, IXGBE_VMOLR(accel->pool), 0);
 
-	clear_bit(fwd_adapter->pool, &adapter->fwd_bitmask);
-	adapter->num_rx_pools--;
+	/* Allow remaining Rx packets to get flushed out of the
+	 * Rx FIFO before we drop the netdev for the ring.
+	 */
+	usleep_range(10000, 20000);
 
-	limit = find_last_bit(&adapter->fwd_bitmask, 32);
+	for (i = 0; i < adapter->num_rx_queues_per_pool; i++) {
+		struct ixgbe_ring *ring = adapter->rx_ring[rxbase + i];
+		struct ixgbe_q_vector *qv = ring->q_vector;
+
+		/* Make sure we aren't processing any packets and clear
+		 * netdev to shut down the ring.
+		 */
+		if (netif_running(adapter->netdev))
+			napi_synchronize(&qv->napi);
+		ring->netdev = NULL;
+	}
+
+	clear_bit(accel->pool, adapter->fwd_bitmask);
+	limit = find_last_bit(adapter->fwd_bitmask, adapter->num_rx_pools);
 	adapter->ring_feature[RING_F_VMDQ].limit = limit + 1;
-	ixgbe_fwd_ring_down(fwd_adapter->netdev, fwd_adapter);
 
 	/* go back to full RSS if we're done with our VMQs */
 	if (adapter->ring_feature[RING_F_VMDQ].limit == 1) {
@@ -9914,13 +9859,13 @@ static void ixgbe_fwd_del(struct net_device *pdev, void *priv)
 		adapter->ring_feature[RING_F_RSS].limit = rss;
 	}
 
-	ixgbe_setup_tc(pdev, netdev_get_num_tc(pdev));
-	netdev_dbg(pdev, "pool %i:%i queues %i:%i VSI bitmask %lx\n",
-		   fwd_adapter->pool, adapter->num_rx_pools,
-		   fwd_adapter->rx_base_queue,
-		   fwd_adapter->rx_base_queue + adapter->num_rx_queues_per_pool,
-		   adapter->fwd_bitmask);
-	kfree(fwd_adapter);
+	ixgbe_setup_tc(pdev, adapter->hw_tcs);
+	netdev_dbg(pdev, "pool %i:%i queues %i:%i\n",
+		   accel->pool, adapter->num_rx_pools,
+		   accel->rx_base_queue,
+		   accel->rx_base_queue +
+		   adapter->num_rx_queues_per_pool);
+	kfree(accel);
 }
 
 #define IXGBE_MAX_MAC_HDR_LEN		127
@@ -9954,6 +9899,12 @@ ixgbe_features_check(struct sk_buff *skb, struct net_device *dev,
 	if (skb->encapsulation && !(features & NETIF_F_TSO_MANGLEID))
 		features &= ~NETIF_F_TSO;
 
+#ifdef CONFIG_XFRM_OFFLOAD
+	/* IPsec offload doesn't get along well with others *yet* */
+	if (skb->sp)
+		features &= ~(NETIF_F_TSO | NETIF_F_HW_CSUM);
+#endif
+
 	return features;
 }
 
@@ -9987,7 +9938,7 @@ static int ixgbe_xdp_setup(struct net_device *dev, struct bpf_prog *prog)
 
 	/* If transitioning XDP modes reconfigure rings */
 	if (!!prog != !!old_prog) {
-		int err = ixgbe_setup_tc(dev, netdev_get_num_tc(dev));
+		int err = ixgbe_setup_tc(dev, adapter->hw_tcs);
 
 		if (err) {
 			rcu_assign_pointer(adapter->xdp_prog, old_prog);
@@ -10164,7 +10115,7 @@ static inline int ixgbe_enumerate_functions(struct ixgbe_adapter *adapter)
  * ixgbe_wol_supported - Check whether device supports WoL
  * @adapter: the adapter private structure
  * @device_id: the device ID
- * @subdev_id: the subsystem device ID
+ * @subdevice_id: the subsystem device ID
  *
  * This function is used by probe and ethtool to determine
  * which devices have WoL support
@@ -10233,6 +10184,41 @@ bool ixgbe_wol_supported(struct ixgbe_adapter *adapter, u16 device_id,
 }
 
 /**
+ * ixgbe_set_fw_version - Set FW version
+ * @adapter: the adapter private structure
+ *
+ * This function is used by probe and ethtool to determine which FW version
+ * string to display. The FW version is taken from the EEPROM/NVM.
+ */
+static void ixgbe_set_fw_version(struct ixgbe_adapter *adapter)
+{
+	struct ixgbe_hw *hw = &adapter->hw;
+	struct ixgbe_nvm_version nvm_ver;
+
+	ixgbe_get_oem_prod_version(hw, &nvm_ver);
+	if (nvm_ver.oem_valid) {
+		snprintf(adapter->eeprom_id, sizeof(adapter->eeprom_id),
+			 "%x.%x.%x", nvm_ver.oem_major, nvm_ver.oem_minor,
+			 nvm_ver.oem_release);
+		return;
+	}
+
+	ixgbe_get_etk_id(hw, &nvm_ver);
+	ixgbe_get_orom_version(hw, &nvm_ver);
+
+	if (nvm_ver.or_valid) {
+		snprintf(adapter->eeprom_id, sizeof(adapter->eeprom_id),
+			 "0x%08x, %d.%d.%d", nvm_ver.etk_id, nvm_ver.or_major,
+			 nvm_ver.or_build, nvm_ver.or_patch);
+		return;
+	}
+
+	/* Set ETrack ID format */
+	snprintf(adapter->eeprom_id, sizeof(adapter->eeprom_id),
+		 "0x%08x", nvm_ver.etk_id);
+}
+
+/**
  * ixgbe_probe - Device Initialization Routine
  * @pdev: PCI device information struct
  * @ent: entry in ixgbe_pci_tbl
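
Note: ixgbe_set_fw_version() is a plain fallback chain: prefer the OEM
product version, then the ETrack ID plus option-ROM version, else the
bare ETrack ID. A condensed sketch of the same shape with a
hypothetical flattened version struct:

    #include <stdio.h>

    struct ver {        /* hypothetical stand-in for ixgbe_nvm_version */
            int oem_valid, or_valid;
            unsigned int etk_id, oem_major, oem_minor, oem_release;
            int or_major, or_build, or_patch;
    };

    static void format_fw(char *buf, size_t len, const struct ver *v)
    {
            if (v->oem_valid)
                    snprintf(buf, len, "%x.%x.%x", v->oem_major,
                             v->oem_minor, v->oem_release);
            else if (v->or_valid)
                    snprintf(buf, len, "0x%08x, %d.%d.%d", v->etk_id,
                             v->or_major, v->or_build, v->or_patch);
            else
                    snprintf(buf, len, "0x%08x", v->etk_id);
    }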
@@ -10508,6 +10494,7 @@ skip_sriov:
 					 NETIF_F_FCOE_MTU;
 	}
 #endif /* IXGBE_FCOE */
+	ixgbe_init_ipsec_offload(adapter);
 
 	if (adapter->flags2 & IXGBE_FLAG2_RSC_CAPABLE)
 		netdev->hw_features |= NETIF_F_LRO;
@@ -10568,8 +10555,7 @@ skip_sriov:
 	device_set_wakeup_enable(&adapter->pdev->dev, adapter->wol);
 
 	/* save off EEPROM version number */
-	hw->eeprom.ops.read(hw, 0x2e, &adapter->eeprom_verh);
-	hw->eeprom.ops.read(hw, 0x2d, &adapter->eeprom_verl);
+	ixgbe_set_fw_version(adapter);
 
 	/* pick up the PCI bus settings for reporting later */
 	if (ixgbe_pcie_from_parent(hw))
@@ -10744,6 +10730,7 @@ static void ixgbe_remove(struct pci_dev *pdev)
 	if (netdev->reg_state == NETREG_REGISTERED)
 		unregister_netdev(netdev);
 
+	ixgbe_stop_ipsec_offload(adapter);
 	ixgbe_clear_interrupt_scheme(adapter);
 
 	ixgbe_release_hw_control(adapter);
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c
index 654a402f0e9e..91bde90f9265 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c
@@ -378,7 +378,7 @@ static s32 ixgbe_get_phy_id(struct ixgbe_hw *hw)
 
 /**
  *  ixgbe_get_phy_type_from_id - Get the phy type
- *  @hw: pointer to hardware structure
+ *  @phy_id: hardware phy id
  *
  **/
 static enum ixgbe_phy_type ixgbe_get_phy_type_from_id(u32 phy_id)
@@ -489,6 +489,7 @@ s32 ixgbe_reset_phy_generic(struct ixgbe_hw *hw)
  *  the SWFW lock
  *  @hw: pointer to hardware structure
  *  @reg_addr: 32 bit address of PHY register to read
+ *  @device_type: 5 bit device type
  *  @phy_data: Pointer to read data from PHY register
  **/
 s32 ixgbe_read_phy_reg_mdi(struct ixgbe_hw *hw, u32 reg_addr, u32 device_type,
@@ -564,6 +565,7 @@ s32 ixgbe_read_phy_reg_mdi(struct ixgbe_hw *hw, u32 reg_addr, u32 device_type,
  *  using the SWFW lock - this function is needed in most cases
  *  @hw: pointer to hardware structure
  *  @reg_addr: 32 bit address of PHY register to read
+ *  @device_type: 5 bit device type
  *  @phy_data: Pointer to read data from PHY register
  **/
 s32 ixgbe_read_phy_reg_generic(struct ixgbe_hw *hw, u32 reg_addr,
@@ -763,6 +765,7 @@ s32 ixgbe_setup_phy_link_generic(struct ixgbe_hw *hw)
  *  ixgbe_setup_phy_link_speed_generic - Sets the auto advertised capabilities
  *  @hw: pointer to hardware structure
  *  @speed: new link speed
+ *  @autoneg_wait_to_complete: unused
  **/
 s32 ixgbe_setup_phy_link_speed_generic(struct ixgbe_hw *hw,
 				       ixgbe_link_speed speed,
@@ -861,6 +864,8 @@ s32 ixgbe_get_copper_link_capabilities_generic(struct ixgbe_hw *hw,
 /**
  *  ixgbe_check_phy_link_tnx - Determine link and speed status
  *  @hw: pointer to hardware structure
+ *  @speed: link speed
+ *  @link_up: status of link
  *
  *  Reads the VS1 register to determine if link is up and the current speed for
  *  the PHY.
@@ -1667,7 +1672,7 @@ s32 ixgbe_read_i2c_eeprom_generic(struct ixgbe_hw *hw, u8 byte_offset,
  *  ixgbe_read_i2c_sff8472_generic - Reads 8 bit word over I2C interface
  *  @hw: pointer to hardware structure
  *  @byte_offset: byte offset at address 0xA2
- *  @eeprom_data: value read
+ *  @sff8472_data: value read
  *
  *  Performs byte read operation to SFP module's SFF-8472 data over I2C
  **/
@@ -1714,6 +1719,7 @@ static bool ixgbe_is_sfp_probe(struct ixgbe_hw *hw, u8 offset, u8 addr)
  *  ixgbe_read_i2c_byte_generic_int - Reads 8 bit word over I2C
  *  @hw: pointer to hardware structure
  *  @byte_offset: byte offset to read
+ *  @dev_addr: device address
  *  @data: value read
  *  @lock: true if to take and release semaphore
  *
@@ -1804,6 +1810,7 @@ fail:
  *  ixgbe_read_i2c_byte_generic - Reads 8 bit word over I2C
  *  @hw: pointer to hardware structure
  *  @byte_offset: byte offset to read
+ *  @dev_addr: device address
  *  @data: value read
  *
  *  Performs byte read operation to SFP module's EEPROM over I2C interface at
@@ -1820,6 +1827,7 @@ s32 ixgbe_read_i2c_byte_generic(struct ixgbe_hw *hw, u8 byte_offset,
  *  ixgbe_read_i2c_byte_generic_unlocked - Reads 8 bit word over I2C
  *  @hw: pointer to hardware structure
  *  @byte_offset: byte offset to read
+ *  @dev_addr: device address
  *  @data: value read
  *
  *  Performs byte read operation to SFP module's EEPROM over I2C interface at
@@ -1836,6 +1844,7 @@ s32 ixgbe_read_i2c_byte_generic_unlocked(struct ixgbe_hw *hw, u8 byte_offset,
  *  ixgbe_write_i2c_byte_generic_int - Writes 8 bit word over I2C
  *  @hw: pointer to hardware structure
  *  @byte_offset: byte offset to write
+ *  @dev_addr: device address
  *  @data: value to write
  *  @lock: true if to take and release semaphore
  *
@@ -1904,6 +1913,7 @@ fail:
  *  ixgbe_write_i2c_byte_generic - Writes 8 bit word over I2C
  *  @hw: pointer to hardware structure
  *  @byte_offset: byte offset to write
+ *  @dev_addr: device address
  *  @data: value to write
  *
  *  Performs byte write operation to SFP module's EEPROM over I2C interface at
@@ -1920,6 +1930,7 @@ s32 ixgbe_write_i2c_byte_generic(struct ixgbe_hw *hw, u8 byte_offset,
  *  ixgbe_write_i2c_byte_generic_unlocked - Writes 8 bit word over I2C
  *  @hw: pointer to hardware structure
  *  @byte_offset: byte offset to write
+ *  @dev_addr: device address
  *  @data: value to write
  *
  *  Performs byte write operation to SFP module's EEPROM over I2C interface at
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c
index ae312c45696a..f6cc9166082a 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c
@@ -166,7 +166,7 @@
 
 /**
  * ixgbe_ptp_setup_sdp_x540
- * @hw: the hardware private structure
+ * @adapter: private adapter structure
  *
  * this function enables or disables the clock out feature on SDP0 for
 * the X540 device. It will create a 1 second periodic output that can
@@ -299,7 +299,7 @@ static u64 ixgbe_ptp_read_82599(const struct cyclecounter *cc)
  * ixgbe_ptp_convert_to_hwtstamp - convert register value to hw timestamp
  * @adapter: private adapter structure
  * @hwtstamp: stack timestamp structure
- * @systim: unsigned 64bit system time value
+ * @timestamp: unsigned 64bit system time value
  *
  * We need to convert the adapter's RX/TXSTMP registers into a hwtstamp value
  * which can be used by the stack's ptp functions.
@@ -1015,7 +1015,7 @@ static int ixgbe_ptp_set_timestamp_mode(struct ixgbe_adapter *adapter,
 /**
  * ixgbe_ptp_set_ts_config - user entry point for timestamp mode
  * @adapter: pointer to adapter struct
- * @ifreq: ioctl data
+ * @ifr: ioctl data
  *
  * Set hardware to requested mode. If unsupported, return an error with no
  * changes. Otherwise, store the mode for future reference.
@@ -1338,7 +1338,7 @@ void ixgbe_ptp_init(struct ixgbe_adapter *adapter)
 
 /**
  * ixgbe_ptp_suspend - stop PTP work items
- * @ adapter: pointer to adapter struct
+ * @adapter: pointer to adapter struct
  *
  * this function suspends PTP activity, and prevents more PTP work from being
  * generated, but does not destroy the PTP clock device.
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c
index 112d24c6c9ce..27a70a52f3c9 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c
@@ -78,12 +78,9 @@ static int __ixgbe_enable_sriov(struct ixgbe_adapter *adapter,
 	struct ixgbe_hw *hw = &adapter->hw;
 	int i;
 
-	adapter->flags |= IXGBE_FLAG_SRIOV_ENABLED;
-
 	/* Enable VMDq flag so device will be set in VM mode */
-	adapter->flags |= IXGBE_FLAG_VMDQ_ENABLED;
-	if (!adapter->ring_feature[RING_F_VMDQ].limit)
-		adapter->ring_feature[RING_F_VMDQ].limit = 1;
+	adapter->flags |= IXGBE_FLAG_SRIOV_ENABLED |
+			  IXGBE_FLAG_VMDQ_ENABLED;
 
 	/* Allocate memory for per VF control structures */
 	adapter->vfinfo = kcalloc(num_vfs, sizeof(struct vf_data_storage),
@@ -227,9 +224,6 @@ void ixgbe_enable_sriov(struct ixgbe_adapter *adapter, unsigned int max_vfs)
 int ixgbe_disable_sriov(struct ixgbe_adapter *adapter)
 {
 	unsigned int num_vfs = adapter->num_vfs, vf;
-	struct ixgbe_hw *hw = &adapter->hw;
-	u32 gpie;
-	u32 vmdctl;
 	int rss;
 
 	/* set num VFs to 0 to prevent access to vfinfo */
@@ -271,18 +265,6 @@ int ixgbe_disable_sriov(struct ixgbe_adapter *adapter)
 	pci_disable_sriov(adapter->pdev);
 #endif
 
-	/* turn off device IOV mode */
-	IXGBE_WRITE_REG(hw, IXGBE_GCR_EXT, 0);
-	gpie = IXGBE_READ_REG(hw, IXGBE_GPIE);
-	gpie &= ~IXGBE_GPIE_VTMODE_MASK;
-	IXGBE_WRITE_REG(hw, IXGBE_GPIE, gpie);
-
-	/* set default pool back to 0 */
-	vmdctl = IXGBE_READ_REG(hw, IXGBE_VT_CTL);
-	vmdctl &= ~IXGBE_VT_CTL_POOL_MASK;
-	IXGBE_WRITE_REG(hw, IXGBE_VT_CTL, vmdctl);
-	IXGBE_WRITE_FLUSH(hw);
-
 	/* Disable VMDq flag so device will be set in VM mode */
 	if (adapter->ring_feature[RING_F_VMDQ].limit == 1) {
 		adapter->flags &= ~IXGBE_FLAG_VMDQ_ENABLED;
@@ -305,10 +287,9 @@ static int ixgbe_pci_sriov_enable(struct pci_dev *dev, int num_vfs)
 {
 #ifdef CONFIG_PCI_IOV
 	struct ixgbe_adapter *adapter = pci_get_drvdata(dev);
-	int err = 0;
-	u8 num_tc;
-	int i;
 	int pre_existing_vfs = pci_num_vf(dev);
+	int err = 0, num_rx_pools, i, limit;
+	u8 num_tc;
 
 	if (pre_existing_vfs && pre_existing_vfs != num_vfs)
 		err = ixgbe_disable_sriov(adapter);
@@ -330,23 +311,15 @@ static int ixgbe_pci_sriov_enable(struct pci_dev *dev, int num_vfs)
 	 * than we have available pools. The PCI bus driver already checks for
 	 * other values out of range.
 	 */
-	num_tc = netdev_get_num_tc(adapter->netdev);
-
-	if (num_tc > 4) {
-		if ((num_vfs + adapter->num_rx_pools) > IXGBE_MAX_VFS_8TC) {
-			e_dev_err("Currently the device is configured with %d TCs, Creating more than %d VFs is not allowed\n", num_tc, IXGBE_MAX_VFS_8TC);
-			return -EPERM;
-		}
-	} else if ((num_tc > 1) && (num_tc <= 4)) {
-		if ((num_vfs + adapter->num_rx_pools) > IXGBE_MAX_VFS_4TC) {
-			e_dev_err("Currently the device is configured with %d TCs, Creating more than %d VFs is not allowed\n", num_tc, IXGBE_MAX_VFS_4TC);
-			return -EPERM;
-		}
-	} else {
-		if ((num_vfs + adapter->num_rx_pools) > IXGBE_MAX_VFS_1TC) {
-			e_dev_err("Currently the device is configured with %d TCs, Creating more than %d VFs is not allowed\n", num_tc, IXGBE_MAX_VFS_1TC);
-			return -EPERM;
-		}
+	num_tc = adapter->hw_tcs;
+	num_rx_pools = adapter->num_rx_pools;
+	limit = (num_tc > 4) ? IXGBE_MAX_VFS_8TC :
+		(num_tc > 1) ? IXGBE_MAX_VFS_4TC : IXGBE_MAX_VFS_1TC;
+
+	if (num_vfs > (limit - num_rx_pools)) {
+		e_dev_err("Currently configured with %d TCs, and %d offloaded macvlans. Creating more than %d VFs is not allowed\n",
+			  num_tc, num_rx_pools - 1, limit - num_rx_pools);
+		return -EPERM;
 	}
 
 	err = __ixgbe_enable_sriov(adapter, num_vfs);
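
Note: the collapsed ternary keeps the old semantics: the pool budget
depends on the TC count (more than 4 TCs, 2-4 TCs, or 1 TC), and the
new VFs plus the Rx pools already allocated to the PF and offloaded
macvlans must fit inside it. Worked arithmetic with hypothetical limit
values:

    #include <assert.h>

    int main(void)
    {
            /* hypothetical stand-ins for IXGBE_MAX_VFS_{8,4,1}TC */
            int num_tc = 4, num_rx_pools = 2; /* PF pool + one macvlan */
            int limit = (num_tc > 4) ? 16 : (num_tc > 1) ? 32 : 64;

            assert(limit - num_rx_pools == 30); /* up to 30 VFs allowed */
            return 0;
    }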
@@ -378,13 +351,15 @@ static int ixgbe_pci_sriov_disable(struct pci_dev *dev)
 	int err;
 #ifdef CONFIG_PCI_IOV
 	u32 current_flags = adapter->flags;
+	int prev_num_vf = pci_num_vf(dev);
 #endif
 
 	err = ixgbe_disable_sriov(adapter);
 
 	/* Only reinit if no error and state changed */
 #ifdef CONFIG_PCI_IOV
-	if (!err && current_flags != adapter->flags)
+	if (!err && (current_flags != adapter->flags ||
+		     prev_num_vf != pci_num_vf(dev)))
 		ixgbe_sriov_reinit(adapter);
 #endif
 
@@ -738,7 +713,7 @@ static inline void ixgbe_vf_reset_event(struct ixgbe_adapter *adapter, u32 vf)
 {
 	struct ixgbe_hw *hw = &adapter->hw;
 	struct vf_data_storage *vfinfo = &adapter->vfinfo[vf];
-	u8 num_tcs = netdev_get_num_tc(adapter->netdev);
+	u8 num_tcs = adapter->hw_tcs;
 
 	/* remove VLAN filters belonging to this VF */
 	ixgbe_clear_vf_vlans(adapter, vf);
@@ -946,7 +921,7 @@ static int ixgbe_set_vf_vlan_msg(struct ixgbe_adapter *adapter,
 {
 	u32 add = (msgbuf[0] & IXGBE_VT_MSGINFO_MASK) >> IXGBE_VT_MSGINFO_SHIFT;
 	u32 vid = (msgbuf[1] & IXGBE_VLVF_VLANID_MASK);
-	u8 tcs = netdev_get_num_tc(adapter->netdev);
+	u8 tcs = adapter->hw_tcs;
 
 	if (adapter->vfinfo[vf].pf_vlan || tcs) {
 		e_warn(drv,
@@ -1034,7 +1009,7 @@ static int ixgbe_get_vf_queues(struct ixgbe_adapter *adapter,
 	struct net_device *dev = adapter->netdev;
 	struct ixgbe_ring_feature *vmdq = &adapter->ring_feature[RING_F_VMDQ];
 	unsigned int default_tc = 0;
-	u8 num_tcs = netdev_get_num_tc(dev);
+	u8 num_tcs = adapter->hw_tcs;
 
 	/* verify the PF is supporting the correct APIs */
 	switch (adapter->vfinfo[vf].vf_api) {
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h
index ffa0ee5cd0f5..ca45359686d3 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h
@@ -235,6 +235,45 @@ struct ixgbe_thermal_sensor_data {
 	struct ixgbe_thermal_diode_data sensor[IXGBE_MAX_SENSORS];
 };
 
+#define NVM_OROM_OFFSET		0x17
+#define NVM_OROM_BLK_LOW	0x83
+#define NVM_OROM_BLK_HI		0x84
+#define NVM_OROM_PATCH_MASK	0xFF
+#define NVM_OROM_SHIFT		8
+
+#define NVM_VER_MASK		0x00FF	/* version mask */
+#define NVM_VER_SHIFT		8	/* version bit shift */
+#define NVM_OEM_PROD_VER_PTR	0x1B /* OEM Product version block pointer */
+#define NVM_OEM_PROD_VER_CAP_OFF 0x1 /* OEM Product version format offset */
+#define NVM_OEM_PROD_VER_OFF_L	0x2  /* OEM Product version offset low */
+#define NVM_OEM_PROD_VER_OFF_H	0x3  /* OEM Product version offset high */
+#define NVM_OEM_PROD_VER_CAP_MASK 0xF /* OEM Product version cap mask */
+#define NVM_OEM_PROD_VER_MOD_LEN 0x3 /* OEM Product version module length */
+#define NVM_ETK_OFF_LOW		0x2D /* version low order word */
+#define NVM_ETK_OFF_HI		0x2E /* version high order word */
+#define NVM_ETK_SHIFT		16   /* high version word shift */
+#define NVM_VER_INVALID		0xFFFF
+#define NVM_ETK_VALID		0x8000
+#define NVM_INVALID_PTR		0xFFFF
+#define NVM_VER_SIZE		32   /* version string size */
+
+struct ixgbe_nvm_version {
+	u32 etk_id;
+	u8  nvm_major;
+	u16 nvm_minor;
+	u8  nvm_id;
+
+	bool oem_valid;
+	u8   oem_major;
+	u8   oem_minor;
+	u16  oem_release;
+
+	bool or_valid;
+	u8  or_major;
+	u16 or_build;
+	u8  or_patch;
+};
+
 /* Interrupt Registers */
 #define IXGBE_EICR      0x00800
 #define IXGBE_EICS      0x00808
@@ -2321,11 +2360,6 @@ enum {
 #define IXGBE_TXD_CMD_VLE    0x40000000 /* Add VLAN tag */
 #define IXGBE_TXD_STAT_DD    0x00000001 /* Descriptor Done */
 
-#define IXGBE_RXDADV_IPSEC_STATUS_SECP                  0x00020000
-#define IXGBE_RXDADV_IPSEC_ERROR_INVALID_PROTOCOL       0x08000000
-#define IXGBE_RXDADV_IPSEC_ERROR_INVALID_LENGTH         0x10000000
-#define IXGBE_RXDADV_IPSEC_ERROR_AUTH_FAILED            0x18000000
-#define IXGBE_RXDADV_IPSEC_ERROR_BIT_MASK               0x18000000
 /* Multiple Transmit Queue Command Register */
 #define IXGBE_MTQC_RT_ENA       0x1 /* DCB Enable */
 #define IXGBE_MTQC_VT_ENA       0x2 /* VMDQ2 Enable */
@@ -2377,6 +2411,9 @@ enum {
 #define IXGBE_RXDADV_ERR_LE     0x02000000 /* Length Error */
 #define IXGBE_RXDADV_ERR_PE     0x08000000 /* Packet Error */
 #define IXGBE_RXDADV_ERR_OSE    0x10000000 /* Oversize Error */
+#define IXGBE_RXDADV_ERR_IPSEC_INV_PROTOCOL  0x08000000 /* overlap ERR_PE  */
+#define IXGBE_RXDADV_ERR_IPSEC_INV_LENGTH    0x10000000 /* overlap ERR_OSE */
+#define IXGBE_RXDADV_ERR_IPSEC_AUTH_FAILED   0x18000000
 #define IXGBE_RXDADV_ERR_USE    0x20000000 /* Undersize Error */
 #define IXGBE_RXDADV_ERR_TCPE   0x40000000 /* TCP/UDP Checksum Error */
 #define IXGBE_RXDADV_ERR_IPE    0x80000000 /* IP Checksum Error */
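
Note: the overlap is deliberate: the two IPsec error codes reuse the
existing PE/OSE bit positions, and AUTH_FAILED is their union, so
adding both to the frame-error mask below also drops authentication
failures. In C11 terms:

    _Static_assert((0x08000000 | 0x10000000) == 0x18000000,
                   "AUTH_FAILED == INV_PROTOCOL | INV_LENGTH");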
@@ -2398,6 +2435,7 @@ enum {
 #define IXGBE_RXDADV_STAT_FCSTAT_FCPRSP 0x00000020 /* 10: Recv. FCP_RSP */
 #define IXGBE_RXDADV_STAT_FCSTAT_DDP    0x00000030 /* 11: Ctxt w/ DDP */
 #define IXGBE_RXDADV_STAT_TS		0x00010000 /* IEEE 1588 Time Stamp */
+#define IXGBE_RXDADV_STAT_SECP		0x00020000 /* IPsec/MACsec pkt found */
 
 /* PSRTYPE bit definitions */
 #define IXGBE_PSRTYPE_TCPHDR    0x00000010
@@ -2464,13 +2502,6 @@ enum {
 #define IXGBE_RXDADV_PKTTYPE_ETQF_MASK  0x00000070 /* ETQF has 8 indices */
 #define IXGBE_RXDADV_PKTTYPE_ETQF_SHIFT 4          /* Right-shift 4 bits */
 
-/* Security Processing bit Indication */
-#define IXGBE_RXDADV_LNKSEC_STATUS_SECP         0x00020000
-#define IXGBE_RXDADV_LNKSEC_ERROR_NO_SA_MATCH   0x08000000
-#define IXGBE_RXDADV_LNKSEC_ERROR_REPLAY_ERROR  0x10000000
-#define IXGBE_RXDADV_LNKSEC_ERROR_BIT_MASK      0x18000000
-#define IXGBE_RXDADV_LNKSEC_ERROR_BAD_SIG       0x18000000
-
 /* Masks to determine if packets should be dropped due to frame errors */
 #define IXGBE_RXD_ERR_FRAME_ERR_MASK ( \
 				      IXGBE_RXD_ERR_CE | \
@@ -2484,6 +2515,8 @@ enum {
 				      IXGBE_RXDADV_ERR_LE | \
 				      IXGBE_RXDADV_ERR_PE | \
 				      IXGBE_RXDADV_ERR_OSE | \
+				      IXGBE_RXDADV_ERR_IPSEC_INV_PROTOCOL | \
+				      IXGBE_RXDADV_ERR_IPSEC_INV_LENGTH | \
 				      IXGBE_RXDADV_ERR_USE)
 
 /* Multicast bit mask */
@@ -2862,7 +2895,7 @@ union ixgbe_adv_rx_desc {
 /* Context descriptors */
 struct ixgbe_adv_tx_context_desc {
 	__le32 vlan_macip_lens;
-	__le32 seqnum_seed;
+	__le32 fceof_saidx;
 	__le32 type_tucmd_mlhl;
 	__le32 mss_l4len_idx;
 };
@@ -2893,6 +2926,7 @@ struct ixgbe_adv_tx_context_desc {
 				 IXGBE_ADVTXD_POPTS_SHIFT)
 #define IXGBE_ADVTXD_POPTS_TXSM (IXGBE_TXD_POPTS_TXSM << \
 				 IXGBE_ADVTXD_POPTS_SHIFT)
+#define IXGBE_ADVTXD_POPTS_IPSEC     0x00000400 /* IPSec offload request */
 #define IXGBE_ADVTXD_POPTS_ISCO_1ST  0x00000000 /* 1st TSO of iSCSI PDU */
 #define IXGBE_ADVTXD_POPTS_ISCO_MDL  0x00000800 /* Middle TSO of iSCSI PDU */
 #define IXGBE_ADVTXD_POPTS_ISCO_LAST 0x00001000 /* Last TSO of iSCSI PDU */
@@ -2908,7 +2942,6 @@ struct ixgbe_adv_tx_context_desc {
 #define IXGBE_ADVTXD_TUCMD_L4T_SCTP  0x00001000  /* L4 Packet TYPE of SCTP */
 #define IXGBE_ADVTXD_TUCMD_L4T_RSV     0x00001800 /* RSV L4 Packet TYPE */
 #define IXGBE_ADVTXD_TUCMD_MKRREQ    0x00002000 /*Req requires Markers and CRC*/
-#define IXGBE_ADVTXD_POPTS_IPSEC      0x00000400 /* IPSec offload request */
 #define IXGBE_ADVTXD_TUCMD_IPSEC_TYPE_ESP 0x00002000 /* IPSec Type ESP */
 #define IXGBE_ADVTXD_TUCMD_IPSEC_ENCRYPT_EN 0x00004000/* ESP Encrypt Enable */
 #define IXGBE_ADVTXT_TUCMD_FCOE      0x00008000       /* FCoE Frame Type */
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c
index cb7da5f9c4da..f470d0204771 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c
@@ -949,7 +949,7 @@ static s32 ixgbe_checksum_ptr_x550(struct ixgbe_hw *hw, u16 ptr,
 	u16 length, bufsz, i, start;
 	u16 *local_buffer;
 
-	bufsz = sizeof(buf) / sizeof(buf[0]);
+	bufsz = ARRAY_SIZE(buf);
 
 	/* Read a chunk at the pointer location */
 	if (!buffer) {
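
Note: ARRAY_SIZE() is the idiomatic replacement for the hand-rolled
sizeof division; the kernel macro additionally fails the build when
handed a pointer instead of an array. User-space equivalent:

    #include <stddef.h>

    #define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

    static unsigned short buf[256];
    static const size_t bufsz = ARRAY_SIZE(buf);    /* 256 elements */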
@@ -1642,10 +1642,12 @@ static s32 ixgbe_supported_sfp_modules_X550em(struct ixgbe_hw *hw, bool *linear)
 }
 
 /**
- *  ixgbe_setup_mac_link_sfp_x550em - Configure the KR PHY for SFP.
- *  @hw: pointer to hardware structure
+ * ixgbe_setup_mac_link_sfp_x550em - Configure the KR PHY for SFP.
+ * @hw: pointer to hardware structure
+ * @speed: the link speed to force
+ * @autoneg_wait_to_complete: unused
  *
- *  Configures the extern PHY and the integrated KR PHY for SFP support.
+ * Configures the external PHY and the integrated KR PHY for SFP support.
  */
 static s32
 ixgbe_setup_mac_link_sfp_x550em(struct ixgbe_hw *hw,
@@ -1737,6 +1739,8 @@ static s32 ixgbe_setup_sfi_x550a(struct ixgbe_hw *hw, ixgbe_link_speed *speed)
 /**
  * ixgbe_setup_mac_link_sfp_n - Setup internal PHY for native SFP
  * @hw: pointer to hardware structure
+ * @speed: link speed
+ * @autoneg_wait_to_complete: unused
  *
 * Configure the integrated PHY for native SFP support.
  */
@@ -1784,6 +1788,8 @@ ixgbe_setup_mac_link_sfp_n(struct ixgbe_hw *hw, ixgbe_link_speed speed,
 /**
  * ixgbe_setup_mac_link_sfp_x550a - Setup internal PHY for SFP
  * @hw: pointer to hardware structure
+ * @speed: link speed
+ * @autoneg_wait_to_complete: unused
  *
 * Configure the integrated PHY for SFP support.
  */
@@ -1859,7 +1865,7 @@ ixgbe_setup_mac_link_sfp_x550a(struct ixgbe_hw *hw, ixgbe_link_speed speed,
  * ixgbe_setup_mac_link_t_X550em - Sets the auto advertised link speed
  * @hw: pointer to hardware structure
  * @speed: new link speed
- * @autoneg_wait_to_complete: true when waiting for completion is needed
+ * @autoneg_wait: true when waiting for completion is needed
  *
  * Setup internal/external PHY link speed based on link speed, then set
  * external PHY auto advertised link speed.
@@ -1943,6 +1949,8 @@ static s32 ixgbe_check_link_t_X550em(struct ixgbe_hw *hw,
 /**
  * ixgbe_setup_sgmii - Set up link for sgmii
  * @hw: pointer to hardware structure
+ * @speed: unused
+ * @autoneg_wait_to_complete: unused
  */
 static s32
 ixgbe_setup_sgmii(struct ixgbe_hw *hw, __always_unused ixgbe_link_speed speed,
@@ -2014,6 +2022,8 @@ ixgbe_setup_sgmii(struct ixgbe_hw *hw, __always_unused ixgbe_link_speed speed,
 /**
  * ixgbe_setup_sgmii_fw - Set up link for sgmii with firmware-controlled PHYs
  * @hw: pointer to hardware structure
+ * @speed: the link speed to force
+ * @autoneg_wait: true when waiting for completion is needed
  */
 static s32 ixgbe_setup_sgmii_fw(struct ixgbe_hw *hw, ixgbe_link_speed speed,
 				bool autoneg_wait)
@@ -3735,6 +3745,7 @@ static void ixgbe_release_swfw_sync_x550em_a(struct ixgbe_hw *hw, u32 mask)
  * ixgbe_read_phy_reg_x550a - Reads specified PHY register
  * @hw: pointer to hardware structure
  * @reg_addr: 32 bit address of PHY register to read
+ * @device_type: 5 bit device type
  * @phy_data: Pointer to read data from PHY register
  *
  * Reads a value from a specified PHY register using the SWFW lock and PHY
diff --git a/drivers/net/ethernet/intel/ixgbevf/ethtool.c b/drivers/net/ethernet/intel/ixgbevf/ethtool.c
index ff9d05f308ee..4400e49090b4 100644
--- a/drivers/net/ethernet/intel/ixgbevf/ethtool.c
+++ b/drivers/net/ethernet/intel/ixgbevf/ethtool.c
@@ -75,6 +75,9 @@ static struct ixgbe_stats ixgbevf_gstrings_stats[] = {
 	IXGBEVF_STAT("tx_timeout_count", tx_timeout_count),
 	IXGBEVF_NETDEV_STAT(multicast),
 	IXGBEVF_STAT("rx_csum_offload_errors", hw_csum_rx_error),
+	IXGBEVF_STAT("alloc_rx_page", alloc_rx_page),
+	IXGBEVF_STAT("alloc_rx_page_failed", alloc_rx_page_failed),
+	IXGBEVF_STAT("alloc_rx_buff_failed", alloc_rx_buff_failed),
 };
 
 #define IXGBEVF_QUEUE_STATS_LEN ( \
diff --git a/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h b/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h
index 581f44bbd7b3..f6952425c87d 100644
--- a/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h
+++ b/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h
@@ -62,7 +62,12 @@ struct ixgbevf_tx_buffer {
 struct ixgbevf_rx_buffer {
 	dma_addr_t dma;
 	struct page *page;
-	unsigned int page_offset;
+#if (BITS_PER_LONG > 32) || (PAGE_SIZE >= 65536)
+	__u32 page_offset;
+#else
+	__u16 page_offset;
+#endif
+	__u16 pagecnt_bias;
 };
 
 struct ixgbevf_stats {
@@ -79,6 +84,7 @@ struct ixgbevf_tx_queue_stats {
 struct ixgbevf_rx_queue_stats {
 	u64 alloc_rx_page_failed;
 	u64 alloc_rx_buff_failed;
+	u64 alloc_rx_page;
 	u64 csum_err;
 };
 
@@ -260,6 +266,9 @@ static inline void ixgbevf_write_tail(struct ixgbevf_ring *ring, u32 value)
 #define MIN_MSIX_Q_VECTORS	1
 #define MIN_MSIX_COUNT		(MIN_MSIX_Q_VECTORS + NON_Q_VECTORS)
 
+#define IXGBEVF_RX_DMA_ATTR \
+	(DMA_ATTR_SKIP_CPU_SYNC | DMA_ATTR_WEAK_ORDERING)
+
 /* board specific private data structure */
 struct ixgbevf_adapter {
 	/* this field must be first, see ixgbevf_process_skb_fields */
@@ -287,8 +296,9 @@ struct ixgbevf_adapter {
 	u64 hw_csum_rx_error;
 	u64 hw_rx_no_dma_resources;
 	int num_msix_vectors;
-	u32 alloc_rx_page_failed;
-	u32 alloc_rx_buff_failed;
+	u64 alloc_rx_page_failed;
+	u64 alloc_rx_buff_failed;
+	u64 alloc_rx_page;
 
 	struct msix_entry *msix_entries;
 
diff --git a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
index 1f4a69134ade..9b3d43d28106 100644
--- a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
+++ b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
@@ -206,28 +206,6 @@ static void ixgbevf_set_ivar(struct ixgbevf_adapter *adapter, s8 direction,
 	}
 }
 
-static void ixgbevf_unmap_and_free_tx_resource(struct ixgbevf_ring *tx_ring,
-					struct ixgbevf_tx_buffer *tx_buffer)
-{
-	if (tx_buffer->skb) {
-		dev_kfree_skb_any(tx_buffer->skb);
-		if (dma_unmap_len(tx_buffer, len))
-			dma_unmap_single(tx_ring->dev,
-					 dma_unmap_addr(tx_buffer, dma),
-					 dma_unmap_len(tx_buffer, len),
-					 DMA_TO_DEVICE);
-	} else if (dma_unmap_len(tx_buffer, len)) {
-		dma_unmap_page(tx_ring->dev,
-			       dma_unmap_addr(tx_buffer, dma),
-			       dma_unmap_len(tx_buffer, len),
-			       DMA_TO_DEVICE);
-	}
-	tx_buffer->next_to_watch = NULL;
-	tx_buffer->skb = NULL;
-	dma_unmap_len_set(tx_buffer, len, 0);
-	/* tx_buffer must be completely set up in the transmit path */
-}
-
 static u64 ixgbevf_get_tx_completed(struct ixgbevf_ring *ring)
 {
 	return ring->stats.packets;
@@ -349,7 +327,6 @@ static bool ixgbevf_clean_tx_irq(struct ixgbevf_q_vector *q_vector,
 				 DMA_TO_DEVICE);
 
 		/* clear tx_buffer data */
-		tx_buffer->skb = NULL;
 		dma_unmap_len_set(tx_buffer, len, 0);
 
 		/* unmap remaining buffers */
@@ -554,7 +531,6 @@ static void ixgbevf_process_skb_fields(struct ixgbevf_ring *rx_ring,
  * ixgbevf_is_non_eop - process handling of non-EOP buffers
  * @rx_ring: Rx ring being processed
  * @rx_desc: Rx descriptor for current buffer
- * @skb: current socket buffer containing buffer in progress
  *
  * This function updates next to clean.  If the buffer is an EOP buffer
  * this function exits returning false, otherwise it will place the
@@ -596,8 +572,8 @@ static bool ixgbevf_alloc_mapped_page(struct ixgbevf_ring *rx_ring,
 	}
 
 	/* map page for use */
-	dma = dma_map_page(rx_ring->dev, page, 0,
-			   PAGE_SIZE, DMA_FROM_DEVICE);
+	dma = dma_map_page_attrs(rx_ring->dev, page, 0, PAGE_SIZE,
+				 DMA_FROM_DEVICE, IXGBEVF_RX_DMA_ATTR);
 
 	/* if mapping failed free memory back to system since
 	 * there isn't much point in holding memory we can't use
@@ -605,13 +581,15 @@ static bool ixgbevf_alloc_mapped_page(struct ixgbevf_ring *rx_ring,
 	if (dma_mapping_error(rx_ring->dev, dma)) {
 		__free_page(page);
 
-		rx_ring->rx_stats.alloc_rx_buff_failed++;
+		rx_ring->rx_stats.alloc_rx_page_failed++;
 		return false;
 	}
 
 	bi->dma = dma;
 	bi->page = page;
 	bi->page_offset = 0;
+	bi->pagecnt_bias = 1;
+	rx_ring->rx_stats.alloc_rx_page++;
 
 	return true;
 }
@@ -640,6 +618,12 @@ static void ixgbevf_alloc_rx_buffers(struct ixgbevf_ring *rx_ring,
 		if (!ixgbevf_alloc_mapped_page(rx_ring, bi))
 			break;
 
+		/* sync the buffer for use by the device */
+		dma_sync_single_range_for_device(rx_ring->dev, bi->dma,
+						 bi->page_offset,
+						 IXGBEVF_RX_BUFSZ,
+						 DMA_FROM_DEVICE);
+
 		/* Refresh the desc even if pkt_addr didn't change
 		 * because each write-back erases this info.
 		 */
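
Note: the Rx DMA handling now follows the map-once/sync-per-use
pattern: pages are mapped with DMA_ATTR_SKIP_CPU_SYNC (no implicit CPU
sync at map or unmap time) and DMA_ATTR_WEAK_ORDERING, and only the
buffer-sized slice the NIC will actually write is synced before each
use. A sketch of the lifecycle; a matching unmap would have to pass
the same attrs:

    /* allocation: map the whole page, but do not sync it yet */
    dma = dma_map_page_attrs(rx_ring->dev, page, 0, PAGE_SIZE,
                             DMA_FROM_DEVICE, IXGBEVF_RX_DMA_ATTR);

    /* before handing a buffer to hardware: sync just this slice */
    dma_sync_single_range_for_device(rx_ring->dev, bi->dma,
                                     bi->page_offset, IXGBEVF_RX_BUFSZ,
                                     DMA_FROM_DEVICE);

    /* teardown: same attrs, so the CPU sync is skipped again */
    dma_unmap_page_attrs(rx_ring->dev, dma, PAGE_SIZE,
                         DMA_FROM_DEVICE, IXGBEVF_RX_DMA_ATTR);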
@@ -654,8 +638,8 @@ static void ixgbevf_alloc_rx_buffers(struct ixgbevf_ring *rx_ring,
 			i -= rx_ring->count;
 		}
 
-		/* clear the hdr_addr for the next_to_use descriptor */
-		rx_desc->read.hdr_addr = 0;
+		/* clear the length for the next_to_use descriptor */
+		rx_desc->wb.upper.length = 0;
 
 		cleaned_count--;
 	} while (cleaned_count);
@@ -742,12 +726,7 @@ static void ixgbevf_reuse_rx_page(struct ixgbevf_ring *rx_ring,
 	new_buff->page = old_buff->page;
 	new_buff->dma = old_buff->dma;
 	new_buff->page_offset = old_buff->page_offset;
-
-	/* sync the buffer for use by the device */
-	dma_sync_single_range_for_device(rx_ring->dev, new_buff->dma,
-					 new_buff->page_offset,
-					 IXGBEVF_RX_BUFSZ,
-					 DMA_FROM_DEVICE);
+	new_buff->pagecnt_bias = old_buff->pagecnt_bias;
 }
 
 static inline bool ixgbevf_page_is_reserved(struct page *page)
@@ -755,6 +734,45 @@ static inline bool ixgbevf_page_is_reserved(struct page *page)
 	return (page_to_nid(page) != numa_mem_id()) || page_is_pfmemalloc(page);
 }
 
+static bool ixgbevf_can_reuse_rx_page(struct ixgbevf_rx_buffer *rx_buffer,
+				      struct page *page,
+				      const unsigned int truesize)
+{
+	unsigned int pagecnt_bias = rx_buffer->pagecnt_bias--;
+
+	/* avoid re-using remote pages */
+	if (unlikely(ixgbevf_page_is_reserved(page)))
+		return false;
+
+#if (PAGE_SIZE < 8192)
+	/* if we are only owner of page we can reuse it */
+	if (unlikely(page_ref_count(page) != pagecnt_bias))
+		return false;
+
+	/* flip page offset to other buffer */
+	rx_buffer->page_offset ^= IXGBEVF_RX_BUFSZ;
+
+#else
+	/* move offset up to the next cache line */
+	rx_buffer->page_offset += truesize;
+
+	if (rx_buffer->page_offset > (PAGE_SIZE - IXGBEVF_RX_BUFSZ))
+		return false;
+
+#endif
+
+	/* If we have drained the page fragment pool we need to update
+	 * the pagecnt_bias and page count so that we fully restock the
+	 * number of references the driver holds.
+	 */
+	if (unlikely(pagecnt_bias == 1)) {
+		page_ref_add(page, USHRT_MAX);
+		rx_buffer->pagecnt_bias = USHRT_MAX;
+	}
+
+	return true;
+}
+
 /**
  * ixgbevf_add_rx_frag - Add contents of Rx buffer to sk_buff
  * @rx_ring: rx descriptor ring to transact packets on
@@ -772,12 +790,12 @@ static inline bool ixgbevf_page_is_reserved(struct page *page)
  **/
 static bool ixgbevf_add_rx_frag(struct ixgbevf_ring *rx_ring,
 				struct ixgbevf_rx_buffer *rx_buffer,
+				u16 size,
 				union ixgbe_adv_rx_desc *rx_desc,
 				struct sk_buff *skb)
 {
 	struct page *page = rx_buffer->page;
 	unsigned char *va = page_address(page) + rx_buffer->page_offset;
-	unsigned int size = le16_to_cpu(rx_desc->wb.upper.length);
 #if (PAGE_SIZE < 8192)
 	unsigned int truesize = IXGBEVF_RX_BUFSZ;
 #else
@@ -796,7 +814,6 @@ static bool ixgbevf_add_rx_frag(struct ixgbevf_ring *rx_ring,
 			return true;
 
 		/* this page cannot be reused so discard it */
-		put_page(page);
 		return false;
 	}
 
@@ -816,32 +833,7 @@ add_tail_frag:
 	skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page,
 			(unsigned long)va & ~PAGE_MASK, size, truesize);
 
-	/* avoid re-using remote pages */
-	if (unlikely(ixgbevf_page_is_reserved(page)))
-		return false;
-
-#if (PAGE_SIZE < 8192)
-	/* if we are only owner of page we can reuse it */
-	if (unlikely(page_count(page) != 1))
-		return false;
-
-	/* flip page offset to other buffer */
-	rx_buffer->page_offset ^= IXGBEVF_RX_BUFSZ;
-
-#else
-	/* move offset up to the next cache line */
-	rx_buffer->page_offset += truesize;
-
-	if (rx_buffer->page_offset > (PAGE_SIZE - IXGBEVF_RX_BUFSZ))
-		return false;
-
-#endif
-	/* Even if we own the page, we are not allowed to use atomic_set()
-	 * This would break get_page_unless_zero() users.
-	 */
-	page_ref_inc(page);
-
-	return true;
+	return ixgbevf_can_reuse_rx_page(rx_buffer, page, truesize);
 }
 
 static struct sk_buff *ixgbevf_fetch_rx_buffer(struct ixgbevf_ring *rx_ring,
@@ -850,11 +842,19 @@ static struct sk_buff *ixgbevf_fetch_rx_buffer(struct ixgbevf_ring *rx_ring,
 {
 	struct ixgbevf_rx_buffer *rx_buffer;
 	struct page *page;
+	u16 size = le16_to_cpu(rx_desc->wb.upper.length);
 
 	rx_buffer = &rx_ring->rx_buffer_info[rx_ring->next_to_clean];
 	page = rx_buffer->page;
 	prefetchw(page);
 
+	/* we are reusing so sync this buffer for CPU use */
+	dma_sync_single_range_for_cpu(rx_ring->dev,
+				      rx_buffer->dma,
+				      rx_buffer->page_offset,
+				      size,
+				      DMA_FROM_DEVICE);
+
 	if (likely(!skb)) {
 		void *page_addr = page_address(page) +
 				  rx_buffer->page_offset;
@@ -880,21 +880,18 @@ static struct sk_buff *ixgbevf_fetch_rx_buffer(struct ixgbevf_ring *rx_ring,
 		prefetchw(skb->data);
 	}
 
-	/* we are reusing so sync this buffer for CPU use */
-	dma_sync_single_range_for_cpu(rx_ring->dev,
-				      rx_buffer->dma,
-				      rx_buffer->page_offset,
-				      IXGBEVF_RX_BUFSZ,
-				      DMA_FROM_DEVICE);
-
 	/* pull page into skb */
-	if (ixgbevf_add_rx_frag(rx_ring, rx_buffer, rx_desc, skb)) {
+	if (ixgbevf_add_rx_frag(rx_ring, rx_buffer, size, rx_desc, skb)) {
 		/* hand second half of page back to the ring */
 		ixgbevf_reuse_rx_page(rx_ring, rx_buffer);
 	} else {
-		/* we are not reusing the buffer so unmap it */
-		dma_unmap_page(rx_ring->dev, rx_buffer->dma,
-			       PAGE_SIZE, DMA_FROM_DEVICE);
+		/* We are not reusing the buffer so unmap it and free
+		 * any references we are holding to it
+		 */
+		dma_unmap_page_attrs(rx_ring->dev, rx_buffer->dma,
+				     PAGE_SIZE, DMA_FROM_DEVICE,
+				     IXGBEVF_RX_DMA_ATTR);
+		__page_frag_cache_drain(page, rx_buffer->pagecnt_bias);
 	}
 
 	/* clear contents of buffer_info */
@@ -931,7 +928,7 @@ static int ixgbevf_clean_rx_irq(struct ixgbevf_q_vector *q_vector,
 
 		rx_desc = IXGBEVF_RX_DESC(rx_ring, rx_ring->next_to_clean);
 
-		if (!ixgbevf_test_staterr(rx_desc, IXGBE_RXD_STAT_DD))
+		if (!rx_desc->wb.upper.length)
 			break;
 
 		/* This memory barrier is needed to keep us from reading
@@ -944,8 +941,10 @@ static int ixgbevf_clean_rx_irq(struct ixgbevf_q_vector *q_vector,
 		skb = ixgbevf_fetch_rx_buffer(rx_ring, rx_desc, skb);
 
 		/* exit if we failed to retrieve a buffer */
-		if (!skb)
+		if (!skb) {
+			rx_ring->rx_stats.alloc_rx_buff_failed++;
 			break;
+		}
 
 		cleaned_count++;
 
@@ -1554,6 +1553,10 @@ static void ixgbevf_configure_tx_ring(struct ixgbevf_adapter *adapter,
 	txdctl |= (1u << 8) |    /* HTHRESH = 1 */
 		   32;           /* PTHRESH = 32 */
 
+	/* reinitialize tx_buffer_info */
+	memset(ring->tx_buffer_info, 0,
+	       sizeof(struct ixgbevf_tx_buffer) * ring->count);
+
 	clear_bit(__IXGBEVF_HANG_CHECK_ARMED, &ring->state);
 
 	IXGBE_WRITE_REG(hw, IXGBE_VFTXDCTL(reg_idx), txdctl);
@@ -1722,6 +1725,7 @@ static void ixgbevf_configure_rx_ring(struct ixgbevf_adapter *adapter,
 				      struct ixgbevf_ring *ring)
 {
 	struct ixgbe_hw *hw = &adapter->hw;
+	union ixgbe_adv_rx_desc *rx_desc;
 	u64 rdba = ring->dma;
 	u32 rxdctl;
 	u8 reg_idx = ring->reg_idx;
@@ -1750,6 +1754,14 @@ static void ixgbevf_configure_rx_ring(struct ixgbevf_adapter *adapter,
 	IXGBE_WRITE_REG(hw, IXGBE_VFRDT(reg_idx), 0);
 	ring->tail = adapter->io_addr + IXGBE_VFRDT(reg_idx);
 
+	/* initialize rx_buffer_info */
+	memset(ring->rx_buffer_info, 0,
+	       sizeof(struct ixgbevf_rx_buffer) * ring->count);
+
+	/* initialize Rx descriptor 0 */
+	rx_desc = IXGBEVF_RX_DESC(ring, 0);
+	rx_desc->wb.upper.length = 0;
+
 	/* reset ntu and ntc to place SW in sync with hardware */
 	ring->next_to_clean = 0;
 	ring->next_to_use = 0;
@@ -1896,10 +1908,6 @@ static void ixgbevf_set_rx_mode(struct net_device *netdev)
 	unsigned int flags = netdev->flags;
 	int xcast_mode;
 
-	xcast_mode = (flags & IFF_ALLMULTI) ? IXGBEVF_XCAST_MODE_ALLMULTI :
-		     (flags & (IFF_BROADCAST | IFF_MULTICAST)) ?
-		     IXGBEVF_XCAST_MODE_MULTI : IXGBEVF_XCAST_MODE_NONE;
-
 	/* request the most inclusive mode we need */
 	if (flags & IFF_PROMISC)
 		xcast_mode = IXGBEVF_XCAST_MODE_PROMISC;
@@ -2108,9 +2116,7 @@ void ixgbevf_up(struct ixgbevf_adapter *adapter)
  **/
 static void ixgbevf_clean_rx_ring(struct ixgbevf_ring *rx_ring)
 {
-	struct device *dev = rx_ring->dev;
-	unsigned long size;
-	unsigned int i;
+	u16 i = rx_ring->next_to_clean;
 
 	/* Free Rx ring sk_buff */
 	if (rx_ring->skb) {
@@ -2118,29 +2124,39 @@ static void ixgbevf_clean_rx_ring(struct ixgbevf_ring *rx_ring)
 		rx_ring->skb = NULL;
 	}
 
-	/* ring already cleared, nothing to do */
-	if (!rx_ring->rx_buffer_info)
-		return;
-
 	/* Free all the Rx ring pages */
-	for (i = 0; i < rx_ring->count; i++) {
+	while (i != rx_ring->next_to_alloc) {
 		struct ixgbevf_rx_buffer *rx_buffer;
 
 		rx_buffer = &rx_ring->rx_buffer_info[i];
-		if (rx_buffer->dma)
-			dma_unmap_page(dev, rx_buffer->dma,
-				       PAGE_SIZE, DMA_FROM_DEVICE);
-		rx_buffer->dma = 0;
-		if (rx_buffer->page)
-			__free_page(rx_buffer->page);
-		rx_buffer->page = NULL;
-	}
 
-	size = sizeof(struct ixgbevf_rx_buffer) * rx_ring->count;
-	memset(rx_ring->rx_buffer_info, 0, size);
+		/* Invalidate cache lines that may have been written to by
+		 * device so that we avoid corrupting memory.
+		 */
+		dma_sync_single_range_for_cpu(rx_ring->dev,
+					      rx_buffer->dma,
+					      rx_buffer->page_offset,
+					      IXGBEVF_RX_BUFSZ,
+					      DMA_FROM_DEVICE);
+
+		/* free resources associated with mapping */
+		dma_unmap_page_attrs(rx_ring->dev,
+				     rx_buffer->dma,
+				     PAGE_SIZE,
+				     DMA_FROM_DEVICE,
+				     IXGBEVF_RX_DMA_ATTR);
+
+		__page_frag_cache_drain(rx_buffer->page,
+					rx_buffer->pagecnt_bias);
 
-	/* Zero out the descriptor ring */
-	memset(rx_ring->desc, 0, rx_ring->size);
+		i++;
+		if (i == rx_ring->count)
+			i = 0;
+	}
+
+	rx_ring->next_to_alloc = 0;
+	rx_ring->next_to_clean = 0;
+	rx_ring->next_to_use = 0;
 }
 
 /**
@@ -2149,23 +2165,57 @@ static void ixgbevf_clean_rx_ring(struct ixgbevf_ring *rx_ring)
  **/
 static void ixgbevf_clean_tx_ring(struct ixgbevf_ring *tx_ring)
 {
-	struct ixgbevf_tx_buffer *tx_buffer_info;
-	unsigned long size;
-	unsigned int i;
+	u16 i = tx_ring->next_to_clean;
+	struct ixgbevf_tx_buffer *tx_buffer = &tx_ring->tx_buffer_info[i];
 
-	if (!tx_ring->tx_buffer_info)
-		return;
+	while (i != tx_ring->next_to_use) {
+		union ixgbe_adv_tx_desc *eop_desc, *tx_desc;
 
-	/* Free all the Tx ring sk_buffs */
-	for (i = 0; i < tx_ring->count; i++) {
-		tx_buffer_info = &tx_ring->tx_buffer_info[i];
-		ixgbevf_unmap_and_free_tx_resource(tx_ring, tx_buffer_info);
+		/* Free all the Tx ring sk_buffs */
+		dev_kfree_skb_any(tx_buffer->skb);
+
+		/* unmap skb header data */
+		dma_unmap_single(tx_ring->dev,
+				 dma_unmap_addr(tx_buffer, dma),
+				 dma_unmap_len(tx_buffer, len),
+				 DMA_TO_DEVICE);
+
+		/* check for eop_desc to determine the end of the packet */
+		eop_desc = tx_buffer->next_to_watch;
+		tx_desc = IXGBEVF_TX_DESC(tx_ring, i);
+
+		/* unmap remaining buffers */
+		while (tx_desc != eop_desc) {
+			tx_buffer++;
+			tx_desc++;
+			i++;
+			if (unlikely(i == tx_ring->count)) {
+				i = 0;
+				tx_buffer = tx_ring->tx_buffer_info;
+				tx_desc = IXGBEVF_TX_DESC(tx_ring, 0);
+			}
+
+			/* unmap any remaining paged data */
+			if (dma_unmap_len(tx_buffer, len))
+				dma_unmap_page(tx_ring->dev,
+					       dma_unmap_addr(tx_buffer, dma),
+					       dma_unmap_len(tx_buffer, len),
+					       DMA_TO_DEVICE);
+		}
+
+		/* move us one more past the eop_desc for start of next pkt */
+		tx_buffer++;
+		i++;
+		if (unlikely(i == tx_ring->count)) {
+			i = 0;
+			tx_buffer = tx_ring->tx_buffer_info;
+		}
 	}
 
-	size = sizeof(struct ixgbevf_tx_buffer) * tx_ring->count;
-	memset(tx_ring->tx_buffer_info, 0, size);
+	/* reset next_to_use and next_to_clean */
+	tx_ring->next_to_use = 0;
+	tx_ring->next_to_clean = 0;
 
-	memset(tx_ring->desc, 0, tx_ring->size);
 }
 
 /**
@@ -2717,6 +2767,8 @@ out:
 void ixgbevf_update_stats(struct ixgbevf_adapter *adapter)
 {
 	struct ixgbe_hw *hw = &adapter->hw;
+	u64 alloc_rx_page_failed = 0, alloc_rx_buff_failed = 0;
+	u64 alloc_rx_page = 0, hw_csum_rx_error = 0;
 	int i;
 
 	if (test_bit(__IXGBEVF_DOWN, &adapter->state) ||
@@ -2737,15 +2789,23 @@ void ixgbevf_update_stats(struct ixgbevf_adapter *adapter)
 				adapter->stats.vfmprc);
 
 	for (i = 0;  i  < adapter->num_rx_queues;  i++) {
-		adapter->hw_csum_rx_error +=
-			adapter->rx_ring[i]->hw_csum_rx_error;
-		adapter->rx_ring[i]->hw_csum_rx_error = 0;
+		struct ixgbevf_ring *rx_ring = adapter->rx_ring[i];
+
+		hw_csum_rx_error += rx_ring->rx_stats.csum_err;
+		alloc_rx_page_failed += rx_ring->rx_stats.alloc_rx_page_failed;
+		alloc_rx_buff_failed += rx_ring->rx_stats.alloc_rx_buff_failed;
+		alloc_rx_page += rx_ring->rx_stats.alloc_rx_page;
 	}
+
+	adapter->hw_csum_rx_error = hw_csum_rx_error;
+	adapter->alloc_rx_page_failed = alloc_rx_page_failed;
+	adapter->alloc_rx_buff_failed = alloc_rx_buff_failed;
+	adapter->alloc_rx_page = alloc_rx_page;
 }
 
 /**
  * ixgbevf_service_timer - Timer Call-back
- * @data: pointer to adapter cast into an unsigned long
+ * @t: pointer to timer_list struct
  **/
 static void ixgbevf_service_timer(struct timer_list *t)
 {
@@ -2888,7 +2948,7 @@ static void ixgbevf_watchdog_link_is_down(struct ixgbevf_adapter *adapter)
 
 /**
  * ixgbevf_watchdog_subtask - worker thread to bring link up
- * @work: pointer to work_struct containing our data
+ * @adapter: board private structure
  **/
 static void ixgbevf_watchdog_subtask(struct ixgbevf_adapter *adapter)
 {
@@ -2985,7 +3045,7 @@ int ixgbevf_setup_tx_resources(struct ixgbevf_ring *tx_ring)
 	int size;
 
 	size = sizeof(struct ixgbevf_tx_buffer) * tx_ring->count;
-	tx_ring->tx_buffer_info = vzalloc(size);
+	tx_ring->tx_buffer_info = vmalloc(size);
 	if (!tx_ring->tx_buffer_info)
 		goto err;
 
@@ -3045,7 +3105,7 @@ int ixgbevf_setup_rx_resources(struct ixgbevf_ring *rx_ring)
 	int size;
 
 	size = sizeof(struct ixgbevf_rx_buffer) * rx_ring->count;
-	rx_ring->rx_buffer_info = vzalloc(size);
+	rx_ring->rx_buffer_info = vmalloc(size);
 	if (!rx_ring->rx_buffer_info)
 		goto err;
 
@@ -3487,34 +3547,37 @@ static void ixgbevf_tx_map(struct ixgbevf_ring *tx_ring,
 			   struct ixgbevf_tx_buffer *first,
 			   const u8 hdr_len)
 {
-	dma_addr_t dma;
 	struct sk_buff *skb = first->skb;
 	struct ixgbevf_tx_buffer *tx_buffer;
 	union ixgbe_adv_tx_desc *tx_desc;
-	struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[0];
-	unsigned int data_len = skb->data_len;
-	unsigned int size = skb_headlen(skb);
-	unsigned int paylen = skb->len - hdr_len;
+	struct skb_frag_struct *frag;
+	dma_addr_t dma;
+	unsigned int data_len, size;
 	u32 tx_flags = first->tx_flags;
-	__le32 cmd_type;
+	__le32 cmd_type = ixgbevf_tx_cmd_type(tx_flags);
 	u16 i = tx_ring->next_to_use;
 
 	tx_desc = IXGBEVF_TX_DESC(tx_ring, i);
 
-	ixgbevf_tx_olinfo_status(tx_desc, tx_flags, paylen);
-	cmd_type = ixgbevf_tx_cmd_type(tx_flags);
+	ixgbevf_tx_olinfo_status(tx_desc, tx_flags, skb->len - hdr_len);
+
+	size = skb_headlen(skb);
+	data_len = skb->data_len;
 
 	dma = dma_map_single(tx_ring->dev, skb->data, size, DMA_TO_DEVICE);
-	if (dma_mapping_error(tx_ring->dev, dma))
-		goto dma_error;
 
-	/* record length, and DMA address */
-	dma_unmap_len_set(first, len, size);
-	dma_unmap_addr_set(first, dma, dma);
+	tx_buffer = first;
 
-	tx_desc->read.buffer_addr = cpu_to_le64(dma);
+	for (frag = &skb_shinfo(skb)->frags[0];; frag++) {
+		if (dma_mapping_error(tx_ring->dev, dma))
+			goto dma_error;
+
+		/* record length, and DMA address */
+		dma_unmap_len_set(tx_buffer, len, size);
+		dma_unmap_addr_set(tx_buffer, dma, dma);
+
+		tx_desc->read.buffer_addr = cpu_to_le64(dma);
 
-	for (;;) {
 		while (unlikely(size > IXGBE_MAX_DATA_PER_TXD)) {
 			tx_desc->read.cmd_type_len =
 				cmd_type | cpu_to_le32(IXGBE_MAX_DATA_PER_TXD);
@@ -3525,12 +3588,12 @@ static void ixgbevf_tx_map(struct ixgbevf_ring *tx_ring,
 				tx_desc = IXGBEVF_TX_DESC(tx_ring, 0);
 				i = 0;
 			}
+			tx_desc->read.olinfo_status = 0;
 
 			dma += IXGBE_MAX_DATA_PER_TXD;
 			size -= IXGBE_MAX_DATA_PER_TXD;
 
 			tx_desc->read.buffer_addr = cpu_to_le64(dma);
-			tx_desc->read.olinfo_status = 0;
 		}
 
 		if (likely(!data_len))
@@ -3544,23 +3607,15 @@ static void ixgbevf_tx_map(struct ixgbevf_ring *tx_ring,
 			tx_desc = IXGBEVF_TX_DESC(tx_ring, 0);
 			i = 0;
 		}
+		tx_desc->read.olinfo_status = 0;
 
 		size = skb_frag_size(frag);
 		data_len -= size;
 
 		dma = skb_frag_dma_map(tx_ring->dev, frag, 0, size,
 				       DMA_TO_DEVICE);
-		if (dma_mapping_error(tx_ring->dev, dma))
-			goto dma_error;
 
 		tx_buffer = &tx_ring->tx_buffer_info[i];
-		dma_unmap_len_set(tx_buffer, len, size);
-		dma_unmap_addr_set(tx_buffer, dma, dma);
-
-		tx_desc->read.buffer_addr = cpu_to_le64(dma);
-		tx_desc->read.olinfo_status = 0;
-
-		frag++;
 	}
 
 	/* write last descriptor with RS and EOP bits */
@@ -3594,18 +3649,32 @@ static void ixgbevf_tx_map(struct ixgbevf_ring *tx_ring,
 	return;
 dma_error:
 	dev_err(tx_ring->dev, "TX DMA map failed\n");
+	tx_buffer = &tx_ring->tx_buffer_info[i];
 
 	/* clear dma mappings for failed tx_buffer_info map */
-	for (;;) {
+	while (tx_buffer != first) {
+		if (dma_unmap_len(tx_buffer, len))
+			dma_unmap_page(tx_ring->dev,
+				       dma_unmap_addr(tx_buffer, dma),
+				       dma_unmap_len(tx_buffer, len),
+				       DMA_TO_DEVICE);
+		dma_unmap_len_set(tx_buffer, len, 0);
+
+		if (i-- == 0)
+			i += tx_ring->count;
 		tx_buffer = &tx_ring->tx_buffer_info[i];
-		ixgbevf_unmap_and_free_tx_resource(tx_ring, tx_buffer);
-		if (tx_buffer == first)
-			break;
-		if (i == 0)
-			i = tx_ring->count;
-		i--;
 	}
 
+	if (dma_unmap_len(tx_buffer, len))
+		dma_unmap_single(tx_ring->dev,
+				 dma_unmap_addr(tx_buffer, dma),
+				 dma_unmap_len(tx_buffer, len),
+				 DMA_TO_DEVICE);
+	dma_unmap_len_set(tx_buffer, len, 0);
+
+	dev_kfree_skb_any(tx_buffer->skb);
+	tx_buffer->skb = NULL;
+
 	tx_ring->next_to_use = i;
 }
 
@@ -4368,6 +4437,7 @@ static void __exit ixgbevf_exit_module(void)
 /**
  * ixgbevf_get_hw_dev_name - return device name string
  * used by hardware layer to print debugging information
+ * @hw: pointer to private hardware struct
  **/
 char *ixgbevf_get_hw_dev_name(struct ixgbe_hw *hw)
 {
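
The ixgbevf_can_reuse_rx_page() function added above brings over the pagecnt_bias recycling scheme from ixgbe: the driver takes a large block of page references once, pays fragments out of a local bias counter, and treats page_ref_count() == bias as proof that it is the sole owner. A minimal sketch of the idea, with hypothetical names and only the PAGE_SIZE < 8192 bookkeeping shown:

#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/page_ref.h>

struct example_rx_buf {
	struct page *page;
	unsigned short pagecnt_bias;	/* references the driver still owns */
};

static bool example_can_recycle(struct example_rx_buf *buf)
{
	unsigned int bias = buf->pagecnt_bias--;

	/* sole owner iff the atomic refcount equals our cached bias */
	if (page_ref_count(buf->page) != bias)
		return false;

	/* restock references in bulk rather than one page_ref_inc() per
	 * packet; __page_frag_cache_drain() releases the surplus when the
	 * buffer is finally freed
	 */
	if (unlikely(bias == 1)) {
		page_ref_add(buf->page, USHRT_MAX);
		buf->pagecnt_bias = USHRT_MAX;
	}

	return true;
}
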
diff --git a/drivers/net/ethernet/intel/ixgbevf/vf.c b/drivers/net/ethernet/intel/ixgbevf/vf.c
index 0c25006ce9af..38d3a327c1bc 100644
--- a/drivers/net/ethernet/intel/ixgbevf/vf.c
+++ b/drivers/net/ethernet/intel/ixgbevf/vf.c
@@ -146,6 +146,7 @@ static s32 ixgbevf_reset_hw_vf(struct ixgbe_hw *hw)
 /**
  * Hyper-V variant; the VF/PF communication is through the PCI
  * config space.
+ * @hw: pointer to private hardware struct
  */
 static s32 ixgbevf_hv_reset_hw_vf(struct ixgbe_hw *hw)
 {
@@ -285,7 +286,7 @@ static s32 ixgbevf_set_uc_addr_vf(struct ixgbe_hw *hw, u32 index, u8 *addr)
 		ether_addr_copy(msg_addr, addr);
 
 	ret_val = ixgbevf_write_msg_read_ack(hw, msgbuf, msgbuf,
-					     sizeof(msgbuf) / sizeof(u32));
+					     ARRAY_SIZE(msgbuf));
 	if (!ret_val) {
 		msgbuf[0] &= ~IXGBE_VT_MSGTYPE_CTS;
 
@@ -303,7 +304,7 @@ static s32 ixgbevf_hv_set_uc_addr_vf(struct ixgbe_hw *hw, u32 index, u8 *addr)
 
 /**
  * ixgbevf_get_reta_locked - get the RSS redirection table (RETA) contents.
- * @adapter: pointer to the port handle
+ * @hw: pointer to hardware structure
  * @reta: buffer to fill with RETA contents.
  * @num_rx_queues: Number of Rx queues configured for this port
  *
@@ -455,8 +456,7 @@ static s32 ixgbevf_set_rar_vf(struct ixgbe_hw *hw, u32 index, u8 *addr,
 	ether_addr_copy(msg_addr, addr);
 
 	ret_val = ixgbevf_write_msg_read_ack(hw, msgbuf, msgbuf,
-					     sizeof(msgbuf) / sizeof(u32));
-
+					     ARRAY_SIZE(msgbuf));
 	msgbuf[0] &= ~IXGBE_VT_MSGTYPE_CTS;
 
 	/* if nacked the address was rejected, use "perm_addr" */
@@ -536,6 +536,8 @@ static s32 ixgbevf_update_mc_addr_list_vf(struct ixgbe_hw *hw,
 
 /**
  * Hyper-V variant - just a stub.
+ * @hw: unused
+ * @netdev: unused
  */
 static s32 ixgbevf_hv_update_mc_addr_list_vf(struct ixgbe_hw *hw,
 					     struct net_device *netdev)
@@ -571,7 +573,7 @@ static s32 ixgbevf_update_xcast_mode(struct ixgbe_hw *hw, int xcast_mode)
 	msgbuf[1] = xcast_mode;
 
 	err = ixgbevf_write_msg_read_ack(hw, msgbuf, msgbuf,
-					 sizeof(msgbuf) / sizeof(u32));
+					 ARRAY_SIZE(msgbuf));
 	if (err)
 		return err;
 
@@ -584,6 +586,8 @@ static s32 ixgbevf_update_xcast_mode(struct ixgbe_hw *hw, int xcast_mode)
 
 /**
  * Hyper-V variant - just a stub.
+ * @hw: unused
+ * @xcast_mode: unused
  */
 static s32 ixgbevf_hv_update_xcast_mode(struct ixgbe_hw *hw, int xcast_mode)
 {
@@ -609,7 +613,7 @@ static s32 ixgbevf_set_vfta_vf(struct ixgbe_hw *hw, u32 vlan, u32 vind,
 	msgbuf[0] |= vlan_on << IXGBE_VT_MSGINFO_SHIFT;
 
 	err = ixgbevf_write_msg_read_ack(hw, msgbuf, msgbuf,
-					 sizeof(msgbuf) / sizeof(u32));
+					 ARRAY_SIZE(msgbuf));
 	if (err)
 		goto mbx_err;
 
@@ -626,6 +630,10 @@ mbx_err:
 
 /**
  * Hyper-V variant - just a stub.
+ * @hw: unused
+ * @vlan: unused
+ * @vind: unused
+ * @vlan_on: unused
  */
 static s32 ixgbevf_hv_set_vfta_vf(struct ixgbe_hw *hw, u32 vlan, u32 vind,
 				  bool vlan_on)
@@ -655,7 +663,7 @@ static s32 ixgbevf_setup_mac_link_vf(struct ixgbe_hw *hw,
  *  @hw: pointer to hardware structure
  *  @speed: pointer to link speed
 *  @link_up: true if link is up, false otherwise
- *  @autoneg_wait_to_complete: true when waiting for completion is needed
+ *  @autoneg_wait_to_complete: unused
  *
  *  Reads the links register to determine if link is up and the current speed
  **/
@@ -740,6 +748,10 @@ out:
 
 /**
  * Hyper-V variant; there is no mailbox communication.
+ * @hw: pointer to private hardware struct
+ * @speed: pointer to link speed
+ * @link_up: true if link is up, false otherwise
+ * @autoneg_wait_to_complete: unused
  */
 static s32 ixgbevf_hv_check_mac_link_vf(struct ixgbe_hw *hw,
 					ixgbe_link_speed *speed,
@@ -813,7 +825,7 @@ static s32 ixgbevf_set_rlpml_vf(struct ixgbe_hw *hw, u16 max_size)
 	msgbuf[1] = max_size;
 
 	ret_val = ixgbevf_write_msg_read_ack(hw, msgbuf, msgbuf,
-					     sizeof(msgbuf) / sizeof(u32));
+					     ARRAY_SIZE(msgbuf));
 	if (ret_val)
 		return ret_val;
 	if ((msgbuf[0] & IXGBE_VF_SET_LPE) &&
@@ -859,8 +871,7 @@ static int ixgbevf_negotiate_api_version_vf(struct ixgbe_hw *hw, int api)
 	msg[1] = api;
 	msg[2] = 0;
 
-	err = ixgbevf_write_msg_read_ack(hw, msg, msg,
-					 sizeof(msg) / sizeof(u32));
+	err = ixgbevf_write_msg_read_ack(hw, msg, msg, ARRAY_SIZE(msg));
 	if (!err) {
 		msg[0] &= ~IXGBE_VT_MSGTYPE_CTS;
 
@@ -911,8 +922,7 @@ int ixgbevf_get_queues(struct ixgbe_hw *hw, unsigned int *num_tcs,
 	msg[0] = IXGBE_VF_GET_QUEUE;
 	msg[1] = msg[2] = msg[3] = msg[4] = 0;
 
-	err = ixgbevf_write_msg_read_ack(hw, msg, msg,
-					 sizeof(msg) / sizeof(u32));
+	err = ixgbevf_write_msg_read_ack(hw, msg, msg, ARRAY_SIZE(msg));
 	if (!err) {
 		msg[0] &= ~IXGBE_VT_MSGTYPE_CTS;
 
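
The vf.c hunks above replace the open-coded sizeof(msgbuf) / sizeof(u32) with ARRAY_SIZE(). The macro derives the element size from the array itself, so the count stays correct if the element type ever changes, and the kernel's definition refuses to compile when handed a pointer. A one-line illustration (hypothetical function name):

#include <linux/kernel.h>	/* ARRAY_SIZE(), BUILD_BUG_ON() */
#include <linux/types.h>

static void msgbuf_len_example(void)
{
	u32 msgbuf[3];

	/* equivalent today, but only ARRAY_SIZE() survives a type change */
	BUILD_BUG_ON(ARRAY_SIZE(msgbuf) != sizeof(msgbuf) / sizeof(u32));
}
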
diff --git a/drivers/net/ethernet/marvell/Kconfig b/drivers/net/ethernet/marvell/Kconfig
index da6fb825afea..ebe5c9148935 100644
--- a/drivers/net/ethernet/marvell/Kconfig
+++ b/drivers/net/ethernet/marvell/Kconfig
@@ -60,7 +60,7 @@ config MVNETA
 	depends on ARCH_MVEBU || COMPILE_TEST
 	depends on HAS_DMA
 	select MVMDIO
-	select FIXED_PHY
+	select PHYLINK
 	---help---
 	  This driver supports the network interface units in the
 	  Marvell ARMADA XP, ARMADA 370, ARMADA 38x and
diff --git a/drivers/net/ethernet/marvell/mvneta.c b/drivers/net/ethernet/marvell/mvneta.c
index a539263cd79c..25e9a551cc8c 100644
--- a/drivers/net/ethernet/marvell/mvneta.c
+++ b/drivers/net/ethernet/marvell/mvneta.c
@@ -28,7 +28,7 @@
 #include <linux/of_mdio.h>
 #include <linux/of_net.h>
 #include <linux/phy.h>
-#include <linux/phy_fixed.h>
+#include <linux/phylink.h>
 #include <linux/platform_device.h>
 #include <linux/skbuff.h>
 #include <net/hwbm.h>
@@ -189,6 +189,7 @@
 #define MVNETA_GMAC_CTRL_0                       0x2c00
 #define      MVNETA_GMAC_MAX_RX_SIZE_SHIFT       2
 #define      MVNETA_GMAC_MAX_RX_SIZE_MASK        0x7ffc
+#define      MVNETA_GMAC0_PORT_1000BASE_X        BIT(1)
 #define      MVNETA_GMAC0_PORT_ENABLE            BIT(0)
 #define MVNETA_GMAC_CTRL_2                       0x2c08
 #define      MVNETA_GMAC2_INBAND_AN_ENABLE       BIT(0)
@@ -204,13 +205,19 @@
 #define      MVNETA_GMAC_TX_FLOW_CTRL_ENABLE     BIT(5)
 #define      MVNETA_GMAC_RX_FLOW_CTRL_ACTIVE     BIT(6)
 #define      MVNETA_GMAC_TX_FLOW_CTRL_ACTIVE     BIT(7)
+#define      MVNETA_GMAC_AN_COMPLETE             BIT(11)
+#define      MVNETA_GMAC_SYNC_OK                 BIT(14)
 #define MVNETA_GMAC_AUTONEG_CONFIG               0x2c0c
 #define      MVNETA_GMAC_FORCE_LINK_DOWN         BIT(0)
 #define      MVNETA_GMAC_FORCE_LINK_PASS         BIT(1)
 #define      MVNETA_GMAC_INBAND_AN_ENABLE        BIT(2)
+#define      MVNETA_GMAC_AN_BYPASS_ENABLE        BIT(3)
+#define      MVNETA_GMAC_INBAND_RESTART_AN       BIT(4)
 #define      MVNETA_GMAC_CONFIG_MII_SPEED        BIT(5)
 #define      MVNETA_GMAC_CONFIG_GMII_SPEED       BIT(6)
 #define      MVNETA_GMAC_AN_SPEED_EN             BIT(7)
+#define      MVNETA_GMAC_CONFIG_FLOW_CTRL        BIT(8)
+#define      MVNETA_GMAC_ADVERT_SYM_FLOW_CTRL    BIT(9)
 #define      MVNETA_GMAC_AN_FLOW_CTRL_EN         BIT(11)
 #define      MVNETA_GMAC_CONFIG_FULL_DUPLEX      BIT(12)
 #define      MVNETA_GMAC_AN_DUPLEX_EN            BIT(13)
@@ -237,6 +244,12 @@
 #define MVNETA_TXQ_TOKEN_SIZE_REG(q)             (0x3e40 + ((q) << 2))
 #define      MVNETA_TXQ_TOKEN_SIZE_MAX           0x7fffffff
 
+#define MVNETA_LPI_CTRL_0                        0x2cc0
+#define MVNETA_LPI_CTRL_1                        0x2cc4
+#define      MVNETA_LPI_REQUEST_ENABLE           BIT(0)
+#define MVNETA_LPI_CTRL_2                        0x2cc8
+#define MVNETA_LPI_STATUS                        0x2ccc
+
 #define MVNETA_CAUSE_TXQ_SENT_DESC_ALL_MASK	 0xff
 
 /* Descriptor ring Macros */
@@ -313,6 +326,11 @@
 #define MVNETA_RX_GET_BM_POOL_ID(rxd) \
 	(((rxd)->status & MVNETA_RXD_BM_POOL_MASK) >> MVNETA_RXD_BM_POOL_SHIFT)
 
+enum {
+	ETHTOOL_STAT_EEE_WAKEUP,
+	ETHTOOL_MAX_STATS,
+};
+
 struct mvneta_statistic {
 	unsigned short offset;
 	unsigned short type;
@@ -321,6 +339,7 @@ struct mvneta_statistic {
 
 #define T_REG_32	32
 #define T_REG_64	64
+#define T_SW		1
 
 static const struct mvneta_statistic mvneta_statistics[] = {
 	{ 0x3000, T_REG_64, "good_octets_received", },
@@ -355,6 +374,7 @@ static const struct mvneta_statistic mvneta_statistics[] = {
 	{ 0x304c, T_REG_32, "broadcast_frames_sent", },
 	{ 0x3054, T_REG_32, "fc_sent", },
 	{ 0x300c, T_REG_32, "internal_mac_transmit_err", },
+	{ ETHTOOL_STAT_EEE_WAKEUP, T_SW, "eee_wakeup_errors", },
 };
 
 struct mvneta_pcpu_stats {
@@ -407,20 +427,20 @@ struct mvneta_port {
 	u16 tx_ring_size;
 	u16 rx_ring_size;
 
-	struct mii_bus *mii_bus;
 	phy_interface_t phy_interface;
-	struct device_node *phy_node;
-	unsigned int link;
-	unsigned int duplex;
-	unsigned int speed;
+	struct device_node *dn;
 	unsigned int tx_csum_limit;
-	unsigned int use_inband_status:1;
+	struct phylink *phylink;
 
 	struct mvneta_bm *bm_priv;
 	struct mvneta_bm_pool *pool_long;
 	struct mvneta_bm_pool *pool_short;
 	int bm_win_id;
 
+	bool eee_enabled;
+	bool eee_active;
+	bool tx_lpi_enabled;
+
 	u64 ethtool_stats[ARRAY_SIZE(mvneta_statistics)];
 
 	u32 indir[MVNETA_RSS_LU_TABLE_SIZE];
@@ -1214,10 +1234,6 @@ static void mvneta_port_disable(struct mvneta_port *pp)
 	val &= ~MVNETA_GMAC0_PORT_ENABLE;
 	mvreg_write(pp, MVNETA_GMAC_CTRL_0, val);
 
-	pp->link = 0;
-	pp->duplex = -1;
-	pp->speed = 0;
-
 	udelay(200);
 }
 
@@ -1277,44 +1293,6 @@ static void mvneta_set_other_mcast_table(struct mvneta_port *pp, int queue)
 		mvreg_write(pp, MVNETA_DA_FILT_OTH_MCAST + offset, val);
 }
 
-static void mvneta_set_autoneg(struct mvneta_port *pp, int enable)
-{
-	u32 val;
-
-	if (enable) {
-		val = mvreg_read(pp, MVNETA_GMAC_AUTONEG_CONFIG);
-		val &= ~(MVNETA_GMAC_FORCE_LINK_PASS |
-			 MVNETA_GMAC_FORCE_LINK_DOWN |
-			 MVNETA_GMAC_AN_FLOW_CTRL_EN);
-		val |= MVNETA_GMAC_INBAND_AN_ENABLE |
-		       MVNETA_GMAC_AN_SPEED_EN |
-		       MVNETA_GMAC_AN_DUPLEX_EN;
-		mvreg_write(pp, MVNETA_GMAC_AUTONEG_CONFIG, val);
-
-		val = mvreg_read(pp, MVNETA_GMAC_CLOCK_DIVIDER);
-		val |= MVNETA_GMAC_1MS_CLOCK_ENABLE;
-		mvreg_write(pp, MVNETA_GMAC_CLOCK_DIVIDER, val);
-
-		val = mvreg_read(pp, MVNETA_GMAC_CTRL_2);
-		val |= MVNETA_GMAC2_INBAND_AN_ENABLE;
-		mvreg_write(pp, MVNETA_GMAC_CTRL_2, val);
-	} else {
-		val = mvreg_read(pp, MVNETA_GMAC_AUTONEG_CONFIG);
-		val &= ~(MVNETA_GMAC_INBAND_AN_ENABLE |
-		       MVNETA_GMAC_AN_SPEED_EN |
-		       MVNETA_GMAC_AN_DUPLEX_EN);
-		mvreg_write(pp, MVNETA_GMAC_AUTONEG_CONFIG, val);
-
-		val = mvreg_read(pp, MVNETA_GMAC_CLOCK_DIVIDER);
-		val &= ~MVNETA_GMAC_1MS_CLOCK_ENABLE;
-		mvreg_write(pp, MVNETA_GMAC_CLOCK_DIVIDER, val);
-
-		val = mvreg_read(pp, MVNETA_GMAC_CTRL_2);
-		val &= ~MVNETA_GMAC2_INBAND_AN_ENABLE;
-		mvreg_write(pp, MVNETA_GMAC_CTRL_2, val);
-	}
-}
-
 static void mvneta_percpu_unmask_interrupt(void *arg)
 {
 	struct mvneta_port *pp = arg;
@@ -1467,7 +1445,6 @@ static void mvneta_defaults_set(struct mvneta_port *pp)
 	val &= ~MVNETA_PHY_POLLING_ENABLE;
 	mvreg_write(pp, MVNETA_UNIT_CONTROL, val);
 
-	mvneta_set_autoneg(pp, pp->use_inband_status);
 	mvneta_set_ucast_table(pp, -1);
 	mvneta_set_special_mcast_table(pp, -1);
 	mvneta_set_other_mcast_table(pp, -1);
@@ -2692,26 +2669,11 @@ static irqreturn_t mvneta_percpu_isr(int irq, void *dev_id)
 	return IRQ_HANDLED;
 }
 
-static int mvneta_fixed_link_update(struct mvneta_port *pp,
-				    struct phy_device *phy)
+static void mvneta_link_change(struct mvneta_port *pp)
 {
-	struct fixed_phy_status status;
-	struct fixed_phy_status changed = {};
 	u32 gmac_stat = mvreg_read(pp, MVNETA_GMAC_STATUS);
 
-	status.link = !!(gmac_stat & MVNETA_GMAC_LINK_UP);
-	if (gmac_stat & MVNETA_GMAC_SPEED_1000)
-		status.speed = SPEED_1000;
-	else if (gmac_stat & MVNETA_GMAC_SPEED_100)
-		status.speed = SPEED_100;
-	else
-		status.speed = SPEED_10;
-	status.duplex = !!(gmac_stat & MVNETA_GMAC_FULL_DUPLEX);
-	changed.link = 1;
-	changed.speed = 1;
-	changed.duplex = 1;
-	fixed_phy_update_state(phy, &status, &changed);
-	return 0;
+	phylink_mac_change(pp->phylink, !!(gmac_stat & MVNETA_GMAC_LINK_UP));
 }
 
 /* NAPI handler
@@ -2727,7 +2689,6 @@ static int mvneta_poll(struct napi_struct *napi, int budget)
 	u32 cause_rx_tx;
 	int rx_queue;
 	struct mvneta_port *pp = netdev_priv(napi->dev);
-	struct net_device *ndev = pp->dev;
 	struct mvneta_pcpu_port *port = this_cpu_ptr(pp->ports);
 
 	if (!netif_running(pp->dev)) {
@@ -2741,12 +2702,10 @@ static int mvneta_poll(struct napi_struct *napi, int budget)
 		u32 cause_misc = mvreg_read(pp, MVNETA_INTR_MISC_CAUSE);
 
 		mvreg_write(pp, MVNETA_INTR_MISC_CAUSE, 0);
-		if (pp->use_inband_status && (cause_misc &
-				(MVNETA_CAUSE_PHY_STATUS_CHANGE |
-				 MVNETA_CAUSE_LINK_CHANGE |
-				 MVNETA_CAUSE_PSC_SYNC_CHANGE))) {
-			mvneta_fixed_link_update(pp, ndev->phydev);
-		}
+
+		if (cause_misc & (MVNETA_CAUSE_PHY_STATUS_CHANGE |
+				  MVNETA_CAUSE_LINK_CHANGE))
+			mvneta_link_change(pp);
 	}
 
 	/* Release Tx descriptors */
@@ -3060,7 +3019,6 @@ static int mvneta_setup_txqs(struct mvneta_port *pp)
 static void mvneta_start_dev(struct mvneta_port *pp)
 {
 	int cpu;
-	struct net_device *ndev = pp->dev;
 
 	mvneta_max_rx_size_set(pp, pp->pkt_size);
 	mvneta_txq_max_tx_size_set(pp, pp->pkt_size);
@@ -3085,19 +3043,17 @@ static void mvneta_start_dev(struct mvneta_port *pp)
 
 	mvreg_write(pp, MVNETA_INTR_MISC_MASK,
 		    MVNETA_CAUSE_PHY_STATUS_CHANGE |
-		    MVNETA_CAUSE_LINK_CHANGE |
-		    MVNETA_CAUSE_PSC_SYNC_CHANGE);
+		    MVNETA_CAUSE_LINK_CHANGE);
 
-	phy_start(ndev->phydev);
+	phylink_start(pp->phylink);
 	netif_tx_start_all_queues(pp->dev);
 }
 
 static void mvneta_stop_dev(struct mvneta_port *pp)
 {
 	unsigned int cpu;
-	struct net_device *ndev = pp->dev;
 
-	phy_stop(ndev->phydev);
+	phylink_stop(pp->phylink);
 
 	if (!pp->neta_armada3700) {
 		for_each_online_cpu(cpu) {
@@ -3251,103 +3207,260 @@ static int mvneta_set_mac_addr(struct net_device *dev, void *addr)
 	return 0;
 }
 
-static void mvneta_adjust_link(struct net_device *ndev)
+static void mvneta_validate(struct net_device *ndev, unsigned long *supported,
+			    struct phylink_link_state *state)
+{
+	__ETHTOOL_DECLARE_LINK_MODE_MASK(mask) = { 0, };
+
+	/* We only support QSGMII, SGMII, 802.3z and RGMII modes */
+	if (state->interface != PHY_INTERFACE_MODE_NA &&
+	    state->interface != PHY_INTERFACE_MODE_QSGMII &&
+	    state->interface != PHY_INTERFACE_MODE_SGMII &&
+	    !phy_interface_mode_is_8023z(state->interface) &&
+	    !phy_interface_mode_is_rgmii(state->interface)) {
+		bitmap_zero(supported, __ETHTOOL_LINK_MODE_MASK_NBITS);
+		return;
+	}
+
+	/* Allow all the expected bits */
+	phylink_set(mask, Autoneg);
+	phylink_set_port_modes(mask);
+
+	/* Asymmetric pause is unsupported */
+	phylink_set(mask, Pause);
+	/* Half-duplex at speeds higher than 100Mbit is unsupported */
+	phylink_set(mask, 1000baseT_Full);
+	phylink_set(mask, 1000baseX_Full);
+
+	if (!phy_interface_mode_is_8023z(state->interface)) {
+		/* 10M and 100M are only supported in non-802.3z mode */
+		phylink_set(mask, 10baseT_Half);
+		phylink_set(mask, 10baseT_Full);
+		phylink_set(mask, 100baseT_Half);
+		phylink_set(mask, 100baseT_Full);
+	}
+
+	bitmap_and(supported, supported, mask,
+		   __ETHTOOL_LINK_MODE_MASK_NBITS);
+	bitmap_and(state->advertising, state->advertising, mask,
+		   __ETHTOOL_LINK_MODE_MASK_NBITS);
+}
+
+static int mvneta_mac_link_state(struct net_device *ndev,
+				 struct phylink_link_state *state)
 {
 	struct mvneta_port *pp = netdev_priv(ndev);
-	struct phy_device *phydev = ndev->phydev;
-	int status_change = 0;
+	u32 gmac_stat;
+
+	gmac_stat = mvreg_read(pp, MVNETA_GMAC_STATUS);
 
-	if (phydev->link) {
-		if ((pp->speed != phydev->speed) ||
-		    (pp->duplex != phydev->duplex)) {
-			u32 val;
+	if (gmac_stat & MVNETA_GMAC_SPEED_1000)
+		state->speed = SPEED_1000;
+	else if (gmac_stat & MVNETA_GMAC_SPEED_100)
+		state->speed = SPEED_100;
+	else
+		state->speed = SPEED_10;
 
-			val = mvreg_read(pp, MVNETA_GMAC_AUTONEG_CONFIG);
-			val &= ~(MVNETA_GMAC_CONFIG_MII_SPEED |
-				 MVNETA_GMAC_CONFIG_GMII_SPEED |
-				 MVNETA_GMAC_CONFIG_FULL_DUPLEX);
+	state->an_complete = !!(gmac_stat & MVNETA_GMAC_AN_COMPLETE);
+	state->link = !!(gmac_stat & MVNETA_GMAC_LINK_UP);
+	state->duplex = !!(gmac_stat & MVNETA_GMAC_FULL_DUPLEX);
+
+	state->pause = 0;
+	if (gmac_stat & MVNETA_GMAC_RX_FLOW_CTRL_ENABLE)
+		state->pause |= MLO_PAUSE_RX;
+	if (gmac_stat & MVNETA_GMAC_TX_FLOW_CTRL_ENABLE)
+		state->pause |= MLO_PAUSE_TX;
+
+	return 1;
+}
 
-			if (phydev->duplex)
-				val |= MVNETA_GMAC_CONFIG_FULL_DUPLEX;
+static void mvneta_mac_an_restart(struct net_device *ndev)
+{
+	struct mvneta_port *pp = netdev_priv(ndev);
+	u32 gmac_an = mvreg_read(pp, MVNETA_GMAC_AUTONEG_CONFIG);
 
-			if (phydev->speed == SPEED_1000)
-				val |= MVNETA_GMAC_CONFIG_GMII_SPEED;
-			else if (phydev->speed == SPEED_100)
-				val |= MVNETA_GMAC_CONFIG_MII_SPEED;
+	mvreg_write(pp, MVNETA_GMAC_AUTONEG_CONFIG,
+		    gmac_an | MVNETA_GMAC_INBAND_RESTART_AN);
+	mvreg_write(pp, MVNETA_GMAC_AUTONEG_CONFIG,
+		    gmac_an & ~MVNETA_GMAC_INBAND_RESTART_AN);
+}
 
-			mvreg_write(pp, MVNETA_GMAC_AUTONEG_CONFIG, val);
+static void mvneta_mac_config(struct net_device *ndev, unsigned int mode,
+	const struct phylink_link_state *state)
+{
+	struct mvneta_port *pp = netdev_priv(ndev);
+	u32 new_ctrl0, gmac_ctrl0 = mvreg_read(pp, MVNETA_GMAC_CTRL_0);
+	u32 new_ctrl2, gmac_ctrl2 = mvreg_read(pp, MVNETA_GMAC_CTRL_2);
+	u32 new_clk, gmac_clk = mvreg_read(pp, MVNETA_GMAC_CLOCK_DIVIDER);
+	u32 new_an, gmac_an = mvreg_read(pp, MVNETA_GMAC_AUTONEG_CONFIG);
+
+	new_ctrl0 = gmac_ctrl0 & ~MVNETA_GMAC0_PORT_1000BASE_X;
+	new_ctrl2 = gmac_ctrl2 & ~(MVNETA_GMAC2_INBAND_AN_ENABLE |
+				   MVNETA_GMAC2_PORT_RESET);
+	new_clk = gmac_clk & ~MVNETA_GMAC_1MS_CLOCK_ENABLE;
+	new_an = gmac_an & ~(MVNETA_GMAC_INBAND_AN_ENABLE |
+			     MVNETA_GMAC_INBAND_RESTART_AN |
+			     MVNETA_GMAC_CONFIG_MII_SPEED |
+			     MVNETA_GMAC_CONFIG_GMII_SPEED |
+			     MVNETA_GMAC_AN_SPEED_EN |
+			     MVNETA_GMAC_ADVERT_SYM_FLOW_CTRL |
+			     MVNETA_GMAC_CONFIG_FLOW_CTRL |
+			     MVNETA_GMAC_AN_FLOW_CTRL_EN |
+			     MVNETA_GMAC_CONFIG_FULL_DUPLEX |
+			     MVNETA_GMAC_AN_DUPLEX_EN);
 
-			pp->duplex = phydev->duplex;
-			pp->speed  = phydev->speed;
-		}
+	/* Even though it might look weird, when we're configured in
+	 * SGMII or QSGMII mode, the RGMII bit needs to be set.
+	 */
+	new_ctrl2 |= MVNETA_GMAC2_PORT_RGMII;
+
+	if (state->interface == PHY_INTERFACE_MODE_QSGMII ||
+	    state->interface == PHY_INTERFACE_MODE_SGMII ||
+	    phy_interface_mode_is_8023z(state->interface))
+		new_ctrl2 |= MVNETA_GMAC2_PCS_ENABLE;
+
+	if (phylink_test(state->advertising, Pause))
+		new_an |= MVNETA_GMAC_ADVERT_SYM_FLOW_CTRL;
+	if (state->pause & MLO_PAUSE_TXRX_MASK)
+		new_an |= MVNETA_GMAC_CONFIG_FLOW_CTRL;
+
+	if (!phylink_autoneg_inband(mode)) {
+		/* Phy or fixed speed */
+		if (state->duplex)
+			new_an |= MVNETA_GMAC_CONFIG_FULL_DUPLEX;
+
+		if (state->speed == SPEED_1000)
+			new_an |= MVNETA_GMAC_CONFIG_GMII_SPEED;
+		else if (state->speed == SPEED_100)
+			new_an |= MVNETA_GMAC_CONFIG_MII_SPEED;
+	} else if (state->interface == PHY_INTERFACE_MODE_SGMII) {
+		/* SGMII mode receives the state from the PHY */
+		new_ctrl2 |= MVNETA_GMAC2_INBAND_AN_ENABLE;
+		new_clk |= MVNETA_GMAC_1MS_CLOCK_ENABLE;
+		new_an = (new_an & ~(MVNETA_GMAC_FORCE_LINK_DOWN |
+				     MVNETA_GMAC_FORCE_LINK_PASS)) |
+			 MVNETA_GMAC_INBAND_AN_ENABLE |
+			 MVNETA_GMAC_AN_SPEED_EN |
+			 MVNETA_GMAC_AN_DUPLEX_EN;
+	} else {
+		/* 802.3z negotiation - only 1000base-X */
+		new_ctrl0 |= MVNETA_GMAC0_PORT_1000BASE_X;
+		new_clk |= MVNETA_GMAC_1MS_CLOCK_ENABLE;
+		new_an = (new_an & ~(MVNETA_GMAC_FORCE_LINK_DOWN |
+				     MVNETA_GMAC_FORCE_LINK_PASS)) |
+			 MVNETA_GMAC_INBAND_AN_ENABLE |
+			 MVNETA_GMAC_CONFIG_GMII_SPEED |
+			 /* The MAC only supports FD mode */
+			 MVNETA_GMAC_CONFIG_FULL_DUPLEX;
+
+		if (state->pause & MLO_PAUSE_AN && state->an_enabled)
+			new_an |= MVNETA_GMAC_AN_FLOW_CTRL_EN;
 	}
 
-	if (phydev->link != pp->link) {
-		if (!phydev->link) {
-			pp->duplex = -1;
-			pp->speed = 0;
-		}
+	/* The Armada 370 documentation says we can only change the port
+	 * mode and in-band enable when the link is down, so force it
+	 * down while making these changes. We also do this for the
+	 * GMAC_CTRL2 register.
+	 */
+	if ((new_ctrl0 ^ gmac_ctrl0) & MVNETA_GMAC0_PORT_1000BASE_X ||
+	    (new_ctrl2 ^ gmac_ctrl2) & MVNETA_GMAC2_INBAND_AN_ENABLE ||
+	    (new_an  ^ gmac_an) & MVNETA_GMAC_INBAND_AN_ENABLE) {
+		mvreg_write(pp, MVNETA_GMAC_AUTONEG_CONFIG,
+			    (gmac_an & ~MVNETA_GMAC_FORCE_LINK_PASS) |
+			    MVNETA_GMAC_FORCE_LINK_DOWN);
+	}
 
-		pp->link = phydev->link;
-		status_change = 1;
+	if (new_ctrl0 != gmac_ctrl0)
+		mvreg_write(pp, MVNETA_GMAC_CTRL_0, new_ctrl0);
+	if (new_ctrl2 != gmac_ctrl2)
+		mvreg_write(pp, MVNETA_GMAC_CTRL_2, new_ctrl2);
+	if (new_clk != gmac_clk)
+		mvreg_write(pp, MVNETA_GMAC_CLOCK_DIVIDER, new_clk);
+	if (new_an != gmac_an)
+		mvreg_write(pp, MVNETA_GMAC_AUTONEG_CONFIG, new_an);
+
+	if (gmac_ctrl2 & MVNETA_GMAC2_PORT_RESET) {
+		while ((mvreg_read(pp, MVNETA_GMAC_CTRL_2) &
+			MVNETA_GMAC2_PORT_RESET) != 0)
+			continue;
 	}
+}
 
-	if (status_change) {
-		if (phydev->link) {
-			if (!pp->use_inband_status) {
-				u32 val = mvreg_read(pp,
-						  MVNETA_GMAC_AUTONEG_CONFIG);
-				val &= ~MVNETA_GMAC_FORCE_LINK_DOWN;
-				val |= MVNETA_GMAC_FORCE_LINK_PASS;
-				mvreg_write(pp, MVNETA_GMAC_AUTONEG_CONFIG,
-					    val);
-			}
-			mvneta_port_up(pp);
-		} else {
-			if (!pp->use_inband_status) {
-				u32 val = mvreg_read(pp,
-						  MVNETA_GMAC_AUTONEG_CONFIG);
-				val &= ~MVNETA_GMAC_FORCE_LINK_PASS;
-				val |= MVNETA_GMAC_FORCE_LINK_DOWN;
-				mvreg_write(pp, MVNETA_GMAC_AUTONEG_CONFIG,
-					    val);
-			}
-			mvneta_port_down(pp);
-		}
-		phy_print_status(phydev);
+static void mvneta_set_eee(struct mvneta_port *pp, bool enable)
+{
+	u32 lpi_ctl1;
+
+	lpi_ctl1 = mvreg_read(pp, MVNETA_LPI_CTRL_1);
+	if (enable)
+		lpi_ctl1 |= MVNETA_LPI_REQUEST_ENABLE;
+	else
+		lpi_ctl1 &= ~MVNETA_LPI_REQUEST_ENABLE;
+	mvreg_write(pp, MVNETA_LPI_CTRL_1, lpi_ctl1);
+}
+
+static void mvneta_mac_link_down(struct net_device *ndev, unsigned int mode)
+{
+	struct mvneta_port *pp = netdev_priv(ndev);
+	u32 val;
+
+	mvneta_port_down(pp);
+
+	if (!phylink_autoneg_inband(mode)) {
+		val = mvreg_read(pp, MVNETA_GMAC_AUTONEG_CONFIG);
+		val &= ~MVNETA_GMAC_FORCE_LINK_PASS;
+		val |= MVNETA_GMAC_FORCE_LINK_DOWN;
+		mvreg_write(pp, MVNETA_GMAC_AUTONEG_CONFIG, val);
 	}
+
+	pp->eee_active = false;
+	mvneta_set_eee(pp, false);
 }
 
-static int mvneta_mdio_probe(struct mvneta_port *pp)
+static void mvneta_mac_link_up(struct net_device *ndev, unsigned int mode,
+			       struct phy_device *phy)
 {
-	struct phy_device *phy_dev;
-	struct ethtool_wolinfo wol = { .cmd = ETHTOOL_GWOL };
+	struct mvneta_port *pp = netdev_priv(ndev);
+	u32 val;
 
-	phy_dev = of_phy_connect(pp->dev, pp->phy_node, mvneta_adjust_link, 0,
-				 pp->phy_interface);
-	if (!phy_dev) {
-		netdev_err(pp->dev, "could not find the PHY\n");
-		return -ENODEV;
+	if (!phylink_autoneg_inband(mode)) {
+		val = mvreg_read(pp, MVNETA_GMAC_AUTONEG_CONFIG);
+		val &= ~MVNETA_GMAC_FORCE_LINK_DOWN;
+		val |= MVNETA_GMAC_FORCE_LINK_PASS;
+		mvreg_write(pp, MVNETA_GMAC_AUTONEG_CONFIG, val);
 	}
 
-	phy_ethtool_get_wol(phy_dev, &wol);
-	device_set_wakeup_capable(&pp->dev->dev, !!wol.supported);
+	mvneta_port_up(pp);
 
-	phy_dev->supported &= PHY_GBIT_FEATURES;
-	phy_dev->advertising = phy_dev->supported;
+	if (phy && pp->eee_enabled) {
+		pp->eee_active = phy_init_eee(phy, 0) >= 0;
+		mvneta_set_eee(pp, pp->eee_active && pp->tx_lpi_enabled);
+	}
+}
 
-	pp->link    = 0;
-	pp->duplex  = 0;
-	pp->speed   = 0;
+static const struct phylink_mac_ops mvneta_phylink_ops = {
+	.validate = mvneta_validate,
+	.mac_link_state = mvneta_mac_link_state,
+	.mac_an_restart = mvneta_mac_an_restart,
+	.mac_config = mvneta_mac_config,
+	.mac_link_down = mvneta_mac_link_down,
+	.mac_link_up = mvneta_mac_link_up,
+};
 
-	return 0;
+static int mvneta_mdio_probe(struct mvneta_port *pp)
+{
+	struct ethtool_wolinfo wol = { .cmd = ETHTOOL_GWOL };
+	int err = phylink_of_phy_connect(pp->phylink, pp->dn, 0);
+
+	if (err)
+		netdev_err(pp->dev, "could not attach PHY: %d\n", err);
+
+	phylink_ethtool_get_wol(pp->phylink, &wol);
+	device_set_wakeup_capable(&pp->dev->dev, !!wol.supported);
+
+	return err;
 }
 
 static void mvneta_mdio_remove(struct mvneta_port *pp)
 {
-	struct net_device *ndev = pp->dev;
-
-	phy_disconnect(ndev->phydev);
+	phylink_disconnect_phy(pp->phylink);
 }
 
 /* Electing a CPU must be done in an atomic way: it should be done
@@ -3455,8 +3568,7 @@ static int mvneta_cpu_online(unsigned int cpu, struct hlist_node *node)
 	on_each_cpu(mvneta_percpu_unmask_interrupt, pp, true);
 	mvreg_write(pp, MVNETA_INTR_MISC_MASK,
 		    MVNETA_CAUSE_PHY_STATUS_CHANGE |
-		    MVNETA_CAUSE_LINK_CHANGE |
-		    MVNETA_CAUSE_PSC_SYNC_CHANGE);
+		    MVNETA_CAUSE_LINK_CHANGE);
 	netif_tx_start_all_queues(pp->dev);
 	spin_unlock(&pp->lock);
 	return 0;
@@ -3497,8 +3609,7 @@ static int mvneta_cpu_dead(unsigned int cpu, struct hlist_node *node)
 	on_each_cpu(mvneta_percpu_unmask_interrupt, pp, true);
 	mvreg_write(pp, MVNETA_INTR_MISC_MASK,
 		    MVNETA_CAUSE_PHY_STATUS_CHANGE |
-		    MVNETA_CAUSE_LINK_CHANGE |
-		    MVNETA_CAUSE_PSC_SYNC_CHANGE);
+		    MVNETA_CAUSE_LINK_CHANGE);
 	netif_tx_start_all_queues(pp->dev);
 	return 0;
 }
@@ -3626,10 +3737,9 @@ static int mvneta_stop(struct net_device *dev)
 
 static int mvneta_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
 {
-	if (!dev->phydev)
-		return -ENOTSUPP;
+	struct mvneta_port *pp = netdev_priv(dev);
 
-	return phy_mii_ioctl(dev->phydev, ifr, cmd);
+	return phylink_mii_ioctl(pp->phylink, ifr, cmd);
 }
 
 /* Ethtool methods */
@@ -3640,44 +3750,25 @@ mvneta_ethtool_set_link_ksettings(struct net_device *ndev,
 				  const struct ethtool_link_ksettings *cmd)
 {
 	struct mvneta_port *pp = netdev_priv(ndev);
-	struct phy_device *phydev = ndev->phydev;
-
-	if (!phydev)
-		return -ENODEV;
-
-	if ((cmd->base.autoneg == AUTONEG_ENABLE) != pp->use_inband_status) {
-		u32 val;
-
-		mvneta_set_autoneg(pp, cmd->base.autoneg == AUTONEG_ENABLE);
-
-		if (cmd->base.autoneg == AUTONEG_DISABLE) {
-			val = mvreg_read(pp, MVNETA_GMAC_AUTONEG_CONFIG);
-			val &= ~(MVNETA_GMAC_CONFIG_MII_SPEED |
-				 MVNETA_GMAC_CONFIG_GMII_SPEED |
-				 MVNETA_GMAC_CONFIG_FULL_DUPLEX);
-
-			if (phydev->duplex)
-				val |= MVNETA_GMAC_CONFIG_FULL_DUPLEX;
 
-			if (phydev->speed == SPEED_1000)
-				val |= MVNETA_GMAC_CONFIG_GMII_SPEED;
-			else if (phydev->speed == SPEED_100)
-				val |= MVNETA_GMAC_CONFIG_MII_SPEED;
+	return phylink_ethtool_ksettings_set(pp->phylink, cmd);
+}
 
-			mvreg_write(pp, MVNETA_GMAC_AUTONEG_CONFIG, val);
-		}
+/* Get link ksettings for ethtools */
+static int
+mvneta_ethtool_get_link_ksettings(struct net_device *ndev,
+				  struct ethtool_link_ksettings *cmd)
+{
+	struct mvneta_port *pp = netdev_priv(ndev);
 
-		pp->use_inband_status = (cmd->base.autoneg == AUTONEG_ENABLE);
-		netdev_info(pp->dev, "autoneg status set to %i\n",
-			    pp->use_inband_status);
+	return phylink_ethtool_ksettings_get(pp->phylink, cmd);
+}
 
-		if (netif_running(ndev)) {
-			mvneta_port_down(pp);
-			mvneta_port_up(pp);
-		}
-	}
+static int mvneta_ethtool_nway_reset(struct net_device *dev)
+{
+	struct mvneta_port *pp = netdev_priv(dev);
 
-	return phy_ethtool_ksettings_set(ndev->phydev, cmd);
+	return phylink_ethtool_nway_reset(pp->phylink);
 }
 
 /* Set interrupt coalescing for ethtools */
@@ -3769,6 +3860,22 @@ static int mvneta_ethtool_set_ringparam(struct net_device *dev,
 	return 0;
 }
 
+static void mvneta_ethtool_get_pauseparam(struct net_device *dev,
+					  struct ethtool_pauseparam *pause)
+{
+	struct mvneta_port *pp = netdev_priv(dev);
+
+	phylink_ethtool_get_pauseparam(pp->phylink, pause);
+}
+
+static int mvneta_ethtool_set_pauseparam(struct net_device *dev,
+					 struct ethtool_pauseparam *pause)
+{
+	struct mvneta_port *pp = netdev_priv(dev);
+
+	return phylink_ethtool_set_pauseparam(pp->phylink, pause);
+}
+
 static void mvneta_ethtool_get_strings(struct net_device *netdev, u32 sset,
 				       u8 *data)
 {
@@ -3785,26 +3892,35 @@ static void mvneta_ethtool_update_stats(struct mvneta_port *pp)
 {
 	const struct mvneta_statistic *s;
 	void __iomem *base = pp->base;
-	u32 high, low, val;
-	u64 val64;
+	u32 high, low;
+	u64 val;
 	int i;
 
 	for (i = 0, s = mvneta_statistics;
 	     s < mvneta_statistics + ARRAY_SIZE(mvneta_statistics);
 	     s++, i++) {
+		val = 0;
+
 		switch (s->type) {
 		case T_REG_32:
 			val = readl_relaxed(base + s->offset);
-			pp->ethtool_stats[i] += val;
 			break;
 		case T_REG_64:
 			/* Docs say to read the low 32 bits, then the high */
 			low = readl_relaxed(base + s->offset);
 			high = readl_relaxed(base + s->offset + 4);
-			val64 = (u64)high << 32 | low;
-			pp->ethtool_stats[i] += val64;
+			val = (u64)high << 32 | low;
+			break;
+		case T_SW:
+			switch (s->offset) {
+			case ETHTOOL_STAT_EEE_WAKEUP:
+				val = phylink_get_eee_err(pp->phylink);
+				break;
+			}
 			break;
 		}
+
+		pp->ethtool_stats[i] += val;
 	}
 }
 
@@ -3939,28 +4055,81 @@ static int mvneta_ethtool_get_rxfh(struct net_device *dev, u32 *indir, u8 *key,
 static void mvneta_ethtool_get_wol(struct net_device *dev,
 				   struct ethtool_wolinfo *wol)
 {
-	wol->supported = 0;
-	wol->wolopts = 0;
+	struct mvneta_port *pp = netdev_priv(dev);
 
-	if (dev->phydev)
-		phy_ethtool_get_wol(dev->phydev, wol);
+	phylink_ethtool_get_wol(pp->phylink, wol);
 }
 
 static int mvneta_ethtool_set_wol(struct net_device *dev,
 				  struct ethtool_wolinfo *wol)
 {
+	struct mvneta_port *pp = netdev_priv(dev);
 	int ret;
 
-	if (!dev->phydev)
-		return -EOPNOTSUPP;
-
-	ret = phy_ethtool_set_wol(dev->phydev, wol);
+	ret = phylink_ethtool_set_wol(pp->phylink, wol);
 	if (!ret)
 		device_set_wakeup_enable(&dev->dev, !!wol->wolopts);
 
 	return ret;
 }
 
+static int mvneta_ethtool_get_module_info(struct net_device *dev,
+					  struct ethtool_modinfo *modinfo)
+{
+	struct mvneta_port *pp = netdev_priv(dev);
+
+	return phylink_ethtool_get_module_info(pp->phylink, modinfo);
+}
+
+static int mvneta_ethtool_get_module_eeprom(struct net_device *dev,
+					    struct ethtool_eeprom *ee, u8 *buf)
+{
+	struct mvneta_port *pp = netdev_priv(dev);
+
+	return phylink_ethtool_get_module_eeprom(pp->phylink, ee, buf);
+}
+
+static int mvneta_ethtool_get_eee(struct net_device *dev,
+				  struct ethtool_eee *eee)
+{
+	struct mvneta_port *pp = netdev_priv(dev);
+	u32 lpi_ctl0;
+
+	lpi_ctl0 = mvreg_read(pp, MVNETA_LPI_CTRL_0);
+
+	eee->eee_enabled = pp->eee_enabled;
+	eee->eee_active = pp->eee_active;
+	eee->tx_lpi_enabled = pp->tx_lpi_enabled;
+	eee->tx_lpi_timer = lpi_ctl0 >> 8;
+
+	return phylink_ethtool_get_eee(pp->phylink, eee);
+}
+
+static int mvneta_ethtool_set_eee(struct net_device *dev,
+				  struct ethtool_eee *eee)
+{
+	struct mvneta_port *pp = netdev_priv(dev);
+	u32 lpi_ctl0;
+
+	/* The Armada 37x documents do not give limits for this other than
+	 * it being an 8-bit register.
+	 */
+	if (eee->tx_lpi_enabled && eee->tx_lpi_timer > 255)
+		return -EINVAL;
+
+	lpi_ctl0 = mvreg_read(pp, MVNETA_LPI_CTRL_0);
+	lpi_ctl0 &= ~(0xff << 8);
+	lpi_ctl0 |= eee->tx_lpi_timer << 8;
+	mvreg_write(pp, MVNETA_LPI_CTRL_0, lpi_ctl0);
+
+	pp->eee_enabled = eee->eee_enabled;
+	pp->tx_lpi_enabled = eee->tx_lpi_enabled;
+
+	mvneta_set_eee(pp, eee->tx_lpi_enabled && eee->eee_enabled);
+
+	return phylink_ethtool_set_eee(pp->phylink, eee);
+}
+
 static const struct net_device_ops mvneta_netdev_ops = {
 	.ndo_open            = mvneta_open,
 	.ndo_stop            = mvneta_stop,
@@ -3974,13 +4143,15 @@ static const struct net_device_ops mvneta_netdev_ops = {
 };
 
 static const struct ethtool_ops mvneta_eth_tool_ops = {
-	.nway_reset	= phy_ethtool_nway_reset,
+	.nway_reset	= mvneta_ethtool_nway_reset,
 	.get_link       = ethtool_op_get_link,
 	.set_coalesce   = mvneta_ethtool_set_coalesce,
 	.get_coalesce   = mvneta_ethtool_get_coalesce,
 	.get_drvinfo    = mvneta_ethtool_get_drvinfo,
 	.get_ringparam  = mvneta_ethtool_get_ringparam,
 	.set_ringparam	= mvneta_ethtool_set_ringparam,
+	.get_pauseparam	= mvneta_ethtool_get_pauseparam,
+	.set_pauseparam	= mvneta_ethtool_set_pauseparam,
 	.get_strings	= mvneta_ethtool_get_strings,
 	.get_ethtool_stats = mvneta_ethtool_get_stats,
 	.get_sset_count	= mvneta_ethtool_get_sset_count,
@@ -3988,10 +4159,14 @@ static const struct ethtool_ops mvneta_eth_tool_ops = {
 	.get_rxnfc	= mvneta_ethtool_get_rxnfc,
 	.get_rxfh	= mvneta_ethtool_get_rxfh,
 	.set_rxfh	= mvneta_ethtool_set_rxfh,
-	.get_link_ksettings = phy_ethtool_get_link_ksettings,
+	.get_link_ksettings = mvneta_ethtool_get_link_ksettings,
 	.set_link_ksettings = mvneta_ethtool_set_link_ksettings,
 	.get_wol        = mvneta_ethtool_get_wol,
 	.set_wol        = mvneta_ethtool_set_wol,
+	.get_module_info = mvneta_ethtool_get_module_info,
+	.get_module_eeprom = mvneta_ethtool_get_module_eeprom,
+	.get_eee	= mvneta_ethtool_get_eee,
+	.set_eee	= mvneta_ethtool_set_eee,
 };
 
 /* Initialize hw */
@@ -4091,42 +4266,16 @@ static void mvneta_conf_mbus_windows(struct mvneta_port *pp,
 /* Power up the port */
 static int mvneta_port_power_up(struct mvneta_port *pp, int phy_mode)
 {
-	u32 ctrl;
-
 	/* MAC Cause register should be cleared */
 	mvreg_write(pp, MVNETA_UNIT_INTR_CAUSE, 0);
 
-	ctrl = mvreg_read(pp, MVNETA_GMAC_CTRL_2);
-
-	/* Even though it might look weird, when we're configured in
-	 * SGMII or QSGMII mode, the RGMII bit needs to be set.
-	 */
-	switch(phy_mode) {
-	case PHY_INTERFACE_MODE_QSGMII:
+	if (phy_mode == PHY_INTERFACE_MODE_QSGMII)
 		mvreg_write(pp, MVNETA_SERDES_CFG, MVNETA_QSGMII_SERDES_PROTO);
-		ctrl |= MVNETA_GMAC2_PCS_ENABLE | MVNETA_GMAC2_PORT_RGMII;
-		break;
-	case PHY_INTERFACE_MODE_SGMII:
+	else if (phy_mode == PHY_INTERFACE_MODE_SGMII ||
+		 phy_mode == PHY_INTERFACE_MODE_1000BASEX)
 		mvreg_write(pp, MVNETA_SERDES_CFG, MVNETA_SGMII_SERDES_PROTO);
-		ctrl |= MVNETA_GMAC2_PCS_ENABLE | MVNETA_GMAC2_PORT_RGMII;
-		break;
-	case PHY_INTERFACE_MODE_RGMII:
-	case PHY_INTERFACE_MODE_RGMII_ID:
-	case PHY_INTERFACE_MODE_RGMII_RXID:
-	case PHY_INTERFACE_MODE_RGMII_TXID:
-		ctrl |= MVNETA_GMAC2_PORT_RGMII;
-		break;
-	default:
+	else if (!phy_interface_mode_is_rgmii(phy_mode))
 		return -EINVAL;
-	}
-
-	/* Cancel Port Reset */
-	ctrl &= ~MVNETA_GMAC2_PORT_RESET;
-	mvreg_write(pp, MVNETA_GMAC_CTRL_2, ctrl);
-
-	while ((mvreg_read(pp, MVNETA_GMAC_CTRL_2) &
-		MVNETA_GMAC2_PORT_RESET) != 0)
-		continue;
 
 	return 0;
 }
@@ -4136,14 +4285,13 @@ static int mvneta_probe(struct platform_device *pdev)
 {
 	struct resource *res;
 	struct device_node *dn = pdev->dev.of_node;
-	struct device_node *phy_node;
 	struct device_node *bm_node;
 	struct mvneta_port *pp;
 	struct net_device *dev;
+	struct phylink *phylink;
 	const char *dt_mac_addr;
 	char hw_mac_addr[ETH_ALEN];
 	const char *mac_from;
-	const char *managed;
 	int tx_csum_limit;
 	int phy_mode;
 	int err;
@@ -4159,31 +4307,18 @@ static int mvneta_probe(struct platform_device *pdev)
 		goto err_free_netdev;
 	}
 
-	phy_node = of_parse_phandle(dn, "phy", 0);
-	if (!phy_node) {
-		if (!of_phy_is_fixed_link(dn)) {
-			dev_err(&pdev->dev, "no PHY specified\n");
-			err = -ENODEV;
-			goto err_free_irq;
-		}
-
-		err = of_phy_register_fixed_link(dn);
-		if (err < 0) {
-			dev_err(&pdev->dev, "cannot register fixed PHY\n");
-			goto err_free_irq;
-		}
-
-		/* In the case of a fixed PHY, the DT node associated
-		 * to the PHY is the Ethernet MAC DT node.
-		 */
-		phy_node = of_node_get(dn);
-	}
-
 	phy_mode = of_get_phy_mode(dn);
 	if (phy_mode < 0) {
 		dev_err(&pdev->dev, "incorrect phy-mode\n");
 		err = -EINVAL;
-		goto err_put_phy_node;
+		goto err_free_irq;
+	}
+
+	phylink = phylink_create(dev, pdev->dev.fwnode, phy_mode,
+				 &mvneta_phylink_ops);
+	if (IS_ERR(phylink)) {
+		err = PTR_ERR(phylink);
+		goto err_free_irq;
 	}
 
 	dev->tx_queue_len = MVNETA_MAX_TXD;
@@ -4194,12 +4329,9 @@ static int mvneta_probe(struct platform_device *pdev)
 
 	pp = netdev_priv(dev);
 	spin_lock_init(&pp->lock);
-	pp->phy_node = phy_node;
+	pp->phylink = phylink;
 	pp->phy_interface = phy_mode;
-
-	err = of_property_read_string(dn, "managed", &managed);
-	pp->use_inband_status = (err == 0 &&
-				 strcmp(managed, "in-band-status") == 0);
+	pp->dn = dn;
 
 	pp->rxq_def = rxq_def;
 
@@ -4221,7 +4353,7 @@ static int mvneta_probe(struct platform_device *pdev)
 		pp->clk = devm_clk_get(&pdev->dev, NULL);
 	if (IS_ERR(pp->clk)) {
 		err = PTR_ERR(pp->clk);
-		goto err_put_phy_node;
+		goto err_free_phylink;
 	}
 
 	clk_prepare_enable(pp->clk);
@@ -4358,14 +4490,6 @@ static int mvneta_probe(struct platform_device *pdev)
 
 	platform_set_drvdata(pdev, pp->dev);
 
-	if (pp->use_inband_status) {
-		struct phy_device *phy = of_phy_find_device(dn);
-
-		mvneta_fixed_link_update(pp, phy);
-
-		put_device(&phy->mdio.dev);
-	}
-
 	return 0;
 
 err_netdev:
@@ -4382,10 +4506,9 @@ err_free_ports:
 err_clk:
 	clk_disable_unprepare(pp->clk_bus);
 	clk_disable_unprepare(pp->clk);
-err_put_phy_node:
-	of_node_put(phy_node);
-	if (of_phy_is_fixed_link(dn))
-		of_phy_deregister_fixed_link(dn);
+err_free_phylink:
+	if (pp->phylink)
+		phylink_destroy(pp->phylink);
 err_free_irq:
 	irq_dispose_mapping(dev->irq);
 err_free_netdev:
@@ -4397,7 +4520,6 @@ err_free_netdev:
 static int mvneta_remove(struct platform_device *pdev)
 {
 	struct net_device  *dev = platform_get_drvdata(pdev);
-	struct device_node *dn = pdev->dev.of_node;
 	struct mvneta_port *pp = netdev_priv(dev);
 
 	unregister_netdev(dev);
@@ -4405,10 +4527,8 @@ static int mvneta_remove(struct platform_device *pdev)
 	clk_disable_unprepare(pp->clk);
 	free_percpu(pp->ports);
 	free_percpu(pp->stats);
-	if (of_phy_is_fixed_link(dn))
-		of_phy_deregister_fixed_link(dn);
 	irq_dispose_mapping(dev->irq);
-	of_node_put(pp->phy_node);
+	phylink_destroy(pp->phylink);
 	free_netdev(dev);
 
 	if (pp->bm_priv) {
@@ -4426,8 +4546,10 @@ static int mvneta_suspend(struct device *device)
 	struct net_device *dev = dev_get_drvdata(device);
 	struct mvneta_port *pp = netdev_priv(dev);
 
+	rtnl_lock();
 	if (netif_running(dev))
 		mvneta_stop(dev);
+	rtnl_unlock();
 	netif_device_detach(dev);
 	clk_disable_unprepare(pp->clk_bus);
 	clk_disable_unprepare(pp->clk);
@@ -4460,14 +4582,13 @@ static int mvneta_resume(struct device *device)
 		return err;
 	}
 
-	if (pp->use_inband_status)
-		mvneta_fixed_link_update(pp, dev->phydev);
-
 	netif_device_attach(dev);
+	rtnl_lock();
 	if (netif_running(dev)) {
 		mvneta_open(dev);
 		mvneta_set_rx_mode(dev);
 	}
+	rtnl_unlock();
 
 	return 0;
 }
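
The mvneta conversion above is a complete worked example of the phylink MAC contract of this kernel generation: publish a struct phylink_mac_ops, create the instance with phylink_create(), connect with phylink_of_phy_connect(), and forward the ethtool/ioctl entry points to their phylink_ethtool_*()/phylink_mii_ioctl() counterparts. Reduced to a skeleton with hypothetical stubs:

#include <linux/netdevice.h>
#include <linux/phylink.h>

static void foo_validate(struct net_device *ndev, unsigned long *supported,
			 struct phylink_link_state *state)
{
	/* clear the link modes this MAC cannot do (cf. mvneta_validate) */
}

static void foo_mac_config(struct net_device *ndev, unsigned int mode,
			   const struct phylink_link_state *state)
{
	/* program speed/duplex/pause; branch on phylink_autoneg_inband() */
}

static void foo_mac_link_down(struct net_device *ndev, unsigned int mode)
{
	/* force the link down when not using in-band autonegotiation */
}

static void foo_mac_link_up(struct net_device *ndev, unsigned int mode,
			    struct phy_device *phy)
{
	/* force the link up and start the port */
}

static const struct phylink_mac_ops foo_phylink_ops = {
	.validate	= foo_validate,
	.mac_config	= foo_mac_config,
	.mac_link_down	= foo_mac_link_down,
	.mac_link_up	= foo_mac_link_up,
	/* .mac_link_state and .mac_an_restart as in mvneta above */
};
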
diff --git a/drivers/net/ethernet/marvell/mvpp2.c b/drivers/net/ethernet/marvell/mvpp2.c
index 634b2f41cc9e..a1d7b88cf083 100644
--- a/drivers/net/ethernet/marvell/mvpp2.c
+++ b/drivers/net/ethernet/marvell/mvpp2.c
@@ -10,6 +10,7 @@
  * warranty of any kind, whether express or implied.
  */
 
+#include <linux/acpi.h>
 #include <linux/kernel.h>
 #include <linux/netdevice.h>
 #include <linux/etherdevice.h>
@@ -454,11 +455,11 @@
 /* Various constants */
 
 /* Coalescing */
-#define MVPP2_TXDONE_COAL_PKTS_THRESH	15
+#define MVPP2_TXDONE_COAL_PKTS_THRESH	64
 #define MVPP2_TXDONE_HRTIMER_PERIOD_NS	1000000UL
 #define MVPP2_TXDONE_COAL_USEC		1000
 #define MVPP2_RX_COAL_PKTS		32
-#define MVPP2_RX_COAL_USEC		100
+#define MVPP2_RX_COAL_USEC		64
 
 /* The two bytes Marvell header. Either contains a special value used
  * by Marvell switches when a specific hardware mode is enabled (not
@@ -504,10 +505,12 @@
 #define MVPP2_DEFAULT_RXQ		4
 
 /* Max number of Rx descriptors */
-#define MVPP2_MAX_RXD			128
+#define MVPP2_MAX_RXD_MAX		1024
+#define MVPP2_MAX_RXD_DFLT		128
 
 /* Max number of Tx descriptors */
-#define MVPP2_MAX_TXD			1024
+#define MVPP2_MAX_TXD_MAX		2048
+#define MVPP2_MAX_TXD_DFLT		1024
 
 /* Amount of Tx descriptors that can be reserved at once by CPU */
 #define MVPP2_CPU_DESC_CHUNK		64
@@ -863,7 +866,7 @@ struct mvpp2 {
 
 	/* List of pointers to port structures */
 	int port_count;
-	struct mvpp2_port **port_list;
+	struct mvpp2_port *port_list[MVPP2_MAX_PORTS];
 
 	/* Aggregated TXQs */
 	struct mvpp2_tx_queue *aggr_txqs;
@@ -930,6 +933,9 @@ struct mvpp2_port {
 
 	struct mvpp2 *priv;
 
+	/* Firmware node associated with the port */
+	struct fwnode_handle *fwnode;
+
 	/* Per-port registers' base address */
 	void __iomem *base;
 	void __iomem *stats_base;
@@ -5802,6 +5808,7 @@ static int mvpp2_txq_init(struct mvpp2_port *port,
 		txq_pcpu->reserved_num = 0;
 		txq_pcpu->txq_put_index = 0;
 		txq_pcpu->txq_get_index = 0;
+		txq_pcpu->tso_headers = NULL;
 
 		txq_pcpu->stop_threshold = txq->size - MVPP2_MAX_SKB_DESCS;
 		txq_pcpu->wake_threshold = txq_pcpu->stop_threshold / 2;
@@ -5829,10 +5836,13 @@ static void mvpp2_txq_deinit(struct mvpp2_port *port,
 		txq_pcpu = per_cpu_ptr(txq->pcpu, cpu);
 		kfree(txq_pcpu->buffs);
 
-		dma_free_coherent(port->dev->dev.parent,
-				  txq_pcpu->size * TSO_HEADER_SIZE,
-				  txq_pcpu->tso_headers,
-				  txq_pcpu->tso_headers_dma);
+		if (txq_pcpu->tso_headers)
+			dma_free_coherent(port->dev->dev.parent,
+					  txq_pcpu->size * TSO_HEADER_SIZE,
+					  txq_pcpu->tso_headers,
+					  txq_pcpu->tso_headers_dma);
+
+		txq_pcpu->tso_headers = NULL;
 	}
 
 	if (txq->descs)
@@ -6832,13 +6842,13 @@ static int mvpp2_check_ringparam_valid(struct net_device *dev,
 	if (ring->rx_pending == 0 || ring->tx_pending == 0)
 		return -EINVAL;
 
-	if (ring->rx_pending > MVPP2_MAX_RXD)
-		new_rx_pending = MVPP2_MAX_RXD;
+	if (ring->rx_pending > MVPP2_MAX_RXD_MAX)
+		new_rx_pending = MVPP2_MAX_RXD_MAX;
 	else if (!IS_ALIGNED(ring->rx_pending, 16))
 		new_rx_pending = ALIGN(ring->rx_pending, 16);
 
-	if (ring->tx_pending > MVPP2_MAX_TXD)
-		new_tx_pending = MVPP2_MAX_TXD;
+	if (ring->tx_pending > MVPP2_MAX_TXD_MAX)
+		new_tx_pending = MVPP2_MAX_TXD_MAX;
 	else if (!IS_ALIGNED(ring->tx_pending, 32))
 		new_tx_pending = ALIGN(ring->tx_pending, 32);
 
@@ -7318,9 +7328,10 @@ static int mvpp2_ethtool_get_coalesce(struct net_device *dev,
 {
 	struct mvpp2_port *port = netdev_priv(dev);
 
-	c->rx_coalesce_usecs        = port->rxqs[0]->time_coal;
-	c->rx_max_coalesced_frames  = port->rxqs[0]->pkts_coal;
-	c->tx_max_coalesced_frames =  port->txqs[0]->done_pkts_coal;
+	c->rx_coalesce_usecs       = port->rxqs[0]->time_coal;
+	c->rx_max_coalesced_frames = port->rxqs[0]->pkts_coal;
+	c->tx_max_coalesced_frames = port->txqs[0]->done_pkts_coal;
+	c->tx_coalesce_usecs       = port->tx_time_coal;
 	return 0;
 }
 
@@ -7340,8 +7351,8 @@ static void mvpp2_ethtool_get_ringparam(struct net_device *dev,
 {
 	struct mvpp2_port *port = netdev_priv(dev);
 
-	ring->rx_max_pending = MVPP2_MAX_RXD;
-	ring->tx_max_pending = MVPP2_MAX_TXD;
+	ring->rx_max_pending = MVPP2_MAX_RXD_MAX;
+	ring->tx_max_pending = MVPP2_MAX_TXD_MAX;
 	ring->rx_pending = port->rx_ring_size;
 	ring->tx_pending = port->tx_ring_size;
 }
@@ -7492,7 +7503,10 @@ static int mvpp2_multi_queue_vectors_init(struct mvpp2_port *port,
 			strncpy(irqname, "rx-shared", sizeof(irqname));
 		}
 
-		v->irq = of_irq_get_byname(port_node, irqname);
+		if (port_node)
+			v->irq = of_irq_get_byname(port_node, irqname);
+		else
+			v->irq = fwnode_irq_get(port->fwnode, i);
 		if (v->irq <= 0) {
 			ret = -EINVAL;
 			goto err;
@@ -7704,17 +7718,16 @@ static bool mvpp2_port_has_tx_irqs(struct mvpp2 *priv,
 }
 
 static void mvpp2_port_copy_mac_addr(struct net_device *dev, struct mvpp2 *priv,
-				     struct device_node *port_node,
+				     struct fwnode_handle *fwnode,
 				     char **mac_from)
 {
 	struct mvpp2_port *port = netdev_priv(dev);
 	char hw_mac_addr[ETH_ALEN] = {0};
-	const char *dt_mac_addr;
+	char fw_mac_addr[ETH_ALEN];
 
-	dt_mac_addr = of_get_mac_address(port_node);
-	if (dt_mac_addr && is_valid_ether_addr(dt_mac_addr)) {
-		*mac_from = "device tree";
-		ether_addr_copy(dev->dev_addr, dt_mac_addr);
+	if (fwnode_get_mac_address(fwnode, fw_mac_addr, ETH_ALEN)) {
+		*mac_from = "firmware node";
+		ether_addr_copy(dev->dev_addr, fw_mac_addr);
 		return;
 	}
 
@@ -7733,13 +7746,14 @@ static void mvpp2_port_copy_mac_addr(struct net_device *dev, struct mvpp2 *priv,
 
 /* Ports initialization */
 static int mvpp2_port_probe(struct platform_device *pdev,
-			    struct device_node *port_node,
-			    struct mvpp2 *priv, int index)
+			    struct fwnode_handle *port_fwnode,
+			    struct mvpp2 *priv)
 {
 	struct device_node *phy_node;
-	struct phy *comphy;
+	struct phy *comphy = NULL;
 	struct mvpp2_port *port;
 	struct mvpp2_port_pcpu *port_pcpu;
+	struct device_node *port_node = to_of_node(port_fwnode);
 	struct net_device *dev;
 	struct resource *res;
 	char *mac_from = "";
@@ -7750,7 +7764,12 @@ static int mvpp2_port_probe(struct platform_device *pdev,
 	int phy_mode;
 	int err, i, cpu;
 
-	has_tx_irqs = mvpp2_port_has_tx_irqs(priv, port_node);
+	if (port_node) {
+		has_tx_irqs = mvpp2_port_has_tx_irqs(priv, port_node);
+	} else {
+		has_tx_irqs = true;
+		queue_mode = MVPP2_QDIST_MULTI_MODE;
+	}
 
 	if (!has_tx_irqs)
 		queue_mode = MVPP2_QDIST_SINGLE_MODE;
@@ -7765,36 +7784,43 @@ static int mvpp2_port_probe(struct platform_device *pdev,
 	if (!dev)
 		return -ENOMEM;
 
-	phy_node = of_parse_phandle(port_node, "phy", 0);
-	phy_mode = of_get_phy_mode(port_node);
+	if (port_node)
+		phy_node = of_parse_phandle(port_node, "phy", 0);
+	else
+		phy_node = NULL;
+
+	phy_mode = fwnode_get_phy_mode(port_fwnode);
 	if (phy_mode < 0) {
 		dev_err(&pdev->dev, "incorrect phy mode\n");
 		err = phy_mode;
 		goto err_free_netdev;
 	}
 
-	comphy = devm_of_phy_get(&pdev->dev, port_node, NULL);
-	if (IS_ERR(comphy)) {
-		if (PTR_ERR(comphy) == -EPROBE_DEFER) {
-			err = -EPROBE_DEFER;
-			goto err_free_netdev;
+	if (port_node) {
+		comphy = devm_of_phy_get(&pdev->dev, port_node, NULL);
+		if (IS_ERR(comphy)) {
+			if (PTR_ERR(comphy) == -EPROBE_DEFER) {
+				err = -EPROBE_DEFER;
+				goto err_free_netdev;
+			}
+			comphy = NULL;
 		}
-		comphy = NULL;
 	}
 
-	if (of_property_read_u32(port_node, "port-id", &id)) {
+	if (fwnode_property_read_u32(port_fwnode, "port-id", &id)) {
 		err = -EINVAL;
 		dev_err(&pdev->dev, "missing port-id value\n");
 		goto err_free_netdev;
 	}
 
-	dev->tx_queue_len = MVPP2_MAX_TXD;
+	dev->tx_queue_len = MVPP2_MAX_TXD_MAX;
 	dev->watchdog_timeo = 5 * HZ;
 	dev->netdev_ops = &mvpp2_netdev_ops;
 	dev->ethtool_ops = &mvpp2_eth_tool_ops;
 
 	port = netdev_priv(dev);
 	port->dev = dev;
+	port->fwnode = port_fwnode;
 	port->ntxqs = ntxqs;
 	port->nrxqs = nrxqs;
 	port->priv = priv;
@@ -7804,7 +7830,10 @@ static int mvpp2_port_probe(struct platform_device *pdev,
 	if (err)
 		goto err_free_netdev;
 
-	port->link_irq = of_irq_get_byname(port_node, "link");
+	if (port_node)
+		port->link_irq = of_irq_get_byname(port_node, "link");
+	else
+		port->link_irq = fwnode_irq_get(port_fwnode, port->nqvecs + 1);
 	if (port->link_irq == -EPROBE_DEFER) {
 		err = -EPROBE_DEFER;
 		goto err_deinit_qvecs;
@@ -7813,7 +7842,7 @@ static int mvpp2_port_probe(struct platform_device *pdev,
 		/* the link irq is optional */
 		port->link_irq = 0;
 
-	if (of_property_read_bool(port_node, "marvell,loopback"))
+	if (fwnode_property_read_bool(port_fwnode, "marvell,loopback"))
 		port->flags |= MVPP2_F_LOOPBACK;
 
 	port->id = id;
@@ -7838,8 +7867,8 @@ static int mvpp2_port_probe(struct platform_device *pdev,
 				   MVPP21_MIB_COUNTERS_OFFSET +
 				   port->gop_id * MVPP21_MIB_COUNTERS_PORT_SZ;
 	} else {
-		if (of_property_read_u32(port_node, "gop-port-id",
-					 &port->gop_id)) {
+		if (fwnode_property_read_u32(port_fwnode, "gop-port-id",
+					     &port->gop_id)) {
 			err = -EINVAL;
 			dev_err(&pdev->dev, "missing gop-port-id value\n");
 			goto err_deinit_qvecs;
@@ -7869,10 +7898,10 @@ static int mvpp2_port_probe(struct platform_device *pdev,
 	mutex_init(&port->gather_stats_lock);
 	INIT_DELAYED_WORK(&port->stats_work, mvpp2_gather_hw_statistics);
 
-	mvpp2_port_copy_mac_addr(dev, priv, port_node, &mac_from);
+	mvpp2_port_copy_mac_addr(dev, priv, port_fwnode, &mac_from);
 
-	port->tx_ring_size = MVPP2_MAX_TXD;
-	port->rx_ring_size = MVPP2_MAX_RXD;
+	port->tx_ring_size = MVPP2_MAX_TXD_DFLT;
+	port->rx_ring_size = MVPP2_MAX_RXD_DFLT;
 	SET_NETDEV_DEV(dev, &pdev->dev);
 
 	err = mvpp2_port_init(port);
@@ -7927,7 +7956,8 @@ static int mvpp2_port_probe(struct platform_device *pdev,
 	}
 	netdev_info(dev, "Using %s mac address %pM\n", mac_from, dev->dev_addr);
 
-	priv->port_list[index] = port;
+	priv->port_list[priv->port_count++] = port;
+
 	return 0;
 
 err_free_port_pcpu:
@@ -8186,8 +8216,9 @@ static int mvpp2_init(struct platform_device *pdev, struct mvpp2 *priv)
 
 static int mvpp2_probe(struct platform_device *pdev)
 {
-	struct device_node *dn = pdev->dev.of_node;
-	struct device_node *port_node;
+	const struct acpi_device_id *acpi_id;
+	struct fwnode_handle *fwnode = pdev->dev.fwnode;
+	struct fwnode_handle *port_fwnode;
 	struct mvpp2 *priv;
 	struct resource *res;
 	void __iomem *base;
@@ -8198,8 +8229,14 @@ static int mvpp2_probe(struct platform_device *pdev)
 	if (!priv)
 		return -ENOMEM;
 
-	priv->hw_version =
-		(unsigned long)of_device_get_match_data(&pdev->dev);
+	if (has_acpi_companion(&pdev->dev)) {
+		acpi_id = acpi_match_device(pdev->dev.driver->acpi_match_table,
+					    &pdev->dev);
+		priv->hw_version = (unsigned long)acpi_id->driver_data;
+	} else {
+		priv->hw_version =
+			(unsigned long)of_device_get_match_data(&pdev->dev);
+	}
 
 	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
 	base = devm_ioremap_resource(&pdev->dev, res);
@@ -8213,10 +8250,23 @@ static int mvpp2_probe(struct platform_device *pdev)
 			return PTR_ERR(priv->lms_base);
 	} else {
 		res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
+		if (has_acpi_companion(&pdev->dev)) {
+			/* If the MDIO memory region is declared in
+			 * ACPI, it may already appear as 'in-use'
+			 * in the OS. Because it is overlapped by the
+			 * second region of the network controller,
+			 * release it before requesting it again. The
+			 * mvpp2 driver takes care to avoid concurrent
+			 * access to this memory region.
+			 */
+			release_resource(res);
+		}
 		priv->iface_base = devm_ioremap_resource(&pdev->dev, res);
 		if (IS_ERR(priv->iface_base))
 			return PTR_ERR(priv->iface_base);
+	}
 
+	if (priv->hw_version == MVPP22 && dev_of_node(&pdev->dev)) {
 		priv->sysctrl_base =
 			syscon_regmap_lookup_by_phandle(pdev->dev.of_node,
 							"marvell,system-controller");
@@ -8242,32 +8292,34 @@ static int mvpp2_probe(struct platform_device *pdev)
 	else
 		priv->max_port_rxqs = 32;
 
-	priv->pp_clk = devm_clk_get(&pdev->dev, "pp_clk");
-	if (IS_ERR(priv->pp_clk))
-		return PTR_ERR(priv->pp_clk);
-	err = clk_prepare_enable(priv->pp_clk);
-	if (err < 0)
-		return err;
-
-	priv->gop_clk = devm_clk_get(&pdev->dev, "gop_clk");
-	if (IS_ERR(priv->gop_clk)) {
-		err = PTR_ERR(priv->gop_clk);
-		goto err_pp_clk;
-	}
-	err = clk_prepare_enable(priv->gop_clk);
-	if (err < 0)
-		goto err_pp_clk;
+	if (dev_of_node(&pdev->dev)) {
+		priv->pp_clk = devm_clk_get(&pdev->dev, "pp_clk");
+		if (IS_ERR(priv->pp_clk))
+			return PTR_ERR(priv->pp_clk);
+		err = clk_prepare_enable(priv->pp_clk);
+		if (err < 0)
+			return err;
 
-	if (priv->hw_version == MVPP22) {
-		priv->mg_clk = devm_clk_get(&pdev->dev, "mg_clk");
-		if (IS_ERR(priv->mg_clk)) {
-			err = PTR_ERR(priv->mg_clk);
-			goto err_gop_clk;
+		priv->gop_clk = devm_clk_get(&pdev->dev, "gop_clk");
+		if (IS_ERR(priv->gop_clk)) {
+			err = PTR_ERR(priv->gop_clk);
+			goto err_pp_clk;
 		}
-
-		err = clk_prepare_enable(priv->mg_clk);
+		err = clk_prepare_enable(priv->gop_clk);
 		if (err < 0)
-			goto err_gop_clk;
+			goto err_pp_clk;
+
+		if (priv->hw_version == MVPP22) {
+			priv->mg_clk = devm_clk_get(&pdev->dev, "mg_clk");
+			if (IS_ERR(priv->mg_clk)) {
+				err = PTR_ERR(priv->mg_clk);
+				goto err_gop_clk;
+			}
+
+			err = clk_prepare_enable(priv->mg_clk);
+			if (err < 0)
+				goto err_gop_clk;
+		}
 
 		priv->axi_clk = devm_clk_get(&pdev->dev, "axi_clk");
 		if (IS_ERR(priv->axi_clk)) {
@@ -8280,10 +8332,14 @@ static int mvpp2_probe(struct platform_device *pdev)
 			if (err < 0)
 				goto err_gop_clk;
 		}
-	}
 
-	/* Get system's tclk rate */
-	priv->tclk = clk_get_rate(priv->pp_clk);
+		/* Get system's tclk rate */
+		priv->tclk = clk_get_rate(priv->pp_clk);
+	} else if (device_property_read_u32(&pdev->dev, "clock-frequency",
+					    &priv->tclk)) {
+		dev_err(&pdev->dev, "missing clock-frequency value\n");
+		return -EINVAL;
+	}
 
 	if (priv->hw_version == MVPP22) {
 		err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(40));
@@ -8306,30 +8362,19 @@ static int mvpp2_probe(struct platform_device *pdev)
 		goto err_mg_clk;
 	}
 
-	priv->port_count = of_get_available_child_count(dn);
+	/* Initialize ports */
+	fwnode_for_each_available_child_node(fwnode, port_fwnode) {
+		err = mvpp2_port_probe(pdev, port_fwnode, priv);
+		if (err < 0)
+			goto err_port_probe;
+	}
+
 	if (priv->port_count == 0) {
 		dev_err(&pdev->dev, "no ports enabled\n");
 		err = -ENODEV;
 		goto err_mg_clk;
 	}
 
-	priv->port_list = devm_kcalloc(&pdev->dev, priv->port_count,
-				       sizeof(*priv->port_list),
-				       GFP_KERNEL);
-	if (!priv->port_list) {
-		err = -ENOMEM;
-		goto err_mg_clk;
-	}
-
-	/* Initialize ports */
-	i = 0;
-	for_each_available_child_of_node(dn, port_node) {
-		err = mvpp2_port_probe(pdev, port_node, priv, i);
-		if (err < 0)
-			goto err_port_probe;
-		i++;
-	}
-
 	/* Statistics must be gathered regularly because some of them (like
 	 * packets counters) are 32-bit registers and could overflow quite
 	 * quickly. For instance, a 10Gb link used at full bandwidth with the
@@ -8350,7 +8395,7 @@ static int mvpp2_probe(struct platform_device *pdev)
 
 err_port_probe:
 	i = 0;
-	for_each_available_child_of_node(dn, port_node) {
+	fwnode_for_each_available_child_node(fwnode, port_fwnode) {
 		if (priv->port_list[i])
 			mvpp2_port_remove(priv->port_list[i]);
 		i++;
@@ -8369,14 +8414,14 @@ err_pp_clk:
 static int mvpp2_remove(struct platform_device *pdev)
 {
 	struct mvpp2 *priv = platform_get_drvdata(pdev);
-	struct device_node *dn = pdev->dev.of_node;
-	struct device_node *port_node;
+	struct fwnode_handle *fwnode = pdev->dev.fwnode;
+	struct fwnode_handle *port_fwnode;
 	int i = 0;
 
 	flush_workqueue(priv->stats_queue);
 	destroy_workqueue(priv->stats_queue);
 
-	for_each_available_child_of_node(dn, port_node) {
+	fwnode_for_each_available_child_node(fwnode, port_fwnode) {
 		if (priv->port_list[i]) {
 			mutex_destroy(&priv->port_list[i]->gather_stats_lock);
 			mvpp2_port_remove(priv->port_list[i]);
@@ -8399,6 +8444,9 @@ static int mvpp2_remove(struct platform_device *pdev)
 				  aggr_txq->descs_dma);
 	}
 
+	if (is_acpi_node(port_fwnode))
+		return 0;
+
 	clk_disable_unprepare(priv->axi_clk);
 	clk_disable_unprepare(priv->mg_clk);
 	clk_disable_unprepare(priv->pp_clk);
@@ -8420,12 +8468,19 @@ static const struct of_device_id mvpp2_match[] = {
 };
 MODULE_DEVICE_TABLE(of, mvpp2_match);
 
+static const struct acpi_device_id mvpp2_acpi_match[] = {
+	{ "MRVL0110", MVPP22 },
+	{ },
+};
+MODULE_DEVICE_TABLE(acpi, mvpp2_acpi_match);
+
 static struct platform_driver mvpp2_driver = {
 	.probe = mvpp2_probe,
 	.remove = mvpp2_remove,
 	.driver = {
 		.name = MVPP2_DRIVER_NAME,
 		.of_match_table = mvpp2_match,
+		.acpi_match_table = ACPI_PTR(mvpp2_acpi_match),
 	},
 };
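
A hedged sketch of the fwnode_* accessors the mvpp2 probe path now relies on:
the same calls resolve properties from either a DT node or an ACPI _DSD
package, which is what lets one probe function serve both firmware types. The
example_* skeleton below is illustrative, not part of the patch; the
"port-id" property name matches the one used above:

	static int example_port_parse(struct device *dev,
				      struct fwnode_handle *port_fwnode)
	{
		u32 id;
		int irq;

		/* Transparently reads an OF property or an ACPI _DSD entry */
		if (fwnode_property_read_u32(port_fwnode, "port-id", &id))
			return -EINVAL;

		/* With no OF interrupt names, IRQs are fetched by index */
		irq = fwnode_irq_get(port_fwnode, 0);
		if (irq <= 0)
			return -EINVAL;

		dev_info(dev, "port %u uses irq %d\n", id, irq);
		return 0;
	}
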
 
diff --git a/drivers/net/ethernet/marvell/sky2.c b/drivers/net/ethernet/marvell/sky2.c
index 9efe1771423c..9fe85300e7b6 100644
--- a/drivers/net/ethernet/marvell/sky2.c
+++ b/drivers/net/ethernet/marvell/sky2.c
@@ -4287,7 +4287,7 @@ static int sky2_vpd_wait(const struct sky2_hw *hw, int cap, u16 busy)
 			dev_err(&hw->pdev->dev, "VPD cycle timed out\n");
 			return -ETIMEDOUT;
 		}
-		mdelay(1);
+		msleep(1);
 	}
 
 	return 0;
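
The one-liner above swaps a busy-wait for a sleep. A hedged sketch of the
distinction (register name and timeout are illustrative): mdelay() spins the
CPU for the full duration, while msleep() lets the scheduler run other work,
so sleeping is preferred in process context such as this VPD wait loop:

	static int wait_for_ready(void __iomem *reg, u32 ready_bit)
	{
		int timeout = 100;

		while (!(readl(reg) & ready_bit)) {
			if (--timeout == 0)
				return -ETIMEDOUT;
			msleep(1);	/* sleeps; never use in atomic context */
		}
		return 0;
	}
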
diff --git a/drivers/net/ethernet/mediatek/mtk_eth_soc.c b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
index fc67e35b253e..29826dd15204 100644
--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c
+++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
@@ -1952,14 +1952,16 @@ static int mtk_hw_init(struct mtk_eth *eth)
 	}
 	regmap_write(eth->ethsys, ETHSYS_SYSCFG0, val);
 
-	/* Set GE2 driving and slew rate */
-	regmap_write(eth->pctl, GPIO_DRV_SEL10, 0xa00);
+	if (eth->pctl) {
+		/* Set GE2 driving and slew rate */
+		regmap_write(eth->pctl, GPIO_DRV_SEL10, 0xa00);
 
-	/* set GE2 TDSEL */
-	regmap_write(eth->pctl, GPIO_OD33_CTRL8, 0x5);
+		/* set GE2 TDSEL */
+		regmap_write(eth->pctl, GPIO_OD33_CTRL8, 0x5);
 
-	/* set GE2 TUNE */
-	regmap_write(eth->pctl, GPIO_BIAS_CTRL, 0x0);
+		/* set GE2 TUNE */
+		regmap_write(eth->pctl, GPIO_BIAS_CTRL, 0x0);
+	}
 
 	/* Set linkdown as the default for each GMAC. Its own MCR would be set
 	 * up with the more appropriate value when mtk_phy_link_adjust call is
@@ -2538,11 +2540,13 @@ static int mtk_probe(struct platform_device *pdev)
 		}
 	}
 
-	eth->pctl = syscon_regmap_lookup_by_phandle(pdev->dev.of_node,
-						    "mediatek,pctl");
-	if (IS_ERR(eth->pctl)) {
-		dev_err(&pdev->dev, "no pctl regmap found\n");
-		return PTR_ERR(eth->pctl);
+	if (eth->soc->required_pctl) {
+		eth->pctl = syscon_regmap_lookup_by_phandle(pdev->dev.of_node,
+							    "mediatek,pctl");
+		if (IS_ERR(eth->pctl)) {
+			dev_err(&pdev->dev, "no pctl regmap found\n");
+			return PTR_ERR(eth->pctl);
+		}
 	}
 
 	for (i = 0; i < 3; i++) {
@@ -2668,17 +2672,20 @@ static int mtk_remove(struct platform_device *pdev)
 
 static const struct mtk_soc_data mt2701_data = {
 	.caps = MTK_GMAC1_TRGMII,
-	.required_clks = MT7623_CLKS_BITMAP
+	.required_clks = MT7623_CLKS_BITMAP,
+	.required_pctl = true,
 };
 
 static const struct mtk_soc_data mt7622_data = {
 	.caps = MTK_DUAL_GMAC_SHARED_SGMII | MTK_GMAC1_ESW,
-	.required_clks = MT7622_CLKS_BITMAP
+	.required_clks = MT7622_CLKS_BITMAP,
+	.required_pctl = false,
 };
 
 static const struct mtk_soc_data mt7623_data = {
 	.caps = MTK_GMAC1_TRGMII,
-	.required_clks = MT7623_CLKS_BITMAP
+	.required_clks = MT7623_CLKS_BITMAP,
+	.required_pctl = true,
 };
 
 const struct of_device_id of_mtk_match[] = {
diff --git a/drivers/net/ethernet/mediatek/mtk_eth_soc.h b/drivers/net/ethernet/mediatek/mtk_eth_soc.h
index a3af4660de81..672b8c353c47 100644
--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.h
+++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.h
@@ -573,10 +573,13 @@ struct mtk_rx_ring {
  * @caps			Flags shown the extra capability for the SoC
  * @required_clks		Flags shown the bitmap for required clocks on
  *				the target SoC
+ * @required_pctl		A bool value indicating whether the SoC
+ *				requires extra pin setup for the GMAC.
  */
 struct mtk_soc_data {
 	u32		caps;
 	u32		required_clks;
+	bool		required_pctl;
 };
 
 /* currently no SoC has more than 2 macs */
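
A hedged sketch of how a per-SoC flag such as required_pctl typically reaches
probe(): each of_device_id entry carries a pointer to its mtk_soc_data in
.data, and of_device_get_match_data() returns it for the matched compatible.
The example_probe() skeleton is illustrative only:

	static int example_probe(struct platform_device *pdev)
	{
		const struct mtk_soc_data *soc;

		soc = of_device_get_match_data(&pdev->dev);
		if (!soc)
			return -EINVAL;

		if (soc->required_pctl) {
			/* only then look up the "mediatek,pctl" syscon */
		}
		return 0;
	}
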
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_dcb_nl.c b/drivers/net/ethernet/mellanox/mlx4/en_dcb_nl.c
index 5f41dc92aa68..1a0c3bf86ead 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_dcb_nl.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_dcb_nl.c
@@ -310,6 +310,7 @@ static int mlx4_en_ets_validate(struct mlx4_en_priv *priv, struct ieee_ets *ets)
 		}
 
 		switch (ets->tc_tsa[i]) {
+		case IEEE_8021QAZ_TSA_VENDOR:
 		case IEEE_8021QAZ_TSA_STRICT:
 			break;
 		case IEEE_8021QAZ_TSA_ETS:
@@ -347,6 +348,10 @@ static int mlx4_en_config_port_scheduler(struct mlx4_en_priv *priv,
 	/* higher TC means higher priority => lower pg */
 	for (i = IEEE_8021QAZ_MAX_TCS - 1; i >= 0; i--) {
 		switch (ets->tc_tsa[i]) {
+		case IEEE_8021QAZ_TSA_VENDOR:
+			pg[i] = MLX4_EN_TC_VENDOR;
+			tc_tx_bw[i] = MLX4_EN_BW_MAX;
+			break;
 		case IEEE_8021QAZ_TSA_STRICT:
 			pg[i] = num_strict++;
 			tc_tx_bw[i] = MLX4_EN_BW_MAX;
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c
index bf1f04164885..ebc1f566a4d9 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c
@@ -1094,12 +1094,21 @@ static int mlx4_en_set_ringparam(struct net_device *dev,
 	if (param->rx_jumbo_pending || param->rx_mini_pending)
 		return -EINVAL;
 
+	if (param->rx_pending < MLX4_EN_MIN_RX_SIZE) {
+		en_warn(priv, "%s: rx_pending (%d) < min (%d)\n",
+			__func__, param->rx_pending,
+			MLX4_EN_MIN_RX_SIZE);
+		return -EINVAL;
+	}
+	if (param->tx_pending < MLX4_EN_MIN_TX_SIZE) {
+		en_warn(priv, "%s: tx_pending (%d) < min (%lu)\n",
+			__func__, param->tx_pending,
+			MLX4_EN_MIN_TX_SIZE);
+		return -EINVAL;
+	}
+
 	rx_size = roundup_pow_of_two(param->rx_pending);
-	rx_size = max_t(u32, rx_size, MLX4_EN_MIN_RX_SIZE);
-	rx_size = min_t(u32, rx_size, MLX4_EN_MAX_RX_SIZE);
 	tx_size = roundup_pow_of_two(param->tx_pending);
-	tx_size = max_t(u32, tx_size, MLX4_EN_MIN_TX_SIZE);
-	tx_size = min_t(u32, tx_size, MLX4_EN_MAX_TX_SIZE);
 
 	if (rx_size == (priv->port_up ? priv->rx_ring[0]->actual_size :
 					priv->rx_ring[0]->size) &&
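
A hedged illustration of the validation change above (helper and values are
examples, not driver code): requests below the minimum now fail with -EINVAL
instead of being silently raised, while acceptable sizes are still rounded up
to the next power of two:

	static int validate_ring_size(u32 pending, u32 min_size, u32 *out)
	{
		if (pending < min_size)
			return -EINVAL;		/* was: silently clamped up */

		*out = roundup_pow_of_two(pending);	/* e.g. 1000 -> 1024 */
		return 0;
	}
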
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
index 99051a294fa6..8fc51bc29003 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
@@ -2172,8 +2172,9 @@ static int mlx4_en_alloc_resources(struct mlx4_en_priv *priv)
 
 		if (mlx4_en_create_rx_ring(priv, &priv->rx_ring[i],
 					   prof->rx_ring_size, priv->stride,
-					   node))
+					   node, i))
 			goto err;
+
 	}
 
 #ifdef CONFIG_RFS_ACCEL
@@ -3336,6 +3337,13 @@ int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port,
 	priv->msg_enable = MLX4_EN_MSG_LEVEL;
 #ifdef CONFIG_MLX4_EN_DCB
 	if (!mlx4_is_slave(priv->mdev->dev)) {
+		u8 prio;
+
+		for (prio = 0; prio < IEEE_8021QAZ_MAX_TCS; ++prio) {
+			priv->ets.prio_tc[prio] = prio;
+			priv->ets.tc_tsa[prio]  = IEEE_8021QAZ_TSA_VENDOR;
+		}
+
 		priv->dcbx_cap = DCB_CAP_DCBX_VER_CEE | DCB_CAP_DCBX_HOST |
 			DCB_CAP_DCBX_VER_IEEE;
 		priv->flags |= MLX4_EN_DCB_ENABLED;
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_rx.c b/drivers/net/ethernet/mellanox/mlx4/en_rx.c
index 85e28efcda33..b4d144e67514 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_rx.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_rx.c
@@ -262,7 +262,7 @@ void mlx4_en_set_num_rx_rings(struct mlx4_en_dev *mdev)
 
 int mlx4_en_create_rx_ring(struct mlx4_en_priv *priv,
 			   struct mlx4_en_rx_ring **pring,
-			   u32 size, u16 stride, int node)
+			   u32 size, u16 stride, int node, int queue_index)
 {
 	struct mlx4_en_dev *mdev = priv->mdev;
 	struct mlx4_en_rx_ring *ring;
@@ -286,6 +286,9 @@ int mlx4_en_create_rx_ring(struct mlx4_en_priv *priv,
 	ring->log_stride = ffs(ring->stride) - 1;
 	ring->buf_size = ring->size * ring->stride + TXBB_SIZE;
 
+	if (xdp_rxq_info_reg(&ring->xdp_rxq, priv->dev, queue_index) < 0)
+		goto err_ring;
+
 	tmp = size * roundup_pow_of_two(MLX4_EN_MAX_RX_FRAGS *
 					sizeof(struct mlx4_en_rx_alloc));
 	ring->rx_info = vzalloc_node(tmp, node);
@@ -293,7 +296,7 @@ int mlx4_en_create_rx_ring(struct mlx4_en_priv *priv,
 		ring->rx_info = vzalloc(tmp);
 		if (!ring->rx_info) {
 			err = -ENOMEM;
-			goto err_ring;
+			goto err_xdp_info;
 		}
 	}
 
@@ -317,6 +320,8 @@ int mlx4_en_create_rx_ring(struct mlx4_en_priv *priv,
 err_info:
 	vfree(ring->rx_info);
 	ring->rx_info = NULL;
+err_xdp_info:
+	xdp_rxq_info_unreg(&ring->xdp_rxq);
 err_ring:
 	kfree(ring);
 	*pring = NULL;
@@ -440,6 +445,7 @@ void mlx4_en_destroy_rx_ring(struct mlx4_en_priv *priv,
 					lockdep_is_held(&mdev->state_lock));
 	if (old_prog)
 		bpf_prog_put(old_prog);
+	xdp_rxq_info_unreg(&ring->xdp_rxq);
 	mlx4_free_hwq_res(mdev->dev, &ring->wqres, size * stride + TXBB_SIZE);
 	vfree(ring->rx_info);
 	ring->rx_info = NULL;
@@ -617,6 +623,10 @@ static int get_fixed_ipv6_csum(__wsum hw_checksum, struct sk_buff *skb,
 	return 0;
 }
 #endif
+
+/* We reach this function only after checking that at least one of
+ * the (IPv4 | IPv6) bits is set in cqe->status.
+ */
 static int check_csum(struct mlx4_cqe *cqe, struct sk_buff *skb, void *va,
 		      netdev_features_t dev_features)
 {
@@ -632,13 +642,11 @@ static int check_csum(struct mlx4_cqe *cqe, struct sk_buff *skb, void *va,
 		hdr += sizeof(struct vlan_hdr);
 	}
 
-	if (cqe->status & cpu_to_be16(MLX4_CQE_STATUS_IPV4))
-		return get_fixed_ipv4_csum(hw_checksum, skb, hdr);
 #if IS_ENABLED(CONFIG_IPV6)
 	if (cqe->status & cpu_to_be16(MLX4_CQE_STATUS_IPV6))
 		return get_fixed_ipv6_csum(hw_checksum, skb, hdr);
 #endif
-	return 0;
+	return get_fixed_ipv4_csum(hw_checksum, skb, hdr);
 }
 
 int mlx4_en_process_rx_cq(struct net_device *dev, struct mlx4_en_cq *cq, int budget)
@@ -650,6 +658,7 @@ int mlx4_en_process_rx_cq(struct net_device *dev, struct mlx4_en_cq *cq, int bud
 	int cq_ring = cq->ring;
 	bool doorbell_pending;
 	struct mlx4_cqe *cqe;
+	struct xdp_buff xdp;
 	int polled = 0;
 	int index;
 
@@ -664,6 +673,7 @@ int mlx4_en_process_rx_cq(struct net_device *dev, struct mlx4_en_cq *cq, int bud
 	/* Protect accesses to: ring->xdp_prog, priv->mac_hash list */
 	rcu_read_lock();
 	xdp_prog = rcu_dereference(ring->xdp_prog);
+	xdp.rxq = &ring->xdp_rxq;
 	doorbell_pending = 0;
 
 	/* We assume a 1:1 mapping between CQEs and Rx descriptors, so Rx
@@ -748,7 +758,6 @@ int mlx4_en_process_rx_cq(struct net_device *dev, struct mlx4_en_cq *cq, int bud
 		 * read bytes but not past the end of the frag.
 		 */
 		if (xdp_prog) {
-			struct xdp_buff xdp;
 			dma_addr_t dma;
 			void *orig_data;
 			u32 act;
@@ -814,33 +823,33 @@ xdp_drop_no_cnt:
 		if (likely(dev->features & NETIF_F_RXCSUM)) {
 			if (cqe->status & cpu_to_be16(MLX4_CQE_STATUS_TCP |
 						      MLX4_CQE_STATUS_UDP)) {
-				if ((cqe->status & cpu_to_be16(MLX4_CQE_STATUS_IPOK)) &&
-				    cqe->checksum == cpu_to_be16(0xffff)) {
-					bool l2_tunnel = (dev->hw_enc_features & NETIF_F_RXCSUM) &&
-						(cqe->vlan_my_qpn & cpu_to_be32(MLX4_CQE_L2_TUNNEL));
-
-					ip_summed = CHECKSUM_UNNECESSARY;
-					hash_type = PKT_HASH_TYPE_L4;
-					if (l2_tunnel)
-						skb->csum_level = 1;
-					ring->csum_ok++;
-				} else {
+				bool l2_tunnel;
+
+				if (!((cqe->status & cpu_to_be16(MLX4_CQE_STATUS_IPOK)) &&
+				      cqe->checksum == cpu_to_be16(0xffff)))
 					goto csum_none;
-				}
+
+				l2_tunnel = (dev->hw_enc_features & NETIF_F_RXCSUM) &&
+					(cqe->vlan_my_qpn & cpu_to_be32(MLX4_CQE_L2_TUNNEL));
+				ip_summed = CHECKSUM_UNNECESSARY;
+				hash_type = PKT_HASH_TYPE_L4;
+				if (l2_tunnel)
+					skb->csum_level = 1;
+				ring->csum_ok++;
 			} else {
-				if (priv->flags & MLX4_EN_FLAG_RX_CSUM_NON_TCP_UDP &&
-				    (cqe->status & cpu_to_be16(MLX4_CQE_STATUS_IPV4 |
-							       MLX4_CQE_STATUS_IPV6))) {
-					if (check_csum(cqe, skb, va, dev->features)) {
-						goto csum_none;
-					} else {
-						ip_summed = CHECKSUM_COMPLETE;
-						hash_type = PKT_HASH_TYPE_L3;
-						ring->csum_complete++;
-					}
-				} else {
+				if (!(priv->flags & MLX4_EN_FLAG_RX_CSUM_NON_TCP_UDP &&
+				      (cqe->status & cpu_to_be16(MLX4_CQE_STATUS_IPV4 |
+#if IS_ENABLED(CONFIG_IPV6)
+								 MLX4_CQE_STATUS_IPV6))))
+#else
+								 0))))
+#endif
 					goto csum_none;
-				}
+				if (check_csum(cqe, skb, va, dev->features))
+					goto csum_none;
+				ip_summed = CHECKSUM_COMPLETE;
+				hash_type = PKT_HASH_TYPE_L3;
+				ring->csum_complete++;
 			}
 		} else {
 csum_none:
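
A hedged sketch of the xdp_rxq_info lifecycle this hunk wires into mlx4 (ring
and netdev names are illustrative): register once per RX ring at creation,
point each xdp_buff at it in the poll loop, and unregister at teardown,
mirroring the err_xdp_info unwind path added above:

	static int example_ring_create(struct xdp_rxq_info *rxq,
				       struct net_device *dev, u32 queue_index)
	{
		/* Exposes queue metadata to XDP programs via xdp->rxq */
		return xdp_rxq_info_reg(rxq, dev, queue_index);
	}

	static void example_ring_destroy(struct xdp_rxq_info *rxq)
	{
		xdp_rxq_info_unreg(rxq);
	}
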
diff --git a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
index 2b72677eccd4..f470ae37d937 100644
--- a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
+++ b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
@@ -46,6 +46,7 @@
 #endif
 #include <linux/cpu_rmap.h>
 #include <linux/ptp_clock_kernel.h>
+#include <net/xdp.h>
 
 #include <linux/mlx4/device.h>
 #include <linux/mlx4/qp.h>
@@ -356,6 +357,7 @@ struct mlx4_en_rx_ring {
 	unsigned long dropped;
 	int hwtstamp_rx_filter;
 	cpumask_var_t affinity_mask;
+	struct xdp_rxq_info xdp_rxq;
 };
 
 struct mlx4_en_cq {
@@ -479,6 +481,7 @@ struct mlx4_en_frag_info {
 #define MLX4_EN_BW_MIN 1
 #define MLX4_EN_BW_MAX 100 /* Utilize 100% of the line */
 
+#define MLX4_EN_TC_VENDOR 0
 #define MLX4_EN_TC_ETS 7
 
 enum dcb_pfc_type {
@@ -719,7 +722,7 @@ void mlx4_en_set_num_rx_rings(struct mlx4_en_dev *mdev);
 void mlx4_en_recover_from_oom(struct mlx4_en_priv *priv);
 int mlx4_en_create_rx_ring(struct mlx4_en_priv *priv,
 			   struct mlx4_en_rx_ring **pring,
-			   u32 size, u16 stride, int node);
+			   u32 size, u16 stride, int node, int queue_index);
 void mlx4_en_destroy_rx_ring(struct mlx4_en_priv *priv,
 			     struct mlx4_en_rx_ring **pring,
 			     u32 size, u16 stride);
diff --git a/drivers/net/ethernet/mellanox/mlx4/mr.c b/drivers/net/ethernet/mellanox/mlx4/mr.c
index c7c0764991c9..2e84f10f59ba 100644
--- a/drivers/net/ethernet/mellanox/mlx4/mr.c
+++ b/drivers/net/ethernet/mellanox/mlx4/mr.c
@@ -1103,30 +1103,16 @@ EXPORT_SYMBOL_GPL(mlx4_fmr_enable);
 void mlx4_fmr_unmap(struct mlx4_dev *dev, struct mlx4_fmr *fmr,
 		    u32 *lkey, u32 *rkey)
 {
-	struct mlx4_cmd_mailbox *mailbox;
-	int err;
-
 	if (!fmr->maps)
 		return;
 
-	fmr->maps = 0;
+	/* To unmap: it is sufficient to take back ownership from HW */
+	*(u8 *)fmr->mpt = MLX4_MPT_STATUS_SW;
 
-	mailbox = mlx4_alloc_cmd_mailbox(dev);
-	if (IS_ERR(mailbox)) {
-		err = PTR_ERR(mailbox);
-		pr_warn("mlx4_ib: mlx4_alloc_cmd_mailbox failed (%d)\n", err);
-		return;
-	}
+	/* Make sure MPT status is visible */
+	wmb();
 
-	err = mlx4_HW2SW_MPT(dev, NULL,
-			     key_to_hw_index(fmr->mr.key) &
-			     (dev->caps.num_mpts - 1));
-	mlx4_free_cmd_mailbox(dev, mailbox);
-	if (err) {
-		pr_warn("mlx4_ib: mlx4_HW2SW_MPT failed (%d)\n", err);
-		return;
-	}
-	fmr->mr.enabled = MLX4_MPT_EN_SW;
+	fmr->maps = 0;
 }
 EXPORT_SYMBOL_GPL(mlx4_fmr_unmap);
 
@@ -1136,6 +1122,22 @@ int mlx4_fmr_free(struct mlx4_dev *dev, struct mlx4_fmr *fmr)
 
 	if (fmr->maps)
 		return -EBUSY;
+	if (fmr->mr.enabled == MLX4_MPT_EN_HW) {
+		/* If the FMR was enabled and then unmapped, make
+		 * sure to give ownership of the MPT back to HW so
+		 * the HW2SW_MPT command will succeed.
+		 */
+		*(u8 *)fmr->mpt = MLX4_MPT_STATUS_SW;
+		/* Make sure MPT status is visible before changing MPT fields */
+		wmb();
+		fmr->mpt->length = 0;
+		fmr->mpt->start  = 0;
+		/* Make sure MPT data is visible before changing MPT status */
+		wmb();
+		*(u8 *)fmr->mpt = MLX4_MPT_STATUS_HW;
+		/* Make sure MPT status is visible */
+		wmb();
+	}
 
 	ret = mlx4_mr_free(dev, &fmr->mr);
 	if (ret)
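
A hedged sketch of the ownership-handoff pattern used in both hunks above
(names and status values are illustrative, not the mlx4 definitions): the MPT
status byte transfers ownership between software and hardware, and each wmb()
orders the status store against the surrounding field stores:

	static void example_reclaim(u8 *status, __be64 *length, __be64 *start)
	{
		*status = 0xf0;		/* STATUS_SW: take ownership from HW */
		wmb();			/* status lands before field edits */
		*length = 0;
		*start = 0;
		wmb();			/* field edits land before handback */
		*status = 0x00;		/* STATUS_HW: hand back to HW */
	}
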
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/Makefile b/drivers/net/ethernet/mellanox/mlx5/core/Makefile
index 19b21b40ab07..c805769d92a9 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/Makefile
+++ b/drivers/net/ethernet/mellanox/mlx5/core/Makefile
@@ -14,7 +14,7 @@ mlx5_core-$(CONFIG_MLX5_FPGA) += fpga/cmd.o fpga/core.o fpga/conn.o fpga/sdk.o \
 		fpga/ipsec.o
 
 mlx5_core-$(CONFIG_MLX5_CORE_EN) += en_main.o en_common.o en_fs.o en_ethtool.o \
-		en_tx.o en_rx.o en_rx_am.o en_txrx.o en_stats.o vxlan.o \
+		en_tx.o en_rx.o en_dim.o en_txrx.o en_stats.o vxlan.o \
 		en_arfs.o en_fs_ethtool.o en_selftest.o
 
 mlx5_core-$(CONFIG_MLX5_MPFS) += lib/mpfs.o
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en.h b/drivers/net/ethernet/mellanox/mlx5/core/en.h
index c2d89bfa1a70..4c9360b25532 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en.h
@@ -44,8 +44,11 @@
 #include <linux/mlx5/port.h>
 #include <linux/mlx5/vport.h>
 #include <linux/mlx5/transobj.h>
+#include <linux/mlx5/fs.h>
 #include <linux/rhashtable.h>
 #include <net/switchdev.h>
+#include <net/xdp.h>
+#include <linux/net_dim.h>
 #include "wq.h"
 #include "mlx5_core.h"
 #include "en_stats.h"
@@ -226,12 +229,6 @@ enum mlx5e_priv_flag {
 #define MLX5E_MAX_BW_ALLOC 100 /* Max percentage of BW allocation */
 #endif
 
-struct mlx5e_cq_moder {
-	u16 usec;
-	u16 pkts;
-	u8 cq_period_mode;
-};
-
 struct mlx5e_params {
 	u8  log_sq_size;
 	u8  rq_wq_type;
@@ -242,8 +239,8 @@ struct mlx5e_params {
 	u16 num_channels;
 	u8  num_tc;
 	bool rx_cqe_compress_def;
-	struct mlx5e_cq_moder rx_cq_moderation;
-	struct mlx5e_cq_moder tx_cq_moderation;
+	struct net_dim_cq_moder rx_cq_moderation;
+	struct net_dim_cq_moder tx_cq_moderation;
 	bool lro_en;
 	u32 lro_wqe_sz;
 	u16 tx_max_inline;
@@ -253,7 +250,7 @@ struct mlx5e_params {
 	u32 indirection_rqt[MLX5E_INDIR_RQT_SIZE];
 	bool vlan_strip_disable;
 	bool scatter_fcs_en;
-	bool rx_am_enabled;
+	bool rx_dim_enabled;
 	u32 lro_timeout;
 	u32 pflags;
 	struct bpf_prog *xdp_prog;
@@ -472,32 +469,6 @@ struct mlx5e_mpw_info {
 	u16 skbs_frags[MLX5_MPWRQ_PAGES_PER_WQE];
 };
 
-struct mlx5e_rx_am_stats {
-	int ppms; /* packets per msec */
-	int bpms; /* bytes per msec */
-	int epms; /* events per msec */
-};
-
-struct mlx5e_rx_am_sample {
-	ktime_t	time;
-	u32	pkt_ctr;
-	u32	byte_ctr;
-	u16	event_ctr;
-};
-
-struct mlx5e_rx_am { /* Adaptive Moderation */
-	u8					state;
-	struct mlx5e_rx_am_stats		prev_stats;
-	struct mlx5e_rx_am_sample		start_sample;
-	struct work_struct			work;
-	u8					profile_ix;
-	u8					mode;
-	u8					tune_state;
-	u8					steps_right;
-	u8					steps_left;
-	u8					tired;
-};
-
 /* a single cache unit is capable to serve one napi call (for non-striding rq)
  * or a MPWQE (for striding rq).
  */
@@ -558,7 +529,7 @@ struct mlx5e_rq {
 	unsigned long          state;
 	int                    ix;
 
-	struct mlx5e_rx_am     am; /* Adaptive Moderation */
+	struct net_dim         dim; /* Dynamic Interrupt Moderation */
 
 	/* XDP */
 	struct bpf_prog       *xdp_prog;
@@ -571,6 +542,9 @@ struct mlx5e_rq {
 	u32                    rqn;
 	struct mlx5_core_dev  *mdev;
 	struct mlx5_core_mkey  umr_mkey;
+
+	/* XDP read-mostly */
+	struct xdp_rxq_info    xdp_rxq;
 } ____cacheline_aligned_in_smp;
 
 struct mlx5e_channel {
@@ -587,6 +561,7 @@ struct mlx5e_channel {
 
 	/* data path - accessed per napi poll */
 	struct irq_desc *irq_desc;
+	struct mlx5e_ch_stats      stats;
 
 	/* control */
 	struct mlx5e_priv         *priv;
@@ -655,6 +630,7 @@ struct mlx5e_tc_table {
 	struct rhashtable               ht;
 
 	DECLARE_HASHTABLE(mod_hdr_tbl, 8);
+	DECLARE_HASHTABLE(hairpin_tbl, 8);
 };
 
 struct mlx5e_vlan_table {
@@ -722,6 +698,11 @@ enum {
 	MLX5E_ARFS_FT_LEVEL
 };
 
+enum {
+	MLX5E_TC_FT_LEVEL = 0,
+	MLX5E_TC_TTC_FT_LEVEL,
+};
+
 struct mlx5e_ethtool_table {
 	struct mlx5_flow_table *ft;
 	int                    num_rules;
@@ -860,11 +841,7 @@ void mlx5e_dealloc_rx_wqe(struct mlx5e_rq *rq, u16 ix);
 void mlx5e_dealloc_rx_mpwqe(struct mlx5e_rq *rq, u16 ix);
 void mlx5e_free_rx_mpwqe(struct mlx5e_rq *rq, struct mlx5e_mpw_info *wi);
 
-void mlx5e_rx_am(struct mlx5e_rq *rq);
-void mlx5e_rx_am_work(struct work_struct *work);
-struct mlx5e_cq_moder mlx5e_am_get_def_profile(u8 rx_cq_period_mode);
-
-void mlx5e_update_stats(struct mlx5e_priv *priv, bool full);
+void mlx5e_update_stats(struct mlx5e_priv *priv);
 
 int mlx5e_create_flow_steering(struct mlx5e_priv *priv);
 void mlx5e_destroy_flow_steering(struct mlx5e_priv *priv);
@@ -1054,11 +1031,26 @@ int mlx5e_create_direct_tirs(struct mlx5e_priv *priv);
 void mlx5e_destroy_direct_tirs(struct mlx5e_priv *priv);
 void mlx5e_destroy_rqt(struct mlx5e_priv *priv, struct mlx5e_rqt *rqt);
 
-int mlx5e_create_ttc_table(struct mlx5e_priv *priv);
-void mlx5e_destroy_ttc_table(struct mlx5e_priv *priv);
+struct ttc_params {
+	struct mlx5_flow_table_attr ft_attr;
+	u32 any_tt_tirn;
+	u32 indir_tirn[MLX5E_NUM_INDIR_TIRS];
+	struct mlx5e_ttc_table *inner_ttc;
+};
+
+void mlx5e_set_ttc_basic_params(struct mlx5e_priv *priv, struct ttc_params *ttc_params);
+void mlx5e_set_ttc_ft_params(struct ttc_params *ttc_params);
+void mlx5e_set_inner_ttc_ft_params(struct ttc_params *ttc_params);
 
-int mlx5e_create_inner_ttc_table(struct mlx5e_priv *priv);
-void mlx5e_destroy_inner_ttc_table(struct mlx5e_priv *priv);
+int mlx5e_create_ttc_table(struct mlx5e_priv *priv, struct ttc_params *params,
+			   struct mlx5e_ttc_table *ttc);
+void mlx5e_destroy_ttc_table(struct mlx5e_priv *priv,
+			     struct mlx5e_ttc_table *ttc);
+
+int mlx5e_create_inner_ttc_table(struct mlx5e_priv *priv, struct ttc_params *params,
+				 struct mlx5e_ttc_table *ttc);
+void mlx5e_destroy_inner_ttc_table(struct mlx5e_priv *priv,
+				   struct mlx5e_ttc_table *ttc);
 
 int mlx5e_create_tis(struct mlx5_core_dev *mdev, int tc,
 		     u32 underlay_qpn, u32 *tisn);
@@ -1071,6 +1063,8 @@ int mlx5e_open(struct net_device *netdev);
 void mlx5e_update_stats_work(struct work_struct *work);
 u32 mlx5e_choose_lro_timeout(struct mlx5_core_dev *mdev, u32 wanted_timeout);
 
+int mlx5e_bits_invert(unsigned long a, int size);
+
 /* ethtool helpers */
 void mlx5e_ethtool_get_drvinfo(struct mlx5e_priv *priv,
 			       struct ethtool_drvinfo *drvinfo);
@@ -1110,4 +1104,5 @@ void mlx5e_build_nic_params(struct mlx5_core_dev *mdev,
 			    struct mlx5e_params *params,
 			    u16 max_channels);
 u8 mlx5e_params_calculate_tx_min_inline(struct mlx5_core_dev *mdev);
+void mlx5e_rx_dim_work(struct work_struct *work);
 #endif /* __MLX5_EN_H__ */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_dim.c b/drivers/net/ethernet/mellanox/mlx5/core/en_dim.c
new file mode 100644
index 000000000000..602851ab5b14
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_dim.c
@@ -0,0 +1,48 @@
+/*
+ * Copyright (c) 2016, Mellanox Technologies. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses.  You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ *     Redistribution and use in source and binary forms, with or
+ *     without modification, are permitted provided that the following
+ *     conditions are met:
+ *
+ *      - Redistributions of source code must retain the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer.
+ *
+ *      - Redistributions in binary form must reproduce the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer in the documentation and/or other materials
+ *        provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include <linux/net_dim.h>
+#include "en.h"
+
+void mlx5e_rx_dim_work(struct work_struct *work)
+{
+	struct net_dim *dim = container_of(work, struct net_dim,
+					   work);
+	struct mlx5e_rq *rq = container_of(dim, struct mlx5e_rq, dim);
+	struct net_dim_cq_moder cur_profile = net_dim_get_profile(dim->mode,
+								  dim->profile_ix);
+
+	mlx5_core_modify_cq_moderation(rq->mdev, &rq->cq.mcq,
+				       cur_profile.usec, cur_profile.pkts);
+
+	dim->state = NET_DIM_START_MEASURE;
+}
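
A hedged sketch of the producer side of net_dim that pairs with the work
handler above (the mlx5 poll loop itself is outside this hunk; the example_*
wrapper is illustrative): each napi cycle builds a sample from the ring
counters and feeds it to net_dim(), which may queue dim->work with a newly
chosen profile:

	static void example_napi_done(struct net_dim *dim, u16 events,
				      u64 packets, u64 bytes)
	{
		struct net_dim_sample sample;

		net_dim_sample(events, packets, bytes, &sample);
		net_dim(dim, sample);	/* may schedule mlx5e_rx_dim_work() */
	}
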
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
index ea5fff2c3143..cc8048f68f11 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
@@ -207,7 +207,7 @@ void mlx5e_ethtool_get_ethtool_stats(struct mlx5e_priv *priv,
 		return;
 
 	mutex_lock(&priv->state_lock);
-	mlx5e_update_stats(priv, true);
+	mlx5e_update_stats(priv);
 	mutex_unlock(&priv->state_lock);
 
 	for (i = 0; i < mlx5e_num_stats_grps; i++)
@@ -295,7 +295,6 @@ int mlx5e_ethtool_set_ringparam(struct mlx5e_priv *priv,
 	struct mlx5e_channels new_channels = {};
 	u32 rx_pending_wqes;
 	u32 min_rq_size;
-	u32 max_rq_size;
 	u8 log_rq_size;
 	u8 log_sq_size;
 	u32 num_mtts;
@@ -314,8 +313,6 @@ int mlx5e_ethtool_set_ringparam(struct mlx5e_priv *priv,
 
 	min_rq_size = mlx5e_rx_wqes_to_packets(priv, rq_wq_type,
 					       1 << mlx5_min_log_rq_size(rq_wq_type));
-	max_rq_size = mlx5e_rx_wqes_to_packets(priv, rq_wq_type,
-					       1 << mlx5_max_log_rq_size(rq_wq_type));
 	rx_pending_wqes = mlx5e_packets_to_rx_wqes(priv, rq_wq_type,
 						   param->rx_pending);
 
@@ -325,12 +322,6 @@ int mlx5e_ethtool_set_ringparam(struct mlx5e_priv *priv,
 			    min_rq_size);
 		return -EINVAL;
 	}
-	if (param->rx_pending > max_rq_size) {
-		netdev_info(priv->netdev, "%s: rx_pending (%d) > max (%d)\n",
-			    __func__, param->rx_pending,
-			    max_rq_size);
-		return -EINVAL;
-	}
 
 	num_mtts = MLX5E_REQUIRED_MTTS(rx_pending_wqes);
 	if (priv->channels.params.rq_wq_type == MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ &&
@@ -346,12 +337,6 @@ int mlx5e_ethtool_set_ringparam(struct mlx5e_priv *priv,
 			    1 << MLX5E_PARAMS_MINIMUM_LOG_SQ_SIZE);
 		return -EINVAL;
 	}
-	if (param->tx_pending > (1 << MLX5E_PARAMS_MAXIMUM_LOG_SQ_SIZE)) {
-		netdev_info(priv->netdev, "%s: tx_pending (%d) > max (%d)\n",
-			    __func__, param->tx_pending,
-			    1 << MLX5E_PARAMS_MAXIMUM_LOG_SQ_SIZE);
-		return -EINVAL;
-	}
 
 	log_rq_size = order_base_2(rx_pending_wqes);
 	log_sq_size = order_base_2(param->tx_pending);
@@ -479,7 +464,7 @@ int mlx5e_ethtool_get_coalesce(struct mlx5e_priv *priv,
 	coal->rx_max_coalesced_frames = priv->channels.params.rx_cq_moderation.pkts;
 	coal->tx_coalesce_usecs       = priv->channels.params.tx_cq_moderation.usec;
 	coal->tx_max_coalesced_frames = priv->channels.params.tx_cq_moderation.pkts;
-	coal->use_adaptive_rx_coalesce = priv->channels.params.rx_am_enabled;
+	coal->use_adaptive_rx_coalesce = priv->channels.params.rx_dim_enabled;
 
 	return 0;
 }
@@ -533,7 +518,7 @@ int mlx5e_ethtool_set_coalesce(struct mlx5e_priv *priv,
 	new_channels.params.tx_cq_moderation.pkts = coal->tx_max_coalesced_frames;
 	new_channels.params.rx_cq_moderation.usec = coal->rx_coalesce_usecs;
 	new_channels.params.rx_cq_moderation.pkts = coal->rx_max_coalesced_frames;
-	new_channels.params.rx_am_enabled         = !!coal->use_adaptive_rx_coalesce;
+	new_channels.params.rx_dim_enabled        = !!coal->use_adaptive_rx_coalesce;
 
 	if (!test_bit(MLX5E_STATE_OPENED, &priv->state)) {
 		priv->channels.params = new_channels.params;
@@ -541,7 +526,7 @@ int mlx5e_ethtool_set_coalesce(struct mlx5e_priv *priv,
 	}
 	/* we are opened */
 
-	reset = !!coal->use_adaptive_rx_coalesce != priv->channels.params.rx_am_enabled;
+	reset = !!coal->use_adaptive_rx_coalesce != priv->channels.params.rx_dim_enabled;
 	if (!reset) {
 		mlx5e_set_priv_channels_coalesce(priv, coal);
 		priv->channels.params = new_channels.params;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_fs.c b/drivers/net/ethernet/mellanox/mlx5/core/en_fs.c
index def513484845..f64dda2bed31 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_fs.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_fs.c
@@ -806,25 +806,25 @@ mlx5e_generate_ttc_rule(struct mlx5e_priv *priv,
 	return err ? ERR_PTR(err) : rule;
 }
 
-static int mlx5e_generate_ttc_table_rules(struct mlx5e_priv *priv)
+static int mlx5e_generate_ttc_table_rules(struct mlx5e_priv *priv,
+					  struct ttc_params *params,
+					  struct mlx5e_ttc_table *ttc)
 {
 	struct mlx5_flow_destination dest = {};
-	struct mlx5e_ttc_table *ttc;
 	struct mlx5_flow_handle **rules;
 	struct mlx5_flow_table *ft;
 	int tt;
 	int err;
 
-	ttc = &priv->fs.ttc;
 	ft = ttc->ft.t;
 	rules = ttc->rules;
 
 	dest.type = MLX5_FLOW_DESTINATION_TYPE_TIR;
 	for (tt = 0; tt < MLX5E_NUM_TT; tt++) {
 		if (tt == MLX5E_TT_ANY)
-			dest.tir_num = priv->direct_tir[0].tirn;
+			dest.tir_num = params->any_tt_tirn;
 		else
-			dest.tir_num = priv->indir_tir[tt].tirn;
+			dest.tir_num = params->indir_tirn[tt];
 		rules[tt] = mlx5e_generate_ttc_rule(priv, ft, &dest,
 						    ttc_rules[tt].etype,
 						    ttc_rules[tt].proto);
@@ -832,12 +832,12 @@ static int mlx5e_generate_ttc_table_rules(struct mlx5e_priv *priv)
 			goto del_rules;
 	}
 
-	if (!mlx5e_tunnel_inner_ft_supported(priv->mdev))
+	if (!params->inner_ttc || !mlx5e_tunnel_inner_ft_supported(priv->mdev))
 		return 0;
 
 	rules     = ttc->tunnel_rules;
 	dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
-	dest.ft   = priv->fs.inner_ttc.ft.t;
+	dest.ft   = params->inner_ttc->ft.t;
 	for (tt = 0; tt < MLX5E_NUM_TUNNEL_TT; tt++) {
 		rules[tt] = mlx5e_generate_ttc_rule(priv, ft, &dest,
 						    ttc_tunnel_rules[tt].etype,
@@ -977,25 +977,25 @@ mlx5e_generate_inner_ttc_rule(struct mlx5e_priv *priv,
 	return err ? ERR_PTR(err) : rule;
 }
 
-static int mlx5e_generate_inner_ttc_table_rules(struct mlx5e_priv *priv)
+static int mlx5e_generate_inner_ttc_table_rules(struct mlx5e_priv *priv,
+						struct ttc_params *params,
+						struct mlx5e_ttc_table *ttc)
 {
 	struct mlx5_flow_destination dest = {};
 	struct mlx5_flow_handle **rules;
-	struct mlx5e_ttc_table *ttc;
 	struct mlx5_flow_table *ft;
 	int err;
 	int tt;
 
-	ttc =  &priv->fs.inner_ttc;
 	ft = ttc->ft.t;
 	rules = ttc->rules;
 
 	dest.type = MLX5_FLOW_DESTINATION_TYPE_TIR;
 	for (tt = 0; tt < MLX5E_NUM_TT; tt++) {
 		if (tt == MLX5E_TT_ANY)
-			dest.tir_num = priv->direct_tir[0].tirn;
+			dest.tir_num = params->any_tt_tirn;
 		else
-			dest.tir_num = priv->inner_indir_tir[tt].tirn;
+			dest.tir_num = params->indir_tirn[tt];
 
 		rules[tt] = mlx5e_generate_inner_ttc_rule(priv, ft, &dest,
 							  ttc_rules[tt].etype,
@@ -1075,21 +1075,42 @@ err:
 	return err;
 }
 
-int mlx5e_create_inner_ttc_table(struct mlx5e_priv *priv)
+void mlx5e_set_ttc_basic_params(struct mlx5e_priv *priv,
+				struct ttc_params *ttc_params)
+{
+	ttc_params->any_tt_tirn = priv->direct_tir[0].tirn;
+	ttc_params->inner_ttc = &priv->fs.inner_ttc;
+}
+
+void mlx5e_set_inner_ttc_ft_params(struct ttc_params *ttc_params)
+{
+	struct mlx5_flow_table_attr *ft_attr = &ttc_params->ft_attr;
+
+	ft_attr->max_fte = MLX5E_INNER_TTC_TABLE_SIZE;
+	ft_attr->level = MLX5E_INNER_TTC_FT_LEVEL;
+	ft_attr->prio = MLX5E_NIC_PRIO;
+}
+
+void mlx5e_set_ttc_ft_params(struct ttc_params *ttc_params)
+
+{
+	struct mlx5_flow_table_attr *ft_attr = &ttc_params->ft_attr;
+
+	ft_attr->max_fte = MLX5E_TTC_TABLE_SIZE;
+	ft_attr->level = MLX5E_TTC_FT_LEVEL;
+	ft_attr->prio = MLX5E_NIC_PRIO;
+}
+
+int mlx5e_create_inner_ttc_table(struct mlx5e_priv *priv, struct ttc_params *params,
+				 struct mlx5e_ttc_table *ttc)
 {
-	struct mlx5e_ttc_table *ttc = &priv->fs.inner_ttc;
-	struct mlx5_flow_table_attr ft_attr = {};
 	struct mlx5e_flow_table *ft = &ttc->ft;
 	int err;
 
 	if (!mlx5e_tunnel_inner_ft_supported(priv->mdev))
 		return 0;
 
-	ft_attr.max_fte = MLX5E_INNER_TTC_TABLE_SIZE;
-	ft_attr.level   = MLX5E_INNER_TTC_FT_LEVEL;
-	ft_attr.prio    = MLX5E_NIC_PRIO;
-
-	ft->t = mlx5_create_flow_table(priv->fs.ns, &ft_attr);
+	ft->t = mlx5_create_flow_table(priv->fs.ns, &params->ft_attr);
 	if (IS_ERR(ft->t)) {
 		err = PTR_ERR(ft->t);
 		ft->t = NULL;
@@ -1100,7 +1121,7 @@ int mlx5e_create_inner_ttc_table(struct mlx5e_priv *priv)
 	if (err)
 		goto err;
 
-	err = mlx5e_generate_inner_ttc_table_rules(priv);
+	err = mlx5e_generate_inner_ttc_table_rules(priv, params, ttc);
 	if (err)
 		goto err;
 
@@ -1111,10 +1132,9 @@ err:
 	return err;
 }
 
-void mlx5e_destroy_inner_ttc_table(struct mlx5e_priv *priv)
+void mlx5e_destroy_inner_ttc_table(struct mlx5e_priv *priv,
+				   struct mlx5e_ttc_table *ttc)
 {
-	struct mlx5e_ttc_table *ttc = &priv->fs.inner_ttc;
-
 	if (!mlx5e_tunnel_inner_ft_supported(priv->mdev))
 		return;
 
@@ -1122,27 +1142,21 @@ void mlx5e_destroy_inner_ttc_table(struct mlx5e_priv *priv)
 	mlx5e_destroy_flow_table(&ttc->ft);
 }
 
-void mlx5e_destroy_ttc_table(struct mlx5e_priv *priv)
+void mlx5e_destroy_ttc_table(struct mlx5e_priv *priv,
+			     struct mlx5e_ttc_table *ttc)
 {
-	struct mlx5e_ttc_table *ttc = &priv->fs.ttc;
-
 	mlx5e_cleanup_ttc_rules(ttc);
 	mlx5e_destroy_flow_table(&ttc->ft);
 }
 
-int mlx5e_create_ttc_table(struct mlx5e_priv *priv)
+int mlx5e_create_ttc_table(struct mlx5e_priv *priv, struct ttc_params *params,
+			   struct mlx5e_ttc_table *ttc)
 {
 	bool match_ipv_outer = MLX5_CAP_FLOWTABLE_NIC_RX(priv->mdev, ft_field_support.outer_ip_version);
-	struct mlx5e_ttc_table *ttc = &priv->fs.ttc;
-	struct mlx5_flow_table_attr ft_attr = {};
 	struct mlx5e_flow_table *ft = &ttc->ft;
 	int err;
 
-	ft_attr.max_fte = MLX5E_TTC_TABLE_SIZE;
-	ft_attr.level = MLX5E_TTC_FT_LEVEL;
-	ft_attr.prio = MLX5E_NIC_PRIO;
-
-	ft->t = mlx5_create_flow_table(priv->fs.ns, &ft_attr);
+	ft->t = mlx5_create_flow_table(priv->fs.ns, &params->ft_attr);
 	if (IS_ERR(ft->t)) {
 		err = PTR_ERR(ft->t);
 		ft->t = NULL;
@@ -1153,7 +1167,7 @@ int mlx5e_create_ttc_table(struct mlx5e_priv *priv)
 	if (err)
 		goto err;
 
-	err = mlx5e_generate_ttc_table_rules(priv);
+	err = mlx5e_generate_ttc_table_rules(priv, params, ttc);
 	if (err)
 		goto err;
 
@@ -1474,7 +1488,8 @@ static void mlx5e_destroy_vlan_table(struct mlx5e_priv *priv)
 
 int mlx5e_create_flow_steering(struct mlx5e_priv *priv)
 {
-	int err;
+	struct ttc_params ttc_params = {};
+	int tt, err;
 
 	priv->fs.ns = mlx5_get_flow_namespace(priv->mdev,
 					       MLX5_FLOW_NAMESPACE_KERNEL);
@@ -1489,14 +1504,23 @@ int mlx5e_create_flow_steering(struct mlx5e_priv *priv)
 		priv->netdev->hw_features &= ~NETIF_F_NTUPLE;
 	}
 
-	err = mlx5e_create_inner_ttc_table(priv);
+	mlx5e_set_ttc_basic_params(priv, &ttc_params);
+	mlx5e_set_inner_ttc_ft_params(&ttc_params);
+	for (tt = 0; tt < MLX5E_NUM_INDIR_TIRS; tt++)
+		ttc_params.indir_tirn[tt] = priv->inner_indir_tir[tt].tirn;
+
+	err = mlx5e_create_inner_ttc_table(priv, &ttc_params, &priv->fs.inner_ttc);
 	if (err) {
 		netdev_err(priv->netdev, "Failed to create inner ttc table, err=%d\n",
 			   err);
 		goto err_destroy_arfs_tables;
 	}
 
-	err = mlx5e_create_ttc_table(priv);
+	mlx5e_set_ttc_ft_params(&ttc_params);
+	for (tt = 0; tt < MLX5E_NUM_INDIR_TIRS; tt++)
+		ttc_params.indir_tirn[tt] = priv->indir_tir[tt].tirn;
+
+	err = mlx5e_create_ttc_table(priv, &ttc_params, &priv->fs.ttc);
 	if (err) {
 		netdev_err(priv->netdev, "Failed to create ttc table, err=%d\n",
 			   err);
@@ -1524,9 +1548,9 @@ int mlx5e_create_flow_steering(struct mlx5e_priv *priv)
 err_destroy_l2_table:
 	mlx5e_destroy_l2_table(priv);
 err_destroy_ttc_table:
-	mlx5e_destroy_ttc_table(priv);
+	mlx5e_destroy_ttc_table(priv, &priv->fs.ttc);
 err_destroy_inner_ttc_table:
-	mlx5e_destroy_inner_ttc_table(priv);
+	mlx5e_destroy_inner_ttc_table(priv, &priv->fs.inner_ttc);
 err_destroy_arfs_tables:
 	mlx5e_arfs_destroy_tables(priv);
 
@@ -1537,8 +1561,8 @@ void mlx5e_destroy_flow_steering(struct mlx5e_priv *priv)
 {
 	mlx5e_destroy_vlan_table(priv);
 	mlx5e_destroy_l2_table(priv);
-	mlx5e_destroy_ttc_table(priv);
-	mlx5e_destroy_inner_ttc_table(priv);
+	mlx5e_destroy_ttc_table(priv, &priv->fs.ttc);
+	mlx5e_destroy_inner_ttc_table(priv, &priv->fs.inner_ttc);
 	mlx5e_arfs_destroy_tables(priv);
 	mlx5e_ethtool_cleanup_steering(priv);
 }
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
index d8aefeed124d..47bab842c5ee 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
@@ -173,182 +173,23 @@ unlock:
 	rtnl_unlock();
 }
 
-static void mlx5e_update_sw_counters(struct mlx5e_priv *priv)
+void mlx5e_update_stats(struct mlx5e_priv *priv)
 {
-	struct mlx5e_sw_stats temp, *s = &temp;
-	struct mlx5e_rq_stats *rq_stats;
-	struct mlx5e_sq_stats *sq_stats;
-	int i, j;
-
-	memset(s, 0, sizeof(*s));
-	for (i = 0; i < priv->channels.num; i++) {
-		struct mlx5e_channel *c = priv->channels.c[i];
-
-		rq_stats = &c->rq.stats;
-
-		s->rx_packets	+= rq_stats->packets;
-		s->rx_bytes	+= rq_stats->bytes;
-		s->rx_lro_packets += rq_stats->lro_packets;
-		s->rx_lro_bytes	+= rq_stats->lro_bytes;
-		s->rx_removed_vlan_packets += rq_stats->removed_vlan_packets;
-		s->rx_csum_none	+= rq_stats->csum_none;
-		s->rx_csum_complete += rq_stats->csum_complete;
-		s->rx_csum_unnecessary += rq_stats->csum_unnecessary;
-		s->rx_csum_unnecessary_inner += rq_stats->csum_unnecessary_inner;
-		s->rx_xdp_drop += rq_stats->xdp_drop;
-		s->rx_xdp_tx += rq_stats->xdp_tx;
-		s->rx_xdp_tx_full += rq_stats->xdp_tx_full;
-		s->rx_wqe_err   += rq_stats->wqe_err;
-		s->rx_mpwqe_filler += rq_stats->mpwqe_filler;
-		s->rx_buff_alloc_err += rq_stats->buff_alloc_err;
-		s->rx_cqe_compress_blks += rq_stats->cqe_compress_blks;
-		s->rx_cqe_compress_pkts += rq_stats->cqe_compress_pkts;
-		s->rx_page_reuse  += rq_stats->page_reuse;
-		s->rx_cache_reuse += rq_stats->cache_reuse;
-		s->rx_cache_full  += rq_stats->cache_full;
-		s->rx_cache_empty += rq_stats->cache_empty;
-		s->rx_cache_busy  += rq_stats->cache_busy;
-		s->rx_cache_waive += rq_stats->cache_waive;
-
-		for (j = 0; j < priv->channels.params.num_tc; j++) {
-			sq_stats = &c->sq[j].stats;
-
-			s->tx_packets		+= sq_stats->packets;
-			s->tx_bytes		+= sq_stats->bytes;
-			s->tx_tso_packets	+= sq_stats->tso_packets;
-			s->tx_tso_bytes		+= sq_stats->tso_bytes;
-			s->tx_tso_inner_packets	+= sq_stats->tso_inner_packets;
-			s->tx_tso_inner_bytes	+= sq_stats->tso_inner_bytes;
-			s->tx_added_vlan_packets += sq_stats->added_vlan_packets;
-			s->tx_queue_stopped	+= sq_stats->stopped;
-			s->tx_queue_wake	+= sq_stats->wake;
-			s->tx_queue_dropped	+= sq_stats->dropped;
-			s->tx_xmit_more		+= sq_stats->xmit_more;
-			s->tx_csum_partial_inner += sq_stats->csum_partial_inner;
-			s->tx_csum_none		+= sq_stats->csum_none;
-			s->tx_csum_partial	+= sq_stats->csum_partial;
-		}
-	}
-
-	s->link_down_events_phy = MLX5_GET(ppcnt_reg,
-				priv->stats.pport.phy_counters,
-				counter_set.phys_layer_cntrs.link_down_events);
-	memcpy(&priv->stats.sw, s, sizeof(*s));
-}
-
-static void mlx5e_update_vport_counters(struct mlx5e_priv *priv)
-{
-	int outlen = MLX5_ST_SZ_BYTES(query_vport_counter_out);
-	u32 *out = (u32 *)priv->stats.vport.query_vport_out;
-	u32 in[MLX5_ST_SZ_DW(query_vport_counter_in)] = {0};
-	struct mlx5_core_dev *mdev = priv->mdev;
-
-	MLX5_SET(query_vport_counter_in, in, opcode,
-		 MLX5_CMD_OP_QUERY_VPORT_COUNTER);
-	MLX5_SET(query_vport_counter_in, in, op_mod, 0);
-	MLX5_SET(query_vport_counter_in, in, other_vport, 0);
-
-	mlx5_cmd_exec(mdev, in, sizeof(in), out, outlen);
-}
-
-static void mlx5e_update_pport_counters(struct mlx5e_priv *priv, bool full)
-{
-	struct mlx5e_pport_stats *pstats = &priv->stats.pport;
-	struct mlx5_core_dev *mdev = priv->mdev;
-	u32 in[MLX5_ST_SZ_DW(ppcnt_reg)] = {0};
-	int sz = MLX5_ST_SZ_BYTES(ppcnt_reg);
-	int prio;
-	void *out;
-
-	MLX5_SET(ppcnt_reg, in, local_port, 1);
-
-	out = pstats->IEEE_802_3_counters;
-	MLX5_SET(ppcnt_reg, in, grp, MLX5_IEEE_802_3_COUNTERS_GROUP);
-	mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_PPCNT, 0, 0);
-
-	if (!full)
-		return;
-
-	out = pstats->RFC_2863_counters;
-	MLX5_SET(ppcnt_reg, in, grp, MLX5_RFC_2863_COUNTERS_GROUP);
-	mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_PPCNT, 0, 0);
-
-	out = pstats->RFC_2819_counters;
-	MLX5_SET(ppcnt_reg, in, grp, MLX5_RFC_2819_COUNTERS_GROUP);
-	mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_PPCNT, 0, 0);
-
-	out = pstats->phy_counters;
-	MLX5_SET(ppcnt_reg, in, grp, MLX5_PHYSICAL_LAYER_COUNTERS_GROUP);
-	mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_PPCNT, 0, 0);
-
-	if (MLX5_CAP_PCAM_FEATURE(mdev, ppcnt_statistical_group)) {
-		out = pstats->phy_statistical_counters;
-		MLX5_SET(ppcnt_reg, in, grp, MLX5_PHYSICAL_LAYER_STATISTICAL_GROUP);
-		mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_PPCNT, 0, 0);
-	}
-
-	if (MLX5_CAP_PCAM_FEATURE(mdev, rx_buffer_fullness_counters)) {
-		out = pstats->eth_ext_counters;
-		MLX5_SET(ppcnt_reg, in, grp, MLX5_ETHERNET_EXTENDED_COUNTERS_GROUP);
-		mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_PPCNT, 0, 0);
-	}
-
-	MLX5_SET(ppcnt_reg, in, grp, MLX5_PER_PRIORITY_COUNTERS_GROUP);
-	for (prio = 0; prio < NUM_PPORT_PRIO; prio++) {
-		out = pstats->per_prio_counters[prio];
-		MLX5_SET(ppcnt_reg, in, prio_tc, prio);
-		mlx5_core_access_reg(mdev, in, sz, out, sz,
-				     MLX5_REG_PPCNT, 0, 0);
-	}
-}
-
-static void mlx5e_update_q_counter(struct mlx5e_priv *priv)
-{
-	struct mlx5e_qcounter_stats *qcnt = &priv->stats.qcnt;
-	u32 out[MLX5_ST_SZ_DW(query_q_counter_out)];
-	int err;
-
-	if (!priv->q_counter)
-		return;
-
-	err = mlx5_core_query_q_counter(priv->mdev, priv->q_counter, 0, out, sizeof(out));
-	if (err)
-		return;
-
-	qcnt->rx_out_of_buffer = MLX5_GET(query_q_counter_out, out, out_of_buffer);
-}
-
-static void mlx5e_update_pcie_counters(struct mlx5e_priv *priv)
-{
-	struct mlx5e_pcie_stats *pcie_stats = &priv->stats.pcie;
-	struct mlx5_core_dev *mdev = priv->mdev;
-	u32 in[MLX5_ST_SZ_DW(mpcnt_reg)] = {0};
-	int sz = MLX5_ST_SZ_BYTES(mpcnt_reg);
-	void *out;
-
-	if (!MLX5_CAP_MCAM_FEATURE(mdev, pcie_performance_group))
-		return;
-
-	out = pcie_stats->pcie_perf_counters;
-	MLX5_SET(mpcnt_reg, in, grp, MLX5_PCIE_PERFORMANCE_COUNTERS_GROUP);
-	mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_MPCNT, 0, 0);
-}
+	int i;
 
-void mlx5e_update_stats(struct mlx5e_priv *priv, bool full)
-{
-	if (full) {
-		mlx5e_update_pcie_counters(priv);
-		mlx5e_ipsec_update_stats(priv);
-	}
-	mlx5e_update_pport_counters(priv, full);
-	mlx5e_update_vport_counters(priv);
-	mlx5e_update_q_counter(priv);
-	mlx5e_update_sw_counters(priv);
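+	/* Stats updating now belongs to the groups array; walk it in
+	 * reverse, since the groups are declared in the opposite order to
+	 * the desired update_stats() call order (see en_stats.c).
+	 */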
+	for (i = mlx5e_num_stats_grps - 1; i >= 0; i--)
+		if (mlx5e_stats_grps[i].update_stats)
+			mlx5e_stats_grps[i].update_stats(priv);
 }
 
 static void mlx5e_update_ndo_stats(struct mlx5e_priv *priv)
 {
-	mlx5e_update_stats(priv, false);
+	int i;
+
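+	/* On the ndo stats path, refresh only the groups that opted in
+	 * via MLX5E_NDO_UPDATE_STATS.
+	 */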
+	for (i = mlx5e_num_stats_grps - 1; i >= 0; i--)
+		if (mlx5e_stats_grps[i].update_stats_mask &
+		    MLX5E_NDO_UPDATE_STATS)
+			mlx5e_stats_grps[i].update_stats(priv);
 }
 
 void mlx5e_update_stats_work(struct work_struct *work)
@@ -582,6 +423,10 @@ static int mlx5e_alloc_rq(struct mlx5e_channel *c,
 		goto err_rq_wq_destroy;
 	}
 
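+	/* Register this RQ with the XDP rxq_info infrastructure so that
+	 * XDP programs can identify the receive queue via xdp_buff->rxq.
+	 */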
+	err = xdp_rxq_info_reg(&rq->xdp_rxq, rq->netdev, rq->ix);
+	if (err < 0)
+		goto err_rq_wq_destroy;
+
 	rq->buff.map_dir = rq->xdp_prog ? DMA_BIDIRECTIONAL : DMA_FROM_DEVICE;
 	rq->buff.headroom = params->rq_headroom;
 
@@ -674,8 +519,17 @@ static int mlx5e_alloc_rq(struct mlx5e_channel *c,
 		wqe->data.lkey = rq->mkey_be;
 	}
 
-	INIT_WORK(&rq->am.work, mlx5e_rx_am_work);
-	rq->am.mode = params->rx_cq_moderation.cq_period_mode;
+	INIT_WORK(&rq->dim.work, mlx5e_rx_dim_work);
+
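+	/* Translate the device CQ period mode to the generic net_dim mode;
+	 * the adaptive moderation logic now lives in the shared net_dim
+	 * library rather than in the driver (en_rx_am.c is removed).
+	 */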
+	switch (params->rx_cq_moderation.cq_period_mode) {
+	case MLX5_CQ_PERIOD_MODE_START_FROM_CQE:
+		rq->dim.mode = NET_DIM_CQ_PERIOD_MODE_START_FROM_CQE;
+		break;
+	case MLX5_CQ_PERIOD_MODE_START_FROM_EQE:
+	default:
+		rq->dim.mode = NET_DIM_CQ_PERIOD_MODE_START_FROM_EQE;
+	}
+
 	rq->page_cache.head = 0;
 	rq->page_cache.tail = 0;
 
@@ -687,6 +541,7 @@ err_destroy_umr_mkey:
 err_rq_wq_destroy:
 	if (rq->xdp_prog)
 		bpf_prog_put(rq->xdp_prog);
+	xdp_rxq_info_unreg(&rq->xdp_rxq);
 	mlx5_wq_destroy(&rq->wq_ctrl);
 
 	return err;
@@ -699,6 +554,8 @@ static void mlx5e_free_rq(struct mlx5e_rq *rq)
 	if (rq->xdp_prog)
 		bpf_prog_put(rq->xdp_prog);
 
+	xdp_rxq_info_unreg(&rq->xdp_rxq);
+
 	switch (rq->wq_type) {
 	case MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ:
 		mlx5e_rq_free_mpwqe_info(rq);
@@ -919,7 +776,7 @@ static int mlx5e_open_rq(struct mlx5e_channel *c,
 	if (err)
 		goto err_destroy_rq;
 
-	if (params->rx_am_enabled)
+	if (params->rx_dim_enabled)
 		c->rq.state |= BIT(MLX5E_RQ_STATE_AM);
 
 	return 0;
@@ -952,7 +809,7 @@ static void mlx5e_deactivate_rq(struct mlx5e_rq *rq)
 
 static void mlx5e_close_rq(struct mlx5e_rq *rq)
 {
-	cancel_work_sync(&rq->am.work);
+	cancel_work_sync(&rq->dim.work);
 	mlx5e_destroy_rq(rq);
 	mlx5e_free_rx_descs(rq);
 	mlx5e_free_rq(rq);
@@ -1565,7 +1422,7 @@ static void mlx5e_destroy_cq(struct mlx5e_cq *cq)
 }
 
 static int mlx5e_open_cq(struct mlx5e_channel *c,
-			 struct mlx5e_cq_moder moder,
+			 struct net_dim_cq_moder moder,
 			 struct mlx5e_cq_param *param,
 			 struct mlx5e_cq *cq)
 {
@@ -1747,7 +1604,7 @@ static int mlx5e_open_channel(struct mlx5e_priv *priv, int ix,
 			      struct mlx5e_channel_param *cparam,
 			      struct mlx5e_channel **cp)
 {
-	struct mlx5e_cq_moder icocq_moder = {0, 0};
+	struct net_dim_cq_moder icocq_moder = {0, 0};
 	struct net_device *netdev = priv->netdev;
 	int cpu = mlx5e_get_cpu(priv, ix);
 	struct mlx5e_channel *c;
@@ -1999,7 +1856,7 @@ static void mlx5e_build_ico_cq_param(struct mlx5e_priv *priv,
 
 	mlx5e_build_common_cq_param(priv, param);
 
-	param->cq_period_mode = MLX5_CQ_PERIOD_MODE_START_FROM_EQE;
+	param->cq_period_mode = NET_DIM_CQ_PERIOD_MODE_START_FROM_EQE;
 }
 
 static void mlx5e_build_icosq_param(struct mlx5e_priv *priv,
@@ -2203,7 +2060,7 @@ static int mlx5e_rx_hash_fn(int hfunc)
 	       MLX5_RX_HASH_FN_INVERTED_XOR8;
 }
 
-static int mlx5e_bits_invert(unsigned long a, int size)
+int mlx5e_bits_invert(unsigned long a, int size)
 {
 	int inv = 0;
 	int i;
@@ -2765,6 +2622,9 @@ static int mlx5e_alloc_drop_rq(struct mlx5_core_dev *mdev,
 	if (err)
 		return err;
 
+	/* Mark as unused: "Drop-RQ" packets never reach XDP */
+	xdp_rxq_info_unused(&rq->xdp_rxq);
+
 	rq->mdev = mdev;
 
 	return 0;
@@ -3084,9 +2944,6 @@ out:
 static int mlx5e_setup_tc_cls_flower(struct mlx5e_priv *priv,
 				     struct tc_cls_flower_offload *cls_flower)
 {
-	if (cls_flower->common.chain_index)
-		return -EOPNOTSUPP;
-
 	switch (cls_flower->command) {
 	case TC_CLSFLOWER_REPLACE:
 		return mlx5e_configure_flower(priv, cls_flower);
@@ -3104,7 +2961,7 @@ int mlx5e_setup_tc_block_cb(enum tc_setup_type type, void *type_data,
 {
 	struct mlx5e_priv *priv = cb_priv;
 
-	if (!tc_can_offload(priv->netdev))
+	if (!tc_cls_can_offload_and_chain0(priv->netdev, type_data))
 		return -EOPNOTSUPP;
 
 	switch (type) {
@@ -3738,26 +3595,62 @@ static netdev_features_t mlx5e_features_check(struct sk_buff *skb,
 	return features;
 }
 
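+/* A TX timeout may be caused by a lost interrupt: poll the EQ with IRQs
+ * disabled and, if pending EQEs were consumed, treat the SQ as recovered
+ * instead of scheduling a channel reopen.
+ */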
+static bool mlx5e_tx_timeout_eq_recover(struct net_device *dev,
+					struct mlx5e_txqsq *sq)
+{
+	struct mlx5e_priv *priv = netdev_priv(dev);
+	struct mlx5_core_dev *mdev = priv->mdev;
+	int irqn_not_used, eqn;
+	struct mlx5_eq *eq;
+	u32 eqe_count;
+
+	if (mlx5_vector2eqn(mdev, sq->cq.mcq.vector, &eqn, &irqn_not_used))
+		return false;
+
+	eq = mlx5_eqn2eq(mdev, eqn);
+	if (IS_ERR(eq))
+		return false;
+
+	netdev_err(dev, "EQ 0x%x: Cons = 0x%x, irqn = 0x%x\n",
+		   eqn, eq->cons_index, eq->irqn);
+
+	eqe_count = mlx5_eq_poll_irq_disabled(eq);
+	if (!eqe_count)
+		return false;
+
+	netdev_err(dev, "Recovered %d EQEs on EQ 0x%x\n", eqe_count, eq->eqn);
+	sq->channel->stats.eq_rearm++;
+	return true;
+}
+
 static void mlx5e_tx_timeout(struct net_device *dev)
 {
 	struct mlx5e_priv *priv = netdev_priv(dev);
-	bool sched_work = false;
+	bool reopen_channels = false;
 	int i;
 
 	netdev_err(dev, "TX timeout detected\n");
 
 	for (i = 0; i < priv->channels.num * priv->channels.params.num_tc; i++) {
+		struct netdev_queue *dev_queue = netdev_get_tx_queue(dev, i);
 		struct mlx5e_txqsq *sq = priv->txq2sq[i];
 
-		if (!netif_xmit_stopped(netdev_get_tx_queue(dev, i)))
+		if (!netif_xmit_stopped(dev_queue))
 			continue;
-		sched_work = true;
-		clear_bit(MLX5E_SQ_STATE_ENABLED, &sq->state);
-		netdev_err(dev, "TX timeout on queue: %d, SQ: 0x%x, CQ: 0x%x, SQ Cons: 0x%x SQ Prod: 0x%x\n",
-			   i, sq->sqn, sq->cq.mcq.cqn, sq->cc, sq->pc);
+		netdev_err(dev, "TX timeout on queue: %d, SQ: 0x%x, CQ: 0x%x, SQ Cons: 0x%x SQ Prod: 0x%x, usecs since last trans: %u\n",
+			   i, sq->sqn, sq->cq.mcq.cqn, sq->cc, sq->pc,
+			   jiffies_to_usecs(jiffies - dev_queue->trans_start));
+
+		/* If we recovered a lost interrupt, the TX timeout is most
+		 * likely resolved; skip reopening the channels.
+		 */
+		if (!mlx5e_tx_timeout_eq_recover(dev, sq)) {
+			clear_bit(MLX5E_SQ_STATE_ENABLED, &sq->state);
+			reopen_channels = true;
+		}
 	}
 
-	if (sched_work && test_bit(MLX5E_STATE_OPENED, &priv->state))
+	if (reopen_channels && test_bit(MLX5E_STATE_OPENED, &priv->state))
 		schedule_work(&priv->tx_timeout_work);
 }
 
@@ -4044,9 +3937,18 @@ void mlx5e_set_rx_cq_mode_params(struct mlx5e_params *params, u8 cq_period_mode)
 		params->rx_cq_moderation.usec =
 			MLX5E_PARAMS_DEFAULT_RX_CQ_MODERATION_USEC_FROM_CQE;
 
-	if (params->rx_am_enabled)
-		params->rx_cq_moderation =
-			mlx5e_am_get_def_profile(cq_period_mode);
+	if (params->rx_dim_enabled) {
+		switch (cq_period_mode) {
+		case MLX5_CQ_PERIOD_MODE_START_FROM_CQE:
+			params->rx_cq_moderation =
+				net_dim_get_def_profile(NET_DIM_CQ_PERIOD_MODE_START_FROM_CQE);
+			break;
+		case MLX5_CQ_PERIOD_MODE_START_FROM_EQE:
+		default:
+			params->rx_cq_moderation =
+				net_dim_get_def_profile(NET_DIM_CQ_PERIOD_MODE_START_FROM_EQE);
+		}
+	}
 
 	MLX5E_SET_PFLAG(params, MLX5E_PFLAG_RX_CQE_BASED_MODER,
 			params->rx_cq_moderation.cq_period_mode ==
@@ -4108,7 +4010,7 @@ void mlx5e_build_nic_params(struct mlx5_core_dev *mdev,
 	cq_period_mode = MLX5_CAP_GEN(mdev, cq_period_start_from_cqe) ?
 			MLX5_CQ_PERIOD_MODE_START_FROM_CQE :
 			MLX5_CQ_PERIOD_MODE_START_FROM_EQE;
-	params->rx_am_enabled = MLX5_CAP_GEN(mdev, cq_moderation);
+	params->rx_dim_enabled = MLX5_CAP_GEN(mdev, cq_moderation);
 	mlx5e_set_rx_cq_mode_params(params, cq_period_mode);
 	mlx5e_set_tx_cq_mode_params(params, cq_period_mode);
 
@@ -4315,9 +4217,6 @@ static void mlx5e_nic_cleanup(struct mlx5e_priv *priv)
 {
 	mlx5e_ipsec_cleanup(priv);
 	mlx5e_vxlan_cleanup(priv);
-
-	if (priv->channels.params.xdp_prog)
-		bpf_prog_put(priv->channels.params.xdp_prog);
 }
 
 static int mlx5e_init_nic_rx(struct mlx5e_priv *priv)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
index 3409d86eb06b..363d8dcb7f17 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
@@ -190,6 +190,63 @@ int mlx5e_attr_get(struct net_device *dev, struct switchdev_attr *attr)
 	return 0;
 }
 
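+/* Per-SQ "send to vport" steering rules, moved here from the eswitch
+ * core: they direct traffic transmitted on the given SQs to the
+ * representor's vport.
+ */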
+static void mlx5e_sqs2vport_stop(struct mlx5_eswitch *esw,
+				 struct mlx5_eswitch_rep *rep)
+{
+	struct mlx5e_rep_sq *rep_sq, *tmp;
+	struct mlx5e_rep_priv *rpriv;
+
+	if (esw->mode != SRIOV_OFFLOADS)
+		return;
+
+	rpriv = mlx5e_rep_to_rep_priv(rep);
+	list_for_each_entry_safe(rep_sq, tmp, &rpriv->vport_sqs_list, list) {
+		mlx5_eswitch_del_send_to_vport_rule(rep_sq->send_to_vport_rule);
+		list_del(&rep_sq->list);
+		kfree(rep_sq);
+	}
+}
+
+static int mlx5e_sqs2vport_start(struct mlx5_eswitch *esw,
+				 struct mlx5_eswitch_rep *rep,
+				 u16 *sqns_array, int sqns_num)
+{
+	struct mlx5_flow_handle *flow_rule;
+	struct mlx5e_rep_priv *rpriv;
+	struct mlx5e_rep_sq *rep_sq;
+	int err;
+	int i;
+
+	if (esw->mode != SRIOV_OFFLOADS)
+		return 0;
+
+	rpriv = mlx5e_rep_to_rep_priv(rep);
+	for (i = 0; i < sqns_num; i++) {
+		rep_sq = kzalloc(sizeof(*rep_sq), GFP_KERNEL);
+		if (!rep_sq) {
+			err = -ENOMEM;
+			goto out_err;
+		}
+
+		/* Add a re-inject rule to the PF/representor SQs */
+		flow_rule = mlx5_eswitch_add_send_to_vport_rule(esw,
+								rep->vport,
+								sqns_array[i]);
+		if (IS_ERR(flow_rule)) {
+			err = PTR_ERR(flow_rule);
+			kfree(rep_sq);
+			goto out_err;
+		}
+		rep_sq->send_to_vport_rule = flow_rule;
+		list_add(&rep_sq->list, &rpriv->vport_sqs_list);
+	}
+	return 0;
+
+out_err:
+	mlx5e_sqs2vport_stop(esw, rep);
+	return err;
+}
+
 int mlx5e_add_sqs_fwd_rules(struct mlx5e_priv *priv)
 {
 	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
@@ -210,7 +267,7 @@ int mlx5e_add_sqs_fwd_rules(struct mlx5e_priv *priv)
 			sqs[num_sqs++] = c->sq[tc].sqn;
 	}
 
-	err = mlx5_eswitch_sqs2vport_start(esw, rep, sqs, num_sqs);
+	err = mlx5e_sqs2vport_start(esw, rep, sqs, num_sqs);
 	kfree(sqs);
 
 out:
@@ -225,7 +282,7 @@ void mlx5e_remove_sqs_fwd_rules(struct mlx5e_priv *priv)
 	struct mlx5e_rep_priv *rpriv = priv->ppriv;
 	struct mlx5_eswitch_rep *rep = rpriv->rep;
 
-	mlx5_eswitch_sqs2vport_stop(esw, rep);
+	mlx5e_sqs2vport_stop(esw, rep);
 }
 
 static void mlx5e_rep_neigh_update_init_interval(struct mlx5e_rep_priv *rpriv)
@@ -238,7 +295,7 @@ static void mlx5e_rep_neigh_update_init_interval(struct mlx5e_rep_priv *rpriv)
 #endif
 	unsigned long ipv4_interval = NEIGH_VAR(&arp_tbl.parms,
 						DELAY_PROBE_TIME);
-	struct net_device *netdev = rpriv->rep->netdev;
+	struct net_device *netdev = rpriv->netdev;
 	struct mlx5e_priv *priv = netdev_priv(netdev);
 
 	rpriv->neigh_update.min_interval = min_t(unsigned long, ipv6_interval, ipv4_interval);
@@ -259,7 +316,7 @@ static void mlx5e_rep_neigh_stats_work(struct work_struct *work)
 {
 	struct mlx5e_rep_priv *rpriv = container_of(work, struct mlx5e_rep_priv,
 						    neigh_update.neigh_stats_work.work);
-	struct net_device *netdev = rpriv->rep->netdev;
+	struct net_device *netdev = rpriv->netdev;
 	struct mlx5e_priv *priv = netdev_priv(netdev);
 	struct mlx5e_neigh_hash_entry *nhe;
 
@@ -355,7 +412,7 @@ static int mlx5e_rep_netevent_event(struct notifier_block *nb,
 	struct mlx5e_rep_priv *rpriv = container_of(nb, struct mlx5e_rep_priv,
 						    neigh_update.netevent_nb);
 	struct mlx5e_neigh_update_table *neigh_update = &rpriv->neigh_update;
-	struct net_device *netdev = rpriv->rep->netdev;
+	struct net_device *netdev = rpriv->netdev;
 	struct mlx5e_priv *priv = netdev_priv(netdev);
 	struct mlx5e_neigh_hash_entry *nhe = NULL;
 	struct mlx5e_neigh m_neigh = {};
@@ -483,7 +540,7 @@ out_err:
 static void mlx5e_rep_neigh_cleanup(struct mlx5e_rep_priv *rpriv)
 {
 	struct mlx5e_neigh_update_table *neigh_update = &rpriv->neigh_update;
-	struct mlx5e_priv *priv = netdev_priv(rpriv->rep->netdev);
+	struct mlx5e_priv *priv = netdev_priv(rpriv->netdev);
 
 	unregister_netevent_notifier(&neigh_update->netevent_nb);
 
@@ -662,9 +719,6 @@ static int
 mlx5e_rep_setup_tc_cls_flower(struct mlx5e_priv *priv,
 			      struct tc_cls_flower_offload *cls_flower)
 {
-	if (cls_flower->common.chain_index)
-		return -EOPNOTSUPP;
-
 	switch (cls_flower->command) {
 	case TC_CLSFLOWER_REPLACE:
 		return mlx5e_configure_flower(priv, cls_flower);
@@ -682,7 +736,7 @@ static int mlx5e_rep_setup_tc_cb(enum tc_setup_type type, void *type_data,
 {
 	struct mlx5e_priv *priv = cb_priv;
 
-	if (!tc_can_offload(priv->netdev))
+	if (!tc_cls_can_offload_and_chain0(priv->netdev, type_data))
 		return -EOPNOTSUPP;
 
 	switch (type) {
@@ -827,7 +881,7 @@ static void mlx5e_build_rep_params(struct mlx5_core_dev *mdev,
 	params->rq_wq_type  = MLX5_WQ_TYPE_LINKED_LIST;
 	params->log_rq_size = MLX5E_PARAMS_MINIMUM_LOG_RQ_SIZE;
 
-	params->rx_am_enabled = MLX5_CAP_GEN(mdev, cq_moderation);
+	params->rx_dim_enabled = MLX5_CAP_GEN(mdev, cq_moderation);
 	mlx5e_set_rx_cq_mode_params(params, cq_period_mode);
 
 	params->tx_max_inline         = mlx5e_get_max_inline_cap(mdev);
@@ -906,7 +960,7 @@ static int mlx5e_init_rep_rx(struct mlx5e_priv *priv)
 		err = PTR_ERR(flow_rule);
 		goto err_destroy_direct_tirs;
 	}
-	rep->vport_rx_rule = flow_rule;
+	rpriv->vport_rx_rule = flow_rule;
 
 	err = mlx5e_tc_init(priv);
 	if (err)
@@ -915,7 +969,7 @@ static int mlx5e_init_rep_rx(struct mlx5e_priv *priv)
 	return 0;
 
 err_del_flow_rule:
-	mlx5_del_flow_rules(rep->vport_rx_rule);
+	mlx5_del_flow_rules(rpriv->vport_rx_rule);
 err_destroy_direct_tirs:
 	mlx5e_destroy_direct_tirs(priv);
 err_destroy_direct_rqts:
@@ -926,10 +980,9 @@ err_destroy_direct_rqts:
 static void mlx5e_cleanup_rep_rx(struct mlx5e_priv *priv)
 {
 	struct mlx5e_rep_priv *rpriv = priv->ppriv;
-	struct mlx5_eswitch_rep *rep = rpriv->rep;
 
 	mlx5e_tc_cleanup(priv);
-	mlx5_del_flow_rules(rep->vport_rx_rule);
+	mlx5_del_flow_rules(rpriv->vport_rx_rule);
 	mlx5e_destroy_direct_tirs(priv);
 	mlx5e_destroy_direct_rqts(priv);
 }
@@ -969,10 +1022,10 @@ static const struct mlx5e_profile mlx5e_rep_profile = {
 /* e-Switch vport representors */
 
 static int
-mlx5e_nic_rep_load(struct mlx5_eswitch *esw, struct mlx5_eswitch_rep *rep)
+mlx5e_nic_rep_load(struct mlx5_core_dev *dev, struct mlx5_eswitch_rep *rep)
 {
-	struct mlx5e_priv *priv = netdev_priv(rep->netdev);
-	struct mlx5e_rep_priv *rpriv = priv->ppriv;
+	struct mlx5e_rep_priv *rpriv = mlx5e_rep_to_rep_priv(rep);
+	struct mlx5e_priv *priv = netdev_priv(rpriv->netdev);
 
 	int err;
 
@@ -994,10 +1047,10 @@ err_remove_sqs:
 }
 
 static void
-mlx5e_nic_rep_unload(struct mlx5_eswitch *esw, struct mlx5_eswitch_rep *rep)
+mlx5e_nic_rep_unload(struct mlx5_eswitch_rep *rep)
 {
-	struct mlx5e_priv *priv = netdev_priv(rep->netdev);
-	struct mlx5e_rep_priv *rpriv = priv->ppriv;
+	struct mlx5e_rep_priv *rpriv = mlx5e_rep_to_rep_priv(rep);
+	struct mlx5e_priv *priv = netdev_priv(rpriv->netdev);
 
 	if (test_bit(MLX5E_STATE_OPENED, &priv->state))
 		mlx5e_remove_sqs_fwd_rules(priv);
@@ -1010,8 +1063,9 @@ mlx5e_nic_rep_unload(struct mlx5_eswitch *esw, struct mlx5_eswitch_rep *rep)
 }
 
 static int
-mlx5e_vport_rep_load(struct mlx5_eswitch *esw, struct mlx5_eswitch_rep *rep)
+mlx5e_vport_rep_load(struct mlx5_core_dev *dev, struct mlx5_eswitch_rep *rep)
 {
+	struct mlx5e_rep_priv *uplink_rpriv;
 	struct mlx5e_rep_priv *rpriv;
 	struct net_device *netdev;
 	struct mlx5e_priv *upriv;
@@ -1021,7 +1075,7 @@ mlx5e_vport_rep_load(struct mlx5_eswitch *esw, struct mlx5_eswitch_rep *rep)
 	if (!rpriv)
 		return -ENOMEM;
 
-	netdev = mlx5e_create_netdev(esw->dev, &mlx5e_rep_profile, rpriv);
+	netdev = mlx5e_create_netdev(dev, &mlx5e_rep_profile, rpriv);
 	if (!netdev) {
 		pr_warn("Failed to create representor netdev for vport %d\n",
 			rep->vport);
@@ -1029,8 +1083,10 @@ mlx5e_vport_rep_load(struct mlx5_eswitch *esw, struct mlx5_eswitch_rep *rep)
 		return -EINVAL;
 	}
 
-	rep->netdev = netdev;
+	rpriv->netdev = netdev;
 	rpriv->rep = rep;
+	rep->rep_if[REP_ETH].priv = rpriv;
+	INIT_LIST_HEAD(&rpriv->vport_sqs_list);
 
 	err = mlx5e_attach_netdev(netdev_priv(netdev));
 	if (err) {
@@ -1046,7 +1102,8 @@ mlx5e_vport_rep_load(struct mlx5_eswitch *esw, struct mlx5_eswitch_rep *rep)
 		goto err_detach_netdev;
 	}
 
-	upriv = netdev_priv(mlx5_eswitch_get_uplink_netdev(esw));
+	uplink_rpriv = mlx5_eswitch_get_uplink_priv(dev->priv.eswitch, REP_ETH);
+	upriv = netdev_priv(uplink_rpriv->netdev);
 	err = tc_setup_cb_egdev_register(netdev, mlx5e_setup_tc_block_cb,
 					 upriv);
 	if (err)
@@ -1078,16 +1135,19 @@ err_destroy_netdev:
 }
 
 static void
-mlx5e_vport_rep_unload(struct mlx5_eswitch *esw, struct mlx5_eswitch_rep *rep)
+mlx5e_vport_rep_unload(struct mlx5_eswitch_rep *rep)
 {
-	struct net_device *netdev = rep->netdev;
+	struct mlx5e_rep_priv *rpriv = mlx5e_rep_to_rep_priv(rep);
+	struct net_device *netdev = rpriv->netdev;
 	struct mlx5e_priv *priv = netdev_priv(netdev);
-	struct mlx5e_rep_priv *rpriv = priv->ppriv;
+	struct mlx5e_rep_priv *uplink_rpriv;
 	void *ppriv = priv->ppriv;
 	struct mlx5e_priv *upriv;
 
-	unregister_netdev(rep->netdev);
-	upriv = netdev_priv(mlx5_eswitch_get_uplink_netdev(esw));
+	unregister_netdev(netdev);
+	uplink_rpriv = mlx5_eswitch_get_uplink_priv(priv->mdev->priv.eswitch,
+						    REP_ETH);
+	upriv = netdev_priv(uplink_rpriv->netdev);
 	tc_setup_cb_egdev_unregister(netdev, mlx5e_setup_tc_block_cb,
 				     upriv);
 	mlx5e_rep_neigh_cleanup(rpriv);
@@ -1102,18 +1162,13 @@ static void mlx5e_rep_register_vf_vports(struct mlx5e_priv *priv)
 	struct mlx5_eswitch *esw   = mdev->priv.eswitch;
 	int total_vfs = MLX5_TOTAL_VPORTS(mdev);
 	int vport;
-	u8 mac[ETH_ALEN];
-
-	mlx5_query_nic_vport_mac_address(mdev, 0, mac);
 
 	for (vport = 1; vport < total_vfs; vport++) {
-		struct mlx5_eswitch_rep rep;
+		struct mlx5_eswitch_rep_if rep_if = {};
 
-		rep.load = mlx5e_vport_rep_load;
-		rep.unload = mlx5e_vport_rep_unload;
-		rep.vport = vport;
-		ether_addr_copy(rep.hw_id, mac);
-		mlx5_eswitch_register_vport_rep(esw, vport, &rep);
+		rep_if.load = mlx5e_vport_rep_load;
+		rep_if.unload = mlx5e_vport_rep_unload;
+		mlx5_eswitch_register_vport_rep(esw, vport, &rep_if, REP_ETH);
 	}
 }
 
@@ -1125,21 +1180,24 @@ static void mlx5e_rep_unregister_vf_vports(struct mlx5e_priv *priv)
 	int vport;
 
 	for (vport = 1; vport < total_vfs; vport++)
-		mlx5_eswitch_unregister_vport_rep(esw, vport);
+		mlx5_eswitch_unregister_vport_rep(esw, vport, REP_ETH);
 }
 
 void mlx5e_register_vport_reps(struct mlx5e_priv *priv)
 {
 	struct mlx5_core_dev *mdev = priv->mdev;
 	struct mlx5_eswitch *esw   = mdev->priv.eswitch;
-	struct mlx5_eswitch_rep rep;
+	struct mlx5_eswitch_rep_if rep_if;
+	struct mlx5e_rep_priv *rpriv;
+
+	rpriv = priv->ppriv;
+	rpriv->netdev = priv->netdev;
 
-	mlx5_query_nic_vport_mac_address(mdev, 0, rep.hw_id);
-	rep.load = mlx5e_nic_rep_load;
-	rep.unload = mlx5e_nic_rep_unload;
-	rep.vport = FDB_UPLINK_VPORT;
-	rep.netdev = priv->netdev;
-	mlx5_eswitch_register_vport_rep(esw, 0, &rep); /* UPLINK PF vport*/
+	rep_if.load = mlx5e_nic_rep_load;
+	rep_if.unload = mlx5e_nic_rep_unload;
+	rep_if.priv = rpriv;
+	INIT_LIST_HEAD(&rpriv->vport_sqs_list);
+	mlx5_eswitch_register_vport_rep(esw, 0, &rep_if, REP_ETH); /* UPLINK PF vport */
 
 	mlx5e_rep_register_vf_vports(priv); /* VFs vports */
 }
@@ -1150,7 +1208,7 @@ void mlx5e_unregister_vport_reps(struct mlx5e_priv *priv)
 	struct mlx5_eswitch *esw   = mdev->priv.eswitch;
 
 	mlx5e_rep_unregister_vf_vports(priv); /* VFs vports */
-	mlx5_eswitch_unregister_vport_rep(esw, 0); /* UPLINK PF*/
+	mlx5_eswitch_unregister_vport_rep(esw, 0, REP_ETH); /* UPLINK PF*/
 }
 
 void *mlx5e_alloc_nic_rep_priv(struct mlx5_core_dev *mdev)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.h b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.h
index 5659ed9f51e6..b9b481f2833a 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.h
@@ -56,8 +56,17 @@ struct mlx5e_neigh_update_table {
 struct mlx5e_rep_priv {
 	struct mlx5_eswitch_rep *rep;
 	struct mlx5e_neigh_update_table neigh_update;
+	struct net_device      *netdev;
+	struct mlx5_flow_handle *vport_rx_rule;
+	struct list_head       vport_sqs_list;
 };
 
+static inline
+struct mlx5e_rep_priv *mlx5e_rep_to_rep_priv(struct mlx5_eswitch_rep *rep)
+{
+	return (struct mlx5e_rep_priv *)rep->rep_if[REP_ETH].priv;
+}
+
 struct mlx5e_neigh {
 	struct net_device *dev;
 	union {
@@ -124,6 +133,11 @@ struct mlx5e_encap_entry {
 	int encap_size;
 };
 
+struct mlx5e_rep_sq {
+	struct mlx5_flow_handle	*send_to_vport_rule;
+	struct list_head	 list;
+};
+
 void *mlx5e_alloc_nic_rep_priv(struct mlx5_core_dev *mdev);
 void mlx5e_register_vport_reps(struct mlx5e_priv *priv);
 void mlx5e_unregister_vport_reps(struct mlx5e_priv *priv);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
index 5b499c7a698f..0d4bb0688faa 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
@@ -495,8 +495,8 @@ static inline void mlx5e_poll_ico_single_cqe(struct mlx5e_cq *cq,
 	mlx5_cqwq_pop(&cq->wq);
 
 	if (unlikely((cqe->op_own >> 4) != MLX5_CQE_REQ)) {
-		WARN_ONCE(true, "mlx5e: Bad OP in ICOSQ CQE: 0x%x\n",
-			  cqe->op_own);
+		netdev_WARN_ONCE(cq->channel->netdev,
+				 "Bad OP in ICOSQ CQE: 0x%x\n", cqe->op_own);
 		return;
 	}
 
@@ -506,9 +506,8 @@ static inline void mlx5e_poll_ico_single_cqe(struct mlx5e_cq *cq,
 	}
 
 	if (unlikely(icowi->opcode != MLX5_OPCODE_NOP))
-		WARN_ONCE(true,
-			  "mlx5e: Bad OPCODE in ICOSQ WQE info: 0x%x\n",
-			  icowi->opcode);
+		netdev_WARN_ONCE(cq->channel->netdev,
+				 "Bad OPCODE in ICOSQ WQE info: 0x%x\n", icowi->opcode);
 }
 
 static void mlx5e_poll_ico_cq(struct mlx5e_cq *cq, struct mlx5e_rq *rq)
@@ -632,7 +631,7 @@ static inline void mlx5e_handle_csum(struct net_device *netdev,
 		return;
 	}
 
-	if (is_last_ethertype_ip(skb, &network_depth)) {
+	if (likely(is_last_ethertype_ip(skb, &network_depth))) {
 		skb->ip_summed = CHECKSUM_COMPLETE;
 		skb->csum = csum_unfold((__force __sum16)cqe->check_sum);
 		if (network_depth > ETH_HLEN)
@@ -812,6 +811,7 @@ static inline int mlx5e_xdp_handle(struct mlx5e_rq *rq,
 	xdp_set_data_meta_invalid(&xdp);
 	xdp.data_end = xdp.data + *len;
 	xdp.data_hard_start = va;
+	xdp.rxq = &rq->xdp_rxq;
 
 	act = bpf_prog_run_xdp(prog, &xdp);
 	switch (act) {
@@ -1175,7 +1175,9 @@ static inline void mlx5i_complete_rx_cqe(struct mlx5e_rq *rq,
 					 u32 cqe_bcnt,
 					 struct sk_buff *skb)
 {
+	struct hwtstamp_config *tstamp;
 	struct net_device *netdev;
+	struct mlx5e_priv *priv;
 	char *pseudo_header;
 	u32 qpn;
 	u8 *dgid;
@@ -1194,6 +1196,9 @@ static inline void mlx5i_complete_rx_cqe(struct mlx5e_rq *rq,
 		return;
 	}
 
+	priv = mlx5i_epriv(netdev);
+	tstamp = &priv->tstamp;
+
 	g = (be32_to_cpu(cqe->flags_rqpn) >> 28) & 3;
 	dgid = skb->data + MLX5_IB_GRH_DGID_OFFSET;
 	if ((!g) || dgid[0] != 0xff)
@@ -1214,7 +1219,7 @@ static inline void mlx5i_complete_rx_cqe(struct mlx5e_rq *rq,
 	skb->ip_summed = CHECKSUM_COMPLETE;
 	skb->csum = csum_unfold((__force __sum16)cqe->check_sum);
 
-	if (unlikely(mlx5e_rx_hw_stamp(rq->tstamp)))
+	if (unlikely(mlx5e_rx_hw_stamp(tstamp)))
 		skb_hwtstamps(skb)->hwtstamp =
 				mlx5_timecounter_cyc2time(rq->clock, get_cqe_ts(cqe));
 
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rx_am.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rx_am.c
deleted file mode 100644
index b69a705fd787..000000000000
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_rx_am.c
+++ /dev/null
@@ -1,347 +0,0 @@
-/*
- * Copyright (c) 2016, Mellanox Technologies. All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses.  You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- *     Redistribution and use in source and binary forms, with or
- *     without modification, are permitted provided that the following
- *     conditions are met:
- *
- *      - Redistributions of source code must retain the above
- *        copyright notice, this list of conditions and the following
- *        disclaimer.
- *
- *      - Redistributions in binary form must reproduce the above
- *        copyright notice, this list of conditions and the following
- *        disclaimer in the documentation and/or other materials
- *        provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-
-#include "en.h"
-
-/* Adaptive moderation profiles */
-#define MLX5E_AM_DEFAULT_RX_CQ_MODERATION_PKTS_FROM_EQE 256
-#define MLX5E_RX_AM_DEF_PROFILE_CQE 1
-#define MLX5E_RX_AM_DEF_PROFILE_EQE 1
-#define MLX5E_PARAMS_AM_NUM_PROFILES 5
-
-/* All profiles sizes must be MLX5E_PARAMS_AM_NUM_PROFILES */
-#define MLX5_AM_EQE_PROFILES { \
-	{1,   MLX5E_AM_DEFAULT_RX_CQ_MODERATION_PKTS_FROM_EQE}, \
-	{8,   MLX5E_AM_DEFAULT_RX_CQ_MODERATION_PKTS_FROM_EQE}, \
-	{64,  MLX5E_AM_DEFAULT_RX_CQ_MODERATION_PKTS_FROM_EQE}, \
-	{128, MLX5E_AM_DEFAULT_RX_CQ_MODERATION_PKTS_FROM_EQE}, \
-	{256, MLX5E_AM_DEFAULT_RX_CQ_MODERATION_PKTS_FROM_EQE}, \
-}
-
-#define MLX5_AM_CQE_PROFILES { \
-	{2,  256},             \
-	{8,  128},             \
-	{16, 64},              \
-	{32, 64},              \
-	{64, 64}               \
-}
-
-static const struct mlx5e_cq_moder
-profile[MLX5_CQ_PERIOD_NUM_MODES][MLX5E_PARAMS_AM_NUM_PROFILES] = {
-	MLX5_AM_EQE_PROFILES,
-	MLX5_AM_CQE_PROFILES,
-};
-
-static inline struct mlx5e_cq_moder mlx5e_am_get_profile(u8 cq_period_mode, int ix)
-{
-	struct mlx5e_cq_moder cq_moder;
-
-	cq_moder = profile[cq_period_mode][ix];
-	cq_moder.cq_period_mode = cq_period_mode;
-	return cq_moder;
-}
-
-struct mlx5e_cq_moder mlx5e_am_get_def_profile(u8 rx_cq_period_mode)
-{
-	int default_profile_ix;
-
-	if (rx_cq_period_mode == MLX5_CQ_PERIOD_MODE_START_FROM_CQE)
-		default_profile_ix = MLX5E_RX_AM_DEF_PROFILE_CQE;
-	else /* MLX5_CQ_PERIOD_MODE_START_FROM_EQE */
-		default_profile_ix = MLX5E_RX_AM_DEF_PROFILE_EQE;
-
-	return mlx5e_am_get_profile(rx_cq_period_mode, default_profile_ix);
-}
-
-/* Adaptive moderation logic */
-enum {
-	MLX5E_AM_START_MEASURE,
-	MLX5E_AM_MEASURE_IN_PROGRESS,
-	MLX5E_AM_APPLY_NEW_PROFILE,
-};
-
-enum {
-	MLX5E_AM_PARKING_ON_TOP,
-	MLX5E_AM_PARKING_TIRED,
-	MLX5E_AM_GOING_RIGHT,
-	MLX5E_AM_GOING_LEFT,
-};
-
-enum {
-	MLX5E_AM_STATS_WORSE,
-	MLX5E_AM_STATS_SAME,
-	MLX5E_AM_STATS_BETTER,
-};
-
-enum {
-	MLX5E_AM_STEPPED,
-	MLX5E_AM_TOO_TIRED,
-	MLX5E_AM_ON_EDGE,
-};
-
-static bool mlx5e_am_on_top(struct mlx5e_rx_am *am)
-{
-	switch (am->tune_state) {
-	case MLX5E_AM_PARKING_ON_TOP:
-	case MLX5E_AM_PARKING_TIRED:
-		return true;
-	case MLX5E_AM_GOING_RIGHT:
-		return (am->steps_left > 1) && (am->steps_right == 1);
-	default: /* MLX5E_AM_GOING_LEFT */
-		return (am->steps_right > 1) && (am->steps_left == 1);
-	}
-}
-
-static void mlx5e_am_turn(struct mlx5e_rx_am *am)
-{
-	switch (am->tune_state) {
-	case MLX5E_AM_PARKING_ON_TOP:
-	case MLX5E_AM_PARKING_TIRED:
-		break;
-	case MLX5E_AM_GOING_RIGHT:
-		am->tune_state = MLX5E_AM_GOING_LEFT;
-		am->steps_left = 0;
-		break;
-	case MLX5E_AM_GOING_LEFT:
-		am->tune_state = MLX5E_AM_GOING_RIGHT;
-		am->steps_right = 0;
-		break;
-	}
-}
-
-static int mlx5e_am_step(struct mlx5e_rx_am *am)
-{
-	if (am->tired == (MLX5E_PARAMS_AM_NUM_PROFILES * 2))
-		return MLX5E_AM_TOO_TIRED;
-
-	switch (am->tune_state) {
-	case MLX5E_AM_PARKING_ON_TOP:
-	case MLX5E_AM_PARKING_TIRED:
-		break;
-	case MLX5E_AM_GOING_RIGHT:
-		if (am->profile_ix == (MLX5E_PARAMS_AM_NUM_PROFILES - 1))
-			return MLX5E_AM_ON_EDGE;
-		am->profile_ix++;
-		am->steps_right++;
-		break;
-	case MLX5E_AM_GOING_LEFT:
-		if (am->profile_ix == 0)
-			return MLX5E_AM_ON_EDGE;
-		am->profile_ix--;
-		am->steps_left++;
-		break;
-	}
-
-	am->tired++;
-	return MLX5E_AM_STEPPED;
-}
-
-static void mlx5e_am_park_on_top(struct mlx5e_rx_am *am)
-{
-	am->steps_right  = 0;
-	am->steps_left   = 0;
-	am->tired        = 0;
-	am->tune_state   = MLX5E_AM_PARKING_ON_TOP;
-}
-
-static void mlx5e_am_park_tired(struct mlx5e_rx_am *am)
-{
-	am->steps_right  = 0;
-	am->steps_left   = 0;
-	am->tune_state   = MLX5E_AM_PARKING_TIRED;
-}
-
-static void mlx5e_am_exit_parking(struct mlx5e_rx_am *am)
-{
-	am->tune_state = am->profile_ix ? MLX5E_AM_GOING_LEFT :
-					  MLX5E_AM_GOING_RIGHT;
-	mlx5e_am_step(am);
-}
-
-#define IS_SIGNIFICANT_DIFF(val, ref) \
-	(((100 * abs((val) - (ref))) / (ref)) > 10) /* more than 10% difference */
-
-static int mlx5e_am_stats_compare(struct mlx5e_rx_am_stats *curr,
-				  struct mlx5e_rx_am_stats *prev)
-{
-	if (!prev->bpms)
-		return curr->bpms ? MLX5E_AM_STATS_BETTER :
-				    MLX5E_AM_STATS_SAME;
-
-	if (IS_SIGNIFICANT_DIFF(curr->bpms, prev->bpms))
-		return (curr->bpms > prev->bpms) ? MLX5E_AM_STATS_BETTER :
-						   MLX5E_AM_STATS_WORSE;
-
-	if (!prev->ppms)
-		return curr->ppms ? MLX5E_AM_STATS_BETTER :
-				    MLX5E_AM_STATS_SAME;
-
-	if (IS_SIGNIFICANT_DIFF(curr->ppms, prev->ppms))
-		return (curr->ppms > prev->ppms) ? MLX5E_AM_STATS_BETTER :
-						   MLX5E_AM_STATS_WORSE;
-	if (!prev->epms)
-		return MLX5E_AM_STATS_SAME;
-
-	if (IS_SIGNIFICANT_DIFF(curr->epms, prev->epms))
-		return (curr->epms < prev->epms) ? MLX5E_AM_STATS_BETTER :
-						   MLX5E_AM_STATS_WORSE;
-
-	return MLX5E_AM_STATS_SAME;
-}
-
-static bool mlx5e_am_decision(struct mlx5e_rx_am_stats *curr_stats,
-			      struct mlx5e_rx_am *am)
-{
-	int prev_state = am->tune_state;
-	int prev_ix = am->profile_ix;
-	int stats_res;
-	int step_res;
-
-	switch (am->tune_state) {
-	case MLX5E_AM_PARKING_ON_TOP:
-		stats_res = mlx5e_am_stats_compare(curr_stats, &am->prev_stats);
-		if (stats_res != MLX5E_AM_STATS_SAME)
-			mlx5e_am_exit_parking(am);
-		break;
-
-	case MLX5E_AM_PARKING_TIRED:
-		am->tired--;
-		if (!am->tired)
-			mlx5e_am_exit_parking(am);
-		break;
-
-	case MLX5E_AM_GOING_RIGHT:
-	case MLX5E_AM_GOING_LEFT:
-		stats_res = mlx5e_am_stats_compare(curr_stats, &am->prev_stats);
-		if (stats_res != MLX5E_AM_STATS_BETTER)
-			mlx5e_am_turn(am);
-
-		if (mlx5e_am_on_top(am)) {
-			mlx5e_am_park_on_top(am);
-			break;
-		}
-
-		step_res = mlx5e_am_step(am);
-		switch (step_res) {
-		case MLX5E_AM_ON_EDGE:
-			mlx5e_am_park_on_top(am);
-			break;
-		case MLX5E_AM_TOO_TIRED:
-			mlx5e_am_park_tired(am);
-			break;
-		}
-
-		break;
-	}
-
-	if ((prev_state     != MLX5E_AM_PARKING_ON_TOP) ||
-	    (am->tune_state != MLX5E_AM_PARKING_ON_TOP))
-		am->prev_stats = *curr_stats;
-
-	return am->profile_ix != prev_ix;
-}
-
-static void mlx5e_am_sample(struct mlx5e_rq *rq,
-			    struct mlx5e_rx_am_sample *s)
-{
-	s->time	     = ktime_get();
-	s->pkt_ctr   = rq->stats.packets;
-	s->byte_ctr  = rq->stats.bytes;
-	s->event_ctr = rq->cq.event_ctr;
-}
-
-#define MLX5E_AM_NEVENTS 64
-#define BITS_PER_TYPE(type) (sizeof(type) * BITS_PER_BYTE)
-#define BIT_GAP(bits, end, start) ((((end) - (start)) + BIT_ULL(bits)) & (BIT_ULL(bits) - 1))
-
-static void mlx5e_am_calc_stats(struct mlx5e_rx_am_sample *start,
-				struct mlx5e_rx_am_sample *end,
-				struct mlx5e_rx_am_stats *curr_stats)
-{
-	/* u32 holds up to 71 minutes, should be enough */
-	u32 delta_us = ktime_us_delta(end->time, start->time);
-	u32 npkts = BIT_GAP(BITS_PER_TYPE(u32), end->pkt_ctr, start->pkt_ctr);
-	u32 nbytes = BIT_GAP(BITS_PER_TYPE(u32), end->byte_ctr,
-			     start->byte_ctr);
-
-	if (!delta_us)
-		return;
-
-	curr_stats->ppms = DIV_ROUND_UP(npkts * USEC_PER_MSEC, delta_us);
-	curr_stats->bpms = DIV_ROUND_UP(nbytes * USEC_PER_MSEC, delta_us);
-	curr_stats->epms = DIV_ROUND_UP(MLX5E_AM_NEVENTS * USEC_PER_MSEC,
-					delta_us);
-}
-
-void mlx5e_rx_am_work(struct work_struct *work)
-{
-	struct mlx5e_rx_am *am = container_of(work, struct mlx5e_rx_am,
-					      work);
-	struct mlx5e_rq *rq = container_of(am, struct mlx5e_rq, am);
-	struct mlx5e_cq_moder cur_profile = profile[am->mode][am->profile_ix];
-
-	mlx5_core_modify_cq_moderation(rq->mdev, &rq->cq.mcq,
-				       cur_profile.usec, cur_profile.pkts);
-
-	am->state = MLX5E_AM_START_MEASURE;
-}
-
-void mlx5e_rx_am(struct mlx5e_rq *rq)
-{
-	struct mlx5e_rx_am *am = &rq->am;
-	struct mlx5e_rx_am_sample end_sample;
-	struct mlx5e_rx_am_stats curr_stats;
-	u16 nevents;
-
-	switch (am->state) {
-	case MLX5E_AM_MEASURE_IN_PROGRESS:
-		nevents = BIT_GAP(BITS_PER_TYPE(u16), rq->cq.event_ctr,
-				  am->start_sample.event_ctr);
-		if (nevents < MLX5E_AM_NEVENTS)
-			break;
-		mlx5e_am_sample(rq, &end_sample);
-		mlx5e_am_calc_stats(&am->start_sample, &end_sample,
-				    &curr_stats);
-		if (mlx5e_am_decision(&curr_stats, am)) {
-			am->state = MLX5E_AM_APPLY_NEW_PROFILE;
-			schedule_work(&am->work);
-			break;
-		}
-		/* fall through */
-	case MLX5E_AM_START_MEASURE:
-		mlx5e_am_sample(rq, &am->start_sample);
-		am->state = MLX5E_AM_MEASURE_IN_PROGRESS;
-		break;
-	case MLX5E_AM_APPLY_NEW_PROFILE:
-		break;
-	}
-}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c
index b74ddc7984bc..5f0f3493d747 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c
@@ -71,6 +71,7 @@ static const struct counter_desc sw_stats_desc[] = {
 	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_cache_empty) },
 	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_cache_busy) },
 	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_cache_waive) },
+	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, ch_eq_rearm) },
 	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, link_down_events_phy) },
 };
 
@@ -99,6 +100,72 @@ static int mlx5e_grp_sw_fill_stats(struct mlx5e_priv *priv, u64 *data, int idx)
 	return idx;
 }
 
+static void mlx5e_grp_sw_update_stats(struct mlx5e_priv *priv)
+{
+	struct mlx5e_sw_stats temp, *s = &temp;
+	struct mlx5e_rq_stats *rq_stats;
+	struct mlx5e_sq_stats *sq_stats;
+	struct mlx5e_ch_stats *ch_stats;
+	int i, j;
+
+	memset(s, 0, sizeof(*s));
+	for (i = 0; i < priv->channels.num; i++) {
+		struct mlx5e_channel *c = priv->channels.c[i];
+
+		rq_stats = &c->rq.stats;
+		ch_stats = &c->stats;
+
+		s->rx_packets	+= rq_stats->packets;
+		s->rx_bytes	+= rq_stats->bytes;
+		s->rx_lro_packets += rq_stats->lro_packets;
+		s->rx_lro_bytes	+= rq_stats->lro_bytes;
+		s->rx_removed_vlan_packets += rq_stats->removed_vlan_packets;
+		s->rx_csum_none	+= rq_stats->csum_none;
+		s->rx_csum_complete += rq_stats->csum_complete;
+		s->rx_csum_unnecessary += rq_stats->csum_unnecessary;
+		s->rx_csum_unnecessary_inner += rq_stats->csum_unnecessary_inner;
+		s->rx_xdp_drop += rq_stats->xdp_drop;
+		s->rx_xdp_tx += rq_stats->xdp_tx;
+		s->rx_xdp_tx_full += rq_stats->xdp_tx_full;
+		s->rx_wqe_err   += rq_stats->wqe_err;
+		s->rx_mpwqe_filler += rq_stats->mpwqe_filler;
+		s->rx_buff_alloc_err += rq_stats->buff_alloc_err;
+		s->rx_cqe_compress_blks += rq_stats->cqe_compress_blks;
+		s->rx_cqe_compress_pkts += rq_stats->cqe_compress_pkts;
+		s->rx_page_reuse  += rq_stats->page_reuse;
+		s->rx_cache_reuse += rq_stats->cache_reuse;
+		s->rx_cache_full  += rq_stats->cache_full;
+		s->rx_cache_empty += rq_stats->cache_empty;
+		s->rx_cache_busy  += rq_stats->cache_busy;
+		s->rx_cache_waive += rq_stats->cache_waive;
+		s->ch_eq_rearm += ch_stats->eq_rearm;
+
+		for (j = 0; j < priv->channels.params.num_tc; j++) {
+			sq_stats = &c->sq[j].stats;
+
+			s->tx_packets		+= sq_stats->packets;
+			s->tx_bytes		+= sq_stats->bytes;
+			s->tx_tso_packets	+= sq_stats->tso_packets;
+			s->tx_tso_bytes		+= sq_stats->tso_bytes;
+			s->tx_tso_inner_packets	+= sq_stats->tso_inner_packets;
+			s->tx_tso_inner_bytes	+= sq_stats->tso_inner_bytes;
+			s->tx_added_vlan_packets += sq_stats->added_vlan_packets;
+			s->tx_queue_stopped	+= sq_stats->stopped;
+			s->tx_queue_wake	+= sq_stats->wake;
+			s->tx_queue_dropped	+= sq_stats->dropped;
+			s->tx_xmit_more		+= sq_stats->xmit_more;
+			s->tx_csum_partial_inner += sq_stats->csum_partial_inner;
+			s->tx_csum_none		+= sq_stats->csum_none;
+			s->tx_csum_partial	+= sq_stats->csum_partial;
+		}
+	}
+
+	s->link_down_events_phy = MLX5_GET(ppcnt_reg,
+				priv->stats.pport.phy_counters,
+				counter_set.phys_layer_cntrs.link_down_events);
+	memcpy(&priv->stats.sw, s, sizeof(*s));
+}
+
 static const struct counter_desc q_stats_desc[] = {
 	{ MLX5E_DECLARE_STAT(struct mlx5e_qcounter_stats, rx_out_of_buffer) },
 };
@@ -128,6 +195,22 @@ static int mlx5e_grp_q_fill_stats(struct mlx5e_priv *priv, u64 *data, int idx)
 	return idx;
 }
 
+static void mlx5e_grp_q_update_stats(struct mlx5e_priv *priv)
+{
+	struct mlx5e_qcounter_stats *qcnt = &priv->stats.qcnt;
+	u32 out[MLX5_ST_SZ_DW(query_q_counter_out)];
+	int err;
+
+	if (!priv->q_counter)
+		return;
+
+	err = mlx5_core_query_q_counter(priv->mdev, priv->q_counter, 0, out, sizeof(out));
+	if (err)
+		return;
+
+	qcnt->rx_out_of_buffer = MLX5_GET(query_q_counter_out, out, out_of_buffer);
+}
+
 #define VPORT_COUNTER_OFF(c) MLX5_BYTE_OFF(query_vport_counter_out, c)
 static const struct counter_desc vport_stats_desc[] = {
 	{ "rx_vport_unicast_packets",
@@ -200,6 +283,19 @@ static int mlx5e_grp_vport_fill_stats(struct mlx5e_priv *priv, u64 *data,
 	return idx;
 }
 
+static void mlx5e_grp_vport_update_stats(struct mlx5e_priv *priv)
+{
+	int outlen = MLX5_ST_SZ_BYTES(query_vport_counter_out);
+	u32 *out = (u32 *)priv->stats.vport.query_vport_out;
+	u32 in[MLX5_ST_SZ_DW(query_vport_counter_in)] = {0};
+	struct mlx5_core_dev *mdev = priv->mdev;
+
+	MLX5_SET(query_vport_counter_in, in, opcode, MLX5_CMD_OP_QUERY_VPORT_COUNTER);
+	MLX5_SET(query_vport_counter_in, in, op_mod, 0);
+	MLX5_SET(query_vport_counter_in, in, other_vport, 0);
+	mlx5_cmd_exec(mdev, in, sizeof(in), out, outlen);
+}
+
 #define PPORT_802_3_OFF(c) \
 	MLX5_BYTE_OFF(ppcnt_reg, \
 		      counter_set.eth_802_3_cntrs_grp_data_layout.c##_high)
@@ -252,6 +348,20 @@ static int mlx5e_grp_802_3_fill_stats(struct mlx5e_priv *priv, u64 *data,
 	return idx;
 }
 
+static void mlx5e_grp_802_3_update_stats(struct mlx5e_priv *priv)
+{
+	struct mlx5e_pport_stats *pstats = &priv->stats.pport;
+	struct mlx5_core_dev *mdev = priv->mdev;
+	u32 in[MLX5_ST_SZ_DW(ppcnt_reg)] = {0};
+	int sz = MLX5_ST_SZ_BYTES(ppcnt_reg);
+	void *out;
+
+	MLX5_SET(ppcnt_reg, in, local_port, 1);
+	out = pstats->IEEE_802_3_counters;
+	MLX5_SET(ppcnt_reg, in, grp, MLX5_IEEE_802_3_COUNTERS_GROUP);
+	mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_PPCNT, 0, 0);
+}
+
 #define PPORT_2863_OFF(c) \
 	MLX5_BYTE_OFF(ppcnt_reg, \
 		      counter_set.eth_2863_cntrs_grp_data_layout.c##_high)
@@ -289,6 +399,20 @@ static int mlx5e_grp_2863_fill_stats(struct mlx5e_priv *priv, u64 *data,
 	return idx;
 }
 
+static void mlx5e_grp_2863_update_stats(struct mlx5e_priv *priv)
+{
+	struct mlx5e_pport_stats *pstats = &priv->stats.pport;
+	struct mlx5_core_dev *mdev = priv->mdev;
+	u32 in[MLX5_ST_SZ_DW(ppcnt_reg)] = {0};
+	int sz = MLX5_ST_SZ_BYTES(ppcnt_reg);
+	void *out;
+
+	MLX5_SET(ppcnt_reg, in, local_port, 1);
+	out = pstats->RFC_2863_counters;
+	MLX5_SET(ppcnt_reg, in, grp, MLX5_RFC_2863_COUNTERS_GROUP);
+	mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_PPCNT, 0, 0);
+}
+
 #define PPORT_2819_OFF(c) \
 	MLX5_BYTE_OFF(ppcnt_reg, \
 		      counter_set.eth_2819_cntrs_grp_data_layout.c##_high)
@@ -336,6 +460,20 @@ static int mlx5e_grp_2819_fill_stats(struct mlx5e_priv *priv, u64 *data,
 	return idx;
 }
 
+static void mlx5e_grp_2819_update_stats(struct mlx5e_priv *priv)
+{
+	struct mlx5e_pport_stats *pstats = &priv->stats.pport;
+	struct mlx5_core_dev *mdev = priv->mdev;
+	u32 in[MLX5_ST_SZ_DW(ppcnt_reg)] = {0};
+	int sz = MLX5_ST_SZ_BYTES(ppcnt_reg);
+	void *out;
+
+	MLX5_SET(ppcnt_reg, in, local_port, 1);
+	out = pstats->RFC_2819_counters;
+	MLX5_SET(ppcnt_reg, in, grp, MLX5_RFC_2819_COUNTERS_GROUP);
+	mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_PPCNT, 0, 0);
+}
+
 #define PPORT_PHY_STATISTICAL_OFF(c) \
 	MLX5_BYTE_OFF(ppcnt_reg, \
 		      counter_set.phys_layer_statistical_cntrs.c##_high)
@@ -376,6 +514,27 @@ static int mlx5e_grp_phy_fill_stats(struct mlx5e_priv *priv, u64 *data, int idx)
 	return idx;
 }
 
+static void mlx5e_grp_phy_update_stats(struct mlx5e_priv *priv)
+{
+	struct mlx5e_pport_stats *pstats = &priv->stats.pport;
+	struct mlx5_core_dev *mdev = priv->mdev;
+	u32 in[MLX5_ST_SZ_DW(ppcnt_reg)] = {0};
+	int sz = MLX5_ST_SZ_BYTES(ppcnt_reg);
+	void *out;
+
+	MLX5_SET(ppcnt_reg, in, local_port, 1);
+	out = pstats->phy_counters;
+	MLX5_SET(ppcnt_reg, in, grp, MLX5_PHYSICAL_LAYER_COUNTERS_GROUP);
+	mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_PPCNT, 0, 0);
+
+	if (!MLX5_CAP_PCAM_FEATURE(mdev, ppcnt_statistical_group))
+		return;
+
+	out = pstats->phy_statistical_counters;
+	MLX5_SET(ppcnt_reg, in, grp, MLX5_PHYSICAL_LAYER_STATISTICAL_GROUP);
+	mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_PPCNT, 0, 0);
+}
+
 #define PPORT_ETH_EXT_OFF(c) \
 	MLX5_BYTE_OFF(ppcnt_reg, \
 		      counter_set.eth_extended_cntrs_grp_data_layout.c##_high)
@@ -418,6 +577,23 @@ static int mlx5e_grp_eth_ext_fill_stats(struct mlx5e_priv *priv, u64 *data,
 	return idx;
 }
 
+static void mlx5e_grp_eth_ext_update_stats(struct mlx5e_priv *priv)
+{
+	struct mlx5e_pport_stats *pstats = &priv->stats.pport;
+	struct mlx5_core_dev *mdev = priv->mdev;
+	u32 in[MLX5_ST_SZ_DW(ppcnt_reg)] = {0};
+	int sz = MLX5_ST_SZ_BYTES(ppcnt_reg);
+	void *out;
+
+	if (!MLX5_CAP_PCAM_FEATURE(mdev, rx_buffer_fullness_counters))
+		return;
+
+	MLX5_SET(ppcnt_reg, in, local_port, 1);
+	out = pstats->eth_ext_counters;
+	MLX5_SET(ppcnt_reg, in, grp, MLX5_ETHERNET_EXTENDED_COUNTERS_GROUP);
+	mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_PPCNT, 0, 0);
+}
+
 #define PCIE_PERF_OFF(c) \
 	MLX5_BYTE_OFF(mpcnt_reg, counter_set.pcie_perf_cntrs_grp_data_layout.c)
 static const struct counter_desc pcie_perf_stats_desc[] = {
@@ -505,6 +681,22 @@ static int mlx5e_grp_pcie_fill_stats(struct mlx5e_priv *priv, u64 *data,
 	return idx;
 }
 
+static void mlx5e_grp_pcie_update_stats(struct mlx5e_priv *priv)
+{
+	struct mlx5e_pcie_stats *pcie_stats = &priv->stats.pcie;
+	struct mlx5_core_dev *mdev = priv->mdev;
+	u32 in[MLX5_ST_SZ_DW(mpcnt_reg)] = {0};
+	int sz = MLX5_ST_SZ_BYTES(mpcnt_reg);
+	void *out;
+
+	if (!MLX5_CAP_MCAM_FEATURE(mdev, pcie_performance_group))
+		return;
+
+	out = pcie_stats->pcie_perf_counters;
+	MLX5_SET(mpcnt_reg, in, grp, MLX5_PCIE_PERFORMANCE_COUNTERS_GROUP);
+	mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_MPCNT, 0, 0);
+}
+
 #define PPORT_PER_PRIO_OFF(c) \
 	MLX5_BYTE_OFF(ppcnt_reg, \
 		      counter_set.eth_per_prio_grp_data_layout.c##_high)
@@ -656,6 +848,47 @@ static int mlx5e_grp_per_prio_pfc_fill_stats(struct mlx5e_priv *priv,
 	return idx;
 }
 
+static int mlx5e_grp_per_prio_get_num_stats(struct mlx5e_priv *priv)
+{
+	return mlx5e_grp_per_prio_traffic_get_num_stats(priv) +
+		mlx5e_grp_per_prio_pfc_get_num_stats(priv);
+}
+
+static int mlx5e_grp_per_prio_fill_strings(struct mlx5e_priv *priv, u8 *data,
+					   int idx)
+{
+	idx = mlx5e_grp_per_prio_traffic_fill_strings(priv, data, idx);
+	idx = mlx5e_grp_per_prio_pfc_fill_strings(priv, data, idx);
+	return idx;
+}
+
+static int mlx5e_grp_per_prio_fill_stats(struct mlx5e_priv *priv, u64 *data,
+					 int idx)
+{
+	idx = mlx5e_grp_per_prio_traffic_fill_stats(priv, data, idx);
+	idx = mlx5e_grp_per_prio_pfc_fill_stats(priv, data, idx);
+	return idx;
+}
+
+static void mlx5e_grp_per_prio_update_stats(struct mlx5e_priv *priv)
+{
+	struct mlx5e_pport_stats *pstats = &priv->stats.pport;
+	struct mlx5_core_dev *mdev = priv->mdev;
+	u32 in[MLX5_ST_SZ_DW(ppcnt_reg)] = {0};
+	int sz = MLX5_ST_SZ_BYTES(ppcnt_reg);
+	int prio;
+	void *out;
+
+	MLX5_SET(ppcnt_reg, in, local_port, 1);
+	MLX5_SET(ppcnt_reg, in, grp, MLX5_PER_PRIORITY_COUNTERS_GROUP);
+	for (prio = 0; prio < NUM_PPORT_PRIO; prio++) {
+		out = pstats->per_prio_counters[prio];
+		MLX5_SET(ppcnt_reg, in, prio_tc, prio);
+		mlx5_core_access_reg(mdev, in, sz, out, sz,
+				     MLX5_REG_PPCNT, 0, 0);
+	}
+}
+
 static const struct counter_desc mlx5e_pme_status_desc[] = {
 	{ "module_unplug", 8 },
 };
@@ -723,6 +956,11 @@ static int mlx5e_grp_ipsec_fill_stats(struct mlx5e_priv *priv, u64 *data,
 	return idx + mlx5e_ipsec_get_stats(priv, data + idx);
 }
 
+static void mlx5e_grp_ipsec_update_stats(struct mlx5e_priv *priv)
+{
+	mlx5e_ipsec_update_stats(priv);
+}
+
 static const struct counter_desc rq_stats_desc[] = {
 	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, packets) },
 	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, bytes) },
@@ -767,12 +1005,18 @@ static const struct counter_desc sq_stats_desc[] = {
 	{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, xmit_more) },
 };
 
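+/* Per-channel counters: eq_rearm counts lost-interrupt recoveries
+ * performed by the TX timeout handler (mlx5e_tx_timeout_eq_recover()).
+ */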
+static const struct counter_desc ch_stats_desc[] = {
+	{ MLX5E_DECLARE_CH_STAT(struct mlx5e_ch_stats, eq_rearm) },
+};
+
 #define NUM_RQ_STATS			ARRAY_SIZE(rq_stats_desc)
 #define NUM_SQ_STATS			ARRAY_SIZE(sq_stats_desc)
+#define NUM_CH_STATS			ARRAY_SIZE(ch_stats_desc)
 
 static int mlx5e_grp_channels_get_num_stats(struct mlx5e_priv *priv)
 {
 	return (NUM_RQ_STATS * priv->channels.num) +
+		(NUM_CH_STATS * priv->channels.num) +
 		(NUM_SQ_STATS * priv->channels.num * priv->channels.params.num_tc);
 }
 
@@ -785,6 +1029,11 @@ static int mlx5e_grp_channels_fill_strings(struct mlx5e_priv *priv, u8 *data,
 		return idx;
 
 	for (i = 0; i < priv->channels.num; i++)
+		for (j = 0; j < NUM_CH_STATS; j++)
+			sprintf(data + (idx++) * ETH_GSTRING_LEN,
+				ch_stats_desc[j].format, i);
+
+	for (i = 0; i < priv->channels.num; i++)
 		for (j = 0; j < NUM_RQ_STATS; j++)
 			sprintf(data + (idx++) * ETH_GSTRING_LEN, rq_stats_desc[j].format, i);
 
@@ -808,6 +1057,12 @@ static int mlx5e_grp_channels_fill_stats(struct mlx5e_priv *priv, u64 *data,
 		return idx;
 
 	for (i = 0; i < channels->num; i++)
+		for (j = 0; j < NUM_CH_STATS; j++)
+			data[idx++] =
+				MLX5E_READ_CTR64_CPU(&channels->c[i]->stats,
+						     ch_stats_desc, j);
+
+	for (i = 0; i < channels->num; i++)
 		for (j = 0; j < NUM_RQ_STATS; j++)
 			data[idx++] =
 				MLX5E_READ_CTR64_CPU(&channels->c[i]->rq.stats,
@@ -823,61 +1078,71 @@ static int mlx5e_grp_channels_fill_stats(struct mlx5e_priv *priv, u64 *data,
 	return idx;
 }
 
+/* The stats groups' order is opposite to the order of the update_stats() calls */
 const struct mlx5e_stats_grp mlx5e_stats_grps[] = {
 	{
 		.get_num_stats = mlx5e_grp_sw_get_num_stats,
 		.fill_strings = mlx5e_grp_sw_fill_strings,
 		.fill_stats = mlx5e_grp_sw_fill_stats,
+		.update_stats_mask = MLX5E_NDO_UPDATE_STATS,
+		.update_stats = mlx5e_grp_sw_update_stats,
 	},
 	{
 		.get_num_stats = mlx5e_grp_q_get_num_stats,
 		.fill_strings = mlx5e_grp_q_fill_strings,
 		.fill_stats = mlx5e_grp_q_fill_stats,
+		.update_stats_mask = MLX5E_NDO_UPDATE_STATS,
+		.update_stats = mlx5e_grp_q_update_stats,
 	},
 	{
 		.get_num_stats = mlx5e_grp_vport_get_num_stats,
 		.fill_strings = mlx5e_grp_vport_fill_strings,
 		.fill_stats = mlx5e_grp_vport_fill_stats,
+		.update_stats_mask = MLX5E_NDO_UPDATE_STATS,
+		.update_stats = mlx5e_grp_vport_update_stats,
 	},
 	{
 		.get_num_stats = mlx5e_grp_802_3_get_num_stats,
 		.fill_strings = mlx5e_grp_802_3_fill_strings,
 		.fill_stats = mlx5e_grp_802_3_fill_stats,
+		.update_stats_mask = MLX5E_NDO_UPDATE_STATS,
+		.update_stats = mlx5e_grp_802_3_update_stats,
 	},
 	{
 		.get_num_stats = mlx5e_grp_2863_get_num_stats,
 		.fill_strings = mlx5e_grp_2863_fill_strings,
 		.fill_stats = mlx5e_grp_2863_fill_stats,
+		.update_stats = mlx5e_grp_2863_update_stats,
 	},
 	{
 		.get_num_stats = mlx5e_grp_2819_get_num_stats,
 		.fill_strings = mlx5e_grp_2819_fill_strings,
 		.fill_stats = mlx5e_grp_2819_fill_stats,
+		.update_stats = mlx5e_grp_2819_update_stats,
 	},
 	{
 		.get_num_stats = mlx5e_grp_phy_get_num_stats,
 		.fill_strings = mlx5e_grp_phy_fill_strings,
 		.fill_stats = mlx5e_grp_phy_fill_stats,
+		.update_stats = mlx5e_grp_phy_update_stats,
 	},
 	{
 		.get_num_stats = mlx5e_grp_eth_ext_get_num_stats,
 		.fill_strings = mlx5e_grp_eth_ext_fill_strings,
 		.fill_stats = mlx5e_grp_eth_ext_fill_stats,
+		.update_stats = mlx5e_grp_eth_ext_update_stats,
 	},
 	{
 		.get_num_stats = mlx5e_grp_pcie_get_num_stats,
 		.fill_strings = mlx5e_grp_pcie_fill_strings,
 		.fill_stats = mlx5e_grp_pcie_fill_stats,
+		.update_stats = mlx5e_grp_pcie_update_stats,
 	},
 	{
-		.get_num_stats = mlx5e_grp_per_prio_traffic_get_num_stats,
-		.fill_strings = mlx5e_grp_per_prio_traffic_fill_strings,
-		.fill_stats = mlx5e_grp_per_prio_traffic_fill_stats,
-	},
-	{
-		.get_num_stats = mlx5e_grp_per_prio_pfc_get_num_stats,
-		.fill_strings = mlx5e_grp_per_prio_pfc_fill_strings,
-		.fill_stats = mlx5e_grp_per_prio_pfc_fill_stats,
+		.get_num_stats = mlx5e_grp_per_prio_get_num_stats,
+		.fill_strings = mlx5e_grp_per_prio_fill_strings,
+		.fill_stats = mlx5e_grp_per_prio_fill_stats,
+		.update_stats = mlx5e_grp_per_prio_update_stats,
 	},
 	{
 		.get_num_stats = mlx5e_grp_pme_get_num_stats,
@@ -888,6 +1153,7 @@ const struct mlx5e_stats_grp mlx5e_stats_grps[] = {
 		.get_num_stats = mlx5e_grp_ipsec_get_num_stats,
 		.fill_strings = mlx5e_grp_ipsec_fill_strings,
 		.fill_stats = mlx5e_grp_ipsec_fill_stats,
+		.update_stats = mlx5e_grp_ipsec_update_stats,
 	},
 	{
 		.get_num_stats = mlx5e_grp_channels_get_num_stats,
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h
index d679e21f686e..0b3320a2b072 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h
@@ -44,6 +44,7 @@
 #define MLX5E_DECLARE_STAT(type, fld) #fld, offsetof(type, fld)
 #define MLX5E_DECLARE_RX_STAT(type, fld) "rx%d_"#fld, offsetof(type, fld)
 #define MLX5E_DECLARE_TX_STAT(type, fld) "tx%d_"#fld, offsetof(type, fld)
+#define MLX5E_DECLARE_CH_STAT(type, fld) "ch%d_"#fld, offsetof(type, fld)
 
 struct counter_desc {
 	char		format[ETH_GSTRING_LEN];
@@ -88,6 +89,7 @@ struct mlx5e_sw_stats {
 	u64 rx_cache_empty;
 	u64 rx_cache_busy;
 	u64 rx_cache_waive;
+	u64 ch_eq_rearm;
 
 	/* Special handling counters */
 	u64 link_down_events_phy;
@@ -192,6 +194,10 @@ struct mlx5e_sq_stats {
 	u64 dropped;
 };
 
+struct mlx5e_ch_stats {
+	u64 eq_rearm;
+};
+
 struct mlx5e_stats {
 	struct mlx5e_sw_stats sw;
 	struct mlx5e_qcounter_stats qcnt;
@@ -201,11 +207,17 @@ struct mlx5e_stats {
 	struct mlx5e_pcie_stats pcie;
 };
 
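+/* Stats groups flagged with MLX5E_NDO_UPDATE_STATS in update_stats_mask
+ * are also refreshed on the lightweight ndo stats path.
+ */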
+enum {
+	MLX5E_NDO_UPDATE_STATS = BIT(0x1),
+};
+
 struct mlx5e_priv;
 struct mlx5e_stats_grp {
+	u16 update_stats_mask;
 	int (*get_num_stats)(struct mlx5e_priv *priv);
 	int (*fill_strings)(struct mlx5e_priv *priv, u8 *data, int idx);
 	int (*fill_stats)(struct mlx5e_priv *priv, u64 *data, int idx);
+	void (*update_stats)(struct mlx5e_priv *priv);
 };
 
 extern const struct mlx5e_stats_grp mlx5e_stats_grps[];
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
index 55979ec2e88a..fd98b0dc610f 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
@@ -51,17 +51,22 @@
 #include "en_tc.h"
 #include "eswitch.h"
 #include "vxlan.h"
+#include "fs_core.h"
 
 struct mlx5_nic_flow_attr {
 	u32 action;
 	u32 flow_tag;
 	u32 mod_hdr_id;
+	u32 hairpin_tirn;
+	struct mlx5_flow_table	*hairpin_ft;
 };
 
 enum {
 	MLX5E_TC_FLOW_ESWITCH	= BIT(0),
 	MLX5E_TC_FLOW_NIC	= BIT(1),
 	MLX5E_TC_FLOW_OFFLOADED	= BIT(2),
+	MLX5E_TC_FLOW_HAIRPIN	= BIT(3),
+	MLX5E_TC_FLOW_HAIRPIN_RSS = BIT(4),
 };
 
 struct mlx5e_tc_flow {
@@ -71,6 +76,7 @@ struct mlx5e_tc_flow {
 	struct mlx5_flow_handle *rule;
 	struct list_head	encap;   /* flows sharing the same encap ID */
 	struct list_head	mod_hdr; /* flows sharing the same mod hdr ID */
+	struct list_head	hairpin; /* flows sharing the same hairpin */
 	union {
 		struct mlx5_esw_flow_attr esw_attr[0];
 		struct mlx5_nic_flow_attr nic_attr[0];
@@ -93,6 +99,32 @@ enum {
 #define MLX5E_TC_TABLE_NUM_GROUPS 4
 #define MLX5E_TC_TABLE_MAX_GROUP_SIZE (1 << 16)
 
+struct mlx5e_hairpin {
+	struct mlx5_hairpin *pair;
+
+	struct mlx5_core_dev *func_mdev;
+	struct mlx5e_priv *func_priv;
+	u32 tdn;
+	u32 tirn;
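+/* The eswitch rep now keeps per-protocol private data in rep_if[];
+ * REP_ETH is the ethernet representor slot.
+ */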
+
+	int num_channels;
+	struct mlx5e_rqt indir_rqt;
+	u32 indir_tirn[MLX5E_NUM_INDIR_TIRS];
+	struct mlx5e_ttc_table ttc;
+};
+
+struct mlx5e_hairpin_entry {
+	/* a node of a hash table that keeps all the hairpin entries */
+	struct hlist_node hairpin_hlist;
+
+	/* flows sharing the same hairpin */
+	struct list_head flows;
+
+	u16 peer_vhca_id;
+	u8 prio;
+	struct mlx5e_hairpin *hp;
+};
+
 struct mod_hdr_key {
 	int num_actions;
 	void *actions;
@@ -222,6 +254,417 @@ static void mlx5e_detach_mod_hdr(struct mlx5e_priv *priv,
 	}
 }
 
+static
+struct mlx5_core_dev *mlx5e_hairpin_get_mdev(struct net *net, int ifindex)
+{
+	struct net_device *netdev;
+	struct mlx5e_priv *priv;
+
+	netdev = __dev_get_by_index(net, ifindex);
+	priv = netdev_priv(netdev);
+	return priv->mdev;
+}
+
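+/* The non-RSS receive side of the hairpin: a direct TIR over the first
+ * hairpin RQ, created on a transport domain of the function's mdev.
+ */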
+static int mlx5e_hairpin_create_transport(struct mlx5e_hairpin *hp)
+{
+	u32 in[MLX5_ST_SZ_DW(create_tir_in)] = {0};
+	void *tirc;
+	int err;
+
+	err = mlx5_core_alloc_transport_domain(hp->func_mdev, &hp->tdn);
+	if (err)
+		goto alloc_tdn_err;
+
+	tirc = MLX5_ADDR_OF(create_tir_in, in, ctx);
+
+	MLX5_SET(tirc, tirc, disp_type, MLX5_TIRC_DISP_TYPE_DIRECT);
+	MLX5_SET(tirc, tirc, inline_rqn, hp->pair->rqn[0]);
+	MLX5_SET(tirc, tirc, transport_domain, hp->tdn);
+
+	err = mlx5_core_create_tir(hp->func_mdev, in, MLX5_ST_SZ_BYTES(create_tir_in), &hp->tirn);
+	if (err)
+		goto create_tir_err;
+
+	return 0;
+
+create_tir_err:
+	mlx5_core_dealloc_transport_domain(hp->func_mdev, hp->tdn);
+alloc_tdn_err:
+	return err;
+}
+
+static void mlx5e_hairpin_destroy_transport(struct mlx5e_hairpin *hp)
+{
+	mlx5_core_destroy_tir(hp->func_mdev, hp->tirn);
+	mlx5_core_dealloc_transport_domain(hp->func_mdev, hp->tdn);
+}
+
+static void mlx5e_hairpin_fill_rqt_rqns(struct mlx5e_hairpin *hp, void *rqtc)
+{
+	u32 indirection_rqt[MLX5E_INDIR_RQT_SIZE], rqn;
+	struct mlx5e_priv *priv = hp->func_priv;
+	int i, ix, sz = MLX5E_INDIR_RQT_SIZE;
+
+	mlx5e_build_default_indir_rqt(indirection_rqt, sz,
+				      hp->num_channels);
+
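+	/* Mirror the regular RQT setup: XOR RSS uses bit-reversed indices,
+	 * hence the now-exported mlx5e_bits_invert() helper.
+	 */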
+	for (i = 0; i < sz; i++) {
+		ix = i;
+		if (priv->channels.params.rss_hfunc == ETH_RSS_HASH_XOR)
+			ix = mlx5e_bits_invert(i, ilog2(sz));
+		ix = indirection_rqt[ix];
+		rqn = hp->pair->rqn[ix];
+		MLX5_SET(rqtc, rqtc, rq_num[i], rqn);
+	}
+}
+
+static int mlx5e_hairpin_create_indirect_rqt(struct mlx5e_hairpin *hp)
+{
+	int inlen, err, sz = MLX5E_INDIR_RQT_SIZE;
+	struct mlx5e_priv *priv = hp->func_priv;
+	struct mlx5_core_dev *mdev = priv->mdev;
+	void *rqtc;
+	u32 *in;
+
+	inlen = MLX5_ST_SZ_BYTES(create_rqt_in) + sizeof(u32) * sz;
+	in = kvzalloc(inlen, GFP_KERNEL);
+	if (!in)
+		return -ENOMEM;
+
+	rqtc = MLX5_ADDR_OF(create_rqt_in, in, rqt_context);
+
+	MLX5_SET(rqtc, rqtc, rqt_actual_size, sz);
+	MLX5_SET(rqtc, rqtc, rqt_max_size, sz);
+
+	mlx5e_hairpin_fill_rqt_rqns(hp, rqtc);
+
+	err = mlx5_core_create_rqt(mdev, in, inlen, &hp->indir_rqt.rqtn);
+	if (!err)
+		hp->indir_rqt.enabled = true;
+
+	kvfree(in);
+	return err;
+}
+
+static int mlx5e_hairpin_create_indirect_tirs(struct mlx5e_hairpin *hp)
+{
+	struct mlx5e_priv *priv = hp->func_priv;
+	u32 in[MLX5_ST_SZ_DW(create_tir_in)];
+	int tt, i, err;
+	void *tirc;
+
+	for (tt = 0; tt < MLX5E_NUM_INDIR_TIRS; tt++) {
+		memset(in, 0, MLX5_ST_SZ_BYTES(create_tir_in));
+		tirc = MLX5_ADDR_OF(create_tir_in, in, ctx);
+
+		MLX5_SET(tirc, tirc, transport_domain, hp->tdn);
+		MLX5_SET(tirc, tirc, disp_type, MLX5_TIRC_DISP_TYPE_INDIRECT);
+		MLX5_SET(tirc, tirc, indirect_table, hp->indir_rqt.rqtn);
+		mlx5e_build_indir_tir_ctx_hash(&priv->channels.params, tt, tirc, false);
+
+		err = mlx5_core_create_tir(hp->func_mdev, in,
+					   MLX5_ST_SZ_BYTES(create_tir_in), &hp->indir_tirn[tt]);
+		if (err) {
+			mlx5_core_warn(hp->func_mdev, "create indirect tirs failed, %d\n", err);
+			goto err_destroy_tirs;
+		}
+	}
+	return 0;
+
+err_destroy_tirs:
+	for (i = 0; i < tt; i++)
+		mlx5_core_destroy_tir(hp->func_mdev, hp->indir_tirn[i]);
+	return err;
+}
+
+static void mlx5e_hairpin_destroy_indirect_tirs(struct mlx5e_hairpin *hp)
+{
+	int tt;
+
+	for (tt = 0; tt < MLX5E_NUM_INDIR_TIRS; tt++)
+		mlx5_core_destroy_tir(hp->func_mdev, hp->indir_tirn[tt]);
+}
+
+static void mlx5e_hairpin_set_ttc_params(struct mlx5e_hairpin *hp,
+					 struct ttc_params *ttc_params)
+{
+	struct mlx5_flow_table_attr *ft_attr = &ttc_params->ft_attr;
+	int tt;
+
+	memset(ttc_params, 0, sizeof(*ttc_params));
+
+	ttc_params->any_tt_tirn = hp->tirn;
+
+	for (tt = 0; tt < MLX5E_NUM_INDIR_TIRS; tt++)
+		ttc_params->indir_tirn[tt] = hp->indir_tirn[tt];
+
+	ft_attr->max_fte = MLX5E_NUM_TT;
+	ft_attr->level = MLX5E_TC_TTC_FT_LEVEL;
+	ft_attr->prio = MLX5E_TC_PRIO;
+}
+
+static int mlx5e_hairpin_rss_init(struct mlx5e_hairpin *hp)
+{
+	struct mlx5e_priv *priv = hp->func_priv;
+	struct ttc_params ttc_params;
+	int err;
+
+	err = mlx5e_hairpin_create_indirect_rqt(hp);
+	if (err)
+		return err;
+
+	err = mlx5e_hairpin_create_indirect_tirs(hp);
+	if (err)
+		goto err_create_indirect_tirs;
+
+	mlx5e_hairpin_set_ttc_params(hp, &ttc_params);
+	err = mlx5e_create_ttc_table(priv, &ttc_params, &hp->ttc);
+	if (err)
+		goto err_create_ttc_table;
+
+	netdev_dbg(priv->netdev, "add hairpin: using %d channels rss ttc table id %x\n",
+		   hp->num_channels, hp->ttc.ft.t->id);
+
+	return 0;
+
+err_create_ttc_table:
+	mlx5e_hairpin_destroy_indirect_tirs(hp);
+err_create_indirect_tirs:
+	mlx5e_destroy_rqt(priv, &hp->indir_rqt);
+
+	return err;
+}
+
+static void mlx5e_hairpin_rss_cleanup(struct mlx5e_hairpin *hp)
+{
+	struct mlx5e_priv *priv = hp->func_priv;
+
+	mlx5e_destroy_ttc_table(priv, &hp->ttc);
+	mlx5e_hairpin_destroy_indirect_tirs(hp);
+	mlx5e_destroy_rqt(priv, &hp->indir_rqt);
+}
+
+static struct mlx5e_hairpin *
+mlx5e_hairpin_create(struct mlx5e_priv *priv, struct mlx5_hairpin_params *params,
+		     int peer_ifindex)
+{
+	struct mlx5_core_dev *func_mdev, *peer_mdev;
+	struct mlx5e_hairpin *hp;
+	struct mlx5_hairpin *pair;
+	int err;
+
+	hp = kzalloc(sizeof(*hp), GFP_KERNEL);
+	if (!hp)
+		return ERR_PTR(-ENOMEM);
+
+	func_mdev = priv->mdev;
+	peer_mdev = mlx5e_hairpin_get_mdev(dev_net(priv->netdev), peer_ifindex);
+
+	pair = mlx5_core_hairpin_create(func_mdev, peer_mdev, params);
+	if (IS_ERR(pair)) {
+		err = PTR_ERR(pair);
+		goto create_pair_err;
+	}
+	hp->pair = pair;
+	hp->func_mdev = func_mdev;
+	hp->func_priv = priv;
+	hp->num_channels = params->num_channels;
+
+	err = mlx5e_hairpin_create_transport(hp);
+	if (err)
+		goto create_transport_err;
+
+	if (hp->num_channels > 1) {
+		err = mlx5e_hairpin_rss_init(hp);
+		if (err)
+			goto rss_init_err;
+	}
+
+	return hp;
+
+rss_init_err:
+	mlx5e_hairpin_destroy_transport(hp);
+create_transport_err:
+	mlx5_core_hairpin_destroy(hp->pair);
+create_pair_err:
+	kfree(hp);
+	return ERR_PTR(err);
+}
+
+static void mlx5e_hairpin_destroy(struct mlx5e_hairpin *hp)
+{
+	if (hp->num_channels > 1)
+		mlx5e_hairpin_rss_cleanup(hp);
+	mlx5e_hairpin_destroy_transport(hp);
+	mlx5_core_hairpin_destroy(hp->pair);
+	kvfree(hp);
+}
+
+static inline u32 hash_hairpin_info(u16 peer_vhca_id, u8 prio)
+{
+	return (peer_vhca_id << 16 | prio);
+}
+
+static struct mlx5e_hairpin_entry *mlx5e_hairpin_get(struct mlx5e_priv *priv,
+						     u16 peer_vhca_id, u8 prio)
+{
+	struct mlx5e_hairpin_entry *hpe;
+	u32 hash_key = hash_hairpin_info(peer_vhca_id, prio);
+
+	hash_for_each_possible(priv->fs.tc.hairpin_tbl, hpe,
+			       hairpin_hlist, hash_key) {
+		if (hpe->peer_vhca_id == peer_vhca_id && hpe->prio == prio)
+			return hpe;
+	}
+
+	return NULL;
+}
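
For reference, the (peer_vhca_id, prio) pair is packed into a single u32 key; a minimal sketch of the layout with illustrative values:

	/* key layout produced by hash_hairpin_info():
	 *
	 *   bits 31..16  peer_vhca_id
	 *   bits  7..0   prio (0..7, or UNKNOWN_MATCH_PRIO below)
	 *
	 * e.g. peer_vhca_id = 0x0005, prio = 3  ->  key = 0x00050003
	 */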
+
+#define UNKNOWN_MATCH_PRIO 8
+
+static int mlx5e_hairpin_get_prio(struct mlx5e_priv *priv,
+				  struct mlx5_flow_spec *spec, u8 *match_prio)
+{
+	void *headers_c, *headers_v;
+	u8 prio_val, prio_mask = 0;
+	bool vlan_present;
+
+#ifdef CONFIG_MLX5_CORE_EN_DCB
+	if (priv->dcbx_dp.trust_state != MLX5_QPTS_TRUST_PCP) {
+		netdev_warn(priv->netdev,
+			    "only PCP trust state supported for hairpin\n");
+		return -EOPNOTSUPP;
+	}
+#endif
+	headers_c = MLX5_ADDR_OF(fte_match_param, spec->match_criteria, outer_headers);
+	headers_v = MLX5_ADDR_OF(fte_match_param, spec->match_value, outer_headers);
+
+	vlan_present = MLX5_GET(fte_match_set_lyr_2_4, headers_v, cvlan_tag);
+	if (vlan_present) {
+		prio_mask = MLX5_GET(fte_match_set_lyr_2_4, headers_c, first_prio);
+		prio_val = MLX5_GET(fte_match_set_lyr_2_4, headers_v, first_prio);
+	}
+
+	if (!vlan_present || !prio_mask) {
+		prio_val = UNKNOWN_MATCH_PRIO;
+	} else if (prio_mask != 0x7) {
+		netdev_warn(priv->netdev,
+			    "masked priority match not supported for hairpin\n");
+		return -EOPNOTSUPP;
+	}
+
+	*match_prio = prio_val;
+	return 0;
+}
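
The accepted match combinations above reduce to a small decision table (a sketch, DCB trust-state check aside):

	/* VLAN tag matched?   prio mask     result
	 * no                  -             match_prio = UNKNOWN_MATCH_PRIO (8)
	 * yes                 0 (unset)     match_prio = UNKNOWN_MATCH_PRIO (8)
	 * yes                 0x7 (full)    match_prio = matched PCP value (0..7)
	 * yes                 partial       -EOPNOTSUPP
	 */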
+
+static int mlx5e_hairpin_flow_add(struct mlx5e_priv *priv,
+				  struct mlx5e_tc_flow *flow,
+				  struct mlx5e_tc_flow_parse_attr *parse_attr)
+{
+	int peer_ifindex = parse_attr->mirred_ifindex;
+	struct mlx5_hairpin_params params;
+	struct mlx5_core_dev *peer_mdev;
+	struct mlx5e_hairpin_entry *hpe;
+	struct mlx5e_hairpin *hp;
+	u64 link_speed64;
+	u32 link_speed;
+	u8 match_prio;
+	u16 peer_id;
+	int err;
+
+	peer_mdev = mlx5e_hairpin_get_mdev(dev_net(priv->netdev), peer_ifindex);
+	if (!MLX5_CAP_GEN(priv->mdev, hairpin) || !MLX5_CAP_GEN(peer_mdev, hairpin)) {
+		netdev_warn(priv->netdev, "hairpin is not supported\n");
+		return -EOPNOTSUPP;
+	}
+
+	peer_id = MLX5_CAP_GEN(peer_mdev, vhca_id);
+	err = mlx5e_hairpin_get_prio(priv, &parse_attr->spec, &match_prio);
+	if (err)
+		return err;
+	hpe = mlx5e_hairpin_get(priv, peer_id, match_prio);
+	if (hpe)
+		goto attach_flow;
+
+	hpe = kzalloc(sizeof(*hpe), GFP_KERNEL);
+	if (!hpe)
+		return -ENOMEM;
+
+	INIT_LIST_HEAD(&hpe->flows);
+	hpe->peer_vhca_id = peer_id;
+	hpe->prio = match_prio;
+
+	params.log_data_size = 15;
+	params.log_data_size = min_t(u8, params.log_data_size,
+				     MLX5_CAP_GEN(priv->mdev, log_max_hairpin_wq_data_sz));
+	params.log_data_size = max_t(u8, params.log_data_size,
+				     MLX5_CAP_GEN(priv->mdev, log_min_hairpin_wq_data_sz));
+
+	params.log_num_packets = params.log_data_size -
+				 MLX5_MPWRQ_MIN_LOG_STRIDE_SZ(priv->mdev);
+	params.log_num_packets = min_t(u8, params.log_num_packets,
+				       MLX5_CAP_GEN(priv->mdev, log_max_hairpin_num_packets));
+
+	params.q_counter = priv->q_counter;
+	/* allocate one hairpin channel per 50Gbps share of the link speed */
+	mlx5e_get_max_linkspeed(priv->mdev, &link_speed);
+	link_speed = max_t(u32, link_speed, 50000);
+	link_speed64 = link_speed;
+	do_div(link_speed64, 50000);
+	params.num_channels = link_speed64;
+
+	hp = mlx5e_hairpin_create(priv, &params, peer_ifindex);
+	if (IS_ERR(hp)) {
+		err = PTR_ERR(hp);
+		goto create_hairpin_err;
+	}
+
+	netdev_dbg(priv->netdev, "add hairpin: tirn %x rqn %x peer %s sqn %x prio %d (log) data %d packets %d\n",
+		   hp->tirn, hp->pair->rqn[0], hp->pair->peer_mdev->priv.name,
+		   hp->pair->sqn[0], match_prio, params.log_data_size, params.log_num_packets);
+
+	hpe->hp = hp;
+	hash_add(priv->fs.tc.hairpin_tbl, &hpe->hairpin_hlist,
+		 hash_hairpin_info(peer_id, match_prio));
+
+attach_flow:
+	if (hpe->hp->num_channels > 1) {
+		flow->flags |= MLX5E_TC_FLOW_HAIRPIN_RSS;
+		flow->nic_attr->hairpin_ft = hpe->hp->ttc.ft.t;
+	} else {
+		flow->nic_attr->hairpin_tirn = hpe->hp->tirn;
+	}
+	list_add(&flow->hairpin, &hpe->flows);
+
+	return 0;
+
+create_hairpin_err:
+	kfree(hpe);
+	return err;
+}
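
A worked example of the sizing above, under assumed firmware caps (log_max_hairpin_wq_data_sz = 16, log_min_hairpin_wq_data_sz = 6, min log stride = 6, log_max_hairpin_num_packets = 10) on a 100Gb/s link, where mlx5e_get_max_linkspeed() reports Mbps:

	/* log_data_size   = clamp(15, 6, 16)           = 15  -> 32KB hairpin buffer
	 * log_num_packets = min(15 - 6, 10)            = 9   -> 512 packets
	 * num_channels    = max(100000, 50000) / 50000 = 2   -> two RQ/SQ pairs
	 */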
+
+static void mlx5e_hairpin_flow_del(struct mlx5e_priv *priv,
+				   struct mlx5e_tc_flow *flow)
+{
+	struct list_head *next = flow->hairpin.next;
+
+	list_del(&flow->hairpin);
+
+	/* no more hairpin flows for us, release the hairpin pair */
+	if (list_empty(next)) {
+		struct mlx5e_hairpin_entry *hpe;
+
+		hpe = list_entry(next, struct mlx5e_hairpin_entry, flows);
+
+		netdev_dbg(priv->netdev, "del hairpin: peer %s\n",
+			   hpe->hp->pair->peer_mdev->priv.name);
+
+		mlx5e_hairpin_destroy(hpe->hp);
+		hash_del(&hpe->hairpin_hlist);
+		kfree(hpe);
+	}
+}
+
 static struct mlx5_flow_handle *
 mlx5e_tc_add_nic_flow(struct mlx5e_priv *priv,
 		      struct mlx5e_tc_flow_parse_attr *parse_attr,
@@ -229,7 +672,7 @@ mlx5e_tc_add_nic_flow(struct mlx5e_priv *priv,
 {
 	struct mlx5_nic_flow_attr *attr = flow->nic_attr;
 	struct mlx5_core_dev *dev = priv->mdev;
-	struct mlx5_flow_destination dest = {};
+	struct mlx5_flow_destination dest[2] = {};
 	struct mlx5_flow_act flow_act = {
 		.action = attr->action,
 		.flow_tag = attr->flow_tag,
@@ -238,18 +681,37 @@ mlx5e_tc_add_nic_flow(struct mlx5e_priv *priv,
 	struct mlx5_fc *counter = NULL;
 	struct mlx5_flow_handle *rule;
 	bool table_created = false;
-	int err;
+	int err, dest_ix = 0;
 
-	if (attr->action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST) {
-		dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
-		dest.ft = priv->fs.vlan.ft.t;
-	} else if (attr->action & MLX5_FLOW_CONTEXT_ACTION_COUNT) {
-		counter = mlx5_fc_create(dev, true);
-		if (IS_ERR(counter))
-			return ERR_CAST(counter);
+	if (flow->flags & MLX5E_TC_FLOW_HAIRPIN) {
+		err = mlx5e_hairpin_flow_add(priv, flow, parse_attr);
+		if (err) {
+			rule = ERR_PTR(err);
+			goto err_add_hairpin_flow;
+		}
+		if (flow->flags & MLX5E_TC_FLOW_HAIRPIN_RSS) {
+			dest[dest_ix].type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
+			dest[dest_ix].ft = attr->hairpin_ft;
+		} else {
+			dest[dest_ix].type = MLX5_FLOW_DESTINATION_TYPE_TIR;
+			dest[dest_ix].tir_num = attr->hairpin_tirn;
+		}
+		dest_ix++;
+	} else if (attr->action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST) {
+		dest[dest_ix].type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
+		dest[dest_ix].ft = priv->fs.vlan.ft.t;
+		dest_ix++;
+	}
 
-		dest.type = MLX5_FLOW_DESTINATION_TYPE_COUNTER;
-		dest.counter = counter;
+	if (attr->action & MLX5_FLOW_CONTEXT_ACTION_COUNT) {
+		counter = mlx5_fc_create(dev, true);
+		if (IS_ERR(counter)) {
+			rule = ERR_CAST(counter);
+			goto err_fc_create;
+		}
+		dest[dest_ix].type = MLX5_FLOW_DESTINATION_TYPE_COUNTER;
+		dest[dest_ix].counter = counter;
+		dest_ix++;
 	}
 
 	if (attr->action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR) {
@@ -279,7 +741,7 @@ mlx5e_tc_add_nic_flow(struct mlx5e_priv *priv,
 							    MLX5E_TC_PRIO,
 							    tc_tbl_size,
 							    MLX5E_TC_TABLE_NUM_GROUPS,
-							    0, 0);
+							    MLX5E_TC_FT_LEVEL, 0);
 		if (IS_ERR(priv->fs.tc.t)) {
 			netdev_err(priv->netdev,
 				   "Failed to create tc offload table\n");
@@ -292,7 +754,7 @@ mlx5e_tc_add_nic_flow(struct mlx5e_priv *priv,
 
 	parse_attr->spec.match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
 	rule = mlx5_add_flow_rules(priv->fs.tc.t, &parse_attr->spec,
-				   &flow_act, &dest, 1);
+				   &flow_act, dest, dest_ix);
 
 	if (IS_ERR(rule))
 		goto err_add_rule;
@@ -309,7 +771,10 @@ err_create_ft:
 		mlx5e_detach_mod_hdr(priv, flow);
 err_create_mod_hdr_id:
 	mlx5_fc_destroy(dev, counter);
-
+err_fc_create:
+	if (flow->flags & MLX5E_TC_FLOW_HAIRPIN)
+		mlx5e_hairpin_flow_del(priv, flow);
+err_add_hairpin_flow:
 	return rule;
 }
 
@@ -330,6 +795,9 @@ static void mlx5e_tc_del_nic_flow(struct mlx5e_priv *priv,
 
 	if (attr->action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR)
 		mlx5e_detach_mod_hdr(priv, flow);
+
+	if (flow->flags & MLX5E_TC_FLOW_HAIRPIN)
+		mlx5e_hairpin_flow_del(priv, flow);
 }
 
 static void mlx5e_detach_encap(struct mlx5e_priv *priv,
@@ -617,7 +1085,8 @@ static int parse_tunnel_attr(struct mlx5e_priv *priv,
 						  FLOW_DISSECTOR_KEY_ENC_PORTS,
 						  f->mask);
 		struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
-		struct net_device *up_dev = mlx5_eswitch_get_uplink_netdev(esw);
+		struct mlx5e_rep_priv *uplink_rpriv = mlx5_eswitch_get_uplink_priv(esw, REP_ETH);
+		struct net_device *up_dev = uplink_rpriv->netdev;
 		struct mlx5e_priv *up_priv = netdev_priv(up_dev);
 
 		/* Full udp dst port must be given */
@@ -1421,6 +1890,20 @@ static bool actions_match_supported(struct mlx5e_priv *priv,
 	return true;
 }
 
+static bool same_hw_devs(struct mlx5e_priv *priv, struct mlx5e_priv *peer_priv)
+{
+	struct mlx5_core_dev *fmdev, *pmdev;
+	u16 func_id, peer_id;
+
+	fmdev = priv->mdev;
+	pmdev = peer_priv->mdev;
+
+	func_id = (u16)((fmdev->pdev->bus->number << 8) | PCI_SLOT(fmdev->pdev->devfn));
+	peer_id = (u16)((pmdev->pdev->bus->number << 8) | PCI_SLOT(pmdev->pdev->devfn));
+
+	return (func_id == peer_id);
+}
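
A sketch of the identifier with hypothetical PCI addresses: two functions of one adapter share PCI bus and slot and differ only in the function number, so they compare equal.

	/* 0000:03:00.0 -> (0x03 << 8) | PCI_SLOT(0x00) = 0x0300
	 * 0000:03:00.1 -> (0x03 << 8) | PCI_SLOT(0x01) = 0x0300  (same HW)
	 * 0000:84:00.0 -> (0x84 << 8) | PCI_SLOT(0x00) = 0x8400  (different HW)
	 */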
+
 static int parse_tc_nic_actions(struct mlx5e_priv *priv, struct tcf_exts *exts,
 				struct mlx5e_tc_flow_parse_attr *parse_attr,
 				struct mlx5e_tc_flow *flow)
@@ -1465,6 +1948,23 @@ static int parse_tc_nic_actions(struct mlx5e_priv *priv, struct tcf_exts *exts,
 			return -EOPNOTSUPP;
 		}
 
+		if (is_tcf_mirred_egress_redirect(a)) {
+			struct net_device *peer_dev = tcf_mirred_dev(a);
+
+			if (priv->netdev->netdev_ops == peer_dev->netdev_ops &&
+			    same_hw_devs(priv, netdev_priv(peer_dev))) {
+				parse_attr->mirred_ifindex = peer_dev->ifindex;
+				flow->flags |= MLX5E_TC_FLOW_HAIRPIN;
+				attr->action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST |
+						MLX5_FLOW_CONTEXT_ACTION_COUNT;
+			} else {
+				netdev_warn(priv->netdev, "device %s not on same HW, can't offload\n",
+					    peer_dev->name);
+				return -EINVAL;
+			}
+			continue;
+		}
+
 		if (is_tcf_skbedit_mark(a)) {
 			u32 mark = tcf_skbedit_mark(a);
 
@@ -1507,6 +2007,7 @@ static int mlx5e_route_lookup_ipv4(struct mlx5e_priv *priv,
 				   int *out_ttl)
 {
 	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
+	struct mlx5e_rep_priv *uplink_rpriv;
 	struct rtable *rt;
 	struct neighbour *n = NULL;
 
@@ -1520,9 +2021,10 @@ static int mlx5e_route_lookup_ipv4(struct mlx5e_priv *priv,
 #else
 	return -EOPNOTSUPP;
 #endif
+	uplink_rpriv = mlx5_eswitch_get_uplink_priv(esw, REP_ETH);
 	/* if the egress device isn't on the same HW e-switch, we use the uplink */
 	if (!switchdev_port_same_parent_id(priv->netdev, rt->dst.dev))
-		*out_dev = mlx5_eswitch_get_uplink_netdev(esw);
+		*out_dev = uplink_rpriv->netdev;
 	else
 		*out_dev = rt->dst.dev;
 
@@ -1547,6 +2049,7 @@ static int mlx5e_route_lookup_ipv6(struct mlx5e_priv *priv,
 	struct dst_entry *dst;
 
 #if IS_ENABLED(CONFIG_INET) && IS_ENABLED(CONFIG_IPV6)
+	struct mlx5e_rep_priv *uplink_rpriv;
 	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
 	int ret;
 
@@ -1557,9 +2060,10 @@ static int mlx5e_route_lookup_ipv6(struct mlx5e_priv *priv,
 
 	*out_ttl = ip6_dst_hoplimit(dst);
 
+	uplink_rpriv = mlx5_eswitch_get_uplink_priv(esw, REP_ETH);
 	/* if the egress device isn't on the same HW e-switch, we use the uplink */
 	if (!switchdev_port_same_parent_id(priv->netdev, dst->dev))
-		*out_dev = mlx5_eswitch_get_uplink_netdev(esw);
+		*out_dev = uplink_rpriv->netdev;
 	else
 		*out_dev = dst->dev;
 #else
@@ -1859,7 +2363,9 @@ static int mlx5e_attach_encap(struct mlx5e_priv *priv,
 			      struct mlx5e_tc_flow *flow)
 {
 	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
-	struct net_device *up_dev = mlx5_eswitch_get_uplink_netdev(esw);
+	struct mlx5e_rep_priv *uplink_rpriv = mlx5_eswitch_get_uplink_priv(esw,
+									   REP_ETH);
+	struct net_device *up_dev = uplink_rpriv->netdev;
 	unsigned short family = ip_tunnel_info_af(tun_info);
 	struct mlx5e_priv *up_priv = netdev_priv(up_dev);
 	struct mlx5_esw_flow_attr *attr = flow->esw_attr;
@@ -1982,11 +2488,10 @@ static int parse_tc_fdb_actions(struct mlx5e_priv *priv, struct tcf_exts *exts,
 		}
 
 		if (is_tcf_mirred_egress_redirect(a)) {
-			int ifindex = tcf_mirred_ifindex(a);
 			struct net_device *out_dev;
 			struct mlx5e_priv *out_priv;
 
-			out_dev = __dev_get_by_index(dev_net(priv->netdev), ifindex);
+			out_dev = tcf_mirred_dev(a);
 
 			if (switchdev_port_same_parent_id(priv->netdev,
 							  out_dev)) {
@@ -1996,7 +2501,7 @@ static int parse_tc_fdb_actions(struct mlx5e_priv *priv, struct tcf_exts *exts,
 				rpriv = out_priv->ppriv;
 				attr->out_rep = rpriv->rep;
 			} else if (encap) {
-				parse_attr->mirred_ifindex = ifindex;
+				parse_attr->mirred_ifindex = out_dev->ifindex;
 				parse_attr->tun_info = *info;
 				attr->parse_attr = parse_attr;
 				attr->action |= MLX5_FLOW_CONTEXT_ACTION_ENCAP |
@@ -2182,6 +2687,7 @@ int mlx5e_tc_init(struct mlx5e_priv *priv)
 	struct mlx5e_tc_table *tc = &priv->fs.tc;
 
 	hash_init(tc->mod_hdr_tbl);
+	hash_init(tc->hairpin_tbl);
 
 	tc->ht_params = mlx5e_tc_flow_ht_params;
 	return rhashtable_init(&tc->ht, &tc->ht_params);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c
index ab92298eafc3..f292bb346985 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c
@@ -78,8 +78,14 @@ int mlx5e_napi_poll(struct napi_struct *napi, int budget)
 	for (i = 0; i < c->num_tc; i++)
 		mlx5e_cq_arm(&c->sq[i].cq);
 
-	if (MLX5E_TEST_BIT(c->rq.state, MLX5E_RQ_STATE_AM))
-		mlx5e_rx_am(&c->rq);
+	if (MLX5E_TEST_BIT(c->rq.state, MLX5E_RQ_STATE_AM)) {
+		struct net_dim_sample dim_sample;
+		net_dim_sample(c->rq.cq.event_ctr,
+			       c->rq.stats.packets,
+			       c->rq.stats.bytes,
+			       &dim_sample);
+		net_dim(&c->rq.dim, dim_sample);
+	}
 
 	mlx5e_cq_arm(&c->rq.cq);
 	mlx5e_cq_arm(&c->icosq.cq);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eq.c b/drivers/net/ethernet/mellanox/mlx5/core/eq.c
index 14d57828945d..05c1157bc9f2 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eq.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eq.c
@@ -534,6 +534,24 @@ static irqreturn_t mlx5_eq_int(int irq, void *eq_ptr)
 	return IRQ_HANDLED;
 }
 
+/* Some architectures don't latch interrupts when they are disabled, so using
+ * mlx5_eq_poll_irq_disabled could end up losing interrupts while trying to
+ * avoid losing them. Do not use it unless it is the last resort.
+ */
+u32 mlx5_eq_poll_irq_disabled(struct mlx5_eq *eq)
+{
+	u32 count_eqe;
+
+	disable_irq(eq->irqn);
+	count_eqe = eq->cons_index;
+	mlx5_eq_int(eq->irqn, eq);
+	count_eqe = eq->cons_index - count_eqe;
+	enable_irq(eq->irqn);
+
+	return count_eqe;
+}
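
A hypothetical last-resort caller (not in this patch), draining a stuck EQ while diagnosing a lost interrupt:

	static void mlx5_debug_drain_eq(struct mlx5_core_dev *dev, struct mlx5_eq *eq)
	{
		u32 handled = mlx5_eq_poll_irq_disabled(eq);

		mlx5_core_warn(dev, "drained %u EQEs with IRQ disabled\n", handled);
	}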
+
 static void init_eq_buf(struct mlx5_eq *eq)
 {
 	struct mlx5_eqe *eqe;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
index bbb140f517c4..5ecf2cddc16d 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
@@ -37,6 +37,7 @@
 #include <linux/mlx5/fs.h>
 #include "mlx5_core.h"
 #include "eswitch.h"
+#include "fs_core.h"
 
 #define UPLINK_VPORT 0xFFFF
 
@@ -867,9 +868,10 @@ static int esw_vport_enable_egress_acl(struct mlx5_eswitch *esw,
 	esw_debug(dev, "Create vport[%d] egress ACL log_max_size(%d)\n",
 		  vport->vport, MLX5_CAP_ESW_EGRESS_ACL(dev, log_max_ft_size));
 
-	root_ns = mlx5_get_flow_namespace(dev, MLX5_FLOW_NAMESPACE_ESW_EGRESS);
+	root_ns = mlx5_get_flow_vport_acl_namespace(dev, MLX5_FLOW_NAMESPACE_ESW_EGRESS,
+						    vport->vport);
 	if (!root_ns) {
-		esw_warn(dev, "Failed to get E-Switch egress flow namespace\n");
+		esw_warn(dev, "Failed to get E-Switch egress flow namespace for vport (%d)\n", vport->vport);
 		return -EOPNOTSUPP;
 	}
 
@@ -984,9 +986,10 @@ static int esw_vport_enable_ingress_acl(struct mlx5_eswitch *esw,
 	esw_debug(dev, "Create vport[%d] ingress ACL log_max_size(%d)\n",
 		  vport->vport, MLX5_CAP_ESW_INGRESS_ACL(dev, log_max_ft_size));
 
-	root_ns = mlx5_get_flow_namespace(dev, MLX5_FLOW_NAMESPACE_ESW_INGRESS);
+	root_ns = mlx5_get_flow_vport_acl_namespace(dev, MLX5_FLOW_NAMESPACE_ESW_INGRESS,
+						    vport->vport);
 	if (!root_ns) {
-		esw_warn(dev, "Failed to get E-Switch ingress flow namespace\n");
+		esw_warn(dev, "Failed to get E-Switch ingress flow namespace for vport (%d)\n", vport->vport);
 		return -EOPNOTSUPP;
 	}
 
@@ -1121,8 +1124,12 @@ static void esw_vport_disable_ingress_acl(struct mlx5_eswitch *esw,
 static int esw_vport_ingress_config(struct mlx5_eswitch *esw,
 				    struct mlx5_vport *vport)
 {
+	struct mlx5_fc *counter = vport->ingress.drop_counter;
+	struct mlx5_flow_destination drop_ctr_dst = {0};
+	struct mlx5_flow_destination *dst = NULL;
 	struct mlx5_flow_act flow_act = {0};
 	struct mlx5_flow_spec *spec;
+	int dest_num = 0;
 	int err = 0;
 	u8 *smac_v;
 
@@ -1186,9 +1193,18 @@ static int esw_vport_ingress_config(struct mlx5_eswitch *esw,
 
 	memset(spec, 0, sizeof(*spec));
 	flow_act.action = MLX5_FLOW_CONTEXT_ACTION_DROP;
+
+	/* Attach drop flow counter */
+	if (counter) {
+		flow_act.action |= MLX5_FLOW_CONTEXT_ACTION_COUNT;
+		drop_ctr_dst.type = MLX5_FLOW_DESTINATION_TYPE_COUNTER;
+		drop_ctr_dst.counter = counter;
+		dst = &drop_ctr_dst;
+		dest_num++;
+	}
 	vport->ingress.drop_rule =
 		mlx5_add_flow_rules(vport->ingress.acl, spec,
-				    &flow_act, NULL, 0);
+				    &flow_act, dst, dest_num);
 	if (IS_ERR(vport->ingress.drop_rule)) {
 		err = PTR_ERR(vport->ingress.drop_rule);
 		esw_warn(esw->dev,
@@ -1208,8 +1224,12 @@ out:
 static int esw_vport_egress_config(struct mlx5_eswitch *esw,
 				   struct mlx5_vport *vport)
 {
+	struct mlx5_fc *counter = vport->egress.drop_counter;
+	struct mlx5_flow_destination drop_ctr_dst = {0};
+	struct mlx5_flow_destination *dst = NULL;
 	struct mlx5_flow_act flow_act = {0};
 	struct mlx5_flow_spec *spec;
+	int dest_num = 0;
 	int err = 0;
 
 	esw_vport_cleanup_egress_rules(esw, vport);
@@ -1260,9 +1280,18 @@ static int esw_vport_egress_config(struct mlx5_eswitch *esw,
 	/* Drop others rule (star rule) */
 	memset(spec, 0, sizeof(*spec));
 	flow_act.action = MLX5_FLOW_CONTEXT_ACTION_DROP;
+
+	/* Attach egress drop flow counter */
+	if (counter) {
+		flow_act.action |= MLX5_FLOW_CONTEXT_ACTION_COUNT;
+		drop_ctr_dst.type = MLX5_FLOW_DESTINATION_TYPE_COUNTER;
+		drop_ctr_dst.counter = counter;
+		dst = &drop_ctr_dst;
+		dest_num++;
+	}
 	vport->egress.drop_rule =
 		mlx5_add_flow_rules(vport->egress.acl, spec,
-				    &flow_act, NULL, 0);
+				    &flow_act, dst, dest_num);
 	if (IS_ERR(vport->egress.drop_rule)) {
 		err = PTR_ERR(vport->egress.drop_rule);
 		esw_warn(esw->dev,
@@ -1290,7 +1319,7 @@ static int esw_create_tsar(struct mlx5_eswitch *esw)
 
 	err = mlx5_create_scheduling_element_cmd(dev,
 						 SCHEDULING_HIERARCHY_E_SWITCH,
-						 &tsar_ctx,
+						 tsar_ctx,
 						 &esw->qos.root_tsar_id);
 	if (err) {
 		esw_warn(esw->dev, "E-Switch create TSAR failed (%d)\n", err);
@@ -1333,20 +1362,20 @@ static int esw_vport_enable_qos(struct mlx5_eswitch *esw, int vport_num,
 	if (vport->qos.enabled)
 		return -EEXIST;
 
-	MLX5_SET(scheduling_context, &sched_ctx, element_type,
+	MLX5_SET(scheduling_context, sched_ctx, element_type,
 		 SCHEDULING_CONTEXT_ELEMENT_TYPE_VPORT);
-	vport_elem = MLX5_ADDR_OF(scheduling_context, &sched_ctx,
+	vport_elem = MLX5_ADDR_OF(scheduling_context, sched_ctx,
 				  element_attributes);
 	MLX5_SET(vport_element, vport_elem, vport_number, vport_num);
-	MLX5_SET(scheduling_context, &sched_ctx, parent_element_id,
+	MLX5_SET(scheduling_context, sched_ctx, parent_element_id,
 		 esw->qos.root_tsar_id);
-	MLX5_SET(scheduling_context, &sched_ctx, max_average_bw,
+	MLX5_SET(scheduling_context, sched_ctx, max_average_bw,
 		 initial_max_rate);
-	MLX5_SET(scheduling_context, &sched_ctx, bw_share, initial_bw_share);
+	MLX5_SET(scheduling_context, sched_ctx, bw_share, initial_bw_share);
 
 	err = mlx5_create_scheduling_element_cmd(dev,
 						 SCHEDULING_HIERARCHY_E_SWITCH,
-						 &sched_ctx,
+						 sched_ctx,
 						 &vport->qos.esw_tsar_ix);
 	if (err) {
 		esw_warn(esw->dev, "E-Switch create TSAR vport element failed (vport=%d,err=%d)\n",
@@ -1392,22 +1421,22 @@ static int esw_vport_qos_config(struct mlx5_eswitch *esw, int vport_num,
 	if (!vport->qos.enabled)
 		return -EIO;
 
-	MLX5_SET(scheduling_context, &sched_ctx, element_type,
+	MLX5_SET(scheduling_context, sched_ctx, element_type,
 		 SCHEDULING_CONTEXT_ELEMENT_TYPE_VPORT);
-	vport_elem = MLX5_ADDR_OF(scheduling_context, &sched_ctx,
+	vport_elem = MLX5_ADDR_OF(scheduling_context, sched_ctx,
 				  element_attributes);
 	MLX5_SET(vport_element, vport_elem, vport_number, vport_num);
-	MLX5_SET(scheduling_context, &sched_ctx, parent_element_id,
+	MLX5_SET(scheduling_context, sched_ctx, parent_element_id,
 		 esw->qos.root_tsar_id);
-	MLX5_SET(scheduling_context, &sched_ctx, max_average_bw,
+	MLX5_SET(scheduling_context, sched_ctx, max_average_bw,
 		 max_rate);
-	MLX5_SET(scheduling_context, &sched_ctx, bw_share, bw_share);
+	MLX5_SET(scheduling_context, sched_ctx, bw_share, bw_share);
 	bitmask |= MODIFY_SCHEDULING_ELEMENT_IN_MODIFY_BITMASK_MAX_AVERAGE_BW;
 	bitmask |= MODIFY_SCHEDULING_ELEMENT_IN_MODIFY_BITMASK_BW_SHARE;
 
 	err = mlx5_modify_scheduling_element_cmd(dev,
 						 SCHEDULING_HIERARCHY_E_SWITCH,
-						 &sched_ctx,
+						 sched_ctx,
 						 vport->qos.esw_tsar_ix,
 						 bitmask);
 	if (err) {
@@ -1455,6 +1484,41 @@ static void esw_apply_vport_conf(struct mlx5_eswitch *esw,
 	}
 }
 
+static void esw_vport_create_drop_counters(struct mlx5_vport *vport)
+{
+	struct mlx5_core_dev *dev = vport->dev;
+
+	if (MLX5_CAP_ESW_INGRESS_ACL(dev, flow_counter)) {
+		vport->ingress.drop_counter = mlx5_fc_create(dev, false);
+		if (IS_ERR(vport->ingress.drop_counter)) {
+			esw_warn(dev,
+				 "vport[%d] configure ingress drop rule counter failed\n",
+				 vport->vport);
+			vport->ingress.drop_counter = NULL;
+		}
+	}
+
+	if (MLX5_CAP_ESW_EGRESS_ACL(dev, flow_counter)) {
+		vport->egress.drop_counter = mlx5_fc_create(dev, false);
+		if (IS_ERR(vport->egress.drop_counter)) {
+			esw_warn(dev,
+				 "vport[%d] configure egress drop rule counter failed\n",
+				 vport->vport);
+			vport->egress.drop_counter = NULL;
+		}
+	}
+}
+
+static void esw_vport_destroy_drop_counters(struct mlx5_vport *vport)
+{
+	struct mlx5_core_dev *dev = vport->dev;
+
+	if (vport->ingress.drop_counter)
+		mlx5_fc_destroy(dev, vport->ingress.drop_counter);
+	if (vport->egress.drop_counter)
+		mlx5_fc_destroy(dev, vport->egress.drop_counter);
+}
+
 static void esw_enable_vport(struct mlx5_eswitch *esw, int vport_num,
 			     int enable_events)
 {
@@ -1481,6 +1545,10 @@ static void esw_enable_vport(struct mlx5_eswitch *esw, int vport_num,
 	if (!vport_num)
 		vport->info.trusted = true;
 
+	/* create steering drop counters for ingress and egress ACLs */
+	if (vport_num && esw->mode == SRIOV_LEGACY)
+		esw_vport_create_drop_counters(vport);
+
 	esw_vport_change_handle_locked(vport);
 
 	esw->enabled_vports++;
@@ -1519,6 +1587,7 @@ static void esw_disable_vport(struct mlx5_eswitch *esw, int vport_num)
 					      MLX5_ESW_VPORT_ADMIN_STATE_DOWN);
 		esw_vport_disable_egress_acl(esw, vport);
 		esw_vport_disable_ingress_acl(esw, vport);
+		esw_vport_destroy_drop_counters(vport);
 	}
 	esw->enabled_vports--;
 	mutex_unlock(&esw->state_lock);
@@ -1644,13 +1713,9 @@ int mlx5_eswitch_init(struct mlx5_core_dev *dev)
 		goto abort;
 	}
 
-	esw->offloads.vport_reps =
-		kzalloc(total_vports * sizeof(struct mlx5_eswitch_rep),
-			GFP_KERNEL);
-	if (!esw->offloads.vport_reps) {
-		err = -ENOMEM;
+	err = esw_offloads_init_reps(esw);
+	if (err)
 		goto abort;
-	}
 
 	hash_init(esw->offloads.encap_tbl);
 	hash_init(esw->offloads.mod_hdr_tbl);
@@ -1681,8 +1746,8 @@ int mlx5_eswitch_init(struct mlx5_core_dev *dev)
 abort:
 	if (esw->work_queue)
 		destroy_workqueue(esw->work_queue);
+	esw_offloads_cleanup_reps(esw);
 	kfree(esw->vports);
-	kfree(esw->offloads.vport_reps);
 	kfree(esw);
 	return err;
 }
@@ -1696,7 +1761,7 @@ void mlx5_eswitch_cleanup(struct mlx5_eswitch *esw)
 
 	esw->dev->priv.eswitch = NULL;
 	destroy_workqueue(esw->work_queue);
-	kfree(esw->offloads.vport_reps);
+	esw_offloads_cleanup_reps(esw);
 	kfree(esw->vports);
 	kfree(esw);
 }
@@ -2018,12 +2083,36 @@ unlock:
 	return err;
 }
 
+static void mlx5_eswitch_query_vport_drop_stats(struct mlx5_core_dev *dev,
+						int vport_idx,
+						struct mlx5_vport_drop_stats *stats)
+{
+	struct mlx5_eswitch *esw = dev->priv.eswitch;
+	struct mlx5_vport *vport = &esw->vports[vport_idx];
+	u64 bytes = 0;
+	u16 idx = 0;
+
+	if (!vport->enabled || esw->mode != SRIOV_LEGACY)
+		return;
+
+	if (vport->egress.drop_counter) {
+		idx = vport->egress.drop_counter->id;
+		mlx5_fc_query(dev, idx, &stats->rx_dropped, &bytes);
+	}
+
+	if (vport->ingress.drop_counter) {
+		idx = vport->ingress.drop_counter->id;
+		mlx5_fc_query(dev, idx, &stats->tx_dropped, &bytes);
+	}
+}
+
 int mlx5_eswitch_get_vport_stats(struct mlx5_eswitch *esw,
 				 int vport,
 				 struct ifla_vf_stats *vf_stats)
 {
 	int outlen = MLX5_ST_SZ_BYTES(query_vport_counter_out);
 	u32 in[MLX5_ST_SZ_DW(query_vport_counter_in)] = {0};
+	struct mlx5_vport_drop_stats stats = {0};
 	int err = 0;
 	u32 *out;
 
@@ -2078,6 +2167,10 @@ int mlx5_eswitch_get_vport_stats(struct mlx5_eswitch *esw,
 	vf_stats->broadcast =
 		MLX5_GET_CTR(out, received_eth_broadcast.packets);
 
+	mlx5_eswitch_query_vport_drop_stats(esw->dev, vport, &stats);
+	vf_stats->rx_dropped = stats.rx_dropped;
+	vf_stats->tx_dropped = stats.tx_dropped;
+
 free_out:
 	kvfree(out);
 	return err;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h
index 565c8b7a399a..2fa037066b2f 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h
@@ -45,6 +45,11 @@ enum {
 	SRIOV_OFFLOADS
 };
 
+enum {
+	REP_ETH,
+	NUM_REP_TYPES,
+};
+
 #ifdef CONFIG_MLX5_ESWITCH
 
 #define MLX5_MAX_UC_PER_VPORT(dev) \
@@ -68,6 +73,7 @@ struct vport_ingress {
 	struct mlx5_flow_group *drop_grp;
 	struct mlx5_flow_handle  *allow_rule;
 	struct mlx5_flow_handle  *drop_rule;
+	struct mlx5_fc           *drop_counter;
 };
 
 struct vport_egress {
@@ -76,6 +82,12 @@ struct vport_egress {
 	struct mlx5_flow_group *drop_grp;
 	struct mlx5_flow_handle  *allowed_vlan;
 	struct mlx5_flow_handle  *drop_rule;
+	struct mlx5_fc           *drop_counter;
+};
+
+struct mlx5_vport_drop_stats {
+	u64 rx_dropped;
+	u64 tx_dropped;
 };
 
 struct mlx5_vport_info {
@@ -133,25 +145,21 @@ struct mlx5_eswitch_fdb {
 	};
 };
 
-struct mlx5_esw_sq {
-	struct mlx5_flow_handle	*send_to_vport_rule;
-	struct list_head	 list;
+struct mlx5_eswitch_rep;
+struct mlx5_eswitch_rep_if {
+	int		       (*load)(struct mlx5_core_dev *dev,
+				       struct mlx5_eswitch_rep *rep);
+	void		       (*unload)(struct mlx5_eswitch_rep *rep);
+	void			*priv;
+	bool		       valid;
 };
 
 struct mlx5_eswitch_rep {
-	int		       (*load)(struct mlx5_eswitch *esw,
-				       struct mlx5_eswitch_rep *rep);
-	void		       (*unload)(struct mlx5_eswitch *esw,
-					 struct mlx5_eswitch_rep *rep);
+	struct mlx5_eswitch_rep_if rep_if[NUM_REP_TYPES];
 	u16		       vport;
 	u8		       hw_id[ETH_ALEN];
-	struct net_device      *netdev;
-
-	struct mlx5_flow_handle *vport_rx_rule;
-	struct list_head       vport_sqs_list;
 	u16		       vlan;
 	u32		       vlan_refcount;
-	bool		       valid;
 };
 
 struct mlx5_esw_offload {
@@ -197,6 +205,8 @@ struct mlx5_eswitch {
 
 void esw_offloads_cleanup(struct mlx5_eswitch *esw, int nvports);
 int esw_offloads_init(struct mlx5_eswitch *esw, int nvports);
+void esw_offloads_cleanup_reps(struct mlx5_eswitch *esw);
+int esw_offloads_init_reps(struct mlx5_eswitch *esw);
 
 /* E-Switch API */
 int mlx5_eswitch_init(struct mlx5_core_dev *dev);
@@ -221,6 +231,10 @@ int mlx5_eswitch_get_vport_config(struct mlx5_eswitch *esw,
 int mlx5_eswitch_get_vport_stats(struct mlx5_eswitch *esw,
 				 int vport,
 				 struct ifla_vf_stats *vf_stats);
+struct mlx5_flow_handle *
+mlx5_eswitch_add_send_to_vport_rule(struct mlx5_eswitch *esw, int vport,
+				    u32 sqn);
+void mlx5_eswitch_del_send_to_vport_rule(struct mlx5_flow_handle *rule);
 
 struct mlx5_flow_spec;
 struct mlx5_esw_flow_attr;
@@ -257,12 +271,6 @@ struct mlx5_esw_flow_attr {
 	struct mlx5e_tc_flow_parse_attr *parse_attr;
 };
 
-int mlx5_eswitch_sqs2vport_start(struct mlx5_eswitch *esw,
-				 struct mlx5_eswitch_rep *rep,
-				 u16 *sqns_array, int sqns_num);
-void mlx5_eswitch_sqs2vport_stop(struct mlx5_eswitch *esw,
-				 struct mlx5_eswitch_rep *rep);
-
 int mlx5_devlink_eswitch_mode_set(struct devlink *devlink, u16 mode);
 int mlx5_devlink_eswitch_mode_get(struct devlink *devlink, u16 *mode);
 int mlx5_devlink_eswitch_inline_mode_set(struct devlink *devlink, u8 mode);
@@ -272,10 +280,12 @@ int mlx5_devlink_eswitch_encap_mode_set(struct devlink *devlink, u8 encap);
 int mlx5_devlink_eswitch_encap_mode_get(struct devlink *devlink, u8 *encap);
 void mlx5_eswitch_register_vport_rep(struct mlx5_eswitch *esw,
 				     int vport_index,
-				     struct mlx5_eswitch_rep *rep);
+				     struct mlx5_eswitch_rep_if *rep_if,
+				     u8 rep_type);
 void mlx5_eswitch_unregister_vport_rep(struct mlx5_eswitch *esw,
-				       int vport_index);
-struct net_device *mlx5_eswitch_get_uplink_netdev(struct mlx5_eswitch *esw);
+				       int vport_index,
+				       u8 rep_type);
+void *mlx5_eswitch_get_uplink_priv(struct mlx5_eswitch *esw, u8 rep_type);
 
 int mlx5_eswitch_add_vlan_action(struct mlx5_eswitch *esw,
 				 struct mlx5_esw_flow_attr *attr);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
index 1143d80119bd..99f583a15cc3 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
@@ -130,7 +130,7 @@ static int esw_set_global_vlan_pop(struct mlx5_eswitch *esw, u8 val)
 	esw_debug(esw->dev, "%s applying global %s policy\n", __func__, val ? "pop" : "none");
 	for (vf_vport = 1; vf_vport < esw->enabled_vports; vf_vport++) {
 		rep = &esw->offloads.vport_reps[vf_vport];
-		if (!rep->valid)
+		if (!rep->rep_if[REP_ETH].valid)
 			continue;
 
 		err = __mlx5_eswitch_set_vport_vlan(esw, rep->vport, 0, 0, val);
@@ -302,7 +302,7 @@ out:
 	return err;
 }
 
-static struct mlx5_flow_handle *
+struct mlx5_flow_handle *
 mlx5_eswitch_add_send_to_vport_rule(struct mlx5_eswitch *esw, int vport, u32 sqn)
 {
 	struct mlx5_flow_act flow_act = {0};
@@ -339,57 +339,9 @@ out:
 	return flow_rule;
 }
 
-void mlx5_eswitch_sqs2vport_stop(struct mlx5_eswitch *esw,
-				 struct mlx5_eswitch_rep *rep)
-{
-	struct mlx5_esw_sq *esw_sq, *tmp;
-
-	if (esw->mode != SRIOV_OFFLOADS)
-		return;
-
-	list_for_each_entry_safe(esw_sq, tmp, &rep->vport_sqs_list, list) {
-		mlx5_del_flow_rules(esw_sq->send_to_vport_rule);
-		list_del(&esw_sq->list);
-		kfree(esw_sq);
-	}
-}
-
-int mlx5_eswitch_sqs2vport_start(struct mlx5_eswitch *esw,
-				 struct mlx5_eswitch_rep *rep,
-				 u16 *sqns_array, int sqns_num)
+void mlx5_eswitch_del_send_to_vport_rule(struct mlx5_flow_handle *rule)
 {
-	struct mlx5_flow_handle *flow_rule;
-	struct mlx5_esw_sq *esw_sq;
-	int err;
-	int i;
-
-	if (esw->mode != SRIOV_OFFLOADS)
-		return 0;
-
-	for (i = 0; i < sqns_num; i++) {
-		esw_sq = kzalloc(sizeof(*esw_sq), GFP_KERNEL);
-		if (!esw_sq) {
-			err = -ENOMEM;
-			goto out_err;
-		}
-
-		/* Add re-inject rule to the PF/representor sqs */
-		flow_rule = mlx5_eswitch_add_send_to_vport_rule(esw,
-								rep->vport,
-								sqns_array[i]);
-		if (IS_ERR(flow_rule)) {
-			err = PTR_ERR(flow_rule);
-			kfree(esw_sq);
-			goto out_err;
-		}
-		esw_sq->send_to_vport_rule = flow_rule;
-		list_add(&esw_sq->list, &rep->vport_sqs_list);
-	}
-	return 0;
-
-out_err:
-	mlx5_eswitch_sqs2vport_stop(esw, rep);
-	return err;
+	mlx5_del_flow_rules(rule);
 }
 
 static int esw_add_fdb_miss_rule(struct mlx5_eswitch *esw)
@@ -732,12 +684,111 @@ static int esw_offloads_start(struct mlx5_eswitch *esw)
 	return err;
 }
 
-int esw_offloads_init(struct mlx5_eswitch *esw, int nvports)
+void esw_offloads_cleanup_reps(struct mlx5_eswitch *esw)
+{
+	kfree(esw->offloads.vport_reps);
+}
+
+int esw_offloads_init_reps(struct mlx5_eswitch *esw)
+{
+	int total_vfs = MLX5_TOTAL_VPORTS(esw->dev);
+	struct mlx5_core_dev *dev = esw->dev;
+	struct mlx5_esw_offload *offloads;
+	struct mlx5_eswitch_rep *rep;
+	u8 hw_id[ETH_ALEN];
+	int vport;
+
+	esw->offloads.vport_reps = kcalloc(total_vfs,
+					   sizeof(struct mlx5_eswitch_rep),
+					   GFP_KERNEL);
+	if (!esw->offloads.vport_reps)
+		return -ENOMEM;
+
+	offloads = &esw->offloads;
+	mlx5_query_nic_vport_mac_address(dev, 0, hw_id);
+
+	for (vport = 0; vport < total_vfs; vport++) {
+		rep = &offloads->vport_reps[vport];
+
+		rep->vport = vport;
+		ether_addr_copy(rep->hw_id, hw_id);
+	}
+
+	offloads->vport_reps[0].vport = FDB_UPLINK_VPORT;
+
+	return 0;
+}
+
+static void esw_offloads_unload_reps_type(struct mlx5_eswitch *esw, int nvports,
+					  u8 rep_type)
+{
+	struct mlx5_eswitch_rep *rep;
+	int vport;
+
+	for (vport = nvports - 1; vport >= 0; vport--) {
+		rep = &esw->offloads.vport_reps[vport];
+		if (!rep->rep_if[rep_type].valid)
+			continue;
+
+		rep->rep_if[rep_type].unload(rep);
+	}
+}
+
+static void esw_offloads_unload_reps(struct mlx5_eswitch *esw, int nvports)
+{
+	u8 rep_type = NUM_REP_TYPES;
+
+	while (rep_type-- > 0)
+		esw_offloads_unload_reps_type(esw, nvports, rep_type);
+}
+
+static int esw_offloads_load_reps_type(struct mlx5_eswitch *esw, int nvports,
+				       u8 rep_type)
 {
 	struct mlx5_eswitch_rep *rep;
 	int vport;
 	int err;
 
+	for (vport = 0; vport < nvports; vport++) {
+		rep = &esw->offloads.vport_reps[vport];
+		if (!rep->rep_if[rep_type].valid)
+			continue;
+
+		err = rep->rep_if[rep_type].load(esw->dev, rep);
+		if (err)
+			goto err_reps;
+	}
+
+	return 0;
+
+err_reps:
+	esw_offloads_unload_reps_type(esw, vport, rep_type);
+	return err;
+}
+
+static int esw_offloads_load_reps(struct mlx5_eswitch *esw, int nvports)
+{
+	u8 rep_type = 0;
+	int err;
+
+	for (rep_type = 0; rep_type < NUM_REP_TYPES; rep_type++) {
+		err = esw_offloads_load_reps_type(esw, nvports, rep_type);
+		if (err)
+			goto err_reps;
+	}
+
+	return err;
+
+err_reps:
+	while (rep_type-- > 0)
+		esw_offloads_unload_reps_type(esw, nvports, rep_type);
+	return err;
+}
+
+int esw_offloads_init(struct mlx5_eswitch *esw, int nvports)
+{
+	int err;
+
 	/* disable PF RoCE so missed packets don't go through RoCE steering */
 	mlx5_dev_list_lock();
 	mlx5_remove_dev_by_protocol(esw->dev, MLX5_INTERFACE_PROTOCOL_IB);
@@ -755,25 +806,13 @@ int esw_offloads_init(struct mlx5_eswitch *esw, int nvports)
 	if (err)
 		goto create_fg_err;
 
-	for (vport = 0; vport < nvports; vport++) {
-		rep = &esw->offloads.vport_reps[vport];
-		if (!rep->valid)
-			continue;
-
-		err = rep->load(esw, rep);
-		if (err)
-			goto err_reps;
-	}
+	err = esw_offloads_load_reps(esw, nvports);
+	if (err)
+		goto err_reps;
 
 	return 0;
 
 err_reps:
-	for (vport--; vport >= 0; vport--) {
-		rep = &esw->offloads.vport_reps[vport];
-		if (!rep->valid)
-			continue;
-		rep->unload(esw, rep);
-	}
 	esw_destroy_vport_rx_group(esw);
 
 create_fg_err:
@@ -814,16 +853,7 @@ static int esw_offloads_stop(struct mlx5_eswitch *esw)
 
 void esw_offloads_cleanup(struct mlx5_eswitch *esw, int nvports)
 {
-	struct mlx5_eswitch_rep *rep;
-	int vport;
-
-	for (vport = nvports - 1; vport >= 0; vport--) {
-		rep = &esw->offloads.vport_reps[vport];
-		if (!rep->valid)
-			continue;
-		rep->unload(esw, rep);
-	}
-
+	esw_offloads_unload_reps(esw, nvports);
 	esw_destroy_vport_rx_group(esw);
 	esw_destroy_offloads_table(esw);
 	esw_destroy_offloads_fdb_tables(esw);
@@ -1120,27 +1150,23 @@ int mlx5_devlink_eswitch_encap_mode_get(struct devlink *devlink, u8 *encap)
 
 void mlx5_eswitch_register_vport_rep(struct mlx5_eswitch *esw,
 				     int vport_index,
-				     struct mlx5_eswitch_rep *__rep)
+				     struct mlx5_eswitch_rep_if *__rep_if,
+				     u8 rep_type)
 {
 	struct mlx5_esw_offload *offloads = &esw->offloads;
-	struct mlx5_eswitch_rep *rep;
-
-	rep = &offloads->vport_reps[vport_index];
+	struct mlx5_eswitch_rep_if *rep_if;
 
-	memset(rep, 0, sizeof(*rep));
+	rep_if = &offloads->vport_reps[vport_index].rep_if[rep_type];
 
-	rep->load   = __rep->load;
-	rep->unload = __rep->unload;
-	rep->vport  = __rep->vport;
-	rep->netdev = __rep->netdev;
-	ether_addr_copy(rep->hw_id, __rep->hw_id);
+	rep_if->load   = __rep_if->load;
+	rep_if->unload = __rep_if->unload;
+	rep_if->priv = __rep_if->priv;
 
-	INIT_LIST_HEAD(&rep->vport_sqs_list);
-	rep->valid = true;
+	rep_if->valid = true;
 }
 
 void mlx5_eswitch_unregister_vport_rep(struct mlx5_eswitch *esw,
-				       int vport_index)
+				       int vport_index, u8 rep_type)
 {
 	struct mlx5_esw_offload *offloads = &esw->offloads;
 	struct mlx5_eswitch_rep *rep;
@@ -1148,17 +1174,17 @@ void mlx5_eswitch_unregister_vport_rep(struct mlx5_eswitch *esw,
 	rep = &offloads->vport_reps[vport_index];
 
 	if (esw->mode == SRIOV_OFFLOADS && esw->vports[vport_index].enabled)
-		rep->unload(esw, rep);
+		rep->rep_if[rep_type].unload(rep);
 
-	rep->valid = false;
+	rep->rep_if[rep_type].valid = false;
 }
 
-struct net_device *mlx5_eswitch_get_uplink_netdev(struct mlx5_eswitch *esw)
+void *mlx5_eswitch_get_uplink_priv(struct mlx5_eswitch *esw, u8 rep_type)
 {
 #define UPLINK_REP_INDEX 0
 	struct mlx5_esw_offload *offloads = &esw->offloads;
 	struct mlx5_eswitch_rep *rep;
 
 	rep = &offloads->vport_reps[UPLINK_REP_INDEX];
-	return rep->netdev;
+	return rep->rep_if[rep_type].priv;
 }
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
index dfaad9ecb2b8..c025c98700e4 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
@@ -89,6 +89,9 @@
 /* One more level for tc */
 #define KERNEL_MIN_LEVEL (KERNEL_NIC_PRIO_NUM_LEVELS + 1)
 
+#define KERNEL_NIC_TC_NUM_PRIOS  1
+#define KERNEL_NIC_TC_NUM_LEVELS 2
+
 #define ANCHOR_NUM_LEVELS 1
 #define ANCHOR_NUM_PRIOS 1
 #define ANCHOR_MIN_LEVEL (BY_PASS_MIN_LEVEL + 1)
@@ -134,7 +137,7 @@ static struct init_tree_node {
 			 ADD_NS(ADD_MULTIPLE_PRIO(ETHTOOL_NUM_PRIOS,
 						  ETHTOOL_PRIO_NUM_LEVELS))),
 		ADD_PRIO(0, KERNEL_MIN_LEVEL, 0, {},
-			 ADD_NS(ADD_MULTIPLE_PRIO(1, 1),
+			 ADD_NS(ADD_MULTIPLE_PRIO(KERNEL_NIC_TC_NUM_PRIOS, KERNEL_NIC_TC_NUM_LEVELS),
 				ADD_MULTIPLE_PRIO(KERNEL_NIC_NUM_PRIOS,
 						  KERNEL_NIC_PRIO_NUM_LEVELS))),
 		ADD_PRIO(0, BY_PASS_MIN_LEVEL, 0,
@@ -2026,16 +2029,6 @@ struct mlx5_flow_namespace *mlx5_get_flow_namespace(struct mlx5_core_dev *dev,
 			return &steering->fdb_root_ns->ns;
 		else
 			return NULL;
-	case MLX5_FLOW_NAMESPACE_ESW_EGRESS:
-		if (steering->esw_egress_root_ns)
-			return &steering->esw_egress_root_ns->ns;
-		else
-			return NULL;
-	case MLX5_FLOW_NAMESPACE_ESW_INGRESS:
-		if (steering->esw_ingress_root_ns)
-			return &steering->esw_ingress_root_ns->ns;
-		else
-			return NULL;
 	case MLX5_FLOW_NAMESPACE_SNIFFER_RX:
 		if (steering->sniffer_rx_root_ns)
 			return &steering->sniffer_rx_root_ns->ns;
@@ -2066,6 +2059,33 @@ struct mlx5_flow_namespace *mlx5_get_flow_namespace(struct mlx5_core_dev *dev,
 }
 EXPORT_SYMBOL(mlx5_get_flow_namespace);
 
+struct mlx5_flow_namespace *mlx5_get_flow_vport_acl_namespace(struct mlx5_core_dev *dev,
+							      enum mlx5_flow_namespace_type type,
+							      int vport)
+{
+	struct mlx5_flow_steering *steering = dev->priv.steering;
+
+	if (!steering || vport >= MLX5_TOTAL_VPORTS(dev))
+		return NULL;
+
+	switch (type) {
+	case MLX5_FLOW_NAMESPACE_ESW_EGRESS:
+		if (steering->esw_egress_root_ns &&
+		    steering->esw_egress_root_ns[vport])
+			return &steering->esw_egress_root_ns[vport]->ns;
+		else
+			return NULL;
+	case MLX5_FLOW_NAMESPACE_ESW_INGRESS:
+		if (steering->esw_ingress_root_ns &&
+		    steering->esw_ingress_root_ns[vport])
+			return &steering->esw_ingress_root_ns[vport]->ns;
+		else
+			return NULL;
+	default:
+		return NULL;
+	}
+}
+
 static struct fs_prio *fs_create_prio(struct mlx5_flow_namespace *ns,
 				      unsigned int prio, int num_levels)
 {
@@ -2343,13 +2363,41 @@ static void cleanup_root_ns(struct mlx5_flow_root_namespace *root_ns)
 	clean_tree(&root_ns->ns.node);
 }
 
+static void cleanup_egress_acls_root_ns(struct mlx5_core_dev *dev)
+{
+	struct mlx5_flow_steering *steering = dev->priv.steering;
+	int i;
+
+	if (!steering->esw_egress_root_ns)
+		return;
+
+	for (i = 0; i < MLX5_TOTAL_VPORTS(dev); i++)
+		cleanup_root_ns(steering->esw_egress_root_ns[i]);
+
+	kfree(steering->esw_egress_root_ns);
+}
+
+static void cleanup_ingress_acls_root_ns(struct mlx5_core_dev *dev)
+{
+	struct mlx5_flow_steering *steering = dev->priv.steering;
+	int i;
+
+	if (!steering->esw_ingress_root_ns)
+		return;
+
+	for (i = 0; i < MLX5_TOTAL_VPORTS(dev); i++)
+		cleanup_root_ns(steering->esw_ingress_root_ns[i]);
+
+	kfree(steering->esw_ingress_root_ns);
+}
+
 void mlx5_cleanup_fs(struct mlx5_core_dev *dev)
 {
 	struct mlx5_flow_steering *steering = dev->priv.steering;
 
 	cleanup_root_ns(steering->root_ns);
-	cleanup_root_ns(steering->esw_egress_root_ns);
-	cleanup_root_ns(steering->esw_ingress_root_ns);
+	cleanup_egress_acls_root_ns(dev);
+	cleanup_ingress_acls_root_ns(dev);
 	cleanup_root_ns(steering->fdb_root_ns);
 	cleanup_root_ns(steering->sniffer_rx_root_ns);
 	cleanup_root_ns(steering->sniffer_tx_root_ns);
@@ -2418,34 +2466,86 @@ out_err:
 	return PTR_ERR(prio);
 }
 
-static int init_ingress_acl_root_ns(struct mlx5_flow_steering *steering)
+static int init_egress_acl_root_ns(struct mlx5_flow_steering *steering, int vport)
 {
 	struct fs_prio *prio;
 
-	steering->esw_egress_root_ns = create_root_ns(steering, FS_FT_ESW_EGRESS_ACL);
-	if (!steering->esw_egress_root_ns)
+	steering->esw_egress_root_ns[vport] = create_root_ns(steering, FS_FT_ESW_EGRESS_ACL);
+	if (!steering->esw_egress_root_ns[vport])
 		return -ENOMEM;
 
 	/* create 1 prio */
-	prio = fs_create_prio(&steering->esw_egress_root_ns->ns, 0,
-			      MLX5_TOTAL_VPORTS(steering->dev));
+	prio = fs_create_prio(&steering->esw_egress_root_ns[vport]->ns, 0, 1);
 	return PTR_ERR_OR_ZERO(prio);
 }
 
-static int init_egress_acl_root_ns(struct mlx5_flow_steering *steering)
+static int init_ingress_acl_root_ns(struct mlx5_flow_steering *steering, int vport)
 {
 	struct fs_prio *prio;
 
-	steering->esw_ingress_root_ns = create_root_ns(steering, FS_FT_ESW_INGRESS_ACL);
-	if (!steering->esw_ingress_root_ns)
+	steering->esw_ingress_root_ns[vport] = create_root_ns(steering, FS_FT_ESW_INGRESS_ACL);
+	if (!steering->esw_ingress_root_ns[vport])
 		return -ENOMEM;
 
 	/* create 1 prio */
-	prio = fs_create_prio(&steering->esw_ingress_root_ns->ns, 0,
-			      MLX5_TOTAL_VPORTS(steering->dev));
+	prio = fs_create_prio(&steering->esw_ingress_root_ns[vport]->ns, 0, 1);
 	return PTR_ERR_OR_ZERO(prio);
 }
 
+static int init_egress_acls_root_ns(struct mlx5_core_dev *dev)
+{
+	struct mlx5_flow_steering *steering = dev->priv.steering;
+	int err;
+	int i;
+
+	steering->esw_egress_root_ns = kcalloc(MLX5_TOTAL_VPORTS(dev),
+					       sizeof(*steering->esw_egress_root_ns),
+					       GFP_KERNEL);
+	if (!steering->esw_egress_root_ns)
+		return -ENOMEM;
+
+	for (i = 0; i < MLX5_TOTAL_VPORTS(dev); i++) {
+		err = init_egress_acl_root_ns(steering, i);
+		if (err)
+			goto cleanup_root_ns;
+	}
+
+	return 0;
+
+cleanup_root_ns:
+	for (i--; i >= 0; i--)
+		cleanup_root_ns(steering->esw_egress_root_ns[i]);
+	kfree(steering->esw_egress_root_ns);
+	return err;
+}
+
+static int init_ingress_acls_root_ns(struct mlx5_core_dev *dev)
+{
+	struct mlx5_flow_steering *steering = dev->priv.steering;
+	int err;
+	int i;
+
+	steering->esw_ingress_root_ns = kcalloc(MLX5_TOTAL_VPORTS(dev),
+						sizeof(*steering->esw_ingress_root_ns),
+						GFP_KERNEL);
+	if (!steering->esw_ingress_root_ns)
+		return -ENOMEM;
+
+	for (i = 0; i < MLX5_TOTAL_VPORTS(dev); i++) {
+		err = init_ingress_acl_root_ns(steering, i);
+		if (err)
+			goto cleanup_root_ns;
+	}
+
+	return 0;
+
+cleanup_root_ns:
+	for (i--; i >= 0; i--)
+		cleanup_root_ns(steering->esw_ingress_root_ns[i]);
+	kfree(steering->esw_ingress_root_ns);
+	return err;
+}
+
 int mlx5_init_fs(struct mlx5_core_dev *dev)
 {
 	struct mlx5_flow_steering *steering;
@@ -2488,12 +2588,12 @@ int mlx5_init_fs(struct mlx5_core_dev *dev)
 				goto err;
 		}
 		if (MLX5_CAP_ESW_EGRESS_ACL(dev, ft_support)) {
-			err = init_egress_acl_root_ns(steering);
+			err = init_egress_acls_root_ns(dev);
 			if (err)
 				goto err;
 		}
 		if (MLX5_CAP_ESW_INGRESS_ACL(dev, ft_support)) {
-			err = init_ingress_acl_root_ns(steering);
+			err = init_ingress_acls_root_ns(dev);
 			if (err)
 				goto err;
 		}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.h b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.h
index 397d24a621a4..05262708f14b 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.h
@@ -71,8 +71,8 @@ struct mlx5_flow_steering {
 	struct kmem_cache               *ftes_cache;
 	struct mlx5_flow_root_namespace *root_ns;
 	struct mlx5_flow_root_namespace *fdb_root_ns;
-	struct mlx5_flow_root_namespace *esw_egress_root_ns;
-	struct mlx5_flow_root_namespace *esw_ingress_root_ns;
+	struct mlx5_flow_root_namespace **esw_egress_root_ns;
+	struct mlx5_flow_root_namespace **esw_ingress_root_ns;
 	struct mlx5_flow_root_namespace	*sniffer_tx_root_ns;
 	struct mlx5_flow_root_namespace	*sniffer_rx_root_ns;
 };
@@ -233,6 +233,8 @@ void mlx5_fc_queue_stats_work(struct mlx5_core_dev *dev,
 			      unsigned long delay);
 void mlx5_fc_update_sampling_interval(struct mlx5_core_dev *dev,
 				      unsigned long interval);
+int mlx5_fc_query(struct mlx5_core_dev *dev, u16 id,
+		  u64 *packets, u64 *bytes);
 
 int mlx5_init_fs(struct mlx5_core_dev *dev);
 void mlx5_cleanup_fs(struct mlx5_core_dev *dev);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_counters.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_counters.c
index 89d1f8650033..b7ab929d5f8e 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fs_counters.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_counters.c
@@ -312,6 +312,12 @@ void mlx5_cleanup_fc_stats(struct mlx5_core_dev *dev)
 	}
 }
 
+int mlx5_fc_query(struct mlx5_core_dev *dev, u16 id,
+		  u64 *packets, u64 *bytes)
+{
+	return mlx5_cmd_fc_query(dev, id, packets, bytes);
+}
+
 void mlx5_fc_query_cached(struct mlx5_fc *counter,
 			  u64 *bytes, u64 *packets, u64 *lastuse)
 {
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ethtool.c b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ethtool.c
index 6f338a9219c8..90cb50fe17fd 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ethtool.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ethtool.c
@@ -254,4 +254,5 @@ const struct ethtool_ops mlx5i_ethtool_ops = {
 const struct ethtool_ops mlx5i_pkey_ethtool_ops = {
 	.get_drvinfo        = mlx5i_get_drvinfo,
 	.get_link           = ethtool_op_get_link,
+	.get_ts_info        = mlx5i_get_ts_info,
 };
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c
index a281d95ce17c..f953378bd13d 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c
@@ -41,7 +41,6 @@
 static int mlx5i_open(struct net_device *netdev);
 static int mlx5i_close(struct net_device *netdev);
 static int mlx5i_change_mtu(struct net_device *netdev, int new_mtu);
-static int mlx5i_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd);
 
 static const struct net_device_ops mlx5i_netdev_ops = {
 	.ndo_open                = mlx5i_open,
@@ -242,7 +241,8 @@ static void mlx5i_cleanup_tx(struct mlx5e_priv *priv)
 
 static int mlx5i_create_flow_steering(struct mlx5e_priv *priv)
 {
-	int err;
+	struct ttc_params ttc_params = {};
+	int tt, err;
 
 	priv->fs.ns = mlx5_get_flow_namespace(priv->mdev,
 					       MLX5_FLOW_NAMESPACE_KERNEL);
@@ -257,14 +257,23 @@ static int mlx5i_create_flow_steering(struct mlx5e_priv *priv)
 		priv->netdev->hw_features &= ~NETIF_F_NTUPLE;
 	}
 
-	err = mlx5e_create_inner_ttc_table(priv);
+	mlx5e_set_ttc_basic_params(priv, &ttc_params);
+	mlx5e_set_inner_ttc_ft_params(&ttc_params);
+	for (tt = 0; tt < MLX5E_NUM_INDIR_TIRS; tt++)
+		ttc_params.indir_tirn[tt] = priv->inner_indir_tir[tt].tirn;
+
+	err = mlx5e_create_inner_ttc_table(priv, &ttc_params, &priv->fs.inner_ttc);
 	if (err) {
 		netdev_err(priv->netdev, "Failed to create inner ttc table, err=%d\n",
 			   err);
 		goto err_destroy_arfs_tables;
 	}
 
-	err = mlx5e_create_ttc_table(priv);
+	mlx5e_set_ttc_ft_params(&ttc_params);
+	for (tt = 0; tt < MLX5E_NUM_INDIR_TIRS; tt++)
+		ttc_params.indir_tirn[tt] = priv->indir_tir[tt].tirn;
+
+	err = mlx5e_create_ttc_table(priv, &ttc_params, &priv->fs.ttc);
 	if (err) {
 		netdev_err(priv->netdev, "Failed to create ttc table, err=%d\n",
 			   err);
@@ -274,7 +283,7 @@ static int mlx5i_create_flow_steering(struct mlx5e_priv *priv)
 	return 0;
 
 err_destroy_inner_ttc_table:
-	mlx5e_destroy_inner_ttc_table(priv);
+	mlx5e_destroy_inner_ttc_table(priv, &priv->fs.inner_ttc);
 err_destroy_arfs_tables:
 	mlx5e_arfs_destroy_tables(priv);
 
@@ -283,8 +292,8 @@ err_destroy_arfs_tables:
 
 static void mlx5i_destroy_flow_steering(struct mlx5e_priv *priv)
 {
-	mlx5e_destroy_ttc_table(priv);
-	mlx5e_destroy_inner_ttc_table(priv);
+	mlx5e_destroy_ttc_table(priv, &priv->fs.ttc);
+	mlx5e_destroy_inner_ttc_table(priv, &priv->fs.inner_ttc);
 	mlx5e_arfs_destroy_tables(priv);
 }
 
@@ -398,7 +407,7 @@ int mlx5i_dev_init(struct net_device *dev)
 	return 0;
 }
 
-static int mlx5i_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
+int mlx5i_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
 {
 	struct mlx5e_priv *priv = mlx5i_epriv(dev);
 
@@ -486,7 +495,7 @@ static int mlx5i_close(struct net_device *netdev)
 	mlx5_fs_remove_rx_underlay_qpn(mdev, ipriv->qp.qpn);
 	mlx5i_uninit_underlay_qp(epriv);
 	mlx5e_deactivate_priv_channels(epriv);
-	mlx5e_close_channels(&epriv->channels);;
+	mlx5e_close_channels(&epriv->channels);
 unlock:
 	mutex_unlock(&epriv->state_lock);
 	return 0;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.h b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.h
index 49008022c306..6d9053bcbe95 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.h
@@ -76,9 +76,10 @@ int mlx5i_pkey_del_qpn(struct net_device *netdev, u32 qpn);
 /* Get the net-device corresponding to the given underlay QPN */
 struct net_device *mlx5i_pkey_get_netdev(struct net_device *netdev, u32 qpn);
 
-/* Shared ndo functionts */
+/* Shared ndo functions */
 int mlx5i_dev_init(struct net_device *dev);
 void mlx5i_dev_cleanup(struct net_device *dev);
+int mlx5i_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd);
 
 /* Parent profile functions */
 void mlx5i_init(struct mlx5_core_dev *mdev,
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib_vlan.c b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib_vlan.c
index 531b02cc979b..b69e9d847a6b 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib_vlan.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib_vlan.c
@@ -140,6 +140,7 @@ static int mlx5i_pkey_close(struct net_device *netdev);
 static int mlx5i_pkey_dev_init(struct net_device *dev);
 static void mlx5i_pkey_dev_cleanup(struct net_device *netdev);
 static int mlx5i_pkey_change_mtu(struct net_device *netdev, int new_mtu);
+static int mlx5i_pkey_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd);
 
 static const struct net_device_ops mlx5i_pkey_netdev_ops = {
 	.ndo_open                = mlx5i_pkey_open,
@@ -147,6 +148,7 @@ static const struct net_device_ops mlx5i_pkey_netdev_ops = {
 	.ndo_init                = mlx5i_pkey_dev_init,
 	.ndo_uninit              = mlx5i_pkey_dev_cleanup,
 	.ndo_change_mtu          = mlx5i_pkey_change_mtu,
+	.ndo_do_ioctl            = mlx5i_pkey_ioctl,
 };
 
 /* Child NDOs */
@@ -174,6 +176,11 @@ static int mlx5i_pkey_dev_init(struct net_device *dev)
 	return mlx5i_dev_init(dev);
 }
 
+static int mlx5i_pkey_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
+{
+	return mlx5i_ioctl(dev, ifr, cmd);
+}
+
 static void mlx5i_pkey_dev_cleanup(struct net_device *netdev)
 {
 	return mlx5i_dev_cleanup(netdev);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h b/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h
index b05868728da7..394552f36fcf 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h
@@ -116,6 +116,7 @@ int mlx5_destroy_scheduling_element_cmd(struct mlx5_core_dev *dev, u8 hierarchy,
 int mlx5_wait_for_vf_pages(struct mlx5_core_dev *dev);
 u64 mlx5_read_internal_timer(struct mlx5_core_dev *dev);
 struct mlx5_eq *mlx5_eqn2eq(struct mlx5_core_dev *dev, int eqn);
+u32 mlx5_eq_poll_irq_disabled(struct mlx5_eq *eq);
 void mlx5_cq_tasklet_cb(unsigned long data);
 
 int mlx5_query_pcam_reg(struct mlx5_core_dev *dev, u32 *pcam, u8 feature_group,
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/transobj.c b/drivers/net/ethernet/mellanox/mlx5/core/transobj.c
index 5e128d7a9ffd..9e38343a951f 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/transobj.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/transobj.c
@@ -398,3 +398,217 @@ void mlx5_core_destroy_rqt(struct mlx5_core_dev *dev, u32 rqtn)
 	mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
 }
 EXPORT_SYMBOL(mlx5_core_destroy_rqt);
+
+static int mlx5_hairpin_create_rq(struct mlx5_core_dev *mdev,
+				  struct mlx5_hairpin_params *params, u32 *rqn)
+{
+	u32 in[MLX5_ST_SZ_DW(create_rq_in)] = {0};
+	void *rqc, *wq;
+
+	rqc = MLX5_ADDR_OF(create_rq_in, in, ctx);
+	wq  = MLX5_ADDR_OF(rqc, rqc, wq);
+
+	MLX5_SET(rqc, rqc, hairpin, 1);
+	MLX5_SET(rqc, rqc, state, MLX5_RQC_STATE_RST);
+	MLX5_SET(rqc, rqc, counter_set_id, params->q_counter);
+
+	MLX5_SET(wq, wq, log_hairpin_data_sz, params->log_data_size);
+	MLX5_SET(wq, wq, log_hairpin_num_packets, params->log_num_packets);
+
+	return mlx5_core_create_rq(mdev, in, MLX5_ST_SZ_BYTES(create_rq_in), rqn);
+}
+
+static int mlx5_hairpin_create_sq(struct mlx5_core_dev *mdev,
+				  struct mlx5_hairpin_params *params, u32 *sqn)
+{
+	u32 in[MLX5_ST_SZ_DW(create_sq_in)] = {0};
+	void *sqc, *wq;
+
+	sqc = MLX5_ADDR_OF(create_sq_in, in, ctx);
+	wq  = MLX5_ADDR_OF(sqc, sqc, wq);
+
+	MLX5_SET(sqc, sqc, hairpin, 1);
+	MLX5_SET(sqc, sqc, state, MLX5_SQC_STATE_RST);
+
+	MLX5_SET(wq, wq, log_hairpin_data_sz, params->log_data_size);
+	MLX5_SET(wq, wq, log_hairpin_num_packets, params->log_num_packets);
+
+	return mlx5_core_create_sq(mdev, in, MLX5_ST_SZ_BYTES(create_sq_in), sqn);
+}
+
+static int mlx5_hairpin_create_queues(struct mlx5_hairpin *hp,
+				      struct mlx5_hairpin_params *params)
+{
+	int i, j, err;
+
+	for (i = 0; i < hp->num_channels; i++) {
+		err = mlx5_hairpin_create_rq(hp->func_mdev, params, &hp->rqn[i]);
+		if (err)
+			goto out_err_rq;
+	}
+
+	for (i = 0; i < hp->num_channels; i++) {
+		err = mlx5_hairpin_create_sq(hp->peer_mdev, params, &hp->sqn[i]);
+		if (err)
+			goto out_err_sq;
+	}
+
+	return 0;
+
+out_err_sq:
+	for (j = 0; j < i; j++)
+		mlx5_core_destroy_sq(hp->peer_mdev, hp->sqn[j]);
+	i = hp->num_channels;
+out_err_rq:
+	for (j = 0; j < i; j++)
+		mlx5_core_destroy_rq(hp->func_mdev, hp->rqn[j]);
+	return err;
+}
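
The error unwind just above uses a compact idiom worth spelling out: when an SQ create fails, only the first i SQs exist, so the first cleanup loop destroys j < i of them; i is then reset to num_channels so that falling through to the second label destroys every RQ, all of which are known to exist at that point. A self-contained sketch of the pattern (create_a/destroy_a and create_b/destroy_b are illustrative stand-ins for the RQ/SQ helpers):

    int create_a(int i);
    int create_b(int i);
    void destroy_a(int i);
    void destroy_b(int i);

    static int create_all(int n)
    {
        int i, j, err;

        for (i = 0; i < n; i++) {
            err = create_a(i);
            if (err)
                goto out_err_a;
        }
        for (i = 0; i < n; i++) {
            err = create_b(i);
            if (err)
                goto out_err_b;
        }
        return 0;

    out_err_b:
        for (j = 0; j < i; j++)   /* only the first i B objects exist */
            destroy_b(j);
        i = n;                    /* every A object exists: unwind them all */
    out_err_a:
        for (j = 0; j < i; j++)
            destroy_a(j);
        return err;
    }

The payoff is one label per creation stage instead of per failure point.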
+
+static void mlx5_hairpin_destroy_queues(struct mlx5_hairpin *hp)
+{
+	int i;
+
+	for (i = 0; i < hp->num_channels; i++) {
+		mlx5_core_destroy_rq(hp->func_mdev, hp->rqn[i]);
+		mlx5_core_destroy_sq(hp->peer_mdev, hp->sqn[i]);
+	}
+}
+
+static int mlx5_hairpin_modify_rq(struct mlx5_core_dev *func_mdev, u32 rqn,
+				  int curr_state, int next_state,
+				  u16 peer_vhca, u32 peer_sq)
+{
+	u32 in[MLX5_ST_SZ_DW(modify_rq_in)] = {0};
+	void *rqc;
+
+	rqc = MLX5_ADDR_OF(modify_rq_in, in, ctx);
+
+	if (next_state == MLX5_RQC_STATE_RDY) {
+		MLX5_SET(rqc, rqc, hairpin_peer_sq, peer_sq);
+		MLX5_SET(rqc, rqc, hairpin_peer_vhca, peer_vhca);
+	}
+
+	MLX5_SET(modify_rq_in, in, rq_state, curr_state);
+	MLX5_SET(rqc, rqc, state, next_state);
+
+	return mlx5_core_modify_rq(func_mdev, rqn,
+				   in, MLX5_ST_SZ_BYTES(modify_rq_in));
+}
+
+static int mlx5_hairpin_modify_sq(struct mlx5_core_dev *peer_mdev, u32 sqn,
+				  int curr_state, int next_state,
+				  u16 peer_vhca, u32 peer_rq)
+{
+	u32 in[MLX5_ST_SZ_DW(modify_sq_in)] = {0};
+	void *sqc;
+
+	sqc = MLX5_ADDR_OF(modify_sq_in, in, ctx);
+
+	if (next_state == MLX5_SQC_STATE_RDY) {
+		MLX5_SET(sqc, sqc, hairpin_peer_rq, peer_rq);
+		MLX5_SET(sqc, sqc, hairpin_peer_vhca, peer_vhca);
+	}
+
+	MLX5_SET(modify_sq_in, in, sq_state, curr_state);
+	MLX5_SET(sqc, sqc, state, next_state);
+
+	return mlx5_core_modify_sq(peer_mdev, sqn,
+				   in, MLX5_ST_SZ_BYTES(modify_sq_in));
+}
+
+static int mlx5_hairpin_pair_queues(struct mlx5_hairpin *hp)
+{
+	int i, j, err;
+
+	/* set peer SQs */
+	for (i = 0; i < hp->num_channels; i++) {
+		err = mlx5_hairpin_modify_sq(hp->peer_mdev, hp->sqn[i],
+					     MLX5_SQC_STATE_RST, MLX5_SQC_STATE_RDY,
+					     MLX5_CAP_GEN(hp->func_mdev, vhca_id), hp->rqn[i]);
+		if (err)
+			goto err_modify_sq;
+	}
+
+	/* set func RQs */
+	for (i = 0; i < hp->num_channels; i++) {
+		err = mlx5_hairpin_modify_rq(hp->func_mdev, hp->rqn[i],
+					     MLX5_RQC_STATE_RST, MLX5_RQC_STATE_RDY,
+					     MLX5_CAP_GEN(hp->peer_mdev, vhca_id), hp->sqn[i]);
+		if (err)
+			goto err_modify_rq;
+	}
+
+	return 0;
+
+err_modify_rq:
+	for (j = 0; j < i; j++)
+		mlx5_hairpin_modify_rq(hp->func_mdev, hp->rqn[j], MLX5_RQC_STATE_RDY,
+				       MLX5_RQC_STATE_RST, 0, 0);
+	i = hp->num_channels;
+err_modify_sq:
+	for (j = 0; j < i; j++)
+		mlx5_hairpin_modify_sq(hp->peer_mdev, hp->sqn[j], MLX5_SQC_STATE_RDY,
+				       MLX5_SQC_STATE_RST, 0, 0);
+	return err;
+}
+
+static void mlx5_hairpin_unpair_queues(struct mlx5_hairpin *hp)
+{
+	int i;
+
+	/* unset func RQs */
+	for (i = 0; i < hp->num_channels; i++)
+		mlx5_hairpin_modify_rq(hp->func_mdev, hp->rqn[i], MLX5_RQC_STATE_RDY,
+				       MLX5_RQC_STATE_RST, 0, 0);
+
+	/* unset peer SQs */
+	for (i = 0; i < hp->num_channels; i++)
+		mlx5_hairpin_modify_sq(hp->peer_mdev, hp->sqn[i], MLX5_SQC_STATE_RDY,
+				       MLX5_SQC_STATE_RST, 0, 0);
+}
+
+struct mlx5_hairpin *
+mlx5_core_hairpin_create(struct mlx5_core_dev *func_mdev,
+			 struct mlx5_core_dev *peer_mdev,
+			 struct mlx5_hairpin_params *params)
+{
+	struct mlx5_hairpin *hp;
+	int size, err;
+
+	size = sizeof(*hp) + params->num_channels * 2 * sizeof(u32);
+	hp = kzalloc(size, GFP_KERNEL);
+	if (!hp)
+		return ERR_PTR(-ENOMEM);
+
+	hp->func_mdev = func_mdev;
+	hp->peer_mdev = peer_mdev;
+	hp->num_channels = params->num_channels;
+
+	hp->rqn = (void *)hp + sizeof(*hp);
+	hp->sqn = hp->rqn + params->num_channels;
+
+	/* alloc and pair func --> peer hairpin */
+	err = mlx5_hairpin_create_queues(hp, params);
+	if (err)
+		goto err_create_queues;
+
+	err = mlx5_hairpin_pair_queues(hp);
+	if (err)
+		goto err_pair_queues;
+
+	return hp;
+
+err_pair_queues:
+	mlx5_hairpin_destroy_queues(hp);
+err_create_queues:
+	kfree(hp);
+	return ERR_PTR(err);
+}
+
+void mlx5_core_hairpin_destroy(struct mlx5_hairpin *hp)
+{
+	mlx5_hairpin_unpair_queues(hp);
+	mlx5_hairpin_destroy_queues(hp);
+	kfree(hp);
+}
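
One detail worth noting in mlx5_core_hairpin_create(): the rqn and sqn arrays are not separate allocations. A single kzalloc() covers the struct plus 2 * num_channels trailing u32s, and the two pointers are aimed into the tail, so one kfree() in mlx5_core_hairpin_destroy() releases everything. The same layout in a self-contained user-space sketch (all names here are illustrative):

    #include <stdlib.h>

    struct hairpin_sketch {
        int num_channels;
        unsigned int *rqn;  /* both arrays live in the tail of this object */
        unsigned int *sqn;
    };

    static struct hairpin_sketch *hairpin_sketch_alloc(int num_channels)
    {
        /* one allocation: header, then rqn[], then sqn[] */
        size_t size = sizeof(struct hairpin_sketch) +
                      2 * (size_t)num_channels * sizeof(unsigned int);
        struct hairpin_sketch *hp = calloc(1, size);

        if (!hp)
            return NULL;
        hp->num_channels = num_channels;
        hp->rqn = (unsigned int *)(hp + 1);   /* first trailing array */
        hp->sqn = hp->rqn + num_channels;     /* second trailing array */
        return hp;  /* a single free(hp) releases struct and arrays */
    }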
diff --git a/drivers/net/ethernet/mellanox/mlxsw/core.c b/drivers/net/ethernet/mellanox/mlxsw/core.c
index f3315bc874ad..3529b545675d 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/core.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/core.c
@@ -113,6 +113,7 @@ struct mlxsw_core {
 	struct mlxsw_thermal *thermal;
 	struct mlxsw_core_port *ports;
 	unsigned int max_ports;
+	bool reload_fail;
 	unsigned long driver_priv[0];
 	/* driver_priv has to be always the last item */
 };
@@ -962,7 +963,28 @@ mlxsw_devlink_sb_occ_tc_port_bind_get(struct devlink_port *devlink_port,
 						     pool_type, p_cur, p_max);
 }
 
+static int mlxsw_devlink_core_bus_device_reload(struct devlink *devlink)
+{
+	struct mlxsw_core *mlxsw_core = devlink_priv(devlink);
+	const struct mlxsw_bus *mlxsw_bus = mlxsw_core->bus;
+	int err;
+
+	if (!mlxsw_bus->reset)
+		return -EOPNOTSUPP;
+
+	mlxsw_core_bus_device_unregister(mlxsw_core, true);
+	mlxsw_bus->reset(mlxsw_core->bus_priv);
+	err = mlxsw_core_bus_device_register(mlxsw_core->bus_info,
+					     mlxsw_core->bus,
+					     mlxsw_core->bus_priv, true,
+					     devlink);
+	if (err)
+		mlxsw_core->reload_fail = true;
+	return err;
+}
+
 static const struct devlink_ops mlxsw_devlink_ops = {
+	.reload				= mlxsw_devlink_core_bus_device_reload,
 	.port_type_set			= mlxsw_devlink_port_type_set,
 	.port_split			= mlxsw_devlink_port_split,
 	.port_unsplit			= mlxsw_devlink_port_unsplit,
@@ -980,23 +1002,26 @@ static const struct devlink_ops mlxsw_devlink_ops = {
 
 int mlxsw_core_bus_device_register(const struct mlxsw_bus_info *mlxsw_bus_info,
 				   const struct mlxsw_bus *mlxsw_bus,
-				   void *bus_priv)
+				   void *bus_priv, bool reload,
+				   struct devlink *devlink)
 {
 	const char *device_kind = mlxsw_bus_info->device_kind;
 	struct mlxsw_core *mlxsw_core;
 	struct mlxsw_driver *mlxsw_driver;
-	struct devlink *devlink;
 	size_t alloc_size;
 	int err;
 
 	mlxsw_driver = mlxsw_core_driver_get(device_kind);
 	if (!mlxsw_driver)
 		return -EINVAL;
-	alloc_size = sizeof(*mlxsw_core) + mlxsw_driver->priv_size;
-	devlink = devlink_alloc(&mlxsw_devlink_ops, alloc_size);
-	if (!devlink) {
-		err = -ENOMEM;
-		goto err_devlink_alloc;
+
+	if (!reload) {
+		alloc_size = sizeof(*mlxsw_core) + mlxsw_driver->priv_size;
+		devlink = devlink_alloc(&mlxsw_devlink_ops, alloc_size);
+		if (!devlink) {
+			err = -ENOMEM;
+			goto err_devlink_alloc;
+		}
 	}
 
 	mlxsw_core = devlink_priv(devlink);
@@ -1012,6 +1037,12 @@ int mlxsw_core_bus_device_register(const struct mlxsw_bus_info *mlxsw_bus_info,
 	if (err)
 		goto err_bus_init;
 
+	if (mlxsw_driver->resources_register && !reload) {
+		err = mlxsw_driver->resources_register(mlxsw_core);
+		if (err)
+			goto err_register_resources;
+	}
+
 	err = mlxsw_ports_init(mlxsw_core);
 	if (err)
 		goto err_ports_init;
@@ -1032,9 +1063,11 @@ int mlxsw_core_bus_device_register(const struct mlxsw_bus_info *mlxsw_bus_info,
 	if (err)
 		goto err_emad_init;
 
-	err = devlink_register(devlink, mlxsw_bus_info->dev);
-	if (err)
-		goto err_devlink_register;
+	if (!reload) {
+		err = devlink_register(devlink, mlxsw_bus_info->dev);
+		if (err)
+			goto err_devlink_register;
+	}
 
 	err = mlxsw_hwmon_init(mlxsw_core, mlxsw_bus_info, &mlxsw_core->hwmon);
 	if (err)
@@ -1057,7 +1090,8 @@ err_driver_init:
 	mlxsw_thermal_fini(mlxsw_core->thermal);
 err_thermal_init:
 err_hwmon_init:
-	devlink_unregister(devlink);
+	if (!reload)
+		devlink_unregister(devlink);
 err_devlink_register:
 	mlxsw_emad_fini(mlxsw_core);
 err_emad_init:
@@ -1067,26 +1101,40 @@ err_alloc_lag_mapping:
 err_ports_init:
 	mlxsw_bus->fini(bus_priv);
 err_bus_init:
-	devlink_free(devlink);
+	if (!reload)
+		devlink_resources_unregister(devlink, NULL);
+err_register_resources:
+	if (!reload)
+		devlink_free(devlink);
 err_devlink_alloc:
 	mlxsw_core_driver_put(device_kind);
 	return err;
 }
 EXPORT_SYMBOL(mlxsw_core_bus_device_register);
 
-void mlxsw_core_bus_device_unregister(struct mlxsw_core *mlxsw_core)
+void mlxsw_core_bus_device_unregister(struct mlxsw_core *mlxsw_core,
+				      bool reload)
 {
 	const char *device_kind = mlxsw_core->bus_info->device_kind;
 	struct devlink *devlink = priv_to_devlink(mlxsw_core);
 
+	if (mlxsw_core->reload_fail)
+		goto reload_fail;
+
 	if (mlxsw_core->driver->fini)
 		mlxsw_core->driver->fini(mlxsw_core);
 	mlxsw_thermal_fini(mlxsw_core->thermal);
-	devlink_unregister(devlink);
+	if (!reload)
+		devlink_unregister(devlink);
 	mlxsw_emad_fini(mlxsw_core);
 	kfree(mlxsw_core->lag.mapping);
 	mlxsw_ports_fini(mlxsw_core);
+	if (!reload)
+		devlink_resources_unregister(devlink, NULL);
 	mlxsw_core->bus->fini(mlxsw_core->bus_priv);
+	if (reload)
+		return;
+reload_fail:
 	devlink_free(devlink);
 	mlxsw_core_driver_put(device_kind);
 }
@@ -1791,6 +1839,22 @@ void mlxsw_core_flush_owq(void)
 }
 EXPORT_SYMBOL(mlxsw_core_flush_owq);
 
+int mlxsw_core_kvd_sizes_get(struct mlxsw_core *mlxsw_core,
+			     const struct mlxsw_config_profile *profile,
+			     u64 *p_single_size, u64 *p_double_size,
+			     u64 *p_linear_size)
+{
+	struct mlxsw_driver *driver = mlxsw_core->driver;
+
+	if (!driver->kvd_sizes_get)
+		return -EINVAL;
+
+	return driver->kvd_sizes_get(mlxsw_core, profile,
+				     p_single_size, p_double_size,
+				     p_linear_size);
+}
+EXPORT_SYMBOL(mlxsw_core_kvd_sizes_get);
+
 static int __init mlxsw_core_module_init(void)
 {
 	int err;
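
Two functional changes land in core.c above: a devlink .reload op, and a kvd_sizes_get hook through which the bus layer can ask the driver for its KVD partitioning instead of computing it locally. The reload path is the interesting one: it tears the device down with reload=true so the devlink instance and the resources registered on it survive, resets the bus, and registers again onto the same devlink; user space drives this with "devlink dev reload <dev>". A condensed restatement of the flow, using this patch's names:

    /* Condensed restatement of mlxsw_devlink_core_bus_device_reload(). */
    static int reload_sketch(struct devlink *devlink)
    {
        struct mlxsw_core *core = devlink_priv(devlink);
        int err;

        if (!core->bus->reset)          /* e.g. the i2c bus has no reset */
            return -EOPNOTSUPP;

        /* reload=true: keep the devlink instance and its resources */
        mlxsw_core_bus_device_unregister(core, true);
        core->bus->reset(core->bus_priv);   /* PCI: drop IRQs, SW reset */
        err = mlxsw_core_bus_device_register(core->bus_info, core->bus,
                                             core->bus_priv, true, devlink);
        if (err)    /* the next unregister will only free the devlink */
            core->reload_fail = true;
        return err;
    }

The reload_fail flag exists because after a failed re-registration most of the teardown has already happened, so the eventual unregister must skip straight to freeing the devlink instance.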
diff --git a/drivers/net/ethernet/mellanox/mlxsw/core.h b/drivers/net/ethernet/mellanox/mlxsw/core.h
index 6e966af72fc4..5ddafd74dc00 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/core.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/core.h
@@ -66,8 +66,9 @@ void mlxsw_core_driver_unregister(struct mlxsw_driver *mlxsw_driver);
 
 int mlxsw_core_bus_device_register(const struct mlxsw_bus_info *mlxsw_bus_info,
 				   const struct mlxsw_bus *mlxsw_bus,
-				   void *bus_priv);
-void mlxsw_core_bus_device_unregister(struct mlxsw_core *mlxsw_core);
+				   void *bus_priv, bool reload,
+				   struct devlink *devlink);
+void mlxsw_core_bus_device_unregister(struct mlxsw_core *mlxsw_core, bool reload);
 
 struct mlxsw_tx_info {
 	u8 local_port;
@@ -308,10 +309,20 @@ struct mlxsw_driver {
 				       u32 *p_cur, u32 *p_max);
 	void (*txhdr_construct)(struct sk_buff *skb,
 				const struct mlxsw_tx_info *tx_info);
+	int (*resources_register)(struct mlxsw_core *mlxsw_core);
+	int (*kvd_sizes_get)(struct mlxsw_core *mlxsw_core,
+			     const struct mlxsw_config_profile *profile,
+			     u64 *p_single_size, u64 *p_double_size,
+			     u64 *p_linear_size);
 	u8 txhdr_len;
 	const struct mlxsw_config_profile *profile;
 };
 
+int mlxsw_core_kvd_sizes_get(struct mlxsw_core *mlxsw_core,
+			     const struct mlxsw_config_profile *profile,
+			     u64 *p_single_size, u64 *p_double_size,
+			     u64 *p_linear_size);
+
 bool mlxsw_core_res_valid(struct mlxsw_core *mlxsw_core,
 			  enum mlxsw_res_id res_id);
 
@@ -332,6 +343,7 @@ struct mlxsw_bus {
 		    const struct mlxsw_config_profile *profile,
 		    struct mlxsw_res *res);
 	void (*fini)(void *bus_priv);
+	void (*reset)(void *bus_priv);
 	bool (*skb_transmit_busy)(void *bus_priv,
 				  const struct mlxsw_tx_info *tx_info);
 	int (*skb_transmit)(void *bus_priv, struct sk_buff *skb,
diff --git a/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.c b/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.c
index 6a979a09ab72..b698fb481b2e 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.c
@@ -310,9 +310,33 @@ struct mlxsw_afa_block {
 	struct mlxsw_afa_set *first_set;
 	struct mlxsw_afa_set *cur_set;
 	unsigned int cur_act_index; /* In current set. */
-	struct list_head fwd_entry_ref_list;
+	struct list_head resource_list; /* List of resources held by actions
+					 * in this block.
+					 */
 };
 
+struct mlxsw_afa_resource {
+	struct list_head list;
+	void (*destructor)(struct mlxsw_afa_block *block,
+			   struct mlxsw_afa_resource *resource);
+};
+
+static void mlxsw_afa_resource_add(struct mlxsw_afa_block *block,
+				   struct mlxsw_afa_resource *resource)
+{
+	list_add(&resource->list, &block->resource_list);
+}
+
+static void mlxsw_afa_resources_destroy(struct mlxsw_afa_block *block)
+{
+	struct mlxsw_afa_resource *resource, *tmp;
+
+	list_for_each_entry_safe(resource, tmp, &block->resource_list, list) {
+		list_del(&resource->list);
+		resource->destructor(block, resource);
+	}
+}
+
 struct mlxsw_afa_block *mlxsw_afa_block_create(struct mlxsw_afa *mlxsw_afa)
 {
 	struct mlxsw_afa_block *block;
@@ -320,7 +344,7 @@ struct mlxsw_afa_block *mlxsw_afa_block_create(struct mlxsw_afa *mlxsw_afa)
 	block = kzalloc(sizeof(*block), GFP_KERNEL);
 	if (!block)
 		return NULL;
-	INIT_LIST_HEAD(&block->fwd_entry_ref_list);
+	INIT_LIST_HEAD(&block->resource_list);
 	block->afa = mlxsw_afa;
 
 	/* At least one action set is always present, so just create it here */
@@ -336,8 +360,6 @@ err_first_set_create:
 }
 EXPORT_SYMBOL(mlxsw_afa_block_create);
 
-static void mlxsw_afa_fwd_entry_refs_destroy(struct mlxsw_afa_block *block);
-
 void mlxsw_afa_block_destroy(struct mlxsw_afa_block *block)
 {
 	struct mlxsw_afa_set *set = block->first_set;
@@ -348,7 +370,7 @@ void mlxsw_afa_block_destroy(struct mlxsw_afa_block *block)
 		mlxsw_afa_set_put(block->afa, set);
 		set = next_set;
 	} while (set);
-	mlxsw_afa_fwd_entry_refs_destroy(block);
+	mlxsw_afa_resources_destroy(block);
 	kfree(block);
 }
 EXPORT_SYMBOL(mlxsw_afa_block_destroy);
@@ -489,10 +511,29 @@ static void mlxsw_afa_fwd_entry_put(struct mlxsw_afa *mlxsw_afa,
 }
 
 struct mlxsw_afa_fwd_entry_ref {
-	struct list_head list;
+	struct mlxsw_afa_resource resource;
 	struct mlxsw_afa_fwd_entry *fwd_entry;
 };
 
+static void
+mlxsw_afa_fwd_entry_ref_destroy(struct mlxsw_afa_block *block,
+				struct mlxsw_afa_fwd_entry_ref *fwd_entry_ref)
+{
+	mlxsw_afa_fwd_entry_put(block->afa, fwd_entry_ref->fwd_entry);
+	kfree(fwd_entry_ref);
+}
+
+static void
+mlxsw_afa_fwd_entry_ref_destructor(struct mlxsw_afa_block *block,
+				   struct mlxsw_afa_resource *resource)
+{
+	struct mlxsw_afa_fwd_entry_ref *fwd_entry_ref;
+
+	fwd_entry_ref = container_of(resource, struct mlxsw_afa_fwd_entry_ref,
+				     resource);
+	mlxsw_afa_fwd_entry_ref_destroy(block, fwd_entry_ref);
+}
+
 static struct mlxsw_afa_fwd_entry_ref *
 mlxsw_afa_fwd_entry_ref_create(struct mlxsw_afa_block *block, u8 local_port)
 {
@@ -509,7 +550,8 @@ mlxsw_afa_fwd_entry_ref_create(struct mlxsw_afa_block *block, u8 local_port)
 		goto err_fwd_entry_get;
 	}
 	fwd_entry_ref->fwd_entry = fwd_entry;
-	list_add(&fwd_entry_ref->list, &block->fwd_entry_ref_list);
+	fwd_entry_ref->resource.destructor = mlxsw_afa_fwd_entry_ref_destructor;
+	mlxsw_afa_resource_add(block, &fwd_entry_ref->resource);
 	return fwd_entry_ref;
 
 err_fwd_entry_get:
@@ -517,23 +559,51 @@ err_fwd_entry_get:
 	return ERR_PTR(err);
 }
 
+struct mlxsw_afa_counter {
+	struct mlxsw_afa_resource resource;
+	u32 counter_index;
+};
+
 static void
-mlxsw_afa_fwd_entry_ref_destroy(struct mlxsw_afa_block *block,
-				struct mlxsw_afa_fwd_entry_ref *fwd_entry_ref)
+mlxsw_afa_counter_destroy(struct mlxsw_afa_block *block,
+			  struct mlxsw_afa_counter *counter)
 {
-	list_del(&fwd_entry_ref->list);
-	mlxsw_afa_fwd_entry_put(block->afa, fwd_entry_ref->fwd_entry);
-	kfree(fwd_entry_ref);
+	block->afa->ops->counter_index_put(block->afa->ops_priv,
+					   counter->counter_index);
+	kfree(counter);
+}
+
+static void
+mlxsw_afa_counter_destructor(struct mlxsw_afa_block *block,
+			     struct mlxsw_afa_resource *resource)
+{
+	struct mlxsw_afa_counter *counter;
+
+	counter = container_of(resource, struct mlxsw_afa_counter, resource);
+	mlxsw_afa_counter_destroy(block, counter);
 }
 
-static void mlxsw_afa_fwd_entry_refs_destroy(struct mlxsw_afa_block *block)
+static struct mlxsw_afa_counter *
+mlxsw_afa_counter_create(struct mlxsw_afa_block *block)
 {
-	struct mlxsw_afa_fwd_entry_ref *fwd_entry_ref;
-	struct mlxsw_afa_fwd_entry_ref *tmp;
+	struct mlxsw_afa_counter *counter;
+	int err;
+
+	counter = kzalloc(sizeof(*counter), GFP_KERNEL);
+	if (!counter)
+		return ERR_PTR(-ENOMEM);
+
+	err = block->afa->ops->counter_index_get(block->afa->ops_priv,
+						 &counter->counter_index);
+	if (err)
+		goto err_counter_index_get;
+	counter->resource.destructor = mlxsw_afa_counter_destructor;
+	mlxsw_afa_resource_add(block, &counter->resource);
+	return counter;
 
-	list_for_each_entry_safe(fwd_entry_ref, tmp,
-				 &block->fwd_entry_ref_list, list)
-		mlxsw_afa_fwd_entry_ref_destroy(block, fwd_entry_ref);
+err_counter_index_get:
+	kfree(counter);
+	return ERR_PTR(err);
 }
 
 #define MLXSW_AFA_ONE_ACTION_LEN 32
@@ -690,6 +760,16 @@ MLXSW_ITEM32(afa, trapdisc, forward_action, 0x00, 0, 4);
  */
 MLXSW_ITEM32(afa, trapdisc, trap_id, 0x04, 0, 9);
 
+/* afa_trapdisc_mirror_agent
+ * Mirror agent.
+ */
+MLXSW_ITEM32(afa, trapdisc, mirror_agent, 0x08, 29, 3);
+
+/* afa_trapdisc_mirror_enable
+ * Mirror enable.
+ */
+MLXSW_ITEM32(afa, trapdisc, mirror_enable, 0x08, 24, 1);
+
 static inline void
 mlxsw_afa_trapdisc_pack(char *payload,
 			enum mlxsw_afa_trapdisc_trap_action trap_action,
@@ -701,6 +781,14 @@ mlxsw_afa_trapdisc_pack(char *payload,
 	mlxsw_afa_trapdisc_trap_id_set(payload, trap_id);
 }
 
+static inline void
+mlxsw_afa_trapdisc_mirror_pack(char *payload, bool mirror_enable,
+			       u8 mirror_agent)
+{
+	mlxsw_afa_trapdisc_mirror_enable_set(payload, mirror_enable);
+	mlxsw_afa_trapdisc_mirror_agent_set(payload, mirror_agent);
+}
+
 int mlxsw_afa_block_append_drop(struct mlxsw_afa_block *block)
 {
 	char *act = mlxsw_afa_block_append_action(block,
@@ -746,6 +834,104 @@ int mlxsw_afa_block_append_trap_and_forward(struct mlxsw_afa_block *block,
 }
 EXPORT_SYMBOL(mlxsw_afa_block_append_trap_and_forward);
 
+struct mlxsw_afa_mirror {
+	struct mlxsw_afa_resource resource;
+	int span_id;
+	u8 local_in_port;
+	u8 local_out_port;
+	bool ingress;
+};
+
+static void
+mlxsw_afa_mirror_destroy(struct mlxsw_afa_block *block,
+			 struct mlxsw_afa_mirror *mirror)
+{
+	block->afa->ops->mirror_del(block->afa->ops_priv,
+				    mirror->local_in_port,
+				    mirror->local_out_port,
+				    mirror->ingress);
+	kfree(mirror);
+}
+
+static void
+mlxsw_afa_mirror_destructor(struct mlxsw_afa_block *block,
+			    struct mlxsw_afa_resource *resource)
+{
+	struct mlxsw_afa_mirror *mirror;
+
+	mirror = container_of(resource, struct mlxsw_afa_mirror, resource);
+	mlxsw_afa_mirror_destroy(block, mirror);
+}
+
+static struct mlxsw_afa_mirror *
+mlxsw_afa_mirror_create(struct mlxsw_afa_block *block,
+			u8 local_in_port, u8 local_out_port,
+			bool ingress)
+{
+	struct mlxsw_afa_mirror *mirror;
+	int err;
+
+	mirror = kzalloc(sizeof(*mirror), GFP_KERNEL);
+	if (!mirror)
+		return ERR_PTR(-ENOMEM);
+
+	err = block->afa->ops->mirror_add(block->afa->ops_priv,
+					  local_in_port, local_out_port,
+					  ingress, &mirror->span_id);
+	if (err)
+		goto err_mirror_add;
+
+	mirror->ingress = ingress;
+	mirror->local_out_port = local_out_port;
+	mirror->local_in_port = local_in_port;
+	mirror->resource.destructor = mlxsw_afa_mirror_destructor;
+	mlxsw_afa_resource_add(block, &mirror->resource);
+	return mirror;
+
+err_mirror_add:
+	kfree(mirror);
+	return ERR_PTR(err);
+}
+
+static int
+mlxsw_afa_block_append_allocated_mirror(struct mlxsw_afa_block *block,
+					u8 mirror_agent)
+{
+	char *act = mlxsw_afa_block_append_action(block,
+						  MLXSW_AFA_TRAPDISC_CODE,
+						  MLXSW_AFA_TRAPDISC_SIZE);
+	if (!act)
+		return -ENOBUFS;
+	mlxsw_afa_trapdisc_pack(act, MLXSW_AFA_TRAPDISC_TRAP_ACTION_NOP,
+				MLXSW_AFA_TRAPDISC_FORWARD_ACTION_FORWARD, 0);
+	mlxsw_afa_trapdisc_mirror_pack(act, true, mirror_agent);
+	return 0;
+}
+
+int
+mlxsw_afa_block_append_mirror(struct mlxsw_afa_block *block,
+			      u8 local_in_port, u8 local_out_port, bool ingress)
+{
+	struct mlxsw_afa_mirror *mirror;
+	int err;
+
+	mirror = mlxsw_afa_mirror_create(block, local_in_port, local_out_port,
+					 ingress);
+	if (IS_ERR(mirror))
+		return PTR_ERR(mirror);
+
+	err = mlxsw_afa_block_append_allocated_mirror(block, mirror->span_id);
+	if (err)
+		goto err_append_allocated_mirror;
+
+	return 0;
+
+err_append_allocated_mirror:
+	mlxsw_afa_mirror_destroy(block, mirror);
+	return err;
+}
+EXPORT_SYMBOL(mlxsw_afa_block_append_mirror);
+
 /* Forwarding Action
  * -----------------
  * Forwarding Action can be used to implement Policy Based Switching (PBS)
@@ -853,11 +1039,10 @@ mlxsw_afa_polcnt_pack(char *payload,
 	mlxsw_afa_polcnt_counter_index_set(payload, counter_index);
 }
 
-int mlxsw_afa_block_append_counter(struct mlxsw_afa_block *block,
-				   u32 counter_index)
+int mlxsw_afa_block_append_allocated_counter(struct mlxsw_afa_block *block,
+					     u32 counter_index)
 {
-	char *act = mlxsw_afa_block_append_action(block,
-						  MLXSW_AFA_POLCNT_CODE,
+	char *act = mlxsw_afa_block_append_action(block, MLXSW_AFA_POLCNT_CODE,
 						  MLXSW_AFA_POLCNT_SIZE);
 	if (!act)
 		return -ENOBUFS;
@@ -865,6 +1050,32 @@ int mlxsw_afa_block_append_counter(struct mlxsw_afa_block *block,
 			      counter_index);
 	return 0;
 }
+EXPORT_SYMBOL(mlxsw_afa_block_append_allocated_counter);
+
+int mlxsw_afa_block_append_counter(struct mlxsw_afa_block *block,
+				   u32 *p_counter_index)
+{
+	struct mlxsw_afa_counter *counter;
+	u32 counter_index;
+	int err;
+
+	counter = mlxsw_afa_counter_create(block);
+	if (IS_ERR(counter))
+		return PTR_ERR(counter);
+	counter_index = counter->counter_index;
+
+	err = mlxsw_afa_block_append_allocated_counter(block, counter_index);
+	if (err)
+		goto err_append_allocated_counter;
+
+	if (p_counter_index)
+		*p_counter_index = counter_index;
+	return 0;
+
+err_append_allocated_counter:
+	mlxsw_afa_counter_destroy(block, counter);
+	return err;
+}
 EXPORT_SYMBOL(mlxsw_afa_block_append_counter);
 
 /* Virtual Router and Forwarding Domain Action
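
The conversion above replaces a list that could only hold fwd_entry refs with a generic resource list: each action that grabs a hardware resource embeds a struct mlxsw_afa_resource, sets a destructor, and block destruction walks the list calling back into the owning type via container_of(). The same pattern in a self-contained sketch (names here are illustrative, and a plain singly linked list stands in for list_head):

    #include <stddef.h>
    #include <stdlib.h>

    struct res {
        struct res *next;
        void (*destructor)(struct res *res);
    };

    struct counter_res {
        struct res res;   /* embedded, so container_of()-style recovery works */
        unsigned int counter_index;
    };

    #define container_of_res(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

    static void counter_destructor(struct res *res)
    {
        struct counter_res *c = container_of_res(res, struct counter_res, res);

        /* release c->counter_index with the backend here, then the memory */
        free(c);
    }

    static void destroy_all(struct res *head)
    {
        struct res *r, *next;

        for (r = head; r; r = next) {
            next = r->next;   /* safe traversal while entries are freed */
            r->destructor(r);
        }
    }

Counters and mirrors below slot into this scheme with no changes to the block teardown code.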
diff --git a/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.h b/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.h
index a8d3314c3a24..43132293475c 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.h
@@ -46,6 +46,12 @@ struct mlxsw_afa_ops {
 	void (*kvdl_set_del)(void *priv, u32 kvdl_index, bool is_first);
 	int (*kvdl_fwd_entry_add)(void *priv, u32 *p_kvdl_index, u8 local_port);
 	void (*kvdl_fwd_entry_del)(void *priv, u32 kvdl_index);
+	int (*counter_index_get)(void *priv, unsigned int *p_counter_index);
+	void (*counter_index_put)(void *priv, unsigned int counter_index);
+	int (*mirror_add)(void *priv, u8 local_in_port, u8 local_out_port,
+			  bool ingress, int *p_span_id);
+	void (*mirror_del)(void *priv, u8 local_in_port, u8 local_out_port,
+			   bool ingress);
 };
 
 struct mlxsw_afa *mlxsw_afa_create(unsigned int max_acts_per_set,
@@ -63,12 +69,17 @@ int mlxsw_afa_block_append_drop(struct mlxsw_afa_block *block);
 int mlxsw_afa_block_append_trap(struct mlxsw_afa_block *block, u16 trap_id);
 int mlxsw_afa_block_append_trap_and_forward(struct mlxsw_afa_block *block,
 					    u16 trap_id);
+int mlxsw_afa_block_append_mirror(struct mlxsw_afa_block *block,
+				  u8 local_in_port, u8 local_out_port,
+				  bool ingress);
 int mlxsw_afa_block_append_fwd(struct mlxsw_afa_block *block,
 			       u8 local_port, bool in_port);
 int mlxsw_afa_block_append_vlan_modify(struct mlxsw_afa_block *block,
 				       u16 vid, u8 pcp, u8 et);
+int mlxsw_afa_block_append_allocated_counter(struct mlxsw_afa_block *block,
+					     u32 counter_index);
 int mlxsw_afa_block_append_counter(struct mlxsw_afa_block *block,
-				   u32 counter_index);
+				   u32 *p_counter_index);
 int mlxsw_afa_block_append_fid_set(struct mlxsw_afa_block *block, u16 fid);
 int mlxsw_afa_block_append_mcrouter(struct mlxsw_afa_block *block,
 				    u16 expected_irif, u16 min_mtu,
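
These header changes make the resource-backed variants usable from drivers: mlxsw_afa_block_append_counter() now allocates the counter itself (through the new counter_index_get/put ops) and hands the index back, and mlxsw_afa_block_append_mirror() does the same for SPAN agents via mirror_add/mirror_del. A hypothetical consumer, written against only the signatures declared here and assuming kernel context (sketch_build_actions is not part of this patch):

    /* Hypothetical: build an action block that counts and mirrors. */
    static struct mlxsw_afa_block *
    sketch_build_actions(struct mlxsw_afa *afa, u8 in_port, u8 out_port)
    {
        struct mlxsw_afa_block *block;
        u32 counter_index;
        int err;

        block = mlxsw_afa_block_create(afa);
        if (!block)
            return ERR_PTR(-ENOMEM);

        /* reserves a counter through ops->counter_index_get() and
         * appends the POLCNT action; the index comes back for stats
         */
        err = mlxsw_afa_block_append_counter(block, &counter_index);
        if (err)
            goto err_out;

        /* sets up a SPAN agent through ops->mirror_add() and appends
         * a TRAPDISC action with mirror_enable set
         */
        err = mlxsw_afa_block_append_mirror(block, in_port, out_port, true);
        if (err)
            goto err_out;

        return block;

    err_out:
        mlxsw_afa_block_destroy(block); /* resource destructors fire here */
        return ERR_PTR(err);
    }

Note that the error path needs no per-resource cleanup: the destructor list handles it.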
diff --git a/drivers/net/ethernet/mellanox/mlxsw/i2c.c b/drivers/net/ethernet/mellanox/mlxsw/i2c.c
index c0dcfa05b077..25f9915ebd82 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/i2c.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/i2c.c
@@ -539,7 +539,8 @@ static int mlxsw_i2c_probe(struct i2c_client *client,
 	mlxsw_i2c->dev = &client->dev;
 
 	err = mlxsw_core_bus_device_register(&mlxsw_i2c->bus_info,
-					     &mlxsw_i2c_bus, mlxsw_i2c);
+					     &mlxsw_i2c_bus, mlxsw_i2c, false,
+					     NULL);
 	if (err) {
 		dev_err(&client->dev, "Fail to register core bus\n");
 		return err;
@@ -557,7 +558,7 @@ static int mlxsw_i2c_remove(struct i2c_client *client)
 {
 	struct mlxsw_i2c *mlxsw_i2c = i2c_get_clientdata(client);
 
-	mlxsw_core_bus_device_unregister(mlxsw_i2c->core);
+	mlxsw_core_bus_device_unregister(mlxsw_i2c->core, false);
 	mutex_destroy(&mlxsw_i2c->cmd.lock);
 
 	return 0;
diff --git a/drivers/net/ethernet/mellanox/mlxsw/item.h b/drivers/net/ethernet/mellanox/mlxsw/item.h
index 28427f0758c7..31c886edc791 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/item.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/item.h
@@ -42,7 +42,7 @@
 
 struct mlxsw_item {
 	unsigned short	offset;		/* bytes in container */
-	unsigned short	step;		/* step in bytes for indexed items */
+	short		step;		/* step in bytes for indexed items, may be negative */
 	unsigned short	in_step_offset; /* offset within one step */
 	unsigned char	shift;		/* shift in bits */
 	unsigned char	element_size;	/* size of element in bit array */
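
Turning step from unsigned short into plain short is not cosmetic: it lets an indexed item walk backwards through a register. The RDPM register added below stores entry 0 at the last byte and uses a step of -MLXSW_REG_RDPM_DSCP_ENTRY_REC_LEN, so the generated accessors need signed arithmetic. Roughly, the byte offset the item machinery computes looks like this (a sketch of the math, not the verbatim helper):

    /* With offset = MLXSW_REG_RDPM_LAST_ENTRY (0x3f) and step = -1,
     * entry 0 lands at byte 0x3f, entry 1 at 0x3e, and so on.
     */
    static unsigned int item_byte_offset(unsigned short offset, short step,
                                         unsigned short in_step_offset,
                                         unsigned short index)
    {
        return offset + step * index + in_step_offset;
    }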
diff --git a/drivers/net/ethernet/mellanox/mlxsw/pci.c b/drivers/net/ethernet/mellanox/mlxsw/pci.c
index 6ef20e5cc77d..85faa87bf42d 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/pci.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/pci.c
@@ -154,6 +154,7 @@ struct mlxsw_pci {
 		} comp;
 	} cmd;
 	struct mlxsw_bus_info bus_info;
+	const struct pci_device_id *id;
 };
 
 static void mlxsw_pci_queue_tasklet_schedule(struct mlxsw_pci_queue *q)
@@ -1052,38 +1053,18 @@ static int mlxsw_pci_resources_query(struct mlxsw_pci *mlxsw_pci, char *mbox,
 }
 
 static int
-mlxsw_pci_profile_get_kvd_sizes(const struct mlxsw_config_profile *profile,
+mlxsw_pci_profile_get_kvd_sizes(const struct mlxsw_pci *mlxsw_pci,
+				const struct mlxsw_config_profile *profile,
 				struct mlxsw_res *res)
 {
-	u32 single_size, double_size, linear_size;
-
-	if (!MLXSW_RES_VALID(res, KVD_SINGLE_MIN_SIZE) ||
-	    !MLXSW_RES_VALID(res, KVD_DOUBLE_MIN_SIZE) ||
-	    !profile->used_kvd_split_data)
-		return -EIO;
-
-	linear_size = profile->kvd_linear_size;
+	u64 single_size, double_size, linear_size;
+	int err;
 
-	/* The hash part is what left of the kvd without the
-	 * linear part. It is split to the single size and
-	 * double size by the parts ratio from the profile.
-	 * Both sizes must be a multiplications of the
-	 * granularity from the profile.
-	 */
-	double_size = MLXSW_RES_GET(res, KVD_SIZE) - linear_size;
-	double_size *= profile->kvd_hash_double_parts;
-	double_size /= profile->kvd_hash_double_parts +
-		       profile->kvd_hash_single_parts;
-	double_size /= profile->kvd_hash_granularity;
-	double_size *= profile->kvd_hash_granularity;
-	single_size = MLXSW_RES_GET(res, KVD_SIZE) - double_size -
-		      linear_size;
-
-	/* Check results are legal. */
-	if (single_size < MLXSW_RES_GET(res, KVD_SINGLE_MIN_SIZE) ||
-	    double_size < MLXSW_RES_GET(res, KVD_DOUBLE_MIN_SIZE) ||
-	    MLXSW_RES_GET(res, KVD_SIZE) < linear_size)
-		return -EIO;
+	err = mlxsw_core_kvd_sizes_get(mlxsw_pci->core, profile,
+				       &single_size, &double_size,
+				       &linear_size);
+	if (err)
+		return err;
 
 	MLXSW_RES_SET(res, KVD_SINGLE_SIZE, single_size);
 	MLXSW_RES_SET(res, KVD_DOUBLE_SIZE, double_size);
@@ -1184,7 +1165,7 @@ static int mlxsw_pci_config_profile(struct mlxsw_pci *mlxsw_pci, char *mbox,
 			mbox, profile->adaptive_routing_group_cap);
 	}
 	if (MLXSW_RES_VALID(res, KVD_SIZE)) {
-		err = mlxsw_pci_profile_get_kvd_sizes(profile, res);
+		err = mlxsw_pci_profile_get_kvd_sizes(mlxsw_pci, profile, res);
 		if (err)
 			return err;
 
@@ -1622,16 +1603,6 @@ static int mlxsw_pci_cmd_exec(void *bus_priv, u16 opcode, u8 opcode_mod,
 	return err;
 }
 
-static const struct mlxsw_bus mlxsw_pci_bus = {
-	.kind			= "pci",
-	.init			= mlxsw_pci_init,
-	.fini			= mlxsw_pci_fini,
-	.skb_transmit_busy	= mlxsw_pci_skb_transmit_busy,
-	.skb_transmit		= mlxsw_pci_skb_transmit,
-	.cmd_exec		= mlxsw_pci_cmd_exec,
-	.features		= MLXSW_BUS_F_TXRX,
-};
-
 static int mlxsw_pci_sw_reset(struct mlxsw_pci *mlxsw_pci,
 			      const struct pci_device_id *id)
 {
@@ -1660,6 +1631,41 @@ static int mlxsw_pci_sw_reset(struct mlxsw_pci *mlxsw_pci,
 	return 0;
 }
 
+static void mlxsw_pci_free_irq_vectors(struct mlxsw_pci *mlxsw_pci)
+{
+	pci_free_irq_vectors(mlxsw_pci->pdev);
+}
+
+static int mlxsw_pci_alloc_irq_vectors(struct mlxsw_pci *mlxsw_pci)
+{
+	int err;
+
+	err = pci_alloc_irq_vectors(mlxsw_pci->pdev, 1, 1, PCI_IRQ_MSIX);
+	if (err < 0)
+		dev_err(&mlxsw_pci->pdev->dev, "MSI-X init failed\n");
+	return err;
+}
+
+static void mlxsw_pci_reset(void *bus_priv)
+{
+	struct mlxsw_pci *mlxsw_pci = bus_priv;
+
+	mlxsw_pci_free_irq_vectors(mlxsw_pci);
+	mlxsw_pci_sw_reset(mlxsw_pci, mlxsw_pci->id);
+	mlxsw_pci_alloc_irq_vectors(mlxsw_pci);
+}
+
+static const struct mlxsw_bus mlxsw_pci_bus = {
+	.kind			= "pci",
+	.init			= mlxsw_pci_init,
+	.fini			= mlxsw_pci_fini,
+	.skb_transmit_busy	= mlxsw_pci_skb_transmit_busy,
+	.skb_transmit		= mlxsw_pci_skb_transmit,
+	.cmd_exec		= mlxsw_pci_cmd_exec,
+	.features		= MLXSW_BUS_F_TXRX,
+	.reset			= mlxsw_pci_reset,
+};
+
 static int mlxsw_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
 {
 	const char *driver_name = pdev->driver->name;
@@ -1721,7 +1727,7 @@ static int mlxsw_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
 		goto err_sw_reset;
 	}
 
-	err = pci_alloc_irq_vectors(pdev, 1, 1, PCI_IRQ_MSIX);
+	err = mlxsw_pci_alloc_irq_vectors(mlxsw_pci);
 	if (err < 0) {
 		dev_err(&pdev->dev, "MSI-X init failed\n");
 		goto err_msix_init;
@@ -1730,9 +1736,11 @@ static int mlxsw_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
 	mlxsw_pci->bus_info.device_kind = driver_name;
 	mlxsw_pci->bus_info.device_name = pci_name(mlxsw_pci->pdev);
 	mlxsw_pci->bus_info.dev = &pdev->dev;
+	mlxsw_pci->id = id;
 
 	err = mlxsw_core_bus_device_register(&mlxsw_pci->bus_info,
-					     &mlxsw_pci_bus, mlxsw_pci);
+					     &mlxsw_pci_bus, mlxsw_pci, false,
+					     NULL);
 	if (err) {
 		dev_err(&pdev->dev, "cannot register bus device\n");
 		goto err_bus_device_register;
@@ -1741,7 +1749,7 @@ static int mlxsw_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
 	return 0;
 
 err_bus_device_register:
-	pci_free_irq_vectors(mlxsw_pci->pdev);
+	mlxsw_pci_free_irq_vectors(mlxsw_pci);
 err_msix_init:
 err_sw_reset:
 	iounmap(mlxsw_pci->hw_addr);
@@ -1760,8 +1768,8 @@ static void mlxsw_pci_remove(struct pci_dev *pdev)
 {
 	struct mlxsw_pci *mlxsw_pci = pci_get_drvdata(pdev);
 
-	mlxsw_core_bus_device_unregister(mlxsw_pci->core);
-	pci_free_irq_vectors(mlxsw_pci->pdev);
+	mlxsw_core_bus_device_unregister(mlxsw_pci->core, false);
+	mlxsw_pci_free_irq_vectors(mlxsw_pci);
 	iounmap(mlxsw_pci->hw_addr);
 	pci_release_regions(mlxsw_pci->pdev);
 	pci_disable_device(mlxsw_pci->pdev);
diff --git a/drivers/net/ethernet/mellanox/mlxsw/reg.h b/drivers/net/ethernet/mellanox/mlxsw/reg.h
index 6c4e08b8058a..0e08be41c8e0 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/reg.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/reg.h
@@ -4827,6 +4827,42 @@ static inline void mlxsw_reg_ratr_counter_pack(char *payload, u64 counter_index,
 	mlxsw_reg_ratr_counter_set_type_set(payload, set_type);
 }
 
+/* RDPM - Router DSCP to Priority Mapping
+ * --------------------------------------
+ * Controls the mapping from DSCP field to switch priority on routed packets
+ */
+#define MLXSW_REG_RDPM_ID 0x8009
+#define MLXSW_REG_RDPM_BASE_LEN 0x00
+#define MLXSW_REG_RDPM_DSCP_ENTRY_REC_LEN 0x01
+#define MLXSW_REG_RDPM_DSCP_ENTRY_REC_MAX_COUNT 64
+#define MLXSW_REG_RDPM_LEN 0x40
+#define MLXSW_REG_RDPM_LAST_ENTRY (MLXSW_REG_RDPM_BASE_LEN + \
+				   MLXSW_REG_RDPM_LEN - \
+				   MLXSW_REG_RDPM_DSCP_ENTRY_REC_LEN)
+
+MLXSW_REG_DEFINE(rdpm, MLXSW_REG_RDPM_ID, MLXSW_REG_RDPM_LEN);
+
+/* reg_rdpm_dscp_entry_e
+ * Enable update of the specific entry
+ * Access: Index
+ */
+MLXSW_ITEM8_INDEXED(reg, rdpm, dscp_entry_e, MLXSW_REG_RDPM_LAST_ENTRY, 7, 1,
+		    -MLXSW_REG_RDPM_DSCP_ENTRY_REC_LEN, 0x00, false);
+
+/* reg_rdpm_dscp_entry_prio
+ * Switch Priority
+ * Access: RW
+ */
+MLXSW_ITEM8_INDEXED(reg, rdpm, dscp_entry_prio, MLXSW_REG_RDPM_LAST_ENTRY, 0, 4,
+		    -MLXSW_REG_RDPM_DSCP_ENTRY_REC_LEN, 0x00, false);
+
+static inline void mlxsw_reg_rdpm_pack(char *payload, unsigned short index,
+				       u8 prio)
+{
+	mlxsw_reg_rdpm_dscp_entry_e_set(payload, index, 1);
+	mlxsw_reg_rdpm_dscp_entry_prio_set(payload, index, prio);
+}
+
 /* RICNT - Router Interface Counter Register
  * -----------------------------------------
  * The RICNT register retrieves per port performance counters
@@ -7640,6 +7676,7 @@ static const struct mlxsw_reg_info *mlxsw_reg_infos[] = {
 	MLXSW_REG(rtar),
 	MLXSW_REG(ratr),
 	MLXSW_REG(rtdp),
+	MLXSW_REG(rdpm),
 	MLXSW_REG(ricnt),
 	MLXSW_REG(rrcr),
 	MLXSW_REG(ralta),
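
Given the pack helper above, programming the whole DSCP-to-priority map comes down to packing the 64 entries and issuing one register write. A hedged usage sketch (sketch_rdpm_write is hypothetical; the remaining names are this patch's and the driver's existing plumbing, assumed available):

    /* Hypothetical: map all 64 DSCP values to switch priorities via RDPM. */
    static int sketch_rdpm_write(struct mlxsw_sp *mlxsw_sp, const u8 *dscp2prio)
    {
        char rdpm_pl[MLXSW_REG_RDPM_LEN];
        unsigned short i;

        memset(rdpm_pl, 0, MLXSW_REG_RDPM_LEN);
        for (i = 0; i < MLXSW_REG_RDPM_DSCP_ENTRY_REC_MAX_COUNT; i++)
            mlxsw_reg_rdpm_pack(rdpm_pl, i, dscp2prio[i]);
        return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(rdpm), rdpm_pl);
    }

Because dscp_entry_e gates each record, partial updates are also possible by packing only the entries that changed.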
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
index c3837ca7a705..3dcc58d61506 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
@@ -76,12 +76,7 @@
 #define MLXSW_FWREV_MAJOR 13
 #define MLXSW_FWREV_MINOR 1530
 #define MLXSW_FWREV_SUBMINOR 152
-
-static const struct mlxsw_fw_rev mlxsw_sp_supported_fw_rev = {
-	.major = MLXSW_FWREV_MAJOR,
-	.minor = MLXSW_FWREV_MINOR,
-	.subminor = MLXSW_FWREV_SUBMINOR
-};
+#define MLXSW_FWREV_MINOR_TO_BRANCH(minor) ((minor) / 100)
 
 #define MLXSW_SP_FW_FILENAME \
 	"mellanox/mlxsw_spectrum-" __stringify(MLXSW_FWREV_MAJOR) \
@@ -339,28 +334,25 @@ static int mlxsw_sp_firmware_flash(struct mlxsw_sp *mlxsw_sp,
 	return mlxfw_firmware_flash(&mlxsw_sp_mlxfw_dev.mlxfw_dev, firmware);
 }
 
-static bool mlxsw_sp_fw_rev_ge(const struct mlxsw_fw_rev *a,
-			       const struct mlxsw_fw_rev *b)
-{
-	if (a->major != b->major)
-		return a->major > b->major;
-	if (a->minor != b->minor)
-		return a->minor > b->minor;
-	return a->subminor >= b->subminor;
-}
-
 static int mlxsw_sp_fw_rev_validate(struct mlxsw_sp *mlxsw_sp)
 {
 	const struct mlxsw_fw_rev *rev = &mlxsw_sp->bus_info->fw_rev;
 	const struct firmware *firmware;
 	int err;
 
-	if (mlxsw_sp_fw_rev_ge(rev, &mlxsw_sp_supported_fw_rev))
+	/* Validate driver & FW are compatible */
+	if (rev->major != MLXSW_FWREV_MAJOR) {
+		WARN(1, "Mismatch in major FW version [%d:%d] is never expected; Please contact support\n",
+		     rev->major, MLXSW_FWREV_MAJOR);
+		return -EINVAL;
+	}
+	if (MLXSW_FWREV_MINOR_TO_BRANCH(rev->minor) ==
+	    MLXSW_FWREV_MINOR_TO_BRANCH(MLXSW_FWREV_MINOR))
 		return 0;
 
-	dev_info(mlxsw_sp->bus_info->dev, "The firmware version %d.%d.%d out of data\n",
+	dev_info(mlxsw_sp->bus_info->dev, "The firmware version %d.%d.%d is incompatible with the driver\n",
 		 rev->major, rev->minor, rev->subminor);
-	dev_info(mlxsw_sp->bus_info->dev, "Upgrading firmware using file %s\n",
+	dev_info(mlxsw_sp->bus_info->dev, "Flashing firmware using file %s\n",
 		 MLXSW_SP_FW_FILENAME);
 
 	err = request_firmware_direct(&firmware, MLXSW_SP_FW_FILENAME,
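
The new compatibility rule replaces the old plain greater-or-equal check: the major version must match exactly, and the minor only has to land in the same hundreds-based branch. With MLXSW_FWREV_MINOR set to 1530, any running 13.15xx firmware is accepted, while 13.1420.x maps to branch 14 and triggers a flash of MLXSW_SP_FW_FILENAME. The comparison reduces to:

    /* Branch comparison used above: 1530 / 100 == 15, 1420 / 100 == 14. */
    #define MLXSW_FWREV_MINOR_TO_BRANCH(minor) ((minor) / 100)

    static _Bool fw_minor_compatible(unsigned int running_minor,
                                     unsigned int driver_minor)
    {
        return MLXSW_FWREV_MINOR_TO_BRANCH(running_minor) ==
               MLXSW_FWREV_MINOR_TO_BRANCH(driver_minor);
    }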
@@ -576,7 +568,7 @@ static void mlxsw_sp_span_entry_destroy(struct mlxsw_sp *mlxsw_sp,
 	span_entry->used = false;
 }
 
-static struct mlxsw_sp_span_entry *
+struct mlxsw_sp_span_entry *
 mlxsw_sp_span_entry_find(struct mlxsw_sp *mlxsw_sp, u8 local_port)
 {
 	int i;
@@ -677,13 +669,28 @@ mlxsw_sp_span_entry_bound_port_find(struct mlxsw_sp_port *port,
 static int
 mlxsw_sp_span_inspected_port_bind(struct mlxsw_sp_port *port,
 				  struct mlxsw_sp_span_entry *span_entry,
-				  enum mlxsw_sp_span_type type)
+				  enum mlxsw_sp_span_type type,
+				  bool bind)
 {
-	struct mlxsw_sp_span_inspected_port *inspected_port;
 	struct mlxsw_sp *mlxsw_sp = port->mlxsw_sp;
 	char mpar_pl[MLXSW_REG_MPAR_LEN];
-	char sbib_pl[MLXSW_REG_SBIB_LEN];
 	int pa_id = span_entry->id;
+
+	/* bind the port to the SPAN entry */
+	mlxsw_reg_mpar_pack(mpar_pl, port->local_port,
+			    (enum mlxsw_reg_mpar_i_e) type, bind, pa_id);
+	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mpar), mpar_pl);
+}
+
+static int
+mlxsw_sp_span_inspected_port_add(struct mlxsw_sp_port *port,
+				 struct mlxsw_sp_span_entry *span_entry,
+				 enum mlxsw_sp_span_type type,
+				 bool bind)
+{
+	struct mlxsw_sp_span_inspected_port *inspected_port;
+	struct mlxsw_sp *mlxsw_sp = port->mlxsw_sp;
+	char sbib_pl[MLXSW_REG_SBIB_LEN];
 	int err;
 
 	/* if it is an egress SPAN, bind a shared buffer to it */
@@ -699,12 +706,12 @@ mlxsw_sp_span_inspected_port_bind(struct mlxsw_sp_port *port,
 		}
 	}
 
-	/* bind the port to the SPAN entry */
-	mlxsw_reg_mpar_pack(mpar_pl, port->local_port,
-			    (enum mlxsw_reg_mpar_i_e) type, true, pa_id);
-	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mpar), mpar_pl);
-	if (err)
-		goto err_mpar_reg_write;
+	if (bind) {
+		err = mlxsw_sp_span_inspected_port_bind(port, span_entry, type,
+							true);
+		if (err)
+			goto err_port_bind;
+	}
 
 	inspected_port = kzalloc(sizeof(*inspected_port), GFP_KERNEL);
 	if (!inspected_port) {
@@ -717,8 +724,11 @@ mlxsw_sp_span_inspected_port_bind(struct mlxsw_sp_port *port,
 
 	return 0;
 
-err_mpar_reg_write:
 err_inspected_port_alloc:
+	if (bind)
+		mlxsw_sp_span_inspected_port_bind(port, span_entry, type,
+						  false);
+err_port_bind:
 	if (type == MLXSW_SP_SPAN_EGRESS) {
 		mlxsw_reg_sbib_pack(sbib_pl, port->local_port, 0);
 		mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sbib), sbib_pl);
@@ -727,25 +737,22 @@ err_inspected_port_alloc:
 }
 
 static void
-mlxsw_sp_span_inspected_port_unbind(struct mlxsw_sp_port *port,
-				    struct mlxsw_sp_span_entry *span_entry,
-				    enum mlxsw_sp_span_type type)
+mlxsw_sp_span_inspected_port_del(struct mlxsw_sp_port *port,
+				 struct mlxsw_sp_span_entry *span_entry,
+				 enum mlxsw_sp_span_type type,
+				 bool bind)
 {
 	struct mlxsw_sp_span_inspected_port *inspected_port;
 	struct mlxsw_sp *mlxsw_sp = port->mlxsw_sp;
-	char mpar_pl[MLXSW_REG_MPAR_LEN];
 	char sbib_pl[MLXSW_REG_SBIB_LEN];
-	int pa_id = span_entry->id;
 
 	inspected_port = mlxsw_sp_span_entry_bound_port_find(port, span_entry);
 	if (!inspected_port)
 		return;
 
-	/* remove the inspected port */
-	mlxsw_reg_mpar_pack(mpar_pl, port->local_port,
-			    (enum mlxsw_reg_mpar_i_e) type, false, pa_id);
-	mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mpar), mpar_pl);
-
+	if (bind)
+		mlxsw_sp_span_inspected_port_bind(port, span_entry, type,
+						  false);
 	/* remove the SBIB buffer if it was egress SPAN */
 	if (type == MLXSW_SP_SPAN_EGRESS) {
 		mlxsw_reg_sbib_pack(sbib_pl, port->local_port, 0);
@@ -758,9 +765,9 @@ mlxsw_sp_span_inspected_port_unbind(struct mlxsw_sp_port *port,
 	kfree(inspected_port);
 }
 
-static int mlxsw_sp_span_mirror_add(struct mlxsw_sp_port *from,
-				    struct mlxsw_sp_port *to,
-				    enum mlxsw_sp_span_type type)
+int mlxsw_sp_span_mirror_add(struct mlxsw_sp_port *from,
+			     struct mlxsw_sp_port *to,
+			     enum mlxsw_sp_span_type type, bool bind)
 {
 	struct mlxsw_sp *mlxsw_sp = from->mlxsw_sp;
 	struct mlxsw_sp_span_entry *span_entry;
@@ -773,7 +780,7 @@ static int mlxsw_sp_span_mirror_add(struct mlxsw_sp_port *from,
 	netdev_dbg(from->dev, "Adding inspected port to SPAN entry %d\n",
 		   span_entry->id);
 
-	err = mlxsw_sp_span_inspected_port_bind(from, span_entry, type);
+	err = mlxsw_sp_span_inspected_port_add(from, span_entry, type, bind);
 	if (err)
 		goto err_port_bind;
 
@@ -784,9 +791,8 @@ err_port_bind:
 	return err;
 }
 
-static void mlxsw_sp_span_mirror_remove(struct mlxsw_sp_port *from,
-					u8 destination_port,
-					enum mlxsw_sp_span_type type)
+void mlxsw_sp_span_mirror_del(struct mlxsw_sp_port *from, u8 destination_port,
+			      enum mlxsw_sp_span_type type, bool bind)
 {
 	struct mlxsw_sp_span_entry *span_entry;
 
@@ -799,7 +805,7 @@ static void mlxsw_sp_span_mirror_remove(struct mlxsw_sp_port *from,
 
 	netdev_dbg(from->dev, "removing inspected port from SPAN entry %d\n",
 		   span_entry->id);
-	mlxsw_sp_span_inspected_port_unbind(from, span_entry, type);
+	mlxsw_sp_span_inspected_port_del(from, span_entry, type, bind);
 }
 
 static int mlxsw_sp_port_sample_set(struct mlxsw_sp_port *mlxsw_sp_port,
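
The SPAN refactor above splits what used to be a single bind into two layers: mlxsw_sp_span_inspected_port_add/del always keep the inspected-port bookkeeping and the egress SBIB buffer, while the actual MPAR port binding now hangs off a new bind flag. Matchall mirrors pass bind=true; presumably the ACL mirror action wired up elsewhere in this series passes bind=false, since its TRAPDISC mirror_agent field triggers the mirroring instead (the spectrum-side callback is outside this hunk). A hedged sketch of the two call shapes:

    /* Hypothetical caller showing both flavors enabled by the flag. */
    static int sketch_two_mirror_flavors(struct mlxsw_sp_port *from,
                                         struct mlxsw_sp_port *to)
    {
        int err;

        /* matchall offload: port-level mirror, bound through MPAR */
        err = mlxsw_sp_span_mirror_add(from, to, MLXSW_SP_SPAN_INGRESS, true);
        if (err)
            return err;

        /* ACL-driven mirror: SPAN entry and buffers only; something
         * else (e.g. a flexible action) must trigger the mirroring
         */
        return mlxsw_sp_span_mirror_add(from, to, MLXSW_SP_SPAN_INGRESS, false);
    }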
@@ -1571,14 +1577,11 @@ mlxsw_sp_port_add_cls_matchall_mirror(struct mlxsw_sp_port *mlxsw_sp_port,
 				      const struct tc_action *a,
 				      bool ingress)
 {
-	struct net *net = dev_net(mlxsw_sp_port->dev);
 	enum mlxsw_sp_span_type span_type;
 	struct mlxsw_sp_port *to_port;
 	struct net_device *to_dev;
-	int ifindex;
 
-	ifindex = tcf_mirred_ifindex(a);
-	to_dev = __dev_get_by_index(net, ifindex);
+	to_dev = tcf_mirred_dev(a);
 	if (!to_dev) {
 		netdev_err(mlxsw_sp_port->dev, "Could not find requested device\n");
 		return -EINVAL;
@@ -1593,7 +1596,8 @@ mlxsw_sp_port_add_cls_matchall_mirror(struct mlxsw_sp_port *mlxsw_sp_port,
 	mirror->to_local_port = to_port->local_port;
 	mirror->ingress = ingress;
 	span_type = ingress ? MLXSW_SP_SPAN_INGRESS : MLXSW_SP_SPAN_EGRESS;
-	return mlxsw_sp_span_mirror_add(mlxsw_sp_port, to_port, span_type);
+	return mlxsw_sp_span_mirror_add(mlxsw_sp_port, to_port, span_type,
+					true);
 }
 
 static void
@@ -1604,8 +1608,8 @@ mlxsw_sp_port_del_cls_matchall_mirror(struct mlxsw_sp_port *mlxsw_sp_port,
 
 	span_type = mirror->ingress ?
 			MLXSW_SP_SPAN_INGRESS : MLXSW_SP_SPAN_EGRESS;
-	mlxsw_sp_span_mirror_remove(mlxsw_sp_port, mirror->to_local_port,
-				    span_type);
+	mlxsw_sp_span_mirror_del(mlxsw_sp_port, mirror->to_local_port,
+				 span_type, true);
 }
 
 static int
@@ -1734,9 +1738,6 @@ static int mlxsw_sp_setup_tc_cls_matchall(struct mlxsw_sp_port *mlxsw_sp_port,
 					  struct tc_cls_matchall_offload *f,
 					  bool ingress)
 {
-	if (f->common.chain_index)
-		return -EOPNOTSUPP;
-
 	switch (f->command) {
 	case TC_CLSMATCHALL_REPLACE:
 		return mlxsw_sp_port_add_cls_matchall(mlxsw_sp_port, f,
@@ -1750,72 +1751,187 @@ static int mlxsw_sp_setup_tc_cls_matchall(struct mlxsw_sp_port *mlxsw_sp_port,
 }
 
 static int
-mlxsw_sp_setup_tc_cls_flower(struct mlxsw_sp_port *mlxsw_sp_port,
-			     struct tc_cls_flower_offload *f,
-			     bool ingress)
+mlxsw_sp_setup_tc_cls_flower(struct mlxsw_sp_acl_block *acl_block,
+			     struct tc_cls_flower_offload *f)
 {
+	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_acl_block_mlxsw_sp(acl_block);
+
 	switch (f->command) {
 	case TC_CLSFLOWER_REPLACE:
-		return mlxsw_sp_flower_replace(mlxsw_sp_port, ingress, f);
+		return mlxsw_sp_flower_replace(mlxsw_sp, acl_block, f);
 	case TC_CLSFLOWER_DESTROY:
-		mlxsw_sp_flower_destroy(mlxsw_sp_port, ingress, f);
+		mlxsw_sp_flower_destroy(mlxsw_sp, acl_block, f);
 		return 0;
 	case TC_CLSFLOWER_STATS:
-		return mlxsw_sp_flower_stats(mlxsw_sp_port, ingress, f);
+		return mlxsw_sp_flower_stats(mlxsw_sp, acl_block, f);
 	default:
 		return -EOPNOTSUPP;
 	}
 }
 
-static int mlxsw_sp_setup_tc_block_cb(enum tc_setup_type type, void *type_data,
-				      void *cb_priv, bool ingress)
+static int mlxsw_sp_setup_tc_block_cb_matchall(enum tc_setup_type type,
+					       void *type_data,
+					       void *cb_priv, bool ingress)
 {
 	struct mlxsw_sp_port *mlxsw_sp_port = cb_priv;
 
-	if (!tc_can_offload(mlxsw_sp_port->dev))
-		return -EOPNOTSUPP;
-
 	switch (type) {
 	case TC_SETUP_CLSMATCHALL:
+		if (!tc_cls_can_offload_and_chain0(mlxsw_sp_port->dev,
+						   type_data))
+			return -EOPNOTSUPP;
+
 		return mlxsw_sp_setup_tc_cls_matchall(mlxsw_sp_port, type_data,
 						      ingress);
 	case TC_SETUP_CLSFLOWER:
-		return mlxsw_sp_setup_tc_cls_flower(mlxsw_sp_port, type_data,
-						    ingress);
+		return 0;
+	default:
+		return -EOPNOTSUPP;
+	}
+}
+
+static int mlxsw_sp_setup_tc_block_cb_matchall_ig(enum tc_setup_type type,
+						  void *type_data,
+						  void *cb_priv)
+{
+	return mlxsw_sp_setup_tc_block_cb_matchall(type, type_data,
+						   cb_priv, true);
+}
+
+static int mlxsw_sp_setup_tc_block_cb_matchall_eg(enum tc_setup_type type,
+						  void *type_data,
+						  void *cb_priv)
+{
+	return mlxsw_sp_setup_tc_block_cb_matchall(type, type_data,
+						   cb_priv, false);
+}
+
+static int mlxsw_sp_setup_tc_block_cb_flower(enum tc_setup_type type,
+					     void *type_data, void *cb_priv)
+{
+	struct mlxsw_sp_acl_block *acl_block = cb_priv;
+
+	switch (type) {
+	case TC_SETUP_CLSMATCHALL:
+		return 0;
+	case TC_SETUP_CLSFLOWER:
+		if (mlxsw_sp_acl_block_disabled(acl_block))
+			return -EOPNOTSUPP;
+
+		return mlxsw_sp_setup_tc_cls_flower(acl_block, type_data);
 	default:
 		return -EOPNOTSUPP;
 	}
 }
 
-static int mlxsw_sp_setup_tc_block_cb_ig(enum tc_setup_type type,
-					 void *type_data, void *cb_priv)
+static int
+mlxsw_sp_setup_tc_block_flower_bind(struct mlxsw_sp_port *mlxsw_sp_port,
+				    struct tcf_block *block, bool ingress)
 {
-	return mlxsw_sp_setup_tc_block_cb(type, type_data, cb_priv, true);
+	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
+	struct mlxsw_sp_acl_block *acl_block;
+	struct tcf_block_cb *block_cb;
+	int err;
+
+	block_cb = tcf_block_cb_lookup(block, mlxsw_sp_setup_tc_block_cb_flower,
+				       mlxsw_sp);
+	if (!block_cb) {
+		acl_block = mlxsw_sp_acl_block_create(mlxsw_sp, block->net);
+		if (!acl_block)
+			return -ENOMEM;
+		block_cb = __tcf_block_cb_register(block,
+						   mlxsw_sp_setup_tc_block_cb_flower,
+						   mlxsw_sp, acl_block);
+		if (IS_ERR(block_cb)) {
+			err = PTR_ERR(block_cb);
+			goto err_cb_register;
+		}
+	} else {
+		acl_block = tcf_block_cb_priv(block_cb);
+	}
+	tcf_block_cb_incref(block_cb);
+	err = mlxsw_sp_acl_block_bind(mlxsw_sp, acl_block,
+				      mlxsw_sp_port, ingress);
+	if (err)
+		goto err_block_bind;
+
+	if (ingress)
+		mlxsw_sp_port->ing_acl_block = acl_block;
+	else
+		mlxsw_sp_port->eg_acl_block = acl_block;
+
+	return 0;
+
+err_block_bind:
+	if (!tcf_block_cb_decref(block_cb)) {
+		__tcf_block_cb_unregister(block_cb);
+err_cb_register:
+		mlxsw_sp_acl_block_destroy(acl_block);
+	}
+	return err;
 }
 
-static int mlxsw_sp_setup_tc_block_cb_eg(enum tc_setup_type type,
-					 void *type_data, void *cb_priv)
+static void
+mlxsw_sp_setup_tc_block_flower_unbind(struct mlxsw_sp_port *mlxsw_sp_port,
+				      struct tcf_block *block, bool ingress)
 {
-	return mlxsw_sp_setup_tc_block_cb(type, type_data, cb_priv, false);
+	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
+	struct mlxsw_sp_acl_block *acl_block;
+	struct tcf_block_cb *block_cb;
+	int err;
+
+	block_cb = tcf_block_cb_lookup(block, mlxsw_sp_setup_tc_block_cb_flower,
+				       mlxsw_sp);
+	if (!block_cb)
+		return;
+
+	if (ingress)
+		mlxsw_sp_port->ing_acl_block = NULL;
+	else
+		mlxsw_sp_port->eg_acl_block = NULL;
+
+	acl_block = tcf_block_cb_priv(block_cb);
+	err = mlxsw_sp_acl_block_unbind(mlxsw_sp, acl_block,
+					mlxsw_sp_port, ingress);
+	if (!err && !tcf_block_cb_decref(block_cb)) {
+		__tcf_block_cb_unregister(block_cb);
+		mlxsw_sp_acl_block_destroy(acl_block);
+	}
 }
 
 static int mlxsw_sp_setup_tc_block(struct mlxsw_sp_port *mlxsw_sp_port,
 				   struct tc_block_offload *f)
 {
 	tc_setup_cb_t *cb;
+	bool ingress;
+	int err;
 
-	if (f->binder_type == TCF_BLOCK_BINDER_TYPE_CLSACT_INGRESS)
-		cb = mlxsw_sp_setup_tc_block_cb_ig;
-	else if (f->binder_type == TCF_BLOCK_BINDER_TYPE_CLSACT_EGRESS)
-		cb = mlxsw_sp_setup_tc_block_cb_eg;
-	else
+	if (f->binder_type == TCF_BLOCK_BINDER_TYPE_CLSACT_INGRESS) {
+		cb = mlxsw_sp_setup_tc_block_cb_matchall_ig;
+		ingress = true;
+	} else if (f->binder_type == TCF_BLOCK_BINDER_TYPE_CLSACT_EGRESS) {
+		cb = mlxsw_sp_setup_tc_block_cb_matchall_eg;
+		ingress = false;
+	} else {
 		return -EOPNOTSUPP;
+	}
 
 	switch (f->command) {
 	case TC_BLOCK_BIND:
-		return tcf_block_cb_register(f->block, cb, mlxsw_sp_port,
-					     mlxsw_sp_port);
+		err = tcf_block_cb_register(f->block, cb, mlxsw_sp_port,
+					    mlxsw_sp_port);
+		if (err)
+			return err;
+		err = mlxsw_sp_setup_tc_block_flower_bind(mlxsw_sp_port,
+							  f->block, ingress);
+		if (err) {
+			tcf_block_cb_unregister(f->block, cb, mlxsw_sp_port);
+			return err;
+		}
+		return 0;
 	case TC_BLOCK_UNBIND:
+		mlxsw_sp_setup_tc_block_flower_unbind(mlxsw_sp_port,
+						      f->block, ingress);
 		tcf_block_cb_unregister(f->block, cb, mlxsw_sp_port);
 		return 0;
 	default:
@@ -1833,11 +1949,69 @@ static int mlxsw_sp_setup_tc(struct net_device *dev, enum tc_setup_type type,
 		return mlxsw_sp_setup_tc_block(mlxsw_sp_port, type_data);
 	case TC_SETUP_QDISC_RED:
 		return mlxsw_sp_setup_tc_red(mlxsw_sp_port, type_data);
+	case TC_SETUP_QDISC_PRIO:
+		return mlxsw_sp_setup_tc_prio(mlxsw_sp_port, type_data);
 	default:
 		return -EOPNOTSUPP;
 	}
 }
 
+static int mlxsw_sp_feature_hw_tc(struct net_device *dev, bool enable)
+{
+	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
+
+	if (!enable) {
+		if (mlxsw_sp_acl_block_rule_count(mlxsw_sp_port->ing_acl_block) ||
+		    mlxsw_sp_acl_block_rule_count(mlxsw_sp_port->eg_acl_block) ||
+		    !list_empty(&mlxsw_sp_port->mall_tc_list)) {
+			netdev_err(dev, "Active offloaded tc filters, can't turn hw_tc_offload off\n");
+			return -EINVAL;
+		}
+		mlxsw_sp_acl_block_disable_inc(mlxsw_sp_port->ing_acl_block);
+		mlxsw_sp_acl_block_disable_inc(mlxsw_sp_port->eg_acl_block);
+	} else {
+		mlxsw_sp_acl_block_disable_dec(mlxsw_sp_port->ing_acl_block);
+		mlxsw_sp_acl_block_disable_dec(mlxsw_sp_port->eg_acl_block);
+	}
+	return 0;
+}
+
+typedef int (*mlxsw_sp_feature_handler)(struct net_device *dev, bool enable);
+
+static int mlxsw_sp_handle_feature(struct net_device *dev,
+				   netdev_features_t wanted_features,
+				   netdev_features_t feature,
+				   mlxsw_sp_feature_handler feature_handler)
+{
+	netdev_features_t changes = wanted_features ^ dev->features;
+	bool enable = !!(wanted_features & feature);
+	int err;
+
+	if (!(changes & feature))
+		return 0;
+
+	err = feature_handler(dev, enable);
+	if (err) {
+		netdev_err(dev, "%s feature %pNF failed, err %d\n",
+			   enable ? "Enable" : "Disable", &feature, err);
+		return err;
+	}
+
+	if (enable)
+		dev->features |= feature;
+	else
+		dev->features &= ~feature;
+
+	return 0;
+}
+
+static int mlxsw_sp_set_features(struct net_device *dev,
+				 netdev_features_t features)
+{
+	return mlxsw_sp_handle_feature(dev, features, NETIF_F_HW_TC,
+				       mlxsw_sp_feature_hw_tc);
+}
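
mlxsw_sp_handle_feature() factors out the toggle boilerplate: detect the changed bit, call a handler, and mirror the result into dev->features only on success. Additional offload features can then hook in with one call each. A hypothetical extension (the loopback handler name is illustrative only, not part of this patch):

    /* Hypothetical: chain a second feature through the same helper. */
    static int mlxsw_sp_set_features(struct net_device *dev,
                                     netdev_features_t features)
    {
        int err;

        err = mlxsw_sp_handle_feature(dev, features, NETIF_F_HW_TC,
                                      mlxsw_sp_feature_hw_tc);
        if (err)
            return err;
        /* e.g. a future handler for a second toggleable feature */
        return mlxsw_sp_handle_feature(dev, features, NETIF_F_LOOPBACK,
                                       mlxsw_sp_feature_loopback);
    }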
+
 static const struct net_device_ops mlxsw_sp_port_netdev_ops = {
 	.ndo_open		= mlxsw_sp_port_open,
 	.ndo_stop		= mlxsw_sp_port_stop,
@@ -1852,6 +2026,7 @@ static const struct net_device_ops mlxsw_sp_port_netdev_ops = {
 	.ndo_vlan_rx_add_vid	= mlxsw_sp_port_add_vid,
 	.ndo_vlan_rx_kill_vid	= mlxsw_sp_port_kill_vid,
 	.ndo_get_phys_port_name	= mlxsw_sp_port_get_phys_port_name,
+	.ndo_set_features	= mlxsw_sp_set_features,
 };
 
 static void mlxsw_sp_port_get_drvinfo(struct net_device *dev,
@@ -3039,6 +3214,13 @@ static int mlxsw_sp_port_create(struct mlxsw_sp *mlxsw_sp, u8 local_port,
 		goto err_port_fids_init;
 	}
 
+	err = mlxsw_sp_tc_qdisc_init(mlxsw_sp_port);
+	if (err) {
+		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to initialize TC qdiscs\n",
+			mlxsw_sp_port->local_port);
+		goto err_port_qdiscs_init;
+	}
+
 	mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_get(mlxsw_sp_port, 1);
 	if (IS_ERR(mlxsw_sp_port_vlan)) {
 		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to create VID 1\n",
@@ -3067,6 +3249,8 @@ err_register_netdev:
 	mlxsw_sp_port_switchdev_fini(mlxsw_sp_port);
 	mlxsw_sp_port_vlan_put(mlxsw_sp_port_vlan);
 err_port_vlan_get:
+	mlxsw_sp_tc_qdisc_fini(mlxsw_sp_port);
+err_port_qdiscs_init:
 	mlxsw_sp_port_fids_fini(mlxsw_sp_port);
 err_port_fids_init:
 	mlxsw_sp_port_dcb_fini(mlxsw_sp_port);
@@ -3102,6 +3286,7 @@ static void mlxsw_sp_port_remove(struct mlxsw_sp *mlxsw_sp, u8 local_port)
 	mlxsw_sp->ports[local_port] = NULL;
 	mlxsw_sp_port_switchdev_fini(mlxsw_sp_port);
 	mlxsw_sp_port_vlan_flush(mlxsw_sp_port);
+	mlxsw_sp_tc_qdisc_fini(mlxsw_sp_port);
 	mlxsw_sp_port_fids_fini(mlxsw_sp_port);
 	mlxsw_sp_port_dcb_fini(mlxsw_sp_port);
 	mlxsw_sp_port_swid_set(mlxsw_sp_port, MLXSW_PORT_SWID_DISABLED_PORT);
@@ -3933,6 +4118,253 @@ static const struct mlxsw_config_profile mlxsw_sp_config_profile = {
 	.resource_query_enable		= 1,
 };
 
+static bool
+mlxsw_sp_resource_kvd_granularity_validate(struct netlink_ext_ack *extack,
+					   u64 size)
+{
+	const struct mlxsw_config_profile *profile;
+
+	profile = &mlxsw_sp_config_profile;
+	if (size % profile->kvd_hash_granularity) {
+		NL_SET_ERR_MSG_MOD(extack, "resource set with wrong granularity");
+		return false;
+	}
+	return true;
+}
+
+static int
+mlxsw_sp_resource_kvd_size_validate(struct devlink *devlink, u64 size,
+				    struct netlink_ext_ack *extack)
+{
+	NL_SET_ERR_MSG_MOD(extack, "kvd size cannot be changed");
+	return -EINVAL;
+}
+
+static int
+mlxsw_sp_resource_kvd_linear_size_validate(struct devlink *devlink, u64 size,
+					   struct netlink_ext_ack *extack)
+{
+	if (!mlxsw_sp_resource_kvd_granularity_validate(extack, size))
+		return -EINVAL;
+
+	return 0;
+}
+
+static int
+mlxsw_sp_resource_kvd_hash_single_size_validate(struct devlink *devlink, u64 size,
+						struct netlink_ext_ack *extack)
+{
+	struct mlxsw_core *mlxsw_core = devlink_priv(devlink);
+
+	if (!mlxsw_sp_resource_kvd_granularity_validate(extack, size))
+		return -EINVAL;
+
+	if (size < MLXSW_CORE_RES_GET(mlxsw_core, KVD_SINGLE_MIN_SIZE)) {
+		NL_SET_ERR_MSG_MOD(extack, "hash single size is smaller than minimum");
+		return -EINVAL;
+	}
+	return 0;
+}
+
+static int
+mlxsw_sp_resource_kvd_hash_double_size_validate(struct devlink *devlink, u64 size,
+						struct netlink_ext_ack *extack)
+{
+	struct mlxsw_core *mlxsw_core = devlink_priv(devlink);
+
+	if (!mlxsw_sp_resource_kvd_granularity_validate(extack, size))
+		return -EINVAL;
+
+	if (size < MLXSW_CORE_RES_GET(mlxsw_core, KVD_DOUBLE_MIN_SIZE)) {
+		NL_SET_ERR_MSG_MOD(extack, "hash double size is smaller than minimum");
+		return -EINVAL;
+	}
+	return 0;
+}
+
+static u64 mlxsw_sp_resource_kvd_linear_occ_get(struct devlink *devlink)
+{
+	struct mlxsw_core *mlxsw_core = devlink_priv(devlink);
+	struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
+
+	return mlxsw_sp_kvdl_occ_get(mlxsw_sp);
+}
+
+static struct devlink_resource_ops mlxsw_sp_resource_kvd_ops = {
+	.size_validate = mlxsw_sp_resource_kvd_size_validate,
+};
+
+static struct devlink_resource_ops mlxsw_sp_resource_kvd_linear_ops = {
+	.size_validate = mlxsw_sp_resource_kvd_linear_size_validate,
+	.occ_get = mlxsw_sp_resource_kvd_linear_occ_get,
+};
+
+static struct devlink_resource_ops mlxsw_sp_resource_kvd_hash_single_ops = {
+	.size_validate = mlxsw_sp_resource_kvd_hash_single_size_validate,
+};
+
+static struct devlink_resource_ops mlxsw_sp_resource_kvd_hash_double_ops = {
+	.size_validate = mlxsw_sp_resource_kvd_hash_double_size_validate,
+};
+
+static struct devlink_resource_size_params mlxsw_sp_kvd_size_params;
+static struct devlink_resource_size_params mlxsw_sp_linear_size_params;
+static struct devlink_resource_size_params mlxsw_sp_hash_single_size_params;
+static struct devlink_resource_size_params mlxsw_sp_hash_double_size_params;
+
+static void
+mlxsw_sp_resource_size_params_prepare(struct mlxsw_core *mlxsw_core)
+{
+	u32 single_size_min = MLXSW_CORE_RES_GET(mlxsw_core,
+						 KVD_SINGLE_MIN_SIZE);
+	u32 double_size_min = MLXSW_CORE_RES_GET(mlxsw_core,
+						 KVD_DOUBLE_MIN_SIZE);
+	u32 kvd_size = MLXSW_CORE_RES_GET(mlxsw_core, KVD_SIZE);
+	u32 linear_size_min = 0;
+
+	/* KVD top resource */
+	mlxsw_sp_kvd_size_params.size_min = kvd_size;
+	mlxsw_sp_kvd_size_params.size_max = kvd_size;
+	mlxsw_sp_kvd_size_params.size_granularity = MLXSW_SP_KVD_GRANULARITY;
+	mlxsw_sp_kvd_size_params.unit = DEVLINK_RESOURCE_UNIT_ENTRY;
+
+	/* Linear part init */
+	mlxsw_sp_linear_size_params.size_min = linear_size_min;
+	mlxsw_sp_linear_size_params.size_max = kvd_size - single_size_min -
+					       double_size_min;
+	mlxsw_sp_linear_size_params.size_granularity = MLXSW_SP_KVD_GRANULARITY;
+	mlxsw_sp_linear_size_params.unit = DEVLINK_RESOURCE_UNIT_ENTRY;
+
+	/* Hash double part init */
+	mlxsw_sp_hash_double_size_params.size_min = double_size_min;
+	mlxsw_sp_hash_double_size_params.size_max = kvd_size - single_size_min -
+						    linear_size_min;
+	mlxsw_sp_hash_double_size_params.size_granularity = MLXSW_SP_KVD_GRANULARITY;
+	mlxsw_sp_hash_double_size_params.unit = DEVLINK_RESOURCE_UNIT_ENTRY;
+
+	/* Hash single part init */
+	mlxsw_sp_hash_single_size_params.size_min = single_size_min;
+	mlxsw_sp_hash_single_size_params.size_max = kvd_size - double_size_min -
+						    linear_size_min;
+	mlxsw_sp_hash_single_size_params.size_granularity = MLXSW_SP_KVD_GRANULARITY;
+	mlxsw_sp_hash_single_size_params.unit = DEVLINK_RESOURCE_UNIT_ENTRY;
+}
+
+static int mlxsw_sp_resources_register(struct mlxsw_core *mlxsw_core)
+{
+	struct devlink *devlink = priv_to_devlink(mlxsw_core);
+	u32 kvd_size, single_size, double_size, linear_size;
+	const struct mlxsw_config_profile *profile;
+	int err;
+
+	profile = &mlxsw_sp_config_profile;
+	if (!MLXSW_CORE_RES_VALID(mlxsw_core, KVD_SIZE))
+		return -EIO;
+
+	mlxsw_sp_resource_size_params_prepare(mlxsw_core);
+	kvd_size = MLXSW_CORE_RES_GET(mlxsw_core, KVD_SIZE);
+	err = devlink_resource_register(devlink, MLXSW_SP_RESOURCE_NAME_KVD,
+					true, kvd_size,
+					MLXSW_SP_RESOURCE_KVD,
+					DEVLINK_RESOURCE_ID_PARENT_TOP,
+					&mlxsw_sp_kvd_size_params,
+					&mlxsw_sp_resource_kvd_ops);
+	if (err)
+		return err;
+
+	linear_size = profile->kvd_linear_size;
+	err = devlink_resource_register(devlink, MLXSW_SP_RESOURCE_NAME_KVD_LINEAR,
+					false, linear_size,
+					MLXSW_SP_RESOURCE_KVD_LINEAR,
+					MLXSW_SP_RESOURCE_KVD,
+					&mlxsw_sp_linear_size_params,
+					&mlxsw_sp_resource_kvd_linear_ops);
+	if (err)
+		return err;
+
+	double_size = kvd_size - linear_size;
+	double_size *= profile->kvd_hash_double_parts;
+	double_size /= profile->kvd_hash_double_parts +
+		       profile->kvd_hash_single_parts;
+	double_size = rounddown(double_size, profile->kvd_hash_granularity);
+	err = devlink_resource_register(devlink, MLXSW_SP_RESOURCE_NAME_KVD_HASH_DOUBLE,
+					false, double_size,
+					MLXSW_SP_RESOURCE_KVD_HASH_DOUBLE,
+					MLXSW_SP_RESOURCE_KVD,
+					&mlxsw_sp_hash_double_size_params,
+					&mlxsw_sp_resource_kvd_hash_double_ops);
+	if (err)
+		return err;
+
+	single_size = kvd_size - double_size - linear_size;
+	err = devlink_resource_register(devlink, MLXSW_SP_RESOURCE_NAME_KVD_HASH_SINGLE,
+					false, single_size,
+					MLXSW_SP_RESOURCE_KVD_HASH_SINGLE,
+					MLXSW_SP_RESOURCE_KVD,
+					&mlxsw_sp_hash_single_size_params,
+					&mlxsw_sp_resource_kvd_hash_single_ops);
+	if (err)
+		return err;
+
+	return 0;
+}
+
+static int mlxsw_sp_kvd_sizes_get(struct mlxsw_core *mlxsw_core,
+				  const struct mlxsw_config_profile *profile,
+				  u64 *p_single_size, u64 *p_double_size,
+				  u64 *p_linear_size)
+{
+	struct devlink *devlink = priv_to_devlink(mlxsw_core);
+	u32 double_size;
+	int err;
+
+	if (!MLXSW_CORE_RES_VALID(mlxsw_core, KVD_SINGLE_MIN_SIZE) ||
+	    !MLXSW_CORE_RES_VALID(mlxsw_core, KVD_DOUBLE_MIN_SIZE) ||
+	    !profile->used_kvd_split_data)
+		return -EIO;
+
+	/* The hash part is what is left of the KVD after the
+	 * linear part is taken out. It is split into single and
+	 * double sizes according to the parts ratio from the
+	 * profile. Both sizes must be multiples of the
+	 * granularity from the profile. If the user provided
+	 * the sizes, they are obtained via devlink.
+	 */
+	err = devlink_resource_size_get(devlink,
+					MLXSW_SP_RESOURCE_KVD_LINEAR,
+					p_linear_size);
+	if (err)
+		*p_linear_size = profile->kvd_linear_size;
+
+	err = devlink_resource_size_get(devlink,
+					MLXSW_SP_RESOURCE_KVD_HASH_DOUBLE,
+					p_double_size);
+	if (err) {
+		double_size = MLXSW_CORE_RES_GET(mlxsw_core, KVD_SIZE) -
+			      *p_linear_size;
+		double_size *= profile->kvd_hash_double_parts;
+		double_size /= profile->kvd_hash_double_parts +
+			       profile->kvd_hash_single_parts;
+		*p_double_size = rounddown(double_size,
+					   profile->kvd_hash_granularity);
+	}
+
+	err = devlink_resource_size_get(devlink,
+					MLXSW_SP_RESOURCE_KVD_HASH_SINGLE,
+					p_single_size);
+	if (err)
+		*p_single_size = MLXSW_CORE_RES_GET(mlxsw_core, KVD_SIZE) -
+				 *p_double_size - *p_linear_size;
+
+	/* Check results are legal. */
+	if (*p_single_size < MLXSW_CORE_RES_GET(mlxsw_core, KVD_SINGLE_MIN_SIZE) ||
+	    *p_double_size < MLXSW_CORE_RES_GET(mlxsw_core, KVD_DOUBLE_MIN_SIZE) ||
+	    MLXSW_CORE_RES_GET(mlxsw_core, KVD_SIZE) < *p_linear_size)
+		return -EIO;
+
+	return 0;
+}
+
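
/* A minimal standalone sketch of the hash-part split arithmetic used in
 * mlxsw_sp_resources_register() and mlxsw_sp_kvd_sizes_get() above. The
 * sizes and parts ratio below are made up for illustration; the real values
 * come from the config profile and from firmware resources.
 */
#include <stdio.h>

#define GRANULARITY 128	/* entries, as in MLXSW_SP_KVD_GRANULARITY */

int main(void)
{
	unsigned int kvd_size = 65536;		/* assumed total KVD size */
	unsigned int linear_size = 8192;	/* assumed linear part */
	unsigned int single_parts = 2;		/* assumed single:double = 2:1 */
	unsigned int double_parts = 1;
	unsigned int double_size, single_size;

	/* split what is left after the linear part by the parts ratio */
	double_size = (kvd_size - linear_size) * double_parts /
		      (double_parts + single_parts);
	double_size -= double_size % GRANULARITY;	/* rounddown() */
	single_size = kvd_size - linear_size - double_size;

	printf("double=%u single=%u\n", double_size, single_size);
	return 0;
}
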
 static struct mlxsw_driver mlxsw_sp_driver = {
 	.kind				= mlxsw_sp_driver_name,
 	.priv_size			= sizeof(struct mlxsw_sp),
@@ -3952,6 +4384,8 @@ static struct mlxsw_driver mlxsw_sp_driver = {
 	.sb_occ_port_pool_get		= mlxsw_sp_sb_occ_port_pool_get,
 	.sb_occ_tc_port_bind_get	= mlxsw_sp_sb_occ_tc_port_bind_get,
 	.txhdr_construct		= mlxsw_sp_txhdr_construct,
+	.resources_register		= mlxsw_sp_resources_register,
+	.kvd_sizes_get			= mlxsw_sp_kvd_sizes_get,
 	.txhdr_len			= MLXSW_TXHDR_LEN,
 	.profile			= &mlxsw_sp_config_profile,
 };
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.h b/drivers/net/ethernet/mellanox/mlxsw/spectrum.h
index 05ce1befd9b3..bdd8f94a452c 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.h
@@ -66,6 +66,18 @@
 #define MLXSW_SP_KVD_LINEAR_SIZE 98304 /* entries */
 #define MLXSW_SP_KVD_GRANULARITY 128
 
+#define MLXSW_SP_RESOURCE_NAME_KVD "kvd"
+#define MLXSW_SP_RESOURCE_NAME_KVD_LINEAR "linear"
+#define MLXSW_SP_RESOURCE_NAME_KVD_HASH_SINGLE "hash_single"
+#define MLXSW_SP_RESOURCE_NAME_KVD_HASH_DOUBLE "hash_double"
+
+enum mlxsw_sp_resource_id {
+	MLXSW_SP_RESOURCE_KVD,
+	MLXSW_SP_RESOURCE_KVD_LINEAR,
+	MLXSW_SP_RESOURCE_KVD_HASH_SINGLE,
+	MLXSW_SP_RESOURCE_KVD_HASH_DOUBLE,
+};
+
 struct mlxsw_sp_port;
 struct mlxsw_sp_rif;
 
@@ -204,29 +216,6 @@ struct mlxsw_sp_port_vlan {
 	struct list_head bridge_vlan_node;
 };
 
-enum mlxsw_sp_qdisc_type {
-	MLXSW_SP_QDISC_NO_QDISC,
-	MLXSW_SP_QDISC_RED,
-};
-
-struct mlxsw_sp_qdisc {
-	u32 handle;
-	enum mlxsw_sp_qdisc_type type;
-	struct red_stats xstats_base;
-	union {
-		struct {
-			u64 tail_drop_base;
-			u64 ecn_base;
-			u64 wred_drop_base;
-		} red;
-	} xstats;
-
-	u64 tx_bytes;
-	u64 tx_packets;
-	u64 drops;
-	u64 overlimits;
-};
-
 /* No need for an internal lock; at worst we miss a single periodic iteration */
 struct mlxsw_sp_port_xstats {
 	u64 ecn;
@@ -269,7 +258,10 @@ struct mlxsw_sp_port {
 	} periodic_hw_stats;
 	struct mlxsw_sp_port_sample *sample;
 	struct list_head vlans_list;
-	struct mlxsw_sp_qdisc root_qdisc;
+	struct mlxsw_sp_qdisc *root_qdisc;
+	unsigned acl_rule_count;
+	struct mlxsw_sp_acl_block *ing_acl_block;
+	struct mlxsw_sp_acl_block *eg_acl_block;
 };
 
 static inline bool
@@ -404,6 +396,16 @@ struct mlxsw_sp_port *mlxsw_sp_port_dev_lower_find(struct net_device *dev);
 struct mlxsw_sp_port *mlxsw_sp_port_lower_dev_hold(struct net_device *dev);
 void mlxsw_sp_port_dev_put(struct mlxsw_sp_port *mlxsw_sp_port);
 struct mlxsw_sp_port *mlxsw_sp_port_dev_lower_find_rcu(struct net_device *dev);
+int mlxsw_sp_span_mirror_add(struct mlxsw_sp_port *from,
+			     struct mlxsw_sp_port *to,
+			     enum mlxsw_sp_span_type type,
+			     bool bind);
+void mlxsw_sp_span_mirror_del(struct mlxsw_sp_port *from,
+			      u8 destination_port,
+			      enum mlxsw_sp_span_type type,
+			      bool bind);
+struct mlxsw_sp_span_entry *
+mlxsw_sp_span_entry_find(struct mlxsw_sp *mlxsw_sp, u8 local_port);
 
 /* spectrum_dcb.c */
 #ifdef CONFIG_MLXSW_SPECTRUM_DCB
@@ -458,13 +460,13 @@ void mlxsw_sp_kvdl_free(struct mlxsw_sp *mlxsw_sp, int entry_index);
 int mlxsw_sp_kvdl_alloc_size_query(struct mlxsw_sp *mlxsw_sp,
 				   unsigned int entry_count,
 				   unsigned int *p_alloc_size);
+u64 mlxsw_sp_kvdl_occ_get(const struct mlxsw_sp *mlxsw_sp);
 
 struct mlxsw_sp_acl_rule_info {
 	unsigned int priority;
 	struct mlxsw_afk_element_values values;
 	struct mlxsw_afa_block *act_block;
 	unsigned int counter_index;
-	bool counter_valid;
 };
 
 enum mlxsw_sp_acl_profile {
@@ -477,8 +479,11 @@ struct mlxsw_sp_acl_profile_ops {
 			   void *priv, void *ruleset_priv);
 	void (*ruleset_del)(struct mlxsw_sp *mlxsw_sp, void *ruleset_priv);
 	int (*ruleset_bind)(struct mlxsw_sp *mlxsw_sp, void *ruleset_priv,
-			    struct net_device *dev, bool ingress);
-	void (*ruleset_unbind)(struct mlxsw_sp *mlxsw_sp, void *ruleset_priv);
+			    struct mlxsw_sp_port *mlxsw_sp_port,
+			    bool ingress);
+	void (*ruleset_unbind)(struct mlxsw_sp *mlxsw_sp, void *ruleset_priv,
+			       struct mlxsw_sp_port *mlxsw_sp_port,
+			       bool ingress);
 	u16 (*ruleset_group_id)(void *ruleset_priv);
 	size_t rule_priv_size;
 	int (*rule_add)(struct mlxsw_sp *mlxsw_sp,
@@ -498,17 +503,34 @@ struct mlxsw_sp_acl_ops {
 				       enum mlxsw_sp_acl_profile profile);
 };
 
+struct mlxsw_sp_acl_block;
 struct mlxsw_sp_acl_ruleset;
 
 /* spectrum_acl.c */
 struct mlxsw_afk *mlxsw_sp_acl_afk(struct mlxsw_sp_acl *acl);
+struct mlxsw_sp *mlxsw_sp_acl_block_mlxsw_sp(struct mlxsw_sp_acl_block *block);
+unsigned int mlxsw_sp_acl_block_rule_count(struct mlxsw_sp_acl_block *block);
+void mlxsw_sp_acl_block_disable_inc(struct mlxsw_sp_acl_block *block);
+void mlxsw_sp_acl_block_disable_dec(struct mlxsw_sp_acl_block *block);
+bool mlxsw_sp_acl_block_disabled(struct mlxsw_sp_acl_block *block);
+struct mlxsw_sp_acl_block *mlxsw_sp_acl_block_create(struct mlxsw_sp *mlxsw_sp,
+						     struct net *net);
+void mlxsw_sp_acl_block_destroy(struct mlxsw_sp_acl_block *block);
+int mlxsw_sp_acl_block_bind(struct mlxsw_sp *mlxsw_sp,
+			    struct mlxsw_sp_acl_block *block,
+			    struct mlxsw_sp_port *mlxsw_sp_port,
+			    bool ingress);
+int mlxsw_sp_acl_block_unbind(struct mlxsw_sp *mlxsw_sp,
+			      struct mlxsw_sp_acl_block *block,
+			      struct mlxsw_sp_port *mlxsw_sp_port,
+			      bool ingress);
 struct mlxsw_sp_acl_ruleset *
-mlxsw_sp_acl_ruleset_lookup(struct mlxsw_sp *mlxsw_sp, struct net_device *dev,
-			    bool ingress, u32 chain_index,
+mlxsw_sp_acl_ruleset_lookup(struct mlxsw_sp *mlxsw_sp,
+			    struct mlxsw_sp_acl_block *block, u32 chain_index,
 			    enum mlxsw_sp_acl_profile profile);
 struct mlxsw_sp_acl_ruleset *
-mlxsw_sp_acl_ruleset_get(struct mlxsw_sp *mlxsw_sp, struct net_device *dev,
-			 bool ingress, u32 chain_index,
+mlxsw_sp_acl_ruleset_get(struct mlxsw_sp *mlxsw_sp,
+			 struct mlxsw_sp_acl_block *block, u32 chain_index,
 			 enum mlxsw_sp_acl_profile profile);
 void mlxsw_sp_acl_ruleset_put(struct mlxsw_sp *mlxsw_sp,
 			      struct mlxsw_sp_acl_ruleset *ruleset);
@@ -532,6 +554,10 @@ int mlxsw_sp_acl_rulei_act_jump(struct mlxsw_sp_acl_rule_info *rulei,
 				u16 group_id);
 int mlxsw_sp_acl_rulei_act_drop(struct mlxsw_sp_acl_rule_info *rulei);
 int mlxsw_sp_acl_rulei_act_trap(struct mlxsw_sp_acl_rule_info *rulei);
+int mlxsw_sp_acl_rulei_act_mirror(struct mlxsw_sp *mlxsw_sp,
+				  struct mlxsw_sp_acl_rule_info *rulei,
+				  struct mlxsw_sp_acl_block *block,
+				  struct net_device *out_dev);
 int mlxsw_sp_acl_rulei_act_fwd(struct mlxsw_sp *mlxsw_sp,
 			       struct mlxsw_sp_acl_rule_info *rulei,
 			       struct net_device *out_dev);
@@ -575,16 +601,23 @@ void mlxsw_sp_acl_fini(struct mlxsw_sp *mlxsw_sp);
 extern const struct mlxsw_sp_acl_ops mlxsw_sp_acl_tcam_ops;
 
 /* spectrum_flower.c */
-int mlxsw_sp_flower_replace(struct mlxsw_sp_port *mlxsw_sp_port, bool ingress,
+int mlxsw_sp_flower_replace(struct mlxsw_sp *mlxsw_sp,
+			    struct mlxsw_sp_acl_block *block,
 			    struct tc_cls_flower_offload *f);
-void mlxsw_sp_flower_destroy(struct mlxsw_sp_port *mlxsw_sp_port, bool ingress,
+void mlxsw_sp_flower_destroy(struct mlxsw_sp *mlxsw_sp,
+			     struct mlxsw_sp_acl_block *block,
 			     struct tc_cls_flower_offload *f);
-int mlxsw_sp_flower_stats(struct mlxsw_sp_port *mlxsw_sp_port, bool ingress,
+int mlxsw_sp_flower_stats(struct mlxsw_sp *mlxsw_sp,
+			  struct mlxsw_sp_acl_block *block,
 			  struct tc_cls_flower_offload *f);
 
 /* spectrum_qdisc.c */
+int mlxsw_sp_tc_qdisc_init(struct mlxsw_sp_port *mlxsw_sp_port);
+void mlxsw_sp_tc_qdisc_fini(struct mlxsw_sp_port *mlxsw_sp_port);
 int mlxsw_sp_setup_tc_red(struct mlxsw_sp_port *mlxsw_sp_port,
 			  struct tc_red_qopt_offload *p);
+int mlxsw_sp_setup_tc_prio(struct mlxsw_sp_port *mlxsw_sp_port,
+			   struct tc_prio_qopt_offload *p);
 
 /* spectrum_fid.c */
 int mlxsw_sp_fid_flood_set(struct mlxsw_sp_fid *fid,
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl.c
index 93dcd315f7d6..0897a5435cc2 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl.c
@@ -39,6 +39,7 @@
 #include <linux/string.h>
 #include <linux/rhashtable.h>
 #include <linux/netdevice.h>
+#include <net/net_namespace.h>
 #include <net/tc_act/tc_vlan.h>
 
 #include "reg.h"
@@ -70,9 +71,23 @@ struct mlxsw_afk *mlxsw_sp_acl_afk(struct mlxsw_sp_acl *acl)
 	return acl->afk;
 }
 
-struct mlxsw_sp_acl_ruleset_ht_key {
-	struct net_device *dev; /* dev this ruleset is bound to */
+struct mlxsw_sp_acl_block_binding {
+	struct list_head list;
+	struct net_device *dev;
+	struct mlxsw_sp_port *mlxsw_sp_port;
 	bool ingress;
+};
+
+struct mlxsw_sp_acl_block {
+	struct list_head binding_list;
+	struct mlxsw_sp_acl_ruleset *ruleset_zero;
+	struct mlxsw_sp *mlxsw_sp;
+	unsigned int rule_count;
+	unsigned int disable_count;
+};
+
+struct mlxsw_sp_acl_ruleset_ht_key {
+	struct mlxsw_sp_acl_block *block;
 	u32 chain_index;
 	const struct mlxsw_sp_acl_profile_ops *ops;
 };
@@ -118,8 +133,185 @@ struct mlxsw_sp_fid *mlxsw_sp_acl_dummy_fid(struct mlxsw_sp *mlxsw_sp)
 	return mlxsw_sp->acl->dummy_fid;
 }
 
+struct mlxsw_sp *mlxsw_sp_acl_block_mlxsw_sp(struct mlxsw_sp_acl_block *block)
+{
+	return block->mlxsw_sp;
+}
+
+unsigned int mlxsw_sp_acl_block_rule_count(struct mlxsw_sp_acl_block *block)
+{
+	return block ? block->rule_count : 0;
+}
+
+void mlxsw_sp_acl_block_disable_inc(struct mlxsw_sp_acl_block *block)
+{
+	if (block)
+		block->disable_count++;
+}
+
+void mlxsw_sp_acl_block_disable_dec(struct mlxsw_sp_acl_block *block)
+{
+	if (block)
+		block->disable_count--;
+}
+
+bool mlxsw_sp_acl_block_disabled(struct mlxsw_sp_acl_block *block)
+{
+	return block->disable_count;
+}
+
+static int
+mlxsw_sp_acl_ruleset_bind(struct mlxsw_sp *mlxsw_sp,
+			  struct mlxsw_sp_acl_block *block,
+			  struct mlxsw_sp_acl_block_binding *binding)
+{
+	struct mlxsw_sp_acl_ruleset *ruleset = block->ruleset_zero;
+	const struct mlxsw_sp_acl_profile_ops *ops = ruleset->ht_key.ops;
+
+	return ops->ruleset_bind(mlxsw_sp, ruleset->priv,
+				 binding->mlxsw_sp_port, binding->ingress);
+}
+
+static void
+mlxsw_sp_acl_ruleset_unbind(struct mlxsw_sp *mlxsw_sp,
+			    struct mlxsw_sp_acl_block *block,
+			    struct mlxsw_sp_acl_block_binding *binding)
+{
+	struct mlxsw_sp_acl_ruleset *ruleset = block->ruleset_zero;
+	const struct mlxsw_sp_acl_profile_ops *ops = ruleset->ht_key.ops;
+
+	ops->ruleset_unbind(mlxsw_sp, ruleset->priv,
+			    binding->mlxsw_sp_port, binding->ingress);
+}
+
+static bool mlxsw_sp_acl_ruleset_block_bound(struct mlxsw_sp_acl_block *block)
+{
+	return block->ruleset_zero;
+}
+
+static int
+mlxsw_sp_acl_ruleset_block_bind(struct mlxsw_sp *mlxsw_sp,
+				struct mlxsw_sp_acl_ruleset *ruleset,
+				struct mlxsw_sp_acl_block *block)
+{
+	struct mlxsw_sp_acl_block_binding *binding;
+	int err;
+
+	block->ruleset_zero = ruleset;
+	list_for_each_entry(binding, &block->binding_list, list) {
+		err = mlxsw_sp_acl_ruleset_bind(mlxsw_sp, block, binding);
+		if (err)
+			goto rollback;
+	}
+	return 0;
+
+rollback:
+	list_for_each_entry_continue_reverse(binding, &block->binding_list,
+					     list)
+		mlxsw_sp_acl_ruleset_unbind(mlxsw_sp, block, binding);
+	block->ruleset_zero = NULL;
+
+	return err;
+}
+
+static void
+mlxsw_sp_acl_ruleset_block_unbind(struct mlxsw_sp *mlxsw_sp,
+				  struct mlxsw_sp_acl_ruleset *ruleset,
+				  struct mlxsw_sp_acl_block *block)
+{
+	struct mlxsw_sp_acl_block_binding *binding;
+
+	list_for_each_entry(binding, &block->binding_list, list)
+		mlxsw_sp_acl_ruleset_unbind(mlxsw_sp, block, binding);
+	block->ruleset_zero = NULL;
+}
+
+struct mlxsw_sp_acl_block *mlxsw_sp_acl_block_create(struct mlxsw_sp *mlxsw_sp,
+						     struct net *net)
+{
+	struct mlxsw_sp_acl_block *block;
+
+	block = kzalloc(sizeof(*block), GFP_KERNEL);
+	if (!block)
+		return NULL;
+	INIT_LIST_HEAD(&block->binding_list);
+	block->mlxsw_sp = mlxsw_sp;
+	return block;
+}
+
+void mlxsw_sp_acl_block_destroy(struct mlxsw_sp_acl_block *block)
+{
+	WARN_ON(!list_empty(&block->binding_list));
+	kfree(block);
+}
+
+static struct mlxsw_sp_acl_block_binding *
+mlxsw_sp_acl_block_lookup(struct mlxsw_sp_acl_block *block,
+			  struct mlxsw_sp_port *mlxsw_sp_port, bool ingress)
+{
+	struct mlxsw_sp_acl_block_binding *binding;
+
+	list_for_each_entry(binding, &block->binding_list, list)
+		if (binding->mlxsw_sp_port == mlxsw_sp_port &&
+		    binding->ingress == ingress)
+			return binding;
+	return NULL;
+}
+
+int mlxsw_sp_acl_block_bind(struct mlxsw_sp *mlxsw_sp,
+			    struct mlxsw_sp_acl_block *block,
+			    struct mlxsw_sp_port *mlxsw_sp_port,
+			    bool ingress)
+{
+	struct mlxsw_sp_acl_block_binding *binding;
+	int err;
+
+	if (WARN_ON(mlxsw_sp_acl_block_lookup(block, mlxsw_sp_port, ingress)))
+		return -EEXIST;
+
+	binding = kzalloc(sizeof(*binding), GFP_KERNEL);
+	if (!binding)
+		return -ENOMEM;
+	binding->mlxsw_sp_port = mlxsw_sp_port;
+	binding->ingress = ingress;
+
+	if (mlxsw_sp_acl_ruleset_block_bound(block)) {
+		err = mlxsw_sp_acl_ruleset_bind(mlxsw_sp, block, binding);
+		if (err)
+			goto err_ruleset_bind;
+	}
+
+	list_add(&binding->list, &block->binding_list);
+	return 0;
+
+err_ruleset_bind:
+	kfree(binding);
+	return err;
+}
+
+int mlxsw_sp_acl_block_unbind(struct mlxsw_sp *mlxsw_sp,
+			      struct mlxsw_sp_acl_block *block,
+			      struct mlxsw_sp_port *mlxsw_sp_port,
+			      bool ingress)
+{
+	struct mlxsw_sp_acl_block_binding *binding;
+
+	binding = mlxsw_sp_acl_block_lookup(block, mlxsw_sp_port, ingress);
+	if (!binding)
+		return -ENOENT;
+
+	list_del(&binding->list);
+
+	if (mlxsw_sp_acl_ruleset_block_bound(block))
+		mlxsw_sp_acl_ruleset_unbind(mlxsw_sp, block, binding);
+
+	kfree(binding);
+	return 0;
+}
+
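
/* A generic userspace sketch of the bind/rollback idiom used by
 * mlxsw_sp_acl_ruleset_block_bind() above: on a partial failure, undo
 * exactly the bindings that already succeeded, in reverse order. The
 * helpers below are hypothetical stand-ins for the real bind callbacks.
 */
#include <stdio.h>

static int bind_one(int i)
{
	return i == 3 ? -1 : 0;		/* pretend the 4th binding fails */
}

static void unbind_one(int i)
{
	printf("rolled back binding %d\n", i);
}

int main(void)
{
	int i, err = 0;

	for (i = 0; i < 5; i++) {
		err = bind_one(i);
		if (err)
			goto rollback;
	}
	return 0;

rollback:
	while (--i >= 0)	/* mirrors list_for_each_entry_continue_reverse() */
		unbind_one(i);
	return err;
}
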
 static struct mlxsw_sp_acl_ruleset *
 mlxsw_sp_acl_ruleset_create(struct mlxsw_sp *mlxsw_sp,
+			    struct mlxsw_sp_acl_block *block, u32 chain_index,
 			    const struct mlxsw_sp_acl_profile_ops *ops)
 {
 	struct mlxsw_sp_acl *acl = mlxsw_sp->acl;
@@ -132,6 +324,8 @@ mlxsw_sp_acl_ruleset_create(struct mlxsw_sp *mlxsw_sp,
 	if (!ruleset)
 		return ERR_PTR(-ENOMEM);
 	ruleset->ref_count = 1;
+	ruleset->ht_key.block = block;
+	ruleset->ht_key.chain_index = chain_index;
 	ruleset->ht_key.ops = ops;
 
 	err = rhashtable_init(&ruleset->rule_ht, &mlxsw_sp_acl_rule_ht_params);
@@ -142,68 +336,50 @@ mlxsw_sp_acl_ruleset_create(struct mlxsw_sp *mlxsw_sp,
 	if (err)
 		goto err_ops_ruleset_add;
 
-	return ruleset;
-
-err_ops_ruleset_add:
-	rhashtable_destroy(&ruleset->rule_ht);
-err_rhashtable_init:
-	kfree(ruleset);
-	return ERR_PTR(err);
-}
-
-static void mlxsw_sp_acl_ruleset_destroy(struct mlxsw_sp *mlxsw_sp,
-					 struct mlxsw_sp_acl_ruleset *ruleset)
-{
-	const struct mlxsw_sp_acl_profile_ops *ops = ruleset->ht_key.ops;
-
-	ops->ruleset_del(mlxsw_sp, ruleset->priv);
-	rhashtable_destroy(&ruleset->rule_ht);
-	kfree(ruleset);
-}
-
-static int mlxsw_sp_acl_ruleset_bind(struct mlxsw_sp *mlxsw_sp,
-				     struct mlxsw_sp_acl_ruleset *ruleset,
-				     struct net_device *dev, bool ingress,
-				     u32 chain_index)
-{
-	const struct mlxsw_sp_acl_profile_ops *ops = ruleset->ht_key.ops;
-	struct mlxsw_sp_acl *acl = mlxsw_sp->acl;
-	int err;
-
-	ruleset->ht_key.dev = dev;
-	ruleset->ht_key.ingress = ingress;
-	ruleset->ht_key.chain_index = chain_index;
 	err = rhashtable_insert_fast(&acl->ruleset_ht, &ruleset->ht_node,
 				     mlxsw_sp_acl_ruleset_ht_params);
 	if (err)
-		return err;
-	if (!ruleset->ht_key.chain_index) {
+		goto err_ht_insert;
+
+	if (!chain_index) {
 		/* We only need the ruleset with chain index 0, the implicit
 		 * one, to be bound directly to the device. The rest of the
 		 * rulesets are bound through the "Goto action set".
 		 */
-		err = ops->ruleset_bind(mlxsw_sp, ruleset->priv, dev, ingress);
+		err = mlxsw_sp_acl_ruleset_block_bind(mlxsw_sp, ruleset, block);
 		if (err)
-			goto err_ops_ruleset_bind;
+			goto err_ruleset_bind;
 	}
-	return 0;
 
-err_ops_ruleset_bind:
+	return ruleset;
+
+err_ruleset_bind:
 	rhashtable_remove_fast(&acl->ruleset_ht, &ruleset->ht_node,
 			       mlxsw_sp_acl_ruleset_ht_params);
-	return err;
+err_ht_insert:
+	ops->ruleset_del(mlxsw_sp, ruleset->priv);
+err_ops_ruleset_add:
+	rhashtable_destroy(&ruleset->rule_ht);
+err_rhashtable_init:
+	kfree(ruleset);
+	return ERR_PTR(err);
 }
 
-static void mlxsw_sp_acl_ruleset_unbind(struct mlxsw_sp *mlxsw_sp,
-					struct mlxsw_sp_acl_ruleset *ruleset)
+static void mlxsw_sp_acl_ruleset_destroy(struct mlxsw_sp *mlxsw_sp,
+					 struct mlxsw_sp_acl_ruleset *ruleset)
 {
 	const struct mlxsw_sp_acl_profile_ops *ops = ruleset->ht_key.ops;
+	struct mlxsw_sp_acl_block *block = ruleset->ht_key.block;
+	u32 chain_index = ruleset->ht_key.chain_index;
 	struct mlxsw_sp_acl *acl = mlxsw_sp->acl;
 
-	if (!ruleset->ht_key.chain_index)
-		ops->ruleset_unbind(mlxsw_sp, ruleset->priv);
+	if (!chain_index)
+		mlxsw_sp_acl_ruleset_block_unbind(mlxsw_sp, ruleset, block);
 	rhashtable_remove_fast(&acl->ruleset_ht, &ruleset->ht_node,
 			       mlxsw_sp_acl_ruleset_ht_params);
+	ops->ruleset_del(mlxsw_sp, ruleset->priv);
+	rhashtable_destroy(&ruleset->rule_ht);
+	kfree(ruleset);
 }
 
 static void mlxsw_sp_acl_ruleset_ref_inc(struct mlxsw_sp_acl_ruleset *ruleset)
@@ -216,20 +392,18 @@ static void mlxsw_sp_acl_ruleset_ref_dec(struct mlxsw_sp *mlxsw_sp,
 {
 	if (--ruleset->ref_count)
 		return;
-	mlxsw_sp_acl_ruleset_unbind(mlxsw_sp, ruleset);
 	mlxsw_sp_acl_ruleset_destroy(mlxsw_sp, ruleset);
 }
 
 static struct mlxsw_sp_acl_ruleset *
-__mlxsw_sp_acl_ruleset_lookup(struct mlxsw_sp_acl *acl, struct net_device *dev,
-			      bool ingress, u32 chain_index,
+__mlxsw_sp_acl_ruleset_lookup(struct mlxsw_sp_acl *acl,
+			      struct mlxsw_sp_acl_block *block, u32 chain_index,
 			      const struct mlxsw_sp_acl_profile_ops *ops)
 {
 	struct mlxsw_sp_acl_ruleset_ht_key ht_key;
 
 	memset(&ht_key, 0, sizeof(ht_key));
-	ht_key.dev = dev;
-	ht_key.ingress = ingress;
+	ht_key.block = block;
 	ht_key.chain_index = chain_index;
 	ht_key.ops = ops;
 	return rhashtable_lookup_fast(&acl->ruleset_ht, &ht_key,
@@ -237,8 +411,8 @@ __mlxsw_sp_acl_ruleset_lookup(struct mlxsw_sp_acl *acl, struct net_device *dev,
 }
 
 struct mlxsw_sp_acl_ruleset *
-mlxsw_sp_acl_ruleset_lookup(struct mlxsw_sp *mlxsw_sp, struct net_device *dev,
-			    bool ingress, u32 chain_index,
+mlxsw_sp_acl_ruleset_lookup(struct mlxsw_sp *mlxsw_sp,
+			    struct mlxsw_sp_acl_block *block, u32 chain_index,
 			    enum mlxsw_sp_acl_profile profile)
 {
 	const struct mlxsw_sp_acl_profile_ops *ops;
@@ -248,45 +422,31 @@ mlxsw_sp_acl_ruleset_lookup(struct mlxsw_sp *mlxsw_sp, struct net_device *dev,
 	ops = acl->ops->profile_ops(mlxsw_sp, profile);
 	if (!ops)
 		return ERR_PTR(-EINVAL);
-	ruleset = __mlxsw_sp_acl_ruleset_lookup(acl, dev, ingress,
-						chain_index, ops);
+	ruleset = __mlxsw_sp_acl_ruleset_lookup(acl, block, chain_index, ops);
 	if (!ruleset)
 		return ERR_PTR(-ENOENT);
 	return ruleset;
 }
 
 struct mlxsw_sp_acl_ruleset *
-mlxsw_sp_acl_ruleset_get(struct mlxsw_sp *mlxsw_sp, struct net_device *dev,
-			 bool ingress, u32 chain_index,
+mlxsw_sp_acl_ruleset_get(struct mlxsw_sp *mlxsw_sp,
+			 struct mlxsw_sp_acl_block *block, u32 chain_index,
 			 enum mlxsw_sp_acl_profile profile)
 {
 	const struct mlxsw_sp_acl_profile_ops *ops;
 	struct mlxsw_sp_acl *acl = mlxsw_sp->acl;
 	struct mlxsw_sp_acl_ruleset *ruleset;
-	int err;
 
 	ops = acl->ops->profile_ops(mlxsw_sp, profile);
 	if (!ops)
 		return ERR_PTR(-EINVAL);
 
-	ruleset = __mlxsw_sp_acl_ruleset_lookup(acl, dev, ingress,
-						chain_index, ops);
+	ruleset = __mlxsw_sp_acl_ruleset_lookup(acl, block, chain_index, ops);
 	if (ruleset) {
 		mlxsw_sp_acl_ruleset_ref_inc(ruleset);
 		return ruleset;
 	}
-	ruleset = mlxsw_sp_acl_ruleset_create(mlxsw_sp, ops);
-	if (IS_ERR(ruleset))
-		return ruleset;
-	err = mlxsw_sp_acl_ruleset_bind(mlxsw_sp, ruleset, dev,
-					ingress, chain_index);
-	if (err)
-		goto err_ruleset_bind;
-	return ruleset;
-
-err_ruleset_bind:
-	mlxsw_sp_acl_ruleset_destroy(mlxsw_sp, ruleset);
-	return ERR_PTR(err);
+	return mlxsw_sp_acl_ruleset_create(mlxsw_sp, block, chain_index, ops);
 }
 
 void mlxsw_sp_acl_ruleset_put(struct mlxsw_sp *mlxsw_sp,
@@ -302,27 +462,6 @@ u16 mlxsw_sp_acl_ruleset_group_id(struct mlxsw_sp_acl_ruleset *ruleset)
 	return ops->ruleset_group_id(ruleset->priv);
 }
 
-static int
-mlxsw_sp_acl_rulei_counter_alloc(struct mlxsw_sp *mlxsw_sp,
-				 struct mlxsw_sp_acl_rule_info *rulei)
-{
-	int err;
-
-	err = mlxsw_sp_flow_counter_alloc(mlxsw_sp, &rulei->counter_index);
-	if (err)
-		return err;
-	rulei->counter_valid = true;
-	return 0;
-}
-
-static void
-mlxsw_sp_acl_rulei_counter_free(struct mlxsw_sp *mlxsw_sp,
-				struct mlxsw_sp_acl_rule_info *rulei)
-{
-	rulei->counter_valid = false;
-	mlxsw_sp_flow_counter_free(mlxsw_sp, rulei->counter_index);
-}
-
 struct mlxsw_sp_acl_rule_info *
 mlxsw_sp_acl_rulei_create(struct mlxsw_sp_acl *acl)
 {
@@ -427,6 +566,34 @@ int mlxsw_sp_acl_rulei_act_fwd(struct mlxsw_sp *mlxsw_sp,
 					  local_port, in_port);
 }
 
+int mlxsw_sp_acl_rulei_act_mirror(struct mlxsw_sp *mlxsw_sp,
+				  struct mlxsw_sp_acl_rule_info *rulei,
+				  struct mlxsw_sp_acl_block *block,
+				  struct net_device *out_dev)
+{
+	struct mlxsw_sp_acl_block_binding *binding;
+	struct mlxsw_sp_port *out_port;
+	struct mlxsw_sp_port *in_port;
+
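+	/* The mirror action is programmed with a single source port, so it
+	 * can only be offloaded when the block is bound to exactly one
+	 * port/direction pair.
+	 */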
+	if (!list_is_singular(&block->binding_list))
+		return -EOPNOTSUPP;
+
+	binding = list_first_entry(&block->binding_list,
+				   struct mlxsw_sp_acl_block_binding, list);
+	in_port = binding->mlxsw_sp_port;
+	if (!mlxsw_sp_port_dev_check(out_dev))
+		return -EINVAL;
+
+	out_port = netdev_priv(out_dev);
+	if (out_port->mlxsw_sp != mlxsw_sp)
+		return -EINVAL;
+
+	return mlxsw_afa_block_append_mirror(rulei->act_block,
+					     in_port->local_port,
+					     out_port->local_port,
+					     binding->ingress);
+}
+
 int mlxsw_sp_acl_rulei_act_vlan(struct mlxsw_sp *mlxsw_sp,
 				struct mlxsw_sp_acl_rule_info *rulei,
 				u32 action, u16 vid, u16 proto, u8 prio)
@@ -459,7 +626,7 @@ int mlxsw_sp_acl_rulei_act_count(struct mlxsw_sp *mlxsw_sp,
 				 struct mlxsw_sp_acl_rule_info *rulei)
 {
 	return mlxsw_afa_block_append_counter(rulei->act_block,
-					      rulei->counter_index);
+					      &rulei->counter_index);
 }
 
 int mlxsw_sp_acl_rulei_act_fid_set(struct mlxsw_sp *mlxsw_sp,
@@ -493,13 +660,8 @@ mlxsw_sp_acl_rule_create(struct mlxsw_sp *mlxsw_sp,
 		goto err_rulei_create;
 	}
 
-	err = mlxsw_sp_acl_rulei_counter_alloc(mlxsw_sp, rule->rulei);
-	if (err)
-		goto err_counter_alloc;
 	return rule;
 
-err_counter_alloc:
-	mlxsw_sp_acl_rulei_destroy(rule->rulei);
 err_rulei_create:
 	kfree(rule);
 err_alloc:
@@ -512,7 +674,6 @@ void mlxsw_sp_acl_rule_destroy(struct mlxsw_sp *mlxsw_sp,
 {
 	struct mlxsw_sp_acl_ruleset *ruleset = rule->ruleset;
 
-	mlxsw_sp_acl_rulei_counter_free(mlxsw_sp, rule->rulei);
 	mlxsw_sp_acl_rulei_destroy(rule->rulei);
 	kfree(rule);
 	mlxsw_sp_acl_ruleset_ref_dec(mlxsw_sp, ruleset);
@@ -535,6 +696,7 @@ int mlxsw_sp_acl_rule_add(struct mlxsw_sp *mlxsw_sp,
 		goto err_rhashtable_insert;
 
 	list_add_tail(&rule->list, &mlxsw_sp->acl->rules);
+	ruleset->ht_key.block->rule_count++;
 	return 0;
 
 err_rhashtable_insert:
@@ -548,6 +710,7 @@ void mlxsw_sp_acl_rule_del(struct mlxsw_sp *mlxsw_sp,
 	struct mlxsw_sp_acl_ruleset *ruleset = rule->ruleset;
 	const struct mlxsw_sp_acl_profile_ops *ops = ruleset->ht_key.ops;
 
+	ruleset->ht_key.block->rule_count--;
 	list_del(&rule->list);
 	rhashtable_remove_fast(&ruleset->rule_ht, &rule->ht_node,
 			       mlxsw_sp_acl_rule_ht_params);
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_flex_actions.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_flex_actions.c
index 4d3340ed0291..6ca6894125f0 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_flex_actions.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_flex_actions.c
@@ -108,11 +108,77 @@ static void mlxsw_sp_act_kvdl_fwd_entry_del(void *priv, u32 kvdl_index)
 	mlxsw_sp_kvdl_free(mlxsw_sp, kvdl_index);
 }
 
+static int
+mlxsw_sp_act_counter_index_get(void *priv, unsigned int *p_counter_index)
+{
+	struct mlxsw_sp *mlxsw_sp = priv;
+
+	return mlxsw_sp_flow_counter_alloc(mlxsw_sp, p_counter_index);
+}
+
+static void
+mlxsw_sp_act_counter_index_put(void *priv, unsigned int counter_index)
+{
+	struct mlxsw_sp *mlxsw_sp = priv;
+
+	mlxsw_sp_flow_counter_free(mlxsw_sp, counter_index);
+}
+
+static int
+mlxsw_sp_act_mirror_add(void *priv, u8 local_in_port, u8 local_out_port,
+			bool ingress, int *p_span_id)
+{
+	struct mlxsw_sp_port *in_port, *out_port;
+	struct mlxsw_sp_span_entry *span_entry;
+	struct mlxsw_sp *mlxsw_sp = priv;
+	enum mlxsw_sp_span_type type;
+	int err;
+
+	type = ingress ? MLXSW_SP_SPAN_INGRESS : MLXSW_SP_SPAN_EGRESS;
+	out_port = mlxsw_sp->ports[local_out_port];
+	in_port = mlxsw_sp->ports[local_in_port];
+
+	err = mlxsw_sp_span_mirror_add(in_port, out_port, type, false);
+	if (err)
+		return err;
+
+	span_entry = mlxsw_sp_span_entry_find(mlxsw_sp, local_out_port);
+	if (!span_entry) {
+		err = -ENOENT;
+		goto err_span_entry_find;
+	}
+
+	*p_span_id = span_entry->id;
+	return 0;
+
+err_span_entry_find:
+	mlxsw_sp_span_mirror_del(in_port, local_out_port, type, false);
+	return err;
+}
+
+static void
+mlxsw_sp_act_mirror_del(void *priv, u8 local_in_port, u8 local_out_port,
+			bool ingress)
+{
+	struct mlxsw_sp *mlxsw_sp = priv;
+	struct mlxsw_sp_port *in_port;
+	enum mlxsw_sp_span_type type;
+
+	type = ingress ? MLXSW_SP_SPAN_INGRESS : MLXSW_SP_SPAN_EGRESS;
+	in_port = mlxsw_sp->ports[local_in_port];
+
+	mlxsw_sp_span_mirror_del(in_port, local_out_port, type, false);
+}
+
 static const struct mlxsw_afa_ops mlxsw_sp_act_afa_ops = {
 	.kvdl_set_add		= mlxsw_sp_act_kvdl_set_add,
 	.kvdl_set_del		= mlxsw_sp_act_kvdl_set_del,
 	.kvdl_fwd_entry_add	= mlxsw_sp_act_kvdl_fwd_entry_add,
 	.kvdl_fwd_entry_del	= mlxsw_sp_act_kvdl_fwd_entry_del,
+	.counter_index_get	= mlxsw_sp_act_counter_index_get,
+	.counter_index_put	= mlxsw_sp_act_counter_index_put,
+	.mirror_add		= mlxsw_sp_act_mirror_add,
+	.mirror_del		= mlxsw_sp_act_mirror_del,
 };
 
 int mlxsw_sp_afa_init(struct mlxsw_sp *mlxsw_sp)
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.c
index 7e8284b46968..c6e180c2be1e 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.c
@@ -154,10 +154,6 @@ struct mlxsw_sp_acl_tcam_group {
 	struct list_head region_list;
 	unsigned int region_count;
 	struct rhashtable chunk_ht;
-	struct {
-		u16 local_port;
-		bool ingress;
-	} bound;
 	struct mlxsw_sp_acl_tcam_group_ops *ops;
 	const struct mlxsw_sp_acl_tcam_pattern *patterns;
 	unsigned int patterns_count;
@@ -262,35 +258,29 @@ static void mlxsw_sp_acl_tcam_group_del(struct mlxsw_sp *mlxsw_sp,
 static int
 mlxsw_sp_acl_tcam_group_bind(struct mlxsw_sp *mlxsw_sp,
 			     struct mlxsw_sp_acl_tcam_group *group,
-			     struct net_device *dev, bool ingress)
+			     struct mlxsw_sp_port *mlxsw_sp_port,
+			     bool ingress)
 {
-	struct mlxsw_sp_port *mlxsw_sp_port;
 	char ppbt_pl[MLXSW_REG_PPBT_LEN];
 
-	if (!mlxsw_sp_port_dev_check(dev))
-		return -EINVAL;
-
-	mlxsw_sp_port = netdev_priv(dev);
-	group->bound.local_port = mlxsw_sp_port->local_port;
-	group->bound.ingress = ingress;
-	mlxsw_reg_ppbt_pack(ppbt_pl,
-			    group->bound.ingress ? MLXSW_REG_PXBT_E_IACL :
-						   MLXSW_REG_PXBT_E_EACL,
-			    MLXSW_REG_PXBT_OP_BIND, group->bound.local_port,
+	mlxsw_reg_ppbt_pack(ppbt_pl, ingress ? MLXSW_REG_PXBT_E_IACL :
+					       MLXSW_REG_PXBT_E_EACL,
+			    MLXSW_REG_PXBT_OP_BIND, mlxsw_sp_port->local_port,
 			    group->id);
 	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ppbt), ppbt_pl);
 }
 
 static void
 mlxsw_sp_acl_tcam_group_unbind(struct mlxsw_sp *mlxsw_sp,
-			       struct mlxsw_sp_acl_tcam_group *group)
+			       struct mlxsw_sp_acl_tcam_group *group,
+			       struct mlxsw_sp_port *mlxsw_sp_port,
+			       bool ingress)
 {
 	char ppbt_pl[MLXSW_REG_PPBT_LEN];
 
-	mlxsw_reg_ppbt_pack(ppbt_pl,
-			    group->bound.ingress ? MLXSW_REG_PXBT_E_IACL :
-						   MLXSW_REG_PXBT_E_EACL,
-			    MLXSW_REG_PXBT_OP_UNBIND, group->bound.local_port,
+	mlxsw_reg_ppbt_pack(ppbt_pl, ingress ? MLXSW_REG_PXBT_E_IACL :
+					       MLXSW_REG_PXBT_E_EACL,
+			    MLXSW_REG_PXBT_OP_UNBIND, mlxsw_sp_port->local_port,
 			    group->id);
 	mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ppbt), ppbt_pl);
 }
@@ -1056,21 +1046,25 @@ mlxsw_sp_acl_tcam_flower_ruleset_del(struct mlxsw_sp *mlxsw_sp,
 static int
 mlxsw_sp_acl_tcam_flower_ruleset_bind(struct mlxsw_sp *mlxsw_sp,
 				      void *ruleset_priv,
-				      struct net_device *dev, bool ingress)
+				      struct mlxsw_sp_port *mlxsw_sp_port,
+				      bool ingress)
 {
 	struct mlxsw_sp_acl_tcam_flower_ruleset *ruleset = ruleset_priv;
 
 	return mlxsw_sp_acl_tcam_group_bind(mlxsw_sp, &ruleset->group,
-					    dev, ingress);
+					    mlxsw_sp_port, ingress);
 }
 
 static void
 mlxsw_sp_acl_tcam_flower_ruleset_unbind(struct mlxsw_sp *mlxsw_sp,
-					void *ruleset_priv)
+					void *ruleset_priv,
+					struct mlxsw_sp_port *mlxsw_sp_port,
+					bool ingress)
 {
 	struct mlxsw_sp_acl_tcam_flower_ruleset *ruleset = ruleset_priv;
 
-	mlxsw_sp_acl_tcam_group_unbind(mlxsw_sp, &ruleset->group);
+	mlxsw_sp_acl_tcam_group_unbind(mlxsw_sp, &ruleset->group,
+				       mlxsw_sp_port, ingress);
 }
 
 static u16
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_dpipe.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_dpipe.c
index 96fdba78acab..f56fa18d6b26 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_dpipe.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_dpipe.c
@@ -771,14 +771,33 @@ static struct devlink_dpipe_table_ops mlxsw_sp_host4_ops = {
 	.size_get = mlxsw_sp_dpipe_table_host4_size_get,
 };
 
+#define MLXSW_SP_DPIPE_TABLE_RESOURCE_UNIT_HOST4 1
+
 static int mlxsw_sp_dpipe_host4_table_init(struct mlxsw_sp *mlxsw_sp)
 {
 	struct devlink *devlink = priv_to_devlink(mlxsw_sp->core);
+	int err;
 
-	return devlink_dpipe_table_register(devlink,
-					    MLXSW_SP_DPIPE_TABLE_NAME_HOST4,
-					    &mlxsw_sp_host4_ops,
-					    mlxsw_sp, false);
+	err = devlink_dpipe_table_register(devlink,
+					   MLXSW_SP_DPIPE_TABLE_NAME_HOST4,
+					   &mlxsw_sp_host4_ops,
+					   mlxsw_sp, false);
+	if (err)
+		return err;
+
+	err = devlink_dpipe_table_resource_set(devlink,
+					       MLXSW_SP_DPIPE_TABLE_NAME_HOST4,
+					       MLXSW_SP_RESOURCE_KVD_HASH_SINGLE,
+					       MLXSW_SP_DPIPE_TABLE_RESOURCE_UNIT_HOST4);
+	if (err)
+		goto err_resource_set;
+
+	return 0;
+
+err_resource_set:
+	devlink_dpipe_table_unregister(devlink,
+				       MLXSW_SP_DPIPE_TABLE_NAME_HOST4);
+	return err;
 }
 
 static void mlxsw_sp_dpipe_host4_table_fini(struct mlxsw_sp *mlxsw_sp)
@@ -829,14 +848,33 @@ static struct devlink_dpipe_table_ops mlxsw_sp_host6_ops = {
 	.size_get = mlxsw_sp_dpipe_table_host6_size_get,
 };
 
+#define MLXSW_SP_DPIPE_TABLE_RESOURCE_UNIT_HOST6 2
+
 static int mlxsw_sp_dpipe_host6_table_init(struct mlxsw_sp *mlxsw_sp)
 {
 	struct devlink *devlink = priv_to_devlink(mlxsw_sp->core);
+	int err;
 
-	return devlink_dpipe_table_register(devlink,
-					    MLXSW_SP_DPIPE_TABLE_NAME_HOST6,
-					    &mlxsw_sp_host6_ops,
-					    mlxsw_sp, false);
+	err = devlink_dpipe_table_register(devlink,
+					   MLXSW_SP_DPIPE_TABLE_NAME_HOST6,
+					   &mlxsw_sp_host6_ops,
+					   mlxsw_sp, false);
+	if (err)
+		return err;
+
+	err = devlink_dpipe_table_resource_set(devlink,
+					       MLXSW_SP_DPIPE_TABLE_NAME_HOST6,
+					       MLXSW_SP_RESOURCE_KVD_HASH_DOUBLE,
+					       MLXSW_SP_DPIPE_TABLE_RESOURCE_UNIT_HOST6);
+	if (err)
+		goto err_resource_set;
+
+	return 0;
+
+err_resource_set:
+	devlink_dpipe_table_unregister(devlink,
+				       MLXSW_SP_DPIPE_TABLE_NAME_HOST6);
+	return err;
 }
 
 static void mlxsw_sp_dpipe_host6_table_fini(struct mlxsw_sp *mlxsw_sp)
@@ -1213,14 +1251,33 @@ static struct devlink_dpipe_table_ops mlxsw_sp_dpipe_table_adj_ops = {
 	.size_get = mlxsw_sp_dpipe_table_adj_size_get,
 };
 
+#define MLXSW_SP_DPIPE_TABLE_RESOURCE_UNIT_ADJ 1
+
 static int mlxsw_sp_dpipe_adj_table_init(struct mlxsw_sp *mlxsw_sp)
 {
 	struct devlink *devlink = priv_to_devlink(mlxsw_sp->core);
+	int err;
 
-	return devlink_dpipe_table_register(devlink,
-					    MLXSW_SP_DPIPE_TABLE_NAME_ADJ,
-					    &mlxsw_sp_dpipe_table_adj_ops,
-					    mlxsw_sp, false);
+	err = devlink_dpipe_table_register(devlink,
+					   MLXSW_SP_DPIPE_TABLE_NAME_ADJ,
+					   &mlxsw_sp_dpipe_table_adj_ops,
+					   mlxsw_sp, false);
+	if (err)
+		return err;
+
+	err = devlink_dpipe_table_resource_set(devlink,
+					       MLXSW_SP_DPIPE_TABLE_NAME_ADJ,
+					       MLXSW_SP_RESOURCE_KVD_LINEAR,
+					       MLXSW_SP_DPIPE_TABLE_RESOURCE_UNIT_ADJ);
+	if (err)
+		goto err_resource_set;
+
+	return 0;
+
+err_resource_set:
+	devlink_dpipe_table_unregister(devlink,
+				       MLXSW_SP_DPIPE_TABLE_NAME_ADJ);
+	return err;
 }
 
 static void mlxsw_sp_dpipe_adj_table_fini(struct mlxsw_sp *mlxsw_sp)
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_flower.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_flower.c
index 2f0e57857ea4..6ce00e28d4ea 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_flower.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_flower.c
@@ -35,6 +35,7 @@
 #include <linux/kernel.h>
 #include <linux/errno.h>
 #include <linux/netdevice.h>
+#include <net/net_namespace.h>
 #include <net/flow_dissector.h>
 #include <net/pkt_cls.h>
 #include <net/tc_act/tc_gact.h>
@@ -45,7 +46,7 @@
 #include "core_acl_flex_keys.h"
 
 static int mlxsw_sp_flower_parse_actions(struct mlxsw_sp *mlxsw_sp,
-					 struct net_device *dev, bool ingress,
+					 struct mlxsw_sp_acl_block *block,
 					 struct mlxsw_sp_acl_rule_info *rulei,
 					 struct tcf_exts *exts)
 {
@@ -80,8 +81,7 @@ static int mlxsw_sp_flower_parse_actions(struct mlxsw_sp *mlxsw_sp,
 			struct mlxsw_sp_acl_ruleset *ruleset;
 			u16 group_id;
 
-			ruleset = mlxsw_sp_acl_ruleset_lookup(mlxsw_sp, dev,
-							      ingress,
+			ruleset = mlxsw_sp_acl_ruleset_lookup(mlxsw_sp, block,
 							      chain_index,
 							      MLXSW_SP_ACL_PROFILE_FLOWER);
 			if (IS_ERR(ruleset))
@@ -92,7 +92,6 @@ static int mlxsw_sp_flower_parse_actions(struct mlxsw_sp *mlxsw_sp,
 			if (err)
 				return err;
 		} else if (is_tcf_mirred_egress_redirect(a)) {
-			int ifindex = tcf_mirred_ifindex(a);
 			struct net_device *out_dev;
 			struct mlxsw_sp_fid *fid;
 			u16 fid_index;
@@ -104,14 +103,18 @@ static int mlxsw_sp_flower_parse_actions(struct mlxsw_sp *mlxsw_sp,
 			if (err)
 				return err;
 
-			out_dev = __dev_get_by_index(dev_net(dev), ifindex);
-			if (out_dev == dev)
-				out_dev = NULL;
-
+			out_dev = tcf_mirred_dev(a);
 			err = mlxsw_sp_acl_rulei_act_fwd(mlxsw_sp, rulei,
 							 out_dev);
 			if (err)
 				return err;
+		} else if (is_tcf_mirred_egress_mirror(a)) {
+			struct net_device *out_dev = tcf_mirred_dev(a);
+
+			err = mlxsw_sp_acl_rulei_act_mirror(mlxsw_sp, rulei,
+							    block, out_dev);
+			if (err)
+				return err;
 		} else if (is_tcf_vlan(a)) {
 			u16 proto = be16_to_cpu(tcf_vlan_push_proto(a));
 			u32 action = tcf_vlan_action(a);
@@ -266,7 +269,7 @@ static int mlxsw_sp_flower_parse_ip(struct mlxsw_sp *mlxsw_sp,
 }
 
 static int mlxsw_sp_flower_parse(struct mlxsw_sp *mlxsw_sp,
-				 struct net_device *dev, bool ingress,
+				 struct mlxsw_sp_acl_block *block,
 				 struct mlxsw_sp_acl_rule_info *rulei,
 				 struct tc_cls_flower_offload *f)
 {
@@ -384,21 +387,19 @@ static int mlxsw_sp_flower_parse(struct mlxsw_sp *mlxsw_sp,
 	if (err)
 		return err;
 
-	return mlxsw_sp_flower_parse_actions(mlxsw_sp, dev, ingress,
-					     rulei, f->exts);
+	return mlxsw_sp_flower_parse_actions(mlxsw_sp, block, rulei, f->exts);
 }
 
-int mlxsw_sp_flower_replace(struct mlxsw_sp_port *mlxsw_sp_port, bool ingress,
+int mlxsw_sp_flower_replace(struct mlxsw_sp *mlxsw_sp,
+			    struct mlxsw_sp_acl_block *block,
 			    struct tc_cls_flower_offload *f)
 {
-	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
-	struct net_device *dev = mlxsw_sp_port->dev;
 	struct mlxsw_sp_acl_rule_info *rulei;
 	struct mlxsw_sp_acl_ruleset *ruleset;
 	struct mlxsw_sp_acl_rule *rule;
 	int err;
 
-	ruleset = mlxsw_sp_acl_ruleset_get(mlxsw_sp, dev, ingress,
+	ruleset = mlxsw_sp_acl_ruleset_get(mlxsw_sp, block,
 					   f->common.chain_index,
 					   MLXSW_SP_ACL_PROFILE_FLOWER);
 	if (IS_ERR(ruleset))
@@ -411,7 +412,7 @@ int mlxsw_sp_flower_replace(struct mlxsw_sp_port *mlxsw_sp_port, bool ingress,
 	}
 
 	rulei = mlxsw_sp_acl_rule_rulei(rule);
-	err = mlxsw_sp_flower_parse(mlxsw_sp, dev, ingress, rulei, f);
+	err = mlxsw_sp_flower_parse(mlxsw_sp, block, rulei, f);
 	if (err)
 		goto err_flower_parse;
 
@@ -435,15 +436,15 @@ err_rule_create:
 	return err;
 }
 
-void mlxsw_sp_flower_destroy(struct mlxsw_sp_port *mlxsw_sp_port, bool ingress,
+void mlxsw_sp_flower_destroy(struct mlxsw_sp *mlxsw_sp,
+			     struct mlxsw_sp_acl_block *block,
 			     struct tc_cls_flower_offload *f)
 {
-	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
 	struct mlxsw_sp_acl_ruleset *ruleset;
 	struct mlxsw_sp_acl_rule *rule;
 
-	ruleset = mlxsw_sp_acl_ruleset_get(mlxsw_sp, mlxsw_sp_port->dev,
-					   ingress, f->common.chain_index,
+	ruleset = mlxsw_sp_acl_ruleset_get(mlxsw_sp, block,
+					   f->common.chain_index,
 					   MLXSW_SP_ACL_PROFILE_FLOWER);
 	if (IS_ERR(ruleset))
 		return;
@@ -457,10 +458,10 @@ void mlxsw_sp_flower_destroy(struct mlxsw_sp_port *mlxsw_sp_port, bool ingress,
 	mlxsw_sp_acl_ruleset_put(mlxsw_sp, ruleset);
 }
 
-int mlxsw_sp_flower_stats(struct mlxsw_sp_port *mlxsw_sp_port, bool ingress,
+int mlxsw_sp_flower_stats(struct mlxsw_sp *mlxsw_sp,
+			  struct mlxsw_sp_acl_block *block,
 			  struct tc_cls_flower_offload *f)
 {
-	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
 	struct mlxsw_sp_acl_ruleset *ruleset;
 	struct mlxsw_sp_acl_rule *rule;
 	u64 packets;
@@ -468,8 +469,8 @@ int mlxsw_sp_flower_stats(struct mlxsw_sp_port *mlxsw_sp_port, bool ingress,
 	u64 bytes;
 	int err;
 
-	ruleset = mlxsw_sp_acl_ruleset_get(mlxsw_sp, mlxsw_sp_port->dev,
-					   ingress, f->common.chain_index,
+	ruleset = mlxsw_sp_acl_ruleset_get(mlxsw_sp, block,
+					   f->common.chain_index,
 					   MLXSW_SP_ACL_PROFILE_FLOWER);
 	if (WARN_ON(IS_ERR(ruleset)))
 		return -EINVAL;
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_kvdl.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_kvdl.c
index 310c38247b5c..55f9d2d70f9e 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_kvdl.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_kvdl.c
@@ -286,6 +286,32 @@ static void mlxsw_sp_kvdl_parts_fini(struct mlxsw_sp *mlxsw_sp)
 		mlxsw_sp_kvdl_part_fini(mlxsw_sp, i);
 }
 
+static u64 mlxsw_sp_kvdl_part_occ(struct mlxsw_sp_kvdl_part *part)
+{
+	unsigned int nr_entries;
+	int bit = -1;
+	u64 occ = 0;
+
+	nr_entries = (part->info->end_index -
+		      part->info->start_index + 1) /
+		      part->info->alloc_size;
+	while ((bit = find_next_bit(part->usage, nr_entries, bit + 1))
+		< nr_entries)
+		occ += part->info->alloc_size;
+	return occ;
+}
+
+u64 mlxsw_sp_kvdl_occ_get(const struct mlxsw_sp *mlxsw_sp)
+{
+	struct mlxsw_sp_kvdl_part *part;
+	u64 occ = 0;
+
+	list_for_each_entry(part, &mlxsw_sp->kvdl->parts_list, list)
+		occ += mlxsw_sp_kvdl_part_occ(part);
+
+	return occ;
+}
+
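
/* A minimal userspace sketch of the occupancy walk in
 * mlxsw_sp_kvdl_part_occ() above: every set bit in the usage bitmap stands
 * for one allocation of alloc_size entries. The bitmap value and sizes are
 * made up for illustration.
 */
#include <stdio.h>

int main(void)
{
	unsigned long usage = 0x2d;	/* bits 0, 2, 3 and 5 set */
	unsigned int nr_entries = 6;	/* slots in this part */
	unsigned int alloc_size = 32;	/* entries per allocation */
	unsigned int bit, occ = 0;

	for (bit = 0; bit < nr_entries; bit++)	/* stand-in for find_next_bit() */
		if (usage & (1UL << bit))
			occ += alloc_size;

	printf("occupancy = %u entries\n", occ);	/* 4 * 32 = 128 */
	return 0;
}
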
 int mlxsw_sp_kvdl_init(struct mlxsw_sp *mlxsw_sp)
 {
 	struct mlxsw_sp_kvdl *kvdl;
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_mr_tcam.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_mr_tcam.c
index 34a0b632e5dd..4c7f32d4288d 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_mr_tcam.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_mr_tcam.c
@@ -243,7 +243,8 @@ mlxsw_sp_mr_tcam_afa_block_create(struct mlxsw_sp *mlxsw_sp,
 	if (!afa_block)
 		return ERR_PTR(-ENOMEM);
 
-	err = mlxsw_afa_block_append_counter(afa_block, counter_index);
+	err = mlxsw_afa_block_append_allocated_counter(afa_block,
+						       counter_index);
 	if (err)
 		goto err;
 
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_qdisc.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_qdisc.c
index b5397da94d7f..0b7670459051 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_qdisc.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_qdisc.c
@@ -41,6 +41,150 @@
 #include "spectrum.h"
 #include "reg.h"
 
+#define MLXSW_SP_PRIO_BAND_TO_TCLASS(band) (IEEE_8021QAZ_MAX_TCS - band - 1)
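+/* With IEEE_8021QAZ_MAX_TCS == 8, the macro above maps band 0 (the
+ * highest-priority band) to tclass 7 and band 7 to tclass 0.
+ */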
+
+enum mlxsw_sp_qdisc_type {
+	MLXSW_SP_QDISC_NO_QDISC,
+	MLXSW_SP_QDISC_RED,
+	MLXSW_SP_QDISC_PRIO,
+};
+
+struct mlxsw_sp_qdisc_ops {
+	enum mlxsw_sp_qdisc_type type;
+	int (*check_params)(struct mlxsw_sp_port *mlxsw_sp_port,
+			    struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
+			    void *params);
+	int (*replace)(struct mlxsw_sp_port *mlxsw_sp_port,
+		       struct mlxsw_sp_qdisc *mlxsw_sp_qdisc, void *params);
+	int (*destroy)(struct mlxsw_sp_port *mlxsw_sp_port,
+		       struct mlxsw_sp_qdisc *mlxsw_sp_qdisc);
+	int (*get_stats)(struct mlxsw_sp_port *mlxsw_sp_port,
+			 struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
+			 struct tc_qopt_offload_stats *stats_ptr);
+	int (*get_xstats)(struct mlxsw_sp_port *mlxsw_sp_port,
+			  struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
+			  void *xstats_ptr);
+	void (*clean_stats)(struct mlxsw_sp_port *mlxsw_sp_port,
+			    struct mlxsw_sp_qdisc *mlxsw_sp_qdisc);
+	/* unoffload - to be used for a qdisc that stops being offloaded without
+	 * being destroyed.
+	 */
+	void (*unoffload)(struct mlxsw_sp_port *mlxsw_sp_port,
+			  struct mlxsw_sp_qdisc *mlxsw_sp_qdisc, void *params);
+};
+
+struct mlxsw_sp_qdisc {
+	u32 handle;
+	u8 tclass_num;
+	union {
+		struct red_stats red;
+	} xstats_base;
+	struct mlxsw_sp_qdisc_stats {
+		u64 tx_bytes;
+		u64 tx_packets;
+		u64 drops;
+		u64 overlimits;
+		u64 backlog;
+	} stats_base;
+
+	struct mlxsw_sp_qdisc_ops *ops;
+};
+
+static bool
+mlxsw_sp_qdisc_compare(struct mlxsw_sp_qdisc *mlxsw_sp_qdisc, u32 handle,
+		       enum mlxsw_sp_qdisc_type type)
+{
+	return mlxsw_sp_qdisc && mlxsw_sp_qdisc->ops &&
+	       mlxsw_sp_qdisc->ops->type == type &&
+	       mlxsw_sp_qdisc->handle == handle;
+}
+
+static int
+mlxsw_sp_qdisc_destroy(struct mlxsw_sp_port *mlxsw_sp_port,
+		       struct mlxsw_sp_qdisc *mlxsw_sp_qdisc)
+{
+	int err = 0;
+
+	if (!mlxsw_sp_qdisc)
+		return 0;
+
+	if (mlxsw_sp_qdisc->ops && mlxsw_sp_qdisc->ops->destroy)
+		err = mlxsw_sp_qdisc->ops->destroy(mlxsw_sp_port,
+						   mlxsw_sp_qdisc);
+
+	mlxsw_sp_qdisc->handle = TC_H_UNSPEC;
+	mlxsw_sp_qdisc->ops = NULL;
+	return err;
+}
+
+static int
+mlxsw_sp_qdisc_replace(struct mlxsw_sp_port *mlxsw_sp_port, u32 handle,
+		       struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
+		       struct mlxsw_sp_qdisc_ops *ops, void *params)
+{
+	int err;
+
+	if (mlxsw_sp_qdisc->ops && mlxsw_sp_qdisc->ops->type != ops->type)
+		/* If this location already held a different qdisc instance
+		 * of the same type, its configuration can simply be
+		 * overridden. Otherwise, the old qdisc must be removed
+		 * before the new one is set up.
+		 */
+		mlxsw_sp_qdisc_destroy(mlxsw_sp_port, mlxsw_sp_qdisc);
+	err = ops->check_params(mlxsw_sp_port, mlxsw_sp_qdisc, params);
+	if (err)
+		goto err_bad_param;
+
+	err = ops->replace(mlxsw_sp_port, mlxsw_sp_qdisc, params);
+	if (err)
+		goto err_config;
+
+	if (mlxsw_sp_qdisc->handle != handle) {
+		mlxsw_sp_qdisc->ops = ops;
+		if (ops->clean_stats)
+			ops->clean_stats(mlxsw_sp_port, mlxsw_sp_qdisc);
+	}
+
+	mlxsw_sp_qdisc->handle = handle;
+	return 0;
+
+err_bad_param:
+err_config:
+	if (mlxsw_sp_qdisc->handle == handle && ops->unoffload)
+		ops->unoffload(mlxsw_sp_port, mlxsw_sp_qdisc, params);
+
+	mlxsw_sp_qdisc_destroy(mlxsw_sp_port, mlxsw_sp_qdisc);
+	return err;
+}
+
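
/* A minimal sketch of the "snapshot base, report deltas" scheme behind the
 * stats_base fields above: the hardware counters are cumulative, so a base
 * is recorded when a qdisc is (re)created (clean_stats) and later readouts
 * report only the difference. The counter values here are invented.
 */
#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint64_t hw_tx_packets = 1000;	/* cumulative HW counter */
	uint64_t base = hw_tx_packets;	/* clean_stats(): snapshot */
	uint64_t delta;

	hw_tx_packets += 250;		/* traffic flows... */

	delta = hw_tx_packets - base;	/* get_stats(): qdisc-local view */
	base += delta;			/* advance the base for the next readout */
	printf("tx_packets since qdisc creation: %llu\n",
	       (unsigned long long)delta);
	return 0;
}
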
+static int
+mlxsw_sp_qdisc_get_stats(struct mlxsw_sp_port *mlxsw_sp_port,
+			 struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
+			 struct tc_qopt_offload_stats *stats_ptr)
+{
+	if (mlxsw_sp_qdisc && mlxsw_sp_qdisc->ops &&
+	    mlxsw_sp_qdisc->ops->get_stats)
+		return mlxsw_sp_qdisc->ops->get_stats(mlxsw_sp_port,
+						      mlxsw_sp_qdisc,
+						      stats_ptr);
+
+	return -EOPNOTSUPP;
+}
+
+static int
+mlxsw_sp_qdisc_get_xstats(struct mlxsw_sp_port *mlxsw_sp_port,
+			  struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
+			  void *xstats_ptr)
+{
+	if (mlxsw_sp_qdisc && mlxsw_sp_qdisc->ops &&
+	    mlxsw_sp_qdisc->ops->get_xstats)
+		return mlxsw_sp_qdisc->ops->get_xstats(mlxsw_sp_port,
+						       mlxsw_sp_qdisc,
+						       xstats_ptr);
+
+	return -EOPNOTSUPP;
+}
+
 static int
 mlxsw_sp_tclass_congestion_enable(struct mlxsw_sp_port *mlxsw_sp_port,
 				  int tclass_num, u32 min, u32 max,
@@ -80,80 +224,78 @@ mlxsw_sp_tclass_congestion_disable(struct mlxsw_sp_port *mlxsw_sp_port,
 }
 
 static void
-mlxsw_sp_setup_tc_qdisc_clean_stats(struct mlxsw_sp_port *mlxsw_sp_port,
-				    struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
-				    int tclass_num)
+mlxsw_sp_setup_tc_qdisc_red_clean_stats(struct mlxsw_sp_port *mlxsw_sp_port,
+					struct mlxsw_sp_qdisc *mlxsw_sp_qdisc)
 {
-	struct red_stats *xstats_base = &mlxsw_sp_qdisc->xstats_base;
+	u8 tclass_num = mlxsw_sp_qdisc->tclass_num;
+	struct mlxsw_sp_qdisc_stats *stats_base;
 	struct mlxsw_sp_port_xstats *xstats;
 	struct rtnl_link_stats64 *stats;
+	struct red_stats *red_base;
 
 	xstats = &mlxsw_sp_port->periodic_hw_stats.xstats;
 	stats = &mlxsw_sp_port->periodic_hw_stats.stats;
+	stats_base = &mlxsw_sp_qdisc->stats_base;
+	red_base = &mlxsw_sp_qdisc->xstats_base.red;
 
-	mlxsw_sp_qdisc->tx_packets = stats->tx_packets;
-	mlxsw_sp_qdisc->tx_bytes = stats->tx_bytes;
+	stats_base->tx_packets = stats->tx_packets;
+	stats_base->tx_bytes = stats->tx_bytes;
 
-	switch (mlxsw_sp_qdisc->type) {
-	case MLXSW_SP_QDISC_RED:
-		xstats_base->prob_mark = xstats->ecn;
-		xstats_base->prob_drop = xstats->wred_drop[tclass_num];
-		xstats_base->pdrop = xstats->tail_drop[tclass_num];
+	red_base->prob_mark = xstats->ecn;
+	red_base->prob_drop = xstats->wred_drop[tclass_num];
+	red_base->pdrop = xstats->tail_drop[tclass_num];
 
-		mlxsw_sp_qdisc->overlimits = xstats_base->prob_drop +
-					     xstats_base->prob_mark;
-		mlxsw_sp_qdisc->drops = xstats_base->prob_drop +
-					xstats_base->pdrop;
-		break;
-	default:
-		break;
-	}
+	stats_base->overlimits = red_base->prob_drop + red_base->prob_mark;
+	stats_base->drops = red_base->prob_drop + red_base->pdrop;
+
+	stats_base->backlog = 0;
 }
 
 static int
-mlxsw_sp_qdisc_red_destroy(struct mlxsw_sp_port *mlxsw_sp_port, u32 handle,
-			   struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
-			   int tclass_num)
+mlxsw_sp_qdisc_red_destroy(struct mlxsw_sp_port *mlxsw_sp_port,
+			   struct mlxsw_sp_qdisc *mlxsw_sp_qdisc)
 {
-	int err;
-
-	if (mlxsw_sp_qdisc->handle != handle)
-		return 0;
-
-	err = mlxsw_sp_tclass_congestion_disable(mlxsw_sp_port, tclass_num);
-	mlxsw_sp_qdisc->handle = TC_H_UNSPEC;
-	mlxsw_sp_qdisc->type = MLXSW_SP_QDISC_NO_QDISC;
-
-	return err;
+	return mlxsw_sp_tclass_congestion_disable(mlxsw_sp_port,
+						  mlxsw_sp_qdisc->tclass_num);
 }
 
 static int
-mlxsw_sp_qdisc_red_replace(struct mlxsw_sp_port *mlxsw_sp_port, u32 handle,
-			   struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
-			   int tclass_num,
-			   struct tc_red_qopt_offload_params *p)
+mlxsw_sp_qdisc_red_check_params(struct mlxsw_sp_port *mlxsw_sp_port,
+				struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
+				void *params)
 {
 	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
-	u32 min, max;
-	u64 prob;
-	int err = 0;
+	struct tc_red_qopt_offload_params *p = params;
 
 	if (p->min > p->max) {
 		dev_err(mlxsw_sp->bus_info->dev,
 			"spectrum: RED: min %u is bigger then max %u\n", p->min,
 			p->max);
-		goto err_bad_param;
+		return -EINVAL;
 	}
 	if (p->max > MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_BUFFER_SIZE)) {
 		dev_err(mlxsw_sp->bus_info->dev,
 			"spectrum: RED: max value %u is too big\n", p->max);
-		goto err_bad_param;
+		return -EINVAL;
 	}
 	if (p->min == 0 || p->max == 0) {
 		dev_err(mlxsw_sp->bus_info->dev,
 			"spectrum: RED: 0 value is illegal for min and max\n");
-		goto err_bad_param;
+		return -EINVAL;
 	}
+	return 0;
+}
+
+static int
+mlxsw_sp_qdisc_red_replace(struct mlxsw_sp_port *mlxsw_sp_port,
+			   struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
+			   void *params)
+{
+	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
+	struct tc_red_qopt_offload_params *p = params;
+	u8 tclass_num = mlxsw_sp_qdisc->tclass_num;
+	u32 min, max;
+	u64 prob;
 
 	/* calculate probability in percentage */
 	prob = p->probability;
@@ -162,116 +304,309 @@ mlxsw_sp_qdisc_red_replace(struct mlxsw_sp_port *mlxsw_sp_port, u32 handle,
 	prob = DIV_ROUND_UP(prob, 1 << 16);
 	min = mlxsw_sp_bytes_cells(mlxsw_sp, p->min);
 	max = mlxsw_sp_bytes_cells(mlxsw_sp, p->max);
-	err = mlxsw_sp_tclass_congestion_enable(mlxsw_sp_port, tclass_num, min,
-						max, prob, p->is_ecn);
-	if (err)
-		goto err_config;
-
-	mlxsw_sp_qdisc->type = MLXSW_SP_QDISC_RED;
-	if (mlxsw_sp_qdisc->handle != handle)
-		mlxsw_sp_setup_tc_qdisc_clean_stats(mlxsw_sp_port,
-						    mlxsw_sp_qdisc,
-						    tclass_num);
+	return mlxsw_sp_tclass_congestion_enable(mlxsw_sp_port, tclass_num, min,
+						 max, prob, p->is_ecn);
+}
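
/* A worked sketch of the probability conversion above, assuming
 * p->probability is a fraction in 32-bit fixed point (scaled by 2^32).
 * Part of the conversion sits between the two hunks shown here, so the
 * scaling and the second division are assumptions, not taken from the
 * patch itself.
 */
#include <stdio.h>
#include <stdint.h>

#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

int main(void)
{
	uint64_t prob = 1ULL << 31;	/* 50% as a 0.32 fixed-point fraction */

	prob *= 100;			/* fraction -> percent, still fixed point */
	prob = DIV_ROUND_UP(prob, 1ULL << 16);
	prob = DIV_ROUND_UP(prob, 1ULL << 16);	/* shed the 32 fractional bits */
	printf("probability = %llu%%\n", (unsigned long long)prob);	/* 50 */
	return 0;
}
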
 
-	mlxsw_sp_qdisc->handle = handle;
-	return 0;
+static void
+mlxsw_sp_qdisc_red_unoffload(struct mlxsw_sp_port *mlxsw_sp_port,
+			     struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
+			     void *params)
+{
+	struct tc_red_qopt_offload_params *p = params;
+	u64 backlog;
 
-err_bad_param:
-	err = -EINVAL;
-err_config:
-	mlxsw_sp_qdisc_red_destroy(mlxsw_sp_port, mlxsw_sp_qdisc->handle,
-				   mlxsw_sp_qdisc, tclass_num);
-	return err;
+	backlog = mlxsw_sp_cells_bytes(mlxsw_sp_port->mlxsw_sp,
+				       mlxsw_sp_qdisc->stats_base.backlog);
+	p->qstats->backlog -= backlog;
 }
 
 static int
-mlxsw_sp_qdisc_get_red_xstats(struct mlxsw_sp_port *mlxsw_sp_port, u32 handle,
+mlxsw_sp_qdisc_get_red_xstats(struct mlxsw_sp_port *mlxsw_sp_port,
 			      struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
-			      int tclass_num, struct red_stats *res)
+			      void *xstats_ptr)
 {
-	struct red_stats *xstats_base = &mlxsw_sp_qdisc->xstats_base;
+	struct red_stats *xstats_base = &mlxsw_sp_qdisc->xstats_base.red;
+	u8 tclass_num = mlxsw_sp_qdisc->tclass_num;
 	struct mlxsw_sp_port_xstats *xstats;
-
-	if (mlxsw_sp_qdisc->handle != handle ||
-	    mlxsw_sp_qdisc->type != MLXSW_SP_QDISC_RED)
-		return -EOPNOTSUPP;
+	struct red_stats *res = xstats_ptr;
+	int early_drops, marks, pdrops;
 
 	xstats = &mlxsw_sp_port->periodic_hw_stats.xstats;
 
-	res->prob_drop = xstats->wred_drop[tclass_num] - xstats_base->prob_drop;
-	res->prob_mark = xstats->ecn - xstats_base->prob_mark;
-	res->pdrop = xstats->tail_drop[tclass_num] - xstats_base->pdrop;
+	early_drops = xstats->wred_drop[tclass_num] - xstats_base->prob_drop;
+	marks = xstats->ecn - xstats_base->prob_mark;
+	pdrops = xstats->tail_drop[tclass_num] - xstats_base->pdrop;
+
+	res->pdrop += pdrops;
+	res->prob_drop += early_drops;
+	res->prob_mark += marks;
+
+	xstats_base->pdrop += pdrops;
+	xstats_base->prob_drop += early_drops;
+	xstats_base->prob_mark += marks;
 	return 0;
 }
 
 static int
-mlxsw_sp_qdisc_get_red_stats(struct mlxsw_sp_port *mlxsw_sp_port, u32 handle,
+mlxsw_sp_qdisc_get_red_stats(struct mlxsw_sp_port *mlxsw_sp_port,
 			     struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
-			     int tclass_num,
-			     struct tc_red_qopt_offload_stats *res)
+			     struct tc_qopt_offload_stats *stats_ptr)
 {
-	u64 tx_bytes, tx_packets, overlimits, drops;
+	u64 tx_bytes, tx_packets, overlimits, drops, backlog;
+	u8 tclass_num = mlxsw_sp_qdisc->tclass_num;
+	struct mlxsw_sp_qdisc_stats *stats_base;
 	struct mlxsw_sp_port_xstats *xstats;
 	struct rtnl_link_stats64 *stats;
 
-	if (mlxsw_sp_qdisc->handle != handle ||
-	    mlxsw_sp_qdisc->type != MLXSW_SP_QDISC_RED)
-		return -EOPNOTSUPP;
-
 	xstats = &mlxsw_sp_port->periodic_hw_stats.xstats;
 	stats = &mlxsw_sp_port->periodic_hw_stats.stats;
+	stats_base = &mlxsw_sp_qdisc->stats_base;
 
-	tx_bytes = stats->tx_bytes - mlxsw_sp_qdisc->tx_bytes;
-	tx_packets = stats->tx_packets - mlxsw_sp_qdisc->tx_packets;
+	tx_bytes = stats->tx_bytes - stats_base->tx_bytes;
+	tx_packets = stats->tx_packets - stats_base->tx_packets;
 	overlimits = xstats->wred_drop[tclass_num] + xstats->ecn -
-		     mlxsw_sp_qdisc->overlimits;
+		     stats_base->overlimits;
 	drops = xstats->wred_drop[tclass_num] + xstats->tail_drop[tclass_num] -
-		mlxsw_sp_qdisc->drops;
-
-	_bstats_update(res->bstats, tx_bytes, tx_packets);
-	res->qstats->overlimits += overlimits;
-	res->qstats->drops += drops;
-	res->qstats->backlog += mlxsw_sp_cells_bytes(mlxsw_sp_port->mlxsw_sp,
-						xstats->backlog[tclass_num]);
-
-	mlxsw_sp_qdisc->drops +=  drops;
-	mlxsw_sp_qdisc->overlimits += overlimits;
-	mlxsw_sp_qdisc->tx_bytes += tx_bytes;
-	mlxsw_sp_qdisc->tx_packets += tx_packets;
+		stats_base->drops;
+	backlog = xstats->backlog[tclass_num];
+
+	_bstats_update(stats_ptr->bstats, tx_bytes, tx_packets);
+	stats_ptr->qstats->overlimits += overlimits;
+	stats_ptr->qstats->drops += drops;
+	stats_ptr->qstats->backlog +=
+				mlxsw_sp_cells_bytes(mlxsw_sp_port->mlxsw_sp,
+						     backlog) -
+				mlxsw_sp_cells_bytes(mlxsw_sp_port->mlxsw_sp,
+						     stats_base->backlog);
+
+	stats_base->backlog = backlog;
+	stats_base->drops += drops;
+	stats_base->overlimits += overlimits;
+	stats_base->tx_bytes += tx_bytes;
+	stats_base->tx_packets += tx_packets;
 	return 0;
 }
 
 #define MLXSW_SP_PORT_DEFAULT_TCLASS 0
 
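+/* Per-qdisc-type callbacks; the generic mlxsw_sp_qdisc_replace(),
+ * mlxsw_sp_qdisc_destroy() and mlxsw_sp_qdisc_get_stats() helpers
+ * dispatch through this structure.
+ */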
+static struct mlxsw_sp_qdisc_ops mlxsw_sp_qdisc_ops_red = {
+	.type = MLXSW_SP_QDISC_RED,
+	.check_params = mlxsw_sp_qdisc_red_check_params,
+	.replace = mlxsw_sp_qdisc_red_replace,
+	.unoffload = mlxsw_sp_qdisc_red_unoffload,
+	.destroy = mlxsw_sp_qdisc_red_destroy,
+	.get_stats = mlxsw_sp_qdisc_get_red_stats,
+	.get_xstats = mlxsw_sp_qdisc_get_red_xstats,
+	.clean_stats = mlxsw_sp_setup_tc_qdisc_red_clean_stats,
+};
+
 int mlxsw_sp_setup_tc_red(struct mlxsw_sp_port *mlxsw_sp_port,
 			  struct tc_red_qopt_offload *p)
 {
 	struct mlxsw_sp_qdisc *mlxsw_sp_qdisc;
-	int tclass_num;
 
 	if (p->parent != TC_H_ROOT)
 		return -EOPNOTSUPP;
 
-	mlxsw_sp_qdisc = &mlxsw_sp_port->root_qdisc;
-	tclass_num = MLXSW_SP_PORT_DEFAULT_TCLASS;
+	mlxsw_sp_qdisc = mlxsw_sp_port->root_qdisc;
+
+	if (p->command == TC_RED_REPLACE)
+		return mlxsw_sp_qdisc_replace(mlxsw_sp_port, p->handle,
+					      mlxsw_sp_qdisc,
+					      &mlxsw_sp_qdisc_ops_red,
+					      &p->set);
+
+	if (!mlxsw_sp_qdisc_compare(mlxsw_sp_qdisc, p->handle,
+				    MLXSW_SP_QDISC_RED))
+		return -EOPNOTSUPP;
 
 	switch (p->command) {
-	case TC_RED_REPLACE:
-		return mlxsw_sp_qdisc_red_replace(mlxsw_sp_port, p->handle,
-						  mlxsw_sp_qdisc, tclass_num,
-						  &p->set);
 	case TC_RED_DESTROY:
-		return mlxsw_sp_qdisc_red_destroy(mlxsw_sp_port, p->handle,
-						  mlxsw_sp_qdisc, tclass_num);
+		return mlxsw_sp_qdisc_destroy(mlxsw_sp_port, mlxsw_sp_qdisc);
 	case TC_RED_XSTATS:
-		return mlxsw_sp_qdisc_get_red_xstats(mlxsw_sp_port, p->handle,
-						     mlxsw_sp_qdisc, tclass_num,
-						     p->xstats);
+		return mlxsw_sp_qdisc_get_xstats(mlxsw_sp_port, mlxsw_sp_qdisc,
+						 p->xstats);
 	case TC_RED_STATS:
-		return mlxsw_sp_qdisc_get_red_stats(mlxsw_sp_port, p->handle,
-						    mlxsw_sp_qdisc, tclass_num,
-						    &p->stats);
+		return mlxsw_sp_qdisc_get_stats(mlxsw_sp_port, mlxsw_sp_qdisc,
+						&p->stats);
 	default:
 		return -EOPNOTSUPP;
 	}
 }
+
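+/* Destroying the offloaded PRIO qdisc maps every switch priority back
+ * to the default traffic class.
+ */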
+static int
+mlxsw_sp_qdisc_prio_destroy(struct mlxsw_sp_port *mlxsw_sp_port,
+			    struct mlxsw_sp_qdisc *mlxsw_sp_qdisc)
+{
+	int i;
+
+	for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++)
+		mlxsw_sp_port_prio_tc_set(mlxsw_sp_port, i,
+					  MLXSW_SP_PORT_DEFAULT_TCLASS);
+
+	return 0;
+}
+
+static int
+mlxsw_sp_qdisc_prio_check_params(struct mlxsw_sp_port *mlxsw_sp_port,
+				 struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
+				 void *params)
+{
+	struct tc_prio_qopt_offload_params *p = params;
+
+	if (p->bands > IEEE_8021QAZ_MAX_TCS)
+		return -EOPNOTSUPP;
+
+	return 0;
+}
+
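+/* Program the band -> traffic class mapping described by the qdisc
+ * priomap.
+ */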
+static int
+mlxsw_sp_qdisc_prio_replace(struct mlxsw_sp_port *mlxsw_sp_port,
+			    struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
+			    void *params)
+{
+	struct tc_prio_qopt_offload_params *p = params;
+	int tclass, i;
+	int err;
+
+	for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
+		tclass = MLXSW_SP_PRIO_BAND_TO_TCLASS(p->priomap[i]);
+		err = mlxsw_sp_port_prio_tc_set(mlxsw_sp_port, i, tclass);
+		if (err)
+			return err;
+	}
+
+	return 0;
+}
+
+static void
+mlxsw_sp_qdisc_prio_unoffload(struct mlxsw_sp_port *mlxsw_sp_port,
+			      struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
+			      void *params)
+{
+	struct tc_prio_qopt_offload_params *p = params;
+	u64 backlog;
+
+	backlog = mlxsw_sp_cells_bytes(mlxsw_sp_port->mlxsw_sp,
+				       mlxsw_sp_qdisc->stats_base.backlog);
+	p->qstats->backlog -= backlog;
+}
+
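+/* PRIO spans all traffic classes, so drops and backlog are aggregated
+ * across every TC before being reported relative to the stored base.
+ */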
+static int
+mlxsw_sp_qdisc_get_prio_stats(struct mlxsw_sp_port *mlxsw_sp_port,
+			      struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
+			      struct tc_qopt_offload_stats *stats_ptr)
+{
+	u64 tx_bytes, tx_packets, drops = 0, backlog = 0;
+	struct mlxsw_sp_qdisc_stats *stats_base;
+	struct mlxsw_sp_port_xstats *xstats;
+	struct rtnl_link_stats64 *stats;
+	int i;
+
+	xstats = &mlxsw_sp_port->periodic_hw_stats.xstats;
+	stats = &mlxsw_sp_port->periodic_hw_stats.stats;
+	stats_base = &mlxsw_sp_qdisc->stats_base;
+
+	tx_bytes = stats->tx_bytes - stats_base->tx_bytes;
+	tx_packets = stats->tx_packets - stats_base->tx_packets;
+
+	for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
+		drops += xstats->tail_drop[i];
+		backlog += xstats->backlog[i];
+	}
+	drops = drops - stats_base->drops;
+
+	_bstats_update(stats_ptr->bstats, tx_bytes, tx_packets);
+	stats_ptr->qstats->drops += drops;
+	stats_ptr->qstats->backlog +=
+				mlxsw_sp_cells_bytes(mlxsw_sp_port->mlxsw_sp,
+						     backlog) -
+				mlxsw_sp_cells_bytes(mlxsw_sp_port->mlxsw_sp,
+						     stats_base->backlog);
+	stats_base->backlog = backlog;
+	stats_base->drops += drops;
+	stats_base->tx_bytes += tx_bytes;
+	stats_base->tx_packets += tx_packets;
+	return 0;
+}
+
+static void
+mlxsw_sp_setup_tc_qdisc_prio_clean_stats(struct mlxsw_sp_port *mlxsw_sp_port,
+					 struct mlxsw_sp_qdisc *mlxsw_sp_qdisc)
+{
+	struct mlxsw_sp_qdisc_stats *stats_base;
+	struct mlxsw_sp_port_xstats *xstats;
+	struct rtnl_link_stats64 *stats;
+	int i;
+
+	xstats = &mlxsw_sp_port->periodic_hw_stats.xstats;
+	stats = &mlxsw_sp_port->periodic_hw_stats.stats;
+	stats_base = &mlxsw_sp_qdisc->stats_base;
+
+	stats_base->tx_packets = stats->tx_packets;
+	stats_base->tx_bytes = stats->tx_bytes;
+
+	stats_base->drops = 0;
+	for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++)
+		stats_base->drops += xstats->tail_drop[i];
+
+	mlxsw_sp_qdisc->stats_base.backlog = 0;
+}
+
+static struct mlxsw_sp_qdisc_ops mlxsw_sp_qdisc_ops_prio = {
+	.type = MLXSW_SP_QDISC_PRIO,
+	.check_params = mlxsw_sp_qdisc_prio_check_params,
+	.replace = mlxsw_sp_qdisc_prio_replace,
+	.unoffload = mlxsw_sp_qdisc_prio_unoffload,
+	.destroy = mlxsw_sp_qdisc_prio_destroy,
+	.get_stats = mlxsw_sp_qdisc_get_prio_stats,
+	.clean_stats = mlxsw_sp_setup_tc_qdisc_prio_clean_stats,
+};
+
+int mlxsw_sp_setup_tc_prio(struct mlxsw_sp_port *mlxsw_sp_port,
+			   struct tc_prio_qopt_offload *p)
+{
+	struct mlxsw_sp_qdisc *mlxsw_sp_qdisc;
+
+	if (p->parent != TC_H_ROOT)
+		return -EOPNOTSUPP;
+
+	mlxsw_sp_qdisc = mlxsw_sp_port->root_qdisc;
+	if (p->command == TC_PRIO_REPLACE)
+		return mlxsw_sp_qdisc_replace(mlxsw_sp_port, p->handle,
+					      mlxsw_sp_qdisc,
+					      &mlxsw_sp_qdisc_ops_prio,
+					      &p->replace_params);
+
+	if (!mlxsw_sp_qdisc_compare(mlxsw_sp_qdisc, p->handle,
+				    MLXSW_SP_QDISC_PRIO))
+		return -EOPNOTSUPP;
+
+	switch (p->command) {
+	case TC_PRIO_DESTROY:
+		return mlxsw_sp_qdisc_destroy(mlxsw_sp_port, mlxsw_sp_qdisc);
+	case TC_PRIO_STATS:
+		return mlxsw_sp_qdisc_get_stats(mlxsw_sp_port, mlxsw_sp_qdisc,
+						&p->stats);
+	default:
+		return -EOPNOTSUPP;
+	}
+}
+
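+/* The root qdisc context is allocated once per port and lives until the
+ * port is torn down; it starts out bound to the default traffic class.
+ */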
+int mlxsw_sp_tc_qdisc_init(struct mlxsw_sp_port *mlxsw_sp_port)
+{
+	mlxsw_sp_port->root_qdisc = kzalloc(sizeof(*mlxsw_sp_port->root_qdisc),
+					    GFP_KERNEL);
+	if (!mlxsw_sp_port->root_qdisc)
+		return -ENOMEM;
+
+	mlxsw_sp_port->root_qdisc->tclass_num = MLXSW_SP_PORT_DEFAULT_TCLASS;
+
+	return 0;
+}
+
+void mlxsw_sp_tc_qdisc_fini(struct mlxsw_sp_port *mlxsw_sp_port)
+{
+	kfree(mlxsw_sp_port->root_qdisc);
+}
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
index 7042c855a5d6..f0b25baba09a 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
@@ -71,6 +71,7 @@
 #include "spectrum_mr_tcam.h"
 #include "spectrum_router.h"
 
+struct mlxsw_sp_fib;
 struct mlxsw_sp_vr;
 struct mlxsw_sp_lpm_tree;
 struct mlxsw_sp_rif_ops;
@@ -84,6 +85,8 @@ struct mlxsw_sp_router {
 	struct rhashtable nexthop_ht;
 	struct list_head nexthop_list;
 	struct {
+		/* One tree for each protocol: IPv4 and IPv6 */
+		struct mlxsw_sp_lpm_tree *proto_trees[2];
 		struct mlxsw_sp_lpm_tree *trees;
 		unsigned int tree_count;
 	} lpm;
@@ -162,6 +165,15 @@ struct mlxsw_sp_rif_ops {
 	struct mlxsw_sp_fid * (*fid_get)(struct mlxsw_sp_rif *rif);
 };
 
+static void mlxsw_sp_lpm_tree_hold(struct mlxsw_sp_lpm_tree *lpm_tree);
+static void mlxsw_sp_lpm_tree_put(struct mlxsw_sp *mlxsw_sp,
+				  struct mlxsw_sp_lpm_tree *lpm_tree);
+static int mlxsw_sp_vr_lpm_tree_bind(struct mlxsw_sp *mlxsw_sp,
+				     const struct mlxsw_sp_fib *fib,
+				     u8 tree_id);
+static int mlxsw_sp_vr_lpm_tree_unbind(struct mlxsw_sp *mlxsw_sp,
+				       const struct mlxsw_sp_fib *fib);
+
 static unsigned int *
 mlxsw_sp_rif_p_counter_get(struct mlxsw_sp_rif *rif,
 			   enum mlxsw_sp_rif_counter_dir dir)
@@ -349,14 +361,6 @@ mlxsw_sp_prefix_usage_eq(struct mlxsw_sp_prefix_usage *prefix_usage1,
 	return !memcmp(prefix_usage1, prefix_usage2, sizeof(*prefix_usage1));
 }
 
-static bool
-mlxsw_sp_prefix_usage_none(struct mlxsw_sp_prefix_usage *prefix_usage)
-{
-	struct mlxsw_sp_prefix_usage prefix_usage_none = {{ 0 } };
-
-	return mlxsw_sp_prefix_usage_eq(prefix_usage, &prefix_usage_none);
-}
-
 static void
 mlxsw_sp_prefix_usage_cpy(struct mlxsw_sp_prefix_usage *prefix_usage1,
 			  struct mlxsw_sp_prefix_usage *prefix_usage2)
@@ -398,7 +402,6 @@ enum mlxsw_sp_fib_entry_type {
 };
 
 struct mlxsw_sp_nexthop_group;
-struct mlxsw_sp_fib;
 
 struct mlxsw_sp_fib_node {
 	struct list_head entry_list;
@@ -445,6 +448,7 @@ struct mlxsw_sp_lpm_tree {
 	u8 id; /* tree ID */
 	unsigned int ref_count;
 	enum mlxsw_sp_l3proto proto;
+	unsigned long prefix_ref_count[MLXSW_SP_PREFIX_COUNT];
 	struct mlxsw_sp_prefix_usage prefix_usage;
 };
 
@@ -453,8 +457,6 @@ struct mlxsw_sp_fib {
 	struct list_head node_list;
 	struct mlxsw_sp_vr *vr;
 	struct mlxsw_sp_lpm_tree *lpm_tree;
-	unsigned long prefix_ref_count[MLXSW_SP_PREFIX_COUNT];
-	struct mlxsw_sp_prefix_usage prefix_usage;
 	enum mlxsw_sp_l3proto proto;
 };
 
@@ -469,12 +471,15 @@ struct mlxsw_sp_vr {
 
 static const struct rhashtable_params mlxsw_sp_fib_ht_params;
 
-static struct mlxsw_sp_fib *mlxsw_sp_fib_create(struct mlxsw_sp_vr *vr,
+static struct mlxsw_sp_fib *mlxsw_sp_fib_create(struct mlxsw_sp *mlxsw_sp,
+						struct mlxsw_sp_vr *vr,
 						enum mlxsw_sp_l3proto proto)
 {
+	struct mlxsw_sp_lpm_tree *lpm_tree;
 	struct mlxsw_sp_fib *fib;
 	int err;
 
+	lpm_tree = mlxsw_sp->router->lpm.proto_trees[proto];
 	fib = kzalloc(sizeof(*fib), GFP_KERNEL);
 	if (!fib)
 		return ERR_PTR(-ENOMEM);
@@ -484,17 +489,26 @@ static struct mlxsw_sp_fib *mlxsw_sp_fib_create(struct mlxsw_sp_vr *vr,
 	INIT_LIST_HEAD(&fib->node_list);
 	fib->proto = proto;
 	fib->vr = vr;
+	fib->lpm_tree = lpm_tree;
+	mlxsw_sp_lpm_tree_hold(lpm_tree);
+	err = mlxsw_sp_vr_lpm_tree_bind(mlxsw_sp, fib, lpm_tree->id);
+	if (err)
+		goto err_lpm_tree_bind;
 	return fib;
 
+err_lpm_tree_bind:
+	mlxsw_sp_lpm_tree_put(mlxsw_sp, lpm_tree);
 err_rhashtable_init:
 	kfree(fib);
 	return ERR_PTR(err);
 }
 
-static void mlxsw_sp_fib_destroy(struct mlxsw_sp_fib *fib)
+static void mlxsw_sp_fib_destroy(struct mlxsw_sp *mlxsw_sp,
+				 struct mlxsw_sp_fib *fib)
 {
+	mlxsw_sp_vr_lpm_tree_unbind(mlxsw_sp, fib);
+	mlxsw_sp_lpm_tree_put(mlxsw_sp, fib->lpm_tree);
 	WARN_ON(!list_empty(&fib->node_list));
-	WARN_ON(fib->lpm_tree);
 	rhashtable_destroy(&fib->ht);
 	kfree(fib);
 }
@@ -581,6 +595,9 @@ mlxsw_sp_lpm_tree_create(struct mlxsw_sp *mlxsw_sp,
 		goto err_left_struct_set;
 	memcpy(&lpm_tree->prefix_usage, prefix_usage,
 	       sizeof(lpm_tree->prefix_usage));
+	memset(&lpm_tree->prefix_ref_count, 0,
+	       sizeof(lpm_tree->prefix_ref_count));
+	lpm_tree->ref_count = 1;
 	return lpm_tree;
 
 err_left_struct_set:
@@ -607,8 +624,10 @@ mlxsw_sp_lpm_tree_get(struct mlxsw_sp *mlxsw_sp,
 		if (lpm_tree->ref_count != 0 &&
 		    lpm_tree->proto == proto &&
 		    mlxsw_sp_prefix_usage_eq(&lpm_tree->prefix_usage,
-					     prefix_usage))
+					     prefix_usage)) {
+			mlxsw_sp_lpm_tree_hold(lpm_tree);
 			return lpm_tree;
+		}
 	}
 	return mlxsw_sp_lpm_tree_create(mlxsw_sp, prefix_usage, proto);
 }
@@ -629,9 +648,10 @@ static void mlxsw_sp_lpm_tree_put(struct mlxsw_sp *mlxsw_sp,
 
 static int mlxsw_sp_lpm_init(struct mlxsw_sp *mlxsw_sp)
 {
+	struct mlxsw_sp_prefix_usage req_prefix_usage = {{ 0 } };
 	struct mlxsw_sp_lpm_tree *lpm_tree;
 	u64 max_trees;
-	int i;
+	int err, i;
 
 	if (!MLXSW_CORE_RES_VALID(mlxsw_sp->core, MAX_LPM_TREES))
 		return -EIO;
@@ -649,11 +669,42 @@ static int mlxsw_sp_lpm_init(struct mlxsw_sp *mlxsw_sp)
 		lpm_tree->id = i + MLXSW_SP_LPM_TREE_MIN;
 	}
 
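+	/* Reserve a tree per protocol up front, so that every FIB is bound
+	 * to a valid LPM tree from creation (see mlxsw_sp_fib_create()).
+	 */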
+	lpm_tree = mlxsw_sp_lpm_tree_get(mlxsw_sp, &req_prefix_usage,
+					 MLXSW_SP_L3_PROTO_IPV4);
+	if (IS_ERR(lpm_tree)) {
+		err = PTR_ERR(lpm_tree);
+		goto err_ipv4_tree_get;
+	}
+	mlxsw_sp->router->lpm.proto_trees[MLXSW_SP_L3_PROTO_IPV4] = lpm_tree;
+
+	lpm_tree = mlxsw_sp_lpm_tree_get(mlxsw_sp, &req_prefix_usage,
+					 MLXSW_SP_L3_PROTO_IPV6);
+	if (IS_ERR(lpm_tree)) {
+		err = PTR_ERR(lpm_tree);
+		goto err_ipv6_tree_get;
+	}
+	mlxsw_sp->router->lpm.proto_trees[MLXSW_SP_L3_PROTO_IPV6] = lpm_tree;
+
 	return 0;
+
+err_ipv6_tree_get:
+	lpm_tree = mlxsw_sp->router->lpm.proto_trees[MLXSW_SP_L3_PROTO_IPV4];
+	mlxsw_sp_lpm_tree_put(mlxsw_sp, lpm_tree);
+err_ipv4_tree_get:
+	kfree(mlxsw_sp->router->lpm.trees);
+	return err;
 }
 
 static void mlxsw_sp_lpm_fini(struct mlxsw_sp *mlxsw_sp)
 {
+	struct mlxsw_sp_lpm_tree *lpm_tree;
+
+	lpm_tree = mlxsw_sp->router->lpm.proto_trees[MLXSW_SP_L3_PROTO_IPV6];
+	mlxsw_sp_lpm_tree_put(mlxsw_sp, lpm_tree);
+
+	lpm_tree = mlxsw_sp->router->lpm.proto_trees[MLXSW_SP_L3_PROTO_IPV4];
+	mlxsw_sp_lpm_tree_put(mlxsw_sp, lpm_tree);
+
 	kfree(mlxsw_sp->router->lpm.trees);
 }
 
@@ -745,10 +796,10 @@ static struct mlxsw_sp_vr *mlxsw_sp_vr_create(struct mlxsw_sp *mlxsw_sp,
 		NL_SET_ERR_MSG(extack, "spectrum: Exceeded number of supported virtual routers");
 		return ERR_PTR(-EBUSY);
 	}
-	vr->fib4 = mlxsw_sp_fib_create(vr, MLXSW_SP_L3_PROTO_IPV4);
+	vr->fib4 = mlxsw_sp_fib_create(mlxsw_sp, vr, MLXSW_SP_L3_PROTO_IPV4);
 	if (IS_ERR(vr->fib4))
 		return ERR_CAST(vr->fib4);
-	vr->fib6 = mlxsw_sp_fib_create(vr, MLXSW_SP_L3_PROTO_IPV6);
+	vr->fib6 = mlxsw_sp_fib_create(mlxsw_sp, vr, MLXSW_SP_L3_PROTO_IPV6);
 	if (IS_ERR(vr->fib6)) {
 		err = PTR_ERR(vr->fib6);
 		goto err_fib6_create;
@@ -763,21 +814,22 @@ static struct mlxsw_sp_vr *mlxsw_sp_vr_create(struct mlxsw_sp *mlxsw_sp,
 	return vr;
 
 err_mr_table_create:
-	mlxsw_sp_fib_destroy(vr->fib6);
+	mlxsw_sp_fib_destroy(mlxsw_sp, vr->fib6);
 	vr->fib6 = NULL;
 err_fib6_create:
-	mlxsw_sp_fib_destroy(vr->fib4);
+	mlxsw_sp_fib_destroy(mlxsw_sp, vr->fib4);
 	vr->fib4 = NULL;
 	return ERR_PTR(err);
 }
 
-static void mlxsw_sp_vr_destroy(struct mlxsw_sp_vr *vr)
+static void mlxsw_sp_vr_destroy(struct mlxsw_sp *mlxsw_sp,
+				struct mlxsw_sp_vr *vr)
 {
 	mlxsw_sp_mr_table_destroy(vr->mr4_table);
 	vr->mr4_table = NULL;
-	mlxsw_sp_fib_destroy(vr->fib6);
+	mlxsw_sp_fib_destroy(mlxsw_sp, vr->fib6);
 	vr->fib6 = NULL;
-	mlxsw_sp_fib_destroy(vr->fib4);
+	mlxsw_sp_fib_destroy(mlxsw_sp, vr->fib4);
 	vr->fib4 = NULL;
 }
 
@@ -793,12 +845,12 @@ static struct mlxsw_sp_vr *mlxsw_sp_vr_get(struct mlxsw_sp *mlxsw_sp, u32 tb_id,
 	return vr;
 }
 
-static void mlxsw_sp_vr_put(struct mlxsw_sp_vr *vr)
+static void mlxsw_sp_vr_put(struct mlxsw_sp *mlxsw_sp, struct mlxsw_sp_vr *vr)
 {
 	if (!vr->rif_count && list_empty(&vr->fib4->node_list) &&
 	    list_empty(&vr->fib6->node_list) &&
 	    mlxsw_sp_mr_table_empty(vr->mr4_table))
-		mlxsw_sp_vr_destroy(vr);
+		mlxsw_sp_vr_destroy(mlxsw_sp, vr);
 }
 
 static bool
@@ -809,7 +861,7 @@ mlxsw_sp_vr_lpm_tree_should_replace(struct mlxsw_sp_vr *vr,
 
 	if (!mlxsw_sp_vr_is_used(vr))
 		return false;
-	if (fib->lpm_tree && fib->lpm_tree->id == tree_id)
+	if (fib->lpm_tree->id == tree_id)
 		return true;
 	return false;
 }
@@ -839,14 +891,13 @@ static int mlxsw_sp_vrs_lpm_tree_replace(struct mlxsw_sp *mlxsw_sp,
 					 struct mlxsw_sp_fib *fib,
 					 struct mlxsw_sp_lpm_tree *new_tree)
 {
-	struct mlxsw_sp_lpm_tree *old_tree = fib->lpm_tree;
 	enum mlxsw_sp_l3proto proto = fib->proto;
+	struct mlxsw_sp_lpm_tree *old_tree;
 	u8 old_id, new_id = new_tree->id;
 	struct mlxsw_sp_vr *vr;
 	int i, err;
 
-	if (!old_tree)
-		goto no_replace;
+	old_tree = mlxsw_sp->router->lpm.proto_trees[proto];
 	old_id = old_tree->id;
 
 	for (i = 0; i < MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_VRS); i++) {
@@ -860,6 +911,11 @@ static int mlxsw_sp_vrs_lpm_tree_replace(struct mlxsw_sp *mlxsw_sp,
 			goto err_tree_replace;
 	}
 
+	memcpy(new_tree->prefix_ref_count, old_tree->prefix_ref_count,
+	       sizeof(new_tree->prefix_ref_count));
+	mlxsw_sp->router->lpm.proto_trees[proto] = new_tree;
+	mlxsw_sp_lpm_tree_put(mlxsw_sp, old_tree);
+
 	return 0;
 
 err_tree_replace:
@@ -871,36 +927,6 @@ err_tree_replace:
 					     old_tree);
 	}
 	return err;
-
-no_replace:
-	fib->lpm_tree = new_tree;
-	mlxsw_sp_lpm_tree_hold(new_tree);
-	err = mlxsw_sp_vr_lpm_tree_bind(mlxsw_sp, fib, new_tree->id);
-	if (err) {
-		mlxsw_sp_lpm_tree_put(mlxsw_sp, new_tree);
-		fib->lpm_tree = NULL;
-		return err;
-	}
-	return 0;
-}
-
-static void
-mlxsw_sp_vrs_prefixes(struct mlxsw_sp *mlxsw_sp,
-		      enum mlxsw_sp_l3proto proto,
-		      struct mlxsw_sp_prefix_usage *req_prefix_usage)
-{
-	int i;
-
-	for (i = 0; i < MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_VRS); i++) {
-		struct mlxsw_sp_vr *vr = &mlxsw_sp->router->vrs[i];
-		struct mlxsw_sp_fib *fib = mlxsw_sp_vr_fib(vr, proto);
-		unsigned char prefix;
-
-		if (!mlxsw_sp_vr_is_used(vr))
-			continue;
-		mlxsw_sp_prefix_usage_for_each(prefix, &fib->prefix_usage)
-			mlxsw_sp_prefix_usage_set(req_prefix_usage, prefix);
-	}
 }
 
 static int mlxsw_sp_vrs_init(struct mlxsw_sp *mlxsw_sp)
@@ -2630,7 +2656,8 @@ struct mlxsw_sp_nexthop_group_cmp_arg {
 
 static bool
 mlxsw_sp_nexthop6_group_has_nexthop(const struct mlxsw_sp_nexthop_group *nh_grp,
-				    const struct in6_addr *gw, int ifindex)
+				    const struct in6_addr *gw, int ifindex,
+				    int weight)
 {
 	int i;
 
@@ -2638,7 +2665,7 @@ mlxsw_sp_nexthop6_group_has_nexthop(const struct mlxsw_sp_nexthop_group *nh_grp,
 		const struct mlxsw_sp_nexthop *nh;
 
 		nh = &nh_grp->nexthops[i];
-		if (nh->ifindex == ifindex &&
+		if (nh->ifindex == ifindex && nh->nh_weight == weight &&
 		    ipv6_addr_equal(gw, (struct in6_addr *) nh->gw_addr))
 			return true;
 	}
@@ -2657,11 +2684,13 @@ mlxsw_sp_nexthop6_group_cmp(const struct mlxsw_sp_nexthop_group *nh_grp,
 
 	list_for_each_entry(mlxsw_sp_rt6, &fib6_entry->rt6_list, list) {
 		struct in6_addr *gw;
-		int ifindex;
+		int ifindex, weight;
 
 		ifindex = mlxsw_sp_rt6->rt->dst.dev->ifindex;
+		weight = mlxsw_sp_rt6->rt->rt6i_nh_weight;
 		gw = &mlxsw_sp_rt6->rt->rt6i_gateway;
-		if (!mlxsw_sp_nexthop6_group_has_nexthop(nh_grp, gw, ifindex))
+		if (!mlxsw_sp_nexthop6_group_has_nexthop(nh_grp, gw, ifindex,
+							 weight))
 			return false;
 	}
 
@@ -4192,68 +4221,66 @@ mlxsw_sp_fib_node_entry_is_first(const struct mlxsw_sp_fib_node *fib_node,
 }
 
 static int mlxsw_sp_fib_lpm_tree_link(struct mlxsw_sp *mlxsw_sp,
-				      struct mlxsw_sp_fib *fib,
 				      struct mlxsw_sp_fib_node *fib_node)
 {
-	struct mlxsw_sp_prefix_usage req_prefix_usage = {{ 0 } };
+	struct mlxsw_sp_prefix_usage req_prefix_usage;
+	struct mlxsw_sp_fib *fib = fib_node->fib;
 	struct mlxsw_sp_lpm_tree *lpm_tree;
 	int err;
 
-	/* Since the tree is shared between all virtual routers we must
-	 * make sure it contains all the required prefix lengths. This
-	 * can be computed by either adding the new prefix length to the
-	 * existing prefix usage of a bound tree, or by aggregating the
-	 * prefix lengths across all virtual routers and adding the new
-	 * one as well.
-	 */
-	if (fib->lpm_tree)
-		mlxsw_sp_prefix_usage_cpy(&req_prefix_usage,
-					  &fib->lpm_tree->prefix_usage);
-	else
-		mlxsw_sp_vrs_prefixes(mlxsw_sp, fib->proto, &req_prefix_usage);
-	mlxsw_sp_prefix_usage_set(&req_prefix_usage, fib_node->key.prefix_len);
+	lpm_tree = mlxsw_sp->router->lpm.proto_trees[fib->proto];
+	if (lpm_tree->prefix_ref_count[fib_node->key.prefix_len] != 0)
+		goto out;
 
+	mlxsw_sp_prefix_usage_cpy(&req_prefix_usage, &lpm_tree->prefix_usage);
+	mlxsw_sp_prefix_usage_set(&req_prefix_usage, fib_node->key.prefix_len);
 	lpm_tree = mlxsw_sp_lpm_tree_get(mlxsw_sp, &req_prefix_usage,
 					 fib->proto);
 	if (IS_ERR(lpm_tree))
 		return PTR_ERR(lpm_tree);
 
-	if (fib->lpm_tree && fib->lpm_tree->id == lpm_tree->id)
-		return 0;
-
 	err = mlxsw_sp_vrs_lpm_tree_replace(mlxsw_sp, fib, lpm_tree);
 	if (err)
-		return err;
+		goto err_lpm_tree_replace;
 
+out:
+	lpm_tree->prefix_ref_count[fib_node->key.prefix_len]++;
 	return 0;
-}
 
-static void mlxsw_sp_fib_lpm_tree_unlink(struct mlxsw_sp *mlxsw_sp,
-					 struct mlxsw_sp_fib *fib)
-{
-	if (!mlxsw_sp_prefix_usage_none(&fib->prefix_usage))
-		return;
-	mlxsw_sp_vr_lpm_tree_unbind(mlxsw_sp, fib);
-	mlxsw_sp_lpm_tree_put(mlxsw_sp, fib->lpm_tree);
-	fib->lpm_tree = NULL;
+err_lpm_tree_replace:
+	mlxsw_sp_lpm_tree_put(mlxsw_sp, lpm_tree);
+	return err;
 }
 
-static void mlxsw_sp_fib_node_prefix_inc(struct mlxsw_sp_fib_node *fib_node)
+static void mlxsw_sp_fib_lpm_tree_unlink(struct mlxsw_sp *mlxsw_sp,
+					 struct mlxsw_sp_fib_node *fib_node)
 {
-	unsigned char prefix_len = fib_node->key.prefix_len;
+	struct mlxsw_sp_lpm_tree *lpm_tree = fib_node->fib->lpm_tree;
+	struct mlxsw_sp_prefix_usage req_prefix_usage;
 	struct mlxsw_sp_fib *fib = fib_node->fib;
+	int err;
 
-	if (fib->prefix_ref_count[prefix_len]++ == 0)
-		mlxsw_sp_prefix_usage_set(&fib->prefix_usage, prefix_len);
-}
+	if (--lpm_tree->prefix_ref_count[fib_node->key.prefix_len] != 0)
+		return;
+	/* Try to construct a new LPM tree from the current prefix usage
+	 * minus the unused one. If we fail, continue using the old one.
+	 */
+	mlxsw_sp_prefix_usage_cpy(&req_prefix_usage, &lpm_tree->prefix_usage);
+	mlxsw_sp_prefix_usage_clear(&req_prefix_usage,
+				    fib_node->key.prefix_len);
+	lpm_tree = mlxsw_sp_lpm_tree_get(mlxsw_sp, &req_prefix_usage,
+					 fib->proto);
+	if (IS_ERR(lpm_tree))
+		return;
 
-static void mlxsw_sp_fib_node_prefix_dec(struct mlxsw_sp_fib_node *fib_node)
-{
-	unsigned char prefix_len = fib_node->key.prefix_len;
-	struct mlxsw_sp_fib *fib = fib_node->fib;
+	err = mlxsw_sp_vrs_lpm_tree_replace(mlxsw_sp, fib, lpm_tree);
+	if (err)
+		goto err_lpm_tree_replace;
 
-	if (--fib->prefix_ref_count[prefix_len] == 0)
-		mlxsw_sp_prefix_usage_clear(&fib->prefix_usage, prefix_len);
+	return;
+
+err_lpm_tree_replace:
+	mlxsw_sp_lpm_tree_put(mlxsw_sp, lpm_tree);
 }
 
 static int mlxsw_sp_fib_node_init(struct mlxsw_sp *mlxsw_sp,
@@ -4267,12 +4294,10 @@ static int mlxsw_sp_fib_node_init(struct mlxsw_sp *mlxsw_sp,
 		return err;
 	fib_node->fib = fib;
 
-	err = mlxsw_sp_fib_lpm_tree_link(mlxsw_sp, fib, fib_node);
+	err = mlxsw_sp_fib_lpm_tree_link(mlxsw_sp, fib_node);
 	if (err)
 		goto err_fib_lpm_tree_link;
 
-	mlxsw_sp_fib_node_prefix_inc(fib_node);
-
 	return 0;
 
 err_fib_lpm_tree_link:
@@ -4286,8 +4311,7 @@ static void mlxsw_sp_fib_node_fini(struct mlxsw_sp *mlxsw_sp,
 {
 	struct mlxsw_sp_fib *fib = fib_node->fib;
 
-	mlxsw_sp_fib_node_prefix_dec(fib_node);
-	mlxsw_sp_fib_lpm_tree_unlink(mlxsw_sp, fib);
+	mlxsw_sp_fib_lpm_tree_unlink(mlxsw_sp, fib_node);
 	fib_node->fib = NULL;
 	mlxsw_sp_fib_node_remove(fib, fib_node);
 }
@@ -4326,7 +4350,7 @@ mlxsw_sp_fib_node_get(struct mlxsw_sp *mlxsw_sp, u32 tb_id, const void *addr,
 err_fib_node_init:
 	mlxsw_sp_fib_node_destroy(fib_node);
 err_fib_node_create:
-	mlxsw_sp_vr_put(vr);
+	mlxsw_sp_vr_put(mlxsw_sp, vr);
 	return ERR_PTR(err);
 }
 
@@ -4339,7 +4363,7 @@ static void mlxsw_sp_fib_node_put(struct mlxsw_sp *mlxsw_sp,
 		return;
 	mlxsw_sp_fib_node_fini(mlxsw_sp, fib_node);
 	mlxsw_sp_fib_node_destroy(fib_node);
-	mlxsw_sp_vr_put(vr);
+	mlxsw_sp_vr_put(mlxsw_sp, vr);
 }
 
 static struct mlxsw_sp_fib4_entry *
@@ -4764,7 +4788,7 @@ static int mlxsw_sp_nexthop6_init(struct mlxsw_sp *mlxsw_sp,
 	struct net_device *dev = rt->dst.dev;
 
 	nh->nh_grp = nh_grp;
-	nh->nh_weight = 1;
+	nh->nh_weight = rt->rt6i_nh_weight;
 	memcpy(&nh->gw_addr, &rt->rt6i_gateway, sizeof(nh->gw_addr));
 	mlxsw_sp_nexthop_counter_alloc(mlxsw_sp, nh);
 
@@ -5362,7 +5386,7 @@ static void mlxsw_sp_router_fibmr_del(struct mlxsw_sp *mlxsw_sp,
 		return;
 
 	mlxsw_sp_mr_route4_del(vr->mr4_table, men_info->mfc);
-	mlxsw_sp_vr_put(vr);
+	mlxsw_sp_vr_put(mlxsw_sp, vr);
 }
 
 static int
@@ -5399,7 +5423,7 @@ mlxsw_sp_router_fibmr_vif_del(struct mlxsw_sp *mlxsw_sp,
 		return;
 
 	mlxsw_sp_mr_vif_del(vr->mr4_table, ven_info->vif_index);
-	mlxsw_sp_vr_put(vr);
+	mlxsw_sp_vr_put(mlxsw_sp, vr);
 }
 
 static int mlxsw_sp_router_set_abort_trap(struct mlxsw_sp *mlxsw_sp)
@@ -6048,7 +6072,7 @@ err_fid_get:
 err_rif_alloc:
 err_rif_index_alloc:
 	vr->rif_count--;
-	mlxsw_sp_vr_put(vr);
+	mlxsw_sp_vr_put(mlxsw_sp, vr);
 	return ERR_PTR(err);
 }
 
@@ -6071,7 +6095,7 @@ void mlxsw_sp_rif_destroy(struct mlxsw_sp_rif *rif)
 		mlxsw_sp_fid_put(fid);
 	kfree(rif);
 	vr->rif_count--;
-	mlxsw_sp_vr_put(vr);
+	mlxsw_sp_vr_put(mlxsw_sp, vr);
 }
 
 static void
@@ -6861,7 +6885,7 @@ mlxsw_sp_rif_ipip_lb_configure(struct mlxsw_sp_rif *rif)
 	return 0;
 
 err_loopback_op:
-	mlxsw_sp_vr_put(ul_vr);
+	mlxsw_sp_vr_put(mlxsw_sp, ul_vr);
 	return err;
 }
 
@@ -6875,7 +6899,7 @@ static void mlxsw_sp_rif_ipip_lb_deconfigure(struct mlxsw_sp_rif *rif)
 	mlxsw_sp_rif_ipip_lb_op(lb_rif, ul_vr, false);
 
 	--ul_vr->rif_count;
-	mlxsw_sp_vr_put(ul_vr);
+	mlxsw_sp_vr_put(mlxsw_sp, ul_vr);
 }
 
 static const struct mlxsw_sp_rif_ops mlxsw_sp_rif_ipip_lb_ops = {
@@ -7010,6 +7034,24 @@ static int mlxsw_sp_mp_hash_init(struct mlxsw_sp *mlxsw_sp)
 }
 #endif
 
+static int mlxsw_sp_dscp_init(struct mlxsw_sp *mlxsw_sp)
+{
+	char rdpm_pl[MLXSW_REG_RDPM_LEN];
+	unsigned int i;
+
+	MLXSW_REG_ZERO(rdpm, rdpm_pl);
+
+	/* HW determines the switch priority from the DSCP bits, while the
+	 * kernel still derives it from the full ToS byte. Because of this
+	 * bit mismatch, translate each DSCP value into the priority the
+	 * kernel would compute for the matching ToS, skipping the 2
+	 * least-significant ECN bits.
+	 */
+	for (i = 0; i < MLXSW_REG_RDPM_DSCP_ENTRY_REC_MAX_COUNT; i++)
+		mlxsw_reg_rdpm_pack(rdpm_pl, i, rt_tos2priority(i << 2));
+
+	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(rdpm), rdpm_pl);
+}
+
 static int __mlxsw_sp_router_init(struct mlxsw_sp *mlxsw_sp)
 {
 	char rgcr_pl[MLXSW_REG_RGCR_LEN];
@@ -7022,6 +7064,7 @@ static int __mlxsw_sp_router_init(struct mlxsw_sp *mlxsw_sp)
 
 	mlxsw_reg_rgcr_pack(rgcr_pl, true, true);
 	mlxsw_reg_rgcr_max_router_interfaces_set(rgcr_pl, max_rifs);
+	mlxsw_reg_rgcr_usp_set(rgcr_pl, true);
 	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(rgcr), rgcr_pl);
 	if (err)
 		return err;
@@ -7097,6 +7140,10 @@ int mlxsw_sp_router_init(struct mlxsw_sp *mlxsw_sp)
 	if (err)
 		goto err_mp_hash_init;
 
+	err = mlxsw_sp_dscp_init(mlxsw_sp);
+	if (err)
+		goto err_dscp_init;
+
 	mlxsw_sp->router->fib_nb.notifier_call = mlxsw_sp_router_fib_event;
 	err = register_fib_notifier(&mlxsw_sp->router->fib_nb,
 				    mlxsw_sp_router_fib_dump_flush);
@@ -7106,6 +7153,7 @@ int mlxsw_sp_router_init(struct mlxsw_sp *mlxsw_sp)
 	return 0;
 
 err_register_fib_notifier:
+err_dscp_init:
 err_mp_hash_init:
 	unregister_netevent_notifier(&mlxsw_sp->router->netevent_nb);
 err_register_netevent_notifier:
diff --git a/drivers/net/ethernet/micrel/ks8851.c b/drivers/net/ethernet/micrel/ks8851.c
index 2fe96f1f3fe5..bd6e9014bc74 100644
--- a/drivers/net/ethernet/micrel/ks8851.c
+++ b/drivers/net/ethernet/micrel/ks8851.c
@@ -28,6 +28,7 @@
 #include <linux/spi/spi.h>
 #include <linux/gpio.h>
 #include <linux/of_gpio.h>
+#include <linux/of_net.h>
 
 #include "ks8851.h"
 
@@ -407,15 +408,23 @@ static void ks8851_read_mac_addr(struct net_device *dev)
  * @ks: The device structure
  *
  * Get or create the initial mac address for the device and then set that
- * into the station address register. If there is an EEPROM present, then
+ * into the station address register. A mac address supplied in the device
+ * tree takes precedence. Otherwise, if there is an EEPROM present, then
  * we try that. If no valid mac address is found we use eth_random_addr()
  * to create a new one.
  */
 static void ks8851_init_mac(struct ks8851_net *ks)
 {
 	struct net_device *dev = ks->netdev;
+	const u8 *mac_addr;
+
+	mac_addr = of_get_mac_address(ks->spidev->dev.of_node);
+	if (mac_addr) {
+		memcpy(dev->dev_addr, mac_addr, ETH_ALEN);
+		ks8851_write_mac_addr(dev);
+		return;
+	}
 
-	/* first, try reading what we've got already */
 	if (ks->rc_ccr & CCR_EEPROM) {
 		ks8851_read_mac_addr(dev);
 		if (is_valid_ether_addr(dev->dev_addr))
diff --git a/drivers/net/ethernet/netronome/nfp/Makefile b/drivers/net/ethernet/netronome/nfp/Makefile
index 24c4408b5734..d5866d708dfa 100644
--- a/drivers/net/ethernet/netronome/nfp/Makefile
+++ b/drivers/net/ethernet/netronome/nfp/Makefile
@@ -22,6 +22,8 @@ nfp-objs := \
 	    nfp_hwmon.o \
 	    nfp_main.o \
 	    nfp_net_common.o \
+	    nfp_net_ctrl.o \
+	    nfp_net_debugdump.o \
 	    nfp_net_ethtool.o \
 	    nfp_net_main.o \
 	    nfp_net_repr.o \
@@ -43,6 +45,7 @@ endif
 
 ifeq ($(CONFIG_BPF_SYSCALL),y)
 nfp-objs += \
+	    bpf/cmsg.o \
 	    bpf/main.o \
 	    bpf/offload.o \
 	    bpf/verifier.o \
diff --git a/drivers/net/ethernet/netronome/nfp/bpf/cmsg.c b/drivers/net/ethernet/netronome/nfp/bpf/cmsg.c
new file mode 100644
index 000000000000..80d3aa0fc9d3
--- /dev/null
+++ b/drivers/net/ethernet/netronome/nfp/bpf/cmsg.c
@@ -0,0 +1,453 @@
+/*
+ * Copyright (C) 2017 Netronome Systems, Inc.
+ *
+ * This software is dual licensed under the GNU General License Version 2,
+ * June 1991 as shown in the file COPYING in the top-level directory of this
+ * source tree or the BSD 2-Clause License provided below.  You have the
+ * option to license this software under the complete terms of either license.
+ *
+ * The BSD 2-Clause License:
+ *
+ *     Redistribution and use in source and binary forms, with or
+ *     without modification, are permitted provided that the following
+ *     conditions are met:
+ *
+ *      1. Redistributions of source code must retain the above
+ *         copyright notice, this list of conditions and the following
+ *         disclaimer.
+ *
+ *      2. Redistributions in binary form must reproduce the above
+ *         copyright notice, this list of conditions and the following
+ *         disclaimer in the documentation and/or other materials
+ *         provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include <linux/bpf.h>
+#include <linux/bitops.h>
+#include <linux/bug.h>
+#include <linux/jiffies.h>
+#include <linux/skbuff.h>
+#include <linux/wait.h>
+
+#include "../nfp_app.h"
+#include "../nfp_net.h"
+#include "fw.h"
+#include "main.h"
+
+#define cmsg_warn(bpf, msg...)	nn_dp_warn(&(bpf)->app->ctrl->dp, msg)
+
+#define NFP_BPF_TAG_ALLOC_SPAN	(U16_MAX / 4)
+
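+/* Tags come from a free-running 16-bit counter; the window between
+ * tag_alloc_last and tag_alloc_next covers every tag that may still be
+ * in flight.
+ */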
+static bool nfp_bpf_all_tags_busy(struct nfp_app_bpf *bpf)
+{
+	u16 used_tags;
+
+	used_tags = bpf->tag_alloc_next - bpf->tag_alloc_last;
+
+	return used_tags > NFP_BPF_TAG_ALLOC_SPAN;
+}
+
+static int nfp_bpf_alloc_tag(struct nfp_app_bpf *bpf)
+{
+	/* All FW communication for BPF is request-reply.  To make sure we
+	 * don't reuse a message ID too early after a timeout, limit the
+	 * number of requests in flight.
+	 */
+	if (nfp_bpf_all_tags_busy(bpf)) {
+		cmsg_warn(bpf, "all FW request contexts busy!\n");
+		return -EAGAIN;
+	}
+
+	WARN_ON(__test_and_set_bit(bpf->tag_alloc_next, bpf->tag_allocator));
+	return bpf->tag_alloc_next++;
+}
+
+static void nfp_bpf_free_tag(struct nfp_app_bpf *bpf, u16 tag)
+{
+	WARN_ON(!__test_and_clear_bit(tag, bpf->tag_allocator));
+
+	while (!test_bit(bpf->tag_alloc_last, bpf->tag_allocator) &&
+	       bpf->tag_alloc_last != bpf->tag_alloc_next)
+		bpf->tag_alloc_last++;
+}
+
+static struct sk_buff *
+nfp_bpf_cmsg_alloc(struct nfp_app_bpf *bpf, unsigned int size)
+{
+	struct sk_buff *skb;
+
+	skb = nfp_app_ctrl_msg_alloc(bpf->app, size, GFP_KERNEL);
+	if (!skb)
+		return NULL;
+	skb_put(skb, size);
+
+	return skb;
+}
+
+static struct sk_buff *
+nfp_bpf_cmsg_map_req_alloc(struct nfp_app_bpf *bpf, unsigned int n)
+{
+	unsigned int size;
+
+	size = sizeof(struct cmsg_req_map_op);
+	size += sizeof(struct cmsg_key_value_pair) * n;
+
+	return nfp_bpf_cmsg_alloc(bpf, size);
+}
+
+static unsigned int nfp_bpf_cmsg_get_tag(struct sk_buff *skb)
+{
+	struct cmsg_hdr *hdr;
+
+	hdr = (struct cmsg_hdr *)skb->data;
+
+	return be16_to_cpu(hdr->tag);
+}
+
+static struct sk_buff *__nfp_bpf_reply(struct nfp_app_bpf *bpf, u16 tag)
+{
+	unsigned int msg_tag;
+	struct sk_buff *skb;
+
+	skb_queue_walk(&bpf->cmsg_replies, skb) {
+		msg_tag = nfp_bpf_cmsg_get_tag(skb);
+		if (msg_tag == tag) {
+			nfp_bpf_free_tag(bpf, tag);
+			__skb_unlink(skb, &bpf->cmsg_replies);
+			return skb;
+		}
+	}
+
+	return NULL;
+}
+
+static struct sk_buff *nfp_bpf_reply(struct nfp_app_bpf *bpf, u16 tag)
+{
+	struct sk_buff *skb;
+
+	nfp_ctrl_lock(bpf->app->ctrl);
+	skb = __nfp_bpf_reply(bpf, tag);
+	nfp_ctrl_unlock(bpf->app->ctrl);
+
+	return skb;
+}
+
+static struct sk_buff *nfp_bpf_reply_drop_tag(struct nfp_app_bpf *bpf, u16 tag)
+{
+	struct sk_buff *skb;
+
+	nfp_ctrl_lock(bpf->app->ctrl);
+	skb = __nfp_bpf_reply(bpf, tag);
+	if (!skb)
+		nfp_bpf_free_tag(bpf, tag);
+	nfp_ctrl_unlock(bpf->app->ctrl);
+
+	return skb;
+}
+
+static struct sk_buff *
+nfp_bpf_cmsg_wait_reply(struct nfp_app_bpf *bpf, enum nfp_bpf_cmsg_type type,
+			int tag)
+{
+	struct sk_buff *skb;
+	int i, err;
+
+	for (i = 0; i < 50; i++) {
+		udelay(4);
+		skb = nfp_bpf_reply(bpf, tag);
+		if (skb)
+			return skb;
+	}
+
+	err = wait_event_interruptible_timeout(bpf->cmsg_wq,
+					       skb = nfp_bpf_reply(bpf, tag),
+					       msecs_to_jiffies(5000));
+	/* We didn't get a response - try one last time and atomically drop
+	 * the tag if no response has been matched.
+	 */
+	if (!skb)
+		skb = nfp_bpf_reply_drop_tag(bpf, tag);
+	if (err < 0) {
+		cmsg_warn(bpf, "%s waiting for response to 0x%02x: %d\n",
+			  err == -ERESTARTSYS ? "interrupted" : "error",
+			  type, err);
+		return ERR_PTR(err);
+	}
+	if (!skb) {
+		cmsg_warn(bpf, "timeout waiting for response to 0x%02x\n",
+			  type);
+		return ERR_PTR(-ETIMEDOUT);
+	}
+
+	return skb;
+}
+
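+/* Send a request and wait for its reply: allocate a tag under the ctrl
+ * lock, stamp it into the header, transmit, then sleep until the RX
+ * path queues a reply carrying the same tag.
+ */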
+static struct sk_buff *
+nfp_bpf_cmsg_communicate(struct nfp_app_bpf *bpf, struct sk_buff *skb,
+			 enum nfp_bpf_cmsg_type type, unsigned int reply_size)
+{
+	struct cmsg_hdr *hdr;
+	int tag;
+
+	nfp_ctrl_lock(bpf->app->ctrl);
+	tag = nfp_bpf_alloc_tag(bpf);
+	if (tag < 0) {
+		nfp_ctrl_unlock(bpf->app->ctrl);
+		dev_kfree_skb_any(skb);
+		return ERR_PTR(tag);
+	}
+
+	hdr = (void *)skb->data;
+	hdr->ver = CMSG_MAP_ABI_VERSION;
+	hdr->type = type;
+	hdr->tag = cpu_to_be16(tag);
+
+	__nfp_app_ctrl_tx(bpf->app, skb);
+
+	nfp_ctrl_unlock(bpf->app->ctrl);
+
+	skb = nfp_bpf_cmsg_wait_reply(bpf, type, tag);
+	if (IS_ERR(skb))
+		return skb;
+
+	hdr = (struct cmsg_hdr *)skb->data;
+	/* 0 reply_size means caller will do the validation */
+	if (reply_size && skb->len != reply_size) {
+		cmsg_warn(bpf, "cmsg drop - wrong size %d != %d!\n",
+			  skb->len, reply_size);
+		goto err_free;
+	}
+	if (hdr->type != __CMSG_REPLY(type)) {
+		cmsg_warn(bpf, "cmsg drop - wrong type 0x%02x != 0x%02lx!\n",
+			  hdr->type, __CMSG_REPLY(type));
+		goto err_free;
+	}
+
+	return skb;
+err_free:
+	dev_kfree_skb_any(skb);
+	return ERR_PTR(-EIO);
+}
+
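+/* Translate a FW map-op status code into an errno; unknown codes are
+ * logged and reported as -EIO.
+ */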
+static int
+nfp_bpf_ctrl_rc_to_errno(struct nfp_app_bpf *bpf,
+			 struct cmsg_reply_map_simple *reply)
+{
+	static const int res_table[] = {
+		[CMSG_RC_SUCCESS]	= 0,
+		[CMSG_RC_ERR_MAP_FD]	= -EBADFD,
+		[CMSG_RC_ERR_MAP_NOENT]	= -ENOENT,
+		[CMSG_RC_ERR_MAP_ERR]	= -EINVAL,
+		[CMSG_RC_ERR_MAP_PARSE]	= -EIO,
+		[CMSG_RC_ERR_MAP_EXIST]	= -EEXIST,
+		[CMSG_RC_ERR_MAP_NOMEM]	= -ENOMEM,
+		[CMSG_RC_ERR_MAP_E2BIG]	= -E2BIG,
+	};
+	u32 rc;
+
+	rc = be32_to_cpu(reply->rc);
+	if (rc >= ARRAY_SIZE(res_table)) {
+		cmsg_warn(bpf, "FW responded with invalid status: %u\n", rc);
+		return -EIO;
+	}
+
+	return res_table[rc];
+}
+
+long long int
+nfp_bpf_ctrl_alloc_map(struct nfp_app_bpf *bpf, struct bpf_map *map)
+{
+	struct cmsg_reply_map_alloc_tbl *reply;
+	struct cmsg_req_map_alloc_tbl *req;
+	struct sk_buff *skb;
+	u32 tid;
+	int err;
+
+	skb = nfp_bpf_cmsg_alloc(bpf, sizeof(*req));
+	if (!skb)
+		return -ENOMEM;
+
+	req = (void *)skb->data;
+	req->key_size = cpu_to_be32(map->key_size);
+	req->value_size = cpu_to_be32(map->value_size);
+	req->max_entries = cpu_to_be32(map->max_entries);
+	req->map_type = cpu_to_be32(map->map_type);
+	req->map_flags = 0;
+
+	skb = nfp_bpf_cmsg_communicate(bpf, skb, CMSG_TYPE_MAP_ALLOC,
+				       sizeof(*reply));
+	if (IS_ERR(skb))
+		return PTR_ERR(skb);
+
+	reply = (void *)skb->data;
+	err = nfp_bpf_ctrl_rc_to_errno(bpf, &reply->reply_hdr);
+	if (err)
+		goto err_free;
+
+	tid = be32_to_cpu(reply->tid);
+	dev_consume_skb_any(skb);
+
+	return tid;
+err_free:
+	dev_kfree_skb_any(skb);
+	return err;
+}
+
+void nfp_bpf_ctrl_free_map(struct nfp_app_bpf *bpf, struct nfp_bpf_map *nfp_map)
+{
+	struct cmsg_reply_map_free_tbl *reply;
+	struct cmsg_req_map_free_tbl *req;
+	struct sk_buff *skb;
+	int err;
+
+	skb = nfp_bpf_cmsg_alloc(bpf, sizeof(*req));
+	if (!skb) {
+		cmsg_warn(bpf, "leaking map - failed to allocate msg\n");
+		return;
+	}
+
+	req = (void *)skb->data;
+	req->tid = cpu_to_be32(nfp_map->tid);
+
+	skb = nfp_bpf_cmsg_communicate(bpf, skb, CMSG_TYPE_MAP_FREE,
+				       sizeof(*reply));
+	if (IS_ERR(skb)) {
+		cmsg_warn(bpf, "leaking map - I/O error\n");
+		return;
+	}
+
+	reply = (void *)skb->data;
+	err = nfp_bpf_ctrl_rc_to_errno(bpf, &reply->reply_hdr);
+	if (err)
+		cmsg_warn(bpf, "leaking map - FW responded with: %d\n", err);
+
+	dev_consume_skb_any(skb);
+}
+
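+/* Single-element map operation: marshal the key/value into one request,
+ * validate the reply and copy any outputs back to the caller.
+ */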
+static int
+nfp_bpf_ctrl_entry_op(struct bpf_offloaded_map *offmap,
+		      enum nfp_bpf_cmsg_type op,
+		      u8 *key, u8 *value, u64 flags, u8 *out_key, u8 *out_value)
+{
+	struct nfp_bpf_map *nfp_map = offmap->dev_priv;
+	struct nfp_app_bpf *bpf = nfp_map->bpf;
+	struct bpf_map *map = &offmap->map;
+	struct cmsg_reply_map_op *reply;
+	struct cmsg_req_map_op *req;
+	struct sk_buff *skb;
+	int err;
+
+	/* FW messages have no space for more than 32 bits of flags */
+	if (flags >> 32)
+		return -EOPNOTSUPP;
+
+	skb = nfp_bpf_cmsg_map_req_alloc(bpf, 1);
+	if (!skb)
+		return -ENOMEM;
+
+	req = (void *)skb->data;
+	req->tid = cpu_to_be32(nfp_map->tid);
+	req->count = cpu_to_be32(1);
+	req->flags = cpu_to_be32(flags);
+
+	/* Copy inputs */
+	if (key)
+		memcpy(&req->elem[0].key, key, map->key_size);
+	if (value)
+		memcpy(&req->elem[0].value, value, map->value_size);
+
+	skb = nfp_bpf_cmsg_communicate(bpf, skb, op,
+				       sizeof(*reply) + sizeof(*reply->elem));
+	if (IS_ERR(skb))
+		return PTR_ERR(skb);
+
+	reply = (void *)skb->data;
+	err = nfp_bpf_ctrl_rc_to_errno(bpf, &reply->reply_hdr);
+	if (err)
+		goto err_free;
+
+	/* Copy outputs */
+	if (out_key)
+		memcpy(out_key, &reply->elem[0].key, map->key_size);
+	if (out_value)
+		memcpy(out_value, &reply->elem[0].value, map->value_size);
+
+	dev_consume_skb_any(skb);
+
+	return 0;
+err_free:
+	dev_kfree_skb_any(skb);
+	return err;
+}
+
+int nfp_bpf_ctrl_update_entry(struct bpf_offloaded_map *offmap,
+			      void *key, void *value, u64 flags)
+{
+	return nfp_bpf_ctrl_entry_op(offmap, CMSG_TYPE_MAP_UPDATE,
+				     key, value, flags, NULL, NULL);
+}
+
+int nfp_bpf_ctrl_del_entry(struct bpf_offloaded_map *offmap, void *key)
+{
+	return nfp_bpf_ctrl_entry_op(offmap, CMSG_TYPE_MAP_DELETE,
+				     key, NULL, 0, NULL, NULL);
+}
+
+int nfp_bpf_ctrl_lookup_entry(struct bpf_offloaded_map *offmap,
+			      void *key, void *value)
+{
+	return nfp_bpf_ctrl_entry_op(offmap, CMSG_TYPE_MAP_LOOKUP,
+				     key, NULL, 0, NULL, value);
+}
+
+int nfp_bpf_ctrl_getfirst_entry(struct bpf_offloaded_map *offmap,
+				void *next_key)
+{
+	return nfp_bpf_ctrl_entry_op(offmap, CMSG_TYPE_MAP_GETFIRST,
+				     NULL, NULL, 0, next_key, NULL);
+}
+
+int nfp_bpf_ctrl_getnext_entry(struct bpf_offloaded_map *offmap,
+			       void *key, void *next_key)
+{
+	return nfp_bpf_ctrl_entry_op(offmap, CMSG_TYPE_MAP_GETNEXT,
+				     key, NULL, 0, next_key, NULL);
+}
+
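+/* RX handler for map control messages: replies are matched to waiters
+ * by tag and queued on cmsg_replies; replies nobody waits for are
+ * dropped.
+ */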
+void nfp_bpf_ctrl_msg_rx(struct nfp_app *app, struct sk_buff *skb)
+{
+	struct nfp_app_bpf *bpf = app->priv;
+	unsigned int tag;
+
+	if (unlikely(skb->len < sizeof(struct cmsg_reply_map_simple))) {
+		cmsg_warn(bpf, "cmsg drop - too short %d!\n", skb->len);
+		goto err_free;
+	}
+
+	nfp_ctrl_lock(bpf->app->ctrl);
+
+	tag = nfp_bpf_cmsg_get_tag(skb);
+	if (unlikely(!test_bit(tag, bpf->tag_allocator))) {
+		cmsg_warn(bpf, "cmsg drop - no one is waiting for tag %u!\n",
+			  tag);
+		goto err_unlock;
+	}
+
+	__skb_queue_tail(&bpf->cmsg_replies, skb);
+	wake_up_interruptible_all(&bpf->cmsg_wq);
+
+	nfp_ctrl_unlock(bpf->app->ctrl);
+
+	return;
+err_unlock:
+	nfp_ctrl_unlock(bpf->app->ctrl);
+err_free:
+	dev_kfree_skb_any(skb);
+}
diff --git a/drivers/net/ethernet/netronome/nfp/bpf/fw.h b/drivers/net/ethernet/netronome/nfp/bpf/fw.h
new file mode 100644
index 000000000000..cfcc7bcb2c67
--- /dev/null
+++ b/drivers/net/ethernet/netronome/nfp/bpf/fw.h
@@ -0,0 +1,157 @@
+/*
+ * Copyright (C) 2017 Netronome Systems, Inc.
+ *
+ * This software is dual licensed under the GNU General License Version 2,
+ * June 1991 as shown in the file COPYING in the top-level directory of this
+ * source tree or the BSD 2-Clause License provided below.  You have the
+ * option to license this software under the complete terms of either license.
+ *
+ * The BSD 2-Clause License:
+ *
+ *     Redistribution and use in source and binary forms, with or
+ *     without modification, are permitted provided that the following
+ *     conditions are met:
+ *
+ *      1. Redistributions of source code must retain the above
+ *         copyright notice, this list of conditions and the following
+ *         disclaimer.
+ *
+ *      2. Redistributions in binary form must reproduce the above
+ *         copyright notice, this list of conditions and the following
+ *         disclaimer in the documentation and/or other materials
+ *         provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#ifndef NFP_BPF_FW_H
+#define NFP_BPF_FW_H 1
+
+#include <linux/bitops.h>
+#include <linux/types.h>
+
+enum bpf_cap_tlv_type {
+	NFP_BPF_CAP_TYPE_FUNC		= 1,
+	NFP_BPF_CAP_TYPE_ADJUST_HEAD	= 2,
+	NFP_BPF_CAP_TYPE_MAPS		= 3,
+};
+
+struct nfp_bpf_cap_tlv_func {
+	__le32 func_id;
+	__le32 func_addr;
+};
+
+struct nfp_bpf_cap_tlv_adjust_head {
+	__le32 flags;
+	__le32 off_min;
+	__le32 off_max;
+	__le32 guaranteed_sub;
+	__le32 guaranteed_add;
+};
+
+#define NFP_BPF_ADJUST_HEAD_NO_META	BIT(0)
+
+struct nfp_bpf_cap_tlv_maps {
+	__le32 types;
+	__le32 max_maps;
+	__le32 max_elems;
+	__le32 max_key_sz;
+	__le32 max_val_sz;
+	__le32 max_elem_sz;
+};
+
+/*
+ * Types defined for map related control messages
+ */
+#define CMSG_MAP_ABI_VERSION		1
+
+enum nfp_bpf_cmsg_type {
+	CMSG_TYPE_MAP_ALLOC	= 1,
+	CMSG_TYPE_MAP_FREE	= 2,
+	CMSG_TYPE_MAP_LOOKUP	= 3,
+	CMSG_TYPE_MAP_UPDATE	= 4,
+	CMSG_TYPE_MAP_DELETE	= 5,
+	CMSG_TYPE_MAP_GETNEXT	= 6,
+	CMSG_TYPE_MAP_GETFIRST	= 7,
+	__CMSG_TYPE_MAP_MAX,
+};
+
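+/* A reply carries the request type with the top bit set. */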
+#define CMSG_TYPE_MAP_REPLY_BIT		7
+#define __CMSG_REPLY(req)		(BIT(CMSG_TYPE_MAP_REPLY_BIT) | (req))
+
+#define CMSG_MAP_KEY_LW			16
+#define CMSG_MAP_VALUE_LW		16
+
+enum nfp_bpf_cmsg_status {
+	CMSG_RC_SUCCESS			= 0,
+	CMSG_RC_ERR_MAP_FD		= 1,
+	CMSG_RC_ERR_MAP_NOENT		= 2,
+	CMSG_RC_ERR_MAP_ERR		= 3,
+	CMSG_RC_ERR_MAP_PARSE		= 4,
+	CMSG_RC_ERR_MAP_EXIST		= 5,
+	CMSG_RC_ERR_MAP_NOMEM		= 6,
+	CMSG_RC_ERR_MAP_E2BIG		= 7,
+};
+
+struct cmsg_hdr {
+	u8 type;
+	u8 ver;
+	__be16 tag;
+};
+
+struct cmsg_reply_map_simple {
+	struct cmsg_hdr hdr;
+	__be32 rc;
+};
+
+struct cmsg_req_map_alloc_tbl {
+	struct cmsg_hdr hdr;
+	__be32 key_size;		/* in bytes */
+	__be32 value_size;		/* in bytes */
+	__be32 max_entries;
+	__be32 map_type;
+	__be32 map_flags;		/* reserved */
+};
+
+struct cmsg_reply_map_alloc_tbl {
+	struct cmsg_reply_map_simple reply_hdr;
+	__be32 tid;
+};
+
+struct cmsg_req_map_free_tbl {
+	struct cmsg_hdr hdr;
+	__be32 tid;
+};
+
+struct cmsg_reply_map_free_tbl {
+	struct cmsg_reply_map_simple reply_hdr;
+	__be32 count;
+};
+
+struct cmsg_key_value_pair {
+	__be32 key[CMSG_MAP_KEY_LW];
+	__be32 value[CMSG_MAP_VALUE_LW];
+};
+
+struct cmsg_req_map_op {
+	struct cmsg_hdr hdr;
+	__be32 tid;
+	__be32 count;
+	__be32 flags;
+	struct cmsg_key_value_pair elem[0];
+};
+
+struct cmsg_reply_map_op {
+	struct cmsg_reply_map_simple reply_hdr;
+	__be32 count;
+	__be32 resv;
+	struct cmsg_key_value_pair elem[0];
+};
+#endif
diff --git a/drivers/net/ethernet/netronome/nfp/bpf/jit.c b/drivers/net/ethernet/netronome/nfp/bpf/jit.c
index 995e95410b11..56451edf01c2 100644
--- a/drivers/net/ethernet/netronome/nfp/bpf/jit.c
+++ b/drivers/net/ethernet/netronome/nfp/bpf/jit.c
@@ -1,5 +1,5 @@
 /*
- * Copyright (C) 2016 Netronome Systems, Inc.
+ * Copyright (C) 2016-2017 Netronome Systems, Inc.
  *
  * This software is dual licensed under the GNU General License Version 2,
  * June 1991 as shown in the file COPYING in the top-level directory of this
@@ -33,6 +33,7 @@
 
 #define pr_fmt(fmt)	"NFP net bpf: " fmt
 
+#include <linux/bug.h>
 #include <linux/kernel.h>
 #include <linux/bpf.h>
 #include <linux/filter.h>
@@ -66,12 +67,6 @@
 	     next2 = nfp_meta_next(next))
 
 static bool
-nfp_meta_has_next(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
-{
-	return meta->l.next != &nfp_prog->insns;
-}
-
-static bool
 nfp_meta_has_prev(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
 {
 	return meta->l.prev != &nfp_prog->insns;
@@ -90,19 +85,25 @@ static void nfp_prog_push(struct nfp_prog *nfp_prog, u64 insn)
 
 static unsigned int nfp_prog_current_offset(struct nfp_prog *nfp_prog)
 {
-	return nfp_prog->start_off + nfp_prog->prog_len;
+	return nfp_prog->prog_len;
 }
 
-static unsigned int
-nfp_prog_offset_to_index(struct nfp_prog *nfp_prog, unsigned int offset)
+static bool
+nfp_prog_confirm_current_offset(struct nfp_prog *nfp_prog, unsigned int off)
 {
-	return offset - nfp_prog->start_off;
+	/* If there is a recorded error we may have dropped instructions;
+	 * that doesn't have to be due to a translator bug, and the translation
+	 * will fail anyway, so just return OK.
+	 */
+	if (nfp_prog->error)
+		return true;
+	return !WARN_ON_ONCE(nfp_prog_current_offset(nfp_prog) != off);
 }
 
 /* --- Emitters --- */
 static void
 __emit_cmd(struct nfp_prog *nfp_prog, enum cmd_tgt_map op,
-	   u8 mode, u8 xfer, u8 areg, u8 breg, u8 size, bool sync)
+	   u8 mode, u8 xfer, u8 areg, u8 breg, u8 size, bool sync, bool indir)
 {
 	enum cmd_ctx_swap ctx;
 	u64 insn;
@@ -120,14 +121,15 @@ __emit_cmd(struct nfp_prog *nfp_prog, enum cmd_tgt_map op,
 		FIELD_PREP(OP_CMD_CNT, size) |
 		FIELD_PREP(OP_CMD_SIG, sync) |
 		FIELD_PREP(OP_CMD_TGT_CMD, cmd_tgt_act[op].tgt_cmd) |
+		FIELD_PREP(OP_CMD_INDIR, indir) |
 		FIELD_PREP(OP_CMD_MODE, mode);
 
 	nfp_prog_push(nfp_prog, insn);
 }
 
 static void
-emit_cmd(struct nfp_prog *nfp_prog, enum cmd_tgt_map op,
-	 u8 mode, u8 xfer, swreg lreg, swreg rreg, u8 size, bool sync)
+emit_cmd_any(struct nfp_prog *nfp_prog, enum cmd_tgt_map op, u8 mode, u8 xfer,
+	     swreg lreg, swreg rreg, u8 size, bool sync, bool indir)
 {
 	struct nfp_insn_re_regs reg;
 	int err;
@@ -148,7 +150,22 @@ emit_cmd(struct nfp_prog *nfp_prog, enum cmd_tgt_map op,
 		return;
 	}
 
-	__emit_cmd(nfp_prog, op, mode, xfer, reg.areg, reg.breg, size, sync);
+	__emit_cmd(nfp_prog, op, mode, xfer, reg.areg, reg.breg, size, sync,
+		   indir);
+}
+
+static void
+emit_cmd(struct nfp_prog *nfp_prog, enum cmd_tgt_map op, u8 mode, u8 xfer,
+	 swreg lreg, swreg rreg, u8 size, bool sync)
+{
+	emit_cmd_any(nfp_prog, op, mode, xfer, lreg, rreg, size, sync, false);
+}
+
+static void
+emit_cmd_indir(struct nfp_prog *nfp_prog, enum cmd_tgt_map op, u8 mode, u8 xfer,
+	       swreg lreg, swreg rreg, u8 size, bool sync)
+{
+	emit_cmd_any(nfp_prog, op, mode, xfer, lreg, rreg, size, sync, true);
 }
 
 static void
@@ -172,22 +189,28 @@ __emit_br(struct nfp_prog *nfp_prog, enum br_mask mask, enum br_ev_pip ev_pip,
 	nfp_prog_push(nfp_prog, insn);
 }
 
-static void emit_br_def(struct nfp_prog *nfp_prog, u16 addr, u8 defer)
+static void
+emit_br_relo(struct nfp_prog *nfp_prog, enum br_mask mask, u16 addr, u8 defer,
+	     enum nfp_relo_type relo)
 {
-	if (defer > 2) {
+	if (mask == BR_UNC && defer > 2) {
 		pr_err("BUG: branch defer out of bounds %d\n", defer);
 		nfp_prog->error = -EFAULT;
 		return;
 	}
-	__emit_br(nfp_prog, BR_UNC, BR_EV_PIP_UNCOND, BR_CSS_NONE, addr, defer);
+
+	__emit_br(nfp_prog, mask,
+		  mask != BR_UNC ? BR_EV_PIP_COND : BR_EV_PIP_UNCOND,
+		  BR_CSS_NONE, addr, defer);
+
+	nfp_prog->prog[nfp_prog->prog_len - 1] |=
+		FIELD_PREP(OP_RELO_TYPE, relo);
 }
 
 static void
 emit_br(struct nfp_prog *nfp_prog, enum br_mask mask, u16 addr, u8 defer)
 {
-	__emit_br(nfp_prog, mask,
-		  mask != BR_UNC ? BR_EV_PIP_COND : BR_EV_PIP_UNCOND,
-		  BR_CSS_NONE, addr, defer);
+	emit_br_relo(nfp_prog, mask, addr, defer, RELO_BR_REL);
 }
 
 static void
@@ -230,9 +253,11 @@ emit_immed(struct nfp_prog *nfp_prog, swreg dst, u16 imm,
 		return;
 	}
 
-	__emit_immed(nfp_prog, reg.areg, reg.breg, imm >> 8, width,
-		     invert, shift, reg.wr_both,
-		     reg.dst_lmextn, reg.src_lmextn);
+	/* Use reg.dst when destination is No-Dest. */
+	__emit_immed(nfp_prog,
+		     swreg_type(dst) == NN_REG_NONE ? reg.dst : reg.areg,
+		     reg.breg, imm >> 8, width, invert, shift,
+		     reg.wr_both, reg.dst_lmextn, reg.src_lmextn);
 }
 
 static void
@@ -458,6 +483,21 @@ static void wrp_immed(struct nfp_prog *nfp_prog, swreg dst, u32 imm)
 	}
 }
 
+static void
+wrp_immed_relo(struct nfp_prog *nfp_prog, swreg dst, u32 imm,
+	       enum nfp_relo_type relo)
+{
+	if (imm > 0xffff) {
+		pr_err("relocation of a large immediate!\n");
+		nfp_prog->error = -EFAULT;
+		return;
+	}
+	emit_immed(nfp_prog, dst, imm, IMMED_WIDTH_ALL, false, IMMED_SHIFT_0B);
+
+	nfp_prog->prog[nfp_prog->prog_len - 1] |=
+		FIELD_PREP(OP_RELO_TYPE, relo);
+}
+
 /* ur_load_imm_any() - encode immediate or use tmp register (unrestricted)
  * If the @imm is small enough encode it directly in operand and return
  * otherwise load @imm to a spare register and return its encoding.
@@ -490,24 +530,179 @@ static void wrp_nops(struct nfp_prog *nfp_prog, unsigned int count)
 		emit_nop(nfp_prog);
 }
 
+static void wrp_mov(struct nfp_prog *nfp_prog, swreg dst, swreg src)
+{
+	emit_alu(nfp_prog, dst, reg_none(), ALU_OP_NONE, src);
+}
+
+static void wrp_reg_mov(struct nfp_prog *nfp_prog, u16 dst, u16 src)
+{
+	wrp_mov(nfp_prog, reg_both(dst), reg_b(src));
+}
+
+/* wrp_reg_subpart() - load @field_len bytes from @offset of @src, write the
+ * result to @dst from the low end.
+ */
 static void
-wrp_br_special(struct nfp_prog *nfp_prog, enum br_mask mask,
-	       enum br_special special)
+wrp_reg_subpart(struct nfp_prog *nfp_prog, swreg dst, swreg src, u8 field_len,
+		u8 offset)
 {
-	emit_br(nfp_prog, mask, 0, 0);
+	enum shf_sc sc = offset ? SHF_SC_R_SHF : SHF_SC_NONE;
+	u8 mask = (1 << field_len) - 1;
 
-	nfp_prog->prog[nfp_prog->prog_len - 1] |=
-		FIELD_PREP(OP_BR_SPECIAL, special);
+	emit_ld_field_any(nfp_prog, dst, mask, src, sc, offset * 8, true);
 }
 
-static void wrp_mov(struct nfp_prog *nfp_prog, swreg dst, swreg src)
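+/* Form a 40-bit address from the GPR pair starting at @src_gpr plus
+ * @offset: add into the low word and propagate the carry into the high
+ * word.
+ */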
+static void
+addr40_offset(struct nfp_prog *nfp_prog, u8 src_gpr, swreg offset,
+	      swreg *rega, swreg *regb)
 {
-	emit_alu(nfp_prog, dst, reg_none(), ALU_OP_NONE, src);
+	if (offset == reg_imm(0)) {
+		*rega = reg_a(src_gpr);
+		*regb = reg_b(src_gpr + 1);
+		return;
+	}
+
+	emit_alu(nfp_prog, imm_a(nfp_prog), reg_a(src_gpr), ALU_OP_ADD, offset);
+	emit_alu(nfp_prog, imm_b(nfp_prog), reg_b(src_gpr + 1), ALU_OP_ADD_C,
+		 reg_imm(0));
+	*rega = imm_a(nfp_prog);
+	*regb = imm_b(nfp_prog);
 }
 
-static void wrp_reg_mov(struct nfp_prog *nfp_prog, u16 dst, u16 src)
+/* NFP has a Command Push Pull bus which supports bulk memory operations. */
+static int nfp_cpp_memcpy(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
 {
-	wrp_mov(nfp_prog, reg_both(dst), reg_b(src));
+	bool descending_seq = meta->ldst_gather_len < 0;
+	s16 len = abs(meta->ldst_gather_len);
+	swreg src_base, off;
+	bool src_40bit_addr;
+	unsigned int i;
+	u8 xfer_num;
+
+	off = re_load_imm_any(nfp_prog, meta->insn.off, imm_b(nfp_prog));
+	src_40bit_addr = meta->ptr.type == PTR_TO_MAP_VALUE;
+	src_base = reg_a(meta->insn.src_reg * 2);
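+	/* Each transfer register moves 4 bytes; round the copy length up. */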
+	xfer_num = round_up(len, 4) / 4;
+
+	if (src_40bit_addr)
+		addr40_offset(nfp_prog, meta->insn.src_reg, off, &src_base,
+			      &off);
+
+	/* Setup PREV_ALU fields to override memory read length. */
+	if (len > 32)
+		wrp_immed(nfp_prog, reg_none(),
+			  CMD_OVE_LEN | FIELD_PREP(CMD_OV_LEN, xfer_num - 1));
+
+	/* Memory read from source addr into transfer-in registers. */
+	emit_cmd_any(nfp_prog, CMD_TGT_READ32_SWAP,
+		     src_40bit_addr ? CMD_MODE_40b_BA : CMD_MODE_32b, 0,
+		     src_base, off, xfer_num - 1, true, len > 32);
+
+	/* Move from transfer-in to transfer-out. */
+	for (i = 0; i < xfer_num; i++)
+		wrp_mov(nfp_prog, reg_xfer(i), reg_xfer(i));
+
+	off = re_load_imm_any(nfp_prog, meta->paired_st->off, imm_b(nfp_prog));
+
+	if (len <= 8) {
+		/* Use single direct_ref write8. */
+		emit_cmd(nfp_prog, CMD_TGT_WRITE8_SWAP, CMD_MODE_32b, 0,
+			 reg_a(meta->paired_st->dst_reg * 2), off, len - 1,
+			 true);
+	} else if (len <= 32 && IS_ALIGNED(len, 4)) {
+		/* Use single direct_ref write32. */
+		emit_cmd(nfp_prog, CMD_TGT_WRITE32_SWAP, CMD_MODE_32b, 0,
+			 reg_a(meta->paired_st->dst_reg * 2), off, xfer_num - 1,
+			 true);
+	} else if (len <= 32) {
+		/* Use single indirect_ref write8. */
+		wrp_immed(nfp_prog, reg_none(),
+			  CMD_OVE_LEN | FIELD_PREP(CMD_OV_LEN, len - 1));
+		emit_cmd_indir(nfp_prog, CMD_TGT_WRITE8_SWAP, CMD_MODE_32b, 0,
+			       reg_a(meta->paired_st->dst_reg * 2), off,
+			       len - 1, true);
+	} else if (IS_ALIGNED(len, 4)) {
+		/* Use single indirect_ref write32. */
+		wrp_immed(nfp_prog, reg_none(),
+			  CMD_OVE_LEN | FIELD_PREP(CMD_OV_LEN, xfer_num - 1));
+		emit_cmd_indir(nfp_prog, CMD_TGT_WRITE32_SWAP, CMD_MODE_32b, 0,
+			       reg_a(meta->paired_st->dst_reg * 2), off,
+			       xfer_num - 1, true);
+	} else if (len <= 40) {
+		/* Use one direct_ref write32 to write the first 32 bytes, then
+		 * another direct_ref write8 to write the remaining bytes.
+		 */
+		emit_cmd(nfp_prog, CMD_TGT_WRITE32_SWAP, CMD_MODE_32b, 0,
+			 reg_a(meta->paired_st->dst_reg * 2), off, 7,
+			 true);
+
+		off = re_load_imm_any(nfp_prog, meta->paired_st->off + 32,
+				      imm_b(nfp_prog));
+		emit_cmd(nfp_prog, CMD_TGT_WRITE8_SWAP, CMD_MODE_32b, 8,
+			 reg_a(meta->paired_st->dst_reg * 2), off, len - 33,
+			 true);
+	} else {
+		/* Use one indirect_ref write32 to write the 4-byte aligned
+		 * part, then another direct_ref write8 to write the remaining
+		 * bytes.
+		 */
+		u8 new_off;
+
+		wrp_immed(nfp_prog, reg_none(),
+			  CMD_OVE_LEN | FIELD_PREP(CMD_OV_LEN, xfer_num - 2));
+		emit_cmd_indir(nfp_prog, CMD_TGT_WRITE32_SWAP, CMD_MODE_32b, 0,
+			       reg_a(meta->paired_st->dst_reg * 2), off,
+			       xfer_num - 2, true);
+		new_off = meta->paired_st->off + (xfer_num - 1) * 4;
+		off = re_load_imm_any(nfp_prog, new_off, imm_b(nfp_prog));
+		emit_cmd(nfp_prog, CMD_TGT_WRITE8_SWAP, CMD_MODE_32b,
+			 xfer_num - 1, reg_a(meta->paired_st->dst_reg * 2), off,
+			 (len & 0x3) - 1, true);
+	}
+
+	/* TODO: The following extra load is to make sure the data flow is
+	 *  identical before and after the memory copy optimization.
+	 *
+	 *  The load destination register is not guaranteed to be dead, so we
+	 *  need to make sure it is loaded with the same value as before
+	 *  this transformation.
+	 *
+	 *  These extra loads could be removed once we have accurate register
+	 *  usage information.
+	 */
+	if (descending_seq)
+		xfer_num = 0;
+	else if (BPF_SIZE(meta->insn.code) != BPF_DW)
+		xfer_num = xfer_num - 1;
+	else
+		xfer_num = xfer_num - 2;
+
+	switch (BPF_SIZE(meta->insn.code)) {
+	case BPF_B:
+		wrp_reg_subpart(nfp_prog, reg_both(meta->insn.dst_reg * 2),
+				reg_xfer(xfer_num), 1,
+				IS_ALIGNED(len, 4) ? 3 : (len & 3) - 1);
+		break;
+	case BPF_H:
+		wrp_reg_subpart(nfp_prog, reg_both(meta->insn.dst_reg * 2),
+				reg_xfer(xfer_num), 2, (len & 3) ^ 2);
+		break;
+	case BPF_W:
+		wrp_mov(nfp_prog, reg_both(meta->insn.dst_reg * 2),
+			reg_xfer(0));
+		break;
+	case BPF_DW:
+		wrp_mov(nfp_prog, reg_both(meta->insn.dst_reg * 2),
+			reg_xfer(xfer_num));
+		wrp_mov(nfp_prog, reg_both(meta->insn.dst_reg * 2 + 1),
+			reg_xfer(xfer_num + 1));
+		break;
+	}
+
+	if (BPF_SIZE(meta->insn.code) != BPF_DW)
+		wrp_immed(nfp_prog, reg_both(meta->insn.dst_reg * 2 + 1), 0);
+
+	return 0;
 }
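+
+/* Summary of the write-command selection above (lengths in bytes; as
+ * used here a direct_ref command moves at most 32 bytes):
+ *
+ *	len <= 8		single direct_ref write8
+ *	len <= 32, 4-aligned	single direct_ref write32
+ *	len <= 32		single indirect_ref write8
+ *	4-aligned		single indirect_ref write32
+ *	len <= 40		direct_ref write32 + direct_ref write8
+ *	otherwise		indirect_ref write32 + direct_ref write8
+ */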
 
 static int
@@ -540,20 +735,20 @@ data_ld(struct nfp_prog *nfp_prog, swreg offset, u8 dst_gpr, int size)
 }
 
 static int
-data_ld_host_order(struct nfp_prog *nfp_prog, u8 src_gpr, swreg offset,
-		   u8 dst_gpr, int size)
+data_ld_host_order(struct nfp_prog *nfp_prog, u8 dst_gpr,
+		   swreg lreg, swreg rreg, int size, enum cmd_mode mode)
 {
 	unsigned int i;
 	u8 mask, sz;
 
-	/* We load the value from the address indicated in @offset and then
+	/* We load the value from the address indicated in @lreg + @rreg and then
 	 * mask out the data we don't need.  Note: this is little endian!
 	 */
 	sz = max(size, 4);
 	mask = size < 4 ? GENMASK(size - 1, 0) : 0;
 
-	emit_cmd(nfp_prog, CMD_TGT_READ32_SWAP, CMD_MODE_32b, 0,
-		 reg_a(src_gpr), offset, sz / 4 - 1, true);
+	emit_cmd(nfp_prog, CMD_TGT_READ32_SWAP, mode, 0,
+		 lreg, rreg, sz / 4 - 1, true);
 
 	i = 0;
 	if (mask)
@@ -570,6 +765,26 @@ data_ld_host_order(struct nfp_prog *nfp_prog, u8 src_gpr, swreg offset,
 }
 
 static int
+data_ld_host_order_addr32(struct nfp_prog *nfp_prog, u8 src_gpr, swreg offset,
+			  u8 dst_gpr, u8 size)
+{
+	return data_ld_host_order(nfp_prog, dst_gpr, reg_a(src_gpr), offset,
+				  size, CMD_MODE_32b);
+}
+
+static int
+data_ld_host_order_addr40(struct nfp_prog *nfp_prog, u8 src_gpr, swreg offset,
+			  u8 dst_gpr, u8 size)
+{
+	swreg rega, regb;
+
+	addr40_offset(nfp_prog, src_gpr, offset, &rega, &regb);
+
+	return data_ld_host_order(nfp_prog, dst_gpr, rega, regb,
+				  size, CMD_MODE_40b_BA);
+}
+
+static int
 construct_data_ind_ld(struct nfp_prog *nfp_prog, u16 offset, u16 src, u8 size)
 {
 	swreg tmp_reg;
@@ -583,7 +798,7 @@ construct_data_ind_ld(struct nfp_prog *nfp_prog, u16 offset, u16 src, u8 size)
 		 imm_a(nfp_prog), ALU_OP_ADD, reg_imm(size));
 	emit_alu(nfp_prog, reg_none(),
 		 plen_reg(nfp_prog), ALU_OP_SUB, imm_a(nfp_prog));
-	wrp_br_special(nfp_prog, BR_BLO, OP_BR_GO_ABORT);
+	emit_br_relo(nfp_prog, BR_BLO, BR_OFF_RELO, 0, RELO_BR_GO_ABORT);
 
 	/* Load data */
 	return data_ld(nfp_prog, imm_b(nfp_prog), 0, size);
@@ -596,7 +811,7 @@ static int construct_data_ld(struct nfp_prog *nfp_prog, u16 offset, u8 size)
 	/* Check packet length */
 	tmp_reg = ur_load_imm_any(nfp_prog, offset + size, imm_a(nfp_prog));
 	emit_alu(nfp_prog, reg_none(), plen_reg(nfp_prog), ALU_OP_SUB, tmp_reg);
-	wrp_br_special(nfp_prog, BR_BLO, OP_BR_GO_ABORT);
+	emit_br_relo(nfp_prog, BR_BLO, BR_OFF_RELO, 0, RELO_BR_GO_ABORT);
 
 	/* Load data */
 	tmp_reg = re_load_imm_any(nfp_prog, offset, imm_b(nfp_prog));
@@ -975,9 +1190,6 @@ wrp_test_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta,
 {
 	const struct bpf_insn *insn = &meta->insn;
 
-	if (insn->off < 0) /* TODO */
-		return -EOPNOTSUPP;
-
 	wrp_test_reg_one(nfp_prog, insn->dst_reg * 2, alu_op,
 			 insn->src_reg * 2, br_mask, insn->off);
 	wrp_test_reg_one(nfp_prog, insn->dst_reg * 2 + 1, alu_op,
@@ -995,9 +1207,6 @@ wrp_cmp_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta,
 	u8 reg = insn->dst_reg * 2;
 	swreg tmp_reg;
 
-	if (insn->off < 0) /* TODO */
-		return -EOPNOTSUPP;
-
 	tmp_reg = ur_load_imm_any(nfp_prog, imm & ~0U, imm_b(nfp_prog));
 	if (!swap)
 		emit_alu(nfp_prog, reg_none(), reg_a(reg), ALU_OP_SUB, tmp_reg);
@@ -1027,9 +1236,6 @@ wrp_cmp_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta,
 	areg = insn->dst_reg * 2;
 	breg = insn->src_reg * 2;
 
-	if (insn->off < 0) /* TODO */
-		return -EOPNOTSUPP;
-
 	if (swap) {
 		areg ^= breg;
 		breg ^= areg;
@@ -1052,6 +1258,136 @@ static void wrp_end32(struct nfp_prog *nfp_prog, swreg reg_in, u8 gpr_out)
 		      SHF_SC_R_ROT, 16);
 }
 
+static int adjust_head(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
+{
+	swreg tmp = imm_a(nfp_prog), tmp_len = imm_b(nfp_prog);
+	struct nfp_bpf_cap_adjust_head *adjust_head;
+	u32 ret_einval, end;
+
+	adjust_head = &nfp_prog->bpf->adjust_head;
+
+	/* Optimized version - 5 vs 14 cycles */
+	if (nfp_prog->adjust_head_location != UINT_MAX) {
+		if (WARN_ON_ONCE(nfp_prog->adjust_head_location != meta->n))
+			return -EINVAL;
+
+		emit_alu(nfp_prog, pptr_reg(nfp_prog),
+			 reg_a(2 * 2), ALU_OP_ADD, pptr_reg(nfp_prog));
+		emit_alu(nfp_prog, plen_reg(nfp_prog),
+			 plen_reg(nfp_prog), ALU_OP_SUB, reg_a(2 * 2));
+		emit_alu(nfp_prog, pv_len(nfp_prog),
+			 pv_len(nfp_prog), ALU_OP_SUB, reg_a(2 * 2));
+
+		wrp_immed(nfp_prog, reg_both(0), 0);
+		wrp_immed(nfp_prog, reg_both(1), 0);
+
+		/* TODO: when adjust head is guaranteed to succeed we can
+		 * also eliminate the following if (r0 == 0) branch.
+		 */
+
+		return 0;
+	}
+
+	ret_einval = nfp_prog_current_offset(nfp_prog) + 14;
+	end = ret_einval + 2;
+
+	/* We need to use a temp because offset is just a part of the pkt ptr */
+	emit_alu(nfp_prog, tmp,
+		 reg_a(2 * 2), ALU_OP_ADD_2B, pptr_reg(nfp_prog));
+
+	/* Validate result will fit within FW datapath constraints */
+	emit_alu(nfp_prog, reg_none(),
+		 tmp, ALU_OP_SUB, reg_imm(adjust_head->off_min));
+	emit_br(nfp_prog, BR_BLO, ret_einval, 0);
+	emit_alu(nfp_prog, reg_none(),
+		 reg_imm(adjust_head->off_max), ALU_OP_SUB, tmp);
+	emit_br(nfp_prog, BR_BLO, ret_einval, 0);
+
+	/* Validate the length is at least ETH_HLEN */
+	emit_alu(nfp_prog, tmp_len,
+		 plen_reg(nfp_prog), ALU_OP_SUB, reg_a(2 * 2));
+	emit_alu(nfp_prog, reg_none(),
+		 tmp_len, ALU_OP_SUB, reg_imm(ETH_HLEN));
+	emit_br(nfp_prog, BR_BMI, ret_einval, 0);
+
+	/* Load the ret code */
+	wrp_immed(nfp_prog, reg_both(0), 0);
+	wrp_immed(nfp_prog, reg_both(1), 0);
+
+	/* Modify the packet metadata */
+	emit_ld_field(nfp_prog, pptr_reg(nfp_prog), 0x3, tmp, SHF_SC_NONE, 0);
+
+	/* Skip over the -EINVAL ret code (defer 2) */
+	emit_br(nfp_prog, BR_UNC, end, 2);
+
+	emit_alu(nfp_prog, plen_reg(nfp_prog),
+		 plen_reg(nfp_prog), ALU_OP_SUB, reg_a(2 * 2));
+	emit_alu(nfp_prog, pv_len(nfp_prog),
+		 pv_len(nfp_prog), ALU_OP_SUB, reg_a(2 * 2));
+
+	/* return -EINVAL target */
+	if (!nfp_prog_confirm_current_offset(nfp_prog, ret_einval))
+		return -EINVAL;
+
+	wrp_immed(nfp_prog, reg_both(0), -22);
+	wrp_immed(nfp_prog, reg_both(1), ~0);
+
+	if (!nfp_prog_confirm_current_offset(nfp_prog, end))
+		return -EINVAL;
+
+	return 0;
+}
+
+static int
+map_lookup_stack(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
+{
+	struct bpf_offloaded_map *offmap;
+	struct nfp_bpf_map *nfp_map;
+	bool load_lm_ptr;
+	u32 ret_tgt;
+	s64 lm_off;
+	swreg tid;
+
+	offmap = (struct bpf_offloaded_map *)meta->arg1.map_ptr;
+	nfp_map = offmap->dev_priv;
+
+	/* We only have to reload LM0 if the key is not at the stack start */
+	lm_off = nfp_prog->stack_depth;
+	lm_off += meta->arg2.var_off.value + meta->arg2.off;
+	load_lm_ptr = meta->arg2_var_off || lm_off;
+
+	/* Set LM0 to start of key */
+	if (load_lm_ptr)
+		emit_csr_wr(nfp_prog, reg_b(2 * 2), NFP_CSR_ACT_LM_ADDR0);
+
+	/* Load the map ID into a register; it should actually fit as an
+	 * immediate, but in case it doesn't, deal with it here rather than
+	 * in the delay slots.
+	 */
+	tid = ur_load_imm_any(nfp_prog, nfp_map->tid, imm_a(nfp_prog));
+
+	emit_br_relo(nfp_prog, BR_UNC, BR_OFF_RELO + BPF_FUNC_map_lookup_elem,
+		     2, RELO_BR_HELPER);
+	ret_tgt = nfp_prog_current_offset(nfp_prog) + 2;
+
+	/* Load map ID into A0 */
+	wrp_mov(nfp_prog, reg_a(0), tid);
+
+	/* Load the return address into B0 */
+	wrp_immed_relo(nfp_prog, reg_b(0), ret_tgt, RELO_IMMED_REL);
+
+	if (!nfp_prog_confirm_current_offset(nfp_prog, ret_tgt))
+		return -EINVAL;
+
+	/* Reset the LM0 pointer */
+	if (!load_lm_ptr)
+		return 0;
+
+	emit_csr_wr(nfp_prog, stack_reg(nfp_prog),  NFP_CSR_ACT_LM_ADDR0);
+	wrp_nops(nfp_prog, 3);
+
+	return 0;
+}
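+
+/* Calling convention used above, summarized: the helper's address is
+ * patched in via the RELO_BR_HELPER branch, while the two defer slots
+ * load the map ID into A0 and the relocated return address into B0.
+ */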
+
 /* --- Callbacks --- */
 static int mov_reg64(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
 {
@@ -1486,14 +1822,29 @@ mem_ldx_data(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta,
 
 	tmp_reg = re_load_imm_any(nfp_prog, meta->insn.off, imm_b(nfp_prog));
 
-	return data_ld_host_order(nfp_prog, meta->insn.src_reg * 2, tmp_reg,
-				  meta->insn.dst_reg * 2, size);
+	return data_ld_host_order_addr32(nfp_prog, meta->insn.src_reg * 2,
+					 tmp_reg, meta->insn.dst_reg * 2, size);
+}
+
+static int
+mem_ldx_emem(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta,
+	     unsigned int size)
+{
+	swreg tmp_reg;
+
+	tmp_reg = re_load_imm_any(nfp_prog, meta->insn.off, imm_b(nfp_prog));
+
+	return data_ld_host_order_addr40(nfp_prog, meta->insn.src_reg * 2,
+					 tmp_reg, meta->insn.dst_reg * 2, size);
 }
 
 static int
 mem_ldx(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta,
 	unsigned int size)
 {
+	if (meta->ldst_gather_len)
+		return nfp_cpp_memcpy(nfp_prog, meta);
+
 	if (meta->ptr.type == PTR_TO_CTX) {
 		if (nfp_prog->type == BPF_PROG_TYPE_XDP)
 			return mem_ldx_xdp(nfp_prog, meta, size);
@@ -1508,6 +1859,9 @@ mem_ldx(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta,
 		return mem_ldx_stack(nfp_prog, meta, size,
 				     meta->ptr.off + meta->ptr.var_off.value);
 
+	if (meta->ptr.type == PTR_TO_MAP_VALUE)
+		return mem_ldx_emem(nfp_prog, meta, size);
+
 	return -EOPNOTSUPP;
 }
 
@@ -1630,8 +1984,6 @@ static int mem_stx8(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
 
 static int jump(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
 {
-	if (meta->insn.off < 0) /* TODO */
-		return -EOPNOTSUPP;
 	emit_br(nfp_prog, BR_UNC, meta->insn.off, 0);
 
 	return 0;
@@ -1646,9 +1998,6 @@ static int jeq_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
 	or1 = reg_a(insn->dst_reg * 2);
 	or2 = reg_b(insn->dst_reg * 2 + 1);
 
-	if (insn->off < 0) /* TODO */
-		return -EOPNOTSUPP;
-
 	if (imm & ~0U) {
 		tmp_reg = ur_load_imm_any(nfp_prog, imm & ~0U, imm_b(nfp_prog));
 		emit_alu(nfp_prog, imm_a(nfp_prog),
@@ -1689,15 +2038,32 @@ static int jle_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
 	return wrp_cmp_imm(nfp_prog, meta, BR_BHS, true);
 }
 
+static int jsgt_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
+{
+	return wrp_cmp_imm(nfp_prog, meta, BR_BLT, true);
+}
+
+static int jsge_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
+{
+	return wrp_cmp_imm(nfp_prog, meta, BR_BGE, false);
+}
+
+static int jslt_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
+{
+	return wrp_cmp_imm(nfp_prog, meta, BR_BLT, false);
+}
+
+static int jsle_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
+{
+	return wrp_cmp_imm(nfp_prog, meta, BR_BGE, true);
+}
+
 static int jset_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
 {
 	const struct bpf_insn *insn = &meta->insn;
 	u64 imm = insn->imm; /* sign extend */
 	swreg tmp_reg;
 
-	if (insn->off < 0) /* TODO */
-		return -EOPNOTSUPP;
-
 	if (!imm) {
 		meta->skip = true;
 		return 0;
@@ -1726,9 +2092,6 @@ static int jne_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
 	u64 imm = insn->imm; /* sign extend */
 	swreg tmp_reg;
 
-	if (insn->off < 0) /* TODO */
-		return -EOPNOTSUPP;
-
 	if (!imm) {
 		emit_alu(nfp_prog, reg_none(), reg_a(insn->dst_reg * 2),
 			 ALU_OP_OR, reg_b(insn->dst_reg * 2 + 1));
@@ -1753,9 +2116,6 @@ static int jeq_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
 {
 	const struct bpf_insn *insn = &meta->insn;
 
-	if (insn->off < 0) /* TODO */
-		return -EOPNOTSUPP;
-
 	emit_alu(nfp_prog, imm_a(nfp_prog), reg_a(insn->dst_reg * 2),
 		 ALU_OP_XOR, reg_b(insn->src_reg * 2));
 	emit_alu(nfp_prog, imm_b(nfp_prog), reg_a(insn->dst_reg * 2 + 1),
@@ -1787,6 +2147,26 @@ static int jle_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
 	return wrp_cmp_reg(nfp_prog, meta, BR_BHS, true);
 }
 
+static int jsgt_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
+{
+	return wrp_cmp_reg(nfp_prog, meta, BR_BLT, true);
+}
+
+static int jsge_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
+{
+	return wrp_cmp_reg(nfp_prog, meta, BR_BGE, false);
+}
+
+static int jslt_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
+{
+	return wrp_cmp_reg(nfp_prog, meta, BR_BLT, false);
+}
+
+static int jsle_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
+{
+	return wrp_cmp_reg(nfp_prog, meta, BR_BGE, true);
+}
+
 static int jset_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
 {
 	return wrp_test_reg(nfp_prog, meta, ALU_OP_AND, BR_BNE);
@@ -1797,9 +2177,22 @@ static int jne_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
 	return wrp_test_reg(nfp_prog, meta, ALU_OP_XOR, BR_BNE);
 }
 
+static int call(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
+{
+	switch (meta->insn.imm) {
+	case BPF_FUNC_xdp_adjust_head:
+		return adjust_head(nfp_prog, meta);
+	case BPF_FUNC_map_lookup_elem:
+		return map_lookup_stack(nfp_prog, meta);
+	default:
+		WARN_ONCE(1, "verifier allowed unsupported function\n");
+		return -EOPNOTSUPP;
+	}
+}
+
 static int goto_out(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
 {
-	wrp_br_special(nfp_prog, BR_UNC, OP_BR_GO_OUT);
+	emit_br_relo(nfp_prog, BR_UNC, BR_OFF_RELO, 0, RELO_BR_GO_OUT);
 
 	return 0;
 }
@@ -1860,6 +2253,10 @@ static const instr_cb_t instr_cb[256] = {
 	[BPF_JMP | BPF_JGE | BPF_K] =	jge_imm,
 	[BPF_JMP | BPF_JLT | BPF_K] =	jlt_imm,
 	[BPF_JMP | BPF_JLE | BPF_K] =	jle_imm,
+	[BPF_JMP | BPF_JSGT | BPF_K] =  jsgt_imm,
+	[BPF_JMP | BPF_JSGE | BPF_K] =  jsge_imm,
+	[BPF_JMP | BPF_JSLT | BPF_K] =  jslt_imm,
+	[BPF_JMP | BPF_JSLE | BPF_K] =  jsle_imm,
 	[BPF_JMP | BPF_JSET | BPF_K] =	jset_imm,
 	[BPF_JMP | BPF_JNE | BPF_K] =	jne_imm,
 	[BPF_JMP | BPF_JEQ | BPF_X] =	jeq_reg,
@@ -1867,99 +2264,64 @@ static const instr_cb_t instr_cb[256] = {
 	[BPF_JMP | BPF_JGE | BPF_X] =	jge_reg,
 	[BPF_JMP | BPF_JLT | BPF_X] =	jlt_reg,
 	[BPF_JMP | BPF_JLE | BPF_X] =	jle_reg,
+	[BPF_JMP | BPF_JSGT | BPF_X] =  jsgt_reg,
+	[BPF_JMP | BPF_JSGE | BPF_X] =  jsge_reg,
+	[BPF_JMP | BPF_JSLT | BPF_X] =  jslt_reg,
+	[BPF_JMP | BPF_JSLE | BPF_X] =  jsle_reg,
 	[BPF_JMP | BPF_JSET | BPF_X] =	jset_reg,
 	[BPF_JMP | BPF_JNE | BPF_X] =	jne_reg,
+	[BPF_JMP | BPF_CALL] =		call,
 	[BPF_JMP | BPF_EXIT] =		goto_out,
 };
 
-/* --- Misc code --- */
-static void br_set_offset(u64 *instr, u16 offset)
-{
-	u16 addr_lo, addr_hi;
-
-	addr_lo = offset & (OP_BR_ADDR_LO >> __bf_shf(OP_BR_ADDR_LO));
-	addr_hi = offset != addr_lo;
-	*instr &= ~(OP_BR_ADDR_HI | OP_BR_ADDR_LO);
-	*instr |= FIELD_PREP(OP_BR_ADDR_HI, addr_hi);
-	*instr |= FIELD_PREP(OP_BR_ADDR_LO, addr_lo);
-}
-
 /* --- Assembler logic --- */
 static int nfp_fixup_branches(struct nfp_prog *nfp_prog)
 {
-	struct nfp_insn_meta *meta, *next;
-	u32 off, br_idx;
-	u32 idx;
+	struct nfp_insn_meta *meta, *jmp_dst;
+	u32 idx, br_idx;
 
-	nfp_for_each_insn_walk2(nfp_prog, meta, next) {
+	list_for_each_entry(meta, &nfp_prog->insns, l) {
 		if (meta->skip)
 			continue;
+		if (meta->insn.code == (BPF_JMP | BPF_CALL))
+			continue;
 		if (BPF_CLASS(meta->insn.code) != BPF_JMP)
 			continue;
 
-		br_idx = nfp_prog_offset_to_index(nfp_prog, next->off) - 1;
+		if (list_is_last(&meta->l, &nfp_prog->insns))
+			br_idx = nfp_prog->last_bpf_off;
+		else
+			br_idx = list_next_entry(meta, l)->off - 1;
+
 		if (!nfp_is_br(nfp_prog->prog[br_idx])) {
 			pr_err("Fixup found block not ending in branch %d %02x %016llx!!\n",
 			       br_idx, meta->insn.code, nfp_prog->prog[br_idx]);
 			return -ELOOP;
 		}
 		/* Leave special branches for later */
-		if (FIELD_GET(OP_BR_SPECIAL, nfp_prog->prog[br_idx]))
+		if (FIELD_GET(OP_RELO_TYPE, nfp_prog->prog[br_idx]) !=
+		    RELO_BR_REL)
 			continue;
 
-		/* Find the target offset in assembler realm */
-		off = meta->insn.off;
-		if (!off) {
-			pr_err("Fixup found zero offset!!\n");
+		if (!meta->jmp_dst) {
+			pr_err("Non-exit jump doesn't have destination info recorded!!\n");
 			return -ELOOP;
 		}
 
-		while (off && nfp_meta_has_next(nfp_prog, next)) {
-			next = nfp_meta_next(next);
-			off--;
-		}
-		if (off) {
-			pr_err("Fixup found too large jump!! %d\n", off);
-			return -ELOOP;
-		}
+		jmp_dst = meta->jmp_dst;
 
-		if (next->skip) {
+		if (jmp_dst->skip) {
 			pr_err("Branch landing on removed instruction!!\n");
 			return -ELOOP;
 		}
 
-		for (idx = nfp_prog_offset_to_index(nfp_prog, meta->off);
-		     idx <= br_idx; idx++) {
+		for (idx = meta->off; idx <= br_idx; idx++) {
 			if (!nfp_is_br(nfp_prog->prog[idx]))
 				continue;
-			br_set_offset(&nfp_prog->prog[idx], next->off);
+			br_set_offset(&nfp_prog->prog[idx], jmp_dst->off);
 		}
 	}
 
-	/* Fixup 'goto out's separately, they can be scattered around */
-	for (br_idx = 0; br_idx < nfp_prog->prog_len; br_idx++) {
-		enum br_special special;
-
-		if ((nfp_prog->prog[br_idx] & OP_BR_BASE_MASK) != OP_BR_BASE)
-			continue;
-
-		special = FIELD_GET(OP_BR_SPECIAL, nfp_prog->prog[br_idx]);
-		switch (special) {
-		case OP_BR_NORMAL:
-			break;
-		case OP_BR_GO_OUT:
-			br_set_offset(&nfp_prog->prog[br_idx],
-				      nfp_prog->tgt_out);
-			break;
-		case OP_BR_GO_ABORT:
-			br_set_offset(&nfp_prog->prog[br_idx],
-				      nfp_prog->tgt_abort);
-			break;
-		}
-
-		nfp_prog->prog[br_idx] &= ~OP_BR_SPECIAL;
-	}
-
 	return 0;
 }
 
@@ -1987,7 +2349,7 @@ static void nfp_outro_tc_da(struct nfp_prog *nfp_prog)
 	/* Target for aborts */
 	nfp_prog->tgt_abort = nfp_prog_current_offset(nfp_prog);
 
-	emit_br_def(nfp_prog, nfp_prog->tgt_done, 2);
+	emit_br_relo(nfp_prog, BR_UNC, BR_OFF_RELO, 2, RELO_BR_NEXT_PKT);
 
 	wrp_mov(nfp_prog, reg_a(0), NFP_BPF_ABI_FLAGS);
 	emit_ld_field(nfp_prog, reg_a(0), 0xc, reg_imm(0x11), SHF_SC_L_SHF, 16);
@@ -2014,7 +2376,7 @@ static void nfp_outro_tc_da(struct nfp_prog *nfp_prog)
 	emit_shf(nfp_prog, reg_b(2),
 		 reg_imm(0xf), SHF_OP_AND, reg_b(3), SHF_SC_R_SHF, 0);
 
-	emit_br_def(nfp_prog, nfp_prog->tgt_done, 2);
+	emit_br_relo(nfp_prog, BR_UNC, BR_OFF_RELO, 2, RELO_BR_NEXT_PKT);
 
 	emit_shf(nfp_prog, reg_b(2),
 		 reg_a(2), SHF_OP_OR, reg_b(2), SHF_SC_L_SHF, 4);
@@ -2033,7 +2395,7 @@ static void nfp_outro_xdp(struct nfp_prog *nfp_prog)
 	/* Target for aborts */
 	nfp_prog->tgt_abort = nfp_prog_current_offset(nfp_prog);
 
-	emit_br_def(nfp_prog, nfp_prog->tgt_done, 2);
+	emit_br_relo(nfp_prog, BR_UNC, BR_OFF_RELO, 2, RELO_BR_NEXT_PKT);
 
 	wrp_mov(nfp_prog, reg_a(0), NFP_BPF_ABI_FLAGS);
 	emit_ld_field(nfp_prog, reg_a(0), 0xc, reg_imm(0x82), SHF_SC_L_SHF, 16);
@@ -2054,7 +2416,7 @@ static void nfp_outro_xdp(struct nfp_prog *nfp_prog)
 	emit_shf(nfp_prog, reg_b(2),
 		 reg_imm(0xff), SHF_OP_AND, reg_b(2), SHF_SC_R_SHF, 0);
 
-	emit_br_def(nfp_prog, nfp_prog->tgt_done, 2);
+	emit_br_relo(nfp_prog, BR_UNC, BR_OFF_RELO, 2, RELO_BR_NEXT_PKT);
 
 	wrp_mov(nfp_prog, reg_a(0), NFP_BPF_ABI_FLAGS);
 	emit_ld_field(nfp_prog, reg_a(0), 0xc, reg_b(2), SHF_SC_L_SHF, 16);
@@ -2105,6 +2467,8 @@ static int nfp_translate(struct nfp_prog *nfp_prog)
 		nfp_prog->n_translated++;
 	}
 
+	nfp_prog->last_bpf_off = nfp_prog_current_offset(nfp_prog) - 1;
+
 	nfp_outro(nfp_prog);
 	if (nfp_prog->error)
 		return nfp_prog->error;
@@ -2173,6 +2537,9 @@ static void nfp_bpf_opt_ld_mask(struct nfp_prog *nfp_prog)
 		if (next.src_reg || next.dst_reg)
 			continue;
 
+		if (meta2->flags & FLAG_INSN_IS_JUMP_DST)
+			continue;
+
 		meta2->skip = true;
 	}
 }
@@ -2209,40 +2576,294 @@ static void nfp_bpf_opt_ld_shift(struct nfp_prog *nfp_prog)
 		if (next1.imm != 0x20 || next2.imm != 0x20)
 			continue;
 
+		if (meta2->flags & FLAG_INSN_IS_JUMP_DST ||
+		    meta3->flags & FLAG_INSN_IS_JUMP_DST)
+			continue;
+
 		meta2->skip = true;
 		meta3->skip = true;
 	}
 }
 
+/* A load/store pair that forms a memory copy should look like the following:
+ *
+ *   ld_width R, [addr_src + offset_src]
+ *   st_width [addr_dest + offset_dest], R
+ *
+ * The destination register of the load and the source register of the store
+ * must be the same, and the load and store must also have the same width.
+ * If either addr_src or addr_dest is the stack pointer, we don't do the
+ * CPP optimization, as the stack is modelled by registers on the NFP.
+ */
+static bool
+curr_pair_is_memcpy(struct nfp_insn_meta *ld_meta,
+		    struct nfp_insn_meta *st_meta)
+{
+	struct bpf_insn *ld = &ld_meta->insn;
+	struct bpf_insn *st = &st_meta->insn;
+
+	if (!is_mbpf_load(ld_meta) || !is_mbpf_store(st_meta))
+		return false;
+
+	if (ld_meta->ptr.type != PTR_TO_PACKET)
+		return false;
+
+	if (st_meta->ptr.type != PTR_TO_PACKET)
+		return false;
+
+	if (BPF_SIZE(ld->code) != BPF_SIZE(st->code))
+		return false;
+
+	if (ld->dst_reg != st->src_reg)
+		return false;
+
+	/* There is a jump to the store insn in this pair. */
+	if (st_meta->flags & FLAG_INSN_IS_JUMP_DST)
+		return false;
+
+	return true;
+}
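+
+/* Example (illustrative): a constant-size copy such as
+ *
+ *	memcpy(dst, src, 8);
+ *
+ * is typically lowered into two 4-byte pairs that this predicate
+ * accepts:
+ *
+ *	r4 = *(u32 *)(r1 + 0);	*(u32 *)(r2 + 0) = r4;
+ *	r4 = *(u32 *)(r1 + 4);	*(u32 *)(r2 + 4) = r4;
+ */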
+
+/* Currently, we only support chaining load/store pairs if:
+ *
+ *  - Their address base registers are the same.
+ *  - Their address offsets advance in the same direction.
+ *  - They operate at the same memory width.
+ *  - There is no jump into the middle of them.
+ */
+static bool
+curr_pair_chain_with_previous(struct nfp_insn_meta *ld_meta,
+			      struct nfp_insn_meta *st_meta,
+			      struct bpf_insn *prev_ld,
+			      struct bpf_insn *prev_st)
+{
+	u8 prev_size, curr_size, prev_ld_base, prev_st_base, prev_ld_dst;
+	struct bpf_insn *ld = &ld_meta->insn;
+	struct bpf_insn *st = &st_meta->insn;
+	s16 prev_ld_off, prev_st_off;
+
+	/* This pair is the start pair. */
+	if (!prev_ld)
+		return true;
+
+	prev_size = BPF_LDST_BYTES(prev_ld);
+	curr_size = BPF_LDST_BYTES(ld);
+	prev_ld_base = prev_ld->src_reg;
+	prev_st_base = prev_st->dst_reg;
+	prev_ld_dst = prev_ld->dst_reg;
+	prev_ld_off = prev_ld->off;
+	prev_st_off = prev_st->off;
+
+	if (ld->dst_reg != prev_ld_dst)
+		return false;
+
+	if (ld->src_reg != prev_ld_base || st->dst_reg != prev_st_base)
+		return false;
+
+	if (curr_size != prev_size)
+		return false;
+
+	/* There is a jump to the head of this pair. */
+	if (ld_meta->flags & FLAG_INSN_IS_JUMP_DST)
+		return false;
+
+	/* Both in ascending order. */
+	if (prev_ld_off + prev_size == ld->off &&
+	    prev_st_off + prev_size == st->off)
+		return true;
+
+	/* Both in descending order. */
+	if (ld->off + curr_size == prev_ld_off &&
+	    st->off + curr_size == prev_st_off)
+		return true;
+
+	return false;
+}
+
+/* Return TRUE if a cross memory access happens, i.e. the store area
+ * overlaps the load area such that a later load might read back the value
+ * written by a previous store, in which case we can't treat the sequence
+ * as a memory copy.
+ */
+static bool
+cross_mem_access(struct bpf_insn *ld, struct nfp_insn_meta *head_ld_meta,
+		 struct nfp_insn_meta *head_st_meta)
+{
+	s16 head_ld_off, head_st_off, ld_off;
+
+	/* Different pointer types do not overlap. */
+	if (head_ld_meta->ptr.type != head_st_meta->ptr.type)
+		return false;
+
+	/* Load and store are both PTR_TO_PACKET, check ID info. */
+	if (head_ld_meta->ptr.id != head_st_meta->ptr.id)
+		return true;
+
+	/* Canonicalize the offsets: express all of them relative to the
+	 * original base register.
+	 */
+	head_ld_off = head_ld_meta->insn.off + head_ld_meta->ptr.off;
+	head_st_off = head_st_meta->insn.off + head_st_meta->ptr.off;
+	ld_off = ld->off + head_ld_meta->ptr.off;
+
+	/* Ascending order cross. */
+	if (ld_off > head_ld_off &&
+	    head_ld_off < head_st_off && ld_off >= head_st_off)
+		return true;
+
+	/* Descending order cross. */
+	if (ld_off < head_ld_off &&
+	    head_ld_off > head_st_off && ld_off <= head_st_off)
+		return true;
+
+	return false;
+}
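+
+/* Worked example (illustrative offsets): with head_ld_off = 0,
+ * head_st_off = 4 and a later load at ld_off = 4, the ascending-order
+ * test fires (4 > 0 && 0 < 4 && 4 >= 4), so the later load would read
+ * back what the first store wrote and the sequence is not a pure copy.
+ */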
+
+/* This pass tries to identify the following instruction sequences.
+ *
+ *   load R, [regA + offA]
+ *   store [regB + offB], R
+ *   load R, [regA + offA + const_imm_A]
+ *   store [regB + offB + const_imm_A], R
+ *   load R, [regA + offA + 2 * const_imm_A]
+ *   store [regB + offB + 2 * const_imm_A], R
+ *   ...
+ *
+ * The above sequence is typically generated by the compiler when lowering
+ * memcpy; the NFP prefers using CPP instructions to accelerate it.
+ */
+static void nfp_bpf_opt_ldst_gather(struct nfp_prog *nfp_prog)
+{
+	struct nfp_insn_meta *head_ld_meta = NULL;
+	struct nfp_insn_meta *head_st_meta = NULL;
+	struct nfp_insn_meta *meta1, *meta2;
+	struct bpf_insn *prev_ld = NULL;
+	struct bpf_insn *prev_st = NULL;
+	u8 count = 0;
+
+	nfp_for_each_insn_walk2(nfp_prog, meta1, meta2) {
+		struct bpf_insn *ld = &meta1->insn;
+		struct bpf_insn *st = &meta2->insn;
+
+		/* Reset the record status if any of the following is true:
+		 *   - The current insn pair is not a load/store.
+		 *   - The load/store pair doesn't chain with the previous one.
+		 *   - The chained load/store pair crosses the previous pair.
+		 *   - The chained load/store pair has a total memory copy
+		 *     size beyond 128 bytes, which is the maximum length a
+		 *     single NFP CPP command can transfer.
+		 */
+		if (!curr_pair_is_memcpy(meta1, meta2) ||
+		    !curr_pair_chain_with_previous(meta1, meta2, prev_ld,
+						   prev_st) ||
+		    (head_ld_meta && (cross_mem_access(ld, head_ld_meta,
+						       head_st_meta) ||
+				      head_ld_meta->ldst_gather_len >= 128))) {
+			if (!count)
+				continue;
+
+			if (count > 1) {
+				s16 prev_ld_off = prev_ld->off;
+				s16 prev_st_off = prev_st->off;
+				s16 head_ld_off = head_ld_meta->insn.off;
+
+				if (prev_ld_off < head_ld_off) {
+					head_ld_meta->insn.off = prev_ld_off;
+					head_st_meta->insn.off = prev_st_off;
+					head_ld_meta->ldst_gather_len =
+						-head_ld_meta->ldst_gather_len;
+				}
+
+				head_ld_meta->paired_st = &head_st_meta->insn;
+				head_st_meta->skip = true;
+			} else {
+				head_ld_meta->ldst_gather_len = 0;
+			}
+
+			/* If the chain is ended by a load/store pair then this
+			 * could serve as the new head of the next chain.
+			 */
+			if (curr_pair_is_memcpy(meta1, meta2)) {
+				head_ld_meta = meta1;
+				head_st_meta = meta2;
+				head_ld_meta->ldst_gather_len =
+					BPF_LDST_BYTES(ld);
+				meta1 = nfp_meta_next(meta1);
+				meta2 = nfp_meta_next(meta2);
+				prev_ld = ld;
+				prev_st = st;
+				count = 1;
+			} else {
+				head_ld_meta = NULL;
+				head_st_meta = NULL;
+				prev_ld = NULL;
+				prev_st = NULL;
+				count = 0;
+			}
+
+			continue;
+		}
+
+		if (!head_ld_meta) {
+			head_ld_meta = meta1;
+			head_st_meta = meta2;
+		} else {
+			meta1->skip = true;
+			meta2->skip = true;
+		}
+
+		head_ld_meta->ldst_gather_len += BPF_LDST_BYTES(ld);
+		meta1 = nfp_meta_next(meta1);
+		meta2 = nfp_meta_next(meta2);
+		prev_ld = ld;
+		prev_st = st;
+		count++;
+	}
+}
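+
+/* Net effect (illustrative): four chained 4-byte load/store pairs leave
+ * only the head load live, with ldst_gather_len = 16 and paired_st set;
+ * the remaining seven insns are marked skip and nfp_cpp_memcpy() emits
+ * a single CPP transfer for the whole sequence.
+ */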
+
 static int nfp_bpf_optimize(struct nfp_prog *nfp_prog)
 {
 	nfp_bpf_opt_reg_init(nfp_prog);
 
 	nfp_bpf_opt_ld_mask(nfp_prog);
 	nfp_bpf_opt_ld_shift(nfp_prog);
+	nfp_bpf_opt_ldst_gather(nfp_prog);
 
 	return 0;
 }
 
-static int nfp_bpf_ustore_calc(struct nfp_prog *nfp_prog, __le64 *ustore)
+static int nfp_bpf_ustore_calc(u64 *prog, unsigned int len)
 {
+	__le64 *ustore = (__force __le64 *)prog;
 	int i;
 
-	for (i = 0; i < nfp_prog->prog_len; i++) {
+	for (i = 0; i < len; i++) {
 		int err;
 
-		err = nfp_ustore_check_valid_no_ecc(nfp_prog->prog[i]);
+		err = nfp_ustore_check_valid_no_ecc(prog[i]);
 		if (err)
 			return err;
 
-		nfp_prog->prog[i] = nfp_ustore_calc_ecc_insn(nfp_prog->prog[i]);
-
-		ustore[i] = cpu_to_le64(nfp_prog->prog[i]);
+		ustore[i] = cpu_to_le64(nfp_ustore_calc_ecc_insn(prog[i]));
 	}
 
 	return 0;
 }
 
+static void nfp_bpf_prog_trim(struct nfp_prog *nfp_prog)
+{
+	void *prog;
+
+	prog = kvmalloc_array(nfp_prog->prog_len, sizeof(u64), GFP_KERNEL);
+	if (!prog)
+		return;
+
+	nfp_prog->__prog_alloc_len = nfp_prog->prog_len * sizeof(u64);
+	memcpy(prog, nfp_prog->prog, nfp_prog->__prog_alloc_len);
+	kvfree(nfp_prog->prog);
+	nfp_prog->prog = prog;
+}
+
 int nfp_bpf_jit(struct nfp_prog *nfp_prog)
 {
 	int ret;
@@ -2258,5 +2879,102 @@ int nfp_bpf_jit(struct nfp_prog *nfp_prog)
 		return -EINVAL;
 	}
 
-	return nfp_bpf_ustore_calc(nfp_prog, (__force __le64 *)nfp_prog->prog);
+	nfp_bpf_prog_trim(nfp_prog);
+
+	return ret;
+}
+
+void nfp_bpf_jit_prepare(struct nfp_prog *nfp_prog, unsigned int cnt)
+{
+	struct nfp_insn_meta *meta;
+
+	/* Another pass to record jump information. */
+	list_for_each_entry(meta, &nfp_prog->insns, l) {
+		u64 code = meta->insn.code;
+
+		if (BPF_CLASS(code) == BPF_JMP && BPF_OP(code) != BPF_EXIT &&
+		    BPF_OP(code) != BPF_CALL) {
+			struct nfp_insn_meta *dst_meta;
+			unsigned short dst_indx;
+
+			dst_indx = meta->n + 1 + meta->insn.off;
+			dst_meta = nfp_bpf_goto_meta(nfp_prog, meta, dst_indx,
+						     cnt);
+
+			meta->jmp_dst = dst_meta;
+			dst_meta->flags |= FLAG_INSN_IS_JUMP_DST;
+		}
+	}
+}
+
+bool nfp_bpf_supported_opcode(u8 code)
+{
+	return !!instr_cb[code];
+}
+
+void *nfp_bpf_relo_for_vnic(struct nfp_prog *nfp_prog, struct nfp_bpf_vnic *bv)
+{
+	unsigned int i;
+	u64 *prog;
+	int err;
+
+	prog = kmemdup(nfp_prog->prog, nfp_prog->prog_len * sizeof(u64),
+		       GFP_KERNEL);
+	if (!prog)
+		return ERR_PTR(-ENOMEM);
+
+	for (i = 0; i < nfp_prog->prog_len; i++) {
+		enum nfp_relo_type special;
+		u32 val;
+
+		special = FIELD_GET(OP_RELO_TYPE, prog[i]);
+		switch (special) {
+		case RELO_NONE:
+			continue;
+		case RELO_BR_REL:
+			br_add_offset(&prog[i], bv->start_off);
+			break;
+		case RELO_BR_GO_OUT:
+			br_set_offset(&prog[i],
+				      nfp_prog->tgt_out + bv->start_off);
+			break;
+		case RELO_BR_GO_ABORT:
+			br_set_offset(&prog[i],
+				      nfp_prog->tgt_abort + bv->start_off);
+			break;
+		case RELO_BR_NEXT_PKT:
+			br_set_offset(&prog[i], bv->tgt_done);
+			break;
+		case RELO_BR_HELPER:
+			val = br_get_offset(prog[i]);
+			val -= BR_OFF_RELO;
+			switch (val) {
+			case BPF_FUNC_map_lookup_elem:
+				val = nfp_prog->bpf->helpers.map_lookup;
+				break;
+			default:
+				pr_err("relocation of unknown helper %d\n",
+				       val);
+				err = -EINVAL;
+				goto err_free_prog;
+			}
+			br_set_offset(&prog[i], val);
+			break;
+		case RELO_IMMED_REL:
+			immed_add_value(&prog[i], bv->start_off);
+			break;
+		}
+
+		prog[i] &= ~OP_RELO_TYPE;
+	}
+
+	err = nfp_bpf_ustore_calc(prog, nfp_prog->prog_len);
+	if (err)
+		goto err_free_prog;
+
+	return prog;
+
+err_free_prog:
+	kfree(prog);
+	return ERR_PTR(err);
 }
diff --git a/drivers/net/ethernet/netronome/nfp/bpf/main.c b/drivers/net/ethernet/netronome/nfp/bpf/main.c
index 13190aa09faf..322027792fe8 100644
--- a/drivers/net/ethernet/netronome/nfp/bpf/main.c
+++ b/drivers/net/ethernet/netronome/nfp/bpf/main.c
@@ -34,10 +34,12 @@
 #include <net/pkt_cls.h>
 
 #include "../nfpcore/nfp_cpp.h"
+#include "../nfpcore/nfp_nffw.h"
 #include "../nfp_app.h"
 #include "../nfp_main.h"
 #include "../nfp_net.h"
 #include "../nfp_port.h"
+#include "fw.h"
 #include "main.h"
 
 static bool nfp_net_ebpf_capable(struct nfp_net *nn)
@@ -52,7 +54,7 @@ static bool nfp_net_ebpf_capable(struct nfp_net *nn)
 
 static int
 nfp_bpf_xdp_offload(struct nfp_app *app, struct nfp_net *nn,
-		    struct bpf_prog *prog)
+		    struct bpf_prog *prog, struct netlink_ext_ack *extack)
 {
 	bool running, xdp_running;
 	int ret;
@@ -68,10 +70,10 @@ nfp_bpf_xdp_offload(struct nfp_app *app, struct nfp_net *nn,
 	if (prog && running && !xdp_running)
 		return -EBUSY;
 
-	ret = nfp_net_bpf_offload(nn, prog, running);
+	ret = nfp_net_bpf_offload(nn, prog, running, extack);
 	/* Stop offload if replace not possible */
 	if (ret && prog)
-		nfp_bpf_xdp_offload(app, nn, NULL);
+		nfp_bpf_xdp_offload(app, nn, NULL, extack);
 
 	nn->dp.bpf_offload_xdp = prog && !ret;
 	return ret;
@@ -85,16 +87,21 @@ static const char *nfp_bpf_extra_cap(struct nfp_app *app, struct nfp_net *nn)
 static int
 nfp_bpf_vnic_alloc(struct nfp_app *app, struct nfp_net *nn, unsigned int id)
 {
+	struct nfp_bpf_vnic *bv;
 	int err;
 
-	nn->app_priv = kzalloc(sizeof(struct nfp_bpf_vnic), GFP_KERNEL);
-	if (!nn->app_priv)
+	bv = kzalloc(sizeof(*bv), GFP_KERNEL);
+	if (!bv)
 		return -ENOMEM;
+	nn->app_priv = bv;
 
 	err = nfp_app_nic_vnic_alloc(app, nn, id);
 	if (err)
 		goto err_free_priv;
 
+	bv->start_off = nn_readw(nn, NFP_NET_CFG_BPF_START);
+	bv->tgt_done = nn_readw(nn, NFP_NET_CFG_BPF_DONE);
+
 	return 0;
 err_free_priv:
 	kfree(nn->app_priv);
@@ -105,8 +112,6 @@ static void nfp_bpf_vnic_free(struct nfp_app *app, struct nfp_net *nn)
 {
 	struct nfp_bpf_vnic *bv = nn->app_priv;
 
-	if (nn->dp.bpf_offload_xdp)
-		nfp_bpf_xdp_offload(app, nn, NULL);
 	WARN_ON(bv->tc_prog);
 	kfree(bv);
 }
@@ -120,17 +125,29 @@ static int nfp_bpf_setup_tc_block_cb(enum tc_setup_type type,
 	struct nfp_bpf_vnic *bv;
 	int err;
 
-	if (type != TC_SETUP_CLSBPF ||
-	    !tc_can_offload(nn->dp.netdev) ||
-	    !nfp_net_ebpf_capable(nn) ||
-	    cls_bpf->common.protocol != htons(ETH_P_ALL) ||
-	    cls_bpf->common.chain_index)
+	if (type != TC_SETUP_CLSBPF) {
+		NL_SET_ERR_MSG_MOD(cls_bpf->common.extack,
+				   "only offload of BPF classifiers supported");
+		return -EOPNOTSUPP;
+	}
+	if (!tc_cls_can_offload_and_chain0(nn->dp.netdev, &cls_bpf->common))
+		return -EOPNOTSUPP;
+	if (!nfp_net_ebpf_capable(nn)) {
+		NL_SET_ERR_MSG_MOD(cls_bpf->common.extack,
+				   "NFP firmware does not support eBPF offload");
 		return -EOPNOTSUPP;
+	}
+	if (cls_bpf->common.protocol != htons(ETH_P_ALL)) {
+		NL_SET_ERR_MSG_MOD(cls_bpf->common.extack,
+				   "only ETH_P_ALL supported as filter protocol");
+		return -EOPNOTSUPP;
+	}
 
 	/* Only support TC direct action */
 	if (!cls_bpf->exts_integrated ||
 	    tcf_exts_has_actions(cls_bpf->exts)) {
-		nn_err(nn, "only direct action with no legacy actions supported\n");
+		NL_SET_ERR_MSG_MOD(cls_bpf->common.extack,
+				   "only direct action with no legacy actions supported");
 		return -EOPNOTSUPP;
 	}
 
@@ -147,7 +164,8 @@ static int nfp_bpf_setup_tc_block_cb(enum tc_setup_type type,
 			return 0;
 	}
 
-	err = nfp_net_bpf_offload(nn, cls_bpf->prog, oldprog);
+	err = nfp_net_bpf_offload(nn, cls_bpf->prog, oldprog,
+				  cls_bpf->common.extack);
 	if (err)
 		return err;
 
@@ -191,23 +209,215 @@ static int nfp_bpf_setup_tc(struct nfp_app *app, struct net_device *netdev,
 
 static bool nfp_bpf_tc_busy(struct nfp_app *app, struct nfp_net *nn)
 {
-	return nn->dp.ctrl & NFP_NET_CFG_CTRL_BPF;
+	struct nfp_bpf_vnic *bv = nn->app_priv;
+
+	return !!bv->tc_prog;
+}
+
+static int
+nfp_bpf_change_mtu(struct nfp_app *app, struct net_device *netdev, int new_mtu)
+{
+	struct nfp_net *nn = netdev_priv(netdev);
+	unsigned int max_mtu;
+
+	if (~nn->dp.ctrl & NFP_NET_CFG_CTRL_BPF)
+		return 0;
+
+	max_mtu = nn_readb(nn, NFP_NET_CFG_BPF_INL_MTU) * 64 - 32;
+	if (new_mtu > max_mtu) {
+		nn_info(nn, "BPF offload active, MTU over %u not supported\n",
+			max_mtu);
+		return -EBUSY;
+	}
+	return 0;
+}
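+
+/* For example (illustrative value): an NFP_NET_CFG_BPF_INL_MTU word of
+ * 48 allows an MTU of up to 48 * 64 - 32 = 3040 bytes while a BPF
+ * program is offloaded.
+ */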
+
+static int
+nfp_bpf_parse_cap_adjust_head(struct nfp_app_bpf *bpf, void __iomem *value,
+			      u32 length)
+{
+	struct nfp_bpf_cap_tlv_adjust_head __iomem *cap = value;
+	struct nfp_cpp *cpp = bpf->app->pf->cpp;
+
+	if (length < sizeof(*cap)) {
+		nfp_err(cpp, "truncated adjust_head TLV: %d\n", length);
+		return -EINVAL;
+	}
+
+	bpf->adjust_head.flags = readl(&cap->flags);
+	bpf->adjust_head.off_min = readl(&cap->off_min);
+	bpf->adjust_head.off_max = readl(&cap->off_max);
+	bpf->adjust_head.guaranteed_sub = readl(&cap->guaranteed_sub);
+	bpf->adjust_head.guaranteed_add = readl(&cap->guaranteed_add);
+
+	if (bpf->adjust_head.off_min > bpf->adjust_head.off_max) {
+		nfp_err(cpp, "invalid adjust_head TLV: min > max\n");
+		return -EINVAL;
+	}
+	if (!FIELD_FIT(UR_REG_IMM_MAX, bpf->adjust_head.off_min) ||
+	    !FIELD_FIT(UR_REG_IMM_MAX, bpf->adjust_head.off_max)) {
+		nfp_warn(cpp, "disabling adjust_head - driver expects min/max to fit in as immediates\n");
+		memset(&bpf->adjust_head, 0, sizeof(bpf->adjust_head));
+		return 0;
+	}
+
+	return 0;
+}
+
+static int
+nfp_bpf_parse_cap_func(struct nfp_app_bpf *bpf, void __iomem *value, u32 length)
+{
+	struct nfp_bpf_cap_tlv_func __iomem *cap = value;
+
+	if (length < sizeof(*cap)) {
+		nfp_err(bpf->app->cpp, "truncated function TLV: %d\n", length);
+		return -EINVAL;
+	}
+
+	switch (readl(&cap->func_id)) {
+	case BPF_FUNC_map_lookup_elem:
+		bpf->helpers.map_lookup = readl(&cap->func_addr);
+		break;
+	}
+
+	return 0;
+}
+
+static int
+nfp_bpf_parse_cap_maps(struct nfp_app_bpf *bpf, void __iomem *value, u32 length)
+{
+	struct nfp_bpf_cap_tlv_maps __iomem *cap = value;
+
+	if (length < sizeof(*cap)) {
+		nfp_err(bpf->app->cpp, "truncated maps TLV: %d\n", length);
+		return -EINVAL;
+	}
+
+	bpf->maps.types = readl(&cap->types);
+	bpf->maps.max_maps = readl(&cap->max_maps);
+	bpf->maps.max_elems = readl(&cap->max_elems);
+	bpf->maps.max_key_sz = readl(&cap->max_key_sz);
+	bpf->maps.max_val_sz = readl(&cap->max_val_sz);
+	bpf->maps.max_elem_sz = readl(&cap->max_elem_sz);
+
+	return 0;
+}
+
+static int nfp_bpf_parse_capabilities(struct nfp_app *app)
+{
+	struct nfp_cpp *cpp = app->pf->cpp;
+	struct nfp_cpp_area *area;
+	u8 __iomem *mem, *start;
+
+	mem = nfp_rtsym_map(app->pf->rtbl, "_abi_bpf_capabilities", "bpf.cap",
+			    8, &area);
+	if (IS_ERR(mem))
+		return PTR_ERR(mem) == -ENOENT ? 0 : PTR_ERR(mem);
+
+	start = mem;
+	while (mem - start + 8 < nfp_cpp_area_size(area)) {
+		u8 __iomem *value;
+		u32 type, length;
+
+		type = readl(mem);
+		length = readl(mem + 4);
+		value = mem + 8;
+
+		mem += 8 + length;
+		if (mem - start > nfp_cpp_area_size(area))
+			goto err_release_free;
+
+		switch (type) {
+		case NFP_BPF_CAP_TYPE_FUNC:
+			if (nfp_bpf_parse_cap_func(app->priv, value, length))
+				goto err_release_free;
+			break;
+		case NFP_BPF_CAP_TYPE_ADJUST_HEAD:
+			if (nfp_bpf_parse_cap_adjust_head(app->priv, value,
+							  length))
+				goto err_release_free;
+			break;
+		case NFP_BPF_CAP_TYPE_MAPS:
+			if (nfp_bpf_parse_cap_maps(app->priv, value, length))
+				goto err_release_free;
+			break;
+		default:
+			nfp_dbg(cpp, "unknown BPF capability: %d\n", type);
+			break;
+		}
+	}
+	if (mem - start != nfp_cpp_area_size(area)) {
+		nfp_err(cpp, "BPF capabilities left after parsing, parsed:%zd total length:%zu\n",
+			mem - start, nfp_cpp_area_size(area));
+		goto err_release_free;
+	}
+
+	nfp_cpp_area_release_free(area);
+
+	return 0;
+
+err_release_free:
+	nfp_err(cpp, "invalid BPF capabilities at offset:%zd\n", mem - start);
+	nfp_cpp_area_release_free(area);
+	return -EINVAL;
+}
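+
+/* TLV layout assumed by the parser above (hypothetical struct for
+ * illustration; the real ABI definitions live in fw.h):
+ *
+ *	struct nfp_bpf_cap_tlv {
+ *		__le32 type;
+ *		__le32 length;		(bytes of value[])
+ *		u8 value[];		(next TLV follows immediately)
+ *	};
+ */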
+
+static int nfp_bpf_init(struct nfp_app *app)
+{
+	struct nfp_app_bpf *bpf;
+	int err;
+
+	bpf = kzalloc(sizeof(*bpf), GFP_KERNEL);
+	if (!bpf)
+		return -ENOMEM;
+	bpf->app = app;
+	app->priv = bpf;
+
+	skb_queue_head_init(&bpf->cmsg_replies);
+	init_waitqueue_head(&bpf->cmsg_wq);
+	INIT_LIST_HEAD(&bpf->map_list);
+
+	err = nfp_bpf_parse_capabilities(app);
+	if (err)
+		goto err_free_bpf;
+
+	return 0;
+
+err_free_bpf:
+	kfree(bpf);
+	return err;
+}
+
+static void nfp_bpf_clean(struct nfp_app *app)
+{
+	struct nfp_app_bpf *bpf = app->priv;
+
+	WARN_ON(!skb_queue_empty(&bpf->cmsg_replies));
+	WARN_ON(!list_empty(&bpf->map_list));
+	WARN_ON(bpf->maps_in_use || bpf->map_elems_in_use);
+	kfree(bpf);
 }
 
 const struct nfp_app_type app_bpf = {
 	.id		= NFP_APP_BPF_NIC,
 	.name		= "ebpf",
 
+	.ctrl_cap_mask	= 0,
+
+	.init		= nfp_bpf_init,
+	.clean		= nfp_bpf_clean,
+
+	.change_mtu	= nfp_bpf_change_mtu,
+
 	.extra_cap	= nfp_bpf_extra_cap,
 
 	.vnic_alloc	= nfp_bpf_vnic_alloc,
 	.vnic_free	= nfp_bpf_vnic_free,
 
+	.ctrl_msg_rx	= nfp_bpf_ctrl_msg_rx,
+
 	.setup_tc	= nfp_bpf_setup_tc,
 	.tc_busy	= nfp_bpf_tc_busy,
+	.bpf		= nfp_ndo_bpf,
 	.xdp_offload	= nfp_bpf_xdp_offload,
-
-	.bpf_verifier_prep	= nfp_bpf_verifier_prep,
-	.bpf_translate		= nfp_bpf_translate,
-	.bpf_destroy		= nfp_bpf_destroy,
 };
diff --git a/drivers/net/ethernet/netronome/nfp/bpf/main.h b/drivers/net/ethernet/netronome/nfp/bpf/main.h
index 57b6043177a3..424fe8338105 100644
--- a/drivers/net/ethernet/netronome/nfp/bpf/main.h
+++ b/drivers/net/ethernet/netronome/nfp/bpf/main.h
@@ -1,5 +1,5 @@
 /*
- * Copyright (C) 2016 Netronome Systems, Inc.
+ * Copyright (C) 2016-2017 Netronome Systems, Inc.
  *
  * This software is dual licensed under the GNU General License Version 2,
  * June 1991 as shown in the file COPYING in the top-level directory of this
@@ -37,22 +37,40 @@
 #include <linux/bitfield.h>
 #include <linux/bpf.h>
 #include <linux/bpf_verifier.h>
+#include <linux/kernel.h>
 #include <linux/list.h>
+#include <linux/skbuff.h>
 #include <linux/types.h>
+#include <linux/wait.h>
 
 #include "../nfp_asm.h"
+#include "fw.h"
 
-/* For branch fixup logic use up-most byte of branch instruction as scratch
+/* For relocation logic use the upper-most byte of the branch instruction as scratch
  * area.  Remember to clear this before sending instructions to HW!
  */
-#define OP_BR_SPECIAL	0xff00000000000000ULL
-
-enum br_special {
-	OP_BR_NORMAL = 0,
-	OP_BR_GO_OUT,
-	OP_BR_GO_ABORT,
+#define OP_RELO_TYPE	0xff00000000000000ULL
+
+enum nfp_relo_type {
+	RELO_NONE = 0,
+	/* standard internal jumps */
+	RELO_BR_REL,
+	/* internal jumps to parts of the outro */
+	RELO_BR_GO_OUT,
+	RELO_BR_GO_ABORT,
+	/* external jumps to fixed addresses */
+	RELO_BR_NEXT_PKT,
+	RELO_BR_HELPER,
+	/* immediate relocation against load address */
+	RELO_IMMED_REL,
 };
 
+/* To make absolute relocated branches (branches other than RELO_BR_REL)
+ * distinguishable in user space dumps from normal jumps, add a large offset
+ * to them.
+ */
+#define BR_OFF_RELO		15000
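+
+/* Example: a helper call is emitted as a branch to
+ * BR_OFF_RELO + BPF_FUNC_map_lookup_elem and resolved at load time by
+ * subtracting BR_OFF_RELO again (see RELO_BR_HELPER handling in jit.c).
+ */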
+
 enum static_regs {
 	STATIC_REG_IMM		= 21, /* Bank AB */
 	STATIC_REG_STACK	= 22, /* Bank A */
@@ -78,6 +96,89 @@ enum pkt_vec {
 #define NFP_BPF_ABI_FLAGS	reg_imm(0)
 #define   NFP_BPF_ABI_FLAG_MARK	1
 
+/**
+ * struct nfp_app_bpf - bpf app priv structure
+ * @app:		backpointer to the app
+ *
+ * @tag_allocator:	bitmap of control message tags in use
+ * @tag_alloc_next:	next tag bit to allocate
+ * @tag_alloc_last:	next tag bit to be freed
+ *
+ * @cmsg_replies:	received cmsg replies waiting to be consumed
+ * @cmsg_wq:		work queue for waiting for cmsg replies
+ *
+ * @map_list:		list of offloaded maps
+ * @maps_in_use:	number of currently offloaded maps
+ * @map_elems_in_use:	number of elements allocated to offloaded maps
+ *
+ * @adjust_head:	adjust head capability
+ * @flags:		extra flags for adjust head
+ * @off_min:		minimal packet offset within buffer required
+ * @off_max:		maximum packet offset within buffer required
+ * @guaranteed_sub:	amount of negative adjustment guaranteed possible
+ * @guaranteed_add:	amount of positive adjustment guaranteed possible
+ *
+ * @maps:		map capability
+ * @types:		supported map types
+ * @max_maps:		max number of maps supported
+ * @max_elems:		max number of entries in each map
+ * @max_key_sz:		max size of map key
+ * @max_val_sz:		max size of map value
+ * @max_elem_sz:	max size of map entry (key + value)
+ *
+ * @helpers:		helper addresses for various calls
+ * @map_lookup:		map lookup helper address
+ */
+struct nfp_app_bpf {
+	struct nfp_app *app;
+
+	DECLARE_BITMAP(tag_allocator, U16_MAX + 1);
+	u16 tag_alloc_next;
+	u16 tag_alloc_last;
+
+	struct sk_buff_head cmsg_replies;
+	struct wait_queue_head cmsg_wq;
+
+	struct list_head map_list;
+	unsigned int maps_in_use;
+	unsigned int map_elems_in_use;
+
+	struct nfp_bpf_cap_adjust_head {
+		u32 flags;
+		int off_min;
+		int off_max;
+		int guaranteed_sub;
+		int guaranteed_add;
+	} adjust_head;
+
+	struct {
+		u32 types;
+		u32 max_maps;
+		u32 max_elems;
+		u32 max_key_sz;
+		u32 max_val_sz;
+		u32 max_elem_sz;
+	} maps;
+
+	struct {
+		u32 map_lookup;
+	} helpers;
+};
+
+/**
+ * struct nfp_bpf_map - private per-map data attached to BPF maps for offload
+ * @offmap:	pointer to the offloaded BPF map
+ * @bpf:	back pointer to bpf app private structure
+ * @tid:	table id identifying map on datapath
+ * @l:		link on the nfp_app_bpf->map_list list
+ */
+struct nfp_bpf_map {
+	struct bpf_offloaded_map *offmap;
+	struct nfp_app_bpf *bpf;
+	u32 tid;
+	struct list_head l;
+};
+
 struct nfp_prog;
 struct nfp_insn_meta;
 typedef int (*instr_cb_t)(struct nfp_prog *, struct nfp_insn_meta *);
@@ -89,23 +190,47 @@ typedef int (*instr_cb_t)(struct nfp_prog *, struct nfp_insn_meta *);
 #define nfp_meta_next(meta)	list_next_entry(meta, l)
 #define nfp_meta_prev(meta)	list_prev_entry(meta, l)
 
+#define FLAG_INSN_IS_JUMP_DST	BIT(0)
+
 /**
  * struct nfp_insn_meta - BPF instruction wrapper
  * @insn: BPF instruction
  * @ptr: pointer type for memory operations
+ * @ldst_gather_len: memcpy length gathered from load/store sequence
+ * @paired_st: the paired store insn at the head of the sequence
  * @ptr_not_const: pointer is not always constant
+ * @jmp_dst: destination info for jump instructions
+ * @func_id: function id for call instructions
+ * @arg1: arg1 for call instructions
+ * @arg2: arg2 for call instructions
+ * @arg2_var_off: arg2's stack offset differs between program paths
  * @off: index of first generated machine instruction (in nfp_prog.prog)
  * @n: eBPF instruction number
+ * @flags: eBPF instruction extra optimization flags
  * @skip: skip this instruction (optimized out)
  * @double_cb: callback for second part of the instruction
  * @l: link on nfp_prog->insns list
  */
 struct nfp_insn_meta {
 	struct bpf_insn insn;
-	struct bpf_reg_state ptr;
-	bool ptr_not_const;
+	union {
+		struct {
+			struct bpf_reg_state ptr;
+			struct bpf_insn *paired_st;
+			s16 ldst_gather_len;
+			bool ptr_not_const;
+		};
+		struct nfp_insn_meta *jmp_dst;
+		struct {
+			u32 func_id;
+			struct bpf_reg_state arg1;
+			struct bpf_reg_state arg2;
+			bool arg2_var_off;
+		};
+	};
 	unsigned int off;
 	unsigned short n;
+	unsigned short flags;
 	bool skip;
 	instr_cb_t double_cb;
 
@@ -134,23 +259,36 @@ static inline u8 mbpf_mode(const struct nfp_insn_meta *meta)
 	return BPF_MODE(meta->insn.code);
 }
 
+static inline bool is_mbpf_load(const struct nfp_insn_meta *meta)
+{
+	return (meta->insn.code & ~BPF_SIZE_MASK) == (BPF_LDX | BPF_MEM);
+}
+
+static inline bool is_mbpf_store(const struct nfp_insn_meta *meta)
+{
+	return (meta->insn.code & ~BPF_SIZE_MASK) == (BPF_STX | BPF_MEM);
+}
+
 /**
  * struct nfp_prog - nfp BPF program
+ * @bpf: backpointer to the bpf app priv structure
  * @prog: machine code
  * @prog_len: number of valid instructions in @prog array
  * @__prog_alloc_len: alloc size of @prog array
  * @verifier_meta: temporary storage for verifier's insn meta
  * @type: BPF program type
- * @start_off: address of the first instruction in the memory
+ * @last_bpf_off: address of the last instruction translated from BPF
  * @tgt_out: jump target for normal exit
  * @tgt_abort: jump target for abort (e.g. access outside of packet buffer)
- * @tgt_done: jump target to get the next packet
  * @n_translated: number of successfully translated instructions (for errors)
  * @error: error code if something went wrong
  * @stack_depth: max stack depth from the verifier
+ * @adjust_head_location: insn number of the single adjust_head call, if any
  * @insns: list of BPF instruction wrappers (struct nfp_insn_meta)
  */
 struct nfp_prog {
+	struct nfp_app_bpf *bpf;
+
 	u64 *prog;
 	unsigned int prog_len;
 	unsigned int __prog_alloc_len;
@@ -159,15 +297,15 @@ struct nfp_prog {
 
 	enum bpf_prog_type type;
 
-	unsigned int start_off;
+	unsigned int last_bpf_off;
 	unsigned int tgt_out;
 	unsigned int tgt_abort;
-	unsigned int tgt_done;
 
 	unsigned int n_translated;
 	int error;
 
 	unsigned int stack_depth;
+	unsigned int adjust_head_location;
 
 	struct list_head insns;
 };
@@ -175,26 +313,49 @@ struct nfp_prog {
 /**
  * struct nfp_bpf_vnic - per-vNIC BPF priv structure
  * @tc_prog:	currently loaded cls_bpf program
+ * @start_off:	address of the first instruction in the memory
+ * @tgt_done:	jump target to get the next packet
  */
 struct nfp_bpf_vnic {
 	struct bpf_prog *tc_prog;
+	unsigned int start_off;
+	unsigned int tgt_done;
 };
 
+void nfp_bpf_jit_prepare(struct nfp_prog *nfp_prog, unsigned int cnt);
 int nfp_bpf_jit(struct nfp_prog *prog);
+bool nfp_bpf_supported_opcode(u8 code);
 
-extern const struct bpf_ext_analyzer_ops nfp_bpf_analyzer_ops;
+extern const struct bpf_prog_offload_ops nfp_bpf_analyzer_ops;
 
 struct netdev_bpf;
 struct nfp_app;
 struct nfp_net;
 
+int nfp_ndo_bpf(struct nfp_app *app, struct nfp_net *nn,
+		struct netdev_bpf *bpf);
 int nfp_net_bpf_offload(struct nfp_net *nn, struct bpf_prog *prog,
-			bool old_prog);
-
-int nfp_bpf_verifier_prep(struct nfp_app *app, struct nfp_net *nn,
-			  struct netdev_bpf *bpf);
-int nfp_bpf_translate(struct nfp_app *app, struct nfp_net *nn,
-		      struct bpf_prog *prog);
-int nfp_bpf_destroy(struct nfp_app *app, struct nfp_net *nn,
-		    struct bpf_prog *prog);
+			bool old_prog, struct netlink_ext_ack *extack);
+
+struct nfp_insn_meta *
+nfp_bpf_goto_meta(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta,
+		  unsigned int insn_idx, unsigned int n_insns);
+
+void *nfp_bpf_relo_for_vnic(struct nfp_prog *nfp_prog, struct nfp_bpf_vnic *bv);
+
+long long int
+nfp_bpf_ctrl_alloc_map(struct nfp_app_bpf *bpf, struct bpf_map *map);
+void
+nfp_bpf_ctrl_free_map(struct nfp_app_bpf *bpf, struct nfp_bpf_map *nfp_map);
+int nfp_bpf_ctrl_getfirst_entry(struct bpf_offloaded_map *offmap,
+				void *next_key);
+int nfp_bpf_ctrl_update_entry(struct bpf_offloaded_map *offmap,
+			      void *key, void *value, u64 flags);
+int nfp_bpf_ctrl_del_entry(struct bpf_offloaded_map *offmap, void *key);
+int nfp_bpf_ctrl_lookup_entry(struct bpf_offloaded_map *offmap,
+			      void *key, void *value);
+int nfp_bpf_ctrl_getnext_entry(struct bpf_offloaded_map *offmap,
+			       void *key, void *next_key);
+
+void nfp_bpf_ctrl_msg_rx(struct nfp_app *app, struct sk_buff *skb);
 #endif
diff --git a/drivers/net/ethernet/netronome/nfp/bpf/offload.c b/drivers/net/ethernet/netronome/nfp/bpf/offload.c
index bc879aeb62d4..0a7732385469 100644
--- a/drivers/net/ethernet/netronome/nfp/bpf/offload.c
+++ b/drivers/net/ethernet/netronome/nfp/bpf/offload.c
@@ -1,5 +1,5 @@
 /*
- * Copyright (C) 2016 Netronome Systems, Inc.
+ * Copyright (C) 2016-2017 Netronome Systems, Inc.
  *
  * This software is dual licensed under the GNU General License Version 2,
  * June 1991 as shown in the file COPYING in the top-level directory of this
@@ -36,18 +36,23 @@
  * Netronome network device driver: TC offload functions for PF and VF
  */
 
+#define pr_fmt(fmt)	"NFP net bpf: " fmt
+
+#include <linux/bpf.h>
 #include <linux/kernel.h>
 #include <linux/netdevice.h>
 #include <linux/pci.h>
 #include <linux/jiffies.h>
 #include <linux/timer.h>
 #include <linux/list.h>
+#include <linux/mm.h>
 
 #include <net/pkt_cls.h>
 #include <net/tc_act/tc_gact.h>
 #include <net/tc_act/tc_mirred.h>
 
 #include "main.h"
+#include "../nfp_app.h"
 #include "../nfp_net_ctrl.h"
 #include "../nfp_net.h"
 
@@ -55,11 +60,10 @@ static int
 nfp_prog_prepare(struct nfp_prog *nfp_prog, const struct bpf_insn *prog,
 		 unsigned int cnt)
 {
+	struct nfp_insn_meta *meta;
 	unsigned int i;
 
 	for (i = 0; i < cnt; i++) {
-		struct nfp_insn_meta *meta;
-
 		meta = kzalloc(sizeof(*meta), GFP_KERNEL);
 		if (!meta)
 			return -ENOMEM;
@@ -70,6 +74,8 @@ nfp_prog_prepare(struct nfp_prog *nfp_prog, const struct bpf_insn *prog,
 		list_add_tail(&meta->l, &nfp_prog->insns);
 	}
 
+	nfp_bpf_jit_prepare(nfp_prog, cnt);
+
 	return 0;
 }
 
@@ -84,8 +90,9 @@ static void nfp_prog_free(struct nfp_prog *nfp_prog)
 	kfree(nfp_prog);
 }
 
-int nfp_bpf_verifier_prep(struct nfp_app *app, struct nfp_net *nn,
-			  struct netdev_bpf *bpf)
+static int
+nfp_bpf_verifier_prep(struct nfp_app *app, struct nfp_net *nn,
+		      struct netdev_bpf *bpf)
 {
 	struct bpf_prog *prog = bpf->verifier.prog;
 	struct nfp_prog *nfp_prog;
@@ -98,6 +105,7 @@ int nfp_bpf_verifier_prep(struct nfp_app *app, struct nfp_net *nn,
 
 	INIT_LIST_HEAD(&nfp_prog->insns);
 	nfp_prog->type = prog->type;
+	nfp_prog->bpf = app->priv;
 
 	ret = nfp_prog_prepare(nfp_prog, prog->insnsi, prog->len);
 	if (ret)
@@ -114,12 +122,12 @@ err_free:
 	return ret;
 }
 
-int nfp_bpf_translate(struct nfp_app *app, struct nfp_net *nn,
-		      struct bpf_prog *prog)
+static int nfp_bpf_translate(struct nfp_net *nn, struct bpf_prog *prog)
 {
 	struct nfp_prog *nfp_prog = prog->aux->offload->dev_priv;
 	unsigned int stack_size;
 	unsigned int max_instr;
+	int err;
 
 	stack_size = nn_readb(nn, NFP_NET_CFG_BPF_STACK_SZ) * 64;
 	if (prog->aux->stack_depth > stack_size) {
@@ -127,50 +135,179 @@ int nfp_bpf_translate(struct nfp_app *app, struct nfp_net *nn,
 			prog->aux->stack_depth, stack_size);
 		return -EOPNOTSUPP;
 	}
-
-	nfp_prog->stack_depth = prog->aux->stack_depth;
-	nfp_prog->start_off = nn_readw(nn, NFP_NET_CFG_BPF_START);
-	nfp_prog->tgt_done = nn_readw(nn, NFP_NET_CFG_BPF_DONE);
+	nfp_prog->stack_depth = round_up(prog->aux->stack_depth, 4);
 
 	max_instr = nn_readw(nn, NFP_NET_CFG_BPF_MAX_LEN);
 	nfp_prog->__prog_alloc_len = max_instr * sizeof(u64);
 
-	nfp_prog->prog = kmalloc(nfp_prog->__prog_alloc_len, GFP_KERNEL);
+	nfp_prog->prog = kvmalloc(nfp_prog->__prog_alloc_len, GFP_KERNEL);
 	if (!nfp_prog->prog)
 		return -ENOMEM;
 
-	return nfp_bpf_jit(nfp_prog);
+	err = nfp_bpf_jit(nfp_prog);
+	if (err)
+		return err;
+
+	prog->aux->offload->jited_len = nfp_prog->prog_len * sizeof(u64);
+	prog->aux->offload->jited_image = nfp_prog->prog;
+
+	return 0;
 }
 
-int nfp_bpf_destroy(struct nfp_app *app, struct nfp_net *nn,
-		    struct bpf_prog *prog)
+static int nfp_bpf_destroy(struct nfp_net *nn, struct bpf_prog *prog)
 {
 	struct nfp_prog *nfp_prog = prog->aux->offload->dev_priv;
 
-	kfree(nfp_prog->prog);
+	kvfree(nfp_prog->prog);
 	nfp_prog_free(nfp_prog);
 
 	return 0;
 }
 
-static int nfp_net_bpf_load(struct nfp_net *nn, struct bpf_prog *prog)
+static int
+nfp_bpf_map_get_next_key(struct bpf_offloaded_map *offmap,
+			 void *key, void *next_key)
+{
+	if (!key)
+		return nfp_bpf_ctrl_getfirst_entry(offmap, next_key);
+	return nfp_bpf_ctrl_getnext_entry(offmap, key, next_key);
+}
+
+static int
+nfp_bpf_map_delete_elem(struct bpf_offloaded_map *offmap, void *key)
+{
+	if (offmap->map.map_type == BPF_MAP_TYPE_ARRAY)
+		return -EINVAL;
+	return nfp_bpf_ctrl_del_entry(offmap, key);
+}
+
+static const struct bpf_map_dev_ops nfp_bpf_map_ops = {
+	.map_get_next_key	= nfp_bpf_map_get_next_key,
+	.map_lookup_elem	= nfp_bpf_ctrl_lookup_entry,
+	.map_update_elem	= nfp_bpf_ctrl_update_entry,
+	.map_delete_elem	= nfp_bpf_map_delete_elem,
+};
+
+static int
+nfp_bpf_map_alloc(struct nfp_app_bpf *bpf, struct bpf_offloaded_map *offmap)
+{
+	struct nfp_bpf_map *nfp_map;
+	long long int res;
+
+	if (!bpf->maps.types)
+		return -EOPNOTSUPP;
+
+	if (offmap->map.map_flags ||
+	    offmap->map.numa_node != NUMA_NO_NODE) {
+		pr_info("map flags are not supported\n");
+		return -EINVAL;
+	}
+
+	if (!(bpf->maps.types & 1 << offmap->map.map_type)) {
+		pr_info("map type not supported\n");
+		return -EOPNOTSUPP;
+	}
+	if (bpf->maps.max_maps == bpf->maps_in_use) {
+		pr_info("too many maps for a device\n");
+		return -ENOMEM;
+	}
+	if (bpf->maps.max_elems - bpf->map_elems_in_use <
+	    offmap->map.max_entries) {
+		pr_info("map with too many elements: %u, left: %u\n",
+			offmap->map.max_entries,
+			bpf->maps.max_elems - bpf->map_elems_in_use);
+		return -ENOMEM;
+	}
+	if (offmap->map.key_size > bpf->maps.max_key_sz ||
+	    offmap->map.value_size > bpf->maps.max_val_sz ||
+	    round_up(offmap->map.key_size, 8) +
+	    round_up(offmap->map.value_size, 8) > bpf->maps.max_elem_sz) {
+		pr_info("elements don't fit in device constraints\n");
+		return -ENOMEM;
+	}
+
+	nfp_map = kzalloc(sizeof(*nfp_map), GFP_USER);
+	if (!nfp_map)
+		return -ENOMEM;
+
+	offmap->dev_priv = nfp_map;
+	nfp_map->offmap = offmap;
+	nfp_map->bpf = bpf;
+
+	res = nfp_bpf_ctrl_alloc_map(bpf, &offmap->map);
+	if (res < 0) {
+		kfree(nfp_map);
+		return res;
+	}
+
+	nfp_map->tid = res;
+	offmap->dev_ops = &nfp_bpf_map_ops;
+	bpf->maps_in_use++;
+	bpf->map_elems_in_use += offmap->map.max_entries;
+	list_add_tail(&nfp_map->l, &bpf->map_list);
+
+	return 0;
+}
+
+static int
+nfp_bpf_map_free(struct nfp_app_bpf *bpf, struct bpf_offloaded_map *offmap)
+{
+	struct nfp_bpf_map *nfp_map = offmap->dev_priv;
+
+	nfp_bpf_ctrl_free_map(bpf, nfp_map);
+	list_del_init(&nfp_map->l);
+	bpf->map_elems_in_use -= offmap->map.max_entries;
+	bpf->maps_in_use--;
+	kfree(nfp_map);
+
+	return 0;
+}
+
+int nfp_ndo_bpf(struct nfp_app *app, struct nfp_net *nn, struct netdev_bpf *bpf)
+{
+	switch (bpf->command) {
+	case BPF_OFFLOAD_VERIFIER_PREP:
+		return nfp_bpf_verifier_prep(app, nn, bpf);
+	case BPF_OFFLOAD_TRANSLATE:
+		return nfp_bpf_translate(nn, bpf->offload.prog);
+	case BPF_OFFLOAD_DESTROY:
+		return nfp_bpf_destroy(nn, bpf->offload.prog);
+	case BPF_OFFLOAD_MAP_ALLOC:
+		return nfp_bpf_map_alloc(app->priv, bpf->offmap);
+	case BPF_OFFLOAD_MAP_FREE:
+		return nfp_bpf_map_free(app->priv, bpf->offmap);
+	default:
+		return -EINVAL;
+	}
+}
+
+static int
+nfp_net_bpf_load(struct nfp_net *nn, struct bpf_prog *prog,
+		 struct netlink_ext_ack *extack)
 {
 	struct nfp_prog *nfp_prog = prog->aux->offload->dev_priv;
 	unsigned int max_mtu;
 	dma_addr_t dma_addr;
+	void *img;
 	int err;
 
 	max_mtu = nn_readb(nn, NFP_NET_CFG_BPF_INL_MTU) * 64 - 32;
 	if (max_mtu < nn->dp.netdev->mtu) {
-		nn_info(nn, "BPF offload not supported with MTU larger than HW packet split boundary\n");
+		NL_SET_ERR_MSG_MOD(extack, "BPF offload not supported with MTU larger than HW packet split boundary");
 		return -EOPNOTSUPP;
 	}
 
-	dma_addr = dma_map_single(nn->dp.dev, nfp_prog->prog,
+	img = nfp_bpf_relo_for_vnic(nfp_prog, nn->app_priv);
+	if (IS_ERR(img))
+		return PTR_ERR(img);
+
+	dma_addr = dma_map_single(nn->dp.dev, img,
 				  nfp_prog->prog_len * sizeof(u64),
 				  DMA_TO_DEVICE);
-	if (dma_mapping_error(nn->dp.dev, dma_addr))
+	if (dma_mapping_error(nn->dp.dev, dma_addr)) {
+		kfree(img);
 		return -ENOMEM;
+	}
 
 	nn_writew(nn, NFP_NET_CFG_BPF_SIZE, nfp_prog->prog_len);
 	nn_writeq(nn, NFP_NET_CFG_BPF_ADDR, dma_addr);
@@ -178,15 +315,18 @@ static int nfp_net_bpf_load(struct nfp_net *nn, struct bpf_prog *prog)
 	/* Load up the JITed code */
 	err = nfp_net_reconfig(nn, NFP_NET_CFG_UPDATE_BPF);
 	if (err)
-		nn_err(nn, "FW command error while loading BPF: %d\n", err);
+		NL_SET_ERR_MSG_MOD(extack,
+				   "FW command error while loading BPF");
 
 	dma_unmap_single(nn->dp.dev, dma_addr, nfp_prog->prog_len * sizeof(u64),
 			 DMA_TO_DEVICE);
+	kfree(img);
 
 	return err;
 }
 
-static void nfp_net_bpf_start(struct nfp_net *nn)
+static void
+nfp_net_bpf_start(struct nfp_net *nn, struct netlink_ext_ack *extack)
 {
 	int err;
 
@@ -195,7 +335,8 @@ static void nfp_net_bpf_start(struct nfp_net *nn)
 	nn_writel(nn, NFP_NET_CFG_CTRL, nn->dp.ctrl);
 	err = nfp_net_reconfig(nn, NFP_NET_CFG_UPDATE_GEN);
 	if (err)
-		nn_err(nn, "FW command error while enabling BPF: %d\n", err);
+		NL_SET_ERR_MSG_MOD(extack,
+				   "FW command error while enabling BPF");
 }
 
 static int nfp_net_bpf_stop(struct nfp_net *nn)
@@ -210,12 +351,12 @@ static int nfp_net_bpf_stop(struct nfp_net *nn)
 }
 
 int nfp_net_bpf_offload(struct nfp_net *nn, struct bpf_prog *prog,
-			bool old_prog)
+			bool old_prog, struct netlink_ext_ack *extack)
 {
 	int err;
 
 	if (prog) {
-		struct bpf_dev_offload *offload = prog->aux->offload;
+		struct bpf_prog_offload *offload = prog->aux->offload;
 
 		if (!offload)
 			return -EINVAL;
@@ -228,7 +369,8 @@ int nfp_net_bpf_offload(struct nfp_net *nn, struct bpf_prog *prog,
 
 		cap = nn_readb(nn, NFP_NET_CFG_BPF_CAP);
 		if (!(cap & NFP_NET_BPF_CAP_RELO)) {
-			nn_err(nn, "FW does not support live reload\n");
+			NL_SET_ERR_MSG_MOD(extack,
+					   "FW does not support live reload");
 			return -EBUSY;
 		}
 	}
@@ -240,12 +382,12 @@ int nfp_net_bpf_offload(struct nfp_net *nn, struct bpf_prog *prog,
 	if (old_prog && !prog)
 		return nfp_net_bpf_stop(nn);
 
-	err = nfp_net_bpf_load(nn, prog);
+	err = nfp_net_bpf_load(nn, prog, extack);
 	if (err)
 		return err;
 
 	if (!old_prog)
-		nfp_net_bpf_start(nn);
+		nfp_net_bpf_start(nn, extack);
 
 	return 0;
 }
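
The map allocation path added above admits an offloaded map only while the device still has a free map slot and enough spare entries. A standalone sketch of that capacity accounting, with an illustrative struct in place of nfp_app_bpf:

	#include <stdbool.h>

	/* Illustrative stand-in for the relevant struct nfp_app_bpf fields. */
	struct map_caps {
		unsigned int max_maps;		/* bpf->maps.max_maps */
		unsigned int max_elems;		/* bpf->maps.max_elems */
		unsigned int maps_in_use;	/* bpf->maps_in_use */
		unsigned int map_elems_in_use;	/* bpf->map_elems_in_use */
	};

	/* Mirrors the admission checks in nfp_bpf_map_alloc() above. */
	static bool map_fits_on_device(const struct map_caps *c,
				       unsigned int max_entries)
	{
		if (c->maps_in_use == c->max_maps)
			return false;		/* too many maps for a device */
		if (c->max_elems - c->map_elems_in_use < max_entries)
			return false;		/* map with too many elements */
		return true;
	}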
diff --git a/drivers/net/ethernet/netronome/nfp/bpf/verifier.c b/drivers/net/ethernet/netronome/nfp/bpf/verifier.c
index 8d43491ddd6b..479f602887e9 100644
--- a/drivers/net/ethernet/netronome/nfp/bpf/verifier.c
+++ b/drivers/net/ethernet/netronome/nfp/bpf/verifier.c
@@ -1,5 +1,5 @@
 /*
- * Copyright (C) 2016 Netronome Systems, Inc.
+ * Copyright (C) 2016-2017 Netronome Systems, Inc.
  *
 * This software is dual licensed under the GNU General Public License Version 2,
  * June 1991 as shown in the file COPYING in the top-level directory of this
@@ -31,16 +31,18 @@
  * SOFTWARE.
  */
 
-#define pr_fmt(fmt)	"NFP net bpf: " fmt
-
 #include <linux/bpf.h>
 #include <linux/bpf_verifier.h>
 #include <linux/kernel.h>
 #include <linux/pkt_cls.h>
 
+#include "fw.h"
 #include "main.h"
 
-static struct nfp_insn_meta *
+#define pr_vlog(env, fmt, ...)	\
+	bpf_verifier_log_write(env, "[nfp] " fmt, ##__VA_ARGS__)
+
+struct nfp_insn_meta *
 nfp_bpf_goto_meta(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta,
 		  unsigned int insn_idx, unsigned int n_insns)
 {
@@ -68,6 +70,114 @@ nfp_bpf_goto_meta(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta,
 	return meta;
 }
 
+static void
+nfp_record_adjust_head(struct nfp_app_bpf *bpf, struct nfp_prog *nfp_prog,
+		       struct nfp_insn_meta *meta,
+		       const struct bpf_reg_state *reg2)
+{
+	unsigned int location = UINT_MAX;
+	int imm;
+
+	/* Datapath usually can give us guarantees on how much adjust head
+	 * can be done without the need for any checks.  Optimize the simple
+	 * case where there is only one adjust head by a constant.
+	 */
+	if (reg2->type != SCALAR_VALUE || !tnum_is_const(reg2->var_off))
+		goto exit_set_location;
+	imm = reg2->var_off.value;
+	/* Translator will skip all checks, we need to guarantee min pkt len */
+	if (imm > ETH_ZLEN - ETH_HLEN)
+		goto exit_set_location;
+	if (imm > (int)bpf->adjust_head.guaranteed_add ||
+	    imm < -bpf->adjust_head.guaranteed_sub)
+		goto exit_set_location;
+
+	if (nfp_prog->adjust_head_location) {
+		/* Only one call per program allowed */
+		if (nfp_prog->adjust_head_location != meta->n)
+			goto exit_set_location;
+
+		if (meta->arg2.var_off.value != imm)
+			goto exit_set_location;
+	}
+
+	location = meta->n;
+exit_set_location:
+	nfp_prog->adjust_head_location = location;
+}
+
+static int
+nfp_bpf_check_call(struct nfp_prog *nfp_prog, struct bpf_verifier_env *env,
+		   struct nfp_insn_meta *meta)
+{
+	const struct bpf_reg_state *reg1 = cur_regs(env) + BPF_REG_1;
+	const struct bpf_reg_state *reg2 = cur_regs(env) + BPF_REG_2;
+	struct nfp_app_bpf *bpf = nfp_prog->bpf;
+	u32 func_id = meta->insn.imm;
+	s64 off, old_off;
+
+	switch (func_id) {
+	case BPF_FUNC_xdp_adjust_head:
+		if (!bpf->adjust_head.off_max) {
+			pr_vlog(env, "adjust_head not supported by FW\n");
+			return -EOPNOTSUPP;
+		}
+		if (!(bpf->adjust_head.flags & NFP_BPF_ADJUST_HEAD_NO_META)) {
+			pr_vlog(env, "adjust_head: FW requires shifting metadata, not supported by the driver\n");
+			return -EOPNOTSUPP;
+		}
+
+		nfp_record_adjust_head(bpf, nfp_prog, meta, reg2);
+		break;
+
+	case BPF_FUNC_map_lookup_elem:
+		if (!bpf->helpers.map_lookup) {
+			pr_vlog(env, "map_lookup: not supported by FW\n");
+			return -EOPNOTSUPP;
+		}
+		if (reg2->type != PTR_TO_STACK) {
+			pr_vlog(env,
+				"map_lookup: unsupported key ptr type %d\n",
+				reg2->type);
+			return -EOPNOTSUPP;
+		}
+		if (!tnum_is_const(reg2->var_off)) {
+			pr_vlog(env, "map_lookup: variable key pointer\n");
+			return -EOPNOTSUPP;
+		}
+
+		off = reg2->var_off.value + reg2->off;
+		if (-off % 4) {
+			pr_vlog(env,
+				"map_lookup: unaligned stack pointer %lld\n",
+				-off);
+			return -EOPNOTSUPP;
+		}
+
+		/* The rest of the checks apply only if we re-parse the same insn */
+		if (!meta->func_id)
+			break;
+
+		old_off = meta->arg2.var_off.value + meta->arg2.off;
+		meta->arg2_var_off |= off != old_off;
+
+		if (meta->arg1.map_ptr != reg1->map_ptr) {
+			pr_vlog(env, "map_lookup: called for different map\n");
+			return -EOPNOTSUPP;
+		}
+		break;
+	default:
+		pr_vlog(env, "unsupported function id: %d\n", func_id);
+		return -EOPNOTSUPP;
+	}
+
+	meta->func_id = func_id;
+	meta->arg1 = *reg1;
+	meta->arg2 = *reg2;
+
+	return 0;
+}
+
 static int
 nfp_bpf_check_exit(struct nfp_prog *nfp_prog,
 		   struct bpf_verifier_env *env)
@@ -82,7 +192,7 @@ nfp_bpf_check_exit(struct nfp_prog *nfp_prog,
 		char tn_buf[48];
 
 		tnum_strn(tn_buf, sizeof(tn_buf), reg0->var_off);
-		pr_info("unsupported exit state: %d, var_off: %s\n",
+		pr_vlog(env, "unsupported exit state: %d, var_off: %s\n",
 			reg0->type, tn_buf);
 		return -EINVAL;
 	}
@@ -92,7 +202,7 @@ nfp_bpf_check_exit(struct nfp_prog *nfp_prog,
 	    imm <= TC_ACT_REDIRECT &&
 	    imm != TC_ACT_SHOT && imm != TC_ACT_STOLEN &&
 	    imm != TC_ACT_QUEUED) {
-		pr_info("unsupported exit state: %d, imm: %llx\n",
+		pr_vlog(env, "unsupported exit state: %d, imm: %llx\n",
 			reg0->type, imm);
 		return -EINVAL;
 	}
@@ -103,12 +213,13 @@ nfp_bpf_check_exit(struct nfp_prog *nfp_prog,
 static int
 nfp_bpf_check_stack_access(struct nfp_prog *nfp_prog,
 			   struct nfp_insn_meta *meta,
-			   const struct bpf_reg_state *reg)
+			   const struct bpf_reg_state *reg,
+			   struct bpf_verifier_env *env)
 {
 	s32 old_off, new_off;
 
 	if (!tnum_is_const(reg->var_off)) {
-		pr_info("variable ptr stack access\n");
+		pr_vlog(env, "variable ptr stack access\n");
 		return -EINVAL;
 	}
 
@@ -126,7 +237,7 @@ nfp_bpf_check_stack_access(struct nfp_prog *nfp_prog,
 	if (old_off % 4 == new_off % 4)
 		return 0;
 
-	pr_info("stack access changed location was:%d is:%d\n",
+	pr_vlog(env, "stack access changed location was:%d is:%d\n",
 		old_off, new_off);
 	return -EINVAL;
 }
@@ -140,19 +251,27 @@ nfp_bpf_check_ptr(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta,
 
 	if (reg->type != PTR_TO_CTX &&
 	    reg->type != PTR_TO_STACK &&
+	    reg->type != PTR_TO_MAP_VALUE &&
 	    reg->type != PTR_TO_PACKET) {
-		pr_info("unsupported ptr type: %d\n", reg->type);
+		pr_vlog(env, "unsupported ptr type: %d\n", reg->type);
 		return -EINVAL;
 	}
 
 	if (reg->type == PTR_TO_STACK) {
-		err = nfp_bpf_check_stack_access(nfp_prog, meta, reg);
+		err = nfp_bpf_check_stack_access(nfp_prog, meta, reg, env);
 		if (err)
 			return err;
 	}
 
+	if (reg->type == PTR_TO_MAP_VALUE) {
+		if (is_mbpf_store(meta)) {
+			pr_vlog(env, "map writes not supported\n");
+			return -EOPNOTSUPP;
+		}
+	}
+
 	if (meta->ptr.type != NOT_INIT && meta->ptr.type != reg->type) {
-		pr_info("ptr type changed for instruction %d -> %d\n",
+		pr_vlog(env, "ptr type changed for instruction %d -> %d\n",
 			meta->ptr.type, reg->type);
 		return -EINVAL;
 	}
@@ -171,25 +290,33 @@ nfp_verify_insn(struct bpf_verifier_env *env, int insn_idx, int prev_insn_idx)
 	meta = nfp_bpf_goto_meta(nfp_prog, meta, insn_idx, env->prog->len);
 	nfp_prog->verifier_meta = meta;
 
+	if (!nfp_bpf_supported_opcode(meta->insn.code)) {
+		pr_vlog(env, "instruction %#02x not supported\n",
+			meta->insn.code);
+		return -EINVAL;
+	}
+
 	if (meta->insn.src_reg >= MAX_BPF_REG ||
 	    meta->insn.dst_reg >= MAX_BPF_REG) {
-		pr_err("program uses extended registers - jit hardening?\n");
+		pr_vlog(env, "program uses extended registers - jit hardening?\n");
 		return -EINVAL;
 	}
 
+	if (meta->insn.code == (BPF_JMP | BPF_CALL))
+		return nfp_bpf_check_call(nfp_prog, env, meta);
 	if (meta->insn.code == (BPF_JMP | BPF_EXIT))
 		return nfp_bpf_check_exit(nfp_prog, env);
 
-	if ((meta->insn.code & ~BPF_SIZE_MASK) == (BPF_LDX | BPF_MEM))
+	if (is_mbpf_load(meta))
 		return nfp_bpf_check_ptr(nfp_prog, meta, env,
 					 meta->insn.src_reg);
-	if ((meta->insn.code & ~BPF_SIZE_MASK) == (BPF_STX | BPF_MEM))
+	if (is_mbpf_store(meta))
 		return nfp_bpf_check_ptr(nfp_prog, meta, env,
 					 meta->insn.dst_reg);
 
 	return 0;
 }
 
-const struct bpf_ext_analyzer_ops nfp_bpf_analyzer_ops = {
+const struct bpf_prog_offload_ops nfp_bpf_analyzer_ops = {
 	.insn_hook = nfp_verify_insn,
 };
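
nfp_bpf_check_call() above accepts a map_lookup_elem() key only at a constant, 4-byte-aligned stack offset. The alignment rule in isolation, with plain integers standing in for bpf_reg_state:

	#include <stdbool.h>
	#include <stdint.h>

	/* off = reg2->var_off.value + reg2->off; stack offsets are negative
	 * (below the frame), so the driver writes the test as -off % 4.
	 */
	static bool map_key_stack_off_ok(int64_t var_off_value, int32_t reg_off)
	{
		int64_t off = var_off_value + reg_off;

		return -off % 4 == 0;
	}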
diff --git a/drivers/net/ethernet/netronome/nfp/flower/action.c b/drivers/net/ethernet/netronome/nfp/flower/action.c
index c1c595f8bb87..b3567a596fc1 100644
--- a/drivers/net/ethernet/netronome/nfp/flower/action.c
+++ b/drivers/net/ethernet/netronome/nfp/flower/action.c
@@ -81,6 +81,9 @@ static bool nfp_fl_netdev_is_tunnel_type(struct net_device *out_dev,
 	if (!strcmp(out_dev->rtnl_link_ops->kind, "vxlan"))
 		return tun_type == NFP_FL_TUNNEL_VXLAN;
 
+	if (!strcmp(out_dev->rtnl_link_ops->kind, "geneve"))
+		return tun_type == NFP_FL_TUNNEL_GENEVE;
+
 	return false;
 }
 
@@ -93,13 +96,11 @@ nfp_fl_output(struct nfp_fl_output *output, const struct tc_action *action,
 	size_t act_size = sizeof(struct nfp_fl_output);
 	struct net_device *out_dev;
 	u16 tmp_flags;
-	int ifindex;
 
 	output->head.jump_id = NFP_FL_ACTION_OPCODE_OUTPUT;
 	output->head.len_lw = act_size >> NFP_FL_LW_SIZ;
 
-	ifindex = tcf_mirred_ifindex(action);
-	out_dev = __dev_get_by_index(dev_net(in_dev), ifindex);
+	out_dev = tcf_mirred_dev(action);
 	if (!out_dev)
 		return -EOPNOTSUPP;
 
@@ -138,11 +139,23 @@ nfp_fl_output(struct nfp_fl_output *output, const struct tc_action *action,
 	return 0;
 }
 
-static bool nfp_fl_supported_tun_port(const struct tc_action *action)
+static enum nfp_flower_tun_type
+nfp_fl_get_tun_from_act_l4_port(struct nfp_app *app,
+				const struct tc_action *action)
 {
 	struct ip_tunnel_info *tun = tcf_tunnel_info(action);
-
-	return tun->key.tp_dst == htons(NFP_FL_VXLAN_PORT);
+	struct nfp_flower_priv *priv = app->priv;
+
+	switch (tun->key.tp_dst) {
+	case htons(NFP_FL_VXLAN_PORT):
+		return NFP_FL_TUNNEL_VXLAN;
+	case htons(NFP_FL_GENEVE_PORT):
+		if (priv->flower_ext_feats & NFP_FL_FEATS_GENEVE)
+			return NFP_FL_TUNNEL_GENEVE;
+		/* FALLTHROUGH */
+	default:
+		return NFP_FL_TUNNEL_NONE;
+	}
 }
 
 static struct nfp_fl_pre_tunnel *nfp_fl_pre_tunnel(char *act_data, int act_len)
@@ -167,38 +180,33 @@ static struct nfp_fl_pre_tunnel *nfp_fl_pre_tunnel(char *act_data, int act_len)
 }
 
 static int
-nfp_fl_set_vxlan(struct nfp_fl_set_vxlan *set_vxlan,
-		 const struct tc_action *action,
-		 struct nfp_fl_pre_tunnel *pre_tun)
+nfp_fl_set_ipv4_udp_tun(struct nfp_fl_set_ipv4_udp_tun *set_tun,
+			const struct tc_action *action,
+			struct nfp_fl_pre_tunnel *pre_tun,
+			enum nfp_flower_tun_type tun_type)
 {
-	struct ip_tunnel_info *vxlan = tcf_tunnel_info(action);
-	size_t act_size = sizeof(struct nfp_fl_set_vxlan);
-	u32 tmp_set_vxlan_type_index = 0;
+	size_t act_size = sizeof(struct nfp_fl_set_ipv4_udp_tun);
+	struct ip_tunnel_info *ip_tun = tcf_tunnel_info(action);
+	u32 tmp_set_ip_tun_type_index = 0;
 	/* Currently support one pre-tunnel so index is always 0. */
 	int pretun_idx = 0;
 
-	if (vxlan->options_len) {
-		/* Do not support options e.g. vxlan gpe. */
+	if (ip_tun->options_len)
 		return -EOPNOTSUPP;
-	}
 
-	set_vxlan->head.jump_id = NFP_FL_ACTION_OPCODE_SET_IPV4_TUNNEL;
-	set_vxlan->head.len_lw = act_size >> NFP_FL_LW_SIZ;
+	set_tun->head.jump_id = NFP_FL_ACTION_OPCODE_SET_IPV4_TUNNEL;
+	set_tun->head.len_lw = act_size >> NFP_FL_LW_SIZ;
 
 	/* Set tunnel type and pre-tunnel index. */
-	tmp_set_vxlan_type_index |=
-		FIELD_PREP(NFP_FL_IPV4_TUNNEL_TYPE, NFP_FL_TUNNEL_VXLAN) |
+	tmp_set_ip_tun_type_index |=
+		FIELD_PREP(NFP_FL_IPV4_TUNNEL_TYPE, tun_type) |
 		FIELD_PREP(NFP_FL_IPV4_PRE_TUN_INDEX, pretun_idx);
 
-	set_vxlan->tun_type_index = cpu_to_be32(tmp_set_vxlan_type_index);
-
-	set_vxlan->tun_id = vxlan->key.tun_id;
-	set_vxlan->tun_flags = vxlan->key.tun_flags;
-	set_vxlan->ipv4_ttl = vxlan->key.ttl;
-	set_vxlan->ipv4_tos = vxlan->key.tos;
+	set_tun->tun_type_index = cpu_to_be32(tmp_set_ip_tun_type_index);
+	set_tun->tun_id = ip_tun->key.tun_id;
 
 	/* Complete pre_tunnel action. */
-	pre_tun->ipv4_dst = vxlan->key.u.ipv4.dst;
+	pre_tun->ipv4_dst = ip_tun->key.u.ipv4.dst;
 
 	return 0;
 }
@@ -435,8 +443,8 @@ nfp_flower_loop_action(const struct tc_action *a,
 		       struct net_device *netdev,
 		       enum nfp_flower_tun_type *tun_type, int *tun_out_cnt)
 {
+	struct nfp_fl_set_ipv4_udp_tun *set_tun;
 	struct nfp_fl_pre_tunnel *pre_tun;
-	struct nfp_fl_set_vxlan *s_vxl;
 	struct nfp_fl_push_vlan *psh_v;
 	struct nfp_fl_pop_vlan *pop_v;
 	struct nfp_fl_output *output;
@@ -484,26 +492,29 @@ nfp_flower_loop_action(const struct tc_action *a,
 
 		nfp_fl_push_vlan(psh_v, a);
 		*a_len += sizeof(struct nfp_fl_push_vlan);
-	} else if (is_tcf_tunnel_set(a) && nfp_fl_supported_tun_port(a)) {
+	} else if (is_tcf_tunnel_set(a)) {
+		struct nfp_repr *repr = netdev_priv(netdev);
+		*tun_type = nfp_fl_get_tun_from_act_l4_port(repr->app, a);
+		if (*tun_type == NFP_FL_TUNNEL_NONE)
+			return -EOPNOTSUPP;
+
 		/* Pre-tunnel action is required for tunnel encap.
 		 * This checks for next hop entries on NFP.
 		 * If none, the packet falls back before applying other actions.
 		 */
 		if (*a_len + sizeof(struct nfp_fl_pre_tunnel) +
-		    sizeof(struct nfp_fl_set_vxlan) > NFP_FL_MAX_A_SIZ)
+		    sizeof(struct nfp_fl_set_ipv4_udp_tun) > NFP_FL_MAX_A_SIZ)
 			return -EOPNOTSUPP;
 
-		*tun_type = NFP_FL_TUNNEL_VXLAN;
 		pre_tun = nfp_fl_pre_tunnel(nfp_fl->action_data, *a_len);
 		nfp_fl->meta.shortcut = cpu_to_be32(NFP_FL_SC_ACT_NULL);
 		*a_len += sizeof(struct nfp_fl_pre_tunnel);
 
-		s_vxl = (struct nfp_fl_set_vxlan *)&nfp_fl->action_data[*a_len];
-		err = nfp_fl_set_vxlan(s_vxl, a, pre_tun);
+		set_tun = (void *)&nfp_fl->action_data[*a_len];
+		err = nfp_fl_set_ipv4_udp_tun(set_tun, a, pre_tun, *tun_type);
 		if (err)
 			return err;
-
-		*a_len += sizeof(struct nfp_fl_set_vxlan);
+		*a_len += sizeof(struct nfp_fl_set_ipv4_udp_tun);
 	} else if (is_tcf_tunnel_release(a)) {
 		/* Tunnel decap is handled by default so accept action. */
 		return 0;
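
Tunnel-set actions are now classified purely by the action's UDP destination port, as nfp_fl_get_tun_from_act_l4_port() does above. The same mapping as a self-contained sketch, with port numbers from NFP_FL_VXLAN_PORT and NFP_FL_GENEVE_PORT and enum values from nfp_flower_tun_type:

	#include <stdbool.h>
	#include <stdint.h>

	enum tun_type { TUN_NONE = 0, TUN_VXLAN = 2, TUN_GENEVE = 4 };

	/* dst_port is in host byte order here; the driver compares against
	 * htons() constants instead.
	 */
	static enum tun_type tun_from_l4_port(uint16_t dst_port, bool fw_has_geneve)
	{
		switch (dst_port) {
		case 4789:			/* NFP_FL_VXLAN_PORT */
			return TUN_VXLAN;
		case 6081:			/* NFP_FL_GENEVE_PORT */
			if (fw_has_geneve)	/* NFP_FL_FEATS_GENEVE set */
				return TUN_GENEVE;
			/* fall through */
		default:
			return TUN_NONE;
		}
	}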
diff --git a/drivers/net/ethernet/netronome/nfp/flower/cmsg.c b/drivers/net/ethernet/netronome/nfp/flower/cmsg.c
index e98bb9cdb6a3..baaea6f1a9d8 100644
--- a/drivers/net/ethernet/netronome/nfp/flower/cmsg.c
+++ b/drivers/net/ethernet/netronome/nfp/flower/cmsg.c
@@ -125,6 +125,27 @@ int nfp_flower_cmsg_portmod(struct nfp_repr *repr, bool carrier_ok)
 	return 0;
 }
 
+int nfp_flower_cmsg_portreify(struct nfp_repr *repr, bool exists)
+{
+	struct nfp_flower_cmsg_portreify *msg;
+	struct sk_buff *skb;
+
+	skb = nfp_flower_cmsg_alloc(repr->app, sizeof(*msg),
+				    NFP_FLOWER_CMSG_TYPE_PORT_REIFY,
+				    GFP_KERNEL);
+	if (!skb)
+		return -ENOMEM;
+
+	msg = nfp_flower_cmsg_get_data(skb);
+	msg->portnum = cpu_to_be32(repr->dst->u.port_info.port_id);
+	msg->reserved = 0;
+	msg->info = cpu_to_be16(exists);
+
+	nfp_ctrl_tx(repr->app->ctrl, skb);
+
+	return 0;
+}
+
 static void
 nfp_flower_cmsg_portmod_rx(struct nfp_app *app, struct sk_buff *skb)
 {
@@ -161,6 +182,28 @@ nfp_flower_cmsg_portmod_rx(struct nfp_app *app, struct sk_buff *skb)
 }
 
 static void
+nfp_flower_cmsg_portreify_rx(struct nfp_app *app, struct sk_buff *skb)
+{
+	struct nfp_flower_priv *priv = app->priv;
+	struct nfp_flower_cmsg_portreify *msg;
+	bool exists;
+
+	msg = nfp_flower_cmsg_get_data(skb);
+
+	rcu_read_lock();
+	exists = !!nfp_app_repr_get(app, be32_to_cpu(msg->portnum));
+	rcu_read_unlock();
+	if (!exists) {
+		nfp_flower_cmsg_warn(app, "ctrl msg for unknown port 0x%08x\n",
+				     be32_to_cpu(msg->portnum));
+		return;
+	}
+
+	atomic_inc(&priv->reify_replies);
+	wake_up_interruptible(&priv->reify_wait_queue);
+}
+
+static void
 nfp_flower_cmsg_process_one_rx(struct nfp_app *app, struct sk_buff *skb)
 {
 	struct nfp_flower_cmsg_hdr *cmsg_hdr;
@@ -168,20 +211,14 @@ nfp_flower_cmsg_process_one_rx(struct nfp_app *app, struct sk_buff *skb)
 
 	cmsg_hdr = nfp_flower_cmsg_get_hdr(skb);
 
-	if (unlikely(cmsg_hdr->version != NFP_FLOWER_CMSG_VER1)) {
-		nfp_flower_cmsg_warn(app, "Cannot handle repr control version %u\n",
-				     cmsg_hdr->version);
-		goto out;
-	}
-
 	type = cmsg_hdr->type;
 	switch (type) {
+	case NFP_FLOWER_CMSG_TYPE_PORT_REIFY:
+		nfp_flower_cmsg_portreify_rx(app, skb);
+		break;
 	case NFP_FLOWER_CMSG_TYPE_PORT_MOD:
 		nfp_flower_cmsg_portmod_rx(app, skb);
 		break;
-	case NFP_FLOWER_CMSG_TYPE_FLOW_STATS:
-		nfp_flower_rx_flow_stats(app, skb);
-		break;
 	case NFP_FLOWER_CMSG_TYPE_NO_NEIGH:
 		nfp_tunnel_request_route(app, skb);
 		break;
@@ -217,7 +254,23 @@ void nfp_flower_cmsg_process_rx(struct work_struct *work)
 void nfp_flower_cmsg_rx(struct nfp_app *app, struct sk_buff *skb)
 {
 	struct nfp_flower_priv *priv = app->priv;
+	struct nfp_flower_cmsg_hdr *cmsg_hdr;
+
+	cmsg_hdr = nfp_flower_cmsg_get_hdr(skb);
 
-	skb_queue_tail(&priv->cmsg_skbs, skb);
-	schedule_work(&priv->cmsg_work);
+	if (unlikely(cmsg_hdr->version != NFP_FLOWER_CMSG_VER1)) {
+		nfp_flower_cmsg_warn(app, "Cannot handle repr control version %u\n",
+				     cmsg_hdr->version);
+		dev_kfree_skb_any(skb);
+		return;
+	}
+
+	if (cmsg_hdr->type == NFP_FLOWER_CMSG_TYPE_FLOW_STATS) {
+		/* We need to deal with stats updates from HW asap */
+		nfp_flower_rx_flow_stats(app, skb);
+		dev_consume_skb_any(skb);
+	} else {
+		skb_queue_tail(&priv->cmsg_skbs, skb);
+		schedule_work(&priv->cmsg_work);
+	}
 }
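
The control-message RX path is split above: the version check moves into nfp_flower_cmsg_rx() itself, FLOW_STATS messages are consumed inline so statistics stay fresh, and everything else is queued for the cmsg worker. A userspace sketch of that dispatch, with stub handlers and an illustrative message classification:

	#include <stdio.h>

	#define CMSG_VER1	1		/* NFP_FLOWER_CMSG_VER1 */

	enum msg_class { MSG_FLOW_STATS, MSG_OTHER };	/* illustrative split */

	static void handle_stats_inline(const void *msg) { (void)msg; }
	static void queue_for_worker(const void *msg) { (void)msg; }

	static void cmsg_rx(unsigned int version, enum msg_class class,
			    const void *msg)
	{
		if (version != CMSG_VER1) {
			fprintf(stderr, "cannot handle repr control version %u\n",
				version);
			return;				/* dropped on the spot */
		}

		if (class == MSG_FLOW_STATS)
			handle_stats_inline(msg);	/* deal with stats asap */
		else
			queue_for_worker(msg);		/* deferred to the worker */
	}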
diff --git a/drivers/net/ethernet/netronome/nfp/flower/cmsg.h b/drivers/net/ethernet/netronome/nfp/flower/cmsg.h
index 66070741d55f..adfe474c2cf0 100644
--- a/drivers/net/ethernet/netronome/nfp/flower/cmsg.h
+++ b/drivers/net/ethernet/netronome/nfp/flower/cmsg.h
@@ -41,7 +41,7 @@
 #include "../nfp_app.h"
 #include "../nfpcore/nfp_cpp.h"
 
-#define NFP_FLOWER_LAYER_META		BIT(0)
+#define NFP_FLOWER_LAYER_EXT_META	BIT(0)
 #define NFP_FLOWER_LAYER_PORT		BIT(1)
 #define NFP_FLOWER_LAYER_MAC		BIT(2)
 #define NFP_FLOWER_LAYER_TP		BIT(3)
@@ -50,8 +50,7 @@
 #define NFP_FLOWER_LAYER_CT		BIT(6)
 #define NFP_FLOWER_LAYER_VXLAN		BIT(7)
 
-#define NFP_FLOWER_LAYER_ETHER		BIT(3)
-#define NFP_FLOWER_LAYER_ARP		BIT(4)
+#define NFP_FLOWER_LAYER2_GENEVE	BIT(5)
 
 #define NFP_FLOWER_MASK_VLAN_PRIO	GENMASK(15, 13)
 #define NFP_FLOWER_MASK_VLAN_CFI	BIT(12)
@@ -108,6 +107,7 @@
 enum nfp_flower_tun_type {
 	NFP_FL_TUNNEL_NONE =	0,
 	NFP_FL_TUNNEL_VXLAN =	2,
+	NFP_FL_TUNNEL_GENEVE =	4,
 };
 
 struct nfp_fl_act_head {
@@ -165,20 +165,6 @@ struct nfp_fl_pop_vlan {
 	__be16 reserved;
 };
 
-/* Metadata without L2 (1W/4B)
- * ----------------------------------------------------------------
- *    3                   2                   1
- *  1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0
- * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
- * |  key_layers   |    mask_id    |           reserved            |
- * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
- */
-struct nfp_flower_meta_one {
-	u8 nfp_flow_key_layer;
-	u8 mask_id;
-	u16 reserved;
-};
-
 struct nfp_fl_pre_tunnel {
 	struct nfp_fl_act_head head;
 	__be16 reserved;
@@ -187,16 +173,13 @@ struct nfp_fl_pre_tunnel {
 	__be32 extra[3];
 };
 
-struct nfp_fl_set_vxlan {
+struct nfp_fl_set_ipv4_udp_tun {
 	struct nfp_fl_act_head head;
 	__be16 reserved;
-	__be64 tun_id;
+	__be64 tun_id __packed;
 	__be32 tun_type_index;
-	__be16 tun_flags;
-	u8 ipv4_ttl;
-	u8 ipv4_tos;
-	__be32 extra[2];
-} __packed;
+	__be32 extra[3];
+};
 
 /* Metadata with L2 (1W/4B)
  * ----------------------------------------------------------------
@@ -209,12 +192,24 @@ struct nfp_fl_set_vxlan {
  *                           NOTE: |             TCI               |
  *                                 +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
  */
-struct nfp_flower_meta_two {
+struct nfp_flower_meta_tci {
 	u8 nfp_flow_key_layer;
 	u8 mask_id;
 	__be16 tci;
 };
 
+/* Extended metadata for additional key_layers (1W/4B)
+ * ----------------------------------------------------------------
+ *    3                   2                   1
+ *  1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * |                      nfp_flow_key_layer2                      |
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ */
+struct nfp_flower_ext_meta {
+	__be32 nfp_flow_key_layer2;
+};
+
 /* Port details (1W/4B)
  * ----------------------------------------------------------------
  *    3                   2                   1
@@ -313,7 +308,7 @@ struct nfp_flower_ipv6 {
 	struct in6_addr ipv6_dst;
 };
 
-/* Flow Frame VXLAN --> Tunnel details (4W/16B)
+/* Flow Frame IPv4 UDP TUNNEL --> Tunnel details (4W/16B)
  * -----------------------------------------------------------------
  *    3                   2                   1
  *  1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0
@@ -322,22 +317,17 @@ struct nfp_flower_ipv6 {
  * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
  * |                         ipv4_addr_dst                         |
  * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
- * |           tun_flags           |       tos     |       ttl     |
+ * |                            Reserved                           |
  * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
- * |   gpe_flags   |            Reserved           | Next Protocol |
+ * |                            Reserved                           |
  * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
  * |                     VNI                       |   Reserved    |
  * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
  */
-struct nfp_flower_vxlan {
+struct nfp_flower_ipv4_udp_tun {
 	__be32 ip_src;
 	__be32 ip_dst;
-	__be16 tun_flags;
-	u8 tos;
-	u8 ttl;
-	u8 gpe_flags;
-	u8 reserved[2];
-	u8 nxt_proto;
+	__be32 reserved[2];
 	__be32 tun_id;
 };
 
@@ -360,6 +350,7 @@ struct nfp_flower_cmsg_hdr {
 enum nfp_flower_cmsg_type_port {
 	NFP_FLOWER_CMSG_TYPE_FLOW_ADD =		0,
 	NFP_FLOWER_CMSG_TYPE_FLOW_DEL =		2,
+	NFP_FLOWER_CMSG_TYPE_PORT_REIFY =	6,
 	NFP_FLOWER_CMSG_TYPE_MAC_REPR =		7,
 	NFP_FLOWER_CMSG_TYPE_PORT_MOD =		8,
 	NFP_FLOWER_CMSG_TYPE_NO_NEIGH =		10,
@@ -396,6 +387,15 @@ struct nfp_flower_cmsg_portmod {
 
 #define NFP_FLOWER_CMSG_PORTMOD_INFO_LINK	BIT(0)
 
+/* NFP_FLOWER_CMSG_TYPE_PORT_REIFY */
+struct nfp_flower_cmsg_portreify {
+	__be32 portnum;
+	u16 reserved;
+	__be16 info;
+};
+
+#define NFP_FLOWER_CMSG_PORTREIFY_INFO_EXIST	BIT(0)
+
 enum nfp_flower_cmsg_port_type {
 	NFP_FLOWER_CMSG_PORT_TYPE_UNSPEC =	0x0,
 	NFP_FLOWER_CMSG_PORT_TYPE_PHYS_PORT =	0x1,
@@ -454,6 +454,7 @@ nfp_flower_cmsg_mac_repr_add(struct sk_buff *skb, unsigned int idx,
 			     unsigned int nbi, unsigned int nbi_port,
 			     unsigned int phys_port);
 int nfp_flower_cmsg_portmod(struct nfp_repr *repr, bool carrier_ok);
+int nfp_flower_cmsg_portreify(struct nfp_repr *repr, bool exists);
 void nfp_flower_cmsg_process_rx(struct work_struct *work);
 void nfp_flower_cmsg_rx(struct nfp_app *app, struct sk_buff *skb);
 struct sk_buff *
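
The new PORT_REIFY message body is just the three fields of struct nfp_flower_cmsg_portreify. A host-side sketch of filling one, mirroring nfp_flower_cmsg_portreify() in cmsg.c, where bit 0 of info is NFP_FLOWER_CMSG_PORTREIFY_INFO_EXIST:

	#include <arpa/inet.h>
	#include <stdbool.h>
	#include <stdint.h>
	#include <string.h>

	struct portreify_msg {
		uint32_t portnum;	/* big endian port ID */
		uint16_t reserved;
		uint16_t info;		/* big endian; bit 0 = repr exists */
	};

	static void fill_portreify(struct portreify_msg *msg, uint32_t port_id,
				   bool exists)
	{
		memset(msg, 0, sizeof(*msg));
		msg->portnum = htonl(port_id);
		msg->info = htons(exists ? 1 : 0);
	}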
diff --git a/drivers/net/ethernet/netronome/nfp/flower/main.c b/drivers/net/ethernet/netronome/nfp/flower/main.c
index 8fcc90c0d2d3..742d6f1575b5 100644
--- a/drivers/net/ethernet/netronome/nfp/flower/main.c
+++ b/drivers/net/ethernet/netronome/nfp/flower/main.c
@@ -32,6 +32,7 @@
  */
 
 #include <linux/etherdevice.h>
+#include <linux/lockdep.h>
 #include <linux/pci.h>
 #include <linux/skbuff.h>
 #include <linux/vmalloc.h>
@@ -98,7 +99,57 @@ nfp_flower_repr_get(struct nfp_app *app, u32 port_id)
 	if (port >= reprs->num_reprs)
 		return NULL;
 
-	return reprs->reprs[port];
+	return rcu_dereference(reprs->reprs[port]);
+}
+
+static int
+nfp_flower_reprs_reify(struct nfp_app *app, enum nfp_repr_type type,
+		       bool exists)
+{
+	struct nfp_reprs *reprs;
+	int i, err, count = 0;
+
+	reprs = rcu_dereference_protected(app->reprs[type],
+					  lockdep_is_held(&app->pf->lock));
+	if (!reprs)
+		return 0;
+
+	for (i = 0; i < reprs->num_reprs; i++) {
+		struct net_device *netdev;
+
+		netdev = nfp_repr_get_locked(app, reprs, i);
+		if (netdev) {
+			struct nfp_repr *repr = netdev_priv(netdev);
+
+			err = nfp_flower_cmsg_portreify(repr, exists);
+			if (err)
+				return err;
+			count++;
+		}
+	}
+
+	return count;
+}
+
+static int
+nfp_flower_wait_repr_reify(struct nfp_app *app, atomic_t *replies, int tot_repl)
+{
+	struct nfp_flower_priv *priv = app->priv;
+	int err;
+
+	if (!tot_repl)
+		return 0;
+
+	lockdep_assert_held(&app->pf->lock);
+	err = wait_event_interruptible_timeout(priv->reify_wait_queue,
+					       atomic_read(replies) >= tot_repl,
+					       msecs_to_jiffies(10));
+	if (err <= 0) {
+		nfp_warn(app->cpp, "Not all reprs responded to reify\n");
+		return -EIO;
+	}
+
+	return 0;
 }
 
 static int
@@ -110,7 +161,6 @@ nfp_flower_repr_netdev_open(struct nfp_app *app, struct nfp_repr *repr)
 	if (err)
 		return err;
 
-	netif_carrier_on(repr->netdev);
 	netif_tx_wake_all_queues(repr->netdev);
 
 	return 0;
@@ -119,7 +169,6 @@ nfp_flower_repr_netdev_open(struct nfp_app *app, struct nfp_repr *repr)
 static int
 nfp_flower_repr_netdev_stop(struct nfp_app *app, struct nfp_repr *repr)
 {
-	netif_carrier_off(repr->netdev);
 	netif_tx_disable(repr->netdev);
 
 	return nfp_flower_cmsg_portmod(repr, false);
@@ -140,6 +189,24 @@ nfp_flower_repr_netdev_clean(struct nfp_app *app, struct net_device *netdev)
 				     netdev_priv(netdev));
 }
 
+static void
+nfp_flower_repr_netdev_preclean(struct nfp_app *app, struct net_device *netdev)
+{
+	struct nfp_repr *repr = netdev_priv(netdev);
+	struct nfp_flower_priv *priv = app->priv;
+	atomic_t *replies = &priv->reify_replies;
+	int err;
+
+	atomic_set(replies, 0);
+	err = nfp_flower_cmsg_portreify(repr, false);
+	if (err) {
+		nfp_warn(app->cpp, "Failed to notify firmware about repr destruction\n");
+		return;
+	}
+
+	nfp_flower_wait_repr_reify(app, replies, 1);
+}
+
 static void nfp_flower_sriov_disable(struct nfp_app *app)
 {
 	struct nfp_flower_priv *priv = app->priv;
@@ -157,10 +224,11 @@ nfp_flower_spawn_vnic_reprs(struct nfp_app *app,
 {
 	u8 nfp_pcie = nfp_cppcore_pcie_unit(app->pf->cpp);
 	struct nfp_flower_priv *priv = app->priv;
+	atomic_t *replies = &priv->reify_replies;
 	enum nfp_port_type port_type;
 	struct nfp_reprs *reprs;
+	int i, err, reify_cnt;
 	const u8 queue = 0;
-	int i, err;
 
 	port_type = repr_type == NFP_REPR_TYPE_PF ? NFP_PORT_PF_PORT :
 						    NFP_PORT_VF_PORT;
@@ -170,19 +238,21 @@ nfp_flower_spawn_vnic_reprs(struct nfp_app *app,
 		return -ENOMEM;
 
 	for (i = 0; i < cnt; i++) {
+		struct net_device *repr;
 		struct nfp_port *port;
 		u32 port_id;
 
-		reprs->reprs[i] = nfp_repr_alloc(app);
-		if (!reprs->reprs[i]) {
+		repr = nfp_repr_alloc(app);
+		if (!repr) {
 			err = -ENOMEM;
 			goto err_reprs_clean;
 		}
+		RCU_INIT_POINTER(reprs->reprs[i], repr);
 
 		/* For now we only support 1 PF */
 		WARN_ON(repr_type == NFP_REPR_TYPE_PF && i);
 
-		port = nfp_port_alloc(app, port_type, reprs->reprs[i]);
+		port = nfp_port_alloc(app, port_type, repr);
 		if (repr_type == NFP_REPR_TYPE_PF) {
 			port->pf_id = i;
 			port->vnic = priv->nn->dp.ctrl_bar;
@@ -193,11 +263,11 @@ nfp_flower_spawn_vnic_reprs(struct nfp_app *app,
 				app->pf->vf_cfg_mem + i * NFP_NET_CFG_BAR_SZ;
 		}
 
-		eth_hw_addr_random(reprs->reprs[i]);
+		eth_hw_addr_random(repr);
 
 		port_id = nfp_flower_cmsg_pcie_port(nfp_pcie, vnic_type,
 						    i, queue);
-		err = nfp_repr_init(app, reprs->reprs[i],
+		err = nfp_repr_init(app, repr,
 				    port_id, port, priv->nn->dp.netdev);
 		if (err) {
 			nfp_port_free(port);
@@ -206,14 +276,28 @@ nfp_flower_spawn_vnic_reprs(struct nfp_app *app,
 
 		nfp_info(app->cpp, "%s%d Representor(%s) created\n",
 			 repr_type == NFP_REPR_TYPE_PF ? "PF" : "VF", i,
-			 reprs->reprs[i]->name);
+			 repr->name);
 	}
 
 	nfp_app_reprs_set(app, repr_type, reprs);
 
+	atomic_set(replies, 0);
+	reify_cnt = nfp_flower_reprs_reify(app, repr_type, true);
+	if (reify_cnt < 0) {
+		err = reify_cnt;
+		nfp_warn(app->cpp, "Failed to notify firmware about repr creation\n");
+		goto err_reprs_remove;
+	}
+
+	err = nfp_flower_wait_repr_reify(app, replies, reify_cnt);
+	if (err)
+		goto err_reprs_remove;
+
 	return 0;
+err_reprs_remove:
+	reprs = nfp_app_reprs_set(app, repr_type, NULL);
 err_reprs_clean:
-	nfp_reprs_clean_and_free(reprs);
+	nfp_reprs_clean_and_free(app, reprs);
 	return err;
 }
 
@@ -233,10 +317,11 @@ static int
 nfp_flower_spawn_phy_reprs(struct nfp_app *app, struct nfp_flower_priv *priv)
 {
 	struct nfp_eth_table *eth_tbl = app->pf->eth_tbl;
+	atomic_t *replies = &priv->reify_replies;
 	struct sk_buff *ctrl_skb;
 	struct nfp_reprs *reprs;
+	int err, reify_cnt;
 	unsigned int i;
-	int err;
 
 	ctrl_skb = nfp_flower_cmsg_mac_repr_start(app, eth_tbl->count);
 	if (!ctrl_skb)
@@ -250,17 +335,18 @@ nfp_flower_spawn_phy_reprs(struct nfp_app *app, struct nfp_flower_priv *priv)
 
 	for (i = 0; i < eth_tbl->count; i++) {
 		unsigned int phys_port = eth_tbl->ports[i].index;
+		struct net_device *repr;
 		struct nfp_port *port;
 		u32 cmsg_port_id;
 
-		reprs->reprs[phys_port] = nfp_repr_alloc(app);
-		if (!reprs->reprs[phys_port]) {
+		repr = nfp_repr_alloc(app);
+		if (!repr) {
 			err = -ENOMEM;
 			goto err_reprs_clean;
 		}
+		RCU_INIT_POINTER(reprs->reprs[phys_port], repr);
 
-		port = nfp_port_alloc(app, NFP_PORT_PHYS_PORT,
-				      reprs->reprs[phys_port]);
+		port = nfp_port_alloc(app, NFP_PORT_PHYS_PORT, repr);
 		if (IS_ERR(port)) {
 			err = PTR_ERR(port);
 			goto err_reprs_clean;
@@ -271,11 +357,11 @@ nfp_flower_spawn_phy_reprs(struct nfp_app *app, struct nfp_flower_priv *priv)
 			goto err_reprs_clean;
 		}
 
-		SET_NETDEV_DEV(reprs->reprs[phys_port], &priv->nn->pdev->dev);
+		SET_NETDEV_DEV(repr, &priv->nn->pdev->dev);
 		nfp_net_get_mac_addr(app->pf, port);
 
 		cmsg_port_id = nfp_flower_cmsg_phys_port(phys_port);
-		err = nfp_repr_init(app, reprs->reprs[phys_port],
+		err = nfp_repr_init(app, repr,
 				    cmsg_port_id, port, priv->nn->dp.netdev);
 		if (err) {
 			nfp_port_free(port);
@@ -288,23 +374,37 @@ nfp_flower_spawn_phy_reprs(struct nfp_app *app, struct nfp_flower_priv *priv)
 					     phys_port);
 
 		nfp_info(app->cpp, "Phys Port %d Representor(%s) created\n",
-			 phys_port, reprs->reprs[phys_port]->name);
+			 phys_port, repr->name);
 	}
 
 	nfp_app_reprs_set(app, NFP_REPR_TYPE_PHYS_PORT, reprs);
 
-	/* The MAC_REPR control message should be sent after the MAC
+	/* The REIFY/MAC_REPR control messages should be sent after the MAC
 	 * representors are registered using nfp_app_reprs_set().  This is
 	 * because the firmware may respond with control messages for the
 	 * MAC representors, e.g. to provide the driver with information
 	 * about their state, and without registration the driver will drop
 	 * any such messages.
 	 */
+	atomic_set(replies, 0);
+	reify_cnt = nfp_flower_reprs_reify(app, NFP_REPR_TYPE_PHYS_PORT, true);
+	if (reify_cnt < 0) {
+		err = reify_cnt;
+		nfp_warn(app->cpp, "Failed to notify firmware about repr creation\n");
+		goto err_reprs_remove;
+	}
+
+	err = nfp_flower_wait_repr_reify(app, replies, reify_cnt);
+	if (err)
+		goto err_reprs_remove;
+
 	nfp_ctrl_tx(app->ctrl, ctrl_skb);
 
 	return 0;
+err_reprs_remove:
+	reprs = nfp_app_reprs_set(app, NFP_REPR_TYPE_PHYS_PORT, NULL);
 err_reprs_clean:
-	nfp_reprs_clean_and_free(reprs);
+	nfp_reprs_clean_and_free(app, reprs);
 err_free_ctrl_skb:
 	kfree_skb(ctrl_skb);
 	return err;
@@ -381,7 +481,7 @@ static int nfp_flower_init(struct nfp_app *app)
 {
 	const struct nfp_pf *pf = app->pf;
 	struct nfp_flower_priv *app_priv;
-	u64 version;
+	u64 version, features;
 	int err;
 
 	if (!pf->eth_tbl) {
@@ -419,11 +519,20 @@ static int nfp_flower_init(struct nfp_app *app)
 	app_priv->app = app;
 	skb_queue_head_init(&app_priv->cmsg_skbs);
 	INIT_WORK(&app_priv->cmsg_work, nfp_flower_cmsg_process_rx);
+	init_waitqueue_head(&app_priv->reify_wait_queue);
 
 	err = nfp_flower_metadata_init(app);
 	if (err)
 		goto err_free_app_priv;
 
+	/* Extract the extra features supported by the firmware. */
+	features = nfp_rtsym_read_le(app->pf->rtbl,
+				     "_abi_flower_extra_features", &err);
+	if (err)
+		app_priv->flower_ext_feats = 0;
+	else
+		app_priv->flower_ext_feats = features;
+
 	return 0;
 
 err_free_app_priv:
@@ -456,6 +565,8 @@ static void nfp_flower_stop(struct nfp_app *app)
 const struct nfp_app_type app_flower = {
 	.id		= NFP_APP_FLOWER_NIC,
 	.name		= "flower",
+
+	.ctrl_cap_mask	= ~0U,
 	.ctrl_has_meta	= true,
 
 	.extra_cap	= nfp_flower_extra_cap,
@@ -468,6 +579,7 @@ const struct nfp_app_type app_flower = {
 	.vnic_clean	= nfp_flower_vnic_clean,
 
 	.repr_init	= nfp_flower_repr_netdev_init,
+	.repr_preclean	= nfp_flower_repr_netdev_preclean,
 	.repr_clean	= nfp_flower_repr_netdev_clean,
 
 	.repr_open	= nfp_flower_repr_netdev_open,
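
Representor reification is a counted handshake: one PORT_REIFY message per representor, after which nfp_flower_wait_repr_reify() sleeps until the reply counter reaches the number of messages sent, giving up after roughly 10 ms. A userspace sketch of the same wait, using pthreads in place of the kernel waitqueue (names are illustrative):

	#include <pthread.h>
	#include <stdatomic.h>
	#include <time.h>

	static atomic_int replies;	/* like priv->reify_replies */
	static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
	static pthread_cond_t cond = PTHREAD_COND_INITIALIZER;

	/* Reply handler, like nfp_flower_cmsg_portreify_rx(). */
	static void reify_reply_seen(void)
	{
		pthread_mutex_lock(&lock);
		atomic_fetch_add(&replies, 1);
		pthread_cond_broadcast(&cond);
		pthread_mutex_unlock(&lock);
	}

	/* Wait for tot_repl replies or ~10ms; 0 on success, -1 on timeout. */
	static int wait_repr_reify(int tot_repl)
	{
		struct timespec deadline;
		int ret = 0;

		if (!tot_repl)
			return 0;

		clock_gettime(CLOCK_REALTIME, &deadline);
		deadline.tv_nsec += 10 * 1000 * 1000;
		if (deadline.tv_nsec >= 1000000000L) {
			deadline.tv_sec++;
			deadline.tv_nsec -= 1000000000L;
		}

		pthread_mutex_lock(&lock);
		while (!ret && atomic_load(&replies) < tot_repl)
			if (pthread_cond_timedwait(&cond, &lock, &deadline))
				ret = -1;	/* not all reprs responded */
		pthread_mutex_unlock(&lock);

		return ret;
	}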
diff --git a/drivers/net/ethernet/netronome/nfp/flower/main.h b/drivers/net/ethernet/netronome/nfp/flower/main.h
index e6b26c5ae6e0..332ff0fdc038 100644
--- a/drivers/net/ethernet/netronome/nfp/flower/main.h
+++ b/drivers/net/ethernet/netronome/nfp/flower/main.h
@@ -34,6 +34,8 @@
 #ifndef __NFP_FLOWER_H__
 #define __NFP_FLOWER_H__ 1
 
+#include "cmsg.h"
+
 #include <linux/circ_buf.h>
 #include <linux/hashtable.h>
 #include <linux/time64.h>
@@ -58,6 +60,10 @@ struct nfp_app;
 #define NFP_FL_MASK_ID_LOCATION		1
 
 #define NFP_FL_VXLAN_PORT		4789
+#define NFP_FL_GENEVE_PORT		6081
+
+/* Extra features bitmap. */
+#define NFP_FL_FEATS_GENEVE		BIT(0)
 
 struct nfp_fl_mask_id {
 	struct circ_buf mask_id_free_list;
@@ -77,6 +83,7 @@ struct nfp_fl_stats_id {
  * @nn:			Pointer to vNIC
  * @mask_id_seed:	Seed used for mask hash table
  * @flower_version:	HW version of flower
+ * @flower_ext_feats:	Bitmap of extra features the HW supports
  * @stats_ids:		List of free stats ids
  * @mask_ids:		List of free mask ids
  * @mask_table:		Hash table used to store masks
@@ -95,12 +102,16 @@ struct nfp_fl_stats_id {
  * @nfp_mac_off_count:	Number of MACs in address list
  * @nfp_tun_mac_nb:	Notifier to monitor link state
  * @nfp_tun_neigh_nb:	Notifier to monitor neighbour state
+ * @reify_replies:	atomically stores the number of replies received
+ *			from firmware for repr reify
+ * @reify_wait_queue:	wait queue for repr reify response counting
  */
 struct nfp_flower_priv {
 	struct nfp_app *app;
 	struct nfp_net *nn;
 	u32 mask_id_seed;
 	u64 flower_version;
+	u64 flower_ext_feats;
 	struct nfp_fl_stats_id stats_ids;
 	struct nfp_fl_mask_id mask_ids;
 	DECLARE_HASHTABLE(mask_table, NFP_FLOWER_MASK_HASH_BITS);
@@ -119,6 +130,8 @@ struct nfp_flower_priv {
 	int nfp_mac_off_count;
 	struct notifier_block nfp_tun_mac_nb;
 	struct notifier_block nfp_tun_neigh_nb;
+	atomic_t reify_replies;
+	wait_queue_head_t reify_wait_queue;
 };
 
 struct nfp_fl_key_ls {
@@ -172,7 +185,8 @@ int nfp_flower_setup_tc(struct nfp_app *app, struct net_device *netdev,
 int nfp_flower_compile_flow_match(struct tc_cls_flower_offload *flow,
 				  struct nfp_fl_key_ls *key_ls,
 				  struct net_device *netdev,
-				  struct nfp_fl_payload *nfp_flow);
+				  struct nfp_fl_payload *nfp_flow,
+				  enum nfp_flower_tun_type tun_type);
 int nfp_flower_compile_action(struct tc_cls_flower_offload *flow,
 			      struct net_device *netdev,
 			      struct nfp_fl_payload *nfp_flow);
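
flower_ext_feats holds the 64-bit word read from the "_abi_flower_extra_features" rtsym (zero when the symbol is absent), and NFP_FL_FEATS_GENEVE is its first bit. Testing a capability is then a plain bit check, sketched here with standalone types:

	#include <stdbool.h>
	#include <stdint.h>

	#define FL_FEATS_GENEVE	(1ULL << 0)	/* NFP_FL_FEATS_GENEVE */

	/* ext_feats corresponds to priv->flower_ext_feats. */
	static bool fw_supports_geneve(uint64_t ext_feats)
	{
		return (ext_feats & FL_FEATS_GENEVE) != 0;
	}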
diff --git a/drivers/net/ethernet/netronome/nfp/flower/match.c b/drivers/net/ethernet/netronome/nfp/flower/match.c
index 60614d4f0e22..37c2ecae2a7a 100644
--- a/drivers/net/ethernet/netronome/nfp/flower/match.c
+++ b/drivers/net/ethernet/netronome/nfp/flower/match.c
@@ -38,7 +38,7 @@
 #include "main.h"
 
 static void
-nfp_flower_compile_meta_tci(struct nfp_flower_meta_two *frame,
+nfp_flower_compile_meta_tci(struct nfp_flower_meta_tci *frame,
 			    struct tc_cls_flower_offload *flow, u8 key_type,
 			    bool mask_version)
 {
@@ -46,7 +46,7 @@ nfp_flower_compile_meta_tci(struct nfp_flower_meta_two *frame,
 	struct flow_dissector_key_vlan *flow_vlan;
 	u16 tmp_tci;
 
-	memset(frame, 0, sizeof(struct nfp_flower_meta_two));
+	memset(frame, 0, sizeof(struct nfp_flower_meta_tci));
 	/* Populate the metadata frame. */
 	frame->nfp_flow_key_layer = key_type;
 	frame->mask_id = ~0;
@@ -68,11 +68,9 @@ nfp_flower_compile_meta_tci(struct nfp_flower_meta_two *frame,
 }
 
 static void
-nfp_flower_compile_meta(struct nfp_flower_meta_one *frame, u8 key_type)
+nfp_flower_compile_ext_meta(struct nfp_flower_ext_meta *frame, u32 key_ext)
 {
-	frame->nfp_flow_key_layer = key_type;
-	frame->mask_id = 0;
-	frame->reserved = 0;
+	frame->nfp_flow_key_layer2 = cpu_to_be32(key_ext);
 }
 
 static int
@@ -224,16 +222,15 @@ nfp_flower_compile_ipv6(struct nfp_flower_ipv6 *frame,
 }
 
 static void
-nfp_flower_compile_vxlan(struct nfp_flower_vxlan *frame,
-			 struct tc_cls_flower_offload *flow,
-			 bool mask_version, __be32 *tun_dst)
+nfp_flower_compile_ipv4_udp_tun(struct nfp_flower_ipv4_udp_tun *frame,
+				struct tc_cls_flower_offload *flow,
+				bool mask_version)
 {
 	struct fl_flow_key *target = mask_version ? flow->mask : flow->key;
-	struct flow_dissector_key_ipv4_addrs *vxlan_ips;
+	struct flow_dissector_key_ipv4_addrs *tun_ips;
 	struct flow_dissector_key_keyid *vni;
 
-	/* Wildcard TOS/TTL/GPE_FLAGS/NXT_PROTO for now. */
-	memset(frame, 0, sizeof(struct nfp_flower_vxlan));
+	memset(frame, 0, sizeof(struct nfp_flower_ipv4_udp_tun));
 
 	if (dissector_uses_key(flow->dissector,
 			       FLOW_DISSECTOR_KEY_ENC_KEYID)) {
@@ -248,80 +245,68 @@ nfp_flower_compile_vxlan(struct nfp_flower_vxlan *frame,
 
 	if (dissector_uses_key(flow->dissector,
 			       FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS)) {
-		vxlan_ips =
+		tun_ips =
 		   skb_flow_dissector_target(flow->dissector,
 					     FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS,
 					     target);
-		frame->ip_src = vxlan_ips->src;
-		frame->ip_dst = vxlan_ips->dst;
-		*tun_dst = vxlan_ips->dst;
+		frame->ip_src = tun_ips->src;
+		frame->ip_dst = tun_ips->dst;
 	}
 }
 
 int nfp_flower_compile_flow_match(struct tc_cls_flower_offload *flow,
 				  struct nfp_fl_key_ls *key_ls,
 				  struct net_device *netdev,
-				  struct nfp_fl_payload *nfp_flow)
+				  struct nfp_fl_payload *nfp_flow,
+				  enum nfp_flower_tun_type tun_type)
 {
-	enum nfp_flower_tun_type tun_type = NFP_FL_TUNNEL_NONE;
-	__be32 tun_dst, tun_dst_mask = 0;
 	struct nfp_repr *netdev_repr;
 	int err;
 	u8 *ext;
 	u8 *msk;
 
-	if (key_ls->key_layer & NFP_FLOWER_LAYER_VXLAN)
-		tun_type = NFP_FL_TUNNEL_VXLAN;
-
 	memset(nfp_flow->unmasked_data, 0, key_ls->key_size);
 	memset(nfp_flow->mask_data, 0, key_ls->key_size);
 
 	ext = nfp_flow->unmasked_data;
 	msk = nfp_flow->mask_data;
-	if (NFP_FLOWER_LAYER_PORT & key_ls->key_layer) {
-		/* Populate Exact Metadata. */
-		nfp_flower_compile_meta_tci((struct nfp_flower_meta_two *)ext,
-					    flow, key_ls->key_layer, false);
-		/* Populate Mask Metadata. */
-		nfp_flower_compile_meta_tci((struct nfp_flower_meta_two *)msk,
-					    flow, key_ls->key_layer, true);
-		ext += sizeof(struct nfp_flower_meta_two);
-		msk += sizeof(struct nfp_flower_meta_two);
-
-		/* Populate Exact Port data. */
-		err = nfp_flower_compile_port((struct nfp_flower_in_port *)ext,
-					      nfp_repr_get_port_id(netdev),
-					      false, tun_type);
-		if (err)
-			return err;
-
-		/* Populate Mask Port Data. */
-		err = nfp_flower_compile_port((struct nfp_flower_in_port *)msk,
-					      nfp_repr_get_port_id(netdev),
-					      true, tun_type);
-		if (err)
-			return err;
-
-		ext += sizeof(struct nfp_flower_in_port);
-		msk += sizeof(struct nfp_flower_in_port);
-	} else {
-		/* Populate Exact Metadata. */
-		nfp_flower_compile_meta((struct nfp_flower_meta_one *)ext,
-					key_ls->key_layer);
-		/* Populate Mask Metadata. */
-		nfp_flower_compile_meta((struct nfp_flower_meta_one *)msk,
-					key_ls->key_layer);
-		ext += sizeof(struct nfp_flower_meta_one);
-		msk += sizeof(struct nfp_flower_meta_one);
-	}
 
-	if (NFP_FLOWER_LAYER_META & key_ls->key_layer) {
-		/* Additional Metadata Fields.
-		 * Currently unsupported.
-		 */
-		return -EOPNOTSUPP;
+	/* Populate Exact Metadata. */
+	nfp_flower_compile_meta_tci((struct nfp_flower_meta_tci *)ext,
+				    flow, key_ls->key_layer, false);
+	/* Populate Mask Metadata. */
+	nfp_flower_compile_meta_tci((struct nfp_flower_meta_tci *)msk,
+				    flow, key_ls->key_layer, true);
+	ext += sizeof(struct nfp_flower_meta_tci);
+	msk += sizeof(struct nfp_flower_meta_tci);
+
+	/* Populate Extended Metadata if Required. */
+	if (NFP_FLOWER_LAYER_EXT_META & key_ls->key_layer) {
+		nfp_flower_compile_ext_meta((struct nfp_flower_ext_meta *)ext,
+					    key_ls->key_layer_two);
+		nfp_flower_compile_ext_meta((struct nfp_flower_ext_meta *)msk,
+					    key_ls->key_layer_two);
+		ext += sizeof(struct nfp_flower_ext_meta);
+		msk += sizeof(struct nfp_flower_ext_meta);
 	}
 
+	/* Populate Exact Port data. */
+	err = nfp_flower_compile_port((struct nfp_flower_in_port *)ext,
+				      nfp_repr_get_port_id(netdev),
+				      false, tun_type);
+	if (err)
+		return err;
+
+	/* Populate Mask Port Data. */
+	err = nfp_flower_compile_port((struct nfp_flower_in_port *)msk,
+				      nfp_repr_get_port_id(netdev),
+				      true, tun_type);
+	if (err)
+		return err;
+
+	ext += sizeof(struct nfp_flower_in_port);
+	msk += sizeof(struct nfp_flower_in_port);
+
 	if (NFP_FLOWER_LAYER_MAC & key_ls->key_layer) {
 		/* Populate Exact MAC Data. */
 		nfp_flower_compile_mac((struct nfp_flower_mac_mpls *)ext,
@@ -366,15 +351,17 @@ int nfp_flower_compile_flow_match(struct tc_cls_flower_offload *flow,
 		msk += sizeof(struct nfp_flower_ipv6);
 	}
 
-	if (key_ls->key_layer & NFP_FLOWER_LAYER_VXLAN) {
+	if (key_ls->key_layer & NFP_FLOWER_LAYER_VXLAN ||
+	    key_ls->key_layer_two & NFP_FLOWER_LAYER2_GENEVE) {
+		__be32 tun_dst;
+
 		/* Populate Exact VXLAN Data. */
-		nfp_flower_compile_vxlan((struct nfp_flower_vxlan *)ext,
-					 flow, false, &tun_dst);
+		nfp_flower_compile_ipv4_udp_tun((void *)ext, flow, false);
 		/* Populate Mask VXLAN Data. */
-		nfp_flower_compile_vxlan((struct nfp_flower_vxlan *)msk,
-					 flow, true, &tun_dst_mask);
-		ext += sizeof(struct nfp_flower_vxlan);
-		msk += sizeof(struct nfp_flower_vxlan);
+		nfp_flower_compile_ipv4_udp_tun((void *)msk, flow, true);
+		tun_dst = ((struct nfp_flower_ipv4_udp_tun *)ext)->ip_dst;
+		ext += sizeof(struct nfp_flower_ipv4_udp_tun);
+		msk += sizeof(struct nfp_flower_ipv4_udp_tun);
 
 		/* Configure tunnel end point MAC. */
 		if (nfp_netdev_is_nfp_repr(netdev)) {
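
nfp_flower_compile_flow_match() walks the unmasked and mask buffers with two cursors that advance in lockstep, appending one fixed-size struct per selected key layer. The underlying pattern, reduced to a standalone helper:

	#include <string.h>

	struct key_cursors {
		unsigned char *ext;	/* cursor into unmasked_data */
		unsigned char *msk;	/* cursor into mask_data */
	};

	/* Append one key layer's exact and mask values, advancing both
	 * cursors together so the two buffers stay aligned layer by layer.
	 */
	static void emit_key_layer(struct key_cursors *c, const void *exact,
				   const void *mask, size_t len)
	{
		memcpy(c->ext, exact, len);
		memcpy(c->msk, mask, len);
		c->ext += len;
		c->msk += len;
	}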
diff --git a/drivers/net/ethernet/netronome/nfp/flower/offload.c b/drivers/net/ethernet/netronome/nfp/flower/offload.c
index 553f94f55dce..08c4c6dc5f7f 100644
--- a/drivers/net/ethernet/netronome/nfp/flower/offload.c
+++ b/drivers/net/ethernet/netronome/nfp/flower/offload.c
@@ -130,12 +130,15 @@ static bool nfp_flower_check_higher_than_mac(struct tc_cls_flower_offload *f)
 }
 
 static int
-nfp_flower_calculate_key_layers(struct nfp_fl_key_ls *ret_key_ls,
+nfp_flower_calculate_key_layers(struct nfp_app *app,
+				struct nfp_fl_key_ls *ret_key_ls,
 				struct tc_cls_flower_offload *flow,
-				bool egress)
+				bool egress,
+				enum nfp_flower_tun_type *tun_type)
 {
 	struct flow_dissector_key_basic *mask_basic = NULL;
 	struct flow_dissector_key_basic *key_basic = NULL;
+	struct nfp_flower_priv *priv = app->priv;
 	u32 key_layer_two;
 	u8 key_layer;
 	int key_size;
@@ -150,10 +153,15 @@ nfp_flower_calculate_key_layers(struct nfp_fl_key_ls *ret_key_ls,
 		return -EOPNOTSUPP;
 
 	key_layer_two = 0;
-	key_layer = NFP_FLOWER_LAYER_PORT | NFP_FLOWER_LAYER_MAC;
-	key_size = sizeof(struct nfp_flower_meta_one) +
-		   sizeof(struct nfp_flower_in_port) +
-		   sizeof(struct nfp_flower_mac_mpls);
+	key_layer = NFP_FLOWER_LAYER_PORT;
+	key_size = sizeof(struct nfp_flower_meta_tci) +
+		   sizeof(struct nfp_flower_in_port);
+
+	if (dissector_uses_key(flow->dissector, FLOW_DISSECTOR_KEY_ETH_ADDRS) ||
+	    dissector_uses_key(flow->dissector, FLOW_DISSECTOR_KEY_MPLS)) {
+		key_layer |= NFP_FLOWER_LAYER_MAC;
+		key_size += sizeof(struct nfp_flower_mac_mpls);
+	}
 
 	if (dissector_uses_key(flow->dissector,
 			       FLOW_DISSECTOR_KEY_ENC_CONTROL)) {
@@ -192,12 +200,27 @@ nfp_flower_calculate_key_layers(struct nfp_fl_key_ls *ret_key_ls,
 						  FLOW_DISSECTOR_KEY_ENC_PORTS,
 						  flow->key);
 
-		if (mask_enc_ports->dst != cpu_to_be16(~0) ||
-		    enc_ports->dst != htons(NFP_FL_VXLAN_PORT))
+		if (mask_enc_ports->dst != cpu_to_be16(~0))
 			return -EOPNOTSUPP;
 
-		key_layer |= NFP_FLOWER_LAYER_VXLAN;
-		key_size += sizeof(struct nfp_flower_vxlan);
+		switch (enc_ports->dst) {
+		case htons(NFP_FL_VXLAN_PORT):
+			*tun_type = NFP_FL_TUNNEL_VXLAN;
+			key_layer |= NFP_FLOWER_LAYER_VXLAN;
+			key_size += sizeof(struct nfp_flower_ipv4_udp_tun);
+			break;
+		case htons(NFP_FL_GENEVE_PORT):
+			if (!(priv->flower_ext_feats & NFP_FL_FEATS_GENEVE))
+				return -EOPNOTSUPP;
+			*tun_type = NFP_FL_TUNNEL_GENEVE;
+			key_layer |= NFP_FLOWER_LAYER_EXT_META;
+			key_size += sizeof(struct nfp_flower_ext_meta);
+			key_layer_two |= NFP_FLOWER_LAYER2_GENEVE;
+			key_size += sizeof(struct nfp_flower_ipv4_udp_tun);
+			break;
+		default:
+			return -EOPNOTSUPP;
+		}
 	} else if (egress) {
 		/* Reject non tunnel matches offloaded to egress repr. */
 		return -EOPNOTSUPP;
@@ -325,6 +348,7 @@ static int
 nfp_flower_add_offload(struct nfp_app *app, struct net_device *netdev,
 		       struct tc_cls_flower_offload *flow, bool egress)
 {
+	enum nfp_flower_tun_type tun_type = NFP_FL_TUNNEL_NONE;
 	struct nfp_flower_priv *priv = app->priv;
 	struct nfp_fl_payload *flow_pay;
 	struct nfp_fl_key_ls *key_layer;
@@ -334,7 +358,8 @@ nfp_flower_add_offload(struct nfp_app *app, struct net_device *netdev,
 	if (!key_layer)
 		return -ENOMEM;
 
-	err = nfp_flower_calculate_key_layers(key_layer, flow, egress);
+	err = nfp_flower_calculate_key_layers(app, key_layer, flow, egress,
+					      &tun_type);
 	if (err)
 		goto err_free_key_ls;
 
@@ -344,7 +369,8 @@ nfp_flower_add_offload(struct nfp_app *app, struct net_device *netdev,
 		goto err_free_key_ls;
 	}
 
-	err = nfp_flower_compile_flow_match(flow, key_layer, netdev, flow_pay);
+	err = nfp_flower_compile_flow_match(flow, key_layer, netdev, flow_pay,
+					    tun_type);
 	if (err)
 		goto err_destroy_flow;
 
@@ -457,8 +483,7 @@ static int
 nfp_flower_repr_offload(struct nfp_app *app, struct net_device *netdev,
 			struct tc_cls_flower_offload *flower, bool egress)
 {
-	if (!eth_proto_is_802_3(flower->common.protocol) ||
-	    flower->common.chain_index)
+	if (!eth_proto_is_802_3(flower->common.protocol))
 		return -EOPNOTSUPP;
 
 	switch (flower->command) {
@@ -478,7 +503,7 @@ int nfp_flower_setup_tc_egress_cb(enum tc_setup_type type, void *type_data,
 {
 	struct nfp_repr *repr = cb_priv;
 
-	if (!tc_can_offload(repr->netdev))
+	if (!tc_cls_can_offload_and_chain0(repr->netdev, type_data))
 		return -EOPNOTSUPP;
 
 	switch (type) {
@@ -495,7 +520,7 @@ static int nfp_flower_setup_tc_block_cb(enum tc_setup_type type,
 {
 	struct nfp_repr *repr = cb_priv;
 
-	if (!tc_can_offload(repr->netdev))
+	if (!tc_cls_can_offload_and_chain0(repr->netdev, type_data))
 		return -EOPNOTSUPP;
 
 	switch (type) {
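
Key-layer calculation now sizes the match key incrementally: the meta/TCI and in-port words are always present, the MAC/MPLS layer is included only when the filter actually dissects those keys, and a Geneve tunnel pulls in both the extended-metadata word and the IPv4 UDP tunnel struct. A sketch of the accumulation, with illustrative stand-ins for the flower structs (the mac_mpls layout in particular is assumed):

	#include <stdbool.h>
	#include <stddef.h>
	#include <stdint.h>

	struct meta_tci { uint8_t layer, mask_id; uint16_t tci; };
	struct in_port { uint32_t port; };
	struct ext_meta { uint32_t key_layer2; };
	struct mac_mpls { uint8_t mac_dst[6], mac_src[6]; uint32_t mpls_lse; };
	struct ipv4_udp_tun {
		uint32_t ip_src;
		uint32_t ip_dst;
		uint32_t reserved[2];
		uint32_t tun_id;
	};

	static size_t flow_key_size(bool has_mac, bool has_geneve_tun)
	{
		size_t sz = sizeof(struct meta_tci) + sizeof(struct in_port);

		if (has_mac)			/* NFP_FLOWER_LAYER_MAC */
			sz += sizeof(struct mac_mpls);
		if (has_geneve_tun) {		/* NFP_FLOWER_LAYER2_GENEVE */
			sz += sizeof(struct ext_meta);
			sz += sizeof(struct ipv4_udp_tun);
		}
		return sz;
	}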
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_app.c b/drivers/net/ethernet/netronome/nfp/nfp_app.c
index 955a9f44d244..6aedef0ad433 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_app.c
+++ b/drivers/net/ethernet/netronome/nfp/nfp_app.c
@@ -32,6 +32,8 @@
  */
 
 #include <linux/bug.h>
+#include <linux/lockdep.h>
+#include <linux/rcupdate.h>
 #include <linux/skbuff.h>
 #include <linux/slab.h>
 
@@ -99,13 +101,19 @@ nfp_app_ctrl_msg_alloc(struct nfp_app *app, unsigned int size, gfp_t priority)
 }
 
 struct nfp_reprs *
+nfp_reprs_get_locked(struct nfp_app *app, enum nfp_repr_type type)
+{
+	return rcu_dereference_protected(app->reprs[type],
+					 lockdep_is_held(&app->pf->lock));
+}
+
+struct nfp_reprs *
 nfp_app_reprs_set(struct nfp_app *app, enum nfp_repr_type type,
 		  struct nfp_reprs *reprs)
 {
 	struct nfp_reprs *old;
 
-	old = rcu_dereference_protected(app->reprs[type],
-					lockdep_is_held(&app->pf->lock));
+	old = nfp_reprs_get_locked(app, type);
 	rcu_assign_pointer(app->reprs[type], reprs);
 
 	return old;
@@ -116,7 +124,7 @@ struct nfp_app *nfp_app_alloc(struct nfp_pf *pf, enum nfp_app_id id)
 	struct nfp_app *app;
 
 	if (id >= ARRAY_SIZE(apps) || !apps[id]) {
-		nfp_err(pf->cpp, "failed to find app with ID 0x%02hhx\n", id);
+		nfp_err(pf->cpp, "unknown FW app ID 0x%02hhx, driver too old or support for FW not built in\n", id);
 		return ERR_PTR(-EINVAL);
 	}
 
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_app.h b/drivers/net/ethernet/netronome/nfp/nfp_app.h
index 0e5e0305ad1c..437964afa8ee 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_app.h
+++ b/drivers/net/ethernet/netronome/nfp/nfp_app.h
@@ -43,6 +43,7 @@
 struct bpf_prog;
 struct net_device;
 struct netdev_bpf;
+struct netlink_ext_ack;
 struct pci_dev;
 struct sk_buff;
@@ -66,6 +67,9 @@ extern const struct nfp_app_type app_flower;
  * struct nfp_app_type - application definition
  * @id:		application ID
  * @name:	application name
+ * @ctrl_cap_mask:  ctrl vNIC capability mask, allows disabling features like
+ *		    IRQMOD which are on by default but counter-productive for
+ *		    control messages which are often latency-sensitive
  * @ctrl_has_meta:  control messages have prepend of type:5/port:CTRL
  *
  * Callbacks
@@ -77,18 +81,20 @@ extern const struct nfp_app_type app_flower;
  * @vnic_init:	vNIC netdev was registered
  * @vnic_clean:	vNIC netdev about to be unregistered
  * @repr_init:	representor about to be registered
+ * @repr_preclean:	representor about to be unregistered, executed before
+ *			the app's reference to it is removed
  * @repr_clean:	representor about to be unregistered
  * @repr_open:	representor netdev open callback
  * @repr_stop:	representor netdev stop callback
+ * @change_mtu:	MTU change on a netdev has been requested (veto-only, change
+ *		is not guaranteed to be committed)
  * @start:	start application logic
  * @stop:	stop application logic
  * @ctrl_msg_rx:    control message handler
  * @setup_tc:	setup TC ndo
  * @tc_busy:	TC HW offload busy (rules loaded)
+ * @bpf:	BPF ndo offload-related calls
  * @xdp_offload:    offload an XDP program
- * @bpf_verifier_prep:	verifier prep for dev-specific BPF programs
- * @bpf_translate:	translate call for dev-specific BPF programs
- * @bpf_destroy:	destroy for dev-specific BPF programs
  * @eswitch_mode_get:    get SR-IOV eswitch mode
  * @sriov_enable: app-specific sriov initialisation
  * @sriov_disable: app-specific sriov clean-up
@@ -98,6 +104,7 @@ struct nfp_app_type {
 	enum nfp_app_id id;
 	const char *name;
 
+	u32 ctrl_cap_mask;
 	bool ctrl_has_meta;
 
 	int (*init)(struct nfp_app *app);
@@ -112,11 +119,15 @@ struct nfp_app_type {
 	void (*vnic_clean)(struct nfp_app *app, struct nfp_net *nn);
 
 	int (*repr_init)(struct nfp_app *app, struct net_device *netdev);
+	void (*repr_preclean)(struct nfp_app *app, struct net_device *netdev);
 	void (*repr_clean)(struct nfp_app *app, struct net_device *netdev);
 
 	int (*repr_open)(struct nfp_app *app, struct nfp_repr *repr);
 	int (*repr_stop)(struct nfp_app *app, struct nfp_repr *repr);
 
+	int (*change_mtu)(struct nfp_app *app, struct net_device *netdev,
+			  int new_mtu);
+
 	int (*start)(struct nfp_app *app);
 	void (*stop)(struct nfp_app *app);
 
@@ -125,14 +136,11 @@ struct nfp_app_type {
 	int (*setup_tc)(struct nfp_app *app, struct net_device *netdev,
 			enum tc_setup_type type, void *type_data);
 	bool (*tc_busy)(struct nfp_app *app, struct nfp_net *nn);
+	int (*bpf)(struct nfp_app *app, struct nfp_net *nn,
+		   struct netdev_bpf *xdp);
 	int (*xdp_offload)(struct nfp_app *app, struct nfp_net *nn,
-			   struct bpf_prog *prog);
-	int (*bpf_verifier_prep)(struct nfp_app *app, struct nfp_net *nn,
-				 struct netdev_bpf *bpf);
-	int (*bpf_translate)(struct nfp_app *app, struct nfp_net *nn,
-			     struct bpf_prog *prog);
-	int (*bpf_destroy)(struct nfp_app *app, struct nfp_net *nn,
-			   struct bpf_prog *prog);
+			   struct bpf_prog *prog,
+			   struct netlink_ext_ack *extack);
 
 	int (*sriov_enable)(struct nfp_app *app, int num_vfs);
 	void (*sriov_disable)(struct nfp_app *app);
@@ -163,6 +171,7 @@ struct nfp_app {
 	void *priv;
 };
 
+bool __nfp_ctrl_tx(struct nfp_net *nn, struct sk_buff *skb);
 bool nfp_ctrl_tx(struct nfp_net *nn, struct sk_buff *skb);
 
 static inline int nfp_app_init(struct nfp_app *app)
@@ -226,12 +235,27 @@ nfp_app_repr_init(struct nfp_app *app, struct net_device *netdev)
 }
 
 static inline void
+nfp_app_repr_preclean(struct nfp_app *app, struct net_device *netdev)
+{
+	if (app->type->repr_preclean)
+		app->type->repr_preclean(app, netdev);
+}
+
+static inline void
 nfp_app_repr_clean(struct nfp_app *app, struct net_device *netdev)
 {
 	if (app->type->repr_clean)
 		app->type->repr_clean(app, netdev);
 }
 
+static inline int
+nfp_app_change_mtu(struct nfp_app *app, struct net_device *netdev, int new_mtu)
+{
+	if (!app || !app->type->change_mtu)
+		return 0;
+	return app->type->change_mtu(app, netdev, new_mtu);
+}
+
 static inline int nfp_app_start(struct nfp_app *app, struct nfp_net *ctrl)
 {
 	app->ctrl = ctrl;
@@ -293,39 +317,29 @@ static inline int nfp_app_setup_tc(struct nfp_app *app,
 	return app->type->setup_tc(app, netdev, type, type_data);
 }
 
-static inline int nfp_app_xdp_offload(struct nfp_app *app, struct nfp_net *nn,
-				      struct bpf_prog *prog)
+static inline int nfp_app_bpf(struct nfp_app *app, struct nfp_net *nn,
+			      struct netdev_bpf *bpf)
 {
-	if (!app || !app->type->xdp_offload)
-		return -EOPNOTSUPP;
-	return app->type->xdp_offload(app, nn, prog);
+	if (!app || !app->type->bpf)
+		return -EINVAL;
+	return app->type->bpf(app, nn, bpf);
 }
 
-static inline int
-nfp_app_bpf_verifier_prep(struct nfp_app *app, struct nfp_net *nn,
-			  struct netdev_bpf *bpf)
+static inline int nfp_app_xdp_offload(struct nfp_app *app, struct nfp_net *nn,
+				      struct bpf_prog *prog,
+				      struct netlink_ext_ack *extack)
 {
-	if (!app || !app->type->bpf_verifier_prep)
+	if (!app || !app->type->xdp_offload)
 		return -EOPNOTSUPP;
-	return app->type->bpf_verifier_prep(app, nn, bpf);
+	return app->type->xdp_offload(app, nn, prog, extack);
 }
 
-static inline int
-nfp_app_bpf_translate(struct nfp_app *app, struct nfp_net *nn,
-		      struct bpf_prog *prog)
+static inline bool __nfp_app_ctrl_tx(struct nfp_app *app, struct sk_buff *skb)
 {
-	if (!app || !app->type->bpf_translate)
-		return -EOPNOTSUPP;
-	return app->type->bpf_translate(app, nn, prog);
-}
+	trace_devlink_hwmsg(priv_to_devlink(app->pf), false, 0,
+			    skb->data, skb->len);
 
-static inline int
-nfp_app_bpf_destroy(struct nfp_app *app, struct nfp_net *nn,
-		    struct bpf_prog *prog)
-{
-	if (!app || !app->type->bpf_destroy)
-		return -EOPNOTSUPP;
-	return app->type->bpf_destroy(app, nn, prog);
+	return __nfp_ctrl_tx(app->ctrl, skb);
 }
 
 static inline bool nfp_app_ctrl_tx(struct nfp_app *app, struct sk_buff *skb)
@@ -378,6 +392,8 @@ static inline struct net_device *nfp_app_repr_get(struct nfp_app *app, u32 id)
 struct nfp_app *nfp_app_from_netdev(struct net_device *netdev);
 
 struct nfp_reprs *
+nfp_reprs_get_locked(struct nfp_app *app, enum nfp_repr_type type);
+struct nfp_reprs *
 nfp_app_reprs_set(struct nfp_app *app, enum nfp_repr_type type,
 		  struct nfp_reprs *reprs);
 
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_asm.c b/drivers/net/ethernet/netronome/nfp/nfp_asm.c
index 830f6de25f47..3f6952b66a49 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_asm.c
+++ b/drivers/net/ethernet/netronome/nfp/nfp_asm.c
@@ -41,6 +41,7 @@
 
 const struct cmd_tgt_act cmd_tgt_act[__CMD_TGT_MAP_SIZE] = {
 	[CMD_TGT_WRITE8_SWAP] =		{ 0x02, 0x42 },
+	[CMD_TGT_WRITE32_SWAP] =	{ 0x02, 0x5f },
 	[CMD_TGT_READ8] =		{ 0x01, 0x43 },
 	[CMD_TGT_READ32] =		{ 0x00, 0x5c },
 	[CMD_TGT_READ32_LE] =		{ 0x01, 0x5c },
@@ -49,6 +50,94 @@ const struct cmd_tgt_act cmd_tgt_act[__CMD_TGT_MAP_SIZE] = {
 	[CMD_TGT_READ_SWAP_LE] =	{ 0x03, 0x40 },
 };
 
+static bool unreg_is_imm(u16 reg)
+{
+	return (reg & UR_REG_IMM) == UR_REG_IMM;
+}
+
+u16 br_get_offset(u64 instr)
+{
+	u16 addr_lo, addr_hi;
+
+	addr_lo = FIELD_GET(OP_BR_ADDR_LO, instr);
+	addr_hi = FIELD_GET(OP_BR_ADDR_HI, instr);
+
+	return (addr_hi * ((OP_BR_ADDR_LO >> __bf_shf(OP_BR_ADDR_LO)) + 1)) |
+		addr_lo;
+}
+
+void br_set_offset(u64 *instr, u16 offset)
+{
+	u16 addr_lo, addr_hi;
+
+	addr_lo = offset & (OP_BR_ADDR_LO >> __bf_shf(OP_BR_ADDR_LO));
+	addr_hi = offset != addr_lo;
+	*instr &= ~(OP_BR_ADDR_HI | OP_BR_ADDR_LO);
+	*instr |= FIELD_PREP(OP_BR_ADDR_HI, addr_hi);
+	*instr |= FIELD_PREP(OP_BR_ADDR_LO, addr_lo);
+}
+
+void br_add_offset(u64 *instr, u16 offset)
+{
+	u16 addr;
+
+	addr = br_get_offset(*instr);
+	br_set_offset(instr, addr + offset);
+}
+
+static bool immed_can_modify(u64 instr)
+{
+	if (FIELD_GET(OP_IMMED_INV, instr) ||
+	    FIELD_GET(OP_IMMED_SHIFT, instr) ||
+	    FIELD_GET(OP_IMMED_WIDTH, instr) != IMMED_WIDTH_ALL) {
+		pr_err("Can't decode/encode immed!\n");
+		return false;
+	}
+	return true;
+}
+
+u16 immed_get_value(u64 instr)
+{
+	u16 reg;
+
+	if (!immed_can_modify(instr))
+		return 0;
+
+	reg = FIELD_GET(OP_IMMED_A_SRC, instr);
+	if (!unreg_is_imm(reg))
+		reg = FIELD_GET(OP_IMMED_B_SRC, instr);
+
+	return (reg & 0xff) | FIELD_GET(OP_IMMED_IMM, instr);
+}
+
+void immed_set_value(u64 *instr, u16 immed)
+{
+	if (!immed_can_modify(*instr))
+		return;
+
+	if (unreg_is_imm(FIELD_GET(OP_IMMED_A_SRC, *instr))) {
+		*instr &= ~FIELD_PREP(OP_IMMED_A_SRC, 0xff);
+		*instr |= FIELD_PREP(OP_IMMED_A_SRC, immed & 0xff);
+	} else {
+		*instr &= ~FIELD_PREP(OP_IMMED_B_SRC, 0xff);
+		*instr |= FIELD_PREP(OP_IMMED_B_SRC, immed & 0xff);
+	}
+
+	*instr &= ~OP_IMMED_IMM;
+	*instr |= FIELD_PREP(OP_IMMED_IMM, immed >> 8);
+}
+
+void immed_add_value(u64 *instr, u16 offset)
+{
+	u16 val;
+
+	if (!immed_can_modify(*instr))
+		return;
+
+	val = immed_get_value(*instr);
+	immed_set_value(instr, val + offset);
+}
+
 static u16 nfp_swreg_to_unreg(swreg reg, bool is_dst)
 {
 	bool lm_id, lm_dec = false;
@@ -120,7 +209,8 @@ int swreg_to_unrestricted(swreg dst, swreg lreg, swreg rreg,
 	reg->dst = nfp_swreg_to_unreg(dst, true);
 
 	/* Decode source operands */
-	if (swreg_type(lreg) == swreg_type(rreg))
+	if (swreg_type(lreg) == swreg_type(rreg) &&
+	    swreg_type(lreg) != NN_REG_NONE)
 		return -EFAULT;
 
 	if (swreg_type(lreg) == NN_REG_GPR_B ||
@@ -200,7 +290,8 @@ int swreg_to_restricted(swreg dst, swreg lreg, swreg rreg,
 	reg->dst = nfp_swreg_to_rereg(dst, true, false, NULL);
 
 	/* Decode source operands */
-	if (swreg_type(lreg) == swreg_type(rreg))
+	if (swreg_type(lreg) == swreg_type(rreg) &&
+	    swreg_type(lreg) != NN_REG_NONE)
 		return -EFAULT;
 
 	if (swreg_type(lreg) == NN_REG_GPR_B ||
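
br_set_offset() above splits a branch offset between the 13-bit OP_BR_ADDR_LO
field and the single OP_BR_ADDR_HI carry bit (addr_hi is 1 exactly when the
offset does not fit in ADDR_LO), and br_get_offset() reverses the split.  A
standalone arithmetic check of that round trip, assuming the field widths
from nfp_asm.h:

#include <assert.h>
#include <stdint.h>

#define BR_ADDR_LO_BITS	13		/* assumed width of OP_BR_ADDR_LO */
#define BR_ADDR_LO_MASK	((1u << BR_ADDR_LO_BITS) - 1)

int main(void)
{
	uint16_t offset = 0x2345;		/* a 14-bit branch offset */
	uint16_t lo = offset & BR_ADDR_LO_MASK;	/* 0x0345 */
	uint16_t hi = offset != lo;		/* 1: offset overflows LO */

	/* br_get_offset() computes hi * (mask + 1) | lo */
	assert((uint16_t)((hi * (BR_ADDR_LO_MASK + 1)) | lo) == offset);
	return 0;
}
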
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_asm.h b/drivers/net/ethernet/netronome/nfp/nfp_asm.h
index 74d0c11ab2f9..5f9291db98e0 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_asm.h
+++ b/drivers/net/ethernet/netronome/nfp/nfp_asm.h
@@ -1,5 +1,5 @@
 /*
- * Copyright (C) 2016 Netronome Systems, Inc.
+ * Copyright (C) 2016-2017 Netronome Systems, Inc.
  *
  * This software is dual licensed under the GNU General License Version 2,
  * June 1991 as shown in the file COPYING in the top-level directory of this
@@ -77,9 +77,11 @@
 enum br_mask {
 	BR_BEQ = 0x00,
 	BR_BNE = 0x01,
+	BR_BMI = 0x02,
 	BR_BHS = 0x04,
 	BR_BLO = 0x05,
 	BR_BGE = 0x08,
+	BR_BLT = 0x09,
 	BR_UNC = 0x18,
 };
 
@@ -92,6 +94,10 @@ enum br_ctx_signal_state {
 	BR_CSS_NONE = 2,
 };
 
+u16 br_get_offset(u64 instr);
+void br_set_offset(u64 *instr, u16 offset);
+void br_add_offset(u64 *instr, u16 offset);
+
 #define OP_BBYTE_BASE		0x0c800000000ULL
 #define OP_BB_A_SRC		0x000000000ffULL
 #define OP_BB_BYTE		0x00000000300ULL
@@ -132,6 +138,10 @@ enum immed_shift {
 	IMMED_SHIFT_2B = 2,
 };
 
+u16 immed_get_value(u64 instr);
+void immed_set_value(u64 *instr, u16 immed);
+void immed_add_value(u64 *instr, u16 offset);
+
 #define OP_SHF_BASE		0x08000000000ULL
 #define OP_SHF_A_SRC		0x000000000ffULL
 #define OP_SHF_SC		0x00000000300ULL
@@ -175,6 +185,7 @@ enum alu_op {
 	ALU_OP_NONE	= 0x00,
 	ALU_OP_ADD	= 0x01,
 	ALU_OP_NOT	= 0x04,
+	ALU_OP_ADD_2B	= 0x05,
 	ALU_OP_AND	= 0x08,
 	ALU_OP_SUB_C	= 0x0d,
 	ALU_OP_ADD_C	= 0x11,
@@ -209,6 +220,7 @@ enum alu_dst_ab {
 #define OP_CMD_CNT		0x0000e000000ULL
 #define OP_CMD_SIG		0x000f0000000ULL
 #define OP_CMD_TGT_CMD		0x07f00000000ULL
+#define OP_CMD_INDIR		0x20000000000ULL
 #define OP_CMD_MODE	       0x1c0000000000ULL
 
 struct cmd_tgt_act {
@@ -219,6 +231,7 @@ struct cmd_tgt_act {
 enum cmd_tgt_map {
 	CMD_TGT_READ8,
 	CMD_TGT_WRITE8_SWAP,
+	CMD_TGT_WRITE32_SWAP,
 	CMD_TGT_READ32,
 	CMD_TGT_READ32_LE,
 	CMD_TGT_READ32_SWAP,
@@ -240,6 +253,9 @@ enum cmd_ctx_swap {
 	CMD_CTX_NO_SWAP = 3,
 };
 
+#define CMD_OVE_LEN	BIT(7)
+#define CMD_OV_LEN	GENMASK(12, 8)
+
 #define OP_LCSR_BASE		0x0fc00000000ULL
 #define OP_LCSR_A_SRC		0x000000003ffULL
 #define OP_LCSR_B_SRC		0x000000ffc00ULL
@@ -257,6 +273,7 @@ enum lcsr_wr_src {
 #define OP_CARB_BASE		0x0e000000000ULL
 #define OP_CARB_OR		0x00000010000ULL
 
+#define NFP_CSR_CTX_PTR		0x20
 #define NFP_CSR_ACT_LM_ADDR0	0x64
 #define NFP_CSR_ACT_LM_ADDR1	0x6c
 #define NFP_CSR_ACT_LM_ADDR2	0x94
@@ -377,4 +394,13 @@ int swreg_to_restricted(swreg dst, swreg lreg, swreg rreg,
 int nfp_ustore_check_valid_no_ecc(u64 insn);
 u64 nfp_ustore_calc_ecc_insn(u64 insn);
 
+#define NFP_IND_ME_REFL_WR_SIG_INIT	3
+#define NFP_IND_ME_CTX_PTR_BASE_MASK	GENMASK(9, 0)
+#define NFP_IND_NUM_CONTEXTS		8
+
+static inline u32 nfp_get_ind_csr_ctx_ptr_offs(u32 read_offset)
+{
+	return (read_offset & ~NFP_IND_ME_CTX_PTR_BASE_MASK) | NFP_CSR_CTX_PTR;
+}
+
 #endif
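
nfp_get_ind_csr_ctx_ptr_offs() above keeps everything outside the low 10 bits
of a CSR read offset (the per-ME base) and substitutes the CSRCtxPtr register
at 0x20, so the indirect-read helper can select a context before reading the
CSR proper.  A standalone check of the arithmetic:

#include <assert.h>
#include <stdint.h>

#define CTX_PTR_BASE_MASK	0x3ffu	/* NFP_IND_ME_CTX_PTR_BASE_MASK */
#define CSR_CTX_PTR		0x20u	/* NFP_CSR_CTX_PTR */

static uint32_t ctx_ptr_offs(uint32_t read_offset)
{
	return (read_offset & ~CTX_PTR_BASE_MASK) | CSR_CTX_PTR;
}

int main(void)
{
	/* a CSR at 0x12a8 maps to CSRCtxPtr at 0x1020 in the same ME */
	assert(ctx_ptr_offs(0x12a8) == 0x1020);
	return 0;
}
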
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_devlink.c b/drivers/net/ethernet/netronome/nfp/nfp_devlink.c
index 6c9f29c2e975..eb0fc614673d 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_devlink.c
+++ b/drivers/net/ethernet/netronome/nfp/nfp_devlink.c
@@ -152,18 +152,8 @@ out:
 static int nfp_devlink_eswitch_mode_get(struct devlink *devlink, u16 *mode)
 {
 	struct nfp_pf *pf = devlink_priv(devlink);
-	int ret;
-
-	mutex_lock(&pf->lock);
-	if (!pf->app) {
-		ret = -EBUSY;
-		goto out;
-	}
-	ret = nfp_app_eswitch_mode_get(pf->app, mode);
-out:
-	mutex_unlock(&pf->lock);
 
-	return ret;
+	return nfp_app_eswitch_mode_get(pf->app, mode);
 }
 
 const struct devlink_ops nfp_devlink_ops = {
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_main.c b/drivers/net/ethernet/netronome/nfp/nfp_main.c
index 35eaccbece36..cc570bb6563c 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_main.c
+++ b/drivers/net/ethernet/netronome/nfp/nfp_main.c
@@ -45,6 +45,7 @@
 #include <linux/pci.h>
 #include <linux/firmware.h>
 #include <linux/vermagic.h>
+#include <linux/vmalloc.h>
 #include <net/devlink.h>
 
 #include "nfpcore/nfp.h"
@@ -498,17 +499,16 @@ static int nfp_pci_probe(struct pci_dev *pdev,
 	if (err)
 		goto err_hwinfo_free;
 
-	err = devlink_register(devlink, &pdev->dev);
-	if (err)
-		goto err_hwinfo_free;
-
 	err = nfp_nsp_init(pdev, pf);
 	if (err)
-		goto err_devlink_unreg;
+		goto err_hwinfo_free;
 
 	pf->mip = nfp_mip_open(pf->cpp);
 	pf->rtbl = __nfp_rtsym_table_read(pf->cpp, pf->mip);
 
+	pf->dump_flag = NFP_DUMP_NSP_DIAG;
+	pf->dumpspec = nfp_net_dump_load_dumpspec(pf->cpp, pf->rtbl);
+
 	err = nfp_pcie_sriov_read_nfd_limit(pf);
 	if (err)
 		goto err_fw_unload;
@@ -518,6 +518,7 @@ static int nfp_pci_probe(struct pci_dev *pdev,
 		dev_err(&pdev->dev,
 			"Error: %d VFs already enabled, but loaded FW can only support %d\n",
 			pf->num_vfs, pf->limit_vfs);
+		err = -EINVAL;
 		goto err_fw_unload;
 	}
 
@@ -544,8 +545,7 @@ err_fw_unload:
 		nfp_fw_unload(pf);
 	kfree(pf->eth_tbl);
 	kfree(pf->nspi);
-err_devlink_unreg:
-	devlink_unregister(devlink);
+	vfree(pf->dumpspec);
 err_hwinfo_free:
 	kfree(pf->hwinfo);
 	nfp_cpp_free(pf->cpp);
@@ -566,19 +566,15 @@ err_pci_disable:
 static void nfp_pci_remove(struct pci_dev *pdev)
 {
 	struct nfp_pf *pf = pci_get_drvdata(pdev);
-	struct devlink *devlink;
 
 	nfp_hwmon_unregister(pf);
 
-	devlink = priv_to_devlink(pf);
-
-	nfp_net_pci_remove(pf);
-
 	nfp_pcie_sriov_disable(pdev);
 	pci_sriov_set_totalvfs(pf->pdev, 0);
 
-	devlink_unregister(devlink);
+	nfp_net_pci_remove(pf);
 
+	vfree(pf->dumpspec);
 	kfree(pf->rtbl);
 	nfp_mip_close(pf->mip);
 	if (pf->fw_loaded)
@@ -592,7 +588,7 @@ static void nfp_pci_remove(struct pci_dev *pdev)
 	kfree(pf->eth_tbl);
 	kfree(pf->nspi);
 	mutex_destroy(&pf->lock);
-	devlink_free(devlink);
+	devlink_free(priv_to_devlink(pf));
 	pci_release_regions(pdev);
 	pci_disable_device(pdev);
 }
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_main.h b/drivers/net/ethernet/netronome/nfp/nfp_main.h
index be0ee59f2eb9..add46e28212b 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_main.h
+++ b/drivers/net/ethernet/netronome/nfp/nfp_main.h
@@ -39,6 +39,7 @@
 #ifndef NFP_MAIN_H
 #define NFP_MAIN_H
 
+#include <linux/ethtool.h>
 #include <linux/list.h>
 #include <linux/types.h>
 #include <linux/msi.h>
@@ -62,6 +63,17 @@ struct nfp_port;
 struct nfp_rtsym_table;
 
 /**
+ * struct nfp_dumpspec - NFP FW dump specification structure
+ * @size:	Size of the data
+ * @data:	Sequence of TLVs, each being an instruction to dump some data
+ *		from FW
+ */
+struct nfp_dumpspec {
+	u32 size;
+	u8 data[0];
+};
+
+/**
  * struct nfp_pf - NFP PF-specific device structure
  * @pdev:		Backpointer to PCI device
  * @cpp:		Pointer to the CPP handle
@@ -83,6 +95,9 @@ struct nfp_rtsym_table;
  * @mip:		MIP handle
  * @rtbl:		RTsym table
  * @hwinfo:		HWInfo table
+ * @dumpspec:		Debug dump specification
+ * @dump_flag:		Store dump flag between set_dump and get_dump_flag
+ * @dump_len:		Store dump length between set_dump and get_dump_flag
  * @eth_tbl:		NSP ETH table
  * @nspi:		NSP identification info
  * @hwmon_dev:		pointer to hwmon device
@@ -124,6 +139,9 @@ struct nfp_pf {
 	const struct nfp_mip *mip;
 	struct nfp_rtsym_table *rtbl;
 	struct nfp_hwinfo *hwinfo;
+	struct nfp_dumpspec *dumpspec;
+	u32 dump_flag;
+	u32 dump_len;
 	struct nfp_eth_table *eth_tbl;
 	struct nfp_nsp_identify *nspi;
 
@@ -157,4 +175,15 @@ void nfp_net_get_mac_addr(struct nfp_pf *pf, struct nfp_port *port);
 
 bool nfp_ctrl_tx(struct nfp_net *nn, struct sk_buff *skb);
 
+enum nfp_dump_diag {
+	NFP_DUMP_NSP_DIAG = 0,
+};
+
+struct nfp_dumpspec *
+nfp_net_dump_load_dumpspec(struct nfp_cpp *cpp, struct nfp_rtsym_table *rtbl);
+s64 nfp_net_dump_calculate_size(struct nfp_pf *pf, struct nfp_dumpspec *spec,
+				u32 flag);
+int nfp_net_dump_populate_buffer(struct nfp_pf *pf, struct nfp_dumpspec *spec,
+				 struct ethtool_dump *dump_param, void *dest);
+
 #endif /* NFP_MAIN_H */
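
The three prototypes above back the standard ethtool dump interface.  A
sketch of how .set_dump might wire them up (hypothetical, since the ethtool
glue is not part of this hunk; example_netdev_to_pf() is an assumed lookup
helper):

static int example_set_dump(struct net_device *netdev,
			    struct ethtool_dump *val)
{
	struct nfp_pf *pf = example_netdev_to_pf(netdev);	/* assumed */
	s64 len;

	if (!pf->dumpspec)
		return -EOPNOTSUPP;

	/* Validate the level and cache flag/length for the later
	 * get_dump_flag/get_dump_data calls.
	 */
	len = nfp_net_dump_calculate_size(pf, pf->dumpspec, val->flag);
	if (len < 0)
		return len;

	pf->dump_flag = val->flag;
	pf->dump_len = len;
	return 0;
}

From userspace this surfaces through "ethtool -W <dev> <level>" to select the
dump level and "ethtool -w <dev> data <file>" to retrieve the dump.
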
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net.h b/drivers/net/ethernet/netronome/nfp/nfp_net.h
index 7f9857c276b1..d88eda9707e6 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_net.h
+++ b/drivers/net/ethernet/netronome/nfp/nfp_net.h
@@ -47,6 +47,7 @@
 #include <linux/netdevice.h>
 #include <linux/pci.h>
 #include <linux/io-64-nonatomic-hi-lo.h>
+#include <net/xdp.h>
 
 #include "nfp_net_ctrl.h"
 
@@ -350,6 +351,7 @@ struct nfp_net_rx_buf {
  * @rxds:       Virtual address of FL/RX ring in host memory
  * @dma:        DMA address of the FL/RX ring
  * @size:       Size, in bytes, of the FL/RX ring (needed to free)
+ * @xdp_rxq:    RX-ring info avail for XDP
  */
 struct nfp_net_rx_ring {
 	struct nfp_net_r_vector *r_vec;
@@ -361,13 +363,14 @@ struct nfp_net_rx_ring {
 	u32 idx;
 
 	int fl_qcidx;
+	unsigned int size;
 	u8 __iomem *qcp_fl;
 
 	struct nfp_net_rx_buf *rxbufs;
 	struct nfp_net_rx_desc *rxds;
 
 	dma_addr_t dma;
-	unsigned int size;
+	struct xdp_rxq_info xdp_rxq;
 } ____cacheline_aligned;
 
 /**
@@ -548,6 +551,8 @@ struct nfp_net_dp {
  * @max_r_vecs:		Number of allocated interrupt vectors for RX/TX
  * @max_tx_rings:       Maximum number of TX rings supported by the Firmware
  * @max_rx_rings:       Maximum number of RX rings supported by the Firmware
+ * @stride_rx:		Queue controller RX queue spacing
+ * @stride_tx:		Queue controller TX queue spacing
  * @r_vecs:             Pre-allocated array of ring vectors
  * @irq_entries:        Pre-allocated array of MSI-X entries
  * @lsc_handler:        Handler for Link State Change interrupt
@@ -573,6 +578,7 @@ struct nfp_net_dp {
  * @qcp_cfg:            Pointer to QCP queue used for configuration notification
  * @tx_bar:             Pointer to mapped TX queues
  * @rx_bar:             Pointer to mapped FL/RX queues
+ * @tlv_caps:		Parsed TLV capabilities
  * @debugfs_dir:	Device directory in debugfs
  * @vnic_list:		Entry on device vNIC list
  * @pdev:		Backpointer to PCI device
@@ -639,6 +645,8 @@ struct nfp_net {
 	u8 __iomem *tx_bar;
 	u8 __iomem *rx_bar;
 
+	struct nfp_net_tlv_caps tlv_caps;
+
 	struct dentry *debugfs_dir;
 
 	struct list_head vnic_list;
@@ -834,6 +842,18 @@ static inline const char *nfp_net_name(struct nfp_net *nn)
 	return nn->dp.netdev ? nn->dp.netdev->name : "ctrl";
 }
 
+static inline void nfp_ctrl_lock(struct nfp_net *nn)
+	__acquires(&nn->r_vecs[0].lock)
+{
+	spin_lock_bh(&nn->r_vecs[0].lock);
+}
+
+static inline void nfp_ctrl_unlock(struct nfp_net *nn)
+	__releases(&nn->r_vecs[0].lock)
+{
+	spin_unlock_bh(&nn->r_vecs[0].lock);
+}
+
 /* Globals */
 extern const char nfp_driver_version[];
 
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_common.c b/drivers/net/ethernet/netronome/nfp/nfp_net_common.c
index 99b0487b6d82..c0fd351c86b1 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_net_common.c
+++ b/drivers/net/ethernet/netronome/nfp/nfp_net_common.c
@@ -293,9 +293,15 @@ int nfp_net_reconfig(struct nfp_net *nn, u32 update)
  */
 static int nfp_net_reconfig_mbox(struct nfp_net *nn, u32 mbox_cmd)
 {
+	u32 mbox = nn->tlv_caps.mbox_off;
 	int ret;
 
-	nn_writeq(nn, NFP_NET_CFG_MBOX_CMD, mbox_cmd);
+	if (!nfp_net_has_mbox(&nn->tlv_caps)) {
+		nn_err(nn, "no mailbox present, command: %u\n", mbox_cmd);
+		return -EIO;
+	}
+
+	nn_writeq(nn, mbox + NFP_NET_CFG_MBOX_SIMPLE_CMD, mbox_cmd);
 
 	ret = nfp_net_reconfig(nn, NFP_NET_CFG_UPDATE_MBOX);
 	if (ret) {
@@ -303,7 +309,7 @@ static int nfp_net_reconfig_mbox(struct nfp_net *nn, u32 mbox_cmd)
 		return ret;
 	}
 
-	return -nn_readl(nn, NFP_NET_CFG_MBOX_RET);
+	return -nn_readl(nn, mbox + NFP_NET_CFG_MBOX_SIMPLE_RET);
 }
 
 /* Interrupt configuration and handling
@@ -1610,11 +1616,13 @@ static int nfp_net_rx(struct nfp_net_rx_ring *rx_ring, int budget)
 	unsigned int true_bufsz;
 	struct sk_buff *skb;
 	int pkts_polled = 0;
+	struct xdp_buff xdp;
 	int idx;
 
 	rcu_read_lock();
 	xdp_prog = READ_ONCE(dp->xdp_prog);
 	true_bufsz = xdp_prog ? PAGE_SIZE : dp->fl_bufsz;
+	xdp.rxq = &rx_ring->xdp_rxq;
 	tx_ring = r_vec->xdp_ring;
 
 	while (pkts_polled < budget) {
@@ -1705,7 +1713,6 @@ static int nfp_net_rx(struct nfp_net_rx_ring *rx_ring, int budget)
 				  dp->bpf_offload_xdp) && !meta.portid) {
 			void *orig_data = rxbuf->frag + pkt_off;
 			unsigned int dma_off;
-			struct xdp_buff xdp;
 			int act;
 
 			xdp.data_hard_start = rxbuf->frag + NFP_NET_RX_BUF_HEADROOM;
@@ -1919,6 +1926,13 @@ err_free:
 	return false;
 }
 
+bool __nfp_ctrl_tx(struct nfp_net *nn, struct sk_buff *skb)
+{
+	struct nfp_net_r_vector *r_vec = &nn->r_vecs[0];
+
+	return nfp_ctrl_tx_one(nn, r_vec, skb, false);
+}
+
 bool nfp_ctrl_tx(struct nfp_net *nn, struct sk_buff *skb)
 {
 	struct nfp_net_r_vector *r_vec = &nn->r_vecs[0];
@@ -2254,6 +2268,8 @@ static void nfp_net_rx_ring_free(struct nfp_net_rx_ring *rx_ring)
 	struct nfp_net_r_vector *r_vec = rx_ring->r_vec;
 	struct nfp_net_dp *dp = &r_vec->nfp_net->dp;
 
+	if (dp->netdev)
+		xdp_rxq_info_unreg(&rx_ring->xdp_rxq);
 	kfree(rx_ring->rxbufs);
 
 	if (rx_ring->rxds)
@@ -2277,7 +2293,14 @@ static void nfp_net_rx_ring_free(struct nfp_net_rx_ring *rx_ring)
 static int
 nfp_net_rx_ring_alloc(struct nfp_net_dp *dp, struct nfp_net_rx_ring *rx_ring)
 {
-	int sz;
+	int sz, err;
+
+	if (dp->netdev) {
+		err = xdp_rxq_info_reg(&rx_ring->xdp_rxq, dp->netdev,
+				       rx_ring->idx);
+		if (err < 0)
+			return err;
+	}
 
 	rx_ring->cnt = dp->rxd_cnt;
 	rx_ring->size = sizeof(*rx_ring->rxds) * rx_ring->cnt;
@@ -2441,7 +2464,7 @@ void nfp_net_coalesce_write_cfg(struct nfp_net *nn)
 	 * ME timestamp ticks.  There are 16 ME clock cycles for each timestamp
 	 * count.
 	 */
-	factor = nn->me_freq_mhz / 16;
+	factor = nn->tlv_caps.me_freq_mhz / 16;
 
 	/* copy RX interrupt coalesce parameters */
 	value = (nn->rx_coalesce_max_frames << 16) |
@@ -2852,6 +2875,11 @@ static void nfp_net_set_rx_mode(struct net_device *netdev)
 
 	new_ctrl = nn->dp.ctrl;
 
+	if (!netdev_mc_empty(netdev) || netdev->flags & IFF_ALLMULTI)
+		new_ctrl |= nn->cap & NFP_NET_CFG_CTRL_L2MC;
+	else
+		new_ctrl &= ~NFP_NET_CFG_CTRL_L2MC;
+
 	if (netdev->flags & IFF_PROMISC) {
 		if (nn->cap & NFP_NET_CFG_CTRL_PROMISC)
 			new_ctrl |= NFP_NET_CFG_CTRL_PROMISC;
@@ -3036,6 +3064,11 @@ static int nfp_net_change_mtu(struct net_device *netdev, int new_mtu)
 {
 	struct nfp_net *nn = netdev_priv(netdev);
 	struct nfp_net_dp *dp;
+	int err;
+
+	err = nfp_app_change_mtu(nn->app, netdev, new_mtu);
+	if (err)
+		return err;
 
 	dp = nfp_net_clone_dp(nn);
 	if (!dp)
@@ -3057,8 +3090,9 @@ nfp_net_vlan_rx_add_vid(struct net_device *netdev, __be16 proto, u16 vid)
 	if (!vid)
 		return 0;
 
-	nn_writew(nn, NFP_NET_CFG_VLAN_FILTER_VID, vid);
-	nn_writew(nn, NFP_NET_CFG_VLAN_FILTER_PROTO, ETH_P_8021Q);
+	nn_writew(nn, nn->tlv_caps.mbox_off + NFP_NET_CFG_VLAN_FILTER_VID, vid);
+	nn_writew(nn, nn->tlv_caps.mbox_off + NFP_NET_CFG_VLAN_FILTER_PROTO,
+		  ETH_P_8021Q);
 
 	return nfp_net_reconfig_mbox(nn, NFP_NET_CFG_MBOX_CMD_CTAG_FILTER_ADD);
 }
@@ -3074,8 +3108,9 @@ nfp_net_vlan_rx_kill_vid(struct net_device *netdev, __be16 proto, u16 vid)
 	if (!vid)
 		return 0;
 
-	nn_writew(nn, NFP_NET_CFG_VLAN_FILTER_VID, vid);
-	nn_writew(nn, NFP_NET_CFG_VLAN_FILTER_PROTO, ETH_P_8021Q);
+	nn_writew(nn, nn->tlv_caps.mbox_off + NFP_NET_CFG_VLAN_FILTER_VID, vid);
+	nn_writew(nn, nn->tlv_caps.mbox_off + NFP_NET_CFG_VLAN_FILTER_PROTO,
+		  ETH_P_8021Q);
 
 	return nfp_net_reconfig_mbox(nn, NFP_NET_CFG_MBOX_CMD_CTAG_FILTER_KILL);
 }
@@ -3368,7 +3403,7 @@ nfp_net_xdp_setup(struct nfp_net *nn, struct bpf_prog *prog, u32 flags,
 	if (err)
 		return err;
 
-	err = nfp_app_xdp_offload(nn->app, nn, offload_prog);
+	err = nfp_app_xdp_offload(nn->app, nn, offload_prog, extack);
 	if (err && flags & XDP_FLAGS_HW_MODE)
 		return err;
 
@@ -3394,17 +3429,10 @@ static int nfp_net_xdp(struct net_device *netdev, struct netdev_bpf *xdp)
 		if (nn->dp.bpf_offload_xdp)
 			xdp->prog_attached = XDP_ATTACHED_HW;
 		xdp->prog_id = nn->xdp_prog ? nn->xdp_prog->aux->id : 0;
+		xdp->prog_flags = nn->xdp_prog ? nn->xdp_flags : 0;
 		return 0;
-	case BPF_OFFLOAD_VERIFIER_PREP:
-		return nfp_app_bpf_verifier_prep(nn->app, nn, xdp);
-	case BPF_OFFLOAD_TRANSLATE:
-		return nfp_app_bpf_translate(nn->app, nn,
-					     xdp->offload.prog);
-	case BPF_OFFLOAD_DESTROY:
-		return nfp_app_bpf_destroy(nn->app, nn,
-					   xdp->offload.prog);
 	default:
-		return -EINVAL;
+		return nfp_app_bpf(nn->app, nn, xdp);
 	}
 }
 
@@ -3563,9 +3591,6 @@ struct nfp_net *nfp_net_alloc(struct pci_dev *pdev, bool needs_netdev,
  */
 void nfp_net_free(struct nfp_net *nn)
 {
-	if (nn->xdp_prog)
-		bpf_prog_put(nn->xdp_prog);
-
 	if (nn->dp.netdev)
 		free_netdev(nn->dp.netdev);
 	else
@@ -3731,18 +3756,8 @@ static void nfp_net_netdev_init(struct nfp_net *nn)
 	nfp_net_set_ethtool_ops(netdev);
 }
 
-/**
- * nfp_net_init() - Initialise/finalise the nfp_net structure
- * @nn:		NFP Net device structure
- *
- * Return: 0 on success or negative errno on error.
- */
-int nfp_net_init(struct nfp_net *nn)
+static int nfp_net_read_caps(struct nfp_net *nn)
 {
-	int err;
-
-	nn->dp.rx_dma_dir = DMA_FROM_DEVICE;
-
 	/* Get some of the read-only fields from the BAR */
 	nn->cap = nn_readl(nn, NFP_NET_CFG_CAP);
 	nn->max_mtu = nn_readl(nn, NFP_NET_CFG_MAX_MTU);
@@ -3775,6 +3790,29 @@ int nfp_net_init(struct nfp_net *nn)
 		nn->dp.rx_offset = NFP_NET_RX_OFFSET;
 	}
 
+	/* For control vNICs mask out the capabilities app doesn't want. */
+	if (!nn->dp.netdev)
+		nn->cap &= nn->app->type->ctrl_cap_mask;
+
+	return 0;
+}
+
+/**
+ * nfp_net_init() - Initialise/finalise the nfp_net structure
+ * @nn:		NFP Net device structure
+ *
+ * Return: 0 on success or negative errno on error.
+ */
+int nfp_net_init(struct nfp_net *nn)
+{
+	int err;
+
+	nn->dp.rx_dma_dir = DMA_FROM_DEVICE;
+
+	err = nfp_net_read_caps(nn);
+	if (err)
+		return err;
+
 	/* Set default MTU and Freelist buffer size */
 	if (nn->max_mtu < NFP_NET_DEFAULT_MTU)
 		nn->dp.mtu = nn->max_mtu;
@@ -3791,8 +3829,6 @@ int nfp_net_init(struct nfp_net *nn)
 	/* Allow L2 Broadcast and Multicast through by default, if supported */
 	if (nn->cap & NFP_NET_CFG_CTRL_L2BC)
 		nn->dp.ctrl |= NFP_NET_CFG_CTRL_L2BC;
-	if (nn->cap & NFP_NET_CFG_CTRL_L2MC)
-		nn->dp.ctrl |= NFP_NET_CFG_CTRL_L2MC;
 
 	/* Allow IRQ moderation, if supported */
 	if (nn->cap & NFP_NET_CFG_CTRL_IRQMOD) {
@@ -3800,6 +3836,11 @@ int nfp_net_init(struct nfp_net *nn)
 		nn->dp.ctrl |= NFP_NET_CFG_CTRL_IRQMOD;
 	}
 
+	err = nfp_net_tlv_caps_parse(&nn->pdev->dev, nn->dp.ctrl_bar,
+				     &nn->tlv_caps);
+	if (err)
+		return err;
+
 	if (nn->dp.netdev)
 		nfp_net_netdev_init(nn);
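
The xdp_rxq_info changes in this file follow the general lifecycle contract
of the then-new <net/xdp.h> API, condensed from the hunks above:

	/* ring allocation: make the ring known to the XDP core */
	err = xdp_rxq_info_reg(&rx_ring->xdp_rxq, dp->netdev, rx_ring->idx);
	if (err < 0)
		return err;

	/* NAPI poll: one registration serves every packet on the ring */
	xdp.rxq = &rx_ring->xdp_rxq;

	/* ring free: drop the registration */
	xdp_rxq_info_unreg(&rx_ring->xdp_rxq);
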
 
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_ctrl.c b/drivers/net/ethernet/netronome/nfp/nfp_net_ctrl.c
new file mode 100644
index 000000000000..ffb402746ad4
--- /dev/null
+++ b/drivers/net/ethernet/netronome/nfp/nfp_net_ctrl.c
@@ -0,0 +1,135 @@
+/*
+ * Copyright (C) 2018 Netronome Systems, Inc.
+ *
+ * This software is dual licensed under the GNU General Public License Version 2,
+ * June 1991 as shown in the file COPYING in the top-level directory of this
+ * source tree or the BSD 2-Clause License provided below.  You have the
+ * option to license this software under the complete terms of either license.
+ *
+ * The BSD 2-Clause License:
+ *
+ *     Redistribution and use in source and binary forms, with or
+ *     without modification, are permitted provided that the following
+ *     conditions are met:
+ *
+ *      1. Redistributions of source code must retain the above
+ *         copyright notice, this list of conditions and the following
+ *         disclaimer.
+ *
+ *      2. Redistributions in binary form must reproduce the above
+ *         copyright notice, this list of conditions and the following
+ *         disclaimer in the documentation and/or other materials
+ *         provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include <linux/bitfield.h>
+#include <linux/device.h>
+#include <linux/kernel.h>
+#include <linux/types.h>
+
+#include "nfp_net_ctrl.h"
+#include "nfp_net.h"
+
+static void nfp_net_tlv_caps_reset(struct nfp_net_tlv_caps *caps)
+{
+	memset(caps, 0, sizeof(*caps));
+	caps->me_freq_mhz = 1200;
+	caps->mbox_off = NFP_NET_CFG_MBOX_BASE;
+	caps->mbox_len = NFP_NET_CFG_MBOX_VAL_MAX_SZ;
+}
+
+int nfp_net_tlv_caps_parse(struct device *dev, u8 __iomem *ctrl_mem,
+			   struct nfp_net_tlv_caps *caps)
+{
+	u8 __iomem *data = ctrl_mem + NFP_NET_CFG_TLV_BASE;
+	u8 __iomem *end = ctrl_mem + NFP_NET_CFG_BAR_SZ;
+	u32 hdr;
+
+	nfp_net_tlv_caps_reset(caps);
+
+	hdr = readl(data);
+	if (!hdr)
+		return 0;
+
+	while (true) {
+		unsigned int length, offset;
+		u32 hdr = readl(data);
+
+		length = FIELD_GET(NFP_NET_CFG_TLV_HEADER_LENGTH, hdr);
+		offset = data - ctrl_mem;
+
+		/* Advance past the header */
+		data += 4;
+
+		if (length % NFP_NET_CFG_TLV_LENGTH_INC) {
+			dev_err(dev, "TLV size not multiple of %u len:%u\n",
+				NFP_NET_CFG_TLV_LENGTH_INC, length);
+			return -EINVAL;
+		}
+		if (data + length > end) {
+			dev_err(dev, "oversized TLV offset:%u len:%u\n",
+				offset, length);
+			return -EINVAL;
+		}
+
+		switch (FIELD_GET(NFP_NET_CFG_TLV_HEADER_TYPE, hdr)) {
+		case NFP_NET_CFG_TLV_TYPE_UNKNOWN:
+			dev_err(dev, "NULL TLV at offset:%u\n", offset);
+			return -EINVAL;
+		case NFP_NET_CFG_TLV_TYPE_RESERVED:
+			break;
+		case NFP_NET_CFG_TLV_TYPE_END:
+			if (!length)
+				return 0;
+
+			dev_err(dev, "END TLV should be empty, has len:%d\n",
+				length);
+			return -EINVAL;
+		case NFP_NET_CFG_TLV_TYPE_ME_FREQ:
+			if (length != 4) {
+				dev_err(dev,
+					"ME FREQ TLV should be 4B, is %dB\n",
+					length);
+				return -EINVAL;
+			}
+
+			caps->me_freq_mhz = readl(data);
+			break;
+		case NFP_NET_CFG_TLV_TYPE_MBOX:
+			if (!length) {
+				caps->mbox_off = 0;
+				caps->mbox_len = 0;
+			} else {
+				caps->mbox_off = data - ctrl_mem;
+				caps->mbox_len = length;
+			}
+			break;
+		default:
+			if (!FIELD_GET(NFP_NET_CFG_TLV_HEADER_REQUIRED, hdr))
+				break;
+
+			dev_err(dev, "unknown TLV type:%u offset:%u len:%u\n",
+				FIELD_GET(NFP_NET_CFG_TLV_HEADER_TYPE, hdr),
+				offset, length);
+			return -EINVAL;
+		}
+
+		data += length;
+		if (data + 4 > end) {
+			dev_err(dev, "reached end of BAR without END TLV\n");
+			return -EINVAL;
+		}
+	}
+
+	/* Not reached */
+	return -EINVAL;
+}
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_ctrl.h b/drivers/net/ethernet/netronome/nfp/nfp_net_ctrl.h
index 782d452e0fc2..eeecef2caac6 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_net_ctrl.h
+++ b/drivers/net/ethernet/netronome/nfp/nfp_net_ctrl.h
@@ -43,9 +43,7 @@
 #ifndef _NFP_NET_CTRL_H_
 #define _NFP_NET_CTRL_H_
 
-/* IMPORTANT: This header file is shared with the FW,
- *	      no OS specific constructs, please!
- */
+#include <linux/types.h>
 
 /**
  * Configuration BAR size.
@@ -91,23 +89,24 @@
 #define NFP_NET_RSS_IPV6_EX_UDP         9
 
 /**
- * @NFP_NET_TXR_MAX:         Maximum number of TX rings
- * @NFP_NET_RXR_MAX:         Maximum number of RX rings
+ * Ring counts
+ * %NFP_NET_TXR_MAX:         Maximum number of TX rings
+ * %NFP_NET_RXR_MAX:         Maximum number of RX rings
  */
 #define NFP_NET_TXR_MAX                 64
 #define NFP_NET_RXR_MAX                 64
 
 /**
  * Read/Write config words (0x0000 - 0x002c)
- * @NFP_NET_CFG_CTRL:        Global control
- * @NFP_NET_CFG_UPDATE:      Indicate which fields are updated
- * @NFP_NET_CFG_TXRS_ENABLE: Bitmask of enabled TX rings
- * @NFP_NET_CFG_RXRS_ENABLE: Bitmask of enabled RX rings
- * @NFP_NET_CFG_MTU:         Set MTU size
- * @NFP_NET_CFG_FLBUFSZ:     Set freelist buffer size (must be larger than MTU)
- * @NFP_NET_CFG_EXN:         MSI-X table entry for exceptions
- * @NFP_NET_CFG_LSC:         MSI-X table entry for link state changes
- * @NFP_NET_CFG_MACADDR:     MAC address
+ * %NFP_NET_CFG_CTRL:        Global control
+ * %NFP_NET_CFG_UPDATE:      Indicate which fields are updated
+ * %NFP_NET_CFG_TXRS_ENABLE: Bitmask of enabled TX rings
+ * %NFP_NET_CFG_RXRS_ENABLE: Bitmask of enabled RX rings
+ * %NFP_NET_CFG_MTU:         Set MTU size
+ * %NFP_NET_CFG_FLBUFSZ:     Set freelist buffer size (must be larger than MTU)
+ * %NFP_NET_CFG_EXN:         MSI-X table entry for exceptions
+ * %NFP_NET_CFG_LSC:         MSI-X table entry for link state changes
+ * %NFP_NET_CFG_MACADDR:     MAC address
  *
  * TODO:
  * - define Error details in UPDATE
@@ -176,14 +175,14 @@
 
 /**
  * Read-only words (0x0030 - 0x0050):
- * @NFP_NET_CFG_VERSION:     Firmware version number
- * @NFP_NET_CFG_STS:         Status
- * @NFP_NET_CFG_CAP:         Capabilities (same bits as @NFP_NET_CFG_CTRL)
- * @NFP_NET_CFG_MAX_TXRINGS: Maximum number of TX rings
- * @NFP_NET_CFG_MAX_RXRINGS: Maximum number of RX rings
- * @NFP_NET_CFG_MAX_MTU:     Maximum support MTU
- * @NFP_NET_CFG_START_TXQ:   Start Queue Control Queue to use for TX (PF only)
- * @NFP_NET_CFG_START_RXQ:   Start Queue Control Queue to use for RX (PF only)
+ * %NFP_NET_CFG_VERSION:     Firmware version number
+ * %NFP_NET_CFG_STS:         Status
+ * %NFP_NET_CFG_CAP:         Capabilities (same bits as %NFP_NET_CFG_CTRL)
+ * %NFP_NET_CFG_MAX_TXRINGS: Maximum number of TX rings
+ * %NFP_NET_CFG_MAX_RXRINGS: Maximum number of RX rings
+ * %NFP_NET_CFG_MAX_MTU:     Maximum supported MTU
+ * %NFP_NET_CFG_START_TXQ:   Start Queue Control Queue to use for TX (PF only)
+ * %NFP_NET_CFG_START_RXQ:   Start Queue Control Queue to use for RX (PF only)
  *
  * TODO:
  * - define more STS bits
@@ -228,31 +227,37 @@
 
 /**
  * RSS capabilities
- * @NFP_NET_CFG_RSS_CAP_HFUNC:	supported hash functions (same bits as
- *				@NFP_NET_CFG_RSS_HFUNC)
+ * %NFP_NET_CFG_RSS_CAP_HFUNC:	supported hash functions (same bits as
+ *				%NFP_NET_CFG_RSS_HFUNC)
  */
 #define NFP_NET_CFG_RSS_CAP		0x0054
 #define   NFP_NET_CFG_RSS_CAP_HFUNC	  0xff000000
 
 /**
+ * TLV area start
+ * %NFP_NET_CFG_TLV_BASE:	start anchor of the TLV area
+ */
+#define NFP_NET_CFG_TLV_BASE		0x0058
+
+/**
  * VXLAN/UDP encap configuration
- * @NFP_NET_CFG_VXLAN_PORT:	Base address of table of tunnels' UDP dst ports
- * @NFP_NET_CFG_VXLAN_SZ:	Size of the UDP port table in bytes
+ * %NFP_NET_CFG_VXLAN_PORT:	Base address of table of tunnels' UDP dst ports
+ * %NFP_NET_CFG_VXLAN_SZ:	Size of the UDP port table in bytes
  */
 #define NFP_NET_CFG_VXLAN_PORT		0x0060
 #define NFP_NET_CFG_VXLAN_SZ		  0x0008
 
 /**
  * BPF section
- * @NFP_NET_CFG_BPF_ABI:	BPF ABI version
- * @NFP_NET_CFG_BPF_CAP:	BPF capabilities
- * @NFP_NET_CFG_BPF_MAX_LEN:	Maximum size of JITed BPF code in bytes
- * @NFP_NET_CFG_BPF_START:	Offset at which BPF will be loaded
- * @NFP_NET_CFG_BPF_DONE:	Offset to jump to on exit
- * @NFP_NET_CFG_BPF_STACK_SZ:	Total size of stack area in 64B chunks
- * @NFP_NET_CFG_BPF_INL_MTU:	Packet data split offset in 64B chunks
- * @NFP_NET_CFG_BPF_SIZE:	Size of the JITed BPF code in instructions
- * @NFP_NET_CFG_BPF_ADDR:	DMA address of the buffer with JITed BPF code
+ * %NFP_NET_CFG_BPF_ABI:	BPF ABI version
+ * %NFP_NET_CFG_BPF_CAP:	BPF capabilities
+ * %NFP_NET_CFG_BPF_MAX_LEN:	Maximum size of JITed BPF code in bytes
+ * %NFP_NET_CFG_BPF_START:	Offset at which BPF will be loaded
+ * %NFP_NET_CFG_BPF_DONE:	Offset to jump to on exit
+ * %NFP_NET_CFG_BPF_STACK_SZ:	Total size of stack area in 64B chunks
+ * %NFP_NET_CFG_BPF_INL_MTU:	Packet data split offset in 64B chunks
+ * %NFP_NET_CFG_BPF_SIZE:	Size of the JITed BPF code in instructions
+ * %NFP_NET_CFG_BPF_ADDR:	DMA address of the buffer with JITed BPF code
  */
 #define NFP_NET_CFG_BPF_ABI		0x0080
 #define   NFP_NET_BPF_ABI		2
@@ -278,9 +283,9 @@
 /**
  * RSS configuration (0x0100 - 0x01ac):
  * Used only when NFP_NET_CFG_CTRL_RSS is enabled
- * @NFP_NET_CFG_RSS_CFG:     RSS configuration word
- * @NFP_NET_CFG_RSS_KEY:     RSS "secret" key
- * @NFP_NET_CFG_RSS_ITBL:    RSS indirection table
+ * %NFP_NET_CFG_RSS_CFG:     RSS configuration word
+ * %NFP_NET_CFG_RSS_KEY:     RSS "secret" key
+ * %NFP_NET_CFG_RSS_ITBL:    RSS indirection table
  */
 #define NFP_NET_CFG_RSS_BASE            0x0100
 #define NFP_NET_CFG_RSS_CTRL            NFP_NET_CFG_RSS_BASE
@@ -305,13 +310,13 @@
 
 /**
  * TX ring configuration (0x200 - 0x800)
- * @NFP_NET_CFG_TXR_BASE:    Base offset for TX ring configuration
- * @NFP_NET_CFG_TXR_ADDR:    Per TX ring DMA address (8B entries)
- * @NFP_NET_CFG_TXR_WB_ADDR: Per TX ring write back DMA address (8B entries)
- * @NFP_NET_CFG_TXR_SZ:      Per TX ring ring size (1B entries)
- * @NFP_NET_CFG_TXR_VEC:     Per TX ring MSI-X table entry (1B entries)
- * @NFP_NET_CFG_TXR_PRIO:    Per TX ring priority (1B entries)
- * @NFP_NET_CFG_TXR_IRQ_MOD: Per TX ring interrupt moderation packet
+ * %NFP_NET_CFG_TXR_BASE:    Base offset for TX ring configuration
+ * %NFP_NET_CFG_TXR_ADDR:    Per TX ring DMA address (8B entries)
+ * %NFP_NET_CFG_TXR_WB_ADDR: Per TX ring write back DMA address (8B entries)
+ * %NFP_NET_CFG_TXR_SZ:      Per TX ring ring size (1B entries)
+ * %NFP_NET_CFG_TXR_VEC:     Per TX ring MSI-X table entry (1B entries)
+ * %NFP_NET_CFG_TXR_PRIO:    Per TX ring priority (1B entries)
+ * %NFP_NET_CFG_TXR_IRQ_MOD: Per TX ring interrupt moderation packet
  */
 #define NFP_NET_CFG_TXR_BASE            0x0200
 #define NFP_NET_CFG_TXR_ADDR(_x)        (NFP_NET_CFG_TXR_BASE + ((_x) * 0x8))
@@ -325,12 +330,12 @@
 
 /**
  * RX ring configuration (0x0800 - 0x0c00)
- * @NFP_NET_CFG_RXR_BASE:    Base offset for RX ring configuration
- * @NFP_NET_CFG_RXR_ADDR:    Per RX ring DMA address (8B entries)
- * @NFP_NET_CFG_RXR_SZ:      Per RX ring ring size (1B entries)
- * @NFP_NET_CFG_RXR_VEC:     Per RX ring MSI-X table entry (1B entries)
- * @NFP_NET_CFG_RXR_PRIO:    Per RX ring priority (1B entries)
- * @NFP_NET_CFG_RXR_IRQ_MOD: Per RX ring interrupt moderation (4B entries)
+ * %NFP_NET_CFG_RXR_BASE:    Base offset for RX ring configuration
+ * %NFP_NET_CFG_RXR_ADDR:    Per RX ring DMA address (8B entries)
+ * %NFP_NET_CFG_RXR_SZ:      Per RX ring ring size (1B entries)
+ * %NFP_NET_CFG_RXR_VEC:     Per RX ring MSI-X table entry (1B entries)
+ * %NFP_NET_CFG_RXR_PRIO:    Per RX ring priority (1B entries)
+ * %NFP_NET_CFG_RXR_IRQ_MOD: Per RX ring interrupt moderation (4B entries)
  */
 #define NFP_NET_CFG_RXR_BASE            0x0800
 #define NFP_NET_CFG_RXR_ADDR(_x)        (NFP_NET_CFG_RXR_BASE + ((_x) * 0x8))
@@ -343,7 +348,7 @@
 /**
  * Interrupt Control/Cause registers (0x0c00 - 0x0d00)
  * These registers are only used when MSI-X auto-masking is not
- * enabled (@NFP_NET_CFG_CTRL_MSIXAUTO not set).  The array is index
+ * enabled (%NFP_NET_CFG_CTRL_MSIXAUTO not set).  The array is indexed
 * by MSI-X entry; entries are 1B in size.  If an entry is zero, the
  * corresponding entry is enabled.  If the FW generates an interrupt,
  * it writes a cause into the corresponding field.  This also masks
@@ -393,8 +398,8 @@
 /**
  * Per ring stats (0x1000 - 0x1800)
  * options, 64bit per entry
- * @NFP_NET_CFG_TXR_STATS:   TX ring statistics (Packet and Byte count)
- * @NFP_NET_CFG_RXR_STATS:   RX ring statistics (Packet and Byte count)
+ * %NFP_NET_CFG_TXR_STATS:   TX ring statistics (Packet and Byte count)
+ * %NFP_NET_CFG_RXR_STATS:   RX ring statistics (Packet and Byte count)
  */
 #define NFP_NET_CFG_TXR_STATS_BASE      0x1000
 #define NFP_NET_CFG_TXR_STATS(_x)       (NFP_NET_CFG_TXR_STATS_BASE + \
@@ -408,24 +413,105 @@
  * 4B used for update command and 4B return code
  * followed by a max of 504B of variable length value
  */
-#define NFP_NET_CFG_MBOX_CMD		0x1800
-#define NFP_NET_CFG_MBOX_RET		0x1804
-#define NFP_NET_CFG_MBOX_VAL		0x1808
+#define NFP_NET_CFG_MBOX_BASE		0x1800
 #define NFP_NET_CFG_MBOX_VAL_MAX_SZ	0x1F8
 
+#define NFP_NET_CFG_MBOX_SIMPLE_CMD	0x0
+#define NFP_NET_CFG_MBOX_SIMPLE_RET	0x4
+#define NFP_NET_CFG_MBOX_SIMPLE_VAL	0x8
+#define NFP_NET_CFG_MBOX_SIMPLE_LEN	0x12
+
 #define NFP_NET_CFG_MBOX_CMD_CTAG_FILTER_ADD 1
 #define NFP_NET_CFG_MBOX_CMD_CTAG_FILTER_KILL 2
 
 /**
  * VLAN filtering using general use mailbox
- * @NFP_NET_CFG_VLAN_FILTER:		Base address of VLAN filter mailbox
- * @NFP_NET_CFG_VLAN_FILTER_VID:	VLAN ID to filter
- * @NFP_NET_CFG_VLAN_FILTER_PROTO:	VLAN proto to filter
- * @NFP_NET_CFG_VXLAN_SZ:		Size of the VLAN filter mailbox in bytes
+ * %NFP_NET_CFG_VLAN_FILTER:		Base address of VLAN filter mailbox
+ * %NFP_NET_CFG_VLAN_FILTER_VID:	VLAN ID to filter
+ * %NFP_NET_CFG_VLAN_FILTER_PROTO:	VLAN proto to filter
+ * %NFP_NET_CFG_VLAN_FILTER_SZ:	Size of the VLAN filter mailbox in bytes
  */
-#define NFP_NET_CFG_VLAN_FILTER		NFP_NET_CFG_MBOX_VAL
+#define NFP_NET_CFG_VLAN_FILTER		NFP_NET_CFG_MBOX_SIMPLE_VAL
 #define  NFP_NET_CFG_VLAN_FILTER_VID	NFP_NET_CFG_VLAN_FILTER
 #define  NFP_NET_CFG_VLAN_FILTER_PROTO	 (NFP_NET_CFG_VLAN_FILTER + 2)
 #define NFP_NET_CFG_VLAN_FILTER_SZ	 0x0004
 
+/**
+ * TLV capabilities
+ * %NFP_NET_CFG_TLV_TYPE:	Offset of type within the TLV
+ * %NFP_NET_CFG_TLV_TYPE_REQUIRED: Driver must be able to parse the TLV
+ * %NFP_NET_CFG_TLV_LENGTH:	Offset of length within the TLV
+ * %NFP_NET_CFG_TLV_LENGTH_INC:	TLV length increments
+ * %NFP_NET_CFG_TLV_VALUE:	Offset of value within the TLV
+ *
+ * List of simple TLV structures, first one starts at %NFP_NET_CFG_TLV_BASE.
+ * Last structure must be of type %NFP_NET_CFG_TLV_TYPE_END.  Presence of TLVs
+ * is indicated by a non-zero word at %NFP_NET_CFG_TLV_BASE.  TLV structures may
+ * fill the entire remainder of the BAR or be shorter.  FW must make sure TLVs
+ * don't conflict with other features which allocate space beyond
+ * %NFP_NET_CFG_TLV_BASE.  %NFP_NET_CFG_TLV_TYPE_RESERVED should be used to wrap
+ * space used by such features.
+ * Note that the 4 byte TLV header is not counted in %NFP_NET_CFG_TLV_LENGTH.
+ */
+#define NFP_NET_CFG_TLV_TYPE		0x00
+#define   NFP_NET_CFG_TLV_TYPE_REQUIRED	  0x8000
+#define NFP_NET_CFG_TLV_LENGTH		0x02
+#define   NFP_NET_CFG_TLV_LENGTH_INC	  4
+#define NFP_NET_CFG_TLV_VALUE		0x04
+
+#define NFP_NET_CFG_TLV_HEADER_REQUIRED	0x80000000
+#define NFP_NET_CFG_TLV_HEADER_TYPE	0x7fff0000
+#define NFP_NET_CFG_TLV_HEADER_LENGTH	0x0000ffff
+
+/**
+ * Capability TLV types
+ *
+ * %NFP_NET_CFG_TLV_TYPE_UNKNOWN:
+ * Special TLV type to catch bugs, should never be encountered.  Drivers should
+ * treat encountering this type as an error and refuse to probe.
+ *
+ * %NFP_NET_CFG_TLV_TYPE_RESERVED:
+ * Reserved space, may contain legacy fixed-offset fields, or be used for
+ * padding.  The use of this type should be otherwise avoided.
+ *
+ * %NFP_NET_CFG_TLV_TYPE_END:
+ * Empty, end of TLV list.  Must be the last TLV.  Drivers will stop processing
+ * further TLVs when encountered.
+ *
+ * %NFP_NET_CFG_TLV_TYPE_ME_FREQ:
+ * Single word, ME frequency in MHz as used in calculation for
+ * %NFP_NET_CFG_RXR_IRQ_MOD and %NFP_NET_CFG_TXR_IRQ_MOD.
+ *
+ * %NFP_NET_CFG_TLV_TYPE_MBOX:
+ * Variable, mailbox area.  Overwrites the default location which is
+ * %NFP_NET_CFG_MBOX_BASE and length %NFP_NET_CFG_MBOX_VAL_MAX_SZ.
+ */
+#define NFP_NET_CFG_TLV_TYPE_UNKNOWN		0
+#define NFP_NET_CFG_TLV_TYPE_RESERVED		1
+#define NFP_NET_CFG_TLV_TYPE_END		2
+#define NFP_NET_CFG_TLV_TYPE_ME_FREQ		3
+#define NFP_NET_CFG_TLV_TYPE_MBOX		4
+
+struct device;
+
+/**
+ * struct nfp_net_tlv_caps - parsed control BAR TLV capabilities
+ * @me_freq_mhz:	ME clock_freq (MHz)
+ * @mbox_off:		vNIC mailbox area offset
+ * @mbox_len:		vNIC mailbox area length
+ */
+struct nfp_net_tlv_caps {
+	u32 me_freq_mhz;
+	unsigned int mbox_off;
+	unsigned int mbox_len;
+};
+
+int nfp_net_tlv_caps_parse(struct device *dev, u8 __iomem *ctrl_mem,
+			   struct nfp_net_tlv_caps *caps);
+
+static inline bool nfp_net_has_mbox(struct nfp_net_tlv_caps *caps)
+{
+	return caps->mbox_len >= NFP_NET_CFG_MBOX_SIMPLE_LEN;
+}
+
 #endif /* _NFP_NET_CTRL_H_ */
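
A standalone check of the TLV header word layout defined above (REQUIRED in
bit 31, TYPE in bits 30:16, LENGTH in bits 15:0):

#include <assert.h>
#include <stdint.h>

#define TLV_REQUIRED	0x80000000u	/* NFP_NET_CFG_TLV_HEADER_REQUIRED */
#define TLV_TYPE(x)	(((uint32_t)(x) & 0x7fffu) << 16)
#define TLV_LEN(x)	((uint32_t)(x) & 0xffffu)

int main(void)
{
	/* an ME_FREQ TLV (type 3) carrying a single 4-byte word */
	assert((TLV_TYPE(3) | TLV_LEN(4)) == 0x00030004u);

	/* an unknown type flagged REQUIRED makes the parser fail probe */
	assert((TLV_REQUIRED | TLV_TYPE(0x100) | TLV_LEN(8)) == 0x81000008u);
	return 0;
}
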
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_debugdump.c b/drivers/net/ethernet/netronome/nfp/nfp_net_debugdump.c
new file mode 100644
index 000000000000..bb8ed460086e
--- /dev/null
+++ b/drivers/net/ethernet/netronome/nfp/nfp_net_debugdump.c
@@ -0,0 +1,811 @@
+/*
+ * Copyright (C) 2017 Netronome Systems, Inc.
+ *
+ * This software is dual licensed under the GNU General Public License Version 2,
+ * June 1991 as shown in the file COPYING in the top-level directory of this
+ * source tree or the BSD 2-Clause License provided below.  You have the
+ * option to license this software under the complete terms of either license.
+ *
+ * The BSD 2-Clause License:
+ *
+ *     Redistribution and use in source and binary forms, with or
+ *     without modification, are permitted provided that the following
+ *     conditions are met:
+ *
+ *      1. Redistributions of source code must retain the above
+ *         copyright notice, this list of conditions and the following
+ *         disclaimer.
+ *
+ *      2. Redistributions in binary form must reproduce the above
+ *         copyright notice, this list of conditions and the following
+ *         disclaimer in the documentation and/or other materials
+ *         provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include <linux/ethtool.h>
+#include <linux/vmalloc.h>
+
+#include "nfp_asm.h"
+#include "nfp_main.h"
+#include "nfpcore/nfp.h"
+#include "nfpcore/nfp_nffw.h"
+#include "nfpcore/nfp6000/nfp6000.h"
+
+#define NFP_DUMP_SPEC_RTSYM	"_abi_dump_spec"
+
+#define ALIGN8(x)	ALIGN(x, 8)
+
+enum nfp_dumpspec_type {
+	NFP_DUMPSPEC_TYPE_CPP_CSR = 0,
+	NFP_DUMPSPEC_TYPE_XPB_CSR = 1,
+	NFP_DUMPSPEC_TYPE_ME_CSR = 2,
+	NFP_DUMPSPEC_TYPE_INDIRECT_ME_CSR = 3,
+	NFP_DUMPSPEC_TYPE_RTSYM = 4,
+	NFP_DUMPSPEC_TYPE_HWINFO = 5,
+	NFP_DUMPSPEC_TYPE_FWNAME = 6,
+	NFP_DUMPSPEC_TYPE_HWINFO_FIELD = 7,
+	NFP_DUMPSPEC_TYPE_PROLOG = 10000,
+	NFP_DUMPSPEC_TYPE_ERROR = 10001,
+};
+
+/* The following structs must be carefully aligned so that they can be used to
+ * interpret the binary dumpspec and populate the dump data in a deterministic
+ * way.
+ */
+
+/* generic type plus length */
+struct nfp_dump_tl {
+	__be32 type;
+	__be32 length;	/* chunk length to follow, aligned to 8 bytes */
+	char data[0];
+};
+
+/* NFP CPP parameters */
+struct nfp_dumpspec_cpp_isl_id {
+	u8 target;
+	u8 action;
+	u8 token;
+	u8 island;
+};
+
+struct nfp_dump_common_cpp {
+	struct nfp_dumpspec_cpp_isl_id cpp_id;
+	__be32 offset;		/* address to start dump */
+	__be32 dump_length;	/* total bytes to dump, aligned to reg size */
+};
+
+/* CSR dumpables */
+struct nfp_dumpspec_csr {
+	struct nfp_dump_tl tl;
+	struct nfp_dump_common_cpp cpp;
+	__be32 register_width;	/* in bits */
+};
+
+struct nfp_dumpspec_rtsym {
+	struct nfp_dump_tl tl;
+	char rtsym[0];
+};
+
+/* header for register dumpable */
+struct nfp_dump_csr {
+	struct nfp_dump_tl tl;
+	struct nfp_dump_common_cpp cpp;
+	__be32 register_width;	/* in bits */
+	__be32 error;		/* error code encountered while reading */
+	__be32 error_offset;	/* offset being read when error occurred */
+};
+
+struct nfp_dump_rtsym {
+	struct nfp_dump_tl tl;
+	struct nfp_dump_common_cpp cpp;
+	__be32 error;		/* error code encountered while reading */
+	u8 padded_name_length;	/* pad so data starts at 8 byte boundary */
+	char rtsym[0];
+	/* after padded_name_length, there is dump_length data */
+};
+
+struct nfp_dump_prolog {
+	struct nfp_dump_tl tl;
+	__be32 dump_level;
+};
+
+struct nfp_dump_error {
+	struct nfp_dump_tl tl;
+	__be32 error;
+	char padding[4];
+	char spec[0];
+};
+
+/* to track state through debug size calculation TLV traversal */
+struct nfp_level_size {
+	__be32 requested_level;	/* input */
+	u32 total_size;		/* output */
+};
+
+/* to track state during debug dump creation TLV traversal */
+struct nfp_dump_state {
+	__be32 requested_level;	/* input param */
+	u32 dumped_size;	/* adds up to size of dumped data */
+	u32 buf_size;		/* size of buffer pointed to by p */
+	void *p;		/* current point in dump buffer */
+};
+
+typedef int (*nfp_tlv_visit)(struct nfp_pf *pf, struct nfp_dump_tl *tl,
+			     void *param);
+
+static int
+nfp_traverse_tlvs(struct nfp_pf *pf, void *data, u32 data_length, void *param,
+		  nfp_tlv_visit tlv_visit)
+{
+	long long remaining = data_length;
+	struct nfp_dump_tl *tl;
+	u32 total_tlv_size;
+	void *p = data;
+	int err;
+
+	while (remaining >= sizeof(*tl)) {
+		tl = p;
+		if (!tl->type && !tl->length)
+			break;
+
+		if (be32_to_cpu(tl->length) > remaining - sizeof(*tl))
+			return -EINVAL;
+
+		total_tlv_size = sizeof(*tl) + be32_to_cpu(tl->length);
+
+		/* Spec TLVs should be aligned to 4 bytes. */
+		if (total_tlv_size % 4 != 0)
+			return -EINVAL;
+
+		p += total_tlv_size;
+		remaining -= total_tlv_size;
+		err = tlv_visit(pf, tl, param);
+		if (err)
+			return err;
+	}
+
+	return 0;
+}
+
+static u32 nfp_get_numeric_cpp_id(struct nfp_dumpspec_cpp_isl_id *cpp_id)
+{
+	return NFP_CPP_ISLAND_ID(cpp_id->target, cpp_id->action, cpp_id->token,
+				 cpp_id->island);
+}
+
+struct nfp_dumpspec *
+nfp_net_dump_load_dumpspec(struct nfp_cpp *cpp, struct nfp_rtsym_table *rtbl)
+{
+	const struct nfp_rtsym *specsym;
+	struct nfp_dumpspec *dumpspec;
+	int bytes_read;
+	u32 cpp_id;
+
+	specsym = nfp_rtsym_lookup(rtbl, NFP_DUMP_SPEC_RTSYM);
+	if (!specsym)
+		return NULL;
+
+	/* expected size of this buffer is on the order of tens of kilobytes */
+	dumpspec = vmalloc(sizeof(*dumpspec) + specsym->size);
+	if (!dumpspec)
+		return NULL;
+
+	dumpspec->size = specsym->size;
+
+	cpp_id = NFP_CPP_ISLAND_ID(specsym->target, NFP_CPP_ACTION_RW, 0,
+				   specsym->domain);
+
+	bytes_read = nfp_cpp_read(cpp, cpp_id, specsym->addr, dumpspec->data,
+				  specsym->size);
+	if (bytes_read != specsym->size) {
+		vfree(dumpspec);
+		nfp_warn(cpp, "Debug dump specification read failed.\n");
+		return NULL;
+	}
+
+	return dumpspec;
+}
+
+static int nfp_dump_error_tlv_size(struct nfp_dump_tl *spec)
+{
+	return ALIGN8(sizeof(struct nfp_dump_error) + sizeof(*spec) +
+		      be32_to_cpu(spec->length));
+}
+
+static int nfp_calc_fwname_tlv_size(struct nfp_pf *pf)
+{
+	u32 fwname_len = strlen(nfp_mip_name(pf->mip));
+
+	return sizeof(struct nfp_dump_tl) + ALIGN8(fwname_len + 1);
+}
+
+static int nfp_calc_hwinfo_field_sz(struct nfp_pf *pf, struct nfp_dump_tl *spec)
+{
+	u32 tl_len, key_len;
+	const char *value;
+
+	tl_len = be32_to_cpu(spec->length);
+	key_len = strnlen(spec->data, tl_len);
+	if (key_len == tl_len)
+		return nfp_dump_error_tlv_size(spec);
+
+	value = nfp_hwinfo_lookup(pf->hwinfo, spec->data);
+	if (!value)
+		return nfp_dump_error_tlv_size(spec);
+
+	return sizeof(struct nfp_dump_tl) + ALIGN8(key_len + strlen(value) + 2);
+}
+
+static bool nfp_csr_spec_valid(struct nfp_dumpspec_csr *spec_csr)
+{
+	u32 required_read_sz = sizeof(*spec_csr) - sizeof(spec_csr->tl);
+	u32 available_sz = be32_to_cpu(spec_csr->tl.length);
+	u32 reg_width;
+
+	if (available_sz < required_read_sz)
+		return false;
+
+	reg_width = be32_to_cpu(spec_csr->register_width);
+
+	return reg_width == 32 || reg_width == 64;
+}
+
+static int
+nfp_calc_rtsym_dump_sz(struct nfp_pf *pf, struct nfp_dump_tl *spec)
+{
+	struct nfp_rtsym_table *rtbl = pf->rtbl;
+	struct nfp_dumpspec_rtsym *spec_rtsym;
+	const struct nfp_rtsym *sym;
+	u32 tl_len, key_len;
+	u32 size;
+
+	spec_rtsym = (struct nfp_dumpspec_rtsym *)spec;
+	tl_len = be32_to_cpu(spec->length);
+	key_len = strnlen(spec_rtsym->rtsym, tl_len);
+	if (key_len == tl_len)
+		return nfp_dump_error_tlv_size(spec);
+
+	sym = nfp_rtsym_lookup(rtbl, spec_rtsym->rtsym);
+	if (!sym)
+		return nfp_dump_error_tlv_size(spec);
+
+	if (sym->type == NFP_RTSYM_TYPE_ABS)
+		size = sizeof(sym->addr);
+	else
+		size = sym->size;
+
+	return ALIGN8(offsetof(struct nfp_dump_rtsym, rtsym) + key_len + 1) +
+	       ALIGN8(size);
+}
+
+static int
+nfp_add_tlv_size(struct nfp_pf *pf, struct nfp_dump_tl *tl, void *param)
+{
+	struct nfp_dumpspec_csr *spec_csr;
+	u32 *size = param;
+	u32 hwinfo_size;
+
+	switch (be32_to_cpu(tl->type)) {
+	case NFP_DUMPSPEC_TYPE_FWNAME:
+		*size += nfp_calc_fwname_tlv_size(pf);
+		break;
+	case NFP_DUMPSPEC_TYPE_CPP_CSR:
+	case NFP_DUMPSPEC_TYPE_XPB_CSR:
+	case NFP_DUMPSPEC_TYPE_ME_CSR:
+		spec_csr = (struct nfp_dumpspec_csr *)tl;
+		if (!nfp_csr_spec_valid(spec_csr))
+			*size += nfp_dump_error_tlv_size(tl);
+		else
+			*size += ALIGN8(sizeof(struct nfp_dump_csr)) +
+				 ALIGN8(be32_to_cpu(spec_csr->cpp.dump_length));
+		break;
+	case NFP_DUMPSPEC_TYPE_INDIRECT_ME_CSR:
+		spec_csr = (struct nfp_dumpspec_csr *)tl;
+		if (!nfp_csr_spec_valid(spec_csr))
+			*size += nfp_dump_error_tlv_size(tl);
+		else
+			*size += ALIGN8(sizeof(struct nfp_dump_csr)) +
+				 ALIGN8(be32_to_cpu(spec_csr->cpp.dump_length) *
+					NFP_IND_NUM_CONTEXTS);
+		break;
+	case NFP_DUMPSPEC_TYPE_RTSYM:
+		*size += nfp_calc_rtsym_dump_sz(pf, tl);
+		break;
+	case NFP_DUMPSPEC_TYPE_HWINFO:
+		hwinfo_size = nfp_hwinfo_get_packed_str_size(pf->hwinfo);
+		*size += sizeof(struct nfp_dump_tl) + ALIGN8(hwinfo_size);
+		break;
+	case NFP_DUMPSPEC_TYPE_HWINFO_FIELD:
+		*size += nfp_calc_hwinfo_field_sz(pf, tl);
+		break;
+	default:
+		*size += nfp_dump_error_tlv_size(tl);
+		break;
+	}
+
+	return 0;
+}
+
+static int
+nfp_calc_specific_level_size(struct nfp_pf *pf, struct nfp_dump_tl *dump_level,
+			     void *param)
+{
+	struct nfp_level_size *lev_sz = param;
+
+	if (dump_level->type != lev_sz->requested_level)
+		return 0;
+
+	return nfp_traverse_tlvs(pf, dump_level->data,
+				 be32_to_cpu(dump_level->length),
+				 &lev_sz->total_size, nfp_add_tlv_size);
+}
+
+s64 nfp_net_dump_calculate_size(struct nfp_pf *pf, struct nfp_dumpspec *spec,
+				u32 flag)
+{
+	struct nfp_level_size lev_sz;
+	int err;
+
+	lev_sz.requested_level = cpu_to_be32(flag);
+	lev_sz.total_size = ALIGN8(sizeof(struct nfp_dump_prolog));
+
+	err = nfp_traverse_tlvs(pf, spec->data, spec->size, &lev_sz,
+				nfp_calc_specific_level_size);
+	if (err)
+		return err;
+
+	return lev_sz.total_size;
+}
+
+static int nfp_add_tlv(u32 type, u32 total_tlv_sz, struct nfp_dump_state *dump)
+{
+	struct nfp_dump_tl *tl = dump->p;
+
+	if (total_tlv_sz > dump->buf_size)
+		return -ENOSPC;
+
+	if (dump->buf_size - total_tlv_sz < dump->dumped_size)
+		return -ENOSPC;
+
+	tl->type = cpu_to_be32(type);
+	tl->length = cpu_to_be32(total_tlv_sz - sizeof(*tl));
+
+	dump->dumped_size += total_tlv_sz;
+	dump->p += total_tlv_sz;
+
+	return 0;
+}
+
+static int
+nfp_dump_error_tlv(struct nfp_dump_tl *spec, int error,
+		   struct nfp_dump_state *dump)
+{
+	struct nfp_dump_error *dump_header = dump->p;
+	u32 total_spec_size, total_size;
+	int err;
+
+	total_spec_size = sizeof(*spec) + be32_to_cpu(spec->length);
+	total_size = ALIGN8(sizeof(*dump_header) + total_spec_size);
+
+	err = nfp_add_tlv(NFP_DUMPSPEC_TYPE_ERROR, total_size, dump);
+	if (err)
+		return err;
+
+	dump_header->error = cpu_to_be32(error);
+	memcpy(dump_header->spec, spec, total_spec_size);
+
+	return 0;
+}
+
+static int nfp_dump_fwname(struct nfp_pf *pf, struct nfp_dump_state *dump)
+{
+	struct nfp_dump_tl *dump_header = dump->p;
+	u32 fwname_len, total_size;
+	const char *fwname;
+	int err;
+
+	fwname = nfp_mip_name(pf->mip);
+	fwname_len = strlen(fwname);
+	total_size = sizeof(*dump_header) + ALIGN8(fwname_len + 1);
+
+	err = nfp_add_tlv(NFP_DUMPSPEC_TYPE_FWNAME, total_size, dump);
+	if (err)
+		return err;
+
+	memcpy(dump_header->data, fwname, fwname_len);
+
+	return 0;
+}
+
+static int
+nfp_dump_hwinfo(struct nfp_pf *pf, struct nfp_dump_tl *spec,
+		struct nfp_dump_state *dump)
+{
+	struct nfp_dump_tl *dump_header = dump->p;
+	u32 hwinfo_size, total_size;
+	char *hwinfo;
+	int err;
+
+	hwinfo = nfp_hwinfo_get_packed_strings(pf->hwinfo);
+	hwinfo_size = nfp_hwinfo_get_packed_str_size(pf->hwinfo);
+	total_size = sizeof(*dump_header) + ALIGN8(hwinfo_size);
+
+	err = nfp_add_tlv(NFP_DUMPSPEC_TYPE_HWINFO, total_size, dump);
+	if (err)
+		return err;
+
+	memcpy(dump_header->data, hwinfo, hwinfo_size);
+
+	return 0;
+}
+
+static int nfp_dump_hwinfo_field(struct nfp_pf *pf, struct nfp_dump_tl *spec,
+				 struct nfp_dump_state *dump)
+{
+	struct nfp_dump_tl *dump_header = dump->p;
+	u32 tl_len, key_len, val_len;
+	const char *key, *value;
+	u32 total_size;
+	int err;
+
+	tl_len = be32_to_cpu(spec->length);
+	key_len = strnlen(spec->data, tl_len);
+	if (key_len == tl_len)
+		return nfp_dump_error_tlv(spec, -EINVAL, dump);
+
+	key = spec->data;
+	value = nfp_hwinfo_lookup(pf->hwinfo, key);
+	if (!value)
+		return nfp_dump_error_tlv(spec, -ENOENT, dump);
+
+	val_len = strlen(value);
+	total_size = sizeof(*dump_header) + ALIGN8(key_len + val_len + 2);
+	err = nfp_add_tlv(NFP_DUMPSPEC_TYPE_HWINFO_FIELD, total_size, dump);
+	if (err)
+		return err;
+
+	memcpy(dump_header->data, key, key_len + 1);
+	memcpy(dump_header->data + key_len + 1, value, val_len + 1);
+
+	return 0;
+}
+
+static bool is_xpb_read(struct nfp_dumpspec_cpp_isl_id *cpp_id)
+{
+	return cpp_id->target == NFP_CPP_TARGET_ISLAND_XPB &&
+	       cpp_id->action == 0 && cpp_id->token == 0;
+}
+
+static int
+nfp_dump_csr_range(struct nfp_pf *pf, struct nfp_dumpspec_csr *spec_csr,
+		   struct nfp_dump_state *dump)
+{
+	struct nfp_dump_csr *dump_header = dump->p;
+	u32 reg_sz, header_size, total_size;
+	u32 cpp_rd_addr, max_rd_addr;
+	int bytes_read;
+	void *dest;
+	u32 cpp_id;
+	int err;
+
+	if (!nfp_csr_spec_valid(spec_csr))
+		return nfp_dump_error_tlv(&spec_csr->tl, -EINVAL, dump);
+
+	reg_sz = be32_to_cpu(spec_csr->register_width) / BITS_PER_BYTE;
+	header_size = ALIGN8(sizeof(*dump_header));
+	total_size = header_size +
+		     ALIGN8(be32_to_cpu(spec_csr->cpp.dump_length));
+	dest = dump->p + header_size;
+
+	err = nfp_add_tlv(be32_to_cpu(spec_csr->tl.type), total_size, dump);
+	if (err)
+		return err;
+
+	dump_header->cpp = spec_csr->cpp;
+	dump_header->register_width = spec_csr->register_width;
+
+	cpp_id = nfp_get_numeric_cpp_id(&spec_csr->cpp.cpp_id);
+	cpp_rd_addr = be32_to_cpu(spec_csr->cpp.offset);
+	max_rd_addr = cpp_rd_addr + be32_to_cpu(spec_csr->cpp.dump_length);
+
+	while (cpp_rd_addr < max_rd_addr) {
+		if (is_xpb_read(&spec_csr->cpp.cpp_id)) {
+			err = nfp_xpb_readl(pf->cpp, cpp_rd_addr, (u32 *)dest);
+		} else {
+			bytes_read = nfp_cpp_read(pf->cpp, cpp_id, cpp_rd_addr,
+						  dest, reg_sz);
+			err = bytes_read == reg_sz ? 0 : -EIO;
+		}
+		if (err) {
+			dump_header->error = cpu_to_be32(err);
+			dump_header->error_offset = cpu_to_be32(cpp_rd_addr);
+			break;
+		}
+		cpp_rd_addr += reg_sz;
+		dest += reg_sz;
+	}
+
+	return 0;
+}
+
+/* Write the context to CSRCtxPtr and read it back, then the value can be
+ * read from IndCtxStatus.
+ */
+static int
+nfp_read_indirect_csr(struct nfp_cpp *cpp,
+		      struct nfp_dumpspec_cpp_isl_id cpp_params, u32 offset,
+		      u32 reg_sz, u32 context, void *dest)
+{
+	u32 csr_ctx_ptr_offs;
+	u32 cpp_id;
+	int result;
+
+	csr_ctx_ptr_offs = nfp_get_ind_csr_ctx_ptr_offs(offset);
+	cpp_id = NFP_CPP_ISLAND_ID(cpp_params.target,
+				   NFP_IND_ME_REFL_WR_SIG_INIT,
+				   cpp_params.token, cpp_params.island);
+	result = nfp_cpp_writel(cpp, cpp_id, csr_ctx_ptr_offs, context);
+	if (result)
+		return result;
+
+	cpp_id = nfp_get_numeric_cpp_id(&cpp_params);
+	result = nfp_cpp_read(cpp, cpp_id, csr_ctx_ptr_offs, dest, reg_sz);
+	if (result != reg_sz)
+		return result < 0 ? result : -EIO;
+
+	result = nfp_cpp_read(cpp, cpp_id, offset, dest, reg_sz);
+	if (result != reg_sz)
+		return result < 0 ? result : -EIO;
+
+	return 0;
+}
+
+static int
+nfp_read_all_indirect_csr_ctx(struct nfp_cpp *cpp,
+			      struct nfp_dumpspec_csr *spec_csr, u32 address,
+			      u32 reg_sz, void *dest)
+{
+	u32 ctx;
+	int err;
+
+	for (ctx = 0; ctx < NFP_IND_NUM_CONTEXTS; ctx++) {
+		err = nfp_read_indirect_csr(cpp, spec_csr->cpp.cpp_id, address,
+					    reg_sz, ctx, dest + ctx * reg_sz);
+		if (err)
+			return err;
+	}
+
+	return 0;
+}
+
+static int
+nfp_dump_indirect_csr_range(struct nfp_pf *pf,
+			    struct nfp_dumpspec_csr *spec_csr,
+			    struct nfp_dump_state *dump)
+{
+	struct nfp_dump_csr *dump_header = dump->p;
+	u32 reg_sz, header_size, total_size;
+	u32 cpp_rd_addr, max_rd_addr;
+	u32 reg_data_length;
+	void *dest;
+	int err;
+
+	if (!nfp_csr_spec_valid(spec_csr))
+		return nfp_dump_error_tlv(&spec_csr->tl, -EINVAL, dump);
+
+	reg_sz = be32_to_cpu(spec_csr->register_width) / BITS_PER_BYTE;
+	header_size = ALIGN8(sizeof(*dump_header));
+	reg_data_length = be32_to_cpu(spec_csr->cpp.dump_length) *
+			  NFP_IND_NUM_CONTEXTS;
+	total_size = header_size + ALIGN8(reg_data_length);
+	dest = dump->p + header_size;
+
+	err = nfp_add_tlv(be32_to_cpu(spec_csr->tl.type), total_size, dump);
+	if (err)
+		return err;
+
+	dump_header->cpp = spec_csr->cpp;
+	dump_header->register_width = spec_csr->register_width;
+
+	cpp_rd_addr = be32_to_cpu(spec_csr->cpp.offset);
+	max_rd_addr = cpp_rd_addr + be32_to_cpu(spec_csr->cpp.dump_length);
+	while (cpp_rd_addr < max_rd_addr) {
+		err = nfp_read_all_indirect_csr_ctx(pf->cpp, spec_csr,
+						    cpp_rd_addr, reg_sz, dest);
+		if (err) {
+			dump_header->error = cpu_to_be32(err);
+			dump_header->error_offset = cpu_to_be32(cpp_rd_addr);
+			break;
+		}
+		cpp_rd_addr += reg_sz;
+		dest += reg_sz * NFP_IND_NUM_CONTEXTS;
+	}
+
+	return 0;
+}
+
+static int
+nfp_dump_single_rtsym(struct nfp_pf *pf, struct nfp_dumpspec_rtsym *spec,
+		      struct nfp_dump_state *dump)
+{
+	struct nfp_dump_rtsym *dump_header = dump->p;
+	struct nfp_dumpspec_cpp_isl_id cpp_params;
+	struct nfp_rtsym_table *rtbl = pf->rtbl;
+	u32 header_size, total_size, sym_size;
+	const struct nfp_rtsym *sym;
+	u32 tl_len, key_len;
+	int bytes_read;
+	u32 cpp_id;
+	void *dest;
+	int err;
+
+	tl_len = be32_to_cpu(spec->tl.length);
+	key_len = strnlen(spec->rtsym, tl_len);
+	if (key_len == tl_len)
+		return nfp_dump_error_tlv(&spec->tl, -EINVAL, dump);
+
+	sym = nfp_rtsym_lookup(rtbl, spec->rtsym);
+	if (!sym)
+		return nfp_dump_error_tlv(&spec->tl, -ENOENT, dump);
+
+	if (sym->type == NFP_RTSYM_TYPE_ABS)
+		sym_size = sizeof(sym->addr);
+	else
+		sym_size = sym->size;
+
+	header_size =
+		ALIGN8(offsetof(struct nfp_dump_rtsym, rtsym) + key_len + 1);
+	total_size = header_size + ALIGN8(sym_size);
+	dest = dump->p + header_size;
+
+	err = nfp_add_tlv(be32_to_cpu(spec->tl.type), total_size, dump);
+	if (err)
+		return err;
+
+	dump_header->padded_name_length =
+		header_size - offsetof(struct nfp_dump_rtsym, rtsym);
+	memcpy(dump_header->rtsym, spec->rtsym, key_len + 1);
+	dump_header->cpp.dump_length = cpu_to_be32(sym_size);
+
+	if (sym->type == NFP_RTSYM_TYPE_ABS) {
+		*(u64 *)dest = sym->addr;
+	} else {
+		cpp_params.target = sym->target;
+		cpp_params.action = NFP_CPP_ACTION_RW;
+		cpp_params.token  = 0;
+		cpp_params.island = sym->domain;
+		cpp_id = nfp_get_numeric_cpp_id(&cpp_params);
+		dump_header->cpp.cpp_id = cpp_params;
+		dump_header->cpp.offset = cpu_to_be32(sym->addr);
+		bytes_read = nfp_cpp_read(pf->cpp, cpp_id, sym->addr, dest,
+					  sym_size);
+		if (bytes_read != sym_size) {
+			if (bytes_read >= 0)
+				bytes_read = -EIO;
+			dump_header->error = cpu_to_be32(bytes_read);
+		}
+	}
+
+	return 0;
+}
+
+static int
+nfp_dump_for_tlv(struct nfp_pf *pf, struct nfp_dump_tl *tl, void *param)
+{
+	struct nfp_dumpspec_rtsym *spec_rtsym;
+	struct nfp_dump_state *dump = param;
+	struct nfp_dumpspec_csr *spec_csr;
+	int err;
+
+	switch (be32_to_cpu(tl->type)) {
+	case NFP_DUMPSPEC_TYPE_FWNAME:
+		err = nfp_dump_fwname(pf, dump);
+		if (err)
+			return err;
+		break;
+	case NFP_DUMPSPEC_TYPE_CPP_CSR:
+	case NFP_DUMPSPEC_TYPE_XPB_CSR:
+	case NFP_DUMPSPEC_TYPE_ME_CSR:
+		spec_csr = (struct nfp_dumpspec_csr *)tl;
+		err = nfp_dump_csr_range(pf, spec_csr, dump);
+		if (err)
+			return err;
+		break;
+	case NFP_DUMPSPEC_TYPE_INDIRECT_ME_CSR:
+		spec_csr = (struct nfp_dumpspec_csr *)tl;
+		err = nfp_dump_indirect_csr_range(pf, spec_csr, dump);
+		if (err)
+			return err;
+		break;
+	case NFP_DUMPSPEC_TYPE_RTSYM:
+		spec_rtsym = (struct nfp_dumpspec_rtsym *)tl;
+		err = nfp_dump_single_rtsym(pf, spec_rtsym, dump);
+		if (err)
+			return err;
+		break;
+	case NFP_DUMPSPEC_TYPE_HWINFO:
+		err = nfp_dump_hwinfo(pf, tl, dump);
+		if (err)
+			return err;
+		break;
+	case NFP_DUMPSPEC_TYPE_HWINFO_FIELD:
+		err = nfp_dump_hwinfo_field(pf, tl, dump);
+		if (err)
+			return err;
+		break;
+	default:
+		err = nfp_dump_error_tlv(tl, -EOPNOTSUPP, dump);
+		if (err)
+			return err;
+	}
+
+	return 0;
+}
+
+static int
+nfp_dump_specific_level(struct nfp_pf *pf, struct nfp_dump_tl *dump_level,
+			void *param)
+{
+	struct nfp_dump_state *dump = param;
+
+	if (dump_level->type != dump->requested_level)
+		return 0;
+
+	return nfp_traverse_tlvs(pf, dump_level->data,
+				 be32_to_cpu(dump_level->length), dump,
+				 nfp_dump_for_tlv);
+}
+
+static int nfp_dump_populate_prolog(struct nfp_dump_state *dump)
+{
+	struct nfp_dump_prolog *prolog = dump->p;
+	u32 total_size;
+	int err;
+
+	total_size = ALIGN8(sizeof(*prolog));
+
+	err = nfp_add_tlv(NFP_DUMPSPEC_TYPE_PROLOG, total_size, dump);
+	if (err)
+		return err;
+
+	prolog->dump_level = dump->requested_level;
+
+	return 0;
+}
+
+int nfp_net_dump_populate_buffer(struct nfp_pf *pf, struct nfp_dumpspec *spec,
+				 struct ethtool_dump *dump_param, void *dest)
+{
+	struct nfp_dump_state dump;
+	int err;
+
+	dump.requested_level = cpu_to_be32(dump_param->flag);
+	dump.dumped_size = 0;
+	dump.p = dest;
+	dump.buf_size = dump_param->len;
+
+	err = nfp_dump_populate_prolog(&dump);
+	if (err)
+		return err;
+
+	err = nfp_traverse_tlvs(pf, spec->data, spec->size, &dump,
+				nfp_dump_specific_level);
+	if (err)
+		return err;
+
+	/* Set the size of the actual dump, to trigger a warning if it
+	 * differs from the calculated size.
+	 */
+	dump_param->len = dump.dumped_size;
+
+	return 0;
+}
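The debug dump emitted above is a stream of 8-byte-aligned TLV records whose
length field excludes the header, exactly as nfp_add_tlv() computes it. A
minimal standalone C sketch of that framing follows; every name in it is
local to the sketch, not a driver symbol.

#include <stdint.h>
#include <string.h>
#include <errno.h>
#include <arpa/inet.h>		/* htonl() stands in for cpu_to_be32() */

#define ALIGN8(x)	(((x) + 7) & ~7u)

struct tlv_hdr {
	uint32_t type;		/* big-endian on the wire */
	uint32_t length;	/* payload + pad bytes, header excluded */
};

/* Append one record; returns bytes consumed or -ENOSPC, mirroring the
 * two buffer-space checks in nfp_add_tlv().
 */
static int emit_tlv(uint8_t *buf, size_t buf_size, size_t used,
		    uint32_t type, const void *payload, uint32_t len)
{
	uint32_t total = sizeof(struct tlv_hdr) + ALIGN8(len);
	struct tlv_hdr hdr;

	if (total > buf_size || buf_size - total < used)
		return -ENOSPC;

	hdr.type = htonl(type);
	hdr.length = htonl(total - sizeof(hdr));
	memcpy(buf + used, &hdr, sizeof(hdr));
	memcpy(buf + used + sizeof(hdr), payload, len);
	memset(buf + used + sizeof(hdr) + len, 0, ALIGN8(len) - len);

	return total;
}

A record with a 3-byte payload therefore occupies sizeof(struct tlv_hdr) + 8
bytes with the pad zeroed, which is why the size-calculation pass above sums
ALIGN8() terms before any data is written.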
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c b/drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c
index 6c02b2d6ba06..e1dae0616f52 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c
+++ b/drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c
@@ -47,18 +47,16 @@
 #include <linux/interrupt.h>
 #include <linux/pci.h>
 #include <linux/ethtool.h>
+#include <linux/firmware.h>
 
 #include "nfpcore/nfp.h"
 #include "nfpcore/nfp_nsp.h"
 #include "nfp_app.h"
+#include "nfp_main.h"
 #include "nfp_net_ctrl.h"
 #include "nfp_net.h"
 #include "nfp_port.h"
 
-enum nfp_dump_diag {
-	NFP_DUMP_NSP_DIAG = 0,
-};
-
 struct nfp_et_stat {
 	char name[ETH_GSTRING_LEN];
 	int off;
@@ -1066,15 +1064,34 @@ exit_release:
 	return ret;
 }
 
+/* Set the dump flag/level. Calculate the dump length for flag > 0 only (new
+ * TLV-based dumps), since flag 0 (default) calculates the length in
+ * nfp_app_get_dump_flag(), and we need to support triggering a level 0 dump
+ * without setting the flag first, for backward compatibility.
+ */
 static int nfp_app_set_dump(struct net_device *netdev, struct ethtool_dump *val)
 {
 	struct nfp_app *app = nfp_app_from_netdev(netdev);
+	s64 len;
 
 	if (!app)
 		return -EOPNOTSUPP;
 
-	if (val->flag != NFP_DUMP_NSP_DIAG)
-		return -EINVAL;
+	if (val->flag == NFP_DUMP_NSP_DIAG) {
+		app->pf->dump_flag = val->flag;
+		return 0;
+	}
+
+	if (!app->pf->dumpspec)
+		return -EOPNOTSUPP;
+
+	len = nfp_net_dump_calculate_size(app->pf, app->pf->dumpspec,
+					  val->flag);
+	if (len < 0)
+		return len;
+
+	app->pf->dump_flag = val->flag;
+	app->pf->dump_len = len;
 
 	return 0;
 }
@@ -1082,14 +1099,37 @@ static int nfp_app_set_dump(struct net_device *netdev, struct ethtool_dump *val)
 static int
 nfp_app_get_dump_flag(struct net_device *netdev, struct ethtool_dump *dump)
 {
-	return nfp_dump_nsp_diag(nfp_app_from_netdev(netdev), dump, NULL);
+	struct nfp_app *app = nfp_app_from_netdev(netdev);
+
+	if (!app)
+		return -EOPNOTSUPP;
+
+	if (app->pf->dump_flag == NFP_DUMP_NSP_DIAG)
+		return nfp_dump_nsp_diag(app, dump, NULL);
+
+	dump->flag = app->pf->dump_flag;
+	dump->len = app->pf->dump_len;
+
+	return 0;
 }
 
 static int
 nfp_app_get_dump_data(struct net_device *netdev, struct ethtool_dump *dump,
 		      void *buffer)
 {
-	return nfp_dump_nsp_diag(nfp_app_from_netdev(netdev), dump, buffer);
+	struct nfp_app *app = nfp_app_from_netdev(netdev);
+
+	if (!app)
+		return -EOPNOTSUPP;
+
+	if (app->pf->dump_flag == NFP_DUMP_NSP_DIAG)
+		return nfp_dump_nsp_diag(app, dump, buffer);
+
+	dump->flag = app->pf->dump_flag;
+	dump->len = app->pf->dump_len;
+
+	return nfp_net_dump_populate_buffer(app->pf, app->pf->dumpspec, dump,
+					    buffer);
 }
 
 static int nfp_net_set_coalesce(struct net_device *netdev,
@@ -1230,6 +1270,57 @@ static int nfp_net_set_channels(struct net_device *netdev,
 	return nfp_net_set_num_rings(nn, total_rx, total_tx);
 }
 
+static int
+nfp_net_flash_device(struct net_device *netdev, struct ethtool_flash *flash)
+{
+	const struct firmware *fw;
+	struct nfp_app *app;
+	struct nfp_nsp *nsp;
+	struct device *dev;
+	int err;
+
+	if (flash->region != ETHTOOL_FLASH_ALL_REGIONS)
+		return -EOPNOTSUPP;
+
+	app = nfp_app_from_netdev(netdev);
+	if (!app)
+		return -EOPNOTSUPP;
+
+	dev = &app->pdev->dev;
+
+	nsp = nfp_nsp_open(app->cpp);
+	if (IS_ERR(nsp)) {
+		err = PTR_ERR(nsp);
+		dev_err(dev, "Failed to access the NSP: %d\n", err);
+		return err;
+	}
+
+	err = request_firmware_direct(&fw, flash->data, dev);
+	if (err)
+		goto exit_close_nsp;
+
+	dev_info(dev, "Please be patient while writing flash image: %s\n",
+		 flash->data);
+	dev_hold(netdev);
+	rtnl_unlock();
+
+	err = nfp_nsp_write_flash(nsp, fw);
+	if (err < 0) {
+		dev_err(dev, "Flash write failed: %d\n", err);
+		goto exit_rtnl_lock;
+	}
+	dev_info(dev, "Finished writing flash image\n");
+
+exit_rtnl_lock:
+	rtnl_lock();
+	dev_put(netdev);
+	release_firmware(fw);
+
+exit_close_nsp:
+	nfp_nsp_close(nsp);
+	return err;
+}
+
 static const struct ethtool_ops nfp_net_ethtool_ops = {
 	.get_drvinfo		= nfp_net_get_drvinfo,
 	.get_link		= ethtool_op_get_link,
@@ -1240,6 +1331,7 @@ static const struct ethtool_ops nfp_net_ethtool_ops = {
 	.get_sset_count		= nfp_net_get_sset_count,
 	.get_rxnfc		= nfp_net_get_rxnfc,
 	.set_rxnfc		= nfp_net_set_rxnfc,
+	.flash_device		= nfp_net_flash_device,
 	.get_rxfh_indir_size	= nfp_net_get_rxfh_indir_size,
 	.get_rxfh_key_size	= nfp_net_get_rxfh_key_size,
 	.get_rxfh		= nfp_net_get_rxfh,
@@ -1265,6 +1357,7 @@ const struct ethtool_ops nfp_port_ethtool_ops = {
 	.get_strings		= nfp_port_get_strings,
 	.get_ethtool_stats	= nfp_port_get_stats,
 	.get_sset_count		= nfp_port_get_sset_count,
+	.flash_device		= nfp_net_flash_device,
 	.set_dump		= nfp_app_set_dump,
 	.get_dump_flag		= nfp_app_get_dump_flag,
 	.get_dump_data		= nfp_app_get_dump_data,
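The three handlers above plug into the standard ethtool dump sequence:
ETHTOOL_SET_DUMP selects the level, ETHTOOL_GET_DUMP_FLAG reports the
pre-computed length, and ETHTOOL_GET_DUMP_DATA fills the caller's buffer. A
userspace sketch of that flow; the interface name and dump level are
placeholders, and error checking is omitted for brevity.

#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <net/if.h>
#include <linux/ethtool.h>
#include <linux/sockios.h>

int main(void)
{
	struct ethtool_dump flag = { .cmd = ETHTOOL_SET_DUMP, .flag = 1 };
	struct ethtool_dump *data;
	struct ifreq ifr = {0};
	int fd = socket(AF_INET, SOCK_DGRAM, 0);

	strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);	/* placeholder netdev */

	ifr.ifr_data = (void *)&flag;
	ioctl(fd, SIOCETHTOOL, &ifr);		/* -> nfp_app_set_dump() */

	flag.cmd = ETHTOOL_GET_DUMP_FLAG;
	ioctl(fd, SIOCETHTOOL, &ifr);		/* -> nfp_app_get_dump_flag() */

	data = calloc(1, sizeof(*data) + flag.len);
	data->cmd = ETHTOOL_GET_DUMP_DATA;
	data->len = flag.len;
	ifr.ifr_data = (void *)data;
	ioctl(fd, SIOCETHTOOL, &ifr);		/* -> nfp_app_get_dump_data() */

	printf("dumped %u bytes at level %u\n", data->len, data->flag);
	free(data);
	close(fd);
	return 0;
}

This ordering is also why set_dump computes the length eagerly: get_dump_flag
must be able to answer with dump_len before any buffer exists.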
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_main.c b/drivers/net/ethernet/netronome/nfp/nfp_net_main.c
index c505014121c4..15fa47f622aa 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_net_main.c
+++ b/drivers/net/ethernet/netronome/nfp/nfp_net_main.c
@@ -208,12 +208,6 @@ nfp_net_pf_init_vnic(struct nfp_pf *pf, struct nfp_net *nn, unsigned int id)
 {
 	int err;
 
-	/* Get ME clock frequency from ctrl BAR
-	 * XXX for now frequency is hardcoded until we figure out how
-	 * to get the value from nfp-hwinfo into ctrl bar
-	 */
-	nn->me_freq_mhz = 1200;
-
 	err = nfp_net_init(nn);
 	if (err)
 		return err;
@@ -373,7 +367,9 @@ nfp_net_pf_app_init(struct nfp_pf *pf, u8 __iomem *qc_bar, unsigned int stride)
 	if (IS_ERR(pf->app))
 		return PTR_ERR(pf->app);
 
+	mutex_lock(&pf->lock);
 	err = nfp_app_init(pf->app);
+	mutex_unlock(&pf->lock);
 	if (err)
 		goto err_free;
 
@@ -401,7 +397,9 @@ nfp_net_pf_app_init(struct nfp_pf *pf, u8 __iomem *qc_bar, unsigned int stride)
 err_unmap:
 	nfp_cpp_area_release_free(pf->ctrl_vnic_bar);
 err_app_clean:
+	mutex_lock(&pf->lock);
 	nfp_app_clean(pf->app);
+	mutex_unlock(&pf->lock);
 err_free:
 	nfp_app_free(pf->app);
 	pf->app = NULL;
@@ -414,7 +412,11 @@ static void nfp_net_pf_app_clean(struct nfp_pf *pf)
 		nfp_net_pf_free_vnic(pf, pf->ctrl_vnic);
 		nfp_cpp_area_release_free(pf->ctrl_vnic_bar);
 	}
+
+	mutex_lock(&pf->lock);
 	nfp_app_clean(pf->app);
+	mutex_unlock(&pf->lock);
+
 	nfp_app_free(pf->app);
 	pf->app = NULL;
 }
@@ -570,17 +572,6 @@ err_unmap_ctrl:
 	return err;
 }
 
-static void nfp_net_pci_remove_finish(struct nfp_pf *pf)
-{
-	nfp_net_pf_app_stop(pf);
-	/* stop app first, to avoid double free of ctrl vNIC's ddir */
-	nfp_net_debugfs_dir_clean(&pf->ddir);
-
-	nfp_net_pf_free_irqs(pf);
-	nfp_net_pf_app_clean(pf);
-	nfp_net_pci_unmap_mem(pf);
-}
-
 static int
 nfp_net_eth_port_update(struct nfp_cpp *cpp, struct nfp_port *port,
 			struct nfp_eth_table *eth_table)
@@ -655,9 +646,6 @@ int nfp_net_refresh_port_table_sync(struct nfp_pf *pf)
 		nfp_net_pf_free_vnic(pf, nn);
 	}
 
-	if (list_empty(&pf->vnics))
-		nfp_net_pci_remove_finish(pf);
-
 	return 0;
 }
 
@@ -707,6 +695,7 @@ int nfp_net_refresh_eth_port(struct nfp_port *port)
  */
 int nfp_net_pci_probe(struct nfp_pf *pf)
 {
+	struct devlink *devlink = priv_to_devlink(pf);
 	struct nfp_net_fw_version fw_ver;
 	u8 __iomem *ctrl_bar, *qc_bar;
 	int stride;
@@ -720,16 +709,13 @@ int nfp_net_pci_probe(struct nfp_pf *pf)
 		return -EINVAL;
 	}
 
-	mutex_lock(&pf->lock);
 	pf->max_data_vnics = nfp_net_pf_get_num_ports(pf);
-	if ((int)pf->max_data_vnics < 0) {
-		err = pf->max_data_vnics;
-		goto err_unlock;
-	}
+	if ((int)pf->max_data_vnics < 0)
+		return pf->max_data_vnics;
 
 	err = nfp_net_pci_map_mem(pf);
 	if (err)
-		goto err_unlock;
+		return err;
 
 	ctrl_bar = nfp_cpp_area_iomem(pf->data_vnic_bar);
 	qc_bar = nfp_cpp_area_iomem(pf->qc_area);
@@ -768,6 +754,11 @@ int nfp_net_pci_probe(struct nfp_pf *pf)
 	if (err)
 		goto err_unmap;
 
+	err = devlink_register(devlink, &pf->pdev->dev);
+	if (err)
+		goto err_app_clean;
+
+	mutex_lock(&pf->lock);
 	pf->ddir = nfp_net_debugfs_device_add(pf->pdev);
 
 	/* Allocate the vnics and do basic init */
@@ -799,32 +790,39 @@ err_free_vnics:
 	nfp_net_pf_free_vnics(pf);
 err_clean_ddir:
 	nfp_net_debugfs_dir_clean(&pf->ddir);
+	mutex_unlock(&pf->lock);
+	cancel_work_sync(&pf->port_refresh_work);
+	devlink_unregister(devlink);
+err_app_clean:
 	nfp_net_pf_app_clean(pf);
 err_unmap:
 	nfp_net_pci_unmap_mem(pf);
-err_unlock:
-	mutex_unlock(&pf->lock);
-	cancel_work_sync(&pf->port_refresh_work);
 	return err;
 }
 
 void nfp_net_pci_remove(struct nfp_pf *pf)
 {
-	struct nfp_net *nn;
+	struct nfp_net *nn, *next;
 
 	mutex_lock(&pf->lock);
-	if (list_empty(&pf->vnics))
-		goto out;
-
-	list_for_each_entry(nn, &pf->vnics, vnic_list)
-		if (nfp_net_is_data_vnic(nn))
-			nfp_net_pf_clean_vnic(pf, nn);
+	list_for_each_entry_safe(nn, next, &pf->vnics, vnic_list) {
+		if (!nfp_net_is_data_vnic(nn))
+			continue;
+		nfp_net_pf_clean_vnic(pf, nn);
+		nfp_net_pf_free_vnic(pf, nn);
+	}
 
-	nfp_net_pf_free_vnics(pf);
+	nfp_net_pf_app_stop(pf);
+	/* stop app first, to avoid double free of ctrl vNIC's ddir */
+	nfp_net_debugfs_dir_clean(&pf->ddir);
 
-	nfp_net_pci_remove_finish(pf);
-out:
 	mutex_unlock(&pf->lock);
 
+	devlink_unregister(priv_to_devlink(pf));
+
+	nfp_net_pf_free_irqs(pf);
+	nfp_net_pf_app_clean(pf);
+	nfp_net_pci_unmap_mem(pf);
+
 	cancel_work_sync(&pf->port_refresh_work);
 }
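The reshuffled probe path above now registers devlink only after the app is
initialized, and the error labels unwind in strict reverse order. The
goto-unwind idiom in isolation, with every function a placeholder stub rather
than a driver symbol:

int map_mem(void);
void unmap_mem(void);
int app_init(void);
void app_clean(void);
int register_dl(void);

static int demo_probe(void)
{
	int err;

	err = map_mem();
	if (err)
		return err;

	err = app_init();
	if (err)
		goto err_unmap;

	err = register_dl();
	if (err)
		goto err_app_clean;

	return 0;

err_app_clean:
	app_clean();		/* undo app_init(), then fall through */
err_unmap:
	unmap_mem();		/* undo map_mem() */
	return err;
}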
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_repr.c b/drivers/net/ethernet/netronome/nfp/nfp_net_repr.c
index 78b36c67c232..f67da6bde9da 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_net_repr.c
+++ b/drivers/net/ethernet/netronome/nfp/nfp_net_repr.c
@@ -46,6 +46,13 @@
 #include "nfp_net_sriov.h"
 #include "nfp_port.h"
 
+struct net_device *
+nfp_repr_get_locked(struct nfp_app *app, struct nfp_reprs *set, unsigned int id)
+{
+	return rcu_dereference_protected(set->reprs[id],
+					 lockdep_is_held(&app->pf->lock));
+}
+
 static void
 nfp_repr_inc_tx_stats(struct net_device *netdev, unsigned int len,
 		      int tx_status)
@@ -186,6 +193,13 @@ nfp_repr_get_offload_stats(int attr_id, const struct net_device *dev,
 	return -EINVAL;
 }
 
+static int nfp_repr_change_mtu(struct net_device *netdev, int new_mtu)
+{
+	struct nfp_repr *repr = netdev_priv(netdev);
+
+	return nfp_app_change_mtu(repr->app, netdev, new_mtu);
+}
+
 static netdev_tx_t nfp_repr_xmit(struct sk_buff *skb, struct net_device *netdev)
 {
 	struct nfp_repr *repr = netdev_priv(netdev);
@@ -240,6 +254,7 @@ const struct net_device_ops nfp_repr_netdev_ops = {
 	.ndo_open		= nfp_repr_open,
 	.ndo_stop		= nfp_repr_stop,
 	.ndo_start_xmit		= nfp_repr_xmit,
+	.ndo_change_mtu		= nfp_repr_change_mtu,
 	.ndo_get_stats64	= nfp_repr_get_stats64,
 	.ndo_has_offload_stats	= nfp_repr_has_offload_stats,
 	.ndo_get_offload_stats	= nfp_repr_get_offload_stats,
@@ -336,6 +351,8 @@ struct net_device *nfp_repr_alloc(struct nfp_app *app)
 	if (!netdev)
 		return NULL;
 
+	netif_carrier_off(netdev);
+
 	repr = netdev_priv(netdev);
 	repr->netdev = netdev;
 	repr->app = app;
@@ -359,29 +376,45 @@ static void nfp_repr_clean_and_free(struct nfp_repr *repr)
 	nfp_repr_free(repr);
 }
 
-void nfp_reprs_clean_and_free(struct nfp_reprs *reprs)
+void nfp_reprs_clean_and_free(struct nfp_app *app, struct nfp_reprs *reprs)
 {
+	struct net_device *netdev;
 	unsigned int i;
 
-	for (i = 0; i < reprs->num_reprs; i++)
-		if (reprs->reprs[i])
-			nfp_repr_clean_and_free(netdev_priv(reprs->reprs[i]));
+	for (i = 0; i < reprs->num_reprs; i++) {
+		netdev = nfp_repr_get_locked(app, reprs, i);
+		if (netdev)
+			nfp_repr_clean_and_free(netdev_priv(netdev));
+	}
 
 	kfree(reprs);
 }
 
 void
-nfp_reprs_clean_and_free_by_type(struct nfp_app *app,
-				 enum nfp_repr_type type)
+nfp_reprs_clean_and_free_by_type(struct nfp_app *app, enum nfp_repr_type type)
 {
+	struct net_device *netdev;
 	struct nfp_reprs *reprs;
+	int i;
 
-	reprs = nfp_app_reprs_set(app, type, NULL);
+	reprs = rcu_dereference_protected(app->reprs[type],
+					  lockdep_is_held(&app->pf->lock));
 	if (!reprs)
 		return;
 
+	/* Preclean must happen before we remove the reprs reference from the
+	 * app below.
+	 */
+	for (i = 0; i < reprs->num_reprs; i++) {
+		netdev = nfp_repr_get_locked(app, reprs, i);
+		if (netdev)
+			nfp_app_repr_preclean(app, netdev);
+	}
+
+	reprs = nfp_app_reprs_set(app, type, NULL);
+
 	synchronize_rcu();
-	nfp_reprs_clean_and_free(reprs);
+	nfp_reprs_clean_and_free(app, reprs);
 }
 
 struct nfp_reprs *nfp_reprs_alloc(unsigned int num_reprs)
@@ -399,47 +432,29 @@ struct nfp_reprs *nfp_reprs_alloc(unsigned int num_reprs)
 
 int nfp_reprs_resync_phys_ports(struct nfp_app *app)
 {
-	struct nfp_reprs *reprs, *old_reprs;
+	struct net_device *netdev;
+	struct nfp_reprs *reprs;
 	struct nfp_repr *repr;
 	int i;
 
-	old_reprs =
-		rcu_dereference_protected(app->reprs[NFP_REPR_TYPE_PHYS_PORT],
-					  lockdep_is_held(&app->pf->lock));
-	if (!old_reprs)
-		return 0;
-
-	reprs = nfp_reprs_alloc(old_reprs->num_reprs);
+	reprs = nfp_reprs_get_locked(app, NFP_REPR_TYPE_PHYS_PORT);
 	if (!reprs)
-		return -ENOMEM;
-
-	for (i = 0; i < old_reprs->num_reprs; i++) {
-		if (!old_reprs->reprs[i])
-			continue;
-
-		repr = netdev_priv(old_reprs->reprs[i]);
-		if (repr->port->type == NFP_PORT_INVALID)
-			continue;
-
-		reprs->reprs[i] = old_reprs->reprs[i];
-	}
-
-	old_reprs = nfp_app_reprs_set(app, NFP_REPR_TYPE_PHYS_PORT, reprs);
-	synchronize_rcu();
+		return 0;
 
-	/* Now we free up removed representors */
-	for (i = 0; i < old_reprs->num_reprs; i++) {
-		if (!old_reprs->reprs[i])
+	for (i = 0; i < reprs->num_reprs; i++) {
+		netdev = nfp_repr_get_locked(app, reprs, i);
+		if (!netdev)
 			continue;
 
-		repr = netdev_priv(old_reprs->reprs[i]);
+		repr = netdev_priv(netdev);
 		if (repr->port->type != NFP_PORT_INVALID)
 			continue;
 
-		nfp_app_repr_stop(app, repr);
+		nfp_app_repr_preclean(app, netdev);
+		rcu_assign_pointer(reprs->reprs[i], NULL);
+		synchronize_rcu();
 		nfp_repr_clean(repr);
 	}
 
-	kfree(old_reprs);
 	return 0;
 }
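The representor teardown above follows the canonical RCU unpublish-then-free
discipline: look the pointer up under the writer lock, clear the __rcu slot,
wait out readers with synchronize_rcu(), and only then free. A minimal
kernel-style sketch of that pattern; demo_set and the caller-held mutex are
illustrative, not driver types.

#include <linux/mutex.h>
#include <linux/netdevice.h>
#include <linux/rcupdate.h>

struct demo_set {
	unsigned int num;
	struct net_device __rcu *slots[0];
};

static void demo_remove_slot(struct demo_set *set, unsigned int id,
			     struct mutex *lock)
{
	struct net_device *netdev;

	/* Writer-side access, cf. nfp_repr_get_locked() */
	netdev = rcu_dereference_protected(set->slots[id],
					   lockdep_is_held(lock));
	if (!netdev)
		return;

	rcu_assign_pointer(set->slots[id], NULL);	/* unpublish */
	synchronize_rcu();				/* wait for readers */
	free_netdev(netdev);				/* now safe to free */
}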
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_repr.h b/drivers/net/ethernet/netronome/nfp/nfp_net_repr.h
index 5d4d897bc9c6..a621e8ff528e 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_net_repr.h
+++ b/drivers/net/ethernet/netronome/nfp/nfp_net_repr.h
@@ -35,6 +35,7 @@
 #define NFP_NET_REPR_H
 
 struct metadata_dst;
+struct nfp_app;
 struct nfp_net;
 struct nfp_port;
 
@@ -47,7 +48,7 @@ struct nfp_port;
  */
 struct nfp_reprs {
 	unsigned int num_reprs;
-	struct net_device *reprs[0];
+	struct net_device __rcu *reprs[0];
 };
 
 /**
@@ -89,6 +90,7 @@ struct nfp_repr {
  * @NFP_REPR_TYPE_PHYS_PORT:	external NIC port
  * @NFP_REPR_TYPE_PF:		physical function
  * @NFP_REPR_TYPE_VF:		virtual function
+ * @__NFP_REPR_TYPE_MAX:	number of representor types
  */
 enum nfp_repr_type {
 	NFP_REPR_TYPE_PHYS_PORT,
@@ -113,16 +115,18 @@ static inline int nfp_repr_get_port_id(struct net_device *netdev)
 	return priv->dst->u.port_info.port_id;
 }
 
+struct net_device *
+nfp_repr_get_locked(struct nfp_app *app, struct nfp_reprs *set,
+		    unsigned int id);
+
 void nfp_repr_inc_rx_stats(struct net_device *netdev, unsigned int len);
 int nfp_repr_init(struct nfp_app *app, struct net_device *netdev,
 		  u32 cmsg_port_id, struct nfp_port *port,
 		  struct net_device *pf_netdev);
 struct net_device *nfp_repr_alloc(struct nfp_app *app);
-void
-nfp_reprs_clean_and_free(struct nfp_reprs *reprs);
-void
-nfp_reprs_clean_and_free_by_type(struct nfp_app *app,
-				 enum nfp_repr_type type);
+void nfp_reprs_clean_and_free(struct nfp_app *app, struct nfp_reprs *reprs);
+void nfp_reprs_clean_and_free_by_type(struct nfp_app *app,
+				      enum nfp_repr_type type);
 struct nfp_reprs *nfp_reprs_alloc(unsigned int num_reprs);
 int nfp_reprs_resync_phys_ports(struct nfp_app *app);
 
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_netvf_main.c b/drivers/net/ethernet/netronome/nfp/nfp_netvf_main.c
index c879626e035b..b802a1d55449 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_netvf_main.c
+++ b/drivers/net/ethernet/netronome/nfp/nfp_netvf_main.c
@@ -277,12 +277,6 @@ static int nfp_netvf_pci_probe(struct pci_dev *pdev,
 	}
 	nfp_net_irqs_assign(nn, vf->irq_entries, num_irqs);
 
-	/* Get ME clock frequency from ctrl BAR
-	 * XXX for now frequency is hardcoded until we figure out how
-	 * to get the value from nfp-hwinfo into ctrl bar
-	 */
-	nn->me_freq_mhz = 1200;
-
 	err = nfp_net_init(nn);
 	if (err)
 		goto err_irqs_disable;
diff --git a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp.h b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp.h
index 3ce51f03126f..ced62d112aa2 100644
--- a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp.h
+++ b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp.h
@@ -49,6 +49,8 @@
 struct nfp_hwinfo;
 struct nfp_hwinfo *nfp_hwinfo_read(struct nfp_cpp *cpp);
 const char *nfp_hwinfo_lookup(struct nfp_hwinfo *hwinfo, const char *lookup);
+char *nfp_hwinfo_get_packed_strings(struct nfp_hwinfo *hwinfo);
+u32 nfp_hwinfo_get_packed_str_size(struct nfp_hwinfo *hwinfo);
 
 /* Implemented in nfp_nsp.c, low level functions */
 
diff --git a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_cpp.h b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_cpp.h
index 5798adc57cbc..c8f2c064cce3 100644
--- a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_cpp.h
+++ b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_cpp.h
@@ -242,6 +242,7 @@ int nfp_cpp_area_read(struct nfp_cpp_area *area, unsigned long offset,
 		      void *buffer, size_t length);
 int nfp_cpp_area_write(struct nfp_cpp_area *area, unsigned long offset,
 		       const void *buffer, size_t length);
+size_t nfp_cpp_area_size(struct nfp_cpp_area *area);
 const char *nfp_cpp_area_name(struct nfp_cpp_area *cpp_area);
 void *nfp_cpp_area_priv(struct nfp_cpp_area *cpp_area);
 struct nfp_cpp *nfp_cpp_area_cpp(struct nfp_cpp_area *cpp_area);
diff --git a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_cppcore.c b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_cppcore.c
index 04dd5758ecf5..ef30597aa319 100644
--- a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_cppcore.c
+++ b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_cppcore.c
@@ -372,8 +372,7 @@ nfp_cpp_area_alloc(struct nfp_cpp *cpp, u32 dest,
  * that it can be accessed directly.
  *
  * NOTE: @address and @size must be 32-bit aligned values.
- *
- * NOTE: The area must also be 'released' when the structure is freed.
+ * The area must also be 'released' when the structure is freed.
  *
  * Return: NFP CPP Area handle, or NULL
  */
@@ -536,8 +535,7 @@ void nfp_cpp_area_release_free(struct nfp_cpp_area *area)
  * Read data from indicated CPP region.
  *
  * NOTE: @offset and @length must be 32-bit aligned values.
- *
- * NOTE: Area must have been locked down with an 'acquire'.
+ * Area must have been locked down with an 'acquire'.
  *
  * Return: length of io, or -ERRNO
  */
@@ -558,8 +556,7 @@ int nfp_cpp_area_read(struct nfp_cpp_area *area,
  * Write data to indicated CPP region.
  *
  * NOTE: @offset and @length must be 32-bit aligned values.
- *
- * NOTE: Area must have been locked down with an 'acquire'.
+ * Area must have been locked down with an 'acquire'.
  *
  * Return: length of io, or -ERRNO
  */
@@ -571,6 +568,17 @@ int nfp_cpp_area_write(struct nfp_cpp_area *area,
 }
 
 /**
+ * nfp_cpp_area_size() - return size of a CPP area
+ * @cpp_area:	CPP area handle
+ *
+ * Return: Size of the area
+ */
+size_t nfp_cpp_area_size(struct nfp_cpp_area *cpp_area)
+{
+	return cpp_area->size;
+}
+
+/**
  * nfp_cpp_area_name() - return name of a CPP area
  * @cpp_area:	CPP area handle
  *
@@ -666,18 +674,20 @@ void __iomem *nfp_cpp_area_iomem(struct nfp_cpp_area *area)
  * @offset:	Offset into area
  * @value:	Pointer to read buffer
  *
- * Return: length of the io, or -ERRNO
+ * Return: 0 on success, or -ERRNO
  */
 int nfp_cpp_area_readl(struct nfp_cpp_area *area,
 		       unsigned long offset, u32 *value)
 {
 	u8 tmp[4];
-	int err;
+	int n;
 
-	err = nfp_cpp_area_read(area, offset, &tmp, sizeof(tmp));
-	*value = get_unaligned_le32(tmp);
+	n = nfp_cpp_area_read(area, offset, &tmp, sizeof(tmp));
+	if (n != sizeof(tmp))
+		return n < 0 ? n : -EIO;
 
-	return err;
+	*value = get_unaligned_le32(tmp);
+	return 0;
 }
 
 /**
@@ -686,16 +696,18 @@ int nfp_cpp_area_readl(struct nfp_cpp_area *area,
  * @offset:	Offset into area
  * @value:	Value to write
  *
- * Return: length of the io, or -ERRNO
+ * Return: 0 on success, or -ERRNO
  */
 int nfp_cpp_area_writel(struct nfp_cpp_area *area,
 			unsigned long offset, u32 value)
 {
 	u8 tmp[4];
+	int n;
 
 	put_unaligned_le32(value, tmp);
+	n = nfp_cpp_area_write(area, offset, &tmp, sizeof(tmp));
 
-	return nfp_cpp_area_write(area, offset, &tmp, sizeof(tmp));
+	return n == sizeof(tmp) ? 0 : n < 0 ? n : -EIO;
 }
 
 /**
@@ -704,18 +716,20 @@ int nfp_cpp_area_writel(struct nfp_cpp_area *area,
  * @offset:	Offset into area
  * @value:	Pointer to read buffer
  *
- * Return: length of the io, or -ERRNO
+ * Return: 0 on success, or -ERRNO
  */
 int nfp_cpp_area_readq(struct nfp_cpp_area *area,
 		       unsigned long offset, u64 *value)
 {
 	u8 tmp[8];
-	int err;
+	int n;
 
-	err = nfp_cpp_area_read(area, offset, &tmp, sizeof(tmp));
-	*value = get_unaligned_le64(tmp);
+	n = nfp_cpp_area_read(area, offset, &tmp, sizeof(tmp));
+	if (n != sizeof(tmp))
+		return n < 0 ? n : -EIO;
 
-	return err;
+	*value = get_unaligned_le64(tmp);
+	return 0;
 }
 
 /**
@@ -724,16 +738,18 @@ int nfp_cpp_area_readq(struct nfp_cpp_area *area,
  * @offset:	Offset into area
  * @value:	Value to write
  *
- * Return: length of the io, or -ERRNO
+ * Return: 0 on success, or -ERRNO
  */
 int nfp_cpp_area_writeq(struct nfp_cpp_area *area,
 			unsigned long offset, u64 value)
 {
 	u8 tmp[8];
+	int n;
 
 	put_unaligned_le64(value, tmp);
+	n = nfp_cpp_area_write(area, offset, &tmp, sizeof(tmp));
 
-	return nfp_cpp_area_write(area, offset, &tmp, sizeof(tmp));
+	return n == sizeof(tmp) ? 0 : n < 0 ? n : -EIO;
 }
 
 /**
@@ -1072,7 +1088,7 @@ static u32 nfp_xpb_to_cpp(struct nfp_cpp *cpp, u32 *xpb_addr)
  * @xpb_addr:	Address for operation
  * @value:	Pointer to read buffer
  *
- * Return: length of the io, or -ERRNO
+ * Return: 0 on success, or -ERRNO
  */
 int nfp_xpb_readl(struct nfp_cpp *cpp, u32 xpb_addr, u32 *value)
 {
@@ -1087,7 +1103,7 @@ int nfp_xpb_readl(struct nfp_cpp *cpp, u32 xpb_addr, u32 *value)
  * @xpb_addr:	Address for operation
  * @value:	Value to write
  *
- * Return: length of the io, or -ERRNO
+ * Return: 0 on success, or -ERRNO
  */
 int nfp_xpb_writel(struct nfp_cpp *cpp, u32 xpb_addr, u32 value)
 {
@@ -1105,7 +1121,7 @@ int nfp_xpb_writel(struct nfp_cpp *cpp, u32 xpb_addr, u32 value)
  *
  * KERNEL: This operation is safe to call in interrupt or softirq context.
  *
- * Return: length of the io, or -ERRNO
+ * Return: 0 on success, or -ERRNO
  */
 int nfp_xpb_writelm(struct nfp_cpp *cpp, u32 xpb_tgt,
 		    u32 mask, u32 value)
diff --git a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_cpplib.c b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_cpplib.c
index ab86bceb93f2..20bad05e2e92 100644
--- a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_cpplib.c
+++ b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_cpplib.c
@@ -64,18 +64,20 @@
  * @address:	Address for operation
  * @value:	Pointer to read buffer
  *
- * Return: length of the io, or -ERRNO
+ * Return: 0 on success, or -ERRNO
  */
 int nfp_cpp_readl(struct nfp_cpp *cpp, u32 cpp_id,
 		  unsigned long long address, u32 *value)
 {
 	u8 tmp[4];
-	int err;
+	int n;
 
-	err = nfp_cpp_read(cpp, cpp_id, address, tmp, sizeof(tmp));
-	*value = get_unaligned_le32(tmp);
+	n = nfp_cpp_read(cpp, cpp_id, address, tmp, sizeof(tmp));
+	if (n != sizeof(tmp))
+		return n < 0 ? n : -EIO;
 
-	return err;
+	*value = get_unaligned_le32(tmp);
+	return 0;
 }
 
 /**
@@ -85,15 +87,18 @@ int nfp_cpp_readl(struct nfp_cpp *cpp, u32 cpp_id,
  * @address:	Address for operation
  * @value:	Value to write
  *
- * Return: length of the io, or -ERRNO
+ * Return: 0 on success, or -ERRNO
  */
 int nfp_cpp_writel(struct nfp_cpp *cpp, u32 cpp_id,
 		   unsigned long long address, u32 value)
 {
 	u8 tmp[4];
+	int n;
 
 	put_unaligned_le32(value, tmp);
-	return nfp_cpp_write(cpp, cpp_id, address, tmp, sizeof(tmp));
+	n = nfp_cpp_write(cpp, cpp_id, address, tmp, sizeof(tmp));
+
+	return n == sizeof(tmp) ? 0 : n < 0 ? n : -EIO;
 }
 
 /**
@@ -103,18 +108,20 @@ int nfp_cpp_writel(struct nfp_cpp *cpp, u32 cpp_id,
  * @address:	Address for operation
  * @value:	Pointer to read buffer
  *
- * Return: length of the io, or -ERRNO
+ * Return: 0 on success, or -ERRNO
  */
 int nfp_cpp_readq(struct nfp_cpp *cpp, u32 cpp_id,
 		  unsigned long long address, u64 *value)
 {
 	u8 tmp[8];
-	int err;
+	int n;
 
-	err = nfp_cpp_read(cpp, cpp_id, address, tmp, sizeof(tmp));
-	*value = get_unaligned_le64(tmp);
+	n = nfp_cpp_read(cpp, cpp_id, address, tmp, sizeof(tmp));
+	if (n != sizeof(tmp))
+		return n < 0 ? n : -EIO;
 
-	return err;
+	*value = get_unaligned_le64(tmp);
+	return 0;
 }
 
 /**
@@ -124,15 +131,18 @@ int nfp_cpp_readq(struct nfp_cpp *cpp, u32 cpp_id,
  * @address:	Address for operation
  * @value:	Value to write
  *
- * Return: length of the io, or -ERRNO
+ * Return: 0 on success, or -ERRNO
  */
 int nfp_cpp_writeq(struct nfp_cpp *cpp, u32 cpp_id,
 		   unsigned long long address, u64 value)
 {
 	u8 tmp[8];
+	int n;
 
 	put_unaligned_le64(value, tmp);
-	return nfp_cpp_write(cpp, cpp_id, address, tmp, sizeof(tmp));
+	n = nfp_cpp_write(cpp, cpp_id, address, tmp, sizeof(tmp));
+
+	return n == sizeof(tmp) ? 0 : n < 0 ? n : -EIO;
 }
 
 /* NOTE: This code should not use nfp_xpb_* functions,
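Every conversion in this file and the previous one applies the same rule: the
bulk nfp_cpp_read()/nfp_cpp_write() helpers return a byte count, while the
sized accessors now return 0 on success and map a short transfer to -EIO.
That mapping, written once as a generic helper (a sketch, not a driver
function):

#include <linux/errno.h>
#include <linux/types.h>

static inline int xfer_to_errno(int n, size_t expected)
{
	if (n == (int)expected)
		return 0;
	return n < 0 ? n : -EIO;	/* short transfer reported as -EIO */
}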
diff --git a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_hwinfo.c b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_hwinfo.c
index 4f24aff1e772..063a9a6243d6 100644
--- a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_hwinfo.c
+++ b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_hwinfo.c
@@ -302,3 +302,13 @@ const char *nfp_hwinfo_lookup(struct nfp_hwinfo *hwinfo, const char *lookup)
 
 	return NULL;
 }
+
+char *nfp_hwinfo_get_packed_strings(struct nfp_hwinfo *hwinfo)
+{
+	return hwinfo->data;
+}
+
+u32 nfp_hwinfo_get_packed_str_size(struct nfp_hwinfo *hwinfo)
+{
+	return le32_to_cpu(hwinfo->size) - sizeof(u32);
+}
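The two accessors added above expose the raw hwinfo blob, which
nfp_hwinfo_lookup() and nfp_dump_hwinfo_field() treat as alternating
NUL-terminated key/value strings. A userspace sketch of walking such a blob,
assuming that key\0value\0 layout:

#include <stdio.h>
#include <string.h>

static void walk_packed_strings(const char *data, size_t size)
{
	size_t off = 0;

	while (off < size && data[off]) {
		const char *key = &data[off];
		const char *val;

		off += strlen(key) + 1;		/* skip key and its NUL */
		if (off >= size)
			break;
		val = &data[off];
		off += strlen(val) + 1;		/* skip value and its NUL */
		printf("%s=%s\n", key, val);
	}
}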
diff --git a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp.c b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp.c
index 14a6d1ba51a9..39abac678b71 100644
--- a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp.c
+++ b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp.c
@@ -51,6 +51,9 @@
 #include "nfp_cpp.h"
 #include "nfp_nsp.h"
 
+#define NFP_NSP_TIMEOUT_DEFAULT	30
+#define NFP_NSP_TIMEOUT_BOOT	30
+
 /* Offsets relative to the CSR base */
 #define NSP_STATUS		0x00
 #define   NSP_STATUS_MAGIC	GENMASK_ULL(63, 48)
@@ -93,6 +96,7 @@ enum nfp_nsp_cmd {
 	SPCODE_FW_LOAD		= 6, /* Load fw from buffer, len in option */
 	SPCODE_ETH_RESCAN	= 7, /* Rescan ETHs, write ETH_TABLE to buf */
 	SPCODE_ETH_CONTROL	= 8, /* Update media config from buffer */
+	SPCODE_NSP_WRITE_FLASH	= 11, /* Load and flash image from buffer */
 	SPCODE_NSP_SENSORS	= 12, /* Read NSP sensor(s) */
 	SPCODE_NSP_IDENTIFY	= 13, /* Read NSP version */
 };
@@ -260,10 +264,10 @@ u16 nfp_nsp_get_abi_ver_minor(struct nfp_nsp *state)
 }
 
 static int
-nfp_nsp_wait_reg(struct nfp_cpp *cpp, u64 *reg,
-		 u32 nsp_cpp, u64 addr, u64 mask, u64 val)
+nfp_nsp_wait_reg(struct nfp_cpp *cpp, u64 *reg, u32 nsp_cpp, u64 addr,
+		 u64 mask, u64 val, u32 timeout_sec)
 {
-	const unsigned long wait_until = jiffies + 30 * HZ;
+	const unsigned long wait_until = jiffies + timeout_sec * HZ;
 	int err;
 
 	for (;;) {
@@ -285,12 +289,13 @@ nfp_nsp_wait_reg(struct nfp_cpp *cpp, u64 *reg,
 }
 
 /**
- * nfp_nsp_command() - Execute a command on the NFP Service Processor
+ * __nfp_nsp_command() - Execute a command on the NFP Service Processor
  * @state:	NFP SP state
  * @code:	NFP SP Command Code
  * @option:	NFP SP Command Argument
  * @buff_cpp:	NFP SP Buffer CPP Address info
  * @buff_addr:	NFP SP Buffer Host address
+ * @timeout_sec: Timeout value to wait for completion in seconds
  *
  * Return: 0 for success with no result
  *
@@ -300,10 +305,11 @@ nfp_nsp_wait_reg(struct nfp_cpp *cpp, u64 *reg,
  *	-ENODEV if the NSP is not a supported model
  *	-EBUSY if the NSP is stuck
  *	-EINTR if interrupted while waiting for completion
- *	-ETIMEDOUT if the NSP took longer than 30 seconds to complete
+ *	-ETIMEDOUT if the NSP took longer than @timeout_sec seconds to complete
  */
-static int nfp_nsp_command(struct nfp_nsp *state, u16 code, u32 option,
-			   u32 buff_cpp, u64 buff_addr)
+static int
+__nfp_nsp_command(struct nfp_nsp *state, u16 code, u32 option, u32 buff_cpp,
+		  u64 buff_addr, u32 timeout_sec)
 {
 	u64 reg, ret_val, nsp_base, nsp_buffer, nsp_status, nsp_command;
 	struct nfp_cpp *cpp = state->cpp;
@@ -341,8 +347,8 @@ static int nfp_nsp_command(struct nfp_nsp *state, u16 code, u32 option,
 		return err;
 
 	/* Wait for NSP_COMMAND_START to go to 0 */
-	err = nfp_nsp_wait_reg(cpp, &reg,
-			       nsp_cpp, nsp_command, NSP_COMMAND_START, 0);
+	err = nfp_nsp_wait_reg(cpp, &reg, nsp_cpp, nsp_command,
+			       NSP_COMMAND_START, 0, NFP_NSP_TIMEOUT_DEFAULT);
 	if (err) {
 		nfp_err(cpp, "Error %d waiting for code 0x%04x to start\n",
 			err, code);
@@ -350,8 +356,8 @@ static int nfp_nsp_command(struct nfp_nsp *state, u16 code, u32 option,
 	}
 
 	/* Wait for NSP_STATUS_BUSY to go to 0 */
-	err = nfp_nsp_wait_reg(cpp, &reg,
-			       nsp_cpp, nsp_status, NSP_STATUS_BUSY, 0);
+	err = nfp_nsp_wait_reg(cpp, &reg, nsp_cpp, nsp_status, NSP_STATUS_BUSY,
+			       0, timeout_sec);
 	if (err) {
 		nfp_err(cpp, "Error %d waiting for code 0x%04x to complete\n",
 			err, code);
@@ -374,9 +380,18 @@ static int nfp_nsp_command(struct nfp_nsp *state, u16 code, u32 option,
 	return ret_val;
 }
 
-static int nfp_nsp_command_buf(struct nfp_nsp *nsp, u16 code, u32 option,
-			       const void *in_buf, unsigned int in_size,
-			       void *out_buf, unsigned int out_size)
+static int
+nfp_nsp_command(struct nfp_nsp *state, u16 code, u32 option, u32 buff_cpp,
+		u64 buff_addr)
+{
+	return __nfp_nsp_command(state, code, option, buff_cpp, buff_addr,
+				 NFP_NSP_TIMEOUT_DEFAULT);
+}
+
+static int
+__nfp_nsp_command_buf(struct nfp_nsp *nsp, u16 code, u32 option,
+		      const void *in_buf, unsigned int in_size, void *out_buf,
+		      unsigned int out_size, u32 timeout_sec)
 {
 	struct nfp_cpp *cpp = nsp->cpp;
 	unsigned int max_size;
@@ -429,7 +444,8 @@ static int nfp_nsp_command_buf(struct nfp_nsp *nsp, u16 code, u32 option,
 			return err;
 	}
 
-	ret = nfp_nsp_command(nsp, code, option, cpp_id, cpp_buf);
+	ret = __nfp_nsp_command(nsp, code, option, cpp_id, cpp_buf,
+				timeout_sec);
 	if (ret < 0)
 		return ret;
 
@@ -442,12 +458,23 @@ static int nfp_nsp_command_buf(struct nfp_nsp *nsp, u16 code, u32 option,
 	return ret;
 }
 
+static int
+nfp_nsp_command_buf(struct nfp_nsp *nsp, u16 code, u32 option,
+		    const void *in_buf, unsigned int in_size, void *out_buf,
+		    unsigned int out_size)
+{
+	return __nfp_nsp_command_buf(nsp, code, option, in_buf, in_size,
+				     out_buf, out_size,
+				     NFP_NSP_TIMEOUT_DEFAULT);
+}
+
 int nfp_nsp_wait(struct nfp_nsp *state)
 {
-	const unsigned long wait_until = jiffies + 30 * HZ;
+	const unsigned long wait_until = jiffies + NFP_NSP_TIMEOUT_BOOT * HZ;
 	int err;
 
-	nfp_dbg(state->cpp, "Waiting for NSP to respond (30 sec max).\n");
+	nfp_dbg(state->cpp, "Waiting for NSP to respond (%u sec max).\n",
+		NFP_NSP_TIMEOUT_BOOT);
 
 	for (;;) {
 		const unsigned long start_time = jiffies;
@@ -488,6 +515,17 @@ int nfp_nsp_load_fw(struct nfp_nsp *state, const struct firmware *fw)
 				   fw->size, NULL, 0);
 }
 
+int nfp_nsp_write_flash(struct nfp_nsp *state, const struct firmware *fw)
+{
+	/* Flashing is specified to take at most 70s, so allow an additional
+	 * safety factor on top of that spec time.
+	 */
+	u32 timeout_sec = 2.5 * 70;
+
+	return __nfp_nsp_command_buf(state, SPCODE_NSP_WRITE_FLASH, fw->size,
+				     fw->data, fw->size, NULL, 0, timeout_sec);
+}
+
 int nfp_nsp_read_eth_table(struct nfp_nsp *state, void *buf, unsigned int size)
 {
 	return nfp_nsp_command_buf(state, SPCODE_ETH_RESCAN, size, NULL, 0,
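The plumbing above threads a per-command timeout down to nfp_nsp_wait_reg(),
so the flash write can wait 2.5 times the 70s spec time while every other
command keeps NFP_NSP_TIMEOUT_DEFAULT. The underlying jiffies polling idiom
as a standalone sketch, with an illustrative predicate callback:

#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/jiffies.h>
#include <linux/types.h>

static int poll_until(bool (*done)(void *arg), void *arg, u32 timeout_sec)
{
	const unsigned long wait_until = jiffies + timeout_sec * HZ;

	for (;;) {
		if (done(arg))
			return 0;
		if (time_after(jiffies, wait_until))
			return -ETIMEDOUT;	/* exceeded timeout_sec */
		msleep(25);
	}
}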
diff --git a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp.h b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp.h
index 650ca1a5bd21..e983c9d7f86c 100644
--- a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp.h
+++ b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp.h
@@ -48,6 +48,7 @@ u16 nfp_nsp_get_abi_ver_minor(struct nfp_nsp *state);
 int nfp_nsp_wait(struct nfp_nsp *state);
 int nfp_nsp_device_soft_reset(struct nfp_nsp *state);
 int nfp_nsp_load_fw(struct nfp_nsp *state, const struct firmware *fw);
+int nfp_nsp_write_flash(struct nfp_nsp *state, const struct firmware *fw);
 int nfp_nsp_mac_reinit(struct nfp_nsp *state);
 
 static inline bool nfp_nsp_has_mac_reinit(struct nfp_nsp *state)
diff --git a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_rtsym.c b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_rtsym.c
index ecda474ac7c3..46107aefad1c 100644
--- a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_rtsym.c
+++ b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_rtsym.c
@@ -277,10 +277,6 @@ u64 nfp_rtsym_read_le(struct nfp_rtsym_table *rtbl, const char *name,
 		break;
 	}
 
-	if (err == sym->size)
-		err = 0;
-	else if (err >= 0)
-		err = -EIO;
 exit:
 	if (error)
 		*error = err;
diff --git a/drivers/net/ethernet/nvidia/forcedeth.c b/drivers/net/ethernet/nvidia/forcedeth.c
index 481876b5424c..66c665d0b926 100644
--- a/drivers/net/ethernet/nvidia/forcedeth.c
+++ b/drivers/net/ethernet/nvidia/forcedeth.c
@@ -793,9 +793,9 @@ struct fe_priv {
 	/* rx specific fields.
 	 * Locking: Within irq handler or disable_irq+spin_lock(&np->lock);
 	 */
-	union ring_type get_rx, put_rx, first_rx, last_rx;
+	union ring_type get_rx, put_rx, last_rx;
 	struct nv_skb_map *get_rx_ctx, *put_rx_ctx;
-	struct nv_skb_map *first_rx_ctx, *last_rx_ctx;
+	struct nv_skb_map *last_rx_ctx;
 	struct nv_skb_map *rx_skb;
 
 	union ring_type rx_ring;
@@ -822,9 +822,9 @@ struct fe_priv {
 	/*
 	 * tx specific fields.
 	 */
-	union ring_type get_tx, put_tx, first_tx, last_tx;
+	union ring_type get_tx, put_tx, last_tx;
 	struct nv_skb_map *get_tx_ctx, *put_tx_ctx;
-	struct nv_skb_map *first_tx_ctx, *last_tx_ctx;
+	struct nv_skb_map *last_tx_ctx;
 	struct nv_skb_map *tx_skb;
 
 	union ring_type tx_ring;
@@ -1812,12 +1812,12 @@ static int nv_alloc_rx(struct net_device *dev)
 	struct ring_desc *less_rx;
 
 	less_rx = np->get_rx.orig;
-	if (less_rx-- == np->first_rx.orig)
+	if (less_rx-- == np->rx_ring.orig)
 		less_rx = np->last_rx.orig;
 
 	while (np->put_rx.orig != less_rx) {
 		struct sk_buff *skb = netdev_alloc_skb(dev, np->rx_buf_sz + NV_RX_ALLOC_PAD);
-		if (skb) {
+		if (likely(skb)) {
 			np->put_rx_ctx->skb = skb;
 			np->put_rx_ctx->dma = dma_map_single(&np->pci_dev->dev,
 							     skb->data,
@@ -1833,9 +1833,9 @@ static int nv_alloc_rx(struct net_device *dev)
 			wmb();
 			np->put_rx.orig->flaglen = cpu_to_le32(np->rx_buf_sz | NV_RX_AVAIL);
 			if (unlikely(np->put_rx.orig++ == np->last_rx.orig))
-				np->put_rx.orig = np->first_rx.orig;
+				np->put_rx.orig = np->rx_ring.orig;
 			if (unlikely(np->put_rx_ctx++ == np->last_rx_ctx))
-				np->put_rx_ctx = np->first_rx_ctx;
+				np->put_rx_ctx = np->rx_skb;
 		} else {
 packet_dropped:
 			u64_stats_update_begin(&np->swstats_rx_syncp);
@@ -1853,12 +1853,12 @@ static int nv_alloc_rx_optimized(struct net_device *dev)
 	struct ring_desc_ex *less_rx;
 
 	less_rx = np->get_rx.ex;
-	if (less_rx-- == np->first_rx.ex)
+	if (less_rx-- == np->rx_ring.ex)
 		less_rx = np->last_rx.ex;
 
 	while (np->put_rx.ex != less_rx) {
 		struct sk_buff *skb = netdev_alloc_skb(dev, np->rx_buf_sz + NV_RX_ALLOC_PAD);
-		if (skb) {
+		if (likely(skb)) {
 			np->put_rx_ctx->skb = skb;
 			np->put_rx_ctx->dma = dma_map_single(&np->pci_dev->dev,
 							     skb->data,
@@ -1875,9 +1875,9 @@ static int nv_alloc_rx_optimized(struct net_device *dev)
 			wmb();
 			np->put_rx.ex->flaglen = cpu_to_le32(np->rx_buf_sz | NV_RX2_AVAIL);
 			if (unlikely(np->put_rx.ex++ == np->last_rx.ex))
-				np->put_rx.ex = np->first_rx.ex;
+				np->put_rx.ex = np->rx_ring.ex;
 			if (unlikely(np->put_rx_ctx++ == np->last_rx_ctx))
-				np->put_rx_ctx = np->first_rx_ctx;
+				np->put_rx_ctx = np->rx_skb;
 		} else {
 packet_dropped:
 			u64_stats_update_begin(&np->swstats_rx_syncp);
@@ -1903,13 +1903,15 @@ static void nv_init_rx(struct net_device *dev)
 	struct fe_priv *np = netdev_priv(dev);
 	int i;
 
-	np->get_rx = np->put_rx = np->first_rx = np->rx_ring;
+	np->get_rx = np->rx_ring;
+	np->put_rx = np->rx_ring;
 
 	if (!nv_optimized(np))
 		np->last_rx.orig = &np->rx_ring.orig[np->rx_ring_size-1];
 	else
 		np->last_rx.ex = &np->rx_ring.ex[np->rx_ring_size-1];
-	np->get_rx_ctx = np->put_rx_ctx = np->first_rx_ctx = np->rx_skb;
+	np->get_rx_ctx = np->rx_skb;
+	np->put_rx_ctx = np->rx_skb;
 	np->last_rx_ctx = &np->rx_skb[np->rx_ring_size-1];
 
 	for (i = 0; i < np->rx_ring_size; i++) {
@@ -1932,13 +1934,15 @@ static void nv_init_tx(struct net_device *dev)
 	struct fe_priv *np = netdev_priv(dev);
 	int i;
 
-	np->get_tx = np->put_tx = np->first_tx = np->tx_ring;
+	np->get_tx = np->tx_ring;
+	np->put_tx = np->tx_ring;
 
 	if (!nv_optimized(np))
 		np->last_tx.orig = &np->tx_ring.orig[np->tx_ring_size-1];
 	else
 		np->last_tx.ex = &np->tx_ring.ex[np->tx_ring_size-1];
-	np->get_tx_ctx = np->put_tx_ctx = np->first_tx_ctx = np->tx_skb;
+	np->get_tx_ctx = np->tx_skb;
+	np->put_tx_ctx = np->tx_skb;
 	np->last_tx_ctx = &np->tx_skb[np->tx_ring_size-1];
 	netdev_reset_queue(np->dev);
 	np->tx_pkts_in_progress = 0;
@@ -2248,9 +2252,9 @@ static netdev_tx_t nv_start_xmit(struct sk_buff *skb, struct net_device *dev)
 		offset += bcnt;
 		size -= bcnt;
 		if (unlikely(put_tx++ == np->last_tx.orig))
-			put_tx = np->first_tx.orig;
+			put_tx = np->tx_ring.orig;
 		if (unlikely(np->put_tx_ctx++ == np->last_tx_ctx))
-			np->put_tx_ctx = np->first_tx_ctx;
+			np->put_tx_ctx = np->tx_skb;
 	} while (size);
 
 	/* setup the fragments */
@@ -2276,7 +2280,7 @@ static netdev_tx_t nv_start_xmit(struct sk_buff *skb, struct net_device *dev)
 				do {
 					nv_unmap_txskb(np, start_tx_ctx);
 					if (unlikely(tmp_tx_ctx++ == np->last_tx_ctx))
-						tmp_tx_ctx = np->first_tx_ctx;
+						tmp_tx_ctx = np->tx_skb;
 				} while (tmp_tx_ctx != np->put_tx_ctx);
 				dev_kfree_skb_any(skb);
 				np->put_tx_ctx = start_tx_ctx;
@@ -2294,18 +2298,18 @@ static netdev_tx_t nv_start_xmit(struct sk_buff *skb, struct net_device *dev)
 			offset += bcnt;
 			frag_size -= bcnt;
 			if (unlikely(put_tx++ == np->last_tx.orig))
-				put_tx = np->first_tx.orig;
+				put_tx = np->tx_ring.orig;
 			if (unlikely(np->put_tx_ctx++ == np->last_tx_ctx))
-				np->put_tx_ctx = np->first_tx_ctx;
+				np->put_tx_ctx = np->tx_skb;
 		} while (frag_size);
 	}
 
-	if (unlikely(put_tx == np->first_tx.orig))
+	if (unlikely(put_tx == np->tx_ring.orig))
 		prev_tx = np->last_tx.orig;
 	else
 		prev_tx = put_tx - 1;
 
-	if (unlikely(np->put_tx_ctx == np->first_tx_ctx))
+	if (unlikely(np->put_tx_ctx == np->tx_skb))
 		prev_tx_ctx = np->last_tx_ctx;
 	else
 		prev_tx_ctx = np->put_tx_ctx - 1;
@@ -2406,9 +2410,9 @@ static netdev_tx_t nv_start_xmit_optimized(struct sk_buff *skb,
 		offset += bcnt;
 		size -= bcnt;
 		if (unlikely(put_tx++ == np->last_tx.ex))
-			put_tx = np->first_tx.ex;
+			put_tx = np->tx_ring.ex;
 		if (unlikely(np->put_tx_ctx++ == np->last_tx_ctx))
-			np->put_tx_ctx = np->first_tx_ctx;
+			np->put_tx_ctx = np->tx_skb;
 	} while (size);
 
 	/* setup the fragments */
@@ -2434,7 +2438,7 @@ static netdev_tx_t nv_start_xmit_optimized(struct sk_buff *skb,
 				do {
 					nv_unmap_txskb(np, start_tx_ctx);
 					if (unlikely(tmp_tx_ctx++ == np->last_tx_ctx))
-						tmp_tx_ctx = np->first_tx_ctx;
+						tmp_tx_ctx = np->tx_skb;
 				} while (tmp_tx_ctx != np->put_tx_ctx);
 				dev_kfree_skb_any(skb);
 				np->put_tx_ctx = start_tx_ctx;
@@ -2452,18 +2456,18 @@ static netdev_tx_t nv_start_xmit_optimized(struct sk_buff *skb,
 			offset += bcnt;
 			frag_size -= bcnt;
 			if (unlikely(put_tx++ == np->last_tx.ex))
-				put_tx = np->first_tx.ex;
+				put_tx = np->tx_ring.ex;
 			if (unlikely(np->put_tx_ctx++ == np->last_tx_ctx))
-				np->put_tx_ctx = np->first_tx_ctx;
+				np->put_tx_ctx = np->tx_skb;
 		} while (frag_size);
 	}
 
-	if (unlikely(put_tx == np->first_tx.ex))
+	if (unlikely(put_tx == np->tx_ring.ex))
 		prev_tx = np->last_tx.ex;
 	else
 		prev_tx = put_tx - 1;
 
-	if (unlikely(np->put_tx_ctx == np->first_tx_ctx))
+	if (unlikely(np->put_tx_ctx == np->tx_skb))
 		prev_tx_ctx = np->last_tx_ctx;
 	else
 		prev_tx_ctx = np->put_tx_ctx - 1;
@@ -2563,7 +2567,7 @@ static int nv_tx_done(struct net_device *dev, int limit)
 
 		if (np->desc_ver == DESC_VER_1) {
 			if (flags & NV_TX_LASTPACKET) {
-				if (flags & NV_TX_ERROR) {
+				if (unlikely(flags & NV_TX_ERROR)) {
 					if ((flags & NV_TX_RETRYERROR)
 					    && !(flags & NV_TX_RETRYCOUNT_MASK))
 						nv_legacybackoff_reseed(dev);
@@ -2580,7 +2584,7 @@ static int nv_tx_done(struct net_device *dev, int limit)
 			}
 		} else {
 			if (flags & NV_TX2_LASTPACKET) {
-				if (flags & NV_TX2_ERROR) {
+				if (unlikely(flags & NV_TX2_ERROR)) {
 					if ((flags & NV_TX2_RETRYERROR)
 					    && !(flags & NV_TX2_RETRYCOUNT_MASK))
 						nv_legacybackoff_reseed(dev);
@@ -2597,9 +2601,9 @@ static int nv_tx_done(struct net_device *dev, int limit)
 			}
 		}
 		if (unlikely(np->get_tx.orig++ == np->last_tx.orig))
-			np->get_tx.orig = np->first_tx.orig;
+			np->get_tx.orig = np->tx_ring.orig;
 		if (unlikely(np->get_tx_ctx++ == np->last_tx_ctx))
-			np->get_tx_ctx = np->first_tx_ctx;
+			np->get_tx_ctx = np->tx_skb;
 	}
 
 	netdev_completed_queue(np->dev, tx_work, bytes_compl);
@@ -2626,7 +2630,7 @@ static int nv_tx_done_optimized(struct net_device *dev, int limit)
 		nv_unmap_txskb(np, np->get_tx_ctx);
 
 		if (flags & NV_TX2_LASTPACKET) {
-			if (flags & NV_TX2_ERROR) {
+			if (unlikely(flags & NV_TX2_ERROR)) {
 				if ((flags & NV_TX2_RETRYERROR)
 				    && !(flags & NV_TX2_RETRYCOUNT_MASK)) {
 					if (np->driver_data & DEV_HAS_GEAR_MODE)
@@ -2651,9 +2655,9 @@ static int nv_tx_done_optimized(struct net_device *dev, int limit)
 		}
 
 		if (unlikely(np->get_tx.ex++ == np->last_tx.ex))
-			np->get_tx.ex = np->first_tx.ex;
+			np->get_tx.ex = np->tx_ring.ex;
 		if (unlikely(np->get_tx_ctx++ == np->last_tx_ctx))
-			np->get_tx_ctx = np->first_tx_ctx;
+			np->get_tx_ctx = np->tx_skb;
 	}
 
 	netdev_completed_queue(np->dev, tx_work, bytes_cleaned);
@@ -2909,9 +2913,9 @@ static int nv_rx_process(struct net_device *dev, int limit)
 		u64_stats_update_end(&np->swstats_rx_syncp);
 next_pkt:
 		if (unlikely(np->get_rx.orig++ == np->last_rx.orig))
-			np->get_rx.orig = np->first_rx.orig;
+			np->get_rx.orig = np->rx_ring.orig;
 		if (unlikely(np->get_rx_ctx++ == np->last_rx_ctx))
-			np->get_rx_ctx = np->first_rx_ctx;
+			np->get_rx_ctx = np->rx_skb;
 
 		rx_work++;
 	}
@@ -2998,9 +3002,9 @@ static int nv_rx_process_optimized(struct net_device *dev, int limit)
 		}
 next_pkt:
 		if (unlikely(np->get_rx.ex++ == np->last_rx.ex))
-			np->get_rx.ex = np->first_rx.ex;
+			np->get_rx.ex = np->rx_ring.ex;
 		if (unlikely(np->get_rx_ctx++ == np->last_rx_ctx))
-			np->get_rx_ctx = np->first_rx_ctx;
+			np->get_rx_ctx = np->rx_skb;
 
 		rx_work++;
 	}
@@ -5507,11 +5511,9 @@ static int nv_open(struct net_device *dev)
 	/* One manual link speed update: Interrupts are enabled, future link
 	 * speed changes cause interrupts and are handled by nv_link_irq().
 	 */
-	{
-		u32 miistat;
-		miistat = readl(base + NvRegMIIStatus);
-		writel(NVREG_MIISTAT_MASK_ALL, base + NvRegMIIStatus);
-	}
+	readl(base + NvRegMIIStatus);
+	writel(NVREG_MIISTAT_MASK_ALL, base + NvRegMIIStatus);
+
 	/* set linkspeed to invalid value, thus force nv_update_linkspeed
 	 * to init hw */
 	np->linkspeed = 0;
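The forcedeth changes drop the cached first_rx/first_tx copies and wrap ring
pointers straight back to the ring base, e.g. "if (unlikely(put_tx++ ==
np->last_tx.orig)) put_tx = np->tx_ring.orig;". The wrap idiom reduced to a
sketch with local names:

struct slot { int payload; };

/* Successor of cur in a ring spanning [base, last]. */
static struct slot *ring_next(struct slot *cur, struct slot *base,
			      struct slot *last)
{
	return cur == last ? base : cur + 1;
}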
diff --git a/drivers/net/ethernet/pasemi/pasemi_mac.c b/drivers/net/ethernet/pasemi/pasemi_mac.c
index c9a55b774935..07a2eb3781b1 100644
--- a/drivers/net/ethernet/pasemi/pasemi_mac.c
+++ b/drivers/net/ethernet/pasemi/pasemi_mac.c
@@ -212,9 +212,7 @@ static int pasemi_get_mac_addr(struct pasemi_mac *mac)
 		return -ENOENT;
 	}
 
-	if (sscanf(maddr, "%hhx:%hhx:%hhx:%hhx:%hhx:%hhx",
-		   &addr[0], &addr[1], &addr[2], &addr[3], &addr[4], &addr[5])
-	    != ETH_ALEN) {
+	if (!mac_pton(maddr, addr)) {
 		dev_warn(&pdev->dev,
 			 "can't parse mac address, not configuring\n");
 		return -EINVAL;
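mac_pton() (lib/net_utils.c) succeeds only when the whole
"xx:xx:xx:xx:xx:xx" form parses, which is the property the removed
sscanf()-returns-ETH_ALEN test approximated. A minimal sketch of the
replacement call:

#include <linux/errno.h>
#include <linux/if_ether.h>
#include <linux/kernel.h>

/* Returns 0 and fills addr (ETH_ALEN bytes) on success, else -EINVAL. */
static int demo_parse_mac(const char *str, u8 *addr)
{
	return mac_pton(str, addr) ? 0 : -EINVAL;
}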
diff --git a/drivers/net/ethernet/qlogic/Kconfig b/drivers/net/ethernet/qlogic/Kconfig
index 26ddf092e3ec..0ee2490db729 100644
--- a/drivers/net/ethernet/qlogic/Kconfig
+++ b/drivers/net/ethernet/qlogic/Kconfig
@@ -85,6 +85,7 @@ config QED
 	tristate "QLogic QED 25/40/100Gb core driver"
 	depends on PCI
 	select ZLIB_INFLATE
+	select CRC8
 	---help---
 	  This enables the support for ...
 
diff --git a/drivers/net/ethernet/qlogic/netxen/netxen_nic_hw.c b/drivers/net/ethernet/qlogic/netxen/netxen_nic_hw.c
index 0a66389c06c2..1cd39c9a0345 100644
--- a/drivers/net/ethernet/qlogic/netxen/netxen_nic_hw.c
+++ b/drivers/net/ethernet/qlogic/netxen/netxen_nic_hw.c
@@ -2502,12 +2502,10 @@ netxen_collect_minidump(struct netxen_adapter *adapter)
 {
 	int ret = 0;
 	struct netxen_minidump_template_hdr *hdr;
-	struct timespec val;
 	hdr = (struct netxen_minidump_template_hdr *)
 				adapter->mdump.md_template;
 	hdr->driver_capture_mask = adapter->mdump.md_capture_mask;
-	jiffies_to_timespec(jiffies, &val);
-	hdr->driver_timestamp = (u32) val.tv_sec;
+	hdr->driver_timestamp = ktime_get_seconds();
 	hdr->driver_info_word2 = adapter->fw_version;
 	hdr->driver_info_word3 = NXRD32(adapter, CRB_DRIVER_VERSION);
 	ret = netxen_parse_md_template(adapter);
diff --git a/drivers/net/ethernet/qlogic/qed/qed.h b/drivers/net/ethernet/qlogic/qed/qed.h
index 91003bc6f00b..69488554f4b9 100644
--- a/drivers/net/ethernet/qlogic/qed/qed.h
+++ b/drivers/net/ethernet/qlogic/qed/qed.h
@@ -52,10 +52,10 @@
 
 extern const struct qed_common_ops qed_common_ops_pass;
 
-#define QED_MAJOR_VERSION               8
-#define QED_MINOR_VERSION               10
-#define QED_REVISION_VERSION            11
-#define QED_ENGINEERING_VERSION 21
+#define QED_MAJOR_VERSION		8
+#define QED_MINOR_VERSION		33
+#define QED_REVISION_VERSION		0
+#define QED_ENGINEERING_VERSION		20
 
 #define QED_VERSION						 \
 	((QED_MAJOR_VERSION << 24) | (QED_MINOR_VERSION << 16) | \
@@ -778,8 +778,8 @@ static inline u8 qed_concrete_to_sw_fid(struct qed_dev *cdev,
 	return sw_fid;
 }
 
-#define PURE_LB_TC 8
-#define PKT_LB_TC 9
+#define PKT_LB_TC	9
+#define MAX_NUM_VOQS_E4	20
 
 int qed_configure_vport_wfq(struct qed_dev *cdev, u16 vp_id, u32 rate);
 void qed_configure_vp_wfq_on_link_change(struct qed_dev *cdev,
diff --git a/drivers/net/ethernet/qlogic/qed/qed_cxt.c b/drivers/net/ethernet/qlogic/qed/qed_cxt.c
index afd07ad91631..6f546e869d8d 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_cxt.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_cxt.c
@@ -86,22 +86,22 @@
 
 /* connection context union */
 union conn_context {
-	struct core_conn_context core_ctx;
-	struct eth_conn_context eth_ctx;
-	struct iscsi_conn_context iscsi_ctx;
-	struct fcoe_conn_context fcoe_ctx;
-	struct roce_conn_context roce_ctx;
+	struct e4_core_conn_context core_ctx;
+	struct e4_eth_conn_context eth_ctx;
+	struct e4_iscsi_conn_context iscsi_ctx;
+	struct e4_fcoe_conn_context fcoe_ctx;
+	struct e4_roce_conn_context roce_ctx;
 };
 
 /* TYPE-0 task context - iSCSI, FCOE */
 union type0_task_context {
-	struct iscsi_task_context iscsi_ctx;
-	struct fcoe_task_context fcoe_ctx;
+	struct e4_iscsi_task_context iscsi_ctx;
+	struct e4_fcoe_task_context fcoe_ctx;
 };
 
 /* TYPE-1 task context - ROCE */
 union type1_task_context {
-	struct rdma_task_context roce_ctx;
+	struct e4_rdma_task_context roce_ctx;
 };
 
 struct src_ent {
@@ -109,8 +109,8 @@ struct src_ent {
 	u64 next;
 };
 
-#define CDUT_SEG_ALIGNMET 3	/* in 4k chunks */
-#define CDUT_SEG_ALIGNMET_IN_BYTES (1 << (CDUT_SEG_ALIGNMET + 12))
+#define CDUT_SEG_ALIGNMET		3 /* in 4k chunks */
+#define CDUT_SEG_ALIGNMET_IN_BYTES	BIT(CDUT_SEG_ALIGNMET + 12)
 
 #define CONN_CXT_SIZE(p_hwfn) \
 	ALIGNED_TYPE_SIZE(union conn_context, p_hwfn)
@@ -742,7 +742,7 @@ int qed_cxt_cfg_ilt_compute(struct qed_hwfn *p_hwfn, u32 *line_count)
 	p_blk = qed_cxt_set_blk(&p_cli->pf_blks[0]);
 
 	qed_cxt_qm_iids(p_hwfn, &qm_iids);
-	total = qed_qm_pf_mem_size(p_hwfn->rel_pf_id, qm_iids.cids,
+	total = qed_qm_pf_mem_size(qm_iids.cids,
 				   qm_iids.vf_cids, qm_iids.tids,
 				   p_hwfn->qm_info.num_pqs,
 				   p_hwfn->qm_info.num_vf_pqs);
@@ -1055,11 +1055,10 @@ static int qed_ilt_blk_alloc(struct qed_hwfn *p_hwfn,
 		u32 size;
 
 		size = min_t(u32, sz_left, p_blk->real_size_in_page);
-		p_virt = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
-					    size, &p_phys, GFP_KERNEL);
+		p_virt = dma_zalloc_coherent(&p_hwfn->cdev->pdev->dev, size,
+					     &p_phys, GFP_KERNEL);
 		if (!p_virt)
 			return -ENOMEM;
-		memset(p_virt, 0, size);
 
 		ilt_shadow[line].p_phys = p_phys;
 		ilt_shadow[line].p_virt = p_virt;
@@ -1496,20 +1495,24 @@ static void qed_cdu_init_pf(struct qed_hwfn *p_hwfn)
 	}
 }
 
-void qed_qm_init_pf(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
+void qed_qm_init_pf(struct qed_hwfn *p_hwfn,
+		    struct qed_ptt *p_ptt, bool is_pf_loading)
 {
-	struct qed_qm_pf_rt_init_params params;
 	struct qed_qm_info *qm_info = &p_hwfn->qm_info;
+	struct qed_qm_pf_rt_init_params params;
+	struct qed_mcp_link_state *p_link;
 	struct qed_qm_iids iids;
 
 	memset(&iids, 0, sizeof(iids));
 	qed_cxt_qm_iids(p_hwfn, &iids);
 
+	p_link = &QED_LEADING_HWFN(p_hwfn->cdev)->mcp_info->link_output;
+
 	memset(&params, 0, sizeof(params));
 	params.port_id = p_hwfn->port_id;
 	params.pf_id = p_hwfn->rel_pf_id;
 	params.max_phys_tcs_per_port = qm_info->max_phys_tcs_per_port;
-	params.is_first_pf = p_hwfn->first_on_engine;
+	params.is_pf_loading = is_pf_loading;
 	params.num_pf_cids = iids.cids;
 	params.num_vf_cids = iids.vf_cids;
 	params.num_tids = iids.tids;
@@ -1520,6 +1523,7 @@ void qed_qm_init_pf(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
 	params.num_vports = qm_info->num_vports;
 	params.pf_wfq = qm_info->pf_wfq;
 	params.pf_rl = qm_info->pf_rl;
+	params.link_speed = p_link->speed;
 	params.pq_params = qm_info->qm_pq_params;
 	params.vport_params = qm_info->qm_vport_params;
 
@@ -1883,7 +1887,7 @@ void qed_cxt_hw_init_common(struct qed_hwfn *p_hwfn)
 
 void qed_cxt_hw_init_pf(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
 {
-	qed_qm_init_pf(p_hwfn, p_ptt);
+	qed_qm_init_pf(p_hwfn, p_ptt, true);
 	qed_cm_init_pf(p_hwfn);
 	qed_dq_init_pf(p_hwfn);
 	qed_cdu_init_pf(p_hwfn);
@@ -2303,14 +2307,13 @@ qed_cxt_dynamic_ilt_alloc(struct qed_hwfn *p_hwfn,
 		goto out0;
 	}
 
-	p_virt = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
-				    p_blk->real_size_in_page,
-				    &p_phys, GFP_KERNEL);
+	p_virt = dma_zalloc_coherent(&p_hwfn->cdev->pdev->dev,
+				     p_blk->real_size_in_page, &p_phys,
+				     GFP_KERNEL);
 	if (!p_virt) {
 		rc = -ENOMEM;
 		goto out1;
 	}
-	memset(p_virt, 0, p_blk->real_size_in_page);
 
 	/* configuration of refTagMask to 0xF is required for RoCE DIF MR only,
 	 * to compensate for a HW bug, but it is configured even if DIF is not
@@ -2326,7 +2329,7 @@ qed_cxt_dynamic_ilt_alloc(struct qed_hwfn *p_hwfn,
 		for (elem_i = 0; elem_i < elems_per_p; elem_i++) {
 			elem = (union type1_task_context *)elem_start;
 			SET_FIELD(elem->roce_ctx.tdif_context.flags1,
-				  TDIF_TASK_CONTEXT_REFTAGMASK, 0xf);
+				  TDIF_TASK_CONTEXT_REF_TAG_MASK, 0xf);
 			elem_start += TYPE1_TASK_CXT_SIZE(p_hwfn);
 		}
 	}
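
As a sanity check on the BIT() rewrite earlier in this file: BIT(CDUT_SEG_ALIGNMET + 12) is 1 << 15, i.e. the same 32 KiB (8 chunks of 4 KiB) the open-coded shift produced, so the change is purely cosmetic:

#include <stdio.h>

#define BIT(n)				(1U << (n))
#define CDUT_SEG_ALIGNMET		3 /* in 4k chunks */
#define CDUT_SEG_ALIGNMET_IN_BYTES	BIT(CDUT_SEG_ALIGNMET + 12)

int main(void)
{
	printf("alignment = %u bytes\n", CDUT_SEG_ALIGNMET_IN_BYTES); /* 32768 */
	return 0;
}
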
diff --git a/drivers/net/ethernet/qlogic/qed/qed_cxt.h b/drivers/net/ethernet/qlogic/qed/qed_cxt.h
index 17836349a274..a4e95869889f 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_cxt.h
+++ b/drivers/net/ethernet/qlogic/qed/qed_cxt.h
@@ -169,8 +169,10 @@ void qed_cxt_hw_init_pf(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt);
  *
  * @param p_hwfn
  * @param p_ptt
+ * @param is_pf_loading
  */
-void qed_qm_init_pf(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt);
+void qed_qm_init_pf(struct qed_hwfn *p_hwfn,
+		    struct qed_ptt *p_ptt, bool is_pf_loading);
 
 /**
  * @brief Reconfigures QM pf on the fly
diff --git a/drivers/net/ethernet/qlogic/qed/qed_dcbx.c b/drivers/net/ethernet/qlogic/qed/qed_dcbx.c
index fe7c1f230028..449777f21237 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_dcbx.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_dcbx.c
@@ -954,9 +954,7 @@ void qed_dcbx_set_pf_update_params(struct qed_dcbx_results *p_src,
 				   struct pf_update_ramrod_data *p_dest)
 {
 	struct protocol_dcb_data *p_dcb_data;
-	bool update_flag = false;
-
-	p_dest->pf_id = p_src->pf_id;
+	u8 update_flag;
 
 	update_flag = p_src->arr[DCBX_PROTOCOL_FCOE].update;
 	p_dest->update_fcoe_dcb_data_mode = update_flag;
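
The bool-to-u8 change above presumably matters because the firmware's "update" indication is a byte-sized mode rather than a strict 0/1 flag: a u8 forwards the value unmodified, while bool would collapse any non-zero mode to 1. A small sketch under that assumption (values are hypothetical):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint8_t fw_update = 2;		/* hypothetical non-boolean mode */

	uint8_t as_u8 = fw_update;	/* preserved: 2 */
	_Bool as_bool = fw_update;	/* collapsed: 1 */

	printf("u8 path: %u, bool path: %u\n",
	       (unsigned)as_u8, (unsigned)as_bool);
	return 0;
}
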
diff --git a/drivers/net/ethernet/qlogic/qed/qed_debug.c b/drivers/net/ethernet/qlogic/qed/qed_debug.c
index 03c3cf77aaff..f2633ec87a6a 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_debug.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_debug.c
@@ -21,25 +21,26 @@ enum mem_groups {
 	MEM_GROUP_DMAE_MEM,
 	MEM_GROUP_CM_MEM,
 	MEM_GROUP_QM_MEM,
-	MEM_GROUP_TM_MEM,
+	MEM_GROUP_DORQ_MEM,
 	MEM_GROUP_BRB_RAM,
 	MEM_GROUP_BRB_MEM,
 	MEM_GROUP_PRS_MEM,
-	MEM_GROUP_SDM_MEM,
 	MEM_GROUP_IOR,
-	MEM_GROUP_RAM,
 	MEM_GROUP_BTB_RAM,
-	MEM_GROUP_RDIF_CTX,
-	MEM_GROUP_TDIF_CTX,
-	MEM_GROUP_CFC_MEM,
 	MEM_GROUP_CONN_CFC_MEM,
 	MEM_GROUP_TASK_CFC_MEM,
 	MEM_GROUP_CAU_PI,
 	MEM_GROUP_CAU_MEM,
 	MEM_GROUP_PXP_ILT,
+	MEM_GROUP_TM_MEM,
+	MEM_GROUP_SDM_MEM,
 	MEM_GROUP_PBUF,
+	MEM_GROUP_RAM,
 	MEM_GROUP_MULD_MEM,
 	MEM_GROUP_BTB_MEM,
+	MEM_GROUP_RDIF_CTX,
+	MEM_GROUP_TDIF_CTX,
+	MEM_GROUP_CFC_MEM,
 	MEM_GROUP_IGU_MEM,
 	MEM_GROUP_IGU_MSIX,
 	MEM_GROUP_CAU_SB,
@@ -54,25 +55,26 @@ static const char * const s_mem_group_names[] = {
 	"DMAE_MEM",
 	"CM_MEM",
 	"QM_MEM",
-	"TM_MEM",
+	"DORQ_MEM",
 	"BRB_RAM",
 	"BRB_MEM",
 	"PRS_MEM",
-	"SDM_MEM",
 	"IOR",
-	"RAM",
 	"BTB_RAM",
-	"RDIF_CTX",
-	"TDIF_CTX",
-	"CFC_MEM",
 	"CONN_CFC_MEM",
 	"TASK_CFC_MEM",
 	"CAU_PI",
 	"CAU_MEM",
 	"PXP_ILT",
+	"TM_MEM",
+	"SDM_MEM",
 	"PBUF",
+	"RAM",
 	"MULD_MEM",
 	"BTB_MEM",
+	"RDIF_CTX",
+	"TDIF_CTX",
+	"CFC_MEM",
 	"IGU_MEM",
 	"IGU_MSIX",
 	"CAU_SB",
@@ -92,11 +94,6 @@ static u32 cond7(const u32 *r, const u32 *imm)
 	return ((r[0] >> imm[0]) & imm[1]) != imm[2];
 }
 
-static u32 cond14(const u32 *r, const u32 *imm)
-{
-	return (r[0] != imm[0]) && (((r[1] >> imm[1]) & imm[2]) == imm[3]);
-}
-
 static u32 cond6(const u32 *r, const u32 *imm)
 {
 	return (r[0] & imm[0]) != imm[1];
@@ -174,7 +171,6 @@ static u32(*cond_arr[]) (const u32 *r, const u32 *imm) = {
 	cond11,
 	cond12,
 	cond13,
-	cond14,
 };
 
 /******************************* Data Types **********************************/
@@ -203,6 +199,8 @@ struct chip_defs {
 struct platform_defs {
 	const char *name;
 	u32 delay_factor;
+	u32 dmae_thresh;
+	u32 log_thresh;
 };
 
 /* Storm constant definitions.
@@ -234,7 +232,7 @@ struct storm_defs {
 /* Block constant definitions */
 struct block_defs {
 	const char *name;
-	bool has_dbg_bus[MAX_CHIP_IDS];
+	bool exists[MAX_CHIP_IDS];
 	bool associated_to_storm;
 
 	/* Valid only if associated_to_storm is true */
@@ -258,8 +256,8 @@ struct block_defs {
 /* Reset register definitions */
 struct reset_reg_defs {
 	u32 addr;
-	u32 unreset_val;
 	bool exists[MAX_CHIP_IDS];
+	u32 unreset_val[MAX_CHIP_IDS];
 };
 
 struct grc_param_defs {
@@ -276,8 +274,8 @@ struct rss_mem_defs {
 	const char *mem_name;
 	const char *type_name;
 	u32 addr;
+	u32 entry_width;
 	u32 num_entries[MAX_CHIP_IDS];
-	u32 entry_width[MAX_CHIP_IDS];
 };
 
 struct vfc_ram_defs {
@@ -294,7 +292,9 @@ struct big_ram_defs {
 	enum dbg_grc_params grc_param;
 	u32 addr_reg_addr;
 	u32 data_reg_addr;
-	u32 num_of_blocks[MAX_CHIP_IDS];
+	u32 is_256b_reg_addr;
+	u32 is_256b_bit_offset[MAX_CHIP_IDS];
+	u32 ram_size[MAX_CHIP_IDS]; /* In dwords */
 };
 
 struct phy_defs {
@@ -358,20 +358,14 @@ struct phy_defs {
 			(arr)[i] = qed_rd(dev, ptt, addr); \
 	} while (0)
 
-#ifndef DWORDS_TO_BYTES
 #define DWORDS_TO_BYTES(dwords)		((dwords) * BYTES_IN_DWORD)
-#endif
-#ifndef BYTES_TO_DWORDS
 #define BYTES_TO_DWORDS(bytes)		((bytes) / BYTES_IN_DWORD)
-#endif
 
-/* extra lines include a signature line + optional latency events line */
-#ifndef NUM_DBG_LINES
+/* Extra lines include a signature line + optional latency events line */
 #define NUM_EXTRA_DBG_LINES(block_desc) \
 	(1 + ((block_desc)->has_latency_events ? 1 : 0))
 #define NUM_DBG_LINES(block_desc) \
 	((block_desc)->num_of_lines + NUM_EXTRA_DBG_LINES(block_desc))
-#endif
 
 #define RAM_LINES_TO_DWORDS(lines)	((lines) * 2)
 #define RAM_LINES_TO_BYTES(lines) \
@@ -424,9 +418,6 @@ struct phy_defs {
 #define NUM_RSS_MEM_TYPES		5
 
 #define NUM_BIG_RAM_TYPES		3
-#define BIG_RAM_BLOCK_SIZE_BYTES	128
-#define BIG_RAM_BLOCK_SIZE_DWORDS \
-	BYTES_TO_DWORDS(BIG_RAM_BLOCK_SIZE_BYTES)
 
 #define NUM_PHY_TBUS_ADDRESSES		2048
 #define PHY_DUMP_SIZE_DWORDS		(NUM_PHY_TBUS_ADDRESSES / 2)
@@ -441,23 +432,17 @@ struct phy_defs {
 
 #define FW_IMG_MAIN			1
 
-#ifndef REG_FIFO_ELEMENT_DWORDS
 #define REG_FIFO_ELEMENT_DWORDS		2
-#endif
 #define REG_FIFO_DEPTH_ELEMENTS		32
 #define REG_FIFO_DEPTH_DWORDS \
 	(REG_FIFO_ELEMENT_DWORDS * REG_FIFO_DEPTH_ELEMENTS)
 
-#ifndef IGU_FIFO_ELEMENT_DWORDS
 #define IGU_FIFO_ELEMENT_DWORDS		4
-#endif
 #define IGU_FIFO_DEPTH_ELEMENTS		64
 #define IGU_FIFO_DEPTH_DWORDS \
 	(IGU_FIFO_ELEMENT_DWORDS * IGU_FIFO_DEPTH_ELEMENTS)
 
-#ifndef PROTECTION_OVERRIDE_ELEMENT_DWORDS
 #define PROTECTION_OVERRIDE_ELEMENT_DWORDS	2
-#endif
 #define PROTECTION_OVERRIDE_DEPTH_ELEMENTS	20
 #define PROTECTION_OVERRIDE_DEPTH_DWORDS \
 	(PROTECTION_OVERRIDE_DEPTH_ELEMENTS * \
@@ -491,6 +476,11 @@ static struct chip_defs s_chip_defs[MAX_CHIP_IDS] = {
 	  {{MAX_NUM_PORTS_K2, MAX_NUM_PFS_K2, MAX_NUM_VFS_K2},
 	   {0, 0, 0},
 	   {0, 0, 0},
+	   {0, 0, 0} } },
+	{ "reserved",
+	   {{0, 0, 0},
+	   {0, 0, 0},
+	   {0, 0, 0},
 	   {0, 0, 0} } }
 };
 
@@ -498,7 +488,8 @@ static struct chip_defs s_chip_defs[MAX_CHIP_IDS] = {
 static struct storm_defs s_storm_defs[] = {
 	/* Tstorm */
 	{'T', BLOCK_TSEM,
-	 {DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCT}, true,
+	 {DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCT,
+	  DBG_BUS_CLIENT_RBCT}, true,
 	 TSEM_REG_FAST_MEMORY,
 	 TSEM_REG_DBG_FRAME_MODE_BB_K2, TSEM_REG_SLOW_DBG_ACTIVE_BB_K2,
 	 TSEM_REG_SLOW_DBG_MODE_BB_K2, TSEM_REG_DBG_MODE1_CFG_BB_K2,
@@ -511,7 +502,8 @@ static struct storm_defs s_storm_defs[] = {
 
 	/* Mstorm */
 	{'M', BLOCK_MSEM,
-	 {DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCM}, false,
+	 {DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCM,
+	  DBG_BUS_CLIENT_RBCM}, false,
 	 MSEM_REG_FAST_MEMORY,
 	 MSEM_REG_DBG_FRAME_MODE_BB_K2, MSEM_REG_SLOW_DBG_ACTIVE_BB_K2,
 	 MSEM_REG_SLOW_DBG_MODE_BB_K2, MSEM_REG_DBG_MODE1_CFG_BB_K2,
@@ -524,7 +516,8 @@ static struct storm_defs s_storm_defs[] = {
 
 	/* Ustorm */
 	{'U', BLOCK_USEM,
-	 {DBG_BUS_CLIENT_RBCU, DBG_BUS_CLIENT_RBCU}, false,
+	 {DBG_BUS_CLIENT_RBCU, DBG_BUS_CLIENT_RBCU,
+	  DBG_BUS_CLIENT_RBCU}, false,
 	 USEM_REG_FAST_MEMORY,
 	 USEM_REG_DBG_FRAME_MODE_BB_K2, USEM_REG_SLOW_DBG_ACTIVE_BB_K2,
 	 USEM_REG_SLOW_DBG_MODE_BB_K2, USEM_REG_DBG_MODE1_CFG_BB_K2,
@@ -537,7 +530,8 @@ static struct storm_defs s_storm_defs[] = {
 
 	/* Xstorm */
 	{'X', BLOCK_XSEM,
-	 {DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCX}, false,
+	 {DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCX,
+	  DBG_BUS_CLIENT_RBCX}, false,
 	 XSEM_REG_FAST_MEMORY,
 	 XSEM_REG_DBG_FRAME_MODE_BB_K2, XSEM_REG_SLOW_DBG_ACTIVE_BB_K2,
 	 XSEM_REG_SLOW_DBG_MODE_BB_K2, XSEM_REG_DBG_MODE1_CFG_BB_K2,
@@ -550,7 +544,8 @@ static struct storm_defs s_storm_defs[] = {
 
 	/* Ystorm */
 	{'Y', BLOCK_YSEM,
-	 {DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCY}, false,
+	 {DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCY,
+	  DBG_BUS_CLIENT_RBCY}, false,
 	 YSEM_REG_FAST_MEMORY,
 	 YSEM_REG_DBG_FRAME_MODE_BB_K2, YSEM_REG_SLOW_DBG_ACTIVE_BB_K2,
 	 YSEM_REG_SLOW_DBG_MODE_BB_K2, YSEM_REG_DBG_MODE1_CFG_BB_K2,
@@ -563,7 +558,8 @@ static struct storm_defs s_storm_defs[] = {
 
 	/* Pstorm */
 	{'P', BLOCK_PSEM,
-	 {DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCS}, true,
+	 {DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCS,
+	  DBG_BUS_CLIENT_RBCS}, true,
 	 PSEM_REG_FAST_MEMORY,
 	 PSEM_REG_DBG_FRAME_MODE_BB_K2, PSEM_REG_SLOW_DBG_ACTIVE_BB_K2,
 	 PSEM_REG_SLOW_DBG_MODE_BB_K2, PSEM_REG_DBG_MODE1_CFG_BB_K2,
@@ -579,8 +575,8 @@ static struct storm_defs s_storm_defs[] = {
 
 static struct block_defs block_grc_defs = {
 	"grc",
-	{true, true}, false, 0,
-	{DBG_BUS_CLIENT_RBCN, DBG_BUS_CLIENT_RBCN},
+	{true, true, true}, false, 0,
+	{DBG_BUS_CLIENT_RBCN, DBG_BUS_CLIENT_RBCN, DBG_BUS_CLIENT_RBCN},
 	GRC_REG_DBG_SELECT, GRC_REG_DBG_DWORD_ENABLE,
 	GRC_REG_DBG_SHIFT, GRC_REG_DBG_FORCE_VALID,
 	GRC_REG_DBG_FORCE_FRAME,
@@ -588,30 +584,30 @@ static struct block_defs block_grc_defs = {
 };
 
 static struct block_defs block_miscs_defs = {
-	"miscs", {false, false}, false, 0,
-	{MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
+	"miscs", {true, true, true}, false, 0,
+	{MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
 	0, 0, 0, 0, 0,
 	false, false, MAX_DBG_RESET_REGS, 0
 };
 
 static struct block_defs block_misc_defs = {
-	"misc", {false, false}, false, 0,
-	{MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
+	"misc", {true, true, true}, false, 0,
+	{MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
 	0, 0, 0, 0, 0,
 	false, false, MAX_DBG_RESET_REGS, 0
 };
 
 static struct block_defs block_dbu_defs = {
-	"dbu", {false, false}, false, 0,
-	{MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
+	"dbu", {true, true, true}, false, 0,
+	{MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
 	0, 0, 0, 0, 0,
 	false, false, MAX_DBG_RESET_REGS, 0
 };
 
 static struct block_defs block_pglue_b_defs = {
 	"pglue_b",
-	{true, true}, false, 0,
-	{DBG_BUS_CLIENT_RBCH, DBG_BUS_CLIENT_RBCH},
+	{true, true, true}, false, 0,
+	{DBG_BUS_CLIENT_RBCH, DBG_BUS_CLIENT_RBCH, DBG_BUS_CLIENT_RBCH},
 	PGLUE_B_REG_DBG_SELECT, PGLUE_B_REG_DBG_DWORD_ENABLE,
 	PGLUE_B_REG_DBG_SHIFT, PGLUE_B_REG_DBG_FORCE_VALID,
 	PGLUE_B_REG_DBG_FORCE_FRAME,
@@ -620,25 +616,26 @@ static struct block_defs block_pglue_b_defs = {
 
 static struct block_defs block_cnig_defs = {
 	"cnig",
-	{false, true}, false, 0,
-	{MAX_DBG_BUS_CLIENTS, DBG_BUS_CLIENT_RBCW},
-	CNIG_REG_DBG_SELECT_K2, CNIG_REG_DBG_DWORD_ENABLE_K2,
-	CNIG_REG_DBG_SHIFT_K2, CNIG_REG_DBG_FORCE_VALID_K2,
-	CNIG_REG_DBG_FORCE_FRAME_K2,
+	{true, true, true}, false, 0,
+	{MAX_DBG_BUS_CLIENTS, DBG_BUS_CLIENT_RBCW,
+	 DBG_BUS_CLIENT_RBCW},
+	CNIG_REG_DBG_SELECT_K2_E5, CNIG_REG_DBG_DWORD_ENABLE_K2_E5,
+	CNIG_REG_DBG_SHIFT_K2_E5, CNIG_REG_DBG_FORCE_VALID_K2_E5,
+	CNIG_REG_DBG_FORCE_FRAME_K2_E5,
 	true, false, DBG_RESET_REG_MISCS_PL_HV, 0
 };
 
 static struct block_defs block_cpmu_defs = {
-	"cpmu", {false, false}, false, 0,
-	{MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
+	"cpmu", {true, true, true}, false, 0,
+	{MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
 	0, 0, 0, 0, 0,
 	true, false, DBG_RESET_REG_MISCS_PL_HV, 8
 };
 
 static struct block_defs block_ncsi_defs = {
 	"ncsi",
-	{true, true}, false, 0,
-	{DBG_BUS_CLIENT_RBCZ, DBG_BUS_CLIENT_RBCZ},
+	{true, true, true}, false, 0,
+	{DBG_BUS_CLIENT_RBCZ, DBG_BUS_CLIENT_RBCZ, DBG_BUS_CLIENT_RBCZ},
 	NCSI_REG_DBG_SELECT, NCSI_REG_DBG_DWORD_ENABLE,
 	NCSI_REG_DBG_SHIFT, NCSI_REG_DBG_FORCE_VALID,
 	NCSI_REG_DBG_FORCE_FRAME,
@@ -646,16 +643,16 @@ static struct block_defs block_ncsi_defs = {
 };
 
 static struct block_defs block_opte_defs = {
-	"opte", {false, false}, false, 0,
-	{MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
+	"opte", {true, true, false}, false, 0,
+	{MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
 	0, 0, 0, 0, 0,
 	true, false, DBG_RESET_REG_MISCS_PL_HV, 4
 };
 
 static struct block_defs block_bmb_defs = {
 	"bmb",
-	{true, true}, false, 0,
-	{DBG_BUS_CLIENT_RBCZ, DBG_BUS_CLIENT_RBCB},
+	{true, true, true}, false, 0,
+	{DBG_BUS_CLIENT_RBCZ, DBG_BUS_CLIENT_RBCB, DBG_BUS_CLIENT_RBCB},
 	BMB_REG_DBG_SELECT, BMB_REG_DBG_DWORD_ENABLE,
 	BMB_REG_DBG_SHIFT, BMB_REG_DBG_FORCE_VALID,
 	BMB_REG_DBG_FORCE_FRAME,
@@ -664,27 +661,28 @@ static struct block_defs block_bmb_defs = {
 
 static struct block_defs block_pcie_defs = {
 	"pcie",
-	{false, true}, false, 0,
-	{MAX_DBG_BUS_CLIENTS, DBG_BUS_CLIENT_RBCH},
-	PCIE_REG_DBG_COMMON_SELECT_K2,
-	PCIE_REG_DBG_COMMON_DWORD_ENABLE_K2,
-	PCIE_REG_DBG_COMMON_SHIFT_K2,
-	PCIE_REG_DBG_COMMON_FORCE_VALID_K2,
-	PCIE_REG_DBG_COMMON_FORCE_FRAME_K2,
+	{true, true, true}, false, 0,
+	{MAX_DBG_BUS_CLIENTS, DBG_BUS_CLIENT_RBCH,
+	 DBG_BUS_CLIENT_RBCH},
+	PCIE_REG_DBG_COMMON_SELECT_K2_E5,
+	PCIE_REG_DBG_COMMON_DWORD_ENABLE_K2_E5,
+	PCIE_REG_DBG_COMMON_SHIFT_K2_E5,
+	PCIE_REG_DBG_COMMON_FORCE_VALID_K2_E5,
+	PCIE_REG_DBG_COMMON_FORCE_FRAME_K2_E5,
 	false, false, MAX_DBG_RESET_REGS, 0
 };
 
 static struct block_defs block_mcp_defs = {
-	"mcp", {false, false}, false, 0,
-	{MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
+	"mcp", {true, true, true}, false, 0,
+	{MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
 	0, 0, 0, 0, 0,
 	false, false, MAX_DBG_RESET_REGS, 0
 };
 
 static struct block_defs block_mcp2_defs = {
 	"mcp2",
-	{true, true}, false, 0,
-	{DBG_BUS_CLIENT_RBCZ, DBG_BUS_CLIENT_RBCZ},
+	{true, true, true}, false, 0,
+	{DBG_BUS_CLIENT_RBCZ, DBG_BUS_CLIENT_RBCZ, DBG_BUS_CLIENT_RBCZ},
 	MCP2_REG_DBG_SELECT, MCP2_REG_DBG_DWORD_ENABLE,
 	MCP2_REG_DBG_SHIFT, MCP2_REG_DBG_FORCE_VALID,
 	MCP2_REG_DBG_FORCE_FRAME,
@@ -693,8 +691,8 @@ static struct block_defs block_mcp2_defs = {
 
 static struct block_defs block_pswhst_defs = {
 	"pswhst",
-	{true, true}, false, 0,
-	{DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP},
+	{true, true, true}, false, 0,
+	{DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP},
 	PSWHST_REG_DBG_SELECT, PSWHST_REG_DBG_DWORD_ENABLE,
 	PSWHST_REG_DBG_SHIFT, PSWHST_REG_DBG_FORCE_VALID,
 	PSWHST_REG_DBG_FORCE_FRAME,
@@ -703,8 +701,8 @@ static struct block_defs block_pswhst_defs = {
 
 static struct block_defs block_pswhst2_defs = {
 	"pswhst2",
-	{true, true}, false, 0,
-	{DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP},
+	{true, true, true}, false, 0,
+	{DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP},
 	PSWHST2_REG_DBG_SELECT, PSWHST2_REG_DBG_DWORD_ENABLE,
 	PSWHST2_REG_DBG_SHIFT, PSWHST2_REG_DBG_FORCE_VALID,
 	PSWHST2_REG_DBG_FORCE_FRAME,
@@ -713,8 +711,8 @@ static struct block_defs block_pswhst2_defs = {
 
 static struct block_defs block_pswrd_defs = {
 	"pswrd",
-	{true, true}, false, 0,
-	{DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP},
+	{true, true, true}, false, 0,
+	{DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP},
 	PSWRD_REG_DBG_SELECT, PSWRD_REG_DBG_DWORD_ENABLE,
 	PSWRD_REG_DBG_SHIFT, PSWRD_REG_DBG_FORCE_VALID,
 	PSWRD_REG_DBG_FORCE_FRAME,
@@ -723,8 +721,8 @@ static struct block_defs block_pswrd_defs = {
 
 static struct block_defs block_pswrd2_defs = {
 	"pswrd2",
-	{true, true}, false, 0,
-	{DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP},
+	{true, true, true}, false, 0,
+	{DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP},
 	PSWRD2_REG_DBG_SELECT, PSWRD2_REG_DBG_DWORD_ENABLE,
 	PSWRD2_REG_DBG_SHIFT, PSWRD2_REG_DBG_FORCE_VALID,
 	PSWRD2_REG_DBG_FORCE_FRAME,
@@ -733,8 +731,8 @@ static struct block_defs block_pswrd2_defs = {
 
 static struct block_defs block_pswwr_defs = {
 	"pswwr",
-	{true, true}, false, 0,
-	{DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP},
+	{true, true, true}, false, 0,
+	{DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP},
 	PSWWR_REG_DBG_SELECT, PSWWR_REG_DBG_DWORD_ENABLE,
 	PSWWR_REG_DBG_SHIFT, PSWWR_REG_DBG_FORCE_VALID,
 	PSWWR_REG_DBG_FORCE_FRAME,
@@ -742,16 +740,16 @@ static struct block_defs block_pswwr_defs = {
 };
 
 static struct block_defs block_pswwr2_defs = {
-	"pswwr2", {false, false}, false, 0,
-	{MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
+	"pswwr2", {true, true, true}, false, 0,
+	{MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
 	0, 0, 0, 0, 0,
 	true, false, DBG_RESET_REG_MISC_PL_HV, 3
 };
 
 static struct block_defs block_pswrq_defs = {
 	"pswrq",
-	{true, true}, false, 0,
-	{DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP},
+	{true, true, true}, false, 0,
+	{DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP},
 	PSWRQ_REG_DBG_SELECT, PSWRQ_REG_DBG_DWORD_ENABLE,
 	PSWRQ_REG_DBG_SHIFT, PSWRQ_REG_DBG_FORCE_VALID,
 	PSWRQ_REG_DBG_FORCE_FRAME,
@@ -760,8 +758,8 @@ static struct block_defs block_pswrq_defs = {
 
 static struct block_defs block_pswrq2_defs = {
 	"pswrq2",
-	{true, true}, false, 0,
-	{DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP},
+	{true, true, true}, false, 0,
+	{DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP},
 	PSWRQ2_REG_DBG_SELECT, PSWRQ2_REG_DBG_DWORD_ENABLE,
 	PSWRQ2_REG_DBG_SHIFT, PSWRQ2_REG_DBG_FORCE_VALID,
 	PSWRQ2_REG_DBG_FORCE_FRAME,
@@ -770,18 +768,19 @@ static struct block_defs block_pswrq2_defs = {
 
 static struct block_defs block_pglcs_defs = {
 	"pglcs",
-	{false, true}, false, 0,
-	{MAX_DBG_BUS_CLIENTS, DBG_BUS_CLIENT_RBCH},
-	PGLCS_REG_DBG_SELECT_K2, PGLCS_REG_DBG_DWORD_ENABLE_K2,
-	PGLCS_REG_DBG_SHIFT_K2, PGLCS_REG_DBG_FORCE_VALID_K2,
-	PGLCS_REG_DBG_FORCE_FRAME_K2,
+	{true, true, true}, false, 0,
+	{MAX_DBG_BUS_CLIENTS, DBG_BUS_CLIENT_RBCH,
+	 DBG_BUS_CLIENT_RBCH},
+	PGLCS_REG_DBG_SELECT_K2_E5, PGLCS_REG_DBG_DWORD_ENABLE_K2_E5,
+	PGLCS_REG_DBG_SHIFT_K2_E5, PGLCS_REG_DBG_FORCE_VALID_K2_E5,
+	PGLCS_REG_DBG_FORCE_FRAME_K2_E5,
 	true, false, DBG_RESET_REG_MISCS_PL_HV, 2
 };
 
 static struct block_defs block_ptu_defs = {
 	"ptu",
-	{true, true}, false, 0,
-	{DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP},
+	{true, true, true}, false, 0,
+	{DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP},
 	PTU_REG_DBG_SELECT, PTU_REG_DBG_DWORD_ENABLE,
 	PTU_REG_DBG_SHIFT, PTU_REG_DBG_FORCE_VALID,
 	PTU_REG_DBG_FORCE_FRAME,
@@ -790,8 +789,8 @@ static struct block_defs block_ptu_defs = {
 
 static struct block_defs block_dmae_defs = {
 	"dmae",
-	{true, true}, false, 0,
-	{DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP},
+	{true, true, true}, false, 0,
+	{DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP},
 	DMAE_REG_DBG_SELECT, DMAE_REG_DBG_DWORD_ENABLE,
 	DMAE_REG_DBG_SHIFT, DMAE_REG_DBG_FORCE_VALID,
 	DMAE_REG_DBG_FORCE_FRAME,
@@ -800,8 +799,8 @@ static struct block_defs block_dmae_defs = {
 
 static struct block_defs block_tcm_defs = {
 	"tcm",
-	{true, true}, true, DBG_TSTORM_ID,
-	{DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCT},
+	{true, true, true}, true, DBG_TSTORM_ID,
+	{DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCT},
 	TCM_REG_DBG_SELECT, TCM_REG_DBG_DWORD_ENABLE,
 	TCM_REG_DBG_SHIFT, TCM_REG_DBG_FORCE_VALID,
 	TCM_REG_DBG_FORCE_FRAME,
@@ -810,8 +809,8 @@ static struct block_defs block_tcm_defs = {
 
 static struct block_defs block_mcm_defs = {
 	"mcm",
-	{true, true}, true, DBG_MSTORM_ID,
-	{DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCM},
+	{true, true, true}, true, DBG_MSTORM_ID,
+	{DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCM, DBG_BUS_CLIENT_RBCM},
 	MCM_REG_DBG_SELECT, MCM_REG_DBG_DWORD_ENABLE,
 	MCM_REG_DBG_SHIFT, MCM_REG_DBG_FORCE_VALID,
 	MCM_REG_DBG_FORCE_FRAME,
@@ -820,8 +819,8 @@ static struct block_defs block_mcm_defs = {
 
 static struct block_defs block_ucm_defs = {
 	"ucm",
-	{true, true}, true, DBG_USTORM_ID,
-	{DBG_BUS_CLIENT_RBCU, DBG_BUS_CLIENT_RBCU},
+	{true, true, true}, true, DBG_USTORM_ID,
+	{DBG_BUS_CLIENT_RBCU, DBG_BUS_CLIENT_RBCU, DBG_BUS_CLIENT_RBCU},
 	UCM_REG_DBG_SELECT, UCM_REG_DBG_DWORD_ENABLE,
 	UCM_REG_DBG_SHIFT, UCM_REG_DBG_FORCE_VALID,
 	UCM_REG_DBG_FORCE_FRAME,
@@ -830,8 +829,8 @@ static struct block_defs block_ucm_defs = {
 
 static struct block_defs block_xcm_defs = {
 	"xcm",
-	{true, true}, true, DBG_XSTORM_ID,
-	{DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCX},
+	{true, true, true}, true, DBG_XSTORM_ID,
+	{DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCX},
 	XCM_REG_DBG_SELECT, XCM_REG_DBG_DWORD_ENABLE,
 	XCM_REG_DBG_SHIFT, XCM_REG_DBG_FORCE_VALID,
 	XCM_REG_DBG_FORCE_FRAME,
@@ -840,8 +839,8 @@ static struct block_defs block_xcm_defs = {
 
 static struct block_defs block_ycm_defs = {
 	"ycm",
-	{true, true}, true, DBG_YSTORM_ID,
-	{DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCY},
+	{true, true, true}, true, DBG_YSTORM_ID,
+	{DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCY, DBG_BUS_CLIENT_RBCY},
 	YCM_REG_DBG_SELECT, YCM_REG_DBG_DWORD_ENABLE,
 	YCM_REG_DBG_SHIFT, YCM_REG_DBG_FORCE_VALID,
 	YCM_REG_DBG_FORCE_FRAME,
@@ -850,8 +849,8 @@ static struct block_defs block_ycm_defs = {
 
 static struct block_defs block_pcm_defs = {
 	"pcm",
-	{true, true}, true, DBG_PSTORM_ID,
-	{DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCS},
+	{true, true, true}, true, DBG_PSTORM_ID,
+	{DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCS},
 	PCM_REG_DBG_SELECT, PCM_REG_DBG_DWORD_ENABLE,
 	PCM_REG_DBG_SHIFT, PCM_REG_DBG_FORCE_VALID,
 	PCM_REG_DBG_FORCE_FRAME,
@@ -860,8 +859,8 @@ static struct block_defs block_pcm_defs = {
 
 static struct block_defs block_qm_defs = {
 	"qm",
-	{true, true}, false, 0,
-	{DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCQ},
+	{true, true, true}, false, 0,
+	{DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCQ, DBG_BUS_CLIENT_RBCQ},
 	QM_REG_DBG_SELECT, QM_REG_DBG_DWORD_ENABLE,
 	QM_REG_DBG_SHIFT, QM_REG_DBG_FORCE_VALID,
 	QM_REG_DBG_FORCE_FRAME,
@@ -870,8 +869,8 @@ static struct block_defs block_qm_defs = {
 
 static struct block_defs block_tm_defs = {
 	"tm",
-	{true, true}, false, 0,
-	{DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCS},
+	{true, true, true}, false, 0,
+	{DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCS},
 	TM_REG_DBG_SELECT, TM_REG_DBG_DWORD_ENABLE,
 	TM_REG_DBG_SHIFT, TM_REG_DBG_FORCE_VALID,
 	TM_REG_DBG_FORCE_FRAME,
@@ -880,8 +879,8 @@ static struct block_defs block_tm_defs = {
 
 static struct block_defs block_dorq_defs = {
 	"dorq",
-	{true, true}, false, 0,
-	{DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCY},
+	{true, true, true}, false, 0,
+	{DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCY, DBG_BUS_CLIENT_RBCY},
 	DORQ_REG_DBG_SELECT, DORQ_REG_DBG_DWORD_ENABLE,
 	DORQ_REG_DBG_SHIFT, DORQ_REG_DBG_FORCE_VALID,
 	DORQ_REG_DBG_FORCE_FRAME,
@@ -890,8 +889,8 @@ static struct block_defs block_dorq_defs = {
 
 static struct block_defs block_brb_defs = {
 	"brb",
-	{true, true}, false, 0,
-	{DBG_BUS_CLIENT_RBCR, DBG_BUS_CLIENT_RBCR},
+	{true, true, true}, false, 0,
+	{DBG_BUS_CLIENT_RBCR, DBG_BUS_CLIENT_RBCR, DBG_BUS_CLIENT_RBCR},
 	BRB_REG_DBG_SELECT, BRB_REG_DBG_DWORD_ENABLE,
 	BRB_REG_DBG_SHIFT, BRB_REG_DBG_FORCE_VALID,
 	BRB_REG_DBG_FORCE_FRAME,
@@ -900,8 +899,8 @@ static struct block_defs block_brb_defs = {
 
 static struct block_defs block_src_defs = {
 	"src",
-	{true, true}, false, 0,
-	{DBG_BUS_CLIENT_RBCF, DBG_BUS_CLIENT_RBCF},
+	{true, true, true}, false, 0,
+	{DBG_BUS_CLIENT_RBCF, DBG_BUS_CLIENT_RBCF, DBG_BUS_CLIENT_RBCF},
 	SRC_REG_DBG_SELECT, SRC_REG_DBG_DWORD_ENABLE,
 	SRC_REG_DBG_SHIFT, SRC_REG_DBG_FORCE_VALID,
 	SRC_REG_DBG_FORCE_FRAME,
@@ -910,8 +909,8 @@ static struct block_defs block_src_defs = {
 
 static struct block_defs block_prs_defs = {
 	"prs",
-	{true, true}, false, 0,
-	{DBG_BUS_CLIENT_RBCR, DBG_BUS_CLIENT_RBCR},
+	{true, true, true}, false, 0,
+	{DBG_BUS_CLIENT_RBCR, DBG_BUS_CLIENT_RBCR, DBG_BUS_CLIENT_RBCR},
 	PRS_REG_DBG_SELECT, PRS_REG_DBG_DWORD_ENABLE,
 	PRS_REG_DBG_SHIFT, PRS_REG_DBG_FORCE_VALID,
 	PRS_REG_DBG_FORCE_FRAME,
@@ -920,8 +919,8 @@ static struct block_defs block_prs_defs = {
 
 static struct block_defs block_tsdm_defs = {
 	"tsdm",
-	{true, true}, true, DBG_TSTORM_ID,
-	{DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCT},
+	{true, true, true}, true, DBG_TSTORM_ID,
+	{DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCT},
 	TSDM_REG_DBG_SELECT, TSDM_REG_DBG_DWORD_ENABLE,
 	TSDM_REG_DBG_SHIFT, TSDM_REG_DBG_FORCE_VALID,
 	TSDM_REG_DBG_FORCE_FRAME,
@@ -930,8 +929,8 @@ static struct block_defs block_tsdm_defs = {
 
 static struct block_defs block_msdm_defs = {
 	"msdm",
-	{true, true}, true, DBG_MSTORM_ID,
-	{DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCM},
+	{true, true, true}, true, DBG_MSTORM_ID,
+	{DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCM, DBG_BUS_CLIENT_RBCM},
 	MSDM_REG_DBG_SELECT, MSDM_REG_DBG_DWORD_ENABLE,
 	MSDM_REG_DBG_SHIFT, MSDM_REG_DBG_FORCE_VALID,
 	MSDM_REG_DBG_FORCE_FRAME,
@@ -940,8 +939,8 @@ static struct block_defs block_msdm_defs = {
 
 static struct block_defs block_usdm_defs = {
 	"usdm",
-	{true, true}, true, DBG_USTORM_ID,
-	{DBG_BUS_CLIENT_RBCU, DBG_BUS_CLIENT_RBCU},
+	{true, true, true}, true, DBG_USTORM_ID,
+	{DBG_BUS_CLIENT_RBCU, DBG_BUS_CLIENT_RBCU, DBG_BUS_CLIENT_RBCU},
 	USDM_REG_DBG_SELECT, USDM_REG_DBG_DWORD_ENABLE,
 	USDM_REG_DBG_SHIFT, USDM_REG_DBG_FORCE_VALID,
 	USDM_REG_DBG_FORCE_FRAME,
@@ -950,8 +949,8 @@ static struct block_defs block_usdm_defs = {
 
 static struct block_defs block_xsdm_defs = {
 	"xsdm",
-	{true, true}, true, DBG_XSTORM_ID,
-	{DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCX},
+	{true, true, true}, true, DBG_XSTORM_ID,
+	{DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCX},
 	XSDM_REG_DBG_SELECT, XSDM_REG_DBG_DWORD_ENABLE,
 	XSDM_REG_DBG_SHIFT, XSDM_REG_DBG_FORCE_VALID,
 	XSDM_REG_DBG_FORCE_FRAME,
@@ -960,8 +959,8 @@ static struct block_defs block_xsdm_defs = {
 
 static struct block_defs block_ysdm_defs = {
 	"ysdm",
-	{true, true}, true, DBG_YSTORM_ID,
-	{DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCY},
+	{true, true, true}, true, DBG_YSTORM_ID,
+	{DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCY, DBG_BUS_CLIENT_RBCY},
 	YSDM_REG_DBG_SELECT, YSDM_REG_DBG_DWORD_ENABLE,
 	YSDM_REG_DBG_SHIFT, YSDM_REG_DBG_FORCE_VALID,
 	YSDM_REG_DBG_FORCE_FRAME,
@@ -970,8 +969,8 @@ static struct block_defs block_ysdm_defs = {
 
 static struct block_defs block_psdm_defs = {
 	"psdm",
-	{true, true}, true, DBG_PSTORM_ID,
-	{DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCS},
+	{true, true, true}, true, DBG_PSTORM_ID,
+	{DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCS},
 	PSDM_REG_DBG_SELECT, PSDM_REG_DBG_DWORD_ENABLE,
 	PSDM_REG_DBG_SHIFT, PSDM_REG_DBG_FORCE_VALID,
 	PSDM_REG_DBG_FORCE_FRAME,
@@ -980,8 +979,8 @@ static struct block_defs block_psdm_defs = {
 
 static struct block_defs block_tsem_defs = {
 	"tsem",
-	{true, true}, true, DBG_TSTORM_ID,
-	{DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCT},
+	{true, true, true}, true, DBG_TSTORM_ID,
+	{DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCT},
 	TSEM_REG_DBG_SELECT, TSEM_REG_DBG_DWORD_ENABLE,
 	TSEM_REG_DBG_SHIFT, TSEM_REG_DBG_FORCE_VALID,
 	TSEM_REG_DBG_FORCE_FRAME,
@@ -990,8 +989,8 @@ static struct block_defs block_tsem_defs = {
 
 static struct block_defs block_msem_defs = {
 	"msem",
-	{true, true}, true, DBG_MSTORM_ID,
-	{DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCM},
+	{true, true, true}, true, DBG_MSTORM_ID,
+	{DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCM, DBG_BUS_CLIENT_RBCM},
 	MSEM_REG_DBG_SELECT, MSEM_REG_DBG_DWORD_ENABLE,
 	MSEM_REG_DBG_SHIFT, MSEM_REG_DBG_FORCE_VALID,
 	MSEM_REG_DBG_FORCE_FRAME,
@@ -1000,8 +999,8 @@ static struct block_defs block_msem_defs = {
 
 static struct block_defs block_usem_defs = {
 	"usem",
-	{true, true}, true, DBG_USTORM_ID,
-	{DBG_BUS_CLIENT_RBCU, DBG_BUS_CLIENT_RBCU},
+	{true, true, true}, true, DBG_USTORM_ID,
+	{DBG_BUS_CLIENT_RBCU, DBG_BUS_CLIENT_RBCU, DBG_BUS_CLIENT_RBCU},
 	USEM_REG_DBG_SELECT, USEM_REG_DBG_DWORD_ENABLE,
 	USEM_REG_DBG_SHIFT, USEM_REG_DBG_FORCE_VALID,
 	USEM_REG_DBG_FORCE_FRAME,
@@ -1010,8 +1009,8 @@ static struct block_defs block_usem_defs = {
 
 static struct block_defs block_xsem_defs = {
 	"xsem",
-	{true, true}, true, DBG_XSTORM_ID,
-	{DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCX},
+	{true, true, true}, true, DBG_XSTORM_ID,
+	{DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCX},
 	XSEM_REG_DBG_SELECT, XSEM_REG_DBG_DWORD_ENABLE,
 	XSEM_REG_DBG_SHIFT, XSEM_REG_DBG_FORCE_VALID,
 	XSEM_REG_DBG_FORCE_FRAME,
@@ -1020,8 +1019,8 @@ static struct block_defs block_xsem_defs = {
 
 static struct block_defs block_ysem_defs = {
 	"ysem",
-	{true, true}, true, DBG_YSTORM_ID,
-	{DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCY},
+	{true, true, true}, true, DBG_YSTORM_ID,
+	{DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCY, DBG_BUS_CLIENT_RBCY},
 	YSEM_REG_DBG_SELECT, YSEM_REG_DBG_DWORD_ENABLE,
 	YSEM_REG_DBG_SHIFT, YSEM_REG_DBG_FORCE_VALID,
 	YSEM_REG_DBG_FORCE_FRAME,
@@ -1030,8 +1029,8 @@ static struct block_defs block_ysem_defs = {
 
 static struct block_defs block_psem_defs = {
 	"psem",
-	{true, true}, true, DBG_PSTORM_ID,
-	{DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCS},
+	{true, true, true}, true, DBG_PSTORM_ID,
+	{DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCS},
 	PSEM_REG_DBG_SELECT, PSEM_REG_DBG_DWORD_ENABLE,
 	PSEM_REG_DBG_SHIFT, PSEM_REG_DBG_FORCE_VALID,
 	PSEM_REG_DBG_FORCE_FRAME,
@@ -1040,8 +1039,8 @@ static struct block_defs block_psem_defs = {
 
 static struct block_defs block_rss_defs = {
 	"rss",
-	{true, true}, false, 0,
-	{DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCT},
+	{true, true, true}, false, 0,
+	{DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCT},
 	RSS_REG_DBG_SELECT, RSS_REG_DBG_DWORD_ENABLE,
 	RSS_REG_DBG_SHIFT, RSS_REG_DBG_FORCE_VALID,
 	RSS_REG_DBG_FORCE_FRAME,
@@ -1050,8 +1049,8 @@ static struct block_defs block_rss_defs = {
 
 static struct block_defs block_tmld_defs = {
 	"tmld",
-	{true, true}, false, 0,
-	{DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCM},
+	{true, true, true}, false, 0,
+	{DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCM, DBG_BUS_CLIENT_RBCM},
 	TMLD_REG_DBG_SELECT, TMLD_REG_DBG_DWORD_ENABLE,
 	TMLD_REG_DBG_SHIFT, TMLD_REG_DBG_FORCE_VALID,
 	TMLD_REG_DBG_FORCE_FRAME,
@@ -1060,8 +1059,8 @@ static struct block_defs block_tmld_defs = {
 
 static struct block_defs block_muld_defs = {
 	"muld",
-	{true, true}, false, 0,
-	{DBG_BUS_CLIENT_RBCU, DBG_BUS_CLIENT_RBCU},
+	{true, true, true}, false, 0,
+	{DBG_BUS_CLIENT_RBCU, DBG_BUS_CLIENT_RBCU, DBG_BUS_CLIENT_RBCU},
 	MULD_REG_DBG_SELECT, MULD_REG_DBG_DWORD_ENABLE,
 	MULD_REG_DBG_SHIFT, MULD_REG_DBG_FORCE_VALID,
 	MULD_REG_DBG_FORCE_FRAME,
@@ -1070,8 +1069,9 @@ static struct block_defs block_muld_defs = {
 
 static struct block_defs block_yuld_defs = {
 	"yuld",
-	{true, true}, false, 0,
-	{DBG_BUS_CLIENT_RBCU, DBG_BUS_CLIENT_RBCU},
+	{true, true, false}, false, 0,
+	{DBG_BUS_CLIENT_RBCU, DBG_BUS_CLIENT_RBCU,
+	 MAX_DBG_BUS_CLIENTS},
 	YULD_REG_DBG_SELECT_BB_K2, YULD_REG_DBG_DWORD_ENABLE_BB_K2,
 	YULD_REG_DBG_SHIFT_BB_K2, YULD_REG_DBG_FORCE_VALID_BB_K2,
 	YULD_REG_DBG_FORCE_FRAME_BB_K2,
@@ -1081,18 +1081,40 @@ static struct block_defs block_yuld_defs = {
 
 static struct block_defs block_xyld_defs = {
 	"xyld",
-	{true, true}, false, 0,
-	{DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCX},
+	{true, true, true}, false, 0,
+	{DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCX},
 	XYLD_REG_DBG_SELECT, XYLD_REG_DBG_DWORD_ENABLE,
 	XYLD_REG_DBG_SHIFT, XYLD_REG_DBG_FORCE_VALID,
 	XYLD_REG_DBG_FORCE_FRAME,
 	true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_2, 12
 };
 
+static struct block_defs block_ptld_defs = {
+	"ptld",
+	{false, false, true}, false, 0,
+	{MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, DBG_BUS_CLIENT_RBCT},
+	PTLD_REG_DBG_SELECT_E5, PTLD_REG_DBG_DWORD_ENABLE_E5,
+	PTLD_REG_DBG_SHIFT_E5, PTLD_REG_DBG_FORCE_VALID_E5,
+	PTLD_REG_DBG_FORCE_FRAME_E5,
+	true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_2,
+	28
+};
+
+static struct block_defs block_ypld_defs = {
+	"ypld",
+	{false, false, true}, false, 0,
+	{MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, DBG_BUS_CLIENT_RBCS},
+	YPLD_REG_DBG_SELECT_E5, YPLD_REG_DBG_DWORD_ENABLE_E5,
+	YPLD_REG_DBG_SHIFT_E5, YPLD_REG_DBG_FORCE_VALID_E5,
+	YPLD_REG_DBG_FORCE_FRAME_E5,
+	true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_2,
+	27
+};
+
 static struct block_defs block_prm_defs = {
 	"prm",
-	{true, true}, false, 0,
-	{DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCM},
+	{true, true, true}, false, 0,
+	{DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCM, DBG_BUS_CLIENT_RBCM},
 	PRM_REG_DBG_SELECT, PRM_REG_DBG_DWORD_ENABLE,
 	PRM_REG_DBG_SHIFT, PRM_REG_DBG_FORCE_VALID,
 	PRM_REG_DBG_FORCE_FRAME,
@@ -1101,8 +1123,8 @@ static struct block_defs block_prm_defs = {
 
 static struct block_defs block_pbf_pb1_defs = {
 	"pbf_pb1",
-	{true, true}, false, 0,
-	{DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCV},
+	{true, true, true}, false, 0,
+	{DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCV, DBG_BUS_CLIENT_RBCV},
 	PBF_PB1_REG_DBG_SELECT, PBF_PB1_REG_DBG_DWORD_ENABLE,
 	PBF_PB1_REG_DBG_SHIFT, PBF_PB1_REG_DBG_FORCE_VALID,
 	PBF_PB1_REG_DBG_FORCE_FRAME,
@@ -1112,8 +1134,8 @@ static struct block_defs block_pbf_pb1_defs = {
 
 static struct block_defs block_pbf_pb2_defs = {
 	"pbf_pb2",
-	{true, true}, false, 0,
-	{DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCV},
+	{true, true, true}, false, 0,
+	{DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCV, DBG_BUS_CLIENT_RBCV},
 	PBF_PB2_REG_DBG_SELECT, PBF_PB2_REG_DBG_DWORD_ENABLE,
 	PBF_PB2_REG_DBG_SHIFT, PBF_PB2_REG_DBG_FORCE_VALID,
 	PBF_PB2_REG_DBG_FORCE_FRAME,
@@ -1123,8 +1145,8 @@ static struct block_defs block_pbf_pb2_defs = {
 
 static struct block_defs block_rpb_defs = {
 	"rpb",
-	{true, true}, false, 0,
-	{DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCM},
+	{true, true, true}, false, 0,
+	{DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCM, DBG_BUS_CLIENT_RBCM},
 	RPB_REG_DBG_SELECT, RPB_REG_DBG_DWORD_ENABLE,
 	RPB_REG_DBG_SHIFT, RPB_REG_DBG_FORCE_VALID,
 	RPB_REG_DBG_FORCE_FRAME,
@@ -1133,8 +1155,8 @@ static struct block_defs block_rpb_defs = {
 
 static struct block_defs block_btb_defs = {
 	"btb",
-	{true, true}, false, 0,
-	{DBG_BUS_CLIENT_RBCR, DBG_BUS_CLIENT_RBCV},
+	{true, true, true}, false, 0,
+	{DBG_BUS_CLIENT_RBCR, DBG_BUS_CLIENT_RBCV, DBG_BUS_CLIENT_RBCV},
 	BTB_REG_DBG_SELECT, BTB_REG_DBG_DWORD_ENABLE,
 	BTB_REG_DBG_SHIFT, BTB_REG_DBG_FORCE_VALID,
 	BTB_REG_DBG_FORCE_FRAME,
@@ -1143,8 +1165,8 @@ static struct block_defs block_btb_defs = {
 
 static struct block_defs block_pbf_defs = {
 	"pbf",
-	{true, true}, false, 0,
-	{DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCV},
+	{true, true, true}, false, 0,
+	{DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCV, DBG_BUS_CLIENT_RBCV},
 	PBF_REG_DBG_SELECT, PBF_REG_DBG_DWORD_ENABLE,
 	PBF_REG_DBG_SHIFT, PBF_REG_DBG_FORCE_VALID,
 	PBF_REG_DBG_FORCE_FRAME,
@@ -1153,8 +1175,8 @@ static struct block_defs block_pbf_defs = {
 
 static struct block_defs block_rdif_defs = {
 	"rdif",
-	{true, true}, false, 0,
-	{DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCM},
+	{true, true, true}, false, 0,
+	{DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCM, DBG_BUS_CLIENT_RBCM},
 	RDIF_REG_DBG_SELECT, RDIF_REG_DBG_DWORD_ENABLE,
 	RDIF_REG_DBG_SHIFT, RDIF_REG_DBG_FORCE_VALID,
 	RDIF_REG_DBG_FORCE_FRAME,
@@ -1163,8 +1185,8 @@ static struct block_defs block_rdif_defs = {
 
 static struct block_defs block_tdif_defs = {
 	"tdif",
-	{true, true}, false, 0,
-	{DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCS},
+	{true, true, true}, false, 0,
+	{DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCS},
 	TDIF_REG_DBG_SELECT, TDIF_REG_DBG_DWORD_ENABLE,
 	TDIF_REG_DBG_SHIFT, TDIF_REG_DBG_FORCE_VALID,
 	TDIF_REG_DBG_FORCE_FRAME,
@@ -1173,8 +1195,8 @@ static struct block_defs block_tdif_defs = {
 
 static struct block_defs block_cdu_defs = {
 	"cdu",
-	{true, true}, false, 0,
-	{DBG_BUS_CLIENT_RBCF, DBG_BUS_CLIENT_RBCF},
+	{true, true, true}, false, 0,
+	{DBG_BUS_CLIENT_RBCF, DBG_BUS_CLIENT_RBCF, DBG_BUS_CLIENT_RBCF},
 	CDU_REG_DBG_SELECT, CDU_REG_DBG_DWORD_ENABLE,
 	CDU_REG_DBG_SHIFT, CDU_REG_DBG_FORCE_VALID,
 	CDU_REG_DBG_FORCE_FRAME,
@@ -1183,8 +1205,8 @@ static struct block_defs block_cdu_defs = {
 
 static struct block_defs block_ccfc_defs = {
 	"ccfc",
-	{true, true}, false, 0,
-	{DBG_BUS_CLIENT_RBCF, DBG_BUS_CLIENT_RBCF},
+	{true, true, true}, false, 0,
+	{DBG_BUS_CLIENT_RBCF, DBG_BUS_CLIENT_RBCF, DBG_BUS_CLIENT_RBCF},
 	CCFC_REG_DBG_SELECT, CCFC_REG_DBG_DWORD_ENABLE,
 	CCFC_REG_DBG_SHIFT, CCFC_REG_DBG_FORCE_VALID,
 	CCFC_REG_DBG_FORCE_FRAME,
@@ -1193,8 +1215,8 @@ static struct block_defs block_ccfc_defs = {
 
 static struct block_defs block_tcfc_defs = {
 	"tcfc",
-	{true, true}, false, 0,
-	{DBG_BUS_CLIENT_RBCF, DBG_BUS_CLIENT_RBCF},
+	{true, true, true}, false, 0,
+	{DBG_BUS_CLIENT_RBCF, DBG_BUS_CLIENT_RBCF, DBG_BUS_CLIENT_RBCF},
 	TCFC_REG_DBG_SELECT, TCFC_REG_DBG_DWORD_ENABLE,
 	TCFC_REG_DBG_SHIFT, TCFC_REG_DBG_FORCE_VALID,
 	TCFC_REG_DBG_FORCE_FRAME,
@@ -1203,8 +1225,8 @@ static struct block_defs block_tcfc_defs = {
 
 static struct block_defs block_igu_defs = {
 	"igu",
-	{true, true}, false, 0,
-	{DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP},
+	{true, true, true}, false, 0,
+	{DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP},
 	IGU_REG_DBG_SELECT, IGU_REG_DBG_DWORD_ENABLE,
 	IGU_REG_DBG_SHIFT, IGU_REG_DBG_FORCE_VALID,
 	IGU_REG_DBG_FORCE_FRAME,
@@ -1213,42 +1235,79 @@ static struct block_defs block_igu_defs = {
 
 static struct block_defs block_cau_defs = {
 	"cau",
-	{true, true}, false, 0,
-	{DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP},
+	{true, true, true}, false, 0,
+	{DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP},
 	CAU_REG_DBG_SELECT, CAU_REG_DBG_DWORD_ENABLE,
 	CAU_REG_DBG_SHIFT, CAU_REG_DBG_FORCE_VALID,
 	CAU_REG_DBG_FORCE_FRAME,
 	true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_2, 19
 };
 
+static struct block_defs block_rgfs_defs = {
+	"rgfs", {false, false, true}, false, 0,
+	{MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
+	0, 0, 0, 0, 0,
+	true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_2, 29
+};
+
+static struct block_defs block_rgsrc_defs = {
+	"rgsrc",
+	{false, false, true}, false, 0,
+	{MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, DBG_BUS_CLIENT_RBCH},
+	RGSRC_REG_DBG_SELECT_E5, RGSRC_REG_DBG_DWORD_ENABLE_E5,
+	RGSRC_REG_DBG_SHIFT_E5, RGSRC_REG_DBG_FORCE_VALID_E5,
+	RGSRC_REG_DBG_FORCE_FRAME_E5,
+	true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1,
+	30
+};
+
+static struct block_defs block_tgfs_defs = {
+	"tgfs", {false, false, true}, false, 0,
+	{MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
+	0, 0, 0, 0, 0,
+	true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_2, 30
+};
+
+static struct block_defs block_tgsrc_defs = {
+	"tgsrc",
+	{false, false, true}, false, 0,
+	{MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, DBG_BUS_CLIENT_RBCV},
+	TGSRC_REG_DBG_SELECT_E5, TGSRC_REG_DBG_DWORD_ENABLE_E5,
+	TGSRC_REG_DBG_SHIFT_E5, TGSRC_REG_DBG_FORCE_VALID_E5,
+	TGSRC_REG_DBG_FORCE_FRAME_E5,
+	true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1,
+	31
+};
+
 static struct block_defs block_umac_defs = {
 	"umac",
-	{false, true}, false, 0,
-	{MAX_DBG_BUS_CLIENTS, DBG_BUS_CLIENT_RBCZ},
-	UMAC_REG_DBG_SELECT_K2, UMAC_REG_DBG_DWORD_ENABLE_K2,
-	UMAC_REG_DBG_SHIFT_K2, UMAC_REG_DBG_FORCE_VALID_K2,
-	UMAC_REG_DBG_FORCE_FRAME_K2,
+	{true, true, true}, false, 0,
+	{MAX_DBG_BUS_CLIENTS, DBG_BUS_CLIENT_RBCZ,
+	 DBG_BUS_CLIENT_RBCZ},
+	UMAC_REG_DBG_SELECT_K2_E5, UMAC_REG_DBG_DWORD_ENABLE_K2_E5,
+	UMAC_REG_DBG_SHIFT_K2_E5, UMAC_REG_DBG_FORCE_VALID_K2_E5,
+	UMAC_REG_DBG_FORCE_FRAME_K2_E5,
 	true, false, DBG_RESET_REG_MISCS_PL_HV, 6
 };
 
 static struct block_defs block_xmac_defs = {
-	"xmac", {false, false}, false, 0,
-	{MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
+	"xmac", {true, false, false}, false, 0,
+	{MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
 	0, 0, 0, 0, 0,
 	false, false, MAX_DBG_RESET_REGS, 0
 };
 
 static struct block_defs block_dbg_defs = {
-	"dbg", {false, false}, false, 0,
-	{MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
+	"dbg", {true, true, true}, false, 0,
+	{MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
 	0, 0, 0, 0, 0,
 	true, true, DBG_RESET_REG_MISC_PL_PDA_VAUX, 3
 };
 
 static struct block_defs block_nig_defs = {
 	"nig",
-	{true, true}, false, 0,
-	{DBG_BUS_CLIENT_RBCN, DBG_BUS_CLIENT_RBCN},
+	{true, true, true}, false, 0,
+	{DBG_BUS_CLIENT_RBCN, DBG_BUS_CLIENT_RBCN, DBG_BUS_CLIENT_RBCN},
 	NIG_REG_DBG_SELECT, NIG_REG_DBG_DWORD_ENABLE,
 	NIG_REG_DBG_SHIFT, NIG_REG_DBG_FORCE_VALID,
 	NIG_REG_DBG_FORCE_FRAME,
@@ -1257,139 +1316,106 @@ static struct block_defs block_nig_defs = {
 
 static struct block_defs block_wol_defs = {
 	"wol",
-	{false, true}, false, 0,
-	{MAX_DBG_BUS_CLIENTS, DBG_BUS_CLIENT_RBCZ},
-	WOL_REG_DBG_SELECT_K2, WOL_REG_DBG_DWORD_ENABLE_K2,
-	WOL_REG_DBG_SHIFT_K2, WOL_REG_DBG_FORCE_VALID_K2,
-	WOL_REG_DBG_FORCE_FRAME_K2,
+	{false, true, true}, false, 0,
+	{MAX_DBG_BUS_CLIENTS, DBG_BUS_CLIENT_RBCZ, DBG_BUS_CLIENT_RBCZ},
+	WOL_REG_DBG_SELECT_K2_E5, WOL_REG_DBG_DWORD_ENABLE_K2_E5,
+	WOL_REG_DBG_SHIFT_K2_E5, WOL_REG_DBG_FORCE_VALID_K2_E5,
+	WOL_REG_DBG_FORCE_FRAME_K2_E5,
 	true, true, DBG_RESET_REG_MISC_PL_PDA_VAUX, 7
 };
 
 static struct block_defs block_bmbn_defs = {
 	"bmbn",
-	{false, true}, false, 0,
-	{MAX_DBG_BUS_CLIENTS, DBG_BUS_CLIENT_RBCB},
-	BMBN_REG_DBG_SELECT_K2, BMBN_REG_DBG_DWORD_ENABLE_K2,
-	BMBN_REG_DBG_SHIFT_K2, BMBN_REG_DBG_FORCE_VALID_K2,
-	BMBN_REG_DBG_FORCE_FRAME_K2,
+	{false, true, true}, false, 0,
+	{MAX_DBG_BUS_CLIENTS, DBG_BUS_CLIENT_RBCB,
+	 DBG_BUS_CLIENT_RBCB},
+	BMBN_REG_DBG_SELECT_K2_E5, BMBN_REG_DBG_DWORD_ENABLE_K2_E5,
+	BMBN_REG_DBG_SHIFT_K2_E5, BMBN_REG_DBG_FORCE_VALID_K2_E5,
+	BMBN_REG_DBG_FORCE_FRAME_K2_E5,
 	false, false, MAX_DBG_RESET_REGS, 0
 };
 
 static struct block_defs block_ipc_defs = {
-	"ipc", {false, false}, false, 0,
-	{MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
+	"ipc", {true, true, true}, false, 0,
+	{MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
 	0, 0, 0, 0, 0,
 	true, false, DBG_RESET_REG_MISCS_PL_UA, 8
 };
 
 static struct block_defs block_nwm_defs = {
 	"nwm",
-	{false, true}, false, 0,
-	{MAX_DBG_BUS_CLIENTS, DBG_BUS_CLIENT_RBCW},
-	NWM_REG_DBG_SELECT_K2, NWM_REG_DBG_DWORD_ENABLE_K2,
-	NWM_REG_DBG_SHIFT_K2, NWM_REG_DBG_FORCE_VALID_K2,
-	NWM_REG_DBG_FORCE_FRAME_K2,
+	{false, true, true}, false, 0,
+	{MAX_DBG_BUS_CLIENTS, DBG_BUS_CLIENT_RBCW, DBG_BUS_CLIENT_RBCW},
+	NWM_REG_DBG_SELECT_K2_E5, NWM_REG_DBG_DWORD_ENABLE_K2_E5,
+	NWM_REG_DBG_SHIFT_K2_E5, NWM_REG_DBG_FORCE_VALID_K2_E5,
+	NWM_REG_DBG_FORCE_FRAME_K2_E5,
 	true, false, DBG_RESET_REG_MISCS_PL_HV_2, 0
 };
 
 static struct block_defs block_nws_defs = {
 	"nws",
-	{false, true}, false, 0,
-	{MAX_DBG_BUS_CLIENTS, DBG_BUS_CLIENT_RBCW},
-	NWS_REG_DBG_SELECT_K2, NWS_REG_DBG_DWORD_ENABLE_K2,
-	NWS_REG_DBG_SHIFT_K2, NWS_REG_DBG_FORCE_VALID_K2,
-	NWS_REG_DBG_FORCE_FRAME_K2,
+	{false, true, true}, false, 0,
+	{MAX_DBG_BUS_CLIENTS, DBG_BUS_CLIENT_RBCW, DBG_BUS_CLIENT_RBCW},
+	NWS_REG_DBG_SELECT_K2_E5, NWS_REG_DBG_DWORD_ENABLE_K2_E5,
+	NWS_REG_DBG_SHIFT_K2_E5, NWS_REG_DBG_FORCE_VALID_K2_E5,
+	NWS_REG_DBG_FORCE_FRAME_K2_E5,
 	true, false, DBG_RESET_REG_MISCS_PL_HV, 12
 };
 
 static struct block_defs block_ms_defs = {
 	"ms",
-	{false, true}, false, 0,
-	{MAX_DBG_BUS_CLIENTS, DBG_BUS_CLIENT_RBCZ},
-	MS_REG_DBG_SELECT_K2, MS_REG_DBG_DWORD_ENABLE_K2,
-	MS_REG_DBG_SHIFT_K2, MS_REG_DBG_FORCE_VALID_K2,
-	MS_REG_DBG_FORCE_FRAME_K2,
+	{false, true, true}, false, 0,
+	{MAX_DBG_BUS_CLIENTS, DBG_BUS_CLIENT_RBCZ, DBG_BUS_CLIENT_RBCZ},
+	MS_REG_DBG_SELECT_K2_E5, MS_REG_DBG_DWORD_ENABLE_K2_E5,
+	MS_REG_DBG_SHIFT_K2_E5, MS_REG_DBG_FORCE_VALID_K2_E5,
+	MS_REG_DBG_FORCE_FRAME_K2_E5,
 	true, false, DBG_RESET_REG_MISCS_PL_HV, 13
 };
 
 static struct block_defs block_phy_pcie_defs = {
 	"phy_pcie",
-	{false, true}, false, 0,
-	{MAX_DBG_BUS_CLIENTS, DBG_BUS_CLIENT_RBCH},
-	PCIE_REG_DBG_COMMON_SELECT_K2,
-	PCIE_REG_DBG_COMMON_DWORD_ENABLE_K2,
-	PCIE_REG_DBG_COMMON_SHIFT_K2,
-	PCIE_REG_DBG_COMMON_FORCE_VALID_K2,
-	PCIE_REG_DBG_COMMON_FORCE_FRAME_K2,
+	{false, true, true}, false, 0,
+	{MAX_DBG_BUS_CLIENTS, DBG_BUS_CLIENT_RBCH,
+	 DBG_BUS_CLIENT_RBCH},
+	PCIE_REG_DBG_COMMON_SELECT_K2_E5,
+	PCIE_REG_DBG_COMMON_DWORD_ENABLE_K2_E5,
+	PCIE_REG_DBG_COMMON_SHIFT_K2_E5,
+	PCIE_REG_DBG_COMMON_FORCE_VALID_K2_E5,
+	PCIE_REG_DBG_COMMON_FORCE_FRAME_K2_E5,
 	false, false, MAX_DBG_RESET_REGS, 0
 };
 
 static struct block_defs block_led_defs = {
-	"led", {false, false}, false, 0,
-	{MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
+	"led", {false, true, true}, false, 0,
+	{MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
 	0, 0, 0, 0, 0,
 	true, false, DBG_RESET_REG_MISCS_PL_HV, 14
 };
 
 static struct block_defs block_avs_wrap_defs = {
-	"avs_wrap", {false, false}, false, 0,
-	{MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
+	"avs_wrap", {false, true, false}, false, 0,
+	{MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
 	0, 0, 0, 0, 0,
 	true, false, DBG_RESET_REG_MISCS_PL_UA, 11
 };
 
-static struct block_defs block_rgfs_defs = {
-	"rgfs", {false, false}, false, 0,
-	{MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
-	0, 0, 0, 0, 0,
-	false, false, MAX_DBG_RESET_REGS, 0
-};
-
-static struct block_defs block_rgsrc_defs = {
-	"rgsrc", {false, false}, false, 0,
-	{MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
-	0, 0, 0, 0, 0,
-	false, false, MAX_DBG_RESET_REGS, 0
-};
-
-static struct block_defs block_tgfs_defs = {
-	"tgfs", {false, false}, false, 0,
-	{MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
-	0, 0, 0, 0, 0,
-	false, false, MAX_DBG_RESET_REGS, 0
-};
-
-static struct block_defs block_tgsrc_defs = {
-	"tgsrc", {false, false}, false, 0,
-	{MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
-	0, 0, 0, 0, 0,
-	false, false, MAX_DBG_RESET_REGS, 0
-};
-
-static struct block_defs block_ptld_defs = {
-	"ptld", {false, false}, false, 0,
-	{MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
-	0, 0, 0, 0, 0,
-	false, false, MAX_DBG_RESET_REGS, 0
-};
-
-static struct block_defs block_ypld_defs = {
-	"ypld", {false, false}, false, 0,
-	{MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
+static struct block_defs block_pxpreqbus_defs = {
+	"pxpreqbus", {false, false, false}, false, 0,
+	{MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
 	0, 0, 0, 0, 0,
 	false, false, MAX_DBG_RESET_REGS, 0
 };
 
 static struct block_defs block_misc_aeu_defs = {
-	"misc_aeu", {false, false}, false, 0,
-	{MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
+	"misc_aeu", {true, true, true}, false, 0,
+	{MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
 	0, 0, 0, 0, 0,
 	false, false, MAX_DBG_RESET_REGS, 0
 };
 
 static struct block_defs block_bar0_map_defs = {
-	"bar0_map", {false, false}, false, 0,
-	{MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
+	"bar0_map", {true, true, true}, false, 0,
+	{MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
 	0, 0, 0, 0, 0,
 	false, false, MAX_DBG_RESET_REGS, 0
 };
@@ -1480,164 +1506,160 @@ static struct block_defs *s_block_defs[MAX_BLOCK_ID] = {
 	&block_phy_pcie_defs,
 	&block_led_defs,
 	&block_avs_wrap_defs,
+	&block_pxpreqbus_defs,
 	&block_misc_aeu_defs,
 	&block_bar0_map_defs,
 };
 
 static struct platform_defs s_platform_defs[] = {
-	{"asic", 1},
-	{"reserved", 0},
-	{"reserved2", 0},
-	{"reserved3", 0}
+	{"asic", 1, 256, 32768},
+	{"reserved", 0, 0, 0},
+	{"reserved2", 0, 0, 0},
+	{"reserved3", 0, 0, 0}
 };
 
 static struct grc_param_defs s_grc_param_defs[] = {
 	/* DBG_GRC_PARAM_DUMP_TSTORM */
-	{{1, 1}, 0, 1, false, 1, 1},
+	{{1, 1, 1}, 0, 1, false, 1, 1},
 
 	/* DBG_GRC_PARAM_DUMP_MSTORM */
-	{{1, 1}, 0, 1, false, 1, 1},
+	{{1, 1, 1}, 0, 1, false, 1, 1},
 
 	/* DBG_GRC_PARAM_DUMP_USTORM */
-	{{1, 1}, 0, 1, false, 1, 1},
+	{{1, 1, 1}, 0, 1, false, 1, 1},
 
 	/* DBG_GRC_PARAM_DUMP_XSTORM */
-	{{1, 1}, 0, 1, false, 1, 1},
+	{{1, 1, 1}, 0, 1, false, 1, 1},
 
 	/* DBG_GRC_PARAM_DUMP_YSTORM */
-	{{1, 1}, 0, 1, false, 1, 1},
+	{{1, 1, 1}, 0, 1, false, 1, 1},
 
 	/* DBG_GRC_PARAM_DUMP_PSTORM */
-	{{1, 1}, 0, 1, false, 1, 1},
+	{{1, 1, 1}, 0, 1, false, 1, 1},
 
 	/* DBG_GRC_PARAM_DUMP_REGS */
-	{{1, 1}, 0, 1, false, 0, 1},
+	{{1, 1, 1}, 0, 1, false, 0, 1},
 
 	/* DBG_GRC_PARAM_DUMP_RAM */
-	{{1, 1}, 0, 1, false, 0, 1},
+	{{1, 1, 1}, 0, 1, false, 0, 1},
 
 	/* DBG_GRC_PARAM_DUMP_PBUF */
-	{{1, 1}, 0, 1, false, 0, 1},
+	{{1, 1, 1}, 0, 1, false, 0, 1},
 
 	/* DBG_GRC_PARAM_DUMP_IOR */
-	{{0, 0}, 0, 1, false, 0, 1},
+	{{0, 0, 0}, 0, 1, false, 0, 1},
 
 	/* DBG_GRC_PARAM_DUMP_VFC */
-	{{0, 0}, 0, 1, false, 0, 1},
+	{{0, 0, 0}, 0, 1, false, 0, 1},
 
 	/* DBG_GRC_PARAM_DUMP_CM_CTX */
-	{{1, 1}, 0, 1, false, 0, 1},
+	{{1, 1, 1}, 0, 1, false, 0, 1},
 
 	/* DBG_GRC_PARAM_DUMP_ILT */
-	{{1, 1}, 0, 1, false, 0, 1},
+	{{1, 1, 1}, 0, 1, false, 0, 1},
 
 	/* DBG_GRC_PARAM_DUMP_RSS */
-	{{1, 1}, 0, 1, false, 0, 1},
+	{{1, 1, 1}, 0, 1, false, 0, 1},
 
 	/* DBG_GRC_PARAM_DUMP_CAU */
-	{{1, 1}, 0, 1, false, 0, 1},
+	{{1, 1, 1}, 0, 1, false, 0, 1},
 
 	/* DBG_GRC_PARAM_DUMP_QM */
-	{{1, 1}, 0, 1, false, 0, 1},
+	{{1, 1, 1}, 0, 1, false, 0, 1},
 
 	/* DBG_GRC_PARAM_DUMP_MCP */
-	{{1, 1}, 0, 1, false, 0, 1},
+	{{1, 1, 1}, 0, 1, false, 0, 1},
 
 	/* DBG_GRC_PARAM_RESERVED */
-	{{1, 1}, 0, 1, false, 0, 1},
+	{{1, 1, 1}, 0, 1, false, 0, 1},
 
 	/* DBG_GRC_PARAM_DUMP_CFC */
-	{{1, 1}, 0, 1, false, 0, 1},
+	{{1, 1, 1}, 0, 1, false, 0, 1},
 
 	/* DBG_GRC_PARAM_DUMP_IGU */
-	{{1, 1}, 0, 1, false, 0, 1},
+	{{1, 1, 1}, 0, 1, false, 0, 1},
 
 	/* DBG_GRC_PARAM_DUMP_BRB */
-	{{0, 0}, 0, 1, false, 0, 1},
+	{{0, 0, 0}, 0, 1, false, 0, 1},
 
 	/* DBG_GRC_PARAM_DUMP_BTB */
-	{{0, 0}, 0, 1, false, 0, 1},
+	{{0, 0, 0}, 0, 1, false, 0, 1},
 
 	/* DBG_GRC_PARAM_DUMP_BMB */
-	{{0, 0}, 0, 1, false, 0, 1},
+	{{0, 0, 0}, 0, 1, false, 0, 1},
 
 	/* DBG_GRC_PARAM_DUMP_NIG */
-	{{1, 1}, 0, 1, false, 0, 1},
+	{{1, 1, 1}, 0, 1, false, 0, 1},
 
 	/* DBG_GRC_PARAM_DUMP_MULD */
-	{{1, 1}, 0, 1, false, 0, 1},
+	{{1, 1, 1}, 0, 1, false, 0, 1},
 
 	/* DBG_GRC_PARAM_DUMP_PRS */
-	{{1, 1}, 0, 1, false, 0, 1},
+	{{1, 1, 1}, 0, 1, false, 0, 1},
 
 	/* DBG_GRC_PARAM_DUMP_DMAE */
-	{{1, 1}, 0, 1, false, 0, 1},
+	{{1, 1, 1}, 0, 1, false, 0, 1},
 
 	/* DBG_GRC_PARAM_DUMP_TM */
-	{{1, 1}, 0, 1, false, 0, 1},
+	{{1, 1, 1}, 0, 1, false, 0, 1},
 
 	/* DBG_GRC_PARAM_DUMP_SDM */
-	{{1, 1}, 0, 1, false, 0, 1},
+	{{1, 1, 1}, 0, 1, false, 0, 1},
 
 	/* DBG_GRC_PARAM_DUMP_DIF */
-	{{1, 1}, 0, 1, false, 0, 1},
+	{{1, 1, 1}, 0, 1, false, 0, 1},
 
 	/* DBG_GRC_PARAM_DUMP_STATIC */
-	{{1, 1}, 0, 1, false, 0, 1},
+	{{1, 1, 1}, 0, 1, false, 0, 1},
 
 	/* DBG_GRC_PARAM_UNSTALL */
-	{{0, 0}, 0, 1, false, 0, 0},
+	{{0, 0, 0}, 0, 1, false, 0, 0},
 
 	/* DBG_GRC_PARAM_NUM_LCIDS */
-	{{MAX_LCIDS, MAX_LCIDS}, 1, MAX_LCIDS, false, MAX_LCIDS,
+	{{MAX_LCIDS, MAX_LCIDS, MAX_LCIDS}, 1, MAX_LCIDS, false, MAX_LCIDS,
 	 MAX_LCIDS},
 
 	/* DBG_GRC_PARAM_NUM_LTIDS */
-	{{MAX_LTIDS, MAX_LTIDS}, 1, MAX_LTIDS, false, MAX_LTIDS,
+	{{MAX_LTIDS, MAX_LTIDS, MAX_LTIDS}, 1, MAX_LTIDS, false, MAX_LTIDS,
 	 MAX_LTIDS},
 
 	/* DBG_GRC_PARAM_EXCLUDE_ALL */
-	{{0, 0}, 0, 1, true, 0, 0},
+	{{0, 0, 0}, 0, 1, true, 0, 0},
 
 	/* DBG_GRC_PARAM_CRASH */
-	{{0, 0}, 0, 1, true, 0, 0},
+	{{0, 0, 0}, 0, 1, true, 0, 0},
 
 	/* DBG_GRC_PARAM_PARITY_SAFE */
-	{{0, 0}, 0, 1, false, 1, 0},
+	{{0, 0, 0}, 0, 1, false, 1, 0},
 
 	/* DBG_GRC_PARAM_DUMP_CM */
-	{{1, 1}, 0, 1, false, 0, 1},
+	{{1, 1, 1}, 0, 1, false, 0, 1},
 
 	/* DBG_GRC_PARAM_DUMP_PHY */
-	{{1, 1}, 0, 1, false, 0, 1},
+	{{1, 1, 1}, 0, 1, false, 0, 1},
 
 	/* DBG_GRC_PARAM_NO_MCP */
-	{{0, 0}, 0, 1, false, 0, 0},
+	{{0, 0, 0}, 0, 1, false, 0, 0},
 
 	/* DBG_GRC_PARAM_NO_FW_VER */
-	{{0, 0}, 0, 1, false, 0, 0}
+	{{0, 0, 0}, 0, 1, false, 0, 0}
 };
 
 static struct rss_mem_defs s_rss_mem_defs[] = {
-	{ "rss_mem_cid", "rss_cid", 0,
-	  {256, 320},
-	  {32, 32} },
+	{ "rss_mem_cid", "rss_cid", 0, 32,
+	  {256, 320, 512} },
 
-	{ "rss_mem_key_msb", "rss_key", 1024,
-	  {128, 208},
-	  {256, 256} },
+	{ "rss_mem_key_msb", "rss_key", 1024, 256,
+	  {128, 208, 257} },
 
-	{ "rss_mem_key_lsb", "rss_key", 2048,
-	  {128, 208},
-	  {64, 64} },
+	{ "rss_mem_key_lsb", "rss_key", 2048, 64,
+	  {128, 208, 257} },
 
-	{ "rss_mem_info", "rss_info", 3072,
-	  {128, 208},
-	  {16, 16} },
+	{ "rss_mem_info", "rss_info", 3072, 16,
+	  {128, 208, 256} },
 
-	{ "rss_mem_ind", "rss_ind", 4096,
-	  {16384, 26624},
-	  {16, 16} }
+	{ "rss_mem_ind", "rss_ind", 4096, 16,
+	  {16384, 26624, 32768} }
 };
 
 static struct vfc_ram_defs s_vfc_ram_defs[] = {
@@ -1650,72 +1672,75 @@ static struct vfc_ram_defs s_vfc_ram_defs[] = {
 static struct big_ram_defs s_big_ram_defs[] = {
 	{ "BRB", MEM_GROUP_BRB_MEM, MEM_GROUP_BRB_RAM, DBG_GRC_PARAM_DUMP_BRB,
 	  BRB_REG_BIG_RAM_ADDRESS, BRB_REG_BIG_RAM_DATA,
-	  {4800, 5632} },
+	  MISC_REG_BLOCK_256B_EN, {0, 0, 0},
+	  {153600, 180224, 282624} },
 
 	{ "BTB", MEM_GROUP_BTB_MEM, MEM_GROUP_BTB_RAM, DBG_GRC_PARAM_DUMP_BTB,
 	  BTB_REG_BIG_RAM_ADDRESS, BTB_REG_BIG_RAM_DATA,
-	  {2880, 3680} },
+	  MISC_REG_BLOCK_256B_EN, {0, 1, 1},
+	  {92160, 117760, 168960} },
 
 	{ "BMB", MEM_GROUP_BMB_MEM, MEM_GROUP_BMB_RAM, DBG_GRC_PARAM_DUMP_BMB,
 	  BMB_REG_BIG_RAM_ADDRESS, BMB_REG_BIG_RAM_DATA,
-	  {1152, 1152} }
+	  MISCS_REG_BLOCK_256B_EN, {0, 0, 0},
+	  {36864, 36864, 36864} }
 };
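
The big RAM tables now carry the total RAM size per chip plus a register/bit pair telling whether the device runs with 256-byte blocks; the old fixed block count is gone. A sketch of the block-size decision mirroring the BIT() test in qed_grc_dump_big_ram() below (the register read is a placeholder, not a real accessor):

static unsigned int sketch_rd(unsigned int addr)
{
	return addr & 1;	/* stand-in for a real register read */
}

/* 256-byte blocks when the per-chip enable bit is set, else 128 */
static unsigned int sketch_big_ram_block_size(unsigned int is_256b_reg_addr,
					      unsigned int bit_offset)
{
	unsigned int reg_val = sketch_rd(is_256b_reg_addr);

	return (reg_val & (1u << bit_offset)) ? 256 : 128;
}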
 
 static struct reset_reg_defs s_reset_regs_defs[] = {
 	/* DBG_RESET_REG_MISCS_PL_UA */
-	{ MISCS_REG_RESET_PL_UA, 0x0,
-	  {true, true} },
+	{ MISCS_REG_RESET_PL_UA,
+	  {true, true, true}, {0x0, 0x0, 0x0} },
 
 	/* DBG_RESET_REG_MISCS_PL_HV */
-	{ MISCS_REG_RESET_PL_HV, 0x0,
-	  {true, true} },
+	{ MISCS_REG_RESET_PL_HV,
+	  {true, true, true}, {0x0, 0x400, 0x600} },
 
 	/* DBG_RESET_REG_MISCS_PL_HV_2 */
-	{ MISCS_REG_RESET_PL_HV_2_K2, 0x0,
-	  {false, true} },
+	{ MISCS_REG_RESET_PL_HV_2_K2_E5,
+	  {false, true, true}, {0x0, 0x0, 0x0} },
 
 	/* DBG_RESET_REG_MISC_PL_UA */
-	{ MISC_REG_RESET_PL_UA, 0x0,
-	  {true, true} },
+	{ MISC_REG_RESET_PL_UA,
+	  {true, true, true}, {0x0, 0x0, 0x0} },
 
 	/* DBG_RESET_REG_MISC_PL_HV */
-	{ MISC_REG_RESET_PL_HV, 0x0,
-	  {true, true} },
+	{ MISC_REG_RESET_PL_HV,
+	  {true, true, true}, {0x0, 0x0, 0x0} },
 
 	/* DBG_RESET_REG_MISC_PL_PDA_VMAIN_1 */
-	{ MISC_REG_RESET_PL_PDA_VMAIN_1, 0x4404040,
-	  {true, true} },
+	{ MISC_REG_RESET_PL_PDA_VMAIN_1,
+	  {true, true, true}, {0x4404040, 0x4404040, 0x404040} },
 
 	/* DBG_RESET_REG_MISC_PL_PDA_VMAIN_2 */
-	{ MISC_REG_RESET_PL_PDA_VMAIN_2, 0x7c00007,
-	  {true, true} },
+	{ MISC_REG_RESET_PL_PDA_VMAIN_2,
+	  {true, true, true}, {0x7, 0x7c00007, 0x5c08007} },
 
 	/* DBG_RESET_REG_MISC_PL_PDA_VAUX */
-	{ MISC_REG_RESET_PL_PDA_VAUX, 0x2,
-	  {true, true} },
+	{ MISC_REG_RESET_PL_PDA_VAUX,
+	  {true, true, true}, {0x2, 0x2, 0x2} },
 };
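
Unreset values likewise become per-chip, replacing the single scalar. When taking blocks out of reset, the chip-specific baseline is OR'd into the block bits collected beforehand (see qed_grc_unreset_blocks() below). A small sketch of that accumulation, with made-up names:

struct sketch_reset_reg {
	int exists[3];			/* per-chip: register present? */
	unsigned int unreset_val[3];	/* per-chip baseline value */
};

/* Returns the value to write back, or 0 if the register is absent
 * on this chip and the write should be skipped entirely.
 */
static unsigned int sketch_unreset_value(const struct sketch_reset_reg *r,
					 int chip_id,
					 unsigned int block_bits)
{
	if (!r->exists[chip_id])
		return 0;
	return block_bits | r->unreset_val[chip_id];
}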
 
 static struct phy_defs s_phy_defs[] = {
 	{"nw_phy", NWS_REG_NWS_CMU_K2,
-	 PHY_NW_IP_REG_PHY0_TOP_TBUS_ADDR_7_0_K2,
-	 PHY_NW_IP_REG_PHY0_TOP_TBUS_ADDR_15_8_K2,
-	 PHY_NW_IP_REG_PHY0_TOP_TBUS_DATA_7_0_K2,
-	 PHY_NW_IP_REG_PHY0_TOP_TBUS_DATA_11_8_K2},
-	{"sgmii_phy", MS_REG_MS_CMU_K2,
-	 PHY_SGMII_IP_REG_AHB_CMU_CSR_0_X132_K2,
-	 PHY_SGMII_IP_REG_AHB_CMU_CSR_0_X133_K2,
-	 PHY_SGMII_IP_REG_AHB_CMU_CSR_0_X130_K2,
-	 PHY_SGMII_IP_REG_AHB_CMU_CSR_0_X131_K2},
-	{"pcie_phy0", PHY_PCIE_REG_PHY0_K2,
-	 PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X132_K2,
-	 PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X133_K2,
-	 PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X130_K2,
-	 PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X131_K2},
-	{"pcie_phy1", PHY_PCIE_REG_PHY1_K2,
-	 PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X132_K2,
-	 PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X133_K2,
-	 PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X130_K2,
-	 PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X131_K2},
+	 PHY_NW_IP_REG_PHY0_TOP_TBUS_ADDR_7_0_K2_E5,
+	 PHY_NW_IP_REG_PHY0_TOP_TBUS_ADDR_15_8_K2_E5,
+	 PHY_NW_IP_REG_PHY0_TOP_TBUS_DATA_7_0_K2_E5,
+	 PHY_NW_IP_REG_PHY0_TOP_TBUS_DATA_11_8_K2_E5},
+	{"sgmii_phy", MS_REG_MS_CMU_K2_E5,
+	 PHY_SGMII_IP_REG_AHB_CMU_CSR_0_X132_K2_E5,
+	 PHY_SGMII_IP_REG_AHB_CMU_CSR_0_X133_K2_E5,
+	 PHY_SGMII_IP_REG_AHB_CMU_CSR_0_X130_K2_E5,
+	 PHY_SGMII_IP_REG_AHB_CMU_CSR_0_X131_K2_E5},
+	{"pcie_phy0", PHY_PCIE_REG_PHY0_K2_E5,
+	 PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X132_K2_E5,
+	 PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X133_K2_E5,
+	 PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X130_K2_E5,
+	 PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X131_K2_E5},
+	{"pcie_phy1", PHY_PCIE_REG_PHY1_K2_E5,
+	 PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X132_K2_E5,
+	 PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X133_K2_E5,
+	 PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X130_K2_E5,
+	 PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X131_K2_E5},
 };
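
Each PHY entry names four TBUS registers: two address halves (bits 7:0 and 15:8) and two data halves (bits 7:0 and 11:8). The dump walks the TBUS address space by programming both halves and reading back both data halves; one plausible way to take a single sample, sketched with placeholder accessors:

static void sketch_wr(unsigned int addr, unsigned int val)
{
	(void)addr;
	(void)val;	/* stand-in for a real register write */
}

static unsigned int sketch_rd8(unsigned int addr)
{
	return addr & 0xff;	/* stand-in for a real register read */
}

/* Program the split TBUS address, then combine the two data halves.
 * Whether the halves are stored combined or as separate bytes is a
 * dump-format detail; combining them here is only for illustration.
 */
static unsigned int sketch_tbus_sample(unsigned int addr_lo_reg,
				       unsigned int addr_hi_reg,
				       unsigned int data_lo_reg,
				       unsigned int data_hi_reg,
				       unsigned int tbus_addr)
{
	sketch_wr(addr_lo_reg, tbus_addr & 0xff);
	sketch_wr(addr_hi_reg, (tbus_addr >> 8) & 0xff);

	return (sketch_rd8(data_hi_reg) << 8) | sketch_rd8(data_lo_reg);
}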
 
 /**************************** Private Functions ******************************/
@@ -1774,7 +1799,9 @@ static enum dbg_status qed_dbg_dev_init(struct qed_hwfn *p_hwfn,
 	/* Initializes the GRC parameters */
 	qed_dbg_grc_init_params(p_hwfn);
 
-	dev_data->initialized = true;
+	dev_data->use_dmae = true;
+	dev_data->num_regs_read = 0;
+	dev_data->initialized = 1;
 
 	return DBG_STATUS_OK;
 }
@@ -1807,7 +1834,7 @@ static void qed_read_fw_info(struct qed_hwfn *p_hwfn,
 	 * The address is located in the last line of the Storm RAM.
 	 */
 	addr = storm->sem_fast_mem_addr + SEM_FAST_REG_INT_RAM +
-	       DWORDS_TO_BYTES(SEM_FAST_REG_INT_RAM_SIZE) -
+	       DWORDS_TO_BYTES(SEM_FAST_REG_INT_RAM_SIZE_BB_K2) -
 	       sizeof(fw_info_location);
 	dest = (u32 *)&fw_info_location;
 
@@ -2071,8 +2098,7 @@ static u32 qed_dump_common_global_params(struct qed_hwfn *p_hwfn,
 /* Writes the "last" section (including CRC) to the specified buffer at the
  * given offset. Returns the dumped size in dwords.
  */
-static u32 qed_dump_last_section(struct qed_hwfn *p_hwfn,
-				 u32 *dump_buf, u32 offset, bool dump)
+static u32 qed_dump_last_section(u32 *dump_buf, u32 offset, bool dump)
 {
 	u32 start_offset = offset;
 
@@ -2235,7 +2261,8 @@ static bool qed_grc_is_mem_included(struct qed_hwfn *p_hwfn,
 	case MEM_GROUP_CFC_MEM:
 	case MEM_GROUP_CONN_CFC_MEM:
 	case MEM_GROUP_TASK_CFC_MEM:
-		return qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_CFC);
+		return qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_CFC) ||
+		       qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_CM_CTX);
 	case MEM_GROUP_IGU_MEM:
 	case MEM_GROUP_IGU_MSIX:
 		return qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_IGU);
@@ -2293,7 +2320,8 @@ static void qed_grc_unreset_blocks(struct qed_hwfn *p_hwfn,
 	for (block_id = 0; block_id < MAX_BLOCK_ID; block_id++) {
 		struct block_defs *block = s_block_defs[block_id];
 
-		if (block->has_reset_bit && block->unreset)
+		if (block->exists[dev_data->chip_id] && block->has_reset_bit &&
+		    block->unreset)
 			reg_val[block->reset_reg] |=
 			    BIT(block->reset_bit_offset);
 	}
@@ -2303,7 +2331,8 @@ static void qed_grc_unreset_blocks(struct qed_hwfn *p_hwfn,
 		if (!s_reset_regs_defs[i].exists[dev_data->chip_id])
 			continue;
 
-		reg_val[i] |= s_reset_regs_defs[i].unreset_val;
+		reg_val[i] |=
+			s_reset_regs_defs[i].unreset_val[dev_data->chip_id];
 
 		if (reg_val[i])
 			qed_wr(p_hwfn,
@@ -2413,6 +2442,18 @@ static u32 qed_grc_dump_regs_hdr(u32 *dump_buf,
 	return offset;
 }
 
+/* Reads len consecutive registers into the given buffer.
+ * Both the addr and len arguments are specified in dwords.
+ */
+void qed_read_regs(struct qed_hwfn *p_hwfn,
+		   struct qed_ptt *p_ptt, u32 *buf, u32 addr, u32 len)
+{
+	u32 i;
+
+	for (i = 0; i < len; i++)
+		buf[i] = qed_rd(p_hwfn, p_ptt, DWORDS_TO_BYTES(addr + i));
+}
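
qed_read_regs() takes its address in dwords while the GRC window is byte-addressed, hence the DWORDS_TO_BYTES() scaling inside the loop. A self-contained, runnable sketch of just that address arithmetic (macro names are prefixed to mark them as local stand-ins for the driver's macros):

#include <stdio.h>

#define SKETCH_BYTES_IN_DWORD		4
#define SKETCH_DWORDS_TO_BYTES(d)	((d) * SKETCH_BYTES_IN_DWORD)
#define SKETCH_BYTES_TO_DWORDS(b)	((b) / SKETCH_BYTES_IN_DWORD)

int main(void)
{
	unsigned int byte_reg = 0xe20000;	/* some byte-addressed register */
	unsigned int dword_addr = SKETCH_BYTES_TO_DWORDS(byte_reg);
	unsigned int i;

	/* dword index i maps to byte address 4 * (addr + i) */
	for (i = 0; i < 4; i++)
		printf("dword %u -> byte 0x%x\n",
		       i, SKETCH_DWORDS_TO_BYTES(dword_addr + i));
	return 0;
}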
+
 /* Dumps the GRC registers in the specified address range.
  * Returns the dumped size in dwords.
  * The addr and len arguments are specified in dwords.
@@ -2422,15 +2463,39 @@ static u32 qed_grc_dump_addr_range(struct qed_hwfn *p_hwfn,
 				   u32 *dump_buf,
 				   bool dump, u32 addr, u32 len, bool wide_bus)
 {
-	u32 byte_addr = DWORDS_TO_BYTES(addr), offset = 0, i;
+	struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
 
 	if (!dump)
 		return len;
 
-	for (i = 0; i < len; i++, byte_addr += BYTES_IN_DWORD, offset++)
-		*(dump_buf + offset) = qed_rd(p_hwfn, p_ptt, byte_addr);
+	/* Print log if needed */
+	dev_data->num_regs_read += len;
+	if (dev_data->num_regs_read >=
+	    s_platform_defs[dev_data->platform_id].log_thresh) {
+		DP_VERBOSE(p_hwfn,
+			   QED_MSG_DEBUG,
+			   "Dumping %d registers...\n",
+			   dev_data->num_regs_read);
+		dev_data->num_regs_read = 0;
+	}
 
-	return offset;
+	/* Try reading using DMAE */
+	if (dev_data->use_dmae &&
+	    (len >= s_platform_defs[dev_data->platform_id].dmae_thresh ||
+	     wide_bus)) {
+		if (!qed_dmae_grc2host(p_hwfn, p_ptt, DWORDS_TO_BYTES(addr),
+				       (u64)(uintptr_t)(dump_buf), len, 0))
+			return len;
+		dev_data->use_dmae = 0;
+		DP_VERBOSE(p_hwfn,
+			   QED_MSG_DEBUG,
+			   "Failed reading from chip via DMAE, using GRC instead\n");
+	}
+
+	/* Read registers */
+	qed_read_regs(p_hwfn, p_ptt, dump_buf, addr, len);
+
+	return len;
 }
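
The rewritten range dump prefers one bulk DMAE transfer when the range is long enough (or sits on a wide bus) and falls back to dword-by-dword GRC reads on the first DMAE failure; the fallback is sticky via use_dmae, which qed_dbg_dev_init() sets to true. The control flow in isolation, as a hedged self-contained sketch (all helpers are hypothetical stand-ins):

/* Returns 0 on success, like the real DMAE call; here it always fails */
static int sketch_dmae_read(unsigned int *buf, unsigned int addr,
			    unsigned int len)
{
	(void)buf;
	(void)addr;
	(void)len;
	return -1;
}

static void sketch_grc_read(unsigned int *buf, unsigned int addr,
			    unsigned int len)
{
	unsigned int i;

	for (i = 0; i < len; i++)
		buf[i] = addr + i;	/* stand-in for a register read */
}

static unsigned int sketch_dump_range(unsigned int *buf, unsigned int addr,
				      unsigned int len, int wide_bus,
				      int *use_dmae, unsigned int dmae_thresh)
{
	if (*use_dmae && (len >= dmae_thresh || wide_bus)) {
		if (!sketch_dmae_read(buf, addr, len))
			return len;
		*use_dmae = 0;	/* sticky: no DMAE retries this dump */
	}

	sketch_grc_read(buf, addr, len);
	return len;
}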
 
 /* Dumps GRC registers sequence header. Returns the dumped size in dwords.
@@ -2630,9 +2695,6 @@ static u32 qed_grc_dump_registers(struct qed_hwfn *p_hwfn,
 	chip = &s_chip_defs[dev_data->chip_id];
 	chip_platform = &chip->per_platform[dev_data->platform_id];
 
-	if (dump)
-		DP_VERBOSE(p_hwfn, QED_MSG_DEBUG, "Dumping registers...\n");
-
 	while (input_offset <
 	       s_dbg_arrays[BIN_BUF_DBG_DUMP_REG].size_in_dwords) {
 		const struct dbg_dump_split_hdr *split_hdr;
@@ -2966,22 +3028,12 @@ static u32 qed_grc_dump_mem_hdr(struct qed_hwfn *p_hwfn,
 
 		offset += qed_dump_str_param(dump_buf + offset,
 					     dump, "name", buf);
-		if (dump)
-			DP_VERBOSE(p_hwfn,
-				   QED_MSG_DEBUG,
-				   "Dumping %d registers from %s...\n",
-				   len, buf);
 	} else {
 		/* Dump address */
 		u32 addr_in_bytes = DWORDS_TO_BYTES(addr);
 
 		offset += qed_dump_num_param(dump_buf + offset,
 					     dump, "addr", addr_in_bytes);
-		if (dump && len > 64)
-			DP_VERBOSE(p_hwfn,
-				   QED_MSG_DEBUG,
-				   "Dumping %d registers from address 0x%x...\n",
-				   len, addr_in_bytes);
 	}
 
 	/* Dump len */
@@ -3530,17 +3582,16 @@ static u32 qed_grc_dump_rss(struct qed_hwfn *p_hwfn,
 	u8 rss_mem_id;
 
 	for (rss_mem_id = 0; rss_mem_id < NUM_RSS_MEM_TYPES; rss_mem_id++) {
-		u32 rss_addr, num_entries, entry_width, total_dwords, i;
+		u32 rss_addr, num_entries, total_dwords;
 		struct rss_mem_defs *rss_defs;
-		u32 addr, size;
+		u32 addr, num_dwords_to_read;
 		bool packed;
 
 		rss_defs = &s_rss_mem_defs[rss_mem_id];
 		rss_addr = rss_defs->addr;
 		num_entries = rss_defs->num_entries[dev_data->chip_id];
-		entry_width = rss_defs->entry_width[dev_data->chip_id];
-		total_dwords = (num_entries * entry_width) / 32;
-		packed = (entry_width == 16);
+		total_dwords = (num_entries * rss_defs->entry_width) / 32;
+		packed = (rss_defs->entry_width == 16);
 
 		offset += qed_grc_dump_mem_hdr(p_hwfn,
 					       dump_buf + offset,
@@ -3548,7 +3599,7 @@ static u32 qed_grc_dump_rss(struct qed_hwfn *p_hwfn,
 					       rss_defs->mem_name,
 					       0,
 					       total_dwords,
-					       entry_width,
+					       rss_defs->entry_width,
 					       packed,
 					       rss_defs->type_name, false, 0);
 
@@ -3559,16 +3610,20 @@ static u32 qed_grc_dump_rss(struct qed_hwfn *p_hwfn,
 		}
 
 		addr = BYTES_TO_DWORDS(RSS_REG_RSS_RAM_DATA);
-		size = RSS_REG_RSS_RAM_DATA_SIZE;
-		for (i = 0; i < total_dwords; i += size, rss_addr++) {
+		while (total_dwords) {
+			num_dwords_to_read = min_t(u32,
+						   RSS_REG_RSS_RAM_DATA_SIZE,
+						   total_dwords);
 			qed_wr(p_hwfn, p_ptt, RSS_REG_RSS_RAM_ADDR, rss_addr);
 			offset += qed_grc_dump_addr_range(p_hwfn,
 							  p_ptt,
 							  dump_buf + offset,
 							  dump,
 							  addr,
-							  size,
+							  num_dwords_to_read,
 							  false);
+			total_dwords -= num_dwords_to_read;
+			rss_addr++;
 		}
 	}
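
The indexed for-loop becomes a while-loop that consumes total_dwords in chunks capped at the RAM data window, advancing the RAM address register once per chunk; unlike the old fixed-stride loop, this also handles a final partial chunk. The generic pattern, sketched:

static unsigned int sketch_min_u32(unsigned int a, unsigned int b)
{
	return a < b ? a : b;
}

static void sketch_chunked_read(unsigned int total_dwords,
				unsigned int window_dwords)
{
	unsigned int ram_addr = 0;

	while (total_dwords) {
		unsigned int n = sketch_min_u32(window_dwords, total_dwords);

		/* select window at ram_addr, then read n dwords */
		total_dwords -= n;
		ram_addr++;
	}
}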
 
@@ -3581,14 +3636,18 @@ static u32 qed_grc_dump_big_ram(struct qed_hwfn *p_hwfn,
 				u32 *dump_buf, bool dump, u8 big_ram_id)
 {
 	struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
-	u32 total_blocks, ram_size, offset = 0, i;
+	u32 block_size, ram_size, offset = 0, reg_val, i;
 	char mem_name[12] = "???_BIG_RAM";
 	char type_name[8] = "???_RAM";
 	struct big_ram_defs *big_ram;
 
 	big_ram = &s_big_ram_defs[big_ram_id];
-	total_blocks = big_ram->num_of_blocks[dev_data->chip_id];
-	ram_size = total_blocks * BIG_RAM_BLOCK_SIZE_DWORDS;
+	ram_size = big_ram->ram_size[dev_data->chip_id];
+
+	reg_val = qed_rd(p_hwfn, p_ptt, big_ram->is_256b_reg_addr);
+	block_size = reg_val &
+		     BIT(big_ram->is_256b_bit_offset[dev_data->chip_id]) ? 256
+									 : 128;
 
 	strncpy(type_name, big_ram->instance_name,
 		strlen(big_ram->instance_name));
@@ -3602,7 +3661,7 @@ static u32 qed_grc_dump_big_ram(struct qed_hwfn *p_hwfn,
 				       mem_name,
 				       0,
 				       ram_size,
-				       BIG_RAM_BLOCK_SIZE_BYTES * 8,
+				       block_size * 8,
 				       false, type_name, false, 0);
 
 	/* Read and dump Big RAM data */
@@ -3610,12 +3669,13 @@ static u32 qed_grc_dump_big_ram(struct qed_hwfn *p_hwfn,
 		return offset + ram_size;
 
 	/* Dump Big RAM */
-	for (i = 0; i < total_blocks / 2; i++) {
+	for (i = 0; i < DIV_ROUND_UP(ram_size, BRB_REG_BIG_RAM_DATA_SIZE);
+	     i++) {
 		u32 addr, len;
 
 		qed_wr(p_hwfn, p_ptt, big_ram->addr_reg_addr, i);
 		addr = BYTES_TO_DWORDS(big_ram->data_reg_addr);
-		len = 2 * BIG_RAM_BLOCK_SIZE_DWORDS;
+		len = BRB_REG_BIG_RAM_DATA_SIZE;
 		offset += qed_grc_dump_addr_range(p_hwfn,
 						  p_ptt,
 						  dump_buf + offset,
@@ -3649,7 +3709,7 @@ static u32 qed_grc_dump_mcp(struct qed_hwfn *p_hwfn,
 				   dump,
 				   NULL,
 				   BYTES_TO_DWORDS(MCP_REG_SCRATCH),
-				   MCP_REG_SCRATCH_SIZE,
+				   MCP_REG_SCRATCH_SIZE_BB_K2,
 				   false, 0, false, "MCP", false, 0);
 
 	/* Dump MCP cpu_reg_file */
@@ -3710,7 +3770,6 @@ static u32 qed_grc_dump_phy(struct qed_hwfn *p_hwfn,
 			       phy_defs->tbus_data_lo_addr;
 		data_hi_addr = phy_defs->base_addr +
 			       phy_defs->tbus_data_hi_addr;
-		bytes_buf = (u8 *)(dump_buf + offset);
 
 		if (snprintf(mem_name, sizeof(mem_name), "tbus_%s",
 			     phy_defs->phy_name) < 0)
@@ -3730,6 +3789,7 @@ static u32 qed_grc_dump_phy(struct qed_hwfn *p_hwfn,
 			continue;
 		}
 
+		bytes_buf = (u8 *)(dump_buf + offset);
 		for (tbus_hi_offset = 0;
 		     tbus_hi_offset < (NUM_PHY_TBUS_ADDRESSES >> 8);
 		     tbus_hi_offset++) {
@@ -3778,19 +3838,17 @@ static u32 qed_grc_dump_static_debug(struct qed_hwfn *p_hwfn,
 	struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
 	u32 block_id, line_id, offset = 0;
 
-	/* Skip static debug if a debug bus recording is in progress */
-	if (qed_rd(p_hwfn, p_ptt, DBG_REG_DBG_BLOCK_ON))
+	/* Don't dump static debug if a debug bus recording is in progress */
+	if (dump && qed_rd(p_hwfn, p_ptt, DBG_REG_DBG_BLOCK_ON))
 		return 0;
 
 	if (dump) {
-		DP_VERBOSE(p_hwfn,
-			   QED_MSG_DEBUG, "Dumping static debug data...\n");
-
 		/* Disable all blocks debug output */
 		for (block_id = 0; block_id < MAX_BLOCK_ID; block_id++) {
 			struct block_defs *block = s_block_defs[block_id];
 
-			if (block->has_dbg_bus[dev_data->chip_id])
+			if (block->dbg_client_id[dev_data->chip_id] !=
+			    MAX_DBG_BUS_CLIENTS)
 				qed_wr(p_hwfn, p_ptt, block->dbg_enable_addr,
 				       0);
 		}
@@ -3811,12 +3869,12 @@ static u32 qed_grc_dump_static_debug(struct qed_hwfn *p_hwfn,
 		u32 block_dwords, addr, len;
 		u8 dbg_client_id;
 
-		if (!block->has_dbg_bus[dev_data->chip_id])
+		if (block->dbg_client_id[dev_data->chip_id] ==
+		    MAX_DBG_BUS_CLIENTS)
 			continue;
 
-		block_desc =
-			get_dbg_bus_block_desc(p_hwfn,
-					       (enum block_id)block_id);
+		block_desc = get_dbg_bus_block_desc(p_hwfn,
+						    (enum block_id)block_id);
 		block_dwords = NUM_DBG_LINES(block_desc) *
 			       STATIC_DEBUG_LINE_DWORDS;
 
@@ -4044,7 +4102,7 @@ static enum dbg_status qed_grc_dump(struct qed_hwfn *p_hwfn,
 						    dump_buf + offset, dump);
 
 	/* Dump last section */
-	offset += qed_dump_last_section(p_hwfn, dump_buf, offset, dump);
+	offset += qed_dump_last_section(dump_buf, offset, dump);
 
 	if (dump) {
 		/* Unstall storms */
@@ -4253,30 +4311,33 @@ qed_idle_chk_dump_rule_entries(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt,
 		if (!check_rule && dump)
 			continue;
 
+		if (!dump) {
+			u32 entry_dump_size =
+				qed_idle_chk_dump_failure(p_hwfn,
+							  p_ptt,
+							  dump_buf + offset,
+							  false,
+							  rule->rule_id,
+							  rule,
+							  0,
+							  NULL);
+
+			offset += num_reg_entries * entry_dump_size;
+			(*num_failing_rules) += num_reg_entries;
+			continue;
+		}
+
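In the sizing pass (!dump) every entry of a rule dumps the same number of dwords, so the size is probed once and multiplied by num_reg_entries — reserving worst-case space for all entries rather than a single one. The same refactor in isolation, sketched:

static unsigned int sketch_entry_dump_size(void)
{
	return 8;	/* stand-in for a real per-entry size probe */
}

static unsigned int sketch_rule_size(unsigned int num_entries, int dump,
				     unsigned int *num_failing_rules)
{
	if (!dump) {
		/* sizing pass: one probe covers all entries */
		*num_failing_rules += num_entries;
		return num_entries * sketch_entry_dump_size();
	}

	/* a real dump pass would iterate the entries here */
	return 0;
}
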
 		/* Go over all register entries (number of entries is the same
 		 * for all condition registers).
 		 */
 		for (entry_id = 0; entry_id < num_reg_entries; entry_id++) {
 			u32 next_reg_offset = 0;
 
-			if (!dump) {
-				offset += qed_idle_chk_dump_failure(p_hwfn,
-							p_ptt,
-							dump_buf + offset,
-							false,
-							rule->rule_id,
-							rule,
-							entry_id,
-							NULL);
-				(*num_failing_rules)++;
-				break;
-			}
-
 			/* Read current entry of all condition registers */
 			for (reg_id = 0; reg_id < rule->num_cond_regs;
 			     reg_id++) {
 				const struct dbg_idle_chk_cond_reg *reg =
-				    &cond_regs[reg_id];
+					&cond_regs[reg_id];
 				u32 padded_entry_size, addr;
 				bool wide_bus;
 
@@ -4291,9 +4352,9 @@ qed_idle_chk_dump_rule_entries(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt,
 				if (reg->num_entries > 1 ||
 				    reg->start_entry > 0) {
 					padded_entry_size =
-					    reg->entry_size > 1 ?
-					    roundup_pow_of_two(reg->entry_size)
-					    : 1;
+					   reg->entry_size > 1 ?
+					   roundup_pow_of_two(reg->entry_size) :
+					   1;
 					addr += (reg->start_entry + entry_id) *
 						padded_entry_size;
 				}
@@ -4329,7 +4390,6 @@ qed_idle_chk_dump_rule_entries(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt,
 							entry_id,
 							cond_reg_values);
 				(*num_failing_rules)++;
-				break;
 			}
 		}
 	}
@@ -4402,7 +4462,7 @@ static u32 qed_idle_chk_dump(struct qed_hwfn *p_hwfn,
 				   dump, "num_rules", num_failing_rules);
 
 	/* Dump last section */
-	offset += qed_dump_last_section(p_hwfn, dump_buf, offset, dump);
+	offset += qed_dump_last_section(dump_buf, offset, dump);
 
 	return offset;
 }
@@ -4474,7 +4534,7 @@ static enum dbg_status qed_nvram_read(struct qed_hwfn *p_hwfn,
 				       (nvram_offset_bytes +
 					read_offset) |
 				       (bytes_to_copy <<
-					DRV_MB_PARAM_NVM_LEN_SHIFT),
+					DRV_MB_PARAM_NVM_LEN_OFFSET),
 				       &ret_mcp_resp, &ret_mcp_param,
 				       &ret_read_size,
 				       (u32 *)((u8 *)ret_buf + read_offset)))
@@ -4701,7 +4761,7 @@ static enum dbg_status qed_mcp_trace_dump(struct qed_hwfn *p_hwfn,
 		offset += trace_meta_size_dwords;
 
 	/* Dump last section */
-	offset += qed_dump_last_section(p_hwfn, dump_buf, offset, dump);
+	offset += qed_dump_last_section(dump_buf, offset, dump);
 
 	*num_dumped_dwords = offset;
 
@@ -4717,7 +4777,7 @@ static enum dbg_status qed_reg_fifo_dump(struct qed_hwfn *p_hwfn,
 					 u32 *dump_buf,
 					 bool dump, u32 *num_dumped_dwords)
 {
-	u32 dwords_read, size_param_offset, offset = 0;
+	u32 dwords_read, size_param_offset, offset = 0, addr, len;
 	bool fifo_has_data;
 
 	*num_dumped_dwords = 0;
@@ -4753,14 +4813,18 @@ static enum dbg_status qed_reg_fifo_dump(struct qed_hwfn *p_hwfn,
 	 * buffer size since more entries could be added to the buffer as we are
 	 * emptying it.
 	 */
+	addr = BYTES_TO_DWORDS(GRC_REG_TRACE_FIFO);
+	len = REG_FIFO_ELEMENT_DWORDS;
 	for (dwords_read = 0;
 	     fifo_has_data && dwords_read < REG_FIFO_DEPTH_DWORDS;
-	     dwords_read += REG_FIFO_ELEMENT_DWORDS, offset +=
-	     REG_FIFO_ELEMENT_DWORDS) {
-		if (qed_dmae_grc2host(p_hwfn, p_ptt, GRC_REG_TRACE_FIFO,
-				      (u64)(uintptr_t)(&dump_buf[offset]),
-				      REG_FIFO_ELEMENT_DWORDS, 0))
-			return DBG_STATUS_DMAE_FAILED;
+	     dwords_read += REG_FIFO_ELEMENT_DWORDS) {
+		offset += qed_grc_dump_addr_range(p_hwfn,
+						  p_ptt,
+						  dump_buf + offset,
+						  true,
+						  addr,
+						  len,
+						  true);
 		fifo_has_data = qed_rd(p_hwfn, p_ptt,
 				       GRC_REG_TRACE_FIFO_VALID_DATA) > 0;
 	}
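
Both FIFO dumps now route through qed_grc_dump_addr_range() with wide_bus set, picking up the DMAE-with-GRC-fallback path instead of failing hard on a DMAE error. The drain pattern itself is unchanged: read one element at a time while the valid-data register is nonzero, bounded by the FIFO depth so concurrent producers cannot hold the loop open forever. In self-contained sketch form:

#define SKETCH_ELEMENT_DWORDS	2
#define SKETCH_DEPTH_DWORDS	32

static int sketch_fifo_has_data(void)
{
	static int remaining = 5;	/* pretend 5 elements are queued */

	return remaining-- > 0;
}

/* buf must hold at least SKETCH_DEPTH_DWORDS dwords */
static unsigned int sketch_drain_fifo(unsigned int *buf)
{
	unsigned int dwords_read = 0, offset = 0, i;
	int has_data = sketch_fifo_has_data();

	while (has_data && dwords_read < SKETCH_DEPTH_DWORDS) {
		for (i = 0; i < SKETCH_ELEMENT_DWORDS; i++)
			buf[offset++] = 0;	/* stand-in element read */
		dwords_read += SKETCH_ELEMENT_DWORDS;
		has_data = sketch_fifo_has_data();
	}

	return offset;
}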
@@ -4769,7 +4833,7 @@ static enum dbg_status qed_reg_fifo_dump(struct qed_hwfn *p_hwfn,
 			   dwords_read);
 out:
 	/* Dump last section */
-	offset += qed_dump_last_section(p_hwfn, dump_buf, offset, dump);
+	offset += qed_dump_last_section(dump_buf, offset, dump);
 
 	*num_dumped_dwords = offset;
 
@@ -4782,7 +4846,7 @@ static enum dbg_status qed_igu_fifo_dump(struct qed_hwfn *p_hwfn,
 					 u32 *dump_buf,
 					 bool dump, u32 *num_dumped_dwords)
 {
-	u32 dwords_read, size_param_offset, offset = 0;
+	u32 dwords_read, size_param_offset, offset = 0, addr, len;
 	bool fifo_has_data;
 
 	*num_dumped_dwords = 0;
@@ -4818,16 +4882,19 @@ static enum dbg_status qed_igu_fifo_dump(struct qed_hwfn *p_hwfn,
 	 * buffer size since more entries could be added to the buffer as we are
 	 * emptying it.
 	 */
+	addr = BYTES_TO_DWORDS(IGU_REG_ERROR_HANDLING_MEMORY);
+	len = IGU_FIFO_ELEMENT_DWORDS;
 	for (dwords_read = 0;
 	     fifo_has_data && dwords_read < IGU_FIFO_DEPTH_DWORDS;
-	     dwords_read += IGU_FIFO_ELEMENT_DWORDS, offset +=
-	     IGU_FIFO_ELEMENT_DWORDS) {
-		if (qed_dmae_grc2host(p_hwfn, p_ptt,
-				      IGU_REG_ERROR_HANDLING_MEMORY,
-				      (u64)(uintptr_t)(&dump_buf[offset]),
-				      IGU_FIFO_ELEMENT_DWORDS, 0))
-			return DBG_STATUS_DMAE_FAILED;
-		fifo_has_data =	qed_rd(p_hwfn, p_ptt,
+	     dwords_read += IGU_FIFO_ELEMENT_DWORDS) {
+		offset += qed_grc_dump_addr_range(p_hwfn,
+						  p_ptt,
+						  dump_buf + offset,
+						  true,
+						  addr,
+						  len,
+						  true);
+		fifo_has_data = qed_rd(p_hwfn, p_ptt,
 				       IGU_REG_ERROR_HANDLING_DATA_VALID) > 0;
 	}
 
@@ -4835,7 +4902,7 @@ static enum dbg_status qed_igu_fifo_dump(struct qed_hwfn *p_hwfn,
 			   dwords_read);
 out:
 	/* Dump last section */
-	offset += qed_dump_last_section(p_hwfn, dump_buf, offset, dump);
+	offset += qed_dump_last_section(dump_buf, offset, dump);
 
 	*num_dumped_dwords = offset;
 
@@ -4849,7 +4916,7 @@ static enum dbg_status qed_protection_override_dump(struct qed_hwfn *p_hwfn,
 						    bool dump,
 						    u32 *num_dumped_dwords)
 {
-	u32 size_param_offset, override_window_dwords, offset = 0;
+	u32 size_param_offset, override_window_dwords, offset = 0, addr;
 
 	*num_dumped_dwords = 0;
 
@@ -4875,20 +4942,21 @@ static enum dbg_status qed_protection_override_dump(struct qed_hwfn *p_hwfn,
 
 	/* Add override window info to buffer */
 	override_window_dwords =
-		qed_rd(p_hwfn, p_ptt,
-		       GRC_REG_NUMBER_VALID_OVERRIDE_WINDOW) *
-		       PROTECTION_OVERRIDE_ELEMENT_DWORDS;
-	if (qed_dmae_grc2host(p_hwfn, p_ptt,
-			      GRC_REG_PROTECTION_OVERRIDE_WINDOW,
-			      (u64)(uintptr_t)(dump_buf + offset),
-			      override_window_dwords, 0))
-		return DBG_STATUS_DMAE_FAILED;
-	offset += override_window_dwords;
+		qed_rd(p_hwfn, p_ptt, GRC_REG_NUMBER_VALID_OVERRIDE_WINDOW) *
+		PROTECTION_OVERRIDE_ELEMENT_DWORDS;
+	addr = BYTES_TO_DWORDS(GRC_REG_PROTECTION_OVERRIDE_WINDOW);
+	offset += qed_grc_dump_addr_range(p_hwfn,
+					  p_ptt,
+					  dump_buf + offset,
+					  true,
+					  addr,
+					  override_window_dwords,
+					  true);
 	qed_dump_num_param(dump_buf + size_param_offset, dump, "size",
 			   override_window_dwords);
 out:
 	/* Dump last section */
-	offset += qed_dump_last_section(p_hwfn, dump_buf, offset, dump);
+	offset += qed_dump_last_section(dump_buf, offset, dump);
 
 	*num_dumped_dwords = offset;
 
@@ -4952,9 +5020,9 @@ static u32 qed_fw_asserts_dump(struct qed_hwfn *p_hwfn,
 		next_list_idx_addr = fw_asserts_section_addr +
 			DWORDS_TO_BYTES(asserts->list_next_index_dword_offset);
 		next_list_idx = qed_rd(p_hwfn, p_ptt, next_list_idx_addr);
-		last_list_idx = (next_list_idx > 0
-				 ? next_list_idx
-				 : asserts->list_num_elements) - 1;
+		last_list_idx = (next_list_idx > 0 ?
+				 next_list_idx :
+				 asserts->list_num_elements) - 1;
 		addr = BYTES_TO_DWORDS(fw_asserts_section_addr) +
 		       asserts->list_dword_offset +
 		       last_list_idx * asserts->list_element_dword_size;
@@ -4967,7 +5035,7 @@ static u32 qed_fw_asserts_dump(struct qed_hwfn *p_hwfn,
 	}
 
 	/* Dump last section */
-	offset += qed_dump_last_section(p_hwfn, dump_buf, offset, dump);
+	offset += qed_dump_last_section(dump_buf, offset, dump);
 
 	return offset;
 }
@@ -5596,10 +5664,6 @@ struct igu_fifo_addr_data {
 
 #define PROTECTION_OVERRIDE_ELEMENT_ADDR_FACTOR	4
 
-/********************************* Macros ************************************/
-
-#define BYTES_TO_DWORDS(bytes)			((bytes) / BYTES_IN_DWORD)
-
 /***************************** Constant Arrays *******************************/
 
 struct user_dbg_array {
@@ -5698,6 +5762,7 @@ static struct block_info s_block_info_arr[] = {
 	{"phy_pcie", BLOCK_PHY_PCIE},
 	{"led", BLOCK_LED},
 	{"avs_wrap", BLOCK_AVS_WRAP},
+	{"pxpreqbus", BLOCK_PXPREQBUS},
 	{"misc_aeu", BLOCK_MISC_AEU},
 	{"bar0_map", BLOCK_BAR0_MAP}
 };
@@ -5830,8 +5895,8 @@ static const char * const s_status_str[] = {
 	/* DBG_STATUS_MCP_COULD_NOT_RESUME */
 	"Failed to resume MCP after halt",
 
-	/* DBG_STATUS_DMAE_FAILED */
-	"DMAE transaction failed",
+	/* DBG_STATUS_RESERVED2 */
+	"Reserved debug status - shouldn't be returned",
 
 	/* DBG_STATUS_SEMI_FIFO_NOT_EMPTY */
 	"Failed to empty SEMI sync FIFO",
@@ -6109,6 +6174,7 @@ static u32 qed_read_param(u32 *dump_buf,
 	if (*(char_buf + offset++)) {
 		/* String param */
 		*param_str_val = char_buf + offset;
+		*param_num_val = 0;
 		offset += strlen(*param_str_val) + 1;
 		if (offset & 0x3)
 			offset += (4 - (offset & 0x3));
@@ -6177,8 +6243,7 @@ static u32 qed_print_section_params(u32 *dump_buf,
 /* Parses the idle check rules and returns the number of characters printed.
  * In case of parsing error, returns 0.
  */
-static u32 qed_parse_idle_chk_dump_rules(struct qed_hwfn *p_hwfn,
-					 u32 *dump_buf,
+static u32 qed_parse_idle_chk_dump_rules(u32 *dump_buf,
 					 u32 *dump_buf_end,
 					 u32 num_rules,
 					 bool print_fw_idle_chk,
@@ -6322,8 +6387,7 @@ static u32 qed_parse_idle_chk_dump_rules(struct qed_hwfn *p_hwfn,
  * parsed_results_bytes.
  * The parsing status is returned.
  */
-static enum dbg_status qed_parse_idle_chk_dump(struct qed_hwfn *p_hwfn,
-					       u32 *dump_buf,
+static enum dbg_status qed_parse_idle_chk_dump(u32 *dump_buf,
 					       u32 num_dumped_dwords,
 					       char *results_buf,
 					       u32 *parsed_results_bytes,
@@ -6375,13 +6439,16 @@ static enum dbg_status qed_parse_idle_chk_dump(struct qed_hwfn *p_hwfn,
 					    results_offset),
 			    "FW_IDLE_CHECK:\n");
 		rules_print_size =
-			qed_parse_idle_chk_dump_rules(p_hwfn, dump_buf,
-						      dump_buf_end, num_rules,
+			qed_parse_idle_chk_dump_rules(dump_buf,
+						      dump_buf_end,
+						      num_rules,
 						      true,
 						      results_buf ?
 						      results_buf +
-						      results_offset : NULL,
-						      num_errors, num_warnings);
+						      results_offset :
+						      NULL,
+						      num_errors,
+						      num_warnings);
 		results_offset += rules_print_size;
 		if (!rules_print_size)
 			return DBG_STATUS_IDLE_CHK_PARSE_FAILED;
@@ -6392,13 +6459,16 @@ static enum dbg_status qed_parse_idle_chk_dump(struct qed_hwfn *p_hwfn,
 					    results_offset),
 			    "\nLSI_IDLE_CHECK:\n");
 		rules_print_size =
-			qed_parse_idle_chk_dump_rules(p_hwfn, dump_buf,
-						      dump_buf_end, num_rules,
+			qed_parse_idle_chk_dump_rules(dump_buf,
+						      dump_buf_end,
+						      num_rules,
 						      false,
 						      results_buf ?
 						      results_buf +
-						      results_offset : NULL,
-						      num_errors, num_warnings);
+						      results_offset :
+						      NULL,
+						      num_errors,
+						      num_warnings);
 		results_offset += rules_print_size;
 		if (!rules_print_size)
 			return DBG_STATUS_IDLE_CHK_PARSE_FAILED;
@@ -6537,7 +6607,6 @@ static enum dbg_status qed_mcp_trace_alloc_meta(struct qed_hwfn *p_hwfn,
  */
 static enum dbg_status qed_parse_mcp_trace_dump(struct qed_hwfn *p_hwfn,
 						u32 *dump_buf,
-						u32 num_dumped_dwords,
 						char *results_buf,
 						u32 *parsed_results_bytes)
 {
@@ -6725,9 +6794,7 @@ free_mem:
  * parsed_results_bytes.
  * The parsing status is returned.
  */
-static enum dbg_status qed_parse_reg_fifo_dump(struct qed_hwfn *p_hwfn,
-					       u32 *dump_buf,
-					       u32 num_dumped_dwords,
+static enum dbg_status qed_parse_reg_fifo_dump(u32 *dump_buf,
 					       char *results_buf,
 					       u32 *parsed_results_bytes)
 {
@@ -6834,8 +6901,7 @@ static enum dbg_status qed_parse_reg_fifo_dump(struct qed_hwfn *p_hwfn,
 static enum dbg_status qed_parse_igu_fifo_element(struct igu_fifo_element
 						  *element, char
 						  *results_buf,
-						  u32 *results_offset,
-						  u32 *parsed_results_bytes)
+						  u32 *results_offset)
 {
 	const struct igu_fifo_addr_data *found_addr = NULL;
 	u8 source, err_type, i, is_cleanup;
@@ -6933,9 +6999,9 @@ static enum dbg_status qed_parse_igu_fifo_element(struct igu_fifo_element
 				"cmd_type: prod/cons update, prod/cons: 0x%x, update_flag: %s, en_dis_int_for_sb : %s, segment : %s, timer_mask = %d, ",
 				prod_cons,
 				update_flag ? "update" : "nop",
-				en_dis_int_for_sb
-				? (en_dis_int_for_sb == 1 ? "disable" : "nop")
-				: "enable",
+				en_dis_int_for_sb ?
+				(en_dis_int_for_sb == 1 ? "disable" : "nop") :
+				"enable",
 				segment ? "attn" : "regular",
 				timer_mask);
 		}
@@ -6969,9 +7035,7 @@ out:
  * parsed_results_bytes.
  * The parsing status is returned.
  */
-static enum dbg_status qed_parse_igu_fifo_dump(struct qed_hwfn *p_hwfn,
-					       u32 *dump_buf,
-					       u32 num_dumped_dwords,
+static enum dbg_status qed_parse_igu_fifo_dump(u32 *dump_buf,
 					       char *results_buf,
 					       u32 *parsed_results_bytes)
 {
@@ -7011,8 +7075,7 @@ static enum dbg_status qed_parse_igu_fifo_dump(struct qed_hwfn *p_hwfn,
 	for (i = 0; i < num_elements; i++) {
 		status = qed_parse_igu_fifo_element(&elements[i],
 						    results_buf,
-						    &results_offset,
-						    parsed_results_bytes);
+						    &results_offset);
 		if (status != DBG_STATUS_OK)
 			return status;
 	}
@@ -7028,9 +7091,7 @@ static enum dbg_status qed_parse_igu_fifo_dump(struct qed_hwfn *p_hwfn,
 }
 
 static enum dbg_status
-qed_parse_protection_override_dump(struct qed_hwfn *p_hwfn,
-				   u32 *dump_buf,
-				   u32 num_dumped_dwords,
+qed_parse_protection_override_dump(u32 *dump_buf,
 				   char *results_buf,
 				   u32 *parsed_results_bytes)
 {
@@ -7105,9 +7166,7 @@ qed_parse_protection_override_dump(struct qed_hwfn *p_hwfn,
  * parsed_results_bytes.
  * The parsing status is returned.
  */
-static enum dbg_status qed_parse_fw_asserts_dump(struct qed_hwfn *p_hwfn,
-						 u32 *dump_buf,
-						 u32 num_dumped_dwords,
+static enum dbg_status qed_parse_fw_asserts_dump(u32 *dump_buf,
 						 char *results_buf,
 						 u32 *parsed_results_bytes)
 {
@@ -7209,8 +7268,7 @@ enum dbg_status qed_get_idle_chk_results_buf_size(struct qed_hwfn *p_hwfn,
 {
 	u32 num_errors, num_warnings;
 
-	return qed_parse_idle_chk_dump(p_hwfn,
-				       dump_buf,
+	return qed_parse_idle_chk_dump(dump_buf,
 				       num_dumped_dwords,
 				       NULL,
 				       results_buf_size,
@@ -7221,12 +7279,12 @@ enum dbg_status qed_print_idle_chk_results(struct qed_hwfn *p_hwfn,
 					   u32 *dump_buf,
 					   u32 num_dumped_dwords,
 					   char *results_buf,
-					   u32 *num_errors, u32 *num_warnings)
+					   u32 *num_errors,
+					   u32 *num_warnings)
 {
 	u32 parsed_buf_size;
 
-	return qed_parse_idle_chk_dump(p_hwfn,
-				       dump_buf,
+	return qed_parse_idle_chk_dump(dump_buf,
 				       num_dumped_dwords,
 				       results_buf,
 				       &parsed_buf_size,
@@ -7245,9 +7303,7 @@ enum dbg_status qed_get_mcp_trace_results_buf_size(struct qed_hwfn *p_hwfn,
 						   u32 *results_buf_size)
 {
 	return qed_parse_mcp_trace_dump(p_hwfn,
-					dump_buf,
-					num_dumped_dwords,
-					NULL, results_buf_size);
+					dump_buf, NULL, results_buf_size);
 }
 
 enum dbg_status qed_print_mcp_trace_results(struct qed_hwfn *p_hwfn,
@@ -7259,7 +7315,6 @@ enum dbg_status qed_print_mcp_trace_results(struct qed_hwfn *p_hwfn,
 
 	return qed_parse_mcp_trace_dump(p_hwfn,
 					dump_buf,
-					num_dumped_dwords,
 					results_buf, &parsed_buf_size);
 }
 
@@ -7268,10 +7323,7 @@ enum dbg_status qed_get_reg_fifo_results_buf_size(struct qed_hwfn *p_hwfn,
 						  u32 num_dumped_dwords,
 						  u32 *results_buf_size)
 {
-	return qed_parse_reg_fifo_dump(p_hwfn,
-				       dump_buf,
-				       num_dumped_dwords,
-				       NULL, results_buf_size);
+	return qed_parse_reg_fifo_dump(dump_buf, NULL, results_buf_size);
 }
 
 enum dbg_status qed_print_reg_fifo_results(struct qed_hwfn *p_hwfn,
@@ -7281,10 +7333,7 @@ enum dbg_status qed_print_reg_fifo_results(struct qed_hwfn *p_hwfn,
 {
 	u32 parsed_buf_size;
 
-	return qed_parse_reg_fifo_dump(p_hwfn,
-				       dump_buf,
-				       num_dumped_dwords,
-				       results_buf, &parsed_buf_size);
+	return qed_parse_reg_fifo_dump(dump_buf, results_buf, &parsed_buf_size);
 }
 
 enum dbg_status qed_get_igu_fifo_results_buf_size(struct qed_hwfn *p_hwfn,
@@ -7292,10 +7341,7 @@ enum dbg_status qed_get_igu_fifo_results_buf_size(struct qed_hwfn *p_hwfn,
 						  u32 num_dumped_dwords,
 						  u32 *results_buf_size)
 {
-	return qed_parse_igu_fifo_dump(p_hwfn,
-				       dump_buf,
-				       num_dumped_dwords,
-				       NULL, results_buf_size);
+	return qed_parse_igu_fifo_dump(dump_buf, NULL, results_buf_size);
 }
 
 enum dbg_status qed_print_igu_fifo_results(struct qed_hwfn *p_hwfn,
@@ -7305,10 +7351,7 @@ enum dbg_status qed_print_igu_fifo_results(struct qed_hwfn *p_hwfn,
 {
 	u32 parsed_buf_size;
 
-	return qed_parse_igu_fifo_dump(p_hwfn,
-				       dump_buf,
-				       num_dumped_dwords,
-				       results_buf, &parsed_buf_size);
+	return qed_parse_igu_fifo_dump(dump_buf, results_buf, &parsed_buf_size);
 }
 
 enum dbg_status
@@ -7317,9 +7360,7 @@ qed_get_protection_override_results_buf_size(struct qed_hwfn *p_hwfn,
 					     u32 num_dumped_dwords,
 					     u32 *results_buf_size)
 {
-	return qed_parse_protection_override_dump(p_hwfn,
-						  dump_buf,
-						  num_dumped_dwords,
+	return qed_parse_protection_override_dump(dump_buf,
 						  NULL, results_buf_size);
 }
 
@@ -7330,9 +7371,7 @@ enum dbg_status qed_print_protection_override_results(struct qed_hwfn *p_hwfn,
 {
 	u32 parsed_buf_size;
 
-	return qed_parse_protection_override_dump(p_hwfn,
-						  dump_buf,
-						  num_dumped_dwords,
+	return qed_parse_protection_override_dump(dump_buf,
 						  results_buf,
 						  &parsed_buf_size);
 }
@@ -7342,10 +7381,7 @@ enum dbg_status qed_get_fw_asserts_results_buf_size(struct qed_hwfn *p_hwfn,
 						    u32 num_dumped_dwords,
 						    u32 *results_buf_size)
 {
-	return qed_parse_fw_asserts_dump(p_hwfn,
-					 dump_buf,
-					 num_dumped_dwords,
-					 NULL, results_buf_size);
+	return qed_parse_fw_asserts_dump(dump_buf, NULL, results_buf_size);
 }
 
 enum dbg_status qed_print_fw_asserts_results(struct qed_hwfn *p_hwfn,
@@ -7355,9 +7391,7 @@ enum dbg_status qed_print_fw_asserts_results(struct qed_hwfn *p_hwfn,
 {
 	u32 parsed_buf_size;
 
-	return qed_parse_fw_asserts_dump(p_hwfn,
-					 dump_buf,
-					 num_dumped_dwords,
+	return qed_parse_fw_asserts_dump(dump_buf,
 					 results_buf, &parsed_buf_size);
 }
 
@@ -7386,30 +7420,30 @@ enum dbg_status qed_dbg_parse_attn(struct qed_hwfn *p_hwfn,
 
 	/* Go over registers with a non-zero attention status */
 	for (i = 0; i < num_regs; i++) {
+		struct dbg_attn_bit_mapping *bit_mapping;
 		struct dbg_attn_reg_result *reg_result;
-		struct dbg_attn_bit_mapping *mapping;
 		u8 num_reg_attn, bit_idx = 0;
 
 		reg_result = &results->reg_results[i];
 		num_reg_attn = GET_FIELD(reg_result->data,
 					 DBG_ATTN_REG_RESULT_NUM_REG_ATTN);
 		block_attn = &s_user_dbg_arrays[BIN_BUF_DBG_ATTN_INDEXES];
-		mapping = &((struct dbg_attn_bit_mapping *)
-			    block_attn->ptr)[reg_result->block_attn_offset];
+		bit_mapping = &((struct dbg_attn_bit_mapping *)
+				block_attn->ptr)[reg_result->block_attn_offset];
 
 		pstrings = &s_user_dbg_arrays[BIN_BUF_DBG_PARSING_STRINGS];
 
 		/* Go over attention status bits */
 		for (j = 0; j < num_reg_attn; j++) {
-			u16 attn_idx_val = GET_FIELD(mapping[j].data,
+			u16 attn_idx_val = GET_FIELD(bit_mapping[j].data,
 						     DBG_ATTN_BIT_MAPPING_VAL);
 			const char *attn_name, *attn_type_str, *masked_str;
-			u32 name_offset, sts_addr;
+			u32 attn_name_offset, sts_addr;
 
 			/* Check if bit mask should be advanced (due to unused
 			 * bits).
 			 */
-			if (GET_FIELD(mapping[j].data,
+			if (GET_FIELD(bit_mapping[j].data,
 				      DBG_ATTN_BIT_MAPPING_IS_UNUSED_BIT_CNT)) {
 				bit_idx += (u8)attn_idx_val;
 				continue;
@@ -7422,9 +7456,10 @@ enum dbg_status qed_dbg_parse_attn(struct qed_hwfn *p_hwfn,
 			}
 
 			/* Find attention name */
-			name_offset = block_attn_name_offsets[attn_idx_val];
+			attn_name_offset =
+				block_attn_name_offsets[attn_idx_val];
 			attn_name = &((const char *)
-				      pstrings->ptr)[name_offset];
+				      pstrings->ptr)[attn_name_offset];
 			attn_type_str = attn_type == ATTN_TYPE_INTERRUPT ?
 					"Interrupt" : "Parity";
 			masked_str = reg_result->mask_val & BIT(bit_idx) ?
diff --git a/drivers/net/ethernet/qlogic/qed/qed_dev.c b/drivers/net/ethernet/qlogic/qed/qed_dev.c
index 58a689fb04db..553a6d17260e 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_dev.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_dev.c
@@ -758,7 +758,7 @@ static void qed_init_qm_info(struct qed_hwfn *p_hwfn)
 /* This function reconfigures the QM pf on the fly.
  * For this purpose we:
  * 1. reconfigure the QM database
- * 2. set new values to runtime arrat
+ * 2. set new values to runtime array
  * 3. send an sdm_qm_cmd through the rbc interface to stop the QM
  * 4. activate init tool in QM_PF stage
  * 5. send an sdm_qm_cmd through rbc interface to release the QM
@@ -784,7 +784,7 @@ int qed_qm_reconf(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
 	qed_init_clear_rt_data(p_hwfn);
 
 	/* prepare QM portion of runtime array */
-	qed_qm_init_pf(p_hwfn, p_ptt);
+	qed_qm_init_pf(p_hwfn, p_ptt, false);
 
 	/* activate init tool on runtime array */
 	rc = qed_init_run(p_hwfn, p_ptt, PHASE_QM_PF, p_hwfn->rel_pf_id,
@@ -1515,7 +1515,7 @@ static int qed_hw_init_pf(struct qed_hwfn *p_hwfn,
 			     NIG_REG_LLH_FUNC_TAGMAC_CLS_TYPE_RT_OFFSET, 1);
 	}
 
-	/* Protocl Configuration  */
+	/* Protocol Configuration */
 	STORE_RT_REG(p_hwfn, PRS_REG_SEARCH_TCP_RT_OFFSET,
 		     (p_hwfn->hw_info.personality == QED_PCI_ISCSI) ? 1 : 0);
 	STORE_RT_REG(p_hwfn, PRS_REG_SEARCH_FCOE_RT_OFFSET,
@@ -1527,6 +1527,11 @@ static int qed_hw_init_pf(struct qed_hwfn *p_hwfn,
 	if (rc)
 		return rc;
 
+	/* Sanity-check DMAE before the PF init sequence, which relies on it */
+	rc = qed_dmae_sanity(p_hwfn, p_ptt, "pf_phase");
+	if (rc)
+		return rc;
+
 	/* PF Init sequence */
 	rc = qed_init_run(p_hwfn, p_ptt, PHASE_PF, rel_pf_id, hw_mode);
 	if (rc)
@@ -2192,7 +2197,7 @@ qed_hw_set_soft_resc_size(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
 			/* No need for a case for QED_CMDQS_CQS since
 			 * CNQ/CMDQS are the same resource.
 			 */
-			resc_max_val = NUM_OF_CMDQS_CQS;
+			resc_max_val = NUM_OF_GLOBAL_QUEUES;
 			break;
 		case QED_RDMA_STATS_QUEUE:
 			resc_max_val = b_ah ? RDMA_NUM_STATISTIC_COUNTERS_K2
@@ -2267,7 +2272,7 @@ int qed_hw_get_dflt_resc(struct qed_hwfn *p_hwfn,
 	case QED_RDMA_CNQ_RAM:
 	case QED_CMDQS_CQS:
 		/* CNQ/CMDQS are the same resource */
-		*p_resc_num = NUM_OF_CMDQS_CQS / num_funcs;
+		*p_resc_num = NUM_OF_GLOBAL_QUEUES / num_funcs;
 		break;
 	case QED_RDMA_STATS_QUEUE:
 		*p_resc_num = (b_ah ? RDMA_NUM_STATISTIC_COUNTERS_K2 :
diff --git a/drivers/net/ethernet/qlogic/qed/qed_fcoe.c b/drivers/net/ethernet/qlogic/qed/qed_fcoe.c
index df195c02b711..2dc9b312a795 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_fcoe.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_fcoe.c
@@ -115,7 +115,7 @@ qed_sp_fcoe_func_start(struct qed_hwfn *p_hwfn,
 	struct qed_fcoe_pf_params *fcoe_pf_params = NULL;
 	struct fcoe_init_ramrod_params *p_ramrod = NULL;
 	struct fcoe_init_func_ramrod_data *p_data;
-	struct fcoe_conn_context *p_cxt = NULL;
+	struct e4_fcoe_conn_context *p_cxt = NULL;
 	struct qed_spq_entry *p_ent = NULL;
 	struct qed_sp_init_data init_data;
 	struct qed_cxt_info cxt_info;
@@ -167,7 +167,7 @@ qed_sp_fcoe_func_start(struct qed_hwfn *p_hwfn,
 	}
 	p_cxt = cxt_info.p_cxt;
 	SET_FIELD(p_cxt->tstorm_ag_context.flags3,
-		  TSTORM_FCOE_CONN_AG_CTX_DUMMY_TIMER_CF_EN, 1);
+		  E4_TSTORM_FCOE_CONN_AG_CTX_DUMMY_TIMER_CF_EN, 1);
 
 	fcoe_pf_params->dummy_icid = (u16)dummy_cid;
 
@@ -568,7 +568,7 @@ int qed_fcoe_alloc(struct qed_hwfn *p_hwfn)
 
 void qed_fcoe_setup(struct qed_hwfn *p_hwfn)
 {
-	struct fcoe_task_context *p_task_ctx = NULL;
+	struct e4_fcoe_task_context *p_task_ctx = NULL;
 	int rc;
 	u32 i;
 
@@ -580,13 +580,13 @@ void qed_fcoe_setup(struct qed_hwfn *p_hwfn)
 		if (rc)
 			continue;
 
-		memset(p_task_ctx, 0, sizeof(struct fcoe_task_context));
+		memset(p_task_ctx, 0, sizeof(struct e4_fcoe_task_context));
 		SET_FIELD(p_task_ctx->timer_context.logical_client_0,
 			  TIMERS_CONTEXT_VALIDLC0, 1);
 		SET_FIELD(p_task_ctx->timer_context.logical_client_1,
 			  TIMERS_CONTEXT_VALIDLC1, 1);
 		SET_FIELD(p_task_ctx->tstorm_ag_context.flags0,
-			  TSTORM_FCOE_TASK_AG_CTX_CONNECTION_TYPE, 1);
+			  E4_TSTORM_FCOE_TASK_AG_CTX_CONNECTION_TYPE, 1);
 	}
 }
 
diff --git a/drivers/net/ethernet/qlogic/qed/qed_hsi.h b/drivers/net/ethernet/qlogic/qed/qed_hsi.h
index 3427fe7049b5..de873d770575 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_hsi.h
+++ b/drivers/net/ethernet/qlogic/qed/qed_hsi.h
@@ -54,7 +54,7 @@
 struct qed_hwfn;
 struct qed_ptt;
 
-/* opcodes for the event ring */
+/* Opcodes for the event ring */
 enum common_event_opcode {
 	COMMON_EVENT_PF_START,
 	COMMON_EVENT_PF_STOP,
@@ -82,487 +82,7 @@ enum common_ramrod_cmd_id {
 	MAX_COMMON_RAMROD_CMD_ID
 };
 
-/* The core storm context for the Ystorm */
-struct ystorm_core_conn_st_ctx {
-	__le32 reserved[4];
-};
-
-/* The core storm context for the Pstorm */
-struct pstorm_core_conn_st_ctx {
-	__le32 reserved[4];
-};
-
-/* Core Slowpath Connection storm context of Xstorm */
-struct xstorm_core_conn_st_ctx {
-	__le32 spq_base_lo;
-	__le32 spq_base_hi;
-	struct regpair consolid_base_addr;
-	__le16 spq_cons;
-	__le16 consolid_cons;
-	__le32 reserved0[55];
-};
-
-struct xstorm_core_conn_ag_ctx {
-	u8 reserved0;
-	u8 core_state;
-	u8 flags0;
-#define XSTORM_CORE_CONN_AG_CTX_EXIST_IN_QM0_MASK	0x1
-#define XSTORM_CORE_CONN_AG_CTX_EXIST_IN_QM0_SHIFT	0
-#define XSTORM_CORE_CONN_AG_CTX_RESERVED1_MASK		0x1
-#define XSTORM_CORE_CONN_AG_CTX_RESERVED1_SHIFT		1
-#define XSTORM_CORE_CONN_AG_CTX_RESERVED2_MASK		0x1
-#define XSTORM_CORE_CONN_AG_CTX_RESERVED2_SHIFT		2
-#define XSTORM_CORE_CONN_AG_CTX_EXIST_IN_QM3_MASK	0x1
-#define XSTORM_CORE_CONN_AG_CTX_EXIST_IN_QM3_SHIFT	3
-#define XSTORM_CORE_CONN_AG_CTX_RESERVED3_MASK		0x1
-#define XSTORM_CORE_CONN_AG_CTX_RESERVED3_SHIFT		4
-#define XSTORM_CORE_CONN_AG_CTX_RESERVED4_MASK		0x1
-#define XSTORM_CORE_CONN_AG_CTX_RESERVED4_SHIFT		5
-#define XSTORM_CORE_CONN_AG_CTX_RESERVED5_MASK		0x1
-#define XSTORM_CORE_CONN_AG_CTX_RESERVED5_SHIFT		6
-#define XSTORM_CORE_CONN_AG_CTX_RESERVED6_MASK		0x1
-#define XSTORM_CORE_CONN_AG_CTX_RESERVED6_SHIFT		7
-	u8 flags1;
-#define XSTORM_CORE_CONN_AG_CTX_RESERVED7_MASK		0x1
-#define XSTORM_CORE_CONN_AG_CTX_RESERVED7_SHIFT		0
-#define XSTORM_CORE_CONN_AG_CTX_RESERVED8_MASK		0x1
-#define XSTORM_CORE_CONN_AG_CTX_RESERVED8_SHIFT		1
-#define XSTORM_CORE_CONN_AG_CTX_RESERVED9_MASK		0x1
-#define XSTORM_CORE_CONN_AG_CTX_RESERVED9_SHIFT		2
-#define XSTORM_CORE_CONN_AG_CTX_BIT11_MASK		0x1
-#define XSTORM_CORE_CONN_AG_CTX_BIT11_SHIFT		3
-#define XSTORM_CORE_CONN_AG_CTX_BIT12_MASK		0x1
-#define XSTORM_CORE_CONN_AG_CTX_BIT12_SHIFT		4
-#define XSTORM_CORE_CONN_AG_CTX_BIT13_MASK		0x1
-#define XSTORM_CORE_CONN_AG_CTX_BIT13_SHIFT		5
-#define XSTORM_CORE_CONN_AG_CTX_TX_RULE_ACTIVE_MASK	0x1
-#define XSTORM_CORE_CONN_AG_CTX_TX_RULE_ACTIVE_SHIFT	6
-#define XSTORM_CORE_CONN_AG_CTX_DQ_CF_ACTIVE_MASK	0x1
-#define XSTORM_CORE_CONN_AG_CTX_DQ_CF_ACTIVE_SHIFT	7
-	u8 flags2;
-#define XSTORM_CORE_CONN_AG_CTX_CF0_MASK	0x3
-#define XSTORM_CORE_CONN_AG_CTX_CF0_SHIFT	0
-#define XSTORM_CORE_CONN_AG_CTX_CF1_MASK	0x3
-#define XSTORM_CORE_CONN_AG_CTX_CF1_SHIFT	2
-#define XSTORM_CORE_CONN_AG_CTX_CF2_MASK	0x3
-#define XSTORM_CORE_CONN_AG_CTX_CF2_SHIFT	4
-#define XSTORM_CORE_CONN_AG_CTX_CF3_MASK	0x3
-#define XSTORM_CORE_CONN_AG_CTX_CF3_SHIFT	6
-	u8 flags3;
-#define XSTORM_CORE_CONN_AG_CTX_CF4_MASK	0x3
-#define XSTORM_CORE_CONN_AG_CTX_CF4_SHIFT	0
-#define XSTORM_CORE_CONN_AG_CTX_CF5_MASK	0x3
-#define XSTORM_CORE_CONN_AG_CTX_CF5_SHIFT	2
-#define XSTORM_CORE_CONN_AG_CTX_CF6_MASK	0x3
-#define XSTORM_CORE_CONN_AG_CTX_CF6_SHIFT	4
-#define XSTORM_CORE_CONN_AG_CTX_CF7_MASK	0x3
-#define XSTORM_CORE_CONN_AG_CTX_CF7_SHIFT	6
-	u8 flags4;
-#define XSTORM_CORE_CONN_AG_CTX_CF8_MASK	0x3
-#define XSTORM_CORE_CONN_AG_CTX_CF8_SHIFT	0
-#define XSTORM_CORE_CONN_AG_CTX_CF9_MASK	0x3
-#define XSTORM_CORE_CONN_AG_CTX_CF9_SHIFT	2
-#define XSTORM_CORE_CONN_AG_CTX_CF10_MASK	0x3
-#define XSTORM_CORE_CONN_AG_CTX_CF10_SHIFT	4
-#define XSTORM_CORE_CONN_AG_CTX_CF11_MASK	0x3
-#define XSTORM_CORE_CONN_AG_CTX_CF11_SHIFT	6
-	u8 flags5;
-#define XSTORM_CORE_CONN_AG_CTX_CF12_MASK	0x3
-#define XSTORM_CORE_CONN_AG_CTX_CF12_SHIFT	0
-#define XSTORM_CORE_CONN_AG_CTX_CF13_MASK	0x3
-#define XSTORM_CORE_CONN_AG_CTX_CF13_SHIFT	2
-#define XSTORM_CORE_CONN_AG_CTX_CF14_MASK	0x3
-#define XSTORM_CORE_CONN_AG_CTX_CF14_SHIFT	4
-#define XSTORM_CORE_CONN_AG_CTX_CF15_MASK	0x3
-#define XSTORM_CORE_CONN_AG_CTX_CF15_SHIFT	6
-	u8 flags6;
-#define XSTORM_CORE_CONN_AG_CTX_CONSOLID_PROD_CF_MASK	0x3
-#define XSTORM_CORE_CONN_AG_CTX_CONSOLID_PROD_CF_SHIFT	0
-#define XSTORM_CORE_CONN_AG_CTX_CF17_MASK		0x3
-#define XSTORM_CORE_CONN_AG_CTX_CF17_SHIFT		2
-#define XSTORM_CORE_CONN_AG_CTX_DQ_CF_MASK		0x3
-#define XSTORM_CORE_CONN_AG_CTX_DQ_CF_SHIFT		4
-#define XSTORM_CORE_CONN_AG_CTX_TERMINATE_CF_MASK	0x3
-#define XSTORM_CORE_CONN_AG_CTX_TERMINATE_CF_SHIFT	6
-	u8 flags7;
-#define XSTORM_CORE_CONN_AG_CTX_FLUSH_Q0_MASK		0x3
-#define XSTORM_CORE_CONN_AG_CTX_FLUSH_Q0_SHIFT		0
-#define XSTORM_CORE_CONN_AG_CTX_RESERVED10_MASK		0x3
-#define XSTORM_CORE_CONN_AG_CTX_RESERVED10_SHIFT	2
-#define XSTORM_CORE_CONN_AG_CTX_SLOW_PATH_MASK		0x3
-#define XSTORM_CORE_CONN_AG_CTX_SLOW_PATH_SHIFT		4
-#define XSTORM_CORE_CONN_AG_CTX_CF0EN_MASK		0x1
-#define XSTORM_CORE_CONN_AG_CTX_CF0EN_SHIFT		6
-#define XSTORM_CORE_CONN_AG_CTX_CF1EN_MASK		0x1
-#define XSTORM_CORE_CONN_AG_CTX_CF1EN_SHIFT		7
-	u8 flags8;
-#define XSTORM_CORE_CONN_AG_CTX_CF2EN_MASK	0x1
-#define XSTORM_CORE_CONN_AG_CTX_CF2EN_SHIFT	0
-#define XSTORM_CORE_CONN_AG_CTX_CF3EN_MASK	0x1
-#define XSTORM_CORE_CONN_AG_CTX_CF3EN_SHIFT	1
-#define XSTORM_CORE_CONN_AG_CTX_CF4EN_MASK	0x1
-#define XSTORM_CORE_CONN_AG_CTX_CF4EN_SHIFT	2
-#define XSTORM_CORE_CONN_AG_CTX_CF5EN_MASK	0x1
-#define XSTORM_CORE_CONN_AG_CTX_CF5EN_SHIFT	3
-#define XSTORM_CORE_CONN_AG_CTX_CF6EN_MASK	0x1
-#define XSTORM_CORE_CONN_AG_CTX_CF6EN_SHIFT	4
-#define XSTORM_CORE_CONN_AG_CTX_CF7EN_MASK	0x1
-#define XSTORM_CORE_CONN_AG_CTX_CF7EN_SHIFT	5
-#define XSTORM_CORE_CONN_AG_CTX_CF8EN_MASK	0x1
-#define XSTORM_CORE_CONN_AG_CTX_CF8EN_SHIFT	6
-#define XSTORM_CORE_CONN_AG_CTX_CF9EN_MASK	0x1
-#define XSTORM_CORE_CONN_AG_CTX_CF9EN_SHIFT	7
-	u8 flags9;
-#define XSTORM_CORE_CONN_AG_CTX_CF10EN_MASK			0x1
-#define XSTORM_CORE_CONN_AG_CTX_CF10EN_SHIFT			0
-#define XSTORM_CORE_CONN_AG_CTX_CF11EN_MASK			0x1
-#define XSTORM_CORE_CONN_AG_CTX_CF11EN_SHIFT			1
-#define XSTORM_CORE_CONN_AG_CTX_CF12EN_MASK			0x1
-#define XSTORM_CORE_CONN_AG_CTX_CF12EN_SHIFT			2
-#define XSTORM_CORE_CONN_AG_CTX_CF13EN_MASK			0x1
-#define XSTORM_CORE_CONN_AG_CTX_CF13EN_SHIFT			3
-#define XSTORM_CORE_CONN_AG_CTX_CF14EN_MASK			0x1
-#define XSTORM_CORE_CONN_AG_CTX_CF14EN_SHIFT			4
-#define XSTORM_CORE_CONN_AG_CTX_CF15EN_MASK			0x1
-#define XSTORM_CORE_CONN_AG_CTX_CF15EN_SHIFT			5
-#define XSTORM_CORE_CONN_AG_CTX_CONSOLID_PROD_CF_EN_MASK	0x1
-#define XSTORM_CORE_CONN_AG_CTX_CONSOLID_PROD_CF_EN_SHIFT	6
-#define XSTORM_CORE_CONN_AG_CTX_CF17EN_MASK			0x1
-#define XSTORM_CORE_CONN_AG_CTX_CF17EN_SHIFT			7
-	u8 flags10;
-#define XSTORM_CORE_CONN_AG_CTX_DQ_CF_EN_MASK		0x1
-#define XSTORM_CORE_CONN_AG_CTX_DQ_CF_EN_SHIFT		0
-#define XSTORM_CORE_CONN_AG_CTX_TERMINATE_CF_EN_MASK	0x1
-#define XSTORM_CORE_CONN_AG_CTX_TERMINATE_CF_EN_SHIFT	1
-#define XSTORM_CORE_CONN_AG_CTX_FLUSH_Q0_EN_MASK	0x1
-#define XSTORM_CORE_CONN_AG_CTX_FLUSH_Q0_EN_SHIFT	2
-#define XSTORM_CORE_CONN_AG_CTX_RESERVED11_MASK		0x1
-#define XSTORM_CORE_CONN_AG_CTX_RESERVED11_SHIFT	3
-#define XSTORM_CORE_CONN_AG_CTX_SLOW_PATH_EN_MASK	0x1
-#define XSTORM_CORE_CONN_AG_CTX_SLOW_PATH_EN_SHIFT	4
-#define XSTORM_CORE_CONN_AG_CTX_CF23EN_MASK		0x1
-#define XSTORM_CORE_CONN_AG_CTX_CF23EN_SHIFT		5
-#define XSTORM_CORE_CONN_AG_CTX_RESERVED12_MASK		0x1
-#define XSTORM_CORE_CONN_AG_CTX_RESERVED12_SHIFT	6
-#define XSTORM_CORE_CONN_AG_CTX_RESERVED13_MASK		0x1
-#define XSTORM_CORE_CONN_AG_CTX_RESERVED13_SHIFT	7
-	u8 flags11;
-#define XSTORM_CORE_CONN_AG_CTX_RESERVED14_MASK		0x1
-#define XSTORM_CORE_CONN_AG_CTX_RESERVED14_SHIFT	0
-#define XSTORM_CORE_CONN_AG_CTX_RESERVED15_MASK		0x1
-#define XSTORM_CORE_CONN_AG_CTX_RESERVED15_SHIFT	1
-#define XSTORM_CORE_CONN_AG_CTX_TX_DEC_RULE_EN_MASK	0x1
-#define XSTORM_CORE_CONN_AG_CTX_TX_DEC_RULE_EN_SHIFT	2
-#define XSTORM_CORE_CONN_AG_CTX_RULE5EN_MASK		0x1
-#define XSTORM_CORE_CONN_AG_CTX_RULE5EN_SHIFT		3
-#define XSTORM_CORE_CONN_AG_CTX_RULE6EN_MASK		0x1
-#define XSTORM_CORE_CONN_AG_CTX_RULE6EN_SHIFT		4
-#define XSTORM_CORE_CONN_AG_CTX_RULE7EN_MASK		0x1
-#define XSTORM_CORE_CONN_AG_CTX_RULE7EN_SHIFT		5
-#define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED1_MASK	0x1
-#define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED1_SHIFT	6
-#define XSTORM_CORE_CONN_AG_CTX_RULE9EN_MASK		0x1
-#define XSTORM_CORE_CONN_AG_CTX_RULE9EN_SHIFT		7
-	u8 flags12;
-#define XSTORM_CORE_CONN_AG_CTX_RULE10EN_MASK		0x1
-#define XSTORM_CORE_CONN_AG_CTX_RULE10EN_SHIFT		0
-#define XSTORM_CORE_CONN_AG_CTX_RULE11EN_MASK		0x1
-#define XSTORM_CORE_CONN_AG_CTX_RULE11EN_SHIFT		1
-#define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED2_MASK	0x1
-#define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED2_SHIFT	2
-#define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED3_MASK	0x1
-#define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED3_SHIFT	3
-#define XSTORM_CORE_CONN_AG_CTX_RULE14EN_MASK		0x1
-#define XSTORM_CORE_CONN_AG_CTX_RULE14EN_SHIFT		4
-#define XSTORM_CORE_CONN_AG_CTX_RULE15EN_MASK		0x1
-#define XSTORM_CORE_CONN_AG_CTX_RULE15EN_SHIFT		5
-#define XSTORM_CORE_CONN_AG_CTX_RULE16EN_MASK		0x1
-#define XSTORM_CORE_CONN_AG_CTX_RULE16EN_SHIFT		6
-#define XSTORM_CORE_CONN_AG_CTX_RULE17EN_MASK		0x1
-#define XSTORM_CORE_CONN_AG_CTX_RULE17EN_SHIFT		7
-	u8 flags13;
-#define XSTORM_CORE_CONN_AG_CTX_RULE18EN_MASK		0x1
-#define XSTORM_CORE_CONN_AG_CTX_RULE18EN_SHIFT		0
-#define XSTORM_CORE_CONN_AG_CTX_RULE19EN_MASK		0x1
-#define XSTORM_CORE_CONN_AG_CTX_RULE19EN_SHIFT		1
-#define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED4_MASK	0x1
-#define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED4_SHIFT	2
-#define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED5_MASK	0x1
-#define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED5_SHIFT	3
-#define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED6_MASK	0x1
-#define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED6_SHIFT	4
-#define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED7_MASK	0x1
-#define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED7_SHIFT	5
-#define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED8_MASK	0x1
-#define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED8_SHIFT	6
-#define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED9_MASK	0x1
-#define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED9_SHIFT	7
-	u8 flags14;
-#define XSTORM_CORE_CONN_AG_CTX_BIT16_MASK	0x1
-#define XSTORM_CORE_CONN_AG_CTX_BIT16_SHIFT	0
-#define XSTORM_CORE_CONN_AG_CTX_BIT17_MASK	0x1
-#define XSTORM_CORE_CONN_AG_CTX_BIT17_SHIFT	1
-#define XSTORM_CORE_CONN_AG_CTX_BIT18_MASK	0x1
-#define XSTORM_CORE_CONN_AG_CTX_BIT18_SHIFT	2
-#define XSTORM_CORE_CONN_AG_CTX_BIT19_MASK	0x1
-#define XSTORM_CORE_CONN_AG_CTX_BIT19_SHIFT	3
-#define XSTORM_CORE_CONN_AG_CTX_BIT20_MASK	0x1
-#define XSTORM_CORE_CONN_AG_CTX_BIT20_SHIFT	4
-#define XSTORM_CORE_CONN_AG_CTX_BIT21_MASK	0x1
-#define XSTORM_CORE_CONN_AG_CTX_BIT21_SHIFT	5
-#define XSTORM_CORE_CONN_AG_CTX_CF23_MASK	0x3
-#define XSTORM_CORE_CONN_AG_CTX_CF23_SHIFT	6
-	u8 byte2;
-	__le16 physical_q0;
-	__le16 consolid_prod;
-	__le16 reserved16;
-	__le16 tx_bd_cons;
-	__le16 tx_bd_or_spq_prod;
-	__le16 word5;
-	__le16 conn_dpi;
-	u8 byte3;
-	u8 byte4;
-	u8 byte5;
-	u8 byte6;
-	__le32 reg0;
-	__le32 reg1;
-	__le32 reg2;
-	__le32 reg3;
-	__le32 reg4;
-	__le32 reg5;
-	__le32 reg6;
-	__le16 word7;
-	__le16 word8;
-	__le16 word9;
-	__le16 word10;
-	__le32 reg7;
-	__le32 reg8;
-	__le32 reg9;
-	u8 byte7;
-	u8 byte8;
-	u8 byte9;
-	u8 byte10;
-	u8 byte11;
-	u8 byte12;
-	u8 byte13;
-	u8 byte14;
-	u8 byte15;
-	u8 e5_reserved;
-	__le16 word11;
-	__le32 reg10;
-	__le32 reg11;
-	__le32 reg12;
-	__le32 reg13;
-	__le32 reg14;
-	__le32 reg15;
-	__le32 reg16;
-	__le32 reg17;
-	__le32 reg18;
-	__le32 reg19;
-	__le16 word12;
-	__le16 word13;
-	__le16 word14;
-	__le16 word15;
-};
-
-struct tstorm_core_conn_ag_ctx {
-	u8 byte0;
-	u8 byte1;
-	u8 flags0;
-#define TSTORM_CORE_CONN_AG_CTX_BIT0_MASK     0x1	/* exist_in_qm0 */
-#define TSTORM_CORE_CONN_AG_CTX_BIT0_SHIFT    0
-#define TSTORM_CORE_CONN_AG_CTX_BIT1_MASK     0x1	/* exist_in_qm1 */
-#define TSTORM_CORE_CONN_AG_CTX_BIT1_SHIFT    1
-#define TSTORM_CORE_CONN_AG_CTX_BIT2_MASK     0x1	/* bit2 */
-#define TSTORM_CORE_CONN_AG_CTX_BIT2_SHIFT    2
-#define TSTORM_CORE_CONN_AG_CTX_BIT3_MASK     0x1	/* bit3 */
-#define TSTORM_CORE_CONN_AG_CTX_BIT3_SHIFT    3
-#define TSTORM_CORE_CONN_AG_CTX_BIT4_MASK     0x1	/* bit4 */
-#define TSTORM_CORE_CONN_AG_CTX_BIT4_SHIFT    4
-#define TSTORM_CORE_CONN_AG_CTX_BIT5_MASK     0x1	/* bit5 */
-#define TSTORM_CORE_CONN_AG_CTX_BIT5_SHIFT    5
-#define TSTORM_CORE_CONN_AG_CTX_CF0_MASK      0x3	/* timer0cf */
-#define TSTORM_CORE_CONN_AG_CTX_CF0_SHIFT     6
-	u8 flags1;
-#define TSTORM_CORE_CONN_AG_CTX_CF1_MASK      0x3	/* timer1cf */
-#define TSTORM_CORE_CONN_AG_CTX_CF1_SHIFT     0
-#define TSTORM_CORE_CONN_AG_CTX_CF2_MASK      0x3	/* timer2cf */
-#define TSTORM_CORE_CONN_AG_CTX_CF2_SHIFT     2
-#define TSTORM_CORE_CONN_AG_CTX_CF3_MASK      0x3	/* timer_stop_all */
-#define TSTORM_CORE_CONN_AG_CTX_CF3_SHIFT     4
-#define TSTORM_CORE_CONN_AG_CTX_CF4_MASK      0x3	/* cf4 */
-#define TSTORM_CORE_CONN_AG_CTX_CF4_SHIFT     6
-	u8 flags2;
-#define TSTORM_CORE_CONN_AG_CTX_CF5_MASK      0x3	/* cf5 */
-#define TSTORM_CORE_CONN_AG_CTX_CF5_SHIFT     0
-#define TSTORM_CORE_CONN_AG_CTX_CF6_MASK      0x3	/* cf6 */
-#define TSTORM_CORE_CONN_AG_CTX_CF6_SHIFT     2
-#define TSTORM_CORE_CONN_AG_CTX_CF7_MASK      0x3	/* cf7 */
-#define TSTORM_CORE_CONN_AG_CTX_CF7_SHIFT     4
-#define TSTORM_CORE_CONN_AG_CTX_CF8_MASK      0x3	/* cf8 */
-#define TSTORM_CORE_CONN_AG_CTX_CF8_SHIFT     6
-	u8 flags3;
-#define TSTORM_CORE_CONN_AG_CTX_CF9_MASK      0x3	/* cf9 */
-#define TSTORM_CORE_CONN_AG_CTX_CF9_SHIFT     0
-#define TSTORM_CORE_CONN_AG_CTX_CF10_MASK     0x3	/* cf10 */
-#define TSTORM_CORE_CONN_AG_CTX_CF10_SHIFT    2
-#define TSTORM_CORE_CONN_AG_CTX_CF0EN_MASK    0x1	/* cf0en */
-#define TSTORM_CORE_CONN_AG_CTX_CF0EN_SHIFT   4
-#define TSTORM_CORE_CONN_AG_CTX_CF1EN_MASK    0x1	/* cf1en */
-#define TSTORM_CORE_CONN_AG_CTX_CF1EN_SHIFT   5
-#define TSTORM_CORE_CONN_AG_CTX_CF2EN_MASK    0x1	/* cf2en */
-#define TSTORM_CORE_CONN_AG_CTX_CF2EN_SHIFT   6
-#define TSTORM_CORE_CONN_AG_CTX_CF3EN_MASK    0x1	/* cf3en */
-#define TSTORM_CORE_CONN_AG_CTX_CF3EN_SHIFT   7
-	u8 flags4;
-#define TSTORM_CORE_CONN_AG_CTX_CF4EN_MASK    0x1	/* cf4en */
-#define TSTORM_CORE_CONN_AG_CTX_CF4EN_SHIFT   0
-#define TSTORM_CORE_CONN_AG_CTX_CF5EN_MASK    0x1	/* cf5en */
-#define TSTORM_CORE_CONN_AG_CTX_CF5EN_SHIFT   1
-#define TSTORM_CORE_CONN_AG_CTX_CF6EN_MASK    0x1	/* cf6en */
-#define TSTORM_CORE_CONN_AG_CTX_CF6EN_SHIFT   2
-#define TSTORM_CORE_CONN_AG_CTX_CF7EN_MASK    0x1	/* cf7en */
-#define TSTORM_CORE_CONN_AG_CTX_CF7EN_SHIFT   3
-#define TSTORM_CORE_CONN_AG_CTX_CF8EN_MASK    0x1	/* cf8en */
-#define TSTORM_CORE_CONN_AG_CTX_CF8EN_SHIFT   4
-#define TSTORM_CORE_CONN_AG_CTX_CF9EN_MASK    0x1	/* cf9en */
-#define TSTORM_CORE_CONN_AG_CTX_CF9EN_SHIFT   5
-#define TSTORM_CORE_CONN_AG_CTX_CF10EN_MASK   0x1	/* cf10en */
-#define TSTORM_CORE_CONN_AG_CTX_CF10EN_SHIFT  6
-#define TSTORM_CORE_CONN_AG_CTX_RULE0EN_MASK  0x1	/* rule0en */
-#define TSTORM_CORE_CONN_AG_CTX_RULE0EN_SHIFT 7
-	u8 flags5;
-#define TSTORM_CORE_CONN_AG_CTX_RULE1EN_MASK  0x1	/* rule1en */
-#define TSTORM_CORE_CONN_AG_CTX_RULE1EN_SHIFT 0
-#define TSTORM_CORE_CONN_AG_CTX_RULE2EN_MASK  0x1	/* rule2en */
-#define TSTORM_CORE_CONN_AG_CTX_RULE2EN_SHIFT 1
-#define TSTORM_CORE_CONN_AG_CTX_RULE3EN_MASK  0x1	/* rule3en */
-#define TSTORM_CORE_CONN_AG_CTX_RULE3EN_SHIFT 2
-#define TSTORM_CORE_CONN_AG_CTX_RULE4EN_MASK  0x1	/* rule4en */
-#define TSTORM_CORE_CONN_AG_CTX_RULE4EN_SHIFT 3
-#define TSTORM_CORE_CONN_AG_CTX_RULE5EN_MASK  0x1	/* rule5en */
-#define TSTORM_CORE_CONN_AG_CTX_RULE5EN_SHIFT 4
-#define TSTORM_CORE_CONN_AG_CTX_RULE6EN_MASK  0x1	/* rule6en */
-#define TSTORM_CORE_CONN_AG_CTX_RULE6EN_SHIFT 5
-#define TSTORM_CORE_CONN_AG_CTX_RULE7EN_MASK  0x1	/* rule7en */
-#define TSTORM_CORE_CONN_AG_CTX_RULE7EN_SHIFT 6
-#define TSTORM_CORE_CONN_AG_CTX_RULE8EN_MASK  0x1	/* rule8en */
-#define TSTORM_CORE_CONN_AG_CTX_RULE8EN_SHIFT 7
-	__le32 reg0;
-	__le32 reg1;
-	__le32 reg2;
-	__le32 reg3;
-	__le32 reg4;
-	__le32 reg5;
-	__le32 reg6;
-	__le32 reg7;
-	__le32 reg8;
-	u8 byte2;
-	u8 byte3;
-	__le16 word0;
-	u8 byte4;
-	u8 byte5;
-	__le16 word1;
-	__le16 word2;
-	__le16 word3;
-	__le32 reg9;
-	__le32 reg10;
-};
-
-struct ustorm_core_conn_ag_ctx {
-	u8 reserved;
-	u8 byte1;
-	u8 flags0;
-#define USTORM_CORE_CONN_AG_CTX_BIT0_MASK	0x1
-#define USTORM_CORE_CONN_AG_CTX_BIT0_SHIFT	0
-#define USTORM_CORE_CONN_AG_CTX_BIT1_MASK	0x1
-#define USTORM_CORE_CONN_AG_CTX_BIT1_SHIFT	1
-#define USTORM_CORE_CONN_AG_CTX_CF0_MASK	0x3
-#define USTORM_CORE_CONN_AG_CTX_CF0_SHIFT	2
-#define USTORM_CORE_CONN_AG_CTX_CF1_MASK	0x3
-#define USTORM_CORE_CONN_AG_CTX_CF1_SHIFT	4
-#define USTORM_CORE_CONN_AG_CTX_CF2_MASK	0x3
-#define USTORM_CORE_CONN_AG_CTX_CF2_SHIFT	6
-	u8 flags1;
-#define USTORM_CORE_CONN_AG_CTX_CF3_MASK	0x3
-#define USTORM_CORE_CONN_AG_CTX_CF3_SHIFT	0
-#define USTORM_CORE_CONN_AG_CTX_CF4_MASK	0x3
-#define USTORM_CORE_CONN_AG_CTX_CF4_SHIFT	2
-#define USTORM_CORE_CONN_AG_CTX_CF5_MASK	0x3
-#define USTORM_CORE_CONN_AG_CTX_CF5_SHIFT	4
-#define USTORM_CORE_CONN_AG_CTX_CF6_MASK	0x3
-#define USTORM_CORE_CONN_AG_CTX_CF6_SHIFT	6
-	u8 flags2;
-#define USTORM_CORE_CONN_AG_CTX_CF0EN_MASK	0x1
-#define USTORM_CORE_CONN_AG_CTX_CF0EN_SHIFT	0
-#define USTORM_CORE_CONN_AG_CTX_CF1EN_MASK	0x1
-#define USTORM_CORE_CONN_AG_CTX_CF1EN_SHIFT	1
-#define USTORM_CORE_CONN_AG_CTX_CF2EN_MASK	0x1
-#define USTORM_CORE_CONN_AG_CTX_CF2EN_SHIFT	2
-#define USTORM_CORE_CONN_AG_CTX_CF3EN_MASK	0x1
-#define USTORM_CORE_CONN_AG_CTX_CF3EN_SHIFT	3
-#define USTORM_CORE_CONN_AG_CTX_CF4EN_MASK	0x1
-#define USTORM_CORE_CONN_AG_CTX_CF4EN_SHIFT	4
-#define USTORM_CORE_CONN_AG_CTX_CF5EN_MASK	0x1
-#define USTORM_CORE_CONN_AG_CTX_CF5EN_SHIFT	5
-#define USTORM_CORE_CONN_AG_CTX_CF6EN_MASK	0x1
-#define USTORM_CORE_CONN_AG_CTX_CF6EN_SHIFT	6
-#define USTORM_CORE_CONN_AG_CTX_RULE0EN_MASK	0x1
-#define USTORM_CORE_CONN_AG_CTX_RULE0EN_SHIFT	7
-	u8 flags3;
-#define USTORM_CORE_CONN_AG_CTX_RULE1EN_MASK	0x1
-#define USTORM_CORE_CONN_AG_CTX_RULE1EN_SHIFT	0
-#define USTORM_CORE_CONN_AG_CTX_RULE2EN_MASK	0x1
-#define USTORM_CORE_CONN_AG_CTX_RULE2EN_SHIFT	1
-#define USTORM_CORE_CONN_AG_CTX_RULE3EN_MASK	0x1
-#define USTORM_CORE_CONN_AG_CTX_RULE3EN_SHIFT	2
-#define USTORM_CORE_CONN_AG_CTX_RULE4EN_MASK	0x1
-#define USTORM_CORE_CONN_AG_CTX_RULE4EN_SHIFT	3
-#define USTORM_CORE_CONN_AG_CTX_RULE5EN_MASK	0x1
-#define USTORM_CORE_CONN_AG_CTX_RULE5EN_SHIFT	4
-#define USTORM_CORE_CONN_AG_CTX_RULE6EN_MASK	0x1
-#define USTORM_CORE_CONN_AG_CTX_RULE6EN_SHIFT	5
-#define USTORM_CORE_CONN_AG_CTX_RULE7EN_MASK	0x1
-#define USTORM_CORE_CONN_AG_CTX_RULE7EN_SHIFT	6
-#define USTORM_CORE_CONN_AG_CTX_RULE8EN_MASK	0x1
-#define USTORM_CORE_CONN_AG_CTX_RULE8EN_SHIFT	7
-	u8 byte2;
-	u8 byte3;
-	__le16 word0;
-	__le16 word1;
-	__le32 rx_producers;
-	__le32 reg1;
-	__le32 reg2;
-	__le32 reg3;
-	__le16 word2;
-	__le16 word3;
-};
-
-/* The core storm context for the Mstorm */
-struct mstorm_core_conn_st_ctx {
-	__le32 reserved[24];
-};
-
-/* The core storm context for the Ustorm */
-struct ustorm_core_conn_st_ctx {
-	__le32 reserved[4];
-};
-
-/* core connection context */
-struct core_conn_context {
-	struct ystorm_core_conn_st_ctx ystorm_st_context;
-	struct regpair ystorm_st_padding[2];
-	struct pstorm_core_conn_st_ctx pstorm_st_context;
-	struct regpair pstorm_st_padding[2];
-	struct xstorm_core_conn_st_ctx xstorm_st_context;
-	struct xstorm_core_conn_ag_ctx xstorm_ag_context;
-	struct tstorm_core_conn_ag_ctx tstorm_ag_context;
-	struct ustorm_core_conn_ag_ctx ustorm_ag_context;
-	struct mstorm_core_conn_st_ctx mstorm_st_context;
-	struct ustorm_core_conn_st_ctx ustorm_st_context;
-	struct regpair ustorm_st_padding[2];
-};
-
+/* How ll2 should deal with packets upon errors */
 enum core_error_handle {
 	LL2_DROP_PACKET,
 	LL2_DO_NOTHING,
@@ -570,21 +90,25 @@ enum core_error_handle {
 	MAX_CORE_ERROR_HANDLE
 };
 
+/* Opcodes for the event ring */
 enum core_event_opcode {
 	CORE_EVENT_TX_QUEUE_START,
 	CORE_EVENT_TX_QUEUE_STOP,
 	CORE_EVENT_RX_QUEUE_START,
 	CORE_EVENT_RX_QUEUE_STOP,
 	CORE_EVENT_RX_QUEUE_FLUSH,
+	CORE_EVENT_TX_QUEUE_UPDATE,
 	MAX_CORE_EVENT_OPCODE
 };
 
+/* The L4 pseudo checksum mode for Core */
 enum core_l4_pseudo_checksum_mode {
 	CORE_L4_PSEUDO_CSUM_CORRECT_LENGTH,
 	CORE_L4_PSEUDO_CSUM_ZERO_LENGTH,
 	MAX_CORE_L4_PSEUDO_CHECKSUM_MODE
 };
 
+/* Light-L2 per-port statistics */
 struct core_ll2_port_stats {
 	struct regpair gsi_invalid_hdr;
 	struct regpair gsi_invalid_pkt_length;
@@ -592,6 +116,7 @@ struct core_ll2_port_stats {
 	struct regpair gsi_crcchksm_error;
 };
 
+/* Ethernet TX Per Queue Stats */
 struct core_ll2_pstorm_per_queue_stat {
 	struct regpair sent_ucast_bytes;
 	struct regpair sent_mcast_bytes;
@@ -601,6 +126,7 @@ struct core_ll2_pstorm_per_queue_stat {
 	struct regpair sent_bcast_pkts;
 };
 
+/* Light-L2 RX Producers in Tstorm RAM */
 struct core_ll2_rx_prod {
 	__le16 bd_prod;
 	__le16 cqe_prod;
@@ -621,6 +147,7 @@ struct core_ll2_ustorm_per_queue_stat {
 	struct regpair rcv_bcast_pkts;
 };
 
+/* Core Ramrod Command IDs (light L2) */
 enum core_ramrod_cmd_id {
 	CORE_RAMROD_UNUSED,
 	CORE_RAMROD_RX_QUEUE_START,
@@ -628,53 +155,64 @@ enum core_ramrod_cmd_id {
 	CORE_RAMROD_RX_QUEUE_STOP,
 	CORE_RAMROD_TX_QUEUE_STOP,
 	CORE_RAMROD_RX_QUEUE_FLUSH,
+	CORE_RAMROD_TX_QUEUE_UPDATE,
 	MAX_CORE_RAMROD_CMD_ID
 };
 
+/* Core RoCE flavor type */
 enum core_roce_flavor_type {
 	CORE_ROCE,
 	CORE_RROCE,
 	MAX_CORE_ROCE_FLAVOR_TYPE
 };
 
+/* Specifies how ll2 should deal with packet errors: packet_too_big and
+ * no_buff.
+ */
 struct core_rx_action_on_error {
 	u8 error_type;
 #define CORE_RX_ACTION_ON_ERROR_PACKET_TOO_BIG_MASK	0x3
-#define CORE_RX_ACTION_ON_ERROR_PACKET_TOO_BIG_SHIFT 0
-#define CORE_RX_ACTION_ON_ERROR_NO_BUFF_MASK	0x3
-#define CORE_RX_ACTION_ON_ERROR_NO_BUFF_SHIFT	2
-#define CORE_RX_ACTION_ON_ERROR_RESERVED_MASK	0xF
-#define CORE_RX_ACTION_ON_ERROR_RESERVED_SHIFT	4
+#define CORE_RX_ACTION_ON_ERROR_PACKET_TOO_BIG_SHIFT	0
+#define CORE_RX_ACTION_ON_ERROR_NO_BUFF_MASK		0x3
+#define CORE_RX_ACTION_ON_ERROR_NO_BUFF_SHIFT		2
+#define CORE_RX_ACTION_ON_ERROR_RESERVED_MASK		0xF
+#define CORE_RX_ACTION_ON_ERROR_RESERVED_SHIFT		4
 };
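
The MASK/SHIFT pairs above pack sub-fields into the single error_type byte.
A minimal sketch of composing them, assuming the SET_FIELD() helper the qed
driver defines alongside these structures (the function name is illustrative):

	static void ll2_set_rx_error_actions(struct core_rx_action_on_error *act)
	{
		u8 val = 0;

		/* Drop oversized packets; do nothing when buffers run out */
		SET_FIELD(val, CORE_RX_ACTION_ON_ERROR_PACKET_TOO_BIG,
			  LL2_DROP_PACKET);
		SET_FIELD(val, CORE_RX_ACTION_ON_ERROR_NO_BUFF, LL2_DO_NOTHING);
		act->error_type = val;
	}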
 
+/* Core RX BD for Light L2 */
 struct core_rx_bd {
 	struct regpair addr;
 	__le16 reserved[4];
 };
 
+/* Core RX BD with buffer length for Light L2 */
 struct core_rx_bd_with_buff_len {
 	struct regpair addr;
 	__le16 buff_length;
 	__le16 reserved[3];
 };
 
+/* Core RX BD union for Light L2 */
 union core_rx_bd_union {
 	struct core_rx_bd rx_bd;
 	struct core_rx_bd_with_buff_len rx_bd_with_len;
 };
 
+/* Opaque Data for Light L2 RX CQE */
 struct core_rx_cqe_opaque_data {
 	__le32 data[2];
 };
 
+/* Core RX CQE Type for Light L2 */
 enum core_rx_cqe_type {
-	CORE_RX_CQE_ILLIGAL_TYPE,
+	CORE_RX_CQE_ILLEGAL_TYPE,
 	CORE_RX_CQE_TYPE_REGULAR,
 	CORE_RX_CQE_TYPE_GSI_OFFLOAD,
 	CORE_RX_CQE_TYPE_SLOW_PATH,
 	MAX_CORE_RX_CQE_TYPE
 };
 
+/* Core RX fast path CQE for Light L2 */
 struct core_rx_fast_path_cqe {
 	u8 type;
 	u8 placement_offset;
@@ -687,6 +225,7 @@ struct core_rx_fast_path_cqe {
 	__le32 reserved1[3];
 };
 
+/* Core Rx CM offload CQE */
 struct core_rx_gsi_offload_cqe {
 	u8 type;
 	u8 data_length_error;
@@ -696,9 +235,11 @@ struct core_rx_gsi_offload_cqe {
 	__le32 src_mac_addrhi;
 	__le16 src_mac_addrlo;
 	__le16 qp_id;
-	__le32 gid_dst[4];
+	__le32 src_qp;
+	__le32 reserved[3];
 };
 
+/* Core RX slow path CQE for Light L2 */
 struct core_rx_slow_path_cqe {
 	u8 type;
 	u8 ramrod_cmd_id;
@@ -707,12 +248,14 @@ struct core_rx_slow_path_cqe {
 	__le32 reserved1[5];
 };
 
+/* Core RX CQE union for Light L2 */
 union core_rx_cqe_union {
 	struct core_rx_fast_path_cqe rx_cqe_fp;
 	struct core_rx_gsi_offload_cqe rx_cqe_gsi;
 	struct core_rx_slow_path_cqe rx_cqe_sp;
 };
 
+/* Ramrod data for rx queue start ramrod */
 struct core_rx_start_ramrod_data {
 	struct regpair bd_base;
 	struct regpair cqe_pbl_addr;
@@ -723,16 +266,18 @@ struct core_rx_start_ramrod_data {
 	u8 complete_event_flg;
 	u8 drop_ttl0_flg;
 	__le16 num_of_pbl_pages;
-	u8 inner_vlan_removal_en;
+	u8 inner_vlan_stripping_en;
+	u8 report_outer_vlan;
 	u8 queue_id;
 	u8 main_func_queue;
 	u8 mf_si_bcast_accept_all;
 	u8 mf_si_mcast_accept_all;
 	struct core_rx_action_on_error action_on_error;
 	u8 gsi_offload_flag;
-	u8 reserved[7];
+	u8 reserved[6];
 };
 
+/* Ramrod data for rx queue stop ramrod */
 struct core_rx_stop_ramrod_data {
 	u8 complete_cqe_flg;
 	u8 complete_event_flg;
@@ -741,46 +286,51 @@ struct core_rx_stop_ramrod_data {
 	__le16 reserved2[2];
 };
 
+/* Flags for Core TX BD */
 struct core_tx_bd_data {
 	__le16 as_bitfield;
-#define CORE_TX_BD_DATA_FORCE_VLAN_MODE_MASK	0x1
-#define CORE_TX_BD_DATA_FORCE_VLAN_MODE_SHIFT     0
-#define CORE_TX_BD_DATA_VLAN_INSERTION_MASK	0x1
-#define CORE_TX_BD_DATA_VLAN_INSERTION_SHIFT      1
-#define CORE_TX_BD_DATA_START_BD_MASK	0x1
-#define CORE_TX_BD_DATA_START_BD_SHIFT            2
-#define CORE_TX_BD_DATA_IP_CSUM_MASK	0x1
-#define CORE_TX_BD_DATA_IP_CSUM_SHIFT             3
-#define CORE_TX_BD_DATA_L4_CSUM_MASK	0x1
-#define CORE_TX_BD_DATA_L4_CSUM_SHIFT             4
-#define CORE_TX_BD_DATA_IPV6_EXT_MASK	0x1
-#define CORE_TX_BD_DATA_IPV6_EXT_SHIFT            5
-#define CORE_TX_BD_DATA_L4_PROTOCOL_MASK	0x1
-#define CORE_TX_BD_DATA_L4_PROTOCOL_SHIFT         6
+#define CORE_TX_BD_DATA_FORCE_VLAN_MODE_MASK		0x1
+#define CORE_TX_BD_DATA_FORCE_VLAN_MODE_SHIFT		0
+#define CORE_TX_BD_DATA_VLAN_INSERTION_MASK		0x1
+#define CORE_TX_BD_DATA_VLAN_INSERTION_SHIFT		1
+#define CORE_TX_BD_DATA_START_BD_MASK			0x1
+#define CORE_TX_BD_DATA_START_BD_SHIFT			2
+#define CORE_TX_BD_DATA_IP_CSUM_MASK			0x1
+#define CORE_TX_BD_DATA_IP_CSUM_SHIFT			3
+#define CORE_TX_BD_DATA_L4_CSUM_MASK			0x1
+#define CORE_TX_BD_DATA_L4_CSUM_SHIFT			4
+#define CORE_TX_BD_DATA_IPV6_EXT_MASK			0x1
+#define CORE_TX_BD_DATA_IPV6_EXT_SHIFT			5
+#define CORE_TX_BD_DATA_L4_PROTOCOL_MASK		0x1
+#define CORE_TX_BD_DATA_L4_PROTOCOL_SHIFT		6
 #define CORE_TX_BD_DATA_L4_PSEUDO_CSUM_MODE_MASK	0x1
-#define CORE_TX_BD_DATA_L4_PSEUDO_CSUM_MODE_SHIFT 7
-#define CORE_TX_BD_DATA_NBDS_MASK	0xF
-#define CORE_TX_BD_DATA_NBDS_SHIFT                8
-#define CORE_TX_BD_DATA_ROCE_FLAV_MASK	0x1
-#define CORE_TX_BD_DATA_ROCE_FLAV_SHIFT           12
-#define CORE_TX_BD_DATA_IP_LEN_MASK	0x1
-#define CORE_TX_BD_DATA_IP_LEN_SHIFT              13
-#define CORE_TX_BD_DATA_RESERVED0_MASK            0x3
-#define CORE_TX_BD_DATA_RESERVED0_SHIFT           14
-};
-
+#define CORE_TX_BD_DATA_L4_PSEUDO_CSUM_MODE_SHIFT	7
+#define CORE_TX_BD_DATA_NBDS_MASK			0xF
+#define CORE_TX_BD_DATA_NBDS_SHIFT			8
+#define CORE_TX_BD_DATA_ROCE_FLAV_MASK			0x1
+#define CORE_TX_BD_DATA_ROCE_FLAV_SHIFT			12
+#define CORE_TX_BD_DATA_IP_LEN_MASK			0x1
+#define CORE_TX_BD_DATA_IP_LEN_SHIFT			13
+#define CORE_TX_BD_DATA_DISABLE_STAG_INSERTION_MASK	0x1
+#define CORE_TX_BD_DATA_DISABLE_STAG_INSERTION_SHIFT	14
+#define CORE_TX_BD_DATA_RESERVED0_MASK			0x1
+#define CORE_TX_BD_DATA_RESERVED0_SHIFT			15
+};
+
+/* Core TX BD for Light L2 */
 struct core_tx_bd {
 	struct regpair addr;
 	__le16 nbytes;
 	__le16 nw_vlan_or_lb_echo;
 	struct core_tx_bd_data bd_data;
 	__le16 bitfield1;
-#define CORE_TX_BD_L4_HDR_OFFSET_W_MASK	0x3FFF
-#define CORE_TX_BD_L4_HDR_OFFSET_W_SHIFT 0
-#define CORE_TX_BD_TX_DST_MASK		0x3
-#define CORE_TX_BD_TX_DST_SHIFT		14
+#define CORE_TX_BD_L4_HDR_OFFSET_W_MASK		0x3FFF
+#define CORE_TX_BD_L4_HDR_OFFSET_W_SHIFT	0
+#define CORE_TX_BD_TX_DST_MASK			0x3
+#define CORE_TX_BD_TX_DST_SHIFT			14
 };
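
As a worked illustration of these flag layouts, a hedged sketch of filling the
first BD of a checksum-offloaded packet; SET_FIELD() and the little-endian
conversion follow the driver's usual conventions, and the function name and
nbds parameter are illustrative:

	static void ll2_fill_first_tx_bd(struct core_tx_bd *bd, u16 nbytes, u8 nbds)
	{
		u16 flags = 0;

		SET_FIELD(flags, CORE_TX_BD_DATA_START_BD, 1);
		SET_FIELD(flags, CORE_TX_BD_DATA_NBDS, nbds);
		SET_FIELD(flags, CORE_TX_BD_DATA_IP_CSUM, 1);
		SET_FIELD(flags, CORE_TX_BD_DATA_L4_CSUM, 1);
		bd->bd_data.as_bitfield = cpu_to_le16(flags);
		bd->nbytes = cpu_to_le16(nbytes);
	}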
 
+/* Light L2 TX Destination */
 enum core_tx_dest {
 	CORE_TX_DEST_NW,
 	CORE_TX_DEST_LB,
@@ -789,6 +339,7 @@ enum core_tx_dest {
 	MAX_CORE_TX_DEST
 };
 
+/* Ramrod data for tx queue start ramrod */
 struct core_tx_start_ramrod_data {
 	struct regpair pbl_base_addr;
 	__le16 mtu;
@@ -803,10 +354,20 @@ struct core_tx_start_ramrod_data {
 	u8 resrved[3];
 };
 
+/* Ramrod data for tx queue stop ramrod */
 struct core_tx_stop_ramrod_data {
 	__le32 reserved0[2];
 };
 
+/* Ramrod data for tx queue update ramrod */
+struct core_tx_update_ramrod_data {
+	u8 update_qm_pq_id_flg;
+	u8 reserved0;
+	__le16 qm_pq_id;
+	__le32 reserved1[1];
+};
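
The new TX_QUEUE_UPDATE ramrod lets the driver move a live queue to another QM
physical queue; a minimal sketch of filling its data (new_pq_id and the
function name are placeholders):

	static void ll2_fill_tx_queue_update(struct core_tx_update_ramrod_data *upd,
					     u16 new_pq_id)
	{
		upd->update_qm_pq_id_flg = 1;
		upd->qm_pq_id = cpu_to_le16(new_pq_id);
	}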
+
+/* Enum for which type of DCB data to update */
 enum dcb_dscp_update_mode {
 	DONT_UPDATE_DCB_DSCP,
 	UPDATE_DCB,
@@ -815,6 +376,487 @@ enum dcb_dscp_update_mode {
 	MAX_DCB_DSCP_UPDATE_MODE
 };
 
+/* The core storm context for the Ystorm */
+struct ystorm_core_conn_st_ctx {
+	__le32 reserved[4];
+};
+
+/* The core storm context for the Pstorm */
+struct pstorm_core_conn_st_ctx {
+	__le32 reserved[4];
+};
+
+/* Core Slowpath Connection storm context of Xstorm */
+struct xstorm_core_conn_st_ctx {
+	__le32 spq_base_lo;
+	__le32 spq_base_hi;
+	struct regpair consolid_base_addr;
+	__le16 spq_cons;
+	__le16 consolid_cons;
+	__le32 reserved0[55];
+};
+
+struct e4_xstorm_core_conn_ag_ctx {
+	u8 reserved0;
+	u8 state;
+	u8 flags0;
+#define E4_XSTORM_CORE_CONN_AG_CTX_EXIST_IN_QM0_MASK	0x1
+#define E4_XSTORM_CORE_CONN_AG_CTX_EXIST_IN_QM0_SHIFT	0
+#define E4_XSTORM_CORE_CONN_AG_CTX_RESERVED1_MASK	0x1
+#define E4_XSTORM_CORE_CONN_AG_CTX_RESERVED1_SHIFT	1
+#define E4_XSTORM_CORE_CONN_AG_CTX_RESERVED2_MASK	0x1
+#define E4_XSTORM_CORE_CONN_AG_CTX_RESERVED2_SHIFT	2
+#define E4_XSTORM_CORE_CONN_AG_CTX_EXIST_IN_QM3_MASK	0x1
+#define E4_XSTORM_CORE_CONN_AG_CTX_EXIST_IN_QM3_SHIFT	3
+#define E4_XSTORM_CORE_CONN_AG_CTX_RESERVED3_MASK	0x1
+#define E4_XSTORM_CORE_CONN_AG_CTX_RESERVED3_SHIFT	4
+#define E4_XSTORM_CORE_CONN_AG_CTX_RESERVED4_MASK	0x1
+#define E4_XSTORM_CORE_CONN_AG_CTX_RESERVED4_SHIFT	5
+#define E4_XSTORM_CORE_CONN_AG_CTX_RESERVED5_MASK	0x1
+#define E4_XSTORM_CORE_CONN_AG_CTX_RESERVED5_SHIFT	6
+#define E4_XSTORM_CORE_CONN_AG_CTX_RESERVED6_MASK	0x1
+#define E4_XSTORM_CORE_CONN_AG_CTX_RESERVED6_SHIFT	7
+	u8 flags1;
+#define E4_XSTORM_CORE_CONN_AG_CTX_RESERVED7_MASK	0x1
+#define E4_XSTORM_CORE_CONN_AG_CTX_RESERVED7_SHIFT	0
+#define E4_XSTORM_CORE_CONN_AG_CTX_RESERVED8_MASK	0x1
+#define E4_XSTORM_CORE_CONN_AG_CTX_RESERVED8_SHIFT	1
+#define E4_XSTORM_CORE_CONN_AG_CTX_RESERVED9_MASK	0x1
+#define E4_XSTORM_CORE_CONN_AG_CTX_RESERVED9_SHIFT	2
+#define E4_XSTORM_CORE_CONN_AG_CTX_BIT11_MASK		0x1
+#define E4_XSTORM_CORE_CONN_AG_CTX_BIT11_SHIFT		3
+#define E4_XSTORM_CORE_CONN_AG_CTX_BIT12_MASK		0x1
+#define E4_XSTORM_CORE_CONN_AG_CTX_BIT12_SHIFT		4
+#define E4_XSTORM_CORE_CONN_AG_CTX_BIT13_MASK		0x1
+#define E4_XSTORM_CORE_CONN_AG_CTX_BIT13_SHIFT		5
+#define E4_XSTORM_CORE_CONN_AG_CTX_TX_RULE_ACTIVE_MASK	0x1
+#define E4_XSTORM_CORE_CONN_AG_CTX_TX_RULE_ACTIVE_SHIFT	6
+#define E4_XSTORM_CORE_CONN_AG_CTX_DQ_CF_ACTIVE_MASK	0x1
+#define E4_XSTORM_CORE_CONN_AG_CTX_DQ_CF_ACTIVE_SHIFT	7
+	u8 flags2;
+#define E4_XSTORM_CORE_CONN_AG_CTX_CF0_MASK	0x3
+#define E4_XSTORM_CORE_CONN_AG_CTX_CF0_SHIFT	0
+#define E4_XSTORM_CORE_CONN_AG_CTX_CF1_MASK	0x3
+#define E4_XSTORM_CORE_CONN_AG_CTX_CF1_SHIFT	2
+#define E4_XSTORM_CORE_CONN_AG_CTX_CF2_MASK	0x3
+#define E4_XSTORM_CORE_CONN_AG_CTX_CF2_SHIFT	4
+#define E4_XSTORM_CORE_CONN_AG_CTX_CF3_MASK	0x3
+#define E4_XSTORM_CORE_CONN_AG_CTX_CF3_SHIFT	6
+	u8 flags3;
+#define E4_XSTORM_CORE_CONN_AG_CTX_CF4_MASK	0x3
+#define E4_XSTORM_CORE_CONN_AG_CTX_CF4_SHIFT	0
+#define E4_XSTORM_CORE_CONN_AG_CTX_CF5_MASK	0x3
+#define E4_XSTORM_CORE_CONN_AG_CTX_CF5_SHIFT	2
+#define E4_XSTORM_CORE_CONN_AG_CTX_CF6_MASK	0x3
+#define E4_XSTORM_CORE_CONN_AG_CTX_CF6_SHIFT	4
+#define E4_XSTORM_CORE_CONN_AG_CTX_CF7_MASK	0x3
+#define E4_XSTORM_CORE_CONN_AG_CTX_CF7_SHIFT	6
+	u8 flags4;
+#define E4_XSTORM_CORE_CONN_AG_CTX_CF8_MASK	0x3
+#define E4_XSTORM_CORE_CONN_AG_CTX_CF8_SHIFT	0
+#define E4_XSTORM_CORE_CONN_AG_CTX_CF9_MASK	0x3
+#define E4_XSTORM_CORE_CONN_AG_CTX_CF9_SHIFT	2
+#define E4_XSTORM_CORE_CONN_AG_CTX_CF10_MASK	0x3
+#define E4_XSTORM_CORE_CONN_AG_CTX_CF10_SHIFT	4
+#define E4_XSTORM_CORE_CONN_AG_CTX_CF11_MASK	0x3
+#define E4_XSTORM_CORE_CONN_AG_CTX_CF11_SHIFT	6
+	u8 flags5;
+#define E4_XSTORM_CORE_CONN_AG_CTX_CF12_MASK	0x3
+#define E4_XSTORM_CORE_CONN_AG_CTX_CF12_SHIFT	0
+#define E4_XSTORM_CORE_CONN_AG_CTX_CF13_MASK	0x3
+#define E4_XSTORM_CORE_CONN_AG_CTX_CF13_SHIFT	2
+#define E4_XSTORM_CORE_CONN_AG_CTX_CF14_MASK	0x3
+#define E4_XSTORM_CORE_CONN_AG_CTX_CF14_SHIFT	4
+#define E4_XSTORM_CORE_CONN_AG_CTX_CF15_MASK	0x3
+#define E4_XSTORM_CORE_CONN_AG_CTX_CF15_SHIFT	6
+	u8 flags6;
+#define E4_XSTORM_CORE_CONN_AG_CTX_CONSOLID_PROD_CF_MASK	0x3
+#define E4_XSTORM_CORE_CONN_AG_CTX_CONSOLID_PROD_CF_SHIFT	0
+#define E4_XSTORM_CORE_CONN_AG_CTX_CF17_MASK			0x3
+#define E4_XSTORM_CORE_CONN_AG_CTX_CF17_SHIFT			2
+#define E4_XSTORM_CORE_CONN_AG_CTX_DQ_CF_MASK			0x3
+#define E4_XSTORM_CORE_CONN_AG_CTX_DQ_CF_SHIFT			4
+#define E4_XSTORM_CORE_CONN_AG_CTX_TERMINATE_CF_MASK		0x3
+#define E4_XSTORM_CORE_CONN_AG_CTX_TERMINATE_CF_SHIFT		6
+	u8 flags7;
+#define E4_XSTORM_CORE_CONN_AG_CTX_FLUSH_Q0_MASK	0x3
+#define E4_XSTORM_CORE_CONN_AG_CTX_FLUSH_Q0_SHIFT	0
+#define E4_XSTORM_CORE_CONN_AG_CTX_RESERVED10_MASK	0x3
+#define E4_XSTORM_CORE_CONN_AG_CTX_RESERVED10_SHIFT	2
+#define E4_XSTORM_CORE_CONN_AG_CTX_SLOW_PATH_MASK	0x3
+#define E4_XSTORM_CORE_CONN_AG_CTX_SLOW_PATH_SHIFT	4
+#define E4_XSTORM_CORE_CONN_AG_CTX_CF0EN_MASK		0x1
+#define E4_XSTORM_CORE_CONN_AG_CTX_CF0EN_SHIFT		6
+#define E4_XSTORM_CORE_CONN_AG_CTX_CF1EN_MASK		0x1
+#define E4_XSTORM_CORE_CONN_AG_CTX_CF1EN_SHIFT		7
+	u8 flags8;
+#define E4_XSTORM_CORE_CONN_AG_CTX_CF2EN_MASK	0x1
+#define E4_XSTORM_CORE_CONN_AG_CTX_CF2EN_SHIFT	0
+#define E4_XSTORM_CORE_CONN_AG_CTX_CF3EN_MASK	0x1
+#define E4_XSTORM_CORE_CONN_AG_CTX_CF3EN_SHIFT	1
+#define E4_XSTORM_CORE_CONN_AG_CTX_CF4EN_MASK	0x1
+#define E4_XSTORM_CORE_CONN_AG_CTX_CF4EN_SHIFT	2
+#define E4_XSTORM_CORE_CONN_AG_CTX_CF5EN_MASK	0x1
+#define E4_XSTORM_CORE_CONN_AG_CTX_CF5EN_SHIFT	3
+#define E4_XSTORM_CORE_CONN_AG_CTX_CF6EN_MASK	0x1
+#define E4_XSTORM_CORE_CONN_AG_CTX_CF6EN_SHIFT	4
+#define E4_XSTORM_CORE_CONN_AG_CTX_CF7EN_MASK	0x1
+#define E4_XSTORM_CORE_CONN_AG_CTX_CF7EN_SHIFT	5
+#define E4_XSTORM_CORE_CONN_AG_CTX_CF8EN_MASK	0x1
+#define E4_XSTORM_CORE_CONN_AG_CTX_CF8EN_SHIFT	6
+#define E4_XSTORM_CORE_CONN_AG_CTX_CF9EN_MASK	0x1
+#define E4_XSTORM_CORE_CONN_AG_CTX_CF9EN_SHIFT	7
+	u8 flags9;
+#define E4_XSTORM_CORE_CONN_AG_CTX_CF10EN_MASK			0x1
+#define E4_XSTORM_CORE_CONN_AG_CTX_CF10EN_SHIFT			0
+#define E4_XSTORM_CORE_CONN_AG_CTX_CF11EN_MASK			0x1
+#define E4_XSTORM_CORE_CONN_AG_CTX_CF11EN_SHIFT			1
+#define E4_XSTORM_CORE_CONN_AG_CTX_CF12EN_MASK			0x1
+#define E4_XSTORM_CORE_CONN_AG_CTX_CF12EN_SHIFT			2
+#define E4_XSTORM_CORE_CONN_AG_CTX_CF13EN_MASK			0x1
+#define E4_XSTORM_CORE_CONN_AG_CTX_CF13EN_SHIFT			3
+#define E4_XSTORM_CORE_CONN_AG_CTX_CF14EN_MASK			0x1
+#define E4_XSTORM_CORE_CONN_AG_CTX_CF14EN_SHIFT			4
+#define E4_XSTORM_CORE_CONN_AG_CTX_CF15EN_MASK			0x1
+#define E4_XSTORM_CORE_CONN_AG_CTX_CF15EN_SHIFT			5
+#define E4_XSTORM_CORE_CONN_AG_CTX_CONSOLID_PROD_CF_EN_MASK	0x1
+#define E4_XSTORM_CORE_CONN_AG_CTX_CONSOLID_PROD_CF_EN_SHIFT	6
+#define E4_XSTORM_CORE_CONN_AG_CTX_CF17EN_MASK			0x1
+#define E4_XSTORM_CORE_CONN_AG_CTX_CF17EN_SHIFT			7
+	u8 flags10;
+#define E4_XSTORM_CORE_CONN_AG_CTX_DQ_CF_EN_MASK		0x1
+#define E4_XSTORM_CORE_CONN_AG_CTX_DQ_CF_EN_SHIFT		0
+#define E4_XSTORM_CORE_CONN_AG_CTX_TERMINATE_CF_EN_MASK		0x1
+#define E4_XSTORM_CORE_CONN_AG_CTX_TERMINATE_CF_EN_SHIFT	1
+#define E4_XSTORM_CORE_CONN_AG_CTX_FLUSH_Q0_EN_MASK		0x1
+#define E4_XSTORM_CORE_CONN_AG_CTX_FLUSH_Q0_EN_SHIFT		2
+#define E4_XSTORM_CORE_CONN_AG_CTX_RESERVED11_MASK		0x1
+#define E4_XSTORM_CORE_CONN_AG_CTX_RESERVED11_SHIFT		3
+#define E4_XSTORM_CORE_CONN_AG_CTX_SLOW_PATH_EN_MASK		0x1
+#define E4_XSTORM_CORE_CONN_AG_CTX_SLOW_PATH_EN_SHIFT		4
+#define E4_XSTORM_CORE_CONN_AG_CTX_CF23EN_MASK			0x1
+#define E4_XSTORM_CORE_CONN_AG_CTX_CF23EN_SHIFT			5
+#define E4_XSTORM_CORE_CONN_AG_CTX_RESERVED12_MASK		0x1
+#define E4_XSTORM_CORE_CONN_AG_CTX_RESERVED12_SHIFT		6
+#define E4_XSTORM_CORE_CONN_AG_CTX_RESERVED13_MASK		0x1
+#define E4_XSTORM_CORE_CONN_AG_CTX_RESERVED13_SHIFT		7
+	u8 flags11;
+#define E4_XSTORM_CORE_CONN_AG_CTX_RESERVED14_MASK	0x1
+#define E4_XSTORM_CORE_CONN_AG_CTX_RESERVED14_SHIFT	0
+#define E4_XSTORM_CORE_CONN_AG_CTX_RESERVED15_MASK	0x1
+#define E4_XSTORM_CORE_CONN_AG_CTX_RESERVED15_SHIFT	1
+#define E4_XSTORM_CORE_CONN_AG_CTX_TX_DEC_RULE_EN_MASK	0x1
+#define E4_XSTORM_CORE_CONN_AG_CTX_TX_DEC_RULE_EN_SHIFT	2
+#define E4_XSTORM_CORE_CONN_AG_CTX_RULE5EN_MASK		0x1
+#define E4_XSTORM_CORE_CONN_AG_CTX_RULE5EN_SHIFT	3
+#define E4_XSTORM_CORE_CONN_AG_CTX_RULE6EN_MASK		0x1
+#define E4_XSTORM_CORE_CONN_AG_CTX_RULE6EN_SHIFT	4
+#define E4_XSTORM_CORE_CONN_AG_CTX_RULE7EN_MASK		0x1
+#define E4_XSTORM_CORE_CONN_AG_CTX_RULE7EN_SHIFT	5
+#define E4_XSTORM_CORE_CONN_AG_CTX_A0_RESERVED1_MASK	0x1
+#define E4_XSTORM_CORE_CONN_AG_CTX_A0_RESERVED1_SHIFT	6
+#define E4_XSTORM_CORE_CONN_AG_CTX_RULE9EN_MASK		0x1
+#define E4_XSTORM_CORE_CONN_AG_CTX_RULE9EN_SHIFT	7
+	u8 flags12;
+#define E4_XSTORM_CORE_CONN_AG_CTX_RULE10EN_MASK	0x1
+#define E4_XSTORM_CORE_CONN_AG_CTX_RULE10EN_SHIFT	0
+#define E4_XSTORM_CORE_CONN_AG_CTX_RULE11EN_MASK	0x1
+#define E4_XSTORM_CORE_CONN_AG_CTX_RULE11EN_SHIFT	1
+#define E4_XSTORM_CORE_CONN_AG_CTX_A0_RESERVED2_MASK	0x1
+#define E4_XSTORM_CORE_CONN_AG_CTX_A0_RESERVED2_SHIFT	2
+#define E4_XSTORM_CORE_CONN_AG_CTX_A0_RESERVED3_MASK	0x1
+#define E4_XSTORM_CORE_CONN_AG_CTX_A0_RESERVED3_SHIFT	3
+#define E4_XSTORM_CORE_CONN_AG_CTX_RULE14EN_MASK	0x1
+#define E4_XSTORM_CORE_CONN_AG_CTX_RULE14EN_SHIFT	4
+#define E4_XSTORM_CORE_CONN_AG_CTX_RULE15EN_MASK	0x1
+#define E4_XSTORM_CORE_CONN_AG_CTX_RULE15EN_SHIFT	5
+#define E4_XSTORM_CORE_CONN_AG_CTX_RULE16EN_MASK	0x1
+#define E4_XSTORM_CORE_CONN_AG_CTX_RULE16EN_SHIFT	6
+#define E4_XSTORM_CORE_CONN_AG_CTX_RULE17EN_MASK	0x1
+#define E4_XSTORM_CORE_CONN_AG_CTX_RULE17EN_SHIFT	7
+	u8 flags13;
+#define E4_XSTORM_CORE_CONN_AG_CTX_RULE18EN_MASK	0x1
+#define E4_XSTORM_CORE_CONN_AG_CTX_RULE18EN_SHIFT	0
+#define E4_XSTORM_CORE_CONN_AG_CTX_RULE19EN_MASK	0x1
+#define E4_XSTORM_CORE_CONN_AG_CTX_RULE19EN_SHIFT	1
+#define E4_XSTORM_CORE_CONN_AG_CTX_A0_RESERVED4_MASK	0x1
+#define E4_XSTORM_CORE_CONN_AG_CTX_A0_RESERVED4_SHIFT	2
+#define E4_XSTORM_CORE_CONN_AG_CTX_A0_RESERVED5_MASK	0x1
+#define E4_XSTORM_CORE_CONN_AG_CTX_A0_RESERVED5_SHIFT	3
+#define E4_XSTORM_CORE_CONN_AG_CTX_A0_RESERVED6_MASK	0x1
+#define E4_XSTORM_CORE_CONN_AG_CTX_A0_RESERVED6_SHIFT	4
+#define E4_XSTORM_CORE_CONN_AG_CTX_A0_RESERVED7_MASK	0x1
+#define E4_XSTORM_CORE_CONN_AG_CTX_A0_RESERVED7_SHIFT	5
+#define E4_XSTORM_CORE_CONN_AG_CTX_A0_RESERVED8_MASK	0x1
+#define E4_XSTORM_CORE_CONN_AG_CTX_A0_RESERVED8_SHIFT	6
+#define E4_XSTORM_CORE_CONN_AG_CTX_A0_RESERVED9_MASK	0x1
+#define E4_XSTORM_CORE_CONN_AG_CTX_A0_RESERVED9_SHIFT	7
+	u8 flags14;
+#define E4_XSTORM_CORE_CONN_AG_CTX_BIT16_MASK	0x1
+#define E4_XSTORM_CORE_CONN_AG_CTX_BIT16_SHIFT	0
+#define E4_XSTORM_CORE_CONN_AG_CTX_BIT17_MASK	0x1
+#define E4_XSTORM_CORE_CONN_AG_CTX_BIT17_SHIFT	1
+#define E4_XSTORM_CORE_CONN_AG_CTX_BIT18_MASK	0x1
+#define E4_XSTORM_CORE_CONN_AG_CTX_BIT18_SHIFT	2
+#define E4_XSTORM_CORE_CONN_AG_CTX_BIT19_MASK	0x1
+#define E4_XSTORM_CORE_CONN_AG_CTX_BIT19_SHIFT	3
+#define E4_XSTORM_CORE_CONN_AG_CTX_BIT20_MASK	0x1
+#define E4_XSTORM_CORE_CONN_AG_CTX_BIT20_SHIFT	4
+#define E4_XSTORM_CORE_CONN_AG_CTX_BIT21_MASK	0x1
+#define E4_XSTORM_CORE_CONN_AG_CTX_BIT21_SHIFT	5
+#define E4_XSTORM_CORE_CONN_AG_CTX_CF23_MASK	0x3
+#define E4_XSTORM_CORE_CONN_AG_CTX_CF23_SHIFT	6
+	u8 byte2;
+	__le16 physical_q0;
+	__le16 consolid_prod;
+	__le16 reserved16;
+	__le16 tx_bd_cons;
+	__le16 tx_bd_or_spq_prod;
+	__le16 word5;
+	__le16 conn_dpi;
+	u8 byte3;
+	u8 byte4;
+	u8 byte5;
+	u8 byte6;
+	__le32 reg0;
+	__le32 reg1;
+	__le32 reg2;
+	__le32 reg3;
+	__le32 reg4;
+	__le32 reg5;
+	__le32 reg6;
+	__le16 word7;
+	__le16 word8;
+	__le16 word9;
+	__le16 word10;
+	__le32 reg7;
+	__le32 reg8;
+	__le32 reg9;
+	u8 byte7;
+	u8 byte8;
+	u8 byte9;
+	u8 byte10;
+	u8 byte11;
+	u8 byte12;
+	u8 byte13;
+	u8 byte14;
+	u8 byte15;
+	u8 e5_reserved;
+	__le16 word11;
+	__le32 reg10;
+	__le32 reg11;
+	__le32 reg12;
+	__le32 reg13;
+	__le32 reg14;
+	__le32 reg15;
+	__le32 reg16;
+	__le32 reg17;
+	__le32 reg18;
+	__le32 reg19;
+	__le16 word12;
+	__le16 word13;
+	__le16 word14;
+	__le16 word15;
+};
+
+struct e4_tstorm_core_conn_ag_ctx {
+	u8 byte0;
+	u8 byte1;
+	u8 flags0;
+#define E4_TSTORM_CORE_CONN_AG_CTX_BIT0_MASK	0x1
+#define E4_TSTORM_CORE_CONN_AG_CTX_BIT0_SHIFT	0
+#define E4_TSTORM_CORE_CONN_AG_CTX_BIT1_MASK	0x1
+#define E4_TSTORM_CORE_CONN_AG_CTX_BIT1_SHIFT	1
+#define E4_TSTORM_CORE_CONN_AG_CTX_BIT2_MASK	0x1
+#define E4_TSTORM_CORE_CONN_AG_CTX_BIT2_SHIFT	2
+#define E4_TSTORM_CORE_CONN_AG_CTX_BIT3_MASK	0x1
+#define E4_TSTORM_CORE_CONN_AG_CTX_BIT3_SHIFT	3
+#define E4_TSTORM_CORE_CONN_AG_CTX_BIT4_MASK	0x1
+#define E4_TSTORM_CORE_CONN_AG_CTX_BIT4_SHIFT	4
+#define E4_TSTORM_CORE_CONN_AG_CTX_BIT5_MASK	0x1
+#define E4_TSTORM_CORE_CONN_AG_CTX_BIT5_SHIFT	5
+#define E4_TSTORM_CORE_CONN_AG_CTX_CF0_MASK	0x3
+#define E4_TSTORM_CORE_CONN_AG_CTX_CF0_SHIFT	6
+	u8 flags1;
+#define E4_TSTORM_CORE_CONN_AG_CTX_CF1_MASK	0x3
+#define E4_TSTORM_CORE_CONN_AG_CTX_CF1_SHIFT	0
+#define E4_TSTORM_CORE_CONN_AG_CTX_CF2_MASK	0x3
+#define E4_TSTORM_CORE_CONN_AG_CTX_CF2_SHIFT	2
+#define E4_TSTORM_CORE_CONN_AG_CTX_CF3_MASK	0x3
+#define E4_TSTORM_CORE_CONN_AG_CTX_CF3_SHIFT	4
+#define E4_TSTORM_CORE_CONN_AG_CTX_CF4_MASK	0x3
+#define E4_TSTORM_CORE_CONN_AG_CTX_CF4_SHIFT	6
+	u8 flags2;
+#define E4_TSTORM_CORE_CONN_AG_CTX_CF5_MASK	0x3
+#define E4_TSTORM_CORE_CONN_AG_CTX_CF5_SHIFT	0
+#define E4_TSTORM_CORE_CONN_AG_CTX_CF6_MASK	0x3
+#define E4_TSTORM_CORE_CONN_AG_CTX_CF6_SHIFT	2
+#define E4_TSTORM_CORE_CONN_AG_CTX_CF7_MASK	0x3
+#define E4_TSTORM_CORE_CONN_AG_CTX_CF7_SHIFT	4
+#define E4_TSTORM_CORE_CONN_AG_CTX_CF8_MASK	0x3
+#define E4_TSTORM_CORE_CONN_AG_CTX_CF8_SHIFT	6
+	u8 flags3;
+#define E4_TSTORM_CORE_CONN_AG_CTX_CF9_MASK	0x3
+#define E4_TSTORM_CORE_CONN_AG_CTX_CF9_SHIFT	0
+#define E4_TSTORM_CORE_CONN_AG_CTX_CF10_MASK	0x3
+#define E4_TSTORM_CORE_CONN_AG_CTX_CF10_SHIFT	2
+#define E4_TSTORM_CORE_CONN_AG_CTX_CF0EN_MASK	0x1
+#define E4_TSTORM_CORE_CONN_AG_CTX_CF0EN_SHIFT	4
+#define E4_TSTORM_CORE_CONN_AG_CTX_CF1EN_MASK	0x1
+#define E4_TSTORM_CORE_CONN_AG_CTX_CF1EN_SHIFT	5
+#define E4_TSTORM_CORE_CONN_AG_CTX_CF2EN_MASK	0x1
+#define E4_TSTORM_CORE_CONN_AG_CTX_CF2EN_SHIFT	6
+#define E4_TSTORM_CORE_CONN_AG_CTX_CF3EN_MASK	0x1
+#define E4_TSTORM_CORE_CONN_AG_CTX_CF3EN_SHIFT	7
+	u8 flags4;
+#define E4_TSTORM_CORE_CONN_AG_CTX_CF4EN_MASK		0x1
+#define E4_TSTORM_CORE_CONN_AG_CTX_CF4EN_SHIFT		0
+#define E4_TSTORM_CORE_CONN_AG_CTX_CF5EN_MASK		0x1
+#define E4_TSTORM_CORE_CONN_AG_CTX_CF5EN_SHIFT		1
+#define E4_TSTORM_CORE_CONN_AG_CTX_CF6EN_MASK		0x1
+#define E4_TSTORM_CORE_CONN_AG_CTX_CF6EN_SHIFT		2
+#define E4_TSTORM_CORE_CONN_AG_CTX_CF7EN_MASK		0x1
+#define E4_TSTORM_CORE_CONN_AG_CTX_CF7EN_SHIFT		3
+#define E4_TSTORM_CORE_CONN_AG_CTX_CF8EN_MASK		0x1
+#define E4_TSTORM_CORE_CONN_AG_CTX_CF8EN_SHIFT		4
+#define E4_TSTORM_CORE_CONN_AG_CTX_CF9EN_MASK		0x1
+#define E4_TSTORM_CORE_CONN_AG_CTX_CF9EN_SHIFT		5
+#define E4_TSTORM_CORE_CONN_AG_CTX_CF10EN_MASK		0x1
+#define E4_TSTORM_CORE_CONN_AG_CTX_CF10EN_SHIFT		6
+#define E4_TSTORM_CORE_CONN_AG_CTX_RULE0EN_MASK		0x1
+#define E4_TSTORM_CORE_CONN_AG_CTX_RULE0EN_SHIFT	7
+	u8 flags5;
+#define E4_TSTORM_CORE_CONN_AG_CTX_RULE1EN_MASK		0x1
+#define E4_TSTORM_CORE_CONN_AG_CTX_RULE1EN_SHIFT	0
+#define E4_TSTORM_CORE_CONN_AG_CTX_RULE2EN_MASK		0x1
+#define E4_TSTORM_CORE_CONN_AG_CTX_RULE2EN_SHIFT	1
+#define E4_TSTORM_CORE_CONN_AG_CTX_RULE3EN_MASK		0x1
+#define E4_TSTORM_CORE_CONN_AG_CTX_RULE3EN_SHIFT	2
+#define E4_TSTORM_CORE_CONN_AG_CTX_RULE4EN_MASK		0x1
+#define E4_TSTORM_CORE_CONN_AG_CTX_RULE4EN_SHIFT	3
+#define E4_TSTORM_CORE_CONN_AG_CTX_RULE5EN_MASK		0x1
+#define E4_TSTORM_CORE_CONN_AG_CTX_RULE5EN_SHIFT	4
+#define E4_TSTORM_CORE_CONN_AG_CTX_RULE6EN_MASK		0x1
+#define E4_TSTORM_CORE_CONN_AG_CTX_RULE6EN_SHIFT	5
+#define E4_TSTORM_CORE_CONN_AG_CTX_RULE7EN_MASK		0x1
+#define E4_TSTORM_CORE_CONN_AG_CTX_RULE7EN_SHIFT	6
+#define E4_TSTORM_CORE_CONN_AG_CTX_RULE8EN_MASK		0x1
+#define E4_TSTORM_CORE_CONN_AG_CTX_RULE8EN_SHIFT	7
+	__le32 reg0;
+	__le32 reg1;
+	__le32 reg2;
+	__le32 reg3;
+	__le32 reg4;
+	__le32 reg5;
+	__le32 reg6;
+	__le32 reg7;
+	__le32 reg8;
+	u8 byte2;
+	u8 byte3;
+	__le16 word0;
+	u8 byte4;
+	u8 byte5;
+	__le16 word1;
+	__le16 word2;
+	__le16 word3;
+	__le32 reg9;
+	__le32 reg10;
+};
+
+struct e4_ustorm_core_conn_ag_ctx {
+	u8 reserved;
+	u8 byte1;
+	u8 flags0;
+#define E4_USTORM_CORE_CONN_AG_CTX_BIT0_MASK	0x1
+#define E4_USTORM_CORE_CONN_AG_CTX_BIT0_SHIFT	0
+#define E4_USTORM_CORE_CONN_AG_CTX_BIT1_MASK	0x1
+#define E4_USTORM_CORE_CONN_AG_CTX_BIT1_SHIFT	1
+#define E4_USTORM_CORE_CONN_AG_CTX_CF0_MASK	0x3
+#define E4_USTORM_CORE_CONN_AG_CTX_CF0_SHIFT	2
+#define E4_USTORM_CORE_CONN_AG_CTX_CF1_MASK	0x3
+#define E4_USTORM_CORE_CONN_AG_CTX_CF1_SHIFT	4
+#define E4_USTORM_CORE_CONN_AG_CTX_CF2_MASK	0x3
+#define E4_USTORM_CORE_CONN_AG_CTX_CF2_SHIFT	6
+	u8 flags1;
+#define E4_USTORM_CORE_CONN_AG_CTX_CF3_MASK	0x3
+#define E4_USTORM_CORE_CONN_AG_CTX_CF3_SHIFT	0
+#define E4_USTORM_CORE_CONN_AG_CTX_CF4_MASK	0x3
+#define E4_USTORM_CORE_CONN_AG_CTX_CF4_SHIFT	2
+#define E4_USTORM_CORE_CONN_AG_CTX_CF5_MASK	0x3
+#define E4_USTORM_CORE_CONN_AG_CTX_CF5_SHIFT	4
+#define E4_USTORM_CORE_CONN_AG_CTX_CF6_MASK	0x3
+#define E4_USTORM_CORE_CONN_AG_CTX_CF6_SHIFT	6
+	u8 flags2;
+#define E4_USTORM_CORE_CONN_AG_CTX_CF0EN_MASK		0x1
+#define E4_USTORM_CORE_CONN_AG_CTX_CF0EN_SHIFT		0
+#define E4_USTORM_CORE_CONN_AG_CTX_CF1EN_MASK		0x1
+#define E4_USTORM_CORE_CONN_AG_CTX_CF1EN_SHIFT		1
+#define E4_USTORM_CORE_CONN_AG_CTX_CF2EN_MASK		0x1
+#define E4_USTORM_CORE_CONN_AG_CTX_CF2EN_SHIFT		2
+#define E4_USTORM_CORE_CONN_AG_CTX_CF3EN_MASK		0x1
+#define E4_USTORM_CORE_CONN_AG_CTX_CF3EN_SHIFT		3
+#define E4_USTORM_CORE_CONN_AG_CTX_CF4EN_MASK		0x1
+#define E4_USTORM_CORE_CONN_AG_CTX_CF4EN_SHIFT		4
+#define E4_USTORM_CORE_CONN_AG_CTX_CF5EN_MASK		0x1
+#define E4_USTORM_CORE_CONN_AG_CTX_CF5EN_SHIFT		5
+#define E4_USTORM_CORE_CONN_AG_CTX_CF6EN_MASK		0x1
+#define E4_USTORM_CORE_CONN_AG_CTX_CF6EN_SHIFT		6
+#define E4_USTORM_CORE_CONN_AG_CTX_RULE0EN_MASK		0x1
+#define E4_USTORM_CORE_CONN_AG_CTX_RULE0EN_SHIFT	7
+	u8 flags3;
+#define E4_USTORM_CORE_CONN_AG_CTX_RULE1EN_MASK		0x1
+#define E4_USTORM_CORE_CONN_AG_CTX_RULE1EN_SHIFT	0
+#define E4_USTORM_CORE_CONN_AG_CTX_RULE2EN_MASK		0x1
+#define E4_USTORM_CORE_CONN_AG_CTX_RULE2EN_SHIFT	1
+#define E4_USTORM_CORE_CONN_AG_CTX_RULE3EN_MASK		0x1
+#define E4_USTORM_CORE_CONN_AG_CTX_RULE3EN_SHIFT	2
+#define E4_USTORM_CORE_CONN_AG_CTX_RULE4EN_MASK		0x1
+#define E4_USTORM_CORE_CONN_AG_CTX_RULE4EN_SHIFT	3
+#define E4_USTORM_CORE_CONN_AG_CTX_RULE5EN_MASK		0x1
+#define E4_USTORM_CORE_CONN_AG_CTX_RULE5EN_SHIFT	4
+#define E4_USTORM_CORE_CONN_AG_CTX_RULE6EN_MASK		0x1
+#define E4_USTORM_CORE_CONN_AG_CTX_RULE6EN_SHIFT	5
+#define E4_USTORM_CORE_CONN_AG_CTX_RULE7EN_MASK		0x1
+#define E4_USTORM_CORE_CONN_AG_CTX_RULE7EN_SHIFT	6
+#define E4_USTORM_CORE_CONN_AG_CTX_RULE8EN_MASK		0x1
+#define E4_USTORM_CORE_CONN_AG_CTX_RULE8EN_SHIFT	7
+	u8 byte2;
+	u8 byte3;
+	__le16 word0;
+	__le16 word1;
+	__le32 rx_producers;
+	__le32 reg1;
+	__le32 reg2;
+	__le32 reg3;
+	__le16 word2;
+	__le16 word3;
+};
+
+/* The core storm context for the Mstorm */
+struct mstorm_core_conn_st_ctx {
+	__le32 reserved[24];
+};
+
+/* The core storm context for the Ustorm */
+struct ustorm_core_conn_st_ctx {
+	__le32 reserved[4];
+};
+
+/* core connection context */
+struct e4_core_conn_context {
+	struct ystorm_core_conn_st_ctx ystorm_st_context;
+	struct regpair ystorm_st_padding[2];
+	struct pstorm_core_conn_st_ctx pstorm_st_context;
+	struct regpair pstorm_st_padding[2];
+	struct xstorm_core_conn_st_ctx xstorm_st_context;
+	struct e4_xstorm_core_conn_ag_ctx xstorm_ag_context;
+	struct e4_tstorm_core_conn_ag_ctx tstorm_ag_context;
+	struct e4_ustorm_core_conn_ag_ctx ustorm_ag_context;
+	struct mstorm_core_conn_st_ctx mstorm_st_context;
+	struct ustorm_core_conn_st_ctx ustorm_st_context;
+	struct regpair ustorm_st_padding[2];
+};
+
 struct eth_mstorm_per_pf_stat {
 	struct regpair gre_discard_pkts;
 	struct regpair vxlan_discard_pkts;
@@ -896,6 +938,50 @@ struct eth_ustorm_per_queue_stat {
 	struct regpair rcv_bcast_pkts;
 };
 
+/* Event Ring VF-PF Channel data */
+struct vf_pf_channel_eqe_data {
+	struct regpair msg_addr;
+};
+
+/* Event Ring malicious VF data */
+struct malicious_vf_eqe_data {
+	u8 vf_id;
+	u8 err_id;
+	__le16 reserved[3];
+};
+
+/* Event Ring initial cleanup data */
+struct initial_cleanup_eqe_data {
+	u8 vf_id;
+	u8 reserved[7];
+};
+
+/* Event Data Union */
+union event_ring_data {
+	u8 bytes[8];
+	struct vf_pf_channel_eqe_data vf_pf_channel;
+	struct iscsi_eqe_data iscsi_info;
+	struct iscsi_connect_done_results iscsi_conn_done_info;
+	union rdma_eqe_data rdma_data;
+	struct malicious_vf_eqe_data malicious_vf;
+	struct initial_cleanup_eqe_data vf_init_cleanup;
+};
+
+/* Event Ring Entry */
+struct event_ring_entry {
+	u8 protocol_id;
+	u8 opcode;
+	__le16 reserved0;
+	__le16 echo;
+	u8 fw_return_code;
+	u8 flags;
+#define EVENT_RING_ENTRY_ASYNC_MASK		0x1
+#define EVENT_RING_ENTRY_ASYNC_SHIFT		0
+#define EVENT_RING_ENTRY_RESERVED1_MASK		0x7F
+#define EVENT_RING_ENTRY_RESERVED1_SHIFT	1
+	union event_ring_data data;
+};
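
Event-ring consumers would decode the flags byte with the matching GET_FIELD()
accessor; a minimal sketch (the helper name is illustrative):

	static bool eqe_is_async(const struct event_ring_entry *eqe)
	{
		return !!GET_FIELD(eqe->flags, EVENT_RING_ENTRY_ASYNC);
	}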
+
 /* Event Ring Next Page Address */
 struct event_ring_next_addr {
 	struct regpair addr;
@@ -908,12 +994,21 @@ union event_ring_element {
 	struct event_ring_next_addr next_addr;
 };
 
+/* FW flow control mode */
 enum fw_flow_ctrl_mode {
 	flow_ctrl_pause,
 	flow_ctrl_pfc,
 	MAX_FW_FLOW_CTRL_MODE
 };
 
+/* GFT profile type */
+enum gft_profile_type {
+	GFT_PROFILE_TYPE_4_TUPLE,
+	GFT_PROFILE_TYPE_L4_DST_PORT,
+	GFT_PROFILE_TYPE_IP_DST_PORT,
+	MAX_GFT_PROFILE_TYPE
+};
+
 /* Major and Minor hsi Versions */
 struct hsi_fp_ver_struct {
 	u8 minor_ver_arr[2];
@@ -921,14 +1016,14 @@ struct hsi_fp_ver_struct {
 };
 
 enum iwarp_ll2_tx_queues {
-	IWARP_LL2_IN_ORDER_TX_QUEUE =			1,
+	IWARP_LL2_IN_ORDER_TX_QUEUE = 1,
 	IWARP_LL2_ALIGNED_TX_QUEUE,
 	IWARP_LL2_ALIGNED_RIGHT_TRIMMED_TX_QUEUE,
 	IWARP_LL2_ERROR,
 	MAX_IWARP_LL2_TX_QUEUES
 };
 
-/* Mstorm non-triggering VF zone */
+/* Malicious VF error ID */
 enum malicious_vf_error_id {
 	MALICIOUS_VF_NO_ERROR,
 	VF_PF_CHANNEL_NOT_READY,
@@ -951,9 +1046,11 @@ enum malicious_vf_error_id {
 	ETH_TUNN_IPV6_EXT_NBD_ERR,
 	ETH_CONTROL_PACKET_VIOLATION,
 	ETH_ANTI_SPOOFING_ERR,
+	ETH_PACKET_SIZE_TOO_LARGE,
 	MAX_MALICIOUS_VF_ERROR_ID
 };
 
+/* Mstorm non-triggering VF zone */
 struct mstorm_non_trigger_vf_zone {
 	struct eth_mstorm_per_queue_stat eth_queue_stat;
 	struct eth_rx_prod_data eth_rx_queue_producers[ETH_MAX_NUM_RX_QUEUES_PER_VF_QUAD];
@@ -962,7 +1059,21 @@ struct mstorm_non_trigger_vf_zone {
 /* Mstorm VF zone */
 struct mstorm_vf_zone {
 	struct mstorm_non_trigger_vf_zone non_trigger;
+};
+
+/* VLAN header including TPID and TCI fields */
+struct vlan_header {
+	__le16 tpid;
+	__le16 tci;
+};
 
+/* Outer tag configuration */
+struct outer_tag_config_struct {
+	u8 enable_stag_pri_change;
+	u8 pri_map_valid;
+	u8 reserved[2];
+	struct vlan_header outer_tag;
+	u8 inner_to_outer_pri_map[8];
 };
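
This block replaces the bare outer_tag dword in pf_start_ramrod_data (see the
hunk below); a hedged sketch of populating it, where p_ramrod and mf_outer_vlan
are placeholders and ETH_P_8021Q comes from <linux/if_ether.h>:

	struct outer_tag_config_struct *cfg = &p_ramrod->outer_tag_config;

	cfg->outer_tag.tpid = cpu_to_le16(ETH_P_8021Q);
	cfg->outer_tag.tci = cpu_to_le16(mf_outer_vlan);
	cfg->enable_stag_pri_change = 0;
	cfg->pri_map_valid = 0;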
 
 /* personality per PF */
@@ -974,7 +1085,7 @@ enum personality_type {
 	PERSONALITY_RDMA,
 	PERSONALITY_CORE,
 	PERSONALITY_ETH,
-	PERSONALITY_RESERVED4,
+	PERSONALITY_RESERVED,
 	MAX_PERSONALITY_TYPE
 };
 
@@ -997,7 +1108,6 @@ struct pf_start_ramrod_data {
 	struct regpair event_ring_pbl_addr;
 	struct regpair consolid_q_pbl_addr;
 	struct pf_start_tunnel_config tunnel_config;
-	__le32 reserved;
 	__le16 event_ring_sb_id;
 	u8 base_vf_id;
 	u8 num_vfs;
@@ -1011,21 +1121,22 @@ struct pf_start_ramrod_data {
 	u8 mf_mode;
 	u8 integ_phase;
 	u8 allow_npar_tx_switching;
-	u8 inner_to_outer_pri_map[8];
-	u8 pri_map_valid;
-	__le32 outer_tag;
+	u8 reserved0;
 	struct hsi_fp_ver_struct hsi_fp_ver;
+	struct outer_tag_config_struct outer_tag_config;
 };
 
+/* Per-protocol DCB data */
 struct protocol_dcb_data {
 	u8 dcb_enable_flag;
-	u8 reserved_a;
+	u8 dscp_enable_flag;
 	u8 dcb_priority;
 	u8 dcb_tc;
-	u8 reserved_b;
+	u8 dscp_val;
 	u8 reserved0;
 };
 
+/* Update tunnel configuration */
 struct pf_update_tunnel_config {
 	u8 update_rx_pf_clss;
 	u8 update_rx_def_ucast_clss;
@@ -1042,8 +1153,8 @@ struct pf_update_tunnel_config {
 	__le16 reserved;
 };
 
+/* Data for PF update ramrod */
 struct pf_update_ramrod_data {
-	u8 pf_id;
 	u8 update_eth_dcb_data_mode;
 	u8 update_fcoe_dcb_data_mode;
 	u8 update_iscsi_dcb_data_mode;
@@ -1051,6 +1162,7 @@ struct pf_update_ramrod_data {
 	u8 update_rroce_dcb_data_mode;
 	u8 update_iwarp_dcb_data_mode;
 	u8 update_mf_vlan_flag;
+	u8 update_enable_stag_pri_change;
 	struct protocol_dcb_data eth_dcb_data;
 	struct protocol_dcb_data fcoe_dcb_data;
 	struct protocol_dcb_data iscsi_dcb_data;
@@ -1058,7 +1170,8 @@ struct pf_update_ramrod_data {
 	struct protocol_dcb_data rroce_dcb_data;
 	struct protocol_dcb_data iwarp_dcb_data;
 	__le16 mf_vlan;
-	__le16 reserved;
+	u8 enable_stag_pri_change;
+	u8 reserved;
 	struct pf_update_tunnel_config tunnel_config;
 };
 
@@ -1079,11 +1192,13 @@ enum protocol_version_array_key {
 	MAX_PROTOCOL_VERSION_ARRAY_KEY
 };
 
+/* RDMA TX Stats */
 struct rdma_sent_stats {
 	struct regpair sent_bytes;
 	struct regpair sent_pkts;
 };
 
+/* Pstorm non-triggering VF zone */
 struct pstorm_non_trigger_vf_zone {
 	struct eth_pstorm_per_queue_stat eth_queue_stat;
 	struct rdma_sent_stats rdma_stats;
@@ -1103,11 +1218,34 @@ struct ramrod_header {
 	__le16 echo;
 };
 
+/* RDMA RX Stats */
 struct rdma_rcv_stats {
 	struct regpair rcv_bytes;
 	struct regpair rcv_pkts;
 };
 
+/* Data for the QCN/DCQCN rate limiter update ramrod */
+struct rl_update_ramrod_data {
+	u8 qcn_update_param_flg;
+	u8 dcqcn_update_param_flg;
+	u8 rl_init_flg;
+	u8 rl_start_flg;
+	u8 rl_stop_flg;
+	u8 rl_id_first;
+	u8 rl_id_last;
+	u8 rl_dc_qcn_flg;
+	__le32 rl_bc_rate;
+	__le16 rl_max_rate;
+	__le16 rl_r_ai;
+	__le16 rl_r_hai;
+	__le16 dcqcn_g;
+	__le32 dcqcn_k_us;
+	__le32 dcqcn_timeuot_us;
+	__le32 qcn_timeuot_us;
+	__le32 reserved[2];
+};
+
+/* Slowpath Element (SPQE) */
 struct slow_path_element {
 	struct ramrod_header hdr;
 	struct regpair data_ptr;
@@ -1130,11 +1268,12 @@ struct tstorm_per_port_stat {
 	struct regpair roce_irregular_pkt;
 	struct regpair iwarp_irregular_pkt;
 	struct regpair eth_irregular_pkt;
-	struct regpair reserved1;
+	struct regpair toe_irregular_pkt;
 	struct regpair preroce_irregular_pkt;
 	struct regpair eth_gre_tunn_filter_discard;
 	struct regpair eth_vxlan_tunn_filter_discard;
 	struct regpair eth_geneve_tunn_filter_discard;
+	struct regpair eth_gft_drop_pkt;
 };
 
 /* Tstorm VF zone */
@@ -1197,6 +1336,7 @@ struct vf_stop_ramrod_data {
 	__le32 reserved2;
 };
 
+/* VF zone size mode */
 enum vf_zone_size_mode {
 	VF_ZONE_SIZE_MODE_DEFAULT,
 	VF_ZONE_SIZE_MODE_DOUBLE,
@@ -1204,6 +1344,7 @@ enum vf_zone_size_mode {
 	MAX_VF_ZONE_SIZE_MODE
 };
 
+/* Attentions status block */
 struct atten_status_block {
 	__le32 atten_bits;
 	__le32 atten_ack;
@@ -1212,12 +1353,6 @@ struct atten_status_block {
 	__le32 reserved1;
 };
 
-enum command_type_bit {
-	IGU_COMMAND_TYPE_NOP = 0,
-	IGU_COMMAND_TYPE_SET = 1,
-	MAX_COMMAND_TYPE_BIT
-};
-
 /* DMAE command */
 struct dmae_cmd {
 	__le32 opcode;
@@ -1327,74 +1462,74 @@ enum dmae_cmd_src_enum {
 	MAX_DMAE_CMD_SRC_ENUM
 };
 
-struct mstorm_core_conn_ag_ctx {
+struct e4_mstorm_core_conn_ag_ctx {
 	u8 byte0;
 	u8 byte1;
 	u8 flags0;
-#define MSTORM_CORE_CONN_AG_CTX_BIT0_MASK	0x1
-#define MSTORM_CORE_CONN_AG_CTX_BIT0_SHIFT	0
-#define MSTORM_CORE_CONN_AG_CTX_BIT1_MASK	0x1
-#define MSTORM_CORE_CONN_AG_CTX_BIT1_SHIFT	1
-#define MSTORM_CORE_CONN_AG_CTX_CF0_MASK	0x3
-#define MSTORM_CORE_CONN_AG_CTX_CF0_SHIFT	2
-#define MSTORM_CORE_CONN_AG_CTX_CF1_MASK	0x3
-#define MSTORM_CORE_CONN_AG_CTX_CF1_SHIFT	4
-#define MSTORM_CORE_CONN_AG_CTX_CF2_MASK	0x3
-#define MSTORM_CORE_CONN_AG_CTX_CF2_SHIFT	6
+#define E4_MSTORM_CORE_CONN_AG_CTX_BIT0_MASK	0x1
+#define E4_MSTORM_CORE_CONN_AG_CTX_BIT0_SHIFT	0
+#define E4_MSTORM_CORE_CONN_AG_CTX_BIT1_MASK	0x1
+#define E4_MSTORM_CORE_CONN_AG_CTX_BIT1_SHIFT	1
+#define E4_MSTORM_CORE_CONN_AG_CTX_CF0_MASK	0x3
+#define E4_MSTORM_CORE_CONN_AG_CTX_CF0_SHIFT	2
+#define E4_MSTORM_CORE_CONN_AG_CTX_CF1_MASK	0x3
+#define E4_MSTORM_CORE_CONN_AG_CTX_CF1_SHIFT	4
+#define E4_MSTORM_CORE_CONN_AG_CTX_CF2_MASK	0x3
+#define E4_MSTORM_CORE_CONN_AG_CTX_CF2_SHIFT	6
 	u8 flags1;
-#define MSTORM_CORE_CONN_AG_CTX_CF0EN_MASK	0x1
-#define MSTORM_CORE_CONN_AG_CTX_CF0EN_SHIFT	0
-#define MSTORM_CORE_CONN_AG_CTX_CF1EN_MASK	0x1
-#define MSTORM_CORE_CONN_AG_CTX_CF1EN_SHIFT	1
-#define MSTORM_CORE_CONN_AG_CTX_CF2EN_MASK	0x1
-#define MSTORM_CORE_CONN_AG_CTX_CF2EN_SHIFT	2
-#define MSTORM_CORE_CONN_AG_CTX_RULE0EN_MASK	0x1
-#define MSTORM_CORE_CONN_AG_CTX_RULE0EN_SHIFT	3
-#define MSTORM_CORE_CONN_AG_CTX_RULE1EN_MASK	0x1
-#define MSTORM_CORE_CONN_AG_CTX_RULE1EN_SHIFT	4
-#define MSTORM_CORE_CONN_AG_CTX_RULE2EN_MASK	0x1
-#define MSTORM_CORE_CONN_AG_CTX_RULE2EN_SHIFT	5
-#define MSTORM_CORE_CONN_AG_CTX_RULE3EN_MASK	0x1
-#define MSTORM_CORE_CONN_AG_CTX_RULE3EN_SHIFT	6
-#define MSTORM_CORE_CONN_AG_CTX_RULE4EN_MASK	0x1
-#define MSTORM_CORE_CONN_AG_CTX_RULE4EN_SHIFT	7
+#define E4_MSTORM_CORE_CONN_AG_CTX_CF0EN_MASK		0x1
+#define E4_MSTORM_CORE_CONN_AG_CTX_CF0EN_SHIFT		0
+#define E4_MSTORM_CORE_CONN_AG_CTX_CF1EN_MASK		0x1
+#define E4_MSTORM_CORE_CONN_AG_CTX_CF1EN_SHIFT		1
+#define E4_MSTORM_CORE_CONN_AG_CTX_CF2EN_MASK		0x1
+#define E4_MSTORM_CORE_CONN_AG_CTX_CF2EN_SHIFT		2
+#define E4_MSTORM_CORE_CONN_AG_CTX_RULE0EN_MASK		0x1
+#define E4_MSTORM_CORE_CONN_AG_CTX_RULE0EN_SHIFT	3
+#define E4_MSTORM_CORE_CONN_AG_CTX_RULE1EN_MASK		0x1
+#define E4_MSTORM_CORE_CONN_AG_CTX_RULE1EN_SHIFT	4
+#define E4_MSTORM_CORE_CONN_AG_CTX_RULE2EN_MASK		0x1
+#define E4_MSTORM_CORE_CONN_AG_CTX_RULE2EN_SHIFT	5
+#define E4_MSTORM_CORE_CONN_AG_CTX_RULE3EN_MASK		0x1
+#define E4_MSTORM_CORE_CONN_AG_CTX_RULE3EN_SHIFT	6
+#define E4_MSTORM_CORE_CONN_AG_CTX_RULE4EN_MASK		0x1
+#define E4_MSTORM_CORE_CONN_AG_CTX_RULE4EN_SHIFT	7
 	__le16 word0;
 	__le16 word1;
 	__le32 reg0;
 	__le32 reg1;
 };
 
-struct ystorm_core_conn_ag_ctx {
+struct e4_ystorm_core_conn_ag_ctx {
 	u8 byte0;
 	u8 byte1;
 	u8 flags0;
-#define YSTORM_CORE_CONN_AG_CTX_BIT0_MASK	0x1
-#define YSTORM_CORE_CONN_AG_CTX_BIT0_SHIFT	0
-#define YSTORM_CORE_CONN_AG_CTX_BIT1_MASK	0x1
-#define YSTORM_CORE_CONN_AG_CTX_BIT1_SHIFT	1
-#define YSTORM_CORE_CONN_AG_CTX_CF0_MASK	0x3
-#define YSTORM_CORE_CONN_AG_CTX_CF0_SHIFT	2
-#define YSTORM_CORE_CONN_AG_CTX_CF1_MASK	0x3
-#define YSTORM_CORE_CONN_AG_CTX_CF1_SHIFT	4
-#define YSTORM_CORE_CONN_AG_CTX_CF2_MASK	0x3
-#define YSTORM_CORE_CONN_AG_CTX_CF2_SHIFT	6
+#define E4_YSTORM_CORE_CONN_AG_CTX_BIT0_MASK	0x1
+#define E4_YSTORM_CORE_CONN_AG_CTX_BIT0_SHIFT	0
+#define E4_YSTORM_CORE_CONN_AG_CTX_BIT1_MASK	0x1
+#define E4_YSTORM_CORE_CONN_AG_CTX_BIT1_SHIFT	1
+#define E4_YSTORM_CORE_CONN_AG_CTX_CF0_MASK	0x3
+#define E4_YSTORM_CORE_CONN_AG_CTX_CF0_SHIFT	2
+#define E4_YSTORM_CORE_CONN_AG_CTX_CF1_MASK	0x3
+#define E4_YSTORM_CORE_CONN_AG_CTX_CF1_SHIFT	4
+#define E4_YSTORM_CORE_CONN_AG_CTX_CF2_MASK	0x3
+#define E4_YSTORM_CORE_CONN_AG_CTX_CF2_SHIFT	6
 	u8 flags1;
-#define YSTORM_CORE_CONN_AG_CTX_CF0EN_MASK	0x1
-#define YSTORM_CORE_CONN_AG_CTX_CF0EN_SHIFT	0
-#define YSTORM_CORE_CONN_AG_CTX_CF1EN_MASK	0x1
-#define YSTORM_CORE_CONN_AG_CTX_CF1EN_SHIFT	1
-#define YSTORM_CORE_CONN_AG_CTX_CF2EN_MASK	0x1
-#define YSTORM_CORE_CONN_AG_CTX_CF2EN_SHIFT	2
-#define YSTORM_CORE_CONN_AG_CTX_RULE0EN_MASK	0x1
-#define YSTORM_CORE_CONN_AG_CTX_RULE0EN_SHIFT	3
-#define YSTORM_CORE_CONN_AG_CTX_RULE1EN_MASK	0x1
-#define YSTORM_CORE_CONN_AG_CTX_RULE1EN_SHIFT	4
-#define YSTORM_CORE_CONN_AG_CTX_RULE2EN_MASK	0x1
-#define YSTORM_CORE_CONN_AG_CTX_RULE2EN_SHIFT	5
-#define YSTORM_CORE_CONN_AG_CTX_RULE3EN_MASK	0x1
-#define YSTORM_CORE_CONN_AG_CTX_RULE3EN_SHIFT	6
-#define YSTORM_CORE_CONN_AG_CTX_RULE4EN_MASK	0x1
-#define YSTORM_CORE_CONN_AG_CTX_RULE4EN_SHIFT	7
+#define E4_YSTORM_CORE_CONN_AG_CTX_CF0EN_MASK		0x1
+#define E4_YSTORM_CORE_CONN_AG_CTX_CF0EN_SHIFT		0
+#define E4_YSTORM_CORE_CONN_AG_CTX_CF1EN_MASK		0x1
+#define E4_YSTORM_CORE_CONN_AG_CTX_CF1EN_SHIFT		1
+#define E4_YSTORM_CORE_CONN_AG_CTX_CF2EN_MASK		0x1
+#define E4_YSTORM_CORE_CONN_AG_CTX_CF2EN_SHIFT		2
+#define E4_YSTORM_CORE_CONN_AG_CTX_RULE0EN_MASK		0x1
+#define E4_YSTORM_CORE_CONN_AG_CTX_RULE0EN_SHIFT	3
+#define E4_YSTORM_CORE_CONN_AG_CTX_RULE1EN_MASK		0x1
+#define E4_YSTORM_CORE_CONN_AG_CTX_RULE1EN_SHIFT	4
+#define E4_YSTORM_CORE_CONN_AG_CTX_RULE2EN_MASK		0x1
+#define E4_YSTORM_CORE_CONN_AG_CTX_RULE2EN_SHIFT	5
+#define E4_YSTORM_CORE_CONN_AG_CTX_RULE3EN_MASK		0x1
+#define E4_YSTORM_CORE_CONN_AG_CTX_RULE3EN_SHIFT	6
+#define E4_YSTORM_CORE_CONN_AG_CTX_RULE4EN_MASK		0x1
+#define E4_YSTORM_CORE_CONN_AG_CTX_RULE4EN_SHIFT	7
 	u8 byte2;
 	u8 byte3;
 	__le16 word0;
@@ -1545,22 +1680,22 @@ struct qm_rf_opportunistic_mask {
 };
 
 /* QM hardware structure of QM map memory */
-struct qm_rf_pq_map {
+struct qm_rf_pq_map_e4 {
 	__le32 reg;
-#define QM_RF_PQ_MAP_PQ_VALID_MASK		0x1
-#define QM_RF_PQ_MAP_PQ_VALID_SHIFT		0
-#define QM_RF_PQ_MAP_RL_ID_MASK			0xFF
-#define QM_RF_PQ_MAP_RL_ID_SHIFT		1
-#define QM_RF_PQ_MAP_VP_PQ_ID_MASK		0x1FF
-#define QM_RF_PQ_MAP_VP_PQ_ID_SHIFT		9
-#define QM_RF_PQ_MAP_VOQ_MASK			0x1F
-#define QM_RF_PQ_MAP_VOQ_SHIFT			18
-#define QM_RF_PQ_MAP_WRR_WEIGHT_GROUP_MASK	0x3
-#define QM_RF_PQ_MAP_WRR_WEIGHT_GROUP_SHIFT	23
-#define QM_RF_PQ_MAP_RL_VALID_MASK		0x1
-#define QM_RF_PQ_MAP_RL_VALID_SHIFT		25
-#define QM_RF_PQ_MAP_RESERVED_MASK		0x3F
-#define QM_RF_PQ_MAP_RESERVED_SHIFT		26
+#define QM_RF_PQ_MAP_E4_PQ_VALID_MASK		0x1
+#define QM_RF_PQ_MAP_E4_PQ_VALID_SHIFT		0
+#define QM_RF_PQ_MAP_E4_RL_ID_MASK		0xFF
+#define QM_RF_PQ_MAP_E4_RL_ID_SHIFT		1
+#define QM_RF_PQ_MAP_E4_VP_PQ_ID_MASK		0x1FF
+#define QM_RF_PQ_MAP_E4_VP_PQ_ID_SHIFT		9
+#define QM_RF_PQ_MAP_E4_VOQ_MASK		0x1F
+#define QM_RF_PQ_MAP_E4_VOQ_SHIFT		18
+#define QM_RF_PQ_MAP_E4_WRR_WEIGHT_GROUP_MASK	0x3
+#define QM_RF_PQ_MAP_E4_WRR_WEIGHT_GROUP_SHIFT	23
+#define QM_RF_PQ_MAP_E4_RL_VALID_MASK		0x1
+#define QM_RF_PQ_MAP_E4_RL_VALID_SHIFT		25
+#define QM_RF_PQ_MAP_E4_RESERVED_MASK		0x3F
+#define QM_RF_PQ_MAP_E4_RESERVED_SHIFT		26
 };
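
The renamed E4_* field macros compose exactly as before; a hedged sketch of
building one QM PQ map entry (the function name is illustrative):

	static u32 qm_build_pq_map_e4(u16 vp_pq_id, u8 rl_id, u8 voq,
				      u8 wrr_group, bool rl_valid)
	{
		u32 reg = 0;

		SET_FIELD(reg, QM_RF_PQ_MAP_E4_PQ_VALID, 1);
		SET_FIELD(reg, QM_RF_PQ_MAP_E4_RL_ID, rl_id);
		SET_FIELD(reg, QM_RF_PQ_MAP_E4_VP_PQ_ID, vp_pq_id);
		SET_FIELD(reg, QM_RF_PQ_MAP_E4_VOQ, voq);
		SET_FIELD(reg, QM_RF_PQ_MAP_E4_WRR_WEIGHT_GROUP, wrr_group);
		SET_FIELD(reg, QM_RF_PQ_MAP_E4_RL_VALID, rl_valid ? 1 : 0);
		return reg;
	}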
 
 /* Completion params for aggregated interrupt completion */
@@ -1643,8 +1778,8 @@ enum block_addr {
 	GRCBASE_MULD = 0x4e0000,
 	GRCBASE_YULD = 0x4c8000,
 	GRCBASE_XYLD = 0x4c0000,
-	GRCBASE_PTLD = 0x590000,
-	GRCBASE_YPLD = 0x5b0000,
+	GRCBASE_PTLD = 0x5a0000,
+	GRCBASE_YPLD = 0x5c0000,
 	GRCBASE_PRM = 0x230000,
 	GRCBASE_PBF_PB1 = 0xda0000,
 	GRCBASE_PBF_PB2 = 0xda4000,
@@ -1675,6 +1810,7 @@ enum block_addr {
 	GRCBASE_PHY_PCIE = 0x620000,
 	GRCBASE_LED = 0x6b8000,
 	GRCBASE_AVS_WRAP = 0x6b0000,
+	GRCBASE_PXPREQBUS = 0x56000,
 	GRCBASE_MISC_AEU = 0x8000,
 	GRCBASE_BAR0_MAP = 0x1c00000,
 	MAX_BLOCK_ADDR
@@ -1766,6 +1902,7 @@ enum block_id {
 	BLOCK_PHY_PCIE,
 	BLOCK_LED,
 	BLOCK_AVS_WRAP,
+	BLOCK_PXPREQBUS,
 	BLOCK_MISC_AEU,
 	BLOCK_BAR0_MAP,
 	MAX_BLOCK_ID
@@ -1841,7 +1978,7 @@ struct dbg_attn_block_result {
 	struct dbg_attn_reg_result reg_results[15];
 };
 
-/* mode header */
+/* Mode header */
 struct dbg_mode_hdr {
 	__le16 data;
 #define DBG_MODE_HDR_EVAL_MODE_MASK		0x1
@@ -1863,80 +2000,83 @@ struct dbg_attn_reg {
 	__le32 mask_address;
 };
 
-/* attention types */
+/* Attention types */
 enum dbg_attn_type {
 	ATTN_TYPE_INTERRUPT,
 	ATTN_TYPE_PARITY,
 	MAX_DBG_ATTN_TYPE
 };
 
+/* Debug Bus block data */
 struct dbg_bus_block {
 	u8 num_of_lines;
 	u8 has_latency_events;
 	__le16 lines_offset;
 };
 
+/* Debug Bus block user data */
 struct dbg_bus_block_user_data {
 	u8 num_of_lines;
 	u8 has_latency_events;
 	__le16 names_offset;
 };
 
+/* Block Debug line data */
 struct dbg_bus_line {
 	u8 data;
-#define DBG_BUS_LINE_NUM_OF_GROUPS_MASK  0xF
-#define DBG_BUS_LINE_NUM_OF_GROUPS_SHIFT 0
-#define DBG_BUS_LINE_IS_256B_MASK        0x1
-#define DBG_BUS_LINE_IS_256B_SHIFT       4
-#define DBG_BUS_LINE_RESERVED_MASK       0x7
-#define DBG_BUS_LINE_RESERVED_SHIFT      5
+#define DBG_BUS_LINE_NUM_OF_GROUPS_MASK		0xF
+#define DBG_BUS_LINE_NUM_OF_GROUPS_SHIFT	0
+#define DBG_BUS_LINE_IS_256B_MASK		0x1
+#define DBG_BUS_LINE_IS_256B_SHIFT		4
+#define DBG_BUS_LINE_RESERVED_MASK		0x7
+#define DBG_BUS_LINE_RESERVED_SHIFT		5
 	u8 group_sizes;
 };
 
-/* condition header for registers dump */
+/* Condition header for registers dump */
 struct dbg_dump_cond_hdr {
 	struct dbg_mode_hdr mode; /* Mode header */
 	u8 block_id; /* block ID */
 	u8 data_size; /* size in dwords of the data following this header */
 };
 
-/* memory data for registers dump */
+/* Memory data for registers dump */
 struct dbg_dump_mem {
 	__le32 dword0;
-#define DBG_DUMP_MEM_ADDRESS_MASK       0xFFFFFF
-#define DBG_DUMP_MEM_ADDRESS_SHIFT      0
-#define DBG_DUMP_MEM_MEM_GROUP_ID_MASK  0xFF
-#define DBG_DUMP_MEM_MEM_GROUP_ID_SHIFT 24
+#define DBG_DUMP_MEM_ADDRESS_MASK	0xFFFFFF
+#define DBG_DUMP_MEM_ADDRESS_SHIFT	0
+#define DBG_DUMP_MEM_MEM_GROUP_ID_MASK	0xFF
+#define DBG_DUMP_MEM_MEM_GROUP_ID_SHIFT	24
 	__le32 dword1;
-#define DBG_DUMP_MEM_LENGTH_MASK        0xFFFFFF
-#define DBG_DUMP_MEM_LENGTH_SHIFT       0
-#define DBG_DUMP_MEM_WIDE_BUS_MASK      0x1
-#define DBG_DUMP_MEM_WIDE_BUS_SHIFT     24
-#define DBG_DUMP_MEM_RESERVED_MASK      0x7F
-#define DBG_DUMP_MEM_RESERVED_SHIFT     25
+#define DBG_DUMP_MEM_LENGTH_MASK	0xFFFFFF
+#define DBG_DUMP_MEM_LENGTH_SHIFT	0
+#define DBG_DUMP_MEM_WIDE_BUS_MASK	0x1
+#define DBG_DUMP_MEM_WIDE_BUS_SHIFT	24
+#define DBG_DUMP_MEM_RESERVED_MASK	0x7F
+#define DBG_DUMP_MEM_RESERVED_SHIFT	25
 };
 
-/* register data for registers dump */
+/* Register data for registers dump */
 struct dbg_dump_reg {
 	__le32 data;
-#define DBG_DUMP_REG_ADDRESS_MASK 0x7FFFFF /* register address (in dwords) */
-#define DBG_DUMP_REG_ADDRESS_SHIFT 0
-#define DBG_DUMP_REG_WIDE_BUS_MASK 0x1 /* indicates register is wide-bus */
-#define DBG_DUMP_REG_WIDE_BUS_SHIFT 23
-#define DBG_DUMP_REG_LENGTH_MASK  0xFF /* register size (in dwords) */
-#define DBG_DUMP_REG_LENGTH_SHIFT 24
+#define DBG_DUMP_REG_ADDRESS_MASK	0x7FFFFF
+#define DBG_DUMP_REG_ADDRESS_SHIFT	0
+#define DBG_DUMP_REG_WIDE_BUS_MASK	0x1
+#define DBG_DUMP_REG_WIDE_BUS_SHIFT	23
+#define DBG_DUMP_REG_LENGTH_MASK	0xFF
+#define DBG_DUMP_REG_LENGTH_SHIFT	24
 };
 
-/* split header for registers dump */
+/* Split header for registers dump */
 struct dbg_dump_split_hdr {
 	__le32 hdr;
-#define DBG_DUMP_SPLIT_HDR_DATA_SIZE_MASK      0xFFFFFF
-#define DBG_DUMP_SPLIT_HDR_DATA_SIZE_SHIFT     0
-#define DBG_DUMP_SPLIT_HDR_SPLIT_TYPE_ID_MASK  0xFF
-#define DBG_DUMP_SPLIT_HDR_SPLIT_TYPE_ID_SHIFT 24
+#define DBG_DUMP_SPLIT_HDR_DATA_SIZE_MASK	0xFFFFFF
+#define DBG_DUMP_SPLIT_HDR_DATA_SIZE_SHIFT	0
+#define DBG_DUMP_SPLIT_HDR_SPLIT_TYPE_ID_MASK	0xFF
+#define DBG_DUMP_SPLIT_HDR_SPLIT_TYPE_ID_SHIFT	24
 };
 
-/* condition header for idle check */
+/* Condition header for idle check */
 struct dbg_idle_chk_cond_hdr {
 	struct dbg_mode_hdr mode; /* Mode header */
 	__le16 data_size; /* size in dwords of the data following this header */
@@ -1945,12 +2085,12 @@ struct dbg_idle_chk_cond_hdr {
 /* Idle Check condition register */
 struct dbg_idle_chk_cond_reg {
 	__le32 data;
-#define DBG_IDLE_CHK_COND_REG_ADDRESS_MASK   0x7FFFFF
-#define DBG_IDLE_CHK_COND_REG_ADDRESS_SHIFT  0
-#define DBG_IDLE_CHK_COND_REG_WIDE_BUS_MASK  0x1
-#define DBG_IDLE_CHK_COND_REG_WIDE_BUS_SHIFT 23
-#define DBG_IDLE_CHK_COND_REG_BLOCK_ID_MASK  0xFF
-#define DBG_IDLE_CHK_COND_REG_BLOCK_ID_SHIFT 24
+#define DBG_IDLE_CHK_COND_REG_ADDRESS_MASK	0x7FFFFF
+#define DBG_IDLE_CHK_COND_REG_ADDRESS_SHIFT	0
+#define DBG_IDLE_CHK_COND_REG_WIDE_BUS_MASK	0x1
+#define DBG_IDLE_CHK_COND_REG_WIDE_BUS_SHIFT	23
+#define DBG_IDLE_CHK_COND_REG_BLOCK_ID_MASK	0xFF
+#define DBG_IDLE_CHK_COND_REG_BLOCK_ID_SHIFT	24
 	__le16 num_entries;
 	u8 entry_size;
 	u8 start_entry;
@@ -1959,12 +2099,12 @@ struct dbg_idle_chk_cond_reg {
 /* Idle Check info register */
 struct dbg_idle_chk_info_reg {
 	__le32 data;
-#define DBG_IDLE_CHK_INFO_REG_ADDRESS_MASK   0x7FFFFF
-#define DBG_IDLE_CHK_INFO_REG_ADDRESS_SHIFT  0
-#define DBG_IDLE_CHK_INFO_REG_WIDE_BUS_MASK  0x1
-#define DBG_IDLE_CHK_INFO_REG_WIDE_BUS_SHIFT 23
-#define DBG_IDLE_CHK_INFO_REG_BLOCK_ID_MASK  0xFF
-#define DBG_IDLE_CHK_INFO_REG_BLOCK_ID_SHIFT 24
+#define DBG_IDLE_CHK_INFO_REG_ADDRESS_MASK	0x7FFFFF
+#define DBG_IDLE_CHK_INFO_REG_ADDRESS_SHIFT	0
+#define DBG_IDLE_CHK_INFO_REG_WIDE_BUS_MASK	0x1
+#define DBG_IDLE_CHK_INFO_REG_WIDE_BUS_SHIFT	23
+#define DBG_IDLE_CHK_INFO_REG_BLOCK_ID_MASK	0xFF
+#define DBG_IDLE_CHK_INFO_REG_BLOCK_ID_SHIFT	24
 	__le16 size; /* register size in dwords */
 	struct dbg_mode_hdr mode; /* Mode header */
 };
@@ -2016,13 +2156,13 @@ struct dbg_idle_chk_rule {
 /* Idle Check rule parsing data */
 struct dbg_idle_chk_rule_parsing_data {
 	__le32 data;
-#define DBG_IDLE_CHK_RULE_PARSING_DATA_HAS_FW_MSG_MASK  0x1
-#define DBG_IDLE_CHK_RULE_PARSING_DATA_HAS_FW_MSG_SHIFT 0
-#define DBG_IDLE_CHK_RULE_PARSING_DATA_STR_OFFSET_MASK  0x7FFFFFFF
-#define DBG_IDLE_CHK_RULE_PARSING_DATA_STR_OFFSET_SHIFT 1
+#define DBG_IDLE_CHK_RULE_PARSING_DATA_HAS_FW_MSG_MASK	0x1
+#define DBG_IDLE_CHK_RULE_PARSING_DATA_HAS_FW_MSG_SHIFT	0
+#define DBG_IDLE_CHK_RULE_PARSING_DATA_STR_OFFSET_MASK	0x7FFFFFFF
+#define DBG_IDLE_CHK_RULE_PARSING_DATA_STR_OFFSET_SHIFT	1
 };
 
-/* idle check severity types */
+/* Idle check severity types */
 enum dbg_idle_chk_severity_types {
 	/* idle check failure should cause an error */
 	IDLE_CHK_SEVERITY_ERROR,
@@ -2036,14 +2176,14 @@ enum dbg_idle_chk_severity_types {
 /* Debug Bus block data */
 struct dbg_bus_block_data {
 	__le16 data;
-#define DBG_BUS_BLOCK_DATA_ENABLE_MASK_MASK       0xF
-#define DBG_BUS_BLOCK_DATA_ENABLE_MASK_SHIFT      0
-#define DBG_BUS_BLOCK_DATA_RIGHT_SHIFT_MASK       0xF
-#define DBG_BUS_BLOCK_DATA_RIGHT_SHIFT_SHIFT      4
-#define DBG_BUS_BLOCK_DATA_FORCE_VALID_MASK_MASK  0xF
-#define DBG_BUS_BLOCK_DATA_FORCE_VALID_MASK_SHIFT 8
-#define DBG_BUS_BLOCK_DATA_FORCE_FRAME_MASK_MASK  0xF
-#define DBG_BUS_BLOCK_DATA_FORCE_FRAME_MASK_SHIFT 12
+#define DBG_BUS_BLOCK_DATA_ENABLE_MASK_MASK		0xF
+#define DBG_BUS_BLOCK_DATA_ENABLE_MASK_SHIFT		0
+#define DBG_BUS_BLOCK_DATA_RIGHT_SHIFT_MASK		0xF
+#define DBG_BUS_BLOCK_DATA_RIGHT_SHIFT_SHIFT		4
+#define DBG_BUS_BLOCK_DATA_FORCE_VALID_MASK_MASK	0xF
+#define DBG_BUS_BLOCK_DATA_FORCE_VALID_MASK_SHIFT	8
+#define DBG_BUS_BLOCK_DATA_FORCE_FRAME_MASK_MASK	0xF
+#define DBG_BUS_BLOCK_DATA_FORCE_FRAME_MASK_SHIFT	12
 	u8 line_num;
 	u8 hw_id;
 };
@@ -2072,6 +2212,7 @@ enum dbg_bus_clients {
 	MAX_DBG_BUS_CLIENTS
 };
 
+/* Debug Bus constraint operation types */
 enum dbg_bus_constraint_ops {
 	DBG_BUS_CONSTRAINT_OP_EQ,
 	DBG_BUS_CONSTRAINT_OP_NE,
@@ -2086,12 +2227,13 @@ enum dbg_bus_constraint_ops {
 	MAX_DBG_BUS_CONSTRAINT_OPS
 };
 
+/* Debug Bus trigger state data */
 struct dbg_bus_trigger_state_data {
 	u8 data;
-#define DBG_BUS_TRIGGER_STATE_DATA_BLOCK_SHIFTED_ENABLE_MASK_MASK  0xF
-#define DBG_BUS_TRIGGER_STATE_DATA_BLOCK_SHIFTED_ENABLE_MASK_SHIFT 0
-#define DBG_BUS_TRIGGER_STATE_DATA_CONSTRAINT_DWORD_MASK_MASK      0xF
-#define DBG_BUS_TRIGGER_STATE_DATA_CONSTRAINT_DWORD_MASK_SHIFT     4
+#define DBG_BUS_TRIGGER_STATE_DATA_BLOCK_SHIFTED_ENABLE_MASK_MASK	0xF
+#define DBG_BUS_TRIGGER_STATE_DATA_BLOCK_SHIFTED_ENABLE_MASK_SHIFT	0
+#define DBG_BUS_TRIGGER_STATE_DATA_CONSTRAINT_DWORD_MASK_MASK		0xF
+#define DBG_BUS_TRIGGER_STATE_DATA_CONSTRAINT_DWORD_MASK_SHIFT		4
 };
 
 /* Debug Bus memory address */
@@ -2165,6 +2307,7 @@ struct dbg_bus_data {
 	struct dbg_bus_storm_data storms[6];
 };
 
+/* Debug bus filter types */
 enum dbg_bus_filter_types {
 	DBG_BUS_FILTER_TYPE_OFF,
 	DBG_BUS_FILTER_TYPE_PRE,
@@ -2181,6 +2324,7 @@ enum dbg_bus_frame_modes {
 	MAX_DBG_BUS_FRAME_MODES
 };
 
+/* Debug bus other engine modes */
 enum dbg_bus_other_engine_modes {
 	DBG_BUS_OTHER_ENGINE_MODE_NONE,
 	DBG_BUS_OTHER_ENGINE_MODE_DOUBLE_BW_TX,
@@ -2190,12 +2334,14 @@ enum dbg_bus_other_engine_modes {
 	MAX_DBG_BUS_OTHER_ENGINE_MODES
 };
 
+/* Debug bus post-trigger recording types */
 enum dbg_bus_post_trigger_types {
 	DBG_BUS_POST_TRIGGER_RECORD,
 	DBG_BUS_POST_TRIGGER_DROP,
 	MAX_DBG_BUS_POST_TRIGGER_TYPES
 };
 
+/* Debug bus pre-trigger recording types */
 enum dbg_bus_pre_trigger_types {
 	DBG_BUS_PRE_TRIGGER_START_FROM_ZERO,
 	DBG_BUS_PRE_TRIGGER_NUM_CHUNKS,
@@ -2203,11 +2349,10 @@ enum dbg_bus_pre_trigger_types {
 	MAX_DBG_BUS_PRE_TRIGGER_TYPES
 };
 
+/* Debug bus SEMI frame modes */
 enum dbg_bus_semi_frame_modes {
-	DBG_BUS_SEMI_FRAME_MODE_0SLOW_4FAST =
-	    0,
-	DBG_BUS_SEMI_FRAME_MODE_4SLOW_0FAST =
-	    3,
+	DBG_BUS_SEMI_FRAME_MODE_0SLOW_4FAST = 0,
+	DBG_BUS_SEMI_FRAME_MODE_4SLOW_0FAST = 3,
 	MAX_DBG_BUS_SEMI_FRAME_MODES
 };
 
@@ -2220,6 +2365,7 @@ enum dbg_bus_states {
 	MAX_DBG_BUS_STATES
 };
 
+/* Debug Bus Storm modes */
 enum dbg_bus_storm_modes {
 	DBG_BUS_STORM_MODE_PRINTF,
 	DBG_BUS_STORM_MODE_PRAM_ADDR,
@@ -2352,7 +2498,7 @@ enum dbg_status {
 	DBG_STATUS_MCP_TRACE_NO_META,
 	DBG_STATUS_MCP_COULD_NOT_HALT,
 	DBG_STATUS_MCP_COULD_NOT_RESUME,
-	DBG_STATUS_DMAE_FAILED,
+	DBG_STATUS_RESERVED2,
 	DBG_STATUS_SEMI_FIFO_NOT_EMPTY,
 	DBG_STATUS_IGU_FIFO_BAD_DATA,
 	DBG_STATUS_MCP_COULD_NOT_MASK_PRTY,
@@ -2396,7 +2542,8 @@ struct dbg_tools_data {
 	u8 chip_id;
 	u8 platform_id;
 	u8 initialized;
-	u8 reserved;
+	u8 use_dmae;
+	__le32 num_regs_read;
 };
 
 /********************************/
@@ -2406,6 +2553,7 @@ struct dbg_tools_data {
 /* Number of VLAN priorities */
 #define NUM_OF_VLAN_PRIORITIES	8
 
+/* BRB RAM init requirements */
 struct init_brb_ram_req {
 	__le32 guranteed_per_tc;
 	__le32 headroom_per_tc;
@@ -2414,17 +2562,20 @@ struct init_brb_ram_req {
 	u8 num_active_tcs[MAX_NUM_PORTS];
 };
 
+/* ETS per-TC init requirements */
 struct init_ets_tc_req {
 	u8 use_sp;
 	u8 use_wfq;
 	__le16 weight;
 };
 
+/* ETS init requirements */
 struct init_ets_req {
 	__le32 mtu;
 	struct init_ets_tc_req tc_req[NUM_OF_TCS];
 };
 
+/* NIG LB RL init requirements */
 struct init_nig_lb_rl_req {
 	__le16 lb_mac_rate;
 	__le16 lb_rate;
@@ -2432,15 +2583,18 @@ struct init_nig_lb_rl_req {
 	__le16 tc_rate[NUM_OF_PHYS_TCS];
 };
 
+/* NIG TC mapping for each priority */
 struct init_nig_pri_tc_map_entry {
 	u8 tc_id;
 	u8 valid;
 };
 
+/* NIG priority to TC map init requirements */
 struct init_nig_pri_tc_map_req {
 	struct init_nig_pri_tc_map_entry pri[NUM_OF_VLAN_PRIORITIES];
 };
 
+/* QM per-port init parameters */
 struct init_qm_port_params {
 	u8 active;
 	u8 active_phys_tcs;
@@ -2563,7 +2717,7 @@ struct bin_buffer_hdr {
 	__le32 length;
 };
 
-/* binary init buffer types */
+/* Binary init buffer types */
 enum bin_init_buffer_type {
 	BIN_BUF_INIT_FW_VER_INFO,
 	BIN_BUF_INIT_CMD,
@@ -2793,6 +2947,7 @@ struct iro {
 };
 
 /***************************** Public Functions *******************************/
+
 /**
  * @brief qed_dbg_set_bin_ptr - Sets a pointer to the binary data with debug
  *	arrays.
@@ -2802,6 +2957,18 @@ struct iro {
 enum dbg_status qed_dbg_set_bin_ptr(const u8 * const bin_ptr);
 
 /**
+ * @brief qed_read_regs - Reads registers into a buffer (using GRC).
+ *
+ * @param p_hwfn - HW device data
+ * @param p_ptt - Ptt window used for writing the registers.
+ * @param buf - Destination buffer.
+ * @param addr - Source GRC address in dwords.
+ * @param len - Number of registers to read.
+ */
+void qed_read_regs(struct qed_hwfn *p_hwfn,
+		   struct qed_ptt *p_ptt, u32 *buf, u32 addr, u32 len);
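A usage sketch, assuming a valid p_hwfn/p_ptt pair and a hypothetical GRC dword address:

	u32 buf[8];

	/* Read 8 consecutive registers starting at dword address 0x1000. */
	qed_read_regs(p_hwfn, p_ptt, buf, 0x1000, 8);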
+
+/**
  * @brief qed_dbg_grc_set_params_default - Reverts all GRC parameters to their
  *	default value.
  *
@@ -3119,6 +3286,7 @@ enum dbg_status qed_dbg_print_attn(struct qed_hwfn *p_hwfn,
 #define MAX_NAME_LEN	16
 
 /***************************** Public Functions *******************************/
+
 /**
  * @brief qed_dbg_user_set_bin_ptr - Sets a pointer to the binary data with
  *	debug arrays.
@@ -3172,6 +3340,18 @@ enum dbg_status qed_print_idle_chk_results(struct qed_hwfn *p_hwfn,
 					   u32 *num_warnings);
 
 /**
+ * @brief qed_dbg_mcp_trace_set_meta_data - Sets a pointer to the MCP Trace
+ *	meta data.
+ *
+ * Needed in case the MCP Trace dump doesn't contain the meta data (e.g. due to
+ * no NVRAM access).
+ *
+ * @param data - pointer to MCP Trace meta data
+ * @param size - size of MCP Trace meta data in dwords
+ */
+void qed_dbg_mcp_trace_set_meta_data(u32 *data, u32 size);
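A usage sketch; meta_buf and meta_len stand for meta data the caller obtained out of band (e.g. read from a firmware meta file):

	qed_dbg_mcp_trace_set_meta_data(meta_buf, meta_len);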
+
+/**
  * @brief qed_get_mcp_trace_results_buf_size - Returns the required buffer size
  *	for MCP Trace results (in bytes).
  *
@@ -3607,6 +3787,9 @@ static const u32 dbg_bus_blocks[] = {
 	0x00000000,		/* bar0_map, bb, 0 lines */
 	0x00000000,		/* bar0_map, k2, 0 lines */
 	0x00000000,
+	0x00000000,		/* bar0_map, bb, 0 lines */
+	0x00000000,		/* bar0_map, k2, 0 lines */
+	0x00000000,
 };
 
 /* Win 2 */
@@ -3645,7 +3828,6 @@ static const u32 dbg_bus_blocks[] = {
  * Returns the required host memory size in 4KB units.
  * Must be called before all QM init HSI functions.
  *
- * @param pf_id - physical function ID
  * @param num_pf_cids - number of connections used by this PF
  * @param num_vf_cids - number of connections used by VFs of this PF
  * @param num_tids - number of tasks used by this PF
@@ -3654,8 +3836,7 @@ static const u32 dbg_bus_blocks[] = {
  *
  * @return The required host memory size in 4KB units.
  */
-u32 qed_qm_pf_mem_size(u8 pf_id,
-		       u32 num_pf_cids,
+u32 qed_qm_pf_mem_size(u32 num_pf_cids,
 		       u32 num_vf_cids,
 		       u32 num_tids, u16 num_pf_pqs, u16 num_vf_pqs);
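A sizing sketch with hypothetical counts; the return value is in 4KB units, so multiply by 4096 for bytes:

	u32 mem_4kb = qed_qm_pf_mem_size(num_pf_cids, num_vf_cids, num_tids,
					 num_pf_pqs, num_vf_pqs);
	u32 mem_bytes = mem_4kb * 4096;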
 
@@ -3676,7 +3857,7 @@ struct qed_qm_pf_rt_init_params {
 	u8 port_id;
 	u8 pf_id;
 	u8 max_phys_tcs_per_port;
-	bool is_first_pf;
+	bool is_pf_loading;
 	u32 num_pf_cids;
 	u32 num_vf_cids;
 	u32 num_tids;
@@ -3687,6 +3868,7 @@ struct qed_qm_pf_rt_init_params {
 	u8 num_vports;
 	u16 pf_wfq;
 	u32 pf_rl;
+	u32 link_speed;
 	struct init_qm_pq_params *pq_params;
 	struct init_qm_vport_params *vport_params;
 };
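A sketch of filling the structure; p_hwfn, pq_params and vport_params are assumed to come from the driver's resource accounting, and every literal here is illustrative. Note that is_pf_loading replaces the old is_first_pf flag and link_speed is in Mbps:

	struct qed_qm_pf_rt_init_params params = {
		.port_id	= 0,
		.pf_id		= p_hwfn->rel_pf_id,
		.is_pf_loading	= true,
		.link_speed	= 25000,	/* assumed 25G link */
		.pq_params	= pq_params,
		.vport_params	= vport_params,
	};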
@@ -3744,11 +3926,14 @@ int qed_init_vport_wfq(struct qed_hwfn *p_hwfn,
  * @param p_ptt - ptt window used for writing the registers
  * @param vport_id - VPORT ID
  * @param vport_rl - rate limit in Mb/sec units
+ * @param link_speed - link speed in Mbps.
  *
  * @return 0 on success, -1 on error.
  */
 int qed_init_vport_rl(struct qed_hwfn *p_hwfn,
-		      struct qed_ptt *p_ptt, u8 vport_id, u32 vport_rl);
+		      struct qed_ptt *p_ptt,
+		      u8 vport_id, u32 vport_rl, u32 link_speed);
+
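A call sketch: cap a hypothetical vport_id at 1 Gb/s on an assumed 25G link (both rates in Mb/s):

	int rc = qed_init_vport_rl(p_hwfn, p_ptt, vport_id, 1000, 25000);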
 /**
 * @brief qed_send_qm_stop_cmd - Sends a stop command to the QM
  *
@@ -3759,7 +3944,8 @@ int qed_init_vport_rl(struct qed_hwfn *p_hwfn,
  * @param start_pq - first PQ ID to stop
  * @param num_pqs - Number of PQs to stop, starting from start_pq.
  *
- * @return bool, true if successful, false if timeout occured while waiting for QM command done.
+ * @return bool, true if successful, false if timeout occurred while waiting for
+ *	QM command done.
  */
 bool qed_send_qm_stop_cmd(struct qed_hwfn *p_hwfn,
 			  struct qed_ptt *p_ptt,
@@ -3769,6 +3955,7 @@ bool qed_send_qm_stop_cmd(struct qed_hwfn *p_hwfn,
 /**
  * @brief qed_set_vxlan_dest_port - initializes vxlan tunnel destination udp port
  *
+ * @param p_hwfn
  * @param p_ptt - ptt window used for writing the registers.
  * @param dest_port - vxlan destination udp port.
  */
@@ -3778,6 +3965,7 @@ void qed_set_vxlan_dest_port(struct qed_hwfn *p_hwfn,
 /**
  * @brief qed_set_vxlan_enable - enable or disable VXLAN tunnel in HW
  *
+ * @param p_hwfn
  * @param p_ptt - ptt window used for writing the registers.
  * @param vxlan_enable - vxlan enable flag.
  */
@@ -3787,6 +3975,7 @@ void qed_set_vxlan_enable(struct qed_hwfn *p_hwfn,
 /**
  * @brief qed_set_gre_enable - enable or disable GRE tunnel in HW
  *
+ * @param p_hwfn
  * @param p_ptt - ptt window used for writing the registers.
 * @param eth_gre_enable - eth GRE enable flag.
 * @param ip_gre_enable - IP GRE enable flag.
@@ -3798,6 +3987,7 @@ void qed_set_gre_enable(struct qed_hwfn *p_hwfn,
 /**
  * @brief qed_set_geneve_dest_port - initializes geneve tunnel destination udp port
  *
+ * @param p_hwfn
  * @param p_ptt - ptt window used for writing the registers.
  * @param dest_port - geneve destination udp port.
  */
@@ -3814,612 +4004,921 @@ void qed_set_geneve_dest_port(struct qed_hwfn *p_hwfn,
 void qed_set_geneve_enable(struct qed_hwfn *p_hwfn,
 			   struct qed_ptt *p_ptt,
 			   bool eth_geneve_enable, bool ip_geneve_enable);
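A configuration sketch for the tunnel helpers above, using the IANA default UDP ports (4789 for VXLAN, 6081 for GENEVE); p_hwfn/p_ptt are assumed to be held by the caller:

	qed_set_vxlan_dest_port(p_hwfn, p_ptt, 4789);
	qed_set_vxlan_enable(p_hwfn, p_ptt, true);
	qed_set_gre_enable(p_hwfn, p_ptt, true, true);
	qed_set_geneve_dest_port(p_hwfn, p_ptt, 6081);
	qed_set_geneve_enable(p_hwfn, p_ptt, true, true);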
-void qed_set_rfs_mode_disable(struct qed_hwfn *p_hwfn,
-			      struct qed_ptt *p_ptt, u16 pf_id);
-void qed_set_rfs_mode_enable(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt,
-			     u16 pf_id, bool tcp, bool udp,
-			     bool ipv4, bool ipv6);
-
-#define	YSTORM_FLOW_CONTROL_MODE_OFFSET			(IRO[0].base)
-#define	YSTORM_FLOW_CONTROL_MODE_SIZE			(IRO[0].size)
-#define	TSTORM_PORT_STAT_OFFSET(port_id) \
+
+/**
+ * @brief qed_gft_disable - Disable GFT
+ *
+ * @param p_hwfn
+ * @param p_ptt - ptt window used for writing the registers.
+ * @param pf_id - pf on which to disable GFT.
+ */
+void qed_gft_disable(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, u16 pf_id);
+
+/**
+ * @brief qed_gft_config - Enable and configure HW for GFT
+ *
+ * @param p_hwfn
+ * @param p_ptt - ptt window used for writing the registers.
+ * @param pf_id - pf on which to enable GFT.
+ * @param tcp - set profile tcp packets.
+ * @param udp - set profile udp packets.
+ * @param ipv4 - set profile ipv4 packets.
+ * @param ipv6 - set profile ipv6 packets.
+ * @param profile_type - defines which packet fields to match (see enum
+ *	gft_profile_type).
+ */
+void qed_gft_config(struct qed_hwfn *p_hwfn,
+		    struct qed_ptt *p_ptt,
+		    u16 pf_id,
+		    bool tcp,
+		    bool udp,
+		    bool ipv4, bool ipv6, enum gft_profile_type profile_type);
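A call sketch enabling GFT on a PF for TCP and UDP over IPv4 with 4-tuple matching; GFT_PROFILE_TYPE_4_TUPLE is assumed to be one of the enum gft_profile_type values:

	qed_gft_config(p_hwfn, p_ptt, p_hwfn->rel_pf_id,
		       true, true, true, false, GFT_PROFILE_TYPE_4_TUPLE);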
+
+/**
+ * @brief qed_enable_context_validation - Enable and configure context
+ *	validation.
+ *
+ * @param p_hwfn
+ * @param p_ptt - ptt window used for writing the registers.
+ */
+void qed_enable_context_validation(struct qed_hwfn *p_hwfn,
+				   struct qed_ptt *p_ptt);
+
+/**
+ * @brief qed_calc_session_ctx_validation - Calculate validation byte for
+ *	session context.
+ *
+ * @param p_ctx_mem - pointer to context memory.
+ * @param ctx_size - context size.
+ * @param ctx_type - context type.
+ * @param cid - context cid.
+ */
+void qed_calc_session_ctx_validation(void *p_ctx_mem,
+				     u16 ctx_size, u8 ctx_type, u32 cid);
+
+/**
+ * @brief qed_calc_task_ctx_validation - Calculate validation byte for task
+ *	context.
+ *
+ * @param p_ctx_mem - pointer to context memory.
+ * @param ctx_size - context size.
+ * @param ctx_type - context type.
+ * @param tid - context tid.
+ */
+void qed_calc_task_ctx_validation(void *p_ctx_mem,
+				  u16 ctx_size, u8 ctx_type, u32 tid);
+
+/**
+ * @brief qed_memset_session_ctx - Memset session context to 0 while
+ *	preserving validation bytes.
+ *
+ * @param p_ctx_mem - pointer to context memory.
+ * @param ctx_size - size to initialize.
+ * @param ctx_type - context type.
+ */
+void qed_memset_session_ctx(void *p_ctx_mem, u32 ctx_size, u8 ctx_type);
+
+/**
+ * @brief qed_memset_task_ctx - Memset task context to 0 while preserving
+ *	validation bytes.
+ *
+ * @param p_ctx_mem - pointer to context memory.
+ * @param ctx_size - size to initialize.
+ * @param ctx_type - context type.
+ */
+void qed_memset_task_ctx(void *p_ctx_mem, u32 ctx_size, u8 ctx_type);
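A sketch of the intended flow: enable validation once per device, then, per connection, compute the validation byte and zero the rest of the context around it; p_ctx, ctx_size, ctx_type and cid are assumptions of this example:

	qed_enable_context_validation(p_hwfn, p_ptt);
	qed_calc_session_ctx_validation(p_ctx, ctx_size, ctx_type, cid);
	qed_memset_session_ctx(p_ctx, ctx_size, ctx_type);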
+
+/* Ystorm flow control mode. Use enum fw_flow_ctrl_mode */
+#define YSTORM_FLOW_CONTROL_MODE_OFFSET			(IRO[0].base)
+#define YSTORM_FLOW_CONTROL_MODE_SIZE			(IRO[0].size)
+
+/* Tstorm port statistics */
+#define TSTORM_PORT_STAT_OFFSET(port_id) \
 	(IRO[1].base + ((port_id) * IRO[1].m1))
-#define	TSTORM_PORT_STAT_SIZE				(IRO[1].size)
+#define TSTORM_PORT_STAT_SIZE				(IRO[1].size)
+
+/* Tstorm LL2 port statistics */
 #define TSTORM_LL2_PORT_STAT_OFFSET(port_id) \
 	(IRO[2].base + ((port_id) * IRO[2].m1))
 #define TSTORM_LL2_PORT_STAT_SIZE			(IRO[2].size)
-#define	USTORM_VF_PF_CHANNEL_READY_OFFSET(vf_id) \
+
+/* Ustorm VF-PF Channel ready flag */
+#define USTORM_VF_PF_CHANNEL_READY_OFFSET(vf_id) \
 	(IRO[3].base + ((vf_id) * IRO[3].m1))
-#define	USTORM_VF_PF_CHANNEL_READY_SIZE			(IRO[3].size)
-#define	USTORM_FLR_FINAL_ACK_OFFSET(pf_id) \
-	(IRO[4].base + (pf_id) * IRO[4].m1)
-#define	USTORM_FLR_FINAL_ACK_SIZE			(IRO[4].size)
-#define	USTORM_EQE_CONS_OFFSET(pf_id) \
+#define USTORM_VF_PF_CHANNEL_READY_SIZE			(IRO[3].size)
+
+/* Ustorm Final flr cleanup ack */
+#define USTORM_FLR_FINAL_ACK_OFFSET(pf_id) \
+	(IRO[4].base + ((pf_id) * IRO[4].m1))
+#define USTORM_FLR_FINAL_ACK_SIZE			(IRO[4].size)
+
+/* Ustorm Event ring consumer */
+#define USTORM_EQE_CONS_OFFSET(pf_id) \
 	(IRO[5].base + ((pf_id) * IRO[5].m1))
-#define	USTORM_EQE_CONS_SIZE				(IRO[5].size)
-#define	USTORM_ETH_QUEUE_ZONE_OFFSET(queue_zone_id) \
+#define USTORM_EQE_CONS_SIZE				(IRO[5].size)
+
+/* Ustorm eth queue zone */
+#define USTORM_ETH_QUEUE_ZONE_OFFSET(queue_zone_id) \
 	(IRO[6].base + ((queue_zone_id) * IRO[6].m1))
-#define	USTORM_ETH_QUEUE_ZONE_SIZE			(IRO[6].size)
-#define	USTORM_COMMON_QUEUE_CONS_OFFSET(queue_zone_id) \
+#define USTORM_ETH_QUEUE_ZONE_SIZE			(IRO[6].size)
+
+/* Ustorm Common Queue ring consumer */
+#define USTORM_COMMON_QUEUE_CONS_OFFSET(queue_zone_id) \
 	(IRO[7].base + ((queue_zone_id) * IRO[7].m1))
-#define	USTORM_COMMON_QUEUE_CONS_SIZE			(IRO[7].size)
+#define USTORM_COMMON_QUEUE_CONS_SIZE			(IRO[7].size)
+
+/* Xstorm Integration Test Data */
+#define XSTORM_INTEG_TEST_DATA_OFFSET			(IRO[8].base)
+#define XSTORM_INTEG_TEST_DATA_SIZE			(IRO[8].size)
+
+/* Ystorm Integration Test Data */
+#define YSTORM_INTEG_TEST_DATA_OFFSET			(IRO[9].base)
+#define YSTORM_INTEG_TEST_DATA_SIZE			(IRO[9].size)
+
+/* Pstorm Integration Test Data */
+#define PSTORM_INTEG_TEST_DATA_OFFSET			(IRO[10].base)
+#define PSTORM_INTEG_TEST_DATA_SIZE			(IRO[10].size)
+
+/* Tstorm Integration Test Data */
+#define TSTORM_INTEG_TEST_DATA_OFFSET			(IRO[11].base)
+#define TSTORM_INTEG_TEST_DATA_SIZE			(IRO[11].size)
+
+/* Mstorm Integration Test Data */
+#define MSTORM_INTEG_TEST_DATA_OFFSET			(IRO[12].base)
+#define MSTORM_INTEG_TEST_DATA_SIZE			(IRO[12].size)
+
+/* Ustorm Integration Test Data */
+#define USTORM_INTEG_TEST_DATA_OFFSET			(IRO[13].base)
+#define USTORM_INTEG_TEST_DATA_SIZE			(IRO[13].size)
+
+/* Tstorm producers */
 #define TSTORM_LL2_RX_PRODS_OFFSET(core_rx_queue_id) \
-	(IRO[14].base +	((core_rx_queue_id) * IRO[14].m1))
+	(IRO[14].base + ((core_rx_queue_id) * IRO[14].m1))
 #define TSTORM_LL2_RX_PRODS_SIZE			(IRO[14].size)
+
+/* Tstorm LL2 queue statistics */
 #define CORE_LL2_TSTORM_PER_QUEUE_STAT_OFFSET(core_rx_queue_id) \
 	(IRO[15].base + ((core_rx_queue_id) * IRO[15].m1))
 #define CORE_LL2_TSTORM_PER_QUEUE_STAT_SIZE		(IRO[15].size)
+
+/* Ustorm LL2 queue statistics */
 #define CORE_LL2_USTORM_PER_QUEUE_STAT_OFFSET(core_rx_queue_id) \
-	(IRO[16].base +	((core_rx_queue_id) * IRO[16].m1))
+	(IRO[16].base + ((core_rx_queue_id) * IRO[16].m1))
 #define CORE_LL2_USTORM_PER_QUEUE_STAT_SIZE		(IRO[16].size)
+
+/* Pstorm LL2 queue statistics */
 #define CORE_LL2_PSTORM_PER_QUEUE_STAT_OFFSET(core_tx_stats_id) \
-	(IRO[17].base +	((core_tx_stats_id) * IRO[17].m1))
-#define CORE_LL2_PSTORM_PER_QUEUE_STAT_SIZE	(IRO[17].	size)
-#define	MSTORM_QUEUE_STAT_OFFSET(stat_counter_id) \
+	(IRO[17].base + ((core_tx_stats_id) * IRO[17].m1))
+#define CORE_LL2_PSTORM_PER_QUEUE_STAT_SIZE		(IRO[17].size)
+
+/* Mstorm queue statistics */
+#define MSTORM_QUEUE_STAT_OFFSET(stat_counter_id) \
 	(IRO[18].base + ((stat_counter_id) * IRO[18].m1))
-#define	MSTORM_QUEUE_STAT_SIZE				(IRO[18].size)
-#define	MSTORM_ETH_PF_PRODS_OFFSET(queue_id) \
+#define MSTORM_QUEUE_STAT_SIZE				(IRO[18].size)
+
+/* Mstorm ETH PF queues producers */
+#define MSTORM_ETH_PF_PRODS_OFFSET(queue_id) \
 	(IRO[19].base + ((queue_id) * IRO[19].m1))
-#define	MSTORM_ETH_PF_PRODS_SIZE			(IRO[19].size)
+#define MSTORM_ETH_PF_PRODS_SIZE			(IRO[19].size)
+
+/* Mstorm ETH VF queues producers offset in RAM. Used in default VF zone size
+ * mode.
+ */
 #define MSTORM_ETH_VF_PRODS_OFFSET(vf_id, vf_queue_id) \
-	(IRO[20].base +	((vf_id) * IRO[20].m1) + ((vf_queue_id) * IRO[20].m2))
+	(IRO[20].base + ((vf_id) * IRO[20].m1) + ((vf_queue_id) * IRO[20].m2))
 #define MSTORM_ETH_VF_PRODS_SIZE			(IRO[20].size)
-#define	MSTORM_TPA_TIMEOUT_US_OFFSET			(IRO[21].base)
-#define	MSTORM_TPA_TIMEOUT_US_SIZE			(IRO[21].size)
-#define	MSTORM_ETH_PF_STAT_OFFSET(pf_id) \
+
+/* TPA aggregation timeout in us resolution (on ASIC) */
+#define MSTORM_TPA_TIMEOUT_US_OFFSET			(IRO[21].base)
+#define MSTORM_TPA_TIMEOUT_US_SIZE			(IRO[21].size)
+
+/* Mstorm pf statistics */
+#define MSTORM_ETH_PF_STAT_OFFSET(pf_id) \
 	(IRO[22].base + ((pf_id) * IRO[22].m1))
-#define	MSTORM_ETH_PF_STAT_SIZE				(IRO[22].size)
-#define	USTORM_QUEUE_STAT_OFFSET(stat_counter_id) \
+#define MSTORM_ETH_PF_STAT_SIZE				(IRO[22].size)
+
+/* Ustorm queue statistics */
+#define USTORM_QUEUE_STAT_OFFSET(stat_counter_id) \
 	(IRO[23].base + ((stat_counter_id) * IRO[23].m1))
-#define	USTORM_QUEUE_STAT_SIZE				(IRO[23].size)
-#define	USTORM_ETH_PF_STAT_OFFSET(pf_id) \
+#define USTORM_QUEUE_STAT_SIZE				(IRO[23].size)
+
+/* Ustorm pf statistics */
+#define USTORM_ETH_PF_STAT_OFFSET(pf_id) \
 	(IRO[24].base + ((pf_id) * IRO[24].m1))
-#define	USTORM_ETH_PF_STAT_SIZE				(IRO[24].size)
-#define	PSTORM_QUEUE_STAT_OFFSET(stat_counter_id) \
+#define USTORM_ETH_PF_STAT_SIZE				(IRO[24].size)
+
+/* Pstorm queue statistics */
+#define PSTORM_QUEUE_STAT_OFFSET(stat_counter_id) \
 	(IRO[25].base + ((stat_counter_id) * IRO[25].m1))
-#define	PSTORM_QUEUE_STAT_SIZE				(IRO[25].size)
-#define	PSTORM_ETH_PF_STAT_OFFSET(pf_id) \
+#define PSTORM_QUEUE_STAT_SIZE				(IRO[25].size)
+
+/* Pstorm pf statistics */
+#define PSTORM_ETH_PF_STAT_OFFSET(pf_id) \
 	(IRO[26].base + ((pf_id) * IRO[26].m1))
-#define	PSTORM_ETH_PF_STAT_SIZE				(IRO[26].size)
-#define	PSTORM_CTL_FRAME_ETHTYPE_OFFSET(ethtype) \
-	(IRO[27].base + ((ethtype) * IRO[27].m1))
-#define	PSTORM_CTL_FRAME_ETHTYPE_SIZE			(IRO[27].size)
-#define	TSTORM_ETH_PRS_INPUT_OFFSET			(IRO[28].base)
-#define	TSTORM_ETH_PRS_INPUT_SIZE			(IRO[28].size)
-#define	ETH_RX_RATE_LIMIT_OFFSET(pf_id) \
+#define PSTORM_ETH_PF_STAT_SIZE				(IRO[26].size)
+
+/* Control frame's EthType configuration for TX control frame security */
+#define PSTORM_CTL_FRAME_ETHTYPE_OFFSET(eth_type_id) \
+	(IRO[27].base + ((eth_type_id) * IRO[27].m1))
+#define PSTORM_CTL_FRAME_ETHTYPE_SIZE			(IRO[27].size)
+
+/* Tstorm last parser message */
+#define TSTORM_ETH_PRS_INPUT_OFFSET			(IRO[28].base)
+#define TSTORM_ETH_PRS_INPUT_SIZE			(IRO[28].size)
+
+/* Tstorm Eth limit Rx rate */
+#define ETH_RX_RATE_LIMIT_OFFSET(pf_id) \
 	(IRO[29].base + ((pf_id) * IRO[29].m1))
-#define	ETH_RX_RATE_LIMIT_SIZE				(IRO[29].size)
-#define	XSTORM_ETH_QUEUE_ZONE_OFFSET(queue_id) \
+#define ETH_RX_RATE_LIMIT_SIZE				(IRO[29].size)
+
+/* Xstorm queue zone */
+#define XSTORM_ETH_QUEUE_ZONE_OFFSET(queue_id) \
 	(IRO[30].base + ((queue_id) * IRO[30].m1))
-#define	XSTORM_ETH_QUEUE_ZONE_SIZE			(IRO[30].size)
+#define XSTORM_ETH_QUEUE_ZONE_SIZE			(IRO[30].size)
+
+/* Ystorm cqe producer */
+#define YSTORM_TOE_CQ_PROD_OFFSET(rss_id) \
+	(IRO[31].base + ((rss_id) * IRO[31].m1))
+#define YSTORM_TOE_CQ_PROD_SIZE				(IRO[31].size)
+
+/* Ustorm cqe producer */
+#define USTORM_TOE_CQ_PROD_OFFSET(rss_id) \
+	(IRO[32].base + ((rss_id) * IRO[32].m1))
+#define USTORM_TOE_CQ_PROD_SIZE				(IRO[32].size)
+
+/* Ustorm grq producer */
+#define USTORM_TOE_GRQ_PROD_OFFSET(pf_id) \
+	(IRO[33].base + ((pf_id) * IRO[33].m1))
+#define USTORM_TOE_GRQ_PROD_SIZE			(IRO[33].size)
+
+/* Tstorm cmdq-cons of given command queue-id */
 #define TSTORM_SCSI_CMDQ_CONS_OFFSET(cmdq_queue_id) \
-	(IRO[34].base +	((cmdq_queue_id) * IRO[34].m1))
-#define TSTORM_SCSI_CMDQ_CONS_SIZE				(IRO[34].size)
+	(IRO[34].base + ((cmdq_queue_id) * IRO[34].m1))
+#define TSTORM_SCSI_CMDQ_CONS_SIZE			(IRO[34].size)
+
+/* Tstorm (reflects M-Storm) bdq-external-producer of given function ID,
+ * BD queue-id.
+ */
 #define TSTORM_SCSI_BDQ_EXT_PROD_OFFSET(func_id, bdq_id) \
-	(IRO[35].base +	((func_id) * IRO[35].m1) + ((bdq_id) * IRO[35].m2))
-#define TSTORM_SCSI_BDQ_EXT_PROD_SIZE				(IRO[35].size)
+	(IRO[35].base + ((func_id) * IRO[35].m1) + ((bdq_id) * IRO[35].m2))
+#define TSTORM_SCSI_BDQ_EXT_PROD_SIZE			(IRO[35].size)
+
+/* Mstorm bdq-external-producer of given BDQ resource ID, BD queue-id */
 #define MSTORM_SCSI_BDQ_EXT_PROD_OFFSET(func_id, bdq_id) \
-	(IRO[36].base +	((func_id) * IRO[36].m1) + ((bdq_id) * IRO[36].m2))
-#define MSTORM_SCSI_BDQ_EXT_PROD_SIZE				(IRO[36].size)
+	(IRO[36].base + ((func_id) * IRO[36].m1) + ((bdq_id) * IRO[36].m2))
+#define MSTORM_SCSI_BDQ_EXT_PROD_SIZE			(IRO[36].size)
+
+/* Tstorm iSCSI RX stats */
 #define TSTORM_ISCSI_RX_STATS_OFFSET(pf_id) \
-	(IRO[37].base +	((pf_id) * IRO[37].m1))
-#define TSTORM_ISCSI_RX_STATS_SIZE				(IRO[37].size)
+	(IRO[37].base + ((pf_id) * IRO[37].m1))
+#define TSTORM_ISCSI_RX_STATS_SIZE			(IRO[37].size)
+
+/* Mstorm iSCSI RX stats */
 #define MSTORM_ISCSI_RX_STATS_OFFSET(pf_id) \
-	(IRO[38].base +	((pf_id) * IRO[38].m1))
-#define MSTORM_ISCSI_RX_STATS_SIZE				(IRO[38].size)
+	(IRO[38].base + ((pf_id) * IRO[38].m1))
+#define MSTORM_ISCSI_RX_STATS_SIZE			(IRO[38].size)
+
+/* Ustorm iSCSI RX stats */
 #define USTORM_ISCSI_RX_STATS_OFFSET(pf_id) \
-	(IRO[39].base +	((pf_id) * IRO[39].m1))
-#define USTORM_ISCSI_RX_STATS_SIZE				(IRO[39].size)
+	(IRO[39].base + ((pf_id) * IRO[39].m1))
+#define USTORM_ISCSI_RX_STATS_SIZE			(IRO[39].size)
+
+/* Xstorm iSCSI TX stats */
 #define XSTORM_ISCSI_TX_STATS_OFFSET(pf_id) \
-	(IRO[40].base +	((pf_id) * IRO[40].m1))
-#define XSTORM_ISCSI_TX_STATS_SIZE				(IRO[40].size)
+	(IRO[40].base + ((pf_id) * IRO[40].m1))
+#define XSTORM_ISCSI_TX_STATS_SIZE			(IRO[40].size)
+
+/* Ystorm iSCSI TX stats */
 #define YSTORM_ISCSI_TX_STATS_OFFSET(pf_id) \
-	(IRO[41].base +	((pf_id) * IRO[41].m1))
-#define YSTORM_ISCSI_TX_STATS_SIZE				(IRO[41].size)
+	(IRO[41].base + ((pf_id) * IRO[41].m1))
+#define YSTORM_ISCSI_TX_STATS_SIZE			(IRO[41].size)
+
+/* Pstorm iSCSI TX stats */
 #define PSTORM_ISCSI_TX_STATS_OFFSET(pf_id) \
-	(IRO[42].base +	((pf_id) * IRO[42].m1))
-#define PSTORM_ISCSI_TX_STATS_SIZE				(IRO[42].size)
-#define PSTORM_RDMA_QUEUE_STAT_OFFSET(rdma_stat_counter_id) \
-	(IRO[45].base +	((rdma_stat_counter_id) * IRO[45].m1))
-#define PSTORM_RDMA_QUEUE_STAT_SIZE				(IRO[45].size)
-#define TSTORM_RDMA_QUEUE_STAT_OFFSET(rdma_stat_counter_id) \
-	(IRO[46].base +	((rdma_stat_counter_id) * IRO[46].m1))
-#define TSTORM_RDMA_QUEUE_STAT_SIZE				(IRO[46].size)
+	(IRO[42].base + ((pf_id) * IRO[42].m1))
+#define PSTORM_ISCSI_TX_STATS_SIZE			(IRO[42].size)
+
+/* Tstorm FCoE RX stats */
 #define TSTORM_FCOE_RX_STATS_OFFSET(pf_id) \
-	(IRO[43].base +	((pf_id) * IRO[43].m1))
+	(IRO[43].base + ((pf_id) * IRO[43].m1))
+#define TSTORM_FCOE_RX_STATS_SIZE			(IRO[43].size)
+
+/* Pstorm FCoE TX stats */
 #define PSTORM_FCOE_TX_STATS_OFFSET(pf_id) \
 	(IRO[44].base + ((pf_id) * IRO[44].m1))
+#define PSTORM_FCOE_TX_STATS_SIZE			(IRO[44].size)
+
+/* Pstorm RDMA queue statistics */
+#define PSTORM_RDMA_QUEUE_STAT_OFFSET(rdma_stat_counter_id) \
+	(IRO[45].base + ((rdma_stat_counter_id) * IRO[45].m1))
+#define PSTORM_RDMA_QUEUE_STAT_SIZE			(IRO[45].size)
 
-static const struct iro iro_arr[49] = {
+/* Tstorm RDMA queue statistics */
+#define TSTORM_RDMA_QUEUE_STAT_OFFSET(rdma_stat_counter_id) \
+	(IRO[46].base + ((rdma_stat_counter_id) * IRO[46].m1))
+#define TSTORM_RDMA_QUEUE_STAT_SIZE			(IRO[46].size)
+
+/* Xstorm iWARP rxmit stats */
+#define XSTORM_IWARP_RXMIT_STATS_OFFSET(pf_id) \
+	(IRO[47].base + ((pf_id) * IRO[47].m1))
+#define XSTORM_IWARP_RXMIT_STATS_SIZE			(IRO[47].size)
+
+/* Tstorm RoCE Event Statistics */
+#define TSTORM_ROCE_EVENTS_STAT_OFFSET(roce_pf_id) \
+	(IRO[48].base + ((roce_pf_id) * IRO[48].m1))
+#define TSTORM_ROCE_EVENTS_STAT_SIZE			(IRO[48].size)
+
+/* DCQCN Received Statistics */
+#define YSTORM_ROCE_DCQCN_RECEIVED_STATS_OFFSET(roce_pf_id) \
+	(IRO[49].base + ((roce_pf_id) * IRO[49].m1))
+#define YSTORM_ROCE_DCQCN_RECEIVED_STATS_SIZE		(IRO[49].size)
+
+/* DCQCN Sent Statistics */
+#define PSTORM_ROCE_DCQCN_SENT_STATS_OFFSET(roce_pf_id) \
+	(IRO[50].base + ((roce_pf_id) * IRO[50].m1))
+#define PSTORM_ROCE_DCQCN_SENT_STATS_SIZE		(IRO[50].size)
+
+static const struct iro iro_arr[51] = {
 	{0x0, 0x0, 0x0, 0x0, 0x8},
-	{0x4cb0, 0x80, 0x0, 0x0, 0x80},
-	{0x6518, 0x20, 0x0, 0x0, 0x20},
+	{0x4cb8, 0x88, 0x0, 0x0, 0x88},
+	{0x6530, 0x20, 0x0, 0x0, 0x20},
 	{0xb00, 0x8, 0x0, 0x0, 0x4},
 	{0xa80, 0x8, 0x0, 0x0, 0x4},
 	{0x0, 0x8, 0x0, 0x0, 0x2},
 	{0x80, 0x8, 0x0, 0x0, 0x4},
 	{0x84, 0x8, 0x0, 0x0, 0x2},
+	{0x4c48, 0x0, 0x0, 0x0, 0x78},
+	{0x3e18, 0x0, 0x0, 0x0, 0x78},
+	{0x2b58, 0x0, 0x0, 0x0, 0x78},
 	{0x4c40, 0x0, 0x0, 0x0, 0x78},
-	{0x3df0, 0x0, 0x0, 0x0, 0x78},
-	{0x29b0, 0x0, 0x0, 0x0, 0x78},
-	{0x4c38, 0x0, 0x0, 0x0, 0x78},
-	{0x4990, 0x0, 0x0, 0x0, 0x78},
-	{0x7f48, 0x0, 0x0, 0x0, 0x78},
+	{0x4998, 0x0, 0x0, 0x0, 0x78},
+	{0x7f50, 0x0, 0x0, 0x0, 0x78},
 	{0xa28, 0x8, 0x0, 0x0, 0x8},
-	{0x61f8, 0x10, 0x0, 0x0, 0x10},
-	{0xbd20, 0x30, 0x0, 0x0, 0x30},
-	{0x95b8, 0x30, 0x0, 0x0, 0x30},
-	{0x4b60, 0x80, 0x0, 0x0, 0x40},
+	{0x6210, 0x10, 0x0, 0x0, 0x10},
+	{0xb820, 0x30, 0x0, 0x0, 0x30},
+	{0x96c0, 0x30, 0x0, 0x0, 0x30},
+	{0x4b68, 0x80, 0x0, 0x0, 0x40},
 	{0x1f8, 0x4, 0x0, 0x0, 0x4},
-	{0x53a0, 0x80, 0x4, 0x0, 0x4},
-	{0xc7c8, 0x0, 0x0, 0x0, 0x4},
-	{0x4ba0, 0x80, 0x0, 0x0, 0x20},
-	{0x8150, 0x40, 0x0, 0x0, 0x30},
-	{0xec70, 0x60, 0x0, 0x0, 0x60},
-	{0x2b48, 0x80, 0x0, 0x0, 0x38},
-	{0xf1b0, 0x78, 0x0, 0x0, 0x78},
+	{0x53a8, 0x80, 0x4, 0x0, 0x4},
+	{0xc7d0, 0x0, 0x0, 0x0, 0x4},
+	{0x4ba8, 0x80, 0x0, 0x0, 0x20},
+	{0x8158, 0x40, 0x0, 0x0, 0x30},
+	{0xe770, 0x60, 0x0, 0x0, 0x60},
+	{0x2cf0, 0x80, 0x0, 0x0, 0x38},
+	{0xf2b8, 0x78, 0x0, 0x0, 0x78},
 	{0x1f8, 0x4, 0x0, 0x0, 0x4},
-	{0xaef8, 0x0, 0x0, 0x0, 0xf0},
-	{0xafe8, 0x8, 0x0, 0x0, 0x8},
+	{0xaf20, 0x0, 0x0, 0x0, 0xf0},
+	{0xb010, 0x8, 0x0, 0x0, 0x8},
 	{0x1f8, 0x8, 0x0, 0x0, 0x8},
 	{0xac0, 0x8, 0x0, 0x0, 0x8},
 	{0x2578, 0x8, 0x0, 0x0, 0x8},
 	{0x24f8, 0x8, 0x0, 0x0, 0x8},
 	{0x0, 0x8, 0x0, 0x0, 0x8},
-	{0x200, 0x10, 0x8, 0x0, 0x8},
-	{0xb78, 0x10, 0x8, 0x0, 0x2},
-	{0xd9a8, 0x38, 0x0, 0x0, 0x24},
-	{0x12988, 0x10, 0x0, 0x0, 0x8},
-	{0x11fa0, 0x38, 0x0, 0x0, 0x18},
-	{0xa580, 0x38, 0x0, 0x0, 0x10},
-	{0x86f8, 0x30, 0x0, 0x0, 0x18},
-	{0x101f8, 0x10, 0x0, 0x0, 0x10},
-	{0xde28, 0x48, 0x0, 0x0, 0x38},
-	{0x10660, 0x20, 0x0, 0x0, 0x20},
-	{0x2b80, 0x80, 0x0, 0x0, 0x10},
-	{0x5020, 0x10, 0x0, 0x0, 0x10},
-	{0xc9b0, 0x30, 0x0, 0x0, 0x10},
-	{0xeec0, 0x10, 0x0, 0x0, 0x10},
+	{0x400, 0x18, 0x8, 0x0, 0x8},
+	{0xb78, 0x18, 0x8, 0x0, 0x2},
+	{0xd898, 0x50, 0x0, 0x0, 0x3c},
+	{0x12908, 0x18, 0x0, 0x0, 0x10},
+	{0x11aa8, 0x40, 0x0, 0x0, 0x18},
+	{0xa588, 0x50, 0x0, 0x0, 0x20},
+	{0x8700, 0x40, 0x0, 0x0, 0x28},
+	{0x10300, 0x18, 0x0, 0x0, 0x10},
+	{0xde48, 0x48, 0x0, 0x0, 0x38},
+	{0x10768, 0x20, 0x0, 0x0, 0x20},
+	{0x2d28, 0x80, 0x0, 0x0, 0x10},
+	{0x5048, 0x10, 0x0, 0x0, 0x10},
+	{0xc9b8, 0x30, 0x0, 0x0, 0x10},
+	{0xeee0, 0x10, 0x0, 0x0, 0x10},
+	{0xa3a0, 0x10, 0x0, 0x0, 0x10},
+	{0x13108, 0x8, 0x0, 0x0, 0x8},
 };
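These entries back the storm RAM macros above, assuming struct iro lays out as { base, m1, m2, m3, size }. A worked expansion:

	/* IRO[18] = { 0x4b68, 0x80, 0x0, 0x0, 0x40 } backs MSTORM_QUEUE_STAT, so
	 * MSTORM_QUEUE_STAT_OFFSET(2) = 0x4b68 + 2 * 0x80 = 0x4c68 and each
	 * counter block spans MSTORM_QUEUE_STAT_SIZE = 0x40 bytes.
	 */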
 
 /* Runtime array offsets */
-#define DORQ_REG_PF_MAX_ICID_0_RT_OFFSET	0
-#define DORQ_REG_PF_MAX_ICID_1_RT_OFFSET	1
-#define DORQ_REG_PF_MAX_ICID_2_RT_OFFSET	2
-#define DORQ_REG_PF_MAX_ICID_3_RT_OFFSET	3
-#define DORQ_REG_PF_MAX_ICID_4_RT_OFFSET	4
-#define DORQ_REG_PF_MAX_ICID_5_RT_OFFSET	5
-#define DORQ_REG_PF_MAX_ICID_6_RT_OFFSET	6
-#define DORQ_REG_PF_MAX_ICID_7_RT_OFFSET	7
-#define DORQ_REG_VF_MAX_ICID_0_RT_OFFSET	8
-#define DORQ_REG_VF_MAX_ICID_1_RT_OFFSET	9
-#define DORQ_REG_VF_MAX_ICID_2_RT_OFFSET	10
-#define DORQ_REG_VF_MAX_ICID_3_RT_OFFSET	11
-#define DORQ_REG_VF_MAX_ICID_4_RT_OFFSET	12
-#define DORQ_REG_VF_MAX_ICID_5_RT_OFFSET	13
-#define DORQ_REG_VF_MAX_ICID_6_RT_OFFSET	14
-#define DORQ_REG_VF_MAX_ICID_7_RT_OFFSET	15
-#define DORQ_REG_PF_WAKE_ALL_RT_OFFSET	16
-#define DORQ_REG_TAG1_ETHERTYPE_RT_OFFSET	17
-#define IGU_REG_PF_CONFIGURATION_RT_OFFSET	18
-#define IGU_REG_VF_CONFIGURATION_RT_OFFSET	19
-#define IGU_REG_ATTN_MSG_ADDR_L_RT_OFFSET	20
-#define IGU_REG_ATTN_MSG_ADDR_H_RT_OFFSET	21
-#define IGU_REG_LEADING_EDGE_LATCH_RT_OFFSET	22
-#define IGU_REG_TRAILING_EDGE_LATCH_RT_OFFSET	23
-#define CAU_REG_CQE_AGG_UNIT_SIZE_RT_OFFSET	24
-#define CAU_REG_SB_VAR_MEMORY_RT_OFFSET	761
-#define CAU_REG_SB_VAR_MEMORY_RT_SIZE	736
-#define CAU_REG_SB_VAR_MEMORY_RT_OFFSET	761
-#define CAU_REG_SB_VAR_MEMORY_RT_SIZE	736
-#define CAU_REG_SB_ADDR_MEMORY_RT_OFFSET	1497
-#define CAU_REG_SB_ADDR_MEMORY_RT_SIZE	736
-#define CAU_REG_PI_MEMORY_RT_OFFSET	2233
-#define CAU_REG_PI_MEMORY_RT_SIZE	4416
-#define PRS_REG_SEARCH_RESP_INITIATOR_TYPE_RT_OFFSET	6649
-#define PRS_REG_TASK_ID_MAX_INITIATOR_PF_RT_OFFSET	6650
-#define PRS_REG_TASK_ID_MAX_INITIATOR_VF_RT_OFFSET	6651
-#define PRS_REG_TASK_ID_MAX_TARGET_PF_RT_OFFSET	6652
-#define PRS_REG_TASK_ID_MAX_TARGET_VF_RT_OFFSET	6653
-#define PRS_REG_SEARCH_TCP_RT_OFFSET	6654
-#define PRS_REG_SEARCH_FCOE_RT_OFFSET	6655
-#define PRS_REG_SEARCH_ROCE_RT_OFFSET	6656
-#define PRS_REG_ROCE_DEST_QP_MAX_VF_RT_OFFSET	6657
-#define PRS_REG_ROCE_DEST_QP_MAX_PF_RT_OFFSET	6658
-#define PRS_REG_SEARCH_OPENFLOW_RT_OFFSET	6659
-#define PRS_REG_SEARCH_NON_IP_AS_OPENFLOW_RT_OFFSET	6660
-#define PRS_REG_OPENFLOW_SUPPORT_ONLY_KNOWN_OVER_IP_RT_OFFSET	6661
-#define PRS_REG_OPENFLOW_SEARCH_KEY_MASK_RT_OFFSET	6662
-#define PRS_REG_TAG_ETHERTYPE_0_RT_OFFSET	6663
-#define PRS_REG_LIGHT_L2_ETHERTYPE_EN_RT_OFFSET	6664
-#define SRC_REG_FIRSTFREE_RT_OFFSET	6665
-#define SRC_REG_FIRSTFREE_RT_SIZE	2
-#define SRC_REG_LASTFREE_RT_OFFSET	6667
-#define SRC_REG_LASTFREE_RT_SIZE	2
-#define SRC_REG_COUNTFREE_RT_OFFSET	6669
-#define SRC_REG_NUMBER_HASH_BITS_RT_OFFSET	6670
-#define PSWRQ2_REG_CDUT_P_SIZE_RT_OFFSET	6671
-#define PSWRQ2_REG_CDUC_P_SIZE_RT_OFFSET	6672
-#define PSWRQ2_REG_TM_P_SIZE_RT_OFFSET	6673
-#define PSWRQ2_REG_QM_P_SIZE_RT_OFFSET	6674
-#define PSWRQ2_REG_SRC_P_SIZE_RT_OFFSET	6675
-#define PSWRQ2_REG_TSDM_P_SIZE_RT_OFFSET	6676
-#define PSWRQ2_REG_TM_FIRST_ILT_RT_OFFSET	6677
-#define PSWRQ2_REG_TM_LAST_ILT_RT_OFFSET	6678
-#define PSWRQ2_REG_QM_FIRST_ILT_RT_OFFSET	6679
-#define PSWRQ2_REG_QM_LAST_ILT_RT_OFFSET	6680
-#define PSWRQ2_REG_SRC_FIRST_ILT_RT_OFFSET	6681
-#define PSWRQ2_REG_SRC_LAST_ILT_RT_OFFSET	6682
-#define PSWRQ2_REG_CDUC_FIRST_ILT_RT_OFFSET	6683
-#define PSWRQ2_REG_CDUC_LAST_ILT_RT_OFFSET	6684
-#define PSWRQ2_REG_CDUT_FIRST_ILT_RT_OFFSET	6685
-#define PSWRQ2_REG_CDUT_LAST_ILT_RT_OFFSET	6686
-#define PSWRQ2_REG_TSDM_FIRST_ILT_RT_OFFSET	6687
-#define PSWRQ2_REG_TSDM_LAST_ILT_RT_OFFSET	6688
-#define PSWRQ2_REG_TM_NUMBER_OF_PF_BLOCKS_RT_OFFSET	6689
-#define PSWRQ2_REG_CDUT_NUMBER_OF_PF_BLOCKS_RT_OFFSET	6690
-#define PSWRQ2_REG_CDUC_NUMBER_OF_PF_BLOCKS_RT_OFFSET	6691
-#define PSWRQ2_REG_TM_VF_BLOCKS_RT_OFFSET	6692
-#define PSWRQ2_REG_CDUT_VF_BLOCKS_RT_OFFSET	6693
-#define PSWRQ2_REG_CDUC_VF_BLOCKS_RT_OFFSET	6694
-#define PSWRQ2_REG_TM_BLOCKS_FACTOR_RT_OFFSET	6695
-#define PSWRQ2_REG_CDUT_BLOCKS_FACTOR_RT_OFFSET	6696
-#define PSWRQ2_REG_CDUC_BLOCKS_FACTOR_RT_OFFSET	6697
-#define PSWRQ2_REG_VF_BASE_RT_OFFSET	6698
-#define PSWRQ2_REG_VF_LAST_ILT_RT_OFFSET	6699
-#define PSWRQ2_REG_DRAM_ALIGN_WR_RT_OFFSET	6700
-#define PSWRQ2_REG_DRAM_ALIGN_RD_RT_OFFSET	6701
-#define PSWRQ2_REG_ILT_MEMORY_RT_OFFSET	6702
-#define PSWRQ2_REG_ILT_MEMORY_RT_SIZE	22000
-#define PGLUE_REG_B_VF_BASE_RT_OFFSET	28702
-#define PGLUE_REG_B_MSDM_OFFSET_MASK_B_RT_OFFSET	28703
-#define PGLUE_REG_B_MSDM_VF_SHIFT_B_RT_OFFSET	28704
-#define PGLUE_REG_B_CACHE_LINE_SIZE_RT_OFFSET	28705
-#define PGLUE_REG_B_PF_BAR0_SIZE_RT_OFFSET	28706
-#define PGLUE_REG_B_PF_BAR1_SIZE_RT_OFFSET	28707
-#define PGLUE_REG_B_VF_BAR1_SIZE_RT_OFFSET	28708
-#define TM_REG_VF_ENABLE_CONN_RT_OFFSET	28709
-#define TM_REG_PF_ENABLE_CONN_RT_OFFSET	28710
-#define TM_REG_PF_ENABLE_TASK_RT_OFFSET	28711
-#define TM_REG_GROUP_SIZE_RESOLUTION_CONN_RT_OFFSET	28712
-#define TM_REG_GROUP_SIZE_RESOLUTION_TASK_RT_OFFSET	28713
-#define TM_REG_CONFIG_CONN_MEM_RT_OFFSET	28714
-#define TM_REG_CONFIG_CONN_MEM_RT_SIZE	416
-#define TM_REG_CONFIG_TASK_MEM_RT_OFFSET	29130
-#define TM_REG_CONFIG_TASK_MEM_RT_SIZE	608
-#define QM_REG_MAXPQSIZE_0_RT_OFFSET	29738
-#define QM_REG_MAXPQSIZE_1_RT_OFFSET	29739
-#define QM_REG_MAXPQSIZE_2_RT_OFFSET	29740
-#define QM_REG_MAXPQSIZETXSEL_0_RT_OFFSET	29741
-#define QM_REG_MAXPQSIZETXSEL_1_RT_OFFSET	29742
-#define QM_REG_MAXPQSIZETXSEL_2_RT_OFFSET	29743
-#define QM_REG_MAXPQSIZETXSEL_3_RT_OFFSET	29744
-#define QM_REG_MAXPQSIZETXSEL_4_RT_OFFSET	29745
-#define QM_REG_MAXPQSIZETXSEL_5_RT_OFFSET	29746
-#define QM_REG_MAXPQSIZETXSEL_6_RT_OFFSET	29747
-#define QM_REG_MAXPQSIZETXSEL_7_RT_OFFSET	29748
-#define QM_REG_MAXPQSIZETXSEL_8_RT_OFFSET	29749
-#define QM_REG_MAXPQSIZETXSEL_9_RT_OFFSET	29750
-#define QM_REG_MAXPQSIZETXSEL_10_RT_OFFSET	29751
-#define QM_REG_MAXPQSIZETXSEL_11_RT_OFFSET	29752
-#define QM_REG_MAXPQSIZETXSEL_12_RT_OFFSET	29753
-#define QM_REG_MAXPQSIZETXSEL_13_RT_OFFSET	29754
-#define QM_REG_MAXPQSIZETXSEL_14_RT_OFFSET	29755
-#define QM_REG_MAXPQSIZETXSEL_15_RT_OFFSET	29756
-#define QM_REG_MAXPQSIZETXSEL_16_RT_OFFSET	29757
-#define QM_REG_MAXPQSIZETXSEL_17_RT_OFFSET	29758
-#define QM_REG_MAXPQSIZETXSEL_18_RT_OFFSET	29759
-#define QM_REG_MAXPQSIZETXSEL_19_RT_OFFSET	29760
-#define QM_REG_MAXPQSIZETXSEL_20_RT_OFFSET	29761
-#define QM_REG_MAXPQSIZETXSEL_21_RT_OFFSET	29762
-#define QM_REG_MAXPQSIZETXSEL_22_RT_OFFSET	29763
-#define QM_REG_MAXPQSIZETXSEL_23_RT_OFFSET	29764
-#define QM_REG_MAXPQSIZETXSEL_24_RT_OFFSET	29765
-#define QM_REG_MAXPQSIZETXSEL_25_RT_OFFSET	29766
-#define QM_REG_MAXPQSIZETXSEL_26_RT_OFFSET	29767
-#define QM_REG_MAXPQSIZETXSEL_27_RT_OFFSET	29768
-#define QM_REG_MAXPQSIZETXSEL_28_RT_OFFSET	29769
-#define QM_REG_MAXPQSIZETXSEL_29_RT_OFFSET	29770
-#define QM_REG_MAXPQSIZETXSEL_30_RT_OFFSET	29771
-#define QM_REG_MAXPQSIZETXSEL_31_RT_OFFSET	29772
-#define QM_REG_MAXPQSIZETXSEL_32_RT_OFFSET	29773
-#define QM_REG_MAXPQSIZETXSEL_33_RT_OFFSET	29774
-#define QM_REG_MAXPQSIZETXSEL_34_RT_OFFSET	29775
-#define QM_REG_MAXPQSIZETXSEL_35_RT_OFFSET	29776
-#define QM_REG_MAXPQSIZETXSEL_36_RT_OFFSET	29777
-#define QM_REG_MAXPQSIZETXSEL_37_RT_OFFSET	29778
-#define QM_REG_MAXPQSIZETXSEL_38_RT_OFFSET	29779
-#define QM_REG_MAXPQSIZETXSEL_39_RT_OFFSET	29780
-#define QM_REG_MAXPQSIZETXSEL_40_RT_OFFSET	29781
-#define QM_REG_MAXPQSIZETXSEL_41_RT_OFFSET	29782
-#define QM_REG_MAXPQSIZETXSEL_42_RT_OFFSET	29783
-#define QM_REG_MAXPQSIZETXSEL_43_RT_OFFSET	29784
-#define QM_REG_MAXPQSIZETXSEL_44_RT_OFFSET	29785
-#define QM_REG_MAXPQSIZETXSEL_45_RT_OFFSET	29786
-#define QM_REG_MAXPQSIZETXSEL_46_RT_OFFSET	29787
-#define QM_REG_MAXPQSIZETXSEL_47_RT_OFFSET	29788
-#define QM_REG_MAXPQSIZETXSEL_48_RT_OFFSET	29789
-#define QM_REG_MAXPQSIZETXSEL_49_RT_OFFSET	29790
-#define QM_REG_MAXPQSIZETXSEL_50_RT_OFFSET	29791
-#define QM_REG_MAXPQSIZETXSEL_51_RT_OFFSET	29792
-#define QM_REG_MAXPQSIZETXSEL_52_RT_OFFSET	29793
-#define QM_REG_MAXPQSIZETXSEL_53_RT_OFFSET	29794
-#define QM_REG_MAXPQSIZETXSEL_54_RT_OFFSET	29795
-#define QM_REG_MAXPQSIZETXSEL_55_RT_OFFSET	29796
-#define QM_REG_MAXPQSIZETXSEL_56_RT_OFFSET	29797
-#define QM_REG_MAXPQSIZETXSEL_57_RT_OFFSET	29798
-#define QM_REG_MAXPQSIZETXSEL_58_RT_OFFSET	29799
-#define QM_REG_MAXPQSIZETXSEL_59_RT_OFFSET	29800
-#define QM_REG_MAXPQSIZETXSEL_60_RT_OFFSET	29801
-#define QM_REG_MAXPQSIZETXSEL_61_RT_OFFSET	29802
-#define QM_REG_MAXPQSIZETXSEL_62_RT_OFFSET	29803
-#define QM_REG_MAXPQSIZETXSEL_63_RT_OFFSET	29804
-#define QM_REG_BASEADDROTHERPQ_RT_OFFSET	29805
-#define QM_REG_BASEADDROTHERPQ_RT_SIZE	128
-#define QM_REG_AFULLQMBYPTHRPFWFQ_RT_OFFSET	29933
-#define QM_REG_AFULLQMBYPTHRVPWFQ_RT_OFFSET	29934
-#define QM_REG_AFULLQMBYPTHRPFRL_RT_OFFSET	29935
-#define QM_REG_AFULLQMBYPTHRGLBLRL_RT_OFFSET	29936
-#define QM_REG_AFULLOPRTNSTCCRDMASK_RT_OFFSET	29937
-#define QM_REG_WRROTHERPQGRP_0_RT_OFFSET	29938
-#define QM_REG_WRROTHERPQGRP_1_RT_OFFSET	29939
-#define QM_REG_WRROTHERPQGRP_2_RT_OFFSET	29940
-#define QM_REG_WRROTHERPQGRP_3_RT_OFFSET	29941
-#define QM_REG_WRROTHERPQGRP_4_RT_OFFSET	29942
-#define QM_REG_WRROTHERPQGRP_5_RT_OFFSET	29943
-#define QM_REG_WRROTHERPQGRP_6_RT_OFFSET	29944
-#define QM_REG_WRROTHERPQGRP_7_RT_OFFSET	29945
-#define QM_REG_WRROTHERPQGRP_8_RT_OFFSET	29946
-#define QM_REG_WRROTHERPQGRP_9_RT_OFFSET	29947
-#define QM_REG_WRROTHERPQGRP_10_RT_OFFSET	29948
-#define QM_REG_WRROTHERPQGRP_11_RT_OFFSET	29949
-#define QM_REG_WRROTHERPQGRP_12_RT_OFFSET	29950
-#define QM_REG_WRROTHERPQGRP_13_RT_OFFSET	29951
-#define QM_REG_WRROTHERPQGRP_14_RT_OFFSET	29952
-#define QM_REG_WRROTHERPQGRP_15_RT_OFFSET	29953
-#define QM_REG_WRROTHERGRPWEIGHT_0_RT_OFFSET	29954
-#define QM_REG_WRROTHERGRPWEIGHT_1_RT_OFFSET	29955
-#define QM_REG_WRROTHERGRPWEIGHT_2_RT_OFFSET	29956
-#define QM_REG_WRROTHERGRPWEIGHT_3_RT_OFFSET	29957
-#define QM_REG_WRRTXGRPWEIGHT_0_RT_OFFSET	29958
-#define QM_REG_WRRTXGRPWEIGHT_1_RT_OFFSET	29959
-#define QM_REG_PQTX2PF_0_RT_OFFSET	29960
-#define QM_REG_PQTX2PF_1_RT_OFFSET	29961
-#define QM_REG_PQTX2PF_2_RT_OFFSET	29962
-#define QM_REG_PQTX2PF_3_RT_OFFSET	29963
-#define QM_REG_PQTX2PF_4_RT_OFFSET	29964
-#define QM_REG_PQTX2PF_5_RT_OFFSET	29965
-#define QM_REG_PQTX2PF_6_RT_OFFSET	29966
-#define QM_REG_PQTX2PF_7_RT_OFFSET	29967
-#define QM_REG_PQTX2PF_8_RT_OFFSET	29968
-#define QM_REG_PQTX2PF_9_RT_OFFSET	29969
-#define QM_REG_PQTX2PF_10_RT_OFFSET	29970
-#define QM_REG_PQTX2PF_11_RT_OFFSET	29971
-#define QM_REG_PQTX2PF_12_RT_OFFSET	29972
-#define QM_REG_PQTX2PF_13_RT_OFFSET	29973
-#define QM_REG_PQTX2PF_14_RT_OFFSET	29974
-#define QM_REG_PQTX2PF_15_RT_OFFSET	29975
-#define QM_REG_PQTX2PF_16_RT_OFFSET	29976
-#define QM_REG_PQTX2PF_17_RT_OFFSET	29977
-#define QM_REG_PQTX2PF_18_RT_OFFSET	29978
-#define QM_REG_PQTX2PF_19_RT_OFFSET	29979
-#define QM_REG_PQTX2PF_20_RT_OFFSET	29980
-#define QM_REG_PQTX2PF_21_RT_OFFSET	29981
-#define QM_REG_PQTX2PF_22_RT_OFFSET	29982
-#define QM_REG_PQTX2PF_23_RT_OFFSET	29983
-#define QM_REG_PQTX2PF_24_RT_OFFSET	29984
-#define QM_REG_PQTX2PF_25_RT_OFFSET	29985
-#define QM_REG_PQTX2PF_26_RT_OFFSET	29986
-#define QM_REG_PQTX2PF_27_RT_OFFSET	29987
-#define QM_REG_PQTX2PF_28_RT_OFFSET	29988
-#define QM_REG_PQTX2PF_29_RT_OFFSET	29989
-#define QM_REG_PQTX2PF_30_RT_OFFSET	29990
-#define QM_REG_PQTX2PF_31_RT_OFFSET	29991
-#define QM_REG_PQTX2PF_32_RT_OFFSET	29992
-#define QM_REG_PQTX2PF_33_RT_OFFSET	29993
-#define QM_REG_PQTX2PF_34_RT_OFFSET	29994
-#define QM_REG_PQTX2PF_35_RT_OFFSET	29995
-#define QM_REG_PQTX2PF_36_RT_OFFSET	29996
-#define QM_REG_PQTX2PF_37_RT_OFFSET	29997
-#define QM_REG_PQTX2PF_38_RT_OFFSET	29998
-#define QM_REG_PQTX2PF_39_RT_OFFSET	29999
-#define QM_REG_PQTX2PF_40_RT_OFFSET	30000
-#define QM_REG_PQTX2PF_41_RT_OFFSET	30001
-#define QM_REG_PQTX2PF_42_RT_OFFSET	30002
-#define QM_REG_PQTX2PF_43_RT_OFFSET	30003
-#define QM_REG_PQTX2PF_44_RT_OFFSET	30004
-#define QM_REG_PQTX2PF_45_RT_OFFSET	30005
-#define QM_REG_PQTX2PF_46_RT_OFFSET	30006
-#define QM_REG_PQTX2PF_47_RT_OFFSET	30007
-#define QM_REG_PQTX2PF_48_RT_OFFSET	30008
-#define QM_REG_PQTX2PF_49_RT_OFFSET	30009
-#define QM_REG_PQTX2PF_50_RT_OFFSET	30010
-#define QM_REG_PQTX2PF_51_RT_OFFSET	30011
-#define QM_REG_PQTX2PF_52_RT_OFFSET	30012
-#define QM_REG_PQTX2PF_53_RT_OFFSET	30013
-#define QM_REG_PQTX2PF_54_RT_OFFSET	30014
-#define QM_REG_PQTX2PF_55_RT_OFFSET	30015
-#define QM_REG_PQTX2PF_56_RT_OFFSET	30016
-#define QM_REG_PQTX2PF_57_RT_OFFSET	30017
-#define QM_REG_PQTX2PF_58_RT_OFFSET	30018
-#define QM_REG_PQTX2PF_59_RT_OFFSET	30019
-#define QM_REG_PQTX2PF_60_RT_OFFSET	30020
-#define QM_REG_PQTX2PF_61_RT_OFFSET	30021
-#define QM_REG_PQTX2PF_62_RT_OFFSET	30022
-#define QM_REG_PQTX2PF_63_RT_OFFSET	30023
-#define QM_REG_PQOTHER2PF_0_RT_OFFSET	30024
-#define QM_REG_PQOTHER2PF_1_RT_OFFSET	30025
-#define QM_REG_PQOTHER2PF_2_RT_OFFSET	30026
-#define QM_REG_PQOTHER2PF_3_RT_OFFSET	30027
-#define QM_REG_PQOTHER2PF_4_RT_OFFSET	30028
-#define QM_REG_PQOTHER2PF_5_RT_OFFSET	30029
-#define QM_REG_PQOTHER2PF_6_RT_OFFSET	30030
-#define QM_REG_PQOTHER2PF_7_RT_OFFSET	30031
-#define QM_REG_PQOTHER2PF_8_RT_OFFSET	30032
-#define QM_REG_PQOTHER2PF_9_RT_OFFSET	30033
-#define QM_REG_PQOTHER2PF_10_RT_OFFSET	30034
-#define QM_REG_PQOTHER2PF_11_RT_OFFSET	30035
-#define QM_REG_PQOTHER2PF_12_RT_OFFSET	30036
-#define QM_REG_PQOTHER2PF_13_RT_OFFSET	30037
-#define QM_REG_PQOTHER2PF_14_RT_OFFSET	30038
-#define QM_REG_PQOTHER2PF_15_RT_OFFSET	30039
-#define QM_REG_RLGLBLPERIOD_0_RT_OFFSET	30040
-#define QM_REG_RLGLBLPERIOD_1_RT_OFFSET	30041
-#define QM_REG_RLGLBLPERIODTIMER_0_RT_OFFSET	30042
-#define QM_REG_RLGLBLPERIODTIMER_1_RT_OFFSET	30043
-#define QM_REG_RLGLBLPERIODSEL_0_RT_OFFSET	30044
-#define QM_REG_RLGLBLPERIODSEL_1_RT_OFFSET	30045
-#define QM_REG_RLGLBLPERIODSEL_2_RT_OFFSET	30046
-#define QM_REG_RLGLBLPERIODSEL_3_RT_OFFSET	30047
-#define QM_REG_RLGLBLPERIODSEL_4_RT_OFFSET	30048
-#define QM_REG_RLGLBLPERIODSEL_5_RT_OFFSET	30049
-#define QM_REG_RLGLBLPERIODSEL_6_RT_OFFSET	30050
-#define QM_REG_RLGLBLPERIODSEL_7_RT_OFFSET	30051
-#define QM_REG_RLGLBLINCVAL_RT_OFFSET	30052
-#define QM_REG_RLGLBLINCVAL_RT_SIZE	256
-#define QM_REG_RLGLBLUPPERBOUND_RT_OFFSET	30308
-#define QM_REG_RLGLBLUPPERBOUND_RT_SIZE	256
-#define QM_REG_RLGLBLCRD_RT_OFFSET	30564
-#define QM_REG_RLGLBLCRD_RT_SIZE	256
-#define QM_REG_RLGLBLENABLE_RT_OFFSET	30820
-#define QM_REG_RLPFPERIOD_RT_OFFSET	30821
-#define QM_REG_RLPFPERIODTIMER_RT_OFFSET	30822
-#define QM_REG_RLPFINCVAL_RT_OFFSET	30823
-#define QM_REG_RLPFINCVAL_RT_SIZE	16
-#define QM_REG_RLPFUPPERBOUND_RT_OFFSET	30839
-#define QM_REG_RLPFUPPERBOUND_RT_SIZE	16
-#define QM_REG_RLPFCRD_RT_OFFSET	30855
-#define QM_REG_RLPFCRD_RT_SIZE	16
-#define QM_REG_RLPFENABLE_RT_OFFSET	30871
-#define QM_REG_RLPFVOQENABLE_RT_OFFSET	30872
-#define QM_REG_WFQPFWEIGHT_RT_OFFSET	30873
-#define QM_REG_WFQPFWEIGHT_RT_SIZE	16
-#define QM_REG_WFQPFUPPERBOUND_RT_OFFSET	30889
-#define QM_REG_WFQPFUPPERBOUND_RT_SIZE	16
-#define QM_REG_WFQPFCRD_RT_OFFSET	30905
-#define QM_REG_WFQPFCRD_RT_SIZE	256
-#define QM_REG_WFQPFENABLE_RT_OFFSET	31161
-#define QM_REG_WFQVPENABLE_RT_OFFSET	31162
-#define QM_REG_BASEADDRTXPQ_RT_OFFSET	31163
-#define QM_REG_BASEADDRTXPQ_RT_SIZE	512
-#define QM_REG_TXPQMAP_RT_OFFSET	31675
-#define QM_REG_TXPQMAP_RT_SIZE	512
-#define QM_REG_WFQVPWEIGHT_RT_OFFSET	32187
-#define QM_REG_WFQVPWEIGHT_RT_SIZE	512
-#define QM_REG_WFQVPCRD_RT_OFFSET	32699
-#define QM_REG_WFQVPCRD_RT_SIZE	512
-#define QM_REG_WFQVPMAP_RT_OFFSET	33211
-#define QM_REG_WFQVPMAP_RT_SIZE	512
-#define QM_REG_WFQPFCRD_MSB_RT_OFFSET	33723
-#define QM_REG_WFQPFCRD_MSB_RT_SIZE	320
-#define QM_REG_VOQCRDLINE_RT_OFFSET	34043
-#define QM_REG_VOQCRDLINE_RT_SIZE	36
-#define QM_REG_VOQINITCRDLINE_RT_OFFSET	34079
-#define QM_REG_VOQINITCRDLINE_RT_SIZE	36
-#define NIG_REG_TAG_ETHERTYPE_0_RT_OFFSET	34115
-#define NIG_REG_OUTER_TAG_VALUE_LIST0_RT_OFFSET	34116
-#define NIG_REG_OUTER_TAG_VALUE_LIST1_RT_OFFSET	34117
-#define NIG_REG_OUTER_TAG_VALUE_LIST2_RT_OFFSET	34118
-#define NIG_REG_OUTER_TAG_VALUE_LIST3_RT_OFFSET	34119
-#define NIG_REG_OUTER_TAG_VALUE_MASK_RT_OFFSET	34120
-#define NIG_REG_LLH_FUNC_TAGMAC_CLS_TYPE_RT_OFFSET	34121
-#define NIG_REG_LLH_FUNC_TAG_EN_RT_OFFSET	34122
-#define NIG_REG_LLH_FUNC_TAG_EN_RT_SIZE	4
-#define NIG_REG_LLH_FUNC_TAG_HDR_SEL_RT_OFFSET	34126
-#define NIG_REG_LLH_FUNC_TAG_HDR_SEL_RT_SIZE	4
-#define NIG_REG_LLH_FUNC_TAG_VALUE_RT_OFFSET	34130
-#define NIG_REG_LLH_FUNC_TAG_VALUE_RT_SIZE	4
-#define NIG_REG_LLH_FUNC_NO_TAG_RT_OFFSET	34134
-#define NIG_REG_LLH_FUNC_FILTER_VALUE_RT_OFFSET	34135
-#define NIG_REG_LLH_FUNC_FILTER_VALUE_RT_SIZE	32
-#define NIG_REG_LLH_FUNC_FILTER_EN_RT_OFFSET	34167
-#define NIG_REG_LLH_FUNC_FILTER_EN_RT_SIZE	16
-#define NIG_REG_LLH_FUNC_FILTER_MODE_RT_OFFSET	34183
-#define NIG_REG_LLH_FUNC_FILTER_MODE_RT_SIZE	16
-#define NIG_REG_LLH_FUNC_FILTER_PROTOCOL_TYPE_RT_OFFSET	34199
-#define NIG_REG_LLH_FUNC_FILTER_PROTOCOL_TYPE_RT_SIZE	16
-#define NIG_REG_LLH_FUNC_FILTER_HDR_SEL_RT_OFFSET	34215
-#define NIG_REG_LLH_FUNC_FILTER_HDR_SEL_RT_SIZE	16
-#define NIG_REG_TX_EDPM_CTRL_RT_OFFSET	34231
-#define NIG_REG_ROCE_DUPLICATE_TO_HOST_RT_OFFSET	34232
-#define CDU_REG_CID_ADDR_PARAMS_RT_OFFSET	34233
-#define CDU_REG_SEGMENT0_PARAMS_RT_OFFSET	34234
-#define CDU_REG_SEGMENT1_PARAMS_RT_OFFSET	34235
-#define CDU_REG_PF_SEG0_TYPE_OFFSET_RT_OFFSET	34236
-#define CDU_REG_PF_SEG1_TYPE_OFFSET_RT_OFFSET	34237
-#define CDU_REG_PF_SEG2_TYPE_OFFSET_RT_OFFSET	34238
-#define CDU_REG_PF_SEG3_TYPE_OFFSET_RT_OFFSET	34239
-#define CDU_REG_PF_FL_SEG0_TYPE_OFFSET_RT_OFFSET	34240
-#define CDU_REG_PF_FL_SEG1_TYPE_OFFSET_RT_OFFSET	34241
-#define CDU_REG_PF_FL_SEG2_TYPE_OFFSET_RT_OFFSET	34242
-#define CDU_REG_PF_FL_SEG3_TYPE_OFFSET_RT_OFFSET	34243
-#define CDU_REG_VF_SEG_TYPE_OFFSET_RT_OFFSET	34244
-#define CDU_REG_VF_FL_SEG_TYPE_OFFSET_RT_OFFSET	34245
-#define PBF_REG_TAG_ETHERTYPE_0_RT_OFFSET	34246
-#define PBF_REG_BTB_SHARED_AREA_SIZE_RT_OFFSET	34247
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ0_RT_OFFSET	34248
-#define PBF_REG_BTB_GUARANTEED_VOQ0_RT_OFFSET	34249
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ0_RT_OFFSET	34250
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ1_RT_OFFSET	34251
-#define PBF_REG_BTB_GUARANTEED_VOQ1_RT_OFFSET	34252
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ1_RT_OFFSET	34253
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ2_RT_OFFSET	34254
-#define PBF_REG_BTB_GUARANTEED_VOQ2_RT_OFFSET	34255
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ2_RT_OFFSET	34256
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ3_RT_OFFSET	34257
-#define PBF_REG_BTB_GUARANTEED_VOQ3_RT_OFFSET	34258
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ3_RT_OFFSET	34259
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ4_RT_OFFSET	34260
-#define PBF_REG_BTB_GUARANTEED_VOQ4_RT_OFFSET	34261
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ4_RT_OFFSET	34262
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ5_RT_OFFSET	34263
-#define PBF_REG_BTB_GUARANTEED_VOQ5_RT_OFFSET	34264
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ5_RT_OFFSET	34265
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ6_RT_OFFSET	34266
-#define PBF_REG_BTB_GUARANTEED_VOQ6_RT_OFFSET	34267
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ6_RT_OFFSET	34268
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ7_RT_OFFSET	34269
-#define PBF_REG_BTB_GUARANTEED_VOQ7_RT_OFFSET	34270
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ7_RT_OFFSET	34271
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ8_RT_OFFSET	34272
-#define PBF_REG_BTB_GUARANTEED_VOQ8_RT_OFFSET	34273
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ8_RT_OFFSET	34274
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ9_RT_OFFSET	34275
-#define PBF_REG_BTB_GUARANTEED_VOQ9_RT_OFFSET	34276
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ9_RT_OFFSET	34277
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ10_RT_OFFSET	34278
-#define PBF_REG_BTB_GUARANTEED_VOQ10_RT_OFFSET	34279
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ10_RT_OFFSET	34280
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ11_RT_OFFSET	34281
-#define PBF_REG_BTB_GUARANTEED_VOQ11_RT_OFFSET	34282
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ11_RT_OFFSET	34283
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ12_RT_OFFSET	34284
-#define PBF_REG_BTB_GUARANTEED_VOQ12_RT_OFFSET	34285
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ12_RT_OFFSET	34286
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ13_RT_OFFSET	34287
-#define PBF_REG_BTB_GUARANTEED_VOQ13_RT_OFFSET	34288
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ13_RT_OFFSET	34289
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ14_RT_OFFSET	34290
-#define PBF_REG_BTB_GUARANTEED_VOQ14_RT_OFFSET	34291
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ14_RT_OFFSET	34292
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ15_RT_OFFSET	34293
-#define PBF_REG_BTB_GUARANTEED_VOQ15_RT_OFFSET	34294
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ15_RT_OFFSET	34295
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ16_RT_OFFSET	34296
-#define PBF_REG_BTB_GUARANTEED_VOQ16_RT_OFFSET	34297
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ16_RT_OFFSET	34298
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ17_RT_OFFSET	34299
-#define PBF_REG_BTB_GUARANTEED_VOQ17_RT_OFFSET	34300
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ17_RT_OFFSET	34301
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ18_RT_OFFSET	34302
-#define PBF_REG_BTB_GUARANTEED_VOQ18_RT_OFFSET	34303
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ18_RT_OFFSET	34304
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ19_RT_OFFSET	34305
-#define PBF_REG_BTB_GUARANTEED_VOQ19_RT_OFFSET	34306
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ19_RT_OFFSET	34307
-#define XCM_REG_CON_PHY_Q3_RT_OFFSET	34308
-
-#define RUNTIME_ARRAY_SIZE 34309
+#define DORQ_REG_PF_MAX_ICID_0_RT_OFFSET			0
+#define DORQ_REG_PF_MAX_ICID_1_RT_OFFSET			1
+#define DORQ_REG_PF_MAX_ICID_2_RT_OFFSET			2
+#define DORQ_REG_PF_MAX_ICID_3_RT_OFFSET			3
+#define DORQ_REG_PF_MAX_ICID_4_RT_OFFSET			4
+#define DORQ_REG_PF_MAX_ICID_5_RT_OFFSET			5
+#define DORQ_REG_PF_MAX_ICID_6_RT_OFFSET			6
+#define DORQ_REG_PF_MAX_ICID_7_RT_OFFSET			7
+#define DORQ_REG_VF_MAX_ICID_0_RT_OFFSET			8
+#define DORQ_REG_VF_MAX_ICID_1_RT_OFFSET			9
+#define DORQ_REG_VF_MAX_ICID_2_RT_OFFSET			10
+#define DORQ_REG_VF_MAX_ICID_3_RT_OFFSET			11
+#define DORQ_REG_VF_MAX_ICID_4_RT_OFFSET			12
+#define DORQ_REG_VF_MAX_ICID_5_RT_OFFSET			13
+#define DORQ_REG_VF_MAX_ICID_6_RT_OFFSET			14
+#define DORQ_REG_VF_MAX_ICID_7_RT_OFFSET			15
+#define DORQ_REG_PF_WAKE_ALL_RT_OFFSET				16
+#define DORQ_REG_TAG1_ETHERTYPE_RT_OFFSET			17
+#define DORQ_REG_GLB_MAX_ICID_0_RT_OFFSET			18
+#define DORQ_REG_GLB_MAX_ICID_1_RT_OFFSET			19
+#define DORQ_REG_GLB_RANGE2CONN_TYPE_0_RT_OFFSET		20
+#define DORQ_REG_GLB_RANGE2CONN_TYPE_1_RT_OFFSET		21
+#define DORQ_REG_PRV_PF_MAX_ICID_2_RT_OFFSET			22
+#define DORQ_REG_PRV_PF_MAX_ICID_3_RT_OFFSET			23
+#define DORQ_REG_PRV_PF_MAX_ICID_4_RT_OFFSET			24
+#define DORQ_REG_PRV_PF_MAX_ICID_5_RT_OFFSET			25
+#define DORQ_REG_PRV_VF_MAX_ICID_2_RT_OFFSET			26
+#define DORQ_REG_PRV_VF_MAX_ICID_3_RT_OFFSET			27
+#define DORQ_REG_PRV_VF_MAX_ICID_4_RT_OFFSET			28
+#define DORQ_REG_PRV_VF_MAX_ICID_5_RT_OFFSET			29
+#define DORQ_REG_PRV_PF_RANGE2CONN_TYPE_2_RT_OFFSET		30
+#define DORQ_REG_PRV_PF_RANGE2CONN_TYPE_3_RT_OFFSET		31
+#define DORQ_REG_PRV_PF_RANGE2CONN_TYPE_4_RT_OFFSET		32
+#define DORQ_REG_PRV_PF_RANGE2CONN_TYPE_5_RT_OFFSET		33
+#define DORQ_REG_PRV_VF_RANGE2CONN_TYPE_2_RT_OFFSET		34
+#define DORQ_REG_PRV_VF_RANGE2CONN_TYPE_3_RT_OFFSET		35
+#define DORQ_REG_PRV_VF_RANGE2CONN_TYPE_4_RT_OFFSET		36
+#define DORQ_REG_PRV_VF_RANGE2CONN_TYPE_5_RT_OFFSET		37
+#define IGU_REG_PF_CONFIGURATION_RT_OFFSET			38
+#define IGU_REG_VF_CONFIGURATION_RT_OFFSET			39
+#define IGU_REG_ATTN_MSG_ADDR_L_RT_OFFSET			40
+#define IGU_REG_ATTN_MSG_ADDR_H_RT_OFFSET			41
+#define IGU_REG_LEADING_EDGE_LATCH_RT_OFFSET			42
+#define IGU_REG_TRAILING_EDGE_LATCH_RT_OFFSET			43
+#define CAU_REG_CQE_AGG_UNIT_SIZE_RT_OFFSET			44
+#define CAU_REG_SB_VAR_MEMORY_RT_OFFSET				45
+#define CAU_REG_SB_VAR_MEMORY_RT_SIZE				1024
+#define CAU_REG_SB_ADDR_MEMORY_RT_OFFSET			1069
+#define CAU_REG_SB_ADDR_MEMORY_RT_SIZE				1024
+#define CAU_REG_PI_MEMORY_RT_OFFSET				2093
+#define CAU_REG_PI_MEMORY_RT_SIZE				4416
+#define PRS_REG_SEARCH_RESP_INITIATOR_TYPE_RT_OFFSET		6509
+#define PRS_REG_TASK_ID_MAX_INITIATOR_PF_RT_OFFSET		6510
+#define PRS_REG_TASK_ID_MAX_INITIATOR_VF_RT_OFFSET		6511
+#define PRS_REG_TASK_ID_MAX_TARGET_PF_RT_OFFSET			6512
+#define PRS_REG_TASK_ID_MAX_TARGET_VF_RT_OFFSET			6513
+#define PRS_REG_SEARCH_TCP_RT_OFFSET				6514
+#define PRS_REG_SEARCH_FCOE_RT_OFFSET				6515
+#define PRS_REG_SEARCH_ROCE_RT_OFFSET				6516
+#define PRS_REG_ROCE_DEST_QP_MAX_VF_RT_OFFSET			6517
+#define PRS_REG_ROCE_DEST_QP_MAX_PF_RT_OFFSET			6518
+#define PRS_REG_SEARCH_OPENFLOW_RT_OFFSET			6519
+#define PRS_REG_SEARCH_NON_IP_AS_OPENFLOW_RT_OFFSET		6520
+#define PRS_REG_OPENFLOW_SUPPORT_ONLY_KNOWN_OVER_IP_RT_OFFSET	6521
+#define PRS_REG_OPENFLOW_SEARCH_KEY_MASK_RT_OFFSET		6522
+#define PRS_REG_TAG_ETHERTYPE_0_RT_OFFSET			6523
+#define PRS_REG_LIGHT_L2_ETHERTYPE_EN_RT_OFFSET			6524
+#define SRC_REG_FIRSTFREE_RT_OFFSET				6525
+#define SRC_REG_FIRSTFREE_RT_SIZE				2
+#define SRC_REG_LASTFREE_RT_OFFSET				6527
+#define SRC_REG_LASTFREE_RT_SIZE				2
+#define SRC_REG_COUNTFREE_RT_OFFSET				6529
+#define SRC_REG_NUMBER_HASH_BITS_RT_OFFSET			6530
+#define PSWRQ2_REG_CDUT_P_SIZE_RT_OFFSET			6531
+#define PSWRQ2_REG_CDUC_P_SIZE_RT_OFFSET			6532
+#define PSWRQ2_REG_TM_P_SIZE_RT_OFFSET				6533
+#define PSWRQ2_REG_QM_P_SIZE_RT_OFFSET				6534
+#define PSWRQ2_REG_SRC_P_SIZE_RT_OFFSET				6535
+#define PSWRQ2_REG_TSDM_P_SIZE_RT_OFFSET			6536
+#define PSWRQ2_REG_TM_FIRST_ILT_RT_OFFSET			6537
+#define PSWRQ2_REG_TM_LAST_ILT_RT_OFFSET			6538
+#define PSWRQ2_REG_QM_FIRST_ILT_RT_OFFSET			6539
+#define PSWRQ2_REG_QM_LAST_ILT_RT_OFFSET			6540
+#define PSWRQ2_REG_SRC_FIRST_ILT_RT_OFFSET			6541
+#define PSWRQ2_REG_SRC_LAST_ILT_RT_OFFSET			6542
+#define PSWRQ2_REG_CDUC_FIRST_ILT_RT_OFFSET			6543
+#define PSWRQ2_REG_CDUC_LAST_ILT_RT_OFFSET			6544
+#define PSWRQ2_REG_CDUT_FIRST_ILT_RT_OFFSET			6545
+#define PSWRQ2_REG_CDUT_LAST_ILT_RT_OFFSET			6546
+#define PSWRQ2_REG_TSDM_FIRST_ILT_RT_OFFSET			6547
+#define PSWRQ2_REG_TSDM_LAST_ILT_RT_OFFSET			6548
+#define PSWRQ2_REG_TM_NUMBER_OF_PF_BLOCKS_RT_OFFSET		6549
+#define PSWRQ2_REG_CDUT_NUMBER_OF_PF_BLOCKS_RT_OFFSET		6550
+#define PSWRQ2_REG_CDUC_NUMBER_OF_PF_BLOCKS_RT_OFFSET		6551
+#define PSWRQ2_REG_TM_VF_BLOCKS_RT_OFFSET			6552
+#define PSWRQ2_REG_CDUT_VF_BLOCKS_RT_OFFSET			6553
+#define PSWRQ2_REG_CDUC_VF_BLOCKS_RT_OFFSET			6554
+#define PSWRQ2_REG_TM_BLOCKS_FACTOR_RT_OFFSET			6555
+#define PSWRQ2_REG_CDUT_BLOCKS_FACTOR_RT_OFFSET			6556
+#define PSWRQ2_REG_CDUC_BLOCKS_FACTOR_RT_OFFSET			6557
+#define PSWRQ2_REG_VF_BASE_RT_OFFSET				6558
+#define PSWRQ2_REG_VF_LAST_ILT_RT_OFFSET			6559
+#define PSWRQ2_REG_DRAM_ALIGN_WR_RT_OFFSET			6560
+#define PSWRQ2_REG_DRAM_ALIGN_RD_RT_OFFSET			6561
+#define PSWRQ2_REG_TGSRC_FIRST_ILT_RT_OFFSET			6562
+#define PSWRQ2_REG_RGSRC_FIRST_ILT_RT_OFFSET			6563
+#define PSWRQ2_REG_TGSRC_LAST_ILT_RT_OFFSET			6564
+#define PSWRQ2_REG_RGSRC_LAST_ILT_RT_OFFSET			6565
+#define PSWRQ2_REG_ILT_MEMORY_RT_OFFSET				6566
+#define PSWRQ2_REG_ILT_MEMORY_RT_SIZE				26414
+#define PGLUE_REG_B_VF_BASE_RT_OFFSET				32980
+#define PGLUE_REG_B_MSDM_OFFSET_MASK_B_RT_OFFSET		32981
+#define PGLUE_REG_B_MSDM_VF_SHIFT_B_RT_OFFSET			32982
+#define PGLUE_REG_B_CACHE_LINE_SIZE_RT_OFFSET			32983
+#define PGLUE_REG_B_PF_BAR0_SIZE_RT_OFFSET			32984
+#define PGLUE_REG_B_PF_BAR1_SIZE_RT_OFFSET			32985
+#define PGLUE_REG_B_VF_BAR1_SIZE_RT_OFFSET			32986
+#define TM_REG_VF_ENABLE_CONN_RT_OFFSET				32987
+#define TM_REG_PF_ENABLE_CONN_RT_OFFSET				32988
+#define TM_REG_PF_ENABLE_TASK_RT_OFFSET				32989
+#define TM_REG_GROUP_SIZE_RESOLUTION_CONN_RT_OFFSET		32990
+#define TM_REG_GROUP_SIZE_RESOLUTION_TASK_RT_OFFSET		32991
+#define TM_REG_CONFIG_CONN_MEM_RT_OFFSET			32992
+#define TM_REG_CONFIG_CONN_MEM_RT_SIZE				416
+#define TM_REG_CONFIG_TASK_MEM_RT_OFFSET			33408
+#define TM_REG_CONFIG_TASK_MEM_RT_SIZE				608
+#define QM_REG_MAXPQSIZE_0_RT_OFFSET				34016
+#define QM_REG_MAXPQSIZE_1_RT_OFFSET				34017
+#define QM_REG_MAXPQSIZE_2_RT_OFFSET				34018
+#define QM_REG_MAXPQSIZETXSEL_0_RT_OFFSET			34019
+#define QM_REG_MAXPQSIZETXSEL_1_RT_OFFSET			34020
+#define QM_REG_MAXPQSIZETXSEL_2_RT_OFFSET			34021
+#define QM_REG_MAXPQSIZETXSEL_3_RT_OFFSET			34022
+#define QM_REG_MAXPQSIZETXSEL_4_RT_OFFSET			34023
+#define QM_REG_MAXPQSIZETXSEL_5_RT_OFFSET			34024
+#define QM_REG_MAXPQSIZETXSEL_6_RT_OFFSET			34025
+#define QM_REG_MAXPQSIZETXSEL_7_RT_OFFSET			34026
+#define QM_REG_MAXPQSIZETXSEL_8_RT_OFFSET			34027
+#define QM_REG_MAXPQSIZETXSEL_9_RT_OFFSET			34028
+#define QM_REG_MAXPQSIZETXSEL_10_RT_OFFSET			34029
+#define QM_REG_MAXPQSIZETXSEL_11_RT_OFFSET			34030
+#define QM_REG_MAXPQSIZETXSEL_12_RT_OFFSET			34031
+#define QM_REG_MAXPQSIZETXSEL_13_RT_OFFSET			34032
+#define QM_REG_MAXPQSIZETXSEL_14_RT_OFFSET			34033
+#define QM_REG_MAXPQSIZETXSEL_15_RT_OFFSET			34034
+#define QM_REG_MAXPQSIZETXSEL_16_RT_OFFSET			34035
+#define QM_REG_MAXPQSIZETXSEL_17_RT_OFFSET			34036
+#define QM_REG_MAXPQSIZETXSEL_18_RT_OFFSET			34037
+#define QM_REG_MAXPQSIZETXSEL_19_RT_OFFSET			34038
+#define QM_REG_MAXPQSIZETXSEL_20_RT_OFFSET			34039
+#define QM_REG_MAXPQSIZETXSEL_21_RT_OFFSET			34040
+#define QM_REG_MAXPQSIZETXSEL_22_RT_OFFSET			34041
+#define QM_REG_MAXPQSIZETXSEL_23_RT_OFFSET			34042
+#define QM_REG_MAXPQSIZETXSEL_24_RT_OFFSET			34043
+#define QM_REG_MAXPQSIZETXSEL_25_RT_OFFSET			34044
+#define QM_REG_MAXPQSIZETXSEL_26_RT_OFFSET			34045
+#define QM_REG_MAXPQSIZETXSEL_27_RT_OFFSET			34046
+#define QM_REG_MAXPQSIZETXSEL_28_RT_OFFSET			34047
+#define QM_REG_MAXPQSIZETXSEL_29_RT_OFFSET			34048
+#define QM_REG_MAXPQSIZETXSEL_30_RT_OFFSET			34049
+#define QM_REG_MAXPQSIZETXSEL_31_RT_OFFSET			34050
+#define QM_REG_MAXPQSIZETXSEL_32_RT_OFFSET			34051
+#define QM_REG_MAXPQSIZETXSEL_33_RT_OFFSET			34052
+#define QM_REG_MAXPQSIZETXSEL_34_RT_OFFSET			34053
+#define QM_REG_MAXPQSIZETXSEL_35_RT_OFFSET			34054
+#define QM_REG_MAXPQSIZETXSEL_36_RT_OFFSET			34055
+#define QM_REG_MAXPQSIZETXSEL_37_RT_OFFSET			34056
+#define QM_REG_MAXPQSIZETXSEL_38_RT_OFFSET			34057
+#define QM_REG_MAXPQSIZETXSEL_39_RT_OFFSET			34058
+#define QM_REG_MAXPQSIZETXSEL_40_RT_OFFSET			34059
+#define QM_REG_MAXPQSIZETXSEL_41_RT_OFFSET			34060
+#define QM_REG_MAXPQSIZETXSEL_42_RT_OFFSET			34061
+#define QM_REG_MAXPQSIZETXSEL_43_RT_OFFSET			34062
+#define QM_REG_MAXPQSIZETXSEL_44_RT_OFFSET			34063
+#define QM_REG_MAXPQSIZETXSEL_45_RT_OFFSET			34064
+#define QM_REG_MAXPQSIZETXSEL_46_RT_OFFSET			34065
+#define QM_REG_MAXPQSIZETXSEL_47_RT_OFFSET			34066
+#define QM_REG_MAXPQSIZETXSEL_48_RT_OFFSET			34067
+#define QM_REG_MAXPQSIZETXSEL_49_RT_OFFSET			34068
+#define QM_REG_MAXPQSIZETXSEL_50_RT_OFFSET			34069
+#define QM_REG_MAXPQSIZETXSEL_51_RT_OFFSET			34070
+#define QM_REG_MAXPQSIZETXSEL_52_RT_OFFSET			34071
+#define QM_REG_MAXPQSIZETXSEL_53_RT_OFFSET			34072
+#define QM_REG_MAXPQSIZETXSEL_54_RT_OFFSET			34073
+#define QM_REG_MAXPQSIZETXSEL_55_RT_OFFSET			34074
+#define QM_REG_MAXPQSIZETXSEL_56_RT_OFFSET			34075
+#define QM_REG_MAXPQSIZETXSEL_57_RT_OFFSET			34076
+#define QM_REG_MAXPQSIZETXSEL_58_RT_OFFSET			34077
+#define QM_REG_MAXPQSIZETXSEL_59_RT_OFFSET			34078
+#define QM_REG_MAXPQSIZETXSEL_60_RT_OFFSET			34079
+#define QM_REG_MAXPQSIZETXSEL_61_RT_OFFSET			34080
+#define QM_REG_MAXPQSIZETXSEL_62_RT_OFFSET			34081
+#define QM_REG_MAXPQSIZETXSEL_63_RT_OFFSET			34082
+#define QM_REG_BASEADDROTHERPQ_RT_OFFSET			34083
+#define QM_REG_BASEADDROTHERPQ_RT_SIZE				128
+#define QM_REG_PTRTBLOTHER_RT_OFFSET				34211
+#define QM_REG_PTRTBLOTHER_RT_SIZE				256
+#define QM_REG_AFULLQMBYPTHRPFWFQ_RT_OFFSET			34467
+#define QM_REG_AFULLQMBYPTHRVPWFQ_RT_OFFSET			34468
+#define QM_REG_AFULLQMBYPTHRPFRL_RT_OFFSET			34469
+#define QM_REG_AFULLQMBYPTHRGLBLRL_RT_OFFSET			34470
+#define QM_REG_AFULLOPRTNSTCCRDMASK_RT_OFFSET			34471
+#define QM_REG_WRROTHERPQGRP_0_RT_OFFSET			34472
+#define QM_REG_WRROTHERPQGRP_1_RT_OFFSET			34473
+#define QM_REG_WRROTHERPQGRP_2_RT_OFFSET			34474
+#define QM_REG_WRROTHERPQGRP_3_RT_OFFSET			34475
+#define QM_REG_WRROTHERPQGRP_4_RT_OFFSET			34476
+#define QM_REG_WRROTHERPQGRP_5_RT_OFFSET			34477
+#define QM_REG_WRROTHERPQGRP_6_RT_OFFSET			34478
+#define QM_REG_WRROTHERPQGRP_7_RT_OFFSET			34479
+#define QM_REG_WRROTHERPQGRP_8_RT_OFFSET			34480
+#define QM_REG_WRROTHERPQGRP_9_RT_OFFSET			34481
+#define QM_REG_WRROTHERPQGRP_10_RT_OFFSET			34482
+#define QM_REG_WRROTHERPQGRP_11_RT_OFFSET			34483
+#define QM_REG_WRROTHERPQGRP_12_RT_OFFSET			34484
+#define QM_REG_WRROTHERPQGRP_13_RT_OFFSET			34485
+#define QM_REG_WRROTHERPQGRP_14_RT_OFFSET			34486
+#define QM_REG_WRROTHERPQGRP_15_RT_OFFSET			34487
+#define QM_REG_WRROTHERGRPWEIGHT_0_RT_OFFSET			34488
+#define QM_REG_WRROTHERGRPWEIGHT_1_RT_OFFSET			34489
+#define QM_REG_WRROTHERGRPWEIGHT_2_RT_OFFSET			34490
+#define QM_REG_WRROTHERGRPWEIGHT_3_RT_OFFSET			34491
+#define QM_REG_WRRTXGRPWEIGHT_0_RT_OFFSET			34492
+#define QM_REG_WRRTXGRPWEIGHT_1_RT_OFFSET			34493
+#define QM_REG_PQTX2PF_0_RT_OFFSET				34494
+#define QM_REG_PQTX2PF_1_RT_OFFSET				34495
+#define QM_REG_PQTX2PF_2_RT_OFFSET				34496
+#define QM_REG_PQTX2PF_3_RT_OFFSET				34497
+#define QM_REG_PQTX2PF_4_RT_OFFSET				34498
+#define QM_REG_PQTX2PF_5_RT_OFFSET				34499
+#define QM_REG_PQTX2PF_6_RT_OFFSET				34500
+#define QM_REG_PQTX2PF_7_RT_OFFSET				34501
+#define QM_REG_PQTX2PF_8_RT_OFFSET				34502
+#define QM_REG_PQTX2PF_9_RT_OFFSET				34503
+#define QM_REG_PQTX2PF_10_RT_OFFSET				34504
+#define QM_REG_PQTX2PF_11_RT_OFFSET				34505
+#define QM_REG_PQTX2PF_12_RT_OFFSET				34506
+#define QM_REG_PQTX2PF_13_RT_OFFSET				34507
+#define QM_REG_PQTX2PF_14_RT_OFFSET				34508
+#define QM_REG_PQTX2PF_15_RT_OFFSET				34509
+#define QM_REG_PQTX2PF_16_RT_OFFSET				34510
+#define QM_REG_PQTX2PF_17_RT_OFFSET				34511
+#define QM_REG_PQTX2PF_18_RT_OFFSET				34512
+#define QM_REG_PQTX2PF_19_RT_OFFSET				34513
+#define QM_REG_PQTX2PF_20_RT_OFFSET				34514
+#define QM_REG_PQTX2PF_21_RT_OFFSET				34515
+#define QM_REG_PQTX2PF_22_RT_OFFSET				34516
+#define QM_REG_PQTX2PF_23_RT_OFFSET				34517
+#define QM_REG_PQTX2PF_24_RT_OFFSET				34518
+#define QM_REG_PQTX2PF_25_RT_OFFSET				34519
+#define QM_REG_PQTX2PF_26_RT_OFFSET				34520
+#define QM_REG_PQTX2PF_27_RT_OFFSET				34521
+#define QM_REG_PQTX2PF_28_RT_OFFSET				34522
+#define QM_REG_PQTX2PF_29_RT_OFFSET				34523
+#define QM_REG_PQTX2PF_30_RT_OFFSET				34524
+#define QM_REG_PQTX2PF_31_RT_OFFSET				34525
+#define QM_REG_PQTX2PF_32_RT_OFFSET				34526
+#define QM_REG_PQTX2PF_33_RT_OFFSET				34527
+#define QM_REG_PQTX2PF_34_RT_OFFSET				34528
+#define QM_REG_PQTX2PF_35_RT_OFFSET				34529
+#define QM_REG_PQTX2PF_36_RT_OFFSET				34530
+#define QM_REG_PQTX2PF_37_RT_OFFSET				34531
+#define QM_REG_PQTX2PF_38_RT_OFFSET				34532
+#define QM_REG_PQTX2PF_39_RT_OFFSET				34533
+#define QM_REG_PQTX2PF_40_RT_OFFSET				34534
+#define QM_REG_PQTX2PF_41_RT_OFFSET				34535
+#define QM_REG_PQTX2PF_42_RT_OFFSET				34536
+#define QM_REG_PQTX2PF_43_RT_OFFSET				34537
+#define QM_REG_PQTX2PF_44_RT_OFFSET				34538
+#define QM_REG_PQTX2PF_45_RT_OFFSET				34539
+#define QM_REG_PQTX2PF_46_RT_OFFSET				34540
+#define QM_REG_PQTX2PF_47_RT_OFFSET				34541
+#define QM_REG_PQTX2PF_48_RT_OFFSET				34542
+#define QM_REG_PQTX2PF_49_RT_OFFSET				34543
+#define QM_REG_PQTX2PF_50_RT_OFFSET				34544
+#define QM_REG_PQTX2PF_51_RT_OFFSET				34545
+#define QM_REG_PQTX2PF_52_RT_OFFSET				34546
+#define QM_REG_PQTX2PF_53_RT_OFFSET				34547
+#define QM_REG_PQTX2PF_54_RT_OFFSET				34548
+#define QM_REG_PQTX2PF_55_RT_OFFSET				34549
+#define QM_REG_PQTX2PF_56_RT_OFFSET				34550
+#define QM_REG_PQTX2PF_57_RT_OFFSET				34551
+#define QM_REG_PQTX2PF_58_RT_OFFSET				34552
+#define QM_REG_PQTX2PF_59_RT_OFFSET				34553
+#define QM_REG_PQTX2PF_60_RT_OFFSET				34554
+#define QM_REG_PQTX2PF_61_RT_OFFSET				34555
+#define QM_REG_PQTX2PF_62_RT_OFFSET				34556
+#define QM_REG_PQTX2PF_63_RT_OFFSET				34557
+#define QM_REG_PQOTHER2PF_0_RT_OFFSET				34558
+#define QM_REG_PQOTHER2PF_1_RT_OFFSET				34559
+#define QM_REG_PQOTHER2PF_2_RT_OFFSET				34560
+#define QM_REG_PQOTHER2PF_3_RT_OFFSET				34561
+#define QM_REG_PQOTHER2PF_4_RT_OFFSET				34562
+#define QM_REG_PQOTHER2PF_5_RT_OFFSET				34563
+#define QM_REG_PQOTHER2PF_6_RT_OFFSET				34564
+#define QM_REG_PQOTHER2PF_7_RT_OFFSET				34565
+#define QM_REG_PQOTHER2PF_8_RT_OFFSET				34566
+#define QM_REG_PQOTHER2PF_9_RT_OFFSET				34567
+#define QM_REG_PQOTHER2PF_10_RT_OFFSET				34568
+#define QM_REG_PQOTHER2PF_11_RT_OFFSET				34569
+#define QM_REG_PQOTHER2PF_12_RT_OFFSET				34570
+#define QM_REG_PQOTHER2PF_13_RT_OFFSET				34571
+#define QM_REG_PQOTHER2PF_14_RT_OFFSET				34572
+#define QM_REG_PQOTHER2PF_15_RT_OFFSET				34573
+#define QM_REG_RLGLBLPERIOD_0_RT_OFFSET				34574
+#define QM_REG_RLGLBLPERIOD_1_RT_OFFSET				34575
+#define QM_REG_RLGLBLPERIODTIMER_0_RT_OFFSET			34576
+#define QM_REG_RLGLBLPERIODTIMER_1_RT_OFFSET			34577
+#define QM_REG_RLGLBLPERIODSEL_0_RT_OFFSET			34578
+#define QM_REG_RLGLBLPERIODSEL_1_RT_OFFSET			34579
+#define QM_REG_RLGLBLPERIODSEL_2_RT_OFFSET			34580
+#define QM_REG_RLGLBLPERIODSEL_3_RT_OFFSET			34581
+#define QM_REG_RLGLBLPERIODSEL_4_RT_OFFSET			34582
+#define QM_REG_RLGLBLPERIODSEL_5_RT_OFFSET			34583
+#define QM_REG_RLGLBLPERIODSEL_6_RT_OFFSET			34584
+#define QM_REG_RLGLBLPERIODSEL_7_RT_OFFSET			34585
+#define QM_REG_RLGLBLINCVAL_RT_OFFSET				34586
+#define QM_REG_RLGLBLINCVAL_RT_SIZE				256
+#define QM_REG_RLGLBLUPPERBOUND_RT_OFFSET			34842
+#define QM_REG_RLGLBLUPPERBOUND_RT_SIZE				256
+#define QM_REG_RLGLBLCRD_RT_OFFSET				35098
+#define QM_REG_RLGLBLCRD_RT_SIZE				256
+#define QM_REG_RLGLBLENABLE_RT_OFFSET				35354
+#define QM_REG_RLPFPERIOD_RT_OFFSET				35355
+#define QM_REG_RLPFPERIODTIMER_RT_OFFSET			35356
+#define QM_REG_RLPFINCVAL_RT_OFFSET				35357
+#define QM_REG_RLPFINCVAL_RT_SIZE				16
+#define QM_REG_RLPFUPPERBOUND_RT_OFFSET				35373
+#define QM_REG_RLPFUPPERBOUND_RT_SIZE				16
+#define QM_REG_RLPFCRD_RT_OFFSET				35389
+#define QM_REG_RLPFCRD_RT_SIZE					16
+#define QM_REG_RLPFENABLE_RT_OFFSET				35405
+#define QM_REG_RLPFVOQENABLE_RT_OFFSET				35406
+#define QM_REG_WFQPFWEIGHT_RT_OFFSET				35407
+#define QM_REG_WFQPFWEIGHT_RT_SIZE				16
+#define QM_REG_WFQPFUPPERBOUND_RT_OFFSET			35423
+#define QM_REG_WFQPFUPPERBOUND_RT_SIZE				16
+#define QM_REG_WFQPFCRD_RT_OFFSET				35439
+#define QM_REG_WFQPFCRD_RT_SIZE					256
+#define QM_REG_WFQPFENABLE_RT_OFFSET				35695
+#define QM_REG_WFQVPENABLE_RT_OFFSET				35696
+#define QM_REG_BASEADDRTXPQ_RT_OFFSET				35697
+#define QM_REG_BASEADDRTXPQ_RT_SIZE				512
+#define QM_REG_TXPQMAP_RT_OFFSET				36209
+#define QM_REG_TXPQMAP_RT_SIZE					512
+#define QM_REG_WFQVPWEIGHT_RT_OFFSET				36721
+#define QM_REG_WFQVPWEIGHT_RT_SIZE				512
+#define QM_REG_WFQVPCRD_RT_OFFSET				37233
+#define QM_REG_WFQVPCRD_RT_SIZE					512
+#define QM_REG_WFQVPMAP_RT_OFFSET				37745
+#define QM_REG_WFQVPMAP_RT_SIZE					512
+#define QM_REG_PTRTBLTX_RT_OFFSET				38257
+#define QM_REG_PTRTBLTX_RT_SIZE					1024
+#define QM_REG_WFQPFCRD_MSB_RT_OFFSET				39281
+#define QM_REG_WFQPFCRD_MSB_RT_SIZE				320
+#define QM_REG_VOQCRDLINE_RT_OFFSET				39601
+#define QM_REG_VOQCRDLINE_RT_SIZE				36
+#define QM_REG_VOQINITCRDLINE_RT_OFFSET				39637
+#define QM_REG_VOQINITCRDLINE_RT_SIZE				36
+#define QM_REG_RLPFVOQENABLE_MSB_RT_OFFSET			39673
+#define NIG_REG_TAG_ETHERTYPE_0_RT_OFFSET			39674
+#define NIG_REG_BRB_GATE_DNTFWD_PORT_RT_OFFSET			39675
+#define NIG_REG_OUTER_TAG_VALUE_LIST0_RT_OFFSET			39676
+#define NIG_REG_OUTER_TAG_VALUE_LIST1_RT_OFFSET			39677
+#define NIG_REG_OUTER_TAG_VALUE_LIST2_RT_OFFSET			39678
+#define NIG_REG_OUTER_TAG_VALUE_LIST3_RT_OFFSET			39679
+#define NIG_REG_LLH_FUNC_TAGMAC_CLS_TYPE_RT_OFFSET		39680
+#define NIG_REG_LLH_FUNC_TAG_EN_RT_OFFSET			39681
+#define NIG_REG_LLH_FUNC_TAG_EN_RT_SIZE				4
+#define NIG_REG_LLH_FUNC_TAG_VALUE_RT_OFFSET			39685
+#define NIG_REG_LLH_FUNC_TAG_VALUE_RT_SIZE			4
+#define NIG_REG_LLH_FUNC_FILTER_VALUE_RT_OFFSET			39689
+#define NIG_REG_LLH_FUNC_FILTER_VALUE_RT_SIZE			32
+#define NIG_REG_LLH_FUNC_FILTER_EN_RT_OFFSET			39721
+#define NIG_REG_LLH_FUNC_FILTER_EN_RT_SIZE			16
+#define NIG_REG_LLH_FUNC_FILTER_MODE_RT_OFFSET			39737
+#define NIG_REG_LLH_FUNC_FILTER_MODE_RT_SIZE			16
+#define NIG_REG_LLH_FUNC_FILTER_PROTOCOL_TYPE_RT_OFFSET		39753
+#define NIG_REG_LLH_FUNC_FILTER_PROTOCOL_TYPE_RT_SIZE		16
+#define NIG_REG_LLH_FUNC_FILTER_HDR_SEL_RT_OFFSET		39769
+#define NIG_REG_LLH_FUNC_FILTER_HDR_SEL_RT_SIZE			16
+#define NIG_REG_TX_EDPM_CTRL_RT_OFFSET				39785
+#define NIG_REG_ROCE_DUPLICATE_TO_HOST_RT_OFFSET		39786
+#define NIG_REG_PPF_TO_ENGINE_SEL_RT_OFFSET			39787
+#define NIG_REG_PPF_TO_ENGINE_SEL_RT_SIZE			8
+#define NIG_REG_LLH_PF_CLS_FUNC_FILTER_VALUE_RT_OFFSET		39795
+#define NIG_REG_LLH_PF_CLS_FUNC_FILTER_VALUE_RT_SIZE		1024
+#define NIG_REG_LLH_PF_CLS_FUNC_FILTER_EN_RT_OFFSET		40819
+#define NIG_REG_LLH_PF_CLS_FUNC_FILTER_EN_RT_SIZE		512
+#define NIG_REG_LLH_PF_CLS_FUNC_FILTER_MODE_RT_OFFSET		41331
+#define NIG_REG_LLH_PF_CLS_FUNC_FILTER_MODE_RT_SIZE		512
+#define NIG_REG_LLH_PF_CLS_FUNC_FILTER_PROTOCOL_TYPE_RT_OFFSET	41843
+#define NIG_REG_LLH_PF_CLS_FUNC_FILTER_PROTOCOL_TYPE_RT_SIZE	512
+#define NIG_REG_LLH_PF_CLS_FUNC_FILTER_HDR_SEL_RT_OFFSET	42355
+#define NIG_REG_LLH_PF_CLS_FUNC_FILTER_HDR_SEL_RT_SIZE		512
+#define NIG_REG_LLH_PF_CLS_FILTERS_MAP_RT_OFFSET		42867
+#define NIG_REG_LLH_PF_CLS_FILTERS_MAP_RT_SIZE			32
+#define CDU_REG_CID_ADDR_PARAMS_RT_OFFSET			42899
+#define CDU_REG_SEGMENT0_PARAMS_RT_OFFSET			42900
+#define CDU_REG_SEGMENT1_PARAMS_RT_OFFSET			42901
+#define CDU_REG_PF_SEG0_TYPE_OFFSET_RT_OFFSET			42902
+#define CDU_REG_PF_SEG1_TYPE_OFFSET_RT_OFFSET			42903
+#define CDU_REG_PF_SEG2_TYPE_OFFSET_RT_OFFSET			42904
+#define CDU_REG_PF_SEG3_TYPE_OFFSET_RT_OFFSET			42905
+#define CDU_REG_PF_FL_SEG0_TYPE_OFFSET_RT_OFFSET		42906
+#define CDU_REG_PF_FL_SEG1_TYPE_OFFSET_RT_OFFSET		42907
+#define CDU_REG_PF_FL_SEG2_TYPE_OFFSET_RT_OFFSET		42908
+#define CDU_REG_PF_FL_SEG3_TYPE_OFFSET_RT_OFFSET		42909
+#define CDU_REG_VF_SEG_TYPE_OFFSET_RT_OFFSET			42910
+#define CDU_REG_VF_FL_SEG_TYPE_OFFSET_RT_OFFSET			42911
+#define PBF_REG_TAG_ETHERTYPE_0_RT_OFFSET			42912
+#define PBF_REG_BTB_SHARED_AREA_SIZE_RT_OFFSET			42913
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ0_RT_OFFSET		42914
+#define PBF_REG_BTB_GUARANTEED_VOQ0_RT_OFFSET			42915
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ0_RT_OFFSET		42916
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ1_RT_OFFSET		42917
+#define PBF_REG_BTB_GUARANTEED_VOQ1_RT_OFFSET			42918
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ1_RT_OFFSET		42919
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ2_RT_OFFSET		42920
+#define PBF_REG_BTB_GUARANTEED_VOQ2_RT_OFFSET			42921
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ2_RT_OFFSET		42922
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ3_RT_OFFSET		42923
+#define PBF_REG_BTB_GUARANTEED_VOQ3_RT_OFFSET			42924
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ3_RT_OFFSET		42925
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ4_RT_OFFSET		42926
+#define PBF_REG_BTB_GUARANTEED_VOQ4_RT_OFFSET			42927
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ4_RT_OFFSET		42928
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ5_RT_OFFSET		42929
+#define PBF_REG_BTB_GUARANTEED_VOQ5_RT_OFFSET			42930
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ5_RT_OFFSET		42931
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ6_RT_OFFSET		42932
+#define PBF_REG_BTB_GUARANTEED_VOQ6_RT_OFFSET			42933
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ6_RT_OFFSET		42934
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ7_RT_OFFSET		42935
+#define PBF_REG_BTB_GUARANTEED_VOQ7_RT_OFFSET			42936
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ7_RT_OFFSET		42937
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ8_RT_OFFSET		42938
+#define PBF_REG_BTB_GUARANTEED_VOQ8_RT_OFFSET			42939
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ8_RT_OFFSET		42940
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ9_RT_OFFSET		42941
+#define PBF_REG_BTB_GUARANTEED_VOQ9_RT_OFFSET			42942
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ9_RT_OFFSET		42943
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ10_RT_OFFSET		42944
+#define PBF_REG_BTB_GUARANTEED_VOQ10_RT_OFFSET			42945
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ10_RT_OFFSET		42946
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ11_RT_OFFSET		42947
+#define PBF_REG_BTB_GUARANTEED_VOQ11_RT_OFFSET			42948
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ11_RT_OFFSET		42949
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ12_RT_OFFSET		42950
+#define PBF_REG_BTB_GUARANTEED_VOQ12_RT_OFFSET			42951
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ12_RT_OFFSET		42952
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ13_RT_OFFSET		42953
+#define PBF_REG_BTB_GUARANTEED_VOQ13_RT_OFFSET			42954
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ13_RT_OFFSET		42955
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ14_RT_OFFSET		42956
+#define PBF_REG_BTB_GUARANTEED_VOQ14_RT_OFFSET			42957
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ14_RT_OFFSET		42958
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ15_RT_OFFSET		42959
+#define PBF_REG_BTB_GUARANTEED_VOQ15_RT_OFFSET			42960
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ15_RT_OFFSET		42961
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ16_RT_OFFSET		42962
+#define PBF_REG_BTB_GUARANTEED_VOQ16_RT_OFFSET			42963
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ16_RT_OFFSET		42964
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ17_RT_OFFSET		42965
+#define PBF_REG_BTB_GUARANTEED_VOQ17_RT_OFFSET			42966
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ17_RT_OFFSET		42967
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ18_RT_OFFSET		42968
+#define PBF_REG_BTB_GUARANTEED_VOQ18_RT_OFFSET			42969
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ18_RT_OFFSET		42970
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ19_RT_OFFSET		42971
+#define PBF_REG_BTB_GUARANTEED_VOQ19_RT_OFFSET			42972
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ19_RT_OFFSET		42973
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ20_RT_OFFSET		42974
+#define PBF_REG_BTB_GUARANTEED_VOQ20_RT_OFFSET			42975
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ20_RT_OFFSET		42976
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ21_RT_OFFSET		42977
+#define PBF_REG_BTB_GUARANTEED_VOQ21_RT_OFFSET			42978
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ21_RT_OFFSET		42979
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ22_RT_OFFSET		42980
+#define PBF_REG_BTB_GUARANTEED_VOQ22_RT_OFFSET			42981
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ22_RT_OFFSET		42982
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ23_RT_OFFSET		42983
+#define PBF_REG_BTB_GUARANTEED_VOQ23_RT_OFFSET			42984
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ23_RT_OFFSET		42985
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ24_RT_OFFSET		42986
+#define PBF_REG_BTB_GUARANTEED_VOQ24_RT_OFFSET			42987
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ24_RT_OFFSET		42988
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ25_RT_OFFSET		42989
+#define PBF_REG_BTB_GUARANTEED_VOQ25_RT_OFFSET			42990
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ25_RT_OFFSET		42991
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ26_RT_OFFSET		42992
+#define PBF_REG_BTB_GUARANTEED_VOQ26_RT_OFFSET			42993
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ26_RT_OFFSET		42994
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ27_RT_OFFSET		42995
+#define PBF_REG_BTB_GUARANTEED_VOQ27_RT_OFFSET			42996
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ27_RT_OFFSET		42997
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ28_RT_OFFSET		42998
+#define PBF_REG_BTB_GUARANTEED_VOQ28_RT_OFFSET			42999
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ28_RT_OFFSET		43000
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ29_RT_OFFSET		43001
+#define PBF_REG_BTB_GUARANTEED_VOQ29_RT_OFFSET			43002
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ29_RT_OFFSET		43003
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ30_RT_OFFSET		43004
+#define PBF_REG_BTB_GUARANTEED_VOQ30_RT_OFFSET			43005
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ30_RT_OFFSET		43006
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ31_RT_OFFSET		43007
+#define PBF_REG_BTB_GUARANTEED_VOQ31_RT_OFFSET			43008
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ31_RT_OFFSET		43009
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ32_RT_OFFSET		43010
+#define PBF_REG_BTB_GUARANTEED_VOQ32_RT_OFFSET			43011
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ32_RT_OFFSET		43012
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ33_RT_OFFSET		43013
+#define PBF_REG_BTB_GUARANTEED_VOQ33_RT_OFFSET			43014
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ33_RT_OFFSET		43015
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ34_RT_OFFSET		43016
+#define PBF_REG_BTB_GUARANTEED_VOQ34_RT_OFFSET			43017
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ34_RT_OFFSET		43018
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ35_RT_OFFSET		43019
+#define PBF_REG_BTB_GUARANTEED_VOQ35_RT_OFFSET			43020
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ35_RT_OFFSET		43021
+#define XCM_REG_CON_PHY_Q3_RT_OFFSET				43022
+
+#define RUNTIME_ARRAY_SIZE	43023
+
+/* Init Callbacks */
+#define DMAE_READY_CB	0
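Each *_RT_OFFSET above names one slot - and each *_RT_SIZE a run of consecutive slots - in a flat u32 runtime-init array of RUNTIME_ARRAY_SIZE entries that the driver fills before downloading it to the device. A minimal sketch of that indexing (the helper names here are illustrative, not the driver's actual API):

	static u32 rt_data[RUNTIME_ARRAY_SIZE];

	/* Store a single value at its runtime-array slot. */
	static void rt_store(u32 rt_offset, u32 val)
	{
		rt_data[rt_offset] = val;
	}

	/* Store an array spanning *_RT_SIZE consecutive slots. */
	static void rt_store_arr(u32 rt_offset, const u32 *vals, u32 size)
	{
		u32 i;

		for (i = 0; i < size; i++)
			rt_data[rt_offset + i] = vals[i];
	}

For example, rt_store(QM_REG_RLGLBLENABLE_RT_OFFSET, 1) would fill the single global-rate-limiter-enable slot, while QM_REG_RLGLBLINCVAL_RT_OFFSET, whose RT_SIZE is 256, would be filled through rt_store_arr().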
 
 /* The eth storm context for the Tstorm */
 struct tstorm_eth_conn_st_ctx {
@@ -4436,219 +4935,219 @@ struct xstorm_eth_conn_st_ctx {
 	__le32 reserved[60];
 };
 
-struct xstorm_eth_conn_ag_ctx {
+struct e4_xstorm_eth_conn_ag_ctx {
 	u8 reserved0;
-	u8 eth_state;
+	u8 state;
 	u8 flags0;
-#define XSTORM_ETH_CONN_AG_CTX_EXIST_IN_QM0_MASK	0x1
-#define XSTORM_ETH_CONN_AG_CTX_EXIST_IN_QM0_SHIFT	0
-#define XSTORM_ETH_CONN_AG_CTX_RESERVED1_MASK		0x1
-#define XSTORM_ETH_CONN_AG_CTX_RESERVED1_SHIFT		1
-#define XSTORM_ETH_CONN_AG_CTX_RESERVED2_MASK		0x1
-#define XSTORM_ETH_CONN_AG_CTX_RESERVED2_SHIFT		2
-#define XSTORM_ETH_CONN_AG_CTX_EXIST_IN_QM3_MASK	0x1
-#define XSTORM_ETH_CONN_AG_CTX_EXIST_IN_QM3_SHIFT	3
-#define XSTORM_ETH_CONN_AG_CTX_RESERVED3_MASK		0x1
-#define XSTORM_ETH_CONN_AG_CTX_RESERVED3_SHIFT		4
-#define XSTORM_ETH_CONN_AG_CTX_RESERVED4_MASK		0x1
-#define XSTORM_ETH_CONN_AG_CTX_RESERVED4_SHIFT		5
-#define XSTORM_ETH_CONN_AG_CTX_RESERVED5_MASK		0x1
-#define XSTORM_ETH_CONN_AG_CTX_RESERVED5_SHIFT		6
-#define XSTORM_ETH_CONN_AG_CTX_RESERVED6_MASK		0x1
-#define XSTORM_ETH_CONN_AG_CTX_RESERVED6_SHIFT		7
+#define E4_XSTORM_ETH_CONN_AG_CTX_EXIST_IN_QM0_MASK	0x1
+#define E4_XSTORM_ETH_CONN_AG_CTX_EXIST_IN_QM0_SHIFT	0
+#define E4_XSTORM_ETH_CONN_AG_CTX_RESERVED1_MASK	0x1
+#define E4_XSTORM_ETH_CONN_AG_CTX_RESERVED1_SHIFT	1
+#define E4_XSTORM_ETH_CONN_AG_CTX_RESERVED2_MASK	0x1
+#define E4_XSTORM_ETH_CONN_AG_CTX_RESERVED2_SHIFT	2
+#define E4_XSTORM_ETH_CONN_AG_CTX_EXIST_IN_QM3_MASK	0x1
+#define E4_XSTORM_ETH_CONN_AG_CTX_EXIST_IN_QM3_SHIFT	3
+#define E4_XSTORM_ETH_CONN_AG_CTX_RESERVED3_MASK	0x1
+#define E4_XSTORM_ETH_CONN_AG_CTX_RESERVED3_SHIFT	4
+#define E4_XSTORM_ETH_CONN_AG_CTX_RESERVED4_MASK	0x1
+#define E4_XSTORM_ETH_CONN_AG_CTX_RESERVED4_SHIFT	5
+#define E4_XSTORM_ETH_CONN_AG_CTX_RESERVED5_MASK	0x1
+#define E4_XSTORM_ETH_CONN_AG_CTX_RESERVED5_SHIFT	6
+#define E4_XSTORM_ETH_CONN_AG_CTX_RESERVED6_MASK	0x1
+#define E4_XSTORM_ETH_CONN_AG_CTX_RESERVED6_SHIFT	7
 	u8 flags1;
-#define XSTORM_ETH_CONN_AG_CTX_RESERVED7_MASK		0x1
-#define XSTORM_ETH_CONN_AG_CTX_RESERVED7_SHIFT		0
-#define XSTORM_ETH_CONN_AG_CTX_RESERVED8_MASK		0x1
-#define XSTORM_ETH_CONN_AG_CTX_RESERVED8_SHIFT		1
-#define XSTORM_ETH_CONN_AG_CTX_RESERVED9_MASK		0x1
-#define XSTORM_ETH_CONN_AG_CTX_RESERVED9_SHIFT		2
-#define XSTORM_ETH_CONN_AG_CTX_BIT11_MASK		0x1
-#define XSTORM_ETH_CONN_AG_CTX_BIT11_SHIFT		3
-#define XSTORM_ETH_CONN_AG_CTX_BIT12_MASK		0x1
-#define XSTORM_ETH_CONN_AG_CTX_BIT12_SHIFT		4
-#define XSTORM_ETH_CONN_AG_CTX_BIT13_MASK		0x1
-#define XSTORM_ETH_CONN_AG_CTX_BIT13_SHIFT		5
-#define XSTORM_ETH_CONN_AG_CTX_TX_RULE_ACTIVE_MASK	0x1
-#define XSTORM_ETH_CONN_AG_CTX_TX_RULE_ACTIVE_SHIFT	6
-#define XSTORM_ETH_CONN_AG_CTX_DQ_CF_ACTIVE_MASK	0x1
-#define XSTORM_ETH_CONN_AG_CTX_DQ_CF_ACTIVE_SHIFT	7
+#define E4_XSTORM_ETH_CONN_AG_CTX_RESERVED7_MASK	0x1
+#define E4_XSTORM_ETH_CONN_AG_CTX_RESERVED7_SHIFT	0
+#define E4_XSTORM_ETH_CONN_AG_CTX_RESERVED8_MASK	0x1
+#define E4_XSTORM_ETH_CONN_AG_CTX_RESERVED8_SHIFT	1
+#define E4_XSTORM_ETH_CONN_AG_CTX_RESERVED9_MASK	0x1
+#define E4_XSTORM_ETH_CONN_AG_CTX_RESERVED9_SHIFT	2
+#define E4_XSTORM_ETH_CONN_AG_CTX_BIT11_MASK		0x1
+#define E4_XSTORM_ETH_CONN_AG_CTX_BIT11_SHIFT		3
+#define E4_XSTORM_ETH_CONN_AG_CTX_E5_RESERVED2_MASK	0x1
+#define E4_XSTORM_ETH_CONN_AG_CTX_E5_RESERVED2_SHIFT	4
+#define E4_XSTORM_ETH_CONN_AG_CTX_E5_RESERVED3_MASK	0x1
+#define E4_XSTORM_ETH_CONN_AG_CTX_E5_RESERVED3_SHIFT	5
+#define E4_XSTORM_ETH_CONN_AG_CTX_TX_RULE_ACTIVE_MASK	0x1
+#define E4_XSTORM_ETH_CONN_AG_CTX_TX_RULE_ACTIVE_SHIFT	6
+#define E4_XSTORM_ETH_CONN_AG_CTX_DQ_CF_ACTIVE_MASK	0x1
+#define E4_XSTORM_ETH_CONN_AG_CTX_DQ_CF_ACTIVE_SHIFT	7
 	u8 flags2;
-#define XSTORM_ETH_CONN_AG_CTX_CF0_MASK		0x3
-#define XSTORM_ETH_CONN_AG_CTX_CF0_SHIFT		0
-#define XSTORM_ETH_CONN_AG_CTX_CF1_MASK		0x3
-#define XSTORM_ETH_CONN_AG_CTX_CF1_SHIFT		2
-#define XSTORM_ETH_CONN_AG_CTX_CF2_MASK		0x3
-#define XSTORM_ETH_CONN_AG_CTX_CF2_SHIFT		4
-#define XSTORM_ETH_CONN_AG_CTX_CF3_MASK		0x3
-#define XSTORM_ETH_CONN_AG_CTX_CF3_SHIFT		6
+#define E4_XSTORM_ETH_CONN_AG_CTX_CF0_MASK	0x3
+#define E4_XSTORM_ETH_CONN_AG_CTX_CF0_SHIFT	0
+#define E4_XSTORM_ETH_CONN_AG_CTX_CF1_MASK	0x3
+#define E4_XSTORM_ETH_CONN_AG_CTX_CF1_SHIFT	2
+#define E4_XSTORM_ETH_CONN_AG_CTX_CF2_MASK	0x3
+#define E4_XSTORM_ETH_CONN_AG_CTX_CF2_SHIFT	4
+#define E4_XSTORM_ETH_CONN_AG_CTX_CF3_MASK	0x3
+#define E4_XSTORM_ETH_CONN_AG_CTX_CF3_SHIFT	6
 	u8 flags3;
-#define XSTORM_ETH_CONN_AG_CTX_CF4_MASK		0x3
-#define XSTORM_ETH_CONN_AG_CTX_CF4_SHIFT		0
-#define XSTORM_ETH_CONN_AG_CTX_CF5_MASK		0x3
-#define XSTORM_ETH_CONN_AG_CTX_CF5_SHIFT		2
-#define XSTORM_ETH_CONN_AG_CTX_CF6_MASK		0x3
-#define XSTORM_ETH_CONN_AG_CTX_CF6_SHIFT		4
-#define XSTORM_ETH_CONN_AG_CTX_CF7_MASK		0x3
-#define XSTORM_ETH_CONN_AG_CTX_CF7_SHIFT		6
+#define E4_XSTORM_ETH_CONN_AG_CTX_CF4_MASK	0x3
+#define E4_XSTORM_ETH_CONN_AG_CTX_CF4_SHIFT	0
+#define E4_XSTORM_ETH_CONN_AG_CTX_CF5_MASK	0x3
+#define E4_XSTORM_ETH_CONN_AG_CTX_CF5_SHIFT	2
+#define E4_XSTORM_ETH_CONN_AG_CTX_CF6_MASK	0x3
+#define E4_XSTORM_ETH_CONN_AG_CTX_CF6_SHIFT	4
+#define E4_XSTORM_ETH_CONN_AG_CTX_CF7_MASK	0x3
+#define E4_XSTORM_ETH_CONN_AG_CTX_CF7_SHIFT	6
 	u8 flags4;
-#define XSTORM_ETH_CONN_AG_CTX_CF8_MASK		0x3
-#define XSTORM_ETH_CONN_AG_CTX_CF8_SHIFT		0
-#define XSTORM_ETH_CONN_AG_CTX_CF9_MASK		0x3
-#define XSTORM_ETH_CONN_AG_CTX_CF9_SHIFT		2
-#define XSTORM_ETH_CONN_AG_CTX_CF10_MASK		0x3
-#define XSTORM_ETH_CONN_AG_CTX_CF10_SHIFT		4
-#define XSTORM_ETH_CONN_AG_CTX_CF11_MASK		0x3
-#define XSTORM_ETH_CONN_AG_CTX_CF11_SHIFT		6
+#define E4_XSTORM_ETH_CONN_AG_CTX_CF8_MASK	0x3
+#define E4_XSTORM_ETH_CONN_AG_CTX_CF8_SHIFT	0
+#define E4_XSTORM_ETH_CONN_AG_CTX_CF9_MASK	0x3
+#define E4_XSTORM_ETH_CONN_AG_CTX_CF9_SHIFT	2
+#define E4_XSTORM_ETH_CONN_AG_CTX_CF10_MASK	0x3
+#define E4_XSTORM_ETH_CONN_AG_CTX_CF10_SHIFT	4
+#define E4_XSTORM_ETH_CONN_AG_CTX_CF11_MASK	0x3
+#define E4_XSTORM_ETH_CONN_AG_CTX_CF11_SHIFT	6
 	u8 flags5;
-#define XSTORM_ETH_CONN_AG_CTX_CF12_MASK		0x3
-#define XSTORM_ETH_CONN_AG_CTX_CF12_SHIFT		0
-#define XSTORM_ETH_CONN_AG_CTX_CF13_MASK		0x3
-#define XSTORM_ETH_CONN_AG_CTX_CF13_SHIFT		2
-#define XSTORM_ETH_CONN_AG_CTX_CF14_MASK		0x3
-#define XSTORM_ETH_CONN_AG_CTX_CF14_SHIFT		4
-#define XSTORM_ETH_CONN_AG_CTX_CF15_MASK		0x3
-#define XSTORM_ETH_CONN_AG_CTX_CF15_SHIFT		6
+#define E4_XSTORM_ETH_CONN_AG_CTX_CF12_MASK	0x3
+#define E4_XSTORM_ETH_CONN_AG_CTX_CF12_SHIFT	0
+#define E4_XSTORM_ETH_CONN_AG_CTX_CF13_MASK	0x3
+#define E4_XSTORM_ETH_CONN_AG_CTX_CF13_SHIFT	2
+#define E4_XSTORM_ETH_CONN_AG_CTX_CF14_MASK	0x3
+#define E4_XSTORM_ETH_CONN_AG_CTX_CF14_SHIFT	4
+#define E4_XSTORM_ETH_CONN_AG_CTX_CF15_MASK	0x3
+#define E4_XSTORM_ETH_CONN_AG_CTX_CF15_SHIFT	6
 	u8 flags6;
-#define XSTORM_ETH_CONN_AG_CTX_GO_TO_BD_CONS_CF_MASK	0x3
-#define XSTORM_ETH_CONN_AG_CTX_GO_TO_BD_CONS_CF_SHIFT	0
-#define XSTORM_ETH_CONN_AG_CTX_MULTI_UNICAST_CF_MASK	0x3
-#define XSTORM_ETH_CONN_AG_CTX_MULTI_UNICAST_CF_SHIFT	2
-#define XSTORM_ETH_CONN_AG_CTX_DQ_CF_MASK		0x3
-#define XSTORM_ETH_CONN_AG_CTX_DQ_CF_SHIFT		4
-#define XSTORM_ETH_CONN_AG_CTX_TERMINATE_CF_MASK	0x3
-#define XSTORM_ETH_CONN_AG_CTX_TERMINATE_CF_SHIFT	6
+#define E4_XSTORM_ETH_CONN_AG_CTX_GO_TO_BD_CONS_CF_MASK		0x3
+#define E4_XSTORM_ETH_CONN_AG_CTX_GO_TO_BD_CONS_CF_SHIFT	0
+#define E4_XSTORM_ETH_CONN_AG_CTX_MULTI_UNICAST_CF_MASK		0x3
+#define E4_XSTORM_ETH_CONN_AG_CTX_MULTI_UNICAST_CF_SHIFT	2
+#define E4_XSTORM_ETH_CONN_AG_CTX_DQ_CF_MASK			0x3
+#define E4_XSTORM_ETH_CONN_AG_CTX_DQ_CF_SHIFT			4
+#define E4_XSTORM_ETH_CONN_AG_CTX_TERMINATE_CF_MASK		0x3
+#define E4_XSTORM_ETH_CONN_AG_CTX_TERMINATE_CF_SHIFT		6
 	u8 flags7;
-#define XSTORM_ETH_CONN_AG_CTX_FLUSH_Q0_MASK		0x3
-#define XSTORM_ETH_CONN_AG_CTX_FLUSH_Q0_SHIFT		0
-#define XSTORM_ETH_CONN_AG_CTX_RESERVED10_MASK		0x3
-#define XSTORM_ETH_CONN_AG_CTX_RESERVED10_SHIFT		2
-#define XSTORM_ETH_CONN_AG_CTX_SLOW_PATH_MASK		0x3
-#define XSTORM_ETH_CONN_AG_CTX_SLOW_PATH_SHIFT		4
-#define XSTORM_ETH_CONN_AG_CTX_CF0EN_MASK		0x1
-#define XSTORM_ETH_CONN_AG_CTX_CF0EN_SHIFT		6
-#define XSTORM_ETH_CONN_AG_CTX_CF1EN_MASK		0x1
-#define XSTORM_ETH_CONN_AG_CTX_CF1EN_SHIFT		7
+#define E4_XSTORM_ETH_CONN_AG_CTX_FLUSH_Q0_MASK		0x3
+#define E4_XSTORM_ETH_CONN_AG_CTX_FLUSH_Q0_SHIFT	0
+#define E4_XSTORM_ETH_CONN_AG_CTX_RESERVED10_MASK	0x3
+#define E4_XSTORM_ETH_CONN_AG_CTX_RESERVED10_SHIFT	2
+#define E4_XSTORM_ETH_CONN_AG_CTX_SLOW_PATH_MASK	0x3
+#define E4_XSTORM_ETH_CONN_AG_CTX_SLOW_PATH_SHIFT	4
+#define E4_XSTORM_ETH_CONN_AG_CTX_CF0EN_MASK		0x1
+#define E4_XSTORM_ETH_CONN_AG_CTX_CF0EN_SHIFT		6
+#define E4_XSTORM_ETH_CONN_AG_CTX_CF1EN_MASK		0x1
+#define E4_XSTORM_ETH_CONN_AG_CTX_CF1EN_SHIFT		7
 	u8 flags8;
-#define XSTORM_ETH_CONN_AG_CTX_CF2EN_MASK		0x1
-#define XSTORM_ETH_CONN_AG_CTX_CF2EN_SHIFT		0
-#define XSTORM_ETH_CONN_AG_CTX_CF3EN_MASK		0x1
-#define XSTORM_ETH_CONN_AG_CTX_CF3EN_SHIFT		1
-#define XSTORM_ETH_CONN_AG_CTX_CF4EN_MASK		0x1
-#define XSTORM_ETH_CONN_AG_CTX_CF4EN_SHIFT		2
-#define XSTORM_ETH_CONN_AG_CTX_CF5EN_MASK		0x1
-#define XSTORM_ETH_CONN_AG_CTX_CF5EN_SHIFT		3
-#define XSTORM_ETH_CONN_AG_CTX_CF6EN_MASK		0x1
-#define XSTORM_ETH_CONN_AG_CTX_CF6EN_SHIFT		4
-#define XSTORM_ETH_CONN_AG_CTX_CF7EN_MASK		0x1
-#define XSTORM_ETH_CONN_AG_CTX_CF7EN_SHIFT		5
-#define XSTORM_ETH_CONN_AG_CTX_CF8EN_MASK		0x1
-#define XSTORM_ETH_CONN_AG_CTX_CF8EN_SHIFT		6
-#define XSTORM_ETH_CONN_AG_CTX_CF9EN_MASK		0x1
-#define XSTORM_ETH_CONN_AG_CTX_CF9EN_SHIFT		7
+#define E4_XSTORM_ETH_CONN_AG_CTX_CF2EN_MASK	0x1
+#define E4_XSTORM_ETH_CONN_AG_CTX_CF2EN_SHIFT	0
+#define E4_XSTORM_ETH_CONN_AG_CTX_CF3EN_MASK	0x1
+#define E4_XSTORM_ETH_CONN_AG_CTX_CF3EN_SHIFT	1
+#define E4_XSTORM_ETH_CONN_AG_CTX_CF4EN_MASK	0x1
+#define E4_XSTORM_ETH_CONN_AG_CTX_CF4EN_SHIFT	2
+#define E4_XSTORM_ETH_CONN_AG_CTX_CF5EN_MASK	0x1
+#define E4_XSTORM_ETH_CONN_AG_CTX_CF5EN_SHIFT	3
+#define E4_XSTORM_ETH_CONN_AG_CTX_CF6EN_MASK	0x1
+#define E4_XSTORM_ETH_CONN_AG_CTX_CF6EN_SHIFT	4
+#define E4_XSTORM_ETH_CONN_AG_CTX_CF7EN_MASK	0x1
+#define E4_XSTORM_ETH_CONN_AG_CTX_CF7EN_SHIFT	5
+#define E4_XSTORM_ETH_CONN_AG_CTX_CF8EN_MASK	0x1
+#define E4_XSTORM_ETH_CONN_AG_CTX_CF8EN_SHIFT	6
+#define E4_XSTORM_ETH_CONN_AG_CTX_CF9EN_MASK	0x1
+#define E4_XSTORM_ETH_CONN_AG_CTX_CF9EN_SHIFT	7
 	u8 flags9;
-#define XSTORM_ETH_CONN_AG_CTX_CF10EN_MASK		0x1
-#define XSTORM_ETH_CONN_AG_CTX_CF10EN_SHIFT		0
-#define XSTORM_ETH_CONN_AG_CTX_CF11EN_MASK		0x1
-#define XSTORM_ETH_CONN_AG_CTX_CF11EN_SHIFT		1
-#define XSTORM_ETH_CONN_AG_CTX_CF12EN_MASK		0x1
-#define XSTORM_ETH_CONN_AG_CTX_CF12EN_SHIFT		2
-#define XSTORM_ETH_CONN_AG_CTX_CF13EN_MASK		0x1
-#define XSTORM_ETH_CONN_AG_CTX_CF13EN_SHIFT		3
-#define XSTORM_ETH_CONN_AG_CTX_CF14EN_MASK		0x1
-#define XSTORM_ETH_CONN_AG_CTX_CF14EN_SHIFT		4
-#define XSTORM_ETH_CONN_AG_CTX_CF15EN_MASK		0x1
-#define XSTORM_ETH_CONN_AG_CTX_CF15EN_SHIFT		5
-#define XSTORM_ETH_CONN_AG_CTX_GO_TO_BD_CONS_CF_EN_MASK	0x1
-#define XSTORM_ETH_CONN_AG_CTX_GO_TO_BD_CONS_CF_EN_SHIFT 6
-#define XSTORM_ETH_CONN_AG_CTX_MULTI_UNICAST_CF_EN_MASK	0x1
-#define XSTORM_ETH_CONN_AG_CTX_MULTI_UNICAST_CF_EN_SHIFT 7
+#define E4_XSTORM_ETH_CONN_AG_CTX_CF10EN_MASK			0x1
+#define E4_XSTORM_ETH_CONN_AG_CTX_CF10EN_SHIFT			0
+#define E4_XSTORM_ETH_CONN_AG_CTX_CF11EN_MASK			0x1
+#define E4_XSTORM_ETH_CONN_AG_CTX_CF11EN_SHIFT			1
+#define E4_XSTORM_ETH_CONN_AG_CTX_CF12EN_MASK			0x1
+#define E4_XSTORM_ETH_CONN_AG_CTX_CF12EN_SHIFT			2
+#define E4_XSTORM_ETH_CONN_AG_CTX_CF13EN_MASK			0x1
+#define E4_XSTORM_ETH_CONN_AG_CTX_CF13EN_SHIFT			3
+#define E4_XSTORM_ETH_CONN_AG_CTX_CF14EN_MASK			0x1
+#define E4_XSTORM_ETH_CONN_AG_CTX_CF14EN_SHIFT			4
+#define E4_XSTORM_ETH_CONN_AG_CTX_CF15EN_MASK			0x1
+#define E4_XSTORM_ETH_CONN_AG_CTX_CF15EN_SHIFT			5
+#define E4_XSTORM_ETH_CONN_AG_CTX_GO_TO_BD_CONS_CF_EN_MASK	0x1
+#define E4_XSTORM_ETH_CONN_AG_CTX_GO_TO_BD_CONS_CF_EN_SHIFT	6
+#define E4_XSTORM_ETH_CONN_AG_CTX_MULTI_UNICAST_CF_EN_MASK	0x1
+#define E4_XSTORM_ETH_CONN_AG_CTX_MULTI_UNICAST_CF_EN_SHIFT	7
 	u8 flags10;
-#define XSTORM_ETH_CONN_AG_CTX_DQ_CF_EN_MASK		0x1
-#define XSTORM_ETH_CONN_AG_CTX_DQ_CF_EN_SHIFT		0
-#define XSTORM_ETH_CONN_AG_CTX_TERMINATE_CF_EN_MASK	0x1
-#define XSTORM_ETH_CONN_AG_CTX_TERMINATE_CF_EN_SHIFT	1
-#define XSTORM_ETH_CONN_AG_CTX_FLUSH_Q0_EN_MASK		0x1
-#define XSTORM_ETH_CONN_AG_CTX_FLUSH_Q0_EN_SHIFT	2
-#define XSTORM_ETH_CONN_AG_CTX_RESERVED11_MASK		0x1
-#define XSTORM_ETH_CONN_AG_CTX_RESERVED11_SHIFT		3
-#define XSTORM_ETH_CONN_AG_CTX_SLOW_PATH_EN_MASK	0x1
-#define XSTORM_ETH_CONN_AG_CTX_SLOW_PATH_EN_SHIFT	4
-#define XSTORM_ETH_CONN_AG_CTX_TPH_ENABLE_EN_RESERVED_MASK 0x1
-#define XSTORM_ETH_CONN_AG_CTX_TPH_ENABLE_EN_RESERVED_SHIFT 5
-#define XSTORM_ETH_CONN_AG_CTX_RESERVED12_MASK		0x1
-#define XSTORM_ETH_CONN_AG_CTX_RESERVED12_SHIFT		6
-#define XSTORM_ETH_CONN_AG_CTX_RESERVED13_MASK		0x1
-#define XSTORM_ETH_CONN_AG_CTX_RESERVED13_SHIFT		7
+#define E4_XSTORM_ETH_CONN_AG_CTX_DQ_CF_EN_MASK			0x1
+#define E4_XSTORM_ETH_CONN_AG_CTX_DQ_CF_EN_SHIFT		0
+#define E4_XSTORM_ETH_CONN_AG_CTX_TERMINATE_CF_EN_MASK		0x1
+#define E4_XSTORM_ETH_CONN_AG_CTX_TERMINATE_CF_EN_SHIFT		1
+#define E4_XSTORM_ETH_CONN_AG_CTX_FLUSH_Q0_EN_MASK		0x1
+#define E4_XSTORM_ETH_CONN_AG_CTX_FLUSH_Q0_EN_SHIFT		2
+#define E4_XSTORM_ETH_CONN_AG_CTX_RESERVED11_MASK		0x1
+#define E4_XSTORM_ETH_CONN_AG_CTX_RESERVED11_SHIFT		3
+#define E4_XSTORM_ETH_CONN_AG_CTX_SLOW_PATH_EN_MASK		0x1
+#define E4_XSTORM_ETH_CONN_AG_CTX_SLOW_PATH_EN_SHIFT		4
+#define E4_XSTORM_ETH_CONN_AG_CTX_TPH_ENABLE_EN_RESERVED_MASK	0x1
+#define E4_XSTORM_ETH_CONN_AG_CTX_TPH_ENABLE_EN_RESERVED_SHIFT	5
+#define E4_XSTORM_ETH_CONN_AG_CTX_RESERVED12_MASK		0x1
+#define E4_XSTORM_ETH_CONN_AG_CTX_RESERVED12_SHIFT		6
+#define E4_XSTORM_ETH_CONN_AG_CTX_RESERVED13_MASK		0x1
+#define E4_XSTORM_ETH_CONN_AG_CTX_RESERVED13_SHIFT		7
 	u8 flags11;
-#define XSTORM_ETH_CONN_AG_CTX_RESERVED14_MASK		0x1
-#define XSTORM_ETH_CONN_AG_CTX_RESERVED14_SHIFT		0
-#define XSTORM_ETH_CONN_AG_CTX_RESERVED15_MASK		0x1
-#define XSTORM_ETH_CONN_AG_CTX_RESERVED15_SHIFT		1
-#define XSTORM_ETH_CONN_AG_CTX_TX_DEC_RULE_EN_MASK	0x1
-#define XSTORM_ETH_CONN_AG_CTX_TX_DEC_RULE_EN_SHIFT	2
-#define XSTORM_ETH_CONN_AG_CTX_RULE5EN_MASK		0x1
-#define XSTORM_ETH_CONN_AG_CTX_RULE5EN_SHIFT		3
-#define XSTORM_ETH_CONN_AG_CTX_RULE6EN_MASK		0x1
-#define XSTORM_ETH_CONN_AG_CTX_RULE6EN_SHIFT		4
-#define XSTORM_ETH_CONN_AG_CTX_RULE7EN_MASK		0x1
-#define XSTORM_ETH_CONN_AG_CTX_RULE7EN_SHIFT		5
-#define XSTORM_ETH_CONN_AG_CTX_A0_RESERVED1_MASK	0x1
-#define XSTORM_ETH_CONN_AG_CTX_A0_RESERVED1_SHIFT	6
-#define XSTORM_ETH_CONN_AG_CTX_RULE9EN_MASK		0x1
-#define XSTORM_ETH_CONN_AG_CTX_RULE9EN_SHIFT		7
+#define E4_XSTORM_ETH_CONN_AG_CTX_RESERVED14_MASK	0x1
+#define E4_XSTORM_ETH_CONN_AG_CTX_RESERVED14_SHIFT	0
+#define E4_XSTORM_ETH_CONN_AG_CTX_RESERVED15_MASK	0x1
+#define E4_XSTORM_ETH_CONN_AG_CTX_RESERVED15_SHIFT	1
+#define E4_XSTORM_ETH_CONN_AG_CTX_TX_DEC_RULE_EN_MASK	0x1
+#define E4_XSTORM_ETH_CONN_AG_CTX_TX_DEC_RULE_EN_SHIFT	2
+#define E4_XSTORM_ETH_CONN_AG_CTX_RULE5EN_MASK		0x1
+#define E4_XSTORM_ETH_CONN_AG_CTX_RULE5EN_SHIFT		3
+#define E4_XSTORM_ETH_CONN_AG_CTX_RULE6EN_MASK		0x1
+#define E4_XSTORM_ETH_CONN_AG_CTX_RULE6EN_SHIFT		4
+#define E4_XSTORM_ETH_CONN_AG_CTX_RULE7EN_MASK		0x1
+#define E4_XSTORM_ETH_CONN_AG_CTX_RULE7EN_SHIFT		5
+#define E4_XSTORM_ETH_CONN_AG_CTX_A0_RESERVED1_MASK	0x1
+#define E4_XSTORM_ETH_CONN_AG_CTX_A0_RESERVED1_SHIFT	6
+#define E4_XSTORM_ETH_CONN_AG_CTX_RULE9EN_MASK		0x1
+#define E4_XSTORM_ETH_CONN_AG_CTX_RULE9EN_SHIFT		7
 	u8 flags12;
-#define XSTORM_ETH_CONN_AG_CTX_RULE10EN_MASK		0x1
-#define XSTORM_ETH_CONN_AG_CTX_RULE10EN_SHIFT		0
-#define XSTORM_ETH_CONN_AG_CTX_RULE11EN_MASK		0x1
-#define XSTORM_ETH_CONN_AG_CTX_RULE11EN_SHIFT		1
-#define XSTORM_ETH_CONN_AG_CTX_A0_RESERVED2_MASK	0x1
-#define XSTORM_ETH_CONN_AG_CTX_A0_RESERVED2_SHIFT	2
-#define XSTORM_ETH_CONN_AG_CTX_A0_RESERVED3_MASK	0x1
-#define XSTORM_ETH_CONN_AG_CTX_A0_RESERVED3_SHIFT	3
-#define XSTORM_ETH_CONN_AG_CTX_RULE14EN_MASK		0x1
-#define XSTORM_ETH_CONN_AG_CTX_RULE14EN_SHIFT		4
-#define XSTORM_ETH_CONN_AG_CTX_RULE15EN_MASK		0x1
-#define XSTORM_ETH_CONN_AG_CTX_RULE15EN_SHIFT		5
-#define XSTORM_ETH_CONN_AG_CTX_RULE16EN_MASK		0x1
-#define XSTORM_ETH_CONN_AG_CTX_RULE16EN_SHIFT		6
-#define XSTORM_ETH_CONN_AG_CTX_RULE17EN_MASK		0x1
-#define XSTORM_ETH_CONN_AG_CTX_RULE17EN_SHIFT		7
+#define E4_XSTORM_ETH_CONN_AG_CTX_RULE10EN_MASK		0x1
+#define E4_XSTORM_ETH_CONN_AG_CTX_RULE10EN_SHIFT	0
+#define E4_XSTORM_ETH_CONN_AG_CTX_RULE11EN_MASK		0x1
+#define E4_XSTORM_ETH_CONN_AG_CTX_RULE11EN_SHIFT	1
+#define E4_XSTORM_ETH_CONN_AG_CTX_A0_RESERVED2_MASK	0x1
+#define E4_XSTORM_ETH_CONN_AG_CTX_A0_RESERVED2_SHIFT	2
+#define E4_XSTORM_ETH_CONN_AG_CTX_A0_RESERVED3_MASK	0x1
+#define E4_XSTORM_ETH_CONN_AG_CTX_A0_RESERVED3_SHIFT	3
+#define E4_XSTORM_ETH_CONN_AG_CTX_RULE14EN_MASK		0x1
+#define E4_XSTORM_ETH_CONN_AG_CTX_RULE14EN_SHIFT	4
+#define E4_XSTORM_ETH_CONN_AG_CTX_RULE15EN_MASK		0x1
+#define E4_XSTORM_ETH_CONN_AG_CTX_RULE15EN_SHIFT	5
+#define E4_XSTORM_ETH_CONN_AG_CTX_RULE16EN_MASK		0x1
+#define E4_XSTORM_ETH_CONN_AG_CTX_RULE16EN_SHIFT	6
+#define E4_XSTORM_ETH_CONN_AG_CTX_RULE17EN_MASK		0x1
+#define E4_XSTORM_ETH_CONN_AG_CTX_RULE17EN_SHIFT	7
 	u8 flags13;
-#define XSTORM_ETH_CONN_AG_CTX_RULE18EN_MASK		0x1
-#define XSTORM_ETH_CONN_AG_CTX_RULE18EN_SHIFT		0
-#define XSTORM_ETH_CONN_AG_CTX_RULE19EN_MASK		0x1
-#define XSTORM_ETH_CONN_AG_CTX_RULE19EN_SHIFT		1
-#define XSTORM_ETH_CONN_AG_CTX_A0_RESERVED4_MASK	0x1
-#define XSTORM_ETH_CONN_AG_CTX_A0_RESERVED4_SHIFT	2
-#define XSTORM_ETH_CONN_AG_CTX_A0_RESERVED5_MASK	0x1
-#define XSTORM_ETH_CONN_AG_CTX_A0_RESERVED5_SHIFT	3
-#define XSTORM_ETH_CONN_AG_CTX_A0_RESERVED6_MASK	0x1
-#define XSTORM_ETH_CONN_AG_CTX_A0_RESERVED6_SHIFT	4
-#define XSTORM_ETH_CONN_AG_CTX_A0_RESERVED7_MASK	0x1
-#define XSTORM_ETH_CONN_AG_CTX_A0_RESERVED7_SHIFT	5
-#define XSTORM_ETH_CONN_AG_CTX_A0_RESERVED8_MASK	0x1
-#define XSTORM_ETH_CONN_AG_CTX_A0_RESERVED8_SHIFT	6
-#define XSTORM_ETH_CONN_AG_CTX_A0_RESERVED9_MASK	0x1
-#define XSTORM_ETH_CONN_AG_CTX_A0_RESERVED9_SHIFT	7
+#define E4_XSTORM_ETH_CONN_AG_CTX_RULE18EN_MASK		0x1
+#define E4_XSTORM_ETH_CONN_AG_CTX_RULE18EN_SHIFT	0
+#define E4_XSTORM_ETH_CONN_AG_CTX_RULE19EN_MASK		0x1
+#define E4_XSTORM_ETH_CONN_AG_CTX_RULE19EN_SHIFT	1
+#define E4_XSTORM_ETH_CONN_AG_CTX_A0_RESERVED4_MASK	0x1
+#define E4_XSTORM_ETH_CONN_AG_CTX_A0_RESERVED4_SHIFT	2
+#define E4_XSTORM_ETH_CONN_AG_CTX_A0_RESERVED5_MASK	0x1
+#define E4_XSTORM_ETH_CONN_AG_CTX_A0_RESERVED5_SHIFT	3
+#define E4_XSTORM_ETH_CONN_AG_CTX_A0_RESERVED6_MASK	0x1
+#define E4_XSTORM_ETH_CONN_AG_CTX_A0_RESERVED6_SHIFT	4
+#define E4_XSTORM_ETH_CONN_AG_CTX_A0_RESERVED7_MASK	0x1
+#define E4_XSTORM_ETH_CONN_AG_CTX_A0_RESERVED7_SHIFT	5
+#define E4_XSTORM_ETH_CONN_AG_CTX_A0_RESERVED8_MASK	0x1
+#define E4_XSTORM_ETH_CONN_AG_CTX_A0_RESERVED8_SHIFT	6
+#define E4_XSTORM_ETH_CONN_AG_CTX_A0_RESERVED9_MASK	0x1
+#define E4_XSTORM_ETH_CONN_AG_CTX_A0_RESERVED9_SHIFT	7
 	u8 flags14;
-#define XSTORM_ETH_CONN_AG_CTX_EDPM_USE_EXT_HDR_MASK	0x1
-#define XSTORM_ETH_CONN_AG_CTX_EDPM_USE_EXT_HDR_SHIFT	0
-#define XSTORM_ETH_CONN_AG_CTX_EDPM_SEND_RAW_L3L4_MASK	0x1
-#define XSTORM_ETH_CONN_AG_CTX_EDPM_SEND_RAW_L3L4_SHIFT	1
-#define XSTORM_ETH_CONN_AG_CTX_EDPM_INBAND_PROP_HDR_MASK 0x1
-#define XSTORM_ETH_CONN_AG_CTX_EDPM_INBAND_PROP_HDR_SHIFT 2
-#define XSTORM_ETH_CONN_AG_CTX_EDPM_SEND_EXT_TUNNEL_MASK 0x1
-#define XSTORM_ETH_CONN_AG_CTX_EDPM_SEND_EXT_TUNNEL_SHIFT 3
-#define XSTORM_ETH_CONN_AG_CTX_L2_EDPM_ENABLE_MASK	0x1
-#define XSTORM_ETH_CONN_AG_CTX_L2_EDPM_ENABLE_SHIFT	4
-#define XSTORM_ETH_CONN_AG_CTX_ROCE_EDPM_ENABLE_MASK	0x1
-#define XSTORM_ETH_CONN_AG_CTX_ROCE_EDPM_ENABLE_SHIFT	5
-#define XSTORM_ETH_CONN_AG_CTX_TPH_ENABLE_MASK		0x3
-#define XSTORM_ETH_CONN_AG_CTX_TPH_ENABLE_SHIFT		6
+#define E4_XSTORM_ETH_CONN_AG_CTX_EDPM_USE_EXT_HDR_MASK		0x1
+#define E4_XSTORM_ETH_CONN_AG_CTX_EDPM_USE_EXT_HDR_SHIFT	0
+#define E4_XSTORM_ETH_CONN_AG_CTX_EDPM_SEND_RAW_L3L4_MASK	0x1
+#define E4_XSTORM_ETH_CONN_AG_CTX_EDPM_SEND_RAW_L3L4_SHIFT	1
+#define E4_XSTORM_ETH_CONN_AG_CTX_EDPM_INBAND_PROP_HDR_MASK	0x1
+#define E4_XSTORM_ETH_CONN_AG_CTX_EDPM_INBAND_PROP_HDR_SHIFT	2
+#define E4_XSTORM_ETH_CONN_AG_CTX_EDPM_SEND_EXT_TUNNEL_MASK	0x1
+#define E4_XSTORM_ETH_CONN_AG_CTX_EDPM_SEND_EXT_TUNNEL_SHIFT	3
+#define E4_XSTORM_ETH_CONN_AG_CTX_L2_EDPM_ENABLE_MASK		0x1
+#define E4_XSTORM_ETH_CONN_AG_CTX_L2_EDPM_ENABLE_SHIFT		4
+#define E4_XSTORM_ETH_CONN_AG_CTX_ROCE_EDPM_ENABLE_MASK		0x1
+#define E4_XSTORM_ETH_CONN_AG_CTX_ROCE_EDPM_ENABLE_SHIFT	5
+#define E4_XSTORM_ETH_CONN_AG_CTX_TPH_ENABLE_MASK		0x3
+#define E4_XSTORM_ETH_CONN_AG_CTX_TPH_ENABLE_SHIFT		6
 	u8 edpm_event_id;
 	__le16 physical_q0;
-	__le16 ereserved1;
+	__le16 e5_reserved1;
 	__le16 edpm_num_bds;
 	__le16 tx_bd_cons;
 	__le16 tx_bd_prod;
@@ -4681,7 +5180,7 @@ struct xstorm_eth_conn_ag_ctx {
 	u8 byte13;
 	u8 byte14;
 	u8 byte15;
-	u8 ereserved;
+	u8 e5_reserved;
 	__le16 word11;
 	__le32 reg10;
 	__le32 reg11;
@@ -4704,37 +5203,37 @@ struct ystorm_eth_conn_st_ctx {
 	__le32 reserved[8];
 };
 
-struct ystorm_eth_conn_ag_ctx {
+struct e4_ystorm_eth_conn_ag_ctx {
 	u8 byte0;
 	u8 state;
 	u8 flags0;
-#define YSTORM_ETH_CONN_AG_CTX_BIT0_MASK		0x1
-#define YSTORM_ETH_CONN_AG_CTX_BIT0_SHIFT		0
-#define YSTORM_ETH_CONN_AG_CTX_BIT1_MASK		0x1
-#define YSTORM_ETH_CONN_AG_CTX_BIT1_SHIFT		1
-#define YSTORM_ETH_CONN_AG_CTX_TX_BD_CONS_UPD_CF_MASK	0x3
-#define YSTORM_ETH_CONN_AG_CTX_TX_BD_CONS_UPD_CF_SHIFT	2
-#define YSTORM_ETH_CONN_AG_CTX_PMD_TERMINATE_CF_MASK	0x3
-#define YSTORM_ETH_CONN_AG_CTX_PMD_TERMINATE_CF_SHIFT	4
-#define YSTORM_ETH_CONN_AG_CTX_CF2_MASK			0x3
-#define YSTORM_ETH_CONN_AG_CTX_CF2_SHIFT		6
+#define E4_YSTORM_ETH_CONN_AG_CTX_BIT0_MASK			0x1
+#define E4_YSTORM_ETH_CONN_AG_CTX_BIT0_SHIFT			0
+#define E4_YSTORM_ETH_CONN_AG_CTX_BIT1_MASK			0x1
+#define E4_YSTORM_ETH_CONN_AG_CTX_BIT1_SHIFT			1
+#define E4_YSTORM_ETH_CONN_AG_CTX_TX_BD_CONS_UPD_CF_MASK	0x3
+#define E4_YSTORM_ETH_CONN_AG_CTX_TX_BD_CONS_UPD_CF_SHIFT	2
+#define E4_YSTORM_ETH_CONN_AG_CTX_PMD_TERMINATE_CF_MASK		0x3
+#define E4_YSTORM_ETH_CONN_AG_CTX_PMD_TERMINATE_CF_SHIFT	4
+#define E4_YSTORM_ETH_CONN_AG_CTX_CF2_MASK			0x3
+#define E4_YSTORM_ETH_CONN_AG_CTX_CF2_SHIFT			6
 	u8 flags1;
-#define YSTORM_ETH_CONN_AG_CTX_TX_BD_CONS_UPD_CF_EN_MASK	0x1
-#define YSTORM_ETH_CONN_AG_CTX_TX_BD_CONS_UPD_CF_EN_SHIFT	0
-#define YSTORM_ETH_CONN_AG_CTX_PMD_TERMINATE_CF_EN_MASK		0x1
-#define YSTORM_ETH_CONN_AG_CTX_PMD_TERMINATE_CF_EN_SHIFT	1
-#define YSTORM_ETH_CONN_AG_CTX_CF2EN_MASK			0x1
-#define YSTORM_ETH_CONN_AG_CTX_CF2EN_SHIFT			2
-#define YSTORM_ETH_CONN_AG_CTX_RULE0EN_MASK			0x1
-#define YSTORM_ETH_CONN_AG_CTX_RULE0EN_SHIFT			3
-#define YSTORM_ETH_CONN_AG_CTX_RULE1EN_MASK			0x1
-#define YSTORM_ETH_CONN_AG_CTX_RULE1EN_SHIFT			4
-#define YSTORM_ETH_CONN_AG_CTX_RULE2EN_MASK			0x1
-#define YSTORM_ETH_CONN_AG_CTX_RULE2EN_SHIFT			5
-#define YSTORM_ETH_CONN_AG_CTX_RULE3EN_MASK			0x1
-#define YSTORM_ETH_CONN_AG_CTX_RULE3EN_SHIFT			6
-#define YSTORM_ETH_CONN_AG_CTX_RULE4EN_MASK			0x1
-#define YSTORM_ETH_CONN_AG_CTX_RULE4EN_SHIFT			7
+#define E4_YSTORM_ETH_CONN_AG_CTX_TX_BD_CONS_UPD_CF_EN_MASK	0x1
+#define E4_YSTORM_ETH_CONN_AG_CTX_TX_BD_CONS_UPD_CF_EN_SHIFT	0
+#define E4_YSTORM_ETH_CONN_AG_CTX_PMD_TERMINATE_CF_EN_MASK	0x1
+#define E4_YSTORM_ETH_CONN_AG_CTX_PMD_TERMINATE_CF_EN_SHIFT	1
+#define E4_YSTORM_ETH_CONN_AG_CTX_CF2EN_MASK			0x1
+#define E4_YSTORM_ETH_CONN_AG_CTX_CF2EN_SHIFT			2
+#define E4_YSTORM_ETH_CONN_AG_CTX_RULE0EN_MASK			0x1
+#define E4_YSTORM_ETH_CONN_AG_CTX_RULE0EN_SHIFT			3
+#define E4_YSTORM_ETH_CONN_AG_CTX_RULE1EN_MASK			0x1
+#define E4_YSTORM_ETH_CONN_AG_CTX_RULE1EN_SHIFT			4
+#define E4_YSTORM_ETH_CONN_AG_CTX_RULE2EN_MASK			0x1
+#define E4_YSTORM_ETH_CONN_AG_CTX_RULE2EN_SHIFT			5
+#define E4_YSTORM_ETH_CONN_AG_CTX_RULE3EN_MASK			0x1
+#define E4_YSTORM_ETH_CONN_AG_CTX_RULE3EN_SHIFT			6
+#define E4_YSTORM_ETH_CONN_AG_CTX_RULE4EN_MASK			0x1
+#define E4_YSTORM_ETH_CONN_AG_CTX_RULE4EN_SHIFT			7
 	u8 tx_q0_int_coallecing_timeset;
 	u8 byte3;
 	__le16 word0;
@@ -4748,89 +5247,89 @@ struct ystorm_eth_conn_ag_ctx {
 	__le32 reg3;
 };
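The _MASK/_SHIFT pairs throughout these aggregative contexts are consumed by qed's generic field accessors (GET_FIELD()/SET_FIELD() in the common HSI headers), which paste those suffixes onto the field name. A small sketch against the ystorm context just defined, assuming those macros:

	struct e4_ystorm_eth_conn_ag_ctx ctx = {};
	u8 cf2en;

	/* Sets bit 2 of flags1 (CF2EN: mask 0x1, shift 2). */
	SET_FIELD(ctx.flags1, E4_YSTORM_ETH_CONN_AG_CTX_CF2EN, 1);

	/* Reads it back as (flags1 >> 2) & 0x1, i.e. 1 here. */
	cf2en = GET_FIELD(ctx.flags1, E4_YSTORM_ETH_CONN_AG_CTX_CF2EN);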
 
-struct tstorm_eth_conn_ag_ctx {
+struct e4_tstorm_eth_conn_ag_ctx {
 	u8 byte0;
 	u8 byte1;
 	u8 flags0;
-#define TSTORM_ETH_CONN_AG_CTX_BIT0_MASK		0x1
-#define TSTORM_ETH_CONN_AG_CTX_BIT0_SHIFT		0
-#define TSTORM_ETH_CONN_AG_CTX_BIT1_MASK		0x1
-#define TSTORM_ETH_CONN_AG_CTX_BIT1_SHIFT		1
-#define TSTORM_ETH_CONN_AG_CTX_BIT2_MASK		0x1
-#define TSTORM_ETH_CONN_AG_CTX_BIT2_SHIFT		2
-#define TSTORM_ETH_CONN_AG_CTX_BIT3_MASK		0x1
-#define TSTORM_ETH_CONN_AG_CTX_BIT3_SHIFT		3
-#define TSTORM_ETH_CONN_AG_CTX_BIT4_MASK		0x1
-#define TSTORM_ETH_CONN_AG_CTX_BIT4_SHIFT		4
-#define TSTORM_ETH_CONN_AG_CTX_BIT5_MASK		0x1
-#define TSTORM_ETH_CONN_AG_CTX_BIT5_SHIFT		5
-#define TSTORM_ETH_CONN_AG_CTX_CF0_MASK			0x3
-#define TSTORM_ETH_CONN_AG_CTX_CF0_SHIFT		6
+#define E4_TSTORM_ETH_CONN_AG_CTX_BIT0_MASK	0x1
+#define E4_TSTORM_ETH_CONN_AG_CTX_BIT0_SHIFT	0
+#define E4_TSTORM_ETH_CONN_AG_CTX_BIT1_MASK	0x1
+#define E4_TSTORM_ETH_CONN_AG_CTX_BIT1_SHIFT	1
+#define E4_TSTORM_ETH_CONN_AG_CTX_BIT2_MASK	0x1
+#define E4_TSTORM_ETH_CONN_AG_CTX_BIT2_SHIFT	2
+#define E4_TSTORM_ETH_CONN_AG_CTX_BIT3_MASK	0x1
+#define E4_TSTORM_ETH_CONN_AG_CTX_BIT3_SHIFT	3
+#define E4_TSTORM_ETH_CONN_AG_CTX_BIT4_MASK	0x1
+#define E4_TSTORM_ETH_CONN_AG_CTX_BIT4_SHIFT	4
+#define E4_TSTORM_ETH_CONN_AG_CTX_BIT5_MASK	0x1
+#define E4_TSTORM_ETH_CONN_AG_CTX_BIT5_SHIFT	5
+#define E4_TSTORM_ETH_CONN_AG_CTX_CF0_MASK	0x3
+#define E4_TSTORM_ETH_CONN_AG_CTX_CF0_SHIFT	6
 	u8 flags1;
-#define TSTORM_ETH_CONN_AG_CTX_CF1_MASK			0x3
-#define TSTORM_ETH_CONN_AG_CTX_CF1_SHIFT		0
-#define TSTORM_ETH_CONN_AG_CTX_CF2_MASK			0x3
-#define TSTORM_ETH_CONN_AG_CTX_CF2_SHIFT		2
-#define TSTORM_ETH_CONN_AG_CTX_CF3_MASK			0x3
-#define TSTORM_ETH_CONN_AG_CTX_CF3_SHIFT		4
-#define TSTORM_ETH_CONN_AG_CTX_CF4_MASK			0x3
-#define TSTORM_ETH_CONN_AG_CTX_CF4_SHIFT		6
+#define E4_TSTORM_ETH_CONN_AG_CTX_CF1_MASK	0x3
+#define E4_TSTORM_ETH_CONN_AG_CTX_CF1_SHIFT	0
+#define E4_TSTORM_ETH_CONN_AG_CTX_CF2_MASK	0x3
+#define E4_TSTORM_ETH_CONN_AG_CTX_CF2_SHIFT	2
+#define E4_TSTORM_ETH_CONN_AG_CTX_CF3_MASK	0x3
+#define E4_TSTORM_ETH_CONN_AG_CTX_CF3_SHIFT	4
+#define E4_TSTORM_ETH_CONN_AG_CTX_CF4_MASK	0x3
+#define E4_TSTORM_ETH_CONN_AG_CTX_CF4_SHIFT	6
 	u8 flags2;
-#define TSTORM_ETH_CONN_AG_CTX_CF5_MASK			0x3
-#define TSTORM_ETH_CONN_AG_CTX_CF5_SHIFT		0
-#define TSTORM_ETH_CONN_AG_CTX_CF6_MASK			0x3
-#define TSTORM_ETH_CONN_AG_CTX_CF6_SHIFT		2
-#define TSTORM_ETH_CONN_AG_CTX_CF7_MASK			0x3
-#define TSTORM_ETH_CONN_AG_CTX_CF7_SHIFT		4
-#define TSTORM_ETH_CONN_AG_CTX_CF8_MASK			0x3
-#define TSTORM_ETH_CONN_AG_CTX_CF8_SHIFT		6
+#define E4_TSTORM_ETH_CONN_AG_CTX_CF5_MASK	0x3
+#define E4_TSTORM_ETH_CONN_AG_CTX_CF5_SHIFT	0
+#define E4_TSTORM_ETH_CONN_AG_CTX_CF6_MASK	0x3
+#define E4_TSTORM_ETH_CONN_AG_CTX_CF6_SHIFT	2
+#define E4_TSTORM_ETH_CONN_AG_CTX_CF7_MASK	0x3
+#define E4_TSTORM_ETH_CONN_AG_CTX_CF7_SHIFT	4
+#define E4_TSTORM_ETH_CONN_AG_CTX_CF8_MASK	0x3
+#define E4_TSTORM_ETH_CONN_AG_CTX_CF8_SHIFT	6
 	u8 flags3;
-#define TSTORM_ETH_CONN_AG_CTX_CF9_MASK			0x3
-#define TSTORM_ETH_CONN_AG_CTX_CF9_SHIFT		0
-#define TSTORM_ETH_CONN_AG_CTX_CF10_MASK		0x3
-#define TSTORM_ETH_CONN_AG_CTX_CF10_SHIFT		2
-#define TSTORM_ETH_CONN_AG_CTX_CF0EN_MASK		0x1
-#define TSTORM_ETH_CONN_AG_CTX_CF0EN_SHIFT		4
-#define TSTORM_ETH_CONN_AG_CTX_CF1EN_MASK		0x1
-#define TSTORM_ETH_CONN_AG_CTX_CF1EN_SHIFT		5
-#define TSTORM_ETH_CONN_AG_CTX_CF2EN_MASK		0x1
-#define TSTORM_ETH_CONN_AG_CTX_CF2EN_SHIFT		6
-#define TSTORM_ETH_CONN_AG_CTX_CF3EN_MASK		0x1
-#define TSTORM_ETH_CONN_AG_CTX_CF3EN_SHIFT		7
+#define E4_TSTORM_ETH_CONN_AG_CTX_CF9_MASK	0x3
+#define E4_TSTORM_ETH_CONN_AG_CTX_CF9_SHIFT	0
+#define E4_TSTORM_ETH_CONN_AG_CTX_CF10_MASK	0x3
+#define E4_TSTORM_ETH_CONN_AG_CTX_CF10_SHIFT	2
+#define E4_TSTORM_ETH_CONN_AG_CTX_CF0EN_MASK	0x1
+#define E4_TSTORM_ETH_CONN_AG_CTX_CF0EN_SHIFT	4
+#define E4_TSTORM_ETH_CONN_AG_CTX_CF1EN_MASK	0x1
+#define E4_TSTORM_ETH_CONN_AG_CTX_CF1EN_SHIFT	5
+#define E4_TSTORM_ETH_CONN_AG_CTX_CF2EN_MASK	0x1
+#define E4_TSTORM_ETH_CONN_AG_CTX_CF2EN_SHIFT	6
+#define E4_TSTORM_ETH_CONN_AG_CTX_CF3EN_MASK	0x1
+#define E4_TSTORM_ETH_CONN_AG_CTX_CF3EN_SHIFT	7
 	u8 flags4;
-#define TSTORM_ETH_CONN_AG_CTX_CF4EN_MASK		0x1
-#define TSTORM_ETH_CONN_AG_CTX_CF4EN_SHIFT		0
-#define TSTORM_ETH_CONN_AG_CTX_CF5EN_MASK		0x1
-#define TSTORM_ETH_CONN_AG_CTX_CF5EN_SHIFT		1
-#define TSTORM_ETH_CONN_AG_CTX_CF6EN_MASK		0x1
-#define TSTORM_ETH_CONN_AG_CTX_CF6EN_SHIFT		2
-#define TSTORM_ETH_CONN_AG_CTX_CF7EN_MASK		0x1
-#define TSTORM_ETH_CONN_AG_CTX_CF7EN_SHIFT		3
-#define TSTORM_ETH_CONN_AG_CTX_CF8EN_MASK		0x1
-#define TSTORM_ETH_CONN_AG_CTX_CF8EN_SHIFT		4
-#define TSTORM_ETH_CONN_AG_CTX_CF9EN_MASK		0x1
-#define TSTORM_ETH_CONN_AG_CTX_CF9EN_SHIFT		5
-#define TSTORM_ETH_CONN_AG_CTX_CF10EN_MASK		0x1
-#define TSTORM_ETH_CONN_AG_CTX_CF10EN_SHIFT		6
-#define TSTORM_ETH_CONN_AG_CTX_RULE0EN_MASK		0x1
-#define TSTORM_ETH_CONN_AG_CTX_RULE0EN_SHIFT		7
+#define E4_TSTORM_ETH_CONN_AG_CTX_CF4EN_MASK	0x1
+#define E4_TSTORM_ETH_CONN_AG_CTX_CF4EN_SHIFT	0
+#define E4_TSTORM_ETH_CONN_AG_CTX_CF5EN_MASK	0x1
+#define E4_TSTORM_ETH_CONN_AG_CTX_CF5EN_SHIFT	1
+#define E4_TSTORM_ETH_CONN_AG_CTX_CF6EN_MASK	0x1
+#define E4_TSTORM_ETH_CONN_AG_CTX_CF6EN_SHIFT	2
+#define E4_TSTORM_ETH_CONN_AG_CTX_CF7EN_MASK	0x1
+#define E4_TSTORM_ETH_CONN_AG_CTX_CF7EN_SHIFT	3
+#define E4_TSTORM_ETH_CONN_AG_CTX_CF8EN_MASK	0x1
+#define E4_TSTORM_ETH_CONN_AG_CTX_CF8EN_SHIFT	4
+#define E4_TSTORM_ETH_CONN_AG_CTX_CF9EN_MASK	0x1
+#define E4_TSTORM_ETH_CONN_AG_CTX_CF9EN_SHIFT	5
+#define E4_TSTORM_ETH_CONN_AG_CTX_CF10EN_MASK	0x1
+#define E4_TSTORM_ETH_CONN_AG_CTX_CF10EN_SHIFT	6
+#define E4_TSTORM_ETH_CONN_AG_CTX_RULE0EN_MASK	0x1
+#define E4_TSTORM_ETH_CONN_AG_CTX_RULE0EN_SHIFT	7
 	u8 flags5;
-#define TSTORM_ETH_CONN_AG_CTX_RULE1EN_MASK		0x1
-#define TSTORM_ETH_CONN_AG_CTX_RULE1EN_SHIFT		0
-#define TSTORM_ETH_CONN_AG_CTX_RULE2EN_MASK		0x1
-#define TSTORM_ETH_CONN_AG_CTX_RULE2EN_SHIFT		1
-#define TSTORM_ETH_CONN_AG_CTX_RULE3EN_MASK		0x1
-#define TSTORM_ETH_CONN_AG_CTX_RULE3EN_SHIFT		2
-#define TSTORM_ETH_CONN_AG_CTX_RULE4EN_MASK		0x1
-#define TSTORM_ETH_CONN_AG_CTX_RULE4EN_SHIFT		3
-#define TSTORM_ETH_CONN_AG_CTX_RULE5EN_MASK		0x1
-#define TSTORM_ETH_CONN_AG_CTX_RULE5EN_SHIFT		4
-#define TSTORM_ETH_CONN_AG_CTX_RX_BD_EN_MASK		0x1
-#define TSTORM_ETH_CONN_AG_CTX_RX_BD_EN_SHIFT		5
-#define TSTORM_ETH_CONN_AG_CTX_RULE7EN_MASK		0x1
-#define TSTORM_ETH_CONN_AG_CTX_RULE7EN_SHIFT		6
-#define TSTORM_ETH_CONN_AG_CTX_RULE8EN_MASK		0x1
-#define TSTORM_ETH_CONN_AG_CTX_RULE8EN_SHIFT		7
+#define E4_TSTORM_ETH_CONN_AG_CTX_RULE1EN_MASK		0x1
+#define E4_TSTORM_ETH_CONN_AG_CTX_RULE1EN_SHIFT		0
+#define E4_TSTORM_ETH_CONN_AG_CTX_RULE2EN_MASK		0x1
+#define E4_TSTORM_ETH_CONN_AG_CTX_RULE2EN_SHIFT		1
+#define E4_TSTORM_ETH_CONN_AG_CTX_RULE3EN_MASK		0x1
+#define E4_TSTORM_ETH_CONN_AG_CTX_RULE3EN_SHIFT		2
+#define E4_TSTORM_ETH_CONN_AG_CTX_RULE4EN_MASK		0x1
+#define E4_TSTORM_ETH_CONN_AG_CTX_RULE4EN_SHIFT		3
+#define E4_TSTORM_ETH_CONN_AG_CTX_RULE5EN_MASK		0x1
+#define E4_TSTORM_ETH_CONN_AG_CTX_RULE5EN_SHIFT		4
+#define E4_TSTORM_ETH_CONN_AG_CTX_RX_BD_EN_MASK		0x1
+#define E4_TSTORM_ETH_CONN_AG_CTX_RX_BD_EN_SHIFT	5
+#define E4_TSTORM_ETH_CONN_AG_CTX_RULE7EN_MASK		0x1
+#define E4_TSTORM_ETH_CONN_AG_CTX_RULE7EN_SHIFT		6
+#define E4_TSTORM_ETH_CONN_AG_CTX_RULE8EN_MASK		0x1
+#define E4_TSTORM_ETH_CONN_AG_CTX_RULE8EN_SHIFT		7
 	__le32 reg0;
 	__le32 reg1;
 	__le32 reg2;
@@ -4852,63 +5351,63 @@ struct tstorm_eth_conn_ag_ctx {
 	__le32 reg10;
 };
 
-struct ustorm_eth_conn_ag_ctx {
+struct e4_ustorm_eth_conn_ag_ctx {
 	u8 byte0;
 	u8 byte1;
 	u8 flags0;
-#define USTORM_ETH_CONN_AG_CTX_BIT0_MASK			0x1
-#define USTORM_ETH_CONN_AG_CTX_BIT0_SHIFT			0
-#define USTORM_ETH_CONN_AG_CTX_BIT1_MASK			0x1
-#define USTORM_ETH_CONN_AG_CTX_BIT1_SHIFT			1
-#define USTORM_ETH_CONN_AG_CTX_TX_PMD_TERMINATE_CF_MASK		0x3
-#define USTORM_ETH_CONN_AG_CTX_TX_PMD_TERMINATE_CF_SHIFT	2
-#define USTORM_ETH_CONN_AG_CTX_RX_PMD_TERMINATE_CF_MASK		0x3
-#define USTORM_ETH_CONN_AG_CTX_RX_PMD_TERMINATE_CF_SHIFT	4
-#define USTORM_ETH_CONN_AG_CTX_CF2_MASK				0x3
-#define USTORM_ETH_CONN_AG_CTX_CF2_SHIFT			6
+#define E4_USTORM_ETH_CONN_AG_CTX_BIT0_MASK			0x1
+#define E4_USTORM_ETH_CONN_AG_CTX_BIT0_SHIFT			0
+#define E4_USTORM_ETH_CONN_AG_CTX_BIT1_MASK			0x1
+#define E4_USTORM_ETH_CONN_AG_CTX_BIT1_SHIFT			1
+#define E4_USTORM_ETH_CONN_AG_CTX_TX_PMD_TERMINATE_CF_MASK	0x3
+#define E4_USTORM_ETH_CONN_AG_CTX_TX_PMD_TERMINATE_CF_SHIFT	2
+#define E4_USTORM_ETH_CONN_AG_CTX_RX_PMD_TERMINATE_CF_MASK	0x3
+#define E4_USTORM_ETH_CONN_AG_CTX_RX_PMD_TERMINATE_CF_SHIFT	4
+#define E4_USTORM_ETH_CONN_AG_CTX_CF2_MASK			0x3
+#define E4_USTORM_ETH_CONN_AG_CTX_CF2_SHIFT			6
 	u8 flags1;
-#define USTORM_ETH_CONN_AG_CTX_CF3_MASK				0x3
-#define USTORM_ETH_CONN_AG_CTX_CF3_SHIFT			0
-#define USTORM_ETH_CONN_AG_CTX_TX_ARM_CF_MASK			0x3
-#define USTORM_ETH_CONN_AG_CTX_TX_ARM_CF_SHIFT			2
-#define USTORM_ETH_CONN_AG_CTX_RX_ARM_CF_MASK			0x3
-#define USTORM_ETH_CONN_AG_CTX_RX_ARM_CF_SHIFT			4
-#define USTORM_ETH_CONN_AG_CTX_TX_BD_CONS_UPD_CF_MASK		0x3
-#define USTORM_ETH_CONN_AG_CTX_TX_BD_CONS_UPD_CF_SHIFT		6
+#define E4_USTORM_ETH_CONN_AG_CTX_CF3_MASK			0x3
+#define E4_USTORM_ETH_CONN_AG_CTX_CF3_SHIFT			0
+#define E4_USTORM_ETH_CONN_AG_CTX_TX_ARM_CF_MASK		0x3
+#define E4_USTORM_ETH_CONN_AG_CTX_TX_ARM_CF_SHIFT		2
+#define E4_USTORM_ETH_CONN_AG_CTX_RX_ARM_CF_MASK		0x3
+#define E4_USTORM_ETH_CONN_AG_CTX_RX_ARM_CF_SHIFT		4
+#define E4_USTORM_ETH_CONN_AG_CTX_TX_BD_CONS_UPD_CF_MASK	0x3
+#define E4_USTORM_ETH_CONN_AG_CTX_TX_BD_CONS_UPD_CF_SHIFT	6
 	u8 flags2;
-#define USTORM_ETH_CONN_AG_CTX_TX_PMD_TERMINATE_CF_EN_MASK	0x1
-#define USTORM_ETH_CONN_AG_CTX_TX_PMD_TERMINATE_CF_EN_SHIFT	0
-#define USTORM_ETH_CONN_AG_CTX_RX_PMD_TERMINATE_CF_EN_MASK	0x1
-#define USTORM_ETH_CONN_AG_CTX_RX_PMD_TERMINATE_CF_EN_SHIFT	1
-#define USTORM_ETH_CONN_AG_CTX_CF2EN_MASK			0x1
-#define USTORM_ETH_CONN_AG_CTX_CF2EN_SHIFT			2
-#define USTORM_ETH_CONN_AG_CTX_CF3EN_MASK			0x1
-#define USTORM_ETH_CONN_AG_CTX_CF3EN_SHIFT			3
-#define USTORM_ETH_CONN_AG_CTX_TX_ARM_CF_EN_MASK		0x1
-#define USTORM_ETH_CONN_AG_CTX_TX_ARM_CF_EN_SHIFT		4
-#define USTORM_ETH_CONN_AG_CTX_RX_ARM_CF_EN_MASK		0x1
-#define USTORM_ETH_CONN_AG_CTX_RX_ARM_CF_EN_SHIFT		5
-#define USTORM_ETH_CONN_AG_CTX_TX_BD_CONS_UPD_CF_EN_MASK	0x1
-#define USTORM_ETH_CONN_AG_CTX_TX_BD_CONS_UPD_CF_EN_SHIFT	6
-#define USTORM_ETH_CONN_AG_CTX_RULE0EN_MASK			0x1
-#define USTORM_ETH_CONN_AG_CTX_RULE0EN_SHIFT			7
+#define E4_USTORM_ETH_CONN_AG_CTX_TX_PMD_TERMINATE_CF_EN_MASK	0x1
+#define E4_USTORM_ETH_CONN_AG_CTX_TX_PMD_TERMINATE_CF_EN_SHIFT	0
+#define E4_USTORM_ETH_CONN_AG_CTX_RX_PMD_TERMINATE_CF_EN_MASK	0x1
+#define E4_USTORM_ETH_CONN_AG_CTX_RX_PMD_TERMINATE_CF_EN_SHIFT	1
+#define E4_USTORM_ETH_CONN_AG_CTX_CF2EN_MASK			0x1
+#define E4_USTORM_ETH_CONN_AG_CTX_CF2EN_SHIFT			2
+#define E4_USTORM_ETH_CONN_AG_CTX_CF3EN_MASK			0x1
+#define E4_USTORM_ETH_CONN_AG_CTX_CF3EN_SHIFT			3
+#define E4_USTORM_ETH_CONN_AG_CTX_TX_ARM_CF_EN_MASK		0x1
+#define E4_USTORM_ETH_CONN_AG_CTX_TX_ARM_CF_EN_SHIFT		4
+#define E4_USTORM_ETH_CONN_AG_CTX_RX_ARM_CF_EN_MASK		0x1
+#define E4_USTORM_ETH_CONN_AG_CTX_RX_ARM_CF_EN_SHIFT		5
+#define E4_USTORM_ETH_CONN_AG_CTX_TX_BD_CONS_UPD_CF_EN_MASK	0x1
+#define E4_USTORM_ETH_CONN_AG_CTX_TX_BD_CONS_UPD_CF_EN_SHIFT	6
+#define E4_USTORM_ETH_CONN_AG_CTX_RULE0EN_MASK			0x1
+#define E4_USTORM_ETH_CONN_AG_CTX_RULE0EN_SHIFT			7
 	u8 flags3;
-#define USTORM_ETH_CONN_AG_CTX_RULE1EN_MASK			0x1
-#define USTORM_ETH_CONN_AG_CTX_RULE1EN_SHIFT			0
-#define USTORM_ETH_CONN_AG_CTX_RULE2EN_MASK			0x1
-#define USTORM_ETH_CONN_AG_CTX_RULE2EN_SHIFT			1
-#define USTORM_ETH_CONN_AG_CTX_RULE3EN_MASK			0x1
-#define USTORM_ETH_CONN_AG_CTX_RULE3EN_SHIFT			2
-#define USTORM_ETH_CONN_AG_CTX_RULE4EN_MASK			0x1
-#define USTORM_ETH_CONN_AG_CTX_RULE4EN_SHIFT			3
-#define USTORM_ETH_CONN_AG_CTX_RULE5EN_MASK			0x1
-#define USTORM_ETH_CONN_AG_CTX_RULE5EN_SHIFT			4
-#define USTORM_ETH_CONN_AG_CTX_RULE6EN_MASK			0x1
-#define USTORM_ETH_CONN_AG_CTX_RULE6EN_SHIFT			5
-#define USTORM_ETH_CONN_AG_CTX_RULE7EN_MASK			0x1
-#define USTORM_ETH_CONN_AG_CTX_RULE7EN_SHIFT			6
-#define USTORM_ETH_CONN_AG_CTX_RULE8EN_MASK			0x1
-#define USTORM_ETH_CONN_AG_CTX_RULE8EN_SHIFT			7
+#define E4_USTORM_ETH_CONN_AG_CTX_RULE1EN_MASK	0x1
+#define E4_USTORM_ETH_CONN_AG_CTX_RULE1EN_SHIFT	0
+#define E4_USTORM_ETH_CONN_AG_CTX_RULE2EN_MASK	0x1
+#define E4_USTORM_ETH_CONN_AG_CTX_RULE2EN_SHIFT	1
+#define E4_USTORM_ETH_CONN_AG_CTX_RULE3EN_MASK	0x1
+#define E4_USTORM_ETH_CONN_AG_CTX_RULE3EN_SHIFT	2
+#define E4_USTORM_ETH_CONN_AG_CTX_RULE4EN_MASK	0x1
+#define E4_USTORM_ETH_CONN_AG_CTX_RULE4EN_SHIFT	3
+#define E4_USTORM_ETH_CONN_AG_CTX_RULE5EN_MASK	0x1
+#define E4_USTORM_ETH_CONN_AG_CTX_RULE5EN_SHIFT	4
+#define E4_USTORM_ETH_CONN_AG_CTX_RULE6EN_MASK	0x1
+#define E4_USTORM_ETH_CONN_AG_CTX_RULE6EN_SHIFT	5
+#define E4_USTORM_ETH_CONN_AG_CTX_RULE7EN_MASK	0x1
+#define E4_USTORM_ETH_CONN_AG_CTX_RULE7EN_SHIFT	6
+#define E4_USTORM_ETH_CONN_AG_CTX_RULE8EN_MASK	0x1
+#define E4_USTORM_ETH_CONN_AG_CTX_RULE8EN_SHIFT	7
 	u8 byte2;
 	u8 byte3;
 	__le16 word0;
@@ -4932,20 +5431,21 @@ struct mstorm_eth_conn_st_ctx {
 };
 
 /* eth connection context */
-struct eth_conn_context {
+struct e4_eth_conn_context {
 	struct tstorm_eth_conn_st_ctx tstorm_st_context;
 	struct regpair tstorm_st_padding[2];
 	struct pstorm_eth_conn_st_ctx pstorm_st_context;
 	struct xstorm_eth_conn_st_ctx xstorm_st_context;
-	struct xstorm_eth_conn_ag_ctx xstorm_ag_context;
+	struct e4_xstorm_eth_conn_ag_ctx xstorm_ag_context;
 	struct ystorm_eth_conn_st_ctx ystorm_st_context;
-	struct ystorm_eth_conn_ag_ctx ystorm_ag_context;
-	struct tstorm_eth_conn_ag_ctx tstorm_ag_context;
-	struct ustorm_eth_conn_ag_ctx ustorm_ag_context;
+	struct e4_ystorm_eth_conn_ag_ctx ystorm_ag_context;
+	struct e4_tstorm_eth_conn_ag_ctx tstorm_ag_context;
+	struct e4_ustorm_eth_conn_ag_ctx ustorm_ag_context;
 	struct ustorm_eth_conn_st_ctx ustorm_st_context;
 	struct mstorm_eth_conn_st_ctx mstorm_st_context;
 };
 
+/* Ethernet error codes */
 enum eth_error_code {
 	ETH_OK = 0x00,
 	ETH_FILTERS_MAC_ADD_FAIL_FULL,
@@ -4972,6 +5472,7 @@ enum eth_error_code {
 	MAX_ETH_ERROR_CODE
 };
 
+/* Opcodes for the event ring */
 enum eth_event_opcode {
 	ETH_EVENT_UNUSED,
 	ETH_EVENT_VPORT_START,
@@ -4983,13 +5484,14 @@ enum eth_event_opcode {
 	ETH_EVENT_RX_QUEUE_UPDATE,
 	ETH_EVENT_RX_QUEUE_STOP,
 	ETH_EVENT_FILTERS_UPDATE,
-	ETH_EVENT_RESERVED,
-	ETH_EVENT_RESERVED2,
-	ETH_EVENT_RESERVED3,
+	ETH_EVENT_RX_ADD_OPENFLOW_FILTER,
+	ETH_EVENT_RX_DELETE_OPENFLOW_FILTER,
+	ETH_EVENT_RX_CREATE_OPENFLOW_ACTION,
 	ETH_EVENT_RX_ADD_UDP_FILTER,
 	ETH_EVENT_RX_DELETE_UDP_FILTER,
-	ETH_EVENT_RESERVED4,
-	ETH_EVENT_RESERVED5,
+	ETH_EVENT_RX_CREATE_GFT_ACTION,
+	ETH_EVENT_RX_GFT_UPDATE_FILTER,
+	ETH_EVENT_TX_QUEUE_UPDATE,
 	MAX_ETH_EVENT_OPCODE
 };
 
@@ -5039,6 +5541,7 @@ enum eth_filter_type {
 	MAX_ETH_FILTER_TYPE
 };
 
+/* Eth IPv4 Fragment Type */
 enum eth_ipv4_frag_type {
 	ETH_IPV4_NOT_FRAG,
 	ETH_IPV4_FIRST_FRAG,
@@ -5046,12 +5549,14 @@ enum eth_ipv4_frag_type {
 	MAX_ETH_IPV4_FRAG_TYPE
 };
 
+/* Eth IP type (IPv4 or IPv6) */
 enum eth_ip_type {
 	ETH_IPV4,
 	ETH_IPV6,
 	MAX_ETH_IP_TYPE
 };
 
+/* Ethernet Ramrod Command IDs */
 enum eth_ramrod_cmd_id {
 	ETH_RAMROD_UNUSED,
 	ETH_RAMROD_VPORT_START,
@@ -5070,10 +5575,11 @@ enum eth_ramrod_cmd_id {
 	ETH_RAMROD_RX_DELETE_UDP_FILTER,
 	ETH_RAMROD_RX_CREATE_GFT_ACTION,
 	ETH_RAMROD_GFT_UPDATE_FILTER,
+	ETH_RAMROD_TX_QUEUE_UPDATE,
 	MAX_ETH_RAMROD_CMD_ID
 };
 
-/* return code from eth sp ramrods */
+/* Return code from eth sp ramrods */
 struct eth_return_code {
 	u8 value;
 #define ETH_RETURN_CODE_ERR_CODE_MASK	0x1F
@@ -5209,18 +5715,14 @@ struct eth_vport_tx_mode {
 	__le16 reserved2[3];
 };
 
+/* GFT filter update action type */
 enum gft_filter_update_action {
 	GFT_ADD_FILTER,
 	GFT_DELETE_FILTER,
 	MAX_GFT_FILTER_UPDATE_ACTION
 };
 
-enum gft_logic_filter_type {
-	GFT_FILTER_TYPE,
-	RFS_FILTER_TYPE,
-	MAX_GFT_LOGIC_FILTER_TYPE
-};
-
+/* Ramrod data for rx add openflow filter */
 struct rx_add_openflow_filter_data {
 	__le16 action_icid;
 	u8 priority;
@@ -5244,11 +5746,13 @@ struct rx_add_openflow_filter_data {
 	__le16 l4_src_port;
 };
 
+/* Ramrod data for rx create gft action */
 struct rx_create_gft_action_data {
 	u8 vport_id;
 	u8 reserved[7];
 };
 
+/* Ramrod data for rx create openflow action */
 struct rx_create_openflow_action_data {
 	u8 vport_id;
 	u8 reserved[7];
@@ -5286,7 +5790,7 @@ struct rx_queue_start_ramrod_data {
 	struct regpair reserved2;
 };
 
-/* Ramrod data for rx queue start ramrod */
+/* Ramrod data for rx queue stop ramrod */
 struct rx_queue_stop_ramrod_data {
 	__le16 rx_queue_id;
 	u8 complete_cqe_flg;
@@ -5324,14 +5828,22 @@ struct rx_udp_filter_data {
 	__le32 tenant_id;
 };
 
+/* Add or delete GFT filter - the filter is described by the header of the
+ * packet type that is meant to be steered to a certain FW flow.
+ */
 struct rx_update_gft_filter_data {
 	struct regpair pkt_hdr_addr;
 	__le16 pkt_hdr_length;
-	__le16 rx_qid_or_action_icid;
-	u8 vport_id;
-	u8 filter_type;
+	__le16 action_icid;
+	__le16 rx_qid;
+	__le16 flow_id;
+	__le16 vport_id;
+	u8 action_icid_valid;
+	u8 rx_qid_valid;
+	u8 flow_id_valid;
 	u8 filter_action;
 	u8 assert_on_error;
+	u8 reserved;
 };
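
A minimal sketch of filling the reworked structure when adding a queue-directed
filter; the helper and its arguments are hypothetical, only the struct layout
and GFT_ADD_FILTER come from this patch, and DMA_REGPAIR_LE()/cpu_to_le16() are
assumed from the existing qed headers:

	/* Hypothetical illustration - sets the new per-field validity flags */
	static void fill_gft_filter(struct rx_update_gft_filter_data *p,
				    dma_addr_t hdr_addr, u16 hdr_len,
				    u16 rx_qid, u16 vport_id)
	{
		DMA_REGPAIR_LE(p->pkt_hdr_addr, hdr_addr);
		p->pkt_hdr_length = cpu_to_le16(hdr_len);
		p->rx_qid = cpu_to_le16(rx_qid);
		p->rx_qid_valid = 1;
		p->vport_id = cpu_to_le16(vport_id);
		p->filter_action = GFT_ADD_FILTER;
		p->assert_on_error = 1;
	}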
 
 /* Ramrod data for rx queue start ramrod */
@@ -5377,6 +5889,14 @@ struct tx_queue_stop_ramrod_data {
 	__le16 reserved[4];
 };
 
+/* Ramrod data for tx queue update ramrod */
+struct tx_queue_update_ramrod_data {
+	__le16 update_qm_pq_id_flg;
+	__le16 qm_pq_id;
+	__le32 reserved0;
+	struct regpair reserved1[5];
+};
+
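A minimal sketch of using the new structure, assuming only cpu_to_le16() from
the kernel; the helper name is hypothetical:

	/* Hypothetical illustration - requests a QM PQ change for a running
	 * TX queue via the ETH_RAMROD_TX_QUEUE_UPDATE ramrod added above
	 */
	static void fill_txq_update(struct tx_queue_update_ramrod_data *p,
				    u16 new_pq_id)
	{
		p->update_qm_pq_id_flg = cpu_to_le16(1);
		p->qm_pq_id = cpu_to_le16(new_pq_id);
	}
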
 /* Ramrod data for vport update ramrod */
 struct vport_filter_update_ramrod_data {
 	struct eth_filter_cmd_header filter_cmd_hdr;
@@ -5477,219 +5997,219 @@ struct vport_update_ramrod_data {
 	struct eth_vport_rss_config rss_config;
 };
 
-struct xstorm_eth_conn_agctxdq_ext_ldpart {
+struct e4_xstorm_eth_conn_ag_ctx_dq_ext_ldpart {
 	u8 reserved0;
-	u8 eth_state;
+	u8 state;
 	u8 flags0;
-#define XSTORMETHCONNAGCTXDQEXTLDPART_EXIST_IN_QM0_MASK	0x1
-#define XSTORMETHCONNAGCTXDQEXTLDPART_EXIST_IN_QM0_SHIFT           0
-#define XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED1_MASK	0x1
-#define XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED1_SHIFT              1
-#define XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED2_MASK	0x1
-#define XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED2_SHIFT              2
-#define XSTORMETHCONNAGCTXDQEXTLDPART_EXIST_IN_QM3_MASK	0x1
-#define XSTORMETHCONNAGCTXDQEXTLDPART_EXIST_IN_QM3_SHIFT           3
-#define XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED3_MASK	0x1
-#define XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED3_SHIFT              4
-#define XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED4_MASK	0x1
-#define XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED4_SHIFT              5
-#define XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED5_MASK	0x1
-#define XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED5_SHIFT              6
-#define XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED6_MASK	0x1
-#define XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED6_SHIFT              7
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_EXIST_IN_QM0_MASK	0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_EXIST_IN_QM0_SHIFT	0
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED1_MASK		0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED1_SHIFT		1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED2_MASK		0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED2_SHIFT		2
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_EXIST_IN_QM3_MASK	0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_EXIST_IN_QM3_SHIFT	3
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED3_MASK		0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED3_SHIFT		4
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED4_MASK		0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED4_SHIFT		5
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED5_MASK		0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED5_SHIFT		6
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED6_MASK		0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED6_SHIFT		7
 	u8 flags1;
-#define XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED7_MASK	0x1
-#define XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED7_SHIFT              0
-#define XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED8_MASK	0x1
-#define XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED8_SHIFT              1
-#define XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED9_MASK	0x1
-#define XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED9_SHIFT              2
-#define XSTORMETHCONNAGCTXDQEXTLDPART_BIT11_MASK	0x1
-#define XSTORMETHCONNAGCTXDQEXTLDPART_BIT11_SHIFT                  3
-#define XSTORMETHCONNAGCTXDQEXTLDPART_BIT12_MASK	0x1
-#define XSTORMETHCONNAGCTXDQEXTLDPART_BIT12_SHIFT                  4
-#define XSTORMETHCONNAGCTXDQEXTLDPART_BIT13_MASK	0x1
-#define XSTORMETHCONNAGCTXDQEXTLDPART_BIT13_SHIFT                  5
-#define XSTORMETHCONNAGCTXDQEXTLDPART_TX_RULE_ACTIVE_MASK	0x1
-#define XSTORMETHCONNAGCTXDQEXTLDPART_TX_RULE_ACTIVE_SHIFT         6
-#define XSTORMETHCONNAGCTXDQEXTLDPART_DQ_CF_ACTIVE_MASK	0x1
-#define XSTORMETHCONNAGCTXDQEXTLDPART_DQ_CF_ACTIVE_SHIFT           7
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED7_MASK		0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED7_SHIFT		0
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED8_MASK		0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED8_SHIFT		1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED9_MASK		0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED9_SHIFT		2
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_BIT11_MASK		0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_BIT11_SHIFT		3
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_E5_RESERVED2_MASK	0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_E5_RESERVED2_SHIFT	4
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_E5_RESERVED3_MASK	0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_E5_RESERVED3_SHIFT	5
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_TX_RULE_ACTIVE_MASK	0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_TX_RULE_ACTIVE_SHIFT	6
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_DQ_CF_ACTIVE_MASK	0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_DQ_CF_ACTIVE_SHIFT	7
 	u8 flags2;
-#define XSTORMETHCONNAGCTXDQEXTLDPART_CF0_MASK	0x3
-#define XSTORMETHCONNAGCTXDQEXTLDPART_CF0_SHIFT                    0
-#define XSTORMETHCONNAGCTXDQEXTLDPART_CF1_MASK	0x3
-#define XSTORMETHCONNAGCTXDQEXTLDPART_CF1_SHIFT                    2
-#define XSTORMETHCONNAGCTXDQEXTLDPART_CF2_MASK	0x3
-#define XSTORMETHCONNAGCTXDQEXTLDPART_CF2_SHIFT                    4
-#define XSTORMETHCONNAGCTXDQEXTLDPART_CF3_MASK	0x3
-#define XSTORMETHCONNAGCTXDQEXTLDPART_CF3_SHIFT                    6
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF0_MASK	0x3
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF0_SHIFT	0
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF1_MASK	0x3
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF1_SHIFT	2
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF2_MASK	0x3
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF2_SHIFT	4
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF3_MASK	0x3
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF3_SHIFT	6
 	u8 flags3;
-#define XSTORMETHCONNAGCTXDQEXTLDPART_CF4_MASK	0x3
-#define XSTORMETHCONNAGCTXDQEXTLDPART_CF4_SHIFT                    0
-#define XSTORMETHCONNAGCTXDQEXTLDPART_CF5_MASK	0x3
-#define XSTORMETHCONNAGCTXDQEXTLDPART_CF5_SHIFT                    2
-#define XSTORMETHCONNAGCTXDQEXTLDPART_CF6_MASK	0x3
-#define XSTORMETHCONNAGCTXDQEXTLDPART_CF6_SHIFT                    4
-#define XSTORMETHCONNAGCTXDQEXTLDPART_CF7_MASK	0x3
-#define XSTORMETHCONNAGCTXDQEXTLDPART_CF7_SHIFT                    6
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF4_MASK	0x3
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF4_SHIFT	0
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF5_MASK	0x3
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF5_SHIFT	2
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF6_MASK	0x3
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF6_SHIFT	4
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF7_MASK	0x3
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF7_SHIFT	6
 	u8 flags4;
-#define XSTORMETHCONNAGCTXDQEXTLDPART_CF8_MASK	0x3
-#define XSTORMETHCONNAGCTXDQEXTLDPART_CF8_SHIFT                    0
-#define XSTORMETHCONNAGCTXDQEXTLDPART_CF9_MASK	0x3
-#define XSTORMETHCONNAGCTXDQEXTLDPART_CF9_SHIFT                    2
-#define XSTORMETHCONNAGCTXDQEXTLDPART_CF10_MASK	0x3
-#define XSTORMETHCONNAGCTXDQEXTLDPART_CF10_SHIFT                   4
-#define XSTORMETHCONNAGCTXDQEXTLDPART_CF11_MASK	0x3
-#define XSTORMETHCONNAGCTXDQEXTLDPART_CF11_SHIFT                   6
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF8_MASK	0x3
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF8_SHIFT	0
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF9_MASK	0x3
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF9_SHIFT	2
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF10_MASK	0x3
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF10_SHIFT	4
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF11_MASK	0x3
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF11_SHIFT	6
 	u8 flags5;
-#define XSTORMETHCONNAGCTXDQEXTLDPART_CF12_MASK	0x3
-#define XSTORMETHCONNAGCTXDQEXTLDPART_CF12_SHIFT                   0
-#define XSTORMETHCONNAGCTXDQEXTLDPART_CF13_MASK	0x3
-#define XSTORMETHCONNAGCTXDQEXTLDPART_CF13_SHIFT                   2
-#define XSTORMETHCONNAGCTXDQEXTLDPART_CF14_MASK	0x3
-#define XSTORMETHCONNAGCTXDQEXTLDPART_CF14_SHIFT                   4
-#define XSTORMETHCONNAGCTXDQEXTLDPART_CF15_MASK	0x3
-#define XSTORMETHCONNAGCTXDQEXTLDPART_CF15_SHIFT                   6
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF12_MASK	0x3
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF12_SHIFT	0
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF13_MASK	0x3
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF13_SHIFT	2
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF14_MASK	0x3
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF14_SHIFT	4
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF15_MASK	0x3
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF15_SHIFT	6
 	u8 flags6;
-#define XSTORMETHCONNAGCTXDQEXTLDPART_GO_TO_BD_CONS_CF_MASK	0x3
-#define XSTORMETHCONNAGCTXDQEXTLDPART_GO_TO_BD_CONS_CF_SHIFT       0
-#define XSTORMETHCONNAGCTXDQEXTLDPART_MULTI_UNICAST_CF_MASK	0x3
-#define XSTORMETHCONNAGCTXDQEXTLDPART_MULTI_UNICAST_CF_SHIFT       2
-#define XSTORMETHCONNAGCTXDQEXTLDPART_DQ_CF_MASK	0x3
-#define XSTORMETHCONNAGCTXDQEXTLDPART_DQ_CF_SHIFT                  4
-#define XSTORMETHCONNAGCTXDQEXTLDPART_TERMINATE_CF_MASK	0x3
-#define XSTORMETHCONNAGCTXDQEXTLDPART_TERMINATE_CF_SHIFT           6
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_GO_TO_BD_CONS_CF_MASK	0x3
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_GO_TO_BD_CONS_CF_SHIFT	0
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_MULTI_UNICAST_CF_MASK	0x3
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_MULTI_UNICAST_CF_SHIFT	2
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_DQ_CF_MASK		0x3
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_DQ_CF_SHIFT		4
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_TERMINATE_CF_MASK	0x3
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_TERMINATE_CF_SHIFT	6
 	u8 flags7;
-#define XSTORMETHCONNAGCTXDQEXTLDPART_FLUSH_Q0_MASK	0x3
-#define XSTORMETHCONNAGCTXDQEXTLDPART_FLUSH_Q0_SHIFT               0
-#define XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED10_MASK	0x3
-#define XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED10_SHIFT             2
-#define XSTORMETHCONNAGCTXDQEXTLDPART_SLOW_PATH_MASK	0x3
-#define XSTORMETHCONNAGCTXDQEXTLDPART_SLOW_PATH_SHIFT              4
-#define XSTORMETHCONNAGCTXDQEXTLDPART_CF0EN_MASK	0x1
-#define XSTORMETHCONNAGCTXDQEXTLDPART_CF0EN_SHIFT                  6
-#define XSTORMETHCONNAGCTXDQEXTLDPART_CF1EN_MASK	0x1
-#define XSTORMETHCONNAGCTXDQEXTLDPART_CF1EN_SHIFT                  7
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_FLUSH_Q0_MASK		0x3
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_FLUSH_Q0_SHIFT		0
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED10_MASK		0x3
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED10_SHIFT	2
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_SLOW_PATH_MASK		0x3
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_SLOW_PATH_SHIFT		4
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF0EN_MASK		0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF0EN_SHIFT		6
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF1EN_MASK		0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF1EN_SHIFT		7
 	u8 flags8;
-#define XSTORMETHCONNAGCTXDQEXTLDPART_CF2EN_MASK	0x1
-#define XSTORMETHCONNAGCTXDQEXTLDPART_CF2EN_SHIFT                  0
-#define XSTORMETHCONNAGCTXDQEXTLDPART_CF3EN_MASK	0x1
-#define XSTORMETHCONNAGCTXDQEXTLDPART_CF3EN_SHIFT                  1
-#define XSTORMETHCONNAGCTXDQEXTLDPART_CF4EN_MASK	0x1
-#define XSTORMETHCONNAGCTXDQEXTLDPART_CF4EN_SHIFT                  2
-#define XSTORMETHCONNAGCTXDQEXTLDPART_CF5EN_MASK	0x1
-#define XSTORMETHCONNAGCTXDQEXTLDPART_CF5EN_SHIFT                  3
-#define XSTORMETHCONNAGCTXDQEXTLDPART_CF6EN_MASK	0x1
-#define XSTORMETHCONNAGCTXDQEXTLDPART_CF6EN_SHIFT                  4
-#define XSTORMETHCONNAGCTXDQEXTLDPART_CF7EN_MASK	0x1
-#define XSTORMETHCONNAGCTXDQEXTLDPART_CF7EN_SHIFT                  5
-#define XSTORMETHCONNAGCTXDQEXTLDPART_CF8EN_MASK	0x1
-#define XSTORMETHCONNAGCTXDQEXTLDPART_CF8EN_SHIFT                  6
-#define XSTORMETHCONNAGCTXDQEXTLDPART_CF9EN_MASK	0x1
-#define XSTORMETHCONNAGCTXDQEXTLDPART_CF9EN_SHIFT                  7
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF2EN_MASK	0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF2EN_SHIFT	0
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF3EN_MASK	0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF3EN_SHIFT	1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF4EN_MASK	0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF4EN_SHIFT	2
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF5EN_MASK	0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF5EN_SHIFT	3
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF6EN_MASK	0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF6EN_SHIFT	4
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF7EN_MASK	0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF7EN_SHIFT	5
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF8EN_MASK	0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF8EN_SHIFT	6
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF9EN_MASK	0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF9EN_SHIFT	7
 	u8 flags9;
-#define XSTORMETHCONNAGCTXDQEXTLDPART_CF10EN_MASK	0x1
-#define XSTORMETHCONNAGCTXDQEXTLDPART_CF10EN_SHIFT                 0
-#define XSTORMETHCONNAGCTXDQEXTLDPART_CF11EN_MASK	0x1
-#define XSTORMETHCONNAGCTXDQEXTLDPART_CF11EN_SHIFT                 1
-#define XSTORMETHCONNAGCTXDQEXTLDPART_CF12EN_MASK	0x1
-#define XSTORMETHCONNAGCTXDQEXTLDPART_CF12EN_SHIFT                 2
-#define XSTORMETHCONNAGCTXDQEXTLDPART_CF13EN_MASK	0x1
-#define XSTORMETHCONNAGCTXDQEXTLDPART_CF13EN_SHIFT                 3
-#define XSTORMETHCONNAGCTXDQEXTLDPART_CF14EN_MASK	0x1
-#define XSTORMETHCONNAGCTXDQEXTLDPART_CF14EN_SHIFT                 4
-#define XSTORMETHCONNAGCTXDQEXTLDPART_CF15EN_MASK	0x1
-#define XSTORMETHCONNAGCTXDQEXTLDPART_CF15EN_SHIFT                 5
-#define XSTORMETHCONNAGCTXDQEXTLDPART_GO_TO_BD_CONS_CF_EN_MASK	0x1
-#define XSTORMETHCONNAGCTXDQEXTLDPART_GO_TO_BD_CONS_CF_EN_SHIFT    6
-#define XSTORMETHCONNAGCTXDQEXTLDPART_MULTI_UNICAST_CF_EN_MASK	0x1
-#define XSTORMETHCONNAGCTXDQEXTLDPART_MULTI_UNICAST_CF_EN_SHIFT    7
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF10EN_MASK			0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF10EN_SHIFT			0
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF11EN_MASK			0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF11EN_SHIFT			1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF12EN_MASK			0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF12EN_SHIFT			2
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF13EN_MASK			0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF13EN_SHIFT			3
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF14EN_MASK			0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF14EN_SHIFT			4
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF15EN_MASK			0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF15EN_SHIFT			5
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_GO_TO_BD_CONS_CF_EN_MASK	0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_GO_TO_BD_CONS_CF_EN_SHIFT	6
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_MULTI_UNICAST_CF_EN_MASK	0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_MULTI_UNICAST_CF_EN_SHIFT	7
 	u8 flags10;
-#define XSTORMETHCONNAGCTXDQEXTLDPART_DQ_CF_EN_MASK	0x1
-#define XSTORMETHCONNAGCTXDQEXTLDPART_DQ_CF_EN_SHIFT               0
-#define XSTORMETHCONNAGCTXDQEXTLDPART_TERMINATE_CF_EN_MASK	0x1
-#define XSTORMETHCONNAGCTXDQEXTLDPART_TERMINATE_CF_EN_SHIFT        1
-#define XSTORMETHCONNAGCTXDQEXTLDPART_FLUSH_Q0_EN_MASK	0x1
-#define XSTORMETHCONNAGCTXDQEXTLDPART_FLUSH_Q0_EN_SHIFT            2
-#define XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED11_MASK	0x1
-#define XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED11_SHIFT             3
-#define XSTORMETHCONNAGCTXDQEXTLDPART_SLOW_PATH_EN_MASK	0x1
-#define XSTORMETHCONNAGCTXDQEXTLDPART_SLOW_PATH_EN_SHIFT           4
-#define XSTORMETHCONNAGCTXDQEXTLDPART_TPH_ENABLE_EN_RESERVED_MASK	0x1
-#define XSTORMETHCONNAGCTXDQEXTLDPART_TPH_ENABLE_EN_RESERVED_SHIFT 5
-#define XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED12_MASK	0x1
-#define XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED12_SHIFT             6
-#define XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED13_MASK	0x1
-#define XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED13_SHIFT             7
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_DQ_CF_EN_MASK			0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_DQ_CF_EN_SHIFT			0
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_TERMINATE_CF_EN_MASK		0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_TERMINATE_CF_EN_SHIFT		1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_FLUSH_Q0_EN_MASK		0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_FLUSH_Q0_EN_SHIFT		2
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED11_MASK			0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED11_SHIFT		3
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_SLOW_PATH_EN_MASK		0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_SLOW_PATH_EN_SHIFT		4
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_TPH_ENABLE_EN_RESERVED_MASK	0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_TPH_ENABLE_EN_RESERVED_SHIFT	5
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED12_MASK			0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED12_SHIFT		6
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED13_MASK			0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED13_SHIFT		7
 	u8 flags11;
-#define XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED14_MASK	0x1
-#define XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED14_SHIFT             0
-#define XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED15_MASK	0x1
-#define XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED15_SHIFT             1
-#define XSTORMETHCONNAGCTXDQEXTLDPART_TX_DEC_RULE_EN_MASK	0x1
-#define XSTORMETHCONNAGCTXDQEXTLDPART_TX_DEC_RULE_EN_SHIFT         2
-#define XSTORMETHCONNAGCTXDQEXTLDPART_RULE5EN_MASK	0x1
-#define XSTORMETHCONNAGCTXDQEXTLDPART_RULE5EN_SHIFT                3
-#define XSTORMETHCONNAGCTXDQEXTLDPART_RULE6EN_MASK	0x1
-#define XSTORMETHCONNAGCTXDQEXTLDPART_RULE6EN_SHIFT                4
-#define XSTORMETHCONNAGCTXDQEXTLDPART_RULE7EN_MASK	0x1
-#define XSTORMETHCONNAGCTXDQEXTLDPART_RULE7EN_SHIFT                5
-#define XSTORMETHCONNAGCTXDQEXTLDPART_A0_RESERVED1_MASK	0x1
-#define XSTORMETHCONNAGCTXDQEXTLDPART_A0_RESERVED1_SHIFT           6
-#define XSTORMETHCONNAGCTXDQEXTLDPART_RULE9EN_MASK	0x1
-#define XSTORMETHCONNAGCTXDQEXTLDPART_RULE9EN_SHIFT                7
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED14_MASK		0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED14_SHIFT	0
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED15_MASK		0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED15_SHIFT	1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_TX_DEC_RULE_EN_MASK	0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_TX_DEC_RULE_EN_SHIFT	2
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RULE5EN_MASK		0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RULE5EN_SHIFT		3
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RULE6EN_MASK		0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RULE6EN_SHIFT		4
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RULE7EN_MASK		0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RULE7EN_SHIFT		5
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_A0_RESERVED1_MASK	0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_A0_RESERVED1_SHIFT	6
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RULE9EN_MASK		0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RULE9EN_SHIFT		7
 	u8 flags12;
-#define XSTORMETHCONNAGCTXDQEXTLDPART_RULE10EN_MASK	0x1
-#define XSTORMETHCONNAGCTXDQEXTLDPART_RULE10EN_SHIFT               0
-#define XSTORMETHCONNAGCTXDQEXTLDPART_RULE11EN_MASK	0x1
-#define XSTORMETHCONNAGCTXDQEXTLDPART_RULE11EN_SHIFT               1
-#define XSTORMETHCONNAGCTXDQEXTLDPART_A0_RESERVED2_MASK	0x1
-#define XSTORMETHCONNAGCTXDQEXTLDPART_A0_RESERVED2_SHIFT           2
-#define XSTORMETHCONNAGCTXDQEXTLDPART_A0_RESERVED3_MASK	0x1
-#define XSTORMETHCONNAGCTXDQEXTLDPART_A0_RESERVED3_SHIFT           3
-#define XSTORMETHCONNAGCTXDQEXTLDPART_RULE14EN_MASK	0x1
-#define XSTORMETHCONNAGCTXDQEXTLDPART_RULE14EN_SHIFT               4
-#define XSTORMETHCONNAGCTXDQEXTLDPART_RULE15EN_MASK	0x1
-#define XSTORMETHCONNAGCTXDQEXTLDPART_RULE15EN_SHIFT               5
-#define XSTORMETHCONNAGCTXDQEXTLDPART_RULE16EN_MASK	0x1
-#define XSTORMETHCONNAGCTXDQEXTLDPART_RULE16EN_SHIFT               6
-#define XSTORMETHCONNAGCTXDQEXTLDPART_RULE17EN_MASK	0x1
-#define XSTORMETHCONNAGCTXDQEXTLDPART_RULE17EN_SHIFT               7
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RULE10EN_MASK		0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RULE10EN_SHIFT		0
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RULE11EN_MASK		0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RULE11EN_SHIFT		1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_A0_RESERVED2_MASK	0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_A0_RESERVED2_SHIFT	2
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_A0_RESERVED3_MASK	0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_A0_RESERVED3_SHIFT	3
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RULE14EN_MASK		0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RULE14EN_SHIFT		4
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RULE15EN_MASK		0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RULE15EN_SHIFT		5
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RULE16EN_MASK		0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RULE16EN_SHIFT		6
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RULE17EN_MASK		0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RULE17EN_SHIFT		7
 	u8 flags13;
-#define XSTORMETHCONNAGCTXDQEXTLDPART_RULE18EN_MASK	0x1
-#define XSTORMETHCONNAGCTXDQEXTLDPART_RULE18EN_SHIFT               0
-#define XSTORMETHCONNAGCTXDQEXTLDPART_RULE19EN_MASK	0x1
-#define XSTORMETHCONNAGCTXDQEXTLDPART_RULE19EN_SHIFT               1
-#define XSTORMETHCONNAGCTXDQEXTLDPART_A0_RESERVED4_MASK	0x1
-#define XSTORMETHCONNAGCTXDQEXTLDPART_A0_RESERVED4_SHIFT           2
-#define XSTORMETHCONNAGCTXDQEXTLDPART_A0_RESERVED5_MASK	0x1
-#define XSTORMETHCONNAGCTXDQEXTLDPART_A0_RESERVED5_SHIFT           3
-#define XSTORMETHCONNAGCTXDQEXTLDPART_A0_RESERVED6_MASK	0x1
-#define XSTORMETHCONNAGCTXDQEXTLDPART_A0_RESERVED6_SHIFT           4
-#define XSTORMETHCONNAGCTXDQEXTLDPART_A0_RESERVED7_MASK	0x1
-#define XSTORMETHCONNAGCTXDQEXTLDPART_A0_RESERVED7_SHIFT           5
-#define XSTORMETHCONNAGCTXDQEXTLDPART_A0_RESERVED8_MASK	0x1
-#define XSTORMETHCONNAGCTXDQEXTLDPART_A0_RESERVED8_SHIFT           6
-#define XSTORMETHCONNAGCTXDQEXTLDPART_A0_RESERVED9_MASK	0x1
-#define XSTORMETHCONNAGCTXDQEXTLDPART_A0_RESERVED9_SHIFT           7
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RULE18EN_MASK		0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RULE18EN_SHIFT		0
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RULE19EN_MASK		0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RULE19EN_SHIFT		1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_A0_RESERVED4_MASK	0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_A0_RESERVED4_SHIFT	2
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_A0_RESERVED5_MASK	0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_A0_RESERVED5_SHIFT	3
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_A0_RESERVED6_MASK	0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_A0_RESERVED6_SHIFT	4
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_A0_RESERVED7_MASK	0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_A0_RESERVED7_SHIFT	5
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_A0_RESERVED8_MASK	0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_A0_RESERVED8_SHIFT	6
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_A0_RESERVED9_MASK	0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_A0_RESERVED9_SHIFT	7
 	u8 flags14;
-#define XSTORMETHCONNAGCTXDQEXTLDPART_EDPM_USE_EXT_HDR_MASK	0x1
-#define XSTORMETHCONNAGCTXDQEXTLDPART_EDPM_USE_EXT_HDR_SHIFT       0
-#define XSTORMETHCONNAGCTXDQEXTLDPART_EDPM_SEND_RAW_L3L4_MASK	0x1
-#define XSTORMETHCONNAGCTXDQEXTLDPART_EDPM_SEND_RAW_L3L4_SHIFT     1
-#define XSTORMETHCONNAGCTXDQEXTLDPART_EDPM_INBAND_PROP_HDR_MASK	0x1
-#define XSTORMETHCONNAGCTXDQEXTLDPART_EDPM_INBAND_PROP_HDR_SHIFT   2
-#define XSTORMETHCONNAGCTXDQEXTLDPART_EDPM_SEND_EXT_TUNNEL_MASK	0x1
-#define XSTORMETHCONNAGCTXDQEXTLDPART_EDPM_SEND_EXT_TUNNEL_SHIFT   3
-#define XSTORMETHCONNAGCTXDQEXTLDPART_L2_EDPM_ENABLE_MASK	0x1
-#define XSTORMETHCONNAGCTXDQEXTLDPART_L2_EDPM_ENABLE_SHIFT         4
-#define XSTORMETHCONNAGCTXDQEXTLDPART_ROCE_EDPM_ENABLE_MASK	0x1
-#define XSTORMETHCONNAGCTXDQEXTLDPART_ROCE_EDPM_ENABLE_SHIFT       5
-#define XSTORMETHCONNAGCTXDQEXTLDPART_TPH_ENABLE_MASK	0x3
-#define XSTORMETHCONNAGCTXDQEXTLDPART_TPH_ENABLE_SHIFT             6
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_EDPM_USE_EXT_HDR_MASK		0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_EDPM_USE_EXT_HDR_SHIFT		0
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_EDPM_SEND_RAW_L3L4_MASK		0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_EDPM_SEND_RAW_L3L4_SHIFT	1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_EDPM_INBAND_PROP_HDR_MASK	0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_EDPM_INBAND_PROP_HDR_SHIFT	2
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_EDPM_SEND_EXT_TUNNEL_MASK	0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_EDPM_SEND_EXT_TUNNEL_SHIFT	3
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_L2_EDPM_ENABLE_MASK		0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_L2_EDPM_ENABLE_SHIFT		4
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_ROCE_EDPM_ENABLE_MASK		0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_ROCE_EDPM_ENABLE_SHIFT		5
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_TPH_ENABLE_MASK			0x3
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_TPH_ENABLE_SHIFT		6
 	u8 edpm_event_id;
 	__le16 physical_q0;
-	__le16 ereserved1;
+	__le16 e5_reserved1;
 	__le16 edpm_num_bds;
 	__le16 tx_bd_cons;
 	__le16 tx_bd_prod;
@@ -5706,256 +6226,256 @@ struct xstorm_eth_conn_agctxdq_ext_ldpart {
 	__le32 reg4;
 };
 
-struct mstorm_eth_conn_ag_ctx {
+struct e4_mstorm_eth_conn_ag_ctx {
 	u8 byte0;
 	u8 byte1;
 	u8 flags0;
-#define MSTORM_ETH_CONN_AG_CTX_EXIST_IN_QM0_MASK	0x1
-#define MSTORM_ETH_CONN_AG_CTX_EXIST_IN_QM0_SHIFT 0
-#define MSTORM_ETH_CONN_AG_CTX_BIT1_MASK	0x1
-#define MSTORM_ETH_CONN_AG_CTX_BIT1_SHIFT         1
-#define MSTORM_ETH_CONN_AG_CTX_CF0_MASK	0x3
-#define MSTORM_ETH_CONN_AG_CTX_CF0_SHIFT          2
-#define MSTORM_ETH_CONN_AG_CTX_CF1_MASK	0x3
-#define MSTORM_ETH_CONN_AG_CTX_CF1_SHIFT          4
-#define MSTORM_ETH_CONN_AG_CTX_CF2_MASK	0x3
-#define MSTORM_ETH_CONN_AG_CTX_CF2_SHIFT          6
+#define E4_MSTORM_ETH_CONN_AG_CTX_EXIST_IN_QM0_MASK	0x1
+#define E4_MSTORM_ETH_CONN_AG_CTX_EXIST_IN_QM0_SHIFT	0
+#define E4_MSTORM_ETH_CONN_AG_CTX_BIT1_MASK		0x1
+#define E4_MSTORM_ETH_CONN_AG_CTX_BIT1_SHIFT		1
+#define E4_MSTORM_ETH_CONN_AG_CTX_CF0_MASK		0x3
+#define E4_MSTORM_ETH_CONN_AG_CTX_CF0_SHIFT		2
+#define E4_MSTORM_ETH_CONN_AG_CTX_CF1_MASK		0x3
+#define E4_MSTORM_ETH_CONN_AG_CTX_CF1_SHIFT		4
+#define E4_MSTORM_ETH_CONN_AG_CTX_CF2_MASK		0x3
+#define E4_MSTORM_ETH_CONN_AG_CTX_CF2_SHIFT		6
 	u8 flags1;
-#define MSTORM_ETH_CONN_AG_CTX_CF0EN_MASK	0x1
-#define MSTORM_ETH_CONN_AG_CTX_CF0EN_SHIFT        0
-#define MSTORM_ETH_CONN_AG_CTX_CF1EN_MASK	0x1
-#define MSTORM_ETH_CONN_AG_CTX_CF1EN_SHIFT        1
-#define MSTORM_ETH_CONN_AG_CTX_CF2EN_MASK	0x1
-#define MSTORM_ETH_CONN_AG_CTX_CF2EN_SHIFT        2
-#define MSTORM_ETH_CONN_AG_CTX_RULE0EN_MASK	0x1
-#define MSTORM_ETH_CONN_AG_CTX_RULE0EN_SHIFT      3
-#define MSTORM_ETH_CONN_AG_CTX_RULE1EN_MASK	0x1
-#define MSTORM_ETH_CONN_AG_CTX_RULE1EN_SHIFT      4
-#define MSTORM_ETH_CONN_AG_CTX_RULE2EN_MASK	0x1
-#define MSTORM_ETH_CONN_AG_CTX_RULE2EN_SHIFT      5
-#define MSTORM_ETH_CONN_AG_CTX_RULE3EN_MASK	0x1
-#define MSTORM_ETH_CONN_AG_CTX_RULE3EN_SHIFT      6
-#define MSTORM_ETH_CONN_AG_CTX_RULE4EN_MASK	0x1
-#define MSTORM_ETH_CONN_AG_CTX_RULE4EN_SHIFT      7
+#define E4_MSTORM_ETH_CONN_AG_CTX_CF0EN_MASK	0x1
+#define E4_MSTORM_ETH_CONN_AG_CTX_CF0EN_SHIFT	0
+#define E4_MSTORM_ETH_CONN_AG_CTX_CF1EN_MASK	0x1
+#define E4_MSTORM_ETH_CONN_AG_CTX_CF1EN_SHIFT	1
+#define E4_MSTORM_ETH_CONN_AG_CTX_CF2EN_MASK	0x1
+#define E4_MSTORM_ETH_CONN_AG_CTX_CF2EN_SHIFT	2
+#define E4_MSTORM_ETH_CONN_AG_CTX_RULE0EN_MASK	0x1
+#define E4_MSTORM_ETH_CONN_AG_CTX_RULE0EN_SHIFT	3
+#define E4_MSTORM_ETH_CONN_AG_CTX_RULE1EN_MASK	0x1
+#define E4_MSTORM_ETH_CONN_AG_CTX_RULE1EN_SHIFT	4
+#define E4_MSTORM_ETH_CONN_AG_CTX_RULE2EN_MASK	0x1
+#define E4_MSTORM_ETH_CONN_AG_CTX_RULE2EN_SHIFT	5
+#define E4_MSTORM_ETH_CONN_AG_CTX_RULE3EN_MASK	0x1
+#define E4_MSTORM_ETH_CONN_AG_CTX_RULE3EN_SHIFT	6
+#define E4_MSTORM_ETH_CONN_AG_CTX_RULE4EN_MASK	0x1
+#define E4_MSTORM_ETH_CONN_AG_CTX_RULE4EN_SHIFT	7
 	__le16 word0;
 	__le16 word1;
 	__le32 reg0;
 	__le32 reg1;
 };
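
The renamed E4_ macros are used exactly like their predecessors; a minimal
sketch, assuming the existing GET_FIELD() helper from common_hsi.h:

	/* Hypothetical illustration - reads one flag from the renamed
	 * aggregation context; only the struct/macro names come from
	 * this patch
	 */
	static bool e4_mstorm_cf0_enabled(struct e4_mstorm_eth_conn_ag_ctx *ctx)
	{
		return GET_FIELD(ctx->flags1, E4_MSTORM_ETH_CONN_AG_CTX_CF0EN);
	}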
 
-struct xstorm_eth_hw_conn_ag_ctx {
+struct e4_xstorm_eth_hw_conn_ag_ctx {
 	u8 reserved0;
-	u8 eth_state;
+	u8 state;
 	u8 flags0;
-#define XSTORM_ETH_HW_CONN_AG_CTX_EXIST_IN_QM0_MASK	0x1
-#define XSTORM_ETH_HW_CONN_AG_CTX_EXIST_IN_QM0_SHIFT           0
-#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED1_MASK	0x1
-#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED1_SHIFT              1
-#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED2_MASK	0x1
-#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED2_SHIFT              2
-#define XSTORM_ETH_HW_CONN_AG_CTX_EXIST_IN_QM3_MASK	0x1
-#define XSTORM_ETH_HW_CONN_AG_CTX_EXIST_IN_QM3_SHIFT           3
-#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED3_MASK	0x1
-#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED3_SHIFT              4
-#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED4_MASK	0x1
-#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED4_SHIFT              5
-#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED5_MASK	0x1
-#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED5_SHIFT              6
-#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED6_MASK	0x1
-#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED6_SHIFT              7
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_EXIST_IN_QM0_MASK	0x1
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_EXIST_IN_QM0_SHIFT	0
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RESERVED1_MASK	0x1
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RESERVED1_SHIFT	1
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RESERVED2_MASK	0x1
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RESERVED2_SHIFT	2
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_EXIST_IN_QM3_MASK	0x1
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_EXIST_IN_QM3_SHIFT	3
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RESERVED3_MASK	0x1
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RESERVED3_SHIFT	4
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RESERVED4_MASK	0x1
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RESERVED4_SHIFT	5
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RESERVED5_MASK	0x1
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RESERVED5_SHIFT	6
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RESERVED6_MASK	0x1
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RESERVED6_SHIFT	7
 	u8 flags1;
-#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED7_MASK	0x1
-#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED7_SHIFT              0
-#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED8_MASK	0x1
-#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED8_SHIFT              1
-#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED9_MASK	0x1
-#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED9_SHIFT              2
-#define XSTORM_ETH_HW_CONN_AG_CTX_BIT11_MASK	0x1
-#define XSTORM_ETH_HW_CONN_AG_CTX_BIT11_SHIFT                  3
-#define XSTORM_ETH_HW_CONN_AG_CTX_BIT12_MASK	0x1
-#define XSTORM_ETH_HW_CONN_AG_CTX_BIT12_SHIFT                  4
-#define XSTORM_ETH_HW_CONN_AG_CTX_BIT13_MASK	0x1
-#define XSTORM_ETH_HW_CONN_AG_CTX_BIT13_SHIFT                  5
-#define XSTORM_ETH_HW_CONN_AG_CTX_TX_RULE_ACTIVE_MASK	0x1
-#define XSTORM_ETH_HW_CONN_AG_CTX_TX_RULE_ACTIVE_SHIFT         6
-#define XSTORM_ETH_HW_CONN_AG_CTX_DQ_CF_ACTIVE_MASK	0x1
-#define XSTORM_ETH_HW_CONN_AG_CTX_DQ_CF_ACTIVE_SHIFT           7
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RESERVED7_MASK		0x1
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RESERVED7_SHIFT		0
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RESERVED8_MASK		0x1
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RESERVED8_SHIFT		1
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RESERVED9_MASK		0x1
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RESERVED9_SHIFT		2
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_BIT11_MASK			0x1
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_BIT11_SHIFT		3
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_E5_RESERVED2_MASK		0x1
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_E5_RESERVED2_SHIFT		4
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_E5_RESERVED3_MASK		0x1
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_E5_RESERVED3_SHIFT		5
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_TX_RULE_ACTIVE_MASK	0x1
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_TX_RULE_ACTIVE_SHIFT	6
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_DQ_CF_ACTIVE_MASK		0x1
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_DQ_CF_ACTIVE_SHIFT		7
 	u8 flags2;
-#define XSTORM_ETH_HW_CONN_AG_CTX_CF0_MASK	0x3
-#define XSTORM_ETH_HW_CONN_AG_CTX_CF0_SHIFT                    0
-#define XSTORM_ETH_HW_CONN_AG_CTX_CF1_MASK	0x3
-#define XSTORM_ETH_HW_CONN_AG_CTX_CF1_SHIFT                    2
-#define XSTORM_ETH_HW_CONN_AG_CTX_CF2_MASK	0x3
-#define XSTORM_ETH_HW_CONN_AG_CTX_CF2_SHIFT                    4
-#define XSTORM_ETH_HW_CONN_AG_CTX_CF3_MASK	0x3
-#define XSTORM_ETH_HW_CONN_AG_CTX_CF3_SHIFT                    6
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF0_MASK	0x3
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF0_SHIFT	0
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF1_MASK	0x3
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF1_SHIFT	2
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF2_MASK	0x3
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF2_SHIFT	4
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF3_MASK	0x3
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF3_SHIFT	6
 	u8 flags3;
-#define XSTORM_ETH_HW_CONN_AG_CTX_CF4_MASK	0x3
-#define XSTORM_ETH_HW_CONN_AG_CTX_CF4_SHIFT                    0
-#define XSTORM_ETH_HW_CONN_AG_CTX_CF5_MASK	0x3
-#define XSTORM_ETH_HW_CONN_AG_CTX_CF5_SHIFT                    2
-#define XSTORM_ETH_HW_CONN_AG_CTX_CF6_MASK	0x3
-#define XSTORM_ETH_HW_CONN_AG_CTX_CF6_SHIFT                    4
-#define XSTORM_ETH_HW_CONN_AG_CTX_CF7_MASK	0x3
-#define XSTORM_ETH_HW_CONN_AG_CTX_CF7_SHIFT                    6
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF4_MASK	0x3
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF4_SHIFT	0
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF5_MASK	0x3
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF5_SHIFT	2
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF6_MASK	0x3
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF6_SHIFT	4
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF7_MASK	0x3
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF7_SHIFT	6
 	u8 flags4;
-#define XSTORM_ETH_HW_CONN_AG_CTX_CF8_MASK	0x3
-#define XSTORM_ETH_HW_CONN_AG_CTX_CF8_SHIFT                    0
-#define XSTORM_ETH_HW_CONN_AG_CTX_CF9_MASK	0x3
-#define XSTORM_ETH_HW_CONN_AG_CTX_CF9_SHIFT                    2
-#define XSTORM_ETH_HW_CONN_AG_CTX_CF10_MASK	0x3
-#define XSTORM_ETH_HW_CONN_AG_CTX_CF10_SHIFT                   4
-#define XSTORM_ETH_HW_CONN_AG_CTX_CF11_MASK	0x3
-#define XSTORM_ETH_HW_CONN_AG_CTX_CF11_SHIFT                   6
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF8_MASK	0x3
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF8_SHIFT	0
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF9_MASK	0x3
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF9_SHIFT	2
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF10_MASK	0x3
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF10_SHIFT	4
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF11_MASK	0x3
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF11_SHIFT	6
 	u8 flags5;
-#define XSTORM_ETH_HW_CONN_AG_CTX_CF12_MASK	0x3
-#define XSTORM_ETH_HW_CONN_AG_CTX_CF12_SHIFT                   0
-#define XSTORM_ETH_HW_CONN_AG_CTX_CF13_MASK	0x3
-#define XSTORM_ETH_HW_CONN_AG_CTX_CF13_SHIFT                   2
-#define XSTORM_ETH_HW_CONN_AG_CTX_CF14_MASK	0x3
-#define XSTORM_ETH_HW_CONN_AG_CTX_CF14_SHIFT                   4
-#define XSTORM_ETH_HW_CONN_AG_CTX_CF15_MASK	0x3
-#define XSTORM_ETH_HW_CONN_AG_CTX_CF15_SHIFT                   6
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF12_MASK	0x3
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF12_SHIFT	0
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF13_MASK	0x3
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF13_SHIFT	2
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF14_MASK	0x3
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF14_SHIFT	4
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF15_MASK	0x3
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF15_SHIFT	6
 	u8 flags6;
-#define XSTORM_ETH_HW_CONN_AG_CTX_GO_TO_BD_CONS_CF_MASK	0x3
-#define XSTORM_ETH_HW_CONN_AG_CTX_GO_TO_BD_CONS_CF_SHIFT       0
-#define XSTORM_ETH_HW_CONN_AG_CTX_MULTI_UNICAST_CF_MASK	0x3
-#define XSTORM_ETH_HW_CONN_AG_CTX_MULTI_UNICAST_CF_SHIFT       2
-#define XSTORM_ETH_HW_CONN_AG_CTX_DQ_CF_MASK	0x3
-#define XSTORM_ETH_HW_CONN_AG_CTX_DQ_CF_SHIFT                  4
-#define XSTORM_ETH_HW_CONN_AG_CTX_TERMINATE_CF_MASK	0x3
-#define XSTORM_ETH_HW_CONN_AG_CTX_TERMINATE_CF_SHIFT           6
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_GO_TO_BD_CONS_CF_MASK	0x3
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_GO_TO_BD_CONS_CF_SHIFT	0
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_MULTI_UNICAST_CF_MASK	0x3
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_MULTI_UNICAST_CF_SHIFT	2
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_DQ_CF_MASK			0x3
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_DQ_CF_SHIFT		4
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_TERMINATE_CF_MASK		0x3
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_TERMINATE_CF_SHIFT		6
 	u8 flags7;
-#define XSTORM_ETH_HW_CONN_AG_CTX_FLUSH_Q0_MASK	0x3
-#define XSTORM_ETH_HW_CONN_AG_CTX_FLUSH_Q0_SHIFT               0
-#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED10_MASK	0x3
-#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED10_SHIFT             2
-#define XSTORM_ETH_HW_CONN_AG_CTX_SLOW_PATH_MASK	0x3
-#define XSTORM_ETH_HW_CONN_AG_CTX_SLOW_PATH_SHIFT              4
-#define XSTORM_ETH_HW_CONN_AG_CTX_CF0EN_MASK	0x1
-#define XSTORM_ETH_HW_CONN_AG_CTX_CF0EN_SHIFT                  6
-#define XSTORM_ETH_HW_CONN_AG_CTX_CF1EN_MASK	0x1
-#define XSTORM_ETH_HW_CONN_AG_CTX_CF1EN_SHIFT                  7
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_FLUSH_Q0_MASK	0x3
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_FLUSH_Q0_SHIFT	0
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RESERVED10_MASK	0x3
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RESERVED10_SHIFT	2
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_SLOW_PATH_MASK	0x3
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_SLOW_PATH_SHIFT	4
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF0EN_MASK		0x1
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF0EN_SHIFT	6
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF1EN_MASK		0x1
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF1EN_SHIFT	7
 	u8 flags8;
-#define XSTORM_ETH_HW_CONN_AG_CTX_CF2EN_MASK	0x1
-#define XSTORM_ETH_HW_CONN_AG_CTX_CF2EN_SHIFT                  0
-#define XSTORM_ETH_HW_CONN_AG_CTX_CF3EN_MASK	0x1
-#define XSTORM_ETH_HW_CONN_AG_CTX_CF3EN_SHIFT                  1
-#define XSTORM_ETH_HW_CONN_AG_CTX_CF4EN_MASK	0x1
-#define XSTORM_ETH_HW_CONN_AG_CTX_CF4EN_SHIFT                  2
-#define XSTORM_ETH_HW_CONN_AG_CTX_CF5EN_MASK	0x1
-#define XSTORM_ETH_HW_CONN_AG_CTX_CF5EN_SHIFT                  3
-#define XSTORM_ETH_HW_CONN_AG_CTX_CF6EN_MASK	0x1
-#define XSTORM_ETH_HW_CONN_AG_CTX_CF6EN_SHIFT                  4
-#define XSTORM_ETH_HW_CONN_AG_CTX_CF7EN_MASK	0x1
-#define XSTORM_ETH_HW_CONN_AG_CTX_CF7EN_SHIFT                  5
-#define XSTORM_ETH_HW_CONN_AG_CTX_CF8EN_MASK	0x1
-#define XSTORM_ETH_HW_CONN_AG_CTX_CF8EN_SHIFT                  6
-#define XSTORM_ETH_HW_CONN_AG_CTX_CF9EN_MASK	0x1
-#define XSTORM_ETH_HW_CONN_AG_CTX_CF9EN_SHIFT                  7
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF2EN_MASK		0x1
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF2EN_SHIFT	0
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF3EN_MASK		0x1
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF3EN_SHIFT	1
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF4EN_MASK		0x1
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF4EN_SHIFT	2
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF5EN_MASK		0x1
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF5EN_SHIFT	3
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF6EN_MASK		0x1
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF6EN_SHIFT	4
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF7EN_MASK		0x1
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF7EN_SHIFT	5
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF8EN_MASK		0x1
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF8EN_SHIFT	6
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF9EN_MASK		0x1
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF9EN_SHIFT	7
 	u8 flags9;
-#define XSTORM_ETH_HW_CONN_AG_CTX_CF10EN_MASK	0x1
-#define XSTORM_ETH_HW_CONN_AG_CTX_CF10EN_SHIFT                 0
-#define XSTORM_ETH_HW_CONN_AG_CTX_CF11EN_MASK	0x1
-#define XSTORM_ETH_HW_CONN_AG_CTX_CF11EN_SHIFT                 1
-#define XSTORM_ETH_HW_CONN_AG_CTX_CF12EN_MASK	0x1
-#define XSTORM_ETH_HW_CONN_AG_CTX_CF12EN_SHIFT                 2
-#define XSTORM_ETH_HW_CONN_AG_CTX_CF13EN_MASK	0x1
-#define XSTORM_ETH_HW_CONN_AG_CTX_CF13EN_SHIFT                 3
-#define XSTORM_ETH_HW_CONN_AG_CTX_CF14EN_MASK	0x1
-#define XSTORM_ETH_HW_CONN_AG_CTX_CF14EN_SHIFT                 4
-#define XSTORM_ETH_HW_CONN_AG_CTX_CF15EN_MASK	0x1
-#define XSTORM_ETH_HW_CONN_AG_CTX_CF15EN_SHIFT                 5
-#define XSTORM_ETH_HW_CONN_AG_CTX_GO_TO_BD_CONS_CF_EN_MASK	0x1
-#define XSTORM_ETH_HW_CONN_AG_CTX_GO_TO_BD_CONS_CF_EN_SHIFT    6
-#define XSTORM_ETH_HW_CONN_AG_CTX_MULTI_UNICAST_CF_EN_MASK	0x1
-#define XSTORM_ETH_HW_CONN_AG_CTX_MULTI_UNICAST_CF_EN_SHIFT    7
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF10EN_MASK		0x1
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF10EN_SHIFT		0
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF11EN_MASK		0x1
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF11EN_SHIFT		1
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF12EN_MASK		0x1
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF12EN_SHIFT		2
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF13EN_MASK		0x1
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF13EN_SHIFT		3
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF14EN_MASK		0x1
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF14EN_SHIFT		4
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF15EN_MASK		0x1
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF15EN_SHIFT		5
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_GO_TO_BD_CONS_CF_EN_MASK	0x1
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_GO_TO_BD_CONS_CF_EN_SHIFT	6
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_MULTI_UNICAST_CF_EN_MASK	0x1
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_MULTI_UNICAST_CF_EN_SHIFT	7
 	u8 flags10;
-#define XSTORM_ETH_HW_CONN_AG_CTX_DQ_CF_EN_MASK	0x1
-#define XSTORM_ETH_HW_CONN_AG_CTX_DQ_CF_EN_SHIFT               0
-#define XSTORM_ETH_HW_CONN_AG_CTX_TERMINATE_CF_EN_MASK	0x1
-#define XSTORM_ETH_HW_CONN_AG_CTX_TERMINATE_CF_EN_SHIFT        1
-#define XSTORM_ETH_HW_CONN_AG_CTX_FLUSH_Q0_EN_MASK	0x1
-#define XSTORM_ETH_HW_CONN_AG_CTX_FLUSH_Q0_EN_SHIFT            2
-#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED11_MASK	0x1
-#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED11_SHIFT             3
-#define XSTORM_ETH_HW_CONN_AG_CTX_SLOW_PATH_EN_MASK	0x1
-#define XSTORM_ETH_HW_CONN_AG_CTX_SLOW_PATH_EN_SHIFT           4
-#define XSTORM_ETH_HW_CONN_AG_CTX_TPH_ENABLE_EN_RESERVED_MASK	0x1
-#define XSTORM_ETH_HW_CONN_AG_CTX_TPH_ENABLE_EN_RESERVED_SHIFT 5
-#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED12_MASK	0x1
-#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED12_SHIFT             6
-#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED13_MASK	0x1
-#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED13_SHIFT             7
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_DQ_CF_EN_MASK			0x1
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_DQ_CF_EN_SHIFT			0
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_TERMINATE_CF_EN_MASK		0x1
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_TERMINATE_CF_EN_SHIFT		1
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_FLUSH_Q0_EN_MASK			0x1
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_FLUSH_Q0_EN_SHIFT			2
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RESERVED11_MASK			0x1
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RESERVED11_SHIFT			3
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_SLOW_PATH_EN_MASK			0x1
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_SLOW_PATH_EN_SHIFT			4
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_TPH_ENABLE_EN_RESERVED_MASK	0x1
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_TPH_ENABLE_EN_RESERVED_SHIFT	5
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RESERVED12_MASK			0x1
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RESERVED12_SHIFT			6
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RESERVED13_MASK			0x1
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RESERVED13_SHIFT			7
 	u8 flags11;
-#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED14_MASK	0x1
-#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED14_SHIFT             0
-#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED15_MASK	0x1
-#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED15_SHIFT             1
-#define XSTORM_ETH_HW_CONN_AG_CTX_TX_DEC_RULE_EN_MASK	0x1
-#define XSTORM_ETH_HW_CONN_AG_CTX_TX_DEC_RULE_EN_SHIFT         2
-#define XSTORM_ETH_HW_CONN_AG_CTX_RULE5EN_MASK	0x1
-#define XSTORM_ETH_HW_CONN_AG_CTX_RULE5EN_SHIFT                3
-#define XSTORM_ETH_HW_CONN_AG_CTX_RULE6EN_MASK	0x1
-#define XSTORM_ETH_HW_CONN_AG_CTX_RULE6EN_SHIFT                4
-#define XSTORM_ETH_HW_CONN_AG_CTX_RULE7EN_MASK	0x1
-#define XSTORM_ETH_HW_CONN_AG_CTX_RULE7EN_SHIFT                5
-#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED1_MASK	0x1
-#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED1_SHIFT           6
-#define XSTORM_ETH_HW_CONN_AG_CTX_RULE9EN_MASK	0x1
-#define XSTORM_ETH_HW_CONN_AG_CTX_RULE9EN_SHIFT                7
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RESERVED14_MASK		0x1
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RESERVED14_SHIFT		0
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RESERVED15_MASK		0x1
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RESERVED15_SHIFT		1
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_TX_DEC_RULE_EN_MASK	0x1
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_TX_DEC_RULE_EN_SHIFT	2
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RULE5EN_MASK		0x1
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RULE5EN_SHIFT		3
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RULE6EN_MASK		0x1
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RULE6EN_SHIFT		4
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RULE7EN_MASK		0x1
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RULE7EN_SHIFT		5
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED1_MASK		0x1
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED1_SHIFT		6
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RULE9EN_MASK		0x1
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RULE9EN_SHIFT		7
 	u8 flags12;
-#define XSTORM_ETH_HW_CONN_AG_CTX_RULE10EN_MASK	0x1
-#define XSTORM_ETH_HW_CONN_AG_CTX_RULE10EN_SHIFT               0
-#define XSTORM_ETH_HW_CONN_AG_CTX_RULE11EN_MASK	0x1
-#define XSTORM_ETH_HW_CONN_AG_CTX_RULE11EN_SHIFT               1
-#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED2_MASK	0x1
-#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED2_SHIFT           2
-#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED3_MASK	0x1
-#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED3_SHIFT           3
-#define XSTORM_ETH_HW_CONN_AG_CTX_RULE14EN_MASK	0x1
-#define XSTORM_ETH_HW_CONN_AG_CTX_RULE14EN_SHIFT               4
-#define XSTORM_ETH_HW_CONN_AG_CTX_RULE15EN_MASK	0x1
-#define XSTORM_ETH_HW_CONN_AG_CTX_RULE15EN_SHIFT               5
-#define XSTORM_ETH_HW_CONN_AG_CTX_RULE16EN_MASK	0x1
-#define XSTORM_ETH_HW_CONN_AG_CTX_RULE16EN_SHIFT               6
-#define XSTORM_ETH_HW_CONN_AG_CTX_RULE17EN_MASK	0x1
-#define XSTORM_ETH_HW_CONN_AG_CTX_RULE17EN_SHIFT               7
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RULE10EN_MASK	0x1
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RULE10EN_SHIFT	0
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RULE11EN_MASK	0x1
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RULE11EN_SHIFT	1
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED2_MASK	0x1
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED2_SHIFT	2
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED3_MASK	0x1
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED3_SHIFT	3
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RULE14EN_MASK	0x1
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RULE14EN_SHIFT	4
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RULE15EN_MASK	0x1
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RULE15EN_SHIFT	5
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RULE16EN_MASK	0x1
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RULE16EN_SHIFT	6
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RULE17EN_MASK	0x1
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RULE17EN_SHIFT	7
 	u8 flags13;
-#define XSTORM_ETH_HW_CONN_AG_CTX_RULE18EN_MASK	0x1
-#define XSTORM_ETH_HW_CONN_AG_CTX_RULE18EN_SHIFT               0
-#define XSTORM_ETH_HW_CONN_AG_CTX_RULE19EN_MASK	0x1
-#define XSTORM_ETH_HW_CONN_AG_CTX_RULE19EN_SHIFT               1
-#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED4_MASK	0x1
-#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED4_SHIFT           2
-#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED5_MASK	0x1
-#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED5_SHIFT           3
-#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED6_MASK	0x1
-#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED6_SHIFT           4
-#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED7_MASK	0x1
-#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED7_SHIFT           5
-#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED8_MASK	0x1
-#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED8_SHIFT           6
-#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED9_MASK	0x1
-#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED9_SHIFT           7
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RULE18EN_MASK	0x1
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RULE18EN_SHIFT	0
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RULE19EN_MASK	0x1
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RULE19EN_SHIFT	1
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED4_MASK	0x1
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED4_SHIFT	2
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED5_MASK	0x1
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED5_SHIFT	3
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED6_MASK	0x1
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED6_SHIFT	4
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED7_MASK	0x1
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED7_SHIFT	5
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED8_MASK	0x1
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED8_SHIFT	6
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED9_MASK	0x1
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED9_SHIFT	7
 	u8 flags14;
-#define XSTORM_ETH_HW_CONN_AG_CTX_EDPM_USE_EXT_HDR_MASK	0x1
-#define XSTORM_ETH_HW_CONN_AG_CTX_EDPM_USE_EXT_HDR_SHIFT       0
-#define XSTORM_ETH_HW_CONN_AG_CTX_EDPM_SEND_RAW_L3L4_MASK	0x1
-#define XSTORM_ETH_HW_CONN_AG_CTX_EDPM_SEND_RAW_L3L4_SHIFT     1
-#define XSTORM_ETH_HW_CONN_AG_CTX_EDPM_INBAND_PROP_HDR_MASK	0x1
-#define XSTORM_ETH_HW_CONN_AG_CTX_EDPM_INBAND_PROP_HDR_SHIFT   2
-#define XSTORM_ETH_HW_CONN_AG_CTX_EDPM_SEND_EXT_TUNNEL_MASK	0x1
-#define XSTORM_ETH_HW_CONN_AG_CTX_EDPM_SEND_EXT_TUNNEL_SHIFT   3
-#define XSTORM_ETH_HW_CONN_AG_CTX_L2_EDPM_ENABLE_MASK	0x1
-#define XSTORM_ETH_HW_CONN_AG_CTX_L2_EDPM_ENABLE_SHIFT         4
-#define XSTORM_ETH_HW_CONN_AG_CTX_ROCE_EDPM_ENABLE_MASK	0x1
-#define XSTORM_ETH_HW_CONN_AG_CTX_ROCE_EDPM_ENABLE_SHIFT       5
-#define XSTORM_ETH_HW_CONN_AG_CTX_TPH_ENABLE_MASK	0x3
-#define XSTORM_ETH_HW_CONN_AG_CTX_TPH_ENABLE_SHIFT             6
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_EDPM_USE_EXT_HDR_MASK	0x1
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_EDPM_USE_EXT_HDR_SHIFT	0
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_EDPM_SEND_RAW_L3L4_MASK	0x1
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_EDPM_SEND_RAW_L3L4_SHIFT	1
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_EDPM_INBAND_PROP_HDR_MASK	0x1
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_EDPM_INBAND_PROP_HDR_SHIFT	2
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_EDPM_SEND_EXT_TUNNEL_MASK	0x1
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_EDPM_SEND_EXT_TUNNEL_SHIFT	3
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_L2_EDPM_ENABLE_MASK	0x1
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_L2_EDPM_ENABLE_SHIFT	4
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_ROCE_EDPM_ENABLE_MASK	0x1
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_ROCE_EDPM_ENABLE_SHIFT	5
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_TPH_ENABLE_MASK		0x3
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_TPH_ENABLE_SHIFT		6
 	u8 edpm_event_id;
 	__le16 physical_q0;
-	__le16 ereserved1;
+	__le16 e5_reserved1;
 	__le16 edpm_num_bds;
 	__le16 tx_bd_cons;
 	__le16 tx_bd_prod;
@@ -5963,6 +6483,7 @@ struct xstorm_eth_hw_conn_ag_ctx {
 	__le16 conn_dpi;
 };
 
+/* GFT CAM line struct */
 struct gft_cam_line {
 	__le32 camline;
 #define GFT_CAM_LINE_VALID_MASK		0x1
@@ -5975,6 +6496,7 @@ struct gft_cam_line {
 #define GFT_CAM_LINE_RESERVED1_SHIFT	29
 };
 
+/* GFT CAM line struct with fields breakout */
 struct gft_cam_line_mapped {
 	__le32 camline;
 #define GFT_CAM_LINE_MAPPED_VALID_MASK				0x1
@@ -6008,28 +6530,31 @@ union gft_cam_line_union {
 	struct gft_cam_line_mapped cam_line_mapped;
 };
 
+/* Used in gft_profile_key: Indication for ip version */
 enum gft_profile_ip_version {
 	GFT_PROFILE_IPV4 = 0,
 	GFT_PROFILE_IPV6 = 1,
 	MAX_GFT_PROFILE_IP_VERSION
 };
 
+/* Profile key struct for GFT logic in PRS */
 struct gft_profile_key {
 	__le16 profile_key;
-#define GFT_PROFILE_KEY_IP_VERSION_MASK           0x1
-#define GFT_PROFILE_KEY_IP_VERSION_SHIFT          0
-#define GFT_PROFILE_KEY_TUNNEL_IP_VERSION_MASK    0x1
-#define GFT_PROFILE_KEY_TUNNEL_IP_VERSION_SHIFT   1
-#define GFT_PROFILE_KEY_UPPER_PROTOCOL_TYPE_MASK  0xF
-#define GFT_PROFILE_KEY_UPPER_PROTOCOL_TYPE_SHIFT 2
-#define GFT_PROFILE_KEY_TUNNEL_TYPE_MASK          0xF
-#define GFT_PROFILE_KEY_TUNNEL_TYPE_SHIFT         6
-#define GFT_PROFILE_KEY_PF_ID_MASK                0xF
-#define GFT_PROFILE_KEY_PF_ID_SHIFT               10
-#define GFT_PROFILE_KEY_RESERVED0_MASK            0x3
-#define GFT_PROFILE_KEY_RESERVED0_SHIFT           14
-};
-
+#define GFT_PROFILE_KEY_IP_VERSION_MASK			0x1
+#define GFT_PROFILE_KEY_IP_VERSION_SHIFT		0
+#define GFT_PROFILE_KEY_TUNNEL_IP_VERSION_MASK		0x1
+#define GFT_PROFILE_KEY_TUNNEL_IP_VERSION_SHIFT		1
+#define GFT_PROFILE_KEY_UPPER_PROTOCOL_TYPE_MASK	0xF
+#define GFT_PROFILE_KEY_UPPER_PROTOCOL_TYPE_SHIFT	2
+#define GFT_PROFILE_KEY_TUNNEL_TYPE_MASK		0xF
+#define GFT_PROFILE_KEY_TUNNEL_TYPE_SHIFT		6
+#define GFT_PROFILE_KEY_PF_ID_MASK			0xF
+#define GFT_PROFILE_KEY_PF_ID_SHIFT			10
+#define GFT_PROFILE_KEY_RESERVED0_MASK			0x3
+#define GFT_PROFILE_KEY_RESERVED0_SHIFT			14
+};
+
+/* Used in gft_profile_key: Indication for tunnel type */
 enum gft_profile_tunnel_type {
 	GFT_PROFILE_NO_TUNNEL = 0,
 	GFT_PROFILE_VXLAN_TUNNEL = 1,
@@ -6040,6 +6565,7 @@ enum gft_profile_tunnel_type {
 	MAX_GFT_PROFILE_TUNNEL_TYPE
 };
 
+/* Used in gft_profile_key: Indication for protocol type */
 enum gft_profile_upper_protocol_type {
 	GFT_PROFILE_ROCE_PROTOCOL = 0,
 	GFT_PROFILE_RROCE_PROTOCOL = 1,
@@ -6060,6 +6586,7 @@ enum gft_profile_upper_protocol_type {
 	MAX_GFT_PROFILE_UPPER_PROTOCOL_TYPE
 };
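
A minimal sketch of composing a profile key from the fields and enums above,
assuming the existing SET_FIELD() helper from common_hsi.h; pk and pf_id are
hypothetical:

	/* Hypothetical illustration - builds a non-tunneled IPv4/RoCE key */
	u16 key = 0;

	SET_FIELD(key, GFT_PROFILE_KEY_IP_VERSION, GFT_PROFILE_IPV4);
	SET_FIELD(key, GFT_PROFILE_KEY_TUNNEL_TYPE, GFT_PROFILE_NO_TUNNEL);
	SET_FIELD(key, GFT_PROFILE_KEY_UPPER_PROTOCOL_TYPE,
		  GFT_PROFILE_ROCE_PROTOCOL);
	SET_FIELD(key, GFT_PROFILE_KEY_PF_ID, pf_id);
	pk->profile_key = cpu_to_le16(key);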
 
+/* GFT RAM line struct */
 struct gft_ram_line {
 	__le32 lo;
 #define GFT_RAM_LINE_VLAN_SELECT_MASK			0x3
@@ -6149,6 +6676,7 @@ struct gft_ram_line {
 #define GFT_RAM_LINE_RESERVED1_SHIFT			10
 };
 
+/* Used in the first 2 bits for gft_ram_line: Indication for vlan mask */
 enum gft_vlan_select {
 	INNER_PROVIDER_VLAN = 0,
 	INNER_VLAN = 1,
@@ -6157,10 +6685,205 @@ enum gft_vlan_select {
 	MAX_GFT_VLAN_SELECT
 };
 
+/* The rdma task context of Ystorm */
+struct ystorm_rdma_task_st_ctx {
+	struct regpair temp[4];
+};
+
+struct e4_ystorm_rdma_task_ag_ctx {
+	u8 reserved;
+	u8 byte1;
+	__le16 msem_ctx_upd_seq;
+	u8 flags0;
+#define E4_YSTORM_RDMA_TASK_AG_CTX_CONNECTION_TYPE_MASK		0xF
+#define E4_YSTORM_RDMA_TASK_AG_CTX_CONNECTION_TYPE_SHIFT	0
+#define E4_YSTORM_RDMA_TASK_AG_CTX_EXIST_IN_QM0_MASK		0x1
+#define E4_YSTORM_RDMA_TASK_AG_CTX_EXIST_IN_QM0_SHIFT		4
+#define E4_YSTORM_RDMA_TASK_AG_CTX_BIT1_MASK			0x1
+#define E4_YSTORM_RDMA_TASK_AG_CTX_BIT1_SHIFT			5
+#define E4_YSTORM_RDMA_TASK_AG_CTX_VALID_MASK			0x1
+#define E4_YSTORM_RDMA_TASK_AG_CTX_VALID_SHIFT			6
+#define E4_YSTORM_RDMA_TASK_AG_CTX_BIT3_MASK			0x1
+#define E4_YSTORM_RDMA_TASK_AG_CTX_BIT3_SHIFT			7
+	u8 flags1;
+#define E4_YSTORM_RDMA_TASK_AG_CTX_CF0_MASK		0x3
+#define E4_YSTORM_RDMA_TASK_AG_CTX_CF0_SHIFT		0
+#define E4_YSTORM_RDMA_TASK_AG_CTX_CF1_MASK		0x3
+#define E4_YSTORM_RDMA_TASK_AG_CTX_CF1_SHIFT		2
+#define E4_YSTORM_RDMA_TASK_AG_CTX_CF2SPECIAL_MASK	0x3
+#define E4_YSTORM_RDMA_TASK_AG_CTX_CF2SPECIAL_SHIFT	4
+#define E4_YSTORM_RDMA_TASK_AG_CTX_CF0EN_MASK		0x1
+#define E4_YSTORM_RDMA_TASK_AG_CTX_CF0EN_SHIFT		6
+#define E4_YSTORM_RDMA_TASK_AG_CTX_CF1EN_MASK		0x1
+#define E4_YSTORM_RDMA_TASK_AG_CTX_CF1EN_SHIFT		7
+	u8 flags2;
+#define E4_YSTORM_RDMA_TASK_AG_CTX_BIT4_MASK		0x1
+#define E4_YSTORM_RDMA_TASK_AG_CTX_BIT4_SHIFT		0
+#define E4_YSTORM_RDMA_TASK_AG_CTX_RULE0EN_MASK		0x1
+#define E4_YSTORM_RDMA_TASK_AG_CTX_RULE0EN_SHIFT	1
+#define E4_YSTORM_RDMA_TASK_AG_CTX_RULE1EN_MASK		0x1
+#define E4_YSTORM_RDMA_TASK_AG_CTX_RULE1EN_SHIFT	2
+#define E4_YSTORM_RDMA_TASK_AG_CTX_RULE2EN_MASK		0x1
+#define E4_YSTORM_RDMA_TASK_AG_CTX_RULE2EN_SHIFT	3
+#define E4_YSTORM_RDMA_TASK_AG_CTX_RULE3EN_MASK		0x1
+#define E4_YSTORM_RDMA_TASK_AG_CTX_RULE3EN_SHIFT	4
+#define E4_YSTORM_RDMA_TASK_AG_CTX_RULE4EN_MASK		0x1
+#define E4_YSTORM_RDMA_TASK_AG_CTX_RULE4EN_SHIFT	5
+#define E4_YSTORM_RDMA_TASK_AG_CTX_RULE5EN_MASK		0x1
+#define E4_YSTORM_RDMA_TASK_AG_CTX_RULE5EN_SHIFT	6
+#define E4_YSTORM_RDMA_TASK_AG_CTX_RULE6EN_MASK		0x1
+#define E4_YSTORM_RDMA_TASK_AG_CTX_RULE6EN_SHIFT	7
+	u8 key;
+	__le32 mw_cnt;
+	u8 ref_cnt_seq;
+	u8 ctx_upd_seq;
+	__le16 dif_flags;
+	__le16 tx_ref_count;
+	__le16 last_used_ltid;
+	__le16 parent_mr_lo;
+	__le16 parent_mr_hi;
+	__le32 fbo_lo;
+	__le32 fbo_hi;
+};
+
+struct e4_mstorm_rdma_task_ag_ctx {
+	u8 reserved;
+	u8 byte1;
+	__le16 icid;
+	u8 flags0;
+#define E4_MSTORM_RDMA_TASK_AG_CTX_CONNECTION_TYPE_MASK		0xF
+#define E4_MSTORM_RDMA_TASK_AG_CTX_CONNECTION_TYPE_SHIFT	0
+#define E4_MSTORM_RDMA_TASK_AG_CTX_EXIST_IN_QM0_MASK		0x1
+#define E4_MSTORM_RDMA_TASK_AG_CTX_EXIST_IN_QM0_SHIFT		4
+#define E4_MSTORM_RDMA_TASK_AG_CTX_BIT1_MASK			0x1
+#define E4_MSTORM_RDMA_TASK_AG_CTX_BIT1_SHIFT			5
+#define E4_MSTORM_RDMA_TASK_AG_CTX_BIT2_MASK			0x1
+#define E4_MSTORM_RDMA_TASK_AG_CTX_BIT2_SHIFT			6
+#define E4_MSTORM_RDMA_TASK_AG_CTX_BIT3_MASK			0x1
+#define E4_MSTORM_RDMA_TASK_AG_CTX_BIT3_SHIFT			7
+	u8 flags1;
+#define E4_MSTORM_RDMA_TASK_AG_CTX_CF0_MASK	0x3
+#define E4_MSTORM_RDMA_TASK_AG_CTX_CF0_SHIFT	0
+#define E4_MSTORM_RDMA_TASK_AG_CTX_CF1_MASK	0x3
+#define E4_MSTORM_RDMA_TASK_AG_CTX_CF1_SHIFT	2
+#define E4_MSTORM_RDMA_TASK_AG_CTX_CF2_MASK	0x3
+#define E4_MSTORM_RDMA_TASK_AG_CTX_CF2_SHIFT	4
+#define E4_MSTORM_RDMA_TASK_AG_CTX_CF0EN_MASK	0x1
+#define E4_MSTORM_RDMA_TASK_AG_CTX_CF0EN_SHIFT	6
+#define E4_MSTORM_RDMA_TASK_AG_CTX_CF1EN_MASK	0x1
+#define E4_MSTORM_RDMA_TASK_AG_CTX_CF1EN_SHIFT	7
+	u8 flags2;
+#define E4_MSTORM_RDMA_TASK_AG_CTX_CF2EN_MASK		0x1
+#define E4_MSTORM_RDMA_TASK_AG_CTX_CF2EN_SHIFT		0
+#define E4_MSTORM_RDMA_TASK_AG_CTX_RULE0EN_MASK		0x1
+#define E4_MSTORM_RDMA_TASK_AG_CTX_RULE0EN_SHIFT	1
+#define E4_MSTORM_RDMA_TASK_AG_CTX_RULE1EN_MASK		0x1
+#define E4_MSTORM_RDMA_TASK_AG_CTX_RULE1EN_SHIFT	2
+#define E4_MSTORM_RDMA_TASK_AG_CTX_RULE2EN_MASK		0x1
+#define E4_MSTORM_RDMA_TASK_AG_CTX_RULE2EN_SHIFT	3
+#define E4_MSTORM_RDMA_TASK_AG_CTX_RULE3EN_MASK		0x1
+#define E4_MSTORM_RDMA_TASK_AG_CTX_RULE3EN_SHIFT	4
+#define E4_MSTORM_RDMA_TASK_AG_CTX_RULE4EN_MASK		0x1
+#define E4_MSTORM_RDMA_TASK_AG_CTX_RULE4EN_SHIFT	5
+#define E4_MSTORM_RDMA_TASK_AG_CTX_RULE5EN_MASK		0x1
+#define E4_MSTORM_RDMA_TASK_AG_CTX_RULE5EN_SHIFT	6
+#define E4_MSTORM_RDMA_TASK_AG_CTX_RULE6EN_MASK		0x1
+#define E4_MSTORM_RDMA_TASK_AG_CTX_RULE6EN_SHIFT	7
+	u8 key;
+	__le32 mw_cnt;
+	u8 ref_cnt_seq;
+	u8 ctx_upd_seq;
+	__le16 dif_flags;
+	__le16 tx_ref_count;
+	__le16 last_used_ltid;
+	__le16 parent_mr_lo;
+	__le16 parent_mr_hi;
+	__le32 fbo_lo;
+	__le32 fbo_hi;
+};
+
+/* The rdma task context of Mstorm */
 struct mstorm_rdma_task_st_ctx {
 	struct regpair temp[4];
 };
 
+/* The rdma task context of Ustorm */
+struct ustorm_rdma_task_st_ctx {
+	struct regpair temp[2];
+};
+
+struct e4_ustorm_rdma_task_ag_ctx {
+	u8 reserved;
+	u8 byte1;
+	__le16 icid;
+	u8 flags0;
+#define E4_USTORM_RDMA_TASK_AG_CTX_CONNECTION_TYPE_MASK		0xF
+#define E4_USTORM_RDMA_TASK_AG_CTX_CONNECTION_TYPE_SHIFT	0
+#define E4_USTORM_RDMA_TASK_AG_CTX_EXIST_IN_QM0_MASK		0x1
+#define E4_USTORM_RDMA_TASK_AG_CTX_EXIST_IN_QM0_SHIFT		4
+#define E4_USTORM_RDMA_TASK_AG_CTX_DIF_RUNT_VALID_MASK		0x1
+#define E4_USTORM_RDMA_TASK_AG_CTX_DIF_RUNT_VALID_SHIFT		5
+#define E4_USTORM_RDMA_TASK_AG_CTX_DIF_WRITE_RESULT_CF_MASK	0x3
+#define E4_USTORM_RDMA_TASK_AG_CTX_DIF_WRITE_RESULT_CF_SHIFT	6
+	u8 flags1;
+#define E4_USTORM_RDMA_TASK_AG_CTX_DIF_RESULT_TOGGLE_BIT_MASK	0x3
+#define E4_USTORM_RDMA_TASK_AG_CTX_DIF_RESULT_TOGGLE_BIT_SHIFT	0
+#define E4_USTORM_RDMA_TASK_AG_CTX_DIF_TX_IO_FLG_MASK		0x3
+#define E4_USTORM_RDMA_TASK_AG_CTX_DIF_TX_IO_FLG_SHIFT		2
+#define E4_USTORM_RDMA_TASK_AG_CTX_CF3_MASK			0x3
+#define E4_USTORM_RDMA_TASK_AG_CTX_CF3_SHIFT			4
+#define E4_USTORM_RDMA_TASK_AG_CTX_DIF_ERROR_CF_MASK		0x3
+#define E4_USTORM_RDMA_TASK_AG_CTX_DIF_ERROR_CF_SHIFT		6
+	u8 flags2;
+#define E4_USTORM_RDMA_TASK_AG_CTX_DIF_WRITE_RESULT_CF_EN_MASK	0x1
+#define E4_USTORM_RDMA_TASK_AG_CTX_DIF_WRITE_RESULT_CF_EN_SHIFT	0
+#define E4_USTORM_RDMA_TASK_AG_CTX_RESERVED2_MASK		0x1
+#define E4_USTORM_RDMA_TASK_AG_CTX_RESERVED2_SHIFT		1
+#define E4_USTORM_RDMA_TASK_AG_CTX_RESERVED3_MASK		0x1
+#define E4_USTORM_RDMA_TASK_AG_CTX_RESERVED3_SHIFT		2
+#define E4_USTORM_RDMA_TASK_AG_CTX_CF3EN_MASK			0x1
+#define E4_USTORM_RDMA_TASK_AG_CTX_CF3EN_SHIFT			3
+#define E4_USTORM_RDMA_TASK_AG_CTX_DIF_ERROR_CF_EN_MASK		0x1
+#define E4_USTORM_RDMA_TASK_AG_CTX_DIF_ERROR_CF_EN_SHIFT	4
+#define E4_USTORM_RDMA_TASK_AG_CTX_RULE0EN_MASK			0x1
+#define E4_USTORM_RDMA_TASK_AG_CTX_RULE0EN_SHIFT		5
+#define E4_USTORM_RDMA_TASK_AG_CTX_RULE1EN_MASK			0x1
+#define E4_USTORM_RDMA_TASK_AG_CTX_RULE1EN_SHIFT		6
+#define E4_USTORM_RDMA_TASK_AG_CTX_RULE2EN_MASK			0x1
+#define E4_USTORM_RDMA_TASK_AG_CTX_RULE2EN_SHIFT		7
+	u8 flags3;
+#define E4_USTORM_RDMA_TASK_AG_CTX_RULE3EN_MASK		0x1
+#define E4_USTORM_RDMA_TASK_AG_CTX_RULE3EN_SHIFT	0
+#define E4_USTORM_RDMA_TASK_AG_CTX_RULE4EN_MASK		0x1
+#define E4_USTORM_RDMA_TASK_AG_CTX_RULE4EN_SHIFT	1
+#define E4_USTORM_RDMA_TASK_AG_CTX_RULE5EN_MASK		0x1
+#define E4_USTORM_RDMA_TASK_AG_CTX_RULE5EN_SHIFT	2
+#define E4_USTORM_RDMA_TASK_AG_CTX_RULE6EN_MASK		0x1
+#define E4_USTORM_RDMA_TASK_AG_CTX_RULE6EN_SHIFT	3
+#define E4_USTORM_RDMA_TASK_AG_CTX_DIF_ERROR_TYPE_MASK	0xF
+#define E4_USTORM_RDMA_TASK_AG_CTX_DIF_ERROR_TYPE_SHIFT	4
+	__le32 dif_err_intervals;
+	__le32 dif_error_1st_interval;
+	__le32 reg2;
+	__le32 dif_runt_value;
+	__le32 reg4;
+	__le32 reg5;
+};
+
+/* RDMA task context */
+struct e4_rdma_task_context {
+	struct ystorm_rdma_task_st_ctx ystorm_st_context;
+	struct e4_ystorm_rdma_task_ag_ctx ystorm_ag_context;
+	struct tdif_task_context tdif_context;
+	struct e4_mstorm_rdma_task_ag_ctx mstorm_ag_context;
+	struct mstorm_rdma_task_st_ctx mstorm_st_context;
+	struct rdif_task_context rdif_context;
+	struct ustorm_rdma_task_st_ctx ustorm_st_context;
+	struct regpair ustorm_st_padding[2];
+	struct e4_ustorm_rdma_task_ag_ctx ustorm_ag_context;
+};
+
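As a usage sketch, a completion path that wants the DIF error class latched by Ustorm could read it back out of the aggregated context with GET_FIELD(); how the task context pointer is obtained is assumed, not shown:

	struct e4_rdma_task_context *ctx;	/* looked up from task memory elsewhere */
	u8 dif_err_type;

	dif_err_type = GET_FIELD(ctx->ustorm_ag_context.flags3,
				 E4_USTORM_RDMA_TASK_AG_CTX_DIF_ERROR_TYPE);
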
+/* rdma close function ramrod data */
 struct rdma_close_func_ramrod_data {
 	u8 cnq_start_offset;
 	u8 num_cnqs;
@@ -6169,6 +6892,7 @@ struct rdma_close_func_ramrod_data {
 	u8 reserved[4];
 };
 
+/* rdma function init CNQ parameters */
 struct rdma_cnq_params {
 	__le16 sb_num;
 	u8 sb_index;
@@ -6179,6 +6903,7 @@ struct rdma_cnq_params {
 	u8 reserved1[6];
 };
 
+/* rdma create cq ramrod data */
 struct rdma_create_cq_ramrod_data {
 	struct regpair cq_handle;
 	struct regpair pbl_addr;
@@ -6193,21 +6918,25 @@ struct rdma_create_cq_ramrod_data {
 	__le16 reserved1;
 };
 
+/* rdma deregister tid ramrod data */
 struct rdma_deregister_tid_ramrod_data {
 	__le32 itid;
 	__le32 reserved;
 };
 
+/* rdma destroy cq output params */
 struct rdma_destroy_cq_output_params {
 	__le16 cnq_num;
 	__le16 reserved0;
 	__le32 reserved1;
 };
 
+/* rdma destroy cq ramrod data */
 struct rdma_destroy_cq_ramrod_data {
 	struct regpair output_params_addr;
 };
 
+/* RDMA slow path EQ cmd IDs */
 enum rdma_event_opcode {
 	RDMA_EVENT_UNUSED,
 	RDMA_EVENT_FUNC_INIT,
@@ -6223,6 +6952,7 @@ enum rdma_event_opcode {
 	MAX_RDMA_EVENT_OPCODE
 };
 
+/* RDMA FW return code for slow path ramrods */
 enum rdma_fw_return_code {
 	RDMA_RETURN_OK = 0,
 	RDMA_RETURN_REGISTER_MR_BAD_STATE_ERR,
@@ -6232,20 +6962,24 @@ enum rdma_fw_return_code {
 	MAX_RDMA_FW_RETURN_CODE
 };
 
+/* rdma function init header */
 struct rdma_init_func_hdr {
 	u8 cnq_start_offset;
 	u8 num_cnqs;
 	u8 cq_ring_mode;
 	u8 vf_id;
 	u8 vf_valid;
-	u8 reserved[3];
+	u8 relaxed_ordering;
+	u8 reserved[2];
 };
 
+/* rdma function init ramrod data */
 struct rdma_init_func_ramrod_data {
 	struct rdma_init_func_hdr params_header;
 	struct rdma_cnq_params cnq_params[NUM_OF_GLOBAL_QUEUES];
 };
 
+/* RDMA ramrod command IDs */
 enum rdma_ramrod_cmd_id {
 	RDMA_RAMROD_UNUSED,
 	RDMA_RAMROD_FUNC_INIT,
@@ -6261,42 +6995,43 @@ enum rdma_ramrod_cmd_id {
 	MAX_RDMA_RAMROD_CMD_ID
 };
 
+/* rdma register tid ramrod data */
 struct rdma_register_tid_ramrod_data {
 	__le16 flags;
 #define RDMA_REGISTER_TID_RAMROD_DATA_PAGE_SIZE_LOG_MASK	0x1F
 #define RDMA_REGISTER_TID_RAMROD_DATA_PAGE_SIZE_LOG_SHIFT	0
 #define RDMA_REGISTER_TID_RAMROD_DATA_TWO_LEVEL_PBL_MASK	0x1
 #define RDMA_REGISTER_TID_RAMROD_DATA_TWO_LEVEL_PBL_SHIFT	5
-#define RDMA_REGISTER_TID_RAMROD_DATA_ZERO_BASED_MASK	0x1
-#define RDMA_REGISTER_TID_RAMROD_DATA_ZERO_BASED_SHIFT	6
-#define RDMA_REGISTER_TID_RAMROD_DATA_PHY_MR_MASK	0x1
-#define RDMA_REGISTER_TID_RAMROD_DATA_PHY_MR_SHIFT	7
-#define RDMA_REGISTER_TID_RAMROD_DATA_REMOTE_READ_MASK	0x1
-#define RDMA_REGISTER_TID_RAMROD_DATA_REMOTE_READ_SHIFT	8
-#define RDMA_REGISTER_TID_RAMROD_DATA_REMOTE_WRITE_MASK	0x1
+#define RDMA_REGISTER_TID_RAMROD_DATA_ZERO_BASED_MASK		0x1
+#define RDMA_REGISTER_TID_RAMROD_DATA_ZERO_BASED_SHIFT		6
+#define RDMA_REGISTER_TID_RAMROD_DATA_PHY_MR_MASK		0x1
+#define RDMA_REGISTER_TID_RAMROD_DATA_PHY_MR_SHIFT		7
+#define RDMA_REGISTER_TID_RAMROD_DATA_REMOTE_READ_MASK		0x1
+#define RDMA_REGISTER_TID_RAMROD_DATA_REMOTE_READ_SHIFT		8
+#define RDMA_REGISTER_TID_RAMROD_DATA_REMOTE_WRITE_MASK		0x1
 #define RDMA_REGISTER_TID_RAMROD_DATA_REMOTE_WRITE_SHIFT	9
 #define RDMA_REGISTER_TID_RAMROD_DATA_REMOTE_ATOMIC_MASK	0x1
 #define RDMA_REGISTER_TID_RAMROD_DATA_REMOTE_ATOMIC_SHIFT	10
-#define RDMA_REGISTER_TID_RAMROD_DATA_LOCAL_WRITE_MASK	0x1
-#define RDMA_REGISTER_TID_RAMROD_DATA_LOCAL_WRITE_SHIFT	11
-#define RDMA_REGISTER_TID_RAMROD_DATA_LOCAL_READ_MASK	0x1
-#define RDMA_REGISTER_TID_RAMROD_DATA_LOCAL_READ_SHIFT	12
+#define RDMA_REGISTER_TID_RAMROD_DATA_LOCAL_WRITE_MASK		0x1
+#define RDMA_REGISTER_TID_RAMROD_DATA_LOCAL_WRITE_SHIFT		11
+#define RDMA_REGISTER_TID_RAMROD_DATA_LOCAL_READ_MASK		0x1
+#define RDMA_REGISTER_TID_RAMROD_DATA_LOCAL_READ_SHIFT		12
 #define RDMA_REGISTER_TID_RAMROD_DATA_ENABLE_MW_BIND_MASK	0x1
 #define RDMA_REGISTER_TID_RAMROD_DATA_ENABLE_MW_BIND_SHIFT	13
-#define RDMA_REGISTER_TID_RAMROD_DATA_RESERVED_MASK	0x3
-#define RDMA_REGISTER_TID_RAMROD_DATA_RESERVED_SHIFT	14
+#define RDMA_REGISTER_TID_RAMROD_DATA_RESERVED_MASK		0x3
+#define RDMA_REGISTER_TID_RAMROD_DATA_RESERVED_SHIFT		14
 	u8 flags1;
 #define RDMA_REGISTER_TID_RAMROD_DATA_PBL_PAGE_SIZE_LOG_MASK	0x1F
-#define RDMA_REGISTER_TID_RAMROD_DATA_PBL_PAGE_SIZE_LOG_SHIFT 0
-#define RDMA_REGISTER_TID_RAMROD_DATA_TID_TYPE_MASK	0x7
-#define RDMA_REGISTER_TID_RAMROD_DATA_TID_TYPE_SHIFT	5
+#define RDMA_REGISTER_TID_RAMROD_DATA_PBL_PAGE_SIZE_LOG_SHIFT	0
+#define RDMA_REGISTER_TID_RAMROD_DATA_TID_TYPE_MASK		0x7
+#define RDMA_REGISTER_TID_RAMROD_DATA_TID_TYPE_SHIFT		5
 	u8 flags2;
-#define RDMA_REGISTER_TID_RAMROD_DATA_DMA_MR_MASK	0x1
-#define RDMA_REGISTER_TID_RAMROD_DATA_DMA_MR_SHIFT	0
+#define RDMA_REGISTER_TID_RAMROD_DATA_DMA_MR_MASK		0x1
+#define RDMA_REGISTER_TID_RAMROD_DATA_DMA_MR_SHIFT		0
 #define RDMA_REGISTER_TID_RAMROD_DATA_DIF_ON_HOST_FLG_MASK	0x1
 #define RDMA_REGISTER_TID_RAMROD_DATA_DIF_ON_HOST_FLG_SHIFT	1
-#define RDMA_REGISTER_TID_RAMROD_DATA_RESERVED1_MASK	0x3F
-#define RDMA_REGISTER_TID_RAMROD_DATA_RESERVED1_SHIFT	2
+#define RDMA_REGISTER_TID_RAMROD_DATA_RESERVED1_MASK		0x3F
+#define RDMA_REGISTER_TID_RAMROD_DATA_RESERVED1_SHIFT		2
 	u8 key;
 	u8 length_hi;
 	u8 vf_id;
@@ -6313,19 +7048,21 @@ struct rdma_register_tid_ramrod_data {
 	__le32 reserved4[2];
 };
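The permission and page-size fields above are packed the same way before the ramrod is posted. A sketch under assumed values (4 KB pages, local read/write only; purely illustrative, not a recommended configuration):

	u16 flags = 0;

	SET_FIELD(flags, RDMA_REGISTER_TID_RAMROD_DATA_PAGE_SIZE_LOG, 12);
	SET_FIELD(flags, RDMA_REGISTER_TID_RAMROD_DATA_LOCAL_READ, 1);
	SET_FIELD(flags, RDMA_REGISTER_TID_RAMROD_DATA_LOCAL_WRITE, 1);
	/* ramrod: the allocated ramrod payload (allocation not shown) */
	ramrod->flags = cpu_to_le16(flags);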
 
+/* rdma resize cq output params */
 struct rdma_resize_cq_output_params {
 	__le32 old_cq_cons;
 	__le32 old_cq_prod;
 };
 
+/* rdma resize cq ramrod data */
 struct rdma_resize_cq_ramrod_data {
 	u8 flags;
-#define RDMA_RESIZE_CQ_RAMROD_DATA_TOGGLE_BIT_MASK        0x1
-#define RDMA_RESIZE_CQ_RAMROD_DATA_TOGGLE_BIT_SHIFT       0
-#define RDMA_RESIZE_CQ_RAMROD_DATA_IS_TWO_LEVEL_PBL_MASK  0x1
-#define RDMA_RESIZE_CQ_RAMROD_DATA_IS_TWO_LEVEL_PBL_SHIFT 1
-#define RDMA_RESIZE_CQ_RAMROD_DATA_RESERVED_MASK          0x3F
-#define RDMA_RESIZE_CQ_RAMROD_DATA_RESERVED_SHIFT         2
+#define RDMA_RESIZE_CQ_RAMROD_DATA_TOGGLE_BIT_MASK		0x1
+#define RDMA_RESIZE_CQ_RAMROD_DATA_TOGGLE_BIT_SHIFT		0
+#define RDMA_RESIZE_CQ_RAMROD_DATA_IS_TWO_LEVEL_PBL_MASK	0x1
+#define RDMA_RESIZE_CQ_RAMROD_DATA_IS_TWO_LEVEL_PBL_SHIFT	1
+#define RDMA_RESIZE_CQ_RAMROD_DATA_RESERVED_MASK		0x3F
+#define RDMA_RESIZE_CQ_RAMROD_DATA_RESERVED_SHIFT		2
 	u8 pbl_log_page_size;
 	__le16 pbl_num_pages;
 	__le32 max_cqes;
@@ -6333,10 +7070,12 @@ struct rdma_resize_cq_ramrod_data {
 	struct regpair output_params_addr;
 };
 
+/* rdma srq context */
 struct rdma_srq_context {
 	struct regpair temp[8];
 };
 
+/* rdma create srq ramrod data */
 struct rdma_srq_create_ramrod_data {
 	struct regpair pbl_base_addr;
 	__le16 pages_in_srq_pbl;
@@ -6348,206 +7087,19 @@ struct rdma_srq_create_ramrod_data {
 	struct regpair producers_addr;
 };
 
+/* rdma destroy srq ramrod data */
 struct rdma_srq_destroy_ramrod_data {
 	struct rdma_srq_id srq_id;
 	__le32 reserved;
 };
 
+/* rdma modify srq ramrod data */
 struct rdma_srq_modify_ramrod_data {
 	struct rdma_srq_id srq_id;
 	__le32 wqe_limit;
 };
 
-struct ystorm_rdma_task_st_ctx {
-	struct regpair temp[4];
-};
-
-struct ystorm_rdma_task_ag_ctx {
-	u8 reserved;
-	u8 byte1;
-	__le16 msem_ctx_upd_seq;
-	u8 flags0;
-#define YSTORM_RDMA_TASK_AG_CTX_CONNECTION_TYPE_MASK  0xF
-#define YSTORM_RDMA_TASK_AG_CTX_CONNECTION_TYPE_SHIFT 0
-#define YSTORM_RDMA_TASK_AG_CTX_EXIST_IN_QM0_MASK     0x1
-#define YSTORM_RDMA_TASK_AG_CTX_EXIST_IN_QM0_SHIFT    4
-#define YSTORM_RDMA_TASK_AG_CTX_BIT1_MASK             0x1
-#define YSTORM_RDMA_TASK_AG_CTX_BIT1_SHIFT            5
-#define YSTORM_RDMA_TASK_AG_CTX_VALID_MASK            0x1
-#define YSTORM_RDMA_TASK_AG_CTX_VALID_SHIFT           6
-#define YSTORM_RDMA_TASK_AG_CTX_BIT3_MASK             0x1
-#define YSTORM_RDMA_TASK_AG_CTX_BIT3_SHIFT            7
-	u8 flags1;
-#define YSTORM_RDMA_TASK_AG_CTX_CF0_MASK              0x3
-#define YSTORM_RDMA_TASK_AG_CTX_CF0_SHIFT             0
-#define YSTORM_RDMA_TASK_AG_CTX_CF1_MASK              0x3
-#define YSTORM_RDMA_TASK_AG_CTX_CF1_SHIFT             2
-#define YSTORM_RDMA_TASK_AG_CTX_CF2SPECIAL_MASK       0x3
-#define YSTORM_RDMA_TASK_AG_CTX_CF2SPECIAL_SHIFT      4
-#define YSTORM_RDMA_TASK_AG_CTX_CF0EN_MASK            0x1
-#define YSTORM_RDMA_TASK_AG_CTX_CF0EN_SHIFT           6
-#define YSTORM_RDMA_TASK_AG_CTX_CF1EN_MASK            0x1
-#define YSTORM_RDMA_TASK_AG_CTX_CF1EN_SHIFT           7
-	u8 flags2;
-#define YSTORM_RDMA_TASK_AG_CTX_BIT4_MASK             0x1
-#define YSTORM_RDMA_TASK_AG_CTX_BIT4_SHIFT            0
-#define YSTORM_RDMA_TASK_AG_CTX_RULE0EN_MASK          0x1
-#define YSTORM_RDMA_TASK_AG_CTX_RULE0EN_SHIFT         1
-#define YSTORM_RDMA_TASK_AG_CTX_RULE1EN_MASK          0x1
-#define YSTORM_RDMA_TASK_AG_CTX_RULE1EN_SHIFT         2
-#define YSTORM_RDMA_TASK_AG_CTX_RULE2EN_MASK          0x1
-#define YSTORM_RDMA_TASK_AG_CTX_RULE2EN_SHIFT         3
-#define YSTORM_RDMA_TASK_AG_CTX_RULE3EN_MASK          0x1
-#define YSTORM_RDMA_TASK_AG_CTX_RULE3EN_SHIFT         4
-#define YSTORM_RDMA_TASK_AG_CTX_RULE4EN_MASK          0x1
-#define YSTORM_RDMA_TASK_AG_CTX_RULE4EN_SHIFT         5
-#define YSTORM_RDMA_TASK_AG_CTX_RULE5EN_MASK          0x1
-#define YSTORM_RDMA_TASK_AG_CTX_RULE5EN_SHIFT         6
-#define YSTORM_RDMA_TASK_AG_CTX_RULE6EN_MASK          0x1
-#define YSTORM_RDMA_TASK_AG_CTX_RULE6EN_SHIFT         7
-	u8 key;
-	__le32 mw_cnt;
-	u8 ref_cnt_seq;
-	u8 ctx_upd_seq;
-	__le16 dif_flags;
-	__le16 tx_ref_count;
-	__le16 last_used_ltid;
-	__le16 parent_mr_lo;
-	__le16 parent_mr_hi;
-	__le32 fbo_lo;
-	__le32 fbo_hi;
-};
-
-struct mstorm_rdma_task_ag_ctx {
-	u8 reserved;
-	u8 byte1;
-	__le16 icid;
-	u8 flags0;
-#define MSTORM_RDMA_TASK_AG_CTX_CONNECTION_TYPE_MASK  0xF
-#define MSTORM_RDMA_TASK_AG_CTX_CONNECTION_TYPE_SHIFT 0
-#define MSTORM_RDMA_TASK_AG_CTX_EXIST_IN_QM0_MASK     0x1
-#define MSTORM_RDMA_TASK_AG_CTX_EXIST_IN_QM0_SHIFT    4
-#define MSTORM_RDMA_TASK_AG_CTX_BIT1_MASK             0x1
-#define MSTORM_RDMA_TASK_AG_CTX_BIT1_SHIFT            5
-#define MSTORM_RDMA_TASK_AG_CTX_BIT2_MASK             0x1
-#define MSTORM_RDMA_TASK_AG_CTX_BIT2_SHIFT            6
-#define MSTORM_RDMA_TASK_AG_CTX_BIT3_MASK             0x1
-#define MSTORM_RDMA_TASK_AG_CTX_BIT3_SHIFT            7
-	u8 flags1;
-#define MSTORM_RDMA_TASK_AG_CTX_CF0_MASK              0x3
-#define MSTORM_RDMA_TASK_AG_CTX_CF0_SHIFT             0
-#define MSTORM_RDMA_TASK_AG_CTX_CF1_MASK              0x3
-#define MSTORM_RDMA_TASK_AG_CTX_CF1_SHIFT             2
-#define MSTORM_RDMA_TASK_AG_CTX_CF2_MASK              0x3
-#define MSTORM_RDMA_TASK_AG_CTX_CF2_SHIFT             4
-#define MSTORM_RDMA_TASK_AG_CTX_CF0EN_MASK            0x1
-#define MSTORM_RDMA_TASK_AG_CTX_CF0EN_SHIFT           6
-#define MSTORM_RDMA_TASK_AG_CTX_CF1EN_MASK            0x1
-#define MSTORM_RDMA_TASK_AG_CTX_CF1EN_SHIFT           7
-	u8 flags2;
-#define MSTORM_RDMA_TASK_AG_CTX_CF2EN_MASK            0x1
-#define MSTORM_RDMA_TASK_AG_CTX_CF2EN_SHIFT           0
-#define MSTORM_RDMA_TASK_AG_CTX_RULE0EN_MASK          0x1
-#define MSTORM_RDMA_TASK_AG_CTX_RULE0EN_SHIFT         1
-#define MSTORM_RDMA_TASK_AG_CTX_RULE1EN_MASK          0x1
-#define MSTORM_RDMA_TASK_AG_CTX_RULE1EN_SHIFT         2
-#define MSTORM_RDMA_TASK_AG_CTX_RULE2EN_MASK          0x1
-#define MSTORM_RDMA_TASK_AG_CTX_RULE2EN_SHIFT         3
-#define MSTORM_RDMA_TASK_AG_CTX_RULE3EN_MASK          0x1
-#define MSTORM_RDMA_TASK_AG_CTX_RULE3EN_SHIFT         4
-#define MSTORM_RDMA_TASK_AG_CTX_RULE4EN_MASK          0x1
-#define MSTORM_RDMA_TASK_AG_CTX_RULE4EN_SHIFT         5
-#define MSTORM_RDMA_TASK_AG_CTX_RULE5EN_MASK          0x1
-#define MSTORM_RDMA_TASK_AG_CTX_RULE5EN_SHIFT         6
-#define MSTORM_RDMA_TASK_AG_CTX_RULE6EN_MASK          0x1
-#define MSTORM_RDMA_TASK_AG_CTX_RULE6EN_SHIFT         7
-	u8 key;
-	__le32 mw_cnt;
-	u8 ref_cnt_seq;
-	u8 ctx_upd_seq;
-	__le16 dif_flags;
-	__le16 tx_ref_count;
-	__le16 last_used_ltid;
-	__le16 parent_mr_lo;
-	__le16 parent_mr_hi;
-	__le32 fbo_lo;
-	__le32 fbo_hi;
-};
-
-struct ustorm_rdma_task_st_ctx {
-	struct regpair temp[2];
-};
-
-struct ustorm_rdma_task_ag_ctx {
-	u8 reserved;
-	u8 byte1;
-	__le16 icid;
-	u8 flags0;
-#define USTORM_RDMA_TASK_AG_CTX_CONNECTION_TYPE_MASK         0xF
-#define USTORM_RDMA_TASK_AG_CTX_CONNECTION_TYPE_SHIFT        0
-#define USTORM_RDMA_TASK_AG_CTX_EXIST_IN_QM0_MASK            0x1
-#define USTORM_RDMA_TASK_AG_CTX_EXIST_IN_QM0_SHIFT           4
-#define USTORM_RDMA_TASK_AG_CTX_DIF_RUNT_VALID_MASK          0x1
-#define USTORM_RDMA_TASK_AG_CTX_DIF_RUNT_VALID_SHIFT         5
-#define USTORM_RDMA_TASK_AG_CTX_DIF_WRITE_RESULT_CF_MASK     0x3
-#define USTORM_RDMA_TASK_AG_CTX_DIF_WRITE_RESULT_CF_SHIFT    6
-	u8 flags1;
-#define USTORM_RDMA_TASK_AG_CTX_DIF_RESULT_TOGGLE_BIT_MASK   0x3
-#define USTORM_RDMA_TASK_AG_CTX_DIF_RESULT_TOGGLE_BIT_SHIFT  0
-#define USTORM_RDMA_TASK_AG_CTX_DIF_TX_IO_FLG_MASK           0x3
-#define USTORM_RDMA_TASK_AG_CTX_DIF_TX_IO_FLG_SHIFT          2
-#define USTORM_RDMA_TASK_AG_CTX_CF3_MASK                     0x3
-#define USTORM_RDMA_TASK_AG_CTX_CF3_SHIFT                    4
-#define USTORM_RDMA_TASK_AG_CTX_DIF_ERROR_CF_MASK            0x3
-#define USTORM_RDMA_TASK_AG_CTX_DIF_ERROR_CF_SHIFT           6
-	u8 flags2;
-#define USTORM_RDMA_TASK_AG_CTX_DIF_WRITE_RESULT_CF_EN_MASK  0x1
-#define USTORM_RDMA_TASK_AG_CTX_DIF_WRITE_RESULT_CF_EN_SHIFT 0
-#define USTORM_RDMA_TASK_AG_CTX_RESERVED2_MASK               0x1
-#define USTORM_RDMA_TASK_AG_CTX_RESERVED2_SHIFT              1
-#define USTORM_RDMA_TASK_AG_CTX_RESERVED3_MASK               0x1
-#define USTORM_RDMA_TASK_AG_CTX_RESERVED3_SHIFT              2
-#define USTORM_RDMA_TASK_AG_CTX_CF3EN_MASK                   0x1
-#define USTORM_RDMA_TASK_AG_CTX_CF3EN_SHIFT                  3
-#define USTORM_RDMA_TASK_AG_CTX_DIF_ERROR_CF_EN_MASK         0x1
-#define USTORM_RDMA_TASK_AG_CTX_DIF_ERROR_CF_EN_SHIFT        4
-#define USTORM_RDMA_TASK_AG_CTX_RULE0EN_MASK                 0x1
-#define USTORM_RDMA_TASK_AG_CTX_RULE0EN_SHIFT                5
-#define USTORM_RDMA_TASK_AG_CTX_RULE1EN_MASK                 0x1
-#define USTORM_RDMA_TASK_AG_CTX_RULE1EN_SHIFT                6
-#define USTORM_RDMA_TASK_AG_CTX_RULE2EN_MASK                 0x1
-#define USTORM_RDMA_TASK_AG_CTX_RULE2EN_SHIFT                7
-	u8 flags3;
-#define USTORM_RDMA_TASK_AG_CTX_RULE3EN_MASK                 0x1
-#define USTORM_RDMA_TASK_AG_CTX_RULE3EN_SHIFT                0
-#define USTORM_RDMA_TASK_AG_CTX_RULE4EN_MASK                 0x1
-#define USTORM_RDMA_TASK_AG_CTX_RULE4EN_SHIFT                1
-#define USTORM_RDMA_TASK_AG_CTX_RULE5EN_MASK                 0x1
-#define USTORM_RDMA_TASK_AG_CTX_RULE5EN_SHIFT                2
-#define USTORM_RDMA_TASK_AG_CTX_RULE6EN_MASK                 0x1
-#define USTORM_RDMA_TASK_AG_CTX_RULE6EN_SHIFT                3
-#define USTORM_RDMA_TASK_AG_CTX_DIF_ERROR_TYPE_MASK          0xF
-#define USTORM_RDMA_TASK_AG_CTX_DIF_ERROR_TYPE_SHIFT         4
-	__le32 dif_err_intervals;
-	__le32 dif_error_1st_interval;
-	__le32 reg2;
-	__le32 dif_runt_value;
-	__le32 reg4;
-	__le32 reg5;
-};
-
-struct rdma_task_context {
-	struct ystorm_rdma_task_st_ctx ystorm_st_context;
-	struct ystorm_rdma_task_ag_ctx ystorm_ag_context;
-	struct tdif_task_context tdif_context;
-	struct mstorm_rdma_task_ag_ctx mstorm_ag_context;
-	struct mstorm_rdma_task_st_ctx mstorm_st_context;
-	struct rdif_task_context rdif_context;
-	struct ustorm_rdma_task_st_ctx ustorm_st_context;
-	struct regpair ustorm_st_padding[2];
-	struct ustorm_rdma_task_ag_ctx ustorm_ag_context;
-};
-
+/* RDMA Tid type enumeration (for register_tid ramrod) */
 enum rdma_tid_type {
 	RDMA_TID_REGISTERED_MR,
 	RDMA_TID_FMR,
@@ -6556,214 +7108,214 @@ enum rdma_tid_type {
 	MAX_RDMA_TID_TYPE
 };
 
-struct xstorm_roce_conn_ag_ctx_dq_ext_ld_part {
+struct e4_xstorm_roce_conn_ag_ctx_dq_ext_ld_part {
 	u8 reserved0;
 	u8 state;
 	u8 flags0;
-#define XSTORMROCECONNAGCTXDQEXTLDPART_EXIST_IN_QM0_MASK      0x1
-#define XSTORMROCECONNAGCTXDQEXTLDPART_EXIST_IN_QM0_SHIFT     0
-#define XSTORMROCECONNAGCTXDQEXTLDPART_BIT1_MASK              0x1
-#define XSTORMROCECONNAGCTXDQEXTLDPART_BIT1_SHIFT             1
-#define XSTORMROCECONNAGCTXDQEXTLDPART_BIT2_MASK              0x1
-#define XSTORMROCECONNAGCTXDQEXTLDPART_BIT2_SHIFT             2
-#define XSTORMROCECONNAGCTXDQEXTLDPART_EXIST_IN_QM3_MASK      0x1
-#define XSTORMROCECONNAGCTXDQEXTLDPART_EXIST_IN_QM3_SHIFT     3
-#define XSTORMROCECONNAGCTXDQEXTLDPART_BIT4_MASK              0x1
-#define XSTORMROCECONNAGCTXDQEXTLDPART_BIT4_SHIFT             4
-#define XSTORMROCECONNAGCTXDQEXTLDPART_BIT5_MASK              0x1
-#define XSTORMROCECONNAGCTXDQEXTLDPART_BIT5_SHIFT             5
-#define XSTORMROCECONNAGCTXDQEXTLDPART_BIT6_MASK              0x1
-#define XSTORMROCECONNAGCTXDQEXTLDPART_BIT6_SHIFT             6
-#define XSTORMROCECONNAGCTXDQEXTLDPART_BIT7_MASK              0x1
-#define XSTORMROCECONNAGCTXDQEXTLDPART_BIT7_SHIFT             7
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_EXIST_IN_QM0_MASK	0x1
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_EXIST_IN_QM0_SHIFT	0
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_BIT1_MASK		0x1
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_BIT1_SHIFT		1
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_BIT2_MASK		0x1
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_BIT2_SHIFT		2
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_EXIST_IN_QM3_MASK	0x1
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_EXIST_IN_QM3_SHIFT	3
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_BIT4_MASK		0x1
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_BIT4_SHIFT		4
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_BIT5_MASK		0x1
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_BIT5_SHIFT		5
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_BIT6_MASK		0x1
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_BIT6_SHIFT		6
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_BIT7_MASK		0x1
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_BIT7_SHIFT		7
 	u8 flags1;
-#define XSTORMROCECONNAGCTXDQEXTLDPART_BIT8_MASK              0x1
-#define XSTORMROCECONNAGCTXDQEXTLDPART_BIT8_SHIFT             0
-#define XSTORMROCECONNAGCTXDQEXTLDPART_BIT9_MASK              0x1
-#define XSTORMROCECONNAGCTXDQEXTLDPART_BIT9_SHIFT             1
-#define XSTORMROCECONNAGCTXDQEXTLDPART_BIT10_MASK             0x1
-#define XSTORMROCECONNAGCTXDQEXTLDPART_BIT10_SHIFT            2
-#define XSTORMROCECONNAGCTXDQEXTLDPART_BIT11_MASK             0x1
-#define XSTORMROCECONNAGCTXDQEXTLDPART_BIT11_SHIFT            3
-#define XSTORMROCECONNAGCTXDQEXTLDPART_BIT12_MASK             0x1
-#define XSTORMROCECONNAGCTXDQEXTLDPART_BIT12_SHIFT            4
-#define XSTORMROCECONNAGCTXDQEXTLDPART_MSTORM_FLUSH_MASK      0x1
-#define XSTORMROCECONNAGCTXDQEXTLDPART_MSTORM_FLUSH_SHIFT     5
-#define XSTORMROCECONNAGCTXDQEXTLDPART_BIT14_MASK             0x1
-#define XSTORMROCECONNAGCTXDQEXTLDPART_BIT14_SHIFT            6
-#define XSTORMROCECONNAGCTXDQEXTLDPART_YSTORM_FLUSH_MASK      0x1
-#define XSTORMROCECONNAGCTXDQEXTLDPART_YSTORM_FLUSH_SHIFT     7
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_BIT8_MASK		0x1
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_BIT8_SHIFT		0
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_BIT9_MASK		0x1
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_BIT9_SHIFT		1
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_BIT10_MASK		0x1
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_BIT10_SHIFT		2
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_BIT11_MASK		0x1
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_BIT11_SHIFT		3
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_BIT12_MASK		0x1
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_BIT12_SHIFT		4
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_MSTORM_FLUSH_MASK	0x1
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_MSTORM_FLUSH_SHIFT	5
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_BIT14_MASK		0x1
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_BIT14_SHIFT		6
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_YSTORM_FLUSH_MASK	0x1
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_YSTORM_FLUSH_SHIFT	7
 	u8 flags2;
-#define XSTORMROCECONNAGCTXDQEXTLDPART_CF0_MASK               0x3
-#define XSTORMROCECONNAGCTXDQEXTLDPART_CF0_SHIFT              0
-#define XSTORMROCECONNAGCTXDQEXTLDPART_CF1_MASK               0x3
-#define XSTORMROCECONNAGCTXDQEXTLDPART_CF1_SHIFT              2
-#define XSTORMROCECONNAGCTXDQEXTLDPART_CF2_MASK               0x3
-#define XSTORMROCECONNAGCTXDQEXTLDPART_CF2_SHIFT              4
-#define XSTORMROCECONNAGCTXDQEXTLDPART_CF3_MASK               0x3
-#define XSTORMROCECONNAGCTXDQEXTLDPART_CF3_SHIFT              6
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF0_MASK	0x3
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF0_SHIFT	0
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF1_MASK	0x3
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF1_SHIFT	2
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF2_MASK	0x3
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF2_SHIFT	4
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF3_MASK	0x3
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF3_SHIFT	6
 	u8 flags3;
-#define XSTORMROCECONNAGCTXDQEXTLDPART_CF4_MASK               0x3
-#define XSTORMROCECONNAGCTXDQEXTLDPART_CF4_SHIFT              0
-#define XSTORMROCECONNAGCTXDQEXTLDPART_CF5_MASK               0x3
-#define XSTORMROCECONNAGCTXDQEXTLDPART_CF5_SHIFT              2
-#define XSTORMROCECONNAGCTXDQEXTLDPART_CF6_MASK               0x3
-#define XSTORMROCECONNAGCTXDQEXTLDPART_CF6_SHIFT              4
-#define XSTORMROCECONNAGCTXDQEXTLDPART_FLUSH_Q0_CF_MASK       0x3
-#define XSTORMROCECONNAGCTXDQEXTLDPART_FLUSH_Q0_CF_SHIFT      6
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF4_MASK		0x3
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF4_SHIFT		0
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF5_MASK		0x3
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF5_SHIFT		2
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF6_MASK		0x3
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF6_SHIFT		4
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_FLUSH_Q0_CF_MASK	0x3
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_FLUSH_Q0_CF_SHIFT	6
 	u8 flags4;
-#define XSTORMROCECONNAGCTXDQEXTLDPART_CF8_MASK               0x3
-#define XSTORMROCECONNAGCTXDQEXTLDPART_CF8_SHIFT              0
-#define XSTORMROCECONNAGCTXDQEXTLDPART_CF9_MASK               0x3
-#define XSTORMROCECONNAGCTXDQEXTLDPART_CF9_SHIFT              2
-#define XSTORMROCECONNAGCTXDQEXTLDPART_CF10_MASK              0x3
-#define XSTORMROCECONNAGCTXDQEXTLDPART_CF10_SHIFT             4
-#define XSTORMROCECONNAGCTXDQEXTLDPART_CF11_MASK              0x3
-#define XSTORMROCECONNAGCTXDQEXTLDPART_CF11_SHIFT             6
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF8_MASK	0x3
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF8_SHIFT	0
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF9_MASK	0x3
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF9_SHIFT	2
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF10_MASK	0x3
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF10_SHIFT	4
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF11_MASK	0x3
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF11_SHIFT	6
 	u8 flags5;
-#define XSTORMROCECONNAGCTXDQEXTLDPART_CF12_MASK              0x3
-#define XSTORMROCECONNAGCTXDQEXTLDPART_CF12_SHIFT             0
-#define XSTORMROCECONNAGCTXDQEXTLDPART_CF13_MASK              0x3
-#define XSTORMROCECONNAGCTXDQEXTLDPART_CF13_SHIFT             2
-#define XSTORMROCECONNAGCTXDQEXTLDPART_CF14_MASK              0x3
-#define XSTORMROCECONNAGCTXDQEXTLDPART_CF14_SHIFT             4
-#define XSTORMROCECONNAGCTXDQEXTLDPART_CF15_MASK              0x3
-#define XSTORMROCECONNAGCTXDQEXTLDPART_CF15_SHIFT             6
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF12_MASK	0x3
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF12_SHIFT	0
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF13_MASK	0x3
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF13_SHIFT	2
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF14_MASK	0x3
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF14_SHIFT	4
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF15_MASK	0x3
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF15_SHIFT	6
 	u8 flags6;
-#define XSTORMROCECONNAGCTXDQEXTLDPART_CF16_MASK              0x3
-#define XSTORMROCECONNAGCTXDQEXTLDPART_CF16_SHIFT             0
-#define XSTORMROCECONNAGCTXDQEXTLDPART_CF17_MASK              0x3
-#define XSTORMROCECONNAGCTXDQEXTLDPART_CF17_SHIFT             2
-#define XSTORMROCECONNAGCTXDQEXTLDPART_CF18_MASK              0x3
-#define XSTORMROCECONNAGCTXDQEXTLDPART_CF18_SHIFT             4
-#define XSTORMROCECONNAGCTXDQEXTLDPART_CF19_MASK              0x3
-#define XSTORMROCECONNAGCTXDQEXTLDPART_CF19_SHIFT             6
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF16_MASK	0x3
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF16_SHIFT	0
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF17_MASK	0x3
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF17_SHIFT	2
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF18_MASK	0x3
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF18_SHIFT	4
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF19_MASK	0x3
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF19_SHIFT	6
 	u8 flags7;
-#define XSTORMROCECONNAGCTXDQEXTLDPART_CF20_MASK              0x3
-#define XSTORMROCECONNAGCTXDQEXTLDPART_CF20_SHIFT             0
-#define XSTORMROCECONNAGCTXDQEXTLDPART_CF21_MASK              0x3
-#define XSTORMROCECONNAGCTXDQEXTLDPART_CF21_SHIFT             2
-#define XSTORMROCECONNAGCTXDQEXTLDPART_SLOW_PATH_MASK         0x3
-#define XSTORMROCECONNAGCTXDQEXTLDPART_SLOW_PATH_SHIFT        4
-#define XSTORMROCECONNAGCTXDQEXTLDPART_CF0EN_MASK             0x1
-#define XSTORMROCECONNAGCTXDQEXTLDPART_CF0EN_SHIFT            6
-#define XSTORMROCECONNAGCTXDQEXTLDPART_CF1EN_MASK             0x1
-#define XSTORMROCECONNAGCTXDQEXTLDPART_CF1EN_SHIFT            7
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF20_MASK		0x3
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF20_SHIFT		0
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF21_MASK		0x3
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF21_SHIFT		2
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_SLOW_PATH_MASK		0x3
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_SLOW_PATH_SHIFT	4
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF0EN_MASK		0x1
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF0EN_SHIFT		6
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF1EN_MASK		0x1
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF1EN_SHIFT		7
 	u8 flags8;
-#define XSTORMROCECONNAGCTXDQEXTLDPART_CF2EN_MASK             0x1
-#define XSTORMROCECONNAGCTXDQEXTLDPART_CF2EN_SHIFT            0
-#define XSTORMROCECONNAGCTXDQEXTLDPART_CF3EN_MASK             0x1
-#define XSTORMROCECONNAGCTXDQEXTLDPART_CF3EN_SHIFT            1
-#define XSTORMROCECONNAGCTXDQEXTLDPART_CF4EN_MASK             0x1
-#define XSTORMROCECONNAGCTXDQEXTLDPART_CF4EN_SHIFT            2
-#define XSTORMROCECONNAGCTXDQEXTLDPART_CF5EN_MASK             0x1
-#define XSTORMROCECONNAGCTXDQEXTLDPART_CF5EN_SHIFT            3
-#define XSTORMROCECONNAGCTXDQEXTLDPART_CF6EN_MASK             0x1
-#define XSTORMROCECONNAGCTXDQEXTLDPART_CF6EN_SHIFT            4
-#define XSTORMROCECONNAGCTXDQEXTLDPART_FLUSH_Q0_CF_EN_MASK    0x1
-#define XSTORMROCECONNAGCTXDQEXTLDPART_FLUSH_Q0_CF_EN_SHIFT   5
-#define XSTORMROCECONNAGCTXDQEXTLDPART_CF8EN_MASK             0x1
-#define XSTORMROCECONNAGCTXDQEXTLDPART_CF8EN_SHIFT            6
-#define XSTORMROCECONNAGCTXDQEXTLDPART_CF9EN_MASK             0x1
-#define XSTORMROCECONNAGCTXDQEXTLDPART_CF9EN_SHIFT            7
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF2EN_MASK		0x1
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF2EN_SHIFT		0
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF3EN_MASK		0x1
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF3EN_SHIFT		1
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF4EN_MASK		0x1
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF4EN_SHIFT		2
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF5EN_MASK		0x1
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF5EN_SHIFT		3
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF6EN_MASK		0x1
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF6EN_SHIFT		4
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_FLUSH_Q0_CF_EN_MASK	0x1
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_FLUSH_Q0_CF_EN_SHIFT	5
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF8EN_MASK		0x1
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF8EN_SHIFT		6
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF9EN_MASK		0x1
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF9EN_SHIFT		7
 	u8 flags9;
-#define XSTORMROCECONNAGCTXDQEXTLDPART_CF10EN_MASK            0x1
-#define XSTORMROCECONNAGCTXDQEXTLDPART_CF10EN_SHIFT           0
-#define XSTORMROCECONNAGCTXDQEXTLDPART_CF11EN_MASK            0x1
-#define XSTORMROCECONNAGCTXDQEXTLDPART_CF11EN_SHIFT           1
-#define XSTORMROCECONNAGCTXDQEXTLDPART_CF12EN_MASK            0x1
-#define XSTORMROCECONNAGCTXDQEXTLDPART_CF12EN_SHIFT           2
-#define XSTORMROCECONNAGCTXDQEXTLDPART_CF13EN_MASK            0x1
-#define XSTORMROCECONNAGCTXDQEXTLDPART_CF13EN_SHIFT           3
-#define XSTORMROCECONNAGCTXDQEXTLDPART_CF14EN_MASK            0x1
-#define XSTORMROCECONNAGCTXDQEXTLDPART_CF14EN_SHIFT           4
-#define XSTORMROCECONNAGCTXDQEXTLDPART_CF15EN_MASK            0x1
-#define XSTORMROCECONNAGCTXDQEXTLDPART_CF15EN_SHIFT           5
-#define XSTORMROCECONNAGCTXDQEXTLDPART_CF16EN_MASK            0x1
-#define XSTORMROCECONNAGCTXDQEXTLDPART_CF16EN_SHIFT           6
-#define XSTORMROCECONNAGCTXDQEXTLDPART_CF17EN_MASK            0x1
-#define XSTORMROCECONNAGCTXDQEXTLDPART_CF17EN_SHIFT           7
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF10EN_MASK	0x1
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF10EN_SHIFT	0
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF11EN_MASK	0x1
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF11EN_SHIFT	1
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF12EN_MASK	0x1
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF12EN_SHIFT	2
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF13EN_MASK	0x1
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF13EN_SHIFT	3
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF14EN_MASK	0x1
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF14EN_SHIFT	4
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF15EN_MASK	0x1
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF15EN_SHIFT	5
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF16EN_MASK	0x1
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF16EN_SHIFT	6
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF17EN_MASK	0x1
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF17EN_SHIFT	7
 	u8 flags10;
-#define XSTORMROCECONNAGCTXDQEXTLDPART_CF18EN_MASK            0x1
-#define XSTORMROCECONNAGCTXDQEXTLDPART_CF18EN_SHIFT           0
-#define XSTORMROCECONNAGCTXDQEXTLDPART_CF19EN_MASK            0x1
-#define XSTORMROCECONNAGCTXDQEXTLDPART_CF19EN_SHIFT           1
-#define XSTORMROCECONNAGCTXDQEXTLDPART_CF20EN_MASK            0x1
-#define XSTORMROCECONNAGCTXDQEXTLDPART_CF20EN_SHIFT           2
-#define XSTORMROCECONNAGCTXDQEXTLDPART_CF21EN_MASK            0x1
-#define XSTORMROCECONNAGCTXDQEXTLDPART_CF21EN_SHIFT           3
-#define XSTORMROCECONNAGCTXDQEXTLDPART_SLOW_PATH_EN_MASK      0x1
-#define XSTORMROCECONNAGCTXDQEXTLDPART_SLOW_PATH_EN_SHIFT     4
-#define XSTORMROCECONNAGCTXDQEXTLDPART_CF23EN_MASK            0x1
-#define XSTORMROCECONNAGCTXDQEXTLDPART_CF23EN_SHIFT           5
-#define XSTORMROCECONNAGCTXDQEXTLDPART_RULE0EN_MASK           0x1
-#define XSTORMROCECONNAGCTXDQEXTLDPART_RULE0EN_SHIFT          6
-#define XSTORMROCECONNAGCTXDQEXTLDPART_RULE1EN_MASK           0x1
-#define XSTORMROCECONNAGCTXDQEXTLDPART_RULE1EN_SHIFT          7
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF18EN_MASK		0x1
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF18EN_SHIFT		0
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF19EN_MASK		0x1
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF19EN_SHIFT		1
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF20EN_MASK		0x1
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF20EN_SHIFT		2
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF21EN_MASK		0x1
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF21EN_SHIFT		3
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_SLOW_PATH_EN_MASK	0x1
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_SLOW_PATH_EN_SHIFT	4
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF23EN_MASK		0x1
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF23EN_SHIFT		5
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_RULE0EN_MASK		0x1
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_RULE0EN_SHIFT		6
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_RULE1EN_MASK		0x1
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_RULE1EN_SHIFT		7
 	u8 flags11;
-#define XSTORMROCECONNAGCTXDQEXTLDPART_RULE2EN_MASK           0x1
-#define XSTORMROCECONNAGCTXDQEXTLDPART_RULE2EN_SHIFT          0
-#define XSTORMROCECONNAGCTXDQEXTLDPART_RULE3EN_MASK           0x1
-#define XSTORMROCECONNAGCTXDQEXTLDPART_RULE3EN_SHIFT          1
-#define XSTORMROCECONNAGCTXDQEXTLDPART_RULE4EN_MASK           0x1
-#define XSTORMROCECONNAGCTXDQEXTLDPART_RULE4EN_SHIFT          2
-#define XSTORMROCECONNAGCTXDQEXTLDPART_RULE5EN_MASK           0x1
-#define XSTORMROCECONNAGCTXDQEXTLDPART_RULE5EN_SHIFT          3
-#define XSTORMROCECONNAGCTXDQEXTLDPART_RULE6EN_MASK           0x1
-#define XSTORMROCECONNAGCTXDQEXTLDPART_RULE6EN_SHIFT          4
-#define XSTORMROCECONNAGCTXDQEXTLDPART_RULE7EN_MASK           0x1
-#define XSTORMROCECONNAGCTXDQEXTLDPART_RULE7EN_SHIFT          5
-#define XSTORMROCECONNAGCTXDQEXTLDPART_A0_RESERVED1_MASK      0x1
-#define XSTORMROCECONNAGCTXDQEXTLDPART_A0_RESERVED1_SHIFT     6
-#define XSTORMROCECONNAGCTXDQEXTLDPART_RULE9EN_MASK           0x1
-#define XSTORMROCECONNAGCTXDQEXTLDPART_RULE9EN_SHIFT          7
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_RULE2EN_MASK		0x1
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_RULE2EN_SHIFT		0
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_RULE3EN_MASK		0x1
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_RULE3EN_SHIFT		1
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_RULE4EN_MASK		0x1
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_RULE4EN_SHIFT		2
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_RULE5EN_MASK		0x1
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_RULE5EN_SHIFT		3
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_RULE6EN_MASK		0x1
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_RULE6EN_SHIFT		4
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_RULE7EN_MASK		0x1
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_RULE7EN_SHIFT		5
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_A0_RESERVED1_MASK	0x1
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_A0_RESERVED1_SHIFT	6
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_RULE9EN_MASK		0x1
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_RULE9EN_SHIFT		7
 	u8 flags12;
-#define XSTORMROCECONNAGCTXDQEXTLDPART_RULE10EN_MASK          0x1
-#define XSTORMROCECONNAGCTXDQEXTLDPART_RULE10EN_SHIFT         0
-#define XSTORMROCECONNAGCTXDQEXTLDPART_RULE11EN_MASK          0x1
-#define XSTORMROCECONNAGCTXDQEXTLDPART_RULE11EN_SHIFT         1
-#define XSTORMROCECONNAGCTXDQEXTLDPART_A0_RESERVED2_MASK      0x1
-#define XSTORMROCECONNAGCTXDQEXTLDPART_A0_RESERVED2_SHIFT     2
-#define XSTORMROCECONNAGCTXDQEXTLDPART_A0_RESERVED3_MASK      0x1
-#define XSTORMROCECONNAGCTXDQEXTLDPART_A0_RESERVED3_SHIFT     3
-#define XSTORMROCECONNAGCTXDQEXTLDPART_RULE14EN_MASK          0x1
-#define XSTORMROCECONNAGCTXDQEXTLDPART_RULE14EN_SHIFT         4
-#define XSTORMROCECONNAGCTXDQEXTLDPART_RULE15EN_MASK          0x1
-#define XSTORMROCECONNAGCTXDQEXTLDPART_RULE15EN_SHIFT         5
-#define XSTORMROCECONNAGCTXDQEXTLDPART_RULE16EN_MASK          0x1
-#define XSTORMROCECONNAGCTXDQEXTLDPART_RULE16EN_SHIFT         6
-#define XSTORMROCECONNAGCTXDQEXTLDPART_RULE17EN_MASK          0x1
-#define XSTORMROCECONNAGCTXDQEXTLDPART_RULE17EN_SHIFT         7
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_RULE10EN_MASK		0x1
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_RULE10EN_SHIFT		0
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_RULE11EN_MASK		0x1
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_RULE11EN_SHIFT		1
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_A0_RESERVED2_MASK	0x1
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_A0_RESERVED2_SHIFT	2
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_A0_RESERVED3_MASK	0x1
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_A0_RESERVED3_SHIFT	3
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_RULE14EN_MASK		0x1
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_RULE14EN_SHIFT		4
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_RULE15EN_MASK		0x1
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_RULE15EN_SHIFT		5
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_RULE16EN_MASK		0x1
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_RULE16EN_SHIFT		6
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_RULE17EN_MASK		0x1
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_RULE17EN_SHIFT		7
 	u8 flags13;
-#define XSTORMROCECONNAGCTXDQEXTLDPART_RULE18EN_MASK          0x1
-#define XSTORMROCECONNAGCTXDQEXTLDPART_RULE18EN_SHIFT         0
-#define XSTORMROCECONNAGCTXDQEXTLDPART_RULE19EN_MASK          0x1
-#define XSTORMROCECONNAGCTXDQEXTLDPART_RULE19EN_SHIFT         1
-#define XSTORMROCECONNAGCTXDQEXTLDPART_A0_RESERVED4_MASK      0x1
-#define XSTORMROCECONNAGCTXDQEXTLDPART_A0_RESERVED4_SHIFT     2
-#define XSTORMROCECONNAGCTXDQEXTLDPART_A0_RESERVED5_MASK      0x1
-#define XSTORMROCECONNAGCTXDQEXTLDPART_A0_RESERVED5_SHIFT     3
-#define XSTORMROCECONNAGCTXDQEXTLDPART_A0_RESERVED6_MASK      0x1
-#define XSTORMROCECONNAGCTXDQEXTLDPART_A0_RESERVED6_SHIFT     4
-#define XSTORMROCECONNAGCTXDQEXTLDPART_A0_RESERVED7_MASK      0x1
-#define XSTORMROCECONNAGCTXDQEXTLDPART_A0_RESERVED7_SHIFT     5
-#define XSTORMROCECONNAGCTXDQEXTLDPART_A0_RESERVED8_MASK      0x1
-#define XSTORMROCECONNAGCTXDQEXTLDPART_A0_RESERVED8_SHIFT     6
-#define XSTORMROCECONNAGCTXDQEXTLDPART_A0_RESERVED9_MASK      0x1
-#define XSTORMROCECONNAGCTXDQEXTLDPART_A0_RESERVED9_SHIFT     7
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_RULE18EN_MASK		0x1
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_RULE18EN_SHIFT		0
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_RULE19EN_MASK		0x1
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_RULE19EN_SHIFT		1
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_A0_RESERVED4_MASK	0x1
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_A0_RESERVED4_SHIFT	2
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_A0_RESERVED5_MASK	0x1
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_A0_RESERVED5_SHIFT	3
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_A0_RESERVED6_MASK	0x1
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_A0_RESERVED6_SHIFT	4
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_A0_RESERVED7_MASK	0x1
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_A0_RESERVED7_SHIFT	5
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_A0_RESERVED8_MASK	0x1
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_A0_RESERVED8_SHIFT	6
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_A0_RESERVED9_MASK	0x1
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_A0_RESERVED9_SHIFT	7
 	u8 flags14;
-#define XSTORMROCECONNAGCTXDQEXTLDPART_MIGRATION_MASK         0x1
-#define XSTORMROCECONNAGCTXDQEXTLDPART_MIGRATION_SHIFT        0
-#define XSTORMROCECONNAGCTXDQEXTLDPART_BIT17_MASK             0x1
-#define XSTORMROCECONNAGCTXDQEXTLDPART_BIT17_SHIFT            1
-#define XSTORMROCECONNAGCTXDQEXTLDPART_DPM_PORT_NUM_MASK      0x3
-#define XSTORMROCECONNAGCTXDQEXTLDPART_DPM_PORT_NUM_SHIFT     2
-#define XSTORMROCECONNAGCTXDQEXTLDPART_RESERVED_MASK          0x1
-#define XSTORMROCECONNAGCTXDQEXTLDPART_RESERVED_SHIFT         4
-#define XSTORMROCECONNAGCTXDQEXTLDPART_ROCE_EDPM_ENABLE_MASK  0x1
-#define XSTORMROCECONNAGCTXDQEXTLDPART_ROCE_EDPM_ENABLE_SHIFT 5
-#define XSTORMROCECONNAGCTXDQEXTLDPART_CF23_MASK              0x3
-#define XSTORMROCECONNAGCTXDQEXTLDPART_CF23_SHIFT             6
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_MIGRATION_MASK		0x1
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_MIGRATION_SHIFT	0
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_BIT17_MASK		0x1
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_BIT17_SHIFT		1
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_DPM_PORT_NUM_MASK	0x3
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_DPM_PORT_NUM_SHIFT	2
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_RESERVED_MASK		0x1
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_RESERVED_SHIFT		4
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_ROCE_EDPM_ENABLE_MASK	0x1
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_ROCE_EDPM_ENABLE_SHIFT	5
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF23_MASK		0x3
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF23_SHIFT		6
 	u8 byte2;
 	__le16 physical_q0;
 	__le16 word1;
@@ -6783,126 +7335,126 @@ struct xstorm_roce_conn_ag_ctx_dq_ext_ld_part {
 	__le32 reg4;
 };
 
-struct mstorm_rdma_conn_ag_ctx {
+struct e4_mstorm_rdma_conn_ag_ctx {
 	u8 byte0;
 	u8 byte1;
 	u8 flags0;
-#define MSTORM_RDMA_CONN_AG_CTX_BIT0_MASK     0x1
-#define MSTORM_RDMA_CONN_AG_CTX_BIT0_SHIFT    0
-#define MSTORM_RDMA_CONN_AG_CTX_BIT1_MASK     0x1
-#define MSTORM_RDMA_CONN_AG_CTX_BIT1_SHIFT    1
-#define MSTORM_RDMA_CONN_AG_CTX_CF0_MASK      0x3
-#define MSTORM_RDMA_CONN_AG_CTX_CF0_SHIFT     2
-#define MSTORM_RDMA_CONN_AG_CTX_CF1_MASK      0x3
-#define MSTORM_RDMA_CONN_AG_CTX_CF1_SHIFT     4
-#define MSTORM_RDMA_CONN_AG_CTX_CF2_MASK      0x3
-#define MSTORM_RDMA_CONN_AG_CTX_CF2_SHIFT     6
+#define E4_MSTORM_RDMA_CONN_AG_CTX_BIT0_MASK	0x1
+#define E4_MSTORM_RDMA_CONN_AG_CTX_BIT0_SHIFT	0
+#define E4_MSTORM_RDMA_CONN_AG_CTX_BIT1_MASK	0x1
+#define E4_MSTORM_RDMA_CONN_AG_CTX_BIT1_SHIFT	1
+#define E4_MSTORM_RDMA_CONN_AG_CTX_CF0_MASK	0x3
+#define E4_MSTORM_RDMA_CONN_AG_CTX_CF0_SHIFT	2
+#define E4_MSTORM_RDMA_CONN_AG_CTX_CF1_MASK	0x3
+#define E4_MSTORM_RDMA_CONN_AG_CTX_CF1_SHIFT	4
+#define E4_MSTORM_RDMA_CONN_AG_CTX_CF2_MASK	0x3
+#define E4_MSTORM_RDMA_CONN_AG_CTX_CF2_SHIFT	6
 	u8 flags1;
-#define MSTORM_RDMA_CONN_AG_CTX_CF0EN_MASK    0x1
-#define MSTORM_RDMA_CONN_AG_CTX_CF0EN_SHIFT   0
-#define MSTORM_RDMA_CONN_AG_CTX_CF1EN_MASK    0x1
-#define MSTORM_RDMA_CONN_AG_CTX_CF1EN_SHIFT   1
-#define MSTORM_RDMA_CONN_AG_CTX_CF2EN_MASK    0x1
-#define MSTORM_RDMA_CONN_AG_CTX_CF2EN_SHIFT   2
-#define MSTORM_RDMA_CONN_AG_CTX_RULE0EN_MASK  0x1
-#define MSTORM_RDMA_CONN_AG_CTX_RULE0EN_SHIFT 3
-#define MSTORM_RDMA_CONN_AG_CTX_RULE1EN_MASK  0x1
-#define MSTORM_RDMA_CONN_AG_CTX_RULE1EN_SHIFT 4
-#define MSTORM_RDMA_CONN_AG_CTX_RULE2EN_MASK  0x1
-#define MSTORM_RDMA_CONN_AG_CTX_RULE2EN_SHIFT 5
-#define MSTORM_RDMA_CONN_AG_CTX_RULE3EN_MASK  0x1
-#define MSTORM_RDMA_CONN_AG_CTX_RULE3EN_SHIFT 6
-#define MSTORM_RDMA_CONN_AG_CTX_RULE4EN_MASK  0x1
-#define MSTORM_RDMA_CONN_AG_CTX_RULE4EN_SHIFT 7
+#define E4_MSTORM_RDMA_CONN_AG_CTX_CF0EN_MASK		0x1
+#define E4_MSTORM_RDMA_CONN_AG_CTX_CF0EN_SHIFT		0
+#define E4_MSTORM_RDMA_CONN_AG_CTX_CF1EN_MASK		0x1
+#define E4_MSTORM_RDMA_CONN_AG_CTX_CF1EN_SHIFT		1
+#define E4_MSTORM_RDMA_CONN_AG_CTX_CF2EN_MASK		0x1
+#define E4_MSTORM_RDMA_CONN_AG_CTX_CF2EN_SHIFT		2
+#define E4_MSTORM_RDMA_CONN_AG_CTX_RULE0EN_MASK		0x1
+#define E4_MSTORM_RDMA_CONN_AG_CTX_RULE0EN_SHIFT	3
+#define E4_MSTORM_RDMA_CONN_AG_CTX_RULE1EN_MASK		0x1
+#define E4_MSTORM_RDMA_CONN_AG_CTX_RULE1EN_SHIFT	4
+#define E4_MSTORM_RDMA_CONN_AG_CTX_RULE2EN_MASK		0x1
+#define E4_MSTORM_RDMA_CONN_AG_CTX_RULE2EN_SHIFT	5
+#define E4_MSTORM_RDMA_CONN_AG_CTX_RULE3EN_MASK		0x1
+#define E4_MSTORM_RDMA_CONN_AG_CTX_RULE3EN_SHIFT	6
+#define E4_MSTORM_RDMA_CONN_AG_CTX_RULE4EN_MASK		0x1
+#define E4_MSTORM_RDMA_CONN_AG_CTX_RULE4EN_SHIFT	7
 	__le16 word0;
 	__le16 word1;
 	__le32 reg0;
 	__le32 reg1;
 };
 
-struct tstorm_rdma_conn_ag_ctx {
+struct e4_tstorm_rdma_conn_ag_ctx {
 	u8 reserved0;
 	u8 byte1;
 	u8 flags0;
-#define TSTORM_RDMA_CONN_AG_CTX_EXIST_IN_QM0_MASK          0x1
-#define TSTORM_RDMA_CONN_AG_CTX_EXIST_IN_QM0_SHIFT         0
-#define TSTORM_RDMA_CONN_AG_CTX_BIT1_MASK                  0x1
-#define TSTORM_RDMA_CONN_AG_CTX_BIT1_SHIFT                 1
-#define TSTORM_RDMA_CONN_AG_CTX_BIT2_MASK                  0x1
-#define TSTORM_RDMA_CONN_AG_CTX_BIT2_SHIFT                 2
-#define TSTORM_RDMA_CONN_AG_CTX_BIT3_MASK                  0x1
-#define TSTORM_RDMA_CONN_AG_CTX_BIT3_SHIFT                 3
-#define TSTORM_RDMA_CONN_AG_CTX_BIT4_MASK                  0x1
-#define TSTORM_RDMA_CONN_AG_CTX_BIT4_SHIFT                 4
-#define TSTORM_RDMA_CONN_AG_CTX_BIT5_MASK                  0x1
-#define TSTORM_RDMA_CONN_AG_CTX_BIT5_SHIFT                 5
-#define TSTORM_RDMA_CONN_AG_CTX_CF0_MASK                   0x3
-#define TSTORM_RDMA_CONN_AG_CTX_CF0_SHIFT                  6
+#define E4_TSTORM_RDMA_CONN_AG_CTX_EXIST_IN_QM0_MASK	0x1
+#define E4_TSTORM_RDMA_CONN_AG_CTX_EXIST_IN_QM0_SHIFT	0
+#define E4_TSTORM_RDMA_CONN_AG_CTX_BIT1_MASK		0x1
+#define E4_TSTORM_RDMA_CONN_AG_CTX_BIT1_SHIFT		1
+#define E4_TSTORM_RDMA_CONN_AG_CTX_BIT2_MASK		0x1
+#define E4_TSTORM_RDMA_CONN_AG_CTX_BIT2_SHIFT		2
+#define E4_TSTORM_RDMA_CONN_AG_CTX_BIT3_MASK		0x1
+#define E4_TSTORM_RDMA_CONN_AG_CTX_BIT3_SHIFT		3
+#define E4_TSTORM_RDMA_CONN_AG_CTX_BIT4_MASK		0x1
+#define E4_TSTORM_RDMA_CONN_AG_CTX_BIT4_SHIFT		4
+#define E4_TSTORM_RDMA_CONN_AG_CTX_BIT5_MASK		0x1
+#define E4_TSTORM_RDMA_CONN_AG_CTX_BIT5_SHIFT		5
+#define E4_TSTORM_RDMA_CONN_AG_CTX_CF0_MASK		0x3
+#define E4_TSTORM_RDMA_CONN_AG_CTX_CF0_SHIFT		6
 	u8 flags1;
-#define TSTORM_RDMA_CONN_AG_CTX_CF1_MASK                   0x3
-#define TSTORM_RDMA_CONN_AG_CTX_CF1_SHIFT                  0
-#define TSTORM_RDMA_CONN_AG_CTX_CF2_MASK                   0x3
-#define TSTORM_RDMA_CONN_AG_CTX_CF2_SHIFT                  2
-#define TSTORM_RDMA_CONN_AG_CTX_TIMER_STOP_ALL_CF_MASK     0x3
-#define TSTORM_RDMA_CONN_AG_CTX_TIMER_STOP_ALL_CF_SHIFT    4
-#define TSTORM_RDMA_CONN_AG_CTX_FLUSH_Q0_CF_MASK           0x3
-#define TSTORM_RDMA_CONN_AG_CTX_FLUSH_Q0_CF_SHIFT          6
+#define E4_TSTORM_RDMA_CONN_AG_CTX_CF1_MASK			0x3
+#define E4_TSTORM_RDMA_CONN_AG_CTX_CF1_SHIFT			0
+#define E4_TSTORM_RDMA_CONN_AG_CTX_CF2_MASK			0x3
+#define E4_TSTORM_RDMA_CONN_AG_CTX_CF2_SHIFT			2
+#define E4_TSTORM_RDMA_CONN_AG_CTX_TIMER_STOP_ALL_CF_MASK	0x3
+#define E4_TSTORM_RDMA_CONN_AG_CTX_TIMER_STOP_ALL_CF_SHIFT	4
+#define E4_TSTORM_RDMA_CONN_AG_CTX_FLUSH_Q0_CF_MASK		0x3
+#define E4_TSTORM_RDMA_CONN_AG_CTX_FLUSH_Q0_CF_SHIFT		6
 	u8 flags2;
-#define TSTORM_RDMA_CONN_AG_CTX_MSTORM_FLUSH_CF_MASK       0x3
-#define TSTORM_RDMA_CONN_AG_CTX_MSTORM_FLUSH_CF_SHIFT      0
-#define TSTORM_RDMA_CONN_AG_CTX_CF6_MASK                   0x3
-#define TSTORM_RDMA_CONN_AG_CTX_CF6_SHIFT                  2
-#define TSTORM_RDMA_CONN_AG_CTX_CF7_MASK                   0x3
-#define TSTORM_RDMA_CONN_AG_CTX_CF7_SHIFT                  4
-#define TSTORM_RDMA_CONN_AG_CTX_CF8_MASK                   0x3
-#define TSTORM_RDMA_CONN_AG_CTX_CF8_SHIFT                  6
+#define E4_TSTORM_RDMA_CONN_AG_CTX_MSTORM_FLUSH_CF_MASK		0x3
+#define E4_TSTORM_RDMA_CONN_AG_CTX_MSTORM_FLUSH_CF_SHIFT	0
+#define E4_TSTORM_RDMA_CONN_AG_CTX_CF6_MASK			0x3
+#define E4_TSTORM_RDMA_CONN_AG_CTX_CF6_SHIFT			2
+#define E4_TSTORM_RDMA_CONN_AG_CTX_CF7_MASK			0x3
+#define E4_TSTORM_RDMA_CONN_AG_CTX_CF7_SHIFT			4
+#define E4_TSTORM_RDMA_CONN_AG_CTX_CF8_MASK			0x3
+#define E4_TSTORM_RDMA_CONN_AG_CTX_CF8_SHIFT			6
 	u8 flags3;
-#define TSTORM_RDMA_CONN_AG_CTX_CF9_MASK                   0x3
-#define TSTORM_RDMA_CONN_AG_CTX_CF9_SHIFT                  0
-#define TSTORM_RDMA_CONN_AG_CTX_CF10_MASK                  0x3
-#define TSTORM_RDMA_CONN_AG_CTX_CF10_SHIFT                 2
-#define TSTORM_RDMA_CONN_AG_CTX_CF0EN_MASK                 0x1
-#define TSTORM_RDMA_CONN_AG_CTX_CF0EN_SHIFT                4
-#define TSTORM_RDMA_CONN_AG_CTX_CF1EN_MASK                 0x1
-#define TSTORM_RDMA_CONN_AG_CTX_CF1EN_SHIFT                5
-#define TSTORM_RDMA_CONN_AG_CTX_CF2EN_MASK                 0x1
-#define TSTORM_RDMA_CONN_AG_CTX_CF2EN_SHIFT                6
-#define TSTORM_RDMA_CONN_AG_CTX_TIMER_STOP_ALL_CF_EN_MASK  0x1
-#define TSTORM_RDMA_CONN_AG_CTX_TIMER_STOP_ALL_CF_EN_SHIFT 7
+#define E4_TSTORM_RDMA_CONN_AG_CTX_CF9_MASK			0x3
+#define E4_TSTORM_RDMA_CONN_AG_CTX_CF9_SHIFT			0
+#define E4_TSTORM_RDMA_CONN_AG_CTX_CF10_MASK			0x3
+#define E4_TSTORM_RDMA_CONN_AG_CTX_CF10_SHIFT			2
+#define E4_TSTORM_RDMA_CONN_AG_CTX_CF0EN_MASK			0x1
+#define E4_TSTORM_RDMA_CONN_AG_CTX_CF0EN_SHIFT			4
+#define E4_TSTORM_RDMA_CONN_AG_CTX_CF1EN_MASK			0x1
+#define E4_TSTORM_RDMA_CONN_AG_CTX_CF1EN_SHIFT			5
+#define E4_TSTORM_RDMA_CONN_AG_CTX_CF2EN_MASK			0x1
+#define E4_TSTORM_RDMA_CONN_AG_CTX_CF2EN_SHIFT			6
+#define E4_TSTORM_RDMA_CONN_AG_CTX_TIMER_STOP_ALL_CF_EN_MASK	0x1
+#define E4_TSTORM_RDMA_CONN_AG_CTX_TIMER_STOP_ALL_CF_EN_SHIFT	7
 	u8 flags4;
-#define TSTORM_RDMA_CONN_AG_CTX_FLUSH_Q0_CF_EN_MASK        0x1
-#define TSTORM_RDMA_CONN_AG_CTX_FLUSH_Q0_CF_EN_SHIFT       0
-#define TSTORM_RDMA_CONN_AG_CTX_MSTORM_FLUSH_CF_EN_MASK    0x1
-#define TSTORM_RDMA_CONN_AG_CTX_MSTORM_FLUSH_CF_EN_SHIFT   1
-#define TSTORM_RDMA_CONN_AG_CTX_CF6EN_MASK                 0x1
-#define TSTORM_RDMA_CONN_AG_CTX_CF6EN_SHIFT                2
-#define TSTORM_RDMA_CONN_AG_CTX_CF7EN_MASK                 0x1
-#define TSTORM_RDMA_CONN_AG_CTX_CF7EN_SHIFT                3
-#define TSTORM_RDMA_CONN_AG_CTX_CF8EN_MASK                 0x1
-#define TSTORM_RDMA_CONN_AG_CTX_CF8EN_SHIFT                4
-#define TSTORM_RDMA_CONN_AG_CTX_CF9EN_MASK                 0x1
-#define TSTORM_RDMA_CONN_AG_CTX_CF9EN_SHIFT                5
-#define TSTORM_RDMA_CONN_AG_CTX_CF10EN_MASK                0x1
-#define TSTORM_RDMA_CONN_AG_CTX_CF10EN_SHIFT               6
-#define TSTORM_RDMA_CONN_AG_CTX_RULE0EN_MASK               0x1
-#define TSTORM_RDMA_CONN_AG_CTX_RULE0EN_SHIFT              7
+#define E4_TSTORM_RDMA_CONN_AG_CTX_FLUSH_Q0_CF_EN_MASK		0x1
+#define E4_TSTORM_RDMA_CONN_AG_CTX_FLUSH_Q0_CF_EN_SHIFT		0
+#define E4_TSTORM_RDMA_CONN_AG_CTX_MSTORM_FLUSH_CF_EN_MASK	0x1
+#define E4_TSTORM_RDMA_CONN_AG_CTX_MSTORM_FLUSH_CF_EN_SHIFT	1
+#define E4_TSTORM_RDMA_CONN_AG_CTX_CF6EN_MASK			0x1
+#define E4_TSTORM_RDMA_CONN_AG_CTX_CF6EN_SHIFT			2
+#define E4_TSTORM_RDMA_CONN_AG_CTX_CF7EN_MASK			0x1
+#define E4_TSTORM_RDMA_CONN_AG_CTX_CF7EN_SHIFT			3
+#define E4_TSTORM_RDMA_CONN_AG_CTX_CF8EN_MASK			0x1
+#define E4_TSTORM_RDMA_CONN_AG_CTX_CF8EN_SHIFT			4
+#define E4_TSTORM_RDMA_CONN_AG_CTX_CF9EN_MASK			0x1
+#define E4_TSTORM_RDMA_CONN_AG_CTX_CF9EN_SHIFT			5
+#define E4_TSTORM_RDMA_CONN_AG_CTX_CF10EN_MASK			0x1
+#define E4_TSTORM_RDMA_CONN_AG_CTX_CF10EN_SHIFT			6
+#define E4_TSTORM_RDMA_CONN_AG_CTX_RULE0EN_MASK			0x1
+#define E4_TSTORM_RDMA_CONN_AG_CTX_RULE0EN_SHIFT		7
 	u8 flags5;
-#define TSTORM_RDMA_CONN_AG_CTX_RULE1EN_MASK               0x1
-#define TSTORM_RDMA_CONN_AG_CTX_RULE1EN_SHIFT              0
-#define TSTORM_RDMA_CONN_AG_CTX_RULE2EN_MASK               0x1
-#define TSTORM_RDMA_CONN_AG_CTX_RULE2EN_SHIFT              1
-#define TSTORM_RDMA_CONN_AG_CTX_RULE3EN_MASK               0x1
-#define TSTORM_RDMA_CONN_AG_CTX_RULE3EN_SHIFT              2
-#define TSTORM_RDMA_CONN_AG_CTX_RULE4EN_MASK               0x1
-#define TSTORM_RDMA_CONN_AG_CTX_RULE4EN_SHIFT              3
-#define TSTORM_RDMA_CONN_AG_CTX_RULE5EN_MASK               0x1
-#define TSTORM_RDMA_CONN_AG_CTX_RULE5EN_SHIFT              4
-#define TSTORM_RDMA_CONN_AG_CTX_RULE6EN_MASK               0x1
-#define TSTORM_RDMA_CONN_AG_CTX_RULE6EN_SHIFT              5
-#define TSTORM_RDMA_CONN_AG_CTX_RULE7EN_MASK               0x1
-#define TSTORM_RDMA_CONN_AG_CTX_RULE7EN_SHIFT              6
-#define TSTORM_RDMA_CONN_AG_CTX_RULE8EN_MASK               0x1
-#define TSTORM_RDMA_CONN_AG_CTX_RULE8EN_SHIFT              7
+#define E4_TSTORM_RDMA_CONN_AG_CTX_RULE1EN_MASK		0x1
+#define E4_TSTORM_RDMA_CONN_AG_CTX_RULE1EN_SHIFT	0
+#define E4_TSTORM_RDMA_CONN_AG_CTX_RULE2EN_MASK		0x1
+#define E4_TSTORM_RDMA_CONN_AG_CTX_RULE2EN_SHIFT	1
+#define E4_TSTORM_RDMA_CONN_AG_CTX_RULE3EN_MASK		0x1
+#define E4_TSTORM_RDMA_CONN_AG_CTX_RULE3EN_SHIFT	2
+#define E4_TSTORM_RDMA_CONN_AG_CTX_RULE4EN_MASK		0x1
+#define E4_TSTORM_RDMA_CONN_AG_CTX_RULE4EN_SHIFT	3
+#define E4_TSTORM_RDMA_CONN_AG_CTX_RULE5EN_MASK		0x1
+#define E4_TSTORM_RDMA_CONN_AG_CTX_RULE5EN_SHIFT	4
+#define E4_TSTORM_RDMA_CONN_AG_CTX_RULE6EN_MASK		0x1
+#define E4_TSTORM_RDMA_CONN_AG_CTX_RULE6EN_SHIFT	5
+#define E4_TSTORM_RDMA_CONN_AG_CTX_RULE7EN_MASK		0x1
+#define E4_TSTORM_RDMA_CONN_AG_CTX_RULE7EN_SHIFT	6
+#define E4_TSTORM_RDMA_CONN_AG_CTX_RULE8EN_MASK		0x1
+#define E4_TSTORM_RDMA_CONN_AG_CTX_RULE8EN_SHIFT	7
 	__le32 reg0;
 	__le32 reg1;
 	__le32 reg2;
@@ -6924,73 +7476,73 @@ struct tstorm_rdma_conn_ag_ctx {
 	__le32 reg10;
 };
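
The E4_*_MASK/E4_*_SHIFT pairs above pack several sub-fields into each flagsN byte; the driver reads and writes them through generic accessors built on exactly this naming convention (qed carries SET_FIELD/GET_FIELD helpers for this in its common HSI header). A minimal stand-alone sketch, using one field pair from the block above and stand-in types (no __le* handling):

	#include <stdint.h>
	#include <stdio.h>

	typedef uint8_t u8;

	#define E4_TSTORM_RDMA_CONN_AG_CTX_CF10_MASK	0x3
	#define E4_TSTORM_RDMA_CONN_AG_CTX_CF10_SHIFT	2

	/* Accessors in the style qed builds around these names. */
	#define GET_FIELD(value, name) \
		(((value) >> name##_SHIFT) & name##_MASK)
	#define SET_FIELD(value, name, flag)                                   \
		do {                                                           \
			(value) &= ~((name##_MASK) << (name##_SHIFT));         \
			(value) |= ((flag) & (name##_MASK)) << (name##_SHIFT); \
		} while (0)

	int main(void)
	{
		u8 flags3 = 0;

		/* CF10 occupies bits 3:2 of flags3. */
		SET_FIELD(flags3, E4_TSTORM_RDMA_CONN_AG_CTX_CF10, 0x2);
		printf("flags3=0x%02x CF10=%u\n", flags3,
		       GET_FIELD(flags3, E4_TSTORM_RDMA_CONN_AG_CTX_CF10));
		return 0;	/* prints flags3=0x08 CF10=2 */
	}
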
 
-struct tstorm_rdma_task_ag_ctx {
+struct e4_tstorm_rdma_task_ag_ctx {
 	u8 byte0;
 	u8 byte1;
 	__le16 word0;
 	u8 flags0;
-#define TSTORM_RDMA_TASK_AG_CTX_NIBBLE0_MASK  0xF
-#define TSTORM_RDMA_TASK_AG_CTX_NIBBLE0_SHIFT 0
-#define TSTORM_RDMA_TASK_AG_CTX_BIT0_MASK     0x1
-#define TSTORM_RDMA_TASK_AG_CTX_BIT0_SHIFT    4
-#define TSTORM_RDMA_TASK_AG_CTX_BIT1_MASK     0x1
-#define TSTORM_RDMA_TASK_AG_CTX_BIT1_SHIFT    5
-#define TSTORM_RDMA_TASK_AG_CTX_BIT2_MASK     0x1
-#define TSTORM_RDMA_TASK_AG_CTX_BIT2_SHIFT    6
-#define TSTORM_RDMA_TASK_AG_CTX_BIT3_MASK     0x1
-#define TSTORM_RDMA_TASK_AG_CTX_BIT3_SHIFT    7
+#define E4_TSTORM_RDMA_TASK_AG_CTX_NIBBLE0_MASK		0xF
+#define E4_TSTORM_RDMA_TASK_AG_CTX_NIBBLE0_SHIFT	0
+#define E4_TSTORM_RDMA_TASK_AG_CTX_BIT0_MASK		0x1
+#define E4_TSTORM_RDMA_TASK_AG_CTX_BIT0_SHIFT		4
+#define E4_TSTORM_RDMA_TASK_AG_CTX_BIT1_MASK		0x1
+#define E4_TSTORM_RDMA_TASK_AG_CTX_BIT1_SHIFT		5
+#define E4_TSTORM_RDMA_TASK_AG_CTX_BIT2_MASK		0x1
+#define E4_TSTORM_RDMA_TASK_AG_CTX_BIT2_SHIFT		6
+#define E4_TSTORM_RDMA_TASK_AG_CTX_BIT3_MASK		0x1
+#define E4_TSTORM_RDMA_TASK_AG_CTX_BIT3_SHIFT		7
 	u8 flags1;
-#define TSTORM_RDMA_TASK_AG_CTX_BIT4_MASK     0x1
-#define TSTORM_RDMA_TASK_AG_CTX_BIT4_SHIFT    0
-#define TSTORM_RDMA_TASK_AG_CTX_BIT5_MASK     0x1
-#define TSTORM_RDMA_TASK_AG_CTX_BIT5_SHIFT    1
-#define TSTORM_RDMA_TASK_AG_CTX_CF0_MASK      0x3
-#define TSTORM_RDMA_TASK_AG_CTX_CF0_SHIFT     2
-#define TSTORM_RDMA_TASK_AG_CTX_CF1_MASK      0x3
-#define TSTORM_RDMA_TASK_AG_CTX_CF1_SHIFT     4
-#define TSTORM_RDMA_TASK_AG_CTX_CF2_MASK      0x3
-#define TSTORM_RDMA_TASK_AG_CTX_CF2_SHIFT     6
+#define E4_TSTORM_RDMA_TASK_AG_CTX_BIT4_MASK	0x1
+#define E4_TSTORM_RDMA_TASK_AG_CTX_BIT4_SHIFT	0
+#define E4_TSTORM_RDMA_TASK_AG_CTX_BIT5_MASK	0x1
+#define E4_TSTORM_RDMA_TASK_AG_CTX_BIT5_SHIFT	1
+#define E4_TSTORM_RDMA_TASK_AG_CTX_CF0_MASK	0x3
+#define E4_TSTORM_RDMA_TASK_AG_CTX_CF0_SHIFT	2
+#define E4_TSTORM_RDMA_TASK_AG_CTX_CF1_MASK	0x3
+#define E4_TSTORM_RDMA_TASK_AG_CTX_CF1_SHIFT	4
+#define E4_TSTORM_RDMA_TASK_AG_CTX_CF2_MASK	0x3
+#define E4_TSTORM_RDMA_TASK_AG_CTX_CF2_SHIFT	6
 	u8 flags2;
-#define TSTORM_RDMA_TASK_AG_CTX_CF3_MASK      0x3
-#define TSTORM_RDMA_TASK_AG_CTX_CF3_SHIFT     0
-#define TSTORM_RDMA_TASK_AG_CTX_CF4_MASK      0x3
-#define TSTORM_RDMA_TASK_AG_CTX_CF4_SHIFT     2
-#define TSTORM_RDMA_TASK_AG_CTX_CF5_MASK      0x3
-#define TSTORM_RDMA_TASK_AG_CTX_CF5_SHIFT     4
-#define TSTORM_RDMA_TASK_AG_CTX_CF6_MASK      0x3
-#define TSTORM_RDMA_TASK_AG_CTX_CF6_SHIFT     6
+#define E4_TSTORM_RDMA_TASK_AG_CTX_CF3_MASK	0x3
+#define E4_TSTORM_RDMA_TASK_AG_CTX_CF3_SHIFT	0
+#define E4_TSTORM_RDMA_TASK_AG_CTX_CF4_MASK	0x3
+#define E4_TSTORM_RDMA_TASK_AG_CTX_CF4_SHIFT	2
+#define E4_TSTORM_RDMA_TASK_AG_CTX_CF5_MASK	0x3
+#define E4_TSTORM_RDMA_TASK_AG_CTX_CF5_SHIFT	4
+#define E4_TSTORM_RDMA_TASK_AG_CTX_CF6_MASK	0x3
+#define E4_TSTORM_RDMA_TASK_AG_CTX_CF6_SHIFT	6
 	u8 flags3;
-#define TSTORM_RDMA_TASK_AG_CTX_CF7_MASK      0x3
-#define TSTORM_RDMA_TASK_AG_CTX_CF7_SHIFT     0
-#define TSTORM_RDMA_TASK_AG_CTX_CF0EN_MASK    0x1
-#define TSTORM_RDMA_TASK_AG_CTX_CF0EN_SHIFT   2
-#define TSTORM_RDMA_TASK_AG_CTX_CF1EN_MASK    0x1
-#define TSTORM_RDMA_TASK_AG_CTX_CF1EN_SHIFT   3
-#define TSTORM_RDMA_TASK_AG_CTX_CF2EN_MASK    0x1
-#define TSTORM_RDMA_TASK_AG_CTX_CF2EN_SHIFT   4
-#define TSTORM_RDMA_TASK_AG_CTX_CF3EN_MASK    0x1
-#define TSTORM_RDMA_TASK_AG_CTX_CF3EN_SHIFT   5
-#define TSTORM_RDMA_TASK_AG_CTX_CF4EN_MASK    0x1
-#define TSTORM_RDMA_TASK_AG_CTX_CF4EN_SHIFT   6
-#define TSTORM_RDMA_TASK_AG_CTX_CF5EN_MASK    0x1
-#define TSTORM_RDMA_TASK_AG_CTX_CF5EN_SHIFT   7
+#define E4_TSTORM_RDMA_TASK_AG_CTX_CF7_MASK	0x3
+#define E4_TSTORM_RDMA_TASK_AG_CTX_CF7_SHIFT	0
+#define E4_TSTORM_RDMA_TASK_AG_CTX_CF0EN_MASK	0x1
+#define E4_TSTORM_RDMA_TASK_AG_CTX_CF0EN_SHIFT	2
+#define E4_TSTORM_RDMA_TASK_AG_CTX_CF1EN_MASK	0x1
+#define E4_TSTORM_RDMA_TASK_AG_CTX_CF1EN_SHIFT	3
+#define E4_TSTORM_RDMA_TASK_AG_CTX_CF2EN_MASK	0x1
+#define E4_TSTORM_RDMA_TASK_AG_CTX_CF2EN_SHIFT	4
+#define E4_TSTORM_RDMA_TASK_AG_CTX_CF3EN_MASK	0x1
+#define E4_TSTORM_RDMA_TASK_AG_CTX_CF3EN_SHIFT	5
+#define E4_TSTORM_RDMA_TASK_AG_CTX_CF4EN_MASK	0x1
+#define E4_TSTORM_RDMA_TASK_AG_CTX_CF4EN_SHIFT	6
+#define E4_TSTORM_RDMA_TASK_AG_CTX_CF5EN_MASK	0x1
+#define E4_TSTORM_RDMA_TASK_AG_CTX_CF5EN_SHIFT	7
 	u8 flags4;
-#define TSTORM_RDMA_TASK_AG_CTX_CF6EN_MASK    0x1
-#define TSTORM_RDMA_TASK_AG_CTX_CF6EN_SHIFT   0
-#define TSTORM_RDMA_TASK_AG_CTX_CF7EN_MASK    0x1
-#define TSTORM_RDMA_TASK_AG_CTX_CF7EN_SHIFT   1
-#define TSTORM_RDMA_TASK_AG_CTX_RULE0EN_MASK  0x1
-#define TSTORM_RDMA_TASK_AG_CTX_RULE0EN_SHIFT 2
-#define TSTORM_RDMA_TASK_AG_CTX_RULE1EN_MASK  0x1
-#define TSTORM_RDMA_TASK_AG_CTX_RULE1EN_SHIFT 3
-#define TSTORM_RDMA_TASK_AG_CTX_RULE2EN_MASK  0x1
-#define TSTORM_RDMA_TASK_AG_CTX_RULE2EN_SHIFT 4
-#define TSTORM_RDMA_TASK_AG_CTX_RULE3EN_MASK  0x1
-#define TSTORM_RDMA_TASK_AG_CTX_RULE3EN_SHIFT 5
-#define TSTORM_RDMA_TASK_AG_CTX_RULE4EN_MASK  0x1
-#define TSTORM_RDMA_TASK_AG_CTX_RULE4EN_SHIFT 6
-#define TSTORM_RDMA_TASK_AG_CTX_RULE5EN_MASK  0x1
-#define TSTORM_RDMA_TASK_AG_CTX_RULE5EN_SHIFT 7
+#define E4_TSTORM_RDMA_TASK_AG_CTX_CF6EN_MASK		0x1
+#define E4_TSTORM_RDMA_TASK_AG_CTX_CF6EN_SHIFT		0
+#define E4_TSTORM_RDMA_TASK_AG_CTX_CF7EN_MASK		0x1
+#define E4_TSTORM_RDMA_TASK_AG_CTX_CF7EN_SHIFT		1
+#define E4_TSTORM_RDMA_TASK_AG_CTX_RULE0EN_MASK		0x1
+#define E4_TSTORM_RDMA_TASK_AG_CTX_RULE0EN_SHIFT	2
+#define E4_TSTORM_RDMA_TASK_AG_CTX_RULE1EN_MASK		0x1
+#define E4_TSTORM_RDMA_TASK_AG_CTX_RULE1EN_SHIFT	3
+#define E4_TSTORM_RDMA_TASK_AG_CTX_RULE2EN_MASK		0x1
+#define E4_TSTORM_RDMA_TASK_AG_CTX_RULE2EN_SHIFT	4
+#define E4_TSTORM_RDMA_TASK_AG_CTX_RULE3EN_MASK		0x1
+#define E4_TSTORM_RDMA_TASK_AG_CTX_RULE3EN_SHIFT	5
+#define E4_TSTORM_RDMA_TASK_AG_CTX_RULE4EN_MASK		0x1
+#define E4_TSTORM_RDMA_TASK_AG_CTX_RULE4EN_SHIFT	6
+#define E4_TSTORM_RDMA_TASK_AG_CTX_RULE5EN_MASK		0x1
+#define E4_TSTORM_RDMA_TASK_AG_CTX_RULE5EN_SHIFT	7
 	u8 byte2;
 	__le16 word1;
 	__le32 reg0;
@@ -7003,63 +7555,63 @@ struct tstorm_rdma_task_ag_ctx {
 	__le32 reg2;
 };
 
-struct ustorm_rdma_conn_ag_ctx {
+struct e4_ustorm_rdma_conn_ag_ctx {
 	u8 reserved;
 	u8 byte1;
 	u8 flags0;
-#define USTORM_RDMA_CONN_AG_CTX_EXIST_IN_QM0_MASK     0x1
-#define USTORM_RDMA_CONN_AG_CTX_EXIST_IN_QM0_SHIFT    0
-#define USTORM_RDMA_CONN_AG_CTX_BIT1_MASK             0x1
-#define USTORM_RDMA_CONN_AG_CTX_BIT1_SHIFT            1
-#define USTORM_RDMA_CONN_AG_CTX_FLUSH_Q0_CF_MASK      0x3
-#define USTORM_RDMA_CONN_AG_CTX_FLUSH_Q0_CF_SHIFT     2
-#define USTORM_RDMA_CONN_AG_CTX_CF1_MASK              0x3
-#define USTORM_RDMA_CONN_AG_CTX_CF1_SHIFT             4
-#define USTORM_RDMA_CONN_AG_CTX_CF2_MASK              0x3
-#define USTORM_RDMA_CONN_AG_CTX_CF2_SHIFT             6
+#define E4_USTORM_RDMA_CONN_AG_CTX_EXIST_IN_QM0_MASK	0x1
+#define E4_USTORM_RDMA_CONN_AG_CTX_EXIST_IN_QM0_SHIFT	0
+#define E4_USTORM_RDMA_CONN_AG_CTX_BIT1_MASK		0x1
+#define E4_USTORM_RDMA_CONN_AG_CTX_BIT1_SHIFT		1
+#define E4_USTORM_RDMA_CONN_AG_CTX_FLUSH_Q0_CF_MASK	0x3
+#define E4_USTORM_RDMA_CONN_AG_CTX_FLUSH_Q0_CF_SHIFT	2
+#define E4_USTORM_RDMA_CONN_AG_CTX_CF1_MASK		0x3
+#define E4_USTORM_RDMA_CONN_AG_CTX_CF1_SHIFT		4
+#define E4_USTORM_RDMA_CONN_AG_CTX_CF2_MASK		0x3
+#define E4_USTORM_RDMA_CONN_AG_CTX_CF2_SHIFT		6
 	u8 flags1;
-#define USTORM_RDMA_CONN_AG_CTX_CF3_MASK              0x3
-#define USTORM_RDMA_CONN_AG_CTX_CF3_SHIFT             0
-#define USTORM_RDMA_CONN_AG_CTX_CQ_ARM_SE_CF_MASK     0x3
-#define USTORM_RDMA_CONN_AG_CTX_CQ_ARM_SE_CF_SHIFT    2
-#define USTORM_RDMA_CONN_AG_CTX_CQ_ARM_CF_MASK        0x3
-#define USTORM_RDMA_CONN_AG_CTX_CQ_ARM_CF_SHIFT       4
-#define USTORM_RDMA_CONN_AG_CTX_CF6_MASK              0x3
-#define USTORM_RDMA_CONN_AG_CTX_CF6_SHIFT             6
+#define E4_USTORM_RDMA_CONN_AG_CTX_CF3_MASK		0x3
+#define E4_USTORM_RDMA_CONN_AG_CTX_CF3_SHIFT		0
+#define E4_USTORM_RDMA_CONN_AG_CTX_CQ_ARM_SE_CF_MASK	0x3
+#define E4_USTORM_RDMA_CONN_AG_CTX_CQ_ARM_SE_CF_SHIFT	2
+#define E4_USTORM_RDMA_CONN_AG_CTX_CQ_ARM_CF_MASK	0x3
+#define E4_USTORM_RDMA_CONN_AG_CTX_CQ_ARM_CF_SHIFT	4
+#define E4_USTORM_RDMA_CONN_AG_CTX_CF6_MASK		0x3
+#define E4_USTORM_RDMA_CONN_AG_CTX_CF6_SHIFT		6
 	u8 flags2;
-#define USTORM_RDMA_CONN_AG_CTX_FLUSH_Q0_CF_EN_MASK   0x1
-#define USTORM_RDMA_CONN_AG_CTX_FLUSH_Q0_CF_EN_SHIFT  0
-#define USTORM_RDMA_CONN_AG_CTX_CF1EN_MASK            0x1
-#define USTORM_RDMA_CONN_AG_CTX_CF1EN_SHIFT           1
-#define USTORM_RDMA_CONN_AG_CTX_CF2EN_MASK            0x1
-#define USTORM_RDMA_CONN_AG_CTX_CF2EN_SHIFT           2
-#define USTORM_RDMA_CONN_AG_CTX_CF3EN_MASK            0x1
-#define USTORM_RDMA_CONN_AG_CTX_CF3EN_SHIFT           3
-#define USTORM_RDMA_CONN_AG_CTX_CQ_ARM_SE_CF_EN_MASK  0x1
-#define USTORM_RDMA_CONN_AG_CTX_CQ_ARM_SE_CF_EN_SHIFT 4
-#define USTORM_RDMA_CONN_AG_CTX_CQ_ARM_CF_EN_MASK     0x1
-#define USTORM_RDMA_CONN_AG_CTX_CQ_ARM_CF_EN_SHIFT    5
-#define USTORM_RDMA_CONN_AG_CTX_CF6EN_MASK            0x1
-#define USTORM_RDMA_CONN_AG_CTX_CF6EN_SHIFT           6
-#define USTORM_RDMA_CONN_AG_CTX_CQ_SE_EN_MASK         0x1
-#define USTORM_RDMA_CONN_AG_CTX_CQ_SE_EN_SHIFT        7
+#define E4_USTORM_RDMA_CONN_AG_CTX_FLUSH_Q0_CF_EN_MASK		0x1
+#define E4_USTORM_RDMA_CONN_AG_CTX_FLUSH_Q0_CF_EN_SHIFT		0
+#define E4_USTORM_RDMA_CONN_AG_CTX_CF1EN_MASK			0x1
+#define E4_USTORM_RDMA_CONN_AG_CTX_CF1EN_SHIFT			1
+#define E4_USTORM_RDMA_CONN_AG_CTX_CF2EN_MASK			0x1
+#define E4_USTORM_RDMA_CONN_AG_CTX_CF2EN_SHIFT			2
+#define E4_USTORM_RDMA_CONN_AG_CTX_CF3EN_MASK			0x1
+#define E4_USTORM_RDMA_CONN_AG_CTX_CF3EN_SHIFT			3
+#define E4_USTORM_RDMA_CONN_AG_CTX_CQ_ARM_SE_CF_EN_MASK		0x1
+#define E4_USTORM_RDMA_CONN_AG_CTX_CQ_ARM_SE_CF_EN_SHIFT	4
+#define E4_USTORM_RDMA_CONN_AG_CTX_CQ_ARM_CF_EN_MASK		0x1
+#define E4_USTORM_RDMA_CONN_AG_CTX_CQ_ARM_CF_EN_SHIFT		5
+#define E4_USTORM_RDMA_CONN_AG_CTX_CF6EN_MASK			0x1
+#define E4_USTORM_RDMA_CONN_AG_CTX_CF6EN_SHIFT			6
+#define E4_USTORM_RDMA_CONN_AG_CTX_CQ_SE_EN_MASK		0x1
+#define E4_USTORM_RDMA_CONN_AG_CTX_CQ_SE_EN_SHIFT		7
 	u8 flags3;
-#define USTORM_RDMA_CONN_AG_CTX_CQ_EN_MASK            0x1
-#define USTORM_RDMA_CONN_AG_CTX_CQ_EN_SHIFT           0
-#define USTORM_RDMA_CONN_AG_CTX_RULE2EN_MASK          0x1
-#define USTORM_RDMA_CONN_AG_CTX_RULE2EN_SHIFT         1
-#define USTORM_RDMA_CONN_AG_CTX_RULE3EN_MASK          0x1
-#define USTORM_RDMA_CONN_AG_CTX_RULE3EN_SHIFT         2
-#define USTORM_RDMA_CONN_AG_CTX_RULE4EN_MASK          0x1
-#define USTORM_RDMA_CONN_AG_CTX_RULE4EN_SHIFT         3
-#define USTORM_RDMA_CONN_AG_CTX_RULE5EN_MASK          0x1
-#define USTORM_RDMA_CONN_AG_CTX_RULE5EN_SHIFT         4
-#define USTORM_RDMA_CONN_AG_CTX_RULE6EN_MASK          0x1
-#define USTORM_RDMA_CONN_AG_CTX_RULE6EN_SHIFT         5
-#define USTORM_RDMA_CONN_AG_CTX_RULE7EN_MASK          0x1
-#define USTORM_RDMA_CONN_AG_CTX_RULE7EN_SHIFT         6
-#define USTORM_RDMA_CONN_AG_CTX_RULE8EN_MASK          0x1
-#define USTORM_RDMA_CONN_AG_CTX_RULE8EN_SHIFT         7
+#define E4_USTORM_RDMA_CONN_AG_CTX_CQ_EN_MASK		0x1
+#define E4_USTORM_RDMA_CONN_AG_CTX_CQ_EN_SHIFT		0
+#define E4_USTORM_RDMA_CONN_AG_CTX_RULE2EN_MASK		0x1
+#define E4_USTORM_RDMA_CONN_AG_CTX_RULE2EN_SHIFT	1
+#define E4_USTORM_RDMA_CONN_AG_CTX_RULE3EN_MASK		0x1
+#define E4_USTORM_RDMA_CONN_AG_CTX_RULE3EN_SHIFT	2
+#define E4_USTORM_RDMA_CONN_AG_CTX_RULE4EN_MASK		0x1
+#define E4_USTORM_RDMA_CONN_AG_CTX_RULE4EN_SHIFT	3
+#define E4_USTORM_RDMA_CONN_AG_CTX_RULE5EN_MASK		0x1
+#define E4_USTORM_RDMA_CONN_AG_CTX_RULE5EN_SHIFT	4
+#define E4_USTORM_RDMA_CONN_AG_CTX_RULE6EN_MASK		0x1
+#define E4_USTORM_RDMA_CONN_AG_CTX_RULE6EN_SHIFT	5
+#define E4_USTORM_RDMA_CONN_AG_CTX_RULE7EN_MASK		0x1
+#define E4_USTORM_RDMA_CONN_AG_CTX_RULE7EN_SHIFT	6
+#define E4_USTORM_RDMA_CONN_AG_CTX_RULE8EN_MASK		0x1
+#define E4_USTORM_RDMA_CONN_AG_CTX_RULE8EN_SHIFT	7
 	u8 byte2;
 	u8 byte3;
 	__le16 conn_dpi;
@@ -7072,214 +7624,214 @@ struct ustorm_rdma_conn_ag_ctx {
 	__le16 word3;
 };
 
-struct xstorm_rdma_conn_ag_ctx {
+struct e4_xstorm_rdma_conn_ag_ctx {
 	u8 reserved0;
 	u8 state;
 	u8 flags0;
-#define XSTORM_RDMA_CONN_AG_CTX_EXIST_IN_QM0_MASK      0x1
-#define XSTORM_RDMA_CONN_AG_CTX_EXIST_IN_QM0_SHIFT     0
-#define XSTORM_RDMA_CONN_AG_CTX_BIT1_MASK              0x1
-#define XSTORM_RDMA_CONN_AG_CTX_BIT1_SHIFT             1
-#define XSTORM_RDMA_CONN_AG_CTX_BIT2_MASK              0x1
-#define XSTORM_RDMA_CONN_AG_CTX_BIT2_SHIFT             2
-#define XSTORM_RDMA_CONN_AG_CTX_EXIST_IN_QM3_MASK      0x1
-#define XSTORM_RDMA_CONN_AG_CTX_EXIST_IN_QM3_SHIFT     3
-#define XSTORM_RDMA_CONN_AG_CTX_BIT4_MASK              0x1
-#define XSTORM_RDMA_CONN_AG_CTX_BIT4_SHIFT             4
-#define XSTORM_RDMA_CONN_AG_CTX_BIT5_MASK              0x1
-#define XSTORM_RDMA_CONN_AG_CTX_BIT5_SHIFT             5
-#define XSTORM_RDMA_CONN_AG_CTX_BIT6_MASK              0x1
-#define XSTORM_RDMA_CONN_AG_CTX_BIT6_SHIFT             6
-#define XSTORM_RDMA_CONN_AG_CTX_BIT7_MASK              0x1
-#define XSTORM_RDMA_CONN_AG_CTX_BIT7_SHIFT             7
+#define E4_XSTORM_RDMA_CONN_AG_CTX_EXIST_IN_QM0_MASK	0x1
+#define E4_XSTORM_RDMA_CONN_AG_CTX_EXIST_IN_QM0_SHIFT	0
+#define E4_XSTORM_RDMA_CONN_AG_CTX_BIT1_MASK		0x1
+#define E4_XSTORM_RDMA_CONN_AG_CTX_BIT1_SHIFT		1
+#define E4_XSTORM_RDMA_CONN_AG_CTX_BIT2_MASK		0x1
+#define E4_XSTORM_RDMA_CONN_AG_CTX_BIT2_SHIFT		2
+#define E4_XSTORM_RDMA_CONN_AG_CTX_EXIST_IN_QM3_MASK	0x1
+#define E4_XSTORM_RDMA_CONN_AG_CTX_EXIST_IN_QM3_SHIFT	3
+#define E4_XSTORM_RDMA_CONN_AG_CTX_BIT4_MASK		0x1
+#define E4_XSTORM_RDMA_CONN_AG_CTX_BIT4_SHIFT		4
+#define E4_XSTORM_RDMA_CONN_AG_CTX_BIT5_MASK		0x1
+#define E4_XSTORM_RDMA_CONN_AG_CTX_BIT5_SHIFT		5
+#define E4_XSTORM_RDMA_CONN_AG_CTX_BIT6_MASK		0x1
+#define E4_XSTORM_RDMA_CONN_AG_CTX_BIT6_SHIFT		6
+#define E4_XSTORM_RDMA_CONN_AG_CTX_BIT7_MASK		0x1
+#define E4_XSTORM_RDMA_CONN_AG_CTX_BIT7_SHIFT		7
 	u8 flags1;
-#define XSTORM_RDMA_CONN_AG_CTX_BIT8_MASK              0x1
-#define XSTORM_RDMA_CONN_AG_CTX_BIT8_SHIFT             0
-#define XSTORM_RDMA_CONN_AG_CTX_BIT9_MASK              0x1
-#define XSTORM_RDMA_CONN_AG_CTX_BIT9_SHIFT             1
-#define XSTORM_RDMA_CONN_AG_CTX_BIT10_MASK             0x1
-#define XSTORM_RDMA_CONN_AG_CTX_BIT10_SHIFT            2
-#define XSTORM_RDMA_CONN_AG_CTX_BIT11_MASK             0x1
-#define XSTORM_RDMA_CONN_AG_CTX_BIT11_SHIFT            3
-#define XSTORM_RDMA_CONN_AG_CTX_BIT12_MASK             0x1
-#define XSTORM_RDMA_CONN_AG_CTX_BIT12_SHIFT            4
-#define XSTORM_RDMA_CONN_AG_CTX_MSTORM_FLUSH_MASK      0x1
-#define XSTORM_RDMA_CONN_AG_CTX_MSTORM_FLUSH_SHIFT     5
-#define XSTORM_RDMA_CONN_AG_CTX_BIT14_MASK             0x1
-#define XSTORM_RDMA_CONN_AG_CTX_BIT14_SHIFT            6
-#define XSTORM_RDMA_CONN_AG_CTX_YSTORM_FLUSH_MASK      0x1
-#define XSTORM_RDMA_CONN_AG_CTX_YSTORM_FLUSH_SHIFT     7
+#define E4_XSTORM_RDMA_CONN_AG_CTX_BIT8_MASK		0x1
+#define E4_XSTORM_RDMA_CONN_AG_CTX_BIT8_SHIFT		0
+#define E4_XSTORM_RDMA_CONN_AG_CTX_BIT9_MASK		0x1
+#define E4_XSTORM_RDMA_CONN_AG_CTX_BIT9_SHIFT		1
+#define E4_XSTORM_RDMA_CONN_AG_CTX_BIT10_MASK		0x1
+#define E4_XSTORM_RDMA_CONN_AG_CTX_BIT10_SHIFT		2
+#define E4_XSTORM_RDMA_CONN_AG_CTX_BIT11_MASK		0x1
+#define E4_XSTORM_RDMA_CONN_AG_CTX_BIT11_SHIFT		3
+#define E4_XSTORM_RDMA_CONN_AG_CTX_BIT12_MASK		0x1
+#define E4_XSTORM_RDMA_CONN_AG_CTX_BIT12_SHIFT		4
+#define E4_XSTORM_RDMA_CONN_AG_CTX_MSTORM_FLUSH_MASK	0x1
+#define E4_XSTORM_RDMA_CONN_AG_CTX_MSTORM_FLUSH_SHIFT	5
+#define E4_XSTORM_RDMA_CONN_AG_CTX_BIT14_MASK		0x1
+#define E4_XSTORM_RDMA_CONN_AG_CTX_BIT14_SHIFT		6
+#define E4_XSTORM_RDMA_CONN_AG_CTX_YSTORM_FLUSH_MASK	0x1
+#define E4_XSTORM_RDMA_CONN_AG_CTX_YSTORM_FLUSH_SHIFT	7
 	u8 flags2;
-#define XSTORM_RDMA_CONN_AG_CTX_CF0_MASK               0x3
-#define XSTORM_RDMA_CONN_AG_CTX_CF0_SHIFT              0
-#define XSTORM_RDMA_CONN_AG_CTX_CF1_MASK               0x3
-#define XSTORM_RDMA_CONN_AG_CTX_CF1_SHIFT              2
-#define XSTORM_RDMA_CONN_AG_CTX_CF2_MASK               0x3
-#define XSTORM_RDMA_CONN_AG_CTX_CF2_SHIFT              4
-#define XSTORM_RDMA_CONN_AG_CTX_CF3_MASK               0x3
-#define XSTORM_RDMA_CONN_AG_CTX_CF3_SHIFT              6
+#define E4_XSTORM_RDMA_CONN_AG_CTX_CF0_MASK	0x3
+#define E4_XSTORM_RDMA_CONN_AG_CTX_CF0_SHIFT	0
+#define E4_XSTORM_RDMA_CONN_AG_CTX_CF1_MASK	0x3
+#define E4_XSTORM_RDMA_CONN_AG_CTX_CF1_SHIFT	2
+#define E4_XSTORM_RDMA_CONN_AG_CTX_CF2_MASK	0x3
+#define E4_XSTORM_RDMA_CONN_AG_CTX_CF2_SHIFT	4
+#define E4_XSTORM_RDMA_CONN_AG_CTX_CF3_MASK	0x3
+#define E4_XSTORM_RDMA_CONN_AG_CTX_CF3_SHIFT	6
 	u8 flags3;
-#define XSTORM_RDMA_CONN_AG_CTX_CF4_MASK               0x3
-#define XSTORM_RDMA_CONN_AG_CTX_CF4_SHIFT              0
-#define XSTORM_RDMA_CONN_AG_CTX_CF5_MASK               0x3
-#define XSTORM_RDMA_CONN_AG_CTX_CF5_SHIFT              2
-#define XSTORM_RDMA_CONN_AG_CTX_CF6_MASK               0x3
-#define XSTORM_RDMA_CONN_AG_CTX_CF6_SHIFT              4
-#define XSTORM_RDMA_CONN_AG_CTX_FLUSH_Q0_CF_MASK       0x3
-#define XSTORM_RDMA_CONN_AG_CTX_FLUSH_Q0_CF_SHIFT      6
+#define E4_XSTORM_RDMA_CONN_AG_CTX_CF4_MASK		0x3
+#define E4_XSTORM_RDMA_CONN_AG_CTX_CF4_SHIFT		0
+#define E4_XSTORM_RDMA_CONN_AG_CTX_CF5_MASK		0x3
+#define E4_XSTORM_RDMA_CONN_AG_CTX_CF5_SHIFT		2
+#define E4_XSTORM_RDMA_CONN_AG_CTX_CF6_MASK		0x3
+#define E4_XSTORM_RDMA_CONN_AG_CTX_CF6_SHIFT		4
+#define E4_XSTORM_RDMA_CONN_AG_CTX_FLUSH_Q0_CF_MASK	0x3
+#define E4_XSTORM_RDMA_CONN_AG_CTX_FLUSH_Q0_CF_SHIFT	6
 	u8 flags4;
-#define XSTORM_RDMA_CONN_AG_CTX_CF8_MASK               0x3
-#define XSTORM_RDMA_CONN_AG_CTX_CF8_SHIFT              0
-#define XSTORM_RDMA_CONN_AG_CTX_CF9_MASK               0x3
-#define XSTORM_RDMA_CONN_AG_CTX_CF9_SHIFT              2
-#define XSTORM_RDMA_CONN_AG_CTX_CF10_MASK              0x3
-#define XSTORM_RDMA_CONN_AG_CTX_CF10_SHIFT             4
-#define XSTORM_RDMA_CONN_AG_CTX_CF11_MASK              0x3
-#define XSTORM_RDMA_CONN_AG_CTX_CF11_SHIFT             6
+#define E4_XSTORM_RDMA_CONN_AG_CTX_CF8_MASK	0x3
+#define E4_XSTORM_RDMA_CONN_AG_CTX_CF8_SHIFT	0
+#define E4_XSTORM_RDMA_CONN_AG_CTX_CF9_MASK	0x3
+#define E4_XSTORM_RDMA_CONN_AG_CTX_CF9_SHIFT	2
+#define E4_XSTORM_RDMA_CONN_AG_CTX_CF10_MASK	0x3
+#define E4_XSTORM_RDMA_CONN_AG_CTX_CF10_SHIFT	4
+#define E4_XSTORM_RDMA_CONN_AG_CTX_CF11_MASK	0x3
+#define E4_XSTORM_RDMA_CONN_AG_CTX_CF11_SHIFT	6
 	u8 flags5;
-#define XSTORM_RDMA_CONN_AG_CTX_CF12_MASK              0x3
-#define XSTORM_RDMA_CONN_AG_CTX_CF12_SHIFT             0
-#define XSTORM_RDMA_CONN_AG_CTX_CF13_MASK              0x3
-#define XSTORM_RDMA_CONN_AG_CTX_CF13_SHIFT             2
-#define XSTORM_RDMA_CONN_AG_CTX_CF14_MASK              0x3
-#define XSTORM_RDMA_CONN_AG_CTX_CF14_SHIFT             4
-#define XSTORM_RDMA_CONN_AG_CTX_CF15_MASK              0x3
-#define XSTORM_RDMA_CONN_AG_CTX_CF15_SHIFT             6
+#define E4_XSTORM_RDMA_CONN_AG_CTX_CF12_MASK	0x3
+#define E4_XSTORM_RDMA_CONN_AG_CTX_CF12_SHIFT	0
+#define E4_XSTORM_RDMA_CONN_AG_CTX_CF13_MASK	0x3
+#define E4_XSTORM_RDMA_CONN_AG_CTX_CF13_SHIFT	2
+#define E4_XSTORM_RDMA_CONN_AG_CTX_CF14_MASK	0x3
+#define E4_XSTORM_RDMA_CONN_AG_CTX_CF14_SHIFT	4
+#define E4_XSTORM_RDMA_CONN_AG_CTX_CF15_MASK	0x3
+#define E4_XSTORM_RDMA_CONN_AG_CTX_CF15_SHIFT	6
 	u8 flags6;
-#define XSTORM_RDMA_CONN_AG_CTX_CF16_MASK              0x3
-#define XSTORM_RDMA_CONN_AG_CTX_CF16_SHIFT             0
-#define XSTORM_RDMA_CONN_AG_CTX_CF17_MASK              0x3
-#define XSTORM_RDMA_CONN_AG_CTX_CF17_SHIFT             2
-#define XSTORM_RDMA_CONN_AG_CTX_CF18_MASK              0x3
-#define XSTORM_RDMA_CONN_AG_CTX_CF18_SHIFT             4
-#define XSTORM_RDMA_CONN_AG_CTX_CF19_MASK              0x3
-#define XSTORM_RDMA_CONN_AG_CTX_CF19_SHIFT             6
+#define E4_XSTORM_RDMA_CONN_AG_CTX_CF16_MASK	0x3
+#define E4_XSTORM_RDMA_CONN_AG_CTX_CF16_SHIFT	0
+#define E4_XSTORM_RDMA_CONN_AG_CTX_CF17_MASK	0x3
+#define E4_XSTORM_RDMA_CONN_AG_CTX_CF17_SHIFT	2
+#define E4_XSTORM_RDMA_CONN_AG_CTX_CF18_MASK	0x3
+#define E4_XSTORM_RDMA_CONN_AG_CTX_CF18_SHIFT	4
+#define E4_XSTORM_RDMA_CONN_AG_CTX_CF19_MASK	0x3
+#define E4_XSTORM_RDMA_CONN_AG_CTX_CF19_SHIFT	6
 	u8 flags7;
-#define XSTORM_RDMA_CONN_AG_CTX_CF20_MASK              0x3
-#define XSTORM_RDMA_CONN_AG_CTX_CF20_SHIFT             0
-#define XSTORM_RDMA_CONN_AG_CTX_CF21_MASK              0x3
-#define XSTORM_RDMA_CONN_AG_CTX_CF21_SHIFT             2
-#define XSTORM_RDMA_CONN_AG_CTX_SLOW_PATH_MASK         0x3
-#define XSTORM_RDMA_CONN_AG_CTX_SLOW_PATH_SHIFT        4
-#define XSTORM_RDMA_CONN_AG_CTX_CF0EN_MASK             0x1
-#define XSTORM_RDMA_CONN_AG_CTX_CF0EN_SHIFT            6
-#define XSTORM_RDMA_CONN_AG_CTX_CF1EN_MASK             0x1
-#define XSTORM_RDMA_CONN_AG_CTX_CF1EN_SHIFT            7
+#define E4_XSTORM_RDMA_CONN_AG_CTX_CF20_MASK		0x3
+#define E4_XSTORM_RDMA_CONN_AG_CTX_CF20_SHIFT		0
+#define E4_XSTORM_RDMA_CONN_AG_CTX_CF21_MASK		0x3
+#define E4_XSTORM_RDMA_CONN_AG_CTX_CF21_SHIFT		2
+#define E4_XSTORM_RDMA_CONN_AG_CTX_SLOW_PATH_MASK	0x3
+#define E4_XSTORM_RDMA_CONN_AG_CTX_SLOW_PATH_SHIFT	4
+#define E4_XSTORM_RDMA_CONN_AG_CTX_CF0EN_MASK		0x1
+#define E4_XSTORM_RDMA_CONN_AG_CTX_CF0EN_SHIFT		6
+#define E4_XSTORM_RDMA_CONN_AG_CTX_CF1EN_MASK		0x1
+#define E4_XSTORM_RDMA_CONN_AG_CTX_CF1EN_SHIFT		7
 	u8 flags8;
-#define XSTORM_RDMA_CONN_AG_CTX_CF2EN_MASK             0x1
-#define XSTORM_RDMA_CONN_AG_CTX_CF2EN_SHIFT            0
-#define XSTORM_RDMA_CONN_AG_CTX_CF3EN_MASK             0x1
-#define XSTORM_RDMA_CONN_AG_CTX_CF3EN_SHIFT            1
-#define XSTORM_RDMA_CONN_AG_CTX_CF4EN_MASK             0x1
-#define XSTORM_RDMA_CONN_AG_CTX_CF4EN_SHIFT            2
-#define XSTORM_RDMA_CONN_AG_CTX_CF5EN_MASK             0x1
-#define XSTORM_RDMA_CONN_AG_CTX_CF5EN_SHIFT            3
-#define XSTORM_RDMA_CONN_AG_CTX_CF6EN_MASK             0x1
-#define XSTORM_RDMA_CONN_AG_CTX_CF6EN_SHIFT            4
-#define XSTORM_RDMA_CONN_AG_CTX_FLUSH_Q0_CF_EN_MASK    0x1
-#define XSTORM_RDMA_CONN_AG_CTX_FLUSH_Q0_CF_EN_SHIFT   5
-#define XSTORM_RDMA_CONN_AG_CTX_CF8EN_MASK             0x1
-#define XSTORM_RDMA_CONN_AG_CTX_CF8EN_SHIFT            6
-#define XSTORM_RDMA_CONN_AG_CTX_CF9EN_MASK             0x1
-#define XSTORM_RDMA_CONN_AG_CTX_CF9EN_SHIFT            7
+#define E4_XSTORM_RDMA_CONN_AG_CTX_CF2EN_MASK		0x1
+#define E4_XSTORM_RDMA_CONN_AG_CTX_CF2EN_SHIFT		0
+#define E4_XSTORM_RDMA_CONN_AG_CTX_CF3EN_MASK		0x1
+#define E4_XSTORM_RDMA_CONN_AG_CTX_CF3EN_SHIFT		1
+#define E4_XSTORM_RDMA_CONN_AG_CTX_CF4EN_MASK		0x1
+#define E4_XSTORM_RDMA_CONN_AG_CTX_CF4EN_SHIFT		2
+#define E4_XSTORM_RDMA_CONN_AG_CTX_CF5EN_MASK		0x1
+#define E4_XSTORM_RDMA_CONN_AG_CTX_CF5EN_SHIFT		3
+#define E4_XSTORM_RDMA_CONN_AG_CTX_CF6EN_MASK		0x1
+#define E4_XSTORM_RDMA_CONN_AG_CTX_CF6EN_SHIFT		4
+#define E4_XSTORM_RDMA_CONN_AG_CTX_FLUSH_Q0_CF_EN_MASK	0x1
+#define E4_XSTORM_RDMA_CONN_AG_CTX_FLUSH_Q0_CF_EN_SHIFT	5
+#define E4_XSTORM_RDMA_CONN_AG_CTX_CF8EN_MASK		0x1
+#define E4_XSTORM_RDMA_CONN_AG_CTX_CF8EN_SHIFT		6
+#define E4_XSTORM_RDMA_CONN_AG_CTX_CF9EN_MASK		0x1
+#define E4_XSTORM_RDMA_CONN_AG_CTX_CF9EN_SHIFT		7
 	u8 flags9;
-#define XSTORM_RDMA_CONN_AG_CTX_CF10EN_MASK            0x1
-#define XSTORM_RDMA_CONN_AG_CTX_CF10EN_SHIFT           0
-#define XSTORM_RDMA_CONN_AG_CTX_CF11EN_MASK            0x1
-#define XSTORM_RDMA_CONN_AG_CTX_CF11EN_SHIFT           1
-#define XSTORM_RDMA_CONN_AG_CTX_CF12EN_MASK            0x1
-#define XSTORM_RDMA_CONN_AG_CTX_CF12EN_SHIFT           2
-#define XSTORM_RDMA_CONN_AG_CTX_CF13EN_MASK            0x1
-#define XSTORM_RDMA_CONN_AG_CTX_CF13EN_SHIFT           3
-#define XSTORM_RDMA_CONN_AG_CTX_CF14EN_MASK            0x1
-#define XSTORM_RDMA_CONN_AG_CTX_CF14EN_SHIFT           4
-#define XSTORM_RDMA_CONN_AG_CTX_CF15EN_MASK            0x1
-#define XSTORM_RDMA_CONN_AG_CTX_CF15EN_SHIFT           5
-#define XSTORM_RDMA_CONN_AG_CTX_CF16EN_MASK            0x1
-#define XSTORM_RDMA_CONN_AG_CTX_CF16EN_SHIFT           6
-#define XSTORM_RDMA_CONN_AG_CTX_CF17EN_MASK            0x1
-#define XSTORM_RDMA_CONN_AG_CTX_CF17EN_SHIFT           7
+#define E4_XSTORM_RDMA_CONN_AG_CTX_CF10EN_MASK	0x1
+#define E4_XSTORM_RDMA_CONN_AG_CTX_CF10EN_SHIFT	0
+#define E4_XSTORM_RDMA_CONN_AG_CTX_CF11EN_MASK	0x1
+#define E4_XSTORM_RDMA_CONN_AG_CTX_CF11EN_SHIFT	1
+#define E4_XSTORM_RDMA_CONN_AG_CTX_CF12EN_MASK	0x1
+#define E4_XSTORM_RDMA_CONN_AG_CTX_CF12EN_SHIFT	2
+#define E4_XSTORM_RDMA_CONN_AG_CTX_CF13EN_MASK	0x1
+#define E4_XSTORM_RDMA_CONN_AG_CTX_CF13EN_SHIFT	3
+#define E4_XSTORM_RDMA_CONN_AG_CTX_CF14EN_MASK	0x1
+#define E4_XSTORM_RDMA_CONN_AG_CTX_CF14EN_SHIFT	4
+#define E4_XSTORM_RDMA_CONN_AG_CTX_CF15EN_MASK	0x1
+#define E4_XSTORM_RDMA_CONN_AG_CTX_CF15EN_SHIFT	5
+#define E4_XSTORM_RDMA_CONN_AG_CTX_CF16EN_MASK	0x1
+#define E4_XSTORM_RDMA_CONN_AG_CTX_CF16EN_SHIFT	6
+#define E4_XSTORM_RDMA_CONN_AG_CTX_CF17EN_MASK	0x1
+#define E4_XSTORM_RDMA_CONN_AG_CTX_CF17EN_SHIFT	7
 	u8 flags10;
-#define XSTORM_RDMA_CONN_AG_CTX_CF18EN_MASK            0x1
-#define XSTORM_RDMA_CONN_AG_CTX_CF18EN_SHIFT           0
-#define XSTORM_RDMA_CONN_AG_CTX_CF19EN_MASK            0x1
-#define XSTORM_RDMA_CONN_AG_CTX_CF19EN_SHIFT           1
-#define XSTORM_RDMA_CONN_AG_CTX_CF20EN_MASK            0x1
-#define XSTORM_RDMA_CONN_AG_CTX_CF20EN_SHIFT           2
-#define XSTORM_RDMA_CONN_AG_CTX_CF21EN_MASK            0x1
-#define XSTORM_RDMA_CONN_AG_CTX_CF21EN_SHIFT           3
-#define XSTORM_RDMA_CONN_AG_CTX_SLOW_PATH_EN_MASK      0x1
-#define XSTORM_RDMA_CONN_AG_CTX_SLOW_PATH_EN_SHIFT     4
-#define XSTORM_RDMA_CONN_AG_CTX_CF23EN_MASK            0x1
-#define XSTORM_RDMA_CONN_AG_CTX_CF23EN_SHIFT           5
-#define XSTORM_RDMA_CONN_AG_CTX_RULE0EN_MASK           0x1
-#define XSTORM_RDMA_CONN_AG_CTX_RULE0EN_SHIFT          6
-#define XSTORM_RDMA_CONN_AG_CTX_RULE1EN_MASK           0x1
-#define XSTORM_RDMA_CONN_AG_CTX_RULE1EN_SHIFT          7
+#define E4_XSTORM_RDMA_CONN_AG_CTX_CF18EN_MASK		0x1
+#define E4_XSTORM_RDMA_CONN_AG_CTX_CF18EN_SHIFT		0
+#define E4_XSTORM_RDMA_CONN_AG_CTX_CF19EN_MASK		0x1
+#define E4_XSTORM_RDMA_CONN_AG_CTX_CF19EN_SHIFT		1
+#define E4_XSTORM_RDMA_CONN_AG_CTX_CF20EN_MASK		0x1
+#define E4_XSTORM_RDMA_CONN_AG_CTX_CF20EN_SHIFT		2
+#define E4_XSTORM_RDMA_CONN_AG_CTX_CF21EN_MASK		0x1
+#define E4_XSTORM_RDMA_CONN_AG_CTX_CF21EN_SHIFT		3
+#define E4_XSTORM_RDMA_CONN_AG_CTX_SLOW_PATH_EN_MASK	0x1
+#define E4_XSTORM_RDMA_CONN_AG_CTX_SLOW_PATH_EN_SHIFT	4
+#define E4_XSTORM_RDMA_CONN_AG_CTX_CF23EN_MASK		0x1
+#define E4_XSTORM_RDMA_CONN_AG_CTX_CF23EN_SHIFT		5
+#define E4_XSTORM_RDMA_CONN_AG_CTX_RULE0EN_MASK		0x1
+#define E4_XSTORM_RDMA_CONN_AG_CTX_RULE0EN_SHIFT	6
+#define E4_XSTORM_RDMA_CONN_AG_CTX_RULE1EN_MASK		0x1
+#define E4_XSTORM_RDMA_CONN_AG_CTX_RULE1EN_SHIFT	7
 	u8 flags11;
-#define XSTORM_RDMA_CONN_AG_CTX_RULE2EN_MASK           0x1
-#define XSTORM_RDMA_CONN_AG_CTX_RULE2EN_SHIFT          0
-#define XSTORM_RDMA_CONN_AG_CTX_RULE3EN_MASK           0x1
-#define XSTORM_RDMA_CONN_AG_CTX_RULE3EN_SHIFT          1
-#define XSTORM_RDMA_CONN_AG_CTX_RULE4EN_MASK           0x1
-#define XSTORM_RDMA_CONN_AG_CTX_RULE4EN_SHIFT          2
-#define XSTORM_RDMA_CONN_AG_CTX_RULE5EN_MASK           0x1
-#define XSTORM_RDMA_CONN_AG_CTX_RULE5EN_SHIFT          3
-#define XSTORM_RDMA_CONN_AG_CTX_RULE6EN_MASK           0x1
-#define XSTORM_RDMA_CONN_AG_CTX_RULE6EN_SHIFT          4
-#define XSTORM_RDMA_CONN_AG_CTX_RULE7EN_MASK           0x1
-#define XSTORM_RDMA_CONN_AG_CTX_RULE7EN_SHIFT          5
-#define XSTORM_RDMA_CONN_AG_CTX_A0_RESERVED1_MASK      0x1
-#define XSTORM_RDMA_CONN_AG_CTX_A0_RESERVED1_SHIFT     6
-#define XSTORM_RDMA_CONN_AG_CTX_RULE9EN_MASK           0x1
-#define XSTORM_RDMA_CONN_AG_CTX_RULE9EN_SHIFT          7
+#define E4_XSTORM_RDMA_CONN_AG_CTX_RULE2EN_MASK		0x1
+#define E4_XSTORM_RDMA_CONN_AG_CTX_RULE2EN_SHIFT	0
+#define E4_XSTORM_RDMA_CONN_AG_CTX_RULE3EN_MASK		0x1
+#define E4_XSTORM_RDMA_CONN_AG_CTX_RULE3EN_SHIFT	1
+#define E4_XSTORM_RDMA_CONN_AG_CTX_RULE4EN_MASK		0x1
+#define E4_XSTORM_RDMA_CONN_AG_CTX_RULE4EN_SHIFT	2
+#define E4_XSTORM_RDMA_CONN_AG_CTX_RULE5EN_MASK		0x1
+#define E4_XSTORM_RDMA_CONN_AG_CTX_RULE5EN_SHIFT	3
+#define E4_XSTORM_RDMA_CONN_AG_CTX_RULE6EN_MASK		0x1
+#define E4_XSTORM_RDMA_CONN_AG_CTX_RULE6EN_SHIFT	4
+#define E4_XSTORM_RDMA_CONN_AG_CTX_RULE7EN_MASK		0x1
+#define E4_XSTORM_RDMA_CONN_AG_CTX_RULE7EN_SHIFT	5
+#define E4_XSTORM_RDMA_CONN_AG_CTX_A0_RESERVED1_MASK	0x1
+#define E4_XSTORM_RDMA_CONN_AG_CTX_A0_RESERVED1_SHIFT	6
+#define E4_XSTORM_RDMA_CONN_AG_CTX_RULE9EN_MASK		0x1
+#define E4_XSTORM_RDMA_CONN_AG_CTX_RULE9EN_SHIFT	7
 	u8 flags12;
-#define XSTORM_RDMA_CONN_AG_CTX_RULE10EN_MASK          0x1
-#define XSTORM_RDMA_CONN_AG_CTX_RULE10EN_SHIFT         0
-#define XSTORM_RDMA_CONN_AG_CTX_RULE11EN_MASK          0x1
-#define XSTORM_RDMA_CONN_AG_CTX_RULE11EN_SHIFT         1
-#define XSTORM_RDMA_CONN_AG_CTX_A0_RESERVED2_MASK      0x1
-#define XSTORM_RDMA_CONN_AG_CTX_A0_RESERVED2_SHIFT     2
-#define XSTORM_RDMA_CONN_AG_CTX_A0_RESERVED3_MASK      0x1
-#define XSTORM_RDMA_CONN_AG_CTX_A0_RESERVED3_SHIFT     3
-#define XSTORM_RDMA_CONN_AG_CTX_RULE14EN_MASK          0x1
-#define XSTORM_RDMA_CONN_AG_CTX_RULE14EN_SHIFT         4
-#define XSTORM_RDMA_CONN_AG_CTX_RULE15EN_MASK          0x1
-#define XSTORM_RDMA_CONN_AG_CTX_RULE15EN_SHIFT         5
-#define XSTORM_RDMA_CONN_AG_CTX_RULE16EN_MASK          0x1
-#define XSTORM_RDMA_CONN_AG_CTX_RULE16EN_SHIFT         6
-#define XSTORM_RDMA_CONN_AG_CTX_RULE17EN_MASK          0x1
-#define XSTORM_RDMA_CONN_AG_CTX_RULE17EN_SHIFT         7
+#define E4_XSTORM_RDMA_CONN_AG_CTX_RULE10EN_MASK	0x1
+#define E4_XSTORM_RDMA_CONN_AG_CTX_RULE10EN_SHIFT	0
+#define E4_XSTORM_RDMA_CONN_AG_CTX_RULE11EN_MASK	0x1
+#define E4_XSTORM_RDMA_CONN_AG_CTX_RULE11EN_SHIFT	1
+#define E4_XSTORM_RDMA_CONN_AG_CTX_A0_RESERVED2_MASK	0x1
+#define E4_XSTORM_RDMA_CONN_AG_CTX_A0_RESERVED2_SHIFT	2
+#define E4_XSTORM_RDMA_CONN_AG_CTX_A0_RESERVED3_MASK	0x1
+#define E4_XSTORM_RDMA_CONN_AG_CTX_A0_RESERVED3_SHIFT	3
+#define E4_XSTORM_RDMA_CONN_AG_CTX_RULE14EN_MASK	0x1
+#define E4_XSTORM_RDMA_CONN_AG_CTX_RULE14EN_SHIFT	4
+#define E4_XSTORM_RDMA_CONN_AG_CTX_RULE15EN_MASK	0x1
+#define E4_XSTORM_RDMA_CONN_AG_CTX_RULE15EN_SHIFT	5
+#define E4_XSTORM_RDMA_CONN_AG_CTX_RULE16EN_MASK	0x1
+#define E4_XSTORM_RDMA_CONN_AG_CTX_RULE16EN_SHIFT	6
+#define E4_XSTORM_RDMA_CONN_AG_CTX_RULE17EN_MASK	0x1
+#define E4_XSTORM_RDMA_CONN_AG_CTX_RULE17EN_SHIFT	7
 	u8 flags13;
-#define XSTORM_RDMA_CONN_AG_CTX_RULE18EN_MASK          0x1
-#define XSTORM_RDMA_CONN_AG_CTX_RULE18EN_SHIFT         0
-#define XSTORM_RDMA_CONN_AG_CTX_RULE19EN_MASK          0x1
-#define XSTORM_RDMA_CONN_AG_CTX_RULE19EN_SHIFT         1
-#define XSTORM_RDMA_CONN_AG_CTX_A0_RESERVED4_MASK      0x1
-#define XSTORM_RDMA_CONN_AG_CTX_A0_RESERVED4_SHIFT     2
-#define XSTORM_RDMA_CONN_AG_CTX_A0_RESERVED5_MASK      0x1
-#define XSTORM_RDMA_CONN_AG_CTX_A0_RESERVED5_SHIFT     3
-#define XSTORM_RDMA_CONN_AG_CTX_A0_RESERVED6_MASK      0x1
-#define XSTORM_RDMA_CONN_AG_CTX_A0_RESERVED6_SHIFT     4
-#define XSTORM_RDMA_CONN_AG_CTX_A0_RESERVED7_MASK      0x1
-#define XSTORM_RDMA_CONN_AG_CTX_A0_RESERVED7_SHIFT     5
-#define XSTORM_RDMA_CONN_AG_CTX_A0_RESERVED8_MASK      0x1
-#define XSTORM_RDMA_CONN_AG_CTX_A0_RESERVED8_SHIFT     6
-#define XSTORM_RDMA_CONN_AG_CTX_A0_RESERVED9_MASK      0x1
-#define XSTORM_RDMA_CONN_AG_CTX_A0_RESERVED9_SHIFT     7
+#define E4_XSTORM_RDMA_CONN_AG_CTX_RULE18EN_MASK	0x1
+#define E4_XSTORM_RDMA_CONN_AG_CTX_RULE18EN_SHIFT	0
+#define E4_XSTORM_RDMA_CONN_AG_CTX_RULE19EN_MASK	0x1
+#define E4_XSTORM_RDMA_CONN_AG_CTX_RULE19EN_SHIFT	1
+#define E4_XSTORM_RDMA_CONN_AG_CTX_A0_RESERVED4_MASK	0x1
+#define E4_XSTORM_RDMA_CONN_AG_CTX_A0_RESERVED4_SHIFT	2
+#define E4_XSTORM_RDMA_CONN_AG_CTX_A0_RESERVED5_MASK	0x1
+#define E4_XSTORM_RDMA_CONN_AG_CTX_A0_RESERVED5_SHIFT	3
+#define E4_XSTORM_RDMA_CONN_AG_CTX_A0_RESERVED6_MASK	0x1
+#define E4_XSTORM_RDMA_CONN_AG_CTX_A0_RESERVED6_SHIFT	4
+#define E4_XSTORM_RDMA_CONN_AG_CTX_A0_RESERVED7_MASK	0x1
+#define E4_XSTORM_RDMA_CONN_AG_CTX_A0_RESERVED7_SHIFT	5
+#define E4_XSTORM_RDMA_CONN_AG_CTX_A0_RESERVED8_MASK	0x1
+#define E4_XSTORM_RDMA_CONN_AG_CTX_A0_RESERVED8_SHIFT	6
+#define E4_XSTORM_RDMA_CONN_AG_CTX_A0_RESERVED9_MASK	0x1
+#define E4_XSTORM_RDMA_CONN_AG_CTX_A0_RESERVED9_SHIFT	7
 	u8 flags14;
-#define XSTORM_RDMA_CONN_AG_CTX_MIGRATION_MASK         0x1
-#define XSTORM_RDMA_CONN_AG_CTX_MIGRATION_SHIFT        0
-#define XSTORM_RDMA_CONN_AG_CTX_BIT17_MASK             0x1
-#define XSTORM_RDMA_CONN_AG_CTX_BIT17_SHIFT            1
-#define XSTORM_RDMA_CONN_AG_CTX_DPM_PORT_NUM_MASK      0x3
-#define XSTORM_RDMA_CONN_AG_CTX_DPM_PORT_NUM_SHIFT     2
-#define XSTORM_RDMA_CONN_AG_CTX_RESERVED_MASK          0x1
-#define XSTORM_RDMA_CONN_AG_CTX_RESERVED_SHIFT         4
-#define XSTORM_RDMA_CONN_AG_CTX_ROCE_EDPM_ENABLE_MASK  0x1
-#define XSTORM_RDMA_CONN_AG_CTX_ROCE_EDPM_ENABLE_SHIFT 5
-#define XSTORM_RDMA_CONN_AG_CTX_CF23_MASK              0x3
-#define XSTORM_RDMA_CONN_AG_CTX_CF23_SHIFT             6
+#define E4_XSTORM_RDMA_CONN_AG_CTX_MIGRATION_MASK		0x1
+#define E4_XSTORM_RDMA_CONN_AG_CTX_MIGRATION_SHIFT		0
+#define E4_XSTORM_RDMA_CONN_AG_CTX_BIT17_MASK			0x1
+#define E4_XSTORM_RDMA_CONN_AG_CTX_BIT17_SHIFT			1
+#define E4_XSTORM_RDMA_CONN_AG_CTX_DPM_PORT_NUM_MASK		0x3
+#define E4_XSTORM_RDMA_CONN_AG_CTX_DPM_PORT_NUM_SHIFT		2
+#define E4_XSTORM_RDMA_CONN_AG_CTX_RESERVED_MASK		0x1
+#define E4_XSTORM_RDMA_CONN_AG_CTX_RESERVED_SHIFT		4
+#define E4_XSTORM_RDMA_CONN_AG_CTX_ROCE_EDPM_ENABLE_MASK	0x1
+#define E4_XSTORM_RDMA_CONN_AG_CTX_ROCE_EDPM_ENABLE_SHIFT	5
+#define E4_XSTORM_RDMA_CONN_AG_CTX_CF23_MASK			0x3
+#define E4_XSTORM_RDMA_CONN_AG_CTX_CF23_SHIFT			6
 	u8 byte2;
 	__le16 physical_q0;
 	__le16 word1;
@@ -7301,37 +7853,37 @@ struct xstorm_rdma_conn_ag_ctx {
 	__le32 reg6;
 };
 
-struct ystorm_rdma_conn_ag_ctx {
+struct e4_ystorm_rdma_conn_ag_ctx {
 	u8 byte0;
 	u8 byte1;
 	u8 flags0;
-#define YSTORM_RDMA_CONN_AG_CTX_BIT0_MASK     0x1
-#define YSTORM_RDMA_CONN_AG_CTX_BIT0_SHIFT    0
-#define YSTORM_RDMA_CONN_AG_CTX_BIT1_MASK     0x1
-#define YSTORM_RDMA_CONN_AG_CTX_BIT1_SHIFT    1
-#define YSTORM_RDMA_CONN_AG_CTX_CF0_MASK      0x3
-#define YSTORM_RDMA_CONN_AG_CTX_CF0_SHIFT     2
-#define YSTORM_RDMA_CONN_AG_CTX_CF1_MASK      0x3
-#define YSTORM_RDMA_CONN_AG_CTX_CF1_SHIFT     4
-#define YSTORM_RDMA_CONN_AG_CTX_CF2_MASK      0x3
-#define YSTORM_RDMA_CONN_AG_CTX_CF2_SHIFT     6
+#define E4_YSTORM_RDMA_CONN_AG_CTX_BIT0_MASK	0x1
+#define E4_YSTORM_RDMA_CONN_AG_CTX_BIT0_SHIFT	0
+#define E4_YSTORM_RDMA_CONN_AG_CTX_BIT1_MASK	0x1
+#define E4_YSTORM_RDMA_CONN_AG_CTX_BIT1_SHIFT	1
+#define E4_YSTORM_RDMA_CONN_AG_CTX_CF0_MASK	0x3
+#define E4_YSTORM_RDMA_CONN_AG_CTX_CF0_SHIFT	2
+#define E4_YSTORM_RDMA_CONN_AG_CTX_CF1_MASK	0x3
+#define E4_YSTORM_RDMA_CONN_AG_CTX_CF1_SHIFT	4
+#define E4_YSTORM_RDMA_CONN_AG_CTX_CF2_MASK	0x3
+#define E4_YSTORM_RDMA_CONN_AG_CTX_CF2_SHIFT	6
 	u8 flags1;
-#define YSTORM_RDMA_CONN_AG_CTX_CF0EN_MASK    0x1
-#define YSTORM_RDMA_CONN_AG_CTX_CF0EN_SHIFT   0
-#define YSTORM_RDMA_CONN_AG_CTX_CF1EN_MASK    0x1
-#define YSTORM_RDMA_CONN_AG_CTX_CF1EN_SHIFT   1
-#define YSTORM_RDMA_CONN_AG_CTX_CF2EN_MASK    0x1
-#define YSTORM_RDMA_CONN_AG_CTX_CF2EN_SHIFT   2
-#define YSTORM_RDMA_CONN_AG_CTX_RULE0EN_MASK  0x1
-#define YSTORM_RDMA_CONN_AG_CTX_RULE0EN_SHIFT 3
-#define YSTORM_RDMA_CONN_AG_CTX_RULE1EN_MASK  0x1
-#define YSTORM_RDMA_CONN_AG_CTX_RULE1EN_SHIFT 4
-#define YSTORM_RDMA_CONN_AG_CTX_RULE2EN_MASK  0x1
-#define YSTORM_RDMA_CONN_AG_CTX_RULE2EN_SHIFT 5
-#define YSTORM_RDMA_CONN_AG_CTX_RULE3EN_MASK  0x1
-#define YSTORM_RDMA_CONN_AG_CTX_RULE3EN_SHIFT 6
-#define YSTORM_RDMA_CONN_AG_CTX_RULE4EN_MASK  0x1
-#define YSTORM_RDMA_CONN_AG_CTX_RULE4EN_SHIFT 7
+#define E4_YSTORM_RDMA_CONN_AG_CTX_CF0EN_MASK		0x1
+#define E4_YSTORM_RDMA_CONN_AG_CTX_CF0EN_SHIFT		0
+#define E4_YSTORM_RDMA_CONN_AG_CTX_CF1EN_MASK		0x1
+#define E4_YSTORM_RDMA_CONN_AG_CTX_CF1EN_SHIFT		1
+#define E4_YSTORM_RDMA_CONN_AG_CTX_CF2EN_MASK		0x1
+#define E4_YSTORM_RDMA_CONN_AG_CTX_CF2EN_SHIFT		2
+#define E4_YSTORM_RDMA_CONN_AG_CTX_RULE0EN_MASK		0x1
+#define E4_YSTORM_RDMA_CONN_AG_CTX_RULE0EN_SHIFT	3
+#define E4_YSTORM_RDMA_CONN_AG_CTX_RULE1EN_MASK		0x1
+#define E4_YSTORM_RDMA_CONN_AG_CTX_RULE1EN_SHIFT	4
+#define E4_YSTORM_RDMA_CONN_AG_CTX_RULE2EN_MASK		0x1
+#define E4_YSTORM_RDMA_CONN_AG_CTX_RULE2EN_SHIFT	5
+#define E4_YSTORM_RDMA_CONN_AG_CTX_RULE3EN_MASK		0x1
+#define E4_YSTORM_RDMA_CONN_AG_CTX_RULE3EN_SHIFT	6
+#define E4_YSTORM_RDMA_CONN_AG_CTX_RULE4EN_MASK		0x1
+#define E4_YSTORM_RDMA_CONN_AG_CTX_RULE4EN_SHIFT	7
 	u8 byte2;
 	u8 byte3;
 	__le16 word0;
@@ -7345,62 +7897,70 @@ struct ystorm_rdma_conn_ag_ctx {
 	__le32 reg3;
 };
 
-struct mstorm_roce_conn_st_ctx {
-	struct regpair temp[6];
+/* The roce storm context of Ystorm */
+struct ystorm_roce_conn_st_ctx {
+	struct regpair temp[2];
 };
 
+/* The roce storm context of Pstorm */
 struct pstorm_roce_conn_st_ctx {
 	struct regpair temp[16];
 };
 
-struct ystorm_roce_conn_st_ctx {
-	struct regpair temp[2];
-};
-
+/* The roce storm context of Xstorm */
 struct xstorm_roce_conn_st_ctx {
 	struct regpair temp[24];
 };
 
+/* The roce storm context of Tstorm */
 struct tstorm_roce_conn_st_ctx {
 	struct regpair temp[30];
 };
 
+/* The roce storm context of Mstorm */
+struct mstorm_roce_conn_st_ctx {
+	struct regpair temp[6];
+};
+
+/* The roce storm context of Ustorm */
 struct ustorm_roce_conn_st_ctx {
 	struct regpair temp[12];
 };
 
-struct roce_conn_context {
+/* roce connection context */
+struct e4_roce_conn_context {
 	struct ystorm_roce_conn_st_ctx ystorm_st_context;
 	struct regpair ystorm_st_padding[2];
 	struct pstorm_roce_conn_st_ctx pstorm_st_context;
 	struct xstorm_roce_conn_st_ctx xstorm_st_context;
 	struct regpair xstorm_st_padding[2];
-	struct xstorm_rdma_conn_ag_ctx xstorm_ag_context;
-	struct tstorm_rdma_conn_ag_ctx tstorm_ag_context;
+	struct e4_xstorm_rdma_conn_ag_ctx xstorm_ag_context;
+	struct e4_tstorm_rdma_conn_ag_ctx tstorm_ag_context;
 	struct timers_context timer_context;
-	struct ustorm_rdma_conn_ag_ctx ustorm_ag_context;
+	struct e4_ustorm_rdma_conn_ag_ctx ustorm_ag_context;
 	struct tstorm_roce_conn_st_ctx tstorm_st_context;
 	struct mstorm_roce_conn_st_ctx mstorm_st_context;
 	struct ustorm_roce_conn_st_ctx ustorm_st_context;
 	struct regpair ustorm_st_padding[2];
 };
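
The aggregate connection context is a straight concatenation of the per-storm sections, with regpair padding keeping the sections at the offsets the firmware expects. Since regpair is two 32-bit words (8 bytes), the section sizes follow directly from the temp[] lengths above; a stand-alone sketch that checks them, with stand-in types and the lo/hi regpair layout assumed:

	#include <stdint.h>
	#include <stdio.h>

	struct regpair { uint32_t lo, hi; };	/* __le32 pair, 8 bytes */

	struct ystorm_roce_conn_st_ctx { struct regpair temp[2]; };
	struct pstorm_roce_conn_st_ctx { struct regpair temp[16]; };
	struct xstorm_roce_conn_st_ctx { struct regpair temp[24]; };
	struct tstorm_roce_conn_st_ctx { struct regpair temp[30]; };
	struct mstorm_roce_conn_st_ctx { struct regpair temp[6]; };
	struct ustorm_roce_conn_st_ctx { struct regpair temp[12]; };

	int main(void)
	{
		/* Expect 16 128 192 240 48 96 bytes respectively. */
		printf("y=%zu p=%zu x=%zu t=%zu m=%zu u=%zu\n",
		       sizeof(struct ystorm_roce_conn_st_ctx),
		       sizeof(struct pstorm_roce_conn_st_ctx),
		       sizeof(struct xstorm_roce_conn_st_ctx),
		       sizeof(struct tstorm_roce_conn_st_ctx),
		       sizeof(struct mstorm_roce_conn_st_ctx),
		       sizeof(struct ustorm_roce_conn_st_ctx));
		return 0;
	}
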
 
+/* roce create qp requester ramrod data */
 struct roce_create_qp_req_ramrod_data {
 	__le16 flags;
-#define ROCE_CREATE_QP_REQ_RAMROD_DATA_ROCE_FLAVOR_MASK          0x3
-#define ROCE_CREATE_QP_REQ_RAMROD_DATA_ROCE_FLAVOR_SHIFT         0
-#define ROCE_CREATE_QP_REQ_RAMROD_DATA_FMR_AND_RESERVED_EN_MASK  0x1
-#define ROCE_CREATE_QP_REQ_RAMROD_DATA_FMR_AND_RESERVED_EN_SHIFT 2
-#define ROCE_CREATE_QP_REQ_RAMROD_DATA_SIGNALED_COMP_MASK        0x1
-#define ROCE_CREATE_QP_REQ_RAMROD_DATA_SIGNALED_COMP_SHIFT       3
-#define ROCE_CREATE_QP_REQ_RAMROD_DATA_PRI_MASK                  0x7
-#define ROCE_CREATE_QP_REQ_RAMROD_DATA_PRI_SHIFT                 4
-#define ROCE_CREATE_QP_REQ_RAMROD_DATA_RESERVED_MASK             0x1
-#define ROCE_CREATE_QP_REQ_RAMROD_DATA_RESERVED_SHIFT            7
-#define ROCE_CREATE_QP_REQ_RAMROD_DATA_ERR_RETRY_CNT_MASK        0xF
-#define ROCE_CREATE_QP_REQ_RAMROD_DATA_ERR_RETRY_CNT_SHIFT       8
-#define ROCE_CREATE_QP_REQ_RAMROD_DATA_RNR_NAK_CNT_MASK          0xF
-#define ROCE_CREATE_QP_REQ_RAMROD_DATA_RNR_NAK_CNT_SHIFT         12
+#define ROCE_CREATE_QP_REQ_RAMROD_DATA_ROCE_FLAVOR_MASK			0x3
+#define ROCE_CREATE_QP_REQ_RAMROD_DATA_ROCE_FLAVOR_SHIFT		0
+#define ROCE_CREATE_QP_REQ_RAMROD_DATA_FMR_AND_RESERVED_EN_MASK		0x1
+#define ROCE_CREATE_QP_REQ_RAMROD_DATA_FMR_AND_RESERVED_EN_SHIFT	2
+#define ROCE_CREATE_QP_REQ_RAMROD_DATA_SIGNALED_COMP_MASK		0x1
+#define ROCE_CREATE_QP_REQ_RAMROD_DATA_SIGNALED_COMP_SHIFT		3
+#define ROCE_CREATE_QP_REQ_RAMROD_DATA_PRI_MASK				0x7
+#define ROCE_CREATE_QP_REQ_RAMROD_DATA_PRI_SHIFT			4
+#define ROCE_CREATE_QP_REQ_RAMROD_DATA_RESERVED_MASK			0x1
+#define ROCE_CREATE_QP_REQ_RAMROD_DATA_RESERVED_SHIFT			7
+#define ROCE_CREATE_QP_REQ_RAMROD_DATA_ERR_RETRY_CNT_MASK		0xF
+#define ROCE_CREATE_QP_REQ_RAMROD_DATA_ERR_RETRY_CNT_SHIFT		8
+#define ROCE_CREATE_QP_REQ_RAMROD_DATA_RNR_NAK_CNT_MASK			0xF
+#define ROCE_CREATE_QP_REQ_RAMROD_DATA_RNR_NAK_CNT_SHIFT		12
 	u8 max_ord;
 	u8 traffic_class;
 	u8 hop_limit;
@@ -7431,26 +7991,27 @@ struct roce_create_qp_req_ramrod_data {
 	__le16 dpi;
 };
 
+/* roce create qp responder ramrod data */
 struct roce_create_qp_resp_ramrod_data {
 	__le16 flags;
-#define ROCE_CREATE_QP_RESP_RAMROD_DATA_ROCE_FLAVOR_MASK          0x3
-#define ROCE_CREATE_QP_RESP_RAMROD_DATA_ROCE_FLAVOR_SHIFT         0
-#define ROCE_CREATE_QP_RESP_RAMROD_DATA_RDMA_RD_EN_MASK           0x1
-#define ROCE_CREATE_QP_RESP_RAMROD_DATA_RDMA_RD_EN_SHIFT          2
-#define ROCE_CREATE_QP_RESP_RAMROD_DATA_RDMA_WR_EN_MASK           0x1
-#define ROCE_CREATE_QP_RESP_RAMROD_DATA_RDMA_WR_EN_SHIFT          3
-#define ROCE_CREATE_QP_RESP_RAMROD_DATA_ATOMIC_EN_MASK            0x1
-#define ROCE_CREATE_QP_RESP_RAMROD_DATA_ATOMIC_EN_SHIFT           4
-#define ROCE_CREATE_QP_RESP_RAMROD_DATA_SRQ_FLG_MASK              0x1
-#define ROCE_CREATE_QP_RESP_RAMROD_DATA_SRQ_FLG_SHIFT             5
-#define ROCE_CREATE_QP_RESP_RAMROD_DATA_E2E_FLOW_CONTROL_EN_MASK  0x1
-#define ROCE_CREATE_QP_RESP_RAMROD_DATA_E2E_FLOW_CONTROL_EN_SHIFT 6
-#define ROCE_CREATE_QP_RESP_RAMROD_DATA_RESERVED_KEY_EN_MASK	0x1
-#define ROCE_CREATE_QP_RESP_RAMROD_DATA_RESERVED_KEY_EN_SHIFT	7
-#define ROCE_CREATE_QP_RESP_RAMROD_DATA_PRI_MASK                  0x7
-#define ROCE_CREATE_QP_RESP_RAMROD_DATA_PRI_SHIFT                 8
-#define ROCE_CREATE_QP_RESP_RAMROD_DATA_MIN_RNR_NAK_TIMER_MASK    0x1F
-#define ROCE_CREATE_QP_RESP_RAMROD_DATA_MIN_RNR_NAK_TIMER_SHIFT   11
+#define ROCE_CREATE_QP_RESP_RAMROD_DATA_ROCE_FLAVOR_MASK		0x3
+#define ROCE_CREATE_QP_RESP_RAMROD_DATA_ROCE_FLAVOR_SHIFT		0
+#define ROCE_CREATE_QP_RESP_RAMROD_DATA_RDMA_RD_EN_MASK			0x1
+#define ROCE_CREATE_QP_RESP_RAMROD_DATA_RDMA_RD_EN_SHIFT		2
+#define ROCE_CREATE_QP_RESP_RAMROD_DATA_RDMA_WR_EN_MASK			0x1
+#define ROCE_CREATE_QP_RESP_RAMROD_DATA_RDMA_WR_EN_SHIFT		3
+#define ROCE_CREATE_QP_RESP_RAMROD_DATA_ATOMIC_EN_MASK			0x1
+#define ROCE_CREATE_QP_RESP_RAMROD_DATA_ATOMIC_EN_SHIFT			4
+#define ROCE_CREATE_QP_RESP_RAMROD_DATA_SRQ_FLG_MASK			0x1
+#define ROCE_CREATE_QP_RESP_RAMROD_DATA_SRQ_FLG_SHIFT			5
+#define ROCE_CREATE_QP_RESP_RAMROD_DATA_E2E_FLOW_CONTROL_EN_MASK	0x1
+#define ROCE_CREATE_QP_RESP_RAMROD_DATA_E2E_FLOW_CONTROL_EN_SHIFT	6
+#define ROCE_CREATE_QP_RESP_RAMROD_DATA_RESERVED_KEY_EN_MASK		0x1
+#define ROCE_CREATE_QP_RESP_RAMROD_DATA_RESERVED_KEY_EN_SHIFT		7
+#define ROCE_CREATE_QP_RESP_RAMROD_DATA_PRI_MASK			0x7
+#define ROCE_CREATE_QP_RESP_RAMROD_DATA_PRI_SHIFT			8
+#define ROCE_CREATE_QP_RESP_RAMROD_DATA_MIN_RNR_NAK_TIMER_MASK		0x1F
+#define ROCE_CREATE_QP_RESP_RAMROD_DATA_MIN_RNR_NAK_TIMER_SHIFT		11
 	u8 max_ird;
 	u8 traffic_class;
 	u8 hop_limit;
@@ -7482,24 +8043,40 @@ struct roce_create_qp_resp_ramrod_data {
 	__le16 dpi;
 };
 
+/* roce DCQCN received statistics */
+struct roce_dcqcn_received_stats {
+	struct regpair ecn_pkt_rcv;
+	struct regpair cnp_pkt_rcv;
+};
+
+/* roce DCQCN sent statistics */
+struct roce_dcqcn_sent_stats {
+	struct regpair cnp_pkt_sent;
+};
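+
+/*
+ * The DCQCN counters are 64-bit values carried as a regpair (two 32-bit
+ * words, low half first); a consumer stitches the halves back together.
+ * A user-space sketch of that reassembly, with stand-in types (kernel
+ * code would apply le32_to_cpu() and the driver's own hi/lo helper):
+ *
+ *	#include <stdint.h>
+ *
+ *	struct regpair { uint32_t lo, hi; };	// __le32 in the header
+ *
+ *	static uint64_t regpair_to_u64(struct regpair r)
+ *	{
+ *		return ((uint64_t)r.hi << 32) | r.lo;
+ *	}
+ *
+ *	// e.g. { .lo = 0x10, .hi = 0x1 } -> 0x100000010
+ */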
+
+/* RoCE destroy qp requester output params */
 struct roce_destroy_qp_req_output_params {
 	__le32 num_bound_mw;
 	__le32 cq_prod;
 };
 
+/* RoCE destroy qp requester ramrod data */
 struct roce_destroy_qp_req_ramrod_data {
 	struct regpair output_params_addr;
 };
 
+/* RoCE destroy qp responder output params */
 struct roce_destroy_qp_resp_output_params {
 	__le32 num_invalidated_mw;
 	__le32 cq_prod;
 };
 
+/* RoCE destroy qp responder ramrod data */
 struct roce_destroy_qp_resp_ramrod_data {
 	struct regpair output_params_addr;
 };
 
+/* roce special events statistics */
 struct roce_events_stats {
 	__le16 silent_drops;
 	__le16 rnr_naks_sent;
@@ -7508,6 +8085,7 @@ struct roce_events_stats {
 	__le32 reserved;
 };
 
+/* ROCE slow path EQ cmd IDs */
 enum roce_event_opcode {
 	ROCE_EVENT_CREATE_QP = 11,
 	ROCE_EVENT_MODIFY_QP,
@@ -7518,6 +8096,7 @@ enum roce_event_opcode {
 	MAX_ROCE_EVENT_OPCODE
 };
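
These opcodes key the slow-path event-queue completions; the enum starts at 11, so the entries after ROCE_EVENT_CREATE_QP take the following values implicitly (ROCE_EVENT_MODIFY_QP is 12, and so on). A dispatch sketch using only the opcodes visible in this hunk; handle_roce_event() is a hypothetical stand-in, not the driver's handler:

	#include <stdio.h>

	enum roce_event_opcode_sketch {
		ROCE_EVENT_CREATE_QP = 11,	/* enum starts at 11 */
		ROCE_EVENT_MODIFY_QP,		/* implicitly 12 */
	};

	static void handle_roce_event(int opcode)
	{
		switch (opcode) {
		case ROCE_EVENT_CREATE_QP:
			printf("create-qp ramrod completed\n");
			break;
		case ROCE_EVENT_MODIFY_QP:
			printf("modify-qp ramrod completed\n");
			break;
		default:
			printf("unexpected roce event %d\n", opcode);
		}
	}

	int main(void) { handle_roce_event(12); return 0; }
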
 
+/* roce func init parameters */
 struct roce_init_func_params {
 	u8 ll2_queue_id;
 	u8 cnp_vlan_priority;
@@ -7526,42 +8105,46 @@ struct roce_init_func_params {
 	__le32 cnp_send_timeout;
 };
 
+/* roce func init ramrod data */
 struct roce_init_func_ramrod_data {
 	struct rdma_init_func_ramrod_data rdma;
 	struct roce_init_func_params roce;
 };
 
+/* roce modify qp requester ramrod data */
 struct roce_modify_qp_req_ramrod_data {
 	__le16 flags;
-#define ROCE_MODIFY_QP_REQ_RAMROD_DATA_MOVE_TO_ERR_FLG_MASK      0x1
-#define ROCE_MODIFY_QP_REQ_RAMROD_DATA_MOVE_TO_ERR_FLG_SHIFT     0
-#define ROCE_MODIFY_QP_REQ_RAMROD_DATA_MOVE_TO_SQD_FLG_MASK      0x1
-#define ROCE_MODIFY_QP_REQ_RAMROD_DATA_MOVE_TO_SQD_FLG_SHIFT     1
-#define ROCE_MODIFY_QP_REQ_RAMROD_DATA_EN_SQD_ASYNC_NOTIFY_MASK  0x1
-#define ROCE_MODIFY_QP_REQ_RAMROD_DATA_EN_SQD_ASYNC_NOTIFY_SHIFT 2
-#define ROCE_MODIFY_QP_REQ_RAMROD_DATA_P_KEY_FLG_MASK            0x1
-#define ROCE_MODIFY_QP_REQ_RAMROD_DATA_P_KEY_FLG_SHIFT           3
-#define ROCE_MODIFY_QP_REQ_RAMROD_DATA_ADDRESS_VECTOR_FLG_MASK   0x1
-#define ROCE_MODIFY_QP_REQ_RAMROD_DATA_ADDRESS_VECTOR_FLG_SHIFT  4
-#define ROCE_MODIFY_QP_REQ_RAMROD_DATA_MAX_ORD_FLG_MASK          0x1
-#define ROCE_MODIFY_QP_REQ_RAMROD_DATA_MAX_ORD_FLG_SHIFT         5
-#define ROCE_MODIFY_QP_REQ_RAMROD_DATA_RNR_NAK_CNT_FLG_MASK      0x1
-#define ROCE_MODIFY_QP_REQ_RAMROD_DATA_RNR_NAK_CNT_FLG_SHIFT     6
-#define ROCE_MODIFY_QP_REQ_RAMROD_DATA_ERR_RETRY_CNT_FLG_MASK    0x1
-#define ROCE_MODIFY_QP_REQ_RAMROD_DATA_ERR_RETRY_CNT_FLG_SHIFT   7
-#define ROCE_MODIFY_QP_REQ_RAMROD_DATA_ACK_TIMEOUT_FLG_MASK      0x1
-#define ROCE_MODIFY_QP_REQ_RAMROD_DATA_ACK_TIMEOUT_FLG_SHIFT     8
-#define ROCE_MODIFY_QP_REQ_RAMROD_DATA_PRI_FLG_MASK              0x1
-#define ROCE_MODIFY_QP_REQ_RAMROD_DATA_PRI_FLG_SHIFT             9
-#define ROCE_MODIFY_QP_REQ_RAMROD_DATA_PRI_MASK                  0x7
-#define ROCE_MODIFY_QP_REQ_RAMROD_DATA_PRI_SHIFT                 10
-#define ROCE_MODIFY_QP_REQ_RAMROD_DATA_RESERVED1_MASK            0x7
-#define ROCE_MODIFY_QP_REQ_RAMROD_DATA_RESERVED1_SHIFT           13
+#define ROCE_MODIFY_QP_REQ_RAMROD_DATA_MOVE_TO_ERR_FLG_MASK		0x1
+#define ROCE_MODIFY_QP_REQ_RAMROD_DATA_MOVE_TO_ERR_FLG_SHIFT		0
+#define ROCE_MODIFY_QP_REQ_RAMROD_DATA_MOVE_TO_SQD_FLG_MASK		0x1
+#define ROCE_MODIFY_QP_REQ_RAMROD_DATA_MOVE_TO_SQD_FLG_SHIFT		1
+#define ROCE_MODIFY_QP_REQ_RAMROD_DATA_EN_SQD_ASYNC_NOTIFY_MASK		0x1
+#define ROCE_MODIFY_QP_REQ_RAMROD_DATA_EN_SQD_ASYNC_NOTIFY_SHIFT	2
+#define ROCE_MODIFY_QP_REQ_RAMROD_DATA_P_KEY_FLG_MASK			0x1
+#define ROCE_MODIFY_QP_REQ_RAMROD_DATA_P_KEY_FLG_SHIFT			3
+#define ROCE_MODIFY_QP_REQ_RAMROD_DATA_ADDRESS_VECTOR_FLG_MASK		0x1
+#define ROCE_MODIFY_QP_REQ_RAMROD_DATA_ADDRESS_VECTOR_FLG_SHIFT		4
+#define ROCE_MODIFY_QP_REQ_RAMROD_DATA_MAX_ORD_FLG_MASK			0x1
+#define ROCE_MODIFY_QP_REQ_RAMROD_DATA_MAX_ORD_FLG_SHIFT		5
+#define ROCE_MODIFY_QP_REQ_RAMROD_DATA_RNR_NAK_CNT_FLG_MASK		0x1
+#define ROCE_MODIFY_QP_REQ_RAMROD_DATA_RNR_NAK_CNT_FLG_SHIFT		6
+#define ROCE_MODIFY_QP_REQ_RAMROD_DATA_ERR_RETRY_CNT_FLG_MASK		0x1
+#define ROCE_MODIFY_QP_REQ_RAMROD_DATA_ERR_RETRY_CNT_FLG_SHIFT		7
+#define ROCE_MODIFY_QP_REQ_RAMROD_DATA_ACK_TIMEOUT_FLG_MASK		0x1
+#define ROCE_MODIFY_QP_REQ_RAMROD_DATA_ACK_TIMEOUT_FLG_SHIFT		8
+#define ROCE_MODIFY_QP_REQ_RAMROD_DATA_PRI_FLG_MASK			0x1
+#define ROCE_MODIFY_QP_REQ_RAMROD_DATA_PRI_FLG_SHIFT			9
+#define ROCE_MODIFY_QP_REQ_RAMROD_DATA_PRI_MASK				0x7
+#define ROCE_MODIFY_QP_REQ_RAMROD_DATA_PRI_SHIFT			10
+#define ROCE_MODIFY_QP_REQ_RAMROD_DATA_PHYSICAL_QUEUES_FLG_MASK		0x1
+#define ROCE_MODIFY_QP_REQ_RAMROD_DATA_PHYSICAL_QUEUES_FLG_SHIFT	13
+#define ROCE_MODIFY_QP_REQ_RAMROD_DATA_RESERVED1_MASK			0x3
+#define ROCE_MODIFY_QP_REQ_RAMROD_DATA_RESERVED1_SHIFT			14
 	u8 fields;
-#define ROCE_MODIFY_QP_REQ_RAMROD_DATA_ERR_RETRY_CNT_MASK        0xF
-#define ROCE_MODIFY_QP_REQ_RAMROD_DATA_ERR_RETRY_CNT_SHIFT       0
-#define ROCE_MODIFY_QP_REQ_RAMROD_DATA_RNR_NAK_CNT_MASK          0xF
-#define ROCE_MODIFY_QP_REQ_RAMROD_DATA_RNR_NAK_CNT_SHIFT         4
+#define ROCE_MODIFY_QP_REQ_RAMROD_DATA_ERR_RETRY_CNT_MASK	0xF
+#define ROCE_MODIFY_QP_REQ_RAMROD_DATA_ERR_RETRY_CNT_SHIFT	0
+#define ROCE_MODIFY_QP_REQ_RAMROD_DATA_RNR_NAK_CNT_MASK		0xF
+#define ROCE_MODIFY_QP_REQ_RAMROD_DATA_RNR_NAK_CNT_SHIFT	4
 	u8 max_ord;
 	u8 traffic_class;
 	u8 hop_limit;
@@ -7570,66 +8153,76 @@ struct roce_modify_qp_req_ramrod_data {
 	__le32 ack_timeout_val;
 	__le16 mtu;
 	__le16 reserved2;
-	__le32 reserved3[3];
+	__le32 reserved3[2];
+	__le16 low_latency_phy_queue;
+	__le16 regular_latency_phy_queue;
 	__le32 src_gid[4];
 	__le32 dst_gid[4];
 };
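
The modify-qp flags word follows a validity-flag pattern: each *_FLG bit tells the firmware whether the field it gates carries a value to apply. So changing the priority means raising PRI_FLG (bit 9) and writing PRI (bits 12:10) in the same word. A sketch under the same stand-in SET_FIELD as earlier, endianness elided:

	#include <stdint.h>
	#include <stdio.h>

	#define ROCE_MODIFY_QP_REQ_RAMROD_DATA_PRI_FLG_MASK	0x1
	#define ROCE_MODIFY_QP_REQ_RAMROD_DATA_PRI_FLG_SHIFT	9
	#define ROCE_MODIFY_QP_REQ_RAMROD_DATA_PRI_MASK		0x7
	#define ROCE_MODIFY_QP_REQ_RAMROD_DATA_PRI_SHIFT	10

	#define SET_FIELD(value, name, flag)                                   \
		do {                                                           \
			(value) &= ~((name##_MASK) << (name##_SHIFT));         \
			(value) |= ((flag) & (name##_MASK)) << (name##_SHIFT); \
		} while (0)

	int main(void)
	{
		uint16_t flags = 0;	/* __le16 in the real struct */

		/* Raise the validity flag, then write the field it gates. */
		SET_FIELD(flags, ROCE_MODIFY_QP_REQ_RAMROD_DATA_PRI_FLG, 1);
		SET_FIELD(flags, ROCE_MODIFY_QP_REQ_RAMROD_DATA_PRI, 5);

		printf("flags=0x%04x\n", flags);	/* 0x1600 */
		return 0;
	}
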
 
+/* roce modify qp responder ramrod data */
 struct roce_modify_qp_resp_ramrod_data {
 	__le16 flags;
-#define ROCE_MODIFY_QP_RESP_RAMROD_DATA_MOVE_TO_ERR_FLG_MASK        0x1
-#define ROCE_MODIFY_QP_RESP_RAMROD_DATA_MOVE_TO_ERR_FLG_SHIFT       0
-#define ROCE_MODIFY_QP_RESP_RAMROD_DATA_RDMA_RD_EN_MASK             0x1
-#define ROCE_MODIFY_QP_RESP_RAMROD_DATA_RDMA_RD_EN_SHIFT            1
-#define ROCE_MODIFY_QP_RESP_RAMROD_DATA_RDMA_WR_EN_MASK             0x1
-#define ROCE_MODIFY_QP_RESP_RAMROD_DATA_RDMA_WR_EN_SHIFT            2
-#define ROCE_MODIFY_QP_RESP_RAMROD_DATA_ATOMIC_EN_MASK              0x1
-#define ROCE_MODIFY_QP_RESP_RAMROD_DATA_ATOMIC_EN_SHIFT             3
-#define ROCE_MODIFY_QP_RESP_RAMROD_DATA_P_KEY_FLG_MASK              0x1
-#define ROCE_MODIFY_QP_RESP_RAMROD_DATA_P_KEY_FLG_SHIFT             4
-#define ROCE_MODIFY_QP_RESP_RAMROD_DATA_ADDRESS_VECTOR_FLG_MASK     0x1
-#define ROCE_MODIFY_QP_RESP_RAMROD_DATA_ADDRESS_VECTOR_FLG_SHIFT    5
-#define ROCE_MODIFY_QP_RESP_RAMROD_DATA_MAX_IRD_FLG_MASK            0x1
-#define ROCE_MODIFY_QP_RESP_RAMROD_DATA_MAX_IRD_FLG_SHIFT           6
-#define ROCE_MODIFY_QP_RESP_RAMROD_DATA_PRI_FLG_MASK                0x1
-#define ROCE_MODIFY_QP_RESP_RAMROD_DATA_PRI_FLG_SHIFT               7
-#define ROCE_MODIFY_QP_RESP_RAMROD_DATA_MIN_RNR_NAK_TIMER_FLG_MASK  0x1
-#define ROCE_MODIFY_QP_RESP_RAMROD_DATA_MIN_RNR_NAK_TIMER_FLG_SHIFT 8
-#define ROCE_MODIFY_QP_RESP_RAMROD_DATA_RDMA_OPS_EN_FLG_MASK        0x1
-#define ROCE_MODIFY_QP_RESP_RAMROD_DATA_RDMA_OPS_EN_FLG_SHIFT       9
-#define ROCE_MODIFY_QP_RESP_RAMROD_DATA_RESERVED1_MASK              0x3F
-#define ROCE_MODIFY_QP_RESP_RAMROD_DATA_RESERVED1_SHIFT             10
+#define ROCE_MODIFY_QP_RESP_RAMROD_DATA_MOVE_TO_ERR_FLG_MASK		0x1
+#define ROCE_MODIFY_QP_RESP_RAMROD_DATA_MOVE_TO_ERR_FLG_SHIFT		0
+#define ROCE_MODIFY_QP_RESP_RAMROD_DATA_RDMA_RD_EN_MASK			0x1
+#define ROCE_MODIFY_QP_RESP_RAMROD_DATA_RDMA_RD_EN_SHIFT		1
+#define ROCE_MODIFY_QP_RESP_RAMROD_DATA_RDMA_WR_EN_MASK			0x1
+#define ROCE_MODIFY_QP_RESP_RAMROD_DATA_RDMA_WR_EN_SHIFT		2
+#define ROCE_MODIFY_QP_RESP_RAMROD_DATA_ATOMIC_EN_MASK			0x1
+#define ROCE_MODIFY_QP_RESP_RAMROD_DATA_ATOMIC_EN_SHIFT			3
+#define ROCE_MODIFY_QP_RESP_RAMROD_DATA_P_KEY_FLG_MASK			0x1
+#define ROCE_MODIFY_QP_RESP_RAMROD_DATA_P_KEY_FLG_SHIFT			4
+#define ROCE_MODIFY_QP_RESP_RAMROD_DATA_ADDRESS_VECTOR_FLG_MASK		0x1
+#define ROCE_MODIFY_QP_RESP_RAMROD_DATA_ADDRESS_VECTOR_FLG_SHIFT	5
+#define ROCE_MODIFY_QP_RESP_RAMROD_DATA_MAX_IRD_FLG_MASK		0x1
+#define ROCE_MODIFY_QP_RESP_RAMROD_DATA_MAX_IRD_FLG_SHIFT		6
+#define ROCE_MODIFY_QP_RESP_RAMROD_DATA_PRI_FLG_MASK			0x1
+#define ROCE_MODIFY_QP_RESP_RAMROD_DATA_PRI_FLG_SHIFT			7
+#define ROCE_MODIFY_QP_RESP_RAMROD_DATA_MIN_RNR_NAK_TIMER_FLG_MASK	0x1
+#define ROCE_MODIFY_QP_RESP_RAMROD_DATA_MIN_RNR_NAK_TIMER_FLG_SHIFT	8
+#define ROCE_MODIFY_QP_RESP_RAMROD_DATA_RDMA_OPS_EN_FLG_MASK		0x1
+#define ROCE_MODIFY_QP_RESP_RAMROD_DATA_RDMA_OPS_EN_FLG_SHIFT		9
+#define ROCE_MODIFY_QP_RESP_RAMROD_DATA_PHYSICAL_QUEUES_FLG_MASK	0x1
+#define ROCE_MODIFY_QP_RESP_RAMROD_DATA_PHYSICAL_QUEUES_FLG_SHIFT	10
+#define ROCE_MODIFY_QP_RESP_RAMROD_DATA_RESERVED1_MASK			0x1F
+#define ROCE_MODIFY_QP_RESP_RAMROD_DATA_RESERVED1_SHIFT			11
 	u8 fields;
-#define ROCE_MODIFY_QP_RESP_RAMROD_DATA_PRI_MASK                    0x7
-#define ROCE_MODIFY_QP_RESP_RAMROD_DATA_PRI_SHIFT                   0
-#define ROCE_MODIFY_QP_RESP_RAMROD_DATA_MIN_RNR_NAK_TIMER_MASK      0x1F
-#define ROCE_MODIFY_QP_RESP_RAMROD_DATA_MIN_RNR_NAK_TIMER_SHIFT     3
+#define ROCE_MODIFY_QP_RESP_RAMROD_DATA_PRI_MASK		0x7
+#define ROCE_MODIFY_QP_RESP_RAMROD_DATA_PRI_SHIFT		0
+#define ROCE_MODIFY_QP_RESP_RAMROD_DATA_MIN_RNR_NAK_TIMER_MASK	0x1F
+#define ROCE_MODIFY_QP_RESP_RAMROD_DATA_MIN_RNR_NAK_TIMER_SHIFT	3
 	u8 max_ird;
 	u8 traffic_class;
 	u8 hop_limit;
 	__le16 p_key;
 	__le32 flow_label;
 	__le16 mtu;
-	__le16 reserved2;
+	__le16 low_latency_phy_queue;
+	__le16 regular_latency_phy_queue;
+	u8 reserved2[6];
 	__le32 src_gid[4];
 	__le32 dst_gid[4];
 };
 
+/* RoCE query qp requester output params */
 struct roce_query_qp_req_output_params {
 	__le32 psn;
 	__le32 flags;
-#define ROCE_QUERY_QP_REQ_OUTPUT_PARAMS_ERR_FLG_MASK          0x1
-#define ROCE_QUERY_QP_REQ_OUTPUT_PARAMS_ERR_FLG_SHIFT         0
-#define ROCE_QUERY_QP_REQ_OUTPUT_PARAMS_SQ_DRAINING_FLG_MASK  0x1
-#define ROCE_QUERY_QP_REQ_OUTPUT_PARAMS_SQ_DRAINING_FLG_SHIFT 1
-#define ROCE_QUERY_QP_REQ_OUTPUT_PARAMS_RESERVED0_MASK        0x3FFFFFFF
-#define ROCE_QUERY_QP_REQ_OUTPUT_PARAMS_RESERVED0_SHIFT       2
+#define ROCE_QUERY_QP_REQ_OUTPUT_PARAMS_ERR_FLG_MASK		0x1
+#define ROCE_QUERY_QP_REQ_OUTPUT_PARAMS_ERR_FLG_SHIFT		0
+#define ROCE_QUERY_QP_REQ_OUTPUT_PARAMS_SQ_DRAINING_FLG_MASK	0x1
+#define ROCE_QUERY_QP_REQ_OUTPUT_PARAMS_SQ_DRAINING_FLG_SHIFT	1
+#define ROCE_QUERY_QP_REQ_OUTPUT_PARAMS_RESERVED0_MASK		0x3FFFFFFF
+#define ROCE_QUERY_QP_REQ_OUTPUT_PARAMS_RESERVED0_SHIFT		2
 };
 
+/* RoCE query qp requester ramrod data */
 struct roce_query_qp_req_ramrod_data {
 	struct regpair output_params_addr;
 };
 
+/* RoCE query qp responder output params */
 struct roce_query_qp_resp_output_params {
 	__le32 psn;
 	__le32 err_flag;
@@ -7639,10 +8232,12 @@ struct roce_query_qp_resp_output_params {
 #define ROCE_QUERY_QP_RESP_OUTPUT_PARAMS_RESERVED0_SHIFT 1
 };
 
+/* RoCE query qp responder ramrod data */
 struct roce_query_qp_resp_ramrod_data {
 	struct regpair output_params_addr;
 };
 
+/* ROCE ramrod command IDs */
 enum roce_ramrod_cmd_id {
 	ROCE_RAMROD_CREATE_QP = 11,
 	ROCE_RAMROD_MODIFY_QP,
@@ -7653,163 +8248,163 @@ enum roce_ramrod_cmd_id {
 	MAX_ROCE_RAMROD_CMD_ID
 };
 
-struct mstorm_roce_req_conn_ag_ctx {
+struct e4_mstorm_roce_req_conn_ag_ctx {
 	u8 byte0;
 	u8 byte1;
 	u8 flags0;
-#define MSTORM_ROCE_REQ_CONN_AG_CTX_BIT0_MASK     0x1
-#define MSTORM_ROCE_REQ_CONN_AG_CTX_BIT0_SHIFT    0
-#define MSTORM_ROCE_REQ_CONN_AG_CTX_BIT1_MASK     0x1
-#define MSTORM_ROCE_REQ_CONN_AG_CTX_BIT1_SHIFT    1
-#define MSTORM_ROCE_REQ_CONN_AG_CTX_CF0_MASK      0x3
-#define MSTORM_ROCE_REQ_CONN_AG_CTX_CF0_SHIFT     2
-#define MSTORM_ROCE_REQ_CONN_AG_CTX_CF1_MASK      0x3
-#define MSTORM_ROCE_REQ_CONN_AG_CTX_CF1_SHIFT     4
-#define MSTORM_ROCE_REQ_CONN_AG_CTX_CF2_MASK      0x3
-#define MSTORM_ROCE_REQ_CONN_AG_CTX_CF2_SHIFT     6
+#define E4_MSTORM_ROCE_REQ_CONN_AG_CTX_BIT0_MASK	0x1
+#define E4_MSTORM_ROCE_REQ_CONN_AG_CTX_BIT0_SHIFT	0
+#define E4_MSTORM_ROCE_REQ_CONN_AG_CTX_BIT1_MASK	0x1
+#define E4_MSTORM_ROCE_REQ_CONN_AG_CTX_BIT1_SHIFT	1
+#define E4_MSTORM_ROCE_REQ_CONN_AG_CTX_CF0_MASK		0x3
+#define E4_MSTORM_ROCE_REQ_CONN_AG_CTX_CF0_SHIFT	2
+#define E4_MSTORM_ROCE_REQ_CONN_AG_CTX_CF1_MASK		0x3
+#define E4_MSTORM_ROCE_REQ_CONN_AG_CTX_CF1_SHIFT	4
+#define E4_MSTORM_ROCE_REQ_CONN_AG_CTX_CF2_MASK		0x3
+#define E4_MSTORM_ROCE_REQ_CONN_AG_CTX_CF2_SHIFT	6
 	u8 flags1;
-#define MSTORM_ROCE_REQ_CONN_AG_CTX_CF0EN_MASK    0x1
-#define MSTORM_ROCE_REQ_CONN_AG_CTX_CF0EN_SHIFT   0
-#define MSTORM_ROCE_REQ_CONN_AG_CTX_CF1EN_MASK    0x1
-#define MSTORM_ROCE_REQ_CONN_AG_CTX_CF1EN_SHIFT   1
-#define MSTORM_ROCE_REQ_CONN_AG_CTX_CF2EN_MASK    0x1
-#define MSTORM_ROCE_REQ_CONN_AG_CTX_CF2EN_SHIFT   2
-#define MSTORM_ROCE_REQ_CONN_AG_CTX_RULE0EN_MASK  0x1
-#define MSTORM_ROCE_REQ_CONN_AG_CTX_RULE0EN_SHIFT 3
-#define MSTORM_ROCE_REQ_CONN_AG_CTX_RULE1EN_MASK  0x1
-#define MSTORM_ROCE_REQ_CONN_AG_CTX_RULE1EN_SHIFT 4
-#define MSTORM_ROCE_REQ_CONN_AG_CTX_RULE2EN_MASK  0x1
-#define MSTORM_ROCE_REQ_CONN_AG_CTX_RULE2EN_SHIFT 5
-#define MSTORM_ROCE_REQ_CONN_AG_CTX_RULE3EN_MASK  0x1
-#define MSTORM_ROCE_REQ_CONN_AG_CTX_RULE3EN_SHIFT 6
-#define MSTORM_ROCE_REQ_CONN_AG_CTX_RULE4EN_MASK  0x1
-#define MSTORM_ROCE_REQ_CONN_AG_CTX_RULE4EN_SHIFT 7
+#define E4_MSTORM_ROCE_REQ_CONN_AG_CTX_CF0EN_MASK	0x1
+#define E4_MSTORM_ROCE_REQ_CONN_AG_CTX_CF0EN_SHIFT	0
+#define E4_MSTORM_ROCE_REQ_CONN_AG_CTX_CF1EN_MASK	0x1
+#define E4_MSTORM_ROCE_REQ_CONN_AG_CTX_CF1EN_SHIFT	1
+#define E4_MSTORM_ROCE_REQ_CONN_AG_CTX_CF2EN_MASK	0x1
+#define E4_MSTORM_ROCE_REQ_CONN_AG_CTX_CF2EN_SHIFT	2
+#define E4_MSTORM_ROCE_REQ_CONN_AG_CTX_RULE0EN_MASK	0x1
+#define E4_MSTORM_ROCE_REQ_CONN_AG_CTX_RULE0EN_SHIFT	3
+#define E4_MSTORM_ROCE_REQ_CONN_AG_CTX_RULE1EN_MASK	0x1
+#define E4_MSTORM_ROCE_REQ_CONN_AG_CTX_RULE1EN_SHIFT	4
+#define E4_MSTORM_ROCE_REQ_CONN_AG_CTX_RULE2EN_MASK	0x1
+#define E4_MSTORM_ROCE_REQ_CONN_AG_CTX_RULE2EN_SHIFT	5
+#define E4_MSTORM_ROCE_REQ_CONN_AG_CTX_RULE3EN_MASK	0x1
+#define E4_MSTORM_ROCE_REQ_CONN_AG_CTX_RULE3EN_SHIFT	6
+#define E4_MSTORM_ROCE_REQ_CONN_AG_CTX_RULE4EN_MASK	0x1
+#define E4_MSTORM_ROCE_REQ_CONN_AG_CTX_RULE4EN_SHIFT	7
 	__le16 word0;
 	__le16 word1;
 	__le32 reg0;
 	__le32 reg1;
 };
 
-struct mstorm_roce_resp_conn_ag_ctx {
+struct e4_mstorm_roce_resp_conn_ag_ctx {
 	u8 byte0;
 	u8 byte1;
 	u8 flags0;
-#define MSTORM_ROCE_RESP_CONN_AG_CTX_BIT0_MASK     0x1
-#define MSTORM_ROCE_RESP_CONN_AG_CTX_BIT0_SHIFT    0
-#define MSTORM_ROCE_RESP_CONN_AG_CTX_BIT1_MASK     0x1
-#define MSTORM_ROCE_RESP_CONN_AG_CTX_BIT1_SHIFT    1
-#define MSTORM_ROCE_RESP_CONN_AG_CTX_CF0_MASK      0x3
-#define MSTORM_ROCE_RESP_CONN_AG_CTX_CF0_SHIFT     2
-#define MSTORM_ROCE_RESP_CONN_AG_CTX_CF1_MASK      0x3
-#define MSTORM_ROCE_RESP_CONN_AG_CTX_CF1_SHIFT     4
-#define MSTORM_ROCE_RESP_CONN_AG_CTX_CF2_MASK      0x3
-#define MSTORM_ROCE_RESP_CONN_AG_CTX_CF2_SHIFT     6
+#define E4_MSTORM_ROCE_RESP_CONN_AG_CTX_BIT0_MASK	0x1
+#define E4_MSTORM_ROCE_RESP_CONN_AG_CTX_BIT0_SHIFT	0
+#define E4_MSTORM_ROCE_RESP_CONN_AG_CTX_BIT1_MASK	0x1
+#define E4_MSTORM_ROCE_RESP_CONN_AG_CTX_BIT1_SHIFT	1
+#define E4_MSTORM_ROCE_RESP_CONN_AG_CTX_CF0_MASK	0x3
+#define E4_MSTORM_ROCE_RESP_CONN_AG_CTX_CF0_SHIFT	2
+#define E4_MSTORM_ROCE_RESP_CONN_AG_CTX_CF1_MASK	0x3
+#define E4_MSTORM_ROCE_RESP_CONN_AG_CTX_CF1_SHIFT	4
+#define E4_MSTORM_ROCE_RESP_CONN_AG_CTX_CF2_MASK	0x3
+#define E4_MSTORM_ROCE_RESP_CONN_AG_CTX_CF2_SHIFT	6
 	u8 flags1;
-#define MSTORM_ROCE_RESP_CONN_AG_CTX_CF0EN_MASK    0x1
-#define MSTORM_ROCE_RESP_CONN_AG_CTX_CF0EN_SHIFT   0
-#define MSTORM_ROCE_RESP_CONN_AG_CTX_CF1EN_MASK    0x1
-#define MSTORM_ROCE_RESP_CONN_AG_CTX_CF1EN_SHIFT   1
-#define MSTORM_ROCE_RESP_CONN_AG_CTX_CF2EN_MASK    0x1
-#define MSTORM_ROCE_RESP_CONN_AG_CTX_CF2EN_SHIFT   2
-#define MSTORM_ROCE_RESP_CONN_AG_CTX_RULE0EN_MASK  0x1
-#define MSTORM_ROCE_RESP_CONN_AG_CTX_RULE0EN_SHIFT 3
-#define MSTORM_ROCE_RESP_CONN_AG_CTX_RULE1EN_MASK  0x1
-#define MSTORM_ROCE_RESP_CONN_AG_CTX_RULE1EN_SHIFT 4
-#define MSTORM_ROCE_RESP_CONN_AG_CTX_RULE2EN_MASK  0x1
-#define MSTORM_ROCE_RESP_CONN_AG_CTX_RULE2EN_SHIFT 5
-#define MSTORM_ROCE_RESP_CONN_AG_CTX_RULE3EN_MASK  0x1
-#define MSTORM_ROCE_RESP_CONN_AG_CTX_RULE3EN_SHIFT 6
-#define MSTORM_ROCE_RESP_CONN_AG_CTX_RULE4EN_MASK  0x1
-#define MSTORM_ROCE_RESP_CONN_AG_CTX_RULE4EN_SHIFT 7
+#define E4_MSTORM_ROCE_RESP_CONN_AG_CTX_CF0EN_MASK	0x1
+#define E4_MSTORM_ROCE_RESP_CONN_AG_CTX_CF0EN_SHIFT	0
+#define E4_MSTORM_ROCE_RESP_CONN_AG_CTX_CF1EN_MASK	0x1
+#define E4_MSTORM_ROCE_RESP_CONN_AG_CTX_CF1EN_SHIFT	1
+#define E4_MSTORM_ROCE_RESP_CONN_AG_CTX_CF2EN_MASK	0x1
+#define E4_MSTORM_ROCE_RESP_CONN_AG_CTX_CF2EN_SHIFT	2
+#define E4_MSTORM_ROCE_RESP_CONN_AG_CTX_RULE0EN_MASK	0x1
+#define E4_MSTORM_ROCE_RESP_CONN_AG_CTX_RULE0EN_SHIFT	3
+#define E4_MSTORM_ROCE_RESP_CONN_AG_CTX_RULE1EN_MASK	0x1
+#define E4_MSTORM_ROCE_RESP_CONN_AG_CTX_RULE1EN_SHIFT	4
+#define E4_MSTORM_ROCE_RESP_CONN_AG_CTX_RULE2EN_MASK	0x1
+#define E4_MSTORM_ROCE_RESP_CONN_AG_CTX_RULE2EN_SHIFT	5
+#define E4_MSTORM_ROCE_RESP_CONN_AG_CTX_RULE3EN_MASK	0x1
+#define E4_MSTORM_ROCE_RESP_CONN_AG_CTX_RULE3EN_SHIFT	6
+#define E4_MSTORM_ROCE_RESP_CONN_AG_CTX_RULE4EN_MASK	0x1
+#define E4_MSTORM_ROCE_RESP_CONN_AG_CTX_RULE4EN_SHIFT	7
 	__le16 word0;
 	__le16 word1;
 	__le32 reg0;
 	__le32 reg1;
 };
 
-struct tstorm_roce_req_conn_ag_ctx {
+struct e4_tstorm_roce_req_conn_ag_ctx {
 	u8 reserved0;
 	u8 state;
 	u8 flags0;
-#define TSTORM_ROCE_REQ_CONN_AG_CTX_EXIST_IN_QM0_MASK                0x1
-#define TSTORM_ROCE_REQ_CONN_AG_CTX_EXIST_IN_QM0_SHIFT               0
-#define TSTORM_ROCE_REQ_CONN_AG_CTX_RX_ERROR_OCCURED_MASK            0x1
-#define TSTORM_ROCE_REQ_CONN_AG_CTX_RX_ERROR_OCCURED_SHIFT           1
-#define TSTORM_ROCE_REQ_CONN_AG_CTX_TX_CQE_ERROR_OCCURED_MASK        0x1
-#define TSTORM_ROCE_REQ_CONN_AG_CTX_TX_CQE_ERROR_OCCURED_SHIFT       2
-#define TSTORM_ROCE_REQ_CONN_AG_CTX_BIT3_MASK                        0x1
-#define TSTORM_ROCE_REQ_CONN_AG_CTX_BIT3_SHIFT                       3
-#define TSTORM_ROCE_REQ_CONN_AG_CTX_MSTORM_FLUSH_MASK                0x1
-#define TSTORM_ROCE_REQ_CONN_AG_CTX_MSTORM_FLUSH_SHIFT               4
-#define TSTORM_ROCE_REQ_CONN_AG_CTX_CACHED_ORQ_MASK                  0x1
-#define TSTORM_ROCE_REQ_CONN_AG_CTX_CACHED_ORQ_SHIFT                 5
-#define TSTORM_ROCE_REQ_CONN_AG_CTX_TIMER_CF_MASK                    0x3
-#define TSTORM_ROCE_REQ_CONN_AG_CTX_TIMER_CF_SHIFT                   6
+#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_EXIST_IN_QM0_MASK		0x1
+#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_EXIST_IN_QM0_SHIFT		0
+#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_RX_ERROR_OCCURRED_MASK		0x1
+#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_RX_ERROR_OCCURRED_SHIFT		1
+#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_TX_CQE_ERROR_OCCURRED_MASK	0x1
+#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_TX_CQE_ERROR_OCCURRED_SHIFT	2
+#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_BIT3_MASK			0x1
+#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_BIT3_SHIFT			3
+#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_MSTORM_FLUSH_MASK		0x1
+#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_MSTORM_FLUSH_SHIFT		4
+#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_CACHED_ORQ_MASK			0x1
+#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_CACHED_ORQ_SHIFT			5
+#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_TIMER_CF_MASK			0x3
+#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_TIMER_CF_SHIFT			6
 	u8 flags1;
-#define TSTORM_ROCE_REQ_CONN_AG_CTX_CF1_MASK                         0x3
-#define TSTORM_ROCE_REQ_CONN_AG_CTX_CF1_SHIFT                        0
-#define TSTORM_ROCE_REQ_CONN_AG_CTX_FLUSH_SQ_CF_MASK                 0x3
-#define TSTORM_ROCE_REQ_CONN_AG_CTX_FLUSH_SQ_CF_SHIFT                2
-#define TSTORM_ROCE_REQ_CONN_AG_CTX_TIMER_STOP_ALL_CF_MASK           0x3
-#define TSTORM_ROCE_REQ_CONN_AG_CTX_TIMER_STOP_ALL_CF_SHIFT          4
-#define TSTORM_ROCE_REQ_CONN_AG_CTX_FLUSH_Q0_CF_MASK                 0x3
-#define TSTORM_ROCE_REQ_CONN_AG_CTX_FLUSH_Q0_CF_SHIFT                6
+#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_CF1_MASK				0x3
+#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_CF1_SHIFT			0
+#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_FLUSH_SQ_CF_MASK			0x3
+#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_FLUSH_SQ_CF_SHIFT		2
+#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_TIMER_STOP_ALL_CF_MASK		0x3
+#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_TIMER_STOP_ALL_CF_SHIFT		4
+#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_FLUSH_Q0_CF_MASK			0x3
+#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_FLUSH_Q0_CF_SHIFT		6
 	u8 flags2;
-#define TSTORM_ROCE_REQ_CONN_AG_CTX_MSTORM_FLUSH_CF_MASK             0x3
-#define TSTORM_ROCE_REQ_CONN_AG_CTX_MSTORM_FLUSH_CF_SHIFT            0
-#define TSTORM_ROCE_REQ_CONN_AG_CTX_SET_TIMER_CF_MASK                0x3
-#define TSTORM_ROCE_REQ_CONN_AG_CTX_SET_TIMER_CF_SHIFT               2
-#define TSTORM_ROCE_REQ_CONN_AG_CTX_TX_ASYNC_ERROR_CF_MASK           0x3
-#define TSTORM_ROCE_REQ_CONN_AG_CTX_TX_ASYNC_ERROR_CF_SHIFT          4
-#define TSTORM_ROCE_REQ_CONN_AG_CTX_RXMIT_DONE_CF_MASK               0x3
-#define TSTORM_ROCE_REQ_CONN_AG_CTX_RXMIT_DONE_CF_SHIFT              6
+#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_MSTORM_FLUSH_CF_MASK	0x3
+#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_MSTORM_FLUSH_CF_SHIFT	0
+#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_SET_TIMER_CF_MASK	0x3
+#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_SET_TIMER_CF_SHIFT	2
+#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_TX_ASYNC_ERROR_CF_MASK	0x3
+#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_TX_ASYNC_ERROR_CF_SHIFT	4
+#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_RXMIT_DONE_CF_MASK	0x3
+#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_RXMIT_DONE_CF_SHIFT	6
 	u8 flags3;
-#define TSTORM_ROCE_REQ_CONN_AG_CTX_ERROR_SCAN_COMPLETED_CF_MASK     0x3
-#define TSTORM_ROCE_REQ_CONN_AG_CTX_ERROR_SCAN_COMPLETED_CF_SHIFT    0
-#define TSTORM_ROCE_REQ_CONN_AG_CTX_SQ_DRAIN_COMPLETED_CF_MASK       0x3
-#define TSTORM_ROCE_REQ_CONN_AG_CTX_SQ_DRAIN_COMPLETED_CF_SHIFT      2
-#define TSTORM_ROCE_REQ_CONN_AG_CTX_TIMER_CF_EN_MASK                 0x1
-#define TSTORM_ROCE_REQ_CONN_AG_CTX_TIMER_CF_EN_SHIFT                4
-#define TSTORM_ROCE_REQ_CONN_AG_CTX_CF1EN_MASK                       0x1
-#define TSTORM_ROCE_REQ_CONN_AG_CTX_CF1EN_SHIFT                      5
-#define TSTORM_ROCE_REQ_CONN_AG_CTX_FLUSH_SQ_CF_EN_MASK              0x1
-#define TSTORM_ROCE_REQ_CONN_AG_CTX_FLUSH_SQ_CF_EN_SHIFT             6
-#define TSTORM_ROCE_REQ_CONN_AG_CTX_TIMER_STOP_ALL_CF_EN_MASK        0x1
-#define TSTORM_ROCE_REQ_CONN_AG_CTX_TIMER_STOP_ALL_CF_EN_SHIFT       7
+#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_ERROR_SCAN_COMPLETED_CF_MASK	0x3
+#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_ERROR_SCAN_COMPLETED_CF_SHIFT	0
+#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_SQ_DRAIN_COMPLETED_CF_MASK	0x3
+#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_SQ_DRAIN_COMPLETED_CF_SHIFT	2
+#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_TIMER_CF_EN_MASK			0x1
+#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_TIMER_CF_EN_SHIFT		4
+#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_CF1EN_MASK			0x1
+#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_CF1EN_SHIFT			5
+#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_FLUSH_SQ_CF_EN_MASK		0x1
+#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_FLUSH_SQ_CF_EN_SHIFT		6
+#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_TIMER_STOP_ALL_CF_EN_MASK	0x1
+#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_TIMER_STOP_ALL_CF_EN_SHIFT	7
 	u8 flags4;
-#define TSTORM_ROCE_REQ_CONN_AG_CTX_FLUSH_Q0_CF_EN_MASK              0x1
-#define TSTORM_ROCE_REQ_CONN_AG_CTX_FLUSH_Q0_CF_EN_SHIFT             0
-#define TSTORM_ROCE_REQ_CONN_AG_CTX_MSTORM_FLUSH_CF_EN_MASK          0x1
-#define TSTORM_ROCE_REQ_CONN_AG_CTX_MSTORM_FLUSH_CF_EN_SHIFT         1
-#define TSTORM_ROCE_REQ_CONN_AG_CTX_SET_TIMER_CF_EN_MASK             0x1
-#define TSTORM_ROCE_REQ_CONN_AG_CTX_SET_TIMER_CF_EN_SHIFT            2
-#define TSTORM_ROCE_REQ_CONN_AG_CTX_TX_ASYNC_ERROR_CF_EN_MASK        0x1
-#define TSTORM_ROCE_REQ_CONN_AG_CTX_TX_ASYNC_ERROR_CF_EN_SHIFT       3
-#define TSTORM_ROCE_REQ_CONN_AG_CTX_RXMIT_DONE_CF_EN_MASK            0x1
-#define TSTORM_ROCE_REQ_CONN_AG_CTX_RXMIT_DONE_CF_EN_SHIFT           4
-#define TSTORM_ROCE_REQ_CONN_AG_CTX_ERROR_SCAN_COMPLETED_CF_EN_MASK  0x1
-#define TSTORM_ROCE_REQ_CONN_AG_CTX_ERROR_SCAN_COMPLETED_CF_EN_SHIFT 5
-#define TSTORM_ROCE_REQ_CONN_AG_CTX_SQ_DRAIN_COMPLETED_CF_EN_MASK    0x1
-#define TSTORM_ROCE_REQ_CONN_AG_CTX_SQ_DRAIN_COMPLETED_CF_EN_SHIFT   6
-#define TSTORM_ROCE_REQ_CONN_AG_CTX_RULE0EN_MASK                     0x1
-#define TSTORM_ROCE_REQ_CONN_AG_CTX_RULE0EN_SHIFT                    7
+#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_FLUSH_Q0_CF_EN_MASK		0x1
+#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_FLUSH_Q0_CF_EN_SHIFT		0
+#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_MSTORM_FLUSH_CF_EN_MASK		0x1
+#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_MSTORM_FLUSH_CF_EN_SHIFT		1
+#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_SET_TIMER_CF_EN_MASK		0x1
+#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_SET_TIMER_CF_EN_SHIFT		2
+#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_TX_ASYNC_ERROR_CF_EN_MASK	0x1
+#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_TX_ASYNC_ERROR_CF_EN_SHIFT	3
+#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_RXMIT_DONE_CF_EN_MASK		0x1
+#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_RXMIT_DONE_CF_EN_SHIFT		4
+#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_ERROR_SCAN_COMPLETED_CF_EN_MASK	0x1
+#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_ERROR_SCAN_COMPLETED_CF_EN_SHIFT	5
+#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_SQ_DRAIN_COMPLETED_CF_EN_MASK	0x1
+#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_SQ_DRAIN_COMPLETED_CF_EN_SHIFT	6
+#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_RULE0EN_MASK			0x1
+#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_RULE0EN_SHIFT			7
 	u8 flags5;
-#define TSTORM_ROCE_REQ_CONN_AG_CTX_RULE1EN_MASK                     0x1
-#define TSTORM_ROCE_REQ_CONN_AG_CTX_RULE1EN_SHIFT                    0
-#define TSTORM_ROCE_REQ_CONN_AG_CTX_RULE2EN_MASK                     0x1
-#define TSTORM_ROCE_REQ_CONN_AG_CTX_RULE2EN_SHIFT                    1
-#define TSTORM_ROCE_REQ_CONN_AG_CTX_RULE3EN_MASK                     0x1
-#define TSTORM_ROCE_REQ_CONN_AG_CTX_RULE3EN_SHIFT                    2
-#define TSTORM_ROCE_REQ_CONN_AG_CTX_RULE4EN_MASK                     0x1
-#define TSTORM_ROCE_REQ_CONN_AG_CTX_RULE4EN_SHIFT                    3
-#define TSTORM_ROCE_REQ_CONN_AG_CTX_RULE5EN_MASK                     0x1
-#define TSTORM_ROCE_REQ_CONN_AG_CTX_RULE5EN_SHIFT                    4
-#define TSTORM_ROCE_REQ_CONN_AG_CTX_SND_SQ_CONS_EN_MASK              0x1
-#define TSTORM_ROCE_REQ_CONN_AG_CTX_SND_SQ_CONS_EN_SHIFT             5
-#define TSTORM_ROCE_REQ_CONN_AG_CTX_RULE7EN_MASK                     0x1
-#define TSTORM_ROCE_REQ_CONN_AG_CTX_RULE7EN_SHIFT                    6
-#define TSTORM_ROCE_REQ_CONN_AG_CTX_RULE8EN_MASK                     0x1
-#define TSTORM_ROCE_REQ_CONN_AG_CTX_RULE8EN_SHIFT                    7
+#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_RULE1EN_MASK		0x1
+#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_RULE1EN_SHIFT		0
+#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_RULE2EN_MASK		0x1
+#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_RULE2EN_SHIFT		1
+#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_RULE3EN_MASK		0x1
+#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_RULE3EN_SHIFT		2
+#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_RULE4EN_MASK		0x1
+#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_RULE4EN_SHIFT		3
+#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_RULE5EN_MASK		0x1
+#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_RULE5EN_SHIFT		4
+#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_SND_SQ_CONS_EN_MASK	0x1
+#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_SND_SQ_CONS_EN_SHIFT	5
+#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_RULE7EN_MASK		0x1
+#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_RULE7EN_SHIFT		6
+#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_RULE8EN_MASK		0x1
+#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_RULE8EN_SHIFT		7
 	__le32 reg0;
 	__le32 snd_nxt_psn;
 	__le32 snd_max_psn;
@@ -7825,95 +8420,95 @@ struct tstorm_roce_req_conn_ag_ctx {
 	u8 byte4;
 	u8 byte5;
 	__le16 snd_sq_cons;
-	__le16 word2;
+	__le16 conn_dpi;
 	__le16 word3;
 	__le32 reg9;
 	__le32 reg10;
 };
 
-struct tstorm_roce_resp_conn_ag_ctx {
+struct e4_tstorm_roce_resp_conn_ag_ctx {
 	u8 byte0;
 	u8 state;
 	u8 flags0;
-#define TSTORM_ROCE_RESP_CONN_AG_CTX_EXIST_IN_QM0_MASK        0x1
-#define TSTORM_ROCE_RESP_CONN_AG_CTX_EXIST_IN_QM0_SHIFT       0
-#define TSTORM_ROCE_RESP_CONN_AG_CTX_RX_ERROR_NOTIFY_REQUESTER_MASK  0x1
-#define TSTORM_ROCE_RESP_CONN_AG_CTX_RX_ERROR_NOTIFY_REQUESTER_SHIFT 1
-#define TSTORM_ROCE_RESP_CONN_AG_CTX_BIT2_MASK                0x1
-#define TSTORM_ROCE_RESP_CONN_AG_CTX_BIT2_SHIFT               2
-#define TSTORM_ROCE_RESP_CONN_AG_CTX_BIT3_MASK                0x1
-#define TSTORM_ROCE_RESP_CONN_AG_CTX_BIT3_SHIFT               3
-#define TSTORM_ROCE_RESP_CONN_AG_CTX_MSTORM_FLUSH_MASK        0x1
-#define TSTORM_ROCE_RESP_CONN_AG_CTX_MSTORM_FLUSH_SHIFT       4
-#define TSTORM_ROCE_RESP_CONN_AG_CTX_BIT5_MASK                0x1
-#define TSTORM_ROCE_RESP_CONN_AG_CTX_BIT5_SHIFT               5
-#define TSTORM_ROCE_RESP_CONN_AG_CTX_CF0_MASK                 0x3
-#define TSTORM_ROCE_RESP_CONN_AG_CTX_CF0_SHIFT                6
+#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_EXIST_IN_QM0_MASK		0x1
+#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_EXIST_IN_QM0_SHIFT		0
+#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_RX_ERROR_NOTIFY_REQUESTER_MASK	0x1
+#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_RX_ERROR_NOTIFY_REQUESTER_SHIFT	1
+#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_BIT2_MASK			0x1
+#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_BIT2_SHIFT			2
+#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_BIT3_MASK			0x1
+#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_BIT3_SHIFT			3
+#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_MSTORM_FLUSH_MASK		0x1
+#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_MSTORM_FLUSH_SHIFT		4
+#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_BIT5_MASK			0x1
+#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_BIT5_SHIFT			5
+#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_CF0_MASK			0x3
+#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_CF0_SHIFT			6
 	u8 flags1;
-#define TSTORM_ROCE_RESP_CONN_AG_CTX_RX_ERROR_CF_MASK         0x3
-#define TSTORM_ROCE_RESP_CONN_AG_CTX_RX_ERROR_CF_SHIFT        0
-#define TSTORM_ROCE_RESP_CONN_AG_CTX_TX_ERROR_CF_MASK         0x3
-#define TSTORM_ROCE_RESP_CONN_AG_CTX_TX_ERROR_CF_SHIFT        2
-#define TSTORM_ROCE_RESP_CONN_AG_CTX_CF3_MASK                 0x3
-#define TSTORM_ROCE_RESP_CONN_AG_CTX_CF3_SHIFT                4
-#define TSTORM_ROCE_RESP_CONN_AG_CTX_FLUSH_Q0_CF_MASK         0x3
-#define TSTORM_ROCE_RESP_CONN_AG_CTX_FLUSH_Q0_CF_SHIFT        6
+#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_RX_ERROR_CF_MASK	0x3
+#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_RX_ERROR_CF_SHIFT	0
+#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_TX_ERROR_CF_MASK	0x3
+#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_TX_ERROR_CF_SHIFT	2
+#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_CF3_MASK		0x3
+#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_CF3_SHIFT		4
+#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_FLUSH_Q0_CF_MASK	0x3
+#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_FLUSH_Q0_CF_SHIFT	6
 	u8 flags2;
-#define TSTORM_ROCE_RESP_CONN_AG_CTX_MSTORM_FLUSH_CF_MASK     0x3
-#define TSTORM_ROCE_RESP_CONN_AG_CTX_MSTORM_FLUSH_CF_SHIFT    0
-#define TSTORM_ROCE_RESP_CONN_AG_CTX_CF6_MASK                 0x3
-#define TSTORM_ROCE_RESP_CONN_AG_CTX_CF6_SHIFT                2
-#define TSTORM_ROCE_RESP_CONN_AG_CTX_CF7_MASK                 0x3
-#define TSTORM_ROCE_RESP_CONN_AG_CTX_CF7_SHIFT                4
-#define TSTORM_ROCE_RESP_CONN_AG_CTX_CF8_MASK                 0x3
-#define TSTORM_ROCE_RESP_CONN_AG_CTX_CF8_SHIFT                6
+#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_MSTORM_FLUSH_CF_MASK	0x3
+#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_MSTORM_FLUSH_CF_SHIFT	0
+#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_CF6_MASK		0x3
+#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_CF6_SHIFT		2
+#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_CF7_MASK		0x3
+#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_CF7_SHIFT		4
+#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_CF8_MASK		0x3
+#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_CF8_SHIFT		6
 	u8 flags3;
-#define TSTORM_ROCE_RESP_CONN_AG_CTX_CF9_MASK                 0x3
-#define TSTORM_ROCE_RESP_CONN_AG_CTX_CF9_SHIFT                0
-#define TSTORM_ROCE_RESP_CONN_AG_CTX_CF10_MASK                0x3
-#define TSTORM_ROCE_RESP_CONN_AG_CTX_CF10_SHIFT               2
-#define TSTORM_ROCE_RESP_CONN_AG_CTX_CF0EN_MASK               0x1
-#define TSTORM_ROCE_RESP_CONN_AG_CTX_CF0EN_SHIFT              4
-#define TSTORM_ROCE_RESP_CONN_AG_CTX_RX_ERROR_CF_EN_MASK      0x1
-#define TSTORM_ROCE_RESP_CONN_AG_CTX_RX_ERROR_CF_EN_SHIFT     5
-#define TSTORM_ROCE_RESP_CONN_AG_CTX_TX_ERROR_CF_EN_MASK      0x1
-#define TSTORM_ROCE_RESP_CONN_AG_CTX_TX_ERROR_CF_EN_SHIFT     6
-#define TSTORM_ROCE_RESP_CONN_AG_CTX_CF3EN_MASK               0x1
-#define TSTORM_ROCE_RESP_CONN_AG_CTX_CF3EN_SHIFT              7
+#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_CF9_MASK		0x3
+#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_CF9_SHIFT		0
+#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_CF10_MASK		0x3
+#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_CF10_SHIFT		2
+#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_CF0EN_MASK		0x1
+#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_CF0EN_SHIFT		4
+#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_RX_ERROR_CF_EN_MASK	0x1
+#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_RX_ERROR_CF_EN_SHIFT	5
+#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_TX_ERROR_CF_EN_MASK	0x1
+#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_TX_ERROR_CF_EN_SHIFT	6
+#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_CF3EN_MASK		0x1
+#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_CF3EN_SHIFT		7
 	u8 flags4;
-#define TSTORM_ROCE_RESP_CONN_AG_CTX_FLUSH_Q0_CF_EN_MASK      0x1
-#define TSTORM_ROCE_RESP_CONN_AG_CTX_FLUSH_Q0_CF_EN_SHIFT     0
-#define TSTORM_ROCE_RESP_CONN_AG_CTX_MSTORM_FLUSH_CF_EN_MASK  0x1
-#define TSTORM_ROCE_RESP_CONN_AG_CTX_MSTORM_FLUSH_CF_EN_SHIFT 1
-#define TSTORM_ROCE_RESP_CONN_AG_CTX_CF6EN_MASK               0x1
-#define TSTORM_ROCE_RESP_CONN_AG_CTX_CF6EN_SHIFT              2
-#define TSTORM_ROCE_RESP_CONN_AG_CTX_CF7EN_MASK               0x1
-#define TSTORM_ROCE_RESP_CONN_AG_CTX_CF7EN_SHIFT              3
-#define TSTORM_ROCE_RESP_CONN_AG_CTX_CF8EN_MASK               0x1
-#define TSTORM_ROCE_RESP_CONN_AG_CTX_CF8EN_SHIFT              4
-#define TSTORM_ROCE_RESP_CONN_AG_CTX_CF9EN_MASK               0x1
-#define TSTORM_ROCE_RESP_CONN_AG_CTX_CF9EN_SHIFT              5
-#define TSTORM_ROCE_RESP_CONN_AG_CTX_CF10EN_MASK              0x1
-#define TSTORM_ROCE_RESP_CONN_AG_CTX_CF10EN_SHIFT             6
-#define TSTORM_ROCE_RESP_CONN_AG_CTX_RULE0EN_MASK             0x1
-#define TSTORM_ROCE_RESP_CONN_AG_CTX_RULE0EN_SHIFT            7
+#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_FLUSH_Q0_CF_EN_MASK		0x1
+#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_FLUSH_Q0_CF_EN_SHIFT		0
+#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_MSTORM_FLUSH_CF_EN_MASK		0x1
+#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_MSTORM_FLUSH_CF_EN_SHIFT	1
+#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_CF6EN_MASK			0x1
+#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_CF6EN_SHIFT			2
+#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_CF7EN_MASK			0x1
+#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_CF7EN_SHIFT			3
+#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_CF8EN_MASK			0x1
+#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_CF8EN_SHIFT			4
+#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_CF9EN_MASK			0x1
+#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_CF9EN_SHIFT			5
+#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_CF10EN_MASK			0x1
+#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_CF10EN_SHIFT			6
+#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_RULE0EN_MASK			0x1
+#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_RULE0EN_SHIFT			7
 	u8 flags5;
-#define TSTORM_ROCE_RESP_CONN_AG_CTX_RULE1EN_MASK             0x1
-#define TSTORM_ROCE_RESP_CONN_AG_CTX_RULE1EN_SHIFT            0
-#define TSTORM_ROCE_RESP_CONN_AG_CTX_RULE2EN_MASK             0x1
-#define TSTORM_ROCE_RESP_CONN_AG_CTX_RULE2EN_SHIFT            1
-#define TSTORM_ROCE_RESP_CONN_AG_CTX_RULE3EN_MASK             0x1
-#define TSTORM_ROCE_RESP_CONN_AG_CTX_RULE3EN_SHIFT            2
-#define TSTORM_ROCE_RESP_CONN_AG_CTX_RULE4EN_MASK             0x1
-#define TSTORM_ROCE_RESP_CONN_AG_CTX_RULE4EN_SHIFT            3
-#define TSTORM_ROCE_RESP_CONN_AG_CTX_RULE5EN_MASK             0x1
-#define TSTORM_ROCE_RESP_CONN_AG_CTX_RULE5EN_SHIFT            4
-#define TSTORM_ROCE_RESP_CONN_AG_CTX_RQ_RULE_EN_MASK          0x1
-#define TSTORM_ROCE_RESP_CONN_AG_CTX_RQ_RULE_EN_SHIFT         5
-#define TSTORM_ROCE_RESP_CONN_AG_CTX_RULE7EN_MASK             0x1
-#define TSTORM_ROCE_RESP_CONN_AG_CTX_RULE7EN_SHIFT            6
-#define TSTORM_ROCE_RESP_CONN_AG_CTX_RULE8EN_MASK             0x1
-#define TSTORM_ROCE_RESP_CONN_AG_CTX_RULE8EN_SHIFT            7
+#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_RULE1EN_MASK		0x1
+#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_RULE1EN_SHIFT		0
+#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_RULE2EN_MASK		0x1
+#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_RULE2EN_SHIFT		1
+#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_RULE3EN_MASK		0x1
+#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_RULE3EN_SHIFT		2
+#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_RULE4EN_MASK		0x1
+#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_RULE4EN_SHIFT		3
+#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_RULE5EN_MASK		0x1
+#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_RULE5EN_SHIFT		4
+#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_RQ_RULE_EN_MASK		0x1
+#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_RQ_RULE_EN_SHIFT	5
+#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_RULE7EN_MASK		0x1
+#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_RULE7EN_SHIFT		6
+#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_RULE8EN_MASK		0x1
+#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_RULE8EN_SHIFT		7
 	__le32 psn_and_rxmit_id_echo;
 	__le32 reg1;
 	__le32 reg2;
@@ -7935,63 +8530,63 @@ struct tstorm_roce_resp_conn_ag_ctx {
 	__le32 reg10;
 };
 
-struct ustorm_roce_req_conn_ag_ctx {
+struct e4_ustorm_roce_req_conn_ag_ctx {
 	u8 byte0;
 	u8 byte1;
 	u8 flags0;
-#define USTORM_ROCE_REQ_CONN_AG_CTX_BIT0_MASK     0x1
-#define USTORM_ROCE_REQ_CONN_AG_CTX_BIT0_SHIFT    0
-#define USTORM_ROCE_REQ_CONN_AG_CTX_BIT1_MASK     0x1
-#define USTORM_ROCE_REQ_CONN_AG_CTX_BIT1_SHIFT    1
-#define USTORM_ROCE_REQ_CONN_AG_CTX_CF0_MASK      0x3
-#define USTORM_ROCE_REQ_CONN_AG_CTX_CF0_SHIFT     2
-#define USTORM_ROCE_REQ_CONN_AG_CTX_CF1_MASK      0x3
-#define USTORM_ROCE_REQ_CONN_AG_CTX_CF1_SHIFT     4
-#define USTORM_ROCE_REQ_CONN_AG_CTX_CF2_MASK      0x3
-#define USTORM_ROCE_REQ_CONN_AG_CTX_CF2_SHIFT     6
+#define E4_USTORM_ROCE_REQ_CONN_AG_CTX_BIT0_MASK	0x1
+#define E4_USTORM_ROCE_REQ_CONN_AG_CTX_BIT0_SHIFT	0
+#define E4_USTORM_ROCE_REQ_CONN_AG_CTX_BIT1_MASK	0x1
+#define E4_USTORM_ROCE_REQ_CONN_AG_CTX_BIT1_SHIFT	1
+#define E4_USTORM_ROCE_REQ_CONN_AG_CTX_CF0_MASK		0x3
+#define E4_USTORM_ROCE_REQ_CONN_AG_CTX_CF0_SHIFT	2
+#define E4_USTORM_ROCE_REQ_CONN_AG_CTX_CF1_MASK		0x3
+#define E4_USTORM_ROCE_REQ_CONN_AG_CTX_CF1_SHIFT	4
+#define E4_USTORM_ROCE_REQ_CONN_AG_CTX_CF2_MASK		0x3
+#define E4_USTORM_ROCE_REQ_CONN_AG_CTX_CF2_SHIFT	6
 	u8 flags1;
-#define USTORM_ROCE_REQ_CONN_AG_CTX_CF3_MASK      0x3
-#define USTORM_ROCE_REQ_CONN_AG_CTX_CF3_SHIFT     0
-#define USTORM_ROCE_REQ_CONN_AG_CTX_CF4_MASK      0x3
-#define USTORM_ROCE_REQ_CONN_AG_CTX_CF4_SHIFT     2
-#define USTORM_ROCE_REQ_CONN_AG_CTX_CF5_MASK      0x3
-#define USTORM_ROCE_REQ_CONN_AG_CTX_CF5_SHIFT     4
-#define USTORM_ROCE_REQ_CONN_AG_CTX_CF6_MASK      0x3
-#define USTORM_ROCE_REQ_CONN_AG_CTX_CF6_SHIFT     6
+#define E4_USTORM_ROCE_REQ_CONN_AG_CTX_CF3_MASK		0x3
+#define E4_USTORM_ROCE_REQ_CONN_AG_CTX_CF3_SHIFT	0
+#define E4_USTORM_ROCE_REQ_CONN_AG_CTX_CF4_MASK		0x3
+#define E4_USTORM_ROCE_REQ_CONN_AG_CTX_CF4_SHIFT	2
+#define E4_USTORM_ROCE_REQ_CONN_AG_CTX_CF5_MASK		0x3
+#define E4_USTORM_ROCE_REQ_CONN_AG_CTX_CF5_SHIFT	4
+#define E4_USTORM_ROCE_REQ_CONN_AG_CTX_CF6_MASK		0x3
+#define E4_USTORM_ROCE_REQ_CONN_AG_CTX_CF6_SHIFT	6
 	u8 flags2;
-#define USTORM_ROCE_REQ_CONN_AG_CTX_CF0EN_MASK    0x1
-#define USTORM_ROCE_REQ_CONN_AG_CTX_CF0EN_SHIFT   0
-#define USTORM_ROCE_REQ_CONN_AG_CTX_CF1EN_MASK    0x1
-#define USTORM_ROCE_REQ_CONN_AG_CTX_CF1EN_SHIFT   1
-#define USTORM_ROCE_REQ_CONN_AG_CTX_CF2EN_MASK    0x1
-#define USTORM_ROCE_REQ_CONN_AG_CTX_CF2EN_SHIFT   2
-#define USTORM_ROCE_REQ_CONN_AG_CTX_CF3EN_MASK    0x1
-#define USTORM_ROCE_REQ_CONN_AG_CTX_CF3EN_SHIFT   3
-#define USTORM_ROCE_REQ_CONN_AG_CTX_CF4EN_MASK    0x1
-#define USTORM_ROCE_REQ_CONN_AG_CTX_CF4EN_SHIFT   4
-#define USTORM_ROCE_REQ_CONN_AG_CTX_CF5EN_MASK    0x1
-#define USTORM_ROCE_REQ_CONN_AG_CTX_CF5EN_SHIFT   5
-#define USTORM_ROCE_REQ_CONN_AG_CTX_CF6EN_MASK    0x1
-#define USTORM_ROCE_REQ_CONN_AG_CTX_CF6EN_SHIFT   6
-#define USTORM_ROCE_REQ_CONN_AG_CTX_RULE0EN_MASK  0x1
-#define USTORM_ROCE_REQ_CONN_AG_CTX_RULE0EN_SHIFT 7
+#define E4_USTORM_ROCE_REQ_CONN_AG_CTX_CF0EN_MASK	0x1
+#define E4_USTORM_ROCE_REQ_CONN_AG_CTX_CF0EN_SHIFT	0
+#define E4_USTORM_ROCE_REQ_CONN_AG_CTX_CF1EN_MASK	0x1
+#define E4_USTORM_ROCE_REQ_CONN_AG_CTX_CF1EN_SHIFT	1
+#define E4_USTORM_ROCE_REQ_CONN_AG_CTX_CF2EN_MASK	0x1
+#define E4_USTORM_ROCE_REQ_CONN_AG_CTX_CF2EN_SHIFT	2
+#define E4_USTORM_ROCE_REQ_CONN_AG_CTX_CF3EN_MASK	0x1
+#define E4_USTORM_ROCE_REQ_CONN_AG_CTX_CF3EN_SHIFT	3
+#define E4_USTORM_ROCE_REQ_CONN_AG_CTX_CF4EN_MASK	0x1
+#define E4_USTORM_ROCE_REQ_CONN_AG_CTX_CF4EN_SHIFT	4
+#define E4_USTORM_ROCE_REQ_CONN_AG_CTX_CF5EN_MASK	0x1
+#define E4_USTORM_ROCE_REQ_CONN_AG_CTX_CF5EN_SHIFT	5
+#define E4_USTORM_ROCE_REQ_CONN_AG_CTX_CF6EN_MASK	0x1
+#define E4_USTORM_ROCE_REQ_CONN_AG_CTX_CF6EN_SHIFT	6
+#define E4_USTORM_ROCE_REQ_CONN_AG_CTX_RULE0EN_MASK	0x1
+#define E4_USTORM_ROCE_REQ_CONN_AG_CTX_RULE0EN_SHIFT	7
 	u8 flags3;
-#define USTORM_ROCE_REQ_CONN_AG_CTX_RULE1EN_MASK  0x1
-#define USTORM_ROCE_REQ_CONN_AG_CTX_RULE1EN_SHIFT 0
-#define USTORM_ROCE_REQ_CONN_AG_CTX_RULE2EN_MASK  0x1
-#define USTORM_ROCE_REQ_CONN_AG_CTX_RULE2EN_SHIFT 1
-#define USTORM_ROCE_REQ_CONN_AG_CTX_RULE3EN_MASK  0x1
-#define USTORM_ROCE_REQ_CONN_AG_CTX_RULE3EN_SHIFT 2
-#define USTORM_ROCE_REQ_CONN_AG_CTX_RULE4EN_MASK  0x1
-#define USTORM_ROCE_REQ_CONN_AG_CTX_RULE4EN_SHIFT 3
-#define USTORM_ROCE_REQ_CONN_AG_CTX_RULE5EN_MASK  0x1
-#define USTORM_ROCE_REQ_CONN_AG_CTX_RULE5EN_SHIFT 4
-#define USTORM_ROCE_REQ_CONN_AG_CTX_RULE6EN_MASK  0x1
-#define USTORM_ROCE_REQ_CONN_AG_CTX_RULE6EN_SHIFT 5
-#define USTORM_ROCE_REQ_CONN_AG_CTX_RULE7EN_MASK  0x1
-#define USTORM_ROCE_REQ_CONN_AG_CTX_RULE7EN_SHIFT 6
-#define USTORM_ROCE_REQ_CONN_AG_CTX_RULE8EN_MASK  0x1
-#define USTORM_ROCE_REQ_CONN_AG_CTX_RULE8EN_SHIFT 7
+#define E4_USTORM_ROCE_REQ_CONN_AG_CTX_RULE1EN_MASK	0x1
+#define E4_USTORM_ROCE_REQ_CONN_AG_CTX_RULE1EN_SHIFT	0
+#define E4_USTORM_ROCE_REQ_CONN_AG_CTX_RULE2EN_MASK	0x1
+#define E4_USTORM_ROCE_REQ_CONN_AG_CTX_RULE2EN_SHIFT	1
+#define E4_USTORM_ROCE_REQ_CONN_AG_CTX_RULE3EN_MASK	0x1
+#define E4_USTORM_ROCE_REQ_CONN_AG_CTX_RULE3EN_SHIFT	2
+#define E4_USTORM_ROCE_REQ_CONN_AG_CTX_RULE4EN_MASK	0x1
+#define E4_USTORM_ROCE_REQ_CONN_AG_CTX_RULE4EN_SHIFT	3
+#define E4_USTORM_ROCE_REQ_CONN_AG_CTX_RULE5EN_MASK	0x1
+#define E4_USTORM_ROCE_REQ_CONN_AG_CTX_RULE5EN_SHIFT	4
+#define E4_USTORM_ROCE_REQ_CONN_AG_CTX_RULE6EN_MASK	0x1
+#define E4_USTORM_ROCE_REQ_CONN_AG_CTX_RULE6EN_SHIFT	5
+#define E4_USTORM_ROCE_REQ_CONN_AG_CTX_RULE7EN_MASK	0x1
+#define E4_USTORM_ROCE_REQ_CONN_AG_CTX_RULE7EN_SHIFT	6
+#define E4_USTORM_ROCE_REQ_CONN_AG_CTX_RULE8EN_MASK	0x1
+#define E4_USTORM_ROCE_REQ_CONN_AG_CTX_RULE8EN_SHIFT	7
 	u8 byte2;
 	u8 byte3;
 	__le16 word0;
@@ -8004,63 +8599,63 @@ struct ustorm_roce_req_conn_ag_ctx {
 	__le16 word3;
 };
 
-struct ustorm_roce_resp_conn_ag_ctx {
+struct e4_ustorm_roce_resp_conn_ag_ctx {
 	u8 byte0;
 	u8 byte1;
 	u8 flags0;
-#define USTORM_ROCE_RESP_CONN_AG_CTX_BIT0_MASK     0x1
-#define USTORM_ROCE_RESP_CONN_AG_CTX_BIT0_SHIFT    0
-#define USTORM_ROCE_RESP_CONN_AG_CTX_BIT1_MASK     0x1
-#define USTORM_ROCE_RESP_CONN_AG_CTX_BIT1_SHIFT    1
-#define USTORM_ROCE_RESP_CONN_AG_CTX_CF0_MASK      0x3
-#define USTORM_ROCE_RESP_CONN_AG_CTX_CF0_SHIFT     2
-#define USTORM_ROCE_RESP_CONN_AG_CTX_CF1_MASK      0x3
-#define USTORM_ROCE_RESP_CONN_AG_CTX_CF1_SHIFT     4
-#define USTORM_ROCE_RESP_CONN_AG_CTX_CF2_MASK      0x3
-#define USTORM_ROCE_RESP_CONN_AG_CTX_CF2_SHIFT     6
+#define E4_USTORM_ROCE_RESP_CONN_AG_CTX_BIT0_MASK	0x1
+#define E4_USTORM_ROCE_RESP_CONN_AG_CTX_BIT0_SHIFT	0
+#define E4_USTORM_ROCE_RESP_CONN_AG_CTX_BIT1_MASK	0x1
+#define E4_USTORM_ROCE_RESP_CONN_AG_CTX_BIT1_SHIFT	1
+#define E4_USTORM_ROCE_RESP_CONN_AG_CTX_CF0_MASK	0x3
+#define E4_USTORM_ROCE_RESP_CONN_AG_CTX_CF0_SHIFT	2
+#define E4_USTORM_ROCE_RESP_CONN_AG_CTX_CF1_MASK	0x3
+#define E4_USTORM_ROCE_RESP_CONN_AG_CTX_CF1_SHIFT	4
+#define E4_USTORM_ROCE_RESP_CONN_AG_CTX_CF2_MASK	0x3
+#define E4_USTORM_ROCE_RESP_CONN_AG_CTX_CF2_SHIFT	6
 	u8 flags1;
-#define USTORM_ROCE_RESP_CONN_AG_CTX_CF3_MASK      0x3
-#define USTORM_ROCE_RESP_CONN_AG_CTX_CF3_SHIFT     0
-#define USTORM_ROCE_RESP_CONN_AG_CTX_CF4_MASK      0x3
-#define USTORM_ROCE_RESP_CONN_AG_CTX_CF4_SHIFT     2
-#define USTORM_ROCE_RESP_CONN_AG_CTX_CF5_MASK      0x3
-#define USTORM_ROCE_RESP_CONN_AG_CTX_CF5_SHIFT     4
-#define USTORM_ROCE_RESP_CONN_AG_CTX_CF6_MASK      0x3
-#define USTORM_ROCE_RESP_CONN_AG_CTX_CF6_SHIFT     6
+#define E4_USTORM_ROCE_RESP_CONN_AG_CTX_CF3_MASK	0x3
+#define E4_USTORM_ROCE_RESP_CONN_AG_CTX_CF3_SHIFT	0
+#define E4_USTORM_ROCE_RESP_CONN_AG_CTX_CF4_MASK	0x3
+#define E4_USTORM_ROCE_RESP_CONN_AG_CTX_CF4_SHIFT	2
+#define E4_USTORM_ROCE_RESP_CONN_AG_CTX_CF5_MASK	0x3
+#define E4_USTORM_ROCE_RESP_CONN_AG_CTX_CF5_SHIFT	4
+#define E4_USTORM_ROCE_RESP_CONN_AG_CTX_CF6_MASK	0x3
+#define E4_USTORM_ROCE_RESP_CONN_AG_CTX_CF6_SHIFT	6
 	u8 flags2;
-#define USTORM_ROCE_RESP_CONN_AG_CTX_CF0EN_MASK    0x1
-#define USTORM_ROCE_RESP_CONN_AG_CTX_CF0EN_SHIFT   0
-#define USTORM_ROCE_RESP_CONN_AG_CTX_CF1EN_MASK    0x1
-#define USTORM_ROCE_RESP_CONN_AG_CTX_CF1EN_SHIFT   1
-#define USTORM_ROCE_RESP_CONN_AG_CTX_CF2EN_MASK    0x1
-#define USTORM_ROCE_RESP_CONN_AG_CTX_CF2EN_SHIFT   2
-#define USTORM_ROCE_RESP_CONN_AG_CTX_CF3EN_MASK    0x1
-#define USTORM_ROCE_RESP_CONN_AG_CTX_CF3EN_SHIFT   3
-#define USTORM_ROCE_RESP_CONN_AG_CTX_CF4EN_MASK    0x1
-#define USTORM_ROCE_RESP_CONN_AG_CTX_CF4EN_SHIFT   4
-#define USTORM_ROCE_RESP_CONN_AG_CTX_CF5EN_MASK    0x1
-#define USTORM_ROCE_RESP_CONN_AG_CTX_CF5EN_SHIFT   5
-#define USTORM_ROCE_RESP_CONN_AG_CTX_CF6EN_MASK    0x1
-#define USTORM_ROCE_RESP_CONN_AG_CTX_CF6EN_SHIFT   6
-#define USTORM_ROCE_RESP_CONN_AG_CTX_RULE0EN_MASK  0x1
-#define USTORM_ROCE_RESP_CONN_AG_CTX_RULE0EN_SHIFT 7
+#define E4_USTORM_ROCE_RESP_CONN_AG_CTX_CF0EN_MASK	0x1
+#define E4_USTORM_ROCE_RESP_CONN_AG_CTX_CF0EN_SHIFT	0
+#define E4_USTORM_ROCE_RESP_CONN_AG_CTX_CF1EN_MASK	0x1
+#define E4_USTORM_ROCE_RESP_CONN_AG_CTX_CF1EN_SHIFT	1
+#define E4_USTORM_ROCE_RESP_CONN_AG_CTX_CF2EN_MASK	0x1
+#define E4_USTORM_ROCE_RESP_CONN_AG_CTX_CF2EN_SHIFT	2
+#define E4_USTORM_ROCE_RESP_CONN_AG_CTX_CF3EN_MASK	0x1
+#define E4_USTORM_ROCE_RESP_CONN_AG_CTX_CF3EN_SHIFT	3
+#define E4_USTORM_ROCE_RESP_CONN_AG_CTX_CF4EN_MASK	0x1
+#define E4_USTORM_ROCE_RESP_CONN_AG_CTX_CF4EN_SHIFT	4
+#define E4_USTORM_ROCE_RESP_CONN_AG_CTX_CF5EN_MASK	0x1
+#define E4_USTORM_ROCE_RESP_CONN_AG_CTX_CF5EN_SHIFT	5
+#define E4_USTORM_ROCE_RESP_CONN_AG_CTX_CF6EN_MASK	0x1
+#define E4_USTORM_ROCE_RESP_CONN_AG_CTX_CF6EN_SHIFT	6
+#define E4_USTORM_ROCE_RESP_CONN_AG_CTX_RULE0EN_MASK	0x1
+#define E4_USTORM_ROCE_RESP_CONN_AG_CTX_RULE0EN_SHIFT	7
 	u8 flags3;
-#define USTORM_ROCE_RESP_CONN_AG_CTX_RULE1EN_MASK  0x1
-#define USTORM_ROCE_RESP_CONN_AG_CTX_RULE1EN_SHIFT 0
-#define USTORM_ROCE_RESP_CONN_AG_CTX_RULE2EN_MASK  0x1
-#define USTORM_ROCE_RESP_CONN_AG_CTX_RULE2EN_SHIFT 1
-#define USTORM_ROCE_RESP_CONN_AG_CTX_RULE3EN_MASK  0x1
-#define USTORM_ROCE_RESP_CONN_AG_CTX_RULE3EN_SHIFT 2
-#define USTORM_ROCE_RESP_CONN_AG_CTX_RULE4EN_MASK  0x1
-#define USTORM_ROCE_RESP_CONN_AG_CTX_RULE4EN_SHIFT 3
-#define USTORM_ROCE_RESP_CONN_AG_CTX_RULE5EN_MASK  0x1
-#define USTORM_ROCE_RESP_CONN_AG_CTX_RULE5EN_SHIFT 4
-#define USTORM_ROCE_RESP_CONN_AG_CTX_RULE6EN_MASK  0x1
-#define USTORM_ROCE_RESP_CONN_AG_CTX_RULE6EN_SHIFT 5
-#define USTORM_ROCE_RESP_CONN_AG_CTX_RULE7EN_MASK  0x1
-#define USTORM_ROCE_RESP_CONN_AG_CTX_RULE7EN_SHIFT 6
-#define USTORM_ROCE_RESP_CONN_AG_CTX_RULE8EN_MASK  0x1
-#define USTORM_ROCE_RESP_CONN_AG_CTX_RULE8EN_SHIFT 7
+#define E4_USTORM_ROCE_RESP_CONN_AG_CTX_RULE1EN_MASK	0x1
+#define E4_USTORM_ROCE_RESP_CONN_AG_CTX_RULE1EN_SHIFT	0
+#define E4_USTORM_ROCE_RESP_CONN_AG_CTX_RULE2EN_MASK	0x1
+#define E4_USTORM_ROCE_RESP_CONN_AG_CTX_RULE2EN_SHIFT	1
+#define E4_USTORM_ROCE_RESP_CONN_AG_CTX_RULE3EN_MASK	0x1
+#define E4_USTORM_ROCE_RESP_CONN_AG_CTX_RULE3EN_SHIFT	2
+#define E4_USTORM_ROCE_RESP_CONN_AG_CTX_RULE4EN_MASK	0x1
+#define E4_USTORM_ROCE_RESP_CONN_AG_CTX_RULE4EN_SHIFT	3
+#define E4_USTORM_ROCE_RESP_CONN_AG_CTX_RULE5EN_MASK	0x1
+#define E4_USTORM_ROCE_RESP_CONN_AG_CTX_RULE5EN_SHIFT	4
+#define E4_USTORM_ROCE_RESP_CONN_AG_CTX_RULE6EN_MASK	0x1
+#define E4_USTORM_ROCE_RESP_CONN_AG_CTX_RULE6EN_SHIFT	5
+#define E4_USTORM_ROCE_RESP_CONN_AG_CTX_RULE7EN_MASK	0x1
+#define E4_USTORM_ROCE_RESP_CONN_AG_CTX_RULE7EN_SHIFT	6
+#define E4_USTORM_ROCE_RESP_CONN_AG_CTX_RULE8EN_MASK	0x1
+#define E4_USTORM_ROCE_RESP_CONN_AG_CTX_RULE8EN_SHIFT	7
 	u8 byte2;
 	u8 byte3;
 	__le16 word0;
@@ -8073,214 +8668,214 @@ struct ustorm_roce_resp_conn_ag_ctx {
 	__le16 word3;
 };
 
-struct xstorm_roce_req_conn_ag_ctx {
+struct e4_xstorm_roce_req_conn_ag_ctx {
 	u8 reserved0;
 	u8 state;
 	u8 flags0;
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_EXIST_IN_QM0_MASK        0x1
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_EXIST_IN_QM0_SHIFT       0
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_RESERVED1_MASK           0x1
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_RESERVED1_SHIFT          1
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_RESERVED2_MASK           0x1
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_RESERVED2_SHIFT          2
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_EXIST_IN_QM3_MASK        0x1
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_EXIST_IN_QM3_SHIFT       3
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_RESERVED3_MASK           0x1
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_RESERVED3_SHIFT          4
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_RESERVED4_MASK           0x1
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_RESERVED4_SHIFT          5
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_RESERVED5_MASK           0x1
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_RESERVED5_SHIFT          6
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_RESERVED6_MASK           0x1
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_RESERVED6_SHIFT          7
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_EXIST_IN_QM0_MASK	0x1
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_EXIST_IN_QM0_SHIFT	0
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_RESERVED1_MASK		0x1
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_RESERVED1_SHIFT		1
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_RESERVED2_MASK		0x1
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_RESERVED2_SHIFT		2
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_EXIST_IN_QM3_MASK	0x1
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_EXIST_IN_QM3_SHIFT	3
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_RESERVED3_MASK		0x1
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_RESERVED3_SHIFT		4
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_RESERVED4_MASK		0x1
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_RESERVED4_SHIFT		5
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_RESERVED5_MASK		0x1
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_RESERVED5_SHIFT		6
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_RESERVED6_MASK		0x1
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_RESERVED6_SHIFT		7
 	u8 flags1;
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_RESERVED7_MASK           0x1
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_RESERVED7_SHIFT          0
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_RESERVED8_MASK           0x1
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_RESERVED8_SHIFT          1
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_BIT10_MASK               0x1
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_BIT10_SHIFT              2
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_BIT11_MASK               0x1
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_BIT11_SHIFT              3
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_BIT12_MASK               0x1
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_BIT12_SHIFT              4
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_BIT13_MASK               0x1
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_BIT13_SHIFT              5
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_ERROR_STATE_MASK         0x1
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_ERROR_STATE_SHIFT        6
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_YSTORM_FLUSH_MASK        0x1
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_YSTORM_FLUSH_SHIFT       7
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_RESERVED7_MASK		0x1
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_RESERVED7_SHIFT		0
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_RESERVED8_MASK		0x1
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_RESERVED8_SHIFT		1
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_BIT10_MASK		0x1
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_BIT10_SHIFT		2
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_BIT11_MASK		0x1
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_BIT11_SHIFT		3
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_BIT12_MASK		0x1
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_BIT12_SHIFT		4
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_BIT13_MASK		0x1
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_BIT13_SHIFT		5
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_ERROR_STATE_MASK		0x1
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_ERROR_STATE_SHIFT	6
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_YSTORM_FLUSH_MASK	0x1
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_YSTORM_FLUSH_SHIFT	7
 	u8 flags2;
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF0_MASK                 0x3
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF0_SHIFT                0
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF1_MASK                 0x3
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF1_SHIFT                2
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF2_MASK                 0x3
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF2_SHIFT                4
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF3_MASK                 0x3
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF3_SHIFT                6
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF0_MASK		0x3
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF0_SHIFT	0
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF1_MASK		0x3
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF1_SHIFT	2
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF2_MASK		0x3
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF2_SHIFT	4
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF3_MASK		0x3
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF3_SHIFT	6
 	u8 flags3;
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_SQ_FLUSH_CF_MASK         0x3
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_SQ_FLUSH_CF_SHIFT        0
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_RX_ERROR_CF_MASK         0x3
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_RX_ERROR_CF_SHIFT        2
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_SND_RXMIT_CF_MASK        0x3
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_SND_RXMIT_CF_SHIFT       4
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_FLUSH_Q0_CF_MASK         0x3
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_FLUSH_Q0_CF_SHIFT        6
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_SQ_FLUSH_CF_MASK		0x3
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_SQ_FLUSH_CF_SHIFT	0
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_RX_ERROR_CF_MASK		0x3
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_RX_ERROR_CF_SHIFT	2
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_SND_RXMIT_CF_MASK	0x3
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_SND_RXMIT_CF_SHIFT	4
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_FLUSH_Q0_CF_MASK		0x3
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_FLUSH_Q0_CF_SHIFT	6
 	u8 flags4;
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF8_MASK                 0x3
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF8_SHIFT                0
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF9_MASK                 0x3
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF9_SHIFT                2
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF10_MASK                0x3
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF10_SHIFT               4
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF11_MASK                0x3
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF11_SHIFT               6
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF8_MASK		0x3
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF8_SHIFT	0
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF9_MASK		0x3
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF9_SHIFT	2
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF10_MASK	0x3
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF10_SHIFT	4
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF11_MASK	0x3
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF11_SHIFT	6
 	u8 flags5;
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF12_MASK                0x3
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF12_SHIFT               0
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF13_MASK                0x3
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF13_SHIFT               2
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_FMR_ENDED_CF_MASK        0x3
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_FMR_ENDED_CF_SHIFT       4
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF15_MASK                0x3
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF15_SHIFT               6
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF12_MASK		0x3
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF12_SHIFT		0
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF13_MASK		0x3
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF13_SHIFT		2
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_FMR_ENDED_CF_MASK	0x3
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_FMR_ENDED_CF_SHIFT	4
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF15_MASK		0x3
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF15_SHIFT		6
 	u8 flags6;
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF16_MASK                0x3
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF16_SHIFT               0
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF17_MASK                0x3
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF17_SHIFT               2
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF18_MASK                0x3
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF18_SHIFT               4
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF19_MASK                0x3
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF19_SHIFT               6
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF16_MASK	0x3
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF16_SHIFT	0
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF17_MASK	0x3
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF17_SHIFT	2
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF18_MASK	0x3
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF18_SHIFT	4
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF19_MASK	0x3
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF19_SHIFT	6
 	u8 flags7;
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF20_MASK                0x3
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF20_SHIFT               0
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF21_MASK                0x3
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF21_SHIFT               2
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_SLOW_PATH_MASK           0x3
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_SLOW_PATH_SHIFT          4
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF0EN_MASK               0x1
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF0EN_SHIFT              6
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF1EN_MASK               0x1
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF1EN_SHIFT              7
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF20_MASK	0x3
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF20_SHIFT	0
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF21_MASK	0x3
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF21_SHIFT	2
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_SLOW_PATH_MASK	0x3
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_SLOW_PATH_SHIFT	4
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF0EN_MASK	0x1
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF0EN_SHIFT	6
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF1EN_MASK	0x1
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF1EN_SHIFT	7
 	u8 flags8;
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF2EN_MASK               0x1
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF2EN_SHIFT              0
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF3EN_MASK               0x1
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF3EN_SHIFT              1
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_SQ_FLUSH_CF_EN_MASK      0x1
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_SQ_FLUSH_CF_EN_SHIFT     2
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_RX_ERROR_CF_EN_MASK      0x1
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_RX_ERROR_CF_EN_SHIFT     3
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_SND_RXMIT_CF_EN_MASK     0x1
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_SND_RXMIT_CF_EN_SHIFT    4
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_FLUSH_Q0_CF_EN_MASK      0x1
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_FLUSH_Q0_CF_EN_SHIFT     5
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF8EN_MASK               0x1
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF8EN_SHIFT              6
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF9EN_MASK               0x1
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF9EN_SHIFT              7
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF2EN_MASK		0x1
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF2EN_SHIFT		0
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF3EN_MASK		0x1
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF3EN_SHIFT		1
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_SQ_FLUSH_CF_EN_MASK	0x1
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_SQ_FLUSH_CF_EN_SHIFT	2
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_RX_ERROR_CF_EN_MASK	0x1
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_RX_ERROR_CF_EN_SHIFT	3
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_SND_RXMIT_CF_EN_MASK	0x1
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_SND_RXMIT_CF_EN_SHIFT	4
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_FLUSH_Q0_CF_EN_MASK	0x1
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_FLUSH_Q0_CF_EN_SHIFT	5
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF8EN_MASK		0x1
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF8EN_SHIFT		6
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF9EN_MASK		0x1
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF9EN_SHIFT		7
 	u8 flags9;
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF10EN_MASK              0x1
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF10EN_SHIFT             0
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF11EN_MASK              0x1
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF11EN_SHIFT             1
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF12EN_MASK              0x1
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF12EN_SHIFT             2
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF13EN_MASK              0x1
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF13EN_SHIFT             3
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_FME_ENDED_CF_EN_MASK     0x1
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_FME_ENDED_CF_EN_SHIFT    4
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF15EN_MASK              0x1
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF15EN_SHIFT             5
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF16EN_MASK              0x1
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF16EN_SHIFT             6
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF17EN_MASK              0x1
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF17EN_SHIFT             7
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF10EN_MASK		0x1
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF10EN_SHIFT		0
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF11EN_MASK		0x1
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF11EN_SHIFT		1
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF12EN_MASK		0x1
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF12EN_SHIFT		2
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF13EN_MASK		0x1
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF13EN_SHIFT		3
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_FME_ENDED_CF_EN_MASK	0x1
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_FME_ENDED_CF_EN_SHIFT	4
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF15EN_MASK		0x1
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF15EN_SHIFT		5
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF16EN_MASK		0x1
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF16EN_SHIFT		6
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF17EN_MASK		0x1
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF17EN_SHIFT		7
 	u8 flags10;
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF18EN_MASK              0x1
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF18EN_SHIFT             0
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF19EN_MASK              0x1
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF19EN_SHIFT             1
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF20EN_MASK              0x1
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF20EN_SHIFT             2
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF21EN_MASK              0x1
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF21EN_SHIFT             3
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_SLOW_PATH_EN_MASK        0x1
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_SLOW_PATH_EN_SHIFT       4
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF23EN_MASK              0x1
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF23EN_SHIFT             5
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_RULE0EN_MASK             0x1
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_RULE0EN_SHIFT            6
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_RULE1EN_MASK             0x1
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_RULE1EN_SHIFT            7
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF18EN_MASK		0x1
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF18EN_SHIFT		0
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF19EN_MASK		0x1
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF19EN_SHIFT		1
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF20EN_MASK		0x1
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF20EN_SHIFT		2
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF21EN_MASK		0x1
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF21EN_SHIFT		3
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_SLOW_PATH_EN_MASK	0x1
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_SLOW_PATH_EN_SHIFT	4
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF23EN_MASK		0x1
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF23EN_SHIFT		5
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_RULE0EN_MASK		0x1
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_RULE0EN_SHIFT		6
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_RULE1EN_MASK		0x1
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_RULE1EN_SHIFT		7
 	u8 flags11;
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_RULE2EN_MASK             0x1
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_RULE2EN_SHIFT            0
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_RULE3EN_MASK             0x1
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_RULE3EN_SHIFT            1
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_RULE4EN_MASK             0x1
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_RULE4EN_SHIFT            2
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_RULE5EN_MASK             0x1
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_RULE5EN_SHIFT            3
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_RULE6EN_MASK             0x1
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_RULE6EN_SHIFT            4
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_E2E_CREDIT_RULE_EN_MASK  0x1
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_E2E_CREDIT_RULE_EN_SHIFT 5
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_A0_RESERVED1_MASK        0x1
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_A0_RESERVED1_SHIFT       6
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_RULE9EN_MASK             0x1
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_RULE9EN_SHIFT            7
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_RULE2EN_MASK		0x1
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_RULE2EN_SHIFT		0
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_RULE3EN_MASK		0x1
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_RULE3EN_SHIFT		1
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_RULE4EN_MASK		0x1
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_RULE4EN_SHIFT		2
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_RULE5EN_MASK		0x1
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_RULE5EN_SHIFT		3
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_RULE6EN_MASK		0x1
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_RULE6EN_SHIFT		4
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_E2E_CREDIT_RULE_EN_MASK	0x1
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_E2E_CREDIT_RULE_EN_SHIFT	5
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_A0_RESERVED1_MASK	0x1
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_A0_RESERVED1_SHIFT	6
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_RULE9EN_MASK		0x1
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_RULE9EN_SHIFT		7
 	u8 flags12;
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_SQ_PROD_EN_MASK          0x1
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_SQ_PROD_EN_SHIFT         0
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_RULE11EN_MASK            0x1
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_RULE11EN_SHIFT           1
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_A0_RESERVED2_MASK        0x1
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_A0_RESERVED2_SHIFT       2
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_A0_RESERVED3_MASK        0x1
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_A0_RESERVED3_SHIFT       3
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_INV_FENCE_RULE_EN_MASK   0x1
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_INV_FENCE_RULE_EN_SHIFT  4
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_RULE15EN_MASK            0x1
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_RULE15EN_SHIFT           5
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_ORQ_FENCE_RULE_EN_MASK   0x1
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_ORQ_FENCE_RULE_EN_SHIFT  6
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_MAX_ORD_RULE_EN_MASK     0x1
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_MAX_ORD_RULE_EN_SHIFT    7
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_SQ_PROD_EN_MASK		0x1
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_SQ_PROD_EN_SHIFT		0
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_RULE11EN_MASK		0x1
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_RULE11EN_SHIFT		1
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_A0_RESERVED2_MASK	0x1
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_A0_RESERVED2_SHIFT	2
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_A0_RESERVED3_MASK	0x1
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_A0_RESERVED3_SHIFT	3
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_INV_FENCE_RULE_EN_MASK	0x1
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_INV_FENCE_RULE_EN_SHIFT	4
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_RULE15EN_MASK		0x1
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_RULE15EN_SHIFT		5
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_ORQ_FENCE_RULE_EN_MASK	0x1
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_ORQ_FENCE_RULE_EN_SHIFT	6
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_MAX_ORD_RULE_EN_MASK	0x1
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_MAX_ORD_RULE_EN_SHIFT	7
 	u8 flags13;
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_RULE18EN_MASK            0x1
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_RULE18EN_SHIFT           0
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_RULE19EN_MASK            0x1
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_RULE19EN_SHIFT           1
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_A0_RESERVED4_MASK        0x1
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_A0_RESERVED4_SHIFT       2
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_A0_RESERVED5_MASK        0x1
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_A0_RESERVED5_SHIFT       3
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_A0_RESERVED6_MASK        0x1
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_A0_RESERVED6_SHIFT       4
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_A0_RESERVED7_MASK        0x1
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_A0_RESERVED7_SHIFT       5
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_A0_RESERVED8_MASK        0x1
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_A0_RESERVED8_SHIFT       6
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_A0_RESERVED9_MASK        0x1
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_A0_RESERVED9_SHIFT       7
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_RULE18EN_MASK		0x1
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_RULE18EN_SHIFT		0
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_RULE19EN_MASK		0x1
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_RULE19EN_SHIFT		1
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_A0_RESERVED4_MASK	0x1
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_A0_RESERVED4_SHIFT	2
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_A0_RESERVED5_MASK	0x1
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_A0_RESERVED5_SHIFT	3
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_A0_RESERVED6_MASK	0x1
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_A0_RESERVED6_SHIFT	4
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_A0_RESERVED7_MASK	0x1
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_A0_RESERVED7_SHIFT	5
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_A0_RESERVED8_MASK	0x1
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_A0_RESERVED8_SHIFT	6
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_A0_RESERVED9_MASK	0x1
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_A0_RESERVED9_SHIFT	7
 	u8 flags14;
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_MIGRATION_FLAG_MASK      0x1
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_MIGRATION_FLAG_SHIFT     0
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_BIT17_MASK               0x1
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_BIT17_SHIFT              1
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_DPM_PORT_NUM_MASK        0x3
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_DPM_PORT_NUM_SHIFT       2
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_RESERVED_MASK            0x1
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_RESERVED_SHIFT           4
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_ROCE_EDPM_ENABLE_MASK    0x1
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_ROCE_EDPM_ENABLE_SHIFT   5
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF23_MASK                0x3
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF23_SHIFT               6
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_MIGRATION_FLAG_MASK	0x1
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_MIGRATION_FLAG_SHIFT	0
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_BIT17_MASK		0x1
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_BIT17_SHIFT		1
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_DPM_PORT_NUM_MASK	0x3
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_DPM_PORT_NUM_SHIFT	2
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_RESERVED_MASK		0x1
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_RESERVED_SHIFT		4
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_ROCE_EDPM_ENABLE_MASK	0x1
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_ROCE_EDPM_ENABLE_SHIFT	5
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF23_MASK		0x3
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF23_SHIFT		6
 	u8 byte2;
 	__le16 physical_q0;
 	__le16 word1;
@@ -8302,224 +8897,224 @@ struct xstorm_roce_req_conn_ag_ctx {
 	__le32 orq_cons;
 };
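As context for the rename: the _MASK/_SHIFT pairs above are never used directly; they feed generic field accessors. A minimal sketch of the idiom, assuming the GET_FIELD/SET_FIELD helpers the qed common headers provide elsewhere — their exact definitions are not part of this patch:

	/* Assumed helpers, mirroring the usual qed field-access idiom. */
	#define GET_FIELD(value, name) \
		(((value) >> (name ## _SHIFT)) & (name ## _MASK))

	#define SET_FIELD(value, name, flag)                                \
		do {                                                        \
			(value) &= ~((name ## _MASK) << (name ## _SHIFT));  \
			(value) |= (((u64)(flag) & (u64)(name ## _MASK)) << \
				    (name ## _SHIFT));                      \
		} while (0)

	/* e.g., with ctx a hypothetical struct e4_xstorm_roce_req_conn_ag_ctx *:
	 * enable the SQ producer rule in flags12. */
	SET_FIELD(ctx->flags12, E4_XSTORM_ROCE_REQ_CONN_AG_CTX_SQ_PROD_EN, 1);

The rename itself is mechanical: every macro gains an E4_ prefix and the value columns are re-aligned on tabs.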
 
-struct xstorm_roce_resp_conn_ag_ctx {
+struct e4_xstorm_roce_resp_conn_ag_ctx {
 	u8 reserved0;
 	u8 state;
 	u8 flags0;
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_EXIST_IN_QM0_MASK      0x1
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_EXIST_IN_QM0_SHIFT     0
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_RESERVED1_MASK         0x1
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_RESERVED1_SHIFT        1
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_RESERVED2_MASK         0x1
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_RESERVED2_SHIFT        2
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_EXIST_IN_QM3_MASK      0x1
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_EXIST_IN_QM3_SHIFT     3
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_RESERVED3_MASK         0x1
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_RESERVED3_SHIFT        4
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_RESERVED4_MASK         0x1
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_RESERVED4_SHIFT        5
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_RESERVED5_MASK         0x1
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_RESERVED5_SHIFT        6
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_RESERVED6_MASK         0x1
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_RESERVED6_SHIFT        7
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_EXIST_IN_QM0_MASK	0x1
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_EXIST_IN_QM0_SHIFT	0
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_RESERVED1_MASK		0x1
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_RESERVED1_SHIFT		1
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_RESERVED2_MASK		0x1
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_RESERVED2_SHIFT		2
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_EXIST_IN_QM3_MASK	0x1
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_EXIST_IN_QM3_SHIFT	3
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_RESERVED3_MASK		0x1
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_RESERVED3_SHIFT		4
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_RESERVED4_MASK		0x1
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_RESERVED4_SHIFT		5
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_RESERVED5_MASK		0x1
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_RESERVED5_SHIFT		6
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_RESERVED6_MASK		0x1
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_RESERVED6_SHIFT		7
 	u8 flags1;
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_RESERVED7_MASK         0x1
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_RESERVED7_SHIFT        0
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_RESERVED8_MASK         0x1
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_RESERVED8_SHIFT        1
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_BIT10_MASK             0x1
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_BIT10_SHIFT            2
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_BIT11_MASK             0x1
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_BIT11_SHIFT            3
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_BIT12_MASK             0x1
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_BIT12_SHIFT            4
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_BIT13_MASK             0x1
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_BIT13_SHIFT            5
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_ERROR_STATE_MASK       0x1
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_ERROR_STATE_SHIFT      6
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_YSTORM_FLUSH_MASK      0x1
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_YSTORM_FLUSH_SHIFT     7
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_RESERVED7_MASK		0x1
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_RESERVED7_SHIFT		0
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_RESERVED8_MASK		0x1
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_RESERVED8_SHIFT		1
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_BIT10_MASK		0x1
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_BIT10_SHIFT		2
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_BIT11_MASK		0x1
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_BIT11_SHIFT		3
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_BIT12_MASK		0x1
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_BIT12_SHIFT		4
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_BIT13_MASK		0x1
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_BIT13_SHIFT		5
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_ERROR_STATE_MASK	0x1
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_ERROR_STATE_SHIFT	6
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_YSTORM_FLUSH_MASK	0x1
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_YSTORM_FLUSH_SHIFT	7
 	u8 flags2;
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF0_MASK               0x3
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF0_SHIFT              0
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF1_MASK               0x3
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF1_SHIFT              2
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF2_MASK               0x3
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF2_SHIFT              4
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF3_MASK               0x3
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF3_SHIFT              6
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF0_MASK	0x3
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF0_SHIFT	0
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF1_MASK	0x3
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF1_SHIFT	2
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF2_MASK	0x3
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF2_SHIFT	4
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF3_MASK	0x3
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF3_SHIFT	6
 	u8 flags3;
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_RXMIT_CF_MASK          0x3
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_RXMIT_CF_SHIFT         0
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_RX_ERROR_CF_MASK       0x3
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_RX_ERROR_CF_SHIFT      2
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_FORCE_ACK_CF_MASK      0x3
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_FORCE_ACK_CF_SHIFT     4
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_FLUSH_Q0_CF_MASK       0x3
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_FLUSH_Q0_CF_SHIFT      6
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_RXMIT_CF_MASK		0x3
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_RXMIT_CF_SHIFT		0
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_RX_ERROR_CF_MASK	0x3
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_RX_ERROR_CF_SHIFT	2
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_FORCE_ACK_CF_MASK	0x3
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_FORCE_ACK_CF_SHIFT	4
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_FLUSH_Q0_CF_MASK	0x3
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_FLUSH_Q0_CF_SHIFT	6
 	u8 flags4;
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF8_MASK               0x3
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF8_SHIFT              0
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF9_MASK               0x3
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF9_SHIFT              2
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF10_MASK              0x3
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF10_SHIFT             4
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF11_MASK              0x3
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF11_SHIFT             6
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF8_MASK	0x3
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF8_SHIFT	0
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF9_MASK	0x3
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF9_SHIFT	2
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF10_MASK	0x3
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF10_SHIFT	4
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF11_MASK	0x3
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF11_SHIFT	6
 	u8 flags5;
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF12_MASK              0x3
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF12_SHIFT             0
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF13_MASK              0x3
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF13_SHIFT             2
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF14_MASK              0x3
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF14_SHIFT             4
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF15_MASK              0x3
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF15_SHIFT             6
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF12_MASK	0x3
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF12_SHIFT	0
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF13_MASK	0x3
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF13_SHIFT	2
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF14_MASK	0x3
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF14_SHIFT	4
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF15_MASK	0x3
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF15_SHIFT	6
 	u8 flags6;
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF16_MASK              0x3
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF16_SHIFT             0
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF17_MASK              0x3
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF17_SHIFT             2
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF18_MASK              0x3
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF18_SHIFT             4
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF19_MASK              0x3
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF19_SHIFT             6
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF16_MASK	0x3
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF16_SHIFT	0
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF17_MASK	0x3
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF17_SHIFT	2
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF18_MASK	0x3
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF18_SHIFT	4
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF19_MASK	0x3
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF19_SHIFT	6
 	u8 flags7;
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF20_MASK              0x3
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF20_SHIFT             0
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF21_MASK              0x3
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF21_SHIFT             2
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_SLOW_PATH_MASK         0x3
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_SLOW_PATH_SHIFT        4
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF0EN_MASK             0x1
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF0EN_SHIFT            6
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF1EN_MASK             0x1
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF1EN_SHIFT            7
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF20_MASK	0x3
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF20_SHIFT	0
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF21_MASK	0x3
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF21_SHIFT	2
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_SLOW_PATH_MASK	0x3
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_SLOW_PATH_SHIFT	4
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF0EN_MASK	0x1
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF0EN_SHIFT	6
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF1EN_MASK	0x1
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF1EN_SHIFT	7
 	u8 flags8;
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF2EN_MASK             0x1
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF2EN_SHIFT            0
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF3EN_MASK             0x1
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF3EN_SHIFT            1
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_RXMIT_CF_EN_MASK       0x1
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_RXMIT_CF_EN_SHIFT      2
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_RX_ERROR_CF_EN_MASK    0x1
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_RX_ERROR_CF_EN_SHIFT   3
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_FORCE_ACK_CF_EN_MASK   0x1
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_FORCE_ACK_CF_EN_SHIFT  4
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_FLUSH_Q0_CF_EN_MASK    0x1
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_FLUSH_Q0_CF_EN_SHIFT   5
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF8EN_MASK             0x1
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF8EN_SHIFT            6
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF9EN_MASK             0x1
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF9EN_SHIFT            7
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF2EN_MASK		0x1
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF2EN_SHIFT		0
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF3EN_MASK		0x1
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF3EN_SHIFT		1
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_RXMIT_CF_EN_MASK	0x1
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_RXMIT_CF_EN_SHIFT	2
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_RX_ERROR_CF_EN_MASK	0x1
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_RX_ERROR_CF_EN_SHIFT	3
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_FORCE_ACK_CF_EN_MASK	0x1
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_FORCE_ACK_CF_EN_SHIFT	4
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_FLUSH_Q0_CF_EN_MASK	0x1
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_FLUSH_Q0_CF_EN_SHIFT	5
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF8EN_MASK		0x1
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF8EN_SHIFT		6
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF9EN_MASK		0x1
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF9EN_SHIFT		7
 	u8 flags9;
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF10EN_MASK            0x1
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF10EN_SHIFT           0
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF11EN_MASK            0x1
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF11EN_SHIFT           1
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF12EN_MASK            0x1
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF12EN_SHIFT           2
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF13EN_MASK            0x1
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF13EN_SHIFT           3
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF14EN_MASK            0x1
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF14EN_SHIFT           4
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF15EN_MASK            0x1
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF15EN_SHIFT           5
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF16EN_MASK            0x1
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF16EN_SHIFT           6
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF17EN_MASK            0x1
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF17EN_SHIFT           7
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF10EN_MASK	0x1
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF10EN_SHIFT	0
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF11EN_MASK	0x1
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF11EN_SHIFT	1
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF12EN_MASK	0x1
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF12EN_SHIFT	2
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF13EN_MASK	0x1
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF13EN_SHIFT	3
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF14EN_MASK	0x1
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF14EN_SHIFT	4
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF15EN_MASK	0x1
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF15EN_SHIFT	5
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF16EN_MASK	0x1
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF16EN_SHIFT	6
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF17EN_MASK	0x1
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF17EN_SHIFT	7
 	u8 flags10;
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF18EN_MASK            0x1
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF18EN_SHIFT           0
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF19EN_MASK            0x1
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF19EN_SHIFT           1
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF20EN_MASK            0x1
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF20EN_SHIFT           2
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF21EN_MASK            0x1
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF21EN_SHIFT           3
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_SLOW_PATH_EN_MASK      0x1
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_SLOW_PATH_EN_SHIFT     4
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF23EN_MASK            0x1
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF23EN_SHIFT           5
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_RULE0EN_MASK           0x1
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_RULE0EN_SHIFT          6
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_RULE1EN_MASK           0x1
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_RULE1EN_SHIFT          7
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF18EN_MASK		0x1
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF18EN_SHIFT		0
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF19EN_MASK		0x1
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF19EN_SHIFT		1
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF20EN_MASK		0x1
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF20EN_SHIFT		2
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF21EN_MASK		0x1
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF21EN_SHIFT		3
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_SLOW_PATH_EN_MASK	0x1
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_SLOW_PATH_EN_SHIFT	4
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF23EN_MASK		0x1
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF23EN_SHIFT		5
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_RULE0EN_MASK		0x1
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_RULE0EN_SHIFT		6
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_RULE1EN_MASK		0x1
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_RULE1EN_SHIFT		7
 	u8 flags11;
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_RULE2EN_MASK           0x1
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_RULE2EN_SHIFT          0
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_RULE3EN_MASK           0x1
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_RULE3EN_SHIFT          1
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_RULE4EN_MASK           0x1
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_RULE4EN_SHIFT          2
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_RULE5EN_MASK           0x1
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_RULE5EN_SHIFT          3
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_RULE6EN_MASK           0x1
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_RULE6EN_SHIFT          4
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_RULE7EN_MASK           0x1
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_RULE7EN_SHIFT          5
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_A0_RESERVED1_MASK      0x1
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_A0_RESERVED1_SHIFT     6
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_RULE9EN_MASK           0x1
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_RULE9EN_SHIFT          7
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_RULE2EN_MASK		0x1
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_RULE2EN_SHIFT		0
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_RULE3EN_MASK		0x1
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_RULE3EN_SHIFT		1
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_RULE4EN_MASK		0x1
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_RULE4EN_SHIFT		2
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_RULE5EN_MASK		0x1
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_RULE5EN_SHIFT		3
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_RULE6EN_MASK		0x1
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_RULE6EN_SHIFT		4
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_RULE7EN_MASK		0x1
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_RULE7EN_SHIFT		5
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_A0_RESERVED1_MASK	0x1
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_A0_RESERVED1_SHIFT	6
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_RULE9EN_MASK		0x1
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_RULE9EN_SHIFT		7
 	u8 flags12;
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_RULE10EN_MASK          0x1
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_RULE10EN_SHIFT         0
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_IRQ_PROD_RULE_EN_MASK  0x1
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_IRQ_PROD_RULE_EN_SHIFT 1
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_A0_RESERVED2_MASK      0x1
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_A0_RESERVED2_SHIFT     2
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_A0_RESERVED3_MASK      0x1
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_A0_RESERVED3_SHIFT     3
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_RULE14EN_MASK          0x1
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_RULE14EN_SHIFT         4
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_RULE15EN_MASK          0x1
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_RULE15EN_SHIFT         5
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_RULE16EN_MASK          0x1
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_RULE16EN_SHIFT         6
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_RULE17EN_MASK          0x1
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_RULE17EN_SHIFT         7
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_IRQ_PROD_RULE_EN_MASK	0x1
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_IRQ_PROD_RULE_EN_SHIFT	0
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_RULE11EN_MASK		0x1
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_RULE11EN_SHIFT		1
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_A0_RESERVED2_MASK	0x1
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_A0_RESERVED2_SHIFT	2
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_A0_RESERVED3_MASK	0x1
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_A0_RESERVED3_SHIFT	3
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_RULE14EN_MASK		0x1
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_RULE14EN_SHIFT		4
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_RULE15EN_MASK		0x1
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_RULE15EN_SHIFT		5
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_RULE16EN_MASK		0x1
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_RULE16EN_SHIFT		6
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_RULE17EN_MASK		0x1
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_RULE17EN_SHIFT		7
 	u8 flags13;
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_RULE18EN_MASK          0x1
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_RULE18EN_SHIFT         0
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_RULE19EN_MASK          0x1
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_RULE19EN_SHIFT         1
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_A0_RESERVED4_MASK      0x1
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_A0_RESERVED4_SHIFT     2
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_A0_RESERVED5_MASK      0x1
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_A0_RESERVED5_SHIFT     3
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_A0_RESERVED6_MASK      0x1
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_A0_RESERVED6_SHIFT     4
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_A0_RESERVED7_MASK      0x1
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_A0_RESERVED7_SHIFT     5
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_A0_RESERVED8_MASK      0x1
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_A0_RESERVED8_SHIFT     6
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_A0_RESERVED9_MASK      0x1
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_A0_RESERVED9_SHIFT     7
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_RULE18EN_MASK		0x1
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_RULE18EN_SHIFT		0
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_RULE19EN_MASK		0x1
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_RULE19EN_SHIFT		1
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_A0_RESERVED4_MASK	0x1
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_A0_RESERVED4_SHIFT	2
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_A0_RESERVED5_MASK	0x1
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_A0_RESERVED5_SHIFT	3
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_A0_RESERVED6_MASK	0x1
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_A0_RESERVED6_SHIFT	4
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_A0_RESERVED7_MASK	0x1
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_A0_RESERVED7_SHIFT	5
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_A0_RESERVED8_MASK	0x1
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_A0_RESERVED8_SHIFT	6
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_A0_RESERVED9_MASK	0x1
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_A0_RESERVED9_SHIFT	7
 	u8 flags14;
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_BIT16_MASK             0x1
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_BIT16_SHIFT            0
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_BIT17_MASK             0x1
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_BIT17_SHIFT            1
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_BIT18_MASK             0x1
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_BIT18_SHIFT            2
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_BIT19_MASK             0x1
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_BIT19_SHIFT            3
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_BIT20_MASK             0x1
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_BIT20_SHIFT            4
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_BIT21_MASK             0x1
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_BIT21_SHIFT            5
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF23_MASK              0x3
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF23_SHIFT             6
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_BIT16_MASK	0x1
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_BIT16_SHIFT	0
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_BIT17_MASK	0x1
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_BIT17_SHIFT	1
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_BIT18_MASK	0x1
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_BIT18_SHIFT	2
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_BIT19_MASK	0x1
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_BIT19_SHIFT	3
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_BIT20_MASK	0x1
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_BIT20_SHIFT	4
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_BIT21_MASK	0x1
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_BIT21_SHIFT	5
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF23_MASK	0x3
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF23_SHIFT	6
 	u8 byte2;
 	__le16 physical_q0;
-	__le16 word1;
-	__le16 irq_prod;
-	__le16 word3;
-	__le16 word4;
-	__le16 ereserved1;
+	__le16 irq_prod_shadow;
+	__le16 word2;
 	__le16 irq_cons;
+	__le16 irq_prod;
+	__le16 e5_reserved1;
+	__le16 conn_dpi;
 	u8 rxmit_opcode;
 	u8 byte4;
 	u8 byte5;
@@ -8533,37 +9128,37 @@ struct xstorm_roce_resp_conn_ag_ctx {
 	__le32 msn_and_syndrome;
 };
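Note the member changes riding along with the rename: the anonymous word1/word3/word4/ereserved1 slots give way to named producers plus an explicit e5_reserved1/conn_dpi pair. The E4_ prefix on the macros and the e4_ tag on the structs presumably scope these layouts to the E4 chip generation, keeping the namespace clear for E5 variants of the same contexts — that reading is an inference from the names (and the e5_reserved fields), not something the patch states.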
 
-struct ystorm_roce_req_conn_ag_ctx {
+struct e4_ystorm_roce_req_conn_ag_ctx {
 	u8 byte0;
 	u8 byte1;
 	u8 flags0;
-#define YSTORM_ROCE_REQ_CONN_AG_CTX_BIT0_MASK     0x1
-#define YSTORM_ROCE_REQ_CONN_AG_CTX_BIT0_SHIFT    0
-#define YSTORM_ROCE_REQ_CONN_AG_CTX_BIT1_MASK     0x1
-#define YSTORM_ROCE_REQ_CONN_AG_CTX_BIT1_SHIFT    1
-#define YSTORM_ROCE_REQ_CONN_AG_CTX_CF0_MASK      0x3
-#define YSTORM_ROCE_REQ_CONN_AG_CTX_CF0_SHIFT     2
-#define YSTORM_ROCE_REQ_CONN_AG_CTX_CF1_MASK      0x3
-#define YSTORM_ROCE_REQ_CONN_AG_CTX_CF1_SHIFT     4
-#define YSTORM_ROCE_REQ_CONN_AG_CTX_CF2_MASK      0x3
-#define YSTORM_ROCE_REQ_CONN_AG_CTX_CF2_SHIFT     6
+#define E4_YSTORM_ROCE_REQ_CONN_AG_CTX_BIT0_MASK	0x1
+#define E4_YSTORM_ROCE_REQ_CONN_AG_CTX_BIT0_SHIFT	0
+#define E4_YSTORM_ROCE_REQ_CONN_AG_CTX_BIT1_MASK	0x1
+#define E4_YSTORM_ROCE_REQ_CONN_AG_CTX_BIT1_SHIFT	1
+#define E4_YSTORM_ROCE_REQ_CONN_AG_CTX_CF0_MASK		0x3
+#define E4_YSTORM_ROCE_REQ_CONN_AG_CTX_CF0_SHIFT	2
+#define E4_YSTORM_ROCE_REQ_CONN_AG_CTX_CF1_MASK		0x3
+#define E4_YSTORM_ROCE_REQ_CONN_AG_CTX_CF1_SHIFT	4
+#define E4_YSTORM_ROCE_REQ_CONN_AG_CTX_CF2_MASK		0x3
+#define E4_YSTORM_ROCE_REQ_CONN_AG_CTX_CF2_SHIFT	6
 	u8 flags1;
-#define YSTORM_ROCE_REQ_CONN_AG_CTX_CF0EN_MASK    0x1
-#define YSTORM_ROCE_REQ_CONN_AG_CTX_CF0EN_SHIFT   0
-#define YSTORM_ROCE_REQ_CONN_AG_CTX_CF1EN_MASK    0x1
-#define YSTORM_ROCE_REQ_CONN_AG_CTX_CF1EN_SHIFT   1
-#define YSTORM_ROCE_REQ_CONN_AG_CTX_CF2EN_MASK    0x1
-#define YSTORM_ROCE_REQ_CONN_AG_CTX_CF2EN_SHIFT   2
-#define YSTORM_ROCE_REQ_CONN_AG_CTX_RULE0EN_MASK  0x1
-#define YSTORM_ROCE_REQ_CONN_AG_CTX_RULE0EN_SHIFT 3
-#define YSTORM_ROCE_REQ_CONN_AG_CTX_RULE1EN_MASK  0x1
-#define YSTORM_ROCE_REQ_CONN_AG_CTX_RULE1EN_SHIFT 4
-#define YSTORM_ROCE_REQ_CONN_AG_CTX_RULE2EN_MASK  0x1
-#define YSTORM_ROCE_REQ_CONN_AG_CTX_RULE2EN_SHIFT 5
-#define YSTORM_ROCE_REQ_CONN_AG_CTX_RULE3EN_MASK  0x1
-#define YSTORM_ROCE_REQ_CONN_AG_CTX_RULE3EN_SHIFT 6
-#define YSTORM_ROCE_REQ_CONN_AG_CTX_RULE4EN_MASK  0x1
-#define YSTORM_ROCE_REQ_CONN_AG_CTX_RULE4EN_SHIFT 7
+#define E4_YSTORM_ROCE_REQ_CONN_AG_CTX_CF0EN_MASK	0x1
+#define E4_YSTORM_ROCE_REQ_CONN_AG_CTX_CF0EN_SHIFT	0
+#define E4_YSTORM_ROCE_REQ_CONN_AG_CTX_CF1EN_MASK	0x1
+#define E4_YSTORM_ROCE_REQ_CONN_AG_CTX_CF1EN_SHIFT	1
+#define E4_YSTORM_ROCE_REQ_CONN_AG_CTX_CF2EN_MASK	0x1
+#define E4_YSTORM_ROCE_REQ_CONN_AG_CTX_CF2EN_SHIFT	2
+#define E4_YSTORM_ROCE_REQ_CONN_AG_CTX_RULE0EN_MASK	0x1
+#define E4_YSTORM_ROCE_REQ_CONN_AG_CTX_RULE0EN_SHIFT	3
+#define E4_YSTORM_ROCE_REQ_CONN_AG_CTX_RULE1EN_MASK	0x1
+#define E4_YSTORM_ROCE_REQ_CONN_AG_CTX_RULE1EN_SHIFT	4
+#define E4_YSTORM_ROCE_REQ_CONN_AG_CTX_RULE2EN_MASK	0x1
+#define E4_YSTORM_ROCE_REQ_CONN_AG_CTX_RULE2EN_SHIFT	5
+#define E4_YSTORM_ROCE_REQ_CONN_AG_CTX_RULE3EN_MASK	0x1
+#define E4_YSTORM_ROCE_REQ_CONN_AG_CTX_RULE3EN_SHIFT	6
+#define E4_YSTORM_ROCE_REQ_CONN_AG_CTX_RULE4EN_MASK	0x1
+#define E4_YSTORM_ROCE_REQ_CONN_AG_CTX_RULE4EN_SHIFT	7
 	u8 byte2;
 	u8 byte3;
 	__le16 word0;
@@ -8577,37 +9172,37 @@ struct ystorm_roce_req_conn_ag_ctx {
 	__le32 reg3;
 };
 
-struct ystorm_roce_resp_conn_ag_ctx {
+struct e4_ystorm_roce_resp_conn_ag_ctx {
 	u8 byte0;
 	u8 byte1;
 	u8 flags0;
-#define YSTORM_ROCE_RESP_CONN_AG_CTX_BIT0_MASK     0x1
-#define YSTORM_ROCE_RESP_CONN_AG_CTX_BIT0_SHIFT    0
-#define YSTORM_ROCE_RESP_CONN_AG_CTX_BIT1_MASK     0x1
-#define YSTORM_ROCE_RESP_CONN_AG_CTX_BIT1_SHIFT    1
-#define YSTORM_ROCE_RESP_CONN_AG_CTX_CF0_MASK      0x3
-#define YSTORM_ROCE_RESP_CONN_AG_CTX_CF0_SHIFT     2
-#define YSTORM_ROCE_RESP_CONN_AG_CTX_CF1_MASK      0x3
-#define YSTORM_ROCE_RESP_CONN_AG_CTX_CF1_SHIFT     4
-#define YSTORM_ROCE_RESP_CONN_AG_CTX_CF2_MASK      0x3
-#define YSTORM_ROCE_RESP_CONN_AG_CTX_CF2_SHIFT     6
+#define E4_YSTORM_ROCE_RESP_CONN_AG_CTX_BIT0_MASK	0x1
+#define E4_YSTORM_ROCE_RESP_CONN_AG_CTX_BIT0_SHIFT	0
+#define E4_YSTORM_ROCE_RESP_CONN_AG_CTX_BIT1_MASK	0x1
+#define E4_YSTORM_ROCE_RESP_CONN_AG_CTX_BIT1_SHIFT	1
+#define E4_YSTORM_ROCE_RESP_CONN_AG_CTX_CF0_MASK	0x3
+#define E4_YSTORM_ROCE_RESP_CONN_AG_CTX_CF0_SHIFT	2
+#define E4_YSTORM_ROCE_RESP_CONN_AG_CTX_CF1_MASK	0x3
+#define E4_YSTORM_ROCE_RESP_CONN_AG_CTX_CF1_SHIFT	4
+#define E4_YSTORM_ROCE_RESP_CONN_AG_CTX_CF2_MASK	0x3
+#define E4_YSTORM_ROCE_RESP_CONN_AG_CTX_CF2_SHIFT	6
 	u8 flags1;
-#define YSTORM_ROCE_RESP_CONN_AG_CTX_CF0EN_MASK    0x1
-#define YSTORM_ROCE_RESP_CONN_AG_CTX_CF0EN_SHIFT   0
-#define YSTORM_ROCE_RESP_CONN_AG_CTX_CF1EN_MASK    0x1
-#define YSTORM_ROCE_RESP_CONN_AG_CTX_CF1EN_SHIFT   1
-#define YSTORM_ROCE_RESP_CONN_AG_CTX_CF2EN_MASK    0x1
-#define YSTORM_ROCE_RESP_CONN_AG_CTX_CF2EN_SHIFT   2
-#define YSTORM_ROCE_RESP_CONN_AG_CTX_RULE0EN_MASK  0x1
-#define YSTORM_ROCE_RESP_CONN_AG_CTX_RULE0EN_SHIFT 3
-#define YSTORM_ROCE_RESP_CONN_AG_CTX_RULE1EN_MASK  0x1
-#define YSTORM_ROCE_RESP_CONN_AG_CTX_RULE1EN_SHIFT 4
-#define YSTORM_ROCE_RESP_CONN_AG_CTX_RULE2EN_MASK  0x1
-#define YSTORM_ROCE_RESP_CONN_AG_CTX_RULE2EN_SHIFT 5
-#define YSTORM_ROCE_RESP_CONN_AG_CTX_RULE3EN_MASK  0x1
-#define YSTORM_ROCE_RESP_CONN_AG_CTX_RULE3EN_SHIFT 6
-#define YSTORM_ROCE_RESP_CONN_AG_CTX_RULE4EN_MASK  0x1
-#define YSTORM_ROCE_RESP_CONN_AG_CTX_RULE4EN_SHIFT 7
+#define E4_YSTORM_ROCE_RESP_CONN_AG_CTX_CF0EN_MASK	0x1
+#define E4_YSTORM_ROCE_RESP_CONN_AG_CTX_CF0EN_SHIFT	0
+#define E4_YSTORM_ROCE_RESP_CONN_AG_CTX_CF1EN_MASK	0x1
+#define E4_YSTORM_ROCE_RESP_CONN_AG_CTX_CF1EN_SHIFT	1
+#define E4_YSTORM_ROCE_RESP_CONN_AG_CTX_CF2EN_MASK	0x1
+#define E4_YSTORM_ROCE_RESP_CONN_AG_CTX_CF2EN_SHIFT	2
+#define E4_YSTORM_ROCE_RESP_CONN_AG_CTX_RULE0EN_MASK	0x1
+#define E4_YSTORM_ROCE_RESP_CONN_AG_CTX_RULE0EN_SHIFT	3
+#define E4_YSTORM_ROCE_RESP_CONN_AG_CTX_RULE1EN_MASK	0x1
+#define E4_YSTORM_ROCE_RESP_CONN_AG_CTX_RULE1EN_SHIFT	4
+#define E4_YSTORM_ROCE_RESP_CONN_AG_CTX_RULE2EN_MASK	0x1
+#define E4_YSTORM_ROCE_RESP_CONN_AG_CTX_RULE2EN_SHIFT	5
+#define E4_YSTORM_ROCE_RESP_CONN_AG_CTX_RULE3EN_MASK	0x1
+#define E4_YSTORM_ROCE_RESP_CONN_AG_CTX_RULE3EN_SHIFT	6
+#define E4_YSTORM_ROCE_RESP_CONN_AG_CTX_RULE4EN_MASK	0x1
+#define E4_YSTORM_ROCE_RESP_CONN_AG_CTX_RULE4EN_SHIFT	7
 	u8 byte2;
 	u8 byte3;
 	__le16 word0;
@@ -8621,6 +9216,7 @@ struct ystorm_roce_resp_conn_ag_ctx {
 	__le32 reg3;
 };
 
+/* Roce doorbell data */
 enum roce_flavor {
 	PLAIN_ROCE,
 	RROCE_IPV4,
@@ -8628,228 +9224,231 @@ enum roce_flavor {
 	MAX_ROCE_FLAVOR
 };
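A hedged gloss on the flavors: PLAIN_ROCE appears to be the non-routable RoCE v1 encapsulation and RROCE_IPV4 routable RoCEv2 over IPv4 (the unchanged entries between the two hunks are omitted by the diff), with MAX_ROCE_FLAVOR serving as the usual bounds sentinel:

	/* Minimal sketch (helper is an assumption, not from the patch):
	 * reject out-of-range flavors before trusting them. */
	static inline bool roce_flavor_is_valid(enum roce_flavor flavor)
	{
		return flavor < MAX_ROCE_FLAVOR;
	}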
 
+/* The iwarp storm context of Ystorm */
 struct ystorm_iwarp_conn_st_ctx {
 	__le32 reserved[4];
 };
 
+/* The iwarp storm context of Pstorm */
 struct pstorm_iwarp_conn_st_ctx {
 	__le32 reserved[36];
 };
 
+/* The iwarp storm context of Xstorm */
 struct xstorm_iwarp_conn_st_ctx {
 	__le32 reserved[44];
 };
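The *_conn_st_ctx blocks above stay opaque — fixed arrays of dwords, presumably firmware-owned — while the e4_*_conn_ag_ctx ("ag", apparently aggregation) structures that follow are spelled out bit by bit because the driver programs them. A small C11 sketch of the size implied by the reserved array (illustrative, not from the patch):

	/* 44 dwords of opaque Xstorm iwarp storm state = 176 bytes. */
	_Static_assert(sizeof(struct xstorm_iwarp_conn_st_ctx) ==
		       44 * sizeof(__le32),
		       "Xstorm iwarp st_ctx is sized in dwords");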
 
-struct xstorm_iwarp_conn_ag_ctx {
+struct e4_xstorm_iwarp_conn_ag_ctx {
 	u8 reserved0;
 	u8 state;
 	u8 flags0;
-#define XSTORM_IWARP_CONN_AG_CTX_EXIST_IN_QM0_MASK	0x1
-#define XSTORM_IWARP_CONN_AG_CTX_EXIST_IN_QM0_SHIFT	0
-#define XSTORM_IWARP_CONN_AG_CTX_EXIST_IN_QM1_MASK	0x1
-#define XSTORM_IWARP_CONN_AG_CTX_EXIST_IN_QM1_SHIFT	1
-#define XSTORM_IWARP_CONN_AG_CTX_EXIST_IN_QM2_MASK	0x1
-#define XSTORM_IWARP_CONN_AG_CTX_EXIST_IN_QM2_SHIFT	2
-#define XSTORM_IWARP_CONN_AG_CTX_EXIST_IN_QM3_MASK	0x1
-#define XSTORM_IWARP_CONN_AG_CTX_EXIST_IN_QM3_SHIFT	3
-#define XSTORM_IWARP_CONN_AG_CTX_BIT4_MASK	0x1
-#define XSTORM_IWARP_CONN_AG_CTX_BIT4_SHIFT	4
-#define XSTORM_IWARP_CONN_AG_CTX_RESERVED2_MASK	0x1
-#define XSTORM_IWARP_CONN_AG_CTX_RESERVED2_SHIFT	5
-#define XSTORM_IWARP_CONN_AG_CTX_BIT6_MASK	0x1
-#define XSTORM_IWARP_CONN_AG_CTX_BIT6_SHIFT	6
-#define XSTORM_IWARP_CONN_AG_CTX_BIT7_MASK	0x1
-#define XSTORM_IWARP_CONN_AG_CTX_BIT7_SHIFT	7
+#define E4_XSTORM_IWARP_CONN_AG_CTX_EXIST_IN_QM0_MASK	0x1
+#define E4_XSTORM_IWARP_CONN_AG_CTX_EXIST_IN_QM0_SHIFT	0
+#define E4_XSTORM_IWARP_CONN_AG_CTX_EXIST_IN_QM1_MASK	0x1
+#define E4_XSTORM_IWARP_CONN_AG_CTX_EXIST_IN_QM1_SHIFT	1
+#define E4_XSTORM_IWARP_CONN_AG_CTX_EXIST_IN_QM2_MASK	0x1
+#define E4_XSTORM_IWARP_CONN_AG_CTX_EXIST_IN_QM2_SHIFT	2
+#define E4_XSTORM_IWARP_CONN_AG_CTX_EXIST_IN_QM3_MASK	0x1
+#define E4_XSTORM_IWARP_CONN_AG_CTX_EXIST_IN_QM3_SHIFT	3
+#define E4_XSTORM_IWARP_CONN_AG_CTX_BIT4_MASK		0x1
+#define E4_XSTORM_IWARP_CONN_AG_CTX_BIT4_SHIFT		4
+#define E4_XSTORM_IWARP_CONN_AG_CTX_RESERVED2_MASK	0x1
+#define E4_XSTORM_IWARP_CONN_AG_CTX_RESERVED2_SHIFT	5
+#define E4_XSTORM_IWARP_CONN_AG_CTX_BIT6_MASK		0x1
+#define E4_XSTORM_IWARP_CONN_AG_CTX_BIT6_SHIFT		6
+#define E4_XSTORM_IWARP_CONN_AG_CTX_BIT7_MASK		0x1
+#define E4_XSTORM_IWARP_CONN_AG_CTX_BIT7_SHIFT		7
 	u8 flags1;
-#define XSTORM_IWARP_CONN_AG_CTX_BIT8_MASK	0x1
-#define XSTORM_IWARP_CONN_AG_CTX_BIT8_SHIFT	0
-#define XSTORM_IWARP_CONN_AG_CTX_BIT9_MASK	0x1
-#define XSTORM_IWARP_CONN_AG_CTX_BIT9_SHIFT	1
-#define XSTORM_IWARP_CONN_AG_CTX_BIT10_MASK	0x1
-#define XSTORM_IWARP_CONN_AG_CTX_BIT10_SHIFT	2
-#define XSTORM_IWARP_CONN_AG_CTX_BIT11_MASK	0x1
-#define XSTORM_IWARP_CONN_AG_CTX_BIT11_SHIFT	3
-#define XSTORM_IWARP_CONN_AG_CTX_BIT12_MASK	0x1
-#define XSTORM_IWARP_CONN_AG_CTX_BIT12_SHIFT	4
-#define XSTORM_IWARP_CONN_AG_CTX_BIT13_MASK	0x1
-#define XSTORM_IWARP_CONN_AG_CTX_BIT13_SHIFT	5
-#define XSTORM_IWARP_CONN_AG_CTX_BIT14_MASK	0x1
-#define XSTORM_IWARP_CONN_AG_CTX_BIT14_SHIFT	6
-#define XSTORM_IWARP_CONN_AG_CTX_YSTORM_FLUSH_OR_REWIND_SND_MAX_MASK	0x1
-#define XSTORM_IWARP_CONN_AG_CTX_YSTORM_FLUSH_OR_REWIND_SND_MAX_SHIFT	7
+#define E4_XSTORM_IWARP_CONN_AG_CTX_BIT8_MASK				0x1
+#define E4_XSTORM_IWARP_CONN_AG_CTX_BIT8_SHIFT				0
+#define E4_XSTORM_IWARP_CONN_AG_CTX_BIT9_MASK				0x1
+#define E4_XSTORM_IWARP_CONN_AG_CTX_BIT9_SHIFT				1
+#define E4_XSTORM_IWARP_CONN_AG_CTX_BIT10_MASK				0x1
+#define E4_XSTORM_IWARP_CONN_AG_CTX_BIT10_SHIFT				2
+#define E4_XSTORM_IWARP_CONN_AG_CTX_BIT11_MASK				0x1
+#define E4_XSTORM_IWARP_CONN_AG_CTX_BIT11_SHIFT				3
+#define E4_XSTORM_IWARP_CONN_AG_CTX_BIT12_MASK				0x1
+#define E4_XSTORM_IWARP_CONN_AG_CTX_BIT12_SHIFT				4
+#define E4_XSTORM_IWARP_CONN_AG_CTX_BIT13_MASK				0x1
+#define E4_XSTORM_IWARP_CONN_AG_CTX_BIT13_SHIFT				5
+#define E4_XSTORM_IWARP_CONN_AG_CTX_BIT14_MASK				0x1
+#define E4_XSTORM_IWARP_CONN_AG_CTX_BIT14_SHIFT				6
+#define E4_XSTORM_IWARP_CONN_AG_CTX_YSTORM_FLUSH_OR_REWIND_SND_MAX_MASK	0x1
+#define E4_XSTORM_IWARP_CONN_AG_CTX_YSTORM_FLUSH_OR_REWIND_SND_MAX_SHIFT 7
 	u8 flags2;
-#define XSTORM_IWARP_CONN_AG_CTX_CF0_MASK	0x3
-#define XSTORM_IWARP_CONN_AG_CTX_CF0_SHIFT	0
-#define XSTORM_IWARP_CONN_AG_CTX_CF1_MASK	0x3
-#define XSTORM_IWARP_CONN_AG_CTX_CF1_SHIFT	2
-#define XSTORM_IWARP_CONN_AG_CTX_CF2_MASK	0x3
-#define XSTORM_IWARP_CONN_AG_CTX_CF2_SHIFT	4
-#define XSTORM_IWARP_CONN_AG_CTX_TIMER_STOP_ALL_MASK	0x3
-#define XSTORM_IWARP_CONN_AG_CTX_TIMER_STOP_ALL_SHIFT	6
+#define E4_XSTORM_IWARP_CONN_AG_CTX_CF0_MASK			0x3
+#define E4_XSTORM_IWARP_CONN_AG_CTX_CF0_SHIFT			0
+#define E4_XSTORM_IWARP_CONN_AG_CTX_CF1_MASK			0x3
+#define E4_XSTORM_IWARP_CONN_AG_CTX_CF1_SHIFT			2
+#define E4_XSTORM_IWARP_CONN_AG_CTX_CF2_MASK			0x3
+#define E4_XSTORM_IWARP_CONN_AG_CTX_CF2_SHIFT			4
+#define E4_XSTORM_IWARP_CONN_AG_CTX_TIMER_STOP_ALL_MASK		0x3
+#define E4_XSTORM_IWARP_CONN_AG_CTX_TIMER_STOP_ALL_SHIFT	6
 	u8 flags3;
-#define XSTORM_IWARP_CONN_AG_CTX_CF4_MASK	0x3
-#define XSTORM_IWARP_CONN_AG_CTX_CF4_SHIFT	0
-#define XSTORM_IWARP_CONN_AG_CTX_CF5_MASK	0x3
-#define XSTORM_IWARP_CONN_AG_CTX_CF5_SHIFT	2
-#define XSTORM_IWARP_CONN_AG_CTX_CF6_MASK	0x3
-#define XSTORM_IWARP_CONN_AG_CTX_CF6_SHIFT	4
-#define XSTORM_IWARP_CONN_AG_CTX_CF7_MASK	0x3
-#define XSTORM_IWARP_CONN_AG_CTX_CF7_SHIFT	6
+#define E4_XSTORM_IWARP_CONN_AG_CTX_CF4_MASK	0x3
+#define E4_XSTORM_IWARP_CONN_AG_CTX_CF4_SHIFT	0
+#define E4_XSTORM_IWARP_CONN_AG_CTX_CF5_MASK	0x3
+#define E4_XSTORM_IWARP_CONN_AG_CTX_CF5_SHIFT	2
+#define E4_XSTORM_IWARP_CONN_AG_CTX_CF6_MASK	0x3
+#define E4_XSTORM_IWARP_CONN_AG_CTX_CF6_SHIFT	4
+#define E4_XSTORM_IWARP_CONN_AG_CTX_CF7_MASK	0x3
+#define E4_XSTORM_IWARP_CONN_AG_CTX_CF7_SHIFT	6
 	u8 flags4;
-#define XSTORM_IWARP_CONN_AG_CTX_CF8_MASK	0x3
-#define XSTORM_IWARP_CONN_AG_CTX_CF8_SHIFT	0
-#define XSTORM_IWARP_CONN_AG_CTX_CF9_MASK	0x3
-#define XSTORM_IWARP_CONN_AG_CTX_CF9_SHIFT	2
-#define XSTORM_IWARP_CONN_AG_CTX_CF10_MASK	0x3
-#define XSTORM_IWARP_CONN_AG_CTX_CF10_SHIFT	4
-#define XSTORM_IWARP_CONN_AG_CTX_CF11_MASK	0x3
-#define XSTORM_IWARP_CONN_AG_CTX_CF11_SHIFT	6
+#define E4_XSTORM_IWARP_CONN_AG_CTX_CF8_MASK	0x3
+#define E4_XSTORM_IWARP_CONN_AG_CTX_CF8_SHIFT	0
+#define E4_XSTORM_IWARP_CONN_AG_CTX_CF9_MASK	0x3
+#define E4_XSTORM_IWARP_CONN_AG_CTX_CF9_SHIFT	2
+#define E4_XSTORM_IWARP_CONN_AG_CTX_CF10_MASK	0x3
+#define E4_XSTORM_IWARP_CONN_AG_CTX_CF10_SHIFT	4
+#define E4_XSTORM_IWARP_CONN_AG_CTX_CF11_MASK	0x3
+#define E4_XSTORM_IWARP_CONN_AG_CTX_CF11_SHIFT	6
 	u8 flags5;
-#define XSTORM_IWARP_CONN_AG_CTX_CF12_MASK	0x3
-#define XSTORM_IWARP_CONN_AG_CTX_CF12_SHIFT	0
-#define XSTORM_IWARP_CONN_AG_CTX_CF13_MASK	0x3
-#define XSTORM_IWARP_CONN_AG_CTX_CF13_SHIFT	2
-#define XSTORM_IWARP_CONN_AG_CTX_SQ_FLUSH_CF_MASK	0x3
-#define XSTORM_IWARP_CONN_AG_CTX_SQ_FLUSH_CF_SHIFT	4
-#define XSTORM_IWARP_CONN_AG_CTX_CF15_MASK	0x3
-#define XSTORM_IWARP_CONN_AG_CTX_CF15_SHIFT	6
+#define E4_XSTORM_IWARP_CONN_AG_CTX_CF12_MASK		0x3
+#define E4_XSTORM_IWARP_CONN_AG_CTX_CF12_SHIFT		0
+#define E4_XSTORM_IWARP_CONN_AG_CTX_CF13_MASK		0x3
+#define E4_XSTORM_IWARP_CONN_AG_CTX_CF13_SHIFT		2
+#define E4_XSTORM_IWARP_CONN_AG_CTX_SQ_FLUSH_CF_MASK	0x3
+#define E4_XSTORM_IWARP_CONN_AG_CTX_SQ_FLUSH_CF_SHIFT	4
+#define E4_XSTORM_IWARP_CONN_AG_CTX_CF15_MASK		0x3
+#define E4_XSTORM_IWARP_CONN_AG_CTX_CF15_SHIFT		6
 	u8 flags6;
-#define XSTORM_IWARP_CONN_AG_CTX_MPA_OR_ERROR_WAKEUP_TRIGGER_CF_MASK	0x3
-#define XSTORM_IWARP_CONN_AG_CTX_MPA_OR_ERROR_WAKEUP_TRIGGER_CF_SHIFT	0
-#define XSTORM_IWARP_CONN_AG_CTX_CF17_MASK	0x3
-#define XSTORM_IWARP_CONN_AG_CTX_CF17_SHIFT	2
-#define XSTORM_IWARP_CONN_AG_CTX_CF18_MASK	0x3
-#define XSTORM_IWARP_CONN_AG_CTX_CF18_SHIFT	4
-#define XSTORM_IWARP_CONN_AG_CTX_DQ_FLUSH_MASK	0x3
-#define XSTORM_IWARP_CONN_AG_CTX_DQ_FLUSH_SHIFT	6
+#define E4_XSTORM_IWARP_CONN_AG_CTX_MPA_OR_ERROR_WAKEUP_TRIGGER_CF_MASK	0x3
+#define E4_XSTORM_IWARP_CONN_AG_CTX_MPA_OR_ERROR_WAKEUP_TRIGGER_CF_SHIFT 0
+#define E4_XSTORM_IWARP_CONN_AG_CTX_CF17_MASK				0x3
+#define E4_XSTORM_IWARP_CONN_AG_CTX_CF17_SHIFT				2
+#define E4_XSTORM_IWARP_CONN_AG_CTX_CF18_MASK				0x3
+#define E4_XSTORM_IWARP_CONN_AG_CTX_CF18_SHIFT				4
+#define E4_XSTORM_IWARP_CONN_AG_CTX_DQ_FLUSH_MASK			0x3
+#define E4_XSTORM_IWARP_CONN_AG_CTX_DQ_FLUSH_SHIFT			6
 	u8 flags7;
-#define XSTORM_IWARP_CONN_AG_CTX_FLUSH_Q0_MASK	0x3
-#define XSTORM_IWARP_CONN_AG_CTX_FLUSH_Q0_SHIFT	0
-#define XSTORM_IWARP_CONN_AG_CTX_FLUSH_Q1_MASK	0x3
-#define XSTORM_IWARP_CONN_AG_CTX_FLUSH_Q1_SHIFT	2
-#define XSTORM_IWARP_CONN_AG_CTX_SLOW_PATH_MASK	0x3
-#define XSTORM_IWARP_CONN_AG_CTX_SLOW_PATH_SHIFT	4
-#define XSTORM_IWARP_CONN_AG_CTX_CF0EN_MASK	0x1
-#define XSTORM_IWARP_CONN_AG_CTX_CF0EN_SHIFT	6
-#define XSTORM_IWARP_CONN_AG_CTX_CF1EN_MASK	0x1
-#define XSTORM_IWARP_CONN_AG_CTX_CF1EN_SHIFT	7
+#define E4_XSTORM_IWARP_CONN_AG_CTX_FLUSH_Q0_MASK	0x3
+#define E4_XSTORM_IWARP_CONN_AG_CTX_FLUSH_Q0_SHIFT	0
+#define E4_XSTORM_IWARP_CONN_AG_CTX_FLUSH_Q1_MASK	0x3
+#define E4_XSTORM_IWARP_CONN_AG_CTX_FLUSH_Q1_SHIFT	2
+#define E4_XSTORM_IWARP_CONN_AG_CTX_SLOW_PATH_MASK	0x3
+#define E4_XSTORM_IWARP_CONN_AG_CTX_SLOW_PATH_SHIFT	4
+#define E4_XSTORM_IWARP_CONN_AG_CTX_CF0EN_MASK		0x1
+#define E4_XSTORM_IWARP_CONN_AG_CTX_CF0EN_SHIFT		6
+#define E4_XSTORM_IWARP_CONN_AG_CTX_CF1EN_MASK		0x1
+#define E4_XSTORM_IWARP_CONN_AG_CTX_CF1EN_SHIFT		7
 	u8 flags8;
-#define XSTORM_IWARP_CONN_AG_CTX_CF2EN_MASK	0x1
-#define XSTORM_IWARP_CONN_AG_CTX_CF2EN_SHIFT	0
-#define XSTORM_IWARP_CONN_AG_CTX_TIMER_STOP_ALL_EN_MASK	0x1
-#define XSTORM_IWARP_CONN_AG_CTX_TIMER_STOP_ALL_EN_SHIFT	1
-#define XSTORM_IWARP_CONN_AG_CTX_CF4EN_MASK	0x1
-#define XSTORM_IWARP_CONN_AG_CTX_CF4EN_SHIFT	2
-#define XSTORM_IWARP_CONN_AG_CTX_CF5EN_MASK	0x1
-#define XSTORM_IWARP_CONN_AG_CTX_CF5EN_SHIFT	3
-#define XSTORM_IWARP_CONN_AG_CTX_CF6EN_MASK	0x1
-#define XSTORM_IWARP_CONN_AG_CTX_CF6EN_SHIFT	4
-#define XSTORM_IWARP_CONN_AG_CTX_CF7EN_MASK	0x1
-#define XSTORM_IWARP_CONN_AG_CTX_CF7EN_SHIFT	5
-#define XSTORM_IWARP_CONN_AG_CTX_CF8EN_MASK	0x1
-#define XSTORM_IWARP_CONN_AG_CTX_CF8EN_SHIFT	6
-#define XSTORM_IWARP_CONN_AG_CTX_CF9EN_MASK	0x1
-#define XSTORM_IWARP_CONN_AG_CTX_CF9EN_SHIFT	7
+#define E4_XSTORM_IWARP_CONN_AG_CTX_CF2EN_MASK			0x1
+#define E4_XSTORM_IWARP_CONN_AG_CTX_CF2EN_SHIFT			0
+#define E4_XSTORM_IWARP_CONN_AG_CTX_TIMER_STOP_ALL_EN_MASK	0x1
+#define E4_XSTORM_IWARP_CONN_AG_CTX_TIMER_STOP_ALL_EN_SHIFT	1
+#define E4_XSTORM_IWARP_CONN_AG_CTX_CF4EN_MASK			0x1
+#define E4_XSTORM_IWARP_CONN_AG_CTX_CF4EN_SHIFT			2
+#define E4_XSTORM_IWARP_CONN_AG_CTX_CF5EN_MASK			0x1
+#define E4_XSTORM_IWARP_CONN_AG_CTX_CF5EN_SHIFT			3
+#define E4_XSTORM_IWARP_CONN_AG_CTX_CF6EN_MASK			0x1
+#define E4_XSTORM_IWARP_CONN_AG_CTX_CF6EN_SHIFT			4
+#define E4_XSTORM_IWARP_CONN_AG_CTX_CF7EN_MASK			0x1
+#define E4_XSTORM_IWARP_CONN_AG_CTX_CF7EN_SHIFT			5
+#define E4_XSTORM_IWARP_CONN_AG_CTX_CF8EN_MASK			0x1
+#define E4_XSTORM_IWARP_CONN_AG_CTX_CF8EN_SHIFT			6
+#define E4_XSTORM_IWARP_CONN_AG_CTX_CF9EN_MASK			0x1
+#define E4_XSTORM_IWARP_CONN_AG_CTX_CF9EN_SHIFT			7
 	u8 flags9;
-#define XSTORM_IWARP_CONN_AG_CTX_CF10EN_MASK	0x1
-#define XSTORM_IWARP_CONN_AG_CTX_CF10EN_SHIFT	0
-#define XSTORM_IWARP_CONN_AG_CTX_CF11EN_MASK	0x1
-#define XSTORM_IWARP_CONN_AG_CTX_CF11EN_SHIFT	1
-#define XSTORM_IWARP_CONN_AG_CTX_CF12EN_MASK	0x1
-#define XSTORM_IWARP_CONN_AG_CTX_CF12EN_SHIFT	2
-#define XSTORM_IWARP_CONN_AG_CTX_CF13EN_MASK	0x1
-#define XSTORM_IWARP_CONN_AG_CTX_CF13EN_SHIFT	3
-#define XSTORM_IWARP_CONN_AG_CTX_SQ_FLUSH_CF_EN_MASK	0x1
-#define XSTORM_IWARP_CONN_AG_CTX_SQ_FLUSH_CF_EN_SHIFT	4
-#define XSTORM_IWARP_CONN_AG_CTX_CF15EN_MASK	0x1
-#define XSTORM_IWARP_CONN_AG_CTX_CF15EN_SHIFT	5
-#define XSTORM_IWARP_CONN_AG_CTX_MPA_OR_ERROR_WAKEUP_TRIGGER_CF_EN_MASK	0x1
-#define XSTORM_IWARP_CONN_AG_CTX_MPA_OR_ERROR_WAKEUP_TRIGGER_CF_EN_SHIFT 6
-#define XSTORM_IWARP_CONN_AG_CTX_CF17EN_MASK	0x1
-#define XSTORM_IWARP_CONN_AG_CTX_CF17EN_SHIFT	7
+#define E4_XSTORM_IWARP_CONN_AG_CTX_CF10EN_MASK				0x1
+#define E4_XSTORM_IWARP_CONN_AG_CTX_CF10EN_SHIFT			0
+#define E4_XSTORM_IWARP_CONN_AG_CTX_CF11EN_MASK				0x1
+#define E4_XSTORM_IWARP_CONN_AG_CTX_CF11EN_SHIFT			1
+#define E4_XSTORM_IWARP_CONN_AG_CTX_CF12EN_MASK				0x1
+#define E4_XSTORM_IWARP_CONN_AG_CTX_CF12EN_SHIFT			2
+#define E4_XSTORM_IWARP_CONN_AG_CTX_CF13EN_MASK				0x1
+#define E4_XSTORM_IWARP_CONN_AG_CTX_CF13EN_SHIFT			3
+#define E4_XSTORM_IWARP_CONN_AG_CTX_SQ_FLUSH_CF_EN_MASK			0x1
+#define E4_XSTORM_IWARP_CONN_AG_CTX_SQ_FLUSH_CF_EN_SHIFT		4
+#define E4_XSTORM_IWARP_CONN_AG_CTX_CF15EN_MASK				0x1
+#define E4_XSTORM_IWARP_CONN_AG_CTX_CF15EN_SHIFT			5
+#define E4_XSTORM_IWARP_CONN_AG_CTX_MPA_OR_ERROR_WAKEUP_TRIGGER_CF_EN_MASK 0x1
+#define E4_XSTORM_IWARP_CONN_AG_CTX_MPA_OR_ERROR_WAKEUP_TRIGGER_CF_EN_SHIFT 6
+#define E4_XSTORM_IWARP_CONN_AG_CTX_CF17EN_MASK				0x1
+#define E4_XSTORM_IWARP_CONN_AG_CTX_CF17EN_SHIFT			7
 	u8 flags10;
-#define XSTORM_IWARP_CONN_AG_CTX_CF18EN_MASK	0x1
-#define XSTORM_IWARP_CONN_AG_CTX_CF18EN_SHIFT	0
-#define XSTORM_IWARP_CONN_AG_CTX_DQ_FLUSH_EN_MASK	0x1
-#define XSTORM_IWARP_CONN_AG_CTX_DQ_FLUSH_EN_SHIFT	1
-#define XSTORM_IWARP_CONN_AG_CTX_FLUSH_Q0_EN_MASK	0x1
-#define XSTORM_IWARP_CONN_AG_CTX_FLUSH_Q0_EN_SHIFT	2
-#define XSTORM_IWARP_CONN_AG_CTX_FLUSH_Q1_EN_MASK	0x1
-#define XSTORM_IWARP_CONN_AG_CTX_FLUSH_Q1_EN_SHIFT	3
-#define XSTORM_IWARP_CONN_AG_CTX_SLOW_PATH_EN_MASK	0x1
-#define XSTORM_IWARP_CONN_AG_CTX_SLOW_PATH_EN_SHIFT	4
-#define XSTORM_IWARP_CONN_AG_CTX_CF23EN_MASK	0x1
-#define XSTORM_IWARP_CONN_AG_CTX_CF23EN_SHIFT	5
-#define XSTORM_IWARP_CONN_AG_CTX_RULE0EN_MASK	0x1
-#define XSTORM_IWARP_CONN_AG_CTX_RULE0EN_SHIFT	6
-#define XSTORM_IWARP_CONN_AG_CTX_MORE_TO_SEND_RULE_EN_MASK	0x1
-#define XSTORM_IWARP_CONN_AG_CTX_MORE_TO_SEND_RULE_EN_SHIFT	7
+#define E4_XSTORM_IWARP_CONN_AG_CTX_CF18EN_MASK			0x1
+#define E4_XSTORM_IWARP_CONN_AG_CTX_CF18EN_SHIFT		0
+#define E4_XSTORM_IWARP_CONN_AG_CTX_DQ_FLUSH_EN_MASK		0x1
+#define E4_XSTORM_IWARP_CONN_AG_CTX_DQ_FLUSH_EN_SHIFT		1
+#define E4_XSTORM_IWARP_CONN_AG_CTX_FLUSH_Q0_EN_MASK		0x1
+#define E4_XSTORM_IWARP_CONN_AG_CTX_FLUSH_Q0_EN_SHIFT		2
+#define E4_XSTORM_IWARP_CONN_AG_CTX_FLUSH_Q1_EN_MASK		0x1
+#define E4_XSTORM_IWARP_CONN_AG_CTX_FLUSH_Q1_EN_SHIFT		3
+#define E4_XSTORM_IWARP_CONN_AG_CTX_SLOW_PATH_EN_MASK		0x1
+#define E4_XSTORM_IWARP_CONN_AG_CTX_SLOW_PATH_EN_SHIFT		4
+#define E4_XSTORM_IWARP_CONN_AG_CTX_CF23EN_MASK			0x1
+#define E4_XSTORM_IWARP_CONN_AG_CTX_CF23EN_SHIFT		5
+#define E4_XSTORM_IWARP_CONN_AG_CTX_RULE0EN_MASK		0x1
+#define E4_XSTORM_IWARP_CONN_AG_CTX_RULE0EN_SHIFT		6
+#define E4_XSTORM_IWARP_CONN_AG_CTX_MORE_TO_SEND_RULE_EN_MASK	0x1
+#define E4_XSTORM_IWARP_CONN_AG_CTX_MORE_TO_SEND_RULE_EN_SHIFT	7
 	u8 flags11;
-#define XSTORM_IWARP_CONN_AG_CTX_TX_BLOCKED_EN_MASK	0x1
-#define XSTORM_IWARP_CONN_AG_CTX_TX_BLOCKED_EN_SHIFT	0
-#define XSTORM_IWARP_CONN_AG_CTX_RULE3EN_MASK	0x1
-#define XSTORM_IWARP_CONN_AG_CTX_RULE3EN_SHIFT	1
-#define XSTORM_IWARP_CONN_AG_CTX_RESERVED3_MASK	0x1
-#define XSTORM_IWARP_CONN_AG_CTX_RESERVED3_SHIFT	2
-#define XSTORM_IWARP_CONN_AG_CTX_RULE5EN_MASK	0x1
-#define XSTORM_IWARP_CONN_AG_CTX_RULE5EN_SHIFT	3
-#define XSTORM_IWARP_CONN_AG_CTX_RULE6EN_MASK	0x1
-#define XSTORM_IWARP_CONN_AG_CTX_RULE6EN_SHIFT	4
-#define XSTORM_IWARP_CONN_AG_CTX_RULE7EN_MASK	0x1
-#define XSTORM_IWARP_CONN_AG_CTX_RULE7EN_SHIFT	5
-#define XSTORM_IWARP_CONN_AG_CTX_A0_RESERVED1_MASK	0x1
-#define XSTORM_IWARP_CONN_AG_CTX_A0_RESERVED1_SHIFT	6
-#define XSTORM_IWARP_CONN_AG_CTX_RULE9EN_MASK	0x1
-#define XSTORM_IWARP_CONN_AG_CTX_RULE9EN_SHIFT	7
+#define E4_XSTORM_IWARP_CONN_AG_CTX_TX_BLOCKED_EN_MASK	0x1
+#define E4_XSTORM_IWARP_CONN_AG_CTX_TX_BLOCKED_EN_SHIFT	0
+#define E4_XSTORM_IWARP_CONN_AG_CTX_RULE3EN_MASK	0x1
+#define E4_XSTORM_IWARP_CONN_AG_CTX_RULE3EN_SHIFT	1
+#define E4_XSTORM_IWARP_CONN_AG_CTX_RESERVED3_MASK	0x1
+#define E4_XSTORM_IWARP_CONN_AG_CTX_RESERVED3_SHIFT	2
+#define E4_XSTORM_IWARP_CONN_AG_CTX_RULE5EN_MASK	0x1
+#define E4_XSTORM_IWARP_CONN_AG_CTX_RULE5EN_SHIFT	3
+#define E4_XSTORM_IWARP_CONN_AG_CTX_RULE6EN_MASK	0x1
+#define E4_XSTORM_IWARP_CONN_AG_CTX_RULE6EN_SHIFT	4
+#define E4_XSTORM_IWARP_CONN_AG_CTX_RULE7EN_MASK	0x1
+#define E4_XSTORM_IWARP_CONN_AG_CTX_RULE7EN_SHIFT	5
+#define E4_XSTORM_IWARP_CONN_AG_CTX_A0_RESERVED1_MASK	0x1
+#define E4_XSTORM_IWARP_CONN_AG_CTX_A0_RESERVED1_SHIFT	6
+#define E4_XSTORM_IWARP_CONN_AG_CTX_RULE9EN_MASK	0x1
+#define E4_XSTORM_IWARP_CONN_AG_CTX_RULE9EN_SHIFT	7
 	u8 flags12;
-#define XSTORM_IWARP_CONN_AG_CTX_SQ_NOT_EMPTY_RULE_EN_MASK	0x1
-#define XSTORM_IWARP_CONN_AG_CTX_SQ_NOT_EMPTY_RULE_EN_SHIFT	0
-#define XSTORM_IWARP_CONN_AG_CTX_RULE11EN_MASK	0x1
-#define XSTORM_IWARP_CONN_AG_CTX_RULE11EN_SHIFT	1
-#define XSTORM_IWARP_CONN_AG_CTX_A0_RESERVED2_MASK	0x1
-#define XSTORM_IWARP_CONN_AG_CTX_A0_RESERVED2_SHIFT	2
-#define XSTORM_IWARP_CONN_AG_CTX_A0_RESERVED3_MASK	0x1
-#define XSTORM_IWARP_CONN_AG_CTX_A0_RESERVED3_SHIFT	3
-#define XSTORM_IWARP_CONN_AG_CTX_SQ_FENCE_RULE_EN_MASK	0x1
-#define XSTORM_IWARP_CONN_AG_CTX_SQ_FENCE_RULE_EN_SHIFT	4
-#define XSTORM_IWARP_CONN_AG_CTX_RULE15EN_MASK	0x1
-#define XSTORM_IWARP_CONN_AG_CTX_RULE15EN_SHIFT	5
-#define XSTORM_IWARP_CONN_AG_CTX_RULE16EN_MASK	0x1
-#define XSTORM_IWARP_CONN_AG_CTX_RULE16EN_SHIFT	6
-#define XSTORM_IWARP_CONN_AG_CTX_RULE17EN_MASK	0x1
-#define XSTORM_IWARP_CONN_AG_CTX_RULE17EN_SHIFT	7
+#define E4_XSTORM_IWARP_CONN_AG_CTX_SQ_NOT_EMPTY_RULE_EN_MASK	0x1
+#define E4_XSTORM_IWARP_CONN_AG_CTX_SQ_NOT_EMPTY_RULE_EN_SHIFT	0
+#define E4_XSTORM_IWARP_CONN_AG_CTX_RULE11EN_MASK		0x1
+#define E4_XSTORM_IWARP_CONN_AG_CTX_RULE11EN_SHIFT		1
+#define E4_XSTORM_IWARP_CONN_AG_CTX_A0_RESERVED2_MASK		0x1
+#define E4_XSTORM_IWARP_CONN_AG_CTX_A0_RESERVED2_SHIFT		2
+#define E4_XSTORM_IWARP_CONN_AG_CTX_A0_RESERVED3_MASK		0x1
+#define E4_XSTORM_IWARP_CONN_AG_CTX_A0_RESERVED3_SHIFT		3
+#define E4_XSTORM_IWARP_CONN_AG_CTX_SQ_FENCE_RULE_EN_MASK	0x1
+#define E4_XSTORM_IWARP_CONN_AG_CTX_SQ_FENCE_RULE_EN_SHIFT	4
+#define E4_XSTORM_IWARP_CONN_AG_CTX_RULE15EN_MASK		0x1
+#define E4_XSTORM_IWARP_CONN_AG_CTX_RULE15EN_SHIFT		5
+#define E4_XSTORM_IWARP_CONN_AG_CTX_RULE16EN_MASK		0x1
+#define E4_XSTORM_IWARP_CONN_AG_CTX_RULE16EN_SHIFT		6
+#define E4_XSTORM_IWARP_CONN_AG_CTX_RULE17EN_MASK		0x1
+#define E4_XSTORM_IWARP_CONN_AG_CTX_RULE17EN_SHIFT		7
 	u8 flags13;
-#define XSTORM_IWARP_CONN_AG_CTX_IRQ_NOT_EMPTY_RULE_EN_MASK	0x1
-#define XSTORM_IWARP_CONN_AG_CTX_IRQ_NOT_EMPTY_RULE_EN_SHIFT	0
-#define XSTORM_IWARP_CONN_AG_CTX_HQ_NOT_FULL_RULE_EN_MASK	0x1
-#define XSTORM_IWARP_CONN_AG_CTX_HQ_NOT_FULL_RULE_EN_SHIFT	1
-#define XSTORM_IWARP_CONN_AG_CTX_ORQ_RD_FENCE_RULE_EN_MASK	0x1
-#define XSTORM_IWARP_CONN_AG_CTX_ORQ_RD_FENCE_RULE_EN_SHIFT	2
-#define XSTORM_IWARP_CONN_AG_CTX_RULE21EN_MASK	0x1
-#define XSTORM_IWARP_CONN_AG_CTX_RULE21EN_SHIFT	3
-#define XSTORM_IWARP_CONN_AG_CTX_A0_RESERVED6_MASK	0x1
-#define XSTORM_IWARP_CONN_AG_CTX_A0_RESERVED6_SHIFT	4
-#define XSTORM_IWARP_CONN_AG_CTX_ORQ_NOT_FULL_RULE_EN_MASK	0x1
-#define XSTORM_IWARP_CONN_AG_CTX_ORQ_NOT_FULL_RULE_EN_SHIFT	5
-#define XSTORM_IWARP_CONN_AG_CTX_A0_RESERVED8_MASK	0x1
-#define XSTORM_IWARP_CONN_AG_CTX_A0_RESERVED8_SHIFT	6
-#define XSTORM_IWARP_CONN_AG_CTX_A0_RESERVED9_MASK	0x1
-#define XSTORM_IWARP_CONN_AG_CTX_A0_RESERVED9_SHIFT	7
+#define E4_XSTORM_IWARP_CONN_AG_CTX_IRQ_NOT_EMPTY_RULE_EN_MASK	0x1
+#define E4_XSTORM_IWARP_CONN_AG_CTX_IRQ_NOT_EMPTY_RULE_EN_SHIFT	0
+#define E4_XSTORM_IWARP_CONN_AG_CTX_HQ_NOT_FULL_RULE_EN_MASK	0x1
+#define E4_XSTORM_IWARP_CONN_AG_CTX_HQ_NOT_FULL_RULE_EN_SHIFT	1
+#define E4_XSTORM_IWARP_CONN_AG_CTX_ORQ_RD_FENCE_RULE_EN_MASK	0x1
+#define E4_XSTORM_IWARP_CONN_AG_CTX_ORQ_RD_FENCE_RULE_EN_SHIFT	2
+#define E4_XSTORM_IWARP_CONN_AG_CTX_RULE21EN_MASK		0x1
+#define E4_XSTORM_IWARP_CONN_AG_CTX_RULE21EN_SHIFT		3
+#define E4_XSTORM_IWARP_CONN_AG_CTX_A0_RESERVED6_MASK		0x1
+#define E4_XSTORM_IWARP_CONN_AG_CTX_A0_RESERVED6_SHIFT		4
+#define E4_XSTORM_IWARP_CONN_AG_CTX_ORQ_NOT_FULL_RULE_EN_MASK	0x1
+#define E4_XSTORM_IWARP_CONN_AG_CTX_ORQ_NOT_FULL_RULE_EN_SHIFT	5
+#define E4_XSTORM_IWARP_CONN_AG_CTX_A0_RESERVED8_MASK		0x1
+#define E4_XSTORM_IWARP_CONN_AG_CTX_A0_RESERVED8_SHIFT		6
+#define E4_XSTORM_IWARP_CONN_AG_CTX_A0_RESERVED9_MASK		0x1
+#define E4_XSTORM_IWARP_CONN_AG_CTX_A0_RESERVED9_SHIFT		7
 	u8 flags14;
-#define XSTORM_IWARP_CONN_AG_CTX_BIT16_MASK	0x1
-#define XSTORM_IWARP_CONN_AG_CTX_BIT16_SHIFT	0
-#define XSTORM_IWARP_CONN_AG_CTX_BIT17_MASK	0x1
-#define XSTORM_IWARP_CONN_AG_CTX_BIT17_SHIFT	1
-#define XSTORM_IWARP_CONN_AG_CTX_BIT18_MASK	0x1
-#define XSTORM_IWARP_CONN_AG_CTX_BIT18_SHIFT	2
-#define XSTORM_IWARP_CONN_AG_CTX_E5_RESERVED1_MASK	0x1
-#define XSTORM_IWARP_CONN_AG_CTX_E5_RESERVED1_SHIFT	3
-#define XSTORM_IWARP_CONN_AG_CTX_E5_RESERVED2_MASK	0x1
-#define XSTORM_IWARP_CONN_AG_CTX_E5_RESERVED2_SHIFT	4
-#define XSTORM_IWARP_CONN_AG_CTX_E5_RESERVED3_MASK	0x1
-#define XSTORM_IWARP_CONN_AG_CTX_E5_RESERVED3_SHIFT	5
-#define XSTORM_IWARP_CONN_AG_CTX_CF23_MASK	0x3
-#define XSTORM_IWARP_CONN_AG_CTX_CF23_SHIFT	6
+#define E4_XSTORM_IWARP_CONN_AG_CTX_BIT16_MASK		0x1
+#define E4_XSTORM_IWARP_CONN_AG_CTX_BIT16_SHIFT		0
+#define E4_XSTORM_IWARP_CONN_AG_CTX_BIT17_MASK		0x1
+#define E4_XSTORM_IWARP_CONN_AG_CTX_BIT17_SHIFT		1
+#define E4_XSTORM_IWARP_CONN_AG_CTX_BIT18_MASK		0x1
+#define E4_XSTORM_IWARP_CONN_AG_CTX_BIT18_SHIFT		2
+#define E4_XSTORM_IWARP_CONN_AG_CTX_E5_RESERVED1_MASK	0x1
+#define E4_XSTORM_IWARP_CONN_AG_CTX_E5_RESERVED1_SHIFT	3
+#define E4_XSTORM_IWARP_CONN_AG_CTX_E5_RESERVED2_MASK	0x1
+#define E4_XSTORM_IWARP_CONN_AG_CTX_E5_RESERVED2_SHIFT	4
+#define E4_XSTORM_IWARP_CONN_AG_CTX_E5_RESERVED3_MASK	0x1
+#define E4_XSTORM_IWARP_CONN_AG_CTX_E5_RESERVED3_SHIFT	5
+#define E4_XSTORM_IWARP_CONN_AG_CTX_CF23_MASK		0x3
+#define E4_XSTORM_IWARP_CONN_AG_CTX_CF23_SHIFT		6
 	u8 byte2;
 	__le16 physical_q0;
 	__le16 physical_q1;
@@ -8897,89 +9496,89 @@ struct xstorm_iwarp_conn_ag_ctx {
 	__le32 reg17;
 };
 
-struct tstorm_iwarp_conn_ag_ctx {
+struct e4_tstorm_iwarp_conn_ag_ctx {
 	u8 reserved0;
 	u8 state;
 	u8 flags0;
-#define TSTORM_IWARP_CONN_AG_CTX_EXIST_IN_QM0_MASK	0x1
-#define TSTORM_IWARP_CONN_AG_CTX_EXIST_IN_QM0_SHIFT	0
-#define TSTORM_IWARP_CONN_AG_CTX_BIT1_MASK	0x1
-#define TSTORM_IWARP_CONN_AG_CTX_BIT1_SHIFT	1
-#define TSTORM_IWARP_CONN_AG_CTX_BIT2_MASK	0x1
-#define TSTORM_IWARP_CONN_AG_CTX_BIT2_SHIFT	2
-#define TSTORM_IWARP_CONN_AG_CTX_MSTORM_FLUSH_MASK	0x1
-#define TSTORM_IWARP_CONN_AG_CTX_MSTORM_FLUSH_SHIFT	3
-#define TSTORM_IWARP_CONN_AG_CTX_BIT4_MASK	0x1
-#define TSTORM_IWARP_CONN_AG_CTX_BIT4_SHIFT	4
-#define TSTORM_IWARP_CONN_AG_CTX_CACHED_ORQ_MASK	0x1
-#define TSTORM_IWARP_CONN_AG_CTX_CACHED_ORQ_SHIFT	5
-#define TSTORM_IWARP_CONN_AG_CTX_CF0_MASK	0x3
-#define TSTORM_IWARP_CONN_AG_CTX_CF0_SHIFT	6
+#define E4_TSTORM_IWARP_CONN_AG_CTX_EXIST_IN_QM0_MASK	0x1
+#define E4_TSTORM_IWARP_CONN_AG_CTX_EXIST_IN_QM0_SHIFT	0
+#define E4_TSTORM_IWARP_CONN_AG_CTX_BIT1_MASK		0x1
+#define E4_TSTORM_IWARP_CONN_AG_CTX_BIT1_SHIFT		1
+#define E4_TSTORM_IWARP_CONN_AG_CTX_BIT2_MASK		0x1
+#define E4_TSTORM_IWARP_CONN_AG_CTX_BIT2_SHIFT		2
+#define E4_TSTORM_IWARP_CONN_AG_CTX_MSTORM_FLUSH_MASK	0x1
+#define E4_TSTORM_IWARP_CONN_AG_CTX_MSTORM_FLUSH_SHIFT	3
+#define E4_TSTORM_IWARP_CONN_AG_CTX_BIT4_MASK		0x1
+#define E4_TSTORM_IWARP_CONN_AG_CTX_BIT4_SHIFT		4
+#define E4_TSTORM_IWARP_CONN_AG_CTX_CACHED_ORQ_MASK	0x1
+#define E4_TSTORM_IWARP_CONN_AG_CTX_CACHED_ORQ_SHIFT	5
+#define E4_TSTORM_IWARP_CONN_AG_CTX_CF0_MASK		0x3
+#define E4_TSTORM_IWARP_CONN_AG_CTX_CF0_SHIFT		6
 	u8 flags1;
-#define TSTORM_IWARP_CONN_AG_CTX_RQ_POST_CF_MASK	0x3
-#define TSTORM_IWARP_CONN_AG_CTX_RQ_POST_CF_SHIFT	0
-#define TSTORM_IWARP_CONN_AG_CTX_MPA_TIMEOUT_CF_MASK	0x3
-#define TSTORM_IWARP_CONN_AG_CTX_MPA_TIMEOUT_CF_SHIFT	2
-#define TSTORM_IWARP_CONN_AG_CTX_TIMER_STOP_ALL_MASK	0x3
-#define TSTORM_IWARP_CONN_AG_CTX_TIMER_STOP_ALL_SHIFT	4
-#define TSTORM_IWARP_CONN_AG_CTX_CF4_MASK	0x3
-#define TSTORM_IWARP_CONN_AG_CTX_CF4_SHIFT	6
+#define E4_TSTORM_IWARP_CONN_AG_CTX_RQ_POST_CF_MASK		0x3
+#define E4_TSTORM_IWARP_CONN_AG_CTX_RQ_POST_CF_SHIFT		0
+#define E4_TSTORM_IWARP_CONN_AG_CTX_MPA_TIMEOUT_CF_MASK		0x3
+#define E4_TSTORM_IWARP_CONN_AG_CTX_MPA_TIMEOUT_CF_SHIFT	2
+#define E4_TSTORM_IWARP_CONN_AG_CTX_TIMER_STOP_ALL_MASK		0x3
+#define E4_TSTORM_IWARP_CONN_AG_CTX_TIMER_STOP_ALL_SHIFT	4
+#define E4_TSTORM_IWARP_CONN_AG_CTX_CF4_MASK			0x3
+#define E4_TSTORM_IWARP_CONN_AG_CTX_CF4_SHIFT			6
 	u8 flags2;
-#define TSTORM_IWARP_CONN_AG_CTX_CF5_MASK	0x3
-#define TSTORM_IWARP_CONN_AG_CTX_CF5_SHIFT	0
-#define TSTORM_IWARP_CONN_AG_CTX_CF6_MASK	0x3
-#define TSTORM_IWARP_CONN_AG_CTX_CF6_SHIFT	2
-#define TSTORM_IWARP_CONN_AG_CTX_CF7_MASK	0x3
-#define TSTORM_IWARP_CONN_AG_CTX_CF7_SHIFT	4
-#define TSTORM_IWARP_CONN_AG_CTX_CF8_MASK	0x3
-#define TSTORM_IWARP_CONN_AG_CTX_CF8_SHIFT	6
+#define E4_TSTORM_IWARP_CONN_AG_CTX_CF5_MASK	0x3
+#define E4_TSTORM_IWARP_CONN_AG_CTX_CF5_SHIFT	0
+#define E4_TSTORM_IWARP_CONN_AG_CTX_CF6_MASK	0x3
+#define E4_TSTORM_IWARP_CONN_AG_CTX_CF6_SHIFT	2
+#define E4_TSTORM_IWARP_CONN_AG_CTX_CF7_MASK	0x3
+#define E4_TSTORM_IWARP_CONN_AG_CTX_CF7_SHIFT	4
+#define E4_TSTORM_IWARP_CONN_AG_CTX_CF8_MASK	0x3
+#define E4_TSTORM_IWARP_CONN_AG_CTX_CF8_SHIFT	6
 	u8 flags3;
-#define TSTORM_IWARP_CONN_AG_CTX_FLUSH_Q0_MASK	0x3
-#define TSTORM_IWARP_CONN_AG_CTX_FLUSH_Q0_SHIFT	0
-#define TSTORM_IWARP_CONN_AG_CTX_FLUSH_OR_ERROR_DETECTED_MASK	0x3
-#define TSTORM_IWARP_CONN_AG_CTX_FLUSH_OR_ERROR_DETECTED_SHIFT	2
-#define TSTORM_IWARP_CONN_AG_CTX_CF0EN_MASK	0x1
-#define TSTORM_IWARP_CONN_AG_CTX_CF0EN_SHIFT	4
-#define TSTORM_IWARP_CONN_AG_CTX_RQ_POST_CF_EN_MASK	0x1
-#define TSTORM_IWARP_CONN_AG_CTX_RQ_POST_CF_EN_SHIFT	5
-#define TSTORM_IWARP_CONN_AG_CTX_MPA_TIMEOUT_CF_EN_MASK	0x1
-#define TSTORM_IWARP_CONN_AG_CTX_MPA_TIMEOUT_CF_EN_SHIFT	6
-#define TSTORM_IWARP_CONN_AG_CTX_TIMER_STOP_ALL_EN_MASK	0x1
-#define TSTORM_IWARP_CONN_AG_CTX_TIMER_STOP_ALL_EN_SHIFT	7
+#define E4_TSTORM_IWARP_CONN_AG_CTX_FLUSH_Q0_AND_TCP_HANDSHAKE_COMPLETE_MASK 0x3
+#define E4_TSTORM_IWARP_CONN_AG_CTX_FLUSH_Q0_AND_TCP_HANDSHAKE_COMPLETE_SHIFT 0
+#define E4_TSTORM_IWARP_CONN_AG_CTX_FLUSH_OR_ERROR_DETECTED_MASK	0x3
+#define E4_TSTORM_IWARP_CONN_AG_CTX_FLUSH_OR_ERROR_DETECTED_SHIFT	2
+#define E4_TSTORM_IWARP_CONN_AG_CTX_CF0EN_MASK				0x1
+#define E4_TSTORM_IWARP_CONN_AG_CTX_CF0EN_SHIFT				4
+#define E4_TSTORM_IWARP_CONN_AG_CTX_RQ_POST_CF_EN_MASK			0x1
+#define E4_TSTORM_IWARP_CONN_AG_CTX_RQ_POST_CF_EN_SHIFT			5
+#define E4_TSTORM_IWARP_CONN_AG_CTX_MPA_TIMEOUT_CF_EN_MASK		0x1
+#define E4_TSTORM_IWARP_CONN_AG_CTX_MPA_TIMEOUT_CF_EN_SHIFT		6
+#define E4_TSTORM_IWARP_CONN_AG_CTX_TIMER_STOP_ALL_EN_MASK		0x1
+#define E4_TSTORM_IWARP_CONN_AG_CTX_TIMER_STOP_ALL_EN_SHIFT		7
 	u8 flags4;
-#define TSTORM_IWARP_CONN_AG_CTX_CF4EN_MASK	0x1
-#define TSTORM_IWARP_CONN_AG_CTX_CF4EN_SHIFT	0
-#define TSTORM_IWARP_CONN_AG_CTX_CF5EN_MASK	0x1
-#define TSTORM_IWARP_CONN_AG_CTX_CF5EN_SHIFT	1
-#define TSTORM_IWARP_CONN_AG_CTX_CF6EN_MASK	0x1
-#define TSTORM_IWARP_CONN_AG_CTX_CF6EN_SHIFT	2
-#define TSTORM_IWARP_CONN_AG_CTX_CF7EN_MASK	0x1
-#define TSTORM_IWARP_CONN_AG_CTX_CF7EN_SHIFT	3
-#define TSTORM_IWARP_CONN_AG_CTX_CF8EN_MASK	0x1
-#define TSTORM_IWARP_CONN_AG_CTX_CF8EN_SHIFT	4
-#define TSTORM_IWARP_CONN_AG_CTX_FLUSH_Q0_EN_MASK	0x1
-#define TSTORM_IWARP_CONN_AG_CTX_FLUSH_Q0_EN_SHIFT	5
-#define TSTORM_IWARP_CONN_AG_CTX_FLUSH_OR_ERROR_DETECTED_EN_MASK	0x1
-#define TSTORM_IWARP_CONN_AG_CTX_FLUSH_OR_ERROR_DETECTED_EN_SHIFT 6
-#define TSTORM_IWARP_CONN_AG_CTX_RULE0EN_MASK	0x1
-#define TSTORM_IWARP_CONN_AG_CTX_RULE0EN_SHIFT	7
+#define E4_TSTORM_IWARP_CONN_AG_CTX_CF4EN_MASK				0x1
+#define E4_TSTORM_IWARP_CONN_AG_CTX_CF4EN_SHIFT				0
+#define E4_TSTORM_IWARP_CONN_AG_CTX_CF5EN_MASK				0x1
+#define E4_TSTORM_IWARP_CONN_AG_CTX_CF5EN_SHIFT				1
+#define E4_TSTORM_IWARP_CONN_AG_CTX_CF6EN_MASK				0x1
+#define E4_TSTORM_IWARP_CONN_AG_CTX_CF6EN_SHIFT				2
+#define E4_TSTORM_IWARP_CONN_AG_CTX_CF7EN_MASK				0x1
+#define E4_TSTORM_IWARP_CONN_AG_CTX_CF7EN_SHIFT				3
+#define E4_TSTORM_IWARP_CONN_AG_CTX_CF8EN_MASK				0x1
+#define E4_TSTORM_IWARP_CONN_AG_CTX_CF8EN_SHIFT				4
+#define E4_TSTORM_IWARP_CONN_AG_CTX_FLUSH_Q0_AND_TCP_HANDSHAKE_COMPL_EN_MASK 0x1
+#define E4_TSTORM_IWARP_CONN_AG_CTX_FLUSH_Q0_AND_TCP_HANDSHAKE_COMPL_EN_SHIFT 5
+#define E4_TSTORM_IWARP_CONN_AG_CTX_FLUSH_OR_ERROR_DETECTED_EN_MASK	0x1
+#define E4_TSTORM_IWARP_CONN_AG_CTX_FLUSH_OR_ERROR_DETECTED_EN_SHIFT	6
+#define E4_TSTORM_IWARP_CONN_AG_CTX_RULE0EN_MASK			0x1
+#define E4_TSTORM_IWARP_CONN_AG_CTX_RULE0EN_SHIFT			7
 	u8 flags5;
-#define TSTORM_IWARP_CONN_AG_CTX_RULE1EN_MASK	0x1
-#define TSTORM_IWARP_CONN_AG_CTX_RULE1EN_SHIFT	0
-#define TSTORM_IWARP_CONN_AG_CTX_RULE2EN_MASK	0x1
-#define TSTORM_IWARP_CONN_AG_CTX_RULE2EN_SHIFT	1
-#define TSTORM_IWARP_CONN_AG_CTX_RULE3EN_MASK	0x1
-#define TSTORM_IWARP_CONN_AG_CTX_RULE3EN_SHIFT	2
-#define TSTORM_IWARP_CONN_AG_CTX_RULE4EN_MASK	0x1
-#define TSTORM_IWARP_CONN_AG_CTX_RULE4EN_SHIFT	3
-#define TSTORM_IWARP_CONN_AG_CTX_RULE5EN_MASK	0x1
-#define TSTORM_IWARP_CONN_AG_CTX_RULE5EN_SHIFT	4
-#define TSTORM_IWARP_CONN_AG_CTX_SND_SQ_CONS_RULE_MASK	0x1
-#define TSTORM_IWARP_CONN_AG_CTX_SND_SQ_CONS_RULE_SHIFT	5
-#define TSTORM_IWARP_CONN_AG_CTX_RULE7EN_MASK	0x1
-#define TSTORM_IWARP_CONN_AG_CTX_RULE7EN_SHIFT	6
-#define TSTORM_IWARP_CONN_AG_CTX_RULE8EN_MASK	0x1
-#define TSTORM_IWARP_CONN_AG_CTX_RULE8EN_SHIFT	7
+#define E4_TSTORM_IWARP_CONN_AG_CTX_RULE1EN_MASK		0x1
+#define E4_TSTORM_IWARP_CONN_AG_CTX_RULE1EN_SHIFT		0
+#define E4_TSTORM_IWARP_CONN_AG_CTX_RULE2EN_MASK		0x1
+#define E4_TSTORM_IWARP_CONN_AG_CTX_RULE2EN_SHIFT		1
+#define E4_TSTORM_IWARP_CONN_AG_CTX_RULE3EN_MASK		0x1
+#define E4_TSTORM_IWARP_CONN_AG_CTX_RULE3EN_SHIFT		2
+#define E4_TSTORM_IWARP_CONN_AG_CTX_RULE4EN_MASK		0x1
+#define E4_TSTORM_IWARP_CONN_AG_CTX_RULE4EN_SHIFT		3
+#define E4_TSTORM_IWARP_CONN_AG_CTX_RULE5EN_MASK		0x1
+#define E4_TSTORM_IWARP_CONN_AG_CTX_RULE5EN_SHIFT		4
+#define E4_TSTORM_IWARP_CONN_AG_CTX_SND_SQ_CONS_RULE_MASK	0x1
+#define E4_TSTORM_IWARP_CONN_AG_CTX_SND_SQ_CONS_RULE_SHIFT	5
+#define E4_TSTORM_IWARP_CONN_AG_CTX_RULE7EN_MASK		0x1
+#define E4_TSTORM_IWARP_CONN_AG_CTX_RULE7EN_SHIFT		6
+#define E4_TSTORM_IWARP_CONN_AG_CTX_RULE8EN_MASK		0x1
+#define E4_TSTORM_IWARP_CONN_AG_CTX_RULE8EN_SHIFT		7
 	__le32 reg0;
 	__le32 reg1;
 	__le32 unaligned_nxt_seq;
@@ -9001,51 +9600,56 @@ struct tstorm_iwarp_conn_ag_ctx {
 	__le32 last_hq_sequence;
 };
 
+/* The iwarp storm context of Tstorm */
 struct tstorm_iwarp_conn_st_ctx {
 	__le32 reserved[60];
 };
 
+/* The iwarp storm context of Mstorm */
 struct mstorm_iwarp_conn_st_ctx {
 	__le32 reserved[32];
 };
 
+/* The iwarp storm context of Ustorm */
 struct ustorm_iwarp_conn_st_ctx {
 	__le32 reserved[24];
 };
 
-struct iwarp_conn_context {
+/* iwarp connection context */
+struct e4_iwarp_conn_context {
 	struct ystorm_iwarp_conn_st_ctx ystorm_st_context;
 	struct regpair ystorm_st_padding[2];
 	struct pstorm_iwarp_conn_st_ctx pstorm_st_context;
 	struct regpair pstorm_st_padding[2];
 	struct xstorm_iwarp_conn_st_ctx xstorm_st_context;
 	struct regpair xstorm_st_padding[2];
-	struct xstorm_iwarp_conn_ag_ctx xstorm_ag_context;
-	struct tstorm_iwarp_conn_ag_ctx tstorm_ag_context;
+	struct e4_xstorm_iwarp_conn_ag_ctx xstorm_ag_context;
+	struct e4_tstorm_iwarp_conn_ag_ctx tstorm_ag_context;
 	struct timers_context timer_context;
-	struct ustorm_rdma_conn_ag_ctx ustorm_ag_context;
+	struct e4_ustorm_rdma_conn_ag_ctx ustorm_ag_context;
 	struct tstorm_iwarp_conn_st_ctx tstorm_st_context;
 	struct regpair tstorm_st_padding[2];
 	struct mstorm_iwarp_conn_st_ctx mstorm_st_context;
 	struct ustorm_iwarp_conn_st_ctx ustorm_st_context;
 };
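
The flags*/MASK/SHIFT pairs in the E4_* aggregation contexts above are not poked directly; the driver reads and writes them through field-access macros. A minimal standalone sketch of that pattern follows — the GET_FIELD/SET_FIELD bodies here are illustrative equivalents of the qed helpers, not copied from the driver, and the two defines are reproduced from the hunk above:

#include <stdint.h>
#include <stdio.h>

/* Illustrative field-access helpers in the style of qed's
 * GET_FIELD/SET_FIELD: each field name expands to its _MASK
 * and _SHIFT companions.
 */
#define GET_FIELD(value, name) \
	(((value) >> name##_SHIFT) & name##_MASK)
#define SET_FIELD(value, name, flag)                               \
	do {                                                       \
		(value) &= ~(name##_MASK << name##_SHIFT);         \
		(value) |= ((flag) & name##_MASK) << name##_SHIFT; \
	} while (0)

/* Reproduced from the patch above. */
#define E4_TSTORM_IWARP_CONN_AG_CTX_CACHED_ORQ_MASK	0x1
#define E4_TSTORM_IWARP_CONN_AG_CTX_CACHED_ORQ_SHIFT	5

int main(void)
{
	uint8_t flags0 = 0;

	/* Set the CACHED_ORQ bit inside flags0, then read it back. */
	SET_FIELD(flags0, E4_TSTORM_IWARP_CONN_AG_CTX_CACHED_ORQ, 1);
	printf("flags0=0x%02x cached_orq=%u\n", (unsigned int)flags0,
	       (unsigned int)GET_FIELD(flags0,
				       E4_TSTORM_IWARP_CONN_AG_CTX_CACHED_ORQ));
	return 0;
}

The same pattern applies to every *_MASK/*_SHIFT pair in these contexts, including the two-bit CF fields (mask 0x3); only the field prefix changes.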
 
+/* iWARP create QP params passed by driver to FW in CreateQP Request Ramrod */
 struct iwarp_create_qp_ramrod_data {
 	u8 flags;
 #define IWARP_CREATE_QP_RAMROD_DATA_FMR_AND_RESERVED_EN_MASK	0x1
-#define IWARP_CREATE_QP_RAMROD_DATA_FMR_AND_RESERVED_EN_SHIFT 0
-#define IWARP_CREATE_QP_RAMROD_DATA_SIGNALED_COMP_MASK	0x1
-#define IWARP_CREATE_QP_RAMROD_DATA_SIGNALED_COMP_SHIFT	1
-#define IWARP_CREATE_QP_RAMROD_DATA_RDMA_RD_EN_MASK	0x1
-#define IWARP_CREATE_QP_RAMROD_DATA_RDMA_RD_EN_SHIFT	2
-#define IWARP_CREATE_QP_RAMROD_DATA_RDMA_WR_EN_MASK	0x1
-#define IWARP_CREATE_QP_RAMROD_DATA_RDMA_WR_EN_SHIFT	3
-#define IWARP_CREATE_QP_RAMROD_DATA_ATOMIC_EN_MASK	0x1
-#define IWARP_CREATE_QP_RAMROD_DATA_ATOMIC_EN_SHIFT	4
-#define IWARP_CREATE_QP_RAMROD_DATA_SRQ_FLG_MASK	0x1
-#define IWARP_CREATE_QP_RAMROD_DATA_SRQ_FLG_SHIFT	5
-#define IWARP_CREATE_QP_RAMROD_DATA_RESERVED0_MASK	0x3
-#define IWARP_CREATE_QP_RAMROD_DATA_RESERVED0_SHIFT	6
+#define IWARP_CREATE_QP_RAMROD_DATA_FMR_AND_RESERVED_EN_SHIFT	0
+#define IWARP_CREATE_QP_RAMROD_DATA_SIGNALED_COMP_MASK		0x1
+#define IWARP_CREATE_QP_RAMROD_DATA_SIGNALED_COMP_SHIFT		1
+#define IWARP_CREATE_QP_RAMROD_DATA_RDMA_RD_EN_MASK		0x1
+#define IWARP_CREATE_QP_RAMROD_DATA_RDMA_RD_EN_SHIFT		2
+#define IWARP_CREATE_QP_RAMROD_DATA_RDMA_WR_EN_MASK		0x1
+#define IWARP_CREATE_QP_RAMROD_DATA_RDMA_WR_EN_SHIFT		3
+#define IWARP_CREATE_QP_RAMROD_DATA_ATOMIC_EN_MASK		0x1
+#define IWARP_CREATE_QP_RAMROD_DATA_ATOMIC_EN_SHIFT		4
+#define IWARP_CREATE_QP_RAMROD_DATA_SRQ_FLG_MASK		0x1
+#define IWARP_CREATE_QP_RAMROD_DATA_SRQ_FLG_SHIFT		5
+#define IWARP_CREATE_QP_RAMROD_DATA_RESERVED0_MASK		0x3
+#define IWARP_CREATE_QP_RAMROD_DATA_RESERVED0_SHIFT		6
 	u8 reserved1;
 	__le16 pd;
 	__le16 sq_num_pages;
@@ -9061,6 +9665,7 @@ struct iwarp_create_qp_ramrod_data {
 	u8 reserved2[6];
 };
 
+/* iWARP async EQE (event queue element) opcodes */
 enum iwarp_eqe_async_opcode {
 	IWARP_EVENT_TYPE_ASYNC_CONNECT_COMPLETE,
 	IWARP_EVENT_TYPE_ASYNC_ENHANCED_MPA_REPLY_ARRIVED,
@@ -9083,6 +9688,7 @@ struct iwarp_eqe_data_tcp_async_completion {
 	u8 reserved[5];
 };
 
+/* iWARP sync EQE (ramrod completion) opcodes */
 enum iwarp_eqe_sync_opcode {
 	IWARP_EVENT_TYPE_TCP_OFFLOAD =
 	11,
@@ -9095,6 +9701,7 @@ enum iwarp_eqe_sync_opcode {
 	MAX_IWARP_EQE_SYNC_OPCODE
 };
 
+/* iWARP EQE completion status */
 enum iwarp_fw_return_code {
 	IWARP_CONN_ERROR_TCP_CONNECT_INVALID_PACKET = 5,
 	IWARP_CONN_ERROR_TCP_CONNECTION_RST,
@@ -9125,54 +9732,60 @@ enum iwarp_fw_return_code {
 	MAX_IWARP_FW_RETURN_CODE
 };
 
+/* iWARP function init params: LL2 OOO queue index */
 struct iwarp_init_func_params {
 	u8 ll2_ooo_q_index;
 	u8 reserved1[7];
 };
 
+/* iwarp func init ramrod data */
 struct iwarp_init_func_ramrod_data {
 	struct rdma_init_func_ramrod_data rdma;
 	struct tcp_init_params tcp;
 	struct iwarp_init_func_params iwarp;
 };
 
+/* iWARP QP - possible states to transition to */
 enum iwarp_modify_qp_new_state_type {
 	IWARP_MODIFY_QP_STATE_CLOSING = 1,
-	IWARP_MODIFY_QP_STATE_ERROR =
-	2,
+	IWARP_MODIFY_QP_STATE_ERROR = 2,
 	MAX_IWARP_MODIFY_QP_NEW_STATE_TYPE
 };
 
+/* iwarp modify qp ramrod data */
 struct iwarp_modify_qp_ramrod_data {
 	__le16 transition_to_state;
 	__le16 flags;
-#define IWARP_MODIFY_QP_RAMROD_DATA_RDMA_RD_EN_MASK	0x1
-#define IWARP_MODIFY_QP_RAMROD_DATA_RDMA_RD_EN_SHIFT	0
-#define IWARP_MODIFY_QP_RAMROD_DATA_RDMA_WR_EN_MASK	0x1
-#define IWARP_MODIFY_QP_RAMROD_DATA_RDMA_WR_EN_SHIFT	1
-#define IWARP_MODIFY_QP_RAMROD_DATA_ATOMIC_EN_MASK	0x1
-#define IWARP_MODIFY_QP_RAMROD_DATA_ATOMIC_EN_SHIFT	2
-#define IWARP_MODIFY_QP_RAMROD_DATA_STATE_TRANS_EN_MASK	0x1
+#define IWARP_MODIFY_QP_RAMROD_DATA_RDMA_RD_EN_MASK		0x1
+#define IWARP_MODIFY_QP_RAMROD_DATA_RDMA_RD_EN_SHIFT		0
+#define IWARP_MODIFY_QP_RAMROD_DATA_RDMA_WR_EN_MASK		0x1
+#define IWARP_MODIFY_QP_RAMROD_DATA_RDMA_WR_EN_SHIFT		1
+#define IWARP_MODIFY_QP_RAMROD_DATA_ATOMIC_EN_MASK		0x1
+#define IWARP_MODIFY_QP_RAMROD_DATA_ATOMIC_EN_SHIFT		2
+#define IWARP_MODIFY_QP_RAMROD_DATA_STATE_TRANS_EN_MASK		0x1
 #define IWARP_MODIFY_QP_RAMROD_DATA_STATE_TRANS_EN_SHIFT	3
 #define IWARP_MODIFY_QP_RAMROD_DATA_RDMA_OPS_EN_FLG_MASK	0x1
-#define IWARP_MODIFY_QP_RAMROD_DATA_RDMA_OPS_EN_FLG_SHIFT 4
-#define IWARP_MODIFY_QP_RAMROD_DATA_RESERVED_MASK	0x7FF
-#define IWARP_MODIFY_QP_RAMROD_DATA_RESERVED_SHIFT	5
+#define IWARP_MODIFY_QP_RAMROD_DATA_RDMA_OPS_EN_FLG_SHIFT	4
+#define IWARP_MODIFY_QP_RAMROD_DATA_RESERVED_MASK		0x7FF
+#define IWARP_MODIFY_QP_RAMROD_DATA_RESERVED_SHIFT		5
 	__le32 reserved3[3];
 	__le32 reserved4[8];
 };
 
+/* MPA params for Enhanced mode */
 struct mpa_rq_params {
 	__le32 ird;
 	__le32 ord;
 };
 
+/* MPA host buffer address/length for private data */
 struct mpa_ulp_buffer {
 	struct regpair addr;
 	__le16 len;
 	__le16 reserved[3];
 };
 
+/* iWARP MPA offload params common to Basic and Enhanced modes */
 struct mpa_outgoing_params {
 	u8 crc_needed;
 	u8 reject;
@@ -9181,6 +9794,9 @@ struct mpa_outgoing_params {
 	struct mpa_ulp_buffer outgoing_ulp_buffer;
 };
 
+/* iWARP MPA offload params passed by driver to FW in MPA Offload Request
+ * Ramrod.
+ */
 struct iwarp_mpa_offload_ramrod_data {
 	struct mpa_outgoing_params common;
 	__le32 tcp_cid;
@@ -9188,18 +9804,20 @@ struct iwarp_mpa_offload_ramrod_data {
 	u8 tcp_connect_side;
 	u8 rtr_pref;
 #define IWARP_MPA_OFFLOAD_RAMROD_DATA_RTR_SUPPORTED_MASK	0x7
-#define IWARP_MPA_OFFLOAD_RAMROD_DATA_RTR_SUPPORTED_SHIFT 0
-#define IWARP_MPA_OFFLOAD_RAMROD_DATA_RESERVED1_MASK	0x1F
-#define IWARP_MPA_OFFLOAD_RAMROD_DATA_RESERVED1_SHIFT	3
+#define IWARP_MPA_OFFLOAD_RAMROD_DATA_RTR_SUPPORTED_SHIFT	0
+#define IWARP_MPA_OFFLOAD_RAMROD_DATA_RESERVED1_MASK		0x1F
+#define IWARP_MPA_OFFLOAD_RAMROD_DATA_RESERVED1_SHIFT		3
 	u8 reserved2;
 	struct mpa_ulp_buffer incoming_ulp_buffer;
 	struct regpair async_eqe_output_buf;
 	struct regpair handle_for_async;
 	struct regpair shared_queue_addr;
+	__le16 rcv_wnd;
 	u8 stats_counter_id;
-	u8 reserved3[15];
+	u8 reserved3[13];
 };
 
+/* iWARP TCP connection offload params passed by driver to FW */
 struct iwarp_offload_params {
 	struct mpa_ulp_buffer incoming_ulp_buffer;
 	struct regpair async_eqe_output_buf;
@@ -9211,22 +9829,24 @@ struct iwarp_offload_params {
 	u8 reserved[10];
 };
 
+/* iWARP query QP output params */
 struct iwarp_query_qp_output_params {
 	__le32 flags;
 #define IWARP_QUERY_QP_OUTPUT_PARAMS_ERROR_FLG_MASK	0x1
-#define IWARP_QUERY_QP_OUTPUT_PARAMS_ERROR_FLG_SHIFT 0
+#define IWARP_QUERY_QP_OUTPUT_PARAMS_ERROR_FLG_SHIFT	0
 #define IWARP_QUERY_QP_OUTPUT_PARAMS_RESERVED0_MASK	0x7FFFFFFF
-#define IWARP_QUERY_QP_OUTPUT_PARAMS_RESERVED0_SHIFT 1
+#define IWARP_QUERY_QP_OUTPUT_PARAMS_RESERVED0_SHIFT	1
 	u8 reserved1[4];
 };
 
+/* iWARP query QP ramrod data */
 struct iwarp_query_qp_ramrod_data {
 	struct regpair output_params_addr;
 };
 
+/* iWARP Ramrod Command IDs */
 enum iwarp_ramrod_cmd_id {
-	IWARP_RAMROD_CMD_ID_TCP_OFFLOAD =
-	11,
+	IWARP_RAMROD_CMD_ID_TCP_OFFLOAD = 11,
 	IWARP_RAMROD_CMD_ID_MPA_OFFLOAD,
 	IWARP_RAMROD_CMD_ID_MPA_OFFLOAD_SEND_RTR,
 	IWARP_RAMROD_CMD_ID_CREATE_QP,
@@ -9236,22 +9856,28 @@ enum iwarp_ramrod_cmd_id {
 	MAX_IWARP_RAMROD_CMD_ID
 };
 
+/* Per PF iWARP retransmit path statistics */
 struct iwarp_rxmit_stats_drv {
 	struct regpair tx_go_to_slow_start_event_cnt;
 	struct regpair tx_fast_retransmit_event_cnt;
 };
 
+/* iWARP and TCP connection offload params passed by driver to FW in iWARP
+ * offload ramrod.
+ */
 struct iwarp_tcp_offload_ramrod_data {
 	struct iwarp_offload_params iwarp;
 	struct tcp_offload_params_opt2 tcp;
 };
 
+/* iWARP MPA negotiation types */
 enum mpa_negotiation_mode {
 	MPA_NEGOTIATION_TYPE_BASIC = 1,
 	MPA_NEGOTIATION_TYPE_ENHANCED = 2,
 	MAX_MPA_NEGOTIATION_MODE
 };
 
+/* iWARP MPA Enhanced mode RTR types */
 enum mpa_rtr_type {
 	MPA_RTR_TYPE_NONE = 0,
 	MPA_RTR_TYPE_ZERO_SEND = 1,
@@ -9264,113 +9890,114 @@ enum mpa_rtr_type {
 	MAX_MPA_RTR_TYPE
 };
 
+/* unaligned opaque data received from LL2 */
 struct unaligned_opaque_data {
 	__le16 first_mpa_offset;
 	u8 tcp_payload_offset;
 	u8 flags;
 #define UNALIGNED_OPAQUE_DATA_PKT_REACHED_WIN_RIGHT_EDGE_MASK	0x1
-#define UNALIGNED_OPAQUE_DATA_PKT_REACHED_WIN_RIGHT_EDGE_SHIFT 0
-#define UNALIGNED_OPAQUE_DATA_CONNECTION_CLOSED_MASK	0x1
-#define UNALIGNED_OPAQUE_DATA_CONNECTION_CLOSED_SHIFT	1
-#define UNALIGNED_OPAQUE_DATA_RESERVED_MASK	0x3F
-#define UNALIGNED_OPAQUE_DATA_RESERVED_SHIFT	2
+#define UNALIGNED_OPAQUE_DATA_PKT_REACHED_WIN_RIGHT_EDGE_SHIFT	0
+#define UNALIGNED_OPAQUE_DATA_CONNECTION_CLOSED_MASK		0x1
+#define UNALIGNED_OPAQUE_DATA_CONNECTION_CLOSED_SHIFT		1
+#define UNALIGNED_OPAQUE_DATA_RESERVED_MASK			0x3F
+#define UNALIGNED_OPAQUE_DATA_RESERVED_SHIFT			2
 	__le32 cid;
 };
 
-struct mstorm_iwarp_conn_ag_ctx {
+struct e4_mstorm_iwarp_conn_ag_ctx {
 	u8 reserved;
 	u8 state;
 	u8 flags0;
-#define MSTORM_IWARP_CONN_AG_CTX_EXIST_IN_QM0_MASK	0x1
-#define MSTORM_IWARP_CONN_AG_CTX_EXIST_IN_QM0_SHIFT	0
-#define MSTORM_IWARP_CONN_AG_CTX_BIT1_MASK	0x1
-#define MSTORM_IWARP_CONN_AG_CTX_BIT1_SHIFT	1
-#define MSTORM_IWARP_CONN_AG_CTX_INV_STAG_DONE_CF_MASK	0x3
-#define MSTORM_IWARP_CONN_AG_CTX_INV_STAG_DONE_CF_SHIFT	2
-#define MSTORM_IWARP_CONN_AG_CTX_CF1_MASK	0x3
-#define MSTORM_IWARP_CONN_AG_CTX_CF1_SHIFT	4
-#define MSTORM_IWARP_CONN_AG_CTX_CF2_MASK	0x3
-#define MSTORM_IWARP_CONN_AG_CTX_CF2_SHIFT	6
+#define E4_MSTORM_IWARP_CONN_AG_CTX_EXIST_IN_QM0_MASK		0x1
+#define E4_MSTORM_IWARP_CONN_AG_CTX_EXIST_IN_QM0_SHIFT		0
+#define E4_MSTORM_IWARP_CONN_AG_CTX_BIT1_MASK			0x1
+#define E4_MSTORM_IWARP_CONN_AG_CTX_BIT1_SHIFT			1
+#define E4_MSTORM_IWARP_CONN_AG_CTX_INV_STAG_DONE_CF_MASK	0x3
+#define E4_MSTORM_IWARP_CONN_AG_CTX_INV_STAG_DONE_CF_SHIFT	2
+#define E4_MSTORM_IWARP_CONN_AG_CTX_CF1_MASK			0x3
+#define E4_MSTORM_IWARP_CONN_AG_CTX_CF1_SHIFT			4
+#define E4_MSTORM_IWARP_CONN_AG_CTX_CF2_MASK			0x3
+#define E4_MSTORM_IWARP_CONN_AG_CTX_CF2_SHIFT			6
 	u8 flags1;
-#define MSTORM_IWARP_CONN_AG_CTX_INV_STAG_DONE_CF_EN_MASK	0x1
-#define MSTORM_IWARP_CONN_AG_CTX_INV_STAG_DONE_CF_EN_SHIFT 0
-#define MSTORM_IWARP_CONN_AG_CTX_CF1EN_MASK	0x1
-#define MSTORM_IWARP_CONN_AG_CTX_CF1EN_SHIFT	1
-#define MSTORM_IWARP_CONN_AG_CTX_CF2EN_MASK	0x1
-#define MSTORM_IWARP_CONN_AG_CTX_CF2EN_SHIFT	2
-#define MSTORM_IWARP_CONN_AG_CTX_RULE0EN_MASK	0x1
-#define MSTORM_IWARP_CONN_AG_CTX_RULE0EN_SHIFT	3
-#define MSTORM_IWARP_CONN_AG_CTX_RULE1EN_MASK	0x1
-#define MSTORM_IWARP_CONN_AG_CTX_RULE1EN_SHIFT	4
-#define MSTORM_IWARP_CONN_AG_CTX_RULE2EN_MASK	0x1
-#define MSTORM_IWARP_CONN_AG_CTX_RULE2EN_SHIFT	5
-#define MSTORM_IWARP_CONN_AG_CTX_RCQ_CONS_EN_MASK	0x1
-#define MSTORM_IWARP_CONN_AG_CTX_RCQ_CONS_EN_SHIFT	6
-#define MSTORM_IWARP_CONN_AG_CTX_RULE4EN_MASK	0x1
-#define MSTORM_IWARP_CONN_AG_CTX_RULE4EN_SHIFT	7
+#define E4_MSTORM_IWARP_CONN_AG_CTX_INV_STAG_DONE_CF_EN_MASK	0x1
+#define E4_MSTORM_IWARP_CONN_AG_CTX_INV_STAG_DONE_CF_EN_SHIFT	0
+#define E4_MSTORM_IWARP_CONN_AG_CTX_CF1EN_MASK			0x1
+#define E4_MSTORM_IWARP_CONN_AG_CTX_CF1EN_SHIFT			1
+#define E4_MSTORM_IWARP_CONN_AG_CTX_CF2EN_MASK			0x1
+#define E4_MSTORM_IWARP_CONN_AG_CTX_CF2EN_SHIFT			2
+#define E4_MSTORM_IWARP_CONN_AG_CTX_RULE0EN_MASK		0x1
+#define E4_MSTORM_IWARP_CONN_AG_CTX_RULE0EN_SHIFT		3
+#define E4_MSTORM_IWARP_CONN_AG_CTX_RULE1EN_MASK		0x1
+#define E4_MSTORM_IWARP_CONN_AG_CTX_RULE1EN_SHIFT		4
+#define E4_MSTORM_IWARP_CONN_AG_CTX_RULE2EN_MASK		0x1
+#define E4_MSTORM_IWARP_CONN_AG_CTX_RULE2EN_SHIFT		5
+#define E4_MSTORM_IWARP_CONN_AG_CTX_RCQ_CONS_EN_MASK		0x1
+#define E4_MSTORM_IWARP_CONN_AG_CTX_RCQ_CONS_EN_SHIFT		6
+#define E4_MSTORM_IWARP_CONN_AG_CTX_RULE4EN_MASK		0x1
+#define E4_MSTORM_IWARP_CONN_AG_CTX_RULE4EN_SHIFT		7
 	__le16 rcq_cons;
 	__le16 rcq_cons_th;
 	__le32 reg0;
 	__le32 reg1;
 };
 
-struct ustorm_iwarp_conn_ag_ctx {
+struct e4_ustorm_iwarp_conn_ag_ctx {
 	u8 reserved;
 	u8 byte1;
 	u8 flags0;
-#define USTORM_IWARP_CONN_AG_CTX_EXIST_IN_QM0_MASK	0x1
-#define USTORM_IWARP_CONN_AG_CTX_EXIST_IN_QM0_SHIFT	0
-#define USTORM_IWARP_CONN_AG_CTX_BIT1_MASK	0x1
-#define USTORM_IWARP_CONN_AG_CTX_BIT1_SHIFT	1
-#define USTORM_IWARP_CONN_AG_CTX_CF0_MASK	0x3
-#define USTORM_IWARP_CONN_AG_CTX_CF0_SHIFT	2
-#define USTORM_IWARP_CONN_AG_CTX_CF1_MASK	0x3
-#define USTORM_IWARP_CONN_AG_CTX_CF1_SHIFT	4
-#define USTORM_IWARP_CONN_AG_CTX_CF2_MASK	0x3
-#define USTORM_IWARP_CONN_AG_CTX_CF2_SHIFT	6
+#define E4_USTORM_IWARP_CONN_AG_CTX_EXIST_IN_QM0_MASK	0x1
+#define E4_USTORM_IWARP_CONN_AG_CTX_EXIST_IN_QM0_SHIFT	0
+#define E4_USTORM_IWARP_CONN_AG_CTX_BIT1_MASK		0x1
+#define E4_USTORM_IWARP_CONN_AG_CTX_BIT1_SHIFT		1
+#define E4_USTORM_IWARP_CONN_AG_CTX_CF0_MASK		0x3
+#define E4_USTORM_IWARP_CONN_AG_CTX_CF0_SHIFT		2
+#define E4_USTORM_IWARP_CONN_AG_CTX_CF1_MASK		0x3
+#define E4_USTORM_IWARP_CONN_AG_CTX_CF1_SHIFT		4
+#define E4_USTORM_IWARP_CONN_AG_CTX_CF2_MASK		0x3
+#define E4_USTORM_IWARP_CONN_AG_CTX_CF2_SHIFT		6
 	u8 flags1;
-#define USTORM_IWARP_CONN_AG_CTX_CF3_MASK	0x3
-#define USTORM_IWARP_CONN_AG_CTX_CF3_SHIFT	0
-#define USTORM_IWARP_CONN_AG_CTX_CQ_ARM_SE_CF_MASK	0x3
-#define USTORM_IWARP_CONN_AG_CTX_CQ_ARM_SE_CF_SHIFT	2
-#define USTORM_IWARP_CONN_AG_CTX_CQ_ARM_CF_MASK	0x3
-#define USTORM_IWARP_CONN_AG_CTX_CQ_ARM_CF_SHIFT	4
-#define USTORM_IWARP_CONN_AG_CTX_CF6_MASK	0x3
-#define USTORM_IWARP_CONN_AG_CTX_CF6_SHIFT	6
+#define E4_USTORM_IWARP_CONN_AG_CTX_CF3_MASK		0x3
+#define E4_USTORM_IWARP_CONN_AG_CTX_CF3_SHIFT		0
+#define E4_USTORM_IWARP_CONN_AG_CTX_CQ_ARM_SE_CF_MASK	0x3
+#define E4_USTORM_IWARP_CONN_AG_CTX_CQ_ARM_SE_CF_SHIFT	2
+#define E4_USTORM_IWARP_CONN_AG_CTX_CQ_ARM_CF_MASK	0x3
+#define E4_USTORM_IWARP_CONN_AG_CTX_CQ_ARM_CF_SHIFT	4
+#define E4_USTORM_IWARP_CONN_AG_CTX_CF6_MASK		0x3
+#define E4_USTORM_IWARP_CONN_AG_CTX_CF6_SHIFT		6
 	u8 flags2;
-#define USTORM_IWARP_CONN_AG_CTX_CF0EN_MASK	0x1
-#define USTORM_IWARP_CONN_AG_CTX_CF0EN_SHIFT	0
-#define USTORM_IWARP_CONN_AG_CTX_CF1EN_MASK	0x1
-#define USTORM_IWARP_CONN_AG_CTX_CF1EN_SHIFT	1
-#define USTORM_IWARP_CONN_AG_CTX_CF2EN_MASK	0x1
-#define USTORM_IWARP_CONN_AG_CTX_CF2EN_SHIFT	2
-#define USTORM_IWARP_CONN_AG_CTX_CF3EN_MASK	0x1
-#define USTORM_IWARP_CONN_AG_CTX_CF3EN_SHIFT	3
-#define USTORM_IWARP_CONN_AG_CTX_CQ_ARM_SE_CF_EN_MASK	0x1
-#define USTORM_IWARP_CONN_AG_CTX_CQ_ARM_SE_CF_EN_SHIFT 4
-#define USTORM_IWARP_CONN_AG_CTX_CQ_ARM_CF_EN_MASK	0x1
-#define USTORM_IWARP_CONN_AG_CTX_CQ_ARM_CF_EN_SHIFT	5
-#define USTORM_IWARP_CONN_AG_CTX_CF6EN_MASK	0x1
-#define USTORM_IWARP_CONN_AG_CTX_CF6EN_SHIFT	6
-#define USTORM_IWARP_CONN_AG_CTX_CQ_SE_EN_MASK	0x1
-#define USTORM_IWARP_CONN_AG_CTX_CQ_SE_EN_SHIFT	7
+#define E4_USTORM_IWARP_CONN_AG_CTX_CF0EN_MASK			0x1
+#define E4_USTORM_IWARP_CONN_AG_CTX_CF0EN_SHIFT			0
+#define E4_USTORM_IWARP_CONN_AG_CTX_CF1EN_MASK			0x1
+#define E4_USTORM_IWARP_CONN_AG_CTX_CF1EN_SHIFT			1
+#define E4_USTORM_IWARP_CONN_AG_CTX_CF2EN_MASK			0x1
+#define E4_USTORM_IWARP_CONN_AG_CTX_CF2EN_SHIFT			2
+#define E4_USTORM_IWARP_CONN_AG_CTX_CF3EN_MASK			0x1
+#define E4_USTORM_IWARP_CONN_AG_CTX_CF3EN_SHIFT			3
+#define E4_USTORM_IWARP_CONN_AG_CTX_CQ_ARM_SE_CF_EN_MASK	0x1
+#define E4_USTORM_IWARP_CONN_AG_CTX_CQ_ARM_SE_CF_EN_SHIFT	4
+#define E4_USTORM_IWARP_CONN_AG_CTX_CQ_ARM_CF_EN_MASK		0x1
+#define E4_USTORM_IWARP_CONN_AG_CTX_CQ_ARM_CF_EN_SHIFT		5
+#define E4_USTORM_IWARP_CONN_AG_CTX_CF6EN_MASK			0x1
+#define E4_USTORM_IWARP_CONN_AG_CTX_CF6EN_SHIFT			6
+#define E4_USTORM_IWARP_CONN_AG_CTX_CQ_SE_EN_MASK		0x1
+#define E4_USTORM_IWARP_CONN_AG_CTX_CQ_SE_EN_SHIFT		7
 	u8 flags3;
-#define USTORM_IWARP_CONN_AG_CTX_CQ_EN_MASK	0x1
-#define USTORM_IWARP_CONN_AG_CTX_CQ_EN_SHIFT	0
-#define USTORM_IWARP_CONN_AG_CTX_RULE2EN_MASK	0x1
-#define USTORM_IWARP_CONN_AG_CTX_RULE2EN_SHIFT	1
-#define USTORM_IWARP_CONN_AG_CTX_RULE3EN_MASK	0x1
-#define USTORM_IWARP_CONN_AG_CTX_RULE3EN_SHIFT	2
-#define USTORM_IWARP_CONN_AG_CTX_RULE4EN_MASK	0x1
-#define USTORM_IWARP_CONN_AG_CTX_RULE4EN_SHIFT	3
-#define USTORM_IWARP_CONN_AG_CTX_RULE5EN_MASK	0x1
-#define USTORM_IWARP_CONN_AG_CTX_RULE5EN_SHIFT	4
-#define USTORM_IWARP_CONN_AG_CTX_RULE6EN_MASK	0x1
-#define USTORM_IWARP_CONN_AG_CTX_RULE6EN_SHIFT	5
-#define USTORM_IWARP_CONN_AG_CTX_RULE7EN_MASK	0x1
-#define USTORM_IWARP_CONN_AG_CTX_RULE7EN_SHIFT	6
-#define USTORM_IWARP_CONN_AG_CTX_RULE8EN_MASK	0x1
-#define USTORM_IWARP_CONN_AG_CTX_RULE8EN_SHIFT	7
+#define E4_USTORM_IWARP_CONN_AG_CTX_CQ_EN_MASK		0x1
+#define E4_USTORM_IWARP_CONN_AG_CTX_CQ_EN_SHIFT		0
+#define E4_USTORM_IWARP_CONN_AG_CTX_RULE2EN_MASK	0x1
+#define E4_USTORM_IWARP_CONN_AG_CTX_RULE2EN_SHIFT	1
+#define E4_USTORM_IWARP_CONN_AG_CTX_RULE3EN_MASK	0x1
+#define E4_USTORM_IWARP_CONN_AG_CTX_RULE3EN_SHIFT	2
+#define E4_USTORM_IWARP_CONN_AG_CTX_RULE4EN_MASK	0x1
+#define E4_USTORM_IWARP_CONN_AG_CTX_RULE4EN_SHIFT	3
+#define E4_USTORM_IWARP_CONN_AG_CTX_RULE5EN_MASK	0x1
+#define E4_USTORM_IWARP_CONN_AG_CTX_RULE5EN_SHIFT	4
+#define E4_USTORM_IWARP_CONN_AG_CTX_RULE6EN_MASK	0x1
+#define E4_USTORM_IWARP_CONN_AG_CTX_RULE6EN_SHIFT	5
+#define E4_USTORM_IWARP_CONN_AG_CTX_RULE7EN_MASK	0x1
+#define E4_USTORM_IWARP_CONN_AG_CTX_RULE7EN_SHIFT	6
+#define E4_USTORM_IWARP_CONN_AG_CTX_RULE8EN_MASK	0x1
+#define E4_USTORM_IWARP_CONN_AG_CTX_RULE8EN_SHIFT	7
 	u8 byte2;
 	u8 byte3;
 	__le16 word0;
@@ -9383,37 +10010,37 @@ struct ustorm_iwarp_conn_ag_ctx {
 	__le16 word3;
 };
 
-struct ystorm_iwarp_conn_ag_ctx {
+struct e4_ystorm_iwarp_conn_ag_ctx {
 	u8 byte0;
 	u8 byte1;
 	u8 flags0;
-#define YSTORM_IWARP_CONN_AG_CTX_BIT0_MASK	0x1
-#define YSTORM_IWARP_CONN_AG_CTX_BIT0_SHIFT	0
-#define YSTORM_IWARP_CONN_AG_CTX_BIT1_MASK	0x1
-#define YSTORM_IWARP_CONN_AG_CTX_BIT1_SHIFT	1
-#define YSTORM_IWARP_CONN_AG_CTX_CF0_MASK	0x3
-#define YSTORM_IWARP_CONN_AG_CTX_CF0_SHIFT	2
-#define YSTORM_IWARP_CONN_AG_CTX_CF1_MASK	0x3
-#define YSTORM_IWARP_CONN_AG_CTX_CF1_SHIFT	4
-#define YSTORM_IWARP_CONN_AG_CTX_CF2_MASK	0x3
-#define YSTORM_IWARP_CONN_AG_CTX_CF2_SHIFT	6
+#define E4_YSTORM_IWARP_CONN_AG_CTX_BIT0_MASK	0x1
+#define E4_YSTORM_IWARP_CONN_AG_CTX_BIT0_SHIFT	0
+#define E4_YSTORM_IWARP_CONN_AG_CTX_BIT1_MASK	0x1
+#define E4_YSTORM_IWARP_CONN_AG_CTX_BIT1_SHIFT	1
+#define E4_YSTORM_IWARP_CONN_AG_CTX_CF0_MASK	0x3
+#define E4_YSTORM_IWARP_CONN_AG_CTX_CF0_SHIFT	2
+#define E4_YSTORM_IWARP_CONN_AG_CTX_CF1_MASK	0x3
+#define E4_YSTORM_IWARP_CONN_AG_CTX_CF1_SHIFT	4
+#define E4_YSTORM_IWARP_CONN_AG_CTX_CF2_MASK	0x3
+#define E4_YSTORM_IWARP_CONN_AG_CTX_CF2_SHIFT	6
 	u8 flags1;
-#define YSTORM_IWARP_CONN_AG_CTX_CF0EN_MASK	0x1
-#define YSTORM_IWARP_CONN_AG_CTX_CF0EN_SHIFT	0
-#define YSTORM_IWARP_CONN_AG_CTX_CF1EN_MASK	0x1
-#define YSTORM_IWARP_CONN_AG_CTX_CF1EN_SHIFT	1
-#define YSTORM_IWARP_CONN_AG_CTX_CF2EN_MASK	0x1
-#define YSTORM_IWARP_CONN_AG_CTX_CF2EN_SHIFT	2
-#define YSTORM_IWARP_CONN_AG_CTX_RULE0EN_MASK	0x1
-#define YSTORM_IWARP_CONN_AG_CTX_RULE0EN_SHIFT 3
-#define YSTORM_IWARP_CONN_AG_CTX_RULE1EN_MASK	0x1
-#define YSTORM_IWARP_CONN_AG_CTX_RULE1EN_SHIFT 4
-#define YSTORM_IWARP_CONN_AG_CTX_RULE2EN_MASK	0x1
-#define YSTORM_IWARP_CONN_AG_CTX_RULE2EN_SHIFT 5
-#define YSTORM_IWARP_CONN_AG_CTX_RULE3EN_MASK	0x1
-#define YSTORM_IWARP_CONN_AG_CTX_RULE3EN_SHIFT 6
-#define YSTORM_IWARP_CONN_AG_CTX_RULE4EN_MASK	0x1
-#define YSTORM_IWARP_CONN_AG_CTX_RULE4EN_SHIFT 7
+#define E4_YSTORM_IWARP_CONN_AG_CTX_CF0EN_MASK		0x1
+#define E4_YSTORM_IWARP_CONN_AG_CTX_CF0EN_SHIFT		0
+#define E4_YSTORM_IWARP_CONN_AG_CTX_CF1EN_MASK		0x1
+#define E4_YSTORM_IWARP_CONN_AG_CTX_CF1EN_SHIFT		1
+#define E4_YSTORM_IWARP_CONN_AG_CTX_CF2EN_MASK		0x1
+#define E4_YSTORM_IWARP_CONN_AG_CTX_CF2EN_SHIFT		2
+#define E4_YSTORM_IWARP_CONN_AG_CTX_RULE0EN_MASK	0x1
+#define E4_YSTORM_IWARP_CONN_AG_CTX_RULE0EN_SHIFT	3
+#define E4_YSTORM_IWARP_CONN_AG_CTX_RULE1EN_MASK	0x1
+#define E4_YSTORM_IWARP_CONN_AG_CTX_RULE1EN_SHIFT	4
+#define E4_YSTORM_IWARP_CONN_AG_CTX_RULE2EN_MASK	0x1
+#define E4_YSTORM_IWARP_CONN_AG_CTX_RULE2EN_SHIFT	5
+#define E4_YSTORM_IWARP_CONN_AG_CTX_RULE3EN_MASK	0x1
+#define E4_YSTORM_IWARP_CONN_AG_CTX_RULE3EN_SHIFT	6
+#define E4_YSTORM_IWARP_CONN_AG_CTX_RULE4EN_MASK	0x1
+#define E4_YSTORM_IWARP_CONN_AG_CTX_RULE4EN_SHIFT	7
 	u8 byte2;
 	u8 byte3;
 	__le16 word0;
@@ -9427,6 +10054,7 @@ struct ystorm_iwarp_conn_ag_ctx {
 	__le32 reg3;
 };
 
+/* The fcoe storm context of Ystorm */
 struct ystorm_fcoe_conn_st_ctx {
 	u8 func_mode;
 	u8 cos;
@@ -9442,45 +10070,49 @@ struct ystorm_fcoe_conn_st_ctx {
 	struct regpair reserved;
 	__le16 min_frame_size;
 	u8 protection_info_flags;
-#define YSTORM_FCOE_CONN_ST_CTX_SUPPORT_PROTECTION_MASK  0x1
-#define YSTORM_FCOE_CONN_ST_CTX_SUPPORT_PROTECTION_SHIFT 0
-#define YSTORM_FCOE_CONN_ST_CTX_VALID_MASK               0x1
-#define YSTORM_FCOE_CONN_ST_CTX_VALID_SHIFT              1
-#define YSTORM_FCOE_CONN_ST_CTX_RESERVED1_MASK           0x3F
-#define YSTORM_FCOE_CONN_ST_CTX_RESERVED1_SHIFT          2
+#define YSTORM_FCOE_CONN_ST_CTX_SUPPORT_PROTECTION_MASK		0x1
+#define YSTORM_FCOE_CONN_ST_CTX_SUPPORT_PROTECTION_SHIFT	0
+#define YSTORM_FCOE_CONN_ST_CTX_VALID_MASK			0x1
+#define YSTORM_FCOE_CONN_ST_CTX_VALID_SHIFT			1
+#define YSTORM_FCOE_CONN_ST_CTX_RESERVED1_MASK			0x3F
+#define YSTORM_FCOE_CONN_ST_CTX_RESERVED1_SHIFT			2
 	u8 dst_protection_per_mss;
 	u8 src_protection_per_mss;
 	u8 ptu_log_page_size;
 	u8 flags;
-#define YSTORM_FCOE_CONN_ST_CTX_INNER_VLAN_FLAG_MASK     0x1
-#define YSTORM_FCOE_CONN_ST_CTX_INNER_VLAN_FLAG_SHIFT    0
-#define YSTORM_FCOE_CONN_ST_CTX_OUTER_VLAN_FLAG_MASK     0x1
-#define YSTORM_FCOE_CONN_ST_CTX_OUTER_VLAN_FLAG_SHIFT    1
-#define YSTORM_FCOE_CONN_ST_CTX_RSRV_MASK                0x3F
-#define YSTORM_FCOE_CONN_ST_CTX_RSRV_SHIFT               2
+#define YSTORM_FCOE_CONN_ST_CTX_INNER_VLAN_FLAG_MASK	0x1
+#define YSTORM_FCOE_CONN_ST_CTX_INNER_VLAN_FLAG_SHIFT	0
+#define YSTORM_FCOE_CONN_ST_CTX_OUTER_VLAN_FLAG_MASK	0x1
+#define YSTORM_FCOE_CONN_ST_CTX_OUTER_VLAN_FLAG_SHIFT	1
+#define YSTORM_FCOE_CONN_ST_CTX_RSRV_MASK		0x3F
+#define YSTORM_FCOE_CONN_ST_CTX_RSRV_SHIFT		2
 	u8 fcp_xfer_size;
 };
 
+/* FCoE 16-bit VLAN structure */
 struct fcoe_vlan_fields {
 	__le16 fields;
-#define FCOE_VLAN_FIELDS_VID_MASK  0xFFF
-#define FCOE_VLAN_FIELDS_VID_SHIFT 0
-#define FCOE_VLAN_FIELDS_CLI_MASK  0x1
-#define FCOE_VLAN_FIELDS_CLI_SHIFT 12
-#define FCOE_VLAN_FIELDS_PRI_MASK  0x7
-#define FCOE_VLAN_FIELDS_PRI_SHIFT 13
+#define FCOE_VLAN_FIELDS_VID_MASK	0xFFF
+#define FCOE_VLAN_FIELDS_VID_SHIFT	0
+#define FCOE_VLAN_FIELDS_CLI_MASK	0x1
+#define FCOE_VLAN_FIELDS_CLI_SHIFT	12
+#define FCOE_VLAN_FIELDS_PRI_MASK	0x7
+#define FCOE_VLAN_FIELDS_PRI_SHIFT	13
 };
 
+/* FCoE 16-bit VLAN union */
 union fcoe_vlan_field_union {
 	struct fcoe_vlan_fields fields;
 	__le16 val;
 };
 
+/* FCoE 16-bit VLAN/VIF union */
 union fcoe_vlan_vif_field_union {
 	union fcoe_vlan_field_union vlan;
 	__le16 vif;
 };
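
The fcoe_vlan_fields layout matches the standard 802.1Q TCI word: PRI (PCP) in bits 15:13, CLI (CFI/DEI) at bit 12, VID in bits 11:0. A minimal sketch of packing a tag with the defines above; conversion to the on-wire __le16 is omitted:

#include <stdint.h>
#include <stdio.h>

/* Reproduced from the patch above. */
#define FCOE_VLAN_FIELDS_VID_MASK	0xFFF
#define FCOE_VLAN_FIELDS_VID_SHIFT	0
#define FCOE_VLAN_FIELDS_CLI_MASK	0x1
#define FCOE_VLAN_FIELDS_CLI_SHIFT	12
#define FCOE_VLAN_FIELDS_PRI_MASK	0x7
#define FCOE_VLAN_FIELDS_PRI_SHIFT	13

int main(void)
{
	/* Pack priority 3, CFI 0, VID 100 into one 16-bit tag word. */
	uint16_t fields =
		((3 & FCOE_VLAN_FIELDS_PRI_MASK) << FCOE_VLAN_FIELDS_PRI_SHIFT) |
		((0 & FCOE_VLAN_FIELDS_CLI_MASK) << FCOE_VLAN_FIELDS_CLI_SHIFT) |
		((100 & FCOE_VLAN_FIELDS_VID_MASK) << FCOE_VLAN_FIELDS_VID_SHIFT);

	printf("tci=0x%04x\n", (unsigned int)fields);	/* prints tci=0x6064 */
	return 0;
}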
 
+/* Ethernet context section */
 struct pstorm_fcoe_eth_context_section {
 	u8 remote_addr_3;
 	u8 remote_addr_2;
@@ -9500,6 +10132,7 @@ struct pstorm_fcoe_eth_context_section {
 	__le16 inner_eth_type;
 };
 
+/* The fcoe storm context of Pstorm */
 struct pstorm_fcoe_conn_st_ctx {
 	u8 func_mode;
 	u8 cos;
@@ -9513,16 +10146,18 @@ struct pstorm_fcoe_conn_st_ctx {
 	u8 sid_1;
 	u8 sid_0;
 	u8 flags;
-#define PSTORM_FCOE_CONN_ST_CTX_VNTAG_VLAN_MASK          0x1
-#define PSTORM_FCOE_CONN_ST_CTX_VNTAG_VLAN_SHIFT         0
-#define PSTORM_FCOE_CONN_ST_CTX_SUPPORT_REC_RR_TOV_MASK  0x1
-#define PSTORM_FCOE_CONN_ST_CTX_SUPPORT_REC_RR_TOV_SHIFT 1
-#define PSTORM_FCOE_CONN_ST_CTX_INNER_VLAN_FLAG_MASK     0x1
-#define PSTORM_FCOE_CONN_ST_CTX_INNER_VLAN_FLAG_SHIFT    2
-#define PSTORM_FCOE_CONN_ST_CTX_OUTER_VLAN_FLAG_MASK     0x1
-#define PSTORM_FCOE_CONN_ST_CTX_OUTER_VLAN_FLAG_SHIFT    3
-#define PSTORM_FCOE_CONN_ST_CTX_RESERVED_MASK            0xF
-#define PSTORM_FCOE_CONN_ST_CTX_RESERVED_SHIFT           4
+#define PSTORM_FCOE_CONN_ST_CTX_VNTAG_VLAN_MASK			0x1
+#define PSTORM_FCOE_CONN_ST_CTX_VNTAG_VLAN_SHIFT		0
+#define PSTORM_FCOE_CONN_ST_CTX_SUPPORT_REC_RR_TOV_MASK		0x1
+#define PSTORM_FCOE_CONN_ST_CTX_SUPPORT_REC_RR_TOV_SHIFT	1
+#define PSTORM_FCOE_CONN_ST_CTX_INNER_VLAN_FLAG_MASK		0x1
+#define PSTORM_FCOE_CONN_ST_CTX_INNER_VLAN_FLAG_SHIFT		2
+#define PSTORM_FCOE_CONN_ST_CTX_OUTER_VLAN_FLAG_MASK		0x1
+#define PSTORM_FCOE_CONN_ST_CTX_OUTER_VLAN_FLAG_SHIFT		3
+#define PSTORM_FCOE_CONN_ST_CTX_SINGLE_VLAN_FLAG_MASK		0x1
+#define PSTORM_FCOE_CONN_ST_CTX_SINGLE_VLAN_FLAG_SHIFT		4
+#define PSTORM_FCOE_CONN_ST_CTX_RESERVED_MASK			0x7
+#define PSTORM_FCOE_CONN_ST_CTX_RESERVED_SHIFT			5
 	u8 did_2;
 	u8 did_1;
 	u8 did_0;
@@ -9532,6 +10167,7 @@ struct pstorm_fcoe_conn_st_ctx {
 	u8 reserved1;
 };
 
+/* The fcoe storm context of Xstorm */
 struct xstorm_fcoe_conn_st_ctx {
 	u8 func_mode;
 	u8 src_mac_index;
@@ -9539,16 +10175,16 @@ struct xstorm_fcoe_conn_st_ctx {
 	u8 cached_wqes_avail;
 	__le16 stat_ram_addr;
 	u8 flags;
-#define XSTORM_FCOE_CONN_ST_CTX_SQ_DEFERRED_MASK             0x1
-#define XSTORM_FCOE_CONN_ST_CTX_SQ_DEFERRED_SHIFT            0
-#define XSTORM_FCOE_CONN_ST_CTX_INNER_VLAN_FLAG_MASK         0x1
-#define XSTORM_FCOE_CONN_ST_CTX_INNER_VLAN_FLAG_SHIFT        1
-#define XSTORM_FCOE_CONN_ST_CTX_INNER_VLAN_FLAG_ORIG_MASK    0x1
-#define XSTORM_FCOE_CONN_ST_CTX_INNER_VLAN_FLAG_ORIG_SHIFT   2
-#define XSTORM_FCOE_CONN_ST_CTX_LAST_QUEUE_HANDLED_MASK      0x3
-#define XSTORM_FCOE_CONN_ST_CTX_LAST_QUEUE_HANDLED_SHIFT     3
-#define XSTORM_FCOE_CONN_ST_CTX_RSRV_MASK                    0x7
-#define XSTORM_FCOE_CONN_ST_CTX_RSRV_SHIFT                   5
+#define XSTORM_FCOE_CONN_ST_CTX_SQ_DEFERRED_MASK		0x1
+#define XSTORM_FCOE_CONN_ST_CTX_SQ_DEFERRED_SHIFT		0
+#define XSTORM_FCOE_CONN_ST_CTX_INNER_VLAN_FLAG_MASK		0x1
+#define XSTORM_FCOE_CONN_ST_CTX_INNER_VLAN_FLAG_SHIFT		1
+#define XSTORM_FCOE_CONN_ST_CTX_INNER_VLAN_FLAG_ORIG_MASK	0x1
+#define XSTORM_FCOE_CONN_ST_CTX_INNER_VLAN_FLAG_ORIG_SHIFT	2
+#define XSTORM_FCOE_CONN_ST_CTX_LAST_QUEUE_HANDLED_MASK		0x3
+#define XSTORM_FCOE_CONN_ST_CTX_LAST_QUEUE_HANDLED_SHIFT	3
+#define XSTORM_FCOE_CONN_ST_CTX_RSRV_MASK			0x7
+#define XSTORM_FCOE_CONN_ST_CTX_RSRV_SHIFT			5
 	u8 cached_wqes_offset;
 	u8 reserved2;
 	u8 eth_hdr_size;
@@ -9574,18 +10210,18 @@ struct xstorm_fcoe_conn_st_ctx {
 	u8 fcp_cmd_byte_credit;
 	u8 fcp_rsp_byte_credit;
 	__le16 protection_info;
-#define XSTORM_FCOE_CONN_ST_CTX_PROTECTION_PERF_MASK         0x1
-#define XSTORM_FCOE_CONN_ST_CTX_PROTECTION_PERF_SHIFT        0
-#define XSTORM_FCOE_CONN_ST_CTX_SUPPORT_PROTECTION_MASK      0x1
-#define XSTORM_FCOE_CONN_ST_CTX_SUPPORT_PROTECTION_SHIFT     1
-#define XSTORM_FCOE_CONN_ST_CTX_VALID_MASK                   0x1
-#define XSTORM_FCOE_CONN_ST_CTX_VALID_SHIFT                  2
-#define XSTORM_FCOE_CONN_ST_CTX_FRAME_PROT_ALIGNED_MASK      0x1
-#define XSTORM_FCOE_CONN_ST_CTX_FRAME_PROT_ALIGNED_SHIFT     3
-#define XSTORM_FCOE_CONN_ST_CTX_RESERVED3_MASK               0xF
-#define XSTORM_FCOE_CONN_ST_CTX_RESERVED3_SHIFT              4
-#define XSTORM_FCOE_CONN_ST_CTX_DST_PROTECTION_PER_MSS_MASK  0xFF
-#define XSTORM_FCOE_CONN_ST_CTX_DST_PROTECTION_PER_MSS_SHIFT 8
+#define XSTORM_FCOE_CONN_ST_CTX_PROTECTION_PERF_MASK		0x1
+#define XSTORM_FCOE_CONN_ST_CTX_PROTECTION_PERF_SHIFT		0
+#define XSTORM_FCOE_CONN_ST_CTX_SUPPORT_PROTECTION_MASK		0x1
+#define XSTORM_FCOE_CONN_ST_CTX_SUPPORT_PROTECTION_SHIFT	1
+#define XSTORM_FCOE_CONN_ST_CTX_VALID_MASK			0x1
+#define XSTORM_FCOE_CONN_ST_CTX_VALID_SHIFT			2
+#define XSTORM_FCOE_CONN_ST_CTX_FRAME_PROT_ALIGNED_MASK		0x1
+#define XSTORM_FCOE_CONN_ST_CTX_FRAME_PROT_ALIGNED_SHIFT	3
+#define XSTORM_FCOE_CONN_ST_CTX_RESERVED3_MASK			0xF
+#define XSTORM_FCOE_CONN_ST_CTX_RESERVED3_SHIFT			4
+#define XSTORM_FCOE_CONN_ST_CTX_DST_PROTECTION_PER_MSS_MASK	0xFF
+#define XSTORM_FCOE_CONN_ST_CTX_DST_PROTECTION_PER_MSS_SHIFT	8
 	__le16 xferq_pbl_next_index;
 	__le16 page_size;
 	u8 mid_seq;
@@ -9594,216 +10230,216 @@ struct xstorm_fcoe_conn_st_ctx {
 	struct fcoe_wqe cached_wqes[16];
 };
 
-struct xstorm_fcoe_conn_ag_ctx {
+struct e4_xstorm_fcoe_conn_ag_ctx {
 	u8 reserved0;
-	u8 fcoe_state;
+	u8 state;
 	u8 flags0;
-#define XSTORM_FCOE_CONN_AG_CTX_EXIST_IN_QM0_MASK       0x1
-#define XSTORM_FCOE_CONN_AG_CTX_EXIST_IN_QM0_SHIFT      0
-#define XSTORM_FCOE_CONN_AG_CTX_RESERVED1_MASK          0x1
-#define XSTORM_FCOE_CONN_AG_CTX_RESERVED1_SHIFT         1
-#define XSTORM_FCOE_CONN_AG_CTX_RESERVED2_MASK          0x1
-#define XSTORM_FCOE_CONN_AG_CTX_RESERVED2_SHIFT         2
-#define XSTORM_FCOE_CONN_AG_CTX_EXIST_IN_QM3_MASK       0x1
-#define XSTORM_FCOE_CONN_AG_CTX_EXIST_IN_QM3_SHIFT      3
-#define XSTORM_FCOE_CONN_AG_CTX_RESERVED3_MASK          0x1
-#define XSTORM_FCOE_CONN_AG_CTX_RESERVED3_SHIFT         4
-#define XSTORM_FCOE_CONN_AG_CTX_RESERVED4_MASK          0x1
-#define XSTORM_FCOE_CONN_AG_CTX_RESERVED4_SHIFT         5
-#define XSTORM_FCOE_CONN_AG_CTX_RESERVED5_MASK          0x1
-#define XSTORM_FCOE_CONN_AG_CTX_RESERVED5_SHIFT         6
-#define XSTORM_FCOE_CONN_AG_CTX_RESERVED6_MASK          0x1
-#define XSTORM_FCOE_CONN_AG_CTX_RESERVED6_SHIFT         7
+#define E4_XSTORM_FCOE_CONN_AG_CTX_EXIST_IN_QM0_MASK	0x1
+#define E4_XSTORM_FCOE_CONN_AG_CTX_EXIST_IN_QM0_SHIFT	0
+#define E4_XSTORM_FCOE_CONN_AG_CTX_RESERVED1_MASK	0x1
+#define E4_XSTORM_FCOE_CONN_AG_CTX_RESERVED1_SHIFT	1
+#define E4_XSTORM_FCOE_CONN_AG_CTX_RESERVED2_MASK	0x1
+#define E4_XSTORM_FCOE_CONN_AG_CTX_RESERVED2_SHIFT	2
+#define E4_XSTORM_FCOE_CONN_AG_CTX_EXIST_IN_QM3_MASK	0x1
+#define E4_XSTORM_FCOE_CONN_AG_CTX_EXIST_IN_QM3_SHIFT	3
+#define E4_XSTORM_FCOE_CONN_AG_CTX_RESERVED3_MASK	0x1
+#define E4_XSTORM_FCOE_CONN_AG_CTX_RESERVED3_SHIFT	4
+#define E4_XSTORM_FCOE_CONN_AG_CTX_RESERVED4_MASK	0x1
+#define E4_XSTORM_FCOE_CONN_AG_CTX_RESERVED4_SHIFT	5
+#define E4_XSTORM_FCOE_CONN_AG_CTX_RESERVED5_MASK	0x1
+#define E4_XSTORM_FCOE_CONN_AG_CTX_RESERVED5_SHIFT	6
+#define E4_XSTORM_FCOE_CONN_AG_CTX_RESERVED6_MASK	0x1
+#define E4_XSTORM_FCOE_CONN_AG_CTX_RESERVED6_SHIFT	7
 	u8 flags1;
-#define XSTORM_FCOE_CONN_AG_CTX_RESERVED7_MASK          0x1
-#define XSTORM_FCOE_CONN_AG_CTX_RESERVED7_SHIFT         0
-#define XSTORM_FCOE_CONN_AG_CTX_RESERVED8_MASK          0x1
-#define XSTORM_FCOE_CONN_AG_CTX_RESERVED8_SHIFT         1
-#define XSTORM_FCOE_CONN_AG_CTX_RESERVED9_MASK          0x1
-#define XSTORM_FCOE_CONN_AG_CTX_RESERVED9_SHIFT         2
-#define XSTORM_FCOE_CONN_AG_CTX_BIT11_MASK              0x1
-#define XSTORM_FCOE_CONN_AG_CTX_BIT11_SHIFT             3
-#define XSTORM_FCOE_CONN_AG_CTX_BIT12_MASK              0x1
-#define XSTORM_FCOE_CONN_AG_CTX_BIT12_SHIFT             4
-#define XSTORM_FCOE_CONN_AG_CTX_BIT13_MASK              0x1
-#define XSTORM_FCOE_CONN_AG_CTX_BIT13_SHIFT             5
-#define XSTORM_FCOE_CONN_AG_CTX_BIT14_MASK              0x1
-#define XSTORM_FCOE_CONN_AG_CTX_BIT14_SHIFT             6
-#define XSTORM_FCOE_CONN_AG_CTX_BIT15_MASK              0x1
-#define XSTORM_FCOE_CONN_AG_CTX_BIT15_SHIFT             7
+#define E4_XSTORM_FCOE_CONN_AG_CTX_RESERVED7_MASK	0x1
+#define E4_XSTORM_FCOE_CONN_AG_CTX_RESERVED7_SHIFT	0
+#define E4_XSTORM_FCOE_CONN_AG_CTX_RESERVED8_MASK	0x1
+#define E4_XSTORM_FCOE_CONN_AG_CTX_RESERVED8_SHIFT	1
+#define E4_XSTORM_FCOE_CONN_AG_CTX_RESERVED9_MASK	0x1
+#define E4_XSTORM_FCOE_CONN_AG_CTX_RESERVED9_SHIFT	2
+#define E4_XSTORM_FCOE_CONN_AG_CTX_BIT11_MASK		0x1
+#define E4_XSTORM_FCOE_CONN_AG_CTX_BIT11_SHIFT		3
+#define E4_XSTORM_FCOE_CONN_AG_CTX_BIT12_MASK		0x1
+#define E4_XSTORM_FCOE_CONN_AG_CTX_BIT12_SHIFT		4
+#define E4_XSTORM_FCOE_CONN_AG_CTX_BIT13_MASK		0x1
+#define E4_XSTORM_FCOE_CONN_AG_CTX_BIT13_SHIFT		5
+#define E4_XSTORM_FCOE_CONN_AG_CTX_BIT14_MASK		0x1
+#define E4_XSTORM_FCOE_CONN_AG_CTX_BIT14_SHIFT		6
+#define E4_XSTORM_FCOE_CONN_AG_CTX_BIT15_MASK		0x1
+#define E4_XSTORM_FCOE_CONN_AG_CTX_BIT15_SHIFT		7
 	u8 flags2;
-#define XSTORM_FCOE_CONN_AG_CTX_CF0_MASK                0x3
-#define XSTORM_FCOE_CONN_AG_CTX_CF0_SHIFT               0
-#define XSTORM_FCOE_CONN_AG_CTX_CF1_MASK                0x3
-#define XSTORM_FCOE_CONN_AG_CTX_CF1_SHIFT               2
-#define XSTORM_FCOE_CONN_AG_CTX_CF2_MASK                0x3
-#define XSTORM_FCOE_CONN_AG_CTX_CF2_SHIFT               4
-#define XSTORM_FCOE_CONN_AG_CTX_CF3_MASK                0x3
-#define XSTORM_FCOE_CONN_AG_CTX_CF3_SHIFT               6
+#define E4_XSTORM_FCOE_CONN_AG_CTX_CF0_MASK	0x3
+#define E4_XSTORM_FCOE_CONN_AG_CTX_CF0_SHIFT	0
+#define E4_XSTORM_FCOE_CONN_AG_CTX_CF1_MASK	0x3
+#define E4_XSTORM_FCOE_CONN_AG_CTX_CF1_SHIFT	2
+#define E4_XSTORM_FCOE_CONN_AG_CTX_CF2_MASK	0x3
+#define E4_XSTORM_FCOE_CONN_AG_CTX_CF2_SHIFT	4
+#define E4_XSTORM_FCOE_CONN_AG_CTX_CF3_MASK	0x3
+#define E4_XSTORM_FCOE_CONN_AG_CTX_CF3_SHIFT	6
 	u8 flags3;
-#define XSTORM_FCOE_CONN_AG_CTX_CF4_MASK                0x3
-#define XSTORM_FCOE_CONN_AG_CTX_CF4_SHIFT               0
-#define XSTORM_FCOE_CONN_AG_CTX_CF5_MASK                0x3
-#define XSTORM_FCOE_CONN_AG_CTX_CF5_SHIFT               2
-#define XSTORM_FCOE_CONN_AG_CTX_CF6_MASK                0x3
-#define XSTORM_FCOE_CONN_AG_CTX_CF6_SHIFT               4
-#define XSTORM_FCOE_CONN_AG_CTX_CF7_MASK                0x3
-#define XSTORM_FCOE_CONN_AG_CTX_CF7_SHIFT               6
+#define E4_XSTORM_FCOE_CONN_AG_CTX_CF4_MASK	0x3
+#define E4_XSTORM_FCOE_CONN_AG_CTX_CF4_SHIFT	0
+#define E4_XSTORM_FCOE_CONN_AG_CTX_CF5_MASK	0x3
+#define E4_XSTORM_FCOE_CONN_AG_CTX_CF5_SHIFT	2
+#define E4_XSTORM_FCOE_CONN_AG_CTX_CF6_MASK	0x3
+#define E4_XSTORM_FCOE_CONN_AG_CTX_CF6_SHIFT	4
+#define E4_XSTORM_FCOE_CONN_AG_CTX_CF7_MASK	0x3
+#define E4_XSTORM_FCOE_CONN_AG_CTX_CF7_SHIFT	6
 	u8 flags4;
-#define XSTORM_FCOE_CONN_AG_CTX_CF8_MASK                0x3
-#define XSTORM_FCOE_CONN_AG_CTX_CF8_SHIFT               0
-#define XSTORM_FCOE_CONN_AG_CTX_CF9_MASK                0x3
-#define XSTORM_FCOE_CONN_AG_CTX_CF9_SHIFT               2
-#define XSTORM_FCOE_CONN_AG_CTX_CF10_MASK               0x3
-#define XSTORM_FCOE_CONN_AG_CTX_CF10_SHIFT              4
-#define XSTORM_FCOE_CONN_AG_CTX_CF11_MASK               0x3
-#define XSTORM_FCOE_CONN_AG_CTX_CF11_SHIFT              6
+#define E4_XSTORM_FCOE_CONN_AG_CTX_CF8_MASK	0x3
+#define E4_XSTORM_FCOE_CONN_AG_CTX_CF8_SHIFT	0
+#define E4_XSTORM_FCOE_CONN_AG_CTX_CF9_MASK	0x3
+#define E4_XSTORM_FCOE_CONN_AG_CTX_CF9_SHIFT	2
+#define E4_XSTORM_FCOE_CONN_AG_CTX_CF10_MASK	0x3
+#define E4_XSTORM_FCOE_CONN_AG_CTX_CF10_SHIFT	4
+#define E4_XSTORM_FCOE_CONN_AG_CTX_CF11_MASK	0x3
+#define E4_XSTORM_FCOE_CONN_AG_CTX_CF11_SHIFT	6
 	u8 flags5;
-#define XSTORM_FCOE_CONN_AG_CTX_CF12_MASK               0x3
-#define XSTORM_FCOE_CONN_AG_CTX_CF12_SHIFT              0
-#define XSTORM_FCOE_CONN_AG_CTX_CF13_MASK               0x3
-#define XSTORM_FCOE_CONN_AG_CTX_CF13_SHIFT              2
-#define XSTORM_FCOE_CONN_AG_CTX_CF14_MASK               0x3
-#define XSTORM_FCOE_CONN_AG_CTX_CF14_SHIFT              4
-#define XSTORM_FCOE_CONN_AG_CTX_CF15_MASK               0x3
-#define XSTORM_FCOE_CONN_AG_CTX_CF15_SHIFT              6
+#define E4_XSTORM_FCOE_CONN_AG_CTX_CF12_MASK	0x3
+#define E4_XSTORM_FCOE_CONN_AG_CTX_CF12_SHIFT	0
+#define E4_XSTORM_FCOE_CONN_AG_CTX_CF13_MASK	0x3
+#define E4_XSTORM_FCOE_CONN_AG_CTX_CF13_SHIFT	2
+#define E4_XSTORM_FCOE_CONN_AG_CTX_CF14_MASK	0x3
+#define E4_XSTORM_FCOE_CONN_AG_CTX_CF14_SHIFT	4
+#define E4_XSTORM_FCOE_CONN_AG_CTX_CF15_MASK	0x3
+#define E4_XSTORM_FCOE_CONN_AG_CTX_CF15_SHIFT	6
 	u8 flags6;
-#define XSTORM_FCOE_CONN_AG_CTX_CF16_MASK               0x3
-#define XSTORM_FCOE_CONN_AG_CTX_CF16_SHIFT              0
-#define XSTORM_FCOE_CONN_AG_CTX_CF17_MASK               0x3
-#define XSTORM_FCOE_CONN_AG_CTX_CF17_SHIFT              2
-#define XSTORM_FCOE_CONN_AG_CTX_CF18_MASK               0x3
-#define XSTORM_FCOE_CONN_AG_CTX_CF18_SHIFT              4
-#define XSTORM_FCOE_CONN_AG_CTX_DQ_CF_MASK              0x3
-#define XSTORM_FCOE_CONN_AG_CTX_DQ_CF_SHIFT             6
+#define E4_XSTORM_FCOE_CONN_AG_CTX_CF16_MASK	0x3
+#define E4_XSTORM_FCOE_CONN_AG_CTX_CF16_SHIFT	0
+#define E4_XSTORM_FCOE_CONN_AG_CTX_CF17_MASK	0x3
+#define E4_XSTORM_FCOE_CONN_AG_CTX_CF17_SHIFT	2
+#define E4_XSTORM_FCOE_CONN_AG_CTX_CF18_MASK	0x3
+#define E4_XSTORM_FCOE_CONN_AG_CTX_CF18_SHIFT	4
+#define E4_XSTORM_FCOE_CONN_AG_CTX_DQ_CF_MASK	0x3
+#define E4_XSTORM_FCOE_CONN_AG_CTX_DQ_CF_SHIFT	6
 	u8 flags7;
-#define XSTORM_FCOE_CONN_AG_CTX_FLUSH_Q0_MASK           0x3
-#define XSTORM_FCOE_CONN_AG_CTX_FLUSH_Q0_SHIFT          0
-#define XSTORM_FCOE_CONN_AG_CTX_RESERVED10_MASK         0x3
-#define XSTORM_FCOE_CONN_AG_CTX_RESERVED10_SHIFT        2
-#define XSTORM_FCOE_CONN_AG_CTX_SLOW_PATH_MASK          0x3
-#define XSTORM_FCOE_CONN_AG_CTX_SLOW_PATH_SHIFT         4
-#define XSTORM_FCOE_CONN_AG_CTX_CF0EN_MASK              0x1
-#define XSTORM_FCOE_CONN_AG_CTX_CF0EN_SHIFT             6
-#define XSTORM_FCOE_CONN_AG_CTX_CF1EN_MASK              0x1
-#define XSTORM_FCOE_CONN_AG_CTX_CF1EN_SHIFT             7
+#define E4_XSTORM_FCOE_CONN_AG_CTX_FLUSH_Q0_MASK	0x3
+#define E4_XSTORM_FCOE_CONN_AG_CTX_FLUSH_Q0_SHIFT	0
+#define E4_XSTORM_FCOE_CONN_AG_CTX_RESERVED10_MASK	0x3
+#define E4_XSTORM_FCOE_CONN_AG_CTX_RESERVED10_SHIFT	2
+#define E4_XSTORM_FCOE_CONN_AG_CTX_SLOW_PATH_MASK	0x3
+#define E4_XSTORM_FCOE_CONN_AG_CTX_SLOW_PATH_SHIFT	4
+#define E4_XSTORM_FCOE_CONN_AG_CTX_CF0EN_MASK		0x1
+#define E4_XSTORM_FCOE_CONN_AG_CTX_CF0EN_SHIFT		6
+#define E4_XSTORM_FCOE_CONN_AG_CTX_CF1EN_MASK		0x1
+#define E4_XSTORM_FCOE_CONN_AG_CTX_CF1EN_SHIFT		7
 	u8 flags8;
-#define XSTORM_FCOE_CONN_AG_CTX_CF2EN_MASK              0x1
-#define XSTORM_FCOE_CONN_AG_CTX_CF2EN_SHIFT             0
-#define XSTORM_FCOE_CONN_AG_CTX_CF3EN_MASK              0x1
-#define XSTORM_FCOE_CONN_AG_CTX_CF3EN_SHIFT             1
-#define XSTORM_FCOE_CONN_AG_CTX_CF4EN_MASK              0x1
-#define XSTORM_FCOE_CONN_AG_CTX_CF4EN_SHIFT             2
-#define XSTORM_FCOE_CONN_AG_CTX_CF5EN_MASK              0x1
-#define XSTORM_FCOE_CONN_AG_CTX_CF5EN_SHIFT             3
-#define XSTORM_FCOE_CONN_AG_CTX_CF6EN_MASK              0x1
-#define XSTORM_FCOE_CONN_AG_CTX_CF6EN_SHIFT             4
-#define XSTORM_FCOE_CONN_AG_CTX_CF7EN_MASK              0x1
-#define XSTORM_FCOE_CONN_AG_CTX_CF7EN_SHIFT             5
-#define XSTORM_FCOE_CONN_AG_CTX_CF8EN_MASK              0x1
-#define XSTORM_FCOE_CONN_AG_CTX_CF8EN_SHIFT             6
-#define XSTORM_FCOE_CONN_AG_CTX_CF9EN_MASK              0x1
-#define XSTORM_FCOE_CONN_AG_CTX_CF9EN_SHIFT             7
+#define E4_XSTORM_FCOE_CONN_AG_CTX_CF2EN_MASK	0x1
+#define E4_XSTORM_FCOE_CONN_AG_CTX_CF2EN_SHIFT	0
+#define E4_XSTORM_FCOE_CONN_AG_CTX_CF3EN_MASK	0x1
+#define E4_XSTORM_FCOE_CONN_AG_CTX_CF3EN_SHIFT	1
+#define E4_XSTORM_FCOE_CONN_AG_CTX_CF4EN_MASK	0x1
+#define E4_XSTORM_FCOE_CONN_AG_CTX_CF4EN_SHIFT	2
+#define E4_XSTORM_FCOE_CONN_AG_CTX_CF5EN_MASK	0x1
+#define E4_XSTORM_FCOE_CONN_AG_CTX_CF5EN_SHIFT	3
+#define E4_XSTORM_FCOE_CONN_AG_CTX_CF6EN_MASK	0x1
+#define E4_XSTORM_FCOE_CONN_AG_CTX_CF6EN_SHIFT	4
+#define E4_XSTORM_FCOE_CONN_AG_CTX_CF7EN_MASK	0x1
+#define E4_XSTORM_FCOE_CONN_AG_CTX_CF7EN_SHIFT	5
+#define E4_XSTORM_FCOE_CONN_AG_CTX_CF8EN_MASK	0x1
+#define E4_XSTORM_FCOE_CONN_AG_CTX_CF8EN_SHIFT	6
+#define E4_XSTORM_FCOE_CONN_AG_CTX_CF9EN_MASK	0x1
+#define E4_XSTORM_FCOE_CONN_AG_CTX_CF9EN_SHIFT	7
 	u8 flags9;
-#define XSTORM_FCOE_CONN_AG_CTX_CF10EN_MASK             0x1
-#define XSTORM_FCOE_CONN_AG_CTX_CF10EN_SHIFT            0
-#define XSTORM_FCOE_CONN_AG_CTX_CF11EN_MASK             0x1
-#define XSTORM_FCOE_CONN_AG_CTX_CF11EN_SHIFT            1
-#define XSTORM_FCOE_CONN_AG_CTX_CF12EN_MASK             0x1
-#define XSTORM_FCOE_CONN_AG_CTX_CF12EN_SHIFT            2
-#define XSTORM_FCOE_CONN_AG_CTX_CF13EN_MASK             0x1
-#define XSTORM_FCOE_CONN_AG_CTX_CF13EN_SHIFT            3
-#define XSTORM_FCOE_CONN_AG_CTX_CF14EN_MASK             0x1
-#define XSTORM_FCOE_CONN_AG_CTX_CF14EN_SHIFT            4
-#define XSTORM_FCOE_CONN_AG_CTX_CF15EN_MASK             0x1
-#define XSTORM_FCOE_CONN_AG_CTX_CF15EN_SHIFT            5
-#define XSTORM_FCOE_CONN_AG_CTX_CF16EN_MASK             0x1
-#define XSTORM_FCOE_CONN_AG_CTX_CF16EN_SHIFT            6
-#define XSTORM_FCOE_CONN_AG_CTX_CF17EN_MASK             0x1
-#define XSTORM_FCOE_CONN_AG_CTX_CF17EN_SHIFT            7
+#define E4_XSTORM_FCOE_CONN_AG_CTX_CF10EN_MASK	0x1
+#define E4_XSTORM_FCOE_CONN_AG_CTX_CF10EN_SHIFT	0
+#define E4_XSTORM_FCOE_CONN_AG_CTX_CF11EN_MASK	0x1
+#define E4_XSTORM_FCOE_CONN_AG_CTX_CF11EN_SHIFT	1
+#define E4_XSTORM_FCOE_CONN_AG_CTX_CF12EN_MASK	0x1
+#define E4_XSTORM_FCOE_CONN_AG_CTX_CF12EN_SHIFT	2
+#define E4_XSTORM_FCOE_CONN_AG_CTX_CF13EN_MASK	0x1
+#define E4_XSTORM_FCOE_CONN_AG_CTX_CF13EN_SHIFT	3
+#define E4_XSTORM_FCOE_CONN_AG_CTX_CF14EN_MASK	0x1
+#define E4_XSTORM_FCOE_CONN_AG_CTX_CF14EN_SHIFT	4
+#define E4_XSTORM_FCOE_CONN_AG_CTX_CF15EN_MASK	0x1
+#define E4_XSTORM_FCOE_CONN_AG_CTX_CF15EN_SHIFT	5
+#define E4_XSTORM_FCOE_CONN_AG_CTX_CF16EN_MASK	0x1
+#define E4_XSTORM_FCOE_CONN_AG_CTX_CF16EN_SHIFT	6
+#define E4_XSTORM_FCOE_CONN_AG_CTX_CF17EN_MASK	0x1
+#define E4_XSTORM_FCOE_CONN_AG_CTX_CF17EN_SHIFT	7
 	u8 flags10;
-#define XSTORM_FCOE_CONN_AG_CTX_CF18EN_MASK             0x1
-#define XSTORM_FCOE_CONN_AG_CTX_CF18EN_SHIFT            0
-#define XSTORM_FCOE_CONN_AG_CTX_DQ_CF_EN_MASK           0x1
-#define XSTORM_FCOE_CONN_AG_CTX_DQ_CF_EN_SHIFT          1
-#define XSTORM_FCOE_CONN_AG_CTX_FLUSH_Q0_EN_MASK        0x1
-#define XSTORM_FCOE_CONN_AG_CTX_FLUSH_Q0_EN_SHIFT       2
-#define XSTORM_FCOE_CONN_AG_CTX_RESERVED11_MASK         0x1
-#define XSTORM_FCOE_CONN_AG_CTX_RESERVED11_SHIFT        3
-#define XSTORM_FCOE_CONN_AG_CTX_SLOW_PATH_EN_MASK       0x1
-#define XSTORM_FCOE_CONN_AG_CTX_SLOW_PATH_EN_SHIFT      4
-#define XSTORM_FCOE_CONN_AG_CTX_CF23EN_MASK             0x1
-#define XSTORM_FCOE_CONN_AG_CTX_CF23EN_SHIFT            5
-#define XSTORM_FCOE_CONN_AG_CTX_RESERVED12_MASK         0x1
-#define XSTORM_FCOE_CONN_AG_CTX_RESERVED12_SHIFT        6
-#define XSTORM_FCOE_CONN_AG_CTX_RESERVED13_MASK         0x1
-#define XSTORM_FCOE_CONN_AG_CTX_RESERVED13_SHIFT        7
+#define E4_XSTORM_FCOE_CONN_AG_CTX_CF18EN_MASK		0x1
+#define E4_XSTORM_FCOE_CONN_AG_CTX_CF18EN_SHIFT		0
+#define E4_XSTORM_FCOE_CONN_AG_CTX_DQ_CF_EN_MASK	0x1
+#define E4_XSTORM_FCOE_CONN_AG_CTX_DQ_CF_EN_SHIFT	1
+#define E4_XSTORM_FCOE_CONN_AG_CTX_FLUSH_Q0_EN_MASK	0x1
+#define E4_XSTORM_FCOE_CONN_AG_CTX_FLUSH_Q0_EN_SHIFT	2
+#define E4_XSTORM_FCOE_CONN_AG_CTX_RESERVED11_MASK	0x1
+#define E4_XSTORM_FCOE_CONN_AG_CTX_RESERVED11_SHIFT	3
+#define E4_XSTORM_FCOE_CONN_AG_CTX_SLOW_PATH_EN_MASK	0x1
+#define E4_XSTORM_FCOE_CONN_AG_CTX_SLOW_PATH_EN_SHIFT	4
+#define E4_XSTORM_FCOE_CONN_AG_CTX_CF23EN_MASK		0x1
+#define E4_XSTORM_FCOE_CONN_AG_CTX_CF23EN_SHIFT		5
+#define E4_XSTORM_FCOE_CONN_AG_CTX_RESERVED12_MASK	0x1
+#define E4_XSTORM_FCOE_CONN_AG_CTX_RESERVED12_SHIFT	6
+#define E4_XSTORM_FCOE_CONN_AG_CTX_RESERVED13_MASK	0x1
+#define E4_XSTORM_FCOE_CONN_AG_CTX_RESERVED13_SHIFT	7
 	u8 flags11;
-#define XSTORM_FCOE_CONN_AG_CTX_RESERVED14_MASK         0x1
-#define XSTORM_FCOE_CONN_AG_CTX_RESERVED14_SHIFT        0
-#define XSTORM_FCOE_CONN_AG_CTX_RESERVED15_MASK         0x1
-#define XSTORM_FCOE_CONN_AG_CTX_RESERVED15_SHIFT        1
-#define XSTORM_FCOE_CONN_AG_CTX_RESERVED16_MASK         0x1
-#define XSTORM_FCOE_CONN_AG_CTX_RESERVED16_SHIFT        2
-#define XSTORM_FCOE_CONN_AG_CTX_RULE5EN_MASK            0x1
-#define XSTORM_FCOE_CONN_AG_CTX_RULE5EN_SHIFT           3
-#define XSTORM_FCOE_CONN_AG_CTX_RULE6EN_MASK            0x1
-#define XSTORM_FCOE_CONN_AG_CTX_RULE6EN_SHIFT           4
-#define XSTORM_FCOE_CONN_AG_CTX_RULE7EN_MASK            0x1
-#define XSTORM_FCOE_CONN_AG_CTX_RULE7EN_SHIFT           5
-#define XSTORM_FCOE_CONN_AG_CTX_A0_RESERVED1_MASK       0x1
-#define XSTORM_FCOE_CONN_AG_CTX_A0_RESERVED1_SHIFT      6
-#define XSTORM_FCOE_CONN_AG_CTX_XFERQ_DECISION_EN_MASK  0x1
-#define XSTORM_FCOE_CONN_AG_CTX_XFERQ_DECISION_EN_SHIFT 7
+#define E4_XSTORM_FCOE_CONN_AG_CTX_RESERVED14_MASK		0x1
+#define E4_XSTORM_FCOE_CONN_AG_CTX_RESERVED14_SHIFT		0
+#define E4_XSTORM_FCOE_CONN_AG_CTX_RESERVED15_MASK		0x1
+#define E4_XSTORM_FCOE_CONN_AG_CTX_RESERVED15_SHIFT		1
+#define E4_XSTORM_FCOE_CONN_AG_CTX_RESERVED16_MASK		0x1
+#define E4_XSTORM_FCOE_CONN_AG_CTX_RESERVED16_SHIFT		2
+#define E4_XSTORM_FCOE_CONN_AG_CTX_RULE5EN_MASK			0x1
+#define E4_XSTORM_FCOE_CONN_AG_CTX_RULE5EN_SHIFT		3
+#define E4_XSTORM_FCOE_CONN_AG_CTX_RULE6EN_MASK			0x1
+#define E4_XSTORM_FCOE_CONN_AG_CTX_RULE6EN_SHIFT		4
+#define E4_XSTORM_FCOE_CONN_AG_CTX_RULE7EN_MASK			0x1
+#define E4_XSTORM_FCOE_CONN_AG_CTX_RULE7EN_SHIFT		5
+#define E4_XSTORM_FCOE_CONN_AG_CTX_A0_RESERVED1_MASK		0x1
+#define E4_XSTORM_FCOE_CONN_AG_CTX_A0_RESERVED1_SHIFT		6
+#define E4_XSTORM_FCOE_CONN_AG_CTX_XFERQ_DECISION_EN_MASK	0x1
+#define E4_XSTORM_FCOE_CONN_AG_CTX_XFERQ_DECISION_EN_SHIFT	7
 	u8 flags12;
-#define XSTORM_FCOE_CONN_AG_CTX_SQ_DECISION_EN_MASK     0x1
-#define XSTORM_FCOE_CONN_AG_CTX_SQ_DECISION_EN_SHIFT    0
-#define XSTORM_FCOE_CONN_AG_CTX_RULE11EN_MASK           0x1
-#define XSTORM_FCOE_CONN_AG_CTX_RULE11EN_SHIFT          1
-#define XSTORM_FCOE_CONN_AG_CTX_A0_RESERVED2_MASK       0x1
-#define XSTORM_FCOE_CONN_AG_CTX_A0_RESERVED2_SHIFT      2
-#define XSTORM_FCOE_CONN_AG_CTX_A0_RESERVED3_MASK       0x1
-#define XSTORM_FCOE_CONN_AG_CTX_A0_RESERVED3_SHIFT      3
-#define XSTORM_FCOE_CONN_AG_CTX_RULE14EN_MASK           0x1
-#define XSTORM_FCOE_CONN_AG_CTX_RULE14EN_SHIFT          4
-#define XSTORM_FCOE_CONN_AG_CTX_RULE15EN_MASK           0x1
-#define XSTORM_FCOE_CONN_AG_CTX_RULE15EN_SHIFT          5
-#define XSTORM_FCOE_CONN_AG_CTX_RULE16EN_MASK           0x1
-#define XSTORM_FCOE_CONN_AG_CTX_RULE16EN_SHIFT          6
-#define XSTORM_FCOE_CONN_AG_CTX_RULE17EN_MASK           0x1
-#define XSTORM_FCOE_CONN_AG_CTX_RULE17EN_SHIFT          7
+#define E4_XSTORM_FCOE_CONN_AG_CTX_SQ_DECISION_EN_MASK	0x1
+#define E4_XSTORM_FCOE_CONN_AG_CTX_SQ_DECISION_EN_SHIFT	0
+#define E4_XSTORM_FCOE_CONN_AG_CTX_RULE11EN_MASK	0x1
+#define E4_XSTORM_FCOE_CONN_AG_CTX_RULE11EN_SHIFT	1
+#define E4_XSTORM_FCOE_CONN_AG_CTX_A0_RESERVED2_MASK	0x1
+#define E4_XSTORM_FCOE_CONN_AG_CTX_A0_RESERVED2_SHIFT	2
+#define E4_XSTORM_FCOE_CONN_AG_CTX_A0_RESERVED3_MASK	0x1
+#define E4_XSTORM_FCOE_CONN_AG_CTX_A0_RESERVED3_SHIFT	3
+#define E4_XSTORM_FCOE_CONN_AG_CTX_RULE14EN_MASK	0x1
+#define E4_XSTORM_FCOE_CONN_AG_CTX_RULE14EN_SHIFT	4
+#define E4_XSTORM_FCOE_CONN_AG_CTX_RULE15EN_MASK	0x1
+#define E4_XSTORM_FCOE_CONN_AG_CTX_RULE15EN_SHIFT	5
+#define E4_XSTORM_FCOE_CONN_AG_CTX_RULE16EN_MASK	0x1
+#define E4_XSTORM_FCOE_CONN_AG_CTX_RULE16EN_SHIFT	6
+#define E4_XSTORM_FCOE_CONN_AG_CTX_RULE17EN_MASK	0x1
+#define E4_XSTORM_FCOE_CONN_AG_CTX_RULE17EN_SHIFT	7
 	u8 flags13;
-#define XSTORM_FCOE_CONN_AG_CTX_RESPQ_DECISION_EN_MASK  0x1
-#define XSTORM_FCOE_CONN_AG_CTX_RESPQ_DECISION_EN_SHIFT 0
-#define XSTORM_FCOE_CONN_AG_CTX_RULE19EN_MASK           0x1
-#define XSTORM_FCOE_CONN_AG_CTX_RULE19EN_SHIFT          1
-#define XSTORM_FCOE_CONN_AG_CTX_A0_RESERVED4_MASK       0x1
-#define XSTORM_FCOE_CONN_AG_CTX_A0_RESERVED4_SHIFT      2
-#define XSTORM_FCOE_CONN_AG_CTX_A0_RESERVED5_MASK       0x1
-#define XSTORM_FCOE_CONN_AG_CTX_A0_RESERVED5_SHIFT      3
-#define XSTORM_FCOE_CONN_AG_CTX_A0_RESERVED6_MASK       0x1
-#define XSTORM_FCOE_CONN_AG_CTX_A0_RESERVED6_SHIFT      4
-#define XSTORM_FCOE_CONN_AG_CTX_A0_RESERVED7_MASK       0x1
-#define XSTORM_FCOE_CONN_AG_CTX_A0_RESERVED7_SHIFT      5
-#define XSTORM_FCOE_CONN_AG_CTX_A0_RESERVED8_MASK       0x1
-#define XSTORM_FCOE_CONN_AG_CTX_A0_RESERVED8_SHIFT      6
-#define XSTORM_FCOE_CONN_AG_CTX_A0_RESERVED9_MASK       0x1
-#define XSTORM_FCOE_CONN_AG_CTX_A0_RESERVED9_SHIFT      7
+#define E4_XSTORM_FCOE_CONN_AG_CTX_RESPQ_DECISION_EN_MASK	0x1
+#define E4_XSTORM_FCOE_CONN_AG_CTX_RESPQ_DECISION_EN_SHIFT	0
+#define E4_XSTORM_FCOE_CONN_AG_CTX_RULE19EN_MASK		0x1
+#define E4_XSTORM_FCOE_CONN_AG_CTX_RULE19EN_SHIFT		1
+#define E4_XSTORM_FCOE_CONN_AG_CTX_A0_RESERVED4_MASK		0x1
+#define E4_XSTORM_FCOE_CONN_AG_CTX_A0_RESERVED4_SHIFT		2
+#define E4_XSTORM_FCOE_CONN_AG_CTX_A0_RESERVED5_MASK		0x1
+#define E4_XSTORM_FCOE_CONN_AG_CTX_A0_RESERVED5_SHIFT		3
+#define E4_XSTORM_FCOE_CONN_AG_CTX_A0_RESERVED6_MASK		0x1
+#define E4_XSTORM_FCOE_CONN_AG_CTX_A0_RESERVED6_SHIFT		4
+#define E4_XSTORM_FCOE_CONN_AG_CTX_A0_RESERVED7_MASK		0x1
+#define E4_XSTORM_FCOE_CONN_AG_CTX_A0_RESERVED7_SHIFT		5
+#define E4_XSTORM_FCOE_CONN_AG_CTX_A0_RESERVED8_MASK		0x1
+#define E4_XSTORM_FCOE_CONN_AG_CTX_A0_RESERVED8_SHIFT		6
+#define E4_XSTORM_FCOE_CONN_AG_CTX_A0_RESERVED9_MASK		0x1
+#define E4_XSTORM_FCOE_CONN_AG_CTX_A0_RESERVED9_SHIFT		7
 	u8 flags14;
-#define XSTORM_FCOE_CONN_AG_CTX_BIT16_MASK              0x1
-#define XSTORM_FCOE_CONN_AG_CTX_BIT16_SHIFT             0
-#define XSTORM_FCOE_CONN_AG_CTX_BIT17_MASK              0x1
-#define XSTORM_FCOE_CONN_AG_CTX_BIT17_SHIFT             1
-#define XSTORM_FCOE_CONN_AG_CTX_BIT18_MASK              0x1
-#define XSTORM_FCOE_CONN_AG_CTX_BIT18_SHIFT             2
-#define XSTORM_FCOE_CONN_AG_CTX_BIT19_MASK              0x1
-#define XSTORM_FCOE_CONN_AG_CTX_BIT19_SHIFT             3
-#define XSTORM_FCOE_CONN_AG_CTX_BIT20_MASK              0x1
-#define XSTORM_FCOE_CONN_AG_CTX_BIT20_SHIFT             4
-#define XSTORM_FCOE_CONN_AG_CTX_BIT21_MASK              0x1
-#define XSTORM_FCOE_CONN_AG_CTX_BIT21_SHIFT             5
-#define XSTORM_FCOE_CONN_AG_CTX_CF23_MASK               0x3
-#define XSTORM_FCOE_CONN_AG_CTX_CF23_SHIFT              6
+#define E4_XSTORM_FCOE_CONN_AG_CTX_BIT16_MASK	0x1
+#define E4_XSTORM_FCOE_CONN_AG_CTX_BIT16_SHIFT	0
+#define E4_XSTORM_FCOE_CONN_AG_CTX_BIT17_MASK	0x1
+#define E4_XSTORM_FCOE_CONN_AG_CTX_BIT17_SHIFT	1
+#define E4_XSTORM_FCOE_CONN_AG_CTX_BIT18_MASK	0x1
+#define E4_XSTORM_FCOE_CONN_AG_CTX_BIT18_SHIFT	2
+#define E4_XSTORM_FCOE_CONN_AG_CTX_BIT19_MASK	0x1
+#define E4_XSTORM_FCOE_CONN_AG_CTX_BIT19_SHIFT	3
+#define E4_XSTORM_FCOE_CONN_AG_CTX_BIT20_MASK	0x1
+#define E4_XSTORM_FCOE_CONN_AG_CTX_BIT20_SHIFT	4
+#define E4_XSTORM_FCOE_CONN_AG_CTX_BIT21_MASK	0x1
+#define E4_XSTORM_FCOE_CONN_AG_CTX_BIT21_SHIFT	5
+#define E4_XSTORM_FCOE_CONN_AG_CTX_CF23_MASK	0x3
+#define E4_XSTORM_FCOE_CONN_AG_CTX_CF23_SHIFT	6
 	u8 byte2;
 	__le16 physical_q0;
 	__le16 word1;
@@ -9831,6 +10467,7 @@ struct xstorm_fcoe_conn_ag_ctx {
 	__le32 reg8;
 };
 
+/* The fcoe storm context of Ustorm */
 struct ustorm_fcoe_conn_st_ctx {
 	struct regpair respq_pbl_addr;
 	__le16 num_pages_in_pbl;
@@ -9840,150 +10477,150 @@ struct ustorm_fcoe_conn_st_ctx {
 	u8 reserved[2];
 };
 
-struct tstorm_fcoe_conn_ag_ctx {
+struct e4_tstorm_fcoe_conn_ag_ctx {
 	u8 reserved0;
-	u8 fcoe_state;
+	u8 state;
 	u8 flags0;
-#define TSTORM_FCOE_CONN_AG_CTX_EXIST_IN_QM0_MASK          0x1
-#define TSTORM_FCOE_CONN_AG_CTX_EXIST_IN_QM0_SHIFT         0
-#define TSTORM_FCOE_CONN_AG_CTX_BIT1_MASK                  0x1
-#define TSTORM_FCOE_CONN_AG_CTX_BIT1_SHIFT                 1
-#define TSTORM_FCOE_CONN_AG_CTX_BIT2_MASK                  0x1
-#define TSTORM_FCOE_CONN_AG_CTX_BIT2_SHIFT                 2
-#define TSTORM_FCOE_CONN_AG_CTX_BIT3_MASK                  0x1
-#define TSTORM_FCOE_CONN_AG_CTX_BIT3_SHIFT                 3
-#define TSTORM_FCOE_CONN_AG_CTX_BIT4_MASK                  0x1
-#define TSTORM_FCOE_CONN_AG_CTX_BIT4_SHIFT                 4
-#define TSTORM_FCOE_CONN_AG_CTX_BIT5_MASK                  0x1
-#define TSTORM_FCOE_CONN_AG_CTX_BIT5_SHIFT                 5
-#define TSTORM_FCOE_CONN_AG_CTX_DUMMY_TIMER_CF_MASK        0x3
-#define TSTORM_FCOE_CONN_AG_CTX_DUMMY_TIMER_CF_SHIFT       6
+#define E4_TSTORM_FCOE_CONN_AG_CTX_EXIST_IN_QM0_MASK	0x1
+#define E4_TSTORM_FCOE_CONN_AG_CTX_EXIST_IN_QM0_SHIFT	0
+#define E4_TSTORM_FCOE_CONN_AG_CTX_BIT1_MASK		0x1
+#define E4_TSTORM_FCOE_CONN_AG_CTX_BIT1_SHIFT		1
+#define E4_TSTORM_FCOE_CONN_AG_CTX_BIT2_MASK		0x1
+#define E4_TSTORM_FCOE_CONN_AG_CTX_BIT2_SHIFT		2
+#define E4_TSTORM_FCOE_CONN_AG_CTX_BIT3_MASK		0x1
+#define E4_TSTORM_FCOE_CONN_AG_CTX_BIT3_SHIFT		3
+#define E4_TSTORM_FCOE_CONN_AG_CTX_BIT4_MASK		0x1
+#define E4_TSTORM_FCOE_CONN_AG_CTX_BIT4_SHIFT		4
+#define E4_TSTORM_FCOE_CONN_AG_CTX_BIT5_MASK		0x1
+#define E4_TSTORM_FCOE_CONN_AG_CTX_BIT5_SHIFT		5
+#define E4_TSTORM_FCOE_CONN_AG_CTX_DUMMY_TIMER_CF_MASK	0x3
+#define E4_TSTORM_FCOE_CONN_AG_CTX_DUMMY_TIMER_CF_SHIFT	6
 	u8 flags1;
-#define TSTORM_FCOE_CONN_AG_CTX_FLUSH_Q0_CF_MASK           0x3
-#define TSTORM_FCOE_CONN_AG_CTX_FLUSH_Q0_CF_SHIFT          0
-#define TSTORM_FCOE_CONN_AG_CTX_CF2_MASK                   0x3
-#define TSTORM_FCOE_CONN_AG_CTX_CF2_SHIFT                  2
-#define TSTORM_FCOE_CONN_AG_CTX_TIMER_STOP_ALL_CF_MASK     0x3
-#define TSTORM_FCOE_CONN_AG_CTX_TIMER_STOP_ALL_CF_SHIFT    4
-#define TSTORM_FCOE_CONN_AG_CTX_CF4_MASK                   0x3
-#define TSTORM_FCOE_CONN_AG_CTX_CF4_SHIFT                  6
+#define E4_TSTORM_FCOE_CONN_AG_CTX_FLUSH_Q0_CF_MASK		0x3
+#define E4_TSTORM_FCOE_CONN_AG_CTX_FLUSH_Q0_CF_SHIFT		0
+#define E4_TSTORM_FCOE_CONN_AG_CTX_CF2_MASK			0x3
+#define E4_TSTORM_FCOE_CONN_AG_CTX_CF2_SHIFT			2
+#define E4_TSTORM_FCOE_CONN_AG_CTX_TIMER_STOP_ALL_CF_MASK	0x3
+#define E4_TSTORM_FCOE_CONN_AG_CTX_TIMER_STOP_ALL_CF_SHIFT	4
+#define E4_TSTORM_FCOE_CONN_AG_CTX_CF4_MASK			0x3
+#define E4_TSTORM_FCOE_CONN_AG_CTX_CF4_SHIFT			6
 	u8 flags2;
-#define TSTORM_FCOE_CONN_AG_CTX_CF5_MASK                   0x3
-#define TSTORM_FCOE_CONN_AG_CTX_CF5_SHIFT                  0
-#define TSTORM_FCOE_CONN_AG_CTX_CF6_MASK                   0x3
-#define TSTORM_FCOE_CONN_AG_CTX_CF6_SHIFT                  2
-#define TSTORM_FCOE_CONN_AG_CTX_CF7_MASK                   0x3
-#define TSTORM_FCOE_CONN_AG_CTX_CF7_SHIFT                  4
-#define TSTORM_FCOE_CONN_AG_CTX_CF8_MASK                   0x3
-#define TSTORM_FCOE_CONN_AG_CTX_CF8_SHIFT                  6
+#define E4_TSTORM_FCOE_CONN_AG_CTX_CF5_MASK	0x3
+#define E4_TSTORM_FCOE_CONN_AG_CTX_CF5_SHIFT	0
+#define E4_TSTORM_FCOE_CONN_AG_CTX_CF6_MASK	0x3
+#define E4_TSTORM_FCOE_CONN_AG_CTX_CF6_SHIFT	2
+#define E4_TSTORM_FCOE_CONN_AG_CTX_CF7_MASK	0x3
+#define E4_TSTORM_FCOE_CONN_AG_CTX_CF7_SHIFT	4
+#define E4_TSTORM_FCOE_CONN_AG_CTX_CF8_MASK	0x3
+#define E4_TSTORM_FCOE_CONN_AG_CTX_CF8_SHIFT	6
 	u8 flags3;
-#define TSTORM_FCOE_CONN_AG_CTX_CF9_MASK                   0x3
-#define TSTORM_FCOE_CONN_AG_CTX_CF9_SHIFT                  0
-#define TSTORM_FCOE_CONN_AG_CTX_CF10_MASK                  0x3
-#define TSTORM_FCOE_CONN_AG_CTX_CF10_SHIFT                 2
-#define TSTORM_FCOE_CONN_AG_CTX_DUMMY_TIMER_CF_EN_MASK     0x1
-#define TSTORM_FCOE_CONN_AG_CTX_DUMMY_TIMER_CF_EN_SHIFT    4
-#define TSTORM_FCOE_CONN_AG_CTX_FLUSH_Q0_CF_EN_MASK        0x1
-#define TSTORM_FCOE_CONN_AG_CTX_FLUSH_Q0_CF_EN_SHIFT       5
-#define TSTORM_FCOE_CONN_AG_CTX_CF2EN_MASK                 0x1
-#define TSTORM_FCOE_CONN_AG_CTX_CF2EN_SHIFT                6
-#define TSTORM_FCOE_CONN_AG_CTX_TIMER_STOP_ALL_CF_EN_MASK  0x1
-#define TSTORM_FCOE_CONN_AG_CTX_TIMER_STOP_ALL_CF_EN_SHIFT 7
+#define E4_TSTORM_FCOE_CONN_AG_CTX_CF9_MASK			0x3
+#define E4_TSTORM_FCOE_CONN_AG_CTX_CF9_SHIFT			0
+#define E4_TSTORM_FCOE_CONN_AG_CTX_CF10_MASK			0x3
+#define E4_TSTORM_FCOE_CONN_AG_CTX_CF10_SHIFT			2
+#define E4_TSTORM_FCOE_CONN_AG_CTX_DUMMY_TIMER_CF_EN_MASK	0x1
+#define E4_TSTORM_FCOE_CONN_AG_CTX_DUMMY_TIMER_CF_EN_SHIFT	4
+#define E4_TSTORM_FCOE_CONN_AG_CTX_FLUSH_Q0_CF_EN_MASK		0x1
+#define E4_TSTORM_FCOE_CONN_AG_CTX_FLUSH_Q0_CF_EN_SHIFT		5
+#define E4_TSTORM_FCOE_CONN_AG_CTX_CF2EN_MASK			0x1
+#define E4_TSTORM_FCOE_CONN_AG_CTX_CF2EN_SHIFT			6
+#define E4_TSTORM_FCOE_CONN_AG_CTX_TIMER_STOP_ALL_CF_EN_MASK	0x1
+#define E4_TSTORM_FCOE_CONN_AG_CTX_TIMER_STOP_ALL_CF_EN_SHIFT	7
 	u8 flags4;
-#define TSTORM_FCOE_CONN_AG_CTX_CF4EN_MASK                 0x1
-#define TSTORM_FCOE_CONN_AG_CTX_CF4EN_SHIFT                0
-#define TSTORM_FCOE_CONN_AG_CTX_CF5EN_MASK                 0x1
-#define TSTORM_FCOE_CONN_AG_CTX_CF5EN_SHIFT                1
-#define TSTORM_FCOE_CONN_AG_CTX_CF6EN_MASK                 0x1
-#define TSTORM_FCOE_CONN_AG_CTX_CF6EN_SHIFT                2
-#define TSTORM_FCOE_CONN_AG_CTX_CF7EN_MASK                 0x1
-#define TSTORM_FCOE_CONN_AG_CTX_CF7EN_SHIFT                3
-#define TSTORM_FCOE_CONN_AG_CTX_CF8EN_MASK                 0x1
-#define TSTORM_FCOE_CONN_AG_CTX_CF8EN_SHIFT                4
-#define TSTORM_FCOE_CONN_AG_CTX_CF9EN_MASK                 0x1
-#define TSTORM_FCOE_CONN_AG_CTX_CF9EN_SHIFT                5
-#define TSTORM_FCOE_CONN_AG_CTX_CF10EN_MASK                0x1
-#define TSTORM_FCOE_CONN_AG_CTX_CF10EN_SHIFT               6
-#define TSTORM_FCOE_CONN_AG_CTX_RULE0EN_MASK               0x1
-#define TSTORM_FCOE_CONN_AG_CTX_RULE0EN_SHIFT              7
+#define E4_TSTORM_FCOE_CONN_AG_CTX_CF4EN_MASK		0x1
+#define E4_TSTORM_FCOE_CONN_AG_CTX_CF4EN_SHIFT		0
+#define E4_TSTORM_FCOE_CONN_AG_CTX_CF5EN_MASK		0x1
+#define E4_TSTORM_FCOE_CONN_AG_CTX_CF5EN_SHIFT		1
+#define E4_TSTORM_FCOE_CONN_AG_CTX_CF6EN_MASK		0x1
+#define E4_TSTORM_FCOE_CONN_AG_CTX_CF6EN_SHIFT		2
+#define E4_TSTORM_FCOE_CONN_AG_CTX_CF7EN_MASK		0x1
+#define E4_TSTORM_FCOE_CONN_AG_CTX_CF7EN_SHIFT		3
+#define E4_TSTORM_FCOE_CONN_AG_CTX_CF8EN_MASK		0x1
+#define E4_TSTORM_FCOE_CONN_AG_CTX_CF8EN_SHIFT		4
+#define E4_TSTORM_FCOE_CONN_AG_CTX_CF9EN_MASK		0x1
+#define E4_TSTORM_FCOE_CONN_AG_CTX_CF9EN_SHIFT		5
+#define E4_TSTORM_FCOE_CONN_AG_CTX_CF10EN_MASK		0x1
+#define E4_TSTORM_FCOE_CONN_AG_CTX_CF10EN_SHIFT		6
+#define E4_TSTORM_FCOE_CONN_AG_CTX_RULE0EN_MASK		0x1
+#define E4_TSTORM_FCOE_CONN_AG_CTX_RULE0EN_SHIFT	7
 	u8 flags5;
-#define TSTORM_FCOE_CONN_AG_CTX_RULE1EN_MASK               0x1
-#define TSTORM_FCOE_CONN_AG_CTX_RULE1EN_SHIFT              0
-#define TSTORM_FCOE_CONN_AG_CTX_RULE2EN_MASK               0x1
-#define TSTORM_FCOE_CONN_AG_CTX_RULE2EN_SHIFT              1
-#define TSTORM_FCOE_CONN_AG_CTX_RULE3EN_MASK               0x1
-#define TSTORM_FCOE_CONN_AG_CTX_RULE3EN_SHIFT              2
-#define TSTORM_FCOE_CONN_AG_CTX_RULE4EN_MASK               0x1
-#define TSTORM_FCOE_CONN_AG_CTX_RULE4EN_SHIFT              3
-#define TSTORM_FCOE_CONN_AG_CTX_RULE5EN_MASK               0x1
-#define TSTORM_FCOE_CONN_AG_CTX_RULE5EN_SHIFT              4
-#define TSTORM_FCOE_CONN_AG_CTX_RULE6EN_MASK               0x1
-#define TSTORM_FCOE_CONN_AG_CTX_RULE6EN_SHIFT              5
-#define TSTORM_FCOE_CONN_AG_CTX_RULE7EN_MASK               0x1
-#define TSTORM_FCOE_CONN_AG_CTX_RULE7EN_SHIFT              6
-#define TSTORM_FCOE_CONN_AG_CTX_RULE8EN_MASK               0x1
-#define TSTORM_FCOE_CONN_AG_CTX_RULE8EN_SHIFT              7
+#define E4_TSTORM_FCOE_CONN_AG_CTX_RULE1EN_MASK		0x1
+#define E4_TSTORM_FCOE_CONN_AG_CTX_RULE1EN_SHIFT	0
+#define E4_TSTORM_FCOE_CONN_AG_CTX_RULE2EN_MASK		0x1
+#define E4_TSTORM_FCOE_CONN_AG_CTX_RULE2EN_SHIFT	1
+#define E4_TSTORM_FCOE_CONN_AG_CTX_RULE3EN_MASK		0x1
+#define E4_TSTORM_FCOE_CONN_AG_CTX_RULE3EN_SHIFT	2
+#define E4_TSTORM_FCOE_CONN_AG_CTX_RULE4EN_MASK		0x1
+#define E4_TSTORM_FCOE_CONN_AG_CTX_RULE4EN_SHIFT	3
+#define E4_TSTORM_FCOE_CONN_AG_CTX_RULE5EN_MASK		0x1
+#define E4_TSTORM_FCOE_CONN_AG_CTX_RULE5EN_SHIFT	4
+#define E4_TSTORM_FCOE_CONN_AG_CTX_RULE6EN_MASK		0x1
+#define E4_TSTORM_FCOE_CONN_AG_CTX_RULE6EN_SHIFT	5
+#define E4_TSTORM_FCOE_CONN_AG_CTX_RULE7EN_MASK		0x1
+#define E4_TSTORM_FCOE_CONN_AG_CTX_RULE7EN_SHIFT	6
+#define E4_TSTORM_FCOE_CONN_AG_CTX_RULE8EN_MASK		0x1
+#define E4_TSTORM_FCOE_CONN_AG_CTX_RULE8EN_SHIFT	7
 	__le32 reg0;
 	__le32 reg1;
 };
 
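Each *_MASK/*_SHIFT pair above names a sub-byte bitfield within one of the flags bytes; the E4_ prefix scopes the definitions to E4-generation hardware. A minimal sketch of the access idiom, assuming helpers equivalent to the GET_FIELD()/SET_FIELD() macros the qed driver keeps in its common HSI header (the macro bodies, the function name, and the value written below are illustrative, not part of this patch):

	/* Illustrative re-statement of the qed field-access helpers. */
	#define GET_FIELD(value, name) \
		(((value) >> (name##_SHIFT)) & (name##_MASK))

	#define SET_FIELD(value, name, flag)                            \
		do {                                                    \
			(value) &= ~((name##_MASK) << (name##_SHIFT));  \
			(value) |= ((u64)(flag)) << (name##_SHIFT);     \
		} while (0)

	static void e4_tstorm_flags_sketch(struct e4_tstorm_fcoe_conn_ag_ctx *ctx)
	{
		u8 cf;

		/* FLUSH_Q0_CF is the 2-bit field at bits 0..1 of flags1. */
		SET_FIELD(ctx->flags1, E4_TSTORM_FCOE_CONN_AG_CTX_FLUSH_Q0_CF, 2);
		cf = GET_FIELD(ctx->flags1, E4_TSTORM_FCOE_CONN_AG_CTX_FLUSH_Q0_CF);
		(void)cf;
	}

One helper pair serves every storm context, because token pasting on the field name resolves both the mask and the shift of whichever definition is passed in.
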
-struct ustorm_fcoe_conn_ag_ctx {
+struct e4_ustorm_fcoe_conn_ag_ctx {
 	u8 byte0;
 	u8 byte1;
 	u8 flags0;
-#define USTORM_FCOE_CONN_AG_CTX_BIT0_MASK     0x1
-#define USTORM_FCOE_CONN_AG_CTX_BIT0_SHIFT    0
-#define USTORM_FCOE_CONN_AG_CTX_BIT1_MASK     0x1
-#define USTORM_FCOE_CONN_AG_CTX_BIT1_SHIFT    1
-#define USTORM_FCOE_CONN_AG_CTX_CF0_MASK      0x3
-#define USTORM_FCOE_CONN_AG_CTX_CF0_SHIFT     2
-#define USTORM_FCOE_CONN_AG_CTX_CF1_MASK      0x3
-#define USTORM_FCOE_CONN_AG_CTX_CF1_SHIFT     4
-#define USTORM_FCOE_CONN_AG_CTX_CF2_MASK      0x3
-#define USTORM_FCOE_CONN_AG_CTX_CF2_SHIFT     6
+#define E4_USTORM_FCOE_CONN_AG_CTX_BIT0_MASK	0x1
+#define E4_USTORM_FCOE_CONN_AG_CTX_BIT0_SHIFT	0
+#define E4_USTORM_FCOE_CONN_AG_CTX_BIT1_MASK	0x1
+#define E4_USTORM_FCOE_CONN_AG_CTX_BIT1_SHIFT	1
+#define E4_USTORM_FCOE_CONN_AG_CTX_CF0_MASK	0x3
+#define E4_USTORM_FCOE_CONN_AG_CTX_CF0_SHIFT	2
+#define E4_USTORM_FCOE_CONN_AG_CTX_CF1_MASK	0x3
+#define E4_USTORM_FCOE_CONN_AG_CTX_CF1_SHIFT	4
+#define E4_USTORM_FCOE_CONN_AG_CTX_CF2_MASK	0x3
+#define E4_USTORM_FCOE_CONN_AG_CTX_CF2_SHIFT	6
 	u8 flags1;
-#define USTORM_FCOE_CONN_AG_CTX_CF3_MASK      0x3
-#define USTORM_FCOE_CONN_AG_CTX_CF3_SHIFT     0
-#define USTORM_FCOE_CONN_AG_CTX_CF4_MASK      0x3
-#define USTORM_FCOE_CONN_AG_CTX_CF4_SHIFT     2
-#define USTORM_FCOE_CONN_AG_CTX_CF5_MASK      0x3
-#define USTORM_FCOE_CONN_AG_CTX_CF5_SHIFT     4
-#define USTORM_FCOE_CONN_AG_CTX_CF6_MASK      0x3
-#define USTORM_FCOE_CONN_AG_CTX_CF6_SHIFT     6
+#define E4_USTORM_FCOE_CONN_AG_CTX_CF3_MASK	0x3
+#define E4_USTORM_FCOE_CONN_AG_CTX_CF3_SHIFT	0
+#define E4_USTORM_FCOE_CONN_AG_CTX_CF4_MASK	0x3
+#define E4_USTORM_FCOE_CONN_AG_CTX_CF4_SHIFT	2
+#define E4_USTORM_FCOE_CONN_AG_CTX_CF5_MASK	0x3
+#define E4_USTORM_FCOE_CONN_AG_CTX_CF5_SHIFT	4
+#define E4_USTORM_FCOE_CONN_AG_CTX_CF6_MASK	0x3
+#define E4_USTORM_FCOE_CONN_AG_CTX_CF6_SHIFT	6
 	u8 flags2;
-#define USTORM_FCOE_CONN_AG_CTX_CF0EN_MASK    0x1
-#define USTORM_FCOE_CONN_AG_CTX_CF0EN_SHIFT   0
-#define USTORM_FCOE_CONN_AG_CTX_CF1EN_MASK    0x1
-#define USTORM_FCOE_CONN_AG_CTX_CF1EN_SHIFT   1
-#define USTORM_FCOE_CONN_AG_CTX_CF2EN_MASK    0x1
-#define USTORM_FCOE_CONN_AG_CTX_CF2EN_SHIFT   2
-#define USTORM_FCOE_CONN_AG_CTX_CF3EN_MASK    0x1
-#define USTORM_FCOE_CONN_AG_CTX_CF3EN_SHIFT   3
-#define USTORM_FCOE_CONN_AG_CTX_CF4EN_MASK    0x1
-#define USTORM_FCOE_CONN_AG_CTX_CF4EN_SHIFT   4
-#define USTORM_FCOE_CONN_AG_CTX_CF5EN_MASK    0x1
-#define USTORM_FCOE_CONN_AG_CTX_CF5EN_SHIFT   5
-#define USTORM_FCOE_CONN_AG_CTX_CF6EN_MASK    0x1
-#define USTORM_FCOE_CONN_AG_CTX_CF6EN_SHIFT   6
-#define USTORM_FCOE_CONN_AG_CTX_RULE0EN_MASK  0x1
-#define USTORM_FCOE_CONN_AG_CTX_RULE0EN_SHIFT 7
+#define E4_USTORM_FCOE_CONN_AG_CTX_CF0EN_MASK		0x1
+#define E4_USTORM_FCOE_CONN_AG_CTX_CF0EN_SHIFT		0
+#define E4_USTORM_FCOE_CONN_AG_CTX_CF1EN_MASK		0x1
+#define E4_USTORM_FCOE_CONN_AG_CTX_CF1EN_SHIFT		1
+#define E4_USTORM_FCOE_CONN_AG_CTX_CF2EN_MASK		0x1
+#define E4_USTORM_FCOE_CONN_AG_CTX_CF2EN_SHIFT		2
+#define E4_USTORM_FCOE_CONN_AG_CTX_CF3EN_MASK		0x1
+#define E4_USTORM_FCOE_CONN_AG_CTX_CF3EN_SHIFT		3
+#define E4_USTORM_FCOE_CONN_AG_CTX_CF4EN_MASK		0x1
+#define E4_USTORM_FCOE_CONN_AG_CTX_CF4EN_SHIFT		4
+#define E4_USTORM_FCOE_CONN_AG_CTX_CF5EN_MASK		0x1
+#define E4_USTORM_FCOE_CONN_AG_CTX_CF5EN_SHIFT		5
+#define E4_USTORM_FCOE_CONN_AG_CTX_CF6EN_MASK		0x1
+#define E4_USTORM_FCOE_CONN_AG_CTX_CF6EN_SHIFT		6
+#define E4_USTORM_FCOE_CONN_AG_CTX_RULE0EN_MASK		0x1
+#define E4_USTORM_FCOE_CONN_AG_CTX_RULE0EN_SHIFT	7
 	u8 flags3;
-#define USTORM_FCOE_CONN_AG_CTX_RULE1EN_MASK  0x1
-#define USTORM_FCOE_CONN_AG_CTX_RULE1EN_SHIFT 0
-#define USTORM_FCOE_CONN_AG_CTX_RULE2EN_MASK  0x1
-#define USTORM_FCOE_CONN_AG_CTX_RULE2EN_SHIFT 1
-#define USTORM_FCOE_CONN_AG_CTX_RULE3EN_MASK  0x1
-#define USTORM_FCOE_CONN_AG_CTX_RULE3EN_SHIFT 2
-#define USTORM_FCOE_CONN_AG_CTX_RULE4EN_MASK  0x1
-#define USTORM_FCOE_CONN_AG_CTX_RULE4EN_SHIFT 3
-#define USTORM_FCOE_CONN_AG_CTX_RULE5EN_MASK  0x1
-#define USTORM_FCOE_CONN_AG_CTX_RULE5EN_SHIFT 4
-#define USTORM_FCOE_CONN_AG_CTX_RULE6EN_MASK  0x1
-#define USTORM_FCOE_CONN_AG_CTX_RULE6EN_SHIFT 5
-#define USTORM_FCOE_CONN_AG_CTX_RULE7EN_MASK  0x1
-#define USTORM_FCOE_CONN_AG_CTX_RULE7EN_SHIFT 6
-#define USTORM_FCOE_CONN_AG_CTX_RULE8EN_MASK  0x1
-#define USTORM_FCOE_CONN_AG_CTX_RULE8EN_SHIFT 7
+#define E4_USTORM_FCOE_CONN_AG_CTX_RULE1EN_MASK		0x1
+#define E4_USTORM_FCOE_CONN_AG_CTX_RULE1EN_SHIFT	0
+#define E4_USTORM_FCOE_CONN_AG_CTX_RULE2EN_MASK		0x1
+#define E4_USTORM_FCOE_CONN_AG_CTX_RULE2EN_SHIFT	1
+#define E4_USTORM_FCOE_CONN_AG_CTX_RULE3EN_MASK		0x1
+#define E4_USTORM_FCOE_CONN_AG_CTX_RULE3EN_SHIFT	2
+#define E4_USTORM_FCOE_CONN_AG_CTX_RULE4EN_MASK		0x1
+#define E4_USTORM_FCOE_CONN_AG_CTX_RULE4EN_SHIFT	3
+#define E4_USTORM_FCOE_CONN_AG_CTX_RULE5EN_MASK		0x1
+#define E4_USTORM_FCOE_CONN_AG_CTX_RULE5EN_SHIFT	4
+#define E4_USTORM_FCOE_CONN_AG_CTX_RULE6EN_MASK		0x1
+#define E4_USTORM_FCOE_CONN_AG_CTX_RULE6EN_SHIFT	5
+#define E4_USTORM_FCOE_CONN_AG_CTX_RULE7EN_MASK		0x1
+#define E4_USTORM_FCOE_CONN_AG_CTX_RULE7EN_SHIFT	6
+#define E4_USTORM_FCOE_CONN_AG_CTX_RULE8EN_MASK		0x1
+#define E4_USTORM_FCOE_CONN_AG_CTX_RULE8EN_SHIFT	7
 	u8 byte2;
 	u8 byte3;
 	__le16 word0;
@@ -9996,72 +10633,76 @@ struct ustorm_fcoe_conn_ag_ctx {
 	__le16 word3;
 };
 
+/* The fcoe storm context of Tstorm */
 struct tstorm_fcoe_conn_st_ctx {
 	__le16 stat_ram_addr;
 	__le16 rx_max_fc_payload_len;
 	__le16 e_d_tov_val;
 	u8 flags;
-#define TSTORM_FCOE_CONN_ST_CTX_INC_SEQ_CNT_MASK   0x1
-#define TSTORM_FCOE_CONN_ST_CTX_INC_SEQ_CNT_SHIFT  0
-#define TSTORM_FCOE_CONN_ST_CTX_SUPPORT_CONF_MASK  0x1
-#define TSTORM_FCOE_CONN_ST_CTX_SUPPORT_CONF_SHIFT 1
-#define TSTORM_FCOE_CONN_ST_CTX_DEF_Q_IDX_MASK     0x3F
-#define TSTORM_FCOE_CONN_ST_CTX_DEF_Q_IDX_SHIFT    2
+#define TSTORM_FCOE_CONN_ST_CTX_INC_SEQ_CNT_MASK	0x1
+#define TSTORM_FCOE_CONN_ST_CTX_INC_SEQ_CNT_SHIFT	0
+#define TSTORM_FCOE_CONN_ST_CTX_SUPPORT_CONF_MASK	0x1
+#define TSTORM_FCOE_CONN_ST_CTX_SUPPORT_CONF_SHIFT	1
+#define TSTORM_FCOE_CONN_ST_CTX_DEF_Q_IDX_MASK		0x3F
+#define TSTORM_FCOE_CONN_ST_CTX_DEF_Q_IDX_SHIFT		2
 	u8 timers_cleanup_invocation_cnt;
 	__le32 reserved1[2];
-	__le32 dst_mac_address_bytes0to3;
-	__le16 dst_mac_address_bytes4to5;
+	__le32 dst_mac_address_bytes_0_to_3;
+	__le16 dst_mac_address_bytes_4_to_5;
 	__le16 ramrod_echo;
 	u8 flags1;
-#define TSTORM_FCOE_CONN_ST_CTX_MODE_MASK          0x3
-#define TSTORM_FCOE_CONN_ST_CTX_MODE_SHIFT         0
-#define TSTORM_FCOE_CONN_ST_CTX_RESERVED_MASK      0x3F
-#define TSTORM_FCOE_CONN_ST_CTX_RESERVED_SHIFT     2
-	u8 q_relative_offset;
+#define TSTORM_FCOE_CONN_ST_CTX_MODE_MASK	0x3
+#define TSTORM_FCOE_CONN_ST_CTX_MODE_SHIFT	0
+#define TSTORM_FCOE_CONN_ST_CTX_RESERVED_MASK	0x3F
+#define TSTORM_FCOE_CONN_ST_CTX_RESERVED_SHIFT	2
+	u8 cq_relative_offset;
+	u8 cmdq_relative_offset;
 	u8 bdq_resource_id;
-	u8 reserved0[5];
+	u8 reserved0[4];
 };
 
-struct mstorm_fcoe_conn_ag_ctx {
+struct e4_mstorm_fcoe_conn_ag_ctx {
 	u8 byte0;
 	u8 byte1;
 	u8 flags0;
-#define MSTORM_FCOE_CONN_AG_CTX_BIT0_MASK     0x1
-#define MSTORM_FCOE_CONN_AG_CTX_BIT0_SHIFT    0
-#define MSTORM_FCOE_CONN_AG_CTX_BIT1_MASK     0x1
-#define MSTORM_FCOE_CONN_AG_CTX_BIT1_SHIFT    1
-#define MSTORM_FCOE_CONN_AG_CTX_CF0_MASK      0x3
-#define MSTORM_FCOE_CONN_AG_CTX_CF0_SHIFT     2
-#define MSTORM_FCOE_CONN_AG_CTX_CF1_MASK      0x3
-#define MSTORM_FCOE_CONN_AG_CTX_CF1_SHIFT     4
-#define MSTORM_FCOE_CONN_AG_CTX_CF2_MASK      0x3
-#define MSTORM_FCOE_CONN_AG_CTX_CF2_SHIFT     6
+#define E4_MSTORM_FCOE_CONN_AG_CTX_BIT0_MASK	0x1
+#define E4_MSTORM_FCOE_CONN_AG_CTX_BIT0_SHIFT	0
+#define E4_MSTORM_FCOE_CONN_AG_CTX_BIT1_MASK	0x1
+#define E4_MSTORM_FCOE_CONN_AG_CTX_BIT1_SHIFT	1
+#define E4_MSTORM_FCOE_CONN_AG_CTX_CF0_MASK	0x3
+#define E4_MSTORM_FCOE_CONN_AG_CTX_CF0_SHIFT	2
+#define E4_MSTORM_FCOE_CONN_AG_CTX_CF1_MASK	0x3
+#define E4_MSTORM_FCOE_CONN_AG_CTX_CF1_SHIFT	4
+#define E4_MSTORM_FCOE_CONN_AG_CTX_CF2_MASK	0x3
+#define E4_MSTORM_FCOE_CONN_AG_CTX_CF2_SHIFT	6
 	u8 flags1;
-#define MSTORM_FCOE_CONN_AG_CTX_CF0EN_MASK    0x1
-#define MSTORM_FCOE_CONN_AG_CTX_CF0EN_SHIFT   0
-#define MSTORM_FCOE_CONN_AG_CTX_CF1EN_MASK    0x1
-#define MSTORM_FCOE_CONN_AG_CTX_CF1EN_SHIFT   1
-#define MSTORM_FCOE_CONN_AG_CTX_CF2EN_MASK    0x1
-#define MSTORM_FCOE_CONN_AG_CTX_CF2EN_SHIFT   2
-#define MSTORM_FCOE_CONN_AG_CTX_RULE0EN_MASK  0x1
-#define MSTORM_FCOE_CONN_AG_CTX_RULE0EN_SHIFT 3
-#define MSTORM_FCOE_CONN_AG_CTX_RULE1EN_MASK  0x1
-#define MSTORM_FCOE_CONN_AG_CTX_RULE1EN_SHIFT 4
-#define MSTORM_FCOE_CONN_AG_CTX_RULE2EN_MASK  0x1
-#define MSTORM_FCOE_CONN_AG_CTX_RULE2EN_SHIFT 5
-#define MSTORM_FCOE_CONN_AG_CTX_RULE3EN_MASK  0x1
-#define MSTORM_FCOE_CONN_AG_CTX_RULE3EN_SHIFT 6
-#define MSTORM_FCOE_CONN_AG_CTX_RULE4EN_MASK  0x1
-#define MSTORM_FCOE_CONN_AG_CTX_RULE4EN_SHIFT 7
+#define E4_MSTORM_FCOE_CONN_AG_CTX_CF0EN_MASK		0x1
+#define E4_MSTORM_FCOE_CONN_AG_CTX_CF0EN_SHIFT		0
+#define E4_MSTORM_FCOE_CONN_AG_CTX_CF1EN_MASK		0x1
+#define E4_MSTORM_FCOE_CONN_AG_CTX_CF1EN_SHIFT		1
+#define E4_MSTORM_FCOE_CONN_AG_CTX_CF2EN_MASK		0x1
+#define E4_MSTORM_FCOE_CONN_AG_CTX_CF2EN_SHIFT		2
+#define E4_MSTORM_FCOE_CONN_AG_CTX_RULE0EN_MASK		0x1
+#define E4_MSTORM_FCOE_CONN_AG_CTX_RULE0EN_SHIFT	3
+#define E4_MSTORM_FCOE_CONN_AG_CTX_RULE1EN_MASK		0x1
+#define E4_MSTORM_FCOE_CONN_AG_CTX_RULE1EN_SHIFT	4
+#define E4_MSTORM_FCOE_CONN_AG_CTX_RULE2EN_MASK		0x1
+#define E4_MSTORM_FCOE_CONN_AG_CTX_RULE2EN_SHIFT	5
+#define E4_MSTORM_FCOE_CONN_AG_CTX_RULE3EN_MASK		0x1
+#define E4_MSTORM_FCOE_CONN_AG_CTX_RULE3EN_SHIFT	6
+#define E4_MSTORM_FCOE_CONN_AG_CTX_RULE4EN_MASK		0x1
+#define E4_MSTORM_FCOE_CONN_AG_CTX_RULE4EN_SHIFT	7
 	__le16 word0;
 	__le16 word1;
 	__le32 reg0;
 	__le32 reg1;
 };
 
+/* Fast path part of the fcoe storm context of Mstorm */
 struct fcoe_mstorm_fcoe_conn_st_ctx_fp {
 	__le16 xfer_prod;
-	__le16 reserved1;
+	u8 num_cqs;
+	u8 reserved1;
 	u8 protection_info;
 #define FCOE_MSTORM_FCOE_CONN_ST_CTX_FP_SUPPORT_PROTECTION_MASK  0x1
 #define FCOE_MSTORM_FCOE_CONN_ST_CTX_FP_SUPPORT_PROTECTION_SHIFT 0
@@ -10073,6 +10714,7 @@ struct fcoe_mstorm_fcoe_conn_st_ctx_fp {
 	u8 reserved2[2];
 };
 
+/* Non fast path part of the fcoe storm context of Mstorm */
 struct fcoe_mstorm_fcoe_conn_st_ctx_non_fp {
 	__le16 conn_id;
 	__le16 stat_ram_addr;
@@ -10088,37 +10730,46 @@ struct fcoe_mstorm_fcoe_conn_st_ctx_non_fp {
 	struct regpair reserved2[3];
 };
 
+/* The fcoe storm context of Mstorm */
 struct mstorm_fcoe_conn_st_ctx {
 	struct fcoe_mstorm_fcoe_conn_st_ctx_fp fp;
 	struct fcoe_mstorm_fcoe_conn_st_ctx_non_fp non_fp;
 };
 
-struct fcoe_conn_context {
+/* fcoe connection context */
+struct e4_fcoe_conn_context {
 	struct ystorm_fcoe_conn_st_ctx ystorm_st_context;
 	struct pstorm_fcoe_conn_st_ctx pstorm_st_context;
 	struct regpair pstorm_st_padding[2];
 	struct xstorm_fcoe_conn_st_ctx xstorm_st_context;
-	struct xstorm_fcoe_conn_ag_ctx xstorm_ag_context;
+	struct e4_xstorm_fcoe_conn_ag_ctx xstorm_ag_context;
 	struct regpair xstorm_ag_padding[6];
 	struct ustorm_fcoe_conn_st_ctx ustorm_st_context;
 	struct regpair ustorm_st_padding[2];
-	struct tstorm_fcoe_conn_ag_ctx tstorm_ag_context;
+	struct e4_tstorm_fcoe_conn_ag_ctx tstorm_ag_context;
 	struct regpair tstorm_ag_padding[2];
 	struct timers_context timer_context;
-	struct ustorm_fcoe_conn_ag_ctx ustorm_ag_context;
+	struct e4_ustorm_fcoe_conn_ag_ctx ustorm_ag_context;
 	struct tstorm_fcoe_conn_st_ctx tstorm_st_context;
-	struct mstorm_fcoe_conn_ag_ctx mstorm_ag_context;
+	struct e4_mstorm_fcoe_conn_ag_ctx mstorm_ag_context;
 	struct mstorm_fcoe_conn_st_ctx mstorm_st_context;
 };
 
+/* FCoE connection offload params passed by driver to FW in FCoE offload
+ * ramrod.
+ */
 struct fcoe_conn_offload_ramrod_params {
 	struct fcoe_conn_offload_ramrod_data offload_ramrod_data;
 };
 
+/* FCoE connection terminate params passed by driver to FW in FCoE terminate
+ * conn ramrod.
+ */
 struct fcoe_conn_terminate_ramrod_params {
 	struct fcoe_conn_terminate_ramrod_data terminate_ramrod_data;
 };
 
+/* FCoE event type */
 enum fcoe_event_type {
 	FCOE_EVENT_INIT_FUNC,
 	FCOE_EVENT_DESTROY_FUNC,
@@ -10129,10 +10780,12 @@ enum fcoe_event_type {
 	MAX_FCOE_EVENT_TYPE
 };
 
+/* FCoE init params passed by driver to FW in FCoE init ramrod */
 struct fcoe_init_ramrod_params {
 	struct fcoe_init_func_ramrod_data init_ramrod_data;
 };
 
+/* FCoE ramrod Command IDs */
 enum fcoe_ramrod_cmd_id {
 	FCOE_RAMROD_CMD_ID_INIT_FUNC,
 	FCOE_RAMROD_CMD_ID_DESTROY_FUNC,
@@ -10142,41 +10795,44 @@ enum fcoe_ramrod_cmd_id {
 	MAX_FCOE_RAMROD_CMD_ID
 };
 
+/* FCoE statistics params buffer passed by driver to FW in FCoE statistics
+ * ramrod.
+ */
 struct fcoe_stat_ramrod_params {
 	struct fcoe_stat_ramrod_data stat_ramrod_data;
 };
 
-struct ystorm_fcoe_conn_ag_ctx {
+struct e4_ystorm_fcoe_conn_ag_ctx {
 	u8 byte0;
 	u8 byte1;
 	u8 flags0;
-#define YSTORM_FCOE_CONN_AG_CTX_BIT0_MASK     0x1
-#define YSTORM_FCOE_CONN_AG_CTX_BIT0_SHIFT    0
-#define YSTORM_FCOE_CONN_AG_CTX_BIT1_MASK     0x1
-#define YSTORM_FCOE_CONN_AG_CTX_BIT1_SHIFT    1
-#define YSTORM_FCOE_CONN_AG_CTX_CF0_MASK      0x3
-#define YSTORM_FCOE_CONN_AG_CTX_CF0_SHIFT     2
-#define YSTORM_FCOE_CONN_AG_CTX_CF1_MASK      0x3
-#define YSTORM_FCOE_CONN_AG_CTX_CF1_SHIFT     4
-#define YSTORM_FCOE_CONN_AG_CTX_CF2_MASK      0x3
-#define YSTORM_FCOE_CONN_AG_CTX_CF2_SHIFT     6
+#define E4_YSTORM_FCOE_CONN_AG_CTX_BIT0_MASK	0x1
+#define E4_YSTORM_FCOE_CONN_AG_CTX_BIT0_SHIFT	0
+#define E4_YSTORM_FCOE_CONN_AG_CTX_BIT1_MASK	0x1
+#define E4_YSTORM_FCOE_CONN_AG_CTX_BIT1_SHIFT	1
+#define E4_YSTORM_FCOE_CONN_AG_CTX_CF0_MASK	0x3
+#define E4_YSTORM_FCOE_CONN_AG_CTX_CF0_SHIFT	2
+#define E4_YSTORM_FCOE_CONN_AG_CTX_CF1_MASK	0x3
+#define E4_YSTORM_FCOE_CONN_AG_CTX_CF1_SHIFT	4
+#define E4_YSTORM_FCOE_CONN_AG_CTX_CF2_MASK	0x3
+#define E4_YSTORM_FCOE_CONN_AG_CTX_CF2_SHIFT	6
 	u8 flags1;
-#define YSTORM_FCOE_CONN_AG_CTX_CF0EN_MASK    0x1
-#define YSTORM_FCOE_CONN_AG_CTX_CF0EN_SHIFT   0
-#define YSTORM_FCOE_CONN_AG_CTX_CF1EN_MASK    0x1
-#define YSTORM_FCOE_CONN_AG_CTX_CF1EN_SHIFT   1
-#define YSTORM_FCOE_CONN_AG_CTX_CF2EN_MASK    0x1
-#define YSTORM_FCOE_CONN_AG_CTX_CF2EN_SHIFT   2
-#define YSTORM_FCOE_CONN_AG_CTX_RULE0EN_MASK  0x1
-#define YSTORM_FCOE_CONN_AG_CTX_RULE0EN_SHIFT 3
-#define YSTORM_FCOE_CONN_AG_CTX_RULE1EN_MASK  0x1
-#define YSTORM_FCOE_CONN_AG_CTX_RULE1EN_SHIFT 4
-#define YSTORM_FCOE_CONN_AG_CTX_RULE2EN_MASK  0x1
-#define YSTORM_FCOE_CONN_AG_CTX_RULE2EN_SHIFT 5
-#define YSTORM_FCOE_CONN_AG_CTX_RULE3EN_MASK  0x1
-#define YSTORM_FCOE_CONN_AG_CTX_RULE3EN_SHIFT 6
-#define YSTORM_FCOE_CONN_AG_CTX_RULE4EN_MASK  0x1
-#define YSTORM_FCOE_CONN_AG_CTX_RULE4EN_SHIFT 7
+#define E4_YSTORM_FCOE_CONN_AG_CTX_CF0EN_MASK		0x1
+#define E4_YSTORM_FCOE_CONN_AG_CTX_CF0EN_SHIFT		0
+#define E4_YSTORM_FCOE_CONN_AG_CTX_CF1EN_MASK		0x1
+#define E4_YSTORM_FCOE_CONN_AG_CTX_CF1EN_SHIFT		1
+#define E4_YSTORM_FCOE_CONN_AG_CTX_CF2EN_MASK		0x1
+#define E4_YSTORM_FCOE_CONN_AG_CTX_CF2EN_SHIFT		2
+#define E4_YSTORM_FCOE_CONN_AG_CTX_RULE0EN_MASK		0x1
+#define E4_YSTORM_FCOE_CONN_AG_CTX_RULE0EN_SHIFT	3
+#define E4_YSTORM_FCOE_CONN_AG_CTX_RULE1EN_MASK		0x1
+#define E4_YSTORM_FCOE_CONN_AG_CTX_RULE1EN_SHIFT	4
+#define E4_YSTORM_FCOE_CONN_AG_CTX_RULE2EN_MASK		0x1
+#define E4_YSTORM_FCOE_CONN_AG_CTX_RULE2EN_SHIFT	5
+#define E4_YSTORM_FCOE_CONN_AG_CTX_RULE3EN_MASK		0x1
+#define E4_YSTORM_FCOE_CONN_AG_CTX_RULE3EN_SHIFT	6
+#define E4_YSTORM_FCOE_CONN_AG_CTX_RULE4EN_MASK		0x1
+#define E4_YSTORM_FCOE_CONN_AG_CTX_RULE4EN_SHIFT	7
 	u8 byte2;
 	u8 byte3;
 	__le16 word0;
@@ -10190,230 +10846,233 @@ struct ystorm_fcoe_conn_ag_ctx {
 	__le32 reg3;
 };
 
+/* The iscsi storm connection context of Ystorm */
 struct ystorm_iscsi_conn_st_ctx {
-	__le32 reserved[4];
+	__le32 reserved[8];
 };
 
+/* Combined iSCSI and TCP storm connection of Pstorm */
 struct pstorm_iscsi_tcp_conn_st_ctx {
 	__le32 tcp[32];
 	__le32 iscsi[4];
 };
 
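These storm state contexts must match the firmware's layout byte-for-byte, so their sizes are fixed by contract rather than by convenience. A hedged sketch of a compile-time size check for the combined Pstorm layout above (the arithmetic simply restates the two array sizes; the function is illustrative and assumes BUILD_BUG_ON from <linux/build_bug.h>, and is not part of this patch):

	static inline void pstorm_iscsi_ctx_size_check(void)
	{
		/* 32 TCP dwords + 4 iSCSI dwords = 36 dwords = 144 bytes. */
		BUILD_BUG_ON(sizeof(struct pstorm_iscsi_tcp_conn_st_ctx) !=
			     36 * sizeof(__le32));
	}

A check like this, placed in any compiled function, turns a silent layout drift between driver and firmware headers into a build failure.
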
+/* The combined tcp and iscsi storm context of Xstorm */
 struct xstorm_iscsi_tcp_conn_st_ctx {
-	__le32 reserved_iscsi[40];
 	__le32 reserved_tcp[4];
+	__le32 reserved_iscsi[44];
 };
 
-struct xstorm_iscsi_conn_ag_ctx {
+struct e4_xstorm_iscsi_conn_ag_ctx {
 	u8 cdu_validation;
 	u8 state;
 	u8 flags0;
-#define XSTORM_ISCSI_CONN_AG_CTX_EXIST_IN_QM0_MASK                0x1
-#define XSTORM_ISCSI_CONN_AG_CTX_EXIST_IN_QM0_SHIFT               0
-#define XSTORM_ISCSI_CONN_AG_CTX_EXIST_IN_QM1_MASK                0x1
-#define XSTORM_ISCSI_CONN_AG_CTX_EXIST_IN_QM1_SHIFT               1
-#define XSTORM_ISCSI_CONN_AG_CTX_RESERVED1_MASK                   0x1
-#define XSTORM_ISCSI_CONN_AG_CTX_RESERVED1_SHIFT                  2
-#define XSTORM_ISCSI_CONN_AG_CTX_EXIST_IN_QM3_MASK                0x1
-#define XSTORM_ISCSI_CONN_AG_CTX_EXIST_IN_QM3_SHIFT               3
-#define XSTORM_ISCSI_CONN_AG_CTX_BIT4_MASK                        0x1
-#define XSTORM_ISCSI_CONN_AG_CTX_BIT4_SHIFT                       4
-#define XSTORM_ISCSI_CONN_AG_CTX_RESERVED2_MASK                   0x1
-#define XSTORM_ISCSI_CONN_AG_CTX_RESERVED2_SHIFT                  5
-#define XSTORM_ISCSI_CONN_AG_CTX_BIT6_MASK                        0x1
-#define XSTORM_ISCSI_CONN_AG_CTX_BIT6_SHIFT                       6
-#define XSTORM_ISCSI_CONN_AG_CTX_BIT7_MASK                        0x1
-#define XSTORM_ISCSI_CONN_AG_CTX_BIT7_SHIFT                       7
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_EXIST_IN_QM0_MASK	0x1
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_EXIST_IN_QM0_SHIFT	0
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_EXIST_IN_QM1_MASK	0x1
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_EXIST_IN_QM1_SHIFT	1
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_RESERVED1_MASK	0x1
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_RESERVED1_SHIFT	2
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_EXIST_IN_QM3_MASK	0x1
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_EXIST_IN_QM3_SHIFT	3
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_BIT4_MASK		0x1
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_BIT4_SHIFT		4
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_RESERVED2_MASK	0x1
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_RESERVED2_SHIFT	5
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_BIT6_MASK		0x1
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_BIT6_SHIFT		6
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_BIT7_MASK		0x1
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_BIT7_SHIFT		7
 	u8 flags1;
-#define XSTORM_ISCSI_CONN_AG_CTX_BIT8_MASK                        0x1
-#define XSTORM_ISCSI_CONN_AG_CTX_BIT8_SHIFT                       0
-#define XSTORM_ISCSI_CONN_AG_CTX_BIT9_MASK                        0x1
-#define XSTORM_ISCSI_CONN_AG_CTX_BIT9_SHIFT                       1
-#define XSTORM_ISCSI_CONN_AG_CTX_BIT10_MASK                       0x1
-#define XSTORM_ISCSI_CONN_AG_CTX_BIT10_SHIFT                      2
-#define XSTORM_ISCSI_CONN_AG_CTX_BIT11_MASK                       0x1
-#define XSTORM_ISCSI_CONN_AG_CTX_BIT11_SHIFT                      3
-#define XSTORM_ISCSI_CONN_AG_CTX_BIT12_MASK                       0x1
-#define XSTORM_ISCSI_CONN_AG_CTX_BIT12_SHIFT                      4
-#define XSTORM_ISCSI_CONN_AG_CTX_BIT13_MASK                       0x1
-#define XSTORM_ISCSI_CONN_AG_CTX_BIT13_SHIFT                      5
-#define XSTORM_ISCSI_CONN_AG_CTX_BIT14_MASK                       0x1
-#define XSTORM_ISCSI_CONN_AG_CTX_BIT14_SHIFT                      6
-#define XSTORM_ISCSI_CONN_AG_CTX_TX_TRUNCATE_MASK                 0x1
-#define XSTORM_ISCSI_CONN_AG_CTX_TX_TRUNCATE_SHIFT                7
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_BIT8_MASK		0x1
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_BIT8_SHIFT		0
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_BIT9_MASK		0x1
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_BIT9_SHIFT		1
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_BIT10_MASK		0x1
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_BIT10_SHIFT		2
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_BIT11_MASK		0x1
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_BIT11_SHIFT		3
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_BIT12_MASK		0x1
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_BIT12_SHIFT		4
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_BIT13_MASK		0x1
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_BIT13_SHIFT		5
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_BIT14_MASK		0x1
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_BIT14_SHIFT		6
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_TX_TRUNCATE_MASK	0x1
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_TX_TRUNCATE_SHIFT	7
 	u8 flags2;
-#define XSTORM_ISCSI_CONN_AG_CTX_CF0_MASK                         0x3
-#define XSTORM_ISCSI_CONN_AG_CTX_CF0_SHIFT                        0
-#define XSTORM_ISCSI_CONN_AG_CTX_CF1_MASK                         0x3
-#define XSTORM_ISCSI_CONN_AG_CTX_CF1_SHIFT                        2
-#define XSTORM_ISCSI_CONN_AG_CTX_CF2_MASK                         0x3
-#define XSTORM_ISCSI_CONN_AG_CTX_CF2_SHIFT                        4
-#define XSTORM_ISCSI_CONN_AG_CTX_TIMER_STOP_ALL_MASK              0x3
-#define XSTORM_ISCSI_CONN_AG_CTX_TIMER_STOP_ALL_SHIFT             6
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF0_MASK			0x3
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF0_SHIFT			0
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF1_MASK			0x3
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF1_SHIFT			2
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF2_MASK			0x3
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF2_SHIFT			4
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_TIMER_STOP_ALL_MASK		0x3
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_TIMER_STOP_ALL_SHIFT	6
 	u8 flags3;
-#define XSTORM_ISCSI_CONN_AG_CTX_CF4_MASK                         0x3
-#define XSTORM_ISCSI_CONN_AG_CTX_CF4_SHIFT                        0
-#define XSTORM_ISCSI_CONN_AG_CTX_CF5_MASK                         0x3
-#define XSTORM_ISCSI_CONN_AG_CTX_CF5_SHIFT                        2
-#define XSTORM_ISCSI_CONN_AG_CTX_CF6_MASK                         0x3
-#define XSTORM_ISCSI_CONN_AG_CTX_CF6_SHIFT                        4
-#define XSTORM_ISCSI_CONN_AG_CTX_CF7_MASK                         0x3
-#define XSTORM_ISCSI_CONN_AG_CTX_CF7_SHIFT                        6
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF4_MASK	0x3
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF4_SHIFT	0
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF5_MASK	0x3
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF5_SHIFT	2
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF6_MASK	0x3
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF6_SHIFT	4
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF7_MASK	0x3
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF7_SHIFT	6
 	u8 flags4;
-#define XSTORM_ISCSI_CONN_AG_CTX_CF8_MASK                         0x3
-#define XSTORM_ISCSI_CONN_AG_CTX_CF8_SHIFT                        0
-#define XSTORM_ISCSI_CONN_AG_CTX_CF9_MASK                         0x3
-#define XSTORM_ISCSI_CONN_AG_CTX_CF9_SHIFT                        2
-#define XSTORM_ISCSI_CONN_AG_CTX_CF10_MASK                        0x3
-#define XSTORM_ISCSI_CONN_AG_CTX_CF10_SHIFT                       4
-#define XSTORM_ISCSI_CONN_AG_CTX_CF11_MASK                        0x3
-#define XSTORM_ISCSI_CONN_AG_CTX_CF11_SHIFT                       6
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF8_MASK	0x3
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF8_SHIFT	0
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF9_MASK	0x3
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF9_SHIFT	2
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF10_MASK	0x3
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF10_SHIFT	4
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF11_MASK	0x3
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF11_SHIFT	6
 	u8 flags5;
-#define XSTORM_ISCSI_CONN_AG_CTX_CF12_MASK                        0x3
-#define XSTORM_ISCSI_CONN_AG_CTX_CF12_SHIFT                       0
-#define XSTORM_ISCSI_CONN_AG_CTX_CF13_MASK                        0x3
-#define XSTORM_ISCSI_CONN_AG_CTX_CF13_SHIFT                       2
-#define XSTORM_ISCSI_CONN_AG_CTX_CF14_MASK                        0x3
-#define XSTORM_ISCSI_CONN_AG_CTX_CF14_SHIFT                       4
-#define XSTORM_ISCSI_CONN_AG_CTX_UPDATE_STATE_TO_BASE_CF_MASK     0x3
-#define XSTORM_ISCSI_CONN_AG_CTX_UPDATE_STATE_TO_BASE_CF_SHIFT    6
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF12_MASK				0x3
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF12_SHIFT				0
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF13_MASK				0x3
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF13_SHIFT				2
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF14_MASK				0x3
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF14_SHIFT				4
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_UPDATE_STATE_TO_BASE_CF_MASK	0x3
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_UPDATE_STATE_TO_BASE_CF_SHIFT	6
 	u8 flags6;
-#define XSTORM_ISCSI_CONN_AG_CTX_CF16_MASK                        0x3
-#define XSTORM_ISCSI_CONN_AG_CTX_CF16_SHIFT                       0
-#define XSTORM_ISCSI_CONN_AG_CTX_CF17_MASK                        0x3
-#define XSTORM_ISCSI_CONN_AG_CTX_CF17_SHIFT                       2
-#define XSTORM_ISCSI_CONN_AG_CTX_CF18_MASK                        0x3
-#define XSTORM_ISCSI_CONN_AG_CTX_CF18_SHIFT                       4
-#define XSTORM_ISCSI_CONN_AG_CTX_DQ_FLUSH_MASK                    0x3
-#define XSTORM_ISCSI_CONN_AG_CTX_DQ_FLUSH_SHIFT                   6
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF16_MASK		0x3
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF16_SHIFT		0
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF17_MASK		0x3
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF17_SHIFT		2
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF18_MASK		0x3
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF18_SHIFT		4
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_DQ_FLUSH_MASK	0x3
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_DQ_FLUSH_SHIFT	6
 	u8 flags7;
-#define XSTORM_ISCSI_CONN_AG_CTX_MST_XCM_Q0_FLUSH_CF_MASK	0x3
-#define XSTORM_ISCSI_CONN_AG_CTX_MST_XCM_Q0_FLUSH_CF_SHIFT        0
-#define XSTORM_ISCSI_CONN_AG_CTX_UST_XCM_Q1_FLUSH_CF_MASK	0x3
-#define XSTORM_ISCSI_CONN_AG_CTX_UST_XCM_Q1_FLUSH_CF_SHIFT        2
-#define XSTORM_ISCSI_CONN_AG_CTX_SLOW_PATH_MASK                   0x3
-#define XSTORM_ISCSI_CONN_AG_CTX_SLOW_PATH_SHIFT                  4
-#define XSTORM_ISCSI_CONN_AG_CTX_CF0EN_MASK                       0x1
-#define XSTORM_ISCSI_CONN_AG_CTX_CF0EN_SHIFT                      6
-#define XSTORM_ISCSI_CONN_AG_CTX_CF1EN_MASK                       0x1
-#define XSTORM_ISCSI_CONN_AG_CTX_CF1EN_SHIFT                      7
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_MST_XCM_Q0_FLUSH_CF_MASK	0x3
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_MST_XCM_Q0_FLUSH_CF_SHIFT	0
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_UST_XCM_Q1_FLUSH_CF_MASK	0x3
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_UST_XCM_Q1_FLUSH_CF_SHIFT	2
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_SLOW_PATH_MASK		0x3
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_SLOW_PATH_SHIFT		4
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF0EN_MASK			0x1
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF0EN_SHIFT			6
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF1EN_MASK			0x1
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF1EN_SHIFT			7
 	u8 flags8;
-#define XSTORM_ISCSI_CONN_AG_CTX_CF2EN_MASK                       0x1
-#define XSTORM_ISCSI_CONN_AG_CTX_CF2EN_SHIFT                      0
-#define XSTORM_ISCSI_CONN_AG_CTX_TIMER_STOP_ALL_EN_MASK           0x1
-#define XSTORM_ISCSI_CONN_AG_CTX_TIMER_STOP_ALL_EN_SHIFT          1
-#define XSTORM_ISCSI_CONN_AG_CTX_CF4EN_MASK                       0x1
-#define XSTORM_ISCSI_CONN_AG_CTX_CF4EN_SHIFT                      2
-#define XSTORM_ISCSI_CONN_AG_CTX_CF5EN_MASK                       0x1
-#define XSTORM_ISCSI_CONN_AG_CTX_CF5EN_SHIFT                      3
-#define XSTORM_ISCSI_CONN_AG_CTX_CF6EN_MASK                       0x1
-#define XSTORM_ISCSI_CONN_AG_CTX_CF6EN_SHIFT                      4
-#define XSTORM_ISCSI_CONN_AG_CTX_CF7EN_MASK                       0x1
-#define XSTORM_ISCSI_CONN_AG_CTX_CF7EN_SHIFT                      5
-#define XSTORM_ISCSI_CONN_AG_CTX_CF8EN_MASK                       0x1
-#define XSTORM_ISCSI_CONN_AG_CTX_CF8EN_SHIFT                      6
-#define XSTORM_ISCSI_CONN_AG_CTX_CF9EN_MASK                       0x1
-#define XSTORM_ISCSI_CONN_AG_CTX_CF9EN_SHIFT                      7
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF2EN_MASK			0x1
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF2EN_SHIFT			0
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_TIMER_STOP_ALL_EN_MASK	0x1
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_TIMER_STOP_ALL_EN_SHIFT	1
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF4EN_MASK			0x1
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF4EN_SHIFT			2
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF5EN_MASK			0x1
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF5EN_SHIFT			3
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF6EN_MASK			0x1
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF6EN_SHIFT			4
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF7EN_MASK			0x1
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF7EN_SHIFT			5
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF8EN_MASK			0x1
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF8EN_SHIFT			6
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF9EN_MASK			0x1
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF9EN_SHIFT			7
 	u8 flags9;
-#define XSTORM_ISCSI_CONN_AG_CTX_CF10EN_MASK                      0x1
-#define XSTORM_ISCSI_CONN_AG_CTX_CF10EN_SHIFT                     0
-#define XSTORM_ISCSI_CONN_AG_CTX_CF11EN_MASK                      0x1
-#define XSTORM_ISCSI_CONN_AG_CTX_CF11EN_SHIFT                     1
-#define XSTORM_ISCSI_CONN_AG_CTX_CF12EN_MASK                      0x1
-#define XSTORM_ISCSI_CONN_AG_CTX_CF12EN_SHIFT                     2
-#define XSTORM_ISCSI_CONN_AG_CTX_CF13EN_MASK                      0x1
-#define XSTORM_ISCSI_CONN_AG_CTX_CF13EN_SHIFT                     3
-#define XSTORM_ISCSI_CONN_AG_CTX_CF14EN_MASK                      0x1
-#define XSTORM_ISCSI_CONN_AG_CTX_CF14EN_SHIFT                     4
-#define XSTORM_ISCSI_CONN_AG_CTX_UPDATE_STATE_TO_BASE_CF_EN_MASK  0x1
-#define XSTORM_ISCSI_CONN_AG_CTX_UPDATE_STATE_TO_BASE_CF_EN_SHIFT 5
-#define XSTORM_ISCSI_CONN_AG_CTX_CF16EN_MASK                      0x1
-#define XSTORM_ISCSI_CONN_AG_CTX_CF16EN_SHIFT                     6
-#define XSTORM_ISCSI_CONN_AG_CTX_CF17EN_MASK                      0x1
-#define XSTORM_ISCSI_CONN_AG_CTX_CF17EN_SHIFT                     7
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF10EN_MASK				0x1
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF10EN_SHIFT			0
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF11EN_MASK				0x1
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF11EN_SHIFT			1
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF12EN_MASK				0x1
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF12EN_SHIFT			2
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF13EN_MASK				0x1
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF13EN_SHIFT			3
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF14EN_MASK				0x1
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF14EN_SHIFT			4
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_UPDATE_STATE_TO_BASE_CF_EN_MASK	0x1
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_UPDATE_STATE_TO_BASE_CF_EN_SHIFT	5
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF16EN_MASK				0x1
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF16EN_SHIFT			6
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF17EN_MASK				0x1
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF17EN_SHIFT			7
 	u8 flags10;
-#define XSTORM_ISCSI_CONN_AG_CTX_CF18EN_MASK                      0x1
-#define XSTORM_ISCSI_CONN_AG_CTX_CF18EN_SHIFT                     0
-#define XSTORM_ISCSI_CONN_AG_CTX_DQ_FLUSH_EN_MASK                 0x1
-#define XSTORM_ISCSI_CONN_AG_CTX_DQ_FLUSH_EN_SHIFT                1
-#define XSTORM_ISCSI_CONN_AG_CTX_MST_XCM_Q0_FLUSH_CF_EN_MASK	0x1
-#define XSTORM_ISCSI_CONN_AG_CTX_MST_XCM_Q0_FLUSH_CF_EN_SHIFT     2
-#define XSTORM_ISCSI_CONN_AG_CTX_UST_XCM_Q1_FLUSH_CF_EN_MASK	0x1
-#define XSTORM_ISCSI_CONN_AG_CTX_UST_XCM_Q1_FLUSH_CF_EN_SHIFT     3
-#define XSTORM_ISCSI_CONN_AG_CTX_SLOW_PATH_EN_MASK                0x1
-#define XSTORM_ISCSI_CONN_AG_CTX_SLOW_PATH_EN_SHIFT               4
-#define XSTORM_ISCSI_CONN_AG_CTX_PROC_ONLY_CLEANUP_EN_MASK        0x1
-#define XSTORM_ISCSI_CONN_AG_CTX_PROC_ONLY_CLEANUP_EN_SHIFT       5
-#define XSTORM_ISCSI_CONN_AG_CTX_RULE0EN_MASK                     0x1
-#define XSTORM_ISCSI_CONN_AG_CTX_RULE0EN_SHIFT                    6
-#define XSTORM_ISCSI_CONN_AG_CTX_MORE_TO_SEND_DEC_RULE_EN_MASK    0x1
-#define XSTORM_ISCSI_CONN_AG_CTX_MORE_TO_SEND_DEC_RULE_EN_SHIFT   7
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF18EN_MASK				0x1
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF18EN_SHIFT			0
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_DQ_FLUSH_EN_MASK			0x1
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_DQ_FLUSH_EN_SHIFT			1
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_MST_XCM_Q0_FLUSH_CF_EN_MASK		0x1
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_MST_XCM_Q0_FLUSH_CF_EN_SHIFT	2
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_UST_XCM_Q1_FLUSH_CF_EN_MASK		0x1
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_UST_XCM_Q1_FLUSH_CF_EN_SHIFT	3
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_SLOW_PATH_EN_MASK			0x1
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_SLOW_PATH_EN_SHIFT			4
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_PROC_ONLY_CLEANUP_EN_MASK		0x1
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_PROC_ONLY_CLEANUP_EN_SHIFT		5
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_RULE0EN_MASK			0x1
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_RULE0EN_SHIFT			6
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_MORE_TO_SEND_DEC_RULE_EN_MASK	0x1
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_MORE_TO_SEND_DEC_RULE_EN_SHIFT	7
 	u8 flags11;
-#define XSTORM_ISCSI_CONN_AG_CTX_TX_BLOCKED_EN_MASK	0x1
-#define XSTORM_ISCSI_CONN_AG_CTX_TX_BLOCKED_EN_SHIFT              0
-#define XSTORM_ISCSI_CONN_AG_CTX_RULE3EN_MASK                     0x1
-#define XSTORM_ISCSI_CONN_AG_CTX_RULE3EN_SHIFT                    1
-#define XSTORM_ISCSI_CONN_AG_CTX_RESERVED3_MASK                   0x1
-#define XSTORM_ISCSI_CONN_AG_CTX_RESERVED3_SHIFT                  2
-#define XSTORM_ISCSI_CONN_AG_CTX_RULE5EN_MASK                     0x1
-#define XSTORM_ISCSI_CONN_AG_CTX_RULE5EN_SHIFT                    3
-#define XSTORM_ISCSI_CONN_AG_CTX_RULE6EN_MASK                     0x1
-#define XSTORM_ISCSI_CONN_AG_CTX_RULE6EN_SHIFT                    4
-#define XSTORM_ISCSI_CONN_AG_CTX_RULE7EN_MASK                     0x1
-#define XSTORM_ISCSI_CONN_AG_CTX_RULE7EN_SHIFT                    5
-#define XSTORM_ISCSI_CONN_AG_CTX_A0_RESERVED1_MASK                0x1
-#define XSTORM_ISCSI_CONN_AG_CTX_A0_RESERVED1_SHIFT               6
-#define XSTORM_ISCSI_CONN_AG_CTX_RULE9EN_MASK                     0x1
-#define XSTORM_ISCSI_CONN_AG_CTX_RULE9EN_SHIFT                    7
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_TX_BLOCKED_EN_MASK	0x1
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_TX_BLOCKED_EN_SHIFT	0
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_RULE3EN_MASK	0x1
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_RULE3EN_SHIFT	1
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_RESERVED3_MASK	0x1
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_RESERVED3_SHIFT	2
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_RULE5EN_MASK	0x1
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_RULE5EN_SHIFT	3
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_RULE6EN_MASK	0x1
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_RULE6EN_SHIFT	4
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_RULE7EN_MASK	0x1
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_RULE7EN_SHIFT	5
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_A0_RESERVED1_MASK	0x1
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_A0_RESERVED1_SHIFT	6
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_RULE9EN_MASK	0x1
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_RULE9EN_SHIFT	7
 	u8 flags12;
-#define XSTORM_ISCSI_CONN_AG_CTX_SQ_DEC_RULE_EN_MASK              0x1
-#define XSTORM_ISCSI_CONN_AG_CTX_SQ_DEC_RULE_EN_SHIFT             0
-#define XSTORM_ISCSI_CONN_AG_CTX_RULE11EN_MASK                    0x1
-#define XSTORM_ISCSI_CONN_AG_CTX_RULE11EN_SHIFT                   1
-#define XSTORM_ISCSI_CONN_AG_CTX_A0_RESERVED2_MASK                0x1
-#define XSTORM_ISCSI_CONN_AG_CTX_A0_RESERVED2_SHIFT               2
-#define XSTORM_ISCSI_CONN_AG_CTX_A0_RESERVED3_MASK                0x1
-#define XSTORM_ISCSI_CONN_AG_CTX_A0_RESERVED3_SHIFT               3
-#define XSTORM_ISCSI_CONN_AG_CTX_RULE14EN_MASK                    0x1
-#define XSTORM_ISCSI_CONN_AG_CTX_RULE14EN_SHIFT                   4
-#define XSTORM_ISCSI_CONN_AG_CTX_RULE15EN_MASK                    0x1
-#define XSTORM_ISCSI_CONN_AG_CTX_RULE15EN_SHIFT                   5
-#define XSTORM_ISCSI_CONN_AG_CTX_RULE16EN_MASK                    0x1
-#define XSTORM_ISCSI_CONN_AG_CTX_RULE16EN_SHIFT                   6
-#define XSTORM_ISCSI_CONN_AG_CTX_RULE17EN_MASK                    0x1
-#define XSTORM_ISCSI_CONN_AG_CTX_RULE17EN_SHIFT                   7
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_SQ_DEC_RULE_EN_MASK		0x1
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_SQ_DEC_RULE_EN_SHIFT	0
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_RULE11EN_MASK		0x1
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_RULE11EN_SHIFT		1
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_A0_RESERVED2_MASK		0x1
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_A0_RESERVED2_SHIFT		2
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_A0_RESERVED3_MASK		0x1
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_A0_RESERVED3_SHIFT		3
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_RULE14EN_MASK		0x1
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_RULE14EN_SHIFT		4
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_RULE15EN_MASK		0x1
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_RULE15EN_SHIFT		5
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_RULE16EN_MASK		0x1
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_RULE16EN_SHIFT		6
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_RULE17EN_MASK		0x1
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_RULE17EN_SHIFT		7
 	u8 flags13;
-#define XSTORM_ISCSI_CONN_AG_CTX_R2TQ_DEC_RULE_EN_MASK            0x1
-#define XSTORM_ISCSI_CONN_AG_CTX_R2TQ_DEC_RULE_EN_SHIFT           0
-#define XSTORM_ISCSI_CONN_AG_CTX_HQ_DEC_RULE_EN_MASK              0x1
-#define XSTORM_ISCSI_CONN_AG_CTX_HQ_DEC_RULE_EN_SHIFT             1
-#define XSTORM_ISCSI_CONN_AG_CTX_A0_RESERVED4_MASK                0x1
-#define XSTORM_ISCSI_CONN_AG_CTX_A0_RESERVED4_SHIFT               2
-#define XSTORM_ISCSI_CONN_AG_CTX_A0_RESERVED5_MASK                0x1
-#define XSTORM_ISCSI_CONN_AG_CTX_A0_RESERVED5_SHIFT               3
-#define XSTORM_ISCSI_CONN_AG_CTX_A0_RESERVED6_MASK                0x1
-#define XSTORM_ISCSI_CONN_AG_CTX_A0_RESERVED6_SHIFT               4
-#define XSTORM_ISCSI_CONN_AG_CTX_A0_RESERVED7_MASK                0x1
-#define XSTORM_ISCSI_CONN_AG_CTX_A0_RESERVED7_SHIFT               5
-#define XSTORM_ISCSI_CONN_AG_CTX_A0_RESERVED8_MASK                0x1
-#define XSTORM_ISCSI_CONN_AG_CTX_A0_RESERVED8_SHIFT               6
-#define XSTORM_ISCSI_CONN_AG_CTX_A0_RESERVED9_MASK                0x1
-#define XSTORM_ISCSI_CONN_AG_CTX_A0_RESERVED9_SHIFT               7
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_R2TQ_DEC_RULE_EN_MASK	0x1
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_R2TQ_DEC_RULE_EN_SHIFT	0
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_HQ_DEC_RULE_EN_MASK		0x1
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_HQ_DEC_RULE_EN_SHIFT	1
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_A0_RESERVED4_MASK		0x1
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_A0_RESERVED4_SHIFT		2
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_A0_RESERVED5_MASK		0x1
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_A0_RESERVED5_SHIFT		3
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_A0_RESERVED6_MASK		0x1
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_A0_RESERVED6_SHIFT		4
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_A0_RESERVED7_MASK		0x1
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_A0_RESERVED7_SHIFT		5
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_A0_RESERVED8_MASK		0x1
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_A0_RESERVED8_SHIFT		6
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_A0_RESERVED9_MASK		0x1
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_A0_RESERVED9_SHIFT		7
 	u8 flags14;
-#define XSTORM_ISCSI_CONN_AG_CTX_BIT16_MASK                       0x1
-#define XSTORM_ISCSI_CONN_AG_CTX_BIT16_SHIFT                      0
-#define XSTORM_ISCSI_CONN_AG_CTX_BIT17_MASK                       0x1
-#define XSTORM_ISCSI_CONN_AG_CTX_BIT17_SHIFT                      1
-#define XSTORM_ISCSI_CONN_AG_CTX_BIT18_MASK                       0x1
-#define XSTORM_ISCSI_CONN_AG_CTX_BIT18_SHIFT                      2
-#define XSTORM_ISCSI_CONN_AG_CTX_BIT19_MASK                       0x1
-#define XSTORM_ISCSI_CONN_AG_CTX_BIT19_SHIFT                      3
-#define XSTORM_ISCSI_CONN_AG_CTX_BIT20_MASK                       0x1
-#define XSTORM_ISCSI_CONN_AG_CTX_BIT20_SHIFT                      4
-#define XSTORM_ISCSI_CONN_AG_CTX_DUMMY_READ_DONE_MASK             0x1
-#define XSTORM_ISCSI_CONN_AG_CTX_DUMMY_READ_DONE_SHIFT            5
-#define XSTORM_ISCSI_CONN_AG_CTX_PROC_ONLY_CLEANUP_MASK           0x3
-#define XSTORM_ISCSI_CONN_AG_CTX_PROC_ONLY_CLEANUP_SHIFT          6
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_BIT16_MASK			0x1
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_BIT16_SHIFT			0
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_BIT17_MASK			0x1
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_BIT17_SHIFT			1
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_BIT18_MASK			0x1
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_BIT18_SHIFT			2
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_BIT19_MASK			0x1
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_BIT19_SHIFT			3
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_BIT20_MASK			0x1
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_BIT20_SHIFT			4
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_DUMMY_READ_DONE_MASK	0x1
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_DUMMY_READ_DONE_SHIFT	5
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_PROC_ONLY_CLEANUP_MASK	0x3
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_PROC_ONLY_CLEANUP_SHIFT	6
 	u8 byte2;
 	__le16 physical_q0;
 	__le16 physical_q1;
@@ -10449,7 +11108,7 @@ struct xstorm_iscsi_conn_ag_ctx {
 	u8 byte13;
 	u8 byte14;
 	u8 byte15;
-	u8 ereserved;
+	u8 e5_reserved;
 	__le16 word11;
 	__le32 reg10;
 	__le32 reg11;
@@ -10461,89 +11120,89 @@ struct xstorm_iscsi_conn_ag_ctx {
 	__le32 reg17;
 };
 
-struct tstorm_iscsi_conn_ag_ctx {
+struct e4_tstorm_iscsi_conn_ag_ctx {
 	u8 reserved0;
 	u8 state;
 	u8 flags0;
-#define TSTORM_ISCSI_CONN_AG_CTX_EXIST_IN_QM0_MASK       0x1
-#define TSTORM_ISCSI_CONN_AG_CTX_EXIST_IN_QM0_SHIFT      0
-#define TSTORM_ISCSI_CONN_AG_CTX_BIT1_MASK               0x1
-#define TSTORM_ISCSI_CONN_AG_CTX_BIT1_SHIFT              1
-#define TSTORM_ISCSI_CONN_AG_CTX_BIT2_MASK               0x1
-#define TSTORM_ISCSI_CONN_AG_CTX_BIT2_SHIFT              2
-#define TSTORM_ISCSI_CONN_AG_CTX_BIT3_MASK               0x1
-#define TSTORM_ISCSI_CONN_AG_CTX_BIT3_SHIFT              3
-#define TSTORM_ISCSI_CONN_AG_CTX_BIT4_MASK               0x1
-#define TSTORM_ISCSI_CONN_AG_CTX_BIT4_SHIFT              4
-#define TSTORM_ISCSI_CONN_AG_CTX_BIT5_MASK               0x1
-#define TSTORM_ISCSI_CONN_AG_CTX_BIT5_SHIFT              5
-#define TSTORM_ISCSI_CONN_AG_CTX_CF0_MASK                0x3
-#define TSTORM_ISCSI_CONN_AG_CTX_CF0_SHIFT               6
+#define E4_TSTORM_ISCSI_CONN_AG_CTX_EXIST_IN_QM0_MASK	0x1
+#define E4_TSTORM_ISCSI_CONN_AG_CTX_EXIST_IN_QM0_SHIFT	0
+#define E4_TSTORM_ISCSI_CONN_AG_CTX_BIT1_MASK		0x1
+#define E4_TSTORM_ISCSI_CONN_AG_CTX_BIT1_SHIFT		1
+#define E4_TSTORM_ISCSI_CONN_AG_CTX_BIT2_MASK		0x1
+#define E4_TSTORM_ISCSI_CONN_AG_CTX_BIT2_SHIFT		2
+#define E4_TSTORM_ISCSI_CONN_AG_CTX_BIT3_MASK		0x1
+#define E4_TSTORM_ISCSI_CONN_AG_CTX_BIT3_SHIFT		3
+#define E4_TSTORM_ISCSI_CONN_AG_CTX_BIT4_MASK		0x1
+#define E4_TSTORM_ISCSI_CONN_AG_CTX_BIT4_SHIFT		4
+#define E4_TSTORM_ISCSI_CONN_AG_CTX_BIT5_MASK		0x1
+#define E4_TSTORM_ISCSI_CONN_AG_CTX_BIT5_SHIFT		5
+#define E4_TSTORM_ISCSI_CONN_AG_CTX_CF0_MASK		0x3
+#define E4_TSTORM_ISCSI_CONN_AG_CTX_CF0_SHIFT		6
 	u8 flags1;
-#define TSTORM_ISCSI_CONN_AG_CTX_P2T_FLUSH_CF_MASK	0x3
-#define TSTORM_ISCSI_CONN_AG_CTX_P2T_FLUSH_CF_SHIFT      0
-#define TSTORM_ISCSI_CONN_AG_CTX_M2T_FLUSH_CF_MASK	0x3
-#define TSTORM_ISCSI_CONN_AG_CTX_M2T_FLUSH_CF_SHIFT      2
-#define TSTORM_ISCSI_CONN_AG_CTX_TIMER_STOP_ALL_MASK     0x3
-#define TSTORM_ISCSI_CONN_AG_CTX_TIMER_STOP_ALL_SHIFT    4
-#define TSTORM_ISCSI_CONN_AG_CTX_CF4_MASK                0x3
-#define TSTORM_ISCSI_CONN_AG_CTX_CF4_SHIFT               6
+#define E4_TSTORM_ISCSI_CONN_AG_CTX_P2T_FLUSH_CF_MASK		0x3
+#define E4_TSTORM_ISCSI_CONN_AG_CTX_P2T_FLUSH_CF_SHIFT		0
+#define E4_TSTORM_ISCSI_CONN_AG_CTX_M2T_FLUSH_CF_MASK		0x3
+#define E4_TSTORM_ISCSI_CONN_AG_CTX_M2T_FLUSH_CF_SHIFT		2
+#define E4_TSTORM_ISCSI_CONN_AG_CTX_TIMER_STOP_ALL_MASK		0x3
+#define E4_TSTORM_ISCSI_CONN_AG_CTX_TIMER_STOP_ALL_SHIFT	4
+#define E4_TSTORM_ISCSI_CONN_AG_CTX_CF4_MASK			0x3
+#define E4_TSTORM_ISCSI_CONN_AG_CTX_CF4_SHIFT			6
 	u8 flags2;
-#define TSTORM_ISCSI_CONN_AG_CTX_CF5_MASK                0x3
-#define TSTORM_ISCSI_CONN_AG_CTX_CF5_SHIFT               0
-#define TSTORM_ISCSI_CONN_AG_CTX_CF6_MASK                0x3
-#define TSTORM_ISCSI_CONN_AG_CTX_CF6_SHIFT               2
-#define TSTORM_ISCSI_CONN_AG_CTX_CF7_MASK                0x3
-#define TSTORM_ISCSI_CONN_AG_CTX_CF7_SHIFT               4
-#define TSTORM_ISCSI_CONN_AG_CTX_CF8_MASK                0x3
-#define TSTORM_ISCSI_CONN_AG_CTX_CF8_SHIFT               6
+#define E4_TSTORM_ISCSI_CONN_AG_CTX_CF5_MASK	0x3
+#define E4_TSTORM_ISCSI_CONN_AG_CTX_CF5_SHIFT	0
+#define E4_TSTORM_ISCSI_CONN_AG_CTX_CF6_MASK	0x3
+#define E4_TSTORM_ISCSI_CONN_AG_CTX_CF6_SHIFT	2
+#define E4_TSTORM_ISCSI_CONN_AG_CTX_CF7_MASK	0x3
+#define E4_TSTORM_ISCSI_CONN_AG_CTX_CF7_SHIFT	4
+#define E4_TSTORM_ISCSI_CONN_AG_CTX_CF8_MASK	0x3
+#define E4_TSTORM_ISCSI_CONN_AG_CTX_CF8_SHIFT	6
 	u8 flags3;
-#define TSTORM_ISCSI_CONN_AG_CTX_FLUSH_Q0_MASK           0x3
-#define TSTORM_ISCSI_CONN_AG_CTX_FLUSH_Q0_SHIFT          0
-#define TSTORM_ISCSI_CONN_AG_CTX_CF10_MASK               0x3
-#define TSTORM_ISCSI_CONN_AG_CTX_CF10_SHIFT              2
-#define TSTORM_ISCSI_CONN_AG_CTX_CF0EN_MASK              0x1
-#define TSTORM_ISCSI_CONN_AG_CTX_CF0EN_SHIFT             4
-#define TSTORM_ISCSI_CONN_AG_CTX_P2T_FLUSH_CF_EN_MASK	0x1
-#define TSTORM_ISCSI_CONN_AG_CTX_P2T_FLUSH_CF_EN_SHIFT   5
-#define TSTORM_ISCSI_CONN_AG_CTX_M2T_FLUSH_CF_EN_MASK	0x1
-#define TSTORM_ISCSI_CONN_AG_CTX_M2T_FLUSH_CF_EN_SHIFT   6
-#define TSTORM_ISCSI_CONN_AG_CTX_TIMER_STOP_ALL_EN_MASK  0x1
-#define TSTORM_ISCSI_CONN_AG_CTX_TIMER_STOP_ALL_EN_SHIFT 7
+#define E4_TSTORM_ISCSI_CONN_AG_CTX_FLUSH_Q0_MASK		0x3
+#define E4_TSTORM_ISCSI_CONN_AG_CTX_FLUSH_Q0_SHIFT		0
+#define E4_TSTORM_ISCSI_CONN_AG_CTX_CF10_MASK			0x3
+#define E4_TSTORM_ISCSI_CONN_AG_CTX_CF10_SHIFT			2
+#define E4_TSTORM_ISCSI_CONN_AG_CTX_CF0EN_MASK			0x1
+#define E4_TSTORM_ISCSI_CONN_AG_CTX_CF0EN_SHIFT			4
+#define E4_TSTORM_ISCSI_CONN_AG_CTX_P2T_FLUSH_CF_EN_MASK	0x1
+#define E4_TSTORM_ISCSI_CONN_AG_CTX_P2T_FLUSH_CF_EN_SHIFT	5
+#define E4_TSTORM_ISCSI_CONN_AG_CTX_M2T_FLUSH_CF_EN_MASK	0x1
+#define E4_TSTORM_ISCSI_CONN_AG_CTX_M2T_FLUSH_CF_EN_SHIFT	6
+#define E4_TSTORM_ISCSI_CONN_AG_CTX_TIMER_STOP_ALL_EN_MASK	0x1
+#define E4_TSTORM_ISCSI_CONN_AG_CTX_TIMER_STOP_ALL_EN_SHIFT	7
 	u8 flags4;
-#define TSTORM_ISCSI_CONN_AG_CTX_CF4EN_MASK              0x1
-#define TSTORM_ISCSI_CONN_AG_CTX_CF4EN_SHIFT             0
-#define TSTORM_ISCSI_CONN_AG_CTX_CF5EN_MASK              0x1
-#define TSTORM_ISCSI_CONN_AG_CTX_CF5EN_SHIFT             1
-#define TSTORM_ISCSI_CONN_AG_CTX_CF6EN_MASK              0x1
-#define TSTORM_ISCSI_CONN_AG_CTX_CF6EN_SHIFT             2
-#define TSTORM_ISCSI_CONN_AG_CTX_CF7EN_MASK              0x1
-#define TSTORM_ISCSI_CONN_AG_CTX_CF7EN_SHIFT             3
-#define TSTORM_ISCSI_CONN_AG_CTX_CF8EN_MASK              0x1
-#define TSTORM_ISCSI_CONN_AG_CTX_CF8EN_SHIFT             4
-#define TSTORM_ISCSI_CONN_AG_CTX_FLUSH_Q0_EN_MASK        0x1
-#define TSTORM_ISCSI_CONN_AG_CTX_FLUSH_Q0_EN_SHIFT       5
-#define TSTORM_ISCSI_CONN_AG_CTX_CF10EN_MASK             0x1
-#define TSTORM_ISCSI_CONN_AG_CTX_CF10EN_SHIFT            6
-#define TSTORM_ISCSI_CONN_AG_CTX_RULE0EN_MASK            0x1
-#define TSTORM_ISCSI_CONN_AG_CTX_RULE0EN_SHIFT           7
+#define E4_TSTORM_ISCSI_CONN_AG_CTX_CF4EN_MASK		0x1
+#define E4_TSTORM_ISCSI_CONN_AG_CTX_CF4EN_SHIFT		0
+#define E4_TSTORM_ISCSI_CONN_AG_CTX_CF5EN_MASK		0x1
+#define E4_TSTORM_ISCSI_CONN_AG_CTX_CF5EN_SHIFT		1
+#define E4_TSTORM_ISCSI_CONN_AG_CTX_CF6EN_MASK		0x1
+#define E4_TSTORM_ISCSI_CONN_AG_CTX_CF6EN_SHIFT		2
+#define E4_TSTORM_ISCSI_CONN_AG_CTX_CF7EN_MASK		0x1
+#define E4_TSTORM_ISCSI_CONN_AG_CTX_CF7EN_SHIFT		3
+#define E4_TSTORM_ISCSI_CONN_AG_CTX_CF8EN_MASK		0x1
+#define E4_TSTORM_ISCSI_CONN_AG_CTX_CF8EN_SHIFT		4
+#define E4_TSTORM_ISCSI_CONN_AG_CTX_FLUSH_Q0_EN_MASK	0x1
+#define E4_TSTORM_ISCSI_CONN_AG_CTX_FLUSH_Q0_EN_SHIFT	5
+#define E4_TSTORM_ISCSI_CONN_AG_CTX_CF10EN_MASK		0x1
+#define E4_TSTORM_ISCSI_CONN_AG_CTX_CF10EN_SHIFT	6
+#define E4_TSTORM_ISCSI_CONN_AG_CTX_RULE0EN_MASK	0x1
+#define E4_TSTORM_ISCSI_CONN_AG_CTX_RULE0EN_SHIFT	7
 	u8 flags5;
-#define TSTORM_ISCSI_CONN_AG_CTX_RULE1EN_MASK            0x1
-#define TSTORM_ISCSI_CONN_AG_CTX_RULE1EN_SHIFT           0
-#define TSTORM_ISCSI_CONN_AG_CTX_RULE2EN_MASK            0x1
-#define TSTORM_ISCSI_CONN_AG_CTX_RULE2EN_SHIFT           1
-#define TSTORM_ISCSI_CONN_AG_CTX_RULE3EN_MASK            0x1
-#define TSTORM_ISCSI_CONN_AG_CTX_RULE3EN_SHIFT           2
-#define TSTORM_ISCSI_CONN_AG_CTX_RULE4EN_MASK            0x1
-#define TSTORM_ISCSI_CONN_AG_CTX_RULE4EN_SHIFT           3
-#define TSTORM_ISCSI_CONN_AG_CTX_RULE5EN_MASK            0x1
-#define TSTORM_ISCSI_CONN_AG_CTX_RULE5EN_SHIFT           4
-#define TSTORM_ISCSI_CONN_AG_CTX_RULE6EN_MASK            0x1
-#define TSTORM_ISCSI_CONN_AG_CTX_RULE6EN_SHIFT           5
-#define TSTORM_ISCSI_CONN_AG_CTX_RULE7EN_MASK            0x1
-#define TSTORM_ISCSI_CONN_AG_CTX_RULE7EN_SHIFT           6
-#define TSTORM_ISCSI_CONN_AG_CTX_RULE8EN_MASK            0x1
-#define TSTORM_ISCSI_CONN_AG_CTX_RULE8EN_SHIFT           7
+#define E4_TSTORM_ISCSI_CONN_AG_CTX_RULE1EN_MASK	0x1
+#define E4_TSTORM_ISCSI_CONN_AG_CTX_RULE1EN_SHIFT	0
+#define E4_TSTORM_ISCSI_CONN_AG_CTX_RULE2EN_MASK	0x1
+#define E4_TSTORM_ISCSI_CONN_AG_CTX_RULE2EN_SHIFT	1
+#define E4_TSTORM_ISCSI_CONN_AG_CTX_RULE3EN_MASK	0x1
+#define E4_TSTORM_ISCSI_CONN_AG_CTX_RULE3EN_SHIFT	2
+#define E4_TSTORM_ISCSI_CONN_AG_CTX_RULE4EN_MASK	0x1
+#define E4_TSTORM_ISCSI_CONN_AG_CTX_RULE4EN_SHIFT	3
+#define E4_TSTORM_ISCSI_CONN_AG_CTX_RULE5EN_MASK	0x1
+#define E4_TSTORM_ISCSI_CONN_AG_CTX_RULE5EN_SHIFT	4
+#define E4_TSTORM_ISCSI_CONN_AG_CTX_RULE6EN_MASK	0x1
+#define E4_TSTORM_ISCSI_CONN_AG_CTX_RULE6EN_SHIFT	5
+#define E4_TSTORM_ISCSI_CONN_AG_CTX_RULE7EN_MASK	0x1
+#define E4_TSTORM_ISCSI_CONN_AG_CTX_RULE7EN_SHIFT	6
+#define E4_TSTORM_ISCSI_CONN_AG_CTX_RULE8EN_MASK	0x1
+#define E4_TSTORM_ISCSI_CONN_AG_CTX_RULE8EN_SHIFT	7
 	__le32 reg0;
 	__le32 reg1;
 	__le32 reg2;
@@ -10558,63 +11217,63 @@ struct tstorm_iscsi_conn_ag_ctx {
 	__le16 word0;
 };
 
-struct ustorm_iscsi_conn_ag_ctx {
+struct e4_ustorm_iscsi_conn_ag_ctx {
 	u8 byte0;
 	u8 byte1;
 	u8 flags0;
-#define USTORM_ISCSI_CONN_AG_CTX_BIT0_MASK     0x1
-#define USTORM_ISCSI_CONN_AG_CTX_BIT0_SHIFT    0
-#define USTORM_ISCSI_CONN_AG_CTX_BIT1_MASK     0x1
-#define USTORM_ISCSI_CONN_AG_CTX_BIT1_SHIFT    1
-#define USTORM_ISCSI_CONN_AG_CTX_CF0_MASK      0x3
-#define USTORM_ISCSI_CONN_AG_CTX_CF0_SHIFT     2
-#define USTORM_ISCSI_CONN_AG_CTX_CF1_MASK      0x3
-#define USTORM_ISCSI_CONN_AG_CTX_CF1_SHIFT     4
-#define USTORM_ISCSI_CONN_AG_CTX_CF2_MASK      0x3
-#define USTORM_ISCSI_CONN_AG_CTX_CF2_SHIFT     6
+#define E4_USTORM_ISCSI_CONN_AG_CTX_BIT0_MASK	0x1
+#define E4_USTORM_ISCSI_CONN_AG_CTX_BIT0_SHIFT	0
+#define E4_USTORM_ISCSI_CONN_AG_CTX_BIT1_MASK	0x1
+#define E4_USTORM_ISCSI_CONN_AG_CTX_BIT1_SHIFT	1
+#define E4_USTORM_ISCSI_CONN_AG_CTX_CF0_MASK	0x3
+#define E4_USTORM_ISCSI_CONN_AG_CTX_CF0_SHIFT	2
+#define E4_USTORM_ISCSI_CONN_AG_CTX_CF1_MASK	0x3
+#define E4_USTORM_ISCSI_CONN_AG_CTX_CF1_SHIFT	4
+#define E4_USTORM_ISCSI_CONN_AG_CTX_CF2_MASK	0x3
+#define E4_USTORM_ISCSI_CONN_AG_CTX_CF2_SHIFT	6
 	u8 flags1;
-#define USTORM_ISCSI_CONN_AG_CTX_CF3_MASK      0x3
-#define USTORM_ISCSI_CONN_AG_CTX_CF3_SHIFT     0
-#define USTORM_ISCSI_CONN_AG_CTX_CF4_MASK      0x3
-#define USTORM_ISCSI_CONN_AG_CTX_CF4_SHIFT     2
-#define USTORM_ISCSI_CONN_AG_CTX_CF5_MASK      0x3
-#define USTORM_ISCSI_CONN_AG_CTX_CF5_SHIFT     4
-#define USTORM_ISCSI_CONN_AG_CTX_CF6_MASK      0x3
-#define USTORM_ISCSI_CONN_AG_CTX_CF6_SHIFT     6
+#define E4_USTORM_ISCSI_CONN_AG_CTX_CF3_MASK	0x3
+#define E4_USTORM_ISCSI_CONN_AG_CTX_CF3_SHIFT	0
+#define E4_USTORM_ISCSI_CONN_AG_CTX_CF4_MASK	0x3
+#define E4_USTORM_ISCSI_CONN_AG_CTX_CF4_SHIFT	2
+#define E4_USTORM_ISCSI_CONN_AG_CTX_CF5_MASK	0x3
+#define E4_USTORM_ISCSI_CONN_AG_CTX_CF5_SHIFT	4
+#define E4_USTORM_ISCSI_CONN_AG_CTX_CF6_MASK	0x3
+#define E4_USTORM_ISCSI_CONN_AG_CTX_CF6_SHIFT	6
 	u8 flags2;
-#define USTORM_ISCSI_CONN_AG_CTX_CF0EN_MASK    0x1
-#define USTORM_ISCSI_CONN_AG_CTX_CF0EN_SHIFT   0
-#define USTORM_ISCSI_CONN_AG_CTX_CF1EN_MASK    0x1
-#define USTORM_ISCSI_CONN_AG_CTX_CF1EN_SHIFT   1
-#define USTORM_ISCSI_CONN_AG_CTX_CF2EN_MASK    0x1
-#define USTORM_ISCSI_CONN_AG_CTX_CF2EN_SHIFT   2
-#define USTORM_ISCSI_CONN_AG_CTX_CF3EN_MASK    0x1
-#define USTORM_ISCSI_CONN_AG_CTX_CF3EN_SHIFT   3
-#define USTORM_ISCSI_CONN_AG_CTX_CF4EN_MASK    0x1
-#define USTORM_ISCSI_CONN_AG_CTX_CF4EN_SHIFT   4
-#define USTORM_ISCSI_CONN_AG_CTX_CF5EN_MASK    0x1
-#define USTORM_ISCSI_CONN_AG_CTX_CF5EN_SHIFT   5
-#define USTORM_ISCSI_CONN_AG_CTX_CF6EN_MASK    0x1
-#define USTORM_ISCSI_CONN_AG_CTX_CF6EN_SHIFT   6
-#define USTORM_ISCSI_CONN_AG_CTX_RULE0EN_MASK  0x1
-#define USTORM_ISCSI_CONN_AG_CTX_RULE0EN_SHIFT 7
+#define E4_USTORM_ISCSI_CONN_AG_CTX_CF0EN_MASK		0x1
+#define E4_USTORM_ISCSI_CONN_AG_CTX_CF0EN_SHIFT		0
+#define E4_USTORM_ISCSI_CONN_AG_CTX_CF1EN_MASK		0x1
+#define E4_USTORM_ISCSI_CONN_AG_CTX_CF1EN_SHIFT		1
+#define E4_USTORM_ISCSI_CONN_AG_CTX_CF2EN_MASK		0x1
+#define E4_USTORM_ISCSI_CONN_AG_CTX_CF2EN_SHIFT		2
+#define E4_USTORM_ISCSI_CONN_AG_CTX_CF3EN_MASK		0x1
+#define E4_USTORM_ISCSI_CONN_AG_CTX_CF3EN_SHIFT		3
+#define E4_USTORM_ISCSI_CONN_AG_CTX_CF4EN_MASK		0x1
+#define E4_USTORM_ISCSI_CONN_AG_CTX_CF4EN_SHIFT		4
+#define E4_USTORM_ISCSI_CONN_AG_CTX_CF5EN_MASK		0x1
+#define E4_USTORM_ISCSI_CONN_AG_CTX_CF5EN_SHIFT		5
+#define E4_USTORM_ISCSI_CONN_AG_CTX_CF6EN_MASK		0x1
+#define E4_USTORM_ISCSI_CONN_AG_CTX_CF6EN_SHIFT		6
+#define E4_USTORM_ISCSI_CONN_AG_CTX_RULE0EN_MASK	0x1
+#define E4_USTORM_ISCSI_CONN_AG_CTX_RULE0EN_SHIFT	7
 	u8 flags3;
-#define USTORM_ISCSI_CONN_AG_CTX_RULE1EN_MASK  0x1
-#define USTORM_ISCSI_CONN_AG_CTX_RULE1EN_SHIFT 0
-#define USTORM_ISCSI_CONN_AG_CTX_RULE2EN_MASK  0x1
-#define USTORM_ISCSI_CONN_AG_CTX_RULE2EN_SHIFT 1
-#define USTORM_ISCSI_CONN_AG_CTX_RULE3EN_MASK  0x1
-#define USTORM_ISCSI_CONN_AG_CTX_RULE3EN_SHIFT 2
-#define USTORM_ISCSI_CONN_AG_CTX_RULE4EN_MASK  0x1
-#define USTORM_ISCSI_CONN_AG_CTX_RULE4EN_SHIFT 3
-#define USTORM_ISCSI_CONN_AG_CTX_RULE5EN_MASK  0x1
-#define USTORM_ISCSI_CONN_AG_CTX_RULE5EN_SHIFT 4
-#define USTORM_ISCSI_CONN_AG_CTX_RULE6EN_MASK  0x1
-#define USTORM_ISCSI_CONN_AG_CTX_RULE6EN_SHIFT 5
-#define USTORM_ISCSI_CONN_AG_CTX_RULE7EN_MASK  0x1
-#define USTORM_ISCSI_CONN_AG_CTX_RULE7EN_SHIFT 6
-#define USTORM_ISCSI_CONN_AG_CTX_RULE8EN_MASK  0x1
-#define USTORM_ISCSI_CONN_AG_CTX_RULE8EN_SHIFT 7
+#define E4_USTORM_ISCSI_CONN_AG_CTX_RULE1EN_MASK	0x1
+#define E4_USTORM_ISCSI_CONN_AG_CTX_RULE1EN_SHIFT	0
+#define E4_USTORM_ISCSI_CONN_AG_CTX_RULE2EN_MASK	0x1
+#define E4_USTORM_ISCSI_CONN_AG_CTX_RULE2EN_SHIFT	1
+#define E4_USTORM_ISCSI_CONN_AG_CTX_RULE3EN_MASK	0x1
+#define E4_USTORM_ISCSI_CONN_AG_CTX_RULE3EN_SHIFT	2
+#define E4_USTORM_ISCSI_CONN_AG_CTX_RULE4EN_MASK	0x1
+#define E4_USTORM_ISCSI_CONN_AG_CTX_RULE4EN_SHIFT	3
+#define E4_USTORM_ISCSI_CONN_AG_CTX_RULE5EN_MASK	0x1
+#define E4_USTORM_ISCSI_CONN_AG_CTX_RULE5EN_SHIFT	4
+#define E4_USTORM_ISCSI_CONN_AG_CTX_RULE6EN_MASK	0x1
+#define E4_USTORM_ISCSI_CONN_AG_CTX_RULE6EN_SHIFT	5
+#define E4_USTORM_ISCSI_CONN_AG_CTX_RULE7EN_MASK	0x1
+#define E4_USTORM_ISCSI_CONN_AG_CTX_RULE7EN_SHIFT	6
+#define E4_USTORM_ISCSI_CONN_AG_CTX_RULE8EN_MASK	0x1
+#define E4_USTORM_ISCSI_CONN_AG_CTX_RULE8EN_SHIFT	7
 	u8 byte2;
 	u8 byte3;
 	__le16 word0;
@@ -10627,113 +11286,117 @@ struct ustorm_iscsi_conn_ag_ctx {
 	__le16 word3;
 };
 
+/* The iscsi storm connection context of Tstorm */
 struct tstorm_iscsi_conn_st_ctx {
-	__le32 reserved[40];
+	__le32 reserved[44];
 };
 
-struct mstorm_iscsi_conn_ag_ctx {
+struct e4_mstorm_iscsi_conn_ag_ctx {
 	u8 reserved;
 	u8 state;
 	u8 flags0;
-#define MSTORM_ISCSI_CONN_AG_CTX_BIT0_MASK     0x1
-#define MSTORM_ISCSI_CONN_AG_CTX_BIT0_SHIFT    0
-#define MSTORM_ISCSI_CONN_AG_CTX_BIT1_MASK     0x1
-#define MSTORM_ISCSI_CONN_AG_CTX_BIT1_SHIFT    1
-#define MSTORM_ISCSI_CONN_AG_CTX_CF0_MASK      0x3
-#define MSTORM_ISCSI_CONN_AG_CTX_CF0_SHIFT     2
-#define MSTORM_ISCSI_CONN_AG_CTX_CF1_MASK      0x3
-#define MSTORM_ISCSI_CONN_AG_CTX_CF1_SHIFT     4
-#define MSTORM_ISCSI_CONN_AG_CTX_CF2_MASK      0x3
-#define MSTORM_ISCSI_CONN_AG_CTX_CF2_SHIFT     6
+#define E4_MSTORM_ISCSI_CONN_AG_CTX_BIT0_MASK	0x1
+#define E4_MSTORM_ISCSI_CONN_AG_CTX_BIT0_SHIFT	0
+#define E4_MSTORM_ISCSI_CONN_AG_CTX_BIT1_MASK	0x1
+#define E4_MSTORM_ISCSI_CONN_AG_CTX_BIT1_SHIFT	1
+#define E4_MSTORM_ISCSI_CONN_AG_CTX_CF0_MASK	0x3
+#define E4_MSTORM_ISCSI_CONN_AG_CTX_CF0_SHIFT	2
+#define E4_MSTORM_ISCSI_CONN_AG_CTX_CF1_MASK	0x3
+#define E4_MSTORM_ISCSI_CONN_AG_CTX_CF1_SHIFT	4
+#define E4_MSTORM_ISCSI_CONN_AG_CTX_CF2_MASK	0x3
+#define E4_MSTORM_ISCSI_CONN_AG_CTX_CF2_SHIFT	6
 	u8 flags1;
-#define MSTORM_ISCSI_CONN_AG_CTX_CF0EN_MASK    0x1
-#define MSTORM_ISCSI_CONN_AG_CTX_CF0EN_SHIFT   0
-#define MSTORM_ISCSI_CONN_AG_CTX_CF1EN_MASK    0x1
-#define MSTORM_ISCSI_CONN_AG_CTX_CF1EN_SHIFT   1
-#define MSTORM_ISCSI_CONN_AG_CTX_CF2EN_MASK    0x1
-#define MSTORM_ISCSI_CONN_AG_CTX_CF2EN_SHIFT   2
-#define MSTORM_ISCSI_CONN_AG_CTX_RULE0EN_MASK  0x1
-#define MSTORM_ISCSI_CONN_AG_CTX_RULE0EN_SHIFT 3
-#define MSTORM_ISCSI_CONN_AG_CTX_RULE1EN_MASK  0x1
-#define MSTORM_ISCSI_CONN_AG_CTX_RULE1EN_SHIFT 4
-#define MSTORM_ISCSI_CONN_AG_CTX_RULE2EN_MASK  0x1
-#define MSTORM_ISCSI_CONN_AG_CTX_RULE2EN_SHIFT 5
-#define MSTORM_ISCSI_CONN_AG_CTX_RULE3EN_MASK  0x1
-#define MSTORM_ISCSI_CONN_AG_CTX_RULE3EN_SHIFT 6
-#define MSTORM_ISCSI_CONN_AG_CTX_RULE4EN_MASK  0x1
-#define MSTORM_ISCSI_CONN_AG_CTX_RULE4EN_SHIFT 7
+#define E4_MSTORM_ISCSI_CONN_AG_CTX_CF0EN_MASK		0x1
+#define E4_MSTORM_ISCSI_CONN_AG_CTX_CF0EN_SHIFT		0
+#define E4_MSTORM_ISCSI_CONN_AG_CTX_CF1EN_MASK		0x1
+#define E4_MSTORM_ISCSI_CONN_AG_CTX_CF1EN_SHIFT		1
+#define E4_MSTORM_ISCSI_CONN_AG_CTX_CF2EN_MASK		0x1
+#define E4_MSTORM_ISCSI_CONN_AG_CTX_CF2EN_SHIFT		2
+#define E4_MSTORM_ISCSI_CONN_AG_CTX_RULE0EN_MASK	0x1
+#define E4_MSTORM_ISCSI_CONN_AG_CTX_RULE0EN_SHIFT	3
+#define E4_MSTORM_ISCSI_CONN_AG_CTX_RULE1EN_MASK	0x1
+#define E4_MSTORM_ISCSI_CONN_AG_CTX_RULE1EN_SHIFT	4
+#define E4_MSTORM_ISCSI_CONN_AG_CTX_RULE2EN_MASK	0x1
+#define E4_MSTORM_ISCSI_CONN_AG_CTX_RULE2EN_SHIFT	5
+#define E4_MSTORM_ISCSI_CONN_AG_CTX_RULE3EN_MASK	0x1
+#define E4_MSTORM_ISCSI_CONN_AG_CTX_RULE3EN_SHIFT	6
+#define E4_MSTORM_ISCSI_CONN_AG_CTX_RULE4EN_MASK	0x1
+#define E4_MSTORM_ISCSI_CONN_AG_CTX_RULE4EN_SHIFT	7
 	__le16 word0;
 	__le16 word1;
 	__le32 reg0;
 	__le32 reg1;
 };
 
+/* Combined iSCSI and TCP storm connection of Mstorm */
 struct mstorm_iscsi_tcp_conn_st_ctx {
 	__le32 reserved_tcp[20];
-	__le32 reserved_iscsi[8];
+	__le32 reserved_iscsi[12];
 };
 
+/* The iscsi storm context of Ustorm */
 struct ustorm_iscsi_conn_st_ctx {
 	__le32 reserved[52];
 };
 
-struct iscsi_conn_context {
+/* iscsi connection context */
+struct e4_iscsi_conn_context {
 	struct ystorm_iscsi_conn_st_ctx ystorm_st_context;
-	struct regpair ystorm_st_padding[2];
 	struct pstorm_iscsi_tcp_conn_st_ctx pstorm_st_context;
 	struct regpair pstorm_st_padding[2];
 	struct pb_context xpb2_context;
 	struct xstorm_iscsi_tcp_conn_st_ctx xstorm_st_context;
 	struct regpair xstorm_st_padding[2];
-	struct xstorm_iscsi_conn_ag_ctx xstorm_ag_context;
-	struct tstorm_iscsi_conn_ag_ctx tstorm_ag_context;
+	struct e4_xstorm_iscsi_conn_ag_ctx xstorm_ag_context;
+	struct e4_tstorm_iscsi_conn_ag_ctx tstorm_ag_context;
 	struct regpair tstorm_ag_padding[2];
 	struct timers_context timer_context;
-	struct ustorm_iscsi_conn_ag_ctx ustorm_ag_context;
+	struct e4_ustorm_iscsi_conn_ag_ctx ustorm_ag_context;
 	struct pb_context upb_context;
 	struct tstorm_iscsi_conn_st_ctx tstorm_st_context;
 	struct regpair tstorm_st_padding[2];
-	struct mstorm_iscsi_conn_ag_ctx mstorm_ag_context;
+	struct e4_mstorm_iscsi_conn_ag_ctx mstorm_ag_context;
 	struct mstorm_iscsi_tcp_conn_st_ctx mstorm_st_context;
 	struct ustorm_iscsi_conn_st_ctx ustorm_st_context;
 };
 
+/* iSCSI init params passed by driver to FW in iSCSI init ramrod */
 struct iscsi_init_ramrod_params {
 	struct iscsi_spe_func_init iscsi_init_spe;
 	struct tcp_init_params tcp_init;
 };
 
-struct ystorm_iscsi_conn_ag_ctx {
+struct e4_ystorm_iscsi_conn_ag_ctx {
 	u8 byte0;
 	u8 byte1;
 	u8 flags0;
-#define YSTORM_ISCSI_CONN_AG_CTX_BIT0_MASK     0x1
-#define YSTORM_ISCSI_CONN_AG_CTX_BIT0_SHIFT    0
-#define YSTORM_ISCSI_CONN_AG_CTX_BIT1_MASK     0x1
-#define YSTORM_ISCSI_CONN_AG_CTX_BIT1_SHIFT    1
-#define YSTORM_ISCSI_CONN_AG_CTX_CF0_MASK      0x3
-#define YSTORM_ISCSI_CONN_AG_CTX_CF0_SHIFT     2
-#define YSTORM_ISCSI_CONN_AG_CTX_CF1_MASK      0x3
-#define YSTORM_ISCSI_CONN_AG_CTX_CF1_SHIFT     4
-#define YSTORM_ISCSI_CONN_AG_CTX_CF2_MASK      0x3
-#define YSTORM_ISCSI_CONN_AG_CTX_CF2_SHIFT     6
+#define E4_YSTORM_ISCSI_CONN_AG_CTX_BIT0_MASK	0x1
+#define E4_YSTORM_ISCSI_CONN_AG_CTX_BIT0_SHIFT	0
+#define E4_YSTORM_ISCSI_CONN_AG_CTX_BIT1_MASK	0x1
+#define E4_YSTORM_ISCSI_CONN_AG_CTX_BIT1_SHIFT	1
+#define E4_YSTORM_ISCSI_CONN_AG_CTX_CF0_MASK	0x3
+#define E4_YSTORM_ISCSI_CONN_AG_CTX_CF0_SHIFT	2
+#define E4_YSTORM_ISCSI_CONN_AG_CTX_CF1_MASK	0x3
+#define E4_YSTORM_ISCSI_CONN_AG_CTX_CF1_SHIFT	4
+#define E4_YSTORM_ISCSI_CONN_AG_CTX_CF2_MASK	0x3
+#define E4_YSTORM_ISCSI_CONN_AG_CTX_CF2_SHIFT	6
 	u8 flags1;
-#define YSTORM_ISCSI_CONN_AG_CTX_CF0EN_MASK    0x1
-#define YSTORM_ISCSI_CONN_AG_CTX_CF0EN_SHIFT   0
-#define YSTORM_ISCSI_CONN_AG_CTX_CF1EN_MASK    0x1
-#define YSTORM_ISCSI_CONN_AG_CTX_CF1EN_SHIFT   1
-#define YSTORM_ISCSI_CONN_AG_CTX_CF2EN_MASK    0x1
-#define YSTORM_ISCSI_CONN_AG_CTX_CF2EN_SHIFT   2
-#define YSTORM_ISCSI_CONN_AG_CTX_RULE0EN_MASK  0x1
-#define YSTORM_ISCSI_CONN_AG_CTX_RULE0EN_SHIFT 3
-#define YSTORM_ISCSI_CONN_AG_CTX_RULE1EN_MASK  0x1
-#define YSTORM_ISCSI_CONN_AG_CTX_RULE1EN_SHIFT 4
-#define YSTORM_ISCSI_CONN_AG_CTX_RULE2EN_MASK  0x1
-#define YSTORM_ISCSI_CONN_AG_CTX_RULE2EN_SHIFT 5
-#define YSTORM_ISCSI_CONN_AG_CTX_RULE3EN_MASK  0x1
-#define YSTORM_ISCSI_CONN_AG_CTX_RULE3EN_SHIFT 6
-#define YSTORM_ISCSI_CONN_AG_CTX_RULE4EN_MASK  0x1
-#define YSTORM_ISCSI_CONN_AG_CTX_RULE4EN_SHIFT 7
+#define E4_YSTORM_ISCSI_CONN_AG_CTX_CF0EN_MASK		0x1
+#define E4_YSTORM_ISCSI_CONN_AG_CTX_CF0EN_SHIFT		0
+#define E4_YSTORM_ISCSI_CONN_AG_CTX_CF1EN_MASK		0x1
+#define E4_YSTORM_ISCSI_CONN_AG_CTX_CF1EN_SHIFT		1
+#define E4_YSTORM_ISCSI_CONN_AG_CTX_CF2EN_MASK		0x1
+#define E4_YSTORM_ISCSI_CONN_AG_CTX_CF2EN_SHIFT		2
+#define E4_YSTORM_ISCSI_CONN_AG_CTX_RULE0EN_MASK	0x1
+#define E4_YSTORM_ISCSI_CONN_AG_CTX_RULE0EN_SHIFT	3
+#define E4_YSTORM_ISCSI_CONN_AG_CTX_RULE1EN_MASK	0x1
+#define E4_YSTORM_ISCSI_CONN_AG_CTX_RULE1EN_SHIFT	4
+#define E4_YSTORM_ISCSI_CONN_AG_CTX_RULE2EN_MASK	0x1
+#define E4_YSTORM_ISCSI_CONN_AG_CTX_RULE2EN_SHIFT	5
+#define E4_YSTORM_ISCSI_CONN_AG_CTX_RULE3EN_MASK	0x1
+#define E4_YSTORM_ISCSI_CONN_AG_CTX_RULE3EN_SHIFT	6
+#define E4_YSTORM_ISCSI_CONN_AG_CTX_RULE4EN_MASK	0x1
+#define E4_YSTORM_ISCSI_CONN_AG_CTX_RULE4EN_SHIFT	7
 	u8 byte2;
 	u8 byte3;
 	__le16 word0;
@@ -11613,7 +12276,7 @@ struct public_drv_mb {
 #define DRV_MB_PARAM_DCBX_NOTIFY_MASK		0x000000FF
 #define DRV_MB_PARAM_DCBX_NOTIFY_SHIFT		3
 
-#define DRV_MB_PARAM_NVM_LEN_SHIFT		24
+#define DRV_MB_PARAM_NVM_LEN_OFFSET		24
 
 #define DRV_MB_PARAM_CFG_VF_MSIX_VF_ID_SHIFT	0
 #define DRV_MB_PARAM_CFG_VF_MSIX_VF_ID_MASK	0x000000FF
diff --git a/drivers/net/ethernet/qlogic/qed/qed_hw.c b/drivers/net/ethernet/qlogic/qed/qed_hw.c
index a05feb38c6ee..fca2dbd93ad9 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_hw.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_hw.c
@@ -807,3 +807,71 @@ int qed_dmae_host2host(struct qed_hwfn *p_hwfn,
 	return rc;
 }
 
+int qed_dmae_sanity(struct qed_hwfn *p_hwfn,
+		    struct qed_ptt *p_ptt, const char *phase)
+{
+	u32 size = PAGE_SIZE / 2, val;
+	struct qed_dmae_params params;
+	int rc = 0;
+	dma_addr_t p_phys;
+	void *p_virt;
+	u32 *p_tmp;
+
+	p_virt = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
+				    2 * size, &p_phys, GFP_KERNEL);
+	if (!p_virt) {
+		DP_NOTICE(p_hwfn,
+			  "DMAE sanity [%s]: failed to allocate memory\n",
+			  phase);
+		return -ENOMEM;
+	}
+
+	/* Fill the bottom half of the allocated memory with a known pattern */
+	for (p_tmp = (u32 *)p_virt;
+	     p_tmp < (u32 *)((u8 *)p_virt + size); p_tmp++) {
+		/* Save the address itself as the value */
+		val = (u32)(uintptr_t)p_tmp;
+		*p_tmp = val;
+	}
+
+	/* Zero the top half of the allocated memory */
+	memset((u8 *)p_virt + size, 0, size);
+
+	DP_VERBOSE(p_hwfn,
+		   QED_MSG_SP,
+		   "DMAE sanity [%s]: src_addr={phys 0x%llx, virt %p}, dst_addr={phys 0x%llx, virt %p}, size 0x%x\n",
+		   phase,
+		   (u64)p_phys,
+		   p_virt, (u64)(p_phys + size), (u8 *)p_virt + size, size);
+
+	memset(&params, 0, sizeof(params));
+	rc = qed_dmae_host2host(p_hwfn, p_ptt, p_phys, p_phys + size,
+				size / 4 /* size_in_dwords */, &params);
+	if (rc) {
+		DP_NOTICE(p_hwfn,
+			  "DMAE sanity [%s]: qed_dmae_host2host() failed. rc = %d.\n",
+			  phase, rc);
+		goto out;
+	}
+
+	/* Verify that the top half of the allocated memory has the pattern */
+	for (p_tmp = (u32 *)((u8 *)p_virt + size);
+	     p_tmp < (u32 *)((u8 *)p_virt + (2 * size)); p_tmp++) {
+		/* The corresponding address in the bottom half */
+		val = (u32)(uintptr_t)p_tmp - size;
+
+		if (*p_tmp != val) {
+			DP_NOTICE(p_hwfn,
+				  "DMAE sanity [%s]: addr={phys 0x%llx, virt %p}, read_val 0x%08x, expected_val 0x%08x\n",
+				  phase,
+				  (u64)p_phys + ((u8 *)p_tmp - (u8 *)p_virt),
+				  p_tmp, *p_tmp, val);
+			rc = -EINVAL;
+			goto out;
+		}
+	}
+
+out:
+	dma_free_coherent(&p_hwfn->cdev->pdev->dev, 2 * size, p_virt, p_phys);
+	return rc;
+}
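
The sanity check above reduces to a reusable pattern: seed the source half of one coherent buffer with values derived from their own addresses, run the copy engine under test, and compare the destination half word by word. Below is a minimal user-space sketch of the same idea, with memcpy standing in for the DMA engine; the function names and buffer size are illustrative, not part of the driver.

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Stand-in for the engine being verified; a real test would kick off
 * a hardware DMA between the two halves instead. */
static void engine_copy(void *dst, const void *src, size_t len)
{
	memcpy(dst, src, len);
}

static int dma_pattern_check(size_t half)
{
	size_t i, n = half / sizeof(uint32_t);
	uint32_t *buf = malloc(2 * half);
	int rc = 0;

	if (!buf)
		return -1;

	/* Seed the bottom half: each dword holds (a truncation of) its
	 * own address, so a misplaced or partial copy is detectable. */
	for (i = 0; i < n; i++)
		buf[i] = (uint32_t)(uintptr_t)&buf[i];

	/* Zero the top half, then copy bottom -> top. */
	memset(buf + n, 0, half);
	engine_copy(buf + n, buf, half);

	/* Every destination dword must carry the address of its source. */
	for (i = 0; i < n; i++) {
		if (buf[n + i] != (uint32_t)(uintptr_t)&buf[i]) {
			fprintf(stderr, "mismatch at dword %zu\n", i);
			rc = -1;
			break;
		}
	}

	free(buf);
	return rc;
}

int main(void)
{
	return dma_pattern_check(2048) ? 1 : 0;
}
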
diff --git a/drivers/net/ethernet/qlogic/qed/qed_hw.h b/drivers/net/ethernet/qlogic/qed/qed_hw.h
index f2505c691c26..8db2839a8ec8 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_hw.h
+++ b/drivers/net/ethernet/qlogic/qed/qed_hw.h
@@ -299,4 +299,8 @@ union qed_qm_pq_params {
 
 int qed_init_fw_data(struct qed_dev *cdev,
 		     const u8 *fw_data);
+
+int qed_dmae_sanity(struct qed_hwfn *p_hwfn,
+		    struct qed_ptt *p_ptt, const char *phase);
+
 #endif
diff --git a/drivers/net/ethernet/qlogic/qed/qed_init_fw_funcs.c b/drivers/net/ethernet/qlogic/qed/qed_init_fw_funcs.c
index b069ad088269..18fb5062a83d 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_init_fw_funcs.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_init_fw_funcs.c
@@ -31,6 +31,7 @@
  */
 
 #include <linux/types.h>
+#include <linux/crc8.h>
 #include <linux/delay.h>
 #include <linux/kernel.h>
 #include <linux/slab.h>
@@ -40,102 +41,197 @@
 #include "qed_init_ops.h"
 #include "qed_reg_addr.h"
 
+#define CDU_VALIDATION_DEFAULT_CFG	61
+
+static u16 con_region_offsets[3][NUM_OF_CONNECTION_TYPES_E4] = {
+	{400, 336, 352, 304, 304, 384, 416, 352},	/* region 3 offsets */
+	{528, 496, 416, 448, 448, 512, 544, 480},	/* region 4 offsets */
+	{608, 544, 496, 512, 576, 592, 624, 560}	/* region 5 offsets */
+};
+
+static u16 task_region_offsets[1][NUM_OF_CONNECTION_TYPES_E4] = {
+	{240, 240, 112, 0, 0, 0, 0, 96}	/* region 1 offsets */
+};
+
 /* General constants */
 #define QM_PQ_MEM_4KB(pq_size)	(pq_size ? DIV_ROUND_UP((pq_size + 1) *	\
 							QM_PQ_ELEMENT_SIZE, \
 							0x1000) : 0)
 #define QM_PQ_SIZE_256B(pq_size)	(pq_size ? DIV_ROUND_UP(pq_size, \
 								0x100) - 1 : 0)
-#define QM_INVALID_PQ_ID                        0xffff
+#define QM_INVALID_PQ_ID		0xffff
+
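
For a feel of what QM_PQ_MEM_4KB computes, here is a hedged standalone mirror of the macro; it assumes QM_PQ_ELEMENT_SIZE is 4 bytes, per the companion HSI header rather than this hunk.

#include <stdint.h>
#include <stdio.h>

#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))
#define PQ_ELEMENT_SIZE		4	/* bytes; assumed per qed_hsi.h */

/* Mirror of QM_PQ_MEM_4KB: number of 4 KB pages needed to hold
 * pq_size + 1 queue elements, or 0 for an unused PQ. */
static uint32_t pq_mem_4kb(uint32_t pq_size)
{
	return pq_size ?
	       DIV_ROUND_UP((pq_size + 1) * PQ_ELEMENT_SIZE, 0x1000) : 0;
}

int main(void)
{
	/* 2049 elements * 4 B = 8196 B -> 3 pages of 4 KB */
	printf("pq_mem_4kb(2048) = %u\n", pq_mem_4kb(2048));
	return 0;
}
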
 /* Feature enable */
-#define QM_BYPASS_EN                            1
-#define QM_BYTE_CRD_EN                          1
+#define QM_BYPASS_EN	1
+#define QM_BYTE_CRD_EN	1
+
 /* Other PQ constants */
-#define QM_OTHER_PQS_PER_PF                     4
+#define QM_OTHER_PQS_PER_PF	4
+
 /* WFQ constants */
-#define QM_WFQ_UPPER_BOUND		62500000
-#define QM_WFQ_VP_PQ_VOQ_SHIFT          0
-#define QM_WFQ_VP_PQ_PF_SHIFT           5
-#define QM_WFQ_INC_VAL(weight)          ((weight) * 0x9000)
-#define QM_WFQ_MAX_INC_VAL                      43750000
+
+/* Upper bound in MB, 10 * burst size of 1ms in 50Gbps */
+#define QM_WFQ_UPPER_BOUND	62500000
+
+/* Bit of VOQ in WFQ VP PQ map */
+#define QM_WFQ_VP_PQ_VOQ_SHIFT	0
+
+/* Bit of PF in WFQ VP PQ map */
+#define QM_WFQ_VP_PQ_PF_E4_SHIFT	5
+
+/* 0x9000 = 4*9*1024 */
+#define QM_WFQ_INC_VAL(weight)	((weight) * 0x9000)
+
+/* Max WFQ increment value is 0.7 * upper bound */
+#define QM_WFQ_MAX_INC_VAL	((QM_WFQ_UPPER_BOUND * 7) / 10)
 
 /* RL constants */
-#define QM_RL_UPPER_BOUND                       62500000
-#define QM_RL_PERIOD                            5               /* in us */
-#define QM_RL_PERIOD_CLK_25M            (25 * QM_RL_PERIOD)
-#define QM_RL_MAX_INC_VAL                       43750000
-#define QM_RL_INC_VAL(rate)		max_t(u32,	\
-					      (u32)(((rate ? rate : \
-						      1000000) *    \
-						     QM_RL_PERIOD * \
-						     101) / (8 * 100)), 1)
+
+/* Period in us */
+#define QM_RL_PERIOD	5
+
+/* Period in 25MHz cycles */
+#define QM_RL_PERIOD_CLK_25M	(25 * QM_RL_PERIOD)
+
+/* RL increment value - rate is specified in mbps */
+#define QM_RL_INC_VAL(rate) ({ \
+	typeof(rate) __rate = (rate); \
+	max_t(u32, \
+	      (u32)(((__rate ? __rate : 1000000) * QM_RL_PERIOD * 101) / \
+		    (8 * 100)), \
+	      1); })
+
+/* PF RL Upper bound is set to 10 * burst size of 1ms in 50Gbps */
+#define QM_PF_RL_UPPER_BOUND	62500000
+
+/* Max PF RL increment value is 0.7 * upper bound */
+#define QM_PF_RL_MAX_INC_VAL	((QM_PF_RL_UPPER_BOUND * 7) / 10)
+
+/* Vport RL Upper bound, link speed is in Mbps */
+#define QM_VP_RL_UPPER_BOUND(speed)	((u32)max_t(u32, \
+						    QM_RL_INC_VAL(speed), \
+						    9700 + 1000))
+
+/* Max Vport RL increment value is the Vport RL upper bound */
+#define QM_VP_RL_MAX_INC_VAL(speed)	QM_VP_RL_UPPER_BOUND(speed)
+
+/* Vport RL credit threshold in case of QM bypass */
+#define QM_VP_RL_BYPASS_THRESH_SPEED	(QM_VP_RL_UPPER_BOUND(10000) - 1)
+
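
As a worked example of QM_RL_INC_VAL, the sketch below mirrors the macro outside the driver (rate in Mbps, 5 us period, 1% margin, bits-to-bytes conversion); the 25 Gbps sample rate is arbitrary.

#include <stdint.h>
#include <stdio.h>

/* Mirror of QM_RL_INC_VAL: rate in Mbps, QM_RL_PERIOD of 5 us, a 1%
 * margin (101/100) and a bits-to-bytes conversion (/8). A zero rate
 * falls back to 1000000 and the result is clamped to at least 1,
 * exactly as in the macro above. */
static uint32_t rl_inc_val(uint32_t rate)
{
	uint64_t r = rate ? rate : 1000000;
	uint32_t v = (uint32_t)((r * 5 * 101) / (8 * 100));

	return v ? v : 1;
}

int main(void)
{
	/* 25 Gbps (25000 Mbps): 25000 * 5 * 101 / 800 = 15781 */
	printf("rl_inc_val(25000) = %u\n", rl_inc_val(25000));
	return 0;
}
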
 /* AFullOprtnstcCrdMask constants */
-#define QM_OPPOR_LINE_VOQ_DEF           1
-#define QM_OPPOR_FW_STOP_DEF            0
-#define QM_OPPOR_PQ_EMPTY_DEF           1
+#define QM_OPPOR_LINE_VOQ_DEF	1
+#define QM_OPPOR_FW_STOP_DEF	0
+#define QM_OPPOR_PQ_EMPTY_DEF	1
+
 /* Command Queue constants */
-#define PBF_CMDQ_PURE_LB_LINES                          150
-#define PBF_CMDQ_LINES_RT_OFFSET(voq)           (		 \
-		PBF_REG_YCMD_QS_NUM_LINES_VOQ0_RT_OFFSET + voq * \
-		(PBF_REG_YCMD_QS_NUM_LINES_VOQ1_RT_OFFSET -	 \
-		 PBF_REG_YCMD_QS_NUM_LINES_VOQ0_RT_OFFSET))
-#define PBF_BTB_GUARANTEED_RT_OFFSET(voq)       (	      \
-		PBF_REG_BTB_GUARANTEED_VOQ0_RT_OFFSET + voq * \
-		(PBF_REG_BTB_GUARANTEED_VOQ1_RT_OFFSET -      \
-		 PBF_REG_BTB_GUARANTEED_VOQ0_RT_OFFSET))
-#define QM_VOQ_LINE_CRD(pbf_cmd_lines)          ((((pbf_cmd_lines) - \
-						   4) *		     \
-						  2) | QM_LINE_CRD_REG_SIGN_BIT)
+
+/* Pure LB CmdQ lines (+spare) */
+#define PBF_CMDQ_PURE_LB_LINES	150
+
+#define PBF_CMDQ_LINES_E5_RSVD_RATIO	8
+
+#define PBF_CMDQ_LINES_RT_OFFSET(ext_voq) \
+	(PBF_REG_YCMD_QS_NUM_LINES_VOQ0_RT_OFFSET + \
+	 (ext_voq) * (PBF_REG_YCMD_QS_NUM_LINES_VOQ1_RT_OFFSET - \
+		PBF_REG_YCMD_QS_NUM_LINES_VOQ0_RT_OFFSET))
+
+#define PBF_BTB_GUARANTEED_RT_OFFSET(ext_voq) \
+	(PBF_REG_BTB_GUARANTEED_VOQ0_RT_OFFSET + \
+	 (ext_voq) * (PBF_REG_BTB_GUARANTEED_VOQ1_RT_OFFSET - \
+		PBF_REG_BTB_GUARANTEED_VOQ0_RT_OFFSET))
+
+#define QM_VOQ_LINE_CRD(pbf_cmd_lines) \
+	((((pbf_cmd_lines) - 4) * 2) | QM_LINE_CRD_REG_SIGN_BIT)
+
 /* BTB: blocks constants (block size = 256B) */
-#define BTB_JUMBO_PKT_BLOCKS            38
-#define BTB_HEADROOM_BLOCKS                     BTB_JUMBO_PKT_BLOCKS
-#define BTB_PURE_LB_FACTOR                      10
-#define BTB_PURE_LB_RATIO                       7
+
+/* 256B blocks in 9700B packet */
+#define BTB_JUMBO_PKT_BLOCKS	38
+
+/* Headroom per-port */
+#define BTB_HEADROOM_BLOCKS	BTB_JUMBO_PKT_BLOCKS
+#define BTB_PURE_LB_FACTOR	10
+
+/* Pure LB blocks ratio, scaled by BTB_PURE_LB_FACTOR (hence really 0.7) */
+#define BTB_PURE_LB_RATIO	7
+
 /* QM stop command constants */
-#define QM_STOP_PQ_MASK_WIDTH           32
-#define QM_STOP_CMD_ADDR                2
-#define QM_STOP_CMD_STRUCT_SIZE         2
-#define QM_STOP_CMD_PAUSE_MASK_OFFSET   0
-#define QM_STOP_CMD_PAUSE_MASK_SHIFT    0
-#define QM_STOP_CMD_PAUSE_MASK_MASK     -1
-#define QM_STOP_CMD_GROUP_ID_OFFSET     1
-#define QM_STOP_CMD_GROUP_ID_SHIFT      16
-#define QM_STOP_CMD_GROUP_ID_MASK       15
-#define QM_STOP_CMD_PQ_TYPE_OFFSET      1
-#define QM_STOP_CMD_PQ_TYPE_SHIFT       24
-#define QM_STOP_CMD_PQ_TYPE_MASK        1
-#define QM_STOP_CMD_MAX_POLL_COUNT      100
-#define QM_STOP_CMD_POLL_PERIOD_US      500
+#define QM_STOP_PQ_MASK_WIDTH		32
+#define QM_STOP_CMD_ADDR		2
+#define QM_STOP_CMD_STRUCT_SIZE		2
+#define QM_STOP_CMD_PAUSE_MASK_OFFSET	0
+#define QM_STOP_CMD_PAUSE_MASK_SHIFT	0
+#define QM_STOP_CMD_PAUSE_MASK_MASK	-1
+#define QM_STOP_CMD_GROUP_ID_OFFSET	1
+#define QM_STOP_CMD_GROUP_ID_SHIFT	16
+#define QM_STOP_CMD_GROUP_ID_MASK	15
+#define QM_STOP_CMD_PQ_TYPE_OFFSET	1
+#define QM_STOP_CMD_PQ_TYPE_SHIFT	24
+#define QM_STOP_CMD_PQ_TYPE_MASK	1
+#define QM_STOP_CMD_MAX_POLL_COUNT	100
+#define QM_STOP_CMD_POLL_PERIOD_US	500
 
 /* QM command macros */
-#define QM_CMD_STRUCT_SIZE(cmd)			cmd ## \
-	_STRUCT_SIZE
-#define QM_CMD_SET_FIELD(var, cmd, field,				  \
-			 value)        SET_FIELD(var[cmd ## _ ## field ## \
-						     _OFFSET],		  \
-						 cmd ## _ ## field,	  \
-						 value)
-/* QM: VOQ macros */
-#define PHYS_VOQ(port, tc, max_phys_tcs_per_port) ((port) *	\
-						   (max_phys_tcs_per_port) + \
-						   (tc))
-#define LB_VOQ(port)				( \
-		MAX_PHYS_VOQS + (port))
-#define VOQ(port, tc, max_phy_tcs_pr_port)	\
-	((tc) <		\
-	 LB_TC ? PHYS_VOQ(port,		\
-			  tc,			 \
-			  max_phy_tcs_pr_port) \
-		: LB_VOQ(port))
+#define QM_CMD_STRUCT_SIZE(cmd)	cmd ## _STRUCT_SIZE
+#define QM_CMD_SET_FIELD(var, cmd, field, value) \
+	SET_FIELD(var[cmd ## _ ## field ## _OFFSET], \
+		  cmd ## _ ## field, \
+		  value)
+
+#define QM_INIT_TX_PQ_MAP(p_hwfn, map, chip, pq_id, rl_valid, vp_pq_id, rl_id, \
+			  ext_voq, wrr) \
+	do { \
+		typeof(map) __map; \
+		memset(&__map, 0, sizeof(__map)); \
+		SET_FIELD(__map.reg, QM_RF_PQ_MAP_ ## chip ## _PQ_VALID, 1); \
+		SET_FIELD(__map.reg, QM_RF_PQ_MAP_ ## chip ## _RL_VALID, \
+			  rl_valid); \
+		SET_FIELD(__map.reg, QM_RF_PQ_MAP_ ## chip ## _VP_PQ_ID, \
+			  vp_pq_id); \
+		SET_FIELD(__map.reg, QM_RF_PQ_MAP_ ## chip ## _RL_ID, rl_id); \
+		SET_FIELD(__map.reg, QM_RF_PQ_MAP_ ## chip ## _VOQ, ext_voq); \
+		SET_FIELD(__map.reg, \
+			  QM_RF_PQ_MAP_ ## chip ## _WRR_WEIGHT_GROUP, wrr); \
+		STORE_RT_REG(p_hwfn, QM_REG_TXPQMAP_RT_OFFSET + (pq_id), \
+			     *((u32 *)&__map)); \
+		(map) = __map; \
+	} while (0)
+
+#define WRITE_PQ_INFO_TO_RAM	1
+#define PQ_INFO_ELEMENT(vp, pf, tc, port, rl_valid, rl) \
+	(((vp) << 0) | ((pf) << 12) | ((tc) << 16) | ((port) << 20) | \
+	((rl_valid) << 22) | ((rl) << 24))
+#define PQ_INFO_RAM_GRC_ADDRESS(pq_id) \
+	(XSEM_REG_FAST_MEMORY + SEM_FAST_REG_INT_RAM + 21776 + (pq_id) * 4)
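
To make the PQ_INFO_ELEMENT packing concrete, here is a small sketch that builds one RAM element from arbitrary sample values; the field widths follow the shifts in the macro above.

#include <stdint.h>
#include <stdio.h>

/* Same layout as PQ_INFO_ELEMENT: vp[11:0], pf[15:12], tc[19:16],
 * port[21:20], rl_valid[22], rl[31:24]. */
static uint32_t pq_info_element(uint32_t vp, uint32_t pf, uint32_t tc,
				uint32_t port, uint32_t rl_valid, uint32_t rl)
{
	return (vp << 0) | (pf << 12) | (tc << 16) | (port << 20) |
	       (rl_valid << 22) | (rl << 24);
}

int main(void)
{
	uint32_t v = pq_info_element(5, 2, 3, 1, 1, 7);

	/* 5 | (2 << 12) | (3 << 16) | (1 << 20) | (1 << 22) | (7 << 24)
	 * = 0x07532005 */
	printf("element = 0x%08x\n", v);
	return 0;
}
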
+
 /******************** INTERNAL IMPLEMENTATION *********************/
+
+/* Returns the external VOQ number */
+static u8 qed_get_ext_voq(struct qed_hwfn *p_hwfn,
+			  u8 port_id, u8 tc, u8 max_phys_tcs_per_port)
+{
+	if (tc == PURE_LB_TC)
+		return NUM_OF_PHYS_TCS * MAX_NUM_PORTS_BB + port_id;
+	else
+		return port_id * max_phys_tcs_per_port + tc;
+}
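
A couple of worked mappings may help; the constants here are hedged (NUM_OF_PHYS_TCS = 8 and MAX_NUM_PORTS_BB = 2 are taken from the common HSI headers, not from this hunk):

/* With max_phys_tcs_per_port = 4:
 *   qed_get_ext_voq(p_hwfn, 1, 2, 4)          -> 1 * 4 + 2 = 6
 *   qed_get_ext_voq(p_hwfn, 1, PURE_LB_TC, 4) -> 8 * 2 + 1 = 17
 */
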
+
 /* Prepare PF RL enable/disable runtime init values */
 static void qed_enable_pf_rl(struct qed_hwfn *p_hwfn, bool pf_rl_en)
 {
 	STORE_RT_REG(p_hwfn, QM_REG_RLPFENABLE_RT_OFFSET, pf_rl_en ? 1 : 0);
 	if (pf_rl_en) {
+		u8 num_ext_voqs = MAX_NUM_VOQS_E4;
+		u64 voq_bit_mask = ((u64)1 << num_ext_voqs) - 1;
+
 		/* Enable RLs for all VOQs */
-		STORE_RT_REG(p_hwfn, QM_REG_RLPFVOQENABLE_RT_OFFSET,
-			     (1 << MAX_NUM_VOQS) - 1);
+		STORE_RT_REG(p_hwfn,
+			     QM_REG_RLPFVOQENABLE_RT_OFFSET,
+			     (u32)voq_bit_mask);
+		if (num_ext_voqs >= 32)
+			STORE_RT_REG(p_hwfn, QM_REG_RLPFVOQENABLE_MSB_RT_OFFSET,
+				     (u32)(voq_bit_mask >> 32));
+
 		/* Write RL period */
 		STORE_RT_REG(p_hwfn,
 			     QM_REG_RLPFPERIOD_RT_OFFSET, QM_RL_PERIOD_CLK_25M);
@@ -147,7 +243,7 @@ static void qed_enable_pf_rl(struct qed_hwfn *p_hwfn, bool pf_rl_en)
 		if (QM_BYPASS_EN)
 			STORE_RT_REG(p_hwfn,
 				     QM_REG_AFULLQMBYPTHRPFRL_RT_OFFSET,
-				     QM_RL_UPPER_BOUND);
+				     QM_PF_RL_UPPER_BOUND);
 	}
 }
 
@@ -181,7 +277,7 @@ static void qed_enable_vport_rl(struct qed_hwfn *p_hwfn, bool vport_rl_en)
 		if (QM_BYPASS_EN)
 			STORE_RT_REG(p_hwfn,
 				     QM_REG_AFULLQMBYPTHRGLBLRL_RT_OFFSET,
-				     QM_RL_UPPER_BOUND);
+				     QM_VP_RL_BYPASS_THRESH_SPEED);
 	}
 }
 
@@ -202,15 +298,15 @@ static void qed_enable_vport_wfq(struct qed_hwfn *p_hwfn, bool vport_wfq_en)
  * the specified VOQ.
  */
 static void qed_cmdq_lines_voq_rt_init(struct qed_hwfn *p_hwfn,
-				       u8 voq, u16 cmdq_lines)
+				       u8 ext_voq, u16 cmdq_lines)
 {
-	u32 qm_line_crd;
+	u32 qm_line_crd = QM_VOQ_LINE_CRD(cmdq_lines);
 
-	qm_line_crd = QM_VOQ_LINE_CRD(cmdq_lines);
-	OVERWRITE_RT_REG(p_hwfn, PBF_CMDQ_LINES_RT_OFFSET(voq),
+	OVERWRITE_RT_REG(p_hwfn, PBF_CMDQ_LINES_RT_OFFSET(ext_voq),
 			 (u32)cmdq_lines);
-	STORE_RT_REG(p_hwfn, QM_REG_VOQCRDLINE_RT_OFFSET + voq, qm_line_crd);
-	STORE_RT_REG(p_hwfn, QM_REG_VOQINITCRDLINE_RT_OFFSET + voq,
+	STORE_RT_REG(p_hwfn, QM_REG_VOQCRDLINE_RT_OFFSET + ext_voq,
+		     qm_line_crd);
+	STORE_RT_REG(p_hwfn, QM_REG_VOQINITCRDLINE_RT_OFFSET + ext_voq,
 		     qm_line_crd);
 }
 
@@ -221,43 +317,52 @@ static void qed_cmdq_lines_rt_init(
 	u8 max_phys_tcs_per_port,
 	struct init_qm_port_params port_params[MAX_NUM_PORTS])
 {
-	u8 tc, voq, port_id, num_tcs_in_port;
+	u8 tc, ext_voq, port_id, num_tcs_in_port;
+	u8 num_ext_voqs = MAX_NUM_VOQS_E4;
+
+	/* Clear PBF lines of all VOQs */
+	for (ext_voq = 0; ext_voq < num_ext_voqs; ext_voq++)
+		STORE_RT_REG(p_hwfn, PBF_CMDQ_LINES_RT_OFFSET(ext_voq), 0);
 
-	/* Clear PBF lines for all VOQs */
-	for (voq = 0; voq < MAX_NUM_VOQS; voq++)
-		STORE_RT_REG(p_hwfn, PBF_CMDQ_LINES_RT_OFFSET(voq), 0);
 	for (port_id = 0; port_id < max_ports_per_engine; port_id++) {
-		if (port_params[port_id].active) {
-			u16 phys_lines, phys_lines_per_tc;
-
-			/* find #lines to divide between active phys TCs */
-			phys_lines = port_params[port_id].num_pbf_cmd_lines -
-				     PBF_CMDQ_PURE_LB_LINES;
-			/* find #lines per active physical TC */
-			num_tcs_in_port = 0;
-			for (tc = 0; tc < NUM_OF_PHYS_TCS; tc++) {
-				if (((port_params[port_id].active_phys_tcs >>
-				      tc) & 0x1) == 1)
-					num_tcs_in_port++;
-			}
+		u16 phys_lines, phys_lines_per_tc;
 
-			phys_lines_per_tc = phys_lines / num_tcs_in_port;
-			/* init registers per active TC */
-			for (tc = 0; tc < NUM_OF_PHYS_TCS; tc++) {
-				if (((port_params[port_id].active_phys_tcs >>
-				      tc) & 0x1) != 1)
-					continue;
+		if (!port_params[port_id].active)
+			continue;
 
-				voq = PHYS_VOQ(port_id, tc,
-					       max_phys_tcs_per_port);
-				qed_cmdq_lines_voq_rt_init(p_hwfn, voq,
-							   phys_lines_per_tc);
-			}
+		/* Find number of command queue lines to divide between the
+		 * active physical TCs. In E5, 1/8 of the lines are reserved.
+		 * The lines for pure LB TC are subtracted.
+		 */
+		phys_lines = port_params[port_id].num_pbf_cmd_lines;
+		phys_lines -= PBF_CMDQ_PURE_LB_LINES;
+
+		/* Find #lines per active physical TC */
+		num_tcs_in_port = 0;
+		for (tc = 0; tc < max_phys_tcs_per_port; tc++)
+			if (((port_params[port_id].active_phys_tcs >>
+			      tc) & 0x1) == 1)
+				num_tcs_in_port++;
+		phys_lines_per_tc = phys_lines / num_tcs_in_port;
 
-			/* init registers for pure LB TC */
-			qed_cmdq_lines_voq_rt_init(p_hwfn, LB_VOQ(port_id),
-						   PBF_CMDQ_PURE_LB_LINES);
+		/* Init registers per active TC */
+		for (tc = 0; tc < max_phys_tcs_per_port; tc++) {
+			ext_voq = qed_get_ext_voq(p_hwfn,
+						  port_id,
+						  tc, max_phys_tcs_per_port);
+			if (((port_params[port_id].active_phys_tcs >>
+			      tc) & 0x1) == 1)
+				qed_cmdq_lines_voq_rt_init(p_hwfn,
+							   ext_voq,
+							   phys_lines_per_tc);
 		}
+
+		/* Init registers for pure LB TC */
+		ext_voq = qed_get_ext_voq(p_hwfn,
+					  port_id,
+					  PURE_LB_TC, max_phys_tcs_per_port);
+		qed_cmdq_lines_voq_rt_init(p_hwfn,
+					   ext_voq, PBF_CMDQ_PURE_LB_LINES);
 	}
 }
 
@@ -268,11 +373,9 @@ static void qed_btb_blocks_rt_init(
 	struct init_qm_port_params port_params[MAX_NUM_PORTS])
 {
 	u32 usable_blocks, pure_lb_blocks, phys_blocks;
-	u8 tc, voq, port_id, num_tcs_in_port;
+	u8 tc, ext_voq, port_id, num_tcs_in_port;
 
 	for (port_id = 0; port_id < max_ports_per_engine; port_id++) {
-		u32 temp;
-
 		if (!port_params[port_id].active)
 			continue;
 
@@ -280,13 +383,14 @@ static void qed_btb_blocks_rt_init(
 		usable_blocks = port_params[port_id].num_btb_blocks -
 				BTB_HEADROOM_BLOCKS;
 
-		/* find blocks per physical TC */
+		/* Find blocks per physical TC. Use factor to avoid
+		 * floating-point arithmetic.
+		 */
 		num_tcs_in_port = 0;
-		for (tc = 0; tc < NUM_OF_PHYS_TCS; tc++) {
+		for (tc = 0; tc < NUM_OF_PHYS_TCS; tc++)
 			if (((port_params[port_id].active_phys_tcs >>
 			      tc) & 0x1) == 1)
 				num_tcs_in_port++;
-		}
 
 		pure_lb_blocks = (usable_blocks * BTB_PURE_LB_FACTOR) /
 				 (num_tcs_in_port * BTB_PURE_LB_FACTOR +
@@ -299,47 +403,55 @@ static void qed_btb_blocks_rt_init(
 		/* Init physical TCs */
 		for (tc = 0; tc < NUM_OF_PHYS_TCS; tc++) {
 			if (((port_params[port_id].active_phys_tcs >>
-			      tc) & 0x1) != 1)
-				continue;
-
-			voq = PHYS_VOQ(port_id, tc,
-				       max_phys_tcs_per_port);
-			STORE_RT_REG(p_hwfn, PBF_BTB_GUARANTEED_RT_OFFSET(voq),
-				     phys_blocks);
+			      tc) & 0x1) == 1) {
+				ext_voq =
+					qed_get_ext_voq(p_hwfn,
+							port_id,
+							tc,
+							max_phys_tcs_per_port);
+				STORE_RT_REG(p_hwfn,
+					     PBF_BTB_GUARANTEED_RT_OFFSET
+					     (ext_voq), phys_blocks);
+			}
 		}
 
 		/* Init pure LB TC */
-		temp = LB_VOQ(port_id);
-		STORE_RT_REG(p_hwfn, PBF_BTB_GUARANTEED_RT_OFFSET(temp),
+		ext_voq = qed_get_ext_voq(p_hwfn,
+					  port_id,
+					  PURE_LB_TC, max_phys_tcs_per_port);
+		STORE_RT_REG(p_hwfn, PBF_BTB_GUARANTEED_RT_OFFSET(ext_voq),
 			     pure_lb_blocks);
 	}
 }
 
 /* Prepare Tx PQ mapping runtime init values for the specified PF */
-static void qed_tx_pq_map_rt_init(
-	struct qed_hwfn *p_hwfn,
-	struct qed_ptt *p_ptt,
-	struct qed_qm_pf_rt_init_params *p_params,
-	u32 base_mem_addr_4kb)
+static void qed_tx_pq_map_rt_init(struct qed_hwfn *p_hwfn,
+				  struct qed_ptt *p_ptt,
+				  struct qed_qm_pf_rt_init_params *p_params,
+				  u32 base_mem_addr_4kb)
 {
-	struct init_qm_vport_params *vport_params = p_params->vport_params;
-	u16 num_pqs = p_params->num_pf_pqs + p_params->num_vf_pqs;
-	u16 first_pq_group = p_params->start_pq / QM_PF_QUEUE_GROUP_SIZE;
-	u16 last_pq_group = (p_params->start_pq + num_pqs - 1) /
-			    QM_PF_QUEUE_GROUP_SIZE;
-	u16 i, pq_id, pq_group;
-
-	/* A bit per Tx PQ indicating if the PQ is associated with a VF */
 	u32 tx_pq_vf_mask[MAX_QM_TX_QUEUES / QM_PF_QUEUE_GROUP_SIZE] = { 0 };
+	struct init_qm_vport_params *vport_params = p_params->vport_params;
 	u32 num_tx_pq_vf_masks = MAX_QM_TX_QUEUES / QM_PF_QUEUE_GROUP_SIZE;
-	u32 pq_mem_4kb = QM_PQ_MEM_4KB(p_params->num_pf_cids);
-	u32 vport_pq_mem_4kb = QM_PQ_MEM_4KB(p_params->num_vf_cids);
-	u32 mem_addr_4kb = base_mem_addr_4kb;
+	u16 num_pqs, first_pq_group, last_pq_group, i, j, pq_id, pq_group;
+	struct init_qm_pq_params *pq_params = p_params->pq_params;
+	u32 pq_mem_4kb, vport_pq_mem_4kb, mem_addr_4kb;
+
+	num_pqs = p_params->num_pf_pqs + p_params->num_vf_pqs;
+
+	first_pq_group = p_params->start_pq / QM_PF_QUEUE_GROUP_SIZE;
+	last_pq_group = (p_params->start_pq + num_pqs - 1) /
+			QM_PF_QUEUE_GROUP_SIZE;
+
+	pq_mem_4kb = QM_PQ_MEM_4KB(p_params->num_pf_cids);
+	vport_pq_mem_4kb = QM_PQ_MEM_4KB(p_params->num_vf_cids);
+	mem_addr_4kb = base_mem_addr_4kb;
 
 	/* Set mapping from PQ group to PF */
 	for (pq_group = first_pq_group; pq_group <= last_pq_group; pq_group++)
 		STORE_RT_REG(p_hwfn, QM_REG_PQTX2PF_0_RT_OFFSET + pq_group,
 			     (u32)(p_params->pf_id));
+
 	/* Set PQ sizes */
 	STORE_RT_REG(p_hwfn, QM_REG_MAXPQSIZE_0_RT_OFFSET,
 		     QM_PQ_SIZE_256B(p_params->num_pf_cids));
@@ -348,58 +460,82 @@ static void qed_tx_pq_map_rt_init(
 
 	/* Go over all Tx PQs */
 	for (i = 0, pq_id = p_params->start_pq; i < num_pqs; i++, pq_id++) {
-		u8 voq = VOQ(p_params->port_id, p_params->pq_params[i].tc_id,
-			     p_params->max_phys_tcs_per_port);
-		bool is_vf_pq = (i >= p_params->num_pf_pqs);
-		struct qm_rf_pq_map tx_pq_map;
-
-		bool rl_valid = p_params->pq_params[i].rl_valid &&
-				(p_params->pq_params[i].vport_id <
-				 MAX_QM_GLOBAL_RLS);
+		u8 ext_voq, vport_id_in_pf, tc_id = pq_params[i].tc_id;
+		u32 max_qm_global_rls = MAX_QM_GLOBAL_RLS;
+		struct qm_rf_pq_map_e4 tx_pq_map;
+		bool is_vf_pq, rl_valid;
+		u16 *p_first_tx_pq_id;
+
+		ext_voq = qed_get_ext_voq(p_hwfn,
+					  p_params->port_id,
+					  tc_id,
+					  p_params->max_phys_tcs_per_port);
+		is_vf_pq = (i >= p_params->num_pf_pqs);
+		rl_valid = pq_params[i].rl_valid &&
+			   pq_params[i].vport_id < max_qm_global_rls;
 
 		/* Update first Tx PQ of VPORT/TC */
-		u8 vport_id_in_pf = p_params->pq_params[i].vport_id -
-				    p_params->start_vport;
-		u16 *pq_ids = &vport_params[vport_id_in_pf].first_tx_pq_id[0];
-		u16 first_tx_pq_id = pq_ids[p_params->pq_params[i].tc_id];
+		vport_id_in_pf = pq_params[i].vport_id - p_params->start_vport;
+		p_first_tx_pq_id =
+		    &vport_params[vport_id_in_pf].first_tx_pq_id[tc_id];
+		if (*p_first_tx_pq_id == QM_INVALID_PQ_ID) {
+			u32 map_val =
+				(ext_voq << QM_WFQ_VP_PQ_VOQ_SHIFT) |
+				(p_params->pf_id << QM_WFQ_VP_PQ_PF_E4_SHIFT);
 
-		if (first_tx_pq_id == QM_INVALID_PQ_ID) {
 			/* Create new VP PQ */
-			pq_ids[p_params->pq_params[i].tc_id] = pq_id;
-			first_tx_pq_id = pq_id;
+			*p_first_tx_pq_id = pq_id;
 
 			/* Map VP PQ to VOQ and PF */
 			STORE_RT_REG(p_hwfn,
 				     QM_REG_WFQVPMAP_RT_OFFSET +
-				     first_tx_pq_id,
-				     (voq << QM_WFQ_VP_PQ_VOQ_SHIFT) |
-				     (p_params->pf_id <<
-				      QM_WFQ_VP_PQ_PF_SHIFT));
+				     *p_first_tx_pq_id,
+				     map_val);
 		}
 
-		if (p_params->pq_params[i].rl_valid && !rl_valid)
+		/* Check RL ID */
+		if (pq_params[i].rl_valid && pq_params[i].vport_id >=
+		    max_qm_global_rls)
 			DP_NOTICE(p_hwfn,
-				  "Invalid VPORT ID for rate limiter configuration");
-		/* Fill PQ map entry */
-		memset(&tx_pq_map, 0, sizeof(tx_pq_map));
-		SET_FIELD(tx_pq_map.reg, QM_RF_PQ_MAP_PQ_VALID, 1);
-		SET_FIELD(tx_pq_map.reg,
-			  QM_RF_PQ_MAP_RL_VALID, rl_valid ? 1 : 0);
-		SET_FIELD(tx_pq_map.reg, QM_RF_PQ_MAP_VP_PQ_ID, first_tx_pq_id);
-		SET_FIELD(tx_pq_map.reg, QM_RF_PQ_MAP_RL_ID,
-			  rl_valid ?
-			  p_params->pq_params[i].vport_id : 0);
-		SET_FIELD(tx_pq_map.reg, QM_RF_PQ_MAP_VOQ, voq);
-		SET_FIELD(tx_pq_map.reg, QM_RF_PQ_MAP_WRR_WEIGHT_GROUP,
-			  p_params->pq_params[i].wrr_group);
-		/* Write PQ map entry to CAM */
-		STORE_RT_REG(p_hwfn, QM_REG_TXPQMAP_RT_OFFSET + pq_id,
-			     *((u32 *)&tx_pq_map));
-		/* Set base address */
+				  "Invalid VPORT ID for rate limiter configuration\n");
+
+		/* Prepare PQ map entry */
+		QM_INIT_TX_PQ_MAP(p_hwfn,
+				  tx_pq_map,
+				  E4,
+				  pq_id,
+				  rl_valid ? 1 : 0,
+				  *p_first_tx_pq_id,
+				  rl_valid ? pq_params[i].vport_id : 0,
+				  ext_voq, pq_params[i].wrr_group);
+
+		/* Set PQ base address */
 		STORE_RT_REG(p_hwfn,
 			     QM_REG_BASEADDRTXPQ_RT_OFFSET + pq_id,
 			     mem_addr_4kb);
 
+		/* Clear PQ pointer table entry (64 bit) */
+		if (p_params->is_pf_loading)
+			for (j = 0; j < 2; j++)
+				STORE_RT_REG(p_hwfn,
+					     QM_REG_PTRTBLTX_RT_OFFSET +
+					     (pq_id * 2) + j, 0);
+
+		/* Write PQ info to RAM */
+		if (WRITE_PQ_INFO_TO_RAM != 0) {
+			u32 pq_info = 0;
+
+			pq_info = PQ_INFO_ELEMENT(*p_first_tx_pq_id,
+						  p_params->pf_id,
+						  tc_id,
+						  p_params->port_id,
+						  rl_valid ? 1 : 0,
+						  rl_valid ?
+						  pq_params[i].vport_id : 0);
+			qed_wr(p_hwfn, p_ptt, PQ_INFO_RAM_GRC_ADDRESS(pq_id),
+			       pq_info);
+		}
+
 		/* If VF PQ, add indication to PQ VF mask */
 		if (is_vf_pq) {
 			tx_pq_vf_mask[pq_id /
@@ -421,16 +557,16 @@ static void qed_tx_pq_map_rt_init(
 
 /* Prepare Other PQ mapping runtime init values for the specified PF */
 static void qed_other_pq_map_rt_init(struct qed_hwfn *p_hwfn,
-				     u8 port_id,
 				     u8 pf_id,
+				     bool is_pf_loading,
 				     u32 num_pf_cids,
 				     u32 num_tids, u32 base_mem_addr_4kb)
 {
 	u32 pq_size, pq_mem_4kb, mem_addr_4kb;
-	u16 i, pq_id, pq_group;
+	u16 i, j, pq_id, pq_group;
 
-	/* a single other PQ group is used in each PF,
-	 * where PQ group i is used in PF i.
+	/* A single other PQ group is used in each PF, where PQ group i is used
+	 * in PF i.
 	 */
 	pq_group = pf_id;
 	pq_size = num_pf_cids + num_tids;
@@ -440,16 +576,25 @@ static void qed_other_pq_map_rt_init(struct qed_hwfn *p_hwfn,
 	/* Map PQ group to PF */
 	STORE_RT_REG(p_hwfn, QM_REG_PQOTHER2PF_0_RT_OFFSET + pq_group,
 		     (u32)(pf_id));
+
 	/* Set PQ sizes */
 	STORE_RT_REG(p_hwfn, QM_REG_MAXPQSIZE_2_RT_OFFSET,
 		     QM_PQ_SIZE_256B(pq_size));
 
-	/* Set base address */
 	for (i = 0, pq_id = pf_id * QM_PF_QUEUE_GROUP_SIZE;
 	     i < QM_OTHER_PQS_PER_PF; i++, pq_id++) {
+		/* Set PQ base address */
 		STORE_RT_REG(p_hwfn,
 			     QM_REG_BASEADDROTHERPQ_RT_OFFSET + pq_id,
 			     mem_addr_4kb);
+
+		/* Clear PQ pointer table entry */
+		if (is_pf_loading)
+			for (j = 0; j < 2; j++)
+				STORE_RT_REG(p_hwfn,
+					     QM_REG_PTRTBLOTHER_RT_OFFSET +
+					     (pq_id * 2) + j, 0);
+
 		mem_addr_4kb += pq_mem_4kb;
 	}
 }
@@ -461,16 +606,11 @@ static int qed_pf_wfq_rt_init(struct qed_hwfn *p_hwfn,
 			      struct qed_qm_pf_rt_init_params *p_params)
 {
 	u16 num_tx_pqs = p_params->num_pf_pqs + p_params->num_vf_pqs;
-	u32 crd_reg_offset;
-	u32 inc_val;
+	struct init_qm_pq_params *pq_params = p_params->pq_params;
+	u32 inc_val, crd_reg_offset;
+	u8 ext_voq;
 	u16 i;
 
-	if (p_params->pf_id < MAX_NUM_PFS_BB)
-		crd_reg_offset = QM_REG_WFQPFCRD_RT_OFFSET;
-	else
-		crd_reg_offset = QM_REG_WFQPFCRD_MSB_RT_OFFSET;
-	crd_reg_offset += p_params->pf_id % MAX_NUM_PFS_BB;
-
 	inc_val = QM_WFQ_INC_VAL(p_params->pf_wfq);
 	if (!inc_val || inc_val > QM_WFQ_MAX_INC_VAL) {
 		DP_NOTICE(p_hwfn, "Invalid PF WFQ weight configuration\n");
@@ -478,19 +618,26 @@ static int qed_pf_wfq_rt_init(struct qed_hwfn *p_hwfn,
 	}
 
 	for (i = 0; i < num_tx_pqs; i++) {
-		u8 voq = VOQ(p_params->port_id, p_params->pq_params[i].tc_id,
-			     p_params->max_phys_tcs_per_port);
-
+		ext_voq = qed_get_ext_voq(p_hwfn,
+					  p_params->port_id,
+					  pq_params[i].tc_id,
+					  p_params->max_phys_tcs_per_port);
+		crd_reg_offset =
+			(p_params->pf_id < MAX_NUM_PFS_BB ?
+			 QM_REG_WFQPFCRD_RT_OFFSET :
+			 QM_REG_WFQPFCRD_MSB_RT_OFFSET) +
+			ext_voq * MAX_NUM_PFS_BB +
+			(p_params->pf_id % MAX_NUM_PFS_BB);
 		OVERWRITE_RT_REG(p_hwfn,
-				 crd_reg_offset + voq * MAX_NUM_PFS_BB,
-				 QM_WFQ_CRD_REG_SIGN_BIT);
+				 crd_reg_offset, (u32)QM_WFQ_CRD_REG_SIGN_BIT);
 	}
 
 	STORE_RT_REG(p_hwfn,
 		     QM_REG_WFQPFUPPERBOUND_RT_OFFSET + p_params->pf_id,
-		     QM_WFQ_UPPER_BOUND | QM_WFQ_CRD_REG_SIGN_BIT);
+		     QM_WFQ_UPPER_BOUND | (u32)QM_WFQ_CRD_REG_SIGN_BIT);
 	STORE_RT_REG(p_hwfn, QM_REG_WFQPFWEIGHT_RT_OFFSET + p_params->pf_id,
 		     inc_val);
+
 	return 0;
 }
 
@@ -501,15 +648,19 @@ static int qed_pf_rl_rt_init(struct qed_hwfn *p_hwfn, u8 pf_id, u32 pf_rl)
 {
 	u32 inc_val = QM_RL_INC_VAL(pf_rl);
 
-	if (inc_val > QM_RL_MAX_INC_VAL) {
+	if (inc_val > QM_PF_RL_MAX_INC_VAL) {
 		DP_NOTICE(p_hwfn, "Invalid PF rate limit configuration\n");
 		return -1;
 	}
-	STORE_RT_REG(p_hwfn, QM_REG_RLPFCRD_RT_OFFSET + pf_id,
-		     QM_RL_CRD_REG_SIGN_BIT);
-	STORE_RT_REG(p_hwfn, QM_REG_RLPFUPPERBOUND_RT_OFFSET + pf_id,
-		     QM_RL_UPPER_BOUND | QM_RL_CRD_REG_SIGN_BIT);
+
+	STORE_RT_REG(p_hwfn,
+		     QM_REG_RLPFCRD_RT_OFFSET + pf_id,
+		     (u32)QM_RL_CRD_REG_SIGN_BIT);
+	STORE_RT_REG(p_hwfn,
+		     QM_REG_RLPFUPPERBOUND_RT_OFFSET + pf_id,
+		     QM_PF_RL_UPPER_BOUND | (u32)QM_RL_CRD_REG_SIGN_BIT);
 	STORE_RT_REG(p_hwfn, QM_REG_RLPFINCVAL_RT_OFFSET + pf_id, inc_val);
+
 	return 0;
 }
 
@@ -520,12 +671,12 @@ static int qed_vp_wfq_rt_init(struct qed_hwfn *p_hwfn,
 			      u8 num_vports,
 			      struct init_qm_vport_params *vport_params)
 {
+	u16 vport_pq_id;
 	u32 inc_val;
 	u8 tc, i;
 
 	/* Go over all PF VPORTs */
 	for (i = 0; i < num_vports; i++) {
-
 		if (!vport_params[i].vport_wfq)
 			continue;
 
@@ -536,17 +687,14 @@ static int qed_vp_wfq_rt_init(struct qed_hwfn *p_hwfn,
 			return -1;
 		}
 
-		/* each VPORT can have several VPORT PQ IDs for
-		 * different TCs
-		 */
+		/* Each VPORT can have several VPORT PQ IDs for various TCs */
 		for (tc = 0; tc < NUM_OF_TCS; tc++) {
-			u16 vport_pq_id = vport_params[i].first_tx_pq_id[tc];
-
+			vport_pq_id = vport_params[i].first_tx_pq_id[tc];
 			if (vport_pq_id != QM_INVALID_PQ_ID) {
 				STORE_RT_REG(p_hwfn,
 					     QM_REG_WFQVPCRD_RT_OFFSET +
 					     vport_pq_id,
-					     QM_WFQ_CRD_REG_SIGN_BIT);
+					     (u32)QM_WFQ_CRD_REG_SIGN_BIT);
 				STORE_RT_REG(p_hwfn,
 					     QM_REG_WFQVPWEIGHT_RT_OFFSET +
 					     vport_pq_id, inc_val);
@@ -557,12 +705,17 @@ static int qed_vp_wfq_rt_init(struct qed_hwfn *p_hwfn,
 	return 0;
 }
 
+/* Prepare VPORT RL runtime init values for the specified VPORTs.
+ * Return -1 on error.
+ */
 static int qed_vport_rl_rt_init(struct qed_hwfn *p_hwfn,
 				u8 start_vport,
 				u8 num_vports,
+				u32 link_speed,
 				struct init_qm_vport_params *vport_params)
 {
 	u8 i, vport_id;
+	u32 inc_val;
 
 	if (start_vport + num_vports >= MAX_QM_GLOBAL_RLS) {
 		DP_NOTICE(p_hwfn,
@@ -572,22 +725,22 @@ static int qed_vport_rl_rt_init(struct qed_hwfn *p_hwfn,
 
 	/* Go over all PF VPORTs */
 	for (i = 0, vport_id = start_vport; i < num_vports; i++, vport_id++) {
-		u32 inc_val = QM_RL_INC_VAL(vport_params[i].vport_rl);
-
-		if (inc_val > QM_RL_MAX_INC_VAL) {
+		inc_val = QM_RL_INC_VAL(vport_params[i].vport_rl ?
+			  vport_params[i].vport_rl :
+			  link_speed);
+		if (inc_val > QM_VP_RL_MAX_INC_VAL(link_speed)) {
 			DP_NOTICE(p_hwfn,
 				  "Invalid VPORT rate-limit configuration\n");
 			return -1;
 		}
 
-		STORE_RT_REG(p_hwfn,
-			     QM_REG_RLGLBLCRD_RT_OFFSET + vport_id,
-			     QM_RL_CRD_REG_SIGN_BIT);
+		STORE_RT_REG(p_hwfn, QM_REG_RLGLBLCRD_RT_OFFSET + vport_id,
+			     (u32)QM_RL_CRD_REG_SIGN_BIT);
 		STORE_RT_REG(p_hwfn,
 			     QM_REG_RLGLBLUPPERBOUND_RT_OFFSET + vport_id,
-			     QM_RL_UPPER_BOUND | QM_RL_CRD_REG_SIGN_BIT);
-		STORE_RT_REG(p_hwfn,
-			     QM_REG_RLGLBLINCVAL_RT_OFFSET + vport_id,
+			     QM_VP_RL_UPPER_BOUND(link_speed) |
+			     (u32)QM_RL_CRD_REG_SIGN_BIT);
+		STORE_RT_REG(p_hwfn, QM_REG_RLGLBLINCVAL_RT_OFFSET + vport_id,
 			     inc_val);
 	}
 
@@ -599,7 +752,7 @@ static bool qed_poll_on_qm_cmd_ready(struct qed_hwfn *p_hwfn,
 {
 	u32 reg_val, i;
 
-	for (i = 0, reg_val = 0; i < QM_STOP_CMD_MAX_POLL_COUNT && reg_val == 0;
+	for (i = 0, reg_val = 0; i < QM_STOP_CMD_MAX_POLL_COUNT && !reg_val;
 	     i++) {
 		udelay(QM_STOP_CMD_POLL_PERIOD_US);
 		reg_val = qed_rd(p_hwfn, p_ptt, QM_REG_SDMCMDREADY);
@@ -632,8 +785,8 @@ static bool qed_send_qm_cmd(struct qed_hwfn *p_hwfn,
 }
 
 /******************** INTERFACE IMPLEMENTATION *********************/
-u32 qed_qm_pf_mem_size(u8 pf_id,
-		       u32 num_pf_cids,
+
+u32 qed_qm_pf_mem_size(u32 num_pf_cids,
 		       u32 num_vf_cids,
 		       u32 num_tids, u16 num_pf_pqs, u16 num_vf_pqs)
 {
@@ -642,11 +795,10 @@ u32 qed_qm_pf_mem_size(u8 pf_id,
 	       QM_PQ_MEM_4KB(num_pf_cids + num_tids) * QM_OTHER_PQS_PER_PF;
 }
 
-int qed_qm_common_rt_init(
-	struct qed_hwfn *p_hwfn,
-	struct qed_qm_common_rt_init_params *p_params)
+int qed_qm_common_rt_init(struct qed_hwfn *p_hwfn,
+			  struct qed_qm_common_rt_init_params *p_params)
 {
-	/* init AFullOprtnstcCrdMask */
+	/* Init AFullOprtnstcCrdMask */
 	u32 mask = (QM_OPPOR_LINE_VOQ_DEF <<
 		    QM_RF_OPPORTUNISTIC_MASK_LINEVOQ_SHIFT) |
 		   (QM_BYTE_CRD_EN << QM_RF_OPPORTUNISTIC_MASK_BYTEVOQ_SHIFT) |
@@ -664,18 +816,31 @@ int qed_qm_common_rt_init(
 		    QM_RF_OPPORTUNISTIC_MASK_QUEUEEMPTY_SHIFT);
 
 	STORE_RT_REG(p_hwfn, QM_REG_AFULLOPRTNSTCCRDMASK_RT_OFFSET, mask);
+
+	/* Enable/disable PF RL */
 	qed_enable_pf_rl(p_hwfn, p_params->pf_rl_en);
+
+	/* Enable/disable PF WFQ */
 	qed_enable_pf_wfq(p_hwfn, p_params->pf_wfq_en);
+
+	/* Enable/disable VPORT RL */
 	qed_enable_vport_rl(p_hwfn, p_params->vport_rl_en);
+
+	/* Enable/disable VPORT WFQ */
 	qed_enable_vport_wfq(p_hwfn, p_params->vport_wfq_en);
+
+	/* Init PBF CMDQ line credit */
 	qed_cmdq_lines_rt_init(p_hwfn,
 			       p_params->max_ports_per_engine,
 			       p_params->max_phys_tcs_per_port,
 			       p_params->port_params);
+
+	/* Init BTB blocks in PBF */
 	qed_btb_blocks_rt_init(p_hwfn,
 			       p_params->max_ports_per_engine,
 			       p_params->max_phys_tcs_per_port,
 			       p_params->port_params);
+
 	return 0;
 }
 
@@ -695,24 +860,31 @@ int qed_qm_pf_rt_init(struct qed_hwfn *p_hwfn,
 			vport_params[i].first_tx_pq_id[tc] = QM_INVALID_PQ_ID;
 
 	/* Map Other PQs (if any) */
-	qed_other_pq_map_rt_init(p_hwfn, p_params->port_id, p_params->pf_id,
-				 p_params->num_pf_cids, p_params->num_tids, 0);
+	qed_other_pq_map_rt_init(p_hwfn,
+				 p_params->pf_id,
+				 p_params->is_pf_loading, p_params->num_pf_cids,
+				 p_params->num_tids, 0);
 
 	/* Map Tx PQs */
 	qed_tx_pq_map_rt_init(p_hwfn, p_ptt, p_params, other_mem_size_4kb);
 
+	/* Init PF WFQ */
 	if (p_params->pf_wfq)
 		if (qed_pf_wfq_rt_init(p_hwfn, p_params))
 			return -1;
 
+	/* Init PF RL */
 	if (qed_pf_rl_rt_init(p_hwfn, p_params->pf_id, p_params->pf_rl))
 		return -1;
 
+	/* Set VPORT WFQ */
 	if (qed_vp_wfq_rt_init(p_hwfn, p_params->num_vports, vport_params))
 		return -1;
 
+	/* Set VPORT RL */
 	if (qed_vport_rl_rt_init(p_hwfn, p_params->start_vport,
-				 p_params->num_vports, vport_params))
+				 p_params->num_vports, p_params->link_speed,
+				 vport_params))
 		return -1;
 
 	return 0;
@@ -729,6 +901,7 @@ int qed_init_pf_wfq(struct qed_hwfn *p_hwfn,
 	}
 
 	qed_wr(p_hwfn, p_ptt, QM_REG_WFQPFWEIGHT + pf_id * 4, inc_val);
+
 	return 0;
 }
 
@@ -737,14 +910,13 @@ int qed_init_pf_rl(struct qed_hwfn *p_hwfn,
 {
 	u32 inc_val = QM_RL_INC_VAL(pf_rl);
 
-	if (inc_val > QM_RL_MAX_INC_VAL) {
+	if (inc_val > QM_PF_RL_MAX_INC_VAL) {
 		DP_NOTICE(p_hwfn, "Invalid PF rate limit configuration\n");
 		return -1;
 	}
 
-	qed_wr(p_hwfn, p_ptt,
-	       QM_REG_RLPFCRD + pf_id * 4,
-	       QM_RL_CRD_REG_SIGN_BIT);
+	qed_wr(p_hwfn,
+	       p_ptt, QM_REG_RLPFCRD + pf_id * 4, (u32)QM_RL_CRD_REG_SIGN_BIT);
 	qed_wr(p_hwfn, p_ptt, QM_REG_RLPFINCVAL + pf_id * 4, inc_val);
 
 	return 0;
@@ -767,33 +939,35 @@ int qed_init_vport_wfq(struct qed_hwfn *p_hwfn,
 	for (tc = 0; tc < NUM_OF_TCS; tc++) {
 		vport_pq_id = first_tx_pq_id[tc];
 		if (vport_pq_id != QM_INVALID_PQ_ID)
-			qed_wr(p_hwfn, p_ptt,
-			       QM_REG_WFQVPWEIGHT + vport_pq_id * 4,
-			       inc_val);
+			qed_wr(p_hwfn,
+			       p_ptt,
+			       QM_REG_WFQVPWEIGHT + vport_pq_id * 4, inc_val);
 	}
 
 	return 0;
 }
 
 int qed_init_vport_rl(struct qed_hwfn *p_hwfn,
-		      struct qed_ptt *p_ptt, u8 vport_id, u32 vport_rl)
+		      struct qed_ptt *p_ptt,
+		      u8 vport_id, u32 vport_rl, u32 link_speed)
 {
-	u32 inc_val = QM_RL_INC_VAL(vport_rl);
+	u32 inc_val, max_qm_global_rls = MAX_QM_GLOBAL_RLS;
 
-	if (vport_id >= MAX_QM_GLOBAL_RLS) {
+	if (vport_id >= max_qm_global_rls) {
 		DP_NOTICE(p_hwfn,
 			  "Invalid VPORT ID for rate limiter configuration\n");
 		return -1;
 	}
 
-	if (inc_val > QM_RL_MAX_INC_VAL) {
+	inc_val = QM_RL_INC_VAL(vport_rl ? vport_rl : link_speed);
+	if (inc_val > QM_VP_RL_MAX_INC_VAL(link_speed)) {
 		DP_NOTICE(p_hwfn, "Invalid VPORT rate-limit configuration\n");
 		return -1;
 	}
 
-	qed_wr(p_hwfn, p_ptt,
-	       QM_REG_RLGLBLCRD + vport_id * 4,
-	       QM_RL_CRD_REG_SIGN_BIT);
+	qed_wr(p_hwfn,
+	       p_ptt,
+	       QM_REG_RLGLBLCRD + vport_id * 4, (u32)QM_RL_CRD_REG_SIGN_BIT);
 	qed_wr(p_hwfn, p_ptt, QM_REG_RLGLBLINCVAL + vport_id * 4, inc_val);
 
 	return 0;
@@ -805,23 +979,27 @@ bool qed_send_qm_stop_cmd(struct qed_hwfn *p_hwfn,
 			  bool is_tx_pq, u16 start_pq, u16 num_pqs)
 {
 	u32 cmd_arr[QM_CMD_STRUCT_SIZE(QM_STOP_CMD)] = { 0 };
-	u32 pq_mask = 0, last_pq = start_pq + num_pqs - 1, pq_id;
+	u32 pq_mask = 0, last_pq, pq_id;
+
+	last_pq = start_pq + num_pqs - 1;
 
 	/* Set command's PQ type */
 	QM_CMD_SET_FIELD(cmd_arr, QM_STOP_CMD, PQ_TYPE, is_tx_pq ? 0 : 1);
 
+	/* Go over requested PQs */
 	for (pq_id = start_pq; pq_id <= last_pq; pq_id++) {
 		/* Set PQ bit in mask (stop command only) */
 		if (!is_release_cmd)
-			pq_mask |= (1 << (pq_id % QM_STOP_PQ_MASK_WIDTH));
+			pq_mask |= BIT((pq_id % QM_STOP_PQ_MASK_WIDTH));
 
 		/* If last PQ or end of PQ mask, write command */
 		if ((pq_id == last_pq) ||
 		    (pq_id % QM_STOP_PQ_MASK_WIDTH ==
 		     (QM_STOP_PQ_MASK_WIDTH - 1))) {
-			QM_CMD_SET_FIELD(cmd_arr, QM_STOP_CMD,
-					 PAUSE_MASK, pq_mask);
-			QM_CMD_SET_FIELD(cmd_arr, QM_STOP_CMD,
+			QM_CMD_SET_FIELD(cmd_arr,
+					 QM_STOP_CMD, PAUSE_MASK, pq_mask);
+			QM_CMD_SET_FIELD(cmd_arr,
+					 QM_STOP_CMD,
 					 GROUP_ID,
 					 pq_id / QM_STOP_PQ_MASK_WIDTH);
 			if (!qed_send_qm_cmd(p_hwfn, p_ptt, QM_STOP_CMD_ADDR,
@@ -834,87 +1012,103 @@ bool qed_send_qm_stop_cmd(struct qed_hwfn *p_hwfn,
 	return true;
 }
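
The loop above accumulates one pause bit per PQ and flushes a command whenever a 32-PQ group boundary, or the last PQ, is reached. A minimal sketch of that accumulate-and-flush shape, with a print standing in for qed_send_qm_cmd() (names and widths here are illustrative only):

#include <stdint.h>
#include <stdio.h>

#define MASK_WIDTH 32	/* same role as QM_STOP_PQ_MASK_WIDTH */

/* Stand-in for qed_send_qm_cmd(): just report the command. */
static void send_cmd(uint32_t group, uint32_t mask)
{
	printf("group %u: mask 0x%08x\n", group, mask);
}

static void stop_pqs(uint32_t start_pq, uint32_t num_pqs)
{
	uint32_t last_pq = start_pq + num_pqs - 1;
	uint32_t pq_id, mask = 0;

	for (pq_id = start_pq; pq_id <= last_pq; pq_id++) {
		mask |= 1u << (pq_id % MASK_WIDTH);

		/* Flush on the last PQ or at the end of a mask group. */
		if (pq_id == last_pq ||
		    pq_id % MASK_WIDTH == MASK_WIDTH - 1) {
			send_cmd(pq_id / MASK_WIDTH, mask);
			mask = 0;
		}
	}
}

int main(void)
{
	stop_pqs(30, 5);	/* spans two groups: PQs 30-31, then 32-34 */
	return 0;
}
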
 
-static void
-qed_set_tunnel_type_enable_bit(unsigned long *var, int bit, bool enable)
-{
-	if (enable)
-		set_bit(bit, var);
-	else
-		clear_bit(bit, var);
-}
 
+#define SET_TUNNEL_TYPE_ENABLE_BIT(var, offset, enable) \
+	do { \
+		typeof(var) *__p_var = &(var); \
+		typeof(offset) __offset = offset; \
+		*__p_var = (*__p_var & ~BIT(__offset)) | \
+			   ((enable) ? BIT(__offset) : 0); \
+	} while (0)
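
The macro replaces the old set_bit()/clear_bit() helper with a branch-free read-modify-write on a plain u32: clear the bit, then OR it back in when enabled. A self-contained sketch of the same operation, minus the typeof plumbing:

#include <assert.h>
#include <stdint.h>

/* Same shape as SET_TUNNEL_TYPE_ENABLE_BIT above, expressed as a
 * function: clear the bit, then OR it back in when enabled. */
static uint32_t set_enable_bit(uint32_t val, unsigned int offset, int enable)
{
	return (val & ~(1u << offset)) | (enable ? 1u << offset : 0);
}

int main(void)
{
	assert(set_enable_bit(0x00, 3, 1) == 0x08);
	assert(set_enable_bit(0xff, 3, 0) == 0xf7);
	assert(set_enable_bit(0xf7, 3, 0) == 0xf7);	/* idempotent */
	return 0;
}
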
 #define PRS_ETH_TUNN_FIC_FORMAT	-188897008
 
 void qed_set_vxlan_dest_port(struct qed_hwfn *p_hwfn,
 			     struct qed_ptt *p_ptt, u16 dest_port)
 {
+	/* Update PRS register */
 	qed_wr(p_hwfn, p_ptt, PRS_REG_VXLAN_PORT, dest_port);
+
+	/* Update NIG register */
 	qed_wr(p_hwfn, p_ptt, NIG_REG_VXLAN_CTRL, dest_port);
+
+	/* Update PBF register */
 	qed_wr(p_hwfn, p_ptt, PBF_REG_VXLAN_PORT, dest_port);
 }
 
 void qed_set_vxlan_enable(struct qed_hwfn *p_hwfn,
 			  struct qed_ptt *p_ptt, bool vxlan_enable)
 {
-	unsigned long reg_val = 0;
+	u32 reg_val;
 	u8 shift;
 
+	/* Update PRS register */
 	reg_val = qed_rd(p_hwfn, p_ptt, PRS_REG_ENCAPSULATION_TYPE_EN);
 	shift = PRS_REG_ENCAPSULATION_TYPE_EN_VXLAN_ENABLE_SHIFT;
-	qed_set_tunnel_type_enable_bit(&reg_val, shift, vxlan_enable);
-
+	SET_TUNNEL_TYPE_ENABLE_BIT(reg_val, shift, vxlan_enable);
 	qed_wr(p_hwfn, p_ptt, PRS_REG_ENCAPSULATION_TYPE_EN, reg_val);
-
 	if (reg_val)
-		qed_wr(p_hwfn, p_ptt, PRS_REG_OUTPUT_FORMAT_4_0,
-		       PRS_ETH_TUNN_FIC_FORMAT);
+		qed_wr(p_hwfn,
+		       p_ptt,
+		       PRS_REG_OUTPUT_FORMAT_4_0_BB_K2,
+		       (u32)PRS_ETH_TUNN_FIC_FORMAT);
 
+	/* Update NIG register */
 	reg_val = qed_rd(p_hwfn, p_ptt, NIG_REG_ENC_TYPE_ENABLE);
 	shift = NIG_REG_ENC_TYPE_ENABLE_VXLAN_ENABLE_SHIFT;
-	qed_set_tunnel_type_enable_bit(&reg_val, shift, vxlan_enable);
-
+	SET_TUNNEL_TYPE_ENABLE_BIT(reg_val, shift, vxlan_enable);
 	qed_wr(p_hwfn, p_ptt, NIG_REG_ENC_TYPE_ENABLE, reg_val);
 
-	qed_wr(p_hwfn, p_ptt, DORQ_REG_L2_EDPM_TUNNEL_VXLAN_EN,
-	       vxlan_enable ? 1 : 0);
+	/* Update DORQ register */
+	qed_wr(p_hwfn,
+	       p_ptt, DORQ_REG_L2_EDPM_TUNNEL_VXLAN_EN, vxlan_enable ? 1 : 0);
 }
 
-void qed_set_gre_enable(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt,
+void qed_set_gre_enable(struct qed_hwfn *p_hwfn,
+			struct qed_ptt *p_ptt,
 			bool eth_gre_enable, bool ip_gre_enable)
 {
-	unsigned long reg_val = 0;
+	u32 reg_val;
 	u8 shift;
 
+	/* Update PRS register */
 	reg_val = qed_rd(p_hwfn, p_ptt, PRS_REG_ENCAPSULATION_TYPE_EN);
 	shift = PRS_REG_ENCAPSULATION_TYPE_EN_ETH_OVER_GRE_ENABLE_SHIFT;
-	qed_set_tunnel_type_enable_bit(&reg_val, shift, eth_gre_enable);
-
+	SET_TUNNEL_TYPE_ENABLE_BIT(reg_val, shift, eth_gre_enable);
 	shift = PRS_REG_ENCAPSULATION_TYPE_EN_IP_OVER_GRE_ENABLE_SHIFT;
-	qed_set_tunnel_type_enable_bit(&reg_val, shift, ip_gre_enable);
+	SET_TUNNEL_TYPE_ENABLE_BIT(reg_val, shift, ip_gre_enable);
 	qed_wr(p_hwfn, p_ptt, PRS_REG_ENCAPSULATION_TYPE_EN, reg_val);
 	if (reg_val)
-		qed_wr(p_hwfn, p_ptt, PRS_REG_OUTPUT_FORMAT_4_0,
-		       PRS_ETH_TUNN_FIC_FORMAT);
+		qed_wr(p_hwfn,
+		       p_ptt,
+		       PRS_REG_OUTPUT_FORMAT_4_0_BB_K2,
+		       (u32)PRS_ETH_TUNN_FIC_FORMAT);
 
+	/* Update NIG register */
 	reg_val = qed_rd(p_hwfn, p_ptt, NIG_REG_ENC_TYPE_ENABLE);
 	shift = NIG_REG_ENC_TYPE_ENABLE_ETH_OVER_GRE_ENABLE_SHIFT;
-	qed_set_tunnel_type_enable_bit(&reg_val, shift, eth_gre_enable);
-
+	SET_TUNNEL_TYPE_ENABLE_BIT(reg_val, shift, eth_gre_enable);
 	shift = NIG_REG_ENC_TYPE_ENABLE_IP_OVER_GRE_ENABLE_SHIFT;
-	qed_set_tunnel_type_enable_bit(&reg_val, shift, ip_gre_enable);
+	SET_TUNNEL_TYPE_ENABLE_BIT(reg_val, shift, ip_gre_enable);
 	qed_wr(p_hwfn, p_ptt, NIG_REG_ENC_TYPE_ENABLE, reg_val);
 
-	qed_wr(p_hwfn, p_ptt, DORQ_REG_L2_EDPM_TUNNEL_GRE_ETH_EN,
-	       eth_gre_enable ? 1 : 0);
-	qed_wr(p_hwfn, p_ptt, DORQ_REG_L2_EDPM_TUNNEL_GRE_IP_EN,
-	       ip_gre_enable ? 1 : 0);
+	/* Update DORQ registers */
+	qed_wr(p_hwfn,
+	       p_ptt,
+	       DORQ_REG_L2_EDPM_TUNNEL_GRE_ETH_EN, eth_gre_enable ? 1 : 0);
+	qed_wr(p_hwfn,
+	       p_ptt, DORQ_REG_L2_EDPM_TUNNEL_GRE_IP_EN, ip_gre_enable ? 1 : 0);
 }
 
 void qed_set_geneve_dest_port(struct qed_hwfn *p_hwfn,
 			      struct qed_ptt *p_ptt, u16 dest_port)
 {
+	/* Update PRS register */
 	qed_wr(p_hwfn, p_ptt, PRS_REG_NGE_PORT, dest_port);
+
+	/* Update NIG register */
 	qed_wr(p_hwfn, p_ptt, NIG_REG_NGE_PORT, dest_port);
+
+	/* Update PBF register */
 	qed_wr(p_hwfn, p_ptt, PBF_REG_NGE_PORT, dest_port);
 }
 
@@ -922,32 +1116,39 @@ void qed_set_geneve_enable(struct qed_hwfn *p_hwfn,
 			   struct qed_ptt *p_ptt,
 			   bool eth_geneve_enable, bool ip_geneve_enable)
 {
-	unsigned long reg_val = 0;
+	u32 reg_val;
 	u8 shift;
 
+	/* Update PRS register */
 	reg_val = qed_rd(p_hwfn, p_ptt, PRS_REG_ENCAPSULATION_TYPE_EN);
 	shift = PRS_REG_ENCAPSULATION_TYPE_EN_ETH_OVER_GENEVE_ENABLE_SHIFT;
-	qed_set_tunnel_type_enable_bit(&reg_val, shift, eth_geneve_enable);
-
+	SET_TUNNEL_TYPE_ENABLE_BIT(reg_val, shift, eth_geneve_enable);
 	shift = PRS_REG_ENCAPSULATION_TYPE_EN_IP_OVER_GENEVE_ENABLE_SHIFT;
-	qed_set_tunnel_type_enable_bit(&reg_val, shift, ip_geneve_enable);
-
+	SET_TUNNEL_TYPE_ENABLE_BIT(reg_val, shift, ip_geneve_enable);
 	qed_wr(p_hwfn, p_ptt, PRS_REG_ENCAPSULATION_TYPE_EN, reg_val);
 	if (reg_val)
-		qed_wr(p_hwfn, p_ptt, PRS_REG_OUTPUT_FORMAT_4_0,
-		       PRS_ETH_TUNN_FIC_FORMAT);
+		qed_wr(p_hwfn,
+		       p_ptt,
+		       PRS_REG_OUTPUT_FORMAT_4_0_BB_K2,
+		       (u32)PRS_ETH_TUNN_FIC_FORMAT);
 
+	/* Update NIG register */
 	qed_wr(p_hwfn, p_ptt, NIG_REG_NGE_ETH_ENABLE,
 	       eth_geneve_enable ? 1 : 0);
 	qed_wr(p_hwfn, p_ptt, NIG_REG_NGE_IP_ENABLE, ip_geneve_enable ? 1 : 0);
 
-	/* EDPM with geneve tunnel not supported in BB_B0 */
+	/* EDPM with geneve tunnel not supported in BB */
 	if (QED_IS_BB_B0(p_hwfn->cdev))
 		return;
 
-	qed_wr(p_hwfn, p_ptt, DORQ_REG_L2_EDPM_TUNNEL_NGE_ETH_EN,
+	/* Update DORQ registers */
+	qed_wr(p_hwfn,
+	       p_ptt,
+	       DORQ_REG_L2_EDPM_TUNNEL_NGE_ETH_EN_K2_E5,
 	       eth_geneve_enable ? 1 : 0);
-	qed_wr(p_hwfn, p_ptt, DORQ_REG_L2_EDPM_TUNNEL_NGE_IP_EN,
+	qed_wr(p_hwfn,
+	       p_ptt,
+	       DORQ_REG_L2_EDPM_TUNNEL_NGE_IP_EN_K2_E5,
 	       ip_geneve_enable ? 1 : 0);
 }
 
@@ -959,117 +1160,297 @@ void qed_set_geneve_enable(struct qed_hwfn *p_hwfn,
 #define RAM_LINE_SIZE sizeof(u64)
 #define REG_SIZE sizeof(u32)
 
-void qed_set_rfs_mode_disable(struct qed_hwfn *p_hwfn,
-			      struct qed_ptt *p_ptt, u16 pf_id)
+void qed_gft_disable(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, u16 pf_id)
 {
-	u32 hw_addr = PRS_REG_GFT_PROFILE_MASK_RAM +
-		      pf_id * RAM_LINE_SIZE;
-
-	/*stop using gft logic */
+	/* Disable gft search for PF */
 	qed_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_GFT, 0);
-	qed_wr(p_hwfn, p_ptt, PRS_REG_CM_HDR_GFT, 0x0);
+
+	/* Clean RAM & CAM for next GFT session */
+
+	/* Zero CAM line */
 	qed_wr(p_hwfn, p_ptt, PRS_REG_GFT_CAM + CAM_LINE_SIZE * pf_id, 0);
-	qed_wr(p_hwfn, p_ptt, hw_addr, 0);
-	qed_wr(p_hwfn, p_ptt, hw_addr + 4, 0);
+
+	/* Zero ramline */
+	qed_wr(p_hwfn,
+	       p_ptt, PRS_REG_GFT_PROFILE_MASK_RAM + RAM_LINE_SIZE * pf_id, 0);
+	qed_wr(p_hwfn,
+	       p_ptt,
+	       PRS_REG_GFT_PROFILE_MASK_RAM + RAM_LINE_SIZE * pf_id + REG_SIZE,
+	       0);
 }
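
Each PF owns one 64-bit profile-mask RAM line (RAM_LINE_SIZE), which the function
above clears as two 32-bit writes REG_SIZE apart. A sketch of the address
arithmetic; the base address below is hypothetical, not the real register offset:

#include <stdint.h>
#include <stdio.h>

#define RAM_LINE_SIZE sizeof(uint64_t)	/* one profile-mask line per PF */
#define REG_SIZE      sizeof(uint32_t)	/* each line written as two halves */
#define GFT_PROFILE_MASK_RAM_BASE 0x1f1000UL	/* hypothetical base address */

int main(void)
{
	uint16_t pf_id = 3;
	unsigned long lo = GFT_PROFILE_MASK_RAM_BASE + RAM_LINE_SIZE * pf_id;
	unsigned long hi = lo + REG_SIZE;

	/* pf 3 -> lo at base + 24, hi at base + 28 */
	printf("ram line lo: %#lx, hi: %#lx\n", lo, hi);
	return 0;
}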
 
-void qed_set_rfs_mode_enable(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt,
-			     u16 pf_id, bool tcp, bool udp,
-			     bool ipv4, bool ipv6)
+void qed_set_gft_event_id_cm_hdr(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
 {
-	union gft_cam_line_union camline;
-	struct gft_ram_line ramline;
 	u32 rfs_cm_hdr_event_id;
 
+	/* Set RFS event ID to be awakened in Tstorm by PRS */
 	rfs_cm_hdr_event_id = qed_rd(p_hwfn, p_ptt, PRS_REG_CM_HDR_GFT);
+	rfs_cm_hdr_event_id |= T_ETH_PACKET_ACTION_GFT_EVENTID <<
+			       PRS_REG_CM_HDR_GFT_EVENT_ID_SHIFT;
+	rfs_cm_hdr_event_id |= PARSER_ETH_CONN_GFT_ACTION_CM_HDR <<
+			       PRS_REG_CM_HDR_GFT_CM_HDR_SHIFT;
+	qed_wr(p_hwfn, p_ptt, PRS_REG_CM_HDR_GFT, rfs_cm_hdr_event_id);
+}
+
+void qed_gft_config(struct qed_hwfn *p_hwfn,
+		    struct qed_ptt *p_ptt,
+		    u16 pf_id,
+		    bool tcp,
+		    bool udp,
+		    bool ipv4, bool ipv6, enum gft_profile_type profile_type)
+{
+	u32 reg_val, cam_line, ram_line_lo, ram_line_hi;
 
 	if (!ipv6 && !ipv4)
 		DP_NOTICE(p_hwfn,
-			  "set_rfs_mode_enable: must accept at least on of - ipv4 or ipv6");
+			  "gft_config: must accept at least on of - ipv4 or ipv6'\n");
 	if (!tcp && !udp)
 		DP_NOTICE(p_hwfn,
-			  "set_rfs_mode_enable: must accept at least on of - udp or tcp");
+			  "gft_config: must accept at least on of - udp or tcp\n");
+	if (profile_type >= MAX_GFT_PROFILE_TYPE)
+		DP_NOTICE(p_hwfn, "gft_config: unsupported gft_profile_type\n");
 
-	rfs_cm_hdr_event_id |= T_ETH_PACKET_MATCH_RFS_EVENTID <<
-					PRS_REG_CM_HDR_GFT_EVENT_ID_SHIFT;
-	rfs_cm_hdr_event_id |= PARSER_ETH_CONN_CM_HDR <<
-					PRS_REG_CM_HDR_GFT_CM_HDR_SHIFT;
-	qed_wr(p_hwfn, p_ptt, PRS_REG_CM_HDR_GFT, rfs_cm_hdr_event_id);
+	/* Set RFS event ID to be awakened in Tstorm by PRS */
+	reg_val = T_ETH_PACKET_MATCH_RFS_EVENTID <<
+		  PRS_REG_CM_HDR_GFT_EVENT_ID_SHIFT;
+	reg_val |= PARSER_ETH_CONN_CM_HDR << PRS_REG_CM_HDR_GFT_CM_HDR_SHIFT;
+	qed_wr(p_hwfn, p_ptt, PRS_REG_CM_HDR_GFT, reg_val);
 
-	/* Configure Registers for RFS mode */
-	qed_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_GFT, 1);
+	/* Do not load context, only cid, in PRS on match. */
 	qed_wr(p_hwfn, p_ptt, PRS_REG_LOAD_L2_FILTER, 0);
-	camline.cam_line_mapped.camline = 0;
 
-	/* Cam line is now valid!! */
-	SET_FIELD(camline.cam_line_mapped.camline,
-		  GFT_CAM_LINE_MAPPED_VALID, 1);
+	/* Do not use tenant ID exist bit for gft search */
+	qed_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_TENANT_ID, 0);
 
-	/* filters are per PF!! */
-	SET_FIELD(camline.cam_line_mapped.camline,
+	/* Set Cam */
+	cam_line = 0;
+	SET_FIELD(cam_line, GFT_CAM_LINE_MAPPED_VALID, 1);
+
+	/* Filters are per PF!! */
+	SET_FIELD(cam_line,
 		  GFT_CAM_LINE_MAPPED_PF_ID_MASK,
 		  GFT_CAM_LINE_MAPPED_PF_ID_MASK_MASK);
-	SET_FIELD(camline.cam_line_mapped.camline,
-		  GFT_CAM_LINE_MAPPED_PF_ID, pf_id);
+	SET_FIELD(cam_line, GFT_CAM_LINE_MAPPED_PF_ID, pf_id);
+
 	if (!(tcp && udp)) {
-		SET_FIELD(camline.cam_line_mapped.camline,
+		SET_FIELD(cam_line,
 			  GFT_CAM_LINE_MAPPED_UPPER_PROTOCOL_TYPE_MASK,
 			  GFT_CAM_LINE_MAPPED_UPPER_PROTOCOL_TYPE_MASK_MASK);
 		if (tcp)
-			SET_FIELD(camline.cam_line_mapped.camline,
+			SET_FIELD(cam_line,
 				  GFT_CAM_LINE_MAPPED_UPPER_PROTOCOL_TYPE,
 				  GFT_PROFILE_TCP_PROTOCOL);
 		else
-			SET_FIELD(camline.cam_line_mapped.camline,
+			SET_FIELD(cam_line,
 				  GFT_CAM_LINE_MAPPED_UPPER_PROTOCOL_TYPE,
 				  GFT_PROFILE_UDP_PROTOCOL);
 	}
 
 	if (!(ipv4 && ipv6)) {
-		SET_FIELD(camline.cam_line_mapped.camline,
-			  GFT_CAM_LINE_MAPPED_IP_VERSION_MASK, 1);
+		SET_FIELD(cam_line, GFT_CAM_LINE_MAPPED_IP_VERSION_MASK, 1);
 		if (ipv4)
-			SET_FIELD(camline.cam_line_mapped.camline,
+			SET_FIELD(cam_line,
 				  GFT_CAM_LINE_MAPPED_IP_VERSION,
 				  GFT_PROFILE_IPV4);
 		else
-			SET_FIELD(camline.cam_line_mapped.camline,
+			SET_FIELD(cam_line,
 				  GFT_CAM_LINE_MAPPED_IP_VERSION,
 				  GFT_PROFILE_IPV6);
 	}
 
 	/* Write characteristics to cam */
 	qed_wr(p_hwfn, p_ptt, PRS_REG_GFT_CAM + CAM_LINE_SIZE * pf_id,
-	       camline.cam_line_mapped.camline);
-	camline.cam_line_mapped.camline = qed_rd(p_hwfn, p_ptt,
-						 PRS_REG_GFT_CAM +
-						 CAM_LINE_SIZE * pf_id);
+	       cam_line);
+	cam_line =
+	    qed_rd(p_hwfn, p_ptt, PRS_REG_GFT_CAM + CAM_LINE_SIZE * pf_id);
 
 	/* Write line to RAM - compare to filter 4 tuple */
-	ramline.lo = 0;
-	ramline.hi = 0;
-	SET_FIELD(ramline.hi, GFT_RAM_LINE_DST_IP, 1);
-	SET_FIELD(ramline.hi, GFT_RAM_LINE_SRC_IP, 1);
-	SET_FIELD(ramline.hi, GFT_RAM_LINE_OVER_IP_PROTOCOL, 1);
-	SET_FIELD(ramline.lo, GFT_RAM_LINE_ETHERTYPE, 1);
-	SET_FIELD(ramline.lo, GFT_RAM_LINE_SRC_PORT, 1);
-	SET_FIELD(ramline.lo, GFT_RAM_LINE_DST_PORT, 1);
-
-	/* Each iteration write to reg */
-	qed_wr(p_hwfn, p_ptt,
+	ram_line_lo = 0;
+	ram_line_hi = 0;
+
+	if (profile_type == GFT_PROFILE_TYPE_4_TUPLE) {
+		SET_FIELD(ram_line_hi, GFT_RAM_LINE_DST_IP, 1);
+		SET_FIELD(ram_line_hi, GFT_RAM_LINE_SRC_IP, 1);
+		SET_FIELD(ram_line_hi, GFT_RAM_LINE_OVER_IP_PROTOCOL, 1);
+		SET_FIELD(ram_line_lo, GFT_RAM_LINE_ETHERTYPE, 1);
+		SET_FIELD(ram_line_lo, GFT_RAM_LINE_SRC_PORT, 1);
+		SET_FIELD(ram_line_lo, GFT_RAM_LINE_DST_PORT, 1);
+	} else if (profile_type == GFT_PROFILE_TYPE_L4_DST_PORT) {
+		SET_FIELD(ram_line_hi, GFT_RAM_LINE_OVER_IP_PROTOCOL, 1);
+		SET_FIELD(ram_line_lo, GFT_RAM_LINE_ETHERTYPE, 1);
+		SET_FIELD(ram_line_lo, GFT_RAM_LINE_DST_PORT, 1);
+	} else if (profile_type == GFT_PROFILE_TYPE_IP_DST_PORT) {
+		SET_FIELD(ram_line_hi, GFT_RAM_LINE_DST_IP, 1);
+		SET_FIELD(ram_line_lo, GFT_RAM_LINE_ETHERTYPE, 1);
+	}
+
+	qed_wr(p_hwfn,
+	       p_ptt,
 	       PRS_REG_GFT_PROFILE_MASK_RAM + RAM_LINE_SIZE * pf_id,
-	       ramline.lo);
-	qed_wr(p_hwfn, p_ptt,
-	       PRS_REG_GFT_PROFILE_MASK_RAM + RAM_LINE_SIZE * pf_id + 4,
-	       ramline.hi);
+	       ram_line_lo);
+	qed_wr(p_hwfn,
+	       p_ptt,
+	       PRS_REG_GFT_PROFILE_MASK_RAM + RAM_LINE_SIZE * pf_id + REG_SIZE,
+	       ram_line_hi);
 
 	/* Set default profile so that no filter match will happen */
-	qed_wr(p_hwfn, p_ptt,
-	       PRS_REG_GFT_PROFILE_MASK_RAM +
-	       RAM_LINE_SIZE * PRS_GFT_CAM_LINES_NO_MATCH,
-	       ramline.lo);
-	qed_wr(p_hwfn, p_ptt,
-	       PRS_REG_GFT_PROFILE_MASK_RAM +
-	       RAM_LINE_SIZE * PRS_GFT_CAM_LINES_NO_MATCH + 4,
-	       ramline.hi);
+	qed_wr(p_hwfn,
+	       p_ptt,
+	       PRS_REG_GFT_PROFILE_MASK_RAM + RAM_LINE_SIZE *
+	       PRS_GFT_CAM_LINES_NO_MATCH, 0xffffffff);
+	qed_wr(p_hwfn,
+	       p_ptt,
+	       PRS_REG_GFT_PROFILE_MASK_RAM + RAM_LINE_SIZE *
+	       PRS_GFT_CAM_LINES_NO_MATCH + REG_SIZE, 0x3ff);
+
+	/* Enable gft search */
+	qed_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_GFT, 1);
+}
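
A hedged caller-side sketch: this mirrors how qed_arfs_mode_configure() later in
this patch drives qed_gft_config() for 4-tuple filtering. Obtaining a valid
p_hwfn/p_ptt pair is elided, so this is illustrative, not compilable on its own:

/* Illustrative call only; assumes a valid p_hwfn/p_ptt pair is in hand */
qed_gft_config(p_hwfn, p_ptt, p_hwfn->rel_pf_id,
	       true,	/* tcp */
	       true,	/* udp */
	       true,	/* ipv4 */
	       true,	/* ipv6 */
	       GFT_PROFILE_TYPE_4_TUPLE);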
+
+DECLARE_CRC8_TABLE(cdu_crc8_table);
+
+/* Calculate and return CDU validation byte per connection type/region/cid */
+static u8 qed_calc_cdu_validation_byte(u8 conn_type, u8 region, u32 cid)
+{
+	const u8 validation_cfg = CDU_VALIDATION_DEFAULT_CFG;
+	u8 crc, validation_byte = 0;
+	static u8 crc8_table_valid; /* automatically initialized to 0 */
+	u32 validation_string = 0;
+	u32 data_to_crc;
+
+	if (!crc8_table_valid) {
+		crc8_populate_msb(cdu_crc8_table, 0x07);
+		crc8_table_valid = 1;
+	}
+
+	/* The CRC is calculated on the String-to-compress:
+	 * [31:8]  = {CID[31:20],CID[11:0]}
+	 * [7:4]   = Region
+	 * [3:0]   = Type
+	 */
+	if ((validation_cfg >> CDU_CONTEXT_VALIDATION_CFG_USE_CID) & 1)
+		validation_string |= (cid & 0xFFF00000) | ((cid & 0xFFF) << 8);
+
+	if ((validation_cfg >> CDU_CONTEXT_VALIDATION_CFG_USE_REGION) & 1)
+		validation_string |= ((region & 0xF) << 4);
+
+	if ((validation_cfg >> CDU_CONTEXT_VALIDATION_CFG_USE_TYPE) & 1)
+		validation_string |= (conn_type & 0xF);
+
+	/* Convert to big-endian and calculate CRC8 */
+	data_to_crc = be32_to_cpu(validation_string);
+
+	crc = crc8(cdu_crc8_table,
+		   (u8 *)&data_to_crc, sizeof(data_to_crc), CRC8_INIT_VALUE);
+
+	/* The validation byte [7:0] is composed as follows:
+	 * for type A validation
+	 * [7]          = active configuration bit
+	 * [6:0]        = crc[6:0]
+	 *
+	 * for type B validation
+	 * [7]          = active configuration bit
+	 * [6:3]        = connection_type[3:0]
+	 * [2:0]        = crc[2:0]
+	 */
+	validation_byte |=
+	    ((validation_cfg >>
+	      CDU_CONTEXT_VALIDATION_CFG_USE_ACTIVE) & 1) << 7;
+
+	if ((validation_cfg >>
+	     CDU_CONTEXT_VALIDATION_CFG_VALIDATION_TYPE_SHIFT) & 1)
+		validation_byte |= ((conn_type & 0xF) << 3) | (crc & 0x7);
+	else
+		validation_byte |= crc & 0x7F;
+
+	return validation_byte;
+}
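
A worked example of the string-to-compress layout documented above, for a made-up
CID/region/type triple; note that CID bits [19:12] do not participate:

#include <assert.h>
#include <stdint.h>

int main(void)
{
	uint32_t cid = 0x12345678, region = 3, conn_type = 1;
	uint32_t s = 0;

	/* [31:8] = {CID[31:20], CID[11:0]}, [7:4] = region, [3:0] = type */
	s |= (cid & 0xFFF00000) | ((cid & 0xFFF) << 8);
	s |= (region & 0xF) << 4;
	s |= conn_type & 0xF;

	assert(s == 0x12367831);	/* CID bits [19:12] (0x45) are dropped */
	return 0;
}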
+
+/* Calculate and set validation bytes for session context */
+void qed_calc_session_ctx_validation(void *p_ctx_mem,
+				     u16 ctx_size, u8 ctx_type, u32 cid)
+{
+	u8 *x_val_ptr, *t_val_ptr, *u_val_ptr, *p_ctx;
+
+	p_ctx = (u8 * const)p_ctx_mem;
+	x_val_ptr = &p_ctx[con_region_offsets[0][ctx_type]];
+	t_val_ptr = &p_ctx[con_region_offsets[1][ctx_type]];
+	u_val_ptr = &p_ctx[con_region_offsets[2][ctx_type]];
+
+	memset(p_ctx, 0, ctx_size);
+
+	*x_val_ptr = qed_calc_cdu_validation_byte(ctx_type, 3, cid);
+	*t_val_ptr = qed_calc_cdu_validation_byte(ctx_type, 4, cid);
+	*u_val_ptr = qed_calc_cdu_validation_byte(ctx_type, 5, cid);
+}
+
+/* Calculate and set validation bytes for task context */
+void qed_calc_task_ctx_validation(void *p_ctx_mem,
+				  u16 ctx_size, u8 ctx_type, u32 tid)
+{
+	u8 *p_ctx, *region1_val_ptr;
+
+	p_ctx = (u8 * const)p_ctx_mem;
+	region1_val_ptr = &p_ctx[task_region_offsets[0][ctx_type]];
+
+	memset(p_ctx, 0, ctx_size);
+
+	*region1_val_ptr = qed_calc_cdu_validation_byte(ctx_type, 1, tid);
+}
+
+/* Memset session context to 0 while preserving validation bytes */
+void qed_memset_session_ctx(void *p_ctx_mem, u32 ctx_size, u8 ctx_type)
+{
+	u8 *x_val_ptr, *t_val_ptr, *u_val_ptr, *p_ctx;
+	u8 x_val, t_val, u_val;
+
+	p_ctx = (u8 * const)p_ctx_mem;
+	x_val_ptr = &p_ctx[con_region_offsets[0][ctx_type]];
+	t_val_ptr = &p_ctx[con_region_offsets[1][ctx_type]];
+	u_val_ptr = &p_ctx[con_region_offsets[2][ctx_type]];
+
+	x_val = *x_val_ptr;
+	t_val = *t_val_ptr;
+	u_val = *u_val_ptr;
+
+	memset(p_ctx, 0, ctx_size);
+
+	*x_val_ptr = x_val;
+	*t_val_ptr = t_val;
+	*u_val_ptr = u_val;
+}
+
+/* Memset task context to 0 while preserving validation bytes */
+void qed_memset_task_ctx(void *p_ctx_mem, u32 ctx_size, u8 ctx_type)
+{
+	u8 *p_ctx, *region1_val_ptr;
+	u8 region1_val;
+
+	p_ctx = (u8 * const)p_ctx_mem;
+	region1_val_ptr = &p_ctx[task_region_offsets[0][ctx_type]];
+
+	region1_val = *region1_val_ptr;
+
+	memset(p_ctx, 0, ctx_size);
+
+	*region1_val_ptr = region1_val;
+}
+
+/* Enable and configure context validation */
+void qed_enable_context_validation(struct qed_hwfn *p_hwfn,
+				   struct qed_ptt *p_ptt)
+{
+	u32 ctx_validation;
+
+	/* Enable validation for connection region 3: CCFC_CTX_VALID0[31:24] */
+	ctx_validation = CDU_VALIDATION_DEFAULT_CFG << 24;
+	qed_wr(p_hwfn, p_ptt, CDU_REG_CCFC_CTX_VALID0, ctx_validation);
+
+	/* Enable validation for connection region 5: CCFC_CTX_VALID1[15:8] */
+	ctx_validation = CDU_VALIDATION_DEFAULT_CFG << 8;
+	qed_wr(p_hwfn, p_ptt, CDU_REG_CCFC_CTX_VALID1, ctx_validation);
+
+	/* Enable validation for connection region 1: TCFC_CTX_VALID0[15:8] */
+	ctx_validation = CDU_VALIDATION_DEFAULT_CFG << 8;
+	qed_wr(p_hwfn, p_ptt, CDU_REG_TCFC_CTX_VALID0, ctx_validation);
 }
diff --git a/drivers/net/ethernet/qlogic/qed/qed_init_ops.c b/drivers/net/ethernet/qlogic/qed/qed_init_ops.c
index e3f368882f46..3bb76da6baa2 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_init_ops.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_init_ops.c
@@ -414,11 +414,23 @@ static void qed_init_cmd_rd(struct qed_hwfn *p_hwfn,
 }
 
 /* init_ops callbacks entry point */
-static void qed_init_cmd_cb(struct qed_hwfn *p_hwfn,
-			    struct qed_ptt *p_ptt,
-			    struct init_callback_op *p_cmd)
+static int qed_init_cmd_cb(struct qed_hwfn *p_hwfn,
+			   struct qed_ptt *p_ptt,
+			   struct init_callback_op *p_cmd)
 {
-	DP_NOTICE(p_hwfn, "Currently init values have no need of callbacks\n");
+	int rc;
+
+	switch (p_cmd->callback_id) {
+	case DMAE_READY_CB:
+		rc = qed_dmae_sanity(p_hwfn, p_ptt, "engine_phase");
+		break;
+	default:
+		DP_NOTICE(p_hwfn, "Unexpected init op callback ID %d\n",
+			  p_cmd->callback_id);
+		return -EINVAL;
+	}
+
+	return rc;
 }
 
 static u8 qed_init_cmd_mode_match(struct qed_hwfn *p_hwfn,
@@ -519,7 +531,7 @@ int qed_init_run(struct qed_hwfn *p_hwfn,
 			break;
 
 		case INIT_OP_CALLBACK:
-			qed_init_cmd_cb(p_hwfn, p_ptt, &cmd->callback);
+			rc = qed_init_cmd_cb(p_hwfn, p_ptt, &cmd->callback);
 			break;
 		}
 
diff --git a/drivers/net/ethernet/qlogic/qed/qed_int.c b/drivers/net/ethernet/qlogic/qed/qed_int.c
index 719cdbfe1695..d3eabcf9c86c 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_int.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_int.c
@@ -59,10 +59,10 @@ struct qed_pi_info {
 };
 
 struct qed_sb_sp_info {
-	struct qed_sb_info	sb_info;
+	struct qed_sb_info sb_info;
 
 	/* per protocol index data */
-	struct qed_pi_info	pi_info_arr[PIS_PER_SB];
+	struct qed_pi_info pi_info_arr[PIS_PER_SB_E4];
 };
 
 enum qed_attention_type {
@@ -82,7 +82,7 @@ struct aeu_invert_reg_bit {
 #define ATTENTION_LENGTH_SHIFT          (4)
 #define ATTENTION_LENGTH(flags)         (((flags) & ATTENTION_LENGTH_MASK) >> \
 					 ATTENTION_LENGTH_SHIFT)
-#define ATTENTION_SINGLE                (1 << ATTENTION_LENGTH_SHIFT)
+#define ATTENTION_SINGLE                BIT(ATTENTION_LENGTH_SHIFT)
 #define ATTENTION_PAR                   (ATTENTION_SINGLE | ATTENTION_PARITY)
 #define ATTENTION_PAR_INT               ((2 << ATTENTION_LENGTH_SHIFT) | \
 					 ATTENTION_PARITY)
@@ -1313,7 +1313,7 @@ static void qed_int_cau_conf_pi(struct qed_hwfn *p_hwfn,
 	if (IS_VF(p_hwfn->cdev))
 		return;
 
-	sb_offset = igu_sb_id * PIS_PER_SB;
+	sb_offset = igu_sb_id * PIS_PER_SB_E4;
 	memset(&pi_entry, 0, sizeof(struct cau_pi_entry));
 
 	SET_FIELD(pi_entry.prod, CAU_PI_ENTRY_PI_TIMESET, timeset);
diff --git a/drivers/net/ethernet/qlogic/qed/qed_int.h b/drivers/net/ethernet/qlogic/qed/qed_int.h
index 5199634ed630..54b4ee0acfd7 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_int.h
+++ b/drivers/net/ethernet/qlogic/qed/qed_int.h
@@ -197,7 +197,7 @@ void qed_int_disable_post_isr_release(struct qed_dev *cdev);
 #define QED_SB_EVENT_MASK       0x0003
 
 #define SB_ALIGNED_SIZE(p_hwfn)	\
-	ALIGNED_TYPE_SIZE(struct status_block, p_hwfn)
+	ALIGNED_TYPE_SIZE(struct status_block_e4, p_hwfn)
 
 #define QED_SB_INVALID_IDX      0xffff
 
diff --git a/drivers/net/ethernet/qlogic/qed/qed_iscsi.c b/drivers/net/ethernet/qlogic/qed/qed_iscsi.c
index 813c77cc857f..c0d4a54a5edb 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_iscsi.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_iscsi.c
@@ -62,22 +62,6 @@
 #include "qed_sriov.h"
 #include "qed_reg_addr.h"
 
-static int
-qed_iscsi_async_event(struct qed_hwfn *p_hwfn,
-		      u8 fw_event_code,
-		      u16 echo, union event_ring_data *data, u8 fw_return_code)
-{
-	if (p_hwfn->p_iscsi_info->event_cb) {
-		struct qed_iscsi_info *p_iscsi = p_hwfn->p_iscsi_info;
-
-		return p_iscsi->event_cb(p_iscsi->event_context,
-					 fw_event_code, data);
-	} else {
-		DP_NOTICE(p_hwfn, "iSCSI async completion is not set\n");
-		return -EINVAL;
-	}
-}
-
 struct qed_iscsi_conn {
 	struct list_head list_entry;
 	bool free_on_delete;
@@ -105,7 +89,7 @@ struct qed_iscsi_conn {
 	u8 local_mac[6];
 	u8 remote_mac[6];
 	u16 vlan_id;
-	u8 tcp_flags;
+	u16 tcp_flags;
 	u8 ip_version;
 	u32 remote_ip[4];
 	u32 local_ip[4];
@@ -122,7 +106,6 @@ struct qed_iscsi_conn {
 	u32 ss_thresh;
 	u16 srtt;
 	u16 rtt_var;
-	u32 ts_time;
 	u32 ts_recent;
 	u32 ts_recent_age;
 	u32 total_rt;
@@ -144,7 +127,6 @@ struct qed_iscsi_conn {
 	u16 mss;
 	u8 snd_wnd_scale;
 	u8 rcv_wnd_scale;
-	u32 ts_ticks_per_second;
 	u16 da_timeout_value;
 	u8 ack_frequency;
 
@@ -162,6 +144,22 @@ struct qed_iscsi_conn {
 };
 
 static int
+qed_iscsi_async_event(struct qed_hwfn *p_hwfn,
+		      u8 fw_event_code,
+		      u16 echo, union event_ring_data *data, u8 fw_return_code)
+{
+	if (p_hwfn->p_iscsi_info->event_cb) {
+		struct qed_iscsi_info *p_iscsi = p_hwfn->p_iscsi_info;
+
+		return p_iscsi->event_cb(p_iscsi->event_context,
+					 fw_event_code, data);
+	} else {
+		DP_NOTICE(p_hwfn, "iSCSI async completion is not set\n");
+		return -EINVAL;
+	}
+}
+
+static int
 qed_sp_iscsi_func_start(struct qed_hwfn *p_hwfn,
 			enum spq_mode comp_mode,
 			struct qed_spq_comp_cb *p_comp_addr,
@@ -214,9 +212,9 @@ qed_sp_iscsi_func_start(struct qed_hwfn *p_hwfn,
 	p_init->num_sq_pages_in_ring = p_params->num_sq_pages_in_ring;
 	p_init->num_r2tq_pages_in_ring = p_params->num_r2tq_pages_in_ring;
 	p_init->num_uhq_pages_in_ring = p_params->num_uhq_pages_in_ring;
-	p_init->ooo_enable = p_params->ooo_enable;
 	p_init->ll2_rx_queue_id = p_hwfn->hw_info.resc_start[QED_LL2_QUEUE] +
 				  p_params->ll2_ooo_queue_id;
+
 	p_init->func_params.log_page_size = p_params->log_page_size;
 	val = p_params->num_tasks;
 	p_init->func_params.num_tasks = cpu_to_le16(val);
@@ -276,7 +274,7 @@ qed_sp_iscsi_func_start(struct qed_hwfn *p_hwfn,
 	p_ramrod->tcp_init.two_msl_timer = cpu_to_le32(p_params->two_msl_timer);
 	val = p_params->tx_sws_timer;
 	p_ramrod->tcp_init.tx_sws_timer = cpu_to_le16(val);
-	p_ramrod->tcp_init.maxfinrt = p_params->max_fin_rt;
+	p_ramrod->tcp_init.max_fin_rt = p_params->max_fin_rt;
 
 	p_hwfn->p_iscsi_info->event_context = event_context;
 	p_hwfn->p_iscsi_info->event_cb = async_event_cb;
@@ -304,8 +302,8 @@ static int qed_sp_iscsi_conn_offload(struct qed_hwfn *p_hwfn,
 	int rc = 0;
 	u32 dval;
 	u16 wval;
-	u8 i;
 	u16 *p;
+	u8 i;
 
 	/* Get SPQ entry */
 	memset(&init_data, 0, sizeof(init_data));
@@ -371,7 +369,7 @@ static int qed_sp_iscsi_conn_offload(struct qed_hwfn *p_hwfn,
 
 		p_tcp->vlan_id = cpu_to_le16(p_conn->vlan_id);
 
-		p_tcp->flags = p_conn->tcp_flags;
+		p_tcp->flags = cpu_to_le16(p_conn->tcp_flags);
 		p_tcp->ip_version = p_conn->ip_version;
 		for (i = 0; i < 4; i++) {
 			dval = p_conn->remote_ip[i];
@@ -436,7 +434,7 @@ static int qed_sp_iscsi_conn_offload(struct qed_hwfn *p_hwfn,
 		p_tcp2->remote_mac_addr_lo = swab16(get_unaligned(p + 2));
 
 		p_tcp2->vlan_id = cpu_to_le16(p_conn->vlan_id);
-		p_tcp2->flags = p_conn->tcp_flags;
+		p_tcp2->flags = cpu_to_le16(p_conn->tcp_flags);
 
 		p_tcp2->ip_version = p_conn->ip_version;
 		for (i = 0; i < 4; i++) {
@@ -458,6 +456,11 @@ static int qed_sp_iscsi_conn_offload(struct qed_hwfn *p_hwfn,
 		p_tcp2->syn_ip_payload_length = cpu_to_le16(wval);
 		p_tcp2->syn_phy_addr_lo = DMA_LO_LE(p_conn->syn_phy_addr);
 		p_tcp2->syn_phy_addr_hi = DMA_HI_LE(p_conn->syn_phy_addr);
+		p_tcp2->cwnd = cpu_to_le32(p_conn->cwnd);
+		p_tcp2->ka_max_probe_cnt = p_conn->ka_probe_cnt;
+		p_tcp2->ka_timeout = cpu_to_le32(p_conn->ka_timeout);
+		p_tcp2->max_rt_time = cpu_to_le32(p_conn->max_rt_time);
+		p_tcp2->ka_interval = cpu_to_le32(p_conn->ka_interval);
 	}
 
 	return qed_spq_post(p_hwfn, p_ent, NULL);
@@ -692,8 +695,7 @@ static void __iomem *qed_iscsi_get_secondary_bdq_prod(struct qed_hwfn *p_hwfn,
 	}
 }
 
-static int qed_iscsi_setup_connection(struct qed_hwfn *p_hwfn,
-				      struct qed_iscsi_conn *p_conn)
+static int qed_iscsi_setup_connection(struct qed_iscsi_conn *p_conn)
 {
 	if (!p_conn->queue_cnts_virt_addr)
 		goto nomem;
@@ -844,7 +846,7 @@ static int qed_iscsi_acquire_connection(struct qed_hwfn *p_hwfn,
 		rc = qed_iscsi_allocate_connection(p_hwfn, &p_conn);
 
 	if (!rc)
-		rc = qed_iscsi_setup_connection(p_hwfn, p_conn);
+		rc = qed_iscsi_setup_connection(p_conn);
 
 	if (rc) {
 		spin_lock_bh(&p_hwfn->p_iscsi_info->lock);
@@ -1294,7 +1296,6 @@ static int qed_iscsi_offload_conn(struct qed_dev *cdev,
 	con->ss_thresh = conn_info->ss_thresh;
 	con->srtt = conn_info->srtt;
 	con->rtt_var = conn_info->rtt_var;
-	con->ts_time = conn_info->ts_time;
 	con->ts_recent = conn_info->ts_recent;
 	con->ts_recent_age = conn_info->ts_recent_age;
 	con->total_rt = conn_info->total_rt;
@@ -1316,7 +1317,6 @@ static int qed_iscsi_offload_conn(struct qed_dev *cdev,
 	con->mss = conn_info->mss;
 	con->snd_wnd_scale = conn_info->snd_wnd_scale;
 	con->rcv_wnd_scale = conn_info->rcv_wnd_scale;
-	con->ts_ticks_per_second = conn_info->ts_ticks_per_second;
 	con->da_timeout_value = conn_info->da_timeout_value;
 	con->ack_frequency = conn_info->ack_frequency;
 
diff --git a/drivers/net/ethernet/qlogic/qed/qed_iwarp.c b/drivers/net/ethernet/qlogic/qed/qed_iwarp.c
index 409041eab189..ca4a81dc1ace 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_iwarp.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_iwarp.c
@@ -64,14 +64,21 @@ struct mpa_v2_hdr {
 
 #define QED_IWARP_INVALID_TCP_CID	0xffffffff
 #define QED_IWARP_RCV_WND_SIZE_DEF	(256 * 1024)
-#define QED_IWARP_RCV_WND_SIZE_MIN	(64 * 1024)
+#define QED_IWARP_RCV_WND_SIZE_MIN	(0xffff)
 #define TIMESTAMP_HEADER_SIZE		(12)
+#define QED_IWARP_MAX_FIN_RT_DEFAULT	(2)
 
 #define QED_IWARP_TS_EN			BIT(0)
 #define QED_IWARP_DA_EN			BIT(1)
 #define QED_IWARP_PARAM_CRC_NEEDED	(1)
 #define QED_IWARP_PARAM_P2P		(1)
 
+#define QED_IWARP_DEF_MAX_RT_TIME	(0)
+#define QED_IWARP_DEF_CWND_FACTOR	(4)
+#define QED_IWARP_DEF_KA_MAX_PROBE_CNT	(5)
+#define QED_IWARP_DEF_KA_TIMEOUT	(1200000)	/* 20 min */
+#define QED_IWARP_DEF_KA_INTERVAL	(1000)		/* 1 sec */
+
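
A quick arithmetic check of the defaults above, assuming the keepalive values are
in milliseconds (as the "20 min" and "1 sec" comments suggest) and a hypothetical
MSS of 1460:

#include <assert.h>

int main(void)
{
	unsigned int ka_timeout_ms = 1200000;	/* QED_IWARP_DEF_KA_TIMEOUT */
	unsigned int ka_interval_ms = 1000;	/* QED_IWARP_DEF_KA_INTERVAL */
	unsigned int mss = 1460;		/* hypothetical MSS */

	assert(ka_timeout_ms / 1000 / 60 == 20);	/* 20 minutes */
	assert(ka_interval_ms / 1000 == 1);		/* 1 second */
	assert(4 * mss == 5840);	/* cwnd = QED_IWARP_DEF_CWND_FACTOR * mss */
	return 0;
}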
 static int qed_iwarp_async_event(struct qed_hwfn *p_hwfn,
 				 u8 fw_event_code, u16 echo,
 				 union event_ring_data *data,
@@ -120,11 +127,17 @@ static void qed_iwarp_cid_cleaned(struct qed_hwfn *p_hwfn, u32 cid)
 	spin_unlock_bh(&p_hwfn->p_rdma_info->lock);
 }
 
-void qed_iwarp_init_fw_ramrod(struct qed_hwfn *p_hwfn,
-			      struct iwarp_init_func_params *p_ramrod)
+void
+qed_iwarp_init_fw_ramrod(struct qed_hwfn *p_hwfn,
+			 struct iwarp_init_func_ramrod_data *p_ramrod)
 {
-	p_ramrod->ll2_ooo_q_index = RESC_START(p_hwfn, QED_LL2_QUEUE) +
-				    p_hwfn->p_rdma_info->iwarp.ll2_ooo_handle;
+	p_ramrod->iwarp.ll2_ooo_q_index =
+		RESC_START(p_hwfn, QED_LL2_QUEUE) +
+		p_hwfn->p_rdma_info->iwarp.ll2_ooo_handle;
+
+	p_ramrod->tcp.max_fin_rt = QED_IWARP_MAX_FIN_RT_DEFAULT;
 }
 
 static int qed_iwarp_alloc_cid(struct qed_hwfn *p_hwfn, u32 *cid)
@@ -699,6 +712,12 @@ qed_iwarp_tcp_offload(struct qed_hwfn *p_hwfn, struct qed_iwarp_ep *ep)
 	tcp->ttl = 0x40;
 	tcp->tos_or_tc = 0;
 
+	tcp->max_rt_time = QED_IWARP_DEF_MAX_RT_TIME;
+	tcp->cwnd = QED_IWARP_DEF_CWND_FACTOR * tcp->mss;
+	tcp->ka_max_probe_cnt = QED_IWARP_DEF_KA_MAX_PROBE_CNT;
+	tcp->ka_timeout = QED_IWARP_DEF_KA_TIMEOUT;
+	tcp->ka_interval = QED_IWARP_DEF_KA_INTERVAL;
+
 	tcp->rcv_wnd_scale = (u8)p_hwfn->p_rdma_info->iwarp.rcv_wnd_scale;
 	tcp->connect_mode = ep->connect_mode;
 
@@ -807,6 +826,7 @@ static int
 qed_iwarp_mpa_offload(struct qed_hwfn *p_hwfn, struct qed_iwarp_ep *ep)
 {
 	struct iwarp_mpa_offload_ramrod_data *p_mpa_ramrod;
+	struct qed_iwarp_info *iwarp_info;
 	struct qed_sp_init_data init_data;
 	dma_addr_t async_output_phys;
 	struct qed_spq_entry *p_ent;
@@ -874,6 +894,8 @@ qed_iwarp_mpa_offload(struct qed_hwfn *p_hwfn, struct qed_iwarp_ep *ep)
 		p_mpa_ramrod->common.reject = 1;
 	}
 
+	iwarp_info = &p_hwfn->p_rdma_info->iwarp;
+	p_mpa_ramrod->rcv_wnd = iwarp_info->rcv_wnd_size;
 	p_mpa_ramrod->mode = ep->mpa_rev;
 	SET_FIELD(p_mpa_ramrod->rtr_pref,
 		  IWARP_MPA_OFFLOAD_RAMROD_DATA_RTR_SUPPORTED, ep->rtr_type);
@@ -2745,6 +2767,7 @@ int qed_iwarp_setup(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt,
 	/* value 0 is used for ilog2(QED_IWARP_RCV_WND_SIZE_MIN) */
 	iwarp_info->rcv_wnd_scale = ilog2(rcv_wnd_size) -
 	    ilog2(QED_IWARP_RCV_WND_SIZE_MIN);
+	iwarp_info->rcv_wnd_size = rcv_wnd_size >> iwarp_info->rcv_wnd_scale;
 	iwarp_info->crc_needed = QED_IWARP_PARAM_CRC_NEEDED;
 	iwarp_info->mpa_rev = MPA_NEGOTIATION_TYPE_ENHANCED;
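
A worked example of the window-scale math above: with QED_IWARP_RCV_WND_SIZE_MIN
now 0xffff and the default 256 KB window, the scale comes out to 3 and the
advertised size fits the new u16 field. A standalone sketch, with ilog2()
reimplemented locally for the illustration:

#include <assert.h>
#include <stdint.h>

/* floor(log2(v)); matches the kernel's ilog2() for the values used here */
static unsigned int ilog2_u32(uint32_t v)
{
	unsigned int r = 0;

	while (v >>= 1)
		r++;
	return r;
}

int main(void)
{
	uint32_t rcv_wnd_size = 256 * 1024;	/* QED_IWARP_RCV_WND_SIZE_DEF */
	uint32_t min = 0xffff;			/* QED_IWARP_RCV_WND_SIZE_MIN */
	unsigned int scale = ilog2_u32(rcv_wnd_size) - ilog2_u32(min);
	uint16_t advertised = (uint16_t)(rcv_wnd_size >> scale);

	assert(scale == 3);		/* 18 - 15 */
	assert(advertised == 32768);	/* fits the new u16 rcv_wnd_size */
	return 0;
}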
 
diff --git a/drivers/net/ethernet/qlogic/qed/qed_iwarp.h b/drivers/net/ethernet/qlogic/qed/qed_iwarp.h
index c1ecd743305f..b8f612d00241 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_iwarp.h
+++ b/drivers/net/ethernet/qlogic/qed/qed_iwarp.h
@@ -95,6 +95,7 @@ struct qed_iwarp_info {
 	spinlock_t iw_lock;	/* for iwarp resources */
 	spinlock_t qp_lock;	/* for teardown races */
 	u32 rcv_wnd_scale;
+	u16 rcv_wnd_size;
 	u16 max_mtu;
 	u8 mac_addr[ETH_ALEN];
 	u8 crc_needed;
@@ -187,7 +188,7 @@ int qed_iwarp_setup(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt,
 		    struct qed_rdma_start_in_params *params);
 
 void qed_iwarp_init_fw_ramrod(struct qed_hwfn *p_hwfn,
-			      struct iwarp_init_func_params *p_ramrod);
+			      struct iwarp_init_func_ramrod_data *p_ramrod);
 
 int qed_iwarp_stop(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt);
 
diff --git a/drivers/net/ethernet/qlogic/qed/qed_l2.c b/drivers/net/ethernet/qlogic/qed/qed_l2.c
index 085338990f49..893ef08a4b39 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_l2.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_l2.c
@@ -223,10 +223,9 @@ _qed_eth_queue_to_cid(struct qed_hwfn *p_hwfn,
 	struct qed_queue_cid *p_cid;
 	int rc;
 
-	p_cid = vmalloc(sizeof(*p_cid));
+	p_cid = vzalloc(sizeof(*p_cid));
 	if (!p_cid)
 		return NULL;
-	memset(p_cid, 0, sizeof(*p_cid));
 
 	p_cid->opaque_fid = opaque_fid;
 	p_cid->cid = cid;
@@ -1969,33 +1968,45 @@ void qed_reset_vport_stats(struct qed_dev *cdev)
 		_qed_get_vport_stats(cdev, cdev->reset_stats);
 }
 
-static void
-qed_arfs_mode_configure(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt,
-			struct qed_arfs_config_params *p_cfg_params)
+static enum gft_profile_type
+qed_arfs_mode_to_hsi(enum qed_filter_config_mode mode)
 {
-	if (p_cfg_params->arfs_enable) {
-		qed_set_rfs_mode_enable(p_hwfn, p_ptt, p_hwfn->rel_pf_id,
-					p_cfg_params->tcp, p_cfg_params->udp,
-					p_cfg_params->ipv4, p_cfg_params->ipv6);
-		DP_VERBOSE(p_hwfn, QED_MSG_SP,
-			   "tcp = %s, udp = %s, ipv4 = %s, ipv6 =%s\n",
+	if (mode == QED_FILTER_CONFIG_MODE_5_TUPLE)
+		return GFT_PROFILE_TYPE_4_TUPLE;
+	if (mode == QED_FILTER_CONFIG_MODE_IP_DEST)
+		return GFT_PROFILE_TYPE_IP_DST_PORT;
+	return GFT_PROFILE_TYPE_L4_DST_PORT;
+}
+
+void qed_arfs_mode_configure(struct qed_hwfn *p_hwfn,
+			     struct qed_ptt *p_ptt,
+			     struct qed_arfs_config_params *p_cfg_params)
+{
+	if (p_cfg_params->mode != QED_FILTER_CONFIG_MODE_DISABLE) {
+		qed_gft_config(p_hwfn, p_ptt, p_hwfn->rel_pf_id,
+			       p_cfg_params->tcp,
+			       p_cfg_params->udp,
+			       p_cfg_params->ipv4,
+			       p_cfg_params->ipv6,
+			       qed_arfs_mode_to_hsi(p_cfg_params->mode));
+		DP_VERBOSE(p_hwfn,
+			   QED_MSG_SP,
+			   "Configured Filtering: tcp = %s, udp = %s, ipv4 = %s, ipv6 =%s mode=%08x\n",
 			   p_cfg_params->tcp ? "Enable" : "Disable",
 			   p_cfg_params->udp ? "Enable" : "Disable",
 			   p_cfg_params->ipv4 ? "Enable" : "Disable",
-			   p_cfg_params->ipv6 ? "Enable" : "Disable");
+			   p_cfg_params->ipv6 ? "Enable" : "Disable",
+			   (u32)p_cfg_params->mode);
 	} else {
-		qed_set_rfs_mode_disable(p_hwfn, p_ptt, p_hwfn->rel_pf_id);
+		DP_VERBOSE(p_hwfn, QED_MSG_SP, "Disabled Filtering\n");
+		qed_gft_disable(p_hwfn, p_ptt, p_hwfn->rel_pf_id);
 	}
-
-	DP_VERBOSE(p_hwfn, QED_MSG_SP, "Configured ARFS mode : %s\n",
-		   p_cfg_params->arfs_enable ? "Enable" : "Disable");
 }
 
-static int
-qed_configure_rfs_ntuple_filter(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt,
+int
+qed_configure_rfs_ntuple_filter(struct qed_hwfn *p_hwfn,
 				struct qed_spq_comp_cb *p_cb,
-				dma_addr_t p_addr, u16 length, u16 qid,
-				u8 vport_id, bool b_is_add)
+				struct qed_ntuple_filter_params *p_params)
 {
 	struct rx_update_gft_filter_data *p_ramrod = NULL;
 	struct qed_spq_entry *p_ent = NULL;
@@ -2004,13 +2015,15 @@ qed_configure_rfs_ntuple_filter(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt,
 	u8 abs_vport_id = 0;
 	int rc = -EINVAL;
 
-	rc = qed_fw_vport(p_hwfn, vport_id, &abs_vport_id);
+	rc = qed_fw_vport(p_hwfn, p_params->vport_id, &abs_vport_id);
 	if (rc)
 		return rc;
 
-	rc = qed_fw_l2_queue(p_hwfn, qid, &abs_rx_q_id);
-	if (rc)
-		return rc;
+	if (p_params->qid != QED_RFS_NTUPLE_QID_RSS) {
+		rc = qed_fw_l2_queue(p_hwfn, p_params->qid, &abs_rx_q_id);
+		if (rc)
+			return rc;
+	}
 
 	/* Get SPQ entry */
 	memset(&init_data, 0, sizeof(init_data));
@@ -2032,17 +2045,27 @@ qed_configure_rfs_ntuple_filter(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt,
 		return rc;
 
 	p_ramrod = &p_ent->ramrod.rx_update_gft;
-	DMA_REGPAIR_LE(p_ramrod->pkt_hdr_addr, p_addr);
-	p_ramrod->pkt_hdr_length = cpu_to_le16(length);
-	p_ramrod->rx_qid_or_action_icid = cpu_to_le16(abs_rx_q_id);
-	p_ramrod->vport_id = abs_vport_id;
-	p_ramrod->filter_type = RFS_FILTER_TYPE;
-	p_ramrod->filter_action = b_is_add ? GFT_ADD_FILTER : GFT_DELETE_FILTER;
+
+	DMA_REGPAIR_LE(p_ramrod->pkt_hdr_addr, p_params->addr);
+	p_ramrod->pkt_hdr_length = cpu_to_le16(p_params->length);
+
+	if (p_params->qid != QED_RFS_NTUPLE_QID_RSS) {
+		p_ramrod->rx_qid_valid = 1;
+		p_ramrod->rx_qid = cpu_to_le16(abs_rx_q_id);
+	}
+
+	p_ramrod->flow_id_valid = 0;
+	p_ramrod->flow_id = 0;
+
+	p_ramrod->vport_id = cpu_to_le16((u16)abs_vport_id);
+	p_ramrod->filter_action = p_params->b_is_add ? GFT_ADD_FILTER
+	    : GFT_DELETE_FILTER;
 
 	DP_VERBOSE(p_hwfn, QED_MSG_SP,
 		   "V[%0x], Q[%04x] - %s filter from 0x%llx [length %04xb]\n",
 		   abs_vport_id, abs_rx_q_id,
-		   b_is_add ? "Adding" : "Removing", (u64)p_addr, length);
+		   p_params->b_is_add ? "Adding" : "Removing",
+		   (u64)p_params->addr, p_params->length);
 
 	return qed_spq_post(p_hwfn, p_ent, NULL);
 }
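
A hedged caller-side sketch of the new parameter-struct interface. Every field
name below appears in this patch; the DMA-mapped header buffer and surrounding
variables are assumed to exist, so this is a fragment rather than a full program:

/* Caller-side sketch; assumes pkt_hdr_dma_addr/pkt_hdr_len are pre-mapped */
struct qed_ntuple_filter_params params = { 0 };
int rc;

params.addr = pkt_hdr_dma_addr;		/* DMA address of the packet header */
params.length = pkt_hdr_len;
params.qid = rx_queue_id;		/* or QED_RFS_NTUPLE_QID_RSS */
params.vport_id = vport_id;
params.b_is_add = true;
params.b_is_vf = false;

rc = qed_configure_rfs_ntuple_filter(p_hwfn, &cb, &params);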
@@ -2743,7 +2766,8 @@ static int qed_configure_filter(struct qed_dev *cdev,
 	}
 }
 
-static int qed_configure_arfs_searcher(struct qed_dev *cdev, bool en_searcher)
+static int qed_configure_arfs_searcher(struct qed_dev *cdev,
+				       enum qed_filter_config_mode mode)
 {
 	struct qed_hwfn *p_hwfn = QED_LEADING_HWFN(cdev);
 	struct qed_arfs_config_params arfs_config_params;
@@ -2753,8 +2777,7 @@ static int qed_configure_arfs_searcher(struct qed_dev *cdev, bool en_searcher)
 	arfs_config_params.udp = true;
 	arfs_config_params.ipv4 = true;
 	arfs_config_params.ipv6 = true;
-	arfs_config_params.arfs_enable = en_searcher;
-
+	arfs_config_params.mode = mode;
 	qed_arfs_mode_configure(p_hwfn, p_hwfn->p_arfs_ptt,
 				&arfs_config_params);
 	return 0;
@@ -2762,8 +2785,8 @@ static int qed_configure_arfs_searcher(struct qed_dev *cdev, bool en_searcher)
 
 static void
 qed_arfs_sp_response_handler(struct qed_hwfn *p_hwfn,
-			     void *cookie, union event_ring_data *data,
-			     u8 fw_return_code)
+			     void *cookie,
+			     union event_ring_data *data, u8 fw_return_code)
 {
 	struct qed_common_cb_ops *op = p_hwfn->cdev->protocol_ops.common;
 	void *dev = p_hwfn->cdev->ops_cookie;
@@ -2771,10 +2794,10 @@ qed_arfs_sp_response_handler(struct qed_hwfn *p_hwfn,
 	op->arfs_filter_op(dev, cookie, fw_return_code);
 }
 
-static int qed_ntuple_arfs_filter_config(struct qed_dev *cdev, void *cookie,
-					 dma_addr_t mapping, u16 length,
-					 u16 vport_id, u16 rx_queue_id,
-					 bool add_filter)
+static int
+qed_ntuple_arfs_filter_config(struct qed_dev *cdev,
+			      void *cookie,
+			      struct qed_ntuple_filter_params *params)
 {
 	struct qed_hwfn *p_hwfn = QED_LEADING_HWFN(cdev);
 	struct qed_spq_comp_cb cb;
@@ -2783,9 +2806,19 @@ static int qed_ntuple_arfs_filter_config(struct qed_dev *cdev, void *cookie,
 	cb.function = qed_arfs_sp_response_handler;
 	cb.cookie = cookie;
 
-	rc = qed_configure_rfs_ntuple_filter(p_hwfn, p_hwfn->p_arfs_ptt,
-					     &cb, mapping, length, rx_queue_id,
-					     vport_id, add_filter);
+	if (params->b_is_vf) {
+		if (!qed_iov_is_valid_vfid(p_hwfn, params->vf_id, false,
+					   false)) {
+			DP_INFO(p_hwfn, "vfid 0x%02x is out of bounds\n",
+				params->vf_id);
+			return rc;
+		}
+
+		params->vport_id = params->vf_id + 1;
+		params->qid = QED_RFS_NTUPLE_QID_RSS;
+	}
+
+	rc = qed_configure_rfs_ntuple_filter(p_hwfn, &cb, params);
 	if (rc)
 		DP_NOTICE(p_hwfn,
 			  "Failed to issue a-RFS filter configuration\n");
diff --git a/drivers/net/ethernet/qlogic/qed/qed_l2.h b/drivers/net/ethernet/qlogic/qed/qed_l2.h
index cc1f248551c9..c4030e949cce 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_l2.h
+++ b/drivers/net/ethernet/qlogic/qed/qed_l2.h
@@ -190,7 +190,7 @@ struct qed_arfs_config_params {
 	bool udp;
 	bool ipv4;
 	bool ipv6;
-	bool arfs_enable;
+	enum qed_filter_config_mode mode;
 };
 
 struct qed_sp_vport_update_params {
@@ -277,6 +277,37 @@ void qed_get_vport_stats(struct qed_dev *cdev, struct qed_eth_stats *stats);
 
 void qed_reset_vport_stats(struct qed_dev *cdev);
 
+/**
+ * @brief qed_arfs_mode_configure -
+ *
+ * Enable or disable RFS mode. It must accept at least one of tcp or udp true
+ * and at least one of ipv4 or ipv6 true to enable RFS mode.
+ *
+ * @param p_hwfn
+ * @param p_ptt
+ * @param p_cfg_params - arfs mode configuration parameters.
+ *
+ */
+void qed_arfs_mode_configure(struct qed_hwfn *p_hwfn,
+			     struct qed_ptt *p_ptt,
+			     struct qed_arfs_config_params *p_cfg_params);
+
+/**
+ * @brief - qed_configure_rfs_ntuple_filter
+ *
+ * This ramrod should be used to add or remove an aRFS HW filter.
+ *
+ * @param p_hwfn
+ * @param p_cb - Used for QED_SPQ_MODE_CB, where the client would initialize
+ *		 it with a cookie and callback function address; if not
+ *		 using this mode, the client must pass NULL.
+ * @param p_params
+ */
+int
+qed_configure_rfs_ntuple_filter(struct qed_hwfn *p_hwfn,
+				struct qed_spq_comp_cb *p_cb,
+				struct qed_ntuple_filter_params *p_params);
+
 #define MAX_QUEUES_PER_QZONE    (sizeof(unsigned long) * 8)
 #define QED_QUEUE_CID_SELF	(0xff)
 
diff --git a/drivers/net/ethernet/qlogic/qed/qed_ll2.c b/drivers/net/ethernet/qlogic/qed/qed_ll2.c
index 047f556ca62e..c4f14fdc4e77 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_ll2.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_ll2.c
@@ -406,6 +406,9 @@ static void qed_ll2_rxq_parse_gsi(struct qed_hwfn *p_hwfn,
 	data->opaque_data_0 = le32_to_cpu(p_cqe->rx_cqe_gsi.src_mac_addrhi);
 	data->opaque_data_1 = le16_to_cpu(p_cqe->rx_cqe_gsi.src_mac_addrlo);
 	data->u.data_length_error = p_cqe->rx_cqe_gsi.data_length_error;
+	data->qp_id = le16_to_cpu(p_cqe->rx_cqe_gsi.qp_id);
+
+	data->src_qp = le32_to_cpu(p_cqe->rx_cqe_gsi.src_qp);
 }
 
 static void qed_ll2_rxq_parse_reg(struct qed_hwfn *p_hwfn,
@@ -927,7 +930,8 @@ static int qed_sp_ll2_rx_queue_start(struct qed_hwfn *p_hwfn,
 		       qed_chain_get_pbl_phys(&p_rx->rcq_chain));
 
 	p_ramrod->drop_ttl0_flg = p_ll2_conn->input.rx_drop_ttl0_flg;
-	p_ramrod->inner_vlan_removal_en = p_ll2_conn->input.rx_vlan_removal_en;
+	p_ramrod->inner_vlan_stripping_en =
+		p_ll2_conn->input.rx_vlan_removal_en;
 	p_ramrod->queue_id = p_ll2_conn->queue_id;
 	p_ramrod->main_func_queue = p_ll2_conn->main_func_queue ? 1 : 0;
 
@@ -1299,8 +1303,20 @@ int qed_ll2_acquire_connection(void *cxt, struct qed_ll2_acquire_data *data)
 
 	memcpy(&p_ll2_info->input, &data->input, sizeof(p_ll2_info->input));
 
-	p_ll2_info->tx_dest = (data->input.tx_dest == QED_LL2_TX_DEST_NW) ?
-			      CORE_TX_DEST_NW : CORE_TX_DEST_LB;
+	switch (data->input.tx_dest) {
+	case QED_LL2_TX_DEST_NW:
+		p_ll2_info->tx_dest = CORE_TX_DEST_NW;
+		break;
+	case QED_LL2_TX_DEST_LB:
+		p_ll2_info->tx_dest = CORE_TX_DEST_LB;
+		break;
+	case QED_LL2_TX_DEST_DROP:
+		p_ll2_info->tx_dest = CORE_TX_DEST_DROP;
+		break;
+	default:
+		return -EINVAL;
+	}
+
 	if (data->input.conn_type == QED_LL2_TYPE_OOO ||
 	    data->input.secondary_queue)
 		p_ll2_info->main_func_queue = false;
@@ -2281,8 +2297,7 @@ static int qed_ll2_start(struct qed_dev *cdev, struct qed_ll2_params *params)
 		goto release_terminate;
 	}
 
-	if (cdev->hwfns[0].hw_info.personality == QED_PCI_ISCSI &&
-	    cdev->hwfns[0].pf_params.iscsi_pf_params.ooo_enable) {
+	if (QED_LEADING_HWFN(cdev)->hw_info.personality == QED_PCI_ISCSI) {
 		DP_VERBOSE(cdev, QED_MSG_STORAGE, "Starting OOO LL2 queue\n");
 		rc = qed_ll2_start_ooo(cdev, params);
 		if (rc) {
@@ -2340,8 +2355,7 @@ static int qed_ll2_stop(struct qed_dev *cdev)
 	qed_ptt_release(QED_LEADING_HWFN(cdev), p_ptt);
 	eth_zero_addr(cdev->ll2_mac_address);
 
-	if (cdev->hwfns[0].hw_info.personality == QED_PCI_ISCSI &&
-	    cdev->hwfns[0].pf_params.iscsi_pf_params.ooo_enable)
+	if (QED_LEADING_HWFN(cdev)->hw_info.personality == QED_PCI_ISCSI)
 		qed_ll2_stop_ooo(cdev);
 
 	rc = qed_ll2_terminate_connection(QED_LEADING_HWFN(cdev),
diff --git a/drivers/net/ethernet/qlogic/qed/qed_mcp.c b/drivers/net/ethernet/qlogic/qed/qed_mcp.c
index 8b99c7d26f34..6f46cb11f349 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_mcp.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_mcp.c
@@ -2234,7 +2234,7 @@ int qed_mcp_nvm_read(struct qed_dev *cdev, u32 addr, u8 *p_buf, u32 len)
 					DRV_MSG_CODE_NVM_READ_NVRAM,
 					addr + offset +
 					(bytes_to_copy <<
-					 DRV_MB_PARAM_NVM_LEN_SHIFT),
+					 DRV_MB_PARAM_NVM_LEN_OFFSET),
 					&resp, &resp_param,
 					&read_len,
 					(u32 *)(p_buf + offset));
diff --git a/drivers/net/ethernet/qlogic/qed/qed_rdma.c b/drivers/net/ethernet/qlogic/qed/qed_rdma.c
index b7abb8205d3a..5d040b873137 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_rdma.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_rdma.c
@@ -360,13 +360,13 @@ static void qed_rdma_resc_free(struct qed_hwfn *p_hwfn)
 
 static void qed_rdma_free_tid(void *rdma_cxt, u32 itid)
 {
-        struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt;
+	struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt;
 
-        DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "itid = %08x\n", itid);
+	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "itid = %08x\n", itid);
 
-        spin_lock_bh(&p_hwfn->p_rdma_info->lock);
-        qed_bmap_release_id(p_hwfn, &p_hwfn->p_rdma_info->tid_map, itid);
-        spin_unlock_bh(&p_hwfn->p_rdma_info->lock);
+	spin_lock_bh(&p_hwfn->p_rdma_info->lock);
+	qed_bmap_release_id(p_hwfn, &p_hwfn->p_rdma_info->tid_map, itid);
+	spin_unlock_bh(&p_hwfn->p_rdma_info->lock);
 }
 
 static void qed_rdma_free_reserved_lkey(struct qed_hwfn *p_hwfn)
@@ -570,7 +570,7 @@ static int qed_rdma_start_fw(struct qed_hwfn *p_hwfn,
 
 	if (QED_IS_IWARP_PERSONALITY(p_hwfn)) {
 		qed_iwarp_init_fw_ramrod(p_hwfn,
-					 &p_ent->ramrod.iwarp_init_func.iwarp);
+					 &p_ent->ramrod.iwarp_init_func);
 		p_ramrod = &p_ent->ramrod.iwarp_init_func.rdma;
 	} else {
 		p_ramrod = &p_ent->ramrod.roce_init_func.rdma;
diff --git a/drivers/net/ethernet/qlogic/qed/qed_reg_addr.h b/drivers/net/ethernet/qlogic/qed/qed_reg_addr.h
index 0cdb4337b3a0..f7122059b6b5 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_reg_addr.h
+++ b/drivers/net/ethernet/qlogic/qed/qed_reg_addr.h
@@ -124,6 +124,8 @@
 	0x1f0434UL
 #define PRS_REG_SEARCH_TAG1 \
 	0x1f0444UL
+#define PRS_REG_SEARCH_TENANT_ID \
+	0x1f044cUL
 #define PRS_REG_PKT_LEN_STAT_TAGS_NOT_COUNTED_FIRST \
 	0x1f0a0cUL
 #define PRS_REG_SEARCH_TCP_FIRST_FRAG \
@@ -200,7 +202,13 @@
 	0x2e8800UL
 #define CCFC_REG_STRONG_ENABLE_VF \
 	0x2e070cUL
-#define  CDU_REG_CID_ADDR_PARAMS	\
+#define CDU_REG_CCFC_CTX_VALID0 \
+	0x580400UL
+#define CDU_REG_CCFC_CTX_VALID1 \
+	0x580404UL
+#define CDU_REG_TCFC_CTX_VALID0 \
+	0x580408UL
+#define  CDU_REG_CID_ADDR_PARAMS \
 	0x580900UL
 #define  DBG_REG_CLIENT_ENABLE \
 	0x010004UL
@@ -564,7 +572,7 @@
 #define PRS_REG_ENCAPSULATION_TYPE_EN	0x1f0730UL
 #define PRS_REG_GRE_PROTOCOL		0x1f0734UL
 #define PRS_REG_VXLAN_PORT		0x1f0738UL
-#define PRS_REG_OUTPUT_FORMAT_4_0	0x1f099cUL
+#define PRS_REG_OUTPUT_FORMAT_4_0_BB_K2	0x1f099cUL
 #define NIG_REG_ENC_TYPE_ENABLE		0x501058UL
 
 #define NIG_REG_ENC_TYPE_ENABLE_ETH_OVER_GRE_ENABLE		(0x1 << 0)
@@ -580,11 +588,11 @@
 #define PRS_REG_NGE_PORT		0x1f086cUL
 #define NIG_REG_NGE_PORT		0x508b38UL
 
-#define DORQ_REG_L2_EDPM_TUNNEL_GRE_ETH_EN	0x10090cUL
-#define DORQ_REG_L2_EDPM_TUNNEL_GRE_IP_EN	0x100910UL
-#define DORQ_REG_L2_EDPM_TUNNEL_VXLAN_EN	0x100914UL
-#define DORQ_REG_L2_EDPM_TUNNEL_NGE_IP_EN	0x10092cUL
-#define DORQ_REG_L2_EDPM_TUNNEL_NGE_ETH_EN	0x100930UL
+#define DORQ_REG_L2_EDPM_TUNNEL_GRE_ETH_EN		0x10090cUL
+#define DORQ_REG_L2_EDPM_TUNNEL_GRE_IP_EN		0x100910UL
+#define DORQ_REG_L2_EDPM_TUNNEL_VXLAN_EN		0x100914UL
+#define DORQ_REG_L2_EDPM_TUNNEL_NGE_IP_EN_K2_E5		0x10092cUL
+#define DORQ_REG_L2_EDPM_TUNNEL_NGE_ETH_EN_K2_E5	0x100930UL
 
 #define NIG_REG_NGE_IP_ENABLE			0x508b28UL
 #define NIG_REG_NGE_ETH_ENABLE			0x508b2cUL
@@ -595,15 +603,15 @@
 #define QM_REG_WFQPFWEIGHT	0x2f4e80UL
 #define QM_REG_WFQVPWEIGHT	0x2fa000UL
 
-#define PGLCS_REG_DBG_SELECT_K2 \
+#define PGLCS_REG_DBG_SELECT_K2_E5 \
 	0x001d14UL
-#define PGLCS_REG_DBG_DWORD_ENABLE_K2 \
+#define PGLCS_REG_DBG_DWORD_ENABLE_K2_E5 \
 	0x001d18UL
-#define PGLCS_REG_DBG_SHIFT_K2 \
+#define PGLCS_REG_DBG_SHIFT_K2_E5 \
 	0x001d1cUL
-#define PGLCS_REG_DBG_FORCE_VALID_K2 \
+#define PGLCS_REG_DBG_FORCE_VALID_K2_E5 \
 	0x001d20UL
-#define PGLCS_REG_DBG_FORCE_FRAME_K2 \
+#define PGLCS_REG_DBG_FORCE_FRAME_K2_E5 \
 	0x001d24UL
 #define MISC_REG_RESET_PL_PDA_VMAIN_1 \
 	0x008070UL
@@ -615,7 +623,7 @@
 	0x009050UL
 #define MISCS_REG_RESET_PL_HV \
 	0x009060UL
-#define MISCS_REG_RESET_PL_HV_2_K2	\
+#define MISCS_REG_RESET_PL_HV_2_K2_E5 \
 	0x009150UL
 #define DMAE_REG_DBG_SELECT \
 	0x00c510UL
@@ -647,15 +655,15 @@
 	0x0500b0UL
 #define GRC_REG_DBG_FORCE_FRAME	\
 	0x0500b4UL
-#define UMAC_REG_DBG_SELECT_K2 \
+#define UMAC_REG_DBG_SELECT_K2_E5 \
 	0x051094UL
-#define UMAC_REG_DBG_DWORD_ENABLE_K2 \
+#define UMAC_REG_DBG_DWORD_ENABLE_K2_E5 \
 	0x051098UL
-#define UMAC_REG_DBG_SHIFT_K2 \
+#define UMAC_REG_DBG_SHIFT_K2_E5 \
 	0x05109cUL
-#define UMAC_REG_DBG_FORCE_VALID_K2 \
+#define UMAC_REG_DBG_FORCE_VALID_K2_E5 \
 	0x0510a0UL
-#define UMAC_REG_DBG_FORCE_FRAME_K2 \
+#define UMAC_REG_DBG_FORCE_FRAME_K2_E5 \
 	0x0510a4UL
 #define MCP2_REG_DBG_SELECT \
 	0x052400UL
@@ -717,15 +725,15 @@
 	0x1f0ba0UL
 #define PRS_REG_DBG_FORCE_FRAME	\
 	0x1f0ba4UL
-#define CNIG_REG_DBG_SELECT_K2 \
+#define CNIG_REG_DBG_SELECT_K2_E5 \
 	0x218254UL
-#define CNIG_REG_DBG_DWORD_ENABLE_K2 \
+#define CNIG_REG_DBG_DWORD_ENABLE_K2_E5 \
 	0x218258UL
-#define CNIG_REG_DBG_SHIFT_K2 \
+#define CNIG_REG_DBG_SHIFT_K2_E5 \
 	0x21825cUL
-#define CNIG_REG_DBG_FORCE_VALID_K2 \
+#define CNIG_REG_DBG_FORCE_VALID_K2_E5 \
 	0x218260UL
-#define CNIG_REG_DBG_FORCE_FRAME_K2 \
+#define CNIG_REG_DBG_FORCE_FRAME_K2_E5 \
 	0x218264UL
 #define PRM_REG_DBG_SELECT \
 	0x2306a8UL
@@ -997,35 +1005,35 @@
 	0x580710UL
 #define CDU_REG_DBG_FORCE_FRAME	\
 	0x580714UL
-#define WOL_REG_DBG_SELECT_K2 \
+#define WOL_REG_DBG_SELECT_K2_E5 \
 	0x600140UL
-#define WOL_REG_DBG_DWORD_ENABLE_K2 \
+#define WOL_REG_DBG_DWORD_ENABLE_K2_E5 \
 	0x600144UL
-#define WOL_REG_DBG_SHIFT_K2 \
+#define WOL_REG_DBG_SHIFT_K2_E5 \
 	0x600148UL
-#define WOL_REG_DBG_FORCE_VALID_K2 \
+#define WOL_REG_DBG_FORCE_VALID_K2_E5 \
 	0x60014cUL
-#define WOL_REG_DBG_FORCE_FRAME_K2 \
+#define WOL_REG_DBG_FORCE_FRAME_K2_E5 \
 	0x600150UL
-#define BMBN_REG_DBG_SELECT_K2 \
+#define BMBN_REG_DBG_SELECT_K2_E5 \
 	0x610140UL
-#define BMBN_REG_DBG_DWORD_ENABLE_K2 \
+#define BMBN_REG_DBG_DWORD_ENABLE_K2_E5 \
 	0x610144UL
-#define BMBN_REG_DBG_SHIFT_K2 \
+#define BMBN_REG_DBG_SHIFT_K2_E5 \
 	0x610148UL
-#define BMBN_REG_DBG_FORCE_VALID_K2 \
+#define BMBN_REG_DBG_FORCE_VALID_K2_E5 \
 	0x61014cUL
-#define BMBN_REG_DBG_FORCE_FRAME_K2 \
+#define BMBN_REG_DBG_FORCE_FRAME_K2_E5 \
 	0x610150UL
-#define NWM_REG_DBG_SELECT_K2 \
+#define NWM_REG_DBG_SELECT_K2_E5 \
 	0x8000ecUL
-#define NWM_REG_DBG_DWORD_ENABLE_K2 \
+#define NWM_REG_DBG_DWORD_ENABLE_K2_E5 \
 	0x8000f0UL
-#define NWM_REG_DBG_SHIFT_K2 \
+#define NWM_REG_DBG_SHIFT_K2_E5 \
 	0x8000f4UL
-#define NWM_REG_DBG_FORCE_VALID_K2 \
+#define NWM_REG_DBG_FORCE_VALID_K2_E5 \
 	0x8000f8UL
-#define NWM_REG_DBG_FORCE_FRAME_K2\
+#define NWM_REG_DBG_FORCE_FRAME_K2_E5 \
 	0x8000fcUL
 #define PBF_REG_DBG_SELECT \
 	0xd80060UL
@@ -1247,36 +1255,76 @@
 	0x1901534UL
 #define USEM_REG_DBG_FORCE_FRAME \
 	0x1901538UL
-#define NWS_REG_DBG_SELECT_K2 \
+#define NWS_REG_DBG_SELECT_K2_E5 \
 	0x700128UL
-#define NWS_REG_DBG_DWORD_ENABLE_K2 \
+#define NWS_REG_DBG_DWORD_ENABLE_K2_E5 \
 	0x70012cUL
-#define NWS_REG_DBG_SHIFT_K2 \
+#define NWS_REG_DBG_SHIFT_K2_E5 \
 	0x700130UL
-#define NWS_REG_DBG_FORCE_VALID_K2 \
+#define NWS_REG_DBG_FORCE_VALID_K2_E5 \
 	0x700134UL
-#define NWS_REG_DBG_FORCE_FRAME_K2 \
+#define NWS_REG_DBG_FORCE_FRAME_K2_E5 \
 	0x700138UL
-#define MS_REG_DBG_SELECT_K2 \
+#define MS_REG_DBG_SELECT_K2_E5 \
 	0x6a0228UL
-#define MS_REG_DBG_DWORD_ENABLE_K2 \
+#define MS_REG_DBG_DWORD_ENABLE_K2_E5 \
 	0x6a022cUL
-#define MS_REG_DBG_SHIFT_K2 \
+#define MS_REG_DBG_SHIFT_K2_E5 \
 	0x6a0230UL
-#define MS_REG_DBG_FORCE_VALID_K2 \
+#define MS_REG_DBG_FORCE_VALID_K2_E5 \
 	0x6a0234UL
-#define MS_REG_DBG_FORCE_FRAME_K2 \
+#define MS_REG_DBG_FORCE_FRAME_K2_E5 \
 	0x6a0238UL
-#define PCIE_REG_DBG_COMMON_SELECT_K2 \
+#define PCIE_REG_DBG_COMMON_SELECT_K2_E5 \
 	0x054398UL
-#define PCIE_REG_DBG_COMMON_DWORD_ENABLE_K2 \
+#define PCIE_REG_DBG_COMMON_DWORD_ENABLE_K2_E5 \
 	0x05439cUL
-#define PCIE_REG_DBG_COMMON_SHIFT_K2 \
+#define PCIE_REG_DBG_COMMON_SHIFT_K2_E5 \
 	0x0543a0UL
-#define PCIE_REG_DBG_COMMON_FORCE_VALID_K2 \
+#define PCIE_REG_DBG_COMMON_FORCE_VALID_K2_E5 \
 	0x0543a4UL
-#define PCIE_REG_DBG_COMMON_FORCE_FRAME_K2 \
+#define PCIE_REG_DBG_COMMON_FORCE_FRAME_K2_E5 \
 	0x0543a8UL
+#define PTLD_REG_DBG_SELECT_E5 \
+	0x5a1600UL
+#define PTLD_REG_DBG_DWORD_ENABLE_E5 \
+	0x5a1604UL
+#define PTLD_REG_DBG_SHIFT_E5 \
+	0x5a1608UL
+#define PTLD_REG_DBG_FORCE_VALID_E5 \
+	0x5a160cUL
+#define PTLD_REG_DBG_FORCE_FRAME_E5 \
+	0x5a1610UL
+#define YPLD_REG_DBG_SELECT_E5 \
+	0x5c1600UL
+#define YPLD_REG_DBG_DWORD_ENABLE_E5 \
+	0x5c1604UL
+#define YPLD_REG_DBG_SHIFT_E5 \
+	0x5c1608UL
+#define YPLD_REG_DBG_FORCE_VALID_E5 \
+	0x5c160cUL
+#define YPLD_REG_DBG_FORCE_FRAME_E5 \
+	0x5c1610UL
+#define RGSRC_REG_DBG_SELECT_E5	\
+	0x320040UL
+#define RGSRC_REG_DBG_DWORD_ENABLE_E5 \
+	0x320044UL
+#define RGSRC_REG_DBG_SHIFT_E5 \
+	0x320048UL
+#define RGSRC_REG_DBG_FORCE_VALID_E5 \
+	0x32004cUL
+#define RGSRC_REG_DBG_FORCE_FRAME_E5 \
+	0x320050UL
+#define TGSRC_REG_DBG_SELECT_E5	\
+	0x322040UL
+#define TGSRC_REG_DBG_DWORD_ENABLE_E5 \
+	0x322044UL
+#define TGSRC_REG_DBG_SHIFT_E5 \
+	0x322048UL
+#define TGSRC_REG_DBG_FORCE_VALID_E5 \
+	0x32204cUL
+#define TGSRC_REG_DBG_FORCE_FRAME_E5 \
+	0x322050UL
 #define MISC_REG_RESET_PL_UA \
 	0x008050UL
 #define MISC_REG_RESET_PL_HV \
@@ -1415,7 +1463,7 @@
 	0x1940000UL
 #define SEM_FAST_REG_INT_RAM \
 	0x020000UL
-#define SEM_FAST_REG_INT_RAM_SIZE \
+#define SEM_FAST_REG_INT_RAM_SIZE_BB_K2 \
 	20480
 #define GRC_REG_TRACE_FIFO_VALID_DATA \
 	0x050064UL
@@ -1433,6 +1481,8 @@
 	0x340800UL
 #define BRB_REG_BIG_RAM_DATA \
 	0x341500UL
+#define BRB_REG_BIG_RAM_DATA_SIZE \
+	64
 #define SEM_FAST_REG_STALL_0_BB_K2 \
 	0x000488UL
 #define SEM_FAST_REG_STALLED \
@@ -1451,7 +1501,7 @@
 	0x238c30UL
 #define MISCS_REG_BLOCK_256B_EN \
 	0x009074UL
-#define MCP_REG_SCRATCH_SIZE \
+#define MCP_REG_SCRATCH_SIZE_BB_K2 \
 	57344
 #define MCP_REG_CPU_REG_FILE \
 	0xe05200UL
@@ -1485,35 +1535,35 @@
 	0x008c14UL
 #define NWS_REG_NWS_CMU_K2	\
 	0x720000UL
-#define PHY_NW_IP_REG_PHY0_TOP_TBUS_ADDR_7_0_K2 \
+#define PHY_NW_IP_REG_PHY0_TOP_TBUS_ADDR_7_0_K2_E5 \
 	0x000680UL
-#define PHY_NW_IP_REG_PHY0_TOP_TBUS_ADDR_15_8_K2 \
+#define PHY_NW_IP_REG_PHY0_TOP_TBUS_ADDR_15_8_K2_E5 \
 	0x000684UL
-#define PHY_NW_IP_REG_PHY0_TOP_TBUS_DATA_7_0_K2 \
+#define PHY_NW_IP_REG_PHY0_TOP_TBUS_DATA_7_0_K2_E5 \
 	0x0006c0UL
-#define PHY_NW_IP_REG_PHY0_TOP_TBUS_DATA_11_8_K2 \
+#define PHY_NW_IP_REG_PHY0_TOP_TBUS_DATA_11_8_K2_E5 \
 	0x0006c4UL
-#define MS_REG_MS_CMU_K2 \
+#define MS_REG_MS_CMU_K2_E5 \
 	0x6a4000UL
-#define PHY_SGMII_IP_REG_AHB_CMU_CSR_0_X130_K2 \
+#define PHY_SGMII_IP_REG_AHB_CMU_CSR_0_X130_K2_E5 \
 	0x000208UL
-#define PHY_SGMII_IP_REG_AHB_CMU_CSR_0_X131_K2 \
+#define PHY_SGMII_IP_REG_AHB_CMU_CSR_0_X131_K2_E5 \
 	0x00020cUL
-#define PHY_SGMII_IP_REG_AHB_CMU_CSR_0_X132_K2 \
+#define PHY_SGMII_IP_REG_AHB_CMU_CSR_0_X132_K2_E5 \
 	0x000210UL
-#define PHY_SGMII_IP_REG_AHB_CMU_CSR_0_X133_K2 \
+#define PHY_SGMII_IP_REG_AHB_CMU_CSR_0_X133_K2_E5 \
 	0x000214UL
-#define PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X130_K2 \
+#define PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X130_K2_E5 \
 	0x000208UL
-#define PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X131_K2 \
+#define PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X131_K2_E5 \
 	0x00020cUL
-#define PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X132_K2 \
+#define PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X132_K2_E5 \
 	0x000210UL
-#define PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X133_K2 \
+#define PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X133_K2_E5 \
 	0x000214UL
-#define PHY_PCIE_REG_PHY0_K2 \
+#define PHY_PCIE_REG_PHY0_K2_E5 \
 	0x620000UL
-#define PHY_PCIE_REG_PHY1_K2 \
+#define PHY_PCIE_REG_PHY1_K2_E5 \
 	0x624000UL
 #define NIG_REG_ROCE_DUPLICATE_TO_HOST 0x5088f0UL
 #define PRS_REG_LIGHT_L2_ETHERTYPE_EN 0x1f0968UL
diff --git a/drivers/net/ethernet/qlogic/qed/qed_sp_commands.c b/drivers/net/ethernet/qlogic/qed/qed_sp_commands.c
index a1d33f35aad3..5e927b6cac22 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_sp_commands.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_sp_commands.c
@@ -351,7 +351,9 @@ int qed_sp_pf_start(struct qed_hwfn *p_hwfn,
 		DP_NOTICE(p_hwfn, "Unsupported MF mode, init as DEFAULT\n");
 		p_ramrod->mf_mode = MF_NPAR;
 	}
-	p_ramrod->outer_tag = p_hwfn->hw_info.ovlan;
+
+	p_ramrod->outer_tag_config.outer_tag.tci =
+		cpu_to_le16(p_hwfn->hw_info.ovlan);
 
 	/* Place EQ address in RAMROD */
 	DMA_REGPAIR_LE(p_ramrod->event_ring_pbl_addr,
@@ -396,8 +398,8 @@ int qed_sp_pf_start(struct qed_hwfn *p_hwfn,
 	p_ramrod->hsi_fp_ver.minor_ver_arr[ETH_VER_KEY] = ETH_HSI_VER_MINOR;
 
 	DP_VERBOSE(p_hwfn, QED_MSG_SPQ,
-		   "Setting event_ring_sb [id %04x index %02x], outer_tag [%d]\n",
-		   sb, sb_index, p_ramrod->outer_tag);
+		   "Setting event_ring_sb [id %04x index %02x], outer_tag.tci [%d]\n",
+		   sb, sb_index, p_ramrod->outer_tag_config.outer_tag.tci);
 
 	rc = qed_spq_post(p_hwfn, p_ent, NULL);
 
diff --git a/drivers/net/ethernet/qlogic/qed/qed_spq.c b/drivers/net/ethernet/qlogic/qed/qed_spq.c
index cd9a0297ebc5..1673fc90027f 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_spq.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_spq.c
@@ -213,7 +213,7 @@ static int qed_spq_fill_entry(struct qed_hwfn *p_hwfn,
 static void qed_spq_hw_initialize(struct qed_hwfn *p_hwfn,
 				  struct qed_spq *p_spq)
 {
-	struct core_conn_context *p_cxt;
+	struct e4_core_conn_context *p_cxt;
 	struct qed_cxt_info cxt_info;
 	u16 physical_q;
 	int rc;
@@ -231,11 +231,11 @@ static void qed_spq_hw_initialize(struct qed_hwfn *p_hwfn,
 	p_cxt = cxt_info.p_cxt;
 
 	SET_FIELD(p_cxt->xstorm_ag_context.flags10,
-		  XSTORM_CORE_CONN_AG_CTX_DQ_CF_EN, 1);
+		  E4_XSTORM_CORE_CONN_AG_CTX_DQ_CF_EN, 1);
 	SET_FIELD(p_cxt->xstorm_ag_context.flags1,
-		  XSTORM_CORE_CONN_AG_CTX_DQ_CF_ACTIVE, 1);
+		  E4_XSTORM_CORE_CONN_AG_CTX_DQ_CF_ACTIVE, 1);
 	SET_FIELD(p_cxt->xstorm_ag_context.flags9,
-		  XSTORM_CORE_CONN_AG_CTX_CONSOLID_PROD_CF_EN, 1);
+		  E4_XSTORM_CORE_CONN_AG_CTX_CONSOLID_PROD_CF_EN, 1);
 
 	/* QM physical queue */
 	physical_q = qed_get_cm_pq_idx(p_hwfn, PQ_FLAGS_LB);
diff --git a/drivers/net/ethernet/qlogic/qed/qed_sriov.c b/drivers/net/ethernet/qlogic/qed/qed_sriov.c
index 3f40b1de7957..5acb91b3564c 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_sriov.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_sriov.c
@@ -153,9 +153,9 @@ static int qed_sp_vf_stop(struct qed_hwfn *p_hwfn,
 	return qed_spq_post(p_hwfn, p_ent, NULL);
 }
 
-static bool qed_iov_is_valid_vfid(struct qed_hwfn *p_hwfn,
-				  int rel_vf_id,
-				  bool b_enabled_only, bool b_non_malicious)
+bool qed_iov_is_valid_vfid(struct qed_hwfn *p_hwfn,
+			   int rel_vf_id,
+			   bool b_enabled_only, bool b_non_malicious)
 {
 	if (!p_hwfn->pf_iov_info) {
 		DP_NOTICE(p_hwfn->cdev, "No iov info\n");
@@ -1621,7 +1621,7 @@ static void qed_iov_vf_mbx_acquire(struct qed_hwfn *p_hwfn,
 	/* fill in pfdev info */
 	pfdev_info->chip_num = p_hwfn->cdev->chip_num;
 	pfdev_info->db_size = 0;
-	pfdev_info->indices_per_sb = PIS_PER_SB;
+	pfdev_info->indices_per_sb = PIS_PER_SB_E4;
 
 	pfdev_info->capabilities = PFVF_ACQUIRE_CAP_DEFAULT_UNTAGGED |
 				   PFVF_ACQUIRE_CAP_POST_FW_OVERRIDE;
@@ -3582,11 +3582,11 @@ static int
 qed_iov_vf_flr_poll_pbf(struct qed_hwfn *p_hwfn,
 			struct qed_vf_info *p_vf, struct qed_ptt *p_ptt)
 {
-	u32 cons[MAX_NUM_VOQS], distance[MAX_NUM_VOQS];
+	u32 cons[MAX_NUM_VOQS_E4], distance[MAX_NUM_VOQS_E4];
 	int i, cnt;
 
 	/* Read initial consumers & producers */
-	for (i = 0; i < MAX_NUM_VOQS; i++) {
+	for (i = 0; i < MAX_NUM_VOQS_E4; i++) {
 		u32 prod;
 
 		cons[i] = qed_rd(p_hwfn, p_ptt,
@@ -3601,7 +3601,7 @@ qed_iov_vf_flr_poll_pbf(struct qed_hwfn *p_hwfn,
 	/* Wait for consumers to pass the producers */
 	i = 0;
 	for (cnt = 0; cnt < 50; cnt++) {
-		for (; i < MAX_NUM_VOQS; i++) {
+		for (; i < MAX_NUM_VOQS_E4; i++) {
 			u32 tmp;
 
 			tmp = qed_rd(p_hwfn, p_ptt,
@@ -3611,7 +3611,7 @@ qed_iov_vf_flr_poll_pbf(struct qed_hwfn *p_hwfn,
 				break;
 		}
 
-		if (i == MAX_NUM_VOQS)
+		if (i == MAX_NUM_VOQS_E4)
 			break;
 
 		msleep(20);
@@ -4237,6 +4237,7 @@ qed_iov_bulletin_get_forced_vlan(struct qed_hwfn *p_hwfn, u16 rel_vf_id)
 static int qed_iov_configure_tx_rate(struct qed_hwfn *p_hwfn,
 				     struct qed_ptt *p_ptt, int vfid, int val)
 {
+	struct qed_mcp_link_state *p_link;
 	struct qed_vf_info *vf;
 	u8 abs_vp_id = 0;
 	int rc;
@@ -4249,7 +4250,10 @@ static int qed_iov_configure_tx_rate(struct qed_hwfn *p_hwfn,
 	if (rc)
 		return rc;
 
-	return qed_init_vport_rl(p_hwfn, p_ptt, abs_vp_id, (u32)val);
+	p_link = &QED_LEADING_HWFN(p_hwfn->cdev)->mcp_info->link_output;
+
+	return qed_init_vport_rl(p_hwfn, p_ptt, abs_vp_id, (u32)val,
+				 p_link->speed);
 }
 
 static int
diff --git a/drivers/net/ethernet/qlogic/qed/qed_sriov.h b/drivers/net/ethernet/qlogic/qed/qed_sriov.h
index 3955929ba892..9a8fd79611f2 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_sriov.h
+++ b/drivers/net/ethernet/qlogic/qed/qed_sriov.h
@@ -274,6 +274,23 @@ enum qed_iov_wq_flag {
 
 #ifdef CONFIG_QED_SRIOV
 /**
+ * @brief Check whether the given VF ID @rel_vf_id is valid
+ *        with respect to @b_enabled_only:
+ *        if b_enabled_only is true, only an enabled VF ID is valid;
+ *        otherwise any VF ID below max_vfs is valid.
+ *
+ * @param p_hwfn
+ * @param rel_vf_id - Relative VF ID
+ * @param b_enabled_only - consider only enabled VFs
+ * @param b_non_malicious - true iff the VF must also not be flagged malicious.
+ *
+ * @return bool - true for valid VF ID
+ */
+bool qed_iov_is_valid_vfid(struct qed_hwfn *p_hwfn,
+			   int rel_vf_id,
+			   bool b_enabled_only, bool b_non_malicious);
+
+/**
  * @brief - Given a VF index, return index of next [including that] active VF.
  *
  * @param p_hwfn
@@ -376,6 +393,13 @@ void qed_vf_start_iov_wq(struct qed_dev *cdev);
 int qed_sriov_disable(struct qed_dev *cdev, bool pci_enabled);
 void qed_inform_vf_link_state(struct qed_hwfn *hwfn);
 #else
+static inline bool
+qed_iov_is_valid_vfid(struct qed_hwfn *p_hwfn,
+		      int rel_vf_id, bool b_enabled_only, bool b_non_malicious)
+{
+	return false;
+}
+
 static inline u16 qed_iov_get_next_active_vf(struct qed_hwfn *p_hwfn,
 					     u16 rel_vf_id)
 {
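Exporting qed_iov_is_valid_vfid() comes with a matching static-inline stub under !CONFIG_QED_SRIOV, the usual kernel idiom that lets callers skip #ifdef guards entirely. A generic sketch of the idiom, with hypothetical names:

	/* example.h -- hypothetical Kconfig stub pattern */
	#ifdef CONFIG_EXAMPLE_FEATURE
	bool example_id_is_valid(int id);	/* real version lives in example.c */
	#else
	static inline bool example_id_is_valid(int id)
	{
		return false;			/* feature compiled out */
	}
	#endif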
diff --git a/drivers/net/ethernet/qlogic/qede/qede.h b/drivers/net/ethernet/qlogic/qede/qede.h
index a3a70ade411f..9935978c5542 100644
--- a/drivers/net/ethernet/qlogic/qede/qede.h
+++ b/drivers/net/ethernet/qlogic/qede/qede.h
@@ -40,6 +40,7 @@
 #include <linux/kernel.h>
 #include <linux/mutex.h>
 #include <linux/bpf.h>
+#include <net/xdp.h>
 #include <linux/qed/qede_rdma.h>
 #include <linux/io.h>
 #ifdef CONFIG_RFS_ACCEL
@@ -52,9 +53,9 @@
 #include <linux/qed/qed_eth_if.h>
 
 #define QEDE_MAJOR_VERSION		8
-#define QEDE_MINOR_VERSION		10
-#define QEDE_REVISION_VERSION		10
-#define QEDE_ENGINEERING_VERSION	21
+#define QEDE_MINOR_VERSION		33
+#define QEDE_REVISION_VERSION		0
+#define QEDE_ENGINEERING_VERSION	20
 #define DRV_MODULE_VERSION __stringify(QEDE_MAJOR_VERSION) "."	\
 		__stringify(QEDE_MINOR_VERSION) "."		\
 		__stringify(QEDE_REVISION_VERSION) "."		\
@@ -345,6 +346,7 @@ struct qede_rx_queue {
 	u64 xdp_no_pass;
 
 	void *handle;
+	struct xdp_rxq_info xdp_rxq;
 };
 
 union db_prod {
@@ -494,6 +496,8 @@ int qede_vlan_rx_kill_vid(struct net_device *dev, __be16 proto, u16 vid);
 void qede_vlan_mark_nonconfigured(struct qede_dev *edev);
 int qede_configure_vlan_filters(struct qede_dev *edev);
 
+netdev_features_t qede_fix_features(struct net_device *dev,
+				    netdev_features_t features);
 int qede_set_features(struct net_device *dev, netdev_features_t features);
 void qede_set_rx_mode(struct net_device *ndev);
 void qede_config_rx_mode(struct net_device *ndev);
diff --git a/drivers/net/ethernet/qlogic/qede/qede_ethtool.c b/drivers/net/ethernet/qlogic/qede/qede_ethtool.c
index dae741270022..4ca3847fffd4 100644
--- a/drivers/net/ethernet/qlogic/qede/qede_ethtool.c
+++ b/drivers/net/ethernet/qlogic/qede/qede_ethtool.c
@@ -940,6 +940,9 @@ int qede_change_mtu(struct net_device *ndev, int new_mtu)
 	DP_VERBOSE(edev, (NETIF_MSG_IFUP | NETIF_MSG_IFDOWN),
 		   "Configuring MTU size of %d\n", new_mtu);
 
+	if (new_mtu > PAGE_SIZE)
+		ndev->features &= ~NETIF_F_GRO_HW;
+
 	/* Set the mtu field and re-start the interface if needed */
 	args.u.mtu = new_mtu;
 	args.func = &qede_update_mtu;
diff --git a/drivers/net/ethernet/qlogic/qede/qede_filter.c b/drivers/net/ethernet/qlogic/qede/qede_filter.c
index c1a0708a7d7c..6687e04d1558 100644
--- a/drivers/net/ethernet/qlogic/qede/qede_filter.c
+++ b/drivers/net/ethernet/qlogic/qede/qede_filter.c
@@ -98,10 +98,18 @@ static void qede_configure_arfs_fltr(struct qede_dev *edev,
 				     u16 rxq_id, bool add_fltr)
 {
 	const struct qed_eth_ops *op = edev->ops;
+	struct qed_ntuple_filter_params params;
 
 	if (n->used)
 		return;
 
+	memset(&params, 0, sizeof(params));
+
+	params.addr = n->mapping;
+	params.length = n->buf_len;
+	params.qid = rxq_id;
+	params.b_is_add = add_fltr;
+
 	DP_VERBOSE(edev, NETIF_MSG_RX_STATUS,
 		   "%s arfs filter flow_id=%d, sw_id=%d, src_port=%d, dst_port=%d, rxq=%d\n",
 		   add_fltr ? "Adding" : "Deleting",
@@ -110,8 +118,7 @@ static void qede_configure_arfs_fltr(struct qede_dev *edev,
 
 	n->used = true;
 	n->filter_op = add_fltr;
-	op->ntuple_filter_config(edev->cdev, n, n->mapping, n->buf_len, 0,
-				 rxq_id, add_fltr);
+	op->ntuple_filter_config(edev->cdev, n, &params);
 }
 
 static void
@@ -141,7 +148,10 @@ qede_enqueue_fltr_and_config_searcher(struct qede_dev *edev,
 	edev->arfs->filter_count++;
 
 	if (edev->arfs->filter_count == 1 && !edev->arfs->enable) {
-		edev->ops->configure_arfs_searcher(edev->cdev, true);
+		enum qed_filter_config_mode mode;
+
+		mode = QED_FILTER_CONFIG_MODE_5_TUPLE;
+		edev->ops->configure_arfs_searcher(edev->cdev, mode);
 		edev->arfs->enable = true;
 	}
 
@@ -160,8 +170,11 @@ qede_dequeue_fltr_and_config_searcher(struct qede_dev *edev,
 	edev->arfs->filter_count--;
 
 	if (!edev->arfs->filter_count && edev->arfs->enable) {
+		enum qed_filter_config_mode mode;
+
+		mode = QED_FILTER_CONFIG_MODE_DISABLE;
 		edev->arfs->enable = false;
-		edev->ops->configure_arfs_searcher(edev->cdev, false);
+		edev->ops->configure_arfs_searcher(edev->cdev, mode);
 	}
 }
 
@@ -255,8 +268,11 @@ void qede_process_arfs_filters(struct qede_dev *edev, bool free_fltr)
 
 	if (!edev->arfs->filter_count) {
 		if (edev->arfs->enable) {
+			enum qed_filter_config_mode mode;
+
+			mode = QED_FILTER_CONFIG_MODE_DISABLE;
 			edev->arfs->enable = false;
-			edev->ops->configure_arfs_searcher(edev->cdev, false);
+			edev->ops->configure_arfs_searcher(edev->cdev, mode);
 		}
 #ifdef CONFIG_RFS_ACCEL
 	} else {
@@ -895,19 +911,26 @@ static void qede_set_features_reload(struct qede_dev *edev,
 	edev->ndev->features = args->u.features;
 }
 
+netdev_features_t qede_fix_features(struct net_device *dev,
+				    netdev_features_t features)
+{
+	struct qede_dev *edev = netdev_priv(dev);
+
+	if (edev->xdp_prog || edev->ndev->mtu > PAGE_SIZE ||
+	    !(features & NETIF_F_GRO))
+		features &= ~NETIF_F_GRO_HW;
+
+	return features;
+}
+
 int qede_set_features(struct net_device *dev, netdev_features_t features)
 {
 	struct qede_dev *edev = netdev_priv(dev);
 	netdev_features_t changes = features ^ dev->features;
 	bool need_reload = false;
 
-	/* No action needed if hardware GRO is disabled during driver load */
-	if (changes & NETIF_F_GRO) {
-		if (dev->features & NETIF_F_GRO)
-			need_reload = !edev->gro_disable;
-		else
-			need_reload = edev->gro_disable;
-	}
+	if (changes & NETIF_F_GRO_HW)
+		need_reload = true;
 
 	if (need_reload) {
 		struct qede_reload_args args;
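Two refactors meet in this file: the arfs path now fills a zeroed struct qed_ntuple_filter_params instead of passing six positional arguments, and qede_fix_features() implements the ndo_fix_features contract of masking unsupported bits (here NETIF_F_GRO_HW) out of a requested feature set before ndo_set_features runs. A minimal sketch of the parameter-struct idiom, with hypothetical names:

	#include <linux/string.h>
	#include <linux/types.h>

	struct example_fltr_params {
		dma_addr_t addr;
		u16 length;
		u16 qid;
		bool b_is_add;
	};

	int example_filter_config(const struct example_fltr_params *p);

	static void example_add_filter(dma_addr_t map, u16 len, u16 rxq)
	{
		struct example_fltr_params params;

		memset(&params, 0, sizeof(params));	/* new fields start zeroed */
		params.addr = map;
		params.length = len;
		params.qid = rxq;
		params.b_is_add = true;
		example_filter_config(&params);
	}

The win of the struct form is that new members can be added without touching every caller; memset() keeps callers forward-compatible as the struct grows.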
diff --git a/drivers/net/ethernet/qlogic/qede/qede_fp.c b/drivers/net/ethernet/qlogic/qede/qede_fp.c
index 48ec4c56cddf..dafc079ab6b9 100644
--- a/drivers/net/ethernet/qlogic/qede/qede_fp.c
+++ b/drivers/net/ethernet/qlogic/qede/qede_fp.c
@@ -1006,6 +1006,7 @@ static bool qede_rx_xdp(struct qede_dev *edev,
 	xdp.data = xdp.data_hard_start + *data_offset;
 	xdp_set_data_meta_invalid(&xdp);
 	xdp.data_end = xdp.data + *len;
+	xdp.rxq = &rxq->xdp_rxq;
 
 	/* Queues always have a full reset currently, so for the time
 	 * being until there's atomic program replace just mark read
diff --git a/drivers/net/ethernet/qlogic/qede/qede_main.c b/drivers/net/ethernet/qlogic/qede/qede_main.c
index 8f9b3eb82137..2db70eabddfe 100644
--- a/drivers/net/ethernet/qlogic/qede/qede_main.c
+++ b/drivers/net/ethernet/qlogic/qede/qede_main.c
@@ -545,6 +545,7 @@ static const struct net_device_ops qede_netdev_ops = {
 #endif
 	.ndo_vlan_rx_add_vid = qede_vlan_rx_add_vid,
 	.ndo_vlan_rx_kill_vid = qede_vlan_rx_kill_vid,
+	.ndo_fix_features = qede_fix_features,
 	.ndo_set_features = qede_set_features,
 	.ndo_get_stats64 = qede_get_stats64,
 #ifdef CONFIG_QED_SRIOV
@@ -572,6 +573,7 @@ static const struct net_device_ops qede_netdev_vf_ops = {
 	.ndo_change_mtu = qede_change_mtu,
 	.ndo_vlan_rx_add_vid = qede_vlan_rx_add_vid,
 	.ndo_vlan_rx_kill_vid = qede_vlan_rx_kill_vid,
+	.ndo_fix_features = qede_fix_features,
 	.ndo_set_features = qede_set_features,
 	.ndo_get_stats64 = qede_get_stats64,
 	.ndo_udp_tunnel_add = qede_udp_tunnel_add,
@@ -589,6 +591,7 @@ static const struct net_device_ops qede_netdev_vf_xdp_ops = {
 	.ndo_change_mtu = qede_change_mtu,
 	.ndo_vlan_rx_add_vid = qede_vlan_rx_add_vid,
 	.ndo_vlan_rx_kill_vid = qede_vlan_rx_kill_vid,
+	.ndo_fix_features = qede_fix_features,
 	.ndo_set_features = qede_set_features,
 	.ndo_get_stats64 = qede_get_stats64,
 	.ndo_udp_tunnel_add = qede_udp_tunnel_add,
@@ -676,7 +679,7 @@ static void qede_init_ndev(struct qede_dev *edev)
 	ndev->priv_flags |= IFF_UNICAST_FLT;
 
 	/* user-changeable features */
-	hw_features = NETIF_F_GRO | NETIF_F_SG |
+	hw_features = NETIF_F_GRO | NETIF_F_GRO_HW | NETIF_F_SG |
 		      NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
 		      NETIF_F_TSO | NETIF_F_TSO6;
 
@@ -762,6 +765,12 @@ static void qede_free_fp_array(struct qede_dev *edev)
 			fp = &edev->fp_array[i];
 
 			kfree(fp->sb_info);
+			/* Handle the mem-alloc failure case where qede_init_fp
+			 * didn't register xdp_rxq_info yet.
+			 * This implicitly covers only (fp->type & QEDE_FASTPATH_RX).
+			 */
+			if (fp->rxq && xdp_rxq_info_is_reg(&fp->rxq->xdp_rxq))
+				xdp_rxq_info_unreg(&fp->rxq->xdp_rxq);
 			kfree(fp->rxq);
 			kfree(fp->xdp_tx);
 			kfree(fp->txq);
@@ -1068,10 +1077,6 @@ static void __qede_remove(struct pci_dev *pdev, enum qede_remove_mode mode)
 
 	pci_set_drvdata(pdev, NULL);
 
-	/* Release edev's reference to XDP's bpf if such exist */
-	if (edev->xdp_prog)
-		bpf_prog_put(edev->xdp_prog);
-
 	/* Use global ops since we've freed edev */
 	qed_ops->common->slowpath_stop(cdev);
 	if (system_state == SYSTEM_POWER_OFF)
@@ -1148,7 +1153,7 @@ static void qede_free_mem_sb(struct qede_dev *edev, struct qed_sb_info *sb_info,
 static int qede_alloc_mem_sb(struct qede_dev *edev,
 			     struct qed_sb_info *sb_info, u16 sb_id)
 {
-	struct status_block *sb_virt;
+	struct status_block_e4 *sb_virt;
 	dma_addr_t sb_phys;
 	int rc;
 
@@ -1232,18 +1237,9 @@ static int qede_alloc_sge_mem(struct qede_dev *edev, struct qede_rx_queue *rxq)
 	dma_addr_t mapping;
 	int i;
 
-	/* Don't perform FW aggregations in case of XDP */
-	if (edev->xdp_prog)
-		edev->gro_disable = 1;
-
 	if (edev->gro_disable)
 		return 0;
 
-	if (edev->ndev->mtu > PAGE_SIZE) {
-		edev->gro_disable = 1;
-		return 0;
-	}
-
 	for (i = 0; i < ETH_TPA_MAX_AGGS_NUM; i++) {
 		struct qede_agg_info *tpa_info = &rxq->tpa_info[i];
 		struct sw_rx_data *replace_buf = &tpa_info->buffer;
@@ -1273,6 +1269,7 @@ static int qede_alloc_sge_mem(struct qede_dev *edev, struct qede_rx_queue *rxq)
 err:
 	qede_free_sge_mem(edev, rxq);
 	edev->gro_disable = 1;
+	edev->ndev->features &= ~NETIF_F_GRO_HW;
 	return -ENOMEM;
 }
 
@@ -1502,6 +1499,10 @@ static void qede_init_fp(struct qede_dev *edev)
 			else
 				fp->rxq->data_direction = DMA_FROM_DEVICE;
 			fp->rxq->dev = &edev->pdev->dev;
+
+			/* Driver has no error path from here */
+			WARN_ON(xdp_rxq_info_reg(&fp->rxq->xdp_rxq, edev->ndev,
+						 fp->rxq->rxq_id) < 0);
 		}
 
 		if (fp->type & QEDE_FASTPATH_TX) {
@@ -1515,7 +1516,7 @@ static void qede_init_fp(struct qede_dev *edev)
 			 edev->ndev->name, queue_id);
 	}
 
-	edev->gro_disable = !(edev->ndev->features & NETIF_F_GRO);
+	edev->gro_disable = !(edev->ndev->features & NETIF_F_GRO_HW);
 }
 
 static int qede_set_real_num_queues(struct qede_dev *edev)
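qede now registers an xdp_rxq_info per RX queue at init time and unregisters it on teardown; the guard with xdp_rxq_info_is_reg() in qede_free_fp_array() matters because an allocation failure can reach the free path before registration happened. A minimal sketch of that lifecycle, with hypothetical structures:

	#include <net/xdp.h>

	struct example_rxq {
		struct xdp_rxq_info xdp_rxq;
		u16 rxq_id;
	};

	static int example_rxq_init(struct example_rxq *rxq,
				    struct net_device *ndev)
	{
		/* Must succeed before xdp_buff.rxq may point at it */
		return xdp_rxq_info_reg(&rxq->xdp_rxq, ndev, rxq->rxq_id);
	}

	static void example_rxq_free(struct example_rxq *rxq)
	{
		if (xdp_rxq_info_is_reg(&rxq->xdp_rxq))
			xdp_rxq_info_unreg(&rxq->xdp_rxq);
	}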
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c
index f7080d0ab874..46b0372dd032 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c
@@ -3891,7 +3891,7 @@ static void qlcnic_83xx_flush_mbx_queue(struct qlcnic_adapter *adapter)
 	struct list_head *head = &mbx->cmd_q;
 	struct qlcnic_cmd_args *cmd = NULL;
 
-	spin_lock(&mbx->queue_lock);
+	spin_lock_bh(&mbx->queue_lock);
 
 	while (!list_empty(head)) {
 		cmd = list_entry(head->next, struct qlcnic_cmd_args, list);
@@ -3902,7 +3902,7 @@ static void qlcnic_83xx_flush_mbx_queue(struct qlcnic_adapter *adapter)
 		qlcnic_83xx_notify_cmd_completion(adapter, cmd);
 	}
 
-	spin_unlock(&mbx->queue_lock);
+	spin_unlock_bh(&mbx->queue_lock);
 }
 
 static int qlcnic_83xx_check_mbx_status(struct qlcnic_adapter *adapter)
@@ -3938,12 +3938,12 @@ static void qlcnic_83xx_dequeue_mbx_cmd(struct qlcnic_adapter *adapter,
 {
 	struct qlcnic_mailbox *mbx = adapter->ahw->mailbox;
 
-	spin_lock(&mbx->queue_lock);
+	spin_lock_bh(&mbx->queue_lock);
 
 	list_del(&cmd->list);
 	mbx->num_cmds--;
 
-	spin_unlock(&mbx->queue_lock);
+	spin_unlock_bh(&mbx->queue_lock);
 
 	qlcnic_83xx_notify_cmd_completion(adapter, cmd);
 }
@@ -4008,7 +4008,7 @@ static int qlcnic_83xx_enqueue_mbx_cmd(struct qlcnic_adapter *adapter,
 		init_completion(&cmd->completion);
 		cmd->rsp_opcode = QLC_83XX_MBX_RESPONSE_UNKNOWN;
 
-		spin_lock(&mbx->queue_lock);
+		spin_lock_bh(&mbx->queue_lock);
 
 		list_add_tail(&cmd->list, &mbx->cmd_q);
 		mbx->num_cmds++;
@@ -4016,7 +4016,7 @@ static int qlcnic_83xx_enqueue_mbx_cmd(struct qlcnic_adapter *adapter,
 		*timeout = cmd->total_cmds * QLC_83XX_MBX_TIMEOUT;
 		queue_work(mbx->work_q, &mbx->work);
 
-		spin_unlock(&mbx->queue_lock);
+		spin_unlock_bh(&mbx->queue_lock);
 
 		return 0;
 	}
@@ -4112,15 +4112,15 @@ static void qlcnic_83xx_mailbox_worker(struct work_struct *work)
 		mbx->rsp_status = QLC_83XX_MBX_RESPONSE_WAIT;
 		spin_unlock_irqrestore(&mbx->aen_lock, flags);
 
-		spin_lock(&mbx->queue_lock);
+		spin_lock_bh(&mbx->queue_lock);
 
 		if (list_empty(head)) {
-			spin_unlock(&mbx->queue_lock);
+			spin_unlock_bh(&mbx->queue_lock);
 			return;
 		}
 		cmd = list_entry(head->next, struct qlcnic_cmd_args, list);
 
-		spin_unlock(&mbx->queue_lock);
+		spin_unlock_bh(&mbx->queue_lock);
 
 		mbx_ops->encode_cmd(adapter, cmd);
 		mbx_ops->nofity_fw(adapter, QLC_83XX_MBX_REQUEST);
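The qlcnic hunks convert every queue_lock acquisition from spin_lock() to spin_lock_bh(). The mailbox command queue is evidently also touched from bottom-half context (hence the fix): a process-context holder must disable BHs, otherwise a softirq arriving on the same CPU could spin on the lock that CPU already holds and deadlock. A minimal sketch of the rule:

	#include <linux/spinlock.h>
	#include <linux/list.h>

	static DEFINE_SPINLOCK(example_cmd_lock);	/* also taken in BH context */
	static LIST_HEAD(example_cmd_q);

	/* Process context: block BHs for the critical section */
	static void example_enqueue(struct list_head *cmd)
	{
		spin_lock_bh(&example_cmd_lock);
		list_add_tail(cmd, &example_cmd_q);
		spin_unlock_bh(&example_cmd_lock);
	}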
diff --git a/drivers/net/ethernet/qualcomm/emac/emac-mac.h b/drivers/net/ethernet/qualcomm/emac/emac-mac.h
index 5028fb4bec2b..4beedb8faa1e 100644
--- a/drivers/net/ethernet/qualcomm/emac/emac-mac.h
+++ b/drivers/net/ethernet/qualcomm/emac/emac-mac.h
@@ -114,8 +114,9 @@ struct emac_tpd {
 #define TPD_INSTC_SET(tpd, val)		BITS_SET((tpd)->word[3], 17, 17, val)
 /* High 14-bit buffer address, so the 64-bit address is
  * {DESC_CTRL_11_TX_DATA_HIADDR[17:0], (register) BUFFER_ADDR_H, BUFFER_ADDR_L}
+ * Extend TPD_BUFFER_ADDR_H to [31, 18], because we never enable timestamping.
  */
-#define TPD_BUFFER_ADDR_H_SET(tpd, val)	BITS_SET((tpd)->word[3], 18, 30, val)
+#define TPD_BUFFER_ADDR_H_SET(tpd, val)	BITS_SET((tpd)->word[3], 18, 31, val)
 /* Format D. Word offset from the 1st byte of this packet to start to calculate
  * the custom checksum.
  */
diff --git a/drivers/net/ethernet/qualcomm/emac/emac.c b/drivers/net/ethernet/qualcomm/emac/emac.c
index 38c924bdd32e..13235baf4766 100644
--- a/drivers/net/ethernet/qualcomm/emac/emac.c
+++ b/drivers/net/ethernet/qualcomm/emac/emac.c
@@ -615,8 +615,11 @@ static int emac_probe(struct platform_device *pdev)
 	u32 reg;
 	int ret;
 
-	/* The TPD buffer address is limited to 45 bits. */
-	ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(45));
+	/* The TPD buffer address is limited to:
+	 * 1. PTP:	45 bits (the driver doesn't support PTP yet).
+	 * 2. Non-PTP:	46 bits.
+	 */
+	ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(46));
 	if (ret) {
 		dev_err(&pdev->dev, "could not set DMA mask\n");
 		return ret;
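dma_set_mask_and_coherent() declares the widest bus address the device may generate, covering both streaming and coherent allocations; the EMAC gains one bit because TPD_BUFFER_ADDR_H now extends to bit 31 when PTP timestamping is unused. A minimal probe-time sketch of the usual fallback pattern:

	#include <linux/dma-mapping.h>

	static int example_set_dma_caps(struct device *dev)
	{
		/* Prefer the wide mask, fall back to 32-bit addressing */
		if (dma_set_mask_and_coherent(dev, DMA_BIT_MASK(46)))
			return dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32));
		return 0;
	}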
diff --git a/drivers/net/ethernet/qualcomm/rmnet/rmnet_config.c b/drivers/net/ethernet/qualcomm/rmnet/rmnet_config.c
index df21e900f874..7e7704daf5f1 100644
--- a/drivers/net/ethernet/qualcomm/rmnet/rmnet_config.c
+++ b/drivers/net/ethernet/qualcomm/rmnet/rmnet_config.c
@@ -143,11 +143,7 @@ static int rmnet_newlink(struct net *src_net, struct net_device *dev,
 			 struct nlattr *tb[], struct nlattr *data[],
 			 struct netlink_ext_ack *extack)
 {
-	int ingress_format = RMNET_INGRESS_FORMAT_DEMUXING |
-			     RMNET_INGRESS_FORMAT_DEAGGREGATION |
-			     RMNET_INGRESS_FORMAT_MAP;
-	int egress_format = RMNET_EGRESS_FORMAT_MUXING |
-			    RMNET_EGRESS_FORMAT_MAP;
+	u32 data_format = RMNET_INGRESS_FORMAT_DEAGGREGATION;
 	struct net_device *real_dev;
 	int mode = RMNET_EPMODE_VND;
 	struct rmnet_endpoint *ep;
@@ -181,13 +177,20 @@ static int rmnet_newlink(struct net *src_net, struct net_device *dev,
 	if (err)
 		goto err2;
 
-	netdev_dbg(dev, "data format [ingress 0x%08X] [egress 0x%08X]\n",
-		   ingress_format, egress_format);
-	port->egress_data_format = egress_format;
-	port->ingress_data_format = ingress_format;
 	port->rmnet_mode = mode;
 
 	hlist_add_head_rcu(&ep->hlnode, &port->muxed_ep[mux_id]);
+
+	if (data[IFLA_VLAN_FLAGS]) {
+		struct ifla_vlan_flags *flags;
+
+		flags = nla_data(data[IFLA_VLAN_FLAGS]);
+		data_format = flags->flags & flags->mask;
+	}
+
+	netdev_dbg(dev, "data format [0x%08X]\n", data_format);
+	port->data_format = data_format;
+
 	return 0;
 
 err2:
@@ -317,9 +320,49 @@ static int rmnet_rtnl_validate(struct nlattr *tb[], struct nlattr *data[],
 	return 0;
 }
 
+static int rmnet_changelink(struct net_device *dev, struct nlattr *tb[],
+			    struct nlattr *data[],
+			    struct netlink_ext_ack *extack)
+{
+	struct rmnet_priv *priv = netdev_priv(dev);
+	struct net_device *real_dev;
+	struct rmnet_endpoint *ep;
+	struct rmnet_port *port;
+	u16 mux_id;
+
+	real_dev = __dev_get_by_index(dev_net(dev),
+				      nla_get_u32(tb[IFLA_LINK]));
+
+	if (!real_dev || !dev || !rmnet_is_real_dev_registered(real_dev))
+		return -ENODEV;
+
+	port = rmnet_get_port_rtnl(real_dev);
+
+	if (data[IFLA_VLAN_ID]) {
+		mux_id = nla_get_u16(data[IFLA_VLAN_ID]);
+		ep = rmnet_get_endpoint(port, priv->mux_id);
+
+		hlist_del_init_rcu(&ep->hlnode);
+		hlist_add_head_rcu(&ep->hlnode, &port->muxed_ep[mux_id]);
+
+		ep->mux_id = mux_id;
+		priv->mux_id = mux_id;
+	}
+
+	if (data[IFLA_VLAN_FLAGS]) {
+		struct ifla_vlan_flags *flags;
+
+		flags = nla_data(data[IFLA_VLAN_FLAGS]);
+		port->data_format = flags->flags & flags->mask;
+	}
+
+	return 0;
+}
+
 static size_t rmnet_get_size(const struct net_device *dev)
 {
-	return nla_total_size(2); /* IFLA_VLAN_ID */
+	return nla_total_size(2) /* IFLA_VLAN_ID */ +
+	       nla_total_size(sizeof(struct ifla_vlan_flags)); /* IFLA_VLAN_FLAGS */
 }
 
 struct rtnl_link_ops rmnet_link_ops __read_mostly = {
@@ -331,6 +374,7 @@ struct rtnl_link_ops rmnet_link_ops __read_mostly = {
 	.newlink	= rmnet_newlink,
 	.dellink	= rmnet_dellink,
 	.get_size	= rmnet_get_size,
+	.changelink     = rmnet_changelink,
 };
 
 /* Needs either rcu_read_lock() or rtnl lock */
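rmnet piggybacks on the VLAN netlink attributes rather than defining its own policy: IFLA_VLAN_FLAGS carries the data-format bits, and the driver keeps flags & mask so userspace can update selected bits without clobbering the rest. A minimal decode sketch (attribute assumed already validated by the caller):

	#include <linux/if_link.h>
	#include <net/netlink.h>

	static u32 example_data_format(const struct nlattr *attr, u32 old_fmt)
	{
		const struct ifla_vlan_flags *flags;

		if (!attr)
			return old_fmt;		/* attribute absent: keep as-is */

		flags = nla_data(attr);
		return flags->flags & flags->mask;	/* masked update */
	}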
diff --git a/drivers/net/ethernet/qualcomm/rmnet/rmnet_config.h b/drivers/net/ethernet/qualcomm/rmnet/rmnet_config.h
index c19259eea99e..00e4634100d3 100644
--- a/drivers/net/ethernet/qualcomm/rmnet/rmnet_config.h
+++ b/drivers/net/ethernet/qualcomm/rmnet/rmnet_config.h
@@ -32,8 +32,7 @@ struct rmnet_endpoint {
  */
 struct rmnet_port {
 	struct net_device *dev;
-	u32 ingress_data_format;
-	u32 egress_data_format;
+	u32 data_format;
 	u8 nr_rmnet_devs;
 	u8 rmnet_mode;
 	struct hlist_head muxed_ep[RMNET_MAX_LOGICAL_EP];
diff --git a/drivers/net/ethernet/qualcomm/rmnet/rmnet_handlers.c b/drivers/net/ethernet/qualcomm/rmnet/rmnet_handlers.c
index 08e4afc0ab39..601edec28c5f 100644
--- a/drivers/net/ethernet/qualcomm/rmnet/rmnet_handlers.c
+++ b/drivers/net/ethernet/qualcomm/rmnet/rmnet_handlers.c
@@ -15,6 +15,8 @@
 
 #include <linux/netdevice.h>
 #include <linux/netdev_features.h>
+#include <linux/if_arp.h>
+#include <net/sock.h>
 #include "rmnet_private.h"
 #include "rmnet_config.h"
 #include "rmnet_vnd.h"
@@ -64,19 +66,19 @@ __rmnet_map_ingress_handler(struct sk_buff *skb,
 			    struct rmnet_port *port)
 {
 	struct rmnet_endpoint *ep;
+	u16 len, pad;
 	u8 mux_id;
-	u16 len;
 
 	if (RMNET_MAP_GET_CD_BIT(skb)) {
-		if (port->ingress_data_format
-		    & RMNET_INGRESS_FORMAT_MAP_COMMANDS)
+		if (port->data_format & RMNET_INGRESS_FORMAT_MAP_COMMANDS)
 			return rmnet_map_command(skb, port);
 
 		goto free_skb;
 	}
 
 	mux_id = RMNET_MAP_GET_MUX_ID(skb);
-	len = RMNET_MAP_GET_LENGTH(skb) - RMNET_MAP_GET_PAD(skb);
+	pad = RMNET_MAP_GET_PAD(skb);
+	len = RMNET_MAP_GET_LENGTH(skb) - pad;
 
 	if (mux_id >= RMNET_MAX_LOGICAL_EP)
 		goto free_skb;
@@ -89,8 +91,14 @@ __rmnet_map_ingress_handler(struct sk_buff *skb,
 
 	/* Subtract MAP header */
 	skb_pull(skb, sizeof(struct rmnet_map_header));
-	skb_trim(skb, len);
 	rmnet_set_skb_proto(skb);
+
+	if (port->data_format & RMNET_INGRESS_FORMAT_MAP_CKSUMV4) {
+		if (!rmnet_map_checksum_downlink_packet(skb, len + pad))
+			skb->ip_summed = CHECKSUM_UNNECESSARY;
+	}
+
+	skb_trim(skb, len);
 	rmnet_deliver_skb(skb);
 	return;
 
@@ -104,8 +112,17 @@ rmnet_map_ingress_handler(struct sk_buff *skb,
 {
 	struct sk_buff *skbn;
 
-	if (port->ingress_data_format & RMNET_INGRESS_FORMAT_DEAGGREGATION) {
-		while ((skbn = rmnet_map_deaggregate(skb)) != NULL)
+	if (skb->dev->type == ARPHRD_ETHER) {
+		if (pskb_expand_head(skb, ETH_HLEN, 0, GFP_KERNEL)) {
+			kfree_skb(skb);
+			return;
+		}
+
+		skb_push(skb, ETH_HLEN);
+	}
+
+	if (port->data_format & RMNET_INGRESS_FORMAT_DEAGGREGATION) {
+		while ((skbn = rmnet_map_deaggregate(skb, port)) != NULL)
 			__rmnet_map_ingress_handler(skbn, port);
 
 		consume_skb(skb);
@@ -124,29 +141,32 @@ static int rmnet_map_egress_handler(struct sk_buff *skb,
 	additional_header_len = 0;
 	required_headroom = sizeof(struct rmnet_map_header);
 
+	if (port->data_format & RMNET_EGRESS_FORMAT_MAP_CKSUMV4) {
+		additional_header_len = sizeof(struct rmnet_map_ul_csum_header);
+		required_headroom += additional_header_len;
+	}
+
 	if (skb_headroom(skb) < required_headroom) {
 		if (pskb_expand_head(skb, required_headroom, 0, GFP_KERNEL))
 			goto fail;
 	}
 
+	if (port->data_format & RMNET_EGRESS_FORMAT_MAP_CKSUMV4)
+		rmnet_map_checksum_uplink_packet(skb, orig_dev);
+
 	map_header = rmnet_map_add_map_header(skb, additional_header_len, 0);
 	if (!map_header)
 		goto fail;
 
-	if (port->egress_data_format & RMNET_EGRESS_FORMAT_MUXING) {
-		if (mux_id == 0xff)
-			map_header->mux_id = 0;
-		else
-			map_header->mux_id = mux_id;
-	}
+	map_header->mux_id = mux_id;
 
 	skb->protocol = htons(ETH_P_MAP);
 
-	return RMNET_MAP_SUCCESS;
+	return 0;
 
 fail:
 	kfree_skb(skb);
-	return RMNET_MAP_CONSUMED;
+	return -ENOMEM;
 }
 
 static void
@@ -178,8 +198,7 @@ rx_handler_result_t rmnet_rx_handler(struct sk_buff **pskb)
 
 	switch (port->rmnet_mode) {
 	case RMNET_EPMODE_VND:
-		if (port->ingress_data_format & RMNET_INGRESS_FORMAT_MAP)
-			rmnet_map_ingress_handler(skb, port);
+		rmnet_map_ingress_handler(skb, port);
 		break;
 	case RMNET_EPMODE_BRIDGE:
 		rmnet_bridge_handler(skb, port->bridge_ep);
@@ -201,6 +220,8 @@ void rmnet_egress_handler(struct sk_buff *skb)
 	struct rmnet_priv *priv;
 	u8 mux_id;
 
+	sk_pacing_shift_update(skb->sk, 8);
+
 	orig_dev = skb->dev;
 	priv = netdev_priv(orig_dev);
 	skb->dev = priv->real_dev;
@@ -212,19 +233,8 @@ void rmnet_egress_handler(struct sk_buff *skb)
 		return;
 	}
 
-	if (port->egress_data_format & RMNET_EGRESS_FORMAT_MAP) {
-		switch (rmnet_map_egress_handler(skb, port, mux_id, orig_dev)) {
-		case RMNET_MAP_CONSUMED:
-			return;
-
-		case RMNET_MAP_SUCCESS:
-			break;
-
-		default:
-			kfree_skb(skb);
-			return;
-		}
-	}
+	if (rmnet_map_egress_handler(skb, port, mux_id, orig_dev))
+		return;
 
 	rmnet_vnd_tx_fixup(skb, orig_dev);
 
diff --git a/drivers/net/ethernet/qualcomm/rmnet/rmnet_map.h b/drivers/net/ethernet/qualcomm/rmnet/rmnet_map.h
index 3af3fe7b5457..6ce31e29136d 100644
--- a/drivers/net/ethernet/qualcomm/rmnet/rmnet_map.h
+++ b/drivers/net/ethernet/qualcomm/rmnet/rmnet_map.h
@@ -30,15 +30,6 @@ struct rmnet_map_control_command {
 	};
 }  __aligned(1);
 
-enum rmnet_map_results {
-	RMNET_MAP_SUCCESS,
-	RMNET_MAP_CONSUMED,
-	RMNET_MAP_GENERAL_FAILURE,
-	RMNET_MAP_NOT_ENABLED,
-	RMNET_MAP_FAILED_AGGREGATION,
-	RMNET_MAP_FAILED_MUX
-};
-
 enum rmnet_map_commands {
 	RMNET_MAP_COMMAND_NONE,
 	RMNET_MAP_COMMAND_FLOW_DISABLE,
@@ -56,6 +47,22 @@ struct rmnet_map_header {
 	u16 pkt_len;
 }  __aligned(1);
 
+struct rmnet_map_dl_csum_trailer {
+	u8  reserved1;
+	u8  valid:1;
+	u8  reserved2:7;
+	u16 csum_start_offset;
+	u16 csum_length;
+	__be16 csum_value;
+} __aligned(1);
+
+struct rmnet_map_ul_csum_header {
+	__be16 csum_start_offset;
+	u16 csum_insert_offset:14;
+	u16 udp_ip4_ind:1;
+	u16 csum_enabled:1;
+} __aligned(1);
+
 #define RMNET_MAP_GET_MUX_ID(Y) (((struct rmnet_map_header *) \
 				 (Y)->data)->mux_id)
 #define RMNET_MAP_GET_CD_BIT(Y) (((struct rmnet_map_header *) \
@@ -76,10 +83,13 @@ struct rmnet_map_header {
 #define RMNET_MAP_NO_PAD_BYTES        0
 #define RMNET_MAP_ADD_PAD_BYTES       1
 
-u8 rmnet_map_demultiplex(struct sk_buff *skb);
-struct sk_buff *rmnet_map_deaggregate(struct sk_buff *skb);
+struct sk_buff *rmnet_map_deaggregate(struct sk_buff *skb,
+				      struct rmnet_port *port);
 struct rmnet_map_header *rmnet_map_add_map_header(struct sk_buff *skb,
 						  int hdrlen, int pad);
 void rmnet_map_command(struct sk_buff *skb, struct rmnet_port *port);
+int rmnet_map_checksum_downlink_packet(struct sk_buff *skb, u16 len);
+void rmnet_map_checksum_uplink_packet(struct sk_buff *skb,
+				      struct net_device *orig_dev);
 
 #endif /* _RMNET_MAP_H_ */
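With MAP_CKSUMV4 enabled, the modem appends an rmnet_map_dl_csum_trailer after each downlink packet's payload and padding; the driver must account for it when deaggregating (see the packet_len adjustment in rmnet_map_deaggregate() below) and can skip software checksumming whenever the trailer's valid bit is set. A minimal sketch of locating and testing the trailer, assuming len is payload plus padding:

	#include <linux/skbuff.h>
	#include "rmnet_map.h"		/* struct rmnet_map_dl_csum_trailer */

	static int example_trailer_valid(struct sk_buff *skb, u16 len)
	{
		struct rmnet_map_dl_csum_trailer *t;

		t = (struct rmnet_map_dl_csum_trailer *)(skb->data + len);
		return t->valid ? 0 : -EINVAL;	/* HW verified the checksum? */
	}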
diff --git a/drivers/net/ethernet/qualcomm/rmnet/rmnet_map_command.c b/drivers/net/ethernet/qualcomm/rmnet/rmnet_map_command.c
index 51e604923ac1..6bc328fb88e1 100644
--- a/drivers/net/ethernet/qualcomm/rmnet/rmnet_map_command.c
+++ b/drivers/net/ethernet/qualcomm/rmnet/rmnet_map_command.c
@@ -58,11 +58,24 @@ static u8 rmnet_map_do_flow_control(struct sk_buff *skb,
 }
 
 static void rmnet_map_send_ack(struct sk_buff *skb,
-			       unsigned char type)
+			       unsigned char type,
+			       struct rmnet_port *port)
 {
 	struct rmnet_map_control_command *cmd;
 	int xmit_status;
 
+	if (port->data_format & RMNET_INGRESS_FORMAT_MAP_CKSUMV4) {
+		if (skb->len < sizeof(struct rmnet_map_header) +
+		    RMNET_MAP_GET_LENGTH(skb) +
+		    sizeof(struct rmnet_map_dl_csum_trailer)) {
+			kfree_skb(skb);
+			return;
+		}
+
+		skb_trim(skb, skb->len -
+			 sizeof(struct rmnet_map_dl_csum_trailer));
+	}
+
 	skb->protocol = htons(ETH_P_MAP);
 
 	cmd = RMNET_MAP_GET_CMD_START(skb);
@@ -100,5 +113,5 @@ void rmnet_map_command(struct sk_buff *skb, struct rmnet_port *port)
 		break;
 	}
 	if (rc == RMNET_MAP_COMMAND_ACK)
-		rmnet_map_send_ack(skb, rc);
+		rmnet_map_send_ack(skb, rc, port);
 }
diff --git a/drivers/net/ethernet/qualcomm/rmnet/rmnet_map_data.c b/drivers/net/ethernet/qualcomm/rmnet/rmnet_map_data.c
index 86b8c758f94e..c74a6c56d315 100644
--- a/drivers/net/ethernet/qualcomm/rmnet/rmnet_map_data.c
+++ b/drivers/net/ethernet/qualcomm/rmnet/rmnet_map_data.c
@@ -14,6 +14,9 @@
  */
 
 #include <linux/netdevice.h>
+#include <linux/ip.h>
+#include <linux/ipv6.h>
+#include <net/ip6_checksum.h>
 #include "rmnet_config.h"
 #include "rmnet_map.h"
 #include "rmnet_private.h"
@@ -21,6 +24,233 @@
 #define RMNET_MAP_DEAGGR_SPACING  64
 #define RMNET_MAP_DEAGGR_HEADROOM (RMNET_MAP_DEAGGR_SPACING / 2)
 
+static __sum16 *rmnet_map_get_csum_field(unsigned char protocol,
+					 const void *txporthdr)
+{
+	__sum16 *check = NULL;
+
+	switch (protocol) {
+	case IPPROTO_TCP:
+		check = &(((struct tcphdr *)txporthdr)->check);
+		break;
+
+	case IPPROTO_UDP:
+		check = &(((struct udphdr *)txporthdr)->check);
+		break;
+
+	default:
+		check = NULL;
+		break;
+	}
+
+	return check;
+}
+
+static int
+rmnet_map_ipv4_dl_csum_trailer(struct sk_buff *skb,
+			       struct rmnet_map_dl_csum_trailer *csum_trailer)
+{
+	__sum16 *csum_field, csum_temp, pseudo_csum, hdr_csum, ip_payload_csum;
+	u16 csum_value, csum_value_final;
+	struct iphdr *ip4h;
+	void *txporthdr;
+	__be16 addend;
+
+	ip4h = (struct iphdr *)(skb->data);
+	if ((ntohs(ip4h->frag_off) & IP_MF) ||
+	    ((ntohs(ip4h->frag_off) & IP_OFFSET) > 0))
+		return -EOPNOTSUPP;
+
+	txporthdr = skb->data + ip4h->ihl * 4;
+
+	csum_field = rmnet_map_get_csum_field(ip4h->protocol, txporthdr);
+
+	if (!csum_field)
+		return -EPROTONOSUPPORT;
+
+	/* RFC 768 - Skip IPv4 UDP packets where sender checksum field is 0 */
+	if (*csum_field == 0 && ip4h->protocol == IPPROTO_UDP)
+		return 0;
+
+	csum_value = ~ntohs(csum_trailer->csum_value);
+	hdr_csum = ~ip_fast_csum(ip4h, (int)ip4h->ihl);
+	ip_payload_csum = csum16_sub((__force __sum16)csum_value,
+				     (__force __be16)hdr_csum);
+
+	pseudo_csum = ~csum_tcpudp_magic(ip4h->saddr, ip4h->daddr,
+					 ntohs(ip4h->tot_len) - ip4h->ihl * 4,
+					 ip4h->protocol, 0);
+	addend = (__force __be16)ntohs((__force __be16)pseudo_csum);
+	pseudo_csum = csum16_add(ip_payload_csum, addend);
+
+	addend = (__force __be16)ntohs((__force __be16)*csum_field);
+	csum_temp = ~csum16_sub(pseudo_csum, addend);
+	csum_value_final = (__force u16)csum_temp;
+
+	if (unlikely(csum_value_final == 0)) {
+		switch (ip4h->protocol) {
+		case IPPROTO_UDP:
+			/* RFC 768 - DL4 1's complement rule for UDP csum 0 */
+			csum_value_final = ~csum_value_final;
+			break;
+
+		case IPPROTO_TCP:
+			/* DL4 Non-RFC compliant TCP checksum found */
+			if (*csum_field == (__force __sum16)0xFFFF)
+				csum_value_final = ~csum_value_final;
+			break;
+		}
+	}
+
+	if (csum_value_final == ntohs((__force __be16)*csum_field))
+		return 0;
+	else
+		return -EINVAL;
+}
+
+#if IS_ENABLED(CONFIG_IPV6)
+static int
+rmnet_map_ipv6_dl_csum_trailer(struct sk_buff *skb,
+			       struct rmnet_map_dl_csum_trailer *csum_trailer)
+{
+	__sum16 *csum_field, ip6_payload_csum, pseudo_csum, csum_temp;
+	u16 csum_value, csum_value_final;
+	__be16 ip6_hdr_csum, addend;
+	struct ipv6hdr *ip6h;
+	void *txporthdr;
+	u32 length;
+
+	ip6h = (struct ipv6hdr *)(skb->data);
+
+	txporthdr = skb->data + sizeof(struct ipv6hdr);
+	csum_field = rmnet_map_get_csum_field(ip6h->nexthdr, txporthdr);
+
+	if (!csum_field)
+		return -EPROTONOSUPPORT;
+
+	csum_value = ~ntohs(csum_trailer->csum_value);
+	ip6_hdr_csum = (__force __be16)
+			~ntohs((__force __be16)ip_compute_csum(ip6h,
+			       (int)(txporthdr - (void *)(skb->data))));
+	ip6_payload_csum = csum16_sub((__force __sum16)csum_value,
+				      ip6_hdr_csum);
+
+	length = (ip6h->nexthdr == IPPROTO_UDP) ?
+		 ntohs(((struct udphdr *)txporthdr)->len) :
+		 ntohs(ip6h->payload_len);
+	pseudo_csum = ~(csum_ipv6_magic(&ip6h->saddr, &ip6h->daddr,
+			     length, ip6h->nexthdr, 0));
+	addend = (__force __be16)ntohs((__force __be16)pseudo_csum);
+	pseudo_csum = csum16_add(ip6_payload_csum, addend);
+
+	addend = (__force __be16)ntohs((__force __be16)*csum_field);
+	csum_temp = ~csum16_sub(pseudo_csum, addend);
+	csum_value_final = (__force u16)csum_temp;
+
+	if (unlikely(csum_value_final == 0)) {
+		switch (ip6h->nexthdr) {
+		case IPPROTO_UDP:
+			/* RFC 2460 section 8.1
+			 * DL6 One's complement rule for UDP checksum 0
+			 */
+			csum_value_final = ~csum_value_final;
+			break;
+
+		case IPPROTO_TCP:
+			/* DL6 Non-RFC compliant TCP checksum found */
+			if (*csum_field == (__force __sum16)0xFFFF)
+				csum_value_final = ~csum_value_final;
+			break;
+		}
+	}
+
+	if (csum_value_final == ntohs((__force __be16)*csum_field))
+		return 0;
+	else
+		return -EINVAL;
+}
+#endif
+
+static void rmnet_map_complement_ipv4_txporthdr_csum_field(void *iphdr)
+{
+	struct iphdr *ip4h = (struct iphdr *)iphdr;
+	void *txphdr;
+	u16 *csum;
+
+	txphdr = iphdr + ip4h->ihl * 4;
+
+	if (ip4h->protocol == IPPROTO_TCP || ip4h->protocol == IPPROTO_UDP) {
+		csum = (u16 *)rmnet_map_get_csum_field(ip4h->protocol, txphdr);
+		*csum = ~(*csum);
+	}
+}
+
+static void
+rmnet_map_ipv4_ul_csum_header(void *iphdr,
+			      struct rmnet_map_ul_csum_header *ul_header,
+			      struct sk_buff *skb)
+{
+	struct iphdr *ip4h = (struct iphdr *)iphdr;
+	__be16 *hdr = (__be16 *)ul_header, offset;
+
+	offset = htons((__force u16)(skb_transport_header(skb) -
+				     (unsigned char *)iphdr));
+	ul_header->csum_start_offset = offset;
+	ul_header->csum_insert_offset = skb->csum_offset;
+	ul_header->csum_enabled = 1;
+	if (ip4h->protocol == IPPROTO_UDP)
+		ul_header->udp_ip4_ind = 1;
+	else
+		ul_header->udp_ip4_ind = 0;
+
+	/* Changing remaining fields to network order */
+	hdr++;
+	*hdr = htons((__force u16)*hdr);
+
+	skb->ip_summed = CHECKSUM_NONE;
+
+	rmnet_map_complement_ipv4_txporthdr_csum_field(iphdr);
+}
+
+#if IS_ENABLED(CONFIG_IPV6)
+static void rmnet_map_complement_ipv6_txporthdr_csum_field(void *ip6hdr)
+{
+	struct ipv6hdr *ip6h = (struct ipv6hdr *)ip6hdr;
+	void *txphdr;
+	u16 *csum;
+
+	txphdr = ip6hdr + sizeof(struct ipv6hdr);
+
+	if (ip6h->nexthdr == IPPROTO_TCP || ip6h->nexthdr == IPPROTO_UDP) {
+		csum = (u16 *)rmnet_map_get_csum_field(ip6h->nexthdr, txphdr);
+		*csum = ~(*csum);
+	}
+}
+
+static void
+rmnet_map_ipv6_ul_csum_header(void *ip6hdr,
+			      struct rmnet_map_ul_csum_header *ul_header,
+			      struct sk_buff *skb)
+{
+	__be16 *hdr = (__be16 *)ul_header, offset;
+
+	offset = htons((__force u16)(skb_transport_header(skb) -
+				     (unsigned char *)ip6hdr));
+	ul_header->csum_start_offset = offset;
+	ul_header->csum_insert_offset = skb->csum_offset;
+	ul_header->csum_enabled = 1;
+	ul_header->udp_ip4_ind = 0;
+
+	/* Changing remaining fields to network order */
+	hdr++;
+	*hdr = htons((__force u16)*hdr);
+
+	skb->ip_summed = CHECKSUM_NONE;
+
+	rmnet_map_complement_ipv6_txporthdr_csum_field(ip6hdr);
+}
+#endif
+
 /* Adds MAP header to front of skb->data
  * Padding is calculated and set appropriately in MAP header. Mux ID is
  * initialized to 0.
@@ -32,9 +262,6 @@ struct rmnet_map_header *rmnet_map_add_map_header(struct sk_buff *skb,
 	u32 padding, map_datalen;
 	u8 *padbytes;
 
-	if (skb_headroom(skb) < sizeof(struct rmnet_map_header))
-		return NULL;
-
 	map_datalen = skb->len - hdrlen;
 	map_header = (struct rmnet_map_header *)
 			skb_push(skb, sizeof(struct rmnet_map_header));
@@ -69,7 +296,8 @@ done:
  * returned, indicating that there are no more packets to deaggregate. Caller
  * is responsible for freeing the original skb.
  */
-struct sk_buff *rmnet_map_deaggregate(struct sk_buff *skb)
+struct sk_buff *rmnet_map_deaggregate(struct sk_buff *skb,
+				      struct rmnet_port *port)
 {
 	struct rmnet_map_header *maph;
 	struct sk_buff *skbn;
@@ -81,6 +309,9 @@ struct sk_buff *rmnet_map_deaggregate(struct sk_buff *skb)
 	maph = (struct rmnet_map_header *)skb->data;
 	packet_len = ntohs(maph->pkt_len) + sizeof(struct rmnet_map_header);
 
+	if (port->data_format & RMNET_INGRESS_FORMAT_MAP_CKSUMV4)
+		packet_len += sizeof(struct rmnet_map_dl_csum_trailer);
+
 	if (((int)skb->len - (int)packet_len) < 0)
 		return NULL;
 
@@ -100,3 +331,73 @@ struct sk_buff *rmnet_map_deaggregate(struct sk_buff *skb)
 
 	return skbn;
 }
+
+/* Validates packet checksums. Function takes a pointer to
+ * the beginning of a buffer which contains the IP payload +
+ * padding + checksum trailer.
+ * Only IPv4 and IPv6 are supported along with TCP & UDP.
+ * Fragmented or tunneled packets are not supported.
+ */
+int rmnet_map_checksum_downlink_packet(struct sk_buff *skb, u16 len)
+{
+	struct rmnet_map_dl_csum_trailer *csum_trailer;
+
+	if (unlikely(!(skb->dev->features & NETIF_F_RXCSUM)))
+		return -EOPNOTSUPP;
+
+	csum_trailer = (struct rmnet_map_dl_csum_trailer *)(skb->data + len);
+
+	if (!csum_trailer->valid)
+		return -EINVAL;
+
+	if (skb->protocol == htons(ETH_P_IP))
+		return rmnet_map_ipv4_dl_csum_trailer(skb, csum_trailer);
+	else if (skb->protocol == htons(ETH_P_IPV6))
+#if IS_ENABLED(CONFIG_IPV6)
+		return rmnet_map_ipv6_dl_csum_trailer(skb, csum_trailer);
+#else
+		return -EPROTONOSUPPORT;
+#endif
+
+	return 0;
+}
+
+/* Generates UL checksum meta info header for IPv4 and IPv6 over TCP and UDP
+ * packets that are supported for UL checksum offload.
+ */
+void rmnet_map_checksum_uplink_packet(struct sk_buff *skb,
+				      struct net_device *orig_dev)
+{
+	struct rmnet_map_ul_csum_header *ul_header;
+	void *iphdr;
+
+	ul_header = (struct rmnet_map_ul_csum_header *)
+		    skb_push(skb, sizeof(struct rmnet_map_ul_csum_header));
+
+	if (unlikely(!(orig_dev->features &
+		     (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM))))
+		goto sw_csum;
+
+	if (skb->ip_summed == CHECKSUM_PARTIAL) {
+		iphdr = (char *)ul_header +
+			sizeof(struct rmnet_map_ul_csum_header);
+
+		if (skb->protocol == htons(ETH_P_IP)) {
+			rmnet_map_ipv4_ul_csum_header(iphdr, ul_header, skb);
+			return;
+		} else if (skb->protocol == htons(ETH_P_IPV6)) {
+#if IS_ENABLED(CONFIG_IPV6)
+			rmnet_map_ipv6_ul_csum_header(iphdr, ul_header, skb);
+			return;
+#else
+			goto sw_csum;
+#endif
+		}
+	}
+
+sw_csum:
+	ul_header->csum_start_offset = 0;
+	ul_header->csum_insert_offset = 0;
+	ul_header->csum_enabled = 0;
+	ul_header->udp_ip4_ind = 0;
+}
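The downlink trailer carries a checksum over the whole IP packet, so both trailer validators reduce it step by step with 16-bit one's-complement arithmetic: subtract the IP header sum, add the pseudo-header sum, subtract the transport checksum field, and compare. A sketch of the underlying helpers (equivalent in spirit to csum16_add()/csum16_sub() from <net/checksum.h>, shown here only to make the carry folding explicit):

	#include <linux/types.h>

	static u16 example_csum16_add(u16 a, u16 b)
	{
		u32 sum = (u32)a + b;

		return (u16)((sum & 0xffff) + (sum >> 16));	/* fold carry */
	}

	static u16 example_csum16_sub(u16 a, u16 b)
	{
		/* One's-complement subtraction: add the complement */
		return example_csum16_add(a, (u16)~b);
	}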
diff --git a/drivers/net/ethernet/qualcomm/rmnet/rmnet_private.h b/drivers/net/ethernet/qualcomm/rmnet/rmnet_private.h
index 49102f922b31..de0143eaa05a 100644
--- a/drivers/net/ethernet/qualcomm/rmnet/rmnet_private.h
+++ b/drivers/net/ethernet/qualcomm/rmnet/rmnet_private.h
@@ -19,14 +19,10 @@
 #define RMNET_TX_QUEUE_LEN         1000
 
 /* Constants */
-#define RMNET_EGRESS_FORMAT_MAP                 BIT(1)
-#define RMNET_EGRESS_FORMAT_AGGREGATION         BIT(2)
-#define RMNET_EGRESS_FORMAT_MUXING              BIT(3)
-
-#define RMNET_INGRESS_FORMAT_MAP                BIT(1)
-#define RMNET_INGRESS_FORMAT_DEAGGREGATION      BIT(2)
-#define RMNET_INGRESS_FORMAT_DEMUXING           BIT(3)
-#define RMNET_INGRESS_FORMAT_MAP_COMMANDS       BIT(4)
+#define RMNET_INGRESS_FORMAT_DEAGGREGATION      BIT(0)
+#define RMNET_INGRESS_FORMAT_MAP_COMMANDS       BIT(1)
+#define RMNET_INGRESS_FORMAT_MAP_CKSUMV4        BIT(2)
+#define RMNET_EGRESS_FORMAT_MAP_CKSUMV4         BIT(3)
 
 /* Replace skb->dev to a virtual rmnet device and pass up the stack */
 #define RMNET_EPMODE_VND (1)
diff --git a/drivers/net/ethernet/qualcomm/rmnet/rmnet_vnd.c b/drivers/net/ethernet/qualcomm/rmnet/rmnet_vnd.c
index 9caa5e387450..570a227acdd8 100644
--- a/drivers/net/ethernet/qualcomm/rmnet/rmnet_vnd.c
+++ b/drivers/net/ethernet/qualcomm/rmnet/rmnet_vnd.c
@@ -185,6 +185,13 @@ int rmnet_vnd_newlink(u8 id, struct net_device *rmnet_dev,
 	if (ep->egress_dev)
 		return -EINVAL;
 
+	if (rmnet_get_endpoint(port, id))
+		return -EBUSY;
+
+	rmnet_dev->hw_features = NETIF_F_RXCSUM;
+	rmnet_dev->hw_features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
+	rmnet_dev->hw_features |= NETIF_F_SG;
+
 	rc = register_netdevice(rmnet_dev);
 	if (!rc) {
 		ep->egress_dev = rmnet_dev;
diff --git a/drivers/net/ethernet/realtek/8139cp.c b/drivers/net/ethernet/realtek/8139cp.c
index e7ab23e87de2..81045dfa1cd8 100644
--- a/drivers/net/ethernet/realtek/8139cp.c
+++ b/drivers/net/ethernet/realtek/8139cp.c
@@ -748,8 +748,8 @@ static netdev_tx_t cp_start_xmit (struct sk_buff *skb,
 	mss = skb_shinfo(skb)->gso_size;
 
 	if (mss > MSSMask) {
-		WARN_ONCE(1, "Net bug: GSO size %d too large for 8139CP\n",
-			  mss);
+		netdev_WARN_ONCE(dev, "Net bug: GSO size %d too large for 8139CP\n",
+				 mss);
 		goto out_dma_error;
 	}
 
diff --git a/drivers/net/ethernet/realtek/r8169.c b/drivers/net/ethernet/realtek/r8169.c
index 734286ebe5ef..0bf7d1759250 100644
--- a/drivers/net/ethernet/realtek/r8169.c
+++ b/drivers/net/ethernet/realtek/r8169.c
@@ -1395,7 +1395,7 @@ DECLARE_RTL_COND(rtl_ocp_tx_cond)
 {
 	void __iomem *ioaddr = tp->mmio_addr;
 
-	return RTL_R8(IBISR0) & 0x02;
+	return RTL_R8(IBISR0) & 0x20;
 }
 
 static void rtl8168ep_stop_cmac(struct rtl8169_private *tp)
@@ -1403,7 +1403,7 @@ static void rtl8168ep_stop_cmac(struct rtl8169_private *tp)
 	void __iomem *ioaddr = tp->mmio_addr;
 
 	RTL_W8(IBCR2, RTL_R8(IBCR2) & ~0x01);
-	rtl_msleep_loop_wait_low(tp, &rtl_ocp_tx_cond, 50, 2000);
+	rtl_msleep_loop_wait_high(tp, &rtl_ocp_tx_cond, 50, 2000);
 	RTL_W8(IBISR0, RTL_R8(IBISR0) | 0x20);
 	RTL_W8(IBCR0, RTL_R8(IBCR0) & ~0x01);
 }
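This pair of hunks fixes rtl8168ep_stop_cmac(): the condition helper tested bit 0x02 of IBISR0 instead of 0x20, and the loop waited for the bit to go low although the hardware raises it to signal that CMAC TX has drained; the write of 0x20 that follows then acknowledges the status bit. A minimal sketch of the corrected poll-for-high pattern, with a hypothetical register and timings:

	#include <linux/io.h>
	#include <linux/delay.h>

	#define EXAMPLE_TX_DONE	0x20	/* hypothetical completion bit */

	static bool example_wait_tx_done(void __iomem *status_reg)
	{
		int i;

		for (i = 0; i < 50; i++) {
			if (readb(status_reg) & EXAMPLE_TX_DONE) {
				/* ack the latched status bit */
				writeb(EXAMPLE_TX_DONE, status_reg);
				return true;
			}
			msleep(2);
		}
		return false;	/* timed out */
	}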
@@ -1675,33 +1675,24 @@ static void rtl_link_chg_patch(struct rtl8169_private *tp)
 	}
 }
 
-static void __rtl8169_check_link_status(struct net_device *dev,
-					struct rtl8169_private *tp,
-					void __iomem *ioaddr, bool pm)
+static void rtl8169_check_link_status(struct net_device *dev,
+				      struct rtl8169_private *tp,
+				      void __iomem *ioaddr)
 {
 	if (tp->link_ok(ioaddr)) {
 		rtl_link_chg_patch(tp);
 		/* This is to cancel a scheduled suspend if there's one. */
-		if (pm)
-			pm_request_resume(&tp->pci_dev->dev);
+		pm_request_resume(&tp->pci_dev->dev);
 		netif_carrier_on(dev);
 		if (net_ratelimit())
 			netif_info(tp, ifup, dev, "link up\n");
 	} else {
 		netif_carrier_off(dev);
 		netif_info(tp, ifdown, dev, "link down\n");
-		if (pm)
-			pm_schedule_suspend(&tp->pci_dev->dev, 5000);
+		pm_runtime_idle(&tp->pci_dev->dev);
 	}
 }
 
-static void rtl8169_check_link_status(struct net_device *dev,
-				      struct rtl8169_private *tp,
-				      void __iomem *ioaddr)
-{
-	__rtl8169_check_link_status(dev, tp, ioaddr, false);
-}
-
 #define WAKE_ANY (WAKE_PHY | WAKE_MAGIC | WAKE_UCAST | WAKE_BCAST | WAKE_MCAST)
 
 static u32 __rtl8169_get_wol(struct rtl8169_private *tp)
@@ -4638,16 +4629,6 @@ static void rtl8169_phy_timer(struct timer_list *t)
 	rtl_schedule_task(tp, RTL_FLAG_TASK_PHY_PENDING);
 }
 
-static void rtl8169_release_board(struct pci_dev *pdev, struct net_device *dev,
-				  void __iomem *ioaddr)
-{
-	iounmap(ioaddr);
-	pci_release_regions(pdev);
-	pci_clear_mwi(pdev);
-	pci_disable_device(pdev);
-	free_netdev(dev);
-}
-
 DECLARE_RTL_COND(rtl_phy_reset_cond)
 {
 	return tp->phy_reset_pending(tp);
@@ -4779,14 +4760,6 @@ static int rtl_tbi_ioctl(struct rtl8169_private *tp, struct mii_ioctl_data *data
 	return -EOPNOTSUPP;
 }
 
-static void rtl_disable_msi(struct pci_dev *pdev, struct rtl8169_private *tp)
-{
-	if (tp->features & RTL_FEATURE_MSI) {
-		pci_disable_msi(pdev);
-		tp->features &= ~RTL_FEATURE_MSI;
-	}
-}
-
 static void rtl_init_mdio_ops(struct rtl8169_private *tp)
 {
 	struct mdio_ops *ops = &tp->mdio_ops;
@@ -7759,7 +7732,7 @@ static void rtl_slow_event_work(struct rtl8169_private *tp)
 		rtl8169_pcierr_interrupt(dev);
 
 	if (status & LinkChg)
-		__rtl8169_check_link_status(dev, tp, tp->mmio_addr, true);
+		rtl8169_check_link_status(dev, tp, tp->mmio_addr);
 
 	rtl_irq_enable_all(tp);
 }
@@ -7972,7 +7945,7 @@ static int rtl_open(struct net_device *dev)
 	rtl_unlock_work(tp);
 
 	tp->saved_wolopts = 0;
-	pm_runtime_put_noidle(&pdev->dev);
+	pm_runtime_put_sync(&pdev->dev);
 
 	rtl8169_check_link_status(dev, tp, ioaddr);
 out:
@@ -8116,8 +8089,10 @@ static int rtl8169_runtime_suspend(struct device *device)
 	struct net_device *dev = pci_get_drvdata(pdev);
 	struct rtl8169_private *tp = netdev_priv(dev);
 
-	if (!tp->TxDescArray)
+	if (!tp->TxDescArray) {
+		rtl_pll_power_down(tp);
 		return 0;
+	}
 
 	rtl_lock_work(tp);
 	tp->saved_wolopts = __rtl8169_get_wol(tp);
@@ -8159,9 +8134,11 @@ static int rtl8169_runtime_idle(struct device *device)
 {
 	struct pci_dev *pdev = to_pci_dev(device);
 	struct net_device *dev = pci_get_drvdata(pdev);
-	struct rtl8169_private *tp = netdev_priv(dev);
 
-	return tp->TxDescArray ? -EBUSY : 0;
+	if (!netif_running(dev) || !netif_carrier_ok(dev))
+		pm_schedule_suspend(device, 10000);
+
+	return -EBUSY;
 }
 
 static const struct dev_pm_ops rtl8169_pm_ops = {
@@ -8208,9 +8185,6 @@ static void rtl_shutdown(struct pci_dev *pdev)
 {
 	struct net_device *dev = pci_get_drvdata(pdev);
 	struct rtl8169_private *tp = netdev_priv(dev);
-	struct device *d = &pdev->dev;
-
-	pm_runtime_get_sync(d);
 
 	rtl8169_net_suspend(dev);
 
@@ -8228,8 +8202,6 @@ static void rtl_shutdown(struct pci_dev *pdev)
 		pci_wake_from_d3(pdev, true);
 		pci_set_power_state(pdev, PCI_D3hot);
 	}
-
-	pm_runtime_put_noidle(d);
 }
 
 static void rtl_remove_one(struct pci_dev *pdev)
@@ -8251,9 +8223,6 @@ static void rtl_remove_one(struct pci_dev *pdev)
 
 	unregister_netdev(dev);
 
-	dma_free_coherent(&tp->pci_dev->dev, sizeof(*tp->counters),
-			  tp->counters, tp->counters_phys_addr);
-
 	rtl_release_firmware(tp);
 
 	if (pci_dev_run_wake(pdev))
@@ -8261,9 +8230,6 @@ static void rtl_remove_one(struct pci_dev *pdev)
 
 	/* restore original MAC address */
 	rtl_rar_set(tp, dev->perm_addr);
-
-	rtl_disable_msi(pdev, tp);
-	rtl8169_release_board(pdev, dev, tp->mmio_addr);
 }
 
 static const struct net_device_ops rtl_netdev_ops = {
@@ -8440,11 +8406,9 @@ static int rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
 		       MODULENAME, RTL8169_VERSION);
 	}
 
-	dev = alloc_etherdev(sizeof (*tp));
-	if (!dev) {
-		rc = -ENOMEM;
-		goto out;
-	}
+	dev = devm_alloc_etherdev(&pdev->dev, sizeof (*tp));
+	if (!dev)
+		return -ENOMEM;
 
 	SET_NETDEV_DEV(dev, &pdev->dev);
 	dev->netdev_ops = &rtl_netdev_ops;
@@ -8467,13 +8431,13 @@ static int rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
 				     PCIE_LINK_STATE_CLKPM);
 
 	/* enable device (incl. PCI PM wakeup and hotplug setup) */
-	rc = pci_enable_device(pdev);
+	rc = pcim_enable_device(pdev);
 	if (rc < 0) {
 		netif_err(tp, probe, dev, "enable failure\n");
-		goto err_out_free_dev_1;
+		return rc;
 	}
 
-	if (pci_set_mwi(pdev) < 0)
+	if (pcim_set_mwi(pdev) < 0)
 		netif_info(tp, probe, dev, "Mem-Wr-Inval unavailable\n");
 
 	/* make sure PCI base addr 1 is MMIO */
@@ -8481,30 +8445,28 @@ static int rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
 		netif_err(tp, probe, dev,
 			  "region #%d not an MMIO resource, aborting\n",
 			  region);
-		rc = -ENODEV;
-		goto err_out_mwi_2;
+		return -ENODEV;
 	}
 
 	/* check for weird/broken PCI region reporting */
 	if (pci_resource_len(pdev, region) < R8169_REGS_SIZE) {
 		netif_err(tp, probe, dev,
 			  "Invalid PCI region size(s), aborting\n");
-		rc = -ENODEV;
-		goto err_out_mwi_2;
+		return -ENODEV;
 	}
 
 	rc = pci_request_regions(pdev, MODULENAME);
 	if (rc < 0) {
 		netif_err(tp, probe, dev, "could not request regions\n");
-		goto err_out_mwi_2;
+		return rc;
 	}
 
 	/* ioremap MMIO region */
-	ioaddr = ioremap(pci_resource_start(pdev, region), R8169_REGS_SIZE);
+	ioaddr = devm_ioremap(&pdev->dev, pci_resource_start(pdev, region),
+			      R8169_REGS_SIZE);
 	if (!ioaddr) {
 		netif_err(tp, probe, dev, "cannot remap MMIO, aborting\n");
-		rc = -EIO;
-		goto err_out_free_res_3;
+		return -EIO;
 	}
 	tp->mmio_addr = ioaddr;
 
@@ -8530,7 +8492,7 @@ static int rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
 		rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
 		if (rc < 0) {
 			netif_err(tp, probe, dev, "DMA configuration failed\n");
-			goto err_out_unmap_4;
+			return rc;
 		}
 	}
 
@@ -8692,16 +8654,15 @@ static int rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
 
 	tp->rtl_fw = RTL_FIRMWARE_UNKNOWN;
 
-	tp->counters = dma_alloc_coherent (&pdev->dev, sizeof(*tp->counters),
-					   &tp->counters_phys_addr, GFP_KERNEL);
-	if (!tp->counters) {
-		rc = -ENOMEM;
-		goto err_out_msi_5;
-	}
+	tp->counters = dmam_alloc_coherent (&pdev->dev, sizeof(*tp->counters),
+					    &tp->counters_phys_addr,
+					    GFP_KERNEL);
+	if (!tp->counters)
+		return -ENOMEM;
 
 	rc = register_netdev(dev);
 	if (rc < 0)
-		goto err_out_cnt_6;
+		return rc;
 
 	pci_set_drvdata(pdev, dev);
 
@@ -8725,30 +8686,12 @@ static int rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
 		rtl8168_driver_start(tp);
 	}
 
-	if (pci_dev_run_wake(pdev))
-		pm_runtime_put_noidle(&pdev->dev);
-
 	netif_carrier_off(dev);
 
-out:
-	return rc;
+	if (pci_dev_run_wake(pdev))
+		pm_runtime_put_sync(&pdev->dev);
 
-err_out_cnt_6:
-	dma_free_coherent(&pdev->dev, sizeof(*tp->counters), tp->counters,
-			  tp->counters_phys_addr);
-err_out_msi_5:
-	netif_napi_del(&tp->napi);
-	rtl_disable_msi(pdev, tp);
-err_out_unmap_4:
-	iounmap(ioaddr);
-err_out_free_res_3:
-	pci_release_regions(pdev);
-err_out_mwi_2:
-	pci_clear_mwi(pdev);
-	pci_disable_device(pdev);
-err_out_free_dev_1:
-	free_netdev(dev);
-	goto out;
+	return 0;
 }
 
 static struct pci_driver rtl8169_pci_driver = {
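The probe rework moves r8169 onto managed (devres) resources: devm_alloc_etherdev(), pcim_enable_device(), devm_ioremap() and dmam_alloc_coherent() are all torn down automatically when probe fails or the device unbinds, which is what lets the err_out_* unwind ladder, rtl8169_release_board() and the explicit dma_free_coherent() in remove disappear. A minimal probe sketch under the same assumptions:

	#include <linux/pci.h>
	#include <linux/etherdevice.h>

	struct example_priv {
		void __iomem *regs;
	};

	static int example_probe(struct pci_dev *pdev)
	{
		struct net_device *dev;
		struct example_priv *priv;

		dev = devm_alloc_etherdev(&pdev->dev, sizeof(*priv));
		if (!dev)
			return -ENOMEM;		/* nothing to unwind */

		if (pcim_enable_device(pdev))
			return -EIO;		/* devres disables the device */

		priv = netdev_priv(dev);
		priv->regs = devm_ioremap(&pdev->dev,
					  pci_resource_start(pdev, 2), 0x100);
		if (!priv->regs)
			return -EIO;		/* unmapped automatically */

		return 0;
	}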
diff --git a/drivers/net/ethernet/renesas/ravb_main.c b/drivers/net/ethernet/renesas/ravb_main.c
index 009780df664b..c87f57ca4437 100644
--- a/drivers/net/ethernet/renesas/ravb_main.c
+++ b/drivers/net/ethernet/renesas/ravb_main.c
@@ -2205,8 +2205,7 @@ out_dma_free:
 	if (chip_id != RCAR_GEN2)
 		ravb_ptp_stop(ndev);
 out_release:
-	if (ndev)
-		free_netdev(ndev);
+	free_netdev(ndev);
 
 	pm_runtime_put(&pdev->dev);
 	pm_runtime_disable(&pdev->dev);
diff --git a/drivers/net/ethernet/renesas/sh_eth.c b/drivers/net/ethernet/renesas/sh_eth.c
index 53924a4fc31c..a197e11f3a56 100644
--- a/drivers/net/ethernet/renesas/sh_eth.c
+++ b/drivers/net/ethernet/renesas/sh_eth.c
@@ -3125,7 +3125,7 @@ static int sh_eth_drv_probe(struct platform_device *pdev)
 	const struct platform_device_id *id = platform_get_device_id(pdev);
 	struct sh_eth_private *mdp;
 	struct net_device *ndev;
-	int ret, devno;
+	int ret;
 
 	/* get base addr */
 	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
@@ -3137,10 +3137,6 @@ static int sh_eth_drv_probe(struct platform_device *pdev)
 	pm_runtime_enable(&pdev->dev);
 	pm_runtime_get_sync(&pdev->dev);
 
-	devno = pdev->id;
-	if (devno < 0)
-		devno = 0;
-
 	ret = platform_get_irq(pdev, 0);
 	if (ret < 0)
 		goto out_release;
@@ -3222,8 +3218,8 @@ static int sh_eth_drv_probe(struct platform_device *pdev)
 		eth_hw_addr_random(ndev);
 	}
 
-	/* ioremap the TSU registers */
 	if (mdp->cd->tsu) {
+		int port = pdev->id < 0 ? 0 : pdev->id % 2;
 		struct resource *rtsu;
 
 		rtsu = platform_get_resource(pdev, IORESOURCE_MEM, 1);
@@ -3235,7 +3231,7 @@ static int sh_eth_drv_probe(struct platform_device *pdev)
 		/* We can only request the TSU region for the first port
 		 * of the two sharing this TSU for the probe to succeed...
 		 */
-		if (devno % 2 == 0 &&
+		if (port == 0 &&
 		    !devm_request_mem_region(&pdev->dev, rtsu->start,
 					     resource_size(rtsu),
 					     dev_name(&pdev->dev))) {
@@ -3243,6 +3239,7 @@ static int sh_eth_drv_probe(struct platform_device *pdev)
 			ret = -EBUSY;
 			goto out_release;
 		}
+		/* ioremap the TSU registers */
 		mdp->tsu_addr = devm_ioremap(&pdev->dev, rtsu->start,
 					     resource_size(rtsu));
 		if (!mdp->tsu_addr) {
@@ -3250,16 +3247,14 @@ static int sh_eth_drv_probe(struct platform_device *pdev)
 			ret = -ENOMEM;
 			goto out_release;
 		}
-		mdp->port = devno % 2;
+		mdp->port = port;
 		ndev->features = NETIF_F_HW_VLAN_CTAG_FILTER;
-	}
 
-	/* Need to init only the first port of the two sharing a TSU */
-	if (devno % 2 == 0) {
-		if (mdp->cd->chip_reset)
-			mdp->cd->chip_reset(ndev);
+		/* Need to init only the first port of the two sharing a TSU */
+		if (port == 0) {
+			if (mdp->cd->chip_reset)
+				mdp->cd->chip_reset(ndev);
 
-		if (mdp->cd->tsu) {
 			/* TSU init (Init only)*/
 			sh_eth_tsu_init(mdp);
 		}
@@ -3301,8 +3296,7 @@ out_napi_del:
 
 out_release:
 	/* net_dev free */
-	if (ndev)
-		free_netdev(ndev);
+	free_netdev(ndev);
 
 	pm_runtime_put(&pdev->dev);
 	pm_runtime_disable(&pdev->dev);
diff --git a/drivers/net/ethernet/rocker/rocker_ofdpa.c b/drivers/net/ethernet/rocker/rocker_ofdpa.c
index 6d6fb8cf3e7c..6473cc68c2d5 100644
--- a/drivers/net/ethernet/rocker/rocker_ofdpa.c
+++ b/drivers/net/ethernet/rocker/rocker_ofdpa.c
@@ -789,7 +789,6 @@ static int ofdpa_flow_tbl_add(struct ofdpa_port *ofdpa_port,
 			       ofdpa_flags_nowait(flags),
 			       ofdpa_cmd_flow_tbl_add,
 			       found, NULL, NULL);
-	return 0;
 }
 
 static int ofdpa_flow_tbl_del(struct ofdpa_port *ofdpa_port,
diff --git a/drivers/net/ethernet/sfc/ef10.c b/drivers/net/ethernet/sfc/ef10.c
index e566dbb3343d..75fbf58e421c 100644
--- a/drivers/net/ethernet/sfc/ef10.c
+++ b/drivers/net/ethernet/sfc/ef10.c
@@ -160,11 +160,31 @@ static int efx_ef10_get_warm_boot_count(struct efx_nic *efx)
 		EFX_DWORD_FIELD(reg, EFX_WORD_0) : -EIO;
 }
 
+/* On all EF10s up to and including SFC9220 (Medford1), all PFs use BAR 0 for
+ * I/O space and BAR 2(&3) for memory.  On SFC9250 (Medford2), there is no I/O
+ * bar; PFs use BAR 0/1 for memory.
+ */
+static unsigned int efx_ef10_pf_mem_bar(struct efx_nic *efx)
+{
+	switch (efx->pci_dev->device) {
+	case 0x0b03: /* SFC9250 PF */
+		return 0;
+	default:
+		return 2;
+	}
+}
+
+/* All VFs use BAR 0/1 for memory */
+static unsigned int efx_ef10_vf_mem_bar(struct efx_nic *efx)
+{
+	return 0;
+}
+
 static unsigned int efx_ef10_mem_map_size(struct efx_nic *efx)
 {
 	int bar;
 
-	bar = efx->type->mem_bar;
+	bar = efx->type->mem_bar(efx);
 	return resource_size(&efx->pci_dev->resource[bar]);
 }
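mem_bar changes from a constant in struct efx_nic_type to a per-NIC callback because SFC9250 moved the memory BAR: the same driver now has to answer "which BAR?" per device rather than per type. A minimal sketch of promoting a data field to a callback, with hypothetical types:

	#include <linux/pci.h>

	struct example_nic;

	struct example_nic_type {
		/* was: unsigned int mem_bar; */
		unsigned int (*mem_bar)(struct example_nic *nic);
	};

	static unsigned int example_mem_map_size(struct example_nic *nic,
						 const struct example_nic_type *type,
						 struct pci_dev *pdev)
	{
		unsigned int bar = type->mem_bar(nic);	/* callback, not field */

		return resource_size(&pdev->resource[bar]);
	}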
 
@@ -213,7 +233,7 @@ static int efx_ef10_get_vf_index(struct efx_nic *efx)
 
 static int efx_ef10_init_datapath_caps(struct efx_nic *efx)
 {
-	MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_CAPABILITIES_V2_OUT_LEN);
+	MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_CAPABILITIES_V4_OUT_LEN);
 	struct efx_ef10_nic_data *nic_data = efx->nic_data;
 	size_t outlen;
 	int rc;
@@ -257,9 +277,70 @@ static int efx_ef10_init_datapath_caps(struct efx_nic *efx)
 		return -ENODEV;
 	}
 
+	if (outlen >= MC_CMD_GET_CAPABILITIES_V3_OUT_LEN) {
+		u8 vi_window_mode = MCDI_BYTE(outbuf,
+				GET_CAPABILITIES_V3_OUT_VI_WINDOW_MODE);
+
+		switch (vi_window_mode) {
+		case MC_CMD_GET_CAPABILITIES_V3_OUT_VI_WINDOW_MODE_8K:
+			efx->vi_stride = 8192;
+			break;
+		case MC_CMD_GET_CAPABILITIES_V3_OUT_VI_WINDOW_MODE_16K:
+			efx->vi_stride = 16384;
+			break;
+		case MC_CMD_GET_CAPABILITIES_V3_OUT_VI_WINDOW_MODE_64K:
+			efx->vi_stride = 65536;
+			break;
+		default:
+			netif_err(efx, probe, efx->net_dev,
+				  "Unrecognised VI window mode %d\n",
+				  vi_window_mode);
+			return -EIO;
+		}
+		netif_dbg(efx, probe, efx->net_dev, "vi_stride = %u\n",
+			  efx->vi_stride);
+	} else {
+		/* keep default VI stride */
+		netif_dbg(efx, probe, efx->net_dev,
+			  "firmware did not report VI window mode, assuming vi_stride = %u\n",
+			  efx->vi_stride);
+	}
+
+	if (outlen >= MC_CMD_GET_CAPABILITIES_V4_OUT_LEN) {
+		efx->num_mac_stats = MCDI_WORD(outbuf,
+				GET_CAPABILITIES_V4_OUT_MAC_STATS_NUM_STATS);
+		netif_dbg(efx, probe, efx->net_dev,
+			  "firmware reports num_mac_stats = %u\n",
+			  efx->num_mac_stats);
+	} else {
+		/* leave num_mac_stats as the default value, MC_CMD_MAC_NSTATS */
+		netif_dbg(efx, probe, efx->net_dev,
+			  "firmware did not report num_mac_stats, assuming %u\n",
+			  efx->num_mac_stats);
+	}
+
 	return 0;
 }
 
+static void efx_ef10_read_licensed_features(struct efx_nic *efx)
+{
+	MCDI_DECLARE_BUF(inbuf, MC_CMD_LICENSING_V3_IN_LEN);
+	MCDI_DECLARE_BUF(outbuf, MC_CMD_LICENSING_V3_OUT_LEN);
+	struct efx_ef10_nic_data *nic_data = efx->nic_data;
+	size_t outlen;
+	int rc;
+
+	MCDI_SET_DWORD(inbuf, LICENSING_V3_IN_OP,
+		       MC_CMD_LICENSING_V3_IN_OP_REPORT_LICENSE);
+	rc = efx_mcdi_rpc_quiet(efx, MC_CMD_LICENSING_V3, inbuf, sizeof(inbuf),
+				outbuf, sizeof(outbuf), &outlen);
+	if (rc || (outlen < MC_CMD_LICENSING_V3_OUT_LEN))
+		return;
+
+	nic_data->licensed_features = MCDI_QWORD(outbuf,
+					 LICENSING_V3_OUT_LICENSED_FEATURES);
+}
+
 static int efx_ef10_get_sysclk_freq(struct efx_nic *efx)
 {
 	MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_CLOCK_OUT_LEN);
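On SFC9250 the VI window is no longer a fixed 8K: the firmware reports one of three window modes via GET_CAPABILITIES_V3 and the driver records the resulting byte stride, which from here on replaces the old EFX_VI_PAGE_SIZE constant in the mapping and PIO calculations. A minimal mode-to-stride sketch (the case values are illustrative; the real ones are the MC_CMD_GET_CAPABILITIES_V3_OUT_VI_WINDOW_MODE_* constants):

	#include <linux/errno.h>
	#include <linux/types.h>

	static int example_vi_stride(u8 window_mode, unsigned int *stride)
	{
		switch (window_mode) {
		case 0: *stride = 8192;  return 0;	/* 8K window */
		case 1: *stride = 16384; return 0;	/* 16K window */
		case 2: *stride = 65536; return 0;	/* 64K window */
		default:
			return -EIO;	/* unrecognised window mode */
		}
	}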
@@ -589,17 +670,6 @@ static int efx_ef10_probe(struct efx_nic *efx)
 	struct efx_ef10_nic_data *nic_data;
 	int i, rc;
 
-	/* We can have one VI for each 8K region.  However, until we
-	 * use TX option descriptors we need two TX queues per channel.
-	 */
-	efx->max_channels = min_t(unsigned int,
-				  EFX_MAX_CHANNELS,
-				  efx_ef10_mem_map_size(efx) /
-				  (EFX_VI_PAGE_SIZE * EFX_TXQ_TYPES));
-	efx->max_tx_channels = efx->max_channels;
-	if (WARN_ON(efx->max_channels == 0))
-		return -EIO;
-
 	nic_data = kzalloc(sizeof(*nic_data), GFP_KERNEL);
 	if (!nic_data)
 		return -ENOMEM;
@@ -671,6 +741,22 @@ static int efx_ef10_probe(struct efx_nic *efx)
 	if (rc < 0)
 		goto fail5;
 
+	efx_ef10_read_licensed_features(efx);
+
+	/* We can have one VI for each vi_stride-byte region.
+	 * However, until we use TX option descriptors we need two TX queues
+	 * per channel.
+	 */
+	efx->max_channels = min_t(unsigned int,
+				  EFX_MAX_CHANNELS,
+				  efx_ef10_mem_map_size(efx) /
+				  (efx->vi_stride * EFX_TXQ_TYPES));
+	efx->max_tx_channels = efx->max_channels;
+	if (WARN_ON(efx->max_channels == 0)) {
+		rc = -EIO;
+		goto fail5;
+	}
+
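Worked example of the relocated computation (numbers assumed for illustration): with the default 8K stride and EFX_TXQ_TYPES of 4, a 16 MiB memory BAR gives 16 MiB / (8192 * 4) = 512 candidate channels before the EFX_MAX_CHANNELS clamp; a Medford2-style 64K stride shrinks that to 16 MiB / (65536 * 4) = 64. The stride is only known after the capabilities query, which is why this block moved from the top of probe to here.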
 	efx->rx_packet_len_offset =
 		ES_DZ_RX_PREFIX_PKTLEN_OFST - ES_DZ_RX_PREFIX_SIZE;
 
@@ -695,7 +781,7 @@ static int efx_ef10_probe(struct efx_nic *efx)
 	if (rc && rc != -EPERM)
 		goto fail5;
 
-	efx_ptp_probe(efx, NULL);
+	efx_ptp_defer_probe_with_channel(efx);
 
 #ifdef CONFIG_SFC_SRIOV
 	if ((efx->pci_dev->physfn) && (!efx->pci_dev->is_physfn)) {
@@ -865,6 +951,11 @@ static int efx_ef10_link_piobufs(struct efx_nic *efx)
 
 	/* Link a buffer to each TX queue */
 	efx_for_each_channel(channel, efx) {
+		/* Extra channels, even those with TXQs (PTP), do not require
+		 * PIO resources.
+		 */
+		if (!channel->type->want_pio)
+			continue;
 		efx_for_each_channel_tx_queue(tx_queue, channel) {
 			/* We assign the PIO buffers to queues in
 			 * reverse order to allow for the following
@@ -907,7 +998,7 @@ static int efx_ef10_link_piobufs(struct efx_nic *efx)
 			} else {
 				tx_queue->piobuf =
 					nic_data->pio_write_base +
-					index * EFX_VI_PAGE_SIZE + offset;
+					index * efx->vi_stride + offset;
 				tx_queue->piobuf_offset = offset;
 				netif_dbg(efx, probe, efx->net_dev,
 					  "linked VI %u to PIO buffer %u offset %x addr %p\n",
@@ -1212,7 +1303,9 @@ static int efx_ef10_dimension_resources(struct efx_nic *efx)
 	void __iomem *membase;
 	int rc;
 
-	channel_vis = max(efx->n_channels, efx->n_tx_channels * EFX_TXQ_TYPES);
+	channel_vis = max(efx->n_channels,
+			  (efx->n_tx_channels + efx->n_extra_tx_channels) *
+			  EFX_TXQ_TYPES);
 
 #ifdef EFX_USE_PIO
 	/* Try to allocate PIO buffers if wanted and if the full
@@ -1253,19 +1346,19 @@ static int efx_ef10_dimension_resources(struct efx_nic *efx)
 	 * for writing PIO buffers through.
 	 *
 	 * The UC mapping contains (channel_vis - 1) complete VIs and the
-	 * first half of the next VI.  Then the WC mapping begins with
-	 * the second half of this last VI.
+	 * first 4K of the next VI.  Then the WC mapping begins with
+	 * the remainder of this last VI.
 	 */
-	uc_mem_map_size = PAGE_ALIGN((channel_vis - 1) * EFX_VI_PAGE_SIZE +
+	uc_mem_map_size = PAGE_ALIGN((channel_vis - 1) * efx->vi_stride +
 				     ER_DZ_TX_PIOBUF);
 	if (nic_data->n_piobufs) {
 		/* pio_write_vi_base rounds down to give the number of complete
 		 * VIs inside the UC mapping.
 		 */
-		pio_write_vi_base = uc_mem_map_size / EFX_VI_PAGE_SIZE;
+		pio_write_vi_base = uc_mem_map_size / efx->vi_stride;
 		wc_mem_map_size = (PAGE_ALIGN((pio_write_vi_base +
 					       nic_data->n_piobufs) *
-					      EFX_VI_PAGE_SIZE) -
+					      efx->vi_stride) -
 				   uc_mem_map_size);
 		max_vis = pio_write_vi_base + nic_data->n_piobufs;
 	} else {
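Worked example of the split (assuming 4 KiB pages and, illustratively, ER_DZ_TX_PIOBUF at offset 0x1000 within a VI): with channel_vis = 33 and an 8K stride, the UC mapping covers VIs 0-31 in full plus the first 4 KiB of VI 32, so pio_write_vi_base rounds down to 32 and the WC mapping picks up at 4 KiB into VI 32, running to the page-aligned end of the last PIO-carrying VI.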
@@ -1337,7 +1430,7 @@ static int efx_ef10_dimension_resources(struct efx_nic *efx)
 		nic_data->pio_write_vi_base = pio_write_vi_base;
 		nic_data->pio_write_base =
 			nic_data->wc_membase +
-			(pio_write_vi_base * EFX_VI_PAGE_SIZE + ER_DZ_TX_PIOBUF -
+			(pio_write_vi_base * efx->vi_stride + ER_DZ_TX_PIOBUF -
 			 uc_mem_map_size);
 
 		rc = efx_ef10_link_piobufs(efx);
@@ -1571,6 +1664,29 @@ static const struct efx_hw_stat_desc efx_ef10_stat_desc[EF10_STAT_COUNT] = {
 	EF10_DMA_STAT(tx_bad, VADAPTER_TX_BAD_PACKETS),
 	EF10_DMA_STAT(tx_bad_bytes, VADAPTER_TX_BAD_BYTES),
 	EF10_DMA_STAT(tx_overflow, VADAPTER_TX_OVERFLOW),
+	EF10_DMA_STAT(fec_uncorrected_errors, FEC_UNCORRECTED_ERRORS),
+	EF10_DMA_STAT(fec_corrected_errors, FEC_CORRECTED_ERRORS),
+	EF10_DMA_STAT(fec_corrected_symbols_lane0, FEC_CORRECTED_SYMBOLS_LANE0),
+	EF10_DMA_STAT(fec_corrected_symbols_lane1, FEC_CORRECTED_SYMBOLS_LANE1),
+	EF10_DMA_STAT(fec_corrected_symbols_lane2, FEC_CORRECTED_SYMBOLS_LANE2),
+	EF10_DMA_STAT(fec_corrected_symbols_lane3, FEC_CORRECTED_SYMBOLS_LANE3),
+	EF10_DMA_STAT(ctpio_dmabuf_start, CTPIO_DMABUF_START),
+	EF10_DMA_STAT(ctpio_vi_busy_fallback, CTPIO_VI_BUSY_FALLBACK),
+	EF10_DMA_STAT(ctpio_long_write_success, CTPIO_LONG_WRITE_SUCCESS),
+	EF10_DMA_STAT(ctpio_missing_dbell_fail, CTPIO_MISSING_DBELL_FAIL),
+	EF10_DMA_STAT(ctpio_overflow_fail, CTPIO_OVERFLOW_FAIL),
+	EF10_DMA_STAT(ctpio_underflow_fail, CTPIO_UNDERFLOW_FAIL),
+	EF10_DMA_STAT(ctpio_timeout_fail, CTPIO_TIMEOUT_FAIL),
+	EF10_DMA_STAT(ctpio_noncontig_wr_fail, CTPIO_NONCONTIG_WR_FAIL),
+	EF10_DMA_STAT(ctpio_frm_clobber_fail, CTPIO_FRM_CLOBBER_FAIL),
+	EF10_DMA_STAT(ctpio_invalid_wr_fail, CTPIO_INVALID_WR_FAIL),
+	EF10_DMA_STAT(ctpio_vi_clobber_fallback, CTPIO_VI_CLOBBER_FALLBACK),
+	EF10_DMA_STAT(ctpio_unqualified_fallback, CTPIO_UNQUALIFIED_FALLBACK),
+	EF10_DMA_STAT(ctpio_runt_fallback, CTPIO_RUNT_FALLBACK),
+	EF10_DMA_STAT(ctpio_success, CTPIO_SUCCESS),
+	EF10_DMA_STAT(ctpio_fallback, CTPIO_FALLBACK),
+	EF10_DMA_STAT(ctpio_poison, CTPIO_POISON),
+	EF10_DMA_STAT(ctpio_erase, CTPIO_ERASE),
 };
 
 #define HUNT_COMMON_STAT_MASK ((1ULL << EF10_STAT_port_tx_bytes) |	\
@@ -1646,6 +1762,43 @@ static const struct efx_hw_stat_desc efx_ef10_stat_desc[EF10_STAT_COUNT] = {
 	(1ULL << EF10_STAT_port_rx_dp_hlb_fetch) |			\
 	(1ULL << EF10_STAT_port_rx_dp_hlb_wait))
 
+/* These statistics are only provided if the NIC supports MC_CMD_MAC_STATS_V2,
+ * indicated by returning a value >= MC_CMD_MAC_NSTATS_V2 in
+ * MC_CMD_GET_CAPABILITIES_V4_OUT_MAC_STATS_NUM_STATS.
+ * These bits are in the second u64 of the raw mask.
+ */
+#define EF10_FEC_STAT_MASK (						\
+	(1ULL << (EF10_STAT_fec_uncorrected_errors - 64)) |		\
+	(1ULL << (EF10_STAT_fec_corrected_errors - 64)) |		\
+	(1ULL << (EF10_STAT_fec_corrected_symbols_lane0 - 64)) |	\
+	(1ULL << (EF10_STAT_fec_corrected_symbols_lane1 - 64)) |	\
+	(1ULL << (EF10_STAT_fec_corrected_symbols_lane2 - 64)) |	\
+	(1ULL << (EF10_STAT_fec_corrected_symbols_lane3 - 64)))
+
+/* These statistics are only provided if the NIC supports MC_CMD_MAC_STATS_V3,
+ * indicated by returning a value >= MC_CMD_MAC_NSTATS_V3 in
+ * MC_CMD_GET_CAPABILITIES_V4_OUT_MAC_STATS_NUM_STATS.
+ * These bits are in the second u64 of the raw mask.
+ */
+#define EF10_CTPIO_STAT_MASK (						\
+	(1ULL << (EF10_STAT_ctpio_dmabuf_start - 64)) |			\
+	(1ULL << (EF10_STAT_ctpio_vi_busy_fallback - 64)) |		\
+	(1ULL << (EF10_STAT_ctpio_long_write_success - 64)) |		\
+	(1ULL << (EF10_STAT_ctpio_missing_dbell_fail - 64)) |		\
+	(1ULL << (EF10_STAT_ctpio_overflow_fail - 64)) |		\
+	(1ULL << (EF10_STAT_ctpio_underflow_fail - 64)) |		\
+	(1ULL << (EF10_STAT_ctpio_timeout_fail - 64)) |			\
+	(1ULL << (EF10_STAT_ctpio_noncontig_wr_fail - 64)) |		\
+	(1ULL << (EF10_STAT_ctpio_frm_clobber_fail - 64)) |		\
+	(1ULL << (EF10_STAT_ctpio_invalid_wr_fail - 64)) |		\
+	(1ULL << (EF10_STAT_ctpio_vi_clobber_fallback - 64)) |		\
+	(1ULL << (EF10_STAT_ctpio_unqualified_fallback - 64)) |		\
+	(1ULL << (EF10_STAT_ctpio_runt_fallback - 64)) |		\
+	(1ULL << (EF10_STAT_ctpio_success - 64)) |			\
+	(1ULL << (EF10_STAT_ctpio_fallback - 64)) |			\
+	(1ULL << (EF10_STAT_ctpio_poison - 64)) |			\
+	(1ULL << (EF10_STAT_ctpio_erase - 64)))
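The "- 64" in every term places these bits in the second u64 of the raw mask; generically, a stat index selects a word and a bit as in this sketch (helper name hypothetical):

static void raw_mask_set(u64 raw_mask[2], unsigned int stat_idx)
{
	raw_mask[stat_idx / 64] |= 1ULL << (stat_idx % 64);
}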
+
 static u64 efx_ef10_raw_stat_mask(struct efx_nic *efx)
 {
 	u64 raw_mask = HUNT_COMMON_STAT_MASK;
@@ -1684,10 +1837,22 @@ static void efx_ef10_get_stat_mask(struct efx_nic *efx, unsigned long *mask)
 	if (nic_data->datapath_caps &
 	    (1 << MC_CMD_GET_CAPABILITIES_OUT_EVB_LBN)) {
 		raw_mask[0] |= ~((1ULL << EF10_STAT_rx_unicast) - 1);
-		raw_mask[1] = (1ULL << (EF10_STAT_COUNT - 63)) - 1;
+		raw_mask[1] = (1ULL << (EF10_STAT_V1_COUNT - 64)) - 1;
 	} else {
 		raw_mask[1] = 0;
 	}
+	/* Only show FEC stats when NIC supports MC_CMD_MAC_STATS_V2 */
+	if (efx->num_mac_stats >= MC_CMD_MAC_NSTATS_V2)
+		raw_mask[1] |= EF10_FEC_STAT_MASK;
+
+	/* CTPIO stats appear in V3. Only show them on devices that actually
+	 * support CTPIO. Although this driver doesn't use CTPIO, others might,
+	 * and we may be reporting the stats for the underlying port.
+	 */
+	if (efx->num_mac_stats >= MC_CMD_MAC_NSTATS_V3 &&
+	    (nic_data->datapath_caps2 &
+	     (1 << MC_CMD_GET_CAPABILITIES_V4_OUT_CTPIO_LBN)))
+		raw_mask[1] |= EF10_CTPIO_STAT_MASK;
 
 #if BITS_PER_LONG == 64
 	BUILD_BUG_ON(BITS_TO_LONGS(EF10_STAT_COUNT) != 2);
@@ -1791,7 +1956,7 @@ static int efx_ef10_try_update_nic_stats_pf(struct efx_nic *efx)
 
 	dma_stats = efx->stats_buffer.addr;
 
-	generation_end = dma_stats[MC_CMD_MAC_GENERATION_END];
+	generation_end = dma_stats[efx->num_mac_stats - 1];
 	if (generation_end == EFX_MC_STATS_GENERATION_INVALID)
 		return 0;
 	rmb();
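The generation word now lives in the last of num_mac_stats slots instead of at a fixed MC_CMD_MAC_GENERATION_END index, but the consistency protocol around it is unchanged. A hedged sketch of that protocol as the surrounding function applies it (loop structure illustrative):

do {
	generation_end = dma_stats[efx->num_mac_stats - 1];
	if (generation_end == EFX_MC_STATS_GENERATION_INVALID)
		return 0;			/* MC has not written stats yet */
	rmb();					/* read generation before the stats */
	/* ... decode stats from dma_stats ... */
	rmb();					/* read stats before the start marker */
	generation_start = dma_stats[MC_CMD_MAC_GENERATION_START];
} while (generation_end != generation_start);	/* retry if the MC raced us */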
@@ -1839,7 +2004,7 @@ static int efx_ef10_try_update_nic_stats_vf(struct efx_nic *efx)
 	DECLARE_BITMAP(mask, EF10_STAT_COUNT);
 	__le64 generation_start, generation_end;
 	u64 *stats = nic_data->stats;
-	u32 dma_len = MC_CMD_MAC_NSTATS * sizeof(u64);
+	u32 dma_len = efx->num_mac_stats * sizeof(u64);
 	struct efx_buffer stats_buf;
 	__le64 *dma_stats;
 	int rc;
@@ -1864,7 +2029,7 @@ static int efx_ef10_try_update_nic_stats_vf(struct efx_nic *efx)
 	}
 
 	dma_stats = stats_buf.addr;
-	dma_stats[MC_CMD_MAC_GENERATION_END] = EFX_MC_STATS_GENERATION_INVALID;
+	dma_stats[efx->num_mac_stats - 1] = EFX_MC_STATS_GENERATION_INVALID;
 
 	MCDI_SET_QWORD(inbuf, MAC_STATS_IN_DMA_ADDR, stats_buf.dma_addr);
 	MCDI_POPULATE_DWORD_1(inbuf, MAC_STATS_IN_CMD,
@@ -1883,7 +2048,7 @@ static int efx_ef10_try_update_nic_stats_vf(struct efx_nic *efx)
 		goto out;
 	}
 
-	generation_end = dma_stats[MC_CMD_MAC_GENERATION_END];
+	generation_end = dma_stats[efx->num_mac_stats - 1];
 	if (generation_end == EFX_MC_STATS_GENERATION_INVALID) {
 		WARN_ON_ONCE(1);
 		goto out;
@@ -1951,8 +2116,9 @@ static void efx_ef10_push_irq_moderation(struct efx_channel *channel)
 	} else {
 		unsigned int ticks = efx_usecs_to_ticks(efx, usecs);
 
-		EFX_POPULATE_DWORD_2(timer_cmd, ERF_DZ_TC_TIMER_MODE, mode,
-				     ERF_DZ_TC_TIMER_VAL, ticks);
+		EFX_POPULATE_DWORD_3(timer_cmd, ERF_DZ_TC_TIMER_MODE, mode,
+				     ERF_DZ_TC_TIMER_VAL, ticks,
+				     ERF_FZ_TC_TMR_REL_VAL, ticks);
 		efx_writed_page(efx, &timer_cmd, ER_DZ_EVQ_TMR,
 				channel->channel);
 	}
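The third field packs a copy of the tick count into the new relative-timer bits defined later in this patch (ERF_FZ_TC_TMR_REL_VAL, LBN 16, width 14). Worked packing example: with ticks = 0x30 and mode = 3, the dword becomes 0x30 | (3 << 14) | (0x30 << 16) = 0x0030c030.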
@@ -2263,12 +2429,25 @@ static void efx_ef10_tx_init(struct efx_tx_queue *tx_queue)
 	int i;
 	BUILD_BUG_ON(MC_CMD_INIT_TXQ_OUT_LEN != 0);
 
+	/* Only attempt to enable TX timestamping if we have the license for it,
+	 * otherwise TXQ init will fail
+	 */
+	if (!(nic_data->licensed_features &
+	      (1 << LICENSED_V3_FEATURES_TX_TIMESTAMPS_LBN))) {
+		tx_queue->timestamping = false;
+		/* Disable sync events on this channel. */
+		if (efx->type->ptp_set_ts_sync_events)
+			efx->type->ptp_set_ts_sync_events(efx, false, false);
+	}
+
 	/* TSOv2 is a limited resource that can only be configured on a limited
 	 * number of queues. TSO without checksum offload is not really a thing,
 	 * so we only enable it for those queues.
+	 * TSOv2 cannot be used with hardware timestamping.
 	 */
 	if (csum_offload && (nic_data->datapath_caps2 &
-			(1 << MC_CMD_GET_CAPABILITIES_V2_OUT_TX_TSO_V2_LBN))) {
+			(1 << MC_CMD_GET_CAPABILITIES_V2_OUT_TX_TSO_V2_LBN)) &&
+	    !tx_queue->timestamping) {
 		tso_v2 = true;
 		netif_dbg(efx, hw, efx->net_dev, "Using TSOv2 for channel %u\n",
 				channel->channel);
@@ -2294,14 +2473,16 @@ static void efx_ef10_tx_init(struct efx_tx_queue *tx_queue)
 	inlen = MC_CMD_INIT_TXQ_IN_LEN(entries);
 
 	do {
-		MCDI_POPULATE_DWORD_3(inbuf, INIT_TXQ_IN_FLAGS,
+		MCDI_POPULATE_DWORD_4(inbuf, INIT_TXQ_IN_FLAGS,
 				/* This flag was removed from mcdi_pcol.h for
 				 * the non-_EXT version of INIT_TXQ.  However,
 				 * firmware still honours it.
 				 */
 				INIT_TXQ_EXT_IN_FLAG_TSOV2_EN, tso_v2,
 				INIT_TXQ_IN_FLAG_IP_CSUM_DIS, !csum_offload,
-				INIT_TXQ_IN_FLAG_TCP_CSUM_DIS, !csum_offload);
+				INIT_TXQ_IN_FLAG_TCP_CSUM_DIS, !csum_offload,
+				INIT_TXQ_EXT_IN_FLAG_TIMESTAMP,
+						tx_queue->timestamping);
 
 		rc = efx_mcdi_rpc_quiet(efx, MC_CMD_INIT_TXQ, inbuf, inlen,
 					NULL, 0, NULL);
@@ -2327,12 +2508,13 @@ static void efx_ef10_tx_init(struct efx_tx_queue *tx_queue)
 	tx_queue->buffer[0].flags = EFX_TX_BUF_OPTION;
 	tx_queue->insert_count = 1;
 	txd = efx_tx_desc(tx_queue, 0);
-	EFX_POPULATE_QWORD_4(*txd,
+	EFX_POPULATE_QWORD_5(*txd,
 			     ESF_DZ_TX_DESC_IS_OPT, true,
 			     ESF_DZ_TX_OPTION_TYPE,
 			     ESE_DZ_TX_OPTION_DESC_CRC_CSUM,
 			     ESF_DZ_TX_OPTION_UDP_TCP_CSUM, csum_offload,
-			     ESF_DZ_TX_OPTION_IP_CSUM, csum_offload);
+			     ESF_DZ_TX_OPTION_IP_CSUM, csum_offload,
+			     ESF_DZ_TX_TIMESTAMP, tx_queue->timestamping);
 	tx_queue->write_count = 1;
 
 	if (tso_v2) {
@@ -3233,8 +3415,8 @@ static u16 efx_ef10_handle_rx_event_errors(struct efx_channel *channel,
 		if (unlikely(rx_encap_hdr != ESE_EZ_ENCAP_HDR_VXLAN &&
 			     ((rx_l3_class != ESE_DZ_L3_CLASS_IP4 &&
 			       rx_l3_class != ESE_DZ_L3_CLASS_IP6) ||
-			      (rx_l4_class != ESE_DZ_L4_CLASS_TCP &&
-			       rx_l4_class != ESE_DZ_L4_CLASS_UDP))))
+			      (rx_l4_class != ESE_FZ_L4_CLASS_TCP &&
+			       rx_l4_class != ESE_FZ_L4_CLASS_UDP))))
 			netdev_WARN(efx->net_dev,
 				    "invalid class for RX_TCPUDP_CKSUM_ERR: event="
 				    EFX_QWORD_FMT "\n",
@@ -3271,8 +3453,8 @@ static u16 efx_ef10_handle_rx_event_errors(struct efx_channel *channel,
 				    EFX_QWORD_VAL(*event));
 		else if (unlikely((rx_l3_class != ESE_DZ_L3_CLASS_IP4 &&
 				   rx_l3_class != ESE_DZ_L3_CLASS_IP6) ||
-				  (rx_l4_class != ESE_DZ_L4_CLASS_TCP &&
-				   rx_l4_class != ESE_DZ_L4_CLASS_UDP)))
+				  (rx_l4_class != ESE_FZ_L4_CLASS_TCP &&
+				   rx_l4_class != ESE_FZ_L4_CLASS_UDP)))
 			netdev_WARN(efx->net_dev,
 				    "invalid class for RX_TCP_UDP_INNER_CHKSUM_ERR: event="
 				    EFX_QWORD_FMT "\n",
@@ -3307,7 +3489,7 @@ static int efx_ef10_handle_rx_event(struct efx_channel *channel,
 	next_ptr_lbits = EFX_QWORD_FIELD(*event, ESF_DZ_RX_DSC_PTR_LBITS);
 	rx_queue_label = EFX_QWORD_FIELD(*event, ESF_DZ_RX_QLABEL);
 	rx_l3_class = EFX_QWORD_FIELD(*event, ESF_DZ_RX_L3_CLASS);
-	rx_l4_class = EFX_QWORD_FIELD(*event, ESF_DZ_RX_L4_CLASS);
+	rx_l4_class = EFX_QWORD_FIELD(*event, ESF_FZ_RX_L4_CLASS);
 	rx_cont = EFX_QWORD_FIELD(*event, ESF_DZ_RX_CONT);
 	rx_encap_hdr =
 		nic_data->datapath_caps &
@@ -3385,8 +3567,8 @@ static int efx_ef10_handle_rx_event(struct efx_channel *channel,
 							 rx_l3_class, rx_l4_class,
 							 event);
 	} else {
-		bool tcpudp = rx_l4_class == ESE_DZ_L4_CLASS_TCP ||
-			      rx_l4_class == ESE_DZ_L4_CLASS_UDP;
+		bool tcpudp = rx_l4_class == ESE_FZ_L4_CLASS_TCP ||
+			      rx_l4_class == ESE_FZ_L4_CLASS_UDP;
 
 		switch (rx_encap_hdr) {
 		case ESE_EZ_ENCAP_HDR_VXLAN: /* VxLAN or GENEVE */
@@ -3407,7 +3589,7 @@ static int efx_ef10_handle_rx_event(struct efx_channel *channel,
 		}
 	}
 
-	if (rx_l4_class == ESE_DZ_L4_CLASS_TCP)
+	if (rx_l4_class == ESE_FZ_L4_CLASS_TCP)
 		flags |= EFX_RX_PKT_TCP;
 
 	channel->irq_mod_score += 2 * n_packets;
@@ -3427,31 +3609,92 @@ static int efx_ef10_handle_rx_event(struct efx_channel *channel,
 	return n_packets;
 }
 
-static int
+static u32 efx_ef10_extract_event_ts(efx_qword_t *event)
+{
+	u32 tstamp;
+
+	tstamp = EFX_QWORD_FIELD(*event, TX_TIMESTAMP_EVENT_TSTAMP_DATA_HI);
+	tstamp <<= 16;
+	tstamp |= EFX_QWORD_FIELD(*event, TX_TIMESTAMP_EVENT_TSTAMP_DATA_LO);
+
+	return tstamp;
+}
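Worked example: DATA_HI = 0x0001 and DATA_LO = 0x86a0 assemble to (0x0001 << 16) | 0x86a0 = 100000; each event carries one 32-bit half of the 64-bit timestamp, itself delivered as two 16-bit event fields.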
+
+static void
 efx_ef10_handle_tx_event(struct efx_channel *channel, efx_qword_t *event)
 {
 	struct efx_nic *efx = channel->efx;
 	struct efx_tx_queue *tx_queue;
 	unsigned int tx_ev_desc_ptr;
 	unsigned int tx_ev_q_label;
-	int tx_descs = 0;
+	unsigned int tx_ev_type;
+	u64 ts_part;
 
 	if (unlikely(READ_ONCE(efx->reset_pending)))
-		return 0;
+		return;
 
 	if (unlikely(EFX_QWORD_FIELD(*event, ESF_DZ_TX_DROP_EVENT)))
-		return 0;
+		return;
 
-	/* Transmit completion */
-	tx_ev_desc_ptr = EFX_QWORD_FIELD(*event, ESF_DZ_TX_DESCR_INDX);
+	/* Get the transmit queue */
 	tx_ev_q_label = EFX_QWORD_FIELD(*event, ESF_DZ_TX_QLABEL);
 	tx_queue = efx_channel_get_tx_queue(channel,
 					    tx_ev_q_label % EFX_TXQ_TYPES);
-	tx_descs = ((tx_ev_desc_ptr + 1 - tx_queue->read_count) &
-		    tx_queue->ptr_mask);
-	efx_xmit_done(tx_queue, tx_ev_desc_ptr & tx_queue->ptr_mask);
 
-	return tx_descs;
+	if (!tx_queue->timestamping) {
+		/* Transmit completion */
+		tx_ev_desc_ptr = EFX_QWORD_FIELD(*event, ESF_DZ_TX_DESCR_INDX);
+		efx_xmit_done(tx_queue, tx_ev_desc_ptr & tx_queue->ptr_mask);
+		return;
+	}
+
+	/* Transmit timestamps are only available for the 8XXX series. They result
+	 * in three events per packet. These occur in order, and are:
+	 *  - the normal completion event
+	 *  - the low part of the timestamp
+	 *  - the high part of the timestamp
+	 *
+	 * Each part of the timestamp is itself split across two 16-bit
+	 * fields in the event.
+	 */
+	tx_ev_type = EFX_QWORD_FIELD(*event, ESF_EZ_TX_SOFT1);
+
+	switch (tx_ev_type) {
+	case TX_TIMESTAMP_EVENT_TX_EV_COMPLETION:
+		/* In case of a queue flush or FLR, we might have received
+		 * the previous TX completion event but not the timestamp
+		 * events.
+		 */
+		if (tx_queue->completed_desc_ptr != tx_queue->ptr_mask)
+			efx_xmit_done(tx_queue, tx_queue->completed_desc_ptr);
+
+		tx_ev_desc_ptr = EFX_QWORD_FIELD(*event,
+						 ESF_DZ_TX_DESCR_INDX);
+		tx_queue->completed_desc_ptr =
+					tx_ev_desc_ptr & tx_queue->ptr_mask;
+		break;
+
+	case TX_TIMESTAMP_EVENT_TX_EV_TSTAMP_LO:
+		ts_part = efx_ef10_extract_event_ts(event);
+		tx_queue->completed_timestamp_minor = ts_part;
+		break;
+
+	case TX_TIMESTAMP_EVENT_TX_EV_TSTAMP_HI:
+		ts_part = efx_ef10_extract_event_ts(event);
+		tx_queue->completed_timestamp_major = ts_part;
+
+		efx_xmit_done(tx_queue, tx_queue->completed_desc_ptr);
+		tx_queue->completed_desc_ptr = tx_queue->ptr_mask;
+		break;
+
+	default:
+		netif_err(efx, hw, efx->net_dev,
+			  "channel %d unknown tx event type %d (data "
+			  EFX_QWORD_FMT ")\n",
+			  channel->channel, tx_ev_type,
+			  EFX_QWORD_VAL(*event));
+		break;
+	}
 }
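Once the _HI event lands, completed_timestamp_major/minor hold the two 32-bit halves; turning them into a host timestamp is the PTP code's job, not this function's. Purely as an illustration of the shape of that conversion, assuming major counts seconds and minor counts sub-second ticks (both assumptions):

static ktime_t example_tx_ts_to_ktime(u32 major, u32 minor,
				      u32 minor_ticks_per_sec)
{
	u64 ns = (u64)minor * NSEC_PER_SEC;

	do_div(ns, minor_ticks_per_sec);	/* ticks -> nanoseconds */
	return ktime_set(major, ns);
}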
 
 static void
@@ -3513,7 +3756,6 @@ static int efx_ef10_ev_process(struct efx_channel *channel, int quota)
 	efx_qword_t event, *p_event;
 	unsigned int read_ptr;
 	int ev_code;
-	int tx_descs = 0;
 	int spent = 0;
 
 	if (quota <= 0)
@@ -3553,13 +3795,7 @@ static int efx_ef10_ev_process(struct efx_channel *channel, int quota)
 			}
 			break;
 		case ESE_DZ_EV_CODE_TX_EV:
-			tx_descs += efx_ef10_handle_tx_event(channel, &event);
-			if (tx_descs > efx->txq_entries) {
-				spent = quota;
-				goto out;
-			} else if (++spent == quota) {
-				goto out;
-			}
+			efx_ef10_handle_tx_event(channel, &event);
 			break;
 		case ESE_DZ_EV_CODE_DRIVER_EV:
 			efx_ef10_handle_driver_event(channel, &event);
@@ -6034,7 +6270,8 @@ static int efx_ef10_ptp_set_ts_sync_events(struct efx_nic *efx, bool en,
 	      efx_ef10_rx_enable_timestamping :
 	      efx_ef10_rx_disable_timestamping;
 
-	efx_for_each_channel(channel, efx) {
+	channel = efx_ptp_channel(efx);
+	if (channel) {
 		int rc = set(channel, temp);
 		if (en && rc != 0) {
 			efx_ef10_ptp_set_ts_sync_events(efx, false, temp);
@@ -6392,7 +6629,7 @@ out_unlock:
 
 const struct efx_nic_type efx_hunt_a0_vf_nic_type = {
 	.is_vf = true,
-	.mem_bar = EFX_MEM_VF_BAR,
+	.mem_bar = efx_ef10_vf_mem_bar,
 	.mem_map_size = efx_ef10_mem_map_size,
 	.probe = efx_ef10_probe_vf,
 	.remove = efx_ef10_remove,
@@ -6500,7 +6737,7 @@ const struct efx_nic_type efx_hunt_a0_vf_nic_type = {
 
 const struct efx_nic_type efx_hunt_a0_nic_type = {
 	.is_vf = false,
-	.mem_bar = EFX_MEM_BAR,
+	.mem_bar = efx_ef10_pf_mem_bar,
 	.mem_map_size = efx_ef10_mem_map_size,
 	.probe = efx_ef10_probe_pf,
 	.remove = efx_ef10_remove,
diff --git a/drivers/net/ethernet/sfc/ef10_regs.h b/drivers/net/ethernet/sfc/ef10_regs.h
index 2c4bf9476c37..6a56778cf06c 100644
--- a/drivers/net/ethernet/sfc/ef10_regs.h
+++ b/drivers/net/ethernet/sfc/ef10_regs.h
@@ -1,6 +1,6 @@
 /****************************************************************************
  * Driver for Solarflare network controllers and boards
- * Copyright 2012-2015 Solarflare Communications Inc.
+ * Copyright 2012-2017 Solarflare Communications Inc.
  *
  * This program is free software; you can redistribute it and/or modify it
  * under the terms of the GNU General Public License version 2 as published
@@ -79,6 +79,8 @@
 #define	ER_DZ_EVQ_TMR 0x00000420
 #define	ER_DZ_EVQ_TMR_STEP 8192
 #define	ER_DZ_EVQ_TMR_ROWS 2048
+#define	ERF_FZ_TC_TMR_REL_VAL_LBN 16
+#define	ERF_FZ_TC_TMR_REL_VAL_WIDTH 14
 #define	ERF_DZ_TC_TIMER_MODE_LBN 14
 #define	ERF_DZ_TC_TIMER_MODE_WIDTH 2
 #define	ERF_DZ_TC_TIMER_VAL_LBN 0
@@ -159,16 +161,24 @@
 #define	ESF_DZ_RX_EV_SOFT2_WIDTH 2
 #define	ESF_DZ_RX_DSC_PTR_LBITS_LBN 48
 #define	ESF_DZ_RX_DSC_PTR_LBITS_WIDTH 4
-#define	ESF_DZ_RX_L4_CLASS_LBN 45
-#define	ESF_DZ_RX_L4_CLASS_WIDTH 3
-#define	ESE_DZ_L4_CLASS_RSVD7 7
-#define	ESE_DZ_L4_CLASS_RSVD6 6
-#define	ESE_DZ_L4_CLASS_RSVD5 5
-#define	ESE_DZ_L4_CLASS_RSVD4 4
-#define	ESE_DZ_L4_CLASS_RSVD3 3
-#define	ESE_DZ_L4_CLASS_UDP 2
-#define	ESE_DZ_L4_CLASS_TCP 1
-#define	ESE_DZ_L4_CLASS_UNKNOWN 0
+#define	ESF_DE_RX_L4_CLASS_LBN 45
+#define	ESF_DE_RX_L4_CLASS_WIDTH 3
+#define	ESE_DE_L4_CLASS_RSVD7 7
+#define	ESE_DE_L4_CLASS_RSVD6 6
+#define	ESE_DE_L4_CLASS_RSVD5 5
+#define	ESE_DE_L4_CLASS_RSVD4 4
+#define	ESE_DE_L4_CLASS_RSVD3 3
+#define	ESE_DE_L4_CLASS_UDP 2
+#define	ESE_DE_L4_CLASS_TCP 1
+#define	ESE_DE_L4_CLASS_UNKNOWN 0
+#define	ESF_FZ_RX_FASTPD_INDCTR_LBN 47
+#define	ESF_FZ_RX_FASTPD_INDCTR_WIDTH 1
+#define	ESF_FZ_RX_L4_CLASS_LBN 45
+#define	ESF_FZ_RX_L4_CLASS_WIDTH 2
+#define	ESE_FZ_L4_CLASS_RSVD3 3
+#define	ESE_FZ_L4_CLASS_UDP 2
+#define	ESE_FZ_L4_CLASS_TCP 1
+#define	ESE_FZ_L4_CLASS_UNKNOWN 0
 #define	ESF_DZ_RX_L3_CLASS_LBN 42
 #define	ESF_DZ_RX_L3_CLASS_WIDTH 3
 #define	ESE_DZ_L3_CLASS_RSVD7 7
@@ -215,6 +225,8 @@
 #define	ESF_EZ_RX_ABORT_WIDTH 1
 #define	ESF_DZ_RX_ECC_ERR_LBN 29
 #define	ESF_DZ_RX_ECC_ERR_WIDTH 1
+#define	ESF_DZ_RX_TRUNC_ERR_LBN 29
+#define	ESF_DZ_RX_TRUNC_ERR_WIDTH 1
 #define	ESF_DZ_RX_CRC1_ERR_LBN 28
 #define	ESF_DZ_RX_CRC1_ERR_WIDTH 1
 #define	ESF_DZ_RX_CRC0_ERR_LBN 27
@@ -332,6 +344,8 @@
 #define	ESE_DZ_TX_OPTION_DESC_CRC_CSUM 0
 #define	ESF_DZ_TX_TSO_OPTION_TYPE_LBN 56
 #define	ESF_DZ_TX_TSO_OPTION_TYPE_WIDTH 4
+#define	ESE_DZ_TX_TSO_OPTION_DESC_FATSO2B 3
+#define	ESE_DZ_TX_TSO_OPTION_DESC_FATSO2A 2
 #define	ESE_DZ_TX_TSO_OPTION_DESC_ENCAP 1
 #define	ESE_DZ_TX_TSO_OPTION_DESC_NORMAL 0
 #define	ESF_DZ_TX_TSO_TCP_FLAGS_LBN 48
@@ -341,7 +355,7 @@
 #define	ESF_DZ_TX_TSO_TCP_SEQNO_LBN 0
 #define	ESF_DZ_TX_TSO_TCP_SEQNO_WIDTH 32
 
-/* TX_TSO_FATSO2A_DESC */
+/* TX_TSO_V2_DESC_A */
 #define	ESF_DZ_TX_DESC_IS_OPT_LBN 63
 #define	ESF_DZ_TX_DESC_IS_OPT_WIDTH 1
 #define	ESF_DZ_TX_OPTION_TYPE_LBN 60
@@ -360,8 +374,7 @@
 #define	ESF_DZ_TX_TSO_TCP_SEQNO_LBN 0
 #define	ESF_DZ_TX_TSO_TCP_SEQNO_WIDTH 32
 
-
-/* TX_TSO_FATSO2B_DESC */
+/* TX_TSO_V2_DESC_B */
 #define	ESF_DZ_TX_DESC_IS_OPT_LBN 63
 #define	ESF_DZ_TX_DESC_IS_OPT_WIDTH 1
 #define	ESF_DZ_TX_OPTION_TYPE_LBN 60
@@ -375,11 +388,10 @@
 #define	ESE_DZ_TX_TSO_OPTION_DESC_FATSO2A 2
 #define	ESE_DZ_TX_TSO_OPTION_DESC_ENCAP 1
 #define	ESE_DZ_TX_TSO_OPTION_DESC_NORMAL 0
-#define	ESF_DZ_TX_TSO_OUTER_IP_ID_LBN 0
-#define	ESF_DZ_TX_TSO_OUTER_IP_ID_WIDTH 16
 #define	ESF_DZ_TX_TSO_TCP_MSS_LBN 32
 #define	ESF_DZ_TX_TSO_TCP_MSS_WIDTH 16
-
+#define	ESF_DZ_TX_TSO_OUTER_IPID_LBN 0
+#define	ESF_DZ_TX_TSO_OUTER_IPID_WIDTH 16
 
 /*************************************************************************/
 
diff --git a/drivers/net/ethernet/sfc/efx.c b/drivers/net/ethernet/sfc/efx.c
index e3c492fcaff0..16757cfc5b29 100644
--- a/drivers/net/ethernet/sfc/efx.c
+++ b/drivers/net/ethernet/sfc/efx.c
@@ -27,6 +27,7 @@
 #include <net/udp_tunnel.h>
 #include "efx.h"
 #include "nic.h"
+#include "io.h"
 #include "selftest.h"
 #include "sriov.h"
 
@@ -895,12 +896,20 @@ void efx_schedule_slow_fill(struct efx_rx_queue *rx_queue)
 	mod_timer(&rx_queue->slow_fill, jiffies + msecs_to_jiffies(100));
 }
 
+static bool efx_default_channel_want_txqs(struct efx_channel *channel)
+{
+	return channel->channel - channel->efx->tx_channel_offset <
+		channel->efx->n_tx_channels;
+}
+
 static const struct efx_channel_type efx_default_channel_type = {
 	.pre_probe		= efx_channel_dummy_op_int,
 	.post_remove		= efx_channel_dummy_op_void,
 	.get_name		= efx_get_channel_name,
 	.copy			= efx_copy_channel,
+	.want_txqs		= efx_default_channel_want_txqs,
 	.keep_eventq		= false,
+	.want_pio		= true,
 };
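Worked example of the new callback (numbers assumed): with tx_channel_offset = 1 and n_tx_channels = 4, efx_default_channel_want_txqs returns true for channels 1-4 only; an extra channel such as PTP supplies its own channel type whose want_txqs opts in, and is then counted through n_extra_tx_channels elsewhere in this patch.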
 
 int efx_channel_dummy_op_int(struct efx_channel *channel)
@@ -952,31 +961,42 @@ void efx_link_status_changed(struct efx_nic *efx)
 		netif_info(efx, link, efx->net_dev, "link down\n");
 }
 
-void efx_link_set_advertising(struct efx_nic *efx, u32 advertising)
+void efx_link_set_advertising(struct efx_nic *efx,
+			      const unsigned long *advertising)
 {
-	efx->link_advertising = advertising;
-	if (advertising) {
-		if (advertising & ADVERTISED_Pause)
-			efx->wanted_fc |= (EFX_FC_TX | EFX_FC_RX);
-		else
-			efx->wanted_fc &= ~(EFX_FC_TX | EFX_FC_RX);
-		if (advertising & ADVERTISED_Asym_Pause)
-			efx->wanted_fc ^= EFX_FC_TX;
-	}
+	memcpy(efx->link_advertising, advertising,
+	       sizeof(__ETHTOOL_DECLARE_LINK_MODE_MASK()));
+
+	efx->link_advertising[0] |= ADVERTISED_Autoneg;
+	if (advertising[0] & ADVERTISED_Pause)
+		efx->wanted_fc |= (EFX_FC_TX | EFX_FC_RX);
+	else
+		efx->wanted_fc &= ~(EFX_FC_TX | EFX_FC_RX);
+	if (advertising[0] & ADVERTISED_Asym_Pause)
+		efx->wanted_fc ^= EFX_FC_TX;
+}
+
+/* Equivalent to efx_link_set_advertising with all-zeroes, except it does not
+ * force the Autoneg bit on.
+ */
+void efx_link_clear_advertising(struct efx_nic *efx)
+{
+	bitmap_zero(efx->link_advertising, __ETHTOOL_LINK_MODE_MASK_NBITS);
+	efx->wanted_fc &= ~(EFX_FC_TX | EFX_FC_RX);
 }
 
 void efx_link_set_wanted_fc(struct efx_nic *efx, u8 wanted_fc)
 {
 	efx->wanted_fc = wanted_fc;
-	if (efx->link_advertising) {
+	if (efx->link_advertising[0]) {
 		if (wanted_fc & EFX_FC_RX)
-			efx->link_advertising |= (ADVERTISED_Pause |
-						  ADVERTISED_Asym_Pause);
+			efx->link_advertising[0] |= (ADVERTISED_Pause |
+						     ADVERTISED_Asym_Pause);
 		else
-			efx->link_advertising &= ~(ADVERTISED_Pause |
-						   ADVERTISED_Asym_Pause);
+			efx->link_advertising[0] &= ~(ADVERTISED_Pause |
+						      ADVERTISED_Asym_Pause);
 		if (wanted_fc & EFX_FC_TX)
-			efx->link_advertising ^= ADVERTISED_Asym_Pause;
+			efx->link_advertising[0] ^= ADVERTISED_Asym_Pause;
 	}
 }
 
@@ -1248,7 +1268,7 @@ static int efx_init_io(struct efx_nic *efx)
 
 	netif_dbg(efx, probe, efx->net_dev, "initialising I/O\n");
 
-	bar = efx->type->mem_bar;
+	bar = efx->type->mem_bar(efx);
 
 	rc = pci_enable_device(pci_dev);
 	if (rc) {
@@ -1323,7 +1343,7 @@ static void efx_fini_io(struct efx_nic *efx)
 	}
 
 	if (efx->membase_phys) {
-		bar = efx->type->mem_bar;
+		bar = efx->type->mem_bar(efx);
 		pci_release_region(efx->pci_dev, bar);
 		efx->membase_phys = 0;
 	}
@@ -1489,6 +1509,7 @@ static int efx_probe_interrupts(struct efx_nic *efx)
 	}
 
 	/* Assign extra channels if possible */
+	efx->n_extra_tx_channels = 0;
 	j = efx->n_channels;
 	for (i = 0; i < EFX_MAX_EXTRA_CHANNELS; i++) {
 		if (!efx->extra_channel_type[i])
@@ -1500,6 +1521,8 @@ static int efx_probe_interrupts(struct efx_nic *efx)
 			--j;
 			efx_get_channel(efx, j)->type =
 				efx->extra_channel_type[i];
+			if (efx_channel_has_tx_queues(efx_get_channel(efx, j)))
+				efx->n_extra_tx_channels++;
 		}
 	}
 
@@ -2909,6 +2932,10 @@ static const struct pci_device_id efx_pci_table[] = {
 	 .driver_data = (unsigned long) &efx_hunt_a0_nic_type},
 	{PCI_DEVICE(PCI_VENDOR_ID_SOLARFLARE, 0x1a03),  /* SFC9220 VF */
 	 .driver_data = (unsigned long) &efx_hunt_a0_vf_nic_type},
+	{PCI_DEVICE(PCI_VENDOR_ID_SOLARFLARE, 0x0b03),  /* SFC9250 PF */
+	 .driver_data = (unsigned long) &efx_hunt_a0_nic_type},
+	{PCI_DEVICE(PCI_VENDOR_ID_SOLARFLARE, 0x1b03),  /* SFC9250 VF */
+	 .driver_data = (unsigned long) &efx_hunt_a0_vf_nic_type},
 	{0}			/* end of list */
 };
 
@@ -2977,6 +3004,9 @@ static int efx_init_struct(struct efx_nic *efx,
 	efx->rx_packet_ts_offset =
 		efx->type->rx_ts_offset - efx->type->rx_prefix_size;
 	spin_lock_init(&efx->stats_lock);
+	efx->vi_stride = EFX_DEFAULT_VI_STRIDE;
+	efx->num_mac_stats = MC_CMD_MAC_NSTATS;
+	BUILD_BUG_ON(MC_CMD_MAC_NSTATS - 1 != MC_CMD_MAC_GENERATION_END);
 	mutex_init(&efx->mac_lock);
 	efx->phy_op = &efx_dummy_phy_operations;
 	efx->mdio.dev = net_dev;
diff --git a/drivers/net/ethernet/sfc/efx.h b/drivers/net/ethernet/sfc/efx.h
index 52c84b782901..0cddc5ad77b1 100644
--- a/drivers/net/ethernet/sfc/efx.h
+++ b/drivers/net/ethernet/sfc/efx.h
@@ -14,11 +14,6 @@
 #include "net_driver.h"
 #include "filter.h"
 
-/* All controllers use BAR 0 for I/O space and BAR 2(&3) for memory */
-/* All VFs use BAR 0/1 for memory */
-#define EFX_MEM_BAR 2
-#define EFX_MEM_VF_BAR 0
-
 int efx_net_open(struct net_device *net_dev);
 int efx_net_stop(struct net_device *net_dev);
 
@@ -263,7 +258,9 @@ static inline void efx_schedule_channel_irq(struct efx_channel *channel)
 }
 
 void efx_link_status_changed(struct efx_nic *efx);
-void efx_link_set_advertising(struct efx_nic *efx, u32);
+void efx_link_set_advertising(struct efx_nic *efx,
+			      const unsigned long *advertising);
+void efx_link_clear_advertising(struct efx_nic *efx);
 void efx_link_set_wanted_fc(struct efx_nic *efx, u8);
 
 static inline void efx_device_detach_sync(struct efx_nic *efx)
diff --git a/drivers/net/ethernet/sfc/ethtool.c b/drivers/net/ethernet/sfc/ethtool.c
index 3747b5644110..4db2dc2bf52f 100644
--- a/drivers/net/ethernet/sfc/ethtool.c
+++ b/drivers/net/ethernet/sfc/ethtool.c
@@ -720,7 +720,7 @@ static int efx_ethtool_set_pauseparam(struct net_device *net_dev,
 		goto out;
 	}
 
-	if ((wanted_fc & EFX_FC_AUTO) && !efx->link_advertising) {
+	if ((wanted_fc & EFX_FC_AUTO) && !efx->link_advertising[0]) {
 		netif_dbg(efx, drv, efx->net_dev,
 			  "Autonegotiation is disabled\n");
 		rc = -EINVAL;
@@ -732,10 +732,10 @@ static int efx_ethtool_set_pauseparam(struct net_device *net_dev,
 	    (wanted_fc & EFX_FC_TX) && !(efx->wanted_fc & EFX_FC_TX))
 		efx->type->prepare_enable_fc_tx(efx);
 
-	old_adv = efx->link_advertising;
+	old_adv = efx->link_advertising[0];
 	old_fc = efx->wanted_fc;
 	efx_link_set_wanted_fc(efx, wanted_fc);
-	if (efx->link_advertising != old_adv ||
+	if (efx->link_advertising[0] != old_adv ||
 	    (efx->wanted_fc ^ old_fc) & EFX_FC_AUTO) {
 		rc = efx->phy_op->reconfigure(efx);
 		if (rc) {
diff --git a/drivers/net/ethernet/sfc/farch.c b/drivers/net/ethernet/sfc/farch.c
index 5334dc83d926..266b9bee1f3a 100644
--- a/drivers/net/ethernet/sfc/farch.c
+++ b/drivers/net/ethernet/sfc/farch.c
@@ -818,17 +818,16 @@ static void efx_farch_magic_event(struct efx_channel *channel, u32 magic)
  * The NIC batches TX completion events; the message we receive is of
  * the form "complete all TX events up to this index".
  */
-static int
+static void
 efx_farch_handle_tx_event(struct efx_channel *channel, efx_qword_t *event)
 {
 	unsigned int tx_ev_desc_ptr;
 	unsigned int tx_ev_q_label;
 	struct efx_tx_queue *tx_queue;
 	struct efx_nic *efx = channel->efx;
-	int tx_packets = 0;
 
 	if (unlikely(READ_ONCE(efx->reset_pending)))
-		return 0;
+		return;
 
 	if (likely(EFX_QWORD_FIELD(*event, FSF_AZ_TX_EV_COMP))) {
 		/* Transmit completion */
@@ -836,8 +835,6 @@ efx_farch_handle_tx_event(struct efx_channel *channel, efx_qword_t *event)
 		tx_ev_q_label = EFX_QWORD_FIELD(*event, FSF_AZ_TX_EV_Q_LABEL);
 		tx_queue = efx_channel_get_tx_queue(
 			channel, tx_ev_q_label % EFX_TXQ_TYPES);
-		tx_packets = ((tx_ev_desc_ptr - tx_queue->read_count) &
-			      tx_queue->ptr_mask);
 		efx_xmit_done(tx_queue, tx_ev_desc_ptr);
 	} else if (EFX_QWORD_FIELD(*event, FSF_AZ_TX_EV_WQ_FF_FULL)) {
 		/* Rewrite the FIFO write pointer */
@@ -856,8 +853,6 @@ efx_farch_handle_tx_event(struct efx_channel *channel, efx_qword_t *event)
 			  EFX_QWORD_FMT"\n", channel->channel,
 			  EFX_QWORD_VAL(*event));
 	}
-
-	return tx_packets;
 }
 
 /* Detect errors included in the rx_evt_pkt_ok bit. */
@@ -1090,7 +1085,7 @@ efx_farch_handle_tx_flush_done(struct efx_nic *efx, efx_qword_t *event)
 	int qid;
 
 	qid = EFX_QWORD_FIELD(*event, FSF_AZ_DRIVER_EV_SUBDATA);
-	if (qid < EFX_TXQ_TYPES * efx->n_tx_channels) {
+	if (qid < EFX_TXQ_TYPES * (efx->n_tx_channels + efx->n_extra_tx_channels)) {
 		tx_queue = efx_get_tx_queue(efx, qid / EFX_TXQ_TYPES,
 					    qid % EFX_TXQ_TYPES);
 		if (atomic_cmpxchg(&tx_queue->flush_outstanding, 1, 0)) {
@@ -1270,7 +1265,6 @@ int efx_farch_ev_process(struct efx_channel *channel, int budget)
 	unsigned int read_ptr;
 	efx_qword_t event, *p_event;
 	int ev_code;
-	int tx_packets = 0;
 	int spent = 0;
 
 	if (budget <= 0)
@@ -1304,12 +1298,7 @@ int efx_farch_ev_process(struct efx_channel *channel, int budget)
 				goto out;
 			break;
 		case FSE_AZ_EV_CODE_TX_EV:
-			tx_packets += efx_farch_handle_tx_event(channel,
-								&event);
-			if (tx_packets > efx->txq_entries) {
-				spent = budget;
-				goto out;
-			}
+			efx_farch_handle_tx_event(channel, &event);
 			break;
 		case FSE_AZ_EV_CODE_DRV_GEN_EV:
 			efx_farch_handle_generated_event(channel, &event);
@@ -1680,20 +1669,21 @@ void efx_farch_rx_pull_indir_table(struct efx_nic *efx)
  */
 void efx_farch_dimension_resources(struct efx_nic *efx, unsigned sram_lim_qw)
 {
-	unsigned vi_count, buftbl_min;
+	unsigned vi_count, buftbl_min, total_tx_channels;
 
 #ifdef CONFIG_SFC_SRIOV
 	struct siena_nic_data *nic_data = efx->nic_data;
 #endif
 
+	total_tx_channels = efx->n_tx_channels + efx->n_extra_tx_channels;
 	/* Account for the buffer table entries backing the datapath channels
 	 * and the descriptor caches for those channels.
 	 */
 	buftbl_min = ((efx->n_rx_channels * EFX_MAX_DMAQ_SIZE +
-		       efx->n_tx_channels * EFX_TXQ_TYPES * EFX_MAX_DMAQ_SIZE +
+		       total_tx_channels * EFX_TXQ_TYPES * EFX_MAX_DMAQ_SIZE +
 		       efx->n_channels * EFX_MAX_EVQ_SIZE)
 		      * sizeof(efx_qword_t) / EFX_BUF_SIZE);
-	vi_count = max(efx->n_channels, efx->n_tx_channels * EFX_TXQ_TYPES);
+	vi_count = max(efx->n_channels, total_tx_channels * EFX_TXQ_TYPES);
 
 #ifdef CONFIG_SFC_SRIOV
 	if (efx->type->sriov_wanted) {
diff --git a/drivers/net/ethernet/sfc/io.h b/drivers/net/ethernet/sfc/io.h
index afb94aa2c15e..89563170af52 100644
--- a/drivers/net/ethernet/sfc/io.h
+++ b/drivers/net/ethernet/sfc/io.h
@@ -222,18 +222,21 @@ static inline void efx_reado_table(struct efx_nic *efx, efx_oword_t *value,
 	efx_reado(efx, value, reg + index * sizeof(efx_oword_t));
 }
 
-/* Page size used as step between per-VI registers */
-#define EFX_VI_PAGE_SIZE 0x2000
+/* default VI stride (step between per-VI registers) is 8K */
+#define EFX_DEFAULT_VI_STRIDE 0x2000
 
 /* Calculate offset to page-mapped register */
-#define EFX_PAGED_REG(page, reg) \
-	((page) * EFX_VI_PAGE_SIZE + (reg))
+static inline unsigned int efx_paged_reg(struct efx_nic *efx, unsigned int page,
+					 unsigned int reg)
+{
+	return page * efx->vi_stride + reg;
+}
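Worked example: efx_paged_reg(efx, 2, ER_DZ_EVQ_TMR) with a 16K stride is 2 * 16384 + 0x420 = 0x8420, where the old fixed 8K EFX_VI_PAGE_SIZE would have produced 0x4420; the register offset within the window is unchanged, only the window spacing scales.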
 
 /* Write the whole of RX_DESC_UPD or TX_DESC_UPD */
 static inline void _efx_writeo_page(struct efx_nic *efx, efx_oword_t *value,
 				    unsigned int reg, unsigned int page)
 {
-	reg = EFX_PAGED_REG(page, reg);
+	reg = efx_paged_reg(efx, page, reg);
 
 	netif_vdbg(efx, hw, efx->net_dev,
 		   "writing register %x with " EFX_OWORD_FMT "\n", reg,
@@ -262,7 +265,7 @@ static inline void
 _efx_writed_page(struct efx_nic *efx, const efx_dword_t *value,
 		 unsigned int reg, unsigned int page)
 {
-	efx_writed(efx, value, EFX_PAGED_REG(page, reg));
+	efx_writed(efx, value, efx_paged_reg(efx, page, reg));
 }
 #define efx_writed_page(efx, value, reg, page)				\
 	_efx_writed_page(efx, value,					\
@@ -288,10 +291,10 @@ static inline void _efx_writed_page_locked(struct efx_nic *efx,
 
 	if (page == 0) {
 		spin_lock_irqsave(&efx->biu_lock, flags);
-		efx_writed(efx, value, EFX_PAGED_REG(page, reg));
+		efx_writed(efx, value, efx_paged_reg(efx, page, reg));
 		spin_unlock_irqrestore(&efx->biu_lock, flags);
 	} else {
-		efx_writed(efx, value, EFX_PAGED_REG(page, reg));
+		efx_writed(efx, value, efx_paged_reg(efx, page, reg));
 	}
 }
 #define efx_writed_page_locked(efx, value, reg, page)			\
diff --git a/drivers/net/ethernet/sfc/mcdi.h b/drivers/net/ethernet/sfc/mcdi.h
index 154ef41d1927..ebd95972ae7b 100644
--- a/drivers/net/ethernet/sfc/mcdi.h
+++ b/drivers/net/ethernet/sfc/mcdi.h
@@ -208,6 +208,9 @@ void efx_mcdi_sensor_event(struct efx_nic *efx, efx_qword_t *ev);
 #define _MCDI_DWORD(_buf, _field)					\
 	((_buf) + (_MCDI_CHECK_ALIGN(MC_CMD_ ## _field ## _OFST, 4) >> 2))
 
+#define MCDI_BYTE(_buf, _field)						\
+	((void)BUILD_BUG_ON_ZERO(MC_CMD_ ## _field ## _LEN != 1),	\
+	 *MCDI_PTR(_buf, _field))
 #define MCDI_WORD(_buf, _field)						\
 	((u16)BUILD_BUG_ON_ZERO(MC_CMD_ ## _field ## _LEN != 2) +	\
 	 le16_to_cpu(*(__force const __le16 *)MCDI_PTR(_buf, _field)))
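Usage sketch of the new one-byte accessor, matching the VI window mode read earlier in this patch; the BUILD_BUG_ON_ZERO guard makes use on a field whose declared length is not 1 a compile-time error:

u8 vi_window_mode = MCDI_BYTE(outbuf,
			      GET_CAPABILITIES_V3_OUT_VI_WINDOW_MODE);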
diff --git a/drivers/net/ethernet/sfc/mcdi_pcol.h b/drivers/net/ethernet/sfc/mcdi_pcol.h
index 91fb54fd03d9..869d76f8f589 100644
--- a/drivers/net/ethernet/sfc/mcdi_pcol.h
+++ b/drivers/net/ethernet/sfc/mcdi_pcol.h
@@ -114,6 +114,8 @@
 #define MCDI_HEADER_XFLAGS_WIDTH 8
 /* Request response using event */
 #define MCDI_HEADER_XFLAGS_EVREQ 0x01
+/* Request (and signal) early doorbell return */
+#define MCDI_HEADER_XFLAGS_DBRET 0x02
 
 /* Maximum number of payload bytes */
 #define MCDI_CTL_SDU_LEN_MAX_V1 0xfc
@@ -123,7 +125,7 @@
 
 
 /* The MC can generate events for two reasons:
- *   - To complete a shared memory request if XFLAGS_EVREQ was set
+ *   - To advance a shared memory request if XFLAGS_EVREQ was set
  *   - As a notification (link state, i2c event), controlled
  *     via MC_CMD_LOG_CTRL
  *
@@ -279,6 +281,17 @@
 /* Returned by MC_CMD_TESTASSERT if the action that should
  * have caused an assertion failed to do so.  */
 #define MC_CMD_ERR_UNREACHABLE 0x1016
+/* This command needs to be processed in the background but there were no
+ * resources to do so. Send it again after a command has completed. */
+#define MC_CMD_ERR_QUEUE_FULL 0x1017
+/* The operation could not be completed because the PCIe link has gone
+ * away.  This error code is never expected to be returned over the TLP
+ * transport. */
+#define MC_CMD_ERR_NO_PCIE 0x1018
+/* The operation could not be completed because the datapath has gone
+ * away.  This is distinct from MC_CMD_ERR_DATAPATH_DISABLED in that the
+ * datapath absence may be temporary. */
+#define MC_CMD_ERR_NO_DATAPATH 0x1019
 
 #define MC_CMD_ERR_CODE_OFST 0
 
@@ -360,6 +373,7 @@
 /* enum: Fatal. */
 #define          MCDI_EVENT_LEVEL_FATAL 0x3
 #define       MCDI_EVENT_DATA_OFST 0
+#define       MCDI_EVENT_DATA_LEN 4
 #define        MCDI_EVENT_CMDDONE_SEQ_LBN 0
 #define        MCDI_EVENT_CMDDONE_SEQ_WIDTH 8
 #define        MCDI_EVENT_CMDDONE_DATALEN_LBN 8
@@ -370,6 +384,8 @@
 #define        MCDI_EVENT_LINKCHANGE_LP_CAP_WIDTH 16
 #define        MCDI_EVENT_LINKCHANGE_SPEED_LBN 16
 #define        MCDI_EVENT_LINKCHANGE_SPEED_WIDTH 4
+/* enum: Link is down or link speed could not be determined */
+#define          MCDI_EVENT_LINKCHANGE_SPEED_UNKNOWN  0x0
 /* enum: 100Mbs */
 #define          MCDI_EVENT_LINKCHANGE_SPEED_100M  0x1
 /* enum: 1Gbs */
@@ -378,6 +394,12 @@
 #define          MCDI_EVENT_LINKCHANGE_SPEED_10G  0x3
 /* enum: 40Gbs */
 #define          MCDI_EVENT_LINKCHANGE_SPEED_40G  0x4
+/* enum: 25Gbs */
+#define          MCDI_EVENT_LINKCHANGE_SPEED_25G  0x5
+/* enum: 50Gbs */
+#define          MCDI_EVENT_LINKCHANGE_SPEED_50G  0x6
+/* enum: 100Gbs */
+#define          MCDI_EVENT_LINKCHANGE_SPEED_100G  0x7
 #define        MCDI_EVENT_LINKCHANGE_FCNTL_LBN 20
 #define        MCDI_EVENT_LINKCHANGE_FCNTL_WIDTH 4
 #define        MCDI_EVENT_LINKCHANGE_LINK_FLAGS_LBN 24
@@ -456,8 +478,63 @@
 #define          MCDI_EVENT_AOE_DDR_ECC_STATUS 0xa
 /* enum: PTP status update */
 #define          MCDI_EVENT_AOE_PTP_STATUS 0xb
+/* enum: FPGA header incorrect */
+#define          MCDI_EVENT_AOE_FPGA_LOAD_HEADER_ERR 0xc
+/* enum: FPGA Powered Off due to error in powering up FPGA */
+#define          MCDI_EVENT_AOE_FPGA_POWER_OFF 0xd
+/* enum: AOE FPGA load failed due to MC to MUM communication failure */
+#define          MCDI_EVENT_AOE_FPGA_LOAD_FAILED 0xe
+/* enum: Notify that invalid flash type detected */
+#define          MCDI_EVENT_AOE_INVALID_FPGA_FLASH_TYPE 0xf
+/* enum: Notify that the attempt to run FPGA Controller firmware timed out */
+#define          MCDI_EVENT_AOE_FC_RUN_TIMEDOUT 0x10
+/* enum: Failure to probe one or more FPGA boot flash chips */
+#define          MCDI_EVENT_AOE_FPGA_BOOT_FLASH_INVALID 0x11
+/* enum: FPGA boot-flash contains an invalid image header */
+#define          MCDI_EVENT_AOE_FPGA_BOOT_FLASH_HDR_INVALID 0x12
+/* enum: Failed to program clocks required by the FPGA */
+#define          MCDI_EVENT_AOE_FPGA_CLOCKS_PROGRAM_FAILED 0x13
+/* enum: Notify that FPGA Controller is alive to serve MCDI requests */
+#define          MCDI_EVENT_AOE_FC_RUNNING 0x14
 #define        MCDI_EVENT_AOE_ERR_DATA_LBN 8
 #define        MCDI_EVENT_AOE_ERR_DATA_WIDTH 8
+#define        MCDI_EVENT_AOE_ERR_FC_ASSERT_INFO_LBN 8
+#define        MCDI_EVENT_AOE_ERR_FC_ASSERT_INFO_WIDTH 8
+/* enum: FC Assert happened, but the register information is not available */
+#define          MCDI_EVENT_AOE_ERR_FC_ASSERT_SEEN 0x0
+/* enum: The register information for FC Assert is ready for reading by driver
+ */
+#define          MCDI_EVENT_AOE_ERR_FC_ASSERT_DATA_READY 0x1
+#define        MCDI_EVENT_AOE_ERR_CODE_FPGA_HEADER_VERIFY_FAILED_LBN 8
+#define        MCDI_EVENT_AOE_ERR_CODE_FPGA_HEADER_VERIFY_FAILED_WIDTH 8
+/* enum: Reading from NV failed */
+#define          MCDI_EVENT_AOE_ERR_FPGA_HEADER_NV_READ_FAIL 0x0
+/* enum: Invalid Magic Number in FPGA header */
+#define          MCDI_EVENT_AOE_ERR_FPGA_HEADER_MAGIC_FAIL 0x1
+/* enum: Invalid Silicon type detected in header */
+#define          MCDI_EVENT_AOE_ERR_FPGA_HEADER_SILICON_TYPE 0x2
+/* enum: Unsupported VRatio */
+#define          MCDI_EVENT_AOE_ERR_FPGA_HEADER_VRATIO 0x3
+/* enum: Unsupported DDR Type */
+#define          MCDI_EVENT_AOE_ERR_FPGA_HEADER_DDR_TYPE 0x4
+/* enum: DDR Voltage out of supported range */
+#define          MCDI_EVENT_AOE_ERR_FPGA_HEADER_DDR_VOLTAGE 0x5
+/* enum: Unsupported DDR speed */
+#define          MCDI_EVENT_AOE_ERR_FPGA_HEADER_DDR_SPEED 0x6
+/* enum: Unsupported DDR size */
+#define          MCDI_EVENT_AOE_ERR_FPGA_HEADER_DDR_SIZE 0x7
+/* enum: Unsupported DDR rank */
+#define          MCDI_EVENT_AOE_ERR_FPGA_HEADER_DDR_RANK 0x8
+#define        MCDI_EVENT_AOE_ERR_CODE_INVALID_FPGA_FLASH_TYPE_INFO_LBN 8
+#define        MCDI_EVENT_AOE_ERR_CODE_INVALID_FPGA_FLASH_TYPE_INFO_WIDTH 8
+/* enum: Primary boot flash */
+#define          MCDI_EVENT_AOE_FLASH_TYPE_BOOT_PRIMARY 0x0
+/* enum: Secondary boot flash */
+#define          MCDI_EVENT_AOE_FLASH_TYPE_BOOT_SECONDARY 0x1
+#define        MCDI_EVENT_AOE_ERR_CODE_FPGA_POWER_OFF_LBN 8
+#define        MCDI_EVENT_AOE_ERR_CODE_FPGA_POWER_OFF_WIDTH 8
+#define        MCDI_EVENT_AOE_ERR_CODE_FPGA_LOAD_FAILED_LBN 8
+#define        MCDI_EVENT_AOE_ERR_CODE_FPGA_LOAD_FAILED_WIDTH 8
 #define        MCDI_EVENT_RX_ERR_RXQ_LBN 0
 #define        MCDI_EVENT_RX_ERR_RXQ_WIDTH 12
 #define        MCDI_EVENT_RX_ERR_TYPE_LBN 12
@@ -480,6 +557,22 @@
 #define          MCDI_EVENT_MUM_WATCHDOG 0x3
 #define        MCDI_EVENT_MUM_ERR_DATA_LBN 8
 #define        MCDI_EVENT_MUM_ERR_DATA_WIDTH 8
+#define        MCDI_EVENT_DBRET_SEQ_LBN 0
+#define        MCDI_EVENT_DBRET_SEQ_WIDTH 8
+#define        MCDI_EVENT_SUC_ERR_TYPE_LBN 0
+#define        MCDI_EVENT_SUC_ERR_TYPE_WIDTH 8
+/* enum: Corrupted or bad SUC application. */
+#define          MCDI_EVENT_SUC_BAD_APP 0x1
+/* enum: SUC application reported an assert. */
+#define          MCDI_EVENT_SUC_ASSERT 0x2
+/* enum: SUC application reported an exception. */
+#define          MCDI_EVENT_SUC_EXCEPTION 0x3
+/* enum: SUC watchdog timer expired. */
+#define          MCDI_EVENT_SUC_WATCHDOG 0x4
+#define        MCDI_EVENT_SUC_ERR_ADDRESS_LBN 8
+#define        MCDI_EVENT_SUC_ERR_ADDRESS_WIDTH 24
+#define        MCDI_EVENT_SUC_ERR_DATA_LBN 8
+#define        MCDI_EVENT_SUC_ERR_DATA_WIDTH 24
 #define       MCDI_EVENT_DATA_LBN 0
 #define       MCDI_EVENT_DATA_WIDTH 32
 #define       MCDI_EVENT_SRC_LBN 36
@@ -552,73 +645,99 @@
  * been processed and it may now resend the command
  */
 #define          MCDI_EVENT_CODE_PROXY_RESPONSE 0x1d
+/* enum: MCDI command accepted. New commands can be issued but this command is
+ * not done yet.
+ */
+#define          MCDI_EVENT_CODE_DBRET 0x1e
+/* enum: The MC has detected a fault on the SUC */
+#define          MCDI_EVENT_CODE_SUC 0x1f
 /* enum: Artificial event generated by host and posted via MC for test
  * purposes.
  */
 #define          MCDI_EVENT_CODE_TESTGEN  0xfa
 #define       MCDI_EVENT_CMDDONE_DATA_OFST 0
+#define       MCDI_EVENT_CMDDONE_DATA_LEN 4
 #define       MCDI_EVENT_CMDDONE_DATA_LBN 0
 #define       MCDI_EVENT_CMDDONE_DATA_WIDTH 32
 #define       MCDI_EVENT_LINKCHANGE_DATA_OFST 0
+#define       MCDI_EVENT_LINKCHANGE_DATA_LEN 4
 #define       MCDI_EVENT_LINKCHANGE_DATA_LBN 0
 #define       MCDI_EVENT_LINKCHANGE_DATA_WIDTH 32
 #define       MCDI_EVENT_SENSOREVT_DATA_OFST 0
+#define       MCDI_EVENT_SENSOREVT_DATA_LEN 4
 #define       MCDI_EVENT_SENSOREVT_DATA_LBN 0
 #define       MCDI_EVENT_SENSOREVT_DATA_WIDTH 32
 #define       MCDI_EVENT_MAC_STATS_DMA_GENERATION_OFST 0
+#define       MCDI_EVENT_MAC_STATS_DMA_GENERATION_LEN 4
 #define       MCDI_EVENT_MAC_STATS_DMA_GENERATION_LBN 0
 #define       MCDI_EVENT_MAC_STATS_DMA_GENERATION_WIDTH 32
 #define       MCDI_EVENT_TX_ERR_DATA_OFST 0
+#define       MCDI_EVENT_TX_ERR_DATA_LEN 4
 #define       MCDI_EVENT_TX_ERR_DATA_LBN 0
 #define       MCDI_EVENT_TX_ERR_DATA_WIDTH 32
 /* For CODE_PTP_RX, CODE_PTP_PPS and CODE_HW_PPS events the seconds field of
  * timestamp
  */
 #define       MCDI_EVENT_PTP_SECONDS_OFST 0
+#define       MCDI_EVENT_PTP_SECONDS_LEN 4
 #define       MCDI_EVENT_PTP_SECONDS_LBN 0
 #define       MCDI_EVENT_PTP_SECONDS_WIDTH 32
 /* For CODE_PTP_RX, CODE_PTP_PPS and CODE_HW_PPS events the major field of
  * timestamp
  */
 #define       MCDI_EVENT_PTP_MAJOR_OFST 0
+#define       MCDI_EVENT_PTP_MAJOR_LEN 4
 #define       MCDI_EVENT_PTP_MAJOR_LBN 0
 #define       MCDI_EVENT_PTP_MAJOR_WIDTH 32
 /* For CODE_PTP_RX, CODE_PTP_PPS and CODE_HW_PPS events the nanoseconds field
  * of timestamp
  */
 #define       MCDI_EVENT_PTP_NANOSECONDS_OFST 0
+#define       MCDI_EVENT_PTP_NANOSECONDS_LEN 4
 #define       MCDI_EVENT_PTP_NANOSECONDS_LBN 0
 #define       MCDI_EVENT_PTP_NANOSECONDS_WIDTH 32
 /* For CODE_PTP_RX, CODE_PTP_PPS and CODE_HW_PPS events the minor field of
  * timestamp
  */
 #define       MCDI_EVENT_PTP_MINOR_OFST 0
+#define       MCDI_EVENT_PTP_MINOR_LEN 4
 #define       MCDI_EVENT_PTP_MINOR_LBN 0
 #define       MCDI_EVENT_PTP_MINOR_WIDTH 32
 /* For CODE_PTP_RX events, the lowest four bytes of sourceUUID from PTP packet
  */
 #define       MCDI_EVENT_PTP_UUID_OFST 0
+#define       MCDI_EVENT_PTP_UUID_LEN 4
 #define       MCDI_EVENT_PTP_UUID_LBN 0
 #define       MCDI_EVENT_PTP_UUID_WIDTH 32
 #define       MCDI_EVENT_RX_ERR_DATA_OFST 0
+#define       MCDI_EVENT_RX_ERR_DATA_LEN 4
 #define       MCDI_EVENT_RX_ERR_DATA_LBN 0
 #define       MCDI_EVENT_RX_ERR_DATA_WIDTH 32
 #define       MCDI_EVENT_PAR_ERR_DATA_OFST 0
+#define       MCDI_EVENT_PAR_ERR_DATA_LEN 4
 #define       MCDI_EVENT_PAR_ERR_DATA_LBN 0
 #define       MCDI_EVENT_PAR_ERR_DATA_WIDTH 32
 #define       MCDI_EVENT_ECC_CORR_ERR_DATA_OFST 0
+#define       MCDI_EVENT_ECC_CORR_ERR_DATA_LEN 4
 #define       MCDI_EVENT_ECC_CORR_ERR_DATA_LBN 0
 #define       MCDI_EVENT_ECC_CORR_ERR_DATA_WIDTH 32
 #define       MCDI_EVENT_ECC_FATAL_ERR_DATA_OFST 0
+#define       MCDI_EVENT_ECC_FATAL_ERR_DATA_LEN 4
 #define       MCDI_EVENT_ECC_FATAL_ERR_DATA_LBN 0
 #define       MCDI_EVENT_ECC_FATAL_ERR_DATA_WIDTH 32
 /* For CODE_PTP_TIME events, the major value of the PTP clock */
 #define       MCDI_EVENT_PTP_TIME_MAJOR_OFST 0
+#define       MCDI_EVENT_PTP_TIME_MAJOR_LEN 4
 #define       MCDI_EVENT_PTP_TIME_MAJOR_LBN 0
 #define       MCDI_EVENT_PTP_TIME_MAJOR_WIDTH 32
 /* For CODE_PTP_TIME events, bits 19-26 of the minor value of the PTP clock */
 #define       MCDI_EVENT_PTP_TIME_MINOR_26_19_LBN 36
 #define       MCDI_EVENT_PTP_TIME_MINOR_26_19_WIDTH 8
+/* For CODE_PTP_TIME events, most significant bits of the minor value of the
+ * PTP clock. This is a more generic equivalent of PTP_TIME_MINOR_26_19.
+ */
+#define       MCDI_EVENT_PTP_TIME_MINOR_MS_8BITS_LBN 36
+#define       MCDI_EVENT_PTP_TIME_MINOR_MS_8BITS_WIDTH 8
 /* For CODE_PTP_TIME events where report sync status is enabled, indicates
  * whether the NIC clock has ever been set
  */
@@ -634,10 +753,17 @@
  */
 #define       MCDI_EVENT_PTP_TIME_MINOR_26_21_LBN 38
 #define       MCDI_EVENT_PTP_TIME_MINOR_26_21_WIDTH 6
+/* For CODE_PTP_TIME events, most significant bits of the minor value of the
+ * PTP clock. This is a more generic equivalent of PTP_TIME_MINOR_26_21.
+ */
+#define       MCDI_EVENT_PTP_TIME_MINOR_MS_6BITS_LBN 38
+#define       MCDI_EVENT_PTP_TIME_MINOR_MS_6BITS_WIDTH 6
 #define       MCDI_EVENT_PROXY_REQUEST_BUFF_INDEX_OFST 0
+#define       MCDI_EVENT_PROXY_REQUEST_BUFF_INDEX_LEN 4
 #define       MCDI_EVENT_PROXY_REQUEST_BUFF_INDEX_LBN 0
 #define       MCDI_EVENT_PROXY_REQUEST_BUFF_INDEX_WIDTH 32
 #define       MCDI_EVENT_PROXY_RESPONSE_HANDLE_OFST 0
+#define       MCDI_EVENT_PROXY_RESPONSE_HANDLE_LEN 4
 #define       MCDI_EVENT_PROXY_RESPONSE_HANDLE_LBN 0
 #define       MCDI_EVENT_PROXY_RESPONSE_HANDLE_WIDTH 32
 /* Zero means that the request has been completed or authorized, and the driver
@@ -646,6 +772,10 @@
  */
 #define       MCDI_EVENT_PROXY_RESPONSE_RC_LBN 36
 #define       MCDI_EVENT_PROXY_RESPONSE_RC_WIDTH 8
+#define       MCDI_EVENT_DBRET_DATA_OFST 0
+#define       MCDI_EVENT_DBRET_DATA_LEN 4
+#define       MCDI_EVENT_DBRET_DATA_LBN 0
+#define       MCDI_EVENT_DBRET_DATA_WIDTH 32
 
 /* FCDI_EVENT structuredef */
 #define    FCDI_EVENT_LEN 8
@@ -662,6 +792,7 @@
 /* enum: Fatal. */
 #define          FCDI_EVENT_LEVEL_FATAL 0x3
 #define       FCDI_EVENT_DATA_OFST 0
+#define       FCDI_EVENT_DATA_LEN 4
 #define        FCDI_EVENT_LINK_STATE_STATUS_LBN 0
 #define        FCDI_EVENT_LINK_STATE_STATUS_WIDTH 1
 #define          FCDI_EVENT_LINK_DOWN 0x0 /* enum */
@@ -701,6 +832,7 @@
 #define          FCDI_EVENT_REBOOT_FC_FW 0x0 /* enum */
 #define          FCDI_EVENT_REBOOT_FC_BOOTLOADER 0x1 /* enum */
 #define       FCDI_EVENT_ASSERT_INSTR_ADDRESS_OFST 0
+#define       FCDI_EVENT_ASSERT_INSTR_ADDRESS_LEN 4
 #define       FCDI_EVENT_ASSERT_INSTR_ADDRESS_LBN 0
 #define       FCDI_EVENT_ASSERT_INSTR_ADDRESS_WIDTH 32
 #define       FCDI_EVENT_ASSERT_TYPE_LBN 36
@@ -708,12 +840,15 @@
 #define       FCDI_EVENT_DDR_TEST_RESULT_STATUS_CODE_LBN 36
 #define       FCDI_EVENT_DDR_TEST_RESULT_STATUS_CODE_WIDTH 8
 #define       FCDI_EVENT_DDR_TEST_RESULT_RESULT_OFST 0
+#define       FCDI_EVENT_DDR_TEST_RESULT_RESULT_LEN 4
 #define       FCDI_EVENT_DDR_TEST_RESULT_RESULT_LBN 0
 #define       FCDI_EVENT_DDR_TEST_RESULT_RESULT_WIDTH 32
 #define       FCDI_EVENT_LINK_STATE_DATA_OFST 0
+#define       FCDI_EVENT_LINK_STATE_DATA_LEN 4
 #define       FCDI_EVENT_LINK_STATE_DATA_LBN 0
 #define       FCDI_EVENT_LINK_STATE_DATA_WIDTH 32
 #define       FCDI_EVENT_PTP_STATE_OFST 0
+#define       FCDI_EVENT_PTP_STATE_LEN 4
 #define          FCDI_EVENT_PTP_UNDEFINED 0x0 /* enum */
 #define          FCDI_EVENT_PTP_SETUP_FAILED 0x1 /* enum */
 #define          FCDI_EVENT_PTP_OPERATIONAL 0x2 /* enum */
@@ -722,6 +857,7 @@
 #define       FCDI_EVENT_DDR_ECC_STATUS_BANK_ID_LBN 36
 #define       FCDI_EVENT_DDR_ECC_STATUS_BANK_ID_WIDTH 8
 #define       FCDI_EVENT_DDR_ECC_STATUS_STATUS_OFST 0
+#define       FCDI_EVENT_DDR_ECC_STATUS_STATUS_LEN 4
 #define       FCDI_EVENT_DDR_ECC_STATUS_STATUS_LBN 0
 #define       FCDI_EVENT_DDR_ECC_STATUS_STATUS_WIDTH 32
 /* Index of MC port being referred to */
@@ -729,9 +865,11 @@
 #define       FCDI_EVENT_PORT_CONFIG_SRC_WIDTH 8
 /* FC Port index that matches the MC port index in SRC */
 #define       FCDI_EVENT_PORT_CONFIG_DATA_OFST 0
+#define       FCDI_EVENT_PORT_CONFIG_DATA_LEN 4
 #define       FCDI_EVENT_PORT_CONFIG_DATA_LBN 0
 #define       FCDI_EVENT_PORT_CONFIG_DATA_WIDTH 32
 #define       FCDI_EVENT_BOOT_RESULT_OFST 0
+#define       FCDI_EVENT_BOOT_RESULT_LEN 4
 /*            Enum values, see field(s): */
 /*               MC_CMD_AOE/MC_CMD_AOE_OUT_INFO/FC_BOOT_RESULT */
 #define       FCDI_EVENT_BOOT_RESULT_LBN 0
@@ -748,14 +886,17 @@
 #define    FCDI_EXTENDED_EVENT_PPS_LEN(num) (8+8*(num))
 /* Number of timestamps following */
 #define       FCDI_EXTENDED_EVENT_PPS_COUNT_OFST 0
+#define       FCDI_EXTENDED_EVENT_PPS_COUNT_LEN 4
 #define       FCDI_EXTENDED_EVENT_PPS_COUNT_LBN 0
 #define       FCDI_EXTENDED_EVENT_PPS_COUNT_WIDTH 32
 /* Seconds field of a timestamp record */
 #define       FCDI_EXTENDED_EVENT_PPS_SECONDS_OFST 8
+#define       FCDI_EXTENDED_EVENT_PPS_SECONDS_LEN 4
 #define       FCDI_EXTENDED_EVENT_PPS_SECONDS_LBN 64
 #define       FCDI_EXTENDED_EVENT_PPS_SECONDS_WIDTH 32
 /* Nanoseconds field of a timestamp record */
 #define       FCDI_EXTENDED_EVENT_PPS_NANOSECONDS_OFST 12
+#define       FCDI_EXTENDED_EVENT_PPS_NANOSECONDS_LEN 4
 #define       FCDI_EXTENDED_EVENT_PPS_NANOSECONDS_LBN 96
 #define       FCDI_EXTENDED_EVENT_PPS_NANOSECONDS_WIDTH 32
 /* Timestamp records comprising the event */
@@ -783,6 +924,7 @@
 /* enum: Fatal. */
 #define          MUM_EVENT_LEVEL_FATAL 0x3
 #define       MUM_EVENT_DATA_OFST 0
+#define       MUM_EVENT_DATA_LEN 4
 #define        MUM_EVENT_SENSOR_ID_LBN 0
 #define        MUM_EVENT_SENSOR_ID_WIDTH 8
 /*             Enum values, see field(s): */
@@ -820,18 +962,23 @@
 /* enum: Link fault has been asserted, or has cleared. */
 #define          MUM_EVENT_CODE_QSFP_LASI_INTERRUPT 0x4
 #define       MUM_EVENT_SENSOR_DATA_OFST 0
+#define       MUM_EVENT_SENSOR_DATA_LEN 4
 #define       MUM_EVENT_SENSOR_DATA_LBN 0
 #define       MUM_EVENT_SENSOR_DATA_WIDTH 32
 #define       MUM_EVENT_PORT_PHY_FLAGS_OFST 0
+#define       MUM_EVENT_PORT_PHY_FLAGS_LEN 4
 #define       MUM_EVENT_PORT_PHY_FLAGS_LBN 0
 #define       MUM_EVENT_PORT_PHY_FLAGS_WIDTH 32
 #define       MUM_EVENT_PORT_PHY_COPPER_LEN_OFST 0
+#define       MUM_EVENT_PORT_PHY_COPPER_LEN_LEN 4
 #define       MUM_EVENT_PORT_PHY_COPPER_LEN_LBN 0
 #define       MUM_EVENT_PORT_PHY_COPPER_LEN_WIDTH 32
 #define       MUM_EVENT_PORT_PHY_CAPS_OFST 0
+#define       MUM_EVENT_PORT_PHY_CAPS_LEN 4
 #define       MUM_EVENT_PORT_PHY_CAPS_LBN 0
 #define       MUM_EVENT_PORT_PHY_CAPS_WIDTH 32
 #define       MUM_EVENT_PORT_PHY_TECH_OFST 0
+#define       MUM_EVENT_PORT_PHY_TECH_LEN 4
 #define          MUM_EVENT_PORT_PHY_STATE_QSFP_MODULE_TECH_UNKNOWN 0x0 /* enum */
 #define          MUM_EVENT_PORT_PHY_STATE_QSFP_MODULE_TECH_OPTICAL 0x1 /* enum */
 #define          MUM_EVENT_PORT_PHY_STATE_QSFP_MODULE_TECH_COPPER_PASSIVE 0x2 /* enum */
@@ -864,7 +1011,9 @@
 /* MC_CMD_READ32_IN msgrequest */
 #define    MC_CMD_READ32_IN_LEN 8
 #define       MC_CMD_READ32_IN_ADDR_OFST 0
+#define       MC_CMD_READ32_IN_ADDR_LEN 4
 #define       MC_CMD_READ32_IN_NUMWORDS_OFST 4
+#define       MC_CMD_READ32_IN_NUMWORDS_LEN 4
 
 /* MC_CMD_READ32_OUT msgresponse */
 #define    MC_CMD_READ32_OUT_LENMIN 4
@@ -882,13 +1031,14 @@
  */
 #define MC_CMD_WRITE32 0x2
 
-#define MC_CMD_0x2_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+#define MC_CMD_0x2_PRIVILEGE_CTG SRIOV_CTG_INSECURE
 
 /* MC_CMD_WRITE32_IN msgrequest */
 #define    MC_CMD_WRITE32_IN_LENMIN 8
 #define    MC_CMD_WRITE32_IN_LENMAX 252
 #define    MC_CMD_WRITE32_IN_LEN(num) (4+4*(num))
 #define       MC_CMD_WRITE32_IN_ADDR_OFST 0
+#define       MC_CMD_WRITE32_IN_ADDR_LEN 4
 #define       MC_CMD_WRITE32_IN_BUFFER_OFST 4
 #define       MC_CMD_WRITE32_IN_BUFFER_LEN 4
 #define       MC_CMD_WRITE32_IN_BUFFER_MINNUM 1
@@ -915,6 +1065,7 @@
  * is a bitfield, with each bit as documented below.
  */
 #define       MC_CMD_COPYCODE_IN_SRC_ADDR_OFST 0
+#define       MC_CMD_COPYCODE_IN_SRC_ADDR_LEN 4
 /* enum: Deprecated; equivalent to setting BOOT_MAGIC_PRESENT (see below) */
 #define          MC_CMD_COPYCODE_HUNT_NO_MAGIC_ADDR 0x10000
 /* enum: Deprecated; equivalent to setting BOOT_MAGIC_PRESENT and
@@ -940,9 +1091,12 @@
 #define        MC_CMD_COPYCODE_IN_BOOT_MAGIC_DISABLE_XIP_WIDTH 1
 /* Destination address */
 #define       MC_CMD_COPYCODE_IN_DEST_ADDR_OFST 4
+#define       MC_CMD_COPYCODE_IN_DEST_ADDR_LEN 4
 #define       MC_CMD_COPYCODE_IN_NUMWORDS_OFST 8
+#define       MC_CMD_COPYCODE_IN_NUMWORDS_LEN 4
 /* Address of where to jump after copy. */
 #define       MC_CMD_COPYCODE_IN_JUMP_OFST 12
+#define       MC_CMD_COPYCODE_IN_JUMP_LEN 4
 /* enum: Control should return to the caller rather than jumping */
 #define          MC_CMD_COPYCODE_JUMP_NONE 0x1
 
@@ -956,12 +1110,13 @@
  */
 #define MC_CMD_SET_FUNC 0x4
 
-#define MC_CMD_0x4_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+#define MC_CMD_0x4_PRIVILEGE_CTG SRIOV_CTG_INSECURE
 
 /* MC_CMD_SET_FUNC_IN msgrequest */
 #define    MC_CMD_SET_FUNC_IN_LEN 4
 /* Set function */
 #define       MC_CMD_SET_FUNC_IN_FUNC_OFST 0
+#define       MC_CMD_SET_FUNC_IN_FUNC_LEN 4
 
 /* MC_CMD_SET_FUNC_OUT msgresponse */
 #define    MC_CMD_SET_FUNC_OUT_LEN 0
@@ -973,7 +1128,7 @@
  */
 #define MC_CMD_GET_BOOT_STATUS 0x5
 
-#define MC_CMD_0x5_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+#define MC_CMD_0x5_PRIVILEGE_CTG SRIOV_CTG_GENERAL
 
 /* MC_CMD_GET_BOOT_STATUS_IN msgrequest */
 #define    MC_CMD_GET_BOOT_STATUS_IN_LEN 0
@@ -982,9 +1137,11 @@
 #define    MC_CMD_GET_BOOT_STATUS_OUT_LEN 8
 /* ?? */
 #define       MC_CMD_GET_BOOT_STATUS_OUT_BOOT_OFFSET_OFST 0
+#define       MC_CMD_GET_BOOT_STATUS_OUT_BOOT_OFFSET_LEN 4
 /* enum: Indicates that the MC wasn't flash booted */
 #define          MC_CMD_GET_BOOT_STATUS_OUT_BOOT_OFFSET_NULL  0xdeadbeef
 #define       MC_CMD_GET_BOOT_STATUS_OUT_FLAGS_OFST 4
+#define       MC_CMD_GET_BOOT_STATUS_OUT_FLAGS_LEN 4
 #define        MC_CMD_GET_BOOT_STATUS_OUT_FLAGS_WATCHDOG_LBN 0
 #define        MC_CMD_GET_BOOT_STATUS_OUT_FLAGS_WATCHDOG_WIDTH 1
 #define        MC_CMD_GET_BOOT_STATUS_OUT_FLAGS_PRIMARY_LBN 1
@@ -1007,11 +1164,13 @@
 #define    MC_CMD_GET_ASSERTS_IN_LEN 4
 /* Set to clear assertion */
 #define       MC_CMD_GET_ASSERTS_IN_CLEAR_OFST 0
+#define       MC_CMD_GET_ASSERTS_IN_CLEAR_LEN 4
 
 /* MC_CMD_GET_ASSERTS_OUT msgresponse */
 #define    MC_CMD_GET_ASSERTS_OUT_LEN 140
 /* Assertion status flag. */
 #define       MC_CMD_GET_ASSERTS_OUT_GLOBAL_FLAGS_OFST 0
+#define       MC_CMD_GET_ASSERTS_OUT_GLOBAL_FLAGS_LEN 4
 /* enum: No assertions have failed. */
 #define          MC_CMD_GET_ASSERTS_FLAGS_NO_FAILS 0x1
 /* enum: A system-level assertion has failed. */
@@ -1024,6 +1183,7 @@
 #define          MC_CMD_GET_ASSERTS_FLAGS_ADDR_TRAP 0x5
 /* Failing PC value */
 #define       MC_CMD_GET_ASSERTS_OUT_SAVED_PC_OFFS_OFST 4
+#define       MC_CMD_GET_ASSERTS_OUT_SAVED_PC_OFFS_LEN 4
 /* Saved GP regs */
 #define       MC_CMD_GET_ASSERTS_OUT_GP_REGS_OFFS_OFST 8
 #define       MC_CMD_GET_ASSERTS_OUT_GP_REGS_OFFS_LEN 4
@@ -1034,7 +1194,9 @@
 #define          MC_CMD_GET_ASSERTS_REG_NO_DATA 0xda7a1057
 /* Failing thread address */
 #define       MC_CMD_GET_ASSERTS_OUT_THREAD_OFFS_OFST 132
+#define       MC_CMD_GET_ASSERTS_OUT_THREAD_OFFS_LEN 4
 #define       MC_CMD_GET_ASSERTS_OUT_RESERVED_OFST 136
+#define       MC_CMD_GET_ASSERTS_OUT_RESERVED_LEN 4
 
 
 /***********************************/
@@ -1050,12 +1212,14 @@
 #define    MC_CMD_LOG_CTRL_IN_LEN 8
 /* Log destination */
 #define       MC_CMD_LOG_CTRL_IN_LOG_DEST_OFST 0
+#define       MC_CMD_LOG_CTRL_IN_LOG_DEST_LEN 4
 /* enum: UART. */
 #define          MC_CMD_LOG_CTRL_IN_LOG_DEST_UART 0x1
 /* enum: Event queue. */
 #define          MC_CMD_LOG_CTRL_IN_LOG_DEST_EVQ 0x2
 /* Legacy argument. Must be zero. */
 #define       MC_CMD_LOG_CTRL_IN_LOG_DEST_EVQ_OFST 4
+#define       MC_CMD_LOG_CTRL_IN_LOG_DEST_EVQ_LEN 4
 
 /* MC_CMD_LOG_CTRL_OUT msgresponse */
 #define    MC_CMD_LOG_CTRL_OUT_LEN 0
@@ -1076,23 +1240,29 @@
 #define    MC_CMD_GET_VERSION_EXT_IN_LEN 4
 /* placeholder, set to 0 */
 #define       MC_CMD_GET_VERSION_EXT_IN_EXT_FLAGS_OFST 0
+#define       MC_CMD_GET_VERSION_EXT_IN_EXT_FLAGS_LEN 4
 
 /* MC_CMD_GET_VERSION_V0_OUT msgresponse: deprecated version format */
 #define    MC_CMD_GET_VERSION_V0_OUT_LEN 4
 #define       MC_CMD_GET_VERSION_OUT_FIRMWARE_OFST 0
+#define       MC_CMD_GET_VERSION_OUT_FIRMWARE_LEN 4
 /* enum: Reserved version number to indicate "any" version. */
 #define          MC_CMD_GET_VERSION_OUT_FIRMWARE_ANY 0xffffffff
 /* enum: Bootrom version value for Siena. */
 #define          MC_CMD_GET_VERSION_OUT_FIRMWARE_SIENA_BOOTROM 0xb0070000
 /* enum: Bootrom version value for Huntington. */
 #define          MC_CMD_GET_VERSION_OUT_FIRMWARE_HUNT_BOOTROM 0xb0070001
+/* enum: Bootrom version value for Medford2. */
+#define          MC_CMD_GET_VERSION_OUT_FIRMWARE_MEDFORD2_BOOTROM 0xb0070002
 
 /* MC_CMD_GET_VERSION_OUT msgresponse */
 #define    MC_CMD_GET_VERSION_OUT_LEN 32
 /*            MC_CMD_GET_VERSION_OUT_FIRMWARE_OFST 0 */
+/*            MC_CMD_GET_VERSION_OUT_FIRMWARE_LEN 4 */
 /*            Enum values, see field(s): */
 /*               MC_CMD_GET_VERSION_V0_OUT/MC_CMD_GET_VERSION_OUT_FIRMWARE */
 #define       MC_CMD_GET_VERSION_OUT_PCOL_OFST 4
+#define       MC_CMD_GET_VERSION_OUT_PCOL_LEN 4
 /* 128bit mask of functions supported by the current firmware */
 #define       MC_CMD_GET_VERSION_OUT_SUPPORTED_FUNCS_OFST 8
 #define       MC_CMD_GET_VERSION_OUT_SUPPORTED_FUNCS_LEN 16
@@ -1104,9 +1274,11 @@
 /* MC_CMD_GET_VERSION_EXT_OUT msgresponse */
 #define    MC_CMD_GET_VERSION_EXT_OUT_LEN 48
 /*            MC_CMD_GET_VERSION_OUT_FIRMWARE_OFST 0 */
+/*            MC_CMD_GET_VERSION_OUT_FIRMWARE_LEN 4 */
 /*            Enum values, see field(s): */
 /*               MC_CMD_GET_VERSION_V0_OUT/MC_CMD_GET_VERSION_OUT_FIRMWARE */
 #define       MC_CMD_GET_VERSION_EXT_OUT_PCOL_OFST 4
+#define       MC_CMD_GET_VERSION_EXT_OUT_PCOL_LEN 4
 /* 128bit mask of functions supported by the current firmware */
 #define       MC_CMD_GET_VERSION_EXT_OUT_SUPPORTED_FUNCS_OFST 8
 #define       MC_CMD_GET_VERSION_EXT_OUT_SUPPORTED_FUNCS_LEN 16
@@ -1136,41 +1308,54 @@
 #define          MC_CMD_PTP_OP_ENABLE 0x1
 /* enum: Disable PTP packet timestamping operation. */
 #define          MC_CMD_PTP_OP_DISABLE 0x2
-/* enum: Send a PTP packet. */
+/* enum: Send a PTP packet. This operation is used on Siena and Huntington.
+ * From Medford onwards it is not supported: on those platforms PTP transmit
+ * timestamping is done using the fast path.
+ */
 #define          MC_CMD_PTP_OP_TRANSMIT 0x3
 /* enum: Read the current NIC time. */
 #define          MC_CMD_PTP_OP_READ_NIC_TIME 0x4
-/* enum: Get the current PTP status. */
+/* enum: Get the current PTP status. Note that the clock frequency returned (in
+ * Hz) is rounded down to the nearest MHz (e.g. 666000000 for 666666666).
+ */
 #define          MC_CMD_PTP_OP_STATUS 0x5
 /* enum: Adjust the PTP NIC's time. */
 #define          MC_CMD_PTP_OP_ADJUST 0x6
 /* enum: Synchronize host and NIC time. */
 #define          MC_CMD_PTP_OP_SYNCHRONIZE 0x7
-/* enum: Basic manufacturing tests. */
+/* enum: Basic manufacturing tests. Siena PTP adapters only. */
 #define          MC_CMD_PTP_OP_MANFTEST_BASIC 0x8
-/* enum: Packet based manufacturing tests. */
+/* enum: Packet based manufacturing tests. Siena PTP adapters only. */
 #define          MC_CMD_PTP_OP_MANFTEST_PACKET 0x9
 /* enum: Reset some of the PTP related statistics */
 #define          MC_CMD_PTP_OP_RESET_STATS 0xa
 /* enum: Debug operations to MC. */
 #define          MC_CMD_PTP_OP_DEBUG 0xb
-/* enum: Read an FPGA register */
+/* enum: Read an FPGA register. Siena PTP adapters only. */
 #define          MC_CMD_PTP_OP_FPGAREAD 0xc
-/* enum: Write an FPGA register */
+/* enum: Write an FPGA register. Siena PTP adapters only. */
 #define          MC_CMD_PTP_OP_FPGAWRITE 0xd
 /* enum: Apply an offset to the NIC clock */
 #define          MC_CMD_PTP_OP_CLOCK_OFFSET_ADJUST 0xe
-/* enum: Change Apply an offset to the NIC clock */
+/* enum: Change the frequency correction applied to the NIC clock */
 #define          MC_CMD_PTP_OP_CLOCK_FREQ_ADJUST 0xf
-/* enum: Set the MC packet filter VLAN tags for received PTP packets */
+/* enum: Set the MC packet filter VLAN tags for received PTP packets.
+ * Deprecated for Huntington onwards.
+ */
 #define          MC_CMD_PTP_OP_RX_SET_VLAN_FILTER 0x10
-/* enum: Set the MC packet filter UUID for received PTP packets */
+/* enum: Set the MC packet filter UUID for received PTP packets. Deprecated for
+ * Huntington onwards.
+ */
 #define          MC_CMD_PTP_OP_RX_SET_UUID_FILTER 0x11
-/* enum: Set the MC packet filter Domain for received PTP packets */
+/* enum: Set the MC packet filter Domain for received PTP packets. Deprecated
+ * for Huntington onwards.
+ */
 #define          MC_CMD_PTP_OP_RX_SET_DOMAIN_FILTER 0x12
-/* enum: Set the clock source */
+/* enum: Set the clock source. Required for snapper tests on Huntington and
+ * Medford. Not implemented for Siena or Medford2.
+ */
 #define          MC_CMD_PTP_OP_SET_CLK_SRC 0x13
-/* enum: Reset value of Timer Reg. */
+/* enum: Reset value of Timer Reg. Not implemented. */
 #define          MC_CMD_PTP_OP_RST_CLK 0x14
 /* enum: Enable the forwarding of PPS events to the host */
 #define          MC_CMD_PTP_OP_PPS_ENABLE 0x15
@@ -1191,7 +1376,7 @@
 /* enum: Unsubscribe to stop receiving time events */
 #define          MC_CMD_PTP_OP_TIME_EVENT_UNSUBSCRIBE 0x19
 /* enum: PPS-based manufacturing tests. Requires PPS output to be looped to PPS
- * input on the same NIC.
+ * input on the same NIC. Siena PTP adapters only.
  */
 #define          MC_CMD_PTP_OP_MANFTEST_PPS 0x1a
 /* enum: Set the PTP sync status. Status is used by firmware to report to event
@@ -1204,11 +1389,15 @@
 /* MC_CMD_PTP_IN_ENABLE msgrequest */
 #define    MC_CMD_PTP_IN_ENABLE_LEN 16
 #define       MC_CMD_PTP_IN_CMD_OFST 0
+#define       MC_CMD_PTP_IN_CMD_LEN 4
 #define       MC_CMD_PTP_IN_PERIPH_ID_OFST 4
-/* Event queue for PTP events */
+#define       MC_CMD_PTP_IN_PERIPH_ID_LEN 4
+/* Not used. Events are always sent to function-relative queue 0. */
 #define       MC_CMD_PTP_IN_ENABLE_QUEUE_OFST 8
-/* PTP timestamping mode */
+#define       MC_CMD_PTP_IN_ENABLE_QUEUE_LEN 4
+/* PTP timestamping mode. Not used from Huntington onwards. */
 #define       MC_CMD_PTP_IN_ENABLE_MODE_OFST 12
+#define       MC_CMD_PTP_IN_ENABLE_MODE_LEN 4
 /* enum: PTP, version 1 */
 #define          MC_CMD_PTP_MODE_V1 0x0
 /* enum: PTP, version 1, with VLAN headers - deprecated */
@@ -1225,16 +1414,21 @@
 /* MC_CMD_PTP_IN_DISABLE msgrequest */
 #define    MC_CMD_PTP_IN_DISABLE_LEN 8
 /*            MC_CMD_PTP_IN_CMD_OFST 0 */
+/*            MC_CMD_PTP_IN_CMD_LEN 4 */
 /*            MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */
+/*            MC_CMD_PTP_IN_PERIPH_ID_LEN 4 */
 
 /* MC_CMD_PTP_IN_TRANSMIT msgrequest */
 #define    MC_CMD_PTP_IN_TRANSMIT_LENMIN 13
 #define    MC_CMD_PTP_IN_TRANSMIT_LENMAX 252
 #define    MC_CMD_PTP_IN_TRANSMIT_LEN(num) (12+1*(num))
 /*            MC_CMD_PTP_IN_CMD_OFST 0 */
+/*            MC_CMD_PTP_IN_CMD_LEN 4 */
 /*            MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */
+/*            MC_CMD_PTP_IN_PERIPH_ID_LEN 4 */
 /* Transmit packet length */
 #define       MC_CMD_PTP_IN_TRANSMIT_LENGTH_OFST 8
+#define       MC_CMD_PTP_IN_TRANSMIT_LENGTH_LEN 4
 /* Transmit packet data */
 #define       MC_CMD_PTP_IN_TRANSMIT_PACKET_OFST 12
 #define       MC_CMD_PTP_IN_TRANSMIT_PACKET_LEN 1
@@ -1244,17 +1438,30 @@
 /* MC_CMD_PTP_IN_READ_NIC_TIME msgrequest */
 #define    MC_CMD_PTP_IN_READ_NIC_TIME_LEN 8
 /*            MC_CMD_PTP_IN_CMD_OFST 0 */
+/*            MC_CMD_PTP_IN_CMD_LEN 4 */
 /*            MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */
+/*            MC_CMD_PTP_IN_PERIPH_ID_LEN 4 */
+
+/* MC_CMD_PTP_IN_READ_NIC_TIME_V2 msgrequest */
+#define    MC_CMD_PTP_IN_READ_NIC_TIME_V2_LEN 8
+/*            MC_CMD_PTP_IN_CMD_OFST 0 */
+/*            MC_CMD_PTP_IN_CMD_LEN 4 */
+/*            MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */
+/*            MC_CMD_PTP_IN_PERIPH_ID_LEN 4 */
 
 /* MC_CMD_PTP_IN_STATUS msgrequest */
 #define    MC_CMD_PTP_IN_STATUS_LEN 8
 /*            MC_CMD_PTP_IN_CMD_OFST 0 */
+/*            MC_CMD_PTP_IN_CMD_LEN 4 */
 /*            MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */
+/*            MC_CMD_PTP_IN_PERIPH_ID_LEN 4 */
 
 /* MC_CMD_PTP_IN_ADJUST msgrequest */
 #define    MC_CMD_PTP_IN_ADJUST_LEN 24
 /*            MC_CMD_PTP_IN_CMD_OFST 0 */
+/*            MC_CMD_PTP_IN_CMD_LEN 4 */
 /*            MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */
+/*            MC_CMD_PTP_IN_PERIPH_ID_LEN 4 */
 /* Frequency adjustment 40 bit fixed point ns */
 #define       MC_CMD_PTP_IN_ADJUST_FREQ_OFST 8
 #define       MC_CMD_PTP_IN_ADJUST_FREQ_LEN 8
@@ -1262,21 +1469,67 @@
 #define       MC_CMD_PTP_IN_ADJUST_FREQ_HI_OFST 12
 /* enum: Number of fractional bits in frequency adjustment */
 #define          MC_CMD_PTP_IN_ADJUST_BITS 0x28
+/* enum: Number of fractional bits in frequency adjustment when FP44_FREQ_ADJ
+ * is indicated in the MC_CMD_PTP_OUT_GET_ATTRIBUTES command CAPABILITIES
+ * field.
+ */
+#define          MC_CMD_PTP_IN_ADJUST_BITS_FP44 0x2c
 /* Time adjustment in seconds */
 #define       MC_CMD_PTP_IN_ADJUST_SECONDS_OFST 16
+#define       MC_CMD_PTP_IN_ADJUST_SECONDS_LEN 4
 /* Time adjustment major value */
 #define       MC_CMD_PTP_IN_ADJUST_MAJOR_OFST 16
+#define       MC_CMD_PTP_IN_ADJUST_MAJOR_LEN 4
 /* Time adjustment in nanoseconds */
 #define       MC_CMD_PTP_IN_ADJUST_NANOSECONDS_OFST 20
+#define       MC_CMD_PTP_IN_ADJUST_NANOSECONDS_LEN 4
 /* Time adjustment minor value */
 #define       MC_CMD_PTP_IN_ADJUST_MINOR_OFST 20
+#define       MC_CMD_PTP_IN_ADJUST_MINOR_LEN 4
+
+/* MC_CMD_PTP_IN_ADJUST_V2 msgrequest */
+#define    MC_CMD_PTP_IN_ADJUST_V2_LEN 28
+/*            MC_CMD_PTP_IN_CMD_OFST 0 */
+/*            MC_CMD_PTP_IN_CMD_LEN 4 */
+/*            MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */
+/*            MC_CMD_PTP_IN_PERIPH_ID_LEN 4 */
+/* Frequency adjustment 40 bit fixed point ns */
+#define       MC_CMD_PTP_IN_ADJUST_V2_FREQ_OFST 8
+#define       MC_CMD_PTP_IN_ADJUST_V2_FREQ_LEN 8
+#define       MC_CMD_PTP_IN_ADJUST_V2_FREQ_LO_OFST 8
+#define       MC_CMD_PTP_IN_ADJUST_V2_FREQ_HI_OFST 12
+/* enum: Number of fractional bits in frequency adjustment */
+/*               MC_CMD_PTP_IN_ADJUST_BITS 0x28 */
+/* enum: Number of fractional bits in frequency adjustment when FP44_FREQ_ADJ
+ * is indicated in the MC_CMD_PTP_OUT_GET_ATTRIBUTES command CAPABILITIES
+ * field.
+ */
+/*               MC_CMD_PTP_IN_ADJUST_BITS_FP44 0x2c */
+/* Time adjustment in seconds */
+#define       MC_CMD_PTP_IN_ADJUST_V2_SECONDS_OFST 16
+#define       MC_CMD_PTP_IN_ADJUST_V2_SECONDS_LEN 4
+/* Time adjustment major value */
+#define       MC_CMD_PTP_IN_ADJUST_V2_MAJOR_OFST 16
+#define       MC_CMD_PTP_IN_ADJUST_V2_MAJOR_LEN 4
+/* Time adjustment in nanoseconds */
+#define       MC_CMD_PTP_IN_ADJUST_V2_NANOSECONDS_OFST 20
+#define       MC_CMD_PTP_IN_ADJUST_V2_NANOSECONDS_LEN 4
+/* Time adjustment minor value */
+#define       MC_CMD_PTP_IN_ADJUST_V2_MINOR_OFST 20
+#define       MC_CMD_PTP_IN_ADJUST_V2_MINOR_LEN 4
+/* Upper 32bits of major time offset adjustment */
+#define       MC_CMD_PTP_IN_ADJUST_V2_MAJOR_HI_OFST 24
+#define       MC_CMD_PTP_IN_ADJUST_V2_MAJOR_HI_LEN 4
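
/* Editorial sketch, not part of the patch: packing a frequency trim given
 * in parts per billion into the fixed-point FREQ field above. The field
 * carries MC_CMD_PTP_IN_ADJUST_BITS (0x28 == 40) fractional bits by
 * default, or 44 (ADJUST_BITS_FP44) when the CAPABILITIES field of
 * MC_CMD_PTP_OUT_GET_ATTRIBUTES reports FP44_FREQ_ADJ. The helper name,
 * the ppb interpretation and the rounding (shown for non-negative trims
 * only) are assumptions; __int128 is a GCC/Clang extension.
 */
#include <stdint.h>

static int64_t ppb_to_mcdi_freq(int64_t ppb, unsigned int frac_bits)
{
	/* freq = ppb * 2^frac_bits / 1e9, widened first to avoid overflow */
	__int128 scaled = (__int128)ppb << frac_bits;

	return (int64_t)((scaled + 500000000) / 1000000000);
}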
 
 /* MC_CMD_PTP_IN_SYNCHRONIZE msgrequest */
 #define    MC_CMD_PTP_IN_SYNCHRONIZE_LEN 20
 /*            MC_CMD_PTP_IN_CMD_OFST 0 */
+/*            MC_CMD_PTP_IN_CMD_LEN 4 */
 /*            MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */
+/*            MC_CMD_PTP_IN_PERIPH_ID_LEN 4 */
 /* Number of time readings to capture */
 #define       MC_CMD_PTP_IN_SYNCHRONIZE_NUMTIMESETS_OFST 8
+#define       MC_CMD_PTP_IN_SYNCHRONIZE_NUMTIMESETS_LEN 4
 /* Host address in which to write "synchronization started" indication (64
  * bits)
  */
@@ -1288,42 +1541,59 @@
 /* MC_CMD_PTP_IN_MANFTEST_BASIC msgrequest */
 #define    MC_CMD_PTP_IN_MANFTEST_BASIC_LEN 8
 /*            MC_CMD_PTP_IN_CMD_OFST 0 */
+/*            MC_CMD_PTP_IN_CMD_LEN 4 */
 /*            MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */
+/*            MC_CMD_PTP_IN_PERIPH_ID_LEN 4 */
 
 /* MC_CMD_PTP_IN_MANFTEST_PACKET msgrequest */
 #define    MC_CMD_PTP_IN_MANFTEST_PACKET_LEN 12
 /*            MC_CMD_PTP_IN_CMD_OFST 0 */
+/*            MC_CMD_PTP_IN_CMD_LEN 4 */
 /*            MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */
+/*            MC_CMD_PTP_IN_PERIPH_ID_LEN 4 */
 /* Enable or disable packet testing */
 #define       MC_CMD_PTP_IN_MANFTEST_PACKET_TEST_ENABLE_OFST 8
+#define       MC_CMD_PTP_IN_MANFTEST_PACKET_TEST_ENABLE_LEN 4
 
 /* MC_CMD_PTP_IN_RESET_STATS msgrequest */
 #define    MC_CMD_PTP_IN_RESET_STATS_LEN 8
 /*            MC_CMD_PTP_IN_CMD_OFST 0 */
+/*            MC_CMD_PTP_IN_CMD_LEN 4 */
 /* Reset PTP statistics */
 /*            MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */
+/*            MC_CMD_PTP_IN_PERIPH_ID_LEN 4 */
 
 /* MC_CMD_PTP_IN_DEBUG msgrequest */
 #define    MC_CMD_PTP_IN_DEBUG_LEN 12
 /*            MC_CMD_PTP_IN_CMD_OFST 0 */
+/*            MC_CMD_PTP_IN_CMD_LEN 4 */
 /*            MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */
+/*            MC_CMD_PTP_IN_PERIPH_ID_LEN 4 */
 /* Debug operations */
 #define       MC_CMD_PTP_IN_DEBUG_DEBUG_PARAM_OFST 8
+#define       MC_CMD_PTP_IN_DEBUG_DEBUG_PARAM_LEN 4
 
 /* MC_CMD_PTP_IN_FPGAREAD msgrequest */
 #define    MC_CMD_PTP_IN_FPGAREAD_LEN 16
 /*            MC_CMD_PTP_IN_CMD_OFST 0 */
+/*            MC_CMD_PTP_IN_CMD_LEN 4 */
 /*            MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */
+/*            MC_CMD_PTP_IN_PERIPH_ID_LEN 4 */
 #define       MC_CMD_PTP_IN_FPGAREAD_ADDR_OFST 8
+#define       MC_CMD_PTP_IN_FPGAREAD_ADDR_LEN 4
 #define       MC_CMD_PTP_IN_FPGAREAD_NUMBYTES_OFST 12
+#define       MC_CMD_PTP_IN_FPGAREAD_NUMBYTES_LEN 4
 
 /* MC_CMD_PTP_IN_FPGAWRITE msgrequest */
 #define    MC_CMD_PTP_IN_FPGAWRITE_LENMIN 13
 #define    MC_CMD_PTP_IN_FPGAWRITE_LENMAX 252
 #define    MC_CMD_PTP_IN_FPGAWRITE_LEN(num) (12+1*(num))
 /*            MC_CMD_PTP_IN_CMD_OFST 0 */
+/*            MC_CMD_PTP_IN_CMD_LEN 4 */
 /*            MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */
+/*            MC_CMD_PTP_IN_PERIPH_ID_LEN 4 */
 #define       MC_CMD_PTP_IN_FPGAWRITE_ADDR_OFST 8
+#define       MC_CMD_PTP_IN_FPGAWRITE_ADDR_LEN 4
 #define       MC_CMD_PTP_IN_FPGAWRITE_BUFFER_OFST 12
 #define       MC_CMD_PTP_IN_FPGAWRITE_BUFFER_LEN 1
 #define       MC_CMD_PTP_IN_FPGAWRITE_BUFFER_MINNUM 1
@@ -1332,34 +1602,67 @@
 /* MC_CMD_PTP_IN_CLOCK_OFFSET_ADJUST msgrequest */
 #define    MC_CMD_PTP_IN_CLOCK_OFFSET_ADJUST_LEN 16
 /*            MC_CMD_PTP_IN_CMD_OFST 0 */
+/*            MC_CMD_PTP_IN_CMD_LEN 4 */
 /*            MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */
+/*            MC_CMD_PTP_IN_PERIPH_ID_LEN 4 */
 /* Time adjustment in seconds */
 #define       MC_CMD_PTP_IN_CLOCK_OFFSET_ADJUST_SECONDS_OFST 8
+#define       MC_CMD_PTP_IN_CLOCK_OFFSET_ADJUST_SECONDS_LEN 4
 /* Time adjustment major value */
 #define       MC_CMD_PTP_IN_CLOCK_OFFSET_ADJUST_MAJOR_OFST 8
+#define       MC_CMD_PTP_IN_CLOCK_OFFSET_ADJUST_MAJOR_LEN 4
 /* Time adjustment in nanoseconds */
 #define       MC_CMD_PTP_IN_CLOCK_OFFSET_ADJUST_NANOSECONDS_OFST 12
+#define       MC_CMD_PTP_IN_CLOCK_OFFSET_ADJUST_NANOSECONDS_LEN 4
 /* Time adjustment minor value */
 #define       MC_CMD_PTP_IN_CLOCK_OFFSET_ADJUST_MINOR_OFST 12
+#define       MC_CMD_PTP_IN_CLOCK_OFFSET_ADJUST_MINOR_LEN 4
+
+/* MC_CMD_PTP_IN_CLOCK_OFFSET_ADJUST_V2 msgrequest */
+#define    MC_CMD_PTP_IN_CLOCK_OFFSET_ADJUST_V2_LEN 20
+/*            MC_CMD_PTP_IN_CMD_OFST 0 */
+/*            MC_CMD_PTP_IN_CMD_LEN 4 */
+/*            MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */
+/*            MC_CMD_PTP_IN_PERIPH_ID_LEN 4 */
+/* Time adjustment in seconds */
+#define       MC_CMD_PTP_IN_CLOCK_OFFSET_ADJUST_V2_SECONDS_OFST 8
+#define       MC_CMD_PTP_IN_CLOCK_OFFSET_ADJUST_V2_SECONDS_LEN 4
+/* Time adjustment major value */
+#define       MC_CMD_PTP_IN_CLOCK_OFFSET_ADJUST_V2_MAJOR_OFST 8
+#define       MC_CMD_PTP_IN_CLOCK_OFFSET_ADJUST_V2_MAJOR_LEN 4
+/* Time adjustment in nanoseconds */
+#define       MC_CMD_PTP_IN_CLOCK_OFFSET_ADJUST_V2_NANOSECONDS_OFST 12
+#define       MC_CMD_PTP_IN_CLOCK_OFFSET_ADJUST_V2_NANOSECONDS_LEN 4
+/* Time adjustment minor value */
+#define       MC_CMD_PTP_IN_CLOCK_OFFSET_ADJUST_V2_MINOR_OFST 12
+#define       MC_CMD_PTP_IN_CLOCK_OFFSET_ADJUST_V2_MINOR_LEN 4
+/* Upper 32bits of major time offset adjustment */
+#define       MC_CMD_PTP_IN_CLOCK_OFFSET_ADJUST_V2_MAJOR_HI_OFST 16
+#define       MC_CMD_PTP_IN_CLOCK_OFFSET_ADJUST_V2_MAJOR_HI_LEN 4
 
 /* MC_CMD_PTP_IN_CLOCK_FREQ_ADJUST msgrequest */
 #define    MC_CMD_PTP_IN_CLOCK_FREQ_ADJUST_LEN 16
 /*            MC_CMD_PTP_IN_CMD_OFST 0 */
+/*            MC_CMD_PTP_IN_CMD_LEN 4 */
 /*            MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */
+/*            MC_CMD_PTP_IN_PERIPH_ID_LEN 4 */
 /* Frequency adjustment 40 bit fixed point ns */
 #define       MC_CMD_PTP_IN_CLOCK_FREQ_ADJUST_FREQ_OFST 8
 #define       MC_CMD_PTP_IN_CLOCK_FREQ_ADJUST_FREQ_LEN 8
 #define       MC_CMD_PTP_IN_CLOCK_FREQ_ADJUST_FREQ_LO_OFST 8
 #define       MC_CMD_PTP_IN_CLOCK_FREQ_ADJUST_FREQ_HI_OFST 12
-/* enum: Number of fractional bits in frequency adjustment */
-/*               MC_CMD_PTP_IN_ADJUST_BITS 0x28 */
+/*            Enum values, see field(s): */
+/*               MC_CMD_PTP/MC_CMD_PTP_IN_ADJUST/FREQ */
 
 /* MC_CMD_PTP_IN_RX_SET_VLAN_FILTER msgrequest */
 #define    MC_CMD_PTP_IN_RX_SET_VLAN_FILTER_LEN 24
 /*            MC_CMD_PTP_IN_CMD_OFST 0 */
+/*            MC_CMD_PTP_IN_CMD_LEN 4 */
 /*            MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */
+/*            MC_CMD_PTP_IN_PERIPH_ID_LEN 4 */
 /* Number of VLAN tags, 0 if not VLAN */
 #define       MC_CMD_PTP_IN_RX_SET_VLAN_FILTER_NUM_VLAN_TAGS_OFST 8
+#define       MC_CMD_PTP_IN_RX_SET_VLAN_FILTER_NUM_VLAN_TAGS_LEN 4
 /* Set of VLAN tags to filter against */
 #define       MC_CMD_PTP_IN_RX_SET_VLAN_FILTER_VLAN_TAG_OFST 12
 #define       MC_CMD_PTP_IN_RX_SET_VLAN_FILTER_VLAN_TAG_LEN 4
@@ -1368,9 +1671,12 @@
 /* MC_CMD_PTP_IN_RX_SET_UUID_FILTER msgrequest */
 #define    MC_CMD_PTP_IN_RX_SET_UUID_FILTER_LEN 20
 /*            MC_CMD_PTP_IN_CMD_OFST 0 */
+/*            MC_CMD_PTP_IN_CMD_LEN 4 */
 /*            MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */
+/*            MC_CMD_PTP_IN_PERIPH_ID_LEN 4 */
 /* 1 to enable UUID filtering, 0 to disable */
 #define       MC_CMD_PTP_IN_RX_SET_UUID_FILTER_ENABLE_OFST 8
+#define       MC_CMD_PTP_IN_RX_SET_UUID_FILTER_ENABLE_LEN 4
 /* UUID to filter against */
 #define       MC_CMD_PTP_IN_RX_SET_UUID_FILTER_UUID_OFST 12
 #define       MC_CMD_PTP_IN_RX_SET_UUID_FILTER_UUID_LEN 8
@@ -1380,18 +1686,25 @@
 /* MC_CMD_PTP_IN_RX_SET_DOMAIN_FILTER msgrequest */
 #define    MC_CMD_PTP_IN_RX_SET_DOMAIN_FILTER_LEN 16
 /*            MC_CMD_PTP_IN_CMD_OFST 0 */
+/*            MC_CMD_PTP_IN_CMD_LEN 4 */
 /*            MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */
+/*            MC_CMD_PTP_IN_PERIPH_ID_LEN 4 */
 /* 1 to enable Domain filtering, 0 to disable */
 #define       MC_CMD_PTP_IN_RX_SET_DOMAIN_FILTER_ENABLE_OFST 8
+#define       MC_CMD_PTP_IN_RX_SET_DOMAIN_FILTER_ENABLE_LEN 4
 /* Domain number to filter against */
 #define       MC_CMD_PTP_IN_RX_SET_DOMAIN_FILTER_DOMAIN_OFST 12
+#define       MC_CMD_PTP_IN_RX_SET_DOMAIN_FILTER_DOMAIN_LEN 4
 
 /* MC_CMD_PTP_IN_SET_CLK_SRC msgrequest */
 #define    MC_CMD_PTP_IN_SET_CLK_SRC_LEN 12
 /*            MC_CMD_PTP_IN_CMD_OFST 0 */
+/*            MC_CMD_PTP_IN_CMD_LEN 4 */
 /*            MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */
+/*            MC_CMD_PTP_IN_PERIPH_ID_LEN 4 */
 /* Set the clock source. */
 #define       MC_CMD_PTP_IN_SET_CLK_SRC_CLK_OFST 8
+#define       MC_CMD_PTP_IN_SET_CLK_SRC_CLK_LEN 4
 /* enum: Internal. */
 #define          MC_CMD_PTP_CLK_SRC_INTERNAL 0x0
 /* enum: External. */
@@ -1400,42 +1713,56 @@
 /* MC_CMD_PTP_IN_RST_CLK msgrequest */
 #define    MC_CMD_PTP_IN_RST_CLK_LEN 8
 /*            MC_CMD_PTP_IN_CMD_OFST 0 */
+/*            MC_CMD_PTP_IN_CMD_LEN 4 */
 /* Reset value of Timer Reg. */
 /*            MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */
+/*            MC_CMD_PTP_IN_PERIPH_ID_LEN 4 */
 
 /* MC_CMD_PTP_IN_PPS_ENABLE msgrequest */
 #define    MC_CMD_PTP_IN_PPS_ENABLE_LEN 12
 /*            MC_CMD_PTP_IN_CMD_OFST 0 */
+/*            MC_CMD_PTP_IN_CMD_LEN 4 */
 /* Enable or disable */
 #define       MC_CMD_PTP_IN_PPS_ENABLE_OP_OFST 4
+#define       MC_CMD_PTP_IN_PPS_ENABLE_OP_LEN 4
 /* enum: Enable */
 #define          MC_CMD_PTP_ENABLE_PPS 0x0
 /* enum: Disable */
 #define          MC_CMD_PTP_DISABLE_PPS 0x1
-/* Queue id to send events back */
+/* Not used. Events are always sent to function-relative queue 0. */
 #define       MC_CMD_PTP_IN_PPS_ENABLE_QUEUE_ID_OFST 8
+#define       MC_CMD_PTP_IN_PPS_ENABLE_QUEUE_ID_LEN 4
 
 /* MC_CMD_PTP_IN_GET_TIME_FORMAT msgrequest */
 #define    MC_CMD_PTP_IN_GET_TIME_FORMAT_LEN 8
 /*            MC_CMD_PTP_IN_CMD_OFST 0 */
+/*            MC_CMD_PTP_IN_CMD_LEN 4 */
 /*            MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */
+/*            MC_CMD_PTP_IN_PERIPH_ID_LEN 4 */
 
 /* MC_CMD_PTP_IN_GET_ATTRIBUTES msgrequest */
 #define    MC_CMD_PTP_IN_GET_ATTRIBUTES_LEN 8
 /*            MC_CMD_PTP_IN_CMD_OFST 0 */
+/*            MC_CMD_PTP_IN_CMD_LEN 4 */
 /*            MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */
+/*            MC_CMD_PTP_IN_PERIPH_ID_LEN 4 */
 
 /* MC_CMD_PTP_IN_GET_TIMESTAMP_CORRECTIONS msgrequest */
 #define    MC_CMD_PTP_IN_GET_TIMESTAMP_CORRECTIONS_LEN 8
 /*            MC_CMD_PTP_IN_CMD_OFST 0 */
+/*            MC_CMD_PTP_IN_CMD_LEN 4 */
 /*            MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */
+/*            MC_CMD_PTP_IN_PERIPH_ID_LEN 4 */
 
 /* MC_CMD_PTP_IN_TIME_EVENT_SUBSCRIBE msgrequest */
 #define    MC_CMD_PTP_IN_TIME_EVENT_SUBSCRIBE_LEN 12
 /*            MC_CMD_PTP_IN_CMD_OFST 0 */
+/*            MC_CMD_PTP_IN_CMD_LEN 4 */
 /*            MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */
+/*            MC_CMD_PTP_IN_PERIPH_ID_LEN 4 */
 /* Original field containing queue ID. Now extended to include flags. */
 #define       MC_CMD_PTP_IN_TIME_EVENT_SUBSCRIBE_QUEUE_OFST 8
+#define       MC_CMD_PTP_IN_TIME_EVENT_SUBSCRIBE_QUEUE_LEN 4
 #define        MC_CMD_PTP_IN_TIME_EVENT_SUBSCRIBE_QUEUE_ID_LBN 0
 #define        MC_CMD_PTP_IN_TIME_EVENT_SUBSCRIBE_QUEUE_ID_WIDTH 16
 #define        MC_CMD_PTP_IN_TIME_EVENT_SUBSCRIBE_REPORT_SYNC_STATUS_LBN 31
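
/* Editorial sketch, not part of the patch: the LBN/WIDTH pairs above locate
 * bitfields within the 32-bit QUEUE word. A generic mask-and-shift helper
 * (assumed, not taken from the driver) is enough to compose the word:
 */
#include <stdint.h>

static uint32_t mcdi_field(uint32_t value, unsigned int lbn,
			   unsigned int width)
{
	uint32_t mask = (width == 32) ? 0xffffffffu : ((1u << width) - 1u);

	return (value & mask) << lbn;
}

/* Example: mcdi_field(evq, 0, 16) | mcdi_field(1, 31, 1) subscribes event
 * queue 'evq' (QUEUE_ID) with REPORT_SYNC_STATUS set.
 */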
@@ -1444,29 +1771,39 @@
 /* MC_CMD_PTP_IN_TIME_EVENT_UNSUBSCRIBE msgrequest */
 #define    MC_CMD_PTP_IN_TIME_EVENT_UNSUBSCRIBE_LEN 16
 /*            MC_CMD_PTP_IN_CMD_OFST 0 */
+/*            MC_CMD_PTP_IN_CMD_LEN 4 */
 /*            MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */
+/*            MC_CMD_PTP_IN_PERIPH_ID_LEN 4 */
 /* Unsubscribe options */
 #define       MC_CMD_PTP_IN_TIME_EVENT_UNSUBSCRIBE_CONTROL_OFST 8
+#define       MC_CMD_PTP_IN_TIME_EVENT_UNSUBSCRIBE_CONTROL_LEN 4
 /* enum: Unsubscribe a single queue */
 #define          MC_CMD_PTP_IN_TIME_EVENT_UNSUBSCRIBE_SINGLE 0x0
 /* enum: Unsubscribe all queues */
 #define          MC_CMD_PTP_IN_TIME_EVENT_UNSUBSCRIBE_ALL 0x1
 /* Event queue ID */
 #define       MC_CMD_PTP_IN_TIME_EVENT_UNSUBSCRIBE_QUEUE_OFST 12
+#define       MC_CMD_PTP_IN_TIME_EVENT_UNSUBSCRIBE_QUEUE_LEN 4
 
 /* MC_CMD_PTP_IN_MANFTEST_PPS msgrequest */
 #define    MC_CMD_PTP_IN_MANFTEST_PPS_LEN 12
 /*            MC_CMD_PTP_IN_CMD_OFST 0 */
+/*            MC_CMD_PTP_IN_CMD_LEN 4 */
 /*            MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */
+/*            MC_CMD_PTP_IN_PERIPH_ID_LEN 4 */
 /* 1 to enable PPS test mode, 0 to disable and return result. */
 #define       MC_CMD_PTP_IN_MANFTEST_PPS_TEST_ENABLE_OFST 8
+#define       MC_CMD_PTP_IN_MANFTEST_PPS_TEST_ENABLE_LEN 4
 
 /* MC_CMD_PTP_IN_SET_SYNC_STATUS msgrequest */
 #define    MC_CMD_PTP_IN_SET_SYNC_STATUS_LEN 24
 /*            MC_CMD_PTP_IN_CMD_OFST 0 */
+/*            MC_CMD_PTP_IN_CMD_LEN 4 */
 /*            MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */
+/*            MC_CMD_PTP_IN_PERIPH_ID_LEN 4 */
 /* NIC - Host System Clock Synchronization status */
 #define       MC_CMD_PTP_IN_SET_SYNC_STATUS_STATUS_OFST 8
+#define       MC_CMD_PTP_IN_SET_SYNC_STATUS_STATUS_LEN 4
 /* enum: Host System clock and NIC clock are not in sync */
 #define          MC_CMD_PTP_IN_SET_SYNC_STATUS_NOT_IN_SYNC 0x0
 /* enum: Host System clock and NIC clock are synchronized */
@@ -1475,8 +1812,11 @@
  * no longer in sync.
  */
 #define       MC_CMD_PTP_IN_SET_SYNC_STATUS_TIMEOUT_OFST 12
+#define       MC_CMD_PTP_IN_SET_SYNC_STATUS_TIMEOUT_LEN 4
 #define       MC_CMD_PTP_IN_SET_SYNC_STATUS_RESERVED0_OFST 16
+#define       MC_CMD_PTP_IN_SET_SYNC_STATUS_RESERVED0_LEN 4
 #define       MC_CMD_PTP_IN_SET_SYNC_STATUS_RESERVED1_OFST 20
+#define       MC_CMD_PTP_IN_SET_SYNC_STATUS_RESERVED1_LEN 4
 
 /* MC_CMD_PTP_OUT msgresponse */
 #define    MC_CMD_PTP_OUT_LEN 0
@@ -1485,12 +1825,16 @@
 #define    MC_CMD_PTP_OUT_TRANSMIT_LEN 8
 /* Value of seconds timestamp */
 #define       MC_CMD_PTP_OUT_TRANSMIT_SECONDS_OFST 0
+#define       MC_CMD_PTP_OUT_TRANSMIT_SECONDS_LEN 4
 /* Timestamp major value */
 #define       MC_CMD_PTP_OUT_TRANSMIT_MAJOR_OFST 0
+#define       MC_CMD_PTP_OUT_TRANSMIT_MAJOR_LEN 4
 /* Value of nanoseconds timestamp */
 #define       MC_CMD_PTP_OUT_TRANSMIT_NANOSECONDS_OFST 4
+#define       MC_CMD_PTP_OUT_TRANSMIT_NANOSECONDS_LEN 4
 /* Timestamp minor value */
 #define       MC_CMD_PTP_OUT_TRANSMIT_MINOR_OFST 4
+#define       MC_CMD_PTP_OUT_TRANSMIT_MINOR_LEN 4
 
 /* MC_CMD_PTP_OUT_TIME_EVENT_SUBSCRIBE msgresponse */
 #define    MC_CMD_PTP_OUT_TIME_EVENT_SUBSCRIBE_LEN 0
@@ -1502,47 +1846,85 @@
 #define    MC_CMD_PTP_OUT_READ_NIC_TIME_LEN 8
 /* Value of seconds timestamp */
 #define       MC_CMD_PTP_OUT_READ_NIC_TIME_SECONDS_OFST 0
+#define       MC_CMD_PTP_OUT_READ_NIC_TIME_SECONDS_LEN 4
 /* Timestamp major value */
 #define       MC_CMD_PTP_OUT_READ_NIC_TIME_MAJOR_OFST 0
+#define       MC_CMD_PTP_OUT_READ_NIC_TIME_MAJOR_LEN 4
 /* Value of nanoseconds timestamp */
 #define       MC_CMD_PTP_OUT_READ_NIC_TIME_NANOSECONDS_OFST 4
+#define       MC_CMD_PTP_OUT_READ_NIC_TIME_NANOSECONDS_LEN 4
 /* Timestamp minor value */
 #define       MC_CMD_PTP_OUT_READ_NIC_TIME_MINOR_OFST 4
+#define       MC_CMD_PTP_OUT_READ_NIC_TIME_MINOR_LEN 4
+
+/* MC_CMD_PTP_OUT_READ_NIC_TIME_V2 msgresponse */
+#define    MC_CMD_PTP_OUT_READ_NIC_TIME_V2_LEN 12
+/* Value of seconds timestamp */
+#define       MC_CMD_PTP_OUT_READ_NIC_TIME_V2_SECONDS_OFST 0
+#define       MC_CMD_PTP_OUT_READ_NIC_TIME_V2_SECONDS_LEN 4
+/* Timestamp major value */
+#define       MC_CMD_PTP_OUT_READ_NIC_TIME_V2_MAJOR_OFST 0
+#define       MC_CMD_PTP_OUT_READ_NIC_TIME_V2_MAJOR_LEN 4
+/* Value of nanoseconds timestamp */
+#define       MC_CMD_PTP_OUT_READ_NIC_TIME_V2_NANOSECONDS_OFST 4
+#define       MC_CMD_PTP_OUT_READ_NIC_TIME_V2_NANOSECONDS_LEN 4
+/* Timestamp minor value */
+#define       MC_CMD_PTP_OUT_READ_NIC_TIME_V2_MINOR_OFST 4
+#define       MC_CMD_PTP_OUT_READ_NIC_TIME_V2_MINOR_LEN 4
+/* Upper 32bits of major timestamp value */
+#define       MC_CMD_PTP_OUT_READ_NIC_TIME_V2_MAJOR_HI_OFST 8
+#define       MC_CMD_PTP_OUT_READ_NIC_TIME_V2_MAJOR_HI_LEN 4
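
/* Editorial sketch, not part of the patch: reassembling the 64-bit major
 * (seconds) value from a READ_NIC_TIME_V2 response, where MAJOR holds bits
 * 31:0 at offset 0 and MAJOR_HI holds bits 63:32 at offset 8. The
 * little-endian read helper is an assumption.
 */
#include <stdint.h>

static uint32_t read_le32(const uint8_t *buf, unsigned int ofst)
{
	return (uint32_t)buf[ofst] | ((uint32_t)buf[ofst + 1] << 8) |
	       ((uint32_t)buf[ofst + 2] << 16) |
	       ((uint32_t)buf[ofst + 3] << 24);
}

static uint64_t nic_time_v2_major(const uint8_t *resp)
{
	return (uint64_t)read_le32(resp, 0) |		/* ..._V2_MAJOR_OFST */
	       ((uint64_t)read_le32(resp, 8) << 32);	/* ..._V2_MAJOR_HI_OFST */
}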
 
 /* MC_CMD_PTP_OUT_STATUS msgresponse */
 #define    MC_CMD_PTP_OUT_STATUS_LEN 64
 /* Frequency of NIC's hardware clock */
 #define       MC_CMD_PTP_OUT_STATUS_CLOCK_FREQ_OFST 0
+#define       MC_CMD_PTP_OUT_STATUS_CLOCK_FREQ_LEN 4
 /* Number of packets transmitted and timestamped */
 #define       MC_CMD_PTP_OUT_STATUS_STATS_TX_OFST 4
+#define       MC_CMD_PTP_OUT_STATUS_STATS_TX_LEN 4
 /* Number of packets received and timestamped */
 #define       MC_CMD_PTP_OUT_STATUS_STATS_RX_OFST 8
+#define       MC_CMD_PTP_OUT_STATUS_STATS_RX_LEN 4
 /* Number of packets timestamped by the FPGA */
 #define       MC_CMD_PTP_OUT_STATUS_STATS_TS_OFST 12
+#define       MC_CMD_PTP_OUT_STATUS_STATS_TS_LEN 4
 /* Number of packets filter matched */
 #define       MC_CMD_PTP_OUT_STATUS_STATS_FM_OFST 16
+#define       MC_CMD_PTP_OUT_STATUS_STATS_FM_LEN 4
 /* Number of packets not filter matched */
 #define       MC_CMD_PTP_OUT_STATUS_STATS_NFM_OFST 20
+#define       MC_CMD_PTP_OUT_STATUS_STATS_NFM_LEN 4
 /* Number of PPS overflows (noise on input?) */
 #define       MC_CMD_PTP_OUT_STATUS_STATS_PPS_OFLOW_OFST 24
+#define       MC_CMD_PTP_OUT_STATUS_STATS_PPS_OFLOW_LEN 4
 /* Number of PPS bad periods */
 #define       MC_CMD_PTP_OUT_STATUS_STATS_PPS_BAD_OFST 28
+#define       MC_CMD_PTP_OUT_STATUS_STATS_PPS_BAD_LEN 4
 /* Minimum period of PPS pulse in nanoseconds */
 #define       MC_CMD_PTP_OUT_STATUS_STATS_PPS_PER_MIN_OFST 32
+#define       MC_CMD_PTP_OUT_STATUS_STATS_PPS_PER_MIN_LEN 4
 /* Maximum period of PPS pulse in nanoseconds */
 #define       MC_CMD_PTP_OUT_STATUS_STATS_PPS_PER_MAX_OFST 36
+#define       MC_CMD_PTP_OUT_STATUS_STATS_PPS_PER_MAX_LEN 4
 /* Last period of PPS pulse in nanoseconds */
 #define       MC_CMD_PTP_OUT_STATUS_STATS_PPS_PER_LAST_OFST 40
+#define       MC_CMD_PTP_OUT_STATUS_STATS_PPS_PER_LAST_LEN 4
 /* Mean period of PPS pulse in nanoseconds */
 #define       MC_CMD_PTP_OUT_STATUS_STATS_PPS_PER_MEAN_OFST 44
+#define       MC_CMD_PTP_OUT_STATUS_STATS_PPS_PER_MEAN_LEN 4
 /* Minimum offset of PPS pulse in nanoseconds (signed) */
 #define       MC_CMD_PTP_OUT_STATUS_STATS_PPS_OFF_MIN_OFST 48
+#define       MC_CMD_PTP_OUT_STATUS_STATS_PPS_OFF_MIN_LEN 4
 /* Maximum offset of PPS pulse in nanoseconds (signed) */
 #define       MC_CMD_PTP_OUT_STATUS_STATS_PPS_OFF_MAX_OFST 52
+#define       MC_CMD_PTP_OUT_STATUS_STATS_PPS_OFF_MAX_LEN 4
 /* Last offset of PPS pulse in nanoseconds (signed) */
 #define       MC_CMD_PTP_OUT_STATUS_STATS_PPS_OFF_LAST_OFST 56
+#define       MC_CMD_PTP_OUT_STATUS_STATS_PPS_OFF_LAST_LEN 4
 /* Mean offset of PPS pulse in nanoseconds (signed) */
 #define       MC_CMD_PTP_OUT_STATUS_STATS_PPS_OFF_MEAN_OFST 60
+#define       MC_CMD_PTP_OUT_STATUS_STATS_PPS_OFF_MEAN_LEN 4
 
 /* MC_CMD_PTP_OUT_SYNCHRONIZE msgresponse */
 #define    MC_CMD_PTP_OUT_SYNCHRONIZE_LENMIN 20
@@ -1555,23 +1937,31 @@
 #define       MC_CMD_PTP_OUT_SYNCHRONIZE_TIMESET_MAXNUM 12
 /* Host time immediately before NIC's hardware clock read */
 #define       MC_CMD_PTP_OUT_SYNCHRONIZE_HOSTSTART_OFST 0
+#define       MC_CMD_PTP_OUT_SYNCHRONIZE_HOSTSTART_LEN 4
 /* Value of seconds timestamp */
 #define       MC_CMD_PTP_OUT_SYNCHRONIZE_SECONDS_OFST 4
+#define       MC_CMD_PTP_OUT_SYNCHRONIZE_SECONDS_LEN 4
 /* Timestamp major value */
 #define       MC_CMD_PTP_OUT_SYNCHRONIZE_MAJOR_OFST 4
+#define       MC_CMD_PTP_OUT_SYNCHRONIZE_MAJOR_LEN 4
 /* Value of nanoseconds timestamp */
 #define       MC_CMD_PTP_OUT_SYNCHRONIZE_NANOSECONDS_OFST 8
+#define       MC_CMD_PTP_OUT_SYNCHRONIZE_NANOSECONDS_LEN 4
 /* Timestamp minor value */
 #define       MC_CMD_PTP_OUT_SYNCHRONIZE_MINOR_OFST 8
+#define       MC_CMD_PTP_OUT_SYNCHRONIZE_MINOR_LEN 4
 /* Host time immediately after NIC's hardware clock read */
 #define       MC_CMD_PTP_OUT_SYNCHRONIZE_HOSTEND_OFST 12
+#define       MC_CMD_PTP_OUT_SYNCHRONIZE_HOSTEND_LEN 4
 /* Number of nanoseconds waited after reading NIC's hardware clock */
 #define       MC_CMD_PTP_OUT_SYNCHRONIZE_WAITNS_OFST 16
+#define       MC_CMD_PTP_OUT_SYNCHRONIZE_WAITNS_LEN 4
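
/* Editorial sketch, not part of the patch: judging one SYNCHRONIZE timeset.
 * Per the GET_ATTRIBUTES description below, the corrected sync window is
 * the host end-to-start span minus the time the MC waited for host end; a
 * sample is usable when that window is at least SYNC_WINDOW_MIN. Helper
 * name and wrap handling are assumptions.
 */
#include <stdbool.h>
#include <stdint.h>

static bool timeset_usable(uint32_t hoststart, uint32_t hostend,
			   uint32_t waitns, uint32_t sync_window_min)
{
	uint32_t window = hostend - hoststart;	/* unsigned wrap is safe */

	return window >= waitns && (window - waitns) >= sync_window_min;
}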
 
 /* MC_CMD_PTP_OUT_MANFTEST_BASIC msgresponse */
 #define    MC_CMD_PTP_OUT_MANFTEST_BASIC_LEN 8
 /* Results of testing */
 #define       MC_CMD_PTP_OUT_MANFTEST_BASIC_TEST_RESULT_OFST 0
+#define       MC_CMD_PTP_OUT_MANFTEST_BASIC_TEST_RESULT_LEN 4
 /* enum: Successful test */
 #define          MC_CMD_PTP_MANF_SUCCESS 0x0
 /* enum: FPGA load failed */
@@ -1604,15 +1994,19 @@
 #define          MC_CMD_PTP_MANF_CLOCK_READ 0xe
 /* Presence of external oscillator */
 #define       MC_CMD_PTP_OUT_MANFTEST_BASIC_TEST_EXTOSC_OFST 4
+#define       MC_CMD_PTP_OUT_MANFTEST_BASIC_TEST_EXTOSC_LEN 4
 
 /* MC_CMD_PTP_OUT_MANFTEST_PACKET msgresponse */
 #define    MC_CMD_PTP_OUT_MANFTEST_PACKET_LEN 12
 /* Results of testing */
 #define       MC_CMD_PTP_OUT_MANFTEST_PACKET_TEST_RESULT_OFST 0
+#define       MC_CMD_PTP_OUT_MANFTEST_PACKET_TEST_RESULT_LEN 4
 /* Number of packets received by FPGA */
 #define       MC_CMD_PTP_OUT_MANFTEST_PACKET_TEST_FPGACOUNT_OFST 4
+#define       MC_CMD_PTP_OUT_MANFTEST_PACKET_TEST_FPGACOUNT_LEN 4
 /* Number of packets received by Siena filters */
 #define       MC_CMD_PTP_OUT_MANFTEST_PACKET_TEST_FILTERCOUNT_OFST 8
+#define       MC_CMD_PTP_OUT_MANFTEST_PACKET_TEST_FILTERCOUNT_LEN 4
 
 /* MC_CMD_PTP_OUT_FPGAREAD msgresponse */
 #define    MC_CMD_PTP_OUT_FPGAREAD_LENMIN 1
@@ -1628,9 +2022,11 @@
 /* Time format required/used by this NIC. Applies to all PTP MCDI
  * operations that pass times between the host and firmware. If this operation
  * is not supported (older firmware), a format of seconds and nanoseconds should
- * be assumed.
+ * be assumed. Note this enum is deprecated. Do not add to it; use the
+ * TIME_FORMAT field in MC_CMD_PTP_OUT_GET_ATTRIBUTES instead.
  */
 #define       MC_CMD_PTP_OUT_GET_TIME_FORMAT_FORMAT_OFST 0
+#define       MC_CMD_PTP_OUT_GET_TIME_FORMAT_FORMAT_LEN 4
 /* enum: Times are in seconds and nanoseconds */
 #define          MC_CMD_PTP_OUT_GET_TIME_FORMAT_SECONDS_NANOSECONDS 0x0
 /* enum: Major register has units of 16 seconds per tick, minor 8 ns per tick */
@@ -1646,12 +2042,16 @@
  * be assumed.
  */
 #define       MC_CMD_PTP_OUT_GET_ATTRIBUTES_TIME_FORMAT_OFST 0
+#define       MC_CMD_PTP_OUT_GET_ATTRIBUTES_TIME_FORMAT_LEN 4
 /* enum: Times are in seconds and nanoseconds */
 #define          MC_CMD_PTP_OUT_GET_ATTRIBUTES_SECONDS_NANOSECONDS 0x0
 /* enum: Major register has units of 16 seconds per tick, minor 8 ns per tick */
 #define          MC_CMD_PTP_OUT_GET_ATTRIBUTES_16SECONDS_8NANOSECONDS 0x1
 /* enum: Major register has units of seconds, minor 2^-27s per tick */
 #define          MC_CMD_PTP_OUT_GET_ATTRIBUTES_SECONDS_27FRACTION 0x2
+/* enum: Major register units are seconds, minor units are quarter nanoseconds
+ */
+#define          MC_CMD_PTP_OUT_GET_ATTRIBUTES_SECONDS_QTR_NANOSECONDS 0x3
 /* Minimum acceptable value for a corrected synchronization timeset. When
  * comparing host and NIC clock times, the MC returns a set of samples that
  * contain the host start and end time, the MC time when the host start was
@@ -1660,46 +2060,66 @@
  * end and start times minus the time that the MC waited for host end.
  */
 #define       MC_CMD_PTP_OUT_GET_ATTRIBUTES_SYNC_WINDOW_MIN_OFST 4
+#define       MC_CMD_PTP_OUT_GET_ATTRIBUTES_SYNC_WINDOW_MIN_LEN 4
 /* Various PTP capabilities */
 #define       MC_CMD_PTP_OUT_GET_ATTRIBUTES_CAPABILITIES_OFST 8
+#define       MC_CMD_PTP_OUT_GET_ATTRIBUTES_CAPABILITIES_LEN 4
 #define        MC_CMD_PTP_OUT_GET_ATTRIBUTES_REPORT_SYNC_STATUS_LBN 0
 #define        MC_CMD_PTP_OUT_GET_ATTRIBUTES_REPORT_SYNC_STATUS_WIDTH 1
 #define        MC_CMD_PTP_OUT_GET_ATTRIBUTES_RX_TSTAMP_OOB_LBN 1
 #define        MC_CMD_PTP_OUT_GET_ATTRIBUTES_RX_TSTAMP_OOB_WIDTH 1
+#define        MC_CMD_PTP_OUT_GET_ATTRIBUTES_64BIT_SECONDS_LBN 2
+#define        MC_CMD_PTP_OUT_GET_ATTRIBUTES_64BIT_SECONDS_WIDTH 1
+#define        MC_CMD_PTP_OUT_GET_ATTRIBUTES_FP44_FREQ_ADJ_LBN 3
+#define        MC_CMD_PTP_OUT_GET_ATTRIBUTES_FP44_FREQ_ADJ_WIDTH 1
 #define       MC_CMD_PTP_OUT_GET_ATTRIBUTES_RESERVED0_OFST 12
+#define       MC_CMD_PTP_OUT_GET_ATTRIBUTES_RESERVED0_LEN 4
 #define       MC_CMD_PTP_OUT_GET_ATTRIBUTES_RESERVED1_OFST 16
+#define       MC_CMD_PTP_OUT_GET_ATTRIBUTES_RESERVED1_LEN 4
 #define       MC_CMD_PTP_OUT_GET_ATTRIBUTES_RESERVED2_OFST 20
+#define       MC_CMD_PTP_OUT_GET_ATTRIBUTES_RESERVED2_LEN 4
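
/* Editorial sketch, not part of the patch: expanding a (MAJOR, MINOR)
 * timestamp according to the TIME_FORMAT enum above. Only the format
 * values come from this header; the helper itself is assumed.
 */
#include <stdint.h>

static void nic_time_to_secs_ns(uint32_t fmt, uint64_t major, uint32_t minor,
				uint64_t *secs, uint32_t *ns)
{
	switch (fmt) {
	case 0x0:	/* SECONDS_NANOSECONDS */
		*secs = major;
		*ns = minor;
		break;
	case 0x1:	/* 16SECONDS_8NANOSECONDS */
		*secs = major * 16 + ((uint64_t)minor * 8) / 1000000000u;
		*ns = (uint32_t)(((uint64_t)minor * 8) % 1000000000u);
		break;
	case 0x2:	/* SECONDS_27FRACTION: minor ticks of 2^-27 s */
		*secs = major;
		*ns = (uint32_t)(((uint64_t)minor * 1000000000u) >> 27);
		break;
	case 0x3:	/* SECONDS_QTR_NANOSECONDS */
		*secs = major;
		*ns = minor / 4;
		break;
	}
}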
 
 /* MC_CMD_PTP_OUT_GET_TIMESTAMP_CORRECTIONS msgresponse */
 #define    MC_CMD_PTP_OUT_GET_TIMESTAMP_CORRECTIONS_LEN 16
 /* Uncorrected error on PTP transmit timestamps in NIC clock format */
 #define       MC_CMD_PTP_OUT_GET_TIMESTAMP_CORRECTIONS_TRANSMIT_OFST 0
+#define       MC_CMD_PTP_OUT_GET_TIMESTAMP_CORRECTIONS_TRANSMIT_LEN 4
 /* Uncorrected error on PTP receive timestamps in NIC clock format */
 #define       MC_CMD_PTP_OUT_GET_TIMESTAMP_CORRECTIONS_RECEIVE_OFST 4
+#define       MC_CMD_PTP_OUT_GET_TIMESTAMP_CORRECTIONS_RECEIVE_LEN 4
 /* Uncorrected error on PPS output in NIC clock format */
 #define       MC_CMD_PTP_OUT_GET_TIMESTAMP_CORRECTIONS_PPS_OUT_OFST 8
+#define       MC_CMD_PTP_OUT_GET_TIMESTAMP_CORRECTIONS_PPS_OUT_LEN 4
 /* Uncorrected error on PPS input in NIC clock format */
 #define       MC_CMD_PTP_OUT_GET_TIMESTAMP_CORRECTIONS_PPS_IN_OFST 12
+#define       MC_CMD_PTP_OUT_GET_TIMESTAMP_CORRECTIONS_PPS_IN_LEN 4
 
 /* MC_CMD_PTP_OUT_GET_TIMESTAMP_CORRECTIONS_V2 msgresponse */
 #define    MC_CMD_PTP_OUT_GET_TIMESTAMP_CORRECTIONS_V2_LEN 24
 /* Uncorrected error on PTP transmit timestamps in NIC clock format */
 #define       MC_CMD_PTP_OUT_GET_TIMESTAMP_CORRECTIONS_V2_PTP_TX_OFST 0
+#define       MC_CMD_PTP_OUT_GET_TIMESTAMP_CORRECTIONS_V2_PTP_TX_LEN 4
 /* Uncorrected error on PTP receive timestamps in NIC clock format */
 #define       MC_CMD_PTP_OUT_GET_TIMESTAMP_CORRECTIONS_V2_PTP_RX_OFST 4
+#define       MC_CMD_PTP_OUT_GET_TIMESTAMP_CORRECTIONS_V2_PTP_RX_LEN 4
 /* Uncorrected error on PPS output in NIC clock format */
 #define       MC_CMD_PTP_OUT_GET_TIMESTAMP_CORRECTIONS_V2_PPS_OUT_OFST 8
+#define       MC_CMD_PTP_OUT_GET_TIMESTAMP_CORRECTIONS_V2_PPS_OUT_LEN 4
 /* Uncorrected error on PPS input in NIC clock format */
 #define       MC_CMD_PTP_OUT_GET_TIMESTAMP_CORRECTIONS_V2_PPS_IN_OFST 12
+#define       MC_CMD_PTP_OUT_GET_TIMESTAMP_CORRECTIONS_V2_PPS_IN_LEN 4
 /* Uncorrected error on non-PTP transmit timestamps in NIC clock format */
 #define       MC_CMD_PTP_OUT_GET_TIMESTAMP_CORRECTIONS_V2_GENERAL_TX_OFST 16
+#define       MC_CMD_PTP_OUT_GET_TIMESTAMP_CORRECTIONS_V2_GENERAL_TX_LEN 4
 /* Uncorrected error on non-PTP receive timestamps in NIC clock format */
 #define       MC_CMD_PTP_OUT_GET_TIMESTAMP_CORRECTIONS_V2_GENERAL_RX_OFST 20
+#define       MC_CMD_PTP_OUT_GET_TIMESTAMP_CORRECTIONS_V2_GENERAL_RX_LEN 4
 
 /* MC_CMD_PTP_OUT_MANFTEST_PPS msgresponse */
 #define    MC_CMD_PTP_OUT_MANFTEST_PPS_LEN 4
 /* Results of testing */
 #define       MC_CMD_PTP_OUT_MANFTEST_PPS_TEST_RESULT_OFST 0
+#define       MC_CMD_PTP_OUT_MANFTEST_PPS_TEST_RESULT_LEN 4
 /*            Enum values, see field(s): */
 /*               MC_CMD_PTP_OUT_MANFTEST_BASIC/TEST_RESULT */
 
@@ -1713,14 +2133,17 @@
  */
 #define MC_CMD_CSR_READ32 0xc
 
-#define MC_CMD_0xc_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+#define MC_CMD_0xc_PRIVILEGE_CTG SRIOV_CTG_INSECURE
 
 /* MC_CMD_CSR_READ32_IN msgrequest */
 #define    MC_CMD_CSR_READ32_IN_LEN 12
 /* Address */
 #define       MC_CMD_CSR_READ32_IN_ADDR_OFST 0
+#define       MC_CMD_CSR_READ32_IN_ADDR_LEN 4
 #define       MC_CMD_CSR_READ32_IN_STEP_OFST 4
+#define       MC_CMD_CSR_READ32_IN_STEP_LEN 4
 #define       MC_CMD_CSR_READ32_IN_NUMWORDS_OFST 8
+#define       MC_CMD_CSR_READ32_IN_NUMWORDS_LEN 4
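
/* Editorial sketch, not part of the patch: a strided CSR read. Assuming
 * STEP gives the byte stride between successive words (the semantics are
 * inferred, not documented here), the addresses covered by one request
 * would be:
 */
#include <stdint.h>

static uint32_t csr_read32_word_addr(uint32_t addr, uint32_t step,
				     unsigned int i)
{
	return addr + i * step;	/* i in [0, NUMWORDS) */
}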
 
 /* MC_CMD_CSR_READ32_OUT msgresponse */
 #define    MC_CMD_CSR_READ32_OUT_LENMIN 4
@@ -1739,7 +2162,7 @@
  */
 #define MC_CMD_CSR_WRITE32 0xd
 
-#define MC_CMD_0xd_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+#define MC_CMD_0xd_PRIVILEGE_CTG SRIOV_CTG_INSECURE
 
 /* MC_CMD_CSR_WRITE32_IN msgrequest */
 #define    MC_CMD_CSR_WRITE32_IN_LENMIN 12
@@ -1747,7 +2170,9 @@
 #define    MC_CMD_CSR_WRITE32_IN_LEN(num) (8+4*(num))
 /* Address */
 #define       MC_CMD_CSR_WRITE32_IN_ADDR_OFST 0
+#define       MC_CMD_CSR_WRITE32_IN_ADDR_LEN 4
 #define       MC_CMD_CSR_WRITE32_IN_STEP_OFST 4
+#define       MC_CMD_CSR_WRITE32_IN_STEP_LEN 4
 #define       MC_CMD_CSR_WRITE32_IN_BUFFER_OFST 8
 #define       MC_CMD_CSR_WRITE32_IN_BUFFER_LEN 4
 #define       MC_CMD_CSR_WRITE32_IN_BUFFER_MINNUM 1
@@ -1756,6 +2181,7 @@
 /* MC_CMD_CSR_WRITE32_OUT msgresponse */
 #define    MC_CMD_CSR_WRITE32_OUT_LEN 4
 #define       MC_CMD_CSR_WRITE32_OUT_STATUS_OFST 0
+#define       MC_CMD_CSR_WRITE32_OUT_STATUS_LEN 4
 
 
 /***********************************/
@@ -1776,6 +2202,7 @@
  * sensors.
  */
 #define       MC_CMD_HP_IN_SUBCMD_OFST 0
+#define       MC_CMD_HP_IN_SUBCMD_LEN 4
 /* enum: OCSD (Option Card Sensor Data) sub-command. */
 #define          MC_CMD_HP_IN_OCSD_SUBCMD 0x0
 /* enum: Last known valid HP sub-command. */
@@ -1790,10 +2217,12 @@
  * NULL.)
  */
 #define       MC_CMD_HP_IN_OCSD_INTERVAL_OFST 12
+#define       MC_CMD_HP_IN_OCSD_INTERVAL_LEN 4
 
 /* MC_CMD_HP_OUT msgresponse */
 #define    MC_CMD_HP_OUT_LEN 4
 #define       MC_CMD_HP_OUT_OCSD_STATUS_OFST 0
+#define       MC_CMD_HP_OUT_OCSD_STATUS_LEN 4
 /* enum: OCSD stopped for this card. */
 #define          MC_CMD_HP_OUT_OCSD_STOPPED 0x1
 /* enum: OCSD was successfully started with the address provided. */
@@ -1838,29 +2267,35 @@
  * external devices.
  */
 #define       MC_CMD_MDIO_READ_IN_BUS_OFST 0
+#define       MC_CMD_MDIO_READ_IN_BUS_LEN 4
 /* enum: Internal. */
 #define          MC_CMD_MDIO_BUS_INTERNAL 0x0
 /* enum: External. */
 #define          MC_CMD_MDIO_BUS_EXTERNAL 0x1
 /* Port address */
 #define       MC_CMD_MDIO_READ_IN_PRTAD_OFST 4
+#define       MC_CMD_MDIO_READ_IN_PRTAD_LEN 4
 /* Device Address or clause 22. */
 #define       MC_CMD_MDIO_READ_IN_DEVAD_OFST 8
+#define       MC_CMD_MDIO_READ_IN_DEVAD_LEN 4
 /* enum: By default all the MCDI MDIO operations use clause 45 mode. If you
  * want to use clause 22, set DEVAD = MC_CMD_MDIO_CLAUSE22.
  */
 #define          MC_CMD_MDIO_CLAUSE22 0x20
 /* Address */
 #define       MC_CMD_MDIO_READ_IN_ADDR_OFST 12
+#define       MC_CMD_MDIO_READ_IN_ADDR_LEN 4
 
 /* MC_CMD_MDIO_READ_OUT msgresponse */
 #define    MC_CMD_MDIO_READ_OUT_LEN 8
 /* Value */
 #define       MC_CMD_MDIO_READ_OUT_VALUE_OFST 0
+#define       MC_CMD_MDIO_READ_OUT_VALUE_LEN 4
 /* Status: the MDIO commands return the raw status bits from the MDIO block. A
  * "good" transaction should have the DONE bit set and all other bits clear.
  */
 #define       MC_CMD_MDIO_READ_OUT_STATUS_OFST 4
+#define       MC_CMD_MDIO_READ_OUT_STATUS_LEN 4
 /* enum: Good. */
 #define          MC_CMD_MDIO_STATUS_GOOD 0x8
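
/* Editorial sketch, not part of the patch: building an MC_CMD_MDIO_READ
 * request for a clause 22 register access using the offsets above, with
 * DEVAD set to MC_CMD_MDIO_CLAUSE22 (0x20). The write_le32 helper and the
 * surrounding MCDI transport are assumptions.
 */
#include <stdint.h>
#include <string.h>

static void write_le32(uint8_t *buf, unsigned int ofst, uint32_t v)
{
	buf[ofst + 0] = v & 0xff;
	buf[ofst + 1] = (v >> 8) & 0xff;
	buf[ofst + 2] = (v >> 16) & 0xff;
	buf[ofst + 3] = (v >> 24) & 0xff;
}

static void build_mdio_c22_read(uint8_t req[16], uint32_t prtad,
				uint32_t reg_addr)
{
	memset(req, 0, 16);
	write_le32(req, 0, 0x1);	/* BUS: MC_CMD_MDIO_BUS_EXTERNAL */
	write_le32(req, 4, prtad);	/* PRTAD: port (PHY) address */
	write_le32(req, 8, 0x20);	/* DEVAD = MC_CMD_MDIO_CLAUSE22 */
	write_le32(req, 12, reg_addr);	/* ADDR: register address */
}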
 
@@ -1879,22 +2314,27 @@
  * external devices.
  */
 #define       MC_CMD_MDIO_WRITE_IN_BUS_OFST 0
+#define       MC_CMD_MDIO_WRITE_IN_BUS_LEN 4
 /* enum: Internal. */
 /*               MC_CMD_MDIO_BUS_INTERNAL 0x0 */
 /* enum: External. */
 /*               MC_CMD_MDIO_BUS_EXTERNAL 0x1 */
 /* Port address */
 #define       MC_CMD_MDIO_WRITE_IN_PRTAD_OFST 4
+#define       MC_CMD_MDIO_WRITE_IN_PRTAD_LEN 4
 /* Device Address or clause 22. */
 #define       MC_CMD_MDIO_WRITE_IN_DEVAD_OFST 8
+#define       MC_CMD_MDIO_WRITE_IN_DEVAD_LEN 4
 /* enum: By default all the MCDI MDIO operations use clause 45 mode. If you
  * want to use clause 22, set DEVAD = MC_CMD_MDIO_CLAUSE22.
  */
 /*               MC_CMD_MDIO_CLAUSE22 0x20 */
 /* Address */
 #define       MC_CMD_MDIO_WRITE_IN_ADDR_OFST 12
+#define       MC_CMD_MDIO_WRITE_IN_ADDR_LEN 4
 /* Value */
 #define       MC_CMD_MDIO_WRITE_IN_VALUE_OFST 16
+#define       MC_CMD_MDIO_WRITE_IN_VALUE_LEN 4
 
 /* MC_CMD_MDIO_WRITE_OUT msgresponse */
 #define    MC_CMD_MDIO_WRITE_OUT_LEN 4
@@ -1902,6 +2342,7 @@
  * "good" transaction should have the DONE bit set and all other bits clear.
  */
 #define       MC_CMD_MDIO_WRITE_OUT_STATUS_OFST 0
+#define       MC_CMD_MDIO_WRITE_OUT_STATUS_LEN 4
 /* enum: Good. */
 /*               MC_CMD_MDIO_STATUS_GOOD 0x8 */
 
@@ -1912,7 +2353,7 @@
  */
 #define MC_CMD_DBI_WRITE 0x12
 
-#define MC_CMD_0x12_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+#define MC_CMD_0x12_PRIVILEGE_CTG SRIOV_CTG_INSECURE
 
 /* MC_CMD_DBI_WRITE_IN msgrequest */
 #define    MC_CMD_DBI_WRITE_IN_LENMIN 12
@@ -1932,9 +2373,11 @@
 /* MC_CMD_DBIWROP_TYPEDEF structuredef */
 #define    MC_CMD_DBIWROP_TYPEDEF_LEN 12
 #define       MC_CMD_DBIWROP_TYPEDEF_ADDRESS_OFST 0
+#define       MC_CMD_DBIWROP_TYPEDEF_ADDRESS_LEN 4
 #define       MC_CMD_DBIWROP_TYPEDEF_ADDRESS_LBN 0
 #define       MC_CMD_DBIWROP_TYPEDEF_ADDRESS_WIDTH 32
 #define       MC_CMD_DBIWROP_TYPEDEF_PARMS_OFST 4
+#define       MC_CMD_DBIWROP_TYPEDEF_PARMS_LEN 4
 #define        MC_CMD_DBIWROP_TYPEDEF_VF_NUM_LBN 16
 #define        MC_CMD_DBIWROP_TYPEDEF_VF_NUM_WIDTH 16
 #define        MC_CMD_DBIWROP_TYPEDEF_VF_ACTIVE_LBN 15
@@ -1944,6 +2387,7 @@
 #define       MC_CMD_DBIWROP_TYPEDEF_PARMS_LBN 32
 #define       MC_CMD_DBIWROP_TYPEDEF_PARMS_WIDTH 32
 #define       MC_CMD_DBIWROP_TYPEDEF_VALUE_OFST 8
+#define       MC_CMD_DBIWROP_TYPEDEF_VALUE_LEN 4
 #define       MC_CMD_DBIWROP_TYPEDEF_VALUE_LBN 64
 #define       MC_CMD_DBIWROP_TYPEDEF_VALUE_WIDTH 32
 
@@ -1959,13 +2403,16 @@
 #define    MC_CMD_PORT_READ32_IN_LEN 4
 /* Address */
 #define       MC_CMD_PORT_READ32_IN_ADDR_OFST 0
+#define       MC_CMD_PORT_READ32_IN_ADDR_LEN 4
 
 /* MC_CMD_PORT_READ32_OUT msgresponse */
 #define    MC_CMD_PORT_READ32_OUT_LEN 8
 /* Value */
 #define       MC_CMD_PORT_READ32_OUT_VALUE_OFST 0
+#define       MC_CMD_PORT_READ32_OUT_VALUE_LEN 4
 /* Status */
 #define       MC_CMD_PORT_READ32_OUT_STATUS_OFST 4
+#define       MC_CMD_PORT_READ32_OUT_STATUS_LEN 4
 
 
 /***********************************/
@@ -1979,13 +2426,16 @@
 #define    MC_CMD_PORT_WRITE32_IN_LEN 8
 /* Address */
 #define       MC_CMD_PORT_WRITE32_IN_ADDR_OFST 0
+#define       MC_CMD_PORT_WRITE32_IN_ADDR_LEN 4
 /* Value */
 #define       MC_CMD_PORT_WRITE32_IN_VALUE_OFST 4
+#define       MC_CMD_PORT_WRITE32_IN_VALUE_LEN 4
 
 /* MC_CMD_PORT_WRITE32_OUT msgresponse */
 #define    MC_CMD_PORT_WRITE32_OUT_LEN 4
 /* Status */
 #define       MC_CMD_PORT_WRITE32_OUT_STATUS_OFST 0
+#define       MC_CMD_PORT_WRITE32_OUT_STATUS_LEN 4
 
 
 /***********************************/
@@ -1999,6 +2449,7 @@
 #define    MC_CMD_PORT_READ128_IN_LEN 4
 /* Address */
 #define       MC_CMD_PORT_READ128_IN_ADDR_OFST 0
+#define       MC_CMD_PORT_READ128_IN_ADDR_LEN 4
 
 /* MC_CMD_PORT_READ128_OUT msgresponse */
 #define    MC_CMD_PORT_READ128_OUT_LEN 20
@@ -2007,6 +2458,7 @@
 #define       MC_CMD_PORT_READ128_OUT_VALUE_LEN 16
 /* Status */
 #define       MC_CMD_PORT_READ128_OUT_STATUS_OFST 16
+#define       MC_CMD_PORT_READ128_OUT_STATUS_LEN 4
 
 
 /***********************************/
@@ -2020,6 +2472,7 @@
 #define    MC_CMD_PORT_WRITE128_IN_LEN 20
 /* Address */
 #define       MC_CMD_PORT_WRITE128_IN_ADDR_OFST 0
+#define       MC_CMD_PORT_WRITE128_IN_ADDR_LEN 4
 /* Value */
 #define       MC_CMD_PORT_WRITE128_IN_VALUE_OFST 4
 #define       MC_CMD_PORT_WRITE128_IN_VALUE_LEN 16
@@ -2028,6 +2481,7 @@
 #define    MC_CMD_PORT_WRITE128_OUT_LEN 4
 /* Status */
 #define       MC_CMD_PORT_WRITE128_OUT_STATUS_OFST 0
+#define       MC_CMD_PORT_WRITE128_OUT_STATUS_LEN 4
 
 /* MC_CMD_CAPABILITIES structuredef */
 #define    MC_CMD_CAPABILITIES_LEN 4
@@ -2072,24 +2526,54 @@
 #define    MC_CMD_GET_BOARD_CFG_OUT_LENMAX 136
 #define    MC_CMD_GET_BOARD_CFG_OUT_LEN(num) (72+2*(num))
 #define       MC_CMD_GET_BOARD_CFG_OUT_BOARD_TYPE_OFST 0
+#define       MC_CMD_GET_BOARD_CFG_OUT_BOARD_TYPE_LEN 4
 #define       MC_CMD_GET_BOARD_CFG_OUT_BOARD_NAME_OFST 4
 #define       MC_CMD_GET_BOARD_CFG_OUT_BOARD_NAME_LEN 32
-/* See MC_CMD_CAPABILITIES */
+/* Capabilities for Siena Port0 (see struct MC_CMD_CAPABILITIES). Unused on
+ * EF10 and later (use MC_CMD_GET_CAPABILITIES).
+ */
 #define       MC_CMD_GET_BOARD_CFG_OUT_CAPABILITIES_PORT0_OFST 36
-/* See MC_CMD_CAPABILITIES */
+#define       MC_CMD_GET_BOARD_CFG_OUT_CAPABILITIES_PORT0_LEN 4
+/* Capabilities for Siena Port1 (see struct MC_CMD_CAPABILITIES). Unused on
+ * EF10 and later (use MC_CMD_GET_CAPABILITIES).
+ */
 #define       MC_CMD_GET_BOARD_CFG_OUT_CAPABILITIES_PORT1_OFST 40
+#define       MC_CMD_GET_BOARD_CFG_OUT_CAPABILITIES_PORT1_LEN 4
+/* Base MAC address for Siena Port0. Unused on EF10 and later (use
+ * MC_CMD_GET_MAC_ADDRESSES).
+ */
 #define       MC_CMD_GET_BOARD_CFG_OUT_MAC_ADDR_BASE_PORT0_OFST 44
 #define       MC_CMD_GET_BOARD_CFG_OUT_MAC_ADDR_BASE_PORT0_LEN 6
+/* Base MAC address for Siena Port1. Unused on EF10 and later (use
+ * MC_CMD_GET_MAC_ADDRESSES).
+ */
 #define       MC_CMD_GET_BOARD_CFG_OUT_MAC_ADDR_BASE_PORT1_OFST 50
 #define       MC_CMD_GET_BOARD_CFG_OUT_MAC_ADDR_BASE_PORT1_LEN 6
+/* Size of MAC address pool for Siena Port0. Unused on EF10 and later (use
+ * MC_CMD_GET_MAC_ADDRESSES).
+ */
 #define       MC_CMD_GET_BOARD_CFG_OUT_MAC_COUNT_PORT0_OFST 56
+#define       MC_CMD_GET_BOARD_CFG_OUT_MAC_COUNT_PORT0_LEN 4
+/* Size of MAC address pool for Siena Port1. Unused on EF10 and later (use
+ * MC_CMD_GET_MAC_ADDRESSES).
+ */
 #define       MC_CMD_GET_BOARD_CFG_OUT_MAC_COUNT_PORT1_OFST 60
+#define       MC_CMD_GET_BOARD_CFG_OUT_MAC_COUNT_PORT1_LEN 4
+/* Increment between addresses in MAC address pool for Siena Port0. Unused on
+ * EF10 and later (use MC_CMD_GET_MAC_ADDRESSES).
+ */
 #define       MC_CMD_GET_BOARD_CFG_OUT_MAC_STRIDE_PORT0_OFST 64
+#define       MC_CMD_GET_BOARD_CFG_OUT_MAC_STRIDE_PORT0_LEN 4
+/* Increment between addresses in MAC address pool for Siena Port1. Unused on
+ * EF10 and later (use MC_CMD_GET_MAC_ADDRESSES).
+ */
 #define       MC_CMD_GET_BOARD_CFG_OUT_MAC_STRIDE_PORT1_OFST 68
-/* This field contains a 16-bit value for each of the types of NVRAM area. The
- * values are defined in the firmware/mc/platform/.c file for a specific board
- * type, but otherwise have no meaning to the MC; they are used by the driver
- * to manage selection of appropriate firmware updates.
+#define       MC_CMD_GET_BOARD_CFG_OUT_MAC_STRIDE_PORT1_LEN 4
+/* Siena only. This field contains a 16-bit value for each of the types of
+ * NVRAM area. The values are defined in the firmware/mc/platform/.c file for a
+ * specific board type, but otherwise have no meaning to the MC; they are used
+ * by the driver to manage selection of appropriate firmware updates. Unused on
+ * EF10 and later (use MC_CMD_NVRAM_METADATA).
  */
 #define       MC_CMD_GET_BOARD_CFG_OUT_FW_SUBTYPE_LIST_OFST 72
 #define       MC_CMD_GET_BOARD_CFG_OUT_FW_SUBTYPE_LIST_LEN 2
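
/* Editorial sketch, not part of the patch: the subtype list is an array of
 * little-endian 16-bit values, one per NVRAM partition type, starting at
 * offset 72. Indexing by partition type (the parameter is an assumption)
 * is a plain LE16 read:
 */
#include <stdint.h>

static uint16_t board_fw_subtype(const uint8_t *resp, unsigned int type)
{
	unsigned int ofst = 72 + 2 * type;	/* FW_SUBTYPE_LIST_OFST/LEN */

	return (uint16_t)resp[ofst] | ((uint16_t)resp[ofst + 1] << 8);
}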
@@ -2103,7 +2587,7 @@
  */
 #define MC_CMD_DBI_READX 0x19
 
-#define MC_CMD_0x19_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+#define MC_CMD_0x19_PRIVILEGE_CTG SRIOV_CTG_INSECURE
 
 /* MC_CMD_DBI_READX_IN msgrequest */
 #define    MC_CMD_DBI_READX_IN_LENMIN 8
@@ -2130,9 +2614,11 @@
 /* MC_CMD_DBIRDOP_TYPEDEF structuredef */
 #define    MC_CMD_DBIRDOP_TYPEDEF_LEN 8
 #define       MC_CMD_DBIRDOP_TYPEDEF_ADDRESS_OFST 0
+#define       MC_CMD_DBIRDOP_TYPEDEF_ADDRESS_LEN 4
 #define       MC_CMD_DBIRDOP_TYPEDEF_ADDRESS_LBN 0
 #define       MC_CMD_DBIRDOP_TYPEDEF_ADDRESS_WIDTH 32
 #define       MC_CMD_DBIRDOP_TYPEDEF_PARMS_OFST 4
+#define       MC_CMD_DBIRDOP_TYPEDEF_PARMS_LEN 4
 #define        MC_CMD_DBIRDOP_TYPEDEF_VF_NUM_LBN 16
 #define        MC_CMD_DBIRDOP_TYPEDEF_VF_NUM_WIDTH 16
 #define        MC_CMD_DBIRDOP_TYPEDEF_VF_ACTIVE_LBN 15
@@ -2149,7 +2635,7 @@
  */
 #define MC_CMD_SET_RAND_SEED 0x1a
 
-#define MC_CMD_0x1a_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+#define MC_CMD_0x1a_PRIVILEGE_CTG SRIOV_CTG_INSECURE
 
 /* MC_CMD_SET_RAND_SEED_IN msgrequest */
 #define    MC_CMD_SET_RAND_SEED_IN_LEN 16
@@ -2198,14 +2684,17 @@
 #define    MC_CMD_DRV_ATTACH_IN_LEN 12
 /* new state to set if UPDATE=1 */
 #define       MC_CMD_DRV_ATTACH_IN_NEW_STATE_OFST 0
+#define       MC_CMD_DRV_ATTACH_IN_NEW_STATE_LEN 4
 #define        MC_CMD_DRV_ATTACH_LBN 0
 #define        MC_CMD_DRV_ATTACH_WIDTH 1
 #define        MC_CMD_DRV_PREBOOT_LBN 1
 #define        MC_CMD_DRV_PREBOOT_WIDTH 1
 /* 1 to set new state, or 0 to just report the existing state */
 #define       MC_CMD_DRV_ATTACH_IN_UPDATE_OFST 4
+#define       MC_CMD_DRV_ATTACH_IN_UPDATE_LEN 4
 /* preferred datapath firmware (for Huntington; ignored for Siena) */
 #define       MC_CMD_DRV_ATTACH_IN_FIRMWARE_ID_OFST 8
+#define       MC_CMD_DRV_ATTACH_IN_FIRMWARE_ID_LEN 4
 /* enum: Prefer to use full featured firmware */
 #define          MC_CMD_FW_FULL_FEATURED 0x0
 /* enum: Prefer to use firmware with fewer features but lower latency */
@@ -2229,13 +2718,16 @@
 #define    MC_CMD_DRV_ATTACH_OUT_LEN 4
 /* previous or existing state, see the bitmask at NEW_STATE */
 #define       MC_CMD_DRV_ATTACH_OUT_OLD_STATE_OFST 0
+#define       MC_CMD_DRV_ATTACH_OUT_OLD_STATE_LEN 4
 
 /* MC_CMD_DRV_ATTACH_EXT_OUT msgresponse */
 #define    MC_CMD_DRV_ATTACH_EXT_OUT_LEN 8
 /* previous or existing state, see the bitmask at NEW_STATE */
 #define       MC_CMD_DRV_ATTACH_EXT_OUT_OLD_STATE_OFST 0
+#define       MC_CMD_DRV_ATTACH_EXT_OUT_OLD_STATE_LEN 4
 /* Flags associated with this function */
 #define       MC_CMD_DRV_ATTACH_EXT_OUT_FUNC_FLAGS_OFST 4
+#define       MC_CMD_DRV_ATTACH_EXT_OUT_FUNC_FLAGS_LEN 4
 /* enum: Labels the lowest-numbered function visible to the OS */
 #define          MC_CMD_DRV_ATTACH_EXT_OUT_FLAG_PRIMARY 0x0
 /* enum: The function can control the link state of the physical port it is
@@ -2260,6 +2752,7 @@
 #define    MC_CMD_SHMUART_IN_LEN 4
 /* ??? */
 #define       MC_CMD_SHMUART_IN_FLAG_OFST 0
+#define       MC_CMD_SHMUART_IN_FLAG_LEN 4
 
 /* MC_CMD_SHMUART_OUT msgresponse */
 #define    MC_CMD_SHMUART_OUT_LEN 0
@@ -2297,6 +2790,7 @@
  * (TBD).
  */
 #define       MC_CMD_ENTITY_RESET_IN_FLAG_OFST 0
+#define       MC_CMD_ENTITY_RESET_IN_FLAG_LEN 4
 #define        MC_CMD_ENTITY_RESET_IN_FUNCTION_RESOURCE_RESET_LBN 0
 #define        MC_CMD_ENTITY_RESET_IN_FUNCTION_RESOURCE_RESET_WIDTH 1
 
@@ -2314,8 +2808,10 @@
 #define    MC_CMD_PCIE_CREDITS_IN_LEN 8
 /* poll period. 0 is disabled */
 #define       MC_CMD_PCIE_CREDITS_IN_POLL_PERIOD_OFST 0
+#define       MC_CMD_PCIE_CREDITS_IN_POLL_PERIOD_LEN 4
 /* wipe statistics */
 #define       MC_CMD_PCIE_CREDITS_IN_WIPE_OFST 4
+#define       MC_CMD_PCIE_CREDITS_IN_WIPE_LEN 4
 
 /* MC_CMD_PCIE_CREDITS_OUT msgresponse */
 #define    MC_CMD_PCIE_CREDITS_OUT_LEN 16
@@ -2346,31 +2842,54 @@
 /* MC_CMD_RXD_MONITOR_IN msgrequest */
 #define    MC_CMD_RXD_MONITOR_IN_LEN 12
 #define       MC_CMD_RXD_MONITOR_IN_QID_OFST 0
+#define       MC_CMD_RXD_MONITOR_IN_QID_LEN 4
 #define       MC_CMD_RXD_MONITOR_IN_POLL_PERIOD_OFST 4
+#define       MC_CMD_RXD_MONITOR_IN_POLL_PERIOD_LEN 4
 #define       MC_CMD_RXD_MONITOR_IN_WIPE_OFST 8
+#define       MC_CMD_RXD_MONITOR_IN_WIPE_LEN 4
 
 /* MC_CMD_RXD_MONITOR_OUT msgresponse */
 #define    MC_CMD_RXD_MONITOR_OUT_LEN 80
 #define       MC_CMD_RXD_MONITOR_OUT_QID_OFST 0
+#define       MC_CMD_RXD_MONITOR_OUT_QID_LEN 4
 #define       MC_CMD_RXD_MONITOR_OUT_RING_FILL_OFST 4
+#define       MC_CMD_RXD_MONITOR_OUT_RING_FILL_LEN 4
 #define       MC_CMD_RXD_MONITOR_OUT_CACHE_FILL_OFST 8
+#define       MC_CMD_RXD_MONITOR_OUT_CACHE_FILL_LEN 4
 #define       MC_CMD_RXD_MONITOR_OUT_RING_LT_1_OFST 12
+#define       MC_CMD_RXD_MONITOR_OUT_RING_LT_1_LEN 4
 #define       MC_CMD_RXD_MONITOR_OUT_RING_LT_2_OFST 16
+#define       MC_CMD_RXD_MONITOR_OUT_RING_LT_2_LEN 4
 #define       MC_CMD_RXD_MONITOR_OUT_RING_LT_4_OFST 20
+#define       MC_CMD_RXD_MONITOR_OUT_RING_LT_4_LEN 4
 #define       MC_CMD_RXD_MONITOR_OUT_RING_LT_8_OFST 24
+#define       MC_CMD_RXD_MONITOR_OUT_RING_LT_8_LEN 4
 #define       MC_CMD_RXD_MONITOR_OUT_RING_LT_16_OFST 28
+#define       MC_CMD_RXD_MONITOR_OUT_RING_LT_16_LEN 4
 #define       MC_CMD_RXD_MONITOR_OUT_RING_LT_32_OFST 32
+#define       MC_CMD_RXD_MONITOR_OUT_RING_LT_32_LEN 4
 #define       MC_CMD_RXD_MONITOR_OUT_RING_LT_64_OFST 36
+#define       MC_CMD_RXD_MONITOR_OUT_RING_LT_64_LEN 4
 #define       MC_CMD_RXD_MONITOR_OUT_RING_LT_128_OFST 40
+#define       MC_CMD_RXD_MONITOR_OUT_RING_LT_128_LEN 4
 #define       MC_CMD_RXD_MONITOR_OUT_RING_LT_256_OFST 44
+#define       MC_CMD_RXD_MONITOR_OUT_RING_LT_256_LEN 4
 #define       MC_CMD_RXD_MONITOR_OUT_RING_GE_256_OFST 48
+#define       MC_CMD_RXD_MONITOR_OUT_RING_GE_256_LEN 4
 #define       MC_CMD_RXD_MONITOR_OUT_CACHE_LT_1_OFST 52
+#define       MC_CMD_RXD_MONITOR_OUT_CACHE_LT_1_LEN 4
 #define       MC_CMD_RXD_MONITOR_OUT_CACHE_LT_2_OFST 56
+#define       MC_CMD_RXD_MONITOR_OUT_CACHE_LT_2_LEN 4
 #define       MC_CMD_RXD_MONITOR_OUT_CACHE_LT_4_OFST 60
+#define       MC_CMD_RXD_MONITOR_OUT_CACHE_LT_4_LEN 4
 #define       MC_CMD_RXD_MONITOR_OUT_CACHE_LT_8_OFST 64
+#define       MC_CMD_RXD_MONITOR_OUT_CACHE_LT_8_LEN 4
 #define       MC_CMD_RXD_MONITOR_OUT_CACHE_LT_16_OFST 68
+#define       MC_CMD_RXD_MONITOR_OUT_CACHE_LT_16_LEN 4
 #define       MC_CMD_RXD_MONITOR_OUT_CACHE_LT_32_OFST 72
+#define       MC_CMD_RXD_MONITOR_OUT_CACHE_LT_32_LEN 4
 #define       MC_CMD_RXD_MONITOR_OUT_CACHE_GE_32_OFST 76
+#define       MC_CMD_RXD_MONITOR_OUT_CACHE_GE_32_LEN 4
 
 
 /***********************************/
@@ -2379,13 +2898,14 @@
  */
 #define MC_CMD_PUTS 0x23
 
-#define MC_CMD_0x23_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+#define MC_CMD_0x23_PRIVILEGE_CTG SRIOV_CTG_INSECURE
 
 /* MC_CMD_PUTS_IN msgrequest */
 #define    MC_CMD_PUTS_IN_LENMIN 13
 #define    MC_CMD_PUTS_IN_LENMAX 252
 #define    MC_CMD_PUTS_IN_LEN(num) (12+1*(num))
 #define       MC_CMD_PUTS_IN_DEST_OFST 0
+#define       MC_CMD_PUTS_IN_DEST_LEN 4
 #define        MC_CMD_PUTS_IN_UART_LBN 0
 #define        MC_CMD_PUTS_IN_UART_WIDTH 1
 #define        MC_CMD_PUTS_IN_PORT_LBN 1
@@ -2417,6 +2937,7 @@
 #define    MC_CMD_GET_PHY_CFG_OUT_LEN 72
 /* flags */
 #define       MC_CMD_GET_PHY_CFG_OUT_FLAGS_OFST 0
+#define       MC_CMD_GET_PHY_CFG_OUT_FLAGS_LEN 4
 #define        MC_CMD_GET_PHY_CFG_OUT_PRESENT_LBN 0
 #define        MC_CMD_GET_PHY_CFG_OUT_PRESENT_WIDTH 1
 #define        MC_CMD_GET_PHY_CFG_OUT_BIST_CABLE_SHORT_LBN 1
@@ -2433,8 +2954,10 @@
 #define        MC_CMD_GET_PHY_CFG_OUT_BIST_WIDTH 1
 /* ?? */
 #define       MC_CMD_GET_PHY_CFG_OUT_TYPE_OFST 4
+#define       MC_CMD_GET_PHY_CFG_OUT_TYPE_LEN 4
 /* Bitmask of supported capabilities */
 #define       MC_CMD_GET_PHY_CFG_OUT_SUPPORTED_CAP_OFST 8
+#define       MC_CMD_GET_PHY_CFG_OUT_SUPPORTED_CAP_LEN 4
 #define        MC_CMD_PHY_CAP_10HDX_LBN 1
 #define        MC_CMD_PHY_CAP_10HDX_WIDTH 1
 #define        MC_CMD_PHY_CAP_10FDX_LBN 2
@@ -2459,17 +2982,39 @@
 #define        MC_CMD_PHY_CAP_40000FDX_WIDTH 1
 #define        MC_CMD_PHY_CAP_DDM_LBN 12
 #define        MC_CMD_PHY_CAP_DDM_WIDTH 1
+#define        MC_CMD_PHY_CAP_100000FDX_LBN 13
+#define        MC_CMD_PHY_CAP_100000FDX_WIDTH 1
+#define        MC_CMD_PHY_CAP_25000FDX_LBN 14
+#define        MC_CMD_PHY_CAP_25000FDX_WIDTH 1
+#define        MC_CMD_PHY_CAP_50000FDX_LBN 15
+#define        MC_CMD_PHY_CAP_50000FDX_WIDTH 1
+#define        MC_CMD_PHY_CAP_BASER_FEC_LBN 16
+#define        MC_CMD_PHY_CAP_BASER_FEC_WIDTH 1
+#define        MC_CMD_PHY_CAP_BASER_FEC_REQUESTED_LBN 17
+#define        MC_CMD_PHY_CAP_BASER_FEC_REQUESTED_WIDTH 1
+#define        MC_CMD_PHY_CAP_RS_FEC_LBN 18
+#define        MC_CMD_PHY_CAP_RS_FEC_WIDTH 1
+#define        MC_CMD_PHY_CAP_RS_FEC_REQUESTED_LBN 19
+#define        MC_CMD_PHY_CAP_RS_FEC_REQUESTED_WIDTH 1
+#define        MC_CMD_PHY_CAP_25G_BASER_FEC_LBN 20
+#define        MC_CMD_PHY_CAP_25G_BASER_FEC_WIDTH 1
+#define        MC_CMD_PHY_CAP_25G_BASER_FEC_REQUESTED_LBN 21
+#define        MC_CMD_PHY_CAP_25G_BASER_FEC_REQUESTED_WIDTH 1
 /* ?? */
 #define       MC_CMD_GET_PHY_CFG_OUT_CHANNEL_OFST 12
+#define       MC_CMD_GET_PHY_CFG_OUT_CHANNEL_LEN 4
 /* ?? */
 #define       MC_CMD_GET_PHY_CFG_OUT_PRT_OFST 16
+#define       MC_CMD_GET_PHY_CFG_OUT_PRT_LEN 4
 /* ?? */
 #define       MC_CMD_GET_PHY_CFG_OUT_STATS_MASK_OFST 20
+#define       MC_CMD_GET_PHY_CFG_OUT_STATS_MASK_LEN 4
 /* ?? */
 #define       MC_CMD_GET_PHY_CFG_OUT_NAME_OFST 24
 #define       MC_CMD_GET_PHY_CFG_OUT_NAME_LEN 20
 /* ?? */
 #define       MC_CMD_GET_PHY_CFG_OUT_MEDIA_TYPE_OFST 44
+#define       MC_CMD_GET_PHY_CFG_OUT_MEDIA_TYPE_LEN 4
 /* enum: Xaui. */
 #define          MC_CMD_MEDIA_XAUI 0x1
 /* enum: CX4. */
@@ -2485,6 +3030,7 @@
 /* enum: QSFP+. */
 #define          MC_CMD_MEDIA_QSFP_PLUS 0x7
 #define       MC_CMD_GET_PHY_CFG_OUT_MMD_MASK_OFST 48
+#define       MC_CMD_GET_PHY_CFG_OUT_MMD_MASK_LEN 4
 /* enum: Native clause 22 */
 #define          MC_CMD_MMD_CLAUSE22 0x0
 #define          MC_CMD_MMD_CLAUSE45_PMAPMD 0x1 /* enum */
@@ -2515,6 +3061,7 @@
 #define    MC_CMD_START_BIST_IN_LEN 4
 /* Type of test. */
 #define       MC_CMD_START_BIST_IN_TYPE_OFST 0
+#define       MC_CMD_START_BIST_IN_TYPE_LEN 4
 /* enum: Run the PHY's short cable BIST. */
 #define          MC_CMD_PHY_BIST_CABLE_SHORT 0x1
 /* enum: Run the PHY's long cable BIST. */
@@ -2556,6 +3103,7 @@
 #define    MC_CMD_POLL_BIST_OUT_LEN 8
 /* result */
 #define       MC_CMD_POLL_BIST_OUT_RESULT_OFST 0
+#define       MC_CMD_POLL_BIST_OUT_RESULT_LEN 4
 /* enum: Running. */
 #define          MC_CMD_POLL_BIST_RUNNING 0x1
 /* enum: Passed. */
@@ -2565,19 +3113,26 @@
 /* enum: Timed-out. */
 #define          MC_CMD_POLL_BIST_TIMEOUT 0x4
 #define       MC_CMD_POLL_BIST_OUT_PRIVATE_OFST 4
+#define       MC_CMD_POLL_BIST_OUT_PRIVATE_LEN 4
 
 /* MC_CMD_POLL_BIST_OUT_SFT9001 msgresponse */
 #define    MC_CMD_POLL_BIST_OUT_SFT9001_LEN 36
 /* result */
 /*            MC_CMD_POLL_BIST_OUT_RESULT_OFST 0 */
+/*            MC_CMD_POLL_BIST_OUT_RESULT_LEN 4 */
 /*            Enum values, see field(s): */
 /*               MC_CMD_POLL_BIST_OUT/MC_CMD_POLL_BIST_OUT_RESULT */
 #define       MC_CMD_POLL_BIST_OUT_SFT9001_CABLE_LENGTH_A_OFST 4
+#define       MC_CMD_POLL_BIST_OUT_SFT9001_CABLE_LENGTH_A_LEN 4
 #define       MC_CMD_POLL_BIST_OUT_SFT9001_CABLE_LENGTH_B_OFST 8
+#define       MC_CMD_POLL_BIST_OUT_SFT9001_CABLE_LENGTH_B_LEN 4
 #define       MC_CMD_POLL_BIST_OUT_SFT9001_CABLE_LENGTH_C_OFST 12
+#define       MC_CMD_POLL_BIST_OUT_SFT9001_CABLE_LENGTH_C_LEN 4
 #define       MC_CMD_POLL_BIST_OUT_SFT9001_CABLE_LENGTH_D_OFST 16
+#define       MC_CMD_POLL_BIST_OUT_SFT9001_CABLE_LENGTH_D_LEN 4
 /* Status of each channel A */
 #define       MC_CMD_POLL_BIST_OUT_SFT9001_CABLE_STATUS_A_OFST 20
+#define       MC_CMD_POLL_BIST_OUT_SFT9001_CABLE_STATUS_A_LEN 4
 /* enum: Ok. */
 #define          MC_CMD_POLL_BIST_SFT9001_PAIR_OK 0x1
 /* enum: Open. */
@@ -2590,14 +3145,17 @@
 #define          MC_CMD_POLL_BIST_SFT9001_PAIR_BUSY 0x9
 /* Status of each channel B */
 #define       MC_CMD_POLL_BIST_OUT_SFT9001_CABLE_STATUS_B_OFST 24
+#define       MC_CMD_POLL_BIST_OUT_SFT9001_CABLE_STATUS_B_LEN 4
 /*            Enum values, see field(s): */
 /*               CABLE_STATUS_A */
 /* Status of each channel C */
 #define       MC_CMD_POLL_BIST_OUT_SFT9001_CABLE_STATUS_C_OFST 28
+#define       MC_CMD_POLL_BIST_OUT_SFT9001_CABLE_STATUS_C_LEN 4
 /*            Enum values, see field(s): */
 /*               CABLE_STATUS_A */
 /* Status of each channel D */
 #define       MC_CMD_POLL_BIST_OUT_SFT9001_CABLE_STATUS_D_OFST 32
+#define       MC_CMD_POLL_BIST_OUT_SFT9001_CABLE_STATUS_D_LEN 4
 /*            Enum values, see field(s): */
 /*               CABLE_STATUS_A */
 
@@ -2605,9 +3163,11 @@
 #define    MC_CMD_POLL_BIST_OUT_MRSFP_LEN 8
 /* result */
 /*            MC_CMD_POLL_BIST_OUT_RESULT_OFST 0 */
+/*            MC_CMD_POLL_BIST_OUT_RESULT_LEN 4 */
 /*            Enum values, see field(s): */
 /*               MC_CMD_POLL_BIST_OUT/MC_CMD_POLL_BIST_OUT_RESULT */
 #define       MC_CMD_POLL_BIST_OUT_MRSFP_TEST_OFST 4
+#define       MC_CMD_POLL_BIST_OUT_MRSFP_TEST_LEN 4
 /* enum: Complete. */
 #define          MC_CMD_POLL_BIST_MRSFP_TEST_COMPLETE 0x0
 /* enum: Bus switch off I2C write. */
@@ -2631,9 +3191,11 @@
 #define    MC_CMD_POLL_BIST_OUT_MEM_LEN 36
 /* result */
 /*            MC_CMD_POLL_BIST_OUT_RESULT_OFST 0 */
+/*            MC_CMD_POLL_BIST_OUT_RESULT_LEN 4 */
 /*            Enum values, see field(s): */
 /*               MC_CMD_POLL_BIST_OUT/MC_CMD_POLL_BIST_OUT_RESULT */
 #define       MC_CMD_POLL_BIST_OUT_MEM_TEST_OFST 4
+#define       MC_CMD_POLL_BIST_OUT_MEM_TEST_LEN 4
 /* enum: Test has completed. */
 #define          MC_CMD_POLL_BIST_MEM_COMPLETE 0x0
 /* enum: RAM test - walk ones. */
@@ -2650,8 +3212,10 @@
 #define          MC_CMD_POLL_BIST_MEM_ECC 0x6
 /* Failure address, only valid if result is POLL_BIST_FAILED */
 #define       MC_CMD_POLL_BIST_OUT_MEM_ADDR_OFST 8
+#define       MC_CMD_POLL_BIST_OUT_MEM_ADDR_LEN 4
 /* Bus or address space to which the failure address corresponds */
 #define       MC_CMD_POLL_BIST_OUT_MEM_BUS_OFST 12
+#define       MC_CMD_POLL_BIST_OUT_MEM_BUS_LEN 4
 /* enum: MC MIPS bus. */
 #define          MC_CMD_POLL_BIST_MEM_BUS_MC 0x0
 /* enum: CSR IREG bus. */
@@ -2672,14 +3236,19 @@
 #define          MC_CMD_POLL_BIST_MEM_BUS_DICPU_RX1 0x8
 /* Pattern written to RAM / register */
 #define       MC_CMD_POLL_BIST_OUT_MEM_EXPECT_OFST 16
+#define       MC_CMD_POLL_BIST_OUT_MEM_EXPECT_LEN 4
 /* Actual value read from RAM / register */
 #define       MC_CMD_POLL_BIST_OUT_MEM_ACTUAL_OFST 20
+#define       MC_CMD_POLL_BIST_OUT_MEM_ACTUAL_LEN 4
 /* ECC error mask */
 #define       MC_CMD_POLL_BIST_OUT_MEM_ECC_OFST 24
+#define       MC_CMD_POLL_BIST_OUT_MEM_ECC_LEN 4
 /* ECC parity error mask */
 #define       MC_CMD_POLL_BIST_OUT_MEM_ECC_PARITY_OFST 28
+#define       MC_CMD_POLL_BIST_OUT_MEM_ECC_PARITY_LEN 4
 /* ECC fatal error mask */
 #define       MC_CMD_POLL_BIST_OUT_MEM_ECC_FATAL_OFST 32
+#define       MC_CMD_POLL_BIST_OUT_MEM_ECC_FATAL_LEN 4
 
 
 /***********************************/
@@ -2831,6 +3400,143 @@
 /*            Enum values, see field(s): */
 /*               100M */
 
+/* MC_CMD_GET_LOOPBACK_MODES_OUT_V2 msgresponse: Supported loopback modes for
+ * newer NICs with 25G/50G/100G support
+ */
+#define    MC_CMD_GET_LOOPBACK_MODES_OUT_V2_LEN 64
+/* Supported loopbacks. */
+#define       MC_CMD_GET_LOOPBACK_MODES_OUT_V2_100M_OFST 0
+#define       MC_CMD_GET_LOOPBACK_MODES_OUT_V2_100M_LEN 8
+#define       MC_CMD_GET_LOOPBACK_MODES_OUT_V2_100M_LO_OFST 0
+#define       MC_CMD_GET_LOOPBACK_MODES_OUT_V2_100M_HI_OFST 4
+/* enum: None. */
+/*               MC_CMD_LOOPBACK_NONE  0x0 */
+/* enum: Data. */
+/*               MC_CMD_LOOPBACK_DATA  0x1 */
+/* enum: GMAC. */
+/*               MC_CMD_LOOPBACK_GMAC  0x2 */
+/* enum: XGMII. */
+/*               MC_CMD_LOOPBACK_XGMII 0x3 */
+/* enum: XGXS. */
+/*               MC_CMD_LOOPBACK_XGXS  0x4 */
+/* enum: XAUI. */
+/*               MC_CMD_LOOPBACK_XAUI  0x5 */
+/* enum: GMII. */
+/*               MC_CMD_LOOPBACK_GMII  0x6 */
+/* enum: SGMII. */
+/*               MC_CMD_LOOPBACK_SGMII  0x7 */
+/* enum: XGBR. */
+/*               MC_CMD_LOOPBACK_XGBR  0x8 */
+/* enum: XFI. */
+/*               MC_CMD_LOOPBACK_XFI  0x9 */
+/* enum: XAUI Far. */
+/*               MC_CMD_LOOPBACK_XAUI_FAR  0xa */
+/* enum: GMII Far. */
+/*               MC_CMD_LOOPBACK_GMII_FAR  0xb */
+/* enum: SGMII Far. */
+/*               MC_CMD_LOOPBACK_SGMII_FAR  0xc */
+/* enum: XFI Far. */
+/*               MC_CMD_LOOPBACK_XFI_FAR  0xd */
+/* enum: GPhy. */
+/*               MC_CMD_LOOPBACK_GPHY  0xe */
+/* enum: PhyXS. */
+/*               MC_CMD_LOOPBACK_PHYXS  0xf */
+/* enum: PCS. */
+/*               MC_CMD_LOOPBACK_PCS  0x10 */
+/* enum: PMA-PMD. */
+/*               MC_CMD_LOOPBACK_PMAPMD  0x11 */
+/* enum: Cross-Port. */
+/*               MC_CMD_LOOPBACK_XPORT  0x12 */
+/* enum: XGMII-Wireside. */
+/*               MC_CMD_LOOPBACK_XGMII_WS  0x13 */
+/* enum: XAUI Wireside. */
+/*               MC_CMD_LOOPBACK_XAUI_WS  0x14 */
+/* enum: XAUI Wireside Far. */
+/*               MC_CMD_LOOPBACK_XAUI_WS_FAR  0x15 */
+/* enum: XAUI Wireside near. */
+/*               MC_CMD_LOOPBACK_XAUI_WS_NEAR  0x16 */
+/* enum: GMII Wireside. */
+/*               MC_CMD_LOOPBACK_GMII_WS  0x17 */
+/* enum: XFI Wireside. */
+/*               MC_CMD_LOOPBACK_XFI_WS  0x18 */
+/* enum: XFI Wireside Far. */
+/*               MC_CMD_LOOPBACK_XFI_WS_FAR  0x19 */
+/* enum: PhyXS Wireside. */
+/*               MC_CMD_LOOPBACK_PHYXS_WS  0x1a */
+/* enum: PMA lanes MAC-Serdes. */
+/*               MC_CMD_LOOPBACK_PMA_INT  0x1b */
+/* enum: KR Serdes Parallel (Encoder). */
+/*               MC_CMD_LOOPBACK_SD_NEAR  0x1c */
+/* enum: KR Serdes Serial. */
+/*               MC_CMD_LOOPBACK_SD_FAR  0x1d */
+/* enum: PMA lanes MAC-Serdes Wireside. */
+/*               MC_CMD_LOOPBACK_PMA_INT_WS  0x1e */
+/* enum: KR Serdes Parallel Wireside (Full PCS). */
+/*               MC_CMD_LOOPBACK_SD_FEP2_WS  0x1f */
+/* enum: KR Serdes Parallel Wireside (Sym Aligner to TX). */
+/*               MC_CMD_LOOPBACK_SD_FEP1_5_WS  0x20 */
+/* enum: KR Serdes Parallel Wireside (Deserializer to Serializer). */
+/*               MC_CMD_LOOPBACK_SD_FEP_WS  0x21 */
+/* enum: KR Serdes Serial Wireside. */
+/*               MC_CMD_LOOPBACK_SD_FES_WS  0x22 */
+/* enum: Near side of AOE Siena side port */
+/*               MC_CMD_LOOPBACK_AOE_INT_NEAR  0x23 */
+/* enum: Medford Wireside datapath loopback */
+/*               MC_CMD_LOOPBACK_DATA_WS  0x24 */
+/* enum: Force link up without setting up any physical loopback (snapper use
+ * only)
+ */
+/*               MC_CMD_LOOPBACK_FORCE_EXT_LINK  0x25 */
+/* Supported loopbacks. */
+#define       MC_CMD_GET_LOOPBACK_MODES_OUT_V2_1G_OFST 8
+#define       MC_CMD_GET_LOOPBACK_MODES_OUT_V2_1G_LEN 8
+#define       MC_CMD_GET_LOOPBACK_MODES_OUT_V2_1G_LO_OFST 8
+#define       MC_CMD_GET_LOOPBACK_MODES_OUT_V2_1G_HI_OFST 12
+/*            Enum values, see field(s): */
+/*               100M */
+/* Supported loopbacks. */
+#define       MC_CMD_GET_LOOPBACK_MODES_OUT_V2_10G_OFST 16
+#define       MC_CMD_GET_LOOPBACK_MODES_OUT_V2_10G_LEN 8
+#define       MC_CMD_GET_LOOPBACK_MODES_OUT_V2_10G_LO_OFST 16
+#define       MC_CMD_GET_LOOPBACK_MODES_OUT_V2_10G_HI_OFST 20
+/*            Enum values, see field(s): */
+/*               100M */
+/* Supported loopbacks. */
+#define       MC_CMD_GET_LOOPBACK_MODES_OUT_V2_SUGGESTED_OFST 24
+#define       MC_CMD_GET_LOOPBACK_MODES_OUT_V2_SUGGESTED_LEN 8
+#define       MC_CMD_GET_LOOPBACK_MODES_OUT_V2_SUGGESTED_LO_OFST 24
+#define       MC_CMD_GET_LOOPBACK_MODES_OUT_V2_SUGGESTED_HI_OFST 28
+/*            Enum values, see field(s): */
+/*               100M */
+/* Supported loopbacks. */
+#define       MC_CMD_GET_LOOPBACK_MODES_OUT_V2_40G_OFST 32
+#define       MC_CMD_GET_LOOPBACK_MODES_OUT_V2_40G_LEN 8
+#define       MC_CMD_GET_LOOPBACK_MODES_OUT_V2_40G_LO_OFST 32
+#define       MC_CMD_GET_LOOPBACK_MODES_OUT_V2_40G_HI_OFST 36
+/*            Enum values, see field(s): */
+/*               100M */
+/* Supported 25G loopbacks. */
+#define       MC_CMD_GET_LOOPBACK_MODES_OUT_V2_25G_OFST 40
+#define       MC_CMD_GET_LOOPBACK_MODES_OUT_V2_25G_LEN 8
+#define       MC_CMD_GET_LOOPBACK_MODES_OUT_V2_25G_LO_OFST 40
+#define       MC_CMD_GET_LOOPBACK_MODES_OUT_V2_25G_HI_OFST 44
+/*            Enum values, see field(s): */
+/*               100M */
+/* Supported 50G loopbacks. */
+#define       MC_CMD_GET_LOOPBACK_MODES_OUT_V2_50G_OFST 48
+#define       MC_CMD_GET_LOOPBACK_MODES_OUT_V2_50G_LEN 8
+#define       MC_CMD_GET_LOOPBACK_MODES_OUT_V2_50G_LO_OFST 48
+#define       MC_CMD_GET_LOOPBACK_MODES_OUT_V2_50G_HI_OFST 52
+/*            Enum values, see field(s): */
+/*               100M */
+/* Supported 100G loopbacks. */
+#define       MC_CMD_GET_LOOPBACK_MODES_OUT_V2_100G_OFST 56
+#define       MC_CMD_GET_LOOPBACK_MODES_OUT_V2_100G_LEN 8
+#define       MC_CMD_GET_LOOPBACK_MODES_OUT_V2_100G_LO_OFST 56
+#define       MC_CMD_GET_LOOPBACK_MODES_OUT_V2_100G_HI_OFST 60
+/*            Enum values, see field(s): */
+/*               100M */
+
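Each of the V2 masks above is a 64-bit field stored as two little-endian dwords (the *_LO_OFST/*_HI_OFST pairs). The in-tree sfc driver reads such fields with its MCDI_QWORD helper; the following is a standalone sketch of the layout only, assuming a 4-byte-aligned response buffer:

/* sketch only: recombine a 64-bit MCDI field from its LO/HI dwords */
static inline u64 mcdi_field_qword(const u8 *outbuf, unsigned int lo_ofst)
{
	u32 lo = le32_to_cpup((const __le32 *)(outbuf + lo_ofst));
	u32 hi = le32_to_cpup((const __le32 *)(outbuf + lo_ofst + 4));

	return ((u64)hi << 32) | lo;
}

/* e.g. the supported 25G loopback modes from a V2 response */
u64 modes_25g = mcdi_field_qword(outbuf,
		MC_CMD_GET_LOOPBACK_MODES_OUT_V2_25G_LO_OFST);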
 
 /***********************************/
 /* MC_CMD_GET_LINK
@@ -2848,17 +3554,22 @@
 #define    MC_CMD_GET_LINK_OUT_LEN 28
 /* near-side advertised capabilities */
 #define       MC_CMD_GET_LINK_OUT_CAP_OFST 0
+#define       MC_CMD_GET_LINK_OUT_CAP_LEN 4
 /* link-partner advertised capabilities */
 #define       MC_CMD_GET_LINK_OUT_LP_CAP_OFST 4
+#define       MC_CMD_GET_LINK_OUT_LP_CAP_LEN 4
 /* Autonegotiated speed in mbit/s. The link may still be down even if this
  * reads non-zero.
  */
 #define       MC_CMD_GET_LINK_OUT_LINK_SPEED_OFST 8
+#define       MC_CMD_GET_LINK_OUT_LINK_SPEED_LEN 4
 /* Current loopback setting. */
 #define       MC_CMD_GET_LINK_OUT_LOOPBACK_MODE_OFST 12
+#define       MC_CMD_GET_LINK_OUT_LOOPBACK_MODE_LEN 4
 /*            Enum values, see field(s): */
 /*               MC_CMD_GET_LOOPBACK_MODES/MC_CMD_GET_LOOPBACK_MODES_OUT/100M */
 #define       MC_CMD_GET_LINK_OUT_FLAGS_OFST 16
+#define       MC_CMD_GET_LINK_OUT_FLAGS_LEN 4
 #define        MC_CMD_GET_LINK_OUT_LINK_UP_LBN 0
 #define        MC_CMD_GET_LINK_OUT_LINK_UP_WIDTH 1
 #define        MC_CMD_GET_LINK_OUT_FULL_DUPLEX_LBN 1
@@ -2873,9 +3584,11 @@
 #define        MC_CMD_GET_LINK_OUT_LINK_FAULT_TX_WIDTH 1
 /* This returns the negotiated flow control value. */
 #define       MC_CMD_GET_LINK_OUT_FCNTL_OFST 20
+#define       MC_CMD_GET_LINK_OUT_FCNTL_LEN 4
 /*            Enum values, see field(s): */
 /*               MC_CMD_SET_MAC/MC_CMD_SET_MAC_IN/FCNTL */
 #define       MC_CMD_GET_LINK_OUT_MAC_FAULT_OFST 24
+#define       MC_CMD_GET_LINK_OUT_MAC_FAULT_LEN 4
 #define        MC_CMD_MAC_FAULT_XGMII_LOCAL_LBN 0
 #define        MC_CMD_MAC_FAULT_XGMII_LOCAL_WIDTH 1
 #define        MC_CMD_MAC_FAULT_XGMII_REMOTE_LBN 1
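Because LINK_SPEED reflects autonegotiation rather than link state, a consumer must also test the LINK_UP flag bit; a minimal sketch using the sfc MCDI accessors, with efx and outbuf as assumed locals:

	u32 flags = MCDI_DWORD(outbuf, GET_LINK_OUT_FLAGS);
	u32 speed = MCDI_DWORD(outbuf, GET_LINK_OUT_LINK_SPEED);
	bool link_up = flags & (1 << MC_CMD_GET_LINK_OUT_LINK_UP_LBN);

	/* speed may read non-zero even while the link is down */
	if (link_up)
		netif_info(efx, link, efx->net_dev, "link up, %u Mbps\n", speed);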
@@ -2899,8 +3612,10 @@
 #define    MC_CMD_SET_LINK_IN_LEN 16
 /* ??? */
 #define       MC_CMD_SET_LINK_IN_CAP_OFST 0
+#define       MC_CMD_SET_LINK_IN_CAP_LEN 4
 /* Flags */
 #define       MC_CMD_SET_LINK_IN_FLAGS_OFST 4
+#define       MC_CMD_SET_LINK_IN_FLAGS_LEN 4
 #define        MC_CMD_SET_LINK_IN_LOWPOWER_LBN 0
 #define        MC_CMD_SET_LINK_IN_LOWPOWER_WIDTH 1
 #define        MC_CMD_SET_LINK_IN_POWEROFF_LBN 1
@@ -2909,12 +3624,14 @@
 #define        MC_CMD_SET_LINK_IN_TXDIS_WIDTH 1
 /* Loopback mode. */
 #define       MC_CMD_SET_LINK_IN_LOOPBACK_MODE_OFST 8
+#define       MC_CMD_SET_LINK_IN_LOOPBACK_MODE_LEN 4
 /*            Enum values, see field(s): */
 /*               MC_CMD_GET_LOOPBACK_MODES/MC_CMD_GET_LOOPBACK_MODES_OUT/100M */
 /* A loopback speed of "0" is supported, and means (choose any available
  * speed).
  */
 #define       MC_CMD_SET_LINK_IN_LOOPBACK_SPEED_OFST 12
+#define       MC_CMD_SET_LINK_IN_LOOPBACK_SPEED_LEN 4
 
 /* MC_CMD_SET_LINK_OUT msgresponse */
 #define    MC_CMD_SET_LINK_OUT_LEN 0
@@ -2932,6 +3649,7 @@
 #define    MC_CMD_SET_ID_LED_IN_LEN 4
 /* Set LED state. */
 #define       MC_CMD_SET_ID_LED_IN_STATE_OFST 0
+#define       MC_CMD_SET_ID_LED_IN_STATE_LEN 4
 #define          MC_CMD_LED_OFF  0x0 /* enum */
 #define          MC_CMD_LED_ON  0x1 /* enum */
 #define          MC_CMD_LED_DEFAULT  0x2 /* enum */
@@ -2954,17 +3672,21 @@
  * EtherII, VLAN, bug16011 padding).
  */
 #define       MC_CMD_SET_MAC_IN_MTU_OFST 0
+#define       MC_CMD_SET_MAC_IN_MTU_LEN 4
 #define       MC_CMD_SET_MAC_IN_DRAIN_OFST 4
+#define       MC_CMD_SET_MAC_IN_DRAIN_LEN 4
 #define       MC_CMD_SET_MAC_IN_ADDR_OFST 8
 #define       MC_CMD_SET_MAC_IN_ADDR_LEN 8
 #define       MC_CMD_SET_MAC_IN_ADDR_LO_OFST 8
 #define       MC_CMD_SET_MAC_IN_ADDR_HI_OFST 12
 #define       MC_CMD_SET_MAC_IN_REJECT_OFST 16
+#define       MC_CMD_SET_MAC_IN_REJECT_LEN 4
 #define        MC_CMD_SET_MAC_IN_REJECT_UNCST_LBN 0
 #define        MC_CMD_SET_MAC_IN_REJECT_UNCST_WIDTH 1
 #define        MC_CMD_SET_MAC_IN_REJECT_BRDCST_LBN 1
 #define        MC_CMD_SET_MAC_IN_REJECT_BRDCST_WIDTH 1
 #define       MC_CMD_SET_MAC_IN_FCNTL_OFST 20
+#define       MC_CMD_SET_MAC_IN_FCNTL_LEN 4
 /* enum: Flow control is off. */
 #define          MC_CMD_FCNTL_OFF 0x0
 /* enum: Respond to flow control. */
@@ -2978,6 +3700,7 @@
 /* enum: Issue flow control. */
 #define          MC_CMD_FCNTL_GENERATE 0x5
 #define       MC_CMD_SET_MAC_IN_FLAGS_OFST 24
+#define       MC_CMD_SET_MAC_IN_FLAGS_LEN 4
 #define        MC_CMD_SET_MAC_IN_FLAG_INCLUDE_FCS_LBN 0
 #define        MC_CMD_SET_MAC_IN_FLAG_INCLUDE_FCS_WIDTH 1
 
@@ -2987,17 +3710,21 @@
  * EtherII, VLAN, bug16011 padding).
  */
 #define       MC_CMD_SET_MAC_EXT_IN_MTU_OFST 0
+#define       MC_CMD_SET_MAC_EXT_IN_MTU_LEN 4
 #define       MC_CMD_SET_MAC_EXT_IN_DRAIN_OFST 4
+#define       MC_CMD_SET_MAC_EXT_IN_DRAIN_LEN 4
 #define       MC_CMD_SET_MAC_EXT_IN_ADDR_OFST 8
 #define       MC_CMD_SET_MAC_EXT_IN_ADDR_LEN 8
 #define       MC_CMD_SET_MAC_EXT_IN_ADDR_LO_OFST 8
 #define       MC_CMD_SET_MAC_EXT_IN_ADDR_HI_OFST 12
 #define       MC_CMD_SET_MAC_EXT_IN_REJECT_OFST 16
+#define       MC_CMD_SET_MAC_EXT_IN_REJECT_LEN 4
 #define        MC_CMD_SET_MAC_EXT_IN_REJECT_UNCST_LBN 0
 #define        MC_CMD_SET_MAC_EXT_IN_REJECT_UNCST_WIDTH 1
 #define        MC_CMD_SET_MAC_EXT_IN_REJECT_BRDCST_LBN 1
 #define        MC_CMD_SET_MAC_EXT_IN_REJECT_BRDCST_WIDTH 1
 #define       MC_CMD_SET_MAC_EXT_IN_FCNTL_OFST 20
+#define       MC_CMD_SET_MAC_EXT_IN_FCNTL_LEN 4
 /* enum: Flow control is off. */
 /*               MC_CMD_FCNTL_OFF 0x0 */
 /* enum: Respond to flow control. */
@@ -3011,6 +3738,7 @@
 /* enum: Issue flow control. */
 /*               MC_CMD_FCNTL_GENERATE 0x5 */
 #define       MC_CMD_SET_MAC_EXT_IN_FLAGS_OFST 24
+#define       MC_CMD_SET_MAC_EXT_IN_FLAGS_LEN 4
 #define        MC_CMD_SET_MAC_EXT_IN_FLAG_INCLUDE_FCS_LBN 0
 #define        MC_CMD_SET_MAC_EXT_IN_FLAG_INCLUDE_FCS_WIDTH 1
 /* Select which parameters to configure. A parameter will only be modified if
@@ -3019,6 +3747,7 @@
  * set).
  */
 #define       MC_CMD_SET_MAC_EXT_IN_CONTROL_OFST 28
+#define       MC_CMD_SET_MAC_EXT_IN_CONTROL_LEN 4
 #define        MC_CMD_SET_MAC_EXT_IN_CFG_MTU_LBN 0
 #define        MC_CMD_SET_MAC_EXT_IN_CFG_MTU_WIDTH 1
 #define        MC_CMD_SET_MAC_EXT_IN_CFG_DRAIN_LBN 1
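A hedged sketch of the selective update the CONTROL field enables: change only the MTU, leaving the MAC address, reject flags and flow control untouched (new_mtu and efx are assumed locals; the helpers are the sfc driver's MCDI macros):

	MCDI_DECLARE_BUF(inbuf, MC_CMD_SET_MAC_EXT_IN_LEN);

	MCDI_SET_DWORD(inbuf, SET_MAC_EXT_IN_MTU, new_mtu);
	/* only fields whose CFG_* bit is set in CONTROL are applied */
	MCDI_SET_DWORD(inbuf, SET_MAC_EXT_IN_CONTROL,
		       1 << MC_CMD_SET_MAC_EXT_IN_CFG_MTU_LBN);
	rc = efx_mcdi_rpc(efx, MC_CMD_SET_MAC, inbuf, sizeof(inbuf),
			  NULL, 0, NULL);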
@@ -3040,6 +3769,7 @@
  * to 0.
  */
 #define       MC_CMD_SET_MAC_V2_OUT_MTU_OFST 0
+#define       MC_CMD_SET_MAC_V2_OUT_MTU_LEN 4
 
 
 /***********************************/
@@ -3144,6 +3874,7 @@
 #define       MC_CMD_MAC_STATS_IN_DMA_ADDR_LO_OFST 0
 #define       MC_CMD_MAC_STATS_IN_DMA_ADDR_HI_OFST 4
 #define       MC_CMD_MAC_STATS_IN_CMD_OFST 8
+#define       MC_CMD_MAC_STATS_IN_CMD_LEN 4
 #define        MC_CMD_MAC_STATS_IN_DMA_LBN 0
 #define        MC_CMD_MAC_STATS_IN_DMA_WIDTH 1
 #define        MC_CMD_MAC_STATS_IN_CLEAR_LBN 1
@@ -3158,9 +3889,16 @@
 #define        MC_CMD_MAC_STATS_IN_PERIODIC_NOEVENT_WIDTH 1
 #define        MC_CMD_MAC_STATS_IN_PERIOD_MS_LBN 16
 #define        MC_CMD_MAC_STATS_IN_PERIOD_MS_WIDTH 16
+/* DMA length. Should be set to MAC_STATS_NUM_STATS * sizeof(uint64_t), as
+ * returned by MC_CMD_GET_CAPABILITIES_V4_OUT. For legacy firmware not
+ * supporting MC_CMD_GET_CAPABILITIES_V4_OUT, DMA_LEN should be set to
+ * MC_CMD_MAC_NSTATS * sizeof(uint64_t)
+ */
 #define       MC_CMD_MAC_STATS_IN_DMA_LEN_OFST 12
+#define       MC_CMD_MAC_STATS_IN_DMA_LEN_LEN 4
 /* port id so vadapter stats can be provided */
 #define       MC_CMD_MAC_STATS_IN_PORT_ID_OFST 16
+#define       MC_CMD_MAC_STATS_IN_PORT_ID_LEN 4
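A sketch of the DMA_LEN choice described above; have_stats_count and num_mac_stats are assumed locals standing in for the result of a GET_CAPABILITIES_V4 query:

	size_t dma_len;

	if (have_stats_count)			/* MAC_STATS_NUM_STATS known */
		dma_len = num_mac_stats * sizeof(u64);
	else					/* legacy firmware */
		dma_len = MC_CMD_MAC_NSTATS * sizeof(u64);
	MCDI_SET_DWORD(inbuf, MAC_STATS_IN_DMA_LEN, dma_len);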
 
 /* MC_CMD_MAC_STATS_OUT_DMA msgresponse */
 #define    MC_CMD_MAC_STATS_OUT_DMA_LEN 0
@@ -3305,9 +4043,126 @@
 #define          MC_CMD_GMAC_DMABUF_START  0x40
 /* enum: End of GMAC stats buffer space, for Siena only. */
 #define          MC_CMD_GMAC_DMABUF_END    0x5f
-#define          MC_CMD_MAC_GENERATION_END 0x60 /* enum */
+/* enum: GENERATION_END value, used together with GENERATION_START to verify
+ * consistency of DMAd data. For legacy firmware / drivers without extended
+ * stats (more precisely, when DMA_LEN == MC_CMD_MAC_NSTATS *
+ * sizeof(uint64_t)), this entry holds the GENERATION_END value. Otherwise,
+ * this value is invalid/reserved and GENERATION_END is written as the last
+ * 64-bit word of the DMA buffer (at DMA_LEN - sizeof(uint64_t)). Note that
+ * this is consistent with the legacy behaviour, in the sense that entry 96 is
+ * the last 64-bit word in the buffer when DMA_LEN == MC_CMD_MAC_NSTATS *
+ * sizeof(uint64_t). See SF-109306-TC, Section 9.2 for details.
+ */
+#define          MC_CMD_MAC_GENERATION_END 0x60
 #define          MC_CMD_MAC_NSTATS  0x61 /* enum */
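A minimal consistency-check sketch built on the scheme described above: GENERATION_START (defined with the other MAC stats entries earlier in this header) is the first 64-bit word of the DMA buffer and GENERATION_END the last, so a reader copies the stats between two reads of the markers and retries on mismatch (num_stats and dest are assumed parameters):

static int read_mac_stats(const u64 *dma_stats, size_t num_stats, u64 *dest)
{
	u64 gen_end = dma_stats[num_stats - 1];

	rmb();			/* read stats only after GENERATION_END */
	memcpy(dest, dma_stats, num_stats * sizeof(u64));
	rmb();			/* re-read the start marker after the copy */
	if (dma_stats[MC_CMD_MAC_GENERATION_START] != gen_end)
		return -EAGAIN;	/* firmware updated mid-copy: retry */
	return 0;
}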
 
+/* MC_CMD_MAC_STATS_V2_OUT_DMA msgresponse */
+#define    MC_CMD_MAC_STATS_V2_OUT_DMA_LEN 0
+
+/* MC_CMD_MAC_STATS_V2_OUT_NO_DMA msgresponse */
+#define    MC_CMD_MAC_STATS_V2_OUT_NO_DMA_LEN (((MC_CMD_MAC_NSTATS_V2*64))>>3)
+#define       MC_CMD_MAC_STATS_V2_OUT_NO_DMA_STATISTICS_OFST 0
+#define       MC_CMD_MAC_STATS_V2_OUT_NO_DMA_STATISTICS_LEN 8
+#define       MC_CMD_MAC_STATS_V2_OUT_NO_DMA_STATISTICS_LO_OFST 0
+#define       MC_CMD_MAC_STATS_V2_OUT_NO_DMA_STATISTICS_HI_OFST 4
+#define       MC_CMD_MAC_STATS_V2_OUT_NO_DMA_STATISTICS_NUM MC_CMD_MAC_NSTATS_V2
+/* enum: Start of FEC stats buffer space, Medford2 and up */
+#define          MC_CMD_MAC_FEC_DMABUF_START  0x61
+/* enum: Number of uncorrected FEC codewords on link (RS-FEC only for Medford2)
+ */
+#define          MC_CMD_MAC_FEC_UNCORRECTED_ERRORS  0x61
+/* enum: Number of corrected FEC codewords on link (RS-FEC only for Medford2)
+ */
+#define          MC_CMD_MAC_FEC_CORRECTED_ERRORS  0x62
+/* enum: Number of corrected 10-bit symbol errors, lane 0 (RS-FEC only) */
+#define          MC_CMD_MAC_FEC_CORRECTED_SYMBOLS_LANE0  0x63
+/* enum: Number of corrected 10-bit symbol errors, lane 1 (RS-FEC only) */
+#define          MC_CMD_MAC_FEC_CORRECTED_SYMBOLS_LANE1  0x64
+/* enum: Number of corrected 10-bit symbol errors, lane 2 (RS-FEC only) */
+#define          MC_CMD_MAC_FEC_CORRECTED_SYMBOLS_LANE2  0x65
+/* enum: Number of corrected 10-bit symbol errors, lane 3 (RS-FEC only) */
+#define          MC_CMD_MAC_FEC_CORRECTED_SYMBOLS_LANE3  0x66
+/* enum: This includes the space at offset 103 which is the final
+ * GENERATION_END in a MAC_STATS_V2 response and otherwise unused.
+ */
+#define          MC_CMD_MAC_NSTATS_V2  0x68
+/*            Other enum values, see field(s): */
+/*               MC_CMD_MAC_STATS_OUT_NO_DMA/STATISTICS */
+
+/* MC_CMD_MAC_STATS_V3_OUT_DMA msgresponse */
+#define    MC_CMD_MAC_STATS_V3_OUT_DMA_LEN 0
+
+/* MC_CMD_MAC_STATS_V3_OUT_NO_DMA msgresponse */
+#define    MC_CMD_MAC_STATS_V3_OUT_NO_DMA_LEN (((MC_CMD_MAC_NSTATS_V3*64))>>3)
+#define       MC_CMD_MAC_STATS_V3_OUT_NO_DMA_STATISTICS_OFST 0
+#define       MC_CMD_MAC_STATS_V3_OUT_NO_DMA_STATISTICS_LEN 8
+#define       MC_CMD_MAC_STATS_V3_OUT_NO_DMA_STATISTICS_LO_OFST 0
+#define       MC_CMD_MAC_STATS_V3_OUT_NO_DMA_STATISTICS_HI_OFST 4
+#define       MC_CMD_MAC_STATS_V3_OUT_NO_DMA_STATISTICS_NUM MC_CMD_MAC_NSTATS_V3
+/* enum: Start of CTPIO stats buffer space, Medford2 and up */
+#define          MC_CMD_MAC_CTPIO_DMABUF_START  0x68
+/* enum: Number of CTPIO fallbacks because a DMA packet was in progress on the
+ * target VI
+ */
+#define          MC_CMD_MAC_CTPIO_VI_BUSY_FALLBACK  0x68
+/* enum: Number of times a CTPIO send wrote beyond frame end (informational
+ * only)
+ */
+#define          MC_CMD_MAC_CTPIO_LONG_WRITE_SUCCESS  0x69
+/* enum: Number of CTPIO failures because the TX doorbell was written before
+ * the end of the frame data
+ */
+#define          MC_CMD_MAC_CTPIO_MISSING_DBELL_FAIL  0x6a
+/* enum: Number of CTPIO failures because the internal FIFO overflowed */
+#define          MC_CMD_MAC_CTPIO_OVERFLOW_FAIL  0x6b
+/* enum: Number of CTPIO failures because the host did not deliver data fast
+ * enough to avoid MAC underflow
+ */
+#define          MC_CMD_MAC_CTPIO_UNDERFLOW_FAIL  0x6c
+/* enum: Number of CTPIO failures because the host did not deliver all the
+ * frame data within the timeout
+ */
+#define          MC_CMD_MAC_CTPIO_TIMEOUT_FAIL  0x6d
+/* enum: Number of CTPIO failures because the frame data arrived out of order
+ * or with gaps
+ */
+#define          MC_CMD_MAC_CTPIO_NONCONTIG_WR_FAIL  0x6e
+/* enum: Number of CTPIO failures because the host started a new frame before
+ * completing the previous one
+ */
+#define          MC_CMD_MAC_CTPIO_FRM_CLOBBER_FAIL  0x6f
+/* enum: Number of CTPIO failures because a write was not a multiple of 32 bits
+ * or not 32-bit aligned
+ */
+#define          MC_CMD_MAC_CTPIO_INVALID_WR_FAIL  0x70
+/* enum: Number of CTPIO fallbacks because another VI on the same port was
+ * sending a CTPIO frame
+ */
+#define          MC_CMD_MAC_CTPIO_VI_CLOBBER_FALLBACK  0x71
+/* enum: Number of CTPIO fallbacks because target VI did not have CTPIO enabled
+ */
+#define          MC_CMD_MAC_CTPIO_UNQUALIFIED_FALLBACK  0x72
+/* enum: Number of CTPIO fallbacks because length in header was less than 29
+ * bytes
+ */
+#define          MC_CMD_MAC_CTPIO_RUNT_FALLBACK  0x73
+/* enum: Total number of successful CTPIO sends on this port */
+#define          MC_CMD_MAC_CTPIO_SUCCESS  0x74
+/* enum: Total number of CTPIO fallbacks on this port */
+#define          MC_CMD_MAC_CTPIO_FALLBACK  0x75
+/* enum: Total number of CTPIO poisoned frames on this port, whether erased or
+ * not
+ */
+#define          MC_CMD_MAC_CTPIO_POISON  0x76
+/* enum: Total number of CTPIO erased frames on this port */
+#define          MC_CMD_MAC_CTPIO_ERASE  0x77
+/* enum: This includes the space at offset 120 which is the final
+ * GENERATION_END in a MAC_STATS_V3 response and otherwise unused.
+ */
+#define          MC_CMD_MAC_NSTATS_V3  0x79
+/*            Other enum values, see field(s): */
+/*               MC_CMD_MAC_STATS_V2_OUT_NO_DMA/STATISTICS */
+
 
 /***********************************/
 /* MC_CMD_SRIOV
@@ -3318,21 +4173,28 @@
 /* MC_CMD_SRIOV_IN msgrequest */
 #define    MC_CMD_SRIOV_IN_LEN 12
 #define       MC_CMD_SRIOV_IN_ENABLE_OFST 0
+#define       MC_CMD_SRIOV_IN_ENABLE_LEN 4
 #define       MC_CMD_SRIOV_IN_VI_BASE_OFST 4
+#define       MC_CMD_SRIOV_IN_VI_BASE_LEN 4
 #define       MC_CMD_SRIOV_IN_VF_COUNT_OFST 8
+#define       MC_CMD_SRIOV_IN_VF_COUNT_LEN 4
 
 /* MC_CMD_SRIOV_OUT msgresponse */
 #define    MC_CMD_SRIOV_OUT_LEN 8
 #define       MC_CMD_SRIOV_OUT_VI_SCALE_OFST 0
+#define       MC_CMD_SRIOV_OUT_VI_SCALE_LEN 4
 #define       MC_CMD_SRIOV_OUT_VF_TOTAL_OFST 4
+#define       MC_CMD_SRIOV_OUT_VF_TOTAL_LEN 4
 
 /* MC_CMD_MEMCPY_RECORD_TYPEDEF structuredef */
 #define    MC_CMD_MEMCPY_RECORD_TYPEDEF_LEN 32
 /* this is only used for the first record */
 #define       MC_CMD_MEMCPY_RECORD_TYPEDEF_NUM_RECORDS_OFST 0
+#define       MC_CMD_MEMCPY_RECORD_TYPEDEF_NUM_RECORDS_LEN 4
 #define       MC_CMD_MEMCPY_RECORD_TYPEDEF_NUM_RECORDS_LBN 0
 #define       MC_CMD_MEMCPY_RECORD_TYPEDEF_NUM_RECORDS_WIDTH 32
 #define       MC_CMD_MEMCPY_RECORD_TYPEDEF_TO_RID_OFST 4
+#define       MC_CMD_MEMCPY_RECORD_TYPEDEF_TO_RID_LEN 4
 #define       MC_CMD_MEMCPY_RECORD_TYPEDEF_TO_RID_LBN 32
 #define       MC_CMD_MEMCPY_RECORD_TYPEDEF_TO_RID_WIDTH 32
 #define       MC_CMD_MEMCPY_RECORD_TYPEDEF_TO_ADDR_OFST 8
@@ -3342,6 +4204,7 @@
 #define       MC_CMD_MEMCPY_RECORD_TYPEDEF_TO_ADDR_LBN 64
 #define       MC_CMD_MEMCPY_RECORD_TYPEDEF_TO_ADDR_WIDTH 64
 #define       MC_CMD_MEMCPY_RECORD_TYPEDEF_FROM_RID_OFST 16
+#define       MC_CMD_MEMCPY_RECORD_TYPEDEF_FROM_RID_LEN 4
 #define          MC_CMD_MEMCPY_RECORD_TYPEDEF_RID_INLINE 0x100 /* enum */
 #define       MC_CMD_MEMCPY_RECORD_TYPEDEF_FROM_RID_LBN 128
 #define       MC_CMD_MEMCPY_RECORD_TYPEDEF_FROM_RID_WIDTH 32
@@ -3352,6 +4215,7 @@
 #define       MC_CMD_MEMCPY_RECORD_TYPEDEF_FROM_ADDR_LBN 160
 #define       MC_CMD_MEMCPY_RECORD_TYPEDEF_FROM_ADDR_WIDTH 64
 #define       MC_CMD_MEMCPY_RECORD_TYPEDEF_LENGTH_OFST 28
+#define       MC_CMD_MEMCPY_RECORD_TYPEDEF_LENGTH_LEN 4
 #define       MC_CMD_MEMCPY_RECORD_TYPEDEF_LENGTH_LBN 224
 #define       MC_CMD_MEMCPY_RECORD_TYPEDEF_LENGTH_WIDTH 32
 
@@ -3403,10 +4267,12 @@
 /* MC_CMD_WOL_FILTER_SET_IN msgrequest */
 #define    MC_CMD_WOL_FILTER_SET_IN_LEN 192
 #define       MC_CMD_WOL_FILTER_SET_IN_FILTER_MODE_OFST 0
+#define       MC_CMD_WOL_FILTER_SET_IN_FILTER_MODE_LEN 4
 #define          MC_CMD_FILTER_MODE_SIMPLE    0x0 /* enum */
 #define          MC_CMD_FILTER_MODE_STRUCTURED 0xffffffff /* enum */
 /* A type value of 1 is unused. */
 #define       MC_CMD_WOL_FILTER_SET_IN_WOL_TYPE_OFST 4
+#define       MC_CMD_WOL_FILTER_SET_IN_WOL_TYPE_LEN 4
 /* enum: Magic */
 #define          MC_CMD_WOL_TYPE_MAGIC      0x0
 /* enum: MS Windows Magic */
@@ -3428,7 +4294,9 @@
 /* MC_CMD_WOL_FILTER_SET_IN_MAGIC msgrequest */
 #define    MC_CMD_WOL_FILTER_SET_IN_MAGIC_LEN 16
 /*            MC_CMD_WOL_FILTER_SET_IN_FILTER_MODE_OFST 0 */
+/*            MC_CMD_WOL_FILTER_SET_IN_FILTER_MODE_LEN 4 */
 /*            MC_CMD_WOL_FILTER_SET_IN_WOL_TYPE_OFST 4 */
+/*            MC_CMD_WOL_FILTER_SET_IN_WOL_TYPE_LEN 4 */
 #define       MC_CMD_WOL_FILTER_SET_IN_MAGIC_MAC_OFST 8
 #define       MC_CMD_WOL_FILTER_SET_IN_MAGIC_MAC_LEN 8
 #define       MC_CMD_WOL_FILTER_SET_IN_MAGIC_MAC_LO_OFST 8
@@ -3437,9 +4305,13 @@
 /* MC_CMD_WOL_FILTER_SET_IN_IPV4_SYN msgrequest */
 #define    MC_CMD_WOL_FILTER_SET_IN_IPV4_SYN_LEN 20
 /*            MC_CMD_WOL_FILTER_SET_IN_FILTER_MODE_OFST 0 */
+/*            MC_CMD_WOL_FILTER_SET_IN_FILTER_MODE_LEN 4 */
 /*            MC_CMD_WOL_FILTER_SET_IN_WOL_TYPE_OFST 4 */
+/*            MC_CMD_WOL_FILTER_SET_IN_WOL_TYPE_LEN 4 */
 #define       MC_CMD_WOL_FILTER_SET_IN_IPV4_SYN_SRC_IP_OFST 8
+#define       MC_CMD_WOL_FILTER_SET_IN_IPV4_SYN_SRC_IP_LEN 4
 #define       MC_CMD_WOL_FILTER_SET_IN_IPV4_SYN_DST_IP_OFST 12
+#define       MC_CMD_WOL_FILTER_SET_IN_IPV4_SYN_DST_IP_LEN 4
 #define       MC_CMD_WOL_FILTER_SET_IN_IPV4_SYN_SRC_PORT_OFST 16
 #define       MC_CMD_WOL_FILTER_SET_IN_IPV4_SYN_SRC_PORT_LEN 2
 #define       MC_CMD_WOL_FILTER_SET_IN_IPV4_SYN_DST_PORT_OFST 18
@@ -3448,7 +4320,9 @@
 /* MC_CMD_WOL_FILTER_SET_IN_IPV6_SYN msgrequest */
 #define    MC_CMD_WOL_FILTER_SET_IN_IPV6_SYN_LEN 44
 /*            MC_CMD_WOL_FILTER_SET_IN_FILTER_MODE_OFST 0 */
+/*            MC_CMD_WOL_FILTER_SET_IN_FILTER_MODE_LEN 4 */
 /*            MC_CMD_WOL_FILTER_SET_IN_WOL_TYPE_OFST 4 */
+/*            MC_CMD_WOL_FILTER_SET_IN_WOL_TYPE_LEN 4 */
 #define       MC_CMD_WOL_FILTER_SET_IN_IPV6_SYN_SRC_IP_OFST 8
 #define       MC_CMD_WOL_FILTER_SET_IN_IPV6_SYN_SRC_IP_LEN 16
 #define       MC_CMD_WOL_FILTER_SET_IN_IPV6_SYN_DST_IP_OFST 24
@@ -3461,7 +4335,9 @@
 /* MC_CMD_WOL_FILTER_SET_IN_BITMAP msgrequest */
 #define    MC_CMD_WOL_FILTER_SET_IN_BITMAP_LEN 187
 /*            MC_CMD_WOL_FILTER_SET_IN_FILTER_MODE_OFST 0 */
+/*            MC_CMD_WOL_FILTER_SET_IN_FILTER_MODE_LEN 4 */
 /*            MC_CMD_WOL_FILTER_SET_IN_WOL_TYPE_OFST 4 */
+/*            MC_CMD_WOL_FILTER_SET_IN_WOL_TYPE_LEN 4 */
 #define       MC_CMD_WOL_FILTER_SET_IN_BITMAP_MASK_OFST 8
 #define       MC_CMD_WOL_FILTER_SET_IN_BITMAP_MASK_LEN 48
 #define       MC_CMD_WOL_FILTER_SET_IN_BITMAP_BITMAP_OFST 56
@@ -3476,8 +4352,11 @@
 /* MC_CMD_WOL_FILTER_SET_IN_LINK msgrequest */
 #define    MC_CMD_WOL_FILTER_SET_IN_LINK_LEN 12
 /*            MC_CMD_WOL_FILTER_SET_IN_FILTER_MODE_OFST 0 */
+/*            MC_CMD_WOL_FILTER_SET_IN_FILTER_MODE_LEN 4 */
 /*            MC_CMD_WOL_FILTER_SET_IN_WOL_TYPE_OFST 4 */
+/*            MC_CMD_WOL_FILTER_SET_IN_WOL_TYPE_LEN 4 */
 #define       MC_CMD_WOL_FILTER_SET_IN_LINK_MASK_OFST 8
+#define       MC_CMD_WOL_FILTER_SET_IN_LINK_MASK_LEN 4
 #define        MC_CMD_WOL_FILTER_SET_IN_LINK_UP_LBN 0
 #define        MC_CMD_WOL_FILTER_SET_IN_LINK_UP_WIDTH 1
 #define        MC_CMD_WOL_FILTER_SET_IN_LINK_DOWN_LBN 1
@@ -3486,6 +4365,7 @@
 /* MC_CMD_WOL_FILTER_SET_OUT msgresponse */
 #define    MC_CMD_WOL_FILTER_SET_OUT_LEN 4
 #define       MC_CMD_WOL_FILTER_SET_OUT_FILTER_ID_OFST 0
+#define       MC_CMD_WOL_FILTER_SET_OUT_FILTER_ID_LEN 4
 
 
 /***********************************/
@@ -3499,6 +4379,7 @@
 /* MC_CMD_WOL_FILTER_REMOVE_IN msgrequest */
 #define    MC_CMD_WOL_FILTER_REMOVE_IN_LEN 4
 #define       MC_CMD_WOL_FILTER_REMOVE_IN_FILTER_ID_OFST 0
+#define       MC_CMD_WOL_FILTER_REMOVE_IN_FILTER_ID_LEN 4
 
 /* MC_CMD_WOL_FILTER_REMOVE_OUT msgresponse */
 #define    MC_CMD_WOL_FILTER_REMOVE_OUT_LEN 0
@@ -3516,6 +4397,7 @@
 /* MC_CMD_WOL_FILTER_RESET_IN msgrequest */
 #define    MC_CMD_WOL_FILTER_RESET_IN_LEN 4
 #define       MC_CMD_WOL_FILTER_RESET_IN_MASK_OFST 0
+#define       MC_CMD_WOL_FILTER_RESET_IN_MASK_LEN 4
 #define          MC_CMD_WOL_FILTER_RESET_IN_WAKE_FILTERS 0x1 /* enum */
 #define          MC_CMD_WOL_FILTER_RESET_IN_LIGHTSOUT_OFFLOADS 0x2 /* enum */
 
@@ -3556,6 +4438,7 @@
 #define    MC_CMD_NVRAM_TYPES_OUT_LEN 4
 /* Bit mask of supported types. */
 #define       MC_CMD_NVRAM_TYPES_OUT_TYPES_OFST 0
+#define       MC_CMD_NVRAM_TYPES_OUT_TYPES_LEN 4
 /* enum: Disabled callisto. */
 #define          MC_CMD_NVRAM_TYPE_DISABLED_CALLISTO 0x0
 /* enum: MC firmware. */
@@ -3612,47 +4495,65 @@
 /* MC_CMD_NVRAM_INFO_IN msgrequest */
 #define    MC_CMD_NVRAM_INFO_IN_LEN 4
 #define       MC_CMD_NVRAM_INFO_IN_TYPE_OFST 0
+#define       MC_CMD_NVRAM_INFO_IN_TYPE_LEN 4
 /*            Enum values, see field(s): */
 /*               MC_CMD_NVRAM_TYPES/MC_CMD_NVRAM_TYPES_OUT/TYPES */
 
 /* MC_CMD_NVRAM_INFO_OUT msgresponse */
 #define    MC_CMD_NVRAM_INFO_OUT_LEN 24
 #define       MC_CMD_NVRAM_INFO_OUT_TYPE_OFST 0
+#define       MC_CMD_NVRAM_INFO_OUT_TYPE_LEN 4
 /*            Enum values, see field(s): */
 /*               MC_CMD_NVRAM_TYPES/MC_CMD_NVRAM_TYPES_OUT/TYPES */
 #define       MC_CMD_NVRAM_INFO_OUT_SIZE_OFST 4
+#define       MC_CMD_NVRAM_INFO_OUT_SIZE_LEN 4
 #define       MC_CMD_NVRAM_INFO_OUT_ERASESIZE_OFST 8
+#define       MC_CMD_NVRAM_INFO_OUT_ERASESIZE_LEN 4
 #define       MC_CMD_NVRAM_INFO_OUT_FLAGS_OFST 12
+#define       MC_CMD_NVRAM_INFO_OUT_FLAGS_LEN 4
 #define        MC_CMD_NVRAM_INFO_OUT_PROTECTED_LBN 0
 #define        MC_CMD_NVRAM_INFO_OUT_PROTECTED_WIDTH 1
 #define        MC_CMD_NVRAM_INFO_OUT_TLV_LBN 1
 #define        MC_CMD_NVRAM_INFO_OUT_TLV_WIDTH 1
+#define        MC_CMD_NVRAM_INFO_OUT_READ_ONLY_LBN 5
+#define        MC_CMD_NVRAM_INFO_OUT_READ_ONLY_WIDTH 1
 #define        MC_CMD_NVRAM_INFO_OUT_CMAC_LBN 6
 #define        MC_CMD_NVRAM_INFO_OUT_CMAC_WIDTH 1
 #define        MC_CMD_NVRAM_INFO_OUT_A_B_LBN 7
 #define        MC_CMD_NVRAM_INFO_OUT_A_B_WIDTH 1
 #define       MC_CMD_NVRAM_INFO_OUT_PHYSDEV_OFST 16
+#define       MC_CMD_NVRAM_INFO_OUT_PHYSDEV_LEN 4
 #define       MC_CMD_NVRAM_INFO_OUT_PHYSADDR_OFST 20
+#define       MC_CMD_NVRAM_INFO_OUT_PHYSADDR_LEN 4
 
 /* MC_CMD_NVRAM_INFO_V2_OUT msgresponse */
 #define    MC_CMD_NVRAM_INFO_V2_OUT_LEN 28
 #define       MC_CMD_NVRAM_INFO_V2_OUT_TYPE_OFST 0
+#define       MC_CMD_NVRAM_INFO_V2_OUT_TYPE_LEN 4
 /*            Enum values, see field(s): */
 /*               MC_CMD_NVRAM_TYPES/MC_CMD_NVRAM_TYPES_OUT/TYPES */
 #define       MC_CMD_NVRAM_INFO_V2_OUT_SIZE_OFST 4
+#define       MC_CMD_NVRAM_INFO_V2_OUT_SIZE_LEN 4
 #define       MC_CMD_NVRAM_INFO_V2_OUT_ERASESIZE_OFST 8
+#define       MC_CMD_NVRAM_INFO_V2_OUT_ERASESIZE_LEN 4
 #define       MC_CMD_NVRAM_INFO_V2_OUT_FLAGS_OFST 12
+#define       MC_CMD_NVRAM_INFO_V2_OUT_FLAGS_LEN 4
 #define        MC_CMD_NVRAM_INFO_V2_OUT_PROTECTED_LBN 0
 #define        MC_CMD_NVRAM_INFO_V2_OUT_PROTECTED_WIDTH 1
 #define        MC_CMD_NVRAM_INFO_V2_OUT_TLV_LBN 1
 #define        MC_CMD_NVRAM_INFO_V2_OUT_TLV_WIDTH 1
+#define        MC_CMD_NVRAM_INFO_V2_OUT_READ_ONLY_LBN 5
+#define        MC_CMD_NVRAM_INFO_V2_OUT_READ_ONLY_WIDTH 1
 #define        MC_CMD_NVRAM_INFO_V2_OUT_A_B_LBN 7
 #define        MC_CMD_NVRAM_INFO_V2_OUT_A_B_WIDTH 1
 #define       MC_CMD_NVRAM_INFO_V2_OUT_PHYSDEV_OFST 16
+#define       MC_CMD_NVRAM_INFO_V2_OUT_PHYSDEV_LEN 4
 #define       MC_CMD_NVRAM_INFO_V2_OUT_PHYSADDR_OFST 20
+#define       MC_CMD_NVRAM_INFO_V2_OUT_PHYSADDR_LEN 4
 /* Writes must be multiples of this size. Added to support the MUM on Sorrento.
  */
 #define       MC_CMD_NVRAM_INFO_V2_OUT_WRITESIZE_OFST 24
+#define       MC_CMD_NVRAM_INFO_V2_OUT_WRITESIZE_LEN 4
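Since writes must be WRITESIZE-aligned, an updater pads each chunk accordingly; a two-line sketch with writesize, remaining and max_write as assumed locals:

	size_t chunk = min_t(size_t, remaining, max_write);

	chunk = roundup(chunk, writesize);	/* WRITESIZE from NVRAM_INFO_V2 */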
 
 
 /***********************************/
@@ -3670,6 +4571,7 @@
  */
 #define    MC_CMD_NVRAM_UPDATE_START_IN_LEN 4
 #define       MC_CMD_NVRAM_UPDATE_START_IN_TYPE_OFST 0
+#define       MC_CMD_NVRAM_UPDATE_START_IN_TYPE_LEN 4
 /*            Enum values, see field(s): */
 /*               MC_CMD_NVRAM_TYPES/MC_CMD_NVRAM_TYPES_OUT/TYPES */
 
@@ -3680,9 +4582,11 @@
  */
 #define    MC_CMD_NVRAM_UPDATE_START_V2_IN_LEN 8
 #define       MC_CMD_NVRAM_UPDATE_START_V2_IN_TYPE_OFST 0
+#define       MC_CMD_NVRAM_UPDATE_START_V2_IN_TYPE_LEN 4
 /*            Enum values, see field(s): */
 /*               MC_CMD_NVRAM_TYPES/MC_CMD_NVRAM_TYPES_OUT/TYPES */
 #define       MC_CMD_NVRAM_UPDATE_START_V2_IN_FLAGS_OFST 4
+#define       MC_CMD_NVRAM_UPDATE_START_V2_IN_FLAGS_LEN 4
 #define        MC_CMD_NVRAM_UPDATE_START_V2_IN_FLAG_REPORT_VERIFY_RESULT_LBN 0
 #define        MC_CMD_NVRAM_UPDATE_START_V2_IN_FLAG_REPORT_VERIFY_RESULT_WIDTH 1
 
@@ -3703,20 +4607,26 @@
 /* MC_CMD_NVRAM_READ_IN msgrequest */
 #define    MC_CMD_NVRAM_READ_IN_LEN 12
 #define       MC_CMD_NVRAM_READ_IN_TYPE_OFST 0
+#define       MC_CMD_NVRAM_READ_IN_TYPE_LEN 4
 /*            Enum values, see field(s): */
 /*               MC_CMD_NVRAM_TYPES/MC_CMD_NVRAM_TYPES_OUT/TYPES */
 #define       MC_CMD_NVRAM_READ_IN_OFFSET_OFST 4
+#define       MC_CMD_NVRAM_READ_IN_OFFSET_LEN 4
 /* amount to read in bytes */
 #define       MC_CMD_NVRAM_READ_IN_LENGTH_OFST 8
+#define       MC_CMD_NVRAM_READ_IN_LENGTH_LEN 4
 
 /* MC_CMD_NVRAM_READ_IN_V2 msgrequest */
 #define    MC_CMD_NVRAM_READ_IN_V2_LEN 16
 #define       MC_CMD_NVRAM_READ_IN_V2_TYPE_OFST 0
+#define       MC_CMD_NVRAM_READ_IN_V2_TYPE_LEN 4
 /*            Enum values, see field(s): */
 /*               MC_CMD_NVRAM_TYPES/MC_CMD_NVRAM_TYPES_OUT/TYPES */
 #define       MC_CMD_NVRAM_READ_IN_V2_OFFSET_OFST 4
+#define       MC_CMD_NVRAM_READ_IN_V2_OFFSET_LEN 4
 /* amount to read in bytes */
 #define       MC_CMD_NVRAM_READ_IN_V2_LENGTH_OFST 8
+#define       MC_CMD_NVRAM_READ_IN_V2_LENGTH_LEN 4
 /* Optional control info. If a partition is stored with an A/B versioning
  * scheme (i.e. in more than one physical partition in NVRAM) the host can set
  * this to control which underlying physical partition is used to read data
@@ -3726,6 +4636,7 @@
  * verifying by reading with MODE=TARGET_BACKUP.
  */
 #define       MC_CMD_NVRAM_READ_IN_V2_MODE_OFST 12
+#define       MC_CMD_NVRAM_READ_IN_V2_MODE_LEN 4
 /* enum: Same as omitting MODE: caller sees data in current partition unless it
  * holds the write lock in which case it sees data in the partition it is
  * updating.
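A hedged sketch of the verify-after-write flow this enables, reading back the partition just written; type, offset, length, efx, outbuf and outlen are assumed locals, and the TARGET_BACKUP enum name is an assumption following the pattern of the other MODE values:

	MCDI_DECLARE_BUF(inbuf, MC_CMD_NVRAM_READ_IN_V2_LEN);

	MCDI_SET_DWORD(inbuf, NVRAM_READ_IN_V2_TYPE, type);
	MCDI_SET_DWORD(inbuf, NVRAM_READ_IN_V2_OFFSET, offset);
	MCDI_SET_DWORD(inbuf, NVRAM_READ_IN_V2_LENGTH, length);
	MCDI_SET_DWORD(inbuf, NVRAM_READ_IN_V2_MODE,
		       MC_CMD_NVRAM_READ_IN_V2_TARGET_BACKUP);
	rc = efx_mcdi_rpc(efx, MC_CMD_NVRAM_READ, inbuf, sizeof(inbuf),
			  outbuf, sizeof(outbuf), &outlen);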
@@ -3765,10 +4676,13 @@
 #define    MC_CMD_NVRAM_WRITE_IN_LENMAX 252
 #define    MC_CMD_NVRAM_WRITE_IN_LEN(num) (12+1*(num))
 #define       MC_CMD_NVRAM_WRITE_IN_TYPE_OFST 0
+#define       MC_CMD_NVRAM_WRITE_IN_TYPE_LEN 4
 /*            Enum values, see field(s): */
 /*               MC_CMD_NVRAM_TYPES/MC_CMD_NVRAM_TYPES_OUT/TYPES */
 #define       MC_CMD_NVRAM_WRITE_IN_OFFSET_OFST 4
+#define       MC_CMD_NVRAM_WRITE_IN_OFFSET_LEN 4
 #define       MC_CMD_NVRAM_WRITE_IN_LENGTH_OFST 8
+#define       MC_CMD_NVRAM_WRITE_IN_LENGTH_LEN 4
 #define       MC_CMD_NVRAM_WRITE_IN_WRITE_BUFFER_OFST 12
 #define       MC_CMD_NVRAM_WRITE_IN_WRITE_BUFFER_LEN 1
 #define       MC_CMD_NVRAM_WRITE_IN_WRITE_BUFFER_MINNUM 1
@@ -3791,10 +4705,13 @@
 /* MC_CMD_NVRAM_ERASE_IN msgrequest */
 #define    MC_CMD_NVRAM_ERASE_IN_LEN 12
 #define       MC_CMD_NVRAM_ERASE_IN_TYPE_OFST 0
+#define       MC_CMD_NVRAM_ERASE_IN_TYPE_LEN 4
 /*            Enum values, see field(s): */
 /*               MC_CMD_NVRAM_TYPES/MC_CMD_NVRAM_TYPES_OUT/TYPES */
 #define       MC_CMD_NVRAM_ERASE_IN_OFFSET_OFST 4
+#define       MC_CMD_NVRAM_ERASE_IN_OFFSET_LEN 4
 #define       MC_CMD_NVRAM_ERASE_IN_LENGTH_OFST 8
+#define       MC_CMD_NVRAM_ERASE_IN_LENGTH_LEN 4
 
 /* MC_CMD_NVRAM_ERASE_OUT msgresponse */
 #define    MC_CMD_NVRAM_ERASE_OUT_LEN 0
@@ -3815,9 +4732,11 @@
  */
 #define    MC_CMD_NVRAM_UPDATE_FINISH_IN_LEN 8
 #define       MC_CMD_NVRAM_UPDATE_FINISH_IN_TYPE_OFST 0
+#define       MC_CMD_NVRAM_UPDATE_FINISH_IN_TYPE_LEN 4
 /*            Enum values, see field(s): */
 /*               MC_CMD_NVRAM_TYPES/MC_CMD_NVRAM_TYPES_OUT/TYPES */
 #define       MC_CMD_NVRAM_UPDATE_FINISH_IN_REBOOT_OFST 4
+#define       MC_CMD_NVRAM_UPDATE_FINISH_IN_REBOOT_LEN 4
 
 /* MC_CMD_NVRAM_UPDATE_FINISH_V2_IN msgrequest: Extended NVRAM_UPDATE_FINISH
  * request with additional flags indicating version of NVRAM_UPDATE commands in
@@ -3826,10 +4745,13 @@
  */
 #define    MC_CMD_NVRAM_UPDATE_FINISH_V2_IN_LEN 12
 #define       MC_CMD_NVRAM_UPDATE_FINISH_V2_IN_TYPE_OFST 0
+#define       MC_CMD_NVRAM_UPDATE_FINISH_V2_IN_TYPE_LEN 4
 /*            Enum values, see field(s): */
 /*               MC_CMD_NVRAM_TYPES/MC_CMD_NVRAM_TYPES_OUT/TYPES */
 #define       MC_CMD_NVRAM_UPDATE_FINISH_V2_IN_REBOOT_OFST 4
+#define       MC_CMD_NVRAM_UPDATE_FINISH_V2_IN_REBOOT_LEN 4
 #define       MC_CMD_NVRAM_UPDATE_FINISH_V2_IN_FLAGS_OFST 8
+#define       MC_CMD_NVRAM_UPDATE_FINISH_V2_IN_FLAGS_LEN 4
 #define        MC_CMD_NVRAM_UPDATE_FINISH_V2_IN_FLAG_REPORT_VERIFY_RESULT_LBN 0
 #define        MC_CMD_NVRAM_UPDATE_FINISH_V2_IN_FLAG_REPORT_VERIFY_RESULT_WIDTH 1
 
@@ -3848,16 +4770,19 @@
  * This process takes a few seconds to complete, so it is likely to take more
  * than the MCDI timeout. Hence signature verification is initiated when
  * MC_CMD_NVRAM_UPDATE_FINISH_V2_IN is received by the firmware, however, the
- * MCDI command returns immediately with error code EAGAIN. Subsequent
- * NVRAM_UPDATE_FINISH_V2_IN requests also return EAGAIN if the verification is
- * in progress. Once the verification has completed, this response payload
- * includes the results of the signature verification. Note that the nvram lock
- * in firmware is only released after the verification has completed and the
- * host has read back the result code from firmware.
+ * MCDI command is run in a background MCDI processing thread. This response
+ * payload includes the results of the signature verification. Note that the
+ * per-partition nvram lock in firmware is only released after the verification
+ * has completed.
  */
 #define    MC_CMD_NVRAM_UPDATE_FINISH_V2_OUT_LEN 4
 /* Result of nvram update completion processing */
 #define       MC_CMD_NVRAM_UPDATE_FINISH_V2_OUT_RESULT_CODE_OFST 0
+#define       MC_CMD_NVRAM_UPDATE_FINISH_V2_OUT_RESULT_CODE_LEN 4
+/* enum: Invalid return code; only non-zero values are defined. Defined as
+ * unknown for backwards compatibility with NVRAM_UPDATE_FINISH_OUT.
+ */
+#define          MC_CMD_NVRAM_VERIFY_RC_UNKNOWN 0x0
 /* enum: Verify succeeded without any errors. */
 #define          MC_CMD_NVRAM_VERIFY_RC_SUCCESS 0x1
 /* enum: CMS format verification failed due to an internal error. */
@@ -3884,6 +4809,12 @@
  * Trusted approver's list.
  */
 #define          MC_CMD_NVRAM_VERIFY_RC_NO_SIGNATURE_MATCH 0xb
+/* enum: The image contains a test-signed certificate, but the adapter accepts
+ * only production signed images.
+ */
+#define          MC_CMD_NVRAM_VERIFY_RC_REJECT_TEST_SIGNED 0xc
+/* enum: The image has a lower security level than the current firmware. */
+#define          MC_CMD_NVRAM_VERIFY_RC_SECURITY_LEVEL_DOWNGRADE 0xd
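A sketch of how a caller distinguishes transport success from verification failure after NVRAM_UPDATE_FINISH; a short response (legacy firmware) carries no result code, so only V2-sized responses are checked (inbuf, efx and outlen are assumed locals):

	MCDI_DECLARE_BUF(outbuf, MC_CMD_NVRAM_UPDATE_FINISH_V2_OUT_LEN);

	rc = efx_mcdi_rpc(efx, MC_CMD_NVRAM_UPDATE_FINISH, inbuf, sizeof(inbuf),
			  outbuf, sizeof(outbuf), &outlen);
	if (rc)
		return rc;
	if (outlen >= MC_CMD_NVRAM_UPDATE_FINISH_V2_OUT_LEN &&
	    MCDI_DWORD(outbuf, NVRAM_UPDATE_FINISH_V2_OUT_RESULT_CODE) !=
	    MC_CMD_NVRAM_VERIFY_RC_SUCCESS)
		return -EIO;		/* signature verification failed */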
 
 
 /***********************************/
@@ -3911,6 +4842,7 @@
 /* MC_CMD_REBOOT_IN msgrequest */
 #define    MC_CMD_REBOOT_IN_LEN 4
 #define       MC_CMD_REBOOT_IN_FLAGS_OFST 0
+#define       MC_CMD_REBOOT_IN_FLAGS_LEN 4
 #define          MC_CMD_REBOOT_FLAGS_AFTER_ASSERTION 0x1 /* enum */
 
 /* MC_CMD_REBOOT_OUT msgresponse */
@@ -3947,11 +4879,12 @@
  */
 #define MC_CMD_REBOOT_MODE 0x3f
 
-#define MC_CMD_0x3f_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+#define MC_CMD_0x3f_PRIVILEGE_CTG SRIOV_CTG_INSECURE
 
 /* MC_CMD_REBOOT_MODE_IN msgrequest */
 #define    MC_CMD_REBOOT_MODE_IN_LEN 4
 #define       MC_CMD_REBOOT_MODE_IN_VALUE_OFST 0
+#define       MC_CMD_REBOOT_MODE_IN_VALUE_LEN 4
 /* enum: Normal. */
 #define          MC_CMD_REBOOT_MODE_NORMAL 0x0
 /* enum: Power-on Reset. */
@@ -3966,6 +4899,7 @@
 /* MC_CMD_REBOOT_MODE_OUT msgresponse */
 #define    MC_CMD_REBOOT_MODE_OUT_LEN 4
 #define       MC_CMD_REBOOT_MODE_OUT_VALUE_OFST 0
+#define       MC_CMD_REBOOT_MODE_OUT_VALUE_LEN 4
 
 
 /***********************************/
@@ -4001,7 +4935,7 @@
  */
 #define MC_CMD_SENSOR_INFO 0x41
 
-#define MC_CMD_0x41_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+#define MC_CMD_0x41_PRIVILEGE_CTG SRIOV_CTG_GENERAL
 
 /* MC_CMD_SENSOR_INFO_IN msgrequest */
 #define    MC_CMD_SENSOR_INFO_IN_LEN 0
@@ -4015,12 +4949,14 @@
  * Page 1 contains sensors 32 to 62 (sensor 63 is the next page bit), etc.
  */
 #define       MC_CMD_SENSOR_INFO_EXT_IN_PAGE_OFST 0
+#define       MC_CMD_SENSOR_INFO_EXT_IN_PAGE_LEN 4
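A sketch of the paging loop this implies: bit 31 of each returned mask (NEXT_PAGE_LBN, defined below for the EXT_OUT response) is not a sensor but a continuation flag; efx, outbuf, outlen and rc are assumed locals:

	unsigned int page = 0;
	u32 mask;

	do {
		MCDI_DECLARE_BUF(inbuf, MC_CMD_SENSOR_INFO_EXT_IN_LEN);

		MCDI_SET_DWORD(inbuf, SENSOR_INFO_EXT_IN_PAGE, page++);
		rc = efx_mcdi_rpc(efx, MC_CMD_SENSOR_INFO, inbuf, sizeof(inbuf),
				  outbuf, sizeof(outbuf), &outlen);
		if (rc)
			return rc;
		mask = MCDI_DWORD(outbuf, SENSOR_INFO_EXT_OUT_MASK);
		/* decode the 31 sensor bits of this page here ... */
	} while (mask & (1 << MC_CMD_SENSOR_INFO_EXT_OUT_NEXT_PAGE_LBN));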
 
 /* MC_CMD_SENSOR_INFO_OUT msgresponse */
 #define    MC_CMD_SENSOR_INFO_OUT_LENMIN 4
 #define    MC_CMD_SENSOR_INFO_OUT_LENMAX 252
 #define    MC_CMD_SENSOR_INFO_OUT_LEN(num) (4+8*(num))
 #define       MC_CMD_SENSOR_INFO_OUT_MASK_OFST 0
+#define       MC_CMD_SENSOR_INFO_OUT_MASK_LEN 4
 /* enum: Controller temperature: degC */
 #define          MC_CMD_SENSOR_CONTROLLER_TEMP  0x0
 /* enum: Phy common temperature: degC */
@@ -4183,6 +5119,20 @@
 #define          MC_CMD_SENSOR_BOARD_FRONT_TEMP  0x4f
 /* enum: Board temperature (back): degC */
 #define          MC_CMD_SENSOR_BOARD_BACK_TEMP  0x50
+/* enum: 1.8v power current: mA */
+#define          MC_CMD_SENSOR_IN_I1V8  0x51
+/* enum: 2.5v power current: mA */
+#define          MC_CMD_SENSOR_IN_I2V5  0x52
+/* enum: 3.3v power current: mA */
+#define          MC_CMD_SENSOR_IN_I3V3  0x53
+/* enum: 12v power current: mA */
+#define          MC_CMD_SENSOR_IN_I12V0  0x54
+/* enum: 1.3v power: mV */
+#define          MC_CMD_SENSOR_IN_1V3  0x55
+/* enum: 1.3v power current: mA */
+#define          MC_CMD_SENSOR_IN_I1V3  0x56
+/* enum: Not a sensor: reserved for the next page flag */
+#define          MC_CMD_SENSOR_PAGE2_NEXT  0x5f
 /* MC_CMD_SENSOR_INFO_ENTRY_TYPEDEF */
 #define       MC_CMD_SENSOR_ENTRY_OFST 4
 #define       MC_CMD_SENSOR_ENTRY_LEN 8
@@ -4196,6 +5146,7 @@
 #define    MC_CMD_SENSOR_INFO_EXT_OUT_LENMAX 252
 #define    MC_CMD_SENSOR_INFO_EXT_OUT_LEN(num) (4+8*(num))
 #define       MC_CMD_SENSOR_INFO_EXT_OUT_MASK_OFST 0
+#define       MC_CMD_SENSOR_INFO_EXT_OUT_MASK_LEN 4
 /*            Enum values, see field(s): */
 /*               MC_CMD_SENSOR_INFO_OUT */
 #define        MC_CMD_SENSOR_INFO_EXT_OUT_NEXT_PAGE_LBN 31
@@ -4247,7 +5198,7 @@
  */
 #define MC_CMD_READ_SENSORS 0x42
 
-#define MC_CMD_0x42_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+#define MC_CMD_0x42_PRIVILEGE_CTG SRIOV_CTG_GENERAL
 
 /* MC_CMD_READ_SENSORS_IN msgrequest */
 #define    MC_CMD_READ_SENSORS_IN_LEN 8
@@ -4266,6 +5217,7 @@
 #define       MC_CMD_READ_SENSORS_EXT_IN_DMA_ADDR_HI_OFST 4
 /* Size in bytes of host buffer. */
 #define       MC_CMD_READ_SENSORS_EXT_IN_LENGTH_OFST 8
+#define       MC_CMD_READ_SENSORS_EXT_IN_LENGTH_LEN 4
 
 /* MC_CMD_READ_SENSORS_OUT msgresponse */
 #define    MC_CMD_READ_SENSORS_OUT_LEN 0
@@ -4319,6 +5271,7 @@
 /* MC_CMD_GET_PHY_STATE_OUT msgresponse */
 #define    MC_CMD_GET_PHY_STATE_OUT_LEN 4
 #define       MC_CMD_GET_PHY_STATE_OUT_STATE_OFST 0
+#define       MC_CMD_GET_PHY_STATE_OUT_STATE_LEN 4
 /* enum: Ok. */
 #define          MC_CMD_PHY_STATE_OK 0x1
 /* enum: Faulty. */
@@ -4355,6 +5308,7 @@
 /* MC_CMD_WOL_FILTER_GET_OUT msgresponse */
 #define    MC_CMD_WOL_FILTER_GET_OUT_LEN 4
 #define       MC_CMD_WOL_FILTER_GET_OUT_FILTER_ID_OFST 0
+#define       MC_CMD_WOL_FILTER_GET_OUT_FILTER_ID_LEN 4
 
 
 /***********************************/
@@ -4371,6 +5325,7 @@
 #define    MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_LENMAX 252
 #define    MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_LEN(num) (4+4*(num))
 #define       MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_PROTOCOL_OFST 0
+#define       MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_PROTOCOL_LEN 4
 #define          MC_CMD_LIGHTSOUT_OFFLOAD_PROTOCOL_ARP 0x1 /* enum */
 #define          MC_CMD_LIGHTSOUT_OFFLOAD_PROTOCOL_NS  0x2 /* enum */
 #define       MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_DATA_OFST 4
@@ -4381,13 +5336,16 @@
 /* MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_ARP msgrequest */
 #define    MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_ARP_LEN 14
 /*            MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_PROTOCOL_OFST 0 */
+/*            MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_PROTOCOL_LEN 4 */
 #define       MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_ARP_MAC_OFST 4
 #define       MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_ARP_MAC_LEN 6
 #define       MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_ARP_IP_OFST 10
+#define       MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_ARP_IP_LEN 4
 
 /* MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_NS msgrequest */
 #define    MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_NS_LEN 42
 /*            MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_PROTOCOL_OFST 0 */
+/*            MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_PROTOCOL_LEN 4 */
 #define       MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_NS_MAC_OFST 4
 #define       MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_NS_MAC_LEN 6
 #define       MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_NS_SNIPV6_OFST 10
@@ -4398,6 +5356,7 @@
 /* MC_CMD_ADD_LIGHTSOUT_OFFLOAD_OUT msgresponse */
 #define    MC_CMD_ADD_LIGHTSOUT_OFFLOAD_OUT_LEN 4
 #define       MC_CMD_ADD_LIGHTSOUT_OFFLOAD_OUT_FILTER_ID_OFST 0
+#define       MC_CMD_ADD_LIGHTSOUT_OFFLOAD_OUT_FILTER_ID_LEN 4
 
 
 /***********************************/
@@ -4412,7 +5371,9 @@
 /* MC_CMD_REMOVE_LIGHTSOUT_OFFLOAD_IN msgrequest */
 #define    MC_CMD_REMOVE_LIGHTSOUT_OFFLOAD_IN_LEN 8
 #define       MC_CMD_REMOVE_LIGHTSOUT_OFFLOAD_IN_PROTOCOL_OFST 0
+#define       MC_CMD_REMOVE_LIGHTSOUT_OFFLOAD_IN_PROTOCOL_LEN 4
 #define       MC_CMD_REMOVE_LIGHTSOUT_OFFLOAD_IN_FILTER_ID_OFST 4
+#define       MC_CMD_REMOVE_LIGHTSOUT_OFFLOAD_IN_FILTER_ID_LEN 4
 
 /* MC_CMD_REMOVE_LIGHTSOUT_OFFLOAD_OUT msgresponse */
 #define    MC_CMD_REMOVE_LIGHTSOUT_OFFLOAD_OUT_LEN 0
@@ -4451,6 +5412,7 @@
 #define    MC_CMD_TESTASSERT_V2_IN_LEN 4
 /* How to provoke the assertion */
 #define       MC_CMD_TESTASSERT_V2_IN_TYPE_OFST 0
+#define       MC_CMD_TESTASSERT_V2_IN_TYPE_LEN 4
 /* enum: Assert using the FAIL_ASSERTION_WITH_USEFUL_VALUES macro. Unless
  * you're testing firmware, this is what you want.
  */
@@ -4486,6 +5448,7 @@
 #define    MC_CMD_WORKAROUND_IN_LEN 8
 /* The enums here must correspond with those in MC_CMD_GET_WORKAROUND. */
 #define       MC_CMD_WORKAROUND_IN_TYPE_OFST 0
+#define       MC_CMD_WORKAROUND_IN_TYPE_LEN 4
 /* enum: Bug 17230 work around. */
 #define          MC_CMD_WORKAROUND_BUG17230 0x1
 /* enum: Bug 35388 work around (unsafe EVQ writes). */
@@ -4514,6 +5477,7 @@
  * the workaround
  */
 #define       MC_CMD_WORKAROUND_IN_ENABLED_OFST 4
+#define       MC_CMD_WORKAROUND_IN_ENABLED_LEN 4
 
 /* MC_CMD_WORKAROUND_OUT msgresponse */
 #define    MC_CMD_WORKAROUND_OUT_LEN 0
@@ -4523,6 +5487,7 @@
  */
 #define    MC_CMD_WORKAROUND_EXT_OUT_LEN 4
 #define       MC_CMD_WORKAROUND_EXT_OUT_FLAGS_OFST 0
+#define       MC_CMD_WORKAROUND_EXT_OUT_FLAGS_LEN 4
 #define        MC_CMD_WORKAROUND_EXT_OUT_FLR_DONE_LBN 0
 #define        MC_CMD_WORKAROUND_EXT_OUT_FLR_DONE_WIDTH 1
 
@@ -4543,6 +5508,7 @@
 /* MC_CMD_GET_PHY_MEDIA_INFO_IN msgrequest */
 #define    MC_CMD_GET_PHY_MEDIA_INFO_IN_LEN 4
 #define       MC_CMD_GET_PHY_MEDIA_INFO_IN_PAGE_OFST 0
+#define       MC_CMD_GET_PHY_MEDIA_INFO_IN_PAGE_LEN 4
 
 /* MC_CMD_GET_PHY_MEDIA_INFO_OUT msgresponse */
 #define    MC_CMD_GET_PHY_MEDIA_INFO_OUT_LENMIN 5
@@ -4550,6 +5516,7 @@
 #define    MC_CMD_GET_PHY_MEDIA_INFO_OUT_LEN(num) (4+1*(num))
 /* in bytes */
 #define       MC_CMD_GET_PHY_MEDIA_INFO_OUT_DATALEN_OFST 0
+#define       MC_CMD_GET_PHY_MEDIA_INFO_OUT_DATALEN_LEN 4
 #define       MC_CMD_GET_PHY_MEDIA_INFO_OUT_DATA_OFST 4
 #define       MC_CMD_GET_PHY_MEDIA_INFO_OUT_DATA_LEN 1
 #define       MC_CMD_GET_PHY_MEDIA_INFO_OUT_DATA_MINNUM 1
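[Editor's note: in-tree code reads these fields through the sfc driver's MCDI helpers rather than raw offsets. A sketch of a GET_PHY_MEDIA_INFO exchange under that assumption (MCDI_DECLARE_BUF, MCDI_SET_DWORD, MCDI_DWORD, MCDI_PTR and efx_mcdi_rpc() as in drivers/net/ethernet/sfc/mcdi.h); the 128-byte page size and the error handling are illustrative only.]

static int read_media_info_page(struct efx_nic *efx, unsigned int page,
                                u8 *data, size_t space)
{
        MCDI_DECLARE_BUF(inbuf, MC_CMD_GET_PHY_MEDIA_INFO_IN_LEN);
        MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_PHY_MEDIA_INFO_OUT_LEN(128));
        size_t outlen, datalen;
        int rc;

        MCDI_SET_DWORD(inbuf, GET_PHY_MEDIA_INFO_IN_PAGE, page);
        rc = efx_mcdi_rpc(efx, MC_CMD_GET_PHY_MEDIA_INFO,
                          inbuf, sizeof(inbuf),
                          outbuf, sizeof(outbuf), &outlen);
        if (rc)
                return rc;
        /* DATALEN reports how many DATA bytes the MC actually filled in */
        datalen = MCDI_DWORD(outbuf, GET_PHY_MEDIA_INFO_OUT_DATALEN);
        memcpy(data, MCDI_PTR(outbuf, GET_PHY_MEDIA_INFO_OUT_DATA),
               min(datalen, space));
        return 0;
}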
@@ -4568,12 +5535,14 @@
 /* MC_CMD_NVRAM_TEST_IN msgrequest */
 #define    MC_CMD_NVRAM_TEST_IN_LEN 4
 #define       MC_CMD_NVRAM_TEST_IN_TYPE_OFST 0
+#define       MC_CMD_NVRAM_TEST_IN_TYPE_LEN 4
 /*            Enum values, see field(s): */
 /*               MC_CMD_NVRAM_TYPES/MC_CMD_NVRAM_TYPES_OUT/TYPES */
 
 /* MC_CMD_NVRAM_TEST_OUT msgresponse */
 #define    MC_CMD_NVRAM_TEST_OUT_LEN 4
 #define       MC_CMD_NVRAM_TEST_OUT_RESULT_OFST 0
+#define       MC_CMD_NVRAM_TEST_OUT_RESULT_LEN 4
 /* enum: Passed. */
 #define          MC_CMD_NVRAM_TEST_PASS 0x0
 /* enum: Failed. */
@@ -4594,12 +5563,16 @@
 #define    MC_CMD_MRSFP_TWEAK_IN_EQ_CONFIG_LEN 16
 /* 0-6 low->high de-emph. */
 #define       MC_CMD_MRSFP_TWEAK_IN_EQ_CONFIG_TXEQ_LEVEL_OFST 0
+#define       MC_CMD_MRSFP_TWEAK_IN_EQ_CONFIG_TXEQ_LEVEL_LEN 4
 /* 0-8 low->high ref.V */
 #define       MC_CMD_MRSFP_TWEAK_IN_EQ_CONFIG_TXEQ_DT_CFG_OFST 4
+#define       MC_CMD_MRSFP_TWEAK_IN_EQ_CONFIG_TXEQ_DT_CFG_LEN 4
 /* 0-8 0-8 low->high boost */
 #define       MC_CMD_MRSFP_TWEAK_IN_EQ_CONFIG_RXEQ_BOOST_OFST 8
+#define       MC_CMD_MRSFP_TWEAK_IN_EQ_CONFIG_RXEQ_BOOST_LEN 4
 /* 0-8 low->high ref.V */
 #define       MC_CMD_MRSFP_TWEAK_IN_EQ_CONFIG_RXEQ_DT_CFG_OFST 12
+#define       MC_CMD_MRSFP_TWEAK_IN_EQ_CONFIG_RXEQ_DT_CFG_LEN 4
 
 /* MC_CMD_MRSFP_TWEAK_IN_READ_ONLY msgrequest */
 #define    MC_CMD_MRSFP_TWEAK_IN_READ_ONLY_LEN 0
@@ -4608,10 +5581,13 @@
 #define    MC_CMD_MRSFP_TWEAK_OUT_LEN 12
 /* input bits */
 #define       MC_CMD_MRSFP_TWEAK_OUT_IOEXP_INPUTS_OFST 0
+#define       MC_CMD_MRSFP_TWEAK_OUT_IOEXP_INPUTS_LEN 4
 /* output bits */
 #define       MC_CMD_MRSFP_TWEAK_OUT_IOEXP_OUTPUTS_OFST 4
+#define       MC_CMD_MRSFP_TWEAK_OUT_IOEXP_OUTPUTS_LEN 4
 /* direction */
 #define       MC_CMD_MRSFP_TWEAK_OUT_IOEXP_DIRECTION_OFST 8
+#define       MC_CMD_MRSFP_TWEAK_OUT_IOEXP_DIRECTION_LEN 4
 /* enum: Out. */
 #define          MC_CMD_MRSFP_TWEAK_OUT_IOEXP_DIRECTION_OUT 0x0
 /* enum: In. */
@@ -4626,21 +5602,26 @@
  */
 #define MC_CMD_SENSOR_SET_LIMS 0x4e
 
-#define MC_CMD_0x4e_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+#define MC_CMD_0x4e_PRIVILEGE_CTG SRIOV_CTG_INSECURE
 
 /* MC_CMD_SENSOR_SET_LIMS_IN msgrequest */
 #define    MC_CMD_SENSOR_SET_LIMS_IN_LEN 20
 #define       MC_CMD_SENSOR_SET_LIMS_IN_SENSOR_OFST 0
+#define       MC_CMD_SENSOR_SET_LIMS_IN_SENSOR_LEN 4
 /*            Enum values, see field(s): */
 /*               MC_CMD_SENSOR_INFO/MC_CMD_SENSOR_INFO_OUT/MASK */
 /* interpretation is sensor-specific. */
 #define       MC_CMD_SENSOR_SET_LIMS_IN_LOW0_OFST 4
+#define       MC_CMD_SENSOR_SET_LIMS_IN_LOW0_LEN 4
 /* interpretation is sensor-specific. */
 #define       MC_CMD_SENSOR_SET_LIMS_IN_HI0_OFST 8
+#define       MC_CMD_SENSOR_SET_LIMS_IN_HI0_LEN 4
 /* interpretation is sensor-specific. */
 #define       MC_CMD_SENSOR_SET_LIMS_IN_LOW1_OFST 12
+#define       MC_CMD_SENSOR_SET_LIMS_IN_LOW1_LEN 4
 /* interpretation is sensor-specific. */
 #define       MC_CMD_SENSOR_SET_LIMS_IN_HI1_OFST 16
+#define       MC_CMD_SENSOR_SET_LIMS_IN_HI1_LEN 4
 
 /* MC_CMD_SENSOR_SET_LIMS_OUT msgresponse */
 #define    MC_CMD_SENSOR_SET_LIMS_OUT_LEN 0
@@ -4657,9 +5638,13 @@
 /* MC_CMD_GET_RESOURCE_LIMITS_OUT msgresponse */
 #define    MC_CMD_GET_RESOURCE_LIMITS_OUT_LEN 16
 #define       MC_CMD_GET_RESOURCE_LIMITS_OUT_BUFTBL_OFST 0
+#define       MC_CMD_GET_RESOURCE_LIMITS_OUT_BUFTBL_LEN 4
 #define       MC_CMD_GET_RESOURCE_LIMITS_OUT_EVQ_OFST 4
+#define       MC_CMD_GET_RESOURCE_LIMITS_OUT_EVQ_LEN 4
 #define       MC_CMD_GET_RESOURCE_LIMITS_OUT_RXQ_OFST 8
+#define       MC_CMD_GET_RESOURCE_LIMITS_OUT_RXQ_LEN 4
 #define       MC_CMD_GET_RESOURCE_LIMITS_OUT_TXQ_OFST 12
+#define       MC_CMD_GET_RESOURCE_LIMITS_OUT_TXQ_LEN 4
 
 
 /***********************************/
@@ -4680,6 +5665,7 @@
 #define    MC_CMD_NVRAM_PARTITIONS_OUT_LEN(num) (4+4*(num))
 /* total number of partitions */
 #define       MC_CMD_NVRAM_PARTITIONS_OUT_NUM_PARTITIONS_OFST 0
+#define       MC_CMD_NVRAM_PARTITIONS_OUT_NUM_PARTITIONS_LEN 4
 /* type ID code for each of NUM_PARTITIONS partitions */
 #define       MC_CMD_NVRAM_PARTITIONS_OUT_TYPE_ID_OFST 4
 #define       MC_CMD_NVRAM_PARTITIONS_OUT_TYPE_ID_LEN 4
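[Editor's note: TYPE_ID is a variable-length array of 4-byte entries, sized by NUM_PARTITIONS. A sketch of walking it, again assuming the sfc MCDI helpers (MCDI_DWORD, MCDI_ARRAY_DWORD, and the efx_dword_t buffer type); the response buffer is assumed to come from a prior efx_mcdi_rpc() call whose length has already been validated.]

static void list_partitions(efx_dword_t *outbuf)
{
        unsigned int n_parts, i;

        n_parts = MCDI_DWORD(outbuf, NVRAM_PARTITIONS_OUT_NUM_PARTITIONS);
        for (i = 0; i < n_parts; i++) {
                u32 type = MCDI_ARRAY_DWORD(outbuf,
                                            NVRAM_PARTITIONS_OUT_TYPE_ID, i);

                /* compare against the NVRAM_PARTITION_TYPE_* enums */
                pr_info("partition %u: type 0x%x\n", i, type);
        }
}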
@@ -4700,6 +5686,7 @@
 #define    MC_CMD_NVRAM_METADATA_IN_LEN 4
 /* Partition type ID code */
 #define       MC_CMD_NVRAM_METADATA_IN_TYPE_OFST 0
+#define       MC_CMD_NVRAM_METADATA_IN_TYPE_LEN 4
 
 /* MC_CMD_NVRAM_METADATA_OUT msgresponse */
 #define    MC_CMD_NVRAM_METADATA_OUT_LENMIN 20
@@ -4707,7 +5694,9 @@
 #define    MC_CMD_NVRAM_METADATA_OUT_LEN(num) (20+1*(num))
 /* Partition type ID code */
 #define       MC_CMD_NVRAM_METADATA_OUT_TYPE_OFST 0
+#define       MC_CMD_NVRAM_METADATA_OUT_TYPE_LEN 4
 #define       MC_CMD_NVRAM_METADATA_OUT_FLAGS_OFST 4
+#define       MC_CMD_NVRAM_METADATA_OUT_FLAGS_LEN 4
 #define        MC_CMD_NVRAM_METADATA_OUT_SUBTYPE_VALID_LBN 0
 #define        MC_CMD_NVRAM_METADATA_OUT_SUBTYPE_VALID_WIDTH 1
 #define        MC_CMD_NVRAM_METADATA_OUT_VERSION_VALID_LBN 1
@@ -4716,6 +5705,7 @@
 #define        MC_CMD_NVRAM_METADATA_OUT_DESCRIPTION_VALID_WIDTH 1
 /* Subtype ID code for content of this partition */
 #define       MC_CMD_NVRAM_METADATA_OUT_SUBTYPE_OFST 8
+#define       MC_CMD_NVRAM_METADATA_OUT_SUBTYPE_LEN 4
 /* 1st component of W.X.Y.Z version number for content of this partition */
 #define       MC_CMD_NVRAM_METADATA_OUT_VERSION_W_OFST 12
 #define       MC_CMD_NVRAM_METADATA_OUT_VERSION_W_LEN 2
@@ -4756,8 +5746,10 @@
 #define       MC_CMD_GET_MAC_ADDRESSES_OUT_RESERVED_LEN 2
 /* Number of allocated MAC addresses */
 #define       MC_CMD_GET_MAC_ADDRESSES_OUT_MAC_COUNT_OFST 8
+#define       MC_CMD_GET_MAC_ADDRESSES_OUT_MAC_COUNT_LEN 4
 /* Spacing of allocated MAC addresses */
 #define       MC_CMD_GET_MAC_ADDRESSES_OUT_MAC_STRIDE_OFST 12
+#define       MC_CMD_GET_MAC_ADDRESSES_OUT_MAC_STRIDE_LEN 4
 
 
 /***********************************/
@@ -4772,6 +5764,7 @@
 #define    MC_CMD_CLP_IN_LEN 4
 /* Sub operation */
 #define       MC_CMD_CLP_IN_OP_OFST 0
+#define       MC_CMD_CLP_IN_OP_LEN 4
 /* enum: Return to factory default settings */
 #define          MC_CMD_CLP_OP_DEFAULT 0x1
 /* enum: Set MAC address */
@@ -4789,6 +5782,7 @@
 /* MC_CMD_CLP_IN_DEFAULT msgrequest */
 #define    MC_CMD_CLP_IN_DEFAULT_LEN 4
 /*            MC_CMD_CLP_IN_OP_OFST 0 */
+/*            MC_CMD_CLP_IN_OP_LEN 4 */
 
 /* MC_CMD_CLP_OUT_DEFAULT msgresponse */
 #define    MC_CMD_CLP_OUT_DEFAULT_LEN 0
@@ -4796,6 +5790,7 @@
 /* MC_CMD_CLP_IN_SET_MAC msgrequest */
 #define    MC_CMD_CLP_IN_SET_MAC_LEN 12
 /*            MC_CMD_CLP_IN_OP_OFST 0 */
+/*            MC_CMD_CLP_IN_OP_LEN 4 */
 /* MAC address assigned to port */
 #define       MC_CMD_CLP_IN_SET_MAC_ADDR_OFST 4
 #define       MC_CMD_CLP_IN_SET_MAC_ADDR_LEN 6
@@ -4809,6 +5804,7 @@
 /* MC_CMD_CLP_IN_GET_MAC msgrequest */
 #define    MC_CMD_CLP_IN_GET_MAC_LEN 4
 /*            MC_CMD_CLP_IN_OP_OFST 0 */
+/*            MC_CMD_CLP_IN_OP_LEN 4 */
 
 /* MC_CMD_CLP_OUT_GET_MAC msgresponse */
 #define    MC_CMD_CLP_OUT_GET_MAC_LEN 8
@@ -4822,6 +5818,7 @@
 /* MC_CMD_CLP_IN_SET_BOOT msgrequest */
 #define    MC_CMD_CLP_IN_SET_BOOT_LEN 5
 /*            MC_CMD_CLP_IN_OP_OFST 0 */
+/*            MC_CMD_CLP_IN_OP_LEN 4 */
 /* Boot flag */
 #define       MC_CMD_CLP_IN_SET_BOOT_FLAG_OFST 4
 #define       MC_CMD_CLP_IN_SET_BOOT_FLAG_LEN 1
@@ -4832,6 +5829,7 @@
 /* MC_CMD_CLP_IN_GET_BOOT msgrequest */
 #define    MC_CMD_CLP_IN_GET_BOOT_LEN 4
 /*            MC_CMD_CLP_IN_OP_OFST 0 */
+/*            MC_CMD_CLP_IN_OP_LEN 4 */
 
 /* MC_CMD_CLP_OUT_GET_BOOT msgresponse */
 #define    MC_CMD_CLP_OUT_GET_BOOT_LEN 4
@@ -4849,11 +5847,12 @@
  */
 #define MC_CMD_MUM 0x57
 
-#define MC_CMD_0x57_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+#define MC_CMD_0x57_PRIVILEGE_CTG SRIOV_CTG_INSECURE
 
 /* MC_CMD_MUM_IN msgrequest */
 #define    MC_CMD_MUM_IN_LEN 4
 #define       MC_CMD_MUM_IN_OP_HDR_OFST 0
+#define       MC_CMD_MUM_IN_OP_HDR_LEN 4
 #define        MC_CMD_MUM_IN_OP_LBN 0
 #define        MC_CMD_MUM_IN_OP_WIDTH 8
 /* enum: NULL MCDI command to MUM */
@@ -4893,26 +5892,32 @@
 #define    MC_CMD_MUM_IN_NULL_LEN 4
 /* MUM cmd header */
 #define       MC_CMD_MUM_IN_CMD_OFST 0
+#define       MC_CMD_MUM_IN_CMD_LEN 4
 
 /* MC_CMD_MUM_IN_GET_VERSION msgrequest */
 #define    MC_CMD_MUM_IN_GET_VERSION_LEN 4
 /* MUM cmd header */
 /*            MC_CMD_MUM_IN_CMD_OFST 0 */
+/*            MC_CMD_MUM_IN_CMD_LEN 4 */
 
 /* MC_CMD_MUM_IN_READ msgrequest */
 #define    MC_CMD_MUM_IN_READ_LEN 16
 /* MUM cmd header */
 /*            MC_CMD_MUM_IN_CMD_OFST 0 */
+/*            MC_CMD_MUM_IN_CMD_LEN 4 */
 /* ID of the device (connected to MUM) whose registers are to be read */
 #define       MC_CMD_MUM_IN_READ_DEVICE_OFST 4
+#define       MC_CMD_MUM_IN_READ_DEVICE_LEN 4
 /* enum: Hittite HMC1035 clock generator on Sorrento board */
 #define          MC_CMD_MUM_DEV_HITTITE 0x1
 /* enum: Hittite HMC1035 clock generator for NIC-side on Sorrento board */
 #define          MC_CMD_MUM_DEV_HITTITE_NIC 0x2
 /* 32-bit address to read from */
 #define       MC_CMD_MUM_IN_READ_ADDR_OFST 8
+#define       MC_CMD_MUM_IN_READ_ADDR_LEN 4
 /* Number of words to read. */
 #define       MC_CMD_MUM_IN_READ_NUMWORDS_OFST 12
+#define       MC_CMD_MUM_IN_READ_NUMWORDS_LEN 4
 
 /* MC_CMD_MUM_IN_WRITE msgrequest */
 #define    MC_CMD_MUM_IN_WRITE_LENMIN 16
@@ -4920,12 +5925,15 @@
 #define    MC_CMD_MUM_IN_WRITE_LEN(num) (12+4*(num))
 /* MUM cmd header */
 /*            MC_CMD_MUM_IN_CMD_OFST 0 */
+/*            MC_CMD_MUM_IN_CMD_LEN 4 */
 /* ID of the device (connected to MUM) whose registers are to be written */
 #define       MC_CMD_MUM_IN_WRITE_DEVICE_OFST 4
+#define       MC_CMD_MUM_IN_WRITE_DEVICE_LEN 4
 /* enum: Hittite HMC1035 clock generator on Sorrento board */
 /*               MC_CMD_MUM_DEV_HITTITE 0x1 */
 /* 32-bit address to write to */
 #define       MC_CMD_MUM_IN_WRITE_ADDR_OFST 8
+#define       MC_CMD_MUM_IN_WRITE_ADDR_LEN 4
 /* Words to write */
 #define       MC_CMD_MUM_IN_WRITE_BUFFER_OFST 12
 #define       MC_CMD_MUM_IN_WRITE_BUFFER_LEN 4
@@ -4938,12 +5946,16 @@
 #define    MC_CMD_MUM_IN_RAW_CMD_LEN(num) (16+1*(num))
 /* MUM cmd header */
 /*            MC_CMD_MUM_IN_CMD_OFST 0 */
+/*            MC_CMD_MUM_IN_CMD_LEN 4 */
 /* MUM I2C cmd code */
 #define       MC_CMD_MUM_IN_RAW_CMD_CMD_CODE_OFST 4
+#define       MC_CMD_MUM_IN_RAW_CMD_CMD_CODE_LEN 4
 /* Number of bytes to write */
 #define       MC_CMD_MUM_IN_RAW_CMD_NUM_WRITE_OFST 8
+#define       MC_CMD_MUM_IN_RAW_CMD_NUM_WRITE_LEN 4
 /* Number of bytes to read */
 #define       MC_CMD_MUM_IN_RAW_CMD_NUM_READ_OFST 12
+#define       MC_CMD_MUM_IN_RAW_CMD_NUM_READ_LEN 4
 /* Bytes to write */
 #define       MC_CMD_MUM_IN_RAW_CMD_WRITE_DATA_OFST 16
 #define       MC_CMD_MUM_IN_RAW_CMD_WRITE_DATA_LEN 1
@@ -4954,21 +5966,28 @@
 #define    MC_CMD_MUM_IN_LOG_LEN 8
 /* MUM cmd header */
 /*            MC_CMD_MUM_IN_CMD_OFST 0 */
+/*            MC_CMD_MUM_IN_CMD_LEN 4 */
 #define       MC_CMD_MUM_IN_LOG_OP_OFST 4
+#define       MC_CMD_MUM_IN_LOG_OP_LEN 4
 #define          MC_CMD_MUM_IN_LOG_OP_UART  0x1 /* enum */
 
 /* MC_CMD_MUM_IN_LOG_OP_UART msgrequest */
 #define    MC_CMD_MUM_IN_LOG_OP_UART_LEN 12
 /*            MC_CMD_MUM_IN_CMD_OFST 0 */
+/*            MC_CMD_MUM_IN_CMD_LEN 4 */
 /*            MC_CMD_MUM_IN_LOG_OP_OFST 4 */
+/*            MC_CMD_MUM_IN_LOG_OP_LEN 4 */
 /* Enable/disable debug output to UART */
 #define       MC_CMD_MUM_IN_LOG_OP_UART_ENABLE_OFST 8
+#define       MC_CMD_MUM_IN_LOG_OP_UART_ENABLE_LEN 4
 
 /* MC_CMD_MUM_IN_GPIO msgrequest */
 #define    MC_CMD_MUM_IN_GPIO_LEN 8
 /* MUM cmd header */
 /*            MC_CMD_MUM_IN_CMD_OFST 0 */
+/*            MC_CMD_MUM_IN_CMD_LEN 4 */
 #define       MC_CMD_MUM_IN_GPIO_HDR_OFST 4
+#define       MC_CMD_MUM_IN_GPIO_HDR_LEN 4
 #define        MC_CMD_MUM_IN_GPIO_OPCODE_LBN 0
 #define        MC_CMD_MUM_IN_GPIO_OPCODE_WIDTH 8
 #define          MC_CMD_MUM_IN_GPIO_IN_READ 0x0 /* enum */
@@ -4981,40 +6000,56 @@
 /* MC_CMD_MUM_IN_GPIO_IN_READ msgrequest */
 #define    MC_CMD_MUM_IN_GPIO_IN_READ_LEN 8
 /*            MC_CMD_MUM_IN_CMD_OFST 0 */
+/*            MC_CMD_MUM_IN_CMD_LEN 4 */
 #define       MC_CMD_MUM_IN_GPIO_IN_READ_HDR_OFST 4
+#define       MC_CMD_MUM_IN_GPIO_IN_READ_HDR_LEN 4
 
 /* MC_CMD_MUM_IN_GPIO_OUT_WRITE msgrequest */
 #define    MC_CMD_MUM_IN_GPIO_OUT_WRITE_LEN 16
 /*            MC_CMD_MUM_IN_CMD_OFST 0 */
+/*            MC_CMD_MUM_IN_CMD_LEN 4 */
 #define       MC_CMD_MUM_IN_GPIO_OUT_WRITE_HDR_OFST 4
+#define       MC_CMD_MUM_IN_GPIO_OUT_WRITE_HDR_LEN 4
 /* The first 32-bit word to be written to the GPIO OUT register. */
 #define       MC_CMD_MUM_IN_GPIO_OUT_WRITE_GPIOMASK1_OFST 8
+#define       MC_CMD_MUM_IN_GPIO_OUT_WRITE_GPIOMASK1_LEN 4
 /* The second 32-bit word to be written to the GPIO OUT register. */
 #define       MC_CMD_MUM_IN_GPIO_OUT_WRITE_GPIOMASK2_OFST 12
+#define       MC_CMD_MUM_IN_GPIO_OUT_WRITE_GPIOMASK2_LEN 4
 
 /* MC_CMD_MUM_IN_GPIO_OUT_READ msgrequest */
 #define    MC_CMD_MUM_IN_GPIO_OUT_READ_LEN 8
 /*            MC_CMD_MUM_IN_CMD_OFST 0 */
+/*            MC_CMD_MUM_IN_CMD_LEN 4 */
 #define       MC_CMD_MUM_IN_GPIO_OUT_READ_HDR_OFST 4
+#define       MC_CMD_MUM_IN_GPIO_OUT_READ_HDR_LEN 4
 
 /* MC_CMD_MUM_IN_GPIO_OUT_ENABLE_WRITE msgrequest */
 #define    MC_CMD_MUM_IN_GPIO_OUT_ENABLE_WRITE_LEN 16
 /*            MC_CMD_MUM_IN_CMD_OFST 0 */
+/*            MC_CMD_MUM_IN_CMD_LEN 4 */
 #define       MC_CMD_MUM_IN_GPIO_OUT_ENABLE_WRITE_HDR_OFST 4
+#define       MC_CMD_MUM_IN_GPIO_OUT_ENABLE_WRITE_HDR_LEN 4
 /* The first 32-bit word to be written to the GPIO OUT ENABLE register. */
 #define       MC_CMD_MUM_IN_GPIO_OUT_ENABLE_WRITE_GPIOMASK1_OFST 8
+#define       MC_CMD_MUM_IN_GPIO_OUT_ENABLE_WRITE_GPIOMASK1_LEN 4
 /* The second 32-bit word to be written to the GPIO OUT ENABLE register. */
 #define       MC_CMD_MUM_IN_GPIO_OUT_ENABLE_WRITE_GPIOMASK2_OFST 12
+#define       MC_CMD_MUM_IN_GPIO_OUT_ENABLE_WRITE_GPIOMASK2_LEN 4
 
 /* MC_CMD_MUM_IN_GPIO_OUT_ENABLE_READ msgrequest */
 #define    MC_CMD_MUM_IN_GPIO_OUT_ENABLE_READ_LEN 8
 /*            MC_CMD_MUM_IN_CMD_OFST 0 */
+/*            MC_CMD_MUM_IN_CMD_LEN 4 */
 #define       MC_CMD_MUM_IN_GPIO_OUT_ENABLE_READ_HDR_OFST 4
+#define       MC_CMD_MUM_IN_GPIO_OUT_ENABLE_READ_HDR_LEN 4
 
 /* MC_CMD_MUM_IN_GPIO_OP msgrequest */
 #define    MC_CMD_MUM_IN_GPIO_OP_LEN 8
 /*            MC_CMD_MUM_IN_CMD_OFST 0 */
+/*            MC_CMD_MUM_IN_CMD_LEN 4 */
 #define       MC_CMD_MUM_IN_GPIO_OP_HDR_OFST 4
+#define       MC_CMD_MUM_IN_GPIO_OP_HDR_LEN 4
 #define        MC_CMD_MUM_IN_GPIO_OP_BITWISE_OP_LBN 8
 #define        MC_CMD_MUM_IN_GPIO_OP_BITWISE_OP_WIDTH 8
 #define          MC_CMD_MUM_IN_GPIO_OP_OUT_READ 0x0 /* enum */
@@ -5027,26 +6062,34 @@
 /* MC_CMD_MUM_IN_GPIO_OP_OUT_READ msgrequest */
 #define    MC_CMD_MUM_IN_GPIO_OP_OUT_READ_LEN 8
 /*            MC_CMD_MUM_IN_CMD_OFST 0 */
+/*            MC_CMD_MUM_IN_CMD_LEN 4 */
 #define       MC_CMD_MUM_IN_GPIO_OP_OUT_READ_HDR_OFST 4
+#define       MC_CMD_MUM_IN_GPIO_OP_OUT_READ_HDR_LEN 4
 
 /* MC_CMD_MUM_IN_GPIO_OP_OUT_WRITE msgrequest */
 #define    MC_CMD_MUM_IN_GPIO_OP_OUT_WRITE_LEN 8
 /*            MC_CMD_MUM_IN_CMD_OFST 0 */
+/*            MC_CMD_MUM_IN_CMD_LEN 4 */
 #define       MC_CMD_MUM_IN_GPIO_OP_OUT_WRITE_HDR_OFST 4
+#define       MC_CMD_MUM_IN_GPIO_OP_OUT_WRITE_HDR_LEN 4
 #define        MC_CMD_MUM_IN_GPIO_OP_OUT_WRITE_WRITEBIT_LBN 24
 #define        MC_CMD_MUM_IN_GPIO_OP_OUT_WRITE_WRITEBIT_WIDTH 8
 
 /* MC_CMD_MUM_IN_GPIO_OP_OUT_CONFIG msgrequest */
 #define    MC_CMD_MUM_IN_GPIO_OP_OUT_CONFIG_LEN 8
 /*            MC_CMD_MUM_IN_CMD_OFST 0 */
+/*            MC_CMD_MUM_IN_CMD_LEN 4 */
 #define       MC_CMD_MUM_IN_GPIO_OP_OUT_CONFIG_HDR_OFST 4
+#define       MC_CMD_MUM_IN_GPIO_OP_OUT_CONFIG_HDR_LEN 4
 #define        MC_CMD_MUM_IN_GPIO_OP_OUT_CONFIG_CFG_LBN 24
 #define        MC_CMD_MUM_IN_GPIO_OP_OUT_CONFIG_CFG_WIDTH 8
 
 /* MC_CMD_MUM_IN_GPIO_OP_OUT_ENABLE msgrequest */
 #define    MC_CMD_MUM_IN_GPIO_OP_OUT_ENABLE_LEN 8
 /*            MC_CMD_MUM_IN_CMD_OFST 0 */
+/*            MC_CMD_MUM_IN_CMD_LEN 4 */
 #define       MC_CMD_MUM_IN_GPIO_OP_OUT_ENABLE_HDR_OFST 4
+#define       MC_CMD_MUM_IN_GPIO_OP_OUT_ENABLE_HDR_LEN 4
 #define        MC_CMD_MUM_IN_GPIO_OP_OUT_ENABLE_ENABLEBIT_LBN 24
 #define        MC_CMD_MUM_IN_GPIO_OP_OUT_ENABLE_ENABLEBIT_WIDTH 8
 
@@ -5054,7 +6097,9 @@
 #define    MC_CMD_MUM_IN_READ_SENSORS_LEN 8
 /* MUM cmd header */
 /*            MC_CMD_MUM_IN_CMD_OFST 0 */
+/*            MC_CMD_MUM_IN_CMD_LEN 4 */
 #define       MC_CMD_MUM_IN_READ_SENSORS_PARAMS_OFST 4
+#define       MC_CMD_MUM_IN_READ_SENSORS_PARAMS_LEN 4
 #define        MC_CMD_MUM_IN_READ_SENSORS_SENSOR_ID_LBN 0
 #define        MC_CMD_MUM_IN_READ_SENSORS_SENSOR_ID_WIDTH 8
 #define        MC_CMD_MUM_IN_READ_SENSORS_NUM_SENSORS_LBN 8
@@ -5064,13 +6109,16 @@
 #define    MC_CMD_MUM_IN_PROGRAM_CLOCKS_LEN 12
 /* MUM cmd header */
 /*            MC_CMD_MUM_IN_CMD_OFST 0 */
+/*            MC_CMD_MUM_IN_CMD_LEN 4 */
 /* Bit-mask of clocks to be programmed */
 #define       MC_CMD_MUM_IN_PROGRAM_CLOCKS_MASK_OFST 4
+#define       MC_CMD_MUM_IN_PROGRAM_CLOCKS_MASK_LEN 4
 #define          MC_CMD_MUM_CLOCK_ID_FPGA 0x0 /* enum */
 #define          MC_CMD_MUM_CLOCK_ID_DDR 0x1 /* enum */
 #define          MC_CMD_MUM_CLOCK_ID_NIC 0x2 /* enum */
 /* Control flags for clock programming */
 #define       MC_CMD_MUM_IN_PROGRAM_CLOCKS_FLAGS_OFST 8
+#define       MC_CMD_MUM_IN_PROGRAM_CLOCKS_FLAGS_LEN 4
 #define        MC_CMD_MUM_IN_PROGRAM_CLOCKS_OVERCLOCK_110_LBN 0
 #define        MC_CMD_MUM_IN_PROGRAM_CLOCKS_OVERCLOCK_110_WIDTH 1
 #define        MC_CMD_MUM_IN_PROGRAM_CLOCKS_CLOCK_NIC_FROM_FPGA_LBN 1
@@ -5082,19 +6130,24 @@
 #define    MC_CMD_MUM_IN_FPGA_LOAD_LEN 8
 /* MUM cmd header */
 /*            MC_CMD_MUM_IN_CMD_OFST 0 */
+/*            MC_CMD_MUM_IN_CMD_LEN 4 */
 /* Enable/Disable FPGA config from flash */
 #define       MC_CMD_MUM_IN_FPGA_LOAD_ENABLE_OFST 4
+#define       MC_CMD_MUM_IN_FPGA_LOAD_ENABLE_LEN 4
 
 /* MC_CMD_MUM_IN_READ_ATB_SENSOR msgrequest */
 #define    MC_CMD_MUM_IN_READ_ATB_SENSOR_LEN 4
 /* MUM cmd header */
 /*            MC_CMD_MUM_IN_CMD_OFST 0 */
+/*            MC_CMD_MUM_IN_CMD_LEN 4 */
 
 /* MC_CMD_MUM_IN_QSFP msgrequest */
 #define    MC_CMD_MUM_IN_QSFP_LEN 12
 /* MUM cmd header */
 /*            MC_CMD_MUM_IN_CMD_OFST 0 */
+/*            MC_CMD_MUM_IN_CMD_LEN 4 */
 #define       MC_CMD_MUM_IN_QSFP_HDR_OFST 4
+#define       MC_CMD_MUM_IN_QSFP_HDR_LEN 4
 #define        MC_CMD_MUM_IN_QSFP_OPCODE_LBN 0
 #define        MC_CMD_MUM_IN_QSFP_OPCODE_WIDTH 4
 #define          MC_CMD_MUM_IN_QSFP_INIT 0x0 /* enum */
@@ -5104,52 +6157,77 @@
 #define          MC_CMD_MUM_IN_QSFP_FILL_STATS 0x4 /* enum */
 #define          MC_CMD_MUM_IN_QSFP_POLL_BIST 0x5 /* enum */
 #define       MC_CMD_MUM_IN_QSFP_IDX_OFST 8
+#define       MC_CMD_MUM_IN_QSFP_IDX_LEN 4
 
 /* MC_CMD_MUM_IN_QSFP_INIT msgrequest */
 #define    MC_CMD_MUM_IN_QSFP_INIT_LEN 16
 /*            MC_CMD_MUM_IN_CMD_OFST 0 */
+/*            MC_CMD_MUM_IN_CMD_LEN 4 */
 #define       MC_CMD_MUM_IN_QSFP_INIT_HDR_OFST 4
+#define       MC_CMD_MUM_IN_QSFP_INIT_HDR_LEN 4
 #define       MC_CMD_MUM_IN_QSFP_INIT_IDX_OFST 8
+#define       MC_CMD_MUM_IN_QSFP_INIT_IDX_LEN 4
 #define       MC_CMD_MUM_IN_QSFP_INIT_CAGE_OFST 12
+#define       MC_CMD_MUM_IN_QSFP_INIT_CAGE_LEN 4
 
 /* MC_CMD_MUM_IN_QSFP_RECONFIGURE msgrequest */
 #define    MC_CMD_MUM_IN_QSFP_RECONFIGURE_LEN 24
 /*            MC_CMD_MUM_IN_CMD_OFST 0 */
+/*            MC_CMD_MUM_IN_CMD_LEN 4 */
 #define       MC_CMD_MUM_IN_QSFP_RECONFIGURE_HDR_OFST 4
+#define       MC_CMD_MUM_IN_QSFP_RECONFIGURE_HDR_LEN 4
 #define       MC_CMD_MUM_IN_QSFP_RECONFIGURE_IDX_OFST 8
+#define       MC_CMD_MUM_IN_QSFP_RECONFIGURE_IDX_LEN 4
 #define       MC_CMD_MUM_IN_QSFP_RECONFIGURE_TX_DISABLE_OFST 12
+#define       MC_CMD_MUM_IN_QSFP_RECONFIGURE_TX_DISABLE_LEN 4
 #define       MC_CMD_MUM_IN_QSFP_RECONFIGURE_PORT_LANES_OFST 16
+#define       MC_CMD_MUM_IN_QSFP_RECONFIGURE_PORT_LANES_LEN 4
 #define       MC_CMD_MUM_IN_QSFP_RECONFIGURE_PORT_LINK_SPEED_OFST 20
+#define       MC_CMD_MUM_IN_QSFP_RECONFIGURE_PORT_LINK_SPEED_LEN 4
 
 /* MC_CMD_MUM_IN_QSFP_GET_SUPPORTED_CAP msgrequest */
 #define    MC_CMD_MUM_IN_QSFP_GET_SUPPORTED_CAP_LEN 12
 /*            MC_CMD_MUM_IN_CMD_OFST 0 */
+/*            MC_CMD_MUM_IN_CMD_LEN 4 */
 #define       MC_CMD_MUM_IN_QSFP_GET_SUPPORTED_CAP_HDR_OFST 4
+#define       MC_CMD_MUM_IN_QSFP_GET_SUPPORTED_CAP_HDR_LEN 4
 #define       MC_CMD_MUM_IN_QSFP_GET_SUPPORTED_CAP_IDX_OFST 8
+#define       MC_CMD_MUM_IN_QSFP_GET_SUPPORTED_CAP_IDX_LEN 4
 
 /* MC_CMD_MUM_IN_QSFP_GET_MEDIA_INFO msgrequest */
 #define    MC_CMD_MUM_IN_QSFP_GET_MEDIA_INFO_LEN 16
 /*            MC_CMD_MUM_IN_CMD_OFST 0 */
+/*            MC_CMD_MUM_IN_CMD_LEN 4 */
 #define       MC_CMD_MUM_IN_QSFP_GET_MEDIA_INFO_HDR_OFST 4
+#define       MC_CMD_MUM_IN_QSFP_GET_MEDIA_INFO_HDR_LEN 4
 #define       MC_CMD_MUM_IN_QSFP_GET_MEDIA_INFO_IDX_OFST 8
+#define       MC_CMD_MUM_IN_QSFP_GET_MEDIA_INFO_IDX_LEN 4
 #define       MC_CMD_MUM_IN_QSFP_GET_MEDIA_INFO_PAGE_OFST 12
+#define       MC_CMD_MUM_IN_QSFP_GET_MEDIA_INFO_PAGE_LEN 4
 
 /* MC_CMD_MUM_IN_QSFP_FILL_STATS msgrequest */
 #define    MC_CMD_MUM_IN_QSFP_FILL_STATS_LEN 12
 /*            MC_CMD_MUM_IN_CMD_OFST 0 */
+/*            MC_CMD_MUM_IN_CMD_LEN 4 */
 #define       MC_CMD_MUM_IN_QSFP_FILL_STATS_HDR_OFST 4
+#define       MC_CMD_MUM_IN_QSFP_FILL_STATS_HDR_LEN 4
 #define       MC_CMD_MUM_IN_QSFP_FILL_STATS_IDX_OFST 8
+#define       MC_CMD_MUM_IN_QSFP_FILL_STATS_IDX_LEN 4
 
 /* MC_CMD_MUM_IN_QSFP_POLL_BIST msgrequest */
 #define    MC_CMD_MUM_IN_QSFP_POLL_BIST_LEN 12
 /*            MC_CMD_MUM_IN_CMD_OFST 0 */
+/*            MC_CMD_MUM_IN_CMD_LEN 4 */
 #define       MC_CMD_MUM_IN_QSFP_POLL_BIST_HDR_OFST 4
+#define       MC_CMD_MUM_IN_QSFP_POLL_BIST_HDR_LEN 4
 #define       MC_CMD_MUM_IN_QSFP_POLL_BIST_IDX_OFST 8
+#define       MC_CMD_MUM_IN_QSFP_POLL_BIST_IDX_LEN 4
 
 /* MC_CMD_MUM_IN_READ_DDR_INFO msgrequest */
 #define    MC_CMD_MUM_IN_READ_DDR_INFO_LEN 4
 /* MUM cmd header */
 /*            MC_CMD_MUM_IN_CMD_OFST 0 */
+/*            MC_CMD_MUM_IN_CMD_LEN 4 */
 
 /* MC_CMD_MUM_OUT msgresponse */
 #define    MC_CMD_MUM_OUT_LEN 0
@@ -5160,6 +6238,7 @@
 /* MC_CMD_MUM_OUT_GET_VERSION msgresponse */
 #define    MC_CMD_MUM_OUT_GET_VERSION_LEN 12
 #define       MC_CMD_MUM_OUT_GET_VERSION_FIRMWARE_OFST 0
+#define       MC_CMD_MUM_OUT_GET_VERSION_FIRMWARE_LEN 4
 #define       MC_CMD_MUM_OUT_GET_VERSION_VERSION_OFST 4
 #define       MC_CMD_MUM_OUT_GET_VERSION_VERSION_LEN 8
 #define       MC_CMD_MUM_OUT_GET_VERSION_VERSION_LO_OFST 4
@@ -5197,8 +6276,10 @@
 #define    MC_CMD_MUM_OUT_GPIO_IN_READ_LEN 8
 /* The first 32-bit word read from the GPIO IN register. */
 #define       MC_CMD_MUM_OUT_GPIO_IN_READ_GPIOMASK1_OFST 0
+#define       MC_CMD_MUM_OUT_GPIO_IN_READ_GPIOMASK1_LEN 4
 /* The second 32-bit word read from the GPIO IN register. */
 #define       MC_CMD_MUM_OUT_GPIO_IN_READ_GPIOMASK2_OFST 4
+#define       MC_CMD_MUM_OUT_GPIO_IN_READ_GPIOMASK2_LEN 4
 
 /* MC_CMD_MUM_OUT_GPIO_OUT_WRITE msgresponse */
 #define    MC_CMD_MUM_OUT_GPIO_OUT_WRITE_LEN 0
@@ -5207,8 +6288,10 @@
 #define    MC_CMD_MUM_OUT_GPIO_OUT_READ_LEN 8
 /* The first 32-bit word read from the GPIO OUT register. */
 #define       MC_CMD_MUM_OUT_GPIO_OUT_READ_GPIOMASK1_OFST 0
+#define       MC_CMD_MUM_OUT_GPIO_OUT_READ_GPIOMASK1_LEN 4
 /* The second 32-bit word read from the GPIO OUT register. */
 #define       MC_CMD_MUM_OUT_GPIO_OUT_READ_GPIOMASK2_OFST 4
+#define       MC_CMD_MUM_OUT_GPIO_OUT_READ_GPIOMASK2_LEN 4
 
 /* MC_CMD_MUM_OUT_GPIO_OUT_ENABLE_WRITE msgresponse */
 #define    MC_CMD_MUM_OUT_GPIO_OUT_ENABLE_WRITE_LEN 0
@@ -5216,11 +6299,14 @@
 /* MC_CMD_MUM_OUT_GPIO_OUT_ENABLE_READ msgresponse */
 #define    MC_CMD_MUM_OUT_GPIO_OUT_ENABLE_READ_LEN 8
 #define       MC_CMD_MUM_OUT_GPIO_OUT_ENABLE_READ_GPIOMASK1_OFST 0
+#define       MC_CMD_MUM_OUT_GPIO_OUT_ENABLE_READ_GPIOMASK1_LEN 4
 #define       MC_CMD_MUM_OUT_GPIO_OUT_ENABLE_READ_GPIOMASK2_OFST 4
+#define       MC_CMD_MUM_OUT_GPIO_OUT_ENABLE_READ_GPIOMASK2_LEN 4
 
 /* MC_CMD_MUM_OUT_GPIO_OP_OUT_READ msgresponse */
 #define    MC_CMD_MUM_OUT_GPIO_OP_OUT_READ_LEN 4
 #define       MC_CMD_MUM_OUT_GPIO_OP_OUT_READ_BIT_READ_OFST 0
+#define       MC_CMD_MUM_OUT_GPIO_OP_OUT_READ_BIT_READ_LEN 4
 
 /* MC_CMD_MUM_OUT_GPIO_OP_OUT_WRITE msgresponse */
 #define    MC_CMD_MUM_OUT_GPIO_OP_OUT_WRITE_LEN 0
@@ -5249,6 +6335,7 @@
 /* MC_CMD_MUM_OUT_PROGRAM_CLOCKS msgresponse */
 #define    MC_CMD_MUM_OUT_PROGRAM_CLOCKS_LEN 4
 #define       MC_CMD_MUM_OUT_PROGRAM_CLOCKS_OK_MASK_OFST 0
+#define       MC_CMD_MUM_OUT_PROGRAM_CLOCKS_OK_MASK_LEN 4
 
 /* MC_CMD_MUM_OUT_FPGA_LOAD msgresponse */
 #define    MC_CMD_MUM_OUT_FPGA_LOAD_LEN 0
@@ -5256,6 +6343,7 @@
 /* MC_CMD_MUM_OUT_READ_ATB_SENSOR msgresponse */
 #define    MC_CMD_MUM_OUT_READ_ATB_SENSOR_LEN 4
 #define       MC_CMD_MUM_OUT_READ_ATB_SENSOR_RESULT_OFST 0
+#define       MC_CMD_MUM_OUT_READ_ATB_SENSOR_RESULT_LEN 4
 
 /* MC_CMD_MUM_OUT_QSFP_INIT msgresponse */
 #define    MC_CMD_MUM_OUT_QSFP_INIT_LEN 0
@@ -5263,7 +6351,9 @@
 /* MC_CMD_MUM_OUT_QSFP_RECONFIGURE msgresponse */
 #define    MC_CMD_MUM_OUT_QSFP_RECONFIGURE_LEN 8
 #define       MC_CMD_MUM_OUT_QSFP_RECONFIGURE_PORT_PHY_LP_CAP_OFST 0
+#define       MC_CMD_MUM_OUT_QSFP_RECONFIGURE_PORT_PHY_LP_CAP_LEN 4
 #define       MC_CMD_MUM_OUT_QSFP_RECONFIGURE_PORT_PHY_FLAGS_OFST 4
+#define       MC_CMD_MUM_OUT_QSFP_RECONFIGURE_PORT_PHY_FLAGS_LEN 4
 #define        MC_CMD_MUM_OUT_QSFP_RECONFIGURE_PORT_PHY_READY_LBN 0
 #define        MC_CMD_MUM_OUT_QSFP_RECONFIGURE_PORT_PHY_READY_WIDTH 1
 #define        MC_CMD_MUM_OUT_QSFP_RECONFIGURE_PORT_PHY_LINK_UP_LBN 1
@@ -5272,6 +6362,7 @@
 /* MC_CMD_MUM_OUT_QSFP_GET_SUPPORTED_CAP msgresponse */
 #define    MC_CMD_MUM_OUT_QSFP_GET_SUPPORTED_CAP_LEN 4
 #define       MC_CMD_MUM_OUT_QSFP_GET_SUPPORTED_CAP_PORT_PHY_LP_CAP_OFST 0
+#define       MC_CMD_MUM_OUT_QSFP_GET_SUPPORTED_CAP_PORT_PHY_LP_CAP_LEN 4
 
 /* MC_CMD_MUM_OUT_QSFP_GET_MEDIA_INFO msgresponse */
 #define    MC_CMD_MUM_OUT_QSFP_GET_MEDIA_INFO_LENMIN 5
@@ -5279,6 +6370,7 @@
 #define    MC_CMD_MUM_OUT_QSFP_GET_MEDIA_INFO_LEN(num) (4+1*(num))
 /* in bytes */
 #define       MC_CMD_MUM_OUT_QSFP_GET_MEDIA_INFO_DATALEN_OFST 0
+#define       MC_CMD_MUM_OUT_QSFP_GET_MEDIA_INFO_DATALEN_LEN 4
 #define       MC_CMD_MUM_OUT_QSFP_GET_MEDIA_INFO_DATA_OFST 4
 #define       MC_CMD_MUM_OUT_QSFP_GET_MEDIA_INFO_DATA_LEN 1
 #define       MC_CMD_MUM_OUT_QSFP_GET_MEDIA_INFO_DATA_MINNUM 1
@@ -5287,11 +6379,14 @@
 /* MC_CMD_MUM_OUT_QSFP_FILL_STATS msgresponse */
 #define    MC_CMD_MUM_OUT_QSFP_FILL_STATS_LEN 8
 #define       MC_CMD_MUM_OUT_QSFP_FILL_STATS_PORT_PHY_STATS_PMA_PMD_LINK_UP_OFST 0
+#define       MC_CMD_MUM_OUT_QSFP_FILL_STATS_PORT_PHY_STATS_PMA_PMD_LINK_UP_LEN 4
 #define       MC_CMD_MUM_OUT_QSFP_FILL_STATS_PORT_PHY_STATS_PCS_LINK_UP_OFST 4
+#define       MC_CMD_MUM_OUT_QSFP_FILL_STATS_PORT_PHY_STATS_PCS_LINK_UP_LEN 4
 
 /* MC_CMD_MUM_OUT_QSFP_POLL_BIST msgresponse */
 #define    MC_CMD_MUM_OUT_QSFP_POLL_BIST_LEN 4
 #define       MC_CMD_MUM_OUT_QSFP_POLL_BIST_TEST_OFST 0
+#define       MC_CMD_MUM_OUT_QSFP_POLL_BIST_TEST_LEN 4
 
 /* MC_CMD_MUM_OUT_READ_DDR_INFO msgresponse */
 #define    MC_CMD_MUM_OUT_READ_DDR_INFO_LENMIN 24
@@ -5299,12 +6394,14 @@
 #define    MC_CMD_MUM_OUT_READ_DDR_INFO_LEN(num) (8+8*(num))
 /* Discrete (soldered) DDR resistor strap info */
 #define       MC_CMD_MUM_OUT_READ_DDR_INFO_DISCRETE_DDR_INFO_OFST 0
+#define       MC_CMD_MUM_OUT_READ_DDR_INFO_DISCRETE_DDR_INFO_LEN 4
 #define        MC_CMD_MUM_OUT_READ_DDR_INFO_VRATIO_LBN 0
 #define        MC_CMD_MUM_OUT_READ_DDR_INFO_VRATIO_WIDTH 16
 #define        MC_CMD_MUM_OUT_READ_DDR_INFO_RESERVED1_LBN 16
 #define        MC_CMD_MUM_OUT_READ_DDR_INFO_RESERVED1_WIDTH 16
 /* Number of SODIMM info records */
 #define       MC_CMD_MUM_OUT_READ_DDR_INFO_NUM_RECORDS_OFST 4
+#define       MC_CMD_MUM_OUT_READ_DDR_INFO_NUM_RECORDS_LEN 4
 /* Array of SODIMM info records */
 #define       MC_CMD_MUM_OUT_READ_DDR_INFO_SODIMM_INFO_RECORD_OFST 8
 #define       MC_CMD_MUM_OUT_READ_DDR_INFO_SODIMM_INFO_RECORD_LEN 8
@@ -5365,6 +6462,7 @@
 /* EVB_PORT_ID structuredef */
 #define    EVB_PORT_ID_LEN 4
 #define       EVB_PORT_ID_PORT_ID_OFST 0
+#define       EVB_PORT_ID_PORT_ID_LEN 4
 /* enum: An invalid port handle. */
 #define          EVB_PORT_ID_NULL  0x0
 /* enum: The port assigned to this function. */
@@ -5460,6 +6558,10 @@
 #define          NVRAM_PARTITION_TYPE_FC_LOG               0xb04
 /* enum: MUM firmware partition */
 #define          NVRAM_PARTITION_TYPE_MUM_FIRMWARE         0xc00
+/* enum: SUC firmware partition (this is intentionally an alias of
+ * MUM_FIRMWARE)
+ */
+#define          NVRAM_PARTITION_TYPE_SUC_FIRMWARE         0xc00
 /* enum: MUM Non-volatile log output partition. */
 #define          NVRAM_PARTITION_TYPE_MUM_LOG              0xc01
 /* enum: MUM Application table partition. */
@@ -5474,8 +6576,8 @@
 #define          NVRAM_PARTITION_TYPE_MUM_FUSELOCK         0xc06
 /* enum: UEFI expansion ROM if separate from PXE */
 #define          NVRAM_PARTITION_TYPE_EXPANSION_UEFI       0xd00
-/* enum: Spare partition 0 */
-#define          NVRAM_PARTITION_TYPE_SPARE_0              0x1000
+/* enum: Used by the expansion ROM for logging */
+#define          NVRAM_PARTITION_TYPE_PXE_LOG              0x1000
 /* enum: Used for XIP code of shmbooted images */
 #define          NVRAM_PARTITION_TYPE_XIP_SCRATCH          0x1100
 /* enum: Spare partition 2 */
@@ -5488,6 +6590,27 @@
 #define          NVRAM_PARTITION_TYPE_SPARE_4              0x1400
 /* enum: Spare partition 5 */
 #define          NVRAM_PARTITION_TYPE_SPARE_5              0x1500
+/* enum: Partition for reporting MC status. See mc_flash_layout.h
+ * medford_mc_status_hdr_t for layout on Medford.
+ */
+#define          NVRAM_PARTITION_TYPE_STATUS               0x1600
+/* enum: Spare partition 13 */
+#define          NVRAM_PARTITION_TYPE_SPARE_13              0x1700
+/* enum: Spare partition 14 */
+#define          NVRAM_PARTITION_TYPE_SPARE_14              0x1800
+/* enum: Spare partition 15 */
+#define          NVRAM_PARTITION_TYPE_SPARE_15              0x1900
+/* enum: Spare partition 16 */
+#define          NVRAM_PARTITION_TYPE_SPARE_16              0x1a00
+/* enum: Factory defaults for dynamic configuration */
+#define          NVRAM_PARTITION_TYPE_DYNCONFIG_DEFAULTS    0x1b00
+/* enum: Factory defaults for expansion ROM configuration */
+#define          NVRAM_PARTITION_TYPE_ROMCONFIG_DEFAULTS    0x1c00
+/* enum: Field Replaceable Unit inventory information for use on IPMI
+ * platforms. See SF-119124-PS. The STATIC_CONFIG partition may contain a
+ * subset of the information stored in this partition.
+ */
+#define          NVRAM_PARTITION_TYPE_FRU_INFORMATION       0x1d00
 /* enum: Start of reserved value range (firmware may use for any purpose) */
 #define          NVRAM_PARTITION_TYPE_RESERVED_VALUES_MIN  0xff00
 /* enum: End of reserved value range (firmware may use for any purpose) */
@@ -5502,6 +6625,7 @@
 /* LICENSED_APP_ID structuredef */
 #define    LICENSED_APP_ID_LEN 4
 #define       LICENSED_APP_ID_ID_OFST 0
+#define       LICENSED_APP_ID_ID_LEN 4
 /* enum: OpenOnload */
 #define          LICENSED_APP_ID_ONLOAD                  0x1
 /* enum: PTP timestamping */
@@ -5526,6 +6650,14 @@
 #define          LICENSED_APP_ID_SOLARCAPTURE_TAP        0x400
 /* enum: Capture SolarSystem 40G */
 #define          LICENSED_APP_ID_CAPTURE_SOLARSYSTEM_40G 0x800
+/* enum: Capture SolarSystem 1G */
+#define          LICENSED_APP_ID_CAPTURE_SOLARSYSTEM_1G  0x1000
+/* enum: ScaleOut Onload */
+#define          LICENSED_APP_ID_SCALEOUT_ONLOAD         0x2000
+/* enum: SCS Network Analytics Dashboard */
+#define          LICENSED_APP_ID_DSHBRD                  0x4000
+/* enum: SolarCapture Trading Analytics */
+#define          LICENSED_APP_ID_SCATRD                  0x8000
 #define       LICENSED_APP_ID_ID_LBN 0
 #define       LICENSED_APP_ID_ID_WIDTH 32
 
@@ -5590,6 +6722,14 @@
 #define        LICENSED_V3_APPS_SOLARCAPTURE_TAP_WIDTH 1
 #define        LICENSED_V3_APPS_CAPTURE_SOLARSYSTEM_40G_LBN 11
 #define        LICENSED_V3_APPS_CAPTURE_SOLARSYSTEM_40G_WIDTH 1
+#define        LICENSED_V3_APPS_CAPTURE_SOLARSYSTEM_1G_LBN 12
+#define        LICENSED_V3_APPS_CAPTURE_SOLARSYSTEM_1G_WIDTH 1
+#define        LICENSED_V3_APPS_SCALEOUT_ONLOAD_LBN 13
+#define        LICENSED_V3_APPS_SCALEOUT_ONLOAD_WIDTH 1
+#define        LICENSED_V3_APPS_DSHBRD_LBN 14
+#define        LICENSED_V3_APPS_DSHBRD_WIDTH 1
+#define        LICENSED_V3_APPS_SCATRD_LBN 15
+#define        LICENSED_V3_APPS_SCATRD_WIDTH 1
 #define       LICENSED_V3_APPS_MASK_LBN 0
 #define       LICENSED_V3_APPS_MASK_WIDTH 64
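[Editor's note: the new application bits above land in a 64-bit apps mask addressed purely by LBN/WIDTH pairs. A self-contained sketch of testing one of them; "apps_mask" stands in for the mask as read out of a licensing response, and the helper name is made up.]

#include <stdbool.h>
#include <stdint.h>

/* Extract a WIDTH-bit field starting at bit LBN of the 64-bit apps mask */
static bool licensed_v3_app_enabled(uint64_t apps_mask, unsigned int lbn,
                                    unsigned int width)
{
        uint64_t field_mask = (width < 64) ? ((1ULL << width) - 1) : ~0ULL;

        return (apps_mask >> lbn) & field_mask;
}

/* e.g. licensed_v3_app_enabled(mask, LICENSED_V3_APPS_SCALEOUT_ONLOAD_LBN,
 *                              LICENSED_V3_APPS_SCALEOUT_ONLOAD_WIDTH)
 */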
 
@@ -5636,6 +6776,18 @@
 #define       TX_TIMESTAMP_EVENT_TX_EV_TYPE_LEN 1
 /* enum: This is a TX completion event, not a timestamp */
 #define          TX_TIMESTAMP_EVENT_TX_EV_COMPLETION  0x0
+/* enum: This is a TX completion event for a CTPIO transmit. The event format
+ * is the same as for TX_EV_COMPLETION.
+ */
+#define          TX_TIMESTAMP_EVENT_TX_EV_CTPIO_COMPLETION  0x11
+/* enum: This is the low part of a TX timestamp for a CTPIO transmission. The
+ * event format is the same as for TX_EV_TSTAMP_LO
+ */
+#define          TX_TIMESTAMP_EVENT_TX_EV_CTPIO_TS_LO  0x12
+/* enum: This is the high part of a TX timestamp for a CTPIO transmission. The
+ * event format is the same as for TX_EV_TSTAMP_HI
+ */
+#define          TX_TIMESTAMP_EVENT_TX_EV_CTPIO_TS_HI  0x13
 /* enum: This is the low part of a TX timestamp event */
 #define          TX_TIMESTAMP_EVENT_TX_EV_TSTAMP_LO  0x51
 /* enum: This is the high part of a TX timestamp event */
@@ -5669,6 +6821,19 @@
 #define       RSS_MODE_HASH_SELECTOR_LBN 0
 #define       RSS_MODE_HASH_SELECTOR_WIDTH 8
 
+/* CTPIO_STATS_MAP structuredef */
+#define    CTPIO_STATS_MAP_LEN 4
+/* The (function relative) VI number */
+#define       CTPIO_STATS_MAP_VI_OFST 0
+#define       CTPIO_STATS_MAP_VI_LEN 2
+#define       CTPIO_STATS_MAP_VI_LBN 0
+#define       CTPIO_STATS_MAP_VI_WIDTH 16
+/* The target bucket for the VI */
+#define       CTPIO_STATS_MAP_BUCKET_OFST 2
+#define       CTPIO_STATS_MAP_BUCKET_LEN 2
+#define       CTPIO_STATS_MAP_BUCKET_LBN 16
+#define       CTPIO_STATS_MAP_BUCKET_WIDTH 16
+
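[Editor's note: CTPIO_STATS_MAP packs two 16-bit fields into one dword, described both by byte OFST/LEN pairs and by bit LBN/WIDTH pairs. A minimal sketch of packing one entry into a little-endian MCDI buffer using the byte offsets; "buf" and the helper are hypothetical, and the unaligned accessors are the usual kernel ones.]

#include <linux/types.h>
#include <asm/unaligned.h>

static void ctpio_stats_map_pack(u8 *buf, u16 vi, u16 bucket)
{
        /* VI lives in bytes 0-1, BUCKET in bytes 2-3; MCDI is little-endian */
        put_unaligned_le16(vi, buf + CTPIO_STATS_MAP_VI_OFST);
        put_unaligned_le16(bucket, buf + CTPIO_STATS_MAP_BUCKET_OFST);
}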
 
 /***********************************/
 /* MC_CMD_READ_REGS
@@ -5676,7 +6841,7 @@
  */
 #define MC_CMD_READ_REGS 0x50
 
-#define MC_CMD_0x50_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+#define MC_CMD_0x50_PRIVILEGE_CTG SRIOV_CTG_INSECURE
 
 /* MC_CMD_READ_REGS_IN msgrequest */
 #define    MC_CMD_READ_REGS_IN_LEN 0
@@ -5709,17 +6874,22 @@
 #define    MC_CMD_INIT_EVQ_IN_LEN(num) (36+8*(num))
 /* Size, in entries */
 #define       MC_CMD_INIT_EVQ_IN_SIZE_OFST 0
+#define       MC_CMD_INIT_EVQ_IN_SIZE_LEN 4
 /* Desired instance. Must be set to a specific instance, which is a function
  * local queue index.
  */
 #define       MC_CMD_INIT_EVQ_IN_INSTANCE_OFST 4
+#define       MC_CMD_INIT_EVQ_IN_INSTANCE_LEN 4
 /* The initial timer value. The load value is ignored if the timer mode is DIS.
  */
 #define       MC_CMD_INIT_EVQ_IN_TMR_LOAD_OFST 8
+#define       MC_CMD_INIT_EVQ_IN_TMR_LOAD_LEN 4
 /* The reload value is ignored in one-shot modes */
 #define       MC_CMD_INIT_EVQ_IN_TMR_RELOAD_OFST 12
+#define       MC_CMD_INIT_EVQ_IN_TMR_RELOAD_LEN 4
 /* tbd */
 #define       MC_CMD_INIT_EVQ_IN_FLAGS_OFST 16
+#define       MC_CMD_INIT_EVQ_IN_FLAGS_LEN 4
 #define        MC_CMD_INIT_EVQ_IN_FLAG_INTERRUPTING_LBN 0
 #define        MC_CMD_INIT_EVQ_IN_FLAG_INTERRUPTING_WIDTH 1
 #define        MC_CMD_INIT_EVQ_IN_FLAG_RPTR_DOS_LBN 1
@@ -5735,6 +6905,7 @@
 #define        MC_CMD_INIT_EVQ_IN_FLAG_USE_TIMER_LBN 6
 #define        MC_CMD_INIT_EVQ_IN_FLAG_USE_TIMER_WIDTH 1
 #define       MC_CMD_INIT_EVQ_IN_TMR_MODE_OFST 20
+#define       MC_CMD_INIT_EVQ_IN_TMR_MODE_LEN 4
 /* enum: Disabled */
 #define          MC_CMD_INIT_EVQ_IN_TMR_MODE_DIS 0x0
 /* enum: Immediate */
@@ -5745,13 +6916,16 @@
 #define          MC_CMD_INIT_EVQ_IN_TMR_INT_HLDOFF 0x3
 /* Target EVQ for wakeups if in wakeup mode. */
 #define       MC_CMD_INIT_EVQ_IN_TARGET_EVQ_OFST 24
+#define       MC_CMD_INIT_EVQ_IN_TARGET_EVQ_LEN 4
 /* Target interrupt if in interrupting mode (note union with target EVQ). Use
  * MC_CMD_RESOURCE_INSTANCE_ANY unless a specific one is required for test
  * purposes.
  */
 #define       MC_CMD_INIT_EVQ_IN_IRQ_NUM_OFST 24
+#define       MC_CMD_INIT_EVQ_IN_IRQ_NUM_LEN 4
 /* Event Counter Mode. */
 #define       MC_CMD_INIT_EVQ_IN_COUNT_MODE_OFST 28
+#define       MC_CMD_INIT_EVQ_IN_COUNT_MODE_LEN 4
 /* enum: Disabled */
 #define          MC_CMD_INIT_EVQ_IN_COUNT_MODE_DIS 0x0
 /* enum: Disabled */
@@ -5762,6 +6936,7 @@
 #define          MC_CMD_INIT_EVQ_IN_COUNT_MODE_RXTX 0x3
 /* Event queue packet count threshold. */
 #define       MC_CMD_INIT_EVQ_IN_COUNT_THRSHLD_OFST 32
+#define       MC_CMD_INIT_EVQ_IN_COUNT_THRSHLD_LEN 4
 /* 64-bit address of 4k of 4k-aligned host memory buffer */
 #define       MC_CMD_INIT_EVQ_IN_DMA_ADDR_OFST 36
 #define       MC_CMD_INIT_EVQ_IN_DMA_ADDR_LEN 8
@@ -5774,6 +6949,7 @@
 #define    MC_CMD_INIT_EVQ_OUT_LEN 4
 /* Only valid if INTRFLAG was true */
 #define       MC_CMD_INIT_EVQ_OUT_IRQ_OFST 0
+#define       MC_CMD_INIT_EVQ_OUT_IRQ_LEN 4
 
 /* MC_CMD_INIT_EVQ_V2_IN msgrequest */
 #define    MC_CMD_INIT_EVQ_V2_IN_LENMIN 44
@@ -5781,17 +6957,22 @@
 #define    MC_CMD_INIT_EVQ_V2_IN_LEN(num) (36+8*(num))
 /* Size, in entries */
 #define       MC_CMD_INIT_EVQ_V2_IN_SIZE_OFST 0
+#define       MC_CMD_INIT_EVQ_V2_IN_SIZE_LEN 4
 /* Desired instance. Must be set to a specific instance, which is a function
  * local queue index.
  */
 #define       MC_CMD_INIT_EVQ_V2_IN_INSTANCE_OFST 4
+#define       MC_CMD_INIT_EVQ_V2_IN_INSTANCE_LEN 4
 /* The initial timer value. The load value is ignored if the timer mode is DIS.
  */
 #define       MC_CMD_INIT_EVQ_V2_IN_TMR_LOAD_OFST 8
+#define       MC_CMD_INIT_EVQ_V2_IN_TMR_LOAD_LEN 4
 /* The reload value is ignored in one-shot modes */
 #define       MC_CMD_INIT_EVQ_V2_IN_TMR_RELOAD_OFST 12
+#define       MC_CMD_INIT_EVQ_V2_IN_TMR_RELOAD_LEN 4
 /* tbd */
 #define       MC_CMD_INIT_EVQ_V2_IN_FLAGS_OFST 16
+#define       MC_CMD_INIT_EVQ_V2_IN_FLAGS_LEN 4
 #define        MC_CMD_INIT_EVQ_V2_IN_FLAG_INTERRUPTING_LBN 0
 #define        MC_CMD_INIT_EVQ_V2_IN_FLAG_INTERRUPTING_WIDTH 1
 #define        MC_CMD_INIT_EVQ_V2_IN_FLAG_RPTR_DOS_LBN 1
@@ -5828,6 +7009,7 @@
  */
 #define          MC_CMD_INIT_EVQ_V2_IN_FLAG_TYPE_AUTO 0x3
 #define       MC_CMD_INIT_EVQ_V2_IN_TMR_MODE_OFST 20
+#define       MC_CMD_INIT_EVQ_V2_IN_TMR_MODE_LEN 4
 /* enum: Disabled */
 #define          MC_CMD_INIT_EVQ_V2_IN_TMR_MODE_DIS 0x0
 /* enum: Immediate */
@@ -5838,13 +7020,16 @@
 #define          MC_CMD_INIT_EVQ_V2_IN_TMR_INT_HLDOFF 0x3
 /* Target EVQ for wakeups if in wakeup mode. */
 #define       MC_CMD_INIT_EVQ_V2_IN_TARGET_EVQ_OFST 24
+#define       MC_CMD_INIT_EVQ_V2_IN_TARGET_EVQ_LEN 4
 /* Target interrupt if in interrupting mode (note union with target EVQ). Use
  * MC_CMD_RESOURCE_INSTANCE_ANY unless a specific one is required for test
  * purposes.
  */
 #define       MC_CMD_INIT_EVQ_V2_IN_IRQ_NUM_OFST 24
+#define       MC_CMD_INIT_EVQ_V2_IN_IRQ_NUM_LEN 4
 /* Event Counter Mode. */
 #define       MC_CMD_INIT_EVQ_V2_IN_COUNT_MODE_OFST 28
+#define       MC_CMD_INIT_EVQ_V2_IN_COUNT_MODE_LEN 4
 /* enum: Disabled */
 #define          MC_CMD_INIT_EVQ_V2_IN_COUNT_MODE_DIS 0x0
 /* enum: Disabled */
@@ -5855,6 +7040,7 @@
 #define          MC_CMD_INIT_EVQ_V2_IN_COUNT_MODE_RXTX 0x3
 /* Event queue packet count threshold. */
 #define       MC_CMD_INIT_EVQ_V2_IN_COUNT_THRSHLD_OFST 32
+#define       MC_CMD_INIT_EVQ_V2_IN_COUNT_THRSHLD_LEN 4
 /* 64-bit address of 4k of 4k-aligned host memory buffer */
 #define       MC_CMD_INIT_EVQ_V2_IN_DMA_ADDR_OFST 36
 #define       MC_CMD_INIT_EVQ_V2_IN_DMA_ADDR_LEN 8
@@ -5867,8 +7053,10 @@
 #define    MC_CMD_INIT_EVQ_V2_OUT_LEN 8
 /* Only valid if INTRFLAG was true */
 #define       MC_CMD_INIT_EVQ_V2_OUT_IRQ_OFST 0
+#define       MC_CMD_INIT_EVQ_V2_OUT_IRQ_LEN 4
 /* Actual configuration applied on the card */
 #define       MC_CMD_INIT_EVQ_V2_OUT_FLAGS_OFST 4
+#define       MC_CMD_INIT_EVQ_V2_OUT_FLAGS_LEN 4
 #define        MC_CMD_INIT_EVQ_V2_OUT_FLAG_CUT_THRU_LBN 0
 #define        MC_CMD_INIT_EVQ_V2_OUT_FLAG_CUT_THRU_WIDTH 1
 #define        MC_CMD_INIT_EVQ_V2_OUT_FLAG_RX_MERGE_LBN 1
@@ -5916,17 +7104,22 @@
 #define    MC_CMD_INIT_RXQ_IN_LEN(num) (28+8*(num))
 /* Size, in entries */
 #define       MC_CMD_INIT_RXQ_IN_SIZE_OFST 0
+#define       MC_CMD_INIT_RXQ_IN_SIZE_LEN 4
 /* The EVQ to send events to. This is an index originally specified to INIT_EVQ
  */
 #define       MC_CMD_INIT_RXQ_IN_TARGET_EVQ_OFST 4
+#define       MC_CMD_INIT_RXQ_IN_TARGET_EVQ_LEN 4
 /* The value to put in the event data. Check hardware spec. for valid range. */
 #define       MC_CMD_INIT_RXQ_IN_LABEL_OFST 8
+#define       MC_CMD_INIT_RXQ_IN_LABEL_LEN 4
 /* Desired instance. Must be set to a specific instance, which is a function
  * local queue index.
  */
 #define       MC_CMD_INIT_RXQ_IN_INSTANCE_OFST 12
+#define       MC_CMD_INIT_RXQ_IN_INSTANCE_LEN 4
 /* There will be more flags here. */
 #define       MC_CMD_INIT_RXQ_IN_FLAGS_OFST 16
+#define       MC_CMD_INIT_RXQ_IN_FLAGS_LEN 4
 #define        MC_CMD_INIT_RXQ_IN_FLAG_BUFF_MODE_LBN 0
 #define        MC_CMD_INIT_RXQ_IN_FLAG_BUFF_MODE_WIDTH 1
 #define        MC_CMD_INIT_RXQ_IN_FLAG_HDR_SPLIT_LBN 1
@@ -5945,8 +7138,10 @@
 #define        MC_CMD_INIT_RXQ_IN_UNUSED_WIDTH 1
 /* Owner ID to use if in buffer mode (zero if physical) */
 #define       MC_CMD_INIT_RXQ_IN_OWNER_ID_OFST 20
+#define       MC_CMD_INIT_RXQ_IN_OWNER_ID_LEN 4
 /* The port ID associated with the v-adaptor which should contain this DMAQ. */
 #define       MC_CMD_INIT_RXQ_IN_PORT_ID_OFST 24
+#define       MC_CMD_INIT_RXQ_IN_PORT_ID_LEN 4
 /* 64-bit address of 4k of 4k-aligned host memory buffer */
 #define       MC_CMD_INIT_RXQ_IN_DMA_ADDR_OFST 28
 #define       MC_CMD_INIT_RXQ_IN_DMA_ADDR_LEN 8
@@ -5961,17 +7156,22 @@
 #define    MC_CMD_INIT_RXQ_EXT_IN_LEN 544
 /* Size, in entries */
 #define       MC_CMD_INIT_RXQ_EXT_IN_SIZE_OFST 0
+#define       MC_CMD_INIT_RXQ_EXT_IN_SIZE_LEN 4
 /* The EVQ to send events to. This is an index originally specified to INIT_EVQ
  */
 #define       MC_CMD_INIT_RXQ_EXT_IN_TARGET_EVQ_OFST 4
+#define       MC_CMD_INIT_RXQ_EXT_IN_TARGET_EVQ_LEN 4
 /* The value to put in the event data. Check hardware spec. for valid range. */
 #define       MC_CMD_INIT_RXQ_EXT_IN_LABEL_OFST 8
+#define       MC_CMD_INIT_RXQ_EXT_IN_LABEL_LEN 4
 /* Desired instance. Must be set to a specific instance, which is a function
  * local queue index.
  */
 #define       MC_CMD_INIT_RXQ_EXT_IN_INSTANCE_OFST 12
+#define       MC_CMD_INIT_RXQ_EXT_IN_INSTANCE_LEN 4
 /* There will be more flags here. */
 #define       MC_CMD_INIT_RXQ_EXT_IN_FLAGS_OFST 16
+#define       MC_CMD_INIT_RXQ_EXT_IN_FLAGS_LEN 4
 #define        MC_CMD_INIT_RXQ_EXT_IN_FLAG_BUFF_MODE_LBN 0
 #define        MC_CMD_INIT_RXQ_EXT_IN_FLAG_BUFF_MODE_WIDTH 1
 #define        MC_CMD_INIT_RXQ_EXT_IN_FLAG_HDR_SPLIT_LBN 1
@@ -6007,8 +7207,10 @@
 #define        MC_CMD_INIT_RXQ_EXT_IN_FLAG_FORCE_EV_MERGING_WIDTH 1
 /* Owner ID to use if in buffer mode (zero if physical) */
 #define       MC_CMD_INIT_RXQ_EXT_IN_OWNER_ID_OFST 20
+#define       MC_CMD_INIT_RXQ_EXT_IN_OWNER_ID_LEN 4
 /* The port ID associated with the v-adaptor which should contain this DMAQ. */
 #define       MC_CMD_INIT_RXQ_EXT_IN_PORT_ID_OFST 24
+#define       MC_CMD_INIT_RXQ_EXT_IN_PORT_ID_LEN 4
 /* 64-bit address of 4k of 4k-aligned host memory buffer */
 #define       MC_CMD_INIT_RXQ_EXT_IN_DMA_ADDR_OFST 28
 #define       MC_CMD_INIT_RXQ_EXT_IN_DMA_ADDR_LEN 8
@@ -6017,6 +7219,7 @@
 #define       MC_CMD_INIT_RXQ_EXT_IN_DMA_ADDR_NUM 64
 /* Maximum length of packet to receive, if SNAPSHOT_MODE flag is set */
 #define       MC_CMD_INIT_RXQ_EXT_IN_SNAPSHOT_LENGTH_OFST 540
+#define       MC_CMD_INIT_RXQ_EXT_IN_SNAPSHOT_LENGTH_LEN 4
 
 /* MC_CMD_INIT_RXQ_OUT msgresponse */
 #define    MC_CMD_INIT_RXQ_OUT_LEN 0
@@ -6040,18 +7243,23 @@
 #define    MC_CMD_INIT_TXQ_IN_LEN(num) (28+8*(num))
 /* Size, in entries */
 #define       MC_CMD_INIT_TXQ_IN_SIZE_OFST 0
+#define       MC_CMD_INIT_TXQ_IN_SIZE_LEN 4
 /* The EVQ to send events to. This is an index originally specified to
  * INIT_EVQ.
  */
 #define       MC_CMD_INIT_TXQ_IN_TARGET_EVQ_OFST 4
+#define       MC_CMD_INIT_TXQ_IN_TARGET_EVQ_LEN 4
 /* The value to put in the event data. Check hardware spec. for valid range. */
 #define       MC_CMD_INIT_TXQ_IN_LABEL_OFST 8
+#define       MC_CMD_INIT_TXQ_IN_LABEL_LEN 4
 /* Desired instance. Must be set to a specific instance, which is a function
  * local queue index.
  */
 #define       MC_CMD_INIT_TXQ_IN_INSTANCE_OFST 12
+#define       MC_CMD_INIT_TXQ_IN_INSTANCE_LEN 4
 /* There will be more flags here. */
 #define       MC_CMD_INIT_TXQ_IN_FLAGS_OFST 16
+#define       MC_CMD_INIT_TXQ_IN_FLAGS_LEN 4
 #define        MC_CMD_INIT_TXQ_IN_FLAG_BUFF_MODE_LBN 0
 #define        MC_CMD_INIT_TXQ_IN_FLAG_BUFF_MODE_WIDTH 1
 #define        MC_CMD_INIT_TXQ_IN_FLAG_IP_CSUM_DIS_LBN 1
@@ -6072,8 +7280,10 @@
 #define        MC_CMD_INIT_TXQ_IN_FLAG_INNER_TCP_CSUM_EN_WIDTH 1
 /* Owner ID to use if in buffer mode (zero if physical) */
 #define       MC_CMD_INIT_TXQ_IN_OWNER_ID_OFST 20
+#define       MC_CMD_INIT_TXQ_IN_OWNER_ID_LEN 4
 /* The port ID associated with the v-adaptor which should contain this DMAQ. */
 #define       MC_CMD_INIT_TXQ_IN_PORT_ID_OFST 24
+#define       MC_CMD_INIT_TXQ_IN_PORT_ID_LEN 4
 /* 64-bit address of 4k of 4k-aligned host memory buffer */
 #define       MC_CMD_INIT_TXQ_IN_DMA_ADDR_OFST 28
 #define       MC_CMD_INIT_TXQ_IN_DMA_ADDR_LEN 8
@@ -6088,18 +7298,23 @@
 #define    MC_CMD_INIT_TXQ_EXT_IN_LEN 544
 /* Size, in entries */
 #define       MC_CMD_INIT_TXQ_EXT_IN_SIZE_OFST 0
+#define       MC_CMD_INIT_TXQ_EXT_IN_SIZE_LEN 4
 /* The EVQ to send events to. This is an index originally specified to
  * INIT_EVQ.
  */
 #define       MC_CMD_INIT_TXQ_EXT_IN_TARGET_EVQ_OFST 4
+#define       MC_CMD_INIT_TXQ_EXT_IN_TARGET_EVQ_LEN 4
 /* The value to put in the event data. Check hardware spec. for valid range. */
 #define       MC_CMD_INIT_TXQ_EXT_IN_LABEL_OFST 8
+#define       MC_CMD_INIT_TXQ_EXT_IN_LABEL_LEN 4
 /* Desired instance. Must be set to a specific instance, which is a function
  * local queue index.
  */
 #define       MC_CMD_INIT_TXQ_EXT_IN_INSTANCE_OFST 12
+#define       MC_CMD_INIT_TXQ_EXT_IN_INSTANCE_LEN 4
 /* There will be more flags here. */
 #define       MC_CMD_INIT_TXQ_EXT_IN_FLAGS_OFST 16
+#define       MC_CMD_INIT_TXQ_EXT_IN_FLAGS_LEN 4
 #define        MC_CMD_INIT_TXQ_EXT_IN_FLAG_BUFF_MODE_LBN 0
 #define        MC_CMD_INIT_TXQ_EXT_IN_FLAG_BUFF_MODE_WIDTH 1
 #define        MC_CMD_INIT_TXQ_EXT_IN_FLAG_IP_CSUM_DIS_LBN 1
@@ -6122,10 +7337,14 @@
 #define        MC_CMD_INIT_TXQ_EXT_IN_FLAG_TSOV2_EN_WIDTH 1
 #define        MC_CMD_INIT_TXQ_EXT_IN_FLAG_CTPIO_LBN 13
 #define        MC_CMD_INIT_TXQ_EXT_IN_FLAG_CTPIO_WIDTH 1
+#define        MC_CMD_INIT_TXQ_EXT_IN_FLAG_CTPIO_UTHRESH_LBN 14
+#define        MC_CMD_INIT_TXQ_EXT_IN_FLAG_CTPIO_UTHRESH_WIDTH 1
 /* Owner ID to use if in buffer mode (zero if physical) */
 #define       MC_CMD_INIT_TXQ_EXT_IN_OWNER_ID_OFST 20
+#define       MC_CMD_INIT_TXQ_EXT_IN_OWNER_ID_LEN 4
 /* The port ID associated with the v-adaptor which should contain this DMAQ. */
 #define       MC_CMD_INIT_TXQ_EXT_IN_PORT_ID_OFST 24
+#define       MC_CMD_INIT_TXQ_EXT_IN_PORT_ID_LEN 4
 /* 64-bit address of 4k of 4k-aligned host memory buffer */
 #define       MC_CMD_INIT_TXQ_EXT_IN_DMA_ADDR_OFST 28
 #define       MC_CMD_INIT_TXQ_EXT_IN_DMA_ADDR_LEN 8
@@ -6135,6 +7354,7 @@
 #define       MC_CMD_INIT_TXQ_EXT_IN_DMA_ADDR_MAXNUM 64
 /* Flags related to Qbb flow control mode. */
 #define       MC_CMD_INIT_TXQ_EXT_IN_QBB_FLAGS_OFST 540
+#define       MC_CMD_INIT_TXQ_EXT_IN_QBB_FLAGS_LEN 4
 #define        MC_CMD_INIT_TXQ_EXT_IN_QBB_ENABLE_LBN 0
 #define        MC_CMD_INIT_TXQ_EXT_IN_QBB_ENABLE_WIDTH 1
 #define        MC_CMD_INIT_TXQ_EXT_IN_QBB_PRIORITY_LBN 1
@@ -6161,6 +7381,7 @@
  * passed to INIT_EVQ
  */
 #define       MC_CMD_FINI_EVQ_IN_INSTANCE_OFST 0
+#define       MC_CMD_FINI_EVQ_IN_INSTANCE_LEN 4
 
 /* MC_CMD_FINI_EVQ_OUT msgresponse */
 #define    MC_CMD_FINI_EVQ_OUT_LEN 0
@@ -6178,6 +7399,7 @@
 #define    MC_CMD_FINI_RXQ_IN_LEN 4
 /* Instance of RXQ to destroy */
 #define       MC_CMD_FINI_RXQ_IN_INSTANCE_OFST 0
+#define       MC_CMD_FINI_RXQ_IN_INSTANCE_LEN 4
 
 /* MC_CMD_FINI_RXQ_OUT msgresponse */
 #define    MC_CMD_FINI_RXQ_OUT_LEN 0
@@ -6195,6 +7417,7 @@
 #define    MC_CMD_FINI_TXQ_IN_LEN 4
 /* Instance of TXQ to destroy */
 #define       MC_CMD_FINI_TXQ_IN_INSTANCE_OFST 0
+#define       MC_CMD_FINI_TXQ_IN_INSTANCE_LEN 4
 
 /* MC_CMD_FINI_TXQ_OUT msgresponse */
 #define    MC_CMD_FINI_TXQ_OUT_LEN 0
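[Editor's note: teardown is the simplest use of the single-field pattern: a one-dword request naming the INSTANCE that was originally passed to INIT_TXQ, with an empty response. A sketch under the same sfc-helper assumptions as the earlier examples.]

static int fini_txq(struct efx_nic *efx, unsigned int instance)
{
        MCDI_DECLARE_BUF(inbuf, MC_CMD_FINI_TXQ_IN_LEN);

        MCDI_SET_DWORD(inbuf, FINI_TXQ_IN_INSTANCE, instance);
        /* no response payload expected, so no output buffer is passed */
        return efx_mcdi_rpc(efx, MC_CMD_FINI_TXQ, inbuf, sizeof(inbuf),
                            NULL, 0, NULL);
}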
@@ -6212,6 +7435,7 @@
 #define    MC_CMD_DRIVER_EVENT_IN_LEN 12
 /* Handle of target EVQ */
 #define       MC_CMD_DRIVER_EVENT_IN_EVQ_OFST 0
+#define       MC_CMD_DRIVER_EVENT_IN_EVQ_LEN 4
 /* Bits 0 - 63 of event */
 #define       MC_CMD_DRIVER_EVENT_IN_DATA_OFST 4
 #define       MC_CMD_DRIVER_EVENT_IN_DATA_LEN 8
@@ -6237,6 +7461,7 @@
 #define    MC_CMD_PROXY_CMD_IN_LEN 4
 /* The handle of the target function. */
 #define       MC_CMD_PROXY_CMD_IN_TARGET_OFST 0
+#define       MC_CMD_PROXY_CMD_IN_TARGET_LEN 4
 #define        MC_CMD_PROXY_CMD_IN_TARGET_PF_LBN 0
 #define        MC_CMD_PROXY_CMD_IN_TARGET_PF_WIDTH 16
 #define        MC_CMD_PROXY_CMD_IN_TARGET_VF_LBN 16
@@ -6252,6 +7477,7 @@
 #define    MC_PROXY_STATUS_BUFFER_LEN 16
 /* Handle allocated by the firmware for this proxy transaction */
 #define       MC_PROXY_STATUS_BUFFER_HANDLE_OFST 0
+#define       MC_PROXY_STATUS_BUFFER_HANDLE_LEN 4
 /* enum: An invalid handle. */
 #define          MC_PROXY_STATUS_BUFFER_HANDLE_INVALID  0x0
 #define       MC_PROXY_STATUS_BUFFER_HANDLE_LBN 0
@@ -6282,6 +7508,7 @@
  * elevated privilege mask granted to the requesting function.
  */
 #define       MC_PROXY_STATUS_BUFFER_GRANTED_PRIVILEGES_OFST 12
+#define       MC_PROXY_STATUS_BUFFER_GRANTED_PRIVILEGES_LEN 4
 #define       MC_PROXY_STATUS_BUFFER_GRANTED_PRIVILEGES_LBN 96
 #define       MC_PROXY_STATUS_BUFFER_GRANTED_PRIVILEGES_WIDTH 32
 
@@ -6298,6 +7525,7 @@
 /* MC_CMD_PROXY_CONFIGURE_IN msgrequest */
 #define    MC_CMD_PROXY_CONFIGURE_IN_LEN 108
 #define       MC_CMD_PROXY_CONFIGURE_IN_FLAGS_OFST 0
+#define       MC_CMD_PROXY_CONFIGURE_IN_FLAGS_LEN 4
 #define        MC_CMD_PROXY_CONFIGURE_IN_ENABLE_LBN 0
 #define        MC_CMD_PROXY_CONFIGURE_IN_ENABLE_WIDTH 1
 /* Host provides a contiguous memory buffer that contains at least NUM_BLOCKS
@@ -6309,6 +7537,7 @@
 #define       MC_CMD_PROXY_CONFIGURE_IN_STATUS_BUFF_ADDR_HI_OFST 8
 /* Must be a power of 2 */
 #define       MC_CMD_PROXY_CONFIGURE_IN_STATUS_BLOCK_SIZE_OFST 12
+#define       MC_CMD_PROXY_CONFIGURE_IN_STATUS_BLOCK_SIZE_LEN 4
 /* Host provides a contiguous memory buffer that contains at least NUM_BLOCKS
  * of blocks, each of the size REPLY_BLOCK_SIZE.
  */
@@ -6318,6 +7547,7 @@
 #define       MC_CMD_PROXY_CONFIGURE_IN_REQUEST_BUFF_ADDR_HI_OFST 20
 /* Must be a power of 2 */
 #define       MC_CMD_PROXY_CONFIGURE_IN_REQUEST_BLOCK_SIZE_OFST 24
+#define       MC_CMD_PROXY_CONFIGURE_IN_REQUEST_BLOCK_SIZE_LEN 4
 /* Host provides a contiguous memory buffer that contains at least NUM_BLOCKS
  * of blocks, each of the size STATUS_BLOCK_SIZE. This buffer is only needed if
  * host intends to complete proxied operations by using MC_CMD_PROXY_CMD.
@@ -6328,8 +7558,10 @@
 #define       MC_CMD_PROXY_CONFIGURE_IN_REPLY_BUFF_ADDR_HI_OFST 32
 /* Must be a power of 2, or zero if this buffer is not provided */
 #define       MC_CMD_PROXY_CONFIGURE_IN_REPLY_BLOCK_SIZE_OFST 36
+#define       MC_CMD_PROXY_CONFIGURE_IN_REPLY_BLOCK_SIZE_LEN 4
 /* Applies to all three buffers */
 #define       MC_CMD_PROXY_CONFIGURE_IN_NUM_BLOCKS_OFST 40
+#define       MC_CMD_PROXY_CONFIGURE_IN_NUM_BLOCKS_LEN 4
 /* A bit mask defining which MCDI operations may be proxied */
 #define       MC_CMD_PROXY_CONFIGURE_IN_ALLOWED_MCDI_MASK_OFST 44
 #define       MC_CMD_PROXY_CONFIGURE_IN_ALLOWED_MCDI_MASK_LEN 64
@@ -6337,6 +7569,7 @@
 /* MC_CMD_PROXY_CONFIGURE_EXT_IN msgrequest */
 #define    MC_CMD_PROXY_CONFIGURE_EXT_IN_LEN 112
 #define       MC_CMD_PROXY_CONFIGURE_EXT_IN_FLAGS_OFST 0
+#define       MC_CMD_PROXY_CONFIGURE_EXT_IN_FLAGS_LEN 4
 #define        MC_CMD_PROXY_CONFIGURE_EXT_IN_ENABLE_LBN 0
 #define        MC_CMD_PROXY_CONFIGURE_EXT_IN_ENABLE_WIDTH 1
 /* Host provides a contiguous memory buffer that contains at least NUM_BLOCKS
@@ -6348,6 +7581,7 @@
 #define       MC_CMD_PROXY_CONFIGURE_EXT_IN_STATUS_BUFF_ADDR_HI_OFST 8
 /* Must be a power of 2 */
 #define       MC_CMD_PROXY_CONFIGURE_EXT_IN_STATUS_BLOCK_SIZE_OFST 12
+#define       MC_CMD_PROXY_CONFIGURE_EXT_IN_STATUS_BLOCK_SIZE_LEN 4
 /* Host provides a contiguous memory buffer that contains at least NUM_BLOCKS
  * of blocks, each of the size REPLY_BLOCK_SIZE.
  */
@@ -6357,6 +7591,7 @@
 #define       MC_CMD_PROXY_CONFIGURE_EXT_IN_REQUEST_BUFF_ADDR_HI_OFST 20
 /* Must be a power of 2 */
 #define       MC_CMD_PROXY_CONFIGURE_EXT_IN_REQUEST_BLOCK_SIZE_OFST 24
+#define       MC_CMD_PROXY_CONFIGURE_EXT_IN_REQUEST_BLOCK_SIZE_LEN 4
 /* Host provides a contiguous memory buffer that contains at least NUM_BLOCKS
  * of blocks, each of the size STATUS_BLOCK_SIZE. This buffer is only needed if
  * host intends to complete proxied operations by using MC_CMD_PROXY_CMD.
@@ -6367,12 +7602,15 @@
 #define       MC_CMD_PROXY_CONFIGURE_EXT_IN_REPLY_BUFF_ADDR_HI_OFST 32
 /* Must be a power of 2, or zero if this buffer is not provided */
 #define       MC_CMD_PROXY_CONFIGURE_EXT_IN_REPLY_BLOCK_SIZE_OFST 36
+#define       MC_CMD_PROXY_CONFIGURE_EXT_IN_REPLY_BLOCK_SIZE_LEN 4
 /* Applies to all three buffers */
 #define       MC_CMD_PROXY_CONFIGURE_EXT_IN_NUM_BLOCKS_OFST 40
+#define       MC_CMD_PROXY_CONFIGURE_EXT_IN_NUM_BLOCKS_LEN 4
 /* A bit mask defining which MCDI operations may be proxied */
 #define       MC_CMD_PROXY_CONFIGURE_EXT_IN_ALLOWED_MCDI_MASK_OFST 44
 #define       MC_CMD_PROXY_CONFIGURE_EXT_IN_ALLOWED_MCDI_MASK_LEN 64
 #define       MC_CMD_PROXY_CONFIGURE_EXT_IN_RESERVED_OFST 108
+#define       MC_CMD_PROXY_CONFIGURE_EXT_IN_RESERVED_LEN 4
 
 /* MC_CMD_PROXY_CONFIGURE_OUT msgresponse */
 #define    MC_CMD_PROXY_CONFIGURE_OUT_LEN 0
@@ -6392,7 +7630,9 @@
 /* MC_CMD_PROXY_COMPLETE_IN msgrequest */
 #define    MC_CMD_PROXY_COMPLETE_IN_LEN 12
 #define       MC_CMD_PROXY_COMPLETE_IN_BLOCK_INDEX_OFST 0
+#define       MC_CMD_PROXY_COMPLETE_IN_BLOCK_INDEX_LEN 4
 #define       MC_CMD_PROXY_COMPLETE_IN_STATUS_OFST 4
+#define       MC_CMD_PROXY_COMPLETE_IN_STATUS_LEN 4
 /* enum: The operation has been completed by using MC_CMD_PROXY_CMD, the reply
  * is stored in the REPLY_BUFF.
  */
@@ -6408,6 +7648,7 @@
  */
 #define          MC_CMD_PROXY_COMPLETE_IN_TIMEDOUT 0x3
 #define       MC_CMD_PROXY_COMPLETE_IN_HANDLE_OFST 8
+#define       MC_CMD_PROXY_COMPLETE_IN_HANDLE_LEN 4
 
 /* MC_CMD_PROXY_COMPLETE_OUT msgresponse */
 #define    MC_CMD_PROXY_COMPLETE_OUT_LEN 0
@@ -6427,17 +7668,22 @@
 #define    MC_CMD_ALLOC_BUFTBL_CHUNK_IN_LEN 8
 /* Owner ID to use */
 #define       MC_CMD_ALLOC_BUFTBL_CHUNK_IN_OWNER_OFST 0
+#define       MC_CMD_ALLOC_BUFTBL_CHUNK_IN_OWNER_LEN 4
 /* Size of buffer table pages to use, in bytes (note that only a few values are
  * legal on any specific hardware).
  */
 #define       MC_CMD_ALLOC_BUFTBL_CHUNK_IN_PAGE_SIZE_OFST 4
+#define       MC_CMD_ALLOC_BUFTBL_CHUNK_IN_PAGE_SIZE_LEN 4
 
 /* MC_CMD_ALLOC_BUFTBL_CHUNK_OUT msgresponse */
 #define    MC_CMD_ALLOC_BUFTBL_CHUNK_OUT_LEN 12
 #define       MC_CMD_ALLOC_BUFTBL_CHUNK_OUT_HANDLE_OFST 0
+#define       MC_CMD_ALLOC_BUFTBL_CHUNK_OUT_HANDLE_LEN 4
 #define       MC_CMD_ALLOC_BUFTBL_CHUNK_OUT_NUMENTRIES_OFST 4
+#define       MC_CMD_ALLOC_BUFTBL_CHUNK_OUT_NUMENTRIES_LEN 4
 /* Buffer table IDs for use in DMA descriptors. */
 #define       MC_CMD_ALLOC_BUFTBL_CHUNK_OUT_ID_OFST 8
+#define       MC_CMD_ALLOC_BUFTBL_CHUNK_OUT_ID_LEN 4
 
 
 /***********************************/
@@ -6453,10 +7699,13 @@
 #define    MC_CMD_PROGRAM_BUFTBL_ENTRIES_IN_LENMAX 268
 #define    MC_CMD_PROGRAM_BUFTBL_ENTRIES_IN_LEN(num) (12+8*(num))
 #define       MC_CMD_PROGRAM_BUFTBL_ENTRIES_IN_HANDLE_OFST 0
+#define       MC_CMD_PROGRAM_BUFTBL_ENTRIES_IN_HANDLE_LEN 4
 /* ID */
 #define       MC_CMD_PROGRAM_BUFTBL_ENTRIES_IN_FIRSTID_OFST 4
+#define       MC_CMD_PROGRAM_BUFTBL_ENTRIES_IN_FIRSTID_LEN 4
 /* Num entries */
 #define       MC_CMD_PROGRAM_BUFTBL_ENTRIES_IN_NUMENTRIES_OFST 8
+#define       MC_CMD_PROGRAM_BUFTBL_ENTRIES_IN_NUMENTRIES_LEN 4
 /* Buffer table entry address */
 #define       MC_CMD_PROGRAM_BUFTBL_ENTRIES_IN_ENTRY_OFST 12
 #define       MC_CMD_PROGRAM_BUFTBL_ENTRIES_IN_ENTRY_LEN 8
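
Variable-length requests such as this one express their total length as a macro of the element count (here 12 fixed bytes plus 8 per entry). A hedged sketch of sizing the payload, assuming a caller that knows its entry count:

    /* Illustrative: request length for n_entries buffer-table entries,
     * e.g. MC_CMD_PROGRAM_BUFTBL_ENTRIES_IN_LEN(4) == 12 + 8 * 4 == 44.
     */
    static unsigned int program_buftbl_inlen(unsigned int n_entries)
    {
        return MC_CMD_PROGRAM_BUFTBL_ENTRIES_IN_LEN(n_entries);
    }
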
@@ -6479,48 +7728,11 @@
 /* MC_CMD_FREE_BUFTBL_CHUNK_IN msgrequest */
 #define    MC_CMD_FREE_BUFTBL_CHUNK_IN_LEN 4
 #define       MC_CMD_FREE_BUFTBL_CHUNK_IN_HANDLE_OFST 0
+#define       MC_CMD_FREE_BUFTBL_CHUNK_IN_HANDLE_LEN 4
 
 /* MC_CMD_FREE_BUFTBL_CHUNK_OUT msgresponse */
 #define    MC_CMD_FREE_BUFTBL_CHUNK_OUT_LEN 0
 
-/* PORT_CONFIG_ENTRY structuredef */
-#define    PORT_CONFIG_ENTRY_LEN 16
-/* External port number (label) */
-#define       PORT_CONFIG_ENTRY_EXT_NUMBER_OFST 0
-#define       PORT_CONFIG_ENTRY_EXT_NUMBER_LEN 1
-#define       PORT_CONFIG_ENTRY_EXT_NUMBER_LBN 0
-#define       PORT_CONFIG_ENTRY_EXT_NUMBER_WIDTH 8
-/* Port core location */
-#define       PORT_CONFIG_ENTRY_CORE_OFST 1
-#define       PORT_CONFIG_ENTRY_CORE_LEN 1
-#define          PORT_CONFIG_ENTRY_STANDALONE  0x0 /* enum */
-#define          PORT_CONFIG_ENTRY_MASTER  0x1 /* enum */
-#define          PORT_CONFIG_ENTRY_SLAVE  0x2 /* enum */
-#define       PORT_CONFIG_ENTRY_CORE_LBN 8
-#define       PORT_CONFIG_ENTRY_CORE_WIDTH 8
-/* Internal number (HW resource) relative to the core */
-#define       PORT_CONFIG_ENTRY_INT_NUMBER_OFST 2
-#define       PORT_CONFIG_ENTRY_INT_NUMBER_LEN 1
-#define       PORT_CONFIG_ENTRY_INT_NUMBER_LBN 16
-#define       PORT_CONFIG_ENTRY_INT_NUMBER_WIDTH 8
-/* Reserved */
-#define       PORT_CONFIG_ENTRY_RSVD_OFST 3
-#define       PORT_CONFIG_ENTRY_RSVD_LEN 1
-#define       PORT_CONFIG_ENTRY_RSVD_LBN 24
-#define       PORT_CONFIG_ENTRY_RSVD_WIDTH 8
-/* Bitmask of KR lanes used by the port */
-#define       PORT_CONFIG_ENTRY_LANES_OFST 4
-#define       PORT_CONFIG_ENTRY_LANES_LBN 32
-#define       PORT_CONFIG_ENTRY_LANES_WIDTH 32
-/* Port capabilities (MC_CMD_PHY_CAP_*) */
-#define       PORT_CONFIG_ENTRY_SUPPORTED_CAPS_OFST 8
-#define       PORT_CONFIG_ENTRY_SUPPORTED_CAPS_LBN 64
-#define       PORT_CONFIG_ENTRY_SUPPORTED_CAPS_WIDTH 32
-/* Reserved (align to 16 bytes) */
-#define       PORT_CONFIG_ENTRY_RSVD2_OFST 12
-#define       PORT_CONFIG_ENTRY_RSVD2_LBN 96
-#define       PORT_CONFIG_ENTRY_RSVD2_WIDTH 32
-
 
 /***********************************/
 /* MC_CMD_FILTER_OP
@@ -6534,6 +7746,7 @@
 #define    MC_CMD_FILTER_OP_IN_LEN 108
 /* identifies the type of operation requested */
 #define       MC_CMD_FILTER_OP_IN_OP_OFST 0
+#define       MC_CMD_FILTER_OP_IN_OP_LEN 4
 /* enum: single-recipient filter insert */
 #define          MC_CMD_FILTER_OP_IN_OP_INSERT  0x0
 /* enum: single-recipient filter remove */
@@ -6554,8 +7767,10 @@
 /* The port ID associated with the v-adaptor which should contain this filter.
  */
 #define       MC_CMD_FILTER_OP_IN_PORT_ID_OFST 12
+#define       MC_CMD_FILTER_OP_IN_PORT_ID_LEN 4
 /* fields to include in match criteria */
 #define       MC_CMD_FILTER_OP_IN_MATCH_FIELDS_OFST 16
+#define       MC_CMD_FILTER_OP_IN_MATCH_FIELDS_LEN 4
 #define        MC_CMD_FILTER_OP_IN_MATCH_SRC_IP_LBN 0
 #define        MC_CMD_FILTER_OP_IN_MATCH_SRC_IP_WIDTH 1
 #define        MC_CMD_FILTER_OP_IN_MATCH_DST_IP_LBN 1
@@ -6586,6 +7801,7 @@
 #define        MC_CMD_FILTER_OP_IN_MATCH_UNKNOWN_UCAST_DST_WIDTH 1
 /* receive destination */
 #define       MC_CMD_FILTER_OP_IN_RX_DEST_OFST 20
+#define       MC_CMD_FILTER_OP_IN_RX_DEST_LEN 4
 /* enum: drop packets */
 #define          MC_CMD_FILTER_OP_IN_RX_DEST_DROP  0x0
 /* enum: receive to host */
@@ -6598,8 +7814,10 @@
 #define          MC_CMD_FILTER_OP_IN_RX_DEST_TX1  0x4
 /* receive queue handle (for multiple queue modes, this is the base queue) */
 #define       MC_CMD_FILTER_OP_IN_RX_QUEUE_OFST 24
+#define       MC_CMD_FILTER_OP_IN_RX_QUEUE_LEN 4
 /* receive mode */
 #define       MC_CMD_FILTER_OP_IN_RX_MODE_OFST 28
+#define       MC_CMD_FILTER_OP_IN_RX_MODE_LEN 4
 /* enum: receive to just the specified queue */
 #define          MC_CMD_FILTER_OP_IN_RX_MODE_SIMPLE  0x0
 /* enum: receive to multiple queues using RSS context */
@@ -6614,13 +7832,16 @@
  * MC_CMD_DOT1P_MAPPING_ALLOC.
  */
 #define       MC_CMD_FILTER_OP_IN_RX_CONTEXT_OFST 32
+#define       MC_CMD_FILTER_OP_IN_RX_CONTEXT_LEN 4
 /* transmit domain (reserved; set to 0) */
 #define       MC_CMD_FILTER_OP_IN_TX_DOMAIN_OFST 36
+#define       MC_CMD_FILTER_OP_IN_TX_DOMAIN_LEN 4
 /* transmit destination (either set the MAC and/or PM bits for explicit
  * control, or set this field to TX_DEST_DEFAULT for sensible default
  * behaviour)
  */
 #define       MC_CMD_FILTER_OP_IN_TX_DEST_OFST 40
+#define       MC_CMD_FILTER_OP_IN_TX_DEST_LEN 4
 /* enum: request default behaviour (based on filter type) */
 #define          MC_CMD_FILTER_OP_IN_TX_DEST_DEFAULT  0xffffffff
 #define        MC_CMD_FILTER_OP_IN_TX_DEST_MAC_LBN 0
@@ -6653,8 +7874,10 @@
 #define       MC_CMD_FILTER_OP_IN_IP_PROTO_LEN 2
 /* Firmware defined register 0 to match (reserved; set to 0) */
 #define       MC_CMD_FILTER_OP_IN_FWDEF0_OFST 68
+#define       MC_CMD_FILTER_OP_IN_FWDEF0_LEN 4
 /* Firmware defined register 1 to match (reserved; set to 0) */
 #define       MC_CMD_FILTER_OP_IN_FWDEF1_OFST 72
+#define       MC_CMD_FILTER_OP_IN_FWDEF1_LEN 4
 /* source IP address to match (as bytes in network order; set last 12 bytes to
  * 0 for IPv4 address)
  */
@@ -6673,6 +7896,7 @@
 #define    MC_CMD_FILTER_OP_EXT_IN_LEN 172
 /* identifies the type of operation requested */
 #define       MC_CMD_FILTER_OP_EXT_IN_OP_OFST 0
+#define       MC_CMD_FILTER_OP_EXT_IN_OP_LEN 4
 /*            Enum values, see field(s): */
 /*               MC_CMD_FILTER_OP_IN/OP */
 /* filter handle (for remove / unsubscribe operations) */
@@ -6683,8 +7907,10 @@
 /* The port ID associated with the v-adaptor which should contain this filter.
  */
 #define       MC_CMD_FILTER_OP_EXT_IN_PORT_ID_OFST 12
+#define       MC_CMD_FILTER_OP_EXT_IN_PORT_ID_LEN 4
 /* fields to include in match criteria */
 #define       MC_CMD_FILTER_OP_EXT_IN_MATCH_FIELDS_OFST 16
+#define       MC_CMD_FILTER_OP_EXT_IN_MATCH_FIELDS_LEN 4
 #define        MC_CMD_FILTER_OP_EXT_IN_MATCH_SRC_IP_LBN 0
 #define        MC_CMD_FILTER_OP_EXT_IN_MATCH_SRC_IP_WIDTH 1
 #define        MC_CMD_FILTER_OP_EXT_IN_MATCH_DST_IP_LBN 1
@@ -6743,6 +7969,7 @@
 #define        MC_CMD_FILTER_OP_EXT_IN_MATCH_UNKNOWN_UCAST_DST_WIDTH 1
 /* receive destination */
 #define       MC_CMD_FILTER_OP_EXT_IN_RX_DEST_OFST 20
+#define       MC_CMD_FILTER_OP_EXT_IN_RX_DEST_LEN 4
 /* enum: drop packets */
 #define          MC_CMD_FILTER_OP_EXT_IN_RX_DEST_DROP  0x0
 /* enum: receive to host */
@@ -6755,8 +7982,10 @@
 #define          MC_CMD_FILTER_OP_EXT_IN_RX_DEST_TX1  0x4
 /* receive queue handle (for multiple queue modes, this is the base queue) */
 #define       MC_CMD_FILTER_OP_EXT_IN_RX_QUEUE_OFST 24
+#define       MC_CMD_FILTER_OP_EXT_IN_RX_QUEUE_LEN 4
 /* receive mode */
 #define       MC_CMD_FILTER_OP_EXT_IN_RX_MODE_OFST 28
+#define       MC_CMD_FILTER_OP_EXT_IN_RX_MODE_LEN 4
 /* enum: receive to just the specified queue */
 #define          MC_CMD_FILTER_OP_EXT_IN_RX_MODE_SIMPLE  0x0
 /* enum: receive to multiple queues using RSS context */
@@ -6771,13 +8000,16 @@
  * MC_CMD_DOT1P_MAPPING_ALLOC.
  */
 #define       MC_CMD_FILTER_OP_EXT_IN_RX_CONTEXT_OFST 32
+#define       MC_CMD_FILTER_OP_EXT_IN_RX_CONTEXT_LEN 4
 /* transmit domain (reserved; set to 0) */
 #define       MC_CMD_FILTER_OP_EXT_IN_TX_DOMAIN_OFST 36
+#define       MC_CMD_FILTER_OP_EXT_IN_TX_DOMAIN_LEN 4
 /* transmit destination (either set the MAC and/or PM bits for explicit
  * control, or set this field to TX_DEST_DEFAULT for sensible default
  * behaviour)
  */
 #define       MC_CMD_FILTER_OP_EXT_IN_TX_DEST_OFST 40
+#define       MC_CMD_FILTER_OP_EXT_IN_TX_DEST_LEN 4
 /* enum: request default behaviour (based on filter type) */
 #define          MC_CMD_FILTER_OP_EXT_IN_TX_DEST_DEFAULT  0xffffffff
 #define        MC_CMD_FILTER_OP_EXT_IN_TX_DEST_MAC_LBN 0
@@ -6810,11 +8042,13 @@
 #define       MC_CMD_FILTER_OP_EXT_IN_IP_PROTO_LEN 2
 /* Firmware defined register 0 to match (reserved; set to 0) */
 #define       MC_CMD_FILTER_OP_EXT_IN_FWDEF0_OFST 68
+#define       MC_CMD_FILTER_OP_EXT_IN_FWDEF0_LEN 4
 /* VNI (for VXLAN/Geneve, when IP protocol is UDP) or VSID (for NVGRE, when IP
  * protocol is GRE) to match (as bytes in network order; set last byte to 0 for
  * VXLAN/NVGRE, or 1 for Geneve)
  */
 #define       MC_CMD_FILTER_OP_EXT_IN_VNI_OR_VSID_OFST 72
+#define       MC_CMD_FILTER_OP_EXT_IN_VNI_OR_VSID_LEN 4
 #define        MC_CMD_FILTER_OP_EXT_IN_VNI_VALUE_LBN 0
 #define        MC_CMD_FILTER_OP_EXT_IN_VNI_VALUE_WIDTH 24
 #define        MC_CMD_FILTER_OP_EXT_IN_VNI_TYPE_LBN 24
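
Per the LBN values above and the field comment, the VNI_OR_VSID dword packs a 24-bit identifier in bits 0..23 with a type selector in the top byte. A hedged sketch of composing it in host order, before the usual little-endian MCDI encoding (the function name is illustrative):

    #include <stdint.h>

    /* Illustrative packing: 24-bit VNI/VSID in bits 0..23, type
     * selector (last byte: 0 for VXLAN/NVGRE, 1 for Geneve) above it.
     */
    static uint32_t pack_vni_or_vsid(uint32_t id, uint8_t type)
    {
        return (id & 0xffffffu) |
               ((uint32_t)type << MC_CMD_FILTER_OP_EXT_IN_VNI_TYPE_LBN);
    }
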
@@ -6880,10 +8114,12 @@
  * to 0)
  */
 #define       MC_CMD_FILTER_OP_EXT_IN_IFRM_FWDEF0_OFST 132
+#define       MC_CMD_FILTER_OP_EXT_IN_IFRM_FWDEF0_LEN 4
 /* VXLAN/NVGRE inner frame Firmware defined register 1 to match (reserved; set
  * to 0)
  */
 #define       MC_CMD_FILTER_OP_EXT_IN_IFRM_FWDEF1_OFST 136
+#define       MC_CMD_FILTER_OP_EXT_IN_IFRM_FWDEF1_LEN 4
 /* VXLAN/NVGRE inner frame source IP address to match (as bytes in network
  * order; set last 12 bytes to 0 for IPv4 address)
  */
@@ -6899,6 +8135,7 @@
 #define    MC_CMD_FILTER_OP_OUT_LEN 12
 /* identifies the type of operation requested */
 #define       MC_CMD_FILTER_OP_OUT_OP_OFST 0
+#define       MC_CMD_FILTER_OP_OUT_OP_LEN 4
 /*            Enum values, see field(s): */
 /*               MC_CMD_FILTER_OP_IN/OP */
 /* Returned filter handle (for insert / subscribe operations). Note that these
@@ -6918,6 +8155,7 @@
 #define    MC_CMD_FILTER_OP_EXT_OUT_LEN 12
 /* identifies the type of operation requested */
 #define       MC_CMD_FILTER_OP_EXT_OUT_OP_OFST 0
+#define       MC_CMD_FILTER_OP_EXT_OUT_OP_LEN 4
 /*            Enum values, see field(s): */
 /*               MC_CMD_FILTER_OP_EXT_IN/OP */
 /* Returned filter handle (for insert / subscribe operations). Note that these
@@ -6944,6 +8182,7 @@
 #define    MC_CMD_GET_PARSER_DISP_INFO_IN_LEN 4
 /* identifies the type of operation requested */
 #define       MC_CMD_GET_PARSER_DISP_INFO_IN_OP_OFST 0
+#define       MC_CMD_GET_PARSER_DISP_INFO_IN_OP_LEN 4
 /* enum: read the list of supported RX filter matches */
 #define          MC_CMD_GET_PARSER_DISP_INFO_IN_OP_GET_SUPPORTED_RX_MATCHES  0x1
 /* enum: read flags indicating restrictions on filter insertion for the calling
@@ -6966,10 +8205,12 @@
 #define    MC_CMD_GET_PARSER_DISP_INFO_OUT_LEN(num) (8+4*(num))
 /* identifies the type of operation requested */
 #define       MC_CMD_GET_PARSER_DISP_INFO_OUT_OP_OFST 0
+#define       MC_CMD_GET_PARSER_DISP_INFO_OUT_OP_LEN 4
 /*            Enum values, see field(s): */
 /*               MC_CMD_GET_PARSER_DISP_INFO_IN/OP */
 /* number of supported match types */
 #define       MC_CMD_GET_PARSER_DISP_INFO_OUT_NUM_SUPPORTED_MATCHES_OFST 4
+#define       MC_CMD_GET_PARSER_DISP_INFO_OUT_NUM_SUPPORTED_MATCHES_LEN 4
 /* array of supported match types (valid MATCH_FIELDS values for
  * MC_CMD_FILTER_OP) sorted in decreasing priority order
  */
@@ -6982,10 +8223,12 @@
 #define    MC_CMD_GET_PARSER_DISP_RESTRICTIONS_OUT_LEN 8
 /* identifies the type of operation requested */
 #define       MC_CMD_GET_PARSER_DISP_RESTRICTIONS_OUT_OP_OFST 0
+#define       MC_CMD_GET_PARSER_DISP_RESTRICTIONS_OUT_OP_LEN 4
 /*            Enum values, see field(s): */
 /*               MC_CMD_GET_PARSER_DISP_INFO_IN/OP */
 /* bitfield of filter insertion restrictions */
 #define       MC_CMD_GET_PARSER_DISP_RESTRICTIONS_OUT_RESTRICTION_FLAGS_OFST 4
+#define       MC_CMD_GET_PARSER_DISP_RESTRICTIONS_OUT_RESTRICTION_FLAGS_LEN 4
 #define        MC_CMD_GET_PARSER_DISP_RESTRICTIONS_OUT_DST_IP_MCAST_ONLY_LBN 0
 #define        MC_CMD_GET_PARSER_DISP_RESTRICTIONS_OUT_DST_IP_MCAST_ONLY_WIDTH 1
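
Throughout this header, _LBN/_WIDTH pairs describe bitfields within a dword. A minimal sketch of testing the restriction flag defined just above, assuming a host-side helper (the name is illustrative):

    #include <stdbool.h>
    #include <stdint.h>

    /* Illustrative: extract a field from RESTRICTION_FLAGS using its
     * _LBN (least-significant bit number) and _WIDTH defines.
     */
    static bool dst_ip_mcast_only(uint32_t restriction_flags)
    {
        uint32_t lbn = MC_CMD_GET_PARSER_DISP_RESTRICTIONS_OUT_DST_IP_MCAST_ONLY_LBN;
        uint32_t width = MC_CMD_GET_PARSER_DISP_RESTRICTIONS_OUT_DST_IP_MCAST_ONLY_WIDTH;

        return (restriction_flags >> lbn) & ((1u << width) - 1);
    }
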
 
@@ -7005,11 +8248,16 @@
 #define    MC_CMD_PARSER_DISP_RW_IN_LEN 32
 /* identifies the target of the operation */
 #define       MC_CMD_PARSER_DISP_RW_IN_TARGET_OFST 0
+#define       MC_CMD_PARSER_DISP_RW_IN_TARGET_LEN 4
 /* enum: RX dispatcher CPU */
 #define          MC_CMD_PARSER_DISP_RW_IN_RX_DICPU  0x0
 /* enum: TX dispatcher CPU */
 #define          MC_CMD_PARSER_DISP_RW_IN_TX_DICPU  0x1
-/* enum: Lookup engine (with original metadata format) */
+/* enum: Lookup engine (with original metadata format). Deprecated; used only
+ * by cmdclient as a fallback for very old Huntington firmware, and not
+ * supported in firmware beyond v6.4.0.1005. Use LUE_VERSIONED_METADATA
+ * instead.
+ */
 #define          MC_CMD_PARSER_DISP_RW_IN_LUE  0x2
 /* enum: Lookup engine (with requested metadata format) */
 #define          MC_CMD_PARSER_DISP_RW_IN_LUE_VERSIONED_METADATA  0x3
@@ -7021,26 +8269,33 @@
 #define          MC_CMD_PARSER_DISP_RW_IN_MISC_STATE  0x5
 /* identifies the type of operation requested */
 #define       MC_CMD_PARSER_DISP_RW_IN_OP_OFST 4
-/* enum: read a word of DICPU DMEM or a LUE entry */
+#define       MC_CMD_PARSER_DISP_RW_IN_OP_LEN 4
+/* enum: Read a word of DICPU DMEM or a LUE entry. */
 #define          MC_CMD_PARSER_DISP_RW_IN_READ  0x0
-/* enum: write a word of DICPU DMEM or a LUE entry */
+/* enum: Write a word of DICPU DMEM or a LUE entry. */
 #define          MC_CMD_PARSER_DISP_RW_IN_WRITE  0x1
-/* enum: read-modify-write a word of DICPU DMEM (not valid for LUE) */
+/* enum: Read-modify-write a word of DICPU DMEM (not valid for LUE). */
 #define          MC_CMD_PARSER_DISP_RW_IN_RMW  0x2
 /* data memory address (DICPU targets) or LUE index (LUE targets) */
 #define       MC_CMD_PARSER_DISP_RW_IN_ADDRESS_OFST 8
+#define       MC_CMD_PARSER_DISP_RW_IN_ADDRESS_LEN 4
 /* selector (for MISC_STATE target) */
 #define       MC_CMD_PARSER_DISP_RW_IN_SELECTOR_OFST 8
+#define       MC_CMD_PARSER_DISP_RW_IN_SELECTOR_LEN 4
 /* enum: Port to datapath mapping */
 #define          MC_CMD_PARSER_DISP_RW_IN_PORT_DP_MAPPING  0x1
 /* value to write (for DMEM writes) */
 #define       MC_CMD_PARSER_DISP_RW_IN_DMEM_WRITE_VALUE_OFST 12
+#define       MC_CMD_PARSER_DISP_RW_IN_DMEM_WRITE_VALUE_LEN 4
 /* XOR value (for DMEM read-modify-writes: new = (old & mask) ^ value) */
 #define       MC_CMD_PARSER_DISP_RW_IN_DMEM_RMW_XOR_VALUE_OFST 12
+#define       MC_CMD_PARSER_DISP_RW_IN_DMEM_RMW_XOR_VALUE_LEN 4
 /* AND mask (for DMEM read-modify-writes: new = (old & mask) ^ value) */
 #define       MC_CMD_PARSER_DISP_RW_IN_DMEM_RMW_AND_MASK_OFST 16
+#define       MC_CMD_PARSER_DISP_RW_IN_DMEM_RMW_AND_MASK_LEN 4
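
The read-modify-write rule quoted above (new = (old & mask) ^ value) lets a caller update a single field: the AND mask clears the old bits and the XOR value injects the new ones. A sketch under that definition, with illustrative names:

    #include <stdint.h>

    /* Illustrative: choose RMW parameters that replace bits
     * [lbn, lbn + width) of a DMEM word with new_bits, given
     * new = (old & mask) ^ value.
     */
    static void dmem_rmw_params(uint32_t lbn, uint32_t width, uint32_t new_bits,
                                uint32_t *and_mask, uint32_t *xor_value)
    {
        uint32_t field_mask =
            (width < 32 ? (1u << width) - 1 : 0xffffffffu) << lbn;

        *and_mask = ~field_mask;                      /* clear the old field */
        *xor_value = (new_bits << lbn) & field_mask;  /* insert the new one */
    }
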
 /* metadata format (for LUE reads using LUE_VERSIONED_METADATA) */
 #define       MC_CMD_PARSER_DISP_RW_IN_LUE_READ_METADATA_VERSION_OFST 12
+#define       MC_CMD_PARSER_DISP_RW_IN_LUE_READ_METADATA_VERSION_LEN 4
 /* value to write (for LUE writes) */
 #define       MC_CMD_PARSER_DISP_RW_IN_LUE_WRITE_VALUE_OFST 12
 #define       MC_CMD_PARSER_DISP_RW_IN_LUE_WRITE_VALUE_LEN 20
@@ -7049,6 +8304,7 @@
 #define    MC_CMD_PARSER_DISP_RW_OUT_LEN 52
 /* value read (for DMEM reads) */
 #define       MC_CMD_PARSER_DISP_RW_OUT_DMEM_READ_VALUE_OFST 0
+#define       MC_CMD_PARSER_DISP_RW_OUT_DMEM_READ_VALUE_LEN 4
 /* value read (for LUE reads) */
 #define       MC_CMD_PARSER_DISP_RW_OUT_LUE_READ_VALUE_OFST 0
 #define       MC_CMD_PARSER_DISP_RW_OUT_LUE_READ_VALUE_LEN 20
@@ -7093,6 +8349,7 @@
 #define    MC_CMD_SET_PF_COUNT_IN_LEN 4
 /* New number of PFs on the device. */
 #define       MC_CMD_SET_PF_COUNT_IN_PF_COUNT_OFST 0
+#define       MC_CMD_SET_PF_COUNT_IN_PF_COUNT_LEN 4
 
 /* MC_CMD_SET_PF_COUNT_OUT msgresponse */
 #define    MC_CMD_SET_PF_COUNT_OUT_LEN 0
@@ -7113,6 +8370,7 @@
 #define    MC_CMD_GET_PORT_ASSIGNMENT_OUT_LEN 4
 /* Identifies the port assignment for this function. */
 #define       MC_CMD_GET_PORT_ASSIGNMENT_OUT_PORT_OFST 0
+#define       MC_CMD_GET_PORT_ASSIGNMENT_OUT_PORT_LEN 4
 
 
 /***********************************/
@@ -7127,6 +8385,7 @@
 #define    MC_CMD_SET_PORT_ASSIGNMENT_IN_LEN 4
 /* Identifies the port assignment for this function. */
 #define       MC_CMD_SET_PORT_ASSIGNMENT_IN_PORT_OFST 0
+#define       MC_CMD_SET_PORT_ASSIGNMENT_IN_PORT_LEN 4
 
 /* MC_CMD_SET_PORT_ASSIGNMENT_OUT msgresponse */
 #define    MC_CMD_SET_PORT_ASSIGNMENT_OUT_LEN 0
@@ -7144,8 +8403,10 @@
 #define    MC_CMD_ALLOC_VIS_IN_LEN 8
 /* The minimum number of VIs that is acceptable */
 #define       MC_CMD_ALLOC_VIS_IN_MIN_VI_COUNT_OFST 0
+#define       MC_CMD_ALLOC_VIS_IN_MIN_VI_COUNT_LEN 4
 /* The maximum number of VIs that would be useful */
 #define       MC_CMD_ALLOC_VIS_IN_MAX_VI_COUNT_OFST 4
+#define       MC_CMD_ALLOC_VIS_IN_MAX_VI_COUNT_LEN 4
 
 /* MC_CMD_ALLOC_VIS_OUT msgresponse: Huntington-compatible VI_ALLOC request.
  * Use extended version in new code.
@@ -7153,21 +8414,26 @@
 #define    MC_CMD_ALLOC_VIS_OUT_LEN 8
 /* The number of VIs allocated on this function */
 #define       MC_CMD_ALLOC_VIS_OUT_VI_COUNT_OFST 0
+#define       MC_CMD_ALLOC_VIS_OUT_VI_COUNT_LEN 4
 /* The base absolute VI number allocated to this function. Required to
  * correctly interpret wakeup events.
  */
 #define       MC_CMD_ALLOC_VIS_OUT_VI_BASE_OFST 4
+#define       MC_CMD_ALLOC_VIS_OUT_VI_BASE_LEN 4
 
 /* MC_CMD_ALLOC_VIS_EXT_OUT msgresponse */
 #define    MC_CMD_ALLOC_VIS_EXT_OUT_LEN 12
 /* The number of VIs allocated on this function */
 #define       MC_CMD_ALLOC_VIS_EXT_OUT_VI_COUNT_OFST 0
+#define       MC_CMD_ALLOC_VIS_EXT_OUT_VI_COUNT_LEN 4
 /* The base absolute VI number allocated to this function. Required to
  * correctly interpret wakeup events.
  */
 #define       MC_CMD_ALLOC_VIS_EXT_OUT_VI_BASE_OFST 4
+#define       MC_CMD_ALLOC_VIS_EXT_OUT_VI_BASE_LEN 4
 /* Function's port vi_shift value (always 0 on Huntington) */
 #define       MC_CMD_ALLOC_VIS_EXT_OUT_VI_SHIFT_OFST 8
+#define       MC_CMD_ALLOC_VIS_EXT_OUT_VI_SHIFT_LEN 4
 
 
 /***********************************/
@@ -7201,15 +8467,20 @@
 #define    MC_CMD_GET_SRIOV_CFG_OUT_LEN 20
 /* Number of VFs currently enabled. */
 #define       MC_CMD_GET_SRIOV_CFG_OUT_VF_CURRENT_OFST 0
+#define       MC_CMD_GET_SRIOV_CFG_OUT_VF_CURRENT_LEN 4
 /* Max number of VFs before SR-IOV stride and offset may need to be changed. */
 #define       MC_CMD_GET_SRIOV_CFG_OUT_VF_MAX_OFST 4
+#define       MC_CMD_GET_SRIOV_CFG_OUT_VF_MAX_LEN 4
 #define       MC_CMD_GET_SRIOV_CFG_OUT_FLAGS_OFST 8
+#define       MC_CMD_GET_SRIOV_CFG_OUT_FLAGS_LEN 4
 #define        MC_CMD_GET_SRIOV_CFG_OUT_VF_ENABLED_LBN 0
 #define        MC_CMD_GET_SRIOV_CFG_OUT_VF_ENABLED_WIDTH 1
 /* RID offset of first VF from PF. */
 #define       MC_CMD_GET_SRIOV_CFG_OUT_VF_OFFSET_OFST 12
+#define       MC_CMD_GET_SRIOV_CFG_OUT_VF_OFFSET_LEN 4
 /* RID offset of each subsequent VF from the previous. */
 #define       MC_CMD_GET_SRIOV_CFG_OUT_VF_STRIDE_OFST 16
+#define       MC_CMD_GET_SRIOV_CFG_OUT_VF_STRIDE_LEN 4
 
 
 /***********************************/
@@ -7224,19 +8495,24 @@
 #define    MC_CMD_SET_SRIOV_CFG_IN_LEN 20
 /* Number of VFs currently enabled. */
 #define       MC_CMD_SET_SRIOV_CFG_IN_VF_CURRENT_OFST 0
+#define       MC_CMD_SET_SRIOV_CFG_IN_VF_CURRENT_LEN 4
 /* Max number of VFs before SR-IOV stride and offset may need to be changed. */
 #define       MC_CMD_SET_SRIOV_CFG_IN_VF_MAX_OFST 4
+#define       MC_CMD_SET_SRIOV_CFG_IN_VF_MAX_LEN 4
 #define       MC_CMD_SET_SRIOV_CFG_IN_FLAGS_OFST 8
+#define       MC_CMD_SET_SRIOV_CFG_IN_FLAGS_LEN 4
 #define        MC_CMD_SET_SRIOV_CFG_IN_VF_ENABLED_LBN 0
 #define        MC_CMD_SET_SRIOV_CFG_IN_VF_ENABLED_WIDTH 1
 /* RID offset of first VF from PF, or 0 for no change, or
  * MC_CMD_RESOURCE_INSTANCE_ANY to allow the system to allocate an offset.
  */
 #define       MC_CMD_SET_SRIOV_CFG_IN_VF_OFFSET_OFST 12
+#define       MC_CMD_SET_SRIOV_CFG_IN_VF_OFFSET_LEN 4
 /* RID offset of each subsequent VF from the previous, 0 for no change, or
  * MC_CMD_RESOURCE_INSTANCE_ANY to allow the system to allocate a stride.
  */
 #define       MC_CMD_SET_SRIOV_CFG_IN_VF_STRIDE_OFST 16
+#define       MC_CMD_SET_SRIOV_CFG_IN_VF_STRIDE_LEN 4
 
 /* MC_CMD_SET_SRIOV_CFG_OUT msgresponse */
 #define    MC_CMD_SET_SRIOV_CFG_OUT_LEN 0
@@ -7258,12 +8534,15 @@
 #define    MC_CMD_GET_VI_ALLOC_INFO_OUT_LEN 12
 /* The number of VIs allocated on this function */
 #define       MC_CMD_GET_VI_ALLOC_INFO_OUT_VI_COUNT_OFST 0
+#define       MC_CMD_GET_VI_ALLOC_INFO_OUT_VI_COUNT_LEN 4
 /* The base absolute VI number allocated to this function. Required to
  * correctly interpret wakeup events.
  */
 #define       MC_CMD_GET_VI_ALLOC_INFO_OUT_VI_BASE_OFST 4
+#define       MC_CMD_GET_VI_ALLOC_INFO_OUT_VI_BASE_LEN 4
 /* Function's port vi_shift value (always 0 on Huntington) */
 #define       MC_CMD_GET_VI_ALLOC_INFO_OUT_VI_SHIFT_OFST 8
+#define       MC_CMD_GET_VI_ALLOC_INFO_OUT_VI_SHIFT_LEN 4
 
 
 /***********************************/
@@ -7278,6 +8557,7 @@
 #define    MC_CMD_DUMP_VI_STATE_IN_LEN 4
 /* The VI number to query. */
 #define       MC_CMD_DUMP_VI_STATE_IN_VI_NUMBER_OFST 0
+#define       MC_CMD_DUMP_VI_STATE_IN_VI_NUMBER_LEN 4
 
 /* MC_CMD_DUMP_VI_STATE_OUT msgresponse */
 #define    MC_CMD_DUMP_VI_STATE_OUT_LEN 96
@@ -7311,6 +8591,7 @@
 #define       MC_CMD_DUMP_VI_STATE_OUT_VI_EV_TIMER_RAW_HI_OFST 24
 /* Combined metadata field. */
 #define       MC_CMD_DUMP_VI_STATE_OUT_VI_EV_META_OFST 28
+#define       MC_CMD_DUMP_VI_STATE_OUT_VI_EV_META_LEN 4
 #define        MC_CMD_DUMP_VI_STATE_OUT_VI_EV_META_BUFS_BASE_LBN 0
 #define        MC_CMD_DUMP_VI_STATE_OUT_VI_EV_META_BUFS_BASE_WIDTH 16
 #define        MC_CMD_DUMP_VI_STATE_OUT_VI_EV_META_BUFS_NPAGES_LBN 16
@@ -7392,6 +8673,7 @@
 #define    MC_CMD_ALLOC_PIOBUF_OUT_LEN 4
 /* Handle for allocated push I/O buffer. */
 #define       MC_CMD_ALLOC_PIOBUF_OUT_PIOBUF_HANDLE_OFST 0
+#define       MC_CMD_ALLOC_PIOBUF_OUT_PIOBUF_HANDLE_LEN 4
 
 
 /***********************************/
@@ -7406,6 +8688,7 @@
 #define    MC_CMD_FREE_PIOBUF_IN_LEN 4
 /* Handle for allocated push I/O buffer. */
 #define       MC_CMD_FREE_PIOBUF_IN_PIOBUF_HANDLE_OFST 0
+#define       MC_CMD_FREE_PIOBUF_IN_PIOBUF_HANDLE_LEN 4
 
 /* MC_CMD_FREE_PIOBUF_OUT msgresponse */
 #define    MC_CMD_FREE_PIOBUF_OUT_LEN 0
@@ -7423,6 +8706,7 @@
 #define    MC_CMD_GET_VI_TLP_PROCESSING_IN_LEN 4
 /* VI number to get information for. */
 #define       MC_CMD_GET_VI_TLP_PROCESSING_IN_INSTANCE_OFST 0
+#define       MC_CMD_GET_VI_TLP_PROCESSING_IN_INSTANCE_LEN 4
 
 /* MC_CMD_GET_VI_TLP_PROCESSING_OUT msgresponse */
 #define    MC_CMD_GET_VI_TLP_PROCESSING_OUT_LEN 4
@@ -7445,6 +8729,7 @@
 #define       MC_CMD_GET_VI_TLP_PROCESSING_OUT_TPH_ON_LBN 19
 #define       MC_CMD_GET_VI_TLP_PROCESSING_OUT_TPH_ON_WIDTH 1
 #define       MC_CMD_GET_VI_TLP_PROCESSING_OUT_DATA_OFST 0
+#define       MC_CMD_GET_VI_TLP_PROCESSING_OUT_DATA_LEN 4
 
 
 /***********************************/
@@ -7459,6 +8744,7 @@
 #define    MC_CMD_SET_VI_TLP_PROCESSING_IN_LEN 8
 /* VI number to set information for. */
 #define       MC_CMD_SET_VI_TLP_PROCESSING_IN_INSTANCE_OFST 0
+#define       MC_CMD_SET_VI_TLP_PROCESSING_IN_INSTANCE_LEN 4
 /* Transaction processing steering hint 1 for use with the Rx Queue. */
 #define       MC_CMD_SET_VI_TLP_PROCESSING_IN_TPH_TAG1_RX_OFST 4
 #define       MC_CMD_SET_VI_TLP_PROCESSING_IN_TPH_TAG1_RX_LEN 1
@@ -7478,6 +8764,7 @@
 #define       MC_CMD_SET_VI_TLP_PROCESSING_IN_TPH_ON_LBN 51
 #define       MC_CMD_SET_VI_TLP_PROCESSING_IN_TPH_ON_WIDTH 1
 #define       MC_CMD_SET_VI_TLP_PROCESSING_IN_DATA_OFST 4
+#define       MC_CMD_SET_VI_TLP_PROCESSING_IN_DATA_LEN 4
 
 /* MC_CMD_SET_VI_TLP_PROCESSING_OUT msgresponse */
 #define    MC_CMD_SET_VI_TLP_PROCESSING_OUT_LEN 0
@@ -7494,6 +8781,7 @@
 /* MC_CMD_GET_TLP_PROCESSING_GLOBALS_IN msgrequest */
 #define    MC_CMD_GET_TLP_PROCESSING_GLOBALS_IN_LEN 4
 #define       MC_CMD_GET_TLP_PROCESSING_GLOBALS_IN_TLP_GLOBAL_CATEGORY_OFST 0
+#define       MC_CMD_GET_TLP_PROCESSING_GLOBALS_IN_TLP_GLOBAL_CATEGORY_LEN 4
 /* enum: MISC. */
 #define          MC_CMD_GET_TLP_PROCESSING_GLOBALS_IN_TLP_GLOBAL_CATEGORY_MISC  0x0
 /* enum: IDO. */
@@ -7506,10 +8794,12 @@
 /* MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT msgresponse */
 #define    MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_LEN 8
 #define       MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_GLOBAL_CATEGORY_OFST 0
+#define       MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_GLOBAL_CATEGORY_LEN 4
 /*            Enum values, see field(s): */
 /*               MC_CMD_GET_TLP_PROCESSING_GLOBALS_IN/TLP_GLOBAL_CATEGORY */
 /* Amalgamated TLP info word. */
 #define       MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_WORD_OFST 4
+#define       MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_WORD_LEN 4
 #define        MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_MISC_WTAG_EN_LBN 0
 #define        MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_MISC_WTAG_EN_WIDTH 1
 #define        MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_MISC_SPARE_LBN 1
@@ -7557,10 +8847,12 @@
 /* MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN msgrequest */
 #define    MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_LEN 8
 #define       MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_GLOBAL_CATEGORY_OFST 0
+#define       MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_GLOBAL_CATEGORY_LEN 4
 /*            Enum values, see field(s): */
 /*               MC_CMD_GET_TLP_PROCESSING_GLOBALS/MC_CMD_GET_TLP_PROCESSING_GLOBALS_IN/TLP_GLOBAL_CATEGORY */
 /* Amalgamated TLP info word. */
 #define       MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_WORD_OFST 4
+#define       MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_WORD_LEN 4
 #define        MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_MISC_WTAG_EN_LBN 0
 #define        MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_MISC_WTAG_EN_WIDTH 1
 #define        MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_IDO_DL_EN_LBN 0
@@ -7627,6 +8919,7 @@
  * in a command from the host.)
  */
 #define       MC_CMD_SATELLITE_DOWNLOAD_IN_PHASE_OFST 0
+#define       MC_CMD_SATELLITE_DOWNLOAD_IN_PHASE_LEN 4
 #define          MC_CMD_SATELLITE_DOWNLOAD_IN_PHASE_IDLE     0x0 /* enum */
 #define          MC_CMD_SATELLITE_DOWNLOAD_IN_PHASE_RESET    0x1 /* enum */
 #define          MC_CMD_SATELLITE_DOWNLOAD_IN_PHASE_IMEMS    0x2 /* enum */
@@ -7636,6 +8929,7 @@
  * mc_flash_layout.h.)
  */
 #define       MC_CMD_SATELLITE_DOWNLOAD_IN_TARGET_OFST 4
+#define       MC_CMD_SATELLITE_DOWNLOAD_IN_TARGET_LEN 4
 /* enum: Valid in phase 2 (PHASE_IMEMS) only */
 #define          MC_CMD_SATELLITE_DOWNLOAD_IN_TARGET_TXDI_TEXT  0x0
 /* enum: Valid in phase 2 (PHASE_IMEMS) only */
@@ -7672,12 +8966,14 @@
 #define          MC_CMD_SATELLITE_DOWNLOAD_IN_TARGET_ALL  0xffffffff
 /* Chunk ID, or CHUNK_ID_LAST or CHUNK_ID_ABORT */
 #define       MC_CMD_SATELLITE_DOWNLOAD_IN_CHUNK_ID_OFST 8
+#define       MC_CMD_SATELLITE_DOWNLOAD_IN_CHUNK_ID_LEN 4
 /* enum: Last chunk, containing checksum rather than data */
 #define          MC_CMD_SATELLITE_DOWNLOAD_IN_CHUNK_ID_LAST  0xffffffff
 /* enum: Abort download of this item */
 #define          MC_CMD_SATELLITE_DOWNLOAD_IN_CHUNK_ID_ABORT  0xfffffffe
 /* Length of this chunk in bytes */
 #define       MC_CMD_SATELLITE_DOWNLOAD_IN_CHUNK_LEN_OFST 12
+#define       MC_CMD_SATELLITE_DOWNLOAD_IN_CHUNK_LEN_LEN 4
 /* Data for this chunk */
 #define       MC_CMD_SATELLITE_DOWNLOAD_IN_CHUNK_DATA_OFST 16
 #define       MC_CMD_SATELLITE_DOWNLOAD_IN_CHUNK_DATA_LEN 4
@@ -7688,8 +8984,10 @@
 #define    MC_CMD_SATELLITE_DOWNLOAD_OUT_LEN 8
 /* Same as MC_CMD_ERR field, but included as 0 in success cases */
 #define       MC_CMD_SATELLITE_DOWNLOAD_OUT_RESULT_OFST 0
+#define       MC_CMD_SATELLITE_DOWNLOAD_OUT_RESULT_LEN 4
 /* Extra status information */
 #define       MC_CMD_SATELLITE_DOWNLOAD_OUT_INFO_OFST 4
+#define       MC_CMD_SATELLITE_DOWNLOAD_OUT_INFO_LEN 4
 /* enum: Code download OK, completed. */
 #define          MC_CMD_SATELLITE_DOWNLOAD_OUT_OK_COMPLETE  0x0
 /* enum: Code download aborted as requested. */
@@ -7726,6 +9024,7 @@
 #define    MC_CMD_GET_CAPABILITIES_OUT_LEN 20
 /* First word of flags. */
 #define       MC_CMD_GET_CAPABILITIES_OUT_FLAGS1_OFST 0
+#define       MC_CMD_GET_CAPABILITIES_OUT_FLAGS1_LEN 4
 #define        MC_CMD_GET_CAPABILITIES_OUT_VPORT_RECONFIGURE_LBN 3
 #define        MC_CMD_GET_CAPABILITIES_OUT_VPORT_RECONFIGURE_WIDTH 1
 #define        MC_CMD_GET_CAPABILITIES_OUT_TX_STRIPING_LBN 4
@@ -7793,6 +9092,8 @@
 #define          MC_CMD_GET_CAPABILITIES_OUT_RXDP_LOW_LATENCY  0x1
 /* enum: Packed stream RXDP firmware */
 #define          MC_CMD_GET_CAPABILITIES_OUT_RXDP_PACKED_STREAM  0x2
+/* enum: Rules engine RXDP firmware */
+#define          MC_CMD_GET_CAPABILITIES_OUT_RXDP_RULES_ENGINE  0x5
 /* enum: BIST RXDP firmware */
 #define          MC_CMD_GET_CAPABILITIES_OUT_RXDP_BIST  0x10a
 /* enum: RXDP Test firmware image 1 */
@@ -7813,6 +9114,8 @@
 #define          MC_CMD_GET_CAPABILITIES_OUT_RXDP_TEST_FW_DISABLE_DL  0x108
 /* enum: RXDP Test firmware image 9 */
 #define          MC_CMD_GET_CAPABILITIES_OUT_RXDP_TEST_FW_DOORBELL_DELAY  0x10b
+/* enum: RXDP Test firmware image 10 */
+#define          MC_CMD_GET_CAPABILITIES_OUT_RXDP_TEST_FW_SLOW  0x10c
 /* TxDPCPU firmware id. */
 #define       MC_CMD_GET_CAPABILITIES_OUT_TX_DPCPU_FW_ID_OFST 6
 #define       MC_CMD_GET_CAPABILITIES_OUT_TX_DPCPU_FW_ID_LEN 2
@@ -7822,6 +9125,8 @@
 #define          MC_CMD_GET_CAPABILITIES_OUT_TXDP_LOW_LATENCY  0x1
 /* enum: High packet rate TXDP firmware */
 #define          MC_CMD_GET_CAPABILITIES_OUT_TXDP_HIGH_PACKET_RATE  0x3
+/* enum: Rules engine TXDP firmware */
+#define          MC_CMD_GET_CAPABILITIES_OUT_TXDP_RULES_ENGINE  0x5
 /* enum: BIST TXDP firmware */
 #define          MC_CMD_GET_CAPABILITIES_OUT_TXDP_BIST  0x12d
 /* enum: TXDP Test firmware image 1 */
@@ -7848,7 +9153,9 @@
  * (Huntington development only)
  */
 #define          MC_CMD_GET_CAPABILITIES_OUT_RXPD_FW_TYPE_SIENA_COMPAT  0x2
-/* enum: Virtual switching (full feature) RX PD production firmware */
+/* enum: Full featured RX PD production firmware */
+#define          MC_CMD_GET_CAPABILITIES_OUT_RXPD_FW_TYPE_FULL_FEATURED  0x3
+/* enum: (deprecated original name for the FULL_FEATURED variant) */
 #define          MC_CMD_GET_CAPABILITIES_OUT_RXPD_FW_TYPE_VSWITCH  0x3
 /* enum: siena_compat variant RX PD firmware using PM rather than MAC
  * (Huntington development only)
@@ -7864,6 +9171,8 @@
 #define          MC_CMD_GET_CAPABILITIES_OUT_RXPD_FW_TYPE_LAYER2_PERF  0x7
 /* enum: Rules engine RX PD production firmware */
 #define          MC_CMD_GET_CAPABILITIES_OUT_RXPD_FW_TYPE_RULES_ENGINE  0x8
+/* enum: reserved value - do not use (bug69716) */
+#define          MC_CMD_GET_CAPABILITIES_OUT_RXPD_FW_TYPE_RESERVED_9  0x9
 /* enum: RX PD firmware for GUE parsing prototype (Medford development only) */
 #define          MC_CMD_GET_CAPABILITIES_OUT_RXPD_FW_TYPE_TESTFW_GUE_PROTOTYPE  0xe
 /* enum: RX PD firmware parsing but not filtering network overlay tunnel
@@ -7888,7 +9197,9 @@
  * (Huntington development only)
  */
 #define          MC_CMD_GET_CAPABILITIES_OUT_TXPD_FW_TYPE_SIENA_COMPAT  0x2
-/* enum: Virtual switching (full feature) TX PD production firmware */
+/* enum: Full featured TX PD production firmware */
+#define          MC_CMD_GET_CAPABILITIES_OUT_TXPD_FW_TYPE_FULL_FEATURED  0x3
+/* enum: (deprecated original name for the FULL_FEATURED variant) */
 #define          MC_CMD_GET_CAPABILITIES_OUT_TXPD_FW_TYPE_VSWITCH  0x3
 /* enum: siena_compat variant TX PD firmware using PM rather than MAC
  * (Huntington development only)
@@ -7901,12 +9212,16 @@
 #define          MC_CMD_GET_CAPABILITIES_OUT_TXPD_FW_TYPE_LAYER2_PERF  0x7
 /* enum: Rules engine TX PD production firmware */
 #define          MC_CMD_GET_CAPABILITIES_OUT_TXPD_FW_TYPE_RULES_ENGINE  0x8
+/* enum: reserved value - do not use (bug69716) */
+#define          MC_CMD_GET_CAPABILITIES_OUT_TXPD_FW_TYPE_RESERVED_9  0x9
 /* enum: RX PD firmware for GUE parsing prototype (Medford development only) */
 #define          MC_CMD_GET_CAPABILITIES_OUT_TXPD_FW_TYPE_TESTFW_GUE_PROTOTYPE  0xe
 /* Hardware capabilities of NIC */
 #define       MC_CMD_GET_CAPABILITIES_OUT_HW_CAPABILITIES_OFST 12
+#define       MC_CMD_GET_CAPABILITIES_OUT_HW_CAPABILITIES_LEN 4
 /* Licensed capabilities */
 #define       MC_CMD_GET_CAPABILITIES_OUT_LICENSE_CAPABILITIES_OFST 16
+#define       MC_CMD_GET_CAPABILITIES_OUT_LICENSE_CAPABILITIES_LEN 4
 
 /* MC_CMD_GET_CAPABILITIES_V2_IN msgrequest */
 #define    MC_CMD_GET_CAPABILITIES_V2_IN_LEN 0
@@ -7915,6 +9230,7 @@
 #define    MC_CMD_GET_CAPABILITIES_V2_OUT_LEN 72
 /* First word of flags. */
 #define       MC_CMD_GET_CAPABILITIES_V2_OUT_FLAGS1_OFST 0
+#define       MC_CMD_GET_CAPABILITIES_V2_OUT_FLAGS1_LEN 4
 #define        MC_CMD_GET_CAPABILITIES_V2_OUT_VPORT_RECONFIGURE_LBN 3
 #define        MC_CMD_GET_CAPABILITIES_V2_OUT_VPORT_RECONFIGURE_WIDTH 1
 #define        MC_CMD_GET_CAPABILITIES_V2_OUT_TX_STRIPING_LBN 4
@@ -7982,6 +9298,8 @@
 #define          MC_CMD_GET_CAPABILITIES_V2_OUT_RXDP_LOW_LATENCY  0x1
 /* enum: Packed stream RXDP firmware */
 #define          MC_CMD_GET_CAPABILITIES_V2_OUT_RXDP_PACKED_STREAM  0x2
+/* enum: Rules engine RXDP firmware */
+#define          MC_CMD_GET_CAPABILITIES_V2_OUT_RXDP_RULES_ENGINE  0x5
 /* enum: BIST RXDP firmware */
 #define          MC_CMD_GET_CAPABILITIES_V2_OUT_RXDP_BIST  0x10a
 /* enum: RXDP Test firmware image 1 */
@@ -8002,6 +9320,8 @@
 #define          MC_CMD_GET_CAPABILITIES_V2_OUT_RXDP_TEST_FW_DISABLE_DL  0x108
 /* enum: RXDP Test firmware image 9 */
 #define          MC_CMD_GET_CAPABILITIES_V2_OUT_RXDP_TEST_FW_DOORBELL_DELAY  0x10b
+/* enum: RXDP Test firmware image 10 */
+#define          MC_CMD_GET_CAPABILITIES_V2_OUT_RXDP_TEST_FW_SLOW  0x10c
 /* TxDPCPU firmware id. */
 #define       MC_CMD_GET_CAPABILITIES_V2_OUT_TX_DPCPU_FW_ID_OFST 6
 #define       MC_CMD_GET_CAPABILITIES_V2_OUT_TX_DPCPU_FW_ID_LEN 2
@@ -8011,6 +9331,8 @@
 #define          MC_CMD_GET_CAPABILITIES_V2_OUT_TXDP_LOW_LATENCY  0x1
 /* enum: High packet rate TXDP firmware */
 #define          MC_CMD_GET_CAPABILITIES_V2_OUT_TXDP_HIGH_PACKET_RATE  0x3
+/* enum: Rules engine TXDP firmware */
+#define          MC_CMD_GET_CAPABILITIES_V2_OUT_TXDP_RULES_ENGINE  0x5
 /* enum: BIST TXDP firmware */
 #define          MC_CMD_GET_CAPABILITIES_V2_OUT_TXDP_BIST  0x12d
 /* enum: TXDP Test firmware image 1 */
@@ -8037,7 +9359,9 @@
  * (Huntington development only)
  */
 #define          MC_CMD_GET_CAPABILITIES_V2_OUT_RXPD_FW_TYPE_SIENA_COMPAT  0x2
-/* enum: Virtual switching (full feature) RX PD production firmware */
+/* enum: Full featured RX PD production firmware */
+#define          MC_CMD_GET_CAPABILITIES_V2_OUT_RXPD_FW_TYPE_FULL_FEATURED  0x3
+/* enum: (deprecated original name for the FULL_FEATURED variant) */
 #define          MC_CMD_GET_CAPABILITIES_V2_OUT_RXPD_FW_TYPE_VSWITCH  0x3
 /* enum: siena_compat variant RX PD firmware using PM rather than MAC
  * (Huntington development only)
@@ -8053,6 +9377,8 @@
 #define          MC_CMD_GET_CAPABILITIES_V2_OUT_RXPD_FW_TYPE_LAYER2_PERF  0x7
 /* enum: Rules engine RX PD production firmware */
 #define          MC_CMD_GET_CAPABILITIES_V2_OUT_RXPD_FW_TYPE_RULES_ENGINE  0x8
+/* enum: reserved value - do not use (bug69716) */
+#define          MC_CMD_GET_CAPABILITIES_V2_OUT_RXPD_FW_TYPE_RESERVED_9  0x9
 /* enum: RX PD firmware for GUE parsing prototype (Medford development only) */
 #define          MC_CMD_GET_CAPABILITIES_V2_OUT_RXPD_FW_TYPE_TESTFW_GUE_PROTOTYPE  0xe
 /* enum: RX PD firmware parsing but not filtering network overlay tunnel
@@ -8077,7 +9403,9 @@
  * (Huntington development only)
  */
 #define          MC_CMD_GET_CAPABILITIES_V2_OUT_TXPD_FW_TYPE_SIENA_COMPAT  0x2
-/* enum: Virtual switching (full feature) TX PD production firmware */
+/* enum: Full featured TX PD production firmware */
+#define          MC_CMD_GET_CAPABILITIES_V2_OUT_TXPD_FW_TYPE_FULL_FEATURED  0x3
+/* enum: (deprecated original name for the FULL_FEATURED variant) */
 #define          MC_CMD_GET_CAPABILITIES_V2_OUT_TXPD_FW_TYPE_VSWITCH  0x3
 /* enum: siena_compat variant TX PD firmware using PM rather than MAC
  * (Huntington development only)
@@ -8090,14 +9418,19 @@
 #define          MC_CMD_GET_CAPABILITIES_V2_OUT_TXPD_FW_TYPE_LAYER2_PERF  0x7
 /* enum: Rules engine TX PD production firmware */
 #define          MC_CMD_GET_CAPABILITIES_V2_OUT_TXPD_FW_TYPE_RULES_ENGINE  0x8
+/* enum: reserved value - do not use (bug69716) */
+#define          MC_CMD_GET_CAPABILITIES_V2_OUT_TXPD_FW_TYPE_RESERVED_9  0x9
 /* enum: RX PD firmware for GUE parsing prototype (Medford development only) */
 #define          MC_CMD_GET_CAPABILITIES_V2_OUT_TXPD_FW_TYPE_TESTFW_GUE_PROTOTYPE  0xe
 /* Hardware capabilities of NIC */
 #define       MC_CMD_GET_CAPABILITIES_V2_OUT_HW_CAPABILITIES_OFST 12
+#define       MC_CMD_GET_CAPABILITIES_V2_OUT_HW_CAPABILITIES_LEN 4
 /* Licensed capabilities */
 #define       MC_CMD_GET_CAPABILITIES_V2_OUT_LICENSE_CAPABILITIES_OFST 16
+#define       MC_CMD_GET_CAPABILITIES_V2_OUT_LICENSE_CAPABILITIES_LEN 4
 /* Second word of flags. Not present on older firmware (check the length). */
 #define       MC_CMD_GET_CAPABILITIES_V2_OUT_FLAGS2_OFST 20
+#define       MC_CMD_GET_CAPABILITIES_V2_OUT_FLAGS2_LEN 4
 #define        MC_CMD_GET_CAPABILITIES_V2_OUT_TX_TSO_V2_LBN 0
 #define        MC_CMD_GET_CAPABILITIES_V2_OUT_TX_TSO_V2_WIDTH 1
 #define        MC_CMD_GET_CAPABILITIES_V2_OUT_TX_TSO_V2_ENCAP_LBN 1
@@ -8124,6 +9457,18 @@
 #define        MC_CMD_GET_CAPABILITIES_V2_OUT_TX_SNIFF_WIDTH 1
 #define        MC_CMD_GET_CAPABILITIES_V2_OUT_NVRAM_UPDATE_REPORT_VERIFY_RESULT_LBN 12
 #define        MC_CMD_GET_CAPABILITIES_V2_OUT_NVRAM_UPDATE_REPORT_VERIFY_RESULT_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V2_OUT_MCDI_BACKGROUND_LBN 13
+#define        MC_CMD_GET_CAPABILITIES_V2_OUT_MCDI_BACKGROUND_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V2_OUT_MCDI_DB_RETURN_LBN 14
+#define        MC_CMD_GET_CAPABILITIES_V2_OUT_MCDI_DB_RETURN_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V2_OUT_CTPIO_LBN 15
+#define        MC_CMD_GET_CAPABILITIES_V2_OUT_CTPIO_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V2_OUT_TSA_SUPPORT_LBN 16
+#define        MC_CMD_GET_CAPABILITIES_V2_OUT_TSA_SUPPORT_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V2_OUT_TSA_BOUND_LBN 17
+#define        MC_CMD_GET_CAPABILITIES_V2_OUT_TSA_BOUND_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V2_OUT_SF_ADAPTER_AUTHENTICATION_LBN 18
+#define        MC_CMD_GET_CAPABILITIES_V2_OUT_SF_ADAPTER_AUTHENTICATION_WIDTH 1
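
Optional fields such as FLAGS2 exist only when the firmware's response is long enough, so callers gate on the returned length rather than on a structure version. A hedged sketch (the outlen parameter is illustrative):

    #include <stdbool.h>
    #include <stddef.h>

    /* Illustrative: FLAGS2 is valid only if the response covers it. */
    static bool have_flags2(size_t outlen)
    {
        return outlen >= MC_CMD_GET_CAPABILITIES_V2_OUT_FLAGS2_OFST +
                         MC_CMD_GET_CAPABILITIES_V2_OUT_FLAGS2_LEN;
    }
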
 /* Number of FATSOv2 contexts per datapath supported by this NIC. Not present
  * on older firmware (check the length).
  */
@@ -8181,9 +9526,10 @@
 #define       MC_CMD_GET_CAPABILITIES_V2_OUT_SIZE_PIO_BUFF_LEN 2
 
 /* MC_CMD_GET_CAPABILITIES_V3_OUT msgresponse */
-#define    MC_CMD_GET_CAPABILITIES_V3_OUT_LEN 73
+#define    MC_CMD_GET_CAPABILITIES_V3_OUT_LEN 76
 /* First word of flags. */
 #define       MC_CMD_GET_CAPABILITIES_V3_OUT_FLAGS1_OFST 0
+#define       MC_CMD_GET_CAPABILITIES_V3_OUT_FLAGS1_LEN 4
 #define        MC_CMD_GET_CAPABILITIES_V3_OUT_VPORT_RECONFIGURE_LBN 3
 #define        MC_CMD_GET_CAPABILITIES_V3_OUT_VPORT_RECONFIGURE_WIDTH 1
 #define        MC_CMD_GET_CAPABILITIES_V3_OUT_TX_STRIPING_LBN 4
@@ -8251,6 +9597,8 @@
 #define          MC_CMD_GET_CAPABILITIES_V3_OUT_RXDP_LOW_LATENCY  0x1
 /* enum: Packed stream RXDP firmware */
 #define          MC_CMD_GET_CAPABILITIES_V3_OUT_RXDP_PACKED_STREAM  0x2
+/* enum: Rules engine RXDP firmware */
+#define          MC_CMD_GET_CAPABILITIES_V3_OUT_RXDP_RULES_ENGINE  0x5
 /* enum: BIST RXDP firmware */
 #define          MC_CMD_GET_CAPABILITIES_V3_OUT_RXDP_BIST  0x10a
 /* enum: RXDP Test firmware image 1 */
@@ -8271,6 +9619,8 @@
 #define          MC_CMD_GET_CAPABILITIES_V3_OUT_RXDP_TEST_FW_DISABLE_DL  0x108
 /* enum: RXDP Test firmware image 9 */
 #define          MC_CMD_GET_CAPABILITIES_V3_OUT_RXDP_TEST_FW_DOORBELL_DELAY  0x10b
+/* enum: RXDP Test firmware image 10 */
+#define          MC_CMD_GET_CAPABILITIES_V3_OUT_RXDP_TEST_FW_SLOW  0x10c
 /* TxDPCPU firmware id. */
 #define       MC_CMD_GET_CAPABILITIES_V3_OUT_TX_DPCPU_FW_ID_OFST 6
 #define       MC_CMD_GET_CAPABILITIES_V3_OUT_TX_DPCPU_FW_ID_LEN 2
@@ -8280,6 +9630,8 @@
 #define          MC_CMD_GET_CAPABILITIES_V3_OUT_TXDP_LOW_LATENCY  0x1
 /* enum: High packet rate TXDP firmware */
 #define          MC_CMD_GET_CAPABILITIES_V3_OUT_TXDP_HIGH_PACKET_RATE  0x3
+/* enum: Rules engine TXDP firmware */
+#define          MC_CMD_GET_CAPABILITIES_V3_OUT_TXDP_RULES_ENGINE  0x5
 /* enum: BIST TXDP firmware */
 #define          MC_CMD_GET_CAPABILITIES_V3_OUT_TXDP_BIST  0x12d
 /* enum: TXDP Test firmware image 1 */
@@ -8306,7 +9658,9 @@
  * (Huntington development only)
  */
 #define          MC_CMD_GET_CAPABILITIES_V3_OUT_RXPD_FW_TYPE_SIENA_COMPAT  0x2
-/* enum: Virtual switching (full feature) RX PD production firmware */
+/* enum: Full featured RX PD production firmware */
+#define          MC_CMD_GET_CAPABILITIES_V3_OUT_RXPD_FW_TYPE_FULL_FEATURED  0x3
+/* enum: (deprecated original name for the FULL_FEATURED variant) */
 #define          MC_CMD_GET_CAPABILITIES_V3_OUT_RXPD_FW_TYPE_VSWITCH  0x3
 /* enum: siena_compat variant RX PD firmware using PM rather than MAC
  * (Huntington development only)
@@ -8322,6 +9676,8 @@
 #define          MC_CMD_GET_CAPABILITIES_V3_OUT_RXPD_FW_TYPE_LAYER2_PERF  0x7
 /* enum: Rules engine RX PD production firmware */
 #define          MC_CMD_GET_CAPABILITIES_V3_OUT_RXPD_FW_TYPE_RULES_ENGINE  0x8
+/* enum: reserved value - do not use (bug69716) */
+#define          MC_CMD_GET_CAPABILITIES_V3_OUT_RXPD_FW_TYPE_RESERVED_9  0x9
 /* enum: RX PD firmware for GUE parsing prototype (Medford development only) */
 #define          MC_CMD_GET_CAPABILITIES_V3_OUT_RXPD_FW_TYPE_TESTFW_GUE_PROTOTYPE  0xe
 /* enum: RX PD firmware parsing but not filtering network overlay tunnel
@@ -8346,7 +9702,9 @@
  * (Huntington development only)
  */
 #define          MC_CMD_GET_CAPABILITIES_V3_OUT_TXPD_FW_TYPE_SIENA_COMPAT  0x2
-/* enum: Virtual switching (full feature) TX PD production firmware */
+/* enum: Full featured TX PD production firmware */
+#define          MC_CMD_GET_CAPABILITIES_V3_OUT_TXPD_FW_TYPE_FULL_FEATURED  0x3
+/* enum: (deprecated original name for the FULL_FEATURED variant) */
 #define          MC_CMD_GET_CAPABILITIES_V3_OUT_TXPD_FW_TYPE_VSWITCH  0x3
 /* enum: siena_compat variant TX PD firmware using PM rather than MAC
  * (Huntington development only)
@@ -8359,14 +9717,19 @@
 #define          MC_CMD_GET_CAPABILITIES_V3_OUT_TXPD_FW_TYPE_LAYER2_PERF  0x7
 /* enum: Rules engine TX PD production firmware */
 #define          MC_CMD_GET_CAPABILITIES_V3_OUT_TXPD_FW_TYPE_RULES_ENGINE  0x8
+/* enum: reserved value - do not use (bug69716) */
+#define          MC_CMD_GET_CAPABILITIES_V3_OUT_TXPD_FW_TYPE_RESERVED_9  0x9
 /* enum: RX PD firmware for GUE parsing prototype (Medford development only) */
 #define          MC_CMD_GET_CAPABILITIES_V3_OUT_TXPD_FW_TYPE_TESTFW_GUE_PROTOTYPE  0xe
 /* Hardware capabilities of NIC */
 #define       MC_CMD_GET_CAPABILITIES_V3_OUT_HW_CAPABILITIES_OFST 12
+#define       MC_CMD_GET_CAPABILITIES_V3_OUT_HW_CAPABILITIES_LEN 4
 /* Licensed capabilities */
 #define       MC_CMD_GET_CAPABILITIES_V3_OUT_LICENSE_CAPABILITIES_OFST 16
+#define       MC_CMD_GET_CAPABILITIES_V3_OUT_LICENSE_CAPABILITIES_LEN 4
 /* Second word of flags. Not present on older firmware (check the length). */
 #define       MC_CMD_GET_CAPABILITIES_V3_OUT_FLAGS2_OFST 20
+#define       MC_CMD_GET_CAPABILITIES_V3_OUT_FLAGS2_LEN 4
 #define        MC_CMD_GET_CAPABILITIES_V3_OUT_TX_TSO_V2_LBN 0
 #define        MC_CMD_GET_CAPABILITIES_V3_OUT_TX_TSO_V2_WIDTH 1
 #define        MC_CMD_GET_CAPABILITIES_V3_OUT_TX_TSO_V2_ENCAP_LBN 1
@@ -8393,6 +9756,18 @@
 #define        MC_CMD_GET_CAPABILITIES_V3_OUT_TX_SNIFF_WIDTH 1
 #define        MC_CMD_GET_CAPABILITIES_V3_OUT_NVRAM_UPDATE_REPORT_VERIFY_RESULT_LBN 12
 #define        MC_CMD_GET_CAPABILITIES_V3_OUT_NVRAM_UPDATE_REPORT_VERIFY_RESULT_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V3_OUT_MCDI_BACKGROUND_LBN 13
+#define        MC_CMD_GET_CAPABILITIES_V3_OUT_MCDI_BACKGROUND_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V3_OUT_MCDI_DB_RETURN_LBN 14
+#define        MC_CMD_GET_CAPABILITIES_V3_OUT_MCDI_DB_RETURN_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V3_OUT_CTPIO_LBN 15
+#define        MC_CMD_GET_CAPABILITIES_V3_OUT_CTPIO_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V3_OUT_TSA_SUPPORT_LBN 16
+#define        MC_CMD_GET_CAPABILITIES_V3_OUT_TSA_SUPPORT_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V3_OUT_TSA_BOUND_LBN 17
+#define        MC_CMD_GET_CAPABILITIES_V3_OUT_TSA_BOUND_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V3_OUT_SF_ADAPTER_AUTHENTICATION_LBN 18
+#define        MC_CMD_GET_CAPABILITIES_V3_OUT_SF_ADAPTER_AUTHENTICATION_WIDTH 1
 /* Number of FATSOv2 contexts per datapath supported by this NIC. Not present
  * on older firmware (check the length).
  */
@@ -8463,6 +9838,348 @@
 #define          MC_CMD_GET_CAPABILITIES_V3_OUT_VI_WINDOW_MODE_16K  0x1
 /* enum: Each VI occupies 64k. PIO is at offset 4k. CTPIO is at offset 12k. */
 #define          MC_CMD_GET_CAPABILITIES_V3_OUT_VI_WINDOW_MODE_64K  0x2
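
The VI window modes above fix where each VI's PIO and CTPIO apertures sit inside the BAR. A hedged sketch of locating a VI's CTPIO region when the 64k mode is reported (constants taken from the enum comment; names are illustrative):

    #include <stdint.h>

    #define VI_WINDOW_64K   0x10000u  /* each VI occupies 64k */
    #define CTPIO_OFST_64K  0x3000u   /* CTPIO is at offset 12k */

    /* Illustrative: byte offset of VI 'vi's CTPIO aperture from the
     * start of the BAR under VI_WINDOW_MODE_64K.
     */
    static uint64_t ctpio_offset_64k(uint32_t vi)
    {
        return (uint64_t)vi * VI_WINDOW_64K + CTPIO_OFST_64K;
    }
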
+/* Number of vFIFOs per adapter that can be used for VFIFO Stuffing
+ * (SF-115995-SW) in the present configuration of firmware and port mode.
+ */
+#define       MC_CMD_GET_CAPABILITIES_V3_OUT_VFIFO_STUFFING_NUM_VFIFOS_OFST 73
+#define       MC_CMD_GET_CAPABILITIES_V3_OUT_VFIFO_STUFFING_NUM_VFIFOS_LEN 1
+/* Number of buffers per adapter that can be used for VFIFO Stuffing
+ * (SF-115995-SW) in the present configuration of firmware and port mode.
+ */
+#define       MC_CMD_GET_CAPABILITIES_V3_OUT_VFIFO_STUFFING_NUM_CP_BUFFERS_OFST 74
+#define       MC_CMD_GET_CAPABILITIES_V3_OUT_VFIFO_STUFFING_NUM_CP_BUFFERS_LEN 2
+
+/* MC_CMD_GET_CAPABILITIES_V4_OUT msgresponse */
+#define    MC_CMD_GET_CAPABILITIES_V4_OUT_LEN 78
+/* First word of flags. */
+#define       MC_CMD_GET_CAPABILITIES_V4_OUT_FLAGS1_OFST 0
+#define       MC_CMD_GET_CAPABILITIES_V4_OUT_FLAGS1_LEN 4
+#define        MC_CMD_GET_CAPABILITIES_V4_OUT_VPORT_RECONFIGURE_LBN 3
+#define        MC_CMD_GET_CAPABILITIES_V4_OUT_VPORT_RECONFIGURE_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V4_OUT_TX_STRIPING_LBN 4
+#define        MC_CMD_GET_CAPABILITIES_V4_OUT_TX_STRIPING_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V4_OUT_VADAPTOR_QUERY_LBN 5
+#define        MC_CMD_GET_CAPABILITIES_V4_OUT_VADAPTOR_QUERY_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V4_OUT_EVB_PORT_VLAN_RESTRICT_LBN 6
+#define        MC_CMD_GET_CAPABILITIES_V4_OUT_EVB_PORT_VLAN_RESTRICT_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V4_OUT_DRV_ATTACH_PREBOOT_LBN 7
+#define        MC_CMD_GET_CAPABILITIES_V4_OUT_DRV_ATTACH_PREBOOT_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V4_OUT_RX_FORCE_EVENT_MERGING_LBN 8
+#define        MC_CMD_GET_CAPABILITIES_V4_OUT_RX_FORCE_EVENT_MERGING_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V4_OUT_SET_MAC_ENHANCED_LBN 9
+#define        MC_CMD_GET_CAPABILITIES_V4_OUT_SET_MAC_ENHANCED_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V4_OUT_UNKNOWN_UCAST_DST_FILTER_ALWAYS_MULTI_RECIPIENT_LBN 10
+#define        MC_CMD_GET_CAPABILITIES_V4_OUT_UNKNOWN_UCAST_DST_FILTER_ALWAYS_MULTI_RECIPIENT_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V4_OUT_VADAPTOR_PERMIT_SET_MAC_WHEN_FILTERS_INSTALLED_LBN 11
+#define        MC_CMD_GET_CAPABILITIES_V4_OUT_VADAPTOR_PERMIT_SET_MAC_WHEN_FILTERS_INSTALLED_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V4_OUT_TX_MAC_SECURITY_FILTERING_LBN 12
+#define        MC_CMD_GET_CAPABILITIES_V4_OUT_TX_MAC_SECURITY_FILTERING_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V4_OUT_ADDITIONAL_RSS_MODES_LBN 13
+#define        MC_CMD_GET_CAPABILITIES_V4_OUT_ADDITIONAL_RSS_MODES_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V4_OUT_QBB_LBN 14
+#define        MC_CMD_GET_CAPABILITIES_V4_OUT_QBB_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V4_OUT_RX_PACKED_STREAM_VAR_BUFFERS_LBN 15
+#define        MC_CMD_GET_CAPABILITIES_V4_OUT_RX_PACKED_STREAM_VAR_BUFFERS_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V4_OUT_RX_RSS_LIMITED_LBN 16
+#define        MC_CMD_GET_CAPABILITIES_V4_OUT_RX_RSS_LIMITED_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V4_OUT_RX_PACKED_STREAM_LBN 17
+#define        MC_CMD_GET_CAPABILITIES_V4_OUT_RX_PACKED_STREAM_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V4_OUT_RX_INCLUDE_FCS_LBN 18
+#define        MC_CMD_GET_CAPABILITIES_V4_OUT_RX_INCLUDE_FCS_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V4_OUT_TX_VLAN_INSERTION_LBN 19
+#define        MC_CMD_GET_CAPABILITIES_V4_OUT_TX_VLAN_INSERTION_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V4_OUT_RX_VLAN_STRIPPING_LBN 20
+#define        MC_CMD_GET_CAPABILITIES_V4_OUT_RX_VLAN_STRIPPING_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V4_OUT_TX_TSO_LBN 21
+#define        MC_CMD_GET_CAPABILITIES_V4_OUT_TX_TSO_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V4_OUT_RX_PREFIX_LEN_0_LBN 22
+#define        MC_CMD_GET_CAPABILITIES_V4_OUT_RX_PREFIX_LEN_0_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V4_OUT_RX_PREFIX_LEN_14_LBN 23
+#define        MC_CMD_GET_CAPABILITIES_V4_OUT_RX_PREFIX_LEN_14_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V4_OUT_RX_TIMESTAMP_LBN 24
+#define        MC_CMD_GET_CAPABILITIES_V4_OUT_RX_TIMESTAMP_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V4_OUT_RX_BATCHING_LBN 25
+#define        MC_CMD_GET_CAPABILITIES_V4_OUT_RX_BATCHING_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V4_OUT_MCAST_FILTER_CHAINING_LBN 26
+#define        MC_CMD_GET_CAPABILITIES_V4_OUT_MCAST_FILTER_CHAINING_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V4_OUT_PM_AND_RXDP_COUNTERS_LBN 27
+#define        MC_CMD_GET_CAPABILITIES_V4_OUT_PM_AND_RXDP_COUNTERS_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V4_OUT_RX_DISABLE_SCATTER_LBN 28
+#define        MC_CMD_GET_CAPABILITIES_V4_OUT_RX_DISABLE_SCATTER_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V4_OUT_TX_MCAST_UDP_LOOPBACK_LBN 29
+#define        MC_CMD_GET_CAPABILITIES_V4_OUT_TX_MCAST_UDP_LOOPBACK_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V4_OUT_EVB_LBN 30
+#define        MC_CMD_GET_CAPABILITIES_V4_OUT_EVB_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V4_OUT_VXLAN_NVGRE_LBN 31
+#define        MC_CMD_GET_CAPABILITIES_V4_OUT_VXLAN_NVGRE_WIDTH 1
+/* RxDPCPU firmware id. */
+#define       MC_CMD_GET_CAPABILITIES_V4_OUT_RX_DPCPU_FW_ID_OFST 4
+#define       MC_CMD_GET_CAPABILITIES_V4_OUT_RX_DPCPU_FW_ID_LEN 2
+/* enum: Standard RXDP firmware */
+#define          MC_CMD_GET_CAPABILITIES_V4_OUT_RXDP  0x0
+/* enum: Low latency RXDP firmware */
+#define          MC_CMD_GET_CAPABILITIES_V4_OUT_RXDP_LOW_LATENCY  0x1
+/* enum: Packed stream RXDP firmware */
+#define          MC_CMD_GET_CAPABILITIES_V4_OUT_RXDP_PACKED_STREAM  0x2
+/* enum: Rules engine RXDP firmware */
+#define          MC_CMD_GET_CAPABILITIES_V4_OUT_RXDP_RULES_ENGINE  0x5
+/* enum: BIST RXDP firmware */
+#define          MC_CMD_GET_CAPABILITIES_V4_OUT_RXDP_BIST  0x10a
+/* enum: RXDP Test firmware image 1 */
+#define          MC_CMD_GET_CAPABILITIES_V4_OUT_RXDP_TEST_FW_TO_MC_CUT_THROUGH  0x101
+/* enum: RXDP Test firmware image 2 */
+#define          MC_CMD_GET_CAPABILITIES_V4_OUT_RXDP_TEST_FW_TO_MC_STORE_FORWARD  0x102
+/* enum: RXDP Test firmware image 3 */
+#define          MC_CMD_GET_CAPABILITIES_V4_OUT_RXDP_TEST_FW_TO_MC_STORE_FORWARD_FIRST  0x103
+/* enum: RXDP Test firmware image 4 */
+#define          MC_CMD_GET_CAPABILITIES_V4_OUT_RXDP_TEST_EVERY_EVENT_BATCHABLE  0x104
+/* enum: RXDP Test firmware image 5 */
+#define          MC_CMD_GET_CAPABILITIES_V4_OUT_RXDP_TEST_BACKPRESSURE  0x105
+/* enum: RXDP Test firmware image 6 */
+#define          MC_CMD_GET_CAPABILITIES_V4_OUT_RXDP_TEST_FW_PACKET_EDITS  0x106
+/* enum: RXDP Test firmware image 7 */
+#define          MC_CMD_GET_CAPABILITIES_V4_OUT_RXDP_TEST_FW_RX_HDR_SPLIT  0x107
+/* enum: RXDP Test firmware image 8 */
+#define          MC_CMD_GET_CAPABILITIES_V4_OUT_RXDP_TEST_FW_DISABLE_DL  0x108
+/* enum: RXDP Test firmware image 9 */
+#define          MC_CMD_GET_CAPABILITIES_V4_OUT_RXDP_TEST_FW_DOORBELL_DELAY  0x10b
+/* enum: RXDP Test firmware image 10 */
+#define          MC_CMD_GET_CAPABILITIES_V4_OUT_RXDP_TEST_FW_SLOW  0x10c
+/* TxDPCPU firmware id. */
+#define       MC_CMD_GET_CAPABILITIES_V4_OUT_TX_DPCPU_FW_ID_OFST 6
+#define       MC_CMD_GET_CAPABILITIES_V4_OUT_TX_DPCPU_FW_ID_LEN 2
+/* enum: Standard TXDP firmware */
+#define          MC_CMD_GET_CAPABILITIES_V4_OUT_TXDP  0x0
+/* enum: Low latency TXDP firmware */
+#define          MC_CMD_GET_CAPABILITIES_V4_OUT_TXDP_LOW_LATENCY  0x1
+/* enum: High packet rate TXDP firmware */
+#define          MC_CMD_GET_CAPABILITIES_V4_OUT_TXDP_HIGH_PACKET_RATE  0x3
+/* enum: Rules engine TXDP firmware */
+#define          MC_CMD_GET_CAPABILITIES_V4_OUT_TXDP_RULES_ENGINE  0x5
+/* enum: BIST TXDP firmware */
+#define          MC_CMD_GET_CAPABILITIES_V4_OUT_TXDP_BIST  0x12d
+/* enum: TXDP Test firmware image 1 */
+#define          MC_CMD_GET_CAPABILITIES_V4_OUT_TXDP_TEST_FW_TSO_EDIT  0x101
+/* enum: TXDP Test firmware image 2 */
+#define          MC_CMD_GET_CAPABILITIES_V4_OUT_TXDP_TEST_FW_PACKET_EDITS  0x102
+/* enum: TXDP CSR bus test firmware */
+#define          MC_CMD_GET_CAPABILITIES_V4_OUT_TXDP_TEST_FW_CSR  0x103
+#define       MC_CMD_GET_CAPABILITIES_V4_OUT_RXPD_FW_VERSION_OFST 8
+#define       MC_CMD_GET_CAPABILITIES_V4_OUT_RXPD_FW_VERSION_LEN 2
+#define        MC_CMD_GET_CAPABILITIES_V4_OUT_RXPD_FW_VERSION_REV_LBN 0
+#define        MC_CMD_GET_CAPABILITIES_V4_OUT_RXPD_FW_VERSION_REV_WIDTH 12
+#define        MC_CMD_GET_CAPABILITIES_V4_OUT_RXPD_FW_VERSION_TYPE_LBN 12
+#define        MC_CMD_GET_CAPABILITIES_V4_OUT_RXPD_FW_VERSION_TYPE_WIDTH 4
+/* enum: reserved value - do not use (may indicate alternative interpretation
+ * of REV field in future)
+ */
+#define          MC_CMD_GET_CAPABILITIES_V4_OUT_RXPD_FW_TYPE_RESERVED  0x0
+/* enum: Trivial RX PD firmware for early Huntington development (Huntington
+ * development only)
+ */
+#define          MC_CMD_GET_CAPABILITIES_V4_OUT_RXPD_FW_TYPE_FIRST_PKT  0x1
+/* enum: RX PD firmware with approximately Siena-compatible behaviour
+ * (Huntington development only)
+ */
+#define          MC_CMD_GET_CAPABILITIES_V4_OUT_RXPD_FW_TYPE_SIENA_COMPAT  0x2
+/* enum: Full featured RX PD production firmware */
+#define          MC_CMD_GET_CAPABILITIES_V4_OUT_RXPD_FW_TYPE_FULL_FEATURED  0x3
+/* enum: (deprecated original name for the FULL_FEATURED variant) */
+#define          MC_CMD_GET_CAPABILITIES_V4_OUT_RXPD_FW_TYPE_VSWITCH  0x3
+/* enum: siena_compat variant RX PD firmware using PM rather than MAC
+ * (Huntington development only)
+ */
+#define          MC_CMD_GET_CAPABILITIES_V4_OUT_RXPD_FW_TYPE_SIENA_COMPAT_PM  0x4
+/* enum: Low latency RX PD production firmware */
+#define          MC_CMD_GET_CAPABILITIES_V4_OUT_RXPD_FW_TYPE_LOW_LATENCY  0x5
+/* enum: Packed stream RX PD production firmware */
+#define          MC_CMD_GET_CAPABILITIES_V4_OUT_RXPD_FW_TYPE_PACKED_STREAM  0x6
+/* enum: RX PD firmware handling layer 2 only for high packet rate performance
+ * tests (Medford development only)
+ */
+#define          MC_CMD_GET_CAPABILITIES_V4_OUT_RXPD_FW_TYPE_LAYER2_PERF  0x7
+/* enum: Rules engine RX PD production firmware */
+#define          MC_CMD_GET_CAPABILITIES_V4_OUT_RXPD_FW_TYPE_RULES_ENGINE  0x8
+/* enum: reserved value - do not use (bug69716) */
+#define          MC_CMD_GET_CAPABILITIES_V4_OUT_RXPD_FW_TYPE_RESERVED_9  0x9
+/* enum: RX PD firmware for GUE parsing prototype (Medford development only) */
+#define          MC_CMD_GET_CAPABILITIES_V4_OUT_RXPD_FW_TYPE_TESTFW_GUE_PROTOTYPE  0xe
+/* enum: RX PD firmware parsing but not filtering network overlay tunnel
+ * encapsulations (Medford development only)
+ */
+#define          MC_CMD_GET_CAPABILITIES_V4_OUT_RXPD_FW_TYPE_TESTFW_ENCAP_PARSING_ONLY  0xf
+#define       MC_CMD_GET_CAPABILITIES_V4_OUT_TXPD_FW_VERSION_OFST 10
+#define       MC_CMD_GET_CAPABILITIES_V4_OUT_TXPD_FW_VERSION_LEN 2
+#define        MC_CMD_GET_CAPABILITIES_V4_OUT_TXPD_FW_VERSION_REV_LBN 0
+#define        MC_CMD_GET_CAPABILITIES_V4_OUT_TXPD_FW_VERSION_REV_WIDTH 12
+#define        MC_CMD_GET_CAPABILITIES_V4_OUT_TXPD_FW_VERSION_TYPE_LBN 12
+#define        MC_CMD_GET_CAPABILITIES_V4_OUT_TXPD_FW_VERSION_TYPE_WIDTH 4
+/* enum: reserved value - do not use (may indicate alternative interpretation
+ * of REV field in future)
+ */
+#define          MC_CMD_GET_CAPABILITIES_V4_OUT_TXPD_FW_TYPE_RESERVED  0x0
+/* enum: Trivial TX PD firmware for early Huntington development (Huntington
+ * development only)
+ */
+#define          MC_CMD_GET_CAPABILITIES_V4_OUT_TXPD_FW_TYPE_FIRST_PKT  0x1
+/* enum: TX PD firmware with approximately Siena-compatible behaviour
+ * (Huntington development only)
+ */
+#define          MC_CMD_GET_CAPABILITIES_V4_OUT_TXPD_FW_TYPE_SIENA_COMPAT  0x2
+/* enum: Full featured TX PD production firmware */
+#define          MC_CMD_GET_CAPABILITIES_V4_OUT_TXPD_FW_TYPE_FULL_FEATURED  0x3
+/* enum: (deprecated original name for the FULL_FEATURED variant) */
+#define          MC_CMD_GET_CAPABILITIES_V4_OUT_TXPD_FW_TYPE_VSWITCH  0x3
+/* enum: siena_compat variant TX PD firmware using PM rather than MAC
+ * (Huntington development only)
+ */
+#define          MC_CMD_GET_CAPABILITIES_V4_OUT_TXPD_FW_TYPE_SIENA_COMPAT_PM  0x4
+#define          MC_CMD_GET_CAPABILITIES_V4_OUT_TXPD_FW_TYPE_LOW_LATENCY  0x5 /* enum */
+/* enum: TX PD firmware handling layer 2 only for high packet rate performance
+ * tests (Medford development only)
+ */
+#define          MC_CMD_GET_CAPABILITIES_V4_OUT_TXPD_FW_TYPE_LAYER2_PERF  0x7
+/* enum: Rules engine TX PD production firmware */
+#define          MC_CMD_GET_CAPABILITIES_V4_OUT_TXPD_FW_TYPE_RULES_ENGINE  0x8
+/* enum: reserved value - do not use (bug69716) */
+#define          MC_CMD_GET_CAPABILITIES_V4_OUT_TXPD_FW_TYPE_RESERVED_9  0x9
+/* enum: TX PD firmware for GUE parsing prototype (Medford development only) */
+#define          MC_CMD_GET_CAPABILITIES_V4_OUT_TXPD_FW_TYPE_TESTFW_GUE_PROTOTYPE  0xe
+/* Hardware capabilities of NIC */
+#define       MC_CMD_GET_CAPABILITIES_V4_OUT_HW_CAPABILITIES_OFST 12
+#define       MC_CMD_GET_CAPABILITIES_V4_OUT_HW_CAPABILITIES_LEN 4
+/* Licensed capabilities */
+#define       MC_CMD_GET_CAPABILITIES_V4_OUT_LICENSE_CAPABILITIES_OFST 16
+#define       MC_CMD_GET_CAPABILITIES_V4_OUT_LICENSE_CAPABILITIES_LEN 4
+/* Second word of flags. Not present on older firmware (check the length). */
+#define       MC_CMD_GET_CAPABILITIES_V4_OUT_FLAGS2_OFST 20
+#define       MC_CMD_GET_CAPABILITIES_V4_OUT_FLAGS2_LEN 4
+#define        MC_CMD_GET_CAPABILITIES_V4_OUT_TX_TSO_V2_LBN 0
+#define        MC_CMD_GET_CAPABILITIES_V4_OUT_TX_TSO_V2_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V4_OUT_TX_TSO_V2_ENCAP_LBN 1
+#define        MC_CMD_GET_CAPABILITIES_V4_OUT_TX_TSO_V2_ENCAP_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V4_OUT_EVQ_TIMER_CTRL_LBN 2
+#define        MC_CMD_GET_CAPABILITIES_V4_OUT_EVQ_TIMER_CTRL_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V4_OUT_EVENT_CUT_THROUGH_LBN 3
+#define        MC_CMD_GET_CAPABILITIES_V4_OUT_EVENT_CUT_THROUGH_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V4_OUT_RX_CUT_THROUGH_LBN 4
+#define        MC_CMD_GET_CAPABILITIES_V4_OUT_RX_CUT_THROUGH_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V4_OUT_TX_VFIFO_ULL_MODE_LBN 5
+#define        MC_CMD_GET_CAPABILITIES_V4_OUT_TX_VFIFO_ULL_MODE_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V4_OUT_MAC_STATS_40G_TX_SIZE_BINS_LBN 6
+#define        MC_CMD_GET_CAPABILITIES_V4_OUT_MAC_STATS_40G_TX_SIZE_BINS_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V4_OUT_INIT_EVQ_V2_LBN 7
+#define        MC_CMD_GET_CAPABILITIES_V4_OUT_INIT_EVQ_V2_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V4_OUT_TX_MAC_TIMESTAMPING_LBN 8
+#define        MC_CMD_GET_CAPABILITIES_V4_OUT_TX_MAC_TIMESTAMPING_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V4_OUT_TX_TIMESTAMP_LBN 9
+#define        MC_CMD_GET_CAPABILITIES_V4_OUT_TX_TIMESTAMP_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V4_OUT_RX_SNIFF_LBN 10
+#define        MC_CMD_GET_CAPABILITIES_V4_OUT_RX_SNIFF_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V4_OUT_TX_SNIFF_LBN 11
+#define        MC_CMD_GET_CAPABILITIES_V4_OUT_TX_SNIFF_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V4_OUT_NVRAM_UPDATE_REPORT_VERIFY_RESULT_LBN 12
+#define        MC_CMD_GET_CAPABILITIES_V4_OUT_NVRAM_UPDATE_REPORT_VERIFY_RESULT_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V4_OUT_MCDI_BACKGROUND_LBN 13
+#define        MC_CMD_GET_CAPABILITIES_V4_OUT_MCDI_BACKGROUND_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V4_OUT_MCDI_DB_RETURN_LBN 14
+#define        MC_CMD_GET_CAPABILITIES_V4_OUT_MCDI_DB_RETURN_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V4_OUT_CTPIO_LBN 15
+#define        MC_CMD_GET_CAPABILITIES_V4_OUT_CTPIO_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V4_OUT_TSA_SUPPORT_LBN 16
+#define        MC_CMD_GET_CAPABILITIES_V4_OUT_TSA_SUPPORT_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V4_OUT_TSA_BOUND_LBN 17
+#define        MC_CMD_GET_CAPABILITIES_V4_OUT_TSA_BOUND_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V4_OUT_SF_ADAPTER_AUTHENTICATION_LBN 18
+#define        MC_CMD_GET_CAPABILITIES_V4_OUT_SF_ADAPTER_AUTHENTICATION_WIDTH 1
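A hedged sketch of the length check the FLAGS2 comment above calls for: older firmware returns a shorter response, so the field must be bounds-checked before use. The helper name is hypothetical:

static inline bool caps_v4_has_flags2(size_t outlen)
{
	/* FLAGS2 is present only if the response covers its offset + length */
	return outlen >= MC_CMD_GET_CAPABILITIES_V4_OUT_FLAGS2_OFST +
			 MC_CMD_GET_CAPABILITIES_V4_OUT_FLAGS2_LEN;
}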
+/* Number of FATSOv2 contexts per datapath supported by this NIC. Not present
+ * on older firmware (check the length).
+ */
+#define       MC_CMD_GET_CAPABILITIES_V4_OUT_TX_TSO_V2_N_CONTEXTS_OFST 24
+#define       MC_CMD_GET_CAPABILITIES_V4_OUT_TX_TSO_V2_N_CONTEXTS_LEN 2
+/* One byte per PF containing the number of the external port assigned to this
+ * PF, indexed by PF number. Special values indicate that a PF is either not
+ * present or not assigned.
+ */
+#define       MC_CMD_GET_CAPABILITIES_V4_OUT_PFS_TO_PORTS_ASSIGNMENT_OFST 26
+#define       MC_CMD_GET_CAPABILITIES_V4_OUT_PFS_TO_PORTS_ASSIGNMENT_LEN 1
+#define       MC_CMD_GET_CAPABILITIES_V4_OUT_PFS_TO_PORTS_ASSIGNMENT_NUM 16
+/* enum: The caller is not permitted to access information on this PF. */
+#define          MC_CMD_GET_CAPABILITIES_V4_OUT_ACCESS_NOT_PERMITTED  0xff
+/* enum: PF does not exist. */
+#define          MC_CMD_GET_CAPABILITIES_V4_OUT_PF_NOT_PRESENT  0xfe
+/* enum: PF does exist but is not assigned to any external port. */
+#define          MC_CMD_GET_CAPABILITIES_V4_OUT_PF_NOT_ASSIGNED  0xfd
+/* enum: This value indicates that the PF is assigned, but that the assignment
+ * cannot be expressed in this field. It is intended for a possible future
+ * situation where a more complex PF-to-port mapping scheme is in use. A
+ * future driver should look for a new field supporting the new scheme; a
+ * current/old driver should treat this value as PF_NOT_ASSIGNED.
+ */
+#define          MC_CMD_GET_CAPABILITIES_V4_OUT_INCOMPATIBLE_ASSIGNMENT  0xfc
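A minimal sketch of consuming the PFS_TO_PORTS_ASSIGNMENT array under the rules above; 'pfs' is assumed to point at the 16-byte array within the response buffer, and, per the comment, INCOMPATIBLE_ASSIGNMENT is treated like PF_NOT_ASSIGNED:

static inline bool pf_has_port(const u8 *pfs, unsigned int pf)
{
	u8 v = pfs[pf];

	/* All four special values mean "no usable port for this PF" */
	return v != MC_CMD_GET_CAPABILITIES_V4_OUT_ACCESS_NOT_PERMITTED &&
	       v != MC_CMD_GET_CAPABILITIES_V4_OUT_PF_NOT_PRESENT &&
	       v != MC_CMD_GET_CAPABILITIES_V4_OUT_PF_NOT_ASSIGNED &&
	       v != MC_CMD_GET_CAPABILITIES_V4_OUT_INCOMPATIBLE_ASSIGNMENT;
}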
+/* One byte per PF containing the number of its VFs, indexed by PF number. A
+ * special value indicates that a PF is not present.
+ */
+#define       MC_CMD_GET_CAPABILITIES_V4_OUT_NUM_VFS_PER_PF_OFST 42
+#define       MC_CMD_GET_CAPABILITIES_V4_OUT_NUM_VFS_PER_PF_LEN 1
+#define       MC_CMD_GET_CAPABILITIES_V4_OUT_NUM_VFS_PER_PF_NUM 16
+/* enum: The caller is not permitted to access information on this PF. */
+/*               MC_CMD_GET_CAPABILITIES_V4_OUT_ACCESS_NOT_PERMITTED  0xff */
+/* enum: PF does not exist. */
+/*               MC_CMD_GET_CAPABILITIES_V4_OUT_PF_NOT_PRESENT  0xfe */
+/* Number of VIs available for each external port */
+#define       MC_CMD_GET_CAPABILITIES_V4_OUT_NUM_VIS_PER_PORT_OFST 58
+#define       MC_CMD_GET_CAPABILITIES_V4_OUT_NUM_VIS_PER_PORT_LEN 2
+#define       MC_CMD_GET_CAPABILITIES_V4_OUT_NUM_VIS_PER_PORT_NUM 4
+/* Size of RX descriptor cache expressed as binary logarithm. The actual size
+ * equals (2 ^ RX_DESC_CACHE_SIZE).
+ */
+#define       MC_CMD_GET_CAPABILITIES_V4_OUT_RX_DESC_CACHE_SIZE_OFST 66
+#define       MC_CMD_GET_CAPABILITIES_V4_OUT_RX_DESC_CACHE_SIZE_LEN 1
+/* Size of TX descriptor cache expressed as binary logarithm. The actual size
+ * equals (2 ^ TX_DESC_CACHE_SIZE).
+ */
+#define       MC_CMD_GET_CAPABILITIES_V4_OUT_TX_DESC_CACHE_SIZE_OFST 67
+#define       MC_CMD_GET_CAPABILITIES_V4_OUT_TX_DESC_CACHE_SIZE_LEN 1
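Worked example of the binary-logarithm encoding above (a field value of 6 means a 64-entry cache); the helper name is hypothetical:

static inline unsigned int desc_cache_entries(u8 log2_size)
{
	return 1u << log2_size;	/* 2 ^ {RX,TX}_DESC_CACHE_SIZE */
}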
+/* Total number of available PIO buffers */
+#define       MC_CMD_GET_CAPABILITIES_V4_OUT_NUM_PIO_BUFFS_OFST 68
+#define       MC_CMD_GET_CAPABILITIES_V4_OUT_NUM_PIO_BUFFS_LEN 2
+/* Size of a single PIO buffer */
+#define       MC_CMD_GET_CAPABILITIES_V4_OUT_SIZE_PIO_BUFF_OFST 70
+#define       MC_CMD_GET_CAPABILITIES_V4_OUT_SIZE_PIO_BUFF_LEN 2
+/* On chips later than Medford the amount of address space assigned to each VI
+ * is configurable. This is a global setting that the driver must query to
+ * discover the VI to address mapping. Cut-through PIO (CTPIO) is not available
+ * with 8k VI windows.
+ */
+#define       MC_CMD_GET_CAPABILITIES_V4_OUT_VI_WINDOW_MODE_OFST 72
+#define       MC_CMD_GET_CAPABILITIES_V4_OUT_VI_WINDOW_MODE_LEN 1
+/* enum: Each VI occupies 8k as on Huntington and Medford. PIO is at offset 4k.
+ * CTPIO is not mapped.
+ */
+#define          MC_CMD_GET_CAPABILITIES_V4_OUT_VI_WINDOW_MODE_8K   0x0
+/* enum: Each VI occupies 16k. PIO is at offset 4k. CTPIO is at offset 12k. */
+#define          MC_CMD_GET_CAPABILITIES_V4_OUT_VI_WINDOW_MODE_16K  0x1
+/* enum: Each VI occupies 64k. PIO is at offset 4k. CTPIO is at offset 12k. */
+#define          MC_CMD_GET_CAPABILITIES_V4_OUT_VI_WINDOW_MODE_64K  0x2
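A hedged sketch of turning VI_WINDOW_MODE into a per-VI stride, following the enum comments above (PIO at offset 4k in all modes; CTPIO at offset 12k only in the 16k/64k modes). The helper is illustrative, not driver code:

static inline unsigned int vi_window_stride(u8 mode)
{
	switch (mode) {
	case MC_CMD_GET_CAPABILITIES_V4_OUT_VI_WINDOW_MODE_8K:
		return 8192;	/* CTPIO is not mapped in this mode */
	case MC_CMD_GET_CAPABILITIES_V4_OUT_VI_WINDOW_MODE_16K:
		return 16384;
	case MC_CMD_GET_CAPABILITIES_V4_OUT_VI_WINDOW_MODE_64K:
		return 65536;
	default:
		return 0;	/* unknown mode: treat as unsupported */
	}
}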
+/* Number of vFIFOs per adapter that can be used for VFIFO Stuffing
+ * (SF-115995-SW) in the present configuration of firmware and port mode.
+ */
+#define       MC_CMD_GET_CAPABILITIES_V4_OUT_VFIFO_STUFFING_NUM_VFIFOS_OFST 73
+#define       MC_CMD_GET_CAPABILITIES_V4_OUT_VFIFO_STUFFING_NUM_VFIFOS_LEN 1
+/* Number of buffers per adapter that can be used for VFIFO Stuffing
+ * (SF-115995-SW) in the present configuration of firmware and port mode.
+ */
+#define       MC_CMD_GET_CAPABILITIES_V4_OUT_VFIFO_STUFFING_NUM_CP_BUFFERS_OFST 74
+#define       MC_CMD_GET_CAPABILITIES_V4_OUT_VFIFO_STUFFING_NUM_CP_BUFFERS_LEN 2
+/* Entry count in the MAC stats array, including the final GENERATION_END
+ * entry. For MAC stats DMA, drivers should allocate a buffer large enough to
+ * hold at least this many 64-bit stats values, if they wish to receive all
+ * available stats. If the buffer is shorter than MAC_STATS_NUM_STATS * 8, the
+ * stats array returned will be truncated.
+ */
+#define       MC_CMD_GET_CAPABILITIES_V4_OUT_MAC_STATS_NUM_STATS_OFST 76
+#define       MC_CMD_GET_CAPABILITIES_V4_OUT_MAC_STATS_NUM_STATS_LEN 2
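A minimal sizing sketch for the MAC-stats DMA buffer described above (64-bit values, with GENERATION_END included in the count); a shorter buffer receives a truncated array:

static inline size_t mac_stats_dma_len(unsigned int num_stats)
{
	return (size_t)num_stats * sizeof(u64);	/* MAC_STATS_NUM_STATS * 8 */
}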
 
 
 /***********************************/
@@ -8502,6 +10219,7 @@
 #define    MC_CMD_TCM_BUCKET_ALLOC_OUT_LEN 4
 /* the bucket id */
 #define       MC_CMD_TCM_BUCKET_ALLOC_OUT_BUCKET_OFST 0
+#define       MC_CMD_TCM_BUCKET_ALLOC_OUT_BUCKET_LEN 4
 
 
 /***********************************/
@@ -8516,6 +10234,7 @@
 #define    MC_CMD_TCM_BUCKET_FREE_IN_LEN 4
 /* the bucket id */
 #define       MC_CMD_TCM_BUCKET_FREE_IN_BUCKET_OFST 0
+#define       MC_CMD_TCM_BUCKET_FREE_IN_BUCKET_LEN 4
 
 /* MC_CMD_TCM_BUCKET_FREE_OUT msgresponse */
 #define    MC_CMD_TCM_BUCKET_FREE_OUT_LEN 0
@@ -8533,17 +10252,22 @@
 #define    MC_CMD_TCM_BUCKET_INIT_IN_LEN 8
 /* the bucket id */
 #define       MC_CMD_TCM_BUCKET_INIT_IN_BUCKET_OFST 0
+#define       MC_CMD_TCM_BUCKET_INIT_IN_BUCKET_LEN 4
 /* the rate in Mbps */
 #define       MC_CMD_TCM_BUCKET_INIT_IN_RATE_OFST 4
+#define       MC_CMD_TCM_BUCKET_INIT_IN_RATE_LEN 4
 
 /* MC_CMD_TCM_BUCKET_INIT_EXT_IN msgrequest */
 #define    MC_CMD_TCM_BUCKET_INIT_EXT_IN_LEN 12
 /* the bucket id */
 #define       MC_CMD_TCM_BUCKET_INIT_EXT_IN_BUCKET_OFST 0
+#define       MC_CMD_TCM_BUCKET_INIT_EXT_IN_BUCKET_LEN 4
 /* the rate in Mbps */
 #define       MC_CMD_TCM_BUCKET_INIT_EXT_IN_RATE_OFST 4
+#define       MC_CMD_TCM_BUCKET_INIT_EXT_IN_RATE_LEN 4
 /* the desired maximum fill level */
 #define       MC_CMD_TCM_BUCKET_INIT_EXT_IN_MAX_FILL_OFST 8
+#define       MC_CMD_TCM_BUCKET_INIT_EXT_IN_MAX_FILL_LEN 4
 
 /* MC_CMD_TCM_BUCKET_INIT_OUT msgresponse */
 #define    MC_CMD_TCM_BUCKET_INIT_OUT_LEN 0
@@ -8561,10 +10285,13 @@
 #define    MC_CMD_TCM_TXQ_INIT_IN_LEN 28
 /* the txq id */
 #define       MC_CMD_TCM_TXQ_INIT_IN_QID_OFST 0
+#define       MC_CMD_TCM_TXQ_INIT_IN_QID_LEN 4
 /* the static priority associated with the txq */
 #define       MC_CMD_TCM_TXQ_INIT_IN_LABEL_OFST 4
+#define       MC_CMD_TCM_TXQ_INIT_IN_LABEL_LEN 4
 /* bitmask of the priority queues this txq is inserted into when inserted. */
 #define       MC_CMD_TCM_TXQ_INIT_IN_PQ_FLAGS_OFST 8
+#define       MC_CMD_TCM_TXQ_INIT_IN_PQ_FLAGS_LEN 4
 #define        MC_CMD_TCM_TXQ_INIT_IN_PQ_FLAG_GUARANTEED_LBN 0
 #define        MC_CMD_TCM_TXQ_INIT_IN_PQ_FLAG_GUARANTEED_WIDTH 1
 #define        MC_CMD_TCM_TXQ_INIT_IN_PQ_FLAG_NORMAL_LBN 1
@@ -8573,25 +10300,32 @@
 #define        MC_CMD_TCM_TXQ_INIT_IN_PQ_FLAG_LOW_WIDTH 1
 /* the reaction point (RP) bucket */
 #define       MC_CMD_TCM_TXQ_INIT_IN_RP_BKT_OFST 12
+#define       MC_CMD_TCM_TXQ_INIT_IN_RP_BKT_LEN 4
 /* an already reserved bucket (typically set to bucket associated with outer
  * vswitch)
  */
 #define       MC_CMD_TCM_TXQ_INIT_IN_MAX_BKT1_OFST 16
+#define       MC_CMD_TCM_TXQ_INIT_IN_MAX_BKT1_LEN 4
 /* an already reserved bucket (typically set to bucket associated with inner
  * vswitch)
  */
 #define       MC_CMD_TCM_TXQ_INIT_IN_MAX_BKT2_OFST 20
+#define       MC_CMD_TCM_TXQ_INIT_IN_MAX_BKT2_LEN 4
 /* the min bucket (typically for ETS/minimum bandwidth) */
 #define       MC_CMD_TCM_TXQ_INIT_IN_MIN_BKT_OFST 24
+#define       MC_CMD_TCM_TXQ_INIT_IN_MIN_BKT_LEN 4
 
 /* MC_CMD_TCM_TXQ_INIT_EXT_IN msgrequest */
 #define    MC_CMD_TCM_TXQ_INIT_EXT_IN_LEN 32
 /* the txq id */
 #define       MC_CMD_TCM_TXQ_INIT_EXT_IN_QID_OFST 0
+#define       MC_CMD_TCM_TXQ_INIT_EXT_IN_QID_LEN 4
 /* the static priority associated with the txq */
 #define       MC_CMD_TCM_TXQ_INIT_EXT_IN_LABEL_NORMAL_OFST 4
+#define       MC_CMD_TCM_TXQ_INIT_EXT_IN_LABEL_NORMAL_LEN 4
 /* bitmask of the priority queues this txq is inserted into when inserted. */
 #define       MC_CMD_TCM_TXQ_INIT_EXT_IN_PQ_FLAGS_OFST 8
+#define       MC_CMD_TCM_TXQ_INIT_EXT_IN_PQ_FLAGS_LEN 4
 #define        MC_CMD_TCM_TXQ_INIT_EXT_IN_PQ_FLAG_GUARANTEED_LBN 0
 #define        MC_CMD_TCM_TXQ_INIT_EXT_IN_PQ_FLAG_GUARANTEED_WIDTH 1
 #define        MC_CMD_TCM_TXQ_INIT_EXT_IN_PQ_FLAG_NORMAL_LBN 1
@@ -8600,18 +10334,23 @@
 #define        MC_CMD_TCM_TXQ_INIT_EXT_IN_PQ_FLAG_LOW_WIDTH 1
 /* the reaction point (RP) bucket */
 #define       MC_CMD_TCM_TXQ_INIT_EXT_IN_RP_BKT_OFST 12
+#define       MC_CMD_TCM_TXQ_INIT_EXT_IN_RP_BKT_LEN 4
 /* an already reserved bucket (typically set to bucket associated with outer
  * vswitch)
  */
 #define       MC_CMD_TCM_TXQ_INIT_EXT_IN_MAX_BKT1_OFST 16
+#define       MC_CMD_TCM_TXQ_INIT_EXT_IN_MAX_BKT1_LEN 4
 /* an already reserved bucket (typically set to bucket associated with inner
  * vswitch)
  */
 #define       MC_CMD_TCM_TXQ_INIT_EXT_IN_MAX_BKT2_OFST 20
+#define       MC_CMD_TCM_TXQ_INIT_EXT_IN_MAX_BKT2_LEN 4
 /* the min bucket (typically for ETS/minimum bandwidth) */
 #define       MC_CMD_TCM_TXQ_INIT_EXT_IN_MIN_BKT_OFST 24
+#define       MC_CMD_TCM_TXQ_INIT_EXT_IN_MIN_BKT_LEN 4
 /* the static priority associated with the txq */
 #define       MC_CMD_TCM_TXQ_INIT_EXT_IN_LABEL_GUARANTEED_OFST 28
+#define       MC_CMD_TCM_TXQ_INIT_EXT_IN_LABEL_GUARANTEED_LEN 4
 
 /* MC_CMD_TCM_TXQ_INIT_OUT msgresponse */
 #define    MC_CMD_TCM_TXQ_INIT_OUT_LEN 0
@@ -8629,8 +10368,10 @@
 #define    MC_CMD_LINK_PIOBUF_IN_LEN 8
 /* Handle for allocated push I/O buffer. */
 #define       MC_CMD_LINK_PIOBUF_IN_PIOBUF_HANDLE_OFST 0
+#define       MC_CMD_LINK_PIOBUF_IN_PIOBUF_HANDLE_LEN 4
 /* Function Local Instance (VI) number. */
 #define       MC_CMD_LINK_PIOBUF_IN_TXQ_INSTANCE_OFST 4
+#define       MC_CMD_LINK_PIOBUF_IN_TXQ_INSTANCE_LEN 4
 
 /* MC_CMD_LINK_PIOBUF_OUT msgresponse */
 #define    MC_CMD_LINK_PIOBUF_OUT_LEN 0
@@ -8648,6 +10389,7 @@
 #define    MC_CMD_UNLINK_PIOBUF_IN_LEN 4
 /* Function Local Instance (VI) number. */
 #define       MC_CMD_UNLINK_PIOBUF_IN_TXQ_INSTANCE_OFST 0
+#define       MC_CMD_UNLINK_PIOBUF_IN_TXQ_INSTANCE_LEN 4
 
 /* MC_CMD_UNLINK_PIOBUF_OUT msgresponse */
 #define    MC_CMD_UNLINK_PIOBUF_OUT_LEN 0
@@ -8665,8 +10407,10 @@
 #define    MC_CMD_VSWITCH_ALLOC_IN_LEN 16
 /* The port to connect to the v-switch's upstream port. */
 #define       MC_CMD_VSWITCH_ALLOC_IN_UPSTREAM_PORT_ID_OFST 0
+#define       MC_CMD_VSWITCH_ALLOC_IN_UPSTREAM_PORT_ID_LEN 4
 /* The type of v-switch to create. */
 #define       MC_CMD_VSWITCH_ALLOC_IN_TYPE_OFST 4
+#define       MC_CMD_VSWITCH_ALLOC_IN_TYPE_LEN 4
 /* enum: VLAN */
 #define          MC_CMD_VSWITCH_ALLOC_IN_VSWITCH_TYPE_VLAN  0x1
 /* enum: VEB */
@@ -8679,6 +10423,7 @@
 #define          MC_CMD_VSWITCH_ALLOC_IN_VSWITCH_TYPE_TEST  0x5
 /* Flags controlling v-port creation */
 #define       MC_CMD_VSWITCH_ALLOC_IN_FLAGS_OFST 8
+#define       MC_CMD_VSWITCH_ALLOC_IN_FLAGS_LEN 4
 #define        MC_CMD_VSWITCH_ALLOC_IN_FLAG_AUTO_PORT_LBN 0
 #define        MC_CMD_VSWITCH_ALLOC_IN_FLAG_AUTO_PORT_WIDTH 1
 /* The number of VLAN tags to allow for attached v-ports. For VLAN aggregators,
@@ -8689,6 +10434,7 @@
  * v-ports with this number of tags.
  */
 #define       MC_CMD_VSWITCH_ALLOC_IN_NUM_VLAN_TAGS_OFST 12
+#define       MC_CMD_VSWITCH_ALLOC_IN_NUM_VLAN_TAGS_LEN 4
 
 /* MC_CMD_VSWITCH_ALLOC_OUT msgresponse */
 #define    MC_CMD_VSWITCH_ALLOC_OUT_LEN 0
@@ -8706,6 +10452,7 @@
 #define    MC_CMD_VSWITCH_FREE_IN_LEN 4
 /* The port to which the v-switch is connected. */
 #define       MC_CMD_VSWITCH_FREE_IN_UPSTREAM_PORT_ID_OFST 0
+#define       MC_CMD_VSWITCH_FREE_IN_UPSTREAM_PORT_ID_LEN 4
 
 /* MC_CMD_VSWITCH_FREE_OUT msgresponse */
 #define    MC_CMD_VSWITCH_FREE_OUT_LEN 0
@@ -8725,6 +10472,7 @@
 #define    MC_CMD_VSWITCH_QUERY_IN_LEN 4
 /* The port to which the v-switch is connected. */
 #define       MC_CMD_VSWITCH_QUERY_IN_UPSTREAM_PORT_ID_OFST 0
+#define       MC_CMD_VSWITCH_QUERY_IN_UPSTREAM_PORT_ID_LEN 4
 
 /* MC_CMD_VSWITCH_QUERY_OUT msgresponse */
 #define    MC_CMD_VSWITCH_QUERY_OUT_LEN 0
@@ -8742,8 +10490,10 @@
 #define    MC_CMD_VPORT_ALLOC_IN_LEN 20
 /* The port to which the v-switch is connected. */
 #define       MC_CMD_VPORT_ALLOC_IN_UPSTREAM_PORT_ID_OFST 0
+#define       MC_CMD_VPORT_ALLOC_IN_UPSTREAM_PORT_ID_LEN 4
 /* The type of the new v-port. */
 #define       MC_CMD_VPORT_ALLOC_IN_TYPE_OFST 4
+#define       MC_CMD_VPORT_ALLOC_IN_TYPE_LEN 4
 /* enum: VLAN (obsolete) */
 #define          MC_CMD_VPORT_ALLOC_IN_VPORT_TYPE_VLAN  0x1
 /* enum: VEB (obsolete) */
@@ -8764,6 +10514,7 @@
 #define          MC_CMD_VPORT_ALLOC_IN_VPORT_TYPE_TEST  0x6
 /* Flags controlling v-port creation */
 #define       MC_CMD_VPORT_ALLOC_IN_FLAGS_OFST 8
+#define       MC_CMD_VPORT_ALLOC_IN_FLAGS_LEN 4
 #define        MC_CMD_VPORT_ALLOC_IN_FLAG_AUTO_PORT_LBN 0
 #define        MC_CMD_VPORT_ALLOC_IN_FLAG_AUTO_PORT_WIDTH 1
 #define        MC_CMD_VPORT_ALLOC_IN_FLAG_VLAN_RESTRICT_LBN 1
@@ -8773,8 +10524,10 @@
  * v-switch.
  */
 #define       MC_CMD_VPORT_ALLOC_IN_NUM_VLAN_TAGS_OFST 12
+#define       MC_CMD_VPORT_ALLOC_IN_NUM_VLAN_TAGS_LEN 4
 /* The actual VLAN tags to insert/remove */
 #define       MC_CMD_VPORT_ALLOC_IN_VLAN_TAGS_OFST 16
+#define       MC_CMD_VPORT_ALLOC_IN_VLAN_TAGS_LEN 4
 #define        MC_CMD_VPORT_ALLOC_IN_VLAN_TAG_0_LBN 0
 #define        MC_CMD_VPORT_ALLOC_IN_VLAN_TAG_0_WIDTH 16
 #define        MC_CMD_VPORT_ALLOC_IN_VLAN_TAG_1_LBN 16
@@ -8784,6 +10537,7 @@
 #define    MC_CMD_VPORT_ALLOC_OUT_LEN 4
 /* The handle of the new v-port */
 #define       MC_CMD_VPORT_ALLOC_OUT_VPORT_ID_OFST 0
+#define       MC_CMD_VPORT_ALLOC_OUT_VPORT_ID_LEN 4
 
 
 /***********************************/
@@ -8798,6 +10552,7 @@
 #define    MC_CMD_VPORT_FREE_IN_LEN 4
 /* The handle of the v-port */
 #define       MC_CMD_VPORT_FREE_IN_VPORT_ID_OFST 0
+#define       MC_CMD_VPORT_FREE_IN_VPORT_ID_LEN 4
 
 /* MC_CMD_VPORT_FREE_OUT msgresponse */
 #define    MC_CMD_VPORT_FREE_OUT_LEN 0
@@ -8815,18 +10570,23 @@
 #define    MC_CMD_VADAPTOR_ALLOC_IN_LEN 30
 /* The port to connect to the v-adaptor's port. */
 #define       MC_CMD_VADAPTOR_ALLOC_IN_UPSTREAM_PORT_ID_OFST 0
+#define       MC_CMD_VADAPTOR_ALLOC_IN_UPSTREAM_PORT_ID_LEN 4
 /* Flags controlling v-adaptor creation */
 #define       MC_CMD_VADAPTOR_ALLOC_IN_FLAGS_OFST 8
+#define       MC_CMD_VADAPTOR_ALLOC_IN_FLAGS_LEN 4
 #define        MC_CMD_VADAPTOR_ALLOC_IN_FLAG_AUTO_VADAPTOR_LBN 0
 #define        MC_CMD_VADAPTOR_ALLOC_IN_FLAG_AUTO_VADAPTOR_WIDTH 1
 #define        MC_CMD_VADAPTOR_ALLOC_IN_FLAG_PERMIT_SET_MAC_WHEN_FILTERS_INSTALLED_LBN 1
 #define        MC_CMD_VADAPTOR_ALLOC_IN_FLAG_PERMIT_SET_MAC_WHEN_FILTERS_INSTALLED_WIDTH 1
 /* The number of VLAN tags to strip on receive */
 #define       MC_CMD_VADAPTOR_ALLOC_IN_NUM_VLANS_OFST 12
+#define       MC_CMD_VADAPTOR_ALLOC_IN_NUM_VLANS_LEN 4
 /* The number of VLAN tags to transparently insert/remove. */
 #define       MC_CMD_VADAPTOR_ALLOC_IN_NUM_VLAN_TAGS_OFST 16
+#define       MC_CMD_VADAPTOR_ALLOC_IN_NUM_VLAN_TAGS_LEN 4
 /* The actual VLAN tags to insert/remove */
 #define       MC_CMD_VADAPTOR_ALLOC_IN_VLAN_TAGS_OFST 20
+#define       MC_CMD_VADAPTOR_ALLOC_IN_VLAN_TAGS_LEN 4
 #define        MC_CMD_VADAPTOR_ALLOC_IN_VLAN_TAG_0_LBN 0
 #define        MC_CMD_VADAPTOR_ALLOC_IN_VLAN_TAG_0_WIDTH 16
 #define        MC_CMD_VADAPTOR_ALLOC_IN_VLAN_TAG_1_LBN 16
@@ -8853,6 +10613,7 @@
 #define    MC_CMD_VADAPTOR_FREE_IN_LEN 4
 /* The port to which the v-adaptor is connected. */
 #define       MC_CMD_VADAPTOR_FREE_IN_UPSTREAM_PORT_ID_OFST 0
+#define       MC_CMD_VADAPTOR_FREE_IN_UPSTREAM_PORT_ID_LEN 4
 
 /* MC_CMD_VADAPTOR_FREE_OUT msgresponse */
 #define    MC_CMD_VADAPTOR_FREE_OUT_LEN 0
@@ -8870,6 +10631,7 @@
 #define    MC_CMD_VADAPTOR_SET_MAC_IN_LEN 10
 /* The port to which the v-adaptor is connected. */
 #define       MC_CMD_VADAPTOR_SET_MAC_IN_UPSTREAM_PORT_ID_OFST 0
+#define       MC_CMD_VADAPTOR_SET_MAC_IN_UPSTREAM_PORT_ID_LEN 4
 /* The new MAC address to assign to this v-adaptor */
 #define       MC_CMD_VADAPTOR_SET_MAC_IN_MACADDR_OFST 4
 #define       MC_CMD_VADAPTOR_SET_MAC_IN_MACADDR_LEN 6
@@ -8890,6 +10652,7 @@
 #define    MC_CMD_VADAPTOR_GET_MAC_IN_LEN 4
 /* The port to which the v-adaptor is connected. */
 #define       MC_CMD_VADAPTOR_GET_MAC_IN_UPSTREAM_PORT_ID_OFST 0
+#define       MC_CMD_VADAPTOR_GET_MAC_IN_UPSTREAM_PORT_ID_LEN 4
 
 /* MC_CMD_VADAPTOR_GET_MAC_OUT msgresponse */
 #define    MC_CMD_VADAPTOR_GET_MAC_OUT_LEN 6
@@ -8910,15 +10673,19 @@
 #define    MC_CMD_VADAPTOR_QUERY_IN_LEN 4
 /* The port to which the v-adaptor is connected. */
 #define       MC_CMD_VADAPTOR_QUERY_IN_UPSTREAM_PORT_ID_OFST 0
+#define       MC_CMD_VADAPTOR_QUERY_IN_UPSTREAM_PORT_ID_LEN 4
 
 /* MC_CMD_VADAPTOR_QUERY_OUT msgresponse */
 #define    MC_CMD_VADAPTOR_QUERY_OUT_LEN 12
 /* The EVB port flags as defined at MC_CMD_VPORT_ALLOC. */
 #define       MC_CMD_VADAPTOR_QUERY_OUT_PORT_FLAGS_OFST 0
+#define       MC_CMD_VADAPTOR_QUERY_OUT_PORT_FLAGS_LEN 4
 /* The v-adaptor flags as defined at MC_CMD_VADAPTOR_ALLOC. */
 #define       MC_CMD_VADAPTOR_QUERY_OUT_VADAPTOR_FLAGS_OFST 4
+#define       MC_CMD_VADAPTOR_QUERY_OUT_VADAPTOR_FLAGS_LEN 4
 /* The number of VLAN tags that may still be added */
 #define       MC_CMD_VADAPTOR_QUERY_OUT_NUM_AVAILABLE_VLAN_TAGS_OFST 8
+#define       MC_CMD_VADAPTOR_QUERY_OUT_NUM_AVAILABLE_VLAN_TAGS_LEN 4
 
 
 /***********************************/
@@ -8933,8 +10700,10 @@
 #define    MC_CMD_EVB_PORT_ASSIGN_IN_LEN 8
 /* The port to assign. */
 #define       MC_CMD_EVB_PORT_ASSIGN_IN_PORT_ID_OFST 0
+#define       MC_CMD_EVB_PORT_ASSIGN_IN_PORT_ID_LEN 4
 /* The target function to modify. */
 #define       MC_CMD_EVB_PORT_ASSIGN_IN_FUNCTION_OFST 4
+#define       MC_CMD_EVB_PORT_ASSIGN_IN_FUNCTION_LEN 4
 #define        MC_CMD_EVB_PORT_ASSIGN_IN_PF_LBN 0
 #define        MC_CMD_EVB_PORT_ASSIGN_IN_PF_WIDTH 16
 #define        MC_CMD_EVB_PORT_ASSIGN_IN_VF_LBN 16
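A sketch of packing the FUNCTION word from the PF/VF subfields above (hypothetical helper; both subfields are 16 bits wide, the VF width continuing past this hunk):

static inline u32 evb_port_assign_function(u16 pf, u16 vf)
{
	return ((u32)pf << MC_CMD_EVB_PORT_ASSIGN_IN_PF_LBN) |
	       ((u32)vf << MC_CMD_EVB_PORT_ASSIGN_IN_VF_LBN);
}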
@@ -8955,9 +10724,13 @@
 /* MC_CMD_RDWR_A64_REGIONS_IN msgrequest */
 #define    MC_CMD_RDWR_A64_REGIONS_IN_LEN 17
 #define       MC_CMD_RDWR_A64_REGIONS_IN_REGION0_OFST 0
+#define       MC_CMD_RDWR_A64_REGIONS_IN_REGION0_LEN 4
 #define       MC_CMD_RDWR_A64_REGIONS_IN_REGION1_OFST 4
+#define       MC_CMD_RDWR_A64_REGIONS_IN_REGION1_LEN 4
 #define       MC_CMD_RDWR_A64_REGIONS_IN_REGION2_OFST 8
+#define       MC_CMD_RDWR_A64_REGIONS_IN_REGION2_LEN 4
 #define       MC_CMD_RDWR_A64_REGIONS_IN_REGION3_OFST 12
+#define       MC_CMD_RDWR_A64_REGIONS_IN_REGION3_LEN 4
 /* Write enable bits 0-3, set to write, clear to read. */
 #define       MC_CMD_RDWR_A64_REGIONS_IN_WRITE_MASK_LBN 128
 #define       MC_CMD_RDWR_A64_REGIONS_IN_WRITE_MASK_WIDTH 4
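The write-enable mask above sits at bit 128, i.e. the low nibble of byte 16, just after the four region words (hence the 17-byte request). A hedged extraction sketch:

static inline u8 a64_regions_write_mask(const u8 *inbuf)
{
	/* LBN 128 / 8 = byte 16; WIDTH 4 = low nibble */
	return inbuf[MC_CMD_RDWR_A64_REGIONS_IN_WRITE_MASK_LBN / 8] & 0xf;
}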
@@ -8969,9 +10742,13 @@
  */
 #define    MC_CMD_RDWR_A64_REGIONS_OUT_LEN 16
 #define       MC_CMD_RDWR_A64_REGIONS_OUT_REGION0_OFST 0
+#define       MC_CMD_RDWR_A64_REGIONS_OUT_REGION0_LEN 4
 #define       MC_CMD_RDWR_A64_REGIONS_OUT_REGION1_OFST 4
+#define       MC_CMD_RDWR_A64_REGIONS_OUT_REGION1_LEN 4
 #define       MC_CMD_RDWR_A64_REGIONS_OUT_REGION2_OFST 8
+#define       MC_CMD_RDWR_A64_REGIONS_OUT_REGION2_LEN 4
 #define       MC_CMD_RDWR_A64_REGIONS_OUT_REGION3_OFST 12
+#define       MC_CMD_RDWR_A64_REGIONS_OUT_REGION3_LEN 4
 
 
 /***********************************/
@@ -8986,11 +10763,13 @@
 #define    MC_CMD_ONLOAD_STACK_ALLOC_IN_LEN 4
 /* The handle of the owning upstream port */
 #define       MC_CMD_ONLOAD_STACK_ALLOC_IN_UPSTREAM_PORT_ID_OFST 0
+#define       MC_CMD_ONLOAD_STACK_ALLOC_IN_UPSTREAM_PORT_ID_LEN 4
 
 /* MC_CMD_ONLOAD_STACK_ALLOC_OUT msgresponse */
 #define    MC_CMD_ONLOAD_STACK_ALLOC_OUT_LEN 4
 /* The handle of the new Onload stack */
 #define       MC_CMD_ONLOAD_STACK_ALLOC_OUT_ONLOAD_STACK_ID_OFST 0
+#define       MC_CMD_ONLOAD_STACK_ALLOC_OUT_ONLOAD_STACK_ID_LEN 4
 
 
 /***********************************/
@@ -9005,6 +10784,7 @@
 #define    MC_CMD_ONLOAD_STACK_FREE_IN_LEN 4
 /* The handle of the Onload stack */
 #define       MC_CMD_ONLOAD_STACK_FREE_IN_ONLOAD_STACK_ID_OFST 0
+#define       MC_CMD_ONLOAD_STACK_FREE_IN_ONLOAD_STACK_ID_LEN 4
 
 /* MC_CMD_ONLOAD_STACK_FREE_OUT msgresponse */
 #define    MC_CMD_ONLOAD_STACK_FREE_OUT_LEN 0
@@ -9022,8 +10802,10 @@
 #define    MC_CMD_RSS_CONTEXT_ALLOC_IN_LEN 12
 /* The handle of the owning upstream port */
 #define       MC_CMD_RSS_CONTEXT_ALLOC_IN_UPSTREAM_PORT_ID_OFST 0
+#define       MC_CMD_RSS_CONTEXT_ALLOC_IN_UPSTREAM_PORT_ID_LEN 4
 /* The type of context to allocate */
 #define       MC_CMD_RSS_CONTEXT_ALLOC_IN_TYPE_OFST 4
+#define       MC_CMD_RSS_CONTEXT_ALLOC_IN_TYPE_LEN 4
 /* enum: Allocate a context for exclusive use. The key and indirection table
  * must be explicitly configured.
  */
@@ -9037,6 +10819,7 @@
  * in the indirection table will be in the range 0 to NUM_QUEUES-1.
  */
 #define       MC_CMD_RSS_CONTEXT_ALLOC_IN_NUM_QUEUES_OFST 8
+#define       MC_CMD_RSS_CONTEXT_ALLOC_IN_NUM_QUEUES_LEN 4
 
 /* MC_CMD_RSS_CONTEXT_ALLOC_OUT msgresponse */
 #define    MC_CMD_RSS_CONTEXT_ALLOC_OUT_LEN 4
@@ -9045,6 +10828,7 @@
  * handle.
  */
 #define       MC_CMD_RSS_CONTEXT_ALLOC_OUT_RSS_CONTEXT_ID_OFST 0
+#define       MC_CMD_RSS_CONTEXT_ALLOC_OUT_RSS_CONTEXT_ID_LEN 4
 /* enum: guaranteed invalid RSS context handle value */
 #define          MC_CMD_RSS_CONTEXT_ALLOC_OUT_RSS_CONTEXT_ID_INVALID  0xffffffff
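A hedged sketch of an exclusive-context allocation using this message pair. It assumes the sfc driver's MCDI helpers (MCDI_DECLARE_BUF(), MCDI_SET_DWORD(), MCDI_DWORD(), efx_mcdi_rpc()) and the TYPE_EXCLUSIVE enum defined with this command; it is illustrative, not the driver's actual implementation:

static int rss_context_alloc_excl(struct efx_nic *efx, u32 port_id,
				  u32 num_queues, u32 *context)
{
	MCDI_DECLARE_BUF(inbuf, MC_CMD_RSS_CONTEXT_ALLOC_IN_LEN);
	MCDI_DECLARE_BUF(outbuf, MC_CMD_RSS_CONTEXT_ALLOC_OUT_LEN);
	size_t outlen;
	int rc;

	MCDI_SET_DWORD(inbuf, RSS_CONTEXT_ALLOC_IN_UPSTREAM_PORT_ID, port_id);
	MCDI_SET_DWORD(inbuf, RSS_CONTEXT_ALLOC_IN_TYPE,
		       MC_CMD_RSS_CONTEXT_ALLOC_IN_TYPE_EXCLUSIVE);
	MCDI_SET_DWORD(inbuf, RSS_CONTEXT_ALLOC_IN_NUM_QUEUES, num_queues);
	rc = efx_mcdi_rpc(efx, MC_CMD_RSS_CONTEXT_ALLOC, inbuf, sizeof(inbuf),
			  outbuf, sizeof(outbuf), &outlen);
	if (rc)
		return rc;
	if (outlen < MC_CMD_RSS_CONTEXT_ALLOC_OUT_LEN)
		return -EIO;
	*context = MCDI_DWORD(outbuf, RSS_CONTEXT_ALLOC_OUT_RSS_CONTEXT_ID);
	/* Firmware signals failure with the guaranteed-invalid handle */
	return *context == MC_CMD_RSS_CONTEXT_ALLOC_OUT_RSS_CONTEXT_ID_INVALID ?
	       -ENOENT : 0;
}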
 
@@ -9061,6 +10845,7 @@
 #define    MC_CMD_RSS_CONTEXT_FREE_IN_LEN 4
 /* The handle of the RSS context */
 #define       MC_CMD_RSS_CONTEXT_FREE_IN_RSS_CONTEXT_ID_OFST 0
+#define       MC_CMD_RSS_CONTEXT_FREE_IN_RSS_CONTEXT_ID_LEN 4
 
 /* MC_CMD_RSS_CONTEXT_FREE_OUT msgresponse */
 #define    MC_CMD_RSS_CONTEXT_FREE_OUT_LEN 0
@@ -9078,6 +10863,7 @@
 #define    MC_CMD_RSS_CONTEXT_SET_KEY_IN_LEN 44
 /* The handle of the RSS context */
 #define       MC_CMD_RSS_CONTEXT_SET_KEY_IN_RSS_CONTEXT_ID_OFST 0
+#define       MC_CMD_RSS_CONTEXT_SET_KEY_IN_RSS_CONTEXT_ID_LEN 4
 /* The 40-byte Toeplitz hash key (TBD endianness issues?) */
 #define       MC_CMD_RSS_CONTEXT_SET_KEY_IN_TOEPLITZ_KEY_OFST 4
 #define       MC_CMD_RSS_CONTEXT_SET_KEY_IN_TOEPLITZ_KEY_LEN 40
@@ -9098,6 +10884,7 @@
 #define    MC_CMD_RSS_CONTEXT_GET_KEY_IN_LEN 4
 /* The handle of the RSS context */
 #define       MC_CMD_RSS_CONTEXT_GET_KEY_IN_RSS_CONTEXT_ID_OFST 0
+#define       MC_CMD_RSS_CONTEXT_GET_KEY_IN_RSS_CONTEXT_ID_LEN 4
 
 /* MC_CMD_RSS_CONTEXT_GET_KEY_OUT msgresponse */
 #define    MC_CMD_RSS_CONTEXT_GET_KEY_OUT_LEN 44
@@ -9118,6 +10905,7 @@
 #define    MC_CMD_RSS_CONTEXT_SET_TABLE_IN_LEN 132
 /* The handle of the RSS context */
 #define       MC_CMD_RSS_CONTEXT_SET_TABLE_IN_RSS_CONTEXT_ID_OFST 0
+#define       MC_CMD_RSS_CONTEXT_SET_TABLE_IN_RSS_CONTEXT_ID_LEN 4
 /* The 128-byte indirection table (1 byte per entry) */
 #define       MC_CMD_RSS_CONTEXT_SET_TABLE_IN_INDIRECTION_TABLE_OFST 4
 #define       MC_CMD_RSS_CONTEXT_SET_TABLE_IN_INDIRECTION_TABLE_LEN 128
@@ -9138,6 +10926,7 @@
 #define    MC_CMD_RSS_CONTEXT_GET_TABLE_IN_LEN 4
 /* The handle of the RSS context */
 #define       MC_CMD_RSS_CONTEXT_GET_TABLE_IN_RSS_CONTEXT_ID_OFST 0
+#define       MC_CMD_RSS_CONTEXT_GET_TABLE_IN_RSS_CONTEXT_ID_LEN 4
 
 /* MC_CMD_RSS_CONTEXT_GET_TABLE_OUT msgresponse */
 #define    MC_CMD_RSS_CONTEXT_GET_TABLE_OUT_LEN 132
@@ -9158,6 +10947,7 @@
 #define    MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_LEN 8
 /* The handle of the RSS context */
 #define       MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_RSS_CONTEXT_ID_OFST 0
+#define       MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_RSS_CONTEXT_ID_LEN 4
 /* Hash control flags. The _EN bits are always supported, but new modes are
  * available when ADDITIONAL_RSS_MODES is reported by MC_CMD_GET_CAPABILITIES:
  * in this case, the MODE fields may be set to non-zero values, and will take
@@ -9171,6 +10961,7 @@
  * particular packet type.)
  */
 #define       MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_FLAGS_OFST 4
+#define       MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_FLAGS_LEN 4
 #define        MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_TOEPLITZ_IPV4_EN_LBN 0
 #define        MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_TOEPLITZ_IPV4_EN_WIDTH 1
 #define        MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_TOEPLITZ_TCPV4_EN_LBN 1
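A minimal sketch of composing the legacy single-bit enables described above; on firmware without ADDITIONAL_RSS_MODES the 4-bit MODE fields must remain zero, so only the _EN bits are set:

static inline u32 rss_flags_ipv4_tcpv4(void)
{
	return (1u << MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_TOEPLITZ_IPV4_EN_LBN) |
	       (1u << MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_TOEPLITZ_TCPV4_EN_LBN);
}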
@@ -9210,6 +11001,7 @@
 #define    MC_CMD_RSS_CONTEXT_GET_FLAGS_IN_LEN 4
 /* The handle of the RSS context */
 #define       MC_CMD_RSS_CONTEXT_GET_FLAGS_IN_RSS_CONTEXT_ID_OFST 0
+#define       MC_CMD_RSS_CONTEXT_GET_FLAGS_IN_RSS_CONTEXT_ID_LEN 4
 
 /* MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT msgresponse */
 #define    MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_LEN 8
@@ -9227,6 +11019,7 @@
  * always be used for a SET regardless of old/new driver vs. old/new firmware.
  */
 #define       MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_FLAGS_OFST 4
+#define       MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_FLAGS_LEN 4
 #define        MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_TOEPLITZ_IPV4_EN_LBN 0
 #define        MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_TOEPLITZ_IPV4_EN_WIDTH 1
 #define        MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_TOEPLITZ_TCPV4_EN_LBN 1
@@ -9263,11 +11056,13 @@
 #define    MC_CMD_DOT1P_MAPPING_ALLOC_IN_LEN 8
 /* The handle of the owning upstream port */
 #define       MC_CMD_DOT1P_MAPPING_ALLOC_IN_UPSTREAM_PORT_ID_OFST 0
+#define       MC_CMD_DOT1P_MAPPING_ALLOC_IN_UPSTREAM_PORT_ID_LEN 4
 /* Number of queues spanned by this mapping, in the range 1-64; valid fixed
  * offsets in the mapping table will be in the range 0 to NUM_QUEUES-1, and
  * referenced RSS contexts must span no more than this number.
  */
 #define       MC_CMD_DOT1P_MAPPING_ALLOC_IN_NUM_QUEUES_OFST 4
+#define       MC_CMD_DOT1P_MAPPING_ALLOC_IN_NUM_QUEUES_LEN 4
 
 /* MC_CMD_DOT1P_MAPPING_ALLOC_OUT msgresponse */
 #define    MC_CMD_DOT1P_MAPPING_ALLOC_OUT_LEN 4
@@ -9276,6 +11071,7 @@
  * handle.
  */
 #define       MC_CMD_DOT1P_MAPPING_ALLOC_OUT_DOT1P_MAPPING_ID_OFST 0
+#define       MC_CMD_DOT1P_MAPPING_ALLOC_OUT_DOT1P_MAPPING_ID_LEN 4
 /* enum: guaranteed invalid .1p mapping handle value */
 #define          MC_CMD_DOT1P_MAPPING_ALLOC_OUT_DOT1P_MAPPING_ID_INVALID  0xffffffff
 
@@ -9292,6 +11088,7 @@
 #define    MC_CMD_DOT1P_MAPPING_FREE_IN_LEN 4
 /* The handle of the .1p mapping */
 #define       MC_CMD_DOT1P_MAPPING_FREE_IN_DOT1P_MAPPING_ID_OFST 0
+#define       MC_CMD_DOT1P_MAPPING_FREE_IN_DOT1P_MAPPING_ID_LEN 4
 
 /* MC_CMD_DOT1P_MAPPING_FREE_OUT msgresponse */
 #define    MC_CMD_DOT1P_MAPPING_FREE_OUT_LEN 0
@@ -9309,6 +11106,7 @@
 #define    MC_CMD_DOT1P_MAPPING_SET_TABLE_IN_LEN 36
 /* The handle of the .1p mapping */
 #define       MC_CMD_DOT1P_MAPPING_SET_TABLE_IN_DOT1P_MAPPING_ID_OFST 0
+#define       MC_CMD_DOT1P_MAPPING_SET_TABLE_IN_DOT1P_MAPPING_ID_LEN 4
 /* Per-priority mappings (1 32-bit word per entry - an offset or RSS context
  * handle)
  */
@@ -9331,6 +11129,7 @@
 #define    MC_CMD_DOT1P_MAPPING_GET_TABLE_IN_LEN 4
 /* The handle of the .1p mapping */
 #define       MC_CMD_DOT1P_MAPPING_GET_TABLE_IN_DOT1P_MAPPING_ID_OFST 0
+#define       MC_CMD_DOT1P_MAPPING_GET_TABLE_IN_DOT1P_MAPPING_ID_LEN 4
 
 /* MC_CMD_DOT1P_MAPPING_GET_TABLE_OUT msgresponse */
 #define    MC_CMD_DOT1P_MAPPING_GET_TABLE_OUT_LEN 36
@@ -9356,10 +11155,13 @@
 #define    MC_CMD_GET_VECTOR_CFG_OUT_LEN 12
 /* Base absolute interrupt vector number. */
 #define       MC_CMD_GET_VECTOR_CFG_OUT_VEC_BASE_OFST 0
+#define       MC_CMD_GET_VECTOR_CFG_OUT_VEC_BASE_LEN 4
 /* Number of interrupt vectors allocated to this PF. */
 #define       MC_CMD_GET_VECTOR_CFG_OUT_VECS_PER_PF_OFST 4
+#define       MC_CMD_GET_VECTOR_CFG_OUT_VECS_PER_PF_LEN 4
 /* Number of interrupt vectors to allocate per VF. */
 #define       MC_CMD_GET_VECTOR_CFG_OUT_VECS_PER_VF_OFST 8
+#define       MC_CMD_GET_VECTOR_CFG_OUT_VECS_PER_VF_LEN 4
 
 
 /***********************************/
@@ -9376,10 +11178,13 @@
  * let the system find a suitable base.
  */
 #define       MC_CMD_SET_VECTOR_CFG_IN_VEC_BASE_OFST 0
+#define       MC_CMD_SET_VECTOR_CFG_IN_VEC_BASE_LEN 4
 /* Number of interrupt vectors allocated to this PF. */
 #define       MC_CMD_SET_VECTOR_CFG_IN_VECS_PER_PF_OFST 4
+#define       MC_CMD_SET_VECTOR_CFG_IN_VECS_PER_PF_LEN 4
 /* Number of interrupt vectors to allocate per VF. */
 #define       MC_CMD_SET_VECTOR_CFG_IN_VECS_PER_VF_OFST 8
+#define       MC_CMD_SET_VECTOR_CFG_IN_VECS_PER_VF_LEN 4
 
 /* MC_CMD_SET_VECTOR_CFG_OUT msgresponse */
 #define    MC_CMD_SET_VECTOR_CFG_OUT_LEN 0
@@ -9397,6 +11202,7 @@
 #define    MC_CMD_VPORT_ADD_MAC_ADDRESS_IN_LEN 10
 /* The handle of the v-port */
 #define       MC_CMD_VPORT_ADD_MAC_ADDRESS_IN_VPORT_ID_OFST 0
+#define       MC_CMD_VPORT_ADD_MAC_ADDRESS_IN_VPORT_ID_LEN 4
 /* MAC address to add */
 #define       MC_CMD_VPORT_ADD_MAC_ADDRESS_IN_MACADDR_OFST 4
 #define       MC_CMD_VPORT_ADD_MAC_ADDRESS_IN_MACADDR_LEN 6
@@ -9417,6 +11223,7 @@
 #define    MC_CMD_VPORT_DEL_MAC_ADDRESS_IN_LEN 10
 /* The handle of the v-port */
 #define       MC_CMD_VPORT_DEL_MAC_ADDRESS_IN_VPORT_ID_OFST 0
+#define       MC_CMD_VPORT_DEL_MAC_ADDRESS_IN_VPORT_ID_LEN 4
 /* MAC address to delete */
 #define       MC_CMD_VPORT_DEL_MAC_ADDRESS_IN_MACADDR_OFST 4
 #define       MC_CMD_VPORT_DEL_MAC_ADDRESS_IN_MACADDR_LEN 6
@@ -9437,6 +11244,7 @@
 #define    MC_CMD_VPORT_GET_MAC_ADDRESSES_IN_LEN 4
 /* The handle of the v-port */
 #define       MC_CMD_VPORT_GET_MAC_ADDRESSES_IN_VPORT_ID_OFST 0
+#define       MC_CMD_VPORT_GET_MAC_ADDRESSES_IN_VPORT_ID_LEN 4
 
 /* MC_CMD_VPORT_GET_MAC_ADDRESSES_OUT msgresponse */
 #define    MC_CMD_VPORT_GET_MAC_ADDRESSES_OUT_LENMIN 4
@@ -9444,6 +11252,7 @@
 #define    MC_CMD_VPORT_GET_MAC_ADDRESSES_OUT_LEN(num) (4+6*(num))
 /* The number of MAC addresses returned */
 #define       MC_CMD_VPORT_GET_MAC_ADDRESSES_OUT_MACADDR_COUNT_OFST 0
+#define       MC_CMD_VPORT_GET_MAC_ADDRESSES_OUT_MACADDR_COUNT_LEN 4
 /* Array of MAC addresses */
 #define       MC_CMD_VPORT_GET_MAC_ADDRESSES_OUT_MACADDR_OFST 4
 #define       MC_CMD_VPORT_GET_MAC_ADDRESSES_OUT_MACADDR_LEN 6
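Since this response is variable-length, a hedged validity check using the LEN(num) macro above (hypothetical helper name):

static inline bool vport_macaddrs_len_ok(size_t outlen, u32 count)
{
	/* 4-byte count header plus 6 bytes per returned MAC address */
	return outlen >= MC_CMD_VPORT_GET_MAC_ADDRESSES_OUT_LEN(count);
}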
@@ -9465,8 +11274,10 @@
 #define    MC_CMD_VPORT_RECONFIGURE_IN_LEN 44
 /* The handle of the v-port */
 #define       MC_CMD_VPORT_RECONFIGURE_IN_VPORT_ID_OFST 0
+#define       MC_CMD_VPORT_RECONFIGURE_IN_VPORT_ID_LEN 4
 /* Flags requesting what should be changed. */
 #define       MC_CMD_VPORT_RECONFIGURE_IN_FLAGS_OFST 4
+#define       MC_CMD_VPORT_RECONFIGURE_IN_FLAGS_LEN 4
 #define        MC_CMD_VPORT_RECONFIGURE_IN_REPLACE_VLAN_TAGS_LBN 0
 #define        MC_CMD_VPORT_RECONFIGURE_IN_REPLACE_VLAN_TAGS_WIDTH 1
 #define        MC_CMD_VPORT_RECONFIGURE_IN_REPLACE_MACADDRS_LBN 1
@@ -9476,14 +11287,17 @@
  * v-switch.
  */
 #define       MC_CMD_VPORT_RECONFIGURE_IN_NUM_VLAN_TAGS_OFST 8
+#define       MC_CMD_VPORT_RECONFIGURE_IN_NUM_VLAN_TAGS_LEN 4
 /* The actual VLAN tags to insert/remove */
 #define       MC_CMD_VPORT_RECONFIGURE_IN_VLAN_TAGS_OFST 12
+#define       MC_CMD_VPORT_RECONFIGURE_IN_VLAN_TAGS_LEN 4
 #define        MC_CMD_VPORT_RECONFIGURE_IN_VLAN_TAG_0_LBN 0
 #define        MC_CMD_VPORT_RECONFIGURE_IN_VLAN_TAG_0_WIDTH 16
 #define        MC_CMD_VPORT_RECONFIGURE_IN_VLAN_TAG_1_LBN 16
 #define        MC_CMD_VPORT_RECONFIGURE_IN_VLAN_TAG_1_WIDTH 16
 /* The number of MAC addresses to add */
 #define       MC_CMD_VPORT_RECONFIGURE_IN_NUM_MACADDRS_OFST 16
+#define       MC_CMD_VPORT_RECONFIGURE_IN_NUM_MACADDRS_LEN 4
 /* MAC addresses to add */
 #define       MC_CMD_VPORT_RECONFIGURE_IN_MACADDRS_OFST 20
 #define       MC_CMD_VPORT_RECONFIGURE_IN_MACADDRS_LEN 6
@@ -9492,6 +11306,7 @@
 /* MC_CMD_VPORT_RECONFIGURE_OUT msgresponse */
 #define    MC_CMD_VPORT_RECONFIGURE_OUT_LEN 4
 #define       MC_CMD_VPORT_RECONFIGURE_OUT_FLAGS_OFST 0
+#define       MC_CMD_VPORT_RECONFIGURE_OUT_FLAGS_LEN 4
 #define        MC_CMD_VPORT_RECONFIGURE_OUT_RESET_DONE_LBN 0
 #define        MC_CMD_VPORT_RECONFIGURE_OUT_RESET_DONE_WIDTH 1
 
@@ -9508,15 +11323,18 @@
 #define    MC_CMD_EVB_PORT_QUERY_IN_LEN 4
 /* The handle of the v-port */
 #define       MC_CMD_EVB_PORT_QUERY_IN_PORT_ID_OFST 0
+#define       MC_CMD_EVB_PORT_QUERY_IN_PORT_ID_LEN 4
 
 /* MC_CMD_EVB_PORT_QUERY_OUT msgresponse */
 #define    MC_CMD_EVB_PORT_QUERY_OUT_LEN 8
 /* The EVB port flags as defined at MC_CMD_VPORT_ALLOC. */
 #define       MC_CMD_EVB_PORT_QUERY_OUT_PORT_FLAGS_OFST 0
+#define       MC_CMD_EVB_PORT_QUERY_OUT_PORT_FLAGS_LEN 4
 /* The number of VLAN tags that may be used on a v-adaptor connected to this
  * EVB port.
  */
 #define       MC_CMD_EVB_PORT_QUERY_OUT_NUM_AVAILABLE_VLAN_TAGS_OFST 4
+#define       MC_CMD_EVB_PORT_QUERY_OUT_NUM_AVAILABLE_VLAN_TAGS_LEN 4
 
 
 /***********************************/
@@ -9528,14 +11346,16 @@
  */
 #define MC_CMD_DUMP_BUFTBL_ENTRIES 0xab
 
-#define MC_CMD_0xab_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+#define MC_CMD_0xab_PRIVILEGE_CTG SRIOV_CTG_INSECURE
 
 /* MC_CMD_DUMP_BUFTBL_ENTRIES_IN msgrequest */
 #define    MC_CMD_DUMP_BUFTBL_ENTRIES_IN_LEN 8
 /* Index of the first buffer table entry. */
 #define       MC_CMD_DUMP_BUFTBL_ENTRIES_IN_FIRSTID_OFST 0
+#define       MC_CMD_DUMP_BUFTBL_ENTRIES_IN_FIRSTID_LEN 4
 /* Number of buffer table entries to dump. */
 #define       MC_CMD_DUMP_BUFTBL_ENTRIES_IN_NUMENTRIES_OFST 4
+#define       MC_CMD_DUMP_BUFTBL_ENTRIES_IN_NUMENTRIES_LEN 4
 
 /* MC_CMD_DUMP_BUFTBL_ENTRIES_OUT msgresponse */
 #define    MC_CMD_DUMP_BUFTBL_ENTRIES_OUT_LENMIN 12
@@ -9559,6 +11379,7 @@
 /* MC_CMD_SET_RXDP_CONFIG_IN msgrequest */
 #define    MC_CMD_SET_RXDP_CONFIG_IN_LEN 4
 #define       MC_CMD_SET_RXDP_CONFIG_IN_DATA_OFST 0
+#define       MC_CMD_SET_RXDP_CONFIG_IN_DATA_LEN 4
 #define        MC_CMD_SET_RXDP_CONFIG_IN_PAD_HOST_DMA_LBN 0
 #define        MC_CMD_SET_RXDP_CONFIG_IN_PAD_HOST_DMA_WIDTH 1
 #define        MC_CMD_SET_RXDP_CONFIG_IN_PAD_HOST_LEN_LBN 1
@@ -9588,6 +11409,7 @@
 /* MC_CMD_GET_RXDP_CONFIG_OUT msgresponse */
 #define    MC_CMD_GET_RXDP_CONFIG_OUT_LEN 4
 #define       MC_CMD_GET_RXDP_CONFIG_OUT_DATA_OFST 0
+#define       MC_CMD_GET_RXDP_CONFIG_OUT_DATA_LEN 4
 #define        MC_CMD_GET_RXDP_CONFIG_OUT_PAD_HOST_DMA_LBN 0
 #define        MC_CMD_GET_RXDP_CONFIG_OUT_PAD_HOST_DMA_WIDTH 1
 #define        MC_CMD_GET_RXDP_CONFIG_OUT_PAD_HOST_LEN_LBN 1
@@ -9611,8 +11433,10 @@
 #define    MC_CMD_GET_CLOCK_OUT_LEN 8
 /* System frequency, MHz */
 #define       MC_CMD_GET_CLOCK_OUT_SYS_FREQ_OFST 0
+#define       MC_CMD_GET_CLOCK_OUT_SYS_FREQ_LEN 4
 /* DPCPU frequency, MHz */
 #define       MC_CMD_GET_CLOCK_OUT_DPCPU_FREQ_OFST 4
+#define       MC_CMD_GET_CLOCK_OUT_DPCPU_FREQ_LEN 4
 
 
 /***********************************/
@@ -9621,36 +11445,43 @@
  */
 #define MC_CMD_SET_CLOCK 0xad
 
-#define MC_CMD_0xad_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+#define MC_CMD_0xad_PRIVILEGE_CTG SRIOV_CTG_INSECURE
 
 /* MC_CMD_SET_CLOCK_IN msgrequest */
 #define    MC_CMD_SET_CLOCK_IN_LEN 28
 /* Requested frequency in MHz for system clock domain */
 #define       MC_CMD_SET_CLOCK_IN_SYS_FREQ_OFST 0
+#define       MC_CMD_SET_CLOCK_IN_SYS_FREQ_LEN 4
 /* enum: Leave the system clock domain frequency unchanged */
 #define          MC_CMD_SET_CLOCK_IN_SYS_DOMAIN_DONT_CHANGE  0x0
 /* Requested frequency in MHz for inter-core clock domain */
 #define       MC_CMD_SET_CLOCK_IN_ICORE_FREQ_OFST 4
+#define       MC_CMD_SET_CLOCK_IN_ICORE_FREQ_LEN 4
 /* enum: Leave the inter-core clock domain frequency unchanged */
 #define          MC_CMD_SET_CLOCK_IN_ICORE_DOMAIN_DONT_CHANGE  0x0
 /* Requested frequency in MHz for DPCPU clock domain */
 #define       MC_CMD_SET_CLOCK_IN_DPCPU_FREQ_OFST 8
+#define       MC_CMD_SET_CLOCK_IN_DPCPU_FREQ_LEN 4
 /* enum: Leave the DPCPU clock domain frequency unchanged */
 #define          MC_CMD_SET_CLOCK_IN_DPCPU_DOMAIN_DONT_CHANGE  0x0
 /* Requested frequency in MHz for PCS clock domain */
 #define       MC_CMD_SET_CLOCK_IN_PCS_FREQ_OFST 12
+#define       MC_CMD_SET_CLOCK_IN_PCS_FREQ_LEN 4
 /* enum: Leave the PCS clock domain frequency unchanged */
 #define          MC_CMD_SET_CLOCK_IN_PCS_DOMAIN_DONT_CHANGE  0x0
 /* Requested frequency in MHz for MC clock domain */
 #define       MC_CMD_SET_CLOCK_IN_MC_FREQ_OFST 16
+#define       MC_CMD_SET_CLOCK_IN_MC_FREQ_LEN 4
 /* enum: Leave the MC clock domain frequency unchanged */
 #define          MC_CMD_SET_CLOCK_IN_MC_DOMAIN_DONT_CHANGE  0x0
 /* Requested frequency in MHz for rmon clock domain */
 #define       MC_CMD_SET_CLOCK_IN_RMON_FREQ_OFST 20
+#define       MC_CMD_SET_CLOCK_IN_RMON_FREQ_LEN 4
 /* enum: Leave the rmon clock domain frequency unchanged */
 #define          MC_CMD_SET_CLOCK_IN_RMON_DOMAIN_DONT_CHANGE  0x0
 /* Requested frequency in MHz for vswitch clock domain */
 #define       MC_CMD_SET_CLOCK_IN_VSWITCH_FREQ_OFST 24
+#define       MC_CMD_SET_CLOCK_IN_VSWITCH_FREQ_LEN 4
 /* enum: Leave the vswitch clock domain frequency unchanged */
 #define          MC_CMD_SET_CLOCK_IN_VSWITCH_DOMAIN_DONT_CHANGE  0x0
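Every domain's _DONT_CHANGE value is 0, so a request that retunes only one clock can zero the whole message and fill in a single field. A hedged sketch assuming the sfc driver's MCDI buffer helpers:

static void set_clock_sys_only(efx_dword_t *inbuf, u32 sys_mhz)
{
	/* 0 in every field selects *_DOMAIN_DONT_CHANGE for that domain */
	memset(inbuf, 0, MC_CMD_SET_CLOCK_IN_LEN);
	MCDI_SET_DWORD(inbuf, SET_CLOCK_IN_SYS_FREQ, sys_mhz);
}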
 
@@ -9658,30 +11489,37 @@
 #define    MC_CMD_SET_CLOCK_OUT_LEN 28
 /* Resulting system frequency in MHz */
 #define       MC_CMD_SET_CLOCK_OUT_SYS_FREQ_OFST 0
+#define       MC_CMD_SET_CLOCK_OUT_SYS_FREQ_LEN 4
 /* enum: The system clock domain doesn't exist */
 #define          MC_CMD_SET_CLOCK_OUT_SYS_DOMAIN_UNSUPPORTED  0x0
 /* Resulting inter-core frequency in MHz */
 #define       MC_CMD_SET_CLOCK_OUT_ICORE_FREQ_OFST 4
+#define       MC_CMD_SET_CLOCK_OUT_ICORE_FREQ_LEN 4
 /* enum: The inter-core clock domain doesn't exist / isn't used */
 #define          MC_CMD_SET_CLOCK_OUT_ICORE_DOMAIN_UNSUPPORTED  0x0
 /* Resulting DPCPU frequency in MHz */
 #define       MC_CMD_SET_CLOCK_OUT_DPCPU_FREQ_OFST 8
+#define       MC_CMD_SET_CLOCK_OUT_DPCPU_FREQ_LEN 4
 /* enum: The dpcpu clock domain doesn't exist */
 #define          MC_CMD_SET_CLOCK_OUT_DPCPU_DOMAIN_UNSUPPORTED  0x0
 /* Resulting PCS frequency in MHz */
 #define       MC_CMD_SET_CLOCK_OUT_PCS_FREQ_OFST 12
+#define       MC_CMD_SET_CLOCK_OUT_PCS_FREQ_LEN 4
 /* enum: The PCS clock domain doesn't exist / isn't controlled */
 #define          MC_CMD_SET_CLOCK_OUT_PCS_DOMAIN_UNSUPPORTED  0x0
 /* Resulting MC frequency in MHz */
 #define       MC_CMD_SET_CLOCK_OUT_MC_FREQ_OFST 16
+#define       MC_CMD_SET_CLOCK_OUT_MC_FREQ_LEN 4
 /* enum: The MC clock domain doesn't exist / isn't controlled */
 #define          MC_CMD_SET_CLOCK_OUT_MC_DOMAIN_UNSUPPORTED  0x0
 /* Resulting rmon frequency in MHz */
 #define       MC_CMD_SET_CLOCK_OUT_RMON_FREQ_OFST 20
+#define       MC_CMD_SET_CLOCK_OUT_RMON_FREQ_LEN 4
 /* enum: The rmon clock domain doesn't exist / isn't controlled */
 #define          MC_CMD_SET_CLOCK_OUT_RMON_DOMAIN_UNSUPPORTED  0x0
 /* Resulting vswitch frequency in MHz */
 #define       MC_CMD_SET_CLOCK_OUT_VSWITCH_FREQ_OFST 24
+#define       MC_CMD_SET_CLOCK_OUT_VSWITCH_FREQ_LEN 4
 /* enum: The vswitch clock domain doesn't exist / isn't controlled */
 #define          MC_CMD_SET_CLOCK_OUT_VSWITCH_DOMAIN_UNSUPPORTED  0x0
 
@@ -9692,11 +11530,12 @@
  */
 #define MC_CMD_DPCPU_RPC 0xae
 
-#define MC_CMD_0xae_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+#define MC_CMD_0xae_PRIVILEGE_CTG SRIOV_CTG_INSECURE
 
 /* MC_CMD_DPCPU_RPC_IN msgrequest */
 #define    MC_CMD_DPCPU_RPC_IN_LEN 36
 #define       MC_CMD_DPCPU_RPC_IN_CPU_OFST 0
+#define       MC_CMD_DPCPU_RPC_IN_CPU_LEN 4
 /* enum: RxDPCPU0 */
 #define          MC_CMD_DPCPU_RPC_IN_DPCPU_RX0  0x0
 /* enum: TxDPCPU0 */
@@ -9761,12 +11600,15 @@
 #define       MC_CMD_DPCPU_RPC_IN_WDATA_LEN 24
 /* Register data to write. Only valid in write/write-read. */
 #define       MC_CMD_DPCPU_RPC_IN_CSR_ACCESS_DATA_OFST 16
+#define       MC_CMD_DPCPU_RPC_IN_CSR_ACCESS_DATA_LEN 4
 /* Register address. */
 #define       MC_CMD_DPCPU_RPC_IN_CSR_ACCESS_ADDRESS_OFST 20
+#define       MC_CMD_DPCPU_RPC_IN_CSR_ACCESS_ADDRESS_LEN 4
 
 /* MC_CMD_DPCPU_RPC_OUT msgresponse */
 #define    MC_CMD_DPCPU_RPC_OUT_LEN 36
 #define       MC_CMD_DPCPU_RPC_OUT_RC_OFST 0
+#define       MC_CMD_DPCPU_RPC_OUT_RC_LEN 4
 /* DATA */
 #define       MC_CMD_DPCPU_RPC_OUT_DATA_OFST 4
 #define       MC_CMD_DPCPU_RPC_OUT_DATA_LEN 32
@@ -9777,9 +11619,13 @@
 #define       MC_CMD_DPCPU_RPC_OUT_RDATA_OFST 12
 #define       MC_CMD_DPCPU_RPC_OUT_RDATA_LEN 24
 #define       MC_CMD_DPCPU_RPC_OUT_CSR_ACCESS_READ_VAL_1_OFST 12
+#define       MC_CMD_DPCPU_RPC_OUT_CSR_ACCESS_READ_VAL_1_LEN 4
 #define       MC_CMD_DPCPU_RPC_OUT_CSR_ACCESS_READ_VAL_2_OFST 16
+#define       MC_CMD_DPCPU_RPC_OUT_CSR_ACCESS_READ_VAL_2_LEN 4
 #define       MC_CMD_DPCPU_RPC_OUT_CSR_ACCESS_READ_VAL_3_OFST 20
+#define       MC_CMD_DPCPU_RPC_OUT_CSR_ACCESS_READ_VAL_3_LEN 4
 #define       MC_CMD_DPCPU_RPC_OUT_CSR_ACCESS_READ_VAL_4_OFST 24
+#define       MC_CMD_DPCPU_RPC_OUT_CSR_ACCESS_READ_VAL_4_LEN 4
 
 
 /***********************************/
@@ -9794,6 +11640,7 @@
 #define    MC_CMD_TRIGGER_INTERRUPT_IN_LEN 4
 /* Interrupt level relative to base for function. */
 #define       MC_CMD_TRIGGER_INTERRUPT_IN_INTR_LEVEL_OFST 0
+#define       MC_CMD_TRIGGER_INTERRUPT_IN_INTR_LEVEL_LEN 4
 
 /* MC_CMD_TRIGGER_INTERRUPT_OUT msgresponse */
 #define    MC_CMD_TRIGGER_INTERRUPT_OUT_LEN 0
@@ -9811,6 +11658,7 @@
 #define    MC_CMD_SHMBOOT_OP_IN_LEN 4
 /* Identifies the operation to perform */
 #define       MC_CMD_SHMBOOT_OP_IN_SHMBOOT_OP_OFST 0
+#define       MC_CMD_SHMBOOT_OP_IN_SHMBOOT_OP_LEN 4
 /* enum: Copy slave_data section to the slave core. (Greenport only) */
 #define          MC_CMD_SHMBOOT_OP_IN_PUSH_SLAVE_DATA  0x0
 
@@ -9824,13 +11672,16 @@
  */
 #define MC_CMD_CAP_BLK_READ 0xe7
 
-#define MC_CMD_0xe7_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+#define MC_CMD_0xe7_PRIVILEGE_CTG SRIOV_CTG_INSECURE
 
 /* MC_CMD_CAP_BLK_READ_IN msgrequest */
 #define    MC_CMD_CAP_BLK_READ_IN_LEN 12
 #define       MC_CMD_CAP_BLK_READ_IN_CAP_REG_OFST 0
+#define       MC_CMD_CAP_BLK_READ_IN_CAP_REG_LEN 4
 #define       MC_CMD_CAP_BLK_READ_IN_ADDR_OFST 4
+#define       MC_CMD_CAP_BLK_READ_IN_ADDR_LEN 4
 #define       MC_CMD_CAP_BLK_READ_IN_COUNT_OFST 8
+#define       MC_CMD_CAP_BLK_READ_IN_COUNT_LEN 4
 
 /* MC_CMD_CAP_BLK_READ_OUT msgresponse */
 #define    MC_CMD_CAP_BLK_READ_OUT_LENMIN 8
@@ -9850,53 +11701,77 @@
  */
 #define MC_CMD_DUMP_DO 0xe8
 
-#define MC_CMD_0xe8_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+#define MC_CMD_0xe8_PRIVILEGE_CTG SRIOV_CTG_INSECURE
 
 /* MC_CMD_DUMP_DO_IN msgrequest */
 #define    MC_CMD_DUMP_DO_IN_LEN 52
 #define       MC_CMD_DUMP_DO_IN_PADDING_OFST 0
+#define       MC_CMD_DUMP_DO_IN_PADDING_LEN 4
 #define       MC_CMD_DUMP_DO_IN_DUMPSPEC_SRC_OFST 4
+#define       MC_CMD_DUMP_DO_IN_DUMPSPEC_SRC_LEN 4
 #define          MC_CMD_DUMP_DO_IN_DUMPSPEC_SRC_CUSTOM  0x0 /* enum */
 #define          MC_CMD_DUMP_DO_IN_DUMPSPEC_SRC_DEFAULT  0x1 /* enum */
 #define       MC_CMD_DUMP_DO_IN_DUMPSPEC_SRC_CUSTOM_TYPE_OFST 8
+#define       MC_CMD_DUMP_DO_IN_DUMPSPEC_SRC_CUSTOM_TYPE_LEN 4
 #define          MC_CMD_DUMP_DO_IN_DUMP_LOCATION_NVRAM  0x1 /* enum */
 #define          MC_CMD_DUMP_DO_IN_DUMP_LOCATION_HOST_MEMORY  0x2 /* enum */
 #define          MC_CMD_DUMP_DO_IN_DUMP_LOCATION_HOST_MEMORY_MLI  0x3 /* enum */
 #define          MC_CMD_DUMP_DO_IN_DUMP_LOCATION_UART  0x4 /* enum */
 #define       MC_CMD_DUMP_DO_IN_DUMPSPEC_SRC_CUSTOM_NVRAM_PARTITION_TYPE_ID_OFST 12
+#define       MC_CMD_DUMP_DO_IN_DUMPSPEC_SRC_CUSTOM_NVRAM_PARTITION_TYPE_ID_LEN 4
 #define       MC_CMD_DUMP_DO_IN_DUMPSPEC_SRC_CUSTOM_NVRAM_OFFSET_OFST 16
+#define       MC_CMD_DUMP_DO_IN_DUMPSPEC_SRC_CUSTOM_NVRAM_OFFSET_LEN 4
 #define       MC_CMD_DUMP_DO_IN_DUMPSPEC_SRC_CUSTOM_HOST_MEMORY_ADDR_LO_OFST 12
+#define       MC_CMD_DUMP_DO_IN_DUMPSPEC_SRC_CUSTOM_HOST_MEMORY_ADDR_LO_LEN 4
 #define       MC_CMD_DUMP_DO_IN_DUMPSPEC_SRC_CUSTOM_HOST_MEMORY_ADDR_HI_OFST 16
+#define       MC_CMD_DUMP_DO_IN_DUMPSPEC_SRC_CUSTOM_HOST_MEMORY_ADDR_HI_LEN 4
 #define       MC_CMD_DUMP_DO_IN_DUMPSPEC_SRC_CUSTOM_HOST_MEMORY_MLI_ROOT_ADDR_LO_OFST 12
+#define       MC_CMD_DUMP_DO_IN_DUMPSPEC_SRC_CUSTOM_HOST_MEMORY_MLI_ROOT_ADDR_LO_LEN 4
 #define          MC_CMD_DUMP_DO_IN_HOST_MEMORY_MLI_PAGE_SIZE  0x1000 /* enum */
 #define       MC_CMD_DUMP_DO_IN_DUMPSPEC_SRC_CUSTOM_HOST_MEMORY_MLI_ROOT_ADDR_HI_OFST 16
+#define       MC_CMD_DUMP_DO_IN_DUMPSPEC_SRC_CUSTOM_HOST_MEMORY_MLI_ROOT_ADDR_HI_LEN 4
 #define       MC_CMD_DUMP_DO_IN_DUMPSPEC_SRC_CUSTOM_HOST_MEMORY_MLI_DEPTH_OFST 20
+#define       MC_CMD_DUMP_DO_IN_DUMPSPEC_SRC_CUSTOM_HOST_MEMORY_MLI_DEPTH_LEN 4
 #define          MC_CMD_DUMP_DO_IN_HOST_MEMORY_MLI_MAX_DEPTH  0x2 /* enum */
 #define       MC_CMD_DUMP_DO_IN_DUMPSPEC_SRC_CUSTOM_UART_PORT_OFST 12
+#define       MC_CMD_DUMP_DO_IN_DUMPSPEC_SRC_CUSTOM_UART_PORT_LEN 4
 /* enum: The UART port this command was received over (if using a UART
  * transport)
  */
 #define          MC_CMD_DUMP_DO_IN_UART_PORT_SRC  0xff
 #define       MC_CMD_DUMP_DO_IN_DUMPSPEC_SRC_CUSTOM_SIZE_OFST 24
+#define       MC_CMD_DUMP_DO_IN_DUMPSPEC_SRC_CUSTOM_SIZE_LEN 4
 #define       MC_CMD_DUMP_DO_IN_DUMPFILE_DST_OFST 28
+#define       MC_CMD_DUMP_DO_IN_DUMPFILE_DST_LEN 4
 #define          MC_CMD_DUMP_DO_IN_DUMPFILE_DST_CUSTOM  0x0 /* enum */
 #define          MC_CMD_DUMP_DO_IN_DUMPFILE_DST_NVRAM_DUMP_PARTITION  0x1 /* enum */
 #define       MC_CMD_DUMP_DO_IN_DUMPFILE_DST_CUSTOM_TYPE_OFST 32
+#define       MC_CMD_DUMP_DO_IN_DUMPFILE_DST_CUSTOM_TYPE_LEN 4
 /*            Enum values, see field(s): */
 /*               MC_CMD_DUMP_DO_IN/DUMPSPEC_SRC_CUSTOM_TYPE */
 #define       MC_CMD_DUMP_DO_IN_DUMPFILE_DST_CUSTOM_NVRAM_PARTITION_TYPE_ID_OFST 36
+#define       MC_CMD_DUMP_DO_IN_DUMPFILE_DST_CUSTOM_NVRAM_PARTITION_TYPE_ID_LEN 4
 #define       MC_CMD_DUMP_DO_IN_DUMPFILE_DST_CUSTOM_NVRAM_OFFSET_OFST 40
+#define       MC_CMD_DUMP_DO_IN_DUMPFILE_DST_CUSTOM_NVRAM_OFFSET_LEN 4
 #define       MC_CMD_DUMP_DO_IN_DUMPFILE_DST_CUSTOM_HOST_MEMORY_ADDR_LO_OFST 36
+#define       MC_CMD_DUMP_DO_IN_DUMPFILE_DST_CUSTOM_HOST_MEMORY_ADDR_LO_LEN 4
 #define       MC_CMD_DUMP_DO_IN_DUMPFILE_DST_CUSTOM_HOST_MEMORY_ADDR_HI_OFST 40
+#define       MC_CMD_DUMP_DO_IN_DUMPFILE_DST_CUSTOM_HOST_MEMORY_ADDR_HI_LEN 4
 #define       MC_CMD_DUMP_DO_IN_DUMPFILE_DST_CUSTOM_HOST_MEMORY_MLI_ROOT_ADDR_LO_OFST 36
+#define       MC_CMD_DUMP_DO_IN_DUMPFILE_DST_CUSTOM_HOST_MEMORY_MLI_ROOT_ADDR_LO_LEN 4
 #define       MC_CMD_DUMP_DO_IN_DUMPFILE_DST_CUSTOM_HOST_MEMORY_MLI_ROOT_ADDR_HI_OFST 40
+#define       MC_CMD_DUMP_DO_IN_DUMPFILE_DST_CUSTOM_HOST_MEMORY_MLI_ROOT_ADDR_HI_LEN 4
 #define       MC_CMD_DUMP_DO_IN_DUMPFILE_DST_CUSTOM_HOST_MEMORY_MLI_DEPTH_OFST 44
+#define       MC_CMD_DUMP_DO_IN_DUMPFILE_DST_CUSTOM_HOST_MEMORY_MLI_DEPTH_LEN 4
 #define       MC_CMD_DUMP_DO_IN_DUMPFILE_DST_CUSTOM_UART_PORT_OFST 36
+#define       MC_CMD_DUMP_DO_IN_DUMPFILE_DST_CUSTOM_UART_PORT_LEN 4
 #define       MC_CMD_DUMP_DO_IN_DUMPFILE_DST_CUSTOM_SIZE_OFST 48
+#define       MC_CMD_DUMP_DO_IN_DUMPFILE_DST_CUSTOM_SIZE_LEN 4
 
 /* MC_CMD_DUMP_DO_OUT msgresponse */
 #define    MC_CMD_DUMP_DO_OUT_LEN 4
 #define       MC_CMD_DUMP_DO_OUT_DUMPFILE_SIZE_OFST 0
+#define       MC_CMD_DUMP_DO_OUT_DUMPFILE_SIZE_LEN 4
 
 
 /***********************************/
@@ -9905,41 +11780,64 @@
  */
 #define MC_CMD_DUMP_CONFIGURE_UNSOLICITED 0xe9
 
-#define MC_CMD_0xe9_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+#define MC_CMD_0xe9_PRIVILEGE_CTG SRIOV_CTG_INSECURE
 
 /* MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN msgrequest */
 #define    MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_LEN 52
 #define       MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_ENABLE_OFST 0
+#define       MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_ENABLE_LEN 4
 #define       MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPSPEC_SRC_OFST 4
+#define       MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPSPEC_SRC_LEN 4
 /*            Enum values, see field(s): */
 /*               MC_CMD_DUMP_DO/MC_CMD_DUMP_DO_IN/DUMPSPEC_SRC */
 #define       MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPSPEC_SRC_CUSTOM_TYPE_OFST 8
+#define       MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPSPEC_SRC_CUSTOM_TYPE_LEN 4
 /*            Enum values, see field(s): */
 /*               MC_CMD_DUMP_DO/MC_CMD_DUMP_DO_IN/DUMPSPEC_SRC_CUSTOM_TYPE */
 #define       MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPSPEC_SRC_CUSTOM_NVRAM_PARTITION_TYPE_ID_OFST 12
+#define       MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPSPEC_SRC_CUSTOM_NVRAM_PARTITION_TYPE_ID_LEN 4
 #define       MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPSPEC_SRC_CUSTOM_NVRAM_OFFSET_OFST 16
+#define       MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPSPEC_SRC_CUSTOM_NVRAM_OFFSET_LEN 4
 #define       MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPSPEC_SRC_CUSTOM_HOST_MEMORY_ADDR_LO_OFST 12
+#define       MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPSPEC_SRC_CUSTOM_HOST_MEMORY_ADDR_LO_LEN 4
 #define       MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPSPEC_SRC_CUSTOM_HOST_MEMORY_ADDR_HI_OFST 16
+#define       MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPSPEC_SRC_CUSTOM_HOST_MEMORY_ADDR_HI_LEN 4
 #define       MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPSPEC_SRC_CUSTOM_HOST_MEMORY_MLI_ROOT_ADDR_LO_OFST 12
+#define       MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPSPEC_SRC_CUSTOM_HOST_MEMORY_MLI_ROOT_ADDR_LO_LEN 4
 #define       MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPSPEC_SRC_CUSTOM_HOST_MEMORY_MLI_ROOT_ADDR_HI_OFST 16
+#define       MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPSPEC_SRC_CUSTOM_HOST_MEMORY_MLI_ROOT_ADDR_HI_LEN 4
 #define       MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPSPEC_SRC_CUSTOM_HOST_MEMORY_MLI_DEPTH_OFST 20
+#define       MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPSPEC_SRC_CUSTOM_HOST_MEMORY_MLI_DEPTH_LEN 4
 #define       MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPSPEC_SRC_CUSTOM_UART_PORT_OFST 12
+#define       MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPSPEC_SRC_CUSTOM_UART_PORT_LEN 4
 #define       MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPSPEC_SRC_CUSTOM_SIZE_OFST 24
+#define       MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPSPEC_SRC_CUSTOM_SIZE_LEN 4
 #define       MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPFILE_DST_OFST 28
+#define       MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPFILE_DST_LEN 4
 /*            Enum values, see field(s): */
 /*               MC_CMD_DUMP_DO/MC_CMD_DUMP_DO_IN/DUMPFILE_DST */
 #define       MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPFILE_DST_CUSTOM_TYPE_OFST 32
+#define       MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPFILE_DST_CUSTOM_TYPE_LEN 4
 /*            Enum values, see field(s): */
 /*               MC_CMD_DUMP_DO/MC_CMD_DUMP_DO_IN/DUMPSPEC_SRC_CUSTOM_TYPE */
 #define       MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPFILE_DST_CUSTOM_NVRAM_PARTITION_TYPE_ID_OFST 36
+#define       MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPFILE_DST_CUSTOM_NVRAM_PARTITION_TYPE_ID_LEN 4
 #define       MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPFILE_DST_CUSTOM_NVRAM_OFFSET_OFST 40
+#define       MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPFILE_DST_CUSTOM_NVRAM_OFFSET_LEN 4
 #define       MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPFILE_DST_CUSTOM_HOST_MEMORY_ADDR_LO_OFST 36
+#define       MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPFILE_DST_CUSTOM_HOST_MEMORY_ADDR_LO_LEN 4
 #define       MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPFILE_DST_CUSTOM_HOST_MEMORY_ADDR_HI_OFST 40
+#define       MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPFILE_DST_CUSTOM_HOST_MEMORY_ADDR_HI_LEN 4
 #define       MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPFILE_DST_CUSTOM_HOST_MEMORY_MLI_ROOT_ADDR_LO_OFST 36
+#define       MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPFILE_DST_CUSTOM_HOST_MEMORY_MLI_ROOT_ADDR_LO_LEN 4
 #define       MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPFILE_DST_CUSTOM_HOST_MEMORY_MLI_ROOT_ADDR_HI_OFST 40
+#define       MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPFILE_DST_CUSTOM_HOST_MEMORY_MLI_ROOT_ADDR_HI_LEN 4
 #define       MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPFILE_DST_CUSTOM_HOST_MEMORY_MLI_DEPTH_OFST 44
+#define       MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPFILE_DST_CUSTOM_HOST_MEMORY_MLI_DEPTH_LEN 4
 #define       MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPFILE_DST_CUSTOM_UART_PORT_OFST 36
+#define       MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPFILE_DST_CUSTOM_UART_PORT_LEN 4
 #define       MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPFILE_DST_CUSTOM_SIZE_OFST 48
+#define       MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPFILE_DST_CUSTOM_SIZE_LEN 4
 
 
 /***********************************/
@@ -9950,17 +11848,20 @@
  */
 #define MC_CMD_SET_PSU 0xea
 
-#define MC_CMD_0xea_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+#define MC_CMD_0xea_PRIVILEGE_CTG SRIOV_CTG_INSECURE
 
 /* MC_CMD_SET_PSU_IN msgrequest */
 #define    MC_CMD_SET_PSU_IN_LEN 12
 #define       MC_CMD_SET_PSU_IN_PARAM_OFST 0
+#define       MC_CMD_SET_PSU_IN_PARAM_LEN 4
 #define          MC_CMD_SET_PSU_IN_PARAM_SUPPLY_VOLTAGE  0x0 /* enum */
 #define       MC_CMD_SET_PSU_IN_RAIL_OFST 4
+#define       MC_CMD_SET_PSU_IN_RAIL_LEN 4
 #define          MC_CMD_SET_PSU_IN_RAIL_0V9  0x0 /* enum */
 #define          MC_CMD_SET_PSU_IN_RAIL_1V2  0x1 /* enum */
 /* desired value, eg voltage in mV */
 #define       MC_CMD_SET_PSU_IN_VALUE_OFST 8
+#define       MC_CMD_SET_PSU_IN_VALUE_LEN 4
 
 /* MC_CMD_SET_PSU_OUT msgresponse */
 #define    MC_CMD_SET_PSU_OUT_LEN 0
@@ -9980,7 +11881,9 @@
 /* MC_CMD_GET_FUNCTION_INFO_OUT msgresponse */
 #define    MC_CMD_GET_FUNCTION_INFO_OUT_LEN 8
 #define       MC_CMD_GET_FUNCTION_INFO_OUT_PF_OFST 0
+#define       MC_CMD_GET_FUNCTION_INFO_OUT_PF_LEN 4
 #define       MC_CMD_GET_FUNCTION_INFO_OUT_VF_OFST 4
+#define       MC_CMD_GET_FUNCTION_INFO_OUT_VF_LEN 4
 
 
 /***********************************/
@@ -10016,12 +11919,16 @@
 #define    MC_CMD_UART_SEND_DATA_OUT_LEN(num) (16+1*(num))
 /* CRC32 over OFFSET, LENGTH, RESERVED, DATA */
 #define       MC_CMD_UART_SEND_DATA_OUT_CHECKSUM_OFST 0
+#define       MC_CMD_UART_SEND_DATA_OUT_CHECKSUM_LEN 4
 /* Offset at which to write the data */
 #define       MC_CMD_UART_SEND_DATA_OUT_OFFSET_OFST 4
+#define       MC_CMD_UART_SEND_DATA_OUT_OFFSET_LEN 4
 /* Length of data */
 #define       MC_CMD_UART_SEND_DATA_OUT_LENGTH_OFST 8
+#define       MC_CMD_UART_SEND_DATA_OUT_LENGTH_LEN 4
 /* Reserved for future use */
 #define       MC_CMD_UART_SEND_DATA_OUT_RESERVED_OFST 12
+#define       MC_CMD_UART_SEND_DATA_OUT_RESERVED_LEN 4
 #define       MC_CMD_UART_SEND_DATA_OUT_DATA_OFST 16
 #define       MC_CMD_UART_SEND_DATA_OUT_DATA_LEN 1
 #define       MC_CMD_UART_SEND_DATA_OUT_DATA_MINNUM 0
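/* Illustrative sketch (not part of the generated header): per the comment
 * above, CHECKSUM covers every field after itself, i.e. bytes from
 * OFFSET_OFST (4) to the end of the message. The exact CRC32 variant and
 * seed are firmware-defined; zlib's crc32() with a zero seed is assumed
 * here purely for illustration.
 */
#include <stddef.h>
#include <stdint.h>
#include <zlib.h>

static uint32_t uart_send_data_checksum(const uint8_t *msg, size_t msglen)
{
	/* skip the 4-byte CHECKSUM field itself */
	return (uint32_t)crc32(0L, msg + MC_CMD_UART_SEND_DATA_OUT_OFFSET_OFST,
			       (unsigned int)(msglen -
					      MC_CMD_UART_SEND_DATA_OUT_OFFSET_OFST));
}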
@@ -10044,12 +11951,16 @@
 #define    MC_CMD_UART_RECV_DATA_OUT_LEN 16
 /* CRC32 over OFFSET, LENGTH, RESERVED */
 #define       MC_CMD_UART_RECV_DATA_OUT_CHECKSUM_OFST 0
+#define       MC_CMD_UART_RECV_DATA_OUT_CHECKSUM_LEN 4
 /* Offset from which to read the data */
 #define       MC_CMD_UART_RECV_DATA_OUT_OFFSET_OFST 4
+#define       MC_CMD_UART_RECV_DATA_OUT_OFFSET_LEN 4
 /* Length of data */
 #define       MC_CMD_UART_RECV_DATA_OUT_LENGTH_OFST 8
+#define       MC_CMD_UART_RECV_DATA_OUT_LENGTH_LEN 4
 /* Reserved for future use */
 #define       MC_CMD_UART_RECV_DATA_OUT_RESERVED_OFST 12
+#define       MC_CMD_UART_RECV_DATA_OUT_RESERVED_LEN 4
 
 /* MC_CMD_UART_RECV_DATA_IN msgresponse */
 #define    MC_CMD_UART_RECV_DATA_IN_LENMIN 16
@@ -10057,12 +11968,16 @@
 #define    MC_CMD_UART_RECV_DATA_IN_LEN(num) (16+1*(num))
 /* CRC32 over RESERVED1, RESERVED2, RESERVED3, DATA */
 #define       MC_CMD_UART_RECV_DATA_IN_CHECKSUM_OFST 0
+#define       MC_CMD_UART_RECV_DATA_IN_CHECKSUM_LEN 4
 /* Reserved for future use */
 #define       MC_CMD_UART_RECV_DATA_IN_RESERVED1_OFST 4
+#define       MC_CMD_UART_RECV_DATA_IN_RESERVED1_LEN 4
 /* Reserved for future use */
 #define       MC_CMD_UART_RECV_DATA_IN_RESERVED2_OFST 8
+#define       MC_CMD_UART_RECV_DATA_IN_RESERVED2_LEN 4
 /* Reserved for future use */
 #define       MC_CMD_UART_RECV_DATA_IN_RESERVED3_OFST 12
+#define       MC_CMD_UART_RECV_DATA_IN_RESERVED3_LEN 4
 #define       MC_CMD_UART_RECV_DATA_IN_DATA_OFST 16
 #define       MC_CMD_UART_RECV_DATA_IN_DATA_LEN 1
 #define       MC_CMD_UART_RECV_DATA_IN_DATA_MINNUM 0
@@ -10075,14 +11990,16 @@
  */
 #define MC_CMD_READ_FUSES 0xf0
 
-#define MC_CMD_0xf0_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+#define MC_CMD_0xf0_PRIVILEGE_CTG SRIOV_CTG_INSECURE
 
 /* MC_CMD_READ_FUSES_IN msgrequest */
 #define    MC_CMD_READ_FUSES_IN_LEN 8
 /* Offset in OTP to read */
 #define       MC_CMD_READ_FUSES_IN_OFFSET_OFST 0
+#define       MC_CMD_READ_FUSES_IN_OFFSET_LEN 4
 /* Length of data to read in bytes */
 #define       MC_CMD_READ_FUSES_IN_LENGTH_OFST 4
+#define       MC_CMD_READ_FUSES_IN_LENGTH_LEN 4
 
 /* MC_CMD_READ_FUSES_OUT msgresponse */
 #define    MC_CMD_READ_FUSES_OUT_LENMIN 4
@@ -10090,6 +12007,7 @@
 #define    MC_CMD_READ_FUSES_OUT_LEN(num) (4+1*(num))
 /* Length of returned OTP data in bytes */
 #define       MC_CMD_READ_FUSES_OUT_LENGTH_OFST 0
+#define       MC_CMD_READ_FUSES_OUT_LENGTH_LEN 4
 /* Returned data */
 #define       MC_CMD_READ_FUSES_OUT_DATA_OFST 4
 #define       MC_CMD_READ_FUSES_OUT_DATA_LEN 1
@@ -10197,6 +12115,60 @@
 #define          MC_CMD_KR_TUNE_RXEQ_GET_OUT_CTLE_EQC  0x9
 /* enum: CTLE EQ Resistor (0-7, Medford) */
 #define          MC_CMD_KR_TUNE_RXEQ_GET_OUT_CTLE_EQRES  0xa
+/* enum: CTLE gain (0-31, Medford2) */
+#define          MC_CMD_KR_TUNE_RXEQ_GET_OUT_CTLE_GAIN  0xb
+/* enum: CTLE pole (0-31, Medford2) */
+#define          MC_CMD_KR_TUNE_RXEQ_GET_OUT_CTLE_POLE  0xc
+/* enum: CTLE peaking (0-31, Medford2) */
+#define          MC_CMD_KR_TUNE_RXEQ_GET_OUT_CTLE_PEAK  0xd
+/* enum: DFE Tap1 - even path (Medford2 - 6 bit signed (-29 - +29)) */
+#define          MC_CMD_KR_TUNE_RXEQ_GET_OUT_DFE_TAP1_EVEN  0xe
+/* enum: DFE Tap1 - odd path (Medford2 - 6 bit signed (-29 - +29)) */
+#define          MC_CMD_KR_TUNE_RXEQ_GET_OUT_DFE_TAP1_ODD  0xf
+/* enum: DFE Tap2 (Medford2 - 6 bit signed (-20 - +20)) */
+#define          MC_CMD_KR_TUNE_RXEQ_GET_OUT_DFE_TAP2  0x10
+/* enum: DFE Tap3 (Medford2 - 6 bit signed (-20 - +20)) */
+#define          MC_CMD_KR_TUNE_RXEQ_GET_OUT_DFE_TAP3  0x11
+/* enum: DFE Tap4 (Medford2 - 6 bit signed (-20 - +20)) */
+#define          MC_CMD_KR_TUNE_RXEQ_GET_OUT_DFE_TAP4  0x12
+/* enum: DFE Tap5 (Medford2 - 6 bit signed (-24 - +24)) */
+#define          MC_CMD_KR_TUNE_RXEQ_GET_OUT_DFE_TAP5  0x13
+/* enum: DFE Tap6 (Medford2 - 6 bit signed (-24 - +24)) */
+#define          MC_CMD_KR_TUNE_RXEQ_GET_OUT_DFE_TAP6  0x14
+/* enum: DFE Tap7 (Medford2 - 6 bit signed (-24 - +24)) */
+#define          MC_CMD_KR_TUNE_RXEQ_GET_OUT_DFE_TAP7  0x15
+/* enum: DFE Tap8 (Medford2 - 6 bit signed (-24 - +24)) */
+#define          MC_CMD_KR_TUNE_RXEQ_GET_OUT_DFE_TAP8  0x16
+/* enum: DFE Tap9 (Medford2 - 6 bit signed (-24 - +24)) */
+#define          MC_CMD_KR_TUNE_RXEQ_GET_OUT_DFE_TAP9  0x17
+/* enum: DFE Tap10 (Medford2 - 6 bit signed (-24 - +24)) */
+#define          MC_CMD_KR_TUNE_RXEQ_GET_OUT_DFE_TAP10  0x18
+/* enum: DFE Tap11 (Medford2 - 6 bit signed (-24 - +24)) */
+#define          MC_CMD_KR_TUNE_RXEQ_GET_OUT_DFE_TAP11  0x19
+/* enum: DFE Tap12 (Medford2 - 6 bit signed (-24 - +24)) */
+#define          MC_CMD_KR_TUNE_RXEQ_GET_OUT_DFE_TAP12  0x1a
+/* enum: I/Q clk offset (Medford2 - 4 bit signed (-5 - +5)) */
+#define          MC_CMD_KR_TUNE_RXEQ_GET_OUT_IQ_OFF  0x1b
+/* enum: Negative h1 polarity data sampler offset calibration code, even path
+ * (Medford2 - 6 bit signed (-29 - +29))
+ */
+#define          MC_CMD_KR_TUNE_RXEQ_GET_OUT_H1N_OFF_EVEN  0x1c
+/* enum: Negative h1 polarity data sampler offset calibration code, odd path
+ * (Medford2 - 6 bit signed (-29 - +29))
+ */
+#define          MC_CMD_KR_TUNE_RXEQ_GET_OUT_H1N_OFF_ODD  0x1d
+/* enum: Positive h1 polarity data sampler offset calibration code, even path
+ * (Medford2 - 6 bit signed (-29 - +29))
+ */
+#define          MC_CMD_KR_TUNE_RXEQ_GET_OUT_H1P_OFF_EVEN  0x1e
+/* enum: Positive h1 polarity data sampler offset calibration code, odd path
+ * (Medford2 - 6 bit signed (-29 - +29))
+ */
+#define          MC_CMD_KR_TUNE_RXEQ_GET_OUT_H1P_OFF_ODD  0x1f
+/* enum: CDR calibration loop code (Medford2) */
+#define          MC_CMD_KR_TUNE_RXEQ_GET_OUT_CDR_PVT  0x20
+/* enum: CDR integral loop code (Medford2) */
+#define          MC_CMD_KR_TUNE_RXEQ_GET_OUT_CDR_INTEG  0x21
 #define        MC_CMD_KR_TUNE_RXEQ_GET_OUT_PARAM_LANE_LBN 8
 #define        MC_CMD_KR_TUNE_RXEQ_GET_OUT_PARAM_LANE_WIDTH 3
 #define          MC_CMD_KR_TUNE_RXEQ_GET_OUT_LANE_0  0x0 /* enum */
@@ -10268,7 +12240,7 @@
 #define       MC_CMD_KR_TUNE_TXEQ_GET_OUT_PARAM_MAXNUM 63
 #define        MC_CMD_KR_TUNE_TXEQ_GET_OUT_PARAM_ID_LBN 0
 #define        MC_CMD_KR_TUNE_TXEQ_GET_OUT_PARAM_ID_WIDTH 8
-/* enum: TX Amplitude (Huntington, Medford) */
+/* enum: TX Amplitude (Huntington, Medford, Medford2) */
 #define          MC_CMD_KR_TUNE_TXEQ_GET_OUT_TX_LEV  0x0
 /* enum: De-Emphasis Tap1 Magnitude (0-7) (Huntington) */
 #define          MC_CMD_KR_TUNE_TXEQ_GET_OUT_TX_MODE  0x1
@@ -10290,9 +12262,9 @@
 #define          MC_CMD_KR_TUNE_TXEQ_GET_OUT_TX_RT_SET  0x9
 /* enum: TX Amplitude Fine control (Medford) */
 #define          MC_CMD_KR_TUNE_TXEQ_GET_OUT_TX_LEV_FINE  0xa
-/* enum: Pre-shoot Tap (Medford) */
+/* enum: Pre-shoot Tap (Medford, Medford2) */
 #define          MC_CMD_KR_TUNE_TXEQ_GET_OUT_TAP_ADV  0xb
-/* enum: De-emphasis Tap (Medford) */
+/* enum: De-emphasis Tap (Medford, Medford2) */
 #define          MC_CMD_KR_TUNE_TXEQ_GET_OUT_TAP_DLY  0xc
 #define        MC_CMD_KR_TUNE_TXEQ_GET_OUT_PARAM_LANE_LBN 8
 #define        MC_CMD_KR_TUNE_TXEQ_GET_OUT_PARAM_LANE_WIDTH 3
@@ -10361,7 +12333,24 @@
 /* Align the arguments to 32 bits */
 #define       MC_CMD_KR_TUNE_START_EYE_PLOT_IN_KR_TUNE_RSVD_OFST 1
 #define       MC_CMD_KR_TUNE_START_EYE_PLOT_IN_KR_TUNE_RSVD_LEN 3
+/* Port-relative lane to scan eye on */
 #define       MC_CMD_KR_TUNE_START_EYE_PLOT_IN_LANE_OFST 4
+#define       MC_CMD_KR_TUNE_START_EYE_PLOT_IN_LANE_LEN 4
+
+/* MC_CMD_KR_TUNE_START_EYE_PLOT_V2_IN msgrequest */
+#define    MC_CMD_KR_TUNE_START_EYE_PLOT_V2_IN_LEN 12
+/* Requested operation */
+#define       MC_CMD_KR_TUNE_START_EYE_PLOT_V2_IN_KR_TUNE_OP_OFST 0
+#define       MC_CMD_KR_TUNE_START_EYE_PLOT_V2_IN_KR_TUNE_OP_LEN 1
+/* Align the arguments to 32 bits */
+#define       MC_CMD_KR_TUNE_START_EYE_PLOT_V2_IN_KR_TUNE_RSVD_OFST 1
+#define       MC_CMD_KR_TUNE_START_EYE_PLOT_V2_IN_KR_TUNE_RSVD_LEN 3
+/* Port-relative lane to scan eye on */
+#define       MC_CMD_KR_TUNE_START_EYE_PLOT_V2_IN_LANE_OFST 4
+#define       MC_CMD_KR_TUNE_START_EYE_PLOT_V2_IN_LANE_LEN 4
+/* Scan duration / cycle count */
+#define       MC_CMD_KR_TUNE_START_EYE_PLOT_V2_IN_BER_OFST 8
+#define       MC_CMD_KR_TUNE_START_EYE_PLOT_V2_IN_BER_LEN 4
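/* Illustrative sketch (not part of the generated header): packing the
 * 12-byte V2 request. MCDI fields are little-endian on the wire; put_le32()
 * is a local helper, and the KR_TUNE sub-operation code for starting an eye
 * plot is defined elsewhere in this file.
 */
#include <stdint.h>
#include <string.h>

static void put_le32(uint8_t *buf, unsigned int ofst, uint32_t v)
{
	buf[ofst + 0] = v & 0xff;
	buf[ofst + 1] = (v >> 8) & 0xff;
	buf[ofst + 2] = (v >> 16) & 0xff;
	buf[ofst + 3] = (v >> 24) & 0xff;
}

static void build_eye_plot_v2(uint8_t *inbuf, uint8_t op, uint32_t lane,
			      uint32_t ber_cycles)
{
	memset(inbuf, 0, MC_CMD_KR_TUNE_START_EYE_PLOT_V2_IN_LEN);
	inbuf[MC_CMD_KR_TUNE_START_EYE_PLOT_V2_IN_KR_TUNE_OP_OFST] = op;
	put_le32(inbuf, MC_CMD_KR_TUNE_START_EYE_PLOT_V2_IN_LANE_OFST, lane);
	put_le32(inbuf, MC_CMD_KR_TUNE_START_EYE_PLOT_V2_IN_BER_OFST, ber_cycles);
}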
 
 /* MC_CMD_KR_TUNE_START_EYE_PLOT_OUT msgresponse */
 #define    MC_CMD_KR_TUNE_START_EYE_PLOT_OUT_LEN 0
@@ -10393,10 +12382,12 @@
 #define       MC_CMD_KR_TUNE_READ_FOM_IN_KR_TUNE_RSVD_OFST 1
 #define       MC_CMD_KR_TUNE_READ_FOM_IN_KR_TUNE_RSVD_LEN 3
 #define       MC_CMD_KR_TUNE_READ_FOM_IN_LANE_OFST 4
+#define       MC_CMD_KR_TUNE_READ_FOM_IN_LANE_LEN 4
 
 /* MC_CMD_KR_TUNE_READ_FOM_OUT msgresponse */
 #define    MC_CMD_KR_TUNE_READ_FOM_OUT_LEN 4
 #define       MC_CMD_KR_TUNE_READ_FOM_OUT_FOM_OFST 0
+#define       MC_CMD_KR_TUNE_READ_FOM_OUT_FOM_LEN 4
 
 
 /***********************************/
@@ -10594,6 +12585,7 @@
 #define       MC_CMD_PCIE_TUNE_START_EYE_PLOT_IN_PCIE_TUNE_RSVD_OFST 1
 #define       MC_CMD_PCIE_TUNE_START_EYE_PLOT_IN_PCIE_TUNE_RSVD_LEN 3
 #define       MC_CMD_PCIE_TUNE_START_EYE_PLOT_IN_LANE_OFST 4
+#define       MC_CMD_PCIE_TUNE_START_EYE_PLOT_IN_LANE_LEN 4
 
 /* MC_CMD_PCIE_TUNE_START_EYE_PLOT_OUT msgresponse */
 #define    MC_CMD_PCIE_TUNE_START_EYE_PLOT_OUT_LEN 0
@@ -10636,6 +12628,7 @@
 #define    MC_CMD_LICENSING_IN_LEN 4
 /* identifies the type of operation requested */
 #define       MC_CMD_LICENSING_IN_OP_OFST 0
+#define       MC_CMD_LICENSING_IN_OP_LEN 4
 /* enum: re-read and apply licenses after a license key partition update; note
  * that this operation returns a zero-length response
  */
@@ -10647,23 +12640,30 @@
 #define    MC_CMD_LICENSING_OUT_LEN 28
 /* count of application keys which are valid */
 #define       MC_CMD_LICENSING_OUT_VALID_APP_KEYS_OFST 0
+#define       MC_CMD_LICENSING_OUT_VALID_APP_KEYS_LEN 4
 /* sum of UNVERIFIABLE_APP_KEYS + WRONG_NODE_APP_KEYS (for compatibility with
  * MC_CMD_FC_OP_LICENSE)
  */
 #define       MC_CMD_LICENSING_OUT_INVALID_APP_KEYS_OFST 4
+#define       MC_CMD_LICENSING_OUT_INVALID_APP_KEYS_LEN 4
 /* count of application keys which are invalid due to being blacklisted */
 #define       MC_CMD_LICENSING_OUT_BLACKLISTED_APP_KEYS_OFST 8
+#define       MC_CMD_LICENSING_OUT_BLACKLISTED_APP_KEYS_LEN 4
 /* count of application keys which are invalid due to being unverifiable */
 #define       MC_CMD_LICENSING_OUT_UNVERIFIABLE_APP_KEYS_OFST 12
+#define       MC_CMD_LICENSING_OUT_UNVERIFIABLE_APP_KEYS_LEN 4
 /* count of application keys which are invalid due to being for the wrong node
  */
 #define       MC_CMD_LICENSING_OUT_WRONG_NODE_APP_KEYS_OFST 16
+#define       MC_CMD_LICENSING_OUT_WRONG_NODE_APP_KEYS_LEN 4
 /* licensing state (for diagnostics; the exact meanings of the bits in this
  * field are private to the firmware)
  */
 #define       MC_CMD_LICENSING_OUT_LICENSING_STATE_OFST 20
+#define       MC_CMD_LICENSING_OUT_LICENSING_STATE_LEN 4
 /* licensing subsystem self-test report (for manftest) */
 #define       MC_CMD_LICENSING_OUT_LICENSING_SELF_TEST_OFST 24
+#define       MC_CMD_LICENSING_OUT_LICENSING_SELF_TEST_LEN 4
 /* enum: licensing subsystem self-test failed */
 #define          MC_CMD_LICENSING_OUT_SELF_TEST_FAIL  0x0
 /* enum: licensing subsystem self-test passed */
@@ -10683,6 +12683,7 @@
 #define    MC_CMD_LICENSING_V3_IN_LEN 4
 /* identifies the type of operation requested */
 #define       MC_CMD_LICENSING_V3_IN_OP_OFST 0
+#define       MC_CMD_LICENSING_V3_IN_OP_LEN 4
 /* enum: re-read and apply licenses after a license key partition update; note
  * that this operation returns a zero-length response
  */
@@ -10696,20 +12697,26 @@
 #define    MC_CMD_LICENSING_V3_OUT_LEN 88
 /* count of keys which are valid */
 #define       MC_CMD_LICENSING_V3_OUT_VALID_KEYS_OFST 0
+#define       MC_CMD_LICENSING_V3_OUT_VALID_KEYS_LEN 4
 /* sum of UNVERIFIABLE_KEYS + WRONG_NODE_KEYS (for compatibility with
  * MC_CMD_FC_OP_LICENSE)
  */
 #define       MC_CMD_LICENSING_V3_OUT_INVALID_KEYS_OFST 4
+#define       MC_CMD_LICENSING_V3_OUT_INVALID_KEYS_LEN 4
 /* count of keys which are invalid due to being unverifiable */
 #define       MC_CMD_LICENSING_V3_OUT_UNVERIFIABLE_KEYS_OFST 8
+#define       MC_CMD_LICENSING_V3_OUT_UNVERIFIABLE_KEYS_LEN 4
 /* count of keys which are invalid due to being for the wrong node */
 #define       MC_CMD_LICENSING_V3_OUT_WRONG_NODE_KEYS_OFST 12
+#define       MC_CMD_LICENSING_V3_OUT_WRONG_NODE_KEYS_LEN 4
 /* licensing state (for diagnostics; the exact meanings of the bits in this
  * field are private to the firmware)
  */
 #define       MC_CMD_LICENSING_V3_OUT_LICENSING_STATE_OFST 16
+#define       MC_CMD_LICENSING_V3_OUT_LICENSING_STATE_LEN 4
 /* licensing subsystem self-test report (for manftest) */
 #define       MC_CMD_LICENSING_V3_OUT_LICENSING_SELF_TEST_OFST 20
+#define       MC_CMD_LICENSING_V3_OUT_LICENSING_SELF_TEST_LEN 4
 /* enum: licensing subsystem self-test failed */
 #define          MC_CMD_LICENSING_V3_OUT_SELF_TEST_FAIL  0x0
 /* enum: licensing subsystem self-test passed */
@@ -10750,8 +12757,10 @@
 #define    MC_CMD_LICENSING_GET_ID_V3_OUT_LEN(num) (8+1*(num))
 /* type of license (eg 3) */
 #define       MC_CMD_LICENSING_GET_ID_V3_OUT_LICENSE_TYPE_OFST 0
+#define       MC_CMD_LICENSING_GET_ID_V3_OUT_LICENSE_TYPE_LEN 4
 /* length of the license ID (in bytes) */
 #define       MC_CMD_LICENSING_GET_ID_V3_OUT_LICENSE_ID_LENGTH_OFST 4
+#define       MC_CMD_LICENSING_GET_ID_V3_OUT_LICENSE_ID_LENGTH_LEN 4
 /* the unique license ID of the adapter */
 #define       MC_CMD_LICENSING_GET_ID_V3_OUT_LICENSE_ID_OFST 8
 #define       MC_CMD_LICENSING_GET_ID_V3_OUT_LICENSE_ID_LEN 1
@@ -10789,11 +12798,13 @@
 #define    MC_CMD_GET_LICENSED_APP_STATE_IN_LEN 4
 /* application ID to query (LICENSED_APP_ID_xxx) */
 #define       MC_CMD_GET_LICENSED_APP_STATE_IN_APP_ID_OFST 0
+#define       MC_CMD_GET_LICENSED_APP_STATE_IN_APP_ID_LEN 4
 
 /* MC_CMD_GET_LICENSED_APP_STATE_OUT msgresponse */
 #define    MC_CMD_GET_LICENSED_APP_STATE_OUT_LEN 4
 /* state of this application */
 #define       MC_CMD_GET_LICENSED_APP_STATE_OUT_STATE_OFST 0
+#define       MC_CMD_GET_LICENSED_APP_STATE_OUT_STATE_LEN 4
 /* enum: no (or invalid) license is present for the application */
 #define          MC_CMD_GET_LICENSED_APP_STATE_OUT_NOT_LICENSED  0x0
 /* enum: a valid license is present for the application */
@@ -10824,6 +12835,7 @@
 #define    MC_CMD_GET_LICENSED_V3_APP_STATE_OUT_LEN 4
 /* state of this application */
 #define       MC_CMD_GET_LICENSED_V3_APP_STATE_OUT_STATE_OFST 0
+#define       MC_CMD_GET_LICENSED_V3_APP_STATE_OUT_STATE_LEN 4
 /* enum: no (or invalid) license is present for the application */
 #define          MC_CMD_GET_LICENSED_V3_APP_STATE_OUT_NOT_LICENSED  0x0
 /* enum: a valid license is present for the application */
@@ -10874,8 +12886,10 @@
 #define    MC_CMD_LICENSED_APP_OP_IN_LEN(num) (8+4*(num))
 /* application ID */
 #define       MC_CMD_LICENSED_APP_OP_IN_APP_ID_OFST 0
+#define       MC_CMD_LICENSED_APP_OP_IN_APP_ID_LEN 4
 /* the type of operation requested */
 #define       MC_CMD_LICENSED_APP_OP_IN_OP_OFST 4
+#define       MC_CMD_LICENSED_APP_OP_IN_OP_LEN 4
 /* enum: validate application */
 #define          MC_CMD_LICENSED_APP_OP_IN_OP_VALIDATE  0x0
 /* enum: mask application */
@@ -10900,8 +12914,10 @@
 #define    MC_CMD_LICENSED_APP_OP_VALIDATE_IN_LEN 72
 /* application ID */
 #define       MC_CMD_LICENSED_APP_OP_VALIDATE_IN_APP_ID_OFST 0
+#define       MC_CMD_LICENSED_APP_OP_VALIDATE_IN_APP_ID_LEN 4
 /* the type of operation requested */
 #define       MC_CMD_LICENSED_APP_OP_VALIDATE_IN_OP_OFST 4
+#define       MC_CMD_LICENSED_APP_OP_VALIDATE_IN_OP_LEN 4
 /* validation challenge */
 #define       MC_CMD_LICENSED_APP_OP_VALIDATE_IN_CHALLENGE_OFST 8
 #define       MC_CMD_LICENSED_APP_OP_VALIDATE_IN_CHALLENGE_LEN 64
@@ -10910,6 +12926,7 @@
 #define    MC_CMD_LICENSED_APP_OP_VALIDATE_OUT_LEN 68
 /* feature expiry (time_t) */
 #define       MC_CMD_LICENSED_APP_OP_VALIDATE_OUT_EXPIRY_OFST 0
+#define       MC_CMD_LICENSED_APP_OP_VALIDATE_OUT_EXPIRY_LEN 4
 /* validation response */
 #define       MC_CMD_LICENSED_APP_OP_VALIDATE_OUT_RESPONSE_OFST 4
 #define       MC_CMD_LICENSED_APP_OP_VALIDATE_OUT_RESPONSE_LEN 64
@@ -10918,10 +12935,13 @@
 #define    MC_CMD_LICENSED_APP_OP_MASK_IN_LEN 12
 /* application ID */
 #define       MC_CMD_LICENSED_APP_OP_MASK_IN_APP_ID_OFST 0
+#define       MC_CMD_LICENSED_APP_OP_MASK_IN_APP_ID_LEN 4
 /* the type of operation requested */
 #define       MC_CMD_LICENSED_APP_OP_MASK_IN_OP_OFST 4
+#define       MC_CMD_LICENSED_APP_OP_MASK_IN_OP_LEN 4
 /* flag */
 #define       MC_CMD_LICENSED_APP_OP_MASK_IN_FLAG_OFST 8
+#define       MC_CMD_LICENSED_APP_OP_MASK_IN_FLAG_LEN 4
 
 /* MC_CMD_LICENSED_APP_OP_MASK_OUT msgresponse */
 #define    MC_CMD_LICENSED_APP_OP_MASK_OUT_LEN 0
@@ -10959,8 +12979,10 @@
 #define       MC_CMD_LICENSED_V3_VALIDATE_APP_OUT_RESPONSE_LEN 96
 /* application expiry time */
 #define       MC_CMD_LICENSED_V3_VALIDATE_APP_OUT_EXPIRY_TIME_OFST 96
+#define       MC_CMD_LICENSED_V3_VALIDATE_APP_OUT_EXPIRY_TIME_LEN 4
 /* application expiry units */
 #define       MC_CMD_LICENSED_V3_VALIDATE_APP_OUT_EXPIRY_UNITS_OFST 100
+#define       MC_CMD_LICENSED_V3_VALIDATE_APP_OUT_EXPIRY_UNITS_LEN 4
 /* enum: expiry units are accounting units */
 #define          MC_CMD_LICENSED_V3_VALIDATE_APP_OUT_EXPIRY_UNIT_ACC  0x0
 /* enum: expiry units are calendar days */
@@ -10984,7 +13006,7 @@
  */
 #define MC_CMD_LICENSED_V3_MASK_FEATURES 0xd5
 
-#define MC_CMD_0xd5_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+#define MC_CMD_0xd5_PRIVILEGE_CTG SRIOV_CTG_ADMIN
 
 /* MC_CMD_LICENSED_V3_MASK_FEATURES_IN msgrequest */
 #define    MC_CMD_LICENSED_V3_MASK_FEATURES_IN_LEN 12
@@ -10995,6 +13017,7 @@
 #define       MC_CMD_LICENSED_V3_MASK_FEATURES_IN_MASK_HI_OFST 4
 /* whether to turn on or turn off the masked features */
 #define       MC_CMD_LICENSED_V3_MASK_FEATURES_IN_FLAG_OFST 8
+#define       MC_CMD_LICENSED_V3_MASK_FEATURES_IN_FLAG_LEN 4
 /* enum: turn the features off */
 #define          MC_CMD_LICENSED_V3_MASK_FEATURES_IN_OFF  0x0
 /* enum: turn the features back on */
@@ -11014,12 +13037,13 @@
  */
 #define MC_CMD_LICENSING_V3_TEMPORARY 0xd6
 
-#define MC_CMD_0xd6_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+#define MC_CMD_0xd6_PRIVILEGE_CTG SRIOV_CTG_ADMIN
 
 /* MC_CMD_LICENSING_V3_TEMPORARY_IN msgrequest */
 #define    MC_CMD_LICENSING_V3_TEMPORARY_IN_LEN 4
 /* operation code */
 #define       MC_CMD_LICENSING_V3_TEMPORARY_IN_OP_OFST 0
+#define       MC_CMD_LICENSING_V3_TEMPORARY_IN_OP_LEN 4
 /* enum: install a new license, overwriting any existing temporary license.
  * This is an asynchronous operation owing to the time taken to validate an
  * ECDSA license
@@ -11037,6 +13061,7 @@
 /* MC_CMD_LICENSING_V3_TEMPORARY_IN_SET msgrequest */
 #define    MC_CMD_LICENSING_V3_TEMPORARY_IN_SET_LEN 164
 #define       MC_CMD_LICENSING_V3_TEMPORARY_IN_SET_OP_OFST 0
+#define       MC_CMD_LICENSING_V3_TEMPORARY_IN_SET_OP_LEN 4
 /* ECDSA license and signature */
 #define       MC_CMD_LICENSING_V3_TEMPORARY_IN_SET_LICENSE_OFST 4
 #define       MC_CMD_LICENSING_V3_TEMPORARY_IN_SET_LICENSE_LEN 160
@@ -11044,15 +13069,18 @@
 /* MC_CMD_LICENSING_V3_TEMPORARY_IN_CLEAR msgrequest */
 #define    MC_CMD_LICENSING_V3_TEMPORARY_IN_CLEAR_LEN 4
 #define       MC_CMD_LICENSING_V3_TEMPORARY_IN_CLEAR_OP_OFST 0
+#define       MC_CMD_LICENSING_V3_TEMPORARY_IN_CLEAR_OP_LEN 4
 
 /* MC_CMD_LICENSING_V3_TEMPORARY_IN_STATUS msgrequest */
 #define    MC_CMD_LICENSING_V3_TEMPORARY_IN_STATUS_LEN 4
 #define       MC_CMD_LICENSING_V3_TEMPORARY_IN_STATUS_OP_OFST 0
+#define       MC_CMD_LICENSING_V3_TEMPORARY_IN_STATUS_OP_LEN 4
 
 /* MC_CMD_LICENSING_V3_TEMPORARY_OUT_STATUS msgresponse */
 #define    MC_CMD_LICENSING_V3_TEMPORARY_OUT_STATUS_LEN 12
 /* status code */
 #define       MC_CMD_LICENSING_V3_TEMPORARY_OUT_STATUS_STATUS_OFST 0
+#define       MC_CMD_LICENSING_V3_TEMPORARY_OUT_STATUS_STATUS_LEN 4
 /* enum: finished validating and installing license */
 #define          MC_CMD_LICENSING_V3_TEMPORARY_STATUS_OK  0x0
 /* enum: license validation and installation in progress */
@@ -11084,14 +13112,17 @@
 #define    MC_CMD_SET_PORT_SNIFF_CONFIG_IN_LEN 16
 /* configuration flags */
 #define       MC_CMD_SET_PORT_SNIFF_CONFIG_IN_FLAGS_OFST 0
+#define       MC_CMD_SET_PORT_SNIFF_CONFIG_IN_FLAGS_LEN 4
 #define        MC_CMD_SET_PORT_SNIFF_CONFIG_IN_ENABLE_LBN 0
 #define        MC_CMD_SET_PORT_SNIFF_CONFIG_IN_ENABLE_WIDTH 1
 #define        MC_CMD_SET_PORT_SNIFF_CONFIG_IN_PROMISCUOUS_LBN 1
 #define        MC_CMD_SET_PORT_SNIFF_CONFIG_IN_PROMISCUOUS_WIDTH 1
 /* receive queue handle (for RSS mode, this is the base queue) */
 #define       MC_CMD_SET_PORT_SNIFF_CONFIG_IN_RX_QUEUE_OFST 4
+#define       MC_CMD_SET_PORT_SNIFF_CONFIG_IN_RX_QUEUE_LEN 4
 /* receive mode */
 #define       MC_CMD_SET_PORT_SNIFF_CONFIG_IN_RX_MODE_OFST 8
+#define       MC_CMD_SET_PORT_SNIFF_CONFIG_IN_RX_MODE_LEN 4
 /* enum: receive to just the specified queue */
 #define          MC_CMD_SET_PORT_SNIFF_CONFIG_IN_RX_MODE_SIMPLE  0x0
 /* enum: receive to multiple queues using RSS context */
@@ -11101,6 +13132,7 @@
  * of 0xFFFFFFFF is guaranteed never to be a valid handle.
  */
 #define       MC_CMD_SET_PORT_SNIFF_CONFIG_IN_RX_CONTEXT_OFST 12
+#define       MC_CMD_SET_PORT_SNIFF_CONFIG_IN_RX_CONTEXT_LEN 4
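/* Illustrative sketch (not part of the generated header): composing FLAGS
 * from the ENABLE/PROMISCUOUS LBN/WIDTH pairs above; the shift idiom is
 * generic C, not a helper from this header.
 */
#include <stdint.h>

static inline uint32_t port_sniff_flags(unsigned int enable,
					unsigned int promiscuous)
{
	return ((uint32_t)(enable & 1) <<
		MC_CMD_SET_PORT_SNIFF_CONFIG_IN_ENABLE_LBN) |
	       ((uint32_t)(promiscuous & 1) <<
		MC_CMD_SET_PORT_SNIFF_CONFIG_IN_PROMISCUOUS_LBN);
}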
 
 /* MC_CMD_SET_PORT_SNIFF_CONFIG_OUT msgresponse */
 #define    MC_CMD_SET_PORT_SNIFF_CONFIG_OUT_LEN 0
@@ -11123,20 +13155,24 @@
 #define    MC_CMD_GET_PORT_SNIFF_CONFIG_OUT_LEN 16
 /* configuration flags */
 #define       MC_CMD_GET_PORT_SNIFF_CONFIG_OUT_FLAGS_OFST 0
+#define       MC_CMD_GET_PORT_SNIFF_CONFIG_OUT_FLAGS_LEN 4
 #define        MC_CMD_GET_PORT_SNIFF_CONFIG_OUT_ENABLE_LBN 0
 #define        MC_CMD_GET_PORT_SNIFF_CONFIG_OUT_ENABLE_WIDTH 1
 #define        MC_CMD_GET_PORT_SNIFF_CONFIG_OUT_PROMISCUOUS_LBN 1
 #define        MC_CMD_GET_PORT_SNIFF_CONFIG_OUT_PROMISCUOUS_WIDTH 1
 /* receiving queue handle (for RSS mode, this is the base queue) */
 #define       MC_CMD_GET_PORT_SNIFF_CONFIG_OUT_RX_QUEUE_OFST 4
+#define       MC_CMD_GET_PORT_SNIFF_CONFIG_OUT_RX_QUEUE_LEN 4
 /* receive mode */
 #define       MC_CMD_GET_PORT_SNIFF_CONFIG_OUT_RX_MODE_OFST 8
+#define       MC_CMD_GET_PORT_SNIFF_CONFIG_OUT_RX_MODE_LEN 4
 /* enum: receiving to just the specified queue */
 #define          MC_CMD_GET_PORT_SNIFF_CONFIG_OUT_RX_MODE_SIMPLE  0x0
 /* enum: receiving to multiple queues using RSS context */
 #define          MC_CMD_GET_PORT_SNIFF_CONFIG_OUT_RX_MODE_RSS  0x1
 /* RSS context (for RX_MODE_RSS) */
 #define       MC_CMD_GET_PORT_SNIFF_CONFIG_OUT_RX_CONTEXT_OFST 12
+#define       MC_CMD_GET_PORT_SNIFF_CONFIG_OUT_RX_CONTEXT_LEN 4
 
 
 /***********************************/
@@ -11153,6 +13189,7 @@
 #define    MC_CMD_SET_PARSER_DISP_CONFIG_IN_LEN(num) (8+4*(num))
 /* the type of configuration setting to change */
 #define       MC_CMD_SET_PARSER_DISP_CONFIG_IN_TYPE_OFST 0
+#define       MC_CMD_SET_PARSER_DISP_CONFIG_IN_TYPE_LEN 4
 /* enum: Per-TXQ enable for multicast UDP destination lookup for possible
  * internal loopback. (ENTITY is a queue handle, VALUE is a single boolean.)
  */
@@ -11166,6 +13203,7 @@
  * on the type of configuration setting being changed
  */
 #define       MC_CMD_SET_PARSER_DISP_CONFIG_IN_ENTITY_OFST 4
+#define       MC_CMD_SET_PARSER_DISP_CONFIG_IN_ENTITY_LEN 4
 /* new value: the details depend on the type of configuration setting being
  * changed
  */
@@ -11190,12 +13228,14 @@
 #define    MC_CMD_GET_PARSER_DISP_CONFIG_IN_LEN 8
 /* the type of configuration setting to read */
 #define       MC_CMD_GET_PARSER_DISP_CONFIG_IN_TYPE_OFST 0
+#define       MC_CMD_GET_PARSER_DISP_CONFIG_IN_TYPE_LEN 4
 /*            Enum values, see field(s): */
 /*               MC_CMD_SET_PARSER_DISP_CONFIG/MC_CMD_SET_PARSER_DISP_CONFIG_IN/TYPE */
 /* handle for the entity to query: queue handle, EVB port ID, etc. depending on
  * the type of configuration setting being read
  */
 #define       MC_CMD_GET_PARSER_DISP_CONFIG_IN_ENTITY_OFST 4
+#define       MC_CMD_GET_PARSER_DISP_CONFIG_IN_ENTITY_LEN 4
 
 /* MC_CMD_GET_PARSER_DISP_CONFIG_OUT msgresponse */
 #define    MC_CMD_GET_PARSER_DISP_CONFIG_OUT_LENMIN 4
@@ -11228,12 +13268,15 @@
 #define    MC_CMD_SET_TX_PORT_SNIFF_CONFIG_IN_LEN 16
 /* configuration flags */
 #define       MC_CMD_SET_TX_PORT_SNIFF_CONFIG_IN_FLAGS_OFST 0
+#define       MC_CMD_SET_TX_PORT_SNIFF_CONFIG_IN_FLAGS_LEN 4
 #define        MC_CMD_SET_TX_PORT_SNIFF_CONFIG_IN_ENABLE_LBN 0
 #define        MC_CMD_SET_TX_PORT_SNIFF_CONFIG_IN_ENABLE_WIDTH 1
 /* receive queue handle (for RSS mode, this is the base queue) */
 #define       MC_CMD_SET_TX_PORT_SNIFF_CONFIG_IN_RX_QUEUE_OFST 4
+#define       MC_CMD_SET_TX_PORT_SNIFF_CONFIG_IN_RX_QUEUE_LEN 4
 /* receive mode */
 #define       MC_CMD_SET_TX_PORT_SNIFF_CONFIG_IN_RX_MODE_OFST 8
+#define       MC_CMD_SET_TX_PORT_SNIFF_CONFIG_IN_RX_MODE_LEN 4
 /* enum: receive to just the specified queue */
 #define          MC_CMD_SET_TX_PORT_SNIFF_CONFIG_IN_RX_MODE_SIMPLE  0x0
 /* enum: receive to multiple queues using RSS context */
@@ -11243,6 +13286,7 @@
  * of 0xFFFFFFFF is guaranteed never to be a valid handle.
  */
 #define       MC_CMD_SET_TX_PORT_SNIFF_CONFIG_IN_RX_CONTEXT_OFST 12
+#define       MC_CMD_SET_TX_PORT_SNIFF_CONFIG_IN_RX_CONTEXT_LEN 4
 
 /* MC_CMD_SET_TX_PORT_SNIFF_CONFIG_OUT msgresponse */
 #define    MC_CMD_SET_TX_PORT_SNIFF_CONFIG_OUT_LEN 0
@@ -11265,18 +13309,22 @@
 #define    MC_CMD_GET_TX_PORT_SNIFF_CONFIG_OUT_LEN 16
 /* configuration flags */
 #define       MC_CMD_GET_TX_PORT_SNIFF_CONFIG_OUT_FLAGS_OFST 0
+#define       MC_CMD_GET_TX_PORT_SNIFF_CONFIG_OUT_FLAGS_LEN 4
 #define        MC_CMD_GET_TX_PORT_SNIFF_CONFIG_OUT_ENABLE_LBN 0
 #define        MC_CMD_GET_TX_PORT_SNIFF_CONFIG_OUT_ENABLE_WIDTH 1
 /* receiving queue handle (for RSS mode, this is the base queue) */
 #define       MC_CMD_GET_TX_PORT_SNIFF_CONFIG_OUT_RX_QUEUE_OFST 4
+#define       MC_CMD_GET_TX_PORT_SNIFF_CONFIG_OUT_RX_QUEUE_LEN 4
 /* receive mode */
 #define       MC_CMD_GET_TX_PORT_SNIFF_CONFIG_OUT_RX_MODE_OFST 8
+#define       MC_CMD_GET_TX_PORT_SNIFF_CONFIG_OUT_RX_MODE_LEN 4
 /* enum: receiving to just the specified queue */
 #define          MC_CMD_GET_TX_PORT_SNIFF_CONFIG_OUT_RX_MODE_SIMPLE  0x0
 /* enum: receiving to multiple queues using RSS context */
 #define          MC_CMD_GET_TX_PORT_SNIFF_CONFIG_OUT_RX_MODE_RSS  0x1
 /* RSS context (for RX_MODE_RSS) */
 #define       MC_CMD_GET_TX_PORT_SNIFF_CONFIG_OUT_RX_CONTEXT_OFST 12
+#define       MC_CMD_GET_TX_PORT_SNIFF_CONFIG_OUT_RX_CONTEXT_LEN 4
 
 
 /***********************************/
@@ -11291,16 +13339,22 @@
 #define    MC_CMD_RMON_STATS_RX_ERRORS_IN_LEN 8
 /* The rx queue to get stats for. */
 #define       MC_CMD_RMON_STATS_RX_ERRORS_IN_RX_QUEUE_OFST 0
+#define       MC_CMD_RMON_STATS_RX_ERRORS_IN_RX_QUEUE_LEN 4
 #define       MC_CMD_RMON_STATS_RX_ERRORS_IN_FLAGS_OFST 4
+#define       MC_CMD_RMON_STATS_RX_ERRORS_IN_FLAGS_LEN 4
 #define        MC_CMD_RMON_STATS_RX_ERRORS_IN_RST_LBN 0
 #define        MC_CMD_RMON_STATS_RX_ERRORS_IN_RST_WIDTH 1
 
 /* MC_CMD_RMON_STATS_RX_ERRORS_OUT msgresponse */
 #define    MC_CMD_RMON_STATS_RX_ERRORS_OUT_LEN 16
 #define       MC_CMD_RMON_STATS_RX_ERRORS_OUT_CRC_ERRORS_OFST 0
+#define       MC_CMD_RMON_STATS_RX_ERRORS_OUT_CRC_ERRORS_LEN 4
 #define       MC_CMD_RMON_STATS_RX_ERRORS_OUT_TRUNC_ERRORS_OFST 4
+#define       MC_CMD_RMON_STATS_RX_ERRORS_OUT_TRUNC_ERRORS_LEN 4
 #define       MC_CMD_RMON_STATS_RX_ERRORS_OUT_RX_NO_DESC_DROPS_OFST 8
+#define       MC_CMD_RMON_STATS_RX_ERRORS_OUT_RX_NO_DESC_DROPS_LEN 4
 #define       MC_CMD_RMON_STATS_RX_ERRORS_OUT_RX_ABORT_OFST 12
+#define       MC_CMD_RMON_STATS_RX_ERRORS_OUT_RX_ABORT_LEN 4
 
 
 /***********************************/
@@ -11309,6 +13363,8 @@
  */
 #define MC_CMD_GET_PCIE_RESOURCE_INFO 0xfd
 
+#define MC_CMD_0xfd_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
 /* MC_CMD_GET_PCIE_RESOURCE_INFO_IN msgrequest */
 #define    MC_CMD_GET_PCIE_RESOURCE_INFO_IN_LEN 0
 
@@ -11316,20 +13372,27 @@
 #define    MC_CMD_GET_PCIE_RESOURCE_INFO_OUT_LEN 28
 /* The maximum number of PFs the device can expose */
 #define       MC_CMD_GET_PCIE_RESOURCE_INFO_OUT_MAX_PFS_OFST 0
+#define       MC_CMD_GET_PCIE_RESOURCE_INFO_OUT_MAX_PFS_LEN 4
 /* The maximum number of VFs the device can expose in total */
 #define       MC_CMD_GET_PCIE_RESOURCE_INFO_OUT_MAX_VFS_OFST 4
+#define       MC_CMD_GET_PCIE_RESOURCE_INFO_OUT_MAX_VFS_LEN 4
 /* The maximum number of MSI-X vectors the device can provide in total */
 #define       MC_CMD_GET_PCIE_RESOURCE_INFO_OUT_MAX_VECTORS_OFST 8
+#define       MC_CMD_GET_PCIE_RESOURCE_INFO_OUT_MAX_VECTORS_LEN 4
 /* the number of MSI-X vectors the device will allocate by default to each PF
  */
 #define       MC_CMD_GET_PCIE_RESOURCE_INFO_OUT_DEFAULT_PF_VECTORS_OFST 12
+#define       MC_CMD_GET_PCIE_RESOURCE_INFO_OUT_DEFAULT_PF_VECTORS_LEN 4
 /* the number of MSI-X vectors the device will allocate by default to each VF
  */
 #define       MC_CMD_GET_PCIE_RESOURCE_INFO_OUT_DEFAULT_VF_VECTORS_OFST 16
+#define       MC_CMD_GET_PCIE_RESOURCE_INFO_OUT_DEFAULT_VF_VECTORS_LEN 4
 /* the maximum number of MSI-X vectors the device can allocate to any one PF */
 #define       MC_CMD_GET_PCIE_RESOURCE_INFO_OUT_MAX_PF_VECTORS_OFST 20
+#define       MC_CMD_GET_PCIE_RESOURCE_INFO_OUT_MAX_PF_VECTORS_LEN 4
 /* the maximum number of MSI-X vectors the device can allocate to any one VF */
 #define       MC_CMD_GET_PCIE_RESOURCE_INFO_OUT_MAX_VF_VECTORS_OFST 24
+#define       MC_CMD_GET_PCIE_RESOURCE_INFO_OUT_MAX_VF_VECTORS_LEN 4
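/* Illustrative sketch (not part of the generated header): each field of the
 * 28-byte response is a little-endian dword read at its _OFST; get_le32()
 * is a local helper, not part of this header.
 */
#include <stdint.h>

static uint32_t get_le32(const uint8_t *buf, unsigned int ofst)
{
	return (uint32_t)buf[ofst] | ((uint32_t)buf[ofst + 1] << 8) |
	       ((uint32_t)buf[ofst + 2] << 16) |
	       ((uint32_t)buf[ofst + 3] << 24);
}

/* e.g. maximum VF count:
 *   get_le32(outbuf, MC_CMD_GET_PCIE_RESOURCE_INFO_OUT_MAX_VFS_OFST)
 */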
 
 
 /***********************************/
@@ -11347,10 +13410,13 @@
 #define    MC_CMD_GET_PORT_MODES_OUT_LEN 12
 /* Bitmask of port modes available on the board (indexed by TLV_PORT_MODE_*) */
 #define       MC_CMD_GET_PORT_MODES_OUT_MODES_OFST 0
+#define       MC_CMD_GET_PORT_MODES_OUT_MODES_LEN 4
 /* Default (canonical) board mode */
 #define       MC_CMD_GET_PORT_MODES_OUT_DEFAULT_MODE_OFST 4
+#define       MC_CMD_GET_PORT_MODES_OUT_DEFAULT_MODE_LEN 4
 /* Current board mode */
 #define       MC_CMD_GET_PORT_MODES_OUT_CURRENT_MODE_OFST 8
+#define       MC_CMD_GET_PORT_MODES_OUT_CURRENT_MODE_LEN 4
 
 
 /***********************************/
@@ -11359,21 +13425,26 @@
  */
 #define MC_CMD_READ_ATB 0x100
 
-#define MC_CMD_0x100_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+#define MC_CMD_0x100_PRIVILEGE_CTG SRIOV_CTG_INSECURE
 
 /* MC_CMD_READ_ATB_IN msgrequest */
 #define    MC_CMD_READ_ATB_IN_LEN 16
 #define       MC_CMD_READ_ATB_IN_SIGNAL_BUS_OFST 0
+#define       MC_CMD_READ_ATB_IN_SIGNAL_BUS_LEN 4
 #define          MC_CMD_READ_ATB_IN_BUS_CCOM  0x0 /* enum */
 #define          MC_CMD_READ_ATB_IN_BUS_CKR  0x1 /* enum */
 #define          MC_CMD_READ_ATB_IN_BUS_CPCIE  0x8 /* enum */
 #define       MC_CMD_READ_ATB_IN_SIGNAL_EN_BITNO_OFST 4
+#define       MC_CMD_READ_ATB_IN_SIGNAL_EN_BITNO_LEN 4
 #define       MC_CMD_READ_ATB_IN_SIGNAL_SEL_OFST 8
+#define       MC_CMD_READ_ATB_IN_SIGNAL_SEL_LEN 4
 #define       MC_CMD_READ_ATB_IN_SETTLING_TIME_US_OFST 12
+#define       MC_CMD_READ_ATB_IN_SETTLING_TIME_US_LEN 4
 
 /* MC_CMD_READ_ATB_OUT msgresponse */
 #define    MC_CMD_READ_ATB_OUT_LEN 4
 #define       MC_CMD_READ_ATB_OUT_SAMPLE_MV_OFST 0
+#define       MC_CMD_READ_ATB_OUT_SAMPLE_MV_LEN 4
 
 
 /***********************************/
@@ -11390,7 +13461,9 @@
 /* Each workaround is represented by a single bit according to the enums below.
  */
 #define       MC_CMD_GET_WORKAROUNDS_OUT_IMPLEMENTED_OFST 0
+#define       MC_CMD_GET_WORKAROUNDS_OUT_IMPLEMENTED_LEN 4
 #define       MC_CMD_GET_WORKAROUNDS_OUT_ENABLED_OFST 4
+#define       MC_CMD_GET_WORKAROUNDS_OUT_ENABLED_LEN 4
 /* enum: Bug 17230 work around. */
 #define          MC_CMD_GET_WORKAROUNDS_OUT_BUG17230 0x2
 /* enum: Bug 35388 work around (unsafe EVQ writes). */
@@ -11425,6 +13498,7 @@
  * 1,3 = 0x00030001
  */
 #define       MC_CMD_PRIVILEGE_MASK_IN_FUNCTION_OFST 0
+#define       MC_CMD_PRIVILEGE_MASK_IN_FUNCTION_LEN 4
 #define        MC_CMD_PRIVILEGE_MASK_IN_FUNCTION_PF_LBN 0
 #define        MC_CMD_PRIVILEGE_MASK_IN_FUNCTION_PF_WIDTH 16
 #define        MC_CMD_PRIVILEGE_MASK_IN_FUNCTION_VF_LBN 16
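/* Worked example (illustrative sketch, not part of the generated header):
 * the FUNCTION field packs the PF number into bits 0-15 and the VF number
 * into bits 16-31, so PF 1 / VF 3 encodes as 0x00030001 as noted above.
 * MC_CMD_LINK_STATE_MODE_IN_FUNCTION below uses the same encoding.
 */
#include <stdint.h>

static inline uint32_t mcdi_encode_function(uint16_t pf, uint16_t vf)
{
	return ((uint32_t)pf << MC_CMD_PRIVILEGE_MASK_IN_FUNCTION_PF_LBN) |
	       ((uint32_t)vf << MC_CMD_PRIVILEGE_MASK_IN_FUNCTION_VF_LBN);
}
/* mcdi_encode_function(1, 3) == 0x00030001 */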
@@ -11434,6 +13508,7 @@
  * set to 1.
  */
 #define       MC_CMD_PRIVILEGE_MASK_IN_NEW_MASK_OFST 4
+#define       MC_CMD_PRIVILEGE_MASK_IN_NEW_MASK_LEN 4
 #define          MC_CMD_PRIVILEGE_MASK_IN_GRP_ADMIN             0x1 /* enum */
 #define          MC_CMD_PRIVILEGE_MASK_IN_GRP_LINK              0x2 /* enum */
 #define          MC_CMD_PRIVILEGE_MASK_IN_GRP_ONLOAD            0x4 /* enum */
@@ -11460,6 +13535,10 @@
  * only a set of permitted VLANs. See the vPort flag FLAG_VLAN_RESTRICT.
  */
 #define          MC_CMD_PRIVILEGE_MASK_IN_GRP_UNRESTRICTED_VLAN  0x2000
+/* enum: Privilege for insecure commands. Commands that belong to this group
+ * are not permitted on secure adapters regardless of the privilege mask.
+ */
+#define          MC_CMD_PRIVILEGE_MASK_IN_GRP_INSECURE          0x4000
 /* enum: Set this bit to indicate that a new privilege mask is to be set,
  * otherwise the command will only read the existing mask.
  */
@@ -11469,6 +13548,7 @@
 #define    MC_CMD_PRIVILEGE_MASK_OUT_LEN 4
 /* For an admin function, always all the privileges are reported. */
 #define       MC_CMD_PRIVILEGE_MASK_OUT_OLD_MASK_OFST 0
+#define       MC_CMD_PRIVILEGE_MASK_OUT_OLD_MASK_LEN 4
 
 
 /***********************************/
@@ -11485,12 +13565,14 @@
  * e.g. VF 1,3 = 0x00030001
  */
 #define       MC_CMD_LINK_STATE_MODE_IN_FUNCTION_OFST 0
+#define       MC_CMD_LINK_STATE_MODE_IN_FUNCTION_LEN 4
 #define        MC_CMD_LINK_STATE_MODE_IN_FUNCTION_PF_LBN 0
 #define        MC_CMD_LINK_STATE_MODE_IN_FUNCTION_PF_WIDTH 16
 #define        MC_CMD_LINK_STATE_MODE_IN_FUNCTION_VF_LBN 16
 #define        MC_CMD_LINK_STATE_MODE_IN_FUNCTION_VF_WIDTH 16
 /* New link state mode to be set */
 #define       MC_CMD_LINK_STATE_MODE_IN_NEW_MODE_OFST 4
+#define       MC_CMD_LINK_STATE_MODE_IN_NEW_MODE_LEN 4
 #define          MC_CMD_LINK_STATE_MODE_IN_LINK_STATE_AUTO       0x0 /* enum */
 #define          MC_CMD_LINK_STATE_MODE_IN_LINK_STATE_UP         0x1 /* enum */
 #define          MC_CMD_LINK_STATE_MODE_IN_LINK_STATE_DOWN       0x2 /* enum */
@@ -11501,11 +13583,12 @@
 /* MC_CMD_LINK_STATE_MODE_OUT msgresponse */
 #define    MC_CMD_LINK_STATE_MODE_OUT_LEN 4
 #define       MC_CMD_LINK_STATE_MODE_OUT_OLD_MODE_OFST 0
+#define       MC_CMD_LINK_STATE_MODE_OUT_OLD_MODE_LEN 4
 
 
 /***********************************/
 /* MC_CMD_GET_SNAPSHOT_LENGTH
- * Obtain the curent range of allowable values for the SNAPSHOT_LENGTH
+ * Obtain the current range of allowable values for the SNAPSHOT_LENGTH
  * parameter to MC_CMD_INIT_RXQ.
  */
 #define MC_CMD_GET_SNAPSHOT_LENGTH 0x101
@@ -11519,8 +13602,10 @@
 #define    MC_CMD_GET_SNAPSHOT_LENGTH_OUT_LEN 8
 /* Minimum acceptable snapshot length. */
 #define       MC_CMD_GET_SNAPSHOT_LENGTH_OUT_RX_SNAPLEN_MIN_OFST 0
+#define       MC_CMD_GET_SNAPSHOT_LENGTH_OUT_RX_SNAPLEN_MIN_LEN 4
 /* Maximum acceptable snapshot length. */
 #define       MC_CMD_GET_SNAPSHOT_LENGTH_OUT_RX_SNAPLEN_MAX_OFST 4
+#define       MC_CMD_GET_SNAPSHOT_LENGTH_OUT_RX_SNAPLEN_MAX_LEN 4
 
 
 /***********************************/
@@ -11529,7 +13614,7 @@
  */
 #define MC_CMD_FUSE_DIAGS 0x102
 
-#define MC_CMD_0x102_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+#define MC_CMD_0x102_PRIVILEGE_CTG SRIOV_CTG_INSECURE
 
 /* MC_CMD_FUSE_DIAGS_IN msgrequest */
 #define    MC_CMD_FUSE_DIAGS_IN_LEN 0
@@ -11538,28 +13623,40 @@
 #define    MC_CMD_FUSE_DIAGS_OUT_LEN 48
 /* Total number of mismatched bits between pairs in area 0 */
 #define       MC_CMD_FUSE_DIAGS_OUT_AREA0_MISMATCH_BITS_OFST 0
+#define       MC_CMD_FUSE_DIAGS_OUT_AREA0_MISMATCH_BITS_LEN 4
 /* Total number of unexpectedly clear (set in B but not A) bits in area 0 */
 #define       MC_CMD_FUSE_DIAGS_OUT_AREA0_PAIR_A_BAD_BITS_OFST 4
+#define       MC_CMD_FUSE_DIAGS_OUT_AREA0_PAIR_A_BAD_BITS_LEN 4
 /* Total number of unexpectedly clear (set in A but not B) bits in area 0 */
 #define       MC_CMD_FUSE_DIAGS_OUT_AREA0_PAIR_B_BAD_BITS_OFST 8
+#define       MC_CMD_FUSE_DIAGS_OUT_AREA0_PAIR_B_BAD_BITS_LEN 4
 /* Checksum of data after logical OR of pairs in area 0 */
 #define       MC_CMD_FUSE_DIAGS_OUT_AREA0_CHECKSUM_OFST 12
+#define       MC_CMD_FUSE_DIAGS_OUT_AREA0_CHECKSUM_LEN 4
 /* Total number of mismatched bits between pairs in area 1 */
 #define       MC_CMD_FUSE_DIAGS_OUT_AREA1_MISMATCH_BITS_OFST 16
+#define       MC_CMD_FUSE_DIAGS_OUT_AREA1_MISMATCH_BITS_LEN 4
 /* Total number of unexpectedly clear (set in B but not A) bits in area 1 */
 #define       MC_CMD_FUSE_DIAGS_OUT_AREA1_PAIR_A_BAD_BITS_OFST 20
+#define       MC_CMD_FUSE_DIAGS_OUT_AREA1_PAIR_A_BAD_BITS_LEN 4
 /* Total number of unexpectedly clear (set in A but not B) bits in area 1 */
 #define       MC_CMD_FUSE_DIAGS_OUT_AREA1_PAIR_B_BAD_BITS_OFST 24
+#define       MC_CMD_FUSE_DIAGS_OUT_AREA1_PAIR_B_BAD_BITS_LEN 4
 /* Checksum of data after logical OR of pairs in area 1 */
 #define       MC_CMD_FUSE_DIAGS_OUT_AREA1_CHECKSUM_OFST 28
+#define       MC_CMD_FUSE_DIAGS_OUT_AREA1_CHECKSUM_LEN 4
 /* Total number of mismatched bits between pairs in area 2 */
 #define       MC_CMD_FUSE_DIAGS_OUT_AREA2_MISMATCH_BITS_OFST 32
+#define       MC_CMD_FUSE_DIAGS_OUT_AREA2_MISMATCH_BITS_LEN 4
 /* Total number of unexpectedly clear (set in B but not A) bits in area 2 */
 #define       MC_CMD_FUSE_DIAGS_OUT_AREA2_PAIR_A_BAD_BITS_OFST 36
+#define       MC_CMD_FUSE_DIAGS_OUT_AREA2_PAIR_A_BAD_BITS_LEN 4
 /* Total number of unexpectedly clear (set in A but not B) bits in area 2 */
 #define       MC_CMD_FUSE_DIAGS_OUT_AREA2_PAIR_B_BAD_BITS_OFST 40
+#define       MC_CMD_FUSE_DIAGS_OUT_AREA2_PAIR_B_BAD_BITS_LEN 4
 /* Checksum of data after logical OR of pairs in area 2 */
 #define       MC_CMD_FUSE_DIAGS_OUT_AREA2_CHECKSUM_OFST 44
+#define       MC_CMD_FUSE_DIAGS_OUT_AREA2_CHECKSUM_LEN 4
 
 
 /***********************************/
@@ -11576,6 +13673,7 @@
 #define    MC_CMD_PRIVILEGE_MODIFY_IN_LEN 16
 /* The groups of functions to have their privilege masks modified. */
 #define       MC_CMD_PRIVILEGE_MODIFY_IN_FN_GROUP_OFST 0
+#define       MC_CMD_PRIVILEGE_MODIFY_IN_FN_GROUP_LEN 4
 #define          MC_CMD_PRIVILEGE_MODIFY_IN_NONE       0x0 /* enum */
 #define          MC_CMD_PRIVILEGE_MODIFY_IN_ALL        0x1 /* enum */
 #define          MC_CMD_PRIVILEGE_MODIFY_IN_PFS_ONLY   0x2 /* enum */
@@ -11584,6 +13682,7 @@
 #define          MC_CMD_PRIVILEGE_MODIFY_IN_ONE        0x5 /* enum */
 /* For VFS_OF_PF specify the PF, for ONE specify the target function */
 #define       MC_CMD_PRIVILEGE_MODIFY_IN_FUNCTION_OFST 4
+#define       MC_CMD_PRIVILEGE_MODIFY_IN_FUNCTION_LEN 4
 #define        MC_CMD_PRIVILEGE_MODIFY_IN_FUNCTION_PF_LBN 0
 #define        MC_CMD_PRIVILEGE_MODIFY_IN_FUNCTION_PF_WIDTH 16
 #define        MC_CMD_PRIVILEGE_MODIFY_IN_FUNCTION_VF_LBN 16
@@ -11592,10 +13691,12 @@
  * refer to the command MC_CMD_PRIVILEGE_MASK
  */
 #define       MC_CMD_PRIVILEGE_MODIFY_IN_ADD_MASK_OFST 8
+#define       MC_CMD_PRIVILEGE_MODIFY_IN_ADD_MASK_LEN 4
 /* Privileges to be removed from the target functions. For privilege
  * definitions refer to the command MC_CMD_PRIVILEGE_MASK
  */
 #define       MC_CMD_PRIVILEGE_MODIFY_IN_REMOVE_MASK_OFST 12
+#define       MC_CMD_PRIVILEGE_MODIFY_IN_REMOVE_MASK_LEN 4
 
 /* MC_CMD_PRIVILEGE_MODIFY_OUT msgresponse */
 #define    MC_CMD_PRIVILEGE_MODIFY_OUT_LEN 0
@@ -11613,8 +13714,10 @@
 #define    MC_CMD_XPM_READ_BYTES_IN_LEN 8
 /* Start address (byte) */
 #define       MC_CMD_XPM_READ_BYTES_IN_ADDR_OFST 0
+#define       MC_CMD_XPM_READ_BYTES_IN_ADDR_LEN 4
 /* Count (bytes) */
 #define       MC_CMD_XPM_READ_BYTES_IN_COUNT_OFST 4
+#define       MC_CMD_XPM_READ_BYTES_IN_COUNT_LEN 4
 
 /* MC_CMD_XPM_READ_BYTES_OUT msgresponse */
 #define    MC_CMD_XPM_READ_BYTES_OUT_LENMIN 0
@@ -11633,7 +13736,7 @@
  */
 #define MC_CMD_XPM_WRITE_BYTES 0x104
 
-#define MC_CMD_0x104_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+#define MC_CMD_0x104_PRIVILEGE_CTG SRIOV_CTG_INSECURE
 
 /* MC_CMD_XPM_WRITE_BYTES_IN msgrequest */
 #define    MC_CMD_XPM_WRITE_BYTES_IN_LENMIN 8
@@ -11641,8 +13744,10 @@
 #define    MC_CMD_XPM_WRITE_BYTES_IN_LEN(num) (8+1*(num))
 /* Start address (byte) */
 #define       MC_CMD_XPM_WRITE_BYTES_IN_ADDR_OFST 0
+#define       MC_CMD_XPM_WRITE_BYTES_IN_ADDR_LEN 4
 /* Count (bytes) */
 #define       MC_CMD_XPM_WRITE_BYTES_IN_COUNT_OFST 4
+#define       MC_CMD_XPM_WRITE_BYTES_IN_COUNT_LEN 4
 /* Data */
 #define       MC_CMD_XPM_WRITE_BYTES_IN_DATA_OFST 8
 #define       MC_CMD_XPM_WRITE_BYTES_IN_DATA_LEN 1
@@ -11659,14 +13764,16 @@
  */
 #define MC_CMD_XPM_READ_SECTOR 0x105
 
-#define MC_CMD_0x105_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+#define MC_CMD_0x105_PRIVILEGE_CTG SRIOV_CTG_INSECURE
 
 /* MC_CMD_XPM_READ_SECTOR_IN msgrequest */
 #define    MC_CMD_XPM_READ_SECTOR_IN_LEN 8
 /* Sector index */
 #define       MC_CMD_XPM_READ_SECTOR_IN_INDEX_OFST 0
+#define       MC_CMD_XPM_READ_SECTOR_IN_INDEX_LEN 4
 /* Sector size */
 #define       MC_CMD_XPM_READ_SECTOR_IN_SIZE_OFST 4
+#define       MC_CMD_XPM_READ_SECTOR_IN_SIZE_LEN 4
 
 /* MC_CMD_XPM_READ_SECTOR_OUT msgresponse */
 #define    MC_CMD_XPM_READ_SECTOR_OUT_LENMIN 4
@@ -11674,9 +13781,11 @@
 #define    MC_CMD_XPM_READ_SECTOR_OUT_LEN(num) (4+1*(num))
 /* Sector type */
 #define       MC_CMD_XPM_READ_SECTOR_OUT_TYPE_OFST 0
+#define       MC_CMD_XPM_READ_SECTOR_OUT_TYPE_LEN 4
 #define          MC_CMD_XPM_READ_SECTOR_OUT_BLANK            0x0 /* enum */
 #define          MC_CMD_XPM_READ_SECTOR_OUT_CRYPTO_KEY_128   0x1 /* enum */
 #define          MC_CMD_XPM_READ_SECTOR_OUT_CRYPTO_KEY_256   0x2 /* enum */
+#define          MC_CMD_XPM_READ_SECTOR_OUT_CRYPTO_DATA      0x3 /* enum */
 #define          MC_CMD_XPM_READ_SECTOR_OUT_INVALID          0xff /* enum */
 /* Sector data */
 #define       MC_CMD_XPM_READ_SECTOR_OUT_DATA_OFST 4
@@ -11691,7 +13800,7 @@
  */
 #define MC_CMD_XPM_WRITE_SECTOR 0x106
 
-#define MC_CMD_0x106_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+#define MC_CMD_0x106_PRIVILEGE_CTG SRIOV_CTG_INSECURE
 
 /* MC_CMD_XPM_WRITE_SECTOR_IN msgrequest */
 #define    MC_CMD_XPM_WRITE_SECTOR_IN_LENMIN 12
@@ -11708,10 +13817,12 @@
 #define       MC_CMD_XPM_WRITE_SECTOR_IN_RESERVED_LEN 3
 /* Sector type */
 #define       MC_CMD_XPM_WRITE_SECTOR_IN_TYPE_OFST 4
+#define       MC_CMD_XPM_WRITE_SECTOR_IN_TYPE_LEN 4
 /*            Enum values, see field(s): */
 /*               MC_CMD_XPM_READ_SECTOR/MC_CMD_XPM_READ_SECTOR_OUT/TYPE */
 /* Sector size */
 #define       MC_CMD_XPM_WRITE_SECTOR_IN_SIZE_OFST 8
+#define       MC_CMD_XPM_WRITE_SECTOR_IN_SIZE_LEN 4
 /* Sector data */
 #define       MC_CMD_XPM_WRITE_SECTOR_IN_DATA_OFST 12
 #define       MC_CMD_XPM_WRITE_SECTOR_IN_DATA_LEN 1
@@ -11722,6 +13833,7 @@
 #define    MC_CMD_XPM_WRITE_SECTOR_OUT_LEN 4
 /* New sector index */
 #define       MC_CMD_XPM_WRITE_SECTOR_OUT_INDEX_OFST 0
+#define       MC_CMD_XPM_WRITE_SECTOR_OUT_INDEX_LEN 4
 
 
 /***********************************/
@@ -11730,12 +13842,13 @@
  */
 #define MC_CMD_XPM_INVALIDATE_SECTOR 0x107
 
-#define MC_CMD_0x107_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+#define MC_CMD_0x107_PRIVILEGE_CTG SRIOV_CTG_INSECURE
 
 /* MC_CMD_XPM_INVALIDATE_SECTOR_IN msgrequest */
 #define    MC_CMD_XPM_INVALIDATE_SECTOR_IN_LEN 4
 /* Sector index */
 #define       MC_CMD_XPM_INVALIDATE_SECTOR_IN_INDEX_OFST 0
+#define       MC_CMD_XPM_INVALIDATE_SECTOR_IN_INDEX_LEN 4
 
 /* MC_CMD_XPM_INVALIDATE_SECTOR_OUT msgresponse */
 #define    MC_CMD_XPM_INVALIDATE_SECTOR_OUT_LEN 0
@@ -11747,14 +13860,16 @@
  */
 #define MC_CMD_XPM_BLANK_CHECK 0x108
 
-#define MC_CMD_0x108_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+#define MC_CMD_0x108_PRIVILEGE_CTG SRIOV_CTG_INSECURE
 
 /* MC_CMD_XPM_BLANK_CHECK_IN msgrequest */
 #define    MC_CMD_XPM_BLANK_CHECK_IN_LEN 8
 /* Start address (byte) */
 #define       MC_CMD_XPM_BLANK_CHECK_IN_ADDR_OFST 0
+#define       MC_CMD_XPM_BLANK_CHECK_IN_ADDR_LEN 4
 /* Count (bytes) */
 #define       MC_CMD_XPM_BLANK_CHECK_IN_COUNT_OFST 4
+#define       MC_CMD_XPM_BLANK_CHECK_IN_COUNT_LEN 4
 
 /* MC_CMD_XPM_BLANK_CHECK_OUT msgresponse */
 #define    MC_CMD_XPM_BLANK_CHECK_OUT_LENMIN 4
@@ -11762,6 +13877,7 @@
 #define    MC_CMD_XPM_BLANK_CHECK_OUT_LEN(num) (4+2*(num))
 /* Total number of bad (non-blank) locations */
 #define       MC_CMD_XPM_BLANK_CHECK_OUT_BAD_COUNT_OFST 0
+#define       MC_CMD_XPM_BLANK_CHECK_OUT_BAD_COUNT_LEN 4
 /* Addresses of bad locations (may be fewer than BAD_COUNT if they do not
  * all fit into the MCDI response)
  */
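/* Illustrative sketch (not part of the generated header): the number of
 * addresses actually returned follows from the response length by inverting
 * the _OUT_LEN(num) formula above.
 */
#include <stddef.h>

static inline unsigned int xpm_blank_check_num_addrs(size_t outlen)
{
	/* MC_CMD_XPM_BLANK_CHECK_OUT_LEN(num) == 4 + 2 * num */
	return (unsigned int)((outlen - 4) / 2);
}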
@@ -11777,14 +13893,16 @@
  */
 #define MC_CMD_XPM_REPAIR 0x109
 
-#define MC_CMD_0x109_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+#define MC_CMD_0x109_PRIVILEGE_CTG SRIOV_CTG_INSECURE
 
 /* MC_CMD_XPM_REPAIR_IN msgrequest */
 #define    MC_CMD_XPM_REPAIR_IN_LEN 8
 /* Start address (byte) */
 #define       MC_CMD_XPM_REPAIR_IN_ADDR_OFST 0
+#define       MC_CMD_XPM_REPAIR_IN_ADDR_LEN 4
 /* Count (bytes) */
 #define       MC_CMD_XPM_REPAIR_IN_COUNT_OFST 4
+#define       MC_CMD_XPM_REPAIR_IN_COUNT_LEN 4
 
 /* MC_CMD_XPM_REPAIR_OUT msgresponse */
 #define    MC_CMD_XPM_REPAIR_OUT_LEN 0
@@ -11797,7 +13915,7 @@
  */
 #define MC_CMD_XPM_DECODER_TEST 0x10a
 
-#define MC_CMD_0x10a_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+#define MC_CMD_0x10a_PRIVILEGE_CTG SRIOV_CTG_INSECURE
 
 /* MC_CMD_XPM_DECODER_TEST_IN msgrequest */
 #define    MC_CMD_XPM_DECODER_TEST_IN_LEN 0
@@ -11816,7 +13934,7 @@
  */
 #define MC_CMD_XPM_WRITE_TEST 0x10b
 
-#define MC_CMD_0x10b_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+#define MC_CMD_0x10b_PRIVILEGE_CTG SRIOV_CTG_INSECURE
 
 /* MC_CMD_XPM_WRITE_TEST_IN msgrequest */
 #define    MC_CMD_XPM_WRITE_TEST_IN_LEN 0
@@ -11842,10 +13960,13 @@
 #define    MC_CMD_EXEC_SIGNED_IN_LEN 28
 /* the length of code to include in the CMAC */
 #define       MC_CMD_EXEC_SIGNED_IN_CODELEN_OFST 0
+#define       MC_CMD_EXEC_SIGNED_IN_CODELEN_LEN 4
 /* the length of data to include in the CMAC */
 #define       MC_CMD_EXEC_SIGNED_IN_DATALEN_OFST 4
+#define       MC_CMD_EXEC_SIGNED_IN_DATALEN_LEN 4
 /* the XPM sector containing the key to use */
 #define       MC_CMD_EXEC_SIGNED_IN_KEYSECTOR_OFST 8
+#define       MC_CMD_EXEC_SIGNED_IN_KEYSECTOR_LEN 4
 /* the expected CMAC value */
 #define       MC_CMD_EXEC_SIGNED_IN_CMAC_OFST 12
 #define       MC_CMD_EXEC_SIGNED_IN_CMAC_LEN 16
@@ -11868,11 +13989,34 @@
 #define    MC_CMD_PREPARE_SIGNED_IN_LEN 4
 /* the length of data area to clear */
 #define       MC_CMD_PREPARE_SIGNED_IN_DATALEN_OFST 0
+#define       MC_CMD_PREPARE_SIGNED_IN_DATALEN_LEN 4
 
 /* MC_CMD_PREPARE_SIGNED_OUT msgresponse */
 #define    MC_CMD_PREPARE_SIGNED_OUT_LEN 0
 
 
+/* TUNNEL_ENCAP_UDP_PORT_ENTRY structuredef */
+#define    TUNNEL_ENCAP_UDP_PORT_ENTRY_LEN 4
+/* UDP port (the standard ports are named below but any port may be used) */
+#define       TUNNEL_ENCAP_UDP_PORT_ENTRY_UDP_PORT_OFST 0
+#define       TUNNEL_ENCAP_UDP_PORT_ENTRY_UDP_PORT_LEN 2
+/* enum: the IANA allocated UDP port for VXLAN */
+#define          TUNNEL_ENCAP_UDP_PORT_ENTRY_IANA_VXLAN_UDP_PORT  0x12b5
+/* enum: the IANA allocated UDP port for Geneve */
+#define          TUNNEL_ENCAP_UDP_PORT_ENTRY_IANA_GENEVE_UDP_PORT  0x17c1
+#define       TUNNEL_ENCAP_UDP_PORT_ENTRY_UDP_PORT_LBN 0
+#define       TUNNEL_ENCAP_UDP_PORT_ENTRY_UDP_PORT_WIDTH 16
+/* tunnel encapsulation protocol (only those named below are supported) */
+#define       TUNNEL_ENCAP_UDP_PORT_ENTRY_PROTOCOL_OFST 2
+#define       TUNNEL_ENCAP_UDP_PORT_ENTRY_PROTOCOL_LEN 2
+/* enum: This port will be used for VXLAN on both IPv4 and IPv6 */
+#define          TUNNEL_ENCAP_UDP_PORT_ENTRY_VXLAN  0x0
+/* enum: This port will be used for Geneve on both IPv4 and IPv6 */
+#define          TUNNEL_ENCAP_UDP_PORT_ENTRY_GENEVE  0x1
+#define       TUNNEL_ENCAP_UDP_PORT_ENTRY_PROTOCOL_LBN 16
+#define       TUNNEL_ENCAP_UDP_PORT_ENTRY_PROTOCOL_WIDTH 16
+
+
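/* Illustrative sketch (not part of the generated header): one 4-byte entry
 * packs the UDP port into bits 0-15 and the protocol into bits 16-31, per
 * the LBN/WIDTH pairs above, e.g. the IANA VXLAN port with the VXLAN
 * protocol code.
 */
#include <stdint.h>

static inline uint32_t tunnel_encap_entry(uint16_t udp_port, uint16_t proto)
{
	return ((uint32_t)udp_port << TUNNEL_ENCAP_UDP_PORT_ENTRY_UDP_PORT_LBN) |
	       ((uint32_t)proto << TUNNEL_ENCAP_UDP_PORT_ENTRY_PROTOCOL_LBN);
}

/* tunnel_encap_entry(TUNNEL_ENCAP_UDP_PORT_ENTRY_IANA_VXLAN_UDP_PORT,
 *                    TUNNEL_ENCAP_UDP_PORT_ENTRY_VXLAN) == 0x000012b5
 */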
 /***********************************/
 /* MC_CMD_SET_TUNNEL_ENCAP_UDP_PORTS
  * Configure UDP ports for tunnel encapsulation hardware acceleration. The
@@ -11913,27 +14057,6 @@
 #define        MC_CMD_SET_TUNNEL_ENCAP_UDP_PORTS_OUT_RESETTING_LBN 0
 #define        MC_CMD_SET_TUNNEL_ENCAP_UDP_PORTS_OUT_RESETTING_WIDTH 1
 
-/* TUNNEL_ENCAP_UDP_PORT_ENTRY structuredef */
-#define    TUNNEL_ENCAP_UDP_PORT_ENTRY_LEN 4
-/* UDP port (the standard ports are named below but any port may be used) */
-#define       TUNNEL_ENCAP_UDP_PORT_ENTRY_UDP_PORT_OFST 0
-#define       TUNNEL_ENCAP_UDP_PORT_ENTRY_UDP_PORT_LEN 2
-/* enum: the IANA allocated UDP port for VXLAN */
-#define          TUNNEL_ENCAP_UDP_PORT_ENTRY_IANA_VXLAN_UDP_PORT  0x12b5
-/* enum: the IANA allocated UDP port for Geneve */
-#define          TUNNEL_ENCAP_UDP_PORT_ENTRY_IANA_GENEVE_UDP_PORT  0x17c1
-#define       TUNNEL_ENCAP_UDP_PORT_ENTRY_UDP_PORT_LBN 0
-#define       TUNNEL_ENCAP_UDP_PORT_ENTRY_UDP_PORT_WIDTH 16
-/* tunnel encapsulation protocol (only those named below are supported) */
-#define       TUNNEL_ENCAP_UDP_PORT_ENTRY_PROTOCOL_OFST 2
-#define       TUNNEL_ENCAP_UDP_PORT_ENTRY_PROTOCOL_LEN 2
-/* enum: VXLAN */
-#define          TUNNEL_ENCAP_UDP_PORT_ENTRY_VXLAN  0x0
-/* enum: Geneve */
-#define          TUNNEL_ENCAP_UDP_PORT_ENTRY_GENEVE  0x1
-#define       TUNNEL_ENCAP_UDP_PORT_ENTRY_PROTOCOL_LBN 16
-#define       TUNNEL_ENCAP_UDP_PORT_ENTRY_PROTOCOL_WIDTH 16
-
 
 /***********************************/
 /* MC_CMD_RX_BALANCING
@@ -11950,12 +14073,16 @@
 #define    MC_CMD_RX_BALANCING_IN_LEN 16
 /* The RX port whose upconverter table will be modified */
 #define       MC_CMD_RX_BALANCING_IN_PORT_OFST 0
+#define       MC_CMD_RX_BALANCING_IN_PORT_LEN 4
 /* The VLAN priority associated to the table index and vFIFO */
 #define       MC_CMD_RX_BALANCING_IN_PRIORITY_OFST 4
+#define       MC_CMD_RX_BALANCING_IN_PRIORITY_LEN 4
 /* The resulting bit of SRC^DST for indexing the table */
 #define       MC_CMD_RX_BALANCING_IN_SRC_DST_OFST 8
+#define       MC_CMD_RX_BALANCING_IN_SRC_DST_LEN 4
 /* The RX engine to which the vFIFO in the table entry will point */
 #define       MC_CMD_RX_BALANCING_IN_ENG_OFST 12
+#define       MC_CMD_RX_BALANCING_IN_ENG_LEN 4
 
 /* MC_CMD_RX_BALANCING_OUT msgresponse */
 #define    MC_CMD_RX_BALANCING_OUT_LEN 0
@@ -11976,8 +14103,10 @@
 #define    MC_CMD_NVRAM_PRIVATE_APPEND_IN_LEN(num) (8+1*(num))
 /* The tag to be appended */
 #define       MC_CMD_NVRAM_PRIVATE_APPEND_IN_TAG_OFST 0
+#define       MC_CMD_NVRAM_PRIVATE_APPEND_IN_TAG_LEN 4
 /* The length of the data */
 #define       MC_CMD_NVRAM_PRIVATE_APPEND_IN_LENGTH_OFST 4
+#define       MC_CMD_NVRAM_PRIVATE_APPEND_IN_LENGTH_LEN 4
 /* The data to be contained in the TLV structure */
 #define       MC_CMD_NVRAM_PRIVATE_APPEND_IN_DATA_BUFFER_OFST 8
 #define       MC_CMD_NVRAM_PRIVATE_APPEND_IN_DATA_BUFFER_LEN 1
@@ -12002,6 +14131,7 @@
 #define    MC_CMD_XPM_VERIFY_CONTENTS_IN_LEN 4
 /* Data type to be checked */
 #define       MC_CMD_XPM_VERIFY_CONTENTS_IN_DATA_TYPE_OFST 0
+#define       MC_CMD_XPM_VERIFY_CONTENTS_IN_DATA_TYPE_LEN 4
 
 /* MC_CMD_XPM_VERIFY_CONTENTS_OUT msgresponse */
 #define    MC_CMD_XPM_VERIFY_CONTENTS_OUT_LENMIN 12
@@ -12009,10 +14139,13 @@
 #define    MC_CMD_XPM_VERIFY_CONTENTS_OUT_LEN(num) (12+1*(num))
 /* Number of sectors found (test builds only) */
 #define       MC_CMD_XPM_VERIFY_CONTENTS_OUT_NUM_SECTORS_OFST 0
+#define       MC_CMD_XPM_VERIFY_CONTENTS_OUT_NUM_SECTORS_LEN 4
 /* Number of bytes found (test builds only) */
 #define       MC_CMD_XPM_VERIFY_CONTENTS_OUT_NUM_BYTES_OFST 4
+#define       MC_CMD_XPM_VERIFY_CONTENTS_OUT_NUM_BYTES_LEN 4
 /* Length of signature */
 #define       MC_CMD_XPM_VERIFY_CONTENTS_OUT_SIG_LENGTH_OFST 8
+#define       MC_CMD_XPM_VERIFY_CONTENTS_OUT_SIG_LENGTH_LEN 4
 /* Signature */
 #define       MC_CMD_XPM_VERIFY_CONTENTS_OUT_SIGNATURE_OFST 12
 #define       MC_CMD_XPM_VERIFY_CONTENTS_OUT_SIGNATURE_LEN 1
@@ -12037,12 +14170,16 @@
 #define    MC_CMD_SET_EVQ_TMR_IN_LEN 16
 /* Function-relative queue instance */
 #define       MC_CMD_SET_EVQ_TMR_IN_INSTANCE_OFST 0
+#define       MC_CMD_SET_EVQ_TMR_IN_INSTANCE_LEN 4
 /* Requested value for timer load (in nanoseconds) */
 #define       MC_CMD_SET_EVQ_TMR_IN_TMR_LOAD_REQ_NS_OFST 4
+#define       MC_CMD_SET_EVQ_TMR_IN_TMR_LOAD_REQ_NS_LEN 4
 /* Requested value for timer reload (in nanoseconds) */
 #define       MC_CMD_SET_EVQ_TMR_IN_TMR_RELOAD_REQ_NS_OFST 8
+#define       MC_CMD_SET_EVQ_TMR_IN_TMR_RELOAD_REQ_NS_LEN 4
 /* Timer mode. Meanings as per EVQ_TMR_REG.TC_TIMER_VAL */
 #define       MC_CMD_SET_EVQ_TMR_IN_TMR_MODE_OFST 12
+#define       MC_CMD_SET_EVQ_TMR_IN_TMR_MODE_LEN 4
 #define          MC_CMD_SET_EVQ_TMR_IN_TIMER_MODE_DIS  0x0 /* enum */
 #define          MC_CMD_SET_EVQ_TMR_IN_TIMER_MODE_IMMED_START  0x1 /* enum */
 #define          MC_CMD_SET_EVQ_TMR_IN_TIMER_MODE_TRIG_START  0x2 /* enum */
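/* Illustrative sketch (not part of the generated header): a 16-byte request
 * asking for a 10us load/reload in immediate-start mode, using put_le32()
 * from the eye-plot sketch earlier. The _OUT message below reports the
 * values actually applied after rounding.
 */
static void build_set_evq_tmr(uint8_t *inbuf, uint32_t evq)
{
	put_le32(inbuf, MC_CMD_SET_EVQ_TMR_IN_INSTANCE_OFST, evq);
	put_le32(inbuf, MC_CMD_SET_EVQ_TMR_IN_TMR_LOAD_REQ_NS_OFST, 10000);
	put_le32(inbuf, MC_CMD_SET_EVQ_TMR_IN_TMR_RELOAD_REQ_NS_OFST, 10000);
	put_le32(inbuf, MC_CMD_SET_EVQ_TMR_IN_TMR_MODE_OFST,
		 MC_CMD_SET_EVQ_TMR_IN_TIMER_MODE_IMMED_START);
}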
@@ -12052,8 +14189,10 @@
 #define    MC_CMD_SET_EVQ_TMR_OUT_LEN 8
 /* Actual value for timer load (in nanoseconds) */
 #define       MC_CMD_SET_EVQ_TMR_OUT_TMR_LOAD_ACT_NS_OFST 0
+#define       MC_CMD_SET_EVQ_TMR_OUT_TMR_LOAD_ACT_NS_LEN 4
 /* Actual value for timer reload (in nanoseconds) */
 #define       MC_CMD_SET_EVQ_TMR_OUT_TMR_RELOAD_ACT_NS_OFST 4
+#define       MC_CMD_SET_EVQ_TMR_OUT_TMR_RELOAD_ACT_NS_LEN 4
 
 
 /***********************************/
@@ -12071,29 +14210,35 @@
 #define    MC_CMD_GET_EVQ_TMR_PROPERTIES_OUT_LEN 36
 /* Reserved for future use. */
 #define       MC_CMD_GET_EVQ_TMR_PROPERTIES_OUT_FLAGS_OFST 0
+#define       MC_CMD_GET_EVQ_TMR_PROPERTIES_OUT_FLAGS_LEN 4
 /* For timers updated via writes to EVQ_TMR_REG, this is the time interval (in
  * nanoseconds) for each increment of the timer load/reload count. The
  * requested duration of a timer is this value multiplied by the timer
  * load/reload count.
  */
 #define       MC_CMD_GET_EVQ_TMR_PROPERTIES_OUT_TMR_REG_NS_PER_COUNT_OFST 4
+#define       MC_CMD_GET_EVQ_TMR_PROPERTIES_OUT_TMR_REG_NS_PER_COUNT_LEN 4
 /* For timers updated via writes to EVQ_TMR_REG, this is the maximum value
  * allowed for timer load/reload counts.
  */
 #define       MC_CMD_GET_EVQ_TMR_PROPERTIES_OUT_TMR_REG_MAX_COUNT_OFST 8
+#define       MC_CMD_GET_EVQ_TMR_PROPERTIES_OUT_TMR_REG_MAX_COUNT_LEN 4
 /* For timers updated via writes to EVQ_TMR_REG, timer load/reload counts not a
  * multiple of this step size will be rounded in an implementation defined
  * manner.
  */
 #define       MC_CMD_GET_EVQ_TMR_PROPERTIES_OUT_TMR_REG_STEP_OFST 12
+#define       MC_CMD_GET_EVQ_TMR_PROPERTIES_OUT_TMR_REG_STEP_LEN 4
 /* Maximum timer duration (in nanoseconds) for timers updated via MCDI. Only
  * meaningful if MC_CMD_SET_EVQ_TMR is implemented.
  */
 #define       MC_CMD_GET_EVQ_TMR_PROPERTIES_OUT_MCDI_TMR_MAX_NS_OFST 16
+#define       MC_CMD_GET_EVQ_TMR_PROPERTIES_OUT_MCDI_TMR_MAX_NS_LEN 4
 /* Timer durations requested via MCDI that are not a multiple of this step size
  * will be rounded up. Only meaningful if MC_CMD_SET_EVQ_TMR is implemented.
  */
 #define       MC_CMD_GET_EVQ_TMR_PROPERTIES_OUT_MCDI_TMR_STEP_NS_OFST 20
+#define       MC_CMD_GET_EVQ_TMR_PROPERTIES_OUT_MCDI_TMR_STEP_NS_LEN 4
 /* For timers updated using the bug35388 workaround, this is the time interval
  * (in nanoseconds) for each increment of the timer load/reload count. The
  * requested duration of a timer is this value multiplied by the timer
@@ -12101,17 +14246,20 @@
  * is enabled.
  */
 #define       MC_CMD_GET_EVQ_TMR_PROPERTIES_OUT_BUG35388_TMR_NS_PER_COUNT_OFST 24
+#define       MC_CMD_GET_EVQ_TMR_PROPERTIES_OUT_BUG35388_TMR_NS_PER_COUNT_LEN 4
 /* For timers updated using the bug35388 workaround, this is the maximum value
  * allowed for timer load/reload counts. This field is only meaningful if the
  * bug35388 workaround is enabled.
  */
 #define       MC_CMD_GET_EVQ_TMR_PROPERTIES_OUT_BUG35388_TMR_MAX_COUNT_OFST 28
+#define       MC_CMD_GET_EVQ_TMR_PROPERTIES_OUT_BUG35388_TMR_MAX_COUNT_LEN 4
 /* For timers updated using the bug35388 workaround, timer load/reload counts
  * not a multiple of this step size will be rounded in an implementation
  * defined manner. This field is only meaningful if the bug35388 workaround is
  * enabled.
  */
 #define       MC_CMD_GET_EVQ_TMR_PROPERTIES_OUT_BUG35388_TMR_STEP_OFST 32
+#define       MC_CMD_GET_EVQ_TMR_PROPERTIES_OUT_BUG35388_TMR_STEP_LEN 4
 
 
 /***********************************/
@@ -12129,19 +14277,24 @@
  * local queue index.
  */
 #define       MC_CMD_ALLOCATE_TX_VFIFO_CP_IN_INSTANCE_OFST 0
+#define       MC_CMD_ALLOCATE_TX_VFIFO_CP_IN_INSTANCE_LEN 4
 /* Will the common pool be used as TX_vFIFO_ULL (1) */
 #define       MC_CMD_ALLOCATE_TX_VFIFO_CP_IN_MODE_OFST 4
+#define       MC_CMD_ALLOCATE_TX_VFIFO_CP_IN_MODE_LEN 4
 #define          MC_CMD_ALLOCATE_TX_VFIFO_CP_IN_ENABLED       0x1 /* enum */
 /* enum: Using this interface without TX_vFIFO_ULL is not supported for now */
 #define          MC_CMD_ALLOCATE_TX_VFIFO_CP_IN_DISABLED      0x0
 /* Number of buffers to reserve for the common pool */
 #define       MC_CMD_ALLOCATE_TX_VFIFO_CP_IN_SIZE_OFST 8
+#define       MC_CMD_ALLOCATE_TX_VFIFO_CP_IN_SIZE_LEN 4
 /* TX datapath to which the Common Pool is connected. */
 #define       MC_CMD_ALLOCATE_TX_VFIFO_CP_IN_INGRESS_OFST 12
+#define       MC_CMD_ALLOCATE_TX_VFIFO_CP_IN_INGRESS_LEN 4
 /* enum: Extracts information from function */
 #define          MC_CMD_ALLOCATE_TX_VFIFO_CP_IN_USE_FUNCTION_VALUE          -0x1
 /* Network port or RX Engine to which the common pool connects. */
 #define       MC_CMD_ALLOCATE_TX_VFIFO_CP_IN_EGRESS_OFST 16
+#define       MC_CMD_ALLOCATE_TX_VFIFO_CP_IN_EGRESS_LEN 4
 /* enum: Extracts information from function */
 /*               MC_CMD_ALLOCATE_TX_VFIFO_CP_IN_USE_FUNCTION_VALUE          -0x1 */
 #define          MC_CMD_ALLOCATE_TX_VFIFO_CP_IN_PORT0          0x0 /* enum */
@@ -12157,6 +14310,7 @@
 #define    MC_CMD_ALLOCATE_TX_VFIFO_CP_OUT_LEN 4
 /* ID of the common pool allocated */
 #define       MC_CMD_ALLOCATE_TX_VFIFO_CP_OUT_CP_ID_OFST 0
+#define       MC_CMD_ALLOCATE_TX_VFIFO_CP_OUT_CP_ID_LEN 4
 
 
 /***********************************/
@@ -12173,8 +14327,10 @@
 /* Common pool previously allocated to which the new vFIFO will be associated
  */
 #define       MC_CMD_ALLOCATE_TX_VFIFO_VFIFO_IN_CP_OFST 0
+#define       MC_CMD_ALLOCATE_TX_VFIFO_VFIFO_IN_CP_LEN 4
 /* Port or RX engine to associate the vFIFO egress */
 #define       MC_CMD_ALLOCATE_TX_VFIFO_VFIFO_IN_EGRESS_OFST 4
+#define       MC_CMD_ALLOCATE_TX_VFIFO_VFIFO_IN_EGRESS_LEN 4
 /* enum: Extracts information from common pool */
 #define          MC_CMD_ALLOCATE_TX_VFIFO_VFIFO_IN_USE_CP_VALUE   -0x1
 #define          MC_CMD_ALLOCATE_TX_VFIFO_VFIFO_IN_PORT0          0x0 /* enum */
@@ -12187,12 +14343,15 @@
 #define          MC_CMD_ALLOCATE_TX_VFIFO_VFIFO_IN_RX_ENGINE1     0x5
 /* Minimum number of buffers that the pool must have */
 #define       MC_CMD_ALLOCATE_TX_VFIFO_VFIFO_IN_SIZE_OFST 8
+#define       MC_CMD_ALLOCATE_TX_VFIFO_VFIFO_IN_SIZE_LEN 4
 /* enum: Do not check the space available */
 #define          MC_CMD_ALLOCATE_TX_VFIFO_VFIFO_IN_NO_MINIMUM     0x0
 /* Will the vFIFO be used as TX_vFIFO_ULL */
 #define       MC_CMD_ALLOCATE_TX_VFIFO_VFIFO_IN_MODE_OFST 12
+#define       MC_CMD_ALLOCATE_TX_VFIFO_VFIFO_IN_MODE_LEN 4
 /* Network priority of the vFIFO, if applicable */
 #define       MC_CMD_ALLOCATE_TX_VFIFO_VFIFO_IN_PRIORITY_OFST 16
+#define       MC_CMD_ALLOCATE_TX_VFIFO_VFIFO_IN_PRIORITY_LEN 4
 /* enum: Search for the lowest unused priority */
 #define          MC_CMD_ALLOCATE_TX_VFIFO_VFIFO_IN_LOWEST_AVAILABLE  -0x1
 
@@ -12200,8 +14359,10 @@
 #define    MC_CMD_ALLOCATE_TX_VFIFO_VFIFO_OUT_LEN 8
 /* Short vFIFO ID */
 #define       MC_CMD_ALLOCATE_TX_VFIFO_VFIFO_OUT_VID_OFST 0
+#define       MC_CMD_ALLOCATE_TX_VFIFO_VFIFO_OUT_VID_LEN 4
 /* Network priority of the vFIFO */
 #define       MC_CMD_ALLOCATE_TX_VFIFO_VFIFO_OUT_PRIORITY_OFST 4
+#define       MC_CMD_ALLOCATE_TX_VFIFO_VFIFO_OUT_PRIORITY_LEN 4
 
 
 /***********************************/
@@ -12217,6 +14378,7 @@
 #define    MC_CMD_TEARDOWN_TX_VFIFO_VF_IN_LEN 4
 /* Short vFIFO ID */
 #define       MC_CMD_TEARDOWN_TX_VFIFO_VF_IN_VFIFO_OFST 0
+#define       MC_CMD_TEARDOWN_TX_VFIFO_VF_IN_VFIFO_LEN 4
 
 /* MC_CMD_TEARDOWN_TX_VFIFO_VF_OUT msgresponse */
 #define    MC_CMD_TEARDOWN_TX_VFIFO_VF_OUT_LEN 0
@@ -12235,6 +14397,7 @@
 #define    MC_CMD_DEALLOCATE_TX_VFIFO_CP_IN_LEN 4
 /* Common pool ID given when pool allocated */
 #define       MC_CMD_DEALLOCATE_TX_VFIFO_CP_IN_POOL_ID_OFST 0
+#define       MC_CMD_DEALLOCATE_TX_VFIFO_CP_IN_POOL_ID_LEN 4
 
 /* MC_CMD_DEALLOCATE_TX_VFIFO_CP_OUT msgresponse */
 #define    MC_CMD_DEALLOCATE_TX_VFIFO_CP_OUT_LEN 0
@@ -12256,8 +14419,10 @@
 #define    MC_CMD_SWITCH_GET_UNASSIGNED_BUFFERS_OUT_LEN 8
 /* Available buffers for the ENG to NET vFIFOs. */
 #define       MC_CMD_SWITCH_GET_UNASSIGNED_BUFFERS_OUT_NET_OFST 0
+#define       MC_CMD_SWITCH_GET_UNASSIGNED_BUFFERS_OUT_NET_LEN 4
 /* Available buffers for the ENG to ENG and NET to ENG vFIFOs. */
 #define       MC_CMD_SWITCH_GET_UNASSIGNED_BUFFERS_OUT_ENG_OFST 4
+#define       MC_CMD_SWITCH_GET_UNASSIGNED_BUFFERS_OUT_ENG_LEN 4
 
 
 #endif /* MCDI_PCOL_H */
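
A note on the pattern above: the bulk of these mcdi_pcol.h changes pair each field's _OFST define with an explicit _LEN define, which lets driver-side accessors size-check fields at compile time. A minimal sketch of such an accessor, assuming a 4-byte little-endian field (MCDI_FIELD_DWORD is hypothetical; the real sfc accessor macros differ in detail):

	/* Read a dword field from an MCDI buffer, failing the build if the
	 * field is not 4 bytes wide.
	 */
	#define MCDI_FIELD_DWORD(buf, fld)					\
		({								\
			BUILD_BUG_ON(MC_CMD_ ## fld ## _LEN != 4);		\
			le32_to_cpu(*(const __le32 *)((const u8 *)(buf) +	\
						      MC_CMD_ ## fld ## _OFST)); \
		})
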
diff --git a/drivers/net/ethernet/sfc/mcdi_port.c b/drivers/net/ethernet/sfc/mcdi_port.c
index 6e1f282b2976..ce8aabf9091e 100644
--- a/drivers/net/ethernet/sfc/mcdi_port.c
+++ b/drivers/net/ethernet/sfc/mcdi_port.c
@@ -171,89 +171,108 @@ static int efx_mcdi_mdio_write(struct net_device *net_dev,
 	return 0;
 }
 
-static u32 mcdi_to_ethtool_cap(u32 media, u32 cap)
+static void mcdi_to_ethtool_linkset(u32 media, u32 cap, unsigned long *linkset)
 {
-	u32 result = 0;
+	#define SET_BIT(name)	__set_bit(ETHTOOL_LINK_MODE_ ## name ## _BIT, \
+					  linkset)
 
+	bitmap_zero(linkset, __ETHTOOL_LINK_MODE_MASK_NBITS);
 	switch (media) {
 	case MC_CMD_MEDIA_KX4:
-		result |= SUPPORTED_Backplane;
+		SET_BIT(Backplane);
 		if (cap & (1 << MC_CMD_PHY_CAP_1000FDX_LBN))
-			result |= SUPPORTED_1000baseKX_Full;
+			SET_BIT(1000baseKX_Full);
 		if (cap & (1 << MC_CMD_PHY_CAP_10000FDX_LBN))
-			result |= SUPPORTED_10000baseKX4_Full;
+			SET_BIT(10000baseKX4_Full);
 		if (cap & (1 << MC_CMD_PHY_CAP_40000FDX_LBN))
-			result |= SUPPORTED_40000baseKR4_Full;
+			SET_BIT(40000baseKR4_Full);
 		break;
 
 	case MC_CMD_MEDIA_XFP:
 	case MC_CMD_MEDIA_SFP_PLUS:
 	case MC_CMD_MEDIA_QSFP_PLUS:
-		result |= SUPPORTED_FIBRE;
+		SET_BIT(FIBRE);
 		if (cap & (1 << MC_CMD_PHY_CAP_1000FDX_LBN))
-			result |= SUPPORTED_1000baseT_Full;
+			SET_BIT(1000baseT_Full);
 		if (cap & (1 << MC_CMD_PHY_CAP_10000FDX_LBN))
-			result |= SUPPORTED_10000baseT_Full;
+			SET_BIT(10000baseT_Full);
 		if (cap & (1 << MC_CMD_PHY_CAP_40000FDX_LBN))
-			result |= SUPPORTED_40000baseCR4_Full;
+			SET_BIT(40000baseCR4_Full);
+		if (cap & (1 << MC_CMD_PHY_CAP_100000FDX_LBN))
+			SET_BIT(100000baseCR4_Full);
+		if (cap & (1 << MC_CMD_PHY_CAP_25000FDX_LBN))
+			SET_BIT(25000baseCR_Full);
+		if (cap & (1 << MC_CMD_PHY_CAP_50000FDX_LBN))
+			SET_BIT(50000baseCR2_Full);
 		break;
 
 	case MC_CMD_MEDIA_BASE_T:
-		result |= SUPPORTED_TP;
+		SET_BIT(TP);
 		if (cap & (1 << MC_CMD_PHY_CAP_10HDX_LBN))
-			result |= SUPPORTED_10baseT_Half;
+			SET_BIT(10baseT_Half);
 		if (cap & (1 << MC_CMD_PHY_CAP_10FDX_LBN))
-			result |= SUPPORTED_10baseT_Full;
+			SET_BIT(10baseT_Full);
 		if (cap & (1 << MC_CMD_PHY_CAP_100HDX_LBN))
-			result |= SUPPORTED_100baseT_Half;
+			SET_BIT(100baseT_Half);
 		if (cap & (1 << MC_CMD_PHY_CAP_100FDX_LBN))
-			result |= SUPPORTED_100baseT_Full;
+			SET_BIT(100baseT_Full);
 		if (cap & (1 << MC_CMD_PHY_CAP_1000HDX_LBN))
-			result |= SUPPORTED_1000baseT_Half;
+			SET_BIT(1000baseT_Half);
 		if (cap & (1 << MC_CMD_PHY_CAP_1000FDX_LBN))
-			result |= SUPPORTED_1000baseT_Full;
+			SET_BIT(1000baseT_Full);
 		if (cap & (1 << MC_CMD_PHY_CAP_10000FDX_LBN))
-			result |= SUPPORTED_10000baseT_Full;
+			SET_BIT(10000baseT_Full);
 		break;
 	}
 
 	if (cap & (1 << MC_CMD_PHY_CAP_PAUSE_LBN))
-		result |= SUPPORTED_Pause;
+		SET_BIT(Pause);
 	if (cap & (1 << MC_CMD_PHY_CAP_ASYM_LBN))
-		result |= SUPPORTED_Asym_Pause;
+		SET_BIT(Asym_Pause);
 	if (cap & (1 << MC_CMD_PHY_CAP_AN_LBN))
-		result |= SUPPORTED_Autoneg;
+		SET_BIT(Autoneg);
 
-	return result;
+	#undef SET_BIT
 }
 
-static u32 ethtool_to_mcdi_cap(u32 cap)
+static u32 ethtool_linkset_to_mcdi_cap(const unsigned long *linkset)
 {
 	u32 result = 0;
 
-	if (cap & SUPPORTED_10baseT_Half)
+	#define TEST_BIT(name)	test_bit(ETHTOOL_LINK_MODE_ ## name ## _BIT, \
+					 linkset)
+
+	if (TEST_BIT(10baseT_Half))
 		result |= (1 << MC_CMD_PHY_CAP_10HDX_LBN);
-	if (cap & SUPPORTED_10baseT_Full)
+	if (TEST_BIT(10baseT_Full))
 		result |= (1 << MC_CMD_PHY_CAP_10FDX_LBN);
-	if (cap & SUPPORTED_100baseT_Half)
+	if (TEST_BIT(100baseT_Half))
 		result |= (1 << MC_CMD_PHY_CAP_100HDX_LBN);
-	if (cap & SUPPORTED_100baseT_Full)
+	if (TEST_BIT(100baseT_Full))
 		result |= (1 << MC_CMD_PHY_CAP_100FDX_LBN);
-	if (cap & SUPPORTED_1000baseT_Half)
+	if (TEST_BIT(1000baseT_Half))
 		result |= (1 << MC_CMD_PHY_CAP_1000HDX_LBN);
-	if (cap & (SUPPORTED_1000baseT_Full | SUPPORTED_1000baseKX_Full))
+	if (TEST_BIT(1000baseT_Full) || TEST_BIT(1000baseKX_Full))
 		result |= (1 << MC_CMD_PHY_CAP_1000FDX_LBN);
-	if (cap & (SUPPORTED_10000baseT_Full | SUPPORTED_10000baseKX4_Full))
+	if (TEST_BIT(10000baseT_Full) || TEST_BIT(10000baseKX4_Full))
 		result |= (1 << MC_CMD_PHY_CAP_10000FDX_LBN);
-	if (cap & (SUPPORTED_40000baseCR4_Full | SUPPORTED_40000baseKR4_Full))
+	if (TEST_BIT(40000baseCR4_Full) || TEST_BIT(40000baseKR4_Full))
 		result |= (1 << MC_CMD_PHY_CAP_40000FDX_LBN);
-	if (cap & SUPPORTED_Pause)
+	if (TEST_BIT(100000baseCR4_Full))
+		result |= (1 << MC_CMD_PHY_CAP_100000FDX_LBN);
+	if (TEST_BIT(25000baseCR_Full))
+		result |= (1 << MC_CMD_PHY_CAP_25000FDX_LBN);
+	if (TEST_BIT(50000baseCR2_Full))
+		result |= (1 << MC_CMD_PHY_CAP_50000FDX_LBN);
+	if (TEST_BIT(Pause))
 		result |= (1 << MC_CMD_PHY_CAP_PAUSE_LBN);
-	if (cap & SUPPORTED_Asym_Pause)
+	if (TEST_BIT(Asym_Pause))
 		result |= (1 << MC_CMD_PHY_CAP_ASYM_LBN);
-	if (cap & SUPPORTED_Autoneg)
+	if (TEST_BIT(Autoneg))
 		result |= (1 << MC_CMD_PHY_CAP_AN_LBN);
 
+	#undef TEST_BIT
+
 	return result;
 }
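
The rewrite above exists because the legacy u32 SUPPORTED_*/ADVERTISED_* masks have no free bits for the newly added 25G/50G/100G modes; the replacement is the ethtool link-mode bitmap. A minimal usage sketch, using only helpers already visible in this hunk:

	__ETHTOOL_DECLARE_LINK_MODE_MASK(modes);

	/* Clear the mask, then set and test individual link modes. */
	bitmap_zero(modes, __ETHTOOL_LINK_MODE_MASK_NBITS);
	__set_bit(ETHTOOL_LINK_MODE_25000baseCR_Full_BIT, modes);
	if (test_bit(ETHTOOL_LINK_MODE_25000baseCR_Full_BIT, modes))
		pr_info("25G CR advertised\n");
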
 
@@ -285,7 +304,7 @@ static u32 efx_get_mcdi_phy_flags(struct efx_nic *efx)
 	return flags;
 }
 
-static u32 mcdi_to_ethtool_media(u32 media)
+static u8 mcdi_to_ethtool_media(u32 media)
 {
 	switch (media) {
 	case MC_CMD_MEDIA_XAUI:
@@ -371,8 +390,8 @@ static int efx_mcdi_phy_probe(struct efx_nic *efx)
 
 	caps = MCDI_DWORD(outbuf, GET_LINK_OUT_CAP);
 	if (caps & (1 << MC_CMD_PHY_CAP_AN_LBN))
-		efx->link_advertising =
-			mcdi_to_ethtool_cap(phy_data->media, caps);
+		mcdi_to_ethtool_linkset(phy_data->media, caps,
+					efx->link_advertising);
 	else
 		phy_data->forced_cap = caps;
 
@@ -435,8 +454,8 @@ fail:
 int efx_mcdi_port_reconfigure(struct efx_nic *efx)
 {
 	struct efx_mcdi_phy_data *phy_cfg = efx->phy_data;
-	u32 caps = (efx->link_advertising ?
-		    ethtool_to_mcdi_cap(efx->link_advertising) :
+	u32 caps = (efx->link_advertising[0] ?
+		    ethtool_linkset_to_mcdi_cap(efx->link_advertising) :
 		    phy_cfg->forced_cap);
 
 	return efx_mcdi_set_link(efx, caps, efx_get_mcdi_phy_flags(efx),
@@ -509,34 +528,28 @@ static void efx_mcdi_phy_get_link_ksettings(struct efx_nic *efx,
 	struct efx_mcdi_phy_data *phy_cfg = efx->phy_data;
 	MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_LINK_OUT_LEN);
 	int rc;
-	u32 supported, advertising, lp_advertising;
 
-	supported = mcdi_to_ethtool_cap(phy_cfg->media, phy_cfg->supported_cap);
-	advertising = efx->link_advertising;
 	cmd->base.speed = efx->link_state.speed;
 	cmd->base.duplex = efx->link_state.fd;
 	cmd->base.port = mcdi_to_ethtool_media(phy_cfg->media);
 	cmd->base.phy_address = phy_cfg->port;
-	cmd->base.autoneg = !!(efx->link_advertising & ADVERTISED_Autoneg);
+	cmd->base.autoneg = !!(efx->link_advertising[0] & ADVERTISED_Autoneg);
 	cmd->base.mdio_support = (efx->mdio.mode_support &
 			      (MDIO_SUPPORTS_C45 | MDIO_SUPPORTS_C22));
 
-	ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.supported,
-						supported);
-	ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.advertising,
-						advertising);
+	mcdi_to_ethtool_linkset(phy_cfg->media, phy_cfg->supported_cap,
+				cmd->link_modes.supported);
+	memcpy(cmd->link_modes.advertising, efx->link_advertising,
+	       sizeof(__ETHTOOL_DECLARE_LINK_MODE_MASK()));
 
 	BUILD_BUG_ON(MC_CMD_GET_LINK_IN_LEN != 0);
 	rc = efx_mcdi_rpc(efx, MC_CMD_GET_LINK, NULL, 0,
 			  outbuf, sizeof(outbuf), NULL);
 	if (rc)
 		return;
-	lp_advertising =
-		mcdi_to_ethtool_cap(phy_cfg->media,
-				    MCDI_DWORD(outbuf, GET_LINK_OUT_LP_CAP));
-
-	ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.lp_advertising,
-						lp_advertising);
+	mcdi_to_ethtool_linkset(phy_cfg->media,
+				MCDI_DWORD(outbuf, GET_LINK_OUT_LP_CAP),
+				cmd->link_modes.lp_advertising);
 }
 
 static int
@@ -546,29 +559,28 @@ efx_mcdi_phy_set_link_ksettings(struct efx_nic *efx,
 	struct efx_mcdi_phy_data *phy_cfg = efx->phy_data;
 	u32 caps;
 	int rc;
-	u32 advertising;
-
-	ethtool_convert_link_mode_to_legacy_u32(&advertising,
-						cmd->link_modes.advertising);
 
 	if (cmd->base.autoneg) {
-		caps = (ethtool_to_mcdi_cap(advertising) |
-			 1 << MC_CMD_PHY_CAP_AN_LBN);
+		caps = (ethtool_linkset_to_mcdi_cap(cmd->link_modes.advertising) |
+			1 << MC_CMD_PHY_CAP_AN_LBN);
 	} else if (cmd->base.duplex) {
 		switch (cmd->base.speed) {
-		case 10:    caps = 1 << MC_CMD_PHY_CAP_10FDX_LBN;    break;
-		case 100:   caps = 1 << MC_CMD_PHY_CAP_100FDX_LBN;   break;
-		case 1000:  caps = 1 << MC_CMD_PHY_CAP_1000FDX_LBN;  break;
-		case 10000: caps = 1 << MC_CMD_PHY_CAP_10000FDX_LBN; break;
-		case 40000: caps = 1 << MC_CMD_PHY_CAP_40000FDX_LBN; break;
-		default:    return -EINVAL;
+		case 10:     caps = 1 << MC_CMD_PHY_CAP_10FDX_LBN;     break;
+		case 100:    caps = 1 << MC_CMD_PHY_CAP_100FDX_LBN;    break;
+		case 1000:   caps = 1 << MC_CMD_PHY_CAP_1000FDX_LBN;   break;
+		case 10000:  caps = 1 << MC_CMD_PHY_CAP_10000FDX_LBN;  break;
+		case 40000:  caps = 1 << MC_CMD_PHY_CAP_40000FDX_LBN;  break;
+		case 100000: caps = 1 << MC_CMD_PHY_CAP_100000FDX_LBN; break;
+		case 25000:  caps = 1 << MC_CMD_PHY_CAP_25000FDX_LBN;  break;
+		case 50000:  caps = 1 << MC_CMD_PHY_CAP_50000FDX_LBN;  break;
+		default:     return -EINVAL;
 		}
 	} else {
 		switch (cmd->base.speed) {
-		case 10:    caps = 1 << MC_CMD_PHY_CAP_10HDX_LBN;    break;
-		case 100:   caps = 1 << MC_CMD_PHY_CAP_100HDX_LBN;   break;
-		case 1000:  caps = 1 << MC_CMD_PHY_CAP_1000HDX_LBN;  break;
-		default:    return -EINVAL;
+		case 10:     caps = 1 << MC_CMD_PHY_CAP_10HDX_LBN;     break;
+		case 100:    caps = 1 << MC_CMD_PHY_CAP_100HDX_LBN;    break;
+		case 1000:   caps = 1 << MC_CMD_PHY_CAP_1000HDX_LBN;   break;
+		default:     return -EINVAL;
 		}
 	}
 
@@ -578,11 +590,10 @@ efx_mcdi_phy_set_link_ksettings(struct efx_nic *efx,
 		return rc;
 
 	if (cmd->base.autoneg) {
-		efx_link_set_advertising(
-			efx, advertising | ADVERTISED_Autoneg);
+		efx_link_set_advertising(efx, cmd->link_modes.advertising);
 		phy_cfg->forced_cap = 0;
 	} else {
-		efx_link_set_advertising(efx, 0);
+		efx_link_clear_advertising(efx);
 		phy_cfg->forced_cap = caps;
 	}
 	return 0;
@@ -985,6 +996,9 @@ static unsigned int efx_mcdi_event_link_speed[] = {
 	[MCDI_EVENT_LINKCHANGE_SPEED_1G] = 1000,
 	[MCDI_EVENT_LINKCHANGE_SPEED_10G] = 10000,
 	[MCDI_EVENT_LINKCHANGE_SPEED_40G] = 40000,
+	[MCDI_EVENT_LINKCHANGE_SPEED_25G] = 25000,
+	[MCDI_EVENT_LINKCHANGE_SPEED_50G] = 50000,
+	[MCDI_EVENT_LINKCHANGE_SPEED_100G] = 100000,
 };
 
 void efx_mcdi_process_link_change(struct efx_nic *efx, efx_qword_t *ev)
@@ -1087,7 +1101,7 @@ static int efx_mcdi_mac_stats(struct efx_nic *efx,
 	int period = action == EFX_STATS_ENABLE ? 1000 : 0;
 	dma_addr_t dma_addr = efx->stats_buffer.dma_addr;
 	u32 dma_len = action != EFX_STATS_DISABLE ?
-		MC_CMD_MAC_NSTATS * sizeof(u64) : 0;
+		efx->num_mac_stats * sizeof(u64) : 0;
 
 	BUILD_BUG_ON(MC_CMD_MAC_STATS_OUT_DMA_LEN != 0);
 
@@ -1121,7 +1135,7 @@ void efx_mcdi_mac_start_stats(struct efx_nic *efx)
 {
 	__le64 *dma_stats = efx->stats_buffer.addr;
 
-	dma_stats[MC_CMD_MAC_GENERATION_END] = EFX_MC_STATS_GENERATION_INVALID;
+	dma_stats[efx->num_mac_stats - 1] = EFX_MC_STATS_GENERATION_INVALID;
 
 	efx_mcdi_mac_stats(efx, EFX_STATS_ENABLE, 0);
 }
@@ -1139,10 +1153,10 @@ void efx_mcdi_mac_pull_stats(struct efx_nic *efx)
 	__le64 *dma_stats = efx->stats_buffer.addr;
 	int attempts = EFX_MAC_STATS_WAIT_ATTEMPTS;
 
-	dma_stats[MC_CMD_MAC_GENERATION_END] = EFX_MC_STATS_GENERATION_INVALID;
+	dma_stats[efx->num_mac_stats - 1] = EFX_MC_STATS_GENERATION_INVALID;
 	efx_mcdi_mac_stats(efx, EFX_STATS_PULL, 0);
 
-	while (dma_stats[MC_CMD_MAC_GENERATION_END] ==
+	while (dma_stats[efx->num_mac_stats - 1] ==
 				EFX_MC_STATS_GENERATION_INVALID &&
 			attempts-- != 0)
 		udelay(EFX_MAC_STATS_WAIT_US);
@@ -1167,7 +1181,7 @@ int efx_mcdi_port_probe(struct efx_nic *efx)
 
 	/* Allocate buffer for stats */
 	rc = efx_nic_alloc_buffer(efx, &efx->stats_buffer,
-				  MC_CMD_MAC_NSTATS * sizeof(u64), GFP_KERNEL);
+				  efx->num_mac_stats * sizeof(u64), GFP_KERNEL);
 	if (rc)
 		return rc;
 	netif_dbg(efx, probe, efx->net_dev,
diff --git a/drivers/net/ethernet/sfc/net_driver.h b/drivers/net/ethernet/sfc/net_driver.h
index c0537ea06c9a..d20a8660ee48 100644
--- a/drivers/net/ethernet/sfc/net_driver.h
+++ b/drivers/net/ethernet/sfc/net_driver.h
@@ -191,6 +191,7 @@ struct efx_tx_buffer {
  *	Size of the region is efx_piobuf_size.
  * @piobuf_offset: Buffer offset to be specified in PIO descriptors
  * @initialised: Has hardware queue been initialised?
+ * @timestamping: Is timestamping enabled for this channel?
  * @handle_tso: TSO xmit preparation handler.  Sets up the TSO metadata and
  *	may also map tx data, depending on the nature of the TSO implementation.
  * @read_count: Current read pointer.
@@ -202,6 +203,10 @@ struct efx_tx_buffer {
  *	avoid cache-line ping-pong between the xmit path and the
  *	completion path.
  * @merge_events: Number of TX merged completion events
+ * @completed_desc_ptr: Most recent completed pointer - only used with
+ *      timestamping.
+ * @completed_timestamp_major: Top part of the most recent tx timestamp.
+ * @completed_timestamp_minor: Low part of the most recent tx timestamp.
  * @insert_count: Current insert pointer
  *	This is the number of buffers that have been added to the
  *	software ring.
@@ -247,6 +252,7 @@ struct efx_tx_queue {
 	void __iomem *piobuf;
 	unsigned int piobuf_offset;
 	bool initialised;
+	bool timestamping;
 
 	/* Function pointers used in the fast path. */
 	int (*handle_tso)(struct efx_tx_queue*, struct sk_buff*, bool *);
@@ -257,6 +263,9 @@ struct efx_tx_queue {
 	unsigned int merge_events;
 	unsigned int bytes_compl;
 	unsigned int pkts_compl;
+	unsigned int completed_desc_ptr;
+	u32 completed_timestamp_major;
+	u32 completed_timestamp_minor;
 
 	/* Members used only on the xmit path */
 	unsigned int insert_count ____cacheline_aligned_in_smp;
@@ -522,8 +531,12 @@ struct efx_msi_context {
  * @copy: Copy the channel state prior to reallocation.  May be %NULL if
  *	reallocation is not supported.
  * @receive_skb: Handle an skb ready to be passed to netif_receive_skb()
+ * @want_txqs: Determine whether this channel should have TX queues
+ *	created.  If %NULL, TX queues are not created.
  * @keep_eventq: Flag for whether event queue should be kept initialised
  *	while the device is stopped
+ * @want_pio: Flag for whether PIO buffers should be linked to this
+ *	channel's TX queues.
  */
 struct efx_channel_type {
 	void (*handle_no_channel)(struct efx_nic *);
@@ -532,7 +545,9 @@ struct efx_channel_type {
 	void (*get_name)(struct efx_channel *, char *buf, size_t len);
 	struct efx_channel *(*copy)(const struct efx_channel *);
 	bool (*receive_skb)(struct efx_channel *, struct sk_buff *);
+	bool (*want_txqs)(struct efx_channel *);
 	bool keep_eventq;
+	bool want_pio;
 };
 
 enum efx_led_mode {
@@ -708,6 +723,7 @@ struct vfdi_status;
  * @reset_work: Scheduled reset workitem
  * @membase_phys: Memory BAR value as physical address
  * @membase: Memory BAR value
+ * @vi_stride: step between per-VI registers / memory regions
  * @interrupt_mode: Interrupt mode
  * @timer_quantum_ns: Interrupt timer quantum, in nanoseconds
  * @timer_max_ns: Interrupt timer maximum value, in nanoseconds
@@ -734,6 +750,7 @@ struct vfdi_status;
  * @n_channels: Number of channels in use
  * @n_rx_channels: Number of channels used for RX (= number of RX queues)
  * @n_tx_channels: Number of channels used for TX
+ * @n_extra_tx_channels: Number of extra channels with TX queues
  * @rx_ip_align: RX DMA address offset to have IP header aligned in
  *	in accordance with NET_IP_ALIGN
  * @rx_dma_len: Current maximum RX DMA length
@@ -773,6 +790,8 @@ struct vfdi_status;
  * @port_initialized: Port initialized?
  * @net_dev: Operating system network device. Consider holding the rtnl lock
  * @fixed_features: Features which cannot be turned off
+ * @num_mac_stats: Number of MAC stats reported by firmware (MAC_STATS_NUM_STATS
+ *	field of %MC_CMD_GET_CAPABILITIES_V4 response, or %MC_CMD_MAC_NSTATS)
  * @stats_buffer: DMA buffer for statistics
  * @phy_type: PHY type
  * @phy_op: PHY interface
@@ -812,6 +831,7 @@ struct vfdi_status;
  * @vf_init_count: Number of VFs that have been fully initialised.
  * @vi_scale: log2 number of vnics per VF.
  * @ptp_data: PTP state data
+ * @ptp_warned: has this NIC seen and warned about unexpected PTP events?
  * @vpd_sn: Serial number read from VPD
  * @monitor_work: Hardware monitor workitem
  * @biu_lock: BIU (bus interface unit) lock
@@ -842,6 +862,8 @@ struct efx_nic {
 	resource_size_t membase_phys;
 	void __iomem *membase;
 
+	unsigned int vi_stride;
+
 	enum efx_int_mode interrupt_mode;
 	unsigned int timer_quantum_ns;
 	unsigned int timer_max_ns;
@@ -875,6 +897,7 @@ struct efx_nic {
 	unsigned rss_spread;
 	unsigned tx_channel_offset;
 	unsigned n_tx_channels;
+	unsigned n_extra_tx_channels;
 	unsigned int rx_ip_align;
 	unsigned int rx_dma_len;
 	unsigned int rx_buffer_order;
@@ -918,6 +941,7 @@ struct efx_nic {
 
 	netdev_features_t fixed_features;
 
+	u16 num_mac_stats;
 	struct efx_buffer stats_buffer;
 	u64 rx_nodesc_drops_total;
 	u64 rx_nodesc_drops_while_down;
@@ -930,7 +954,7 @@ struct efx_nic {
 	unsigned int mdio_bus;
 	enum efx_phy_mode phy_mode;
 
-	u32 link_advertising;
+	__ETHTOOL_DECLARE_LINK_MODE_MASK(link_advertising);
 	struct efx_link_state link_state;
 	unsigned int n_link_state_changes;
 
@@ -965,6 +989,7 @@ struct efx_nic {
 #endif
 
 	struct efx_ptp_data *ptp_data;
+	bool ptp_warned;
 
 	char *vpd_sn;
 
@@ -1154,7 +1179,7 @@ struct efx_udp_tunnel {
  */
 struct efx_nic_type {
 	bool is_vf;
-	unsigned int mem_bar;
+	unsigned int (*mem_bar)(struct efx_nic *efx);
 	unsigned int (*mem_map_size)(struct efx_nic *efx);
 	int (*probe)(struct efx_nic *efx);
 	void (*remove)(struct efx_nic *efx);
@@ -1355,8 +1380,8 @@ efx_get_tx_queue(struct efx_nic *efx, unsigned index, unsigned type)
 
 static inline bool efx_channel_has_tx_queues(struct efx_channel *channel)
 {
-	return channel->channel - channel->efx->tx_channel_offset <
-		channel->efx->n_tx_channels;
+	return channel->type && channel->type->want_txqs &&
+				channel->type->want_txqs(channel);
 }
 
 static inline struct efx_tx_queue *
diff --git a/drivers/net/ethernet/sfc/nic.h b/drivers/net/ethernet/sfc/nic.h
index 7b51b6371724..6549fc685a48 100644
--- a/drivers/net/ethernet/sfc/nic.h
+++ b/drivers/net/ethernet/sfc/nic.h
@@ -325,6 +325,30 @@ enum {
 	EF10_STAT_tx_bad,
 	EF10_STAT_tx_bad_bytes,
 	EF10_STAT_tx_overflow,
+	EF10_STAT_V1_COUNT,
+	EF10_STAT_fec_uncorrected_errors = EF10_STAT_V1_COUNT,
+	EF10_STAT_fec_corrected_errors,
+	EF10_STAT_fec_corrected_symbols_lane0,
+	EF10_STAT_fec_corrected_symbols_lane1,
+	EF10_STAT_fec_corrected_symbols_lane2,
+	EF10_STAT_fec_corrected_symbols_lane3,
+	EF10_STAT_ctpio_dmabuf_start,
+	EF10_STAT_ctpio_vi_busy_fallback,
+	EF10_STAT_ctpio_long_write_success,
+	EF10_STAT_ctpio_missing_dbell_fail,
+	EF10_STAT_ctpio_overflow_fail,
+	EF10_STAT_ctpio_underflow_fail,
+	EF10_STAT_ctpio_timeout_fail,
+	EF10_STAT_ctpio_noncontig_wr_fail,
+	EF10_STAT_ctpio_frm_clobber_fail,
+	EF10_STAT_ctpio_invalid_wr_fail,
+	EF10_STAT_ctpio_vi_clobber_fallback,
+	EF10_STAT_ctpio_unqualified_fallback,
+	EF10_STAT_ctpio_runt_fallback,
+	EF10_STAT_ctpio_success,
+	EF10_STAT_ctpio_fallback,
+	EF10_STAT_ctpio_poison,
+	EF10_STAT_ctpio_erase,
 	EF10_STAT_COUNT
 };
 
@@ -416,6 +440,7 @@ struct efx_ef10_nic_data {
 	struct efx_udp_tunnel udp_tunnels[16];
 	bool udp_tunnels_dirty;
 	struct mutex udp_tunnels_lock;
+	u64 licensed_features;
 };
 
 int efx_init_sriov(void);
@@ -424,6 +449,7 @@ void efx_fini_sriov(void);
 struct ethtool_ts_info;
 int efx_ptp_probe(struct efx_nic *efx, struct efx_channel *channel);
 void efx_ptp_defer_probe_with_channel(struct efx_nic *efx);
+struct efx_channel *efx_ptp_channel(struct efx_nic *efx);
 void efx_ptp_remove(struct efx_nic *efx);
 int efx_ptp_set_ts_config(struct efx_nic *efx, struct ifreq *ifr);
 int efx_ptp_get_ts_config(struct efx_nic *efx, struct ifreq *ifr);
@@ -447,6 +473,8 @@ static inline void efx_rx_skb_attach_timestamp(struct efx_channel *channel,
 }
 void efx_ptp_start_datapath(struct efx_nic *efx);
 void efx_ptp_stop_datapath(struct efx_nic *efx);
+bool efx_ptp_use_mac_tx_timestamps(struct efx_nic *efx);
+ktime_t efx_ptp_nic_to_kernel_time(struct efx_tx_queue *tx_queue);
 
 extern const struct efx_nic_type falcon_a1_nic_type;
 extern const struct efx_nic_type falcon_b0_nic_type;
diff --git a/drivers/net/ethernet/sfc/ptp.c b/drivers/net/ethernet/sfc/ptp.c
index caa89bf7603e..f21661532ed3 100644
--- a/drivers/net/ethernet/sfc/ptp.c
+++ b/drivers/net/ethernet/sfc/ptp.c
@@ -149,18 +149,14 @@ enum ptp_packet_state {
 /* Maximum parts-per-billion adjustment that is acceptable */
 #define MAX_PPB			1000000
 
-/* Number of bits required to hold the above */
-#define	MAX_PPB_BITS		20
-
-/* Number of extra bits allowed when calculating fractional ns.
- * EXTRA_BITS + MC_CMD_PTP_IN_ADJUST_BITS + MAX_PPB_BITS should
- * be less than 63.
- */
-#define	PPB_EXTRA_BITS		2
-
 /* Precalculate scale word to avoid long long division at runtime */
-#define	PPB_SCALE_WORD	((1LL << (PPB_EXTRA_BITS + MC_CMD_PTP_IN_ADJUST_BITS +\
-			MAX_PPB_BITS)) / 1000000000LL)
+/* This is equivalent to 2^66 / 10^9. */
+#define PPB_SCALE_WORD  ((1LL << (57)) / 1953125LL)
+
+/* How much to shift down after scaling to convert to FP40 */
+#define PPB_SHIFT_FP40		26
+/* ... and FP44. */
+#define PPB_SHIFT_FP44		22
 
 #define PTP_SYNC_ATTEMPTS	4
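
The new constant is easy to verify: since 10^9 == 2^9 * 1953125, the two forms named in the comment are the same number, and the per-format shifts recover the adapter's fixed-point formats. A sketch of the arithmetic:

	/*
	 * PPB_SCALE_WORD == (1LL << 57) / 1953125
	 *                == 2^57 / (10^9 / 2^9)
	 *                == 2^66 / 10^9
	 *
	 * so, up to truncation:
	 *   (ppb * PPB_SCALE_WORD) >> PPB_SHIFT_FP40  ~=  ppb * 2^40 / 10^9
	 *   (ppb * PPB_SCALE_WORD) >> PPB_SHIFT_FP44  ~=  ppb * 2^44 / 10^9
	 * i.e. a parts-per-billion value scaled into a 40- or 44-bit
	 * fractional-ns fixed-point word.
	 */
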
 
@@ -218,8 +214,8 @@ struct efx_ptp_timeset {
  * @channel: The PTP channel (Siena only)
  * @rx_ts_inline: Flag for whether RX timestamps are inline (else they are
  *	separate events)
- * @rxq: Receive queue (awaiting timestamps)
- * @txq: Transmit queue
+ * @rxq: Receive SKB queue (awaiting timestamps)
+ * @txq: Transmit SKB queue
  * @evt_list: List of MC receive events awaiting packets
  * @evt_free_list: List of free events
  * @evt_lock: Lock for manipulating evt_list and evt_free_list
@@ -233,19 +229,36 @@ struct efx_ptp_timeset {
  * @config: Current timestamp configuration
  * @enabled: PTP operation enabled
  * @mode: Mode in which PTP operating (PTP version)
- * @time_format: Time format supported by this NIC
  * @ns_to_nic_time: Function to convert from scalar nanoseconds to NIC time
  * @nic_to_kernel_time: Function to convert from NIC to kernel time
+ * @nic_time.minor_max: Wrap point for NIC minor times
+ * @nic_time.sync_event_diff_min: Minimum acceptable difference between time
+ * in packet prefix and last MCDI time sync event i.e. how much earlier than
+ * the last sync event time a packet timestamp can be.
+ * @nic_time.sync_event_diff_max: Maximum acceptable difference between time
+ * in packet prefix and last MCDI time sync event i.e. how much later than
+ * the last sync event time a packet timestamp can be.
+ * @nic_time.sync_event_minor_shift: Shift required to make minor time from
+ * field in MCDI time sync event.
  * @min_synchronisation_ns: Minimum acceptable corrected sync window
- * @ts_corrections.tx: Required driver correction of transmit timestamps
- * @ts_corrections.rx: Required driver correction of receive timestamps
+ * @capabilities: Capabilities flags from the NIC
+ * @ts_corrections.ptp_tx: Required driver correction of PTP packet transmit
+ *                         timestamps
+ * @ts_corrections.ptp_rx: Required driver correction of PTP packet receive
+ *                         timestamps
  * @ts_corrections.pps_out: PPS output error (information only)
  * @ts_corrections.pps_in: Required driver correction of PPS input timestamps
+ * @ts_corrections.general_tx: Required driver correction of general packet
+ *                             transmit timestamps
+ * @ts_corrections.general_rx: Required driver correction of general packet
+ *                             receive timestamps
  * @evt_frags: Partly assembled PTP events
  * @evt_frag_idx: Current fragment number
  * @evt_code: Last event code
  * @start: Address at which MC indicates ready for synchronisation
  * @host_time_pps: Host time at last PPS
+ * @adjfreq_ppb_shift: Shift required to convert scaled parts-per-billion
+ * frequency adjustment into a fixed point fractional nanosecond format.
  * @current_adjfreq: Current ppb adjustment.
  * @phc_clock: Pointer to registered phc device (if primary function)
  * @phc_clock_info: Registration structure for phc device
@@ -264,6 +277,7 @@ struct efx_ptp_timeset {
  * @oversize_sync_windows: Number of corrected sync windows that are too large
  * @rx_no_timestamp: Number of packets received without a timestamp.
  * @timeset: Last set of synchronisation statistics.
+ * @xmit_skb: Transmit SKB function.
  */
 struct efx_ptp_data {
 	struct efx_nic *efx;
@@ -284,22 +298,31 @@ struct efx_ptp_data {
 	struct hwtstamp_config config;
 	bool enabled;
 	unsigned int mode;
-	unsigned int time_format;
 	void (*ns_to_nic_time)(s64 ns, u32 *nic_major, u32 *nic_minor);
 	ktime_t (*nic_to_kernel_time)(u32 nic_major, u32 nic_minor,
 				      s32 correction);
+	struct {
+		u32 minor_max;
+		u32 sync_event_diff_min;
+		u32 sync_event_diff_max;
+		unsigned int sync_event_minor_shift;
+	} nic_time;
 	unsigned int min_synchronisation_ns;
+	unsigned int capabilities;
 	struct {
-		s32 tx;
-		s32 rx;
+		s32 ptp_tx;
+		s32 ptp_rx;
 		s32 pps_out;
 		s32 pps_in;
+		s32 general_tx;
+		s32 general_rx;
 	} ts_corrections;
 	efx_qword_t evt_frags[MAX_EVENT_FRAGS];
 	int evt_frag_idx;
 	int evt_code;
 	struct efx_buffer start;
 	struct pps_event_time host_time_pps;
+	unsigned int adjfreq_ppb_shift;
 	s64 current_adjfreq;
 	struct ptp_clock *phc_clock;
 	struct ptp_clock_info phc_clock_info;
@@ -319,6 +342,7 @@ struct efx_ptp_data {
 	unsigned int rx_no_timestamp;
 	struct efx_ptp_timeset
 	timeset[MC_CMD_PTP_OUT_SYNCHRONIZE_TIMESET_MAXNUM];
+	void (*xmit_skb)(struct efx_nic *efx, struct sk_buff *skb);
 };
 
 static int efx_phc_adjfreq(struct ptp_clock_info *ptp, s32 delta);
@@ -329,6 +353,24 @@ static int efx_phc_settime(struct ptp_clock_info *ptp,
 static int efx_phc_enable(struct ptp_clock_info *ptp,
 			  struct ptp_clock_request *request, int on);
 
+bool efx_ptp_use_mac_tx_timestamps(struct efx_nic *efx)
+{
+	struct efx_ef10_nic_data *nic_data = efx->nic_data;
+
+	return ((efx_nic_rev(efx) >= EFX_REV_HUNT_A0) &&
+		(nic_data->datapath_caps2 &
+		 (1 << MC_CMD_GET_CAPABILITIES_V2_OUT_TX_MAC_TIMESTAMPING_LBN)
+		));
+}
+
+/* PTP 'extra' channel is still a traffic channel, but we only create TX queues
+ * if PTP uses MAC TX timestamps, not if PTP uses the MC directly to transmit.
+ */
+static bool efx_ptp_want_txqs(struct efx_channel *channel)
+{
+	return efx_ptp_use_mac_tx_timestamps(channel->efx);
+}
+
 #define PTP_SW_STAT(ext_name, field_name)				\
 	{ #ext_name, 0, offsetof(struct efx_ptp_data, field_name) }
 #define PTP_MC_STAT(ext_name, mcdi_name)				\
@@ -471,6 +513,89 @@ static ktime_t efx_ptp_s27_to_ktime_correction(u32 nic_major, u32 nic_minor,
 	return efx_ptp_s27_to_ktime(nic_major, nic_minor);
 }
 
+/* For Medford2 platforms the time is in seconds and quarter nanoseconds. */
+static void efx_ptp_ns_to_s_qns(s64 ns, u32 *nic_major, u32 *nic_minor)
+{
+	struct timespec64 ts = ns_to_timespec64(ns);
+
+	*nic_major = (u32)ts.tv_sec;
+	*nic_minor = ts.tv_nsec * 4;
+}
+
+static ktime_t efx_ptp_s_qns_to_ktime_correction(u32 nic_major, u32 nic_minor,
+						 s32 correction)
+{
+	ktime_t kt;
+
+	nic_minor = DIV_ROUND_CLOSEST(nic_minor, 4);
+	correction = DIV_ROUND_CLOSEST(correction, 4);
+
+	kt = ktime_set(nic_major, nic_minor);
+
+	if (correction >= 0)
+		kt = ktime_add_ns(kt, (u64)correction);
+	else
+		kt = ktime_sub_ns(kt, (u64)-correction);
+	return kt;
+}
+
+struct efx_channel *efx_ptp_channel(struct efx_nic *efx)
+{
+	return efx->ptp_data ? efx->ptp_data->channel : NULL;
+}
+
+static u32 last_sync_timestamp_major(struct efx_nic *efx)
+{
+	struct efx_channel *channel = efx_ptp_channel(efx);
+	u32 major = 0;
+
+	if (channel)
+		major = channel->sync_timestamp_major;
+	return major;
+}
+
+/* The 8000 series and later can provide the time from the MAC, which is only
+ * 48 bits long and provides meta-information in the top 2 bits.
+ */
+static ktime_t
+efx_ptp_mac_nic_to_ktime_correction(struct efx_nic *efx,
+				    struct efx_ptp_data *ptp,
+				    u32 nic_major, u32 nic_minor,
+				    s32 correction)
+{
+	ktime_t kt = { 0 };
+
+	if (!(nic_major & 0x80000000)) {
+		WARN_ON_ONCE(nic_major >> 16);
+		/* Use the top bits from the latest sync event. */
+		nic_major &= 0xffff;
+		nic_major |= (last_sync_timestamp_major(efx) & 0xffff0000);
+
+		kt = ptp->nic_to_kernel_time(nic_major, nic_minor,
+					     correction);
+	}
+	return kt;
+}
+
+ktime_t efx_ptp_nic_to_kernel_time(struct efx_tx_queue *tx_queue)
+{
+	struct efx_nic *efx = tx_queue->efx;
+	struct efx_ptp_data *ptp = efx->ptp_data;
+	ktime_t kt;
+
+	if (efx_ptp_use_mac_tx_timestamps(efx))
+		kt = efx_ptp_mac_nic_to_ktime_correction(efx, ptp,
+				tx_queue->completed_timestamp_major,
+				tx_queue->completed_timestamp_minor,
+				ptp->ts_corrections.general_tx);
+	else
+		kt = ptp->nic_to_kernel_time(
+				tx_queue->completed_timestamp_major,
+				tx_queue->completed_timestamp_minor,
+				ptp->ts_corrections.general_tx);
+	return kt;
+}
+
 /* Get PTP attributes and set up time conversions */
 static int efx_ptp_get_attributes(struct efx_nic *efx)
 {
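
efx_ptp_mac_nic_to_ktime_correction() above splices the 16 valid major bits from the packet onto the high bits of the last sync event's major time. A worked example with illustrative values (flag bit 31 clear, so the timestamp is valid):

	/* last sync event major time:  0x0001234f
	 * nic_major from the packet:   0x0000234f   (only bits 15:0 valid)
	 *
	 * nic_major &= 0xffff;                   -> 0x0000234f
	 * nic_major |= 0x0001234f & 0xffff0000;  -> 0x0001234f
	 *
	 * The full 32-bit seconds count is recovered provided the packet and
	 * the last sync event fall within the same 16-bit major epoch.
	 */
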
@@ -502,31 +627,71 @@ static int efx_ptp_get_attributes(struct efx_nic *efx)
 		return rc;
 	}
 
-	if (fmt == MC_CMD_PTP_OUT_GET_ATTRIBUTES_SECONDS_27FRACTION) {
+	switch (fmt) {
+	case MC_CMD_PTP_OUT_GET_ATTRIBUTES_SECONDS_27FRACTION:
 		ptp->ns_to_nic_time = efx_ptp_ns_to_s27;
 		ptp->nic_to_kernel_time = efx_ptp_s27_to_ktime_correction;
-	} else if (fmt == MC_CMD_PTP_OUT_GET_ATTRIBUTES_SECONDS_NANOSECONDS) {
+		ptp->nic_time.minor_max = 1 << 27;
+		ptp->nic_time.sync_event_minor_shift = 19;
+		break;
+	case MC_CMD_PTP_OUT_GET_ATTRIBUTES_SECONDS_NANOSECONDS:
 		ptp->ns_to_nic_time = efx_ptp_ns_to_s_ns;
 		ptp->nic_to_kernel_time = efx_ptp_s_ns_to_ktime_correction;
-	} else {
+		ptp->nic_time.minor_max = 1000000000;
+		ptp->nic_time.sync_event_minor_shift = 22;
+		break;
+	case MC_CMD_PTP_OUT_GET_ATTRIBUTES_SECONDS_QTR_NANOSECONDS:
+		ptp->ns_to_nic_time = efx_ptp_ns_to_s_qns;
+		ptp->nic_to_kernel_time = efx_ptp_s_qns_to_ktime_correction;
+		ptp->nic_time.minor_max = 4000000000UL;
+		ptp->nic_time.sync_event_minor_shift = 24;
+		break;
+	default:
 		return -ERANGE;
 	}
 
-	ptp->time_format = fmt;
-
-	/* MC_CMD_PTP_OP_GET_ATTRIBUTES is an extended version of an older
-	 * operation MC_CMD_PTP_OP_GET_TIME_FORMAT that also returns a value
-	 * to use for the minimum acceptable corrected synchronization window.
+	/* Precalculate acceptable difference between the minor time in the
+	 * packet prefix and the last MCDI time sync event. We expect the
+	 * packet prefix timestamp to be after the sync event by up to one
+	 * sync event interval (0.25s), but we allow it to exceed this by a
+	 * fuzz factor (0.1s).
+	 */
+	ptp->nic_time.sync_event_diff_min = ptp->nic_time.minor_max
+		- (ptp->nic_time.minor_max / 10);
+	ptp->nic_time.sync_event_diff_max = (ptp->nic_time.minor_max / 4)
+		+ (ptp->nic_time.minor_max / 10);
+
+	/* MC_CMD_PTP_OP_GET_ATTRIBUTES has been extended twice from an older
+	 * operation MC_CMD_PTP_OP_GET_TIME_FORMAT. The function may now return
+	 * a value to use for the minimum acceptable corrected synchronization
+	 * window and may return further capabilities.
 	 * If we have the extra information store it. For older firmware that
 	 * does not implement the extended command use the default value.
 	 */
-	if (rc == 0 && out_len >= MC_CMD_PTP_OUT_GET_ATTRIBUTES_LEN)
+	if (rc == 0 &&
+	    out_len >= MC_CMD_PTP_OUT_GET_ATTRIBUTES_CAPABILITIES_OFST)
 		ptp->min_synchronisation_ns =
 			MCDI_DWORD(outbuf,
 				   PTP_OUT_GET_ATTRIBUTES_SYNC_WINDOW_MIN);
 	else
 		ptp->min_synchronisation_ns = DEFAULT_MIN_SYNCHRONISATION_NS;
 
+	if (rc == 0 &&
+	    out_len >= MC_CMD_PTP_OUT_GET_ATTRIBUTES_LEN)
+		ptp->capabilities = MCDI_DWORD(outbuf,
+					PTP_OUT_GET_ATTRIBUTES_CAPABILITIES);
+	else
+		ptp->capabilities = 0;
+
+	/* Set up the shift for conversion between frequency
+	 * adjustments in parts-per-billion and the fixed-point
+	 * fractional ns format that the adapter uses.
+	 */
+	if (ptp->capabilities & (1 << MC_CMD_PTP_OUT_GET_ATTRIBUTES_FP44_FREQ_ADJ_LBN))
+		ptp->adjfreq_ppb_shift = PPB_SHIFT_FP44;
+	else
+		ptp->adjfreq_ppb_shift = PPB_SHIFT_FP40;
+
 	return 0;
 }
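
For reference, the three time formats handled by the switch above differ as follows (values as set in the code):

	Format                     Minor unit   minor_max    sync_event_minor_shift
	SECONDS_27FRACTION         2^-27 s      1 << 27      19
	SECONDS_NANOSECONDS        1 ns         10^9         22
	SECONDS_QTR_NANOSECONDS    0.25 ns      4 * 10^9     24
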
 
@@ -534,8 +699,9 @@ static int efx_ptp_get_attributes(struct efx_nic *efx)
 static int efx_ptp_get_timestamp_corrections(struct efx_nic *efx)
 {
 	MCDI_DECLARE_BUF(inbuf, MC_CMD_PTP_IN_GET_TIMESTAMP_CORRECTIONS_LEN);
-	MCDI_DECLARE_BUF(outbuf, MC_CMD_PTP_OUT_GET_TIMESTAMP_CORRECTIONS_LEN);
+	MCDI_DECLARE_BUF(outbuf, MC_CMD_PTP_OUT_GET_TIMESTAMP_CORRECTIONS_V2_LEN);
 	int rc;
+	size_t out_len;
 
 	/* Get the timestamp corrections from the NIC. If this operation is
 	 * not supported (older NICs) then no correction is required.
@@ -545,21 +711,37 @@ static int efx_ptp_get_timestamp_corrections(struct efx_nic *efx)
 	MCDI_SET_DWORD(inbuf, PTP_IN_PERIPH_ID, 0);
 
 	rc = efx_mcdi_rpc_quiet(efx, MC_CMD_PTP, inbuf, sizeof(inbuf),
-				outbuf, sizeof(outbuf), NULL);
+				outbuf, sizeof(outbuf), &out_len);
 	if (rc == 0) {
-		efx->ptp_data->ts_corrections.tx = MCDI_DWORD(outbuf,
+		efx->ptp_data->ts_corrections.ptp_tx = MCDI_DWORD(outbuf,
 			PTP_OUT_GET_TIMESTAMP_CORRECTIONS_TRANSMIT);
-		efx->ptp_data->ts_corrections.rx = MCDI_DWORD(outbuf,
+		efx->ptp_data->ts_corrections.ptp_rx = MCDI_DWORD(outbuf,
 			PTP_OUT_GET_TIMESTAMP_CORRECTIONS_RECEIVE);
 		efx->ptp_data->ts_corrections.pps_out = MCDI_DWORD(outbuf,
 			PTP_OUT_GET_TIMESTAMP_CORRECTIONS_PPS_OUT);
 		efx->ptp_data->ts_corrections.pps_in = MCDI_DWORD(outbuf,
 			PTP_OUT_GET_TIMESTAMP_CORRECTIONS_PPS_IN);
+
+		if (out_len >= MC_CMD_PTP_OUT_GET_TIMESTAMP_CORRECTIONS_V2_LEN) {
+			efx->ptp_data->ts_corrections.general_tx = MCDI_DWORD(
+				outbuf,
+				PTP_OUT_GET_TIMESTAMP_CORRECTIONS_V2_GENERAL_TX);
+			efx->ptp_data->ts_corrections.general_rx = MCDI_DWORD(
+				outbuf,
+				PTP_OUT_GET_TIMESTAMP_CORRECTIONS_V2_GENERAL_RX);
+		} else {
+			efx->ptp_data->ts_corrections.general_tx =
+				efx->ptp_data->ts_corrections.ptp_tx;
+			efx->ptp_data->ts_corrections.general_rx =
+				efx->ptp_data->ts_corrections.ptp_rx;
+		}
 	} else if (rc == -EINVAL) {
-		efx->ptp_data->ts_corrections.tx = 0;
-		efx->ptp_data->ts_corrections.rx = 0;
+		efx->ptp_data->ts_corrections.ptp_tx = 0;
+		efx->ptp_data->ts_corrections.ptp_rx = 0;
 		efx->ptp_data->ts_corrections.pps_out = 0;
 		efx->ptp_data->ts_corrections.pps_in = 0;
+		efx->ptp_data->ts_corrections.general_tx = 0;
+		efx->ptp_data->ts_corrections.general_rx = 0;
 	} else {
 		efx_mcdi_display_error(efx, MC_CMD_PTP, sizeof(inbuf), outbuf,
 				       sizeof(outbuf), rc);
@@ -873,8 +1055,24 @@ static int efx_ptp_synchronize(struct efx_nic *efx, unsigned int num_readings)
 	return rc;
 }
 
+/* Transmit a PTP packet via the dedicated hardware timestamped queue. */
+static void efx_ptp_xmit_skb_queue(struct efx_nic *efx, struct sk_buff *skb)
+{
+	struct efx_ptp_data *ptp_data = efx->ptp_data;
+	struct efx_tx_queue *tx_queue;
+	u8 type = skb->ip_summed == CHECKSUM_PARTIAL ? EFX_TXQ_TYPE_OFFLOAD : 0;
+
+	tx_queue = &ptp_data->channel->tx_queue[type];
+	if (tx_queue && tx_queue->timestamping) {
+		efx_enqueue_skb(tx_queue, skb);
+	} else {
+		WARN_ONCE(1, "PTP channel has no timestamped tx queue\n");
+		dev_kfree_skb_any(skb);
+	}
+}
+
 /* Transmit a PTP packet, via the MCDI interface, to the wire. */
-static int efx_ptp_xmit_skb(struct efx_nic *efx, struct sk_buff *skb)
+static void efx_ptp_xmit_skb_mc(struct efx_nic *efx, struct sk_buff *skb)
 {
 	struct efx_ptp_data *ptp_data = efx->ptp_data;
 	struct skb_shared_hwtstamps timestamps;
@@ -910,16 +1108,16 @@ static int efx_ptp_xmit_skb(struct efx_nic *efx, struct sk_buff *skb)
 	timestamps.hwtstamp = ptp_data->nic_to_kernel_time(
 		MCDI_DWORD(txtime, PTP_OUT_TRANSMIT_MAJOR),
 		MCDI_DWORD(txtime, PTP_OUT_TRANSMIT_MINOR),
-		ptp_data->ts_corrections.tx);
+		ptp_data->ts_corrections.ptp_tx);
 
 	skb_tstamp_tx(skb, &timestamps);
 
 	rc = 0;
 
 fail:
-	dev_kfree_skb(skb);
+	dev_kfree_skb_any(skb);
 
-	return rc;
+	return;
 }
 
 static void efx_ptp_drop_time_expired_events(struct efx_nic *efx)
@@ -1189,7 +1387,7 @@ static void efx_ptp_worker(struct work_struct *work)
 	efx_ptp_process_events(efx, &tempq);
 
 	while ((skb = skb_dequeue(&ptp_data->txq)))
-		efx_ptp_xmit_skb(efx, skb);
+		ptp_data->xmit_skb(efx, skb);
 
 	while ((skb = __skb_dequeue(&tempq)))
 		efx_ptp_process_rx(efx, skb);
@@ -1239,6 +1437,14 @@ int efx_ptp_probe(struct efx_nic *efx, struct efx_channel *channel)
 		goto fail2;
 	}
 
+	if (efx_ptp_use_mac_tx_timestamps(efx)) {
+		ptp->xmit_skb = efx_ptp_xmit_skb_queue;
+		/* Request sync events on this channel. */
+		channel->sync_events_state = SYNC_EVENTS_QUIESCENT;
+	} else {
+		ptp->xmit_skb = efx_ptp_xmit_skb_mc;
+	}
+
 	INIT_WORK(&ptp->work, efx_ptp_worker);
 	ptp->config.flags = 0;
 	ptp->config.tx_type = HWTSTAMP_TX_OFF;
@@ -1303,11 +1509,21 @@ fail1:
 static int efx_ptp_probe_channel(struct efx_channel *channel)
 {
 	struct efx_nic *efx = channel->efx;
+	int rc;
 
 	channel->irq_moderation_us = 0;
 	channel->rx_queue.core_index = 0;
 
-	return efx_ptp_probe(efx, channel);
+	rc = efx_ptp_probe(efx, channel);
+	/* Failure to probe PTP is not fatal; this channel will just not be
+	 * used for anything.
+	 * In the case of EPERM, efx_ptp_probe will print its own message (in
+	 * efx_ptp_get_attributes()), so we don't need to.
+	 */
+	if (rc && rc != -EPERM)
+		netif_warn(efx, drv, efx->net_dev,
+			   "Failed to probe PTP, rc=%d\n", rc);
+	return 0;
 }
 
 void efx_ptp_remove(struct efx_nic *efx)
@@ -1332,6 +1548,7 @@ void efx_ptp_remove(struct efx_nic *efx)
 
 	efx_nic_free_buffer(efx, &efx->ptp_data->start);
 	kfree(efx->ptp_data);
+	efx->ptp_data = NULL;
 }
 
 static void efx_ptp_remove_channel(struct efx_channel *channel)
@@ -1548,6 +1765,17 @@ void efx_ptp_get_ts_info(struct efx_nic *efx, struct ethtool_ts_info *ts_info)
 	ts_info->so_timestamping |= (SOF_TIMESTAMPING_TX_HARDWARE |
 				     SOF_TIMESTAMPING_RX_HARDWARE |
 				     SOF_TIMESTAMPING_RAW_HARDWARE);
+	/* Check licensed features.  If we don't have the license for TX
+	 * timestamps, the NIC will not support them.
+	 */
+	if (efx_ptp_use_mac_tx_timestamps(efx)) {
+		struct efx_ef10_nic_data *nic_data = efx->nic_data;
+
+		if (!(nic_data->licensed_features &
+		      (1 << LICENSED_V3_FEATURES_TX_TIMESTAMPS_LBN)))
+			ts_info->so_timestamping &=
+				~SOF_TIMESTAMPING_TX_HARDWARE;
+	}
 	if (primary && primary->ptp_data && primary->ptp_data->phc_clock)
 		ts_info->phc_index =
 			ptp_clock_index(primary->ptp_data->phc_clock);
@@ -1627,7 +1855,7 @@ static void ptp_event_rx(struct efx_nic *efx, struct efx_ptp_data *ptp)
 		evt->hwtimestamp = efx->ptp_data->nic_to_kernel_time(
 			EFX_QWORD_FIELD(ptp->evt_frags[0], MCDI_EVENT_DATA),
 			EFX_QWORD_FIELD(ptp->evt_frags[1], MCDI_EVENT_DATA),
-			ptp->ts_corrections.rx);
+			ptp->ts_corrections.ptp_rx);
 		evt->expiry = jiffies + msecs_to_jiffies(PKT_EVENT_LIFETIME_MS);
 		list_add_tail(&evt->link, &ptp->evt_list);
 
@@ -1662,9 +1890,11 @@ void efx_ptp_event(struct efx_nic *efx, efx_qword_t *ev)
 	int code = EFX_QWORD_FIELD(*ev, MCDI_EVENT_CODE);
 
 	if (!ptp) {
-		if (net_ratelimit())
+		if (!efx->ptp_warned) {
 			netif_warn(efx, drv, efx->net_dev,
 				   "Received PTP event but PTP not set up\n");
+			efx->ptp_warned = true;
+		}
 		return;
 	}
 
@@ -1707,9 +1937,20 @@ void efx_ptp_event(struct efx_nic *efx, efx_qword_t *ev)
 
 void efx_time_sync_event(struct efx_channel *channel, efx_qword_t *ev)
 {
+	struct efx_nic *efx = channel->efx;
+	struct efx_ptp_data *ptp = efx->ptp_data;
+
+	/* When extracting the sync timestamp minor value, we should discard
+	 * the least significant two bits. These are not required in order
+	 * to reconstruct full-range timestamps and they are optionally used
+	 * to report status depending on the options supplied when subscribing
+	 * for sync events.
+	 */
 	channel->sync_timestamp_major = MCDI_EVENT_FIELD(*ev, PTP_TIME_MAJOR);
 	channel->sync_timestamp_minor =
-		MCDI_EVENT_FIELD(*ev, PTP_TIME_MINOR_26_19) << 19;
+		(MCDI_EVENT_FIELD(*ev, PTP_TIME_MINOR_MS_8BITS) & 0xFC)
+			<< ptp->nic_time.sync_event_minor_shift;
+
 	/* if sync events have been disabled then we want to silently ignore
 	 * this event, so throw away result.
 	 */
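
The sync event carries only the most significant 8 bits of the minor time, and the bottom two of those may be reused for status; masking with 0xFC and shifting by sync_event_minor_shift puts the surviving 6 bits back at the top of the minor field. For the 27-bit fraction format (shift 19), for example:

	/* event field (8 MS bits of minor time):  0xA7
	 * 0xA7 & 0xFC   -> 0xA4         (discard the two status bits)
	 * 0xA4 << 19    -> 0x05200000   (bits 26:21 of the minor time)
	 *
	 * The packet prefix later supplies the full-precision minor value;
	 * only the top bits need to come from the sync event.
	 */
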
@@ -1717,15 +1958,6 @@ void efx_time_sync_event(struct efx_channel *channel, efx_qword_t *ev)
 		       SYNC_EVENTS_VALID);
 }
 
-/* make some assumptions about the time representation rather than abstract it,
- * since we currently only support one type of inline timestamping and only on
- * EF10.
- */
-#define MINOR_TICKS_PER_SECOND 0x8000000
-/* Fuzz factor for sync events to be out of order with RX events */
-#define FUZZ (MINOR_TICKS_PER_SECOND / 10)
-#define EXPECTED_SYNC_EVENTS_PER_SECOND 4
-
 static inline u32 efx_rx_buf_timestamp_minor(struct efx_nic *efx, const u8 *eh)
 {
 #if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)
@@ -1743,31 +1975,33 @@ void __efx_rx_skb_attach_timestamp(struct efx_channel *channel,
 				   struct sk_buff *skb)
 {
 	struct efx_nic *efx = channel->efx;
+	struct efx_ptp_data *ptp = efx->ptp_data;
 	u32 pkt_timestamp_major, pkt_timestamp_minor;
 	u32 diff, carry;
 	struct skb_shared_hwtstamps *timestamps;
 
-	pkt_timestamp_minor = (efx_rx_buf_timestamp_minor(efx,
-							  skb_mac_header(skb)) +
-			       (u32) efx->ptp_data->ts_corrections.rx) &
-			      (MINOR_TICKS_PER_SECOND - 1);
+	if (channel->sync_events_state != SYNC_EVENTS_VALID)
+		return;
+
+	pkt_timestamp_minor = efx_rx_buf_timestamp_minor(efx, skb_mac_header(skb));
 
 	/* get the difference between the packet and sync timestamps,
 	 * modulo one second
 	 */
-	diff = (pkt_timestamp_minor - channel->sync_timestamp_minor) &
-		(MINOR_TICKS_PER_SECOND - 1);
+	diff = pkt_timestamp_minor - channel->sync_timestamp_minor;
+	if (pkt_timestamp_minor < channel->sync_timestamp_minor)
+		diff += ptp->nic_time.minor_max;
+
 	/* do we roll over a second boundary and need to carry the one? */
-	carry = channel->sync_timestamp_minor + diff > MINOR_TICKS_PER_SECOND ?
+	carry = (channel->sync_timestamp_minor >= ptp->nic_time.minor_max - diff) ?
 		1 : 0;
 
-	if (diff <= MINOR_TICKS_PER_SECOND / EXPECTED_SYNC_EVENTS_PER_SECOND +
-		    FUZZ) {
+	if (diff <= ptp->nic_time.sync_event_diff_max) {
 		/* packet is ahead of the sync event by a quarter of a second or
 		 * less (allowing for fuzz)
 		 */
 		pkt_timestamp_major = channel->sync_timestamp_major + carry;
-	} else if (diff >= MINOR_TICKS_PER_SECOND - FUZZ) {
+	} else if (diff >= ptp->nic_time.sync_event_diff_min) {
 		/* packet is behind the sync event but within the fuzz factor.
 		 * This means the RX packet and sync event crossed as they were
 		 * placed on the event queue, which can sometimes happen.
@@ -1789,7 +2023,9 @@ void __efx_rx_skb_attach_timestamp(struct efx_channel *channel,
 	/* attach the timestamps to the skb */
 	timestamps = skb_hwtstamps(skb);
 	timestamps->hwtstamp =
-		efx_ptp_s27_to_ktime(pkt_timestamp_major, pkt_timestamp_minor);
+		ptp->nic_to_kernel_time(pkt_timestamp_major,
+					pkt_timestamp_minor,
+					ptp->ts_corrections.general_rx);
 }
 
 static int efx_phc_adjfreq(struct ptp_clock_info *ptp, s32 delta)
@@ -1807,9 +2043,10 @@ static int efx_phc_adjfreq(struct ptp_clock_info *ptp, s32 delta)
 	else if (delta < -MAX_PPB)
 		delta = -MAX_PPB;
 
-	/* Convert ppb to fixed point ns. */
-	adjustment_ns = (((s64)delta * PPB_SCALE_WORD) >>
-			 (PPB_EXTRA_BITS + MAX_PPB_BITS));
+	/* Convert ppb to fixed point ns taking care to round correctly. */
+	adjustment_ns = ((s64)delta * PPB_SCALE_WORD +
+			 (1 << (ptp_data->adjfreq_ppb_shift - 1))) >>
+			ptp_data->adjfreq_ppb_shift;
 
 	MCDI_SET_DWORD(inadj, PTP_IN_OP, MC_CMD_PTP_OP_ADJUST);
 	MCDI_SET_DWORD(inadj, PTP_IN_PERIPH_ID, 0);
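
The added (1 << (shift - 1)) term makes the conversion round to nearest instead of truncating towards zero. A quick check with shift = 26:

	/* value = 3 << 25, i.e. 1.5 in units of 2^26
	 *
	 * truncating:  (3 << 25) >> 26               == 1
	 * rounding:   ((3 << 25) + (1 << 25)) >> 26  == (1 << 27) >> 26 == 2
	 */
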
@@ -1916,6 +2153,7 @@ static const struct efx_channel_type efx_ptp_channel_type = {
 	.get_name		= efx_ptp_get_channel_name,
 	/* no copy operation; there is no need to reallocate this channel */
 	.receive_skb		= efx_ptp_rx,
+	.want_txqs		= efx_ptp_want_txqs,
 	.keep_eventq		= false,
 };
 
diff --git a/drivers/net/ethernet/sfc/siena.c b/drivers/net/ethernet/sfc/siena.c
index a617f657eae3..ae8645ae4492 100644
--- a/drivers/net/ethernet/sfc/siena.c
+++ b/drivers/net/ethernet/sfc/siena.c
@@ -242,6 +242,14 @@ static int siena_dimension_resources(struct efx_nic *efx)
 	return 0;
 }
 
+/* On all Falcon-architecture NICs, PFs use BAR 0 for I/O space and BAR 2(&3)
+ * for memory.
+ */
+static unsigned int siena_mem_bar(struct efx_nic *efx)
+{
+	return 2;
+}
+
 static unsigned int siena_mem_map_size(struct efx_nic *efx)
 {
 	return FR_CZ_MC_TREG_SMEM +
@@ -547,7 +555,7 @@ static int siena_try_update_nic_stats(struct efx_nic *efx)
 
 	dma_stats = efx->stats_buffer.addr;
 
-	generation_end = dma_stats[MC_CMD_MAC_GENERATION_END];
+	generation_end = dma_stats[efx->num_mac_stats - 1];
 	if (generation_end == EFX_MC_STATS_GENERATION_INVALID)
 		return 0;
 	rmb();
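
The generation-end word is now simply the last u64 of the (variable-sized) stats buffer. The surrounding code, not shown in this hunk, uses it like a seqcount to obtain a consistent snapshot; a sketch of the pattern, with names as in the driver:

	__le64 *dma_stats = efx->stats_buffer.addr;
	__le64 generation_start, generation_end;

	generation_end = dma_stats[efx->num_mac_stats - 1];
	if (generation_end == EFX_MC_STATS_GENERATION_INVALID)
		return 0;		/* firmware has not written stats yet */
	rmb();				/* read the marker before the stats */
	/* ... copy or accumulate the statistics here ... */
	rmb();
	generation_start = dma_stats[MC_CMD_MAC_GENERATION_START];
	if (generation_start != generation_end)
		return -EAGAIN;		/* firmware rewrote them mid-copy */
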
@@ -950,7 +958,7 @@ fail:
 
 const struct efx_nic_type siena_a0_nic_type = {
 	.is_vf = false,
-	.mem_bar = EFX_MEM_BAR,
+	.mem_bar = siena_mem_bar,
 	.mem_map_size = siena_mem_map_size,
 	.probe = siena_probe_nic,
 	.remove = siena_remove_nic,
diff --git a/drivers/net/ethernet/sfc/tx.c b/drivers/net/ethernet/sfc/tx.c
index 9937a2450e57..cece961f2e82 100644
--- a/drivers/net/ethernet/sfc/tx.c
+++ b/drivers/net/ethernet/sfc/tx.c
@@ -77,9 +77,23 @@ static void efx_dequeue_buffer(struct efx_tx_queue *tx_queue,
 	}
 
 	if (buffer->flags & EFX_TX_BUF_SKB) {
+		struct sk_buff *skb = (struct sk_buff *)buffer->skb;
+
 		EFX_WARN_ON_PARANOID(!pkts_compl || !bytes_compl);
 		(*pkts_compl)++;
-		(*bytes_compl) += buffer->skb->len;
+		(*bytes_compl) += skb->len;
+		if (tx_queue->timestamping &&
+		    (tx_queue->completed_timestamp_major ||
+		     tx_queue->completed_timestamp_minor)) {
+			struct skb_shared_hwtstamps hwtstamp;
+
+			hwtstamp.hwtstamp =
+				efx_ptp_nic_to_kernel_time(tx_queue);
+			skb_tstamp_tx(skb, &hwtstamp);
+
+			tx_queue->completed_timestamp_major = 0;
+			tx_queue->completed_timestamp_minor = 0;
+		}
 		dev_consume_skb_any((struct sk_buff *)buffer->skb);
 		netif_vdbg(tx_queue->efx, tx_done, tx_queue->efx->net_dev,
 			   "TX queue %d transmission id %x complete\n",
@@ -828,6 +842,11 @@ void efx_init_tx_queue(struct efx_tx_queue *tx_queue)
 	tx_queue->old_read_count = 0;
 	tx_queue->empty_read_count = 0 | EFX_EMPTY_COUNT_VALID;
 	tx_queue->xmit_more_available = false;
+	tx_queue->timestamping = (efx_ptp_use_mac_tx_timestamps(efx) &&
+				  tx_queue->channel == efx_ptp_channel(efx));
+	tx_queue->completed_desc_ptr = tx_queue->ptr_mask;
+	tx_queue->completed_timestamp_major = 0;
+	tx_queue->completed_timestamp_minor = 0;
 
 	/* Set up default function pointers. These may get replaced by
 	 * efx_nic_init_tx() based off NIC/queue capabilities.
diff --git a/drivers/net/ethernet/socionext/Kconfig b/drivers/net/ethernet/socionext/Kconfig
new file mode 100644
index 000000000000..6bcfe27fc560
--- /dev/null
+++ b/drivers/net/ethernet/socionext/Kconfig
@@ -0,0 +1,34 @@
+config NET_VENDOR_SOCIONEXT
+	bool "Socionext ethernet drivers"
+	default y
+	---help---
+	  Option to select ethernet drivers for Socionext platforms.
+
+	  Note that the answer to this question doesn't directly affect the
+	  kernel: saying N will just cause the configurator to skip all
+	  the questions about Socionext devices. If you say Y, you will be asked
+	  for your specific card in the following questions.
+
+if NET_VENDOR_SOCIONEXT
+
+config SNI_AVE
+	tristate "Socionext AVE ethernet support"
+	depends on (ARCH_UNIPHIER || COMPILE_TEST) && OF
+	select PHYLIB
+	---help---
+	  Driver for gigabit ethernet MACs, called AVE, in the
+	  Socionext UniPhier family.
+
+config SNI_NETSEC
+	tristate "Socionext NETSEC ethernet support"
+	depends on (ARCH_SYNQUACER || COMPILE_TEST) && OF
+	select PHYLIB
+	select MII
+	---help---
+	  Enable this option to add support for the SocioNext NetSec Gigabit
+	  Ethernet controller + PHY, as found on the Synquacer SC2A11 SoC.
+
+	  To compile this driver as a module, choose M here: the module will be
+	  called netsec.  If unsure, say N.
+
+endif #NET_VENDOR_SOCIONEXT
diff --git a/drivers/net/ethernet/socionext/Makefile b/drivers/net/ethernet/socionext/Makefile
new file mode 100644
index 000000000000..7fd837a999fd
--- /dev/null
+++ b/drivers/net/ethernet/socionext/Makefile
@@ -0,0 +1,6 @@
+# SPDX-License-Identifier: GPL-2.0
+#
+# Makefile for all ethernet ip drivers on Socionext platforms
+#
+obj-$(CONFIG_SNI_AVE) += sni_ave.o
+obj-$(CONFIG_SNI_NETSEC) += netsec.o
diff --git a/drivers/net/ethernet/socionext/netsec.c b/drivers/net/ethernet/socionext/netsec.c
new file mode 100644
index 000000000000..f4c0b02ddad8
--- /dev/null
+++ b/drivers/net/ethernet/socionext/netsec.c
@@ -0,0 +1,1777 @@
+// SPDX-License-Identifier: GPL-2.0+
+
+#include <linux/types.h>
+#include <linux/clk.h>
+#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
+#include <linux/acpi.h>
+#include <linux/of_mdio.h>
+#include <linux/etherdevice.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+
+#include <net/tcp.h>
+#include <net/ip6_checksum.h>
+
+#define NETSEC_REG_SOFT_RST			0x104
+#define NETSEC_REG_COM_INIT			0x120
+
+#define NETSEC_REG_TOP_STATUS			0x200
+#define NETSEC_IRQ_RX				BIT(1)
+#define NETSEC_IRQ_TX				BIT(0)
+
+#define NETSEC_REG_TOP_INTEN			0x204
+#define NETSEC_REG_INTEN_SET			0x234
+#define NETSEC_REG_INTEN_CLR			0x238
+
+#define NETSEC_REG_NRM_TX_STATUS		0x400
+#define NETSEC_REG_NRM_TX_INTEN			0x404
+#define NETSEC_REG_NRM_TX_INTEN_SET		0x428
+#define NETSEC_REG_NRM_TX_INTEN_CLR		0x42c
+#define NRM_TX_ST_NTOWNR	BIT(17)
+#define NRM_TX_ST_TR_ERR	BIT(16)
+#define NRM_TX_ST_TXDONE	BIT(15)
+#define NRM_TX_ST_TMREXP	BIT(14)
+
+#define NETSEC_REG_NRM_RX_STATUS		0x440
+#define NETSEC_REG_NRM_RX_INTEN			0x444
+#define NETSEC_REG_NRM_RX_INTEN_SET		0x468
+#define NETSEC_REG_NRM_RX_INTEN_CLR		0x46c
+#define NRM_RX_ST_RC_ERR	BIT(16)
+#define NRM_RX_ST_PKTCNT	BIT(15)
+#define NRM_RX_ST_TMREXP	BIT(14)
+
+#define NETSEC_REG_PKT_CMD_BUF			0xd0
+
+#define NETSEC_REG_CLK_EN			0x100
+
+#define NETSEC_REG_PKT_CTRL			0x140
+
+#define NETSEC_REG_DMA_TMR_CTRL			0x20c
+#define NETSEC_REG_F_TAIKI_MC_VER		0x22c
+#define NETSEC_REG_F_TAIKI_VER			0x230
+#define NETSEC_REG_DMA_HM_CTRL			0x214
+#define NETSEC_REG_DMA_MH_CTRL			0x220
+#define NETSEC_REG_ADDR_DIS_CORE		0x218
+#define NETSEC_REG_DMAC_HM_CMD_BUF		0x210
+#define NETSEC_REG_DMAC_MH_CMD_BUF		0x21c
+
+#define NETSEC_REG_NRM_TX_PKTCNT		0x410
+
+#define NETSEC_REG_NRM_TX_DONE_PKTCNT		0x414
+#define NETSEC_REG_NRM_TX_DONE_TXINT_PKTCNT	0x418
+
+#define NETSEC_REG_NRM_TX_TMR			0x41c
+
+#define NETSEC_REG_NRM_RX_PKTCNT		0x454
+#define NETSEC_REG_NRM_RX_RXINT_PKTCNT		0x458
+#define NETSEC_REG_NRM_TX_TXINT_TMR		0x420
+#define NETSEC_REG_NRM_RX_RXINT_TMR		0x460
+
+#define NETSEC_REG_NRM_RX_TMR			0x45c
+
+#define NETSEC_REG_NRM_TX_DESC_START_UP		0x434
+#define NETSEC_REG_NRM_TX_DESC_START_LW		0x408
+#define NETSEC_REG_NRM_RX_DESC_START_UP		0x474
+#define NETSEC_REG_NRM_RX_DESC_START_LW		0x448
+
+#define NETSEC_REG_NRM_TX_CONFIG		0x430
+#define NETSEC_REG_NRM_RX_CONFIG		0x470
+
+#define MAC_REG_STATUS				0x1024
+#define MAC_REG_DATA				0x11c0
+#define MAC_REG_CMD				0x11c4
+#define MAC_REG_FLOW_TH				0x11cc
+#define MAC_REG_INTF_SEL			0x11d4
+#define MAC_REG_DESC_INIT			0x11fc
+#define MAC_REG_DESC_SOFT_RST			0x1204
+#define NETSEC_REG_MODE_TRANS_COMP_STATUS	0x500
+
+#define GMAC_REG_MCR				0x0000
+#define GMAC_REG_MFFR				0x0004
+#define GMAC_REG_GAR				0x0010
+#define GMAC_REG_GDR				0x0014
+#define GMAC_REG_FCR				0x0018
+#define GMAC_REG_BMR				0x1000
+#define GMAC_REG_RDLAR				0x100c
+#define GMAC_REG_TDLAR				0x1010
+#define GMAC_REG_OMR				0x1018
+
+#define MHZ(n)		((n) * 1000 * 1000)
+
+#define NETSEC_TX_SHIFT_OWN_FIELD		31
+#define NETSEC_TX_SHIFT_LD_FIELD		30
+#define NETSEC_TX_SHIFT_DRID_FIELD		24
+#define NETSEC_TX_SHIFT_PT_FIELD		21
+#define NETSEC_TX_SHIFT_TDRID_FIELD		16
+#define NETSEC_TX_SHIFT_CC_FIELD		15
+#define NETSEC_TX_SHIFT_FS_FIELD		9
+#define NETSEC_TX_LAST				8
+#define NETSEC_TX_SHIFT_CO			7
+#define NETSEC_TX_SHIFT_SO			6
+#define NETSEC_TX_SHIFT_TRS_FIELD		4
+
+#define NETSEC_RX_PKT_OWN_FIELD			31
+#define NETSEC_RX_PKT_LD_FIELD			30
+#define NETSEC_RX_PKT_SDRID_FIELD		24
+#define NETSEC_RX_PKT_FR_FIELD			23
+#define NETSEC_RX_PKT_ER_FIELD			21
+#define NETSEC_RX_PKT_ERR_FIELD			16
+#define NETSEC_RX_PKT_TDRID_FIELD		12
+#define NETSEC_RX_PKT_FS_FIELD			9
+#define NETSEC_RX_PKT_LS_FIELD			8
+#define NETSEC_RX_PKT_CO_FIELD			6
+
+#define NETSEC_RX_PKT_ERR_MASK			3
+
+#define NETSEC_MAX_TX_PKT_LEN			1518
+#define NETSEC_MAX_TX_JUMBO_PKT_LEN		9018
+
+#define NETSEC_RING_GMAC			15
+#define NETSEC_RING_MAX				2
+
+#define NETSEC_TCP_SEG_LEN_MAX			1460
+#define NETSEC_TCP_JUMBO_SEG_LEN_MAX		8960
+
+#define NETSEC_RX_CKSUM_NOTAVAIL		0
+#define NETSEC_RX_CKSUM_OK			1
+#define NETSEC_RX_CKSUM_NG			2
+
+#define NETSEC_TOP_IRQ_REG_CODE_LOAD_END	BIT(20)
+#define NETSEC_IRQ_TRANSITION_COMPLETE		BIT(4)
+
+#define NETSEC_MODE_TRANS_COMP_IRQ_N2T		BIT(20)
+#define NETSEC_MODE_TRANS_COMP_IRQ_T2N		BIT(19)
+
+#define NETSEC_INT_PKTCNT_MAX			2047
+
+#define NETSEC_FLOW_START_TH_MAX		95
+#define NETSEC_FLOW_STOP_TH_MAX			95
+#define NETSEC_FLOW_PAUSE_TIME_MIN		5
+
+#define NETSEC_CLK_EN_REG_DOM_ALL		0x3f
+
+#define NETSEC_PKT_CTRL_REG_MODE_NRM		BIT(28)
+#define NETSEC_PKT_CTRL_REG_EN_JUMBO		BIT(27)
+#define NETSEC_PKT_CTRL_REG_LOG_CHKSUM_ER	BIT(3)
+#define NETSEC_PKT_CTRL_REG_LOG_HD_INCOMPLETE	BIT(2)
+#define NETSEC_PKT_CTRL_REG_LOG_HD_ER		BIT(1)
+#define NETSEC_PKT_CTRL_REG_DRP_NO_MATCH	BIT(0)
+
+#define NETSEC_CLK_EN_REG_DOM_G			BIT(5)
+#define NETSEC_CLK_EN_REG_DOM_C			BIT(1)
+#define NETSEC_CLK_EN_REG_DOM_D			BIT(0)
+
+#define NETSEC_COM_INIT_REG_DB			BIT(2)
+#define NETSEC_COM_INIT_REG_CLS			BIT(1)
+#define NETSEC_COM_INIT_REG_ALL			(NETSEC_COM_INIT_REG_CLS | \
+						 NETSEC_COM_INIT_REG_DB)
+
+#define NETSEC_SOFT_RST_REG_RESET		0
+#define NETSEC_SOFT_RST_REG_RUN			BIT(31)
+
+#define NETSEC_DMA_CTRL_REG_STOP		1
+#define MH_CTRL__MODE_TRANS			BIT(20)
+
+#define NETSEC_GMAC_CMD_ST_READ			0
+#define NETSEC_GMAC_CMD_ST_WRITE		BIT(28)
+#define NETSEC_GMAC_CMD_ST_BUSY			BIT(31)
+
+#define NETSEC_GMAC_BMR_REG_COMMON		0x00412080
+#define NETSEC_GMAC_BMR_REG_RESET		0x00020181
+#define NETSEC_GMAC_BMR_REG_SWR			0x00000001
+
+#define NETSEC_GMAC_OMR_REG_ST			BIT(13)
+#define NETSEC_GMAC_OMR_REG_SR			BIT(1)
+
+#define NETSEC_GMAC_MCR_REG_IBN			BIT(30)
+#define NETSEC_GMAC_MCR_REG_CST			BIT(25)
+#define NETSEC_GMAC_MCR_REG_JE			BIT(20)
+#define NETSEC_MCR_PS				BIT(15)
+#define NETSEC_GMAC_MCR_REG_FES			BIT(14)
+#define NETSEC_GMAC_MCR_REG_FULL_DUPLEX_COMMON	0x0000280c
+#define NETSEC_GMAC_MCR_REG_HALF_DUPLEX_COMMON	0x0001a00c
+
+#define NETSEC_FCR_RFE				BIT(2)
+#define NETSEC_FCR_TFE				BIT(1)
+
+#define NETSEC_GMAC_GAR_REG_GW			BIT(1)
+#define NETSEC_GMAC_GAR_REG_GB			BIT(0)
+
+#define NETSEC_GMAC_GAR_REG_SHIFT_PA		11
+#define NETSEC_GMAC_GAR_REG_SHIFT_GR		6
+#define GMAC_REG_SHIFT_CR_GAR			2
+
+#define NETSEC_GMAC_GAR_REG_CR_25_35_MHZ	2
+#define NETSEC_GMAC_GAR_REG_CR_35_60_MHZ	3
+#define NETSEC_GMAC_GAR_REG_CR_60_100_MHZ	0
+#define NETSEC_GMAC_GAR_REG_CR_100_150_MHZ	1
+#define NETSEC_GMAC_GAR_REG_CR_150_250_MHZ	4
+#define NETSEC_GMAC_GAR_REG_CR_250_300_MHZ	5
+
+#define NETSEC_GMAC_RDLAR_REG_COMMON		0x18000
+#define NETSEC_GMAC_TDLAR_REG_COMMON		0x1c000
+
+#define NETSEC_REG_NETSEC_VER_F_TAIKI		0x50000
+
+#define NETSEC_REG_DESC_RING_CONFIG_CFG_UP	BIT(31)
+#define NETSEC_REG_DESC_RING_CONFIG_CH_RST	BIT(30)
+#define NETSEC_REG_DESC_TMR_MODE		4
+#define NETSEC_REG_DESC_ENDIAN			0
+
+#define NETSEC_MAC_DESC_SOFT_RST_SOFT_RST	1
+#define NETSEC_MAC_DESC_INIT_REG_INIT		1
+
+#define NETSEC_EEPROM_MAC_ADDRESS		0x00
+#define NETSEC_EEPROM_HM_ME_ADDRESS_H		0x08
+#define NETSEC_EEPROM_HM_ME_ADDRESS_L		0x0C
+#define NETSEC_EEPROM_HM_ME_SIZE		0x10
+#define NETSEC_EEPROM_MH_ME_ADDRESS_H		0x14
+#define NETSEC_EEPROM_MH_ME_ADDRESS_L		0x18
+#define NETSEC_EEPROM_MH_ME_SIZE		0x1C
+#define NETSEC_EEPROM_PKT_ME_ADDRESS		0x20
+#define NETSEC_EEPROM_PKT_ME_SIZE		0x24
+
+#define DESC_NUM	128
+#define NAPI_BUDGET	(DESC_NUM / 2)
+
+#define DESC_SZ	sizeof(struct netsec_de)
+
+#define NETSEC_F_NETSEC_VER_MAJOR_NUM(x)	((x) & 0xffff0000)
+
+enum ring_id {
+	NETSEC_RING_TX = 0,
+	NETSEC_RING_RX
+};
+
+struct netsec_desc {
+	struct sk_buff *skb;
+	dma_addr_t dma_addr;
+	void *addr;
+	u16 len;
+};
+
+struct netsec_desc_ring {
+	dma_addr_t desc_dma;
+	struct netsec_desc *desc;
+	void *vaddr;
+	u16 pkt_cnt;
+	u16 head, tail;
+};
+
+struct netsec_priv {
+	struct netsec_desc_ring desc_ring[NETSEC_RING_MAX];
+	struct ethtool_coalesce et_coalesce;
+	spinlock_t reglock; /* protect reg access */
+	struct napi_struct napi;
+	phy_interface_t phy_interface;
+	struct net_device *ndev;
+	struct device_node *phy_np;
+	struct phy_device *phydev;
+	struct mii_bus *mii_bus;
+	void __iomem *ioaddr;
+	void __iomem *eeprom_base;
+	struct device *dev;
+	struct clk *clk;
+	u32 msg_enable;
+	u32 freq;
+	bool rx_cksum_offload_flag;
+};
+
+struct netsec_de { /* Netsec Descriptor layout */
+	u32 attr;
+	u32 data_buf_addr_up;
+	u32 data_buf_addr_lw;
+	u32 buf_len_info;
+};
+
+struct netsec_tx_pkt_ctrl {
+	u16 tcp_seg_len;
+	bool tcp_seg_offload_flag;
+	bool cksum_offload_flag;
+};
+
+struct netsec_rx_pkt_info {
+	int rx_cksum_result;
+	int err_code;
+	bool err_flag;
+};
+
+static void netsec_write(struct netsec_priv *priv, u32 reg_addr, u32 val)
+{
+	writel(val, priv->ioaddr + reg_addr);
+}
+
+static u32 netsec_read(struct netsec_priv *priv, u32 reg_addr)
+{
+	return readl(priv->ioaddr + reg_addr);
+}
+
+/************* MDIO BUS OPS FOLLOW *************/
+
+#define TIMEOUT_SPINS_MAC		1000
+#define TIMEOUT_SECONDARY_MS_MAC	100
+
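+/* Map the PHY reference clock rate to the MDC clock-range selector that is
+ * programmed into the CR field of the GMAC address register (GAR).
+ */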
+static u32 netsec_clk_type(u32 freq)
+{
+	if (freq < MHZ(35))
+		return NETSEC_GMAC_GAR_REG_CR_25_35_MHZ;
+	if (freq < MHZ(60))
+		return NETSEC_GMAC_GAR_REG_CR_35_60_MHZ;
+	if (freq < MHZ(100))
+		return NETSEC_GMAC_GAR_REG_CR_60_100_MHZ;
+	if (freq < MHZ(150))
+		return NETSEC_GMAC_GAR_REG_CR_100_150_MHZ;
+	if (freq < MHZ(250))
+		return NETSEC_GMAC_GAR_REG_CR_150_250_MHZ;
+
+	return NETSEC_GMAC_GAR_REG_CR_250_300_MHZ;
+}
+
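+/* Busy-wait helper: spin up to TIMEOUT_SPINS_MAC iterations first, then
+ * fall back to up to TIMEOUT_SECONDARY_MS_MAC sleeps of 1-2ms each before
+ * giving up.
+ */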
+static int netsec_wait_while_busy(struct netsec_priv *priv, u32 addr, u32 mask)
+{
+	u32 timeout = TIMEOUT_SPINS_MAC;
+
+	while (--timeout && netsec_read(priv, addr) & mask)
+		cpu_relax();
+	if (timeout)
+		return 0;
+
+	timeout = TIMEOUT_SECONDARY_MS_MAC;
+	while (--timeout && netsec_read(priv, addr) & mask)
+		usleep_range(1000, 2000);
+
+	if (timeout)
+		return 0;
+
+	netdev_WARN(priv->ndev, "%s: timeout\n", __func__);
+
+	return -ETIMEDOUT;
+}
+
+static int netsec_mac_write(struct netsec_priv *priv, u32 addr, u32 value)
+{
+	netsec_write(priv, MAC_REG_DATA, value);
+	netsec_write(priv, MAC_REG_CMD, addr | NETSEC_GMAC_CMD_ST_WRITE);
+	return netsec_wait_while_busy(priv,
+				      MAC_REG_CMD, NETSEC_GMAC_CMD_ST_BUSY);
+}
+
+static int netsec_mac_read(struct netsec_priv *priv, u32 addr, u32 *read)
+{
+	int ret;
+
+	netsec_write(priv, MAC_REG_CMD, addr | NETSEC_GMAC_CMD_ST_READ);
+	ret = netsec_wait_while_busy(priv,
+				     MAC_REG_CMD, NETSEC_GMAC_CMD_ST_BUSY);
+	if (ret)
+		return ret;
+
+	*read = netsec_read(priv, MAC_REG_DATA);
+
+	return 0;
+}
+
+static int netsec_mac_wait_while_busy(struct netsec_priv *priv,
+				      u32 addr, u32 mask)
+{
+	u32 timeout = TIMEOUT_SPINS_MAC;
+	int ret;
+	u32 data;
+
+	do {
+		ret = netsec_mac_read(priv, addr, &data);
+		if (ret)
+			break;
+		cpu_relax();
+	} while (--timeout && (data & mask));
+
+	if (timeout)
+		return 0;
+
+	timeout = TIMEOUT_SECONDARY_MS_MAC;
+	do {
+		usleep_range(1000, 2000);
+
+		ret = netsec_mac_read(priv, addr, &data);
+		if (ret)
+			break;
+		cpu_relax();
+	} while (--timeout && (data & mask));
+
+	if (timeout && !ret)
+		return 0;
+
+	netdev_WARN(priv->ndev, "%s: timeout\n", __func__);
+
+	return -ETIMEDOUT;
+}
+
+static int netsec_mac_update_to_phy_state(struct netsec_priv *priv)
+{
+	struct phy_device *phydev = priv->ndev->phydev;
+	u32 value = 0;
+
+	value = phydev->duplex ? NETSEC_GMAC_MCR_REG_FULL_DUPLEX_COMMON :
+				 NETSEC_GMAC_MCR_REG_HALF_DUPLEX_COMMON;
+
+	if (phydev->speed != SPEED_1000)
+		value |= NETSEC_MCR_PS;
+
+	if (priv->phy_interface != PHY_INTERFACE_MODE_GMII &&
+	    phydev->speed == SPEED_100)
+		value |= NETSEC_GMAC_MCR_REG_FES;
+
+	value |= NETSEC_GMAC_MCR_REG_CST | NETSEC_GMAC_MCR_REG_JE;
+
+	if (phy_interface_mode_is_rgmii(priv->phy_interface))
+		value |= NETSEC_GMAC_MCR_REG_IBN;
+
+	if (netsec_mac_write(priv, GMAC_REG_MCR, value))
+		return -ETIMEDOUT;
+
+	return 0;
+}
+
+static int netsec_phy_write(struct mii_bus *bus,
+			    int phy_addr, int reg, u16 val)
+{
+	struct netsec_priv *priv = bus->priv;
+
+	if (netsec_mac_write(priv, GMAC_REG_GDR, val))
+		return -ETIMEDOUT;
+	if (netsec_mac_write(priv, GMAC_REG_GAR,
+			     phy_addr << NETSEC_GMAC_GAR_REG_SHIFT_PA |
+			     reg << NETSEC_GMAC_GAR_REG_SHIFT_GR |
+			     NETSEC_GMAC_GAR_REG_GW | NETSEC_GMAC_GAR_REG_GB |
+			     (netsec_clk_type(priv->freq) <<
+			      GMAC_REG_SHIFT_CR_GAR)))
+		return -ETIMEDOUT;
+
+	return netsec_mac_wait_while_busy(priv, GMAC_REG_GAR,
+					  NETSEC_GMAC_GAR_REG_GB);
+}
+
+static int netsec_phy_read(struct mii_bus *bus, int phy_addr, int reg_addr)
+{
+	struct netsec_priv *priv = bus->priv;
+	u32 data;
+	int ret;
+
+	if (netsec_mac_write(priv, GMAC_REG_GAR, NETSEC_GMAC_GAR_REG_GB |
+			     phy_addr << NETSEC_GMAC_GAR_REG_SHIFT_PA |
+			     reg_addr << NETSEC_GMAC_GAR_REG_SHIFT_GR |
+			     (netsec_clk_type(priv->freq) <<
+			      GMAC_REG_SHIFT_CR_GAR)))
+		return -ETIMEDOUT;
+
+	ret = netsec_mac_wait_while_busy(priv, GMAC_REG_GAR,
+					 NETSEC_GMAC_GAR_REG_GB);
+	if (ret)
+		return ret;
+
+	ret = netsec_mac_read(priv, GMAC_REG_GDR, &data);
+	if (ret)
+		return ret;
+
+	return data;
+}
+
+/************* ETHTOOL_OPS FOLLOW *************/
+
+static void netsec_et_get_drvinfo(struct net_device *net_device,
+				  struct ethtool_drvinfo *info)
+{
+	strlcpy(info->driver, "netsec", sizeof(info->driver));
+	strlcpy(info->bus_info, dev_name(net_device->dev.parent),
+		sizeof(info->bus_info));
+}
+
+static int netsec_et_get_coalesce(struct net_device *net_device,
+				  struct ethtool_coalesce *et_coalesce)
+{
+	struct netsec_priv *priv = netdev_priv(net_device);
+
+	*et_coalesce = priv->et_coalesce;
+
+	return 0;
+}
+
+static int netsec_et_set_coalesce(struct net_device *net_device,
+				  struct ethtool_coalesce *et_coalesce)
+{
+	struct netsec_priv *priv = netdev_priv(net_device);
+
+	priv->et_coalesce = *et_coalesce;
+
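+	/* Enforce a floor of 50us / 1 frame before programming the
+	 * interrupt moderation registers below.
+	 */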
+	if (priv->et_coalesce.tx_coalesce_usecs < 50)
+		priv->et_coalesce.tx_coalesce_usecs = 50;
+	if (priv->et_coalesce.tx_max_coalesced_frames < 1)
+		priv->et_coalesce.tx_max_coalesced_frames = 1;
+
+	netsec_write(priv, NETSEC_REG_NRM_TX_DONE_TXINT_PKTCNT,
+		     priv->et_coalesce.tx_max_coalesced_frames);
+	netsec_write(priv, NETSEC_REG_NRM_TX_TXINT_TMR,
+		     priv->et_coalesce.tx_coalesce_usecs);
+	netsec_write(priv, NETSEC_REG_NRM_TX_INTEN_SET, NRM_TX_ST_TXDONE);
+	netsec_write(priv, NETSEC_REG_NRM_TX_INTEN_SET, NRM_TX_ST_TMREXP);
+
+	if (priv->et_coalesce.rx_coalesce_usecs < 50)
+		priv->et_coalesce.rx_coalesce_usecs = 50;
+	if (priv->et_coalesce.rx_max_coalesced_frames < 1)
+		priv->et_coalesce.rx_max_coalesced_frames = 1;
+
+	netsec_write(priv, NETSEC_REG_NRM_RX_RXINT_PKTCNT,
+		     priv->et_coalesce.rx_max_coalesced_frames);
+	netsec_write(priv, NETSEC_REG_NRM_RX_RXINT_TMR,
+		     priv->et_coalesce.rx_coalesce_usecs);
+	netsec_write(priv, NETSEC_REG_NRM_RX_INTEN_SET, NRM_RX_ST_PKTCNT);
+	netsec_write(priv, NETSEC_REG_NRM_RX_INTEN_SET, NRM_RX_ST_TMREXP);
+
+	return 0;
+}
+
+static u32 netsec_et_get_msglevel(struct net_device *dev)
+{
+	struct netsec_priv *priv = netdev_priv(dev);
+
+	return priv->msg_enable;
+}
+
+static void netsec_et_set_msglevel(struct net_device *dev, u32 datum)
+{
+	struct netsec_priv *priv = netdev_priv(dev);
+
+	priv->msg_enable = datum;
+}
+
+static const struct ethtool_ops netsec_ethtool_ops = {
+	.get_drvinfo		= netsec_et_get_drvinfo,
+	.get_link_ksettings	= phy_ethtool_get_link_ksettings,
+	.set_link_ksettings	= phy_ethtool_set_link_ksettings,
+	.get_link		= ethtool_op_get_link,
+	.get_coalesce		= netsec_et_get_coalesce,
+	.set_coalesce		= netsec_et_set_coalesce,
+	.get_msglevel		= netsec_et_get_msglevel,
+	.set_msglevel		= netsec_et_set_msglevel,
+};
+
+/************* NETDEV_OPS FOLLOW *************/
+
+static struct sk_buff *netsec_alloc_skb(struct netsec_priv *priv,
+					struct netsec_desc *desc)
+{
+	struct sk_buff *skb;
+
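+	/* On DMA-coherent platforms the usual 2-byte IP alignment can be
+	 * used; otherwise round the buffer up to a cache line so cache
+	 * maintenance for dma_map_single() never touches adjacent data.
+	 */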
+	if (device_get_dma_attr(priv->dev) == DEV_DMA_COHERENT) {
+		skb = netdev_alloc_skb_ip_align(priv->ndev, desc->len);
+	} else {
+		desc->len = L1_CACHE_ALIGN(desc->len);
+		skb = netdev_alloc_skb(priv->ndev, desc->len);
+	}
+	if (!skb)
+		return NULL;
+
+	desc->addr = skb->data;
+	desc->dma_addr = dma_map_single(priv->dev, desc->addr, desc->len,
+					DMA_FROM_DEVICE);
+	if (dma_mapping_error(priv->dev, desc->dma_addr)) {
+		dev_kfree_skb_any(skb);
+		return NULL;
+	}
+	return skb;
+}
+
+static void netsec_set_rx_de(struct netsec_priv *priv,
+			     struct netsec_desc_ring *dring, u16 idx,
+			     const struct netsec_desc *desc,
+			     struct sk_buff *skb)
+{
+	struct netsec_de *de = dring->vaddr + DESC_SZ * idx;
+	u32 attr = (1 << NETSEC_RX_PKT_OWN_FIELD) |
+		   (1 << NETSEC_RX_PKT_FS_FIELD) |
+		   (1 << NETSEC_RX_PKT_LS_FIELD);
+
+	if (idx == DESC_NUM - 1)
+		attr |= (1 << NETSEC_RX_PKT_LD_FIELD);
+
+	de->data_buf_addr_up = upper_32_bits(desc->dma_addr);
+	de->data_buf_addr_lw = lower_32_bits(desc->dma_addr);
+	de->buf_len_info = desc->len;
+	de->attr = attr;
+	dma_wmb();
+
+	dring->desc[idx].dma_addr = desc->dma_addr;
+	dring->desc[idx].addr = desc->addr;
+	dring->desc[idx].len = desc->len;
+	dring->desc[idx].skb = skb;
+}
+
+static struct sk_buff *netsec_get_rx_de(struct netsec_priv *priv,
+					struct netsec_desc_ring *dring,
+					u16 idx,
+					struct netsec_rx_pkt_info *rxpi,
+					struct netsec_desc *desc, u16 *len)
+{
+	struct netsec_de de = {};
+
+	memcpy(&de, dring->vaddr + DESC_SZ * idx, DESC_SZ);
+
+	*len = de.buf_len_info >> 16;
+
+	rxpi->err_flag = (de.attr >> NETSEC_RX_PKT_ER_FIELD) & 1;
+	rxpi->rx_cksum_result = (de.attr >> NETSEC_RX_PKT_CO_FIELD) & 3;
+	rxpi->err_code = (de.attr >> NETSEC_RX_PKT_ERR_FIELD) &
+							NETSEC_RX_PKT_ERR_MASK;
+	*desc = dring->desc[idx];
+	return desc->skb;
+}
+
+static struct sk_buff *netsec_get_rx_pkt_data(struct netsec_priv *priv,
+					      struct netsec_rx_pkt_info *rxpi,
+					      struct netsec_desc *desc,
+					      u16 *len)
+{
+	struct netsec_desc_ring *dring = &priv->desc_ring[NETSEC_RING_RX];
+	struct sk_buff *tmp_skb, *skb = NULL;
+	struct netsec_desc td;
+	int tail;
+
+	*rxpi = (struct netsec_rx_pkt_info){};
+
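+	/* 22 bytes of slack on top of the MTU, presumably the 14-byte
+	 * Ethernet header, 4-byte FCS and a possible 4-byte VLAN tag.
+	 */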
+	td.len = priv->ndev->mtu + 22;
+
+	tmp_skb = netsec_alloc_skb(priv, &td);
+
+	dma_rmb();
+
+	tail = dring->tail;
+
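+	/* If no replacement buffer could be allocated, re-arm the old one
+	 * and drop the received frame so the RX ring never runs empty.
+	 */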
+	if (!tmp_skb) {
+		netsec_set_rx_de(priv, dring, tail, &dring->desc[tail],
+				 dring->desc[tail].skb);
+	} else {
+		skb = netsec_get_rx_de(priv, dring, tail, rxpi, desc, len);
+		netsec_set_rx_de(priv, dring, tail, &td, tmp_skb);
+	}
+
+	/* move tail ahead */
+	dring->tail = (dring->tail + 1) % DESC_NUM;
+
+	dring->pkt_cnt--;
+
+	return skb;
+}
+
+static int netsec_clean_tx_dring(struct netsec_priv *priv, int budget)
+{
+	struct netsec_desc_ring *dring = &priv->desc_ring[NETSEC_RING_TX];
+	unsigned int pkts, bytes;
+
+	dring->pkt_cnt += netsec_read(priv, NETSEC_REG_NRM_TX_DONE_PKTCNT);
+
+	if (dring->pkt_cnt < budget)
+		budget = dring->pkt_cnt;
+
+	pkts = 0;
+	bytes = 0;
+
+	while (pkts < budget) {
+		struct netsec_desc *desc;
+		struct netsec_de *entry;
+		int tail, eop;
+
+		tail = dring->tail;
+
+		/* move tail ahead */
+		dring->tail = (tail + 1) % DESC_NUM;
+
+		desc = &dring->desc[tail];
+		entry = dring->vaddr + DESC_SZ * tail;
+
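+		/* Only the last descriptor of a frame carries the skb, so
+		 * packet and byte accounting is done on the EOP entry alone.
+		 */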
+		eop = (entry->attr >> NETSEC_TX_LAST) & 1;
+
+		dma_unmap_single(priv->dev, desc->dma_addr, desc->len,
+				 DMA_TO_DEVICE);
+		if (eop) {
+			pkts++;
+			bytes += desc->skb->len;
+			dev_kfree_skb(desc->skb);
+		}
+		*desc = (struct netsec_desc){};
+	}
+	dring->pkt_cnt -= budget;
+
+	priv->ndev->stats.tx_packets += budget;
+	priv->ndev->stats.tx_bytes += bytes;
+
+	netdev_completed_queue(priv->ndev, budget, bytes);
+
+	return budget;
+}
+
+static int netsec_process_tx(struct netsec_priv *priv, int budget)
+{
+	struct net_device *ndev = priv->ndev;
+	int new, done = 0;
+
+	do {
+		new = netsec_clean_tx_dring(priv, budget);
+		done += new;
+		budget -= new;
+	} while (new);
+
+	if (done && netif_queue_stopped(ndev))
+		netif_wake_queue(ndev);
+
+	return done;
+}
+
+static int netsec_process_rx(struct netsec_priv *priv, int budget)
+{
+	struct netsec_desc_ring *dring = &priv->desc_ring[NETSEC_RING_RX];
+	struct net_device *ndev = priv->ndev;
+	struct netsec_rx_pkt_info rx_info;
+	int done = 0, rx_num = 0;
+	struct netsec_desc desc;
+	struct sk_buff *skb;
+	u16 len;
+
+	while (done < budget) {
+		if (!rx_num) {
+			rx_num = netsec_read(priv, NETSEC_REG_NRM_RX_PKTCNT);
+			dring->pkt_cnt += rx_num;
+
+			/* move head 'rx_num' */
+			dring->head = (dring->head + rx_num) % DESC_NUM;
+
+			rx_num = dring->pkt_cnt;
+			if (!rx_num)
+				break;
+		}
+		done++;
+		rx_num--;
+		skb = netsec_get_rx_pkt_data(priv, &rx_info, &desc, &len);
+		if (unlikely(!skb) || rx_info.err_flag) {
+			netif_err(priv, drv, priv->ndev,
+				  "%s: rx fail err(%d)\n",
+				  __func__, rx_info.err_code);
+			ndev->stats.rx_dropped++;
+			continue;
+		}
+
+		dma_unmap_single(priv->dev, desc.dma_addr, desc.len,
+				 DMA_FROM_DEVICE);
+		skb_put(skb, len);
+		skb->protocol = eth_type_trans(skb, priv->ndev);
+
+		if (priv->rx_cksum_offload_flag &&
+		    rx_info.rx_cksum_result == NETSEC_RX_CKSUM_OK)
+			skb->ip_summed = CHECKSUM_UNNECESSARY;
+
+		if (napi_gro_receive(&priv->napi, skb) != GRO_DROP) {
+			ndev->stats.rx_packets++;
+			ndev->stats.rx_bytes += len;
+		}
+	}
+
+	return done;
+}
+
+static int netsec_napi_poll(struct napi_struct *napi, int budget)
+{
+	struct netsec_priv *priv;
+	struct net_device *ndev;
+	int tx, rx, done, todo;
+
+	priv = container_of(napi, struct netsec_priv, napi);
+	ndev = priv->ndev;
+
+	todo = budget;
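+	/* Alternate between TX completion and RX processing until both go
+	 * idle or the NAPI budget is exhausted.
+	 */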
+	do {
+		if (!todo)
+			break;
+
+		tx = netsec_process_tx(priv, todo);
+		todo -= tx;
+
+		if (!todo)
+			break;
+
+		rx = netsec_process_rx(priv, todo);
+		todo -= rx;
+	} while (rx || tx);
+
+	done = budget - todo;
+
+	if (done < budget && napi_complete_done(napi, done)) {
+		unsigned long flags;
+
+		spin_lock_irqsave(&priv->reglock, flags);
+		netsec_write(priv, NETSEC_REG_INTEN_SET,
+			     NETSEC_IRQ_RX | NETSEC_IRQ_TX);
+		spin_unlock_irqrestore(&priv->reglock, flags);
+	}
+
+	return done;
+}
+
+static void netsec_set_tx_de(struct netsec_priv *priv,
+			     struct netsec_desc_ring *dring,
+			     const struct netsec_tx_pkt_ctrl *tx_ctrl,
+			     const struct netsec_desc *desc,
+			     struct sk_buff *skb)
+{
+	int idx = dring->head;
+	struct netsec_de *de;
+	u32 attr;
+
+	de = dring->vaddr + (DESC_SZ * idx);
+
+	attr = (1 << NETSEC_TX_SHIFT_OWN_FIELD) |
+	       (1 << NETSEC_TX_SHIFT_PT_FIELD) |
+	       (NETSEC_RING_GMAC << NETSEC_TX_SHIFT_TDRID_FIELD) |
+	       (1 << NETSEC_TX_SHIFT_FS_FIELD) |
+	       (1 << NETSEC_TX_LAST) |
+	       (tx_ctrl->cksum_offload_flag << NETSEC_TX_SHIFT_CO) |
+	       (tx_ctrl->tcp_seg_offload_flag << NETSEC_TX_SHIFT_SO) |
+	       (1 << NETSEC_TX_SHIFT_TRS_FIELD);
+	if (idx == DESC_NUM - 1)
+		attr |= (1 << NETSEC_TX_SHIFT_LD_FIELD);
+
+	de->data_buf_addr_up = upper_32_bits(desc->dma_addr);
+	de->data_buf_addr_lw = lower_32_bits(desc->dma_addr);
+	de->buf_len_info = (tx_ctrl->tcp_seg_len << 16) | desc->len;
+	de->attr = attr;
+	dma_wmb();
+
+	dring->desc[idx] = *desc;
+	dring->desc[idx].skb = skb;
+
+	/* move head ahead */
+	dring->head = (dring->head + 1) % DESC_NUM;
+}
+
+static netdev_tx_t netsec_netdev_start_xmit(struct sk_buff *skb,
+					    struct net_device *ndev)
+{
+	struct netsec_priv *priv = netdev_priv(ndev);
+	struct netsec_desc_ring *dring = &priv->desc_ring[NETSEC_RING_TX];
+	struct netsec_tx_pkt_ctrl tx_ctrl = {};
+	struct netsec_desc tx_desc;
+	u16 tso_seg_len = 0;
+	int filled;
+
+	/* differentiate between a full and an empty ring */
+	if (dring->head >= dring->tail)
+		filled = dring->head - dring->tail;
+	else
+		filled = dring->head + DESC_NUM - dring->tail;
+
+	if (DESC_NUM - filled < 2) { /* if less than 2 available */
+		netif_err(priv, drv, priv->ndev, "%s: TxQFull!\n", __func__);
+		netif_stop_queue(priv->ndev);
+		dma_wmb();
+		return NETDEV_TX_BUSY;
+	}
+
+	if (skb->ip_summed == CHECKSUM_PARTIAL)
+		tx_ctrl.cksum_offload_flag = true;
+
+	if (skb_is_gso(skb))
+		tso_seg_len = skb_shinfo(skb)->gso_size;
+
+	if (tso_seg_len > 0) {
+		if (skb->protocol == htons(ETH_P_IP)) {
+			ip_hdr(skb)->tot_len = 0;
+			tcp_hdr(skb)->check =
+				~tcp_v4_check(0, ip_hdr(skb)->saddr,
+					      ip_hdr(skb)->daddr, 0);
+		} else {
+			ipv6_hdr(skb)->payload_len = 0;
+			tcp_hdr(skb)->check =
+				~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
+						 &ipv6_hdr(skb)->daddr,
+						 0, IPPROTO_TCP, 0);
+		}
+
+		tx_ctrl.tcp_seg_offload_flag = true;
+		tx_ctrl.tcp_seg_len = tso_seg_len;
+	}
+
+	tx_desc.dma_addr = dma_map_single(priv->dev, skb->data,
+					  skb_headlen(skb), DMA_TO_DEVICE);
+	if (dma_mapping_error(priv->dev, tx_desc.dma_addr)) {
+		netif_err(priv, drv, priv->ndev,
+			  "%s: DMA mapping failed\n", __func__);
+		ndev->stats.tx_dropped++;
+		dev_kfree_skb_any(skb);
+		return NETDEV_TX_OK;
+	}
+	tx_desc.addr = skb->data;
+	tx_desc.len = skb_headlen(skb);
+
+	skb_tx_timestamp(skb);
+	netdev_sent_queue(priv->ndev, skb->len);
+
+	netsec_set_tx_de(priv, dring, &tx_ctrl, &tx_desc, skb);
+	netsec_write(priv, NETSEC_REG_NRM_TX_PKTCNT, 1); /* submit another tx */
+
+	return NETDEV_TX_OK;
+}
+
+static void netsec_uninit_pkt_dring(struct netsec_priv *priv, int id)
+{
+	struct netsec_desc_ring *dring = &priv->desc_ring[id];
+	struct netsec_desc *desc;
+	u16 idx;
+
+	if (!dring->vaddr || !dring->desc)
+		return;
+
+	for (idx = 0; idx < DESC_NUM; idx++) {
+		desc = &dring->desc[idx];
+		if (!desc->addr)
+			continue;
+
+		dma_unmap_single(priv->dev, desc->dma_addr, desc->len,
+				 id == NETSEC_RING_RX ? DMA_FROM_DEVICE :
+							DMA_TO_DEVICE);
+		dev_kfree_skb(desc->skb);
+	}
+
+	memset(dring->desc, 0, sizeof(struct netsec_desc) * DESC_NUM);
+	memset(dring->vaddr, 0, DESC_SZ * DESC_NUM);
+
+	dring->head = 0;
+	dring->tail = 0;
+	dring->pkt_cnt = 0;
+}
+
+static void netsec_free_dring(struct netsec_priv *priv, int id)
+{
+	struct netsec_desc_ring *dring = &priv->desc_ring[id];
+
+	if (dring->vaddr) {
+		dma_free_coherent(priv->dev, DESC_SZ * DESC_NUM,
+				  dring->vaddr, dring->desc_dma);
+		dring->vaddr = NULL;
+	}
+
+	kfree(dring->desc);
+	dring->desc = NULL;
+}
+
+static int netsec_alloc_dring(struct netsec_priv *priv, enum ring_id id)
+{
+	struct netsec_desc_ring *dring = &priv->desc_ring[id];
+	int ret = 0;
+
+	dring->vaddr = dma_zalloc_coherent(priv->dev, DESC_SZ * DESC_NUM,
+					   &dring->desc_dma, GFP_KERNEL);
+	if (!dring->vaddr) {
+		ret = -ENOMEM;
+		goto err;
+	}
+
+	dring->desc = kzalloc(DESC_NUM * sizeof(*dring->desc), GFP_KERNEL);
+	if (!dring->desc) {
+		ret = -ENOMEM;
+		goto err;
+	}
+
+	return 0;
+err:
+	netsec_free_dring(priv, id);
+
+	return ret;
+}
+
+static int netsec_setup_rx_dring(struct netsec_priv *priv)
+{
+	struct netsec_desc_ring *dring = &priv->desc_ring[NETSEC_RING_RX];
+	struct netsec_desc desc;
+	struct sk_buff *skb;
+	int n;
+
+	desc.len = priv->ndev->mtu + 22;
+
+	for (n = 0; n < DESC_NUM; n++) {
+		skb = netsec_alloc_skb(priv, &desc);
+		if (!skb) {
+			netsec_uninit_pkt_dring(priv, NETSEC_RING_RX);
+			return -ENOMEM;
+		}
+		netsec_set_rx_de(priv, dring, n, &desc, skb);
+	}
+
+	return 0;
+}
+
+static int netsec_netdev_load_ucode_region(struct netsec_priv *priv, u32 reg,
+					   u32 addr_h, u32 addr_l, u32 size)
+{
+	u64 base = (u64)addr_h << 32 | addr_l;
+	void __iomem *ucode;
+	u32 i;
+
+	ucode = ioremap(base, size * sizeof(u32));
+	if (!ucode)
+		return -ENOMEM;
+
+	for (i = 0; i < size; i++)
+		netsec_write(priv, reg, readl(ucode + i * 4));
+
+	iounmap(ucode);
+	return 0;
+}
+
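+/* Microengine code for the HM, MH and PKT engines is stored at locations
+ * recorded in the EEPROM; stream each image into its command buffer
+ * register word by word.
+ */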
+static int netsec_netdev_load_microcode(struct netsec_priv *priv)
+{
+	u32 addr_h, addr_l, size;
+	int err;
+
+	addr_h = readl(priv->eeprom_base + NETSEC_EEPROM_HM_ME_ADDRESS_H);
+	addr_l = readl(priv->eeprom_base + NETSEC_EEPROM_HM_ME_ADDRESS_L);
+	size = readl(priv->eeprom_base + NETSEC_EEPROM_HM_ME_SIZE);
+	err = netsec_netdev_load_ucode_region(priv, NETSEC_REG_DMAC_HM_CMD_BUF,
+					      addr_h, addr_l, size);
+	if (err)
+		return err;
+
+	addr_h = readl(priv->eeprom_base + NETSEC_EEPROM_MH_ME_ADDRESS_H);
+	addr_l = readl(priv->eeprom_base + NETSEC_EEPROM_MH_ME_ADDRESS_L);
+	size = readl(priv->eeprom_base + NETSEC_EEPROM_MH_ME_SIZE);
+	err = netsec_netdev_load_ucode_region(priv, NETSEC_REG_DMAC_MH_CMD_BUF,
+					      addr_h, addr_l, size);
+	if (err)
+		return err;
+
+	addr_h = 0;
+	addr_l = readl(priv->eeprom_base + NETSEC_EEPROM_PKT_ME_ADDRESS);
+	size = readl(priv->eeprom_base + NETSEC_EEPROM_PKT_ME_SIZE);
+	err = netsec_netdev_load_ucode_region(priv, NETSEC_REG_PKT_CMD_BUF,
+					      addr_h, addr_l, size);
+	if (err)
+		return err;
+
+	return 0;
+}
+
+static int netsec_reset_hardware(struct netsec_priv *priv)
+{
+	u32 value;
+	int err;
+
+	/* stop DMA engines */
+	if (!netsec_read(priv, NETSEC_REG_ADDR_DIS_CORE)) {
+		netsec_write(priv, NETSEC_REG_DMA_HM_CTRL,
+			     NETSEC_DMA_CTRL_REG_STOP);
+		netsec_write(priv, NETSEC_REG_DMA_MH_CTRL,
+			     NETSEC_DMA_CTRL_REG_STOP);
+
+		while (netsec_read(priv, NETSEC_REG_DMA_HM_CTRL) &
+		       NETSEC_DMA_CTRL_REG_STOP)
+			cpu_relax();
+
+		while (netsec_read(priv, NETSEC_REG_DMA_MH_CTRL) &
+		       NETSEC_DMA_CTRL_REG_STOP)
+			cpu_relax();
+	}
+
+	netsec_write(priv, NETSEC_REG_SOFT_RST, NETSEC_SOFT_RST_REG_RESET);
+	netsec_write(priv, NETSEC_REG_SOFT_RST, NETSEC_SOFT_RST_REG_RUN);
+	netsec_write(priv, NETSEC_REG_COM_INIT, NETSEC_COM_INIT_REG_ALL);
+
+	while (netsec_read(priv, NETSEC_REG_COM_INIT) != 0)
+		cpu_relax();
+
+	/* set desc_start addr */
+	netsec_write(priv, NETSEC_REG_NRM_RX_DESC_START_UP,
+		     upper_32_bits(priv->desc_ring[NETSEC_RING_RX].desc_dma));
+	netsec_write(priv, NETSEC_REG_NRM_RX_DESC_START_LW,
+		     lower_32_bits(priv->desc_ring[NETSEC_RING_RX].desc_dma));
+
+	netsec_write(priv, NETSEC_REG_NRM_TX_DESC_START_UP,
+		     upper_32_bits(priv->desc_ring[NETSEC_RING_TX].desc_dma));
+	netsec_write(priv, NETSEC_REG_NRM_TX_DESC_START_LW,
+		     lower_32_bits(priv->desc_ring[NETSEC_RING_TX].desc_dma));
+
+	/* set normal tx dring ring config */
+	netsec_write(priv, NETSEC_REG_NRM_TX_CONFIG,
+		     1 << NETSEC_REG_DESC_ENDIAN);
+	netsec_write(priv, NETSEC_REG_NRM_RX_CONFIG,
+		     1 << NETSEC_REG_DESC_ENDIAN);
+
+	err = netsec_netdev_load_microcode(priv);
+	if (err) {
+		netif_err(priv, probe, priv->ndev,
+			  "%s: failed to load microcode (%d)\n", __func__, err);
+		return err;
+	}
+
+	/* start DMA engines */
+	netsec_write(priv, NETSEC_REG_DMA_TMR_CTRL, priv->freq / 1000000 - 1);
+	netsec_write(priv, NETSEC_REG_ADDR_DIS_CORE, 0);
+
+	usleep_range(1000, 2000);
+
+	if (!(netsec_read(priv, NETSEC_REG_TOP_STATUS) &
+	      NETSEC_TOP_IRQ_REG_CODE_LOAD_END)) {
+		netif_err(priv, probe, priv->ndev,
+			  "microengine start failed\n");
+		return -ENXIO;
+	}
+	netsec_write(priv, NETSEC_REG_TOP_STATUS,
+		     NETSEC_TOP_IRQ_REG_CODE_LOAD_END);
+
+	value = NETSEC_PKT_CTRL_REG_MODE_NRM;
+	if (priv->ndev->mtu > ETH_DATA_LEN)
+		value |= NETSEC_PKT_CTRL_REG_EN_JUMBO;
+
+	/* change to normal mode */
+	netsec_write(priv, NETSEC_REG_DMA_MH_CTRL, MH_CTRL__MODE_TRANS);
+	netsec_write(priv, NETSEC_REG_PKT_CTRL, value);
+
+	while ((netsec_read(priv, NETSEC_REG_MODE_TRANS_COMP_STATUS) &
+		NETSEC_MODE_TRANS_COMP_IRQ_T2N) == 0)
+		cpu_relax();
+
+	/* clear any pending EMPTY/ERR irq status */
+	netsec_write(priv, NETSEC_REG_NRM_TX_STATUS, ~0);
+
+	/* Disable TX & RX intr */
+	netsec_write(priv, NETSEC_REG_INTEN_CLR, ~0);
+
+	return 0;
+}
+
+static int netsec_start_gmac(struct netsec_priv *priv)
+{
+	struct phy_device *phydev = priv->ndev->phydev;
+	u32 value = 0;
+	int ret;
+
+	if (phydev->speed != SPEED_1000)
+		value = (NETSEC_GMAC_MCR_REG_CST |
+			 NETSEC_GMAC_MCR_REG_HALF_DUPLEX_COMMON);
+
+	if (netsec_mac_write(priv, GMAC_REG_MCR, value))
+		return -ETIMEDOUT;
+	if (netsec_mac_write(priv, GMAC_REG_BMR,
+			     NETSEC_GMAC_BMR_REG_RESET))
+		return -ETIMEDOUT;
+
+	/* Wait soft reset */
+	usleep_range(1000, 5000);
+
+	ret = netsec_mac_read(priv, GMAC_REG_BMR, &value);
+	if (ret)
+		return ret;
+	if (value & NETSEC_GMAC_BMR_REG_SWR)
+		return -EAGAIN;
+
+	netsec_write(priv, MAC_REG_DESC_SOFT_RST, 1);
+	if (netsec_wait_while_busy(priv, MAC_REG_DESC_SOFT_RST, 1))
+		return -ETIMEDOUT;
+
+	netsec_write(priv, MAC_REG_DESC_INIT, 1);
+	if (netsec_wait_while_busy(priv, MAC_REG_DESC_INIT, 1))
+		return -ETIMEDOUT;
+
+	if (netsec_mac_write(priv, GMAC_REG_BMR,
+			     NETSEC_GMAC_BMR_REG_COMMON))
+		return -ETIMEDOUT;
+	if (netsec_mac_write(priv, GMAC_REG_RDLAR,
+			     NETSEC_GMAC_RDLAR_REG_COMMON))
+		return -ETIMEDOUT;
+	if (netsec_mac_write(priv, GMAC_REG_TDLAR,
+			     NETSEC_GMAC_TDLAR_REG_COMMON))
+		return -ETIMEDOUT;
+	if (netsec_mac_write(priv, GMAC_REG_MFFR, 0x80000001))
+		return -ETIMEDOUT;
+
+	ret = netsec_mac_update_to_phy_state(priv);
+	if (ret)
+		return ret;
+
+	ret = netsec_mac_read(priv, GMAC_REG_OMR, &value);
+	if (ret)
+		return ret;
+
+	value |= NETSEC_GMAC_OMR_REG_SR;
+	value |= NETSEC_GMAC_OMR_REG_ST;
+
+	netsec_write(priv, NETSEC_REG_NRM_RX_INTEN_CLR, ~0);
+	netsec_write(priv, NETSEC_REG_NRM_TX_INTEN_CLR, ~0);
+
+	netsec_et_set_coalesce(priv->ndev, &priv->et_coalesce);
+
+	if (netsec_mac_write(priv, GMAC_REG_OMR, value))
+		return -ETIMEDOUT;
+
+	return 0;
+}
+
+static int netsec_stop_gmac(struct netsec_priv *priv)
+{
+	u32 value;
+	int ret;
+
+	ret = netsec_mac_read(priv, GMAC_REG_OMR, &value);
+	if (ret)
+		return ret;
+	value &= ~NETSEC_GMAC_OMR_REG_SR;
+	value &= ~NETSEC_GMAC_OMR_REG_ST;
+
+	/* disable all interrupts */
+	netsec_write(priv, NETSEC_REG_NRM_RX_INTEN_CLR, ~0);
+	netsec_write(priv, NETSEC_REG_NRM_TX_INTEN_CLR, ~0);
+
+	return netsec_mac_write(priv, GMAC_REG_OMR, value);
+}
+
+static void netsec_phy_adjust_link(struct net_device *ndev)
+{
+	struct netsec_priv *priv = netdev_priv(ndev);
+
+	if (ndev->phydev->link)
+		netsec_start_gmac(priv);
+	else
+		netsec_stop_gmac(priv);
+
+	phy_print_status(ndev->phydev);
+}
+
+static irqreturn_t netsec_irq_handler(int irq, void *dev_id)
+{
+	struct netsec_priv *priv = dev_id;
+	u32 val, status = netsec_read(priv, NETSEC_REG_TOP_STATUS);
+	unsigned long flags;
+
+	/* Acknowledge the per-ring TX/RX status bits */
+	if (status & NETSEC_IRQ_TX) {
+		val = netsec_read(priv, NETSEC_REG_NRM_TX_STATUS);
+		netsec_write(priv, NETSEC_REG_NRM_TX_STATUS, val);
+	}
+	if (status & NETSEC_IRQ_RX) {
+		val = netsec_read(priv, NETSEC_REG_NRM_RX_STATUS);
+		netsec_write(priv, NETSEC_REG_NRM_RX_STATUS, val);
+	}
+
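+	/* Mask TX/RX interrupts; the NAPI poll routine re-enables them
+	 * when it completes.
+	 */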
+	spin_lock_irqsave(&priv->reglock, flags);
+	netsec_write(priv, NETSEC_REG_INTEN_CLR, NETSEC_IRQ_RX | NETSEC_IRQ_TX);
+	spin_unlock_irqrestore(&priv->reglock, flags);
+
+	napi_schedule(&priv->napi);
+
+	return IRQ_HANDLED;
+}
+
+static int netsec_netdev_open(struct net_device *ndev)
+{
+	struct netsec_priv *priv = netdev_priv(ndev);
+	int ret;
+
+	pm_runtime_get_sync(priv->dev);
+
+	ret = netsec_setup_rx_dring(priv);
+	if (ret) {
+		netif_err(priv, probe, priv->ndev,
+			  "%s: fail setup ring\n", __func__);
+		goto err1;
+	}
+
+	ret = request_irq(priv->ndev->irq, netsec_irq_handler,
+			  IRQF_SHARED, "netsec", priv);
+	if (ret) {
+		netif_err(priv, drv, priv->ndev, "request_irq failed\n");
+		goto err2;
+	}
+
+	if (dev_of_node(priv->dev)) {
+		if (!of_phy_connect(priv->ndev, priv->phy_np,
+				    netsec_phy_adjust_link, 0,
+				    priv->phy_interface)) {
+			netif_err(priv, link, priv->ndev, "missing PHY\n");
+			ret = -ENODEV;
+			goto err3;
+		}
+	} else {
+		ret = phy_connect_direct(priv->ndev, priv->phydev,
+					 netsec_phy_adjust_link,
+					 priv->phy_interface);
+		if (ret) {
+			netif_err(priv, link, priv->ndev,
+				  "phy_connect_direct() failed (%d)\n", ret);
+			goto err3;
+		}
+	}
+
+	phy_start(ndev->phydev);
+
+	netsec_start_gmac(priv);
+	napi_enable(&priv->napi);
+	netif_start_queue(ndev);
+
+	/* Enable RX intr. */
+	netsec_write(priv, NETSEC_REG_INTEN_SET, NETSEC_IRQ_RX);
+
+	return 0;
+err3:
+	free_irq(priv->ndev->irq, priv);
+err2:
+	netsec_uninit_pkt_dring(priv, NETSEC_RING_RX);
+err1:
+	pm_runtime_put_sync(priv->dev);
+	return ret;
+}
+
+static int netsec_netdev_stop(struct net_device *ndev)
+{
+	struct netsec_priv *priv = netdev_priv(ndev);
+
+	netif_stop_queue(priv->ndev);
+	dma_wmb();
+
+	napi_disable(&priv->napi);
+
+	netsec_write(priv, NETSEC_REG_INTEN_CLR, ~0);
+	netsec_stop_gmac(priv);
+
+	free_irq(priv->ndev->irq, priv);
+
+	netsec_uninit_pkt_dring(priv, NETSEC_RING_TX);
+	netsec_uninit_pkt_dring(priv, NETSEC_RING_RX);
+
+	phy_stop(ndev->phydev);
+	phy_disconnect(ndev->phydev);
+
+	pm_runtime_put_sync(priv->dev);
+
+	return 0;
+}
+
+static int netsec_netdev_init(struct net_device *ndev)
+{
+	struct netsec_priv *priv = netdev_priv(ndev);
+	int ret;
+
+	ret = netsec_alloc_dring(priv, NETSEC_RING_TX);
+	if (ret)
+		return ret;
+
+	ret = netsec_alloc_dring(priv, NETSEC_RING_RX);
+	if (ret)
+		goto err1;
+
+	ret = netsec_reset_hardware(priv);
+	if (ret)
+		goto err2;
+
+	return 0;
+err2:
+	netsec_free_dring(priv, NETSEC_RING_RX);
+err1:
+	netsec_free_dring(priv, NETSEC_RING_TX);
+	return ret;
+}
+
+static void netsec_netdev_uninit(struct net_device *ndev)
+{
+	struct netsec_priv *priv = netdev_priv(ndev);
+
+	netsec_free_dring(priv, NETSEC_RING_RX);
+	netsec_free_dring(priv, NETSEC_RING_TX);
+}
+
+static int netsec_netdev_set_features(struct net_device *ndev,
+				      netdev_features_t features)
+{
+	struct netsec_priv *priv = netdev_priv(ndev);
+
+	priv->rx_cksum_offload_flag = !!(features & NETIF_F_RXCSUM);
+
+	return 0;
+}
+
+static int netsec_netdev_ioctl(struct net_device *ndev, struct ifreq *ifr,
+			       int cmd)
+{
+	return phy_mii_ioctl(ndev->phydev, ifr, cmd);
+}
+
+static const struct net_device_ops netsec_netdev_ops = {
+	.ndo_init		= netsec_netdev_init,
+	.ndo_uninit		= netsec_netdev_uninit,
+	.ndo_open		= netsec_netdev_open,
+	.ndo_stop		= netsec_netdev_stop,
+	.ndo_start_xmit		= netsec_netdev_start_xmit,
+	.ndo_set_features	= netsec_netdev_set_features,
+	.ndo_set_mac_address    = eth_mac_addr,
+	.ndo_validate_addr	= eth_validate_addr,
+	.ndo_do_ioctl		= netsec_netdev_ioctl,
+};
+
+static int netsec_of_probe(struct platform_device *pdev,
+			   struct netsec_priv *priv)
+{
+	priv->phy_np = of_parse_phandle(pdev->dev.of_node, "phy-handle", 0);
+	if (!priv->phy_np) {
+		dev_err(&pdev->dev, "missing required property 'phy-handle'\n");
+		return -EINVAL;
+	}
+
+	priv->clk = devm_clk_get(&pdev->dev, NULL); /* the 'phy_ref_clk' clock */
+	if (IS_ERR(priv->clk)) {
+		dev_err(&pdev->dev, "phy_ref_clk not found\n");
+		return PTR_ERR(priv->clk);
+	}
+	priv->freq = clk_get_rate(priv->clk);
+
+	return 0;
+}
+
+static int netsec_acpi_probe(struct platform_device *pdev,
+			     struct netsec_priv *priv, u32 *phy_addr)
+{
+	int ret;
+
+	if (!IS_ENABLED(CONFIG_ACPI))
+		return -ENODEV;
+
+	ret = device_property_read_u32(&pdev->dev, "phy-channel", phy_addr);
+	if (ret) {
+		dev_err(&pdev->dev,
+			"missing required property 'phy-channel'\n");
+		return ret;
+	}
+
+	ret = device_property_read_u32(&pdev->dev,
+				       "socionext,phy-clock-frequency",
+				       &priv->freq);
+	if (ret)
+		dev_err(&pdev->dev,
+			"missing required property 'socionext,phy-clock-frequency'\n");
+	return ret;
+}
+
+static void netsec_unregister_mdio(struct netsec_priv *priv)
+{
+	struct phy_device *phydev = priv->phydev;
+
+	if (!dev_of_node(priv->dev) && phydev) {
+		phy_device_remove(phydev);
+		phy_device_free(phydev);
+	}
+
+	mdiobus_unregister(priv->mii_bus);
+}
+
+static int netsec_register_mdio(struct netsec_priv *priv, u32 phy_addr)
+{
+	struct mii_bus *bus;
+	int ret;
+
+	bus = devm_mdiobus_alloc(priv->dev);
+	if (!bus)
+		return -ENOMEM;
+
+	snprintf(bus->id, MII_BUS_ID_SIZE, "%s", dev_name(priv->dev));
+	bus->priv = priv;
+	bus->name = "SNI NETSEC MDIO";
+	bus->read = netsec_phy_read;
+	bus->write = netsec_phy_write;
+	bus->parent = priv->dev;
+	priv->mii_bus = bus;
+
+	if (dev_of_node(priv->dev)) {
+		struct device_node *mdio_node, *parent = dev_of_node(priv->dev);
+
+		mdio_node = of_get_child_by_name(parent, "mdio");
+		if (mdio_node) {
+			parent = mdio_node;
+		} else {
+			/* Older firmware doesn't populate the mdio subnode;
+			 * fall back to the parent node so such firmware can
+			 * be upgraded in due time.
+			 */
+			dev_info(priv->dev, "Upgrade f/w for mdio subnode!\n");
+		}
+
+		ret = of_mdiobus_register(bus, parent);
+		of_node_put(mdio_node);
+
+		if (ret) {
+			dev_err(priv->dev, "mdiobus register err(%d)\n", ret);
+			return ret;
+		}
+	} else {
+		/* Mask out all PHYs from auto probing. */
+		bus->phy_mask = ~0;
+		ret = mdiobus_register(bus);
+		if (ret) {
+			dev_err(priv->dev, "mdiobus register err(%d)\n", ret);
+			return ret;
+		}
+
+		priv->phydev = get_phy_device(bus, phy_addr, false);
+		if (IS_ERR(priv->phydev)) {
+			ret = PTR_ERR(priv->phydev);
+			dev_err(priv->dev, "get_phy_device err(%d)\n", ret);
+			priv->phydev = NULL;
+			return -ENODEV;
+		}
+
+		ret = phy_device_register(priv->phydev);
+		if (ret) {
+			mdiobus_unregister(bus);
+			dev_err(priv->dev,
+				"phy_device_register err(%d)\n", ret);
+		}
+	}
+
+	return ret;
+}
+
+static int netsec_probe(struct platform_device *pdev)
+{
+	struct resource *mmio_res, *eeprom_res, *irq_res;
+	u8 *mac, macbuf[ETH_ALEN];
+	struct netsec_priv *priv;
+	u32 hw_ver, phy_addr = 0;
+	struct net_device *ndev;
+	int ret;
+
+	mmio_res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	if (!mmio_res) {
+		dev_err(&pdev->dev, "No MMIO resource found.\n");
+		return -ENODEV;
+	}
+
+	eeprom_res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
+	if (!eeprom_res) {
+		dev_err(&pdev->dev, "No EEPROM resource found.\n");
+		return -ENODEV;
+	}
+
+	irq_res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
+	if (!irq_res) {
+		dev_err(&pdev->dev, "No IRQ resource found.\n");
+		return -ENODEV;
+	}
+
+	ndev = alloc_etherdev(sizeof(*priv));
+	if (!ndev)
+		return -ENOMEM;
+
+	priv = netdev_priv(ndev);
+
+	spin_lock_init(&priv->reglock);
+	SET_NETDEV_DEV(ndev, &pdev->dev);
+	platform_set_drvdata(pdev, priv);
+	ndev->irq = irq_res->start;
+	priv->dev = &pdev->dev;
+	priv->ndev = ndev;
+
+	priv->msg_enable = NETIF_MSG_TX_ERR | NETIF_MSG_HW | NETIF_MSG_DRV |
+			   NETIF_MSG_LINK | NETIF_MSG_PROBE;
+
+	priv->phy_interface = device_get_phy_mode(&pdev->dev);
+	if (priv->phy_interface < 0) {
+		dev_err(&pdev->dev, "missing required property 'phy-mode'\n");
+		ret = -ENODEV;
+		goto free_ndev;
+	}
+
+	priv->ioaddr = devm_ioremap(&pdev->dev, mmio_res->start,
+				    resource_size(mmio_res));
+	if (!priv->ioaddr) {
+		dev_err(&pdev->dev, "devm_ioremap() failed\n");
+		ret = -ENXIO;
+		goto free_ndev;
+	}
+
+	priv->eeprom_base = devm_ioremap(&pdev->dev, eeprom_res->start,
+					 resource_size(eeprom_res));
+	if (!priv->eeprom_base) {
+		dev_err(&pdev->dev, "devm_ioremap() failed for EEPROM\n");
+		ret = -ENXIO;
+		goto free_ndev;
+	}
+
+	mac = device_get_mac_address(&pdev->dev, macbuf, sizeof(macbuf));
+	if (mac)
+		ether_addr_copy(ndev->dev_addr, mac);
+
+	if (priv->eeprom_base &&
+	    (!mac || !is_valid_ether_addr(ndev->dev_addr))) {
+		void __iomem *macp = priv->eeprom_base +
+					NETSEC_EEPROM_MAC_ADDRESS;
+
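+		/* The EEPROM stores the MAC address as two 32-bit words,
+		 * hence the byte-swizzled reads below.
+		 */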
+		ndev->dev_addr[0] = readb(macp + 3);
+		ndev->dev_addr[1] = readb(macp + 2);
+		ndev->dev_addr[2] = readb(macp + 1);
+		ndev->dev_addr[3] = readb(macp + 0);
+		ndev->dev_addr[4] = readb(macp + 7);
+		ndev->dev_addr[5] = readb(macp + 6);
+	}
+
+	if (!is_valid_ether_addr(ndev->dev_addr)) {
+		dev_warn(&pdev->dev, "No MAC address found, using random\n");
+		eth_hw_addr_random(ndev);
+	}
+
+	if (dev_of_node(&pdev->dev))
+		ret = netsec_of_probe(pdev, priv);
+	else
+		ret = netsec_acpi_probe(pdev, priv, &phy_addr);
+	if (ret)
+		goto free_ndev;
+
+	if (!priv->freq) {
+		dev_err(&pdev->dev, "missing PHY reference clock frequency\n");
+		ret = -ENODEV;
+		goto free_ndev;
+	}
+
+	/* default for throughput */
+	priv->et_coalesce.rx_coalesce_usecs = 500;
+	priv->et_coalesce.rx_max_coalesced_frames = 8;
+	priv->et_coalesce.tx_coalesce_usecs = 500;
+	priv->et_coalesce.tx_max_coalesced_frames = 8;
+
+	ret = device_property_read_u32(&pdev->dev, "max-frame-size",
+				       &ndev->max_mtu);
+	if (ret < 0)
+		ndev->max_mtu = ETH_DATA_LEN;
+
+	/* runtime_pm coverage just for probe, open/close also cover it */
+	pm_runtime_enable(&pdev->dev);
+	pm_runtime_get_sync(&pdev->dev);
+
+	hw_ver = netsec_read(priv, NETSEC_REG_F_TAIKI_VER);
+	/* this driver only supports F_TAIKI style NETSEC */
+	if (NETSEC_F_NETSEC_VER_MAJOR_NUM(hw_ver) !=
+	    NETSEC_F_NETSEC_VER_MAJOR_NUM(NETSEC_REG_NETSEC_VER_F_TAIKI)) {
+		ret = -ENODEV;
+		goto pm_disable;
+	}
+
+	dev_info(&pdev->dev, "hardware revision %d.%d\n",
+		 hw_ver >> 16, hw_ver & 0xffff);
+
+	netif_napi_add(ndev, &priv->napi, netsec_napi_poll, NAPI_BUDGET);
+
+	ndev->netdev_ops = &netsec_netdev_ops;
+	ndev->ethtool_ops = &netsec_ethtool_ops;
+
+	ndev->features |= NETIF_F_HIGHDMA | NETIF_F_RXCSUM | NETIF_F_GSO |
+				NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
+	ndev->hw_features = ndev->features;
+
+	priv->rx_cksum_offload_flag = true;
+
+	ret = netsec_register_mdio(priv, phy_addr);
+	if (ret)
+		goto unreg_napi;
+
+	if (dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)))
+		dev_warn(&pdev->dev, "Failed to enable 64-bit DMA\n");
+
+	ret = register_netdev(ndev);
+	if (ret) {
+		netif_err(priv, probe, ndev, "register_netdev() failed\n");
+		goto unreg_mii;
+	}
+
+	pm_runtime_put_sync(&pdev->dev);
+	return 0;
+
+unreg_mii:
+	netsec_unregister_mdio(priv);
+unreg_napi:
+	netif_napi_del(&priv->napi);
+pm_disable:
+	pm_runtime_put_sync(&pdev->dev);
+	pm_runtime_disable(&pdev->dev);
+free_ndev:
+	free_netdev(ndev);
+	dev_err(&pdev->dev, "init failed\n");
+
+	return ret;
+}
+
+static int netsec_remove(struct platform_device *pdev)
+{
+	struct netsec_priv *priv = platform_get_drvdata(pdev);
+
+	unregister_netdev(priv->ndev);
+
+	netsec_unregister_mdio(priv);
+
+	netif_napi_del(&priv->napi);
+
+	pm_runtime_disable(&pdev->dev);
+	free_netdev(priv->ndev);
+
+	return 0;
+}
+
+#ifdef CONFIG_PM
+static int netsec_runtime_suspend(struct device *dev)
+{
+	struct netsec_priv *priv = dev_get_drvdata(dev);
+
+	netsec_write(priv, NETSEC_REG_CLK_EN, 0);
+
+	clk_disable_unprepare(priv->clk);
+
+	return 0;
+}
+
+static int netsec_runtime_resume(struct device *dev)
+{
+	struct netsec_priv *priv = dev_get_drvdata(dev);
+
+	clk_prepare_enable(priv->clk);
+
+	netsec_write(priv, NETSEC_REG_CLK_EN, NETSEC_CLK_EN_REG_DOM_D |
+					       NETSEC_CLK_EN_REG_DOM_C |
+					       NETSEC_CLK_EN_REG_DOM_G);
+	return 0;
+}
+#endif
+
+static const struct dev_pm_ops netsec_pm_ops = {
+	SET_RUNTIME_PM_OPS(netsec_runtime_suspend, netsec_runtime_resume, NULL)
+};
+
+static const struct of_device_id netsec_dt_ids[] = {
+	{ .compatible = "socionext,synquacer-netsec" },
+	{ }
+};
+MODULE_DEVICE_TABLE(of, netsec_dt_ids);
+
+#ifdef CONFIG_ACPI
+static const struct acpi_device_id netsec_acpi_ids[] = {
+	{ "SCX0001" },
+	{ }
+};
+MODULE_DEVICE_TABLE(acpi, netsec_acpi_ids);
+#endif
+
+static struct platform_driver netsec_driver = {
+	.probe	= netsec_probe,
+	.remove	= netsec_remove,
+	.driver = {
+		.name = "netsec",
+		.pm = &netsec_pm_ops,
+		.of_match_table = netsec_dt_ids,
+		.acpi_match_table = ACPI_PTR(netsec_acpi_ids),
+	},
+};
+module_platform_driver(netsec_driver);
+
+MODULE_AUTHOR("Jassi Brar <jaswinder.singh@linaro.org>");
+MODULE_AUTHOR("Ard Biesheuvel <ard.biesheuvel@linaro.org>");
+MODULE_DESCRIPTION("NETSEC Ethernet driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/net/ethernet/socionext/sni_ave.c b/drivers/net/ethernet/socionext/sni_ave.c
new file mode 100644
index 000000000000..111e7ca9df56
--- /dev/null
+++ b/drivers/net/ethernet/socionext/sni_ave.c
@@ -0,0 +1,1736 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * sni_ave.c - Socionext UniPhier AVE ethernet driver
+ * Copyright 2014 Panasonic Corporation
+ * Copyright 2015-2017 Socionext Inc.
+ */
+
+#include <linux/bitops.h>
+#include <linux/clk.h>
+#include <linux/etherdevice.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/iopoll.h>
+#include <linux/mii.h>
+#include <linux/module.h>
+#include <linux/netdevice.h>
+#include <linux/of_net.h>
+#include <linux/of_mdio.h>
+#include <linux/of_platform.h>
+#include <linux/phy.h>
+#include <linux/reset.h>
+#include <linux/types.h>
+#include <linux/u64_stats_sync.h>
+
+/* General Register Group */
+#define AVE_IDR			0x000	/* ID */
+#define AVE_VR			0x004	/* Version */
+#define AVE_GRR			0x008	/* Global Reset */
+#define AVE_CFGR		0x00c	/* Configuration */
+
+/* Interrupt Register Group */
+#define AVE_GIMR		0x100	/* Global Interrupt Mask */
+#define AVE_GISR		0x104	/* Global Interrupt Status */
+
+/* MAC Register Group */
+#define AVE_TXCR		0x200	/* TX Setup */
+#define AVE_RXCR		0x204	/* RX Setup */
+#define AVE_RXMAC1R		0x208	/* MAC address (lower) */
+#define AVE_RXMAC2R		0x20c	/* MAC address (upper) */
+#define AVE_MDIOCTR		0x214	/* MDIO Control */
+#define AVE_MDIOAR		0x218	/* MDIO Address */
+#define AVE_MDIOWDR		0x21c	/* MDIO Data */
+#define AVE_MDIOSR		0x220	/* MDIO Status */
+#define AVE_MDIORDR		0x224	/* MDIO Rd Data */
+
+/* Descriptor Control Register Group */
+#define AVE_DESCC		0x300	/* Descriptor Control */
+#define AVE_TXDC		0x304	/* TX Descriptor Configuration */
+#define AVE_RXDC0		0x308	/* RX Descriptor Ring0 Configuration */
+#define AVE_IIRQC		0x34c	/* Interval IRQ Control */
+
+/* Packet Filter Register Group */
+#define AVE_PKTF_BASE		0x800	/* PF Base Address */
+#define AVE_PFMBYTE_BASE	0xd00	/* PF Mask Byte Base Address */
+#define AVE_PFMBIT_BASE		0xe00	/* PF Mask Bit Base Address */
+#define AVE_PFSEL_BASE		0xf00	/* PF Selector Base Address */
+#define AVE_PFEN		0xffc	/* Packet Filter Enable */
+#define AVE_PKTF(ent)		(AVE_PKTF_BASE + (ent) * 0x40)
+#define AVE_PFMBYTE(ent)	(AVE_PFMBYTE_BASE + (ent) * 8)
+#define AVE_PFMBIT(ent)		(AVE_PFMBIT_BASE + (ent) * 4)
+#define AVE_PFSEL(ent)		(AVE_PFSEL_BASE + (ent) * 4)
+
+/* 64bit descriptor memory */
+#define AVE_DESC_SIZE_64	12	/* Descriptor Size */
+
+#define AVE_TXDM_64		0x1000	/* Tx Descriptor Memory */
+#define AVE_RXDM_64		0x1c00	/* Rx Descriptor Memory */
+
+#define AVE_TXDM_SIZE_64	0x0ba0	/* Tx Descriptor Memory Size 3KB */
+#define AVE_RXDM_SIZE_64	0x6000	/* Rx Descriptor Memory Size 24KB */
+
+/* 32bit descriptor memory */
+#define AVE_DESC_SIZE_32	8	/* Descriptor Size */
+
+#define AVE_TXDM_32		0x1000	/* Tx Descriptor Memory */
+#define AVE_RXDM_32		0x1800	/* Rx Descriptor Memory */
+
+#define AVE_TXDM_SIZE_32	0x07c0	/* Tx Descriptor Memory Size 2KB */
+#define AVE_RXDM_SIZE_32	0x4000	/* Rx Descriptor Memory Size 16KB */
+
+/* RMII Bridge Register Group */
+#define AVE_RSTCTRL		0x8028	/* Reset control */
+#define AVE_RSTCTRL_RMIIRST	BIT(16)
+#define AVE_LINKSEL		0x8034	/* Link speed setting */
+#define AVE_LINKSEL_100M	BIT(0)
+
+/* AVE_GRR */
+#define AVE_GRR_RXFFR		BIT(5)	/* Reset RxFIFO */
+#define AVE_GRR_PHYRST		BIT(4)	/* Reset external PHY */
+#define AVE_GRR_GRST		BIT(0)	/* Reset all MAC */
+
+/* AVE_CFGR */
+#define AVE_CFGR_FLE		BIT(31)	/* Filter Function */
+#define AVE_CFGR_CHE		BIT(30)	/* Checksum Function */
+#define AVE_CFGR_MII		BIT(27)	/* Func mode (1:MII/RMII, 0:RGMII) */
+#define AVE_CFGR_IPFCEN		BIT(24)	/* IP fragment sum Enable */
+
+/* AVE_GISR (common with GIMR) */
+#define AVE_GI_PHY		BIT(24)	/* PHY interrupt */
+#define AVE_GI_TX		BIT(16)	/* Tx complete */
+#define AVE_GI_RXERR		BIT(8)	/* Receive frame more than max size */
+#define AVE_GI_RXOVF		BIT(7)	/* Overflow at the RxFIFO */
+#define AVE_GI_RXDROP		BIT(6)	/* Drop packet */
+#define AVE_GI_RXIINT		BIT(5)	/* Interval interrupt */
+
+/* AVE_TXCR */
+#define AVE_TXCR_FLOCTR		BIT(18)	/* Flow control */
+#define AVE_TXCR_TXSPD_1G	BIT(17)
+#define AVE_TXCR_TXSPD_100	BIT(16)
+
+/* AVE_RXCR */
+#define AVE_RXCR_RXEN		BIT(30)	/* Rx enable */
+#define AVE_RXCR_FDUPEN		BIT(22)	/* Interface mode */
+#define AVE_RXCR_FLOCTR		BIT(21)	/* Flow control */
+#define AVE_RXCR_AFEN		BIT(19)	/* MAC address filter */
+#define AVE_RXCR_DRPEN		BIT(18)	/* Drop pause frame */
+#define AVE_RXCR_MPSIZ_MASK	GENMASK(10, 0)
+
+/* AVE_MDIOCTR */
+#define AVE_MDIOCTR_RREQ	BIT(3)	/* Read request */
+#define AVE_MDIOCTR_WREQ	BIT(2)	/* Write request */
+
+/* AVE_MDIOSR */
+#define AVE_MDIOSR_STS		BIT(0)	/* access status */
+
+/* AVE_DESCC */
+#define AVE_DESCC_STATUS_MASK	GENMASK(31, 16)
+#define AVE_DESCC_RD0		BIT(8)	/* Enable Rx descriptor Ring0 */
+#define AVE_DESCC_RDSTP		BIT(4)	/* Pause Rx descriptor */
+#define AVE_DESCC_TD		BIT(0)	/* Enable Tx descriptor */
+
+/* AVE_TXDC */
+#define AVE_TXDC_SIZE		GENMASK(27, 16)	/* Size of Tx descriptor */
+#define AVE_TXDC_ADDR		GENMASK(11, 0)	/* Start address */
+#define AVE_TXDC_ADDR_START	0
+
+/* AVE_RXDC0 */
+#define AVE_RXDC0_SIZE		GENMASK(30, 16)	/* Size of Rx descriptor */
+#define AVE_RXDC0_ADDR		GENMASK(14, 0)	/* Start address */
+#define AVE_RXDC0_ADDR_START	0
+
+/* AVE_IIRQC */
+#define AVE_IIRQC_EN0		BIT(27)	/* Enable interval interrupt Ring0 */
+#define AVE_IIRQC_BSCK		GENMASK(15, 0)	/* Interval count unit */
+
+/* Command status for descriptor */
+#define AVE_STS_OWN		BIT(31)	/* Descriptor ownership */
+#define AVE_STS_INTR		BIT(29)	/* Request for interrupt */
+#define AVE_STS_OK		BIT(27)	/* Normal transmit */
+/* TX */
+#define AVE_STS_NOCSUM		BIT(28)	/* Don't use HW checksum */
+#define AVE_STS_1ST		BIT(26)	/* Head of buffer chain */
+#define AVE_STS_LAST		BIT(25)	/* Tail of buffer chain */
+#define AVE_STS_OWC		BIT(21)	/* Out of window, late collision */
+#define AVE_STS_EC		BIT(20)	/* Excess collision occurred */
+#define AVE_STS_PKTLEN_TX_MASK	GENMASK(15, 0)
+/* RX */
+#define AVE_STS_CSSV		BIT(21)	/* Checksum check performed */
+#define AVE_STS_CSER		BIT(20)	/* Checksum error detected */
+#define AVE_STS_PKTLEN_RX_MASK	GENMASK(10, 0)
+
+/* Packet filter */
+#define AVE_PFMBYTE_MASK0	(GENMASK(31, 8) | GENMASK(5, 0))
+#define AVE_PFMBYTE_MASK1	GENMASK(25, 0)
+#define AVE_PFMBIT_MASK		GENMASK(15, 0)
+
+#define AVE_PF_SIZE		17	/* Total number of packet filters */
+#define AVE_PF_MULTICAST_SIZE	7	/* Number of multicast filters */
+
+#define AVE_PFNUM_FILTER	0	/* No.0 */
+#define AVE_PFNUM_UNICAST	1	/* No.1 */
+#define AVE_PFNUM_BROADCAST	2	/* No.2 */
+#define AVE_PFNUM_MULTICAST	11	/* No.11-17 */
+
+/* NETIF Message control */
+#define AVE_DEFAULT_MSG_ENABLE	(NETIF_MSG_DRV    |	\
+				 NETIF_MSG_PROBE  |	\
+				 NETIF_MSG_LINK   |	\
+				 NETIF_MSG_TIMER  |	\
+				 NETIF_MSG_IFDOWN |	\
+				 NETIF_MSG_IFUP   |	\
+				 NETIF_MSG_RX_ERR |	\
+				 NETIF_MSG_TX_ERR)
+
+/* Parameter for descriptor */
+#define AVE_NR_TXDESC		32	/* Tx descriptor */
+#define AVE_NR_RXDESC		64	/* Rx descriptor */
+
+#define AVE_DESC_OFS_CMDSTS	0
+#define AVE_DESC_OFS_ADDRL	4
+#define AVE_DESC_OFS_ADDRU	8
+
+/* Parameter for ethernet frame */
+#define AVE_MAX_ETHFRAME	1518
+
+/* Parameter for interrupt */
+#define AVE_INTM_COUNT		20
+#define AVE_FORCE_TXINTCNT	1
+
+#define IS_DESC_64BIT(p)	((p)->data->is_desc_64bit)
+
+enum desc_id {
+	AVE_DESCID_RX,
+	AVE_DESCID_TX,
+};
+
+enum desc_state {
+	AVE_DESC_RX_PERMIT,
+	AVE_DESC_RX_SUSPEND,
+	AVE_DESC_START,
+	AVE_DESC_STOP,
+};
+
+struct ave_desc {
+	struct sk_buff	*skbs;
+	dma_addr_t	skbs_dma;
+	size_t		skbs_dmalen;
+};
+
+struct ave_desc_info {
+	u32	ndesc;		/* number of descriptor */
+	u32	daddr;		/* start address of descriptor */
+	u32	proc_idx;	/* index of processing packet */
+	u32	done_idx;	/* index of processed packet */
+	struct ave_desc *desc;	/* skb info related descriptor */
+};
+
+struct ave_soc_data {
+	bool	is_desc_64bit;
+};
+
+struct ave_stats {
+	struct	u64_stats_sync	syncp;
+	u64	packets;
+	u64	bytes;
+	u64	errors;
+	u64	dropped;
+	u64	collisions;
+	u64	fifo_errors;
+};
+
+struct ave_private {
+	void __iomem            *base;
+	int                     irq;
+	int			phy_id;
+	unsigned int		desc_size;
+	u32			msg_enable;
+	struct clk		*clk;
+	struct reset_control	*rst;
+	phy_interface_t		phy_mode;
+	struct phy_device	*phydev;
+	struct mii_bus		*mdio;
+
+	/* stats */
+	struct ave_stats	stats_rx;
+	struct ave_stats	stats_tx;
+
+	/* NAPI support */
+	struct net_device	*ndev;
+	struct napi_struct	napi_rx;
+	struct napi_struct	napi_tx;
+
+	/* descriptor */
+	struct ave_desc_info	rx;
+	struct ave_desc_info	tx;
+
+	/* flow control */
+	int pause_auto;
+	int pause_rx;
+	int pause_tx;
+
+	const struct ave_soc_data *data;
+};
+
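+/* AVE keeps its descriptor rings in device-internal memory, so descriptor
+ * words are accessed with MMIO reads/writes relative to the register base
+ * rather than through a DMA-coherent buffer in system memory.
+ */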
+static u32 ave_desc_read(struct net_device *ndev, enum desc_id id, int entry,
+			 int offset)
+{
+	struct ave_private *priv = netdev_priv(ndev);
+	u32 addr;
+
+	addr = ((id == AVE_DESCID_TX) ? priv->tx.daddr : priv->rx.daddr)
+		+ entry * priv->desc_size + offset;
+
+	return readl(priv->base + addr);
+}
+
+static u32 ave_desc_read_cmdsts(struct net_device *ndev, enum desc_id id,
+				int entry)
+{
+	return ave_desc_read(ndev, id, entry, AVE_DESC_OFS_CMDSTS);
+}
+
+static void ave_desc_write(struct net_device *ndev, enum desc_id id,
+			   int entry, int offset, u32 val)
+{
+	struct ave_private *priv = netdev_priv(ndev);
+	u32 addr;
+
+	addr = ((id == AVE_DESCID_TX) ? priv->tx.daddr : priv->rx.daddr)
+		+ entry * priv->desc_size + offset;
+
+	writel(val, priv->base + addr);
+}
+
+static void ave_desc_write_cmdsts(struct net_device *ndev, enum desc_id id,
+				  int entry, u32 val)
+{
+	ave_desc_write(ndev, id, entry, AVE_DESC_OFS_CMDSTS, val);
+}
+
+static void ave_desc_write_addr(struct net_device *ndev, enum desc_id id,
+				int entry, dma_addr_t paddr)
+{
+	struct ave_private *priv = netdev_priv(ndev);
+
+	ave_desc_write(ndev, id, entry, AVE_DESC_OFS_ADDRL,
+		       lower_32_bits(paddr));
+	if (IS_DESC_64BIT(priv))
+		ave_desc_write(ndev, id,
+			       entry, AVE_DESC_OFS_ADDRU,
+			       upper_32_bits(paddr));
+}
+
+static u32 ave_irq_disable_all(struct net_device *ndev)
+{
+	struct ave_private *priv = netdev_priv(ndev);
+	u32 ret;
+
+	ret = readl(priv->base + AVE_GIMR);
+	writel(0, priv->base + AVE_GIMR);
+
+	return ret;
+}
+
+static void ave_irq_restore(struct net_device *ndev, u32 val)
+{
+	struct ave_private *priv = netdev_priv(ndev);
+
+	writel(val, priv->base + AVE_GIMR);
+}
+
+static void ave_irq_enable(struct net_device *ndev, u32 bitflag)
+{
+	struct ave_private *priv = netdev_priv(ndev);
+
+	writel(readl(priv->base + AVE_GIMR) | bitflag, priv->base + AVE_GIMR);
+	writel(bitflag, priv->base + AVE_GISR);
+}
+
+static void ave_hw_write_macaddr(struct net_device *ndev,
+				 const unsigned char *mac_addr,
+				 int reg1, int reg2)
+{
+	struct ave_private *priv = netdev_priv(ndev);
+
+	writel(mac_addr[0] | mac_addr[1] << 8 |
+	       mac_addr[2] << 16 | mac_addr[3] << 24, priv->base + reg1);
+	writel(mac_addr[4] | mac_addr[5] << 8, priv->base + reg2);
+}
+
+static void ave_hw_read_version(struct net_device *ndev, char *buf, int len)
+{
+	struct ave_private *priv = netdev_priv(ndev);
+	u32 major, minor, vr;
+
+	vr = readl(priv->base + AVE_VR);
+	major = (vr & GENMASK(15, 8)) >> 8;
+	minor = (vr & GENMASK(7, 0));
+	snprintf(buf, len, "v%u.%u", major, minor);
+}
+
+static void ave_ethtool_get_drvinfo(struct net_device *ndev,
+				    struct ethtool_drvinfo *info)
+{
+	struct device *dev = ndev->dev.parent;
+
+	strlcpy(info->driver, dev->driver->name, sizeof(info->driver));
+	strlcpy(info->bus_info, dev_name(dev), sizeof(info->bus_info));
+	ave_hw_read_version(ndev, info->fw_version, sizeof(info->fw_version));
+}
+
+static u32 ave_ethtool_get_msglevel(struct net_device *ndev)
+{
+	struct ave_private *priv = netdev_priv(ndev);
+
+	return priv->msg_enable;
+}
+
+static void ave_ethtool_set_msglevel(struct net_device *ndev, u32 val)
+{
+	struct ave_private *priv = netdev_priv(ndev);
+
+	priv->msg_enable = val;
+}
+
+static void ave_ethtool_get_wol(struct net_device *ndev,
+				struct ethtool_wolinfo *wol)
+{
+	wol->supported = 0;
+	wol->wolopts   = 0;
+
+	if (ndev->phydev)
+		phy_ethtool_get_wol(ndev->phydev, wol);
+}
+
+static int ave_ethtool_set_wol(struct net_device *ndev,
+			       struct ethtool_wolinfo *wol)
+{
+	int ret;
+
+	if (!ndev->phydev ||
+	    (wol->wolopts & (WAKE_ARP | WAKE_MAGICSECURE)))
+		return -EOPNOTSUPP;
+
+	ret = phy_ethtool_set_wol(ndev->phydev, wol);
+	if (!ret)
+		device_set_wakeup_enable(&ndev->dev, !!wol->wolopts);
+
+	return ret;
+}
+
+static void ave_ethtool_get_pauseparam(struct net_device *ndev,
+				       struct ethtool_pauseparam *pause)
+{
+	struct ave_private *priv = netdev_priv(ndev);
+
+	pause->autoneg  = priv->pause_auto;
+	pause->rx_pause = priv->pause_rx;
+	pause->tx_pause = priv->pause_tx;
+}
+
+static int ave_ethtool_set_pauseparam(struct net_device *ndev,
+				      struct ethtool_pauseparam *pause)
+{
+	struct ave_private *priv = netdev_priv(ndev);
+	struct phy_device *phydev = ndev->phydev;
+
+	if (!phydev)
+		return -EINVAL;
+
+	priv->pause_auto = pause->autoneg;
+	priv->pause_rx   = pause->rx_pause;
+	priv->pause_tx   = pause->tx_pause;
+
+	phydev->advertising &= ~(ADVERTISED_Pause | ADVERTISED_Asym_Pause);
+	if (pause->rx_pause)
+		phydev->advertising |= ADVERTISED_Pause | ADVERTISED_Asym_Pause;
+	if (pause->tx_pause)
+		phydev->advertising ^= ADVERTISED_Asym_Pause;
+
+	if (pause->autoneg) {
+		if (netif_running(ndev))
+			phy_start_aneg(phydev);
+	}
+
+	return 0;
+}
+
+static const struct ethtool_ops ave_ethtool_ops = {
+	.get_link_ksettings	= phy_ethtool_get_link_ksettings,
+	.set_link_ksettings	= phy_ethtool_set_link_ksettings,
+	.get_drvinfo		= ave_ethtool_get_drvinfo,
+	.nway_reset		= phy_ethtool_nway_reset,
+	.get_link		= ethtool_op_get_link,
+	.get_msglevel		= ave_ethtool_get_msglevel,
+	.set_msglevel		= ave_ethtool_set_msglevel,
+	.get_wol		= ave_ethtool_get_wol,
+	.set_wol		= ave_ethtool_set_wol,
+	.get_pauseparam         = ave_ethtool_get_pauseparam,
+	.set_pauseparam         = ave_ethtool_set_pauseparam,
+};
+
+static int ave_mdiobus_read(struct mii_bus *bus, int phyid, int regnum)
+{
+	struct net_device *ndev = bus->priv;
+	struct ave_private *priv;
+	u32 mdioctl, mdiosr;
+	int ret;
+
+	priv = netdev_priv(ndev);
+
+	/* write address */
+	writel((phyid << 8) | regnum, priv->base + AVE_MDIOAR);
+
+	/* read request */
+	mdioctl = readl(priv->base + AVE_MDIOCTR);
+	writel((mdioctl | AVE_MDIOCTR_RREQ) & ~AVE_MDIOCTR_WREQ,
+	       priv->base + AVE_MDIOCTR);
+
+	ret = readl_poll_timeout(priv->base + AVE_MDIOSR, mdiosr,
+				 !(mdiosr & AVE_MDIOSR_STS), 20, 2000);
+	if (ret) {
+		netdev_err(ndev, "failed to read (phy:%d reg:%x)\n",
+			   phyid, regnum);
+		return ret;
+	}
+
+	return readl(priv->base + AVE_MDIORDR) & GENMASK(15, 0);
+}
+
+static int ave_mdiobus_write(struct mii_bus *bus, int phyid, int regnum,
+			     u16 val)
+{
+	struct net_device *ndev = bus->priv;
+	struct ave_private *priv;
+	u32 mdioctl, mdiosr;
+	int ret;
+
+	priv = netdev_priv(ndev);
+
+	/* write address */
+	writel((phyid << 8) | regnum, priv->base + AVE_MDIOAR);
+
+	/* write data */
+	writel(val, priv->base + AVE_MDIOWDR);
+
+	/* write request */
+	mdioctl = readl(priv->base + AVE_MDIOCTR);
+	writel((mdioctl | AVE_MDIOCTR_WREQ) & ~AVE_MDIOCTR_RREQ,
+	       priv->base + AVE_MDIOCTR);
+
+	ret = readl_poll_timeout(priv->base + AVE_MDIOSR, mdiosr,
+				 !(mdiosr & AVE_MDIOSR_STS), 20, 2000);
+	if (ret)
+		netdev_err(ndev, "failed to write (phy:%d reg:%x)\n",
+			   phyid, regnum);
+
+	return ret;
+}
+
+static int ave_dma_map(struct net_device *ndev, struct ave_desc *desc,
+		       void *ptr, size_t len, enum dma_data_direction dir,
+		       dma_addr_t *paddr)
+{
+	dma_addr_t map_addr;
+
+	map_addr = dma_map_single(ndev->dev.parent, ptr, len, dir);
+	if (unlikely(dma_mapping_error(ndev->dev.parent, map_addr)))
+		return -ENOMEM;
+
+	desc->skbs_dma = map_addr;
+	desc->skbs_dmalen = len;
+	*paddr = map_addr;
+
+	return 0;
+}
+
+static void ave_dma_unmap(struct net_device *ndev, struct ave_desc *desc,
+			  enum dma_data_direction dir)
+{
+	if (!desc->skbs_dma)
+		return;
+
+	dma_unmap_single(ndev->dev.parent,
+			 desc->skbs_dma, desc->skbs_dmalen, dir);
+	desc->skbs_dma = 0;
+}
+
+/* Prepare Rx descriptor and memory */
+static int ave_rxdesc_prepare(struct net_device *ndev, int entry)
+{
+	struct ave_private *priv = netdev_priv(ndev);
+	struct sk_buff *skb;
+	dma_addr_t paddr;
+	int ret;
+
+	skb = priv->rx.desc[entry].skbs;
+	if (!skb) {
+		skb = netdev_alloc_skb_ip_align(ndev,
+						AVE_MAX_ETHFRAME);
+		if (!skb) {
+			netdev_err(ndev, "can't allocate skb for Rx\n");
+			return -ENOMEM;
+		}
+	}
+
+	/* mark the descriptor disabled in cmdsts while the buffer is prepared */
+	ave_desc_write_cmdsts(ndev, AVE_DESCID_RX, entry,
+			      AVE_STS_INTR | AVE_STS_OWN);
+
+	/* map Rx buffer
+	 * The Rx buffer attached to an Rx descriptor has two restrictions:
+	 * - the buffer address must be 4-byte aligned.
+	 * - the buffer must begin with 2 bytes of headroom; data is stored
+	 *   from (buffer + 2).
+	 * To satisfy this, rewind the buffer pointer that
+	 * netdev_alloc_skb_ip_align() advanced by NET_IP_ALIGN, and expand
+	 * the mapped size by NET_IP_ALIGN.
+	 */
+	ret = ave_dma_map(ndev, &priv->rx.desc[entry],
+			  skb->data - NET_IP_ALIGN,
+			  AVE_MAX_ETHFRAME + NET_IP_ALIGN,
+			  DMA_FROM_DEVICE, &paddr);
+	if (ret) {
+		netdev_err(ndev, "can't map skb for Rx\n");
+		dev_kfree_skb_any(skb);
+		return ret;
+	}
+	priv->rx.desc[entry].skbs = skb;
+
+	/* set buffer pointer */
+	ave_desc_write_addr(ndev, AVE_DESCID_RX, entry, paddr);
+
+	/* enable the descriptor, handing the buffer back to hardware */
+	ave_desc_write_cmdsts(ndev, AVE_DESCID_RX, entry,
+			      AVE_STS_INTR | AVE_MAX_ETHFRAME);
+
+	return ret;
+}
+
+/* Switch state of descriptor */
+static int ave_desc_switch(struct net_device *ndev, enum desc_state state)
+{
+	struct ave_private *priv = netdev_priv(ndev);
+	int ret = 0;
+	u32 val;
+
+	switch (state) {
+	case AVE_DESC_START:
+		writel(AVE_DESCC_TD | AVE_DESCC_RD0, priv->base + AVE_DESCC);
+		break;
+
+	case AVE_DESC_STOP:
+		writel(0, priv->base + AVE_DESCC);
+		if (readl_poll_timeout(priv->base + AVE_DESCC, val, !val,
+				       150, 15000)) {
+			netdev_err(ndev, "can't stop descriptor\n");
+			ret = -EBUSY;
+		}
+		break;
+
+	case AVE_DESC_RX_SUSPEND:
+		val = readl(priv->base + AVE_DESCC);
+		val |= AVE_DESCC_RDSTP;
+		val &= ~AVE_DESCC_STATUS_MASK;
+		writel(val, priv->base + AVE_DESCC);
+		if (readl_poll_timeout(priv->base + AVE_DESCC, val,
+				       val & (AVE_DESCC_RDSTP << 16),
+				       150, 150000)) {
+			netdev_err(ndev, "can't suspend descriptor\n");
+			ret = -EBUSY;
+		}
+		break;
+
+	case AVE_DESC_RX_PERMIT:
+		val = readl(priv->base + AVE_DESCC);
+		val &= ~AVE_DESCC_RDSTP;
+		val &= ~AVE_DESCC_STATUS_MASK;
+		writel(val, priv->base + AVE_DESCC);
+		break;
+
+	default:
+		ret = -EINVAL;
+		break;
+	}
+
+	return ret;
+}
+
+static int ave_tx_complete(struct net_device *ndev)
+{
+	struct ave_private *priv = netdev_priv(ndev);
+	u32 proc_idx, done_idx, ndesc, cmdsts;
+	unsigned int nr_freebuf = 0;
+	unsigned int tx_packets = 0;
+	unsigned int tx_bytes = 0;
+
+	proc_idx = priv->tx.proc_idx;
+	done_idx = priv->tx.done_idx;
+	ndesc    = priv->tx.ndesc;
+
+	/* free completed skbs from done_idx up to proc_idx */
+	while (proc_idx != done_idx) {
+		cmdsts = ave_desc_read_cmdsts(ndev, AVE_DESCID_TX, done_idx);
+
+		/* do nothing if owner is HW (==1 for Tx) */
+		if (cmdsts & AVE_STS_OWN)
+			break;
+
+		/* check Tx status and update statistics */
+		if (cmdsts & AVE_STS_OK) {
+			tx_bytes += cmdsts & AVE_STS_PKTLEN_TX_MASK;
+			/* success */
+			if (cmdsts & AVE_STS_LAST)
+				tx_packets++;
+		} else {
+			/* error */
+			if (cmdsts & AVE_STS_LAST) {
+				priv->stats_tx.errors++;
+				if (cmdsts & (AVE_STS_OWC | AVE_STS_EC))
+					priv->stats_tx.collisions++;
+			}
+		}
+
+		/* release skb */
+		if (priv->tx.desc[done_idx].skbs) {
+			ave_dma_unmap(ndev, &priv->tx.desc[done_idx],
+				      DMA_TO_DEVICE);
+			dev_consume_skb_any(priv->tx.desc[done_idx].skbs);
+			priv->tx.desc[done_idx].skbs = NULL;
+			nr_freebuf++;
+		}
+		done_idx = (done_idx + 1) % ndesc;
+	}
+
+	priv->tx.done_idx = done_idx;
+
+	/* update stats */
+	u64_stats_update_begin(&priv->stats_tx.syncp);
+	priv->stats_tx.packets += tx_packets;
+	priv->stats_tx.bytes   += tx_bytes;
+	u64_stats_update_end(&priv->stats_tx.syncp);
+
+	/* wake queue for freeing buffer */
+	if (unlikely(netif_queue_stopped(ndev)) && nr_freebuf)
+		netif_wake_queue(ndev);
+
+	return nr_freebuf;
+}
+
+static int ave_rx_receive(struct net_device *ndev, int num)
+{
+	struct ave_private *priv = netdev_priv(ndev);
+	unsigned int rx_packets = 0;
+	unsigned int rx_bytes = 0;
+	u32 proc_idx, done_idx;
+	struct sk_buff *skb;
+	unsigned int pktlen;
+	int restpkt, npkts;
+	u32 ndesc, cmdsts;
+
+	proc_idx = priv->rx.proc_idx;
+	done_idx = priv->rx.done_idx;
+	ndesc    = priv->rx.ndesc;
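+	/* one ring entry is kept unused so that proc_idx == done_idx always
+	 * means "empty"; at most ndesc - 1 entries hold received packets
+	 */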
+	restpkt  = ((proc_idx + ndesc - 1) - done_idx) % ndesc;
+
+	for (npkts = 0; npkts < num; npkts++) {
+		/* no more prepared descriptors remain, so stop and refill them quickly */
+		if (--restpkt < 0)
+			break;
+
+		cmdsts = ave_desc_read_cmdsts(ndev, AVE_DESCID_RX, proc_idx);
+
+		/* do nothing if owner is HW (==0 for Rx) */
+		if (!(cmdsts & AVE_STS_OWN))
+			break;
+
+		if (!(cmdsts & AVE_STS_OK)) {
+			priv->stats_rx.errors++;
+			proc_idx = (proc_idx + 1) % ndesc;
+			continue;
+		}
+
+		pktlen = cmdsts & AVE_STS_PKTLEN_RX_MASK;
+
+		/* get skbuff for rx */
+		skb = priv->rx.desc[proc_idx].skbs;
+		priv->rx.desc[proc_idx].skbs = NULL;
+
+		ave_dma_unmap(ndev, &priv->rx.desc[proc_idx], DMA_FROM_DEVICE);
+
+		skb->dev = ndev;
+		skb_put(skb, pktlen);
+		skb->protocol = eth_type_trans(skb, ndev);
+
+		if ((cmdsts & AVE_STS_CSSV) && (!(cmdsts & AVE_STS_CSER)))
+			skb->ip_summed = CHECKSUM_UNNECESSARY;
+
+		rx_packets++;
+		rx_bytes += pktlen;
+
+		netif_receive_skb(skb);
+
+		proc_idx = (proc_idx + 1) % ndesc;
+	}
+
+	priv->rx.proc_idx = proc_idx;
+
+	/* update stats */
+	u64_stats_update_begin(&priv->stats_rx.syncp);
+	priv->stats_rx.packets += rx_packets;
+	priv->stats_rx.bytes   += rx_bytes;
+	u64_stats_update_end(&priv->stats_rx.syncp);
+
+	/* refill the Rx buffers */
+	while (proc_idx != done_idx) {
+		if (ave_rxdesc_prepare(ndev, done_idx))
+			break;
+		done_idx = (done_idx + 1) % ndesc;
+	}
+
+	priv->rx.done_idx = done_idx;
+
+	return npkts;
+}
+
+static int ave_napi_poll_rx(struct napi_struct *napi, int budget)
+{
+	struct ave_private *priv;
+	struct net_device *ndev;
+	int num;
+
+	priv = container_of(napi, struct ave_private, napi_rx);
+	ndev = priv->ndev;
+
+	num = ave_rx_receive(ndev, budget);
+	if (num < budget) {
+		napi_complete_done(napi, num);
+
+		/* enable Rx interrupt when NAPI finishes */
+		ave_irq_enable(ndev, AVE_GI_RXIINT);
+	}
+
+	return num;
+}
+
+static int ave_napi_poll_tx(struct napi_struct *napi, int budget)
+{
+	struct ave_private *priv;
+	struct net_device *ndev;
+	int num;
+
+	priv = container_of(napi, struct ave_private, napi_tx);
+	ndev = priv->ndev;
+
+	num = ave_tx_complete(ndev);
+	napi_complete(napi);
+
+	/* enable Tx interrupt when NAPI finishes */
+	ave_irq_enable(ndev, AVE_GI_TX);
+
+	return num;
+}
+
+static void ave_global_reset(struct net_device *ndev)
+{
+	struct ave_private *priv = netdev_priv(ndev);
+	u32 val;
+
+	/* set config register */
+	val = AVE_CFGR_FLE | AVE_CFGR_IPFCEN | AVE_CFGR_CHE;
+	if (!phy_interface_mode_is_rgmii(priv->phy_mode))
+		val |= AVE_CFGR_MII;
+	writel(val, priv->base + AVE_CFGR);
+
+	/* reset RMII register */
+	val = readl(priv->base + AVE_RSTCTRL);
+	val &= ~AVE_RSTCTRL_RMIIRST;
+	writel(val, priv->base + AVE_RSTCTRL);
+
+	/* assert reset */
+	writel(AVE_GRR_GRST | AVE_GRR_PHYRST, priv->base + AVE_GRR);
+	msleep(20);
+
+	/* 1st, negate PHY reset only */
+	writel(AVE_GRR_GRST, priv->base + AVE_GRR);
+	msleep(40);
+
+	/* negate reset */
+	writel(0, priv->base + AVE_GRR);
+	msleep(40);
+
+	/* negate RMII register */
+	val = readl(priv->base + AVE_RSTCTRL);
+	val |= AVE_RSTCTRL_RMIIRST;
+	writel(val, priv->base + AVE_RSTCTRL);
+
+	ave_irq_disable_all(ndev);
+}
+
+static void ave_rxfifo_reset(struct net_device *ndev)
+{
+	struct ave_private *priv = netdev_priv(ndev);
+	u32 rxcr_org;
+
+	/* save and disable MAC receive op */
+	rxcr_org = readl(priv->base + AVE_RXCR);
+	writel(rxcr_org & (~AVE_RXCR_RXEN), priv->base + AVE_RXCR);
+
+	/* suspend Rx descriptor */
+	ave_desc_switch(ndev, AVE_DESC_RX_SUSPEND);
+
+	/* drain packets already received before the descriptors restart */
+	ave_rx_receive(ndev, priv->rx.ndesc);
+
+	/* assert reset */
+	writel(AVE_GRR_RXFFR, priv->base + AVE_GRR);
+	usleep_range(40, 50);
+
+	/* negate reset */
+	writel(0, priv->base + AVE_GRR);
+	usleep_range(10, 20);
+
+	/* negate interrupt status */
+	writel(AVE_GI_RXOVF, priv->base + AVE_GISR);
+
+	/* permit descriptor */
+	ave_desc_switch(ndev, AVE_DESC_RX_PERMIT);
+
+	/* restore MAC receive op */
+	writel(rxcr_org, priv->base + AVE_RXCR);
+}
+
+static irqreturn_t ave_irq_handler(int irq, void *netdev)
+{
+	struct net_device *ndev = (struct net_device *)netdev;
+	struct ave_private *priv = netdev_priv(ndev);
+	u32 gimr_val, gisr_val;
+
+	gimr_val = ave_irq_disable_all(ndev);
+
+	/* get interrupt status */
+	gisr_val = readl(priv->base + AVE_GISR);
+
+	/* PHY */
+	if (gisr_val & AVE_GI_PHY)
+		writel(AVE_GI_PHY, priv->base + AVE_GISR);
+
+	/* check for an oversized packet */
+	if (gisr_val & AVE_GI_RXERR) {
+		writel(AVE_GI_RXERR, priv->base + AVE_GISR);
+		netdev_err(ndev, "received a packet exceeding the frame buffer\n");
+	}
+
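+	/* only service the sources that were actually enabled */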
+	gisr_val &= gimr_val;
+	if (!gisr_val)
+		goto exit_isr;
+
+	/* RxFIFO overflow */
+	if (gisr_val & AVE_GI_RXOVF) {
+		priv->stats_rx.fifo_errors++;
+		ave_rxfifo_reset(ndev);
+		goto exit_isr;
+	}
+
+	/* Rx drop */
+	if (gisr_val & AVE_GI_RXDROP) {
+		priv->stats_rx.dropped++;
+		writel(AVE_GI_RXDROP, priv->base + AVE_GISR);
+	}
+
+	/* Rx interval */
+	if (gisr_val & AVE_GI_RXIINT) {
+		napi_schedule(&priv->napi_rx);
+		/* keep the Rx interrupt disabled until NAPI finishes */
+		gimr_val &= ~AVE_GI_RXIINT;
+	}
+
+	/* Tx completed */
+	if (gisr_val & AVE_GI_TX) {
+		napi_schedule(&priv->napi_tx);
+		/* keep the Tx interrupt disabled until NAPI finishes */
+		gimr_val &= ~AVE_GI_TX;
+	}
+
+exit_isr:
+	ave_irq_restore(ndev, gimr_val);
+
+	return IRQ_HANDLED;
+}
+
+static int ave_pfsel_start(struct net_device *ndev, unsigned int entry)
+{
+	struct ave_private *priv = netdev_priv(ndev);
+	u32 val;
+
+	if (WARN_ON(entry > AVE_PF_SIZE))
+		return -EINVAL;
+
+	val = readl(priv->base + AVE_PFEN);
+	writel(val | BIT(entry), priv->base + AVE_PFEN);
+
+	return 0;
+}
+
+static int ave_pfsel_stop(struct net_device *ndev, unsigned int entry)
+{
+	struct ave_private *priv = netdev_priv(ndev);
+	u32 val;
+
+	if (WARN_ON(entry > AVE_PF_SIZE))
+		return -EINVAL;
+
+	val = readl(priv->base + AVE_PFEN);
+	writel(val & ~BIT(entry), priv->base + AVE_PFEN);
+
+	return 0;
+}
+
+static int ave_pfsel_set_macaddr(struct net_device *ndev,
+				 unsigned int entry,
+				 const unsigned char *mac_addr,
+				 unsigned int set_size)
+{
+	struct ave_private *priv = netdev_priv(ndev);
+
+	if (WARN_ON(entry > AVE_PF_SIZE))
+		return -EINVAL;
+	if (WARN_ON(set_size > 6))
+		return -EINVAL;
+
+	ave_pfsel_stop(ndev, entry);
+
+	/* set MAC address for the filter */
+	ave_hw_write_macaddr(ndev, mac_addr,
+			     AVE_PKTF(entry), AVE_PKTF(entry) + 4);
+
+	/* set byte mask */
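+	/* a clear mask bit selects that byte for comparison, so only the
+	 * first set_size bytes of the address are matched
+	 */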
+	writel(GENMASK(31, set_size) & AVE_PFMBYTE_MASK0,
+	       priv->base + AVE_PFMBYTE(entry));
+	writel(AVE_PFMBYTE_MASK1, priv->base + AVE_PFMBYTE(entry) + 4);
+
+	/* set bit mask filter */
+	writel(AVE_PFMBIT_MASK, priv->base + AVE_PFMBIT(entry));
+
+	/* set selector to ring 0 */
+	writel(0, priv->base + AVE_PFSEL(entry));
+
+	/* restart filter */
+	ave_pfsel_start(ndev, entry);
+
+	return 0;
+}
+
+static void ave_pfsel_set_promisc(struct net_device *ndev,
+				  unsigned int entry, u32 rxring)
+{
+	struct ave_private *priv = netdev_priv(ndev);
+
+	if (WARN_ON(entry > AVE_PF_SIZE))
+		return;
+
+	ave_pfsel_stop(ndev, entry);
+
+	/* set byte mask */
+	writel(AVE_PFMBYTE_MASK0, priv->base + AVE_PFMBYTE(entry));
+	writel(AVE_PFMBYTE_MASK1, priv->base + AVE_PFMBYTE(entry) + 4);
+
+	/* set bit mask filter */
+	writel(AVE_PFMBIT_MASK, priv->base + AVE_PFMBIT(entry));
+
+	/* set selector to rxring */
+	writel(rxring, priv->base + AVE_PFSEL(entry));
+
+	ave_pfsel_start(ndev, entry);
+}
+
+static void ave_pfsel_init(struct net_device *ndev)
+{
+	unsigned char bcast_mac[ETH_ALEN];
+	int i;
+
+	eth_broadcast_addr(bcast_mac);
+
+	for (i = 0; i < AVE_PF_SIZE; i++)
+		ave_pfsel_stop(ndev, i);
+
+	/* promiscuous entry, select ring 0 */
+	ave_pfsel_set_promisc(ndev, AVE_PFNUM_FILTER, 0);
+
+	/* unicast entry */
+	ave_pfsel_set_macaddr(ndev, AVE_PFNUM_UNICAST, ndev->dev_addr, 6);
+
+	/* broadcast entry */
+	ave_pfsel_set_macaddr(ndev, AVE_PFNUM_BROADCAST, bcast_mac, 6);
+}
+
+static void ave_phy_adjust_link(struct net_device *ndev)
+{
+	struct ave_private *priv = netdev_priv(ndev);
+	struct phy_device *phydev = ndev->phydev;
+	u32 val, txcr, rxcr, rxcr_org;
+	u16 rmt_adv = 0, lcl_adv = 0;
+	u8 cap;
+
+	/* set RGMII speed */
+	val = readl(priv->base + AVE_TXCR);
+	val &= ~(AVE_TXCR_TXSPD_100 | AVE_TXCR_TXSPD_1G);
+
+	if (phy_interface_is_rgmii(phydev) && phydev->speed == SPEED_1000)
+		val |= AVE_TXCR_TXSPD_1G;
+	else if (phydev->speed == SPEED_100)
+		val |= AVE_TXCR_TXSPD_100;
+
+	writel(val, priv->base + AVE_TXCR);
+
+	/* set RMII speed (100M/10M only) */
+	if (!phy_interface_is_rgmii(phydev)) {
+		val = readl(priv->base + AVE_LINKSEL);
+		if (phydev->speed == SPEED_10)
+			val &= ~AVE_LINKSEL_100M;
+		else
+			val |= AVE_LINKSEL_100M;
+		writel(val, priv->base + AVE_LINKSEL);
+	}
+
+	/* check current RXCR/TXCR */
+	rxcr = readl(priv->base + AVE_RXCR);
+	txcr = readl(priv->base + AVE_TXCR);
+	rxcr_org = rxcr;
+
+	if (phydev->duplex) {
+		rxcr |= AVE_RXCR_FDUPEN;
+
+		if (phydev->pause)
+			rmt_adv |= LPA_PAUSE_CAP;
+		if (phydev->asym_pause)
+			rmt_adv |= LPA_PAUSE_ASYM;
+		if (phydev->advertising & ADVERTISED_Pause)
+			lcl_adv |= ADVERTISE_PAUSE_CAP;
+		if (phydev->advertising & ADVERTISED_Asym_Pause)
+			lcl_adv |= ADVERTISE_PAUSE_ASYM;
+
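+		/* resolve pause frame usage from local and remote advertisement */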
+		cap = mii_resolve_flowctrl_fdx(lcl_adv, rmt_adv);
+		if (cap & FLOW_CTRL_TX)
+			txcr |= AVE_TXCR_FLOCTR;
+		else
+			txcr &= ~AVE_TXCR_FLOCTR;
+		if (cap & FLOW_CTRL_RX)
+			rxcr |= AVE_RXCR_FLOCTR;
+		else
+			rxcr &= ~AVE_RXCR_FLOCTR;
+	} else {
+		rxcr &= ~AVE_RXCR_FDUPEN;
+		rxcr &= ~AVE_RXCR_FLOCTR;
+		txcr &= ~AVE_TXCR_FLOCTR;
+	}
+
+	if (rxcr_org != rxcr) {
+		/* disable Rx mac */
+		writel(rxcr & ~AVE_RXCR_RXEN, priv->base + AVE_RXCR);
+		/* change and enable Tx/Rx mac */
+		writel(txcr, priv->base + AVE_TXCR);
+		writel(rxcr, priv->base + AVE_RXCR);
+	}
+
+	phy_print_status(phydev);
+}
+
+static void ave_macaddr_init(struct net_device *ndev)
+{
+	ave_hw_write_macaddr(ndev, ndev->dev_addr, AVE_RXMAC1R, AVE_RXMAC2R);
+
+	/* pfsel unicast entry */
+	ave_pfsel_set_macaddr(ndev, AVE_PFNUM_UNICAST, ndev->dev_addr, 6);
+}
+
+static int ave_init(struct net_device *ndev)
+{
+	struct ethtool_wolinfo wol = { .cmd = ETHTOOL_GWOL };
+	struct ave_private *priv = netdev_priv(ndev);
+	struct device *dev = ndev->dev.parent;
+	struct device_node *np = dev->of_node;
+	struct device_node *mdio_np;
+	struct phy_device *phydev;
+	int ret;
+
+	/* enable clk here because the hw is accessed before ndo_open */
+	ret = clk_prepare_enable(priv->clk);
+	if (ret) {
+		dev_err(dev, "can't enable clock\n");
+		return ret;
+	}
+	ret = reset_control_deassert(priv->rst);
+	if (ret) {
+		dev_err(dev, "can't deassert reset\n");
+		goto out_clk_disable;
+	}
+
+	ave_global_reset(ndev);
+
+	mdio_np = of_get_child_by_name(np, "mdio");
+	if (!mdio_np) {
+		dev_err(dev, "mdio node not found\n");
+		ret = -EINVAL;
+		goto out_reset_assert;
+	}
+	ret = of_mdiobus_register(priv->mdio, mdio_np);
+	of_node_put(mdio_np);
+	if (ret) {
+		dev_err(dev, "failed to register mdiobus\n");
+		goto out_reset_assert;
+	}
+
+	phydev = of_phy_get_and_connect(ndev, np, ave_phy_adjust_link);
+	if (!phydev) {
+		dev_err(dev, "could not attach to PHY\n");
+		ret = -ENODEV;
+		goto out_mdio_unregister;
+	}
+
+	priv->phydev = phydev;
+
+	phy_ethtool_get_wol(phydev, &wol);
+	device_set_wakeup_capable(&ndev->dev, !!wol.supported);
+
+	if (!phy_interface_is_rgmii(phydev)) {
+		phydev->supported &= ~PHY_GBIT_FEATURES;
+		phydev->supported |= PHY_BASIC_FEATURES;
+	}
+	phydev->supported |= SUPPORTED_Pause | SUPPORTED_Asym_Pause;
+
+	phy_attached_info(phydev);
+
+	return 0;
+
+out_mdio_unregister:
+	mdiobus_unregister(priv->mdio);
+out_reset_assert:
+	reset_control_assert(priv->rst);
+out_clk_disable:
+	clk_disable_unprepare(priv->clk);
+
+	return ret;
+}
+
+static void ave_uninit(struct net_device *ndev)
+{
+	struct ave_private *priv = netdev_priv(ndev);
+
+	phy_disconnect(priv->phydev);
+	mdiobus_unregister(priv->mdio);
+
+	/* clk was kept on for hw access after ndo_stop; disable it now */
+	reset_control_assert(priv->rst);
+	clk_disable_unprepare(priv->clk);
+}
+
+static int ave_open(struct net_device *ndev)
+{
+	struct ave_private *priv = netdev_priv(ndev);
+	int entry;
+	int ret;
+	u32 val;
+
+	ret = request_irq(priv->irq, ave_irq_handler, IRQF_SHARED, ndev->name,
+			  ndev);
+	if (ret)
+		return ret;
+
+	priv->tx.desc = kcalloc(priv->tx.ndesc, sizeof(*priv->tx.desc),
+				GFP_KERNEL);
+	if (!priv->tx.desc) {
+		ret = -ENOMEM;
+		goto out_free_irq;
+	}
+
+	priv->rx.desc = kcalloc(priv->rx.ndesc, sizeof(*priv->rx.desc),
+				GFP_KERNEL);
+	if (!priv->rx.desc) {
+		kfree(priv->tx.desc);
+		ret = -ENOMEM;
+		goto out_free_irq;
+	}
+
+	/* initialize Tx work and descriptor */
+	priv->tx.proc_idx = 0;
+	priv->tx.done_idx = 0;
+	for (entry = 0; entry < priv->tx.ndesc; entry++) {
+		ave_desc_write_cmdsts(ndev, AVE_DESCID_TX, entry, 0);
+		ave_desc_write_addr(ndev, AVE_DESCID_TX, entry, 0);
+	}
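+	/* program the ring base; the byte size of the ring is carried in
+	 * the upper 16 bits of the descriptor control word
+	 */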
+	writel(AVE_TXDC_ADDR_START |
+	       (((priv->tx.ndesc * priv->desc_size) << 16) & AVE_TXDC_SIZE),
+	       priv->base + AVE_TXDC);
+
+	/* initialize Rx work and descriptor */
+	priv->rx.proc_idx = 0;
+	priv->rx.done_idx = 0;
+	for (entry = 0; entry < priv->rx.ndesc; entry++) {
+		if (ave_rxdesc_prepare(ndev, entry))
+			break;
+	}
+	writel(AVE_RXDC0_ADDR_START |
+	       (((priv->rx.ndesc * priv->desc_size) << 16) & AVE_RXDC0_SIZE),
+	       priv->base + AVE_RXDC0);
+
+	ave_desc_switch(ndev, AVE_DESC_START);
+
+	ave_pfsel_init(ndev);
+	ave_macaddr_init(ndev);
+
+	/* set Rx configuration */
+	/* full duplex, enable pause drop, enable flow control */
+	val = AVE_RXCR_RXEN | AVE_RXCR_FDUPEN | AVE_RXCR_DRPEN |
+		AVE_RXCR_FLOCTR | (AVE_MAX_ETHFRAME & AVE_RXCR_MPSIZ_MASK);
+	writel(val, priv->base + AVE_RXCR);
+
+	/* set Tx configuration */
+	/* enable flow control, disable loopback */
+	writel(AVE_TXCR_FLOCTR, priv->base + AVE_TXCR);
+
+	/* enable the interval timer: preserve the interval unit (BSCK),
+	 * clear EN/INTM, then set EN0 and the interval count
+	 */
+	val = readl(priv->base + AVE_IIRQC) & AVE_IIRQC_BSCK;
+	val |= AVE_IIRQC_EN0 | (AVE_INTM_COUNT << 16);
+	writel(val, priv->base + AVE_IIRQC);
+
+	val = AVE_GI_RXIINT | AVE_GI_RXOVF | AVE_GI_TX;
+	ave_irq_restore(ndev, val);
+
+	napi_enable(&priv->napi_rx);
+	napi_enable(&priv->napi_tx);
+
+	phy_start(ndev->phydev);
+	phy_start_aneg(ndev->phydev);
+	netif_start_queue(ndev);
+
+	return 0;
+
+out_free_irq:
+	disable_irq(priv->irq);
+	free_irq(priv->irq, ndev);
+
+	return ret;
+}
+
+static int ave_stop(struct net_device *ndev)
+{
+	struct ave_private *priv = netdev_priv(ndev);
+	int entry;
+
+	ave_irq_disable_all(ndev);
+	disable_irq(priv->irq);
+	free_irq(priv->irq, ndev);
+
+	netif_tx_disable(ndev);
+	phy_stop(ndev->phydev);
+	napi_disable(&priv->napi_tx);
+	napi_disable(&priv->napi_rx);
+
+	ave_desc_switch(ndev, AVE_DESC_STOP);
+
+	/* free Tx buffer */
+	for (entry = 0; entry < priv->tx.ndesc; entry++) {
+		if (!priv->tx.desc[entry].skbs)
+			continue;
+
+		ave_dma_unmap(ndev, &priv->tx.desc[entry], DMA_TO_DEVICE);
+		dev_kfree_skb_any(priv->tx.desc[entry].skbs);
+		priv->tx.desc[entry].skbs = NULL;
+	}
+	priv->tx.proc_idx = 0;
+	priv->tx.done_idx = 0;
+
+	/* free Rx buffer */
+	for (entry = 0; entry < priv->rx.ndesc; entry++) {
+		if (!priv->rx.desc[entry].skbs)
+			continue;
+
+		ave_dma_unmap(ndev, &priv->rx.desc[entry], DMA_FROM_DEVICE);
+		dev_kfree_skb_any(priv->rx.desc[entry].skbs);
+		priv->rx.desc[entry].skbs = NULL;
+	}
+	priv->rx.proc_idx = 0;
+	priv->rx.done_idx = 0;
+
+	kfree(priv->tx.desc);
+	kfree(priv->rx.desc);
+
+	return 0;
+}
+
+static int ave_start_xmit(struct sk_buff *skb, struct net_device *ndev)
+{
+	struct ave_private *priv = netdev_priv(ndev);
+	u32 proc_idx, done_idx, ndesc, cmdsts;
+	int ret, freepkt;
+	dma_addr_t paddr;
+
+	proc_idx = priv->tx.proc_idx;
+	done_idx = priv->tx.done_idx;
+	ndesc = priv->tx.ndesc;
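+	/* as on the Rx side, one ring entry stays unused, so at most
+	 * ndesc - 1 packets can be in flight
+	 */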
+	freepkt = ((done_idx + ndesc - 1) - proc_idx) % ndesc;
+
+	/* stop the queue when there aren't enough free entries */
+	if (unlikely(freepkt < 1)) {
+		netif_stop_queue(ndev);
+		return NETDEV_TX_BUSY;
+	}
+
+	/* add padding for short packets */
+	if (skb_put_padto(skb, ETH_ZLEN)) {
+		priv->stats_tx.dropped++;
+		return NETDEV_TX_OK;
+	}
+
+	/* map Tx buffer
+	 * The Tx buffer attached to a Tx descriptor has no such restrictions.
+	 */
+	ret = ave_dma_map(ndev, &priv->tx.desc[proc_idx],
+			  skb->data, skb->len, DMA_TO_DEVICE, &paddr);
+	if (ret) {
+		dev_kfree_skb_any(skb);
+		priv->stats_tx.dropped++;
+		return NETDEV_TX_OK;
+	}
+
+	priv->tx.desc[proc_idx].skbs = skb;
+
+	ave_desc_write_addr(ndev, AVE_DESCID_TX, proc_idx, paddr);
+
+	cmdsts = AVE_STS_OWN | AVE_STS_1ST | AVE_STS_LAST |
+		(skb->len & AVE_STS_PKTLEN_TX_MASK);
+
+	/* request an interrupt every AVE_FORCE_TXINTCNT descriptors, or
+	 * when the queue is stopped
+	 */
+	if (!(proc_idx % AVE_FORCE_TXINTCNT) || netif_queue_stopped(ndev))
+		cmdsts |= AVE_STS_INTR;
+
+	/* disable hw checksum insertion when the skb doesn't require it */
+	if (skb->ip_summed == CHECKSUM_NONE ||
+	    skb->ip_summed == CHECKSUM_UNNECESSARY)
+		cmdsts |= AVE_STS_NOCSUM;
+
+	ave_desc_write_cmdsts(ndev, AVE_DESCID_TX, proc_idx, cmdsts);
+
+	priv->tx.proc_idx = (proc_idx + 1) % ndesc;
+
+	return NETDEV_TX_OK;
+}
+
+static int ave_ioctl(struct net_device *ndev, struct ifreq *ifr, int cmd)
+{
+	return phy_mii_ioctl(ndev->phydev, ifr, cmd);
+}
+
+static const u8 v4multi_macadr[] = { 0x01, 0x00, 0x00, 0x00, 0x00, 0x00 };
+static const u8 v6multi_macadr[] = { 0x33, 0x00, 0x00, 0x00, 0x00, 0x00 };
+
+static void ave_set_rx_mode(struct net_device *ndev)
+{
+	struct ave_private *priv = netdev_priv(ndev);
+	struct netdev_hw_addr *hw_adr;
+	int count, mc_cnt;
+	u32 val;
+
+	/* enable/disable the MAC address filter for promiscuous mode */
+	mc_cnt = netdev_mc_count(ndev);
+	val = readl(priv->base + AVE_RXCR);
+	if (ndev->flags & IFF_PROMISC || !mc_cnt)
+		val &= ~AVE_RXCR_AFEN;
+	else
+		val |= AVE_RXCR_AFEN;
+	writel(val, priv->base + AVE_RXCR);
+
+	/* accept all multicast addresses */
+	if ((ndev->flags & IFF_ALLMULTI) || mc_cnt > AVE_PF_MULTICAST_SIZE) {
+		ave_pfsel_set_macaddr(ndev, AVE_PFNUM_MULTICAST,
+				      v4multi_macadr, 1);
+		ave_pfsel_set_macaddr(ndev, AVE_PFNUM_MULTICAST + 1,
+				      v6multi_macadr, 1);
+	} else {
+		/* stop all multicast filters */
+		for (count = 0; count < AVE_PF_MULTICAST_SIZE; count++)
+			ave_pfsel_stop(ndev, AVE_PFNUM_MULTICAST + count);
+
+		/* set multicast addresses */
+		count = 0;
+		netdev_for_each_mc_addr(hw_adr, ndev) {
+			if (count == mc_cnt)
+				break;
+			ave_pfsel_set_macaddr(ndev, AVE_PFNUM_MULTICAST + count,
+					      hw_adr->addr, 6);
+			count++;
+		}
+	}
+}
+
+static void ave_get_stats64(struct net_device *ndev,
+			    struct rtnl_link_stats64 *stats)
+{
+	struct ave_private *priv = netdev_priv(ndev);
+	unsigned int start;
+
+	do {
+		start = u64_stats_fetch_begin_irq(&priv->stats_rx.syncp);
+		stats->rx_packets = priv->stats_rx.packets;
+		stats->rx_bytes	  = priv->stats_rx.bytes;
+	} while (u64_stats_fetch_retry_irq(&priv->stats_rx.syncp, start));
+
+	do {
+		start = u64_stats_fetch_begin_irq(&priv->stats_tx.syncp);
+		stats->tx_packets = priv->stats_tx.packets;
+		stats->tx_bytes	  = priv->stats_tx.bytes;
+	} while (u64_stats_fetch_retry_irq(&priv->stats_tx.syncp, start));
+
+	stats->rx_errors      = priv->stats_rx.errors;
+	stats->tx_errors      = priv->stats_tx.errors;
+	stats->rx_dropped     = priv->stats_rx.dropped;
+	stats->tx_dropped     = priv->stats_tx.dropped;
+	stats->rx_fifo_errors = priv->stats_rx.fifo_errors;
+	stats->collisions     = priv->stats_tx.collisions;
+}
+
+static int ave_set_mac_address(struct net_device *ndev, void *p)
+{
+	int ret = eth_mac_addr(ndev, p);
+
+	if (ret)
+		return ret;
+
+	ave_macaddr_init(ndev);
+
+	return 0;
+}
+
+static const struct net_device_ops ave_netdev_ops = {
+	.ndo_init		= ave_init,
+	.ndo_uninit		= ave_uninit,
+	.ndo_open		= ave_open,
+	.ndo_stop		= ave_stop,
+	.ndo_start_xmit		= ave_start_xmit,
+	.ndo_do_ioctl		= ave_ioctl,
+	.ndo_set_rx_mode	= ave_set_rx_mode,
+	.ndo_get_stats64	= ave_get_stats64,
+	.ndo_set_mac_address	= ave_set_mac_address,
+};
+
+static int ave_probe(struct platform_device *pdev)
+{
+	const struct ave_soc_data *data;
+	struct device *dev = &pdev->dev;
+	char buf[ETHTOOL_FWVERS_LEN];
+	phy_interface_t phy_mode;
+	struct ave_private *priv;
+	struct net_device *ndev;
+	struct device_node *np;
+	struct resource	*res;
+	const void *mac_addr;
+	void __iomem *base;
+	u64 dma_mask;
+	int irq, ret;
+	u32 ave_id;
+
+	data = of_device_get_match_data(dev);
+	if (WARN_ON(!data))
+		return -EINVAL;
+
+	np = dev->of_node;
+	phy_mode = of_get_phy_mode(np);
+	if (phy_mode < 0) {
+		dev_err(dev, "phy-mode not found\n");
+		return -EINVAL;
+	}
+	if ((!phy_interface_mode_is_rgmii(phy_mode)) &&
+	    phy_mode != PHY_INTERFACE_MODE_RMII &&
+	    phy_mode != PHY_INTERFACE_MODE_MII) {
+		dev_err(dev, "phy-mode is invalid\n");
+		return -EINVAL;
+	}
+
+	irq = platform_get_irq(pdev, 0);
+	if (irq < 0) {
+		dev_err(dev, "IRQ not found\n");
+		return irq;
+	}
+
+	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	base = devm_ioremap_resource(dev, res);
+	if (IS_ERR(base))
+		return PTR_ERR(base);
+
+	ndev = alloc_etherdev(sizeof(struct ave_private));
+	if (!ndev) {
+		dev_err(dev, "can't allocate ethernet device\n");
+		return -ENOMEM;
+	}
+
+	ndev->netdev_ops = &ave_netdev_ops;
+	ndev->ethtool_ops = &ave_ethtool_ops;
+	SET_NETDEV_DEV(ndev, dev);
+
+	ndev->features    |= (NETIF_F_IP_CSUM | NETIF_F_RXCSUM);
+	ndev->hw_features |= (NETIF_F_IP_CSUM | NETIF_F_RXCSUM);
+
+	ndev->max_mtu = AVE_MAX_ETHFRAME - (ETH_HLEN + ETH_FCS_LEN);
+
+	mac_addr = of_get_mac_address(np);
+	if (mac_addr)
+		ether_addr_copy(ndev->dev_addr, mac_addr);
+
+	/* if the MAC address is invalid, use a random one */
+	if (!is_valid_ether_addr(ndev->dev_addr)) {
+		eth_hw_addr_random(ndev);
+		dev_warn(dev, "Using random MAC address: %pM\n",
+			 ndev->dev_addr);
+	}
+
+	priv = netdev_priv(ndev);
+	priv->base = base;
+	priv->irq = irq;
+	priv->ndev = ndev;
+	priv->msg_enable = netif_msg_init(-1, AVE_DEFAULT_MSG_ENABLE);
+	priv->phy_mode = phy_mode;
+	priv->data = data;
+
+	if (IS_DESC_64BIT(priv)) {
+		priv->desc_size = AVE_DESC_SIZE_64;
+		priv->tx.daddr  = AVE_TXDM_64;
+		priv->rx.daddr  = AVE_RXDM_64;
+		dma_mask = DMA_BIT_MASK(64);
+	} else {
+		priv->desc_size = AVE_DESC_SIZE_32;
+		priv->tx.daddr  = AVE_TXDM_32;
+		priv->rx.daddr  = AVE_RXDM_32;
+		dma_mask = DMA_BIT_MASK(32);
+	}
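+	/* the descriptor format determines how wide a DMA address may be */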
+	ret = dma_set_mask(dev, dma_mask);
+	if (ret)
+		goto out_free_netdev;
+
+	priv->tx.ndesc = AVE_NR_TXDESC;
+	priv->rx.ndesc = AVE_NR_RXDESC;
+
+	u64_stats_init(&priv->stats_tx.syncp);
+	u64_stats_init(&priv->stats_rx.syncp);
+
+	priv->clk = devm_clk_get(dev, NULL);
+	if (IS_ERR(priv->clk)) {
+		ret = PTR_ERR(priv->clk);
+		goto out_free_netdev;
+	}
+
+	priv->rst = devm_reset_control_get_optional_shared(dev, NULL);
+	if (IS_ERR(priv->rst)) {
+		ret = PTR_ERR(priv->rst);
+		goto out_free_netdev;
+	}
+
+	priv->mdio = devm_mdiobus_alloc(dev);
+	if (!priv->mdio) {
+		ret = -ENOMEM;
+		goto out_free_netdev;
+	}
+	priv->mdio->priv = ndev;
+	priv->mdio->parent = dev;
+	priv->mdio->read = ave_mdiobus_read;
+	priv->mdio->write = ave_mdiobus_write;
+	priv->mdio->name = "uniphier-mdio";
+	snprintf(priv->mdio->id, MII_BUS_ID_SIZE, "%s-%x",
+		 pdev->name, pdev->id);
+
+	/* Register as a NAPI supported driver */
+	netif_napi_add(ndev, &priv->napi_rx, ave_napi_poll_rx, priv->rx.ndesc);
+	netif_tx_napi_add(ndev, &priv->napi_tx, ave_napi_poll_tx,
+			  priv->tx.ndesc);
+
+	platform_set_drvdata(pdev, ndev);
+
+	ret = register_netdev(ndev);
+	if (ret) {
+		dev_err(dev, "failed to register netdevice\n");
+		goto out_del_napi;
+	}
+
+	/* get ID and version */
+	ave_id = readl(priv->base + AVE_IDR);
+	ave_hw_read_version(ndev, buf, sizeof(buf));
+
+	dev_info(dev, "Socionext %c%c%c%c Ethernet IP %s (irq=%d, phy=%s)\n",
+		 (ave_id >> 24) & 0xff, (ave_id >> 16) & 0xff,
+		 (ave_id >> 8) & 0xff, (ave_id >> 0) & 0xff,
+		 buf, priv->irq, phy_modes(phy_mode));
+
+	return 0;
+
+out_del_napi:
+	netif_napi_del(&priv->napi_rx);
+	netif_napi_del(&priv->napi_tx);
+out_free_netdev:
+	free_netdev(ndev);
+
+	return ret;
+}
+
+static int ave_remove(struct platform_device *pdev)
+{
+	struct net_device *ndev = platform_get_drvdata(pdev);
+	struct ave_private *priv = netdev_priv(ndev);
+
+	unregister_netdev(ndev);
+	netif_napi_del(&priv->napi_rx);
+	netif_napi_del(&priv->napi_tx);
+	free_netdev(ndev);
+
+	return 0;
+}
+
+static const struct ave_soc_data ave_pro4_data = {
+	.is_desc_64bit = false,
+};
+
+static const struct ave_soc_data ave_pxs2_data = {
+	.is_desc_64bit = false,
+};
+
+static const struct ave_soc_data ave_ld11_data = {
+	.is_desc_64bit = false,
+};
+
+static const struct ave_soc_data ave_ld20_data = {
+	.is_desc_64bit = true,
+};
+
+static const struct of_device_id of_ave_match[] = {
+	{
+		.compatible = "socionext,uniphier-pro4-ave4",
+		.data = &ave_pro4_data,
+	},
+	{
+		.compatible = "socionext,uniphier-pxs2-ave4",
+		.data = &ave_pxs2_data,
+	},
+	{
+		.compatible = "socionext,uniphier-ld11-ave4",
+		.data = &ave_ld11_data,
+	},
+	{
+		.compatible = "socionext,uniphier-ld20-ave4",
+		.data = &ave_ld20_data,
+	},
+	{ /* Sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, of_ave_match);
+
+static struct platform_driver ave_driver = {
+	.probe  = ave_probe,
+	.remove = ave_remove,
+	.driver	= {
+		.name = "ave",
+		.of_match_table	= of_ave_match,
+	},
+};
+module_platform_driver(ave_driver);
+
+MODULE_DESCRIPTION("Socionext UniPhier AVE ethernet driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/net/ethernet/stmicro/stmmac/common.h b/drivers/net/ethernet/stmicro/stmmac/common.h
index ce2ea2d491ac..2ffe76c0ff74 100644
--- a/drivers/net/ethernet/stmicro/stmmac/common.h
+++ b/drivers/net/ethernet/stmicro/stmmac/common.h
@@ -474,7 +474,7 @@ struct mac_device_info;
 /* Helpers to program the MAC core */
 struct stmmac_ops {
 	/* MAC core initialization */
-	void (*core_init)(struct mac_device_info *hw, int mtu);
+	void (*core_init)(struct mac_device_info *hw, struct net_device *dev);
 	/* Enable the MAC RX/TX */
 	void (*set_mac)(void __iomem *ioaddr, bool enable);
 	/* Enable and verify that the IPC module is supported */
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-meson8b.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-meson8b.c
index 4404650b32c5..5270d26f0bc6 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-meson8b.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-meson8b.c
@@ -40,9 +40,7 @@
 #define PRG_ETH0_CLK_M250_DIV_SHIFT	7
 #define PRG_ETH0_CLK_M250_DIV_WIDTH	3
 
-/* divides the result of m25_sel by either 5 (bit unset) or 10 (bit set) */
-#define PRG_ETH0_CLK_M25_DIV_SHIFT	10
-#define PRG_ETH0_CLK_M25_DIV_WIDTH	1
+#define PRG_ETH0_RGMII_TX_CLK_EN	10
 
 #define PRG_ETH0_INVERTED_RMII_CLK	BIT(11)
 #define PRG_ETH0_TX_AND_PHY_REF_CLK	BIT(12)
@@ -63,8 +61,11 @@ struct meson8b_dwmac {
 	struct clk_divider	m250_div;
 	struct clk		*m250_div_clk;
 
-	struct clk_divider	m25_div;
-	struct clk		*m25_div_clk;
+	struct clk_fixed_factor	fixed_div2;
+	struct clk		*fixed_div2_clk;
+
+	struct clk_gate		rgmii_tx_en;
+	struct clk		*rgmii_tx_en_clk;
 
 	u32			tx_delay_ns;
 };
@@ -81,7 +82,7 @@ static void meson8b_dwmac_mask_bits(struct meson8b_dwmac *dwmac, u32 reg,
 	writel(data, dwmac->regs + reg);
 }
 
-static int meson8b_init_clk(struct meson8b_dwmac *dwmac)
+static int meson8b_init_rgmii_tx_clk(struct meson8b_dwmac *dwmac)
 {
 	struct clk_init_data init;
 	int i, ret;
@@ -89,11 +90,6 @@ static int meson8b_init_clk(struct meson8b_dwmac *dwmac)
 	char clk_name[32];
 	const char *clk_div_parents[1];
 	const char *mux_parent_names[MUX_CLK_NUM_PARENTS];
-	static const struct clk_div_table clk_25m_div_table[] = {
-		{ .val = 0, .div = 5 },
-		{ .val = 1, .div = 10 },
-		{ /* sentinel */ },
-	};
 
 	/* get the mux parents from DT */
 	for (i = 0; i < MUX_CLK_NUM_PARENTS; i++) {
@@ -116,7 +112,7 @@ static int meson8b_init_clk(struct meson8b_dwmac *dwmac)
 	snprintf(clk_name, sizeof(clk_name), "%s#m250_sel", dev_name(dev));
 	init.name = clk_name;
 	init.ops = &clk_mux_ops;
-	init.flags = 0;
+	init.flags = CLK_SET_RATE_PARENT;
 	init.parent_names = mux_parent_names;
 	init.num_parents = MUX_CLK_NUM_PARENTS;
 
@@ -144,31 +140,48 @@ static int meson8b_init_clk(struct meson8b_dwmac *dwmac)
 	dwmac->m250_div.shift = PRG_ETH0_CLK_M250_DIV_SHIFT;
 	dwmac->m250_div.width = PRG_ETH0_CLK_M250_DIV_WIDTH;
 	dwmac->m250_div.hw.init = &init;
-	dwmac->m250_div.flags = CLK_DIVIDER_ONE_BASED | CLK_DIVIDER_ALLOW_ZERO;
+	dwmac->m250_div.flags = CLK_DIVIDER_ONE_BASED |
+				CLK_DIVIDER_ALLOW_ZERO |
+				CLK_DIVIDER_ROUND_CLOSEST;
 
 	dwmac->m250_div_clk = devm_clk_register(dev, &dwmac->m250_div.hw);
 	if (WARN_ON(IS_ERR(dwmac->m250_div_clk)))
 		return PTR_ERR(dwmac->m250_div_clk);
 
-	/* create the m25_div */
-	snprintf(clk_name, sizeof(clk_name), "%s#m25_div", dev_name(dev));
+	/* create the fixed_div2 */
+	snprintf(clk_name, sizeof(clk_name), "%s#fixed_div2", dev_name(dev));
 	init.name = devm_kstrdup(dev, clk_name, GFP_KERNEL);
-	init.ops = &clk_divider_ops;
-	init.flags = CLK_IS_BASIC | CLK_SET_RATE_PARENT;
+	init.ops = &clk_fixed_factor_ops;
+	init.flags = CLK_SET_RATE_PARENT;
 	clk_div_parents[0] = __clk_get_name(dwmac->m250_div_clk);
 	init.parent_names = clk_div_parents;
 	init.num_parents = ARRAY_SIZE(clk_div_parents);
 
-	dwmac->m25_div.reg = dwmac->regs + PRG_ETH0;
-	dwmac->m25_div.shift = PRG_ETH0_CLK_M25_DIV_SHIFT;
-	dwmac->m25_div.width = PRG_ETH0_CLK_M25_DIV_WIDTH;
-	dwmac->m25_div.table = clk_25m_div_table;
-	dwmac->m25_div.hw.init = &init;
-	dwmac->m25_div.flags = CLK_DIVIDER_ALLOW_ZERO;
+	dwmac->fixed_div2.mult = 1;
+	dwmac->fixed_div2.div = 2;
+	dwmac->fixed_div2.hw.init = &init;
 
-	dwmac->m25_div_clk = devm_clk_register(dev, &dwmac->m25_div.hw);
-	if (WARN_ON(IS_ERR(dwmac->m25_div_clk)))
-		return PTR_ERR(dwmac->m25_div_clk);
+	dwmac->fixed_div2_clk = devm_clk_register(dev, &dwmac->fixed_div2.hw);
+	if (WARN_ON(IS_ERR(dwmac->fixed_div2_clk)))
+		return PTR_ERR(dwmac->fixed_div2_clk);
+
+	/* create the rgmii_tx_en */
+	init.name = devm_kasprintf(dev, GFP_KERNEL, "%s#rgmii_tx_en",
+				   dev_name(dev));
+	init.ops = &clk_gate_ops;
+	init.flags = CLK_SET_RATE_PARENT;
+	clk_div_parents[0] = __clk_get_name(dwmac->fixed_div2_clk);
+	init.parent_names = clk_div_parents;
+	init.num_parents = ARRAY_SIZE(clk_div_parents);
+
+	dwmac->rgmii_tx_en.reg = dwmac->regs + PRG_ETH0;
+	dwmac->rgmii_tx_en.bit_idx = PRG_ETH0_RGMII_TX_CLK_EN;
+	dwmac->rgmii_tx_en.hw.init = &init;
+
+	dwmac->rgmii_tx_en_clk = devm_clk_register(dev,
+						   &dwmac->rgmii_tx_en.hw);
+	if (WARN_ON(IS_ERR(dwmac->rgmii_tx_en_clk)))
+		return PTR_ERR(dwmac->rgmii_tx_en_clk);
 
 	return 0;
 }
@@ -176,7 +189,6 @@ static int meson8b_init_clk(struct meson8b_dwmac *dwmac)
 static int meson8b_init_prg_eth(struct meson8b_dwmac *dwmac)
 {
 	int ret;
-	unsigned long clk_rate;
 	u8 tx_dly_val = 0;
 
 	switch (dwmac->phy_mode) {
@@ -191,9 +203,6 @@ static int meson8b_init_prg_eth(struct meson8b_dwmac *dwmac)
 
 	case PHY_INTERFACE_MODE_RGMII_ID:
 	case PHY_INTERFACE_MODE_RGMII_TXID:
-		/* Generate a 25MHz clock for the PHY */
-		clk_rate = 25 * 1000 * 1000;
-
 		/* enable RGMII mode */
 		meson8b_dwmac_mask_bits(dwmac, PRG_ETH0, PRG_ETH0_RGMII_MODE,
 					PRG_ETH0_RGMII_MODE);
@@ -204,12 +213,28 @@ static int meson8b_init_prg_eth(struct meson8b_dwmac *dwmac)
 
 		meson8b_dwmac_mask_bits(dwmac, PRG_ETH0, PRG_ETH0_TXDLY_MASK,
 					tx_dly_val << PRG_ETH0_TXDLY_SHIFT);
+
+		/* Configure the 125MHz RGMII TX clock; the IP block changes
+		 * the output automatically (without us having to configure
+		 * a register) based on the line speed: 125MHz for Gbit
+		 * speeds, 25MHz for 100Mbit/s and 2.5MHz for 10Mbit/s.
+		 */
+		ret = clk_set_rate(dwmac->rgmii_tx_en_clk, 125 * 1000 * 1000);
+		if (ret) {
+			dev_err(&dwmac->pdev->dev,
+				"failed to set RGMII TX clock\n");
+			return ret;
+		}
+
+		ret = clk_prepare_enable(dwmac->rgmii_tx_en_clk);
+		if (ret) {
+			dev_err(&dwmac->pdev->dev,
+				"failed to enable the RGMII TX clock\n");
+			return ret;
+		}
 		break;
 
 	case PHY_INTERFACE_MODE_RMII:
-		/* Use the rate of the mux clock for the internal RMII PHY */
-		clk_rate = clk_get_rate(dwmac->m250_mux_clk);
-
 		/* disable RGMII mode -> enables RMII mode */
 		meson8b_dwmac_mask_bits(dwmac, PRG_ETH0, PRG_ETH0_RGMII_MODE,
 					0);
@@ -231,20 +256,6 @@ static int meson8b_init_prg_eth(struct meson8b_dwmac *dwmac)
 		return -EINVAL;
 	}
 
-	ret = clk_prepare_enable(dwmac->m25_div_clk);
-	if (ret) {
-		dev_err(&dwmac->pdev->dev, "failed to enable the PHY clock\n");
-		return ret;
-	}
-
-	ret = clk_set_rate(dwmac->m25_div_clk, clk_rate);
-	if (ret) {
-		clk_disable_unprepare(dwmac->m25_div_clk);
-
-		dev_err(&dwmac->pdev->dev, "failed to set PHY clock\n");
-		return ret;
-	}
-
 	/* enable TX_CLK and PHY_REF_CLK generator */
 	meson8b_dwmac_mask_bits(dwmac, PRG_ETH0, PRG_ETH0_TX_AND_PHY_REF_CLK,
 				PRG_ETH0_TX_AND_PHY_REF_CLK);
@@ -294,7 +305,7 @@ static int meson8b_dwmac_probe(struct platform_device *pdev)
 				 &dwmac->tx_delay_ns))
 		dwmac->tx_delay_ns = 2;
 
-	ret = meson8b_init_clk(dwmac);
+	ret = meson8b_init_rgmii_tx_clk(dwmac);
 	if (ret)
 		goto err_remove_config_dt;
 
@@ -311,7 +322,8 @@ static int meson8b_dwmac_probe(struct platform_device *pdev)
 	return 0;
 
 err_clk_disable:
-	clk_disable_unprepare(dwmac->m25_div_clk);
+	if (phy_interface_mode_is_rgmii(dwmac->phy_mode))
+		clk_disable_unprepare(dwmac->rgmii_tx_en_clk);
 err_remove_config_dt:
 	stmmac_remove_config_dt(pdev, plat_dat);
 
@@ -322,7 +334,8 @@ static int meson8b_dwmac_remove(struct platform_device *pdev)
 {
 	struct meson8b_dwmac *dwmac = get_stmmac_bsp_priv(&pdev->dev);
 
-	clk_disable_unprepare(dwmac->m25_div_clk);
+	if (phy_interface_mode_is_rgmii(dwmac->phy_mode))
+		clk_disable_unprepare(dwmac->rgmii_tx_en_clk);
 
 	return stmmac_pltfr_remove(pdev);
 }
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c
index 9eb7f65d8000..a3fa65b1ca8e 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c
@@ -483,7 +483,8 @@ static int sun8i_dwmac_init(struct platform_device *pdev, void *priv)
 	return 0;
 }
 
-static void sun8i_dwmac_core_init(struct mac_device_info *hw, int mtu)
+static void sun8i_dwmac_core_init(struct mac_device_info *hw,
+				  struct net_device *dev)
 {
 	void __iomem *ioaddr = hw->pcsr;
 	u32 v;
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c b/drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c
index 8a86340ff2d3..540d21786a43 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c
@@ -25,18 +25,28 @@
 #include <linux/crc32.h>
 #include <linux/slab.h>
 #include <linux/ethtool.h>
+#include <net/dsa.h>
 #include <asm/io.h>
 #include "stmmac_pcs.h"
 #include "dwmac1000.h"
 
-static void dwmac1000_core_init(struct mac_device_info *hw, int mtu)
+static void dwmac1000_core_init(struct mac_device_info *hw,
+				struct net_device *dev)
 {
 	void __iomem *ioaddr = hw->pcsr;
 	u32 value = readl(ioaddr + GMAC_CONTROL);
+	int mtu = dev->mtu;
 
 	/* Configure GMAC core */
 	value |= GMAC_CORE_INIT;
 
+	/* Clear ACS bit because Ethernet switch tagging formats such as
+	 * Broadcom tags can look like invalid LLC/SNAP packets and cause the
+	 * hardware to truncate packets on reception.
+	 */
+	if (netdev_uses_dsa(dev))
+		value &= ~GMAC_CONTROL_ACS;
+
 	if (mtu > 1500)
 		value |= GMAC_CONTROL_2K;
 	if (mtu > 2000)
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac100_core.c b/drivers/net/ethernet/stmicro/stmmac/dwmac100_core.c
index 8ef517356313..91b23f9db31a 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac100_core.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac100_core.c
@@ -25,15 +25,26 @@
 *******************************************************************************/
 
 #include <linux/crc32.h>
+#include <net/dsa.h>
 #include <asm/io.h>
 #include "dwmac100.h"
 
-static void dwmac100_core_init(struct mac_device_info *hw, int mtu)
+static void dwmac100_core_init(struct mac_device_info *hw,
+			       struct net_device *dev)
 {
 	void __iomem *ioaddr = hw->pcsr;
 	u32 value = readl(ioaddr + MAC_CONTROL);
 
-	writel((value | MAC_CORE_INIT), ioaddr + MAC_CONTROL);
+	value |= MAC_CORE_INIT;
+
+	/* Clear ASTP bit because Ethernet switch tagging formats such as
+	 * Broadcom tags can look like invalid LLC/SNAP packets and cause the
+	 * hardware to truncate packets on reception.
+	 */
+	if (netdev_uses_dsa(dev))
+		value &= ~MAC_CONTROL_ASTP;
+
+	writel(value, ioaddr + MAC_CONTROL);
 
 #ifdef STMMAC_VLAN_TAG_USED
 	writel(ETH_P_8021Q, ioaddr + MAC_VLAN1);
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c b/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c
index f3ed8f7853eb..ed222b20fcf1 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c
@@ -17,16 +17,26 @@
 #include <linux/slab.h>
 #include <linux/ethtool.h>
 #include <linux/io.h>
+#include <net/dsa.h>
 #include "stmmac_pcs.h"
 #include "dwmac4.h"
 
-static void dwmac4_core_init(struct mac_device_info *hw, int mtu)
+static void dwmac4_core_init(struct mac_device_info *hw,
+			     struct net_device *dev)
 {
 	void __iomem *ioaddr = hw->pcsr;
 	u32 value = readl(ioaddr + GMAC_CONFIG);
+	int mtu = dev->mtu;
 
 	value |= GMAC_CORE_INIT;
 
+	/* Clear ACS bit because Ethernet switch tagging formats such as
+	 * Broadcom tags can look like invalid LLC/SNAP packets and cause the
+	 * hardware to truncate packets on reception.
+	 */
+	if (netdev_uses_dsa(dev))
+		value &= ~GMAC_CONFIG_ACS;
+
 	if (mtu > 1500)
 		value |= GMAC_CONFIG_2K;
 	if (mtu > 2000)
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.c b/drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.c
index 7e089bf906b4..c728ffa095de 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.c
@@ -334,7 +334,7 @@ static void dwmac4_rd_prepare_tx_desc(struct dma_desc *p, int is_fs, int len,
 	if (tx_own)
 		tdes3 |= TDES3_OWN;
 
-	if (is_fs & tx_own)
+	if (is_fs && tx_own)
 		/* When the own bit, for the first frame, has to be set, all
 		 * descriptors for the same frame has to be set before, to
 		 * avoid race condition.
@@ -377,7 +377,7 @@ static void dwmac4_rd_prepare_tso_tx_desc(struct dma_desc *p, int is_fs,
 	if (tx_own)
 		tdes3 |= TDES3_OWN;
 
-	if (is_fs & tx_own)
+	if (is_fs && tx_own)
 		/* When the own bit, for the first frame, has to be set, all
 		 * descriptors for the same frame has to be set before, to
 		 * avoid race condition.
@@ -406,7 +406,7 @@ static void dwmac4_display_ring(void *head, unsigned int size, bool rx)
 	pr_info("%s descriptor ring:\n", rx ? "RX" : "TX");
 
 	for (i = 0; i < size; i++) {
-		pr_info("%d [0x%x]: 0x%x 0x%x 0x%x 0x%x\n",
+		pr_info("%03d [0x%x]: 0x%x 0x%x 0x%x 0x%x\n",
 			i, (unsigned int)virt_to_phys(p),
 			le32_to_cpu(p->des0), le32_to_cpu(p->des1),
 			le32_to_cpu(p->des2), le32_to_cpu(p->des3));
diff --git a/drivers/net/ethernet/stmicro/stmmac/enh_desc.c b/drivers/net/ethernet/stmicro/stmmac/enh_desc.c
index 2a828a312814..6768a25b6aa0 100644
--- a/drivers/net/ethernet/stmicro/stmmac/enh_desc.c
+++ b/drivers/net/ethernet/stmicro/stmmac/enh_desc.c
@@ -341,7 +341,7 @@ static void enh_desc_prepare_tx_desc(struct dma_desc *p, int is_fs, int len,
 	if (tx_own)
 		tdes0 |= ETDES0_OWN;
 
-	if (is_fs & tx_own)
+	if (is_fs && tx_own)
 		/* When the own bit, for the first frame, has to be set, all
 		 * descriptors for the same frame has to be set before, to
 		 * avoid race condition.
@@ -428,7 +428,7 @@ static void enh_desc_display_ring(void *head, unsigned int size, bool rx)
 		u64 x;
 
 		x = *(u64 *)ep;
-		pr_info("%d [0x%x]: 0x%x 0x%x 0x%x 0x%x\n",
+		pr_info("%03d [0x%x]: 0x%x 0x%x 0x%x 0x%x\n",
 			i, (unsigned int)virt_to_phys(ep),
 			(unsigned int)x, (unsigned int)(x >> 32),
 			ep->basic.des2, ep->basic.des3);
diff --git a/drivers/net/ethernet/stmicro/stmmac/norm_desc.c b/drivers/net/ethernet/stmicro/stmmac/norm_desc.c
index db4cee57bb24..ebd9e5e00f16 100644
--- a/drivers/net/ethernet/stmicro/stmmac/norm_desc.c
+++ b/drivers/net/ethernet/stmicro/stmmac/norm_desc.c
@@ -288,7 +288,7 @@ static void ndesc_display_ring(void *head, unsigned int size, bool rx)
 		u64 x;
 
 		x = *(u64 *)p;
-		pr_info("%d [0x%x]: 0x%x 0x%x 0x%x 0x%x",
+		pr_info("%03d [0x%x]: 0x%x 0x%x 0x%x 0x%x",
 			i, (unsigned int)virt_to_phys(p),
 			(unsigned int)x, (unsigned int)(x >> 32),
 			p->des2, p->des3);
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
index c0af0bc4e714..7ad841434ec8 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
@@ -2003,22 +2003,60 @@ static void stmmac_set_dma_operation_mode(struct stmmac_priv *priv, u32 txmode,
 static void stmmac_dma_interrupt(struct stmmac_priv *priv)
 {
 	u32 tx_channel_count = priv->plat->tx_queues_to_use;
-	int status;
+	u32 rx_channel_count = priv->plat->rx_queues_to_use;
+	u32 channels_to_check = tx_channel_count > rx_channel_count ?
+				tx_channel_count : rx_channel_count;
 	u32 chan;
+	bool poll_scheduled = false;
+	int status[channels_to_check];
+
+	/* Each DMA channel can be used for rx and tx simultaneously, yet
+	 * napi_struct is embedded in struct stmmac_rx_queue rather than in a
+	 * stmmac_channel struct.
+	 * Because of this, stmmac_poll currently checks (and possibly wakes)
+	 * all tx queues rather than just a single tx queue.
+	 */
+	for (chan = 0; chan < channels_to_check; chan++)
+		status[chan] = priv->hw->dma->dma_interrupt(priv->ioaddr,
+							    &priv->xstats,
+							    chan);
 
-	for (chan = 0; chan < tx_channel_count; chan++) {
-		struct stmmac_rx_queue *rx_q = &priv->rx_queue[chan];
+	for (chan = 0; chan < rx_channel_count; chan++) {
+		if (likely(status[chan] & handle_rx)) {
+			struct stmmac_rx_queue *rx_q = &priv->rx_queue[chan];
 
-		status = priv->hw->dma->dma_interrupt(priv->ioaddr,
-						      &priv->xstats, chan);
-		if (likely((status & handle_rx)) || (status & handle_tx)) {
 			if (likely(napi_schedule_prep(&rx_q->napi))) {
 				stmmac_disable_dma_irq(priv, chan);
 				__napi_schedule(&rx_q->napi);
+				poll_scheduled = true;
+			}
+		}
+	}
+
+	/* If we scheduled poll, we already know that tx queues will be checked.
+	 * If we didn't schedule poll, see if any DMA channel (used by tx) has a
+	 * completed transmission, if so, call stmmac_poll (once).
+	 */
+	if (!poll_scheduled) {
+		for (chan = 0; chan < tx_channel_count; chan++) {
+			if (status[chan] & handle_tx) {
+				/* It doesn't matter what rx queue we choose
+				 * here. We use 0 since it always exists.
+				 */
+				struct stmmac_rx_queue *rx_q =
+					&priv->rx_queue[0];
+
+				if (likely(napi_schedule_prep(&rx_q->napi))) {
+					stmmac_disable_dma_irq(priv, chan);
+					__napi_schedule(&rx_q->napi);
+				}
+				break;
 			}
 		}
+	}
 
-		if (unlikely(status & tx_hard_error_bump_tc)) {
+	for (chan = 0; chan < tx_channel_count; chan++) {
+		if (unlikely(status[chan] & tx_hard_error_bump_tc)) {
 			/* Try to bump up the dma threshold on this failure */
 			if (unlikely(priv->xstats.threshold != SF_DMA_MODE) &&
 			    (tc <= 256)) {
@@ -2035,7 +2073,7 @@ static void stmmac_dma_interrupt(struct stmmac_priv *priv)
 								    chan);
 				priv->xstats.threshold = tc;
 			}
-		} else if (unlikely(status == tx_hard_error)) {
+		} else if (unlikely(status[chan] == tx_hard_error)) {
 			stmmac_tx_err(priv, chan);
 		}
 	}
@@ -2489,7 +2527,7 @@ static int stmmac_hw_setup(struct net_device *dev, bool init_ptp)
 	}
 
 	/* Initialize the MAC Core */
-	priv->hw->mac->core_init(priv->hw, dev->mtu);
+	priv->hw->mac->core_init(priv->hw, dev);
 
 	/* Initialize MTL*/
 	if (priv->synopsys_id >= DWMAC_CORE_4_00)
@@ -3404,9 +3442,8 @@ static int stmmac_rx(struct stmmac_priv *priv, int limit, u32 queue)
 			if (netif_msg_rx_status(priv)) {
 				netdev_dbg(priv->dev, "\tdesc: %p [entry %d] buff=0x%x\n",
 					   p, entry, des);
-				if (frame_len > ETH_FRAME_LEN)
-					netdev_dbg(priv->dev, "frame size %d, COE: %d\n",
-						   frame_len, status);
+				netdev_dbg(priv->dev, "frame size %d, COE: %d\n",
+					   frame_len, status);
 			}
 
 			/* The zero-copy is always used for all the sizes
diff --git a/drivers/net/ethernet/synopsys/dwc-xlgmac-common.c b/drivers/net/ethernet/synopsys/dwc-xlgmac-common.c
index d655a4261e98..eb1c6b03c329 100644
--- a/drivers/net/ethernet/synopsys/dwc-xlgmac-common.c
+++ b/drivers/net/ethernet/synopsys/dwc-xlgmac-common.c
@@ -333,9 +333,8 @@ void xlgmac_print_pkt(struct net_device *netdev,
 		      struct sk_buff *skb, bool tx_rx)
 {
 	struct ethhdr *eth = (struct ethhdr *)skb->data;
-	unsigned char *buf = skb->data;
 	unsigned char buffer[128];
-	unsigned int i, j;
+	unsigned int i;
 
 	netdev_dbg(netdev, "\n************** SKB dump ****************\n");
 
@@ -346,22 +345,13 @@ void xlgmac_print_pkt(struct net_device *netdev,
 	netdev_dbg(netdev, "Src MAC addr: %pM\n", eth->h_source);
 	netdev_dbg(netdev, "Protocol: %#06hx\n", ntohs(eth->h_proto));
 
-	for (i = 0, j = 0; i < skb->len;) {
-		j += snprintf(buffer + j, sizeof(buffer) - j, "%02hhx",
-			      buf[i++]);
-
-		if ((i % 32) == 0) {
-			netdev_dbg(netdev, "  %#06x: %s\n", i - 32, buffer);
-			j = 0;
-		} else if ((i % 16) == 0) {
-			buffer[j++] = ' ';
-			buffer[j++] = ' ';
-		} else if ((i % 4) == 0) {
-			buffer[j++] = ' ';
-		}
+	for (i = 0; i < skb->len; i += 32) {
+		unsigned int len = min(skb->len - i, 32U);
+
+		hex_dump_to_buffer(&skb->data[i], len, 32, 1,
+				   buffer, sizeof(buffer), false);
+		netdev_dbg(netdev, "  %#06x: %s\n", i, buffer);
 	}
-	if (i % 32)
-		netdev_dbg(netdev, "  %#06x: %s\n", i - (i % 32), buffer);
 
 	netdev_dbg(netdev, "\n************** SKB dump ****************\n");
 }
diff --git a/drivers/net/ethernet/ti/cpsw.c b/drivers/net/ethernet/ti/cpsw.c
index a73600dceb8b..3c85a0885f9b 100644
--- a/drivers/net/ethernet/ti/cpsw.c
+++ b/drivers/net/ethernet/ti/cpsw.c
@@ -88,6 +88,7 @@ do {								\
 #define CPSW_VERSION_4		0x190112
 
 #define HOST_PORT_NUM		0
+#define CPSW_ALE_PORTS_NUM	3
 #define SLIVER_SIZE		0x40
 
 #define CPSW1_HOST_PORT_OFFSET	0x028
@@ -352,6 +353,27 @@ struct cpsw_hw_stats {
 	u32	rxdmaoverruns;
 };
 
+struct cpsw_slave_data {
+	struct device_node *phy_node;
+	char		phy_id[MII_BUS_ID_SIZE];
+	int		phy_if;
+	u8		mac_addr[ETH_ALEN];
+	u16		dual_emac_res_vlan;	/* Reserved VLAN for DualEMAC */
+};
+
+struct cpsw_platform_data {
+	struct cpsw_slave_data	*slave_data;
+	u32	ss_reg_ofs;	/* Subsystem control register offset */
+	u32	channels;	/* number of cpdma channels (symmetric) */
+	u32	slaves;		/* number of slave cpgmac ports */
+	u32	active_slave; /* time stamping, ethtool and SIOCGMIIPHY slave */
+	u32	ale_entries;	/* ale table size */
+	u32	bd_ram_size;	/* buffer descriptor ram size */
+	u32	mac_control;	/* Mac control register */
+	u16	default_vlan;	/* Def VLAN for ALE lookup in VLAN aware mode */
+	bool	dual_emac;	/* Enable Dual EMAC mode */
+};
+
 struct cpsw_slave {
 	void __iomem			*regs;
 	struct cpsw_sliver_regs __iomem	*sliver;
@@ -365,12 +387,12 @@ struct cpsw_slave {
 
 static inline u32 slave_read(struct cpsw_slave *slave, u32 offset)
 {
-	return __raw_readl(slave->regs + offset);
+	return readl_relaxed(slave->regs + offset);
 }
 
 static inline void slave_write(struct cpsw_slave *slave, u32 val, u32 offset)
 {
-	__raw_writel(val, slave->regs + offset);
+	writel_relaxed(val, slave->regs + offset);
 }
 
 struct cpsw_vector {
@@ -660,8 +682,8 @@ static void cpsw_ndo_set_rx_mode(struct net_device *ndev)
 
 static void cpsw_intr_enable(struct cpsw_common *cpsw)
 {
-	__raw_writel(0xFF, &cpsw->wr_regs->tx_en);
-	__raw_writel(0xFF, &cpsw->wr_regs->rx_en);
+	writel_relaxed(0xFF, &cpsw->wr_regs->tx_en);
+	writel_relaxed(0xFF, &cpsw->wr_regs->rx_en);
 
 	cpdma_ctlr_int_ctrl(cpsw->dma, true);
 	return;
@@ -669,8 +691,8 @@ static void cpsw_intr_enable(struct cpsw_common *cpsw)
 
 static void cpsw_intr_disable(struct cpsw_common *cpsw)
 {
-	__raw_writel(0, &cpsw->wr_regs->tx_en);
-	__raw_writel(0, &cpsw->wr_regs->rx_en);
+	writel_relaxed(0, &cpsw->wr_regs->tx_en);
+	writel_relaxed(0, &cpsw->wr_regs->rx_en);
 
 	cpdma_ctlr_int_ctrl(cpsw->dma, false);
 	return;
@@ -949,18 +971,14 @@ static inline void soft_reset(const char *module, void __iomem *reg)
 {
 	unsigned long timeout = jiffies + HZ;
 
-	__raw_writel(1, reg);
+	writel_relaxed(1, reg);
 	do {
 		cpu_relax();
-	} while ((__raw_readl(reg) & 1) && time_after(timeout, jiffies));
+	} while ((readl_relaxed(reg) & 1) && time_after(timeout, jiffies));
 
-	WARN(__raw_readl(reg) & 1, "failed to soft-reset %s\n", module);
+	WARN(readl_relaxed(reg) & 1, "failed to soft-reset %s\n", module);
 }
 
-#define mac_hi(mac)	(((mac)[0] << 0) | ((mac)[1] << 8) |	\
-			 ((mac)[2] << 16) | ((mac)[3] << 24))
-#define mac_lo(mac)	(((mac)[4] << 0) | ((mac)[5] << 8))
-
 static void cpsw_set_slave_mac(struct cpsw_slave *slave,
 			       struct cpsw_priv *priv)
 {
@@ -1015,7 +1033,7 @@ static void _cpsw_adjust_link(struct cpsw_slave *slave,
 
 	if (mac_control != slave->mac_control) {
 		phy_print_status(phy);
-		__raw_writel(mac_control, &slave->sliver->mac_control);
+		writel_relaxed(mac_control, &slave->sliver->mac_control);
 	}
 
 	slave->mac_control = mac_control;
@@ -1278,7 +1296,7 @@ static void cpsw_slave_open(struct cpsw_slave *slave, struct cpsw_priv *priv)
 	soft_reset_slave(slave);
 
 	/* setup priority mapping */
-	__raw_writel(RX_PRIORITY_MAPPING, &slave->sliver->rx_pri_map);
+	writel_relaxed(RX_PRIORITY_MAPPING, &slave->sliver->rx_pri_map);
 
 	switch (cpsw->version) {
 	case CPSW_VERSION_1:
@@ -1304,7 +1322,7 @@ static void cpsw_slave_open(struct cpsw_slave *slave, struct cpsw_priv *priv)
 	}
 
 	/* setup max packet size, and mac address */
-	__raw_writel(cpsw->rx_packet_max, &slave->sliver->rx_maxlen);
+	writel_relaxed(cpsw->rx_packet_max, &slave->sliver->rx_maxlen);
 	cpsw_set_slave_mac(slave, priv);
 
 	slave->mac_control = 0;	/* no link yet */
@@ -1395,9 +1413,9 @@ static void cpsw_init_host_port(struct cpsw_priv *priv)
 	writel(fifo_mode, &cpsw->host_port_regs->tx_in_ctl);
 
 	/* setup host port priority mapping */
-	__raw_writel(CPDMA_TX_PRIORITY_MAP,
-		     &cpsw->host_port_regs->cpdma_tx_pri_map);
-	__raw_writel(0, &cpsw->host_port_regs->cpdma_rx_chan_map);
+	writel_relaxed(CPDMA_TX_PRIORITY_MAP,
+		       &cpsw->host_port_regs->cpdma_tx_pri_map);
+	writel_relaxed(0, &cpsw->host_port_regs->cpdma_rx_chan_map);
 
 	cpsw_ale_control_set(cpsw->ale, HOST_PORT_NUM,
 			     ALE_PORT_STATE, ALE_PORT_STATE_FORWARD);
@@ -1514,10 +1532,10 @@ static int cpsw_ndo_open(struct net_device *ndev)
 	/* initialize shared resources for every ndev */
 	if (!cpsw->usage_count) {
 		/* disable priority elevation */
-		__raw_writel(0, &cpsw->regs->ptype);
+		writel_relaxed(0, &cpsw->regs->ptype);
 
 		/* enable statistics collection only on all ports */
-		__raw_writel(0x7, &cpsw->regs->stat_port_en);
+		writel_relaxed(0x7, &cpsw->regs->stat_port_en);
 
 		/* Enable internal fifo flow control */
 		writel(0x7, &cpsw->regs->flow_control);
@@ -1701,7 +1719,7 @@ static void cpsw_hwtstamp_v2(struct cpsw_priv *priv)
 
 	slave_write(slave, mtype, CPSW2_TS_SEQ_MTYPE);
 	slave_write(slave, ctrl, CPSW2_CONTROL);
-	__raw_writel(ETH_P_1588, &cpsw->regs->ts_ltype);
+	writel_relaxed(ETH_P_1588, &cpsw->regs->ts_ltype);
 }
 
 static int cpsw_hwtstamp_set(struct net_device *dev, struct ifreq *ifr)
@@ -2298,7 +2316,6 @@ static int cpsw_check_ch_settings(struct cpsw_common *cpsw,
 
 static int cpsw_update_channels_res(struct cpsw_priv *priv, int ch_num, int rx)
 {
-	int (*poll)(struct napi_struct *, int);
 	struct cpsw_common *cpsw = priv->cpsw;
 	void (*handler)(void *, int, int);
 	struct netdev_queue *queue;
@@ -2309,12 +2326,10 @@ static int cpsw_update_channels_res(struct cpsw_priv *priv, int ch_num, int rx)
 		ch = &cpsw->rx_ch_num;
 		vec = cpsw->rxv;
 		handler = cpsw_rx_handler;
-		poll = cpsw_rx_poll;
 	} else {
 		ch = &cpsw->tx_ch_num;
 		vec = cpsw->txv;
 		handler = cpsw_tx_handler;
-		poll = cpsw_tx_poll;
 	}
 
 	while (*ch < ch_num) {
@@ -3050,17 +3065,23 @@ static int cpsw_probe(struct platform_device *pdev)
 	}
 
 	cpsw->txv[0].ch = cpdma_chan_create(cpsw->dma, 0, cpsw_tx_handler, 0);
+	if (IS_ERR(cpsw->txv[0].ch)) {
+		dev_err(priv->dev, "error initializing tx dma channel\n");
+		ret = PTR_ERR(cpsw->txv[0].ch);
+		goto clean_dma_ret;
+	}
+
 	cpsw->rxv[0].ch = cpdma_chan_create(cpsw->dma, 0, cpsw_rx_handler, 1);
-	if (WARN_ON(!cpsw->rxv[0].ch || !cpsw->txv[0].ch)) {
-		dev_err(priv->dev, "error initializing dma channels\n");
-		ret = -ENOMEM;
+	if (IS_ERR(cpsw->rxv[0].ch)) {
+		dev_err(priv->dev, "error initializing rx dma channel\n");
+		ret = PTR_ERR(cpsw->rxv[0].ch);
 		goto clean_dma_ret;
 	}
 
 	ale_params.dev			= &pdev->dev;
 	ale_params.ale_ageout		= ale_ageout;
 	ale_params.ale_entries		= data->ale_entries;
-	ale_params.ale_ports		= data->slaves;
+	ale_params.ale_ports		= CPSW_ALE_PORTS_NUM;
 
 	cpsw->ale = cpsw_ale_create(&ale_params);
 	if (!cpsw->ale) {
@@ -3072,14 +3093,14 @@ static int cpsw_probe(struct platform_device *pdev)
 	cpsw->cpts = cpts_create(cpsw->dev, cpts_regs, cpsw->dev->of_node);
 	if (IS_ERR(cpsw->cpts)) {
 		ret = PTR_ERR(cpsw->cpts);
-		goto clean_ale_ret;
+		goto clean_dma_ret;
 	}
 
 	ndev->irq = platform_get_irq(pdev, 1);
 	if (ndev->irq < 0) {
 		dev_err(priv->dev, "error getting irq resource\n");
 		ret = ndev->irq;
-		goto clean_ale_ret;
+		goto clean_dma_ret;
 	}
 
 	of_id = of_match_device(cpsw_of_mtable, &pdev->dev);
@@ -3103,7 +3124,7 @@ static int cpsw_probe(struct platform_device *pdev)
 	if (ret) {
 		dev_err(priv->dev, "error registering net device\n");
 		ret = -ENODEV;
-		goto clean_ale_ret;
+		goto clean_dma_ret;
 	}
 
 	if (cpsw->data.dual_emac) {
@@ -3126,7 +3147,7 @@ static int cpsw_probe(struct platform_device *pdev)
 	irq = platform_get_irq(pdev, 1);
 	if (irq < 0) {
 		ret = irq;
-		goto clean_ale_ret;
+		goto clean_dma_ret;
 	}
 
 	cpsw->irqs_table[0] = irq;
@@ -3134,14 +3155,14 @@ static int cpsw_probe(struct platform_device *pdev)
 			       0, dev_name(&pdev->dev), cpsw);
 	if (ret < 0) {
 		dev_err(priv->dev, "error attaching irq (%d)\n", ret);
-		goto clean_ale_ret;
+		goto clean_dma_ret;
 	}
 
 	/* TX IRQ */
 	irq = platform_get_irq(pdev, 2);
 	if (irq < 0) {
 		ret = irq;
-		goto clean_ale_ret;
+		goto clean_dma_ret;
 	}
 
 	cpsw->irqs_table[1] = irq;
@@ -3149,7 +3170,7 @@ static int cpsw_probe(struct platform_device *pdev)
 			       0, dev_name(&pdev->dev), cpsw);
 	if (ret < 0) {
 		dev_err(priv->dev, "error attaching irq (%d)\n", ret);
-		goto clean_ale_ret;
+		goto clean_dma_ret;
 	}
 
 	cpsw_notice(priv, probe,
@@ -3162,8 +3183,6 @@ static int cpsw_probe(struct platform_device *pdev)
 
 clean_unregister_netdev_ret:
 	unregister_netdev(ndev);
-clean_ale_ret:
-	cpsw_ale_destroy(cpsw->ale);
 clean_dma_ret:
 	cpdma_ctlr_destroy(cpsw->dma);
 clean_dt_ret:
@@ -3193,7 +3212,6 @@ static int cpsw_remove(struct platform_device *pdev)
 	unregister_netdev(ndev);
 
 	cpts_release(cpsw->cpts);
-	cpsw_ale_destroy(cpsw->ale);
 	cpdma_ctlr_destroy(cpsw->dma);
 	cpsw_remove_dt(pdev);
 	pm_runtime_put_sync(&pdev->dev);
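
The bulk of the cpsw changes swap __raw_writel()/__raw_readl() for writel_relaxed()/readl_relaxed(). Both families skip the memory barriers that full writel()/readl() imply, but the relaxed accessors still define the register byte order (little-endian), so the driver stays correct on big-endian kernels, whereas the __raw_*() forms are native-endian raw stores. A loose userspace analog, assuming a little-endian CPU and a plain variable standing in for an ioremap()ed register:

#include <stdint.h>
#include <stdio.h>

static inline void writel_relaxed_demo(uint32_t val, volatile uint32_t *reg)
{
	*reg = val;		/* single 32-bit store, no memory barrier */
}

static inline uint32_t readl_relaxed_demo(const volatile uint32_t *reg)
{
	return *reg;		/* single 32-bit load, no memory barrier */
}

int main(void)
{
	uint32_t fake_reg = 0;	/* stand-in for a device register */

	writel_relaxed_demo(0xFF, &fake_reg);
	printf("reg = 0x%x\n", (unsigned)readl_relaxed_demo(&fake_reg));
	return 0;
}
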
diff --git a/drivers/net/ethernet/ti/cpsw.h b/drivers/net/ethernet/ti/cpsw.h
index 6c3037aa2cd3..cf111db3dc27 100644
--- a/drivers/net/ethernet/ti/cpsw.h
+++ b/drivers/net/ethernet/ti/cpsw.h
@@ -17,26 +17,9 @@
 #include <linux/if_ether.h>
 #include <linux/phy.h>
 
-struct cpsw_slave_data {
-	struct device_node *phy_node;
-	char		phy_id[MII_BUS_ID_SIZE];
-	int		phy_if;
-	u8		mac_addr[ETH_ALEN];
-	u16		dual_emac_res_vlan;	/* Reserved VLAN for DualEMAC */
-};
-
-struct cpsw_platform_data {
-	struct cpsw_slave_data	*slave_data;
-	u32	ss_reg_ofs;	/* Subsystem control register offset */
-	u32	channels;	/* number of cpdma channels (symmetric) */
-	u32	slaves;		/* number of slave cpgmac ports */
-	u32	active_slave; /* time stamping, ethtool and SIOCGMIIPHY slave */
-	u32	ale_entries;	/* ale table size */
-	u32	bd_ram_size;  /*buffer descriptor ram size */
-	u32	mac_control;	/* Mac control register */
-	u16	default_vlan;	/* Def VLAN for ALE lookup in VLAN aware mode*/
-	bool	dual_emac;	/* Enable Dual EMAC mode */
-};
+#define mac_hi(mac)	(((mac)[0] << 0) | ((mac)[1] << 8) |	\
+			 ((mac)[2] << 16) | ((mac)[3] << 24))
+#define mac_lo(mac)	(((mac)[4] << 0) | ((mac)[5] << 8))
 
 void cpsw_phy_sel(struct device *dev, phy_interface_t phy_mode, int slave);
 int ti_cm_get_macid(struct device *dev, int slave, u8 *mac_addr);
diff --git a/drivers/net/ethernet/ti/cpsw_ale.c b/drivers/net/ethernet/ti/cpsw_ale.c
index b432a75fb874..93dc05c194d3 100644
--- a/drivers/net/ethernet/ti/cpsw_ale.c
+++ b/drivers/net/ethernet/ti/cpsw_ale.c
@@ -150,11 +150,11 @@ static int cpsw_ale_read(struct cpsw_ale *ale, int idx, u32 *ale_entry)
 
 	WARN_ON(idx > ale->params.ale_entries);
 
-	__raw_writel(idx, ale->params.ale_regs + ALE_TABLE_CONTROL);
+	writel_relaxed(idx, ale->params.ale_regs + ALE_TABLE_CONTROL);
 
 	for (i = 0; i < ALE_ENTRY_WORDS; i++)
-		ale_entry[i] = __raw_readl(ale->params.ale_regs +
-					   ALE_TABLE + 4 * i);
+		ale_entry[i] = readl_relaxed(ale->params.ale_regs +
+					     ALE_TABLE + 4 * i);
 
 	return idx;
 }
@@ -166,11 +166,11 @@ static int cpsw_ale_write(struct cpsw_ale *ale, int idx, u32 *ale_entry)
 	WARN_ON(idx > ale->params.ale_entries);
 
 	for (i = 0; i < ALE_ENTRY_WORDS; i++)
-		__raw_writel(ale_entry[i], ale->params.ale_regs +
-			     ALE_TABLE + 4 * i);
+		writel_relaxed(ale_entry[i], ale->params.ale_regs +
+			       ALE_TABLE + 4 * i);
 
-	__raw_writel(idx | ALE_TABLE_WRITE, ale->params.ale_regs +
-		     ALE_TABLE_CONTROL);
+	writel_relaxed(idx | ALE_TABLE_WRITE, ale->params.ale_regs +
+		       ALE_TABLE_CONTROL);
 
 	return idx;
 }
@@ -723,7 +723,7 @@ int cpsw_ale_control_set(struct cpsw_ale *ale, int port, int control,
 	if (info->port_offset == 0 && info->port_shift == 0)
 		port = 0; /* global, port is a dont care */
 
-	if (port < 0 || port > ale->params.ale_ports)
+	if (port < 0 || port >= ale->params.ale_ports)
 		return -EINVAL;
 
 	mask = BITMASK(info->bits);
@@ -733,9 +733,9 @@ int cpsw_ale_control_set(struct cpsw_ale *ale, int port, int control,
 	offset = info->offset + (port * info->port_offset);
 	shift  = info->shift  + (port * info->port_shift);
 
-	tmp = __raw_readl(ale->params.ale_regs + offset);
+	tmp = readl_relaxed(ale->params.ale_regs + offset);
 	tmp = (tmp & ~(mask << shift)) | (value << shift);
-	__raw_writel(tmp, ale->params.ale_regs + offset);
+	writel_relaxed(tmp, ale->params.ale_regs + offset);
 
 	return 0;
 }
@@ -754,13 +754,13 @@ int cpsw_ale_control_get(struct cpsw_ale *ale, int port, int control)
 	if (info->port_offset == 0 && info->port_shift == 0)
 		port = 0; /* global, port is a dont care */
 
-	if (port < 0 || port > ale->params.ale_ports)
+	if (port < 0 || port >= ale->params.ale_ports)
 		return -EINVAL;
 
 	offset = info->offset + (port * info->port_offset);
 	shift  = info->shift  + (port * info->port_shift);
 
-	tmp = __raw_readl(ale->params.ale_regs + offset) >> shift;
+	tmp = readl_relaxed(ale->params.ale_regs + offset) >> shift;
 	return tmp & BITMASK(info->bits);
 }
 EXPORT_SYMBOL_GPL(cpsw_ale_control_get);
@@ -779,9 +779,37 @@ static void cpsw_ale_timer(struct timer_list *t)
 
 void cpsw_ale_start(struct cpsw_ale *ale)
 {
+	cpsw_ale_control_set(ale, 0, ALE_ENABLE, 1);
+	cpsw_ale_control_set(ale, 0, ALE_CLEAR, 1);
+
+	timer_setup(&ale->timer, cpsw_ale_timer, 0);
+	if (ale->ageout) {
+		ale->timer.expires = jiffies + ale->ageout;
+		add_timer(&ale->timer);
+	}
+}
+EXPORT_SYMBOL_GPL(cpsw_ale_start);
+
+void cpsw_ale_stop(struct cpsw_ale *ale)
+{
+	del_timer_sync(&ale->timer);
+	cpsw_ale_control_set(ale, 0, ALE_ENABLE, 0);
+}
+EXPORT_SYMBOL_GPL(cpsw_ale_stop);
+
+struct cpsw_ale *cpsw_ale_create(struct cpsw_ale_params *params)
+{
+	struct cpsw_ale *ale;
 	u32 rev, ale_entries;
 
-	rev = __raw_readl(ale->params.ale_regs + ALE_IDVER);
+	ale = devm_kzalloc(params->dev, sizeof(*ale), GFP_KERNEL);
+	if (!ale)
+		return NULL;
+
+	ale->params = *params;
+	ale->ageout = ale->params.ale_ageout * HZ;
+
+	rev = readl_relaxed(ale->params.ale_regs + ALE_IDVER);
 	if (!ale->params.major_ver_mask)
 		ale->params.major_ver_mask = 0xff;
 	ale->version =
@@ -793,8 +821,8 @@ void cpsw_ale_start(struct cpsw_ale *ale)
 
 	if (!ale->params.ale_entries) {
 		ale_entries =
-			__raw_readl(ale->params.ale_regs + ALE_STATUS) &
-				    ALE_STATUS_SIZE_MASK;
+			readl_relaxed(ale->params.ale_regs + ALE_STATUS) &
+			ALE_STATUS_SIZE_MASK;
 		/* ALE available on newer NetCP switches has introduced
 		 * a register, ALE_STATUS, to indicate the size of ALE
 		 * table which shows the size as a multiple of 1024 entries.
@@ -816,9 +844,9 @@ void cpsw_ale_start(struct cpsw_ale *ale)
 		 "ALE Table size %ld\n", ale->params.ale_entries);
 
 	/* set default bits for existing h/w */
-	ale->port_mask_bits = 3;
-	ale->port_num_bits = 2;
-	ale->vlan_field_bits = 3;
+	ale->port_mask_bits = ale->params.ale_ports;
+	ale->port_num_bits = order_base_2(ale->params.ale_ports);
+	ale->vlan_field_bits = ale->params.ale_ports;
 
 	/* Set defaults override for ALE on NetCP NU switch and for version
 	 * 1R3
@@ -847,57 +875,12 @@ void cpsw_ale_start(struct cpsw_ale *ale)
 		ale_controls[ALE_PORT_UNTAGGED_EGRESS].shift = 0;
 		ale_controls[ALE_PORT_UNTAGGED_EGRESS].offset =
 					ALE_UNKNOWNVLAN_FORCE_UNTAG_EGRESS;
-		ale->port_mask_bits = ale->params.ale_ports;
-		ale->port_num_bits = ale->params.ale_ports - 1;
-		ale->vlan_field_bits = ale->params.ale_ports;
-	} else if (ale->version == ALE_VERSION_1R3) {
-		ale->port_mask_bits = ale->params.ale_ports;
-		ale->port_num_bits = 3;
-		ale->vlan_field_bits = ale->params.ale_ports;
 	}
 
-	cpsw_ale_control_set(ale, 0, ALE_ENABLE, 1);
-	cpsw_ale_control_set(ale, 0, ALE_CLEAR, 1);
-
-	timer_setup(&ale->timer, cpsw_ale_timer, 0);
-	if (ale->ageout) {
-		ale->timer.expires = jiffies + ale->ageout;
-		add_timer(&ale->timer);
-	}
-}
-EXPORT_SYMBOL_GPL(cpsw_ale_start);
-
-void cpsw_ale_stop(struct cpsw_ale *ale)
-{
-	del_timer_sync(&ale->timer);
-}
-EXPORT_SYMBOL_GPL(cpsw_ale_stop);
-
-struct cpsw_ale *cpsw_ale_create(struct cpsw_ale_params *params)
-{
-	struct cpsw_ale *ale;
-
-	ale = kzalloc(sizeof(*ale), GFP_KERNEL);
-	if (!ale)
-		return NULL;
-
-	ale->params = *params;
-	ale->ageout = ale->params.ale_ageout * HZ;
-
 	return ale;
 }
 EXPORT_SYMBOL_GPL(cpsw_ale_create);
 
-int cpsw_ale_destroy(struct cpsw_ale *ale)
-{
-	if (!ale)
-		return -EINVAL;
-	cpsw_ale_control_set(ale, 0, ALE_ENABLE, 0);
-	kfree(ale);
-	return 0;
-}
-EXPORT_SYMBOL_GPL(cpsw_ale_destroy);
-
 void cpsw_ale_dump(struct cpsw_ale *ale, u32 *data)
 {
 	int i;
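
cpsw_ale_create() now derives the ALE bit-field widths from ale_ports instead of hardcoding them, using order_base_2() for the port-number field. With the fixed CPSW_ALE_PORTS_NUM of 3 passed in from cpsw_probe(), order_base_2(3) evaluates to 2 — exactly the old hardcoded `port_num_bits = 2`. A standalone reimplementation to check that (the real macro lives in include/linux/log2.h):

#include <stdio.h>

static unsigned int order_base_2_demo(unsigned int n)
{
	unsigned int order = 0;

	while ((1U << order) < n)	/* smallest order with 2^order >= n */
		order++;
	return order;
}

int main(void)
{
	/* CPSW_ALE_PORTS_NUM == 3: port mask uses 3 bits, port number 2 */
	printf("port_num_bits = %u\n", order_base_2_demo(3)); /* prints 2 */
	return 0;
}
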
diff --git a/drivers/net/ethernet/ti/cpsw_ale.h b/drivers/net/ethernet/ti/cpsw_ale.h
index 25d24e8d0904..d4fe9016429b 100644
--- a/drivers/net/ethernet/ti/cpsw_ale.h
+++ b/drivers/net/ethernet/ti/cpsw_ale.h
@@ -100,7 +100,6 @@ enum cpsw_ale_port_state {
 #define ALE_ENTRY_WORDS	DIV_ROUND_UP(ALE_ENTRY_BITS, 32)
 
 struct cpsw_ale *cpsw_ale_create(struct cpsw_ale_params *params);
-int cpsw_ale_destroy(struct cpsw_ale *ale);
 
 void cpsw_ale_start(struct cpsw_ale *ale);
 void cpsw_ale_stop(struct cpsw_ale *ale);
diff --git a/drivers/net/ethernet/ti/davinci_cpdma.c b/drivers/net/ethernet/ti/davinci_cpdma.c
index e4d6edf387b3..6f9173ff9414 100644
--- a/drivers/net/ethernet/ti/davinci_cpdma.c
+++ b/drivers/net/ethernet/ti/davinci_cpdma.c
@@ -893,7 +893,7 @@ struct cpdma_chan *cpdma_chan_create(struct cpdma_ctlr *ctlr, int chan_num,
 	chan_num = rx_type ? rx_chan_num(chan_num) : tx_chan_num(chan_num);
 
 	if (__chan_linear(chan_num) >= ctlr->num_chan)
-		return NULL;
+		return ERR_PTR(-EINVAL);
 
 	chan = devm_kzalloc(ctlr->dev, sizeof(*chan), GFP_KERNEL);
 	if (!chan)
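
Returning ERR_PTR(-EINVAL) instead of NULL lets cpdma_chan_create() report *why* channel creation failed, and the cpsw/davinci_emac probe paths propagate that code via PTR_ERR() rather than a blanket -ENOMEM. A minimal userspace sketch of the convention (the real macros live in include/linux/err.h and reserve the top 4095 addresses of the pointer space for errno values):

#include <errno.h>
#include <stdio.h>

#define MAX_ERRNO	4095

static inline void *ERR_PTR(long error)
{
	return (void *)error;		/* errno lives in the top 4095 addrs */
}

static inline long PTR_ERR(const void *ptr)
{
	return (long)ptr;
}

static inline int IS_ERR(const void *ptr)
{
	return (unsigned long)ptr >= (unsigned long)-MAX_ERRNO;
}

static void *chan_create(int chan_num, int num_chan)
{
	if (chan_num >= num_chan)
		return ERR_PTR(-EINVAL);	/* says why, not just NULL */
	return "channel";			/* stand-in for a real object */
}

int main(void)
{
	void *ch = chan_create(8, 8);

	if (IS_ERR(ch))
		printf("create failed: %ld\n", PTR_ERR(ch));	/* -22 */
	return 0;
}
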
diff --git a/drivers/net/ethernet/ti/davinci_emac.c b/drivers/net/ethernet/ti/davinci_emac.c
index 4bb561856af5..abceea802ea1 100644
--- a/drivers/net/ethernet/ti/davinci_emac.c
+++ b/drivers/net/ethernet/ti/davinci_emac.c
@@ -1385,11 +1385,6 @@ static int emac_devioctl(struct net_device *ndev, struct ifreq *ifrq, int cmd)
 		return -EOPNOTSUPP;
 }
 
-static int match_first_device(struct device *dev, void *data)
-{
-	return !strncmp(dev_name(dev), "davinci_mdio", 12);
-}
-
 /**
  * emac_dev_open - EMAC device open
  * @ndev: The DaVinci EMAC network adapter
@@ -1489,8 +1484,8 @@ static int emac_dev_open(struct net_device *ndev)
 
 	/* use the first phy on the bus if pdata did not give us a phy id */
 	if (!phydev && !priv->phy_id) {
-		phy = bus_find_device(&mdio_bus_type, NULL, NULL,
-				      match_first_device);
+		phy = bus_find_device_by_name(&mdio_bus_type, NULL,
+					      "davinci_mdio");
 		if (phy) {
 			priv->phy_id = dev_name(phy);
 			if (!priv->phy_id || !*priv->phy_id)
@@ -1875,10 +1870,17 @@ static int davinci_emac_probe(struct platform_device *pdev)
 
 	priv->txchan = cpdma_chan_create(priv->dma, EMAC_DEF_TX_CH,
 					 emac_tx_handler, 0);
+	if (IS_ERR(priv->txchan)) {
+		dev_err(&pdev->dev, "error initializing tx dma channel\n");
+		rc = PTR_ERR(priv->txchan);
+		goto no_cpdma_chan;
+	}
+
 	priv->rxchan = cpdma_chan_create(priv->dma, EMAC_DEF_RX_CH,
 					 emac_rx_handler, 1);
-	if (WARN_ON(!priv->txchan || !priv->rxchan)) {
-		rc = -ENOMEM;
+	if (IS_ERR(priv->rxchan)) {
+		dev_err(&pdev->dev, "error initializing rx dma channel\n");
+		rc = PTR_ERR(priv->rxchan);
 		goto no_cpdma_chan;
 	}
 
diff --git a/drivers/net/ethernet/ti/netcp_ethss.c b/drivers/net/ethernet/ti/netcp_ethss.c
index e831c49713ee..56dbc0b9fedc 100644
--- a/drivers/net/ethernet/ti/netcp_ethss.c
+++ b/drivers/net/ethernet/ti/netcp_ethss.c
@@ -27,6 +27,7 @@
 #include <linux/net_tstamp.h>
 #include <linux/ethtool.h>
 
+#include "cpsw.h"
 #include "cpsw_ale.h"
 #include "netcp.h"
 #include "cpts.h"
@@ -2047,10 +2048,6 @@ static const struct ethtool_ops keystone_ethtool_ops = {
 	.get_ts_info		= keystone_get_ts_info,
 };
 
-#define mac_hi(mac)	(((mac)[0] << 0) | ((mac)[1] << 8) |	\
-			 ((mac)[2] << 16) | ((mac)[3] << 24))
-#define mac_lo(mac)	(((mac)[4] << 0) | ((mac)[5] << 8))
-
 static void gbe_set_slave_mac(struct gbe_slave *slave,
 			      struct gbe_intf *gbe_intf)
 {
@@ -3692,7 +3689,6 @@ static int gbe_remove(struct netcp_device *netcp_device, void *inst_priv)
 	del_timer_sync(&gbe_dev->timer);
 	cpts_release(gbe_dev->cpts);
 	cpsw_ale_stop(gbe_dev->ale);
-	cpsw_ale_destroy(gbe_dev->ale);
 	netcp_txpipe_close(&gbe_dev->tx_pipe);
 	free_secondary_ports(gbe_dev);
 
diff --git a/drivers/net/geneve.c b/drivers/net/geneve.c
index 64fda2e1040e..b919e89a9b93 100644
--- a/drivers/net/geneve.c
+++ b/drivers/net/geneve.c
@@ -1652,19 +1652,16 @@ static __net_init int geneve_init_net(struct net *net)
 	return 0;
 }
 
-static void __net_exit geneve_exit_net(struct net *net)
+static void geneve_destroy_tunnels(struct net *net, struct list_head *head)
 {
 	struct geneve_net *gn = net_generic(net, geneve_net_id);
 	struct geneve_dev *geneve, *next;
 	struct net_device *dev, *aux;
-	LIST_HEAD(list);
-
-	rtnl_lock();
 
 	/* gather any geneve devices that were moved into this ns */
 	for_each_netdev_safe(net, dev, aux)
 		if (dev->rtnl_link_ops == &geneve_link_ops)
-			unregister_netdevice_queue(dev, &list);
+			unregister_netdevice_queue(dev, head);
 
 	/* now gather any other geneve devices that were created in this ns */
 	list_for_each_entry_safe(geneve, next, &gn->geneve_list, next) {
@@ -1672,18 +1669,29 @@ static void __net_exit geneve_exit_net(struct net *net)
 		 * to the list by the previous loop.
 		 */
 		if (!net_eq(dev_net(geneve->dev), net))
-			unregister_netdevice_queue(geneve->dev, &list);
+			unregister_netdevice_queue(geneve->dev, head);
 	}
 
+	WARN_ON_ONCE(!list_empty(&gn->sock_list));
+}
+
+static void __net_exit geneve_exit_batch_net(struct list_head *net_list)
+{
+	struct net *net;
+	LIST_HEAD(list);
+
+	rtnl_lock();
+	list_for_each_entry(net, net_list, exit_list)
+		geneve_destroy_tunnels(net, &list);
+
 	/* unregister the devices gathered above */
 	unregister_netdevice_many(&list);
 	rtnl_unlock();
-	WARN_ON_ONCE(!list_empty(&gn->sock_list));
 }
 
 static struct pernet_operations geneve_net_ops = {
 	.init = geneve_init_net,
-	.exit = geneve_exit_net,
+	.exit_batch = geneve_exit_batch_net,
 	.id   = &geneve_net_id,
 	.size = sizeof(struct geneve_net),
 };
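
Moving geneve from .exit to .exit_batch means that when many network namespaces die at once (a container host tearing down, say), the RTNL lock is taken once and unregister_netdevice_many() consumes a single combined list, instead of paying one rtnl_lock()/unregister round trip per namespace. The shape of that optimization, as a hedged userspace sketch with a mutex standing in for RTNL and strings standing in for devices:

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t rtnl = PTHREAD_MUTEX_INITIALIZER;

static void destroy_batch(const char **devs, int n)
{
	for (int i = 0; i < n; i++)
		printf("unregister %s\n", devs[i]);
}

int main(void)
{
	/* devices gathered from every exiting namespace, per-ns collection
	 * done without the lock, destruction paid for once */
	const char *batch[] = { "geneve0", "geneve1", "geneve2" };

	pthread_mutex_lock(&rtnl);	/* one RTNL round trip, not three */
	destroy_batch(batch, 3);
	pthread_mutex_unlock(&rtnl);
	return 0;
}
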
diff --git a/drivers/net/hyperv/hyperv_net.h b/drivers/net/hyperv/hyperv_net.h
index 88ddfb92122b..0db3bd1ea06f 100644
--- a/drivers/net/hyperv/hyperv_net.h
+++ b/drivers/net/hyperv/hyperv_net.h
@@ -146,7 +146,6 @@ struct hv_netvsc_packet {
 
 struct netvsc_device_info {
 	unsigned char mac_adr[ETH_ALEN];
-	int  ring_size;
 	u32  num_chn;
 	u32  send_sections;
 	u32  recv_sections;
@@ -188,18 +187,22 @@ struct rndis_message;
 struct netvsc_device;
 struct net_device_context;
 
+extern u32 netvsc_ring_bytes;
+extern struct reciprocal_value netvsc_ring_reciprocal;
+
 struct netvsc_device *netvsc_device_add(struct hv_device *device,
 					const struct netvsc_device_info *info);
 int netvsc_alloc_recv_comp_ring(struct netvsc_device *net_device, u32 q_idx);
 void netvsc_device_remove(struct hv_device *device);
-int netvsc_send(struct net_device_context *ndc,
+int netvsc_send(struct net_device *net,
 		struct hv_netvsc_packet *packet,
 		struct rndis_message *rndis_msg,
 		struct hv_page_buffer *page_buffer,
 		struct sk_buff *skb);
-void netvsc_linkstatus_callback(struct hv_device *device_obj,
+void netvsc_linkstatus_callback(struct net_device *net,
 				struct rndis_message *resp);
 int netvsc_recv_callback(struct net_device *net,
+			 struct netvsc_device *nvdev,
 			 struct vmbus_channel *channel,
 			 void  *data, u32 len,
 			 const struct ndis_tcp_ip_checksum_info *csum_info,
@@ -220,7 +223,6 @@ int rndis_filter_set_rss_param(struct rndis_device *rdev,
 			       const u8 *key);
 int rndis_filter_receive(struct net_device *ndev,
 			 struct netvsc_device *net_dev,
-			 struct hv_device *dev,
 			 struct vmbus_channel *channel,
 			 void *data, u32 buflen);
 
@@ -635,14 +637,27 @@ struct nvsp_message {
 #define NETVSC_MTU 65535
 #define NETVSC_MTU_MIN ETH_MIN_MTU
 
-#define NETVSC_RECEIVE_BUFFER_SIZE		(1024*1024*16)	/* 16MB */
-#define NETVSC_RECEIVE_BUFFER_SIZE_LEGACY	(1024*1024*15)  /* 15MB */
-#define NETVSC_SEND_BUFFER_SIZE			(1024 * 1024 * 15)   /* 15MB */
+/* Max buffer sizes allowed by a host */
+#define NETVSC_RECEIVE_BUFFER_SIZE		(1024 * 1024 * 31) /* 31MB */
+#define NETVSC_RECEIVE_BUFFER_SIZE_LEGACY	(1024 * 1024 * 15) /* 15MB */
+#define NETVSC_RECEIVE_BUFFER_DEFAULT		(1024 * 1024 * 16)
+
+#define NETVSC_SEND_BUFFER_SIZE			(1024 * 1024 * 15)  /* 15MB */
+#define NETVSC_SEND_BUFFER_DEFAULT		(1024 * 1024)
+
 #define NETVSC_INVALID_INDEX			-1
 
 #define NETVSC_SEND_SECTION_SIZE		6144
 #define NETVSC_RECV_SECTION_SIZE		1728
 
+/* Default size of TX buf: 1MB, RX buf: 16MB */
+#define NETVSC_MIN_TX_SECTIONS	10
+#define NETVSC_DEFAULT_TX	(NETVSC_SEND_BUFFER_DEFAULT \
+				 / NETVSC_SEND_SECTION_SIZE)
+#define NETVSC_MIN_RX_SECTIONS	10
+#define NETVSC_DEFAULT_RX	(NETVSC_RECEIVE_BUFFER_DEFAULT \
+				 / NETVSC_RECV_SECTION_SIZE)
+
 #define NETVSC_RECEIVE_BUFFER_ID		0xcafe
 #define NETVSC_SEND_BUFFER_ID			0
 
@@ -690,6 +705,7 @@ struct netvsc_ethtool_stats {
 	unsigned long tx_busy;
 	unsigned long tx_send_full;
 	unsigned long rx_comp_busy;
+	unsigned long rx_no_memory;
 	unsigned long stop_queue;
 	unsigned long wake_queue;
 };
@@ -804,13 +820,9 @@ struct netvsc_device {
 
 	struct rndis_device *extension;
 
-	int ring_size;
-
 	u32 max_pkt; /* max number of pkt in one send, e.g. 8 */
 	u32 pkt_align; /* alignment bytes, e.g. 8 */
 
-	atomic_t open_cnt;
-
 	struct netvsc_channel chan_table[VRSS_CHANNEL_MAX];
 
 	struct rcu_head rcu;
@@ -1425,32 +1437,6 @@ struct rndis_message {
 	(sizeof(msg) + (sizeof(struct rndis_message) -	\
 	 sizeof(union rndis_message_container)))
 
-/* get pointer to info buffer with message pointer */
-#define MESSAGE_TO_INFO_BUFFER(msg)				\
-	(((unsigned char *)(msg)) + msg->info_buf_offset)
-
-/* get pointer to status buffer with message pointer */
-#define MESSAGE_TO_STATUS_BUFFER(msg)			\
-	(((unsigned char *)(msg)) + msg->status_buf_offset)
-
-/* get pointer to OOBD buffer with message pointer */
-#define MESSAGE_TO_OOBD_BUFFER(msg)				\
-	(((unsigned char *)(msg)) + msg->oob_data_offset)
-
-/* get pointer to data buffer with message pointer */
-#define MESSAGE_TO_DATA_BUFFER(msg)				\
-	(((unsigned char *)(msg)) + msg->per_pkt_info_offset)
-
-/* get pointer to contained message from NDIS_MESSAGE pointer */
-#define RNDIS_MESSAGE_PTR_TO_MESSAGE_PTR(rndis_msg)		\
-	((void *) &rndis_msg->msg)
-
-/* get pointer to contained message from NDIS_MESSAGE pointer */
-#define RNDIS_MESSAGE_RAW_PTR_TO_MESSAGE_PTR(rndis_msg)	\
-	((void *) rndis_msg)
-
-
-
 #define RNDIS_HEADER_SIZE	(sizeof(struct rndis_message) - \
 				 sizeof(union rndis_message_container))
 
diff --git a/drivers/net/hyperv/netvsc.c b/drivers/net/hyperv/netvsc.c
index bfc79698b8f4..17e529af79dc 100644
--- a/drivers/net/hyperv/netvsc.c
+++ b/drivers/net/hyperv/netvsc.c
@@ -31,6 +31,7 @@
 #include <linux/vmalloc.h>
 #include <linux/rtnetlink.h>
 #include <linux/prefetch.h>
+#include <linux/reciprocal_div.h>
 
 #include <asm/sync_bitops.h>
 
@@ -72,7 +73,7 @@ static struct netvsc_device *alloc_net_device(void)
 
 	init_waitqueue_head(&net_device->wait_drain);
 	net_device->destroy = false;
-	atomic_set(&net_device->open_cnt, 0);
+
 	net_device->max_pkt = RNDIS_MAX_PKT_DEFAULT;
 	net_device->pkt_align = RNDIS_PKT_ALIGN_DEFAULT;
 
@@ -267,6 +268,11 @@ static int netvsc_init_buf(struct hv_device *device,
 	buf_size = device_info->recv_sections * device_info->recv_section_size;
 	buf_size = roundup(buf_size, PAGE_SIZE);
 
+	/* Legacy hosts only allow smaller receive buffer */
+	if (net_device->nvsp_version <= NVSP_PROTOCOL_VERSION_2)
+		buf_size = min_t(unsigned int, buf_size,
+				 NETVSC_RECEIVE_BUFFER_SIZE_LEGACY);
+
 	net_device->recv_buf = vzalloc(buf_size);
 	if (!net_device->recv_buf) {
 		netdev_err(ndev,
@@ -588,14 +594,11 @@ void netvsc_device_remove(struct hv_device *device)
  * Get the percentage of available bytes to write in the ring.
  * The return value is in range from 0 to 100.
  */
-static inline u32 hv_ringbuf_avail_percent(
-		struct hv_ring_buffer_info *ring_info)
+static u32 hv_ringbuf_avail_percent(const struct hv_ring_buffer_info *ring_info)
 {
-	u32 avail_read, avail_write;
-
-	hv_get_ringbuffer_availbytes(ring_info, &avail_read, &avail_write);
+	u32 avail_write = hv_get_bytes_to_write(ring_info);
 
-	return avail_write * 100 / ring_info->ring_datasize;
+	return reciprocal_divide(avail_write  * 100, netvsc_ring_reciprocal);
 }
 
 static inline void netvsc_free_send_slot(struct netvsc_device *net_device,
@@ -698,26 +701,26 @@ static u32 netvsc_get_next_send_section(struct netvsc_device *net_device)
 	return NETVSC_INVALID_INDEX;
 }
 
-static u32 netvsc_copy_to_send_buf(struct netvsc_device *net_device,
-				   unsigned int section_index,
-				   u32 pend_size,
-				   struct hv_netvsc_packet *packet,
-				   struct rndis_message *rndis_msg,
-				   struct hv_page_buffer *pb,
-				   struct sk_buff *skb)
+static void netvsc_copy_to_send_buf(struct netvsc_device *net_device,
+				    unsigned int section_index,
+				    u32 pend_size,
+				    struct hv_netvsc_packet *packet,
+				    struct rndis_message *rndis_msg,
+				    struct hv_page_buffer *pb,
+				    bool xmit_more)
 {
 	char *start = net_device->send_buf;
 	char *dest = start + (section_index * net_device->send_section_size)
 		     + pend_size;
 	int i;
-	u32 msg_size = 0;
 	u32 padding = 0;
-	u32 remain = packet->total_data_buflen % net_device->pkt_align;
 	u32 page_count = packet->cp_partial ? packet->rmsg_pgcnt :
 		packet->page_buf_cnt;
+	u32 remain;
 
 	/* Add padding */
-	if (skb->xmit_more && remain && !packet->cp_partial) {
+	remain = packet->total_data_buflen & (net_device->pkt_align - 1);
+	if (xmit_more && remain) {
 		padding = net_device->pkt_align - remain;
 		rndis_msg->msg_len += padding;
 		packet->total_data_buflen += padding;
@@ -729,16 +732,11 @@ static u32 netvsc_copy_to_send_buf(struct netvsc_device *net_device,
 		u32 len = pb[i].len;
 
 		memcpy(dest, (src + offset), len);
-		msg_size += len;
 		dest += len;
 	}
 
-	if (padding) {
+	if (padding)
 		memset(dest, 0, padding);
-		msg_size += padding;
-	}
-
-	return msg_size;
 }
 
 static inline int netvsc_send_pkt(
@@ -831,12 +829,13 @@ static inline void move_pkt_msd(struct hv_netvsc_packet **msd_send,
 }
 
 /* RCU already held by caller */
-int netvsc_send(struct net_device_context *ndev_ctx,
+int netvsc_send(struct net_device *ndev,
 		struct hv_netvsc_packet *packet,
 		struct rndis_message *rndis_msg,
 		struct hv_page_buffer *pb,
 		struct sk_buff *skb)
 {
+	struct net_device_context *ndev_ctx = netdev_priv(ndev);
 	struct netvsc_device *net_device
 		= rcu_dereference_bh(ndev_ctx->nvdev);
 	struct hv_device *device = ndev_ctx->device_ctx;
@@ -847,8 +846,7 @@ int netvsc_send(struct net_device_context *ndev_ctx,
 	struct multi_send_data *msdp;
 	struct hv_netvsc_packet *msd_send = NULL, *cur_send = NULL;
 	struct sk_buff *msd_skb = NULL;
-	bool try_batch;
-	bool xmit_more = (skb != NULL) ? skb->xmit_more : false;
+	bool try_batch, xmit_more;
 
 	/* If device is rescinded, return error and packet will get dropped. */
 	if (unlikely(!net_device || net_device->destroy))
@@ -899,10 +897,17 @@ int netvsc_send(struct net_device_context *ndev_ctx,
 		}
 	}
 
+	/* Keep aggregating only if stack says more data is coming
+	 * and not doing mixed modes send and not flow blocked
+	 */
+	xmit_more = skb->xmit_more &&
+		!packet->cp_partial &&
+		!netif_xmit_stopped(netdev_get_tx_queue(ndev, packet->q_idx));
+
 	if (section_index != NETVSC_INVALID_INDEX) {
 		netvsc_copy_to_send_buf(net_device,
 					section_index, msd_len,
-					packet, rndis_msg, pb, skb);
+					packet, rndis_msg, pb, xmit_more);
 
 		packet->send_buf_index = section_index;
 
@@ -922,7 +927,7 @@ int netvsc_send(struct net_device_context *ndev_ctx,
 		if (msdp->skb)
 			dev_consume_skb_any(msdp->skb);
 
-		if (xmit_more && !packet->cp_partial) {
+		if (xmit_more) {
 			msdp->skb = skb;
 			msdp->pkt = packet;
 			msdp->count++;
@@ -1085,7 +1090,7 @@ static int netvsc_receive(struct net_device *ndev,
 		u32 buflen = vmxferpage_packet->ranges[i].byte_count;
 
 		/* Pass it to the upper layer */
-		status = rndis_filter_receive(ndev, net_device, device,
+		status = rndis_filter_receive(ndev, net_device,
 					      channel, data, buflen);
 	}
 
@@ -1249,7 +1254,6 @@ struct netvsc_device *netvsc_device_add(struct hv_device *device,
 				const struct netvsc_device_info *device_info)
 {
 	int i, ret = 0;
-	int ring_size = device_info->ring_size;
 	struct netvsc_device *net_device;
 	struct net_device *ndev = hv_get_drvdata(device);
 	struct net_device_context *net_device_ctx = netdev_priv(ndev);
@@ -1261,8 +1265,6 @@ struct netvsc_device *netvsc_device_add(struct hv_device *device,
 	for (i = 0; i < VRSS_SEND_TAB_SIZE; i++)
 		net_device_ctx->tx_table[i] = 0;
 
-	net_device->ring_size = ring_size;
-
 	/* Because the device uses NAPI, all the interrupt batching and
 	 * control is done via Net softirq, not the channel handling
 	 */
@@ -1289,10 +1291,9 @@ struct netvsc_device *netvsc_device_add(struct hv_device *device,
 		       netvsc_poll, NAPI_POLL_WEIGHT);
 
 	/* Open the channel */
-	ret = vmbus_open(device->channel, ring_size * PAGE_SIZE,
-			 ring_size * PAGE_SIZE, NULL, 0,
-			 netvsc_channel_cb,
-			 net_device->chan_table);
+	ret = vmbus_open(device->channel, netvsc_ring_bytes,
+			 netvsc_ring_bytes,  NULL, 0,
+			 netvsc_channel_cb, net_device->chan_table);
 
 	if (ret != 0) {
 		netif_napi_del(&net_device->chan_table[0].napi);
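
hv_ringbuf_avail_percent() runs on every send completion, so the division by the ring size — constant after module init — is converted into a multiply by a reciprocal precomputed in netvsc_drv_init(). A simplified fixed-point version of the idea; the kernel's reciprocal_value()/reciprocal_divide() in include/linux/reciprocal_div.h add correction shifts so the result is exact for any u32 divisor, whereas this rounded variant can be off by one for awkward divisors:

#include <stdint.h>
#include <stdio.h>

struct recip { uint64_t m; };

static struct recip recip_value(uint32_t d)
{
	/* round-to-nearest reciprocal: m ~= 2^32 / d */
	return (struct recip){ .m = ((1ULL << 32) + d / 2) / d };
}

static uint32_t recip_divide(uint32_t a, struct recip r)
{
	return (uint32_t)(((uint64_t)a * r.m) >> 32);	/* a / d, no div */
}

int main(void)
{
	uint32_t ring_bytes = 128 * 4096;  /* default ring_size * PAGE_SIZE */
	struct recip r = recip_value(ring_bytes);

	/* percent of writable bytes, as in hv_ringbuf_avail_percent() */
	printf("%u%%\n", recip_divide(262144 * 100, r));	/* 50% */
	return 0;
}
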
diff --git a/drivers/net/hyperv/netvsc_drv.c b/drivers/net/hyperv/netvsc_drv.c
index 5129647d420c..c5584c2d440e 100644
--- a/drivers/net/hyperv/netvsc_drv.c
+++ b/drivers/net/hyperv/netvsc_drv.c
@@ -35,6 +35,7 @@
 #include <linux/slab.h>
 #include <linux/rtnetlink.h>
 #include <linux/netpoll.h>
+#include <linux/reciprocal_div.h>
 
 #include <net/arp.h>
 #include <net/route.h>
@@ -46,17 +47,15 @@
 #include "hyperv_net.h"
 
 #define RING_SIZE_MIN		64
-#define NETVSC_MIN_TX_SECTIONS	10
-#define NETVSC_DEFAULT_TX	192	/* ~1M */
-#define NETVSC_MIN_RX_SECTIONS	10	/* ~64K */
-#define NETVSC_DEFAULT_RX	10485   /* Max ~16M */
 
 #define LINKCHANGE_INT (2 * HZ)
 #define VF_TAKEOVER_INT (HZ / 10)
 
-static int ring_size = 128;
-module_param(ring_size, int, S_IRUGO);
+static unsigned int ring_size __ro_after_init = 128;
+module_param(ring_size, uint, S_IRUGO);
 MODULE_PARM_DESC(ring_size, "Ring buffer size (# of pages)");
+unsigned int netvsc_ring_bytes __ro_after_init;
+struct reciprocal_value netvsc_ring_reciprocal __ro_after_init;
 
 static const u32 default_msg = NETIF_MSG_DRV | NETIF_MSG_PROBE |
 				NETIF_MSG_LINK | NETIF_MSG_IFUP |
@@ -174,17 +173,15 @@ out:
 	return ret;
 }
 
-static void *init_ppi_data(struct rndis_message *msg, u32 ppi_size,
-			   int pkt_type)
+static inline void *init_ppi_data(struct rndis_message *msg,
+				  u32 ppi_size, u32 pkt_type)
 {
-	struct rndis_packet *rndis_pkt;
+	struct rndis_packet *rndis_pkt = &msg->msg.pkt;
 	struct rndis_per_packet_info *ppi;
 
-	rndis_pkt = &msg->msg.pkt;
 	rndis_pkt->data_offset += ppi_size;
-
-	ppi = (struct rndis_per_packet_info *)((void *)rndis_pkt +
-		rndis_pkt->per_pkt_info_offset + rndis_pkt->per_pkt_info_len);
+	ppi = (void *)rndis_pkt + rndis_pkt->per_pkt_info_offset
+		+ rndis_pkt->per_pkt_info_len;
 
 	ppi->size = ppi_size;
 	ppi->type = pkt_type;
@@ -192,7 +189,7 @@ static void *init_ppi_data(struct rndis_message *msg, u32 ppi_size,
 
 	rndis_pkt->per_pkt_info_len += ppi_size;
 
-	return ppi;
+	return ppi + 1;
 }
 
 /* Azure hosts don't support non-TCP port numbers in hashing for fragmented
@@ -469,10 +466,8 @@ static int netvsc_start_xmit(struct sk_buff *skb, struct net_device *net)
 	int ret;
 	unsigned int num_data_pgs;
 	struct rndis_message *rndis_msg;
-	struct rndis_packet *rndis_pkt;
 	struct net_device *vf_netdev;
 	u32 rndis_msg_size;
-	struct rndis_per_packet_info *ppi;
 	u32 hash;
 	struct hv_page_buffer pb[MAX_PAGE_BUFFER_COUNT];
 
@@ -527,34 +522,36 @@ static int netvsc_start_xmit(struct sk_buff *skb, struct net_device *net)
 
 	rndis_msg = (struct rndis_message *)skb->head;
 
-	memset(rndis_msg, 0, RNDIS_AND_PPI_SIZE);
-
 	/* Add the rndis header */
 	rndis_msg->ndis_msg_type = RNDIS_MSG_PACKET;
 	rndis_msg->msg_len = packet->total_data_buflen;
-	rndis_pkt = &rndis_msg->msg.pkt;
-	rndis_pkt->data_offset = sizeof(struct rndis_packet);
-	rndis_pkt->data_len = packet->total_data_buflen;
-	rndis_pkt->per_pkt_info_offset = sizeof(struct rndis_packet);
+
+	rndis_msg->msg.pkt = (struct rndis_packet) {
+		.data_offset = sizeof(struct rndis_packet),
+		.data_len = packet->total_data_buflen,
+		.per_pkt_info_offset = sizeof(struct rndis_packet),
+	};
 
 	rndis_msg_size = RNDIS_MESSAGE_SIZE(struct rndis_packet);
 
 	hash = skb_get_hash_raw(skb);
 	if (hash != 0 && net->real_num_tx_queues > 1) {
+		u32 *hash_info;
+
 		rndis_msg_size += NDIS_HASH_PPI_SIZE;
-		ppi = init_ppi_data(rndis_msg, NDIS_HASH_PPI_SIZE,
-				    NBL_HASH_VALUE);
-		*(u32 *)((void *)ppi + ppi->ppi_offset) = hash;
+		hash_info = init_ppi_data(rndis_msg, NDIS_HASH_PPI_SIZE,
+					  NBL_HASH_VALUE);
+		*hash_info = hash;
 	}
 
 	if (skb_vlan_tag_present(skb)) {
 		struct ndis_pkt_8021q_info *vlan;
 
 		rndis_msg_size += NDIS_VLAN_PPI_SIZE;
-		ppi = init_ppi_data(rndis_msg, NDIS_VLAN_PPI_SIZE,
-				    IEEE_8021Q_INFO);
+		vlan = init_ppi_data(rndis_msg, NDIS_VLAN_PPI_SIZE,
+				     IEEE_8021Q_INFO);
 
-		vlan = (void *)ppi + ppi->ppi_offset;
+		vlan->value = 0;
 		vlan->vlanid = skb->vlan_tci & VLAN_VID_MASK;
 		vlan->pri = (skb->vlan_tci & VLAN_PRIO_MASK) >>
 				VLAN_PRIO_SHIFT;
@@ -564,11 +561,10 @@ static int netvsc_start_xmit(struct sk_buff *skb, struct net_device *net)
 		struct ndis_tcp_lso_info *lso_info;
 
 		rndis_msg_size += NDIS_LSO_PPI_SIZE;
-		ppi = init_ppi_data(rndis_msg, NDIS_LSO_PPI_SIZE,
-				    TCP_LARGESEND_PKTINFO);
-
-		lso_info = (void *)ppi + ppi->ppi_offset;
+		lso_info = init_ppi_data(rndis_msg, NDIS_LSO_PPI_SIZE,
+					 TCP_LARGESEND_PKTINFO);
 
+		lso_info->value = 0;
 		lso_info->lso_v2_transmit.type = NDIS_TCP_LARGE_SEND_OFFLOAD_V2_TYPE;
 		if (skb->protocol == htons(ETH_P_IP)) {
 			lso_info->lso_v2_transmit.ip_version =
@@ -593,12 +589,10 @@ static int netvsc_start_xmit(struct sk_buff *skb, struct net_device *net)
 			struct ndis_tcp_ip_checksum_info *csum_info;
 
 			rndis_msg_size += NDIS_CSUM_PPI_SIZE;
-			ppi = init_ppi_data(rndis_msg, NDIS_CSUM_PPI_SIZE,
-					    TCPIP_CHKSUM_PKTINFO);
-
-			csum_info = (struct ndis_tcp_ip_checksum_info *)((void *)ppi +
-									 ppi->ppi_offset);
+			csum_info = init_ppi_data(rndis_msg, NDIS_CSUM_PPI_SIZE,
+						  TCPIP_CHKSUM_PKTINFO);
 
+			csum_info->value = 0;
 			csum_info->transmit.tcp_header_offset = skb_transport_offset(skb);
 
 			if (skb->protocol == htons(ETH_P_IP)) {
@@ -632,7 +626,7 @@ static int netvsc_start_xmit(struct sk_buff *skb, struct net_device *net)
 	/* timestamp packet in software */
 	skb_tx_timestamp(skb);
 
-	ret = netvsc_send(net_device_ctx, packet, rndis_msg, pb, skb);
+	ret = netvsc_send(net, packet, rndis_msg, pb, skb);
 	if (likely(ret == 0))
 		return NETDEV_TX_OK;
 
@@ -658,22 +652,14 @@ no_memory:
 /*
  * netvsc_linkstatus_callback - Link up/down notification
  */
-void netvsc_linkstatus_callback(struct hv_device *device_obj,
+void netvsc_linkstatus_callback(struct net_device *net,
 				struct rndis_message *resp)
 {
 	struct rndis_indicate_status *indicate = &resp->msg.indicate_status;
-	struct net_device *net;
-	struct net_device_context *ndev_ctx;
+	struct net_device_context *ndev_ctx = netdev_priv(net);
 	struct netvsc_reconfig *event;
 	unsigned long flags;
 
-	net = hv_get_drvdata(device_obj);
-
-	if (!net)
-		return;
-
-	ndev_ctx = netdev_priv(net);
-
 	/* Update the physical link speed when changing to another vSwitch */
 	if (indicate->status == RNDIS_STATUS_LINK_SPEED_CHANGE) {
 		u32 speed;
@@ -753,34 +739,26 @@ static struct sk_buff *netvsc_alloc_recv_skb(struct net_device *net,
  * "wire" on the specified device.
  */
 int netvsc_recv_callback(struct net_device *net,
+			 struct netvsc_device *net_device,
 			 struct vmbus_channel *channel,
 			 void  *data, u32 len,
 			 const struct ndis_tcp_ip_checksum_info *csum_info,
 			 const struct ndis_pkt_8021q_info *vlan)
 {
 	struct net_device_context *net_device_ctx = netdev_priv(net);
-	struct netvsc_device *net_device;
 	u16 q_idx = channel->offermsg.offer.sub_channel_index;
-	struct netvsc_channel *nvchan;
+	struct netvsc_channel *nvchan = &net_device->chan_table[q_idx];
 	struct sk_buff *skb;
 	struct netvsc_stats *rx_stats;
 
 	if (net->reg_state != NETREG_REGISTERED)
 		return NVSP_STAT_FAIL;
 
-	rcu_read_lock();
-	net_device = rcu_dereference(net_device_ctx->nvdev);
-	if (unlikely(!net_device))
-		goto drop;
-
-	nvchan = &net_device->chan_table[q_idx];
-
 	/* Allocate a skb - TODO direct I/O to pages? */
 	skb = netvsc_alloc_recv_skb(net, &nvchan->napi,
 				    csum_info, vlan, data, len);
 	if (unlikely(!skb)) {
-drop:
-		++net->stats.rx_dropped;
+		++net_device_ctx->eth_stats.rx_no_memory;
 		rcu_read_unlock();
 		return NVSP_STAT_FAIL;
 	}
@@ -804,8 +782,6 @@ drop:
 	u64_stats_update_end(&rx_stats->syncp);
 
 	napi_gro_receive(&nvchan->napi, skb);
-	rcu_read_unlock();
-
 	return 0;
 }
 
@@ -860,7 +836,6 @@ static int netvsc_set_channels(struct net_device *net,
 
 	memset(&device_info, 0, sizeof(device_info));
 	device_info.num_chn = count;
-	device_info.ring_size = ring_size;
 	device_info.send_sections = nvdev->send_section_cnt;
 	device_info.send_section_size = nvdev->send_section_size;
 	device_info.recv_sections = nvdev->recv_section_cnt;
@@ -975,7 +950,6 @@ static int netvsc_change_mtu(struct net_device *ndev, int mtu)
 		rndis_filter_close(nvdev);
 
 	memset(&device_info, 0, sizeof(device_info));
-	device_info.ring_size = ring_size;
 	device_info.num_chn = nvdev->num_chn;
 	device_info.send_sections = nvdev->send_section_cnt;
 	device_info.send_section_size = nvdev->send_section_size;
@@ -1133,12 +1107,13 @@ static const struct {
 	u16 offset;
 } netvsc_stats[] = {
 	{ "tx_scattered", offsetof(struct netvsc_ethtool_stats, tx_scattered) },
-	{ "tx_no_memory",  offsetof(struct netvsc_ethtool_stats, tx_no_memory) },
+	{ "tx_no_memory", offsetof(struct netvsc_ethtool_stats, tx_no_memory) },
 	{ "tx_no_space",  offsetof(struct netvsc_ethtool_stats, tx_no_space) },
 	{ "tx_too_big",	  offsetof(struct netvsc_ethtool_stats, tx_too_big) },
 	{ "tx_busy",	  offsetof(struct netvsc_ethtool_stats, tx_busy) },
 	{ "tx_send_full", offsetof(struct netvsc_ethtool_stats, tx_send_full) },
 	{ "rx_comp_busy", offsetof(struct netvsc_ethtool_stats, rx_comp_busy) },
+	{ "rx_no_memory", offsetof(struct netvsc_ethtool_stats, rx_no_memory) },
 	{ "stop_queue", offsetof(struct netvsc_ethtool_stats, stop_queue) },
 	{ "wake_queue", offsetof(struct netvsc_ethtool_stats, wake_queue) },
 }, vf_stats[] = {
@@ -1539,7 +1514,6 @@ static int netvsc_set_ringparam(struct net_device *ndev,
 
 	memset(&device_info, 0, sizeof(device_info));
 	device_info.num_chn = nvdev->num_chn;
-	device_info.ring_size = ring_size;
 	device_info.send_sections = new_tx;
 	device_info.send_section_size = nvdev->send_section_size;
 	device_info.recv_sections = new_rx;
@@ -1995,7 +1969,6 @@ static int netvsc_probe(struct hv_device *dev,
 
 	/* Notify the netvsc driver of the new device */
 	memset(&device_info, 0, sizeof(device_info));
-	device_info.ring_size = ring_size;
 	device_info.num_chn = VRSS_CHANNEL_DEFAULT;
 	device_info.send_sections = NETVSC_DEFAULT_TX;
 	device_info.send_section_size = NETVSC_SEND_SECTION_SIZE;
@@ -2158,11 +2131,13 @@ static int __init netvsc_drv_init(void)
 
 	if (ring_size < RING_SIZE_MIN) {
 		ring_size = RING_SIZE_MIN;
-		pr_info("Increased ring_size to %d (min allowed)\n",
+		pr_info("Increased ring_size to %u (min allowed)\n",
 			ring_size);
 	}
-	ret = vmbus_driver_register(&netvsc_drv);
+	netvsc_ring_bytes = ring_size * PAGE_SIZE;
+	netvsc_ring_reciprocal = reciprocal_value(netvsc_ring_bytes);
 
+	ret = vmbus_driver_register(&netvsc_drv);
 	if (ret)
 		return ret;
 
diff --git a/drivers/net/hyperv/rndis_filter.c b/drivers/net/hyperv/rndis_filter.c
index 7b637c7dd1e5..c3ca191fea7f 100644
--- a/drivers/net/hyperv/rndis_filter.c
+++ b/drivers/net/hyperv/rndis_filter.c
@@ -134,11 +134,9 @@ static void put_rndis_request(struct rndis_device *dev,
 	kfree(req);
 }
 
-static void dump_rndis_message(struct hv_device *hv_dev,
+static void dump_rndis_message(struct net_device *netdev,
 			       const struct rndis_message *rndis_msg)
 {
-	struct net_device *netdev = hv_get_drvdata(hv_dev);
-
 	switch (rndis_msg->ndis_msg_type) {
 	case RNDIS_MSG_PACKET:
 		netdev_dbg(netdev, "RNDIS_MSG_PACKET (len %u, "
@@ -217,7 +215,6 @@ static int rndis_filter_send_request(struct rndis_device *dev,
 	struct hv_netvsc_packet *packet;
 	struct hv_page_buffer page_buf[2];
 	struct hv_page_buffer *pb = page_buf;
-	struct net_device_context *net_device_ctx = netdev_priv(dev->ndev);
 	int ret;
 
 	/* Setup the packet to send it */
@@ -245,7 +242,7 @@ static int rndis_filter_send_request(struct rndis_device *dev,
 	}
 
 	rcu_read_lock_bh();
-	ret = netvsc_send(net_device_ctx, packet, NULL, pb, NULL);
+	ret = netvsc_send(dev->ndev, packet, NULL, pb, NULL);
 	rcu_read_unlock_bh();
 
 	return ret;
@@ -354,6 +351,7 @@ static inline void *rndis_get_ppi(struct rndis_packet *rpkt, u32 type)
 }
 
 static int rndis_filter_receive_data(struct net_device *ndev,
+				     struct netvsc_device *nvdev,
 				     struct rndis_device *dev,
 				     struct rndis_message *msg,
 				     struct vmbus_channel *channel,
@@ -390,14 +388,14 @@ static int rndis_filter_receive_data(struct net_device *ndev,
 	 */
 	data = (void *)((unsigned long)data + data_offset);
 	csum_info = rndis_get_ppi(rndis_pkt, TCPIP_CHKSUM_PKTINFO);
-	return netvsc_recv_callback(ndev, channel,
+
+	return netvsc_recv_callback(ndev, nvdev, channel,
 				    data, rndis_pkt->data_len,
 				    csum_info, vlan);
 }
 
 int rndis_filter_receive(struct net_device *ndev,
 			 struct netvsc_device *net_dev,
-			 struct hv_device *dev,
 			 struct vmbus_channel *channel,
 			 void *data, u32 buflen)
 {
@@ -419,11 +417,12 @@ int rndis_filter_receive(struct net_device *ndev,
 	}
 
 	if (netif_msg_rx_status(net_device_ctx))
-		dump_rndis_message(dev, rndis_msg);
+		dump_rndis_message(ndev, rndis_msg);
 
 	switch (rndis_msg->ndis_msg_type) {
 	case RNDIS_MSG_PACKET:
-		return rndis_filter_receive_data(ndev, rndis_dev, rndis_msg,
+		return rndis_filter_receive_data(ndev, net_dev,
+						 rndis_dev, rndis_msg,
 						 channel, data, buflen);
 	case RNDIS_MSG_INIT_C:
 	case RNDIS_MSG_QUERY_C:
@@ -434,7 +433,7 @@ int rndis_filter_receive(struct net_device *ndev,
 
 	case RNDIS_MSG_INDICATE:
 		/* notification msgs */
-		netvsc_linkstatus_callback(dev, rndis_msg);
+		netvsc_linkstatus_callback(ndev, rndis_msg);
 		break;
 	default:
 		netdev_err(ndev,
@@ -1040,8 +1039,8 @@ static void netvsc_sc_open(struct vmbus_channel *new_sc)
 	/* Set the channel before opening.*/
 	nvchan->channel = new_sc;
 
-	ret = vmbus_open(new_sc, nvscdev->ring_size * PAGE_SIZE,
-			 nvscdev->ring_size * PAGE_SIZE, NULL, 0,
+	ret = vmbus_open(new_sc, netvsc_ring_bytes,
+			 netvsc_ring_bytes, NULL, 0,
 			 netvsc_channel_cb, nvchan);
 	if (ret == 0)
 		napi_enable(&nvchan->napi);
@@ -1222,7 +1221,6 @@ struct netvsc_device *rndis_filter_device_add(struct hv_device *dev,
 	struct ndis_recv_scale_cap rsscap;
 	u32 rsscap_size = sizeof(struct ndis_recv_scale_cap);
 	u32 mtu, size;
-	const struct cpumask *node_cpu_mask;
 	u32 num_possible_rss_qs;
 	int i, ret;
 
@@ -1291,14 +1289,8 @@ struct netvsc_device *rndis_filter_device_add(struct hv_device *dev,
 	if (ret || rsscap.num_recv_que < 2)
 		goto out;
 
-	/*
-	 * We will limit the VRSS channels to the number CPUs in the NUMA node
-	 * the primary channel is currently bound to.
-	 *
-	 * This also guarantees that num_possible_rss_qs <= num_online_cpus
-	 */
-	node_cpu_mask = cpumask_of_node(cpu_to_node(dev->channel->target_cpu));
-	num_possible_rss_qs = min_t(u32, cpumask_weight(node_cpu_mask),
+	/* This guarantees that num_possible_rss_qs <= num_online_cpus */
+	num_possible_rss_qs = min_t(u32, num_online_cpus(),
 				    rsscap.num_recv_que);
 
 	net_device->max_chn = min_t(u32, VRSS_CHANNEL_MAX, num_possible_rss_qs);
@@ -1362,9 +1354,6 @@ int rndis_filter_open(struct netvsc_device *nvdev)
 	if (!nvdev)
 		return -EINVAL;
 
-	if (atomic_inc_return(&nvdev->open_cnt) != 1)
-		return 0;
-
 	return rndis_filter_open_device(nvdev->extension);
 }
 
@@ -1373,13 +1362,12 @@ int rndis_filter_close(struct netvsc_device *nvdev)
 	if (!nvdev)
 		return -EINVAL;
 
-	if (atomic_dec_return(&nvdev->open_cnt) != 0)
-		return 0;
-
 	return rndis_filter_close_device(nvdev->extension);
 }
 
 bool rndis_filter_opened(const struct netvsc_device *nvdev)
 {
-	return atomic_read(&nvdev->open_cnt) > 0;
+	const struct rndis_device *dev = nvdev->extension;
+
+	return dev->state == RNDIS_DEV_DATAINITIALIZED;
 }
diff --git a/drivers/net/ieee802154/adf7242.c b/drivers/net/ieee802154/adf7242.c
index 400fdbd3a120..64f1b1e77bc0 100644
--- a/drivers/net/ieee802154/adf7242.c
+++ b/drivers/net/ieee802154/adf7242.c
@@ -1,7 +1,7 @@
 /*
  * Analog Devices ADF7242 Low-Power IEEE 802.15.4 Transceiver
  *
- * Copyright 2009-2015 Analog Devices Inc.
+ * Copyright 2009-2017 Analog Devices Inc.
  *
  * Licensed under the GPL-2 or later.
  *
@@ -344,12 +344,18 @@ static int adf7242_wait_status(struct adf7242_local *lp, unsigned int status,
 	return ret;
 }
 
-static int adf7242_wait_ready(struct adf7242_local *lp, int line)
+static int adf7242_wait_rc_ready(struct adf7242_local *lp, int line)
 {
 	return adf7242_wait_status(lp, STAT_RC_READY | STAT_SPI_READY,
 				   STAT_RC_READY | STAT_SPI_READY, line);
 }
 
+static int adf7242_wait_spi_ready(struct adf7242_local *lp, int line)
+{
+	return adf7242_wait_status(lp, STAT_SPI_READY,
+				   STAT_SPI_READY, line);
+}
+
 static int adf7242_write_fbuf(struct adf7242_local *lp, u8 *data, u8 len)
 {
 	u8 *buf = lp->buf;
@@ -369,7 +375,7 @@ static int adf7242_write_fbuf(struct adf7242_local *lp, u8 *data, u8 len)
 	spi_message_add_tail(&xfer_head, &msg);
 	spi_message_add_tail(&xfer_buf, &msg);
 
-	adf7242_wait_ready(lp, __LINE__);
+	adf7242_wait_spi_ready(lp, __LINE__);
 
 	mutex_lock(&lp->bmux);
 	buf[0] = CMD_SPI_PKT_WR;
@@ -401,7 +407,7 @@ static int adf7242_read_fbuf(struct adf7242_local *lp,
 	spi_message_add_tail(&xfer_head, &msg);
 	spi_message_add_tail(&xfer_buf, &msg);
 
-	adf7242_wait_ready(lp, __LINE__);
+	adf7242_wait_spi_ready(lp, __LINE__);
 
 	mutex_lock(&lp->bmux);
 	if (packet_read) {
@@ -432,7 +438,7 @@ static int adf7242_read_reg(struct adf7242_local *lp, u16 addr, u8 *data)
 		.rx_buf = lp->buf_read_rx,
 	};
 
-	adf7242_wait_ready(lp, __LINE__);
+	adf7242_wait_spi_ready(lp, __LINE__);
 
 	mutex_lock(&lp->bmux);
 	lp->buf_read_tx[0] = CMD_SPI_MEM_RD(addr);
@@ -462,7 +468,7 @@ static int adf7242_write_reg(struct adf7242_local *lp, u16 addr, u8 data)
 {
 	int status;
 
-	adf7242_wait_ready(lp, __LINE__);
+	adf7242_wait_spi_ready(lp, __LINE__);
 
 	mutex_lock(&lp->bmux);
 	lp->buf_reg_tx[0] = CMD_SPI_MEM_WR(addr);
@@ -484,7 +490,7 @@ static int adf7242_cmd(struct adf7242_local *lp, unsigned int cmd)
 	dev_vdbg(&lp->spi->dev, "%s : CMD=0x%X\n", __func__, cmd);
 
 	if (cmd != CMD_RC_PC_RESET_NO_WAIT)
-		adf7242_wait_ready(lp, __LINE__);
+		adf7242_wait_rc_ready(lp, __LINE__);
 
 	mutex_lock(&lp->bmux);
 	lp->buf_cmd = cmd;
@@ -557,6 +563,22 @@ static int adf7242_verify_firmware(struct adf7242_local *lp,
 	return 0;
 }
 
+static void adf7242_clear_irqstat(struct adf7242_local *lp)
+{
+	adf7242_write_reg(lp, REG_IRQ1_SRC1, IRQ_CCA_COMPLETE | IRQ_SFD_RX |
+			  IRQ_SFD_TX | IRQ_RX_PKT_RCVD | IRQ_TX_PKT_SENT |
+			  IRQ_FRAME_VALID | IRQ_ADDRESS_VALID | IRQ_CSMA_CA);
+}
+
+static int adf7242_cmd_rx(struct adf7242_local *lp)
+{
+	/* Wait until the ACK is sent */
+	adf7242_wait_status(lp, RC_STATUS_PHY_RDY, RC_STATUS_MASK, __LINE__);
+	adf7242_clear_irqstat(lp);
+
+	return adf7242_cmd(lp, CMD_RC_RX);
+}
+
 static int adf7242_set_txpower(struct ieee802154_hw *hw, int mbm)
 {
 	struct adf7242_local *lp = hw->priv;
@@ -660,7 +682,7 @@ static int adf7242_start(struct ieee802154_hw *hw)
 	struct adf7242_local *lp = hw->priv;
 
 	adf7242_cmd(lp, CMD_RC_PHY_RDY);
-	adf7242_write_reg(lp, REG_IRQ1_SRC1, 0xFF);
+	adf7242_clear_irqstat(lp);
 	enable_irq(lp->spi->irq);
 	set_bit(FLAG_START, &lp->flags);
 
@@ -671,10 +693,10 @@ static void adf7242_stop(struct ieee802154_hw *hw)
 {
 	struct adf7242_local *lp = hw->priv;
 
+	disable_irq(lp->spi->irq);
 	adf7242_cmd(lp, CMD_RC_IDLE);
 	clear_bit(FLAG_START, &lp->flags);
-	disable_irq(lp->spi->irq);
-	adf7242_write_reg(lp, REG_IRQ1_SRC1, 0xFF);
+	adf7242_clear_irqstat(lp);
 }
 
 static int adf7242_channel(struct ieee802154_hw *hw, u8 page, u8 channel)
@@ -789,9 +811,12 @@ static int adf7242_xmit(struct ieee802154_hw *hw, struct sk_buff *skb)
 	struct adf7242_local *lp = hw->priv;
 	int ret;
 
+	/* ensure existing instances of the IRQ handler have completed */
+	disable_irq(lp->spi->irq);
 	set_bit(FLAG_XMIT, &lp->flags);
 	reinit_completion(&lp->tx_complete);
 	adf7242_cmd(lp, CMD_RC_PHY_RDY);
+	adf7242_clear_irqstat(lp);
 
 	ret = adf7242_write_fbuf(lp, skb->data, skb->len);
 	if (ret)
@@ -800,6 +825,7 @@ static int adf7242_xmit(struct ieee802154_hw *hw, struct sk_buff *skb)
 	ret = adf7242_cmd(lp, CMD_RC_CSMACA);
 	if (ret)
 		goto err;
+	enable_irq(lp->spi->irq);
 
 	ret = wait_for_completion_interruptible_timeout(&lp->tx_complete,
 							HZ / 10);
@@ -822,7 +848,7 @@ static int adf7242_xmit(struct ieee802154_hw *hw, struct sk_buff *skb)
 
 err:
 	clear_bit(FLAG_XMIT, &lp->flags);
-	adf7242_cmd(lp, CMD_RC_RX);
+	adf7242_cmd_rx(lp);
 
 	return ret;
 }
@@ -846,7 +872,7 @@ static int adf7242_rx(struct adf7242_local *lp)
 
 	skb = dev_alloc_skb(len);
 	if (!skb) {
-		adf7242_cmd(lp, CMD_RC_RX);
+		adf7242_cmd_rx(lp);
 		return -ENOMEM;
 	}
 
@@ -854,14 +880,14 @@ static int adf7242_rx(struct adf7242_local *lp)
 	ret = adf7242_read_fbuf(lp, data, len, true);
 	if (ret < 0) {
 		kfree_skb(skb);
-		adf7242_cmd(lp, CMD_RC_RX);
+		adf7242_cmd_rx(lp);
 		return ret;
 	}
 
 	lqi = data[len - 2];
 	lp->rssi = data[len - 1];
 
-	adf7242_cmd(lp, CMD_RC_RX);
+	ret = adf7242_cmd_rx(lp);
 
 	skb_trim(skb, len - 2);	/* Don't put RSSI/LQI or CRC into the frame */
 
@@ -870,7 +896,7 @@ static int adf7242_rx(struct adf7242_local *lp)
 	dev_dbg(&lp->spi->dev, "%s: ret=%d len=%d lqi=%d rssi=%d\n",
 		__func__, ret, (int)len, (int)lqi, lp->rssi);
 
-	return 0;
+	return ret;
 }
 
 static const struct ieee802154_ops adf7242_ops = {
@@ -888,7 +914,7 @@ static const struct ieee802154_ops adf7242_ops = {
 	.set_cca_ed_level = adf7242_set_cca_ed_level,
 };
 
-static void adf7242_debug(u8 irq1)
+static void adf7242_debug(struct adf7242_local *lp, u8 irq1)
 {
 #ifdef DEBUG
 	u8 stat;
@@ -906,9 +932,12 @@ static void adf7242_debug(u8 irq1)
 		irq1 & IRQ_FRAME_VALID ? "IRQ_FRAME_VALID\n" : "",
 		irq1 & IRQ_ADDRESS_VALID ? "IRQ_ADDRESS_VALID\n" : "");
 
-	dev_dbg(&lp->spi->dev, "%s STATUS = %X:\n%s\n%s%s%s%s%s\n",
+	dev_dbg(&lp->spi->dev, "%s STATUS = %X:\n%s\n%s\n%s\n%s\n%s%s%s%s%s\n",
 		__func__, stat,
+		stat & STAT_SPI_READY ? "SPI_READY" : "SPI_BUSY",
+		stat & STAT_IRQ_STATUS ? "IRQ_PENDING" : "IRQ_CLEAR",
 		stat & STAT_RC_READY ? "RC_READY" : "RC_BUSY",
+		stat & STAT_CCA_RESULT ? "CHAN_IDLE" : "CHAN_BUSY",
 		(stat & 0xf) == RC_STATUS_IDLE ? "RC_STATUS_IDLE" : "",
 		(stat & 0xf) == RC_STATUS_MEAS ? "RC_STATUS_MEAS" : "",
 		(stat & 0xf) == RC_STATUS_PHY_RDY ? "RC_STATUS_PHY_RDY" : "",
@@ -923,20 +952,20 @@ static irqreturn_t adf7242_isr(int irq, void *data)
 	unsigned int xmit;
 	u8 irq1;
 
-	adf7242_wait_status(lp, RC_STATUS_PHY_RDY, RC_STATUS_MASK, __LINE__);
-
 	adf7242_read_reg(lp, REG_IRQ1_SRC1, &irq1);
-	adf7242_write_reg(lp, REG_IRQ1_SRC1, irq1);
 
 	if (!(irq1 & (IRQ_RX_PKT_RCVD | IRQ_CSMA_CA)))
 		dev_err(&lp->spi->dev, "%s :ERROR IRQ1 = 0x%X\n",
 			__func__, irq1);
 
-	adf7242_debug(irq1);
+	adf7242_debug(lp, irq1);
 
 	xmit = test_bit(FLAG_XMIT, &lp->flags);
 
 	if (xmit && (irq1 & IRQ_CSMA_CA)) {
+		adf7242_wait_status(lp, RC_STATUS_PHY_RDY,
+				    RC_STATUS_MASK, __LINE__);
+
 		if (ADF7242_REPORT_CSMA_CA_STAT) {
 			u8 astat;
 
@@ -957,6 +986,7 @@ static irqreturn_t adf7242_isr(int irq, void *data)
 			lp->tx_stat = SUCCESS;
 		}
 		complete(&lp->tx_complete);
+		adf7242_clear_irqstat(lp);
 	} else if (!xmit && (irq1 & IRQ_RX_PKT_RCVD) &&
 		   (irq1 & IRQ_FRAME_VALID)) {
 		adf7242_rx(lp);
@@ -965,16 +995,19 @@ static irqreturn_t adf7242_isr(int irq, void *data)
 		dev_dbg(&lp->spi->dev, "%s:%d : ERROR IRQ1 = 0x%X\n",
 			__func__, __LINE__, irq1);
 		adf7242_cmd(lp, CMD_RC_PHY_RDY);
-		adf7242_write_reg(lp, REG_IRQ1_SRC1, 0xFF);
-		adf7242_cmd(lp, CMD_RC_RX);
+		adf7242_cmd_rx(lp);
 	} else {
 		/* This can only be xmit without IRQ, likely a RX packet.
 		 * we get an TX IRQ shortly - do nothing or let the xmit
 		 * timeout handle this
 		 */
+
 		dev_dbg(&lp->spi->dev, "%s:%d : ERROR IRQ1 = 0x%X, xmit %d\n",
 			__func__, __LINE__, irq1, xmit);
+		adf7242_wait_status(lp, RC_STATUS_PHY_RDY,
+				    RC_STATUS_MASK, __LINE__);
 		complete(&lp->tx_complete);
+		adf7242_clear_irqstat(lp);
 	}
 
 	return IRQ_HANDLED;
@@ -994,7 +1027,7 @@ static int adf7242_soft_reset(struct adf7242_local *lp, int line)
 	adf7242_set_promiscuous_mode(lp->hw, lp->promiscuous);
 	adf7242_set_csma_params(lp->hw, lp->min_be, lp->max_be,
 				lp->max_cca_retries);
-	adf7242_write_reg(lp, REG_IRQ1_SRC1, 0xFF);
+	adf7242_clear_irqstat(lp);
 
 	if (test_bit(FLAG_START, &lp->flags)) {
 		enable_irq(lp->spi->irq);
@@ -1060,7 +1093,7 @@ static int adf7242_hw_init(struct adf7242_local *lp)
 	adf7242_write_reg(lp, REG_IRQ1_EN0, 0);
 	adf7242_write_reg(lp, REG_IRQ1_EN1, IRQ_RX_PKT_RCVD | IRQ_CSMA_CA);
 
-	adf7242_write_reg(lp, REG_IRQ1_SRC1, 0xFF);
+	adf7242_clear_irqstat(lp);
 	adf7242_write_reg(lp, REG_IRQ1_SRC0, 0xFF);
 
 	adf7242_cmd(lp, CMD_RC_IDLE);
@@ -1086,8 +1119,11 @@ static int adf7242_stats_show(struct seq_file *file, void *offset)
 		   irq1 & IRQ_FRAME_VALID ? "IRQ_FRAME_VALID\n" : "",
 		   irq1 & IRQ_ADDRESS_VALID ? "IRQ_ADDRESS_VALID\n" : "");
 
-	seq_printf(file, "STATUS = %X:\n%s\n%s%s%s%s%s\n", stat,
+	seq_printf(file, "STATUS = %X:\n%s\n%s\n%s\n%s\n%s%s%s%s%s\n", stat,
+		   stat & STAT_SPI_READY ? "SPI_READY" : "SPI_BUSY",
+		   stat & STAT_IRQ_STATUS ? "IRQ_PENDING" : "IRQ_CLEAR",
 		   stat & STAT_RC_READY ? "RC_READY" : "RC_BUSY",
+		   stat & STAT_CCA_RESULT ? "CHAN_IDLE" : "CHAN_BUSY",
 		   (stat & 0xf) == RC_STATUS_IDLE ? "RC_STATUS_IDLE" : "",
 		   (stat & 0xf) == RC_STATUS_MEAS ? "RC_STATUS_MEAS" : "",
 		   (stat & 0xf) == RC_STATUS_PHY_RDY ? "RC_STATUS_PHY_RDY" : "",
@@ -1257,12 +1293,14 @@ static int adf7242_remove(struct spi_device *spi)
 
 static const struct of_device_id adf7242_of_match[] = {
 	{ .compatible = "adi,adf7242", },
+	{ .compatible = "adi,adf7241", },
 	{ },
 };
 MODULE_DEVICE_TABLE(of, adf7242_of_match);
 
 static const struct spi_device_id adf7242_device_id[] = {
 	{ .name = "adf7242", },
+	{ .name = "adf7241", },
 	{ },
 };
 MODULE_DEVICE_TABLE(spi, adf7242_device_id);
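
A note on the adf7242 hunks above: several call sites replace the open-coded
"adf7242_write_reg(lp, REG_IRQ1_SRC1, 0xFF)" with adf7242_clear_irqstat().
The helper itself is introduced earlier in the patch, outside this excerpt;
judging purely from the writes it replaces, a minimal sketch would be:

	/* Sketch only -- the real helper is added earlier in this patch.
	 * Based on the call sites it replaces, it acks all IRQ1 status bits.
	 */
	static void adf7242_clear_irqstat(struct adf7242_local *lp)
	{
		adf7242_write_reg(lp, REG_IRQ1_SRC1, 0xFF);
	}
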
diff --git a/drivers/net/ipvlan/ipvlan_core.c b/drivers/net/ipvlan/ipvlan_core.c
index 77cc4fbaeace..c1f008fe4e1d 100644
--- a/drivers/net/ipvlan/ipvlan_core.c
+++ b/drivers/net/ipvlan/ipvlan_core.c
@@ -315,13 +315,13 @@ static int ipvlan_rcv_frame(struct ipvl_addr *addr, struct sk_buff **pskb,
 
 		*pskb = skb;
 	}
-	ipvlan_skb_crossing_ns(skb, dev);
 
 	if (local) {
 		skb->pkt_type = PACKET_HOST;
 		if (dev_forward_skb(ipvlan->dev, skb) == NET_RX_SUCCESS)
 			success = true;
 	} else {
+		skb->dev = dev;
 		ret = RX_HANDLER_ANOTHER;
 		success = true;
 	}
@@ -586,7 +586,7 @@ static int ipvlan_xmit_mode_l2(struct sk_buff *skb, struct net_device *dev)
 		return NET_XMIT_SUCCESS;
 	}
 
-	ipvlan_skb_crossing_ns(skb, ipvlan->phy_dev);
+	skb->dev = ipvlan->phy_dev;
 	return dev_queue_xmit(skb);
 }
 
@@ -664,8 +664,6 @@ static rx_handler_result_t ipvlan_handle_mode_l2(struct sk_buff **pskb,
 	struct sk_buff *skb = *pskb;
 	struct ethhdr *eth = eth_hdr(skb);
 	rx_handler_result_t ret = RX_HANDLER_PASS;
-	void *lyr3h;
-	int addr_type;
 
 	if (is_multicast_ether_addr(eth->h_dest)) {
 		if (ipvlan_external_frame(skb, port)) {
@@ -683,15 +681,8 @@ static rx_handler_result_t ipvlan_handle_mode_l2(struct sk_buff **pskb,
 			}
 		}
 	} else {
-		struct ipvl_addr *addr;
-
-		lyr3h = ipvlan_get_L3_hdr(port, skb, &addr_type);
-		if (!lyr3h)
-			return ret;
-
-		addr = ipvlan_addr_lookup(port, lyr3h, addr_type, true);
-		if (addr)
-			ret = ipvlan_rcv_frame(addr, pskb, false);
+		/* Process non-multicast packets as in l3 mode */
+		ret = ipvlan_handle_mode_l3(pskb, port);
 	}
 
 	return ret;
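
The ipvlan_core.c hunks above replace ipvlan_skb_crossing_ns() with a direct
skb->dev assignment. The dropped helper (defined elsewhere in the driver and
not shown in this excerpt) scrubbed packet state when crossing network
namespaces before retargeting the skb; an approximate reconstruction, not
verbatim:

	/* Approximate shape of the removed helper: scrub skb state on a
	 * netns change, then retarget the skb at the new device.
	 */
	static void ipvlan_skb_crossing_ns(struct sk_buff *skb,
					   struct net_device *dev)
	{
		bool xnet = true;

		if (dev)
			xnet = !net_eq(dev_net(skb->dev), dev_net(dev));

		skb_scrub_packet(skb, xnet);
		if (dev)
			skb->dev = dev;
	}

Since an ipvlan slave and its master always share a namespace at these call
sites, the scrub was presumably pure overhead on the hot path, which is why
the plain assignment suffices.
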
diff --git a/drivers/net/ipvlan/ipvlan_main.c b/drivers/net/ipvlan/ipvlan_main.c
index 30cb803e2fe5..2469df118fbf 100644
--- a/drivers/net/ipvlan/ipvlan_main.c
+++ b/drivers/net/ipvlan/ipvlan_main.c
@@ -850,6 +850,19 @@ static void ipvlan_del_addr6(struct ipvl_dev *ipvlan, struct in6_addr *ip6_addr)
 	return ipvlan_del_addr(ipvlan, ip6_addr, true);
 }
 
+static bool ipvlan_is_valid_dev(const struct net_device *dev)
+{
+	struct ipvl_dev *ipvlan = netdev_priv(dev);
+
+	if (!netif_is_ipvlan(dev))
+		return false;
+
+	if (!ipvlan || !ipvlan->port)
+		return false;
+
+	return true;
+}
+
 static int ipvlan_addr6_event(struct notifier_block *unused,
 			      unsigned long event, void *ptr)
 {
@@ -857,10 +870,7 @@ static int ipvlan_addr6_event(struct notifier_block *unused,
 	struct net_device *dev = (struct net_device *)if6->idev->dev;
 	struct ipvl_dev *ipvlan = netdev_priv(dev);
 
-	if (!netif_is_ipvlan(dev))
-		return NOTIFY_DONE;
-
-	if (!ipvlan || !ipvlan->port)
+	if (!ipvlan_is_valid_dev(dev))
 		return NOTIFY_DONE;
 
 	switch (event) {
@@ -888,10 +898,7 @@ static int ipvlan_addr6_validator_event(struct notifier_block *unused,
 	if (in_softirq())
 		return NOTIFY_DONE;
 
-	if (!netif_is_ipvlan(dev))
-		return NOTIFY_DONE;
-
-	if (!ipvlan || !ipvlan->port)
+	if (!ipvlan_is_valid_dev(dev))
 		return NOTIFY_DONE;
 
 	switch (event) {
@@ -932,10 +939,7 @@ static int ipvlan_addr4_event(struct notifier_block *unused,
 	struct ipvl_dev *ipvlan = netdev_priv(dev);
 	struct in_addr ip4_addr;
 
-	if (!netif_is_ipvlan(dev))
-		return NOTIFY_DONE;
-
-	if (!ipvlan || !ipvlan->port)
+	if (!ipvlan_is_valid_dev(dev))
 		return NOTIFY_DONE;
 
 	switch (event) {
@@ -961,10 +965,7 @@ static int ipvlan_addr4_validator_event(struct notifier_block *unused,
 	struct net_device *dev = (struct net_device *)ivi->ivi_dev->dev;
 	struct ipvl_dev *ipvlan = netdev_priv(dev);
 
-	if (!netif_is_ipvlan(dev))
-		return NOTIFY_DONE;
-
-	if (!ipvlan || !ipvlan->port)
+	if (!ipvlan_is_valid_dev(dev))
 		return NOTIFY_DONE;
 
 	switch (event) {
diff --git a/drivers/net/macsec.c b/drivers/net/macsec.c
index 1d025ab9568f..7de88b33d5b9 100644
--- a/drivers/net/macsec.c
+++ b/drivers/net/macsec.c
@@ -393,7 +393,10 @@ static struct macsec_cb *macsec_skb_cb(struct sk_buff *skb)
 #define MACSEC_PORT_SCB (0x0000)
 #define MACSEC_UNDEF_SCI ((__force sci_t)0xffffffffffffffffULL)
 
-#define DEFAULT_SAK_LEN 16
+#define MACSEC_GCM_AES_128_SAK_LEN 16
+#define MACSEC_GCM_AES_256_SAK_LEN 32
+
+#define DEFAULT_SAK_LEN MACSEC_GCM_AES_128_SAK_LEN
 #define DEFAULT_SEND_SCI true
 #define DEFAULT_ENCRYPT false
 #define DEFAULT_ENCODING_SA 0
@@ -2362,15 +2365,26 @@ static int nla_put_secy(struct macsec_secy *secy, struct sk_buff *skb)
 {
 	struct macsec_tx_sc *tx_sc = &secy->tx_sc;
 	struct nlattr *secy_nest = nla_nest_start(skb, MACSEC_ATTR_SECY);
+	u64 csid;
 
 	if (!secy_nest)
 		return 1;
 
+	switch (secy->key_len) {
+	case MACSEC_GCM_AES_128_SAK_LEN:
+		csid = MACSEC_DEFAULT_CIPHER_ID;
+		break;
+	case MACSEC_GCM_AES_256_SAK_LEN:
+		csid = MACSEC_CIPHER_ID_GCM_AES_256;
+		break;
+	default:
+		goto cancel;
+	}
+
 	if (nla_put_sci(skb, MACSEC_SECY_ATTR_SCI, secy->sci,
 			MACSEC_SECY_ATTR_PAD) ||
 	    nla_put_u64_64bit(skb, MACSEC_SECY_ATTR_CIPHER_SUITE,
-			      MACSEC_DEFAULT_CIPHER_ID,
-			      MACSEC_SECY_ATTR_PAD) ||
+			      csid, MACSEC_SECY_ATTR_PAD) ||
 	    nla_put_u8(skb, MACSEC_SECY_ATTR_ICV_LEN, secy->icv_len) ||
 	    nla_put_u8(skb, MACSEC_SECY_ATTR_OPER, secy->operational) ||
 	    nla_put_u8(skb, MACSEC_SECY_ATTR_PROTECT, secy->protect_frames) ||
@@ -3015,8 +3029,8 @@ static void macsec_setup(struct net_device *dev)
 	eth_zero_addr(dev->broadcast);
 }
 
-static void macsec_changelink_common(struct net_device *dev,
-				     struct nlattr *data[])
+static int macsec_changelink_common(struct net_device *dev,
+				    struct nlattr *data[])
 {
 	struct macsec_secy *secy;
 	struct macsec_tx_sc *tx_sc;
@@ -3056,6 +3070,22 @@ static void macsec_changelink_common(struct net_device *dev,
 
 	if (data[IFLA_MACSEC_VALIDATION])
 		secy->validate_frames = nla_get_u8(data[IFLA_MACSEC_VALIDATION]);
+
+	if (data[IFLA_MACSEC_CIPHER_SUITE]) {
+		switch (nla_get_u64(data[IFLA_MACSEC_CIPHER_SUITE])) {
+		case MACSEC_CIPHER_ID_GCM_AES_128:
+		case MACSEC_DEFAULT_CIPHER_ID:
+			secy->key_len = MACSEC_GCM_AES_128_SAK_LEN;
+			break;
+		case MACSEC_CIPHER_ID_GCM_AES_256:
+			secy->key_len = MACSEC_GCM_AES_256_SAK_LEN;
+			break;
+		default:
+			return -EINVAL;
+		}
+	}
+
+	return 0;
 }
 
 static int macsec_changelink(struct net_device *dev, struct nlattr *tb[],
@@ -3071,9 +3101,7 @@ static int macsec_changelink(struct net_device *dev, struct nlattr *tb[],
 	    data[IFLA_MACSEC_PORT])
 		return -EINVAL;
 
-	macsec_changelink_common(dev, data);
-
-	return 0;
+	return macsec_changelink_common(dev, data);
 }
 
 static void macsec_del_dev(struct macsec_dev *macsec)
@@ -3270,8 +3298,11 @@ static int macsec_newlink(struct net *net, struct net_device *dev,
 	if (err)
 		goto unlink;
 
-	if (data)
-		macsec_changelink_common(dev, data);
+	if (data) {
+		err = macsec_changelink_common(dev, data);
+		if (err)
+			goto del_dev;
+	}
 
 	err = register_macsec_dev(real_dev, dev);
 	if (err < 0)
@@ -3320,8 +3351,9 @@ static int macsec_validate_attr(struct nlattr *tb[], struct nlattr *data[],
 	}
 
 	switch (csid) {
+	case MACSEC_CIPHER_ID_GCM_AES_128:
+	case MACSEC_CIPHER_ID_GCM_AES_256:
 	case MACSEC_DEFAULT_CIPHER_ID:
-	case MACSEC_DEFAULT_CIPHER_ALT:
 		if (icv_len < MACSEC_MIN_ICV_LEN ||
 		    icv_len > MACSEC_STD_ICV_LEN)
 			return -EINVAL;
@@ -3390,12 +3422,24 @@ static int macsec_fill_info(struct sk_buff *skb,
 {
 	struct macsec_secy *secy = &macsec_priv(dev)->secy;
 	struct macsec_tx_sc *tx_sc = &secy->tx_sc;
+	u64 csid;
+
+	switch (secy->key_len) {
+	case MACSEC_GCM_AES_128_SAK_LEN:
+		csid = MACSEC_DEFAULT_CIPHER_ID;
+		break;
+	case MACSEC_GCM_AES_256_SAK_LEN:
+		csid = MACSEC_CIPHER_ID_GCM_AES_256;
+		break;
+	default:
+		goto nla_put_failure;
+	}
 
 	if (nla_put_sci(skb, IFLA_MACSEC_SCI, secy->sci,
 			IFLA_MACSEC_PAD) ||
 	    nla_put_u8(skb, IFLA_MACSEC_ICV_LEN, secy->icv_len) ||
 	    nla_put_u64_64bit(skb, IFLA_MACSEC_CIPHER_SUITE,
-			      MACSEC_DEFAULT_CIPHER_ID, IFLA_MACSEC_PAD) ||
+			      csid, IFLA_MACSEC_PAD) ||
 	    nla_put_u8(skb, IFLA_MACSEC_ENCODING_SA, tx_sc->encoding_sa) ||
 	    nla_put_u8(skb, IFLA_MACSEC_ENCRYPT, tx_sc->encrypt) ||
 	    nla_put_u8(skb, IFLA_MACSEC_PROTECT, secy->protect_frames) ||
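
The key_len-to-cipher-suite-ID switch above now appears in both
nla_put_secy() and macsec_fill_info(), with the inverse mapping in
macsec_changelink_common(). A hypothetical helper (not part of this patch)
would express the forward mapping once:

	/* Hypothetical consolidation of the duplicated switch statements. */
	static int macsec_key_len_to_csid(unsigned int key_len, u64 *csid)
	{
		switch (key_len) {
		case MACSEC_GCM_AES_128_SAK_LEN:
			*csid = MACSEC_DEFAULT_CIPHER_ID;
			return 0;
		case MACSEC_GCM_AES_256_SAK_LEN:
			*csid = MACSEC_CIPHER_ID_GCM_AES_256;
			return 0;
		default:
			return -EINVAL;
		}
	}
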
diff --git a/drivers/net/netdevsim/Makefile b/drivers/net/netdevsim/Makefile
new file mode 100644
index 000000000000..09388c06171d
--- /dev/null
+++ b/drivers/net/netdevsim/Makefile
@@ -0,0 +1,11 @@
+# SPDX-License-Identifier: GPL-2.0
+
+obj-$(CONFIG_NETDEVSIM) += netdevsim.o
+
+netdevsim-objs := \
+	netdev.o \
+
+ifeq ($(CONFIG_BPF_SYSCALL),y)
+netdevsim-objs += \
+	bpf.o
+endif
diff --git a/drivers/net/netdevsim/bpf.c b/drivers/net/netdevsim/bpf.c
new file mode 100644
index 000000000000..de73c1ff0939
--- /dev/null
+++ b/drivers/net/netdevsim/bpf.c
@@ -0,0 +1,643 @@
+/*
+ * Copyright (C) 2017 Netronome Systems, Inc.
+ *
+ * This software is licensed under the GNU General Public License Version 2,
+ * June 1991 as shown in the file COPYING in the top-level directory of this
+ * source tree.
+ *
+ * THE COPYRIGHT HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS"
+ * WITHOUT WARRANTY OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING,
+ * BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+ * FOR A PARTICULAR PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE
+ * OF THE PROGRAM IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME
+ * THE COST OF ALL NECESSARY SERVICING, REPAIR OR CORRECTION.
+ */
+
+#include <linux/bpf.h>
+#include <linux/bpf_verifier.h>
+#include <linux/debugfs.h>
+#include <linux/kernel.h>
+#include <linux/mutex.h>
+#include <linux/rtnetlink.h>
+#include <net/pkt_cls.h>
+
+#include "netdevsim.h"
+
+#define pr_vlog(env, fmt, ...)	\
+	bpf_verifier_log_write(env, "[netdevsim] " fmt, ##__VA_ARGS__)
+
+struct nsim_bpf_bound_prog {
+	struct netdevsim *ns;
+	struct bpf_prog *prog;
+	struct dentry *ddir;
+	const char *state;
+	bool is_loaded;
+	struct list_head l;
+};
+
+#define NSIM_BPF_MAX_KEYS		2
+
+struct nsim_bpf_bound_map {
+	struct netdevsim *ns;
+	struct bpf_offloaded_map *map;
+	struct mutex mutex;
+	struct nsim_map_entry {
+		void *key;
+		void *value;
+	} entry[NSIM_BPF_MAX_KEYS];
+	struct list_head l;
+};
+
+static int nsim_debugfs_bpf_string_read(struct seq_file *file, void *data)
+{
+	const char **str = file->private;
+
+	if (*str)
+		seq_printf(file, "%s\n", *str);
+
+	return 0;
+}
+
+static int nsim_debugfs_bpf_string_open(struct inode *inode, struct file *f)
+{
+	return single_open(f, nsim_debugfs_bpf_string_read, inode->i_private);
+}
+
+static const struct file_operations nsim_bpf_string_fops = {
+	.owner = THIS_MODULE,
+	.open = nsim_debugfs_bpf_string_open,
+	.release = single_release,
+	.read = seq_read,
+	.llseek = seq_lseek
+};
+
+static int
+nsim_bpf_verify_insn(struct bpf_verifier_env *env, int insn_idx, int prev_insn)
+{
+	struct nsim_bpf_bound_prog *state;
+
+	state = env->prog->aux->offload->dev_priv;
+	if (state->ns->bpf_bind_verifier_delay && !insn_idx)
+		msleep(state->ns->bpf_bind_verifier_delay);
+
+	if (insn_idx == env->prog->len - 1)
+		pr_vlog(env, "Hello from netdevsim!\n");
+
+	return 0;
+}
+
+static const struct bpf_prog_offload_ops nsim_bpf_analyzer_ops = {
+	.insn_hook = nsim_bpf_verify_insn,
+};
+
+static bool nsim_xdp_offload_active(struct netdevsim *ns)
+{
+	return ns->xdp_prog_mode == XDP_ATTACHED_HW;
+}
+
+static void nsim_prog_set_loaded(struct bpf_prog *prog, bool loaded)
+{
+	struct nsim_bpf_bound_prog *state;
+
+	if (!prog || !prog->aux->offload)
+		return;
+
+	state = prog->aux->offload->dev_priv;
+	state->is_loaded = loaded;
+}
+
+static int
+nsim_bpf_offload(struct netdevsim *ns, struct bpf_prog *prog, bool oldprog)
+{
+	nsim_prog_set_loaded(ns->bpf_offloaded, false);
+
+	WARN(!!ns->bpf_offloaded != oldprog,
+	     "bad offload state, expected offload %sto be active",
+	     oldprog ? "" : "not ");
+	ns->bpf_offloaded = prog;
+	ns->bpf_offloaded_id = prog ? prog->aux->id : 0;
+	nsim_prog_set_loaded(prog, true);
+
+	return 0;
+}
+
+int nsim_bpf_setup_tc_block_cb(enum tc_setup_type type,
+			       void *type_data, void *cb_priv)
+{
+	struct tc_cls_bpf_offload *cls_bpf = type_data;
+	struct bpf_prog *prog = cls_bpf->prog;
+	struct netdevsim *ns = cb_priv;
+	struct bpf_prog *oldprog;
+
+	if (type != TC_SETUP_CLSBPF) {
+		NSIM_EA(cls_bpf->common.extack,
+			"only offload of BPF classifiers supported");
+		return -EOPNOTSUPP;
+	}
+
+	if (!tc_cls_can_offload_and_chain0(ns->netdev, &cls_bpf->common))
+		return -EOPNOTSUPP;
+
+	if (cls_bpf->common.protocol != htons(ETH_P_ALL)) {
+		NSIM_EA(cls_bpf->common.extack,
+			"only ETH_P_ALL supported as filter protocol");
+		return -EOPNOTSUPP;
+	}
+
+	if (!ns->bpf_tc_accept) {
+		NSIM_EA(cls_bpf->common.extack,
+			"netdevsim configured to reject BPF TC offload");
+		return -EOPNOTSUPP;
+	}
+	/* Note: progs without skip_sw will probably not be dev bound */
+	if (prog && !prog->aux->offload && !ns->bpf_tc_non_bound_accept) {
+		NSIM_EA(cls_bpf->common.extack,
+			"netdevsim configured to reject unbound programs");
+		return -EOPNOTSUPP;
+	}
+
+	if (cls_bpf->command != TC_CLSBPF_OFFLOAD)
+		return -EOPNOTSUPP;
+
+	oldprog = cls_bpf->oldprog;
+
+	/* Don't remove if oldprog doesn't match driver's state */
+	if (ns->bpf_offloaded != oldprog) {
+		oldprog = NULL;
+		if (!cls_bpf->prog)
+			return 0;
+		if (ns->bpf_offloaded) {
+			NSIM_EA(cls_bpf->common.extack,
+				"driver and netdev offload states mismatch");
+			return -EBUSY;
+		}
+	}
+
+	return nsim_bpf_offload(ns, cls_bpf->prog, oldprog);
+}
+
+int nsim_bpf_disable_tc(struct netdevsim *ns)
+{
+	if (ns->bpf_offloaded && !nsim_xdp_offload_active(ns))
+		return -EBUSY;
+	return 0;
+}
+
+static int nsim_xdp_offload_prog(struct netdevsim *ns, struct netdev_bpf *bpf)
+{
+	if (!nsim_xdp_offload_active(ns) && !bpf->prog)
+		return 0;
+	if (!nsim_xdp_offload_active(ns) && bpf->prog && ns->bpf_offloaded) {
+		NSIM_EA(bpf->extack, "TC program is already loaded");
+		return -EBUSY;
+	}
+
+	return nsim_bpf_offload(ns, bpf->prog, nsim_xdp_offload_active(ns));
+}
+
+static int nsim_xdp_set_prog(struct netdevsim *ns, struct netdev_bpf *bpf)
+{
+	int err;
+
+	if (ns->xdp_prog && (bpf->flags ^ ns->xdp_flags) & XDP_FLAGS_MODES) {
+		NSIM_EA(bpf->extack, "program loaded with different flags");
+		return -EBUSY;
+	}
+
+	if (bpf->command == XDP_SETUP_PROG && !ns->bpf_xdpdrv_accept) {
+		NSIM_EA(bpf->extack, "driver XDP disabled in DebugFS");
+		return -EOPNOTSUPP;
+	}
+	if (bpf->command == XDP_SETUP_PROG_HW && !ns->bpf_xdpoffload_accept) {
+		NSIM_EA(bpf->extack, "XDP offload disabled in DebugFS");
+		return -EOPNOTSUPP;
+	}
+
+	if (bpf->command == XDP_SETUP_PROG_HW) {
+		err = nsim_xdp_offload_prog(ns, bpf);
+		if (err)
+			return err;
+	}
+
+	if (ns->xdp_prog)
+		bpf_prog_put(ns->xdp_prog);
+
+	ns->xdp_prog = bpf->prog;
+	ns->xdp_flags = bpf->flags;
+
+	if (!bpf->prog)
+		ns->xdp_prog_mode = XDP_ATTACHED_NONE;
+	else if (bpf->command == XDP_SETUP_PROG)
+		ns->xdp_prog_mode = XDP_ATTACHED_DRV;
+	else
+		ns->xdp_prog_mode = XDP_ATTACHED_HW;
+
+	return 0;
+}
+
+static int nsim_bpf_create_prog(struct netdevsim *ns, struct bpf_prog *prog)
+{
+	struct nsim_bpf_bound_prog *state;
+	char name[16];
+
+	state = kzalloc(sizeof(*state), GFP_KERNEL);
+	if (!state)
+		return -ENOMEM;
+
+	state->ns = ns;
+	state->prog = prog;
+	state->state = "verify";
+
+	/* Program id is not populated yet when we create the state. */
+	sprintf(name, "%u", ns->prog_id_gen++);
+	state->ddir = debugfs_create_dir(name, ns->ddir_bpf_bound_progs);
+	if (IS_ERR_OR_NULL(state->ddir)) {
+		kfree(state);
+		return -ENOMEM;
+	}
+
+	debugfs_create_u32("id", 0400, state->ddir, &prog->aux->id);
+	debugfs_create_file("state", 0400, state->ddir,
+			    &state->state, &nsim_bpf_string_fops);
+	debugfs_create_bool("loaded", 0400, state->ddir, &state->is_loaded);
+
+	list_add_tail(&state->l, &ns->bpf_bound_progs);
+
+	prog->aux->offload->dev_priv = state;
+
+	return 0;
+}
+
+static void nsim_bpf_destroy_prog(struct bpf_prog *prog)
+{
+	struct nsim_bpf_bound_prog *state;
+
+	state = prog->aux->offload->dev_priv;
+	WARN(state->is_loaded,
+	     "offload state destroyed while program still bound");
+	debugfs_remove_recursive(state->ddir);
+	list_del(&state->l);
+	kfree(state);
+}
+
+static int nsim_setup_prog_checks(struct netdevsim *ns, struct netdev_bpf *bpf)
+{
+	if (bpf->prog && bpf->prog->aux->offload) {
+		NSIM_EA(bpf->extack, "attempt to load offloaded prog to drv");
+		return -EINVAL;
+	}
+	if (ns->netdev->mtu > NSIM_XDP_MAX_MTU) {
+		NSIM_EA(bpf->extack, "MTU too large w/ XDP enabled");
+		return -EINVAL;
+	}
+	if (nsim_xdp_offload_active(ns)) {
+		NSIM_EA(bpf->extack, "xdp offload active, can't load drv prog");
+		return -EBUSY;
+	}
+	return 0;
+}
+
+static int
+nsim_setup_prog_hw_checks(struct netdevsim *ns, struct netdev_bpf *bpf)
+{
+	struct nsim_bpf_bound_prog *state;
+
+	if (!bpf->prog)
+		return 0;
+
+	if (!bpf->prog->aux->offload) {
+		NSIM_EA(bpf->extack, "xdpoffload of non-bound program");
+		return -EINVAL;
+	}
+	if (bpf->prog->aux->offload->netdev != ns->netdev) {
+		NSIM_EA(bpf->extack, "program bound to different dev");
+		return -EINVAL;
+	}
+
+	state = bpf->prog->aux->offload->dev_priv;
+	if (WARN_ON(strcmp(state->state, "xlated"))) {
+		NSIM_EA(bpf->extack, "offloading program in bad state");
+		return -EINVAL;
+	}
+	return 0;
+}
+
+static bool
+nsim_map_key_match(struct bpf_map *map, struct nsim_map_entry *e, void *key)
+{
+	return e->key && !memcmp(key, e->key, map->key_size);
+}
+
+static int nsim_map_key_find(struct bpf_offloaded_map *offmap, void *key)
+{
+	struct nsim_bpf_bound_map *nmap = offmap->dev_priv;
+	unsigned int i;
+
+	for (i = 0; i < ARRAY_SIZE(nmap->entry); i++)
+		if (nsim_map_key_match(&offmap->map, &nmap->entry[i], key))
+			return i;
+
+	return -ENOENT;
+}
+
+static int
+nsim_map_alloc_elem(struct bpf_offloaded_map *offmap, unsigned int idx)
+{
+	struct nsim_bpf_bound_map *nmap = offmap->dev_priv;
+
+	nmap->entry[idx].key = kmalloc(offmap->map.key_size, GFP_USER);
+	if (!nmap->entry[idx].key)
+		return -ENOMEM;
+	nmap->entry[idx].value = kmalloc(offmap->map.value_size, GFP_USER);
+	if (!nmap->entry[idx].value) {
+		kfree(nmap->entry[idx].key);
+		nmap->entry[idx].key = NULL;
+		return -ENOMEM;
+	}
+
+	return 0;
+}
+
+static int
+nsim_map_get_next_key(struct bpf_offloaded_map *offmap,
+		      void *key, void *next_key)
+{
+	struct nsim_bpf_bound_map *nmap = offmap->dev_priv;
+	int idx = -ENOENT;
+
+	mutex_lock(&nmap->mutex);
+
+	if (key)
+		idx = nsim_map_key_find(offmap, key);
+	if (idx == -ENOENT)
+		idx = 0;
+	else
+		idx++;
+
+	for (; idx < ARRAY_SIZE(nmap->entry); idx++) {
+		if (nmap->entry[idx].key) {
+			memcpy(next_key, nmap->entry[idx].key,
+			       offmap->map.key_size);
+			break;
+		}
+	}
+
+	mutex_unlock(&nmap->mutex);
+
+	if (idx == ARRAY_SIZE(nmap->entry))
+		return -ENOENT;
+	return 0;
+}
+
+static int
+nsim_map_lookup_elem(struct bpf_offloaded_map *offmap, void *key, void *value)
+{
+	struct nsim_bpf_bound_map *nmap = offmap->dev_priv;
+	int idx;
+
+	mutex_lock(&nmap->mutex);
+
+	idx = nsim_map_key_find(offmap, key);
+	if (idx >= 0)
+		memcpy(value, nmap->entry[idx].value, offmap->map.value_size);
+
+	mutex_unlock(&nmap->mutex);
+
+	return idx < 0 ? idx : 0;
+}
+
+static int
+nsim_map_update_elem(struct bpf_offloaded_map *offmap,
+		     void *key, void *value, u64 flags)
+{
+	struct nsim_bpf_bound_map *nmap = offmap->dev_priv;
+	int idx, err = 0;
+
+	mutex_lock(&nmap->mutex);
+
+	idx = nsim_map_key_find(offmap, key);
+	if (idx < 0 && flags == BPF_EXIST) {
+		err = idx;
+		goto exit_unlock;
+	}
+	if (idx >= 0 && flags == BPF_NOEXIST) {
+		err = -EEXIST;
+		goto exit_unlock;
+	}
+
+	if (idx < 0) {
+		for (idx = 0; idx < ARRAY_SIZE(nmap->entry); idx++)
+			if (!nmap->entry[idx].key)
+				break;
+		if (idx == ARRAY_SIZE(nmap->entry)) {
+			err = -E2BIG;
+			goto exit_unlock;
+		}
+
+		err = nsim_map_alloc_elem(offmap, idx);
+		if (err)
+			goto exit_unlock;
+	}
+
+	memcpy(nmap->entry[idx].key, key, offmap->map.key_size);
+	memcpy(nmap->entry[idx].value, value, offmap->map.value_size);
+exit_unlock:
+	mutex_unlock(&nmap->mutex);
+
+	return err;
+}
+
+static int nsim_map_delete_elem(struct bpf_offloaded_map *offmap, void *key)
+{
+	struct nsim_bpf_bound_map *nmap = offmap->dev_priv;
+	int idx;
+
+	if (offmap->map.map_type == BPF_MAP_TYPE_ARRAY)
+		return -EINVAL;
+
+	mutex_lock(&nmap->mutex);
+
+	idx = nsim_map_key_find(offmap, key);
+	if (idx >= 0) {
+		kfree(nmap->entry[idx].key);
+		kfree(nmap->entry[idx].value);
+		memset(&nmap->entry[idx], 0, sizeof(nmap->entry[idx]));
+	}
+
+	mutex_unlock(&nmap->mutex);
+
+	return idx < 0 ? idx : 0;
+}
+
+static const struct bpf_map_dev_ops nsim_bpf_map_ops = {
+	.map_get_next_key	= nsim_map_get_next_key,
+	.map_lookup_elem	= nsim_map_lookup_elem,
+	.map_update_elem	= nsim_map_update_elem,
+	.map_delete_elem	= nsim_map_delete_elem,
+};
+
+static int
+nsim_bpf_map_alloc(struct netdevsim *ns, struct bpf_offloaded_map *offmap)
+{
+	struct nsim_bpf_bound_map *nmap;
+	unsigned int i;
+	int err;
+
+	if (WARN_ON(offmap->map.map_type != BPF_MAP_TYPE_ARRAY &&
+		    offmap->map.map_type != BPF_MAP_TYPE_HASH))
+		return -EINVAL;
+	if (offmap->map.max_entries > NSIM_BPF_MAX_KEYS)
+		return -ENOMEM;
+	if (offmap->map.map_flags)
+		return -EINVAL;
+
+	nmap = kzalloc(sizeof(*nmap), GFP_USER);
+	if (!nmap)
+		return -ENOMEM;
+
+	offmap->dev_priv = nmap;
+	nmap->ns = ns;
+	nmap->map = offmap;
+	mutex_init(&nmap->mutex);
+
+	if (offmap->map.map_type == BPF_MAP_TYPE_ARRAY) {
+		for (i = 0; i < ARRAY_SIZE(nmap->entry); i++) {
+			u32 *key;
+
+			err = nsim_map_alloc_elem(offmap, i);
+			if (err)
+				goto err_free;
+			key = nmap->entry[i].key;
+			*key = i;
+		}
+	}
+
+	offmap->dev_ops = &nsim_bpf_map_ops;
+	list_add_tail(&nmap->l, &ns->bpf_bound_maps);
+
+	return 0;
+
+err_free:
+	while (i--) {
+		kfree(nmap->entry[i].key);
+		kfree(nmap->entry[i].value);
+	}
+	kfree(nmap);
+	return err;
+}
+
+static void nsim_bpf_map_free(struct bpf_offloaded_map *offmap)
+{
+	struct nsim_bpf_bound_map *nmap = offmap->dev_priv;
+	unsigned int i;
+
+	for (i = 0; i < ARRAY_SIZE(nmap->entry); i++) {
+		kfree(nmap->entry[i].key);
+		kfree(nmap->entry[i].value);
+	}
+	list_del_init(&nmap->l);
+	mutex_destroy(&nmap->mutex);
+	kfree(nmap);
+}
+
+int nsim_bpf(struct net_device *dev, struct netdev_bpf *bpf)
+{
+	struct netdevsim *ns = netdev_priv(dev);
+	struct nsim_bpf_bound_prog *state;
+	int err;
+
+	ASSERT_RTNL();
+
+	switch (bpf->command) {
+	case BPF_OFFLOAD_VERIFIER_PREP:
+		if (!ns->bpf_bind_accept)
+			return -EOPNOTSUPP;
+
+		err = nsim_bpf_create_prog(ns, bpf->verifier.prog);
+		if (err)
+			return err;
+
+		bpf->verifier.ops = &nsim_bpf_analyzer_ops;
+		return 0;
+	case BPF_OFFLOAD_TRANSLATE:
+		state = bpf->offload.prog->aux->offload->dev_priv;
+
+		state->state = "xlated";
+		return 0;
+	case BPF_OFFLOAD_DESTROY:
+		nsim_bpf_destroy_prog(bpf->offload.prog);
+		return 0;
+	case XDP_QUERY_PROG:
+		bpf->prog_attached = ns->xdp_prog_mode;
+		bpf->prog_id = ns->xdp_prog ? ns->xdp_prog->aux->id : 0;
+		bpf->prog_flags = ns->xdp_prog ? ns->xdp_flags : 0;
+		return 0;
+	case XDP_SETUP_PROG:
+		err = nsim_setup_prog_checks(ns, bpf);
+		if (err)
+			return err;
+
+		return nsim_xdp_set_prog(ns, bpf);
+	case XDP_SETUP_PROG_HW:
+		err = nsim_setup_prog_hw_checks(ns, bpf);
+		if (err)
+			return err;
+
+		return nsim_xdp_set_prog(ns, bpf);
+	case BPF_OFFLOAD_MAP_ALLOC:
+		if (!ns->bpf_map_accept)
+			return -EOPNOTSUPP;
+
+		return nsim_bpf_map_alloc(ns, bpf->offmap);
+	case BPF_OFFLOAD_MAP_FREE:
+		nsim_bpf_map_free(bpf->offmap);
+		return 0;
+	default:
+		return -EINVAL;
+	}
+}
+
+int nsim_bpf_init(struct netdevsim *ns)
+{
+	INIT_LIST_HEAD(&ns->bpf_bound_progs);
+	INIT_LIST_HEAD(&ns->bpf_bound_maps);
+
+	debugfs_create_u32("bpf_offloaded_id", 0400, ns->ddir,
+			   &ns->bpf_offloaded_id);
+
+	ns->bpf_bind_accept = true;
+	debugfs_create_bool("bpf_bind_accept", 0600, ns->ddir,
+			    &ns->bpf_bind_accept);
+	debugfs_create_u32("bpf_bind_verifier_delay", 0600, ns->ddir,
+			   &ns->bpf_bind_verifier_delay);
+	ns->ddir_bpf_bound_progs =
+		debugfs_create_dir("bpf_bound_progs", ns->ddir);
+	if (IS_ERR_OR_NULL(ns->ddir_bpf_bound_progs))
+		return -ENOMEM;
+
+	ns->bpf_tc_accept = true;
+	debugfs_create_bool("bpf_tc_accept", 0600, ns->ddir,
+			    &ns->bpf_tc_accept);
+	debugfs_create_bool("bpf_tc_non_bound_accept", 0600, ns->ddir,
+			    &ns->bpf_tc_non_bound_accept);
+	ns->bpf_xdpdrv_accept = true;
+	debugfs_create_bool("bpf_xdpdrv_accept", 0600, ns->ddir,
+			    &ns->bpf_xdpdrv_accept);
+	ns->bpf_xdpoffload_accept = true;
+	debugfs_create_bool("bpf_xdpoffload_accept", 0600, ns->ddir,
+			    &ns->bpf_xdpoffload_accept);
+
+	ns->bpf_map_accept = true;
+	debugfs_create_bool("bpf_map_accept", 0600, ns->ddir,
+			    &ns->bpf_map_accept);
+
+	return 0;
+}
+
+void nsim_bpf_uninit(struct netdevsim *ns)
+{
+	WARN_ON(!list_empty(&ns->bpf_bound_progs));
+	WARN_ON(!list_empty(&ns->bpf_bound_maps));
+	WARN_ON(ns->xdp_prog);
+	WARN_ON(ns->bpf_offloaded);
+}
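
For context on the map ops registered above: once nsim_bpf_map_alloc() sets
offmap->dev_ops, the generic BPF map-offload code dispatches the syscall
operations through that table. Paraphrased and simplified (core kernel code,
not part of this patch):

	/* Simplified illustration of how the core reaches the driver ops. */
	static int example_offload_lookup(struct bpf_offloaded_map *offmap,
					  void *key, void *value)
	{
		if (!offmap->netdev)
			return -ENODEV;
		return offmap->dev_ops->map_lookup_elem(offmap, key, value);
	}
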
diff --git a/drivers/net/netdevsim/netdev.c b/drivers/net/netdevsim/netdev.c
new file mode 100644
index 000000000000..3fd567928f3d
--- /dev/null
+++ b/drivers/net/netdevsim/netdev.c
@@ -0,0 +1,504 @@
+/*
+ * Copyright (C) 2017 Netronome Systems, Inc.
+ *
+ * This software is licensed under the GNU General Public License Version 2,
+ * June 1991 as shown in the file COPYING in the top-level directory of this
+ * source tree.
+ *
+ * THE COPYRIGHT HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS"
+ * WITHOUT WARRANTY OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING,
+ * BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+ * FOR A PARTICULAR PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE
+ * OF THE PROGRAM IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME
+ * THE COST OF ALL NECESSARY SERVICING, REPAIR OR CORRECTION.
+ */
+
+#include <linux/debugfs.h>
+#include <linux/etherdevice.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/netdevice.h>
+#include <linux/slab.h>
+#include <net/netlink.h>
+#include <net/pkt_cls.h>
+#include <net/rtnetlink.h>
+
+#include "netdevsim.h"
+
+struct nsim_vf_config {
+	int link_state;
+	u16 min_tx_rate;
+	u16 max_tx_rate;
+	u16 vlan;
+	__be16 vlan_proto;
+	u16 qos;
+	u8 vf_mac[ETH_ALEN];
+	bool spoofchk_enabled;
+	bool trusted;
+	bool rss_query_enabled;
+};
+
+static u32 nsim_dev_id;
+
+static int nsim_num_vf(struct device *dev)
+{
+	struct netdevsim *ns = to_nsim(dev);
+
+	return ns->num_vfs;
+}
+
+static struct bus_type nsim_bus = {
+	.name		= DRV_NAME,
+	.dev_name	= DRV_NAME,
+	.num_vf		= nsim_num_vf,
+};
+
+static int nsim_vfs_enable(struct netdevsim *ns, unsigned int num_vfs)
+{
+	ns->vfconfigs = kcalloc(num_vfs, sizeof(struct nsim_vf_config),
+				GFP_KERNEL);
+	if (!ns->vfconfigs)
+		return -ENOMEM;
+	ns->num_vfs = num_vfs;
+
+	return 0;
+}
+
+static void nsim_vfs_disable(struct netdevsim *ns)
+{
+	kfree(ns->vfconfigs);
+	ns->vfconfigs = NULL;
+	ns->num_vfs = 0;
+}
+
+static ssize_t
+nsim_numvfs_store(struct device *dev, struct device_attribute *attr,
+		  const char *buf, size_t count)
+{
+	struct netdevsim *ns = to_nsim(dev);
+	unsigned int num_vfs;
+	int ret;
+
+	ret = kstrtouint(buf, 0, &num_vfs);
+	if (ret)
+		return ret;
+
+	rtnl_lock();
+	if (ns->num_vfs == num_vfs)
+		goto exit_good;
+	if (ns->num_vfs && num_vfs) {
+		ret = -EBUSY;
+		goto exit_unlock;
+	}
+
+	if (num_vfs) {
+		ret = nsim_vfs_enable(ns, num_vfs);
+		if (ret)
+			goto exit_unlock;
+	} else {
+		nsim_vfs_disable(ns);
+	}
+exit_good:
+	ret = count;
+exit_unlock:
+	rtnl_unlock();
+
+	return ret;
+}
+
+static ssize_t
+nsim_numvfs_show(struct device *dev, struct device_attribute *attr, char *buf)
+{
+	struct netdevsim *ns = to_nsim(dev);
+
+	return sprintf(buf, "%u\n", ns->num_vfs);
+}
+
+static struct device_attribute nsim_numvfs_attr =
+	__ATTR(sriov_numvfs, 0664, nsim_numvfs_show, nsim_numvfs_store);
+
+static struct attribute *nsim_dev_attrs[] = {
+	&nsim_numvfs_attr.attr,
+	NULL,
+};
+
+static const struct attribute_group nsim_dev_attr_group = {
+	.attrs = nsim_dev_attrs,
+};
+
+static const struct attribute_group *nsim_dev_attr_groups[] = {
+	&nsim_dev_attr_group,
+	NULL,
+};
+
+static void nsim_dev_release(struct device *dev)
+{
+	struct netdevsim *ns = to_nsim(dev);
+
+	nsim_vfs_disable(ns);
+	free_netdev(ns->netdev);
+}
+
+static struct device_type nsim_dev_type = {
+	.groups = nsim_dev_attr_groups,
+	.release = nsim_dev_release,
+};
+
+static int nsim_init(struct net_device *dev)
+{
+	struct netdevsim *ns = netdev_priv(dev);
+	int err;
+
+	ns->netdev = dev;
+	ns->ddir = debugfs_create_dir(netdev_name(dev), nsim_ddir);
+	if (IS_ERR_OR_NULL(ns->ddir))
+		return -ENOMEM;
+
+	err = nsim_bpf_init(ns);
+	if (err)
+		goto err_debugfs_destroy;
+
+	ns->dev.id = nsim_dev_id++;
+	ns->dev.bus = &nsim_bus;
+	ns->dev.type = &nsim_dev_type;
+	err = device_register(&ns->dev);
+	if (err)
+		goto err_bpf_uninit;
+
+	SET_NETDEV_DEV(dev, &ns->dev);
+
+	return 0;
+
+err_bpf_uninit:
+	nsim_bpf_uninit(ns);
+err_debugfs_destroy:
+	debugfs_remove_recursive(ns->ddir);
+	return err;
+}
+
+static void nsim_uninit(struct net_device *dev)
+{
+	struct netdevsim *ns = netdev_priv(dev);
+
+	debugfs_remove_recursive(ns->ddir);
+	nsim_bpf_uninit(ns);
+}
+
+static void nsim_free(struct net_device *dev)
+{
+	struct netdevsim *ns = netdev_priv(dev);
+
+	device_unregister(&ns->dev);
+	/* netdev and vf state will be freed by the device release callback */
+}
+
+static netdev_tx_t nsim_start_xmit(struct sk_buff *skb, struct net_device *dev)
+{
+	struct netdevsim *ns = netdev_priv(dev);
+
+	u64_stats_update_begin(&ns->syncp);
+	ns->tx_packets++;
+	ns->tx_bytes += skb->len;
+	u64_stats_update_end(&ns->syncp);
+
+	dev_kfree_skb(skb);
+
+	return NETDEV_TX_OK;
+}
+
+static void nsim_set_rx_mode(struct net_device *dev)
+{
+}
+
+static int nsim_change_mtu(struct net_device *dev, int new_mtu)
+{
+	struct netdevsim *ns = netdev_priv(dev);
+
+	if (ns->xdp_prog_mode == XDP_ATTACHED_DRV &&
+	    new_mtu > NSIM_XDP_MAX_MTU)
+		return -EBUSY;
+
+	dev->mtu = new_mtu;
+
+	return 0;
+}
+
+static void
+nsim_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats)
+{
+	struct netdevsim *ns = netdev_priv(dev);
+	unsigned int start;
+
+	do {
+		start = u64_stats_fetch_begin(&ns->syncp);
+		stats->tx_bytes = ns->tx_bytes;
+		stats->tx_packets = ns->tx_packets;
+	} while (u64_stats_fetch_retry(&ns->syncp, start));
+}
+
+static int
+nsim_setup_tc_block_cb(enum tc_setup_type type, void *type_data, void *cb_priv)
+{
+	return nsim_bpf_setup_tc_block_cb(type, type_data, cb_priv);
+}
+
+static int
+nsim_setup_tc_block(struct net_device *dev, struct tc_block_offload *f)
+{
+	struct netdevsim *ns = netdev_priv(dev);
+
+	if (f->binder_type != TCF_BLOCK_BINDER_TYPE_CLSACT_INGRESS)
+		return -EOPNOTSUPP;
+
+	switch (f->command) {
+	case TC_BLOCK_BIND:
+		return tcf_block_cb_register(f->block, nsim_setup_tc_block_cb,
+					     ns, ns);
+	case TC_BLOCK_UNBIND:
+		tcf_block_cb_unregister(f->block, nsim_setup_tc_block_cb, ns);
+		return 0;
+	default:
+		return -EOPNOTSUPP;
+	}
+}
+
+static int nsim_set_vf_mac(struct net_device *dev, int vf, u8 *mac)
+{
+	struct netdevsim *ns = netdev_priv(dev);
+
+	/* Only refuse multicast addresses; zero address can mean unset/any. */
+	if (vf >= ns->num_vfs || is_multicast_ether_addr(mac))
+		return -EINVAL;
+	memcpy(ns->vfconfigs[vf].vf_mac, mac, ETH_ALEN);
+
+	return 0;
+}
+
+static int nsim_set_vf_vlan(struct net_device *dev, int vf,
+			    u16 vlan, u8 qos, __be16 vlan_proto)
+{
+	struct netdevsim *ns = netdev_priv(dev);
+
+	if (vf >= ns->num_vfs || vlan > 4095 || qos > 7)
+		return -EINVAL;
+
+	ns->vfconfigs[vf].vlan = vlan;
+	ns->vfconfigs[vf].qos = qos;
+	ns->vfconfigs[vf].vlan_proto = vlan_proto;
+
+	return 0;
+}
+
+static int nsim_set_vf_rate(struct net_device *dev, int vf, int min, int max)
+{
+	struct netdevsim *ns = netdev_priv(dev);
+
+	if (vf >= ns->num_vfs)
+		return -EINVAL;
+
+	ns->vfconfigs[vf].min_tx_rate = min;
+	ns->vfconfigs[vf].max_tx_rate = max;
+
+	return 0;
+}
+
+static int nsim_set_vf_spoofchk(struct net_device *dev, int vf, bool val)
+{
+	struct netdevsim *ns = netdev_priv(dev);
+
+	if (vf >= ns->num_vfs)
+		return -EINVAL;
+	ns->vfconfigs[vf].spoofchk_enabled = val;
+
+	return 0;
+}
+
+static int nsim_set_vf_rss_query_en(struct net_device *dev, int vf, bool val)
+{
+	struct netdevsim *ns = netdev_priv(dev);
+
+	if (vf >= ns->num_vfs)
+		return -EINVAL;
+	ns->vfconfigs[vf].rss_query_enabled = val;
+
+	return 0;
+}
+
+static int nsim_set_vf_trust(struct net_device *dev, int vf, bool val)
+{
+	struct netdevsim *ns = netdev_priv(dev);
+
+	if (vf >= ns->num_vfs)
+		return -EINVAL;
+	ns->vfconfigs[vf].trusted = val;
+
+	return 0;
+}
+
+static int
+nsim_get_vf_config(struct net_device *dev, int vf, struct ifla_vf_info *ivi)
+{
+	struct netdevsim *ns = netdev_priv(dev);
+
+	if (vf >= ns->num_vfs)
+		return -EINVAL;
+
+	ivi->vf = vf;
+	ivi->linkstate = ns->vfconfigs[vf].link_state;
+	ivi->min_tx_rate = ns->vfconfigs[vf].min_tx_rate;
+	ivi->max_tx_rate = ns->vfconfigs[vf].max_tx_rate;
+	ivi->vlan = ns->vfconfigs[vf].vlan;
+	ivi->vlan_proto = ns->vfconfigs[vf].vlan_proto;
+	ivi->qos = ns->vfconfigs[vf].qos;
+	memcpy(&ivi->mac, ns->vfconfigs[vf].vf_mac, ETH_ALEN);
+	ivi->spoofchk = ns->vfconfigs[vf].spoofchk_enabled;
+	ivi->trusted = ns->vfconfigs[vf].trusted;
+	ivi->rss_query_en = ns->vfconfigs[vf].rss_query_enabled;
+
+	return 0;
+}
+
+static int nsim_set_vf_link_state(struct net_device *dev, int vf, int state)
+{
+	struct netdevsim *ns = netdev_priv(dev);
+
+	if (vf >= ns->num_vfs)
+		return -EINVAL;
+
+	switch (state) {
+	case IFLA_VF_LINK_STATE_AUTO:
+	case IFLA_VF_LINK_STATE_ENABLE:
+	case IFLA_VF_LINK_STATE_DISABLE:
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	ns->vfconfigs[vf].link_state = state;
+
+	return 0;
+}
+
+static int
+nsim_setup_tc(struct net_device *dev, enum tc_setup_type type, void *type_data)
+{
+	switch (type) {
+	case TC_SETUP_BLOCK:
+		return nsim_setup_tc_block(dev, type_data);
+	default:
+		return -EOPNOTSUPP;
+	}
+}
+
+static int
+nsim_set_features(struct net_device *dev, netdev_features_t features)
+{
+	struct netdevsim *ns = netdev_priv(dev);
+
+	if ((dev->features & NETIF_F_HW_TC) > (features & NETIF_F_HW_TC))
+		return nsim_bpf_disable_tc(ns);
+
+	return 0;
+}
+
+static const struct net_device_ops nsim_netdev_ops = {
+	.ndo_init		= nsim_init,
+	.ndo_uninit		= nsim_uninit,
+	.ndo_start_xmit		= nsim_start_xmit,
+	.ndo_set_rx_mode	= nsim_set_rx_mode,
+	.ndo_set_mac_address	= eth_mac_addr,
+	.ndo_validate_addr	= eth_validate_addr,
+	.ndo_change_mtu		= nsim_change_mtu,
+	.ndo_get_stats64	= nsim_get_stats64,
+	.ndo_set_vf_mac		= nsim_set_vf_mac,
+	.ndo_set_vf_vlan	= nsim_set_vf_vlan,
+	.ndo_set_vf_rate	= nsim_set_vf_rate,
+	.ndo_set_vf_spoofchk	= nsim_set_vf_spoofchk,
+	.ndo_set_vf_trust	= nsim_set_vf_trust,
+	.ndo_get_vf_config	= nsim_get_vf_config,
+	.ndo_set_vf_link_state	= nsim_set_vf_link_state,
+	.ndo_set_vf_rss_query_en = nsim_set_vf_rss_query_en,
+	.ndo_setup_tc		= nsim_setup_tc,
+	.ndo_set_features	= nsim_set_features,
+	.ndo_bpf		= nsim_bpf,
+};
+
+static void nsim_setup(struct net_device *dev)
+{
+	ether_setup(dev);
+	eth_hw_addr_random(dev);
+
+	dev->netdev_ops = &nsim_netdev_ops;
+	dev->priv_destructor = nsim_free;
+
+	dev->tx_queue_len = 0;
+	dev->flags |= IFF_NOARP;
+	dev->flags &= ~IFF_MULTICAST;
+	dev->priv_flags |= IFF_LIVE_ADDR_CHANGE |
+			   IFF_NO_QUEUE;
+	dev->features |= NETIF_F_HIGHDMA |
+			 NETIF_F_SG |
+			 NETIF_F_FRAGLIST |
+			 NETIF_F_HW_CSUM |
+			 NETIF_F_TSO;
+	dev->hw_features |= NETIF_F_HW_TC;
+	dev->max_mtu = ETH_MAX_MTU;
+}
+
+static int nsim_validate(struct nlattr *tb[], struct nlattr *data[],
+			 struct netlink_ext_ack *extack)
+{
+	if (tb[IFLA_ADDRESS]) {
+		if (nla_len(tb[IFLA_ADDRESS]) != ETH_ALEN)
+			return -EINVAL;
+		if (!is_valid_ether_addr(nla_data(tb[IFLA_ADDRESS])))
+			return -EADDRNOTAVAIL;
+	}
+	return 0;
+}
+
+static struct rtnl_link_ops nsim_link_ops __read_mostly = {
+	.kind		= DRV_NAME,
+	.priv_size	= sizeof(struct netdevsim),
+	.setup		= nsim_setup,
+	.validate	= nsim_validate,
+};
+
+struct dentry *nsim_ddir;
+
+static int __init nsim_module_init(void)
+{
+	int err;
+
+	nsim_ddir = debugfs_create_dir(DRV_NAME, NULL);
+	if (IS_ERR_OR_NULL(nsim_ddir))
+		return -ENOMEM;
+
+	err = bus_register(&nsim_bus);
+	if (err)
+		goto err_debugfs_destroy;
+
+	err = rtnl_link_register(&nsim_link_ops);
+	if (err)
+		goto err_unreg_bus;
+
+	return 0;
+
+err_unreg_bus:
+	bus_unregister(&nsim_bus);
+err_debugfs_destroy:
+	debugfs_remove_recursive(nsim_ddir);
+	return err;
+}
+
+static void __exit nsim_module_exit(void)
+{
+	rtnl_link_unregister(&nsim_link_ops);
+	bus_unregister(&nsim_bus);
+	debugfs_remove_recursive(nsim_ddir);
+}
+
+module_init(nsim_module_init);
+module_exit(nsim_module_exit);
+MODULE_LICENSE("GPL");
+MODULE_ALIAS_RTNL_LINK(DRV_NAME);
diff --git a/drivers/net/netdevsim/netdevsim.h b/drivers/net/netdevsim/netdevsim.h
new file mode 100644
index 000000000000..ea081c10efb8
--- /dev/null
+++ b/drivers/net/netdevsim/netdevsim.h
@@ -0,0 +1,109 @@
+/*
+ * Copyright (C) 2017 Netronome Systems, Inc.
+ *
+ * This software is licensed under the GNU General Public License Version 2,
+ * June 1991 as shown in the file COPYING in the top-level directory of this
+ * source tree.
+ *
+ * THE COPYRIGHT HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS"
+ * WITHOUT WARRANTY OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING,
+ * BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+ * FOR A PARTICULAR PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE
+ * OF THE PROGRAM IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME
+ * THE COST OF ALL NECESSARY SERVICING, REPAIR OR CORRECTION.
+ */
+
+#include <linux/device.h>
+#include <linux/kernel.h>
+#include <linux/list.h>
+#include <linux/netdevice.h>
+#include <linux/u64_stats_sync.h>
+
+#define DRV_NAME	"netdevsim"
+
+#define NSIM_XDP_MAX_MTU	4000
+
+#define NSIM_EA(extack, msg)	NL_SET_ERR_MSG_MOD((extack), msg)
+
+struct bpf_prog;
+struct dentry;
+struct nsim_vf_config;
+
+struct netdevsim {
+	struct net_device *netdev;
+
+	u64 tx_packets;
+	u64 tx_bytes;
+	struct u64_stats_sync syncp;
+
+	struct device dev;
+
+	struct dentry *ddir;
+
+	unsigned int num_vfs;
+	struct nsim_vf_config *vfconfigs;
+
+	struct bpf_prog	*bpf_offloaded;
+	u32 bpf_offloaded_id;
+
+	u32 xdp_flags;
+	int xdp_prog_mode;
+	struct bpf_prog	*xdp_prog;
+
+	u32 prog_id_gen;
+
+	bool bpf_bind_accept;
+	u32 bpf_bind_verifier_delay;
+	struct dentry *ddir_bpf_bound_progs;
+	struct list_head bpf_bound_progs;
+
+	bool bpf_tc_accept;
+	bool bpf_tc_non_bound_accept;
+	bool bpf_xdpdrv_accept;
+	bool bpf_xdpoffload_accept;
+
+	bool bpf_map_accept;
+	struct list_head bpf_bound_maps;
+};
+
+extern struct dentry *nsim_ddir;
+
+#ifdef CONFIG_BPF_SYSCALL
+int nsim_bpf_init(struct netdevsim *ns);
+void nsim_bpf_uninit(struct netdevsim *ns);
+int nsim_bpf(struct net_device *dev, struct netdev_bpf *bpf);
+int nsim_bpf_disable_tc(struct netdevsim *ns);
+int nsim_bpf_setup_tc_block_cb(enum tc_setup_type type,
+			       void *type_data, void *cb_priv);
+#else
+static inline int nsim_bpf_init(struct netdevsim *ns)
+{
+	return 0;
+}
+
+static inline void nsim_bpf_uninit(struct netdevsim *ns)
+{
+}
+
+static inline int nsim_bpf(struct net_device *dev, struct netdev_bpf *bpf)
+{
+	return bpf->command == XDP_QUERY_PROG ? 0 : -EOPNOTSUPP;
+}
+
+static inline int nsim_bpf_disable_tc(struct netdevsim *ns)
+{
+	return 0;
+}
+
+static inline int
+nsim_bpf_setup_tc_block_cb(enum tc_setup_type type, void *type_data,
+			   void *cb_priv)
+{
+	return -EOPNOTSUPP;
+}
+#endif
+
+static inline struct netdevsim *to_nsim(struct device *ptr)
+{
+	return container_of(ptr, struct netdevsim, dev);
+}
diff --git a/drivers/net/phy/amd.c b/drivers/net/phy/amd.c
index 18141c022b13..6fe5dc9201d0 100644
--- a/drivers/net/phy/amd.c
+++ b/drivers/net/phy/amd.c
@@ -68,8 +68,6 @@ static struct phy_driver am79c_driver[] = { {
 	.features	= PHY_BASIC_FEATURES,
 	.flags		= PHY_HAS_INTERRUPT,
 	.config_init	= am79c_config_init,
-	.config_aneg	= genphy_config_aneg,
-	.read_status	= genphy_read_status,
 	.ack_interrupt	= am79c_ack_interrupt,
 	.config_intr	= am79c_config_intr,
 } };
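
This and the PHY driver hunks that follow all delete .config_aneg and
.read_status hooks that simply pointed at the generic implementations. That
is safe because the phylib core now falls back to the genphy helpers when a
driver leaves these hooks NULL; roughly (paraphrase with hypothetical naming,
not part of this diff):

	/* Paraphrase of the phylib core fallback behaviour. */
	static int phy_config_aneg(struct phy_device *phydev)
	{
		if (phydev->drv->config_aneg)
			return phydev->drv->config_aneg(phydev);
		return genphy_config_aneg(phydev);
	}
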
diff --git a/drivers/net/phy/at803x.c b/drivers/net/phy/at803x.c
index e911e4990b20..411cf1072bae 100644
--- a/drivers/net/phy/at803x.c
+++ b/drivers/net/phy/at803x.c
@@ -71,7 +71,6 @@ MODULE_LICENSE("GPL");
 
 struct at803x_priv {
 	bool phy_reset:1;
-	struct gpio_desc *gpiod_reset;
 };
 
 struct at803x_context {
@@ -216,56 +215,33 @@ static int at803x_suspend(struct phy_device *phydev)
 	int value;
 	int wol_enabled;
 
-	mutex_lock(&phydev->lock);
-
 	value = phy_read(phydev, AT803X_INTR_ENABLE);
 	wol_enabled = value & AT803X_INTR_ENABLE_WOL;
 
-	value = phy_read(phydev, MII_BMCR);
-
 	if (wol_enabled)
-		value |= BMCR_ISOLATE;
+		value = BMCR_ISOLATE;
 	else
-		value |= BMCR_PDOWN;
-
-	phy_write(phydev, MII_BMCR, value);
+		value = BMCR_PDOWN;
 
-	mutex_unlock(&phydev->lock);
+	phy_modify(phydev, MII_BMCR, 0, value);
 
 	return 0;
 }
 
 static int at803x_resume(struct phy_device *phydev)
 {
-	int value;
-
-	value = phy_read(phydev, MII_BMCR);
-	value &= ~(BMCR_PDOWN | BMCR_ISOLATE);
-	phy_write(phydev, MII_BMCR, value);
-
-	return 0;
+	return phy_modify(phydev, MII_BMCR, BMCR_PDOWN | BMCR_ISOLATE, 0);
 }
 
 static int at803x_probe(struct phy_device *phydev)
 {
 	struct device *dev = &phydev->mdio.dev;
 	struct at803x_priv *priv;
-	struct gpio_desc *gpiod_reset;
 
 	priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
 	if (!priv)
 		return -ENOMEM;
 
-	if (phydev->drv->phy_id != ATH8030_PHY_ID)
-		goto does_not_require_reset_workaround;
-
-	gpiod_reset = devm_gpiod_get_optional(dev, "reset", GPIOD_OUT_LOW);
-	if (IS_ERR(gpiod_reset))
-		return PTR_ERR(gpiod_reset);
-
-	priv->gpiod_reset = gpiod_reset;
-
-does_not_require_reset_workaround:
 	phydev->priv = priv;
 
 	return 0;
@@ -339,14 +315,14 @@ static void at803x_link_change_notify(struct phy_device *phydev)
 	 * cannot recover from by software.
 	 */
 	if (phydev->state == PHY_NOLINK) {
-		if (priv->gpiod_reset && !priv->phy_reset) {
+		if (phydev->mdio.reset && !priv->phy_reset) {
 			struct at803x_context context;
 
 			at803x_context_save(phydev, &context);
 
-			gpiod_set_value(priv->gpiod_reset, 1);
+			phy_device_reset(phydev, 1);
 			msleep(1);
-			gpiod_set_value(priv->gpiod_reset, 0);
+			phy_device_reset(phydev, 0);
 			msleep(1);
 
 			at803x_context_restore(phydev, &context);
@@ -404,8 +380,6 @@ static struct phy_driver at803x_driver[] = {
 	.resume			= at803x_resume,
 	.features		= PHY_GBIT_FEATURES,
 	.flags			= PHY_HAS_INTERRUPT,
-	.config_aneg		= genphy_config_aneg,
-	.read_status		= genphy_read_status,
 	.ack_interrupt		= at803x_ack_interrupt,
 	.config_intr		= at803x_config_intr,
 }, {
@@ -422,8 +396,6 @@ static struct phy_driver at803x_driver[] = {
 	.resume			= at803x_resume,
 	.features		= PHY_BASIC_FEATURES,
 	.flags			= PHY_HAS_INTERRUPT,
-	.config_aneg		= genphy_config_aneg,
-	.read_status		= genphy_read_status,
 	.ack_interrupt		= at803x_ack_interrupt,
 	.config_intr		= at803x_config_intr,
 }, {
@@ -439,8 +411,6 @@ static struct phy_driver at803x_driver[] = {
 	.resume			= at803x_resume,
 	.features		= PHY_GBIT_FEATURES,
 	.flags			= PHY_HAS_INTERRUPT,
-	.config_aneg		= genphy_config_aneg,
-	.read_status		= genphy_read_status,
 	.aneg_done		= at803x_aneg_done,
 	.ack_interrupt		= &at803x_ack_interrupt,
 	.config_intr		= &at803x_config_intr,
diff --git a/drivers/net/phy/bcm-cygnus.c b/drivers/net/phy/bcm-cygnus.c
index 3fe8cc5c177e..6838129839ca 100644
--- a/drivers/net/phy/bcm-cygnus.c
+++ b/drivers/net/phy/bcm-cygnus.c
@@ -136,8 +136,6 @@ static struct phy_driver bcm_cygnus_phy_driver[] = {
 	.name          = "Broadcom Cygnus PHY",
 	.features      = PHY_GBIT_FEATURES,
 	.config_init   = bcm_cygnus_config_init,
-	.config_aneg   = genphy_config_aneg,
-	.read_status   = genphy_read_status,
 	.ack_interrupt = bcm_phy_ack_intr,
 	.config_intr   = bcm_phy_config_intr,
 	.suspend       = genphy_suspend,
diff --git a/drivers/net/phy/bcm63xx.c b/drivers/net/phy/bcm63xx.c
index b0492ef2cdaa..cf14613745c9 100644
--- a/drivers/net/phy/bcm63xx.c
+++ b/drivers/net/phy/bcm63xx.c
@@ -69,8 +69,6 @@ static struct phy_driver bcm63xx_driver[] = {
 	.features	= (PHY_BASIC_FEATURES | SUPPORTED_Pause),
 	.flags		= PHY_HAS_INTERRUPT | PHY_IS_INTERNAL,
 	.config_init	= bcm63xx_config_init,
-	.config_aneg	= genphy_config_aneg,
-	.read_status	= genphy_read_status,
 	.ack_interrupt	= bcm_phy_ack_intr,
 	.config_intr	= bcm63xx_config_intr,
 }, {
@@ -81,8 +79,6 @@ static struct phy_driver bcm63xx_driver[] = {
 	.features	= (PHY_BASIC_FEATURES | SUPPORTED_Pause),
 	.flags		= PHY_HAS_INTERRUPT | PHY_IS_INTERNAL,
 	.config_init	= bcm63xx_config_init,
-	.config_aneg	= genphy_config_aneg,
-	.read_status	= genphy_read_status,
 	.ack_interrupt	= bcm_phy_ack_intr,
 	.config_intr	= bcm63xx_config_intr,
 } };
diff --git a/drivers/net/phy/bcm7xxx.c b/drivers/net/phy/bcm7xxx.c
index 8b33f688ac8a..421feb8f92fe 100644
--- a/drivers/net/phy/bcm7xxx.c
+++ b/drivers/net/phy/bcm7xxx.c
@@ -611,8 +611,6 @@ static int bcm7xxx_28nm_probe(struct phy_device *phydev)
 	.features	= PHY_GBIT_FEATURES,				\
 	.flags		= PHY_IS_INTERNAL,				\
 	.config_init	= bcm7xxx_28nm_config_init,			\
-	.config_aneg	= genphy_config_aneg,				\
-	.read_status	= genphy_read_status,				\
 	.resume		= bcm7xxx_28nm_resume,				\
 	.get_tunable	= bcm7xxx_28nm_get_tunable,			\
 	.set_tunable	= bcm7xxx_28nm_set_tunable,			\
@@ -630,8 +628,6 @@ static int bcm7xxx_28nm_probe(struct phy_device *phydev)
 	.features	= PHY_BASIC_FEATURES,				\
 	.flags		= PHY_IS_INTERNAL,				\
 	.config_init	= bcm7xxx_28nm_ephy_config_init,		\
-	.config_aneg	= genphy_config_aneg,				\
-	.read_status	= genphy_read_status,				\
 	.resume		= bcm7xxx_28nm_ephy_resume,			\
 	.get_sset_count	= bcm_phy_get_sset_count,			\
 	.get_strings	= bcm_phy_get_strings,				\
@@ -647,8 +643,6 @@ static int bcm7xxx_28nm_probe(struct phy_device *phydev)
 	.features       = PHY_BASIC_FEATURES,				\
 	.flags          = PHY_IS_INTERNAL,				\
 	.config_init    = bcm7xxx_config_init,				\
-	.config_aneg    = genphy_config_aneg,				\
-	.read_status    = genphy_read_status,				\
 	.suspend        = bcm7xxx_suspend,				\
 	.resume         = bcm7xxx_config_init,				\
 }
diff --git a/drivers/net/phy/broadcom.c b/drivers/net/phy/broadcom.c
index d7ed69deabfb..3bb6b66dc7bf 100644
--- a/drivers/net/phy/broadcom.c
+++ b/drivers/net/phy/broadcom.c
@@ -540,6 +540,37 @@ static int brcm_fet_config_intr(struct phy_device *phydev)
 	return err;
 }
 
+struct bcm53xx_phy_priv {
+	u64	*stats;
+};
+
+static int bcm53xx_phy_probe(struct phy_device *phydev)
+{
+	struct bcm53xx_phy_priv *priv;
+
+	priv = devm_kzalloc(&phydev->mdio.dev, sizeof(*priv), GFP_KERNEL);
+	if (!priv)
+		return -ENOMEM;
+
+	phydev->priv = priv;
+
+	priv->stats = devm_kcalloc(&phydev->mdio.dev,
+				   bcm_phy_get_sset_count(phydev), sizeof(u64),
+				   GFP_KERNEL);
+	if (!priv->stats)
+		return -ENOMEM;
+
+	return 0;
+}
+
+static void bcm53xx_phy_get_stats(struct phy_device *phydev,
+				  struct ethtool_stats *stats, u64 *data)
+{
+	struct bcm53xx_phy_priv *priv = phydev->priv;
+
+	bcm_phy_get_stats(phydev, priv->stats, stats, data);
+}
+
 static struct phy_driver broadcom_drivers[] = {
 {
 	.phy_id		= PHY_ID_BCM5411,
@@ -548,8 +579,6 @@ static struct phy_driver broadcom_drivers[] = {
 	.features	= PHY_GBIT_FEATURES,
 	.flags		= PHY_HAS_INTERRUPT,
 	.config_init	= bcm54xx_config_init,
-	.config_aneg	= genphy_config_aneg,
-	.read_status	= genphy_read_status,
 	.ack_interrupt	= bcm_phy_ack_intr,
 	.config_intr	= bcm_phy_config_intr,
 }, {
@@ -559,8 +588,6 @@ static struct phy_driver broadcom_drivers[] = {
 	.features	= PHY_GBIT_FEATURES,
 	.flags		= PHY_HAS_INTERRUPT,
 	.config_init	= bcm54xx_config_init,
-	.config_aneg	= genphy_config_aneg,
-	.read_status	= genphy_read_status,
 	.ack_interrupt	= bcm_phy_ack_intr,
 	.config_intr	= bcm_phy_config_intr,
 }, {
@@ -570,8 +597,6 @@ static struct phy_driver broadcom_drivers[] = {
 	.features	= PHY_GBIT_FEATURES,
 	.flags		= PHY_HAS_INTERRUPT,
 	.config_init	= bcm54xx_config_init,
-	.config_aneg	= genphy_config_aneg,
-	.read_status	= genphy_read_status,
 	.ack_interrupt	= bcm_phy_ack_intr,
 	.config_intr	= bcm_phy_config_intr,
 }, {
@@ -581,8 +606,6 @@ static struct phy_driver broadcom_drivers[] = {
 	.features	= PHY_GBIT_FEATURES,
 	.flags		= PHY_HAS_INTERRUPT,
 	.config_init	= bcm54xx_config_init,
-	.config_aneg	= genphy_config_aneg,
-	.read_status	= genphy_read_status,
 	.ack_interrupt	= bcm_phy_ack_intr,
 	.config_intr	= bcm_phy_config_intr,
 }, {
@@ -592,8 +615,6 @@ static struct phy_driver broadcom_drivers[] = {
 	.features	= PHY_GBIT_FEATURES,
 	.flags		= PHY_HAS_INTERRUPT,
 	.config_init	= bcm54xx_config_init,
-	.config_aneg	= genphy_config_aneg,
-	.read_status	= genphy_read_status,
 	.ack_interrupt	= bcm_phy_ack_intr,
 	.config_intr	= bcm_phy_config_intr,
 }, {
@@ -603,8 +624,6 @@ static struct phy_driver broadcom_drivers[] = {
 	.features	= PHY_GBIT_FEATURES,
 	.flags		= PHY_HAS_INTERRUPT,
 	.config_init	= bcm54xx_config_init,
-	.config_aneg	= genphy_config_aneg,
-	.read_status	= genphy_read_status,
 	.ack_interrupt	= bcm_phy_ack_intr,
 	.config_intr	= bcm_phy_config_intr,
 }, {
@@ -614,8 +633,6 @@ static struct phy_driver broadcom_drivers[] = {
 	.features	= PHY_GBIT_FEATURES,
 	.flags		= PHY_HAS_INTERRUPT,
 	.config_init	= bcm54xx_config_init,
-	.config_aneg	= genphy_config_aneg,
-	.read_status	= genphy_read_status,
 	.ack_interrupt	= bcm_phy_ack_intr,
 	.config_intr	= bcm_phy_config_intr,
 }, {
@@ -626,7 +643,6 @@ static struct phy_driver broadcom_drivers[] = {
 	.flags		= PHY_HAS_INTERRUPT,
 	.config_init	= bcm54xx_config_init,
 	.config_aneg	= bcm5481_config_aneg,
-	.read_status	= genphy_read_status,
 	.ack_interrupt	= bcm_phy_ack_intr,
 	.config_intr	= bcm_phy_config_intr,
 }, {
@@ -637,7 +653,6 @@ static struct phy_driver broadcom_drivers[] = {
 	.flags          = PHY_HAS_INTERRUPT,
 	.config_init    = bcm54xx_config_init,
 	.config_aneg    = bcm5481_config_aneg,
-	.read_status    = genphy_read_status,
 	.ack_interrupt  = bcm_phy_ack_intr,
 	.config_intr    = bcm_phy_config_intr,
 }, {
@@ -647,7 +662,6 @@ static struct phy_driver broadcom_drivers[] = {
 	.features	= PHY_GBIT_FEATURES,
 	.flags		= PHY_HAS_INTERRUPT,
 	.config_init	= bcm5482_config_init,
-	.config_aneg	= genphy_config_aneg,
 	.read_status	= bcm5482_read_status,
 	.ack_interrupt	= bcm_phy_ack_intr,
 	.config_intr	= bcm_phy_config_intr,
@@ -658,8 +672,6 @@ static struct phy_driver broadcom_drivers[] = {
 	.features	= PHY_GBIT_FEATURES,
 	.flags		= PHY_HAS_INTERRUPT,
 	.config_init	= bcm54xx_config_init,
-	.config_aneg	= genphy_config_aneg,
-	.read_status	= genphy_read_status,
 	.ack_interrupt	= bcm_phy_ack_intr,
 	.config_intr	= bcm_phy_config_intr,
 }, {
@@ -669,8 +681,6 @@ static struct phy_driver broadcom_drivers[] = {
 	.features	= PHY_GBIT_FEATURES,
 	.flags		= PHY_HAS_INTERRUPT,
 	.config_init	= bcm54xx_config_init,
-	.config_aneg	= genphy_config_aneg,
-	.read_status	= genphy_read_status,
 	.ack_interrupt	= bcm_phy_ack_intr,
 	.config_intr	= bcm_phy_config_intr,
 }, {
@@ -680,8 +690,6 @@ static struct phy_driver broadcom_drivers[] = {
 	.features	= PHY_GBIT_FEATURES,
 	.flags		= PHY_HAS_INTERRUPT,
 	.config_init	= bcm54xx_config_init,
-	.config_aneg	= genphy_config_aneg,
-	.read_status	= genphy_read_status,
 	.ack_interrupt	= bcm_phy_ack_intr,
 	.config_intr	= bcm_phy_config_intr,
 }, {
@@ -691,8 +699,6 @@ static struct phy_driver broadcom_drivers[] = {
 	.features	= PHY_BASIC_FEATURES,
 	.flags		= PHY_HAS_INTERRUPT,
 	.config_init	= brcm_fet_config_init,
-	.config_aneg	= genphy_config_aneg,
-	.read_status	= genphy_read_status,
 	.ack_interrupt	= brcm_fet_ack_interrupt,
 	.config_intr	= brcm_fet_config_intr,
 }, {
@@ -702,10 +708,18 @@ static struct phy_driver broadcom_drivers[] = {
 	.features	= PHY_BASIC_FEATURES,
 	.flags		= PHY_HAS_INTERRUPT,
 	.config_init	= brcm_fet_config_init,
-	.config_aneg	= genphy_config_aneg,
-	.read_status	= genphy_read_status,
 	.ack_interrupt	= brcm_fet_ack_interrupt,
 	.config_intr	= brcm_fet_config_intr,
+}, {
+	.phy_id		= PHY_ID_BCM5395,
+	.phy_id_mask	= 0xfffffff0,
+	.name		= "Broadcom BCM5395",
+	.flags		= PHY_IS_INTERNAL,
+	.features	= PHY_GBIT_FEATURES,
+	.get_sset_count	= bcm_phy_get_sset_count,
+	.get_strings	= bcm_phy_get_strings,
+	.get_stats	= bcm53xx_phy_get_stats,
+	.probe		= bcm53xx_phy_probe,
 } };
 
 module_phy_driver(broadcom_drivers);
@@ -726,6 +740,7 @@ static struct mdio_device_id __maybe_unused broadcom_tbl[] = {
 	{ PHY_ID_BCM57780, 0xfffffff0 },
 	{ PHY_ID_BCMAC131, 0xfffffff0 },
 	{ PHY_ID_BCM5241, 0xfffffff0 },
+	{ PHY_ID_BCM5395, 0xfffffff0 },
 	{ }
 };
 
diff --git a/drivers/net/phy/cicada.c b/drivers/net/phy/cicada.c
index d339c1afea77..c05af00bf4b6 100644
--- a/drivers/net/phy/cicada.c
+++ b/drivers/net/phy/cicada.c
@@ -110,8 +110,6 @@ static struct phy_driver cis820x_driver[] = {
 	.features	= PHY_GBIT_FEATURES,
 	.flags		= PHY_HAS_INTERRUPT,
 	.config_init	= &cis820x_config_init,
-	.config_aneg	= &genphy_config_aneg,
-	.read_status	= &genphy_read_status,
 	.ack_interrupt	= &cis820x_ack_interrupt,
 	.config_intr	= &cis820x_config_intr,
 }, {
@@ -121,8 +119,6 @@ static struct phy_driver cis820x_driver[] = {
 	.features	= PHY_GBIT_FEATURES,
 	.flags		= PHY_HAS_INTERRUPT,
 	.config_init	= &cis820x_config_init,
-	.config_aneg	= &genphy_config_aneg,
-	.read_status	= &genphy_read_status,
 	.ack_interrupt	= &cis820x_ack_interrupt,
 	.config_intr	= &cis820x_config_intr,
 } };
diff --git a/drivers/net/phy/davicom.c b/drivers/net/phy/davicom.c
index e28913d9ea7e..5ee99b3b428c 100644
--- a/drivers/net/phy/davicom.c
+++ b/drivers/net/phy/davicom.c
@@ -153,7 +153,6 @@ static struct phy_driver dm91xx_driver[] = {
 	.flags		= PHY_HAS_INTERRUPT,
 	.config_init	= dm9161_config_init,
 	.config_aneg	= dm9161_config_aneg,
-	.read_status	= genphy_read_status,
 	.ack_interrupt	= dm9161_ack_interrupt,
 	.config_intr	= dm9161_config_intr,
 }, {
@@ -164,7 +163,6 @@ static struct phy_driver dm91xx_driver[] = {
 	.flags		= PHY_HAS_INTERRUPT,
 	.config_init	= dm9161_config_init,
 	.config_aneg	= dm9161_config_aneg,
-	.read_status	= genphy_read_status,
 	.ack_interrupt	= dm9161_ack_interrupt,
 	.config_intr	= dm9161_config_intr,
 }, {
@@ -175,7 +173,6 @@ static struct phy_driver dm91xx_driver[] = {
 	.flags		= PHY_HAS_INTERRUPT,
 	.config_init	= dm9161_config_init,
 	.config_aneg	= dm9161_config_aneg,
-	.read_status	= genphy_read_status,
 	.ack_interrupt	= dm9161_ack_interrupt,
 	.config_intr	= dm9161_config_intr,
 }, {
@@ -184,8 +181,6 @@ static struct phy_driver dm91xx_driver[] = {
 	.phy_id_mask	= 0x0ffffff0,
 	.features	= PHY_BASIC_FEATURES,
 	.flags		= PHY_HAS_INTERRUPT,
-	.config_aneg	= genphy_config_aneg,
-	.read_status	= genphy_read_status,
 	.ack_interrupt	= dm9161_ack_interrupt,
 	.config_intr	= dm9161_config_intr,
 } };
diff --git a/drivers/net/phy/dp83640.c b/drivers/net/phy/dp83640.c
index cbd629822f04..654f42d00092 100644
--- a/drivers/net/phy/dp83640.c
+++ b/drivers/net/phy/dp83640.c
@@ -1502,8 +1502,6 @@ static struct phy_driver dp83640_driver = {
 	.probe		= dp83640_probe,
 	.remove		= dp83640_remove,
 	.config_init	= dp83640_config_init,
-	.config_aneg	= genphy_config_aneg,
-	.read_status	= genphy_read_status,
 	.ack_interrupt  = dp83640_ack_interrupt,
 	.config_intr    = dp83640_config_intr,
 	.ts_info	= dp83640_ts_info,
diff --git a/drivers/net/phy/dp83822.c b/drivers/net/phy/dp83822.c
index 14335d14e9e4..6e8a2a4f3a6e 100644
--- a/drivers/net/phy/dp83822.c
+++ b/drivers/net/phy/dp83822.c
@@ -325,8 +325,6 @@ static struct phy_driver dp83822_driver[] = {
 		.set_wol = dp83822_set_wol,
 		.ack_interrupt = dp83822_ack_interrupt,
 		.config_intr = dp83822_config_intr,
-		.config_aneg = genphy_config_aneg,
-		.read_status = genphy_read_status,
 		.suspend = dp83822_suspend,
 		.resume = dp83822_resume,
 	 },
diff --git a/drivers/net/phy/dp83848.c b/drivers/net/phy/dp83848.c
index 3966d43c5146..cd09c3af2117 100644
--- a/drivers/net/phy/dp83848.c
+++ b/drivers/net/phy/dp83848.c
@@ -95,8 +95,6 @@ MODULE_DEVICE_TABLE(mdio, dp83848_tbl);
 		.config_init	= genphy_config_init,		\
 		.suspend	= genphy_suspend,		\
 		.resume		= genphy_resume,		\
-		.config_aneg	= genphy_config_aneg,		\
-		.read_status	= genphy_read_status,		\
 								\
 		/* IRQ related */				\
 		.ack_interrupt	= dp83848_ack_interrupt,	\
diff --git a/drivers/net/phy/dp83867.c b/drivers/net/phy/dp83867.c
index c1ab976cc800..ab58224f897f 100644
--- a/drivers/net/phy/dp83867.c
+++ b/drivers/net/phy/dp83867.c
@@ -324,8 +324,6 @@ static struct phy_driver dp83867_driver[] = {
 		.ack_interrupt	= dp83867_ack_interrupt,
 		.config_intr	= dp83867_config_intr,
 
-		.config_aneg	= genphy_config_aneg,
-		.read_status	= genphy_read_status,
 		.suspend	= genphy_suspend,
 		.resume		= genphy_resume,
 	},
diff --git a/drivers/net/phy/fixed_phy.c b/drivers/net/phy/fixed_phy.c
index eb5167210681..001fe1df7557 100644
--- a/drivers/net/phy/fixed_phy.c
+++ b/drivers/net/phy/fixed_phy.c
@@ -115,37 +115,6 @@ int fixed_phy_set_link_update(struct phy_device *phydev,
 }
 EXPORT_SYMBOL_GPL(fixed_phy_set_link_update);
 
-int fixed_phy_update_state(struct phy_device *phydev,
-			   const struct fixed_phy_status *status,
-			   const struct fixed_phy_status *changed)
-{
-	struct fixed_mdio_bus *fmb = &platform_fmb;
-	struct fixed_phy *fp;
-
-	if (!phydev || phydev->mdio.bus != fmb->mii_bus)
-		return -EINVAL;
-
-	list_for_each_entry(fp, &fmb->phys, node) {
-		if (fp->addr == phydev->mdio.addr) {
-			write_seqcount_begin(&fp->seqcount);
-#define _UPD(x) if (changed->x) \
-	fp->status.x = status->x
-			_UPD(link);
-			_UPD(speed);
-			_UPD(duplex);
-			_UPD(pause);
-			_UPD(asym_pause);
-#undef _UPD
-			fixed_phy_update(fp);
-			write_seqcount_end(&fp->seqcount);
-			return 0;
-		}
-	}
-
-	return -ENOENT;
-}
-EXPORT_SYMBOL(fixed_phy_update_state);
-
 int fixed_phy_add(unsigned int irq, int phy_addr,
 		  struct fixed_phy_status *status,
 		  int link_gpio)
diff --git a/drivers/net/phy/icplus.c b/drivers/net/phy/icplus.c
index 567280a72241..791587a49215 100644
--- a/drivers/net/phy/icplus.c
+++ b/drivers/net/phy/icplus.c
@@ -227,8 +227,6 @@ static struct phy_driver icplus_driver[] = {
 	.phy_id_mask	= 0x0ffffff0,
 	.features	= PHY_GBIT_FEATURES,
 	.config_init	= &ip1001_config_init,
-	.config_aneg	= &genphy_config_aneg,
-	.read_status	= &genphy_read_status,
 	.suspend	= genphy_suspend,
 	.resume		= genphy_resume,
 }, {
@@ -239,8 +237,6 @@ static struct phy_driver icplus_driver[] = {
 	.flags		= PHY_HAS_INTERRUPT,
 	.ack_interrupt	= ip101a_g_ack_interrupt,
 	.config_init	= &ip101a_g_config_init,
-	.config_aneg	= &genphy_config_aneg,
-	.read_status	= &genphy_read_status,
 	.suspend	= genphy_suspend,
 	.resume		= genphy_resume,
 } };
diff --git a/drivers/net/phy/intel-xway.c b/drivers/net/phy/intel-xway.c
index 55f8c52dd2f1..a11f80cb5388 100644
--- a/drivers/net/phy/intel-xway.c
+++ b/drivers/net/phy/intel-xway.c
@@ -243,7 +243,6 @@ static struct phy_driver xway_gphy[] = {
 		.flags		= PHY_HAS_INTERRUPT,
 		.config_init	= xway_gphy_config_init,
 		.config_aneg	= xway_gphy14_config_aneg,
-		.read_status	= genphy_read_status,
 		.ack_interrupt	= xway_gphy_ack_interrupt,
 		.did_interrupt	= xway_gphy_did_interrupt,
 		.config_intr	= xway_gphy_config_intr,
@@ -257,7 +256,6 @@ static struct phy_driver xway_gphy[] = {
 		.flags		= PHY_HAS_INTERRUPT,
 		.config_init	= xway_gphy_config_init,
 		.config_aneg	= xway_gphy14_config_aneg,
-		.read_status	= genphy_read_status,
 		.ack_interrupt	= xway_gphy_ack_interrupt,
 		.did_interrupt	= xway_gphy_did_interrupt,
 		.config_intr	= xway_gphy_config_intr,
@@ -271,7 +269,6 @@ static struct phy_driver xway_gphy[] = {
 		.flags		= PHY_HAS_INTERRUPT,
 		.config_init	= xway_gphy_config_init,
 		.config_aneg	= xway_gphy14_config_aneg,
-		.read_status	= genphy_read_status,
 		.ack_interrupt	= xway_gphy_ack_interrupt,
 		.did_interrupt	= xway_gphy_did_interrupt,
 		.config_intr	= xway_gphy_config_intr,
@@ -285,7 +282,6 @@ static struct phy_driver xway_gphy[] = {
 		.flags		= PHY_HAS_INTERRUPT,
 		.config_init	= xway_gphy_config_init,
 		.config_aneg	= xway_gphy14_config_aneg,
-		.read_status	= genphy_read_status,
 		.ack_interrupt	= xway_gphy_ack_interrupt,
 		.did_interrupt	= xway_gphy_did_interrupt,
 		.config_intr	= xway_gphy_config_intr,
@@ -298,8 +294,6 @@ static struct phy_driver xway_gphy[] = {
 		.features	= PHY_GBIT_FEATURES,
 		.flags		= PHY_HAS_INTERRUPT,
 		.config_init	= xway_gphy_config_init,
-		.config_aneg	= genphy_config_aneg,
-		.read_status	= genphy_read_status,
 		.ack_interrupt	= xway_gphy_ack_interrupt,
 		.did_interrupt	= xway_gphy_did_interrupt,
 		.config_intr	= xway_gphy_config_intr,
@@ -312,8 +306,6 @@ static struct phy_driver xway_gphy[] = {
 		.features	= PHY_BASIC_FEATURES,
 		.flags		= PHY_HAS_INTERRUPT,
 		.config_init	= xway_gphy_config_init,
-		.config_aneg	= genphy_config_aneg,
-		.read_status	= genphy_read_status,
 		.ack_interrupt	= xway_gphy_ack_interrupt,
 		.did_interrupt	= xway_gphy_did_interrupt,
 		.config_intr	= xway_gphy_config_intr,
@@ -326,8 +318,6 @@ static struct phy_driver xway_gphy[] = {
 		.features	= PHY_GBIT_FEATURES,
 		.flags		= PHY_HAS_INTERRUPT,
 		.config_init	= xway_gphy_config_init,
-		.config_aneg	= genphy_config_aneg,
-		.read_status	= genphy_read_status,
 		.ack_interrupt	= xway_gphy_ack_interrupt,
 		.did_interrupt	= xway_gphy_did_interrupt,
 		.config_intr	= xway_gphy_config_intr,
@@ -340,8 +330,6 @@ static struct phy_driver xway_gphy[] = {
 		.features	= PHY_BASIC_FEATURES,
 		.flags		= PHY_HAS_INTERRUPT,
 		.config_init	= xway_gphy_config_init,
-		.config_aneg	= genphy_config_aneg,
-		.read_status	= genphy_read_status,
 		.ack_interrupt	= xway_gphy_ack_interrupt,
 		.did_interrupt	= xway_gphy_did_interrupt,
 		.config_intr	= xway_gphy_config_intr,
diff --git a/drivers/net/phy/lxt.c b/drivers/net/phy/lxt.c
index 09d215177fff..c14b254b2879 100644
--- a/drivers/net/phy/lxt.c
+++ b/drivers/net/phy/lxt.c
@@ -259,8 +259,6 @@ static struct phy_driver lxt97x_driver[] = {
 	.features	= PHY_BASIC_FEATURES,
 	.flags		= PHY_HAS_INTERRUPT,
 	.config_init	= lxt970_config_init,
-	.config_aneg	= genphy_config_aneg,
-	.read_status	= genphy_read_status,
 	.ack_interrupt	= lxt970_ack_interrupt,
 	.config_intr	= lxt970_config_intr,
 }, {
@@ -269,8 +267,6 @@ static struct phy_driver lxt97x_driver[] = {
 	.phy_id_mask	= 0xfffffff0,
 	.features	= PHY_BASIC_FEATURES,
 	.flags		= PHY_HAS_INTERRUPT,
-	.config_aneg	= genphy_config_aneg,
-	.read_status	= genphy_read_status,
 	.ack_interrupt	= lxt971_ack_interrupt,
 	.config_intr	= lxt971_config_intr,
 }, {
@@ -290,7 +286,6 @@ static struct phy_driver lxt97x_driver[] = {
 	.flags		= 0,
 	.probe		= lxt973_probe,
 	.config_aneg	= lxt973_config_aneg,
-	.read_status	= genphy_read_status,
 } };
 
 module_phy_driver(lxt97x_driver);
diff --git a/drivers/net/phy/marvell.c b/drivers/net/phy/marvell.c
index 82104edca393..22d9bc9c33a4 100644
--- a/drivers/net/phy/marvell.c
+++ b/drivers/net/phy/marvell.c
@@ -83,7 +83,7 @@
 #define MII_88E1121_PHY_MSCR_REG	21
 #define MII_88E1121_PHY_MSCR_RX_DELAY	BIT(5)
 #define MII_88E1121_PHY_MSCR_TX_DELAY	BIT(4)
-#define MII_88E1121_PHY_MSCR_DELAY_MASK	(~(BIT(5) | BIT(4)))
+#define MII_88E1121_PHY_MSCR_DELAY_MASK	(BIT(5) | BIT(4))
 
 #define MII_88E1121_MISC_TEST				0x1a
 #define MII_88E1510_MISC_TEST_TEMP_THRESHOLD_MASK	0x1f00
@@ -96,6 +96,17 @@
 #define MII_88E1510_TEMP_SENSOR		0x1b
 #define MII_88E1510_TEMP_SENSOR_MASK	0xff
 
+#define MII_88E6390_MISC_TEST		0x1b
+#define MII_88E6390_MISC_TEST_SAMPLE_1S		0
+#define MII_88E6390_MISC_TEST_SAMPLE_10MS	BIT(14)
+#define MII_88E6390_MISC_TEST_SAMPLE_DISABLE	BIT(15)
+#define MII_88E6390_MISC_TEST_SAMPLE_ENABLE	0
+#define MII_88E6390_MISC_TEST_SAMPLE_MASK	(0x3 << 14)
+
+#define MII_88E6390_TEMP_SENSOR		0x1c
+#define MII_88E6390_TEMP_SENSOR_MASK	0xff
+#define MII_88E6390_TEMP_SENSOR_SAMPLES 10
+
 #define MII_88E1318S_PHY_MSCR1_REG	16
 #define MII_88E1318S_PHY_MSCR1_PAD_ODD	BIT(6)
 
@@ -177,27 +188,19 @@ struct marvell_priv {
 	struct device *hwmon_dev;
 };
 
-static int marvell_get_page(struct phy_device *phydev)
+static int marvell_read_page(struct phy_device *phydev)
 {
-	return phy_read(phydev, MII_MARVELL_PHY_PAGE);
+	return __phy_read(phydev, MII_MARVELL_PHY_PAGE);
 }
 
-static int marvell_set_page(struct phy_device *phydev, int page)
+static int marvell_write_page(struct phy_device *phydev, int page)
 {
-	return phy_write(phydev, MII_MARVELL_PHY_PAGE, page);
+	return __phy_write(phydev, MII_MARVELL_PHY_PAGE, page);
 }
 
-static int marvell_get_set_page(struct phy_device *phydev, int page)
+static int marvell_set_page(struct phy_device *phydev, int page)
 {
-	int oldpage = marvell_get_page(phydev);
-
-	if (oldpage < 0)
-		return oldpage;
-
-	if (page != oldpage)
-		return marvell_set_page(phydev, page);
-
-	return 0;
+	return phy_write(phydev, MII_MARVELL_PHY_PAGE, page);
 }
 
 static int marvell_ack_interrupt(struct phy_device *phydev)
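The two callbacks above plug into phylib's new paged-access core; locking, page save/restore and error propagation are handled by the generic helpers. A minimal sketch of the consumer-side pattern used throughout the rest of this patch, with an illustrative page and register number:

static int example_read_paged_reg(struct phy_device *phydev)
{
	int oldpage, ret = 0;

	/* Takes the mdio bus lock, records the current page and
	 * switches to page 2 through the driver's write_page callback.
	 */
	oldpage = phy_select_page(phydev, 2);
	if (oldpage < 0)
		goto out;

	/* Unlocked accessor: safe while the lock taken by
	 * phy_select_page() is held.
	 */
	ret = __phy_read(phydev, 16);
out:
	/* Restores the page, drops the lock, and returns ret or the
	 * page-restore error.
	 */
	return phy_restore_page(phydev, oldpage, ret);
}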
@@ -399,7 +402,7 @@ static int m88e1111_config_aneg(struct phy_device *phydev)
 static int marvell_of_reg_init(struct phy_device *phydev)
 {
 	const __be32 *paddr;
-	int len, i, saved_page, current_page, ret;
+	int len, i, saved_page, current_page, ret = 0;
 
 	if (!phydev->mdio.dev.of_node)
 		return 0;
@@ -409,12 +412,11 @@ static int marvell_of_reg_init(struct phy_device *phydev)
 	if (!paddr || len < (4 * sizeof(*paddr)))
 		return 0;
 
-	saved_page = marvell_get_page(phydev);
+	saved_page = phy_save_page(phydev);
 	if (saved_page < 0)
-		return saved_page;
+		goto err;
 	current_page = saved_page;
 
-	ret = 0;
 	len /= sizeof(*paddr);
 	for (i = 0; i < len - 3; i += 4) {
 		u16 page = be32_to_cpup(paddr + i);
@@ -425,14 +427,14 @@ static int marvell_of_reg_init(struct phy_device *phydev)
 
 		if (page != current_page) {
 			current_page = page;
-			ret = marvell_set_page(phydev, page);
+			ret = marvell_write_page(phydev, page);
 			if (ret < 0)
 				goto err;
 		}
 
 		val = 0;
 		if (mask) {
-			val = phy_read(phydev, reg);
+			val = __phy_read(phydev, reg);
 			if (val < 0) {
 				ret = val;
 				goto err;
@@ -441,17 +443,12 @@ static int marvell_of_reg_init(struct phy_device *phydev)
 		}
 		val |= val_bits;
 
-		ret = phy_write(phydev, reg, val);
+		ret = __phy_write(phydev, reg, val);
 		if (ret < 0)
 			goto err;
 	}
 err:
-	if (current_page != saved_page) {
-		i = marvell_set_page(phydev, saved_page);
-		if (ret == 0)
-			ret = i;
-	}
-	return ret;
+	return phy_restore_page(phydev, saved_page, ret);
 }
 #else
 static int marvell_of_reg_init(struct phy_device *phydev)
@@ -462,34 +459,21 @@ static int marvell_of_reg_init(struct phy_device *phydev)
 
 static int m88e1121_config_aneg_rgmii_delays(struct phy_device *phydev)
 {
-	int err, oldpage, mscr;
-
-	oldpage = marvell_get_set_page(phydev, MII_MARVELL_MSCR_PAGE);
-	if (oldpage < 0)
-		return oldpage;
-
-	mscr = phy_read(phydev, MII_88E1121_PHY_MSCR_REG);
-	if (mscr < 0) {
-		err = mscr;
-		goto out;
-	}
-
-	mscr &= MII_88E1121_PHY_MSCR_DELAY_MASK;
+	int mscr;
 
 	if (phydev->interface == PHY_INTERFACE_MODE_RGMII_ID)
-		mscr |= (MII_88E1121_PHY_MSCR_RX_DELAY |
-			 MII_88E1121_PHY_MSCR_TX_DELAY);
+		mscr = MII_88E1121_PHY_MSCR_RX_DELAY |
+		       MII_88E1121_PHY_MSCR_TX_DELAY;
 	else if (phydev->interface == PHY_INTERFACE_MODE_RGMII_RXID)
-		mscr |= MII_88E1121_PHY_MSCR_RX_DELAY;
+		mscr = MII_88E1121_PHY_MSCR_RX_DELAY;
 	else if (phydev->interface == PHY_INTERFACE_MODE_RGMII_TXID)
-		mscr |= MII_88E1121_PHY_MSCR_TX_DELAY;
-
-	err = phy_write(phydev, MII_88E1121_PHY_MSCR_REG, mscr);
-
-out:
-	marvell_set_page(phydev, oldpage);
+		mscr = MII_88E1121_PHY_MSCR_TX_DELAY;
+	else
+		mscr = 0;
 
-	return err;
+	return phy_modify_paged(phydev, MII_MARVELL_MSCR_PAGE,
+				MII_88E1121_PHY_MSCR_REG,
+				MII_88E1121_PHY_MSCR_DELAY_MASK, mscr);
 }
 
 static int m88e1121_config_aneg(struct phy_device *phydev)
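This conversion is also why MII_88E1121_PHY_MSCR_DELAY_MASK loses its negation earlier in the patch: the phy_modify() family takes a positive (mask, set) pair. Open-coded, phy_modify(phydev, reg, mask, set) is equivalent to the following sketch (function name illustrative):

static int example_phy_modify(struct phy_device *phydev, u32 regnum,
			      u16 mask, u16 set)
{
	int ret;

	ret = phy_read(phydev, regnum);
	if (ret < 0)
		return ret;

	/* Clear the masked bits, then apply the requested ones */
	return phy_write(phydev, regnum, (ret & ~mask) | set);
}

phy_modify_paged() is the same read-modify-write bracketed by the page select/restore sequence shown above.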
@@ -498,7 +482,7 @@ static int m88e1121_config_aneg(struct phy_device *phydev)
 
 	if (phy_interface_is_rgmii(phydev)) {
 		err = m88e1121_config_aneg_rgmii_delays(phydev);
-		if (err)
+		if (err < 0)
 			return err;
 	}
 
@@ -515,20 +499,11 @@ static int m88e1121_config_aneg(struct phy_device *phydev)
 
 static int m88e1318_config_aneg(struct phy_device *phydev)
 {
-	int err, oldpage, mscr;
-
-	oldpage = marvell_get_set_page(phydev, MII_MARVELL_MSCR_PAGE);
-	if (oldpage < 0)
-		return oldpage;
-
-	mscr = phy_read(phydev, MII_88E1318S_PHY_MSCR1_REG);
-	mscr |= MII_88E1318S_PHY_MSCR1_PAD_ODD;
-
-	err = phy_write(phydev, MII_88E1318S_PHY_MSCR1_REG, mscr);
-	if (err < 0)
-		return err;
+	int err;
 
-	err = marvell_set_page(phydev, oldpage);
+	err = phy_modify_paged(phydev, MII_MARVELL_MSCR_PAGE,
+			       MII_88E1318S_PHY_MSCR1_REG,
+			       0, MII_88E1318S_PHY_MSCR1_PAD_ODD);
 	if (err < 0)
 		return err;
 
@@ -700,19 +675,14 @@ static int m88e1116r_config_init(struct phy_device *phydev)
 
 static int m88e3016_config_init(struct phy_device *phydev)
 {
-	int reg;
+	int ret;
 
 	/* Enable Scrambler and Auto-Crossover */
-	reg = phy_read(phydev, MII_88E3016_PHY_SPEC_CTRL);
-	if (reg < 0)
-		return reg;
-
-	reg &= ~MII_88E3016_DISABLE_SCRAMBLER;
-	reg |= MII_88E3016_AUTO_MDIX_CROSSOVER;
-
-	reg = phy_write(phydev, MII_88E3016_PHY_SPEC_CTRL, reg);
-	if (reg < 0)
-		return reg;
+	ret = phy_modify(phydev, MII_88E3016_PHY_SPEC_CTRL,
+			 MII_88E3016_DISABLE_SCRAMBLER,
+			 MII_88E3016_AUTO_MDIX_CROSSOVER);
+	if (ret < 0)
+		return ret;
 
 	return marvell_config_init(phydev);
 }
@@ -721,42 +691,33 @@ static int m88e1111_config_init_hwcfg_mode(struct phy_device *phydev,
 					   u16 mode,
 					   int fibre_copper_auto)
 {
-	int temp;
-
-	temp = phy_read(phydev, MII_M1111_PHY_EXT_SR);
-	if (temp < 0)
-		return temp;
-
-	temp &= ~(MII_M1111_HWCFG_MODE_MASK |
-		  MII_M1111_HWCFG_FIBER_COPPER_AUTO |
-		  MII_M1111_HWCFG_FIBER_COPPER_RES);
-	temp |= mode;
-
 	if (fibre_copper_auto)
-		temp |= MII_M1111_HWCFG_FIBER_COPPER_AUTO;
+		mode |= MII_M1111_HWCFG_FIBER_COPPER_AUTO;
 
-	return phy_write(phydev, MII_M1111_PHY_EXT_SR, temp);
+	return phy_modify(phydev, MII_M1111_PHY_EXT_SR,
+			  MII_M1111_HWCFG_MODE_MASK |
+			  MII_M1111_HWCFG_FIBER_COPPER_AUTO |
+			  MII_M1111_HWCFG_FIBER_COPPER_RES,
+			  mode);
 }
 
 static int m88e1111_config_init_rgmii_delays(struct phy_device *phydev)
 {
-	int temp;
-
-	temp = phy_read(phydev, MII_M1111_PHY_EXT_CR);
-	if (temp < 0)
-		return temp;
+	int delay;
 
 	if (phydev->interface == PHY_INTERFACE_MODE_RGMII_ID) {
-		temp |= (MII_M1111_RGMII_RX_DELAY | MII_M1111_RGMII_TX_DELAY);
+		delay = MII_M1111_RGMII_RX_DELAY | MII_M1111_RGMII_TX_DELAY;
 	} else if (phydev->interface == PHY_INTERFACE_MODE_RGMII_RXID) {
-		temp &= ~MII_M1111_RGMII_TX_DELAY;
-		temp |= MII_M1111_RGMII_RX_DELAY;
+		delay = MII_M1111_RGMII_RX_DELAY;
 	} else if (phydev->interface == PHY_INTERFACE_MODE_RGMII_TXID) {
-		temp &= ~MII_M1111_RGMII_RX_DELAY;
-		temp |= MII_M1111_RGMII_TX_DELAY;
+		delay = MII_M1111_RGMII_TX_DELAY;
+	} else {
+		delay = 0;
 	}
 
-	return phy_write(phydev, MII_M1111_PHY_EXT_CR, temp);
+	return phy_modify(phydev, MII_M1111_PHY_EXT_CR,
+			  MII_M1111_RGMII_RX_DELAY | MII_M1111_RGMII_TX_DELAY,
+			  delay);
 }
 
 static int m88e1111_config_init_rgmii(struct phy_device *phydev)
@@ -802,7 +763,7 @@ static int m88e1111_config_init_rtbi(struct phy_device *phydev)
 	int err;
 
 	err = m88e1111_config_init_rgmii_delays(phydev);
-	if (err)
+	if (err < 0)
 		return err;
 
 	err = m88e1111_config_init_hwcfg_mode(
@@ -829,7 +790,7 @@ static int m88e1111_config_init(struct phy_device *phydev)
 
 	if (phy_interface_is_rgmii(phydev)) {
 		err = m88e1111_config_init_rgmii(phydev);
-		if (err)
+		if (err < 0)
 			return err;
 	}
 
@@ -854,20 +815,15 @@ static int m88e1111_config_init(struct phy_device *phydev)
 
 static int m88e1121_config_init(struct phy_device *phydev)
 {
-	int err, oldpage;
-
-	oldpage = marvell_get_set_page(phydev, MII_MARVELL_LED_PAGE);
-	if (oldpage < 0)
-		return oldpage;
+	int err;
 
 	/* Default PHY LED config: LED[0] .. Link, LED[1] .. Activity */
-	err = phy_write(phydev, MII_88E1121_PHY_LED_CTRL,
-			MII_88E1121_PHY_LED_DEF);
+	err = phy_write_paged(phydev, MII_MARVELL_LED_PAGE,
+			      MII_88E1121_PHY_LED_CTRL,
+			      MII_88E1121_PHY_LED_DEF);
 	if (err < 0)
 		return err;
 
-	marvell_set_page(phydev, oldpage);
-
 	/* Set marvell,reg-init configuration from device tree */
 	return marvell_config_init(phydev);
 }
@@ -875,7 +831,6 @@ static int m88e1121_config_init(struct phy_device *phydev)
 static int m88e1510_config_init(struct phy_device *phydev)
 {
 	int err;
-	int temp;
 
 	/* SGMII-to-Copper mode initialization */
 	if (phydev->interface == PHY_INTERFACE_MODE_SGMII) {
@@ -887,16 +842,15 @@ static int m88e1510_config_init(struct phy_device *phydev)
 			return err;
 
 		/* In reg 20, write MODE[2:0] = 0x1 (SGMII to Copper) */
-		temp = phy_read(phydev, MII_88E1510_GEN_CTRL_REG_1);
-		temp &= ~MII_88E1510_GEN_CTRL_REG_1_MODE_MASK;
-		temp |= MII_88E1510_GEN_CTRL_REG_1_MODE_SGMII;
-		err = phy_write(phydev, MII_88E1510_GEN_CTRL_REG_1, temp);
+		err = phy_modify(phydev, MII_88E1510_GEN_CTRL_REG_1,
+				 MII_88E1510_GEN_CTRL_REG_1_MODE_MASK,
+				 MII_88E1510_GEN_CTRL_REG_1_MODE_SGMII);
 		if (err < 0)
 			return err;
 
 		/* PHY reset is necessary after changing MODE[2:0] */
-		temp |= MII_88E1510_GEN_CTRL_REG_1_RESET;
-		err = phy_write(phydev, MII_88E1510_GEN_CTRL_REG_1, temp);
+		err = phy_modify(phydev, MII_88E1510_GEN_CTRL_REG_1, 0,
+				 MII_88E1510_GEN_CTRL_REG_1_RESET);
 		if (err < 0)
 			return err;
 
@@ -1002,7 +956,6 @@ static int m88e1149_config_init(struct phy_device *phydev)
 
 static int m88e1145_config_init_rgmii(struct phy_device *phydev)
 {
-	int temp;
 	int err;
 
 	err = m88e1111_config_init_rgmii_delays(phydev);
@@ -1014,15 +967,9 @@ static int m88e1145_config_init_rgmii(struct phy_device *phydev)
 		if (err < 0)
 			return err;
 
-		temp = phy_read(phydev, 0x1e);
-		if (temp < 0)
-			return temp;
-
-		temp &= 0xf03f;
-		temp |= 2 << 9;	/* 36 ohm */
-		temp |= 2 << 6;	/* 39 ohm */
-
-		err = phy_write(phydev, 0x1e, temp);
+		err = phy_modify(phydev, 0x1e, 0x0fc0,
+				 2 << 9 | /* 36 ohm */
+				 2 << 6); /* 39 ohm */
 		if (err < 0)
 			return err;
 
@@ -1398,100 +1345,98 @@ static int m88e1121_did_interrupt(struct phy_device *phydev)
 static void m88e1318_get_wol(struct phy_device *phydev,
 			     struct ethtool_wolinfo *wol)
 {
+	int oldpage, ret = 0;
+
 	wol->supported = WAKE_MAGIC;
 	wol->wolopts = 0;
 
-	if (marvell_set_page(phydev, MII_MARVELL_WOL_PAGE) < 0)
-		return;
+	oldpage = phy_select_page(phydev, MII_MARVELL_WOL_PAGE);
+	if (oldpage < 0)
+		goto error;
 
-	if (phy_read(phydev, MII_88E1318S_PHY_WOL_CTRL) &
-	    MII_88E1318S_PHY_WOL_CTRL_MAGIC_PACKET_MATCH_ENABLE)
+	ret = __phy_read(phydev, MII_88E1318S_PHY_WOL_CTRL);
+	if (ret & MII_88E1318S_PHY_WOL_CTRL_MAGIC_PACKET_MATCH_ENABLE)
 		wol->wolopts |= WAKE_MAGIC;
 
-	if (marvell_set_page(phydev, MII_MARVELL_COPPER_PAGE) < 0)
-		return;
+error:
+	phy_restore_page(phydev, oldpage, ret);
 }
 
 static int m88e1318_set_wol(struct phy_device *phydev,
 			    struct ethtool_wolinfo *wol)
 {
-	int err, oldpage, temp;
+	int err = 0, oldpage;
 
-	oldpage = marvell_get_page(phydev);
+	oldpage = phy_save_page(phydev);
+	if (oldpage < 0)
+		goto error;
 
 	if (wol->wolopts & WAKE_MAGIC) {
 		/* Explicitly switch to page 0x00, just to be sure */
-		err = marvell_set_page(phydev, MII_MARVELL_COPPER_PAGE);
+		err = marvell_write_page(phydev, MII_MARVELL_COPPER_PAGE);
 		if (err < 0)
-			return err;
+			goto error;
 
 		/* Enable the WOL interrupt */
-		temp = phy_read(phydev, MII_88E1318S_PHY_CSIER);
-		temp |= MII_88E1318S_PHY_CSIER_WOL_EIE;
-		err = phy_write(phydev, MII_88E1318S_PHY_CSIER, temp);
+		err = __phy_modify(phydev, MII_88E1318S_PHY_CSIER, 0,
+				   MII_88E1318S_PHY_CSIER_WOL_EIE);
 		if (err < 0)
-			return err;
+			goto error;
 
-		err = marvell_set_page(phydev, MII_MARVELL_LED_PAGE);
+		err = marvell_write_page(phydev, MII_MARVELL_LED_PAGE);
 		if (err < 0)
-			return err;
+			goto error;
 
 		/* Setup LED[2] as interrupt pin (active low) */
-		temp = phy_read(phydev, MII_88E1318S_PHY_LED_TCR);
-		temp &= ~MII_88E1318S_PHY_LED_TCR_FORCE_INT;
-		temp |= MII_88E1318S_PHY_LED_TCR_INTn_ENABLE;
-		temp |= MII_88E1318S_PHY_LED_TCR_INT_ACTIVE_LOW;
-		err = phy_write(phydev, MII_88E1318S_PHY_LED_TCR, temp);
+		err = __phy_modify(phydev, MII_88E1318S_PHY_LED_TCR,
+				   MII_88E1318S_PHY_LED_TCR_FORCE_INT,
+				   MII_88E1318S_PHY_LED_TCR_INTn_ENABLE |
+				   MII_88E1318S_PHY_LED_TCR_INT_ACTIVE_LOW);
 		if (err < 0)
-			return err;
+			goto error;
 
-		err = marvell_set_page(phydev, MII_MARVELL_WOL_PAGE);
+		err = marvell_write_page(phydev, MII_MARVELL_WOL_PAGE);
 		if (err < 0)
-			return err;
+			goto error;
 
 		/* Store the device address for the magic packet */
-		err = phy_write(phydev, MII_88E1318S_PHY_MAGIC_PACKET_WORD2,
+		err = __phy_write(phydev, MII_88E1318S_PHY_MAGIC_PACKET_WORD2,
 				((phydev->attached_dev->dev_addr[5] << 8) |
 				 phydev->attached_dev->dev_addr[4]));
 		if (err < 0)
-			return err;
-		err = phy_write(phydev, MII_88E1318S_PHY_MAGIC_PACKET_WORD1,
+			goto error;
+		err = __phy_write(phydev, MII_88E1318S_PHY_MAGIC_PACKET_WORD1,
 				((phydev->attached_dev->dev_addr[3] << 8) |
 				 phydev->attached_dev->dev_addr[2]));
 		if (err < 0)
-			return err;
-		err = phy_write(phydev, MII_88E1318S_PHY_MAGIC_PACKET_WORD0,
+			goto error;
+		err = __phy_write(phydev, MII_88E1318S_PHY_MAGIC_PACKET_WORD0,
 				((phydev->attached_dev->dev_addr[1] << 8) |
 				 phydev->attached_dev->dev_addr[0]));
 		if (err < 0)
-			return err;
+			goto error;
 
 		/* Clear WOL status and enable magic packet matching */
-		temp = phy_read(phydev, MII_88E1318S_PHY_WOL_CTRL);
-		temp |= MII_88E1318S_PHY_WOL_CTRL_CLEAR_WOL_STATUS;
-		temp |= MII_88E1318S_PHY_WOL_CTRL_MAGIC_PACKET_MATCH_ENABLE;
-		err = phy_write(phydev, MII_88E1318S_PHY_WOL_CTRL, temp);
+		err = __phy_modify(phydev, MII_88E1318S_PHY_WOL_CTRL, 0,
+				   MII_88E1318S_PHY_WOL_CTRL_CLEAR_WOL_STATUS |
+				   MII_88E1318S_PHY_WOL_CTRL_MAGIC_PACKET_MATCH_ENABLE);
 		if (err < 0)
-			return err;
+			goto error;
 	} else {
-		err = marvell_set_page(phydev, MII_MARVELL_WOL_PAGE);
+		err = marvell_write_page(phydev, MII_MARVELL_WOL_PAGE);
 		if (err < 0)
-			return err;
+			goto error;
 
 		/* Clear WOL status and disable magic packet matching */
-		temp = phy_read(phydev, MII_88E1318S_PHY_WOL_CTRL);
-		temp |= MII_88E1318S_PHY_WOL_CTRL_CLEAR_WOL_STATUS;
-		temp &= ~MII_88E1318S_PHY_WOL_CTRL_MAGIC_PACKET_MATCH_ENABLE;
-		err = phy_write(phydev, MII_88E1318S_PHY_WOL_CTRL, temp);
+		err = __phy_modify(phydev, MII_88E1318S_PHY_WOL_CTRL,
+				   MII_88E1318S_PHY_WOL_CTRL_MAGIC_PACKET_MATCH_ENABLE,
+				   MII_88E1318S_PHY_WOL_CTRL_CLEAR_WOL_STATUS);
 		if (err < 0)
-			return err;
+			goto error;
 	}
 
-	err = marvell_set_page(phydev, oldpage);
-	if (err < 0)
-		return err;
-
-	return 0;
+error:
+	return phy_restore_page(phydev, oldpage, err);
 }
 
 static int marvell_get_sset_count(struct phy_device *phydev)
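The two WoL paths illustrate both entry points of the paged API: m88e1318_get_wol() uses phy_select_page() because it touches a single page, while m88e1318_set_wol() uses phy_save_page() (lock and remember only) because it hops across several pages itself via marvell_write_page(). A condensed sketch of the multi-page form; the register number and value are illustrative:

static int example_two_page_write(struct phy_device *phydev)
{
	int err = 0, oldpage;

	oldpage = phy_save_page(phydev);	/* lock + remember page */
	if (oldpage < 0)
		goto error;

	err = marvell_write_page(phydev, MII_MARVELL_LED_PAGE);
	if (err < 0)
		goto error;

	err = __phy_write(phydev, 16, 0x1234);	/* illustrative write */
error:
	/* Restore the saved page, unlock, and return err (or the
	 * restore error if err is zero).
	 */
	return phy_restore_page(phydev, oldpage, err);
}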
@@ -1519,14 +1464,10 @@ static u64 marvell_get_stat(struct phy_device *phydev, int i)
 {
 	struct marvell_hw_stat stat = marvell_hw_stats[i];
 	struct marvell_priv *priv = phydev->priv;
-	int oldpage, val;
+	int val;
 	u64 ret;
 
-	oldpage = marvell_get_set_page(phydev, stat.page);
-	if (oldpage < 0)
-		return UINT64_MAX;
-
-	val = phy_read(phydev, stat.reg);
+	val = phy_read_paged(phydev, stat.page, stat.reg);
 	if (val < 0) {
 		ret = UINT64_MAX;
 	} else {
@@ -1535,8 +1476,6 @@ static u64 marvell_get_stat(struct phy_device *phydev, int i)
 		ret = priv->stats[i];
 	}
 
-	marvell_set_page(phydev, oldpage);
-
 	return ret;
 }
 
@@ -1553,51 +1492,44 @@ static void marvell_get_stats(struct phy_device *phydev,
 static int m88e1121_get_temp(struct phy_device *phydev, long *temp)
 {
 	int oldpage;
-	int ret;
+	int ret = 0;
 	int val;
 
 	*temp = 0;
 
-	mutex_lock(&phydev->lock);
-
-	oldpage = marvell_get_set_page(phydev, MII_MARVELL_MISC_TEST_PAGE);
-	if (oldpage < 0) {
-		mutex_unlock(&phydev->lock);
-		return oldpage;
-	}
+	oldpage = phy_select_page(phydev, MII_MARVELL_MISC_TEST_PAGE);
+	if (oldpage < 0)
+		goto error;
 
 	/* Enable temperature sensor */
-	ret = phy_read(phydev, MII_88E1121_MISC_TEST);
+	ret = __phy_read(phydev, MII_88E1121_MISC_TEST);
 	if (ret < 0)
 		goto error;
 
-	ret = phy_write(phydev, MII_88E1121_MISC_TEST,
-			ret | MII_88E1121_MISC_TEST_TEMP_SENSOR_EN);
+	ret = __phy_write(phydev, MII_88E1121_MISC_TEST,
+			  ret | MII_88E1121_MISC_TEST_TEMP_SENSOR_EN);
 	if (ret < 0)
 		goto error;
 
 	/* Wait for temperature to stabilize */
 	usleep_range(10000, 12000);
 
-	val = phy_read(phydev, MII_88E1121_MISC_TEST);
+	val = __phy_read(phydev, MII_88E1121_MISC_TEST);
 	if (val < 0) {
 		ret = val;
 		goto error;
 	}
 
 	/* Disable temperature sensor */
-	ret = phy_write(phydev, MII_88E1121_MISC_TEST,
-			ret & ~MII_88E1121_MISC_TEST_TEMP_SENSOR_EN);
+	ret = __phy_write(phydev, MII_88E1121_MISC_TEST,
+			  ret & ~MII_88E1121_MISC_TEST_TEMP_SENSOR_EN);
 	if (ret < 0)
 		goto error;
 
 	*temp = ((val & MII_88E1121_MISC_TEST_TEMP_MASK) - 5) * 5000;
 
 error:
-	marvell_set_page(phydev, oldpage);
-	mutex_unlock(&phydev->lock);
-
-	return ret;
+	return phy_restore_page(phydev, oldpage, ret);
 }
 
 static int m88e1121_hwmon_read(struct device *dev,
@@ -1671,118 +1603,64 @@ static const struct hwmon_chip_info m88e1121_hwmon_chip_info = {
 
 static int m88e1510_get_temp(struct phy_device *phydev, long *temp)
 {
-	int oldpage;
 	int ret;
 
 	*temp = 0;
 
-	mutex_lock(&phydev->lock);
-
-	oldpage = marvell_get_set_page(phydev, MII_MARVELL_MISC_TEST_PAGE);
-	if (oldpage < 0) {
-		mutex_unlock(&phydev->lock);
-		return oldpage;
-	}
-
-	ret = phy_read(phydev, MII_88E1510_TEMP_SENSOR);
+	ret = phy_read_paged(phydev, MII_MARVELL_MISC_TEST_PAGE,
+			     MII_88E1510_TEMP_SENSOR);
 	if (ret < 0)
-		goto error;
+		return ret;
 
 	*temp = ((ret & MII_88E1510_TEMP_SENSOR_MASK) - 25) * 1000;
 
-error:
-	marvell_set_page(phydev, oldpage);
-	mutex_unlock(&phydev->lock);
-
-	return ret;
+	return 0;
 }
 
 static int m88e1510_get_temp_critical(struct phy_device *phydev, long *temp)
 {
-	int oldpage;
 	int ret;
 
 	*temp = 0;
 
-	mutex_lock(&phydev->lock);
-
-	oldpage = marvell_get_set_page(phydev, MII_MARVELL_MISC_TEST_PAGE);
-	if (oldpage < 0) {
-		mutex_unlock(&phydev->lock);
-		return oldpage;
-	}
-
-	ret = phy_read(phydev, MII_88E1121_MISC_TEST);
+	ret = phy_read_paged(phydev, MII_MARVELL_MISC_TEST_PAGE,
+			     MII_88E1121_MISC_TEST);
 	if (ret < 0)
-		goto error;
+		return ret;
 
 	*temp = (((ret & MII_88E1510_MISC_TEST_TEMP_THRESHOLD_MASK) >>
 		  MII_88E1510_MISC_TEST_TEMP_THRESHOLD_SHIFT) * 5) - 25;
 	/* convert to mC */
 	*temp *= 1000;
 
-error:
-	marvell_set_page(phydev, oldpage);
-	mutex_unlock(&phydev->lock);
-
-	return ret;
+	return 0;
 }
 
 static int m88e1510_set_temp_critical(struct phy_device *phydev, long temp)
 {
-	int oldpage;
-	int ret;
-
-	mutex_lock(&phydev->lock);
-
-	oldpage = marvell_get_set_page(phydev, MII_MARVELL_MISC_TEST_PAGE);
-	if (oldpage < 0) {
-		mutex_unlock(&phydev->lock);
-		return oldpage;
-	}
-
-	ret = phy_read(phydev, MII_88E1121_MISC_TEST);
-	if (ret < 0)
-		goto error;
-
 	temp = temp / 1000;
 	temp = clamp_val(DIV_ROUND_CLOSEST(temp, 5) + 5, 0, 0x1f);
-	ret = phy_write(phydev, MII_88E1121_MISC_TEST,
-			(ret & ~MII_88E1510_MISC_TEST_TEMP_THRESHOLD_MASK) |
-			(temp << MII_88E1510_MISC_TEST_TEMP_THRESHOLD_SHIFT));
-
-error:
-	marvell_set_page(phydev, oldpage);
-	mutex_unlock(&phydev->lock);
 
-	return ret;
+	return phy_modify_paged(phydev, MII_MARVELL_MISC_TEST_PAGE,
+				MII_88E1121_MISC_TEST,
+				MII_88E1510_MISC_TEST_TEMP_THRESHOLD_MASK,
+				temp << MII_88E1510_MISC_TEST_TEMP_THRESHOLD_SHIFT);
 }
 
 static int m88e1510_get_temp_alarm(struct phy_device *phydev, long *alarm)
 {
-	int oldpage;
 	int ret;
 
 	*alarm = false;
 
-	mutex_lock(&phydev->lock);
-
-	oldpage = marvell_get_set_page(phydev, MII_MARVELL_MISC_TEST_PAGE);
-	if (oldpage < 0) {
-		mutex_unlock(&phydev->lock);
-		return oldpage;
-	}
-
-	ret = phy_read(phydev, MII_88E1121_MISC_TEST);
+	ret = phy_read_paged(phydev, MII_MARVELL_MISC_TEST_PAGE,
+			     MII_88E1121_MISC_TEST);
 	if (ret < 0)
-		goto error;
-	*alarm = !!(ret & MII_88E1510_MISC_TEST_TEMP_IRQ);
+		return ret;
 
-error:
-	marvell_set_page(phydev, oldpage);
-	mutex_unlock(&phydev->lock);
+	*alarm = !!(ret & MII_88E1510_MISC_TEST_TEMP_IRQ);
 
-	return ret;
+	return 0;
 }
 
 static int m88e1510_hwmon_read(struct device *dev,
@@ -1871,6 +1749,123 @@ static const struct hwmon_chip_info m88e1510_hwmon_chip_info = {
 	.info = m88e1510_hwmon_info,
 };
 
+static int m88e6390_get_temp(struct phy_device *phydev, long *temp)
+{
+	int sum = 0;
+	int oldpage;
+	int ret = 0;
+	int i;
+
+	*temp = 0;
+
+	oldpage = phy_select_page(phydev, MII_MARVELL_MISC_TEST_PAGE);
+	if (oldpage < 0)
+		goto error;
+
+	/* Enable temperature sensor */
+	ret = __phy_read(phydev, MII_88E6390_MISC_TEST);
+	if (ret < 0)
+		goto error;
+
+	ret = ret & ~MII_88E6390_MISC_TEST_SAMPLE_MASK;
+	ret |= MII_88E6390_MISC_TEST_SAMPLE_ENABLE |
+		MII_88E6390_MISC_TEST_SAMPLE_1S;
+
+	ret = __phy_write(phydev, MII_88E6390_MISC_TEST, ret);
+	if (ret < 0)
+		goto error;
+
+	/* Wait for temperature to stabilize */
+	usleep_range(10000, 12000);
+
+	/* Reading the temperature sensor is subject to an erratum:
+	 * the value must be read a number of times and averaged to
+	 * get a reliable result.
+	 */
+	for (i = 0; i < MII_88E6390_TEMP_SENSOR_SAMPLES; i++) {
+		ret = __phy_read(phydev, MII_88E6390_TEMP_SENSOR);
+		if (ret < 0)
+			goto error;
+		sum += ret & MII_88E6390_TEMP_SENSOR_MASK;
+	}
+
+	sum /= MII_88E6390_TEMP_SENSOR_SAMPLES;
+	*temp = (sum - 75) * 1000;
+
+	/* Disable temperature sensor */
+	ret = __phy_read(phydev, MII_88E6390_MISC_TEST);
+	if (ret < 0)
+		goto error;
+
+	ret = ret & ~MII_88E6390_MISC_TEST_SAMPLE_MASK;
+	ret |= MII_88E6390_MISC_TEST_SAMPLE_DISABLE;
+
+	ret = __phy_write(phydev, MII_88E6390_MISC_TEST, ret);
+
+error:
+	return phy_restore_page(phydev, oldpage, ret);
+}
+
+static int m88e6390_hwmon_read(struct device *dev,
+			       enum hwmon_sensor_types type,
+			       u32 attr, int channel, long *temp)
+{
+	struct phy_device *phydev = dev_get_drvdata(dev);
+	int err;
+
+	switch (attr) {
+	case hwmon_temp_input:
+		err = m88e6390_get_temp(phydev, temp);
+		break;
+	default:
+		return -EOPNOTSUPP;
+	}
+
+	return err;
+}
+
+static umode_t m88e6390_hwmon_is_visible(const void *data,
+					 enum hwmon_sensor_types type,
+					 u32 attr, int channel)
+{
+	if (type != hwmon_temp)
+		return 0;
+
+	switch (attr) {
+	case hwmon_temp_input:
+		return 0444;
+	default:
+		return 0;
+	}
+}
+
+static u32 m88e6390_hwmon_temp_config[] = {
+	HWMON_T_INPUT,
+	0
+};
+
+static const struct hwmon_channel_info m88e6390_hwmon_temp = {
+	.type = hwmon_temp,
+	.config = m88e6390_hwmon_temp_config,
+};
+
+static const struct hwmon_channel_info *m88e6390_hwmon_info[] = {
+	&m88e1121_hwmon_chip,
+	&m88e6390_hwmon_temp,
+	NULL
+};
+
+static const struct hwmon_ops m88e6390_hwmon_hwmon_ops = {
+	.is_visible = m88e6390_hwmon_is_visible,
+	.read = m88e6390_hwmon_read,
+};
+
+static const struct hwmon_chip_info m88e6390_hwmon_chip_info = {
+	.ops = &m88e6390_hwmon_hwmon_ops,
+	.info = m88e6390_hwmon_info,
+};
+
 static int marvell_hwmon_name(struct phy_device *phydev)
 {
 	struct marvell_priv *priv = phydev->priv;
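For context, a chip_info table such as m88e6390_hwmon_chip_info is normally handed to the devm hwmon registration helper; marvell_hwmon_probe() (not visible in this hunk) is assumed to do the equivalent of the following sketch:

static int example_hwmon_register(struct phy_device *phydev)
{
	struct device *dev = &phydev->mdio.dev;
	struct device *hwmon_dev;

	/* phydev is passed as drvdata so m88e6390_hwmon_read() can
	 * recover it with dev_get_drvdata().
	 */
	hwmon_dev = devm_hwmon_device_register_with_info(dev,
					"example_phy", phydev,
					&m88e6390_hwmon_chip_info, NULL);
	return PTR_ERR_OR_ZERO(hwmon_dev);
}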
@@ -1917,6 +1912,11 @@ static int m88e1510_hwmon_probe(struct phy_device *phydev)
 {
 	return marvell_hwmon_probe(phydev, &m88e1510_hwmon_chip_info);
 }
+
+static int m88e6390_hwmon_probe(struct phy_device *phydev)
+{
+	return marvell_hwmon_probe(phydev, &m88e6390_hwmon_chip_info);
+}
 #else
 static int m88e1121_hwmon_probe(struct phy_device *phydev)
 {
@@ -1927,6 +1927,11 @@ static int m88e1510_hwmon_probe(struct phy_device *phydev)
 {
 	return 0;
 }
+
+static int m88e6390_hwmon_probe(struct phy_device *phydev)
+{
+	return 0;
+}
 #endif
 
 static int marvell_probe(struct phy_device *phydev)
@@ -1964,6 +1969,17 @@ static int m88e1510_probe(struct phy_device *phydev)
 	return m88e1510_hwmon_probe(phydev);
 }
 
+static int m88e6390_probe(struct phy_device *phydev)
+{
+	int err;
+
+	err = marvell_probe(phydev);
+	if (err)
+		return err;
+
+	return m88e6390_hwmon_probe(phydev);
+}
+
 static struct phy_driver marvell_drivers[] = {
 	{
 		.phy_id = MARVELL_PHY_ID_88E1101,
@@ -1974,11 +1990,12 @@ static struct phy_driver marvell_drivers[] = {
 		.probe = marvell_probe,
 		.config_init = &marvell_config_init,
 		.config_aneg = &m88e1101_config_aneg,
-		.read_status = &genphy_read_status,
 		.ack_interrupt = &marvell_ack_interrupt,
 		.config_intr = &marvell_config_intr,
 		.resume = &genphy_resume,
 		.suspend = &genphy_suspend,
+		.read_page = marvell_read_page,
+		.write_page = marvell_write_page,
 		.get_sset_count = marvell_get_sset_count,
 		.get_strings = marvell_get_strings,
 		.get_stats = marvell_get_stats,
@@ -1992,11 +2009,12 @@ static struct phy_driver marvell_drivers[] = {
 		.probe = marvell_probe,
 		.config_init = &m88e1111_config_init,
 		.config_aneg = &marvell_config_aneg,
-		.read_status = &genphy_read_status,
 		.ack_interrupt = &marvell_ack_interrupt,
 		.config_intr = &marvell_config_intr,
 		.resume = &genphy_resume,
 		.suspend = &genphy_suspend,
+		.read_page = marvell_read_page,
+		.write_page = marvell_write_page,
 		.get_sset_count = marvell_get_sset_count,
 		.get_strings = marvell_get_strings,
 		.get_stats = marvell_get_stats,
@@ -2015,6 +2033,8 @@ static struct phy_driver marvell_drivers[] = {
 		.config_intr = &marvell_config_intr,
 		.resume = &genphy_resume,
 		.suspend = &genphy_suspend,
+		.read_page = marvell_read_page,
+		.write_page = marvell_write_page,
 		.get_sset_count = marvell_get_sset_count,
 		.get_strings = marvell_get_strings,
 		.get_stats = marvell_get_stats,
@@ -2028,11 +2048,12 @@ static struct phy_driver marvell_drivers[] = {
 		.probe = marvell_probe,
 		.config_init = &m88e1118_config_init,
 		.config_aneg = &m88e1118_config_aneg,
-		.read_status = &genphy_read_status,
 		.ack_interrupt = &marvell_ack_interrupt,
 		.config_intr = &marvell_config_intr,
 		.resume = &genphy_resume,
 		.suspend = &genphy_suspend,
+		.read_page = marvell_read_page,
+		.write_page = marvell_write_page,
 		.get_sset_count = marvell_get_sset_count,
 		.get_strings = marvell_get_strings,
 		.get_stats = marvell_get_stats,
@@ -2052,6 +2073,8 @@ static struct phy_driver marvell_drivers[] = {
 		.did_interrupt = &m88e1121_did_interrupt,
 		.resume = &genphy_resume,
 		.suspend = &genphy_suspend,
+		.read_page = marvell_read_page,
+		.write_page = marvell_write_page,
 		.get_sset_count = marvell_get_sset_count,
 		.get_strings = marvell_get_strings,
 		.get_stats = marvell_get_stats,
@@ -2073,6 +2096,8 @@ static struct phy_driver marvell_drivers[] = {
 		.set_wol = &m88e1318_set_wol,
 		.resume = &genphy_resume,
 		.suspend = &genphy_suspend,
+		.read_page = marvell_read_page,
+		.write_page = marvell_write_page,
 		.get_sset_count = marvell_get_sset_count,
 		.get_strings = marvell_get_strings,
 		.get_stats = marvell_get_stats,
@@ -2091,6 +2116,8 @@ static struct phy_driver marvell_drivers[] = {
 		.config_intr = &marvell_config_intr,
 		.resume = &genphy_resume,
 		.suspend = &genphy_suspend,
+		.read_page = marvell_read_page,
+		.write_page = marvell_write_page,
 		.get_sset_count = marvell_get_sset_count,
 		.get_strings = marvell_get_strings,
 		.get_stats = marvell_get_stats,
@@ -2104,11 +2131,12 @@ static struct phy_driver marvell_drivers[] = {
 		.probe = marvell_probe,
 		.config_init = &m88e1149_config_init,
 		.config_aneg = &m88e1118_config_aneg,
-		.read_status = &genphy_read_status,
 		.ack_interrupt = &marvell_ack_interrupt,
 		.config_intr = &marvell_config_intr,
 		.resume = &genphy_resume,
 		.suspend = &genphy_suspend,
+		.read_page = marvell_read_page,
+		.write_page = marvell_write_page,
 		.get_sset_count = marvell_get_sset_count,
 		.get_strings = marvell_get_strings,
 		.get_stats = marvell_get_stats,
@@ -2122,11 +2150,12 @@ static struct phy_driver marvell_drivers[] = {
 		.probe = marvell_probe,
 		.config_init = &m88e1111_config_init,
 		.config_aneg = &marvell_config_aneg,
-		.read_status = &genphy_read_status,
 		.ack_interrupt = &marvell_ack_interrupt,
 		.config_intr = &marvell_config_intr,
 		.resume = &genphy_resume,
 		.suspend = &genphy_suspend,
+		.read_page = marvell_read_page,
+		.write_page = marvell_write_page,
 		.get_sset_count = marvell_get_sset_count,
 		.get_strings = marvell_get_strings,
 		.get_stats = marvell_get_stats,
@@ -2139,12 +2168,12 @@ static struct phy_driver marvell_drivers[] = {
 		.flags = PHY_HAS_INTERRUPT,
 		.probe = marvell_probe,
 		.config_init = &m88e1116r_config_init,
-		.config_aneg = &genphy_config_aneg,
-		.read_status = &genphy_read_status,
 		.ack_interrupt = &marvell_ack_interrupt,
 		.config_intr = &marvell_config_intr,
 		.resume = &genphy_resume,
 		.suspend = &genphy_suspend,
+		.read_page = marvell_read_page,
+		.write_page = marvell_write_page,
 		.get_sset_count = marvell_get_sset_count,
 		.get_strings = marvell_get_strings,
 		.get_stats = marvell_get_stats,
@@ -2166,6 +2195,8 @@ static struct phy_driver marvell_drivers[] = {
 		.set_wol = &m88e1318_set_wol,
 		.resume = &marvell_resume,
 		.suspend = &marvell_suspend,
+		.read_page = marvell_read_page,
+		.write_page = marvell_write_page,
 		.get_sset_count = marvell_get_sset_count,
 		.get_strings = marvell_get_strings,
 		.get_stats = marvell_get_stats,
@@ -2186,6 +2217,8 @@ static struct phy_driver marvell_drivers[] = {
 		.did_interrupt = &m88e1121_did_interrupt,
 		.resume = &genphy_resume,
 		.suspend = &genphy_suspend,
+		.read_page = marvell_read_page,
+		.write_page = marvell_write_page,
 		.get_sset_count = marvell_get_sset_count,
 		.get_strings = marvell_get_strings,
 		.get_stats = marvell_get_stats,
@@ -2205,6 +2238,8 @@ static struct phy_driver marvell_drivers[] = {
 		.did_interrupt = &m88e1121_did_interrupt,
 		.resume = &genphy_resume,
 		.suspend = &genphy_suspend,
+		.read_page = marvell_read_page,
+		.write_page = marvell_write_page,
 		.get_sset_count = marvell_get_sset_count,
 		.get_strings = marvell_get_strings,
 		.get_stats = marvell_get_stats,
@@ -2216,7 +2251,6 @@ static struct phy_driver marvell_drivers[] = {
 		.features = PHY_BASIC_FEATURES,
 		.flags = PHY_HAS_INTERRUPT,
 		.probe = marvell_probe,
-		.config_aneg = &genphy_config_aneg,
 		.config_init = &m88e3016_config_init,
 		.aneg_done = &marvell_aneg_done,
 		.read_status = &marvell_read_status,
@@ -2225,6 +2259,8 @@ static struct phy_driver marvell_drivers[] = {
 		.did_interrupt = &m88e1121_did_interrupt,
 		.resume = &genphy_resume,
 		.suspend = &genphy_suspend,
+		.read_page = marvell_read_page,
+		.write_page = marvell_write_page,
 		.get_sset_count = marvell_get_sset_count,
 		.get_strings = marvell_get_strings,
 		.get_stats = marvell_get_stats,
@@ -2235,7 +2271,7 @@ static struct phy_driver marvell_drivers[] = {
 		.name = "Marvell 88E6390",
 		.features = PHY_GBIT_FEATURES,
 		.flags = PHY_HAS_INTERRUPT,
-		.probe = m88e1510_probe,
+		.probe = m88e6390_probe,
 		.config_init = &marvell_config_init,
 		.config_aneg = &m88e1510_config_aneg,
 		.read_status = &marvell_read_status,
@@ -2244,6 +2280,8 @@ static struct phy_driver marvell_drivers[] = {
 		.did_interrupt = &m88e1121_did_interrupt,
 		.resume = &genphy_resume,
 		.suspend = &genphy_suspend,
+		.read_page = marvell_read_page,
+		.write_page = marvell_write_page,
 		.get_sset_count = marvell_get_sset_count,
 		.get_strings = marvell_get_strings,
 		.get_stats = marvell_get_stats,
diff --git a/drivers/net/phy/marvell10g.c b/drivers/net/phy/marvell10g.c
index 21b3f36e023a..8a0bd98fdec7 100644
--- a/drivers/net/phy/marvell10g.c
+++ b/drivers/net/phy/marvell10g.c
@@ -6,12 +6,18 @@
  *
  * There appears to be several different data paths through the PHY which
  * are automatically managed by the PHY.  The following has been determined
- * via observation and experimentation:
+ * via observation and experimentation for a setup using single-lane Serdes:
  *
  *       SGMII PHYXS -- BASE-T PCS -- 10G PMA -- AN -- Copper (for <= 1G)
  *  10GBASE-KR PHYXS -- BASE-T PCS -- 10G PMA -- AN -- Copper (for 10G)
  *  10GBASE-KR PHYXS -- BASE-R PCS -- Fiber
  *
+ * With XAUI, observation shows:
+ *
+ *        XAUI PHYXS -- <appropriate PCS as above>
+ *
+ * and no switching of the host interface mode occurs.
+ *
  * If both the fiber and copper ports are connected, the first to gain
  * link takes priority and the other port is completely locked out.
  */
@@ -23,19 +29,17 @@ enum {
 	MV_PCS_BASE_R		= 0x1000,
 	MV_PCS_1000BASEX	= 0x2000,
 
+	MV_PCS_PAIRSWAP		= 0x8182,
+	MV_PCS_PAIRSWAP_MASK	= 0x0003,
+	MV_PCS_PAIRSWAP_AB	= 0x0002,
+	MV_PCS_PAIRSWAP_NONE	= 0x0003,
+
 	/* These registers appear at 0x800X and 0xa00X - the 0xa00X control
 	 * registers appear to set themselves to the 0x800X when AN is
 	 * restarted, but status registers appear readable from either.
 	 */
 	MV_AN_CTRL1000		= 0x8000, /* 1000base-T control register */
 	MV_AN_STAT1000		= 0x8001, /* 1000base-T status register */
-
-	/* This register appears to reflect the copper status */
-	MV_AN_RESULT		= 0xa016,
-	MV_AN_RESULT_SPD_10	= BIT(12),
-	MV_AN_RESULT_SPD_100	= BIT(13),
-	MV_AN_RESULT_SPD_1000	= BIT(14),
-	MV_AN_RESULT_SPD_10000	= BIT(15),
 };
 
 static int mv3310_modify(struct phy_device *phydev, int devad, u16 reg,
@@ -84,7 +88,6 @@ static int mv3310_config_init(struct phy_device *phydev)
 
 	/* Check that the PHY interface type is compatible */
 	if (phydev->interface != PHY_INTERFACE_MODE_SGMII &&
-	    phydev->interface != PHY_INTERFACE_MODE_XGMII &&
 	    phydev->interface != PHY_INTERFACE_MODE_XAUI &&
 	    phydev->interface != PHY_INTERFACE_MODE_RXAUI &&
 	    phydev->interface != PHY_INTERFACE_MODE_10GKR)
@@ -150,12 +153,18 @@ static int mv3310_config_init(struct phy_device *phydev)
 		if (val & MDIO_PMA_EXTABLE_1000BKX)
 			__set_bit(ETHTOOL_LINK_MODE_1000baseKX_Full_BIT,
 				  supported);
-		if (val & MDIO_PMA_EXTABLE_100BTX)
+		if (val & MDIO_PMA_EXTABLE_100BTX) {
 			__set_bit(ETHTOOL_LINK_MODE_100baseT_Full_BIT,
 				  supported);
-		if (val & MDIO_PMA_EXTABLE_10BT)
+			__set_bit(ETHTOOL_LINK_MODE_100baseT_Half_BIT,
+				  supported);
+		}
+		if (val & MDIO_PMA_EXTABLE_10BT) {
 			__set_bit(ETHTOOL_LINK_MODE_10baseT_Full_BIT,
 				  supported);
+			__set_bit(ETHTOOL_LINK_MODE_10baseT_Half_BIT,
+				  supported);
+		}
 	}
 
 	if (!ethtool_convert_link_mode_to_legacy_u32(&mask, supported))
@@ -175,6 +184,9 @@ static int mv3310_config_aneg(struct phy_device *phydev)
 	u32 advertising;
 	int ret;
 
+	/* We don't support manual MDI control */
+	phydev->mdix_ctrl = ETH_TP_MDI_AUTO;
+
 	if (phydev->autoneg == AUTONEG_DISABLE) {
 		ret = genphy_c45_pma_setup_forced(phydev);
 		if (ret < 0)
@@ -233,6 +245,24 @@ static int mv3310_aneg_done(struct phy_device *phydev)
 	return genphy_c45_aneg_done(phydev);
 }
 
+static void mv3310_update_interface(struct phy_device *phydev)
+{
+	if ((phydev->interface == PHY_INTERFACE_MODE_SGMII ||
+	     phydev->interface == PHY_INTERFACE_MODE_10GKR) && phydev->link) {
+		/* The PHY automatically switches its serdes interface (and
+		 * active PHYXS instance) between Cisco SGMII and 10GBase-KR
+		 * modes according to the speed.  Florian suggests setting
+		 * phydev->interface to communicate this to the MAC. Only do
+		 * this if we are already in either SGMII or 10GBase-KR mode.
+		 */
+		if (phydev->speed == SPEED_10000)
+			phydev->interface = PHY_INTERFACE_MODE_10GKR;
+		else if (phydev->speed >= SPEED_10 &&
+			 phydev->speed < SPEED_10000)
+			phydev->interface = PHY_INTERFACE_MODE_SGMII;
+	}
+}
+
 /* 10GBASE-ER,LR,LRM,SR do not support autonegotiation. */
 static int mv3310_read_10gbr_status(struct phy_device *phydev)
 {
@@ -240,8 +270,7 @@ static int mv3310_read_10gbr_status(struct phy_device *phydev)
 	phydev->speed = SPEED_10000;
 	phydev->duplex = DUPLEX_FULL;
 
-	if (phydev->interface == PHY_INTERFACE_MODE_SGMII)
-		phydev->interface = PHY_INTERFACE_MODE_10GKR;
+	mv3310_update_interface(phydev);
 
 	return 0;
 }
@@ -264,6 +293,7 @@ static int mv3310_read_status(struct phy_device *phydev)
 	phydev->link = 0;
 	phydev->pause = 0;
 	phydev->asym_pause = 0;
+	phydev->mdix = 0;
 
 	val = phy_read_mmd(phydev, MDIO_MMD_PCS, MV_PCS_BASE_R + MDIO_STAT1);
 	if (val < 0)
@@ -294,22 +324,8 @@ static int mv3310_read_status(struct phy_device *phydev)
 
 		phydev->lp_advertising |= mii_stat1000_to_ethtool_lpa_t(val);
 
-		if (phydev->autoneg == AUTONEG_ENABLE) {
-			val = phy_read_mmd(phydev, MDIO_MMD_AN, MV_AN_RESULT);
-			if (val < 0)
-				return val;
-
-			if (val & MV_AN_RESULT_SPD_10000)
-				phydev->speed = SPEED_10000;
-			else if (val & MV_AN_RESULT_SPD_1000)
-				phydev->speed = SPEED_1000;
-			else if (val & MV_AN_RESULT_SPD_100)
-				phydev->speed = SPEED_100;
-			else if (val & MV_AN_RESULT_SPD_10)
-				phydev->speed = SPEED_10;
-
-			phydev->duplex = DUPLEX_FULL;
-		}
+		if (phydev->autoneg == AUTONEG_ENABLE)
+			phy_resolve_aneg_linkmode(phydev);
 	}
 
 	if (phydev->autoneg != AUTONEG_ENABLE) {
@@ -318,21 +334,30 @@ static int mv3310_read_status(struct phy_device *phydev)
 			return val;
 	}
 
-	if ((phydev->interface == PHY_INTERFACE_MODE_SGMII ||
-	     phydev->interface == PHY_INTERFACE_MODE_10GKR) && phydev->link) {
-		/* The PHY automatically switches its serdes interface (and
-		 * active PHYXS instance) between Cisco SGMII and 10GBase-KR
-		 * modes according to the speed.  Florian suggests setting
-		 * phydev->interface to communicate this to the MAC. Only do
-		 * this if we are already in either SGMII or 10GBase-KR mode.
-		 */
-		if (phydev->speed == SPEED_10000)
-			phydev->interface = PHY_INTERFACE_MODE_10GKR;
-		else if (phydev->speed >= SPEED_10 &&
-			 phydev->speed < SPEED_10000)
-			phydev->interface = PHY_INTERFACE_MODE_SGMII;
+	if (phydev->speed == SPEED_10000) {
+		val = genphy_c45_read_mdix(phydev);
+		if (val < 0)
+			return val;
+	} else {
+		val = phy_read_mmd(phydev, MDIO_MMD_PCS, MV_PCS_PAIRSWAP);
+		if (val < 0)
+			return val;
+
+		switch (val & MV_PCS_PAIRSWAP_MASK) {
+		case MV_PCS_PAIRSWAP_AB:
+			phydev->mdix = ETH_TP_MDI_X;
+			break;
+		case MV_PCS_PAIRSWAP_NONE:
+			phydev->mdix = ETH_TP_MDI;
+			break;
+		default:
+			phydev->mdix = ETH_TP_MDI_INVALID;
+			break;
+		}
 	}
 
+	mv3310_update_interface(phydev);
+
 	return 0;
 }
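phy_resolve_aneg_linkmode(), used earlier in mv3310_read_status(), replaces the vendor-specific MV_AN_RESULT read. As a rough approximation, and assuming the helper resolves the best mode common to both link partners from the legacy u32 advertising masks used in this kernel, it behaves like:

static void example_resolve_aneg(struct phy_device *phydev)
{
	u32 common = phydev->advertising & phydev->lp_advertising;

	if (common & ADVERTISED_10000baseT_Full) {
		phydev->speed = SPEED_10000;
		phydev->duplex = DUPLEX_FULL;
	} else if (common & ADVERTISED_1000baseT_Full) {
		phydev->speed = SPEED_1000;
		phydev->duplex = DUPLEX_FULL;
	} else if (common & ADVERTISED_100baseT_Full) {
		phydev->speed = SPEED_100;
		phydev->duplex = DUPLEX_FULL;
	} else if (common & ADVERTISED_100baseT_Half) {
		phydev->speed = SPEED_100;
		phydev->duplex = DUPLEX_HALF;
	} else {
		phydev->speed = SPEED_10;
		phydev->duplex = (common & ADVERTISED_10baseT_Full) ?
				 DUPLEX_FULL : DUPLEX_HALF;
	}
}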
 
@@ -342,7 +367,9 @@ static struct phy_driver mv3310_drivers[] = {
 		.phy_id_mask	= MARVELL_PHY_ID_MASK,
 		.name		= "mv88x3310",
 		.features	= SUPPORTED_10baseT_Full |
+				  SUPPORTED_10baseT_Half |
 				  SUPPORTED_100baseT_Full |
+				  SUPPORTED_100baseT_Half |
 				  SUPPORTED_1000baseT_Full |
 				  SUPPORTED_Autoneg |
 				  SUPPORTED_TP |
diff --git a/drivers/net/phy/mdio-bcm-unimac.c b/drivers/net/phy/mdio-bcm-unimac.c
index 08e0647b85e2..8d370667fa1b 100644
--- a/drivers/net/phy/mdio-bcm-unimac.c
+++ b/drivers/net/phy/mdio-bcm-unimac.c
@@ -205,6 +205,8 @@ static int unimac_mdio_probe(struct platform_device *pdev)
 		return -ENOMEM;
 
 	r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	if (!r)
+		return -EINVAL;
 
 	/* Just ioremap, as this MDIO block is usually integrated into an
 	 * Ethernet MAC controller register range
diff --git a/drivers/net/phy/mdio_bus.c b/drivers/net/phy/mdio_bus.c
index 54d00a1d2bef..88272b3ac2e2 100644
--- a/drivers/net/phy/mdio_bus.c
+++ b/drivers/net/phy/mdio_bus.c
@@ -38,6 +38,7 @@
 #include <linux/phy.h>
 #include <linux/io.h>
 #include <linux/uaccess.h>
+#include <linux/gpio/consumer.h>
 
 #include <asm/irq.h>
 
@@ -46,11 +47,41 @@
 
 #include "mdio-boardinfo.h"
 
+static int mdiobus_register_gpiod(struct mdio_device *mdiodev)
+{
+	struct gpio_desc *gpiod = NULL;
+
+	/* Deassert the optional reset signal */
+	if (mdiodev->dev.of_node)
+		gpiod = fwnode_get_named_gpiod(&mdiodev->dev.of_node->fwnode,
+					       "reset-gpios", 0, GPIOD_OUT_LOW,
+					       "PHY reset");
+	if (PTR_ERR(gpiod) == -ENOENT)
+		gpiod = NULL;
+	else if (IS_ERR(gpiod))
+		return PTR_ERR(gpiod);
+
+	mdiodev->reset = gpiod;
+
+	/* Assert the reset signal again */
+	mdio_device_reset(mdiodev, 1);
+
+	return 0;
+}
+
 int mdiobus_register_device(struct mdio_device *mdiodev)
 {
+	int err;
+
 	if (mdiodev->bus->mdio_map[mdiodev->addr])
 		return -EBUSY;
 
+	if (mdiodev->flags & MDIO_DEVICE_FLAG_PHY) {
+		err = mdiobus_register_gpiod(mdiodev);
+		if (err)
+			return err;
+	}
+
 	mdiodev->bus->mdio_map[mdiodev->addr] = mdiodev;
 
 	return 0;
@@ -421,6 +452,9 @@ void mdiobus_unregister(struct mii_bus *bus)
 		if (!mdiodev)
 			continue;
 
+		if (mdiodev->reset)
+			gpiod_put(mdiodev->reset);
+
 		mdiodev->device_remove(mdiodev);
 		mdiodev->device_free(mdiodev);
 	}
@@ -494,6 +528,55 @@ struct phy_device *mdiobus_scan(struct mii_bus *bus, int addr)
 EXPORT_SYMBOL(mdiobus_scan);
 
 /**
+ * __mdiobus_read - Unlocked version of the mdiobus_read function
+ * @bus: the mii_bus struct
+ * @addr: the phy address
+ * @regnum: register number to read
+ *
+ * Read an MDIO bus register. Caller must hold the mdio bus lock.
+ *
+ * NOTE: MUST NOT be called from interrupt context.
+ */
+int __mdiobus_read(struct mii_bus *bus, int addr, u32 regnum)
+{
+	int retval;
+
+	WARN_ON_ONCE(!mutex_is_locked(&bus->mdio_lock));
+
+	retval = bus->read(bus, addr, regnum);
+
+	trace_mdio_access(bus, 1, addr, regnum, retval, retval);
+
+	return retval;
+}
+EXPORT_SYMBOL(__mdiobus_read);
+
+/**
+ * __mdiobus_write - Unlocked version of the mdiobus_write function
+ * @bus: the mii_bus struct
+ * @addr: the phy address
+ * @regnum: register number to write
+ * @val: value to write to @regnum
+ *
+ * Write an MDIO bus register. Caller must hold the mdio bus lock.
+ *
+ * NOTE: MUST NOT be called from interrupt context.
+ */
+int __mdiobus_write(struct mii_bus *bus, int addr, u32 regnum, u16 val)
+{
+	int err;
+
+	WARN_ON_ONCE(!mutex_is_locked(&bus->mdio_lock));
+
+	err = bus->write(bus, addr, regnum, val);
+
+	trace_mdio_access(bus, 0, addr, regnum, val, err);
+
+	return err;
+}
+EXPORT_SYMBOL(__mdiobus_write);
+
+/**
  * mdiobus_read_nested - Nested version of the mdiobus_read function
  * @bus: the mii_bus struct
  * @addr: the phy address
@@ -513,11 +596,9 @@ int mdiobus_read_nested(struct mii_bus *bus, int addr, u32 regnum)
 	BUG_ON(in_interrupt());
 
 	mutex_lock_nested(&bus->mdio_lock, MDIO_MUTEX_NESTED);
-	retval = bus->read(bus, addr, regnum);
+	retval = __mdiobus_read(bus, addr, regnum);
 	mutex_unlock(&bus->mdio_lock);
 
-	trace_mdio_access(bus, 1, addr, regnum, retval, retval);
-
 	return retval;
 }
 EXPORT_SYMBOL(mdiobus_read_nested);
@@ -539,11 +620,9 @@ int mdiobus_read(struct mii_bus *bus, int addr, u32 regnum)
 	BUG_ON(in_interrupt());
 
 	mutex_lock(&bus->mdio_lock);
-	retval = bus->read(bus, addr, regnum);
+	retval = __mdiobus_read(bus, addr, regnum);
 	mutex_unlock(&bus->mdio_lock);
 
-	trace_mdio_access(bus, 1, addr, regnum, retval, retval);
-
 	return retval;
 }
 EXPORT_SYMBOL(mdiobus_read);
@@ -569,11 +648,9 @@ int mdiobus_write_nested(struct mii_bus *bus, int addr, u32 regnum, u16 val)
 	BUG_ON(in_interrupt());
 
 	mutex_lock_nested(&bus->mdio_lock, MDIO_MUTEX_NESTED);
-	err = bus->write(bus, addr, regnum, val);
+	err = __mdiobus_write(bus, addr, regnum, val);
 	mutex_unlock(&bus->mdio_lock);
 
-	trace_mdio_access(bus, 0, addr, regnum, val, err);
-
 	return err;
 }
 EXPORT_SYMBOL(mdiobus_write_nested);
@@ -596,11 +673,9 @@ int mdiobus_write(struct mii_bus *bus, int addr, u32 regnum, u16 val)
 	BUG_ON(in_interrupt());
 
 	mutex_lock(&bus->mdio_lock);
-	err = bus->write(bus, addr, regnum, val);
+	err = __mdiobus_write(bus, addr, regnum, val);
 	mutex_unlock(&bus->mdio_lock);
 
-	trace_mdio_access(bus, 0, addr, regnum, val, err);
-
 	return err;
 }
 EXPORT_SYMBOL(mdiobus_write);
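With tracing and the raw access factored into __mdiobus_read()/__mdiobus_write(), a caller needing several accesses under a single hold of the bus lock can use them directly. An illustrative example (register numbers made up for the sketch):

static int example_read_pair(struct mii_bus *bus, int addr, u32 *val)
{
	int lo, hi;

	/* Both reads happen atomically with respect to other bus users */
	mutex_lock(&bus->mdio_lock);
	lo = __mdiobus_read(bus, addr, 2);
	hi = __mdiobus_read(bus, addr, 3);
	mutex_unlock(&bus->mdio_lock);

	if (lo < 0)
		return lo;
	if (hi < 0)
		return hi;

	*val = (hi << 16) | lo;
	return 0;
}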
diff --git a/drivers/net/phy/mdio_device.c b/drivers/net/phy/mdio_device.c
index e24f28924af8..c924700cf37b 100644
--- a/drivers/net/phy/mdio_device.c
+++ b/drivers/net/phy/mdio_device.c
@@ -12,6 +12,8 @@
 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
 
 #include <linux/errno.h>
+#include <linux/gpio.h>
+#include <linux/gpio/consumer.h>
 #include <linux/init.h>
 #include <linux/interrupt.h>
 #include <linux/kernel.h>
@@ -22,6 +24,7 @@
 #include <linux/slab.h>
 #include <linux/string.h>
 #include <linux/unistd.h>
+#include <linux/delay.h>
 
 void mdio_device_free(struct mdio_device *mdiodev)
 {
@@ -114,6 +117,21 @@ void mdio_device_remove(struct mdio_device *mdiodev)
 }
 EXPORT_SYMBOL(mdio_device_remove);
 
+void mdio_device_reset(struct mdio_device *mdiodev, int value)
+{
+	unsigned int d;
+
+	if (!mdiodev->reset)
+		return;
+
+	gpiod_set_value(mdiodev->reset, value);
+
+	d = value ? mdiodev->reset_assert_delay : mdiodev->reset_deassert_delay;
+	if (d)
+		usleep_range(d, d + max_t(unsigned int, d / 10, 100));
+}
+EXPORT_SYMBOL(mdio_device_reset);
+
 /**
  * mdio_probe - probe an MDIO device
  * @dev: device to probe
@@ -128,8 +146,16 @@ static int mdio_probe(struct device *dev)
 	struct mdio_driver *mdiodrv = to_mdio_driver(drv);
 	int err = 0;
 
-	if (mdiodrv->probe)
+	if (mdiodrv->probe) {
+		/* Deassert the reset signal */
+		mdio_device_reset(mdiodev, 0);
+
 		err = mdiodrv->probe(mdiodev);
+		if (err) {
+			/* Assert the reset signal */
+			mdio_device_reset(mdiodev, 1);
+		}
+	}
 
 	return err;
 }
@@ -140,9 +166,13 @@ static int mdio_remove(struct device *dev)
 	struct device_driver *drv = mdiodev->dev.driver;
 	struct mdio_driver *mdiodrv = to_mdio_driver(drv);
 
-	if (mdiodrv->remove)
+	if (mdiodrv->remove) {
 		mdiodrv->remove(mdiodev);
 
+		/* Assert the reset signal */
+		mdio_device_reset(mdiodev, 1);
+	}
+
 	return 0;
 }
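The assert/deassert delays consumed by mdio_device_reset() are per-device fields on struct mdio_device; whoever instantiates the device (a driver, or bus setup code) is expected to fill them in. An illustrative sketch, values invented for the example:

static void example_set_reset_timing(struct mdio_device *mdiodev)
{
	/* Microseconds; these bound the usleep_range() in
	 * mdio_device_reset() above.
	 */
	mdiodev->reset_assert_delay = 1000;	/* hold reset for 1 ms */
	mdiodev->reset_deassert_delay = 2000;	/* 2 ms wake-up time */
}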
 
diff --git a/drivers/net/phy/meson-gxl.c b/drivers/net/phy/meson-gxl.c
index 842eb871a6e3..ddc2c5ea3787 100644
--- a/drivers/net/phy/meson-gxl.c
+++ b/drivers/net/phy/meson-gxl.c
@@ -24,31 +24,129 @@
 #include <linux/netdevice.h>
 #include <linux/bitfield.h>
 
-static int meson_gxl_config_init(struct phy_device *phydev)
+#define TSTCNTL		20
+#define  TSTCNTL_READ		BIT(15)
+#define  TSTCNTL_WRITE		BIT(14)
+#define  TSTCNTL_REG_BANK_SEL	GENMASK(12, 11)
+#define  TSTCNTL_TEST_MODE	BIT(10)
+#define  TSTCNTL_READ_ADDRESS	GENMASK(9, 5)
+#define  TSTCNTL_WRITE_ADDRESS	GENMASK(4, 0)
+#define TSTREAD1	21
+#define TSTWRITE	23
+#define INTSRC_FLAG	29
+#define  INTSRC_ANEG_PR		BIT(1)
+#define  INTSRC_PARALLEL_FAULT	BIT(2)
+#define  INTSRC_ANEG_LP_ACK	BIT(3)
+#define  INTSRC_LINK_DOWN	BIT(4)
+#define  INTSRC_REMOTE_FAULT	BIT(5)
+#define  INTSRC_ANEG_COMPLETE	BIT(6)
+#define INTSRC_MASK	30
+
+#define BANK_ANALOG_DSP		0
+#define BANK_WOL		1
+#define BANK_BIST		3
+
+/* WOL Registers */
+#define LPI_STATUS	0xc
+#define  LPI_STATUS_RSV12	BIT(12)
+
+/* BIST Registers */
+#define FR_PLL_CONTROL	0x1b
+#define FR_PLL_DIV0	0x1c
+#define FR_PLL_DIV1	0x1d
+
+static int meson_gxl_open_banks(struct phy_device *phydev)
 {
-	/* Enable Analog and DSP register Bank access by */
-	phy_write(phydev, 0x14, 0x0000);
-	phy_write(phydev, 0x14, 0x0400);
-	phy_write(phydev, 0x14, 0x0000);
-	phy_write(phydev, 0x14, 0x0400);
+	int ret;
+
+	/* Enable Analog and DSP register Bank access by
+	 * toggling the TSTCNTL_TEST_MODE bit in the TSTCNTL register
+	 */
+	ret = phy_write(phydev, TSTCNTL, 0);
+	if (ret)
+		return ret;
+	ret = phy_write(phydev, TSTCNTL, TSTCNTL_TEST_MODE);
+	if (ret)
+		return ret;
+	ret = phy_write(phydev, TSTCNTL, 0);
+	if (ret)
+		return ret;
+	return phy_write(phydev, TSTCNTL, TSTCNTL_TEST_MODE);
+}
+
+static void meson_gxl_close_banks(struct phy_device *phydev)
+{
+	phy_write(phydev, TSTCNTL, 0);
+}
+
+static int meson_gxl_read_reg(struct phy_device *phydev,
+			      unsigned int bank, unsigned int reg)
+{
+	int ret;
+
+	ret = meson_gxl_open_banks(phydev);
+	if (ret)
+		goto out;
+
+	ret = phy_write(phydev, TSTCNTL, TSTCNTL_READ |
+			FIELD_PREP(TSTCNTL_REG_BANK_SEL, bank) |
+			TSTCNTL_TEST_MODE |
+			FIELD_PREP(TSTCNTL_READ_ADDRESS, reg));
+	if (ret)
+		goto out;
+
+	ret = phy_read(phydev, TSTREAD1);
+out:
+	/* Close the bank access on our way out */
+	meson_gxl_close_banks(phydev);
+	return ret;
+}
+
+static int meson_gxl_write_reg(struct phy_device *phydev,
+			       unsigned int bank, unsigned int reg,
+			       uint16_t value)
+{
+	int ret;
+
+	ret = meson_gxl_open_banks(phydev);
+	if (ret)
+		goto out;
+
+	ret = phy_write(phydev, TSTWRITE, value);
+	if (ret)
+		goto out;
+
+	ret = phy_write(phydev, TSTCNTL, TSTCNTL_WRITE |
+			FIELD_PREP(TSTCNTL_REG_BANK_SEL, bank) |
+			TSTCNTL_TEST_MODE |
+			FIELD_PREP(TSTCNTL_WRITE_ADDRESS, reg));
 
-	/* Write Analog register 23 */
-	phy_write(phydev, 0x17, 0x8E0D);
-	phy_write(phydev, 0x14, 0x4417);
+out:
+	/* Close the bank access on our way out */
+	meson_gxl_close_banks(phydev);
+	return ret;
+}
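
A minimal sketch of how the two helpers above compose, assuming a hypothetical caller (meson_gxl_check_lpi is not part of this patch); it mirrors the pattern meson_gxl_read_status() adopts further down:

	/* Read LPI_STATUS from the WOL bank and test the reserved bit
	 * that the driver uses as an aneg-sanity indicator.
	 */
	static int meson_gxl_check_lpi(struct phy_device *phydev)
	{
		int wol = meson_gxl_read_reg(phydev, BANK_WOL, LPI_STATUS);

		if (wol < 0)
			return wol;

		return !!(wol & LPI_STATUS_RSV12);
	}
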
+
+static int meson_gxl_config_init(struct phy_device *phydev)
+{
+	int ret;
 
 	/* Enable fractional PLL */
-	phy_write(phydev, 0x17, 0x0005);
-	phy_write(phydev, 0x14, 0x5C1B);
+	ret = meson_gxl_write_reg(phydev, BANK_BIST, FR_PLL_CONTROL, 0x5);
+	if (ret)
+		return ret;
 
 	/* Program fraction FR_PLL_DIV1 */
-	phy_write(phydev, 0x17, 0x029A);
-	phy_write(phydev, 0x14, 0x5C1D);
+	ret = meson_gxl_write_reg(phydev, BANK_BIST, FR_PLL_DIV1, 0x029a);
+	if (ret)
+		return ret;
 
 	/* Program fraction FR_PLL_DIV0 */
-	phy_write(phydev, 0x17, 0xAAAA);
-	phy_write(phydev, 0x14, 0x5C1C);
+	ret = meson_gxl_write_reg(phydev, BANK_BIST, FR_PLL_DIV0, 0xaaaa);
+	if (ret)
+		return ret;
 
-	return 0;
+	return genphy_config_init(phydev);
 }
 
 /* This function is provided to cope with the possible failures of this phy
@@ -78,27 +176,8 @@ static int meson_gxl_read_status(struct phy_device *phydev)
 		else if (!ret)
 			goto read_status_continue;
 
-		/* Need to access WOL bank, make sure the access is open */
-		ret = phy_write(phydev, 0x14, 0x0000);
-		if (ret)
-			return ret;
-		ret = phy_write(phydev, 0x14, 0x0400);
-		if (ret)
-			return ret;
-		ret = phy_write(phydev, 0x14, 0x0000);
-		if (ret)
-			return ret;
-		ret = phy_write(phydev, 0x14, 0x0400);
-		if (ret)
-			return ret;
-
-		/* Request LPI_STATUS WOL register */
-		ret = phy_write(phydev, 0x14, 0x8D80);
-		if (ret)
-			return ret;
-
-		/* Read LPI_STATUS value */
-		wol = phy_read(phydev, 0x15);
+		/* Aneg is done, let's check everything is fine */
+		wol = meson_gxl_read_reg(phydev, BANK_WOL, LPI_STATUS);
 		if (wol < 0)
 			return wol;
 
@@ -110,7 +189,7 @@ static int meson_gxl_read_status(struct phy_device *phydev)
 		if (exp < 0)
 			return exp;
 
-		if (!(wol & BIT(12)) ||
+		if (!(wol & LPI_STATUS_RSV12) ||
 		    ((exp & EXPANSION_NWAY) && !(lpa & LPA_LPACK))) {
 			/* Looks like aneg failed after all */
 			phydev_dbg(phydev, "LPA corruption - aneg restart\n");
@@ -122,17 +201,43 @@ read_status_continue:
 	return genphy_read_status(phydev);
 }
 
+static int meson_gxl_ack_interrupt(struct phy_device *phydev)
+{
+	int ret = phy_read(phydev, INTSRC_FLAG);
+
+	return ret < 0 ? ret : 0;
+}
+
+static int meson_gxl_config_intr(struct phy_device *phydev)
+{
+	u16 val;
+
+	if (phydev->interrupts == PHY_INTERRUPT_ENABLED) {
+		val = INTSRC_ANEG_PR
+			| INTSRC_PARALLEL_FAULT
+			| INTSRC_ANEG_LP_ACK
+			| INTSRC_LINK_DOWN
+			| INTSRC_REMOTE_FAULT
+			| INTSRC_ANEG_COMPLETE;
+	} else {
+		val = 0;
+	}
+
+	return phy_write(phydev, INTSRC_MASK, val);
+}
+
 static struct phy_driver meson_gxl_phy[] = {
 	{
 		.phy_id		= 0x01814400,
 		.phy_id_mask	= 0xfffffff0,
 		.name		= "Meson GXL Internal PHY",
 		.features	= PHY_BASIC_FEATURES,
-		.flags		= PHY_IS_INTERNAL,
+		.flags		= PHY_IS_INTERNAL | PHY_HAS_INTERRUPT,
 		.config_init	= meson_gxl_config_init,
-		.config_aneg	= genphy_config_aneg,
 		.aneg_done      = genphy_aneg_done,
 		.read_status	= meson_gxl_read_status,
+		.ack_interrupt	= meson_gxl_ack_interrupt,
+		.config_intr	= meson_gxl_config_intr,
 		.suspend        = genphy_suspend,
 		.resume         = genphy_resume,
 	},
@@ -150,4 +255,5 @@ MODULE_DEVICE_TABLE(mdio, meson_gxl_tbl);
 MODULE_DESCRIPTION("Amlogic Meson GXL Internal PHY driver");
 MODULE_AUTHOR("Baoqi wang");
 MODULE_AUTHOR("Neil Armstrong <narmstrong@baylibre.com>");
+MODULE_AUTHOR("Jerome Brunet <jbrunet@baylibre.com>");
 MODULE_LICENSE("GPL");
diff --git a/drivers/net/phy/micrel.c b/drivers/net/phy/micrel.c
index 422ff6333c52..0f45310300f6 100644
--- a/drivers/net/phy/micrel.c
+++ b/drivers/net/phy/micrel.c
@@ -802,8 +802,6 @@ static struct phy_driver ksphy_driver[] = {
 	.flags		= PHY_HAS_INTERRUPT,
 	.driver_data	= &ks8737_type,
 	.config_init	= kszphy_config_init,
-	.config_aneg	= genphy_config_aneg,
-	.read_status	= genphy_read_status,
 	.ack_interrupt	= kszphy_ack_interrupt,
 	.config_intr	= kszphy_config_intr,
 	.suspend	= genphy_suspend,
@@ -817,8 +815,6 @@ static struct phy_driver ksphy_driver[] = {
 	.driver_data	= &ksz8021_type,
 	.probe		= kszphy_probe,
 	.config_init	= kszphy_config_init,
-	.config_aneg	= genphy_config_aneg,
-	.read_status	= genphy_read_status,
 	.ack_interrupt	= kszphy_ack_interrupt,
 	.config_intr	= kszphy_config_intr,
 	.get_sset_count = kszphy_get_sset_count,
@@ -835,8 +831,6 @@ static struct phy_driver ksphy_driver[] = {
 	.driver_data	= &ksz8021_type,
 	.probe		= kszphy_probe,
 	.config_init	= kszphy_config_init,
-	.config_aneg	= genphy_config_aneg,
-	.read_status	= genphy_read_status,
 	.ack_interrupt	= kszphy_ack_interrupt,
 	.config_intr	= kszphy_config_intr,
 	.get_sset_count = kszphy_get_sset_count,
@@ -854,7 +848,6 @@ static struct phy_driver ksphy_driver[] = {
 	.probe		= kszphy_probe,
 	.config_init	= ksz8041_config_init,
 	.config_aneg	= ksz8041_config_aneg,
-	.read_status	= genphy_read_status,
 	.ack_interrupt	= kszphy_ack_interrupt,
 	.config_intr	= kszphy_config_intr,
 	.get_sset_count = kszphy_get_sset_count,
@@ -871,8 +864,6 @@ static struct phy_driver ksphy_driver[] = {
 	.driver_data	= &ksz8041_type,
 	.probe		= kszphy_probe,
 	.config_init	= kszphy_config_init,
-	.config_aneg	= genphy_config_aneg,
-	.read_status	= genphy_read_status,
 	.ack_interrupt	= kszphy_ack_interrupt,
 	.config_intr	= kszphy_config_intr,
 	.get_sset_count = kszphy_get_sset_count,
@@ -889,8 +880,6 @@ static struct phy_driver ksphy_driver[] = {
 	.driver_data	= &ksz8051_type,
 	.probe		= kszphy_probe,
 	.config_init	= kszphy_config_init,
-	.config_aneg	= genphy_config_aneg,
-	.read_status	= genphy_read_status,
 	.ack_interrupt	= kszphy_ack_interrupt,
 	.config_intr	= kszphy_config_intr,
 	.get_sset_count = kszphy_get_sset_count,
@@ -907,8 +896,6 @@ static struct phy_driver ksphy_driver[] = {
 	.driver_data	= &ksz8041_type,
 	.probe		= kszphy_probe,
 	.config_init	= kszphy_config_init,
-	.config_aneg	= genphy_config_aneg,
-	.read_status	= genphy_read_status,
 	.ack_interrupt	= kszphy_ack_interrupt,
 	.config_intr	= kszphy_config_intr,
 	.get_sset_count = kszphy_get_sset_count,
@@ -925,8 +912,6 @@ static struct phy_driver ksphy_driver[] = {
 	.driver_data	= &ksz8081_type,
 	.probe		= kszphy_probe,
 	.config_init	= kszphy_config_init,
-	.config_aneg	= genphy_config_aneg,
-	.read_status	= genphy_read_status,
 	.ack_interrupt	= kszphy_ack_interrupt,
 	.config_intr	= kszphy_config_intr,
 	.get_sset_count = kszphy_get_sset_count,
@@ -941,8 +926,6 @@ static struct phy_driver ksphy_driver[] = {
 	.features	= PHY_BASIC_FEATURES,
 	.flags		= PHY_HAS_INTERRUPT,
 	.config_init	= kszphy_config_init,
-	.config_aneg	= genphy_config_aneg,
-	.read_status	= genphy_read_status,
 	.ack_interrupt	= kszphy_ack_interrupt,
 	.config_intr	= kszphy_config_intr,
 	.suspend	= genphy_suspend,
@@ -956,8 +939,6 @@ static struct phy_driver ksphy_driver[] = {
 	.driver_data	= &ksz9021_type,
 	.probe		= kszphy_probe,
 	.config_init	= ksz9021_config_init,
-	.config_aneg	= genphy_config_aneg,
-	.read_status	= genphy_read_status,
 	.ack_interrupt	= kszphy_ack_interrupt,
 	.config_intr	= kszphy_config_intr,
 	.get_sset_count = kszphy_get_sset_count,
@@ -976,7 +957,6 @@ static struct phy_driver ksphy_driver[] = {
 	.driver_data	= &ksz9021_type,
 	.probe		= kszphy_probe,
 	.config_init	= ksz9031_config_init,
-	.config_aneg	= genphy_config_aneg,
 	.read_status	= ksz9031_read_status,
 	.ack_interrupt	= kszphy_ack_interrupt,
 	.config_intr	= kszphy_config_intr,
@@ -1001,8 +981,6 @@ static struct phy_driver ksphy_driver[] = {
 	.features	= PHY_BASIC_FEATURES,
 	.flags		= PHY_HAS_INTERRUPT,
 	.config_init	= kszphy_config_init,
-	.config_aneg	= genphy_config_aneg,
-	.read_status	= genphy_read_status,
 	.suspend	= genphy_suspend,
 	.resume		= genphy_resume,
 }, {
@@ -1022,8 +1000,6 @@ static struct phy_driver ksphy_driver[] = {
 	.name		= "Microchip KSZ9477",
 	.features	= PHY_GBIT_FEATURES,
 	.config_init	= kszphy_config_init,
-	.config_aneg	= genphy_config_aneg,
-	.read_status	= genphy_read_status,
 	.suspend	= genphy_suspend,
 	.resume		= genphy_resume,
 } };
diff --git a/drivers/net/phy/microchip.c b/drivers/net/phy/microchip.c
index 37ee856c7680..0f293ef28935 100644
--- a/drivers/net/phy/microchip.c
+++ b/drivers/net/phy/microchip.c
@@ -153,7 +153,6 @@ static struct phy_driver microchip_phy_driver[] = {
 
 	.config_init	= genphy_config_init,
 	.config_aneg	= lan88xx_config_aneg,
-	.read_status	= genphy_read_status,
 
 	.ack_interrupt	= lan88xx_phy_ack_interrupt,
 	.config_intr	= lan88xx_phy_config_intr,
diff --git a/drivers/net/phy/national.c b/drivers/net/phy/national.c
index 2addf1d3f619..2b1e336961f9 100644
--- a/drivers/net/phy/national.c
+++ b/drivers/net/phy/national.c
@@ -136,8 +136,6 @@ static struct phy_driver dp83865_driver[] = { {
 	.features = PHY_GBIT_FEATURES,
 	.flags = PHY_HAS_INTERRUPT,
 	.config_init = ns_config_init,
-	.config_aneg = genphy_config_aneg,
-	.read_status = genphy_read_status,
 	.ack_interrupt = ns_ack_interrupt,
 	.config_intr = ns_config_intr,
 } };
diff --git a/drivers/net/phy/phy-c45.c b/drivers/net/phy/phy-c45.c
index dada819c6b78..a4576859afae 100644
--- a/drivers/net/phy/phy-c45.c
+++ b/drivers/net/phy/phy-c45.c
@@ -233,6 +233,39 @@ int genphy_c45_read_pma(struct phy_device *phydev)
 }
 EXPORT_SYMBOL_GPL(genphy_c45_read_pma);
 
+/**
+ * genphy_c45_read_mdix - read mdix status from PMA
+ * @phydev: target phy_device struct
+ */
+int genphy_c45_read_mdix(struct phy_device *phydev)
+{
+	int val;
+
+	if (phydev->speed == SPEED_10000) {
+		val = phy_read_mmd(phydev, MDIO_MMD_PMAPMD,
+				   MDIO_PMA_10GBT_SWAPPOL);
+		if (val < 0)
+			return val;
+
+		switch (val) {
+		case MDIO_PMA_10GBT_SWAPPOL_ABNX | MDIO_PMA_10GBT_SWAPPOL_CDNX:
+			phydev->mdix = ETH_TP_MDI;
+			break;
+
+		case 0:
+			phydev->mdix = ETH_TP_MDI_X;
+			break;
+
+		default:
+			phydev->mdix = ETH_TP_MDI_INVALID;
+			break;
+		}
+	}
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(genphy_c45_read_mdix);
+
 /* The gen10g_* functions are the old Clause 45 stub */
 
 static int gen10g_config_aneg(struct phy_device *phydev)
diff --git a/drivers/net/phy/phy-core.c b/drivers/net/phy/phy-core.c
index 21f75ae244b3..4083f00c97a5 100644
--- a/drivers/net/phy/phy-core.c
+++ b/drivers/net/phy/phy-core.c
@@ -189,17 +189,61 @@ size_t phy_speeds(unsigned int *speeds, size_t size,
 	return count;
 }
 
+/**
+ * phy_resolve_aneg_linkmode - resolve the advertisements into phy settings
+ * @phydev: The phy_device struct
+ *
+ * Resolve our and the link partner advertisements into their corresponding
+ * speed and duplex. If full duplex was negotiated, extract the pause mode
+ * from the link partner mask.
+ */
+void phy_resolve_aneg_linkmode(struct phy_device *phydev)
+{
+	u32 common = phydev->lp_advertising & phydev->advertising;
+
+	if (common & ADVERTISED_10000baseT_Full) {
+		phydev->speed = SPEED_10000;
+		phydev->duplex = DUPLEX_FULL;
+	} else if (common & ADVERTISED_1000baseT_Full) {
+		phydev->speed = SPEED_1000;
+		phydev->duplex = DUPLEX_FULL;
+	} else if (common & ADVERTISED_1000baseT_Half) {
+		phydev->speed = SPEED_1000;
+		phydev->duplex = DUPLEX_HALF;
+	} else if (common & ADVERTISED_100baseT_Full) {
+		phydev->speed = SPEED_100;
+		phydev->duplex = DUPLEX_FULL;
+	} else if (common & ADVERTISED_100baseT_Half) {
+		phydev->speed = SPEED_100;
+		phydev->duplex = DUPLEX_HALF;
+	} else if (common & ADVERTISED_10baseT_Full) {
+		phydev->speed = SPEED_10;
+		phydev->duplex = DUPLEX_FULL;
+	} else if (common & ADVERTISED_10baseT_Half) {
+		phydev->speed = SPEED_10;
+		phydev->duplex = DUPLEX_HALF;
+	}
+
+	if (phydev->duplex == DUPLEX_FULL) {
+		phydev->pause = !!(phydev->lp_advertising & ADVERTISED_Pause);
+		phydev->asym_pause = !!(phydev->lp_advertising &
+					ADVERTISED_Asym_Pause);
+	}
+}
+EXPORT_SYMBOL_GPL(phy_resolve_aneg_linkmode);
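
A worked example of the resolution order, with hypothetical advertisement masks: the highest common mode wins, and pause is taken from the link partner only once full duplex is established:

	/* Illustration only; masks would normally come from LPA/STAT1000 */
	phydev->advertising    = ADVERTISED_100baseT_Full | ADVERTISED_10baseT_Full;
	phydev->lp_advertising = ADVERTISED_100baseT_Full | ADVERTISED_Pause;
	phy_resolve_aneg_linkmode(phydev);
	/* -> speed = SPEED_100, duplex = DUPLEX_FULL, pause = 1, asym_pause = 0 */
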
+
 static void mmd_phy_indirect(struct mii_bus *bus, int phy_addr, int devad,
 			     u16 regnum)
 {
 	/* Write the desired MMD Devad */
-	bus->write(bus, phy_addr, MII_MMD_CTRL, devad);
+	__mdiobus_write(bus, phy_addr, MII_MMD_CTRL, devad);
 
 	/* Write the desired MMD register address */
-	bus->write(bus, phy_addr, MII_MMD_DATA, regnum);
+	__mdiobus_write(bus, phy_addr, MII_MMD_DATA, regnum);
 
 	/* Select the Function : DATA with no post increment */
-	bus->write(bus, phy_addr, MII_MMD_CTRL, devad | MII_MMD_CTRL_NOINCR);
+	__mdiobus_write(bus, phy_addr, MII_MMD_CTRL,
+			devad | MII_MMD_CTRL_NOINCR);
 }
 
 /**
@@ -232,7 +276,7 @@ int phy_read_mmd(struct phy_device *phydev, int devad, u32 regnum)
 		mmd_phy_indirect(bus, phy_addr, devad, regnum);
 
 		/* Read the content of the MMD's selected register */
-		val = bus->read(bus, phy_addr, MII_MMD_DATA);
+		val = __mdiobus_read(bus, phy_addr, MII_MMD_DATA);
 		mutex_unlock(&bus->mdio_lock);
 	}
 	return val;
@@ -271,7 +315,7 @@ int phy_write_mmd(struct phy_device *phydev, int devad, u32 regnum, u16 val)
 		mmd_phy_indirect(bus, phy_addr, devad, regnum);
 
 		/* Write the data into MMD's selected register */
-		bus->write(bus, phy_addr, MII_MMD_DATA, val);
+		__mdiobus_write(bus, phy_addr, MII_MMD_DATA, val);
 		mutex_unlock(&bus->mdio_lock);
 
 		ret = 0;
@@ -279,3 +323,207 @@ int phy_write_mmd(struct phy_device *phydev, int devad, u32 regnum, u16 val)
 	return ret;
 }
 EXPORT_SYMBOL(phy_write_mmd);
+
+/**
+ * __phy_modify() - Convenience function for modifying a PHY register
+ * @phydev: a pointer to a &struct phy_device
+ * @regnum: register number
+ * @mask: bit mask of bits to clear
+ * @set: bit mask of bits to set
+ *
+ * Unlocked helper function which allows a PHY register to be modified as
+ * new register value = (old register value & ~mask) | set
+ */
+int __phy_modify(struct phy_device *phydev, u32 regnum, u16 mask, u16 set)
+{
+	int ret;
+
+	ret = __phy_read(phydev, regnum);
+	if (ret < 0)
+		return ret;
+
+	ret = __phy_write(phydev, regnum, (ret & ~mask) | set);
+
+	return ret < 0 ? ret : 0;
+}
+EXPORT_SYMBOL_GPL(__phy_modify);
+
+/**
+ * phy_modify - Convenience function for modifying a given PHY register
+ * @phydev: the phy_device struct
+ * @regnum: register number to write
+ * @mask: bit mask of bits to clear
+ * @set: new value of bits set in mask to write to @regnum
+ *
+ * NOTE: MUST NOT be called from interrupt context,
+ * because the bus read/write functions may wait for an interrupt
+ * to conclude the operation.
+ */
+int phy_modify(struct phy_device *phydev, u32 regnum, u16 mask, u16 set)
+{
+	int ret;
+
+	mutex_lock(&phydev->mdio.bus->mdio_lock);
+	ret = __phy_modify(phydev, regnum, mask, set);
+	mutex_unlock(&phydev->mdio.bus->mdio_lock);
+
+	return ret;
+}
+EXPORT_SYMBOL_GPL(phy_modify);
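
A short sketch of the helper in use; this is the same locked read-modify-write that genphy_restart_aneg() is converted to later in this patch (the wrapper function name here is hypothetical):

	static int example_restart_aneg(struct phy_device *phydev)
	{
		/* Clear BMCR_ISOLATE; set BMCR_ANENABLE | BMCR_ANRESTART */
		return phy_modify(phydev, MII_BMCR, BMCR_ISOLATE,
				  BMCR_ANENABLE | BMCR_ANRESTART);
	}
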
+
+static int __phy_read_page(struct phy_device *phydev)
+{
+	return phydev->drv->read_page(phydev);
+}
+
+static int __phy_write_page(struct phy_device *phydev, int page)
+{
+	return phydev->drv->write_page(phydev, page);
+}
+
+/**
+ * phy_save_page() - take the bus lock and save the current page
+ * @phydev: a pointer to a &struct phy_device
+ *
+ * Take the MDIO bus lock, and return the current page number. On error,
+ * returns a negative errno. phy_restore_page() must always be called
+ * after this, irrespective of success or failure of this call.
+ */
+int phy_save_page(struct phy_device *phydev)
+{
+	mutex_lock(&phydev->mdio.bus->mdio_lock);
+	return __phy_read_page(phydev);
+}
+EXPORT_SYMBOL_GPL(phy_save_page);
+
+/**
+ * phy_select_page() - take the bus lock, save the current page, and set a page
+ * @phydev: a pointer to a &struct phy_device
+ * @page: desired page
+ *
+ * Take the MDIO bus lock to protect against concurrent access, save the
+ * current PHY page, and set the current page.  On error, returns a
+ * negative errno, otherwise returns the previous page number.
+ * phy_restore_page() must always be called after this, irrespective
+ * of success or failure of this call.
+ */
+int phy_select_page(struct phy_device *phydev, int page)
+{
+	int ret, oldpage;
+
+	oldpage = ret = phy_save_page(phydev);
+	if (ret < 0)
+		return ret;
+
+	if (oldpage != page) {
+		ret = __phy_write_page(phydev, page);
+		if (ret < 0)
+			return ret;
+	}
+
+	return oldpage;
+}
+EXPORT_SYMBOL_GPL(phy_select_page);
+
+/**
+ * phy_restore_page() - restore the page register and release the bus lock
+ * @phydev: a pointer to a &struct phy_device
+ * @oldpage: the old page, return value from phy_save_page() or phy_select_page()
+ * @ret: operation's return code
+ *
+ * Release the MDIO bus lock, restoring @oldpage if it is a valid page.
+ * This function propagates the earliest error code from the group of
+ * operations.
+ *
+ * Returns:
+ *   @oldpage if it was a negative value, otherwise
+ *   @ret if it was a negative errno value, otherwise
+ *   phy_write_page()'s negative value if it failed, otherwise
+ *   @ret.
+ */
+int phy_restore_page(struct phy_device *phydev, int oldpage, int ret)
+{
+	int r;
+
+	if (oldpage >= 0) {
+		r = __phy_write_page(phydev, oldpage);
+
+		/* Propagate the operation return code if the page write
+		 * was successful.
+		 */
+		if (ret >= 0 && r < 0)
+			ret = r;
+	} else {
+		/* Propagate the phy page selection error code */
+		ret = oldpage;
+	}
+
+	mutex_unlock(&phydev->mdio.bus->mdio_lock);
+
+	return ret;
+}
+EXPORT_SYMBOL_GPL(phy_restore_page);
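
The intended calling convention, sketched with made-up page/register numbers (the function name is hypothetical): select a page, perform accesses with the unlocked __phy_* helpers, then let phy_restore_page() restore the page, drop the bus lock, and propagate the first error. This is exactly the shape of phy_read_paged() below:

	static int example_paged_write(struct phy_device *phydev)
	{
		int ret = 0, oldpage;

		/* 0x12 = hypothetical vendor page, 0x10 = hypothetical register */
		oldpage = phy_select_page(phydev, 0x12);
		if (oldpage >= 0)
			ret = __phy_write(phydev, 0x10, 0x1234);

		return phy_restore_page(phydev, oldpage, ret);
	}
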
+
+/**
+ * phy_read_paged() - Convenience function for reading a paged register
+ * @phydev: a pointer to a &struct phy_device
+ * @page: the page for the phy
+ * @regnum: register number
+ *
+ * Same rules as for phy_read().
+ */
+int phy_read_paged(struct phy_device *phydev, int page, u32 regnum)
+{
+	int ret = 0, oldpage;
+
+	oldpage = phy_select_page(phydev, page);
+	if (oldpage >= 0)
+		ret = __phy_read(phydev, regnum);
+
+	return phy_restore_page(phydev, oldpage, ret);
+}
+EXPORT_SYMBOL(phy_read_paged);
+
+/**
+ * phy_write_paged() - Convenience function for writing a paged register
+ * @phydev: a pointer to a &struct phy_device
+ * @page: the page for the phy
+ * @regnum: register number
+ * @val: value to write
+ *
+ * Same rules as for phy_write().
+ */
+int phy_write_paged(struct phy_device *phydev, int page, u32 regnum, u16 val)
+{
+	int ret = 0, oldpage;
+
+	oldpage = phy_select_page(phydev, page);
+	if (oldpage >= 0)
+		ret = __phy_write(phydev, regnum, val);
+
+	return phy_restore_page(phydev, oldpage, ret);
+}
+EXPORT_SYMBOL(phy_write_paged);
+
+/**
+ * phy_modify_paged() - Convenience function for modifying a paged register
+ * @phydev: a pointer to a &struct phy_device
+ * @page: the page for the phy
+ * @regnum: register number
+ * @mask: bit mask of bits to clear
+ * @set: bit mask of bits to set
+ *
+ * Same rules as for phy_read() and phy_write().
+ */
+int phy_modify_paged(struct phy_device *phydev, int page, u32 regnum,
+		     u16 mask, u16 set)
+{
+	int ret = 0, oldpage;
+
+	oldpage = phy_select_page(phydev, page);
+	if (oldpage >= 0)
+		ret = __phy_modify(phydev, regnum, mask, set);
+
+	return phy_restore_page(phydev, oldpage, ret);
+}
+EXPORT_SYMBOL(phy_modify_paged);
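
And the one-shot convenience wrapper in use, again with hypothetical page and register constants:

	/* Set bit 2 of register 0x10 on vendor page 7 (made-up numbers) */
	int err = phy_modify_paged(phydev, 7, 0x10, 0, BIT(2));
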
diff --git a/drivers/net/phy/phy.c b/drivers/net/phy/phy.c
index ed10d1fc8f59..f3313a129531 100644
--- a/drivers/net/phy/phy.c
+++ b/drivers/net/phy/phy.c
@@ -493,7 +493,10 @@ static int phy_start_aneg_priv(struct phy_device *phydev, bool sync)
 	/* Invalidate LP advertising flags */
 	phydev->lp_advertising = 0;
 
-	err = phydev->drv->config_aneg(phydev);
+	if (phydev->drv->config_aneg)
+		err = phydev->drv->config_aneg(phydev);
+	else
+		err = genphy_config_aneg(phydev);
 	if (err < 0)
 		goto out_unlock;
 
@@ -629,9 +632,6 @@ static irqreturn_t phy_interrupt(int irq, void *phy_dat)
 	if (PHY_HALTED == phydev->state)
 		return IRQ_NONE;		/* It can't be ours.  */
 
-	disable_irq_nosync(irq);
-	atomic_inc(&phydev->irq_disable);
-
 	phy_change(phydev);
 
 	return IRQ_HANDLED;
@@ -689,7 +689,6 @@ phy_err:
  */
 int phy_start_interrupts(struct phy_device *phydev)
 {
-	atomic_set(&phydev->irq_disable, 0);
 	if (request_threaded_irq(phydev->irq, NULL, phy_interrupt,
 				 IRQF_ONESHOT | IRQF_SHARED,
 				 phydev_name(phydev), phydev) < 0) {
@@ -716,13 +715,6 @@ int phy_stop_interrupts(struct phy_device *phydev)
 
 	free_irq(phydev->irq, phydev);
 
-	/* If work indeed has been cancelled, disable_irq() will have
-	 * been left unbalanced from phy_interrupt() and enable_irq()
-	 * has to be called so that other devices on the line work.
-	 */
-	while (atomic_dec_return(&phydev->irq_disable) >= 0)
-		enable_irq(phydev->irq);
-
 	return err;
 }
 EXPORT_SYMBOL(phy_stop_interrupts);
@@ -736,10 +728,11 @@ void phy_change(struct phy_device *phydev)
 	if (phy_interrupt_is_valid(phydev)) {
 		if (phydev->drv->did_interrupt &&
 		    !phydev->drv->did_interrupt(phydev))
-			goto ignore;
+			return;
 
-		if (phy_disable_interrupts(phydev))
-			goto phy_err;
+		if (phydev->state == PHY_HALTED)
+			if (phy_disable_interrupts(phydev))
+				goto phy_err;
 	}
 
 	mutex_lock(&phydev->lock);
@@ -747,28 +740,13 @@ void phy_change(struct phy_device *phydev)
 		phydev->state = PHY_CHANGELINK;
 	mutex_unlock(&phydev->lock);
 
-	if (phy_interrupt_is_valid(phydev)) {
-		atomic_dec(&phydev->irq_disable);
-		enable_irq(phydev->irq);
-
-		/* Reenable interrupts */
-		if (PHY_HALTED != phydev->state &&
-		    phy_config_interrupt(phydev, PHY_INTERRUPT_ENABLED))
-			goto irq_enable_err;
-	}
-
 	/* reschedule state queue work to run as soon as possible */
 	phy_trigger_machine(phydev, true);
-	return;
 
-ignore:
-	atomic_dec(&phydev->irq_disable);
-	enable_irq(phydev->irq);
+	if (phy_interrupt_is_valid(phydev) && phy_clear_interrupt(phydev))
+		goto phy_err;
 	return;
 
-irq_enable_err:
-	disable_irq(phydev->irq);
-	atomic_inc(&phydev->irq_disable);
 phy_err:
 	phy_error(phydev);
 }
@@ -1003,10 +981,6 @@ void phy_state_machine(struct work_struct *work)
 			phydev->state = PHY_NOLINK;
 			phy_link_down(phydev, true);
 		}
-
-		if (phy_interrupt_is_valid(phydev))
-			err = phy_config_interrupt(phydev,
-						   PHY_INTERRUPT_ENABLED);
 		break;
 	case PHY_HALTED:
 		if (phydev->link) {
@@ -1083,16 +1057,12 @@ void phy_state_machine(struct work_struct *work)
 /**
  * phy_mac_interrupt - MAC says the link has changed
  * @phydev: phy_device struct with changed link
- * @new_link: Link is Up/Down.
  *
- * Description: The MAC layer is able indicate there has been a change
- *   in the PHY link status. Set the new link status, and trigger the
- *   state machine, work a work queue.
+ * The MAC layer is able to indicate there has been a change in the PHY link
+ * status. Trigger the state machine to run on a work queue.
  */
-void phy_mac_interrupt(struct phy_device *phydev, int new_link)
+void phy_mac_interrupt(struct phy_device *phydev)
 {
-	phydev->link = new_link;
-
 	/* Trigger a state machine change */
 	queue_work(system_power_efficient_wq, &phydev->phy_queue);
 }
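
With the link argument gone, a MAC driver that learns of link changes out of band now simply kicks the state machine; a minimal sketch of a call site (the ISR name and dev_id convention are hypothetical):

	static irqreturn_t example_mac_link_isr(int irq, void *dev_id)
	{
		struct net_device *ndev = dev_id;

		/* No link state is passed in any more; the PHY state
		 * machine re-reads the link itself.
		 */
		phy_mac_interrupt(ndev->phydev);

		return IRQ_HANDLED;
	}
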
diff --git a/drivers/net/phy/phy_device.c b/drivers/net/phy/phy_device.c
index b15b31ca2618..b13eed21c87d 100644
--- a/drivers/net/phy/phy_device.c
+++ b/drivers/net/phy/phy_device.c
@@ -634,6 +634,9 @@ int phy_device_register(struct phy_device *phydev)
 	if (err)
 		return err;
 
+	/* Deassert the reset signal */
+	phy_device_reset(phydev, 0);
+
 	/* Run all of the fixups for this PHY */
 	err = phy_scan_fixups(phydev);
 	if (err) {
@@ -652,6 +655,9 @@ int phy_device_register(struct phy_device *phydev)
 	return 0;
 
  out:
+	/* Assert the reset signal */
+	phy_device_reset(phydev, 1);
+
 	mdiobus_unregister_device(&phydev->mdio);
 	return err;
 }
@@ -668,6 +674,10 @@ EXPORT_SYMBOL(phy_device_register);
 void phy_device_remove(struct phy_device *phydev)
 {
 	device_del(&phydev->mdio.dev);
+
+	/* Assert the reset signal */
+	phy_device_reset(phydev, 1);
+
 	mdiobus_unregister_device(&phydev->mdio);
 }
 EXPORT_SYMBOL(phy_device_remove);
@@ -851,6 +861,9 @@ int phy_init_hw(struct phy_device *phydev)
 {
 	int ret = 0;
 
+	/* Deassert the reset signal */
+	phy_device_reset(phydev, 0);
+
 	if (!phydev->drv || !phydev->drv->config_init)
 		return 0;
 
@@ -1130,6 +1143,9 @@ void phy_detach(struct phy_device *phydev)
 	put_device(&phydev->mdio.dev);
 	if (ndev_owner != bus->owner)
 		module_put(bus->owner);
+
+	/* Assert the reset signal */
+	phy_device_reset(phydev, 1);
 }
 EXPORT_SYMBOL(phy_detach);
 
@@ -1208,6 +1224,30 @@ out:
 }
 EXPORT_SYMBOL(phy_loopback);
 
+/**
+ * phy_reset_after_clk_enable - perform a PHY reset if needed
+ * @phydev: target phy_device struct
+ *
+ * Description: Some PHYs are known to need a reset after their refclk was
+ *   enabled. This function evaluates the flags and performs the reset if it's
+ *   needed. Returns < 0 on error, 0 if the phy wasn't reset and 1 if the phy
+ *   was reset.
+ */
+int phy_reset_after_clk_enable(struct phy_device *phydev)
+{
+	if (!phydev || !phydev->drv)
+		return -ENODEV;
+
+	if (phydev->drv->flags & PHY_RST_AFTER_CLK_EN) {
+		phy_device_reset(phydev, 1);
+		phy_device_reset(phydev, 0);
+		return 1;
+	}
+
+	return 0;
+}
+EXPORT_SYMBOL(phy_reset_after_clk_enable);
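
A sketch of the intended call site in a MAC driver's open or resume path, assuming a hypothetical priv layout and refclk name:

	/* Re-enable the PHY refclk, then reset PHYs that require it */
	ret = clk_prepare_enable(priv->clk_ref);	/* hypothetical clock */
	if (ret)
		return ret;

	/* Returns 1 if the PHY was reset, 0 if no reset was needed */
	ret = phy_reset_after_clk_enable(ndev->phydev);
	if (ret < 0)
		return ret;
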
+
 /* Generic PHY support and helper functions */
 
 /**
@@ -1328,9 +1368,8 @@ static int genphy_config_eee_advert(struct phy_device *phydev)
  */
 int genphy_setup_forced(struct phy_device *phydev)
 {
-	int ctl = phy_read(phydev, MII_BMCR);
+	u16 ctl = 0;
 
-	ctl &= BMCR_LOOPBACK | BMCR_ISOLATE | BMCR_PDOWN;
 	phydev->pause = 0;
 	phydev->asym_pause = 0;
 
@@ -1342,7 +1381,8 @@ int genphy_setup_forced(struct phy_device *phydev)
 	if (DUPLEX_FULL == phydev->duplex)
 		ctl |= BMCR_FULLDPLX;
 
-	return phy_write(phydev, MII_BMCR, ctl);
+	return phy_modify(phydev, MII_BMCR,
+			  BMCR_LOOPBACK | BMCR_ISOLATE | BMCR_PDOWN, ctl);
 }
 EXPORT_SYMBOL(genphy_setup_forced);
 
@@ -1352,17 +1392,9 @@ EXPORT_SYMBOL(genphy_setup_forced);
  */
 int genphy_restart_aneg(struct phy_device *phydev)
 {
-	int ctl = phy_read(phydev, MII_BMCR);
-
-	if (ctl < 0)
-		return ctl;
-
-	ctl |= BMCR_ANENABLE | BMCR_ANRESTART;
-
 	/* Don't isolate the PHY if we're negotiating */
-	ctl &= ~BMCR_ISOLATE;
-
-	return phy_write(phydev, MII_BMCR, ctl);
+	return phy_modify(phydev, MII_BMCR, BMCR_ISOLATE,
+			  BMCR_ANENABLE | BMCR_ANRESTART);
 }
 EXPORT_SYMBOL(genphy_restart_aneg);
 
@@ -1628,44 +1660,20 @@ EXPORT_SYMBOL(genphy_config_init);
 
 int genphy_suspend(struct phy_device *phydev)
 {
-	int value;
-
-	mutex_lock(&phydev->lock);
-
-	value = phy_read(phydev, MII_BMCR);
-	phy_write(phydev, MII_BMCR, value | BMCR_PDOWN);
-
-	mutex_unlock(&phydev->lock);
-
-	return 0;
+	return phy_set_bits(phydev, MII_BMCR, BMCR_PDOWN);
 }
 EXPORT_SYMBOL(genphy_suspend);
 
 int genphy_resume(struct phy_device *phydev)
 {
-	int value;
-
-	value = phy_read(phydev, MII_BMCR);
-	phy_write(phydev, MII_BMCR, value & ~BMCR_PDOWN);
-
-	return 0;
+	return phy_clear_bits(phydev, MII_BMCR, BMCR_PDOWN);
 }
 EXPORT_SYMBOL(genphy_resume);
 
 int genphy_loopback(struct phy_device *phydev, bool enable)
 {
-	int value;
-
-	value = phy_read(phydev, MII_BMCR);
-	if (value < 0)
-		return value;
-
-	if (enable)
-		value |= BMCR_LOOPBACK;
-	else
-		value &= ~BMCR_LOOPBACK;
-
-	return phy_write(phydev, MII_BMCR, value);
+	return phy_modify(phydev, MII_BMCR, BMCR_LOOPBACK,
+			  enable ? BMCR_LOOPBACK : 0);
 }
 EXPORT_SYMBOL(genphy_loopback);
 
@@ -1813,8 +1821,16 @@ static int phy_probe(struct device *dev)
 	/* Set the state to READY by default */
 	phydev->state = PHY_READY;
 
-	if (phydev->drv->probe)
+	if (phydev->drv->probe) {
+		/* Deassert the reset signal */
+		phy_device_reset(phydev, 0);
+
 		err = phydev->drv->probe(phydev);
+		if (err) {
+			/* Assert the reset signal */
+			phy_device_reset(phydev, 1);
+		}
+	}
 
 	mutex_unlock(&phydev->lock);
 
@@ -1831,8 +1847,12 @@ static int phy_remove(struct device *dev)
 	phydev->state = PHY_DOWN;
 	mutex_unlock(&phydev->lock);
 
-	if (phydev->drv && phydev->drv->remove)
+	if (phydev->drv && phydev->drv->remove) {
 		phydev->drv->remove(phydev);
+
+		/* Assert the reset signal */
+		phy_device_reset(phydev, 1);
+	}
 	phydev->drv = NULL;
 
 	return 0;
@@ -1909,9 +1929,7 @@ static struct phy_driver genphy_driver = {
 	.features	= PHY_GBIT_FEATURES | SUPPORTED_MII |
 			  SUPPORTED_AUI | SUPPORTED_FIBRE |
 			  SUPPORTED_BNC,
-	.config_aneg	= genphy_config_aneg,
 	.aneg_done	= genphy_aneg_done,
-	.read_status	= genphy_read_status,
 	.suspend	= genphy_suspend,
 	.resume		= genphy_resume,
 	.set_loopback   = genphy_loopback,
diff --git a/drivers/net/phy/phylink.c b/drivers/net/phy/phylink.c
index 249ce5cbea22..6ac8b29b2dc3 100644
--- a/drivers/net/phy/phylink.c
+++ b/drivers/net/phy/phylink.c
@@ -36,7 +36,11 @@ enum {
 	PHYLINK_DISABLE_LINK,
 };
 
+/**
+ * struct phylink - internal data type for phylink
+ */
 struct phylink {
+	/* private: */
 	struct net_device *netdev;
 	const struct phylink_mac_ops *ops;
 
@@ -50,6 +54,8 @@ struct phylink {
 	/* The link configuration settings */
 	struct phylink_link_state link_config;
 	struct gpio_desc *link_gpio;
+	void (*get_fixed_state)(struct net_device *dev,
+				struct phylink_link_state *s);
 
 	struct mutex state_mutex;
 	struct phylink_link_state phy_state;
@@ -87,6 +93,13 @@ static inline bool linkmode_empty(const unsigned long *src)
 	return bitmap_empty(src, __ETHTOOL_LINK_MODE_MASK_NBITS);
 }
 
+/**
+ * phylink_set_port_modes() - set the port type modes in the ethtool mask
+ * @mask: ethtool link mode mask
+ *
+ * Sets all the port type modes in the ethtool mask.  MAC drivers should
+ * use this in their 'validate' callback.
+ */
 void phylink_set_port_modes(unsigned long *mask)
 {
 	phylink_set(mask, TP);
@@ -117,8 +130,7 @@ static const char *phylink_an_mode_str(unsigned int mode)
 	static const char *modestr[] = {
 		[MLO_AN_PHY] = "phy",
 		[MLO_AN_FIXED] = "fixed",
-		[MLO_AN_SGMII] = "SGMII",
-		[MLO_AN_8023Z] = "802.3z",
+		[MLO_AN_INBAND] = "inband",
 	};
 
 	return mode < ARRAY_SIZE(modestr) ? modestr[mode] : "unknown";
@@ -132,59 +144,64 @@ static int phylink_validate(struct phylink *pl, unsigned long *supported,
 	return phylink_is_empty_linkmode(supported) ? -EINVAL : 0;
 }
 
-static int phylink_parse_fixedlink(struct phylink *pl, struct device_node *np)
+static int phylink_parse_fixedlink(struct phylink *pl,
+				   struct fwnode_handle *fwnode)
 {
-	struct device_node *fixed_node;
+	struct fwnode_handle *fixed_node;
 	const struct phy_setting *s;
 	struct gpio_desc *desc;
-	const __be32 *fixed_prop;
 	u32 speed;
-	int ret, len;
+	int ret;
 
-	fixed_node = of_get_child_by_name(np, "fixed-link");
+	fixed_node = fwnode_get_named_child_node(fwnode, "fixed-link");
 	if (fixed_node) {
-		ret = of_property_read_u32(fixed_node, "speed", &speed);
+		ret = fwnode_property_read_u32(fixed_node, "speed", &speed);
 
 		pl->link_config.speed = speed;
 		pl->link_config.duplex = DUPLEX_HALF;
 
-		if (of_property_read_bool(fixed_node, "full-duplex"))
+		if (fwnode_property_read_bool(fixed_node, "full-duplex"))
 			pl->link_config.duplex = DUPLEX_FULL;
 
 		/* We treat the "pause" and "asym-pause" terminology as
 		 * defining the link partner's ability. */
-		if (of_property_read_bool(fixed_node, "pause"))
+		if (fwnode_property_read_bool(fixed_node, "pause"))
 			pl->link_config.pause |= MLO_PAUSE_SYM;
-		if (of_property_read_bool(fixed_node, "asym-pause"))
+		if (fwnode_property_read_bool(fixed_node, "asym-pause"))
 			pl->link_config.pause |= MLO_PAUSE_ASYM;
 
 		if (ret == 0) {
-			desc = fwnode_get_named_gpiod(&fixed_node->fwnode,
-						      "link-gpios", 0,
-						      GPIOD_IN, "?");
+			desc = fwnode_get_named_gpiod(fixed_node, "link-gpios",
+						      0, GPIOD_IN, "?");
 
 			if (!IS_ERR(desc))
 				pl->link_gpio = desc;
 			else if (desc == ERR_PTR(-EPROBE_DEFER))
 				ret = -EPROBE_DEFER;
 		}
-		of_node_put(fixed_node);
+		fwnode_handle_put(fixed_node);
 
 		if (ret)
 			return ret;
 	} else {
-		fixed_prop = of_get_property(np, "fixed-link", &len);
-		if (!fixed_prop) {
+		u32 prop[5];
+
+		ret = fwnode_property_read_u32_array(fwnode, "fixed-link",
+						     NULL, 0);
+		if (ret != ARRAY_SIZE(prop)) {
 			netdev_err(pl->netdev, "broken fixed-link?\n");
 			return -EINVAL;
 		}
-		if (len == 5 * sizeof(*fixed_prop)) {
-			pl->link_config.duplex = be32_to_cpu(fixed_prop[1]) ?
+
+		ret = fwnode_property_read_u32_array(fwnode, "fixed-link",
+						     prop, ARRAY_SIZE(prop));
+		if (!ret) {
+			pl->link_config.duplex = prop[1] ?
 						DUPLEX_FULL : DUPLEX_HALF;
-			pl->link_config.speed = be32_to_cpu(fixed_prop[2]);
-			if (be32_to_cpu(fixed_prop[3]))
+			pl->link_config.speed = prop[2];
+			if (prop[3])
 				pl->link_config.pause |= MLO_PAUSE_SYM;
-			if (be32_to_cpu(fixed_prop[4]))
+			if (prop[4])
 				pl->link_config.pause |= MLO_PAUSE_ASYM;
 		}
 	}
@@ -220,17 +237,17 @@ static int phylink_parse_fixedlink(struct phylink *pl, struct device_node *np)
 	return 0;
 }
 
-static int phylink_parse_mode(struct phylink *pl, struct device_node *np)
+static int phylink_parse_mode(struct phylink *pl, struct fwnode_handle *fwnode)
 {
-	struct device_node *dn;
+	struct fwnode_handle *dn;
 	const char *managed;
 
-	dn = of_get_child_by_name(np, "fixed-link");
-	if (dn || of_find_property(np, "fixed-link", NULL))
+	dn = fwnode_get_named_child_node(fwnode, "fixed-link");
+	if (dn || fwnode_property_present(fwnode, "fixed-link"))
 		pl->link_an_mode = MLO_AN_FIXED;
-	of_node_put(dn);
+	fwnode_handle_put(dn);
 
-	if (of_property_read_string(np, "managed", &managed) == 0 &&
+	if (fwnode_property_read_string(fwnode, "managed", &managed) == 0 &&
 	    strcmp(managed, "in-band-status") == 0) {
 		if (pl->link_an_mode == MLO_AN_FIXED) {
 			netdev_err(pl->netdev,
@@ -244,6 +261,7 @@ static int phylink_parse_mode(struct phylink *pl, struct device_node *np)
 		phylink_set(pl->supported, Asym_Pause);
 		phylink_set(pl->supported, Pause);
 		pl->link_config.an_enabled = true;
+		pl->link_an_mode = MLO_AN_INBAND;
 
 		switch (pl->link_config.interface) {
 		case PHY_INTERFACE_MODE_SGMII:
@@ -253,17 +271,14 @@ static int phylink_parse_mode(struct phylink *pl, struct device_node *np)
 			phylink_set(pl->supported, 100baseT_Full);
 			phylink_set(pl->supported, 1000baseT_Half);
 			phylink_set(pl->supported, 1000baseT_Full);
-			pl->link_an_mode = MLO_AN_SGMII;
 			break;
 
 		case PHY_INTERFACE_MODE_1000BASEX:
 			phylink_set(pl->supported, 1000baseX_Full);
-			pl->link_an_mode = MLO_AN_8023Z;
 			break;
 
 		case PHY_INTERFACE_MODE_2500BASEX:
 			phylink_set(pl->supported, 2500baseX_Full);
-			pl->link_an_mode = MLO_AN_8023Z;
 			break;
 
 		case PHY_INTERFACE_MODE_10GKR:
@@ -280,7 +295,6 @@ static int phylink_parse_mode(struct phylink *pl, struct device_node *np)
 			phylink_set(pl->supported, 10000baseLR_Full);
 			phylink_set(pl->supported, 10000baseLRM_Full);
 			phylink_set(pl->supported, 10000baseER_Full);
-			pl->link_an_mode = MLO_AN_SGMII;
 			break;
 
 		default:
@@ -320,8 +334,7 @@ static void phylink_mac_config(struct phylink *pl,
 static void phylink_mac_an_restart(struct phylink *pl)
 {
 	if (pl->link_config.an_enabled &&
-	    (pl->link_config.interface == PHY_INTERFACE_MODE_1000BASEX ||
-	     pl->link_config.interface == PHY_INTERFACE_MODE_2500BASEX))
+	    phy_interface_mode_is_8023z(pl->link_config.interface))
 		pl->ops->mac_an_restart(pl->netdev);
 }
 
@@ -339,12 +352,14 @@ static int phylink_get_mac_state(struct phylink *pl, struct phylink_link_state *
 }
 
 /* The fixed state is... fixed except for the link state,
- * which may be determined by a GPIO.
+ * which may be determined by a GPIO or a callback.
  */
 static void phylink_get_fixed_state(struct phylink *pl, struct phylink_link_state *state)
 {
 	*state = pl->link_config;
-	if (pl->link_gpio)
+	if (pl->get_fixed_state)
+		pl->get_fixed_state(pl->netdev, state);
+	else if (pl->link_gpio)
 		state->link = !!gpiod_get_value(pl->link_gpio);
 }
 
@@ -423,7 +438,7 @@ static void phylink_resolve(struct work_struct *w)
 			phylink_mac_config(pl, &link_state);
 			break;
 
-		case MLO_AN_SGMII:
+		case MLO_AN_INBAND:
 			phylink_get_mac_state(pl, &link_state);
 			if (pl->phydev) {
 				bool changed = false;
@@ -449,10 +464,6 @@ static void phylink_resolve(struct work_struct *w)
 				}
 			}
 			break;
-
-		case MLO_AN_8023Z:
-			phylink_get_mac_state(pl, &link_state);
-			break;
 		}
 	}
 
@@ -489,15 +500,27 @@ static void phylink_run_resolve(struct phylink *pl)
 
 static const struct sfp_upstream_ops sfp_phylink_ops;
 
-static int phylink_register_sfp(struct phylink *pl, struct device_node *np)
+static int phylink_register_sfp(struct phylink *pl,
+				struct fwnode_handle *fwnode)
 {
-	struct device_node *sfp_np;
+	struct fwnode_reference_args ref;
+	int ret;
 
-	sfp_np = of_parse_phandle(np, "sfp", 0);
-	if (!sfp_np)
+	if (!fwnode)
 		return 0;
 
-	pl->sfp_bus = sfp_register_upstream(sfp_np, pl->netdev, pl,
+	ret = fwnode_property_get_reference_args(fwnode, "sfp", NULL,
+						 0, 0, &ref);
+	if (ret < 0) {
+		if (ret == -ENOENT)
+			return 0;
+
+		netdev_err(pl->netdev, "unable to parse \"sfp\" node: %d\n",
+			   ret);
+		return ret;
+	}
+
+	pl->sfp_bus = sfp_register_upstream(ref.fwnode, pl->netdev, pl,
 					    &sfp_phylink_ops);
 	if (!pl->sfp_bus)
 		return -ENOMEM;
@@ -505,7 +528,22 @@ static int phylink_register_sfp(struct phylink *pl, struct device_node *np)
 	return 0;
 }
 
-struct phylink *phylink_create(struct net_device *ndev, struct device_node *np,
+/**
+ * phylink_create() - create a phylink instance
+ * @ndev: a pointer to the &struct net_device
+ * @fwnode: a pointer to a &struct fwnode_handle describing the network
+ *	interface
+ * @iface: the desired link mode defined by &typedef phy_interface_t
+ * @ops: a pointer to a &struct phylink_mac_ops for the MAC.
+ *
+ * Create a new phylink instance, and parse the link parameters found in @fwnode.
+ * This will parse in-band modes, fixed-link or SFP configuration.
+ *
+ * Returns a pointer to a &struct phylink, or an error-pointer value. Users
+ * must use IS_ERR() to check for errors from this function.
+ */
+struct phylink *phylink_create(struct net_device *ndev,
+			       struct fwnode_handle *fwnode,
 			       phy_interface_t iface,
 			       const struct phylink_mac_ops *ops)
 {
@@ -521,7 +559,10 @@ struct phylink *phylink_create(struct net_device *ndev, struct device_node *np,
 	pl->netdev = ndev;
 	pl->phy_state.interface = iface;
 	pl->link_interface = iface;
-	pl->link_port = PORT_MII;
+	if (iface == PHY_INTERFACE_MODE_MOCA)
+		pl->link_port = PORT_BNC;
+	else
+		pl->link_port = PORT_MII;
 	pl->link_config.interface = iface;
 	pl->link_config.pause = MLO_PAUSE_AN;
 	pl->link_config.speed = SPEED_UNKNOWN;
@@ -534,21 +575,21 @@ struct phylink *phylink_create(struct net_device *ndev, struct device_node *np,
 	linkmode_copy(pl->link_config.advertising, pl->supported);
 	phylink_validate(pl, pl->supported, &pl->link_config);
 
-	ret = phylink_parse_mode(pl, np);
+	ret = phylink_parse_mode(pl, fwnode);
 	if (ret < 0) {
 		kfree(pl);
 		return ERR_PTR(ret);
 	}
 
 	if (pl->link_an_mode == MLO_AN_FIXED) {
-		ret = phylink_parse_fixedlink(pl, np);
+		ret = phylink_parse_fixedlink(pl, fwnode);
 		if (ret < 0) {
 			kfree(pl);
 			return ERR_PTR(ret);
 		}
 	}
 
-	ret = phylink_register_sfp(pl, np);
+	ret = phylink_register_sfp(pl, fwnode);
 	if (ret < 0) {
 		kfree(pl);
 		return ERR_PTR(ret);
@@ -558,6 +599,13 @@ struct phylink *phylink_create(struct net_device *ndev, struct device_node *np,
 }
 EXPORT_SYMBOL_GPL(phylink_create);
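
A sketch of how a MAC driver might instantiate phylink through the new fwnode-based API; example_phylink_ops, priv, and the use of dev_fwnode() on the platform device are assumptions for illustration:

	/* In probe(), with ndev and the MAC ops already set up */
	struct phylink *pl;

	pl = phylink_create(ndev, dev_fwnode(&pdev->dev),
			    PHY_INTERFACE_MODE_SGMII, &example_phylink_ops);
	if (IS_ERR(pl))
		return PTR_ERR(pl);
	priv->phylink = pl;
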
 
+/**
+ * phylink_destroy() - cleanup and destroy the phylink instance
+ * @pl: a pointer to a &struct phylink returned from phylink_create()
+ *
+ * Destroy a phylink instance. Any PHY that has been attached must have been
+ * cleaned up via phylink_disconnect_phy() prior to calling this function.
+ */
 void phylink_destroy(struct phylink *pl)
 {
 	if (pl->sfp_bus)
@@ -654,10 +702,39 @@ static int phylink_bringup_phy(struct phylink *pl, struct phy_device *phy)
 	return 0;
 }
 
+/**
+ * phylink_connect_phy() - connect a PHY to the phylink instance
+ * @pl: a pointer to a &struct phylink returned from phylink_create()
+ * @phy: a pointer to a &struct phy_device.
+ *
+ * Connect @phy to the phylink instance specified by @pl by calling
+ * phy_attach_direct(). Configure the @phy according to the MAC driver's
+ * capabilities, start the PHYLIB state machine and enable any interrupts
+ * that the PHY supports.
+ *
+ * This updates the phylink's ethtool supported and advertising link mode
+ * masks.
+ *
+ * Returns 0 on success or a negative errno.
+ */
 int phylink_connect_phy(struct phylink *pl, struct phy_device *phy)
 {
 	int ret;
 
+	if (WARN_ON(pl->link_an_mode == MLO_AN_FIXED ||
+		    (pl->link_an_mode == MLO_AN_INBAND &&
+		     phy_interface_mode_is_8023z(pl->link_interface))))
+		return -EINVAL;
+
+	if (pl->phydev)
+		return -EBUSY;
+
+	/* Use PHY device/driver interface */
+	if (pl->link_interface == PHY_INTERFACE_MODE_NA) {
+		pl->link_interface = phy->interface;
+		pl->link_config.interface = pl->link_interface;
+	}
+
 	ret = phy_attach_direct(pl->netdev, phy, 0, pl->link_interface);
 	if (ret)
 		return ret;
@@ -670,14 +747,29 @@ int phylink_connect_phy(struct phylink *pl, struct phy_device *phy)
 }
 EXPORT_SYMBOL_GPL(phylink_connect_phy);
 
-int phylink_of_phy_connect(struct phylink *pl, struct device_node *dn)
+/**
+ * phylink_of_phy_connect() - connect the PHY specified in the DT node.
+ * @pl: a pointer to a &struct phylink returned from phylink_create()
+ * @dn: a pointer to a &struct device_node.
+ * @flags: PHY-specific flags to communicate to the PHY device driver
+ *
+ * Connect the phy specified in the device node @dn to the phylink instance
+ * specified by @pl. Actions specified in phylink_connect_phy() will be
+ * performed.
+ *
+ * Returns 0 on success or a negative errno.
+ */
+int phylink_of_phy_connect(struct phylink *pl, struct device_node *dn,
+			   u32 flags)
 {
 	struct device_node *phy_node;
 	struct phy_device *phy_dev;
 	int ret;
 
-	/* Fixed links are handled without needing a PHY */
-	if (pl->link_an_mode == MLO_AN_FIXED)
+	/* Fixed links and 802.3z are handled without needing a PHY */
+	if (pl->link_an_mode == MLO_AN_FIXED ||
+	    (pl->link_an_mode == MLO_AN_INBAND &&
+	     phy_interface_mode_is_8023z(pl->link_interface)))
 		return 0;
 
 	phy_node = of_parse_phandle(dn, "phy-handle", 0);
@@ -687,14 +779,13 @@ int phylink_of_phy_connect(struct phylink *pl, struct device_node *dn)
 		phy_node = of_parse_phandle(dn, "phy-device", 0);
 
 	if (!phy_node) {
-		if (pl->link_an_mode == MLO_AN_PHY) {
-			netdev_err(pl->netdev, "unable to find PHY node\n");
+		if (pl->link_an_mode == MLO_AN_PHY)
 			return -ENODEV;
-		}
 		return 0;
 	}
 
-	phy_dev = of_phy_attach(pl->netdev, phy_node, 0, pl->link_interface);
+	phy_dev = of_phy_attach(pl->netdev, phy_node, flags,
+				pl->link_interface);
 	/* We're done with the phy_node handle */
 	of_node_put(phy_node);
 
@@ -709,11 +800,18 @@ int phylink_of_phy_connect(struct phylink *pl, struct device_node *dn)
 }
 EXPORT_SYMBOL_GPL(phylink_of_phy_connect);
 
+/**
+ * phylink_disconnect_phy() - disconnect any PHY attached to the phylink
+ *   instance.
+ * @pl: a pointer to a &struct phylink returned from phylink_create()
+ *
+ * Disconnect any current PHY from the phylink instance described by @pl.
+ */
 void phylink_disconnect_phy(struct phylink *pl)
 {
 	struct phy_device *phy;
 
-	WARN_ON(!lockdep_rtnl_is_held());
+	ASSERT_RTNL();
 
 	phy = pl->phydev;
 	if (phy) {
@@ -730,6 +828,40 @@ void phylink_disconnect_phy(struct phylink *pl)
 }
 EXPORT_SYMBOL_GPL(phylink_disconnect_phy);
 
+/**
+ * phylink_fixed_state_cb() - allow setting a fixed link callback
+ * @pl: a pointer to a &struct phylink returned from phylink_create()
+ * @cb: callback to execute to determine the fixed link state.
+ *
+ * The MAC driver should call this function when the state of its link
+ * can be determined through, e.g., an out-of-band MMIO register.
+ */
+int phylink_fixed_state_cb(struct phylink *pl,
+			   void (*cb)(struct net_device *dev,
+				      struct phylink_link_state *state))
+{
+	/* It does not make sense to let the link be overridden unless we use
+	 * MLO_AN_FIXED
+	 */
+	if (pl->link_an_mode != MLO_AN_FIXED)
+		return -EINVAL;
+
+	mutex_lock(&pl->state_mutex);
+	pl->get_fixed_state = cb;
+	mutex_unlock(&pl->state_mutex);
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(phylink_fixed_state_cb);
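
A sketch of such a callback, polling a hypothetical out-of-band link-status register (example_priv, the register offset, and the bit position are all assumptions):

	static void example_fixed_state(struct net_device *dev,
					struct phylink_link_state *state)
	{
		struct example_priv *priv = netdev_priv(dev);

		/* Only the link bit is live; speed/duplex/pause stay as
		 * the fixed-link configuration declared them.
		 */
		state->link = !!(readl(priv->base + EXAMPLE_LINK_REG) & BIT(0));
	}

The MAC driver would register it once after phylink_create(), e.g. phylink_fixed_state_cb(priv->phylink, example_fixed_state).
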
+
+/**
+ * phylink_mac_change() - notify phylink of a change in MAC state
+ * @pl: a pointer to a &struct phylink returned from phylink_create()
+ * @up: indicates whether the link is currently up.
+ *
+ * The MAC driver should call this function when the state of its link
+ * changes (e.g., link failure, new negotiation results, etc.)
+ */
 void phylink_mac_change(struct phylink *pl, bool up)
 {
 	if (!up)
@@ -739,9 +871,17 @@ void phylink_mac_change(struct phylink *pl, bool up)
 }
 EXPORT_SYMBOL_GPL(phylink_mac_change);
 
+/**
+ * phylink_start() - start a phylink instance
+ * @pl: a pointer to a &struct phylink returned from phylink_create()
+ *
+ * Start the phylink instance specified by @pl, configuring the MAC for the
+ * desired link mode(s) and negotiation style. This should be called from the
+ * network device driver's &struct net_device_ops ndo_open() method.
+ */
 void phylink_start(struct phylink *pl)
 {
-	WARN_ON(!lockdep_rtnl_is_held());
+	ASSERT_RTNL();
 
 	netdev_info(pl->netdev, "configuring for %s/%s link mode\n",
 		    phylink_an_mode_str(pl->link_an_mode),
@@ -754,6 +894,12 @@ void phylink_start(struct phylink *pl)
 	phylink_resolve_flow(pl, &pl->link_config);
 	phylink_mac_config(pl, &pl->link_config);
 
+	/* Restart autonegotiation if using 802.3z to ensure that the link
+	 * parameters are properly negotiated.  This is necessary for DSA
+	 * switches using 802.3z negotiation to ensure they see our modes.
+	 */
+	phylink_mac_an_restart(pl);
+
 	clear_bit(PHYLINK_DISABLE_STOPPED, &pl->phylink_disable_state);
 	phylink_run_resolve(pl);
 
@@ -764,9 +910,18 @@ void phylink_start(struct phylink *pl)
 }
 EXPORT_SYMBOL_GPL(phylink_start);
 
+/**
+ * phylink_stop() - stop a phylink instance
+ * @pl: a pointer to a &struct phylink returned from phylink_create()
+ *
+ * Stop the phylink instance specified by @pl. This should be called from the
+ * network device driver's &struct net_device_ops ndo_stop() method.  The
+ * network device's carrier state should not be changed prior to calling this
+ * function.
+ */
 void phylink_stop(struct phylink *pl)
 {
-	WARN_ON(!lockdep_rtnl_is_held());
+	ASSERT_RTNL();
 
 	if (pl->phydev)
 		phy_stop(pl->phydev);
@@ -779,9 +934,18 @@ void phylink_stop(struct phylink *pl)
 }
 EXPORT_SYMBOL_GPL(phylink_stop);
 
+/**
+ * phylink_ethtool_get_wol() - get the wake on lan parameters for the PHY
+ * @pl: a pointer to a &struct phylink returned from phylink_create()
+ * @wol: a pointer to &struct ethtool_wolinfo to hold the read parameters
+ *
+ * Read the wake on lan parameters from the PHY attached to the phylink
+ * instance specified by @pl. If no PHY is currently attached, report no
+ * support for wake on lan.
+ */
 void phylink_ethtool_get_wol(struct phylink *pl, struct ethtool_wolinfo *wol)
 {
-	WARN_ON(!lockdep_rtnl_is_held());
+	ASSERT_RTNL();
 
 	wol->supported = 0;
 	wol->wolopts = 0;
@@ -791,11 +955,22 @@ void phylink_ethtool_get_wol(struct phylink *pl, struct ethtool_wolinfo *wol)
 }
 EXPORT_SYMBOL_GPL(phylink_ethtool_get_wol);
 
+/**
+ * phylink_ethtool_set_wol() - set wake on lan parameters
+ * @pl: a pointer to a &struct phylink returned from phylink_create()
+ * @wol: a pointer to &struct ethtool_wolinfo for the desired parameters
+ *
+ * Set the wake on lan parameters for the PHY attached to the phylink
+ * instance specified by @pl. If no PHY is attached, returns %EOPNOTSUPP
+ * error.
+ *
+ * Returns zero on success or negative errno code.
+ */
 int phylink_ethtool_set_wol(struct phylink *pl, struct ethtool_wolinfo *wol)
 {
 	int ret = -EOPNOTSUPP;
 
-	WARN_ON(!lockdep_rtnl_is_held());
+	ASSERT_RTNL();
 
 	if (pl->phydev)
 		ret = phy_ethtool_set_wol(pl->phydev, wol);
@@ -826,12 +1001,21 @@ static void phylink_get_ksettings(const struct phylink_link_state *state,
 				AUTONEG_DISABLE;
 }
 
+/**
+ * phylink_ethtool_ksettings_get() - get the current link settings
+ * @pl: a pointer to a &struct phylink returned from phylink_create()
+ * @kset: a pointer to a &struct ethtool_link_ksettings to hold link settings
+ *
+ * Read the current link settings for the phylink instance specified by @pl.
+ * This will be the link settings read from the MAC, PHY or fixed link
+ * settings depending on the current negotiation mode.
+ */
 int phylink_ethtool_ksettings_get(struct phylink *pl,
 				  struct ethtool_link_ksettings *kset)
 {
 	struct phylink_link_state link_state;
 
-	WARN_ON(!lockdep_rtnl_is_held());
+	ASSERT_RTNL();
 
 	if (pl->phydev) {
 		phy_ethtool_ksettings_get(pl->phydev, kset);
@@ -851,14 +1035,13 @@ int phylink_ethtool_ksettings_get(struct phylink *pl,
 		phylink_get_ksettings(&link_state, kset);
 		break;
 
-	case MLO_AN_SGMII:
+	case MLO_AN_INBAND:
 		/* If there is a phy attached, then use the reported
 		 * settings from the phy with no modification.
 		 */
 		if (pl->phydev)
 			break;
 
-	case MLO_AN_8023Z:
 		phylink_get_mac_state(pl, &link_state);
 
 		/* The MAC is reporting the link results from its own PCS
@@ -873,6 +1056,11 @@ int phylink_ethtool_ksettings_get(struct phylink *pl,
 }
 EXPORT_SYMBOL_GPL(phylink_ethtool_ksettings_get);
 
+/**
+ * phylink_ethtool_ksettings_set() - set the link settings
+ * @pl: a pointer to a &struct phylink returned from phylink_create()
+ * @kset: a pointer to a &struct ethtool_link_ksettings for the desired modes
+ */
 int phylink_ethtool_ksettings_set(struct phylink *pl,
 				  const struct ethtool_link_ksettings *kset)
 {
@@ -880,7 +1068,7 @@ int phylink_ethtool_ksettings_set(struct phylink *pl,
 	struct phylink_link_state config;
 	int ret;
 
-	WARN_ON(!lockdep_rtnl_is_held());
+	ASSERT_RTNL();
 
 	if (kset->base.autoneg != AUTONEG_DISABLE &&
 	    kset->base.autoneg != AUTONEG_ENABLE)
@@ -967,11 +1155,22 @@ int phylink_ethtool_ksettings_set(struct phylink *pl,
 }
 EXPORT_SYMBOL_GPL(phylink_ethtool_ksettings_set);
 
+/**
+ * phylink_ethtool_nway_reset() - restart negotiation
+ * @pl: a pointer to a &struct phylink returned from phylink_create()
+ *
+ * Restart negotiation for the phylink instance specified by @pl. This will
+ * cause any attached phy to restart negotiation with the link partner, and
+ * if the MAC is in a BaseX mode, the MAC will also be requested to restart
+ * negotiation.
+ *
+ * Returns zero on success, or negative error code.
+ */
 int phylink_ethtool_nway_reset(struct phylink *pl)
 {
 	int ret = 0;
 
-	WARN_ON(!lockdep_rtnl_is_held());
+	ASSERT_RTNL();
 
 	if (pl->phydev)
 		ret = phy_restart_aneg(pl->phydev);
@@ -981,10 +1180,15 @@ int phylink_ethtool_nway_reset(struct phylink *pl)
 }
 EXPORT_SYMBOL_GPL(phylink_ethtool_nway_reset);
 
+/**
+ * phylink_ethtool_get_pauseparam() - get the current pause parameters
+ * @pl: a pointer to a &struct phylink returned from phylink_create()
+ * @pause: a pointer to a &struct ethtool_pauseparam
+ */
 void phylink_ethtool_get_pauseparam(struct phylink *pl,
 				    struct ethtool_pauseparam *pause)
 {
-	WARN_ON(!lockdep_rtnl_is_held());
+	ASSERT_RTNL();
 
 	pause->autoneg = !!(pl->link_config.pause & MLO_PAUSE_AN);
 	pause->rx_pause = !!(pl->link_config.pause & MLO_PAUSE_RX);
@@ -992,12 +1196,17 @@ void phylink_ethtool_get_pauseparam(struct phylink *pl,
 }
 EXPORT_SYMBOL_GPL(phylink_ethtool_get_pauseparam);
 
+/**
+ * phylink_ethtool_set_pauseparam() - set the current pause parameters
+ * @pl: a pointer to a &struct phylink returned from phylink_create()
+ * @pause: a pointer to a &struct ethtool_pauseparam
+ */
 int phylink_ethtool_set_pauseparam(struct phylink *pl,
 				   struct ethtool_pauseparam *pause)
 {
 	struct phylink_link_state *config = &pl->link_config;
 
-	WARN_ON(!lockdep_rtnl_is_held());
+	ASSERT_RTNL();
 
 	if (!phylink_test(pl->supported, Pause) &&
 	    !phylink_test(pl->supported, Asym_Pause))
@@ -1030,8 +1239,7 @@ int phylink_ethtool_set_pauseparam(struct phylink *pl,
 			phylink_mac_config(pl, config);
 			break;
 
-		case MLO_AN_SGMII:
-		case MLO_AN_8023Z:
+		case MLO_AN_INBAND:
 			phylink_mac_config(pl, config);
 			phylink_mac_an_restart(pl);
 			break;
@@ -1070,24 +1278,21 @@ int phylink_ethtool_get_module_eeprom(struct phylink *pl,
 }
 EXPORT_SYMBOL_GPL(phylink_ethtool_get_module_eeprom);
 
-int phylink_init_eee(struct phylink *pl, bool clk_stop_enable)
-{
-	int ret = -EPROTONOSUPPORT;
-
-	WARN_ON(!lockdep_rtnl_is_held());
-
-	if (pl->phydev)
-		ret = phy_init_eee(pl->phydev, clk_stop_enable);
-
-	return ret;
-}
-EXPORT_SYMBOL_GPL(phylink_init_eee);
-
+/**
+ * phylink_ethtool_get_eee_err() - read the energy efficient ethernet error
+ *   counter
+ * @pl: a pointer to a &struct phylink returned from phylink_create().
+ *
+ * Read the Energy Efficient Ethernet error counter from the PHY associated
+ * with the phylink instance specified by @pl.
+ *
+ * Returns positive error counter value, or negative error code.
+ */
 int phylink_get_eee_err(struct phylink *pl)
 {
 	int ret = 0;
 
-	WARN_ON(!lockdep_rtnl_is_held());
+	ASSERT_RTNL();
 
 	if (pl->phydev)
 		ret = phy_get_eee_err(pl->phydev);
@@ -1096,11 +1301,16 @@ int phylink_get_eee_err(struct phylink *pl)
 }
 EXPORT_SYMBOL_GPL(phylink_get_eee_err);
 
+/**
+ * phylink_ethtool_get_eee() - read the energy efficient ethernet parameters
+ * @pl: a pointer to a &struct phylink returned from phylink_create()
+ * @eee: a pointer to a &struct ethtool_eee for the read parameters
+ */
 int phylink_ethtool_get_eee(struct phylink *pl, struct ethtool_eee *eee)
 {
 	int ret = -EOPNOTSUPP;
 
-	WARN_ON(!lockdep_rtnl_is_held());
+	ASSERT_RTNL();
 
 	if (pl->phydev)
 		ret = phy_ethtool_get_eee(pl->phydev, eee);
@@ -1109,11 +1319,16 @@ int phylink_ethtool_get_eee(struct phylink *pl, struct ethtool_eee *eee)
 }
 EXPORT_SYMBOL_GPL(phylink_ethtool_get_eee);
 
+/**
+ * phylink_ethtool_set_eee() - set the energy efficient ethernet parameters
+ * @pl: a pointer to a &struct phylink returned from phylink_create()
+ * @eee: a pointer to a &struct ethtool_eee for the desired parameters
+ */
 int phylink_ethtool_set_eee(struct phylink *pl, struct ethtool_eee *eee)
 {
 	int ret = -EOPNOTSUPP;
 
-	WARN_ON(!lockdep_rtnl_is_held());
+	ASSERT_RTNL();
 
 	if (pl->phydev)
 		ret = phy_ethtool_set_eee(pl->phydev, eee);
@@ -1248,9 +1463,7 @@ static int phylink_mii_read(struct phylink *pl, unsigned int phy_id,
 	case MLO_AN_PHY:
 		return -EOPNOTSUPP;
 
-	case MLO_AN_SGMII:
-		/* No phy, fall through to 8023z method */
-	case MLO_AN_8023Z:
+	case MLO_AN_INBAND:
 		if (phy_id == 0) {
 			val = phylink_get_mac_state(pl, &state);
 			if (val < 0)
@@ -1275,24 +1488,40 @@ static int phylink_mii_write(struct phylink *pl, unsigned int phy_id,
 	case MLO_AN_PHY:
 		return -EOPNOTSUPP;
 
-	case MLO_AN_SGMII:
-		/* No phy, fall through to 8023z method */
-	case MLO_AN_8023Z:
+	case MLO_AN_INBAND:
 		break;
 	}
 
 	return 0;
 }
 
+/**
+ * phylink_mii_ioctl() - generic mii ioctl interface
+ * @pl: a pointer to a &struct phylink returned from phylink_create()
+ * @ifr: a pointer to a &struct ifreq for socket ioctls
+ * @cmd: ioctl cmd to execute
+ *
+ * Perform the specified MII ioctl on the PHY attached to the phylink instance
+ * specified by @pl. If no PHY is attached, emulate the presence of the PHY.
+ *
+ * Returns: zero on success or negative error code.
+ *
+ * %SIOCGMIIPHY:
+ *  read register from the current PHY.
+ * %SIOCGMIIREG:
+ *  read register from the specified PHY.
+ * %SIOCSMIIREG:
+ *  set a register on the specified PHY.
+ */
 int phylink_mii_ioctl(struct phylink *pl, struct ifreq *ifr, int cmd)
 {
 	struct mii_ioctl_data *mii = if_mii(ifr);
 	int  ret;
 
-	WARN_ON(!lockdep_rtnl_is_held());
+	ASSERT_RTNL();
 
 	if (pl->phydev) {
-		/* PHYs only exist for MLO_AN_PHY and MLO_AN_SGMII */
+		/* PHYs only exist for MLO_AN_PHY and SGMII */
 		switch (cmd) {
 		case SIOCGMIIPHY:
 			mii->phy_id = pl->phydev->mdio.addr;
@@ -1351,7 +1580,7 @@ static int phylink_sfp_module_insert(void *upstream,
 	__ETHTOOL_DECLARE_LINK_MODE_MASK(support) = { 0, };
 	struct phylink_link_state config;
 	phy_interface_t iface;
-	int mode, ret = 0;
+	int ret = 0;
 	bool changed;
 	u8 port;
 
@@ -1359,14 +1588,13 @@ static int phylink_sfp_module_insert(void *upstream,
 	port = sfp_parse_port(pl->sfp_bus, id, support);
 	iface = sfp_parse_interface(pl->sfp_bus, id);
 
-	WARN_ON(!lockdep_rtnl_is_held());
+	ASSERT_RTNL();
 
 	switch (iface) {
 	case PHY_INTERFACE_MODE_SGMII:
-		mode = MLO_AN_SGMII;
-		break;
 	case PHY_INTERFACE_MODE_1000BASEX:
-		mode = MLO_AN_8023Z;
+	case PHY_INTERFACE_MODE_2500BASEX:
+	case PHY_INTERFACE_MODE_10GKR:
 		break;
 	default:
 		return -EINVAL;
@@ -1384,16 +1612,18 @@ static int phylink_sfp_module_insert(void *upstream,
 	ret = phylink_validate(pl, support, &config);
 	if (ret) {
 		netdev_err(pl->netdev, "validation of %s/%s with support %*pb failed: %d\n",
-			   phylink_an_mode_str(mode), phy_modes(config.interface),
+			   phylink_an_mode_str(MLO_AN_INBAND),
+			   phy_modes(config.interface),
 			   __ETHTOOL_LINK_MODE_MASK_NBITS, support, ret);
 		return ret;
 	}
 
 	netdev_dbg(pl->netdev, "requesting link mode %s/%s with support %*pb\n",
-		   phylink_an_mode_str(mode), phy_modes(config.interface),
+		   phylink_an_mode_str(MLO_AN_INBAND),
+		   phy_modes(config.interface),
 		   __ETHTOOL_LINK_MODE_MASK_NBITS, support);
 
-	if (mode == MLO_AN_8023Z && pl->phydev)
+	if (phy_interface_mode_is_8023z(iface) && pl->phydev)
 		return -EINVAL;
 
 	changed = !bitmap_equal(pl->supported, support,
@@ -1403,15 +1633,15 @@ static int phylink_sfp_module_insert(void *upstream,
 		linkmode_copy(pl->link_config.advertising, config.advertising);
 	}
 
-	if (pl->link_an_mode != mode ||
+	if (pl->link_an_mode != MLO_AN_INBAND ||
 	    pl->link_config.interface != config.interface) {
 		pl->link_config.interface = config.interface;
-		pl->link_an_mode = mode;
+		pl->link_an_mode = MLO_AN_INBAND;
 
 		changed = true;
 
 		netdev_info(pl->netdev, "switched to %s/%s link mode\n",
-			    phylink_an_mode_str(mode),
+			    phylink_an_mode_str(MLO_AN_INBAND),
 			    phy_modes(config.interface));
 	}
 
@@ -1428,7 +1658,7 @@ static void phylink_sfp_link_down(void *upstream)
 {
 	struct phylink *pl = upstream;
 
-	WARN_ON(!lockdep_rtnl_is_held());
+	ASSERT_RTNL();
 
 	set_bit(PHYLINK_DISABLE_LINK, &pl->phylink_disable_state);
 	queue_work(system_power_efficient_wq, &pl->resolve);
@@ -1439,7 +1669,7 @@ static void phylink_sfp_link_up(void *upstream)
 {
 	struct phylink *pl = upstream;
 
-	WARN_ON(!lockdep_rtnl_is_held());
+	ASSERT_RTNL();
 
 	clear_bit(PHYLINK_DISABLE_LINK, &pl->phylink_disable_state);
 	phylink_run_resolve(pl);
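
The WARN_ON(!lockdep_rtnl_is_held()) to ASSERT_RTNL() conversions above do not change the locking contract: callers must hold the RTNL. A minimal sketch of the usual callers, a MAC driver's ethtool EEE hooks invoked by the ethtool core; the mymac_* names are hypothetical:

static int mymac_get_eee(struct net_device *ndev, struct ethtool_eee *eee)
{
	struct mymac_priv *priv = netdev_priv(ndev);

	/* The ethtool core already holds the RTNL here, which is what
	 * the ASSERT_RTNL() in phylink checks.
	 */
	return phylink_ethtool_get_eee(priv->phylink, eee);
}

static int mymac_set_eee(struct net_device *ndev, struct ethtool_eee *eee)
{
	struct mymac_priv *priv = netdev_priv(ndev);

	return phylink_ethtool_set_eee(priv->phylink, eee);
}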
diff --git a/drivers/net/phy/qsemi.c b/drivers/net/phy/qsemi.c
index dbef8002bc28..889a4dce1648 100644
--- a/drivers/net/phy/qsemi.c
+++ b/drivers/net/phy/qsemi.c
@@ -118,8 +118,6 @@ static struct phy_driver qs6612_driver[] = { {
 	.features	= PHY_BASIC_FEATURES,
 	.flags		= PHY_HAS_INTERRUPT,
 	.config_init	= qs6612_config_init,
-	.config_aneg	= genphy_config_aneg,
-	.read_status	= genphy_read_status,
 	.ack_interrupt	= qs6612_ack_interrupt,
 	.config_intr	= qs6612_config_intr,
 } };
diff --git a/drivers/net/phy/realtek.c b/drivers/net/phy/realtek.c
index eda0a6e86918..ee3ca4a2f12b 100644
--- a/drivers/net/phy/realtek.c
+++ b/drivers/net/phy/realtek.c
@@ -13,29 +13,44 @@
  * option) any later version.
  *
  */
+#include <linux/bitops.h>
 #include <linux/phy.h>
 #include <linux/module.h>
 
-#define RTL821x_PHYSR		0x11
-#define RTL821x_PHYSR_DUPLEX	0x2000
-#define RTL821x_PHYSR_SPEED	0xc000
-#define RTL821x_INER		0x12
-#define RTL821x_INER_INIT	0x6400
-#define RTL821x_INSR		0x13
-#define RTL821x_PAGE_SELECT	0x1f
-#define RTL8211E_INER_LINK_STATUS 0x400
+#define RTL821x_PHYSR				0x11
+#define RTL821x_PHYSR_DUPLEX			BIT(13)
+#define RTL821x_PHYSR_SPEED			GENMASK(15, 14)
 
-#define RTL8211F_INER_LINK_STATUS 0x0010
-#define RTL8211F_INSR		0x1d
-#define RTL8211F_TX_DELAY	0x100
+#define RTL821x_INER				0x12
+#define RTL8211B_INER_INIT			0x6400
+#define RTL8211E_INER_LINK_STATUS		BIT(10)
+#define RTL8211F_INER_LINK_STATUS		BIT(4)
 
-#define RTL8201F_ISR		0x1e
-#define RTL8201F_IER		0x13
+#define RTL821x_INSR				0x13
+
+#define RTL821x_PAGE_SELECT			0x1f
+
+#define RTL8211F_INSR				0x1d
+
+#define RTL8211F_TX_DELAY			BIT(8)
+
+#define RTL8201F_ISR				0x1e
+#define RTL8201F_IER				0x13
 
 MODULE_DESCRIPTION("Realtek PHY driver");
 MODULE_AUTHOR("Johnson Leung");
 MODULE_LICENSE("GPL");
 
+static int rtl821x_read_page(struct phy_device *phydev)
+{
+	return __phy_read(phydev, RTL821x_PAGE_SELECT);
+}
+
+static int rtl821x_write_page(struct phy_device *phydev, int page)
+{
+	return __phy_write(phydev, RTL821x_PAGE_SELECT, page);
+}
+
 static int rtl8201_ack_interrupt(struct phy_device *phydev)
 {
 	int err;
@@ -58,31 +73,21 @@ static int rtl8211f_ack_interrupt(struct phy_device *phydev)
 {
 	int err;
 
-	phy_write(phydev, RTL821x_PAGE_SELECT, 0xa43);
-	err = phy_read(phydev, RTL8211F_INSR);
-	/* restore to default page 0 */
-	phy_write(phydev, RTL821x_PAGE_SELECT, 0x0);
+	err = phy_read_paged(phydev, 0xa43, RTL8211F_INSR);
 
 	return (err < 0) ? err : 0;
 }
 
 static int rtl8201_config_intr(struct phy_device *phydev)
 {
-	int err;
-
-	/* switch to page 7 */
-	phy_write(phydev, RTL821x_PAGE_SELECT, 0x7);
+	u16 val;
 
 	if (phydev->interrupts == PHY_INTERRUPT_ENABLED)
-		err = phy_write(phydev, RTL8201F_IER,
-				BIT(13) | BIT(12) | BIT(11));
+		val = BIT(13) | BIT(12) | BIT(11);
 	else
-		err = phy_write(phydev, RTL8201F_IER, 0);
-
-	/* restore to default page 0 */
-	phy_write(phydev, RTL821x_PAGE_SELECT, 0x0);
+		val = 0;
 
-	return err;
+	return phy_write_paged(phydev, 0x7, RTL8201F_IER, val);
 }
 
 static int rtl8211b_config_intr(struct phy_device *phydev)
@@ -91,7 +96,7 @@ static int rtl8211b_config_intr(struct phy_device *phydev)
 
 	if (phydev->interrupts == PHY_INTERRUPT_ENABLED)
 		err = phy_write(phydev, RTL821x_INER,
-				RTL821x_INER_INIT);
+				RTL8211B_INER_INIT);
 	else
 		err = phy_write(phydev, RTL821x_INER, 0);
 
@@ -113,43 +118,31 @@ static int rtl8211e_config_intr(struct phy_device *phydev)
 
 static int rtl8211f_config_intr(struct phy_device *phydev)
 {
-	int err;
+	u16 val;
 
-	phy_write(phydev, RTL821x_PAGE_SELECT, 0xa42);
 	if (phydev->interrupts == PHY_INTERRUPT_ENABLED)
-		err = phy_write(phydev, RTL821x_INER,
-				RTL8211F_INER_LINK_STATUS);
+		val = RTL8211F_INER_LINK_STATUS;
 	else
-		err = phy_write(phydev, RTL821x_INER, 0);
-	phy_write(phydev, RTL821x_PAGE_SELECT, 0);
+		val = 0;
 
-	return err;
+	return phy_write_paged(phydev, 0xa42, RTL821x_INER, val);
 }
 
 static int rtl8211f_config_init(struct phy_device *phydev)
 {
 	int ret;
-	u16 reg;
+	u16 val = 0;
 
 	ret = genphy_config_init(phydev);
 	if (ret < 0)
 		return ret;
 
-	phy_write(phydev, RTL821x_PAGE_SELECT, 0xd08);
-	reg = phy_read(phydev, 0x11);
-
 	/* enable TX-delay for rgmii-id and rgmii-txid, otherwise disable it */
 	if (phydev->interface == PHY_INTERFACE_MODE_RGMII_ID ||
 	    phydev->interface == PHY_INTERFACE_MODE_RGMII_TXID)
-		reg |= RTL8211F_TX_DELAY;
-	else
-		reg &= ~RTL8211F_TX_DELAY;
-
-	phy_write(phydev, 0x11, reg);
-	/* restore to default page 0 */
-	phy_write(phydev, RTL821x_PAGE_SELECT, 0x0);
+		val = RTL8211F_TX_DELAY;
 
-	return 0;
+	return phy_modify_paged(phydev, 0xd08, 0x11, RTL8211F_TX_DELAY, val);
 }
 
 static struct phy_driver realtek_drvs[] = {
@@ -159,28 +152,24 @@ static struct phy_driver realtek_drvs[] = {
 		.phy_id_mask    = 0x0000ffff,
 		.features       = PHY_BASIC_FEATURES,
 		.flags          = PHY_HAS_INTERRUPT,
-		.config_aneg    = &genphy_config_aneg,
-		.read_status    = &genphy_read_status,
 	}, {
 		.phy_id		= 0x001cc816,
 		.name		= "RTL8201F 10/100Mbps Ethernet",
 		.phy_id_mask	= 0x001fffff,
 		.features	= PHY_BASIC_FEATURES,
 		.flags		= PHY_HAS_INTERRUPT,
-		.config_aneg	= &genphy_config_aneg,
-		.read_status	= &genphy_read_status,
 		.ack_interrupt	= &rtl8201_ack_interrupt,
 		.config_intr	= &rtl8201_config_intr,
 		.suspend	= genphy_suspend,
 		.resume		= genphy_resume,
+		.read_page	= rtl821x_read_page,
+		.write_page	= rtl821x_write_page,
 	}, {
 		.phy_id		= 0x001cc912,
 		.name		= "RTL8211B Gigabit Ethernet",
 		.phy_id_mask	= 0x001fffff,
 		.features	= PHY_GBIT_FEATURES,
 		.flags		= PHY_HAS_INTERRUPT,
-		.config_aneg	= &genphy_config_aneg,
-		.read_status	= &genphy_read_status,
 		.ack_interrupt	= &rtl821x_ack_interrupt,
 		.config_intr	= &rtl8211b_config_intr,
 	}, {
@@ -189,8 +178,6 @@ static struct phy_driver realtek_drvs[] = {
 		.phy_id_mask	= 0x001fffff,
 		.features	= PHY_GBIT_FEATURES,
 		.flags		= PHY_HAS_INTERRUPT,
-		.config_aneg	= genphy_config_aneg,
-		.read_status	= genphy_read_status,
 		.ack_interrupt	= rtl821x_ack_interrupt,
 		.config_intr	= rtl8211e_config_intr,
 		.suspend	= genphy_suspend,
@@ -201,8 +188,6 @@ static struct phy_driver realtek_drvs[] = {
 		.phy_id_mask	= 0x001fffff,
 		.features	= PHY_GBIT_FEATURES,
 		.flags		= PHY_HAS_INTERRUPT,
-		.config_aneg	= &genphy_config_aneg,
-		.read_status	= &genphy_read_status,
 		.ack_interrupt	= &rtl821x_ack_interrupt,
 		.config_intr	= &rtl8211e_config_intr,
 		.suspend	= genphy_suspend,
@@ -213,13 +198,13 @@ static struct phy_driver realtek_drvs[] = {
 		.phy_id_mask	= 0x001fffff,
 		.features	= PHY_GBIT_FEATURES,
 		.flags		= PHY_HAS_INTERRUPT,
-		.config_aneg	= &genphy_config_aneg,
 		.config_init	= &rtl8211f_config_init,
-		.read_status	= &genphy_read_status,
 		.ack_interrupt	= &rtl8211f_ack_interrupt,
 		.config_intr	= &rtl8211f_config_intr,
 		.suspend	= genphy_suspend,
 		.resume		= genphy_resume,
+		.read_page	= rtl821x_read_page,
+		.write_page	= rtl821x_write_page,
 	},
 };
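
The rework above depends on the phylib paged-access helpers: once a driver supplies .read_page/.write_page, phy_read_paged(), phy_write_paged() and phy_modify_paged() select the page, perform the access and restore the previous page while holding the MDIO bus lock. A sketch of the pattern, with hypothetical MYPHY_* register and bit names:

static int myphy_read_page(struct phy_device *phydev)
{
	return __phy_read(phydev, MYPHY_PAGE_SELECT);
}

static int myphy_write_page(struct phy_device *phydev, int page)
{
	return __phy_write(phydev, MYPHY_PAGE_SELECT, page);
}

static int myphy_set_led(struct phy_device *phydev, bool on)
{
	/* Page select, read-modify-write and page restore happen as one
	 * locked sequence, so concurrent accesses never see a stale page.
	 */
	return phy_modify_paged(phydev, MYPHY_PAGE_LED, MYPHY_LED_CTRL,
				MYPHY_LED_EN, on ? MYPHY_LED_EN : 0);
}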
 
diff --git a/drivers/net/phy/rockchip.c b/drivers/net/phy/rockchip.c
index c092af137056..f1da70b9b55f 100644
--- a/drivers/net/phy/rockchip.c
+++ b/drivers/net/phy/rockchip.c
@@ -213,7 +213,6 @@ static struct phy_driver rockchip_phy_driver[] = {
 	.soft_reset		= genphy_soft_reset,
 	.config_init		= rockchip_integrated_phy_config_init,
 	.config_aneg		= rockchip_config_aneg,
-	.read_status		= genphy_read_status,
 	.suspend		= genphy_suspend,
 	.resume			= rockchip_phy_resume,
 },
diff --git a/drivers/net/phy/sfp-bus.c b/drivers/net/phy/sfp-bus.c
index ab64a142b832..8961209ee949 100644
--- a/drivers/net/phy/sfp-bus.c
+++ b/drivers/net/phy/sfp-bus.c
@@ -8,10 +8,14 @@
 
 #include "sfp.h"
 
+/**
+ * struct sfp_bus - internal representation of an SFP bus
+ */
 struct sfp_bus {
+	/* private: */
 	struct kref kref;
 	struct list_head node;
-	struct device_node *device_node;
+	struct fwnode_handle *fwnode;
 
 	const struct sfp_socket_ops *socket_ops;
 	struct device *sfp_dev;
@@ -26,6 +30,20 @@ struct sfp_bus {
 	bool started;
 };
 
+/**
+ * sfp_parse_port() - Parse the EEPROM base ID, setting the port type
+ * @bus: a pointer to the &struct sfp_bus structure for the sfp module
+ * @id: a pointer to the module's &struct sfp_eeprom_id
+ * @support: optional pointer to an array of unsigned long for the
+ *   ethtool support mask
+ *
+ * Parse the EEPROM identification given in @id, and return one of
+ * %PORT_TP, %PORT_FIBRE or %PORT_OTHER. If @support is non-%NULL,
+ * also set the ethtool %ETHTOOL_LINK_MODE_xxx_BIT corresponding with
+ * the connector type.
+ *
+ * If the port type is not known, returns %PORT_OTHER.
+ */
 int sfp_parse_port(struct sfp_bus *bus, const struct sfp_eeprom_id *id,
 		   unsigned long *support)
 {
@@ -39,21 +57,19 @@ int sfp_parse_port(struct sfp_bus *bus, const struct sfp_eeprom_id *id,
 	case SFP_CONNECTOR_MT_RJ:
 	case SFP_CONNECTOR_MU:
 	case SFP_CONNECTOR_OPTICAL_PIGTAIL:
-		if (support)
-			phylink_set(support, FIBRE);
 		port = PORT_FIBRE;
 		break;
 
 	case SFP_CONNECTOR_RJ45:
-		if (support)
-			phylink_set(support, TP);
 		port = PORT_TP;
 		break;
 
+	case SFP_CONNECTOR_COPPER_PIGTAIL:
+		port = PORT_DA;
+		break;
+
 	case SFP_CONNECTOR_UNSPEC:
 		if (id->base.e1000_base_t) {
-			if (support)
-				phylink_set(support, TP);
 			port = PORT_TP;
 			break;
 		}
@@ -62,7 +78,6 @@ int sfp_parse_port(struct sfp_bus *bus, const struct sfp_eeprom_id *id,
 	case SFP_CONNECTOR_MPO_1X12:
 	case SFP_CONNECTOR_MPO_2X16:
 	case SFP_CONNECTOR_HSSDC_II:
-	case SFP_CONNECTOR_COPPER_PIGTAIL:
 	case SFP_CONNECTOR_NOSEPARATE:
 	case SFP_CONNECTOR_MXC_2X16:
 		port = PORT_OTHER;
@@ -74,10 +89,40 @@ int sfp_parse_port(struct sfp_bus *bus, const struct sfp_eeprom_id *id,
 		break;
 	}
 
+	if (support) {
+		switch (port) {
+		case PORT_FIBRE:
+			phylink_set(support, FIBRE);
+			break;
+
+		case PORT_TP:
+			phylink_set(support, TP);
+			break;
+		}
+	}
+
 	return port;
 }
 EXPORT_SYMBOL_GPL(sfp_parse_port);
 
+/**
+ * sfp_parse_interface() - Parse the phy_interface_t
+ * @bus: a pointer to the &struct sfp_bus structure for the sfp module
+ * @id: a pointer to the module's &struct sfp_eeprom_id
+ *
+ * Derive the phy_interface_t mode from the information found in the
+ * module's identifying EEPROM. There is no standard or defined way
+ * to derive this information, so we use some heuristics.
+ *
+ * If the encoding is 64b66b, then the module must be >= 10G, so
+ * return %PHY_INTERFACE_MODE_10GKR.
+ *
+ * If it's 8b10b, then it's 1G or slower. If it's definitely a fibre
+ * module, return %PHY_INTERFACE_MODE_1000BASEX mode, otherwise return
+ * %PHY_INTERFACE_MODE_SGMII mode.
+ *
+ * If the encoding is not known, return %PHY_INTERFACE_MODE_NA.
+ */
 phy_interface_t sfp_parse_interface(struct sfp_bus *bus,
 				    const struct sfp_eeprom_id *id)
 {
@@ -107,6 +152,11 @@ phy_interface_t sfp_parse_interface(struct sfp_bus *bus,
 		break;
 
 	default:
+		if (id->base.e1000_base_cx) {
+			iface = PHY_INTERFACE_MODE_1000BASEX;
+			break;
+		}
+
 		iface = PHY_INTERFACE_MODE_NA;
 		dev_err(bus->sfp_dev,
 			"SFP module encoding does not support 8b10b nor 64b66b\n");
@@ -117,13 +167,38 @@ phy_interface_t sfp_parse_interface(struct sfp_bus *bus,
 }
 EXPORT_SYMBOL_GPL(sfp_parse_interface);
 
+/**
+ * sfp_parse_support() - Parse the eeprom id for supported link modes
+ * @bus: a pointer to the &struct sfp_bus structure for the sfp module
+ * @id: a pointer to the module's &struct sfp_eeprom_id
+ * @support: pointer to an array of unsigned long for the ethtool support mask
+ *
+ * Parse the EEPROM identification information and derive the supported
+ * ethtool link modes for the module.
+ */
 void sfp_parse_support(struct sfp_bus *bus, const struct sfp_eeprom_id *id,
 		       unsigned long *support)
 {
+	unsigned int br_min, br_nom, br_max;
+
 	phylink_set(support, Autoneg);
 	phylink_set(support, Pause);
 	phylink_set(support, Asym_Pause);
 
+	/* Decode the bitrate information to MBd */
+	br_min = br_nom = br_max = 0;
+	if (id->base.br_nominal) {
+		if (id->base.br_nominal != 255) {
+			br_nom = id->base.br_nominal * 100;
+			br_min = br_nom - id->base.br_nominal * id->ext.br_min;
+			br_max = br_nom + id->base.br_nominal * id->ext.br_max;
+		} else if (id->ext.br_max) {
+			br_nom = 250 * id->ext.br_max;
+			br_max = br_nom + br_nom * id->ext.br_min / 100;
+			br_min = br_nom - br_nom * id->ext.br_min / 100;
+		}
+	}
+
 	/* Set ethtool support from the compliance fields. */
 	if (id->base.e10g_base_sr)
 		phylink_set(support, 10000baseSR_Full);
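
To make the bitrate decode above concrete: br_nominal is in units of 100 MBd, ext.br_min/ext.br_max are percentage margins below/above nominal, and the 255 escape puts the rate in ext.br_max in units of 250 MBd with ext.br_min as a symmetric percentage margin. A standalone sketch with two sample EEPROM encodings:

#include <stdio.h>

static void decode(unsigned int br_nominal, unsigned int ext_br_min,
		   unsigned int ext_br_max)
{
	unsigned int br_min = 0, br_nom = 0, br_max = 0;

	if (br_nominal) {
		if (br_nominal != 255) {
			br_nom = br_nominal * 100;
			br_min = br_nom - br_nominal * ext_br_min;
			br_max = br_nom + br_nominal * ext_br_max;
		} else if (ext_br_max) {
			br_nom = 250 * ext_br_max;
			br_max = br_nom + br_nom * ext_br_min / 100;
			br_min = br_nom - br_nom * ext_br_min / 100;
		}
	}
	printf("nominal %u, min %u, max %u MBd\n", br_nom, br_min, br_max);
}

int main(void)
{
	decode(103, 10, 10);	/* 10GBASE-SR style: 10300 MBd +/- 10% */
	decode(255, 10, 103);	/* escape encoding: 25750 MBd +/- 10% */
	return 0;
}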
@@ -142,6 +217,34 @@ void sfp_parse_support(struct sfp_bus *bus, const struct sfp_eeprom_id *id,
 		phylink_set(support, 1000baseT_Full);
 	}
 
+	/* 1000Base-PX or 1000Base-BX10 */
+	if ((id->base.e_base_px || id->base.e_base_bx10) &&
+	    br_min <= 1300 && br_max >= 1200)
+		phylink_set(support, 1000baseX_Full);
+
+	/* For active or passive cables, select the link modes
+	 * based on the bit rates and the cable compliance bytes.
+	 */
+	if ((id->base.sfp_ct_passive || id->base.sfp_ct_active) && br_nom) {
+		/* This may look odd, but some manufacturers use 12000MBd */
+		if (br_min <= 12000 && br_max >= 10300)
+			phylink_set(support, 10000baseCR_Full);
+		if (br_min <= 3200 && br_max >= 3100)
+			phylink_set(support, 2500baseX_Full);
+		if (br_min <= 1300 && br_max >= 1200)
+			phylink_set(support, 1000baseX_Full);
+	}
+	if (id->base.sfp_ct_passive) {
+		if (id->base.passive.sff8431_app_e)
+			phylink_set(support, 10000baseCR_Full);
+	}
+	if (id->base.sfp_ct_active) {
+		if (id->base.active.sff8431_app_e ||
+		    id->base.active.sff8431_lim) {
+			phylink_set(support, 10000baseCR_Full);
+		}
+	}
+
 	switch (id->base.extended_cc) {
 	case 0x00: /* Unspecified */
 		break;
@@ -175,35 +278,6 @@ void sfp_parse_support(struct sfp_bus *bus, const struct sfp_eeprom_id *id,
 		if (id->base.br_nominal >= 12)
 			phylink_set(support, 1000baseX_Full);
 	}
-
-	switch (id->base.connector) {
-	case SFP_CONNECTOR_SC:
-	case SFP_CONNECTOR_FIBERJACK:
-	case SFP_CONNECTOR_LC:
-	case SFP_CONNECTOR_MT_RJ:
-	case SFP_CONNECTOR_MU:
-	case SFP_CONNECTOR_OPTICAL_PIGTAIL:
-		break;
-
-	case SFP_CONNECTOR_UNSPEC:
-		if (id->base.e1000_base_t)
-			break;
-
-	case SFP_CONNECTOR_SG: /* guess */
-	case SFP_CONNECTOR_MPO_1X12:
-	case SFP_CONNECTOR_MPO_2X16:
-	case SFP_CONNECTOR_HSSDC_II:
-	case SFP_CONNECTOR_COPPER_PIGTAIL:
-	case SFP_CONNECTOR_NOSEPARATE:
-	case SFP_CONNECTOR_MXC_2X16:
-	default:
-		/* a guess at the supported link modes */
-		dev_warn(bus->sfp_dev,
-			 "Guessing link modes, please report...\n");
-		phylink_set(support, 1000baseT_Half);
-		phylink_set(support, 1000baseT_Full);
-		break;
-	}
 }
 EXPORT_SYMBOL_GPL(sfp_parse_support);
 
@@ -215,7 +289,7 @@ static const struct sfp_upstream_ops *sfp_get_upstream_ops(struct sfp_bus *bus)
 	return bus->registered ? bus->upstream_ops : NULL;
 }
 
-static struct sfp_bus *sfp_bus_get(struct device_node *np)
+static struct sfp_bus *sfp_bus_get(struct fwnode_handle *fwnode)
 {
 	struct sfp_bus *sfp, *new, *found = NULL;
 
@@ -224,7 +298,7 @@ static struct sfp_bus *sfp_bus_get(struct device_node *np)
 	mutex_lock(&sfp_mutex);
 
 	list_for_each_entry(sfp, &sfp_buses, node) {
-		if (sfp->device_node == np) {
+		if (sfp->fwnode == fwnode) {
 			kref_get(&sfp->kref);
 			found = sfp;
 			break;
@@ -233,7 +307,7 @@ static struct sfp_bus *sfp_bus_get(struct device_node *np)
 
 	if (!found && new) {
 		kref_init(&new->kref);
-		new->device_node = np;
+		new->fwnode = fwnode;
 		list_add(&new->node, &sfp_buses);
 		found = new;
 		new = NULL;
@@ -246,7 +320,7 @@ static struct sfp_bus *sfp_bus_get(struct device_node *np)
 	return found;
 }
 
-static void sfp_bus_release(struct kref *kref) __releases(sfp_mutex)
+static void sfp_bus_release(struct kref *kref)
 {
 	struct sfp_bus *bus = container_of(kref, struct sfp_bus, kref);
 
@@ -293,6 +367,16 @@ static void sfp_unregister_bus(struct sfp_bus *bus)
 	bus->registered = false;
 }
 
+/**
+ * sfp_get_module_info() - Get the ethtool_modinfo for an SFP module
+ * @bus: a pointer to the &struct sfp_bus structure for the sfp module
+ * @modinfo: a &struct ethtool_modinfo
+ *
+ * Fill in the type and eeprom_len parameters in @modinfo for a module on
+ * the sfp bus specified by @bus.
+ *
+ * Returns 0 on success or a negative errno number.
+ */
 int sfp_get_module_info(struct sfp_bus *bus, struct ethtool_modinfo *modinfo)
 {
 	if (!bus->registered)
@@ -301,6 +385,17 @@ int sfp_get_module_info(struct sfp_bus *bus, struct ethtool_modinfo *modinfo)
 }
 EXPORT_SYMBOL_GPL(sfp_get_module_info);
 
+/**
+ * sfp_get_module_eeprom() - Read the SFP module EEPROM
+ * @bus: a pointer to the &struct sfp_bus structure for the sfp module
+ * @ee: a &struct ethtool_eeprom
+ * @data: buffer to contain the EEPROM data (must be at least @ee->len bytes)
+ *
+ * Read the EEPROM as specified by the supplied @ee. See the documentation
+ * for &struct ethtool_eeprom for the region to be read.
+ *
+ * Returns 0 on success or a negative errno number.
+ */
 int sfp_get_module_eeprom(struct sfp_bus *bus, struct ethtool_eeprom *ee,
 			  u8 *data)
 {
@@ -310,6 +405,15 @@ int sfp_get_module_eeprom(struct sfp_bus *bus, struct ethtool_eeprom *ee,
 }
 EXPORT_SYMBOL_GPL(sfp_get_module_eeprom);
 
+/**
+ * sfp_upstream_start() - Inform the SFP that the network device is up
+ * @bus: a pointer to the &struct sfp_bus structure for the sfp module
+ *
+ * Inform the SFP socket that the network device is now up, so that the
+ * module can be enabled by allowing TX_DISABLE to be deasserted. This
+ * should be called from the network device driver's &struct net_device_ops
+ * ndo_open() method.
+ */
 void sfp_upstream_start(struct sfp_bus *bus)
 {
 	if (bus->registered)
@@ -318,6 +422,15 @@ void sfp_upstream_start(struct sfp_bus *bus)
 }
 EXPORT_SYMBOL_GPL(sfp_upstream_start);
 
+/**
+ * sfp_upstream_stop() - Inform the SFP that the network device is down
+ * @bus: a pointer to the &struct sfp_bus structure for the sfp module
+ *
+ * Inform the SFP socket that the network device is now down, so that the
+ * module can be disabled by asserting TX_DISABLE, disabling the laser
+ * in optical modules. This should be called from the network device
+ * driver's &struct net_device_ops ndo_stop() method.
+ */
 void sfp_upstream_stop(struct sfp_bus *bus)
 {
 	if (bus->registered)
@@ -326,11 +439,24 @@ void sfp_upstream_stop(struct sfp_bus *bus)
 }
 EXPORT_SYMBOL_GPL(sfp_upstream_stop);
 
-struct sfp_bus *sfp_register_upstream(struct device_node *np,
+/**
+ * sfp_register_upstream() - Register the neighbouring device
+ * @fwnode: firmware node for the SFP bus
+ * @ndev: network device associated with the interface
+ * @upstream: the upstream private data
+ * @ops: the upstream's &struct sfp_upstream_ops
+ *
+ * Register the upstream device (e.g., PHY) with the SFP bus. MAC drivers
+ * should use phylink, which will call this function for them. Returns
+ * a pointer to the allocated &struct sfp_bus.
+ *
+ * On error, returns %NULL.
+ */
+struct sfp_bus *sfp_register_upstream(struct fwnode_handle *fwnode,
 				      struct net_device *ndev, void *upstream,
 				      const struct sfp_upstream_ops *ops)
 {
-	struct sfp_bus *bus = sfp_bus_get(np);
+	struct sfp_bus *bus = sfp_bus_get(fwnode);
 	int ret = 0;
 
 	if (bus) {
@@ -353,6 +479,13 @@ struct sfp_bus *sfp_register_upstream(struct device_node *np,
 }
 EXPORT_SYMBOL_GPL(sfp_register_upstream);
 
+/**
+ * sfp_unregister_upstream() - Unregister sfp bus
+ * @bus: a pointer to the &struct sfp_bus structure for the sfp module
+ *
+ * Unregister a previously registered upstream connection for the SFP
+ * module. @bus is returned from sfp_register_upstream().
+ */
 void sfp_unregister_upstream(struct sfp_bus *bus)
 {
 	rtnl_lock();
@@ -434,7 +567,7 @@ EXPORT_SYMBOL_GPL(sfp_module_remove);
 struct sfp_bus *sfp_register_socket(struct device *dev, struct sfp *sfp,
 				    const struct sfp_socket_ops *ops)
 {
-	struct sfp_bus *bus = sfp_bus_get(dev->of_node);
+	struct sfp_bus *bus = sfp_bus_get(dev->fwnode);
 	int ret = 0;
 
 	if (bus) {
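
The device_node to fwnode_handle conversion lets non-DT platforms (e.g. ACPI) share the same SFP bus lookup. A hedged sketch of an upstream registering itself with the converted API; the myup_* callbacks and priv layout are hypothetical, and most MAC drivers should let phylink make this call for them:

static const struct sfp_upstream_ops myup_ops = {
	.module_insert	= myup_module_insert,
	.link_up	= myup_link_up,
	.link_down	= myup_link_down,
};

static int myup_attach(struct device *dev, struct net_device *ndev,
		       struct myup_priv *priv)
{
	/* dev_fwnode() covers both of_node and ACPI backed devices */
	priv->sfp_bus = sfp_register_upstream(dev_fwnode(dev), ndev,
					      priv, &myup_ops);
	if (!priv->sfp_bus)
		return -ENOMEM;

	return 0;
}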
diff --git a/drivers/net/phy/sfp.c b/drivers/net/phy/sfp.c
index 9dfc1c4c954f..6c7d9289078d 100644
--- a/drivers/net/phy/sfp.c
+++ b/drivers/net/phy/sfp.c
@@ -98,12 +98,18 @@ static const enum gpiod_flags gpio_flags[] = {
 
 static DEFINE_MUTEX(sfp_mutex);
 
+struct sff_data {
+	unsigned int gpios;
+	bool (*module_supported)(const struct sfp_eeprom_id *id);
+};
+
 struct sfp {
 	struct device *dev;
 	struct i2c_adapter *i2c;
 	struct mii_bus *i2c_mii;
 	struct sfp_bus *sfp_bus;
 	struct phy_device *mod_phy;
+	const struct sff_data *type;
 
 	unsigned int (*get_state)(struct sfp *);
 	void (*set_state)(struct sfp *, unsigned int);
@@ -123,6 +129,36 @@ struct sfp {
 	struct sfp_eeprom_id id;
 };
 
+static bool sff_module_supported(const struct sfp_eeprom_id *id)
+{
+	return id->base.phys_id == SFP_PHYS_ID_SFF &&
+	       id->base.phys_ext_id == SFP_PHYS_EXT_ID_SFP;
+}
+
+static const struct sff_data sff_data = {
+	.gpios = SFP_F_LOS | SFP_F_TX_FAULT | SFP_F_TX_DISABLE,
+	.module_supported = sff_module_supported,
+};
+
+static bool sfp_module_supported(const struct sfp_eeprom_id *id)
+{
+	return id->base.phys_id == SFP_PHYS_ID_SFP &&
+	       id->base.phys_ext_id == SFP_PHYS_EXT_ID_SFP;
+}
+
+static const struct sff_data sfp_data = {
+	.gpios = SFP_F_PRESENT | SFP_F_LOS | SFP_F_TX_FAULT |
+		 SFP_F_TX_DISABLE | SFP_F_RATE_SELECT,
+	.module_supported = sfp_module_supported,
+};
+
+static const struct of_device_id sfp_of_match[] = {
+	{ .compatible = "sff,sff", .data = &sff_data, },
+	{ .compatible = "sff,sfp", .data = &sfp_data, },
+	{ },
+};
+MODULE_DEVICE_TABLE(of, sfp_of_match);
+
 static unsigned long poll_jiffies;
 
 static unsigned int sfp_gpio_get_state(struct sfp *sfp)
@@ -141,6 +177,11 @@ static unsigned int sfp_gpio_get_state(struct sfp *sfp)
 	return state;
 }
 
+static unsigned int sff_gpio_get_state(struct sfp *sfp)
+{
+	return sfp_gpio_get_state(sfp) | SFP_F_PRESENT;
+}
+
 static void sfp_gpio_set_state(struct sfp *sfp, unsigned int state)
 {
 	if (state & SFP_F_PRESENT) {
@@ -315,12 +356,12 @@ static void sfp_sm_probe_phy(struct sfp *sfp)
 	msleep(T_PHY_RESET_MS);
 
 	phy = mdiobus_scan(sfp->i2c_mii, SFP_PHY_ADDR);
-	if (IS_ERR(phy)) {
-		dev_err(sfp->dev, "mdiobus scan returned %ld\n", PTR_ERR(phy));
+	if (phy == ERR_PTR(-ENODEV)) {
+		dev_info(sfp->dev, "no PHY detected\n");
 		return;
 	}
-	if (!phy) {
-		dev_info(sfp->dev, "no PHY detected\n");
+	if (IS_ERR(phy)) {
+		dev_err(sfp->dev, "mdiobus scan returned %ld\n", PTR_ERR(phy));
 		return;
 	}
 
@@ -425,11 +466,6 @@ static int sfp_sm_mod_probe(struct sfp *sfp)
 {
 	/* SFP module inserted - read I2C data */
 	struct sfp_eeprom_id id;
-	char vendor[17];
-	char part[17];
-	char sn[17];
-	char date[9];
-	char rev[5];
 	u8 check;
 	int err;
 
@@ -465,24 +501,17 @@ static int sfp_sm_mod_probe(struct sfp *sfp)
 
 	sfp->id = id;
 
-	memcpy(vendor, sfp->id.base.vendor_name, 16);
-	vendor[16] = '\0';
-	memcpy(part, sfp->id.base.vendor_pn, 16);
-	part[16] = '\0';
-	memcpy(rev, sfp->id.base.vendor_rev, 4);
-	rev[4] = '\0';
-	memcpy(sn, sfp->id.ext.vendor_sn, 16);
-	sn[16] = '\0';
-	memcpy(date, sfp->id.ext.datecode, 8);
-	date[8] = '\0';
-
-	dev_info(sfp->dev, "module %s %s rev %s sn %s dc %s\n",
-		 vendor, part, rev, sn, date);
-
-	/* We only support SFP modules, not the legacy GBIC modules. */
-	if (sfp->id.base.phys_id != SFP_PHYS_ID_SFP ||
-	    sfp->id.base.phys_ext_id != SFP_PHYS_EXT_ID_SFP) {
-		dev_err(sfp->dev, "module is not SFP - phys id 0x%02x 0x%02x\n",
+	dev_info(sfp->dev, "module %.*s %.*s rev %.*s sn %.*s dc %.*s\n",
+		 (int)sizeof(id.base.vendor_name), id.base.vendor_name,
+		 (int)sizeof(id.base.vendor_pn), id.base.vendor_pn,
+		 (int)sizeof(id.base.vendor_rev), id.base.vendor_rev,
+		 (int)sizeof(id.ext.vendor_sn), id.ext.vendor_sn,
+		 (int)sizeof(id.ext.datecode), id.ext.datecode);
+
+	/* Check whether we support this module */
+	if (!sfp->type->module_supported(&sfp->id)) {
+		dev_err(sfp->dev,
+			"module is not supported - phys id 0x%02x 0x%02x\n",
 			sfp->id.base.phys_id, sfp->id.base.phys_ext_id);
 		return -EINVAL;
 	}
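
The dev_info() rework above prints the fixed-width, space-padded EEPROM fields in place, using a printf precision equal to their exact size, instead of copying each into a NUL-terminated temporary. A standalone illustration of the %.*s idiom:

#include <stdio.h>

int main(void)
{
	/* exactly 16 bytes, space padded, deliberately not NUL-terminated */
	const char vendor_name[16] = "ACME OPTICS     ";

	printf("module %.*s\n", (int)sizeof(vendor_name), vendor_name);
	return 0;
}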
@@ -683,20 +712,19 @@ static int sfp_module_eeprom(struct sfp *sfp, struct ethtool_eeprom *ee,
 		len = min_t(unsigned int, last, ETH_MODULE_SFF_8079_LEN);
 		len -= first;
 
-		ret = sfp->read(sfp, false, first, data, len);
+		ret = sfp_read(sfp, false, first, data, len);
 		if (ret < 0)
 			return ret;
 
 		first += len;
 		data += len;
 	}
-	if (first >= ETH_MODULE_SFF_8079_LEN &&
-	    first < ETH_MODULE_SFF_8472_LEN) {
+	if (first < ETH_MODULE_SFF_8472_LEN && last > ETH_MODULE_SFF_8079_LEN) {
 		len = min_t(unsigned int, last, ETH_MODULE_SFF_8472_LEN);
 		len -= first;
 		first -= ETH_MODULE_SFF_8079_LEN;
 
-		ret = sfp->read(sfp, true, first, data, len);
+		ret = sfp_read(sfp, true, first, data, len);
 		if (ret < 0)
 			return ret;
 	}
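
The rewritten condition fixes reads that straddle the page boundary: previously a request beginning in the first 256 bytes but extending past them returned only the first chunk, because the second branch required first >= ETH_MODULE_SFF_8079_LEN. A standalone sketch of the corrected split, using the standard 256/512 byte lengths:

#include <stdio.h>

#define SFF_8079_LEN 256
#define SFF_8472_LEN 512

static void split(unsigned int off, unsigned int len)
{
	unsigned int first = off, last = off + len, n;

	if (first < SFF_8079_LEN) {
		n = (last < SFF_8079_LEN ? last : SFF_8079_LEN) - first;
		printf("A0h: offset %u, %u bytes\n", first, n);
		first += n;
	}
	if (first < SFF_8472_LEN && last > SFF_8079_LEN) {
		n = (last < SFF_8472_LEN ? last : SFF_8472_LEN) - first;
		printf("A2h: offset %u, %u bytes\n", first - SFF_8079_LEN, n);
	}
}

int main(void)
{
	split(200, 100);	/* straddles the page boundary: 56 + 44 bytes */
	return 0;
}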
@@ -801,6 +829,7 @@ static void sfp_cleanup(void *data)
 
 static int sfp_probe(struct platform_device *pdev)
 {
+	const struct sff_data *sff;
 	struct sfp *sfp;
 	bool poll = false;
 	int irq, err, i;
@@ -815,10 +844,19 @@ static int sfp_probe(struct platform_device *pdev)
 	if (err < 0)
 		return err;
 
+	sff = sfp->type = &sfp_data;
+
 	if (pdev->dev.of_node) {
 		struct device_node *node = pdev->dev.of_node;
+		const struct of_device_id *id;
 		struct device_node *np;
 
+		id = of_match_node(sfp_of_match, node);
+		if (WARN_ON(!id))
+			return -EINVAL;
+
+		sff = sfp->type = id->data;
+
 		np = of_parse_phandle(node, "i2c-bus", 0);
 		if (np) {
 			struct i2c_adapter *i2c;
@@ -834,17 +872,22 @@ static int sfp_probe(struct platform_device *pdev)
 				return err;
 			}
 		}
+	}
 
-		for (i = 0; i < GPIO_MAX; i++) {
+	for (i = 0; i < GPIO_MAX; i++)
+		if (sff->gpios & BIT(i)) {
 			sfp->gpio[i] = devm_gpiod_get_optional(sfp->dev,
 					   gpio_of_names[i], gpio_flags[i]);
 			if (IS_ERR(sfp->gpio[i]))
 				return PTR_ERR(sfp->gpio[i]);
 		}
 
-		sfp->get_state = sfp_gpio_get_state;
-		sfp->set_state = sfp_gpio_set_state;
-	}
+	sfp->get_state = sfp_gpio_get_state;
+	sfp->set_state = sfp_gpio_set_state;
+
+	/* Modules that have no detect signal are always present */
+	if (!(sfp->gpio[GPIO_MODDEF0]))
+		sfp->get_state = sff_gpio_get_state;
 
 	sfp->sfp_bus = sfp_register_socket(sfp->dev, sfp, &sfp_module_ops);
 	if (!sfp->sfp_bus)
@@ -899,12 +942,6 @@ static int sfp_remove(struct platform_device *pdev)
 	return 0;
 }
 
-static const struct of_device_id sfp_of_match[] = {
-	{ .compatible = "sff,sfp", },
-	{ },
-};
-MODULE_DEVICE_TABLE(of, sfp_of_match);
-
 static struct platform_driver sfp_driver = {
 	.probe = sfp_probe,
 	.remove = sfp_remove,
diff --git a/drivers/net/phy/smsc.c b/drivers/net/phy/smsc.c
index 2306bfae057f..be399d645224 100644
--- a/drivers/net/phy/smsc.c
+++ b/drivers/net/phy/smsc.c
@@ -227,8 +227,6 @@ static struct phy_driver smsc_phy_driver[] = {
 	.probe		= smsc_phy_probe,
 
 	/* basic functions */
-	.config_aneg	= genphy_config_aneg,
-	.read_status	= genphy_read_status,
 	.config_init	= smsc_phy_config_init,
 	.soft_reset	= smsc_phy_reset,
 
@@ -249,8 +247,6 @@ static struct phy_driver smsc_phy_driver[] = {
 	.probe		= smsc_phy_probe,
 
 	/* basic functions */
-	.config_aneg	= genphy_config_aneg,
-	.read_status	= genphy_read_status,
 	.config_init	= smsc_phy_config_init,
 	.soft_reset	= smsc_phy_reset,
 
@@ -276,7 +272,6 @@ static struct phy_driver smsc_phy_driver[] = {
 	.probe		= smsc_phy_probe,
 
 	/* basic functions */
-	.config_aneg	= genphy_config_aneg,
 	.read_status	= lan87xx_read_status,
 	.config_init	= smsc_phy_config_init,
 	.soft_reset	= smsc_phy_reset,
@@ -303,8 +298,6 @@ static struct phy_driver smsc_phy_driver[] = {
 	.probe		= smsc_phy_probe,
 
 	/* basic functions */
-	.config_aneg	= genphy_config_aneg,
-	.read_status	= genphy_read_status,
 	.config_init	= lan911x_config_init,
 
 	/* IRQ related */
@@ -319,12 +312,11 @@ static struct phy_driver smsc_phy_driver[] = {
 	.name		= "SMSC LAN8710/LAN8720",
 
 	.features	= PHY_BASIC_FEATURES,
-	.flags		= PHY_HAS_INTERRUPT,
+	.flags		= PHY_HAS_INTERRUPT | PHY_RST_AFTER_CLK_EN,
 
 	.probe		= smsc_phy_probe,
 
 	/* basic functions */
-	.config_aneg	= genphy_config_aneg,
 	.read_status	= lan87xx_read_status,
 	.config_init	= smsc_phy_config_init,
 	.soft_reset	= smsc_phy_reset,
@@ -351,7 +343,6 @@ static struct phy_driver smsc_phy_driver[] = {
 	.probe		= smsc_phy_probe,
 
 	/* basic functions */
-	.config_aneg	= genphy_config_aneg,
 	.read_status	= lan87xx_read_status,
 	.config_init	= smsc_phy_config_init,
 	.soft_reset	= smsc_phy_reset,
diff --git a/drivers/net/phy/ste10Xp.c b/drivers/net/phy/ste10Xp.c
index d00cfb64529e..fbd548a1ad84 100644
--- a/drivers/net/phy/ste10Xp.c
+++ b/drivers/net/phy/ste10Xp.c
@@ -89,8 +89,6 @@ static struct phy_driver ste10xp_pdriver[] = {
 	.features = PHY_BASIC_FEATURES | SUPPORTED_Pause,
 	.flags = PHY_HAS_INTERRUPT,
 	.config_init = ste10Xp_config_init,
-	.config_aneg = genphy_config_aneg,
-	.read_status = genphy_read_status,
 	.ack_interrupt = ste10Xp_ack_interrupt,
 	.config_intr = ste10Xp_config_intr,
 	.suspend = genphy_suspend,
@@ -102,8 +100,6 @@ static struct phy_driver ste10xp_pdriver[] = {
 	.features = PHY_BASIC_FEATURES | SUPPORTED_Pause,
 	.flags = PHY_HAS_INTERRUPT,
 	.config_init = ste10Xp_config_init,
-	.config_aneg = genphy_config_aneg,
-	.read_status = genphy_read_status,
 	.ack_interrupt = ste10Xp_ack_interrupt,
 	.config_intr = ste10Xp_config_intr,
 	.suspend = genphy_suspend,
diff --git a/drivers/net/phy/uPD60620.c b/drivers/net/phy/uPD60620.c
index 96b33475ea5e..55f48ee3595a 100644
--- a/drivers/net/phy/uPD60620.c
+++ b/drivers/net/phy/uPD60620.c
@@ -95,7 +95,6 @@ static struct phy_driver upd60620_driver[1] = { {
 	.features       = PHY_BASIC_FEATURES,
 	.flags          = 0,
 	.config_init    = upd60620_config_init,
-	.config_aneg    = genphy_config_aneg,
 	.read_status    = upd60620_read_status,
 } };
 
diff --git a/drivers/net/phy/vitesse.c b/drivers/net/phy/vitesse.c
index f78ff0279648..d9dd8fbfffc7 100644
--- a/drivers/net/phy/vitesse.c
+++ b/drivers/net/phy/vitesse.c
@@ -267,7 +267,6 @@ static struct phy_driver vsc82xx_driver[] = {
 	.flags          = PHY_HAS_INTERRUPT,
 	.config_init    = &vsc824x_config_init,
 	.config_aneg    = &vsc82x4_config_aneg,
-	.read_status    = &genphy_read_status,
 	.ack_interrupt  = &vsc824x_ack_interrupt,
 	.config_intr    = &vsc82xx_config_intr,
 }, {
@@ -278,7 +277,6 @@ static struct phy_driver vsc82xx_driver[] = {
 	.flags		= PHY_HAS_INTERRUPT,
 	.config_init	= &vsc824x_config_init,
 	.config_aneg	= &vsc82x4_config_aneg,
-	.read_status	= &genphy_read_status,
 	.ack_interrupt	= &vsc824x_ack_interrupt,
 	.config_intr	= &vsc82xx_config_intr,
 }, {
@@ -289,7 +287,6 @@ static struct phy_driver vsc82xx_driver[] = {
 	.flags		= PHY_HAS_INTERRUPT,
 	.config_init	= &vsc824x_config_init,
 	.config_aneg	= &vsc82x4_config_aneg,
-	.read_status	= &genphy_read_status,
 	.ack_interrupt	= &vsc824x_ack_interrupt,
 	.config_intr	= &vsc82xx_config_intr,
 }, {
@@ -300,7 +297,6 @@ static struct phy_driver vsc82xx_driver[] = {
 	.flags          = PHY_HAS_INTERRUPT,
 	.config_init    = &vsc824x_config_init,
 	.config_aneg    = &vsc82x4_config_aneg,
-	.read_status    = &genphy_read_status,
 	.ack_interrupt  = &vsc824x_ack_interrupt,
 	.config_intr    = &vsc82xx_config_intr,
 }, {
@@ -311,7 +307,6 @@ static struct phy_driver vsc82xx_driver[] = {
 	.flags          = PHY_HAS_INTERRUPT,
 	.config_init    = &vsc824x_config_init,
 	.config_aneg    = &vsc82x4_config_aneg,
-	.read_status    = &genphy_read_status,
 	.ack_interrupt  = &vsc824x_ack_interrupt,
 	.config_intr    = &vsc82xx_config_intr,
 }, {
@@ -321,8 +316,6 @@ static struct phy_driver vsc82xx_driver[] = {
 	.features       = PHY_GBIT_FEATURES,
 	.flags          = PHY_HAS_INTERRUPT,
 	.config_init    = &vsc8601_config_init,
-	.config_aneg    = &genphy_config_aneg,
-	.read_status    = &genphy_read_status,
 	.ack_interrupt  = &vsc824x_ack_interrupt,
 	.config_intr    = &vsc82xx_config_intr,
 }, {
@@ -333,7 +326,6 @@ static struct phy_driver vsc82xx_driver[] = {
 	.flags          = PHY_HAS_INTERRUPT,
 	.config_init    = &vsc824x_config_init,
 	.config_aneg    = &vsc82x4_config_aneg,
-	.read_status    = &genphy_read_status,
 	.ack_interrupt  = &vsc824x_ack_interrupt,
 	.config_intr    = &vsc82xx_config_intr,
 }, {
@@ -344,8 +336,6 @@ static struct phy_driver vsc82xx_driver[] = {
 	.features	= PHY_GBIT_FEATURES,
 	.flags		= PHY_HAS_INTERRUPT,
 	.config_init	= &vsc8221_config_init,
-	.config_aneg	= &genphy_config_aneg,
-	.read_status	= &genphy_read_status,
 	.ack_interrupt	= &vsc824x_ack_interrupt,
 	.config_intr	= &vsc82xx_config_intr,
 }, {
@@ -356,8 +346,6 @@ static struct phy_driver vsc82xx_driver[] = {
 	.features	= PHY_GBIT_FEATURES,
 	.flags		= PHY_HAS_INTERRUPT,
 	.config_init	= &vsc8221_config_init,
-	.config_aneg	= &genphy_config_aneg,
-	.read_status	= &genphy_read_status,
 	.ack_interrupt	= &vsc824x_ack_interrupt,
 	.config_intr	= &vsc82xx_config_intr,
 } };
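
The .config_aneg/.read_status deletions in qsemi, realtek, rockchip, smsc, ste10Xp, uPD60620 and vitesse all rely on the same phylib behaviour: when a driver leaves these callbacks NULL, the core falls back to genphy_config_aneg() and genphy_read_status(). A minimal sketch of a driver after this cleanup; the ID and name are hypothetical:

static struct phy_driver myphy_driver[] = { {
	.phy_id		= 0x00112233,		/* hypothetical OUI/model */
	.phy_id_mask	= 0xffffffff,
	.name		= "Example 10/100 PHY",
	.features	= PHY_BASIC_FEATURES,
	/* no .config_aneg/.read_status: the genphy variants are used */
	.suspend	= genphy_suspend,
	.resume		= genphy_resume,
} };
module_phy_driver(myphy_driver);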
diff --git a/drivers/net/slip/slip.c b/drivers/net/slip/slip.c
index cc63102ca96e..8940417c30e5 100644
--- a/drivers/net/slip/slip.c
+++ b/drivers/net/slip/slip.c
@@ -731,7 +731,7 @@ static void sl_sync(void)
 
 
 /* Find a free SLIP channel, and link in this `tty' line. */
-static struct slip *sl_alloc(dev_t line)
+static struct slip *sl_alloc(void)
 {
 	int i;
 	char name[IFNAMSIZ];
@@ -809,7 +809,7 @@ static int slip_open(struct tty_struct *tty)
 
 	/* OK.  Find a free SLIP channel to use. */
 	err = -ENFILE;
-	sl = sl_alloc(tty_devnum(tty));
+	sl = sl_alloc();
 	if (sl == NULL)
 		goto err_exit;
 
diff --git a/drivers/net/tap.c b/drivers/net/tap.c
index f39c6f876e67..0a5ed004781c 100644
--- a/drivers/net/tap.c
+++ b/drivers/net/tap.c
@@ -330,9 +330,6 @@ rx_handler_result_t tap_handle_frame(struct sk_buff **pskb)
 	if (!q)
 		return RX_HANDLER_PASS;
 
-	if (__skb_array_full(&q->skb_array))
-		goto drop;
-
 	skb_push(skb, ETH_HLEN);
 
 	/* Apply the forward feature mask so that we perform segmentation
@@ -348,7 +345,7 @@ rx_handler_result_t tap_handle_frame(struct sk_buff **pskb)
 			goto drop;
 
 		if (!segs) {
-			if (skb_array_produce(&q->skb_array, skb))
+			if (ptr_ring_produce(&q->ring, skb))
 				goto drop;
 			goto wake_up;
 		}
@@ -358,7 +355,7 @@ rx_handler_result_t tap_handle_frame(struct sk_buff **pskb)
 			struct sk_buff *nskb = segs->next;
 
 			segs->next = NULL;
-			if (skb_array_produce(&q->skb_array, segs)) {
+			if (ptr_ring_produce(&q->ring, segs)) {
 				kfree_skb(segs);
 				kfree_skb_list(nskb);
 				break;
@@ -375,7 +372,7 @@ rx_handler_result_t tap_handle_frame(struct sk_buff **pskb)
 		    !(features & NETIF_F_CSUM_MASK) &&
 		    skb_checksum_help(skb))
 			goto drop;
-		if (skb_array_produce(&q->skb_array, skb))
+		if (ptr_ring_produce(&q->ring, skb))
 			goto drop;
 	}
 
@@ -497,7 +494,7 @@ static void tap_sock_destruct(struct sock *sk)
 {
 	struct tap_queue *q = container_of(sk, struct tap_queue, sk);
 
-	skb_array_cleanup(&q->skb_array);
+	ptr_ring_cleanup(&q->ring, __skb_array_destroy_skb);
 }
 
 static int tap_open(struct inode *inode, struct file *file)
@@ -517,7 +514,7 @@ static int tap_open(struct inode *inode, struct file *file)
 					     &tap_proto, 0);
 	if (!q)
 		goto err;
-	if (skb_array_init(&q->skb_array, tap->dev->tx_queue_len, GFP_KERNEL)) {
+	if (ptr_ring_init(&q->ring, tap->dev->tx_queue_len, GFP_KERNEL)) {
 		sk_free(&q->sk);
 		goto err;
 	}
@@ -546,7 +543,7 @@ static int tap_open(struct inode *inode, struct file *file)
 
 	err = tap_set_queue(tap, file, q);
 	if (err) {
-		/* tap_sock_destruct() will take care of freeing skb_array */
+		/* tap_sock_destruct() will take care of freeing ptr_ring */
 		goto err_put;
 	}
 
@@ -583,7 +580,7 @@ static __poll_t tap_poll(struct file *file, poll_table *wait)
 	mask = 0;
 	poll_wait(file, &q->wq.wait, wait);
 
-	if (!skb_array_empty(&q->skb_array))
+	if (!ptr_ring_empty(&q->ring))
 		mask |= POLLIN | POLLRDNORM;
 
 	if (sock_writeable(&q->sk) ||
@@ -844,7 +841,7 @@ static ssize_t tap_do_read(struct tap_queue *q,
 					TASK_INTERRUPTIBLE);
 
 		/* Read frames from the queue */
-		skb = skb_array_consume(&q->skb_array);
+		skb = ptr_ring_consume(&q->ring);
 		if (skb)
 			break;
 		if (noblock) {
@@ -1176,7 +1173,7 @@ static int tap_peek_len(struct socket *sock)
 {
 	struct tap_queue *q = container_of(sock, struct tap_queue,
 					       sock);
-	return skb_array_peek_len(&q->skb_array);
+	return PTR_RING_PEEK_CALL(&q->ring, __skb_array_len_with_tag);
 }
 
 /* Ops structure to mimic raw sockets with tun */
@@ -1202,7 +1199,7 @@ struct socket *tap_get_socket(struct file *file)
 }
 EXPORT_SYMBOL_GPL(tap_get_socket);
 
-struct skb_array *tap_get_skb_array(struct file *file)
+struct ptr_ring *tap_get_ptr_ring(struct file *file)
 {
 	struct tap_queue *q;
 
@@ -1211,29 +1208,30 @@ struct skb_array *tap_get_skb_array(struct file *file)
 	q = file->private_data;
 	if (!q)
 		return ERR_PTR(-EBADFD);
-	return &q->skb_array;
+	return &q->ring;
 }
-EXPORT_SYMBOL_GPL(tap_get_skb_array);
+EXPORT_SYMBOL_GPL(tap_get_ptr_ring);
 
 int tap_queue_resize(struct tap_dev *tap)
 {
 	struct net_device *dev = tap->dev;
 	struct tap_queue *q;
-	struct skb_array **arrays;
+	struct ptr_ring **rings;
 	int n = tap->numqueues;
 	int ret, i = 0;
 
-	arrays = kmalloc_array(n, sizeof(*arrays), GFP_KERNEL);
-	if (!arrays)
+	rings = kmalloc_array(n, sizeof(*rings), GFP_KERNEL);
+	if (!rings)
 		return -ENOMEM;
 
 	list_for_each_entry(q, &tap->queue_list, next)
-		arrays[i++] = &q->skb_array;
+		rings[i++] = &q->ring;
 
-	ret = skb_array_resize_multiple(arrays, n,
-					dev->tx_queue_len, GFP_KERNEL);
+	ret = ptr_ring_resize_multiple(rings, n,
+				       dev->tx_queue_len, GFP_KERNEL,
+				       __skb_array_destroy_skb);
 
-	kfree(arrays);
+	kfree(rings);
 	return ret;
 }
 EXPORT_SYMBOL_GPL(tap_queue_resize);
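
tap's skb_array use is replaced with the underlying ptr_ring directly, since the ring must now also carry XDP buffers for tun; the price is that cleanup and resize need an explicit destructor (__skb_array_destroy_skb here), because ptr_ring only sees void pointers. A hedged sketch of the lifecycle, with error handling trimmed:

#include <linux/ptr_ring.h>
#include <linux/slab.h>

static void demo_destroy(void *ptr)
{
	kfree(ptr);
}

static int demo(void)
{
	struct ptr_ring ring;
	void *item;

	if (ptr_ring_init(&ring, 256, GFP_KERNEL))
		return -ENOMEM;

	item = kmalloc(64, GFP_KERNEL);
	if (item && ptr_ring_produce(&ring, item))	/* non-zero: ring full */
		kfree(item);

	item = ptr_ring_consume(&ring);			/* NULL when empty */
	kfree(item);

	/* the destructor runs for anything still queued */
	ptr_ring_cleanup(&ring, demo_destroy);
	return 0;
}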
diff --git a/drivers/net/tun.c b/drivers/net/tun.c
index 7548d8a11bdf..0dc66e4fbb2c 100644
--- a/drivers/net/tun.c
+++ b/drivers/net/tun.c
@@ -179,7 +179,8 @@ struct tun_file {
 	struct mutex napi_mutex;	/* Protects access to the above napi */
 	struct list_head next;
 	struct tun_struct *detached;
-	struct skb_array tx_array;
+	struct ptr_ring tx_ring;
+	struct xdp_rxq_info xdp_rxq;
 };
 
 struct tun_flow_entry {
@@ -195,6 +196,11 @@ struct tun_flow_entry {
 
 #define TUN_NUM_FLOW_ENTRIES 1024
 
+struct tun_prog {
+	struct rcu_head rcu;
+	struct bpf_prog *prog;
+};
+
 /* Since the sockets were moved to tun_file, to preserve the behavior of a
  * persistent device, the socket filter, sndbuf and vnet header size are
  * restored when the file is attached to a persistent device.
@@ -232,8 +238,33 @@ struct tun_struct {
 	u32 rx_batched;
 	struct tun_pcpu_stats __percpu *pcpu_stats;
 	struct bpf_prog __rcu *xdp_prog;
+	struct tun_prog __rcu *steering_prog;
+	struct tun_prog __rcu *filter_prog;
 };
 
+struct veth {
+	__be16 h_vlan_proto;
+	__be16 h_vlan_TCI;
+};
+
+bool tun_is_xdp_buff(void *ptr)
+{
+	return (unsigned long)ptr & TUN_XDP_FLAG;
+}
+EXPORT_SYMBOL(tun_is_xdp_buff);
+
+void *tun_xdp_to_ptr(void *ptr)
+{
+	return (void *)((unsigned long)ptr | TUN_XDP_FLAG);
+}
+EXPORT_SYMBOL(tun_xdp_to_ptr);
+
+void *tun_ptr_to_xdp(void *ptr)
+{
+	return (void *)((unsigned long)ptr & ~TUN_XDP_FLAG);
+}
+EXPORT_SYMBOL(tun_ptr_to_xdp);
+
 static int tun_napi_receive(struct napi_struct *napi, int budget)
 {
 	struct tun_file *tfile = container_of(napi, struct tun_file, napi);
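
The helpers above multiplex two object types through one ptr_ring by tagging the low bit of the pointer, which allocator alignment guarantees is clear. A standalone sketch of the technique; TUN_XDP_FLAG is assumed to be 0x1UL, matching its definition in the tun headers:

#define DEMO_XDP_FLAG 0x1UL	/* mirrors TUN_XDP_FLAG */

static inline bool demo_is_xdp(void *ptr)
{
	return (unsigned long)ptr & DEMO_XDP_FLAG;
}

static inline void *demo_xdp_to_ptr(void *xdp)
{
	/* bit 0 is free: kmalloc/page memory is at least 2-byte aligned */
	return (void *)((unsigned long)xdp | DEMO_XDP_FLAG);
}

static inline void *demo_ptr_to_xdp(void *ptr)
{
	return (void *)((unsigned long)ptr & ~DEMO_XDP_FLAG);
}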
@@ -537,15 +568,12 @@ static inline void tun_flow_save_rps_rxhash(struct tun_flow_entry *e, u32 hash)
  * different rxq no. here. If we could not get rxhash, then we would
  * hope the rxq no. may help here.
  */
-static u16 tun_select_queue(struct net_device *dev, struct sk_buff *skb,
-			    void *accel_priv, select_queue_fallback_t fallback)
+static u16 tun_automq_select_queue(struct tun_struct *tun, struct sk_buff *skb)
 {
-	struct tun_struct *tun = netdev_priv(dev);
 	struct tun_flow_entry *e;
 	u32 txq = 0;
 	u32 numqueues = 0;
 
-	rcu_read_lock();
 	numqueues = READ_ONCE(tun->numqueues);
 
 	txq = __skb_get_hash_symmetric(skb);
@@ -563,10 +591,37 @@ static u16 tun_select_queue(struct net_device *dev, struct sk_buff *skb,
 			txq -= numqueues;
 	}
 
-	rcu_read_unlock();
 	return txq;
 }
 
+static u16 tun_ebpf_select_queue(struct tun_struct *tun, struct sk_buff *skb)
+{
+	struct tun_prog *prog;
+	u16 ret = 0;
+
+	prog = rcu_dereference(tun->steering_prog);
+	if (prog)
+		ret = bpf_prog_run_clear_cb(prog->prog, skb);
+
+	return ret % tun->numqueues;
+}
+
+static u16 tun_select_queue(struct net_device *dev, struct sk_buff *skb,
+			    void *accel_priv, select_queue_fallback_t fallback)
+{
+	struct tun_struct *tun = netdev_priv(dev);
+	u16 ret;
+
+	rcu_read_lock();
+	if (rcu_dereference(tun->steering_prog))
+		ret = tun_ebpf_select_queue(tun, skb);
+	else
+		ret = tun_automq_select_queue(tun, skb);
+	rcu_read_unlock();
+
+	return ret;
+}
+
 static inline bool tun_not_capable(struct tun_struct *tun)
 {
 	const struct cred *cred = current_cred();
@@ -600,22 +655,36 @@ static struct tun_struct *tun_enable_queue(struct tun_file *tfile)
 	return tun;
 }
 
+static void tun_ptr_free(void *ptr)
+{
+	if (!ptr)
+		return;
+	if (tun_is_xdp_buff(ptr)) {
+		struct xdp_buff *xdp = tun_ptr_to_xdp(ptr);
+
+		put_page(virt_to_head_page(xdp->data));
+	} else {
+		__skb_array_destroy_skb(ptr);
+	}
+}
+
 static void tun_queue_purge(struct tun_file *tfile)
 {
-	struct sk_buff *skb;
+	void *ptr;
 
-	while ((skb = skb_array_consume(&tfile->tx_array)) != NULL)
-		kfree_skb(skb);
+	while ((ptr = ptr_ring_consume(&tfile->tx_ring)) != NULL)
+		tun_ptr_free(ptr);
 
 	skb_queue_purge(&tfile->sk.sk_write_queue);
 	skb_queue_purge(&tfile->sk.sk_error_queue);
 }
 
-static void tun_cleanup_tx_array(struct tun_file *tfile)
+static void tun_cleanup_tx_ring(struct tun_file *tfile)
 {
-	if (tfile->tx_array.ring.queue) {
-		skb_array_cleanup(&tfile->tx_array);
-		memset(&tfile->tx_array, 0, sizeof(tfile->tx_array));
+	if (tfile->tx_ring.queue) {
+		ptr_ring_cleanup(&tfile->tx_ring, tun_ptr_free);
+		xdp_rxq_info_unreg(&tfile->xdp_rxq);
+		memset(&tfile->tx_ring, 0, sizeof(tfile->tx_ring));
 	}
 }
 
@@ -665,7 +734,7 @@ static void __tun_detach(struct tun_file *tfile, bool clean)
 			    tun->dev->reg_state == NETREG_REGISTERED)
 				unregister_netdevice(tun->dev);
 		}
-		tun_cleanup_tx_array(tfile);
+		tun_cleanup_tx_ring(tfile);
 		sock_put(&tfile->sk);
 	}
 }
@@ -680,7 +749,6 @@ static void tun_detach(struct tun_file *tfile, bool clean)
 static void tun_detach_all(struct net_device *dev)
 {
 	struct tun_struct *tun = netdev_priv(dev);
-	struct bpf_prog *xdp_prog = rtnl_dereference(tun->xdp_prog);
 	struct tun_file *tfile, *tmp;
 	int i, n = tun->numqueues;
 
@@ -707,19 +775,16 @@ static void tun_detach_all(struct net_device *dev)
 		/* Drop read queue */
 		tun_queue_purge(tfile);
 		sock_put(&tfile->sk);
-		tun_cleanup_tx_array(tfile);
+		tun_cleanup_tx_ring(tfile);
 	}
 	list_for_each_entry_safe(tfile, tmp, &tun->disabled, next) {
 		tun_enable_queue(tfile);
 		tun_queue_purge(tfile);
 		sock_put(&tfile->sk);
-		tun_cleanup_tx_array(tfile);
+		tun_cleanup_tx_ring(tfile);
 	}
 	BUG_ON(tun->numdisabled != 0);
 
-	if (xdp_prog)
-		bpf_prog_put(xdp_prog);
-
 	if (tun->flags & IFF_PERSIST)
 		module_put(THIS_MODULE);
 }
@@ -760,13 +825,29 @@ static int tun_attach(struct tun_struct *tun, struct file *file,
 	}
 
 	if (!tfile->detached &&
-	    skb_array_init(&tfile->tx_array, dev->tx_queue_len, GFP_KERNEL)) {
+	    ptr_ring_init(&tfile->tx_ring, dev->tx_queue_len, GFP_KERNEL)) {
 		err = -ENOMEM;
 		goto out;
 	}
 
 	tfile->queue_index = tun->numqueues;
 	tfile->socket.sk->sk_shutdown &= ~RCV_SHUTDOWN;
+
+	if (tfile->detached) {
+		/* Re-attach detached tfile, updating XDP queue_index */
+		WARN_ON(!xdp_rxq_info_is_reg(&tfile->xdp_rxq));
+
+		if (tfile->xdp_rxq.queue_index    != tfile->queue_index)
+			tfile->xdp_rxq.queue_index = tfile->queue_index;
+	} else {
+		/* Setup XDP RX-queue info, for new tfile getting attached */
+		err = xdp_rxq_info_reg(&tfile->xdp_rxq,
+				       tun->dev, tfile->queue_index);
+		if (err < 0)
+			goto out;
+		err = 0;
+	}
+
 	rcu_assign_pointer(tfile->tun, tun);
 	rcu_assign_pointer(tun->tfiles[tun->numqueues], tfile);
 	tun->numqueues++;
@@ -946,23 +1027,10 @@ static int tun_net_close(struct net_device *dev)
 }
 
 /* Net device start xmit */
-static netdev_tx_t tun_net_xmit(struct sk_buff *skb, struct net_device *dev)
+static void tun_automq_xmit(struct tun_struct *tun, struct sk_buff *skb)
 {
-	struct tun_struct *tun = netdev_priv(dev);
-	int txq = skb->queue_mapping;
-	struct tun_file *tfile;
-	u32 numqueues = 0;
-
-	rcu_read_lock();
-	tfile = rcu_dereference(tun->tfiles[txq]);
-	numqueues = READ_ONCE(tun->numqueues);
-
-	/* Drop packet if interface is not attached */
-	if (txq >= numqueues)
-		goto drop;
-
 #ifdef CONFIG_RPS
-	if (numqueues == 1 && static_key_false(&rps_needed)) {
+	if (tun->numqueues == 1 && static_key_false(&rps_needed)) {
 		/* Select queue was not called for the skbuff, so we extract the
 		 * RPS hash and save it into the flow_table here.
 		 */
@@ -978,6 +1046,37 @@ static netdev_tx_t tun_net_xmit(struct sk_buff *skb, struct net_device *dev)
 		}
 	}
 #endif
+}
+
+static unsigned int run_ebpf_filter(struct tun_struct *tun,
+				    struct sk_buff *skb,
+				    int len)
+{
+	struct tun_prog *prog = rcu_dereference(tun->filter_prog);
+
+	if (prog)
+		len = bpf_prog_run_clear_cb(prog->prog, skb);
+
+	return len;
+}
+
+/* Net device start xmit */
+static netdev_tx_t tun_net_xmit(struct sk_buff *skb, struct net_device *dev)
+{
+	struct tun_struct *tun = netdev_priv(dev);
+	int txq = skb->queue_mapping;
+	struct tun_file *tfile;
+	int len = skb->len;
+
+	rcu_read_lock();
+	tfile = rcu_dereference(tun->tfiles[txq]);
+
+	/* Drop packet if interface is not attached */
+	if (txq >= tun->numqueues)
+		goto drop;
+
+	if (!rcu_dereference(tun->steering_prog))
+		tun_automq_xmit(tun, skb);
 
 	tun_debug(KERN_INFO, tun, "tun_net_xmit %d\n", skb->len);
 
@@ -993,6 +1092,15 @@ static netdev_tx_t tun_net_xmit(struct sk_buff *skb, struct net_device *dev)
 	    sk_filter(tfile->socket.sk, skb))
 		goto drop;
 
+	len = run_ebpf_filter(tun, skb, len);
+
+	/* Trim extra bytes since we may insert vlan proto & TCI
+	 * in tun_put_user().
+	 */
+	len -= skb_vlan_tag_present(skb) ? sizeof(struct veth) : 0;
+	if (len <= 0 || pskb_trim(skb, len))
+		goto drop;
+
 	if (unlikely(skb_orphan_frags_rx(skb, GFP_ATOMIC)))
 		goto drop;
 
@@ -1005,7 +1113,7 @@ static netdev_tx_t tun_net_xmit(struct sk_buff *skb, struct net_device *dev)
 
 	nf_reset(skb);
 
-	if (skb_array_produce(&tfile->tx_array, skb))
+	if (ptr_ring_produce(&tfile->tx_ring, skb))
 		goto drop;
 
 	/* Notify and wake up reader process */
@@ -1178,6 +1286,67 @@ static const struct net_device_ops tun_netdev_ops = {
 	.ndo_get_stats64	= tun_net_get_stats64,
 };
 
+static int tun_xdp_xmit(struct net_device *dev, struct xdp_buff *xdp)
+{
+	struct tun_struct *tun = netdev_priv(dev);
+	struct xdp_buff *buff = xdp->data_hard_start;
+	int headroom = xdp->data - xdp->data_hard_start;
+	struct tun_file *tfile;
+	u32 numqueues;
+	int ret = 0;
+
+	/* Ensure headroom is available and buff is properly aligned */
+	if (unlikely(headroom < sizeof(*xdp) || tun_is_xdp_buff(xdp)))
+		return -ENOSPC;
+
+	*buff = *xdp;
+
+	rcu_read_lock();
+
+	numqueues = READ_ONCE(tun->numqueues);
+	if (!numqueues) {
+		ret = -ENOSPC;
+		goto out;
+	}
+
+	tfile = rcu_dereference(tun->tfiles[smp_processor_id() %
+					    numqueues]);
+	/* Encode the XDP flag into the lowest bit so the consumer can
+	 * distinguish an XDP buffer from an sk_buff.
+	 */
+	if (ptr_ring_produce(&tfile->tx_ring, tun_xdp_to_ptr(buff))) {
+		this_cpu_inc(tun->pcpu_stats->tx_dropped);
+		ret = -ENOSPC;
+	}
+
+out:
+	rcu_read_unlock();
+	return ret;
+}
+
+static void tun_xdp_flush(struct net_device *dev)
+{
+	struct tun_struct *tun = netdev_priv(dev);
+	struct tun_file *tfile;
+	u32 numqueues;
+
+	rcu_read_lock();
+
+	numqueues = READ_ONCE(tun->numqueues);
+	if (!numqueues)
+		goto out;
+
+	tfile = rcu_dereference(tun->tfiles[smp_processor_id() %
+					    numqueues]);
+	/* Notify and wake up reader process */
+	if (tfile->flags & TUN_FASYNC)
+		kill_fasync(&tfile->fasync, SIGIO, POLL_IN);
+	tfile->socket.sk->sk_data_ready(tfile->socket.sk);
+
+out:
+	rcu_read_unlock();
+}
+
 static const struct net_device_ops tap_netdev_ops = {
 	.ndo_uninit		= tun_net_uninit,
 	.ndo_open		= tun_net_open,
@@ -1195,6 +1364,8 @@ static const struct net_device_ops tap_netdev_ops = {
 	.ndo_set_rx_headroom	= tun_set_headroom,
 	.ndo_get_stats64	= tun_net_get_stats64,
 	.ndo_bpf		= tun_xdp,
+	.ndo_xdp_xmit		= tun_xdp_xmit,
+	.ndo_xdp_flush		= tun_xdp_flush,
 };
 
 static void tun_flow_init(struct tun_struct *tun)
@@ -1273,7 +1444,7 @@ static __poll_t tun_chr_poll(struct file *file, poll_table *wait)
 
 	poll_wait(file, sk_sleep(sk), wait);
 
-	if (!skb_array_empty(&tfile->tx_array))
+	if (!ptr_ring_empty(&tfile->tx_ring))
 		mask |= POLLIN | POLLRDNORM;
 
 	if (tun->dev->flags & IFF_UP &&
@@ -1486,6 +1657,7 @@ static struct sk_buff *tun_build_skb(struct tun_struct *tun,
 		xdp.data = buf + pad;
 		xdp_set_data_meta_invalid(&xdp);
 		xdp.data_end = xdp.data + len;
+		xdp.rxq = &tfile->xdp_rxq;
 		orig_data = xdp.data;
 		act = bpf_prog_run_xdp(xdp_prog, &xdp);
 
@@ -1560,7 +1732,7 @@ static ssize_t tun_get_user(struct tun_struct *tun, struct tun_file *tfile,
 	int copylen;
 	bool zerocopy = false;
 	int err;
-	u32 rxhash;
+	u32 rxhash = 0;
 	int skb_xdp = 1;
 	bool frags = tun_napi_frags_enabled(tun);
 
@@ -1748,7 +1920,10 @@ static ssize_t tun_get_user(struct tun_struct *tun, struct tun_file *tfile,
 		rcu_read_unlock();
 	}
 
-	rxhash = __skb_get_hash_symmetric(skb);
+	rcu_read_lock();
+	if (!rcu_dereference(tun->steering_prog))
+		rxhash = __skb_get_hash_symmetric(skb);
+	rcu_read_unlock();
 
 	if (frags) {
 		/* Exercise flow dissector code path. */
@@ -1792,7 +1967,9 @@ static ssize_t tun_get_user(struct tun_struct *tun, struct tun_file *tfile,
 	u64_stats_update_end(&stats->syncp);
 	put_cpu_ptr(stats);
 
-	tun_flow_update(tun, rxhash, tfile);
+	if (rxhash)
+		tun_flow_update(tun, rxhash, tfile);
+
 	return total_len;
 }
 
@@ -1813,6 +1990,40 @@ static ssize_t tun_chr_write_iter(struct kiocb *iocb, struct iov_iter *from)
 	return result;
 }
 
+static ssize_t tun_put_user_xdp(struct tun_struct *tun,
+				struct tun_file *tfile,
+				struct xdp_buff *xdp,
+				struct iov_iter *iter)
+{
+	int vnet_hdr_sz = 0;
+	size_t size = xdp->data_end - xdp->data;
+	struct tun_pcpu_stats *stats;
+	size_t ret;
+
+	if (tun->flags & IFF_VNET_HDR) {
+		struct virtio_net_hdr gso = { 0 };
+
+		vnet_hdr_sz = READ_ONCE(tun->vnet_hdr_sz);
+		if (unlikely(iov_iter_count(iter) < vnet_hdr_sz))
+			return -EINVAL;
+		if (unlikely(copy_to_iter(&gso, sizeof(gso), iter) !=
+			     sizeof(gso)))
+			return -EFAULT;
+		iov_iter_advance(iter, vnet_hdr_sz - sizeof(gso));
+	}
+
+	ret = copy_to_iter(xdp->data, size, iter) + vnet_hdr_sz;
+
+	stats = get_cpu_ptr(tun->pcpu_stats);
+	u64_stats_update_begin(&stats->syncp);
+	stats->tx_packets++;
+	stats->tx_bytes += ret;
+	u64_stats_update_end(&stats->syncp);
+	put_cpu_ptr(tun->pcpu_stats);
+
+	return ret;
+}
+
 /* Put packet to the user space buffer */
 static ssize_t tun_put_user(struct tun_struct *tun,
 			    struct tun_file *tfile,
@@ -1877,10 +2088,7 @@ static ssize_t tun_put_user(struct tun_struct *tun,
 
 	if (vlan_hlen) {
 		int ret;
-		struct {
-			__be16 h_vlan_proto;
-			__be16 h_vlan_TCI;
-		} veth;
+		struct veth veth;
 
 		veth.h_vlan_proto = skb->vlan_proto;
 		veth.h_vlan_TCI = htons(skb_vlan_tag_get(skb));
@@ -1910,15 +2118,14 @@ done:
 	return total;
 }
 
-static struct sk_buff *tun_ring_recv(struct tun_file *tfile, int noblock,
-				     int *err)
+static void *tun_ring_recv(struct tun_file *tfile, int noblock, int *err)
 {
 	DECLARE_WAITQUEUE(wait, current);
-	struct sk_buff *skb = NULL;
+	void *ptr = NULL;
 	int error = 0;
 
-	skb = skb_array_consume(&tfile->tx_array);
-	if (skb)
+	ptr = ptr_ring_consume(&tfile->tx_ring);
+	if (ptr)
 		goto out;
 	if (noblock) {
 		error = -EAGAIN;
@@ -1929,8 +2136,8 @@ static struct sk_buff *tun_ring_recv(struct tun_file *tfile, int noblock,
 	current->state = TASK_INTERRUPTIBLE;
 
 	while (1) {
-		skb = skb_array_consume(&tfile->tx_array);
-		if (skb)
+		ptr = ptr_ring_consume(&tfile->tx_ring);
+		if (ptr)
 			break;
 		if (signal_pending(current)) {
 			error = -ERESTARTSYS;
@@ -1949,12 +2156,12 @@ static struct sk_buff *tun_ring_recv(struct tun_file *tfile, int noblock,
 
 out:
 	*err = error;
-	return skb;
+	return ptr;
 }
 
 static ssize_t tun_do_read(struct tun_struct *tun, struct tun_file *tfile,
 			   struct iov_iter *to,
-			   int noblock, struct sk_buff *skb)
+			   int noblock, void *ptr)
 {
 	ssize_t ret;
 	int err;
@@ -1962,23 +2169,31 @@ static ssize_t tun_do_read(struct tun_struct *tun, struct tun_file *tfile,
 	tun_debug(KERN_INFO, tun, "tun_do_read\n");
 
 	if (!iov_iter_count(to)) {
-		if (skb)
-			kfree_skb(skb);
+		tun_ptr_free(ptr);
 		return 0;
 	}
 
-	if (!skb) {
+	if (!ptr) {
 		/* Read frames from ring */
-		skb = tun_ring_recv(tfile, noblock, &err);
-		if (!skb)
+		ptr = tun_ring_recv(tfile, noblock, &err);
+		if (!ptr)
 			return err;
 	}
 
-	ret = tun_put_user(tun, tfile, skb, to);
-	if (unlikely(ret < 0))
-		kfree_skb(skb);
-	else
-		consume_skb(skb);
+	if (tun_is_xdp_buff(ptr)) {
+		struct xdp_buff *xdp = tun_ptr_to_xdp(ptr);
+
+		ret = tun_put_user_xdp(tun, tfile, xdp, to);
+		put_page(virt_to_head_page(xdp->data));
+	} else {
+		struct sk_buff *skb = ptr;
+
+		ret = tun_put_user(tun, tfile, skb, to);
+		if (unlikely(ret < 0))
+			kfree_skb(skb);
+		else
+			consume_skb(skb);
+	}
 
 	return ret;
 }
@@ -2000,6 +2215,39 @@ static ssize_t tun_chr_read_iter(struct kiocb *iocb, struct iov_iter *to)
 	return ret;
 }
 
+static void tun_prog_free(struct rcu_head *rcu)
+{
+	struct tun_prog *prog = container_of(rcu, struct tun_prog, rcu);
+
+	bpf_prog_destroy(prog->prog);
+	kfree(prog);
+}
+
+static int __tun_set_ebpf(struct tun_struct *tun,
+			  struct tun_prog __rcu **prog_p,
+			  struct bpf_prog *prog)
+{
+	struct tun_prog *old, *new = NULL;
+
+	if (prog) {
+		new = kmalloc(sizeof(*new), GFP_KERNEL);
+		if (!new)
+			return -ENOMEM;
+		new->prog = prog;
+	}
+
+	spin_lock_bh(&tun->lock);
+	old = rcu_dereference_protected(*prog_p,
+					lockdep_is_held(&tun->lock));
+	rcu_assign_pointer(*prog_p, new);
+	spin_unlock_bh(&tun->lock);
+
+	if (old)
+		call_rcu(&old->rcu, tun_prog_free);
+
+	return 0;
+}
+
 static void tun_free_netdev(struct net_device *dev)
 {
 	struct tun_struct *tun = netdev_priv(dev);
@@ -2008,6 +2256,8 @@ static void tun_free_netdev(struct net_device *dev)
 	free_percpu(tun->pcpu_stats);
 	tun_flow_uninit(tun);
 	security_tun_dev_free_security(tun->security);
+	__tun_set_ebpf(tun, &tun->steering_prog, NULL);
+	__tun_set_ebpf(tun, &tun->filter_prog, NULL);
 }
 
 static void tun_setup(struct net_device *dev)
@@ -2081,12 +2331,12 @@ static int tun_recvmsg(struct socket *sock, struct msghdr *m, size_t total_len,
 {
 	struct tun_file *tfile = container_of(sock, struct tun_file, socket);
 	struct tun_struct *tun = tun_get(tfile);
-	struct sk_buff *skb = m->msg_control;
+	void *ptr = m->msg_control;
 	int ret;
 
 	if (!tun) {
 		ret = -EBADFD;
-		goto out_free_skb;
+		goto out_free;
 	}
 
 	if (flags & ~(MSG_DONTWAIT|MSG_TRUNC|MSG_ERRQUEUE)) {
@@ -2098,7 +2348,7 @@ static int tun_recvmsg(struct socket *sock, struct msghdr *m, size_t total_len,
 					 SOL_PACKET, TUN_TX_TIMESTAMP);
 		goto out;
 	}
-	ret = tun_do_read(tun, tfile, &m->msg_iter, flags & MSG_DONTWAIT, skb);
+	ret = tun_do_read(tun, tfile, &m->msg_iter, flags & MSG_DONTWAIT, ptr);
 	if (ret > (ssize_t)total_len) {
 		m->msg_flags |= MSG_TRUNC;
 		ret = flags & MSG_TRUNC ? ret : total_len;
@@ -2109,12 +2359,25 @@ out:
 
 out_put_tun:
 	tun_put(tun);
-out_free_skb:
-	if (skb)
-		kfree_skb(skb);
+out_free:
+	tun_ptr_free(ptr);
 	return ret;
 }
 
+static int tun_ptr_peek_len(void *ptr)
+{
+	if (likely(ptr)) {
+		if (tun_is_xdp_buff(ptr)) {
+			struct xdp_buff *xdp = tun_ptr_to_xdp(ptr);
+
+			return xdp->data_end - xdp->data;
+		}
+		return __skb_array_len_with_tag(ptr);
+	} else {
+		return 0;
+	}
+}
+
 static int tun_peek_len(struct socket *sock)
 {
 	struct tun_file *tfile = container_of(sock, struct tun_file, socket);
@@ -2125,7 +2388,7 @@ static int tun_peek_len(struct socket *sock)
 	if (!tun)
 		return 0;
 
-	ret = skb_array_peek_len(&tfile->tx_array);
+	ret = PTR_RING_PEEK_CALL(&tfile->tx_ring, tun_ptr_peek_len);
 	tun_put(tun);
 
 	return ret;
@@ -2296,6 +2559,7 @@ static int tun_set_iff(struct net *net, struct file *file, struct ifreq *ifr)
 		tun->filter_attached = false;
 		tun->sndbuf = tfile->socket.sk->sk_sndbuf;
 		tun->rx_batched = 0;
+		RCU_INIT_POINTER(tun->steering_prog, NULL);
 
 		tun->pcpu_stats = netdev_alloc_pcpu_stats(struct tun_pcpu_stats);
 		if (!tun->pcpu_stats) {
@@ -2488,6 +2752,26 @@ unlock:
 	return ret;
 }
 
+static int tun_set_ebpf(struct tun_struct *tun, struct tun_prog **prog_p,
+			void __user *data)
+{
+	struct bpf_prog *prog;
+	int fd;
+
+	if (copy_from_user(&fd, data, sizeof(fd)))
+		return -EFAULT;
+
+	if (fd == -1) {
+		prog = NULL;
+	} else {
+		prog = bpf_prog_get_type(fd, BPF_PROG_TYPE_SOCKET_FILTER);
+		if (IS_ERR(prog))
+			return PTR_ERR(prog);
+	}
+
+	return __tun_set_ebpf(tun, prog_p, prog);
+}
+
 static long __tun_chr_ioctl(struct file *file, unsigned int cmd,
 			    unsigned long arg, int ifreq_len)
 {
@@ -2764,6 +3048,14 @@ static long __tun_chr_ioctl(struct file *file, unsigned int cmd,
 		ret = 0;
 		break;
 
+	case TUNSETSTEERINGEBPF:
+		ret = tun_set_ebpf(tun, &tun->steering_prog, argp);
+		break;
+
+	case TUNSETFILTEREBPF:
+		ret = tun_set_ebpf(tun, &tun->filter_prog, argp);
+		break;
+
 	default:
 		ret = -EINVAL;
 		break;
@@ -2860,7 +3152,7 @@ static int tun_chr_open(struct inode *inode, struct file * file)
 
 	sock_set_flag(&tfile->sk, SOCK_ZEROCOPY);
 
-	memset(&tfile->tx_array, 0, sizeof(tfile->tx_array));
+	memset(&tfile->tx_ring, 0, sizeof(tfile->tx_ring));
 
 	return 0;
 }
@@ -3009,25 +3301,26 @@ static int tun_queue_resize(struct tun_struct *tun)
 {
 	struct net_device *dev = tun->dev;
 	struct tun_file *tfile;
-	struct skb_array **arrays;
+	struct ptr_ring **rings;
 	int n = tun->numqueues + tun->numdisabled;
 	int ret, i;
 
-	arrays = kmalloc_array(n, sizeof(*arrays), GFP_KERNEL);
-	if (!arrays)
+	rings = kmalloc_array(n, sizeof(*rings), GFP_KERNEL);
+	if (!rings)
 		return -ENOMEM;
 
 	for (i = 0; i < tun->numqueues; i++) {
 		tfile = rtnl_dereference(tun->tfiles[i]);
-		arrays[i] = &tfile->tx_array;
+		rings[i] = &tfile->tx_ring;
 	}
 	list_for_each_entry(tfile, &tun->disabled, next)
-		arrays[i++] = &tfile->tx_array;
+		rings[i++] = &tfile->tx_ring;
 
-	ret = skb_array_resize_multiple(arrays, n,
-					dev->tx_queue_len, GFP_KERNEL);
+	ret = ptr_ring_resize_multiple(rings, n,
+				       dev->tx_queue_len, GFP_KERNEL,
+				       tun_ptr_free);
 
-	kfree(arrays);
+	kfree(rings);
 	return ret;
 }
 
@@ -3113,7 +3406,7 @@ struct socket *tun_get_socket(struct file *file)
 }
 EXPORT_SYMBOL_GPL(tun_get_socket);
 
-struct skb_array *tun_get_skb_array(struct file *file)
+struct ptr_ring *tun_get_tx_ring(struct file *file)
 {
 	struct tun_file *tfile;
 
@@ -3122,9 +3415,9 @@ struct skb_array *tun_get_skb_array(struct file *file)
 	tfile = file->private_data;
 	if (!tfile)
 		return ERR_PTR(-EBADFD);
-	return &tfile->tx_array;
+	return &tfile->tx_ring;
 }
-EXPORT_SYMBOL_GPL(tun_get_skb_array);
+EXPORT_SYMBOL_GPL(tun_get_tx_ring);
 
 module_init(tun_init);
 module_exit(tun_cleanup);
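
The TUNSETSTEERINGEBPF/TUNSETFILTEREBPF ioctls added above each read a single int from userspace: a BPF program fd, or -1 to detach (see tun_set_ebpf()). A minimal userspace sketch, assuming prog_fd refers to an already-loaded BPF_PROG_TYPE_SOCKET_FILTER program and that TUNSETSTEERINGEBPF is exported by this series via linux/if_tun.h; the device name is illustrative:

/* Userspace sketch: attach an eBPF steering program to a multiqueue
 * tap device.  prog_fd is assumed to be a loaded
 * BPF_PROG_TYPE_SOCKET_FILTER program; "tap0" is an illustrative name.
 */
#include <fcntl.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/if.h>
#include <linux/if_tun.h>

static int tap_attach_steering(int prog_fd)
{
	struct ifreq ifr;
	int fd = open("/dev/net/tun", O_RDWR);

	if (fd < 0)
		return -1;

	memset(&ifr, 0, sizeof(ifr));
	ifr.ifr_flags = IFF_TAP | IFF_NO_PI | IFF_MULTI_QUEUE;
	strncpy(ifr.ifr_name, "tap0", IFNAMSIZ - 1);

	/* tun_set_ebpf() copies one int: the prog fd, or -1 to detach */
	if (ioctl(fd, TUNSETIFF, &ifr) < 0 ||
	    ioctl(fd, TUNSETSTEERINGEBPF, &prog_fd) < 0) {
		close(fd);
		return -1;
	}
	return fd;
}
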
diff --git a/drivers/net/usb/qmi_wwan.c b/drivers/net/usb/qmi_wwan.c
index 728819feab44..76ac48095c29 100644
--- a/drivers/net/usb/qmi_wwan.c
+++ b/drivers/net/usb/qmi_wwan.c
@@ -826,7 +826,7 @@ err:
 
 static const struct driver_info	qmi_wwan_info = {
 	.description	= "WWAN/QMI device",
-	.flags		= FLAG_WWAN,
+	.flags		= FLAG_WWAN | FLAG_SEND_ZLP,
 	.bind		= qmi_wwan_bind,
 	.unbind		= qmi_wwan_unbind,
 	.manage_power	= qmi_wwan_manage_power,
@@ -835,7 +835,7 @@ static const struct driver_info	qmi_wwan_info = {
 
 static const struct driver_info	qmi_wwan_info_quirk_dtr = {
 	.description	= "WWAN/QMI device",
-	.flags		= FLAG_WWAN,
+	.flags		= FLAG_WWAN | FLAG_SEND_ZLP,
 	.bind		= qmi_wwan_bind,
 	.unbind		= qmi_wwan_unbind,
 	.manage_power	= qmi_wwan_manage_power,
@@ -1245,6 +1245,7 @@ static const struct usb_device_id products[] = {
 	{QMI_QUIRK_SET_DTR(0x2c7c, 0x0125, 4)},	/* Quectel EC25, EC20 R2.0  Mini PCIe */
 	{QMI_QUIRK_SET_DTR(0x2c7c, 0x0121, 4)},	/* Quectel EC21 Mini PCIe */
 	{QMI_FIXED_INTF(0x2c7c, 0x0296, 4)},	/* Quectel BG96 */
+	{QMI_QUIRK_SET_DTR(0x2c7c, 0x0306, 4)},	/* Quectel EP06 Mini PCIe */
 
 	/* 4. Gobi 1000 devices */
 	{QMI_GOBI1K_DEVICE(0x05c6, 0x9212)},	/* Acer Gobi Modem Device */
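
FLAG_SEND_ZLP above makes usbnet terminate bulk OUT transfers whose length is an exact multiple of the endpoint's wMaxPacketSize with a zero-length packet; without the ZLP the device cannot tell that such a transfer has ended. A sketch of the boundary condition (the helper name and the 512-byte high-speed bulk packet size are illustrative):

/* Sketch: when a bulk OUT transfer needs a trailing zero-length
 * packet, which is what FLAG_SEND_ZLP requests from usbnet.
 */
#include <stdbool.h>

static bool needs_zlp(unsigned int len, unsigned int max_packet)
{
	/* An exact multiple would otherwise leave the transfer
	 * looking unterminated to the device.
	 */
	return len != 0 && (len % max_packet) == 0;
}
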
diff --git a/drivers/net/veth.c b/drivers/net/veth.c
index f5438d0978ca..a69ad39ee57e 100644
--- a/drivers/net/veth.c
+++ b/drivers/net/veth.c
@@ -410,6 +410,9 @@ static int veth_newlink(struct net *src_net, struct net_device *dev,
 	if (ifmp && (dev->ifindex != 0))
 		peer->ifindex = ifmp->ifi_index;
 
+	peer->gso_max_size = dev->gso_max_size;
+	peer->gso_max_segs = dev->gso_max_segs;
+
 	err = register_netdevice(peer);
 	put_net(net);
 	net = NULL;
diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
index 559b215c0169..626c27352ae2 100644
--- a/drivers/net/virtio_net.c
+++ b/drivers/net/virtio_net.c
@@ -31,6 +31,7 @@
 #include <linux/average.h>
 #include <linux/filter.h>
 #include <net/route.h>
+#include <net/xdp.h>
 
 static int napi_weight = NAPI_POLL_WEIGHT;
 module_param(napi_weight, int, 0444);
@@ -65,16 +66,39 @@ static const unsigned long guest_offloads[] = {
 	VIRTIO_NET_F_GUEST_UFO
 };
 
-struct virtnet_stats {
-	struct u64_stats_sync tx_syncp;
-	struct u64_stats_sync rx_syncp;
-	u64 tx_bytes;
-	u64 tx_packets;
+struct virtnet_stat_desc {
+	char desc[ETH_GSTRING_LEN];
+	size_t offset;
+};
+
+struct virtnet_sq_stats {
+	struct u64_stats_sync syncp;
+	u64 packets;
+	u64 bytes;
+};
+
+struct virtnet_rq_stats {
+	struct u64_stats_sync syncp;
+	u64 packets;
+	u64 bytes;
+};
 
-	u64 rx_bytes;
-	u64 rx_packets;
+#define VIRTNET_SQ_STAT(m)	offsetof(struct virtnet_sq_stats, m)
+#define VIRTNET_RQ_STAT(m)	offsetof(struct virtnet_rq_stats, m)
+
+static const struct virtnet_stat_desc virtnet_sq_stats_desc[] = {
+	{ "packets",	VIRTNET_SQ_STAT(packets) },
+	{ "bytes",	VIRTNET_SQ_STAT(bytes) },
+};
+
+static const struct virtnet_stat_desc virtnet_rq_stats_desc[] = {
+	{ "packets",	VIRTNET_RQ_STAT(packets) },
+	{ "bytes",	VIRTNET_RQ_STAT(bytes) },
 };
 
+#define VIRTNET_SQ_STATS_LEN	ARRAY_SIZE(virtnet_sq_stats_desc)
+#define VIRTNET_RQ_STATS_LEN	ARRAY_SIZE(virtnet_rq_stats_desc)
+
 /* Internal representation of a send virtqueue */
 struct send_queue {
 	/* Virtqueue associated with this send queue */

@@ -86,6 +110,8 @@ struct send_queue {
 	/* Name of the send queue: output.$index */
 	char name[40];
 
+	struct virtnet_sq_stats stats;
+
 	struct napi_struct napi;
 };
 
@@ -98,6 +124,8 @@ struct receive_queue {
 
 	struct bpf_prog __rcu *xdp_prog;
 
+	struct virtnet_rq_stats stats;
+
 	/* Chain pages by the private ptr. */
 	struct page *pages;
 
@@ -115,6 +143,8 @@ struct receive_queue {
 
 	/* Name of this receive queue: input.$index */
 	char name[40];
+
+	struct xdp_rxq_info xdp_rxq;
 };
 
 struct virtnet_info {
@@ -149,9 +179,6 @@ struct virtnet_info {
 	/* Packet virtio header size */
 	u8 hdr_len;
 
-	/* Active statistics */
-	struct virtnet_stats __percpu *stats;
-
 	/* Work struct for refilling if we run low on memory. */
 	struct delayed_work refill;
 
@@ -261,9 +288,12 @@ static void virtqueue_napi_complete(struct napi_struct *napi,
 	int opaque;
 
 	opaque = virtqueue_enable_cb_prepare(vq);
-	if (napi_complete_done(napi, processed) &&
-	    unlikely(virtqueue_poll(vq, opaque)))
-		virtqueue_napi_schedule(napi, vq);
+	if (napi_complete_done(napi, processed)) {
+		if (unlikely(virtqueue_poll(vq, opaque)))
+			virtqueue_napi_schedule(napi, vq);
+	} else {
+		virtqueue_disable_cb(vq);
+	}
 }
 
 static void skb_xmit_done(struct virtqueue *vq)
@@ -556,6 +586,7 @@ static struct sk_buff *receive_small(struct net_device *dev,
 		xdp.data = xdp.data_hard_start + xdp_headroom;
 		xdp_set_data_meta_invalid(&xdp);
 		xdp.data_end = xdp.data + len;
+		xdp.rxq = &rq->xdp_rxq;
 		orig_data = xdp.data;
 		act = bpf_prog_run_xdp(xdp_prog, &xdp);
 
@@ -689,6 +720,8 @@ static struct sk_buff *receive_mergeable(struct net_device *dev,
 		xdp.data = data + vi->hdr_len;
 		xdp_set_data_meta_invalid(&xdp);
 		xdp.data_end = xdp.data + (len - vi->hdr_len);
+		xdp.rxq = &rq->xdp_rxq;
+
 		act = bpf_prog_run_xdp(xdp_prog, &xdp);
 
 		if (act != XDP_PASS)
@@ -1118,7 +1151,6 @@ static int virtnet_receive(struct receive_queue *rq, int budget, bool *xdp_xmit)
 	struct virtnet_info *vi = rq->vq->vdev->priv;
 	unsigned int len, received = 0, bytes = 0;
 	void *buf;
-	struct virtnet_stats *stats = this_cpu_ptr(vi->stats);
 
 	if (!vi->big_packets || vi->mergeable_rx_bufs) {
 		void *ctx;
@@ -1141,10 +1173,10 @@ static int virtnet_receive(struct receive_queue *rq, int budget, bool *xdp_xmit)
 			schedule_delayed_work(&vi->refill, 0);
 	}
 
-	u64_stats_update_begin(&stats->rx_syncp);
-	stats->rx_bytes += bytes;
-	stats->rx_packets += received;
-	u64_stats_update_end(&stats->rx_syncp);
+	u64_stats_update_begin(&rq->stats.syncp);
+	rq->stats.bytes += bytes;
+	rq->stats.packets += received;
+	u64_stats_update_end(&rq->stats.syncp);
 
 	return received;
 }
@@ -1153,8 +1185,6 @@ static void free_old_xmit_skbs(struct send_queue *sq)
 {
 	struct sk_buff *skb;
 	unsigned int len;
-	struct virtnet_info *vi = sq->vq->vdev->priv;
-	struct virtnet_stats *stats = this_cpu_ptr(vi->stats);
 	unsigned int packets = 0;
 	unsigned int bytes = 0;
 
@@ -1173,10 +1203,10 @@ static void free_old_xmit_skbs(struct send_queue *sq)
 	if (!packets)
 		return;
 
-	u64_stats_update_begin(&stats->tx_syncp);
-	stats->tx_bytes += bytes;
-	stats->tx_packets += packets;
-	u64_stats_update_end(&stats->tx_syncp);
+	u64_stats_update_begin(&sq->stats.syncp);
+	sq->stats.bytes += bytes;
+	sq->stats.packets += packets;
+	u64_stats_update_end(&sq->stats.syncp);
 }
 
 static void virtnet_poll_cleantx(struct receive_queue *rq)
@@ -1222,13 +1252,18 @@ static int virtnet_poll(struct napi_struct *napi, int budget)
 static int virtnet_open(struct net_device *dev)
 {
 	struct virtnet_info *vi = netdev_priv(dev);
-	int i;
+	int i, err;
 
 	for (i = 0; i < vi->max_queue_pairs; i++) {
 		if (i < vi->curr_queue_pairs)
 			/* Make sure we have some buffers: if oom use wq. */
 			if (!try_fill_recv(vi, &vi->rq[i], GFP_KERNEL))
 				schedule_delayed_work(&vi->refill, 0);
+
+		err = xdp_rxq_info_reg(&vi->rq[i].xdp_rxq, dev, i);
+		if (err < 0)
+			return err;
+
 		virtnet_napi_enable(vi->rq[i].vq, &vi->rq[i].napi);
 		virtnet_napi_tx_enable(vi, vi->sq[i].vq, &vi->sq[i].napi);
 	}
@@ -1460,24 +1495,25 @@ static void virtnet_stats(struct net_device *dev,
 			  struct rtnl_link_stats64 *tot)
 {
 	struct virtnet_info *vi = netdev_priv(dev);
-	int cpu;
 	unsigned int start;
+	int i;
 
-	for_each_possible_cpu(cpu) {
-		struct virtnet_stats *stats = per_cpu_ptr(vi->stats, cpu);
+	for (i = 0; i < vi->max_queue_pairs; i++) {
 		u64 tpackets, tbytes, rpackets, rbytes;
+		struct receive_queue *rq = &vi->rq[i];
+		struct send_queue *sq = &vi->sq[i];
 
 		do {
-			start = u64_stats_fetch_begin_irq(&stats->tx_syncp);
-			tpackets = stats->tx_packets;
-			tbytes   = stats->tx_bytes;
-		} while (u64_stats_fetch_retry_irq(&stats->tx_syncp, start));
+			start = u64_stats_fetch_begin_irq(&sq->stats.syncp);
+			tpackets = sq->stats.packets;
+			tbytes   = sq->stats.bytes;
+		} while (u64_stats_fetch_retry_irq(&sq->stats.syncp, start));
 
 		do {
-			start = u64_stats_fetch_begin_irq(&stats->rx_syncp);
-			rpackets = stats->rx_packets;
-			rbytes   = stats->rx_bytes;
-		} while (u64_stats_fetch_retry_irq(&stats->rx_syncp, start));
+			start = u64_stats_fetch_begin_irq(&rq->stats.syncp);
+			rpackets = rq->stats.packets;
+			rbytes   = rq->stats.bytes;
+		} while (u64_stats_fetch_retry_irq(&rq->stats.syncp, start));
 
 		tot->rx_packets += rpackets;
 		tot->tx_packets += tpackets;
@@ -1557,6 +1593,7 @@ static int virtnet_close(struct net_device *dev)
 	cancel_delayed_work_sync(&vi->refill);
 
 	for (i = 0; i < vi->max_queue_pairs; i++) {
+		xdp_rxq_info_unreg(&vi->rq[i].xdp_rxq);
 		napi_disable(&vi->rq[i].napi);
 		virtnet_napi_tx_disable(&vi->sq[i].napi);
 	}
@@ -1814,6 +1851,83 @@ static int virtnet_set_channels(struct net_device *dev,
 	return err;
 }
 
+static void virtnet_get_strings(struct net_device *dev, u32 stringset, u8 *data)
+{
+	struct virtnet_info *vi = netdev_priv(dev);
+	char *p = (char *)data;
+	unsigned int i, j;
+
+	switch (stringset) {
+	case ETH_SS_STATS:
+		for (i = 0; i < vi->curr_queue_pairs; i++) {
+			for (j = 0; j < VIRTNET_RQ_STATS_LEN; j++) {
+				snprintf(p, ETH_GSTRING_LEN, "rx_queue_%u_%s",
+					 i, virtnet_rq_stats_desc[j].desc);
+				p += ETH_GSTRING_LEN;
+			}
+		}
+
+		for (i = 0; i < vi->curr_queue_pairs; i++) {
+			for (j = 0; j < VIRTNET_SQ_STATS_LEN; j++) {
+				snprintf(p, ETH_GSTRING_LEN, "tx_queue_%u_%s",
+					 i, virtnet_sq_stats_desc[j].desc);
+				p += ETH_GSTRING_LEN;
+			}
+		}
+		break;
+	}
+}
+
+static int virtnet_get_sset_count(struct net_device *dev, int sset)
+{
+	struct virtnet_info *vi = netdev_priv(dev);
+
+	switch (sset) {
+	case ETH_SS_STATS:
+		return vi->curr_queue_pairs * (VIRTNET_RQ_STATS_LEN +
+					       VIRTNET_SQ_STATS_LEN);
+	default:
+		return -EOPNOTSUPP;
+	}
+}
+
+static void virtnet_get_ethtool_stats(struct net_device *dev,
+				      struct ethtool_stats *stats, u64 *data)
+{
+	struct virtnet_info *vi = netdev_priv(dev);
+	unsigned int idx = 0, start, i, j;
+	const u8 *stats_base;
+	size_t offset;
+
+	for (i = 0; i < vi->curr_queue_pairs; i++) {
+		struct receive_queue *rq = &vi->rq[i];
+
+		stats_base = (u8 *)&rq->stats;
+		do {
+			start = u64_stats_fetch_begin_irq(&rq->stats.syncp);
+			for (j = 0; j < VIRTNET_RQ_STATS_LEN; j++) {
+				offset = virtnet_rq_stats_desc[j].offset;
+				data[idx + j] = *(u64 *)(stats_base + offset);
+			}
+		} while (u64_stats_fetch_retry_irq(&rq->stats.syncp, start));
+		idx += VIRTNET_RQ_STATS_LEN;
+	}
+
+	for (i = 0; i < vi->curr_queue_pairs; i++) {
+		struct send_queue *sq = &vi->sq[i];
+
+		stats_base = (u8 *)&sq->stats;
+		do {
+			start = u64_stats_fetch_begin_irq(&sq->stats.syncp);
+			for (j = 0; j < VIRTNET_SQ_STATS_LEN; j++) {
+				offset = virtnet_sq_stats_desc[j].offset;
+				data[idx + j] = *(u64 *)(stats_base + offset);
+			}
+		} while (u64_stats_fetch_retry_irq(&sq->stats.syncp, start));
+		idx += VIRTNET_SQ_STATS_LEN;
+	}
+}
+
 static void virtnet_get_channels(struct net_device *dev,
 				 struct ethtool_channels *channels)
 {
@@ -1891,10 +2005,31 @@ static void virtnet_init_settings(struct net_device *dev)
 	vi->duplex = DUPLEX_UNKNOWN;
 }
 
+static void virtnet_update_settings(struct virtnet_info *vi)
+{
+	u32 speed;
+	u8 duplex;
+
+	if (!virtio_has_feature(vi->vdev, VIRTIO_NET_F_SPEED_DUPLEX))
+		return;
+
+	speed = virtio_cread32(vi->vdev, offsetof(struct virtio_net_config,
+						  speed));
+	if (ethtool_validate_speed(speed))
+		vi->speed = speed;
+	duplex = virtio_cread8(vi->vdev, offsetof(struct virtio_net_config,
+						  duplex));
+	if (ethtool_validate_duplex(duplex))
+		vi->duplex = duplex;
+}
+
 static const struct ethtool_ops virtnet_ethtool_ops = {
 	.get_drvinfo = virtnet_get_drvinfo,
 	.get_link = ethtool_op_get_link,
 	.get_ringparam = virtnet_get_ringparam,
+	.get_strings = virtnet_get_strings,
+	.get_sset_count = virtnet_get_sset_count,
+	.get_ethtool_stats = virtnet_get_ethtool_stats,
 	.set_channels = virtnet_set_channels,
 	.get_channels = virtnet_get_channels,
 	.get_ts_info = ethtool_op_get_ts_info,
@@ -2144,6 +2279,7 @@ static void virtnet_config_changed_work(struct work_struct *work)
 	vi->status = v;
 
 	if (vi->status & VIRTIO_NET_S_LINK_UP) {
+		virtnet_update_settings(vi);
 		netif_carrier_on(vi->dev);
 		netif_tx_wake_all_queues(vi->dev);
 	} else {
@@ -2386,6 +2522,9 @@ static int virtnet_alloc_queues(struct virtnet_info *vi)
 		sg_init_table(vi->rq[i].sg, ARRAY_SIZE(vi->rq[i].sg));
 		ewma_pkt_len_init(&vi->rq[i].mrg_avg_pkt_len);
 		sg_init_table(vi->sq[i].sg, ARRAY_SIZE(vi->sq[i].sg));
+
+		u64_stats_init(&vi->rq[i].stats.syncp);
+		u64_stats_init(&vi->sq[i].stats.syncp);
 	}
 
 	return 0;
@@ -2510,7 +2649,7 @@ static int virtnet_validate(struct virtio_device *vdev)
 
 static int virtnet_probe(struct virtio_device *vdev)
 {
-	int i, err;
+	int i, err = -ENOMEM;
 	struct net_device *dev;
 	struct virtnet_info *vi;
 	u16 max_queue_pairs;
@@ -2587,17 +2726,6 @@ static int virtnet_probe(struct virtio_device *vdev)
 	vi->dev = dev;
 	vi->vdev = vdev;
 	vdev->priv = vi;
-	vi->stats = alloc_percpu(struct virtnet_stats);
-	err = -ENOMEM;
-	if (vi->stats == NULL)
-		goto free;
-
-	for_each_possible_cpu(i) {
-		struct virtnet_stats *virtnet_stats;
-		virtnet_stats = per_cpu_ptr(vi->stats, i);
-		u64_stats_init(&virtnet_stats->tx_syncp);
-		u64_stats_init(&virtnet_stats->rx_syncp);
-	}
 
 	INIT_WORK(&vi->config_work, virtnet_config_changed_work);
 
@@ -2634,7 +2762,7 @@ static int virtnet_probe(struct virtio_device *vdev)
 			 */
 			dev_err(&vdev->dev, "device MTU appears to have changed "
 				"it is now %d < %d", mtu, dev->min_mtu);
-			goto free_stats;
+			goto free;
 		}
 
 		dev->mtu = mtu;
@@ -2658,7 +2786,7 @@ static int virtnet_probe(struct virtio_device *vdev)
 	/* Allocate/initialize the rx/tx queues, and invoke find_vqs */
 	err = init_vqs(vi);
 	if (err)
-		goto free_stats;
+		goto free;
 
 #ifdef CONFIG_SYSFS
 	if (vi->mergeable_rx_bufs)
@@ -2692,6 +2820,7 @@ static int virtnet_probe(struct virtio_device *vdev)
 		schedule_work(&vi->config_work);
 	} else {
 		vi->status = VIRTIO_NET_S_LINK_UP;
+		virtnet_update_settings(vi);
 		netif_carrier_on(dev);
 	}
 
@@ -2712,8 +2841,6 @@ free_vqs:
 	cancel_delayed_work_sync(&vi->refill);
 	free_receive_page_frags(vi);
 	virtnet_del_vqs(vi);
-free_stats:
-	free_percpu(vi->stats);
 free:
 	free_netdev(dev);
 	return err;
@@ -2746,7 +2873,6 @@ static void virtnet_remove(struct virtio_device *vdev)
 
 	remove_vq_common(vi);
 
-	free_percpu(vi->stats);
 	free_netdev(vi->dev);
 }
 
@@ -2793,7 +2919,8 @@ static struct virtio_device_id id_table[] = {
 	VIRTIO_NET_F_CTRL_RX, VIRTIO_NET_F_CTRL_VLAN, \
 	VIRTIO_NET_F_GUEST_ANNOUNCE, VIRTIO_NET_F_MQ, \
 	VIRTIO_NET_F_CTRL_MAC_ADDR, \
-	VIRTIO_NET_F_MTU, VIRTIO_NET_F_CTRL_GUEST_OFFLOADS
+	VIRTIO_NET_F_MTU, VIRTIO_NET_F_CTRL_GUEST_OFFLOADS, \
+	VIRTIO_NET_F_SPEED_DUPLEX
 
 static unsigned int features[] = {
 	VIRTNET_FEATURES,
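
The virtio_net changes above replace the old global per-cpu counters with per-queue counters read under u64_stats_fetch_begin_irq()/retry loops, and describe each counter as a name plus an offsetof() into the stats struct, so virtnet_get_strings() and virtnet_get_ethtool_stats() can both walk one table (yielding ethtool names like rx_queue_0_packets). A standalone sketch of that table-driven pattern, with hypothetical names:

/* Illustrative sketch of the offsetof()-driven stats table used
 * above; the struct and macro names here are hypothetical.
 */
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

struct demo_stats {
	uint64_t packets;
	uint64_t bytes;
};

struct stat_desc {
	const char *name;
	size_t offset;
};

#define DEMO_STAT(m) { #m, offsetof(struct demo_stats, m) }

static const struct stat_desc demo_desc[] = {
	DEMO_STAT(packets),
	DEMO_STAT(bytes),
};

int main(void)
{
	struct demo_stats s = { .packets = 42, .bytes = 4096 };
	const uint8_t *base = (const uint8_t *)&s;
	size_t i;

	/* One generic loop serves any number of counters. */
	for (i = 0; i < sizeof(demo_desc) / sizeof(demo_desc[0]); i++)
		printf("%s = %llu\n", demo_desc[i].name,
		       (unsigned long long)
		       *(const uint64_t *)(base + demo_desc[i].offset));
	return 0;
}
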
diff --git a/drivers/net/vmxnet3/vmxnet3_int.h b/drivers/net/vmxnet3/vmxnet3_int.h
index 9c51b8be0038..5ba222920e80 100644
--- a/drivers/net/vmxnet3/vmxnet3_int.h
+++ b/drivers/net/vmxnet3/vmxnet3_int.h
@@ -69,10 +69,10 @@
 /*
  * Version numbers
  */
-#define VMXNET3_DRIVER_VERSION_STRING   "1.4.a.0-k"
+#define VMXNET3_DRIVER_VERSION_STRING   "1.4.11.0-k"
 
 /* a 32-bit int, each byte encodes a version number in VMXNET3_DRIVER_VERSION */
-#define VMXNET3_DRIVER_VERSION_NUM      0x01040a00
+#define VMXNET3_DRIVER_VERSION_NUM      0x01040b00
 
 #if defined(CONFIG_PCI_MSI)
 	/* RSS only makes sense if MSI-X is supported. */
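
Per the comment above, VMXNET3_DRIVER_VERSION_NUM packs one version component per byte, which is why the old string read "1.4.a.0-k" (the hex digit a standing in for 10 in 0x01040a00) and the bump to 1.4.11.0 encodes as 0x01040b00. A one-line check of the encoding (the macro name is ours):

/* Sketch: one byte per version component, matching the bump above. */
#include <assert.h>

#define VMXNET3_VER(a, b, c, d) \
	(((a) << 24) | ((b) << 16) | ((c) << 8) | (d))

int main(void)
{
	assert(VMXNET3_VER(1, 4, 11, 0) == 0x01040b00);
	return 0;
}
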
@@ -416,8 +416,8 @@ struct vmxnet3_adapter {
 
 /* must be a multiple of VMXNET3_RING_SIZE_ALIGN */
 #define VMXNET3_DEF_TX_RING_SIZE    512
-#define VMXNET3_DEF_RX_RING_SIZE    256
-#define VMXNET3_DEF_RX_RING2_SIZE   128
+#define VMXNET3_DEF_RX_RING_SIZE    1024
+#define VMXNET3_DEF_RX_RING2_SIZE   256
 
 #define VMXNET3_DEF_RXDATA_DESC_SIZE 128
 
diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c
index c3e34e3c82a7..fab7a4db249e 100644
--- a/drivers/net/vxlan.c
+++ b/drivers/net/vxlan.c
@@ -3709,18 +3709,16 @@ static __net_init int vxlan_init_net(struct net *net)
 	return 0;
 }
 
-static void __net_exit vxlan_exit_net(struct net *net)
+static void vxlan_destroy_tunnels(struct net *net, struct list_head *head)
 {
 	struct vxlan_net *vn = net_generic(net, vxlan_net_id);
 	struct vxlan_dev *vxlan, *next;
 	struct net_device *dev, *aux;
 	unsigned int h;
-	LIST_HEAD(list);
 
-	rtnl_lock();
 	for_each_netdev_safe(net, dev, aux)
 		if (dev->rtnl_link_ops == &vxlan_link_ops)
-			unregister_netdevice_queue(dev, &list);
+			unregister_netdevice_queue(dev, head);
 
 	list_for_each_entry_safe(vxlan, next, &vn->vxlan_list, next) {
 		/* If vxlan->dev is in the same netns, it has already been added
@@ -3728,20 +3726,30 @@ static void __net_exit vxlan_exit_net(struct net *net)
 		 */
 		if (!net_eq(dev_net(vxlan->dev), net)) {
 			gro_cells_destroy(&vxlan->gro_cells);
-			unregister_netdevice_queue(vxlan->dev, &list);
+			unregister_netdevice_queue(vxlan->dev, head);
 		}
 	}
 
-	unregister_netdevice_many(&list);
-	rtnl_unlock();
-
 	for (h = 0; h < PORT_HASH_SIZE; ++h)
 		WARN_ON_ONCE(!hlist_empty(&vn->sock_list[h]));
 }
 
+static void __net_exit vxlan_exit_batch_net(struct list_head *net_list)
+{
+	struct net *net;
+	LIST_HEAD(list);
+
+	rtnl_lock();
+	list_for_each_entry(net, net_list, exit_list)
+		vxlan_destroy_tunnels(net, &list);
+
+	unregister_netdevice_many(&list);
+	rtnl_unlock();
+}
+
 static struct pernet_operations vxlan_net_ops = {
 	.init = vxlan_init_net,
-	.exit = vxlan_exit_net,
+	.exit_batch = vxlan_exit_batch_net,
 	.id   = &vxlan_net_id,
 	.size = sizeof(struct vxlan_net),
 };
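
Switching vxlan from .exit to .exit_batch means that when several network namespaces die together, the rtnl lock is taken once and unregister_netdevice_many() runs once over devices collected from every namespace, rather than once per namespace. The shape of the pattern, distilled (collect_tunnels() is a hypothetical stand-in for vxlan_destroy_tunnels() above):

/* Distilled pernet exit_batch pattern. */
#include <linux/rtnetlink.h>
#include <net/net_namespace.h>

static void __net_exit demo_exit_batch_net(struct list_head *net_list)
{
	struct net *net;
	LIST_HEAD(list);

	rtnl_lock();
	list_for_each_entry(net, net_list, exit_list)
		collect_tunnels(net, &list);	/* queue dying devices */

	/* One unregister pass and one rtnl section for all namespaces */
	unregister_netdevice_many(&list);
	rtnl_unlock();
}

static struct pernet_operations demo_net_ops = {
	.exit_batch = demo_exit_batch_net,
};
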
diff --git a/drivers/net/wireless/ath/ath10k/Kconfig b/drivers/net/wireless/ath/ath10k/Kconfig
index 87f56d0e17a6..deb5ae21a559 100644
--- a/drivers/net/wireless/ath/ath10k/Kconfig
+++ b/drivers/net/wireless/ath/ath10k/Kconfig
@@ -47,12 +47,19 @@ config ATH10K_DEBUG
 config ATH10K_DEBUGFS
 	bool "Atheros ath10k debugfs support"
 	depends on ATH10K && DEBUG_FS
-	select RELAY
 	---help---
 	  Enable debugfs support
 
 	  If unsure, say Y to make it easier to debug problems.
 
+config ATH10K_SPECTRAL
+	bool "Atheros ath10k spectral scan support"
+	depends on ATH10K_DEBUGFS
+	select RELAY
+	default n
+	---help---
+	  Say Y to enable access to the FFT/spectral data via debugfs.
+
 config ATH10K_TRACING
 	bool "Atheros ath10k tracing support"
 	depends on ATH10K
diff --git a/drivers/net/wireless/ath/ath10k/Makefile b/drivers/net/wireless/ath/ath10k/Makefile
index 9492177e9063..6739ac26fd29 100644
--- a/drivers/net/wireless/ath/ath10k/Makefile
+++ b/drivers/net/wireless/ath/ath10k/Makefile
@@ -15,12 +15,13 @@ ath10k_core-y += mac.o \
 		 p2p.o \
 		 swap.o
 
-ath10k_core-$(CONFIG_ATH10K_DEBUGFS) += spectral.o
+ath10k_core-$(CONFIG_ATH10K_SPECTRAL) += spectral.o
 ath10k_core-$(CONFIG_NL80211_TESTMODE) += testmode.o
 ath10k_core-$(CONFIG_ATH10K_TRACING) += trace.o
 ath10k_core-$(CONFIG_THERMAL) += thermal.o
 ath10k_core-$(CONFIG_MAC80211_DEBUGFS) += debugfs_sta.o
 ath10k_core-$(CONFIG_PM) += wow.o
+ath10k_core-$(CONFIG_DEV_COREDUMP) += coredump.o
 
 obj-$(CONFIG_ATH10K_PCI) += ath10k_pci.o
 ath10k_pci-y += pci.o \
diff --git a/drivers/net/wireless/ath/ath10k/ahb.c b/drivers/net/wireless/ath/ath10k/ahb.c
index ff6815e95684..35d10490f6c3 100644
--- a/drivers/net/wireless/ath/ath10k/ahb.c
+++ b/drivers/net/wireless/ath/ath10k/ahb.c
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2016 Qualcomm Atheros, Inc. All rights reserved.
+ * Copyright (c) 2016-2017 Qualcomm Atheros, Inc. All rights reserved.
  * Copyright (c) 2015 The Linux Foundation. All rights reserved.
  *
  * Permission to use, copy, modify, and/or distribute this software for any
diff --git a/drivers/net/wireless/ath/ath10k/bmi.c b/drivers/net/wireless/ath/ath10k/bmi.c
index 2d3a2f31123d..af4978d6a14b 100644
--- a/drivers/net/wireless/ath/ath10k/bmi.c
+++ b/drivers/net/wireless/ath/ath10k/bmi.c
@@ -1,6 +1,6 @@
 /*
  * Copyright (c) 2005-2011 Atheros Communications Inc.
- * Copyright (c) 2011-2013 Qualcomm Atheros, Inc.
+ * Copyright (c) 2011-2014,2016-2017 Qualcomm Atheros, Inc.
  *
  * Permission to use, copy, modify, and/or distribute this software for any
  * purpose with or without fee is hereby granted, provided that the above
diff --git a/drivers/net/wireless/ath/ath10k/bmi.h b/drivers/net/wireless/ath/ath10k/bmi.h
index 9c0839b2ca8f..9a396817aa55 100644
--- a/drivers/net/wireless/ath/ath10k/bmi.h
+++ b/drivers/net/wireless/ath/ath10k/bmi.h
@@ -1,6 +1,6 @@
 /*
  * Copyright (c) 2005-2011 Atheros Communications Inc.
- * Copyright (c) 2011-2013 Qualcomm Atheros, Inc.
+ * Copyright (c) 2011-2015,2017 Qualcomm Atheros, Inc.
  *
  * Permission to use, copy, modify, and/or distribute this software for any
  * purpose with or without fee is hereby granted, provided that the above
diff --git a/drivers/net/wireless/ath/ath10k/ce.c b/drivers/net/wireless/ath/ath10k/ce.c
index a8afd690290f..b9def7bace2f 100644
--- a/drivers/net/wireless/ath/ath10k/ce.c
+++ b/drivers/net/wireless/ath/ath10k/ce.c
@@ -1,6 +1,6 @@
 /*
  * Copyright (c) 2005-2011 Atheros Communications Inc.
- * Copyright (c) 2011-2013 Qualcomm Atheros, Inc.
+ * Copyright (c) 2011-2017 Qualcomm Atheros, Inc.
  *
  * Permission to use, copy, modify, and/or distribute this software for any
  * purpose with or without fee is hereby granted, provided that the above
@@ -327,12 +327,12 @@ static inline void ath10k_ce_engine_int_status_clear(struct ath10k *ar,
  * Guts of ath10k_ce_send.
  * The caller takes responsibility for any needed locking.
  */
-int ath10k_ce_send_nolock(struct ath10k_ce_pipe *ce_state,
-			  void *per_transfer_context,
-			  u32 buffer,
-			  unsigned int nbytes,
-			  unsigned int transfer_id,
-			  unsigned int flags)
+static int _ath10k_ce_send_nolock(struct ath10k_ce_pipe *ce_state,
+				  void *per_transfer_context,
+				  dma_addr_t buffer,
+				  unsigned int nbytes,
+				  unsigned int transfer_id,
+				  unsigned int flags)
 {
 	struct ath10k *ar = ce_state->ar;
 	struct ath10k_ce_ring *src_ring = ce_state->src_ring;
@@ -384,6 +384,87 @@ exit:
 	return ret;
 }
 
+static int _ath10k_ce_send_nolock_64(struct ath10k_ce_pipe *ce_state,
+				     void *per_transfer_context,
+				     dma_addr_t buffer,
+				     unsigned int nbytes,
+				     unsigned int transfer_id,
+				     unsigned int flags)
+{
+	struct ath10k *ar = ce_state->ar;
+	struct ath10k_ce_ring *src_ring = ce_state->src_ring;
+	struct ce_desc_64 *desc, sdesc;
+	unsigned int nentries_mask = src_ring->nentries_mask;
+	unsigned int sw_index = src_ring->sw_index;
+	unsigned int write_index = src_ring->write_index;
+	u32 ctrl_addr = ce_state->ctrl_addr;
+	__le32 *addr;
+	u32 desc_flags = 0;
+	int ret = 0;
+
+	if (test_bit(ATH10K_FLAG_CRASH_FLUSH, &ar->dev_flags))
+		return -ESHUTDOWN;
+
+	if (nbytes > ce_state->src_sz_max)
+		ath10k_warn(ar, "%s: send more we can (nbytes: %d, max: %d)\n",
+			    __func__, nbytes, ce_state->src_sz_max);
+
+	if (unlikely(CE_RING_DELTA(nentries_mask,
+				   write_index, sw_index - 1) <= 0)) {
+		ret = -ENOSR;
+		goto exit;
+	}
+
+	desc = CE_SRC_RING_TO_DESC_64(src_ring->base_addr_owner_space,
+				      write_index);
+
+	desc_flags |= SM(transfer_id, CE_DESC_FLAGS_META_DATA);
+
+	if (flags & CE_SEND_FLAG_GATHER)
+		desc_flags |= CE_DESC_FLAGS_GATHER;
+
+	if (flags & CE_SEND_FLAG_BYTE_SWAP)
+		desc_flags |= CE_DESC_FLAGS_BYTE_SWAP;
+
+	addr = (__le32 *)&sdesc.addr;
+
+	flags |= upper_32_bits(buffer) & CE_DESC_FLAGS_GET_MASK;
+	addr[0] = __cpu_to_le32(buffer);
+	addr[1] = __cpu_to_le32(flags);
+	if (flags & CE_SEND_FLAG_GATHER)
+		addr[1] |= __cpu_to_le32(CE_WCN3990_DESC_FLAGS_GATHER);
+	else
+		addr[1] &= ~(__cpu_to_le32(CE_WCN3990_DESC_FLAGS_GATHER));
+
+	sdesc.nbytes = __cpu_to_le16(nbytes);
+	sdesc.flags  = __cpu_to_le16(desc_flags);
+
+	*desc = sdesc;
+
+	src_ring->per_transfer_context[write_index] = per_transfer_context;
+
+	/* Update Source Ring Write Index */
+	write_index = CE_RING_IDX_INCR(nentries_mask, write_index);
+
+	if (!(flags & CE_SEND_FLAG_GATHER))
+		ath10k_ce_src_ring_write_index_set(ar, ctrl_addr, write_index);
+
+	src_ring->write_index = write_index;
+exit:
+	return ret;
+}
+
+int ath10k_ce_send_nolock(struct ath10k_ce_pipe *ce_state,
+			  void *per_transfer_context,
+			  dma_addr_t buffer,
+			  unsigned int nbytes,
+			  unsigned int transfer_id,
+			  unsigned int flags)
+{
+	return ce_state->ops->ce_send_nolock(ce_state, per_transfer_context,
+				    buffer, nbytes, transfer_id, flags);
+}
+
 void __ath10k_ce_send_revert(struct ath10k_ce_pipe *pipe)
 {
 	struct ath10k *ar = pipe->ar;
@@ -413,7 +494,7 @@ void __ath10k_ce_send_revert(struct ath10k_ce_pipe *pipe)
 
 int ath10k_ce_send(struct ath10k_ce_pipe *ce_state,
 		   void *per_transfer_context,
-		   u32 buffer,
+		   dma_addr_t buffer,
 		   unsigned int nbytes,
 		   unsigned int transfer_id,
 		   unsigned int flags)
@@ -459,7 +540,8 @@ int __ath10k_ce_rx_num_free_bufs(struct ath10k_ce_pipe *pipe)
 	return CE_RING_DELTA(nentries_mask, write_index, sw_index - 1);
 }
 
-int __ath10k_ce_rx_post_buf(struct ath10k_ce_pipe *pipe, void *ctx, u32 paddr)
+static int __ath10k_ce_rx_post_buf(struct ath10k_ce_pipe *pipe, void *ctx,
+				   dma_addr_t paddr)
 {
 	struct ath10k *ar = pipe->ar;
 	struct ath10k_ce *ce = ath10k_ce_priv(ar);
@@ -488,6 +570,39 @@ int __ath10k_ce_rx_post_buf(struct ath10k_ce_pipe *pipe, void *ctx, u32 paddr)
 	return 0;
 }
 
+static int __ath10k_ce_rx_post_buf_64(struct ath10k_ce_pipe *pipe,
+				      void *ctx,
+				      dma_addr_t paddr)
+{
+	struct ath10k *ar = pipe->ar;
+	struct ath10k_ce *ce = ath10k_ce_priv(ar);
+	struct ath10k_ce_ring *dest_ring = pipe->dest_ring;
+	unsigned int nentries_mask = dest_ring->nentries_mask;
+	unsigned int write_index = dest_ring->write_index;
+	unsigned int sw_index = dest_ring->sw_index;
+	struct ce_desc_64 *base = dest_ring->base_addr_owner_space;
+	struct ce_desc_64 *desc =
+			CE_DEST_RING_TO_DESC_64(base, write_index);
+	u32 ctrl_addr = pipe->ctrl_addr;
+
+	lockdep_assert_held(&ce->ce_lock);
+
+	if (CE_RING_DELTA(nentries_mask, write_index, sw_index - 1) == 0)
+		return -ENOSPC;
+
+	desc->addr = __cpu_to_le64(paddr);
+	desc->addr &= __cpu_to_le64(CE_DESC_37BIT_ADDR_MASK);
+
+	desc->nbytes = 0;
+
+	dest_ring->per_transfer_context[write_index] = ctx;
+	write_index = CE_RING_IDX_INCR(nentries_mask, write_index);
+	ath10k_ce_dest_ring_write_index_set(ar, ctrl_addr, write_index);
+	dest_ring->write_index = write_index;
+
+	return 0;
+}
+
 void ath10k_ce_rx_update_write_idx(struct ath10k_ce_pipe *pipe, u32 nentries)
 {
 	struct ath10k *ar = pipe->ar;
@@ -508,14 +623,15 @@ void ath10k_ce_rx_update_write_idx(struct ath10k_ce_pipe *pipe, u32 nentries)
 	dest_ring->write_index = write_index;
 }
 
-int ath10k_ce_rx_post_buf(struct ath10k_ce_pipe *pipe, void *ctx, u32 paddr)
+int ath10k_ce_rx_post_buf(struct ath10k_ce_pipe *pipe, void *ctx,
+			  dma_addr_t paddr)
 {
 	struct ath10k *ar = pipe->ar;
 	struct ath10k_ce *ce = ath10k_ce_priv(ar);
 	int ret;
 
 	spin_lock_bh(&ce->ce_lock);
-	ret = __ath10k_ce_rx_post_buf(pipe, ctx, paddr);
+	ret = pipe->ops->ce_rx_post_buf(pipe, ctx, paddr);
 	spin_unlock_bh(&ce->ce_lock);
 
 	return ret;
@@ -525,9 +641,10 @@ int ath10k_ce_rx_post_buf(struct ath10k_ce_pipe *pipe, void *ctx, u32 paddr)
  * Guts of ath10k_ce_completed_recv_next.
  * The caller takes responsibility for any necessary locking.
  */
-int ath10k_ce_completed_recv_next_nolock(struct ath10k_ce_pipe *ce_state,
-					 void **per_transfer_contextp,
-					 unsigned int *nbytesp)
+static int
+_ath10k_ce_completed_recv_next_nolock(struct ath10k_ce_pipe *ce_state,
+				      void **per_transfer_contextp,
+				      unsigned int *nbytesp)
 {
 	struct ath10k_ce_ring *dest_ring = ce_state->dest_ring;
 	unsigned int nentries_mask = dest_ring->nentries_mask;
@@ -574,6 +691,64 @@ int ath10k_ce_completed_recv_next_nolock(struct ath10k_ce_pipe *ce_state,
 	return 0;
 }
 
+static int
+_ath10k_ce_completed_recv_next_nolock_64(struct ath10k_ce_pipe *ce_state,
+					 void **per_transfer_contextp,
+					 unsigned int *nbytesp)
+{
+	struct ath10k_ce_ring *dest_ring = ce_state->dest_ring;
+	unsigned int nentries_mask = dest_ring->nentries_mask;
+	unsigned int sw_index = dest_ring->sw_index;
+	struct ce_desc_64 *base = dest_ring->base_addr_owner_space;
+	struct ce_desc_64 *desc =
+		CE_DEST_RING_TO_DESC_64(base, sw_index);
+	struct ce_desc_64 sdesc;
+	u16 nbytes;
+
+	/* Copy in one go for performance reasons */
+	sdesc = *desc;
+
+	nbytes = __le16_to_cpu(sdesc.nbytes);
+	if (nbytes == 0) {
+		/* This closes a relatively unusual race where the Host
+		 * sees the updated DRRI before the update to the
+		 * corresponding descriptor has completed. We treat this
+		 * as a descriptor that is not yet done.
+		 */
+		return -EIO;
+	}
+
+	desc->nbytes = 0;
+
+	/* Return data from completed destination descriptor */
+	*nbytesp = nbytes;
+
+	if (per_transfer_contextp)
+		*per_transfer_contextp =
+			dest_ring->per_transfer_context[sw_index];
+
+	/* Copy engine 5 (HTT Rx) will reuse the same transfer context.
+	 * So update the transfer context for all CEs except CE5.
+	 */
+	if (ce_state->id != 5)
+		dest_ring->per_transfer_context[sw_index] = NULL;
+
+	/* Update sw_index */
+	sw_index = CE_RING_IDX_INCR(nentries_mask, sw_index);
+	dest_ring->sw_index = sw_index;
+
+	return 0;
+}
+
+int ath10k_ce_completed_recv_next_nolock(struct ath10k_ce_pipe *ce_state,
+					 void **per_transfer_ctx,
+					 unsigned int *nbytesp)
+{
+	return ce_state->ops->ce_completed_recv_next_nolock(ce_state,
+							    per_transfer_ctx,
+							    nbytesp);
+}
+
 int ath10k_ce_completed_recv_next(struct ath10k_ce_pipe *ce_state,
 				  void **per_transfer_contextp,
 				  unsigned int *nbytesp)
@@ -583,17 +758,18 @@ int ath10k_ce_completed_recv_next(struct ath10k_ce_pipe *ce_state,
 	int ret;
 
 	spin_lock_bh(&ce->ce_lock);
-	ret = ath10k_ce_completed_recv_next_nolock(ce_state,
+	ret = ce_state->ops->ce_completed_recv_next_nolock(ce_state,
 						   per_transfer_contextp,
 						   nbytesp);
+
 	spin_unlock_bh(&ce->ce_lock);
 
 	return ret;
 }
 
-int ath10k_ce_revoke_recv_next(struct ath10k_ce_pipe *ce_state,
-			       void **per_transfer_contextp,
-			       u32 *bufferp)
+static int _ath10k_ce_revoke_recv_next(struct ath10k_ce_pipe *ce_state,
+				       void **per_transfer_contextp,
+				       dma_addr_t *bufferp)
 {
 	struct ath10k_ce_ring *dest_ring;
 	unsigned int nentries_mask;
@@ -644,6 +820,69 @@ int ath10k_ce_revoke_recv_next(struct ath10k_ce_pipe *ce_state,
 	return ret;
 }
 
+static int _ath10k_ce_revoke_recv_next_64(struct ath10k_ce_pipe *ce_state,
+					  void **per_transfer_contextp,
+					  dma_addr_t *bufferp)
+{
+	struct ath10k_ce_ring *dest_ring;
+	unsigned int nentries_mask;
+	unsigned int sw_index;
+	unsigned int write_index;
+	int ret;
+	struct ath10k *ar;
+	struct ath10k_ce *ce;
+
+	dest_ring = ce_state->dest_ring;
+
+	if (!dest_ring)
+		return -EIO;
+
+	ar = ce_state->ar;
+	ce = ath10k_ce_priv(ar);
+
+	spin_lock_bh(&ce->ce_lock);
+
+	nentries_mask = dest_ring->nentries_mask;
+	sw_index = dest_ring->sw_index;
+	write_index = dest_ring->write_index;
+	if (write_index != sw_index) {
+		struct ce_desc_64 *base = dest_ring->base_addr_owner_space;
+		struct ce_desc_64 *desc =
+			CE_DEST_RING_TO_DESC_64(base, sw_index);
+
+		/* Return data from completed destination descriptor */
+		*bufferp = __le64_to_cpu(desc->addr);
+
+		if (per_transfer_contextp)
+			*per_transfer_contextp =
+				dest_ring->per_transfer_context[sw_index];
+
+		/* sanity */
+		dest_ring->per_transfer_context[sw_index] = NULL;
+		desc->nbytes = 0;
+
+		/* Update sw_index */
+		sw_index = CE_RING_IDX_INCR(nentries_mask, sw_index);
+		dest_ring->sw_index = sw_index;
+		ret = 0;
+	} else {
+		ret = -EIO;
+	}
+
+	spin_unlock_bh(&ce->ce_lock);
+
+	return ret;
+}
+
+int ath10k_ce_revoke_recv_next(struct ath10k_ce_pipe *ce_state,
+			       void **per_transfer_contextp,
+			       dma_addr_t *bufferp)
+{
+	return ce_state->ops->ce_revoke_recv_next(ce_state,
+						  per_transfer_contextp,
+						  bufferp);
+}
+
 /*
  * Guts of ath10k_ce_completed_send_next.
  * The caller takes responsibility for any necessary locking.
@@ -698,10 +937,45 @@ int ath10k_ce_completed_send_next_nolock(struct ath10k_ce_pipe *ce_state,
 	return 0;
 }
 
+static void ath10k_ce_extract_desc_data(struct ath10k *ar,
+					struct ath10k_ce_ring *src_ring,
+					u32 sw_index,
+					dma_addr_t *bufferp,
+					u32 *nbytesp,
+					u32 *transfer_idp)
+{
+	struct ce_desc *base = src_ring->base_addr_owner_space;
+	struct ce_desc *desc = CE_SRC_RING_TO_DESC(base, sw_index);
+
+	/* Return data from completed source descriptor */
+	*bufferp = __le32_to_cpu(desc->addr);
+	*nbytesp = __le16_to_cpu(desc->nbytes);
+	*transfer_idp = MS(__le16_to_cpu(desc->flags),
+			   CE_DESC_FLAGS_META_DATA);
+}
+
+static void ath10k_ce_extract_desc_data_64(struct ath10k *ar,
+					   struct ath10k_ce_ring *src_ring,
+					   u32 sw_index,
+					   dma_addr_t *bufferp,
+					   u32 *nbytesp,
+					   u32 *transfer_idp)
+{
+	struct ce_desc_64 *base = src_ring->base_addr_owner_space;
+	struct ce_desc_64 *desc =
+		CE_SRC_RING_TO_DESC_64(base, sw_index);
+
+	/* Return data from completed source descriptor */
+	*bufferp = __le64_to_cpu(desc->addr);
+	*nbytesp = __le16_to_cpu(desc->nbytes);
+	*transfer_idp = MS(__le16_to_cpu(desc->flags),
+			   CE_DESC_FLAGS_META_DATA);
+}
+
 /* NB: Modeled after ath10k_ce_completed_send_next */
 int ath10k_ce_cancel_send_next(struct ath10k_ce_pipe *ce_state,
 			       void **per_transfer_contextp,
-			       u32 *bufferp,
+			       dma_addr_t *bufferp,
 			       unsigned int *nbytesp,
 			       unsigned int *transfer_idp)
 {
@@ -728,14 +1002,9 @@ int ath10k_ce_cancel_send_next(struct ath10k_ce_pipe *ce_state,
 	write_index = src_ring->write_index;
 
 	if (write_index != sw_index) {
-		struct ce_desc *base = src_ring->base_addr_owner_space;
-		struct ce_desc *desc = CE_SRC_RING_TO_DESC(base, sw_index);
-
-		/* Return data from completed source descriptor */
-		*bufferp = __le32_to_cpu(desc->addr);
-		*nbytesp = __le16_to_cpu(desc->nbytes);
-		*transfer_idp = MS(__le16_to_cpu(desc->flags),
-						CE_DESC_FLAGS_META_DATA);
+		ce_state->ops->ce_extract_desc_data(ar, src_ring, sw_index,
+						    bufferp, nbytesp,
+						    transfer_idp);
 
 		if (per_transfer_contextp)
 			*per_transfer_contextp =
@@ -897,8 +1166,12 @@ static int ath10k_ce_init_src_ring(struct ath10k *ar,
 
 	nentries = roundup_pow_of_two(attr->src_nentries);
 
-	memset(src_ring->base_addr_owner_space, 0,
-	       nentries * sizeof(struct ce_desc));
+	if (ar->hw_params.target_64bit)
+		memset(src_ring->base_addr_owner_space, 0,
+		       nentries * sizeof(struct ce_desc_64));
+	else
+		memset(src_ring->base_addr_owner_space, 0,
+		       nentries * sizeof(struct ce_desc));
 
 	src_ring->sw_index = ath10k_ce_src_ring_read_index_get(ar, ctrl_addr);
 	src_ring->sw_index &= src_ring->nentries_mask;
@@ -934,8 +1207,12 @@ static int ath10k_ce_init_dest_ring(struct ath10k *ar,
 
 	nentries = roundup_pow_of_two(attr->dest_nentries);
 
-	memset(dest_ring->base_addr_owner_space, 0,
-	       nentries * sizeof(struct ce_desc));
+	if (ar->hw_params.target_64bit)
+		memset(dest_ring->base_addr_owner_space, 0,
+		       nentries * sizeof(struct ce_desc_64));
+	else
+		memset(dest_ring->base_addr_owner_space, 0,
+		       nentries * sizeof(struct ce_desc));
 
 	dest_ring->sw_index = ath10k_ce_dest_ring_read_index_get(ar, ctrl_addr);
 	dest_ring->sw_index &= dest_ring->nentries_mask;
@@ -993,12 +1270,57 @@ ath10k_ce_alloc_src_ring(struct ath10k *ar, unsigned int ce_id,
 
 	src_ring->base_addr_ce_space_unaligned = base_addr;
 
-	src_ring->base_addr_owner_space = PTR_ALIGN(
-			src_ring->base_addr_owner_space_unaligned,
-			CE_DESC_RING_ALIGN);
-	src_ring->base_addr_ce_space = ALIGN(
-			src_ring->base_addr_ce_space_unaligned,
-			CE_DESC_RING_ALIGN);
+	src_ring->base_addr_owner_space =
+			PTR_ALIGN(src_ring->base_addr_owner_space_unaligned,
+				  CE_DESC_RING_ALIGN);
+	src_ring->base_addr_ce_space =
+			ALIGN(src_ring->base_addr_ce_space_unaligned,
+			      CE_DESC_RING_ALIGN);
+
+	return src_ring;
+}
+
+static struct ath10k_ce_ring *
+ath10k_ce_alloc_src_ring_64(struct ath10k *ar, unsigned int ce_id,
+			    const struct ce_attr *attr)
+{
+	struct ath10k_ce_ring *src_ring;
+	u32 nentries = attr->src_nentries;
+	dma_addr_t base_addr;
+
+	nentries = roundup_pow_of_two(nentries);
+
+	src_ring = kzalloc(sizeof(*src_ring) +
+			   (nentries *
+			    sizeof(*src_ring->per_transfer_context)),
+			   GFP_KERNEL);
+	if (!src_ring)
+		return ERR_PTR(-ENOMEM);
+
+	src_ring->nentries = nentries;
+	src_ring->nentries_mask = nentries - 1;
+
+	/* Legacy platforms that do not support cache
+	 * coherent DMA are unsupported
+	 */
+	src_ring->base_addr_owner_space_unaligned =
+		dma_alloc_coherent(ar->dev,
+				   (nentries * sizeof(struct ce_desc_64) +
+				    CE_DESC_RING_ALIGN),
+				   &base_addr, GFP_KERNEL);
+	if (!src_ring->base_addr_owner_space_unaligned) {
+		kfree(src_ring);
+		return ERR_PTR(-ENOMEM);
+	}
+
+	src_ring->base_addr_ce_space_unaligned = base_addr;
+
+	src_ring->base_addr_owner_space =
+			PTR_ALIGN(src_ring->base_addr_owner_space_unaligned,
+				  CE_DESC_RING_ALIGN);
+	src_ring->base_addr_ce_space =
+			ALIGN(src_ring->base_addr_ce_space_unaligned,
+			      CE_DESC_RING_ALIGN);
 
 	return src_ring;
 }
@@ -1039,12 +1361,63 @@ ath10k_ce_alloc_dest_ring(struct ath10k *ar, unsigned int ce_id,
 
 	dest_ring->base_addr_ce_space_unaligned = base_addr;
 
-	dest_ring->base_addr_owner_space = PTR_ALIGN(
-			dest_ring->base_addr_owner_space_unaligned,
-			CE_DESC_RING_ALIGN);
-	dest_ring->base_addr_ce_space = ALIGN(
-			dest_ring->base_addr_ce_space_unaligned,
-			CE_DESC_RING_ALIGN);
+	dest_ring->base_addr_owner_space =
+			PTR_ALIGN(dest_ring->base_addr_owner_space_unaligned,
+				  CE_DESC_RING_ALIGN);
+	dest_ring->base_addr_ce_space =
+				ALIGN(dest_ring->base_addr_ce_space_unaligned,
+				      CE_DESC_RING_ALIGN);
+
+	return dest_ring;
+}
+
+static struct ath10k_ce_ring *
+ath10k_ce_alloc_dest_ring_64(struct ath10k *ar, unsigned int ce_id,
+			     const struct ce_attr *attr)
+{
+	struct ath10k_ce_ring *dest_ring;
+	u32 nentries;
+	dma_addr_t base_addr;
+
+	nentries = roundup_pow_of_two(attr->dest_nentries);
+
+	dest_ring = kzalloc(sizeof(*dest_ring) +
+			    (nentries *
+			     sizeof(*dest_ring->per_transfer_context)),
+			    GFP_KERNEL);
+	if (!dest_ring)
+		return ERR_PTR(-ENOMEM);
+
+	dest_ring->nentries = nentries;
+	dest_ring->nentries_mask = nentries - 1;
+
+	/* Legacy platforms that do not support cache
+	 * coherent DMA are unsupported
+	 */
+	dest_ring->base_addr_owner_space_unaligned =
+		dma_alloc_coherent(ar->dev,
+				   (nentries * sizeof(struct ce_desc_64) +
+				    CE_DESC_RING_ALIGN),
+				   &base_addr, GFP_KERNEL);
+	if (!dest_ring->base_addr_owner_space_unaligned) {
+		kfree(dest_ring);
+		return ERR_PTR(-ENOMEM);
+	}
+
+	dest_ring->base_addr_ce_space_unaligned = base_addr;
+
+	/* Zero the descriptor memory so stale data cannot crash the
+	 * system while downloading firmware.
+	 */
+	memset(dest_ring->base_addr_owner_space_unaligned, 0,
+	       nentries * sizeof(struct ce_desc_64) + CE_DESC_RING_ALIGN);
+
+	dest_ring->base_addr_owner_space =
+			PTR_ALIGN(dest_ring->base_addr_owner_space_unaligned,
+				  CE_DESC_RING_ALIGN);
+	dest_ring->base_addr_ce_space =
+			ALIGN(dest_ring->base_addr_ce_space_unaligned,
+			      CE_DESC_RING_ALIGN);
 
 	return dest_ring;
 }
@@ -1107,65 +1480,36 @@ void ath10k_ce_deinit_pipe(struct ath10k *ar, unsigned int ce_id)
 	ath10k_ce_deinit_dest_ring(ar, ce_id);
 }
 
-int ath10k_ce_alloc_pipe(struct ath10k *ar, int ce_id,
-			 const struct ce_attr *attr)
+static void _ath10k_ce_free_pipe(struct ath10k *ar, int ce_id)
 {
 	struct ath10k_ce *ce = ath10k_ce_priv(ar);
 	struct ath10k_ce_pipe *ce_state = &ce->ce_states[ce_id];
-	int ret;
-
-	/*
-	 * Make sure there's enough CE ringbuffer entries for HTT TX to avoid
-	 * additional TX locking checks.
-	 *
-	 * For the lack of a better place do the check here.
-	 */
-	BUILD_BUG_ON(2 * TARGET_NUM_MSDU_DESC >
-		     (CE_HTT_H2T_MSG_SRC_NENTRIES - 1));
-	BUILD_BUG_ON(2 * TARGET_10_4_NUM_MSDU_DESC_PFC >
-		     (CE_HTT_H2T_MSG_SRC_NENTRIES - 1));
-	BUILD_BUG_ON(2 * TARGET_TLV_NUM_MSDU_DESC >
-		     (CE_HTT_H2T_MSG_SRC_NENTRIES - 1));
-
-	ce_state->ar = ar;
-	ce_state->id = ce_id;
-	ce_state->ctrl_addr = ath10k_ce_base_address(ar, ce_id);
-	ce_state->attr_flags = attr->flags;
-	ce_state->src_sz_max = attr->src_sz_max;
 
-	if (attr->src_nentries)
-		ce_state->send_cb = attr->send_cb;
-
-	if (attr->dest_nentries)
-		ce_state->recv_cb = attr->recv_cb;
-
-	if (attr->src_nentries) {
-		ce_state->src_ring = ath10k_ce_alloc_src_ring(ar, ce_id, attr);
-		if (IS_ERR(ce_state->src_ring)) {
-			ret = PTR_ERR(ce_state->src_ring);
-			ath10k_err(ar, "failed to allocate copy engine source ring %d: %d\n",
-				   ce_id, ret);
-			ce_state->src_ring = NULL;
-			return ret;
-		}
+	if (ce_state->src_ring) {
+		dma_free_coherent(ar->dev,
+				  (ce_state->src_ring->nentries *
+				   sizeof(struct ce_desc) +
+				   CE_DESC_RING_ALIGN),
+				  ce_state->src_ring->base_addr_owner_space,
+				  ce_state->src_ring->base_addr_ce_space);
+		kfree(ce_state->src_ring);
 	}
 
-	if (attr->dest_nentries) {
-		ce_state->dest_ring = ath10k_ce_alloc_dest_ring(ar, ce_id,
-								attr);
-		if (IS_ERR(ce_state->dest_ring)) {
-			ret = PTR_ERR(ce_state->dest_ring);
-			ath10k_err(ar, "failed to allocate copy engine destination ring %d: %d\n",
-				   ce_id, ret);
-			ce_state->dest_ring = NULL;
-			return ret;
-		}
+	if (ce_state->dest_ring) {
+		dma_free_coherent(ar->dev,
+				  (ce_state->dest_ring->nentries *
+				   sizeof(struct ce_desc) +
+				   CE_DESC_RING_ALIGN),
+				  ce_state->dest_ring->base_addr_owner_space,
+				  ce_state->dest_ring->base_addr_ce_space);
+		kfree(ce_state->dest_ring);
 	}
 
-	return 0;
+	ce_state->src_ring = NULL;
+	ce_state->dest_ring = NULL;
 }
 
-void ath10k_ce_free_pipe(struct ath10k *ar, int ce_id)
+static void _ath10k_ce_free_pipe_64(struct ath10k *ar, int ce_id)
 {
 	struct ath10k_ce *ce = ath10k_ce_priv(ar);
 	struct ath10k_ce_pipe *ce_state = &ce->ce_states[ce_id];
@@ -1173,7 +1517,7 @@ void ath10k_ce_free_pipe(struct ath10k *ar, int ce_id)
 	if (ce_state->src_ring) {
 		dma_free_coherent(ar->dev,
 				  (ce_state->src_ring->nentries *
-				   sizeof(struct ce_desc) +
+				   sizeof(struct ce_desc_64) +
 				   CE_DESC_RING_ALIGN),
 				  ce_state->src_ring->base_addr_owner_space,
 				  ce_state->src_ring->base_addr_ce_space);
@@ -1183,7 +1527,7 @@ void ath10k_ce_free_pipe(struct ath10k *ar, int ce_id)
 	if (ce_state->dest_ring) {
 		dma_free_coherent(ar->dev,
 				  (ce_state->dest_ring->nentries *
-				   sizeof(struct ce_desc) +
+				   sizeof(struct ce_desc_64) +
 				   CE_DESC_RING_ALIGN),
 				  ce_state->dest_ring->base_addr_owner_space,
 				  ce_state->dest_ring->base_addr_ce_space);
@@ -1194,6 +1538,14 @@ void ath10k_ce_free_pipe(struct ath10k *ar, int ce_id)
 	ce_state->dest_ring = NULL;
 }
 
+void ath10k_ce_free_pipe(struct ath10k *ar, int ce_id)
+{
+	struct ath10k_ce *ce = ath10k_ce_priv(ar);
+	struct ath10k_ce_pipe *ce_state = &ce->ce_states[ce_id];
+
+	ce_state->ops->ce_free_pipe(ar, ce_id);
+}
+
 void ath10k_ce_dump_registers(struct ath10k *ar,
 			      struct ath10k_fw_crash_data *crash_data)
 {
@@ -1232,3 +1584,99 @@ void ath10k_ce_dump_registers(struct ath10k *ar,
 
 	spin_unlock_bh(&ce->ce_lock);
 }
+
+static const struct ath10k_ce_ops ce_ops = {
+	.ce_alloc_src_ring = ath10k_ce_alloc_src_ring,
+	.ce_alloc_dst_ring = ath10k_ce_alloc_dest_ring,
+	.ce_rx_post_buf = __ath10k_ce_rx_post_buf,
+	.ce_completed_recv_next_nolock = _ath10k_ce_completed_recv_next_nolock,
+	.ce_revoke_recv_next = _ath10k_ce_revoke_recv_next,
+	.ce_extract_desc_data = ath10k_ce_extract_desc_data,
+	.ce_free_pipe = _ath10k_ce_free_pipe,
+	.ce_send_nolock = _ath10k_ce_send_nolock,
+};
+
+static const struct ath10k_ce_ops ce_64_ops = {
+	.ce_alloc_src_ring = ath10k_ce_alloc_src_ring_64,
+	.ce_alloc_dst_ring = ath10k_ce_alloc_dest_ring_64,
+	.ce_rx_post_buf = __ath10k_ce_rx_post_buf_64,
+	.ce_completed_recv_next_nolock =
+				_ath10k_ce_completed_recv_next_nolock_64,
+	.ce_revoke_recv_next = _ath10k_ce_revoke_recv_next_64,
+	.ce_extract_desc_data = ath10k_ce_extract_desc_data_64,
+	.ce_free_pipe = _ath10k_ce_free_pipe_64,
+	.ce_send_nolock = _ath10k_ce_send_nolock_64,
+};
+
+static void ath10k_ce_set_ops(struct ath10k *ar,
+			      struct ath10k_ce_pipe *ce_state)
+{
+	switch (ar->hw_rev) {
+	case ATH10K_HW_WCN3990:
+		ce_state->ops = &ce_64_ops;
+		break;
+	default:
+		ce_state->ops = &ce_ops;
+		break;
+	}
+}
+
+int ath10k_ce_alloc_pipe(struct ath10k *ar, int ce_id,
+			 const struct ce_attr *attr)
+{
+	struct ath10k_ce *ce = ath10k_ce_priv(ar);
+	struct ath10k_ce_pipe *ce_state = &ce->ce_states[ce_id];
+	int ret;
+
+	ath10k_ce_set_ops(ar, ce_state);
+	/* Make sure there's enough CE ringbuffer entries for HTT TX to avoid
+	 * additional TX locking checks.
+	 *
+	 * For the lack of a better place do the check here.
+	 */
+	BUILD_BUG_ON(2 * TARGET_NUM_MSDU_DESC >
+		     (CE_HTT_H2T_MSG_SRC_NENTRIES - 1));
+	BUILD_BUG_ON(2 * TARGET_10_4_NUM_MSDU_DESC_PFC >
+		     (CE_HTT_H2T_MSG_SRC_NENTRIES - 1));
+	BUILD_BUG_ON(2 * TARGET_TLV_NUM_MSDU_DESC >
+		     (CE_HTT_H2T_MSG_SRC_NENTRIES - 1));
+
+	ce_state->ar = ar;
+	ce_state->id = ce_id;
+	ce_state->ctrl_addr = ath10k_ce_base_address(ar, ce_id);
+	ce_state->attr_flags = attr->flags;
+	ce_state->src_sz_max = attr->src_sz_max;
+
+	if (attr->src_nentries)
+		ce_state->send_cb = attr->send_cb;
+
+	if (attr->dest_nentries)
+		ce_state->recv_cb = attr->recv_cb;
+
+	if (attr->src_nentries) {
+		ce_state->src_ring =
+			ce_state->ops->ce_alloc_src_ring(ar, ce_id, attr);
+		if (IS_ERR(ce_state->src_ring)) {
+			ret = PTR_ERR(ce_state->src_ring);
+			ath10k_err(ar, "failed to alloc CE src ring %d: %d\n",
+				   ce_id, ret);
+			ce_state->src_ring = NULL;
+			return ret;
+		}
+	}
+
+	if (attr->dest_nentries) {
+		ce_state->dest_ring = ce_state->ops->ce_alloc_dst_ring(ar,
+									ce_id,
+									attr);
+		if (IS_ERR(ce_state->dest_ring)) {
+			ret = PTR_ERR(ce_state->dest_ring);
+			ath10k_err(ar, "failed to alloc CE dest ring %d: %d\n",
+				   ce_id, ret);
+			ce_state->dest_ring = NULL;
+			return ret;
+		}
+	}
+
+	return 0;
+}
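
The 64-bit send path above (_ath10k_ce_send_nolock_64) packs a dma_addr_t wider than 32 bits into the descriptor's two 32-bit address words: the low 32 bits land in addr[0], while the upper bits are folded into the second word alongside the send flags under CE_DESC_FLAGS_GET_MASK, with the full address capped by CE_DESC_37BIT_ADDR_MASK (which spans bits 0-37 despite its name). A standalone sketch of that split; mask values are copied from ce.h, the helper name is ours:

/* Sketch of the wide-address split used by the WCN3990 send path. */
#include <stdint.h>

#define CE_DESC_FLAGS_GET_MASK	0x1fUL		/* GENMASK(4, 0) */
#define CE_DESC_37BIT_ADDR_MASK	0x3fffffffffULL	/* GENMASK_ULL(37, 0) */

static void ce_pack_addr64(uint64_t buffer, uint32_t flags, uint32_t addr[2])
{
	buffer &= CE_DESC_37BIT_ADDR_MASK;	/* cap to the target's range */
	flags |= (uint32_t)(buffer >> 32) & CE_DESC_FLAGS_GET_MASK;
	addr[0] = (uint32_t)buffer;		/* low 32 address bits */
	addr[1] = flags;			/* upper bits + send flags */
}
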
diff --git a/drivers/net/wireless/ath/ath10k/ce.h b/drivers/net/wireless/ath/ath10k/ce.h
index bdec794704d9..2c3c8f5e90ea 100644
--- a/drivers/net/wireless/ath/ath10k/ce.h
+++ b/drivers/net/wireless/ath/ath10k/ce.h
@@ -1,6 +1,6 @@
 /*
  * Copyright (c) 2005-2011 Atheros Communications Inc.
- * Copyright (c) 2011-2013 Qualcomm Atheros, Inc.
+ * Copyright (c) 2011-2017 Qualcomm Atheros, Inc.
  *
  * Permission to use, copy, modify, and/or distribute this software for any
  * purpose with or without fee is hereby granted, provided that the above
@@ -36,6 +36,10 @@ struct ath10k_ce_pipe;
 
 #define CE_DESC_FLAGS_GATHER         (1 << 0)
 #define CE_DESC_FLAGS_BYTE_SWAP      (1 << 1)
+#define CE_WCN3990_DESC_FLAGS_GATHER BIT(31)
+
+#define CE_DESC_FLAGS_GET_MASK		GENMASK(4, 0)
+#define CE_DESC_37BIT_ADDR_MASK		GENMASK_ULL(37, 0)
 
 /* Following desc flags are used in QCA99X0 */
 #define CE_DESC_FLAGS_HOST_INT_DIS	(1 << 2)
@@ -50,6 +54,16 @@ struct ce_desc {
 	__le16 flags; /* %CE_DESC_FLAGS_ */
 };
 
+struct ce_desc_64 {
+	__le64 addr;
+	__le16 nbytes; /* length in register map */
+	__le16 flags; /* fw_metadata_high */
+	__le32 toeplitz_hash_result;
+};
+
+#define CE_DESC_SIZE sizeof(struct ce_desc)
+#define CE_DESC_SIZE_64 sizeof(struct ce_desc_64)
+
 struct ath10k_ce_ring {
 	/* Number of entries in this ring; must be power of 2 */
 	unsigned int nentries;
@@ -117,6 +131,7 @@ struct ath10k_ce_pipe {
 	unsigned int src_sz_max;
 	struct ath10k_ce_ring *src_ring;
 	struct ath10k_ce_ring *dest_ring;
+	const struct ath10k_ce_ops *ops;
 };
 
 /* Copy Engine settable attributes */
@@ -160,7 +175,7 @@ struct ath10k_ce {
  */
 int ath10k_ce_send(struct ath10k_ce_pipe *ce_state,
 		   void *per_transfer_send_context,
-		   u32 buffer,
+		   dma_addr_t buffer,
 		   unsigned int nbytes,
 		   /* 14 bits */
 		   unsigned int transfer_id,
@@ -168,7 +183,7 @@ int ath10k_ce_send(struct ath10k_ce_pipe *ce_state,
 
 int ath10k_ce_send_nolock(struct ath10k_ce_pipe *ce_state,
 			  void *per_transfer_context,
-			  u32 buffer,
+			  dma_addr_t buffer,
 			  unsigned int nbytes,
 			  unsigned int transfer_id,
 			  unsigned int flags);
@@ -180,8 +195,8 @@ int ath10k_ce_num_free_src_entries(struct ath10k_ce_pipe *pipe);
 /*==================Recv=======================*/
 
 int __ath10k_ce_rx_num_free_bufs(struct ath10k_ce_pipe *pipe);
-int __ath10k_ce_rx_post_buf(struct ath10k_ce_pipe *pipe, void *ctx, u32 paddr);
-int ath10k_ce_rx_post_buf(struct ath10k_ce_pipe *pipe, void *ctx, u32 paddr);
+int ath10k_ce_rx_post_buf(struct ath10k_ce_pipe *pipe, void *ctx,
+			  dma_addr_t paddr);
 void ath10k_ce_rx_update_write_idx(struct ath10k_ce_pipe *pipe, u32 nentries);
 
 /* recv flags */
@@ -222,7 +237,7 @@ void ath10k_ce_free_pipe(struct ath10k *ar, int ce_id);
  */
 int ath10k_ce_revoke_recv_next(struct ath10k_ce_pipe *ce_state,
 			       void **per_transfer_contextp,
-			       u32 *bufferp);
+			       dma_addr_t *bufferp);
 
 int ath10k_ce_completed_recv_next_nolock(struct ath10k_ce_pipe *ce_state,
 					 void **per_transfer_contextp,
@@ -235,7 +250,7 @@ int ath10k_ce_completed_recv_next_nolock(struct ath10k_ce_pipe *ce_state,
  */
 int ath10k_ce_cancel_send_next(struct ath10k_ce_pipe *ce_state,
 			       void **per_transfer_contextp,
-			       u32 *bufferp,
+			       dma_addr_t *bufferp,
 			       unsigned int *nbytesp,
 			       unsigned int *transfer_idp);
 
@@ -281,6 +296,32 @@ struct ce_attr {
 	void (*recv_cb)(struct ath10k_ce_pipe *);
 };
 
+struct ath10k_ce_ops {
+	struct ath10k_ce_ring *(*ce_alloc_src_ring)(struct ath10k *ar,
+						    u32 ce_id,
+						    const struct ce_attr *attr);
+	struct ath10k_ce_ring *(*ce_alloc_dst_ring)(struct ath10k *ar,
+						    u32 ce_id,
+						    const struct ce_attr *attr);
+	int (*ce_rx_post_buf)(struct ath10k_ce_pipe *pipe, void *ctx,
+			      dma_addr_t paddr);
+	int (*ce_completed_recv_next_nolock)(struct ath10k_ce_pipe *ce_state,
+					     void **per_transfer_contextp,
+					     u32 *nbytesp);
+	int (*ce_revoke_recv_next)(struct ath10k_ce_pipe *ce_state,
+				   void **per_transfer_contextp,
+				   dma_addr_t *bufferp);
+	void (*ce_extract_desc_data)(struct ath10k *ar,
+				     struct ath10k_ce_ring *src_ring,
+				     u32 sw_index, dma_addr_t *bufferp,
+				     u32 *nbytesp, u32 *transfer_idp);
+	void (*ce_free_pipe)(struct ath10k *ar, int ce_id);
+	int (*ce_send_nolock)(struct ath10k_ce_pipe *pipe,
+			      void *per_transfer_context,
+			      dma_addr_t buffer, u32 nbytes,
+			      u32 transfer_id, u32 flags);
+};
+
 static inline u32 ath10k_ce_base_address(struct ath10k *ar, unsigned int ce_id)
 {
 	return CE0_BASE_ADDRESS + (CE1_BASE_ADDRESS - CE0_BASE_ADDRESS) * ce_id;
@@ -292,6 +333,12 @@ static inline u32 ath10k_ce_base_address(struct ath10k *ar, unsigned int ce_id)
 #define CE_DEST_RING_TO_DESC(baddr, idx) \
 	(&(((struct ce_desc *)baddr)[idx]))
 
+#define CE_SRC_RING_TO_DESC_64(baddr, idx) \
+	(&(((struct ce_desc_64 *)baddr)[idx]))
+
+#define CE_DEST_RING_TO_DESC_64(baddr, idx) \
+	(&(((struct ce_desc_64 *)baddr)[idx]))
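
The _64 macros mirror the existing CE_SRC_RING_TO_DESC()/CE_DEST_RING_TO_DESC() pair, differing only in the cast to the wider struct ce_desc_64. A short usage sketch (the ring base and index names here are hypothetical):

	struct ce_desc_64 *desc;

	/* fetch descriptor sw_index from a 64-bit destination ring */
	desc = CE_DEST_RING_TO_DESC_64(ring_base_addr, sw_index);
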
+
 /* Ring arithmetic (modulus number of entries in ring, which is a pwr of 2). */
 #define CE_RING_DELTA(nentries_mask, fromidx, toidx) \
 	(((int)(toidx) - (int)(fromidx)) & (nentries_mask))
diff --git a/drivers/net/wireless/ath/ath10k/core.c b/drivers/net/wireless/ath/ath10k/core.c
index b29fdbd21ead..b0fdc1023619 100644
--- a/drivers/net/wireless/ath/ath10k/core.c
+++ b/drivers/net/wireless/ath/ath10k/core.c
@@ -1,6 +1,6 @@
 /*
  * Copyright (c) 2005-2011 Atheros Communications Inc.
- * Copyright (c) 2011-2013 Qualcomm Atheros, Inc.
+ * Copyright (c) 2011-2017 Qualcomm Atheros, Inc.
  *
  * Permission to use, copy, modify, and/or distribute this software for any
  * purpose with or without fee is hereby granted, provided that the above
@@ -32,6 +32,7 @@
 #include "htt.h"
 #include "testmode.h"
 #include "wmi-ops.h"
+#include "coredump.h"
 
 unsigned int ath10k_debug_mask;
 static unsigned int ath10k_cryptmode_param;
@@ -39,17 +40,25 @@ static bool uart_print;
 static bool skip_otp;
 static bool rawmode;
 
+/* Enable ATH10K_FW_CRASH_DUMP_REGISTERS and ATH10K_FW_CRASH_DUMP_CE_DATA
+ * by default.
+ */
+unsigned long ath10k_coredump_mask = 0x3;
+
+/* FIXME: most of these should be readonly */
 module_param_named(debug_mask, ath10k_debug_mask, uint, 0644);
 module_param_named(cryptmode, ath10k_cryptmode_param, uint, 0644);
 module_param(uart_print, bool, 0644);
 module_param(skip_otp, bool, 0644);
 module_param(rawmode, bool, 0644);
+module_param_named(coredump_mask, ath10k_coredump_mask, ulong, 0444);
 
 MODULE_PARM_DESC(debug_mask, "Debugging mask");
 MODULE_PARM_DESC(uart_print, "Uart target debugging");
 MODULE_PARM_DESC(skip_otp, "Skip otp failure for calibration in testmode");
 MODULE_PARM_DESC(cryptmode, "Crypto mode: 0-hardware, 1-software");
 MODULE_PARM_DESC(rawmode, "Use raw 802.11 frame datapath");
+MODULE_PARM_DESC(coredump_mask, "Bitfield of what to include in firmware crash file");
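
The default mask of 0x3 corresponds to bits 0 and 1 of the new enum ath10k_fw_crash_dump_type in coredump.h, i.e. ATH10K_FW_CRASH_DUMP_REGISTERS and ATH10K_FW_CRASH_DUMP_CE_DATA; the larger firmware RAM dump (bit 2) stays opt-in. The driver consults the mask with test_bit(), as in this line from later in the patch:

	/* e.g. loaded with coredump_mask=0x7 to also capture firmware RAM */
	if (test_bit(ATH10K_FW_CRASH_DUMP_RAM_DATA, &ath10k_coredump_mask))
		len += sizeof(*dump_tlv) + crash_data->ramdump_buf_len;
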
 
 static const struct ath10k_hw_params ath10k_hw_params_list[] = {
 	{
@@ -75,6 +84,11 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
 		.vht160_mcs_rx_highest = 0,
 		.vht160_mcs_tx_highest = 0,
 		.n_cipher_suites = 8,
+		.num_peers = TARGET_TLV_NUM_PEERS,
+		.ast_skid_limit = 0x10,
+		.num_wds_entries = 0x20,
+		.target_64bit = false,
+		.rx_ring_fill_level = HTT_RX_RING_FILL_LEVEL,
 	},
 	{
 		.id = QCA9887_HW_1_0_VERSION,
@@ -99,6 +113,11 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
 		.vht160_mcs_rx_highest = 0,
 		.vht160_mcs_tx_highest = 0,
 		.n_cipher_suites = 8,
+		.num_peers = TARGET_TLV_NUM_PEERS,
+		.ast_skid_limit = 0x10,
+		.num_wds_entries = 0x20,
+		.target_64bit = false,
+		.rx_ring_fill_level = HTT_RX_RING_FILL_LEVEL,
 	},
 	{
 		.id = QCA6174_HW_2_1_VERSION,
@@ -122,6 +141,11 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
 		.vht160_mcs_rx_highest = 0,
 		.vht160_mcs_tx_highest = 0,
 		.n_cipher_suites = 8,
+		.num_peers = TARGET_TLV_NUM_PEERS,
+		.ast_skid_limit = 0x10,
+		.num_wds_entries = 0x20,
+		.target_64bit = false,
+		.rx_ring_fill_level = HTT_RX_RING_FILL_LEVEL,
 	},
 	{
 		.id = QCA6174_HW_2_1_VERSION,
@@ -145,6 +169,11 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
 		.vht160_mcs_rx_highest = 0,
 		.vht160_mcs_tx_highest = 0,
 		.n_cipher_suites = 8,
+		.num_peers = TARGET_TLV_NUM_PEERS,
+		.ast_skid_limit = 0x10,
+		.num_wds_entries = 0x20,
+		.target_64bit = false,
+		.rx_ring_fill_level = HTT_RX_RING_FILL_LEVEL,
 	},
 	{
 		.id = QCA6174_HW_3_0_VERSION,
@@ -168,6 +197,11 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
 		.vht160_mcs_rx_highest = 0,
 		.vht160_mcs_tx_highest = 0,
 		.n_cipher_suites = 8,
+		.num_peers = TARGET_TLV_NUM_PEERS,
+		.ast_skid_limit = 0x10,
+		.num_wds_entries = 0x20,
+		.target_64bit = false,
+		.rx_ring_fill_level = HTT_RX_RING_FILL_LEVEL,
 	},
 	{
 		.id = QCA6174_HW_3_2_VERSION,
@@ -194,6 +228,11 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
 		.vht160_mcs_rx_highest = 0,
 		.vht160_mcs_tx_highest = 0,
 		.n_cipher_suites = 8,
+		.num_peers = TARGET_TLV_NUM_PEERS,
+		.ast_skid_limit = 0x10,
+		.num_wds_entries = 0x20,
+		.target_64bit = false,
+		.rx_ring_fill_level = HTT_RX_RING_FILL_LEVEL,
 	},
 	{
 		.id = QCA99X0_HW_2_0_DEV_VERSION,
@@ -223,6 +262,11 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
 		.vht160_mcs_rx_highest = 0,
 		.vht160_mcs_tx_highest = 0,
 		.n_cipher_suites = 11,
+		.num_peers = TARGET_TLV_NUM_PEERS,
+		.ast_skid_limit = 0x10,
+		.num_wds_entries = 0x20,
+		.target_64bit = false,
+		.rx_ring_fill_level = HTT_RX_RING_FILL_LEVEL,
 	},
 	{
 		.id = QCA9984_HW_1_0_DEV_VERSION,
@@ -257,6 +301,11 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
 		.vht160_mcs_rx_highest = 1560,
 		.vht160_mcs_tx_highest = 1560,
 		.n_cipher_suites = 11,
+		.num_peers = TARGET_TLV_NUM_PEERS,
+		.ast_skid_limit = 0x10,
+		.num_wds_entries = 0x20,
+		.target_64bit = false,
+		.rx_ring_fill_level = HTT_RX_RING_FILL_LEVEL,
 	},
 	{
 		.id = QCA9888_HW_2_0_DEV_VERSION,
@@ -290,6 +339,11 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
 		.vht160_mcs_rx_highest = 780,
 		.vht160_mcs_tx_highest = 780,
 		.n_cipher_suites = 11,
+		.num_peers = TARGET_TLV_NUM_PEERS,
+		.ast_skid_limit = 0x10,
+		.num_wds_entries = 0x20,
+		.target_64bit = false,
+		.rx_ring_fill_level = HTT_RX_RING_FILL_LEVEL,
 	},
 	{
 		.id = QCA9377_HW_1_0_DEV_VERSION,
@@ -313,6 +367,11 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
 		.vht160_mcs_rx_highest = 0,
 		.vht160_mcs_tx_highest = 0,
 		.n_cipher_suites = 8,
+		.num_peers = TARGET_TLV_NUM_PEERS,
+		.ast_skid_limit = 0x10,
+		.num_wds_entries = 0x20,
+		.target_64bit = false,
+		.rx_ring_fill_level = HTT_RX_RING_FILL_LEVEL,
 	},
 	{
 		.id = QCA9377_HW_1_1_DEV_VERSION,
@@ -338,6 +397,11 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
 		.vht160_mcs_rx_highest = 0,
 		.vht160_mcs_tx_highest = 0,
 		.n_cipher_suites = 8,
+		.num_peers = TARGET_TLV_NUM_PEERS,
+		.ast_skid_limit = 0x10,
+		.num_wds_entries = 0x20,
+		.target_64bit = false,
+		.rx_ring_fill_level = HTT_RX_RING_FILL_LEVEL,
 	},
 	{
 		.id = QCA4019_HW_1_0_DEV_VERSION,
@@ -368,6 +432,31 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
 		.vht160_mcs_rx_highest = 0,
 		.vht160_mcs_tx_highest = 0,
 		.n_cipher_suites = 11,
+		.num_peers = TARGET_TLV_NUM_PEERS,
+		.ast_skid_limit = 0x10,
+		.num_wds_entries = 0x20,
+		.target_64bit = false,
+		.rx_ring_fill_level = HTT_RX_RING_FILL_LEVEL,
+	},
+	{
+		.id = WCN3990_HW_1_0_DEV_VERSION,
+		.dev_id = 0,
+		.name = "wcn3990 hw1.0",
+		.continuous_frag_desc = true,
+		.tx_chain_mask = 0x7,
+		.rx_chain_mask = 0x7,
+		.max_spatial_stream = 4,
+		.fw = {
+			.dir = WCN3990_HW_1_0_FW_DIR,
+		},
+		.sw_decrypt_mcast_mgmt = true,
+		.hw_ops = &wcn3990_ops,
+		.decap_align_bytes = 1,
+		.num_peers = TARGET_HL_10_TLV_NUM_PEERS,
+		.ast_skid_limit = TARGET_HL_10_TLV_AST_SKID_LIMIT,
+		.num_wds_entries = TARGET_HL_10_TLV_NUM_WDS_ENTRIES,
+		.target_64bit = true,
+		.rx_ring_fill_level = HTT_RX_RING_FILL_LEVEL_DUAL_MAC,
 	},
 };
 
@@ -390,6 +479,8 @@ static const char *const ath10k_core_fw_feature_str[] = {
 	[ATH10K_FW_FEATURE_SKIP_NULL_FUNC_WAR] = "skip-null-func-war",
 	[ATH10K_FW_FEATURE_ALLOWS_MESH_BCAST] = "allows-mesh-bcast",
 	[ATH10K_FW_FEATURE_NO_PS] = "no-ps",
+	[ATH10K_FW_FEATURE_MGMT_TX_BY_REF] = "mgmt-tx-by-reference",
+	[ATH10K_FW_FEATURE_NON_BMI] = "non-bmi",
 };
 
 static unsigned int ath10k_core_get_fw_feature_str(char *buf,
@@ -860,6 +951,28 @@ static int ath10k_core_check_smbios(struct ath10k *ar)
 	return 0;
 }
 
+static int ath10k_core_check_dt(struct ath10k *ar)
+{
+	struct device_node *node;
+	const char *variant = NULL;
+
+	node = ar->dev->of_node;
+	if (!node)
+		return -ENOENT;
+
+	of_property_read_string(node, "qcom,ath10k-calibration-variant",
+				&variant);
+	if (!variant)
+		return -ENODATA;
+
+	if (strscpy(ar->id.bdf_ext, variant, sizeof(ar->id.bdf_ext)) < 0)
+		ath10k_dbg(ar, ATH10K_DBG_BOOT,
+			   "bdf variant string is longer than the buffer can accommodate (variant: %s)\n",
+			    variant);
+
+	return 0;
+}
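
The `< 0` check above works because strscpy() returns -E2BIG when the source string does not fit; unlike strlcpy(), the destination is still guaranteed to be NUL-terminated on truncation, so ar->id.bdf_ext remains usable and only a debug message is logged. A minimal sketch of the same pattern with a hypothetical buffer:

	char bdf_ext[16];	/* hypothetical, sized like ar->id.bdf_ext */

	if (strscpy(bdf_ext, variant, sizeof(bdf_ext)) < 0)
		/* truncated, but bdf_ext still ends with '\0' */
		pr_debug("variant truncated: %s\n", variant);
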
+
 static int ath10k_download_and_run_otp(struct ath10k *ar)
 {
 	u32 result, address = ar->hw_params.patch_load_addr;
@@ -1163,7 +1276,10 @@ static int ath10k_core_fetch_board_data_api_n(struct ath10k *ar,
 		len -= sizeof(*hdr);
 		data = hdr->data;
 
-		if (len < ALIGN(ie_len, 4)) {
+		/* jump over the padding */
+		ie_len = ALIGN(ie_len, 4);
+
+		if (len < ie_len) {
 			ath10k_err(ar, "invalid length for board ie_id %d ie_len %zu len %zu\n",
 				   ie_id, ie_len, len);
 			ret = -EINVAL;
@@ -1202,9 +1318,6 @@ static int ath10k_core_fetch_board_data_api_n(struct ath10k *ar,
 			goto out;
 		}
 
-		/* jump over the padding */
-		ie_len = ALIGN(ie_len, 4);
-
 		len -= ie_len;
 		data += ie_len;
 	}
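
Hoisting the ALIGN() above the length check means the check is made against the padded IE length, so a file truncated inside the padding can no longer pass the check and then advance `data` past the end of the buffer. For 4-byte alignment the rounding works out as:

	/* ALIGN(ie_len, 4) rounds up to the next multiple of four:
	 * 5 -> 8, 6 -> 8, 7 -> 8, 8 -> 8; 'len < ie_len' now also
	 * accounts for the padding bytes consumed below.
	 */
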
@@ -1231,19 +1344,19 @@ static int ath10k_core_create_board_name(struct ath10k *ar, char *name,
 	/* strlen(',variant=') + strlen(ar->id.bdf_ext) */
 	char variant[9 + ATH10K_SMBIOS_BDF_EXT_STR_LENGTH] = { 0 };
 
+	if (ar->id.bdf_ext[0] != '\0')
+		scnprintf(variant, sizeof(variant), ",variant=%s",
+			  ar->id.bdf_ext);
+
 	if (ar->id.bmi_ids_valid) {
 		scnprintf(name, name_len,
-			  "bus=%s,bmi-chip-id=%d,bmi-board-id=%d",
+			  "bus=%s,bmi-chip-id=%d,bmi-board-id=%d%s",
 			  ath10k_bus_str(ar->hif.bus),
 			  ar->id.bmi_chip_id,
-			  ar->id.bmi_board_id);
+			  ar->id.bmi_board_id, variant);
 		goto out;
 	}
 
-	if (ar->id.bdf_ext[0] != '\0')
-		scnprintf(variant, sizeof(variant), ",variant=%s",
-			  ar->id.bdf_ext);
-
 	scnprintf(name, name_len,
 		  "bus=%s,vendor=%04x,device=%04x,subsystem-vendor=%04x,subsystem-device=%04x%s",
 		  ath10k_bus_str(ar->hif.bus),
@@ -1335,6 +1448,9 @@ int ath10k_core_fetch_firmware_api_n(struct ath10k *ar, const char *name,
 		len -= sizeof(*hdr);
 		data += sizeof(*hdr);
 
+		/* jump over the padding */
+		ie_len = ALIGN(ie_len, 4);
+
 		if (len < ie_len) {
 			ath10k_err(ar, "invalid length for FW IE %d (%zu < %zu)\n",
 				   ie_id, len, ie_len);
@@ -1440,15 +1556,12 @@ int ath10k_core_fetch_firmware_api_n(struct ath10k *ar, const char *name,
 			break;
 		}
 
-		/* jump over the padding */
-		ie_len = ALIGN(ie_len, 4);
-
 		len -= ie_len;
 		data += ie_len;
 	}
 
-	if (!fw_file->firmware_data ||
-	    !fw_file->firmware_len) {
+	if (!test_bit(ATH10K_FW_FEATURE_NON_BMI, fw_file->fw_features) &&
+	    (!fw_file->firmware_data || !fw_file->firmware_len)) {
 		ath10k_warn(ar, "No ATH10K_FW_IE_FW_IMAGE found from '%s/%s', skipping\n",
 			    ar->hw_params.fw.dir, name);
 		ret = -ENOMEDIUM;
@@ -1474,6 +1587,7 @@ static void ath10k_core_get_fw_name(struct ath10k *ar, char *fw_name,
 		break;
 	case ATH10K_BUS_PCI:
 	case ATH10K_BUS_AHB:
+	case ATH10K_BUS_SNOC:
 		scnprintf(fw_name, fw_name_len, "%s-%d.bin",
 			  ATH10K_FW_FILE_BASE, fw_api);
 		break;
@@ -1759,7 +1873,7 @@ static void ath10k_core_restart(struct work_struct *work)
 
 	mutex_unlock(&ar->conf_mutex);
 
-	ret = ath10k_debug_fw_devcoredump(ar);
+	ret = ath10k_coredump_submit(ar);
 	if (ret)
 		ath10k_warn(ar, "failed to send firmware crash dump via devcoredump: %d",
 			    ret);
@@ -2001,43 +2115,47 @@ int ath10k_core_start(struct ath10k *ar, enum ath10k_firmware_mode mode,
 
 	ar->running_fw = fw;
 
-	ath10k_bmi_start(ar);
-
-	if (ath10k_init_configure_target(ar)) {
-		status = -EINVAL;
-		goto err;
-	}
+	if (!test_bit(ATH10K_FW_FEATURE_NON_BMI,
+		      ar->running_fw->fw_file.fw_features)) {
+		ath10k_bmi_start(ar);
 
-	status = ath10k_download_cal_data(ar);
-	if (status)
-		goto err;
+		if (ath10k_init_configure_target(ar)) {
+			status = -EINVAL;
+			goto err;
+		}
 
-	/* Some of of qca988x solutions are having global reset issue
-	 * during target initialization. Bypassing PLL setting before
-	 * downloading firmware and letting the SoC run on REF_CLK is
-	 * fixing the problem. Corresponding firmware change is also needed
-	 * to set the clock source once the target is initialized.
-	 */
-	if (test_bit(ATH10K_FW_FEATURE_SUPPORTS_SKIP_CLOCK_INIT,
-		     ar->running_fw->fw_file.fw_features)) {
-		status = ath10k_bmi_write32(ar, hi_skip_clock_init, 1);
-		if (status) {
-			ath10k_err(ar, "could not write to skip_clock_init: %d\n",
-				   status);
+		status = ath10k_download_cal_data(ar);
+		if (status)
 			goto err;
+
+		/* Some of the qca988x solutions have a global reset issue
+		 * during target initialization. Bypassing the PLL setting
+		 * before downloading the firmware and letting the SoC run
+		 * on REF_CLK fixes the problem. A corresponding firmware
+		 * change is also needed to set the clock source once the
+		 * target is initialized.
+		 */
+		if (test_bit(ATH10K_FW_FEATURE_SUPPORTS_SKIP_CLOCK_INIT,
+			     ar->running_fw->fw_file.fw_features)) {
+			status = ath10k_bmi_write32(ar, hi_skip_clock_init, 1);
+			if (status) {
+				ath10k_err(ar, "could not write to skip_clock_init: %d\n",
+					   status);
+				goto err;
+			}
 		}
-	}
 
-	status = ath10k_download_fw(ar);
-	if (status)
-		goto err;
+		status = ath10k_download_fw(ar);
+		if (status)
+			goto err;
 
-	status = ath10k_init_uart(ar);
-	if (status)
-		goto err;
+		status = ath10k_init_uart(ar);
+		if (status)
+			goto err;
 
-	if (ar->hif.bus == ATH10K_BUS_SDIO)
-		ath10k_init_sdio(ar);
+		if (ar->hif.bus == ATH10K_BUS_SDIO)
+			ath10k_init_sdio(ar);
+	}
 
 	ar->htc.htc_ops.target_send_suspend_complete =
 		ath10k_send_suspend_complete;
@@ -2048,9 +2166,12 @@ int ath10k_core_start(struct ath10k *ar, enum ath10k_firmware_mode mode,
 		goto err;
 	}
 
-	status = ath10k_bmi_done(ar);
-	if (status)
-		goto err;
+	if (!test_bit(ATH10K_FW_FEATURE_NON_BMI,
+		      ar->running_fw->fw_file.fw_features)) {
+		status = ath10k_bmi_done(ar);
+		if (status)
+			goto err;
+	}
 
 	status = ath10k_wmi_attach(ar);
 	if (status) {
@@ -2293,19 +2414,35 @@ static int ath10k_core_probe_fw(struct ath10k *ar)
 		return ret;
 	}
 
-	memset(&target_info, 0, sizeof(target_info));
-	if (ar->hif.bus == ATH10K_BUS_SDIO)
+	switch (ar->hif.bus) {
+	case ATH10K_BUS_SDIO:
+		memset(&target_info, 0, sizeof(target_info));
 		ret = ath10k_bmi_get_target_info_sdio(ar, &target_info);
-	else
+		if (ret) {
+			ath10k_err(ar, "could not get target info (%d)\n", ret);
+			goto err_power_down;
+		}
+		ar->target_version = target_info.version;
+		ar->hw->wiphy->hw_version = target_info.version;
+		break;
+	case ATH10K_BUS_PCI:
+	case ATH10K_BUS_AHB:
+	case ATH10K_BUS_USB:
+		memset(&target_info, 0, sizeof(target_info));
 		ret = ath10k_bmi_get_target_info(ar, &target_info);
-	if (ret) {
-		ath10k_err(ar, "could not get target info (%d)\n", ret);
-		goto err_power_down;
+		if (ret) {
+			ath10k_err(ar, "could not get target info (%d)\n", ret);
+			goto err_power_down;
+		}
+		ar->target_version = target_info.version;
+		ar->hw->wiphy->hw_version = target_info.version;
+		break;
+	case ATH10K_BUS_SNOC:
+		break;
+	default:
+		ath10k_err(ar, "incorrect hif bus type: %d\n", ar->hif.bus);
 	}
 
-	ar->target_version = target_info.version;
-	ar->hw->wiphy->hw_version = target_info.version;
-
 	ret = ath10k_init_hw_params(ar);
 	if (ret) {
 		ath10k_err(ar, "could not get hw params (%d)\n", ret);
@@ -2325,33 +2462,40 @@ static int ath10k_core_probe_fw(struct ath10k *ar)
 
 	ath10k_debug_print_hwfw_info(ar);
 
-	ret = ath10k_core_pre_cal_download(ar);
-	if (ret) {
-		/* pre calibration data download is not necessary
-		 * for all the chipsets. Ignore failures and continue.
-		 */
-		ath10k_dbg(ar, ATH10K_DBG_BOOT,
-			   "could not load pre cal data: %d\n", ret);
-	}
+	if (!test_bit(ATH10K_FW_FEATURE_NON_BMI,
+		      ar->normal_mode_fw.fw_file.fw_features)) {
+		ret = ath10k_core_pre_cal_download(ar);
+		if (ret) {
+			/* pre calibration data download is not necessary
+			 * for all the chipsets. Ignore failures and continue.
+			 */
+			ath10k_dbg(ar, ATH10K_DBG_BOOT,
+				   "could not load pre cal data: %d\n", ret);
+		}
 
-	ret = ath10k_core_get_board_id_from_otp(ar);
-	if (ret && ret != -EOPNOTSUPP) {
-		ath10k_err(ar, "failed to get board id from otp: %d\n",
-			   ret);
-		goto err_free_firmware_files;
-	}
+		ret = ath10k_core_get_board_id_from_otp(ar);
+		if (ret && ret != -EOPNOTSUPP) {
+			ath10k_err(ar, "failed to get board id from otp: %d\n",
+				   ret);
+			goto err_free_firmware_files;
+		}
 
-	ret = ath10k_core_check_smbios(ar);
-	if (ret)
-		ath10k_dbg(ar, ATH10K_DBG_BOOT, "bdf variant name not set.\n");
+		ret = ath10k_core_check_smbios(ar);
+		if (ret)
+			ath10k_dbg(ar, ATH10K_DBG_BOOT, "SMBIOS bdf variant name not set.\n");
 
-	ret = ath10k_core_fetch_board_file(ar);
-	if (ret) {
-		ath10k_err(ar, "failed to fetch board file: %d\n", ret);
-		goto err_free_firmware_files;
-	}
+		ret = ath10k_core_check_dt(ar);
+		if (ret)
+			ath10k_dbg(ar, ATH10K_DBG_BOOT, "DT bdf variant name not set.\n");
+
+		ret = ath10k_core_fetch_board_file(ar);
+		if (ret) {
+			ath10k_err(ar, "failed to fetch board file: %d\n", ret);
+			goto err_free_firmware_files;
+		}
 
-	ath10k_debug_print_board_info(ar);
+		ath10k_debug_print_board_info(ar);
+	}
 
 	ret = ath10k_core_init_firmware_features(ar);
 	if (ret) {
@@ -2360,11 +2504,15 @@ static int ath10k_core_probe_fw(struct ath10k *ar)
 		goto err_free_firmware_files;
 	}
 
-	ret = ath10k_swap_code_seg_init(ar, &ar->normal_mode_fw.fw_file);
-	if (ret) {
-		ath10k_err(ar, "failed to initialize code swap segment: %d\n",
-			   ret);
-		goto err_free_firmware_files;
+	if (!test_bit(ATH10K_FW_FEATURE_NON_BMI,
+		      ar->normal_mode_fw.fw_file.fw_features)) {
+		ret = ath10k_swap_code_seg_init(ar,
+						&ar->normal_mode_fw.fw_file);
+		if (ret) {
+			ath10k_err(ar, "failed to initialize code swap segment: %d\n",
+				   ret);
+			goto err_free_firmware_files;
+		}
 	}
 
 	mutex_lock(&ar->conf_mutex);
@@ -2416,10 +2564,16 @@ static void ath10k_core_register_work(struct work_struct *work)
 		goto err_release_fw;
 	}
 
+	status = ath10k_coredump_register(ar);
+	if (status) {
+		ath10k_err(ar, "unable to register coredump\n");
+		goto err_unregister_mac;
+	}
+
 	status = ath10k_debug_register(ar);
 	if (status) {
 		ath10k_err(ar, "unable to initialize debugfs\n");
-		goto err_unregister_mac;
+		goto err_unregister_coredump;
 	}
 
 	status = ath10k_spectral_create(ar);
@@ -2442,6 +2596,8 @@ err_spectral_destroy:
 	ath10k_spectral_destroy(ar);
 err_debug_destroy:
 	ath10k_debug_destroy(ar);
+err_unregister_coredump:
+	ath10k_coredump_unregister(ar);
 err_unregister_mac:
 	ath10k_mac_unregister(ar);
 err_release_fw:
@@ -2596,12 +2752,19 @@ struct ath10k *ath10k_core_create(size_t priv_size, struct device *dev,
 
 	init_dummy_netdev(&ar->napi_dev);
 
-	ret = ath10k_debug_create(ar);
+	ret = ath10k_coredump_create(ar);
 	if (ret)
 		goto err_free_aux_wq;
 
+	ret = ath10k_debug_create(ar);
+	if (ret)
+		goto err_free_coredump;
+
 	return ar;
 
+err_free_coredump:
+	ath10k_coredump_destroy(ar);
+
 err_free_aux_wq:
 	destroy_workqueue(ar->workqueue_aux);
 err_free_wq:
@@ -2623,6 +2786,7 @@ void ath10k_core_destroy(struct ath10k *ar)
 	destroy_workqueue(ar->workqueue_aux);
 
 	ath10k_debug_destroy(ar);
+	ath10k_coredump_destroy(ar);
 	ath10k_htt_tx_destroy(&ar->htt);
 	ath10k_wmi_free_host_mem(ar);
 	ath10k_mac_destroy(ar);
diff --git a/drivers/net/wireless/ath/ath10k/core.h b/drivers/net/wireless/ath/ath10k/core.h
index 643041ef3271..fe6b30356d3b 100644
--- a/drivers/net/wireless/ath/ath10k/core.h
+++ b/drivers/net/wireless/ath/ath10k/core.h
@@ -1,6 +1,6 @@
 /*
  * Copyright (c) 2005-2011 Atheros Communications Inc.
- * Copyright (c) 2011-2013 Qualcomm Atheros, Inc.
+ * Copyright (c) 2011-2017 Qualcomm Atheros, Inc.
  *
  * Permission to use, copy, modify, and/or distribute this software for any
  * purpose with or without fee is hereby granted, provided that the above
@@ -67,7 +67,6 @@
 
 /* NAPI poll budget */
 #define ATH10K_NAPI_BUDGET      64
-#define ATH10K_NAPI_QUOTA_LIMIT 60
 
 /* SMBIOS type containing Board Data File Name Extension */
 #define ATH10K_SMBIOS_BDF_EXT_TYPE 0xF8
@@ -93,6 +92,7 @@ enum ath10k_bus {
 	ATH10K_BUS_AHB,
 	ATH10K_BUS_SDIO,
 	ATH10K_BUS_USB,
+	ATH10K_BUS_SNOC,
 };
 
 static inline const char *ath10k_bus_str(enum ath10k_bus bus)
@@ -106,6 +106,8 @@ static inline const char *ath10k_bus_str(enum ath10k_bus bus)
 		return "sdio";
 	case ATH10K_BUS_USB:
 		return "usb";
+	case ATH10K_BUS_SNOC:
+		return "snoc";
 	}
 
 	return "unknown";
@@ -364,11 +366,11 @@ struct ath10k_sta {
 	struct rate_info txrate;
 
 	struct work_struct update_wk;
+	u64 rx_duration;
 
 #ifdef CONFIG_MAC80211_DEBUGFS
 	/* protected by conf_mutex */
 	bool aggr_mode;
-	u64 rx_duration;
 #endif
 };
 
@@ -458,14 +460,17 @@ struct ath10k_ce_crash_hdr {
 	struct ath10k_ce_crash_data entries[];
 };
 
+#define MAX_MEM_DUMP_TYPE	5
+
 /* used for crash-dump storage, protected by data-lock */
 struct ath10k_fw_crash_data {
-	bool crashed_since_read;
-
 	guid_t guid;
-	struct timespec timestamp;
+	struct timespec64 timestamp;
 	__le32 registers[REG_DUMP_COUNT_QCA988X];
 	struct ath10k_ce_crash_data ce_crash_data[CE_COUNT_MAX];
+
+	u8 *ramdump_buf;
+	size_t ramdump_buf_len;
 };
 
 struct ath10k_debug {
@@ -488,12 +493,9 @@ struct ath10k_debug {
 	/* protected by conf_mutex */
 	u64 fw_dbglog_mask;
 	u32 fw_dbglog_level;
-	u32 pktlog_filter;
 	u32 reg_addr;
 	u32 nf_cal_period;
 	void *cal_data;
-
-	struct ath10k_fw_crash_data *fw_crash_data;
 };
 
 enum ath10k_state {
@@ -615,6 +617,12 @@ enum ath10k_fw_features {
 	/* Firmware does not support power save in station mode. */
 	ATH10K_FW_FEATURE_NO_PS = 17,
 
+	/* Firmware allows management tx by reference instead of by value. */
+	ATH10K_FW_FEATURE_MGMT_TX_BY_REF = 18,
+
+	/* Firmware load is done externally, not by BMI */
+	ATH10K_FW_FEATURE_NON_BMI = 19,
+
 	/* keep last */
 	ATH10K_FW_FEATURE_COUNT,
 };
@@ -963,6 +971,14 @@ struct ath10k {
 	} spectral;
 #endif
 
+	u32 pktlog_filter;
+
+#ifdef CONFIG_DEV_COREDUMP
+	struct {
+		struct ath10k_fw_crash_data *fw_crash_data;
+	} coredump;
+#endif
+
 	struct {
 		/* protected by conf_mutex */
 		struct ath10k_fw_components utf_mode_fw;
@@ -1016,6 +1032,8 @@ static inline bool ath10k_peer_stats_enabled(struct ath10k *ar)
 	return false;
 }
 
+extern unsigned long ath10k_coredump_mask;
+
 struct ath10k *ath10k_core_create(size_t priv_size, struct device *dev,
 				  enum ath10k_bus bus,
 				  enum ath10k_hw_rev hw_rev,
diff --git a/drivers/net/wireless/ath/ath10k/coredump.c b/drivers/net/wireless/ath/ath10k/coredump.c
new file mode 100644
index 000000000000..4dde126dab17
--- /dev/null
+++ b/drivers/net/wireless/ath/ath10k/coredump.c
@@ -0,0 +1,993 @@
+/*
+ * Copyright (c) 2011-2017 Qualcomm Atheros, Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include "coredump.h"
+
+#include <linux/devcoredump.h>
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/utsname.h>
+
+#include "debug.h"
+#include "hw.h"
+
+static const struct ath10k_mem_section qca6174_hw21_register_sections[] = {
+	{0x800, 0x810},
+	{0x820, 0x82C},
+	{0x830, 0x8F4},
+	{0x90C, 0x91C},
+	{0xA14, 0xA18},
+	{0xA84, 0xA94},
+	{0xAA8, 0xAD4},
+	{0xADC, 0xB40},
+	{0x1000, 0x10A4},
+	{0x10BC, 0x111C},
+	{0x1134, 0x1138},
+	{0x1144, 0x114C},
+	{0x1150, 0x115C},
+	{0x1160, 0x1178},
+	{0x1240, 0x1260},
+	{0x2000, 0x207C},
+	{0x3000, 0x3014},
+	{0x4000, 0x4014},
+	{0x5000, 0x5124},
+	{0x6000, 0x6040},
+	{0x6080, 0x60CC},
+	{0x6100, 0x611C},
+	{0x6140, 0x61D8},
+	{0x6200, 0x6238},
+	{0x6240, 0x628C},
+	{0x62C0, 0x62EC},
+	{0x6380, 0x63E8},
+	{0x6400, 0x6440},
+	{0x6480, 0x64CC},
+	{0x6500, 0x651C},
+	{0x6540, 0x6580},
+	{0x6600, 0x6638},
+	{0x6640, 0x668C},
+	{0x66C0, 0x66EC},
+	{0x6780, 0x67E8},
+	{0x7080, 0x708C},
+	{0x70C0, 0x70C8},
+	{0x7400, 0x741C},
+	{0x7440, 0x7454},
+	{0x7800, 0x7818},
+	{0x8000, 0x8004},
+	{0x8010, 0x8064},
+	{0x8080, 0x8084},
+	{0x80A0, 0x80A4},
+	{0x80C0, 0x80C4},
+	{0x80E0, 0x80F4},
+	{0x8100, 0x8104},
+	{0x8110, 0x812C},
+	{0x9000, 0x9004},
+	{0x9800, 0x982C},
+	{0x9830, 0x9838},
+	{0x9840, 0x986C},
+	{0x9870, 0x9898},
+	{0x9A00, 0x9C00},
+	{0xD580, 0xD59C},
+	{0xF000, 0xF0E0},
+	{0xF140, 0xF190},
+	{0xF250, 0xF25C},
+	{0xF260, 0xF268},
+	{0xF26C, 0xF2A8},
+	{0x10008, 0x1000C},
+	{0x10014, 0x10018},
+	{0x1001C, 0x10020},
+	{0x10024, 0x10028},
+	{0x10030, 0x10034},
+	{0x10040, 0x10054},
+	{0x10058, 0x1007C},
+	{0x10080, 0x100C4},
+	{0x100C8, 0x10114},
+	{0x1012C, 0x10130},
+	{0x10138, 0x10144},
+	{0x10200, 0x10220},
+	{0x10230, 0x10250},
+	{0x10260, 0x10280},
+	{0x10290, 0x102B0},
+	{0x102C0, 0x102DC},
+	{0x102E0, 0x102F4},
+	{0x102FC, 0x1037C},
+	{0x10380, 0x10390},
+	{0x10800, 0x10828},
+	{0x10840, 0x10844},
+	{0x10880, 0x10884},
+	{0x108C0, 0x108E8},
+	{0x10900, 0x10928},
+	{0x10940, 0x10944},
+	{0x10980, 0x10984},
+	{0x109C0, 0x109E8},
+	{0x10A00, 0x10A28},
+	{0x10A40, 0x10A50},
+	{0x11000, 0x11028},
+	{0x11030, 0x11034},
+	{0x11038, 0x11068},
+	{0x11070, 0x11074},
+	{0x11078, 0x110A8},
+	{0x110B0, 0x110B4},
+	{0x110B8, 0x110E8},
+	{0x110F0, 0x110F4},
+	{0x110F8, 0x11128},
+	{0x11138, 0x11144},
+	{0x11178, 0x11180},
+	{0x111B8, 0x111C0},
+	{0x111F8, 0x11200},
+	{0x11238, 0x1123C},
+	{0x11270, 0x11274},
+	{0x11278, 0x1127C},
+	{0x112B0, 0x112B4},
+	{0x112B8, 0x112BC},
+	{0x112F0, 0x112F4},
+	{0x112F8, 0x112FC},
+	{0x11338, 0x1133C},
+	{0x11378, 0x1137C},
+	{0x113B8, 0x113BC},
+	{0x113F8, 0x113FC},
+	{0x11438, 0x11440},
+	{0x11478, 0x11480},
+	{0x114B8, 0x114BC},
+	{0x114F8, 0x114FC},
+	{0x11538, 0x1153C},
+	{0x11578, 0x1157C},
+	{0x115B8, 0x115BC},
+	{0x115F8, 0x115FC},
+	{0x11638, 0x1163C},
+	{0x11678, 0x1167C},
+	{0x116B8, 0x116BC},
+	{0x116F8, 0x116FC},
+	{0x11738, 0x1173C},
+	{0x11778, 0x1177C},
+	{0x117B8, 0x117BC},
+	{0x117F8, 0x117FC},
+	{0x17000, 0x1701C},
+	{0x17020, 0x170AC},
+	{0x18000, 0x18050},
+	{0x18054, 0x18074},
+	{0x18080, 0x180D4},
+	{0x180DC, 0x18104},
+	{0x18108, 0x1813C},
+	{0x18144, 0x18148},
+	{0x18168, 0x18174},
+	{0x18178, 0x18180},
+	{0x181C8, 0x181E0},
+	{0x181E4, 0x181E8},
+	{0x181EC, 0x1820C},
+	{0x1825C, 0x18280},
+	{0x18284, 0x18290},
+	{0x18294, 0x182A0},
+	{0x18300, 0x18304},
+	{0x18314, 0x18320},
+	{0x18328, 0x18350},
+	{0x1835C, 0x1836C},
+	{0x18370, 0x18390},
+	{0x18398, 0x183AC},
+	{0x183BC, 0x183D8},
+	{0x183DC, 0x183F4},
+	{0x18400, 0x186F4},
+	{0x186F8, 0x1871C},
+	{0x18720, 0x18790},
+	{0x19800, 0x19830},
+	{0x19834, 0x19840},
+	{0x19880, 0x1989C},
+	{0x198A4, 0x198B0},
+	{0x198BC, 0x19900},
+	{0x19C00, 0x19C88},
+	{0x19D00, 0x19D20},
+	{0x19E00, 0x19E7C},
+	{0x19E80, 0x19E94},
+	{0x19E98, 0x19EAC},
+	{0x19EB0, 0x19EBC},
+	{0x19F70, 0x19F74},
+	{0x19F80, 0x19F8C},
+	{0x19FA0, 0x19FB4},
+	{0x19FC0, 0x19FD8},
+	{0x1A000, 0x1A200},
+	{0x1A204, 0x1A210},
+	{0x1A228, 0x1A22C},
+	{0x1A230, 0x1A248},
+	{0x1A250, 0x1A270},
+	{0x1A280, 0x1A290},
+	{0x1A2A0, 0x1A2A4},
+	{0x1A2C0, 0x1A2EC},
+	{0x1A300, 0x1A3BC},
+	{0x1A3F0, 0x1A3F4},
+	{0x1A3F8, 0x1A434},
+	{0x1A438, 0x1A444},
+	{0x1A448, 0x1A468},
+	{0x1A580, 0x1A58C},
+	{0x1A644, 0x1A654},
+	{0x1A670, 0x1A698},
+	{0x1A6AC, 0x1A6B0},
+	{0x1A6D0, 0x1A6D4},
+	{0x1A6EC, 0x1A70C},
+	{0x1A710, 0x1A738},
+	{0x1A7C0, 0x1A7D0},
+	{0x1A7D4, 0x1A7D8},
+	{0x1A7DC, 0x1A7E4},
+	{0x1A7F0, 0x1A7F8},
+	{0x1A888, 0x1A89C},
+	{0x1A8A8, 0x1A8AC},
+	{0x1A8C0, 0x1A8DC},
+	{0x1A8F0, 0x1A8FC},
+	{0x1AE04, 0x1AE08},
+	{0x1AE18, 0x1AE24},
+	{0x1AF80, 0x1AF8C},
+	{0x1AFA0, 0x1AFB4},
+	{0x1B000, 0x1B200},
+	{0x1B284, 0x1B288},
+	{0x1B2D0, 0x1B2D8},
+	{0x1B2DC, 0x1B2EC},
+	{0x1B300, 0x1B340},
+	{0x1B374, 0x1B378},
+	{0x1B380, 0x1B384},
+	{0x1B388, 0x1B38C},
+	{0x1B404, 0x1B408},
+	{0x1B420, 0x1B428},
+	{0x1B440, 0x1B444},
+	{0x1B448, 0x1B44C},
+	{0x1B450, 0x1B458},
+	{0x1B45C, 0x1B468},
+	{0x1B584, 0x1B58C},
+	{0x1B68C, 0x1B690},
+	{0x1B6AC, 0x1B6B0},
+	{0x1B7F0, 0x1B7F8},
+	{0x1C800, 0x1CC00},
+	{0x1CE00, 0x1CE04},
+	{0x1CF80, 0x1CF84},
+	{0x1D200, 0x1D800},
+	{0x1E000, 0x20014},
+	{0x20100, 0x20124},
+	{0x21400, 0x217A8},
+	{0x21800, 0x21BA8},
+	{0x21C00, 0x21FA8},
+	{0x22000, 0x223A8},
+	{0x22400, 0x227A8},
+	{0x22800, 0x22BA8},
+	{0x22C00, 0x22FA8},
+	{0x23000, 0x233A8},
+	{0x24000, 0x24034},
+	{0x26000, 0x26064},
+	{0x27000, 0x27024},
+	{0x34000, 0x3400C},
+	{0x34400, 0x3445C},
+	{0x34800, 0x3485C},
+	{0x34C00, 0x34C5C},
+	{0x35000, 0x3505C},
+	{0x35400, 0x3545C},
+	{0x35800, 0x3585C},
+	{0x35C00, 0x35C5C},
+	{0x36000, 0x3605C},
+	{0x38000, 0x38064},
+	{0x38070, 0x380E0},
+	{0x3A000, 0x3A064},
+	{0x40000, 0x400A4},
+	{0x80000, 0x8000C},
+	{0x80010, 0x80020},
+};
+
+static const struct ath10k_mem_section qca6174_hw30_register_sections[] = {
+	{0x800, 0x810},
+	{0x820, 0x82C},
+	{0x830, 0x8F4},
+	{0x90C, 0x91C},
+	{0xA14, 0xA18},
+	{0xA84, 0xA94},
+	{0xAA8, 0xAD4},
+	{0xADC, 0xB40},
+	{0x1000, 0x10A4},
+	{0x10BC, 0x111C},
+	{0x1134, 0x1138},
+	{0x1144, 0x114C},
+	{0x1150, 0x115C},
+	{0x1160, 0x1178},
+	{0x1240, 0x1260},
+	{0x2000, 0x207C},
+	{0x3000, 0x3014},
+	{0x4000, 0x4014},
+	{0x5000, 0x5124},
+	{0x6000, 0x6040},
+	{0x6080, 0x60CC},
+	{0x6100, 0x611C},
+	{0x6140, 0x61D8},
+	{0x6200, 0x6238},
+	{0x6240, 0x628C},
+	{0x62C0, 0x62EC},
+	{0x6380, 0x63E8},
+	{0x6400, 0x6440},
+	{0x6480, 0x64CC},
+	{0x6500, 0x651C},
+	{0x6540, 0x6580},
+	{0x6600, 0x6638},
+	{0x6640, 0x668C},
+	{0x66C0, 0x66EC},
+	{0x6780, 0x67E8},
+	{0x7080, 0x708C},
+	{0x70C0, 0x70C8},
+	{0x7400, 0x741C},
+	{0x7440, 0x7454},
+	{0x7800, 0x7818},
+	{0x8000, 0x8004},
+	{0x8010, 0x8064},
+	{0x8080, 0x8084},
+	{0x80A0, 0x80A4},
+	{0x80C0, 0x80C4},
+	{0x80E0, 0x80F4},
+	{0x8100, 0x8104},
+	{0x8110, 0x812C},
+	{0x9000, 0x9004},
+	{0x9800, 0x982C},
+	{0x9830, 0x9838},
+	{0x9840, 0x986C},
+	{0x9870, 0x9898},
+	{0x9A00, 0x9C00},
+	{0xD580, 0xD59C},
+	{0xF000, 0xF0E0},
+	{0xF140, 0xF190},
+	{0xF250, 0xF25C},
+	{0xF260, 0xF268},
+	{0xF26C, 0xF2A8},
+	{0x10008, 0x1000C},
+	{0x10014, 0x10018},
+	{0x1001C, 0x10020},
+	{0x10024, 0x10028},
+	{0x10030, 0x10034},
+	{0x10040, 0x10054},
+	{0x10058, 0x1007C},
+	{0x10080, 0x100C4},
+	{0x100C8, 0x10114},
+	{0x1012C, 0x10130},
+	{0x10138, 0x10144},
+	{0x10200, 0x10220},
+	{0x10230, 0x10250},
+	{0x10260, 0x10280},
+	{0x10290, 0x102B0},
+	{0x102C0, 0x102DC},
+	{0x102E0, 0x102F4},
+	{0x102FC, 0x1037C},
+	{0x10380, 0x10390},
+	{0x10800, 0x10828},
+	{0x10840, 0x10844},
+	{0x10880, 0x10884},
+	{0x108C0, 0x108E8},
+	{0x10900, 0x10928},
+	{0x10940, 0x10944},
+	{0x10980, 0x10984},
+	{0x109C0, 0x109E8},
+	{0x10A00, 0x10A28},
+	{0x10A40, 0x10A50},
+	{0x11000, 0x11028},
+	{0x11030, 0x11034},
+	{0x11038, 0x11068},
+	{0x11070, 0x11074},
+	{0x11078, 0x110A8},
+	{0x110B0, 0x110B4},
+	{0x110B8, 0x110E8},
+	{0x110F0, 0x110F4},
+	{0x110F8, 0x11128},
+	{0x11138, 0x11144},
+	{0x11178, 0x11180},
+	{0x111B8, 0x111C0},
+	{0x111F8, 0x11200},
+	{0x11238, 0x1123C},
+	{0x11270, 0x11274},
+	{0x11278, 0x1127C},
+	{0x112B0, 0x112B4},
+	{0x112B8, 0x112BC},
+	{0x112F0, 0x112F4},
+	{0x112F8, 0x112FC},
+	{0x11338, 0x1133C},
+	{0x11378, 0x1137C},
+	{0x113B8, 0x113BC},
+	{0x113F8, 0x113FC},
+	{0x11438, 0x11440},
+	{0x11478, 0x11480},
+	{0x114B8, 0x114BC},
+	{0x114F8, 0x114FC},
+	{0x11538, 0x1153C},
+	{0x11578, 0x1157C},
+	{0x115B8, 0x115BC},
+	{0x115F8, 0x115FC},
+	{0x11638, 0x1163C},
+	{0x11678, 0x1167C},
+	{0x116B8, 0x116BC},
+	{0x116F8, 0x116FC},
+	{0x11738, 0x1173C},
+	{0x11778, 0x1177C},
+	{0x117B8, 0x117BC},
+	{0x117F8, 0x117FC},
+	{0x17000, 0x1701C},
+	{0x17020, 0x170AC},
+	{0x18000, 0x18050},
+	{0x18054, 0x18074},
+	{0x18080, 0x180D4},
+	{0x180DC, 0x18104},
+	{0x18108, 0x1813C},
+	{0x18144, 0x18148},
+	{0x18168, 0x18174},
+	{0x18178, 0x18180},
+	{0x181C8, 0x181E0},
+	{0x181E4, 0x181E8},
+	{0x181EC, 0x1820C},
+	{0x1825C, 0x18280},
+	{0x18284, 0x18290},
+	{0x18294, 0x182A0},
+	{0x18300, 0x18304},
+	{0x18314, 0x18320},
+	{0x18328, 0x18350},
+	{0x1835C, 0x1836C},
+	{0x18370, 0x18390},
+	{0x18398, 0x183AC},
+	{0x183BC, 0x183D8},
+	{0x183DC, 0x183F4},
+	{0x18400, 0x186F4},
+	{0x186F8, 0x1871C},
+	{0x18720, 0x18790},
+	{0x19800, 0x19830},
+	{0x19834, 0x19840},
+	{0x19880, 0x1989C},
+	{0x198A4, 0x198B0},
+	{0x198BC, 0x19900},
+	{0x19C00, 0x19C88},
+	{0x19D00, 0x19D20},
+	{0x19E00, 0x19E7C},
+	{0x19E80, 0x19E94},
+	{0x19E98, 0x19EAC},
+	{0x19EB0, 0x19EBC},
+	{0x19F70, 0x19F74},
+	{0x19F80, 0x19F8C},
+	{0x19FA0, 0x19FB4},
+	{0x19FC0, 0x19FD8},
+	{0x1A000, 0x1A200},
+	{0x1A204, 0x1A210},
+	{0x1A228, 0x1A22C},
+	{0x1A230, 0x1A248},
+	{0x1A250, 0x1A270},
+	{0x1A280, 0x1A290},
+	{0x1A2A0, 0x1A2A4},
+	{0x1A2C0, 0x1A2EC},
+	{0x1A300, 0x1A3BC},
+	{0x1A3F0, 0x1A3F4},
+	{0x1A3F8, 0x1A434},
+	{0x1A438, 0x1A444},
+	{0x1A448, 0x1A468},
+	{0x1A580, 0x1A58C},
+	{0x1A644, 0x1A654},
+	{0x1A670, 0x1A698},
+	{0x1A6AC, 0x1A6B0},
+	{0x1A6D0, 0x1A6D4},
+	{0x1A6EC, 0x1A70C},
+	{0x1A710, 0x1A738},
+	{0x1A7C0, 0x1A7D0},
+	{0x1A7D4, 0x1A7D8},
+	{0x1A7DC, 0x1A7E4},
+	{0x1A7F0, 0x1A7F8},
+	{0x1A888, 0x1A89C},
+	{0x1A8A8, 0x1A8AC},
+	{0x1A8C0, 0x1A8DC},
+	{0x1A8F0, 0x1A8FC},
+	{0x1AE04, 0x1AE08},
+	{0x1AE18, 0x1AE24},
+	{0x1AF80, 0x1AF8C},
+	{0x1AFA0, 0x1AFB4},
+	{0x1B000, 0x1B200},
+	{0x1B284, 0x1B288},
+	{0x1B2D0, 0x1B2D8},
+	{0x1B2DC, 0x1B2EC},
+	{0x1B300, 0x1B340},
+	{0x1B374, 0x1B378},
+	{0x1B380, 0x1B384},
+	{0x1B388, 0x1B38C},
+	{0x1B404, 0x1B408},
+	{0x1B420, 0x1B428},
+	{0x1B440, 0x1B444},
+	{0x1B448, 0x1B44C},
+	{0x1B450, 0x1B458},
+	{0x1B45C, 0x1B468},
+	{0x1B584, 0x1B58C},
+	{0x1B68C, 0x1B690},
+	{0x1B6AC, 0x1B6B0},
+	{0x1B7F0, 0x1B7F8},
+	{0x1C800, 0x1CC00},
+	{0x1CE00, 0x1CE04},
+	{0x1CF80, 0x1CF84},
+	{0x1D200, 0x1D800},
+	{0x1E000, 0x20014},
+	{0x20100, 0x20124},
+	{0x21400, 0x217A8},
+	{0x21800, 0x21BA8},
+	{0x21C00, 0x21FA8},
+	{0x22000, 0x223A8},
+	{0x22400, 0x227A8},
+	{0x22800, 0x22BA8},
+	{0x22C00, 0x22FA8},
+	{0x23000, 0x233A8},
+	{0x24000, 0x24034},
+	{0x26000, 0x26064},
+	{0x27000, 0x27024},
+	{0x34000, 0x3400C},
+	{0x34400, 0x3445C},
+	{0x34800, 0x3485C},
+	{0x34C00, 0x34C5C},
+	{0x35000, 0x3505C},
+	{0x35400, 0x3545C},
+	{0x35800, 0x3585C},
+	{0x35C00, 0x35C5C},
+	{0x36000, 0x3605C},
+	{0x38000, 0x38064},
+	{0x38070, 0x380E0},
+	{0x3A000, 0x3A074},
+	{0x40000, 0x400A4},
+	{0x80000, 0x8000C},
+	{0x80010, 0x80020},
+};
+
+static const struct ath10k_mem_region qca6174_hw10_mem_regions[] = {
+	{
+		.type = ATH10K_MEM_REGION_TYPE_DRAM,
+		.start = 0x400000,
+		.len = 0x70000,
+		.name = "DRAM",
+		.section_table = {
+			.sections = NULL,
+			.size = 0,
+		},
+	},
+	{
+		.type = ATH10K_MEM_REGION_TYPE_REG,
+
+		/* RTC_SOC_BASE_ADDRESS */
+		.start = 0x0,
+
+		/* WLAN_MBOX_BASE_ADDRESS - RTC_SOC_BASE_ADDRESS */
+		.len = 0x800 - 0x0,
+
+		.name = "REG_PART1",
+		.section_table = {
+			.sections = NULL,
+			.size = 0,
+		},
+	},
+	{
+		.type = ATH10K_MEM_REGION_TYPE_REG,
+
+		/* STEREO_BASE_ADDRESS */
+		.start = 0x27000,
+
+		/* USB_BASE_ADDRESS - STEREO_BASE_ADDRESS */
+		.len = 0x60000 - 0x27000,
+
+		.name = "REG_PART2",
+		.section_table = {
+			.sections = NULL,
+			.size = 0,
+		},
+	},
+};
+
+static const struct ath10k_mem_region qca6174_hw21_mem_regions[] = {
+	{
+		.type = ATH10K_MEM_REGION_TYPE_DRAM,
+		.start = 0x400000,
+		.len = 0x70000,
+		.name = "DRAM",
+		.section_table = {
+			.sections = NULL,
+			.size = 0,
+		},
+	},
+	{
+		.type = ATH10K_MEM_REGION_TYPE_AXI,
+		.start = 0xa0000,
+		.len = 0x18000,
+		.name = "AXI",
+		.section_table = {
+			.sections = NULL,
+			.size = 0,
+		},
+	},
+	{
+		.type = ATH10K_MEM_REGION_TYPE_REG,
+		.start = 0x800,
+		.len = 0x80020 - 0x800,
+		.name = "REG_TOTAL",
+		.section_table = {
+			.sections = qca6174_hw21_register_sections,
+			.size = ARRAY_SIZE(qca6174_hw21_register_sections),
+		},
+	},
+};
+
+static const struct ath10k_mem_region qca6174_hw30_mem_regions[] = {
+	{
+		.type = ATH10K_MEM_REGION_TYPE_DRAM,
+		.start = 0x400000,
+		.len = 0x90000,
+		.name = "DRAM",
+		.section_table = {
+			.sections = NULL,
+			.size = 0,
+		},
+	},
+	{
+		.type = ATH10K_MEM_REGION_TYPE_AXI,
+		.start = 0xa0000,
+		.len = 0x18000,
+		.name = "AXI",
+		.section_table = {
+			.sections = NULL,
+			.size = 0,
+		},
+	},
+	{
+		.type = ATH10K_MEM_REGION_TYPE_REG,
+		.start = 0x800,
+		.len = 0x80020 - 0x800,
+		.name = "REG_TOTAL",
+		.section_table = {
+			.sections = qca6174_hw30_register_sections,
+			.size = ARRAY_SIZE(qca6174_hw30_register_sections),
+		},
+	},
+
+	/* IRAM dump must be put last */
+	{
+		.type = ATH10K_MEM_REGION_TYPE_IRAM1,
+		.start = 0x00980000,
+		.len = 0x00080000,
+		.name = "IRAM1",
+		.section_table = {
+			.sections = NULL,
+			.size = 0,
+		},
+	},
+	{
+		.type = ATH10K_MEM_REGION_TYPE_IRAM2,
+		.start = 0x00a00000,
+		.len = 0x00040000,
+		.name = "IRAM2",
+		.section_table = {
+			.sections = NULL,
+			.size = 0,
+		},
+	},
+};
+
+static const struct ath10k_mem_region qca988x_hw20_mem_regions[] = {
+	{
+		.type = ATH10K_MEM_REGION_TYPE_DRAM,
+		.start = 0x400000,
+		.len = 0x50000,
+		.name = "DRAM",
+		.section_table = {
+			.sections = NULL,
+			.size = 0,
+		},
+	},
+	{
+		.type = ATH10K_MEM_REGION_TYPE_REG,
+		.start = 0x4000,
+		.len = 0x2000,
+		.name = "REG_PART1",
+		.section_table = {
+			.sections = NULL,
+			.size = 0,
+		},
+	},
+	{
+		.type = ATH10K_MEM_REGION_TYPE_REG,
+		.start = 0x8000,
+		.len = 0x58000,
+		.name = "REG_PART2",
+		.section_table = {
+			.sections = NULL,
+			.size = 0,
+		},
+	},
+};
+
+static const struct ath10k_hw_mem_layout hw_mem_layouts[] = {
+	{
+		.hw_id = QCA6174_HW_1_0_VERSION,
+		.region_table = {
+			.regions = qca6174_hw10_mem_regions,
+			.size = ARRAY_SIZE(qca6174_hw10_mem_regions),
+		},
+	},
+	{
+		.hw_id = QCA6174_HW_1_1_VERSION,
+		.region_table = {
+			.regions = qca6174_hw10_mem_regions,
+			.size = ARRAY_SIZE(qca6174_hw10_mem_regions),
+		},
+	},
+	{
+		.hw_id = QCA6174_HW_1_3_VERSION,
+		.region_table = {
+			.regions = qca6174_hw10_mem_regions,
+			.size = ARRAY_SIZE(qca6174_hw10_mem_regions),
+		},
+	},
+	{
+		.hw_id = QCA6174_HW_2_1_VERSION,
+		.region_table = {
+			.regions = qca6174_hw21_mem_regions,
+			.size = ARRAY_SIZE(qca6174_hw21_mem_regions),
+		},
+	},
+	{
+		.hw_id = QCA6174_HW_3_0_VERSION,
+		.region_table = {
+			.regions = qca6174_hw30_mem_regions,
+			.size = ARRAY_SIZE(qca6174_hw30_mem_regions),
+		},
+	},
+	{
+		.hw_id = QCA6174_HW_3_2_VERSION,
+		.region_table = {
+			.regions = qca6174_hw30_mem_regions,
+			.size = ARRAY_SIZE(qca6174_hw30_mem_regions),
+		},
+	},
+	{
+		.hw_id = QCA9377_HW_1_1_DEV_VERSION,
+		.region_table = {
+			.regions = qca6174_hw30_mem_regions,
+			.size = ARRAY_SIZE(qca6174_hw30_mem_regions),
+		},
+	},
+	{
+		.hw_id = QCA988X_HW_2_0_VERSION,
+		.region_table = {
+			.regions = qca988x_hw20_mem_regions,
+			.size = ARRAY_SIZE(qca988x_hw20_mem_regions),
+		},
+	},
+};
+
+static u32 ath10k_coredump_get_ramdump_size(struct ath10k *ar)
+{
+	const struct ath10k_hw_mem_layout *hw;
+	const struct ath10k_mem_region *mem_region;
+	size_t size = 0;
+	int i;
+
+	hw = ath10k_coredump_get_mem_layout(ar);
+
+	if (!hw)
+		return 0;
+
+	mem_region = &hw->region_table.regions[0];
+
+	for (i = 0; i < hw->region_table.size; i++) {
+		size += mem_region->len;
+		mem_region++;
+	}
+
+	/* reserve space for the headers */
+	size += hw->region_table.size * sizeof(struct ath10k_dump_ram_data_hdr);
+
+	/* make sure it is aligned to 16 bytes for debug message print out */
+	size = ALIGN(size, 16);
+
+	return size;
+}
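
As a worked example, assume the qca988x hw2.0 layout above (three regions of 0x50000, 0x2000 and 0x58000 bytes) and a 12-byte ath10k_dump_ram_data_hdr (three __le32 fields; the flexible array adds nothing): the buffer comes out at 0xaa030 bytes. A stand-alone sketch of the same computation:

	#include <stdio.h>

	/* Stand-alone check of the ramdump sizing, assuming the qca988x
	 * hw2.0 region table above and a 12-byte header per region.
	 */
	int main(void)
	{
		unsigned long regions[] = { 0x50000, 0x2000, 0x58000 };
		unsigned long size = 0;
		unsigned long hdr_sz = 12;	/* three __le32 fields */
		int i;

		for (i = 0; i < 3; i++)
			size += regions[i];
		size += 3 * hdr_sz;		/* one header per region */
		size = (size + 15) & ~15UL;	/* ALIGN(size, 16) */
		printf("ramdump buf: 0x%lx bytes\n", size);	/* 0xaa030 */
		return 0;
	}
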
+
+const struct ath10k_hw_mem_layout *ath10k_coredump_get_mem_layout(struct ath10k *ar)
+{
+	int i;
+
+	if (!test_bit(ATH10K_FW_CRASH_DUMP_RAM_DATA, &ath10k_coredump_mask))
+		return NULL;
+
+	if (WARN_ON(ar->target_version == 0))
+		return NULL;
+
+	for (i = 0; i < ARRAY_SIZE(hw_mem_layouts); i++) {
+		if (ar->target_version == hw_mem_layouts[i].hw_id)
+			return &hw_mem_layouts[i];
+	}
+
+	return NULL;
+}
+EXPORT_SYMBOL(ath10k_coredump_get_mem_layout);
+
+struct ath10k_fw_crash_data *ath10k_coredump_new(struct ath10k *ar)
+{
+	struct ath10k_fw_crash_data *crash_data = ar->coredump.fw_crash_data;
+
+	lockdep_assert_held(&ar->data_lock);
+
+	if (ath10k_coredump_mask == 0)
+		/* coredump disabled */
+		return NULL;
+
+	guid_gen(&crash_data->guid);
+	ktime_get_real_ts64(&crash_data->timestamp);
+
+	return crash_data;
+}
+EXPORT_SYMBOL(ath10k_coredump_new);
+
+static struct ath10k_dump_file_data *ath10k_coredump_build(struct ath10k *ar)
+{
+	struct ath10k_fw_crash_data *crash_data = ar->coredump.fw_crash_data;
+	struct ath10k_ce_crash_hdr *ce_hdr;
+	struct ath10k_dump_file_data *dump_data;
+	struct ath10k_tlv_dump_data *dump_tlv;
+	size_t hdr_len = sizeof(*dump_data);
+	size_t len, sofar = 0;
+	unsigned char *buf;
+
+	len = hdr_len;
+
+	if (test_bit(ATH10K_FW_CRASH_DUMP_REGISTERS, &ath10k_coredump_mask))
+		len += sizeof(*dump_tlv) + sizeof(crash_data->registers);
+
+	if (test_bit(ATH10K_FW_CRASH_DUMP_CE_DATA, &ath10k_coredump_mask))
+		len += sizeof(*dump_tlv) + sizeof(*ce_hdr) +
+			CE_COUNT * sizeof(ce_hdr->entries[0]);
+
+	if (test_bit(ATH10K_FW_CRASH_DUMP_RAM_DATA, &ath10k_coredump_mask))
+		len += sizeof(*dump_tlv) + crash_data->ramdump_buf_len;
+
+	sofar += hdr_len;
+
+	/* This is going to get big when we start dumping FW RAM and such,
+	 * so go ahead and use vmalloc.
+	 */
+	buf = vzalloc(len);
+	if (!buf)
+		return NULL;
+
+	spin_lock_bh(&ar->data_lock);
+
+	dump_data = (struct ath10k_dump_file_data *)(buf);
+	strlcpy(dump_data->df_magic, "ATH10K-FW-DUMP",
+		sizeof(dump_data->df_magic));
+	dump_data->len = cpu_to_le32(len);
+
+	dump_data->version = cpu_to_le32(ATH10K_FW_CRASH_DUMP_VERSION);
+
+	guid_copy(&dump_data->guid, &crash_data->guid);
+	dump_data->chip_id = cpu_to_le32(ar->chip_id);
+	dump_data->bus_type = cpu_to_le32(0);
+	dump_data->target_version = cpu_to_le32(ar->target_version);
+	dump_data->fw_version_major = cpu_to_le32(ar->fw_version_major);
+	dump_data->fw_version_minor = cpu_to_le32(ar->fw_version_minor);
+	dump_data->fw_version_release = cpu_to_le32(ar->fw_version_release);
+	dump_data->fw_version_build = cpu_to_le32(ar->fw_version_build);
+	dump_data->phy_capability = cpu_to_le32(ar->phy_capability);
+	dump_data->hw_min_tx_power = cpu_to_le32(ar->hw_min_tx_power);
+	dump_data->hw_max_tx_power = cpu_to_le32(ar->hw_max_tx_power);
+	dump_data->ht_cap_info = cpu_to_le32(ar->ht_cap_info);
+	dump_data->vht_cap_info = cpu_to_le32(ar->vht_cap_info);
+	dump_data->num_rf_chains = cpu_to_le32(ar->num_rf_chains);
+
+	strlcpy(dump_data->fw_ver, ar->hw->wiphy->fw_version,
+		sizeof(dump_data->fw_ver));
+
+	dump_data->kernel_ver_code = 0;
+	strlcpy(dump_data->kernel_ver, init_utsname()->release,
+		sizeof(dump_data->kernel_ver));
+
+	dump_data->tv_sec = cpu_to_le64(crash_data->timestamp.tv_sec);
+	dump_data->tv_nsec = cpu_to_le64(crash_data->timestamp.tv_nsec);
+
+	if (test_bit(ATH10K_FW_CRASH_DUMP_REGISTERS, &ath10k_coredump_mask)) {
+		dump_tlv = (struct ath10k_tlv_dump_data *)(buf + sofar);
+		dump_tlv->type = cpu_to_le32(ATH10K_FW_CRASH_DUMP_REGISTERS);
+		dump_tlv->tlv_len = cpu_to_le32(sizeof(crash_data->registers));
+		memcpy(dump_tlv->tlv_data, &crash_data->registers,
+		       sizeof(crash_data->registers));
+		sofar += sizeof(*dump_tlv) + sizeof(crash_data->registers);
+	}
+
+	if (test_bit(ATH10K_FW_CRASH_DUMP_CE_DATA, &ath10k_coredump_mask)) {
+		dump_tlv = (struct ath10k_tlv_dump_data *)(buf + sofar);
+		dump_tlv->type = cpu_to_le32(ATH10K_FW_CRASH_DUMP_CE_DATA);
+		dump_tlv->tlv_len = cpu_to_le32(sizeof(*ce_hdr) +
+						CE_COUNT * sizeof(ce_hdr->entries[0]));
+		ce_hdr = (struct ath10k_ce_crash_hdr *)(dump_tlv->tlv_data);
+		ce_hdr->ce_count = cpu_to_le32(CE_COUNT);
+		memset(ce_hdr->reserved, 0, sizeof(ce_hdr->reserved));
+		memcpy(ce_hdr->entries, crash_data->ce_crash_data,
+		       CE_COUNT * sizeof(ce_hdr->entries[0]));
+		sofar += sizeof(*dump_tlv) + sizeof(*ce_hdr) +
+			CE_COUNT * sizeof(ce_hdr->entries[0]);
+	}
+
+	/* Gather ram dump */
+	if (test_bit(ATH10K_FW_CRASH_DUMP_RAM_DATA, &ath10k_coredump_mask)) {
+		dump_tlv = (struct ath10k_tlv_dump_data *)(buf + sofar);
+		dump_tlv->type = cpu_to_le32(ATH10K_FW_CRASH_DUMP_RAM_DATA);
+		dump_tlv->tlv_len = cpu_to_le32(crash_data->ramdump_buf_len);
+		memcpy(dump_tlv->tlv_data, crash_data->ramdump_buf,
+		       crash_data->ramdump_buf_len);
+		sofar += sizeof(*dump_tlv) + crash_data->ramdump_buf_len;
+	}
+
+	spin_unlock_bh(&ar->data_lock);
+
+	return dump_data;
+}
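
Since the TLVs are packed back to back after the fixed header, a consumer of the devcoredump file can walk them with a simple offset loop. A hedged userspace sketch (the struct here is assumed to mirror ath10k_tlv_dump_data from coredump.h; no such helper ships with this patch):

	#include <endian.h>
	#include <stdint.h>
	#include <stdio.h>

	/* Mirrors struct ath10k_tlv_dump_data; fields are little-endian. */
	struct tlv {
		uint32_t type;
		uint32_t tlv_len;	/* payload bytes, excluding header */
		uint8_t data[];
	};

	/* 'buf' points just past the fixed dump-file header, 'len' is
	 * the number of bytes remaining in the file.
	 */
	static void walk_tlvs(const uint8_t *buf, size_t len)
	{
		size_t off = 0;

		while (off + sizeof(struct tlv) <= len) {
			const struct tlv *t = (const void *)(buf + off);

			printf("tlv type %u len %u\n",
			       le32toh(t->type), le32toh(t->tlv_len));
			off += sizeof(*t) + le32toh(t->tlv_len);
		}
	}
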
+
+int ath10k_coredump_submit(struct ath10k *ar)
+{
+	struct ath10k_dump_file_data *dump;
+
+	if (ath10k_coredump_mask == 0)
+		/* coredump disabled */
+		return 0;
+
+	dump = ath10k_coredump_build(ar);
+	if (!dump) {
+		ath10k_warn(ar, "no crash dump data found for devcoredump");
+		return -ENODATA;
+	}
+
+	dev_coredumpv(ar->dev, dump, le32_to_cpu(dump->len), GFP_KERNEL);
+
+	return 0;
+}
+
+int ath10k_coredump_create(struct ath10k *ar)
+{
+	if (ath10k_coredump_mask == 0)
+		/* coredump disabled */
+		return 0;
+
+	ar->coredump.fw_crash_data = vzalloc(sizeof(*ar->coredump.fw_crash_data));
+	if (!ar->coredump.fw_crash_data)
+		return -ENOMEM;
+
+	return 0;
+}
+
+int ath10k_coredump_register(struct ath10k *ar)
+{
+	struct ath10k_fw_crash_data *crash_data = ar->coredump.fw_crash_data;
+
+	if (test_bit(ATH10K_FW_CRASH_DUMP_RAM_DATA, &ath10k_coredump_mask)) {
+		crash_data->ramdump_buf_len = ath10k_coredump_get_ramdump_size(ar);
+
+		crash_data->ramdump_buf = vzalloc(crash_data->ramdump_buf_len);
+		if (!crash_data->ramdump_buf)
+			return -ENOMEM;
+	}
+
+	return 0;
+}
+
+void ath10k_coredump_unregister(struct ath10k *ar)
+{
+	struct ath10k_fw_crash_data *crash_data = ar->coredump.fw_crash_data;
+
+	vfree(crash_data->ramdump_buf);
+}
+
+void ath10k_coredump_destroy(struct ath10k *ar)
+{
+	if (ar->coredump.fw_crash_data->ramdump_buf) {
+		vfree(ar->coredump.fw_crash_data->ramdump_buf);
+		ar->coredump.fw_crash_data->ramdump_buf = NULL;
+		ar->coredump.fw_crash_data->ramdump_buf_len = 0;
+	}
+
+	vfree(ar->coredump.fw_crash_data);
+	ar->coredump.fw_crash_data = NULL;
+}
diff --git a/drivers/net/wireless/ath/ath10k/coredump.h b/drivers/net/wireless/ath/ath10k/coredump.h
new file mode 100644
index 000000000000..bfee13038e59
--- /dev/null
+++ b/drivers/net/wireless/ath/ath10k/coredump.h
@@ -0,0 +1,225 @@
+/*
+ * Copyright (c) 2011-2017 Qualcomm Atheros, Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#ifndef _COREDUMP_H_
+#define _COREDUMP_H_
+
+#include "core.h"
+
+#define ATH10K_FW_CRASH_DUMP_VERSION 1
+
+/**
+ * enum ath10k_fw_crash_dump_type - types of data in the dump file
+ * @ATH10K_FW_CRASH_DUMP_REGDUMP: Register crash dump in binary format
+ */
+enum ath10k_fw_crash_dump_type {
+	ATH10K_FW_CRASH_DUMP_REGISTERS = 0,
+	ATH10K_FW_CRASH_DUMP_CE_DATA = 1,
+
+	/* contains multiple struct ath10k_dump_ram_data_hdr */
+	ATH10K_FW_CRASH_DUMP_RAM_DATA = 2,
+
+	ATH10K_FW_CRASH_DUMP_MAX,
+};
+
+struct ath10k_tlv_dump_data {
+	/* see ath10k_fw_crash_dump_type above */
+	__le32 type;
+
+	/* in bytes */
+	__le32 tlv_len;
+
+	/* pad to 32-bit boundaries as needed */
+	u8 tlv_data[];
+} __packed;
+
+struct ath10k_dump_file_data {
+	/* dump file information */
+
+	/* "ATH10K-FW-DUMP" */
+	char df_magic[16];
+
+	__le32 len;
+
+	/* file dump version */
+	__le32 version;
+
+	/* some info we can get from ath10k struct that might help */
+
+	guid_t guid;
+
+	__le32 chip_id;
+
+	/* 0 for now, in place for later hardware */
+	__le32 bus_type;
+
+	__le32 target_version;
+	__le32 fw_version_major;
+	__le32 fw_version_minor;
+	__le32 fw_version_release;
+	__le32 fw_version_build;
+	__le32 phy_capability;
+	__le32 hw_min_tx_power;
+	__le32 hw_max_tx_power;
+	__le32 ht_cap_info;
+	__le32 vht_cap_info;
+	__le32 num_rf_chains;
+
+	/* firmware version string */
+	char fw_ver[ETHTOOL_FWVERS_LEN];
+
+	/* Kernel related information */
+
+	/* time-of-day stamp */
+	__le64 tv_sec;
+
+	/* time-of-day stamp, nano-seconds */
+	__le64 tv_nsec;
+
+	/* LINUX_VERSION_CODE */
+	__le32 kernel_ver_code;
+
+	/* VERMAGIC_STRING */
+	char kernel_ver[64];
+
+	/* room for growth w/out changing binary format */
+	u8 unused[128];
+
+	/* struct ath10k_tlv_dump_data + more */
+	u8 data[0];
+} __packed;
+
+struct ath10k_dump_ram_data_hdr {
+	/* enum ath10k_mem_region_type */
+	__le32 region_type;
+
+	__le32 start;
+
+	/* length of payload data, not including this header */
+	__le32 length;
+
+	u8 data[0];
+};
+
+/* magic number to fill the holes not copied due to sections in regions */
+#define ATH10K_MAGIC_NOT_COPIED		0xAA
+
+/* part of user space ABI */
+enum ath10k_mem_region_type {
+	ATH10K_MEM_REGION_TYPE_REG	= 1,
+	ATH10K_MEM_REGION_TYPE_DRAM	= 2,
+	ATH10K_MEM_REGION_TYPE_AXI	= 3,
+	ATH10K_MEM_REGION_TYPE_IRAM1	= 4,
+	ATH10K_MEM_REGION_TYPE_IRAM2	= 5,
+};
+
+/* Define a section of a region which should be copied. Not all parts of
+ * the memory can be copied safely, some registers for example, so
+ * sections can be used to define what is safe to copy.
+ *
+ * To minimize the size of the array, the list must obey the format:
+ * '{start0,stop0},{start1,stop1},{start2,stop2}....' The values below must
+ * also obey 'start0 < stop0 < start1 < stop1 < start2 < ...', otherwise
+ * we may encounter errors in the dump processing.
+ */
+struct ath10k_mem_section {
+	u32 start;
+	u32 end;
+};
+
+/* One region of a memory layout. If the sections field is null, the
+ * entire region is copied. If sections is non-null, only the areas
+ * specified in sections are copied and the rest of the areas are
+ * filled with ATH10K_MAGIC_NOT_COPIED.
+ */
+struct ath10k_mem_region {
+	enum ath10k_mem_region_type type;
+	u32 start;
+	u32 len;
+
+	const char *name;
+
+	struct {
+		const struct ath10k_mem_section *sections;
+		u32 size;
+	} section_table;
+};
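
When a region carries a section table, only the [start, end) windows listed there are read from the device and the gaps are padded with ATH10K_MAGIC_NOT_COPIED, so file offsets keep matching hardware addresses. A hedged sketch of that fill logic (read_device_mem() is a hypothetical stand-in for the bus-specific read; the real copy routines live outside this header):

	u32 cur = region->start;
	int i;

	for (i = 0; i < region->section_table.size; i++) {
		const struct ath10k_mem_section *s =
			&region->section_table.sections[i];

		/* the gap before this section is unsafe to read */
		memset(dst, ATH10K_MAGIC_NOT_COPIED, s->start - cur);
		dst += s->start - cur;

		read_device_mem(s->start, dst, s->end - s->start);
		dst += s->end - s->start;
		cur = s->end;
	}
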
+
+/* Contains the memory layout of a hardware version identified with the
+ * hardware id, split into regions.
+ */
+struct ath10k_hw_mem_layout {
+	u32 hw_id;
+
+	struct {
+		const struct ath10k_mem_region *regions;
+		int size;
+	} region_table;
+};
+
+/* FIXME: where to put this? */
+extern unsigned long ath10k_coredump_mask;
+
+#ifdef CONFIG_DEV_COREDUMP
+
+int ath10k_coredump_submit(struct ath10k *ar);
+struct ath10k_fw_crash_data *ath10k_coredump_new(struct ath10k *ar);
+int ath10k_coredump_create(struct ath10k *ar);
+int ath10k_coredump_register(struct ath10k *ar);
+void ath10k_coredump_unregister(struct ath10k *ar);
+void ath10k_coredump_destroy(struct ath10k *ar);
+
+const struct ath10k_hw_mem_layout *ath10k_coredump_get_mem_layout(struct ath10k *ar);
+
+#else /* CONFIG_DEV_COREDUMP */
+
+static inline int ath10k_coredump_submit(struct ath10k *ar)
+{
+	return 0;
+}
+
+static inline struct ath10k_fw_crash_data *ath10k_coredump_new(struct ath10k *ar)
+{
+	return NULL;
+}
+
+static inline int ath10k_coredump_create(struct ath10k *ar)
+{
+	return 0;
+}
+
+static inline int ath10k_coredump_register(struct ath10k *ar)
+{
+	return 0;
+}
+
+static inline void ath10k_coredump_unregister(struct ath10k *ar)
+{
+}
+
+static inline void ath10k_coredump_destroy(struct ath10k *ar)
+{
+}
+
+static inline const struct ath10k_hw_mem_layout *
+ath10k_coredump_get_mem_layout(struct ath10k *ar)
+{
+	return NULL;
+}
+
+#endif /* CONFIG_DEV_COREDUMP */
+
+#endif /* _COREDUMP_H_ */
diff --git a/drivers/net/wireless/ath/ath10k/debug.c b/drivers/net/wireless/ath/ath10k/debug.c
index df514507d3f1..6d836a26272f 100644
--- a/drivers/net/wireless/ath/ath10k/debug.c
+++ b/drivers/net/wireless/ath/ath10k/debug.c
@@ -1,6 +1,6 @@
 /*
  * Copyright (c) 2005-2011 Atheros Communications Inc.
- * Copyright (c) 2011-2013 Qualcomm Atheros, Inc.
+ * Copyright (c) 2011-2017 Qualcomm Atheros, Inc.
  *
  * Permission to use, copy, modify, and/or distribute this software for any
  * purpose with or without fee is hereby granted, provided that the above
@@ -18,10 +18,8 @@
 #include <linux/module.h>
 #include <linux/debugfs.h>
 #include <linux/vmalloc.h>
-#include <linux/utsname.h>
 #include <linux/crc32.h>
 #include <linux/firmware.h>
-#include <linux/devcoredump.h>
 
 #include "core.h"
 #include "debug.h"
@@ -33,86 +31,6 @@
 
 #define ATH10K_DEBUG_CAL_DATA_LEN 12064
 
-#define ATH10K_FW_CRASH_DUMP_VERSION 1
-
-/**
- * enum ath10k_fw_crash_dump_type - types of data in the dump file
- * @ATH10K_FW_CRASH_DUMP_REGDUMP: Register crash dump in binary format
- */
-enum ath10k_fw_crash_dump_type {
-	ATH10K_FW_CRASH_DUMP_REGISTERS = 0,
-	ATH10K_FW_CRASH_DUMP_CE_DATA = 1,
-
-	ATH10K_FW_CRASH_DUMP_MAX,
-};
-
-struct ath10k_tlv_dump_data {
-	/* see ath10k_fw_crash_dump_type above */
-	__le32 type;
-
-	/* in bytes */
-	__le32 tlv_len;
-
-	/* pad to 32-bit boundaries as needed */
-	u8 tlv_data[];
-} __packed;
-
-struct ath10k_dump_file_data {
-	/* dump file information */
-
-	/* "ATH10K-FW-DUMP" */
-	char df_magic[16];
-
-	__le32 len;
-
-	/* file dump version */
-	__le32 version;
-
-	/* some info we can get from ath10k struct that might help */
-
-	guid_t guid;
-
-	__le32 chip_id;
-
-	/* 0 for now, in place for later hardware */
-	__le32 bus_type;
-
-	__le32 target_version;
-	__le32 fw_version_major;
-	__le32 fw_version_minor;
-	__le32 fw_version_release;
-	__le32 fw_version_build;
-	__le32 phy_capability;
-	__le32 hw_min_tx_power;
-	__le32 hw_max_tx_power;
-	__le32 ht_cap_info;
-	__le32 vht_cap_info;
-	__le32 num_rf_chains;
-
-	/* firmware version string */
-	char fw_ver[ETHTOOL_FWVERS_LEN];
-
-	/* Kernel related information */
-
-	/* time-of-day stamp */
-	__le64 tv_sec;
-
-	/* time-of-day stamp, nano-seconds */
-	__le64 tv_nsec;
-
-	/* LINUX_VERSION_CODE */
-	__le32 kernel_ver_code;
-
-	/* VERMAGIC_STRING */
-	char kernel_ver[64];
-
-	/* room for growth w/out changing binary format */
-	u8 unused[128];
-
-	/* struct ath10k_tlv_dump_data + more */
-	u8 data[0];
-} __packed;
-
 void ath10k_info(struct ath10k *ar, const char *fmt, ...)
 {
 	struct va_format vaf = {
@@ -711,189 +629,6 @@ static const struct file_operations fops_chip_id = {
 	.llseek = default_llseek,
 };
 
-struct ath10k_fw_crash_data *
-ath10k_debug_get_new_fw_crash_data(struct ath10k *ar)
-{
-	struct ath10k_fw_crash_data *crash_data = ar->debug.fw_crash_data;
-
-	lockdep_assert_held(&ar->data_lock);
-
-	crash_data->crashed_since_read = true;
-	guid_gen(&crash_data->guid);
-	getnstimeofday(&crash_data->timestamp);
-
-	return crash_data;
-}
-EXPORT_SYMBOL(ath10k_debug_get_new_fw_crash_data);
-
-static struct ath10k_dump_file_data *ath10k_build_dump_file(struct ath10k *ar,
-							    bool mark_read)
-{
-	struct ath10k_fw_crash_data *crash_data = ar->debug.fw_crash_data;
-	struct ath10k_ce_crash_hdr *ce_hdr;
-	struct ath10k_dump_file_data *dump_data;
-	struct ath10k_tlv_dump_data *dump_tlv;
-	size_t hdr_len = sizeof(*dump_data);
-	size_t len, sofar = 0;
-	unsigned char *buf;
-
-	len = hdr_len;
-	len += sizeof(*dump_tlv) + sizeof(crash_data->registers);
-	len += sizeof(*dump_tlv) + sizeof(*ce_hdr) +
-		CE_COUNT * sizeof(ce_hdr->entries[0]);
-
-	sofar += hdr_len;
-
-	/* This is going to get big when we start dumping FW RAM and such,
-	 * so go ahead and use vmalloc.
-	 */
-	buf = vzalloc(len);
-	if (!buf)
-		return NULL;
-
-	spin_lock_bh(&ar->data_lock);
-
-	if (!crash_data->crashed_since_read) {
-		spin_unlock_bh(&ar->data_lock);
-		vfree(buf);
-		return NULL;
-	}
-
-	dump_data = (struct ath10k_dump_file_data *)(buf);
-	strlcpy(dump_data->df_magic, "ATH10K-FW-DUMP",
-		sizeof(dump_data->df_magic));
-	dump_data->len = cpu_to_le32(len);
-
-	dump_data->version = cpu_to_le32(ATH10K_FW_CRASH_DUMP_VERSION);
-
-	guid_copy(&dump_data->guid, &crash_data->guid);
-	dump_data->chip_id = cpu_to_le32(ar->chip_id);
-	dump_data->bus_type = cpu_to_le32(0);
-	dump_data->target_version = cpu_to_le32(ar->target_version);
-	dump_data->fw_version_major = cpu_to_le32(ar->fw_version_major);
-	dump_data->fw_version_minor = cpu_to_le32(ar->fw_version_minor);
-	dump_data->fw_version_release = cpu_to_le32(ar->fw_version_release);
-	dump_data->fw_version_build = cpu_to_le32(ar->fw_version_build);
-	dump_data->phy_capability = cpu_to_le32(ar->phy_capability);
-	dump_data->hw_min_tx_power = cpu_to_le32(ar->hw_min_tx_power);
-	dump_data->hw_max_tx_power = cpu_to_le32(ar->hw_max_tx_power);
-	dump_data->ht_cap_info = cpu_to_le32(ar->ht_cap_info);
-	dump_data->vht_cap_info = cpu_to_le32(ar->vht_cap_info);
-	dump_data->num_rf_chains = cpu_to_le32(ar->num_rf_chains);
-
-	strlcpy(dump_data->fw_ver, ar->hw->wiphy->fw_version,
-		sizeof(dump_data->fw_ver));
-
-	dump_data->kernel_ver_code = 0;
-	strlcpy(dump_data->kernel_ver, init_utsname()->release,
-		sizeof(dump_data->kernel_ver));
-
-	dump_data->tv_sec = cpu_to_le64(crash_data->timestamp.tv_sec);
-	dump_data->tv_nsec = cpu_to_le64(crash_data->timestamp.tv_nsec);
-
-	/* Gather crash-dump */
-	dump_tlv = (struct ath10k_tlv_dump_data *)(buf + sofar);
-	dump_tlv->type = cpu_to_le32(ATH10K_FW_CRASH_DUMP_REGISTERS);
-	dump_tlv->tlv_len = cpu_to_le32(sizeof(crash_data->registers));
-	memcpy(dump_tlv->tlv_data, &crash_data->registers,
-	       sizeof(crash_data->registers));
-	sofar += sizeof(*dump_tlv) + sizeof(crash_data->registers);
-
-	dump_tlv = (struct ath10k_tlv_dump_data *)(buf + sofar);
-	dump_tlv->type = cpu_to_le32(ATH10K_FW_CRASH_DUMP_CE_DATA);
-	dump_tlv->tlv_len = cpu_to_le32(sizeof(*ce_hdr) +
-					CE_COUNT * sizeof(ce_hdr->entries[0]));
-	ce_hdr = (struct ath10k_ce_crash_hdr *)(dump_tlv->tlv_data);
-	ce_hdr->ce_count = cpu_to_le32(CE_COUNT);
-	memset(ce_hdr->reserved, 0, sizeof(ce_hdr->reserved));
-	memcpy(ce_hdr->entries, crash_data->ce_crash_data,
-	       CE_COUNT * sizeof(ce_hdr->entries[0]));
-	sofar += sizeof(*dump_tlv) + sizeof(*ce_hdr) +
-		 CE_COUNT * sizeof(ce_hdr->entries[0]);
-
-	ar->debug.fw_crash_data->crashed_since_read = !mark_read;
-
-	spin_unlock_bh(&ar->data_lock);
-
-	return dump_data;
-}
-
-int ath10k_debug_fw_devcoredump(struct ath10k *ar)
-{
-	struct ath10k_dump_file_data *dump;
-	void *dump_ptr;
-	u32 dump_len;
-
-	/* To keep the dump file available also for debugfs don't mark the
-	 * file read, only debugfs should do that.
-	 */
-	dump = ath10k_build_dump_file(ar, false);
-	if (!dump) {
-		ath10k_warn(ar, "no crash dump data found for devcoredump");
-		return -ENODATA;
-	}
-
-	/* Make a copy of the dump file for dev_coredumpv() as during the
-	 * transition period we need to own the original file. Once
-	 * fw_crash_dump debugfs file is removed no need to have a copy
-	 * anymore.
-	 */
-	dump_len = le32_to_cpu(dump->len);
-	dump_ptr = vzalloc(dump_len);
-
-	if (!dump_ptr)
-		return -ENOMEM;
-
-	memcpy(dump_ptr, dump, dump_len);
-
-	dev_coredumpv(ar->dev, dump_ptr, dump_len, GFP_KERNEL);
-
-	return 0;
-}
-
-static int ath10k_fw_crash_dump_open(struct inode *inode, struct file *file)
-{
-	struct ath10k *ar = inode->i_private;
-	struct ath10k_dump_file_data *dump;
-
-	ath10k_warn(ar, "fw_crash_dump debugfs file is deprecated, please use /sys/class/devcoredump instead.");
-
-	dump = ath10k_build_dump_file(ar, true);
-	if (!dump)
-		return -ENODATA;
-
-	file->private_data = dump;
-
-	return 0;
-}
-
-static ssize_t ath10k_fw_crash_dump_read(struct file *file,
-					 char __user *user_buf,
-					 size_t count, loff_t *ppos)
-{
-	struct ath10k_dump_file_data *dump_file = file->private_data;
-
-	return simple_read_from_buffer(user_buf, count, ppos,
-				       dump_file,
-				       le32_to_cpu(dump_file->len));
-}
-
-static int ath10k_fw_crash_dump_release(struct inode *inode,
-					struct file *file)
-{
-	vfree(file->private_data);
-
-	return 0;
-}
-
-static const struct file_operations fops_fw_crash_dump = {
-	.open = ath10k_fw_crash_dump_open,
-	.read = ath10k_fw_crash_dump_read,
-	.release = ath10k_fw_crash_dump_release,
-	.owner = THIS_MODULE,
-	.llseek = default_llseek,
-};
-
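The deprecation warning above redirects users to devcoredump; a hedged user-space replacement for the removed debugfs reader might look like this (the devcd1 instance name and the dismissal write follow the devcoredump class convention, but are assumptions here):

	#include <fcntl.h>
	#include <stdio.h>
	#include <unistd.h>

	int main(void)
	{
		char buf[4096];
		ssize_t n;
		int fd = open("/sys/class/devcoredump/devcd1/data", O_RDWR);

		if (fd < 0)
			return 1;
		while ((n = read(fd, buf, sizeof(buf))) > 0)
			fwrite(buf, 1, n, stdout);
		write(fd, "1", 1);	/* any write frees the dump */
		close(fd);
		return 0;
	}
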
 static ssize_t ath10k_reg_addr_read(struct file *file,
 				    char __user *user_buf,
 				    size_t count, loff_t *ppos)
@@ -1950,14 +1685,14 @@ int ath10k_debug_start(struct ath10k *ar)
 				    ret);
 	}
 
-	if (ar->debug.pktlog_filter) {
+	if (ar->pktlog_filter) {
 		ret = ath10k_wmi_pdev_pktlog_enable(ar,
-						    ar->debug.pktlog_filter);
+						    ar->pktlog_filter);
 		if (ret)
 			/* not serious */
 			ath10k_warn(ar,
 				    "failed to enable pktlog filter %x: %d\n",
-				    ar->debug.pktlog_filter, ret);
+				    ar->pktlog_filter, ret);
 	} else {
 		ret = ath10k_wmi_pdev_pktlog_disable(ar);
 		if (ret)
@@ -2097,12 +1832,12 @@ static ssize_t ath10k_write_pktlog_filter(struct file *file,
 	mutex_lock(&ar->conf_mutex);
 
 	if (ar->state != ATH10K_STATE_ON) {
-		ar->debug.pktlog_filter = filter;
+		ar->pktlog_filter = filter;
 		ret = count;
 		goto out;
 	}
 
-	if (filter == ar->debug.pktlog_filter) {
+	if (filter == ar->pktlog_filter) {
 		ret = count;
 		goto out;
 	}
@@ -2111,7 +1846,7 @@ static ssize_t ath10k_write_pktlog_filter(struct file *file,
 		ret = ath10k_wmi_pdev_pktlog_enable(ar, filter);
 		if (ret) {
 			ath10k_warn(ar, "failed to enable pktlog filter %x: %d\n",
-				    ar->debug.pktlog_filter, ret);
+				    ar->pktlog_filter, ret);
 			goto out;
 		}
 	} else {
@@ -2122,7 +1857,7 @@ static ssize_t ath10k_write_pktlog_filter(struct file *file,
 		}
 	}
 
-	ar->debug.pktlog_filter = filter;
+	ar->pktlog_filter = filter;
 	ret = count;
 
 out:
@@ -2139,7 +1874,7 @@ static ssize_t ath10k_read_pktlog_filter(struct file *file, char __user *ubuf,
 
 	mutex_lock(&ar->conf_mutex);
 	len = scnprintf(buf, sizeof(buf) - len, "%08x\n",
-			ar->debug.pktlog_filter);
+			ar->pktlog_filter);
 	mutex_unlock(&ar->conf_mutex);
 
 	return simple_read_from_buffer(ubuf, count, ppos, buf, len);
@@ -2402,10 +2137,6 @@ static const struct file_operations fops_fw_checksums = {
 
 int ath10k_debug_create(struct ath10k *ar)
 {
-	ar->debug.fw_crash_data = vzalloc(sizeof(*ar->debug.fw_crash_data));
-	if (!ar->debug.fw_crash_data)
-		return -ENOMEM;
-
 	ar->debug.cal_data = vzalloc(ATH10K_DEBUG_CAL_DATA_LEN);
 	if (!ar->debug.cal_data)
 		return -ENOMEM;
@@ -2420,9 +2151,6 @@ int ath10k_debug_create(struct ath10k *ar)
 
 void ath10k_debug_destroy(struct ath10k *ar)
 {
-	vfree(ar->debug.fw_crash_data);
-	ar->debug.fw_crash_data = NULL;
-
 	vfree(ar->debug.cal_data);
 	ar->debug.cal_data = NULL;
 
@@ -2460,9 +2188,6 @@ int ath10k_debug_register(struct ath10k *ar)
 	debugfs_create_file("simulate_fw_crash", 0600, ar->debug.debugfs_phy, ar,
 			    &fops_simulate_fw_crash);
 
-	debugfs_create_file("fw_crash_dump", 0400, ar->debug.debugfs_phy, ar,
-			    &fops_fw_crash_dump);
-
 	debugfs_create_file("reg_addr", 0600, ar->debug.debugfs_phy, ar,
 			    &fops_reg_addr);
 
diff --git a/drivers/net/wireless/ath/ath10k/debug.h b/drivers/net/wireless/ath/ath10k/debug.h
index 548ad5483a4a..e54308889e59 100644
--- a/drivers/net/wireless/ath/ath10k/debug.h
+++ b/drivers/net/wireless/ath/ath10k/debug.h
@@ -1,6 +1,6 @@
 /*
  * Copyright (c) 2005-2011 Atheros Communications Inc.
- * Copyright (c) 2011-2013 Qualcomm Atheros, Inc.
+ * Copyright (c) 2011-2017 Qualcomm Atheros, Inc.
  *
  * Permission to use, copy, modify, and/or distribute this software for any
  * purpose with or without fee is hereby granted, provided that the above
@@ -42,6 +42,7 @@ enum ath10k_debug_mask {
 	ATH10K_DBG_SDIO_DUMP	= 0x00020000,
 	ATH10K_DBG_USB		= 0x00040000,
 	ATH10K_DBG_USB_BULK	= 0x00080000,
+	ATH10K_DBG_SNOC		= 0x00100000,
 	ATH10K_DBG_ANY		= 0xffffffff,
 };
 
@@ -51,7 +52,8 @@ enum ath10k_pktlog_filter {
 	ATH10K_PKTLOG_RCFIND     = 0x000000004,
 	ATH10K_PKTLOG_RCUPDATE   = 0x000000008,
 	ATH10K_PKTLOG_DBG_PRINT  = 0x000000010,
-	ATH10K_PKTLOG_ANY        = 0x00000001f,
+	ATH10K_PKTLOG_PEER_STATS = 0x000000040,
+	ATH10K_PKTLOG_ANY        = 0x00000005f,
 };
 
 enum ath10k_dbg_aggr_mode {
@@ -60,6 +62,21 @@ enum ath10k_dbg_aggr_mode {
 	ATH10K_DBG_AGGR_MODE_MAX,
 };
 
+/* Types of packet log events */
+enum ath_pktlog_type {
+	ATH_PKTLOG_TYPE_TX_CTRL = 1,
+	ATH_PKTLOG_TYPE_TX_STAT,
+};
+
+struct ath10k_pktlog_hdr {
+	__le16 flags;
+	__le16 missed_cnt;
+	__le16 log_type; /* Type of log information following this header */
+	__le16 size; /* Size of variable length log information in bytes */
+	__le32 timestamp;
+	u8 payload[0];
+} __packed;
+
 /* FIXME: How to calculate the buffer size sanely? */
 #define ATH10K_FW_STATS_BUF_SIZE (1024 * 1024)
 
@@ -84,13 +101,8 @@ void ath10k_debug_unregister(struct ath10k *ar);
 void ath10k_debug_fw_stats_process(struct ath10k *ar, struct sk_buff *skb);
 void ath10k_debug_tpc_stats_process(struct ath10k *ar,
 				    struct ath10k_tpc_stats *tpc_stats);
-struct ath10k_fw_crash_data *
-ath10k_debug_get_new_fw_crash_data(struct ath10k *ar);
-
 void ath10k_debug_dbglog_add(struct ath10k *ar, u8 *buffer, int len);
 
-int ath10k_debug_fw_devcoredump(struct ath10k *ar);
-
 #define ATH10K_DFS_STAT_INC(ar, c) (ar->debug.dfs_stats.c++)
 
 void ath10k_debug_get_et_strings(struct ieee80211_hw *hw,
@@ -157,12 +169,6 @@ static inline void ath10k_debug_dbglog_add(struct ath10k *ar, u8 *buffer,
 {
 }
 
-static inline struct ath10k_fw_crash_data *
-ath10k_debug_get_new_fw_crash_data(struct ath10k *ar)
-{
-	return NULL;
-}
-
 static inline u64 ath10k_debug_get_fw_dbglog_mask(struct ath10k *ar)
 {
 	return 0;
@@ -173,11 +179,6 @@ static inline u32 ath10k_debug_get_fw_dbglog_level(struct ath10k *ar)
 	return 0;
 }
 
-static inline int ath10k_debug_fw_devcoredump(struct ath10k *ar)
-{
-	return 0;
-}
-
 #define ATH10K_DFS_STAT_INC(ar, c) do { } while (0)
 
 #define ath10k_debug_get_et_strings NULL
@@ -190,9 +191,6 @@ void ath10k_sta_add_debugfs(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
 			    struct ieee80211_sta *sta, struct dentry *dir);
 void ath10k_sta_update_rx_duration(struct ath10k *ar,
 				   struct ath10k_fw_stats *stats);
-void ath10k_sta_statistics(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
-			   struct ieee80211_sta *sta,
-			   struct station_info *sinfo);
 #else
 static inline
 void ath10k_sta_update_rx_duration(struct ath10k *ar,
diff --git a/drivers/net/wireless/ath/ath10k/debugfs_sta.c b/drivers/net/wireless/ath/ath10k/debugfs_sta.c
index d59ac6b83340..b260b09dd4d3 100644
--- a/drivers/net/wireless/ath/ath10k/debugfs_sta.c
+++ b/drivers/net/wireless/ath/ath10k/debugfs_sta.c
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2014 Qualcomm Atheros, Inc.
+ * Copyright (c) 2014-2017 Qualcomm Atheros, Inc.
  *
  * Permission to use, copy, modify, and/or distribute this software for any
  * purpose with or without fee is hereby granted, provided that the above
@@ -65,33 +65,6 @@ void ath10k_sta_update_rx_duration(struct ath10k *ar,
 		ath10k_sta_update_stats_rx_duration(ar, stats);
 }
 
-void ath10k_sta_statistics(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
-			   struct ieee80211_sta *sta,
-			   struct station_info *sinfo)
-{
-	struct ath10k_sta *arsta = (struct ath10k_sta *)sta->drv_priv;
-	struct ath10k *ar = arsta->arvif->ar;
-
-	if (!ath10k_peer_stats_enabled(ar))
-		return;
-
-	sinfo->rx_duration = arsta->rx_duration;
-	sinfo->filled |= 1ULL << NL80211_STA_INFO_RX_DURATION;
-
-	if (!arsta->txrate.legacy && !arsta->txrate.nss)
-		return;
-
-	if (arsta->txrate.legacy) {
-		sinfo->txrate.legacy = arsta->txrate.legacy;
-	} else {
-		sinfo->txrate.mcs = arsta->txrate.mcs;
-		sinfo->txrate.nss = arsta->txrate.nss;
-		sinfo->txrate.bw = arsta->txrate.bw;
-	}
-	sinfo->txrate.flags = arsta->txrate.flags;
-	sinfo->filled |= 1ULL << NL80211_STA_INFO_TX_BITRATE;
-}
-
 static ssize_t ath10k_dbg_sta_read_aggr_mode(struct file *file,
 					     char __user *user_buf,
 					     size_t count, loff_t *ppos)
diff --git a/drivers/net/wireless/ath/ath10k/hif.h b/drivers/net/wireless/ath/ath10k/hif.h
index 6679dd9cfd12..6da4e3369c5a 100644
--- a/drivers/net/wireless/ath/ath10k/hif.h
+++ b/drivers/net/wireless/ath/ath10k/hif.h
@@ -1,6 +1,6 @@
 /*
  * Copyright (c) 2005-2011 Atheros Communications Inc.
- * Copyright (c) 2011-2013 Qualcomm Atheros, Inc.
+ * Copyright (c) 2011-2015,2017 Qualcomm Atheros, Inc.
  *
  * Permission to use, copy, modify, and/or distribute this software for any
  * purpose with or without fee is hereby granted, provided that the above
diff --git a/drivers/net/wireless/ath/ath10k/htc.c b/drivers/net/wireless/ath/ath10k/htc.c
index e5c80f582ff5..492dc5b4bbf2 100644
--- a/drivers/net/wireless/ath/ath10k/htc.c
+++ b/drivers/net/wireless/ath/ath10k/htc.c
@@ -1,6 +1,6 @@
 /*
  * Copyright (c) 2005-2011 Atheros Communications Inc.
- * Copyright (c) 2011-2013 Qualcomm Atheros, Inc.
+ * Copyright (c) 2011-2017 Qualcomm Atheros, Inc.
  *
  * Permission to use, copy, modify, and/or distribute this software for any
  * purpose with or without fee is hereby granted, provided that the above
diff --git a/drivers/net/wireless/ath/ath10k/htc.h b/drivers/net/wireless/ath/ath10k/htc.h
index 24663b07eeac..a2f8814b3e53 100644
--- a/drivers/net/wireless/ath/ath10k/htc.h
+++ b/drivers/net/wireless/ath/ath10k/htc.h
@@ -1,6 +1,6 @@
 /*
  * Copyright (c) 2005-2011 Atheros Communications Inc.
- * Copyright (c) 2011-2013 Qualcomm Atheros, Inc.
+ * Copyright (c) 2011-2016 Qualcomm Atheros, Inc.
  *
  * Permission to use, copy, modify, and/or distribute this software for any
  * purpose with or without fee is hereby granted, provided that the above
diff --git a/drivers/net/wireless/ath/ath10k/htt.c b/drivers/net/wireless/ath/ath10k/htt.c
index cd160b16db1e..625198dea18b 100644
--- a/drivers/net/wireless/ath/ath10k/htt.c
+++ b/drivers/net/wireless/ath/ath10k/htt.c
@@ -1,6 +1,6 @@
 /*
  * Copyright (c) 2005-2011 Atheros Communications Inc.
- * Copyright (c) 2011-2013 Qualcomm Atheros, Inc.
+ * Copyright (c) 2011-2017 Qualcomm Atheros, Inc.
  *
  * Permission to use, copy, modify, and/or distribute this software for any
  * purpose with or without fee is hereby granted, provided that the above
@@ -207,6 +207,9 @@ int ath10k_htt_init(struct ath10k *ar)
 		WARN_ON(1);
 		return -EINVAL;
 	}
+	ath10k_htt_set_tx_ops(htt);
+	ath10k_htt_set_rx_ops(htt);
+
 	return 0;
 }
 
@@ -254,11 +257,11 @@ int ath10k_htt_setup(struct ath10k_htt *htt)
 		return status;
 	}
 
-	status = ath10k_htt_send_frag_desc_bank_cfg(htt);
+	status = htt->tx_ops->htt_send_frag_desc_bank_cfg(htt);
 	if (status)
 		return status;
 
-	status = ath10k_htt_send_rx_ring_cfg_ll(htt);
+	status = htt->tx_ops->htt_send_rx_ring_cfg(htt);
 	if (status) {
 		ath10k_warn(ar, "failed to setup rx ring: %d\n",
 			    status);
diff --git a/drivers/net/wireless/ath/ath10k/htt.h b/drivers/net/wireless/ath/ath10k/htt.h
index 6305308422c4..8cc2a8b278e4 100644
--- a/drivers/net/wireless/ath/ath10k/htt.h
+++ b/drivers/net/wireless/ath/ath10k/htt.h
@@ -1,6 +1,6 @@
 /*
  * Copyright (c) 2005-2011 Atheros Communications Inc.
- * Copyright (c) 2011-2013 Qualcomm Atheros, Inc.
+ * Copyright (c) 2011-2017 Qualcomm Atheros, Inc.
  *
  * Permission to use, copy, modify, and/or distribute this software for any
  * purpose with or without fee is hereby granted, provided that the above
@@ -107,6 +107,14 @@ struct htt_msdu_ext_desc {
 	struct htt_data_tx_desc_frag frags[6];
 };
 
+struct htt_msdu_ext_desc_64 {
+	__le32 tso_flag[5];
+	__le16 ip_identification;
+	u8 flags;
+	u8 reserved;
+	struct htt_data_tx_desc_frag frags[6];
+};
+
 #define	HTT_MSDU_EXT_DESC_FLAG_IPV4_CSUM_ENABLE		BIT(0)
 #define	HTT_MSDU_EXT_DESC_FLAG_UDP_IPV4_CSUM_ENABLE	BIT(1)
 #define	HTT_MSDU_EXT_DESC_FLAG_UDP_IPV6_CSUM_ENABLE	BIT(2)
@@ -179,6 +187,22 @@ struct htt_data_tx_desc {
 	u8 prefetch[0]; /* start of frame, for FW classification engine */
 } __packed;
 
+struct htt_data_tx_desc_64 {
+	u8 flags0; /* %HTT_DATA_TX_DESC_FLAGS0_ */
+	__le16 flags1; /* %HTT_DATA_TX_DESC_FLAGS1_ */
+	__le16 len;
+	__le16 id;
+	__le64 frags_paddr;
+	union {
+		__le32 peerid;
+		struct {
+			__le16 peerid;
+			__le16 freq;
+		} __packed offchan_tx;
+	} __packed;
+	u8 prefetch[0]; /* start of frame, for FW classification engine */
+} __packed;
+
 enum htt_rx_ring_flags {
 	HTT_RX_RING_FLAGS_MAC80211_HDR = 1 << 0,
 	HTT_RX_RING_FLAGS_MSDU_PAYLOAD = 1 << 1,
@@ -200,8 +224,11 @@ enum htt_rx_ring_flags {
 
 #define HTT_RX_RING_SIZE_MIN 128
 #define HTT_RX_RING_SIZE_MAX 2048
+#define HTT_RX_RING_SIZE HTT_RX_RING_SIZE_MAX
+#define HTT_RX_RING_FILL_LEVEL (((HTT_RX_RING_SIZE) / 2) - 1)
+#define HTT_RX_RING_FILL_LEVEL_DUAL_MAC (HTT_RX_RING_SIZE - 1)
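Since HTT_RX_RING_SIZE is pinned to HTT_RX_RING_SIZE_MAX (2048), the two fill levels work out to 1023 and 2047 entries. An illustrative compile-time check of that arithmetic, not part of the patch:

	_Static_assert(HTT_RX_RING_FILL_LEVEL == (2048 / 2) - 1, "1023 entries");
	_Static_assert(HTT_RX_RING_FILL_LEVEL_DUAL_MAC == 2048 - 1, "2047 entries");
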
 
-struct htt_rx_ring_setup_ring {
+struct htt_rx_ring_setup_ring32 {
 	__le32 fw_idx_shadow_reg_paddr;
 	__le32 rx_ring_base_paddr;
 	__le16 rx_ring_len; /* in 4-byte words */
@@ -222,14 +249,40 @@ struct htt_rx_ring_setup_ring {
 	__le16 frag_info_offset;
 } __packed;
 
+struct htt_rx_ring_setup_ring64 {
+	__le64 fw_idx_shadow_reg_paddr;
+	__le64 rx_ring_base_paddr;
+	__le16 rx_ring_len; /* in 4-byte words */
+	__le16 rx_ring_bufsize; /* rx skb size - in bytes */
+	__le16 flags; /* %HTT_RX_RING_FLAGS_ */
+	__le16 fw_idx_init_val;
+
+	/* the following offsets are in 4-byte units */
+	__le16 mac80211_hdr_offset;
+	__le16 msdu_payload_offset;
+	__le16 ppdu_start_offset;
+	__le16 ppdu_end_offset;
+	__le16 mpdu_start_offset;
+	__le16 mpdu_end_offset;
+	__le16 msdu_start_offset;
+	__le16 msdu_end_offset;
+	__le16 rx_attention_offset;
+	__le16 frag_info_offset;
+} __packed;
+
 struct htt_rx_ring_setup_hdr {
 	u8 num_rings; /* supported values: 1, 2 */
 	__le16 rsvd0;
 } __packed;
 
-struct htt_rx_ring_setup {
+struct htt_rx_ring_setup_32 {
 	struct htt_rx_ring_setup_hdr hdr;
-	struct htt_rx_ring_setup_ring rings[0];
+	struct htt_rx_ring_setup_ring32 rings[0];
+} __packed;
+
+struct htt_rx_ring_setup_64 {
+	struct htt_rx_ring_setup_hdr hdr;
+	struct htt_rx_ring_setup_ring64 rings[0];
 } __packed;
 
 /*
@@ -855,13 +908,23 @@ struct htt_rx_in_ord_msdu_desc {
 	u8 reserved;
 } __packed;
 
+struct htt_rx_in_ord_msdu_desc_ext {
+	__le64 msdu_paddr;
+	__le16 msdu_len;
+	u8 fw_desc;
+	u8 reserved;
+} __packed;
+
 struct htt_rx_in_ord_ind {
 	u8 info;
 	__le16 peer_id;
 	u8 vdev_id;
 	u8 reserved;
 	__le16 msdu_count;
-	struct htt_rx_in_ord_msdu_desc msdu_descs[0];
+	union {
+		struct htt_rx_in_ord_msdu_desc msdu_descs32[0];
+		struct htt_rx_in_ord_msdu_desc_ext msdu_descs64[0];
+	} __packed;
 } __packed;
 
 #define HTT_RX_IN_ORD_IND_INFO_TID_MASK		0x0000001f
@@ -1351,7 +1414,7 @@ struct htt_q_state_conf {
 	u8 pad[2];
 } __packed;
 
-struct htt_frag_desc_bank_cfg {
+struct htt_frag_desc_bank_cfg32 {
 	u8 info; /* HTT_FRAG_DESC_BANK_CFG_INFO_ */
 	u8 num_banks;
 	u8 desc_size;
@@ -1360,6 +1423,15 @@ struct htt_frag_desc_bank_cfg {
 	struct htt_q_state_conf q_state;
 } __packed;
 
+struct htt_frag_desc_bank_cfg64 {
+	u8 info; /* HTT_FRAG_DESC_BANK_CFG_INFO_ */
+	u8 num_banks;
+	u8 desc_size;
+	__le64 bank_base_addrs[HTT_FRAG_DESC_BANK_MAX];
+	struct htt_frag_desc_bank_id bank_id[HTT_FRAG_DESC_BANK_MAX];
+	struct htt_q_state_conf q_state;
+} __packed;
+
 #define HTT_TX_Q_STATE_ENTRY_COEFFICIENT	128
 #define HTT_TX_Q_STATE_ENTRY_FACTOR_MASK	0x3f
 #define HTT_TX_Q_STATE_ENTRY_FACTOR_LSB		0
@@ -1497,6 +1569,23 @@ struct htt_peer_tx_stats {
 	u8 payload[0];
 } __packed;
 
+#define ATH10K_10_2_TX_STATS_OFFSET	136
+#define PEER_STATS_FOR_NO_OF_PPDUS	4
+
+struct ath10k_10_2_peer_tx_stats {
+	u8 ratecode[PEER_STATS_FOR_NO_OF_PPDUS];
+	u8 success_pkts[PEER_STATS_FOR_NO_OF_PPDUS];
+	__le16 success_bytes[PEER_STATS_FOR_NO_OF_PPDUS];
+	u8 retry_pkts[PEER_STATS_FOR_NO_OF_PPDUS];
+	__le16 retry_bytes[PEER_STATS_FOR_NO_OF_PPDUS];
+	u8 failed_pkts[PEER_STATS_FOR_NO_OF_PPDUS];
+	__le16 failed_bytes[PEER_STATS_FOR_NO_OF_PPDUS];
+	u8 flags[PEER_STATS_FOR_NO_OF_PPDUS];
+	__le32 tx_duration;
+	u8 tx_ppdu_cnt;
+	u8 peer_id;
+} __packed;
+
 union htt_rx_pn_t {
 	/* WEP: 24-bit PN */
 	u32 pn24;
@@ -1514,11 +1603,13 @@ struct htt_cmd {
 		struct htt_ver_req ver_req;
 		struct htt_mgmt_tx_desc mgmt_tx;
 		struct htt_data_tx_desc data_tx;
-		struct htt_rx_ring_setup rx_setup;
+		struct htt_rx_ring_setup_32 rx_setup_32;
+		struct htt_rx_ring_setup_64 rx_setup_64;
 		struct htt_stats_req stats_req;
 		struct htt_oob_sync_req oob_sync_req;
 		struct htt_aggr_conf aggr_conf;
-		struct htt_frag_desc_bank_cfg frag_desc_bank_cfg;
+		struct htt_frag_desc_bank_cfg32 frag_desc_bank_cfg32;
+		struct htt_frag_desc_bank_cfg64 frag_desc_bank_cfg64;
 		struct htt_tx_fetch_resp tx_fetch_resp;
 	};
 } __packed;
@@ -1576,13 +1667,20 @@ struct htt_peer_unmap_event {
 	u16 peer_id;
 };
 
-struct ath10k_htt_txbuf {
+struct ath10k_htt_txbuf_32 {
 	struct htt_data_tx_desc_frag frags[2];
 	struct ath10k_htc_hdr htc_hdr;
 	struct htt_cmd_hdr cmd_hdr;
 	struct htt_data_tx_desc cmd_tx;
 } __packed;
 
+struct ath10k_htt_txbuf_64 {
+	struct htt_data_tx_desc_frag frags[2];
+	struct ath10k_htc_hdr htc_hdr;
+	struct htt_cmd_hdr cmd_hdr;
+	struct htt_data_tx_desc_64 cmd_tx;
+} __packed;
+
 struct ath10k_htt {
 	struct ath10k *ar;
 	enum ath10k_htc_ep_id eid;
@@ -1627,7 +1725,10 @@ struct ath10k_htt {
 		 * rx buffers the host SW provides for the MAC HW to
 		 * fill.
 		 */
-		__le32 *paddrs_ring;
+		union {
+			__le64 *paddrs_ring_64;
+			__le32 *paddrs_ring_32;
+		};
 
 		/*
 		 * Base address of ring, as a "physical" device address
@@ -1695,7 +1796,7 @@ struct ath10k_htt {
 	/* This is used to group tx/rx completions separately and process them
 	 * in batches to reduce cache stalls
 	 */
-	struct sk_buff_head rx_compl_q;
+	struct sk_buff_head rx_msdus_q;
 	struct sk_buff_head rx_in_ord_compl_q;
 	struct sk_buff_head tx_fetch_ind_q;
 
@@ -1704,12 +1805,20 @@ struct ath10k_htt {
 
 	struct {
 		dma_addr_t paddr;
-		struct htt_msdu_ext_desc *vaddr;
+		union {
+			struct htt_msdu_ext_desc *vaddr_desc_32;
+			struct htt_msdu_ext_desc_64 *vaddr_desc_64;
+		};
+		size_t size;
 	} frag_desc;
 
 	struct {
 		dma_addr_t paddr;
-		struct ath10k_htt_txbuf *vaddr;
+		union {
+			struct ath10k_htt_txbuf_32 *vaddr_txbuff_32;
+			struct ath10k_htt_txbuf_64 *vaddr_txbuff_64;
+		};
+		size_t size;
 	} txbuf;
 
 	struct {
@@ -1724,6 +1833,28 @@ struct ath10k_htt {
 	} tx_q_state;
 
 	bool tx_mem_allocated;
+	const struct ath10k_htt_tx_ops *tx_ops;
+	const struct ath10k_htt_rx_ops *rx_ops;
+};
+
+struct ath10k_htt_tx_ops {
+	int (*htt_send_rx_ring_cfg)(struct ath10k_htt *htt);
+	int (*htt_send_frag_desc_bank_cfg)(struct ath10k_htt *htt);
+	int (*htt_alloc_frag_desc)(struct ath10k_htt *htt);
+	void (*htt_free_frag_desc)(struct ath10k_htt *htt);
+	int (*htt_tx)(struct ath10k_htt *htt, enum ath10k_hw_txrx_mode txmode,
+		      struct sk_buff *msdu);
+	int (*htt_alloc_txbuff)(struct ath10k_htt *htt);
+	void (*htt_free_txbuff)(struct ath10k_htt *htt);
+};
+
+struct ath10k_htt_rx_ops {
+	size_t (*htt_get_rx_ring_size)(struct ath10k_htt *htt);
+	void (*htt_config_paddrs_ring)(struct ath10k_htt *htt, void *vaddr);
+	void (*htt_set_paddrs_ring)(struct ath10k_htt *htt, dma_addr_t paddr,
+				    int idx);
+	void* (*htt_get_vaddr_ring)(struct ath10k_htt *htt);
+	void (*htt_reset_paddrs_ring)(struct ath10k_htt *htt, int idx);
 };
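Both ops tables are filled in once at init (see the ath10k_htt_set_tx_ops()/ath10k_htt_set_rx_ops() calls in the htt.c hunk above), so the 32-bit and 64-bit descriptor paths share every call site. An illustrative wrapper showing the dispatch pattern; the helper name is invented:

	static inline void example_store_rx_paddr(struct ath10k_htt *htt,
						  dma_addr_t paddr, int idx)
	{
		/* picks the _32 or _64 variant selected at init time */
		htt->rx_ops->htt_set_paddrs_ring(htt, paddr, idx);
	}
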
 
 #define RX_HTT_HDR_STATUS_LEN 64
@@ -1803,8 +1934,6 @@ void ath10k_htt_htc_t2h_msg_handler(struct ath10k *ar, struct sk_buff *skb);
 bool ath10k_htt_t2h_msg_handler(struct ath10k *ar, struct sk_buff *skb);
 int ath10k_htt_h2t_ver_req_msg(struct ath10k_htt *htt);
 int ath10k_htt_h2t_stats_req(struct ath10k_htt *htt, u8 mask, u64 cookie);
-int ath10k_htt_send_frag_desc_bank_cfg(struct ath10k_htt *htt);
-int ath10k_htt_send_rx_ring_cfg_ll(struct ath10k_htt *htt);
 int ath10k_htt_h2t_aggr_cfg_msg(struct ath10k_htt *htt,
 				u8 max_subfrms_ampdu,
 				u8 max_subfrms_amsdu);
@@ -1829,11 +1958,9 @@ int ath10k_htt_tx_mgmt_inc_pending(struct ath10k_htt *htt, bool is_mgmt,
 int ath10k_htt_tx_alloc_msdu_id(struct ath10k_htt *htt, struct sk_buff *skb);
 void ath10k_htt_tx_free_msdu_id(struct ath10k_htt *htt, u16 msdu_id);
 int ath10k_htt_mgmt_tx(struct ath10k_htt *htt, struct sk_buff *msdu);
-int ath10k_htt_tx(struct ath10k_htt *htt,
-		  enum ath10k_hw_txrx_mode txmode,
-		  struct sk_buff *msdu);
 void ath10k_htt_rx_pktlog_completion_handler(struct ath10k *ar,
 					     struct sk_buff *skb);
 int ath10k_htt_txrx_compl_task(struct ath10k *ar, int budget);
-
+void ath10k_htt_set_tx_ops(struct ath10k_htt *htt);
+void ath10k_htt_set_rx_ops(struct ath10k_htt *htt);
 #endif
diff --git a/drivers/net/wireless/ath/ath10k/htt_rx.c b/drivers/net/wireless/ath/ath10k/htt_rx.c
index 7d295ee71534..6d96f9560950 100644
--- a/drivers/net/wireless/ath/ath10k/htt_rx.c
+++ b/drivers/net/wireless/ath/ath10k/htt_rx.c
@@ -1,6 +1,6 @@
 /*
  * Copyright (c) 2005-2011 Atheros Communications Inc.
- * Copyright (c) 2011-2013 Qualcomm Atheros, Inc.
+ * Copyright (c) 2011-2017 Qualcomm Atheros, Inc.
  *
  * Permission to use, copy, modify, and/or distribute this software for any
  * purpose with or without fee is hereby granted, provided that the above
@@ -25,9 +25,6 @@
 
 #include <linux/log2.h>
 
-#define HTT_RX_RING_SIZE HTT_RX_RING_SIZE_MAX
-#define HTT_RX_RING_FILL_LEVEL (((HTT_RX_RING_SIZE) / 2) - 1)
-
 /* when under memory pressure rx ring refill may fail and needs a retry */
 #define HTT_RX_RING_REFILL_RETRY_MS 50
 
@@ -36,7 +33,7 @@
 static int ath10k_htt_rx_get_csum_state(struct sk_buff *skb);
 
 static struct sk_buff *
-ath10k_htt_rx_find_skb_paddr(struct ath10k *ar, u32 paddr)
+ath10k_htt_rx_find_skb_paddr(struct ath10k *ar, u64 paddr)
 {
 	struct ath10k_skb_rxcb *rxcb;
 
@@ -84,6 +81,60 @@ static void ath10k_htt_rx_ring_free(struct ath10k_htt *htt)
 	       htt->rx_ring.size * sizeof(htt->rx_ring.netbufs_ring[0]));
 }
 
+static size_t ath10k_htt_get_rx_ring_size_32(struct ath10k_htt *htt)
+{
+	return htt->rx_ring.size * sizeof(htt->rx_ring.paddrs_ring_32);
+}
+
+static size_t ath10k_htt_get_rx_ring_size_64(struct ath10k_htt *htt)
+{
+	return htt->rx_ring.size * sizeof(htt->rx_ring.paddrs_ring_64);
+}
+
+static void ath10k_htt_config_paddrs_ring_32(struct ath10k_htt *htt,
+					     void *vaddr)
+{
+	htt->rx_ring.paddrs_ring_32 = vaddr;
+}
+
+static void ath10k_htt_config_paddrs_ring_64(struct ath10k_htt *htt,
+					     void *vaddr)
+{
+	htt->rx_ring.paddrs_ring_64 = vaddr;
+}
+
+static void ath10k_htt_set_paddrs_ring_32(struct ath10k_htt *htt,
+					  dma_addr_t paddr, int idx)
+{
+	htt->rx_ring.paddrs_ring_32[idx] = __cpu_to_le32(paddr);
+}
+
+static void ath10k_htt_set_paddrs_ring_64(struct ath10k_htt *htt,
+					  dma_addr_t paddr, int idx)
+{
+	htt->rx_ring.paddrs_ring_64[idx] = __cpu_to_le64(paddr);
+}
+
+static void ath10k_htt_reset_paddrs_ring_32(struct ath10k_htt *htt, int idx)
+{
+	htt->rx_ring.paddrs_ring_32[idx] = 0;
+}
+
+static void ath10k_htt_reset_paddrs_ring_64(struct ath10k_htt *htt, int idx)
+{
+	htt->rx_ring.paddrs_ring_64[idx] = 0;
+}
+
+static void *ath10k_htt_get_vaddr_ring_32(struct ath10k_htt *htt)
+{
+	return (void *)htt->rx_ring.paddrs_ring_32;
+}
+
+static void *ath10k_htt_get_vaddr_ring_64(struct ath10k_htt *htt)
+{
+	return (void *)htt->rx_ring.paddrs_ring_64;
+}
+
 static int __ath10k_htt_rx_ring_fill_n(struct ath10k_htt *htt, int num)
 {
 	struct htt_rx_desc *rx_desc;
@@ -129,13 +180,13 @@ static int __ath10k_htt_rx_ring_fill_n(struct ath10k_htt *htt, int num)
 		rxcb = ATH10K_SKB_RXCB(skb);
 		rxcb->paddr = paddr;
 		htt->rx_ring.netbufs_ring[idx] = skb;
-		htt->rx_ring.paddrs_ring[idx] = __cpu_to_le32(paddr);
+		htt->rx_ops->htt_set_paddrs_ring(htt, paddr, idx);
 		htt->rx_ring.fill_cnt++;
 
 		if (htt->rx_ring.in_ord_rx) {
 			hash_add(htt->rx_ring.skb_table,
 				 &ATH10K_SKB_RXCB(skb)->hlist,
-				 (u32)paddr);
+				 paddr);
 		}
 
 		num--;
@@ -227,16 +278,15 @@ void ath10k_htt_rx_free(struct ath10k_htt *htt)
 {
 	del_timer_sync(&htt->rx_ring.refill_retry_timer);
 
-	skb_queue_purge(&htt->rx_compl_q);
+	skb_queue_purge(&htt->rx_msdus_q);
 	skb_queue_purge(&htt->rx_in_ord_compl_q);
 	skb_queue_purge(&htt->tx_fetch_ind_q);
 
 	ath10k_htt_rx_ring_free(htt);
 
 	dma_free_coherent(htt->ar->dev,
-			  (htt->rx_ring.size *
-			   sizeof(htt->rx_ring.paddrs_ring)),
-			  htt->rx_ring.paddrs_ring,
+			  htt->rx_ops->htt_get_rx_ring_size(htt),
+			  htt->rx_ops->htt_get_vaddr_ring(htt),
 			  htt->rx_ring.base_paddr);
 
 	dma_free_coherent(htt->ar->dev,
@@ -263,7 +313,7 @@ static inline struct sk_buff *ath10k_htt_rx_netbuf_pop(struct ath10k_htt *htt)
 	idx = htt->rx_ring.sw_rd_idx.msdu_payld;
 	msdu = htt->rx_ring.netbufs_ring[idx];
 	htt->rx_ring.netbufs_ring[idx] = NULL;
-	htt->rx_ring.paddrs_ring[idx] = 0;
+	htt->rx_ops->htt_reset_paddrs_ring(htt, idx);
 
 	idx++;
 	idx &= htt->rx_ring.size_mask;
@@ -383,7 +433,7 @@ static int ath10k_htt_rx_amsdu_pop(struct ath10k_htt *htt,
 }
 
 static struct sk_buff *ath10k_htt_rx_pop_paddr(struct ath10k_htt *htt,
-					       u32 paddr)
+					       u64 paddr)
 {
 	struct ath10k *ar = htt->ar;
 	struct ath10k_skb_rxcb *rxcb;
@@ -408,12 +458,12 @@ static struct sk_buff *ath10k_htt_rx_pop_paddr(struct ath10k_htt *htt,
 	return msdu;
 }
 
-static int ath10k_htt_rx_pop_paddr_list(struct ath10k_htt *htt,
-					struct htt_rx_in_ord_ind *ev,
-					struct sk_buff_head *list)
+static int ath10k_htt_rx_pop_paddr32_list(struct ath10k_htt *htt,
+					  struct htt_rx_in_ord_ind *ev,
+					  struct sk_buff_head *list)
 {
 	struct ath10k *ar = htt->ar;
-	struct htt_rx_in_ord_msdu_desc *msdu_desc = ev->msdu_descs;
+	struct htt_rx_in_ord_msdu_desc *msdu_desc = ev->msdu_descs32;
 	struct htt_rx_desc *rxd;
 	struct sk_buff *msdu;
 	int msdu_count;
@@ -458,11 +508,60 @@ static int ath10k_htt_rx_pop_paddr_list(struct ath10k_htt *htt,
 	return 0;
 }
 
+static int ath10k_htt_rx_pop_paddr64_list(struct ath10k_htt *htt,
+					  struct htt_rx_in_ord_ind *ev,
+					  struct sk_buff_head *list)
+{
+	struct ath10k *ar = htt->ar;
+	struct htt_rx_in_ord_msdu_desc_ext *msdu_desc = ev->msdu_descs64;
+	struct htt_rx_desc *rxd;
+	struct sk_buff *msdu;
+	int msdu_count;
+	bool is_offload;
+	u64 paddr;
+
+	lockdep_assert_held(&htt->rx_ring.lock);
+
+	msdu_count = __le16_to_cpu(ev->msdu_count);
+	is_offload = !!(ev->info & HTT_RX_IN_ORD_IND_INFO_OFFLOAD_MASK);
+
+	while (msdu_count--) {
+		paddr = __le64_to_cpu(msdu_desc->msdu_paddr);
+		msdu = ath10k_htt_rx_pop_paddr(htt, paddr);
+		if (!msdu) {
+			__skb_queue_purge(list);
+			return -ENOENT;
+		}
+
+		__skb_queue_tail(list, msdu);
+
+		if (!is_offload) {
+			rxd = (void *)msdu->data;
+
+			trace_ath10k_htt_rx_desc(ar, rxd, sizeof(*rxd));
+
+			skb_put(msdu, sizeof(*rxd));
+			skb_pull(msdu, sizeof(*rxd));
+			skb_put(msdu, __le16_to_cpu(msdu_desc->msdu_len));
+
+			if (!(__le32_to_cpu(rxd->attention.flags) &
+			      RX_ATTENTION_FLAGS_MSDU_DONE)) {
+				ath10k_warn(htt->ar, "tried to pop an incomplete frame, oops!\n");
+				return -EIO;
+			}
+		}
+
+		msdu_desc++;
+	}
+
+	return 0;
+}
+
 int ath10k_htt_rx_alloc(struct ath10k_htt *htt)
 {
 	struct ath10k *ar = htt->ar;
 	dma_addr_t paddr;
-	void *vaddr;
+	void *vaddr, *vaddr_ring;
 	size_t size;
 	struct timer_list *timer = &htt->rx_ring.refill_retry_timer;
 
@@ -473,7 +572,7 @@ int ath10k_htt_rx_alloc(struct ath10k_htt *htt)
 	 */
 	htt->rx_ring.size = HTT_RX_RING_SIZE;
 	htt->rx_ring.size_mask = htt->rx_ring.size - 1;
-	htt->rx_ring.fill_level = HTT_RX_RING_FILL_LEVEL;
+	htt->rx_ring.fill_level = ar->hw_params.rx_ring_fill_level;
 
 	if (!is_power_of_2(htt->rx_ring.size)) {
 		ath10k_warn(ar, "htt rx ring size is not power of 2\n");
@@ -486,13 +585,13 @@ int ath10k_htt_rx_alloc(struct ath10k_htt *htt)
 	if (!htt->rx_ring.netbufs_ring)
 		goto err_netbuf;
 
-	size = htt->rx_ring.size * sizeof(htt->rx_ring.paddrs_ring);
+	size = htt->rx_ops->htt_get_rx_ring_size(htt);
 
-	vaddr = dma_alloc_coherent(htt->ar->dev, size, &paddr, GFP_KERNEL);
-	if (!vaddr)
+	vaddr_ring = dma_alloc_coherent(htt->ar->dev, size, &paddr, GFP_KERNEL);
+	if (!vaddr_ring)
 		goto err_dma_ring;
 
-	htt->rx_ring.paddrs_ring = vaddr;
+	htt->rx_ops->htt_config_paddrs_ring(htt, vaddr_ring);
 	htt->rx_ring.base_paddr = paddr;
 
 	vaddr = dma_alloc_coherent(htt->ar->dev,
@@ -515,7 +614,7 @@ int ath10k_htt_rx_alloc(struct ath10k_htt *htt)
 	htt->rx_ring.sw_rd_idx.msdu_payld = 0;
 	hash_init(htt->rx_ring.skb_table);
 
-	skb_queue_head_init(&htt->rx_compl_q);
+	skb_queue_head_init(&htt->rx_msdus_q);
 	skb_queue_head_init(&htt->rx_in_ord_compl_q);
 	skb_queue_head_init(&htt->tx_fetch_ind_q);
 	atomic_set(&htt->num_mpdus_ready, 0);
@@ -526,9 +625,8 @@ int ath10k_htt_rx_alloc(struct ath10k_htt *htt)
 
 err_dma_idx:
 	dma_free_coherent(htt->ar->dev,
-			  (htt->rx_ring.size *
-			   sizeof(htt->rx_ring.paddrs_ring)),
-			  htt->rx_ring.paddrs_ring,
+			  htt->rx_ops->htt_get_rx_ring_size(htt),
+			  vaddr_ring,
 			  htt->rx_ring.base_paddr);
 err_dma_ring:
 	kfree(htt->rx_ring.netbufs_ring);
@@ -974,16 +1072,25 @@ static char *ath10k_get_tid(struct ieee80211_hdr *hdr, char *out, size_t size)
 	return out;
 }
 
-static void ath10k_process_rx(struct ath10k *ar,
-			      struct ieee80211_rx_status *rx_status,
-			      struct sk_buff *skb)
+static void ath10k_htt_rx_h_queue_msdu(struct ath10k *ar,
+				       struct ieee80211_rx_status *rx_status,
+				       struct sk_buff *skb)
+{
+	struct ieee80211_rx_status *status;
+
+	status = IEEE80211_SKB_RXCB(skb);
+	*status = *rx_status;
+
+	__skb_queue_tail(&ar->htt.rx_msdus_q, skb);
+}
+
+static void ath10k_process_rx(struct ath10k *ar, struct sk_buff *skb)
 {
 	struct ieee80211_rx_status *status;
 	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
 	char tid[32];
 
 	status = IEEE80211_SKB_RXCB(skb);
-	*status = *rx_status;
 
 	ath10k_dbg(ar, ATH10K_DBG_DATA,
 		   "rx skb %pK len %u peer %pM %s %s sn %u %s%s%s%s%s%s %srate_idx %u vht_nss %u freq %u band %u flag 0x%x fcs-err %i mic-err %i amsdu-more %i\n",
@@ -1517,7 +1624,7 @@ static void ath10k_htt_rx_h_mpdu(struct ath10k *ar,
 	}
 }
 
-static void ath10k_htt_rx_h_deliver(struct ath10k *ar,
+static void ath10k_htt_rx_h_enqueue(struct ath10k *ar,
 				    struct sk_buff_head *amsdu,
 				    struct ieee80211_rx_status *status)
 {
@@ -1540,7 +1647,7 @@ static void ath10k_htt_rx_h_deliver(struct ath10k *ar,
 			status->flag |= RX_FLAG_ALLOW_SAME_PN;
 		}
 
-		ath10k_process_rx(ar, status, msdu);
+		ath10k_htt_rx_h_queue_msdu(ar, status, msdu);
 	}
 }
 
@@ -1652,7 +1759,7 @@ static int ath10k_htt_rx_handle_amsdu(struct ath10k_htt *htt)
 	struct ath10k *ar = htt->ar;
 	struct ieee80211_rx_status *rx_status = &htt->rx_status;
 	struct sk_buff_head amsdu;
-	int ret, num_msdus;
+	int ret;
 
 	__skb_queue_head_init(&amsdu);
 
@@ -1674,7 +1781,6 @@ static int ath10k_htt_rx_handle_amsdu(struct ath10k_htt *htt)
 		return ret;
 	}
 
-	num_msdus = skb_queue_len(&amsdu);
 	ath10k_htt_rx_h_ppdu(ar, &amsdu, rx_status, 0xffff);
 
 	/* only for ret = 1 indicates chained msdus */
@@ -1683,9 +1789,9 @@ static int ath10k_htt_rx_handle_amsdu(struct ath10k_htt *htt)
 
 	ath10k_htt_rx_h_filter(ar, &amsdu, rx_status);
 	ath10k_htt_rx_h_mpdu(ar, &amsdu, rx_status, true);
-	ath10k_htt_rx_h_deliver(ar, &amsdu, rx_status);
+	ath10k_htt_rx_h_enqueue(ar, &amsdu, rx_status);
 
-	return num_msdus;
+	return 0;
 }
 
 static void ath10k_htt_rx_proc_rx_ind(struct ath10k_htt *htt,
@@ -1893,15 +1999,14 @@ static void ath10k_htt_rx_h_rx_offload_prot(struct ieee80211_rx_status *status,
 			RX_FLAG_MMIC_STRIPPED;
 }
 
-static int ath10k_htt_rx_h_rx_offload(struct ath10k *ar,
-				      struct sk_buff_head *list)
+static void ath10k_htt_rx_h_rx_offload(struct ath10k *ar,
+				       struct sk_buff_head *list)
 {
 	struct ath10k_htt *htt = &ar->htt;
 	struct ieee80211_rx_status *status = &htt->rx_status;
 	struct htt_rx_offload_msdu *rx;
 	struct sk_buff *msdu;
 	size_t offset;
-	int num_msdu = 0;
 
 	while ((msdu = __skb_dequeue(list))) {
 		/* Offloaded frames don't have Rx descriptor. Instead they have
@@ -1940,10 +2045,8 @@ static int ath10k_htt_rx_h_rx_offload(struct ath10k *ar,
 
 		ath10k_htt_rx_h_rx_offload_prot(status, msdu);
 		ath10k_htt_rx_h_channel(ar, status, NULL, rx->vdev_id);
-		ath10k_process_rx(ar, status, msdu);
-		num_msdu++;
+		ath10k_htt_rx_h_queue_msdu(ar, status, msdu);
 	}
-	return num_msdu;
 }
 
 static int ath10k_htt_rx_in_ord_ind(struct ath10k *ar, struct sk_buff *skb)
@@ -1959,7 +2062,7 @@ static int ath10k_htt_rx_in_ord_ind(struct ath10k *ar, struct sk_buff *skb)
 	u8 tid;
 	bool offload;
 	bool frag;
-	int ret, num_msdus = 0;
+	int ret;
 
 	lockdep_assert_held(&htt->rx_ring.lock);
 
@@ -1981,7 +2084,7 @@ static int ath10k_htt_rx_in_ord_ind(struct ath10k *ar, struct sk_buff *skb)
 		   "htt rx in ord vdev %i peer %i tid %i offload %i frag %i msdu count %i\n",
 		   vdev_id, peer_id, tid, offload, frag, msdu_count);
 
-	if (skb->len < msdu_count * sizeof(*resp->rx_in_ord_ind.msdu_descs)) {
+	if (skb->len < msdu_count * sizeof(*resp->rx_in_ord_ind.msdu_descs32)) {
 		ath10k_warn(ar, "dropping invalid in order rx indication\n");
 		return -EINVAL;
 	}
@@ -1990,7 +2093,13 @@ static int ath10k_htt_rx_in_ord_ind(struct ath10k *ar, struct sk_buff *skb)
 	 * extracted and processed.
 	 */
 	__skb_queue_head_init(&list);
-	ret = ath10k_htt_rx_pop_paddr_list(htt, &resp->rx_in_ord_ind, &list);
+	if (ar->hw_params.target_64bit)
+		ret = ath10k_htt_rx_pop_paddr64_list(htt, &resp->rx_in_ord_ind,
+						     &list);
+	else
+		ret = ath10k_htt_rx_pop_paddr32_list(htt, &resp->rx_in_ord_ind,
+						     &list);
+
 	if (ret < 0) {
 		ath10k_warn(ar, "failed to pop paddr list: %d\n", ret);
 		htt->rx_confused = true;
@@ -2001,7 +2110,7 @@ static int ath10k_htt_rx_in_ord_ind(struct ath10k *ar, struct sk_buff *skb)
 	 * separately.
 	 */
 	if (offload)
-		num_msdus = ath10k_htt_rx_h_rx_offload(ar, &list);
+		ath10k_htt_rx_h_rx_offload(ar, &list);
 
 	while (!skb_queue_empty(&list)) {
 		__skb_queue_head_init(&amsdu);
@@ -2014,11 +2123,10 @@ static int ath10k_htt_rx_in_ord_ind(struct ath10k *ar, struct sk_buff *skb)
 			 * better to report something than nothing though. This
 			 * should still give an idea about rx rate to the user.
 			 */
-			num_msdus += skb_queue_len(&amsdu);
 			ath10k_htt_rx_h_ppdu(ar, &amsdu, status, vdev_id);
 			ath10k_htt_rx_h_filter(ar, &amsdu, status);
 			ath10k_htt_rx_h_mpdu(ar, &amsdu, status, false);
-			ath10k_htt_rx_h_deliver(ar, &amsdu, status);
+			ath10k_htt_rx_h_enqueue(ar, &amsdu, status);
 			break;
 		case -EAGAIN:
 			/* fall through */
@@ -2030,7 +2138,7 @@ static int ath10k_htt_rx_in_ord_ind(struct ath10k *ar, struct sk_buff *skb)
 			return -EIO;
 		}
 	}
-	return num_msdus;
+	return ret;
 }
 
 static void ath10k_htt_rx_tx_fetch_resp_id_confirm(struct ath10k *ar,
@@ -2449,6 +2557,62 @@ out:
 	rcu_read_unlock();
 }
 
+static void ath10k_fetch_10_2_tx_stats(struct ath10k *ar, u8 *data)
+{
+	struct ath10k_pktlog_hdr *hdr = (struct ath10k_pktlog_hdr *)data;
+	struct ath10k_per_peer_tx_stats *p_tx_stats = &ar->peer_tx_stats;
+	struct ath10k_10_2_peer_tx_stats *tx_stats;
+	struct ieee80211_sta *sta;
+	struct ath10k_peer *peer;
+	u16 log_type = __le16_to_cpu(hdr->log_type);
+	u32 peer_id = 0, i;
+
+	if (log_type != ATH_PKTLOG_TYPE_TX_STAT)
+		return;
+
+	tx_stats = (struct ath10k_10_2_peer_tx_stats *)((hdr->payload) +
+		    ATH10K_10_2_TX_STATS_OFFSET);
+
+	if (!tx_stats->tx_ppdu_cnt)
+		return;
+
+	peer_id = tx_stats->peer_id;
+
+	rcu_read_lock();
+	spin_lock_bh(&ar->data_lock);
+	peer = ath10k_peer_find_by_id(ar, peer_id);
+	if (!peer) {
+		ath10k_warn(ar, "Invalid peer id %d in peer stats buffer\n",
+			    peer_id);
+		goto out;
+	}
+
+	sta = peer->sta;
+	for (i = 0; i < tx_stats->tx_ppdu_cnt; i++) {
+		p_tx_stats->succ_bytes =
+			__le16_to_cpu(tx_stats->success_bytes[i]);
+		p_tx_stats->retry_bytes =
+			__le16_to_cpu(tx_stats->retry_bytes[i]);
+		p_tx_stats->failed_bytes =
+			__le16_to_cpu(tx_stats->failed_bytes[i]);
+		p_tx_stats->ratecode = tx_stats->ratecode[i];
+		p_tx_stats->flags = tx_stats->flags[i];
+		p_tx_stats->succ_pkts = tx_stats->success_pkts[i];
+		p_tx_stats->retry_pkts = tx_stats->retry_pkts[i];
+		p_tx_stats->failed_pkts = tx_stats->failed_pkts[i];
+
+		ath10k_update_per_peer_tx_stats(ar, sta, p_tx_stats);
+	}
+	spin_unlock_bh(&ar->data_lock);
+	rcu_read_unlock();
+
+	return;
+
+out:
+	spin_unlock_bh(&ar->data_lock);
+	rcu_read_unlock();
+}
+
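The cast in ath10k_fetch_10_2_tx_stats() trusts the pktlog framing: the stats record sits ATH10K_10_2_TX_STATS_OFFSET bytes into hdr->payload. A sanity check along these lines would be possible, though it is not part of the patch:

	/* illustrative: hdr->size is the variable-length payload size in bytes */
	static bool ath10k_10_2_tx_stats_fits(const struct ath10k_pktlog_hdr *hdr)
	{
		return __le16_to_cpu(hdr->size) >=
		       ATH10K_10_2_TX_STATS_OFFSET +
		       sizeof(struct ath10k_10_2_peer_tx_stats);
	}
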
 bool ath10k_htt_t2h_msg_handler(struct ath10k *ar, struct sk_buff *skb)
 {
 	struct ath10k_htt *htt = &ar->htt;
@@ -2566,6 +2730,10 @@ bool ath10k_htt_t2h_msg_handler(struct ath10k *ar, struct sk_buff *skb)
 					skb->len -
 					offsetof(struct htt_resp,
 						 pktlog_msg.payload));
+
+		if (ath10k_peer_stats_enabled(ar))
+			ath10k_fetch_10_2_tx_stats(ar,
+						   resp->pktlog_msg.payload);
 		break;
 	}
 	case HTT_T2H_MSG_TYPE_RX_FLUSH: {
@@ -2631,6 +2799,24 @@ void ath10k_htt_rx_pktlog_completion_handler(struct ath10k *ar,
 }
 EXPORT_SYMBOL(ath10k_htt_rx_pktlog_completion_handler);
 
+static int ath10k_htt_rx_deliver_msdu(struct ath10k *ar, int quota, int budget)
+{
+	struct sk_buff *skb;
+
+	while (quota < budget) {
+		if (skb_queue_empty(&ar->htt.rx_msdus_q))
+			break;
+
+		skb = __skb_dequeue(&ar->htt.rx_msdus_q);
+		if (!skb)
+			break;
+		ath10k_process_rx(ar, skb);
+		quota++;
+	}
+
+	return quota;
+}
+
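ath10k_htt_rx_deliver_msdu() charges one unit of NAPI quota per delivered frame and stops once quota reaches budget, so the caller can report exhaustion and keep the poll scheduled. A sketch of that contract; everything except the helper itself is an assumption:

	static int example_poll(struct napi_struct *napi, int budget)
	{
		struct ath10k *ar = container_of(napi, struct ath10k, napi);
		int done = ath10k_htt_rx_deliver_msdu(ar, 0, budget);

		if (done < budget)	/* queue drained, stop polling */
			napi_complete_done(napi, done);
		return done;
	}
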
 int ath10k_htt_txrx_compl_task(struct ath10k *ar, int budget)
 {
 	struct ath10k_htt *htt = &ar->htt;
@@ -2638,63 +2824,44 @@ int ath10k_htt_txrx_compl_task(struct ath10k *ar, int budget)
 	struct sk_buff_head tx_ind_q;
 	struct sk_buff *skb;
 	unsigned long flags;
-	int quota = 0, done, num_rx_msdus;
+	int quota = 0, done, ret;
 	bool resched_napi = false;
 
 	__skb_queue_head_init(&tx_ind_q);
 
-	/* Since in-ord-ind can deliver more than 1 A-MSDU in single event,
-	 * process it first to utilize full available quota.
+	/* Process pending frames before dequeuing more data
+	 * from hardware.
 	 */
-	while (quota < budget) {
-		if (skb_queue_empty(&htt->rx_in_ord_compl_q))
-			break;
-
-		skb = __skb_dequeue(&htt->rx_in_ord_compl_q);
-		if (!skb) {
-			resched_napi = true;
-			goto exit;
-		}
+	quota = ath10k_htt_rx_deliver_msdu(ar, quota, budget);
+	if (quota == budget) {
+		resched_napi = true;
+		goto exit;
+	}
 
+	while ((skb = __skb_dequeue(&htt->rx_in_ord_compl_q))) {
 		spin_lock_bh(&htt->rx_ring.lock);
-		num_rx_msdus = ath10k_htt_rx_in_ord_ind(ar, skb);
+		ret = ath10k_htt_rx_in_ord_ind(ar, skb);
 		spin_unlock_bh(&htt->rx_ring.lock);
-		if (num_rx_msdus < 0) {
-			resched_napi = true;
-			goto exit;
-		}
 
 		dev_kfree_skb_any(skb);
-		if (num_rx_msdus > 0)
-			quota += num_rx_msdus;
-
-		if ((quota > ATH10K_NAPI_QUOTA_LIMIT) &&
-		    !skb_queue_empty(&htt->rx_in_ord_compl_q)) {
+		if (ret == -EIO) {
 			resched_napi = true;
 			goto exit;
 		}
 	}
 
-	while (quota < budget) {
-		/* no more data to receive */
-		if (!atomic_read(&htt->num_mpdus_ready))
-			break;
-
-		num_rx_msdus = ath10k_htt_rx_handle_amsdu(htt);
-		if (num_rx_msdus < 0) {
+	while (atomic_read(&htt->num_mpdus_ready)) {
+		ret = ath10k_htt_rx_handle_amsdu(htt);
+		if (ret == -EIO) {
 			resched_napi = true;
 			goto exit;
 		}
-
-		quota += num_rx_msdus;
 		atomic_dec(&htt->num_mpdus_ready);
-		if ((quota > ATH10K_NAPI_QUOTA_LIMIT) &&
-		    atomic_read(&htt->num_mpdus_ready)) {
-			resched_napi = true;
-			goto exit;
-		}
 	}
 
+	/* Deliver received data after processing data from hardware */
+	quota = ath10k_htt_rx_deliver_msdu(ar, quota, budget);
+
 	/* From NAPI documentation:
 	 *  The napi poll() function may also process TX completions, in which
 	 *  case if it processes the entire TX ring then it should count that
@@ -2732,3 +2899,29 @@ exit:
 	return done;
 }
 EXPORT_SYMBOL(ath10k_htt_txrx_compl_task);
+
+static const struct ath10k_htt_rx_ops htt_rx_ops_32 = {
+	.htt_get_rx_ring_size = ath10k_htt_get_rx_ring_size_32,
+	.htt_config_paddrs_ring = ath10k_htt_config_paddrs_ring_32,
+	.htt_set_paddrs_ring = ath10k_htt_set_paddrs_ring_32,
+	.htt_get_vaddr_ring = ath10k_htt_get_vaddr_ring_32,
+	.htt_reset_paddrs_ring = ath10k_htt_reset_paddrs_ring_32,
+};
+
+static const struct ath10k_htt_rx_ops htt_rx_ops_64 = {
+	.htt_get_rx_ring_size = ath10k_htt_get_rx_ring_size_64,
+	.htt_config_paddrs_ring = ath10k_htt_config_paddrs_ring_64,
+	.htt_set_paddrs_ring = ath10k_htt_set_paddrs_ring_64,
+	.htt_get_vaddr_ring = ath10k_htt_get_vaddr_ring_64,
+	.htt_reset_paddrs_ring = ath10k_htt_reset_paddrs_ring_64,
+};
+
+void ath10k_htt_set_rx_ops(struct ath10k_htt *htt)
+{
+	struct ath10k *ar = htt->ar;
+
+	if (ar->hw_params.target_64bit)
+		htt->rx_ops = &htt_rx_ops_64;
+	else
+		htt->rx_ops = &htt_rx_ops_32;
+}
diff --git a/drivers/net/wireless/ath/ath10k/htt_tx.c b/drivers/net/wireless/ath/ath10k/htt_tx.c
index 685faac1368f..d334b7be1fea 100644
--- a/drivers/net/wireless/ath/ath10k/htt_tx.c
+++ b/drivers/net/wireless/ath/ath10k/htt_tx.c
@@ -1,6 +1,6 @@
 /*
  * Copyright (c) 2005-2011 Atheros Communications Inc.
- * Copyright (c) 2011-2013 Qualcomm Atheros, Inc.
+ * Copyright (c) 2011-2017 Qualcomm Atheros, Inc.
  *
  * Permission to use, copy, modify, and/or distribute this software for any
  * purpose with or without fee is hereby granted, provided that the above
@@ -229,50 +229,91 @@ void ath10k_htt_tx_free_msdu_id(struct ath10k_htt *htt, u16 msdu_id)
 	idr_remove(&htt->pending_tx, msdu_id);
 }
 
-static void ath10k_htt_tx_free_cont_txbuf(struct ath10k_htt *htt)
+static void ath10k_htt_tx_free_cont_txbuf_32(struct ath10k_htt *htt)
 {
 	struct ath10k *ar = htt->ar;
 	size_t size;
 
-	if (!htt->txbuf.vaddr)
+	if (!htt->txbuf.vaddr_txbuff_32)
 		return;
 
-	size = htt->max_num_pending_tx * sizeof(struct ath10k_htt_txbuf);
-	dma_free_coherent(ar->dev, size, htt->txbuf.vaddr, htt->txbuf.paddr);
-	htt->txbuf.vaddr = NULL;
+	size = htt->txbuf.size;
+	dma_free_coherent(ar->dev, size, htt->txbuf.vaddr_txbuff_32,
+			  htt->txbuf.paddr);
+	htt->txbuf.vaddr_txbuff_32 = NULL;
 }
 
-static int ath10k_htt_tx_alloc_cont_txbuf(struct ath10k_htt *htt)
+static int ath10k_htt_tx_alloc_cont_txbuf_32(struct ath10k_htt *htt)
 {
 	struct ath10k *ar = htt->ar;
 	size_t size;
 
-	size = htt->max_num_pending_tx * sizeof(struct ath10k_htt_txbuf);
-	htt->txbuf.vaddr = dma_alloc_coherent(ar->dev, size, &htt->txbuf.paddr,
-					      GFP_KERNEL);
-	if (!htt->txbuf.vaddr)
+	size = htt->max_num_pending_tx *
+			sizeof(struct ath10k_htt_txbuf_32);
+
+	htt->txbuf.vaddr_txbuff_32 = dma_alloc_coherent(ar->dev, size,
+							&htt->txbuf.paddr,
+							GFP_KERNEL);
+	if (!htt->txbuf.vaddr_txbuff_32)
 		return -ENOMEM;
 
+	htt->txbuf.size = size;
+
 	return 0;
 }
 
-static void ath10k_htt_tx_free_cont_frag_desc(struct ath10k_htt *htt)
+static void ath10k_htt_tx_free_cont_txbuf_64(struct ath10k_htt *htt)
 {
+	struct ath10k *ar = htt->ar;
 	size_t size;
 
-	if (!htt->frag_desc.vaddr)
+	if (!htt->txbuf.vaddr_txbuff_64)
 		return;
 
-	size = htt->max_num_pending_tx * sizeof(struct htt_msdu_ext_desc);
+	size = htt->txbuf.size;
+	dma_free_coherent(ar->dev, size, htt->txbuf.vaddr_txbuff_64,
+			  htt->txbuf.paddr);
+	htt->txbuf.vaddr_txbuff_64 = NULL;
+}
+
+static int ath10k_htt_tx_alloc_cont_txbuf_64(struct ath10k_htt *htt)
+{
+	struct ath10k *ar = htt->ar;
+	size_t size;
+
+	size = htt->max_num_pending_tx *
+			sizeof(struct ath10k_htt_txbuf_64);
+
+	htt->txbuf.vaddr_txbuff_64 = dma_alloc_coherent(ar->dev, size,
+							&htt->txbuf.paddr,
+							GFP_KERNEL);
+	if (!htt->txbuf.vaddr_txbuff_64)
+		return -ENOMEM;
+
+	htt->txbuf.size = size;
+
+	return 0;
+}
+
+static void ath10k_htt_tx_free_cont_frag_desc_32(struct ath10k_htt *htt)
+{
+	size_t size;
+
+	if (!htt->frag_desc.vaddr_desc_32)
+		return;
+
+	size = htt->max_num_pending_tx *
+			sizeof(struct htt_msdu_ext_desc);
 
 	dma_free_coherent(htt->ar->dev,
 			  size,
-			  htt->frag_desc.vaddr,
+			  htt->frag_desc.vaddr_desc_32,
 			  htt->frag_desc.paddr);
-	htt->frag_desc.vaddr = NULL;
+
+	htt->frag_desc.vaddr_desc_32 = NULL;
 }
 
-static int ath10k_htt_tx_alloc_cont_frag_desc(struct ath10k_htt *htt)
+static int ath10k_htt_tx_alloc_cont_frag_desc_32(struct ath10k_htt *htt)
 {
 	struct ath10k *ar = htt->ar;
 	size_t size;
@@ -280,12 +321,57 @@ static int ath10k_htt_tx_alloc_cont_frag_desc(struct ath10k_htt *htt)
 	if (!ar->hw_params.continuous_frag_desc)
 		return 0;
 
-	size = htt->max_num_pending_tx * sizeof(struct htt_msdu_ext_desc);
-	htt->frag_desc.vaddr = dma_alloc_coherent(ar->dev, size,
-						  &htt->frag_desc.paddr,
-						  GFP_KERNEL);
-	if (!htt->frag_desc.vaddr)
+	size = htt->max_num_pending_tx *
+			sizeof(struct htt_msdu_ext_desc);
+	htt->frag_desc.vaddr_desc_32 = dma_alloc_coherent(ar->dev, size,
+							  &htt->frag_desc.paddr,
+							  GFP_KERNEL);
+	if (!htt->frag_desc.vaddr_desc_32) {
+		ath10k_err(ar, "failed to alloc fragment desc memory\n");
 		return -ENOMEM;
+	}
+	htt->frag_desc.size = size;
+
+	return 0;
+}
+
+static void ath10k_htt_tx_free_cont_frag_desc_64(struct ath10k_htt *htt)
+{
+	size_t size;
+
+	if (!htt->frag_desc.vaddr_desc_64)
+		return;
+
+	size = htt->max_num_pending_tx *
+			sizeof(struct htt_msdu_ext_desc_64);
+
+	dma_free_coherent(htt->ar->dev,
+			  size,
+			  htt->frag_desc.vaddr_desc_64,
+			  htt->frag_desc.paddr);
+
+	htt->frag_desc.vaddr_desc_64 = NULL;
+}
+
+static int ath10k_htt_tx_alloc_cont_frag_desc_64(struct ath10k_htt *htt)
+{
+	struct ath10k *ar = htt->ar;
+	size_t size;
+
+	if (!ar->hw_params.continuous_frag_desc)
+		return 0;
+
+	size = htt->max_num_pending_tx *
+			sizeof(struct htt_msdu_ext_desc_64);
+
+	htt->frag_desc.vaddr_desc_64 = dma_alloc_coherent(ar->dev, size,
+							  &htt->frag_desc.paddr,
+							  GFP_KERNEL);
+	if (!htt->frag_desc.vaddr_desc_64) {
+		ath10k_err(ar, "failed to alloc fragment desc memory\n");
+		return -ENOMEM;
+	}
+	htt->frag_desc.size = size;
 
 	return 0;
 }
@@ -357,13 +443,13 @@ static int ath10k_htt_tx_alloc_buf(struct ath10k_htt *htt)
 	struct ath10k *ar = htt->ar;
 	int ret;
 
-	ret = ath10k_htt_tx_alloc_cont_txbuf(htt);
+	ret = htt->tx_ops->htt_alloc_txbuff(htt);
 	if (ret) {
 		ath10k_err(ar, "failed to alloc cont tx buffer: %d\n", ret);
 		return ret;
 	}
 
-	ret = ath10k_htt_tx_alloc_cont_frag_desc(htt);
+	ret = htt->tx_ops->htt_alloc_frag_desc(htt);
 	if (ret) {
 		ath10k_err(ar, "failed to alloc cont frag desc: %d\n", ret);
 		goto free_txbuf;
@@ -387,10 +473,10 @@ free_txq:
 	ath10k_htt_tx_free_txq(htt);
 
 free_frag_desc:
-	ath10k_htt_tx_free_cont_frag_desc(htt);
+	htt->tx_ops->htt_free_frag_desc(htt);
 
 free_txbuf:
-	ath10k_htt_tx_free_cont_txbuf(htt);
+	htt->tx_ops->htt_free_txbuff(htt);
 
 	return ret;
 }
@@ -444,9 +530,9 @@ void ath10k_htt_tx_destroy(struct ath10k_htt *htt)
 	if (!htt->tx_mem_allocated)
 		return;
 
-	ath10k_htt_tx_free_cont_txbuf(htt);
+	htt->tx_ops->htt_free_txbuff(htt);
 	ath10k_htt_tx_free_txq(htt);
-	ath10k_htt_tx_free_cont_frag_desc(htt);
+	htt->tx_ops->htt_free_frag_desc(htt);
 	ath10k_htt_tx_free_txdone_fifo(htt);
 	htt->tx_mem_allocated = false;
 }
@@ -545,12 +631,12 @@ int ath10k_htt_h2t_stats_req(struct ath10k_htt *htt, u8 mask, u64 cookie)
 	return 0;
 }
 
-int ath10k_htt_send_frag_desc_bank_cfg(struct ath10k_htt *htt)
+static int ath10k_htt_send_frag_desc_bank_cfg_32(struct ath10k_htt *htt)
 {
 	struct ath10k *ar = htt->ar;
 	struct sk_buff *skb;
 	struct htt_cmd *cmd;
-	struct htt_frag_desc_bank_cfg *cfg;
+	struct htt_frag_desc_bank_cfg32 *cfg;
 	int ret, size;
 	u8 info;
 
@@ -562,7 +648,7 @@ int ath10k_htt_send_frag_desc_bank_cfg(struct ath10k_htt *htt)
 		return -EINVAL;
 	}
 
-	size = sizeof(cmd->hdr) + sizeof(cmd->frag_desc_bank_cfg);
+	size = sizeof(cmd->hdr) + sizeof(cmd->frag_desc_bank_cfg32);
 	skb = ath10k_htc_alloc_skb(ar, size);
 	if (!skb)
 		return -ENOMEM;
@@ -579,7 +665,7 @@ int ath10k_htt_send_frag_desc_bank_cfg(struct ath10k_htt *htt)
 		     ar->running_fw->fw_file.fw_features))
 		info |= HTT_FRAG_DESC_BANK_CFG_INFO_Q_STATE_VALID;
 
-	cfg = &cmd->frag_desc_bank_cfg;
+	cfg = &cmd->frag_desc_bank_cfg32;
 	cfg->info = info;
 	cfg->num_banks = 1;
 	cfg->desc_size = sizeof(struct htt_msdu_ext_desc);
@@ -607,12 +693,112 @@ int ath10k_htt_send_frag_desc_bank_cfg(struct ath10k_htt *htt)
 	return 0;
 }
 
-int ath10k_htt_send_rx_ring_cfg_ll(struct ath10k_htt *htt)
+static int ath10k_htt_send_frag_desc_bank_cfg_64(struct ath10k_htt *htt)
 {
 	struct ath10k *ar = htt->ar;
 	struct sk_buff *skb;
 	struct htt_cmd *cmd;
-	struct htt_rx_ring_setup_ring *ring;
+	struct htt_frag_desc_bank_cfg64 *cfg;
+	int ret, size;
+	u8 info;
+
+	if (!ar->hw_params.continuous_frag_desc)
+		return 0;
+
+	if (!htt->frag_desc.paddr) {
+		ath10k_warn(ar, "invalid frag desc memory\n");
+		return -EINVAL;
+	}
+
+	size = sizeof(cmd->hdr) + sizeof(cmd->frag_desc_bank_cfg64);
+	skb = ath10k_htc_alloc_skb(ar, size);
+	if (!skb)
+		return -ENOMEM;
+
+	skb_put(skb, size);
+	cmd = (struct htt_cmd *)skb->data;
+	cmd->hdr.msg_type = HTT_H2T_MSG_TYPE_FRAG_DESC_BANK_CFG;
+
+	info = 0;
+	info |= SM(htt->tx_q_state.type,
+		   HTT_FRAG_DESC_BANK_CFG_INFO_Q_STATE_DEPTH_TYPE);
+
+	if (test_bit(ATH10K_FW_FEATURE_PEER_FLOW_CONTROL,
+		     ar->running_fw->fw_file.fw_features))
+		info |= HTT_FRAG_DESC_BANK_CFG_INFO_Q_STATE_VALID;
+
+	cfg = &cmd->frag_desc_bank_cfg64;
+	cfg->info = info;
+	cfg->num_banks = 1;
+	cfg->desc_size = sizeof(struct htt_msdu_ext_desc_64);
+	cfg->bank_base_addrs[0] = __cpu_to_le64(htt->frag_desc.paddr);
+	cfg->bank_id[0].bank_min_id = 0;
+	cfg->bank_id[0].bank_max_id = __cpu_to_le16(htt->max_num_pending_tx -
+						    1);
+
+	cfg->q_state.paddr = cpu_to_le32(htt->tx_q_state.paddr);
+	cfg->q_state.num_peers = cpu_to_le16(htt->tx_q_state.num_peers);
+	cfg->q_state.num_tids = cpu_to_le16(htt->tx_q_state.num_tids);
+	cfg->q_state.record_size = HTT_TX_Q_STATE_ENTRY_SIZE;
+	cfg->q_state.record_multiplier = HTT_TX_Q_STATE_ENTRY_MULTIPLIER;
+
+	ath10k_dbg(ar, ATH10K_DBG_HTT, "htt frag desc bank cmd\n");
+
+	ret = ath10k_htc_send(&htt->ar->htc, htt->eid, skb);
+	if (ret) {
+		ath10k_warn(ar, "failed to send frag desc bank cfg request: %d\n",
+			    ret);
+		dev_kfree_skb_any(skb);
+		return ret;
+	}
+
+	return 0;
+}
+
+static void ath10k_htt_fill_rx_desc_offset_32(void *rx_ring)
+{
+	struct htt_rx_ring_setup_ring32 *ring =
+			(struct htt_rx_ring_setup_ring32 *)rx_ring;
+
+#define desc_offset(x) (offsetof(struct htt_rx_desc, x) / 4)
+	ring->mac80211_hdr_offset = __cpu_to_le16(desc_offset(rx_hdr_status));
+	ring->msdu_payload_offset = __cpu_to_le16(desc_offset(msdu_payload));
+	ring->ppdu_start_offset = __cpu_to_le16(desc_offset(ppdu_start));
+	ring->ppdu_end_offset = __cpu_to_le16(desc_offset(ppdu_end));
+	ring->mpdu_start_offset = __cpu_to_le16(desc_offset(mpdu_start));
+	ring->mpdu_end_offset = __cpu_to_le16(desc_offset(mpdu_end));
+	ring->msdu_start_offset = __cpu_to_le16(desc_offset(msdu_start));
+	ring->msdu_end_offset = __cpu_to_le16(desc_offset(msdu_end));
+	ring->rx_attention_offset = __cpu_to_le16(desc_offset(attention));
+	ring->frag_info_offset = __cpu_to_le16(desc_offset(frag_info));
+#undef desc_offset
+}
+
+static void ath10k_htt_fill_rx_desc_offset_64(void *rx_ring)
+{
+	struct htt_rx_ring_setup_ring64 *ring =
+			(struct htt_rx_ring_setup_ring64 *)rx_ring;
+
+#define desc_offset(x) (offsetof(struct htt_rx_desc, x) / 4)
+	ring->mac80211_hdr_offset = __cpu_to_le16(desc_offset(rx_hdr_status));
+	ring->msdu_payload_offset = __cpu_to_le16(desc_offset(msdu_payload));
+	ring->ppdu_start_offset = __cpu_to_le16(desc_offset(ppdu_start));
+	ring->ppdu_end_offset = __cpu_to_le16(desc_offset(ppdu_end));
+	ring->mpdu_start_offset = __cpu_to_le16(desc_offset(mpdu_start));
+	ring->mpdu_end_offset = __cpu_to_le16(desc_offset(mpdu_end));
+	ring->msdu_start_offset = __cpu_to_le16(desc_offset(msdu_start));
+	ring->msdu_end_offset = __cpu_to_le16(desc_offset(msdu_end));
+	ring->rx_attention_offset = __cpu_to_le16(desc_offset(attention));
+	ring->frag_info_offset = __cpu_to_le16(desc_offset(frag_info));
+#undef desc_offset
+}
+
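Both fill helpers report the rx-descriptor offsets in 4-byte words, hence the division by four inside desc_offset(). For example:

	/* illustrative: a field at byte offset 48 in struct htt_rx_desc
	 * is programmed as word offset 48 / 4 == 12
	 */
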
+static int ath10k_htt_send_rx_ring_cfg_32(struct ath10k_htt *htt)
+{
+	struct ath10k *ar = htt->ar;
+	struct sk_buff *skb;
+	struct htt_cmd *cmd;
+	struct htt_rx_ring_setup_ring32 *ring;
 	const int num_rx_ring = 1;
 	u16 flags;
 	u32 fw_idx;
@@ -626,7 +812,7 @@ int ath10k_htt_send_rx_ring_cfg_ll(struct ath10k_htt *htt)
 	BUILD_BUG_ON(!IS_ALIGNED(HTT_RX_BUF_SIZE, 4));
 	BUILD_BUG_ON((HTT_RX_BUF_SIZE & HTT_MAX_CACHE_LINE_SIZE_MASK) != 0);
 
-	len = sizeof(cmd->hdr) + sizeof(cmd->rx_setup.hdr)
+	len = sizeof(cmd->hdr) + sizeof(cmd->rx_setup_32.hdr)
 	    + (sizeof(*ring) * num_rx_ring);
 	skb = ath10k_htc_alloc_skb(ar, len);
 	if (!skb)
@@ -635,10 +821,10 @@ int ath10k_htt_send_rx_ring_cfg_ll(struct ath10k_htt *htt)
 	skb_put(skb, len);
 
 	cmd = (struct htt_cmd *)skb->data;
-	ring = &cmd->rx_setup.rings[0];
+	ring = &cmd->rx_setup_32.rings[0];
 
 	cmd->hdr.msg_type = HTT_H2T_MSG_TYPE_RX_RING_CFG;
-	cmd->rx_setup.hdr.num_rings = 1;
+	cmd->rx_setup_32.hdr.num_rings = 1;
 
 	/* FIXME: do we need all of this? */
 	flags = 0;
@@ -669,21 +855,76 @@ int ath10k_htt_send_rx_ring_cfg_ll(struct ath10k_htt *htt)
 	ring->flags = __cpu_to_le16(flags);
 	ring->fw_idx_init_val = __cpu_to_le16(fw_idx);
 
-#define desc_offset(x) (offsetof(struct htt_rx_desc, x) / 4)
+	ath10k_htt_fill_rx_desc_offset_32(ring);
+	ret = ath10k_htc_send(&htt->ar->htc, htt->eid, skb);
+	if (ret) {
+		dev_kfree_skb_any(skb);
+		return ret;
+	}
 
-	ring->mac80211_hdr_offset = __cpu_to_le16(desc_offset(rx_hdr_status));
-	ring->msdu_payload_offset = __cpu_to_le16(desc_offset(msdu_payload));
-	ring->ppdu_start_offset = __cpu_to_le16(desc_offset(ppdu_start));
-	ring->ppdu_end_offset = __cpu_to_le16(desc_offset(ppdu_end));
-	ring->mpdu_start_offset = __cpu_to_le16(desc_offset(mpdu_start));
-	ring->mpdu_end_offset = __cpu_to_le16(desc_offset(mpdu_end));
-	ring->msdu_start_offset = __cpu_to_le16(desc_offset(msdu_start));
-	ring->msdu_end_offset = __cpu_to_le16(desc_offset(msdu_end));
-	ring->rx_attention_offset = __cpu_to_le16(desc_offset(attention));
-	ring->frag_info_offset = __cpu_to_le16(desc_offset(frag_info));
+	return 0;
+}
 
-#undef desc_offset
+static int ath10k_htt_send_rx_ring_cfg_64(struct ath10k_htt *htt)
+{
+	struct ath10k *ar = htt->ar;
+	struct sk_buff *skb;
+	struct htt_cmd *cmd;
+	struct htt_rx_ring_setup_ring64 *ring;
+	const int num_rx_ring = 1;
+	u16 flags;
+	u32 fw_idx;
+	int len;
+	int ret;
+
+	/* HW expects the buffer to be an integral number of 4-byte
+	 * "words"
+	 */
+	BUILD_BUG_ON(!IS_ALIGNED(HTT_RX_BUF_SIZE, 4));
+	BUILD_BUG_ON((HTT_RX_BUF_SIZE & HTT_MAX_CACHE_LINE_SIZE_MASK) != 0);
+
+	len = sizeof(cmd->hdr) + sizeof(cmd->rx_setup_64.hdr)
+	    + (sizeof(*ring) * num_rx_ring);
+	skb = ath10k_htc_alloc_skb(ar, len);
+	if (!skb)
+		return -ENOMEM;
+
+	skb_put(skb, len);
+
+	cmd = (struct htt_cmd *)skb->data;
+	ring = &cmd->rx_setup_64.rings[0];
+
+	cmd->hdr.msg_type = HTT_H2T_MSG_TYPE_RX_RING_CFG;
+	cmd->rx_setup_64.hdr.num_rings = 1;
+
+	flags = 0;
+	flags |= HTT_RX_RING_FLAGS_MAC80211_HDR;
+	flags |= HTT_RX_RING_FLAGS_MSDU_PAYLOAD;
+	flags |= HTT_RX_RING_FLAGS_PPDU_START;
+	flags |= HTT_RX_RING_FLAGS_PPDU_END;
+	flags |= HTT_RX_RING_FLAGS_MPDU_START;
+	flags |= HTT_RX_RING_FLAGS_MPDU_END;
+	flags |= HTT_RX_RING_FLAGS_MSDU_START;
+	flags |= HTT_RX_RING_FLAGS_MSDU_END;
+	flags |= HTT_RX_RING_FLAGS_RX_ATTENTION;
+	flags |= HTT_RX_RING_FLAGS_FRAG_INFO;
+	flags |= HTT_RX_RING_FLAGS_UNICAST_RX;
+	flags |= HTT_RX_RING_FLAGS_MULTICAST_RX;
+	flags |= HTT_RX_RING_FLAGS_CTRL_RX;
+	flags |= HTT_RX_RING_FLAGS_MGMT_RX;
+	flags |= HTT_RX_RING_FLAGS_NULL_RX;
+	flags |= HTT_RX_RING_FLAGS_PHY_DATA_RX;
 
+	fw_idx = __le32_to_cpu(*htt->rx_ring.alloc_idx.vaddr);
+
+	ring->fw_idx_shadow_reg_paddr = __cpu_to_le64(htt->rx_ring.alloc_idx.paddr);
+	ring->rx_ring_base_paddr = __cpu_to_le64(htt->rx_ring.base_paddr);
+	ring->rx_ring_len = __cpu_to_le16(htt->rx_ring.size);
+	ring->rx_ring_bufsize = __cpu_to_le16(HTT_RX_BUF_SIZE);
+	ring->flags = __cpu_to_le16(flags);
+	ring->fw_idx_init_val = __cpu_to_le16(fw_idx);
+
+	ath10k_htt_fill_rx_desc_offset_64(ring);
 	ret = ath10k_htc_send(&htt->ar->htc, htt->eid, skb);
 	if (ret) {
 		dev_kfree_skb_any(skb);
@@ -895,8 +1136,9 @@ err:
 	return res;
 }
 
-int ath10k_htt_tx(struct ath10k_htt *htt, enum ath10k_hw_txrx_mode txmode,
-		  struct sk_buff *msdu)
+static int ath10k_htt_tx_32(struct ath10k_htt *htt,
+			    enum ath10k_hw_txrx_mode txmode,
+			    struct sk_buff *msdu)
 {
 	struct ath10k *ar = htt->ar;
 	struct device *dev = ar->dev;
@@ -904,7 +1146,7 @@ int ath10k_htt_tx(struct ath10k_htt *htt, enum ath10k_hw_txrx_mode txmode,
 	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(msdu);
 	struct ath10k_skb_cb *skb_cb = ATH10K_SKB_CB(msdu);
 	struct ath10k_hif_sg_item sg_items[2];
-	struct ath10k_htt_txbuf *txbuf;
+	struct ath10k_htt_txbuf_32 *txbuf;
 	struct htt_data_tx_desc_frag *frags;
 	bool is_eth = (txmode == ATH10K_HW_TXRX_ETHERNET);
 	u8 vdev_id = ath10k_htt_tx_get_vdev_id(ar, msdu);
@@ -917,6 +1159,7 @@ int ath10k_htt_tx(struct ath10k_htt *htt, enum ath10k_hw_txrx_mode txmode,
 	u32 frags_paddr = 0;
 	u32 txbuf_paddr;
 	struct htt_msdu_ext_desc *ext_desc = NULL;
+	struct htt_msdu_ext_desc *ext_desc_t = NULL;
 
 	spin_lock_bh(&htt->tx_lock);
 	res = ath10k_htt_tx_alloc_msdu_id(htt, msdu);
@@ -929,9 +1172,9 @@ int ath10k_htt_tx(struct ath10k_htt *htt, enum ath10k_hw_txrx_mode txmode,
 	prefetch_len = min(htt->prefetch_len, msdu->len);
 	prefetch_len = roundup(prefetch_len, 4);
 
-	txbuf = &htt->txbuf.vaddr[msdu_id];
+	txbuf = htt->txbuf.vaddr_txbuff_32 + msdu_id;
 	txbuf_paddr = htt->txbuf.paddr +
-		      (sizeof(struct ath10k_htt_txbuf) * msdu_id);
+		      (sizeof(struct ath10k_htt_txbuf_32) * msdu_id);
 
 	if ((ieee80211_is_action(hdr->frame_control) ||
 	     ieee80211_is_deauth(hdr->frame_control) ||
@@ -962,11 +1205,12 @@ int ath10k_htt_tx(struct ath10k_htt *htt, enum ath10k_hw_txrx_mode txmode,
 		/* pass through */
 	case ATH10K_HW_TXRX_ETHERNET:
 		if (ar->hw_params.continuous_frag_desc) {
-			memset(&htt->frag_desc.vaddr[msdu_id], 0,
+			ext_desc_t = htt->frag_desc.vaddr_desc_32;
+			memset(&ext_desc_t[msdu_id], 0,
 			       sizeof(struct htt_msdu_ext_desc));
 			frags = (struct htt_data_tx_desc_frag *)
-				&htt->frag_desc.vaddr[msdu_id].frags;
-			ext_desc = &htt->frag_desc.vaddr[msdu_id];
+				&ext_desc_t[msdu_id].frags;
+			ext_desc = &ext_desc_t[msdu_id];
 			frags[0].tword_addr.paddr_lo =
 				__cpu_to_le32(skb_cb->paddr);
 			frags[0].tword_addr.paddr_hi = 0;
@@ -1055,9 +1299,9 @@ int ath10k_htt_tx(struct ath10k_htt *htt, enum ath10k_hw_txrx_mode txmode,
 
 	trace_ath10k_htt_tx(ar, msdu_id, msdu->len, vdev_id, tid);
 	ath10k_dbg(ar, ATH10K_DBG_HTT,
-		   "htt tx flags0 %hhu flags1 %hu len %d id %hu frags_paddr %08x, msdu_paddr %08x vdev %hhu tid %hhu freq %hu\n",
-		   flags0, flags1, msdu->len, msdu_id, frags_paddr,
-		   (u32)skb_cb->paddr, vdev_id, tid, freq);
+		   "htt tx flags0 %hhu flags1 %hu len %d id %hu frags_paddr %pad, msdu_paddr %pad vdev %hhu tid %hhu freq %hu\n",
+		   flags0, flags1, msdu->len, msdu_id, &frags_paddr,
+		   &skb_cb->paddr, vdev_id, tid, freq);
 	ath10k_dbg_dump(ar, ATH10K_DBG_HTT_DUMP, NULL, "htt tx msdu: ",
 			msdu->data, msdu->len);
 	trace_ath10k_tx_hdr(ar, msdu->data, msdu->len);
@@ -1093,3 +1337,239 @@ err_free_msdu_id:
 err:
 	return res;
 }
+
+static int ath10k_htt_tx_64(struct ath10k_htt *htt,
+			    enum ath10k_hw_txrx_mode txmode,
+			    struct sk_buff *msdu)
+{
+	struct ath10k *ar = htt->ar;
+	struct device *dev = ar->dev;
+	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)msdu->data;
+	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(msdu);
+	struct ath10k_skb_cb *skb_cb = ATH10K_SKB_CB(msdu);
+	struct ath10k_hif_sg_item sg_items[2];
+	struct ath10k_htt_txbuf_64 *txbuf;
+	struct htt_data_tx_desc_frag *frags;
+	bool is_eth = (txmode == ATH10K_HW_TXRX_ETHERNET);
+	u8 vdev_id = ath10k_htt_tx_get_vdev_id(ar, msdu);
+	u8 tid = ath10k_htt_tx_get_tid(msdu, is_eth);
+	int prefetch_len;
+	int res;
+	u8 flags0 = 0;
+	u16 msdu_id, flags1 = 0;
+	u16 freq = 0;
+	dma_addr_t frags_paddr = 0;
+	u32 txbuf_paddr;
+	struct htt_msdu_ext_desc_64 *ext_desc = NULL;
+	struct htt_msdu_ext_desc_64 *ext_desc_t = NULL;
+
+	spin_lock_bh(&htt->tx_lock);
+	res = ath10k_htt_tx_alloc_msdu_id(htt, msdu);
+	spin_unlock_bh(&htt->tx_lock);
+	if (res < 0)
+		goto err;
+
+	msdu_id = res;
+
+	prefetch_len = min(htt->prefetch_len, msdu->len);
+	prefetch_len = roundup(prefetch_len, 4);
+
+	txbuf = htt->txbuf.vaddr_txbuff_64 + msdu_id;
+	txbuf_paddr = htt->txbuf.paddr +
+		      (sizeof(struct ath10k_htt_txbuf_64) * msdu_id);
+
+	if ((ieee80211_is_action(hdr->frame_control) ||
+	     ieee80211_is_deauth(hdr->frame_control) ||
+	     ieee80211_is_disassoc(hdr->frame_control)) &&
+	     ieee80211_has_protected(hdr->frame_control)) {
+		skb_put(msdu, IEEE80211_CCMP_MIC_LEN);
+	} else if (!(skb_cb->flags & ATH10K_SKB_F_NO_HWCRYPT) &&
+		   txmode == ATH10K_HW_TXRX_RAW &&
+		   ieee80211_has_protected(hdr->frame_control)) {
+		skb_put(msdu, IEEE80211_CCMP_MIC_LEN);
+	}
+
+	skb_cb->paddr = dma_map_single(dev, msdu->data, msdu->len,
+				       DMA_TO_DEVICE);
+	res = dma_mapping_error(dev, skb_cb->paddr);
+	if (res) {
+		res = -EIO;
+		goto err_free_msdu_id;
+	}
+
+	if (unlikely(info->flags & IEEE80211_TX_CTL_TX_OFFCHAN))
+		freq = ar->scan.roc_freq;
+
+	switch (txmode) {
+	case ATH10K_HW_TXRX_RAW:
+	case ATH10K_HW_TXRX_NATIVE_WIFI:
+		flags0 |= HTT_DATA_TX_DESC_FLAGS0_MAC_HDR_PRESENT;
+		/* pass through */
+	case ATH10K_HW_TXRX_ETHERNET:
+		if (ar->hw_params.continuous_frag_desc) {
+			ext_desc_t = htt->frag_desc.vaddr_desc_64;
+			memset(&ext_desc_t[msdu_id], 0,
+			       sizeof(struct htt_msdu_ext_desc_64));
+			frags = (struct htt_data_tx_desc_frag *)
+				&ext_desc_t[msdu_id].frags;
+			ext_desc = &ext_desc_t[msdu_id];
+			frags[0].tword_addr.paddr_lo =
+				__cpu_to_le32(skb_cb->paddr);
+			frags[0].tword_addr.paddr_hi =
+				__cpu_to_le16(upper_32_bits(skb_cb->paddr));
+			frags[0].tword_addr.len_16 = __cpu_to_le16(msdu->len);
+
+			frags_paddr =  htt->frag_desc.paddr +
+			   (sizeof(struct htt_msdu_ext_desc_64) * msdu_id);
+		} else {
+			frags = txbuf->frags;
+			frags[0].tword_addr.paddr_lo =
+						__cpu_to_le32(skb_cb->paddr);
+			frags[0].tword_addr.paddr_hi =
+				__cpu_to_le16(upper_32_bits(skb_cb->paddr));
+			frags[0].tword_addr.len_16 = __cpu_to_le16(msdu->len);
+			frags[1].tword_addr.paddr_lo = 0;
+			frags[1].tword_addr.paddr_hi = 0;
+			frags[1].tword_addr.len_16 = 0;
+		}
+		flags0 |= SM(txmode, HTT_DATA_TX_DESC_FLAGS0_PKT_TYPE);
+		break;
+	case ATH10K_HW_TXRX_MGMT:
+		flags0 |= SM(ATH10K_HW_TXRX_MGMT,
+			     HTT_DATA_TX_DESC_FLAGS0_PKT_TYPE);
+		flags0 |= HTT_DATA_TX_DESC_FLAGS0_MAC_HDR_PRESENT;
+
+		frags_paddr = skb_cb->paddr;
+		break;
+	}
+
+	/* Normally all commands go through HTC which manages tx credits for
+	 * each endpoint and notifies when tx is completed.
+	 *
+	 * HTT endpoint is creditless so there's no need to care about HTC
+	 * flags. In that case it is trivial to fill the HTC header here.
+	 *
+	 * MSDU transmission is considered completed upon HTT event. This
+	 * implies no relevant resources can be freed until after the event is
+	 * received. That's why the HTC tx completion handler itself is ignored
+	 * by setting transfer_context to NULL for all sg items.
+	 *
+	 * There is simply no point in pushing HTT TX_FRM through HTC tx path
+	 * as it's a waste of resources. By bypassing HTC it is possible to
+	 * avoid extra memory allocations, compress data structures and thus
+	 * improve performance.
+	 */
+
+	txbuf->htc_hdr.eid = htt->eid;
+	txbuf->htc_hdr.len = __cpu_to_le16(sizeof(txbuf->cmd_hdr) +
+					   sizeof(txbuf->cmd_tx) +
+					   prefetch_len);
+	txbuf->htc_hdr.flags = 0;
+
+	if (skb_cb->flags & ATH10K_SKB_F_NO_HWCRYPT)
+		flags0 |= HTT_DATA_TX_DESC_FLAGS0_NO_ENCRYPT;
+
+	flags1 |= SM((u16)vdev_id, HTT_DATA_TX_DESC_FLAGS1_VDEV_ID);
+	flags1 |= SM((u16)tid, HTT_DATA_TX_DESC_FLAGS1_EXT_TID);
+	if (msdu->ip_summed == CHECKSUM_PARTIAL &&
+	    !test_bit(ATH10K_FLAG_RAW_MODE, &ar->dev_flags)) {
+		flags1 |= HTT_DATA_TX_DESC_FLAGS1_CKSUM_L3_OFFLOAD;
+		flags1 |= HTT_DATA_TX_DESC_FLAGS1_CKSUM_L4_OFFLOAD;
+		if (ar->hw_params.continuous_frag_desc)
+			ext_desc->flags |= HTT_MSDU_CHECKSUM_ENABLE;
+	}
+
+	/* Prevent firmware from sending up tx inspection requests. There's
+	 * nothing ath10k can do with frames requested for inspection so force
+	 * it to simply rely on a regular tx completion with discard status.
+	 */
+	flags1 |= HTT_DATA_TX_DESC_FLAGS1_POSTPONED;
+
+	txbuf->cmd_hdr.msg_type = HTT_H2T_MSG_TYPE_TX_FRM;
+	txbuf->cmd_tx.flags0 = flags0;
+	txbuf->cmd_tx.flags1 = __cpu_to_le16(flags1);
+	txbuf->cmd_tx.len = __cpu_to_le16(msdu->len);
+	txbuf->cmd_tx.id = __cpu_to_le16(msdu_id);
+
+	/* fill fragment descriptor */
+	txbuf->cmd_tx.frags_paddr = __cpu_to_le64(frags_paddr);
+	if (ath10k_mac_tx_frm_has_freq(ar)) {
+		txbuf->cmd_tx.offchan_tx.peerid =
+				__cpu_to_le16(HTT_INVALID_PEERID);
+		txbuf->cmd_tx.offchan_tx.freq =
+				__cpu_to_le16(freq);
+	} else {
+		txbuf->cmd_tx.peerid =
+				__cpu_to_le32(HTT_INVALID_PEERID);
+	}
+
+	trace_ath10k_htt_tx(ar, msdu_id, msdu->len, vdev_id, tid);
+	ath10k_dbg(ar, ATH10K_DBG_HTT,
+		   "htt tx flags0 %hhu flags1 %hu len %d id %hu frags_paddr %pad, msdu_paddr %pad vdev %hhu tid %hhu freq %hu\n",
+		   flags0, flags1, msdu->len, msdu_id, &frags_paddr,
+		   &skb_cb->paddr, vdev_id, tid, freq);
+	ath10k_dbg_dump(ar, ATH10K_DBG_HTT_DUMP, NULL, "htt tx msdu: ",
+			msdu->data, msdu->len);
+	trace_ath10k_tx_hdr(ar, msdu->data, msdu->len);
+	trace_ath10k_tx_payload(ar, msdu->data, msdu->len);
+
+	sg_items[0].transfer_id = 0;
+	sg_items[0].transfer_context = NULL;
+	sg_items[0].vaddr = &txbuf->htc_hdr;
+	sg_items[0].paddr = txbuf_paddr +
+			    sizeof(txbuf->frags);
+	sg_items[0].len = sizeof(txbuf->htc_hdr) +
+			  sizeof(txbuf->cmd_hdr) +
+			  sizeof(txbuf->cmd_tx);
+
+	sg_items[1].transfer_id = 0;
+	sg_items[1].transfer_context = NULL;
+	sg_items[1].vaddr = msdu->data;
+	sg_items[1].paddr = skb_cb->paddr;
+	sg_items[1].len = prefetch_len;
+
+	res = ath10k_hif_tx_sg(htt->ar,
+			       htt->ar->htc.endpoint[htt->eid].ul_pipe_id,
+			       sg_items, ARRAY_SIZE(sg_items));
+	if (res)
+		goto err_unmap_msdu;
+
+	return 0;
+
+err_unmap_msdu:
+	dma_unmap_single(dev, skb_cb->paddr, msdu->len, DMA_TO_DEVICE);
+err_free_msdu_id:
+	ath10k_htt_tx_free_msdu_id(htt, msdu_id);
+err:
+	return res;
+}
+
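To make the HTC-bypass comment in ath10k_htt_tx_64() above concrete: each frame leaves as a two-element scatter list, item 0 pointing into the pre-mapped per-MSDU txbuf (HTC header, HTT command header and tx descriptor, skipping the frags[] at the front) and item 1 pointing at the first prefetch_len bytes of the MSDU. A self-contained sketch of that layout with simplified stand-in types and made-up DMA addresses, not the driver's structs:

#include <stdio.h>
#include <stdint.h>

struct sg_item {		/* stand-in for ath10k_hif_sg_item */
	const void *vaddr;
	uint64_t paddr;
	unsigned int len;
};

struct txbuf {			/* stand-in for ath10k_htt_txbuf_64 */
	unsigned char frags[32];
	unsigned char htc_hdr[8];
	unsigned char cmd_hdr[4];
	unsigned char cmd_tx[24];
};

int main(void)
{
	static struct txbuf tb;
	static unsigned char msdu[256];
	uint64_t txbuf_paddr = 0x1000;	/* made-up DMA address */
	uint64_t msdu_paddr = 0x2000;	/* made-up DMA address */
	unsigned int prefetch_len = 64;
	struct sg_item sg[2];

	/* item 0 skips frags[], exactly like
	 * sg_items[0].paddr = txbuf_paddr + sizeof(txbuf->frags) above
	 */
	sg[0].vaddr = tb.htc_hdr;
	sg[0].paddr = txbuf_paddr + sizeof(tb.frags);
	sg[0].len = sizeof(tb.htc_hdr) + sizeof(tb.cmd_hdr) + sizeof(tb.cmd_tx);

	sg[1].vaddr = msdu;
	sg[1].paddr = msdu_paddr;
	sg[1].len = prefetch_len;

	printf("item0 len=%u item1 len=%u\n", sg[0].len, sg[1].len);
	return 0;
}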
+static const struct ath10k_htt_tx_ops htt_tx_ops_32 = {
+	.htt_send_rx_ring_cfg = ath10k_htt_send_rx_ring_cfg_32,
+	.htt_send_frag_desc_bank_cfg = ath10k_htt_send_frag_desc_bank_cfg_32,
+	.htt_alloc_frag_desc = ath10k_htt_tx_alloc_cont_frag_desc_32,
+	.htt_free_frag_desc = ath10k_htt_tx_free_cont_frag_desc_32,
+	.htt_tx = ath10k_htt_tx_32,
+	.htt_alloc_txbuff = ath10k_htt_tx_alloc_cont_txbuf_32,
+	.htt_free_txbuff = ath10k_htt_tx_free_cont_txbuf_32,
+};
+
+static const struct ath10k_htt_tx_ops htt_tx_ops_64 = {
+	.htt_send_rx_ring_cfg = ath10k_htt_send_rx_ring_cfg_64,
+	.htt_send_frag_desc_bank_cfg = ath10k_htt_send_frag_desc_bank_cfg_64,
+	.htt_alloc_frag_desc = ath10k_htt_tx_alloc_cont_frag_desc_64,
+	.htt_free_frag_desc = ath10k_htt_tx_free_cont_frag_desc_64,
+	.htt_tx = ath10k_htt_tx_64,
+	.htt_alloc_txbuff = ath10k_htt_tx_alloc_cont_txbuf_64,
+	.htt_free_txbuff = ath10k_htt_tx_free_cont_txbuf_64,
+};
+
+void ath10k_htt_set_tx_ops(struct ath10k_htt *htt)
+{
+	struct ath10k *ar = htt->ar;
+
+	if (ar->hw_params.target_64bit)
+		htt->tx_ops = &htt_tx_ops_64;
+	else
+		htt->tx_ops = &htt_tx_ops_32;
+}
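The two ops tables and ath10k_htt_set_tx_ops() above are a plain ops-table dispatch: the 32-bit/64-bit descriptor choice is made once from hw_params.target_64bit, and callers (e.g. ath10k_mac_tx_submit() later in this patch) go through htt->tx_ops->htt_tx() without ever branching on the width. A minimal sketch of the pattern with hypothetical names:

#include <stdio.h>

struct htt;

struct tx_ops {
	int (*tx)(struct htt *htt);
};

struct htt {
	const struct tx_ops *ops;
	int target_64bit;
};

static int tx_32(struct htt *htt) { (void)htt; puts("32-bit descriptors"); return 0; }
static int tx_64(struct htt *htt) { (void)htt; puts("64-bit descriptors"); return 0; }

static const struct tx_ops ops_32 = { .tx = tx_32 };
static const struct tx_ops ops_64 = { .tx = tx_64 };

/* mirrors ath10k_htt_set_tx_ops(): pick the table once at setup time */
static void set_tx_ops(struct htt *htt)
{
	htt->ops = htt->target_64bit ? &ops_64 : &ops_32;
}

int main(void)
{
	struct htt htt = { .target_64bit = 1 };

	set_tx_ops(&htt);
	return htt.ops->tx(&htt);	/* callers never branch on the width */
}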
diff --git a/drivers/net/wireless/ath/ath10k/hw.c b/drivers/net/wireless/ath/ath10k/hw.c
index 88955bbe20bd..497ac33e0fbf 100644
--- a/drivers/net/wireless/ath/ath10k/hw.c
+++ b/drivers/net/wireless/ath/ath10k/hw.c
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2014-2015 Qualcomm Atheros, Inc.
+ * Copyright (c) 2014-2017 Qualcomm Atheros, Inc.
  *
  * Permission to use, copy, modify, and/or distribute this software for any
  * purpose with or without fee is hereby granted, provided that the above
@@ -931,3 +931,5 @@ const struct ath10k_hw_ops qca6174_ops = {
 	.set_coverage_class = ath10k_hw_qca988x_set_coverage_class,
 	.enable_pll_clk = ath10k_hw_qca6174_enable_pll_clock,
 };
+
+const struct ath10k_hw_ops wcn3990_ops = {};
diff --git a/drivers/net/wireless/ath/ath10k/hw.h b/drivers/net/wireless/ath/ath10k/hw.h
index 05f26e5858ad..6203bc65799b 100644
--- a/drivers/net/wireless/ath/ath10k/hw.h
+++ b/drivers/net/wireless/ath/ath10k/hw.h
@@ -1,6 +1,6 @@
 /*
  * Copyright (c) 2005-2011 Atheros Communications Inc.
- * Copyright (c) 2011-2013 Qualcomm Atheros, Inc.
+ * Copyright (c) 2011-2017 Qualcomm Atheros, Inc.
  *
  * Permission to use, copy, modify, and/or distribute this software for any
  * purpose with or without fee is hereby granted, provided that the above
@@ -128,6 +128,10 @@ enum qca9377_chip_id_rev {
 #define QCA4019_HW_1_0_BOARD_DATA_FILE "board.bin"
 #define QCA4019_HW_1_0_PATCH_LOAD_ADDR  0x1234
 
+/* WCN3990 1.0 definitions */
+#define WCN3990_HW_1_0_DEV_VERSION	ATH10K_HW_WCN3990
+#define WCN3990_HW_1_0_FW_DIR		ATH10K_FW_DIR "/WCN3990/hw3.0"
+
 #define ATH10K_FW_FILE_BASE		"firmware"
 #define ATH10K_FW_API_MAX		6
 #define ATH10K_FW_API_MIN		2
@@ -553,6 +557,16 @@ struct ath10k_hw_params {
 
 	/* Number of ciphers supported (i.e First N) in cipher_suites array */
 	int n_cipher_suites;
+
+	u32 num_peers;
+	u32 ast_skid_limit;
+	u32 num_wds_entries;
+
+	/* Targets supporting physical addressing capability above 32-bits */
+	bool target_64bit;
+
+	/* Target rx ring fill level */
+	u32 rx_ring_fill_level;
 };
 
 struct htt_rx_desc;
@@ -567,6 +581,7 @@ struct ath10k_hw_ops {
 extern const struct ath10k_hw_ops qca988x_ops;
 extern const struct ath10k_hw_ops qca99x0_ops;
 extern const struct ath10k_hw_ops qca6174_ops;
+extern const struct ath10k_hw_ops wcn3990_ops;
 
 extern const struct ath10k_hw_clk_params qca6174_clk[];
 
@@ -663,6 +678,11 @@ ath10k_rx_desc_get_l3_pad_bytes(struct ath10k_hw_params *hw,
 #define TARGET_TLV_NUM_MSDU_DESC		(1024 + 32)
 #define TARGET_TLV_NUM_WOW_PATTERNS		22
 
+/* Target specific defines for WMI-HL-1.0 firmware */
+#define TARGET_HL_10_TLV_NUM_PEERS		14
+#define TARGET_HL_10_TLV_AST_SKID_LIMIT		6
+#define TARGET_HL_10_TLV_NUM_WDS_ENTRIES	2
+
 /* Diagnostic Window */
 #define CE_DIAG_PIPE	7
 
@@ -868,6 +888,7 @@ ath10k_rx_desc_get_l3_pad_bytes(struct ath10k_hw_params *hw,
 #define PCIE_INTR_CLR_ADDRESS			ar->regs->pcie_intr_clr_address
 #define SCRATCH_3_ADDRESS			ar->regs->scratch_3_address
 #define CPU_INTR_ADDRESS			0x0010
+#define FW_RAM_CONFIG_ADDRESS			0x0018
 
 #define CCNT_TO_MSEC(ar, x) ((x) / ar->hw_params.channel_counters_freq_hz)
 
diff --git a/drivers/net/wireless/ath/ath10k/mac.c b/drivers/net/wireless/ath/ath10k/mac.c
index 0a947eef348d..ebb3f1b046f3 100644
--- a/drivers/net/wireless/ath/ath10k/mac.c
+++ b/drivers/net/wireless/ath/ath10k/mac.c
@@ -1,6 +1,6 @@
 /*
  * Copyright (c) 2005-2011 Atheros Communications Inc.
- * Copyright (c) 2011-2013 Qualcomm Atheros, Inc.
+ * Copyright (c) 2011-2017 Qualcomm Atheros, Inc.
  *
  * Permission to use, copy, modify, and/or distribute this software for any
  * purpose with or without fee is hereby granted, provided that the above
@@ -2563,7 +2563,7 @@ static void ath10k_peer_assoc_h_qos(struct ath10k *ar,
 		}
 		break;
 	case WMI_VDEV_TYPE_STA:
-		if (vif->bss_conf.qos)
+		if (sta->wme)
 			arg->peer_flags |= arvif->ar->wmi.peer_flags->qos;
 		break;
 	case WMI_VDEV_TYPE_IBSS:
@@ -3574,7 +3574,9 @@ ath10k_mac_tx_h_get_txpath(struct ath10k *ar,
 		return ATH10K_MAC_TX_HTT;
 	case ATH10K_HW_TXRX_MGMT:
 		if (test_bit(ATH10K_FW_FEATURE_HAS_WMI_MGMT_TX,
-			     ar->running_fw->fw_file.fw_features))
+			     ar->running_fw->fw_file.fw_features) ||
+			     test_bit(WMI_SERVICE_MGMT_TX_WMI,
+				      ar->wmi.svc_map))
 			return ATH10K_MAC_TX_WMI_MGMT;
 		else if (ar->htt.target_version_major >= 3)
 			return ATH10K_MAC_TX_HTT;
@@ -3595,7 +3597,7 @@ static int ath10k_mac_tx_submit(struct ath10k *ar,
 
 	switch (txpath) {
 	case ATH10K_MAC_TX_HTT:
-		ret = ath10k_htt_tx(htt, txmode, skb);
+		ret = htt->tx_ops->htt_tx(htt, txmode, skb);
 		break;
 	case ATH10K_MAC_TX_HTT_MGMT:
 		ret = ath10k_htt_mgmt_tx(htt, skb);
@@ -6201,6 +6203,16 @@ static int ath10k_sta_state(struct ieee80211_hw *hw,
 			   "mac vdev %d peer delete %pM sta %pK (sta gone)\n",
 			   arvif->vdev_id, sta->addr, sta);
 
+		if (sta->tdls) {
+			ret = ath10k_mac_tdls_peer_update(ar, arvif->vdev_id,
+							  sta,
+							  WMI_TDLS_PEER_STATE_TEARDOWN);
+			if (ret)
+				ath10k_warn(ar, "failed to update tdls peer state for %pM state %d: %i\n",
+					    sta->addr,
+					    WMI_TDLS_PEER_STATE_TEARDOWN, ret);
+		}
+
 		ret = ath10k_peer_delete(ar, arvif->vdev_id, sta->addr);
 		if (ret)
 			ath10k_warn(ar, "failed to delete peer %pM for vdev %d: %i\n",
@@ -7536,6 +7548,16 @@ ath10k_mac_op_assign_vif_chanctx(struct ieee80211_hw *hw,
 				    arvif->vdev_id, ret);
 	}
 
+	if (ath10k_peer_stats_enabled(ar)) {
+		ar->pktlog_filter |= ATH10K_PKTLOG_PEER_STATS;
+		ret = ath10k_wmi_pdev_pktlog_enable(ar,
+						    ar->pktlog_filter);
+		if (ret) {
+			ath10k_warn(ar, "failed to enable pktlog %d\n", ret);
+			goto err_stop;
+		}
+	}
+
 	mutex_unlock(&ar->conf_mutex);
 	return 0;
 
@@ -7620,6 +7642,34 @@ static void ath10k_mac_op_sta_pre_rcu_remove(struct ieee80211_hw *hw,
 			peer->removed = true;
 }
 
+static void ath10k_sta_statistics(struct ieee80211_hw *hw,
+				  struct ieee80211_vif *vif,
+				  struct ieee80211_sta *sta,
+				  struct station_info *sinfo)
+{
+	struct ath10k_sta *arsta = (struct ath10k_sta *)sta->drv_priv;
+	struct ath10k *ar = arsta->arvif->ar;
+
+	if (!ath10k_peer_stats_enabled(ar))
+		return;
+
+	sinfo->rx_duration = arsta->rx_duration;
+	sinfo->filled |= 1ULL << NL80211_STA_INFO_RX_DURATION;
+
+	if (!arsta->txrate.legacy && !arsta->txrate.nss)
+		return;
+
+	if (arsta->txrate.legacy) {
+		sinfo->txrate.legacy = arsta->txrate.legacy;
+	} else {
+		sinfo->txrate.mcs = arsta->txrate.mcs;
+		sinfo->txrate.nss = arsta->txrate.nss;
+		sinfo->txrate.bw = arsta->txrate.bw;
+	}
+	sinfo->txrate.flags = arsta->txrate.flags;
+	sinfo->filled |= 1ULL << NL80211_STA_INFO_TX_BITRATE;
+}
+
 static const struct ieee80211_ops ath10k_ops = {
 	.tx				= ath10k_mac_op_tx,
 	.wake_tx_queue			= ath10k_mac_op_wake_tx_queue,
@@ -7661,6 +7711,7 @@ static const struct ieee80211_ops ath10k_ops = {
 	.unassign_vif_chanctx		= ath10k_mac_op_unassign_vif_chanctx,
 	.switch_vif_chanctx		= ath10k_mac_op_switch_vif_chanctx,
 	.sta_pre_rcu_remove		= ath10k_mac_op_sta_pre_rcu_remove,
+	.sta_statistics			= ath10k_sta_statistics,
 
 	CFG80211_TESTMODE_CMD(ath10k_tm_cmd)
 
@@ -7671,7 +7722,6 @@ static const struct ieee80211_ops ath10k_ops = {
 #endif
 #ifdef CONFIG_MAC80211_DEBUGFS
 	.sta_add_debugfs		= ath10k_sta_add_debugfs,
-	.sta_statistics			= ath10k_sta_statistics,
 #endif
 };
 
@@ -8244,7 +8294,8 @@ int ath10k_mac_register(struct ath10k *ar)
 	if (test_bit(WMI_SERVICE_TDLS, ar->wmi.svc_map) ||
 	    test_bit(WMI_SERVICE_TDLS_EXPLICIT_MODE_ONLY, ar->wmi.svc_map)) {
 		ar->hw->wiphy->flags |= WIPHY_FLAG_SUPPORTS_TDLS;
-		ieee80211_hw_set(ar->hw, TDLS_WIDER_BW);
+		if (test_bit(WMI_SERVICE_TDLS_WIDER_BANDWIDTH, ar->wmi.svc_map))
+			ieee80211_hw_set(ar->hw, TDLS_WIDER_BW);
 	}
 
 	ar->hw->wiphy->flags |= WIPHY_FLAG_HAS_REMAIN_ON_CHANNEL;
@@ -8329,15 +8380,6 @@ int ath10k_mac_register(struct ath10k *ar)
 			ath10k_warn(ar, "failed to initialise DFS pattern detector\n");
 	}
 
-	/* Current wake_tx_queue implementation imposes a significant
-	 * performance penalty in some setups. The tx scheduling code needs
-	 * more work anyway so disable the wake_tx_queue unless firmware
-	 * supports the pull-push mechanism.
-	 */
-	if (!test_bit(ATH10K_FW_FEATURE_PEER_FLOW_CONTROL,
-		      ar->running_fw->fw_file.fw_features))
-		ar->ops->wake_tx_queue = NULL;
-
 	ret = ath10k_mac_init_rd(ar);
 	if (ret) {
 		ath10k_err(ar, "failed to derive regdom: %d\n", ret);
diff --git a/drivers/net/wireless/ath/ath10k/mac.h b/drivers/net/wireless/ath/ath10k/mac.h
index 553747bc19ed..81f8d6c0af35 100644
--- a/drivers/net/wireless/ath/ath10k/mac.h
+++ b/drivers/net/wireless/ath/ath10k/mac.h
@@ -1,6 +1,6 @@
 /*
  * Copyright (c) 2005-2011 Atheros Communications Inc.
- * Copyright (c) 2011-2013 Qualcomm Atheros, Inc.
+ * Copyright (c) 2011-2017 Qualcomm Atheros, Inc.
  *
  * Permission to use, copy, modify, and/or distribute this software for any
  * purpose with or without fee is hereby granted, provided that the above
diff --git a/drivers/net/wireless/ath/ath10k/pci.c b/drivers/net/wireless/ath/ath10k/pci.c
index ffea348b2190..355db6a0fcf3 100644
--- a/drivers/net/wireless/ath/ath10k/pci.c
+++ b/drivers/net/wireless/ath/ath10k/pci.c
@@ -1,6 +1,6 @@
 /*
  * Copyright (c) 2005-2011 Atheros Communications Inc.
- * Copyright (c) 2011-2013 Qualcomm Atheros, Inc.
+ * Copyright (c) 2011-2017 Qualcomm Atheros, Inc.
  *
  * Permission to use, copy, modify, and/or distribute this software for any
  * purpose with or without fee is hereby granted, provided that the above
@@ -23,6 +23,7 @@
 
 #include "core.h"
 #include "debug.h"
+#include "coredump.h"
 
 #include "targaddrs.h"
 #include "bmi.h"
@@ -51,6 +52,11 @@ MODULE_PARM_DESC(reset_mode, "0: auto, 1: warm only (default: 0)");
 #define ATH10K_PCI_TARGET_WAIT 3000
 #define ATH10K_PCI_NUM_WARM_RESET_ATTEMPTS 3
 
+/* Maximum number of bytes that can be handled atomically by
+ * diag read and write.
+ */
+#define ATH10K_DIAG_TRANSFER_LIMIT	0x5000
+
 static const struct pci_device_id ath10k_pci_id_table[] = {
 	{ PCI_VDEVICE(ATHEROS, QCA988X_2_0_DEVICE_ID) }, /* PCI-E QCA988X V2 */
 	{ PCI_VDEVICE(ATHEROS, QCA6164_2_1_DEVICE_ID) }, /* PCI-E QCA6164 V2.1 */
@@ -785,7 +791,7 @@ static int __ath10k_pci_rx_post_buf(struct ath10k_pci_pipe *pipe)
 	ATH10K_SKB_RXCB(skb)->paddr = paddr;
 
 	spin_lock_bh(&ce->ce_lock);
-	ret = __ath10k_ce_rx_post_buf(ce_pipe, skb, paddr);
+	ret = ce_pipe->ops->ce_rx_post_buf(ce_pipe, skb, paddr);
 	spin_unlock_bh(&ce->ce_lock);
 	if (ret) {
 		dma_unmap_single(ar->dev, paddr, skb->len + skb_tailroom(skb),
@@ -923,7 +929,7 @@ static int ath10k_pci_diag_read_mem(struct ath10k *ar, u32 address, void *data,
 		nbytes = min_t(unsigned int, remaining_bytes,
 			       DIAG_TRANSFER_LIMIT);
 
-		ret = __ath10k_ce_rx_post_buf(ce_diag, &ce_data, ce_data);
+		ret = ce_diag->ops->ce_rx_post_buf(ce_diag, &ce_data, ce_data);
 		if (ret != 0)
 			goto done;
 
@@ -1089,7 +1095,7 @@ int ath10k_pci_diag_write_mem(struct ath10k *ar, u32 address,
 		nbytes = min_t(int, remaining_bytes, DIAG_TRANSFER_LIMIT);
 
 		/* Set up to receive directly into Target(!) address */
-		ret = __ath10k_ce_rx_post_buf(ce_diag, &address, address);
+		ret = ce_diag->ops->ce_rx_post_buf(ce_diag, &address, address);
 		if (ret != 0)
 			goto done;
 
@@ -1461,6 +1467,215 @@ static void ath10k_pci_dump_registers(struct ath10k *ar,
 		crash_data->registers[i] = reg_dump_values[i];
 }
 
+static int ath10k_pci_dump_memory_section(struct ath10k *ar,
+					  const struct ath10k_mem_region *mem_region,
+					  u8 *buf, size_t buf_len)
+{
+	const struct ath10k_mem_section *cur_section, *next_section;
+	unsigned int count, section_size, skip_size;
+	int ret, i, j;
+
+	if (!mem_region || !buf)
+		return 0;
+
+	cur_section = &mem_region->section_table.sections[0];
+
+	if (mem_region->start > cur_section->start) {
+		ath10k_warn(ar, "incorrect memdump region 0x%x with section start address 0x%x.\n",
+			    mem_region->start, cur_section->start);
+		return 0;
+	}
+
+	skip_size = cur_section->start - mem_region->start;
+
+	/* fill the gap between the first register section and register
+	 * start address
+	 */
+	for (i = 0; i < skip_size; i++) {
+		*buf = ATH10K_MAGIC_NOT_COPIED;
+		buf++;
+	}
+
+	count = 0;
+
+	for (i = 0; cur_section != NULL; i++) {
+		section_size = cur_section->end - cur_section->start;
+
+		if (cur_section->end <= cur_section->start) {
+			ath10k_warn(ar, "incorrect ramdump format with start address 0x%x and stop address 0x%x\n",
+				    cur_section->start,
+				    cur_section->end);
+			break;
+		}
+
+		if ((i + 1) == mem_region->section_table.size) {
+			/* last section */
+			next_section = NULL;
+			skip_size = 0;
+		} else {
+			next_section = cur_section + 1;
+
+			if (cur_section->end > next_section->start) {
+				ath10k_warn(ar, "next ramdump section 0x%x is smaller than current end address 0x%x\n",
+					    next_section->start,
+					    cur_section->end);
+				break;
+			}
+
+			skip_size = next_section->start - cur_section->end;
+		}
+
+		if (buf_len < (skip_size + section_size)) {
+			ath10k_warn(ar, "ramdump buffer is too small: %zu\n", buf_len);
+			break;
+		}
+
+		buf_len -= skip_size + section_size;
+
+		/* read section to dest memory */
+		ret = ath10k_pci_diag_read_mem(ar, cur_section->start,
+					       buf, section_size);
+		if (ret) {
+			ath10k_warn(ar, "failed to read ramdump from section 0x%x: %d\n",
+				    cur_section->start, ret);
+			break;
+		}
+
+		buf += section_size;
+		count += section_size;
+
+		/* fill in the gap between this section and the next */
+		for (j = 0; j < skip_size; j++) {
+			*buf = ATH10K_MAGIC_NOT_COPIED;
+			buf++;
+		}
+
+		count += skip_size;
+
+		if (!next_section)
+			/* this was the last section */
+			break;
+
+		cur_section = next_section;
+	}
+
+	return count;
+}
+
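For the gap handling in ath10k_pci_dump_memory_section() above: bytes between sections are filled with ATH10K_MAGIC_NOT_COPIED so that an offset into the dump buffer always maps to the same target address, whether or not it was actually copied. A small standalone demonstration of that layout, with hypothetical section addresses and fill bytes rather than driver data:

#include <stdio.h>
#include <string.h>

#define MAGIC_NOT_COPIED 0xaa	/* stand-in for ATH10K_MAGIC_NOT_COPIED */

struct section { unsigned int start, end; };

int main(void)
{
	struct section s[2] = { { 0x1000, 0x1010 }, { 0x1020, 0x1030 } };
	unsigned int region_start = 0x1000;
	unsigned char buf[0x30];

	/* pad everything first; real reads then overwrite their slots, so
	 * buf[addr - region_start] always corresponds to target address addr
	 */
	memset(buf, MAGIC_NOT_COPIED, sizeof(buf));
	memset(buf + (s[0].start - region_start), 0x11, s[0].end - s[0].start);
	memset(buf + (s[1].start - region_start), 0x22, s[1].end - s[1].start);

	printf("byte at 0x1018 (gap): %#x\n", (unsigned int)buf[0x18]); /* 0xaa */
	return 0;
}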
+static int ath10k_pci_set_ram_config(struct ath10k *ar, u32 config)
+{
+	u32 val;
+
+	ath10k_pci_write32(ar, SOC_CORE_BASE_ADDRESS +
+			   FW_RAM_CONFIG_ADDRESS, config);
+
+	val = ath10k_pci_read32(ar, SOC_CORE_BASE_ADDRESS +
+				FW_RAM_CONFIG_ADDRESS);
+	if (val != config) {
+		ath10k_warn(ar, "failed to set RAM config from 0x%x to 0x%x\n",
+			    val, config);
+		return -EIO;
+	}
+
+	return 0;
+}
+
+static void ath10k_pci_dump_memory(struct ath10k *ar,
+				   struct ath10k_fw_crash_data *crash_data)
+{
+	const struct ath10k_hw_mem_layout *mem_layout;
+	const struct ath10k_mem_region *current_region;
+	struct ath10k_dump_ram_data_hdr *hdr;
+	u32 count, shift;
+	size_t buf_len;
+	int ret, i;
+	u8 *buf;
+
+	lockdep_assert_held(&ar->data_lock);
+
+	if (!crash_data)
+		return;
+
+	mem_layout = ath10k_coredump_get_mem_layout(ar);
+	if (!mem_layout)
+		return;
+
+	current_region = &mem_layout->region_table.regions[0];
+
+	buf = crash_data->ramdump_buf;
+	buf_len = crash_data->ramdump_buf_len;
+
+	memset(buf, 0, buf_len);
+
+	for (i = 0; i < mem_layout->region_table.size; i++) {
+		count = 0;
+
+		if (current_region->len > buf_len) {
+			ath10k_warn(ar, "memory region %s size %d is larger that remaining ramdump buffer size %zu\n",
+				    current_region->name,
+				    current_region->len,
+				    buf_len);
+			break;
+		}
+
+		/* To get an IRAM dump, the host driver needs to switch the
+		 * target ram config from DRAM to IRAM.
+		 */
+		if (current_region->type == ATH10K_MEM_REGION_TYPE_IRAM1 ||
+		    current_region->type == ATH10K_MEM_REGION_TYPE_IRAM2) {
+			shift = current_region->start >> 20;
+
+			ret = ath10k_pci_set_ram_config(ar, shift);
+			if (ret) {
+				ath10k_warn(ar, "failed to switch ram config to IRAM for section %s: %d\n",
+					    current_region->name, ret);
+				break;
+			}
+		}
+
+		/* Reserve space for the header. */
+		hdr = (void *)buf;
+		buf += sizeof(*hdr);
+		buf_len -= sizeof(*hdr);
+
+		if (current_region->section_table.size > 0) {
+			/* Copy each section individually. */
+			count = ath10k_pci_dump_memory_section(ar,
+							       current_region,
+							       buf,
+							       current_region->len);
+		} else {
+			/* No individual memory sections defined so we can
+			 * copy the entire memory region.
+			 */
+			ret = ath10k_pci_diag_read_mem(ar,
+						       current_region->start,
+						       buf,
+						       current_region->len);
+			if (ret) {
+				ath10k_warn(ar, "failed to copy ramdump region %s: %d\n",
+					    current_region->name, ret);
+				break;
+			}
+
+			count = current_region->len;
+		}
+
+		hdr->region_type = cpu_to_le32(current_region->type);
+		hdr->start = cpu_to_le32(current_region->start);
+		hdr->length = cpu_to_le32(count);
+
+		if (count == 0)
+			/* Note: the header remains, just with zero length. */
+			break;
+
+		buf += count;
+		buf_len -= count;
+
+		current_region++;
+	}
+}
+
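About the shift in ath10k_pci_dump_memory() above: shift = current_region->start >> 20 means the value handed to ath10k_pci_set_ram_config() is, in effect, the 1 MiB bank index of the region base. A quick arithmetic sketch; the address below is illustrative only, not a real IRAM base:

#include <stdio.h>

int main(void)
{
	unsigned int start = 0x00980000;	/* illustrative region base */
	unsigned int config = start >> 20;	/* same computation as the driver */

	printf("ram config value: %u\n", config);	/* 9, i.e. floor(start / 1 MiB) */
	return 0;
}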
 static void ath10k_pci_fw_crashed_dump(struct ath10k *ar)
 {
 	struct ath10k_fw_crash_data *crash_data;
@@ -1470,7 +1685,7 @@ static void ath10k_pci_fw_crashed_dump(struct ath10k *ar)
 
 	ar->stats.fw_crash_counter++;
 
-	crash_data = ath10k_debug_get_new_fw_crash_data(ar);
+	crash_data = ath10k_coredump_new(ar);
 
 	if (crash_data)
 		scnprintf(guid, sizeof(guid), "%pUl", &crash_data->guid);
@@ -1481,6 +1696,7 @@ static void ath10k_pci_fw_crashed_dump(struct ath10k *ar)
 	ath10k_print_driver_info(ar);
 	ath10k_pci_dump_registers(ar, crash_data);
 	ath10k_ce_dump_registers(ar, crash_data);
+	ath10k_pci_dump_memory(ar, crash_data);
 
 	spin_unlock_bh(&ar->data_lock);
 
@@ -1858,7 +2074,7 @@ int ath10k_pci_hif_exchange_bmi_msg(struct ath10k *ar,
 
 	ret = ath10k_pci_bmi_wait(ar, ce_tx, ce_rx, &xfer);
 	if (ret) {
-		u32 unused_buffer;
+		dma_addr_t unused_buffer;
 		unsigned int unused_nbytes;
 		unsigned int unused_id;
 
@@ -1871,7 +2087,7 @@ int ath10k_pci_hif_exchange_bmi_msg(struct ath10k *ar,
 
 err_resp:
 	if (resp) {
-		u32 unused_buffer;
+		dma_addr_t unused_buffer;
 
 		ath10k_ce_revoke_recv_next(ce_rx, NULL, &unused_buffer);
 		dma_unmap_single(ar->dev, resp_paddr,
diff --git a/drivers/net/wireless/ath/ath10k/pci.h b/drivers/net/wireless/ath/ath10k/pci.h
index 08704fbc11e3..e52fd83156b6 100644
--- a/drivers/net/wireless/ath/ath10k/pci.h
+++ b/drivers/net/wireless/ath/ath10k/pci.h
@@ -1,6 +1,6 @@
 /*
  * Copyright (c) 2005-2011 Atheros Communications Inc.
- * Copyright (c) 2011-2013 Qualcomm Atheros, Inc.
+ * Copyright (c) 2011-2017 Qualcomm Atheros, Inc.
  *
  * Permission to use, copy, modify, and/or distribute this software for any
  * purpose with or without fee is hereby granted, provided that the above
diff --git a/drivers/net/wireless/ath/ath10k/rx_desc.h b/drivers/net/wireless/ath/ath10k/rx_desc.h
index 28da14398951..545deb6d7af1 100644
--- a/drivers/net/wireless/ath/ath10k/rx_desc.h
+++ b/drivers/net/wireless/ath/ath10k/rx_desc.h
@@ -1,6 +1,6 @@
 /*
  * Copyright (c) 2005-2011 Atheros Communications Inc.
- * Copyright (c) 2011-2013 Qualcomm Atheros, Inc.
+ * Copyright (c) 2011-2017 Qualcomm Atheros, Inc.
  *
  * Permission to use, copy, modify, and/or distribute this software for any
  * purpose with or without fee is hereby granted, provided that the above
@@ -210,6 +210,10 @@ struct rx_frag_info {
 	u8 ring1_more_count;
 	u8 ring2_more_count;
 	u8 ring3_more_count;
+	u8 ring4_more_count;
+	u8 ring5_more_count;
+	u8 ring6_more_count;
+	u8 ring7_more_count;
 } __packed;
 
 /*
@@ -471,10 +475,16 @@ struct rx_msdu_start_qca99x0 {
 	__le32 info2; /* %RX_MSDU_START_INFO2_ */
 } __packed;
 
+struct rx_msdu_start_wcn3990 {
+	__le32 info2; /* %RX_MSDU_START_INFO2_ */
+	__le32 info3; /* %RX_MSDU_START_INFO3_ */
+} __packed;
+
 struct rx_msdu_start {
 	struct rx_msdu_start_common common;
 	union {
 		struct rx_msdu_start_qca99x0 qca99x0;
+		struct rx_msdu_start_wcn3990 wcn3990;
 	} __packed;
 } __packed;
 
@@ -595,10 +605,23 @@ struct rx_msdu_end_qca99x0 {
 	__le32 info2;
 } __packed;
 
+struct rx_msdu_end_wcn3990 {
+	__le32 ipv6_crc;
+	__le32 tcp_seq_no;
+	__le32 tcp_ack_no;
+	__le32 info1;
+	__le32 info2;
+	__le32 rule_indication_0;
+	__le32 rule_indication_1;
+	__le32 rule_indication_2;
+	__le32 rule_indication_3;
+} __packed;
+
 struct rx_msdu_end {
 	struct rx_msdu_end_common common;
 	union {
 		struct rx_msdu_end_qca99x0 qca99x0;
+		struct rx_msdu_end_wcn3990 wcn3990;
 	} __packed;
 } __packed;
 
@@ -963,6 +986,12 @@ struct rx_pkt_end {
 	__le32 phy_timestamp_2;
 } __packed;
 
+struct rx_pkt_end_wcn3990 {
+	__le32 info0; /* %RX_PKT_END_INFO0_ */
+	__le64 phy_timestamp_1;
+	__le64 phy_timestamp_2;
+} __packed;
+
 #define RX_LOCATION_INFO0_RTT_FAC_LEGACY_MASK		0x00003fff
 #define RX_LOCATION_INFO0_RTT_FAC_LEGACY_LSB		0
 #define RX_LOCATION_INFO0_RTT_FAC_VHT_MASK		0x1fff8000
@@ -998,6 +1027,12 @@ struct rx_location_info {
 	__le32 rx_location_info1; /* %RX_LOCATION_INFO1_ */
 } __packed;
 
+struct rx_location_info_wcn3990 {
+	__le32 rx_location_info0; /* %RX_LOCATION_INFO0_ */
+	__le32 rx_location_info1; /* %RX_LOCATION_INFO1_ */
+	__le32 rx_location_info2; /* %RX_LOCATION_INFO2_ */
+} __packed;
+
 enum rx_phy_ppdu_end_info0 {
 	RX_PHY_PPDU_END_INFO0_ERR_RADAR           = BIT(2),
 	RX_PHY_PPDU_END_INFO0_ERR_RX_ABORT        = BIT(3),
@@ -1086,6 +1121,20 @@ struct rx_ppdu_end_qca9984 {
 	__le16 info1; /* %RX_PPDU_END_INFO1_ */
 } __packed;
 
+struct rx_ppdu_end_wcn3990 {
+	struct rx_pkt_end_wcn3990 rx_pkt_end;
+	struct rx_location_info_wcn3990 rx_location_info;
+	struct rx_phy_ppdu_end rx_phy_ppdu_end;
+	__le32 rx_timing_offset;
+	__le32 reserved_info_0;
+	__le32 reserved_info_1;
+	__le32 rx_antenna_info;
+	__le32 rx_coex_info;
+	__le32 rx_mpdu_cnt_info;
+	__le64 phy_timestamp_tx;
+	__le32 rx_bb_length;
+} __packed;
+
 struct rx_ppdu_end {
 	struct rx_ppdu_end_common common;
 	union {
@@ -1093,6 +1142,7 @@ struct rx_ppdu_end {
 		struct rx_ppdu_end_qca6174 qca6174;
 		struct rx_ppdu_end_qca99x0 qca99x0;
 		struct rx_ppdu_end_qca9984 qca9984;
+		struct rx_ppdu_end_wcn3990 wcn3990;
 	} __packed;
 } __packed;
 
diff --git a/drivers/net/wireless/ath/ath10k/spectral.c b/drivers/net/wireless/ath/ath10k/spectral.c
index 2048b1e5262b..af6995de7e00 100644
--- a/drivers/net/wireless/ath/ath10k/spectral.c
+++ b/drivers/net/wireless/ath/ath10k/spectral.c
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2013 Qualcomm Atheros, Inc.
+ * Copyright (c) 2013-2017 Qualcomm Atheros, Inc.
  *
  * Permission to use, copy, modify, and/or distribute this software for any
  * purpose with or without fee is hereby granted, provided that the above
diff --git a/drivers/net/wireless/ath/ath10k/spectral.h b/drivers/net/wireless/ath/ath10k/spectral.h
index 89b0ad769d4f..13276f4dc12c 100644
--- a/drivers/net/wireless/ath/ath10k/spectral.h
+++ b/drivers/net/wireless/ath/ath10k/spectral.h
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2013 Qualcomm Atheros, Inc.
+ * Copyright (c) 2013-2015 Qualcomm Atheros, Inc.
  *
  * Permission to use, copy, modify, and/or distribute this software for any
  * purpose with or without fee is hereby granted, provided that the above
@@ -44,7 +44,7 @@ enum ath10k_spectral_mode {
 	SPECTRAL_MANUAL,
 };
 
-#ifdef CONFIG_ATH10K_DEBUGFS
+#ifdef CONFIG_ATH10K_SPECTRAL
 
 int ath10k_spectral_process_fft(struct ath10k *ar,
 				struct wmi_phyerr_ev_arg *phyerr,
@@ -85,6 +85,6 @@ static inline void ath10k_spectral_destroy(struct ath10k *ar)
 {
 }
 
-#endif /* CONFIG_ATH10K_DEBUGFS */
+#endif /* CONFIG_ATH10K_SPECTRAL */
 
 #endif /* SPECTRAL_H */
diff --git a/drivers/net/wireless/ath/ath10k/swap.c b/drivers/net/wireless/ath/ath10k/swap.c
index adf4592374b4..e7f57efadae1 100644
--- a/drivers/net/wireless/ath/ath10k/swap.c
+++ b/drivers/net/wireless/ath/ath10k/swap.c
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2015 Qualcomm Atheros, Inc.
+ * Copyright (c) 2015-2016 Qualcomm Atheros, Inc.
  *
  * Permission to use, copy, modify, and/or distribute this software for any
  * purpose with or without fee is hereby granted, provided that the above
diff --git a/drivers/net/wireless/ath/ath10k/swap.h b/drivers/net/wireless/ath/ath10k/swap.h
index f5dc0476493e..fa602f15fa93 100644
--- a/drivers/net/wireless/ath/ath10k/swap.h
+++ b/drivers/net/wireless/ath/ath10k/swap.h
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2015 Qualcomm Atheros, Inc.
+ * Copyright (c) 2015-2016 Qualcomm Atheros, Inc.
  *
  * Permission to use, copy, modify, and/or distribute this software for any
  * purpose with or without fee is hereby granted, provided that the above
diff --git a/drivers/net/wireless/ath/ath10k/targaddrs.h b/drivers/net/wireless/ath/ath10k/targaddrs.h
index 8bded5da9d0d..c2b5bad0459b 100644
--- a/drivers/net/wireless/ath/ath10k/targaddrs.h
+++ b/drivers/net/wireless/ath/ath10k/targaddrs.h
@@ -1,6 +1,6 @@
 /*
  * Copyright (c) 2005-2011 Atheros Communications Inc.
- * Copyright (c) 2011-2013 Qualcomm Atheros, Inc.
+ * Copyright (c) 2011-2016 Qualcomm Atheros, Inc.
  *
  * Permission to use, copy, modify, and/or distribute this software for any
  * purpose with or without fee is hereby granted, provided that the above
diff --git a/drivers/net/wireless/ath/ath10k/testmode.c b/drivers/net/wireless/ath/ath10k/testmode.c
index 9d3eb258ac2f..568810b41657 100644
--- a/drivers/net/wireless/ath/ath10k/testmode.c
+++ b/drivers/net/wireless/ath/ath10k/testmode.c
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2014 Qualcomm Atheros, Inc.
+ * Copyright (c) 2014-2017 Qualcomm Atheros, Inc.
  *
  * Permission to use, copy, modify, and/or distribute this software for any
  * purpose with or without fee is hereby granted, provided that the above
diff --git a/drivers/net/wireless/ath/ath10k/testmode_i.h b/drivers/net/wireless/ath/ath10k/testmode_i.h
index 191a8f34c8ea..6514d1a14242 100644
--- a/drivers/net/wireless/ath/ath10k/testmode_i.h
+++ b/drivers/net/wireless/ath/ath10k/testmode_i.h
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2014 Qualcomm Atheros, Inc.
+ * Copyright (c) 2014,2017 Qualcomm Atheros, Inc.
  *
  * Permission to use, copy, modify, and/or distribute this software for any
  * purpose with or without fee is hereby granted, provided that the above
diff --git a/drivers/net/wireless/ath/ath10k/thermal.c b/drivers/net/wireless/ath/ath10k/thermal.c
index ef717b631ff8..aa8978a8d751 100644
--- a/drivers/net/wireless/ath/ath10k/thermal.c
+++ b/drivers/net/wireless/ath/ath10k/thermal.c
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2014 Qualcomm Atheros, Inc.
+ * Copyright (c) 2014-2015 Qualcomm Atheros, Inc.
  *
  * Permission to use, copy, modify, and/or distribute this software for any
  * purpose with or without fee is hereby granted, provided that the above
diff --git a/drivers/net/wireless/ath/ath10k/thermal.h b/drivers/net/wireless/ath/ath10k/thermal.h
index 3abb97f63b1e..65e2419543f9 100644
--- a/drivers/net/wireless/ath/ath10k/thermal.h
+++ b/drivers/net/wireless/ath/ath10k/thermal.h
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2014 Qualcomm Atheros, Inc.
+ * Copyright (c) 2014-2016 Qualcomm Atheros, Inc.
  *
  * Permission to use, copy, modify, and/or distribute this software for any
  * purpose with or without fee is hereby granted, provided that the above
diff --git a/drivers/net/wireless/ath/ath10k/trace.h b/drivers/net/wireless/ath/ath10k/trace.h
index e0d00cef0bd8..e40edced1d82 100644
--- a/drivers/net/wireless/ath/ath10k/trace.h
+++ b/drivers/net/wireless/ath/ath10k/trace.h
@@ -1,6 +1,6 @@
 /*
  * Copyright (c) 2005-2011 Atheros Communications Inc.
- * Copyright (c) 2011-2013 Qualcomm Atheros, Inc.
+ * Copyright (c) 2011-2016 Qualcomm Atheros, Inc.
  *
  * Permission to use, copy, modify, and/or distribute this software for any
  * purpose with or without fee is hereby granted, provided that the above
diff --git a/drivers/net/wireless/ath/ath10k/txrx.c b/drivers/net/wireless/ath/ath10k/txrx.c
index d4986f626c35..5b3b021526ab 100644
--- a/drivers/net/wireless/ath/ath10k/txrx.c
+++ b/drivers/net/wireless/ath/ath10k/txrx.c
@@ -1,6 +1,6 @@
 /*
  * Copyright (c) 2005-2011 Atheros Communications Inc.
- * Copyright (c) 2011-2013 Qualcomm Atheros, Inc.
+ * Copyright (c) 2011-2016 Qualcomm Atheros, Inc.
  *
  * Permission to use, copy, modify, and/or distribute this software for any
  * purpose with or without fee is hereby granted, provided that the above
diff --git a/drivers/net/wireless/ath/ath10k/txrx.h b/drivers/net/wireless/ath/ath10k/txrx.h
index e7ea1ae1c438..2bf401e436d3 100644
--- a/drivers/net/wireless/ath/ath10k/txrx.h
+++ b/drivers/net/wireless/ath/ath10k/txrx.h
@@ -1,6 +1,6 @@
 /*
  * Copyright (c) 2005-2011 Atheros Communications Inc.
- * Copyright (c) 2011-2013 Qualcomm Atheros, Inc.
+ * Copyright (c) 2011-2014,2016 Qualcomm Atheros, Inc.
  *
  * Permission to use, copy, modify, and/or distribute this software for any
  * purpose with or without fee is hereby granted, provided that the above
diff --git a/drivers/net/wireless/ath/ath10k/wmi-ops.h b/drivers/net/wireless/ath/ath10k/wmi-ops.h
index 2fc3f24ff1ca..14093cfdc505 100644
--- a/drivers/net/wireless/ath/ath10k/wmi-ops.h
+++ b/drivers/net/wireless/ath/ath10k/wmi-ops.h
@@ -1,6 +1,6 @@
 /*
  * Copyright (c) 2005-2011 Atheros Communications Inc.
- * Copyright (c) 2011-2014 Qualcomm Atheros, Inc.
+ * Copyright (c) 2011-2017 Qualcomm Atheros, Inc.
  *
  * Permission to use, copy, modify, and/or distribute this software for any
  * purpose with or without fee is hereby granted, provided that the above
@@ -377,6 +377,7 @@ ath10k_wmi_mgmt_tx(struct ath10k *ar, struct sk_buff *msdu)
 	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(msdu);
 	struct sk_buff *skb;
 	int ret;
+	u32 mgmt_tx_cmdid;
 
 	if (!ar->wmi.ops->gen_mgmt_tx)
 		return -EOPNOTSUPP;
@@ -385,7 +386,13 @@ ath10k_wmi_mgmt_tx(struct ath10k *ar, struct sk_buff *msdu)
 	if (IS_ERR(skb))
 		return PTR_ERR(skb);
 
-	ret = ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->mgmt_tx_cmdid);
+	if (test_bit(ATH10K_FW_FEATURE_MGMT_TX_BY_REF,
+		     ar->running_fw->fw_file.fw_features))
+		mgmt_tx_cmdid = ar->wmi.cmd->mgmt_tx_send_cmdid;
+	else
+		mgmt_tx_cmdid = ar->wmi.cmd->mgmt_tx_cmdid;
+
+	ret = ath10k_wmi_cmd_send(ar, skb, mgmt_tx_cmdid);
 	if (ret)
 		return ret;
 
diff --git a/drivers/net/wireless/ath/ath10k/wmi-tlv.c b/drivers/net/wireless/ath/ath10k/wmi-tlv.c
index 7616c1c4bbd3..ae77a007ae07 100644
--- a/drivers/net/wireless/ath/ath10k/wmi-tlv.c
+++ b/drivers/net/wireless/ath/ath10k/wmi-tlv.c
@@ -1,6 +1,6 @@
 /*
  * Copyright (c) 2005-2011 Atheros Communications Inc.
- * Copyright (c) 2011-2014 Qualcomm Atheros, Inc.
+ * Copyright (c) 2011-2017 Qualcomm Atheros, Inc.
  *
  * Permission to use, copy, modify, and/or distribute this software for any
  * purpose with or without fee is hereby granted, provided that the above
@@ -917,33 +917,69 @@ ath10k_wmi_tlv_parse_mem_reqs(struct ath10k *ar, u16 tag, u16 len,
 	return -ENOMEM;
 }
 
+struct wmi_tlv_svc_rdy_parse {
+	const struct hal_reg_capabilities *reg;
+	const struct wmi_tlv_svc_rdy_ev *ev;
+	const __le32 *svc_bmap;
+	const struct wlan_host_mem_req *mem_reqs;
+	bool svc_bmap_done;
+	bool dbs_hw_mode_done;
+};
+
+static int ath10k_wmi_tlv_svc_rdy_parse(struct ath10k *ar, u16 tag, u16 len,
+					const void *ptr, void *data)
+{
+	struct wmi_tlv_svc_rdy_parse *svc_rdy = data;
+
+	switch (tag) {
+	case WMI_TLV_TAG_STRUCT_SERVICE_READY_EVENT:
+		svc_rdy->ev = ptr;
+		break;
+	case WMI_TLV_TAG_STRUCT_HAL_REG_CAPABILITIES:
+		svc_rdy->reg = ptr;
+		break;
+	case WMI_TLV_TAG_ARRAY_STRUCT:
+		svc_rdy->mem_reqs = ptr;
+		break;
+	case WMI_TLV_TAG_ARRAY_UINT32:
+		if (!svc_rdy->svc_bmap_done) {
+			svc_rdy->svc_bmap_done = true;
+			svc_rdy->svc_bmap = ptr;
+		} else if (!svc_rdy->dbs_hw_mode_done) {
+			svc_rdy->dbs_hw_mode_done = true;
+		}
+		break;
+	default:
+		break;
+	}
+	return 0;
+}
+
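The parser added above follows the driver's TLV walking pattern: ath10k_wmi_tlv_iter() calls a per-tag callback with a state struct, which is what allows one tag to appear twice with different meanings (judging by the flag names, WMI_TLV_TAG_ARRAY_UINT32 carries the service bitmap first and the DBS hw-mode list second, disambiguated purely by order). A minimal sketch of that callback-plus-state idea, with made-up tags and a trivial walker:

#include <stdio.h>

enum { TAG_A = 1, TAG_B = 2 };

struct tlv { int tag; int value; };

struct parse_state {
	int first_b_seen;
	int first_b, second_b;
};

static int on_tlv(const struct tlv *t, struct parse_state *st)
{
	switch (t->tag) {
	case TAG_B:
		/* same tag used twice; order decides the meaning */
		if (!st->first_b_seen) {
			st->first_b_seen = 1;
			st->first_b = t->value;
		} else {
			st->second_b = t->value;
		}
		break;
	default:
		break;
	}
	return 0;
}

int main(void)
{
	struct tlv stream[] = { { TAG_A, 7 }, { TAG_B, 10 }, { TAG_B, 20 } };
	struct parse_state st = { 0 };
	unsigned int i;

	for (i = 0; i < sizeof(stream) / sizeof(stream[0]); i++)
		on_tlv(&stream[i], &st);

	printf("first=%d second=%d\n", st.first_b, st.second_b);
	return 0;
}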
 static int ath10k_wmi_tlv_op_pull_svc_rdy_ev(struct ath10k *ar,
 					     struct sk_buff *skb,
 					     struct wmi_svc_rdy_ev_arg *arg)
 {
-	const void **tb;
 	const struct hal_reg_capabilities *reg;
 	const struct wmi_tlv_svc_rdy_ev *ev;
 	const __le32 *svc_bmap;
 	const struct wlan_host_mem_req *mem_reqs;
+	struct wmi_tlv_svc_rdy_parse svc_rdy = { };
 	int ret;
 
-	tb = ath10k_wmi_tlv_parse_alloc(ar, skb->data, skb->len, GFP_ATOMIC);
-	if (IS_ERR(tb)) {
-		ret = PTR_ERR(tb);
+	ret = ath10k_wmi_tlv_iter(ar, skb->data, skb->len,
+				  ath10k_wmi_tlv_svc_rdy_parse, &svc_rdy);
+	if (ret) {
 		ath10k_warn(ar, "failed to parse tlv: %d\n", ret);
 		return ret;
 	}
 
-	ev = tb[WMI_TLV_TAG_STRUCT_SERVICE_READY_EVENT];
-	reg = tb[WMI_TLV_TAG_STRUCT_HAL_REG_CAPABILITIES];
-	svc_bmap = tb[WMI_TLV_TAG_ARRAY_UINT32];
-	mem_reqs = tb[WMI_TLV_TAG_ARRAY_STRUCT];
+	ev = svc_rdy.ev;
+	reg = svc_rdy.reg;
+	svc_bmap = svc_rdy.svc_bmap;
+	mem_reqs = svc_rdy.mem_reqs;
 
-	if (!ev || !reg || !svc_bmap || !mem_reqs) {
-		kfree(tb);
+	if (!ev || !reg || !svc_bmap || !mem_reqs)
 		return -EPROTO;
-	}
 
 	/* This is an internal ABI compatibility check for WMI TLV so check it
 	 * here instead of the generic WMI code.
@@ -961,7 +997,6 @@ static int ath10k_wmi_tlv_op_pull_svc_rdy_ev(struct ath10k *ar,
 	    __le32_to_cpu(ev->abi.abi_ver_ns1) != WMI_TLV_ABI_VER_NS1 ||
 	    __le32_to_cpu(ev->abi.abi_ver_ns2) != WMI_TLV_ABI_VER_NS2 ||
 	    __le32_to_cpu(ev->abi.abi_ver_ns3) != WMI_TLV_ABI_VER_NS3) {
-		kfree(tb);
 		return -ENOTSUPP;
 	}
 
@@ -982,12 +1017,10 @@ static int ath10k_wmi_tlv_op_pull_svc_rdy_ev(struct ath10k *ar,
 	ret = ath10k_wmi_tlv_iter(ar, mem_reqs, ath10k_wmi_tlv_len(mem_reqs),
 				  ath10k_wmi_tlv_parse_mem_reqs, arg);
 	if (ret) {
-		kfree(tb);
 		ath10k_warn(ar, "failed to parse mem_reqs tlv: %d\n", ret);
 		return ret;
 	}
 
-	kfree(tb);
 	return 0;
 }
 
@@ -1406,7 +1439,10 @@ static struct sk_buff *ath10k_wmi_tlv_op_gen_init(struct ath10k *ar)
 	cmd->num_host_mem_chunks = __cpu_to_le32(ar->wmi.num_mem_chunks);
 
 	cfg->num_vdevs = __cpu_to_le32(TARGET_TLV_NUM_VDEVS);
-	cfg->num_peers = __cpu_to_le32(TARGET_TLV_NUM_PEERS);
+
+	cfg->num_peers = __cpu_to_le32(ar->hw_params.num_peers);
+	cfg->ast_skid_limit = __cpu_to_le32(ar->hw_params.ast_skid_limit);
+	cfg->num_wds_entries = __cpu_to_le32(ar->hw_params.num_wds_entries);
 
 	if (test_bit(WMI_SERVICE_RX_FULL_REORDER, ar->wmi.svc_map)) {
 		cfg->num_offload_peers = __cpu_to_le32(TARGET_TLV_NUM_VDEVS);
@@ -1418,7 +1454,6 @@ static struct sk_buff *ath10k_wmi_tlv_op_gen_init(struct ath10k *ar)
 
 	cfg->num_peer_keys = __cpu_to_le32(2);
 	cfg->num_tids = __cpu_to_le32(TARGET_TLV_NUM_TIDS);
-	cfg->ast_skid_limit = __cpu_to_le32(0x10);
 	cfg->tx_chain_mask = __cpu_to_le32(0x7);
 	cfg->rx_chain_mask = __cpu_to_le32(0x7);
 	cfg->rx_timeout_pri[0] = __cpu_to_le32(0x64);
@@ -1434,7 +1469,6 @@ static struct sk_buff *ath10k_wmi_tlv_op_gen_init(struct ath10k *ar)
 	cfg->num_mcast_table_elems = __cpu_to_le32(0);
 	cfg->mcast2ucast_mode = __cpu_to_le32(0);
 	cfg->tx_dbg_log_size = __cpu_to_le32(0x400);
-	cfg->num_wds_entries = __cpu_to_le32(0x20);
 	cfg->dma_burst_size = __cpu_to_le32(0);
 	cfg->mac_aggr_delim = __cpu_to_le32(0);
 	cfg->rx_skip_defrag_timeout_dup_detection_check = __cpu_to_le32(0);
@@ -2450,6 +2484,82 @@ ath10k_wmi_tlv_op_gen_request_stats(struct ath10k *ar, u32 stats_mask)
 }
 
 static struct sk_buff *
+ath10k_wmi_tlv_op_gen_mgmt_tx(struct ath10k *ar, struct sk_buff *msdu)
+{
+	struct ath10k_skb_cb *cb = ATH10K_SKB_CB(msdu);
+	struct wmi_tlv_mgmt_tx_cmd *cmd;
+	struct wmi_tlv *tlv;
+	struct ieee80211_hdr *hdr;
+	struct sk_buff *skb;
+	void *ptr;
+	int len;
+	u32 buf_len = msdu->len;
+	struct ath10k_vif *arvif;
+	dma_addr_t mgmt_frame_dma;
+	u32 vdev_id;
+
+	if (!cb->vif)
+		return ERR_PTR(-EINVAL);
+
+	hdr = (struct ieee80211_hdr *)msdu->data;
+	arvif = (void *)cb->vif->drv_priv;
+	vdev_id = arvif->vdev_id;
+
+	if (WARN_ON_ONCE(!ieee80211_is_mgmt(hdr->frame_control)))
+		return ERR_PTR(-EINVAL);
+
+	len = sizeof(*cmd) + 2 * sizeof(*tlv);
+
+	if ((ieee80211_is_action(hdr->frame_control) ||
+	     ieee80211_is_deauth(hdr->frame_control) ||
+	     ieee80211_is_disassoc(hdr->frame_control)) &&
+	     ieee80211_has_protected(hdr->frame_control)) {
+		len += IEEE80211_CCMP_MIC_LEN;
+		buf_len += IEEE80211_CCMP_MIC_LEN;
+	}
+
+	buf_len = min_t(u32, buf_len, WMI_TLV_MGMT_TX_FRAME_MAX_LEN);
+	buf_len = round_up(buf_len, 4);
+
+	len += buf_len;
+	len = round_up(len, 4);
+	skb = ath10k_wmi_alloc_skb(ar, len);
+	if (!skb)
+		return ERR_PTR(-ENOMEM);
+
+	ptr = (void *)skb->data;
+	tlv = ptr;
+	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_MGMT_TX_CMD);
+	tlv->len = __cpu_to_le16(sizeof(*cmd));
+	cmd = (void *)tlv->value;
+	cmd->vdev_id = __cpu_to_le32(vdev_id);
+	cmd->desc_id = 0;
+	cmd->chanfreq = 0;
+	cmd->buf_len = __cpu_to_le32(buf_len);
+	cmd->frame_len = __cpu_to_le32(msdu->len);
+	mgmt_frame_dma = dma_map_single(arvif->ar->dev, msdu->data,
+					msdu->len, DMA_TO_DEVICE);
+	if (dma_mapping_error(arvif->ar->dev, mgmt_frame_dma)) {
+		dev_kfree_skb(skb);
+		return ERR_PTR(-ENOMEM);
+	}
+
+	cmd->paddr = __cpu_to_le64(mgmt_frame_dma);
+
+	ptr += sizeof(*tlv);
+	ptr += sizeof(*cmd);
+
+	tlv = ptr;
+	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_ARRAY_BYTE);
+	tlv->len = __cpu_to_le16(buf_len);
+
+	ptr += sizeof(*tlv);
+	memcpy(ptr, msdu->data, buf_len);
+
+	return skb;
+}
+
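The command built by ath10k_wmi_tlv_op_gen_mgmt_tx() above is two TLVs back to back — [tlv][wmi_tlv_mgmt_tx_cmd][tlv][frame bytes] — with the inlined copy capped at WMI_TLV_MGMT_TX_FRAME_MAX_LEN and rounded to 4 bytes, while the full frame travels by reference through cmd->paddr. A sketch of the same length arithmetic; the header and command sizes below are illustrative stand-ins for the real structs, and the CCMP MIC extension for protected frames is omitted:

#include <stdio.h>

#define TLV_HDR_LEN	4	/* stand-in for sizeof(struct wmi_tlv) */
#define CMD_LEN		28	/* stand-in for sizeof(struct wmi_tlv_mgmt_tx_cmd) */
#define FRAME_MAX_LEN	64	/* WMI_TLV_MGMT_TX_FRAME_MAX_LEN */

static unsigned int round_up4(unsigned int x) { return (x + 3) & ~3u; }

int main(void)
{
	unsigned int msdu_len = 170;	/* made-up management frame length */
	unsigned int buf_len = msdu_len;
	unsigned int len = CMD_LEN + 2 * TLV_HDR_LEN;

	if (buf_len > FRAME_MAX_LEN)	/* min_t(u32, ...) in the driver */
		buf_len = FRAME_MAX_LEN;
	buf_len = round_up4(buf_len);

	len = round_up4(len + buf_len);
	printf("buf_len=%u total len=%u\n", buf_len, len); /* 64 and 100 */
	return 0;
}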
+static struct sk_buff *
 ath10k_wmi_tlv_op_gen_force_fw_hang(struct ath10k *ar,
 				    enum wmi_force_fw_hang_type type,
 				    u32 delay_ms)
@@ -3258,6 +3366,7 @@ static struct wmi_cmd_map wmi_tlv_cmd_map = {
 	.bcn_filter_rx_cmdid = WMI_TLV_BCN_FILTER_RX_CMDID,
 	.prb_req_filter_rx_cmdid = WMI_TLV_PRB_REQ_FILTER_RX_CMDID,
 	.mgmt_tx_cmdid = WMI_TLV_MGMT_TX_CMDID,
+	.mgmt_tx_send_cmdid = WMI_TLV_MGMT_TX_SEND_CMD,
 	.prb_tmpl_cmdid = WMI_TLV_PRB_TMPL_CMDID,
 	.addba_clear_resp_cmdid = WMI_TLV_ADDBA_CLEAR_RESP_CMDID,
 	.addba_send_cmdid = WMI_TLV_ADDBA_SEND_CMDID,
@@ -3592,6 +3701,6 @@ static const struct wmi_ops wmi_tlv_ops = {
 	.gen_request_stats = ath10k_wmi_tlv_op_gen_request_stats,
 	.gen_force_fw_hang = ath10k_wmi_tlv_op_gen_force_fw_hang,
-	/* .gen_mgmt_tx = not implemented; HTT is used */
+	.gen_mgmt_tx = ath10k_wmi_tlv_op_gen_mgmt_tx,
 	.gen_dbglog_cfg = ath10k_wmi_tlv_op_gen_dbglog_cfg,
 	.gen_pktlog_enable = ath10k_wmi_tlv_op_gen_pktlog_enable,
 	.gen_pktlog_disable = ath10k_wmi_tlv_op_gen_pktlog_disable,
diff --git a/drivers/net/wireless/ath/ath10k/wmi-tlv.h b/drivers/net/wireless/ath/ath10k/wmi-tlv.h
index 22cf011e839a..da89128e8dd6 100644
--- a/drivers/net/wireless/ath/ath10k/wmi-tlv.h
+++ b/drivers/net/wireless/ath/ath10k/wmi-tlv.h
@@ -1,6 +1,6 @@
 /*
  * Copyright (c) 2005-2011 Atheros Communications Inc.
- * Copyright (c) 2011-2014 Qualcomm Atheros, Inc.
+ * Copyright (c) 2011-2017 Qualcomm Atheros, Inc.
  *
  * Permission to use, copy, modify, and/or distribute this software for any
  * purpose with or without fee is hereby granted, provided that the above
@@ -22,6 +22,7 @@
 #define WMI_TLV_CMD_UNSUPPORTED 0
 #define WMI_TLV_PDEV_PARAM_UNSUPPORTED 0
 #define WMI_TLV_VDEV_PARAM_UNSUPPORTED 0
+#define WMI_TLV_MGMT_TX_FRAME_MAX_LEN	64
 
 enum wmi_tlv_grp_id {
 	WMI_TLV_GRP_START = 0x3,
@@ -132,6 +133,7 @@ enum wmi_tlv_cmd_id {
 	WMI_TLV_PRB_REQ_FILTER_RX_CMDID,
 	WMI_TLV_MGMT_TX_CMDID,
 	WMI_TLV_PRB_TMPL_CMDID,
+	WMI_TLV_MGMT_TX_SEND_CMD,
 	WMI_TLV_ADDBA_CLEAR_RESP_CMDID = WMI_TLV_CMD(WMI_TLV_GRP_BA_NEG),
 	WMI_TLV_ADDBA_SEND_CMDID,
 	WMI_TLV_ADDBA_STATUS_CMDID,
@@ -890,6 +892,63 @@ enum wmi_tlv_tag {
 	WMI_TLV_TAG_STRUCT_SAP_OFL_DEL_STA_EVENT,
 	WMI_TLV_TAG_STRUCT_APFIND_CMD_PARAM,
 	WMI_TLV_TAG_STRUCT_APFIND_EVENT_HDR,
+	WMI_TLV_TAG_STRUCT_OCB_SET_SCHED_CMD,
+	WMI_TLV_TAG_STRUCT_OCB_SET_SCHED_EVENT,
+	WMI_TLV_TAG_STRUCT_OCB_SET_CONFIG_CMD,
+	WMI_TLV_TAG_STRUCT_OCB_SET_CONFIG_RESP_EVENT,
+	WMI_TLV_TAG_STRUCT_OCB_SET_UTC_TIME_CMD,
+	WMI_TLV_TAG_STRUCT_OCB_START_TIMING_ADVERT_CMD,
+	WMI_TLV_TAG_STRUCT_OCB_STOP_TIMING_ADVERT_CMD,
+	WMI_TLV_TAG_STRUCT_OCB_GET_TSF_TIMER_CMD,
+	WMI_TLV_TAG_STRUCT_OCB_GET_TSF_TIMER_RESP_EVENT,
+	WMI_TLV_TAG_STRUCT_DCC_GET_STATS_CMD,
+	WMI_TLV_TAG_STRUCT_DCC_CHANNEL_STATS_REQUEST,
+	WMI_TLV_TAG_STRUCT_DCC_GET_STATS_RESP_EVENT,
+	WMI_TLV_TAG_STRUCT_DCC_CLEAR_STATS_CMD,
+	WMI_TLV_TAG_STRUCT_DCC_UPDATE_NDL_CMD,
+	WMI_TLV_TAG_STRUCT_DCC_UPDATE_NDL_RESP_EVENT,
+	WMI_TLV_TAG_STRUCT_DCC_STATS_EVENT,
+	WMI_TLV_TAG_STRUCT_OCB_CHANNEL,
+	WMI_TLV_TAG_STRUCT_OCB_SCHEDULE_ELEMENT,
+	WMI_TLV_TAG_STRUCT_DCC_NDL_STATS_PER_CHANNEL,
+	WMI_TLV_TAG_STRUCT_DCC_NDL_CHAN,
+	WMI_TLV_TAG_STRUCT_QOS_PARAMETER,
+	WMI_TLV_TAG_STRUCT_DCC_NDL_ACTIVE_STATE_CONFIG,
+	WMI_TLV_TAG_STRUCT_ROAM_SCAN_EXTENDED_THRESHOLD_PARAM,
+	WMI_TLV_TAG_STRUCT_ROAM_FILTER_FIXED_PARAM,
+	WMI_TLV_TAG_STRUCT_PASSPOINT_CONFIG_CMD,
+	WMI_TLV_TAG_STRUCT_PASSPOINT_EVENT_HDR,
+	WMI_TLV_TAG_STRUCT_EXTSCAN_CONFIGURE_HOTLIST_SSID_MONITOR_CMD,
+	WMI_TLV_TAG_STRUCT_EXTSCAN_HOTLIST_SSID_MATCH_EVENT,
+	WMI_TLV_TAG_STRUCT_VDEV_TSF_TSTAMP_ACTION_CMD,
+	WMI_TLV_TAG_STRUCT_VDEV_TSF_REPORT_EVENT,
+	WMI_TLV_TAG_STRUCT_GET_FW_MEM_DUMP,
+	WMI_TLV_TAG_STRUCT_UPDATE_FW_MEM_DUMP,
+	WMI_TLV_TAG_STRUCT_FW_MEM_DUMP_PARAMS,
+	WMI_TLV_TAG_STRUCT_DEBUG_MESG_FLUSH,
+	WMI_TLV_TAG_STRUCT_DEBUG_MESG_FLUSH_COMPLETE,
+	WMI_TLV_TAG_STRUCT_PEER_SET_RATE_REPORT_CONDITION,
+	WMI_TLV_TAG_STRUCT_ROAM_SUBNET_CHANGE_CONFIG,
+	WMI_TLV_TAG_STRUCT_VDEV_SET_IE_CMD,
+	WMI_TLV_TAG_STRUCT_RSSI_BREACH_MONITOR_CONFIG,
+	WMI_TLV_TAG_STRUCT_RSSI_BREACH_EVENT,
+	WMI_TLV_TAG_STRUCT_EVENT_INITIAL_WAKEUP,
+	WMI_TLV_TAG_STRUCT_SOC_SET_PCL_CMD,
+	WMI_TLV_TAG_STRUCT_SOC_SET_HW_MODE_CMD,
+	WMI_TLV_TAG_STRUCT_SOC_SET_HW_MODE_RESPONSE_EVENT,
+	WMI_TLV_TAG_STRUCT_SOC_HW_MODE_TRANSITION_EVENT,
+	WMI_TLV_TAG_STRUCT_VDEV_TXRX_STREAMS,
+	WMI_TLV_TAG_STRUCT_SOC_SET_HW_MODE_RESPONSE_VDEV_MAC_ENTRY,
+	WMI_TLV_TAG_STRUCT_SOC_SET_DUAL_MAC_CONFIG_CMD,
+	WMI_TLV_TAG_STRUCT_SOC_SET_DUAL_MAC_CONFIG_RESPONSE_EVENT,
+	WMI_TLV_TAG_STRUCT_IOAC_SOCK_PATTERN_T,
+	WMI_TLV_TAG_STRUCT_WOW_ENABLE_ICMPV6_NA_FLT_CMD,
+	WMI_TLV_TAG_STRUCT_DIAG_EVENT_LOG_CONFIG,
+	WMI_TLV_TAG_STRUCT_DIAG_EVENT_LOG_SUPPORTED_EVENT,
+	WMI_TLV_TAG_STRUCT_PACKET_FILTER_CONFIG,
+	WMI_TLV_TAG_STRUCT_PACKET_FILTER_ENABLE,
+	WMI_TLV_TAG_STRUCT_SAP_SET_BLACKLIST_PARAM_CMD,
+	WMI_TLV_TAG_STRUCT_MGMT_TX_CMD,
 
 	WMI_TLV_TAG_MAX
 };
@@ -965,6 +1024,50 @@ enum wmi_tlv_service {
 	WMI_TLV_SERVICE_STA_RX_IPA_OFFLOAD_SUPPORT,
 	WMI_TLV_SERVICE_MDNS_OFFLOAD,
 	WMI_TLV_SERVICE_SAP_AUTH_OFFLOAD,
+	WMI_TLV_SERVICE_DUAL_BAND_SIMULTANEOUS_SUPPORT,
+	WMI_TLV_SERVICE_OCB,
+	WMI_TLV_SERVICE_AP_ARPNS_OFFLOAD,
+	WMI_TLV_SERVICE_PER_BAND_CHAINMASK_SUPPORT,
+	WMI_TLV_SERVICE_PACKET_FILTER_OFFLOAD,
+	WMI_TLV_SERVICE_MGMT_TX_HTT,
+	WMI_TLV_SERVICE_MGMT_TX_WMI,
+	WMI_TLV_SERVICE_EXT_MSG,
+	WMI_TLV_SERVICE_MAWC,
+	WMI_TLV_SERVICE_PEER_ASSOC_CONF,
+	WMI_TLV_SERVICE_EGAP,
+	WMI_TLV_SERVICE_STA_PMF_OFFLOAD,
+	WMI_TLV_SERVICE_UNIFIED_WOW_CAPABILITY,
+	WMI_TLV_SERVICE_ENHANCED_PROXY_STA,
+	WMI_TLV_SERVICE_ATF,
+	WMI_TLV_SERVICE_COEX_GPIO,
+	WMI_TLV_SERVICE_AUX_SPECTRAL_INTF,
+	WMI_TLV_SERVICE_AUX_CHAN_LOAD_INTF,
+	WMI_TLV_SERVICE_BSS_CHANNEL_INFO_64,
+	WMI_TLV_SERVICE_ENTERPRISE_MESH,
+	WMI_TLV_SERVICE_RESTRT_CHNL_SUPPORT,
+	WMI_TLV_SERVICE_BPF_OFFLOAD,
+	WMI_TLV_SERVICE_SYNC_DELETE_CMDS,
+	WMI_TLV_SERVICE_SMART_ANTENNA_SW_SUPPORT,
+	WMI_TLV_SERVICE_SMART_ANTENNA_HW_SUPPORT,
+	WMI_TLV_SERVICE_RATECTRL_LIMIT_MAX_MIN_RATES,
+	WMI_TLV_SERVICE_NAN_DATA,
+	WMI_TLV_SERVICE_NAN_RTT,
+	WMI_TLV_SERVICE_11AX,
+	WMI_TLV_SERVICE_DEPRECATED_REPLACE,
+	WMI_TLV_SERVICE_TDLS_CONN_TRACKER_IN_HOST_MODE,
+	WMI_TLV_SERVICE_ENHANCED_MCAST_FILTER,
+	WMI_TLV_SERVICE_PERIODIC_CHAN_STAT_SUPPORT,
+	WMI_TLV_SERVICE_MESH_11S,
+	WMI_TLV_SERVICE_HALF_RATE_QUARTER_RATE_SUPPORT,
+	WMI_TLV_SERVICE_VDEV_RX_FILTER,
+	WMI_TLV_SERVICE_P2P_LISTEN_OFFLOAD_SUPPORT,
+	WMI_TLV_SERVICE_MARK_FIRST_WAKEUP_PACKET,
+	WMI_TLV_SERVICE_MULTIPLE_MCAST_FILTER_SET,
+	WMI_TLV_SERVICE_HOST_MANAGED_RX_REORDER,
+	WMI_TLV_SERVICE_FLASH_RDWR_SUPPORT,
+	WMI_TLV_SERVICE_WLAN_STATS_REPORT,
+	WMI_TLV_SERVICE_TX_MSDU_ID_NEW_PARTITION_SUPPORT,
+	WMI_TLV_SERVICE_DFS_PHYERR_OFFLOAD,
 };
 
 #define WMI_SERVICE_IS_ENABLED(wmi_svc_bmap, svc_id, len) \
@@ -1121,6 +1224,8 @@ wmi_tlv_svc_map(const __le32 *in, unsigned long *out, size_t len)
 	       WMI_SERVICE_MDNS_OFFLOAD, len);
 	SVCMAP(WMI_TLV_SERVICE_SAP_AUTH_OFFLOAD,
 	       WMI_SERVICE_SAP_AUTH_OFFLOAD, len);
+	SVCMAP(WMI_TLV_SERVICE_MGMT_TX_WMI,
+	       WMI_SERVICE_MGMT_TX_WMI, len);
 }
 
 #undef SVCMAP
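
The SVCMAP entries above translate firmware-specific TLV service bit positions into the driver's generic wmi_service numbering. A minimal sketch of the bitmap test that WMI_SERVICE_IS_ENABLED performs -- the macro body is not shown in this hunk, so the 32-bit word size and LSB-first bit order here are assumptions:

	/* Illustration only, not the real macro: test bit svc_id in a
	 * bitmap of 'len' 32-bit little-endian words; ids beyond the
	 * reported bitmap read as disabled. */
	static inline bool svc_bit_enabled(const __le32 *in, int svc_id, int len)
	{
		if (svc_id >= len * 32)
			return false;
		return !!(__le32_to_cpu(in[svc_id / 32]) & BIT(svc_id % 32));
	}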
@@ -1643,4 +1748,12 @@ struct wmi_tlv_tx_pause_ev {
 
 void ath10k_wmi_tlv_attach(struct ath10k *ar);
 
+struct wmi_tlv_mgmt_tx_cmd {
+	__le32 vdev_id;
+	__le32 desc_id;
+	__le32 chanfreq;
+	__le64 paddr;
+	__le32 frame_len;
+	__le32 buf_len;
+} __packed;
 #endif
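
The new wmi_tlv_mgmt_tx_cmd above hands the frame to firmware by DMA address (paddr) rather than copying it inline. A minimal sketch of populating it, assuming the usual little-endian WMI wire format; the helper name is hypothetical and the actual ath10k fill code is not shown here:

	/* Sketch: every multi-byte field is little-endian on the wire,
	 * hence the cpu_to_le32()/cpu_to_le64() conversions. */
	static void fill_mgmt_tx_cmd(struct wmi_tlv_mgmt_tx_cmd *cmd,
				     u32 vdev_id, u32 desc_id, u32 freq,
				     u64 paddr, u32 frame_len, u32 buf_len)
	{
		cmd->vdev_id = __cpu_to_le32(vdev_id);
		cmd->desc_id = __cpu_to_le32(desc_id);
		cmd->chanfreq = __cpu_to_le32(freq);
		cmd->paddr = __cpu_to_le64(paddr);
		cmd->frame_len = __cpu_to_le32(frame_len);
		cmd->buf_len = __cpu_to_le32(buf_len);
	}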
diff --git a/drivers/net/wireless/ath/ath10k/wmi.c b/drivers/net/wireless/ath/ath10k/wmi.c
index cad2e42dcef6..58dc2189ba49 100644
--- a/drivers/net/wireless/ath/ath10k/wmi.c
+++ b/drivers/net/wireless/ath/ath10k/wmi.c
@@ -1,6 +1,6 @@
 /*
  * Copyright (c) 2005-2011 Atheros Communications Inc.
- * Copyright (c) 2011-2013 Qualcomm Atheros, Inc.
+ * Copyright (c) 2011-2017 Qualcomm Atheros, Inc.
  *
  * Permission to use, copy, modify, and/or distribute this software for any
  * purpose with or without fee is hereby granted, provided that the above
@@ -29,6 +29,7 @@
 #include "p2p.h"
 #include "hw.h"
 #include "hif.h"
+#include "txrx.h"
 
 #define ATH10K_WMI_BARRIER_ECHO_ID 0xBA991E9
 #define ATH10K_WMI_BARRIER_TIMEOUT_HZ (3 * HZ)
@@ -4456,6 +4457,74 @@ void ath10k_wmi_event_pdev_tpc_config(struct ath10k *ar, struct sk_buff *skb)
 		   __le32_to_cpu(ev->rate_max));
 }
 
+static void
+ath10k_wmi_handle_tdls_peer_event(struct ath10k *ar, struct sk_buff *skb)
+{
+	struct wmi_tdls_peer_event *ev;
+	struct ath10k_peer *peer;
+	struct ath10k_vif *arvif;
+	int vdev_id;
+	int peer_status;
+	int peer_reason;
+	u8 reason;
+
+	if (skb->len < sizeof(*ev)) {
+		ath10k_err(ar, "received tdls peer event with invalid size (%d bytes)\n",
+			   skb->len);
+		return;
+	}
+
+	ev = (struct wmi_tdls_peer_event *)skb->data;
+	vdev_id = __le32_to_cpu(ev->vdev_id);
+	peer_status = __le32_to_cpu(ev->peer_status);
+	peer_reason = __le32_to_cpu(ev->peer_reason);
+
+	spin_lock_bh(&ar->data_lock);
+	peer = ath10k_peer_find(ar, vdev_id, ev->peer_macaddr.addr);
+	spin_unlock_bh(&ar->data_lock);
+
+	if (!peer) {
+		ath10k_warn(ar, "failed to find peer entry for %pM\n",
+			    ev->peer_macaddr.addr);
+		return;
+	}
+
+	switch (peer_status) {
+	case WMI_TDLS_SHOULD_TEARDOWN:
+		switch (peer_reason) {
+		case WMI_TDLS_TEARDOWN_REASON_PTR_TIMEOUT:
+		case WMI_TDLS_TEARDOWN_REASON_NO_RESPONSE:
+		case WMI_TDLS_TEARDOWN_REASON_RSSI:
+			reason = WLAN_REASON_TDLS_TEARDOWN_UNREACHABLE;
+			break;
+		default:
+			reason = WLAN_REASON_TDLS_TEARDOWN_UNSPECIFIED;
+			break;
+		}
+
+		arvif = ath10k_get_arvif(ar, vdev_id);
+		if (!arvif) {
+			ath10k_warn(ar, "received tdls peer event for invalid vdev id %u\n",
+				    vdev_id);
+			return;
+		}
+
+		ieee80211_tdls_oper_request(arvif->vif, ev->peer_macaddr.addr,
+					    NL80211_TDLS_TEARDOWN, reason,
+					    GFP_ATOMIC);
+
+		ath10k_dbg(ar, ATH10K_DBG_WMI,
+			   "received tdls teardown event for peer %pM reason %u\n",
+			   ev->peer_macaddr.addr, peer_reason);
+		break;
+	default:
+		ath10k_dbg(ar, ATH10K_DBG_WMI,
+			   "received unknown tdls peer event %u\n",
+			   peer_status);
+		break;
+	}
+}
+
 void ath10k_wmi_event_pdev_ftm_intg(struct ath10k *ar, struct sk_buff *skb)
 {
 	ath10k_dbg(ar, ATH10K_DBG_WMI, "WMI_PDEV_FTM_INTG_EVENTID\n");
@@ -5477,6 +5546,9 @@ static void ath10k_wmi_10_4_op_rx(struct ath10k *ar, struct sk_buff *skb)
 	case WMI_10_4_PDEV_TPC_CONFIG_EVENTID:
 		ath10k_wmi_event_pdev_tpc_config(ar, skb);
 		break;
+	case WMI_10_4_TDLS_PEER_EVENTID:
+		ath10k_wmi_handle_tdls_peer_event(ar, skb);
+		break;
 	default:
 		ath10k_warn(ar, "Unknown eventid: %d\n", id);
 		break;
diff --git a/drivers/net/wireless/ath/ath10k/wmi.h b/drivers/net/wireless/ath/ath10k/wmi.h
index c02b21cff38d..c7b30ed9015d 100644
--- a/drivers/net/wireless/ath/ath10k/wmi.h
+++ b/drivers/net/wireless/ath/ath10k/wmi.h
@@ -1,6 +1,6 @@
 /*
  * Copyright (c) 2005-2011 Atheros Communications Inc.
- * Copyright (c) 2011-2013 Qualcomm Atheros, Inc.
+ * Copyright (c) 2011-2017 Qualcomm Atheros, Inc.
  *
  * Permission to use, copy, modify, and/or distribute this software for any
  * purpose with or without fee is hereby granted, provided that the above
@@ -195,6 +195,8 @@ enum wmi_service {
 	WMI_SERVICE_SMART_LOGGING_SUPPORT,
 	WMI_SERVICE_TDLS_CONN_TRACKER_IN_HOST_MODE,
 	WMI_SERVICE_TDLS_EXPLICIT_MODE_ONLY,
+	WMI_SERVICE_MGMT_TX_WMI,
+	WMI_SERVICE_TDLS_WIDER_BANDWIDTH,
 
 	/* keep last */
 	WMI_SERVICE_MAX,
@@ -336,6 +338,7 @@ enum wmi_10_4_service {
 	WMI_10_4_SERVICE_TDLS_UAPSD_SLEEP_STA,
 	WMI_10_4_SERVICE_TDLS_CONN_TRACKER_IN_HOST_MODE,
 	WMI_10_4_SERVICE_TDLS_EXPLICIT_MODE_ONLY,
+	WMI_10_4_SERVICE_TDLS_WIDER_BANDWIDTH,
 };
 
 static inline char *wmi_service_name(int service_id)
@@ -444,6 +447,7 @@ static inline char *wmi_service_name(int service_id)
 	SVCSTR(WMI_SERVICE_SMART_LOGGING_SUPPORT);
 	SVCSTR(WMI_SERVICE_TDLS_CONN_TRACKER_IN_HOST_MODE);
 	SVCSTR(WMI_SERVICE_TDLS_EXPLICIT_MODE_ONLY);
+	SVCSTR(WMI_SERVICE_TDLS_WIDER_BANDWIDTH);
 	default:
 		return NULL;
 	}
@@ -740,6 +744,8 @@ static inline void wmi_10_4_svc_map(const __le32 *in, unsigned long *out,
 	       WMI_SERVICE_TDLS_CONN_TRACKER_IN_HOST_MODE, len);
 	SVCMAP(WMI_10_4_SERVICE_TDLS_EXPLICIT_MODE_ONLY,
 	       WMI_SERVICE_TDLS_EXPLICIT_MODE_ONLY, len);
+	SVCMAP(WMI_10_4_SERVICE_TDLS_WIDER_BANDWIDTH,
+	       WMI_SERVICE_TDLS_WIDER_BANDWIDTH, len);
 }
 
 #undef SVCMAP
@@ -797,6 +803,7 @@ struct wmi_cmd_map {
 	u32 bcn_filter_rx_cmdid;
 	u32 prb_req_filter_rx_cmdid;
 	u32 mgmt_tx_cmdid;
+	u32 mgmt_tx_send_cmdid;
 	u32 prb_tmpl_cmdid;
 	u32 addba_clear_resp_cmdid;
 	u32 addba_send_cmdid;
@@ -2922,7 +2929,7 @@ struct wmi_ext_resource_config_10_4_cmd {
 	__le32 max_tdls_concurrent_buffer_sta;
 };
 
-/* strucutre describing host memory chunk. */
+/* structure describing host memory chunk. */
 struct host_memory_chunk {
 	/* id of the request that is passed up in service ready */
 	__le32 req_id;
@@ -5236,7 +5243,8 @@ enum wmi_10_4_vdev_param {
 #define WMI_VDEV_PARAM_TXBF_MU_TX_BFER BIT(3)
 
 #define WMI_TXBF_STS_CAP_OFFSET_LSB	4
-#define WMI_TXBF_STS_CAP_OFFSET_MASK	0xf0
+#define WMI_TXBF_STS_CAP_OFFSET_MASK	0x70
+#define WMI_TXBF_CONF_IMPLICIT_BF       BIT(7)
 #define WMI_BF_SOUND_DIM_OFFSET_LSB	8
 #define WMI_BF_SOUND_DIM_OFFSET_MASK	0xf00
 
diff --git a/drivers/net/wireless/ath/ath10k/wow.c b/drivers/net/wireless/ath/ath10k/wow.c
index 0d46d6dc7578..c4cbccb29b31 100644
--- a/drivers/net/wireless/ath/ath10k/wow.c
+++ b/drivers/net/wireless/ath/ath10k/wow.c
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2015 Qualcomm Atheros, Inc.
+ * Copyright (c) 2015-2017 Qualcomm Atheros, Inc.
  *
  * Permission to use, copy, modify, and/or distribute this software for any
  * purpose with or without fee is hereby granted, provided that the above
diff --git a/drivers/net/wireless/ath/ath10k/wow.h b/drivers/net/wireless/ath/ath10k/wow.h
index 9745b9ddc7f5..6e810105b775 100644
--- a/drivers/net/wireless/ath/ath10k/wow.h
+++ b/drivers/net/wireless/ath/ath10k/wow.h
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2015 Qualcomm Atheros, Inc.
+ * Copyright (c) 2015,2017 Qualcomm Atheros, Inc.
  *
  * Permission to use, copy, modify, and/or distribute this software for any
  * purpose with or without fee is hereby granted, provided that the above
diff --git a/drivers/net/wireless/ath/ath6kl/cfg80211.c b/drivers/net/wireless/ath/ath6kl/cfg80211.c
index b53eb2b85f02..2ba8cf3f38af 100644
--- a/drivers/net/wireless/ath/ath6kl/cfg80211.c
+++ b/drivers/net/wireless/ath/ath6kl/cfg80211.c
@@ -2766,7 +2766,6 @@ static int ath6kl_start_ap(struct wiphy *wiphy, struct net_device *dev,
 	struct ieee80211_mgmt *mgmt;
 	bool hidden = false;
 	u8 *ies;
-	int ies_len;
 	struct wmi_connect_cmd p;
 	int res;
 	int i, ret;
@@ -2804,7 +2803,6 @@ static int ath6kl_start_ap(struct wiphy *wiphy, struct net_device *dev,
 	ies = mgmt->u.beacon.variable;
 	if (ies > info->beacon.head + info->beacon.head_len)
 		return -EINVAL;
-	ies_len = info->beacon.head + info->beacon.head_len - ies;
 
 	if (info->ssid == NULL)
 		return -EINVAL;
diff --git a/drivers/net/wireless/ath/ath6kl/txrx.c b/drivers/net/wireless/ath/ath6kl/txrx.c
index 1379906bf849..8da9506f8c2b 100644
--- a/drivers/net/wireless/ath/ath6kl/txrx.c
+++ b/drivers/net/wireless/ath/ath6kl/txrx.c
@@ -1001,7 +1001,7 @@ static void aggr_slice_amsdu(struct aggr_info *p_aggr,
 
 	while (amsdu_len > mac_hdr_len) {
 		hdr = (struct ethhdr *) framep;
-		payload_8023_len = ntohs(hdr->h_proto);
+		payload_8023_len = be16_to_cpu(hdr->h_proto);
 
 		if (payload_8023_len < MIN_MSDU_SUBFRAME_PAYLOAD_LEN ||
 		    payload_8023_len > MAX_MSDU_SUBFRAME_PAYLOAD_LEN) {
diff --git a/drivers/net/wireless/ath/ath9k/Kconfig b/drivers/net/wireless/ath/ath9k/Kconfig
index 783a38f1a626..1f3523019509 100644
--- a/drivers/net/wireless/ath/ath9k/Kconfig
+++ b/drivers/net/wireless/ath/ath9k/Kconfig
@@ -61,13 +61,12 @@ config ATH9K_DEBUGFS
 	depends on ATH9K && DEBUG_FS
 	select MAC80211_DEBUGFS
 	select ATH9K_COMMON_DEBUG
-	select RELAY
 	---help---
 	  Say Y, if you need access to ath9k's statistics for
 	  interrupts, rate control, etc.
 
-	  Also required for changing debug message flags at run time.
-	  As well as access to the FFT/spectral data and TX99.
+	  Also required for changing debug message flags at run time and for
+	  TX99.
 
 config ATH9K_STATION_STATISTICS
 	bool "Detailed station statistics"
@@ -177,7 +176,6 @@ config ATH9K_HTC_DEBUGFS
 	bool "Atheros ath9k_htc debugging"
 	depends on ATH9K_HTC && DEBUG_FS
 	select ATH9K_COMMON_DEBUG
-	select RELAY
 	---help---
 	  Say Y, if you need access to ath9k_htc's statistics.
 	  As well as access to the FFT/spectral data.
@@ -192,3 +190,11 @@ config ATH9K_HWRNG
 
 	  Say Y, feeds the entropy directly from the WiFi driver to the input
 	  pool.
+
+config ATH9K_COMMON_SPECTRAL
+	bool "Atheros ath9k/ath9k_htc spectral scan support"
+	depends on ATH9K_DEBUGFS || ATH9K_HTC_DEBUGFS
+	select RELAY
+	default n
+	---help---
+	  Say Y to enable access to the FFT/spectral data via debugfs.
diff --git a/drivers/net/wireless/ath/ath9k/Makefile b/drivers/net/wireless/ath/ath9k/Makefile
index d804ce7391a0..f71b2ad8275c 100644
--- a/drivers/net/wireless/ath/ath9k/Makefile
+++ b/drivers/net/wireless/ath/ath9k/Makefile
@@ -62,8 +62,8 @@ ath9k_common-y:=	common.o \
 			common-init.o \
 			common-beacon.o \
 
-ath9k_common-$(CONFIG_ATH9K_COMMON_DEBUG) += common-debug.o \
-					     common-spectral.o
+ath9k_common-$(CONFIG_ATH9K_COMMON_DEBUG) += common-debug.o
+ath9k_common-$(CONFIG_ATH9K_COMMON_SPECTRAL) += common-spectral.o
 
 ath9k_htc-y +=	htc_hst.o \
 		hif_usb.o \
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c b/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c
index c2e210c0a770..f019a20e5a1f 100644
--- a/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c
+++ b/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c
@@ -3310,6 +3310,12 @@ static int ar9300_eeprom_restore_internal(struct ath_hw *ah,
 	if (ar9300_check_eeprom_header(ah, read, cptr))
 		goto found;
 
+	cptr = AR9300_BASE_ADDR_4K;
+	ath_dbg(common, EEPROM, "Trying EEPROM access at Address 0x%04x\n",
+		cptr);
+	if (ar9300_check_eeprom_header(ah, read, cptr))
+		goto found;
+
 	cptr = AR9300_BASE_ADDR_512;
 	ath_dbg(common, EEPROM, "Trying EEPROM access at Address 0x%04x\n",
 		cptr);
@@ -3430,6 +3436,60 @@ static u32 ar9003_dump_modal_eeprom(char *buf, u32 len, u32 size,
 	return len;
 }
 
+static u32 ar9003_dump_cal_data(struct ath_hw *ah, char *buf, u32 len, u32 size,
+				bool is_2g)
+{
+	struct ar9300_eeprom *eep = &ah->eeprom.ar9300_eep;
+	struct ar9300_base_eep_hdr *pBase;
+	struct ar9300_cal_data_per_freq_op_loop *cal_pier;
+	int cal_pier_nr;
+	int freq;
+	int i, j;
+
+	pBase = &eep->baseEepHeader;
+
+	if (is_2g)
+		cal_pier_nr = AR9300_NUM_2G_CAL_PIERS;
+	else
+		cal_pier_nr = AR9300_NUM_5G_CAL_PIERS;
+
+	for (i = 0; i < AR9300_MAX_CHAINS; i++) {
+		if (!((pBase->txrxMask >> i) & 1))
+			continue;
+
+		len += snprintf(buf + len, size - len, "Chain %d\n", i);
+
+		len += snprintf(buf + len, size - len,
+			"Freq\t ref\tvolt\ttemp\tnf_cal\tnf_pow\trx_temp\n");
+
+		for (j = 0; j < cal_pier_nr; j++) {
+			if (is_2g) {
+				cal_pier = &eep->calPierData2G[i][j];
+				freq = 2300 + eep->calFreqPier2G[j];
+			} else {
+				cal_pier = &eep->calPierData5G[i][j];
+				freq = 4800 + eep->calFreqPier5G[j] * 5;
+			}
+
+			len += scnprintf(buf + len, size - len,
+				"%d\t", freq);
+
+			len += scnprintf(buf + len, size - len,
+				"%d\t%d\t%d\t%d\t%d\t%d\n",
+				cal_pier->refPower,
+				cal_pier->voltMeas,
+				cal_pier->tempMeas,
+				cal_pier->rxTempMeas ?
+				N2DBM(cal_pier->rxNoisefloorCal) : 0,
+				cal_pier->rxTempMeas ?
+				N2DBM(cal_pier->rxNoisefloorPower) : 0,
+				cal_pier->rxTempMeas);
+		}
+	}
+
+	return len;
+}
+
 static u32 ath9k_hw_ar9003_dump_eeprom(struct ath_hw *ah, bool dump_base_hdr,
 				       u8 *buf, u32 len, u32 size)
 {
@@ -3441,10 +3501,18 @@ static u32 ath9k_hw_ar9003_dump_eeprom(struct ath_hw *ah, bool dump_base_hdr,
 				 "%20s :\n", "2GHz modal Header");
 		len = ar9003_dump_modal_eeprom(buf, len, size,
 						&eep->modalHeader2G);
-		len += scnprintf(buf + len, size - len,
+
+		len += scnprintf(buf + len, size - len, "Calibration data\n");
+		len = ar9003_dump_cal_data(ah, buf, len, size, true);
+
+	len += scnprintf(buf + len, size - len,
 				 "%20s :\n", "5GHz modal Header");
 		len = ar9003_dump_modal_eeprom(buf, len, size,
 						&eep->modalHeader5G);
+
+		len += snprintf(buf + len, size - len, "Calibration data\n");
+		len = ar9003_dump_cal_data(ah, buf, len, size, false);
+
 		goto out;
 	}
 
@@ -4683,7 +4751,8 @@ static int ar9003_hw_cal_pier_get(struct ath_hw *ah,
 				  int ichain,
 				  int *pfrequency,
 				  int *pcorrection,
-				  int *ptemperature, int *pvoltage)
+				  int *ptemperature, int *pvoltage,
+				  int *pnf_cal, int *pnf_power)
 {
 	u8 *pCalPier;
 	struct ar9300_cal_data_per_freq_op_loop *pCalPierStruct;
@@ -4725,6 +4794,10 @@ static int ar9003_hw_cal_pier_get(struct ath_hw *ah,
 	*pcorrection = pCalPierStruct->refPower;
 	*ptemperature = pCalPierStruct->tempMeas;
 	*pvoltage = pCalPierStruct->voltMeas;
+	*pnf_cal = pCalPierStruct->rxTempMeas ?
+			N2DBM(pCalPierStruct->rxNoisefloorCal) : 0;
+	*pnf_power = pCalPierStruct->rxTempMeas ?
+			N2DBM(pCalPierStruct->rxNoisefloorPower) : 0;
 
 	return 0;
 }
@@ -4889,14 +4962,18 @@ static int ar9003_hw_calibration_apply(struct ath_hw *ah, int frequency)
 	int mode;
 	int lfrequency[AR9300_MAX_CHAINS],
 	    lcorrection[AR9300_MAX_CHAINS],
-	    ltemperature[AR9300_MAX_CHAINS], lvoltage[AR9300_MAX_CHAINS];
+	    ltemperature[AR9300_MAX_CHAINS], lvoltage[AR9300_MAX_CHAINS],
+	    lnf_cal[AR9300_MAX_CHAINS], lnf_pwr[AR9300_MAX_CHAINS];
 	int hfrequency[AR9300_MAX_CHAINS],
 	    hcorrection[AR9300_MAX_CHAINS],
-	    htemperature[AR9300_MAX_CHAINS], hvoltage[AR9300_MAX_CHAINS];
+	    htemperature[AR9300_MAX_CHAINS], hvoltage[AR9300_MAX_CHAINS],
+	    hnf_cal[AR9300_MAX_CHAINS], hnf_pwr[AR9300_MAX_CHAINS];
 	int fdiff;
 	int correction[AR9300_MAX_CHAINS],
-	    voltage[AR9300_MAX_CHAINS], temperature[AR9300_MAX_CHAINS];
-	int pfrequency, pcorrection, ptemperature, pvoltage;
+	    voltage[AR9300_MAX_CHAINS], temperature[AR9300_MAX_CHAINS],
+	    nf_cal[AR9300_MAX_CHAINS], nf_pwr[AR9300_MAX_CHAINS];
+	int pfrequency, pcorrection, ptemperature, pvoltage,
+	    pnf_cal, pnf_pwr;
 	struct ath_common *common = ath9k_hw_common(ah);
 
 	mode = (frequency >= 4000);
@@ -4914,7 +4991,8 @@ static int ar9003_hw_calibration_apply(struct ath_hw *ah, int frequency)
 		for (ipier = 0; ipier < npier; ipier++) {
 			if (!ar9003_hw_cal_pier_get(ah, mode, ipier, ichain,
 						    &pfrequency, &pcorrection,
-						    &ptemperature, &pvoltage)) {
+						    &ptemperature, &pvoltage,
+						    &pnf_cal, &pnf_pwr)) {
 				fdiff = frequency - pfrequency;
 
 				/*
@@ -4936,6 +5014,8 @@ static int ar9003_hw_calibration_apply(struct ath_hw *ah, int frequency)
 						htemperature[ichain] =
 						    ptemperature;
 						hvoltage[ichain] = pvoltage;
+						hnf_cal[ichain] = pnf_cal;
+						hnf_pwr[ichain] = pnf_pwr;
 					}
 				}
 				if (fdiff >= 0) {
@@ -4952,6 +5032,8 @@ static int ar9003_hw_calibration_apply(struct ath_hw *ah, int frequency)
 						ltemperature[ichain] =
 						    ptemperature;
 						lvoltage[ichain] = pvoltage;
+						lnf_cal[ichain] = pnf_cal;
+						lnf_pwr[ichain] = pnf_pwr;
 					}
 				}
 			}
@@ -4960,15 +5042,20 @@ static int ar9003_hw_calibration_apply(struct ath_hw *ah, int frequency)
 
 	/* interpolate  */
 	for (ichain = 0; ichain < AR9300_MAX_CHAINS; ichain++) {
-		ath_dbg(common, EEPROM, "ch=%d f=%d low=%d %d h=%d %d\n",
+		ath_dbg(common, EEPROM,
+			"ch=%d f=%d low=%d %d h=%d %d n=%d %d p=%d %d\n",
 			ichain, frequency, lfrequency[ichain],
 			lcorrection[ichain], hfrequency[ichain],
-			hcorrection[ichain]);
+			hcorrection[ichain], lnf_cal[ichain],
+			hnf_cal[ichain], lnf_pwr[ichain],
+			hnf_pwr[ichain]);
 		/* they're the same, so just pick one */
 		if (hfrequency[ichain] == lfrequency[ichain]) {
 			correction[ichain] = lcorrection[ichain];
 			voltage[ichain] = lvoltage[ichain];
 			temperature[ichain] = ltemperature[ichain];
+			nf_cal[ichain] = lnf_cal[ichain];
+			nf_pwr[ichain] = lnf_pwr[ichain];
 		}
 		/* the low frequency is good */
 		else if (frequency - lfrequency[ichain] < 1000) {
@@ -4992,12 +5079,26 @@ static int ar9003_hw_calibration_apply(struct ath_hw *ah, int frequency)
 						hfrequency[ichain],
 						lvoltage[ichain],
 						hvoltage[ichain]);
+
+				nf_cal[ichain] = interpolate(frequency,
+						lfrequency[ichain],
+						hfrequency[ichain],
+						lnf_cal[ichain],
+						hnf_cal[ichain]);
+
+				nf_pwr[ichain] = interpolate(frequency,
+						lfrequency[ichain],
+						hfrequency[ichain],
+						lnf_pwr[ichain],
+						hnf_pwr[ichain]);
 			}
 			/* only low is good, use it */
 			else {
 				correction[ichain] = lcorrection[ichain];
 				temperature[ichain] = ltemperature[ichain];
 				voltage[ichain] = lvoltage[ichain];
+				nf_cal[ichain] = lnf_cal[ichain];
+				nf_pwr[ichain] = lnf_pwr[ichain];
 			}
 		}
 		/* only high is good, use it */
@@ -5005,10 +5106,14 @@ static int ar9003_hw_calibration_apply(struct ath_hw *ah, int frequency)
 			correction[ichain] = hcorrection[ichain];
 			temperature[ichain] = htemperature[ichain];
 			voltage[ichain] = hvoltage[ichain];
+			nf_cal[ichain] = hnf_cal[ichain];
+			nf_pwr[ichain] = hnf_pwr[ichain];
 		} else {	/* nothing is good, presume 0???? */
 			correction[ichain] = 0;
 			temperature[ichain] = 0;
 			voltage[ichain] = 0;
+			nf_cal[ichain] = 0;
+			nf_pwr[ichain] = 0;
 		}
 	}
 
@@ -5019,6 +5124,16 @@ static int ar9003_hw_calibration_apply(struct ath_hw *ah, int frequency)
 		"for frequency=%d, calibration correction = %d %d %d\n",
 		frequency, correction[0], correction[1], correction[2]);
 
+	/* Store calibrated noise floor values */
+	for (ichain = 0; ichain < AR5416_MAX_CHAINS; ichain++)
+		if (mode) {
+			ah->nf_5g.cal[ichain] = nf_cal[ichain];
+			ah->nf_5g.pwr[ichain] = nf_pwr[ichain];
+		} else {
+			ah->nf_2g.cal[ichain] = nf_cal[ichain];
+			ah->nf_2g.pwr[ichain] = nf_pwr[ichain];
+		}
+
 	return 0;
 }
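
The nf_cal/nf_pwr values above are interpolated between the two surrounding calibration piers with the driver's interpolate() helper. A minimal sketch of such a helper, assuming plain integer linear interpolation (the real ath9k implementation may round differently):

	/* Sketch only: linear interpolation of y at x between the
	 * points (xa, ya) and (xb, yb); assumes xa != xb. */
	static int interpolate(int x, int xa, int xb, int ya, int yb)
	{
		return ya + ((yb - ya) * (x - xa)) / (xb - xa);
	}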
 
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_eeprom.h b/drivers/net/wireless/ath/ath9k/ar9003_eeprom.h
index bd2269c7de6b..e8fda54acfe3 100644
--- a/drivers/net/wireless/ath/ath9k/ar9003_eeprom.h
+++ b/drivers/net/wireless/ath/ath9k/ar9003_eeprom.h
@@ -62,6 +62,16 @@
  */
 #define AR9300_PWR_TABLE_OFFSET  0
 
+/* Noise power data definitions
+ * units are: 4 x dBm - NOISE_PWR_DATA_OFFSET
+ * (e.g. -25 = (-25/4 - 90) = -96.25 dBm)
+ * range (for 6 signed bits) is (-32 to 31) + offset => -122dBm to -59dBm
+ * resolution (2 bits) is 0.25dBm
+ */
+#define NOISE_PWR_DATA_OFFSET	-90
+#define NOISE_PWR_DBM_2_INT(_p)	((((_p) + 3) >> 2) + NOISE_PWR_DATA_OFFSET)
+#define N2DBM(_p)		NOISE_PWR_DBM_2_INT(_p)
+
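
A worked example of the conversion above (the arithmetic is illustrative, not from the source): N2DBM(-25) = ((-25 + 3) >> 2) + (-90) = (-22 >> 2) - 90 = -6 - 90 = -96 dBm. The exact value is -25/4 - 90 = -96.25 dBm; the +3 bias turns the arithmetic right shift into a ceiling division, so the macro rounds the quarter-dBm raw value up to -96.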
 /* byte addressable */
 #define AR9300_EEPROM_SIZE (16*1024)
 
diff --git a/drivers/net/wireless/ath/ath9k/calib.c b/drivers/net/wireless/ath/ath9k/calib.c
index 13ab6bc46775..3d9447e21025 100644
--- a/drivers/net/wireless/ath/ath9k/calib.c
+++ b/drivers/net/wireless/ath/ath9k/calib.c
@@ -58,19 +58,25 @@ static struct ath_nf_limits *ath9k_hw_get_nf_limits(struct ath_hw *ah,
 }
 
 static s16 ath9k_hw_get_default_nf(struct ath_hw *ah,
-				   struct ath9k_channel *chan)
+				   struct ath9k_channel *chan,
+				   int chain)
 {
-	return ath9k_hw_get_nf_limits(ah, chan)->nominal;
+	s16 calib_nf = ath9k_hw_get_nf_limits(ah, chan)->cal[chain];
+
+	if (calib_nf)
+		return calib_nf;
+	else
+		return ath9k_hw_get_nf_limits(ah, chan)->nominal;
 }
 
 s16 ath9k_hw_getchan_noise(struct ath_hw *ah, struct ath9k_channel *chan,
 			   s16 nf)
 {
-	s8 noise = ATH_DEFAULT_NOISE_FLOOR;
+	s8 noise = ath9k_hw_get_default_nf(ah, chan, 0);
 
 	if (nf) {
 		s8 delta = nf - ATH9K_NF_CAL_NOISE_THRESH -
-			   ath9k_hw_get_default_nf(ah, chan);
+			   ath9k_hw_get_default_nf(ah, chan, 0);
 		if (delta > 0)
 			noise += delta;
 	}
@@ -240,7 +246,7 @@ int ath9k_hw_loadnf(struct ath_hw *ah, struct ath9k_channel *chan)
 	unsigned i, j;
 	u8 chainmask = (ah->rxchainmask << 3) | ah->rxchainmask;
 	struct ath_common *common = ath9k_hw_common(ah);
-	s16 default_nf = ath9k_hw_get_default_nf(ah, chan);
+	s16 default_nf = ath9k_hw_get_nf_limits(ah, chan)->nominal;
 	u32 bb_agc_ctl = REG_READ(ah, AR_PHY_AGC_CONTROL);
 
 	if (ah->caldata)
@@ -258,8 +264,13 @@ int ath9k_hw_loadnf(struct ath_hw *ah, struct ath9k_channel *chan)
 				nfval = ah->nf_override;
 			else if (h)
 				nfval = h[i].privNF;
-			else
-				nfval = default_nf;
+			else {
+				/* Try to get calibrated noise floor value */
+				nfval =
+				    ath9k_hw_get_nf_limits(ah, chan)->cal[i];
+				if (nfval > -60 || nfval < -127)
+					nfval = default_nf;
+			}
 
 			REG_RMW(ah, ah->nf_regs[i],
 				(((u32) nfval << 1) & 0x1ff), 0x1ff);
@@ -429,20 +440,19 @@ void ath9k_init_nfcal_hist_buffer(struct ath_hw *ah,
 				  struct ath9k_channel *chan)
 {
 	struct ath9k_nfcal_hist *h;
-	s16 default_nf;
-	int i, j;
+	int i, j, k = 0;
 
 	ah->caldata->channel = chan->channel;
 	ah->caldata->channelFlags = chan->channelFlags;
 	h = ah->caldata->nfCalHist;
-	default_nf = ath9k_hw_get_default_nf(ah, chan);
 	for (i = 0; i < NUM_NF_READINGS; i++) {
 		h[i].currIndex = 0;
-		h[i].privNF = default_nf;
+		h[i].privNF = ath9k_hw_get_default_nf(ah, chan, k);
 		h[i].invalidNFcount = AR_PHY_CCA_FILTERWINDOW_LENGTH;
-		for (j = 0; j < ATH9K_NF_CAL_HIST_MAX; j++) {
-			h[i].nfCalBuffer[j] = default_nf;
-		}
+		for (j = 0; j < ATH9K_NF_CAL_HIST_MAX; j++)
+			h[i].nfCalBuffer[j] = h[i].privNF;
+		if (++k >= AR5416_MAX_CHAINS)
+			k = 0;
 	}
 }
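
With the per-chain lookup above, each of the NUM_NF_READINGS history slots is now seeded from its own chain's calibrated value. Assuming the usual ath9k layout of six readings (control chains 0-2 followed by extension chains 0-2) and AR5416_MAX_CHAINS == 3, the k counter cycles like this:

	/* slot i : 0 1 2 3 4 5   ctl chain0..2, then ext chain0..2
	 * chain k: 0 1 2 0 1 2   k wraps at AR5416_MAX_CHAINS
	 */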
 
diff --git a/drivers/net/wireless/ath/ath9k/common-spectral.h b/drivers/net/wireless/ath/ath9k/common-spectral.h
index 5d1a51d83aa6..303ab470ce34 100644
--- a/drivers/net/wireless/ath/ath9k/common-spectral.h
+++ b/drivers/net/wireless/ath/ath9k/common-spectral.h
@@ -151,7 +151,7 @@ static inline u8 spectral_bitmap_weight(u8 *bins)
 	return bins[0] & 0x3f;
 }
 
-#ifdef CONFIG_ATH9K_COMMON_DEBUG
+#ifdef CONFIG_ATH9K_COMMON_SPECTRAL
 void ath9k_cmn_spectral_init_debug(struct ath_spec_scan_priv *spec_priv, struct dentry *debugfs_phy);
 void ath9k_cmn_spectral_deinit_debug(struct ath_spec_scan_priv *spec_priv);
 
@@ -183,6 +183,6 @@ static inline int ath_cmn_process_fft(struct ath_spec_scan_priv *spec_priv,
 {
 	return 0;
 }
-#endif /* CONFIG_ATH9K_COMMON_DEBUG */
+#endif /* CONFIG_ATH9K_COMMON_SPECTRAL */
 
 #endif /* SPECTRAL_H */
diff --git a/drivers/net/wireless/ath/ath9k/dfs.c b/drivers/net/wireless/ath/ath9k/dfs.c
index 40a397fd0e0e..6fee9a464cce 100644
--- a/drivers/net/wireless/ath/ath9k/dfs.c
+++ b/drivers/net/wireless/ath/ath9k/dfs.c
@@ -123,11 +123,9 @@ static bool ath9k_check_chirping(struct ath_softc *sc, u8 *data,
 			fft = (struct ath9k_dfs_fft_40 *) (data + 2);
 			ath_dbg(common, DFS, "fixing datalen by 2\n");
 		}
-		if (IS_CHAN_HT40MINUS(ah->curchan)) {
-			int temp = is_ctl;
-			is_ctl = is_ext;
-			is_ext = temp;
-		}
+		if (IS_CHAN_HT40MINUS(ah->curchan))
+			swap(is_ctl, is_ext);
+
 		for (i = 0; i < FFT_NUM_SAMPLES; i++)
 			max_bin[i] = ath9k_get_max_index_ht40(fft + i, is_ctl,
 							      is_ext);
diff --git a/drivers/net/wireless/ath/ath9k/htc_drv_main.c b/drivers/net/wireless/ath/ath9k/htc_drv_main.c
index f808e5833d7e..a82ad739ab80 100644
--- a/drivers/net/wireless/ath/ath9k/htc_drv_main.c
+++ b/drivers/net/wireless/ath/ath9k/htc_drv_main.c
@@ -1683,6 +1683,10 @@ static int ath9k_htc_ampdu_action(struct ieee80211_hw *hw,
 		ieee80211_stop_tx_ba_cb_irqsafe(vif, sta->addr, tid);
 		break;
 	case IEEE80211_AMPDU_TX_OPERATIONAL:
+		if (tid >= ATH9K_HTC_MAX_TID) {
+			ret = -EINVAL;
+			break;
+		}
 		ista = (struct ath9k_htc_sta *) sta->drv_priv;
 		spin_lock_bh(&priv->tx.tx_lock);
 		ista->tid_state[tid] = AGGR_OPERATIONAL;
diff --git a/drivers/net/wireless/ath/ath9k/hw.c b/drivers/net/wireless/ath/ath9k/hw.c
index 8c5c2dd8fa7f..cd0f023ccf77 100644
--- a/drivers/net/wireless/ath/ath9k/hw.c
+++ b/drivers/net/wireless/ath/ath9k/hw.c
@@ -922,6 +922,7 @@ static void ath9k_hw_init_interrupt_masks(struct ath_hw *ah,
 		AR_IMR_RXERR |
 		AR_IMR_RXORN |
 		AR_IMR_BCNMISC;
+	u32 msi_cfg = 0;
 
 	if (AR_SREV_9340(ah) || AR_SREV_9550(ah) || AR_SREV_9531(ah) ||
 	    AR_SREV_9561(ah))
@@ -929,22 +930,30 @@ static void ath9k_hw_init_interrupt_masks(struct ath_hw *ah,
 
 	if (AR_SREV_9300_20_OR_LATER(ah)) {
 		imr_reg |= AR_IMR_RXOK_HP;
-		if (ah->config.rx_intr_mitigation)
+		if (ah->config.rx_intr_mitigation) {
 			imr_reg |= AR_IMR_RXINTM | AR_IMR_RXMINTR;
-		else
+			msi_cfg |= AR_INTCFG_MSI_RXINTM | AR_INTCFG_MSI_RXMINTR;
+		} else {
 			imr_reg |= AR_IMR_RXOK_LP;
-
+			msi_cfg |= AR_INTCFG_MSI_RXOK;
+		}
 	} else {
-		if (ah->config.rx_intr_mitigation)
+		if (ah->config.rx_intr_mitigation) {
 			imr_reg |= AR_IMR_RXINTM | AR_IMR_RXMINTR;
-		else
+			msi_cfg |= AR_INTCFG_MSI_RXINTM | AR_INTCFG_MSI_RXMINTR;
+		} else {
 			imr_reg |= AR_IMR_RXOK;
+			msi_cfg |= AR_INTCFG_MSI_RXOK;
+		}
 	}
 
-	if (ah->config.tx_intr_mitigation)
+	if (ah->config.tx_intr_mitigation) {
 		imr_reg |= AR_IMR_TXINTM | AR_IMR_TXMINTR;
-	else
+		msi_cfg |= AR_INTCFG_MSI_TXINTM | AR_INTCFG_MSI_TXMINTR;
+	} else {
 		imr_reg |= AR_IMR_TXOK;
+		msi_cfg |= AR_INTCFG_MSI_TXOK;
+	}
 
 	ENABLE_REGWRITE_BUFFER(ah);
 
@@ -952,6 +961,16 @@ static void ath9k_hw_init_interrupt_masks(struct ath_hw *ah,
 	ah->imrs2_reg |= AR_IMR_S2_GTT;
 	REG_WRITE(ah, AR_IMR_S2, ah->imrs2_reg);
 
+	if (ah->msi_enabled) {
+		ah->msi_reg = REG_READ(ah, AR_PCIE_MSI);
+		ah->msi_reg |= AR_PCIE_MSI_HW_DBI_WR_EN;
+		ah->msi_reg &= AR_PCIE_MSI_HW_INT_PENDING_ADDR_MSI_64;
+		REG_WRITE(ah, AR_INTCFG, msi_cfg);
+		ath_dbg(ath9k_hw_common(ah), ANY,
+			"value of AR_INTCFG=0x%X, msi_cfg=0x%X\n",
+			REG_READ(ah, AR_INTCFG), msi_cfg);
+	}
+
 	if (!AR_SREV_9100(ah)) {
 		REG_WRITE(ah, AR_INTR_SYNC_CAUSE, 0xFFFFFFFF);
 		REG_WRITE(ah, AR_INTR_SYNC_ENABLE, sync_default);
diff --git a/drivers/net/wireless/ath/ath9k/hw.h b/drivers/net/wireless/ath/ath9k/hw.h
index 4ac70827d142..9804a24a2dc0 100644
--- a/drivers/net/wireless/ath/ath9k/hw.h
+++ b/drivers/net/wireless/ath/ath9k/hw.h
@@ -754,6 +754,8 @@ struct ath_nf_limits {
 	s16 max;
 	s16 min;
 	s16 nominal;
+	s16 cal[AR5416_MAX_CHAINS];
+	s16 pwr[AR5416_MAX_CHAINS];
 };
 
 enum ath_cal_list {
@@ -977,6 +979,9 @@ struct ath_hw {
 	bool tpc_enabled;
 	u8 tx_power[Ar5416RateSize];
 	u8 tx_power_stbc[Ar5416RateSize];
+	bool msi_enabled;
+	u32 msi_mask;
+	u32 msi_reg;
 };
 
 struct ath_bus_ops {
diff --git a/drivers/net/wireless/ath/ath9k/init.c b/drivers/net/wireless/ath/ath9k/init.c
index fa58a32227f5..e479fae5aab9 100644
--- a/drivers/net/wireless/ath/ath9k/init.c
+++ b/drivers/net/wireless/ath/ath9k/init.c
@@ -23,6 +23,7 @@
 #include <linux/of.h>
 #include <linux/of_net.h>
 #include <linux/relay.h>
+#include <linux/dmi.h>
 #include <net/ieee80211_radiotap.h>
 
 #include "ath9k.h"
@@ -75,6 +76,10 @@ MODULE_PARM_DESC(use_chanctx, "Enable channel context for concurrency");
 
 #endif /* CONFIG_ATH9K_CHANNEL_CONTEXT */
 
+int ath9k_use_msi;
+module_param_named(use_msi, ath9k_use_msi, int, 0444);
+MODULE_PARM_DESC(use_msi, "Use MSI instead of INTx if possible");
+
 bool is_ath9k_unloaded;
 
 #ifdef CONFIG_MAC80211_LEDS
@@ -92,6 +97,56 @@ static const struct ieee80211_tpt_blink ath9k_tpt_blink[] = {
 };
 #endif
 
+static int __init set_use_msi(const struct dmi_system_id *dmi)
+{
+	ath9k_use_msi = 1;
+	return 1;
+}
+
+static const struct dmi_system_id ath9k_quirks[] __initconst = {
+	{
+		.callback = set_use_msi,
+		.ident = "Dell Inspiron 24-3460",
+		.matches = {
+			DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
+			DMI_MATCH(DMI_PRODUCT_NAME, "Inspiron 24-3460"),
+		},
+	},
+	{
+		.callback = set_use_msi,
+		.ident = "Dell Vostro 3262",
+		.matches = {
+			DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
+			DMI_MATCH(DMI_PRODUCT_NAME, "Vostro 3262"),
+		},
+	},
+	{
+		.callback = set_use_msi,
+		.ident = "Dell Inspiron 3472",
+		.matches = {
+			DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
+			DMI_MATCH(DMI_PRODUCT_NAME, "Inspiron 3472"),
+		},
+	},
+	{
+		.callback = set_use_msi,
+		.ident = "Dell Vostro 15-3572",
+		.matches = {
+			DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
+			DMI_MATCH(DMI_PRODUCT_NAME, "Vostro 15-3572"),
+		},
+	},
+	{
+		.callback = set_use_msi,
+		.ident = "Dell Inspiron 14-3473",
+		.matches = {
+			DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
+			DMI_MATCH(DMI_PRODUCT_NAME, "Inspiron 14-3473"),
+		},
+	},
+	{}
+};
+
 static void ath9k_deinit_softc(struct ath_softc *sc);
 
 static void ath9k_op_ps_wakeup(struct ath_common *common)
@@ -1100,6 +1155,8 @@ static int __init ath9k_init(void)
 		goto err_pci_exit;
 	}
 
+	dmi_check_system(ath9k_quirks);
+
 	return 0;
 
  err_pci_exit:
diff --git a/drivers/net/wireless/ath/ath9k/mac.c b/drivers/net/wireless/ath/ath9k/mac.c
index 77c94f9e7b61..58d02c19b6d0 100644
--- a/drivers/net/wireless/ath/ath9k/mac.c
+++ b/drivers/net/wireless/ath/ath9k/mac.c
@@ -832,6 +832,43 @@ static void __ath9k_hw_enable_interrupts(struct ath_hw *ah)
 	}
 	ath_dbg(common, INTERRUPT, "AR_IMR 0x%x IER 0x%x\n",
 		REG_READ(ah, AR_IMR), REG_READ(ah, AR_IER));
+
+	if (ah->msi_enabled) {
+		u32 _msi_reg = 0;
+		u32 i = 0;
+		u32 msi_pend_addr_mask = AR_PCIE_MSI_HW_INT_PENDING_ADDR_MSI_64;
+
+		ath_dbg(ath9k_hw_common(ah), INTERRUPT,
+			"Enabling MSI, msi_mask=0x%X\n", ah->msi_mask);
+
+		REG_WRITE(ah, AR_INTR_PRIO_ASYNC_ENABLE, ah->msi_mask);
+		REG_WRITE(ah, AR_INTR_PRIO_ASYNC_MASK, ah->msi_mask);
+		ath_dbg(ath9k_hw_common(ah), INTERRUPT,
+			"AR_INTR_PRIO_ASYNC_ENABLE=0x%X, AR_INTR_PRIO_ASYNC_MASK=0x%X\n",
+			REG_READ(ah, AR_INTR_PRIO_ASYNC_ENABLE),
+			REG_READ(ah, AR_INTR_PRIO_ASYNC_MASK));
+
+		if (ah->msi_reg == 0)
+			ah->msi_reg = REG_READ(ah, AR_PCIE_MSI);
+
+		ath_dbg(ath9k_hw_common(ah), INTERRUPT,
+			"AR_PCIE_MSI=0x%X, ah->msi_reg = 0x%X\n",
+			AR_PCIE_MSI, ah->msi_reg);
+
+		i = 0;
+		do {
+			REG_WRITE(ah, AR_PCIE_MSI,
+				  (ah->msi_reg | AR_PCIE_MSI_ENABLE)
+				  & msi_pend_addr_mask);
+			_msi_reg = REG_READ(ah, AR_PCIE_MSI);
+			i++;
+		} while ((_msi_reg & AR_PCIE_MSI_ENABLE) == 0 && i < 200);
+
+		if (i >= 200)
+			ath_err(ath9k_hw_common(ah),
+				"%s: _msi_reg = 0x%X\n",
+				__func__, _msi_reg);
+	}
 }
 
 void ath9k_hw_resume_interrupts(struct ath_hw *ah)
@@ -878,12 +915,21 @@ void ath9k_hw_set_interrupts(struct ath_hw *ah)
 	if (!(ints & ATH9K_INT_GLOBAL))
 		ath9k_hw_disable_interrupts(ah);
 
+	if (ah->msi_enabled) {
+		ath_dbg(common, INTERRUPT, "Clearing AR_INTR_PRIO_ASYNC_ENABLE\n");
+
+		REG_WRITE(ah, AR_INTR_PRIO_ASYNC_ENABLE, 0);
+		REG_READ(ah, AR_INTR_PRIO_ASYNC_ENABLE);
+	}
+
 	ath_dbg(common, INTERRUPT, "New interrupt mask 0x%x\n", ints);
 
 	mask = ints & ATH9K_INT_COMMON;
 	mask2 = 0;
 
+	ah->msi_mask = 0;
 	if (ints & ATH9K_INT_TX) {
+		ah->msi_mask |= AR_INTR_PRIO_TX;
 		if (ah->config.tx_intr_mitigation)
 			mask |= AR_IMR_TXMINTR | AR_IMR_TXINTM;
 		else {
@@ -898,6 +944,7 @@ void ath9k_hw_set_interrupts(struct ath_hw *ah)
 			mask |= AR_IMR_TXEOL;
 	}
 	if (ints & ATH9K_INT_RX) {
+		ah->msi_mask |= AR_INTR_PRIO_RXLP | AR_INTR_PRIO_RXHP;
 		if (AR_SREV_9300_20_OR_LATER(ah)) {
 			mask |= AR_IMR_RXERR | AR_IMR_RXOK_HP;
 			if (ah->config.rx_intr_mitigation) {
diff --git a/drivers/net/wireless/ath/ath9k/pci.c b/drivers/net/wireless/ath/ath9k/pci.c
index 223606311261..645f0fbd9179 100644
--- a/drivers/net/wireless/ath/ath9k/pci.c
+++ b/drivers/net/wireless/ath/ath9k/pci.c
@@ -22,6 +22,8 @@
 #include <linux/module.h>
 #include "ath9k.h"
 
+extern int ath9k_use_msi;
+
 static const struct pci_device_id ath_pci_id_table[] = {
 	{ PCI_VDEVICE(ATHEROS, 0x0023) }, /* PCI   */
 	{ PCI_VDEVICE(ATHEROS, 0x0024) }, /* PCI-E */
@@ -889,6 +891,7 @@ static int ath_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
 	u32 val;
 	int ret = 0;
 	char hw_name[64];
+	int msi_enabled = 0;
 
 	if (pcim_enable_device(pdev))
 		return -EIO;
@@ -960,7 +963,20 @@ static int ath_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
 	sc->mem = pcim_iomap_table(pdev)[0];
 	sc->driver_data = id->driver_data;
 
-	ret = request_irq(pdev->irq, ath_isr, IRQF_SHARED, "ath9k", sc);
+	if (ath9k_use_msi) {
+		if (pci_enable_msi(pdev) == 0) {
+			msi_enabled = 1;
+			dev_err(&pdev->dev, "Using MSI\n");
+		} else {
+			dev_err(&pdev->dev, "Using INTx\n");
+		}
+	}
+
+	if (!msi_enabled)
+		ret = request_irq(pdev->irq, ath_isr, IRQF_SHARED, "ath9k", sc);
+	else
+		ret = request_irq(pdev->irq, ath_isr, 0, "ath9k", sc);
+
 	if (ret) {
 		dev_err(&pdev->dev, "request_irq failed\n");
 		goto err_irq;
@@ -974,6 +990,9 @@ static int ath_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
 		goto err_init;
 	}
 
+	sc->sc_ah->msi_enabled = msi_enabled;
+	sc->sc_ah->msi_reg = 0;
+
 	ath9k_hw_name(sc->sc_ah, hw_name, sizeof(hw_name));
 	wiphy_info(hw->wiphy, "%s mem=0x%lx, irq=%d\n",
 		   hw_name, (unsigned long)sc->mem, pdev->irq);
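
Legacy INTx lines can be shared between PCI devices while an MSI vector is delivered per device, which is why IRQF_SHARED is only passed on the INTx path above. The same decision, condensed into a sketch (not the committed code):

	/* Sketch: pick IRQ flags based on how the interrupt is delivered. */
	unsigned long irqflags = msi_enabled ? 0 : IRQF_SHARED;

	ret = request_irq(pdev->irq, ath_isr, irqflags, "ath9k", sc);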
diff --git a/drivers/net/wireless/ath/ath9k/recv.c b/drivers/net/wireless/ath/ath9k/recv.c
index 2197aee2bb72..a8ac42c96d71 100644
--- a/drivers/net/wireless/ath/ath9k/recv.c
+++ b/drivers/net/wireless/ath/ath9k/recv.c
@@ -826,9 +826,9 @@ static int ath9k_rx_skb_preprocess(struct ath_softc *sc,
 	sc->rx.discard_next = false;
 
 	/*
-	 * Discard zero-length packets.
+	 * Discard zero-length packets and packets smaller than an ACK
 	 */
-	if (!rx_stats->rs_datalen) {
+	if (rx_stats->rs_datalen < 10) {
 		RX_STAT_INC(rx_len_err);
 		goto corrupt;
 	}
diff --git a/drivers/net/wireless/ath/ath9k/reg.h b/drivers/net/wireless/ath/ath9k/reg.h
index 80ff69f99229..653e79611830 100644
--- a/drivers/net/wireless/ath/ath9k/reg.h
+++ b/drivers/net/wireless/ath/ath9k/reg.h
@@ -146,6 +146,14 @@
 #define AR_MACMISC_MISC_OBS_BUS_MSB_S   15
 #define AR_MACMISC_MISC_OBS_BUS_1       1
 
+#define AR_INTCFG               0x005C
+#define AR_INTCFG_MSI_RXOK      0x00000000
+#define AR_INTCFG_MSI_RXINTM    0x00000004
+#define AR_INTCFG_MSI_RXMINTR   0x00000006
+#define AR_INTCFG_MSI_TXOK      0x00000000
+#define AR_INTCFG_MSI_TXINTM    0x00000010
+#define AR_INTCFG_MSI_TXMINTR   0x00000018
+
 #define AR_DATABUF_SIZE		0x0060
 #define AR_DATABUF_SIZE_MASK	0x00000FFF
 
@@ -1256,6 +1264,13 @@ enum {
 #define AR_PCIE_MSI                             (AR_SREV_9340(ah) ? 0x40d8 : \
 						 (AR_SREV_9300_20_OR_LATER(ah) ? 0x40a4 : 0x4094))
 #define AR_PCIE_MSI_ENABLE                       0x00000001
+#define AR_PCIE_MSI_HW_DBI_WR_EN                 0x02000000
+#define AR_PCIE_MSI_HW_INT_PENDING_ADDR          0xFFA0C1FF /* bits 8..11: value must be 0x5060 */
+#define AR_PCIE_MSI_HW_INT_PENDING_ADDR_MSI_64   0xFFA0C9FF /* bits 8..11: value must be 0x5064 */
+
+#define AR_INTR_PRIO_TX               0x00000001
+#define AR_INTR_PRIO_RXLP             0x00000002
+#define AR_INTR_PRIO_RXHP             0x00000004
 
 #define AR_INTR_PRIO_SYNC_ENABLE  (AR_SREV_9340(ah) ? 0x4088 : 0x40c4)
 #define AR_INTR_PRIO_ASYNC_MASK   (AR_SREV_9340(ah) ? 0x408c : 0x40c8)
diff --git a/drivers/net/wireless/ath/wcn36xx/dxe.c b/drivers/net/wireless/ath/wcn36xx/dxe.c
index d5c810a8cc52..a3f1f7d042a4 100644
--- a/drivers/net/wireless/ath/wcn36xx/dxe.c
+++ b/drivers/net/wireless/ath/wcn36xx/dxe.c
@@ -236,6 +236,14 @@ static int wcn36xx_dxe_init_descs(struct device *dev, struct wcn36xx_dxe_ch *wcn
 	return 0;
 }
 
+static void wcn36xx_dxe_deinit_descs(struct device *dev, struct wcn36xx_dxe_ch *wcn_ch)
+{
+	size_t size;
+
+	size = wcn_ch->desc_num * sizeof(struct wcn36xx_dxe_desc);
+	dma_free_coherent(dev, size, wcn_ch->cpu_addr, wcn_ch->dma_addr);
+}
+
 static void wcn36xx_dxe_init_tx_bd(struct wcn36xx_dxe_ch *ch,
 				   struct wcn36xx_dxe_mem_pool *pool)
 {
@@ -722,7 +730,11 @@ int wcn36xx_dxe_init(struct wcn36xx *wcn)
 	/***************************************/
 	/* Init descriptors for TX LOW channel */
 	/***************************************/
-	wcn36xx_dxe_init_descs(wcn->dev, &wcn->dxe_tx_l_ch);
+	ret = wcn36xx_dxe_init_descs(wcn->dev, &wcn->dxe_tx_l_ch);
+	if (ret) {
+		dev_err(wcn->dev, "Error allocating descriptor\n");
+		return ret;
+	}
 	wcn36xx_dxe_init_tx_bd(&wcn->dxe_tx_l_ch, &wcn->data_mem_pool);
 
 	/* Write channel head to a NEXT register */
@@ -740,7 +752,12 @@ int wcn36xx_dxe_init(struct wcn36xx *wcn)
 	/***************************************/
 	/* Init descriptors for TX HIGH channel */
 	/***************************************/
-	wcn36xx_dxe_init_descs(wcn->dev, &wcn->dxe_tx_h_ch);
+	ret = wcn36xx_dxe_init_descs(wcn->dev, &wcn->dxe_tx_h_ch);
+	if (ret) {
+		dev_err(wcn->dev, "Error allocating descriptor\n");
+		goto out_err_txh_ch;
+	}
+
 	wcn36xx_dxe_init_tx_bd(&wcn->dxe_tx_h_ch, &wcn->mgmt_mem_pool);
 
 	/* Write channel head to a NEXT register */
@@ -760,7 +777,12 @@ int wcn36xx_dxe_init(struct wcn36xx *wcn)
 	/***************************************/
 	/* Init descriptors for RX LOW channel */
 	/***************************************/
-	wcn36xx_dxe_init_descs(wcn->dev, &wcn->dxe_rx_l_ch);
+	ret = wcn36xx_dxe_init_descs(wcn->dev, &wcn->dxe_rx_l_ch);
+	if (ret) {
+		dev_err(wcn->dev, "Error allocating descriptor\n");
+		goto out_err_rxl_ch;
+	}
 
 	/* For RX we need to preallocate buffers */
 	wcn36xx_dxe_ch_alloc_skb(wcn, &wcn->dxe_rx_l_ch);
@@ -790,7 +812,11 @@ int wcn36xx_dxe_init(struct wcn36xx *wcn)
 	/***************************************/
 	/* Init descriptors for RX HIGH channel */
 	/***************************************/
-	wcn36xx_dxe_init_descs(wcn->dev, &wcn->dxe_rx_h_ch);
+	ret = wcn36xx_dxe_init_descs(wcn->dev, &wcn->dxe_rx_h_ch);
+	if (ret) {
+		dev_err(wcn->dev, "Error allocating descriptor\n");
+		goto out_err_rxh_ch;
+	}
 
 	/* For RX we need to preallocate buffers */
 	wcn36xx_dxe_ch_alloc_skb(wcn, &wcn->dxe_rx_h_ch);
@@ -819,11 +845,19 @@ int wcn36xx_dxe_init(struct wcn36xx *wcn)
 
 	ret = wcn36xx_dxe_request_irqs(wcn);
 	if (ret < 0)
-		goto out_err;
+		goto out_err_irq;
 
 	return 0;
 
-out_err:
+out_err_irq:
+	wcn36xx_dxe_deinit_descs(wcn->dev, &wcn->dxe_rx_h_ch);
+out_err_rxh_ch:
+	wcn36xx_dxe_deinit_descs(wcn->dev, &wcn->dxe_rx_l_ch);
+out_err_rxl_ch:
+	wcn36xx_dxe_deinit_descs(wcn->dev, &wcn->dxe_tx_h_ch);
+out_err_txh_ch:
+	wcn36xx_dxe_deinit_descs(wcn->dev, &wcn->dxe_tx_l_ch);
+
 	return ret;
 }
 
diff --git a/drivers/net/wireless/ath/wcn36xx/hal.h b/drivers/net/wireless/ath/wcn36xx/hal.h
index b765c647319d..182963522941 100644
--- a/drivers/net/wireless/ath/wcn36xx/hal.h
+++ b/drivers/net/wireless/ath/wcn36xx/hal.h
@@ -348,6 +348,13 @@ enum wcn36xx_hal_host_msg_type {
 	WCN36XX_HAL_DHCP_START_IND = 189,
 	WCN36XX_HAL_DHCP_STOP_IND = 190,
 
+	/* Scan Offload (hw) APIs */
+	WCN36XX_HAL_START_SCAN_OFFLOAD_REQ = 204,
+	WCN36XX_HAL_START_SCAN_OFFLOAD_RSP = 205,
+	WCN36XX_HAL_STOP_SCAN_OFFLOAD_REQ = 206,
+	WCN36XX_HAL_STOP_SCAN_OFFLOAD_RSP = 207,
+	WCN36XX_HAL_SCAN_OFFLOAD_IND = 210,
+
 	WCN36XX_HAL_AVOID_FREQ_RANGE_IND = 233,
 
 	WCN36XX_HAL_PRINT_REG_INFO_IND = 259,
@@ -1115,6 +1122,101 @@ struct wcn36xx_hal_finish_scan_rsp_msg {
 
 } __packed;
 
+enum wcn36xx_hal_scan_type {
+	WCN36XX_HAL_SCAN_TYPE_PASSIVE = 0x00,
+	WCN36XX_HAL_SCAN_TYPE_ACTIVE = WCN36XX_HAL_MAX_ENUM_SIZE
+};
+
+struct wcn36xx_hal_mac_ssid {
+	u8 length;
+	u8 ssid[32];
+} __packed;
+
+struct wcn36xx_hal_start_scan_offload_req_msg {
+	struct wcn36xx_hal_msg_header header;
+
+	/* BSSIDs hot list */
+	u8 num_bssid;
+	u8 bssids[4][ETH_ALEN];
+
+	/* Directed probe requests will be sent for the listed SSIDs (max 10) */
+	u8 num_ssid;
+	struct wcn36xx_hal_mac_ssid ssids[10];
+
+	/* Report AP with hidden ssid */
+	u8 scan_hidden;
+
+	/* Self MAC address */
+	u8 mac[ETH_ALEN];
+
+	/* BSS type */
+	enum wcn36xx_hal_bss_type bss_type;
+
+	/* Scan type */
+	enum wcn36xx_hal_scan_type scan_type;
+
+	/* Minimum scanning time on each channel (ms) */
+	u32 min_ch_time;
+
+	/* Maximum scanning time on each channel (ms) */
+	u32 max_ch_time;
+
+	/* Is a p2p search */
+	u8 p2p_search;
+
+	/* Channels to scan */
+	u8 num_channel;
+	u8 channels[80];
+
+	/* IE field */
+	u16 ie_len;
+	u8 ie[0];
+} __packed;
+
+struct wcn36xx_hal_start_scan_offload_rsp_msg {
+	struct wcn36xx_hal_msg_header header;
+
+	/* success or failure */
+	u32 status;
+} __packed;
+
+enum wcn36xx_hal_scan_offload_ind_type {
+	/* Scan has been started */
+	WCN36XX_HAL_SCAN_IND_STARTED = 0x01,
+	/* Scan has been completed */
+	WCN36XX_HAL_SCAN_IND_COMPLETED = 0x02,
+	/* Moved to foreign channel */
+	WCN36XX_HAL_SCAN_IND_FOREIGN_CHANNEL = 0x08,
+	/* Scan request has been dequeued */
+	WCN36XX_HAL_SCAN_IND_DEQUEUED = 0x10,
+	/* Preempted by other high priority scan */
+	WCN36XX_HAL_SCAN_IND_PREEMPTED = 0x20,
+	/* Scan start failed */
+	WCN36XX_HAL_SCAN_IND_FAILED = 0x40,
+	/* Scan restarted */
+	WCN36XX_HAL_SCAN_IND_RESTARTED = 0x80,
+	WCN36XX_HAL_SCAN_IND_MAX = WCN36XX_HAL_MAX_ENUM_SIZE
+};
+
+struct wcn36xx_hal_scan_offload_ind {
+	struct wcn36xx_hal_msg_header header;
+
+	u32 type;
+	u32 channel_mhz;
+	u32 scan_id;
+} __packed;
+
+struct wcn36xx_hal_stop_scan_offload_req_msg {
+	struct wcn36xx_hal_msg_header header;
+} __packed;
+
+struct wcn36xx_hal_stop_scan_offload_rsp_msg {
+	struct wcn36xx_hal_msg_header header;
+
+	/* success or failure */
+	u32 status;
+} __packed;
+
 enum wcn36xx_hal_rate_index {
 	HW_RATE_INDEX_1MBPS	= 0x82,
 	HW_RATE_INDEX_2MBPS	= 0x84,
@@ -1507,11 +1609,6 @@ struct wcn36xx_hal_edca_param_record {
 	u16 txop_limit;
 } __packed;
 
-struct wcn36xx_hal_mac_ssid {
-	u8 length;
-	u8 ssid[32];
-} __packed;
-
 /* Concurrency role. These are generic IDs that identify the various roles
  *  in the software system. */
 enum wcn36xx_hal_con_mode {
diff --git a/drivers/net/wireless/ath/wcn36xx/main.c b/drivers/net/wireless/ath/wcn36xx/main.c
index 987f1252a3cf..ab5be6d2c691 100644
--- a/drivers/net/wireless/ath/wcn36xx/main.c
+++ b/drivers/net/wireless/ath/wcn36xx/main.c
@@ -641,7 +641,6 @@ static int wcn36xx_hw_scan(struct ieee80211_hw *hw,
 			   struct ieee80211_scan_request *hw_req)
 {
 	struct wcn36xx *wcn = hw->priv;
-
 	mutex_lock(&wcn->scan_lock);
 	if (wcn->scan_req) {
 		mutex_unlock(&wcn->scan_lock);
@@ -650,11 +649,16 @@ static int wcn36xx_hw_scan(struct ieee80211_hw *hw,
 
 	wcn->scan_aborted = false;
 	wcn->scan_req = &hw_req->req;
+
 	mutex_unlock(&wcn->scan_lock);
 
-	schedule_work(&wcn->scan_work);
+	if (!get_feat_caps(wcn->fw_feat_caps, SCAN_OFFLOAD)) {
+		/* legacy manual/sw scan */
+		schedule_work(&wcn->scan_work);
+		return 0;
+	}
 
-	return 0;
+	return wcn36xx_smd_start_hw_scan(wcn, vif, &hw_req->req);
 }
 
 static void wcn36xx_cancel_hw_scan(struct ieee80211_hw *hw,
@@ -662,6 +666,12 @@ static void wcn36xx_cancel_hw_scan(struct ieee80211_hw *hw,
 {
 	struct wcn36xx *wcn = hw->priv;
 
+	if (!wcn36xx_smd_stop_hw_scan(wcn)) {
+		struct cfg80211_scan_info scan_info = { .aborted = true };
+
+		ieee80211_scan_completed(wcn->hw, &scan_info);
+	}
+
 	mutex_lock(&wcn->scan_lock);
 	wcn->scan_aborted = true;
 	mutex_unlock(&wcn->scan_lock);
diff --git a/drivers/net/wireless/ath/wcn36xx/smd.c b/drivers/net/wireless/ath/wcn36xx/smd.c
index 9c6590d5348a..2a4871ca9c72 100644
--- a/drivers/net/wireless/ath/wcn36xx/smd.c
+++ b/drivers/net/wireless/ath/wcn36xx/smd.c
@@ -73,6 +73,8 @@ static struct wcn36xx_cfg_val wcn36xx_cfg_vals[] = {
 	WCN36XX_CFG_VAL(TX_PWR_CTRL_ENABLE, 1),
 	WCN36XX_CFG_VAL(ENABLE_CLOSE_LOOP, 1),
 	WCN36XX_CFG_VAL(ENABLE_LPWR_IMG_TRANSITION, 0),
+	WCN36XX_CFG_VAL(BTC_STATIC_LEN_LE_BT, 120000),
+	WCN36XX_CFG_VAL(BTC_STATIC_LEN_LE_WLAN, 30000),
 	WCN36XX_CFG_VAL(MAX_ASSOC_LIMIT, 10),
 	WCN36XX_CFG_VAL(ENABLE_MCC_ADAPTIVE_SCHEDULER, 0),
 };
@@ -613,6 +615,85 @@ out:
 	return ret;
 }
 
+int wcn36xx_smd_start_hw_scan(struct wcn36xx *wcn, struct ieee80211_vif *vif,
+			      struct cfg80211_scan_request *req)
+{
+	struct wcn36xx_hal_start_scan_offload_req_msg msg_body;
+	int ret, i;
+
+	mutex_lock(&wcn->hal_mutex);
+	INIT_HAL_MSG(msg_body, WCN36XX_HAL_START_SCAN_OFFLOAD_REQ);
+
+	msg_body.scan_type = WCN36XX_HAL_SCAN_TYPE_ACTIVE;
+	msg_body.min_ch_time = 30;
+	msg_body.max_ch_time = 100;
+	msg_body.scan_hidden = 1;
+	memcpy(msg_body.mac, vif->addr, ETH_ALEN);
+	msg_body.p2p_search = vif->p2p;
+
+	msg_body.num_ssid = min_t(u8, req->n_ssids, ARRAY_SIZE(msg_body.ssids));
+	for (i = 0; i < msg_body.num_ssid; i++) {
+		msg_body.ssids[i].length = min_t(u8, req->ssids[i].ssid_len,
+						sizeof(msg_body.ssids[i].ssid));
+		memcpy(msg_body.ssids[i].ssid, req->ssids[i].ssid,
+		       msg_body.ssids[i].length);
+	}
+
+	msg_body.num_channel = min_t(u8, req->n_channels,
+				     sizeof(msg_body.channels));
+	for (i = 0; i < msg_body.num_channel; i++)
+		msg_body.channels[i] = req->channels[i]->hw_value;
+
+	PREPARE_HAL_BUF(wcn->hal_buf, msg_body);
+
+	wcn36xx_dbg(WCN36XX_DBG_HAL,
+		    "hal start hw-scan (channels: %u; ssids: %u; p2p: %s)\n",
+		    msg_body.num_channel, msg_body.num_ssid,
+		    msg_body.p2p_search ? "yes" : "no");
+
+	ret = wcn36xx_smd_send_and_wait(wcn, msg_body.header.len);
+	if (ret) {
+		wcn36xx_err("Sending hal_start_scan_offload failed\n");
+		goto out;
+	}
+	ret = wcn36xx_smd_rsp_status_check(wcn->hal_buf, wcn->hal_rsp_len);
+	if (ret) {
+		wcn36xx_err("hal_start_scan_offload response failed err=%d\n",
+			    ret);
+		goto out;
+	}
+out:
+	mutex_unlock(&wcn->hal_mutex);
+	return ret;
+}
+
+int wcn36xx_smd_stop_hw_scan(struct wcn36xx *wcn)
+{
+	struct wcn36xx_hal_stop_scan_offload_req_msg msg_body;
+	int ret;
+
+	mutex_lock(&wcn->hal_mutex);
+	INIT_HAL_MSG(msg_body, WCN36XX_HAL_STOP_SCAN_OFFLOAD_REQ);
+	PREPARE_HAL_BUF(wcn->hal_buf, msg_body);
+
+	wcn36xx_dbg(WCN36XX_DBG_HAL, "hal stop hw-scan\n");
+
+	ret = wcn36xx_smd_send_and_wait(wcn, msg_body.header.len);
+	if (ret) {
+		wcn36xx_err("Sending hal_stop_scan_offload failed\n");
+		goto out;
+	}
+	ret = wcn36xx_smd_rsp_status_check(wcn->hal_buf, wcn->hal_rsp_len);
+	if (ret) {
+		wcn36xx_err("hal_stop_scan_offload response failed err=%d\n",
+			    ret);
+		goto out;
+	}
+out:
+	mutex_unlock(&wcn->hal_mutex);
+	return ret;
+}
+
 static int wcn36xx_smd_switch_channel_rsp(void *buf, size_t len)
 {
 	struct wcn36xx_hal_switch_channel_rsp_msg *rsp;
@@ -2039,6 +2120,40 @@ static int wcn36xx_smd_tx_compl_ind(struct wcn36xx *wcn, void *buf, size_t len)
 	return 0;
 }
 
+static int wcn36xx_smd_hw_scan_ind(struct wcn36xx *wcn, void *buf, size_t len)
+{
+	struct wcn36xx_hal_scan_offload_ind *rsp = buf;
+	struct cfg80211_scan_info scan_info = {};
+
+	if (len != sizeof(*rsp)) {
+		wcn36xx_warn("Corrupted delete scan indication\n");
+		return -EIO;
+	}
+
+	wcn36xx_dbg(WCN36XX_DBG_HAL, "scan indication (type %x)", rsp->type);
+
+	switch (rsp->type) {
+	case WCN36XX_HAL_SCAN_IND_FAILED:
+		scan_info.aborted = true;
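+		/* fall through */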
+	case WCN36XX_HAL_SCAN_IND_COMPLETED:
+		mutex_lock(&wcn->scan_lock);
+		wcn->scan_req = NULL;
+		mutex_unlock(&wcn->scan_lock);
+		ieee80211_scan_completed(wcn->hw, &scan_info);
+		break;
+	case WCN36XX_HAL_SCAN_IND_STARTED:
+	case WCN36XX_HAL_SCAN_IND_FOREIGN_CHANNEL:
+	case WCN36XX_HAL_SCAN_IND_DEQUEUED:
+	case WCN36XX_HAL_SCAN_IND_PREEMPTED:
+	case WCN36XX_HAL_SCAN_IND_RESTARTED:
+		break;
+	default:
+		wcn36xx_warn("Unknown scan indication type %x\n", rsp->type);
+	}
+
+	return 0;
+}
+
 static int wcn36xx_smd_missed_beacon_ind(struct wcn36xx *wcn,
 					 void *buf,
 					 size_t len)
@@ -2250,6 +2365,8 @@ int wcn36xx_smd_rsp_process(struct rpmsg_device *rpdev,
 	case WCN36XX_HAL_CH_SWITCH_RSP:
 	case WCN36XX_HAL_FEATURE_CAPS_EXCHANGE_RSP:
 	case WCN36XX_HAL_8023_MULTICAST_LIST_RSP:
+	case WCN36XX_HAL_START_SCAN_OFFLOAD_RSP:
+	case WCN36XX_HAL_STOP_SCAN_OFFLOAD_RSP:
 		memcpy(wcn->hal_buf, buf, len);
 		wcn->hal_rsp_len = len;
 		complete(&wcn->hal_rsp_compl);
@@ -2262,6 +2379,7 @@ int wcn36xx_smd_rsp_process(struct rpmsg_device *rpdev,
 	case WCN36XX_HAL_MISSED_BEACON_IND:
 	case WCN36XX_HAL_DELETE_STA_CONTEXT_IND:
 	case WCN36XX_HAL_PRINT_REG_INFO_IND:
+	case WCN36XX_HAL_SCAN_OFFLOAD_IND:
 		msg_ind = kmalloc(sizeof(*msg_ind) + len, GFP_ATOMIC);
 		if (!msg_ind) {
 			wcn36xx_err("Run out of memory while handling SMD_EVENT (%d)\n",
@@ -2298,6 +2416,8 @@ static void wcn36xx_ind_smd_work(struct work_struct *work)
 	hal_ind_msg = list_first_entry(&wcn->hal_ind_queue,
 				       struct wcn36xx_hal_ind_msg,
 				       list);
+	list_del(wcn->hal_ind_queue.next);
+	spin_unlock_irqrestore(&wcn->hal_ind_lock, flags);
 
 	msg_header = (struct wcn36xx_hal_msg_header *)hal_ind_msg->msg;
 
@@ -2326,12 +2446,14 @@ static void wcn36xx_ind_smd_work(struct work_struct *work)
 					       hal_ind_msg->msg,
 					       hal_ind_msg->msg_len);
 		break;
+	case WCN36XX_HAL_SCAN_OFFLOAD_IND:
+		wcn36xx_smd_hw_scan_ind(wcn, hal_ind_msg->msg,
+					hal_ind_msg->msg_len);
+		break;
 	default:
 		wcn36xx_err("SMD_EVENT (%d) not supported\n",
 			      msg_header->msg_type);
 	}
-	list_del(wcn->hal_ind_queue.next);
-	spin_unlock_irqrestore(&wcn->hal_ind_lock, flags);
 	kfree(hal_ind_msg);
 }
 int wcn36xx_smd_open(struct wcn36xx *wcn)
diff --git a/drivers/net/wireless/ath/wcn36xx/smd.h b/drivers/net/wireless/ath/wcn36xx/smd.h
index 013fc9546f56..8076edf40ac8 100644
--- a/drivers/net/wireless/ath/wcn36xx/smd.h
+++ b/drivers/net/wireless/ath/wcn36xx/smd.h
@@ -65,6 +65,9 @@ int wcn36xx_smd_end_scan(struct wcn36xx *wcn, u8 scan_channel);
 int wcn36xx_smd_finish_scan(struct wcn36xx *wcn,
 			    enum wcn36xx_hal_sys_mode mode);
 int wcn36xx_smd_update_scan_params(struct wcn36xx *wcn, u8 *channels, size_t channel_count);
+int wcn36xx_smd_start_hw_scan(struct wcn36xx *wcn, struct ieee80211_vif *vif,
+			      struct cfg80211_scan_request *req);
+int wcn36xx_smd_stop_hw_scan(struct wcn36xx *wcn);
 int wcn36xx_smd_add_sta_self(struct wcn36xx *wcn, struct ieee80211_vif *vif);
 int wcn36xx_smd_delete_sta_self(struct wcn36xx *wcn, u8 *addr);
 int wcn36xx_smd_delete_sta(struct wcn36xx *wcn, u8 sta_index);
diff --git a/drivers/net/wireless/ath/wil6210/boot_loader.h b/drivers/net/wireless/ath/wil6210/boot_loader.h
index c131b5e1292f..d32c1f4e533a 100644
--- a/drivers/net/wireless/ath/wil6210/boot_loader.h
+++ b/drivers/net/wireless/ath/wil6210/boot_loader.h
@@ -1,4 +1,5 @@
 /* Copyright (c) 2015 Qualcomm Atheros, Inc.
+ * Copyright (c) 2018, The Linux Foundation. All rights reserved.
  *
  * Permission to use, copy, modify, and/or distribute this software for any
  * purpose with or without fee is hereby granted, provided that the above
@@ -39,7 +40,8 @@ struct bl_dedicated_registers_v1 {
 	/* valid only for version 2 and above */
 	__le32  bl_assert_code;         /* 0x880A58 BL Assert code */
 	__le32  bl_assert_blink;        /* 0x880A5C BL Assert Branch */
-	__le32  bl_reserved[22];        /* 0x880A60 - 0x880AB4 */
+	__le32  bl_shutdown_handshake;  /* 0x880A60 BL cleaner shutdown */
+	__le32  bl_reserved[21];        /* 0x880A64 - 0x880AB4 */
 	__le32  bl_magic_number;        /* 0x880AB8 BL Magic number */
 } __packed;
 
@@ -58,4 +60,9 @@ struct bl_dedicated_registers_v0 {
 	u8	mac_address[6];			/* 0x880A4c BL mac address */
 } __packed;
 
+/* bits for bl_shutdown_handshake */
+#define BL_SHUTDOWN_HS_GRTD		BIT(0)
+#define BL_SHUTDOWN_HS_RTD		BIT(1)
+#define BL_SHUTDOWN_HS_PROT_VER(x) WIL_GET_BITS(x, 28, 31)
+
 #endif /* BOOT_LOADER_EXPORT_H_ */
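
BL_SHUTDOWN_HS_PROT_VER extracts the handshake protocol version from the top nibble of the register. A sketch of the bit-field extraction it relies on; the real WIL_GET_BITS macro lives elsewhere in the driver and may differ in detail:

	/* Illustration: extract bits b0..b1 (inclusive) of x. */
	#define GET_BITS_SKETCH(x, b0, b1) \
		(((x) >> (b0)) & ((1UL << ((b1) - (b0) + 1)) - 1))
	/* protocol version = GET_BITS_SKETCH(handshake, 28, 31) */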
diff --git a/drivers/net/wireless/ath/wil6210/cfg80211.c b/drivers/net/wireless/ath/wil6210/cfg80211.c
index 85d5c04618eb..768f63f38341 100644
--- a/drivers/net/wireless/ath/wil6210/cfg80211.c
+++ b/drivers/net/wireless/ath/wil6210/cfg80211.c
@@ -901,7 +901,7 @@ int wil_cfg80211_mgmt_tx(struct wiphy *wiphy, struct wireless_dev *wdev,
 			 u64 *cookie)
 {
 	const u8 *buf = params->buf;
-	size_t len = params->len;
+	size_t len = params->len, total;
 	struct wil6210_priv *wil = wiphy_to_wil(wiphy);
 	int rc;
 	bool tx_status = false;
@@ -926,7 +926,11 @@ int wil_cfg80211_mgmt_tx(struct wiphy *wiphy, struct wireless_dev *wdev,
 	if (len < sizeof(struct ieee80211_hdr_3addr))
 		return -EINVAL;
 
-	cmd = kmalloc(sizeof(*cmd) + len, GFP_KERNEL);
+	total = sizeof(*cmd) + len;
+	if (total < len)
+		return -EINVAL;
+
+	cmd = kmalloc(total, GFP_KERNEL);
 	if (!cmd) {
 		rc = -ENOMEM;
 		goto out;
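
The new total < len test above is the idiomatic guard against unsigned wraparound when the WMI header size is added to a caller-controlled length. Reduced to its essentials (illustration, not driver code):

	/* Unsigned addition wraps modulo SIZE_MAX + 1, so an overflowed
	 * sum compares smaller than either operand. */
	size_t total = sizeof(*cmd) + len;

	if (total < len)
		return -EINVAL;	/* len was near SIZE_MAX: reject */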
@@ -936,7 +940,7 @@ int wil_cfg80211_mgmt_tx(struct wiphy *wiphy, struct wireless_dev *wdev,
 	cmd->len = cpu_to_le16(len);
 	memcpy(cmd->payload, buf, len);
 
-	rc = wmi_call(wil, WMI_SW_TX_REQ_CMDID, cmd, sizeof(*cmd) + len,
+	rc = wmi_call(wil, WMI_SW_TX_REQ_CMDID, cmd, total,
 		      WMI_SW_TX_COMPLETE_EVENTID, &evt, sizeof(evt), 2000);
 	if (rc == 0)
 		tx_status = !evt.evt.status;
@@ -952,9 +956,8 @@ static int wil_cfg80211_set_channel(struct wiphy *wiphy,
 				    struct cfg80211_chan_def *chandef)
 {
 	struct wil6210_priv *wil = wiphy_to_wil(wiphy);
-	struct wireless_dev *wdev = wil_to_wdev(wil);
 
-	wdev->preset_chandef = *chandef;
+	wil->monitor_chandef = *chandef;
 
 	return 0;
 }
@@ -1727,9 +1730,12 @@ static int wil_cfg80211_suspend(struct wiphy *wiphy,
 
 	wil_dbg_pm(wil, "suspending\n");
 
-	wil_p2p_stop_discovery(wil);
-
+	mutex_lock(&wil->mutex);
+	mutex_lock(&wil->p2p_wdev_mutex);
+	wil_p2p_stop_radio_operations(wil);
 	wil_abort_scan(wil, true);
+	mutex_unlock(&wil->p2p_wdev_mutex);
+	mutex_unlock(&wil->mutex);
 
 out:
 	return rc;
@@ -1744,6 +1750,69 @@ static int wil_cfg80211_resume(struct wiphy *wiphy)
 	return 0;
 }
 
+static int
+wil_cfg80211_sched_scan_start(struct wiphy *wiphy,
+			      struct net_device *dev,
+			      struct cfg80211_sched_scan_request *request)
+{
+	struct wil6210_priv *wil = wiphy_to_wil(wiphy);
+	int i, rc;
+
+	wil_dbg_misc(wil,
+		     "sched scan start: n_ssids %d, ie_len %zu, flags 0x%x\n",
+		     request->n_ssids, request->ie_len, request->flags);
+	for (i = 0; i < request->n_ssids; i++) {
+		wil_dbg_misc(wil, "SSID[%d]:", i);
+		wil_hex_dump_misc("SSID ", DUMP_PREFIX_OFFSET, 16, 1,
+				  request->ssids[i].ssid,
+				  request->ssids[i].ssid_len, true);
+	}
+	wil_dbg_misc(wil, "channels:");
+	for (i = 0; i < request->n_channels; i++)
+		wil_dbg_misc(wil, " %d%s", request->channels[i]->hw_value,
+			     i == request->n_channels - 1 ? "\n" : "");
+	wil_dbg_misc(wil, "n_match_sets %d, min_rssi_thold %d, delay %d\n",
+		     request->n_match_sets, request->min_rssi_thold,
+		     request->delay);
+	for (i = 0; i < request->n_match_sets; i++) {
+		struct cfg80211_match_set *ms = &request->match_sets[i];
+
+		wil_dbg_misc(wil, "MATCHSET[%d]: rssi_thold %d\n",
+			     i, ms->rssi_thold);
+		wil_hex_dump_misc("SSID ", DUMP_PREFIX_OFFSET, 16, 1,
+				  ms->ssid.ssid,
+				  ms->ssid.ssid_len, true);
+	}
+	wil_dbg_misc(wil, "n_scan_plans %d\n", request->n_scan_plans);
+	for (i = 0; i < request->n_scan_plans; i++) {
+		struct cfg80211_sched_scan_plan *sp = &request->scan_plans[i];
+
+		wil_dbg_misc(wil, "SCAN PLAN[%d]: interval %d iterations %d\n",
+			     i, sp->interval, sp->iterations);
+	}
+
+	rc = wmi_set_ie(wil, WMI_FRAME_PROBE_REQ, request->ie_len, request->ie);
+	if (rc)
+		return rc;
+	return wmi_start_sched_scan(wil, request);
+}
+
+static int
+wil_cfg80211_sched_scan_stop(struct wiphy *wiphy, struct net_device *dev,
+			     u64 reqid)
+{
+	struct wil6210_priv *wil = wiphy_to_wil(wiphy);
+	int rc;
+
+	rc = wmi_stop_sched_scan(wil);
+	/* The device returns an error if it thinks PNO is already
+	 * stopped; ignore the return code so user space and the driver
+	 * get back in sync.
+	 */
+	wil_dbg_misc(wil, "sched scan stopped (%d)\n", rc);
+
+	return 0;
+}
+
 static const struct cfg80211_ops wil_cfg80211_ops = {
 	.add_virtual_intf = wil_cfg80211_add_iface,
 	.del_virtual_intf = wil_cfg80211_del_iface,
@@ -1777,6 +1846,8 @@ static const struct cfg80211_ops wil_cfg80211_ops = {
 	.set_power_mgmt = wil_cfg80211_set_power_mgmt,
 	.suspend = wil_cfg80211_suspend,
 	.resume = wil_cfg80211_resume,
+	.sched_scan_start = wil_cfg80211_sched_scan_start,
+	.sched_scan_stop = wil_cfg80211_sched_scan_stop,
 };
 
 static void wil_wiphy_init(struct wiphy *wiphy)
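
The kmalloc() change above adds an integer-overflow guard: with a caller-controlled len, sizeof(*cmd) + len can wrap around size_t and yield an undersized allocation. A minimal standalone sketch of the same pattern (alloc_with_payload and hdr_size are hypothetical, not part of this patch):

	#include <linux/slab.h>

	/* Allocate room for a fixed header plus a caller-supplied
	 * payload. Since hdr_size is a small constant, a wrapped
	 * addition always yields total < len.
	 */
	static void *alloc_with_payload(size_t hdr_size, size_t len)
	{
		size_t total = hdr_size + len;

		if (total < len)	/* size_t addition wrapped */
			return NULL;

		return kmalloc(total, GFP_KERNEL);
	}

Where available, struct_size() from <linux/overflow.h> expresses the same guarded computation for a struct with a flexible array member.
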
diff --git a/drivers/net/wireless/ath/wil6210/debugfs.c b/drivers/net/wireless/ath/wil6210/debugfs.c
index e58dc6dc1f9c..4a4888246e8c 100644
--- a/drivers/net/wireless/ath/wil6210/debugfs.c
+++ b/drivers/net/wireless/ath/wil6210/debugfs.c
@@ -242,12 +242,19 @@ static void wil_print_ring(struct seq_file *s, const char *prefix,
 static int wil_mbox_debugfs_show(struct seq_file *s, void *data)
 {
 	struct wil6210_priv *wil = s->private;
+	int ret;
+
+	ret = wil_pm_runtime_get(wil);
+	if (ret < 0)
+		return ret;
 
 	wil_print_ring(s, "tx", wil->csr + HOST_MBOX +
 		       offsetof(struct wil6210_mbox_ctl, tx));
 	wil_print_ring(s, "rx", wil->csr + HOST_MBOX +
 		       offsetof(struct wil6210_mbox_ctl, rx));
 
+	wil_pm_runtime_put(wil);
+
 	return 0;
 }
 
@@ -265,15 +272,37 @@ static const struct file_operations fops_mbox = {
 
 static int wil_debugfs_iomem_x32_set(void *data, u64 val)
 {
-	writel(val, (void __iomem *)data);
+	struct wil_debugfs_iomem_data *d =
+		(struct wil_debugfs_iomem_data *)data;
+	struct wil6210_priv *wil = d->wil;
+	int ret;
+
+	ret = wil_pm_runtime_get(wil);
+	if (ret < 0)
+		return ret;
+
+	writel(val, (void __iomem *)d->offset);
 	wmb(); /* make sure write propagated to HW */
 
+	wil_pm_runtime_put(wil);
+
 	return 0;
 }
 
 static int wil_debugfs_iomem_x32_get(void *data, u64 *val)
 {
-	*val = readl((void __iomem *)data);
+	struct wil_debugfs_iomem_data *d =
+		(struct wil_debugfs_iomem_data *)data;
+	struct wil6210_priv *wil = d->wil;
+	int ret;
+
+	ret = wil_pm_runtime_get(wil);
+	if (ret < 0)
+		return ret;
+
+	*val = readl((void __iomem *)d->offset);
+
+	wil_pm_runtime_put(wil);
 
 	return 0;
 }
@@ -284,10 +313,21 @@ DEFINE_SIMPLE_ATTRIBUTE(fops_iomem_x32, wil_debugfs_iomem_x32_get,
 static struct dentry *wil_debugfs_create_iomem_x32(const char *name,
 						   umode_t mode,
 						   struct dentry *parent,
-						   void *value)
+						   void *value,
+						   struct wil6210_priv *wil)
 {
-	return debugfs_create_file(name, mode, parent, value,
-				   &fops_iomem_x32);
+	struct dentry *file;
+	struct wil_debugfs_iomem_data *data = &wil->dbg_data.data_arr[
+					      wil->dbg_data.iomem_data_count];
+
+	data->wil = wil;
+	data->offset = value;
+
+	file = debugfs_create_file(name, mode, parent, data, &fops_iomem_x32);
+	if (!IS_ERR_OR_NULL(file))
+		wil->dbg_data.iomem_data_count++;
+
+	return file;
 }
 
 static int wil_debugfs_ulong_set(void *data, u64 val)
@@ -346,7 +386,8 @@ static void wil6210_debugfs_init_offset(struct wil6210_priv *wil,
 		case doff_io32:
 			f = wil_debugfs_create_iomem_x32(tbl[i].name,
 							 tbl[i].mode, dbg,
-							 base + tbl[i].off);
+							 base + tbl[i].off,
+							 wil);
 			break;
 		case doff_u8:
 			f = debugfs_create_u8(tbl[i].name, tbl[i].mode, dbg,
@@ -475,13 +516,22 @@ static int wil6210_debugfs_create_ITR_CNT(struct wil6210_priv *wil,
 static int wil_memread_debugfs_show(struct seq_file *s, void *data)
 {
 	struct wil6210_priv *wil = s->private;
-	void __iomem *a = wmi_buffer(wil, cpu_to_le32(mem_addr));
+	void __iomem *a;
+	int ret;
+
+	ret = wil_pm_runtime_get(wil);
+	if (ret < 0)
+		return ret;
+
+	a = wmi_buffer(wil, cpu_to_le32(mem_addr));
 
 	if (a)
 		seq_printf(s, "[0x%08x] = 0x%08x\n", mem_addr, readl(a));
 	else
 		seq_printf(s, "[0x%08x] = INVALID\n", mem_addr);
 
+	wil_pm_runtime_put(wil);
+
 	return 0;
 }
 
@@ -502,10 +552,12 @@ static ssize_t wil_read_file_ioblob(struct file *file, char __user *user_buf,
 {
 	enum { max_count = 4096 };
 	struct wil_blob_wrapper *wil_blob = file->private_data;
+	struct wil6210_priv *wil = wil_blob->wil;
 	loff_t pos = *ppos;
 	size_t available = wil_blob->blob.size;
 	void *buf;
 	size_t ret;
+	int rc;
 
 	if (test_bit(wil_status_suspending, wil_blob->wil->status) ||
 	    test_bit(wil_status_suspended, wil_blob->wil->status))
@@ -526,10 +578,19 @@ static ssize_t wil_read_file_ioblob(struct file *file, char __user *user_buf,
 	if (!buf)
 		return -ENOMEM;
 
+	rc = wil_pm_runtime_get(wil);
+	if (rc < 0) {
+		kfree(buf);
+		return rc;
+	}
+
 	wil_memcpy_fromio_32(buf, (const void __iomem *)
 			     wil_blob->blob.data + pos, count);
 
 	ret = copy_to_user(user_buf, buf, count);
+
+	wil_pm_runtime_put(wil);
+
 	kfree(buf);
 	if (ret == count)
 		return -EFAULT;
@@ -808,7 +869,6 @@ static ssize_t wil_write_file_txmgmt(struct file *file, const char __user *buf,
 
 	params.buf = frame;
 	params.len = len;
-	params.chan = wdev->preset_chandef.chan;
 
 	rc = wil_cfg80211_mgmt_tx(wiphy, wdev, &params, NULL);
 
@@ -1571,8 +1631,6 @@ static ssize_t wil_write_suspend_stats(struct file *file,
 	struct wil6210_priv *wil = file->private_data;
 
 	memset(&wil->suspend_stats, 0, sizeof(wil->suspend_stats));
-	wil->suspend_stats.min_suspend_time = ULONG_MAX;
-	wil->suspend_stats.collection_start = ktime_get();
 
 	return len;
 }
@@ -1582,33 +1640,41 @@ static ssize_t wil_read_suspend_stats(struct file *file,
 				      size_t count, loff_t *ppos)
 {
 	struct wil6210_priv *wil = file->private_data;
-	static char text[400];
-	int n;
-	unsigned long long stats_collection_time =
-		ktime_to_us(ktime_sub(ktime_get(),
-				      wil->suspend_stats.collection_start));
+	char *text;
+	int n, ret, text_size = 500;
 
-	n = snprintf(text, sizeof(text),
-		     "Suspend statistics:\n"
+	text = kmalloc(text_size, GFP_KERNEL);
+	if (!text)
+		return -ENOMEM;
+
+	n = snprintf(text, text_size,
+		     "Radio on suspend statistics:\n"
+		     "successful suspends:%ld failed suspends:%ld\n"
+		     "successful resumes:%ld failed resumes:%ld\n"
+		     "rejected by device:%ld\n"
+		     "Radio off suspend statistics:\n"
 		     "successful suspends:%ld failed suspends:%ld\n"
 		     "successful resumes:%ld failed resumes:%ld\n"
-		     "rejected by host:%ld rejected by device:%ld\n"
-		     "total suspend time:%lld min suspend time:%lld\n"
-		     "max suspend time:%lld stats collection time: %lld\n",
-		     wil->suspend_stats.successful_suspends,
-		     wil->suspend_stats.failed_suspends,
-		     wil->suspend_stats.successful_resumes,
-		     wil->suspend_stats.failed_resumes,
-		     wil->suspend_stats.rejected_by_host,
+		     "General statistics:\n"
+		     "rejected by host:%ld\n",
+		     wil->suspend_stats.r_on.successful_suspends,
+		     wil->suspend_stats.r_on.failed_suspends,
+		     wil->suspend_stats.r_on.successful_resumes,
+		     wil->suspend_stats.r_on.failed_resumes,
 		     wil->suspend_stats.rejected_by_device,
-		     wil->suspend_stats.total_suspend_time,
-		     wil->suspend_stats.min_suspend_time,
-		     wil->suspend_stats.max_suspend_time,
-		     stats_collection_time);
+		     wil->suspend_stats.r_off.successful_suspends,
+		     wil->suspend_stats.r_off.failed_suspends,
+		     wil->suspend_stats.r_off.successful_resumes,
+		     wil->suspend_stats.r_off.failed_resumes,
+		     wil->suspend_stats.rejected_by_host);
+
+	n = min_t(int, n, text_size);
 
-	n = min_t(int, n, sizeof(text));
+	ret = simple_read_from_buffer(user_buf, count, ppos, text, n);
 
-	return simple_read_from_buffer(user_buf, count, ppos, text, n);
+	kfree(text);
+
+	return ret;
 }
 
 static const struct file_operations fops_suspend_stats = {
@@ -1736,14 +1802,31 @@ static const struct dbg_off dbg_statics[] = {
 	{},
 };
 
+static const int dbg_off_count = 4 * (ARRAY_SIZE(isr_off) - 1) +
+				ARRAY_SIZE(dbg_wil_regs) - 1 +
+				ARRAY_SIZE(pseudo_isr_off) - 1 +
+				ARRAY_SIZE(lgc_itr_cnt_off) - 1 +
+				ARRAY_SIZE(tx_itr_cnt_off) - 1 +
+				ARRAY_SIZE(rx_itr_cnt_off) - 1;
+
 int wil6210_debugfs_init(struct wil6210_priv *wil)
 {
 	struct dentry *dbg = wil->debug = debugfs_create_dir(WIL_NAME,
 			wil_to_wiphy(wil)->debugfsdir);
-
 	if (IS_ERR_OR_NULL(dbg))
 		return -ENODEV;
 
+	wil->dbg_data.data_arr = kcalloc(dbg_off_count,
+					 sizeof(struct wil_debugfs_iomem_data),
+					 GFP_KERNEL);
+	if (!wil->dbg_data.data_arr) {
+		debugfs_remove_recursive(dbg);
+		wil->debug = NULL;
+		return -ENOMEM;
+	}
+
+	wil->dbg_data.iomem_data_count = 0;
+
 	wil_pmc_init(wil);
 
 	wil6210_debugfs_init_files(wil, dbg);
@@ -1758,8 +1841,6 @@ int wil6210_debugfs_init(struct wil6210_priv *wil)
 
 	wil6210_debugfs_create_ITR_CNT(wil, dbg);
 
-	wil->suspend_stats.collection_start = ktime_get();
-
 	return 0;
 }
 
@@ -1768,6 +1849,8 @@ void wil6210_debugfs_remove(struct wil6210_priv *wil)
 	debugfs_remove_recursive(wil->debug);
 	wil->debug = NULL;
 
+	kfree(wil->dbg_data.data_arr);
+
 	/* free pmc memory without sending command to fw, as it will
 	 * be reset on the way down anyway
 	 */
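
A recurring shape in the debugfs changes above: every handler that touches registers now brackets the access with wil_pm_runtime_get()/wil_pm_runtime_put(), so a runtime-suspended device is resumed before any MMIO. A sketch of the bracket, assuming the wrappers defined later in this patch (the register offset is illustrative only):

	static int show_reg_example(struct seq_file *s, void *data)
	{
		struct wil6210_priv *wil = s->private;
		int ret;

		ret = wil_pm_runtime_get(wil);	/* resume if suspended */
		if (ret < 0)
			return ret;	/* never touch HW while asleep */

		seq_printf(s, "0x%08x\n", readl(wil->csr + 0x100));

		wil_pm_runtime_put(wil);	/* re-arm autosuspend */
		return 0;
	}
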
diff --git a/drivers/net/wireless/ath/wil6210/ethtool.c b/drivers/net/wireless/ath/wil6210/ethtool.c
index adcfef4dabf7..66200f616a37 100644
--- a/drivers/net/wireless/ath/wil6210/ethtool.c
+++ b/drivers/net/wireless/ath/wil6210/ethtool.c
@@ -47,9 +47,14 @@ static int wil_ethtoolops_get_coalesce(struct net_device *ndev,
 	struct wil6210_priv *wil = ndev_to_wil(ndev);
 	u32 tx_itr_en, tx_itr_val = 0;
 	u32 rx_itr_en, rx_itr_val = 0;
+	int ret;
 
 	wil_dbg_misc(wil, "ethtoolops_get_coalesce\n");
 
+	ret = wil_pm_runtime_get(wil);
+	if (ret < 0)
+		return ret;
+
 	tx_itr_en = wil_r(wil, RGF_DMA_ITR_TX_CNT_CTL);
 	if (tx_itr_en & BIT_DMA_ITR_TX_CNT_CTL_EN)
 		tx_itr_val = wil_r(wil, RGF_DMA_ITR_TX_CNT_TRSH);
@@ -58,6 +63,8 @@ static int wil_ethtoolops_get_coalesce(struct net_device *ndev,
 	if (rx_itr_en & BIT_DMA_ITR_RX_CNT_CTL_EN)
 		rx_itr_val = wil_r(wil, RGF_DMA_ITR_RX_CNT_TRSH);
 
+	wil_pm_runtime_put(wil);
+
 	cp->tx_coalesce_usecs = tx_itr_val;
 	cp->rx_coalesce_usecs = rx_itr_val;
 	return 0;
@@ -67,6 +74,7 @@ static int wil_ethtoolops_set_coalesce(struct net_device *ndev,
 				       struct ethtool_coalesce *cp)
 {
 	struct wil6210_priv *wil = ndev_to_wil(ndev);
+	int ret;
 
 	wil_dbg_misc(wil, "ethtoolops_set_coalesce: rx %d usec, tx %d usec\n",
 		     cp->rx_coalesce_usecs, cp->tx_coalesce_usecs);
@@ -86,8 +94,15 @@ static int wil_ethtoolops_set_coalesce(struct net_device *ndev,
 
 	wil->tx_max_burst_duration = cp->tx_coalesce_usecs;
 	wil->rx_max_burst_duration = cp->rx_coalesce_usecs;
+
+	ret = wil_pm_runtime_get(wil);
+	if (ret < 0)
+		return ret;
+
 	wil_configure_interrupt_moderation(wil);
 
+	wil_pm_runtime_put(wil);
+
 	return 0;
 
 out_bad:
diff --git a/drivers/net/wireless/ath/wil6210/fw.h b/drivers/net/wireless/ath/wil6210/fw.h
index 2f2b910501ba..2c7b24f61587 100644
--- a/drivers/net/wireless/ath/wil6210/fw.h
+++ b/drivers/net/wireless/ath/wil6210/fw.h
@@ -1,5 +1,6 @@
 /*
  * Copyright (c) 2014,2016 Qualcomm Atheros, Inc.
+ * Copyright (c) 2018, The Linux Foundation. All rights reserved.
  *
  * Permission to use, copy, modify, and/or distribute this software for any
  * purpose with or without fee is hereby granted, provided that the above
@@ -58,15 +59,30 @@ struct wil_fw_record_comment { /* type == wil_fw_type_comment */
 	u8 data[0]; /* free-form data [data_size], see above */
 } __packed;
 
+/* Comment header - common for all comment record types */
+struct wil_fw_record_comment_hdr {
+	__le32 magic;
+};
+
 /* FW capabilities encoded inside a comment record */
 #define WIL_FW_CAPABILITIES_MAGIC (0xabcddcba)
 struct wil_fw_record_capabilities { /* type == wil_fw_type_comment */
 	/* identifies capabilities record */
-	__le32 magic;
+	struct wil_fw_record_comment_hdr hdr;
 	/* capabilities (variable size), see enum wmi_fw_capability */
 	u8 capabilities[0];
 };
 
+/* brd file info encoded inside a comment record */
+#define WIL_BRD_FILE_MAGIC (0xabcddcbb)
+struct wil_fw_record_brd_file { /* type == wil_fw_type_comment */
+	/* identifies brd file record */
+	struct wil_fw_record_comment_hdr hdr;
+	__le32 version;
+	__le32 base_addr;
+	__le32 max_size_bytes;
+} __packed;
+
 /* perform action
  * data_size = @head.size - offsetof(struct wil_fw_record_action, data)
  */
diff --git a/drivers/net/wireless/ath/wil6210/fw_inc.c b/drivers/net/wireless/ath/wil6210/fw_inc.c
index e01acac88825..914c0106e94b 100644
--- a/drivers/net/wireless/ath/wil6210/fw_inc.c
+++ b/drivers/net/wireless/ath/wil6210/fw_inc.c
@@ -1,5 +1,6 @@
 /*
  * Copyright (c) 2014-2017 Qualcomm Atheros, Inc.
+ * Copyright (c) 2018, The Linux Foundation. All rights reserved.
  *
  * Permission to use, copy, modify, and/or distribute this software for any
  * purpose with or without fee is hereby granted, provided that the above
@@ -26,14 +27,17 @@
 					     prefix_type, rowsize,	\
 					     groupsize, buf, len, ascii)
 
-#define FW_ADDR_CHECK(ioaddr, val, msg) do { \
-		ioaddr = wmi_buffer(wil, val); \
-		if (!ioaddr) { \
-			wil_err_fw(wil, "bad " msg ": 0x%08x\n", \
-				   le32_to_cpu(val)); \
-			return -EINVAL; \
-		} \
-	} while (0)
+static bool wil_fw_addr_check(struct wil6210_priv *wil,
+			      void __iomem **ioaddr, __le32 val,
+			      u32 size, const char *msg)
+{
+	*ioaddr = wmi_buffer_block(wil, val, size);
+	if (!(*ioaddr)) {
+		wil_err_fw(wil, "bad %s: 0x%08x\n", msg, le32_to_cpu(val));
+		return false;
+	}
+	return true;
+}
 
 /**
  * wil_fw_verify - verify firmware file validity
@@ -124,14 +128,6 @@ static int fw_ignore_section(struct wil6210_priv *wil, const void *data,
 	return 0;
 }
 
-static int fw_handle_comment(struct wil6210_priv *wil, const void *data,
-			     size_t size)
-{
-	wil_hex_dump_fw("", DUMP_PREFIX_OFFSET, 16, 1, data, size, true);
-
-	return 0;
-}
-
 static int
 fw_handle_capabilities(struct wil6210_priv *wil, const void *data,
 		       size_t size)
@@ -139,9 +135,11 @@ fw_handle_capabilities(struct wil6210_priv *wil, const void *data,
 	const struct wil_fw_record_capabilities *rec = data;
 	size_t capa_size;
 
-	if (size < sizeof(*rec) ||
-	    le32_to_cpu(rec->magic) != WIL_FW_CAPABILITIES_MAGIC)
+	if (size < sizeof(*rec)) {
+		wil_hex_dump_fw("", DUMP_PREFIX_OFFSET, 16, 1,
+				data, size, true);
 		return 0;
+	}
 
 	capa_size = size - offsetof(struct wil_fw_record_capabilities,
 				    capabilities);
@@ -153,8 +151,56 @@ fw_handle_capabilities(struct wil6210_priv *wil, const void *data,
 	return 0;
 }
 
-static int fw_handle_data(struct wil6210_priv *wil, const void *data,
-			  size_t size)
+static int
+fw_handle_brd_file(struct wil6210_priv *wil, const void *data,
+		   size_t size)
+{
+	const struct wil_fw_record_brd_file *rec = data;
+
+	if (size < sizeof(*rec)) {
+		wil_hex_dump_fw("", DUMP_PREFIX_OFFSET, 16, 1,
+				data, size, true);
+		return 0;
+	}
+
+	wil->brd_file_addr = le32_to_cpu(rec->base_addr);
+	wil->brd_file_max_size = le32_to_cpu(rec->max_size_bytes);
+
+	wil_dbg_fw(wil, "brd_file_addr 0x%x, brd_file_max_size %d\n",
+		   wil->brd_file_addr, wil->brd_file_max_size);
+
+	return 0;
+}
+
+static int
+fw_handle_comment(struct wil6210_priv *wil, const void *data,
+		  size_t size)
+{
+	const struct wil_fw_record_comment_hdr *hdr = data;
+	u32 magic;
+	int rc = 0;
+
+	if (size < sizeof(*hdr))
+		return 0;
+
+	magic = le32_to_cpu(hdr->magic);
+
+	switch (magic) {
+	case WIL_FW_CAPABILITIES_MAGIC:
+		wil_dbg_fw(wil, "magic is WIL_FW_CAPABILITIES_MAGIC\n");
+		rc = fw_handle_capabilities(wil, data, size);
+		break;
+	case WIL_BRD_FILE_MAGIC:
+		wil_dbg_fw(wil, "magic is WIL_BRD_FILE_MAGIC\n");
+		rc = fw_handle_brd_file(wil, data, size);
+		break;
+	}
+
+	return rc;
+}
+
+static int __fw_handle_data(struct wil6210_priv *wil, const void *data,
+			    size_t size, __le32 addr)
 {
 	const struct wil_fw_record_data *d = data;
 	void __iomem *dst;
@@ -165,15 +211,23 @@ static int fw_handle_data(struct wil6210_priv *wil, const void *data,
 		return -EINVAL;
 	}
 
-	FW_ADDR_CHECK(dst, d->addr, "address");
-	wil_dbg_fw(wil, "write [0x%08x] <== %zu bytes\n", le32_to_cpu(d->addr),
-		   s);
+	if (!wil_fw_addr_check(wil, &dst, addr, s, "address"))
+		return -EINVAL;
+	wil_dbg_fw(wil, "write [0x%08x] <== %zu bytes\n", le32_to_cpu(addr), s);
 	wil_memcpy_toio_32(dst, d->data, s);
 	wmb(); /* finish before processing next record */
 
 	return 0;
 }
 
+static int fw_handle_data(struct wil6210_priv *wil, const void *data,
+			  size_t size)
+{
+	const struct wil_fw_record_data *d = data;
+
+	return __fw_handle_data(wil, data, size, d->addr);
+}
+
 static int fw_handle_fill(struct wil6210_priv *wil, const void *data,
 			  size_t size)
 {
@@ -197,7 +251,8 @@ static int fw_handle_fill(struct wil6210_priv *wil, const void *data,
 		return -EINVAL;
 	}
 
-	FW_ADDR_CHECK(dst, d->addr, "address");
+	if (!wil_fw_addr_check(wil, &dst, d->addr, s, "address"))
+		return -EINVAL;
 
 	v = le32_to_cpu(d->value);
 	wil_dbg_fw(wil, "fill [0x%08x] <== 0x%08x, %zu bytes\n",
@@ -253,7 +308,8 @@ static int fw_handle_direct_write(struct wil6210_priv *wil, const void *data,
 		u32 v = le32_to_cpu(block[i].value);
 		u32 x, y;
 
-		FW_ADDR_CHECK(dst, block[i].addr, "address");
+		if (!wil_fw_addr_check(wil, &dst, block[i].addr, 0, "address"))
+			return -EINVAL;
 
 		x = readl(dst);
 		y = (x & m) | (v & ~m);
@@ -319,10 +375,15 @@ static int fw_handle_gateway_data(struct wil6210_priv *wil, const void *data,
 	wil_dbg_fw(wil, "gw write record [%3d] blocks, cmd 0x%08x\n",
 		   n, gw_cmd);
 
-	FW_ADDR_CHECK(gwa_addr, d->gateway_addr_addr, "gateway_addr_addr");
-	FW_ADDR_CHECK(gwa_val, d->gateway_value_addr, "gateway_value_addr");
-	FW_ADDR_CHECK(gwa_cmd, d->gateway_cmd_addr, "gateway_cmd_addr");
-	FW_ADDR_CHECK(gwa_ctl, d->gateway_ctrl_address, "gateway_ctrl_address");
+	if (!wil_fw_addr_check(wil, &gwa_addr, d->gateway_addr_addr, 0,
+			       "gateway_addr_addr") ||
+	    !wil_fw_addr_check(wil, &gwa_val, d->gateway_value_addr, 0,
+			       "gateway_value_addr") ||
+	    !wil_fw_addr_check(wil, &gwa_cmd, d->gateway_cmd_addr, 0,
+			       "gateway_cmd_addr") ||
+	    !wil_fw_addr_check(wil, &gwa_ctl, d->gateway_ctrl_address, 0,
+			       "gateway_ctrl_address"))
+		return -EINVAL;
 
 	wil_dbg_fw(wil, "gw addresses: addr 0x%08x val 0x%08x"
 		   " cmd 0x%08x ctl 0x%08x\n",
@@ -378,12 +439,19 @@ static int fw_handle_gateway_data4(struct wil6210_priv *wil, const void *data,
 	wil_dbg_fw(wil, "gw4 write record [%3d] blocks, cmd 0x%08x\n",
 		   n, gw_cmd);
 
-	FW_ADDR_CHECK(gwa_addr, d->gateway_addr_addr, "gateway_addr_addr");
+	if (!wil_fw_addr_check(wil, &gwa_addr, d->gateway_addr_addr, 0,
+			       "gateway_addr_addr"))
+		return -EINVAL;
 	for (k = 0; k < ARRAY_SIZE(block->value); k++)
-		FW_ADDR_CHECK(gwa_val[k], d->gateway_value_addr[k],
-			      "gateway_value_addr");
-	FW_ADDR_CHECK(gwa_cmd, d->gateway_cmd_addr, "gateway_cmd_addr");
-	FW_ADDR_CHECK(gwa_ctl, d->gateway_ctrl_address, "gateway_ctrl_address");
+		if (!wil_fw_addr_check(wil, &gwa_val[k],
+				       d->gateway_value_addr[k],
+				       0, "gateway_value_addr"))
+			return -EINVAL;
+	if (!wil_fw_addr_check(wil, &gwa_cmd, d->gateway_cmd_addr, 0,
+			       "gateway_cmd_addr") ||
+	    !wil_fw_addr_check(wil, &gwa_ctl, d->gateway_ctrl_address, 0,
+			       "gateway_ctrl_address"))
+		return -EINVAL;
 
 	wil_dbg_fw(wil, "gw4 addresses: addr 0x%08x cmd 0x%08x ctl 0x%08x\n",
 		   le32_to_cpu(d->gateway_addr_addr),
@@ -422,7 +490,7 @@ static const struct {
 	int (*parse_handler)(struct wil6210_priv *wil, const void *data,
 			     size_t size);
 } wil_fw_handlers[] = {
-	{wil_fw_type_comment, fw_handle_comment, fw_handle_capabilities},
+	{wil_fw_type_comment, fw_handle_comment, fw_handle_comment},
 	{wil_fw_type_data, fw_handle_data, fw_ignore_section},
 	{wil_fw_type_fill, fw_handle_fill, fw_ignore_section},
 	/* wil_fw_type_action */
@@ -517,7 +585,7 @@ int wil_request_firmware(struct wil6210_priv *wil, const char *name,
 
 	rc = request_firmware(&fw, name, wil_to_dev(wil));
 	if (rc) {
-		wil_err_fw(wil, "Failed to load firmware %s\n", name);
+		wil_err_fw(wil, "Failed to load firmware %s rc %d\n", name, rc);
 		return rc;
 	}
 	wil_dbg_fw(wil, "Loading <%s>, %zu bytes\n", name, fw->size);
@@ -539,6 +607,100 @@ out:
 }
 
 /**
+ * wil_brd_process - process section from BRD file
+ *
+ * Return error code
+ */
+static int wil_brd_process(struct wil6210_priv *wil, const void *data,
+			   size_t size)
+{
+	int rc = 0;
+	const struct wil_fw_record_head *hdr = data;
+	size_t s, hdr_sz;
+	u16 type;
+
+	/* Assuming the board file includes only one header record and one data
+	 * record. Each record starts with wil_fw_record_head.
+	 */
+	if (size < sizeof(*hdr))
+		return -EINVAL;
+	s = sizeof(*hdr) + le32_to_cpu(hdr->size);
+	if (s > size)
+		return -EINVAL;
+
+	/* Skip the header record and handle the data record */
+	hdr = (const void *)hdr + s;
+	size -= s;
+	if (size < sizeof(*hdr))
+		return -EINVAL;
+	hdr_sz = le32_to_cpu(hdr->size);
+
+	if (wil->brd_file_max_size && hdr_sz > wil->brd_file_max_size)
+		return -EINVAL;
+	if (sizeof(*hdr) + hdr_sz > size)
+		return -EINVAL;
+	if (hdr_sz % 4) {
+		wil_err_fw(wil, "unaligned record size: %zu\n",
+			   hdr_sz);
+		return -EINVAL;
+	}
+	type = le16_to_cpu(hdr->type);
+	if (type != wil_fw_type_data) {
+		wil_err_fw(wil, "invalid record type for board file: %d\n",
+			   type);
+		return -EINVAL;
+	}
+	if (hdr_sz < sizeof(struct wil_fw_record_data)) {
+		wil_err_fw(wil, "data record too short: %zu\n", hdr_sz);
+		return -EINVAL;
+	}
+
+	wil_dbg_fw(wil, "using addr from fw file: [0x%08x]\n",
+		   wil->brd_file_addr);
+
+	rc = __fw_handle_data(wil, &hdr[1], hdr_sz,
+			      cpu_to_le32(wil->brd_file_addr));
+
+	return rc;
+}
+
+/**
+ * wil_request_board - Request board file
+ *
+ * Request the board image from file. The board file address and
+ * maximum size are read from the FW file during initialization.
+ * The brd file shall include one header record and one data record.
+ *
+ * Return error code
+ */
+int wil_request_board(struct wil6210_priv *wil, const char *name)
+{
+	int rc, dlen;
+	const struct firmware *brd;
+
+	rc = request_firmware(&brd, name, wil_to_dev(wil));
+	if (rc) {
+		wil_err_fw(wil, "Failed to load brd %s\n", name);
+		return rc;
+	}
+	wil_dbg_fw(wil, "Loading <%s>, %zu bytes\n", name, brd->size);
+
+	/* Verify the header */
+	dlen = wil_fw_verify(wil, brd->data, brd->size);
+	if (dlen < 0) {
+		rc = dlen;
+		goto out;
+	}
+	/* Process the data record */
+	rc = wil_brd_process(wil, brd->data, dlen);
+
+out:
+	release_firmware(brd);
+	return rc;
+}
+
+/**
  * wil_fw_verify_file_exists - checks if firmware file exist
  *
  * @wil: driver context
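
fw_handle_comment() above replaces the single capabilities parser with a dispatcher keyed on the leading 32-bit magic, which is what lets one record type (wil_fw_type_comment) carry both capabilities and board-file info. A compressed sketch of that shape, with the wil-specific bodies stubbed out (all names here are illustrative):

	#include <linux/kernel.h>
	#include <linux/types.h>

	struct comment_hdr {
		__le32 magic;
	};

	static int handle_capabilities(const void *data, size_t size)
	{
		return 0;	/* stub */
	}

	static int handle_brd_info(const void *data, size_t size)
	{
		return 0;	/* stub */
	}

	static int handle_comment(const void *data, size_t size)
	{
		const struct comment_hdr *hdr = data;

		if (size < sizeof(*hdr))
			return 0;	/* too short to classify */

		switch (le32_to_cpu(hdr->magic)) {
		case 0xabcddcba:	/* capabilities record */
			return handle_capabilities(data, size);
		case 0xabcddcbb:	/* board file info record */
			return handle_brd_info(data, size);
		default:
			return 0;	/* unknown comments are ignored */
		}
	}
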
diff --git a/drivers/net/wireless/ath/wil6210/interrupt.c b/drivers/net/wireless/ath/wil6210/interrupt.c
index 59def4f3fcf3..1835187ea075 100644
--- a/drivers/net/wireless/ath/wil6210/interrupt.c
+++ b/drivers/net/wireless/ath/wil6210/interrupt.c
@@ -1,5 +1,6 @@
 /*
  * Copyright (c) 2012-2017 Qualcomm Atheros, Inc.
+ * Copyright (c) 2018, The Linux Foundation. All rights reserved.
  *
  * Permission to use, copy, modify, and/or distribute this software for any
  * purpose with or without fee is hereby granted, provided that the above
@@ -358,6 +359,25 @@ static void wil_cache_mbox_regs(struct wil6210_priv *wil)
 	wil_mbox_ring_le2cpus(&wil->mbox_ctl.tx);
 }
 
+static bool wil_validate_mbox_regs(struct wil6210_priv *wil)
+{
+	size_t min_size = sizeof(struct wil6210_mbox_hdr) +
+		sizeof(struct wmi_cmd_hdr);
+
+	if (wil->mbox_ctl.rx.entry_size < min_size) {
+		wil_err(wil, "rx mbox entry too small (%d)\n",
+			wil->mbox_ctl.rx.entry_size);
+		return false;
+	}
+	if (wil->mbox_ctl.tx.entry_size < min_size) {
+		wil_err(wil, "tx mbox entry too small (%d)\n",
+			wil->mbox_ctl.tx.entry_size);
+		return false;
+	}
+
+	return true;
+}
+
 static irqreturn_t wil6210_irq_misc(int irq, void *cookie)
 {
 	struct wil6210_priv *wil = cookie;
@@ -376,8 +396,9 @@ static irqreturn_t wil6210_irq_misc(int irq, void *cookie)
 	wil6210_mask_irq_misc(wil, false);
 
 	if (isr & ISR_MISC_FW_ERROR) {
-		u32 fw_assert_code = wil_r(wil, RGF_FW_ASSERT_CODE);
-		u32 ucode_assert_code = wil_r(wil, RGF_UCODE_ASSERT_CODE);
+		u32 fw_assert_code = wil_r(wil, wil->rgf_fw_assert_code_addr);
+		u32 ucode_assert_code =
+			wil_r(wil, wil->rgf_ucode_assert_code_addr);
 
 		wil_err(wil,
 			"Firmware error detected, assert codes FW 0x%08x, UCODE 0x%08x\n",
@@ -393,7 +414,8 @@ static irqreturn_t wil6210_irq_misc(int irq, void *cookie)
 	if (isr & ISR_MISC_FW_READY) {
 		wil_dbg_irq(wil, "IRQ: FW ready\n");
 		wil_cache_mbox_regs(wil);
-		set_bit(wil_status_mbox_ready, wil->status);
+		if (wil_validate_mbox_regs(wil))
+			set_bit(wil_status_mbox_ready, wil->status);
 		/**
 		 * Actual FW ready indicated by the
 		 * WMI_FW_READY_EVENTID
@@ -545,7 +567,7 @@ static irqreturn_t wil6210_hardirq(int irq, void *cookie)
 	if (unlikely((pseudo_cause == 0) || ((pseudo_cause & 0xff) == 0xff)))
 		return IRQ_NONE;
 
-	/* FIXME: IRQ mask debug */
+	/* IRQ mask debug */
 	if (unlikely(wil6210_debug_irq_mask(wil, pseudo_cause)))
 		return IRQ_NONE;
 
diff --git a/drivers/net/wireless/ath/wil6210/main.c b/drivers/net/wireless/ath/wil6210/main.c
index 885924abf61c..0c61a6c13991 100644
--- a/drivers/net/wireless/ath/wil6210/main.c
+++ b/drivers/net/wireless/ath/wil6210/main.c
@@ -1,5 +1,6 @@
 /*
  * Copyright (c) 2012-2017 Qualcomm Atheros, Inc.
+ * Copyright (c) 2018, The Linux Foundation. All rights reserved.
  *
  * Permission to use, copy, modify, and/or distribute this software for any
  * purpose with or without fee is hereby granted, provided that the above
@@ -579,7 +580,6 @@ int wil_priv_init(struct wil6210_priv *wil)
 	wil->wakeup_trigger = WMI_WAKEUP_TRIGGER_UCAST |
 			      WMI_WAKEUP_TRIGGER_BCAST;
 	memset(&wil->suspend_stats, 0, sizeof(wil->suspend_stats));
-	wil->suspend_stats.min_suspend_time = ULONG_MAX;
 	wil->vring_idle_trsh = 16;
 
 	return 0;
@@ -638,6 +638,98 @@ void wil_priv_deinit(struct wil6210_priv *wil)
 	destroy_workqueue(wil->wmi_wq);
 }
 
+static void wil_shutdown_bl(struct wil6210_priv *wil)
+{
+	u32 val;
+
+	wil_s(wil, RGF_USER_BL +
+	      offsetof(struct bl_dedicated_registers_v1,
+		       bl_shutdown_handshake), BL_SHUTDOWN_HS_GRTD);
+
+	usleep_range(100, 150);
+
+	val = wil_r(wil, RGF_USER_BL +
+		    offsetof(struct bl_dedicated_registers_v1,
+			     bl_shutdown_handshake));
+	if (val & BL_SHUTDOWN_HS_RTD) {
+		wil_dbg_misc(wil, "BL is ready for halt\n");
+		return;
+	}
+
+	wil_err(wil, "BL did not report ready for halt\n");
+}
+
+/* this format is used by ARC embedded CPU for instruction memory */
+static inline u32 ARC_me_imm32(u32 d)
+{
+	return ((d & 0xffff0000) >> 16) | ((d & 0x0000ffff) << 16);
+}
+
+/* defines access to interrupt vectors for wil_freeze_bl */
+#define ARC_IRQ_VECTOR_OFFSET(N)	((N) * 8)
+/* ARC long jump instruction */
+#define ARC_JAL_INST			(0x20200f80)
+
+static void wil_freeze_bl(struct wil6210_priv *wil)
+{
+	u32 jal, upc, saved;
+	u32 ivt3 = ARC_IRQ_VECTOR_OFFSET(3);
+
+	jal = wil_r(wil, wil->iccm_base + ivt3);
+	if (jal != ARC_me_imm32(ARC_JAL_INST)) {
+		wil_dbg_misc(wil, "invalid IVT entry found, skipping\n");
+		return;
+	}
+
+	/* prevent the target from entering deep sleep
+	 * and disabling memory access
+	 */
+	saved = wil_r(wil, RGF_USER_USAGE_8);
+	wil_w(wil, RGF_USER_USAGE_8, saved | BIT_USER_PREVENT_DEEP_SLEEP);
+	usleep_range(20, 25); /* let the BL process the bit */
+
+	/* redirect to endless loop in the INT_L1 context and let it trap */
+	wil_w(wil, wil->iccm_base + ivt3 + 4, ARC_me_imm32(ivt3));
+	usleep_range(20, 25); /* let the BL get into the trap */
+
+	/* verify the BL is frozen */
+	upc = wil_r(wil, RGF_USER_CPU_PC);
+	if (upc < ivt3 || (upc > (ivt3 + 8)))
+		wil_dbg_misc(wil, "BL freeze failed, PC=0x%08X\n", upc);
+
+	wil_w(wil, RGF_USER_USAGE_8, saved);
+}
+
+static void wil_bl_prepare_halt(struct wil6210_priv *wil)
+{
+	u32 tmp, ver;
+
+	/* Before halting the device CPU, the driver must make sure the
+	 * BL is not accessing host memory. This is done differently
+	 * depending on the BL version:
+	 * 1. For very old BL versions the procedure is skipped
+	 *    (not supported).
+	 * 2. For old BL versions we use a special trick to freeze the BL.
+	 * 3. For new BL versions we shut down the BL using a handshake
+	 *    procedure.
+	 */
+	tmp = wil_r(wil, RGF_USER_BL +
+		    offsetof(struct bl_dedicated_registers_v0,
+			     boot_loader_struct_version));
+	if (!tmp) {
+		wil_dbg_misc(wil, "old BL, skipping halt preparation\n");
+		return;
+	}
+
+	tmp = wil_r(wil, RGF_USER_BL +
+		    offsetof(struct bl_dedicated_registers_v1,
+			     bl_shutdown_handshake));
+	ver = BL_SHUTDOWN_HS_PROT_VER(tmp);
+
+	if (ver > 0)
+		wil_shutdown_bl(wil);
+	else
+		wil_freeze_bl(wil);
+}
+
 static inline void wil_halt_cpu(struct wil6210_priv *wil)
 {
 	wil_w(wil, RGF_USER_USER_CPU_0, BIT_USER_USER_CPU_MAN_RST);
@@ -671,7 +763,7 @@ static void wil_set_oob_mode(struct wil6210_priv *wil, u8 mode)
 	}
 }
 
-static int wil_target_reset(struct wil6210_priv *wil)
+static int wil_target_reset(struct wil6210_priv *wil, int no_flash)
 {
 	int delay = 0;
 	u32 x, x1 = 0;
@@ -685,9 +777,16 @@ static int wil_target_reset(struct wil6210_priv *wil)
 
 	wil_halt_cpu(wil);
 
-	/* clear all boot loader "ready" bits */
-	wil_w(wil, RGF_USER_BL +
-	      offsetof(struct bl_dedicated_registers_v0, boot_loader_ready), 0);
+	if (!no_flash) {
+		/* clear all boot loader "ready" bits */
+		wil_w(wil, RGF_USER_BL +
+		      offsetof(struct bl_dedicated_registers_v0,
+			       boot_loader_ready), 0);
+		/* this should be safe to write even with old BLs */
+		wil_w(wil, RGF_USER_BL +
+		      offsetof(struct bl_dedicated_registers_v1,
+			       bl_shutdown_handshake), 0);
+	}
 	/* Clear Fw Download notification */
 	wil_c(wil, RGF_USER_USAGE_6, BIT(0));
 
@@ -728,21 +827,33 @@ static int wil_target_reset(struct wil6210_priv *wil)
 	wil_w(wil, RGF_USER_CLKS_CTL_SW_RST_VEC_0, 0);
 
 	/* wait until device ready. typical time is 20..80 msec */
-	do {
-		msleep(RST_DELAY);
-		x = wil_r(wil, RGF_USER_BL +
-			  offsetof(struct bl_dedicated_registers_v0,
-				   boot_loader_ready));
-		if (x1 != x) {
-			wil_dbg_misc(wil, "BL.ready 0x%08x => 0x%08x\n", x1, x);
-			x1 = x;
-		}
-		if (delay++ > RST_COUNT) {
-			wil_err(wil, "Reset not completed, bl.ready 0x%08x\n",
-				x);
-			return -ETIME;
-		}
-	} while (x != BL_READY);
+	if (no_flash)
+		do {
+			msleep(RST_DELAY);
+			x = wil_r(wil, USER_EXT_USER_PMU_3);
+			if (delay++ > RST_COUNT) {
+				wil_err(wil, "Reset not completed, PMU_3 0x%08x\n",
+					x);
+				return -ETIME;
+			}
+		} while ((x & BIT_PMU_DEVICE_RDY) == 0);
+	else
+		do {
+			msleep(RST_DELAY);
+			x = wil_r(wil, RGF_USER_BL +
+				  offsetof(struct bl_dedicated_registers_v0,
+					   boot_loader_ready));
+			if (x1 != x) {
+				wil_dbg_misc(wil, "BL.ready 0x%08x => 0x%08x\n",
+					     x1, x);
+				x1 = x;
+			}
+			if (delay++ > RST_COUNT) {
+				wil_err(wil, "Reset not completed, bl.ready 0x%08x\n",
+					x);
+				return -ETIME;
+			}
+		} while (x != BL_READY);
 
 	wil_c(wil, RGF_USER_CLKS_CTL_0, BIT_USER_CLKS_RST_PWGD);
 
@@ -750,6 +861,21 @@ static int wil_target_reset(struct wil6210_priv *wil)
 	wil_s(wil, RGF_DMA_OFUL_NID_0, BIT_DMA_OFUL_NID_0_RX_EXT_TR_EN |
 	      BIT_DMA_OFUL_NID_0_RX_EXT_A3_SRC);
 
+	if (no_flash) {
+		/* Reset OTP HW vectors to fit 40MHz */
+		wil_w(wil, RGF_USER_XPM_IFC_RD_TIME1, 0x60001);
+		wil_w(wil, RGF_USER_XPM_IFC_RD_TIME2, 0x20027);
+		wil_w(wil, RGF_USER_XPM_IFC_RD_TIME3, 0x1);
+		wil_w(wil, RGF_USER_XPM_IFC_RD_TIME4, 0x20027);
+		wil_w(wil, RGF_USER_XPM_IFC_RD_TIME5, 0x30003);
+		wil_w(wil, RGF_USER_XPM_IFC_RD_TIME6, 0x20002);
+		wil_w(wil, RGF_USER_XPM_IFC_RD_TIME7, 0x60001);
+		wil_w(wil, RGF_USER_XPM_IFC_RD_TIME8, 0x60001);
+		wil_w(wil, RGF_USER_XPM_IFC_RD_TIME9, 0x60001);
+		wil_w(wil, RGF_USER_XPM_IFC_RD_TIME10, 0x60001);
+		wil_w(wil, RGF_USER_XPM_RD_DOUT_SAMPLE_TIME, 0x57);
+	}
+
 	wil_dbg_misc(wil, "Reset completed in %d ms\n", delay * RST_DELAY);
 	return 0;
 }
@@ -760,6 +886,8 @@ static void wil_collect_fw_info(struct wil6210_priv *wil)
 	u8 retry_short;
 	int rc;
 
+	wil_refresh_fw_capabilities(wil);
+
 	rc = wmi_get_mgmt_retry(wil, &retry_short);
 	if (!rc) {
 		wiphy->retry_short = retry_short;
@@ -767,6 +895,43 @@ static void wil_collect_fw_info(struct wil6210_priv *wil)
 	}
 }
 
+void wil_refresh_fw_capabilities(struct wil6210_priv *wil)
+{
+	struct wiphy *wiphy = wil_to_wiphy(wil);
+	int features;
+
+	wil->keep_radio_on_during_sleep =
+		test_bit(WIL_PLATFORM_CAPA_RADIO_ON_IN_SUSPEND,
+			 wil->platform_capa) &&
+		test_bit(WMI_FW_CAPABILITY_D3_SUSPEND, wil->fw_capabilities);
+
+	wil_info(wil, "keep_radio_on_during_sleep (%d)\n",
+		 wil->keep_radio_on_during_sleep);
+
+	if (test_bit(WMI_FW_CAPABILITY_RSSI_REPORTING, wil->fw_capabilities))
+		wiphy->signal_type = CFG80211_SIGNAL_TYPE_MBM;
+	else
+		wiphy->signal_type = CFG80211_SIGNAL_TYPE_UNSPEC;
+
+	if (test_bit(WMI_FW_CAPABILITY_PNO, wil->fw_capabilities)) {
+		wiphy->max_sched_scan_reqs = 1;
+		wiphy->max_sched_scan_ssids = WMI_MAX_PNO_SSID_NUM;
+		wiphy->max_match_sets = WMI_MAX_PNO_SSID_NUM;
+		wiphy->max_sched_scan_ie_len = WMI_MAX_IE_LEN;
+		wiphy->max_sched_scan_plans = WMI_MAX_PLANS_NUM;
+	}
+
+	if (wil->platform_ops.set_features) {
+		features = (test_bit(WMI_FW_CAPABILITY_REF_CLOCK_CONTROL,
+				     wil->fw_capabilities) &&
+			    test_bit(WIL_PLATFORM_CAPA_EXT_CLK,
+				     wil->platform_capa)) ?
+			BIT(WIL_PLATFORM_FEATURE_FW_EXT_CLK_CONTROL) : 0;
+
+		wil->platform_ops.set_features(wil->platform_handle, features);
+	}
+}
+
 void wil_mbox_ring_le2cpus(struct wil6210_mbox_ring *r)
 {
 	le32_to_cpus(&r->base);
@@ -868,6 +1033,27 @@ static void wil_bl_crash_info(struct wil6210_priv *wil, bool is_err)
 	}
 }
 
+static int wil_get_otp_info(struct wil6210_priv *wil)
+{
+	struct net_device *ndev = wil_to_ndev(wil);
+	struct wiphy *wiphy = wil_to_wiphy(wil);
+	u8 mac[8];
+
+	wil_memcpy_fromio_32(mac, wil->csr + HOSTADDR(RGF_OTP_MAC),
+			     sizeof(mac));
+	if (!is_valid_ether_addr(mac)) {
+		wil_err(wil, "Invalid MAC %pM\n", mac);
+		return -EINVAL;
+	}
+
+	ether_addr_copy(ndev->perm_addr, mac);
+	ether_addr_copy(wiphy->perm_addr, mac);
+	if (!is_valid_ether_addr(ndev->dev_addr))
+		ether_addr_copy(ndev->dev_addr, mac);
+
+	return 0;
+}
+
 static int wil_wait_for_fw_ready(struct wil6210_priv *wil)
 {
 	ulong to = msecs_to_jiffies(1000);
@@ -960,6 +1146,8 @@ static void wil_pre_fw_config(struct wil6210_priv *wil)
 int wil_reset(struct wil6210_priv *wil, bool load_fw)
 {
 	int rc;
+	unsigned long status_flags = BIT(wil_status_resetting);
+	int no_flash;
 
 	wil_dbg_misc(wil, "reset\n");
 
@@ -980,6 +1168,16 @@ int wil_reset(struct wil6210_priv *wil, bool load_fw)
 	if (wil->hw_version == HW_VER_UNKNOWN)
 		return -ENODEV;
 
+	if (test_bit(WIL_PLATFORM_CAPA_T_PWR_ON_0, wil->platform_capa)) {
+		wil_dbg_misc(wil, "Notify FW to set T_POWER_ON=0\n");
+		wil_s(wil, RGF_USER_USAGE_8, BIT_USER_SUPPORT_T_POWER_ON_0);
+	}
+
+	if (test_bit(WIL_PLATFORM_CAPA_EXT_CLK, wil->platform_capa)) {
+		wil_dbg_misc(wil, "Notify FW on ext clock configuration\n");
+		wil_s(wil, RGF_USER_USAGE_8, BIT_USER_EXT_CLK);
+	}
+
 	if (wil->platform_ops.notify) {
 		rc = wil->platform_ops.notify(wil->platform_handle,
 					      WIL_PLATFORM_EVT_PRE_RESET);
@@ -989,6 +1187,14 @@ int wil_reset(struct wil6210_priv *wil, bool load_fw)
 	}
 
 	set_bit(wil_status_resetting, wil->status);
+	if (test_bit(wil_status_collecting_dumps, wil->status)) {
+		/* Device is collecting a crash dump; cancel the reset.
+		 * The reset will take place once crash dump collection
+		 * completes.
+		 */
+		wil_dbg_misc(wil, "reject reset while collecting crash dump\n");
+		rc = -EBUSY;
+		goto out;
+	}
 
 	cancel_work_sync(&wil->disconnect_worker);
 	wil6210_disconnect(wil, NULL, WLAN_REASON_DEAUTH_LEAVING, false);
@@ -1003,7 +1209,11 @@ int wil_reset(struct wil6210_priv *wil, bool load_fw)
 
 	/* prevent NAPI from being scheduled and prevent wmi commands */
 	mutex_lock(&wil->wmi_mutex);
-	bitmap_zero(wil->status, wil_status_last);
+	if (test_bit(wil_status_suspending, wil->status))
+		status_flags |= BIT(wil_status_suspending);
+	bitmap_and(wil->status, wil->status, &status_flags,
+		   wil_status_last);
+	wil_dbg_misc(wil, "wil->status (0x%lx)\n", *wil->status);
 	mutex_unlock(&wil->wmi_mutex);
 
 	wil_mask_irq(wil);
@@ -1013,37 +1223,53 @@ int wil_reset(struct wil6210_priv *wil, bool load_fw)
 	flush_workqueue(wil->wq_service);
 	flush_workqueue(wil->wmi_wq);
 
-	wil_bl_crash_info(wil, false);
+	no_flash = test_bit(hw_capa_no_flash, wil->hw_capa);
+	if (!no_flash)
+		wil_bl_crash_info(wil, false);
 	wil_disable_irq(wil);
-	rc = wil_target_reset(wil);
+	rc = wil_target_reset(wil, no_flash);
 	wil6210_clear_irq(wil);
 	wil_enable_irq(wil);
 	wil_rx_fini(wil);
 	if (rc) {
-		wil_bl_crash_info(wil, true);
-		return rc;
+		if (!no_flash)
+			wil_bl_crash_info(wil, true);
+		goto out;
 	}
 
-	rc = wil_get_bl_info(wil);
-	if (rc == -EAGAIN && !load_fw) /* ignore RF error if not going up */
-		rc = 0;
+	if (no_flash) {
+		rc = wil_get_otp_info(wil);
+	} else {
+		rc = wil_get_bl_info(wil);
+		if (rc == -EAGAIN && !load_fw)
+			/* ignore RF error if not going up */
+			rc = 0;
+	}
 	if (rc)
-		return rc;
+		goto out;
 
 	wil_set_oob_mode(wil, oob_mode);
 	if (load_fw) {
 		wil_info(wil, "Use firmware <%s> + board <%s>\n",
 			 wil->wil_fw_name, WIL_BOARD_FILE_NAME);
 
+		if (!no_flash)
+			wil_bl_prepare_halt(wil);
+
 		wil_halt_cpu(wil);
 		memset(wil->fw_version, 0, sizeof(wil->fw_version));
 		/* Loading f/w from the file */
 		rc = wil_request_firmware(wil, wil->wil_fw_name, true);
 		if (rc)
-			return rc;
-		rc = wil_request_firmware(wil, WIL_BOARD_FILE_NAME, true);
+			goto out;
+		if (wil->brd_file_addr)
+			rc = wil_request_board(wil, WIL_BOARD_FILE_NAME);
+		else
+			rc = wil_request_firmware(wil,
+						  WIL_BOARD_FILE_NAME,
+						  true);
 		if (rc)
-			return rc;
+			goto out;
 
 		wil_pre_fw_config(wil);
 		wil_release_cpu(wil);
@@ -1055,6 +1281,8 @@ int wil_reset(struct wil6210_priv *wil, bool load_fw)
 	reinit_completion(&wil->wmi_call);
 	reinit_completion(&wil->halp.comp);
 
+	clear_bit(wil_status_resetting, wil->status);
+
 	if (load_fw) {
 		wil_configure_interrupt_moderation(wil);
 		wil_unmask_irq(wil);
@@ -1071,11 +1299,11 @@ int wil_reset(struct wil6210_priv *wil, bool load_fw)
 			return rc;
 		}
 
+		wil_collect_fw_info(wil);
+
 		if (wil->ps_profile != WMI_PS_PROFILE_TYPE_DEFAULT)
 			wil_ps_update(wil, wil->ps_profile);
 
-		wil_collect_fw_info(wil);
-
 		if (wil->platform_ops.notify) {
 			rc = wil->platform_ops.notify(wil->platform_handle,
 						      WIL_PLATFORM_EVT_FW_RDY);
@@ -1088,6 +1316,10 @@ int wil_reset(struct wil6210_priv *wil, bool load_fw)
 	}
 
 	return rc;
+
+out:
+	clear_bit(wil_status_resetting, wil->status);
+	return rc;
 }
 
 void wil_fw_error_recovery(struct wil6210_priv *wil)
@@ -1193,9 +1425,7 @@ int __wil_down(struct wil6210_priv *wil)
 	wil_abort_scan(wil, false);
 	mutex_unlock(&wil->p2p_wdev_mutex);
 
-	wil_reset(wil, false);
-
-	return 0;
+	return wil_reset(wil, false);
 }
 
 int wil_down(struct wil6210_priv *wil)
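
The reset path above no longer wipes the whole status bitmap; bitmap_and() keeps wil_status_resetting (and wil_status_suspending, when already set) alive across the wipe so concurrent suspend logic keeps seeing a consistent state. The pattern in isolation (bit indices and names hypothetical; a single unsigned long mask is enough while the bit count stays within one word):

	#include <linux/bitmap.h>
	#include <linux/bitops.h>
	#include <linux/bits.h>

	#define ST_LAST		16
	#define ST_RESETTING	0
	#define ST_SUSPENDING	1

	static void clear_status_keep_flags(unsigned long *status)
	{
		unsigned long keep = BIT(ST_RESETTING);

		if (test_bit(ST_SUSPENDING, status))
			keep |= BIT(ST_SUSPENDING);

		/* zero everything except the preserved bits */
		bitmap_and(status, status, &keep, ST_LAST);
	}
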
diff --git a/drivers/net/wireless/ath/wil6210/netdev.c b/drivers/net/wireless/ath/wil6210/netdev.c
index 4a6ab2d0fdf1..7ba4e0af8f57 100644
--- a/drivers/net/wireless/ath/wil6210/netdev.c
+++ b/drivers/net/wireless/ath/wil6210/netdev.c
@@ -21,6 +21,7 @@
 static int wil_open(struct net_device *ndev)
 {
 	struct wil6210_priv *wil = ndev_to_wil(ndev);
+	int rc;
 
 	wil_dbg_misc(wil, "open\n");
 
@@ -30,16 +31,29 @@ static int wil_open(struct net_device *ndev)
 		return -EINVAL;
 	}
 
-	return wil_up(wil);
+	rc = wil_pm_runtime_get(wil);
+	if (rc < 0)
+		return rc;
+
+	rc = wil_up(wil);
+	if (rc)
+		wil_pm_runtime_put(wil);
+
+	return rc;
 }
 
 static int wil_stop(struct net_device *ndev)
 {
 	struct wil6210_priv *wil = ndev_to_wil(ndev);
+	int rc;
 
 	wil_dbg_misc(wil, "stop\n");
 
-	return wil_down(wil);
+	rc = wil_down(wil);
+	if (!rc)
+		wil_pm_runtime_put(wil);
+
+	return rc;
 }
 
 static const struct net_device_ops wil_netdev_ops = {
@@ -136,7 +150,7 @@ void *wil_if_alloc(struct device *dev)
 	wdev->iftype = NL80211_IFTYPE_STATION; /* TODO */
 	/* default monitor channel */
 	ch = wdev->wiphy->bands[NL80211_BAND_60GHZ]->channels;
-	cfg80211_chandef_create(&wdev->preset_chandef, ch, NL80211_CHAN_NO_HT);
+	cfg80211_chandef_create(&wil->monitor_chandef, ch, NL80211_CHAN_NO_HT);
 
 	ndev = alloc_netdev(0, "wlan%d", NET_NAME_UNKNOWN, wil_dev_setup);
 	if (!ndev) {
diff --git a/drivers/net/wireless/ath/wil6210/pcie_bus.c b/drivers/net/wireless/ath/wil6210/pcie_bus.c
index 6a3ab4bf916d..809092a49192 100644
--- a/drivers/net/wireless/ath/wil6210/pcie_bus.c
+++ b/drivers/net/wireless/ath/wil6210/pcie_bus.c
@@ -1,5 +1,6 @@
 /*
  * Copyright (c) 2012-2017 Qualcomm Atheros, Inc.
+ * Copyright (c) 2018, The Linux Foundation. All rights reserved.
  *
  * Permission to use, copy, modify, and/or distribute this software for any
  * purpose with or without fee is hereby granted, provided that the above
@@ -21,6 +22,7 @@
 #include <linux/suspend.h>
 #include "wil6210.h"
 #include <linux/rtnetlink.h>
+#include <linux/pm_runtime.h>
 
 static bool use_msi = true;
 module_param(use_msi, bool, 0444);
@@ -30,29 +32,30 @@ static bool ftm_mode;
 module_param(ftm_mode, bool, 0444);
 MODULE_PARM_DESC(ftm_mode, " Set factory test mode, default - false");
 
-#ifdef CONFIG_PM
-#ifdef CONFIG_PM_SLEEP
 static int wil6210_pm_notify(struct notifier_block *notify_block,
 			     unsigned long mode, void *unused);
-#endif /* CONFIG_PM_SLEEP */
-#endif /* CONFIG_PM */
 
 static
-void wil_set_capabilities(struct wil6210_priv *wil)
+int wil_set_capabilities(struct wil6210_priv *wil)
 {
 	const char *wil_fw_name;
 	u32 jtag_id = wil_r(wil, RGF_USER_JTAG_DEV_ID);
 	u8 chip_revision = (wil_r(wil, RGF_USER_REVISION_ID) &
 			    RGF_USER_REVISION_ID_MASK);
+	int platform_capa;
+	struct fw_map *iccm_section, *sct;
 
-	bitmap_zero(wil->hw_capabilities, hw_capability_last);
+	bitmap_zero(wil->hw_capa, hw_capa_last);
 	bitmap_zero(wil->fw_capabilities, WMI_FW_CAPABILITY_MAX);
+	bitmap_zero(wil->platform_capa, WIL_PLATFORM_CAPA_MAX);
 	wil->wil_fw_name = ftm_mode ? WIL_FW_NAME_FTM_DEFAULT :
 			   WIL_FW_NAME_DEFAULT;
 	wil->chip_revision = chip_revision;
 
 	switch (jtag_id) {
 	case JTAG_DEV_ID_SPARROW:
+		memcpy(fw_mapping, sparrow_fw_mapping,
+		       sizeof(sparrow_fw_mapping));
 		switch (chip_revision) {
 		case REVISION_ID_SPARROW_D0:
 			wil->hw_name = "Sparrow D0";
@@ -62,6 +65,12 @@ void wil_set_capabilities(struct wil6210_priv *wil)
 
 			if (wil_fw_verify_file_exists(wil, wil_fw_name))
 				wil->wil_fw_name = wil_fw_name;
+			sct = wil_find_fw_mapping("mac_rgf_ext");
+			if (!sct) {
+				wil_err(wil, "mac_rgf_ext section not found in fw_mapping\n");
+				return -EINVAL;
+			}
+			memcpy(sct, &sparrow_d0_mac_rgf_ext, sizeof(*sct));
 			break;
 		case REVISION_ID_SPARROW_B0:
 			wil->hw_name = "Sparrow B0";
@@ -72,21 +81,50 @@ void wil_set_capabilities(struct wil6210_priv *wil)
 			wil->hw_version = HW_VER_UNKNOWN;
 			break;
 		}
+		wil->rgf_fw_assert_code_addr = SPARROW_RGF_FW_ASSERT_CODE;
+		wil->rgf_ucode_assert_code_addr = SPARROW_RGF_UCODE_ASSERT_CODE;
+		break;
+	case JTAG_DEV_ID_TALYN:
+		wil->hw_name = "Talyn";
+		wil->hw_version = HW_VER_TALYN;
+		memcpy(fw_mapping, talyn_fw_mapping, sizeof(talyn_fw_mapping));
+		wil->rgf_fw_assert_code_addr = TALYN_RGF_FW_ASSERT_CODE;
+		wil->rgf_ucode_assert_code_addr = TALYN_RGF_UCODE_ASSERT_CODE;
+		if (wil_r(wil, RGF_USER_OTP_HW_RD_MACHINE_1) &
+		    BIT_NO_FLASH_INDICATION)
+			set_bit(hw_capa_no_flash, wil->hw_capa);
 		break;
 	default:
 		wil_err(wil, "Unknown board hardware, chip_id 0x%08x, chip_revision 0x%08x\n",
 			jtag_id, chip_revision);
 		wil->hw_name = "Unknown";
 		wil->hw_version = HW_VER_UNKNOWN;
+		return -EINVAL;
+	}
+
+	iccm_section = wil_find_fw_mapping("fw_code");
+	if (!iccm_section) {
+		wil_err(wil, "fw_code section not found in fw_mapping\n");
+		return -EINVAL;
 	}
+	wil->iccm_base = iccm_section->host;
 
-	wil_info(wil, "Board hardware is %s\n", wil->hw_name);
+	wil_info(wil, "Board hardware is %s, flash %s\n", wil->hw_name,
+		 test_bit(hw_capa_no_flash, wil->hw_capa) ?
+		 "doesn't exist" : "exists");
+
+	/* Get platform capabilities */
+	if (wil->platform_ops.get_capa) {
+		platform_capa =
+			wil->platform_ops.get_capa(wil->platform_handle);
+		memcpy(wil->platform_capa, &platform_capa,
+		       min(sizeof(wil->platform_capa), sizeof(platform_capa)));
+	}
 
 	/* extract FW capabilities from file without loading the FW */
 	wil_request_firmware(wil, wil->wil_fw_name, false);
+	wil_refresh_fw_capabilities(wil);
 
-	if (test_bit(WMI_FW_CAPABILITY_RSSI_REPORTING, wil->fw_capabilities))
-		wil_to_wiphy(wil)->signal_type = CFG80211_SIGNAL_TYPE_MBM;
+	return 0;
 }
 
 void wil_disable_irq(struct wil6210_priv *wil)
@@ -209,6 +247,8 @@ static int wil_pcie_probe(struct pci_dev *pdev, const struct pci_device_id *id)
 		.fw_recovery = wil_platform_rop_fw_recovery,
 	};
 	u32 bar_size = pci_resource_len(pdev, 0);
+	int dma_addr_size[] = {48, 40, 32}; /* keep descending order */
+	int i;
 
 	/* check HW */
 	dev_info(&pdev->dev, WIL_NAME
@@ -244,21 +284,23 @@ static int wil_pcie_probe(struct pci_dev *pdev, const struct pci_device_id *id)
 	}
 	/* rollback to err_plat */
 
-	/* device supports 48bit addresses */
-	rc = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(48));
-	if (rc) {
-		dev_err(dev, "dma_set_mask_and_coherent(48) failed: %d\n", rc);
-		rc = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32));
+	/* device supports >32bit addresses */
+	for (i = 0; i < ARRAY_SIZE(dma_addr_size); i++) {
+		rc = dma_set_mask_and_coherent(dev,
+					       DMA_BIT_MASK(dma_addr_size[i]));
 		if (rc) {
-			dev_err(dev,
-				"dma_set_mask_and_coherent(32) failed: %d\n",
-				rc);
-			goto err_plat;
+			dev_err(dev, "dma_set_mask_and_coherent(%d) failed: %d\n",
+				dma_addr_size[i], rc);
+			continue;
 		}
-	} else {
-		wil->use_extended_dma_addr = 1;
+		dev_info(dev, "using dma mask %d\n", dma_addr_size[i]);
+		wil->dma_addr_size = dma_addr_size[i];
+		break;
 	}
 
+	if (wil->dma_addr_size == 0)
+		goto err_plat;
+
 	rc = pci_enable_device(pdev);
 	if (rc && pdev->msi_enabled == 0) {
 		wil_err(wil,
@@ -293,18 +335,13 @@ static int wil_pcie_probe(struct pci_dev *pdev, const struct pci_device_id *id)
 	/* rollback to err_iounmap */
 	wil_info(wil, "CSR at %pR -> 0x%p\n", &pdev->resource[0], wil->csr);
 
-	wil_set_capabilities(wil);
+	rc = wil_set_capabilities(wil);
+	if (rc) {
+		wil_err(wil, "wil_set_capabilities failed, rc %d\n", rc);
+		goto err_iounmap;
+	}
 	wil6210_clear_irq(wil);
 
-	wil->keep_radio_on_during_sleep =
-		wil->platform_ops.keep_radio_on_during_sleep &&
-		wil->platform_ops.keep_radio_on_during_sleep(
-			wil->platform_handle) &&
-		test_bit(WMI_FW_CAPABILITY_D3_SUSPEND, wil->fw_capabilities);
-
-	wil_info(wil, "keep_radio_on_during_sleep (%d)\n",
-		 wil->keep_radio_on_during_sleep);
-
 	/* FW should raise IRQ when ready */
 	rc = wil_if_pcie_enable(wil);
 	if (rc) {
@@ -319,20 +356,19 @@ static int wil_pcie_probe(struct pci_dev *pdev, const struct pci_device_id *id)
 		goto bus_disable;
 	}
 
-#ifdef CONFIG_PM
-#ifdef CONFIG_PM_SLEEP
-	wil->pm_notify.notifier_call = wil6210_pm_notify;
+	if (IS_ENABLED(CONFIG_PM))
+		wil->pm_notify.notifier_call = wil6210_pm_notify;
+
 	rc = register_pm_notifier(&wil->pm_notify);
 	if (rc)
 		/* Do not fail the driver initialization, as suspend can
 		 * be prevented in a later phase if needed
 		 */
 		wil_err(wil, "register_pm_notifier failed: %d\n", rc);
-#endif /* CONFIG_PM_SLEEP */
-#endif /* CONFIG_PM */
 
 	wil6210_debugfs_init(wil);
 
+	wil_pm_runtime_allow(wil);
 
 	return 0;
 
@@ -359,11 +395,9 @@ static void wil_pcie_remove(struct pci_dev *pdev)
 
 	wil_dbg_misc(wil, "pcie_remove\n");
 
-#ifdef CONFIG_PM
-#ifdef CONFIG_PM_SLEEP
 	unregister_pm_notifier(&wil->pm_notify);
-#endif /* CONFIG_PM_SLEEP */
-#endif /* CONFIG_PM */
+
+	wil_pm_runtime_forbid(wil);
 
 	wil6210_debugfs_remove(wil);
 	rtnl_lock();
@@ -381,18 +415,19 @@ static void wil_pcie_remove(struct pci_dev *pdev)
 static const struct pci_device_id wil6210_pcie_ids[] = {
 	{ PCI_DEVICE(0x1ae9, 0x0310) },
 	{ PCI_DEVICE(0x1ae9, 0x0302) }, /* same as above, firmware broken */
+	{ PCI_DEVICE(0x17cb, 0x1201) }, /* Talyn */
 	{ /* end: all zeroes */	},
 };
 MODULE_DEVICE_TABLE(pci, wil6210_pcie_ids);
 
-#ifdef CONFIG_PM
-#ifdef CONFIG_PM_SLEEP
-
 static int wil6210_suspend(struct device *dev, bool is_runtime)
 {
 	int rc = 0;
 	struct pci_dev *pdev = to_pci_dev(dev);
 	struct wil6210_priv *wil = pci_get_drvdata(pdev);
+	struct net_device *ndev = wil_to_ndev(wil);
+	bool keep_radio_on = ndev->flags & IFF_UP &&
+			     wil->keep_radio_on_during_sleep;
 
 	wil_dbg_pm(wil, "suspend: %s\n", is_runtime ? "runtime" : "system");
 
@@ -400,16 +435,18 @@ static int wil6210_suspend(struct device *dev, bool is_runtime)
 	if (rc)
 		goto out;
 
-	rc = wil_suspend(wil, is_runtime);
+	rc = wil_suspend(wil, is_runtime, keep_radio_on);
 	if (!rc) {
-		wil->suspend_stats.successful_suspends++;
-
-		/* If platform device supports keep_radio_on_during_sleep
-		 * it will control PCIe master
+		/* In case the radio stays on, the platform device
+		 * controls the PCIe master
+		 */
-		if (!wil->keep_radio_on_during_sleep)
+		if (!keep_radio_on) {
 			/* disable bus mastering */
 			pci_clear_master(pdev);
+			wil->suspend_stats.r_off.successful_suspends++;
+		} else {
+			wil->suspend_stats.r_on.successful_suspends++;
+		}
 	}
 out:
 	return rc;
@@ -420,23 +457,32 @@ static int wil6210_resume(struct device *dev, bool is_runtime)
 	int rc = 0;
 	struct pci_dev *pdev = to_pci_dev(dev);
 	struct wil6210_priv *wil = pci_get_drvdata(pdev);
+	struct net_device *ndev = wil_to_ndev(wil);
+	bool keep_radio_on = ndev->flags & IFF_UP &&
+			     wil->keep_radio_on_during_sleep;
 
 	wil_dbg_pm(wil, "resume: %s\n", is_runtime ? "runtime" : "system");
 
-	/* If platform device supports keep_radio_on_during_sleep it will
-	 * control PCIe master
+	/* In case the radio stays on, the platform device controls
+	 * the PCIe master
+	 */
-	if (!wil->keep_radio_on_during_sleep)
+	if (!keep_radio_on)
 		/* allow master */
 		pci_set_master(pdev);
-	rc = wil_resume(wil, is_runtime);
+	rc = wil_resume(wil, is_runtime, keep_radio_on);
 	if (rc) {
 		wil_err(wil, "device failed to resume (%d)\n", rc);
-		wil->suspend_stats.failed_resumes++;
-		if (!wil->keep_radio_on_during_sleep)
+		if (!keep_radio_on) {
 			pci_clear_master(pdev);
+			wil->suspend_stats.r_off.failed_resumes++;
+		} else {
+			wil->suspend_stats.r_on.failed_resumes++;
+		}
 	} else {
-		wil->suspend_stats.successful_resumes++;
+		if (keep_radio_on)
+			wil->suspend_stats.r_on.successful_resumes++;
+		else
+			wil->suspend_stats.r_off.successful_resumes++;
 	}
 
 	return rc;
@@ -481,21 +527,49 @@ static int wil6210_pm_notify(struct notifier_block *notify_block,
 	return rc;
 }
 
-static int wil6210_pm_suspend(struct device *dev)
+static int __maybe_unused wil6210_pm_suspend(struct device *dev)
 {
 	return wil6210_suspend(dev, false);
 }
 
-static int wil6210_pm_resume(struct device *dev)
+static int __maybe_unused wil6210_pm_resume(struct device *dev)
 {
 	return wil6210_resume(dev, false);
 }
-#endif /* CONFIG_PM_SLEEP */
 
-#endif /* CONFIG_PM */
+static int __maybe_unused wil6210_pm_runtime_idle(struct device *dev)
+{
+	struct pci_dev *pdev = to_pci_dev(dev);
+	struct wil6210_priv *wil = pci_get_drvdata(pdev);
+
+	wil_dbg_pm(wil, "Runtime idle\n");
+
+	return wil_can_suspend(wil, true);
+}
+
+static int __maybe_unused wil6210_pm_runtime_resume(struct device *dev)
+{
+	return wil6210_resume(dev, true);
+}
+
+static int __maybe_unused wil6210_pm_runtime_suspend(struct device *dev)
+{
+	struct pci_dev *pdev = to_pci_dev(dev);
+	struct wil6210_priv *wil = pci_get_drvdata(pdev);
+
+	if (test_bit(wil_status_suspended, wil->status)) {
+		wil_dbg_pm(wil, "trying to suspend while suspended\n");
+		return 1;
+	}
+
+	return wil6210_suspend(dev, true);
+}
 
 static const struct dev_pm_ops wil6210_pm_ops = {
 	SET_SYSTEM_SLEEP_PM_OPS(wil6210_pm_suspend, wil6210_pm_resume)
+	SET_RUNTIME_PM_OPS(wil6210_pm_runtime_suspend,
+			   wil6210_pm_runtime_resume,
+			   wil6210_pm_runtime_idle)
 };
 
 static struct pci_driver wil6210_driver = {
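
The probe change above turns the 48-or-32 DMA mask fallback into a loop over widths in descending order, settling on the widest mask the platform accepts and failing probe if none do. As a standalone sketch (pick_dma_mask is a hypothetical helper):

	#include <linux/dma-mapping.h>
	#include <linux/kernel.h>

	/* Returns the accepted DMA address width, or 0 if none works;
	 * widths must be listed in descending order so the widest
	 * usable mask wins.
	 */
	static int pick_dma_mask(struct device *dev)
	{
		static const int widths[] = {48, 40, 32};
		int i;

		for (i = 0; i < ARRAY_SIZE(widths); i++)
			if (!dma_set_mask_and_coherent(dev,
						       DMA_BIT_MASK(widths[i])))
				return widths[i];

		return 0;	/* caller should fail probe */
	}
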
diff --git a/drivers/net/wireless/ath/wil6210/pm.c b/drivers/net/wireless/ath/wil6210/pm.c
index 8f5d1b447aaa..0a96518a566f 100644
--- a/drivers/net/wireless/ath/wil6210/pm.c
+++ b/drivers/net/wireless/ath/wil6210/pm.c
@@ -16,15 +16,30 @@
 
 #include "wil6210.h"
 #include <linux/jiffies.h>
+#include <linux/pm_runtime.h>
+
+#define WIL6210_AUTOSUSPEND_DELAY_MS (1000)
 
 int wil_can_suspend(struct wil6210_priv *wil, bool is_runtime)
 {
 	int rc = 0;
 	struct wireless_dev *wdev = wil->wdev;
 	struct net_device *ndev = wil_to_ndev(wil);
+	bool wmi_only = test_bit(WMI_FW_CAPABILITY_WMI_ONLY,
+				 wil->fw_capabilities);
 
 	wil_dbg_pm(wil, "can_suspend: %s\n", is_runtime ? "runtime" : "system");
 
+	if (wmi_only || debug_fw) {
+		wil_dbg_pm(wil, "Deny any suspend - %s mode\n",
+			   wmi_only ? "wmi_only" : "debug_fw");
+		rc = -EBUSY;
+		goto out;
+	}
+	if (is_runtime && !wil->platform_ops.suspend) {
+		rc = -EBUSY;
+		goto out;
+	}
 	if (!(ndev->flags & IFF_UP)) {
 		/* can always sleep when down */
 		wil_dbg_pm(wil, "Interface is down\n");
@@ -44,6 +59,10 @@ int wil_can_suspend(struct wil6210_priv *wil, bool is_runtime)
 	/* interface is running */
 	switch (wdev->iftype) {
 	case NL80211_IFTYPE_MONITOR:
+		wil_dbg_pm(wil, "Sniffer\n");
+		rc = -EBUSY;
+		goto out;
+	/* for STA-like interface, don't runtime suspend */
 	case NL80211_IFTYPE_STATION:
 	case NL80211_IFTYPE_P2P_CLIENT:
 		if (test_bit(wil_status_fwconnecting, wil->status)) {
@@ -51,6 +70,12 @@ int wil_can_suspend(struct wil6210_priv *wil, bool is_runtime)
 			rc = -EBUSY;
 			goto out;
 		}
+		/* Runtime pm not supported in case the interface is up */
+		if (is_runtime) {
+			wil_dbg_pm(wil, "STA-like interface\n");
+			rc = -EBUSY;
+			goto out;
+		}
 		break;
 	/* AP-like interface - can't suspend */
 	default:
@@ -120,6 +145,13 @@ static int wil_suspend_keep_radio_on(struct wil6210_priv *wil)
 
 	/* Prevent handling of new tx and wmi commands */
 	set_bit(wil_status_suspending, wil->status);
+	if (test_bit(wil_status_collecting_dumps, wil->status)) {
+		/* Device is collecting a crash dump; cancel the suspend */
+		wil_dbg_pm(wil, "reject suspend while collecting crash dump\n");
+		clear_bit(wil_status_suspending, wil->status);
+		wil->suspend_stats.rejected_by_host++;
+		return -EBUSY;
+	}
 	wil_update_net_queues_bh(wil, NULL, true);
 
 	if (!wil_is_tx_idle(wil)) {
@@ -158,7 +190,7 @@ static int wil_suspend_keep_radio_on(struct wil6210_priv *wil)
 					break;
 				wil_err(wil,
 					"TO waiting for idle RX, suspend failed\n");
-				wil->suspend_stats.failed_suspends++;
+				wil->suspend_stats.r_on.failed_suspends++;
 				goto resume_after_fail;
 			}
 			wil_dbg_ratelimited(wil, "rx vring is not empty -> NAPI\n");
@@ -174,7 +206,7 @@ static int wil_suspend_keep_radio_on(struct wil6210_priv *wil)
 	 */
 	if (!wil_is_wmi_idle(wil)) {
 		wil_err(wil, "suspend failed due to pending WMI events\n");
-		wil->suspend_stats.failed_suspends++;
+		wil->suspend_stats.r_on.failed_suspends++;
 		goto resume_after_fail;
 	}
 
@@ -188,7 +220,7 @@ static int wil_suspend_keep_radio_on(struct wil6210_priv *wil)
 		if (rc) {
 			wil_err(wil, "platform device failed to suspend (%d)\n",
 				rc);
-			wil->suspend_stats.failed_suspends++;
+			wil->suspend_stats.r_on.failed_suspends++;
 			wil_c(wil, RGF_USER_CLKS_CTL_0, BIT_USER_CLKS_RST_PWGD);
 			wil_unmask_irq(wil);
 			goto resume_after_fail;
@@ -230,11 +262,21 @@ static int wil_suspend_radio_off(struct wil6210_priv *wil)
 
 	wil_dbg_pm(wil, "suspend radio off\n");
 
+	set_bit(wil_status_suspending, wil->status);
+	if (test_bit(wil_status_collecting_dumps, wil->status)) {
+		/* Device is collecting a crash dump; cancel the suspend */
+		wil_dbg_pm(wil, "reject suspend while collecting crash dump\n");
+		clear_bit(wil_status_suspending, wil->status);
+		wil->suspend_stats.rejected_by_host++;
+		return -EBUSY;
+	}
+
 	/* if netif up, hardware is alive, shut it down */
 	if (ndev->flags & IFF_UP) {
 		rc = wil_down(wil);
 		if (rc) {
 			wil_err(wil, "wil_down : %d\n", rc);
+			wil->suspend_stats.r_off.failed_suspends++;
 			goto out;
 		}
 	}
@@ -247,6 +289,7 @@ static int wil_suspend_radio_off(struct wil6210_priv *wil)
 		rc = wil->platform_ops.suspend(wil->platform_handle, false);
 		if (rc) {
 			wil_enable_irq(wil);
+			wil->suspend_stats.r_off.failed_suspends++;
 			goto out;
 		}
 	}
@@ -254,6 +297,7 @@ static int wil_suspend_radio_off(struct wil6210_priv *wil)
 	set_bit(wil_status_suspended, wil->status);
 
 out:
+	clear_bit(wil_status_suspending, wil->status);
 	wil_dbg_pm(wil, "suspend radio off: %d\n", rc);
 
 	return rc;
@@ -279,12 +323,9 @@ static int wil_resume_radio_off(struct wil6210_priv *wil)
 	return rc;
 }
 
-int wil_suspend(struct wil6210_priv *wil, bool is_runtime)
+int wil_suspend(struct wil6210_priv *wil, bool is_runtime, bool keep_radio_on)
 {
 	int rc = 0;
-	struct net_device *ndev = wil_to_ndev(wil);
-	bool keep_radio_on = ndev->flags & IFF_UP &&
-			     wil->keep_radio_on_during_sleep;
 
 	wil_dbg_pm(wil, "suspend: %s\n", is_runtime ? "runtime" : "system");
 
@@ -301,19 +342,12 @@ int wil_suspend(struct wil6210_priv *wil, bool is_runtime)
 	wil_dbg_pm(wil, "suspend: %s => %d\n",
 		   is_runtime ? "runtime" : "system", rc);
 
-	if (!rc)
-		wil->suspend_stats.suspend_start_time = ktime_get();
-
 	return rc;
 }
 
-int wil_resume(struct wil6210_priv *wil, bool is_runtime)
+int wil_resume(struct wil6210_priv *wil, bool is_runtime, bool keep_radio_on)
 {
 	int rc = 0;
-	struct net_device *ndev = wil_to_ndev(wil);
-	bool keep_radio_on = ndev->flags & IFF_UP &&
-			     wil->keep_radio_on_during_sleep;
-	unsigned long long suspend_time_usec = 0;
 
 	wil_dbg_pm(wil, "resume: %s\n", is_runtime ? "runtime" : "system");
 
@@ -331,20 +365,49 @@ int wil_resume(struct wil6210_priv *wil, bool is_runtime)
 	else
 		rc = wil_resume_radio_off(wil);
 
-	if (rc)
-		goto out;
-
-	suspend_time_usec =
-		ktime_to_us(ktime_sub(ktime_get(),
-				      wil->suspend_stats.suspend_start_time));
-	wil->suspend_stats.total_suspend_time += suspend_time_usec;
-	if (suspend_time_usec < wil->suspend_stats.min_suspend_time)
-		wil->suspend_stats.min_suspend_time = suspend_time_usec;
-	if (suspend_time_usec > wil->suspend_stats.max_suspend_time)
-		wil->suspend_stats.max_suspend_time = suspend_time_usec;
-
 out:
-	wil_dbg_pm(wil, "resume: %s => %d, suspend time %lld usec\n",
-		   is_runtime ? "runtime" : "system", rc, suspend_time_usec);
+	wil_dbg_pm(wil, "resume: %s => %d\n", is_runtime ? "runtime" : "system",
+		   rc);
 	return rc;
 }
+
+void wil_pm_runtime_allow(struct wil6210_priv *wil)
+{
+	struct device *dev = wil_to_dev(wil);
+
+	pm_runtime_put_noidle(dev);
+	pm_runtime_set_autosuspend_delay(dev, WIL6210_AUTOSUSPEND_DELAY_MS);
+	pm_runtime_use_autosuspend(dev);
+	pm_runtime_allow(dev);
+}
+
+void wil_pm_runtime_forbid(struct wil6210_priv *wil)
+{
+	struct device *dev = wil_to_dev(wil);
+
+	pm_runtime_forbid(dev);
+	pm_runtime_get_noresume(dev);
+}
+
+int wil_pm_runtime_get(struct wil6210_priv *wil)
+{
+	int rc;
+	struct device *dev = wil_to_dev(wil);
+
+	rc = pm_runtime_get_sync(dev);
+	if (rc < 0) {
+		wil_err(wil, "pm_runtime_get_sync() failed, rc = %d\n", rc);
+		pm_runtime_put_noidle(dev);
+		return rc;
+	}
+
+	return 0;
+}
+
+void wil_pm_runtime_put(struct wil6210_priv *wil)
+{
+	struct device *dev = wil_to_dev(wil);
+
+	pm_runtime_mark_last_busy(dev);
+	pm_runtime_put_autosuspend(dev);
+}
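The four wil_pm_runtime_* helpers above are thin wrappers over the runtime-PM core, so the rest of the driver can bracket device accesses without touching struct device directly. A minimal usage sketch, assuming a hypothetical register-read path (wil_r() is the driver's existing register accessor; the caller's name is made up):

	static int wil_example_read_reg(struct wil6210_priv *wil, u32 addr, u32 *val)
	{
		int rc = wil_pm_runtime_get(wil);	/* resume device if suspended */

		if (rc < 0)
			return rc;
		*val = wil_r(wil, addr);		/* device is powered here */
		wil_pm_runtime_put(wil);		/* mark busy, re-arm autosuspend */
		return 0;
	}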
diff --git a/drivers/net/wireless/ath/wil6210/pmc.c b/drivers/net/wireless/ath/wil6210/pmc.c
index 2e301b6b32a9..4ea27b0bd278 100644
--- a/drivers/net/wireless/ath/wil6210/pmc.c
+++ b/drivers/net/wireless/ath/wil6210/pmc.c
@@ -111,14 +111,14 @@ void wil_pmc_alloc(struct wil6210_priv *wil,
 	 *
 	 * HW has a limitation that all vring addresses must share the same
 	 * upper 16 bits of the 48-bit address. To work around that,
-	 * if we are using 48 bit addresses switch to 32 bit allocation
-	 * before allocating vring memory.
+	 * if we are using more than 32-bit addresses, switch to a 32-bit
+	 * allocation before allocating vring memory.
 	 *
 	 * There's no check on the return value of dma_set_mask_and_coherent:
 	 * we assume that if the mask could be set during initialization,
 	 * setting it again on this system cannot fail.
 	 */
-	if (wil->use_extended_dma_addr)
+	if (wil->dma_addr_size > 32)
 		dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32));
 
 	pmc->pring_va = dma_alloc_coherent(dev,
@@ -126,8 +126,9 @@ void wil_pmc_alloc(struct wil6210_priv *wil,
 			&pmc->pring_pa,
 			GFP_KERNEL);
 
-	if (wil->use_extended_dma_addr)
-		dma_set_mask_and_coherent(dev, DMA_BIT_MASK(48));
+	if (wil->dma_addr_size > 32)
+		dma_set_mask_and_coherent(dev,
+					  DMA_BIT_MASK(wil->dma_addr_size));
 
 	wil_dbg_misc(wil,
 		     "pmc_alloc: allocated pring %p => %pad. %zd x %d = total %zd bytes\n",
diff --git a/drivers/net/wireless/ath/wil6210/txrx.c b/drivers/net/wireless/ath/wil6210/txrx.c
index 389c718cd257..16b8a4e5201f 100644
--- a/drivers/net/wireless/ath/wil6210/txrx.c
+++ b/drivers/net/wireless/ath/wil6210/txrx.c
@@ -178,14 +178,14 @@ static int wil_vring_alloc(struct wil6210_priv *wil, struct vring *vring)
 	 *
 	 * HW has a limitation that all vring addresses must share the same
 	 * upper 16 bits of the 48-bit address. To work around that,
-	 * if we are using 48 bit addresses switch to 32 bit allocation
-	 * before allocating vring memory.
+	 * if we are using more than 32-bit addresses, switch to a 32-bit
+	 * allocation before allocating vring memory.
 	 *
 	 * There's no check on the return value of dma_set_mask_and_coherent:
 	 * we assume that if the mask could be set during initialization,
 	 * setting it again on this system cannot fail.
 	 */
-	if (wil->use_extended_dma_addr)
+	if (wil->dma_addr_size > 32)
 		dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32));
 
 	vring->va = dma_alloc_coherent(dev, sz, &vring->pa, GFP_KERNEL);
@@ -195,8 +195,9 @@ static int wil_vring_alloc(struct wil6210_priv *wil, struct vring *vring)
 		return -ENOMEM;
 	}
 
-	if (wil->use_extended_dma_addr)
-		dma_set_mask_and_coherent(dev, DMA_BIT_MASK(48));
+	if (wil->dma_addr_size > 32)
+		dma_set_mask_and_coherent(dev,
+					  DMA_BIT_MASK(wil->dma_addr_size));
 
 	/* initially, all descriptors are SW owned
 	 * For Tx and Rx, ownership bit is at the same location, thus
@@ -347,7 +348,6 @@ static int wil_vring_alloc_skb(struct wil6210_priv *wil, struct vring *vring,
 static void wil_rx_add_radiotap_header(struct wil6210_priv *wil,
 				       struct sk_buff *skb)
 {
-	struct wireless_dev *wdev = wil->wdev;
 	struct wil6210_rtap {
 		struct ieee80211_radiotap_header rthdr;
 		/* fields should be in the order of bits in rthdr.it_present */
@@ -374,7 +374,7 @@ static void wil_rx_add_radiotap_header(struct wil6210_priv *wil,
 	int rtap_len = sizeof(struct wil6210_rtap);
 	int phy_length = 0; /* phy info header size, bytes */
 	static char phy_data[128];
-	struct ieee80211_channel *ch = wdev->preset_chandef.chan;
+	struct ieee80211_channel *ch = wil->monitor_chandef.chan;
 
 	if (rtap_include_phy_info) {
 		rtap_len = sizeof(*rtap_vendor) + sizeof(*d);
diff --git a/drivers/net/wireless/ath/wil6210/wil6210.h b/drivers/net/wireless/ath/wil6210/wil6210.h
index 1e340d04bd70..0df2aada6659 100644
--- a/drivers/net/wireless/ath/wil6210/wil6210.h
+++ b/drivers/net/wireless/ath/wil6210/wil6210.h
@@ -1,5 +1,6 @@
 /*
  * Copyright (c) 2012-2017 Qualcomm Atheros, Inc.
+ * Copyright (c) 2018, The Linux Foundation. All rights reserved.
  *
  * Permission to use, copy, modify, and/or distribute this software for any
  * purpose with or without fee is hereby granted, provided that the above
@@ -82,18 +83,18 @@ static inline u32 WIL_GET_BITS(u32 x, int b0, int b1)
  */
 #define WIL_MAX_MPDU_OVERHEAD	(62)
 
-struct wil_suspend_stats {
+struct wil_suspend_count_stats {
 	unsigned long successful_suspends;
-	unsigned long failed_suspends;
 	unsigned long successful_resumes;
+	unsigned long failed_suspends;
 	unsigned long failed_resumes;
-	unsigned long rejected_by_device;
+};
+
+struct wil_suspend_stats {
+	struct wil_suspend_count_stats r_off;
+	struct wil_suspend_count_stats r_on;
+	unsigned long rejected_by_device; /* only radio on */
 	unsigned long rejected_by_host;
-	unsigned long long total_suspend_time;
-	unsigned long long min_suspend_time;
-	unsigned long long max_suspend_time;
-	ktime_t collection_start;
-	ktime_t suspend_start_time;
 };
 
 /* Calculate MAC buffer size for the firmware. It includes all overhead,
@@ -161,10 +162,15 @@ struct RGF_ICR {
 #define RGF_USER_USAGE_6		(0x880018)
 	#define BIT_USER_OOB_MODE		BIT(31)
 	#define BIT_USER_OOB_R2_MODE		BIT(30)
+#define RGF_USER_USAGE_8		(0x880020)
+	#define BIT_USER_PREVENT_DEEP_SLEEP	BIT(0)
+	#define BIT_USER_SUPPORT_T_POWER_ON_0	BIT(1)
+	#define BIT_USER_EXT_CLK		BIT(2)
 #define RGF_USER_HW_MACHINE_STATE	(0x8801dc)
 	#define HW_MACHINE_BOOT_DONE	(0x3fffffd)
 #define RGF_USER_USER_CPU_0		(0x8801e0)
 	#define BIT_USER_USER_CPU_MAN_RST	BIT(1) /* user_cpu_man_rst */
+#define RGF_USER_CPU_PC			(0x8801e8)
 #define RGF_USER_MAC_CPU_0		(0x8801fc)
 	#define BIT_USER_MAC_CPU_MAN_RST	BIT(1) /* mac_cpu_man_rst */
 #define RGF_USER_USER_SCRATCH_PAD	(0x8802bc)
@@ -190,6 +196,19 @@ struct RGF_ICR {
 #define RGF_USER_CLKS_CTL_EXT_SW_RST_VEC_1	(0x880c2c)
 #define RGF_USER_SPARROW_M_4			(0x880c50) /* Sparrow */
 	#define BIT_SPARROW_M_4_SEL_SLEEP_OR_REF	BIT(2)
+#define RGF_USER_OTP_HW_RD_MACHINE_1	(0x880ce0)
+	#define BIT_NO_FLASH_INDICATION		BIT(8)
+#define RGF_USER_XPM_IFC_RD_TIME1	(0x880cec)
+#define RGF_USER_XPM_IFC_RD_TIME2	(0x880cf0)
+#define RGF_USER_XPM_IFC_RD_TIME3	(0x880cf4)
+#define RGF_USER_XPM_IFC_RD_TIME4	(0x880cf8)
+#define RGF_USER_XPM_IFC_RD_TIME5	(0x880cfc)
+#define RGF_USER_XPM_IFC_RD_TIME6	(0x880d00)
+#define RGF_USER_XPM_IFC_RD_TIME7	(0x880d04)
+#define RGF_USER_XPM_IFC_RD_TIME8	(0x880d08)
+#define RGF_USER_XPM_IFC_RD_TIME9	(0x880d0c)
+#define RGF_USER_XPM_IFC_RD_TIME10	(0x880d10)
+#define RGF_USER_XPM_RD_DOUT_SAMPLE_TIME (0x880d64)
 
 #define RGF_DMA_EP_TX_ICR		(0x881bb4) /* struct RGF_ICR */
 	#define BIT_DMA_EP_TX_ICR_TX_DONE	BIT(0)
@@ -280,22 +299,33 @@ struct RGF_ICR {
 #define RGF_CAF_PLL_LOCK_STATUS		(0x88afec)
 	#define BIT_CAF_OSC_DIG_XTAL_STABLE	BIT(0)
 
+#define USER_EXT_USER_PMU_3		(0x88d00c)
+	#define BIT_PMU_DEVICE_RDY		BIT(0)
+
 #define RGF_USER_JTAG_DEV_ID	(0x880b34) /* device ID */
 	#define JTAG_DEV_ID_SPARROW	(0x2632072f)
+	#define JTAG_DEV_ID_TALYN	(0x7e0e1)
 
 #define RGF_USER_REVISION_ID		(0x88afe4)
 #define RGF_USER_REVISION_ID_MASK	(3)
 	#define REVISION_ID_SPARROW_B0	(0x0)
 	#define REVISION_ID_SPARROW_D0	(0x3)
 
+#define RGF_OTP_MAC			(0x8a0620)
+
 /* crash codes for FW/Ucode stored here */
-#define RGF_FW_ASSERT_CODE		(0x91f020)
-#define RGF_UCODE_ASSERT_CODE		(0x91f028)
+
+/* ASSERT RGFs */
+#define SPARROW_RGF_FW_ASSERT_CODE	(0x91f020)
+#define SPARROW_RGF_UCODE_ASSERT_CODE	(0x91f028)
+#define TALYN_RGF_FW_ASSERT_CODE	(0xa37020)
+#define TALYN_RGF_UCODE_ASSERT_CODE	(0xa37028)
 
 enum {
 	HW_VER_UNKNOWN,
 	HW_VER_SPARROW_B0, /* REVISION_ID_SPARROW_B0 */
 	HW_VER_SPARROW_D0, /* REVISION_ID_SPARROW_D0 */
+	HW_VER_TALYN,	/* JTAG_DEV_ID_TALYN */
 };
 
 /* popular locations */
@@ -311,6 +341,10 @@ enum {
 #define WIL_DATA_COMPLETION_TO_MS 200
 
 /* Hardware definitions end */
+#define SPARROW_FW_MAPPING_TABLE_SIZE 10
+#define TALYN_FW_MAPPING_TABLE_SIZE 13
+#define MAX_FW_MAPPING_TABLE_SIZE 13
+
 struct fw_map {
 	u32 from; /* linker address - from, inclusive */
 	u32 to;   /* linker address - to, exclusive */
@@ -320,7 +354,10 @@ struct fw_map {
 };
 
 /* array size should be in sync with the actual definition in wmi.c */
-extern const struct fw_map fw_mapping[10];
+extern const struct fw_map sparrow_fw_mapping[SPARROW_FW_MAPPING_TABLE_SIZE];
+extern const struct fw_map sparrow_d0_mac_rgf_ext;
+extern const struct fw_map talyn_fw_mapping[TALYN_FW_MAPPING_TABLE_SIZE];
+extern struct fw_map fw_mapping[MAX_FW_MAPPING_TABLE_SIZE];
 
 /**
  * mk_cidxtid - construct @cidxtid field
@@ -435,12 +472,13 @@ enum { /* for wil6210_priv.status */
 	wil_status_fwconnected,
 	wil_status_dontscan,
 	wil_status_mbox_ready, /* MBOX structures ready */
-	wil_status_irqen, /* FIXME: interrupts enabled - for debug */
+	wil_status_irqen, /* interrupts enabled - for debug */
 	wil_status_napi_en, /* NAPI enabled protected by wil->mutex */
 	wil_status_resetting, /* reset in progress */
 	wil_status_suspending, /* suspend in progress */
 	wil_status_suspended, /* suspend completed, device is suspended */
 	wil_status_resuming, /* resume in progress */
+	wil_status_collecting_dumps, /* crashdump collection in progress */
 	wil_status_last /* keep last */
 };
 
@@ -565,7 +603,8 @@ enum {
 };
 
 enum {
-	hw_capability_last
+	hw_capa_no_flash,
+	hw_capa_last
 };
 
 struct wil_probe_client_req {
@@ -616,6 +655,16 @@ struct blink_on_off_time {
 	u32 off_ms;
 };
 
+struct wil_debugfs_iomem_data {
+	void *offset;
+	struct wil6210_priv *wil;
+};
+
+struct wil_debugfs_data {
+	struct wil_debugfs_iomem_data *data_arr;
+	int iomem_data_count;
+};
+
 extern struct blink_on_off_time led_blink_time[WIL_LED_TIME_LAST];
 extern u8 led_id;
 extern u8 led_polarity;
@@ -631,14 +680,19 @@ struct wil6210_priv {
 	u8 chip_revision;
 	const char *hw_name;
 	const char *wil_fw_name;
-	DECLARE_BITMAP(hw_capabilities, hw_capability_last);
+	char *board_file;
+	u32 brd_file_addr;
+	u32 brd_file_max_size;
+	DECLARE_BITMAP(hw_capa, hw_capa_last);
 	DECLARE_BITMAP(fw_capabilities, WMI_FW_CAPABILITY_MAX);
+	DECLARE_BITMAP(platform_capa, WIL_PLATFORM_CAPA_MAX);
 	u8 n_mids; /* number of additional MIDs as reported by FW */
 	u32 recovery_count; /* num of FW recovery attempts in a short time */
 	u32 recovery_state; /* FW recovery state machine */
 	unsigned long last_fw_recovery; /* jiffies of last fw recovery */
 	wait_queue_head_t wq; /* for all wait_event() use */
 	/* profile */
+	struct cfg80211_chan_def monitor_chandef;
 	u32 monitor_flags;
 	u32 privacy; /* secure connection? */
 	u8 hidden_ssid; /* relevant in AP mode */
@@ -694,7 +748,7 @@ struct wil6210_priv {
 	struct wil_sta_info sta[WIL6210_MAX_CID];
 	int bcast_vring;
 	u32 vring_idle_trsh; /* HW fetches up to 16 descriptors at once  */
-	bool use_extended_dma_addr; /* indicates whether we are using 48 bits */
+	u32 dma_addr_size; /* indicates dma addr size */
 	/* scan */
 	struct cfg80211_scan_request *scan_request;
 
@@ -703,11 +757,12 @@ struct wil6210_priv {
 	atomic_t isr_count_rx, isr_count_tx;
 	/* debugfs */
 	struct dentry *debug;
-	struct wil_blob_wrapper blobs[ARRAY_SIZE(fw_mapping)];
+	struct wil_blob_wrapper blobs[MAX_FW_MAPPING_TABLE_SIZE];
 	u8 discovery_mode;
 	u8 abft_len;
 	u8 wakeup_trigger;
 	struct wil_suspend_stats suspend_stats;
+	struct wil_debugfs_data dbg_data;
 
 	void *platform_handle;
 	struct wil_platform_ops platform_ops;
@@ -731,16 +786,16 @@ struct wil6210_priv {
 
 	int fw_calib_result;
 
-#ifdef CONFIG_PM
-#ifdef CONFIG_PM_SLEEP
 	struct notifier_block pm_notify;
-#endif /* CONFIG_PM_SLEEP */
-#endif /* CONFIG_PM */
 
 	bool suspend_resp_rcvd;
 	bool suspend_resp_comp;
 	u32 bus_request_kbps;
 	u32 bus_request_kbps_pre_suspend;
+
+	u32 rgf_fw_assert_code_addr;
+	u32 rgf_ucode_assert_code_addr;
+	u32 iccm_base;
 };
 
 #define wil_to_wiphy(i) (i->wdev->wiphy)
@@ -861,10 +916,13 @@ int wil_up(struct wil6210_priv *wil);
 int __wil_up(struct wil6210_priv *wil);
 int wil_down(struct wil6210_priv *wil);
 int __wil_down(struct wil6210_priv *wil);
+void wil_refresh_fw_capabilities(struct wil6210_priv *wil);
 void wil_mbox_ring_le2cpus(struct wil6210_mbox_ring *r);
 int wil_find_cid(struct wil6210_priv *wil, const u8 *mac);
 void wil_set_ethtoolops(struct net_device *ndev);
 
+struct fw_map *wil_find_fw_mapping(const char *section);
+void __iomem *wmi_buffer_block(struct wil6210_priv *wil, __le32 ptr, u32 size);
 void __iomem *wmi_buffer(struct wil6210_priv *wil, __le32 ptr);
 void __iomem *wmi_addr(struct wil6210_priv *wil, u32 ptr);
 int wmi_read_hdr(struct wil6210_priv *wil, __le32 ptr,
@@ -997,11 +1055,17 @@ int wil_iftype_nl2wmi(enum nl80211_iftype type);
 
 int wil_request_firmware(struct wil6210_priv *wil, const char *name,
 			 bool load);
+int wil_request_board(struct wil6210_priv *wil, const char *name);
 bool wil_fw_verify_file_exists(struct wil6210_priv *wil, const char *name);
 
+void wil_pm_runtime_allow(struct wil6210_priv *wil);
+void wil_pm_runtime_forbid(struct wil6210_priv *wil);
+int wil_pm_runtime_get(struct wil6210_priv *wil);
+void wil_pm_runtime_put(struct wil6210_priv *wil);
+
 int wil_can_suspend(struct wil6210_priv *wil, bool is_runtime);
-int wil_suspend(struct wil6210_priv *wil, bool is_runtime);
-int wil_resume(struct wil6210_priv *wil, bool is_runtime);
+int wil_suspend(struct wil6210_priv *wil, bool is_runtime, bool keep_radio_on);
+int wil_resume(struct wil6210_priv *wil, bool is_runtime, bool keep_radio_on);
 bool wil_is_wmi_idle(struct wil6210_priv *wil);
 int wmi_resume(struct wil6210_priv *wil);
 int wmi_suspend(struct wil6210_priv *wil);
@@ -1016,4 +1080,8 @@ void wil_halp_unvote(struct wil6210_priv *wil);
 void wil6210_set_halp(struct wil6210_priv *wil);
 void wil6210_clear_halp(struct wil6210_priv *wil);
 
+int wmi_start_sched_scan(struct wil6210_priv *wil,
+			 struct cfg80211_sched_scan_request *request);
+int wmi_stop_sched_scan(struct wil6210_priv *wil);
+
 #endif /* __WIL6210_H__ */
diff --git a/drivers/net/wireless/ath/wil6210/wil_crash_dump.c b/drivers/net/wireless/ath/wil6210/wil_crash_dump.c
index e53cf0cf7031..1ed330674d9b 100644
--- a/drivers/net/wireless/ath/wil6210/wil_crash_dump.c
+++ b/drivers/net/wireless/ath/wil6210/wil_crash_dump.c
@@ -72,6 +72,15 @@ int wil_fw_copy_crash_dump(struct wil6210_priv *wil, void *dest, u32 size)
 		return -EINVAL;
 	}
 
+	set_bit(wil_status_collecting_dumps, wil->status);
+	if (test_bit(wil_status_suspending, wil->status) ||
+	    test_bit(wil_status_suspended, wil->status) ||
+	    test_bit(wil_status_resetting, wil->status)) {
+		wil_err(wil, "cannot collect fw dump during suspend/reset\n");
+		clear_bit(wil_status_collecting_dumps, wil->status);
+		return -EINVAL;
+	}
+
 	/* copy to crash dump area */
 	for (i = 0; i < ARRAY_SIZE(fw_mapping); i++) {
 		map = &fw_mapping[i];
@@ -91,6 +100,8 @@ int wil_fw_copy_crash_dump(struct wil6210_priv *wil, void *dest, u32 size)
 				     (const void __iomem * __force)data, len);
 	}
 
+	clear_bit(wil_status_collecting_dumps, wil->status);
+
 	return 0;
 }
 
diff --git a/drivers/net/wireless/ath/wil6210/wil_platform.h b/drivers/net/wireless/ath/wil6210/wil_platform.h
index 5d9e4bfcb045..177026e5323b 100644
--- a/drivers/net/wireless/ath/wil6210/wil_platform.h
+++ b/drivers/net/wireless/ath/wil6210/wil_platform.h
@@ -27,6 +27,18 @@ enum wil_platform_event {
 	WIL_PLATFORM_EVT_POST_SUSPEND = 4,
 };
 
+enum wil_platform_features {
+	WIL_PLATFORM_FEATURE_FW_EXT_CLK_CONTROL = 0,
+	WIL_PLATFORM_FEATURE_MAX,
+};
+
+enum wil_platform_capa {
+	WIL_PLATFORM_CAPA_RADIO_ON_IN_SUSPEND = 0,
+	WIL_PLATFORM_CAPA_T_PWR_ON_0 = 1,
+	WIL_PLATFORM_CAPA_EXT_CLK = 2,
+	WIL_PLATFORM_CAPA_MAX,
+};
+
 /**
  * struct wil_platform_ops - wil platform module calls from this
  * driver to platform driver
@@ -37,7 +49,8 @@ struct wil_platform_ops {
 	int (*resume)(void *handle, bool device_powered_on);
 	void (*uninit)(void *handle);
 	int (*notify)(void *handle, enum wil_platform_event evt);
-	bool (*keep_radio_on_during_sleep)(void *handle);
+	int (*get_capa)(void *handle);
+	void (*set_features)(void *handle, int features);
 };
 
 /**
diff --git a/drivers/net/wireless/ath/wil6210/wmi.c b/drivers/net/wireless/ath/wil6210/wmi.c
index ffdd2fa401b1..b31e2514f8c2 100644
--- a/drivers/net/wireless/ath/wil6210/wmi.c
+++ b/drivers/net/wireless/ath/wil6210/wmi.c
@@ -1,5 +1,6 @@
 /*
  * Copyright (c) 2012-2017 Qualcomm Atheros, Inc.
+ * Copyright (c) 2018, The Linux Foundation. All rights reserved.
  *
  * Permission to use, copy, modify, and/or distribute this software for any
  * purpose with or without fee is hereby granted, provided that the above
@@ -38,6 +39,7 @@ MODULE_PARM_DESC(led_id,
 		 " 60G device led enablement. Set the led ID (0-2) to enable");
 
 #define WIL_WAIT_FOR_SUSPEND_RESUME_COMP 200
+#define WIL_WMI_CALL_GENERAL_TO_MS 100
 
 /**
  * WMI event receiving - theory of operations
@@ -69,23 +71,23 @@ MODULE_PARM_DESC(led_id,
  * On the PCI bus, there is one BAR (BAR0) of 2Mb size, exposing
  * AHB addresses starting from 0x880000
  *
- * Internally, firmware uses addresses that allows faster access but
+ * Internally, firmware uses addresses that allow faster access but
  * are invisible from the host. To read from these addresses, an
  * alternative AHB address must be used.
- *
- * Memory mapping
- * Linker address         PCI/Host address
- *                        0x880000 .. 0xa80000  2Mb BAR0
- * 0x800000 .. 0x807000   0x900000 .. 0x907000  28k DCCM
- * 0x840000 .. 0x857000   0x908000 .. 0x91f000  92k PERIPH
  */
 
 /**
- * @fw_mapping provides memory remapping table
+ * @sparrow_fw_mapping provides memory remapping table for sparrow
  *
+ * array size should be in sync with the declaration in wil6210.h
+ *
+ * Sparrow memory mapping:
+ * Linker address         PCI/Host address
+ *                        0x880000 .. 0xa80000  2Mb BAR0
+ * 0x800000 .. 0x808000   0x900000 .. 0x908000  32k DCCM
+ * 0x840000 .. 0x860000   0x908000 .. 0x928000  128k PERIPH
  */
-const struct fw_map fw_mapping[] = {
+const struct fw_map sparrow_fw_mapping[] = {
 	/* FW code RAM 256k */
 	{0x000000, 0x040000, 0x8c0000, "fw_code", true},
 	/* FW data RAM 32k */
@@ -111,6 +113,59 @@ const struct fw_map fw_mapping[] = {
 	{0x800000, 0x804000, 0x940000, "uc_data", false},
 };
 
+/**
+ * @sparrow_d0_mac_rgf_ext - mac_rgf_ext section for Sparrow D0;
+ * it is a bit larger to support extra features
+ */
+const struct fw_map sparrow_d0_mac_rgf_ext = {
+	0x88c000, 0x88c500, 0x88c000, "mac_rgf_ext", true
+};
+
+/**
+ * @talyn_fw_mapping provides memory remapping table for Talyn
+ *
+ * array size should be in sync with the declaration in wil6210.h
+ *
+ * Talyn memory mapping:
+ * Linker address         PCI/Host address
+ *                        0x880000 .. 0xc80000  4Mb BAR0
+ * 0x800000 .. 0x820000   0xa00000 .. 0xa20000  128k DCCM
+ * 0x840000 .. 0x858000   0xa20000 .. 0xa38000  96k PERIPH
+ */
+const struct fw_map talyn_fw_mapping[] = {
+	/* FW code RAM 1M */
+	{0x000000, 0x100000, 0x900000, "fw_code", true},
+	/* FW data RAM 128k */
+	{0x800000, 0x820000, 0xa00000, "fw_data", true},
+	/* periph. data RAM 96k */
+	{0x840000, 0x858000, 0xa20000, "fw_peri", true},
+	/* various RGF 40k */
+	{0x880000, 0x88a000, 0x880000, "rgf", true},
+	/* AGC table 4k */
+	{0x88a000, 0x88b000, 0x88a000, "AGC_tbl", true},
+	/* Pcie_ext_rgf 4k */
+	{0x88b000, 0x88c000, 0x88b000, "rgf_ext", true},
+	/* mac_ext_rgf 1344b */
+	{0x88c000, 0x88c540, 0x88c000, "mac_rgf_ext", true},
+	/* ext USER RGF 4k */
+	{0x88d000, 0x88e000, 0x88d000, "ext_user_rgf", true},
+	/* OTP 4k */
+	{0x8a0000, 0x8a1000, 0x8a0000, "otp", true},
+	/* DMA EXT RGF 64k */
+	{0x8b0000, 0x8c0000, 0x8b0000, "dma_ext_rgf", true},
+	/* upper area 1536k */
+	{0x900000, 0xa80000, 0x900000, "upper", true},
+	/* UCODE areas - accessible by debugfs blobs but not by
+	 * wmi_addr_remap. UCODE areas MUST be added AFTER FW areas!
+	 */
+	/* ucode code RAM 256k */
+	{0x000000, 0x040000, 0xa38000, "uc_code", false},
+	/* ucode data RAM 32k */
+	{0x800000, 0x808000, 0xa78000, "uc_data", false},
+};
+
+struct fw_map fw_mapping[MAX_FW_MAPPING_TABLE_SIZE];
+
 struct blink_on_off_time led_blink_time[] = {
 	{WIL_LED_BLINK_ON_SLOW_MS, WIL_LED_BLINK_OFF_SLOW_MS},
 	{WIL_LED_BLINK_ON_MED_MS, WIL_LED_BLINK_OFF_MED_MS},
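fw_mapping (declared just above) is now a writable table; presumably it is populated from one of the const per-chip tables once the chip is identified, with the copy living elsewhere in the driver. A rough sketch of that selection, assuming wil->hw_version holds one of the HW_VER_* values and treating the Sparrow D0 override as an assumption:

	struct fw_map *map;

	memset(fw_mapping, 0, sizeof(fw_mapping));
	if (wil->hw_version == HW_VER_TALYN)
		memcpy(fw_mapping, talyn_fw_mapping, sizeof(talyn_fw_mapping));
	else
		memcpy(fw_mapping, sparrow_fw_mapping, sizeof(sparrow_fw_mapping));
	/* Sparrow D0 presumably gets the larger mac_rgf_ext section: */
	map = wil_find_fw_mapping("mac_rgf_ext");
	if (map && wil->hw_version == HW_VER_SPARROW_D0)
		*map = sparrow_d0_mac_rgf_ext;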
@@ -138,15 +193,35 @@ static u32 wmi_addr_remap(u32 x)
 }
 
 /**
+ * find fw_mapping entry by section name
+ * @section - section name
+ *
+ * Return a pointer to the matching entry, or NULL if not found
+ */
+struct fw_map *wil_find_fw_mapping(const char *section)
+{
+	int i;
+
+	for (i = 0; i < ARRAY_SIZE(fw_mapping); i++)
+		if (fw_mapping[i].name &&
+		    !strcmp(section, fw_mapping[i].name))
+			return &fw_mapping[i];
+
+	return NULL;
+}
+
+/**
  * Check address validity for WMI buffer; remap if needed
  * @ptr - internal (linker) fw/ucode address
+ * @size - if non-zero, validate that the block does not
+ *  exceed the device memory (BAR)
  *
  * Valid buffer should be DWORD aligned
  *
  * return address for accessing buffer from the host;
  * if buffer is not valid, return NULL.
  */
-void __iomem *wmi_buffer(struct wil6210_priv *wil, __le32 ptr_)
+void __iomem *wmi_buffer_block(struct wil6210_priv *wil, __le32 ptr_, u32 size)
 {
 	u32 off;
 	u32 ptr = le32_to_cpu(ptr_);
@@ -161,10 +236,17 @@ void __iomem *wmi_buffer(struct wil6210_priv *wil, __le32 ptr_)
 	off = HOSTADDR(ptr);
 	if (off > wil->bar_size - 4)
 		return NULL;
+	if (size && ((off + size > wil->bar_size) || (off + size < off)))
+		return NULL;
 
 	return wil->csr + off;
 }
 
+void __iomem *wmi_buffer(struct wil6210_priv *wil, __le32 ptr_)
+{
+	return wmi_buffer_block(wil, ptr_, 0);
+}
+
 /**
  * Check address validity
  */
@@ -198,6 +280,242 @@ int wmi_read_hdr(struct wil6210_priv *wil, __le32 ptr,
 	return 0;
 }
 
+static const char *cmdid2name(u16 cmdid)
+{
+	switch (cmdid) {
+	case WMI_NOTIFY_REQ_CMDID:
+		return "WMI_NOTIFY_REQ_CMD";
+	case WMI_START_SCAN_CMDID:
+		return "WMI_START_SCAN_CMD";
+	case WMI_CONNECT_CMDID:
+		return "WMI_CONNECT_CMD";
+	case WMI_DISCONNECT_CMDID:
+		return "WMI_DISCONNECT_CMD";
+	case WMI_SW_TX_REQ_CMDID:
+		return "WMI_SW_TX_REQ_CMD";
+	case WMI_GET_RF_SECTOR_PARAMS_CMDID:
+		return "WMI_GET_RF_SECTOR_PARAMS_CMD";
+	case WMI_SET_RF_SECTOR_PARAMS_CMDID:
+		return "WMI_SET_RF_SECTOR_PARAMS_CMD";
+	case WMI_GET_SELECTED_RF_SECTOR_INDEX_CMDID:
+		return "WMI_GET_SELECTED_RF_SECTOR_INDEX_CMD";
+	case WMI_SET_SELECTED_RF_SECTOR_INDEX_CMDID:
+		return "WMI_SET_SELECTED_RF_SECTOR_INDEX_CMD";
+	case WMI_BRP_SET_ANT_LIMIT_CMDID:
+		return "WMI_BRP_SET_ANT_LIMIT_CMD";
+	case WMI_TOF_SESSION_START_CMDID:
+		return "WMI_TOF_SESSION_START_CMD";
+	case WMI_AOA_MEAS_CMDID:
+		return "WMI_AOA_MEAS_CMD";
+	case WMI_PMC_CMDID:
+		return "WMI_PMC_CMD";
+	case WMI_TOF_GET_TX_RX_OFFSET_CMDID:
+		return "WMI_TOF_GET_TX_RX_OFFSET_CMD";
+	case WMI_TOF_SET_TX_RX_OFFSET_CMDID:
+		return "WMI_TOF_SET_TX_RX_OFFSET_CMD";
+	case WMI_VRING_CFG_CMDID:
+		return "WMI_VRING_CFG_CMD";
+	case WMI_BCAST_VRING_CFG_CMDID:
+		return "WMI_BCAST_VRING_CFG_CMD";
+	case WMI_TRAFFIC_SUSPEND_CMDID:
+		return "WMI_TRAFFIC_SUSPEND_CMD";
+	case WMI_TRAFFIC_RESUME_CMDID:
+		return "WMI_TRAFFIC_RESUME_CMD";
+	case WMI_ECHO_CMDID:
+		return "WMI_ECHO_CMD";
+	case WMI_SET_MAC_ADDRESS_CMDID:
+		return "WMI_SET_MAC_ADDRESS_CMD";
+	case WMI_LED_CFG_CMDID:
+		return "WMI_LED_CFG_CMD";
+	case WMI_PCP_START_CMDID:
+		return "WMI_PCP_START_CMD";
+	case WMI_PCP_STOP_CMDID:
+		return "WMI_PCP_STOP_CMD";
+	case WMI_SET_SSID_CMDID:
+		return "WMI_SET_SSID_CMD";
+	case WMI_GET_SSID_CMDID:
+		return "WMI_GET_SSID_CMD";
+	case WMI_SET_PCP_CHANNEL_CMDID:
+		return "WMI_SET_PCP_CHANNEL_CMD";
+	case WMI_GET_PCP_CHANNEL_CMDID:
+		return "WMI_GET_PCP_CHANNEL_CMD";
+	case WMI_P2P_CFG_CMDID:
+		return "WMI_P2P_CFG_CMD";
+	case WMI_START_LISTEN_CMDID:
+		return "WMI_START_LISTEN_CMD";
+	case WMI_START_SEARCH_CMDID:
+		return "WMI_START_SEARCH_CMD";
+	case WMI_DISCOVERY_STOP_CMDID:
+		return "WMI_DISCOVERY_STOP_CMD";
+	case WMI_DELETE_CIPHER_KEY_CMDID:
+		return "WMI_DELETE_CIPHER_KEY_CMD";
+	case WMI_ADD_CIPHER_KEY_CMDID:
+		return "WMI_ADD_CIPHER_KEY_CMD";
+	case WMI_SET_APPIE_CMDID:
+		return "WMI_SET_APPIE_CMD";
+	case WMI_CFG_RX_CHAIN_CMDID:
+		return "WMI_CFG_RX_CHAIN_CMD";
+	case WMI_TEMP_SENSE_CMDID:
+		return "WMI_TEMP_SENSE_CMD";
+	case WMI_DEL_STA_CMDID:
+		return "WMI_DEL_STA_CMD";
+	case WMI_DISCONNECT_STA_CMDID:
+		return "WMI_DISCONNECT_STA_CMD";
+	case WMI_VRING_BA_EN_CMDID:
+		return "WMI_VRING_BA_EN_CMD";
+	case WMI_VRING_BA_DIS_CMDID:
+		return "WMI_VRING_BA_DIS_CMD";
+	case WMI_RCP_DELBA_CMDID:
+		return "WMI_RCP_DELBA_CMD";
+	case WMI_RCP_ADDBA_RESP_CMDID:
+		return "WMI_RCP_ADDBA_RESP_CMD";
+	case WMI_PS_DEV_PROFILE_CFG_CMDID:
+		return "WMI_PS_DEV_PROFILE_CFG_CMD";
+	case WMI_SET_MGMT_RETRY_LIMIT_CMDID:
+		return "WMI_SET_MGMT_RETRY_LIMIT_CMD";
+	case WMI_GET_MGMT_RETRY_LIMIT_CMDID:
+		return "WMI_GET_MGMT_RETRY_LIMIT_CMD";
+	case WMI_ABORT_SCAN_CMDID:
+		return "WMI_ABORT_SCAN_CMD";
+	case WMI_NEW_STA_CMDID:
+		return "WMI_NEW_STA_CMD";
+	case WMI_SET_THERMAL_THROTTLING_CFG_CMDID:
+		return "WMI_SET_THERMAL_THROTTLING_CFG_CMD";
+	case WMI_GET_THERMAL_THROTTLING_CFG_CMDID:
+		return "WMI_GET_THERMAL_THROTTLING_CFG_CMD";
+	case WMI_LINK_MAINTAIN_CFG_WRITE_CMDID:
+		return "WMI_LINK_MAINTAIN_CFG_WRITE_CMD";
+	case WMI_LO_POWER_CALIB_FROM_OTP_CMDID:
+		return "WMI_LO_POWER_CALIB_FROM_OTP_CMD";
+	case WMI_START_SCHED_SCAN_CMDID:
+		return "WMI_START_SCHED_SCAN_CMD";
+	case WMI_STOP_SCHED_SCAN_CMDID:
+		return "WMI_STOP_SCHED_SCAN_CMD";
+	default:
+		return "Untracked CMD";
+	}
+}
+
+static const char *eventid2name(u16 eventid)
+{
+	switch (eventid) {
+	case WMI_NOTIFY_REQ_DONE_EVENTID:
+		return "WMI_NOTIFY_REQ_DONE_EVENT";
+	case WMI_DISCONNECT_EVENTID:
+		return "WMI_DISCONNECT_EVENT";
+	case WMI_SW_TX_COMPLETE_EVENTID:
+		return "WMI_SW_TX_COMPLETE_EVENT";
+	case WMI_GET_RF_SECTOR_PARAMS_DONE_EVENTID:
+		return "WMI_GET_RF_SECTOR_PARAMS_DONE_EVENT";
+	case WMI_SET_RF_SECTOR_PARAMS_DONE_EVENTID:
+		return "WMI_SET_RF_SECTOR_PARAMS_DONE_EVENT";
+	case WMI_GET_SELECTED_RF_SECTOR_INDEX_DONE_EVENTID:
+		return "WMI_GET_SELECTED_RF_SECTOR_INDEX_DONE_EVENT";
+	case WMI_SET_SELECTED_RF_SECTOR_INDEX_DONE_EVENTID:
+		return "WMI_SET_SELECTED_RF_SECTOR_INDEX_DONE_EVENT";
+	case WMI_BRP_SET_ANT_LIMIT_EVENTID:
+		return "WMI_BRP_SET_ANT_LIMIT_EVENT";
+	case WMI_FW_READY_EVENTID:
+		return "WMI_FW_READY_EVENT";
+	case WMI_TRAFFIC_RESUME_EVENTID:
+		return "WMI_TRAFFIC_RESUME_EVENT";
+	case WMI_TOF_GET_TX_RX_OFFSET_EVENTID:
+		return "WMI_TOF_GET_TX_RX_OFFSET_EVENT";
+	case WMI_TOF_SET_TX_RX_OFFSET_EVENTID:
+		return "WMI_TOF_SET_TX_RX_OFFSET_EVENT";
+	case WMI_VRING_CFG_DONE_EVENTID:
+		return "WMI_VRING_CFG_DONE_EVENT";
+	case WMI_READY_EVENTID:
+		return "WMI_READY_EVENT";
+	case WMI_RX_MGMT_PACKET_EVENTID:
+		return "WMI_RX_MGMT_PACKET_EVENT";
+	case WMI_TX_MGMT_PACKET_EVENTID:
+		return "WMI_TX_MGMT_PACKET_EVENT";
+	case WMI_SCAN_COMPLETE_EVENTID:
+		return "WMI_SCAN_COMPLETE_EVENT";
+	case WMI_ACS_PASSIVE_SCAN_COMPLETE_EVENTID:
+		return "WMI_ACS_PASSIVE_SCAN_COMPLETE_EVENT";
+	case WMI_CONNECT_EVENTID:
+		return "WMI_CONNECT_EVENT";
+	case WMI_EAPOL_RX_EVENTID:
+		return "WMI_EAPOL_RX_EVENT";
+	case WMI_BA_STATUS_EVENTID:
+		return "WMI_BA_STATUS_EVENT";
+	case WMI_RCP_ADDBA_REQ_EVENTID:
+		return "WMI_RCP_ADDBA_REQ_EVENT";
+	case WMI_DELBA_EVENTID:
+		return "WMI_DELBA_EVENT";
+	case WMI_VRING_EN_EVENTID:
+		return "WMI_VRING_EN_EVENT";
+	case WMI_DATA_PORT_OPEN_EVENTID:
+		return "WMI_DATA_PORT_OPEN_EVENT";
+	case WMI_AOA_MEAS_EVENTID:
+		return "WMI_AOA_MEAS_EVENT";
+	case WMI_TOF_SESSION_END_EVENTID:
+		return "WMI_TOF_SESSION_END_EVENT";
+	case WMI_TOF_GET_CAPABILITIES_EVENTID:
+		return "WMI_TOF_GET_CAPABILITIES_EVENT";
+	case WMI_TOF_SET_LCR_EVENTID:
+		return "WMI_TOF_SET_LCR_EVENT";
+	case WMI_TOF_SET_LCI_EVENTID:
+		return "WMI_TOF_SET_LCI_EVENT";
+	case WMI_TOF_FTM_PER_DEST_RES_EVENTID:
+		return "WMI_TOF_FTM_PER_DEST_RES_EVENT";
+	case WMI_TOF_CHANNEL_INFO_EVENTID:
+		return "WMI_TOF_CHANNEL_INFO_EVENT";
+	case WMI_TRAFFIC_SUSPEND_EVENTID:
+		return "WMI_TRAFFIC_SUSPEND_EVENT";
+	case WMI_ECHO_RSP_EVENTID:
+		return "WMI_ECHO_RSP_EVENT";
+	case WMI_LED_CFG_DONE_EVENTID:
+		return "WMI_LED_CFG_DONE_EVENT";
+	case WMI_PCP_STARTED_EVENTID:
+		return "WMI_PCP_STARTED_EVENT";
+	case WMI_PCP_STOPPED_EVENTID:
+		return "WMI_PCP_STOPPED_EVENT";
+	case WMI_GET_SSID_EVENTID:
+		return "WMI_GET_SSID_EVENT";
+	case WMI_GET_PCP_CHANNEL_EVENTID:
+		return "WMI_GET_PCP_CHANNEL_EVENT";
+	case WMI_P2P_CFG_DONE_EVENTID:
+		return "WMI_P2P_CFG_DONE_EVENT";
+	case WMI_LISTEN_STARTED_EVENTID:
+		return "WMI_LISTEN_STARTED_EVENT";
+	case WMI_SEARCH_STARTED_EVENTID:
+		return "WMI_SEARCH_STARTED_EVENT";
+	case WMI_DISCOVERY_STOPPED_EVENTID:
+		return "WMI_DISCOVERY_STOPPED_EVENT";
+	case WMI_CFG_RX_CHAIN_DONE_EVENTID:
+		return "WMI_CFG_RX_CHAIN_DONE_EVENT";
+	case WMI_TEMP_SENSE_DONE_EVENTID:
+		return "WMI_TEMP_SENSE_DONE_EVENT";
+	case WMI_RCP_ADDBA_RESP_SENT_EVENTID:
+		return "WMI_RCP_ADDBA_RESP_SENT_EVENT";
+	case WMI_PS_DEV_PROFILE_CFG_EVENTID:
+		return "WMI_PS_DEV_PROFILE_CFG_EVENT";
+	case WMI_SET_MGMT_RETRY_LIMIT_EVENTID:
+		return "WMI_SET_MGMT_RETRY_LIMIT_EVENT";
+	case WMI_GET_MGMT_RETRY_LIMIT_EVENTID:
+		return "WMI_GET_MGMT_RETRY_LIMIT_EVENT";
+	case WMI_SET_THERMAL_THROTTLING_CFG_EVENTID:
+		return "WMI_SET_THERMAL_THROTTLING_CFG_EVENT";
+	case WMI_GET_THERMAL_THROTTLING_CFG_EVENTID:
+		return "WMI_GET_THERMAL_THROTTLING_CFG_EVENT";
+	case WMI_LINK_MAINTAIN_CFG_WRITE_DONE_EVENTID:
+		return "WMI_LINK_MAINTAIN_CFG_WRITE_DONE_EVENT";
+	case WMI_LO_POWER_CALIB_FROM_OTP_EVENTID:
+		return "WMI_LO_POWER_CALIB_FROM_OTP_EVENT";
+	case WMI_START_SCHED_SCAN_EVENTID:
+		return "WMI_START_SCHED_SCAN_EVENT";
+	case WMI_STOP_SCHED_SCAN_EVENTID:
+		return "WMI_STOP_SCHED_SCAN_EVENT";
+	case WMI_SCHED_SCAN_RESULT_EVENTID:
+		return "WMI_SCHED_SCAN_RESULT_EVENT";
+	default:
+		return "Untracked EVENT";
+	}
+}
+
 static int __wmi_send(struct wil6210_priv *wil, u16 cmdid, void *buf, u16 len)
 {
 	struct {
@@ -222,7 +540,7 @@ static int __wmi_send(struct wil6210_priv *wil, u16 cmdid, void *buf, u16 len)
 	uint retry;
 	int rc = 0;
 
-	if (sizeof(cmd) + len > r->entry_size) {
+	if (len > r->entry_size - sizeof(cmd)) {
 		wil_err(wil, "WMI size too large: %d bytes, max is %d\n",
 			(int)(sizeof(cmd) + len), r->entry_size);
 		return -ERANGE;
@@ -294,7 +612,8 @@ static int __wmi_send(struct wil6210_priv *wil, u16 cmdid, void *buf, u16 len)
 	}
 	cmd.hdr.seq = cpu_to_le16(++wil->wmi_seq);
 	/* set command */
-	wil_dbg_wmi(wil, "WMI command 0x%04x [%d]\n", cmdid, len);
+	wil_dbg_wmi(wil, "sending %s (0x%04x) [%d]\n",
+		    cmdid2name(cmdid), cmdid, len);
 	wil_hex_dump_wmi("Cmd ", DUMP_PREFIX_OFFSET, 16, 1, &cmd,
 			 sizeof(cmd), true);
 	wil_hex_dump_wmi("cmd ", DUMP_PREFIX_OFFSET, 16, 1, buf,
@@ -566,8 +885,6 @@ static void wmi_evt_connect(struct wil6210_priv *wil, int id, void *d, int len)
 		}
 	}
 
-	/* FIXME FW can transmit only ucast frames to peer */
-	/* FIXME real ring_id instead of hard coded 0 */
 	ether_addr_copy(wil->sta[evt->cid].addr, evt->bssid);
 	wil->sta[evt->cid].status = wil_sta_conn_pending;
 
@@ -830,6 +1147,75 @@ __acquires(&sta->tid_rx_lock) __releases(&sta->tid_rx_lock)
 	spin_unlock_bh(&sta->tid_rx_lock);
 }
 
+static void
+wmi_evt_sched_scan_result(struct wil6210_priv *wil, int id, void *d, int len)
+{
+	struct wmi_sched_scan_result_event *data = d;
+	struct wiphy *wiphy = wil_to_wiphy(wil);
+	struct ieee80211_mgmt *rx_mgmt_frame =
+		(struct ieee80211_mgmt *)data->payload;
+	int flen = len - offsetof(struct wmi_sched_scan_result_event, payload);
+	int ch_no;
+	u32 freq;
+	struct ieee80211_channel *channel;
+	s32 signal;
+	__le16 fc;
+	u32 d_len;
+	struct cfg80211_bss *bss;
+
+	if (flen < 0) {
+		wil_err(wil, "sched scan result event too short, len %d\n",
+			len);
+		return;
+	}
+
+	d_len = le32_to_cpu(data->info.len);
+	if (d_len != flen) {
+		wil_err(wil,
+			"sched scan result length mismatch, d_len %d should be %d\n",
+			d_len, flen);
+		return;
+	}
+
+	fc = rx_mgmt_frame->frame_control;
+	if (!ieee80211_is_probe_resp(fc)) {
+		wil_err(wil, "sched scan result invalid frame, fc 0x%04x\n",
+			le16_to_cpu(fc));
+		return;
+	}
+
+	ch_no = data->info.channel + 1;
+	freq = ieee80211_channel_to_frequency(ch_no, NL80211_BAND_60GHZ);
+	channel = ieee80211_get_channel(wiphy, freq);
+	if (test_bit(WMI_FW_CAPABILITY_RSSI_REPORTING, wil->fw_capabilities))
+		signal = 100 * data->info.rssi;
+	else
+		signal = data->info.sqi;
+
+	wil_dbg_wmi(wil, "sched scan result: channel %d MCS %d RSSI %d\n",
+		    data->info.channel, data->info.mcs, data->info.rssi);
+	wil_dbg_wmi(wil, "len %d qid %d mid %d cid %d\n",
+		    d_len, data->info.qid, data->info.mid, data->info.cid);
+	wil_hex_dump_wmi("PROBE ", DUMP_PREFIX_OFFSET, 16, 1, rx_mgmt_frame,
+			 d_len, true);
+
+	if (!channel) {
+		wil_err(wil, "Frame on unsupported channel\n");
+		return;
+	}
+
+	bss = cfg80211_inform_bss_frame(wiphy, channel, rx_mgmt_frame,
+					d_len, signal, GFP_KERNEL);
+	if (bss) {
+		wil_dbg_wmi(wil, "Added BSS %pM\n", rx_mgmt_frame->bssid);
+		cfg80211_put_bss(wiphy, bss);
+	} else {
+		wil_err(wil, "cfg80211_inform_bss_frame() failed\n");
+	}
+
+	cfg80211_sched_scan_results(wiphy, 0);
+}
+
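The factor of 100 applied to the RSSI above matches cfg80211's mBm convention: when the firmware advertises calibrated RSSI, the wiphy presumably registers CFG80211_SIGNAL_TYPE_MBM, where signal strength is expressed in hundredths of a dBm:

	/* mBm = 100 * dBm; e.g. an RSSI of -55 dBm is reported as -5500 */
	signal = 100 * data->info.rssi;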
 /**
  * Some events are ignored on purpose and need not be interpreted as
  * "unhandled events"
@@ -857,6 +1243,7 @@ static const struct {
 	{WMI_DELBA_EVENTID,		wmi_evt_delba},
 	{WMI_VRING_EN_EVENTID,		wmi_evt_vring_en},
 	{WMI_DATA_PORT_OPEN_EVENTID,		wmi_evt_ignore},
+	{WMI_SCHED_SCAN_RESULT_EVENTID,		wmi_evt_sched_scan_result},
 };
 
 /*
@@ -963,8 +1350,8 @@ void wmi_recv_cmd(struct wil6210_priv *wil)
 			}
 			spin_unlock_irqrestore(&wil->wmi_ev_lock, flags);
 
-			wil_dbg_wmi(wil, "WMI event 0x%04x MID %d @%d msec\n",
-				    id, wmi->mid, tstamp);
+			wil_dbg_wmi(wil, "recv %s (0x%04x) MID %d @%d msec\n",
+				    eventid2name(id), id, wmi->mid, tstamp);
 			trace_wil6210_wmi_event(wmi, &wmi[1],
 						len - sizeof(*wmi));
 		}
@@ -1380,8 +1767,14 @@ int wmi_set_ie(struct wil6210_priv *wil, u8 type, u16 ie_len, const void *ie)
 	};
 	int rc;
 	u16 len = sizeof(struct wmi_set_appie_cmd) + ie_len;
-	struct wmi_set_appie_cmd *cmd = kzalloc(len, GFP_KERNEL);
+	struct wmi_set_appie_cmd *cmd;
+
+	if (len < ie_len) {
+		rc = -EINVAL;
+		goto out;
+	}
 
+	cmd = kzalloc(len, GFP_KERNEL);
 	if (!cmd) {
 		rc = -ENOMEM;
 		goto out;
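len is u16 here, so sizeof(struct wmi_set_appie_cmd) + ie_len can wrap for a large ie_len, and the new len < ie_len test catches exactly that wrap. The failure mode in isolation, with made-up sizes:

	u16 hdr_len = 4, ie_len = 0xfffe;
	u16 len = hdr_len + ie_len;	/* 0x10002 truncates to 0x0002 */

	if (len < ie_len)		/* 2 < 0xfffe: wraparound detected */
		return -EINVAL;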
@@ -1461,7 +1854,7 @@ int wmi_rx_chain_add(struct wil6210_priv *wil, struct vring *vring)
 	int rc;
 
 	if (wdev->iftype == NL80211_IFTYPE_MONITOR) {
-		struct ieee80211_channel *ch = wdev->preset_chandef.chan;
+		struct ieee80211_channel *ch = wil->monitor_chandef.chan;
 
 		cmd.sniffer_cfg.mode = cpu_to_le32(WMI_SNIFFER_ON);
 		if (ch)
@@ -1801,6 +2194,16 @@ void wmi_event_flush(struct wil6210_priv *wil)
 	spin_unlock_irqrestore(&wil->wmi_ev_lock, flags);
 }
 
+static const char *suspend_status2name(u8 status)
+{
+	switch (status) {
+	case WMI_TRAFFIC_SUSPEND_REJECTED_LINK_NOT_IDLE:
+		return "LINK_NOT_IDLE";
+	default:
+		return "Untracked status";
+	}
+}
+
 int wmi_suspend(struct wil6210_priv *wil)
 {
 	int rc;
@@ -1816,7 +2219,7 @@ int wmi_suspend(struct wil6210_priv *wil)
 	wil->suspend_resp_rcvd = false;
 	wil->suspend_resp_comp = false;
 
-	reply.evt.status = WMI_TRAFFIC_SUSPEND_REJECTED;
+	reply.evt.status = WMI_TRAFFIC_SUSPEND_REJECTED_LINK_NOT_IDLE;
 
 	rc = wmi_call(wil, WMI_TRAFFIC_SUSPEND_CMDID, &cmd, sizeof(cmd),
 		      WMI_TRAFFIC_SUSPEND_EVENTID, &reply, sizeof(reply),
@@ -1848,8 +2251,9 @@ int wmi_suspend(struct wil6210_priv *wil)
 	}
 
 	wil_dbg_wmi(wil, "suspend_response_completed rcvd\n");
-	if (reply.evt.status == WMI_TRAFFIC_SUSPEND_REJECTED) {
-		wil_dbg_pm(wil, "device rejected the suspend\n");
+	if (reply.evt.status != WMI_TRAFFIC_SUSPEND_APPROVED) {
+		wil_dbg_pm(wil, "device rejected the suspend, %s\n",
+			   suspend_status2name(reply.evt.status));
 		wil->suspend_stats.rejected_by_device++;
 	}
 	rc = reply.evt.status;
@@ -1861,21 +2265,50 @@ out:
 	return rc;
 }
 
+static void resume_triggers2string(u32 triggers, char *string, int str_size)
+{
+	string[0] = '\0';
+
+	if (!triggers) {
+		strlcat(string, " UNKNOWN", str_size);
+		return;
+	}
+
+	if (triggers & WMI_RESUME_TRIGGER_HOST)
+		strlcat(string, " HOST", str_size);
+
+	if (triggers & WMI_RESUME_TRIGGER_UCAST_RX)
+		strlcat(string, " UCAST_RX", str_size);
+
+	if (triggers & WMI_RESUME_TRIGGER_BCAST_RX)
+		strlcat(string, " BCAST_RX", str_size);
+
+	if (triggers & WMI_RESUME_TRIGGER_WMI_EVT)
+		strlcat(string, " WMI_EVT", str_size);
+}
+
 int wmi_resume(struct wil6210_priv *wil)
 {
 	int rc;
+	char string[100];
 	struct {
 		struct wmi_cmd_hdr wmi;
 		struct wmi_traffic_resume_event evt;
 	} __packed reply;
 
 	reply.evt.status = WMI_TRAFFIC_RESUME_FAILED;
+	reply.evt.resume_triggers = WMI_RESUME_TRIGGER_UNKNOWN;
 
 	rc = wmi_call(wil, WMI_TRAFFIC_RESUME_CMDID, NULL, 0,
 		      WMI_TRAFFIC_RESUME_EVENTID, &reply, sizeof(reply),
 		      WIL_WAIT_FOR_SUSPEND_RESUME_COMP);
 	if (rc)
 		return rc;
+	resume_triggers2string(le32_to_cpu(reply.evt.resume_triggers), string,
+			       sizeof(string));
+	wil_dbg_pm(wil, "device resume %s, resume triggers:%s (0x%x)\n",
+		   reply.evt.status ? "failed" : "passed", string,
+		   le32_to_cpu(reply.evt.resume_triggers));
 
 	return reply.evt.status;
 }
@@ -1906,8 +2339,8 @@ static void wmi_event_handle(struct wil6210_priv *wil,
 		void *evt_data = (void *)(&wmi[1]);
 		u16 id = le16_to_cpu(wmi->command_id);
 
-		wil_dbg_wmi(wil, "Handle WMI 0x%04x (reply_id 0x%04x)\n",
-			    id, wil->reply_id);
+		wil_dbg_wmi(wil, "Handle %s (0x%04x) (reply_id 0x%04x)\n",
+			    eventid2name(id), id, wil->reply_id);
 		/* check if someone waits for this event */
 		if (wil->reply_id && wil->reply_id == id) {
 			WARN_ON(wil->reply_buf);
@@ -2002,3 +2435,159 @@ out:
 	spin_unlock_irqrestore(&wil->wmi_ev_lock, flags);
 	return rc;
 }
+
+static void
+wmi_sched_scan_set_ssids(struct wil6210_priv *wil,
+			 struct wmi_start_sched_scan_cmd *cmd,
+			 struct cfg80211_ssid *ssids, int n_ssids,
+			 struct cfg80211_match_set *match_sets,
+			 int n_match_sets)
+{
+	int i;
+
+	if (n_match_sets > WMI_MAX_PNO_SSID_NUM) {
+		wil_dbg_wmi(wil, "too many match sets (%d), use first %d\n",
+			    n_match_sets, WMI_MAX_PNO_SSID_NUM);
+		n_match_sets = WMI_MAX_PNO_SSID_NUM;
+	}
+	cmd->num_of_ssids = n_match_sets;
+
+	for (i = 0; i < n_match_sets; i++) {
+		struct wmi_sched_scan_ssid_match *wmi_match =
+			&cmd->ssid_for_match[i];
+		struct cfg80211_match_set *cfg_match = &match_sets[i];
+		int j;
+
+		wmi_match->ssid_len = cfg_match->ssid.ssid_len;
+		memcpy(wmi_match->ssid, cfg_match->ssid.ssid,
+		       min_t(u8, wmi_match->ssid_len, WMI_MAX_SSID_LEN));
+		wmi_match->rssi_threshold = S8_MIN;
+		if (cfg_match->rssi_thold >= S8_MIN &&
+		    cfg_match->rssi_thold <= S8_MAX)
+			wmi_match->rssi_threshold = cfg_match->rssi_thold;
+
+		for (j = 0; j < n_ssids; j++)
+			if (wmi_match->ssid_len == ssids[j].ssid_len &&
+			    memcmp(wmi_match->ssid, ssids[j].ssid,
+				   wmi_match->ssid_len) == 0)
+				wmi_match->add_ssid_to_probe = true;
+	}
+}
+
+static void
+wmi_sched_scan_set_channels(struct wil6210_priv *wil,
+			    struct wmi_start_sched_scan_cmd *cmd,
+			    u32 n_channels,
+			    struct ieee80211_channel **channels)
+{
+	int i;
+
+	if (n_channels > WMI_MAX_CHANNEL_NUM) {
+		wil_dbg_wmi(wil, "too many channels (%d), use first %d\n",
+			    n_channels, WMI_MAX_CHANNEL_NUM);
+		n_channels = WMI_MAX_CHANNEL_NUM;
+	}
+	cmd->num_of_channels = n_channels;
+
+	for (i = 0; i < n_channels; i++) {
+		struct ieee80211_channel *cfg_chan = channels[i];
+
+		cmd->channel_list[i] = cfg_chan->hw_value - 1;
+	}
+}
+
+static void
+wmi_sched_scan_set_plans(struct wil6210_priv *wil,
+			 struct wmi_start_sched_scan_cmd *cmd,
+			 struct cfg80211_sched_scan_plan *scan_plans,
+			 int n_scan_plans)
+{
+	int i;
+
+	if (n_scan_plans > WMI_MAX_PLANS_NUM) {
+		wil_dbg_wmi(wil, "too many plans (%d), use first %d\n",
+			    n_scan_plans, WMI_MAX_PLANS_NUM);
+		n_scan_plans = WMI_MAX_PLANS_NUM;
+	}
+
+	for (i = 0; i < n_scan_plans; i++) {
+		struct cfg80211_sched_scan_plan *cfg_plan = &scan_plans[i];
+
+		cmd->scan_plans[i].interval_sec =
+			cpu_to_le16(cfg_plan->interval);
+		cmd->scan_plans[i].num_of_iterations =
+			cpu_to_le16(cfg_plan->iterations);
+	}
+}
+
+int wmi_start_sched_scan(struct wil6210_priv *wil,
+			 struct cfg80211_sched_scan_request *request)
+{
+	int rc;
+	struct wmi_start_sched_scan_cmd cmd = {
+		.min_rssi_threshold = S8_MIN,
+		.initial_delay_sec = cpu_to_le16(request->delay),
+	};
+	struct {
+		struct wmi_cmd_hdr wmi;
+		struct wmi_start_sched_scan_event evt;
+	} __packed reply;
+
+	if (!test_bit(WMI_FW_CAPABILITY_PNO, wil->fw_capabilities))
+		return -ENOTSUPP;
+
+	if (request->min_rssi_thold >= S8_MIN &&
+	    request->min_rssi_thold <= S8_MAX)
+		cmd.min_rssi_threshold = request->min_rssi_thold;
+
+	wmi_sched_scan_set_ssids(wil, &cmd, request->ssids, request->n_ssids,
+				 request->match_sets, request->n_match_sets);
+	wmi_sched_scan_set_channels(wil, &cmd,
+				    request->n_channels, request->channels);
+	wmi_sched_scan_set_plans(wil, &cmd,
+				 request->scan_plans, request->n_scan_plans);
+
+	reply.evt.result = WMI_PNO_REJECT;
+
+	rc = wmi_call(wil, WMI_START_SCHED_SCAN_CMDID, &cmd, sizeof(cmd),
+		      WMI_START_SCHED_SCAN_EVENTID, &reply, sizeof(reply),
+		      WIL_WMI_CALL_GENERAL_TO_MS);
+	if (rc)
+		return rc;
+
+	if (reply.evt.result != WMI_PNO_SUCCESS) {
+		wil_err(wil, "start sched scan failed, result %d\n",
+			reply.evt.result);
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+int wmi_stop_sched_scan(struct wil6210_priv *wil)
+{
+	int rc;
+	struct {
+		struct wmi_cmd_hdr wmi;
+		struct wmi_stop_sched_scan_event evt;
+	} __packed reply;
+
+	if (!test_bit(WMI_FW_CAPABILITY_PNO, wil->fw_capabilities))
+		return -ENOTSUPP;
+
+	reply.evt.result = WMI_PNO_REJECT;
+
+	rc = wmi_call(wil, WMI_STOP_SCHED_SCAN_CMDID, NULL, 0,
+		      WMI_STOP_SCHED_SCAN_EVENTID, &reply, sizeof(reply),
+		      WIL_WMI_CALL_GENERAL_TO_MS);
+	if (rc)
+		return rc;
+
+	if (reply.evt.result != WMI_PNO_SUCCESS) {
+		wil_err(wil, "stop sched scan failed, result %d\n",
+			reply.evt.result);
+		return -EINVAL;
+	}
+
+	return 0;
+}
diff --git a/drivers/net/wireless/ath/wil6210/wmi.h b/drivers/net/wireless/ath/wil6210/wmi.h
index 5263ee717a4f..d3e75f0ff245 100644
--- a/drivers/net/wireless/ath/wil6210/wmi.h
+++ b/drivers/net/wireless/ath/wil6210/wmi.h
@@ -71,6 +71,8 @@ enum wmi_fw_capability {
 	WMI_FW_CAPABILITY_RSSI_REPORTING		= 12,
 	WMI_FW_CAPABILITY_SET_SILENT_RSSI_TABLE		= 13,
 	WMI_FW_CAPABILITY_LO_POWER_CALIB_FROM_OTP	= 14,
+	WMI_FW_CAPABILITY_PNO				= 15,
+	WMI_FW_CAPABILITY_REF_CLOCK_CONTROL		= 18,
 	WMI_FW_CAPABILITY_MAX,
 };
 
@@ -87,6 +89,8 @@ enum wmi_command_id {
 	WMI_CONNECT_CMDID				= 0x01,
 	WMI_DISCONNECT_CMDID				= 0x03,
 	WMI_DISCONNECT_STA_CMDID			= 0x04,
+	WMI_START_SCHED_SCAN_CMDID			= 0x05,
+	WMI_STOP_SCHED_SCAN_CMDID			= 0x06,
 	WMI_START_SCAN_CMDID				= 0x07,
 	WMI_SET_BSS_FILTER_CMDID			= 0x09,
 	WMI_SET_PROBED_SSID_CMDID			= 0x0A,
@@ -385,6 +389,38 @@ struct wmi_start_scan_cmd {
 	} channel_list[0];
 } __packed;
 
+#define WMI_MAX_PNO_SSID_NUM	(16)
+#define WMI_MAX_CHANNEL_NUM	(6)
+#define WMI_MAX_PLANS_NUM	(2)
+
+/* WMI_START_SCHED_SCAN_CMDID */
+struct wmi_sched_scan_ssid_match {
+	u8 ssid_len;
+	u8 ssid[WMI_MAX_SSID_LEN];
+	s8 rssi_threshold;
+	/* boolean */
+	u8 add_ssid_to_probe;
+	u8 reserved;
+} __packed;
+
+/* WMI_START_SCHED_SCAN_CMDID */
+struct wmi_sched_scan_plan {
+	__le16 interval_sec;
+	__le16 num_of_iterations;
+} __packed;
+
+/* WMI_START_SCHED_SCAN_CMDID */
+struct wmi_start_sched_scan_cmd {
+	struct wmi_sched_scan_ssid_match ssid_for_match[WMI_MAX_PNO_SSID_NUM];
+	u8 num_of_ssids;
+	s8 min_rssi_threshold;
+	u8 channel_list[WMI_MAX_CHANNEL_NUM];
+	u8 num_of_channels;
+	u8 reserved;
+	__le16 initial_delay_sec;
+	struct wmi_sched_scan_plan scan_plans[WMI_MAX_PLANS_NUM];
+} __packed;
+
 /* WMI_SET_PROBED_SSID_CMDID */
 #define MAX_PROBED_SSID_INDEX	(3)
 
@@ -1238,6 +1274,9 @@ enum wmi_event_id {
 	WMI_READY_EVENTID				= 0x1001,
 	WMI_CONNECT_EVENTID				= 0x1002,
 	WMI_DISCONNECT_EVENTID				= 0x1003,
+	WMI_START_SCHED_SCAN_EVENTID			= 0x1005,
+	WMI_STOP_SCHED_SCAN_EVENTID			= 0x1006,
+	WMI_SCHED_SCAN_RESULT_EVENTID			= 0x1007,
 	WMI_SCAN_COMPLETE_EVENTID			= 0x100A,
 	WMI_REPORT_STATISTICS_EVENTID			= 0x100B,
 	WMI_RD_MEM_RSP_EVENTID				= 0x1800,
@@ -1600,6 +1639,49 @@ struct wmi_scan_complete_event {
 	__le32 status;
 } __packed;
 
+/* wmi_rx_mgmt_info */
+struct wmi_rx_mgmt_info {
+	u8 mcs;
+	s8 rssi;
+	u8 range;
+	u8 sqi;
+	__le16 stype;
+	__le16 status;
+	__le32 len;
+	/* Not resolved when == 0xFFFFFFFF ==> Broadcast to all MIDS */
+	u8 qid;
+	/* Not resolved when == 0xFFFFFFFF ==> Broadcast to all MIDS */
+	u8 mid;
+	u8 cid;
+	/* From Radio MNGR */
+	u8 channel;
+} __packed;
+
+/* WMI_START_SCHED_SCAN_EVENTID */
+enum wmi_pno_result {
+	WMI_PNO_SUCCESS			= 0x00,
+	WMI_PNO_REJECT			= 0x01,
+	WMI_PNO_INVALID_PARAMETERS	= 0x02,
+	WMI_PNO_NOT_ENABLED		= 0x03,
+};
+
+struct wmi_start_sched_scan_event {
+	/* pno_result */
+	u8 result;
+	u8 reserved[3];
+} __packed;
+
+struct wmi_stop_sched_scan_event {
+	/* pno_result */
+	u8 result;
+	u8 reserved[3];
+} __packed;
+
+struct wmi_sched_scan_result_event {
+	struct wmi_rx_mgmt_info info;
+	u8 payload[0];
+} __packed;
+
 /* WMI_ACS_PASSIVE_SCAN_COMPLETE_EVENT */
 enum wmi_acs_info_bitmask {
 	WMI_ACS_INFO_BITMASK_BEACON_FOUND	= 0x01,
@@ -1814,24 +1896,6 @@ struct wmi_get_ssid_event {
 	u8 ssid[WMI_MAX_SSID_LEN];
 } __packed;
 
-/* wmi_rx_mgmt_info */
-struct wmi_rx_mgmt_info {
-	u8 mcs;
-	s8 rssi;
-	u8 range;
-	u8 sqi;
-	__le16 stype;
-	__le16 status;
-	__le32 len;
-	/* Not resolved when == 0xFFFFFFFF == > Broadcast to all MIDS */
-	u8 qid;
-	/* Not resolved when == 0xFFFFFFFF == > Broadcast to all MIDS */
-	u8 mid;
-	u8 cid;
-	/* From Radio MNGR */
-	u8 channel;
-} __packed;
-
 /* EVENT: WMI_RF_XPM_READ_RESULT_EVENTID */
 struct wmi_rf_xpm_read_result_event {
 	/* enum wmi_fw_status_e - success=0 or fail=1 */
@@ -2267,8 +2331,8 @@ struct wmi_link_maintain_cfg_read_done_event {
 } __packed;
 
 enum wmi_traffic_suspend_status {
-	WMI_TRAFFIC_SUSPEND_APPROVED	= 0x0,
-	WMI_TRAFFIC_SUSPEND_REJECTED	= 0x1,
+	WMI_TRAFFIC_SUSPEND_APPROVED			= 0x0,
+	WMI_TRAFFIC_SUSPEND_REJECTED_LINK_NOT_IDLE	= 0x1,
 };
 
 /* WMI_TRAFFIC_SUSPEND_EVENTID */
@@ -2282,10 +2346,21 @@ enum wmi_traffic_resume_status {
 	WMI_TRAFFIC_RESUME_FAILED	= 0x1,
 };
 
+enum wmi_resume_trigger {
+	WMI_RESUME_TRIGGER_UNKNOWN	= 0x0,
+	WMI_RESUME_TRIGGER_HOST		= 0x1,
+	WMI_RESUME_TRIGGER_UCAST_RX	= 0x2,
+	WMI_RESUME_TRIGGER_BCAST_RX	= 0x4,
+	WMI_RESUME_TRIGGER_WMI_EVT	= 0x8,
+};
+
 /* WMI_TRAFFIC_RESUME_EVENTID */
 struct wmi_traffic_resume_event {
-	/* enum wmi_traffic_resume_status_e */
+	/* enum wmi_traffic_resume_status */
 	u8 status;
+	u8 reserved[3];
+	/* enum wmi_resume_trigger bitmap */
+	__le32 resume_triggers;
 } __packed;
 
 /* Power Save command completion status codes */
diff --git a/drivers/net/wireless/broadcom/b43/phy_n.c b/drivers/net/wireless/broadcom/b43/phy_n.c
index a5557d70689f..f2a2f41e3c96 100644
--- a/drivers/net/wireless/broadcom/b43/phy_n.c
+++ b/drivers/net/wireless/broadcom/b43/phy_n.c
@@ -1031,7 +1031,7 @@ static void b43_radio_2057_init_post(struct b43_wldev *dev)
 
 	b43_radio_set(dev, R2057_RFPLL_MISC_CAL_RESETN, 0x78);
 	b43_radio_set(dev, R2057_XTAL_CONFIG2, 0x80);
-	mdelay(2);
+	usleep_range(2000, 3000);
 	b43_radio_mask(dev, R2057_RFPLL_MISC_CAL_RESETN, ~0x78);
 	b43_radio_mask(dev, R2057_XTAL_CONFIG2, ~0x80);
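mdelay() busy-waits for the whole interval, while this radio-init path is allowed to sleep; usleep_range() yields the CPU and gives the timer subsystem coalescing slack. The substitution in general:

	/* was: mdelay(2);  -- spins the CPU for the full 2 ms */
	usleep_range(2000, 3000);	/* sleeps 2-3 ms and lets others run */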
 
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcdc.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcdc.c
index 9f2d0b0cf6e5..2d3a5dd07a3f 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcdc.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcdc.c
@@ -165,7 +165,7 @@ static int brcmf_proto_bcdc_cmplt(struct brcmf_pub *drvr, u32 id, u32 len)
 
 static int
 brcmf_proto_bcdc_query_dcmd(struct brcmf_pub *drvr, int ifidx, uint cmd,
-			    void *buf, uint len)
+			    void *buf, uint len, int *fwerr)
 {
 	struct brcmf_bcdc *bcdc = (struct brcmf_bcdc *)drvr->proto->pd;
 	struct brcmf_proto_bcdc_dcmd *msg = &bcdc->msg;
@@ -175,6 +175,7 @@ brcmf_proto_bcdc_query_dcmd(struct brcmf_pub *drvr, int ifidx, uint cmd,
 
 	brcmf_dbg(BCDC, "Enter, cmd %d len %d\n", cmd, len);
 
+	*fwerr = 0;
 	ret = brcmf_proto_bcdc_msg(drvr, ifidx, cmd, buf, len, false);
 	if (ret < 0) {
 		brcmf_err("brcmf_proto_bcdc_msg failed w/status %d\n",
@@ -211,25 +212,27 @@ retry:
 		memcpy(buf, info, len);
 	}
 
+	ret = 0;
+
 	/* Check the ERROR flag */
 	if (flags & BCDC_DCMD_ERROR)
-		ret = le32_to_cpu(msg->status);
-
+		*fwerr = le32_to_cpu(msg->status);
 done:
 	return ret;
 }
 
 static int
 brcmf_proto_bcdc_set_dcmd(struct brcmf_pub *drvr, int ifidx, uint cmd,
-			  void *buf, uint len)
+			  void *buf, uint len, int *fwerr)
 {
 	struct brcmf_bcdc *bcdc = (struct brcmf_bcdc *)drvr->proto->pd;
 	struct brcmf_proto_bcdc_dcmd *msg = &bcdc->msg;
-	int ret = 0;
+	int ret;
 	u32 flags, id;
 
 	brcmf_dbg(BCDC, "Enter, cmd %d len %d\n", cmd, len);
 
+	*fwerr = 0;
 	ret = brcmf_proto_bcdc_msg(drvr, ifidx, cmd, buf, len, true);
 	if (ret < 0)
 		goto done;
@@ -249,9 +252,11 @@ brcmf_proto_bcdc_set_dcmd(struct brcmf_pub *drvr, int ifidx, uint cmd,
 		goto done;
 	}
 
+	ret = 0;
+
 	/* Check the ERROR flag */
 	if (flags & BCDC_DCMD_ERROR)
-		ret = le32_to_cpu(msg->status);
+		*fwerr = le32_to_cpu(msg->status);
 
 done:
 	return ret;
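With this change the bcdc query/set paths return only transport status, and the firmware's own verdict arrives through *fwerr, so callers can tell a dead bus from a command the firmware rejected. A sketch of the resulting caller pattern (wrapper name assumed, not shown in this hunk):

	int fwerr = 0;
	int err = brcmf_proto_query_dcmd(drvr, ifidx, cmd, buf, len, &fwerr);

	if (err < 0)
		return err;		/* bus/transport failure */
	if (fwerr)
		brcmf_dbg(FIL, "firmware reported error %d\n", fwerr);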
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcmsdh.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcmsdh.c
index cd587325e286..0b68240ec7b4 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcmsdh.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcmsdh.c
@@ -118,7 +118,7 @@ int brcmf_sdiod_intr_register(struct brcmf_sdio_dev *sdiodev)
 
 		ret = request_irq(pdata->oob_irq_nr, brcmf_sdiod_oob_irqhandler,
 				  pdata->oob_irq_flags, "brcmf_oob_intr",
-				  &sdiodev->func[1]->dev);
+				  &sdiodev->func1->dev);
 		if (ret != 0) {
 			brcmf_err("request_irq failed %d\n", ret);
 			return ret;
@@ -132,39 +132,40 @@ int brcmf_sdiod_intr_register(struct brcmf_sdio_dev *sdiodev)
 		}
 		sdiodev->irq_wake = true;
 
-		sdio_claim_host(sdiodev->func[1]);
+		sdio_claim_host(sdiodev->func1);
 
 		if (sdiodev->bus_if->chip == BRCM_CC_43362_CHIP_ID) {
 			/* assign GPIO to SDIO core */
 			addr = CORE_CC_REG(SI_ENUM_BASE, gpiocontrol);
-			gpiocontrol = brcmf_sdiod_regrl(sdiodev, addr, &ret);
+			gpiocontrol = brcmf_sdiod_readl(sdiodev, addr, &ret);
 			gpiocontrol |= 0x2;
-			brcmf_sdiod_regwl(sdiodev, addr, gpiocontrol, &ret);
+			brcmf_sdiod_writel(sdiodev, addr, gpiocontrol, &ret);
 
-			brcmf_sdiod_regwb(sdiodev, SBSDIO_GPIO_SELECT, 0xf,
-					  &ret);
-			brcmf_sdiod_regwb(sdiodev, SBSDIO_GPIO_OUT, 0, &ret);
-			brcmf_sdiod_regwb(sdiodev, SBSDIO_GPIO_EN, 0x2, &ret);
+			brcmf_sdiod_writeb(sdiodev, SBSDIO_GPIO_SELECT,
+					   0xf, &ret);
+			brcmf_sdiod_writeb(sdiodev, SBSDIO_GPIO_OUT, 0, &ret);
+			brcmf_sdiod_writeb(sdiodev, SBSDIO_GPIO_EN, 0x2, &ret);
 		}
 
 		/* must configure SDIO_CCCR_IENx to enable irq */
-		data = brcmf_sdiod_regrb(sdiodev, SDIO_CCCR_IENx, &ret);
-		data |= 1 << SDIO_FUNC_1 | 1 << SDIO_FUNC_2 | 1;
-		brcmf_sdiod_regwb(sdiodev, SDIO_CCCR_IENx, data, &ret);
+		data = brcmf_sdiod_func0_rb(sdiodev, SDIO_CCCR_IENx, &ret);
+		data |= SDIO_CCCR_IEN_FUNC1 | SDIO_CCCR_IEN_FUNC2 |
+			SDIO_CCCR_IEN_FUNC0;
+		brcmf_sdiod_func0_wb(sdiodev, SDIO_CCCR_IENx, data, &ret);
 
 		/* redirect, configure and enable io for interrupt signal */
-		data = SDIO_SEPINT_MASK | SDIO_SEPINT_OE;
+		data = SDIO_CCCR_BRCM_SEPINT_MASK | SDIO_CCCR_BRCM_SEPINT_OE;
 		if (pdata->oob_irq_flags & IRQF_TRIGGER_HIGH)
-			data |= SDIO_SEPINT_ACT_HI;
-		brcmf_sdiod_regwb(sdiodev, SDIO_CCCR_BRCM_SEPINT, data, &ret);
-
-		sdio_release_host(sdiodev->func[1]);
+			data |= SDIO_CCCR_BRCM_SEPINT_ACT_HI;
+		brcmf_sdiod_func0_wb(sdiodev, SDIO_CCCR_BRCM_SEPINT,
+				     data, &ret);
+		sdio_release_host(sdiodev->func1);
 	} else {
 		brcmf_dbg(SDIO, "Entering\n");
-		sdio_claim_host(sdiodev->func[1]);
-		sdio_claim_irq(sdiodev->func[1], brcmf_sdiod_ib_irqhandler);
-		sdio_claim_irq(sdiodev->func[2], brcmf_sdiod_dummy_irqhandler);
-		sdio_release_host(sdiodev->func[1]);
+		sdio_claim_host(sdiodev->func1);
+		sdio_claim_irq(sdiodev->func1, brcmf_sdiod_ib_irqhandler);
+		sdio_claim_irq(sdiodev->func2, brcmf_sdiod_dummy_irqhandler);
+		sdio_release_host(sdiodev->func1);
 		sdiodev->sd_irq_requested = true;
 	}
 
@@ -182,26 +183,26 @@ void brcmf_sdiod_intr_unregister(struct brcmf_sdio_dev *sdiodev)
 		struct brcmfmac_sdio_pd *pdata;
 
 		pdata = &sdiodev->settings->bus.sdio;
-		sdio_claim_host(sdiodev->func[1]);
-		brcmf_sdiod_regwb(sdiodev, SDIO_CCCR_BRCM_SEPINT, 0, NULL);
-		brcmf_sdiod_regwb(sdiodev, SDIO_CCCR_IENx, 0, NULL);
-		sdio_release_host(sdiodev->func[1]);
+		sdio_claim_host(sdiodev->func1);
+		brcmf_sdiod_func0_wb(sdiodev, SDIO_CCCR_BRCM_SEPINT, 0, NULL);
+		brcmf_sdiod_func0_wb(sdiodev, SDIO_CCCR_IENx, 0, NULL);
+		sdio_release_host(sdiodev->func1);
 
 		sdiodev->oob_irq_requested = false;
 		if (sdiodev->irq_wake) {
 			disable_irq_wake(pdata->oob_irq_nr);
 			sdiodev->irq_wake = false;
 		}
-		free_irq(pdata->oob_irq_nr, &sdiodev->func[1]->dev);
+		free_irq(pdata->oob_irq_nr, &sdiodev->func1->dev);
 		sdiodev->irq_en = false;
 		sdiodev->oob_irq_requested = false;
 	}
 
 	if (sdiodev->sd_irq_requested) {
-		sdio_claim_host(sdiodev->func[1]);
-		sdio_release_irq(sdiodev->func[2]);
-		sdio_release_irq(sdiodev->func[1]);
-		sdio_release_host(sdiodev->func[1]);
+		sdio_claim_host(sdiodev->func1);
+		sdio_release_irq(sdiodev->func2);
+		sdio_release_irq(sdiodev->func1);
+		sdio_release_host(sdiodev->func1);
 		sdiodev->sd_irq_requested = false;
 	}
 }
@@ -230,271 +231,121 @@ void brcmf_sdiod_change_state(struct brcmf_sdio_dev *sdiodev,
 	sdiodev->state = state;
 }
 
-static inline int brcmf_sdiod_f0_writeb(struct sdio_func *func,
-					uint regaddr, u8 byte)
-{
-	int err_ret;
-
-	/*
-	 * Can only directly write to some F0 registers.
-	 * Handle CCCR_IENx and CCCR_ABORT command
-	 * as a special case.
-	 */
-	if ((regaddr == SDIO_CCCR_ABORT) ||
-	    (regaddr == SDIO_CCCR_IENx))
-		sdio_writeb(func, byte, regaddr, &err_ret);
-	else
-		sdio_f0_writeb(func, byte, regaddr, &err_ret);
-
-	return err_ret;
-}
-
-static int brcmf_sdiod_request_data(struct brcmf_sdio_dev *sdiodev, u8 fn,
-				    u32 addr, u8 regsz, void *data, bool write)
-{
-	struct sdio_func *func;
-	int ret = -EINVAL;
-
-	brcmf_dbg(SDIO, "rw=%d, func=%d, addr=0x%05x, nbytes=%d\n",
-		  write, fn, addr, regsz);
-
-	/* only allow byte access on F0 */
-	if (WARN_ON(regsz > 1 && !fn))
-		return -EINVAL;
-	func = sdiodev->func[fn];
-
-	switch (regsz) {
-	case sizeof(u8):
-		if (write) {
-			if (fn)
-				sdio_writeb(func, *(u8 *)data, addr, &ret);
-			else
-				ret = brcmf_sdiod_f0_writeb(func, addr,
-							    *(u8 *)data);
-		} else {
-			if (fn)
-				*(u8 *)data = sdio_readb(func, addr, &ret);
-			else
-				*(u8 *)data = sdio_f0_readb(func, addr, &ret);
-		}
-		break;
-	case sizeof(u16):
-		if (write)
-			sdio_writew(func, *(u16 *)data, addr, &ret);
-		else
-			*(u16 *)data = sdio_readw(func, addr, &ret);
-		break;
-	case sizeof(u32):
-		if (write)
-			sdio_writel(func, *(u32 *)data, addr, &ret);
-		else
-			*(u32 *)data = sdio_readl(func, addr, &ret);
-		break;
-	default:
-		brcmf_err("invalid size: %d\n", regsz);
-		break;
-	}
-
-	if (ret)
-		brcmf_dbg(SDIO, "failed to %s data F%d@0x%05x, err: %d\n",
-			  write ? "write" : "read", fn, addr, ret);
-
-	return ret;
-}
-
-static int brcmf_sdiod_regrw_helper(struct brcmf_sdio_dev *sdiodev, u32 addr,
-				   u8 regsz, void *data, bool write)
-{
-	u8 func;
-	s32 retry = 0;
-	int ret;
-
-	if (sdiodev->state == BRCMF_SDIOD_NOMEDIUM)
-		return -ENOMEDIUM;
-
-	/*
-	 * figure out how to read the register based on address range
-	 * 0x00 ~ 0x7FF: function 0 CCCR and FBR
-	 * 0x10000 ~ 0x1FFFF: function 1 miscellaneous registers
-	 * The rest: function 1 silicon backplane core registers
-	 */
-	if ((addr & ~REG_F0_REG_MASK) == 0)
-		func = SDIO_FUNC_0;
-	else
-		func = SDIO_FUNC_1;
-
-	do {
-		if (!write)
-			memset(data, 0, regsz);
-		/* for retry wait for 1 ms till bus get settled down */
-		if (retry)
-			usleep_range(1000, 2000);
-		ret = brcmf_sdiod_request_data(sdiodev, func, addr, regsz,
-					       data, write);
-	} while (ret != 0 && ret != -ENOMEDIUM &&
-		 retry++ < SDIOH_API_ACCESS_RETRY_LIMIT);
-
-	if (ret == -ENOMEDIUM)
-		brcmf_sdiod_change_state(sdiodev, BRCMF_SDIOD_NOMEDIUM);
-	else if (ret != 0) {
-		/*
-		 * SleepCSR register access can fail when
-		 * waking up the device so reduce this noise
-		 * in the logs.
-		 */
-		if (addr != SBSDIO_FUNC1_SLEEPCSR)
-			brcmf_err("failed to %s data F%d@0x%05x, err: %d\n",
-				  write ? "write" : "read", func, addr, ret);
-		else
-			brcmf_dbg(SDIO, "failed to %s data F%d@0x%05x, err: %d\n",
-				  write ? "write" : "read", func, addr, ret);
-	}
-	return ret;
-}
-
-static int
-brcmf_sdiod_set_sbaddr_window(struct brcmf_sdio_dev *sdiodev, u32 address)
+static int brcmf_sdiod_set_backplane_window(struct brcmf_sdio_dev *sdiodev,
+					    u32 addr)
 {
+	u32 v, bar0 = addr & SBSDIO_SBWINDOW_MASK;
 	int err = 0, i;
-	u8 addr[3];
-
-	if (sdiodev->state == BRCMF_SDIOD_NOMEDIUM)
-		return -ENOMEDIUM;
-
-	addr[0] = (address >> 8) & SBSDIO_SBADDRLOW_MASK;
-	addr[1] = (address >> 16) & SBSDIO_SBADDRMID_MASK;
-	addr[2] = (address >> 24) & SBSDIO_SBADDRHIGH_MASK;
-
-	for (i = 0; i < 3; i++) {
-		err = brcmf_sdiod_regrw_helper(sdiodev,
-					       SBSDIO_FUNC1_SBADDRLOW + i,
-					       sizeof(u8), &addr[i], true);
-		if (err) {
-			brcmf_err("failed at addr: 0x%0x\n",
-				  SBSDIO_FUNC1_SBADDRLOW + i);
-			break;
-		}
-	}
 
-	return err;
-}
+	if (bar0 == sdiodev->sbwad)
+		return 0;
 
-static int
-brcmf_sdiod_addrprep(struct brcmf_sdio_dev *sdiodev, uint width, u32 *addr)
-{
-	uint bar0 = *addr & ~SBSDIO_SB_OFT_ADDR_MASK;
-	int err = 0;
+	v = bar0 >> 8;
 
-	if (bar0 != sdiodev->sbwad) {
-		err = brcmf_sdiod_set_sbaddr_window(sdiodev, bar0);
-		if (err)
-			return err;
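+	/* The window base lives in three byte-wide F1 registers:
+	 * SBADDRLOW/MID/HIGH take bits [15:8], [23:16] and [31:24].
+	 */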
+	for (i = 0; i < 3 && !err; i++, v >>= 8)
+		brcmf_sdiod_writeb(sdiodev, SBSDIO_FUNC1_SBADDRLOW + i,
+				   v & 0xff, &err);
 
+	if (!err)
 		sdiodev->sbwad = bar0;
-	}
-
-	*addr &= SBSDIO_SB_OFT_ADDR_MASK;
 
-	if (width == 4)
-		*addr |= SBSDIO_SB_ACCESS_2_4B_FLAG;
-
-	return 0;
+	return err;
 }
 
-u8 brcmf_sdiod_regrb(struct brcmf_sdio_dev *sdiodev, u32 addr, int *ret)
+u32 brcmf_sdiod_readl(struct brcmf_sdio_dev *sdiodev, u32 addr, int *ret)
 {
-	u8 data;
+	u32 data = 0;
 	int retval;
 
-	brcmf_dbg(SDIO, "addr:0x%08x\n", addr);
-	retval = brcmf_sdiod_regrw_helper(sdiodev, addr, sizeof(data), &data,
-					  false);
-	brcmf_dbg(SDIO, "data:0x%02x\n", data);
+	retval = brcmf_sdiod_set_backplane_window(sdiodev, addr);
+	if (retval)
+		goto out;
+
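+	/* Fold the address into the 32 KiB window and request a
+	 * 4-byte backplane access.
+	 */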
+	addr &= SBSDIO_SB_OFT_ADDR_MASK;
+	addr |= SBSDIO_SB_ACCESS_2_4B_FLAG;
+
+	data = sdio_readl(sdiodev->func1, addr, &retval);
 
+out:
 	if (ret)
 		*ret = retval;
 
 	return data;
 }
 
-u32 brcmf_sdiod_regrl(struct brcmf_sdio_dev *sdiodev, u32 addr, int *ret)
+void brcmf_sdiod_writel(struct brcmf_sdio_dev *sdiodev, u32 addr,
+			u32 data, int *ret)
 {
-	u32 data = 0;
 	int retval;
 
-	brcmf_dbg(SDIO, "addr:0x%08x\n", addr);
-	retval = brcmf_sdiod_addrprep(sdiodev, sizeof(data), &addr);
+	retval = brcmf_sdiod_set_backplane_window(sdiodev, addr);
 	if (retval)
-		goto done;
-	retval = brcmf_sdiod_regrw_helper(sdiodev, addr, sizeof(data), &data,
-					  false);
-	brcmf_dbg(SDIO, "data:0x%08x\n", data);
-
-done:
-	if (ret)
-		*ret = retval;
+		goto out;
 
-	return data;
-}
+	addr &= SBSDIO_SB_OFT_ADDR_MASK;
+	addr |= SBSDIO_SB_ACCESS_2_4B_FLAG;
 
-void brcmf_sdiod_regwb(struct brcmf_sdio_dev *sdiodev, u32 addr,
-		      u8 data, int *ret)
-{
-	int retval;
+	sdio_writel(sdiodev->func1, data, addr, &retval);
 
-	brcmf_dbg(SDIO, "addr:0x%08x, data:0x%02x\n", addr, data);
-	retval = brcmf_sdiod_regrw_helper(sdiodev, addr, sizeof(data), &data,
-					  true);
+out:
 	if (ret)
 		*ret = retval;
 }
 
-void brcmf_sdiod_regwl(struct brcmf_sdio_dev *sdiodev, u32 addr,
-		      u32 data, int *ret)
+static int brcmf_sdiod_skbuff_read(struct brcmf_sdio_dev *sdiodev,
+				   struct sdio_func *func, u32 addr,
+				   struct sk_buff *skb)
 {
-	int retval;
+	unsigned int req_sz;
+	int err;
 
-	brcmf_dbg(SDIO, "addr:0x%08x, data:0x%08x\n", addr, data);
-	retval = brcmf_sdiod_addrprep(sdiodev, sizeof(data), &addr);
-	if (retval)
-		goto done;
-	retval = brcmf_sdiod_regrw_helper(sdiodev, addr, sizeof(data), &data,
-					  true);
+	/* Single skb: use the standard mmc interface */
+	req_sz = skb->len + 3;
+	req_sz &= (uint)~3;	/* round up to a 4-byte multiple */
 
-done:
-	if (ret)
-		*ret = retval;
+	switch (func->num) {
+	case 1:
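+		/* F1 transfers use an incrementing backplane address */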
+		err = sdio_memcpy_fromio(func, ((u8 *)(skb->data)), addr,
+					 req_sz);
+		break;
+	case 2:
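+		/* F2 reads are FIFO operations at a fixed address */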
+		err = sdio_readsb(func, ((u8 *)(skb->data)), addr, req_sz);
+		break;
+	default:
+		/* bail out as things are really fishy here */
+		WARN(1, "invalid sdio function number: %d\n", func->num);
+		err = -ENOMEDIUM;
+	}
+
+	if (err == -ENOMEDIUM)
+		brcmf_sdiod_change_state(sdiodev, BRCMF_SDIOD_NOMEDIUM);
+
+	return err;
 }
 
-static int brcmf_sdiod_buffrw(struct brcmf_sdio_dev *sdiodev, uint fn,
-			     bool write, u32 addr, struct sk_buff *pkt)
+static int brcmf_sdiod_skbuff_write(struct brcmf_sdio_dev *sdiodev,
+				    struct sdio_func *func, u32 addr,
+				    struct sk_buff *skb)
 {
 	unsigned int req_sz;
 	int err;
 
 	/* Single skb: use the standard mmc interface */
-	req_sz = pkt->len + 3;
+	req_sz = skb->len + 3;
 	req_sz &= (uint)~3;
 
-	if (write)
-		err = sdio_memcpy_toio(sdiodev->func[fn], addr,
-				       ((u8 *)(pkt->data)), req_sz);
-	else if (fn == 1)
-		err = sdio_memcpy_fromio(sdiodev->func[fn], ((u8 *)(pkt->data)),
-					 addr, req_sz);
-	else
-		/* function 2 read is FIFO operation */
-		err = sdio_readsb(sdiodev->func[fn], ((u8 *)(pkt->data)), addr,
-				  req_sz);
+	err = sdio_memcpy_toio(func, addr, ((u8 *)(skb->data)), req_sz);
+
 	if (err == -ENOMEDIUM)
 		brcmf_sdiod_change_state(sdiodev, BRCMF_SDIOD_NOMEDIUM);
+
 	return err;
 }
 
 /**
  * brcmf_sdiod_sglist_rw - SDIO interface function for block data access
  * @sdiodev: brcmfmac sdio device
- * @fn: SDIO function number
+ * @func: SDIO function
  * @write: direction flag
  * @addr: dongle memory address as source/destination
  * @pkt: skb pointer
@@ -503,7 +354,8 @@ static int brcmf_sdiod_buffrw(struct brcmf_sdio_dev *sdiodev, uint fn,
  * stack for block data access. It assumes that the skb passed down by the
  * caller has already been padded and aligned.
  */
-static int brcmf_sdiod_sglist_rw(struct brcmf_sdio_dev *sdiodev, uint fn,
+static int brcmf_sdiod_sglist_rw(struct brcmf_sdio_dev *sdiodev,
+				 struct sdio_func *func,
 				 bool write, u32 addr,
 				 struct sk_buff_head *pktlist)
 {
@@ -529,7 +381,7 @@ static int brcmf_sdiod_sglist_rw(struct brcmf_sdio_dev *sdiodev, uint fn,
 		req_sz = 0;
 		skb_queue_walk(pktlist, pkt_next)
 			req_sz += pkt_next->len;
-		req_sz = ALIGN(req_sz, sdiodev->func[fn]->cur_blksize);
+		req_sz = ALIGN(req_sz, func->cur_blksize);
 		while (req_sz > PAGE_SIZE) {
 			pkt_next = brcmu_pkt_buf_get_skb(PAGE_SIZE);
 			if (pkt_next == NULL) {
@@ -548,7 +400,7 @@ static int brcmf_sdiod_sglist_rw(struct brcmf_sdio_dev *sdiodev, uint fn,
 		target_list = &local_list;
 	}
 
-	func_blk_sz = sdiodev->func[fn]->cur_blksize;
+	func_blk_sz = func->cur_blksize;
 	max_req_sz = sdiodev->max_request_size;
 	max_seg_cnt = min_t(unsigned short, sdiodev->max_segment_count,
 			    target_list->qlen);
@@ -565,10 +417,10 @@ static int brcmf_sdiod_sglist_rw(struct brcmf_sdio_dev *sdiodev, uint fn,
 	mmc_dat.flags = write ? MMC_DATA_WRITE : MMC_DATA_READ;
 	mmc_cmd.opcode = SD_IO_RW_EXTENDED;
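+	/* CMD53 argument layout: bit 31 R/W flag, bits 30:28 function number,
+	 * bit 27 block mode, bit 26 OP code (incrementing address),
+	 * bits 25:9 register address, bits 8:0 block count.
+	 */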
 	mmc_cmd.arg = write ? 1<<31 : 0;	/* write flag  */
-	mmc_cmd.arg |= (fn & 0x7) << 28;	/* SDIO func num */
-	mmc_cmd.arg |= 1<<27;			/* block mode */
+	mmc_cmd.arg |= (func->num & 0x7) << 28;	/* SDIO func num */
+	mmc_cmd.arg |= 1 << 27;			/* block mode */
 	/* for function 1 the addr will be incremented */
-	mmc_cmd.arg |= (fn == 1) ? 1<<26 : 0;
+	mmc_cmd.arg |= (func->num == 1) ? 1 << 26 : 0;
 	mmc_cmd.flags = MMC_RSP_SPI_R5 | MMC_RSP_R5 | MMC_CMD_ADTC;
 	mmc_req.cmd = &mmc_cmd;
 	mmc_req.data = &mmc_dat;
@@ -614,11 +466,11 @@ static int brcmf_sdiod_sglist_rw(struct brcmf_sdio_dev *sdiodev, uint fn,
 		mmc_cmd.arg |= (addr & 0x1FFFF) << 9;	/* address */
 		mmc_cmd.arg |= mmc_dat.blocks & 0x1FF;	/* block count */
 		/* incrementing addr for function 1 */
-		if (fn == 1)
+		if (func->num == 1)
 			addr += req_sz;
 
-		mmc_set_data_timeout(&mmc_dat, sdiodev->func[fn]->card);
-		mmc_wait_for_req(sdiodev->func[fn]->card->host, &mmc_req);
+		mmc_set_data_timeout(&mmc_dat, func->card);
+		mmc_wait_for_req(func->card->host, &mmc_req);
 
 		ret = mmc_cmd.error ? mmc_cmd.error : mmc_dat.error;
 		if (ret == -ENOMEDIUM) {
@@ -686,16 +538,19 @@ int brcmf_sdiod_recv_buf(struct brcmf_sdio_dev *sdiodev, u8 *buf, uint nbytes)
 
 int brcmf_sdiod_recv_pkt(struct brcmf_sdio_dev *sdiodev, struct sk_buff *pkt)
 {
-	u32 addr = sdiodev->sbwad;
+	u32 addr = sdiodev->cc_core->base;
 	int err = 0;
 
 	brcmf_dbg(SDIO, "addr = 0x%x, size = %d\n", addr, pkt->len);
 
-	err = brcmf_sdiod_addrprep(sdiodev, 4, &addr);
+	err = brcmf_sdiod_set_backplane_window(sdiodev, addr);
 	if (err)
 		goto done;
 
-	err = brcmf_sdiod_buffrw(sdiodev, SDIO_FUNC_2, false, addr, pkt);
+	addr &= SBSDIO_SB_OFT_ADDR_MASK;
+	addr |= SBSDIO_SB_ACCESS_2_4B_FLAG;
+
+	err = brcmf_sdiod_skbuff_read(sdiodev, sdiodev->func2, addr, pkt);
 
 done:
 	return err;
@@ -706,25 +561,28 @@ int brcmf_sdiod_recv_chain(struct brcmf_sdio_dev *sdiodev,
 {
 	struct sk_buff *glom_skb = NULL;
 	struct sk_buff *skb;
-	u32 addr = sdiodev->sbwad;
+	u32 addr = sdiodev->cc_core->base;
 	int err = 0;
 
 	brcmf_dbg(SDIO, "addr = 0x%x, size = %d\n",
 		  addr, pktq->qlen);
 
-	err = brcmf_sdiod_addrprep(sdiodev, 4, &addr);
+	err = brcmf_sdiod_set_backplane_window(sdiodev, addr);
 	if (err)
 		goto done;
 
+	addr &= SBSDIO_SB_OFT_ADDR_MASK;
+	addr |= SBSDIO_SB_ACCESS_2_4B_FLAG;
+
 	if (pktq->qlen == 1)
-		err = brcmf_sdiod_buffrw(sdiodev, SDIO_FUNC_2, false, addr,
-					 pktq->next);
+		err = brcmf_sdiod_skbuff_read(sdiodev, sdiodev->func2, addr,
+					      pktq->next);
 	else if (!sdiodev->sg_support) {
 		glom_skb = brcmu_pkt_buf_get_skb(totlen);
 		if (!glom_skb)
 			return -ENOMEM;
-		err = brcmf_sdiod_buffrw(sdiodev, SDIO_FUNC_2, false, addr,
-					 glom_skb);
+		err = brcmf_sdiod_skbuff_read(sdiodev, sdiodev->func2, addr,
+					      glom_skb);
 		if (err)
 			goto done;
 
@@ -733,8 +591,8 @@ int brcmf_sdiod_recv_chain(struct brcmf_sdio_dev *sdiodev,
 			skb_pull(glom_skb, skb->len);
 		}
 	} else
-		err = brcmf_sdiod_sglist_rw(sdiodev, SDIO_FUNC_2, false, addr,
-					    pktq);
+		err = brcmf_sdiod_sglist_rw(sdiodev, sdiodev->func2, false,
+					    addr, pktq);
 
 done:
 	brcmu_pkt_buf_free_skb(glom_skb);
@@ -744,10 +602,11 @@ done:
 int brcmf_sdiod_send_buf(struct brcmf_sdio_dev *sdiodev, u8 *buf, uint nbytes)
 {
 	struct sk_buff *mypkt;
-	u32 addr = sdiodev->sbwad;
+	u32 addr = sdiodev->cc_core->base;
 	int err;
 
 	mypkt = brcmu_pkt_buf_get_skb(nbytes);
 	if (!mypkt) {
 		brcmf_err("brcmu_pkt_buf_get_skb failed: len %d\n",
 			  nbytes);
@@ -756,40 +615,49 @@ int brcmf_sdiod_send_buf(struct brcmf_sdio_dev *sdiodev, u8 *buf, uint nbytes)
 
 	memcpy(mypkt->data, buf, nbytes);
 
-	err = brcmf_sdiod_addrprep(sdiodev, 4, &addr);
+	err = brcmf_sdiod_set_backplane_window(sdiodev, addr);
+	if (err)
+		goto out;
+
+	addr &= SBSDIO_SB_OFT_ADDR_MASK;
+	addr |= SBSDIO_SB_ACCESS_2_4B_FLAG;
 
-	if (!err)
-		err = brcmf_sdiod_buffrw(sdiodev, SDIO_FUNC_2, true, addr,
-					 mypkt);
+	err = brcmf_sdiod_skbuff_write(sdiodev, sdiodev->func2, addr,
+				       mypkt);
 
+out:
 	brcmu_pkt_buf_free_skb(mypkt);
-	return err;
 
+	return err;
 }
 
 int brcmf_sdiod_send_pkt(struct brcmf_sdio_dev *sdiodev,
 			 struct sk_buff_head *pktq)
 {
 	struct sk_buff *skb;
-	u32 addr = sdiodev->sbwad;
+	u32 addr = sdiodev->cc_core->base;
 	int err;
 
 	brcmf_dbg(SDIO, "addr = 0x%x, size = %d\n", addr, pktq->qlen);
 
-	err = brcmf_sdiod_addrprep(sdiodev, 4, &addr);
+	err = brcmf_sdiod_set_backplane_window(sdiodev, addr);
 	if (err)
 		return err;
 
-	if (pktq->qlen == 1 || !sdiodev->sg_support)
+	addr &= SBSDIO_SB_OFT_ADDR_MASK;
+	addr |= SBSDIO_SB_ACCESS_2_4B_FLAG;
+
+	if (pktq->qlen == 1 || !sdiodev->sg_support) {
 		skb_queue_walk(pktq, skb) {
-			err = brcmf_sdiod_buffrw(sdiodev, SDIO_FUNC_2, true,
-						 addr, skb);
+			err = brcmf_sdiod_skbuff_write(sdiodev, sdiodev->func2,
+						       addr, skb);
 			if (err)
 				break;
 		}
-	else
-		err = brcmf_sdiod_sglist_rw(sdiodev, SDIO_FUNC_2, true, addr,
-					    pktq);
+	} else {
+		err = brcmf_sdiod_sglist_rw(sdiodev, sdiodev->func2, true,
+					    addr, pktq);
+	}
 
 	return err;
 }
@@ -798,7 +666,7 @@ int
 brcmf_sdiod_ramrw(struct brcmf_sdio_dev *sdiodev, bool write, u32 address,
 		  u8 *data, uint size)
 {
-	int bcmerror = 0;
+	int err = 0;
 	struct sk_buff *pkt;
 	u32 sdaddr;
 	uint dsize;
@@ -818,13 +686,13 @@ brcmf_sdiod_ramrw(struct brcmf_sdio_dev *sdiodev, bool write, u32 address,
 	else
 		dsize = size;
 
-	sdio_claim_host(sdiodev->func[1]);
+	sdio_claim_host(sdiodev->func1);
 
 	/* Do the transfer(s) */
 	while (size) {
 		/* Set the backplane window to include the start address */
-		bcmerror = brcmf_sdiod_set_sbaddr_window(sdiodev, address);
-		if (bcmerror)
+		err = brcmf_sdiod_set_backplane_window(sdiodev, address);
+		if (err)
 			break;
 
 		brcmf_dbg(SDIO, "%s %d bytes at offset 0x%08x in window 0x%08x\n",
@@ -835,11 +703,17 @@ brcmf_sdiod_ramrw(struct brcmf_sdio_dev *sdiodev, bool write, u32 address,
 		sdaddr |= SBSDIO_SB_ACCESS_2_4B_FLAG;
 
 		skb_put(pkt, dsize);
-		if (write)
+
+		if (write) {
 			memcpy(pkt->data, data, dsize);
-		bcmerror = brcmf_sdiod_buffrw(sdiodev, SDIO_FUNC_1, write,
-					      sdaddr, pkt);
-		if (bcmerror) {
+			err = brcmf_sdiod_skbuff_write(sdiodev, sdiodev->func1,
+						       sdaddr, pkt);
+		} else {
+			err = brcmf_sdiod_skbuff_read(sdiodev, sdiodev->func1,
+						      sdaddr, pkt);
+		}
+
+		if (err) {
 			brcmf_err("membytes transfer failed\n");
 			break;
 		}
@@ -859,24 +733,17 @@ brcmf_sdiod_ramrw(struct brcmf_sdio_dev *sdiodev, bool write, u32 address,
 
 	dev_kfree_skb(pkt);
 
-	/* Return the window to backplane enumeration space for core access */
-	if (brcmf_sdiod_set_sbaddr_window(sdiodev, sdiodev->sbwad))
-		brcmf_err("FAILED to set window back to 0x%x\n",
-			  sdiodev->sbwad);
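+	/* No need to restore the window here; it is reprogrammed lazily
+	 * by brcmf_sdiod_set_backplane_window() on the next access.
+	 */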
+	sdio_release_host(sdiodev->func1);
 
-	sdio_release_host(sdiodev->func[1]);
-
-	return bcmerror;
+	return err;
 }
 
-int brcmf_sdiod_abort(struct brcmf_sdio_dev *sdiodev, uint fn)
+int brcmf_sdiod_abort(struct brcmf_sdio_dev *sdiodev, struct sdio_func *func)
 {
-	char t_func = (char)fn;
 	brcmf_dbg(SDIO, "Enter\n");
 
-	/* issue abort cmd52 command through F0 */
-	brcmf_sdiod_request_data(sdiodev, SDIO_FUNC_0, SDIO_CCCR_ABORT,
-				 sizeof(t_func), &t_func, true);
+	/* Issue abort cmd52 command through F0 */
+	brcmf_sdiod_func0_wb(sdiodev, SDIO_CCCR_ABORT, func->num, NULL);
 
 	brcmf_dbg(SDIO, "Exit\n");
 	return 0;
@@ -890,7 +757,7 @@ void brcmf_sdiod_sgtable_alloc(struct brcmf_sdio_dev *sdiodev)
 	uint nents;
 	int err;
 
-	func = sdiodev->func[2];
+	func = sdiodev->func2;
 	host = func->card->host;
 	sdiodev->sg_support = host->max_segs > 1;
 	max_blocks = min_t(uint, host->max_blk_count, 511u);
@@ -951,17 +818,17 @@ static int brcmf_sdiod_freezer_on(struct brcmf_sdio_dev *sdiodev)
 	brcmf_sdio_trigger_dpc(sdiodev->bus);
 	wait_event(sdiodev->freezer->thread_freeze,
 		   atomic_read(expect) == sdiodev->freezer->frozen_count);
-	sdio_claim_host(sdiodev->func[1]);
+	sdio_claim_host(sdiodev->func1);
 	res = brcmf_sdio_sleep(sdiodev->bus, true);
-	sdio_release_host(sdiodev->func[1]);
+	sdio_release_host(sdiodev->func1);
 	return res;
 }
 
 static void brcmf_sdiod_freezer_off(struct brcmf_sdio_dev *sdiodev)
 {
-	sdio_claim_host(sdiodev->func[1]);
+	sdio_claim_host(sdiodev->func1);
 	brcmf_sdio_sleep(sdiodev->bus, false);
-	sdio_release_host(sdiodev->func[1]);
+	sdio_release_host(sdiodev->func1);
 	atomic_set(&sdiodev->freezer->freezing, 0);
 	complete_all(&sdiodev->freezer->resumed);
 }
@@ -1011,19 +878,19 @@ static int brcmf_sdiod_remove(struct brcmf_sdio_dev *sdiodev)
 	brcmf_sdiod_freezer_detach(sdiodev);
 
 	/* Disable Function 2 */
-	sdio_claim_host(sdiodev->func[2]);
-	sdio_disable_func(sdiodev->func[2]);
-	sdio_release_host(sdiodev->func[2]);
+	sdio_claim_host(sdiodev->func2);
+	sdio_disable_func(sdiodev->func2);
+	sdio_release_host(sdiodev->func2);
 
 	/* Disable Function 1 */
-	sdio_claim_host(sdiodev->func[1]);
-	sdio_disable_func(sdiodev->func[1]);
-	sdio_release_host(sdiodev->func[1]);
+	sdio_claim_host(sdiodev->func1);
+	sdio_disable_func(sdiodev->func1);
+	sdio_release_host(sdiodev->func1);
 
 	sg_free_table(&sdiodev->sgtable);
 	sdiodev->sbwad = 0;
 
-	pm_runtime_allow(sdiodev->func[1]->card->host->parent);
+	pm_runtime_allow(sdiodev->func1->card->host->parent);
 	return 0;
 }
 
@@ -1039,29 +906,27 @@ static int brcmf_sdiod_probe(struct brcmf_sdio_dev *sdiodev)
 {
 	int ret = 0;
 
-	sdiodev->num_funcs = 2;
+	sdio_claim_host(sdiodev->func1);
 
-	sdio_claim_host(sdiodev->func[1]);
-
-	ret = sdio_set_block_size(sdiodev->func[1], SDIO_FUNC1_BLOCKSIZE);
+	ret = sdio_set_block_size(sdiodev->func1, SDIO_FUNC1_BLOCKSIZE);
 	if (ret) {
 		brcmf_err("Failed to set F1 blocksize\n");
-		sdio_release_host(sdiodev->func[1]);
+		sdio_release_host(sdiodev->func1);
 		goto out;
 	}
-	ret = sdio_set_block_size(sdiodev->func[2], SDIO_FUNC2_BLOCKSIZE);
+	ret = sdio_set_block_size(sdiodev->func2, SDIO_FUNC2_BLOCKSIZE);
 	if (ret) {
 		brcmf_err("Failed to set F2 blocksize\n");
-		sdio_release_host(sdiodev->func[1]);
+		sdio_release_host(sdiodev->func1);
 		goto out;
 	}
 
 	/* increase F2 timeout */
-	sdiodev->func[2]->enable_timeout = SDIO_WAIT_F2RDY;
+	sdiodev->func2->enable_timeout = SDIO_WAIT_F2RDY;
 
 	/* Enable Function 1 */
-	ret = sdio_enable_func(sdiodev->func[1]);
-	sdio_release_host(sdiodev->func[1]);
+	ret = sdio_enable_func(sdiodev->func1);
+	sdio_release_host(sdiodev->func1);
 	if (ret) {
 		brcmf_err("Failed to enable F1: err=%d\n", ret);
 		goto out;
@@ -1077,7 +942,7 @@ static int brcmf_sdiod_probe(struct brcmf_sdio_dev *sdiodev)
 		ret = -ENODEV;
 		goto out;
 	}
-	brcmf_sdiod_host_fixup(sdiodev->func[2]->card->host);
+	brcmf_sdiod_host_fixup(sdiodev->func2->card->host);
 out:
 	if (ret)
 		brcmf_sdiod_remove(sdiodev);
@@ -1138,6 +1003,10 @@ static int brcmf_ops_sdio_probe(struct sdio_func *func,
 	brcmf_dbg(SDIO, "Function#: %d\n", func->num);
 
 	dev = &func->dev;
+
+	/* Set MMC_QUIRK_LENIENT_FN0 so the MMC core allows F0 writes to
+	 * standard CCCR registers such as SDIO_CCCR_IENx and SDIO_CCCR_ABORT.
+	 */
+	func->card->quirks |= MMC_QUIRK_LENIENT_FN0;
+
 	/* prohibit ACPI power management for this device */
 	brcmf_sdiod_acpi_set_power_manageable(dev, 0);
 
@@ -1161,17 +1030,15 @@ static int brcmf_ops_sdio_probe(struct sdio_func *func,
 	/* store refs to functions used. mmc_card does
 	 * not hold the F0 function pointer.
 	 */
-	sdiodev->func[0] = kmemdup(func, sizeof(*func), GFP_KERNEL);
-	sdiodev->func[0]->num = 0;
-	sdiodev->func[1] = func->card->sdio_func[0];
-	sdiodev->func[2] = func;
+	sdiodev->func1 = func->card->sdio_func[0];
+	sdiodev->func2 = func;
 
 	sdiodev->bus_if = bus_if;
 	bus_if->bus_priv.sdio = sdiodev;
 	bus_if->proto_type = BRCMF_PROTO_BCDC;
 	dev_set_drvdata(&func->dev, bus_if);
-	dev_set_drvdata(&sdiodev->func[1]->dev, bus_if);
-	sdiodev->dev = &sdiodev->func[1]->dev;
+	dev_set_drvdata(&sdiodev->func1->dev, bus_if);
+	sdiodev->dev = &sdiodev->func1->dev;
 
 	brcmf_sdiod_change_state(sdiodev, BRCMF_SDIOD_DOWN);
 
@@ -1187,8 +1054,7 @@ static int brcmf_ops_sdio_probe(struct sdio_func *func,
 
 fail:
 	dev_set_drvdata(&func->dev, NULL);
-	dev_set_drvdata(&sdiodev->func[1]->dev, NULL);
-	kfree(sdiodev->func[0]);
+	dev_set_drvdata(&sdiodev->func1->dev, NULL);
 	kfree(sdiodev);
 	kfree(bus_if);
 	return err;
@@ -1217,11 +1083,10 @@ static void brcmf_ops_sdio_remove(struct sdio_func *func)
 		/* only proceed with rest of cleanup if func 1 */
 		brcmf_sdiod_remove(sdiodev);
 
-		dev_set_drvdata(&sdiodev->func[1]->dev, NULL);
-		dev_set_drvdata(&sdiodev->func[2]->dev, NULL);
+		dev_set_drvdata(&sdiodev->func1->dev, NULL);
+		dev_set_drvdata(&sdiodev->func2->dev, NULL);
 
 		kfree(bus_if);
-		kfree(sdiodev->func[0]);
 		kfree(sdiodev);
 	}
 
@@ -1247,7 +1112,7 @@ static int brcmf_ops_sdio_suspend(struct device *dev)
 
 	func = container_of(dev, struct sdio_func, dev);
 	brcmf_dbg(SDIO, "Enter: F%d\n", func->num);
-	if (func->num != SDIO_FUNC_1)
+	if (func->num != 1)
 		return 0;
 
 
@@ -1264,7 +1129,7 @@ static int brcmf_ops_sdio_suspend(struct device *dev)
 		else
 			sdio_flags |= MMC_PM_WAKE_SDIO_IRQ;
 	}
-	if (sdio_set_host_pm_flags(sdiodev->func[1], sdio_flags))
+	if (sdio_set_host_pm_flags(sdiodev->func1, sdio_flags))
 		brcmf_err("Failed to set pm_flags %x\n", sdio_flags);
 	return 0;
 }
@@ -1276,7 +1141,7 @@ static int brcmf_ops_sdio_resume(struct device *dev)
 	struct sdio_func *func = container_of(dev, struct sdio_func, dev);
 
 	brcmf_dbg(SDIO, "Enter: F%d\n", func->num);
-	if (func->num != SDIO_FUNC_2)
+	if (func->num != 2)
 		return 0;
 
 	brcmf_sdiod_freezer_off(sdiodev);
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/chip.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/chip.c
index c5d1a1cbf601..f7b30ce2300d 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/chip.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/chip.c
@@ -1338,6 +1338,7 @@ bool brcmf_chip_sr_capable(struct brcmf_chip *pub)
 	switch (pub->chip) {
 	case BRCM_CC_4354_CHIP_ID:
 	case BRCM_CC_4356_CHIP_ID:
+	case BRCM_CC_4345_CHIP_ID:
 		/* explicitly check SR engine enable bit */
 		pmu_cc3_mask = BIT(2);
 		/* fall-through */
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/feature.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/feature.c
index 53ae30259989..47de35a33853 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/feature.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/feature.c
@@ -130,13 +130,19 @@ static void brcmf_feat_iovar_data_set(struct brcmf_if *ifp,
 	}
 }
 
+/* buffer size for the firmware "cap" iovar response */
+#define MAX_CAPS_BUFFER_SIZE	512
 static void brcmf_feat_firmware_capabilities(struct brcmf_if *ifp)
 {
-	char caps[256];
+	char caps[MAX_CAPS_BUFFER_SIZE];
 	enum brcmf_feat_id id;
-	int i;
+	int i, err;
+
+	err = brcmf_fil_iovar_data_get(ifp, "cap", caps, sizeof(caps));
+	if (err) {
+		brcmf_err("could not get firmware cap (%d)\n", err);
+		return;
+	}
 
-	brcmf_fil_iovar_data_get(ifp, "cap", caps, sizeof(caps));
 	brcmf_dbg(INFO, "[ %s]\n", caps);
 
 	for (i = 0; i < ARRAY_SIZE(brcmf_fwcap_map); i++) {
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwil.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwil.c
index f6a2df94dba7..f2cfdd3b2bf1 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwil.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwil.c
@@ -107,7 +107,7 @@ static s32
 brcmf_fil_cmd_data(struct brcmf_if *ifp, u32 cmd, void *data, u32 len, bool set)
 {
 	struct brcmf_pub *drvr = ifp->drvr;
-	s32 err;
+	s32 err, fwerr;
 
 	if (drvr->bus_if->state != BRCMF_BUS_UP) {
 		brcmf_err("bus is down. we have nothing to do.\n");
@@ -117,16 +117,20 @@ brcmf_fil_cmd_data(struct brcmf_if *ifp, u32 cmd, void *data, u32 len, bool set)
 	if (data != NULL)
 		len = min_t(uint, len, BRCMF_DCMD_MAXLEN);
 	if (set)
-		err = brcmf_proto_set_dcmd(drvr, ifp->ifidx, cmd, data, len);
+		err = brcmf_proto_set_dcmd(drvr, ifp->ifidx, cmd,
+					   data, len, &fwerr);
 	else
-		err = brcmf_proto_query_dcmd(drvr, ifp->ifidx, cmd, data, len);
-
-	if (err >= 0)
-		return 0;
-
-	brcmf_dbg(FIL, "Failed: %s (%d)\n",
-		  brcmf_fil_get_errstr((u32)(-err)), err);
-
+		err = brcmf_proto_query_dcmd(drvr, ifp->ifidx, cmd,
+					     data, len, &fwerr);
+
+	if (err) {
+		brcmf_dbg(FIL, "Failed: %s (%d)\n",
+			  brcmf_fil_get_errstr((u32)(-err)), err);
+	} else if (fwerr < 0) {
+		brcmf_dbg(FIL, "Firmware error: %s (%d)\n",
+			  brcmf_fil_get_errstr((u32)(-fwerr)), fwerr);
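+		/* collapse any firmware error code into a single host errno */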
+		err = -EBADE;
+	}
 	return err;
 }
 
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/msgbuf.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/msgbuf.c
index d2c834c3b2fc..e212a791a072 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/msgbuf.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/msgbuf.c
@@ -477,7 +477,7 @@ static void brcmf_msgbuf_ioctl_resp_wake(struct brcmf_msgbuf *msgbuf)
 
 
 static int brcmf_msgbuf_query_dcmd(struct brcmf_pub *drvr, int ifidx,
-				   uint cmd, void *buf, uint len)
+				   uint cmd, void *buf, uint len, int *fwerr)
 {
 	struct brcmf_msgbuf *msgbuf = (struct brcmf_msgbuf *)drvr->proto->pd;
 	struct sk_buff *skb = NULL;
@@ -485,6 +485,7 @@ static int brcmf_msgbuf_query_dcmd(struct brcmf_pub *drvr, int ifidx,
 	int err;
 
 	brcmf_dbg(MSGBUF, "ifidx=%d, cmd=%d, len=%d\n", ifidx, cmd, len);
+	*fwerr = 0;
 	msgbuf->ctl_completed = false;
 	err = brcmf_msgbuf_tx_ioctl(drvr, ifidx, cmd, buf, len);
 	if (err)
@@ -508,14 +509,15 @@ static int brcmf_msgbuf_query_dcmd(struct brcmf_pub *drvr, int ifidx,
 	}
 	brcmu_pkt_buf_free_skb(skb);
 
-	return msgbuf->ioctl_resp_status;
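+	/* the transport succeeded; the firmware status travels via fwerr */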
+	*fwerr = msgbuf->ioctl_resp_status;
+	return 0;
 }
 
 
 static int brcmf_msgbuf_set_dcmd(struct brcmf_pub *drvr, int ifidx,
-				 uint cmd, void *buf, uint len)
+				 uint cmd, void *buf, uint len, int *fwerr)
 {
-	return brcmf_msgbuf_query_dcmd(drvr, ifidx, cmd, buf, len);
+	return brcmf_msgbuf_query_dcmd(drvr, ifidx, cmd, buf, len, fwerr);
 }
 
 
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/pcie.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/pcie.c
index 3c87157f5b85..8752707557bf 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/pcie.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/pcie.c
@@ -1251,14 +1251,14 @@ static int brcmf_pcie_init_scratchbuffers(struct brcmf_pciedev_info *devinfo)
 	u64 address;
 	u32 addr;
 
-	devinfo->shared.scratch = dma_alloc_coherent(&devinfo->pdev->dev,
-		BRCMF_DMA_D2H_SCRATCH_BUF_LEN,
-		&devinfo->shared.scratch_dmahandle, GFP_KERNEL);
+	devinfo->shared.scratch =
+		dma_zalloc_coherent(&devinfo->pdev->dev,
+				    BRCMF_DMA_D2H_SCRATCH_BUF_LEN,
+				    &devinfo->shared.scratch_dmahandle,
+				    GFP_KERNEL);
 	if (!devinfo->shared.scratch)
 		goto fail;
 
-	memset(devinfo->shared.scratch, 0, BRCMF_DMA_D2H_SCRATCH_BUF_LEN);
-
 	addr = devinfo->shared.tcm_base_address +
 	       BRCMF_SHARED_DMA_SCRATCH_ADDR_OFFSET;
 	address = (u64)devinfo->shared.scratch_dmahandle;
@@ -1268,14 +1268,14 @@ static int brcmf_pcie_init_scratchbuffers(struct brcmf_pciedev_info *devinfo)
 	       BRCMF_SHARED_DMA_SCRATCH_LEN_OFFSET;
 	brcmf_pcie_write_tcm32(devinfo, addr, BRCMF_DMA_D2H_SCRATCH_BUF_LEN);
 
-	devinfo->shared.ringupd = dma_alloc_coherent(&devinfo->pdev->dev,
-		BRCMF_DMA_D2H_RINGUPD_BUF_LEN,
-		&devinfo->shared.ringupd_dmahandle, GFP_KERNEL);
+	devinfo->shared.ringupd =
+		dma_zalloc_coherent(&devinfo->pdev->dev,
+				    BRCMF_DMA_D2H_RINGUPD_BUF_LEN,
+				    &devinfo->shared.ringupd_dmahandle,
+				    GFP_KERNEL);
 	if (!devinfo->shared.ringupd)
 		goto fail;
 
-	memset(devinfo->shared.ringupd, 0, BRCMF_DMA_D2H_RINGUPD_BUF_LEN);
-
 	addr = devinfo->shared.tcm_base_address +
 	       BRCMF_SHARED_DMA_RINGUPD_ADDR_OFFSET;
 	address = (u64)devinfo->shared.ringupd_dmahandle;
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/proto.h b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/proto.h
index 2404f8a7c31c..8a8e08f09ea0 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/proto.h
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/proto.h
@@ -30,9 +30,9 @@ struct brcmf_proto {
 	int (*hdrpull)(struct brcmf_pub *drvr, bool do_fws,
 		       struct sk_buff *skb, struct brcmf_if **ifp);
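+	/* fwerr returns the firmware status separately from the bus error */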
 	int (*query_dcmd)(struct brcmf_pub *drvr, int ifidx, uint cmd,
-			  void *buf, uint len);
+			  void *buf, uint len, int *fwerr);
 	int (*set_dcmd)(struct brcmf_pub *drvr, int ifidx, uint cmd, void *buf,
-			uint len);
+			uint len, int *fwerr);
 	int (*tx_queue_data)(struct brcmf_pub *drvr, int ifidx,
 			     struct sk_buff *skb);
 	int (*txdata)(struct brcmf_pub *drvr, int ifidx, u8 offset,
@@ -71,14 +71,16 @@ static inline int brcmf_proto_hdrpull(struct brcmf_pub *drvr, bool do_fws,
 	return drvr->proto->hdrpull(drvr, do_fws, skb, ifp);
 }
 static inline int brcmf_proto_query_dcmd(struct brcmf_pub *drvr, int ifidx,
-					 uint cmd, void *buf, uint len)
+					 uint cmd, void *buf, uint len,
+					 int *fwerr)
 {
-	return drvr->proto->query_dcmd(drvr, ifidx, cmd, buf, len);
+	return drvr->proto->query_dcmd(drvr, ifidx, cmd, buf, len, fwerr);
 }
 static inline int brcmf_proto_set_dcmd(struct brcmf_pub *drvr, int ifidx,
-				       uint cmd, void *buf, uint len)
+				       uint cmd, void *buf, uint len,
+				       int *fwerr)
 {
-	return drvr->proto->set_dcmd(drvr, ifidx, cmd, buf, len);
+	return drvr->proto->set_dcmd(drvr, ifidx, cmd, buf, len, fwerr);
 }
 
 static inline int brcmf_proto_tx_queue_data(struct brcmf_pub *drvr, int ifidx,
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c
index cdf9e4161592..08686147b59d 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c
@@ -159,8 +159,8 @@ struct rte_console {
 /* manfid tuple length, include tuple, link bytes */
 #define SBSDIO_CIS_MANFID_TUPLE_LEN	6
 
-#define CORE_BUS_REG(base, field) \
-		(base + offsetof(struct sdpcmd_regs, field))
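+/* Register offset within the SDIO core; callers add the core base address */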
+#define SD_REG(field) \
+		(offsetof(struct sdpcmd_regs, field))
 
 /* SDIO function 1 register CHIPCLKCSR */
 /* Force ALP request to backplane */
@@ -436,6 +436,7 @@ struct brcmf_sdio_count {
 struct brcmf_sdio {
 	struct brcmf_sdio_dev *sdiodev;	/* sdio device handler */
 	struct brcmf_chip *ci;	/* Chip info struct */
+	struct brcmf_core *sdio_core; /* sdio core info struct */
 
 	u32 hostintmask;	/* Copy of Host Interrupt Mask */
 	atomic_t intstatus;	/* Intstatus bits (events) pending */
@@ -659,32 +660,6 @@ static bool data_ok(struct brcmf_sdio *bus)
 	       ((u8)(bus->tx_max - bus->tx_seq) & 0x80) == 0;
 }
 
-/*
- * Reads a register in the SDIO hardware block. This block occupies a series of
- * adresses on the 32 bit backplane bus.
- */
-static int r_sdreg32(struct brcmf_sdio *bus, u32 *regvar, u32 offset)
-{
-	struct brcmf_core *core;
-	int ret;
-
-	core = brcmf_chip_get_core(bus->ci, BCMA_CORE_SDIO_DEV);
-	*regvar = brcmf_sdiod_regrl(bus->sdiodev, core->base + offset, &ret);
-
-	return ret;
-}
-
-static int w_sdreg32(struct brcmf_sdio *bus, u32 regval, u32 reg_offset)
-{
-	struct brcmf_core *core;
-	int ret;
-
-	core = brcmf_chip_get_core(bus->ci, BCMA_CORE_SDIO_DEV);
-	brcmf_sdiod_regwl(bus->sdiodev, core->base + reg_offset, regval, &ret);
-
-	return ret;
-}
-
 static int
 brcmf_sdio_kso_control(struct brcmf_sdio *bus, bool on)
 {
@@ -697,8 +672,7 @@ brcmf_sdio_kso_control(struct brcmf_sdio *bus, bool on)
 
 	wr_val = (on << SBSDIO_FUNC1_SLEEPCSR_KSO_SHIFT);
 	/* 1st KSO write goes to AOS wake up core if device is asleep  */
-	brcmf_sdiod_regwb(bus->sdiodev, SBSDIO_FUNC1_SLEEPCSR,
-			  wr_val, &err);
+	brcmf_sdiod_writeb(bus->sdiodev, SBSDIO_FUNC1_SLEEPCSR, wr_val, &err);
 
 	if (on) {
 		/* device WAKEUP through KSO:
@@ -724,7 +698,7 @@ brcmf_sdio_kso_control(struct brcmf_sdio *bus, bool on)
 		 * just one write attempt may fail,
 		 * read it back until it matches written value
 		 */
-		rd_val = brcmf_sdiod_regrb(bus->sdiodev, SBSDIO_FUNC1_SLEEPCSR,
+		rd_val = brcmf_sdiod_readb(bus->sdiodev, SBSDIO_FUNC1_SLEEPCSR,
 					   &err);
 		if (!err) {
 			if ((rd_val & bmask) == cmp_val)
@@ -734,9 +708,11 @@ brcmf_sdio_kso_control(struct brcmf_sdio *bus, bool on)
 		/* bail out upon subsequent access errors */
 		if (err && (err_cnt++ > BRCMF_SDIO_MAX_ACCESS_ERRORS))
 			break;
+
 		udelay(KSO_WAIT_US);
-		brcmf_sdiod_regwb(bus->sdiodev, SBSDIO_FUNC1_SLEEPCSR,
-				  wr_val, &err);
+		brcmf_sdiod_writeb(bus->sdiodev, SBSDIO_FUNC1_SLEEPCSR, wr_val,
+				   &err);
 	} while (try_cnt++ < MAX_KSO_ATTEMPTS);
 
 	if (try_cnt > 2)
@@ -772,15 +748,15 @@ static int brcmf_sdio_htclk(struct brcmf_sdio *bus, bool on, bool pendok)
 		clkreq =
 		    bus->alp_only ? SBSDIO_ALP_AVAIL_REQ : SBSDIO_HT_AVAIL_REQ;
 
-		brcmf_sdiod_regwb(bus->sdiodev, SBSDIO_FUNC1_CHIPCLKCSR,
-				  clkreq, &err);
+		brcmf_sdiod_writeb(bus->sdiodev, SBSDIO_FUNC1_CHIPCLKCSR,
+				   clkreq, &err);
 		if (err) {
 			brcmf_err("HT Avail request error: %d\n", err);
 			return -EBADE;
 		}
 
 		/* Check current status */
-		clkctl = brcmf_sdiod_regrb(bus->sdiodev,
+		clkctl = brcmf_sdiod_readb(bus->sdiodev,
 					   SBSDIO_FUNC1_CHIPCLKCSR, &err);
 		if (err) {
 			brcmf_err("HT Avail read error: %d\n", err);
@@ -790,35 +766,34 @@ static int brcmf_sdio_htclk(struct brcmf_sdio *bus, bool on, bool pendok)
 		/* Go to pending and await interrupt if appropriate */
 		if (!SBSDIO_CLKAV(clkctl, bus->alp_only) && pendok) {
 			/* Allow only clock-available interrupt */
-			devctl = brcmf_sdiod_regrb(bus->sdiodev,
+			devctl = brcmf_sdiod_readb(bus->sdiodev,
 						   SBSDIO_DEVICE_CTL, &err);
 			if (err) {
-				brcmf_err("Devctl error setting CA: %d\n",
-					  err);
+				brcmf_err("Devctl error setting CA: %d\n", err);
 				return -EBADE;
 			}
 
 			devctl |= SBSDIO_DEVCTL_CA_INT_ONLY;
-			brcmf_sdiod_regwb(bus->sdiodev, SBSDIO_DEVICE_CTL,
-					  devctl, &err);
+			brcmf_sdiod_writeb(bus->sdiodev, SBSDIO_DEVICE_CTL,
+					   devctl, &err);
 			brcmf_dbg(SDIO, "CLKCTL: set PENDING\n");
 			bus->clkstate = CLK_PENDING;
 
 			return 0;
 		} else if (bus->clkstate == CLK_PENDING) {
 			/* Cancel CA-only interrupt filter */
-			devctl = brcmf_sdiod_regrb(bus->sdiodev,
+			devctl = brcmf_sdiod_readb(bus->sdiodev,
 						   SBSDIO_DEVICE_CTL, &err);
 			devctl &= ~SBSDIO_DEVCTL_CA_INT_ONLY;
-			brcmf_sdiod_regwb(bus->sdiodev, SBSDIO_DEVICE_CTL,
-					  devctl, &err);
+			brcmf_sdiod_writeb(bus->sdiodev, SBSDIO_DEVICE_CTL,
+					   devctl, &err);
 		}
 
 		/* Otherwise, wait here (polling) for HT Avail */
 		timeout = jiffies +
 			  msecs_to_jiffies(PMU_MAX_TRANSITION_DLY/1000);
 		while (!SBSDIO_CLKAV(clkctl, bus->alp_only)) {
-			clkctl = brcmf_sdiod_regrb(bus->sdiodev,
+			clkctl = brcmf_sdiod_readb(bus->sdiodev,
 						   SBSDIO_FUNC1_CHIPCLKCSR,
 						   &err);
 			if (time_after(jiffies, timeout))
@@ -852,16 +827,16 @@ static int brcmf_sdio_htclk(struct brcmf_sdio *bus, bool on, bool pendok)
 
 		if (bus->clkstate == CLK_PENDING) {
 			/* Cancel CA-only interrupt filter */
-			devctl = brcmf_sdiod_regrb(bus->sdiodev,
+			devctl = brcmf_sdiod_readb(bus->sdiodev,
 						   SBSDIO_DEVICE_CTL, &err);
 			devctl &= ~SBSDIO_DEVCTL_CA_INT_ONLY;
-			brcmf_sdiod_regwb(bus->sdiodev, SBSDIO_DEVICE_CTL,
-					  devctl, &err);
+			brcmf_sdiod_writeb(bus->sdiodev, SBSDIO_DEVICE_CTL,
+					   devctl, &err);
 		}
 
 		bus->clkstate = CLK_SDONLY;
-		brcmf_sdiod_regwb(bus->sdiodev, SBSDIO_FUNC1_CHIPCLKCSR,
-				  clkreq, &err);
+		brcmf_sdiod_writeb(bus->sdiodev, SBSDIO_FUNC1_CHIPCLKCSR,
+				   clkreq, &err);
 		brcmf_dbg(SDIO, "CLKCTL: turned OFF\n");
 		if (err) {
 			brcmf_err("Failed access turning clock off: %d\n",
@@ -951,14 +926,14 @@ brcmf_sdio_bus_sleep(struct brcmf_sdio *bus, bool sleep, bool pendok)
 
 		/* Going to sleep */
 		if (sleep) {
-			clkcsr = brcmf_sdiod_regrb(bus->sdiodev,
+			clkcsr = brcmf_sdiod_readb(bus->sdiodev,
 						   SBSDIO_FUNC1_CHIPCLKCSR,
 						   &err);
 			if ((clkcsr & SBSDIO_CSR_MASK) == 0) {
 				brcmf_dbg(SDIO, "no clock, set ALP\n");
-				brcmf_sdiod_regwb(bus->sdiodev,
-						  SBSDIO_FUNC1_CHIPCLKCSR,
-						  SBSDIO_ALP_AVAIL_REQ, &err);
+				brcmf_sdiod_writeb(bus->sdiodev,
+						   SBSDIO_FUNC1_CHIPCLKCSR,
+						   SBSDIO_ALP_AVAIL_REQ, &err);
 			}
 			err = brcmf_sdio_kso_control(bus, false);
 		} else {
@@ -1004,7 +979,7 @@ static int brcmf_sdio_readshared(struct brcmf_sdio *bus,
 	struct sdpcm_shared_le sh_le;
 	__le32 addr_le;
 
-	sdio_claim_host(bus->sdiodev->func[1]);
+	sdio_claim_host(bus->sdiodev->func1);
 	brcmf_sdio_bus_sleep(bus, false, false);
 
 	/*
@@ -1038,7 +1013,7 @@ static int brcmf_sdio_readshared(struct brcmf_sdio *bus,
 	if (rv < 0)
 		goto fail;
 
-	sdio_release_host(bus->sdiodev->func[1]);
+	sdio_release_host(bus->sdiodev->func1);
 
 	/* Endianness */
 	sh->flags = le32_to_cpu(sh_le.flags);
@@ -1060,7 +1035,7 @@ static int brcmf_sdio_readshared(struct brcmf_sdio *bus,
 fail:
 	brcmf_err("unable to obtain sdpcm_shared info: rv=%d (addr=0x%x)\n",
 		  rv, addr);
-	sdio_release_host(bus->sdiodev->func[1]);
+	sdio_release_host(bus->sdiodev->func1);
 	return rv;
 }
 
@@ -1079,6 +1054,8 @@ static void brcmf_sdio_get_console_addr(struct brcmf_sdio *bus)
 
 static u32 brcmf_sdio_hostmail(struct brcmf_sdio *bus)
 {
+	struct brcmf_sdio_dev *sdiod = bus->sdiodev;
+	struct brcmf_core *core = bus->sdio_core;
 	u32 intstatus = 0;
 	u32 hmb_data;
 	u8 fcbits;
@@ -1087,12 +1064,14 @@ static u32 brcmf_sdio_hostmail(struct brcmf_sdio *bus)
 	brcmf_dbg(SDIO, "Enter\n");
 
 	/* Read mailbox data and ack that we did so */
-	ret = r_sdreg32(bus, &hmb_data,
-			offsetof(struct sdpcmd_regs, tohostmailboxdata));
+	hmb_data = brcmf_sdiod_readl(sdiod,
+				     core->base + SD_REG(tohostmailboxdata),
+				     &ret);
+
+	if (!ret)
+		brcmf_sdiod_writel(sdiod, core->base + SD_REG(tosbmailbox),
+				   SMB_INT_ACK, &ret);
 
-	if (ret == 0)
-		w_sdreg32(bus, SMB_INT_ACK,
-			  offsetof(struct sdpcmd_regs, tosbmailbox));
 	bus->sdcnt.f1regdata += 2;
 
 	/* dongle indicates the firmware has halted/crashed */
@@ -1166,6 +1145,8 @@ static u32 brcmf_sdio_hostmail(struct brcmf_sdio *bus)
 
 static void brcmf_sdio_rxfail(struct brcmf_sdio *bus, bool abort, bool rtx)
 {
+	struct brcmf_sdio_dev *sdiod = bus->sdiodev;
+	struct brcmf_core *core = bus->sdio_core;
 	uint retries = 0;
 	u16 lastrbc;
 	u8 hi, lo;
@@ -1176,18 +1157,18 @@ static void brcmf_sdio_rxfail(struct brcmf_sdio *bus, bool abort, bool rtx)
 		  rtx ? ", send NAK" : "");
 
 	if (abort)
-		brcmf_sdiod_abort(bus->sdiodev, SDIO_FUNC_2);
+		brcmf_sdiod_abort(bus->sdiodev, bus->sdiodev->func2);
 
-	brcmf_sdiod_regwb(bus->sdiodev, SBSDIO_FUNC1_FRAMECTRL,
-			  SFC_RF_TERM, &err);
+	brcmf_sdiod_writeb(bus->sdiodev, SBSDIO_FUNC1_FRAMECTRL, SFC_RF_TERM,
+			   &err);
 	bus->sdcnt.f1regdata++;
 
 	/* Wait until the packet has been flushed (device/FIFO stable) */
 	for (lastrbc = retries = 0xffff; retries > 0; retries--) {
-		hi = brcmf_sdiod_regrb(bus->sdiodev,
-				       SBSDIO_FUNC1_RFRAMEBCHI, &err);
-		lo = brcmf_sdiod_regrb(bus->sdiodev,
-				       SBSDIO_FUNC1_RFRAMEBCLO, &err);
+		hi = brcmf_sdiod_readb(bus->sdiodev, SBSDIO_FUNC1_RFRAMEBCHI,
+				       &err);
+		lo = brcmf_sdiod_readb(bus->sdiodev, SBSDIO_FUNC1_RFRAMEBCLO,
+				       &err);
 		bus->sdcnt.f1regdata += 2;
 
 		if ((hi == 0) && (lo == 0))
@@ -1207,8 +1188,8 @@ static void brcmf_sdio_rxfail(struct brcmf_sdio *bus, bool abort, bool rtx)
 
 	if (rtx) {
 		bus->sdcnt.rxrtx++;
-		err = w_sdreg32(bus, SMB_NAK,
-				offsetof(struct sdpcmd_regs, tosbmailbox));
+		brcmf_sdiod_writel(sdiod, core->base + SD_REG(tosbmailbox),
+				   SMB_NAK, &err);
 
 		bus->sdcnt.f1regdata++;
 		if (err == 0)
@@ -1228,13 +1209,13 @@ static void brcmf_sdio_txfail(struct brcmf_sdio *bus)
 	brcmf_err("sdio error, abort command and terminate frame\n");
 	bus->sdcnt.tx_sderrs++;
 
-	brcmf_sdiod_abort(sdiodev, SDIO_FUNC_2);
-	brcmf_sdiod_regwb(sdiodev, SBSDIO_FUNC1_FRAMECTRL, SFC_WF_TERM, NULL);
+	brcmf_sdiod_abort(sdiodev, sdiodev->func2);
+	brcmf_sdiod_writeb(sdiodev, SBSDIO_FUNC1_FRAMECTRL, SFC_WF_TERM, NULL);
 	bus->sdcnt.f1regdata++;
 
 	for (i = 0; i < 3; i++) {
-		hi = brcmf_sdiod_regrb(sdiodev, SBSDIO_FUNC1_WFRAMEBCHI, NULL);
-		lo = brcmf_sdiod_regrb(sdiodev, SBSDIO_FUNC1_WFRAMEBCLO, NULL);
+		hi = brcmf_sdiod_readb(sdiodev, SBSDIO_FUNC1_WFRAMEBCHI, NULL);
+		lo = brcmf_sdiod_readb(sdiodev, SBSDIO_FUNC1_WFRAMEBCLO, NULL);
 		bus->sdcnt.f1regdata += 2;
 		if ((hi == 0) && (lo == 0))
 			break;
@@ -1584,10 +1565,10 @@ static u8 brcmf_sdio_rxglom(struct brcmf_sdio *bus, u8 rxseq)
 		 * read directly into the chained packet, or allocate a large
 		 * packet and copy into the chain.
 		 */
-		sdio_claim_host(bus->sdiodev->func[1]);
+		sdio_claim_host(bus->sdiodev->func1);
 		errcode = brcmf_sdiod_recv_chain(bus->sdiodev,
 						 &bus->glom, dlen);
-		sdio_release_host(bus->sdiodev->func[1]);
+		sdio_release_host(bus->sdiodev->func1);
 		bus->sdcnt.f2rxdata++;
 
 		/* On failure, kill the superframe */
@@ -1595,11 +1576,11 @@ static u8 brcmf_sdio_rxglom(struct brcmf_sdio *bus, u8 rxseq)
 			brcmf_err("glom read of %d bytes failed: %d\n",
 				  dlen, errcode);
 
-			sdio_claim_host(bus->sdiodev->func[1]);
+			sdio_claim_host(bus->sdiodev->func1);
 			brcmf_sdio_rxfail(bus, true, false);
 			bus->sdcnt.rxglomfail++;
 			brcmf_sdio_free_glom(bus);
-			sdio_release_host(bus->sdiodev->func[1]);
+			sdio_release_host(bus->sdiodev->func1);
 			return 0;
 		}
 
@@ -1609,10 +1590,10 @@ static u8 brcmf_sdio_rxglom(struct brcmf_sdio *bus, u8 rxseq)
 
 		rd_new.seq_num = rxseq;
 		rd_new.len = dlen;
-		sdio_claim_host(bus->sdiodev->func[1]);
+		sdio_claim_host(bus->sdiodev->func1);
 		errcode = brcmf_sdio_hdparse(bus, pfirst->data, &rd_new,
 					     BRCMF_SDIO_FT_SUPER);
-		sdio_release_host(bus->sdiodev->func[1]);
+		sdio_release_host(bus->sdiodev->func1);
 		bus->cur_read.len = rd_new.len_nxtfrm << 4;
 
 		/* Remove superframe header, remember offset */
@@ -1628,10 +1609,10 @@ static u8 brcmf_sdio_rxglom(struct brcmf_sdio *bus, u8 rxseq)
 
 			rd_new.len = pnext->len;
 			rd_new.seq_num = rxseq++;
-			sdio_claim_host(bus->sdiodev->func[1]);
+			sdio_claim_host(bus->sdiodev->func1);
 			errcode = brcmf_sdio_hdparse(bus, pnext->data, &rd_new,
 						     BRCMF_SDIO_FT_SUB);
-			sdio_release_host(bus->sdiodev->func[1]);
+			sdio_release_host(bus->sdiodev->func1);
 			brcmf_dbg_hex_dump(BRCMF_GLOM_ON(),
 					   pnext->data, 32, "subframe:\n");
 
@@ -1640,11 +1621,11 @@ static u8 brcmf_sdio_rxglom(struct brcmf_sdio *bus, u8 rxseq)
 
 		if (errcode) {
 			/* Terminate frame on error */
-			sdio_claim_host(bus->sdiodev->func[1]);
+			sdio_claim_host(bus->sdiodev->func1);
 			brcmf_sdio_rxfail(bus, true, false);
 			bus->sdcnt.rxglomfail++;
 			brcmf_sdio_free_glom(bus);
-			sdio_release_host(bus->sdiodev->func[1]);
+			sdio_release_host(bus->sdiodev->func1);
 			bus->cur_read.len = 0;
 			return 0;
 		}
@@ -1852,7 +1833,7 @@ static uint brcmf_sdio_readframes(struct brcmf_sdio *bus, uint maxframes)
 
 		rd->len_left = rd->len;
 		/* read header first for unknown frame length */
-		sdio_claim_host(bus->sdiodev->func[1]);
+		sdio_claim_host(bus->sdiodev->func1);
 		if (!rd->len) {
 			ret = brcmf_sdiod_recv_buf(bus->sdiodev,
 						   bus->rxhdr, BRCMF_FIRSTREAD);
@@ -1862,7 +1843,7 @@ static uint brcmf_sdio_readframes(struct brcmf_sdio *bus, uint maxframes)
 					  ret);
 				bus->sdcnt.rx_hdrfail++;
 				brcmf_sdio_rxfail(bus, true, true);
-				sdio_release_host(bus->sdiodev->func[1]);
+				sdio_release_host(bus->sdiodev->func1);
 				continue;
 			}
 
@@ -1872,7 +1853,7 @@ static uint brcmf_sdio_readframes(struct brcmf_sdio *bus, uint maxframes)
 
 			if (brcmf_sdio_hdparse(bus, bus->rxhdr, rd,
 					       BRCMF_SDIO_FT_NORMAL)) {
-				sdio_release_host(bus->sdiodev->func[1]);
+				sdio_release_host(bus->sdiodev->func1);
 				if (!bus->rxpending)
 					break;
 				else
@@ -1888,7 +1869,7 @@ static uint brcmf_sdio_readframes(struct brcmf_sdio *bus, uint maxframes)
 				rd->len_nxtfrm = 0;
 				/* treat all packet as event if we don't know */
 				rd->channel = SDPCM_EVENT_CHANNEL;
-				sdio_release_host(bus->sdiodev->func[1]);
+				sdio_release_host(bus->sdiodev->func1);
 				continue;
 			}
 			rd->len_left = rd->len > BRCMF_FIRSTREAD ?
@@ -1905,7 +1886,7 @@ static uint brcmf_sdio_readframes(struct brcmf_sdio *bus, uint maxframes)
 			brcmf_err("brcmu_pkt_buf_get_skb failed\n");
 			brcmf_sdio_rxfail(bus, false,
 					    RETRYCHAN(rd->channel));
-			sdio_release_host(bus->sdiodev->func[1]);
+			sdio_release_host(bus->sdiodev->func1);
 			continue;
 		}
 		skb_pull(pkt, head_read);
@@ -1913,16 +1894,16 @@ static uint brcmf_sdio_readframes(struct brcmf_sdio *bus, uint maxframes)
 
 		ret = brcmf_sdiod_recv_pkt(bus->sdiodev, pkt);
 		bus->sdcnt.f2rxdata++;
-		sdio_release_host(bus->sdiodev->func[1]);
+		sdio_release_host(bus->sdiodev->func1);
 
 		if (ret < 0) {
 			brcmf_err("read %d bytes from channel %d failed: %d\n",
 				  rd->len, rd->channel, ret);
 			brcmu_pkt_buf_free_skb(pkt);
-			sdio_claim_host(bus->sdiodev->func[1]);
+			sdio_claim_host(bus->sdiodev->func1);
 			brcmf_sdio_rxfail(bus, true,
 					    RETRYCHAN(rd->channel));
-			sdio_release_host(bus->sdiodev->func[1]);
+			sdio_release_host(bus->sdiodev->func1);
 			continue;
 		}
 
@@ -1933,7 +1914,7 @@ static uint brcmf_sdio_readframes(struct brcmf_sdio *bus, uint maxframes)
 		} else {
 			memcpy(bus->rxhdr, pkt->data, SDPCM_HDRLEN);
 			rd_new.seq_num = rd->seq_num;
-			sdio_claim_host(bus->sdiodev->func[1]);
+			sdio_claim_host(bus->sdiodev->func1);
 			if (brcmf_sdio_hdparse(bus, bus->rxhdr, &rd_new,
 					       BRCMF_SDIO_FT_NORMAL)) {
 				rd->len = 0;
@@ -1946,11 +1927,11 @@ static uint brcmf_sdio_readframes(struct brcmf_sdio *bus, uint maxframes)
 					  roundup(rd_new.len, 16) >> 4);
 				rd->len = 0;
 				brcmf_sdio_rxfail(bus, true, true);
-				sdio_release_host(bus->sdiodev->func[1]);
+				sdio_release_host(bus->sdiodev->func1);
 				brcmu_pkt_buf_free_skb(pkt);
 				continue;
 			}
-			sdio_release_host(bus->sdiodev->func[1]);
+			sdio_release_host(bus->sdiodev->func1);
 			rd->len_nxtfrm = rd_new.len_nxtfrm;
 			rd->channel = rd_new.channel;
 			rd->dat_offset = rd_new.dat_offset;
@@ -1966,9 +1947,9 @@ static uint brcmf_sdio_readframes(struct brcmf_sdio *bus, uint maxframes)
 					  rd_new.seq_num);
 				/* Force retry w/normal header read */
 				rd->len = 0;
-				sdio_claim_host(bus->sdiodev->func[1]);
+				sdio_claim_host(bus->sdiodev->func1);
 				brcmf_sdio_rxfail(bus, false, true);
-				sdio_release_host(bus->sdiodev->func[1]);
+				sdio_release_host(bus->sdiodev->func1);
 				brcmu_pkt_buf_free_skb(pkt);
 				continue;
 			}
@@ -1991,9 +1972,9 @@ static uint brcmf_sdio_readframes(struct brcmf_sdio *bus, uint maxframes)
 			} else {
 				brcmf_err("%s: glom superframe w/o "
 					  "descriptor!\n", __func__);
-				sdio_claim_host(bus->sdiodev->func[1]);
+				sdio_claim_host(bus->sdiodev->func1);
 				brcmf_sdio_rxfail(bus, false, false);
-				sdio_release_host(bus->sdiodev->func[1]);
+				sdio_release_host(bus->sdiodev->func1);
 			}
 			/* prepare the descriptor for the next read */
 			rd->len = rd->len_nxtfrm << 4;
@@ -2091,7 +2072,7 @@ static int brcmf_sdio_txpkt_prep_sg(struct brcmf_sdio *bus,
 	int ntail, ret;
 
 	sdiodev = bus->sdiodev;
-	blksize = sdiodev->func[SDIO_FUNC_2]->cur_blksize;
+	blksize = sdiodev->func2->cur_blksize;
 	/* sg entry alignment should be a divisor of block size */
 	WARN_ON(blksize % bus->sgentry_align);
 
@@ -2270,14 +2251,14 @@ static int brcmf_sdio_txpkt(struct brcmf_sdio *bus, struct sk_buff_head *pktq,
 	if (ret)
 		goto done;
 
-	sdio_claim_host(bus->sdiodev->func[1]);
+	sdio_claim_host(bus->sdiodev->func1);
 	ret = brcmf_sdiod_send_pkt(bus->sdiodev, pktq);
 	bus->sdcnt.f2txdata++;
 
 	if (ret < 0)
 		brcmf_sdio_txfail(bus);
 
-	sdio_release_host(bus->sdiodev->func[1]);
+	sdio_release_host(bus->sdiodev->func1);
 
 done:
 	brcmf_sdio_txpkt_postp(bus, pktq);
@@ -2295,6 +2276,7 @@ static uint brcmf_sdio_sendfromq(struct brcmf_sdio *bus, uint maxframes)
 {
 	struct sk_buff *pkt;
 	struct sk_buff_head pktq;
+	u32 intstat_addr = bus->sdio_core->base + SD_REG(intstatus);
 	u32 intstatus = 0;
 	int ret = 0, prec_out, i;
 	uint cnt = 0;
@@ -2332,11 +2314,11 @@ static uint brcmf_sdio_sendfromq(struct brcmf_sdio *bus, uint maxframes)
 		/* In poll mode, need to check for other events */
 		if (!bus->intr) {
 			/* Check device status, signal pending interrupt */
-			sdio_claim_host(bus->sdiodev->func[1]);
-			ret = r_sdreg32(bus, &intstatus,
-					offsetof(struct sdpcmd_regs,
-						 intstatus));
-			sdio_release_host(bus->sdiodev->func[1]);
+			sdio_claim_host(bus->sdiodev->func1);
+			intstatus = brcmf_sdiod_readl(bus->sdiodev,
+						      intstat_addr, &ret);
+			sdio_release_host(bus->sdiodev->func1);
+
 			bus->sdcnt.f2txdata++;
 			if (ret != 0)
 				break;
@@ -2419,12 +2401,13 @@ static int brcmf_sdio_tx_ctrlframe(struct brcmf_sdio *bus, u8 *frame, u16 len)
 
 static void brcmf_sdio_bus_stop(struct device *dev)
 {
-	u32 local_hostintmask;
-	u8 saveclk;
-	int err;
 	struct brcmf_bus *bus_if = dev_get_drvdata(dev);
 	struct brcmf_sdio_dev *sdiodev = bus_if->bus_priv.sdio;
 	struct brcmf_sdio *bus = sdiodev->bus;
+	struct brcmf_core *core = bus->sdio_core;
+	u32 local_hostintmask;
+	u8 saveclk;
+	int err;
 
 	brcmf_dbg(TRACE, "Enter\n");
 
@@ -2435,35 +2418,37 @@ static void brcmf_sdio_bus_stop(struct device *dev)
 	}
 
 	if (sdiodev->state != BRCMF_SDIOD_NOMEDIUM) {
-		sdio_claim_host(sdiodev->func[1]);
+		sdio_claim_host(sdiodev->func1);
 
 		/* Enable clock for device interrupts */
 		brcmf_sdio_bus_sleep(bus, false, false);
 
 		/* Disable and clear interrupts at the chip level also */
-		w_sdreg32(bus, 0, offsetof(struct sdpcmd_regs, hostintmask));
+		brcmf_sdiod_writel(sdiodev, core->base + SD_REG(hostintmask),
+				   0, NULL);
+
 		local_hostintmask = bus->hostintmask;
 		bus->hostintmask = 0;
 
 		/* Force backplane clocks to assure F2 interrupt propagates */
-		saveclk = brcmf_sdiod_regrb(sdiodev, SBSDIO_FUNC1_CHIPCLKCSR,
+		saveclk = brcmf_sdiod_readb(sdiodev, SBSDIO_FUNC1_CHIPCLKCSR,
 					    &err);
 		if (!err)
-			brcmf_sdiod_regwb(sdiodev, SBSDIO_FUNC1_CHIPCLKCSR,
-					  (saveclk | SBSDIO_FORCE_HT), &err);
+			brcmf_sdiod_writeb(sdiodev, SBSDIO_FUNC1_CHIPCLKCSR,
+					   (saveclk | SBSDIO_FORCE_HT), &err);
 		if (err)
 			brcmf_err("Failed to force clock for F2: err %d\n",
 				  err);
 
 		/* Turn off the bus (F2), free any pending packets */
 		brcmf_dbg(INTR, "disable SDIO interrupts\n");
-		sdio_disable_func(sdiodev->func[SDIO_FUNC_2]);
+		sdio_disable_func(sdiodev->func2);
 
 		/* Clear any pending interrupts now that F2 is disabled */
-		w_sdreg32(bus, local_hostintmask,
-			  offsetof(struct sdpcmd_regs, intstatus));
+		brcmf_sdiod_writel(sdiodev, core->base + SD_REG(intstatus),
+				   local_hostintmask, NULL);
 
-		sdio_release_host(sdiodev->func[1]);
+		sdio_release_host(sdiodev->func1);
 	}
 	/* Clear the data packet queues */
 	brcmu_pktq_flush(&bus->txq, true, NULL, NULL);
@@ -2501,15 +2486,14 @@ static inline void brcmf_sdio_clrintr(struct brcmf_sdio *bus)
 
 static int brcmf_sdio_intr_rstatus(struct brcmf_sdio *bus)
 {
-	struct brcmf_core *buscore;
+	struct brcmf_core *core = bus->sdio_core;
 	u32 addr;
 	unsigned long val;
 	int ret;
 
-	buscore = brcmf_chip_get_core(bus->ci, BCMA_CORE_SDIO_DEV);
-	addr = buscore->base + offsetof(struct sdpcmd_regs, intstatus);
+	addr = core->base + SD_REG(intstatus);
 
-	val = brcmf_sdiod_regrl(bus->sdiodev, addr, &ret);
+	val = brcmf_sdiod_readl(bus->sdiodev, addr, &ret);
 	bus->sdcnt.f1regdata++;
 	if (ret != 0)
 		return ret;
@@ -2519,7 +2503,7 @@ static int brcmf_sdio_intr_rstatus(struct brcmf_sdio *bus)
 
 	/* Clear interrupts */
 	if (val) {
-		brcmf_sdiod_regwl(bus->sdiodev, addr, val, &ret);
+		brcmf_sdiod_writel(bus->sdiodev, addr, val, &ret);
 		bus->sdcnt.f1regdata++;
 		atomic_or(val, &bus->intstatus);
 	}
@@ -2529,7 +2513,9 @@ static int brcmf_sdio_intr_rstatus(struct brcmf_sdio *bus)
 
 static void brcmf_sdio_dpc(struct brcmf_sdio *bus)
 {
+	struct brcmf_sdio_dev *sdiod = bus->sdiodev;
 	u32 newstatus = 0;
+	u32 intstat_addr = bus->sdio_core->base + SD_REG(intstatus);
 	unsigned long intstatus;
 	uint txlimit = bus->txbound;	/* Tx frames to send before resched */
 	uint framecnt;			/* Temporary counter of tx/rx frames */
@@ -2537,7 +2523,7 @@ static void brcmf_sdio_dpc(struct brcmf_sdio *bus)
 
 	brcmf_dbg(TRACE, "Enter\n");
 
-	sdio_claim_host(bus->sdiodev->func[1]);
+	sdio_claim_host(bus->sdiodev->func1);
 
 	/* If waiting for HTAVAIL, check status */
 	if (!bus->sr_enabled && bus->clkstate == CLK_PENDING) {
@@ -2545,23 +2531,23 @@ static void brcmf_sdio_dpc(struct brcmf_sdio *bus)
 
 #ifdef DEBUG
 		/* Check for inconsistent device control */
-		devctl = brcmf_sdiod_regrb(bus->sdiodev,
-					   SBSDIO_DEVICE_CTL, &err);
+		devctl = brcmf_sdiod_readb(bus->sdiodev, SBSDIO_DEVICE_CTL,
+					   &err);
 #endif				/* DEBUG */
 
 		/* Read CSR, if clock on switch to AVAIL, else ignore */
-		clkctl = brcmf_sdiod_regrb(bus->sdiodev,
+		clkctl = brcmf_sdiod_readb(bus->sdiodev,
 					   SBSDIO_FUNC1_CHIPCLKCSR, &err);
 
 		brcmf_dbg(SDIO, "DPC: PENDING, devctl 0x%02x clkctl 0x%02x\n",
 			  devctl, clkctl);
 
 		if (SBSDIO_HTAV(clkctl)) {
-			devctl = brcmf_sdiod_regrb(bus->sdiodev,
+			devctl = brcmf_sdiod_readb(bus->sdiodev,
 						   SBSDIO_DEVICE_CTL, &err);
 			devctl &= ~SBSDIO_DEVCTL_CA_INT_ONLY;
-			brcmf_sdiod_regwb(bus->sdiodev, SBSDIO_DEVICE_CTL,
-					  devctl, &err);
+			brcmf_sdiod_writeb(bus->sdiodev,
+					   SBSDIO_DEVICE_CTL, devctl, &err);
 			bus->clkstate = CLK_AVAIL;
 		}
 	}
@@ -2584,11 +2570,10 @@ static void brcmf_sdio_dpc(struct brcmf_sdio *bus)
 	 */
 	if (intstatus & I_HMB_FC_CHANGE) {
 		intstatus &= ~I_HMB_FC_CHANGE;
-		err = w_sdreg32(bus, I_HMB_FC_CHANGE,
-				offsetof(struct sdpcmd_regs, intstatus));
+		brcmf_sdiod_writel(sdiod, intstat_addr, I_HMB_FC_CHANGE, &err);
+
+		newstatus = brcmf_sdiod_readl(sdiod, intstat_addr, &err);
 
-		err = r_sdreg32(bus, &newstatus,
-				offsetof(struct sdpcmd_regs, intstatus));
 		bus->sdcnt.f1regdata += 2;
 		atomic_set(&bus->fcstate,
 			   !!(newstatus & (I_HMB_FC_STATE | I_HMB_FC_CHANGE)));
@@ -2601,7 +2586,7 @@ static void brcmf_sdio_dpc(struct brcmf_sdio *bus)
 		intstatus |= brcmf_sdio_hostmail(bus);
 	}
 
-	sdio_release_host(bus->sdiodev->func[1]);
+	sdio_release_host(bus->sdiodev->func1);
 
 	/* Generally don't ask for these, can get CRC errors... */
 	if (intstatus & I_WR_OOSYNC) {
@@ -2644,7 +2629,7 @@ static void brcmf_sdio_dpc(struct brcmf_sdio *bus)
 
 	if (bus->ctrl_frame_stat && (bus->clkstate == CLK_AVAIL) &&
 	    data_ok(bus)) {
-		sdio_claim_host(bus->sdiodev->func[1]);
+		sdio_claim_host(bus->sdiodev->func1);
 		if (bus->ctrl_frame_stat) {
 			err = brcmf_sdio_tx_ctrlframe(bus,  bus->ctrl_frame_buf,
 						      bus->ctrl_frame_len);
@@ -2652,7 +2637,7 @@ static void brcmf_sdio_dpc(struct brcmf_sdio *bus)
 			wmb();
 			bus->ctrl_frame_stat = false;
 		}
-		sdio_release_host(bus->sdiodev->func[1]);
+		sdio_release_host(bus->sdiodev->func1);
 		brcmf_sdio_wait_event_wakeup(bus);
 	}
 	/* Send queued frames (limit 1 if rx may still be pending) */
@@ -2668,14 +2653,14 @@ static void brcmf_sdio_dpc(struct brcmf_sdio *bus)
 		brcmf_err("failed backplane access over SDIO, halting operation\n");
 		atomic_set(&bus->intstatus, 0);
 		if (bus->ctrl_frame_stat) {
-			sdio_claim_host(bus->sdiodev->func[1]);
+			sdio_claim_host(bus->sdiodev->func1);
 			if (bus->ctrl_frame_stat) {
 				bus->ctrl_frame_err = -ENODEV;
 				wmb();
 				bus->ctrl_frame_stat = false;
 				brcmf_sdio_wait_event_wakeup(bus);
 			}
-			sdio_release_host(bus->sdiodev->func[1]);
+			sdio_release_host(bus->sdiodev->func1);
 		}
 	} else if (atomic_read(&bus->intstatus) ||
 		   atomic_read(&bus->ipend) > 0 ||
@@ -2890,13 +2875,13 @@ brcmf_sdio_bus_txctl(struct device *dev, unsigned char *msg, uint msglen)
 					 CTL_DONE_TIMEOUT);
 	ret = 0;
 	if (bus->ctrl_frame_stat) {
-		sdio_claim_host(bus->sdiodev->func[1]);
+		sdio_claim_host(bus->sdiodev->func1);
 		if (bus->ctrl_frame_stat) {
 			brcmf_dbg(SDIO, "ctrl_frame timeout\n");
 			bus->ctrl_frame_stat = false;
 			ret = -ETIMEDOUT;
 		}
-		sdio_release_host(bus->sdiodev->func[1]);
+		sdio_release_host(bus->sdiodev->func1);
 	}
 	if (!ret) {
 		brcmf_dbg(SDIO, "ctrl_frame complete, err=%d\n",
@@ -3020,7 +3005,7 @@ static int brcmf_sdio_assert_info(struct seq_file *seq, struct brcmf_sdio *bus,
 		return 0;
 	}
 
-	sdio_claim_host(bus->sdiodev->func[1]);
+	sdio_claim_host(bus->sdiodev->func1);
 	if (sh->assert_file_addr != 0) {
 		error = brcmf_sdiod_ramrw(bus->sdiodev, false,
 					  sh->assert_file_addr, (u8 *)file, 80);
@@ -3033,7 +3018,7 @@ static int brcmf_sdio_assert_info(struct seq_file *seq, struct brcmf_sdio *bus,
 		if (error < 0)
 			return error;
 	}
-	sdio_release_host(bus->sdiodev->func[1]);
+	sdio_release_host(bus->sdiodev->func1);
 
 	seq_printf(seq, "dongle assert: %s:%d: assert(%s)\n",
 		   file, sh->assert_line, expr);
@@ -3307,7 +3292,7 @@ static int brcmf_sdio_download_firmware(struct brcmf_sdio *bus,
 	int bcmerror;
 	u32 rstvec;
 
-	sdio_claim_host(bus->sdiodev->func[1]);
+	sdio_claim_host(bus->sdiodev->func1);
 	brcmf_sdio_clkctl(bus, CLK_AVAIL, false);
 
 	rstvec = get_unaligned_le32(fw->data);
@@ -3336,7 +3321,7 @@ static int brcmf_sdio_download_firmware(struct brcmf_sdio *bus,
 
 err:
 	brcmf_sdio_clkctl(bus, CLK_SDONLY, false);
-	sdio_release_host(bus->sdiodev->func[1]);
+	sdio_release_host(bus->sdiodev->func1);
 	return bcmerror;
 }
 
@@ -3347,31 +3332,31 @@ static void brcmf_sdio_sr_init(struct brcmf_sdio *bus)
 
 	brcmf_dbg(TRACE, "Enter\n");
 
-	val = brcmf_sdiod_regrb(bus->sdiodev, SBSDIO_FUNC1_WAKEUPCTRL, &err);
+	val = brcmf_sdiod_readb(bus->sdiodev, SBSDIO_FUNC1_WAKEUPCTRL, &err);
 	if (err) {
 		brcmf_err("error reading SBSDIO_FUNC1_WAKEUPCTRL\n");
 		return;
 	}
 
 	val |= 1 << SBSDIO_FUNC1_WCTRL_HTWAIT_SHIFT;
-	brcmf_sdiod_regwb(bus->sdiodev, SBSDIO_FUNC1_WAKEUPCTRL, val, &err);
+	brcmf_sdiod_writeb(bus->sdiodev, SBSDIO_FUNC1_WAKEUPCTRL, val, &err);
 	if (err) {
 		brcmf_err("error writing SBSDIO_FUNC1_WAKEUPCTRL\n");
 		return;
 	}
 
 	/* Add CMD14 Support */
-	brcmf_sdiod_regwb(bus->sdiodev, SDIO_CCCR_BRCM_CARDCAP,
-			  (SDIO_CCCR_BRCM_CARDCAP_CMD14_SUPPORT |
-			   SDIO_CCCR_BRCM_CARDCAP_CMD14_EXT),
-			  &err);
+	brcmf_sdiod_func0_wb(bus->sdiodev, SDIO_CCCR_BRCM_CARDCAP,
+			     (SDIO_CCCR_BRCM_CARDCAP_CMD14_SUPPORT |
+			      SDIO_CCCR_BRCM_CARDCAP_CMD14_EXT),
+			     &err);
 	if (err) {
 		brcmf_err("error writing SDIO_CCCR_BRCM_CARDCAP\n");
 		return;
 	}
 
-	brcmf_sdiod_regwb(bus->sdiodev, SBSDIO_FUNC1_CHIPCLKCSR,
-			  SBSDIO_FORCE_HT, &err);
+	brcmf_sdiod_writeb(bus->sdiodev, SBSDIO_FUNC1_CHIPCLKCSR,
+			   SBSDIO_FORCE_HT, &err);
 	if (err) {
 		brcmf_err("error writing SBSDIO_FUNC1_CHIPCLKCSR\n");
 		return;
@@ -3385,16 +3370,17 @@ static void brcmf_sdio_sr_init(struct brcmf_sdio *bus)
 /* enable KSO bit */
 static int brcmf_sdio_kso_init(struct brcmf_sdio *bus)
 {
+	struct brcmf_core *core = bus->sdio_core;
 	u8 val;
 	int err = 0;
 
 	brcmf_dbg(TRACE, "Enter\n");
 
 	/* KSO bit added in SDIO core rev 12 */
-	if (brcmf_chip_get_core(bus->ci, BCMA_CORE_SDIO_DEV)->rev < 12)
+	if (core->rev < 12)
 		return 0;
 
-	val = brcmf_sdiod_regrb(bus->sdiodev, SBSDIO_FUNC1_SLEEPCSR, &err);
+	val = brcmf_sdiod_readb(bus->sdiodev, SBSDIO_FUNC1_SLEEPCSR, &err);
 	if (err) {
 		brcmf_err("error reading SBSDIO_FUNC1_SLEEPCSR\n");
 		return err;
@@ -3403,8 +3389,8 @@ static int brcmf_sdio_kso_init(struct brcmf_sdio *bus)
 	if (!(val & SBSDIO_FUNC1_SLEEPCSR_KSO_MASK)) {
 		val |= (SBSDIO_FUNC1_SLEEPCSR_KSO_EN <<
 			SBSDIO_FUNC1_SLEEPCSR_KSO_SHIFT);
-		brcmf_sdiod_regwb(bus->sdiodev, SBSDIO_FUNC1_SLEEPCSR,
-				  val, &err);
+		brcmf_sdiod_writeb(bus->sdiodev, SBSDIO_FUNC1_SLEEPCSR,
+				   val, &err);
 		if (err) {
 			brcmf_err("error writing SBSDIO_FUNC1_SLEEPCSR\n");
 			return err;
@@ -3420,6 +3406,7 @@ static int brcmf_sdio_bus_preinit(struct device *dev)
 	struct brcmf_bus *bus_if = dev_get_drvdata(dev);
 	struct brcmf_sdio_dev *sdiodev = bus_if->bus_priv.sdio;
 	struct brcmf_sdio *bus = sdiodev->bus;
+	struct brcmf_core *core = bus->sdio_core;
 	uint pad_size;
 	u32 value;
 	int err;
@@ -3428,7 +3415,7 @@ static int brcmf_sdio_bus_preinit(struct device *dev)
 	 * a device perspective, ie. bus:txglom affects the
 	 * bus transfers from device to host.
 	 */
-	if (brcmf_chip_get_core(bus->ci, BCMA_CORE_SDIO_DEV)->rev < 12) {
+	if (core->rev < 12) {
 		/* for sdio core rev < 12, disable txgloming */
 		value = 0;
 		err = brcmf_iovar_data_set(dev, "bus:txglom", &value,
@@ -3449,7 +3436,7 @@ static int brcmf_sdio_bus_preinit(struct device *dev)
 	if (sdiodev->sg_support) {
 		bus->txglom = false;
 		value = 1;
-		pad_size = bus->sdiodev->func[2]->cur_blksize << 1;
+		pad_size = bus->sdiodev->func2->cur_blksize << 1;
 		err = brcmf_iovar_data_set(bus->sdiodev->dev, "bus:rxglom",
 					   &value, sizeof(u32));
 		if (err < 0) {
@@ -3491,7 +3478,7 @@ static int brcmf_sdio_bus_get_memdump(struct device *dev, void *data,
 
 	address = bus->ci->rambase;
 	offset = err = 0;
-	sdio_claim_host(sdiodev->func[1]);
+	sdio_claim_host(sdiodev->func1);
 	while (offset < mem_size) {
 		len = ((offset + MEMBLOCK) < mem_size) ? MEMBLOCK :
 		      mem_size - offset;
@@ -3507,7 +3494,7 @@ static int brcmf_sdio_bus_get_memdump(struct device *dev, void *data,
 	}
 
 done:
-	sdio_release_host(sdiodev->func[1]);
+	sdio_release_host(sdiodev->func1);
 	return err;
 }
 
@@ -3564,11 +3551,10 @@ static void brcmf_sdio_bus_watchdog(struct brcmf_sdio *bus)
 			if (!bus->dpc_triggered) {
 				u8 devpend;
 
-				sdio_claim_host(bus->sdiodev->func[1]);
-				devpend = brcmf_sdiod_regrb(bus->sdiodev,
-							    SDIO_CCCR_INTx,
-							    NULL);
-				sdio_release_host(bus->sdiodev->func[1]);
+				sdio_claim_host(bus->sdiodev->func1);
+				devpend = brcmf_sdiod_func0_rb(bus->sdiodev,
+						  SDIO_CCCR_INTx, NULL);
+				sdio_release_host(bus->sdiodev->func1);
 				intstatus = devpend & (INTR_STATUS_FUNC1 |
 						       INTR_STATUS_FUNC2);
 			}
@@ -3594,13 +3580,13 @@ static void brcmf_sdio_bus_watchdog(struct brcmf_sdio *bus)
 		bus->console.count += jiffies_to_msecs(BRCMF_WD_POLL);
 		if (bus->console.count >= bus->console_interval) {
 			bus->console.count -= bus->console_interval;
-			sdio_claim_host(bus->sdiodev->func[1]);
+			sdio_claim_host(bus->sdiodev->func1);
 			/* Make sure backplane clock is on */
 			brcmf_sdio_bus_sleep(bus, false, false);
 			if (brcmf_sdio_readconsole(bus) < 0)
 				/* stop on error */
 				bus->console_interval = 0;
-			sdio_release_host(bus->sdiodev->func[1]);
+			sdio_release_host(bus->sdiodev->func1);
 		}
 	}
 #endif				/* DEBUG */
@@ -3613,11 +3599,11 @@ static void brcmf_sdio_bus_watchdog(struct brcmf_sdio *bus)
 			bus->idlecount++;
 			if (bus->idlecount > bus->idletime) {
 				brcmf_dbg(SDIO, "idle\n");
-				sdio_claim_host(bus->sdiodev->func[1]);
+				sdio_claim_host(bus->sdiodev->func1);
 				brcmf_sdio_wd_timer(bus, false);
 				bus->idlecount = 0;
 				brcmf_sdio_bus_sleep(bus, true, false);
-				sdio_release_host(bus->sdiodev->func[1]);
+				sdio_release_host(bus->sdiodev->func1);
 			}
 		} else {
 			bus->idlecount = 0;
@@ -3705,12 +3691,12 @@ brcmf_sdio_drivestrengthinit(struct brcmf_sdio_dev *sdiodev,
 			}
 		}
 		addr = CORE_CC_REG(pmu->base, chipcontrol_addr);
-		brcmf_sdiod_regwl(sdiodev, addr, 1, NULL);
-		cc_data_temp = brcmf_sdiod_regrl(sdiodev, addr, NULL);
+		brcmf_sdiod_writel(sdiodev, addr, 1, NULL);
+		cc_data_temp = brcmf_sdiod_readl(sdiodev, addr, NULL);
 		cc_data_temp &= ~str_mask;
 		drivestrength_sel <<= str_shift;
 		cc_data_temp |= drivestrength_sel;
-		brcmf_sdiod_regwl(sdiodev, addr, cc_data_temp, NULL);
+		brcmf_sdiod_writel(sdiodev, addr, cc_data_temp, NULL);
 
 		brcmf_dbg(INFO, "SDIO: %d mA (req=%d mA) drive strength selected, set to 0x%08x\n",
 			  str_tab[i].strength, drivestrength, cc_data_temp);
@@ -3725,7 +3711,7 @@ static int brcmf_sdio_buscoreprep(void *ctx)
 
 	/* Try forcing SDIO core to do ALPAvail request only */
 	clkset = SBSDIO_FORCE_HW_CLKREQ_OFF | SBSDIO_ALP_AVAIL_REQ;
-	brcmf_sdiod_regwb(sdiodev, SBSDIO_FUNC1_CHIPCLKCSR, clkset, &err);
+	brcmf_sdiod_writeb(sdiodev, SBSDIO_FUNC1_CHIPCLKCSR, clkset, &err);
 	if (err) {
 		brcmf_err("error writing for HT off\n");
 		return err;
@@ -3733,8 +3719,7 @@ static int brcmf_sdio_buscoreprep(void *ctx)
 
 	/* If register supported, wait for ALPAvail and then force ALP */
 	/* This may take up to 15 milliseconds */
-	clkval = brcmf_sdiod_regrb(sdiodev,
-				   SBSDIO_FUNC1_CHIPCLKCSR, NULL);
+	clkval = brcmf_sdiod_readb(sdiodev, SBSDIO_FUNC1_CHIPCLKCSR, NULL);
 
 	if ((clkval & ~SBSDIO_AVBITS) != clkset) {
 		brcmf_err("ChipClkCSR access: wrote 0x%02x read 0x%02x\n",
@@ -3742,10 +3727,11 @@ static int brcmf_sdio_buscoreprep(void *ctx)
 		return -EACCES;
 	}
 
-	SPINWAIT(((clkval = brcmf_sdiod_regrb(sdiodev,
-					      SBSDIO_FUNC1_CHIPCLKCSR, NULL)),
-			!SBSDIO_ALPAV(clkval)),
-			PMU_MAX_TRANSITION_DLY);
+	SPINWAIT(((clkval = brcmf_sdiod_readb(sdiodev, SBSDIO_FUNC1_CHIPCLKCSR,
+					      NULL)),
+		 !SBSDIO_ALPAV(clkval)),
+		 PMU_MAX_TRANSITION_DLY);
+
 	if (!SBSDIO_ALPAV(clkval)) {
 		brcmf_err("timeout on ALPAV wait, clkval 0x%02x\n",
 			  clkval);
@@ -3753,11 +3739,11 @@ static int brcmf_sdio_buscoreprep(void *ctx)
 	}
 
 	clkset = SBSDIO_FORCE_HW_CLKREQ_OFF | SBSDIO_FORCE_ALP;
-	brcmf_sdiod_regwb(sdiodev, SBSDIO_FUNC1_CHIPCLKCSR, clkset, &err);
+	brcmf_sdiod_writeb(sdiodev, SBSDIO_FUNC1_CHIPCLKCSR, clkset, &err);
 	udelay(65);
 
 	/* Also, disable the extra SDIO pull-ups */
-	brcmf_sdiod_regwb(sdiodev, SBSDIO_FUNC1_SDIOPULLUP, 0, NULL);
+	brcmf_sdiod_writeb(sdiodev, SBSDIO_FUNC1_SDIOPULLUP, 0, NULL);
 
 	return 0;
 }
@@ -3766,13 +3752,12 @@ static void brcmf_sdio_buscore_activate(void *ctx, struct brcmf_chip *chip,
 					u32 rstvec)
 {
 	struct brcmf_sdio_dev *sdiodev = ctx;
-	struct brcmf_core *core;
+	struct brcmf_core *core = sdiodev->bus->sdio_core;
 	u32 reg_addr;
 
 	/* clear all interrupts */
-	core = brcmf_chip_get_core(chip, BCMA_CORE_SDIO_DEV);
-	reg_addr = core->base + offsetof(struct sdpcmd_regs, intstatus);
-	brcmf_sdiod_regwl(sdiodev, reg_addr, 0xFFFFFFFF, NULL);
+	reg_addr = core->base + SD_REG(intstatus);
+	brcmf_sdiod_writel(sdiodev, reg_addr, 0xFFFFFFFF, NULL);
 
 	if (rstvec)
 		/* Write reset vector to address 0 */
@@ -3785,16 +3770,25 @@ static u32 brcmf_sdio_buscore_read32(void *ctx, u32 addr)
 	struct brcmf_sdio_dev *sdiodev = ctx;
 	u32 val, rev;
 
-	val = brcmf_sdiod_regrl(sdiodev, addr, NULL);
-	if ((sdiodev->func[0]->device == SDIO_DEVICE_ID_BROADCOM_4335_4339 ||
-	     sdiodev->func[0]->device == SDIO_DEVICE_ID_BROADCOM_4339) &&
-	    addr == CORE_CC_REG(SI_ENUM_BASE, chipid)) {
+	val = brcmf_sdiod_readl(sdiodev, addr, NULL);
+
+	/*
+	 * Special handling when reading the chipcommon chipid register:
+	 * the 4339 is a next generation of the 4335. It uses the same SDIO
+	 * device id as the 4335, and its chipid register reports 4335 as
+	 * well. The 4339 can be identified by its chip revision, so correct
+	 * the value here to give the chip.c module the right info.
+	 */
+	if (addr == CORE_CC_REG(SI_ENUM_BASE, chipid) &&
+	    (sdiodev->func1->device == SDIO_DEVICE_ID_BROADCOM_4339 ||
+	     sdiodev->func1->device == SDIO_DEVICE_ID_BROADCOM_4335_4339)) {
 		rev = (val & CID_REV_MASK) >> CID_REV_SHIFT;
 		if (rev >= 2) {
 			val &= ~CID_ID_MASK;
 			val |= BRCM_CC_4339_CHIP_ID;
 		}
 	}
+
 	return val;
 }
 
@@ -3802,7 +3796,7 @@ static void brcmf_sdio_buscore_write32(void *ctx, u32 addr, u32 val)
 {
 	struct brcmf_sdio_dev *sdiodev = ctx;
 
-	brcmf_sdiod_regwl(sdiodev, addr, val, NULL);
+	brcmf_sdiod_writel(sdiodev, addr, val, NULL);
 }
 
 static const struct brcmf_buscore_ops brcmf_sdio_buscore_ops = {
@@ -3823,21 +3817,21 @@ brcmf_sdio_probe_attach(struct brcmf_sdio *bus)
 	u32 drivestrength;
 
 	sdiodev = bus->sdiodev;
-	sdio_claim_host(sdiodev->func[1]);
+	sdio_claim_host(sdiodev->func1);
 
 	pr_debug("F1 signature read @0x18000000=0x%4x\n",
-		 brcmf_sdiod_regrl(sdiodev, SI_ENUM_BASE, NULL));
+		 brcmf_sdiod_readl(sdiodev, SI_ENUM_BASE, NULL));
 
 	/*
 	 * Force PLL off until brcmf_chip_attach()
 	 * programs PLL control regs
 	 */
 
-	brcmf_sdiod_regwb(sdiodev, SBSDIO_FUNC1_CHIPCLKCSR,
-			  BRCMF_INIT_CLKCTL1, &err);
+	brcmf_sdiod_writeb(sdiodev, SBSDIO_FUNC1_CHIPCLKCSR, BRCMF_INIT_CLKCTL1,
+			   &err);
 	if (!err)
-		clkctl = brcmf_sdiod_regrb(sdiodev,
-					   SBSDIO_FUNC1_CHIPCLKCSR, &err);
+		clkctl = brcmf_sdiod_readb(sdiodev, SBSDIO_FUNC1_CHIPCLKCSR,
+					   &err);
 
 	if (err || ((clkctl & ~SBSDIO_AVBITS) != BRCMF_INIT_CLKCTL1)) {
 		brcmf_err("ChipClkCSR access: err %d wrote 0x%02x read 0x%02x\n",
@@ -3851,6 +3845,17 @@ brcmf_sdio_probe_attach(struct brcmf_sdio *bus)
 		bus->ci = NULL;
 		goto fail;
 	}
+
+	/* Pick up the SDIO core info struct from chip.c */
+	bus->sdio_core   = brcmf_chip_get_core(bus->ci, BCMA_CORE_SDIO_DEV);
+	if (!bus->sdio_core)
+		goto fail;
+
+	/* Pick up the CHIPCOMMON core info struct, for bulk IO in bcmsdh.c */
+	sdiodev->cc_core = brcmf_chip_get_core(bus->ci, BCMA_CORE_CHIPCOMMON);
+	if (!sdiodev->cc_core)
+		goto fail;
+
 	sdiodev->settings = brcmf_get_module_param(sdiodev->dev,
 						   BRCMF_BUSTYPE_SDIO,
 						   bus->ci->chip,
@@ -3879,8 +3884,8 @@ brcmf_sdio_probe_attach(struct brcmf_sdio *bus)
 	/* wowl can be supported when KEEP_POWER is true and (WAKE_SDIO_IRQ
 	 * is true or when platform data OOB irq is true).
 	 */
-	if ((sdio_get_host_pm_caps(sdiodev->func[1]) & MMC_PM_KEEP_POWER) &&
-	    ((sdio_get_host_pm_caps(sdiodev->func[1]) & MMC_PM_WAKE_SDIO_IRQ) ||
+	if ((sdio_get_host_pm_caps(sdiodev->func1) & MMC_PM_KEEP_POWER) &&
+	    ((sdio_get_host_pm_caps(sdiodev->func1) & MMC_PM_WAKE_SDIO_IRQ) ||
 	     (sdiodev->settings->bus.sdio.oob_irq_supported)))
 		sdiodev->bus_if->wowl_supported = true;
 #endif
@@ -3897,29 +3902,29 @@ brcmf_sdio_probe_attach(struct brcmf_sdio *bus)
 	brcmf_sdio_drivestrengthinit(sdiodev, bus->ci, drivestrength);
 
 	/* Set card control so an SDIO card reset does a WLAN backplane reset */
-	reg_val = brcmf_sdiod_regrb(sdiodev, SDIO_CCCR_BRCM_CARDCTRL, &err);
+	reg_val = brcmf_sdiod_func0_rb(sdiodev, SDIO_CCCR_BRCM_CARDCTRL, &err);
 	if (err)
 		goto fail;
 
 	reg_val |= SDIO_CCCR_BRCM_CARDCTRL_WLANRESET;
 
-	brcmf_sdiod_regwb(sdiodev, SDIO_CCCR_BRCM_CARDCTRL, reg_val, &err);
+	brcmf_sdiod_func0_wb(sdiodev, SDIO_CCCR_BRCM_CARDCTRL, reg_val, &err);
 	if (err)
 		goto fail;
 
 	/* set PMUControl so a backplane reset does PMU state reload */
 	reg_addr = CORE_CC_REG(brcmf_chip_get_pmu(bus->ci)->base, pmucontrol);
-	reg_val = brcmf_sdiod_regrl(sdiodev, reg_addr, &err);
+	reg_val = brcmf_sdiod_readl(sdiodev, reg_addr, &err);
 	if (err)
 		goto fail;
 
 	reg_val |= (BCMA_CC_PMU_CTL_RES_RELOAD << BCMA_CC_PMU_CTL_RES_SHIFT);
 
-	brcmf_sdiod_regwl(sdiodev, reg_addr, reg_val, &err);
+	brcmf_sdiod_writel(sdiodev, reg_addr, reg_val, &err);
 	if (err)
 		goto fail;
 
-	sdio_release_host(sdiodev->func[1]);
+	sdio_release_host(sdiodev->func1);
 
 	brcmu_pktq_init(&bus->txq, (PRIOMASK + 1), TXQLEN);
 
@@ -3940,7 +3945,7 @@ brcmf_sdio_probe_attach(struct brcmf_sdio *bus)
 	return true;
 
 fail:
-	sdio_release_host(sdiodev->func[1]);
+	sdio_release_host(sdiodev->func1);
 	return false;
 }
 
@@ -4020,22 +4025,21 @@ static void brcmf_sdio_firmware_callback(struct device *dev, int err,
 					 const struct firmware *code,
 					 void *nvram, u32 nvram_len)
 {
-	struct brcmf_bus *bus_if;
-	struct brcmf_sdio_dev *sdiodev;
-	struct brcmf_sdio *bus;
+	struct brcmf_bus *bus_if = dev_get_drvdata(dev);
+	struct brcmf_sdio_dev *sdiodev = bus_if->bus_priv.sdio;
+	struct brcmf_sdio *bus = sdiodev->bus;
+	struct brcmf_sdio_dev *sdiod = bus->sdiodev;
+	struct brcmf_core *core = bus->sdio_core;
 	u8 saveclk;
 
 	brcmf_dbg(TRACE, "Enter: dev=%s, err=%d\n", dev_name(dev), err);
-	bus_if = dev_get_drvdata(dev);
-	sdiodev = bus_if->bus_priv.sdio;
+
 	if (err)
 		goto fail;
 
 	if (!bus_if->drvr)
 		return;
 
-	bus = sdiodev->bus;
-
 	/* try to download image and nvram to the dongle */
 	bus->alp_only = true;
 	err = brcmf_sdio_download_firmware(bus, code, nvram, nvram_len);
@@ -4047,7 +4051,7 @@ static void brcmf_sdio_firmware_callback(struct device *dev, int err,
 	bus->sdcnt.tickcnt = 0;
 	brcmf_sdio_wd_timer(bus, true);
 
-	sdio_claim_host(sdiodev->func[1]);
+	sdio_claim_host(sdiodev->func1);
 
 	/* Make sure backplane clock is on, needed to generate F2 interrupt */
 	brcmf_sdio_clkctl(bus, CLK_AVAIL, false);
@@ -4055,10 +4059,10 @@ static void brcmf_sdio_firmware_callback(struct device *dev, int err,
 		goto release;
 
 	/* Force clocks on backplane to be sure F2 interrupt propagates */
-	saveclk = brcmf_sdiod_regrb(sdiodev, SBSDIO_FUNC1_CHIPCLKCSR, &err);
+	saveclk = brcmf_sdiod_readb(sdiodev, SBSDIO_FUNC1_CHIPCLKCSR, &err);
 	if (!err) {
-		brcmf_sdiod_regwb(sdiodev, SBSDIO_FUNC1_CHIPCLKCSR,
-				  (saveclk | SBSDIO_FORCE_HT), &err);
+		brcmf_sdiod_writeb(sdiodev, SBSDIO_FUNC1_CHIPCLKCSR,
+				   (saveclk | SBSDIO_FORCE_HT), &err);
 	}
 	if (err) {
 		brcmf_err("Failed to force clock for F2: err %d\n", err);
@@ -4066,10 +4070,10 @@ static void brcmf_sdio_firmware_callback(struct device *dev, int err,
 	}
 
 	/* Enable function 2 (frame transfers) */
-	w_sdreg32(bus, SDPCM_PROT_VERSION << SMB_DATA_VERSION_SHIFT,
-		  offsetof(struct sdpcmd_regs, tosbmailboxdata));
-	err = sdio_enable_func(sdiodev->func[SDIO_FUNC_2]);
+	brcmf_sdiod_writel(sdiod, core->base + SD_REG(tosbmailboxdata),
+			   SDPCM_PROT_VERSION << SMB_DATA_VERSION_SHIFT, NULL);
 
+	err = sdio_enable_func(sdiodev->func2);
 
 	brcmf_dbg(INFO, "enable F2: err=%d\n", err);
 
@@ -4077,13 +4081,14 @@ static void brcmf_sdio_firmware_callback(struct device *dev, int err,
 	if (!err) {
 		/* Set up the interrupt mask and enable interrupts */
 		bus->hostintmask = HOSTINTMASK;
-		w_sdreg32(bus, bus->hostintmask,
-			  offsetof(struct sdpcmd_regs, hostintmask));
+		brcmf_sdiod_writel(sdiod, core->base + SD_REG(hostintmask),
+				   bus->hostintmask, NULL);
 
-		brcmf_sdiod_regwb(sdiodev, SBSDIO_WATERMARK, 8, &err);
+		brcmf_sdiod_writeb(sdiodev, SBSDIO_WATERMARK, 8, &err);
 	} else {
 		/* Disable F2 again */
-		sdio_disable_func(sdiodev->func[SDIO_FUNC_2]);
+		sdio_disable_func(sdiodev->func2);
 		goto release;
 	}
 
@@ -4091,8 +4096,8 @@ static void brcmf_sdio_firmware_callback(struct device *dev, int err,
 		brcmf_sdio_sr_init(bus);
 	} else {
 		/* Restore previous clock setting */
-		brcmf_sdiod_regwb(sdiodev, SBSDIO_FUNC1_CHIPCLKCSR,
-				  saveclk, &err);
+		brcmf_sdiod_writeb(sdiodev, SBSDIO_FUNC1_CHIPCLKCSR,
+				   saveclk, &err);
 	}
 
 	if (err == 0) {
@@ -4108,7 +4113,7 @@ static void brcmf_sdio_firmware_callback(struct device *dev, int err,
 	if (err != 0)
 		brcmf_sdio_clkctl(bus, CLK_NONE, false);
 
-	sdio_release_host(sdiodev->func[1]);
+	sdio_release_host(sdiodev->func1);
 
 	err = brcmf_bus_started(dev);
 	if (err != 0) {
@@ -4118,10 +4123,10 @@ static void brcmf_sdio_firmware_callback(struct device *dev, int err,
 	return;
 
 release:
-	sdio_release_host(sdiodev->func[1]);
+	sdio_release_host(sdiodev->func1);
 fail:
 	brcmf_dbg(TRACE, "failed: dev=%s, err=%d\n", dev_name(dev), err);
-	device_release_driver(&sdiodev->func[2]->dev);
+	device_release_driver(&sdiodev->func2->dev);
 	device_release_driver(dev);
 }
 
@@ -4148,7 +4153,7 @@ struct brcmf_sdio *brcmf_sdio_probe(struct brcmf_sdio_dev *sdiodev)
 
 	/* single-threaded workqueue */
 	wq = alloc_ordered_workqueue("brcmf_wq/%s", WQ_MEM_RECLAIM,
-				     dev_name(&sdiodev->func[1]->dev));
+				     dev_name(&sdiodev->func1->dev));
 	if (!wq) {
 		brcmf_err("insufficient memory to create txworkqueue\n");
 		goto fail;
@@ -4174,7 +4179,7 @@ struct brcmf_sdio *brcmf_sdio_probe(struct brcmf_sdio_dev *sdiodev)
 	init_completion(&bus->watchdog_wait);
 	bus->watchdog_tsk = kthread_run(brcmf_sdio_watchdog_thread,
 					bus, "brcmf_wdog/%s",
-					dev_name(&sdiodev->func[1]->dev));
+					dev_name(&sdiodev->func1->dev));
 	if (IS_ERR(bus->watchdog_tsk)) {
 		pr_warn("brcmf_watchdog thread failed to start\n");
 		bus->watchdog_tsk = NULL;
@@ -4200,7 +4205,7 @@ struct brcmf_sdio *brcmf_sdio_probe(struct brcmf_sdio_dev *sdiodev)
 	}
 
 	/* Query the F2 block size, set roundup accordingly */
-	bus->blocksize = bus->sdiodev->func[2]->cur_blksize;
+	bus->blocksize = bus->sdiodev->func2->cur_blksize;
 	bus->roundup = min(max_roundup, bus->blocksize);
 
 	/* Allocate buffers */
@@ -4216,17 +4221,17 @@ struct brcmf_sdio *brcmf_sdio_probe(struct brcmf_sdio_dev *sdiodev)
 		}
 	}
 
-	sdio_claim_host(bus->sdiodev->func[1]);
+	sdio_claim_host(bus->sdiodev->func1);
 
 	/* Disable F2 to clear any intermediate frame state on the dongle */
-	sdio_disable_func(bus->sdiodev->func[SDIO_FUNC_2]);
+	sdio_disable_func(bus->sdiodev->func2);
 
 	bus->rxflow = false;
 
 	/* Done with backplane-dependent accesses, can drop clock... */
-	brcmf_sdiod_regwb(bus->sdiodev, SBSDIO_FUNC1_CHIPCLKCSR, 0, NULL);
+	brcmf_sdiod_writeb(bus->sdiodev, SBSDIO_FUNC1_CHIPCLKCSR, 0, NULL);
 
-	sdio_release_host(bus->sdiodev->func[1]);
+	sdio_release_host(bus->sdiodev->func1);
 
 	/* ...and initialize clock/power states */
 	bus->clkstate = CLK_SDONLY;
@@ -4278,7 +4283,7 @@ void brcmf_sdio_remove(struct brcmf_sdio *bus)
 
 		if (bus->ci) {
 			if (bus->sdiodev->state != BRCMF_SDIOD_NOMEDIUM) {
-				sdio_claim_host(bus->sdiodev->func[1]);
+				sdio_claim_host(bus->sdiodev->func1);
 				brcmf_sdio_wd_timer(bus, false);
 				brcmf_sdio_clkctl(bus, CLK_AVAIL, false);
 				/* Leave the device in state where it is
@@ -4288,7 +4293,7 @@ void brcmf_sdio_remove(struct brcmf_sdio *bus)
 				msleep(20);
 				brcmf_chip_set_passive(bus->ci);
 				brcmf_sdio_clkctl(bus, CLK_NONE, false);
-				sdio_release_host(bus->sdiodev->func[1]);
+				sdio_release_host(bus->sdiodev->func1);
 			}
 			brcmf_chip_detach(bus->ci);
 		}
@@ -4335,9 +4340,9 @@ int brcmf_sdio_sleep(struct brcmf_sdio *bus, bool sleep)
 {
 	int ret;
 
-	sdio_claim_host(bus->sdiodev->func[1]);
+	sdio_claim_host(bus->sdiodev->func1);
 	ret = brcmf_sdio_bus_sleep(bus, sleep, false);
-	sdio_release_host(bus->sdiodev->func[1]);
+	sdio_release_host(bus->sdiodev->func1);
 
 	return ret;
 }
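
The hunks above consistently swap the old func[] array indexing for the
dedicated func1/func2 pointers, and the regrb/regrl/regwb/regwl helpers for
the readb/readl/writeb/writel names declared in sdio.h below. The new
SD_REG() macro condenses the offsetof() expression repeated at each register
access; judging from the call sites, it is presumably defined along these
lines:

	/* Presumed definition, inferred from the call sites above: SD_REG()
	 * wraps the offsetof() expression each caller used to spell out, so
	 * the two forms below compute the same backplane address.
	 */
	#define SD_REG(field) \
		(offsetof(struct sdpcmd_regs, field))

	addr = core->base + offsetof(struct sdpcmd_regs, intstatus); /* old */
	addr = core->base + SD_REG(intstatus);                       /* new */
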
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.h b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.h
index f3da32fc6360..7faed831f07d 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.h
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.h
@@ -21,10 +21,6 @@
 #include <linux/firmware.h>
 #include "firmware.h"
 
-#define SDIO_FUNC_0		0
-#define SDIO_FUNC_1		1
-#define SDIO_FUNC_2		2
-
 #define SDIOD_FBR_SIZE		0x100
 
 /* io_en */
@@ -39,28 +35,29 @@
 #define INTR_STATUS_FUNC1	0x2
 #define INTR_STATUS_FUNC2	0x4
 
-/* Maximum number of I/O funcs */
-#define SDIOD_MAX_IOFUNCS	7
-
 /* mask of register map */
 #define REG_F0_REG_MASK		0x7FF
 #define REG_F1_MISC_MASK	0x1FFFF
 
-/* as of sdiod rev 0, supports 3 functions */
-#define SBSDIO_NUM_FUNCTION		3
-
 /* function 0 vendor specific CCCR registers */
+
 #define SDIO_CCCR_BRCM_CARDCAP			0xf0
-#define SDIO_CCCR_BRCM_CARDCAP_CMD14_SUPPORT	0x02
-#define SDIO_CCCR_BRCM_CARDCAP_CMD14_EXT	0x04
-#define SDIO_CCCR_BRCM_CARDCAP_CMD_NODEC	0x08
-#define SDIO_CCCR_BRCM_CARDCTRL		0xf1
-#define SDIO_CCCR_BRCM_CARDCTRL_WLANRESET	0x02
-#define SDIO_CCCR_BRCM_SEPINT			0xf2
+#define SDIO_CCCR_BRCM_CARDCAP_CMD14_SUPPORT	BIT(1)
+#define SDIO_CCCR_BRCM_CARDCAP_CMD14_EXT	BIT(2)
+#define SDIO_CCCR_BRCM_CARDCAP_CMD_NODEC	BIT(3)
+
+/* Interrupt enable bits for each function */
+#define SDIO_CCCR_IEN_FUNC0			BIT(0)
+#define SDIO_CCCR_IEN_FUNC1			BIT(1)
+#define SDIO_CCCR_IEN_FUNC2			BIT(2)
+
+#define SDIO_CCCR_BRCM_CARDCTRL			0xf1
+#define SDIO_CCCR_BRCM_CARDCTRL_WLANRESET	BIT(1)
 
-#define  SDIO_SEPINT_MASK		0x01
-#define  SDIO_SEPINT_OE			0x02
-#define  SDIO_SEPINT_ACT_HI		0x04
+#define SDIO_CCCR_BRCM_SEPINT			0xf2
+#define SDIO_CCCR_BRCM_SEPINT_MASK		BIT(0)
+#define SDIO_CCCR_BRCM_SEPINT_OE		BIT(1)
+#define SDIO_CCCR_BRCM_SEPINT_ACT_HI		BIT(2)
 
 /* function 1 miscellaneous registers */
 
@@ -131,11 +128,6 @@
 /* with b15, maps to 32-bit SB access */
 #define SBSDIO_SB_ACCESS_2_4B_FLAG	0x08000
 
-/* valid bits in SBSDIO_FUNC1_SBADDRxxx regs */
-
-#define SBSDIO_SBADDRLOW_MASK		0x80	/* Valid bits in SBADDRLOW */
-#define SBSDIO_SBADDRMID_MASK		0xff	/* Valid bits in SBADDRMID */
-#define SBSDIO_SBADDRHIGH_MASK		0xffU	/* Valid bits in SBADDRHIGH */
 /* Address bits from SBADDR regs */
 #define SBSDIO_SBWINDOW_MASK		0xffff8000
 
@@ -178,9 +170,10 @@ struct brcmf_sdio;
 struct brcmf_sdiod_freezer;
 
 struct brcmf_sdio_dev {
-	struct sdio_func *func[SDIO_MAX_FUNCS];
-	u8 num_funcs;			/* Supported funcs on client */
+	struct sdio_func *func1;
+	struct sdio_func *func2;
 	u32 sbwad;			/* Save backplane window address */
+	struct brcmf_core *cc_core;	/* chipcommon core info struct */
 	struct brcmf_sdio *bus;
 	struct device *dev;
 	struct brcmf_bus *bus_if;
@@ -296,13 +289,24 @@ struct sdpcmd_regs {
 int brcmf_sdiod_intr_register(struct brcmf_sdio_dev *sdiodev);
 void brcmf_sdiod_intr_unregister(struct brcmf_sdio_dev *sdiodev);
 
-/* sdio device register access interface */
-u8 brcmf_sdiod_regrb(struct brcmf_sdio_dev *sdiodev, u32 addr, int *ret);
-u32 brcmf_sdiod_regrl(struct brcmf_sdio_dev *sdiodev, u32 addr, int *ret);
-void brcmf_sdiod_regwb(struct brcmf_sdio_dev *sdiodev, u32 addr, u8 data,
-		       int *ret);
-void brcmf_sdiod_regwl(struct brcmf_sdio_dev *sdiodev, u32 addr, u32 data,
-		       int *ret);
+/* SDIO device register access interface */
+/* Accessors for SDIO Function 0 */
+#define brcmf_sdiod_func0_rb(sdiodev, addr, r) \
+	sdio_f0_readb((sdiodev)->func1, (addr), (r))
+
+#define brcmf_sdiod_func0_wb(sdiodev, addr, v, ret) \
+	sdio_f0_writeb((sdiodev)->func1, (v), (addr), (ret))
+
+/* Accessors for SDIO Function 1 */
+#define brcmf_sdiod_readb(sdiodev, addr, r) \
+	sdio_readb((sdiodev)->func1, (addr), (r))
+
+#define brcmf_sdiod_writeb(sdiodev, addr, v, ret) \
+	sdio_writeb((sdiodev)->func1, (v), (addr), (ret))
+
+u32 brcmf_sdiod_readl(struct brcmf_sdio_dev *sdiodev, u32 addr, int *ret);
+void brcmf_sdiod_writel(struct brcmf_sdio_dev *sdiodev, u32 addr, u32 data,
+			int *ret);
 
 /* Buffer transfer to/from device (client) core via cmd53.
  *   fn:       function number
@@ -342,7 +346,8 @@ int brcmf_sdiod_ramrw(struct brcmf_sdio_dev *sdiodev, bool write, u32 address,
 		      u8 *data, uint size);
 
 /* Issue an abort to the specified function */
-int brcmf_sdiod_abort(struct brcmf_sdio_dev *sdiodev, uint fn);
+int brcmf_sdiod_abort(struct brcmf_sdio_dev *sdiodev, struct sdio_func *func);
+
 void brcmf_sdiod_sgtable_alloc(struct brcmf_sdio_dev *sdiodev);
 void brcmf_sdiod_change_state(struct brcmf_sdio_dev *sdiodev,
 			      enum brcmf_sdiod_state state);
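
With this change the byte-wide register helpers are no longer out-of-line
functions but thin macros over the mmc core's sdio_readb()/sdio_writeb(),
pinned to function 1 (function 0 access goes through sdio_f0_readb() and
sdio_f0_writeb(), still issued via the func1 handle). A minimal usage sketch,
mirroring the call pattern seen in sdio.c above; the claim/release pairing
and the error pointer follow the driver's own convention:

	int err = 0;
	u8 clkcsr;

	sdio_claim_host(sdiodev->func1);	/* serialize bus access */
	clkcsr = brcmf_sdiod_readb(sdiodev, SBSDIO_FUNC1_CHIPCLKCSR, &err);
	if (!err)
		brcmf_sdiod_writeb(sdiodev, SBSDIO_FUNC1_CHIPCLKCSR,
				   clkcsr | SBSDIO_FORCE_HT, &err);
	sdio_release_host(sdiodev->func1);
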
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_n.c b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_n.c
index 763e8ba6b178..7e01981bc5c8 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_n.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_n.c
@@ -16049,8 +16049,7 @@ static void wlc_phy_workarounds_nphy_gainctrl(struct brcms_phy *pi)
 		wlc_phy_set_rfseq_nphy(pi, NPHY_RFSEQ_UPDATEGAINU,
 				       rfseq_updategainu_events,
 				       rfseq_updategainu_dlys,
-				       sizeof(rfseq_updategainu_events) /
-				       sizeof(rfseq_updategainu_events[0]));
+				       ARRAY_SIZE(rfseq_updategainu_events));
 
 		mod_phy_reg(pi, 0x153, (0xff << 8), (90 << 8));
 
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phytbl_n.c b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phytbl_n.c
index dbf50ef6cd75..533bd4b0277e 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phytbl_n.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phytbl_n.c
@@ -14,6 +14,7 @@
  * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
  */
 
+#include <linux/kernel.h>
 #include <types.h>
 #include "phytbl_n.h"
 
@@ -4437,109 +4438,39 @@ static const u16 loft_lut_core1_rev0[] = {
 };
 
 const struct phytbl_info mimophytbl_info_rev0_volatile[] = {
-	{&bdi_tbl_rev0, sizeof(bdi_tbl_rev0) / sizeof(bdi_tbl_rev0[0]), 21, 0,
-	 16}
-	,
-	{&pltlut_tbl_rev0, sizeof(pltlut_tbl_rev0) / sizeof(pltlut_tbl_rev0[0]),
-	 20, 0, 32}
-	,
-	{&gainctrl_lut_core0_rev0,
-	 sizeof(gainctrl_lut_core0_rev0) / sizeof(gainctrl_lut_core0_rev0[0]),
-	 26, 192, 32}
-	,
-	{&gainctrl_lut_core1_rev0,
-	 sizeof(gainctrl_lut_core1_rev0) / sizeof(gainctrl_lut_core1_rev0[0]),
-	 27, 192, 32}
-	,
-
-	{&est_pwr_lut_core0_rev0,
-	 sizeof(est_pwr_lut_core0_rev0) / sizeof(est_pwr_lut_core0_rev0[0]), 26,
-	 0, 8}
-	,
-	{&est_pwr_lut_core1_rev0,
-	 sizeof(est_pwr_lut_core1_rev0) / sizeof(est_pwr_lut_core1_rev0[0]), 27,
-	 0, 8}
-	,
-	{&adj_pwr_lut_core0_rev0,
-	 sizeof(adj_pwr_lut_core0_rev0) / sizeof(adj_pwr_lut_core0_rev0[0]), 26,
-	 64, 8}
-	,
-	{&adj_pwr_lut_core1_rev0,
-	 sizeof(adj_pwr_lut_core1_rev0) / sizeof(adj_pwr_lut_core1_rev0[0]), 27,
-	 64, 8}
-	,
-	{&iq_lut_core0_rev0,
-	 sizeof(iq_lut_core0_rev0) / sizeof(iq_lut_core0_rev0[0]), 26, 320, 32}
-	,
-	{&iq_lut_core1_rev0,
-	 sizeof(iq_lut_core1_rev0) / sizeof(iq_lut_core1_rev0[0]), 27, 320, 32}
-	,
-	{&loft_lut_core0_rev0,
-	 sizeof(loft_lut_core0_rev0) / sizeof(loft_lut_core0_rev0[0]), 26, 448,
-	 16}
-	,
-	{&loft_lut_core1_rev0,
-	 sizeof(loft_lut_core1_rev0) / sizeof(loft_lut_core1_rev0[0]), 27, 448,
-	 16}
-	,
+	{&bdi_tbl_rev0, ARRAY_SIZE(bdi_tbl_rev0), 21, 0, 16},
+	{&pltlut_tbl_rev0, ARRAY_SIZE(pltlut_tbl_rev0), 20, 0, 32},
+	{&gainctrl_lut_core0_rev0, ARRAY_SIZE(gainctrl_lut_core0_rev0), 26, 192, 32},
+	{&gainctrl_lut_core1_rev0, ARRAY_SIZE(gainctrl_lut_core1_rev0), 27, 192, 32},
+	{&est_pwr_lut_core0_rev0, ARRAY_SIZE(est_pwr_lut_core0_rev0), 26, 0, 8},
+	{&est_pwr_lut_core1_rev0, ARRAY_SIZE(est_pwr_lut_core1_rev0), 27, 0, 8},
+	{&adj_pwr_lut_core0_rev0, ARRAY_SIZE(adj_pwr_lut_core0_rev0), 26, 64, 8},
+	{&adj_pwr_lut_core1_rev0, ARRAY_SIZE(adj_pwr_lut_core1_rev0), 27, 64, 8},
+	{&iq_lut_core0_rev0, ARRAY_SIZE(iq_lut_core0_rev0), 26, 320, 32},
+	{&iq_lut_core1_rev0, ARRAY_SIZE(iq_lut_core1_rev0), 27, 320, 32},
+	{&loft_lut_core0_rev0, ARRAY_SIZE(loft_lut_core0_rev0), 26, 448, 16},
+	{&loft_lut_core1_rev0, ARRAY_SIZE(loft_lut_core1_rev0), 27, 448, 16},
 };
 
 const struct phytbl_info mimophytbl_info_rev0[] = {
-	{&frame_struct_rev0,
-	 sizeof(frame_struct_rev0) / sizeof(frame_struct_rev0[0]), 10, 0, 32}
-	,
-	{&frame_lut_rev0, sizeof(frame_lut_rev0) / sizeof(frame_lut_rev0[0]),
-	 24, 0, 8}
-	,
-	{&tmap_tbl_rev0, sizeof(tmap_tbl_rev0) / sizeof(tmap_tbl_rev0[0]), 12,
-	 0, 32}
-	,
-	{&tdtrn_tbl_rev0, sizeof(tdtrn_tbl_rev0) / sizeof(tdtrn_tbl_rev0[0]),
-	 14, 0, 32}
-	,
-	{&intlv_tbl_rev0, sizeof(intlv_tbl_rev0) / sizeof(intlv_tbl_rev0[0]),
-	 13, 0, 32}
-	,
-	{&pilot_tbl_rev0, sizeof(pilot_tbl_rev0) / sizeof(pilot_tbl_rev0[0]),
-	 11, 0, 16}
-	,
-	{&tdi_tbl20_ant0_rev0,
-	 sizeof(tdi_tbl20_ant0_rev0) / sizeof(tdi_tbl20_ant0_rev0[0]), 19, 128,
-	 32}
-	,
-	{&tdi_tbl20_ant1_rev0,
-	 sizeof(tdi_tbl20_ant1_rev0) / sizeof(tdi_tbl20_ant1_rev0[0]), 19, 256,
-	 32}
-	,
-	{&tdi_tbl40_ant0_rev0,
-	 sizeof(tdi_tbl40_ant0_rev0) / sizeof(tdi_tbl40_ant0_rev0[0]), 19, 640,
-	 32}
-	,
-	{&tdi_tbl40_ant1_rev0,
-	 sizeof(tdi_tbl40_ant1_rev0) / sizeof(tdi_tbl40_ant1_rev0[0]), 19, 768,
-	 32}
-	,
-	{&chanest_tbl_rev0,
-	 sizeof(chanest_tbl_rev0) / sizeof(chanest_tbl_rev0[0]), 22, 0, 32}
-	,
-	{&mcs_tbl_rev0, sizeof(mcs_tbl_rev0) / sizeof(mcs_tbl_rev0[0]), 18, 0,
-	 8}
-	,
-	{&noise_var_tbl0_rev0,
-	 sizeof(noise_var_tbl0_rev0) / sizeof(noise_var_tbl0_rev0[0]), 16, 0,
-	 32}
-	,
-	{&noise_var_tbl1_rev0,
-	 sizeof(noise_var_tbl1_rev0) / sizeof(noise_var_tbl1_rev0[0]), 16, 128,
-	 32}
-	,
+	{&frame_struct_rev0, ARRAY_SIZE(frame_struct_rev0), 10, 0, 32},
+	{&frame_lut_rev0, ARRAY_SIZE(frame_lut_rev0), 24, 0, 8},
+	{&tmap_tbl_rev0, ARRAY_SIZE(tmap_tbl_rev0), 12, 0, 32},
+	{&tdtrn_tbl_rev0, ARRAY_SIZE(tdtrn_tbl_rev0), 14, 0, 32},
+	{&intlv_tbl_rev0, ARRAY_SIZE(intlv_tbl_rev0), 13, 0, 32},
+	{&pilot_tbl_rev0, ARRAY_SIZE(pilot_tbl_rev0), 11, 0, 16},
+	{&tdi_tbl20_ant0_rev0, ARRAY_SIZE(tdi_tbl20_ant0_rev0), 19, 128, 32},
+	{&tdi_tbl20_ant1_rev0, ARRAY_SIZE(tdi_tbl20_ant1_rev0), 19, 256, 32},
+	{&tdi_tbl40_ant0_rev0, ARRAY_SIZE(tdi_tbl40_ant0_rev0), 19, 640, 32},
+	{&tdi_tbl40_ant1_rev0, ARRAY_SIZE(tdi_tbl40_ant1_rev0), 19, 768, 32},
+	{&chanest_tbl_rev0, ARRAY_SIZE(chanest_tbl_rev0), 22, 0, 32},
+	{&mcs_tbl_rev0, ARRAY_SIZE(mcs_tbl_rev0), 18, 0, 8},
+	{&noise_var_tbl0_rev0, ARRAY_SIZE(noise_var_tbl0_rev0), 16, 0, 32},
+	{&noise_var_tbl1_rev0, ARRAY_SIZE(noise_var_tbl1_rev0), 16, 128, 32},
 };
 
-const u32 mimophytbl_info_sz_rev0 =
-	sizeof(mimophytbl_info_rev0) / sizeof(mimophytbl_info_rev0[0]);
-const u32 mimophytbl_info_sz_rev0_volatile =
-	sizeof(mimophytbl_info_rev0_volatile) /
-	sizeof(mimophytbl_info_rev0_volatile[0]);
+const u32 mimophytbl_info_sz_rev0 = ARRAY_SIZE(mimophytbl_info_rev0);
+const u32 mimophytbl_info_sz_rev0_volatile = ARRAY_SIZE(mimophytbl_info_rev0_volatile);
 
 static const u16 ant_swctrl_tbl_rev3[] = {
 	0x0082,
@@ -9363,132 +9294,53 @@ static const u32 papd_cal_scalars_tbl_core1_rev3[] = {
 };
 
 const struct phytbl_info mimophytbl_info_rev3_volatile[] = {
-	{&ant_swctrl_tbl_rev3,
-	 sizeof(ant_swctrl_tbl_rev3) / sizeof(ant_swctrl_tbl_rev3[0]), 9, 0, 16}
-	,
+	{&ant_swctrl_tbl_rev3, ARRAY_SIZE(ant_swctrl_tbl_rev3), 9, 0, 16},
 };
 
 const struct phytbl_info mimophytbl_info_rev3_volatile1[] = {
-	{&ant_swctrl_tbl_rev3_1,
-	 sizeof(ant_swctrl_tbl_rev3_1) / sizeof(ant_swctrl_tbl_rev3_1[0]), 9, 0,
-	 16}
-	,
+	{&ant_swctrl_tbl_rev3_1, ARRAY_SIZE(ant_swctrl_tbl_rev3_1), 9, 0, 16},
 };
 
 const struct phytbl_info mimophytbl_info_rev3_volatile2[] = {
-	{&ant_swctrl_tbl_rev3_2,
-	 sizeof(ant_swctrl_tbl_rev3_2) / sizeof(ant_swctrl_tbl_rev3_2[0]), 9, 0,
-	 16}
-	,
+	{&ant_swctrl_tbl_rev3_2, ARRAY_SIZE(ant_swctrl_tbl_rev3_2), 9, 0, 16},
 };
 
 const struct phytbl_info mimophytbl_info_rev3_volatile3[] = {
-	{&ant_swctrl_tbl_rev3_3,
-	 sizeof(ant_swctrl_tbl_rev3_3) / sizeof(ant_swctrl_tbl_rev3_3[0]), 9, 0,
-	 16}
-	,
+	{&ant_swctrl_tbl_rev3_3, ARRAY_SIZE(ant_swctrl_tbl_rev3_3), 9, 0, 16},
 };
 
 const struct phytbl_info mimophytbl_info_rev3[] = {
-	{&frame_struct_rev3,
-	 sizeof(frame_struct_rev3) / sizeof(frame_struct_rev3[0]), 10, 0, 32}
-	,
-	{&pilot_tbl_rev3, sizeof(pilot_tbl_rev3) / sizeof(pilot_tbl_rev3[0]),
-	 11, 0, 16}
-	,
-	{&tmap_tbl_rev3, sizeof(tmap_tbl_rev3) / sizeof(tmap_tbl_rev3[0]), 12,
-	 0, 32}
-	,
-	{&intlv_tbl_rev3, sizeof(intlv_tbl_rev3) / sizeof(intlv_tbl_rev3[0]),
-	 13, 0, 32}
-	,
-	{&tdtrn_tbl_rev3, sizeof(tdtrn_tbl_rev3) / sizeof(tdtrn_tbl_rev3[0]),
-	 14, 0, 32}
-	,
-	{&noise_var_tbl_rev3,
-	 sizeof(noise_var_tbl_rev3) / sizeof(noise_var_tbl_rev3[0]), 16, 0, 32}
-	,
-	{&mcs_tbl_rev3, sizeof(mcs_tbl_rev3) / sizeof(mcs_tbl_rev3[0]), 18, 0,
-	 16}
-	,
-	{&tdi_tbl20_ant0_rev3,
-	 sizeof(tdi_tbl20_ant0_rev3) / sizeof(tdi_tbl20_ant0_rev3[0]), 19, 128,
-	 32}
-	,
-	{&tdi_tbl20_ant1_rev3,
-	 sizeof(tdi_tbl20_ant1_rev3) / sizeof(tdi_tbl20_ant1_rev3[0]), 19, 256,
-	 32}
-	,
-	{&tdi_tbl40_ant0_rev3,
-	 sizeof(tdi_tbl40_ant0_rev3) / sizeof(tdi_tbl40_ant0_rev3[0]), 19, 640,
-	 32}
-	,
-	{&tdi_tbl40_ant1_rev3,
-	 sizeof(tdi_tbl40_ant1_rev3) / sizeof(tdi_tbl40_ant1_rev3[0]), 19, 768,
-	 32}
-	,
-	{&pltlut_tbl_rev3, sizeof(pltlut_tbl_rev3) / sizeof(pltlut_tbl_rev3[0]),
-	 20, 0, 32}
-	,
-	{&chanest_tbl_rev3,
-	 sizeof(chanest_tbl_rev3) / sizeof(chanest_tbl_rev3[0]), 22, 0, 32}
-	,
-	{&frame_lut_rev3, sizeof(frame_lut_rev3) / sizeof(frame_lut_rev3[0]),
-	 24, 0, 8}
-	,
-	{&est_pwr_lut_core0_rev3,
-	 sizeof(est_pwr_lut_core0_rev3) / sizeof(est_pwr_lut_core0_rev3[0]), 26,
-	 0, 8}
-	,
-	{&est_pwr_lut_core1_rev3,
-	 sizeof(est_pwr_lut_core1_rev3) / sizeof(est_pwr_lut_core1_rev3[0]), 27,
-	 0, 8}
-	,
-	{&adj_pwr_lut_core0_rev3,
-	 sizeof(adj_pwr_lut_core0_rev3) / sizeof(adj_pwr_lut_core0_rev3[0]), 26,
-	 64, 8}
-	,
-	{&adj_pwr_lut_core1_rev3,
-	 sizeof(adj_pwr_lut_core1_rev3) / sizeof(adj_pwr_lut_core1_rev3[0]), 27,
-	 64, 8}
-	,
-	{&gainctrl_lut_core0_rev3,
-	 sizeof(gainctrl_lut_core0_rev3) / sizeof(gainctrl_lut_core0_rev3[0]),
-	 26, 192, 32}
-	,
-	{&gainctrl_lut_core1_rev3,
-	 sizeof(gainctrl_lut_core1_rev3) / sizeof(gainctrl_lut_core1_rev3[0]),
-	 27, 192, 32}
-	,
-	{&iq_lut_core0_rev3,
-	 sizeof(iq_lut_core0_rev3) / sizeof(iq_lut_core0_rev3[0]), 26, 320, 32}
-	,
-	{&iq_lut_core1_rev3,
-	 sizeof(iq_lut_core1_rev3) / sizeof(iq_lut_core1_rev3[0]), 27, 320, 32}
-	,
-	{&loft_lut_core0_rev3,
-	 sizeof(loft_lut_core0_rev3) / sizeof(loft_lut_core0_rev3[0]), 26, 448,
-	 16}
-	,
-	{&loft_lut_core1_rev3,
-	 sizeof(loft_lut_core1_rev3) / sizeof(loft_lut_core1_rev3[0]), 27, 448,
-	 16}
+	{&frame_struct_rev3, ARRAY_SIZE(frame_struct_rev3), 10, 0, 32},
+	{&pilot_tbl_rev3, ARRAY_SIZE(pilot_tbl_rev3), 11, 0, 16},
+	{&tmap_tbl_rev3, ARRAY_SIZE(tmap_tbl_rev3), 12, 0, 32},
+	{&intlv_tbl_rev3, ARRAY_SIZE(intlv_tbl_rev3), 13, 0, 32},
+	{&tdtrn_tbl_rev3, ARRAY_SIZE(tdtrn_tbl_rev3), 14, 0, 32},
+	{&noise_var_tbl_rev3, ARRAY_SIZE(noise_var_tbl_rev3), 16, 0, 32},
+	{&mcs_tbl_rev3, ARRAY_SIZE(mcs_tbl_rev3), 18, 0, 16},
+	{&tdi_tbl20_ant0_rev3, ARRAY_SIZE(tdi_tbl20_ant0_rev3), 19, 128, 32},
+	{&tdi_tbl20_ant1_rev3, ARRAY_SIZE(tdi_tbl20_ant1_rev3), 19, 256, 32},
+	{&tdi_tbl40_ant0_rev3, ARRAY_SIZE(tdi_tbl40_ant0_rev3), 19, 640, 32},
+	{&tdi_tbl40_ant1_rev3, ARRAY_SIZE(tdi_tbl40_ant1_rev3), 19, 768, 32},
+	{&pltlut_tbl_rev3, ARRAY_SIZE(pltlut_tbl_rev3), 20, 0, 32},
+	{&chanest_tbl_rev3, ARRAY_SIZE(chanest_tbl_rev3), 22, 0, 32},
+	{&frame_lut_rev3, ARRAY_SIZE(frame_lut_rev3), 24, 0, 8},
+	{&est_pwr_lut_core0_rev3, ARRAY_SIZE(est_pwr_lut_core0_rev3), 26, 0, 8},
+	{&est_pwr_lut_core1_rev3, ARRAY_SIZE(est_pwr_lut_core1_rev3), 27, 0, 8},
+	{&adj_pwr_lut_core0_rev3, ARRAY_SIZE(adj_pwr_lut_core0_rev3), 26, 64, 8},
+	{&adj_pwr_lut_core1_rev3, ARRAY_SIZE(adj_pwr_lut_core1_rev3), 27, 64, 8},
+	{&gainctrl_lut_core0_rev3, ARRAY_SIZE(gainctrl_lut_core0_rev3), 26, 192, 32},
+	{&gainctrl_lut_core1_rev3, ARRAY_SIZE(gainctrl_lut_core1_rev3), 27, 192, 32},
+	{&iq_lut_core0_rev3, ARRAY_SIZE(iq_lut_core0_rev3), 26, 320, 32},
+	{&iq_lut_core1_rev3, ARRAY_SIZE(iq_lut_core1_rev3), 27, 320, 32},
+	{&loft_lut_core0_rev3, ARRAY_SIZE(loft_lut_core0_rev3), 26, 448, 16},
+	{&loft_lut_core1_rev3, ARRAY_SIZE(loft_lut_core1_rev3), 27, 448, 16}
 };
 
-const u32 mimophytbl_info_sz_rev3 =
-	sizeof(mimophytbl_info_rev3) / sizeof(mimophytbl_info_rev3[0]);
-const u32 mimophytbl_info_sz_rev3_volatile =
-	sizeof(mimophytbl_info_rev3_volatile) /
-	sizeof(mimophytbl_info_rev3_volatile[0]);
-const u32 mimophytbl_info_sz_rev3_volatile1 =
-	sizeof(mimophytbl_info_rev3_volatile1) /
-	sizeof(mimophytbl_info_rev3_volatile1[0]);
-const u32 mimophytbl_info_sz_rev3_volatile2 =
-	sizeof(mimophytbl_info_rev3_volatile2) /
-	sizeof(mimophytbl_info_rev3_volatile2[0]);
-const u32 mimophytbl_info_sz_rev3_volatile3 =
-	sizeof(mimophytbl_info_rev3_volatile3) /
-	sizeof(mimophytbl_info_rev3_volatile3[0]);
+const u32 mimophytbl_info_sz_rev3 = ARRAY_SIZE(mimophytbl_info_rev3);
+const u32 mimophytbl_info_sz_rev3_volatile = ARRAY_SIZE(mimophytbl_info_rev3_volatile);
+const u32 mimophytbl_info_sz_rev3_volatile1 = ARRAY_SIZE(mimophytbl_info_rev3_volatile1);
+const u32 mimophytbl_info_sz_rev3_volatile2 = ARRAY_SIZE(mimophytbl_info_rev3_volatile2);
+const u32 mimophytbl_info_sz_rev3_volatile3 = ARRAY_SIZE(mimophytbl_info_rev3_volatile3);
 
 static const u32 tmap_tbl_rev7[] = {
 	0x8a88aa80,
@@ -10469,162 +10321,58 @@ static const u32 papd_cal_scalars_tbl_core1_rev7[] = {
 };
 
 const struct phytbl_info mimophytbl_info_rev7[] = {
-	{&frame_struct_rev3,
-	 sizeof(frame_struct_rev3) / sizeof(frame_struct_rev3[0]), 10, 0, 32}
-	,
-	{&pilot_tbl_rev3, sizeof(pilot_tbl_rev3) / sizeof(pilot_tbl_rev3[0]),
-	 11, 0, 16}
-	,
-	{&tmap_tbl_rev7, sizeof(tmap_tbl_rev7) / sizeof(tmap_tbl_rev7[0]), 12,
-	 0, 32}
-	,
-	{&intlv_tbl_rev3, sizeof(intlv_tbl_rev3) / sizeof(intlv_tbl_rev3[0]),
-	 13, 0, 32}
-	,
-	{&tdtrn_tbl_rev3, sizeof(tdtrn_tbl_rev3) / sizeof(tdtrn_tbl_rev3[0]),
-	 14, 0, 32}
-	,
-	{&noise_var_tbl_rev7,
-	 sizeof(noise_var_tbl_rev7) / sizeof(noise_var_tbl_rev7[0]), 16, 0, 32}
-	,
-	{&mcs_tbl_rev3, sizeof(mcs_tbl_rev3) / sizeof(mcs_tbl_rev3[0]), 18, 0,
-	 16}
-	,
-	{&tdi_tbl20_ant0_rev3,
-	 sizeof(tdi_tbl20_ant0_rev3) / sizeof(tdi_tbl20_ant0_rev3[0]), 19, 128,
-	 32}
-	,
-	{&tdi_tbl20_ant1_rev3,
-	 sizeof(tdi_tbl20_ant1_rev3) / sizeof(tdi_tbl20_ant1_rev3[0]), 19, 256,
-	 32}
-	,
-	{&tdi_tbl40_ant0_rev3,
-	 sizeof(tdi_tbl40_ant0_rev3) / sizeof(tdi_tbl40_ant0_rev3[0]), 19, 640,
-	 32}
-	,
-	{&tdi_tbl40_ant1_rev3,
-	 sizeof(tdi_tbl40_ant1_rev3) / sizeof(tdi_tbl40_ant1_rev3[0]), 19, 768,
-	 32}
-	,
-	{&pltlut_tbl_rev3, sizeof(pltlut_tbl_rev3) / sizeof(pltlut_tbl_rev3[0]),
-	 20, 0, 32}
-	,
-	{&chanest_tbl_rev3,
-	 sizeof(chanest_tbl_rev3) / sizeof(chanest_tbl_rev3[0]), 22, 0, 32}
-	,
-	{&frame_lut_rev3, sizeof(frame_lut_rev3) / sizeof(frame_lut_rev3[0]),
-	 24, 0, 8}
-	,
-	{&est_pwr_lut_core0_rev3,
-	 sizeof(est_pwr_lut_core0_rev3) / sizeof(est_pwr_lut_core0_rev3[0]), 26,
-	 0, 8}
-	,
-	{&est_pwr_lut_core1_rev3,
-	 sizeof(est_pwr_lut_core1_rev3) / sizeof(est_pwr_lut_core1_rev3[0]), 27,
-	 0, 8}
-	,
-	{&adj_pwr_lut_core0_rev3,
-	 sizeof(adj_pwr_lut_core0_rev3) / sizeof(adj_pwr_lut_core0_rev3[0]), 26,
-	 64, 8}
-	,
-	{&adj_pwr_lut_core1_rev3,
-	 sizeof(adj_pwr_lut_core1_rev3) / sizeof(adj_pwr_lut_core1_rev3[0]), 27,
-	 64, 8}
-	,
-	{&gainctrl_lut_core0_rev3,
-	 sizeof(gainctrl_lut_core0_rev3) / sizeof(gainctrl_lut_core0_rev3[0]),
-	 26, 192, 32}
-	,
-	{&gainctrl_lut_core1_rev3,
-	 sizeof(gainctrl_lut_core1_rev3) / sizeof(gainctrl_lut_core1_rev3[0]),
-	 27, 192, 32}
-	,
-	{&iq_lut_core0_rev3,
-	 sizeof(iq_lut_core0_rev3) / sizeof(iq_lut_core0_rev3[0]), 26, 320, 32}
-	,
-	{&iq_lut_core1_rev3,
-	 sizeof(iq_lut_core1_rev3) / sizeof(iq_lut_core1_rev3[0]), 27, 320, 32}
-	,
-	{&loft_lut_core0_rev3,
-	 sizeof(loft_lut_core0_rev3) / sizeof(loft_lut_core0_rev3[0]), 26, 448,
-	 16}
-	,
-	{&loft_lut_core1_rev3,
-	 sizeof(loft_lut_core1_rev3) / sizeof(loft_lut_core1_rev3[0]), 27, 448,
-	 16}
-	,
+	{&frame_struct_rev3, ARRAY_SIZE(frame_struct_rev3), 10, 0, 32},
+	{&pilot_tbl_rev3, ARRAY_SIZE(pilot_tbl_rev3), 11, 0, 16},
+	{&tmap_tbl_rev7, ARRAY_SIZE(tmap_tbl_rev7), 12, 0, 32},
+	{&intlv_tbl_rev3, ARRAY_SIZE(intlv_tbl_rev3), 13, 0, 32},
+	{&tdtrn_tbl_rev3, ARRAY_SIZE(tdtrn_tbl_rev3), 14, 0, 32},
+	{&noise_var_tbl_rev7, ARRAY_SIZE(noise_var_tbl_rev7), 16, 0, 32},
+	{&mcs_tbl_rev3, ARRAY_SIZE(mcs_tbl_rev3), 18, 0, 16},
+	{&tdi_tbl20_ant0_rev3, ARRAY_SIZE(tdi_tbl20_ant0_rev3), 19, 128, 32},
+	{&tdi_tbl20_ant1_rev3, ARRAY_SIZE(tdi_tbl20_ant1_rev3), 19, 256, 32},
+	{&tdi_tbl40_ant0_rev3, ARRAY_SIZE(tdi_tbl40_ant0_rev3), 19, 640, 32},
+	{&tdi_tbl40_ant1_rev3, ARRAY_SIZE(tdi_tbl40_ant1_rev3), 19, 768, 32},
+	{&pltlut_tbl_rev3, ARRAY_SIZE(pltlut_tbl_rev3), 20, 0, 32},
+	{&chanest_tbl_rev3, ARRAY_SIZE(chanest_tbl_rev3), 22, 0, 32},
+	{&frame_lut_rev3, ARRAY_SIZE(frame_lut_rev3), 24, 0, 8},
+	{&est_pwr_lut_core0_rev3, ARRAY_SIZE(est_pwr_lut_core0_rev3), 26, 0, 8},
+	{&est_pwr_lut_core1_rev3, ARRAY_SIZE(est_pwr_lut_core1_rev3), 27, 0, 8},
+	{&adj_pwr_lut_core0_rev3, ARRAY_SIZE(adj_pwr_lut_core0_rev3), 26, 64, 8},
+	{&adj_pwr_lut_core1_rev3, ARRAY_SIZE(adj_pwr_lut_core1_rev3), 27, 64, 8},
+	{&gainctrl_lut_core0_rev3, ARRAY_SIZE(gainctrl_lut_core0_rev3), 26, 192, 32},
+	{&gainctrl_lut_core1_rev3, ARRAY_SIZE(gainctrl_lut_core1_rev3), 27, 192, 32},
+	{&iq_lut_core0_rev3, ARRAY_SIZE(iq_lut_core0_rev3), 26, 320, 32},
+	{&iq_lut_core1_rev3, ARRAY_SIZE(iq_lut_core1_rev3), 27, 320, 32},
+	{&loft_lut_core0_rev3, ARRAY_SIZE(loft_lut_core0_rev3), 26, 448, 16},
+	{&loft_lut_core1_rev3, ARRAY_SIZE(loft_lut_core1_rev3), 27, 448, 16},
 	{&papd_comp_rfpwr_tbl_core0_rev3,
-	 sizeof(papd_comp_rfpwr_tbl_core0_rev3) /
-	 sizeof(papd_comp_rfpwr_tbl_core0_rev3[0]), 26, 576, 16}
-	,
+	 ARRAY_SIZE(papd_comp_rfpwr_tbl_core0_rev3), 26, 576, 16},
 	{&papd_comp_rfpwr_tbl_core1_rev3,
-	 sizeof(papd_comp_rfpwr_tbl_core1_rev3) /
-	 sizeof(papd_comp_rfpwr_tbl_core1_rev3[0]), 27, 576, 16}
-	,
+	 ARRAY_SIZE(papd_comp_rfpwr_tbl_core1_rev3), 27, 576, 16},
 	{&papd_comp_epsilon_tbl_core0_rev7,
-	 sizeof(papd_comp_epsilon_tbl_core0_rev7) /
-	 sizeof(papd_comp_epsilon_tbl_core0_rev7[0]), 31, 0, 32}
-	,
+	 ARRAY_SIZE(papd_comp_epsilon_tbl_core0_rev7), 31, 0, 32},
 	{&papd_cal_scalars_tbl_core0_rev7,
-	 sizeof(papd_cal_scalars_tbl_core0_rev7) /
-	 sizeof(papd_cal_scalars_tbl_core0_rev7[0]), 32, 0, 32}
-	,
+	 ARRAY_SIZE(papd_cal_scalars_tbl_core0_rev7), 32, 0, 32},
 	{&papd_comp_epsilon_tbl_core1_rev7,
-	 sizeof(papd_comp_epsilon_tbl_core1_rev7) /
-	 sizeof(papd_comp_epsilon_tbl_core1_rev7[0]), 33, 0, 32}
-	,
+	 ARRAY_SIZE(papd_comp_epsilon_tbl_core1_rev7), 33, 0, 32},
 	{&papd_cal_scalars_tbl_core1_rev7,
-	 sizeof(papd_cal_scalars_tbl_core1_rev7) /
-	 sizeof(papd_cal_scalars_tbl_core1_rev7[0]), 34, 0, 32}
-	,
+	 ARRAY_SIZE(papd_cal_scalars_tbl_core1_rev7), 34, 0, 32},
 };
 
-const u32 mimophytbl_info_sz_rev7 =
-	sizeof(mimophytbl_info_rev7) / sizeof(mimophytbl_info_rev7[0]);
+const u32 mimophytbl_info_sz_rev7 = ARRAY_SIZE(mimophytbl_info_rev7);
 
 const struct phytbl_info mimophytbl_info_rev16[] = {
-	{&noise_var_tbl_rev7,
-	 sizeof(noise_var_tbl_rev7) / sizeof(noise_var_tbl_rev7[0]), 16, 0, 32}
-	,
-	{&est_pwr_lut_core0_rev3,
-	 sizeof(est_pwr_lut_core0_rev3) / sizeof(est_pwr_lut_core0_rev3[0]), 26,
-	 0, 8}
-	,
-	{&est_pwr_lut_core1_rev3,
-	 sizeof(est_pwr_lut_core1_rev3) / sizeof(est_pwr_lut_core1_rev3[0]), 27,
-	 0, 8}
-	,
-	{&adj_pwr_lut_core0_rev3,
-	 sizeof(adj_pwr_lut_core0_rev3) / sizeof(adj_pwr_lut_core0_rev3[0]), 26,
-	 64, 8}
-	,
-	{&adj_pwr_lut_core1_rev3,
-	 sizeof(adj_pwr_lut_core1_rev3) / sizeof(adj_pwr_lut_core1_rev3[0]), 27,
-	 64, 8}
-	,
-	{&gainctrl_lut_core0_rev3,
-	 sizeof(gainctrl_lut_core0_rev3) / sizeof(gainctrl_lut_core0_rev3[0]),
-	 26, 192, 32}
-	,
-	{&gainctrl_lut_core1_rev3,
-	 sizeof(gainctrl_lut_core1_rev3) / sizeof(gainctrl_lut_core1_rev3[0]),
-	 27, 192, 32}
-	,
-	{&iq_lut_core0_rev3,
-	 sizeof(iq_lut_core0_rev3) / sizeof(iq_lut_core0_rev3[0]), 26, 320, 32}
-	,
-	{&iq_lut_core1_rev3,
-	 sizeof(iq_lut_core1_rev3) / sizeof(iq_lut_core1_rev3[0]), 27, 320, 32}
-	,
-	{&loft_lut_core0_rev3,
-	 sizeof(loft_lut_core0_rev3) / sizeof(loft_lut_core0_rev3[0]), 26, 448,
-	 16}
-	,
-	{&loft_lut_core1_rev3,
-	 sizeof(loft_lut_core1_rev3) / sizeof(loft_lut_core1_rev3[0]), 27, 448,
-	 16}
-	,
+	{&noise_var_tbl_rev7, ARRAY_SIZE(noise_var_tbl_rev7), 16, 0, 32},
+	{&est_pwr_lut_core0_rev3, ARRAY_SIZE(est_pwr_lut_core0_rev3), 26, 0, 8},
+	{&est_pwr_lut_core1_rev3, ARRAY_SIZE(est_pwr_lut_core1_rev3), 27, 0, 8},
+	{&adj_pwr_lut_core0_rev3, ARRAY_SIZE(adj_pwr_lut_core0_rev3), 26, 64, 8},
+	{&adj_pwr_lut_core1_rev3, ARRAY_SIZE(adj_pwr_lut_core1_rev3), 27, 64, 8},
+	{&gainctrl_lut_core0_rev3, ARRAY_SIZE(gainctrl_lut_core0_rev3), 26, 192, 32},
+	{&gainctrl_lut_core1_rev3, ARRAY_SIZE(gainctrl_lut_core1_rev3), 27, 192, 32},
+	{&iq_lut_core0_rev3, ARRAY_SIZE(iq_lut_core0_rev3), 26, 320, 32},
+	{&iq_lut_core1_rev3, ARRAY_SIZE(iq_lut_core1_rev3), 27, 320, 32},
+	{&loft_lut_core0_rev3, ARRAY_SIZE(loft_lut_core0_rev3), 26, 448, 16},
+	{&loft_lut_core1_rev3, ARRAY_SIZE(loft_lut_core1_rev3), 27, 448, 16},
 };
 
-const u32 mimophytbl_info_sz_rev16 =
-	sizeof(mimophytbl_info_rev16) / sizeof(mimophytbl_info_rev16[0]);
+const u32 mimophytbl_info_sz_rev16 = ARRAY_SIZE(mimophytbl_info_rev16);
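
The conversions above are purely mechanical: ARRAY_SIZE() from
<linux/kernel.h> (hence the added include) computes the same sizeof quotient,
but it also refuses to compile when handed a plain pointer, which the
open-coded division silently accepts:

	/* <linux/kernel.h> definition: __must_be_array() turns passing a
	 * pointer instead of an array into a build error.
	 */
	#define ARRAY_SIZE(arr) \
		(sizeof(arr) / sizeof((arr)[0]) + __must_be_array(arr))
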
diff --git a/drivers/net/wireless/intel/iwlwifi/Makefile b/drivers/net/wireless/intel/iwlwifi/Makefile
index ff136f299a0a..e6205eae51fd 100644
--- a/drivers/net/wireless/intel/iwlwifi/Makefile
+++ b/drivers/net/wireless/intel/iwlwifi/Makefile
@@ -9,12 +9,13 @@ iwlwifi-objs		+= iwl-phy-db.o iwl-nvm-parse.o
 iwlwifi-objs		+= pcie/drv.o pcie/rx.o pcie/tx.o pcie/trans.o
 iwlwifi-objs		+= pcie/ctxt-info.o pcie/trans-gen2.o pcie/tx-gen2.o
 iwlwifi-$(CONFIG_IWLDVM) += cfg/1000.o cfg/2000.o cfg/5000.o cfg/6000.o
-iwlwifi-$(CONFIG_IWLMVM) += cfg/7000.o cfg/8000.o cfg/9000.o cfg/a000.o
+iwlwifi-$(CONFIG_IWLMVM) += cfg/7000.o cfg/8000.o cfg/9000.o cfg/22000.o
 iwlwifi-objs		+= iwl-trans.o
 iwlwifi-objs		+= fw/notif-wait.o
 iwlwifi-$(CONFIG_IWLMVM) += fw/paging.o fw/smem.o fw/init.o fw/dbg.o
 iwlwifi-$(CONFIG_IWLMVM) += fw/common_rx.o fw/nvm.o
 iwlwifi-$(CONFIG_ACPI) += fw/acpi.o
+iwlwifi-$(CONFIG_IWLWIFI_DEBUGFS) += fw/debugfs.o
 
 iwlwifi-objs += $(iwlwifi-m)
 
diff --git a/drivers/net/wireless/intel/iwlwifi/cfg/22000.c b/drivers/net/wireless/intel/iwlwifi/cfg/22000.c
new file mode 100644
index 000000000000..48f6f80eb24b
--- /dev/null
+++ b/drivers/net/wireless/intel/iwlwifi/cfg/22000.c
@@ -0,0 +1,216 @@
+/******************************************************************************
+ *
+ * This file is provided under a dual BSD/GPLv2 license.  When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2015-2017 Intel Deutschland GmbH
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * BSD LICENSE
+ *
+ * Copyright(c) 2015-2017 Intel Deutschland GmbH
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ *  * Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ *  * Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in
+ *    the documentation and/or other materials provided with the
+ *    distribution.
+ *  * Neither the name Intel Corporation nor the names of its
+ *    contributors may be used to endorse or promote products derived
+ *    from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ *****************************************************************************/
+
+#include <linux/module.h>
+#include <linux/stringify.h>
+#include "iwl-config.h"
+#include "iwl-agn-hw.h"
+
+/* Highest firmware API version supported */
+#define IWL_22000_UCODE_API_MAX	36
+
+/* Lowest firmware API version supported */
+#define IWL_22000_UCODE_API_MIN	24
+
+/* NVM versions */
+#define IWL_22000_NVM_VERSION		0x0a1d
+#define IWL_22000_TX_POWER_VERSION	0xffff /* meaningless */
+
+/* Memory offsets and lengths */
+#define IWL_22000_DCCM_OFFSET		0x800000 /* LMAC1 */
+#define IWL_22000_DCCM_LEN		0x10000 /* LMAC1 */
+#define IWL_22000_DCCM2_OFFSET		0x880000
+#define IWL_22000_DCCM2_LEN		0x8000
+#define IWL_22000_SMEM_OFFSET		0x400000
+#define IWL_22000_SMEM_LEN		0xD0000
+
+#define IWL_22000_JF_FW_PRE	"iwlwifi-Qu-a0-jf-b0-"
+#define IWL_22000_HR_FW_PRE	"iwlwifi-Qu-a0-hr-a0-"
+#define IWL_22000_HR_CDB_FW_PRE	"iwlwifi-QuIcp-z0-hrcdb-a0-"
+#define IWL_22000_HR_F0_FW_PRE	"iwlwifi-QuQnj-f0-hr-a0-"
+#define IWL_22000_JF_B0_FW_PRE	"iwlwifi-QuQnj-a0-jf-b0-"
+#define IWL_22000_HR_A0_FW_PRE	"iwlwifi-QuQnj-a0-hr-a0-"
+
+#define IWL_22000_HR_MODULE_FIRMWARE(api) \
+	IWL_22000_HR_FW_PRE __stringify(api) ".ucode"
+#define IWL_22000_JF_MODULE_FIRMWARE(api) \
+	IWL_22000_JF_FW_PRE __stringify(api) ".ucode"
+#define IWL_22000_HR_F0_QNJ_MODULE_FIRMWARE(api) \
+	IWL_22000_HR_F0_FW_PRE __stringify(api) ".ucode"
+#define IWL_22000_JF_B0_QNJ_MODULE_FIRMWARE(api) \
+	IWL_22000_JF_B0_FW_PRE __stringify(api) ".ucode"
+#define IWL_22000_HR_A0_QNJ_MODULE_FIRMWARE(api) \
+	IWL_22000_HR_A0_FW_PRE __stringify(api) ".ucode"
+
+#define NVM_HW_SECTION_NUM_FAMILY_22000		10
+
+static const struct iwl_base_params iwl_22000_base_params = {
+	.eeprom_size = OTP_LOW_IMAGE_SIZE_FAMILY_22000,
+	.num_of_queues = 512,
+	.shadow_ram_support = true,
+	.led_compensation = 57,
+	.wd_timeout = IWL_LONG_WD_TIMEOUT,
+	.max_event_log_size = 512,
+	.shadow_reg_enable = true,
+	.pcie_l1_allowed = true,
+};
+
+static const struct iwl_ht_params iwl_22000_ht_params = {
+	.stbc = true,
+	.ldpc = true,
+	.ht40_bands = BIT(NL80211_BAND_2GHZ) | BIT(NL80211_BAND_5GHZ),
+};
+
+#define IWL_DEVICE_22000						\
+	.ucode_api_max = IWL_22000_UCODE_API_MAX,			\
+	.ucode_api_min = IWL_22000_UCODE_API_MIN,			\
+	.device_family = IWL_DEVICE_FAMILY_22000,			\
+	.max_inst_size = IWL60_RTC_INST_SIZE,				\
+	.max_data_size = IWL60_RTC_DATA_SIZE,				\
+	.base_params = &iwl_22000_base_params,				\
+	.led_mode = IWL_LED_RF_STATE,					\
+	.nvm_hw_section_num = NVM_HW_SECTION_NUM_FAMILY_22000,		\
+	.non_shared_ant = ANT_A,					\
+	.dccm_offset = IWL_22000_DCCM_OFFSET,				\
+	.dccm_len = IWL_22000_DCCM_LEN,					\
+	.dccm2_offset = IWL_22000_DCCM2_OFFSET,				\
+	.dccm2_len = IWL_22000_DCCM2_LEN,				\
+	.smem_offset = IWL_22000_SMEM_OFFSET,				\
+	.smem_len = IWL_22000_SMEM_LEN,					\
+	.features = IWL_TX_CSUM_NETIF_FLAGS | NETIF_F_RXCSUM,		\
+	.apmg_not_supported = true,					\
+	.mq_rx_supported = true,					\
+	.vht_mu_mimo_supported = true,					\
+	.mac_addr_from_csr = true,					\
+	.use_tfh = true,						\
+	.rf_id = true,							\
+	.gen2 = true,							\
+	.nvm_type = IWL_NVM_EXT,					\
+	.dbgc_supported = true,						\
+	.tx_cmd_queue_size = 32,					\
+	.min_umac_error_event_table = 0x400000
+
+const struct iwl_cfg iwl22000_2ac_cfg_hr = {
+	.name = "Intel(R) Dual Band Wireless AC 22000",
+	.fw_name_pre = IWL_22000_HR_FW_PRE,
+	IWL_DEVICE_22000,
+	.ht_params = &iwl_22000_ht_params,
+	.nvm_ver = IWL_22000_NVM_VERSION,
+	.nvm_calib_ver = IWL_22000_TX_POWER_VERSION,
+	.max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K,
+};
+
+const struct iwl_cfg iwl22000_2ac_cfg_hr_cdb = {
+	.name = "Intel(R) Dual Band Wireless AC 22000",
+	.fw_name_pre = IWL_22000_HR_CDB_FW_PRE,
+	IWL_DEVICE_22000,
+	.ht_params = &iwl_22000_ht_params,
+	.nvm_ver = IWL_22000_NVM_VERSION,
+	.nvm_calib_ver = IWL_22000_TX_POWER_VERSION,
+	.max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K,
+	.cdb = true,
+};
+
+const struct iwl_cfg iwl22000_2ac_cfg_jf = {
+	.name = "Intel(R) Dual Band Wireless AC 22000",
+	.fw_name_pre = IWL_22000_JF_FW_PRE,
+	IWL_DEVICE_22000,
+	.ht_params = &iwl_22000_ht_params,
+	.nvm_ver = IWL_22000_NVM_VERSION,
+	.nvm_calib_ver = IWL_22000_TX_POWER_VERSION,
+	.max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K,
+};
+
+const struct iwl_cfg iwl22000_2ax_cfg_hr = {
+	.name = "Intel(R) Dual Band Wireless AX 22000",
+	.fw_name_pre = IWL_22000_HR_FW_PRE,
+	IWL_DEVICE_22000,
+	.ht_params = &iwl_22000_ht_params,
+	.nvm_ver = IWL_22000_NVM_VERSION,
+	.nvm_calib_ver = IWL_22000_TX_POWER_VERSION,
+	.max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K,
+};
+
+const struct iwl_cfg iwl22000_2ax_cfg_qnj_hr_f0 = {
+	.name = "Intel(R) Dual Band Wireless AX 22000",
+	.fw_name_pre = IWL_22000_HR_F0_FW_PRE,
+	IWL_DEVICE_22000,
+	.ht_params = &iwl_22000_ht_params,
+	.nvm_ver = IWL_22000_NVM_VERSION,
+	.nvm_calib_ver = IWL_22000_TX_POWER_VERSION,
+	.max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K,
+};
+
+const struct iwl_cfg iwl22000_2ax_cfg_qnj_jf_b0 = {
+	.name = "Intel(R) Dual Band Wireless AX 22000",
+	.fw_name_pre = IWL_22000_JF_B0_FW_PRE,
+	IWL_DEVICE_22000,
+	.ht_params = &iwl_22000_ht_params,
+	.nvm_ver = IWL_22000_NVM_VERSION,
+	.nvm_calib_ver = IWL_22000_TX_POWER_VERSION,
+	.max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K,
+};
+
+const struct iwl_cfg iwl22000_2ax_cfg_qnj_hr_a0 = {
+	.name = "Intel(R) Dual Band Wireless AX 22000",
+	.fw_name_pre = IWL_22000_HR_A0_FW_PRE,
+	IWL_DEVICE_22000,
+	.ht_params = &iwl_22000_ht_params,
+	.nvm_ver = IWL_22000_NVM_VERSION,
+	.nvm_calib_ver = IWL_22000_TX_POWER_VERSION,
+	.max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K,
+};
+
+MODULE_FIRMWARE(IWL_22000_HR_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX));
+MODULE_FIRMWARE(IWL_22000_JF_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX));
+MODULE_FIRMWARE(IWL_22000_HR_F0_QNJ_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX));
+MODULE_FIRMWARE(IWL_22000_JF_B0_QNJ_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX));
+MODULE_FIRMWARE(IWL_22000_HR_A0_QNJ_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX));
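The firmware-name macros above concatenate the device prefix, the stringified API level, and the ".ucode" suffix. A minimal standalone sketch of the same expansion, assuming an API level of 36 for illustration (the value the 8000/9000 families are bumped to below); __stringify() is re-implemented locally:

#include <stdio.h>

/* local re-implementation of the kernel's __stringify() */
#define __stringify_1(x)	#x
#define __stringify(x)		__stringify_1(x)

#define IWL_22000_HR_FW_PRE	"iwlwifi-Qu-a0-hr-a0-"
#define IWL_22000_HR_MODULE_FIRMWARE(api) \
	IWL_22000_HR_FW_PRE __stringify(api) ".ucode"

int main(void)
{
	/* adjacent string literals concatenate: "iwlwifi-Qu-a0-hr-a0-36.ucode" */
	puts(IWL_22000_HR_MODULE_FIRMWARE(36));
	return 0;
}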
diff --git a/drivers/net/wireless/intel/iwlwifi/cfg/8000.c b/drivers/net/wireless/intel/iwlwifi/cfg/8000.c
index 9bb7c19d48eb..3f4d9bac9f73 100644
--- a/drivers/net/wireless/intel/iwlwifi/cfg/8000.c
+++ b/drivers/net/wireless/intel/iwlwifi/cfg/8000.c
@@ -70,8 +70,8 @@
 #include "iwl-agn-hw.h"
 
 /* Highest firmware API version supported */
-#define IWL8000_UCODE_API_MAX	34
-#define IWL8265_UCODE_API_MAX	34
+#define IWL8000_UCODE_API_MAX	36
+#define IWL8265_UCODE_API_MAX	36
 
 /* Lowest firmware API version supported */
 #define IWL8000_UCODE_API_MIN	22
diff --git a/drivers/net/wireless/intel/iwlwifi/cfg/9000.c b/drivers/net/wireless/intel/iwlwifi/cfg/9000.c
index e7e75b458005..90a1d14cf7d2 100644
--- a/drivers/net/wireless/intel/iwlwifi/cfg/9000.c
+++ b/drivers/net/wireless/intel/iwlwifi/cfg/9000.c
@@ -55,7 +55,7 @@
 #include "iwl-agn-hw.h"
 
 /* Highest firmware API version supported */
-#define IWL9000_UCODE_API_MAX	34
+#define IWL9000_UCODE_API_MAX	36
 
 /* Lowest firmware API version supported */
 #define IWL9000_UCODE_API_MIN	30
diff --git a/drivers/net/wireless/intel/iwlwifi/cfg/a000.c b/drivers/net/wireless/intel/iwlwifi/cfg/a000.c
deleted file mode 100644
index 705f83b02e13..000000000000
--- a/drivers/net/wireless/intel/iwlwifi/cfg/a000.c
+++ /dev/null
@@ -1,216 +0,0 @@
-/******************************************************************************
- *
- * This file is provided under a dual BSD/GPLv2 license.  When using or
- * redistributing this file, you may do so under either license.
- *
- * GPL LICENSE SUMMARY
- *
- * Copyright(c) 2015-2017 Intel Deutschland GmbH
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
- * General Public License for more details.
- *
- * BSD LICENSE
- *
- * Copyright(c) 2015-2017 Intel Deutschland GmbH
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- *  * Redistributions of source code must retain the above copyright
- *    notice, this list of conditions and the following disclaimer.
- *  * Redistributions in binary form must reproduce the above copyright
- *    notice, this list of conditions and the following disclaimer in
- *    the documentation and/or other materials provided with the
- *    distribution.
- *  * Neither the name Intel Corporation nor the names of its
- *    contributors may be used to endorse or promote products derived
- *    from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
- *****************************************************************************/
-
-#include <linux/module.h>
-#include <linux/stringify.h>
-#include "iwl-config.h"
-#include "iwl-agn-hw.h"
-
-/* Highest firmware API version supported */
-#define IWL_A000_UCODE_API_MAX	34
-
-/* Lowest firmware API version supported */
-#define IWL_A000_UCODE_API_MIN	24
-
-/* NVM versions */
-#define IWL_A000_NVM_VERSION		0x0a1d
-#define IWL_A000_TX_POWER_VERSION	0xffff /* meaningless */
-
-/* Memory offsets and lengths */
-#define IWL_A000_DCCM_OFFSET		0x800000 /* LMAC1 */
-#define IWL_A000_DCCM_LEN		0x10000 /* LMAC1 */
-#define IWL_A000_DCCM2_OFFSET		0x880000
-#define IWL_A000_DCCM2_LEN		0x8000
-#define IWL_A000_SMEM_OFFSET		0x400000
-#define IWL_A000_SMEM_LEN		0xD0000
-
-#define IWL_A000_JF_FW_PRE	"iwlwifi-Qu-a0-jf-b0-"
-#define IWL_A000_HR_FW_PRE	"iwlwifi-Qu-a0-hr-a0-"
-#define IWL_A000_HR_CDB_FW_PRE	"iwlwifi-QuIcp-z0-hrcdb-a0-"
-#define IWL_A000_HR_F0_FW_PRE	"iwlwifi-QuQnj-f0-hr-a0-"
-#define IWL_A000_JF_B0_FW_PRE	"iwlwifi-QuQnj-a0-jf-b0-"
-#define IWL_A000_HR_A0_FW_PRE	"iwlwifi-QuQnj-a0-hr-a0-"
-
-#define IWL_A000_HR_MODULE_FIRMWARE(api) \
-	IWL_A000_HR_FW_PRE __stringify(api) ".ucode"
-#define IWL_A000_JF_MODULE_FIRMWARE(api) \
-	IWL_A000_JF_FW_PRE __stringify(api) ".ucode"
-#define IWL_A000_HR_F0_QNJ_MODULE_FIRMWARE(api) \
-	IWL_A000_HR_F0_FW_PRE __stringify(api) ".ucode"
-#define IWL_A000_JF_B0_QNJ_MODULE_FIRMWARE(api) \
-	IWL_A000_JF_B0_FW_PRE __stringify(api) ".ucode"
-#define IWL_A000_HR_A0_QNJ_MODULE_FIRMWARE(api) \
-	IWL_A000_HR_A0_FW_PRE __stringify(api) ".ucode"
-
-#define NVM_HW_SECTION_NUM_FAMILY_A000		10
-
-static const struct iwl_base_params iwl_a000_base_params = {
-	.eeprom_size = OTP_LOW_IMAGE_SIZE_FAMILY_A000,
-	.num_of_queues = 512,
-	.shadow_ram_support = true,
-	.led_compensation = 57,
-	.wd_timeout = IWL_LONG_WD_TIMEOUT,
-	.max_event_log_size = 512,
-	.shadow_reg_enable = true,
-	.pcie_l1_allowed = true,
-};
-
-static const struct iwl_ht_params iwl_a000_ht_params = {
-	.stbc = true,
-	.ldpc = true,
-	.ht40_bands = BIT(NL80211_BAND_2GHZ) | BIT(NL80211_BAND_5GHZ),
-};
-
-#define IWL_DEVICE_A000							\
-	.ucode_api_max = IWL_A000_UCODE_API_MAX,			\
-	.ucode_api_min = IWL_A000_UCODE_API_MIN,			\
-	.device_family = IWL_DEVICE_FAMILY_A000,			\
-	.max_inst_size = IWL60_RTC_INST_SIZE,				\
-	.max_data_size = IWL60_RTC_DATA_SIZE,				\
-	.base_params = &iwl_a000_base_params,				\
-	.led_mode = IWL_LED_RF_STATE,					\
-	.nvm_hw_section_num = NVM_HW_SECTION_NUM_FAMILY_A000,		\
-	.non_shared_ant = ANT_A,					\
-	.dccm_offset = IWL_A000_DCCM_OFFSET,				\
-	.dccm_len = IWL_A000_DCCM_LEN,					\
-	.dccm2_offset = IWL_A000_DCCM2_OFFSET,				\
-	.dccm2_len = IWL_A000_DCCM2_LEN,				\
-	.smem_offset = IWL_A000_SMEM_OFFSET,				\
-	.smem_len = IWL_A000_SMEM_LEN,					\
-	.features = IWL_TX_CSUM_NETIF_FLAGS | NETIF_F_RXCSUM,		\
-	.apmg_not_supported = true,					\
-	.mq_rx_supported = true,					\
-	.vht_mu_mimo_supported = true,					\
-	.mac_addr_from_csr = true,					\
-	.use_tfh = true,						\
-	.rf_id = true,							\
-	.gen2 = true,							\
-	.nvm_type = IWL_NVM_EXT,					\
-	.dbgc_supported = true,						\
-	.tx_cmd_queue_size = 32,					\
-	.min_umac_error_event_table = 0x400000
-
-const struct iwl_cfg iwla000_2ac_cfg_hr = {
-	.name = "Intel(R) Dual Band Wireless AC a000",
-	.fw_name_pre = IWL_A000_HR_FW_PRE,
-	IWL_DEVICE_A000,
-	.ht_params = &iwl_a000_ht_params,
-	.nvm_ver = IWL_A000_NVM_VERSION,
-	.nvm_calib_ver = IWL_A000_TX_POWER_VERSION,
-	.max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K,
-};
-
-const struct iwl_cfg iwla000_2ac_cfg_hr_cdb = {
-	.name = "Intel(R) Dual Band Wireless AC a000",
-	.fw_name_pre = IWL_A000_HR_CDB_FW_PRE,
-	IWL_DEVICE_A000,
-	.ht_params = &iwl_a000_ht_params,
-	.nvm_ver = IWL_A000_NVM_VERSION,
-	.nvm_calib_ver = IWL_A000_TX_POWER_VERSION,
-	.max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K,
-	.cdb = true,
-};
-
-const struct iwl_cfg iwla000_2ac_cfg_jf = {
-	.name = "Intel(R) Dual Band Wireless AC a000",
-	.fw_name_pre = IWL_A000_JF_FW_PRE,
-	IWL_DEVICE_A000,
-	.ht_params = &iwl_a000_ht_params,
-	.nvm_ver = IWL_A000_NVM_VERSION,
-	.nvm_calib_ver = IWL_A000_TX_POWER_VERSION,
-	.max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K,
-};
-
-const struct iwl_cfg iwla000_2ax_cfg_hr = {
-	.name = "Intel(R) Dual Band Wireless AX a000",
-	.fw_name_pre = IWL_A000_HR_FW_PRE,
-	IWL_DEVICE_A000,
-	.ht_params = &iwl_a000_ht_params,
-	.nvm_ver = IWL_A000_NVM_VERSION,
-	.nvm_calib_ver = IWL_A000_TX_POWER_VERSION,
-	.max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K,
-};
-
-const struct iwl_cfg iwla000_2ax_cfg_qnj_hr_f0 = {
-	.name = "Intel(R) Dual Band Wireless AX a000",
-	.fw_name_pre = IWL_A000_HR_F0_FW_PRE,
-	IWL_DEVICE_A000,
-	.ht_params = &iwl_a000_ht_params,
-	.nvm_ver = IWL_A000_NVM_VERSION,
-	.nvm_calib_ver = IWL_A000_TX_POWER_VERSION,
-	.max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K,
-};
-
-const struct iwl_cfg iwla000_2ax_cfg_qnj_jf_b0 = {
-	.name = "Intel(R) Dual Band Wireless AX a000",
-	.fw_name_pre = IWL_A000_JF_B0_FW_PRE,
-	IWL_DEVICE_A000,
-	.ht_params = &iwl_a000_ht_params,
-	.nvm_ver = IWL_A000_NVM_VERSION,
-	.nvm_calib_ver = IWL_A000_TX_POWER_VERSION,
-	.max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K,
-};
-
-const struct iwl_cfg iwla000_2ax_cfg_qnj_hr_a0 = {
-	.name = "Intel(R) Dual Band Wireless AX a000",
-	.fw_name_pre = IWL_A000_HR_A0_FW_PRE,
-	IWL_DEVICE_A000,
-	.ht_params = &iwl_a000_ht_params,
-	.nvm_ver = IWL_A000_NVM_VERSION,
-	.nvm_calib_ver = IWL_A000_TX_POWER_VERSION,
-	.max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K,
-};
-
-MODULE_FIRMWARE(IWL_A000_HR_MODULE_FIRMWARE(IWL_A000_UCODE_API_MAX));
-MODULE_FIRMWARE(IWL_A000_JF_MODULE_FIRMWARE(IWL_A000_UCODE_API_MAX));
-MODULE_FIRMWARE(IWL_A000_HR_F0_QNJ_MODULE_FIRMWARE(IWL_A000_UCODE_API_MAX));
-MODULE_FIRMWARE(IWL_A000_JF_B0_QNJ_MODULE_FIRMWARE(IWL_A000_UCODE_API_MAX));
-MODULE_FIRMWARE(IWL_A000_HR_A0_QNJ_MODULE_FIRMWARE(IWL_A000_UCODE_API_MAX));
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/alive.h b/drivers/net/wireless/intel/iwlwifi/fw/api/alive.h
index 3684a3e180e5..007bfe7656a4 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/api/alive.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/api/alive.h
@@ -95,8 +95,8 @@ enum {
 #define IWL_ALIVE_FLG_RFKILL	BIT(0)
 
 struct iwl_lmac_alive {
-	__le32 ucode_minor;
 	__le32 ucode_major;
+	__le32 ucode_minor;
 	u8 ver_subtype;
 	u8 ver_type;
 	u8 mac;
@@ -113,8 +113,8 @@ struct iwl_lmac_alive {
 } __packed; /* UCODE_ALIVE_NTFY_API_S_VER_3 */
 
 struct iwl_umac_alive {
-	__le32 umac_minor;		/* UMAC version: minor */
 	__le32 umac_major;		/* UMAC version: major */
+	__le32 umac_minor;		/* UMAC version: minor */
 	__le32 error_info_addr;		/* SRAM address for UMAC error log */
 	__le32 dbg_print_buff_addr;
 } __packed; /* UMAC_ALIVE_DATA_API_S_VER_2 */
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/coex.h b/drivers/net/wireless/intel/iwlwifi/fw/api/coex.h
index d09555afe2c5..87c1ddea75ae 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/api/coex.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/api/coex.h
@@ -188,11 +188,6 @@ enum iwl_bt_mxbox_dw3 {
 	BT_MBOX(3, UPDATE_REQUEST, 21, 1),
 };
 
-enum iwl_bt_mxbox_dw4 {
-	BT_MBOX(4, ATS_BT_INTERVAL, 0, 7),
-	BT_MBOX(4, ATS_BT_ACTIVE_MAX_TH, 7, 7),
-};
-
 #define BT_MBOX_MSG(_notif, _num, _field)				     \
 	((le32_to_cpu((_notif)->mbox_msg[(_num)]) & BT_MBOX##_num##_##_field)\
 	>> BT_MBOX##_num##_##_field##_POS)
@@ -232,31 +227,6 @@ enum iwl_bt_ci_compliance {
  * @reserved: reserved
  */
 struct iwl_bt_coex_profile_notif {
-	__le32 mbox_msg[8];
-	__le32 msg_idx;
-	__le32 bt_ci_compliance;
-
-	__le32 primary_ch_lut;
-	__le32 secondary_ch_lut;
-	__le32 bt_activity_grading;
-	u8 ttc_status;
-	u8 rrc_status;
-	__le16 reserved;
-} __packed; /* BT_COEX_PROFILE_NTFY_API_S_VER_5 */
-
-/**
- * struct iwl_bt_coex_profile_notif - notification about BT coex
- * @mbox_msg: message from BT to WiFi
- * @msg_idx: the index of the message
- * @bt_ci_compliance: enum %iwl_bt_ci_compliance
- * @primary_ch_lut: LUT used for primary channel &enum iwl_bt_coex_lut_type
- * @secondary_ch_lut: LUT used for secondary channel &enum iwl_bt_coex_lut_type
- * @bt_activity_grading: the activity of BT &enum iwl_bt_activity_grading
- * @ttc_status: is TTC enabled - one bit per PHY
- * @rrc_status: is RRC enabled - one bit per PHY
- * @reserved: reserved
- */
-struct iwl_bt_coex_profile_notif_v4 {
 	__le32 mbox_msg[4];
 	__le32 msg_idx;
 	__le32 bt_ci_compliance;
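With the v4 layout promoted to be the only notification format, the BT_MBOX_MSG() accessor kept in context above is how consumers pick fields out of the mailbox words. A hedged kernel-context sketch, assuming BT_MBOX(3, UPDATE_REQUEST, 21, 1) generates the usual mask and position constants:

/* sketch, not part of this patch: read the dword-3 UPDATE_REQUEST bit
 * from a received notification; BT_MBOX_MSG() masks the little-endian
 * word and shifts the field down to bit 0.
 */
static bool iwl_bt_update_requested(const struct iwl_bt_coex_profile_notif *notif)
{
	return BT_MBOX_MSG(notif, 3, UPDATE_REQUEST);
}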
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/commands.h b/drivers/net/wireless/intel/iwlwifi/fw/api/commands.h
index 7ebbf097488b..f285bacc8726 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/api/commands.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/api/commands.h
@@ -215,7 +215,7 @@ enum iwl_legacy_cmds {
 	/**
 	 * @SCD_QUEUE_CFG: &struct iwl_scd_txq_cfg_cmd for older hardware,
 	 *	&struct iwl_tx_queue_cfg_cmd with &struct iwl_tx_queue_cfg_rsp
-	 *	for newer (A000) hardware.
+	 *	for newer (22000) hardware.
 	 */
 	SCD_QUEUE_CFG = 0x1d,
 
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/datapath.h b/drivers/net/wireless/intel/iwlwifi/fw/api/datapath.h
index aa76dcc148bd..a57c7223df0f 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/api/datapath.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/api/datapath.h
@@ -83,6 +83,21 @@ enum iwl_data_path_subcmd_ids {
 	TRIGGER_RX_QUEUES_NOTIF_CMD = 0x2,
 
 	/**
+	 * @TLC_MNG_CONFIG_CMD: &struct iwl_tlc_config_cmd
+	 */
+	TLC_MNG_CONFIG_CMD = 0xF,
+
+	/**
+	 * @TLC_MNG_NOTIF_REQ_CMD: &struct iwl_tlc_notif_req_config_cmd
+	 */
+	TLC_MNG_NOTIF_REQ_CMD = 0x10,
+
+	/**
+	 * @TLC_MNG_UPDATE_NOTIF: &struct iwl_tlc_update_notif
+	 */
+	TLC_MNG_UPDATE_NOTIF = 0xF7,
+
+	/**
 	 * @STA_PM_NOTIF: &struct iwl_mvm_pm_state_notification
 	 */
 	STA_PM_NOTIF = 0xFD,
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/debug.h b/drivers/net/wireless/intel/iwlwifi/fw/api/debug.h
index 0a81fb1b6ed4..106782341544 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/api/debug.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/api/debug.h
@@ -250,10 +250,12 @@ struct iwl_mfu_assert_dump_notif {
  * The ids for different type of markers to insert into the usniffer logs
  *
  * @MARKER_ID_TX_FRAME_LATENCY: TX latency marker
+ * @MARKER_ID_SYNC_CLOCK: sync FW time and systime
  */
 enum iwl_mvm_marker_id {
 	MARKER_ID_TX_FRAME_LATENCY = 1,
-}; /* MARKER_ID_API_E_VER_1 */
+	MARKER_ID_SYNC_CLOCK = 2,
+}; /* MARKER_ID_API_E_VER_2 */
 
 /**
  * struct iwl_mvm_marker - mark info into the usniffer logs
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/mac-cfg.h b/drivers/net/wireless/intel/iwlwifi/fw/api/mac-cfg.h
index ec42c84e5df2..17c7ef1662a9 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/api/mac-cfg.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/api/mac-cfg.h
@@ -68,6 +68,10 @@
  */
 enum iwl_mac_conf_subcmd_ids {
 	/**
+	 * @LOW_LATENCY_CMD: &struct iwl_mac_low_latency_cmd
+	 */
+	LOW_LATENCY_CMD = 0x3,
+	/**
 	 * @CHANNEL_SWITCH_NOA_NOTIF: &struct iwl_channel_switch_noa_notif
 	 */
 	CHANNEL_SWITCH_NOA_NOTIF = 0xFF,
@@ -82,4 +86,19 @@ struct iwl_channel_switch_noa_notif {
 	__le32 id_and_color;
 } __packed; /* CHANNEL_SWITCH_START_NTFY_API_S_VER_1 */
 
+/**
+ * struct iwl_mac_low_latency_cmd - set/clear mac to 'low-latency mode'
+ *
+ * @mac_id: MAC ID to whom to apply the low-latency configurations
+ * @low_latency_rx: 1/0 to set/clear Rx low latency direction
+ * @low_latency_tx: 1/0 to set/clear Tx low latency direction
+ * @reserved: reserved for alignment purposes
+ */
+struct iwl_mac_low_latency_cmd {
+	__le32 mac_id;
+	u8 low_latency_rx;
+	u8 low_latency_tx;
+	__le16 reserved;
+} __packed; /* MAC_LOW_LATENCY_API_S_VER_1 */
+
 #endif /* __iwl_fw_api_mac_cfg_h__ */
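A hedged sketch of filling the new low-latency command; the MAC id is a placeholder, and how the payload is queued under LOW_LATENCY_CMD is outside this header:

/* sketch: request Tx-only low latency for one MAC */
struct iwl_mac_low_latency_cmd cmd = {
	.mac_id = cpu_to_le32(0),	/* placeholder MAC id */
	.low_latency_rx = 0,
	.low_latency_tx = 1,		/* Tx direction only */
};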
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/rs.h b/drivers/net/wireless/intel/iwlwifi/fw/api/rs.h
index a13fd8a1be62..e49a6f7be613 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/api/rs.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/api/rs.h
@@ -62,6 +62,267 @@
 
 #include "mac.h"
 
+/**
+ * enum iwl_tlc_mng_cfg_flags_enum - options for TLC config flags
+ * @IWL_TLC_MNG_CFG_FLAGS_CCK_MSK: CCK support
+ * @IWL_TLC_MNG_CFG_FLAGS_DD_MSK: enable DD
+ * @IWL_TLC_MNG_CFG_FLAGS_STBC_MSK: enable STBC
+ * @IWL_TLC_MNG_CFG_FLAGS_LDPC_MSK: enable LDPC
+ * @IWL_TLC_MNG_CFG_FLAGS_BF_MSK: enable BFER
+ * @IWL_TLC_MNG_CFG_FLAGS_DCM_MSK: enable DCM
+ */
+enum iwl_tlc_mng_cfg_flags {
+	IWL_TLC_MNG_CFG_FLAGS_CCK_MSK	= BIT(0),
+	IWL_TLC_MNG_CFG_FLAGS_DD_MSK	= BIT(1),
+	IWL_TLC_MNG_CFG_FLAGS_STBC_MSK	= BIT(2),
+	IWL_TLC_MNG_CFG_FLAGS_LDPC_MSK	= BIT(3),
+	IWL_TLC_MNG_CFG_FLAGS_BF_MSK	= BIT(4),
+	IWL_TLC_MNG_CFG_FLAGS_DCM_MSK	= BIT(5),
+};
+
+/**
+ * enum iwl_tlc_mng_cfg_cw - channel width options
+ * @IWL_TLC_MNG_MAX_CH_WIDTH_20MHZ: 20MHZ channel
+ * @IWL_TLC_MNG_MAX_CH_WIDTH_40MHZ: 40MHZ channel
+ * @IWL_TLC_MNG_MAX_CH_WIDTH_80MHZ: 80MHZ channel
+ * @IWL_TLC_MNG_MAX_CH_WIDTH_160MHZ: 160MHZ channel
+ * @IWL_TLC_MNG_MAX_CH_WIDTH_LAST: maximum value
+ */
+enum iwl_tlc_mng_cfg_cw {
+	IWL_TLC_MNG_MAX_CH_WIDTH_20MHZ,
+	IWL_TLC_MNG_MAX_CH_WIDTH_40MHZ,
+	IWL_TLC_MNG_MAX_CH_WIDTH_80MHZ,
+	IWL_TLC_MNG_MAX_CH_WIDTH_160MHZ,
+	IWL_TLC_MNG_MAX_CH_WIDTH_LAST = IWL_TLC_MNG_MAX_CH_WIDTH_160MHZ,
+};
+
+/**
+ * enum iwl_tlc_mng_cfg_chains - possible chains
+ * @IWL_TLC_MNG_CHAIN_A_MSK: chain A
+ * @IWL_TLC_MNG_CHAIN_B_MSK: chain B
+ * @IWL_TLC_MNG_CHAIN_C_MSK: chain C
+ */
+enum iwl_tlc_mng_cfg_chains {
+	IWL_TLC_MNG_CHAIN_A_MSK = BIT(0),
+	IWL_TLC_MNG_CHAIN_B_MSK = BIT(1),
+	IWL_TLC_MNG_CHAIN_C_MSK = BIT(2),
+};
+
+/**
+ * enum iwl_tlc_mng_cfg_gi - guard interval options
+ * @IWL_TLC_MNG_SGI_20MHZ_MSK: enable short GI for 20MHZ
+ * @IWL_TLC_MNG_SGI_40MHZ_MSK: enable short GI for 40MHZ
+ * @IWL_TLC_MNG_SGI_80MHZ_MSK: enable short GI for 80MHZ
+ * @IWL_TLC_MNG_SGI_160MHZ_MSK: enable short GI for 160MHZ
+ */
+enum iwl_tlc_mng_cfg_gi {
+	IWL_TLC_MNG_SGI_20MHZ_MSK  = BIT(0),
+	IWL_TLC_MNG_SGI_40MHZ_MSK  = BIT(1),
+	IWL_TLC_MNG_SGI_80MHZ_MSK  = BIT(2),
+	IWL_TLC_MNG_SGI_160MHZ_MSK = BIT(3),
+};
+
+/**
+ * enum iwl_tlc_mng_cfg_mode - supported modes
+ * @IWL_TLC_MNG_MODE_CCK: enable CCK
+ * @IWL_TLC_MNG_MODE_OFDM_NON_HT: enable OFDM (non HT)
+ * @IWL_TLC_MNG_MODE_NON_HT: enable non HT
+ * @IWL_TLC_MNG_MODE_HT: enable HT
+ * @IWL_TLC_MNG_MODE_VHT: enable VHT
+ * @IWL_TLC_MNG_MODE_HE: enable HE
+ * @IWL_TLC_MNG_MODE_INVALID: invalid value
+ * @IWL_TLC_MNG_MODE_NUM: a count of possible modes
+ */
+enum iwl_tlc_mng_cfg_mode {
+	IWL_TLC_MNG_MODE_CCK = 0,
+	IWL_TLC_MNG_MODE_OFDM_NON_HT = IWL_TLC_MNG_MODE_CCK,
+	IWL_TLC_MNG_MODE_NON_HT = IWL_TLC_MNG_MODE_CCK,
+	IWL_TLC_MNG_MODE_HT,
+	IWL_TLC_MNG_MODE_VHT,
+	IWL_TLC_MNG_MODE_HE,
+	IWL_TLC_MNG_MODE_INVALID,
+	IWL_TLC_MNG_MODE_NUM = IWL_TLC_MNG_MODE_INVALID,
+};
+
+/**
+ * enum iwl_tlc_mng_vht_he_types - VHT HE types
+ * @IWL_TLC_MNG_VALID_VHT_HE_TYPES_SU: VHT/HE single user
+ * @IWL_TLC_MNG_VALID_VHT_HE_TYPES_SU_EXT: VHT/HE single user extended
+ * @IWL_TLC_MNG_VALID_VHT_HE_TYPES_MU: VHT/HE multiple users
+ * @IWL_TLC_MNG_VALID_VHT_HE_TYPES_TRIG_BASED: trigger based
+ * @IWL_TLC_MNG_VALID_VHT_HE_TYPES_NUM: highest valid type value
+ */
+enum iwl_tlc_mng_vht_he_types {
+	IWL_TLC_MNG_VALID_VHT_HE_TYPES_SU = 0,
+	IWL_TLC_MNG_VALID_VHT_HE_TYPES_SU_EXT,
+	IWL_TLC_MNG_VALID_VHT_HE_TYPES_MU,
+	IWL_TLC_MNG_VALID_VHT_HE_TYPES_TRIG_BASED,
+	IWL_TLC_MNG_VALID_VHT_HE_TYPES_NUM =
+		IWL_TLC_MNG_VALID_VHT_HE_TYPES_TRIG_BASED,
+};
+
+/**
+ * enum iwl_tlc_mng_ht_rates - HT/VHT rates
+ * @IWL_TLC_MNG_HT_RATE_MCS0: index of MCS0
+ * @IWL_TLC_MNG_HT_RATE_MCS1: index of MCS1
+ * @IWL_TLC_MNG_HT_RATE_MCS2: index of MCS2
+ * @IWL_TLC_MNG_HT_RATE_MCS3: index of MCS3
+ * @IWL_TLC_MNG_HT_RATE_MCS4: index of MCS4
+ * @IWL_TLC_MNG_HT_RATE_MCS5: index of MCS5
+ * @IWL_TLC_MNG_HT_RATE_MCS6: index of MCS6
+ * @IWL_TLC_MNG_HT_RATE_MCS7: index of MCS7
+ * @IWL_TLC_MNG_HT_RATE_MCS8: index of MCS8
+ * @IWL_TLC_MNG_HT_RATE_MCS9: index of MCS9
+ * @IWL_TLC_MNG_HT_RATE_MAX: maximal rate for HT/VHT
+ */
+enum iwl_tlc_mng_ht_rates {
+	IWL_TLC_MNG_HT_RATE_MCS0 = 0,
+	IWL_TLC_MNG_HT_RATE_MCS1,
+	IWL_TLC_MNG_HT_RATE_MCS2,
+	IWL_TLC_MNG_HT_RATE_MCS3,
+	IWL_TLC_MNG_HT_RATE_MCS4,
+	IWL_TLC_MNG_HT_RATE_MCS5,
+	IWL_TLC_MNG_HT_RATE_MCS6,
+	IWL_TLC_MNG_HT_RATE_MCS7,
+	IWL_TLC_MNG_HT_RATE_MCS8,
+	IWL_TLC_MNG_HT_RATE_MCS9,
+	IWL_TLC_MNG_HT_RATE_MAX = IWL_TLC_MNG_HT_RATE_MCS9,
+};
+
+/* Maximum supported tx antennas number */
+#define MAX_RS_ANT_NUM 3
+
+/**
+ * struct iwl_tlc_config_cmd - TLC configuration
+ * @sta_id: station id
+ * @reserved1: reserved
+ * @max_supp_ch_width: channel width
+ * @flags: bitmask of &enum iwl_tlc_mng_cfg_flags
+ * @chains: bitmask of &enum iwl_tlc_mng_cfg_chains
+ * @max_supp_ss: valid values are 0-3, 0 - spatial streams are not supported
+ * @valid_vht_he_types: bitmap of &enum iwl_tlc_mng_vht_he_types
+ * @non_ht_supp_rates: bitmap of supported legacy rates
+ * @ht_supp_rates: bitmap of supported HT/VHT rates, valid bits are 0-9
+ * @mode: &enum iwl_tlc_mng_cfg_mode
+ * @reserved2: reserved
+ * @he_supp_rates: bitmap of supported HE rates
+ * @sgi_ch_width_supp: bitmap of SGI support per channel width
+ * @he_gi_support: 11ax HE guard interval
+ * @max_ampdu_cnt: max AMPDU size (frames count)
+ */
+struct iwl_tlc_config_cmd {
+	u8 sta_id;
+	u8 reserved1[3];
+	u8 max_supp_ch_width;
+	u8 chains;
+	u8 max_supp_ss;
+	u8 valid_vht_he_types;
+	__le16 flags;
+	__le16 non_ht_supp_rates;
+	__le16 ht_supp_rates[MAX_RS_ANT_NUM];
+	u8 mode;
+	u8 reserved2;
+	__le16 he_supp_rates;
+	u8 sgi_ch_width_supp;
+	u8 he_gi_support;
+	__le32 max_ampdu_cnt;
+} __packed; /* TLC_MNG_CONFIG_CMD_API_S_VER_1 */
+
+#define IWL_TLC_NOTIF_INIT_RATE_POS 0
+#define IWL_TLC_NOTIF_INIT_RATE_MSK BIT(IWL_TLC_NOTIF_INIT_RATE_POS)
+#define IWL_TLC_NOTIF_REQ_INTERVAL (500)
+
+/**
+ * struct iwl_tlc_notif_req_config_cmd - request notif on specific changes
+ * @sta_id: relevant station
+ * @reserved1: reserved
+ * @flags: bitmap of requested notifications %IWL_TLC_NOTIF_INIT_\*
+ * @interval: minimum time between notifications from TLC to the driver (msec)
+ * @reserved2: reserved
+ */
+struct iwl_tlc_notif_req_config_cmd {
+	u8 sta_id;
+	u8 reserved1;
+	__le16 flags;
+	__le16 interval;
+	__le16 reserved2;
+} __packed; /* TLC_MNG_NOTIF_REQ_CMD_API_S_VER_1 */
+
+/**
+ * struct iwl_tlc_update_notif - TLC notification from FW
+ * @sta_id: station id
+ * @reserved: reserved
+ * @flags: bitmap of notifications reported
+ * @values: one value per flag set in &struct iwl_tlc_notif_req_config_cmd
+ */
+struct iwl_tlc_update_notif {
+	u8 sta_id;
+	u8 reserved;
+	__le16 flags;
+	__le32 values[16];
+} __packed; /* TLC_MNG_UPDATE_NTFY_API_S_VER_1 */
+
+/**
+ * enum iwl_tlc_debug_flags - debug options
+ * @IWL_TLC_DEBUG_FIXED_RATE: set fixed rate for rate scaling
+ * @IWL_TLC_DEBUG_STATS_TH: threshold for sending statistics to the driver, in
+ *	frames
+ * @IWL_TLC_DEBUG_STATS_TIME_TH: threshold for sending statistics to the
+ *	driver, in msec
+ * @IWL_TLC_DEBUG_AGG_TIME_LIM: time limit for a BA session
+ * @IWL_TLC_DEBUG_AGG_DIS_START_TH: frame with try-count greater than this
+ *	threshold should not start an aggregation session
+ * @IWL_TLC_DEBUG_AGG_FRAME_CNT_LIM: set max number of frames in an aggregation
+ * @IWL_TLC_DEBUG_RENEW_ADDBA_DELAY: delay between retries of ADD BA
+ * @IWL_TLC_DEBUG_START_AC_RATE_IDX: frames per second to start a BA session
+ * @IWL_TLC_DEBUG_NO_FAR_RANGE_TWEAK: disable BW scaling
+ */
+enum iwl_tlc_debug_flags {
+	IWL_TLC_DEBUG_FIXED_RATE,
+	IWL_TLC_DEBUG_STATS_TH,
+	IWL_TLC_DEBUG_STATS_TIME_TH,
+	IWL_TLC_DEBUG_AGG_TIME_LIM,
+	IWL_TLC_DEBUG_AGG_DIS_START_TH,
+	IWL_TLC_DEBUG_AGG_FRAME_CNT_LIM,
+	IWL_TLC_DEBUG_RENEW_ADDBA_DELAY,
+	IWL_TLC_DEBUG_START_AC_RATE_IDX,
+	IWL_TLC_DEBUG_NO_FAR_RANGE_TWEAK,
+}; /* TLC_MNG_DEBUG_FLAGS_API_E_VER_1 */
+
+/**
+ * struct iwl_dhc_tlc_cmd - fixed debug config
+ * @sta_id: bit 0 - enable/disable, bits 1 - 7 hold station id
+ * @reserved1: reserved
+ * @flags: bitmap of %IWL_TLC_DEBUG_\*
+ * @fixed_rate: rate value
+ * @stats_threshold: send statistics if the number of tx-ed frames exceeds this
+ * @time_threshold: statistics threshold in usec
+ * @agg_time_lim: max agg time
+ * @agg_dis_start_threshold: frames with try-count greater than this count will
+ *			     not be aggregated
+ * @agg_frame_count_lim: agg size
+ * @addba_retry_delay: delay between retries of ADD BA
+ * @start_ac_rate_idx: frames per second to start a BA session
+ * @no_far_range_tweak: disable BW scaling
+ * @reserved2: reserved
+ */
+struct iwl_dhc_tlc_cmd {
+	u8 sta_id;
+	u8 reserved1[3];
+	__le32 flags;
+	__le32 fixed_rate;
+	__le16 stats_threshold;
+	__le16 time_threshold;
+	__le16 agg_time_lim;
+	__le16 agg_dis_start_threshold;
+	__le16 agg_frame_count_lim;
+	__le16 addba_retry_delay;
+	u8 start_ac_rate_idx[IEEE80211_NUM_ACS];
+	u8 no_far_range_tweak;
+	u8 reserved2[3];
+} __packed;
+
 /*
  * These serve as indexes into
  * struct iwl_rate_info fw_rate_idx_to_plcp[IWL_RATE_COUNT];
@@ -253,7 +514,6 @@ enum {
 #define RATE_MCS_ANT_ABC_MSK		(RATE_MCS_ANT_AB_MSK | \
 					 RATE_MCS_ANT_C_MSK)
 #define RATE_MCS_ANT_MSK		RATE_MCS_ANT_ABC_MSK
-#define RATE_MCS_ANT_NUM 3
 
 /* Bit 17: (0) SS, (1) SS*2 */
 #define RATE_MCS_STBC_POS		17
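Taken together, the new structures describe the offloaded rate-scaling handshake: the driver pushes a station's capabilities with TLC_MNG_CONFIG_CMD, optionally subscribes to updates with TLC_MNG_NOTIF_REQ_CMD, and then receives TLC_MNG_UPDATE_NOTIF from the firmware. A hedged sketch of the two command payloads, with purely illustrative capability values:

/* sketch: configure TLC for a 1x1 HT station on chain A, then request
 * a notification whenever the initial rate changes, at most once every
 * IWL_TLC_NOTIF_REQ_INTERVAL msec.
 */
u8 sta_id = 0;					/* placeholder station index */
struct iwl_tlc_config_cmd cfg = {
	.sta_id = sta_id,
	.max_supp_ch_width = IWL_TLC_MNG_MAX_CH_WIDTH_40MHZ,
	.chains = IWL_TLC_MNG_CHAIN_A_MSK,
	.max_supp_ss = 1,
	.flags = cpu_to_le16(IWL_TLC_MNG_CFG_FLAGS_LDPC_MSK),
	.ht_supp_rates = { cpu_to_le16(0xff) },	/* MCS0-7 on one antenna */
	.mode = IWL_TLC_MNG_MODE_HT,
	.max_ampdu_cnt = cpu_to_le32(64),
};
struct iwl_tlc_notif_req_config_cmd req = {
	.sta_id = sta_id,
	.flags = cpu_to_le16(IWL_TLC_NOTIF_INIT_RATE_MSK),
	.interval = cpu_to_le16(IWL_TLC_NOTIF_REQ_INTERVAL),
};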
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/tx.h b/drivers/net/wireless/intel/iwlwifi/fw/api/tx.h
index f5d5ba7e37ec..a2a40b515a3c 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/api/tx.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/api/tx.h
@@ -121,7 +121,7 @@ enum iwl_tx_flags {
 }; /* TX_FLAGS_BITS_API_S_VER_1 */
 
 /**
- * enum iwl_tx_cmd_flags - bitmasks for tx_flags in TX command for a000
+ * enum iwl_tx_cmd_flags - bitmasks for tx_flags in TX command for 22000
  * @IWL_TX_FLAGS_CMD_RATE: use rate from the TX command
  * @IWL_TX_FLAGS_ENCRYPT_DIS: frame should not be encrypted, even if it belongs
  *	to a secured STA
@@ -301,7 +301,7 @@ struct iwl_dram_sec_info {
 } __packed; /* DRAM_SEC_INFO_API_S_VER_1 */
 
 /**
- * struct iwl_tx_cmd_gen2 - TX command struct to FW for a000 devices
+ * struct iwl_tx_cmd_gen2 - TX command struct to FW for 22000 devices
  * ( TX_CMD = 0x1c )
  * @len: in bytes of the payload, see below for details
  * @offload_assist: TX offload configuration
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/dbg.c b/drivers/net/wireless/intel/iwlwifi/fw/dbg.c
index 8106fd4be996..67aefc8fc9ac 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/dbg.c
+++ b/drivers/net/wireless/intel/iwlwifi/fw/dbg.c
@@ -964,7 +964,20 @@ int iwl_fw_dbg_collect_desc(struct iwl_fw_runtime *fwrt,
 	if (trigger)
 		delay = msecs_to_jiffies(le32_to_cpu(trigger->stop_delay));
 
-	if (WARN(fwrt->trans->state == IWL_TRANS_NO_FW,
+	/*
+	 * If the loading of the FW completed successfully, the next step is to
+	 * get the SMEM config data. Thus, if fwrt->smem_cfg.num_lmacs is
+	 * non-zero, the FW was already loaded successfully. If the state is
+	 * "NO_FW" in such a case - WARN and exit, since FW may be dead. Otherwise, we
+	 * can try to collect the data, since FW might just not be fully
+	 * loaded (no "ALIVE" yet), and the debug data is accessible.
+	 *
+	 * Corner case: got the FW alive but crashed before getting the SMEM
+	 *	config. In such a case, due to HW access problems, we might
+	 *	collect garbage.
+	 */
+	if (WARN((fwrt->trans->state == IWL_TRANS_NO_FW) &&
+		 fwrt->smem_cfg.num_lmacs,
 		 "Can't collect dbg data when FW isn't alive\n"))
 		return -EIO;
 
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/debugfs.c b/drivers/net/wireless/intel/iwlwifi/fw/debugfs.c
new file mode 100644
index 000000000000..e2ded29a145d
--- /dev/null
+++ b/drivers/net/wireless/intel/iwlwifi/fw/debugfs.c
@@ -0,0 +1,195 @@
+/******************************************************************************
+ *
+ * This file is provided under a dual BSD/GPLv2 license.  When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
+ * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
+ * Copyright(c) 2016 - 2017 Intel Deutschland GmbH
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program.
+ *
+ * The full GNU General Public License is included in this distribution
+ * in the file called COPYING.
+ *
+ * Contact Information:
+ *  Intel Linux Wireless <linuxwifi@intel.com>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ * BSD LICENSE
+ *
+ * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
+ * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
+ * Copyright(c) 2016 - 2017 Intel Deutschland GmbH
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ *  * Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ *  * Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in
+ *    the documentation and/or other materials provided with the
+ *    distribution.
+ *  * Neither the name Intel Corporation nor the names of its
+ *    contributors may be used to endorse or promote products derived
+ *    from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ *****************************************************************************/
+#include "api/commands.h"
+#include "debugfs.h"
+
+#define FWRT_DEBUGFS_READ_FILE_OPS(name)				\
+static ssize_t iwl_dbgfs_##name##_read(struct iwl_fw_runtime *fwrt,	\
+				       char *buf, size_t count,		\
+				       loff_t *ppos);			\
+static const struct file_operations iwl_dbgfs_##name##_ops = {		\
+	.read = iwl_dbgfs_##name##_read,				\
+	.open = simple_open,						\
+	.llseek = generic_file_llseek,					\
+}
+
+#define FWRT_DEBUGFS_WRITE_WRAPPER(name, buflen)			\
+static ssize_t iwl_dbgfs_##name##_write(struct iwl_fw_runtime *fwrt,	\
+					char *buf, size_t count,	\
+					loff_t *ppos);			\
+static ssize_t _iwl_dbgfs_##name##_write(struct file *file,		\
+					 const char __user *user_buf,	\
+					 size_t count, loff_t *ppos)	\
+{									\
+	struct iwl_fw_runtime *fwrt = file->private_data;		\
+	char buf[buflen] = {};						\
+	size_t buf_size = min(count, sizeof(buf) - 1);			\
+									\
+	if (copy_from_user(buf, user_buf, buf_size))			\
+		return -EFAULT;						\
+									\
+	return iwl_dbgfs_##name##_write(fwrt, buf, buf_size, ppos);	\
+}
+
+#define FWRT_DEBUGFS_READ_WRITE_FILE_OPS(name, buflen)			\
+FWRT_DEBUGFS_WRITE_WRAPPER(name, buflen)				\
+static const struct file_operations iwl_dbgfs_##name##_ops = {		\
+	.write = _iwl_dbgfs_##name##_write,				\
+	.read = iwl_dbgfs_##name##_read,				\
+	.open = simple_open,						\
+	.llseek = generic_file_llseek,					\
+}
+
+#define FWRT_DEBUGFS_WRITE_FILE_OPS(name, buflen)			\
+FWRT_DEBUGFS_WRITE_WRAPPER(name, buflen)				\
+static const struct file_operations iwl_dbgfs_##name##_ops = {		\
+	.write = _iwl_dbgfs_##name##_write,				\
+	.open = simple_open,						\
+	.llseek = generic_file_llseek,					\
+}
+
+#define FWRT_DEBUGFS_ADD_FILE_ALIAS(alias, name, parent, mode) do {	\
+		if (!debugfs_create_file(alias, mode, parent, fwrt,	\
+					 &iwl_dbgfs_##name##_ops))	\
+			goto err;					\
+	} while (0)
+#define FWRT_DEBUGFS_ADD_FILE(name, parent, mode) \
+	FWRT_DEBUGFS_ADD_FILE_ALIAS(#name, name, parent, mode)
+
+static int iwl_fw_send_timestamp_marker_cmd(struct iwl_fw_runtime *fwrt)
+{
+	struct iwl_mvm_marker marker = {
+		.dw_len = sizeof(struct iwl_mvm_marker) / 4,
+		.marker_id = MARKER_ID_SYNC_CLOCK,
+
+		/* the real timestamp is taken from the ftrace clock
+		 * this is for finding the match between fw and kernel logs
+		 */
+		.timestamp = cpu_to_le64(fwrt->timestamp.seq++),
+	};
+
+	struct iwl_host_cmd hcmd = {
+		.id = MARKER_CMD,
+		.flags = CMD_ASYNC,
+		.data[0] = &marker,
+		.len[0] = sizeof(marker),
+	};
+
+	return iwl_trans_send_cmd(fwrt->trans, &hcmd);
+}
+
+static void iwl_fw_timestamp_marker_wk(struct work_struct *work)
+{
+	int ret;
+	struct iwl_fw_runtime *fwrt =
+		container_of(work, struct iwl_fw_runtime, timestamp.wk.work);
+	unsigned long delay = fwrt->timestamp.delay;
+
+	ret = iwl_fw_send_timestamp_marker_cmd(fwrt);
+	if (!ret && delay)
+		schedule_delayed_work(&fwrt->timestamp.wk,
+				      round_jiffies_relative(delay));
+	else
+		IWL_INFO(fwrt,
+			 "stopping timestamp_marker, ret: %d, delay: %u\n",
+			 ret, jiffies_to_msecs(delay) / 1000);
+}
+
+static ssize_t iwl_dbgfs_timestamp_marker_write(struct iwl_fw_runtime *fwrt,
+						char *buf, size_t count,
+						loff_t *ppos)
+{
+	int ret;
+	u32 delay;
+
+	ret = kstrtou32(buf, 10, &delay);
+	if (ret < 0)
+		return ret;
+
+	IWL_INFO(fwrt,
+		 "starting timestamp_marker trigger with delay: %us\n",
+		 delay);
+
+	iwl_fw_cancel_timestamp(fwrt);
+
+	fwrt->timestamp.delay = msecs_to_jiffies(delay * 1000);
+
+	schedule_delayed_work(&fwrt->timestamp.wk,
+			      round_jiffies_relative(fwrt->timestamp.delay));
+	return count;
+}
+
+FWRT_DEBUGFS_WRITE_FILE_OPS(timestamp_marker, 10);
+
+int iwl_fwrt_dbgfs_register(struct iwl_fw_runtime *fwrt,
+			    struct dentry *dbgfs_dir)
+{
+	INIT_DELAYED_WORK(&fwrt->timestamp.wk, iwl_fw_timestamp_marker_wk);
+	FWRT_DEBUGFS_ADD_FILE(timestamp_marker, dbgfs_dir, S_IWUSR);
+	return 0;
+err:
+	IWL_ERR(fwrt, "Can't create the fwrt debugfs directory\n");
+	return -ENOMEM;
+}
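The write handler arms a periodic MARKER_ID_SYNC_CLOCK command: the value written is an interval in seconds, the delayed work reschedules itself as long as the command succeeds, and a zero interval fires the marker once. A small userspace sketch; the debugfs path is hypothetical and depends on where dbgfs_dir lands for a given device:

#include <fcntl.h>
#include <unistd.h>

int main(void)
{
	/* hypothetical path: the file sits in the per-device fwrt debugfs dir */
	int fd = open("/sys/kernel/debug/iwlwifi/0000:00:14.3/timestamp_marker",
		      O_WRONLY);

	if (fd < 0)
		return 1;
	if (write(fd, "60", 2) != 2) {	/* sync marker every 60 seconds */
		close(fd);
		return 1;
	}
	close(fd);
	return 0;
}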
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/debugfs.h b/drivers/net/wireless/intel/iwlwifi/fw/debugfs.h
new file mode 100644
index 000000000000..e57ff92a68ae
--- /dev/null
+++ b/drivers/net/wireless/intel/iwlwifi/fw/debugfs.h
@@ -0,0 +1,87 @@
+/******************************************************************************
+ *
+ * This file is provided under a dual BSD/GPLv2 license.  When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
+ * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
+ * Copyright(c) 2016 - 2017 Intel Deutschland GmbH
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program.
+ *
+ * The full GNU General Public License is included in this distribution
+ * in the file called COPYING.
+ *
+ * Contact Information:
+ *  Intel Linux Wireless <linuxwifi@intel.com>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ * BSD LICENSE
+ *
+ * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
+ * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
+ * Copyright(c) 2016 - 2017 Intel Deutschland GmbH
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ *  * Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ *  * Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in
+ *    the documentation and/or other materials provided with the
+ *    distribution.
+ *  * Neither the name Intel Corporation nor the names of its
+ *    contributors may be used to endorse or promote products derived
+ *    from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ *****************************************************************************/
+
+#include "runtime.h"
+
+#ifdef CONFIG_IWLWIFI_DEBUGFS
+int iwl_fwrt_dbgfs_register(struct iwl_fw_runtime *fwrt,
+			    struct dentry *dbgfs_dir);
+
+static inline void iwl_fw_cancel_timestamp(struct iwl_fw_runtime *fwrt)
+{
+	fwrt->timestamp.delay = 0;
+	cancel_delayed_work_sync(&fwrt->timestamp.wk);
+}
+
+#else
+static inline int iwl_fwrt_dbgfs_register(struct iwl_fw_runtime *fwrt,
+					  struct dentry *dbgfs_dir)
+{
+	return 0;
+}
+
+static inline void iwl_fw_cancel_timestamp(struct iwl_fw_runtime *fwrt) {}
+
+#endif /* CONFIG_IWLWIFI_DEBUGFS */
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/file.h b/drivers/net/wireless/intel/iwlwifi/fw/file.h
index 37a5c5b4eda6..1a05d506ac9a 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/file.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/file.h
@@ -7,7 +7,7 @@
  *
  * Copyright(c) 2008 - 2014 Intel Corporation. All rights reserved.
  * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
- * Copyright(c) 2016        Intel Deutschland GmbH
+ * Copyright(c) 2016 - 2017 Intel Deutschland GmbH
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of version 2 of the GNU General Public License as
@@ -34,7 +34,7 @@
  *
  * Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved.
  * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
- * Copyright(c) 2016        Intel Deutschland GmbH
+ * Copyright(c) 2016 - 2017 Intel Deutschland GmbH
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
@@ -246,10 +246,10 @@ typedef unsigned int __bitwise iwl_ucode_tlv_api_t;
 * @IWL_UCODE_TLV_API_STA_TYPE: This ucode supports station type assignment.
  * @IWL_UCODE_TLV_API_NAN2_VER2: This ucode supports NAN API version 2
  * @IWL_UCODE_TLV_API_NEW_RX_STATS: should new RX STATISTICS API be used
- * @IWL_UCODE_TLV_API_ATS_COEX_EXTERNAL: the coex notification is enlared to
- *	include information about ACL time sharing.
  * @IWL_UCODE_TLV_API_QUOTA_LOW_LATENCY: Quota command includes a field
  *	indicating low latency direction.
+ * @IWL_UCODE_TLV_API_DEPRECATE_TTAK: RX status flag TTAK ok (bit 7) is
+ *	deprecated.
  *
  * @NUM_IWL_UCODE_TLV_API: number of bits used
  */
@@ -267,8 +267,8 @@ enum iwl_ucode_tlv_api {
 	IWL_UCODE_TLV_API_ADAPTIVE_DWELL	= (__force iwl_ucode_tlv_api_t)32,
 	IWL_UCODE_TLV_API_NEW_BEACON_TEMPLATE	= (__force iwl_ucode_tlv_api_t)34,
 	IWL_UCODE_TLV_API_NEW_RX_STATS		= (__force iwl_ucode_tlv_api_t)35,
-	IWL_UCODE_TLV_API_COEX_ATS_EXTERNAL	= (__force iwl_ucode_tlv_api_t)37,
 	IWL_UCODE_TLV_API_QUOTA_LOW_LATENCY	= (__force iwl_ucode_tlv_api_t)38,
+	IWL_UCODE_TLV_API_DEPRECATE_TTAK	= (__force iwl_ucode_tlv_api_t)41,
 
 	NUM_IWL_UCODE_TLV_API
 #ifdef __CHECKER__
@@ -313,6 +313,8 @@ typedef unsigned int __bitwise iwl_ucode_tlv_capa_t;
  * @IWL_UCODE_TLV_CAPA_BT_COEX_RRC: supports BT Coex RRC
  * @IWL_UCODE_TLV_CAPA_GSCAN_SUPPORT: supports gscan
  * @IWL_UCODE_TLV_CAPA_STA_PM_NOTIF: firmware will send STA PM notification
+ * @IWL_UCODE_TLV_CAPA_TLC_OFFLOAD: firmware implements rate scaling algorithm
+ * @IWL_UCODE_TLV_CAPA_DYNAMIC_QUOTA: firmware implements quota related
  * @IWL_UCODE_TLV_CAPA_EXTENDED_DTS_MEASURE: extended DTS measurement
  * @IWL_UCODE_TLV_CAPA_SHORT_PM_TIMEOUTS: supports short PM timeouts
  * @IWL_UCODE_TLV_CAPA_BT_MPLUT_SUPPORT: supports bt-coex Multi-priority LUT
@@ -367,6 +369,8 @@ enum iwl_ucode_tlv_capa {
 	IWL_UCODE_TLV_CAPA_BINDING_CDB_SUPPORT		= (__force iwl_ucode_tlv_capa_t)39,
 	IWL_UCODE_TLV_CAPA_CDB_SUPPORT			= (__force iwl_ucode_tlv_capa_t)40,
 	IWL_UCODE_TLV_CAPA_D0I3_END_FIRST		= (__force iwl_ucode_tlv_capa_t)41,
+	IWL_UCODE_TLV_CAPA_TLC_OFFLOAD                  = (__force iwl_ucode_tlv_capa_t)43,
+	IWL_UCODE_TLV_CAPA_DYNAMIC_QUOTA                = (__force iwl_ucode_tlv_capa_t)44,
 	IWL_UCODE_TLV_CAPA_EXTENDED_DTS_MEASURE		= (__force iwl_ucode_tlv_capa_t)64,
 	IWL_UCODE_TLV_CAPA_SHORT_PM_TIMEOUTS		= (__force iwl_ucode_tlv_capa_t)65,
 	IWL_UCODE_TLV_CAPA_BT_MPLUT_SUPPORT		= (__force iwl_ucode_tlv_capa_t)67,
@@ -541,7 +545,7 @@ struct iwl_fw_dbg_mem_seg_tlv {
 } __packed;
 
 /**
- * struct iwl_fw_dbg_dest_tlv - configures the destination of the debug data
+ * struct iwl_fw_dbg_dest_tlv_v1 - configures the destination of the debug data
  *
  * @version: version of the TLV - currently 0
  * @monitor_mode: &enum iwl_fw_dbg_monitor_mode
@@ -556,7 +560,7 @@ struct iwl_fw_dbg_mem_seg_tlv {
  *
  * This parses IWL_UCODE_TLV_FW_DBG_DEST
  */
-struct iwl_fw_dbg_dest_tlv {
+struct iwl_fw_dbg_dest_tlv_v1 {
 	u8 version;
 	u8 monitor_mode;
 	u8 size_power;
@@ -570,6 +574,26 @@ struct iwl_fw_dbg_dest_tlv {
 	struct iwl_fw_dbg_reg_op reg_ops[0];
 } __packed;
 
+/* Mask of the register for defining the LDBG MAC2SMEM buffer SMEM size */
+#define IWL_LDBG_M2S_BUF_SIZE_MSK	0x0fff0000
+/* Mask of the register for defining the LDBG MAC2SMEM SMEM base address */
+#define IWL_LDBG_M2S_BUF_BA_MSK		0x00000fff
+/* The smem buffer chunks are in units of 256 bits */
+#define IWL_M2S_UNIT_SIZE			0x100
+
+struct iwl_fw_dbg_dest_tlv {
+	u8 version;
+	u8 monitor_mode;
+	u8 size_power;
+	u8 reserved;
+	__le32 cfg_reg;
+	__le32 write_ptr_reg;
+	__le32 wrap_count;
+	u8 base_shift;
+	u8 size_shift;
+	struct iwl_fw_dbg_reg_op reg_ops[0];
+} __packed;
+
 struct iwl_fw_dbg_conf_hcmd {
 	u8 id;
 	u8 reserved;
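The three defines above pack both the buffer base and its size into a single LDBG MAC2SMEM configuration word. A hedged kernel-context sketch of one plausible decode, assuming the TLV's base_shift and end_shift are applied as shown and the results are in IWL_M2S_UNIT_SIZE chunks:

/* sketch: split one MAC2SMEM cfg word into base address and length */
static u32 iwl_m2s_buf_base(u32 cfg_reg, u8 base_shift)
{
	return ((cfg_reg & IWL_LDBG_M2S_BUF_BA_MSK) << base_shift) *
		IWL_M2S_UNIT_SIZE;
}

static u32 iwl_m2s_buf_len(u32 cfg_reg, u8 end_shift)
{
	return ((cfg_reg & IWL_LDBG_M2S_BUF_SIZE_MSK) >> end_shift) *
		IWL_M2S_UNIT_SIZE;
}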
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/img.h b/drivers/net/wireless/intel/iwlwifi/fw/img.h
index 985496cc01d0..b23ffe12ad84 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/img.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/img.h
@@ -284,7 +284,7 @@ struct iwl_fw {
 	struct iwl_fw_cipher_scheme cs[IWL_UCODE_MAX_CS];
 	u8 human_readable[FW_VER_HUMAN_READABLE_SZ];
 
-	struct iwl_fw_dbg_dest_tlv *dbg_dest_tlv;
+	struct iwl_fw_dbg_dest_tlv_v1 *dbg_dest_tlv;
 	struct iwl_fw_dbg_conf_tlv *dbg_conf_tlv[FW_DBG_CONF_MAX];
 	size_t dbg_conf_tlv_len[FW_DBG_CONF_MAX];
 	struct iwl_fw_dbg_trigger_tlv *dbg_trigger_tlv[FW_DBG_TRIGGER_MAX];
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/init.c b/drivers/net/wireless/intel/iwlwifi/fw/init.c
index bfe5316bbb6a..c39fe84bb4c4 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/init.c
+++ b/drivers/net/wireless/intel/iwlwifi/fw/init.c
@@ -58,10 +58,12 @@
 #include "iwl-drv.h"
 #include "runtime.h"
 #include "dbg.h"
+#include "debugfs.h"
 
 void iwl_fw_runtime_init(struct iwl_fw_runtime *fwrt, struct iwl_trans *trans,
-			 const struct iwl_fw *fw,
-			 const struct iwl_fw_runtime_ops *ops, void *ops_ctx)
+			const struct iwl_fw *fw,
+			const struct iwl_fw_runtime_ops *ops, void *ops_ctx,
+			struct dentry *dbgfs_dir)
 {
 	memset(fwrt, 0, sizeof(*fwrt));
 	fwrt->trans = trans;
@@ -71,5 +73,12 @@ void iwl_fw_runtime_init(struct iwl_fw_runtime *fwrt, struct iwl_trans *trans,
 	fwrt->ops = ops;
 	fwrt->ops_ctx = ops_ctx;
 	INIT_DELAYED_WORK(&fwrt->dump.wk, iwl_fw_error_dump_wk);
+	iwl_fwrt_dbgfs_register(fwrt, dbgfs_dir);
 }
 IWL_EXPORT_SYMBOL(iwl_fw_runtime_init);
+
+void iwl_fw_runtime_exit(struct iwl_fw_runtime *fwrt)
+{
+	iwl_fw_cancel_timestamp(fwrt);
+}
+IWL_EXPORT_SYMBOL(iwl_fw_runtime_exit);
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/runtime.h b/drivers/net/wireless/intel/iwlwifi/fw/runtime.h
index 50cfb6d795a5..e25c049f980f 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/runtime.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/runtime.h
@@ -134,11 +134,21 @@ struct iwl_fw_runtime {
 		/* ts of the beginning of a non-collect fw dbg data period */
 		unsigned long non_collect_ts_start[FW_DBG_TRIGGER_MAX - 1];
 	} dump;
+#ifdef CONFIG_IWLWIFI_DEBUGFS
+	struct {
+		struct delayed_work wk;
+		u32 delay;
+		u64 seq;
+	} timestamp;
+#endif /* CONFIG_IWLWIFI_DEBUGFS */
 };
 
 void iwl_fw_runtime_init(struct iwl_fw_runtime *fwrt, struct iwl_trans *trans,
-			 const struct iwl_fw *fw,
-			 const struct iwl_fw_runtime_ops *ops, void *ops_ctx);
+			const struct iwl_fw *fw,
+			const struct iwl_fw_runtime_ops *ops, void *ops_ctx,
+			struct dentry *dbgfs_dir);
+
+void iwl_fw_runtime_exit(struct iwl_fw_runtime *fwrt);
 
 static inline void iwl_fw_set_current_image(struct iwl_fw_runtime *fwrt,
 					    enum iwl_ucode_type cur_fw_img)
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/smem.c b/drivers/net/wireless/intel/iwlwifi/fw/smem.c
index 76675736ba4f..fb4b6442b4d7 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/smem.c
+++ b/drivers/net/wireless/intel/iwlwifi/fw/smem.c
@@ -63,8 +63,8 @@
 #include "runtime.h"
 #include "fw/api/commands.h"
 
-static void iwl_parse_shared_mem_a000(struct iwl_fw_runtime *fwrt,
-				      struct iwl_rx_packet *pkt)
+static void iwl_parse_shared_mem_22000(struct iwl_fw_runtime *fwrt,
+				       struct iwl_rx_packet *pkt)
 {
 	struct iwl_shared_mem_cfg *mem_cfg = (void *)pkt->data;
 	int i, lmac;
@@ -143,8 +143,8 @@ void iwl_get_shared_mem_conf(struct iwl_fw_runtime *fwrt)
 		return;
 
 	pkt = cmd.resp_pkt;
-	if (fwrt->trans->cfg->device_family == IWL_DEVICE_FAMILY_A000)
-		iwl_parse_shared_mem_a000(fwrt, pkt);
+	if (fwrt->trans->cfg->device_family == IWL_DEVICE_FAMILY_22000)
+		iwl_parse_shared_mem_22000(fwrt, pkt);
 	else
 		iwl_parse_shared_mem(fwrt, pkt);
 
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-config.h b/drivers/net/wireless/intel/iwlwifi/iwl-config.h
index e21e46cf6f9a..258d439bb0a9 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-config.h
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-config.h
@@ -89,7 +89,7 @@ enum iwl_device_family {
 	IWL_DEVICE_FAMILY_7000,
 	IWL_DEVICE_FAMILY_8000,
 	IWL_DEVICE_FAMILY_9000,
-	IWL_DEVICE_FAMILY_A000,
+	IWL_DEVICE_FAMILY_22000,
 };
 
 /*
@@ -266,7 +266,7 @@ struct iwl_tt_params {
 #define OTP_LOW_IMAGE_SIZE_FAMILY_7000	(16 * 512 * sizeof(u16)) /* 16 KB */
 #define OTP_LOW_IMAGE_SIZE_FAMILY_8000	(32 * 512 * sizeof(u16)) /* 32 KB */
 #define OTP_LOW_IMAGE_SIZE_FAMILY_9000	OTP_LOW_IMAGE_SIZE_FAMILY_8000
-#define OTP_LOW_IMAGE_SIZE_FAMILY_A000	OTP_LOW_IMAGE_SIZE_FAMILY_9000
+#define OTP_LOW_IMAGE_SIZE_FAMILY_22000	OTP_LOW_IMAGE_SIZE_FAMILY_9000
 
 struct iwl_eeprom_params {
 	const u8 regulatory_bands[7];
@@ -330,7 +330,7 @@ struct iwl_pwr_tx_backoff {
  * @vht_mu_mimo_supported: VHT MU-MIMO support
  * @rf_id: need to read rf_id to determine the firmware image
  * @integrated: discrete or integrated
- * @gen2: a000 and on transport operation
+ * @gen2: 22000 and on transport operation
  * @cdb: CDB support
  * @nvm_type: see &enum iwl_nvm_type
  * @tx_cmd_queue_size: size of the cmd queue. If zero, use the same value as
@@ -477,13 +477,13 @@ extern const struct iwl_cfg iwl9460_2ac_cfg_soc;
 extern const struct iwl_cfg iwl9461_2ac_cfg_soc;
 extern const struct iwl_cfg iwl9462_2ac_cfg_soc;
 extern const struct iwl_cfg iwl9560_2ac_cfg_soc;
-extern const struct iwl_cfg iwla000_2ac_cfg_hr;
-extern const struct iwl_cfg iwla000_2ac_cfg_hr_cdb;
-extern const struct iwl_cfg iwla000_2ac_cfg_jf;
-extern const struct iwl_cfg iwla000_2ax_cfg_hr;
-extern const struct iwl_cfg iwla000_2ax_cfg_qnj_hr_f0;
-extern const struct iwl_cfg iwla000_2ax_cfg_qnj_jf_b0;
-extern const struct iwl_cfg iwla000_2ax_cfg_qnj_hr_a0;
+extern const struct iwl_cfg iwl22000_2ac_cfg_hr;
+extern const struct iwl_cfg iwl22000_2ac_cfg_hr_cdb;
+extern const struct iwl_cfg iwl22000_2ac_cfg_jf;
+extern const struct iwl_cfg iwl22000_2ax_cfg_hr;
+extern const struct iwl_cfg iwl22000_2ax_cfg_qnj_hr_f0;
+extern const struct iwl_cfg iwl22000_2ax_cfg_qnj_jf_b0;
+extern const struct iwl_cfg iwl22000_2ax_cfg_qnj_hr_a0;
 #endif /* CONFIG_IWLMVM */
 
 #endif /* __IWL_CONFIG_H__ */
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-devtrace-iwlwifi.h b/drivers/net/wireless/intel/iwlwifi/iwl-devtrace-iwlwifi.h
index 7f16dcce0995..9518a82f44c2 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-devtrace-iwlwifi.h
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-devtrace-iwlwifi.h
@@ -95,7 +95,7 @@ TRACE_EVENT(iwlwifi_dev_tx,
 	TP_ARGS(dev, skb, tfd, tfdlen, buf0, buf0_len, hdr_len),
 	TP_STRUCT__entry(
 		DEV_ENTRY
-
+		__field(void *, skbaddr)
 		__field(size_t, framelen)
 		__dynamic_array(u8, tfd, tfdlen)
 
@@ -110,6 +110,7 @@ TRACE_EVENT(iwlwifi_dev_tx,
 	),
 	TP_fast_assign(
 		DEV_ASSIGN;
+		__entry->skbaddr = skb;
 		__entry->framelen = buf0_len;
 		if (hdr_len > 0)
 			__entry->framelen += skb->len - hdr_len;
@@ -120,9 +121,9 @@ TRACE_EVENT(iwlwifi_dev_tx,
 				      __get_dynamic_array(buf1),
 				      skb->len - hdr_len);
 	),
-	TP_printk("[%s] TX %.2x (%zu bytes)",
+	TP_printk("[%s] TX %.2x (%zu bytes) skbaddr=%p",
 		  __get_str(dev), ((u8 *)__get_dynamic_array(buf0))[0],
-		  __entry->framelen)
+		  __entry->framelen, __entry->skbaddr)
 );
 
 TRACE_EVENT(iwlwifi_dev_ucode_error,
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-drv.c b/drivers/net/wireless/intel/iwlwifi/iwl-drv.c
index 4b224d7d967c..9c4a7f648a44 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-drv.c
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-drv.c
@@ -296,7 +296,12 @@ struct iwl_firmware_pieces {
 	u32 inst_evtlog_ptr, inst_evtlog_size, inst_errlog_ptr;
 
 	/* FW debug data parsed for driver usage */
-	struct iwl_fw_dbg_dest_tlv *dbg_dest_tlv;
+	bool dbg_dest_tlv_init;
+	u8 *dbg_dest_ver;
+	union {
+		struct iwl_fw_dbg_dest_tlv *dbg_dest_tlv;
+		struct iwl_fw_dbg_dest_tlv_v1 *dbg_dest_tlv_v1;
+	};
 	struct iwl_fw_dbg_conf_tlv *dbg_conf_tlv[FW_DBG_CONF_MAX];
 	size_t dbg_conf_tlv_len[FW_DBG_CONF_MAX];
 	struct iwl_fw_dbg_trigger_tlv *dbg_trigger_tlv[FW_DBG_TRIGGER_MAX];
@@ -919,27 +924,60 @@ static int iwl_parse_tlv_firmware(struct iwl_drv *drv,
 			minor = le32_to_cpup(ptr++);
 			local_comp = le32_to_cpup(ptr);
 
-			snprintf(drv->fw.fw_version,
-				 sizeof(drv->fw.fw_version), "%u.%u.%u",
-				 major, minor, local_comp);
+			if (major >= 35)
+				snprintf(drv->fw.fw_version,
+					 sizeof(drv->fw.fw_version),
+					 "%u.%08x.%u", major, minor, local_comp);
+			else
+				snprintf(drv->fw.fw_version,
+					 sizeof(drv->fw.fw_version),
+					 "%u.%u.%u", major, minor, local_comp);
 			break;
 			}
 		case IWL_UCODE_TLV_FW_DBG_DEST: {
-			struct iwl_fw_dbg_dest_tlv *dest = (void *)tlv_data;
+			struct iwl_fw_dbg_dest_tlv *dest = NULL;
+			struct iwl_fw_dbg_dest_tlv_v1 *dest_v1 = NULL;
+			u8 mon_mode;
+
+			pieces->dbg_dest_ver = (u8 *)tlv_data;
+			if (*pieces->dbg_dest_ver == 1) {
+				dest = (void *)tlv_data;
+			} else if (*pieces->dbg_dest_ver == 0) {
+				dest_v1 = (void *)tlv_data;
+			} else {
+				IWL_ERR(drv,
+					"The version is %d, and it is invalid\n",
+					*pieces->dbg_dest_ver);
+				break;
+			}
 
-			if (pieces->dbg_dest_tlv) {
+			if (pieces->dbg_dest_tlv_init) {
 				IWL_ERR(drv,
 					"dbg destination ignored, already exists\n");
 				break;
 			}
 
-			pieces->dbg_dest_tlv = dest;
+			pieces->dbg_dest_tlv_init = true;
+
+			if (dest_v1) {
+				pieces->dbg_dest_tlv_v1 = dest_v1;
+				mon_mode = dest_v1->monitor_mode;
+			} else {
+				pieces->dbg_dest_tlv = dest;
+				mon_mode = dest->monitor_mode;
+			}
+
 			IWL_INFO(drv, "Found debug destination: %s\n",
-				 get_fw_dbg_mode_string(dest->monitor_mode));
+				 get_fw_dbg_mode_string(mon_mode));
+
+			drv->fw.dbg_dest_reg_num = (dest_v1) ?
+				tlv_len -
+				offsetof(struct iwl_fw_dbg_dest_tlv_v1,
+					 reg_ops) :
+				tlv_len -
+				offsetof(struct iwl_fw_dbg_dest_tlv,
+					 reg_ops);
 
-			drv->fw.dbg_dest_reg_num =
-				tlv_len - offsetof(struct iwl_fw_dbg_dest_tlv,
-						   reg_ops);
 			drv->fw.dbg_dest_reg_num /=
 				sizeof(drv->fw.dbg_dest_tlv->reg_ops[0]);
 
@@ -948,7 +986,7 @@ static int iwl_parse_tlv_firmware(struct iwl_drv *drv,
 		case IWL_UCODE_TLV_FW_DBG_CONF: {
 			struct iwl_fw_dbg_conf_tlv *conf = (void *)tlv_data;
 
-			if (!pieces->dbg_dest_tlv) {
+			if (!pieces->dbg_dest_tlv_init) {
 				IWL_ERR(drv,
 					"Ignore dbg config %d - no destination configured\n",
 					conf->id);
@@ -1335,15 +1373,51 @@ static void iwl_req_fw_callback(const struct firmware *ucode_raw, void *context)
 		if (iwl_alloc_ucode(drv, pieces, i))
 			goto out_free_fw;
 
-	if (pieces->dbg_dest_tlv) {
-		drv->fw.dbg_dest_tlv =
-			kmemdup(pieces->dbg_dest_tlv,
-				sizeof(*pieces->dbg_dest_tlv) +
-				sizeof(pieces->dbg_dest_tlv->reg_ops[0]) *
-				drv->fw.dbg_dest_reg_num, GFP_KERNEL);
+	if (pieces->dbg_dest_tlv_init) {
+		size_t dbg_dest_size = sizeof(*drv->fw.dbg_dest_tlv) +
+			sizeof(drv->fw.dbg_dest_tlv->reg_ops[0]) *
+			drv->fw.dbg_dest_reg_num;
+
+		drv->fw.dbg_dest_tlv = kmalloc(dbg_dest_size, GFP_KERNEL);
 
 		if (!drv->fw.dbg_dest_tlv)
 			goto out_free_fw;
+
+		if (*pieces->dbg_dest_ver == 0) {
+			memcpy(drv->fw.dbg_dest_tlv, pieces->dbg_dest_tlv_v1,
+			       dbg_dest_size);
+		} else {
+			struct iwl_fw_dbg_dest_tlv_v1 *dest_tlv =
+				drv->fw.dbg_dest_tlv;
+
+			dest_tlv->version = pieces->dbg_dest_tlv->version;
+			dest_tlv->monitor_mode =
+				pieces->dbg_dest_tlv->monitor_mode;
+			dest_tlv->size_power =
+				pieces->dbg_dest_tlv->size_power;
+			dest_tlv->wrap_count =
+				pieces->dbg_dest_tlv->wrap_count;
+			dest_tlv->write_ptr_reg =
+				pieces->dbg_dest_tlv->write_ptr_reg;
+			dest_tlv->base_shift =
+				pieces->dbg_dest_tlv->base_shift;
+			memcpy(dest_tlv->reg_ops,
+			       pieces->dbg_dest_tlv->reg_ops,
+			       sizeof(drv->fw.dbg_dest_tlv->reg_ops[0]) *
+			       drv->fw.dbg_dest_reg_num);
+
+			/* In version 1 of the destination TLV, which is
+			 * relevant for the internal buffer exclusively,
+			 * the base address is given together with the
+			 * length of the buffer, and a size shift is given
+			 * instead of an end shift. We store these values
+			 * in base_reg and end_shift, and when dumping the
+			 * data we'll manipulate them to extract both the
+			 * length and the base address */
+			dest_tlv->base_reg = pieces->dbg_dest_tlv->cfg_reg;
+			dest_tlv->end_shift =
+				pieces->dbg_dest_tlv->size_shift;
+		}
 	}
 
 	for (i = 0; i < ARRAY_SIZE(drv->fw.dbg_conf_tlv); i++) {
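
Editor's note: the dbg-dest rework above keys on a version byte at the start of the TLV payload. A first byte of 1 selects the new struct iwl_fw_dbg_dest_tlv (where cfg_reg packs the base address with the buffer length and size_shift replaces end_shift), a first byte of 0 keeps the legacy layout now named struct iwl_fw_dbg_dest_tlv_v1, and iwl_req_fw_callback() normalizes both into the v1-shaped in-driver copy. Below is a minimal sketch of that dispatch-and-normalize pattern; the struct layouts and field names are illustrative stand-ins, not the real iwlwifi definitions.

#include <stdint.h>

struct dest_v0 {		/* stand-in for the legacy (version 0) layout */
	uint8_t version;
	uint8_t monitor_mode;
	uint32_t base_reg;
	uint8_t end_shift;
} __attribute__((packed));

struct dest_v1 {		/* stand-in for the version 1 layout */
	uint8_t version;
	uint8_t monitor_mode;
	uint32_t cfg_reg;	/* base address packed with buffer length */
	uint8_t size_shift;	/* replaces end_shift */
} __attribute__((packed));

struct dest_drv {		/* single in-driver representation */
	uint8_t monitor_mode;
	uint32_t base_reg;
	uint8_t end_shift;
};

/* Returns 0 on success, -1 on an unknown version byte. */
int parse_dest_tlv(const uint8_t *tlv, struct dest_drv *out)
{
	switch (tlv[0]) {
	case 0: {
		const struct dest_v0 *d = (const void *)tlv;

		out->monitor_mode = d->monitor_mode;
		out->base_reg = d->base_reg;
		out->end_shift = d->end_shift;
		return 0;
	}
	case 1: {
		const struct dest_v1 *d = (const void *)tlv;

		/* stash the packed fields; the dump path unpacks them */
		out->monitor_mode = d->monitor_mode;
		out->base_reg = d->cfg_reg;
		out->end_shift = d->size_shift;
		return 0;
	}
	default:
		return -1;	/* unknown version: skip the TLV */
	}
}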
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-fh.h b/drivers/net/wireless/intel/iwlwifi/iwl-fh.h
index 66e5db41e559..11789ffb6512 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-fh.h
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-fh.h
@@ -121,7 +121,7 @@
 #define FH_MEM_CBBC_16_19_UPPER_BOUND		(FH_MEM_LOWER_BOUND + 0xC00)
 #define FH_MEM_CBBC_20_31_LOWER_BOUND		(FH_MEM_LOWER_BOUND + 0xB20)
 #define FH_MEM_CBBC_20_31_UPPER_BOUND		(FH_MEM_LOWER_BOUND + 0xB80)
-/* a000 TFD table address, 64 bit */
+/* 22000 TFD table address, 64 bit */
 #define TFH_TFDQ_CBB_TABLE			(0x1C00)
 
 /* Find TFD CB base pointer for given queue */
@@ -140,7 +140,7 @@ static inline unsigned int FH_MEM_CBBC_QUEUE(struct iwl_trans *trans,
 	return FH_MEM_CBBC_20_31_LOWER_BOUND + 4 * (chnl - 20);
 }
 
-/* a000 configuration registers */
+/* 22000 configuration registers */
 
 /*
  * TFH Configuration register.
@@ -697,8 +697,8 @@ struct iwl_tfh_tb {
  * Each Tx queue uses a circular buffer of 256 TFDs stored in host DRAM.
  * Both driver and device share these circular buffers, each of which must be
  * contiguous 256 TFDs.
- * For pre a000 HW it is 256 x 128 bytes-per-TFD = 32 KBytes
- * For a000 HW and on it is 256 x 256 bytes-per-TFD = 65 KBytes
+ * For pre 22000 HW it is 256 x 128 bytes-per-TFD = 32 KBytes
+ * For 22000 HW and on it is 256 x 256 bytes-per-TFD = 64 KBytes
  *
  * Driver must indicate the physical address of the base of each
  * circular buffer via the FH_MEM_CBBC_QUEUE registers.
@@ -750,10 +750,10 @@ struct iwl_tfh_tfd {
 /**
  * struct iwlagn_schedq_bc_tbl scheduler byte count table
  *	base physical address provided by SCD_DRAM_BASE_ADDR
- * For devices up to a000:
+ * For devices up to 22000:
  * @tfd_offset  0-12 - tx command byte count
  *		12-16 - station index
- * For a000 and on:
+ * For 22000 and on:
  * @tfd_offset  0-12 - tx command byte count
  *		12-13 - number of 64 byte chunks
  *		14-16 - reserved
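
Editor's note: the KBytes figure corrected above falls straight out of the ring geometry: each Tx queue is a circular buffer of 256 TFDs, at 128 bytes per TFD before the 22000 family and 256 bytes from 22000 on. A throwaway sketch of the arithmetic (the enum is a placeholder, not the driver's device-family enum):

#include <stdio.h>

#define TFD_QUEUE_SIZE_MAX	256	/* TFDs per Tx queue */

enum family { FAMILY_PRE_22000, FAMILY_22000 };

static unsigned int tfd_ring_bytes(enum family f)
{
	unsigned int tfd_size = (f == FAMILY_22000) ? 256 : 128;

	return TFD_QUEUE_SIZE_MAX * tfd_size;
}

int main(void)
{
	/* 256 * 128 = 32768 bytes (32 KiB); 256 * 256 = 65536 bytes (64 KiB) */
	printf("pre-22000: %u bytes\n", tfd_ring_bytes(FAMILY_PRE_22000));
	printf("22000+:    %u bytes\n", tfd_ring_bytes(FAMILY_22000));
	return 0;
}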
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-trans.h b/drivers/net/wireless/intel/iwlwifi/iwl-trans.h
index 921cab9e2d73..c25ed1a0bbb0 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-trans.h
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-trans.h
@@ -551,7 +551,7 @@ struct iwl_trans_ops {
 			   unsigned int queue_wdg_timeout);
 	void (*txq_disable)(struct iwl_trans *trans, int queue,
 			    bool configure_scd);
-	/* a000 functions */
+	/* 22000 functions */
 	int (*txq_alloc)(struct iwl_trans *trans,
 			 struct iwl_tx_queue_cfg_cmd *cmd,
 			 int cmd_id,
@@ -579,6 +579,7 @@ struct iwl_trans_ops {
 	void (*configure)(struct iwl_trans *trans,
 			  const struct iwl_trans_config *trans_cfg);
 	void (*set_pmi)(struct iwl_trans *trans, bool state);
+	void (*sw_reset)(struct iwl_trans *trans);
 	bool (*grab_nic_access)(struct iwl_trans *trans, unsigned long *flags);
 	void (*release_nic_access)(struct iwl_trans *trans,
 				   unsigned long *flags);
@@ -744,7 +745,7 @@ struct iwl_trans {
 	struct lockdep_map sync_cmd_lockdep_map;
 #endif
 
-	const struct iwl_fw_dbg_dest_tlv *dbg_dest_tlv;
+	const struct iwl_fw_dbg_dest_tlv_v1 *dbg_dest_tlv;
 	const struct iwl_fw_dbg_conf_tlv *dbg_conf_tlv[FW_DBG_CONF_MAX];
 	struct iwl_fw_dbg_trigger_tlv * const *dbg_trigger_tlv;
 	u8 dbg_dest_reg_num;
@@ -1124,6 +1125,12 @@ static inline void iwl_trans_set_pmi(struct iwl_trans *trans, bool state)
 		trans->ops->set_pmi(trans, state);
 }
 
+static inline void iwl_trans_sw_reset(struct iwl_trans *trans)
+{
+	if (trans->ops->sw_reset)
+		trans->ops->sw_reset(trans);
+}
+
 static inline void
 iwl_trans_set_bits_mask(struct iwl_trans *trans, u32 reg, u32 mask, u32 value)
 {
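
Editor's note: iwl_trans_sw_reset() follows the transport layer's convention for optional ops: the static-inline wrapper tests the function pointer, so callers never need to know whether a particular transport backend implements the hook, while mandatory ops are called unconditionally. A generic sketch of the pattern (the widget names are made up for illustration):

struct widget;

struct widget_ops {
	void (*reset)(struct widget *w);	/* optional hook */
	int (*start)(struct widget *w);		/* mandatory hook */
};

struct widget {
	const struct widget_ops *ops;
};

/* optional op: silently a no-op when the backend doesn't provide it */
static inline void widget_reset(struct widget *w)
{
	if (w->ops->reset)
		w->ops->reset(w);
}

/* mandatory op: callers may assume the pointer is populated */
static inline int widget_start(struct widget *w)
{
	return w->ops->start(w);
}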
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/Makefile b/drivers/net/wireless/intel/iwlwifi/mvm/Makefile
index a47635c32c11..9ffd21918b5a 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/Makefile
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/Makefile
@@ -2,7 +2,7 @@
 obj-$(CONFIG_IWLMVM)   += iwlmvm.o
 iwlmvm-y += fw.o mac80211.o nvm.o ops.o phy-ctxt.o mac-ctxt.o
 iwlmvm-y += utils.o rx.o rxmq.o tx.o binding.o quota.o sta.o sf.o
-iwlmvm-y += scan.o time-event.o rs.o
+iwlmvm-y += scan.o time-event.o rs.o rs-fw.o
 iwlmvm-y += power.o coex.o
 iwlmvm-y += tt.o offloading.o tdls.o
 iwlmvm-$(CONFIG_IWLWIFI_DEBUGFS) += debugfs.o debugfs-vif.o
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/coex.c b/drivers/net/wireless/intel/iwlwifi/mvm/coex.c
index 79c80f181f7d..890dbfff3a06 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/coex.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/coex.c
@@ -7,7 +7,6 @@
  *
  * Copyright(c) 2013 - 2014 Intel Corporation. All rights reserved.
  * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
- * Copyright(c) 2017        Intel Deutschland GmbH
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of version 2 of the GNU General Public License as
@@ -34,7 +33,6 @@
  *
  * Copyright(c) 2013 - 2014 Intel Corporation. All rights reserved.
  * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
- * Copyright(c) 2017        Intel Deutschland GmbH
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
@@ -514,36 +512,17 @@ void iwl_mvm_rx_bt_coex_notif(struct iwl_mvm *mvm,
 	struct iwl_rx_packet *pkt = rxb_addr(rxb);
 	struct iwl_bt_coex_profile_notif *notif = (void *)pkt->data;
 
-	if (!iwl_mvm_has_new_ats_coex_api(mvm)) {
-		struct iwl_bt_coex_profile_notif_v4 *v4 = (void *)pkt->data;
-
-		mvm->last_bt_notif.mbox_msg[0] = v4->mbox_msg[0];
-		mvm->last_bt_notif.mbox_msg[1] = v4->mbox_msg[1];
-		mvm->last_bt_notif.mbox_msg[2] = v4->mbox_msg[2];
-		mvm->last_bt_notif.mbox_msg[3] = v4->mbox_msg[3];
-		mvm->last_bt_notif.msg_idx = v4->msg_idx;
-		mvm->last_bt_notif.bt_ci_compliance = v4->bt_ci_compliance;
-		mvm->last_bt_notif.primary_ch_lut = v4->primary_ch_lut;
-		mvm->last_bt_notif.secondary_ch_lut = v4->secondary_ch_lut;
-		mvm->last_bt_notif.bt_activity_grading =
-			v4->bt_activity_grading;
-		mvm->last_bt_notif.ttc_status = v4->ttc_status;
-		mvm->last_bt_notif.rrc_status = v4->rrc_status;
-	} else {
-		/* save this notification for future use: rssi fluctuations */
-		memcpy(&mvm->last_bt_notif, notif, sizeof(mvm->last_bt_notif));
-	}
-
 	IWL_DEBUG_COEX(mvm, "BT Coex Notification received\n");
-	IWL_DEBUG_COEX(mvm, "\tBT ci compliance %d\n",
-		       mvm->last_bt_notif.bt_ci_compliance);
+	IWL_DEBUG_COEX(mvm, "\tBT ci compliance %d\n", notif->bt_ci_compliance);
 	IWL_DEBUG_COEX(mvm, "\tBT primary_ch_lut %d\n",
-		       le32_to_cpu(mvm->last_bt_notif.primary_ch_lut));
+		       le32_to_cpu(notif->primary_ch_lut));
 	IWL_DEBUG_COEX(mvm, "\tBT secondary_ch_lut %d\n",
-		       le32_to_cpu(mvm->last_bt_notif.secondary_ch_lut));
+		       le32_to_cpu(notif->secondary_ch_lut));
 	IWL_DEBUG_COEX(mvm, "\tBT activity grading %d\n",
-		       le32_to_cpu(mvm->last_bt_notif.bt_activity_grading));
+		       le32_to_cpu(notif->bt_activity_grading));
 
+	/* remember this notification for future use: rssi fluctuations */
+	memcpy(&mvm->last_bt_notif, notif, sizeof(mvm->last_bt_notif));
 
 	iwl_mvm_bt_coex_notif_handle(mvm);
 }
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/d3.c b/drivers/net/wireless/intel/iwlwifi/mvm/d3.c
index b1f73dcabd31..0e6cf39285f4 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/d3.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/d3.c
@@ -429,231 +429,6 @@ static int iwl_mvm_send_patterns(struct iwl_mvm *mvm,
 	return err;
 }
 
-enum iwl_mvm_tcp_packet_type {
-	MVM_TCP_TX_SYN,
-	MVM_TCP_RX_SYNACK,
-	MVM_TCP_TX_DATA,
-	MVM_TCP_RX_ACK,
-	MVM_TCP_RX_WAKE,
-	MVM_TCP_TX_FIN,
-};
-
-static __le16 pseudo_hdr_check(int len, __be32 saddr, __be32 daddr)
-{
-	__sum16 check = tcp_v4_check(len, saddr, daddr, 0);
-	return cpu_to_le16(be16_to_cpu((__force __be16)check));
-}
-
-static void iwl_mvm_build_tcp_packet(struct ieee80211_vif *vif,
-				     struct cfg80211_wowlan_tcp *tcp,
-				     void *_pkt, u8 *mask,
-				     __le16 *pseudo_hdr_csum,
-				     enum iwl_mvm_tcp_packet_type ptype)
-{
-	struct {
-		struct ethhdr eth;
-		struct iphdr ip;
-		struct tcphdr tcp;
-		u8 data[];
-	} __packed *pkt = _pkt;
-	u16 ip_tot_len = sizeof(struct iphdr) + sizeof(struct tcphdr);
-	int i;
-
-	pkt->eth.h_proto = cpu_to_be16(ETH_P_IP),
-	pkt->ip.version = 4;
-	pkt->ip.ihl = 5;
-	pkt->ip.protocol = IPPROTO_TCP;
-
-	switch (ptype) {
-	case MVM_TCP_TX_SYN:
-	case MVM_TCP_TX_DATA:
-	case MVM_TCP_TX_FIN:
-		memcpy(pkt->eth.h_dest, tcp->dst_mac, ETH_ALEN);
-		memcpy(pkt->eth.h_source, vif->addr, ETH_ALEN);
-		pkt->ip.ttl = 128;
-		pkt->ip.saddr = tcp->src;
-		pkt->ip.daddr = tcp->dst;
-		pkt->tcp.source = cpu_to_be16(tcp->src_port);
-		pkt->tcp.dest = cpu_to_be16(tcp->dst_port);
-		/* overwritten for TX SYN later */
-		pkt->tcp.doff = sizeof(struct tcphdr) / 4;
-		pkt->tcp.window = cpu_to_be16(65000);
-		break;
-	case MVM_TCP_RX_SYNACK:
-	case MVM_TCP_RX_ACK:
-	case MVM_TCP_RX_WAKE:
-		memcpy(pkt->eth.h_dest, vif->addr, ETH_ALEN);
-		memcpy(pkt->eth.h_source, tcp->dst_mac, ETH_ALEN);
-		pkt->ip.saddr = tcp->dst;
-		pkt->ip.daddr = tcp->src;
-		pkt->tcp.source = cpu_to_be16(tcp->dst_port);
-		pkt->tcp.dest = cpu_to_be16(tcp->src_port);
-		break;
-	default:
-		WARN_ON(1);
-		return;
-	}
-
-	switch (ptype) {
-	case MVM_TCP_TX_SYN:
-		/* firmware assumes 8 option bytes - 8 NOPs for now */
-		memset(pkt->data, 0x01, 8);
-		ip_tot_len += 8;
-		pkt->tcp.doff = (sizeof(struct tcphdr) + 8) / 4;
-		pkt->tcp.syn = 1;
-		break;
-	case MVM_TCP_TX_DATA:
-		ip_tot_len += tcp->payload_len;
-		memcpy(pkt->data, tcp->payload, tcp->payload_len);
-		pkt->tcp.psh = 1;
-		pkt->tcp.ack = 1;
-		break;
-	case MVM_TCP_TX_FIN:
-		pkt->tcp.fin = 1;
-		pkt->tcp.ack = 1;
-		break;
-	case MVM_TCP_RX_SYNACK:
-		pkt->tcp.syn = 1;
-		pkt->tcp.ack = 1;
-		break;
-	case MVM_TCP_RX_ACK:
-		pkt->tcp.ack = 1;
-		break;
-	case MVM_TCP_RX_WAKE:
-		ip_tot_len += tcp->wake_len;
-		pkt->tcp.psh = 1;
-		pkt->tcp.ack = 1;
-		memcpy(pkt->data, tcp->wake_data, tcp->wake_len);
-		break;
-	}
-
-	switch (ptype) {
-	case MVM_TCP_TX_SYN:
-	case MVM_TCP_TX_DATA:
-	case MVM_TCP_TX_FIN:
-		pkt->ip.tot_len = cpu_to_be16(ip_tot_len);
-		pkt->ip.check = ip_fast_csum(&pkt->ip, pkt->ip.ihl);
-		break;
-	case MVM_TCP_RX_WAKE:
-		for (i = 0; i < DIV_ROUND_UP(tcp->wake_len, 8); i++) {
-			u8 tmp = tcp->wake_mask[i];
-			mask[i + 6] |= tmp << 6;
-			if (i + 1 < DIV_ROUND_UP(tcp->wake_len, 8))
-				mask[i + 7] = tmp >> 2;
-		}
-		/* fall through for ethernet/IP/TCP headers mask */
-	case MVM_TCP_RX_SYNACK:
-	case MVM_TCP_RX_ACK:
-		mask[0] = 0xff; /* match ethernet */
-		/*
-		 * match ethernet, ip.version, ip.ihl
-		 * the ip.ihl half byte is really masked out by firmware
-		 */
-		mask[1] = 0x7f;
-		mask[2] = 0x80; /* match ip.protocol */
-		mask[3] = 0xfc; /* match ip.saddr, ip.daddr */
-		mask[4] = 0x3f; /* match ip.daddr, tcp.source, tcp.dest */
-		mask[5] = 0x80; /* match tcp flags */
-		/* leave rest (0 or set for MVM_TCP_RX_WAKE) */
-		break;
-	};
-
-	*pseudo_hdr_csum = pseudo_hdr_check(ip_tot_len - sizeof(struct iphdr),
-					    pkt->ip.saddr, pkt->ip.daddr);
-}
-
-static int iwl_mvm_send_remote_wake_cfg(struct iwl_mvm *mvm,
-					struct ieee80211_vif *vif,
-					struct cfg80211_wowlan_tcp *tcp)
-{
-	struct iwl_wowlan_remote_wake_config *cfg;
-	struct iwl_host_cmd cmd = {
-		.id = REMOTE_WAKE_CONFIG_CMD,
-		.len = { sizeof(*cfg), },
-		.dataflags = { IWL_HCMD_DFL_NOCOPY, },
-	};
-	int ret;
-
-	if (!tcp)
-		return 0;
-
-	cfg = kzalloc(sizeof(*cfg), GFP_KERNEL);
-	if (!cfg)
-		return -ENOMEM;
-	cmd.data[0] = cfg;
-
-	cfg->max_syn_retries = 10;
-	cfg->max_data_retries = 10;
-	cfg->tcp_syn_ack_timeout = 1; /* seconds */
-	cfg->tcp_ack_timeout = 1; /* seconds */
-
-	/* SYN (TX) */
-	iwl_mvm_build_tcp_packet(
-		vif, tcp, cfg->syn_tx.data, NULL,
-		&cfg->syn_tx.info.tcp_pseudo_header_checksum,
-		MVM_TCP_TX_SYN);
-	cfg->syn_tx.info.tcp_payload_length = 0;
-
-	/* SYN/ACK (RX) */
-	iwl_mvm_build_tcp_packet(
-		vif, tcp, cfg->synack_rx.data, cfg->synack_rx.rx_mask,
-		&cfg->synack_rx.info.tcp_pseudo_header_checksum,
-		MVM_TCP_RX_SYNACK);
-	cfg->synack_rx.info.tcp_payload_length = 0;
-
-	/* KEEPALIVE/ACK (TX) */
-	iwl_mvm_build_tcp_packet(
-		vif, tcp, cfg->keepalive_tx.data, NULL,
-		&cfg->keepalive_tx.info.tcp_pseudo_header_checksum,
-		MVM_TCP_TX_DATA);
-	cfg->keepalive_tx.info.tcp_payload_length =
-		cpu_to_le16(tcp->payload_len);
-	cfg->sequence_number_offset = tcp->payload_seq.offset;
-	/* length must be 0..4, the field is little endian */
-	cfg->sequence_number_length = tcp->payload_seq.len;
-	cfg->initial_sequence_number = cpu_to_le32(tcp->payload_seq.start);
-	cfg->keepalive_interval = cpu_to_le16(tcp->data_interval);
-	if (tcp->payload_tok.len) {
-		cfg->token_offset = tcp->payload_tok.offset;
-		cfg->token_length = tcp->payload_tok.len;
-		cfg->num_tokens =
-			cpu_to_le16(tcp->tokens_size % tcp->payload_tok.len);
-		memcpy(cfg->tokens, tcp->payload_tok.token_stream,
-		       tcp->tokens_size);
-	} else {
-		/* set tokens to max value to almost never run out */
-		cfg->num_tokens = cpu_to_le16(65535);
-	}
-
-	/* ACK (RX) */
-	iwl_mvm_build_tcp_packet(
-		vif, tcp, cfg->keepalive_ack_rx.data,
-		cfg->keepalive_ack_rx.rx_mask,
-		&cfg->keepalive_ack_rx.info.tcp_pseudo_header_checksum,
-		MVM_TCP_RX_ACK);
-	cfg->keepalive_ack_rx.info.tcp_payload_length = 0;
-
-	/* WAKEUP (RX) */
-	iwl_mvm_build_tcp_packet(
-		vif, tcp, cfg->wake_rx.data, cfg->wake_rx.rx_mask,
-		&cfg->wake_rx.info.tcp_pseudo_header_checksum,
-		MVM_TCP_RX_WAKE);
-	cfg->wake_rx.info.tcp_payload_length =
-		cpu_to_le16(tcp->wake_len);
-
-	/* FIN */
-	iwl_mvm_build_tcp_packet(
-		vif, tcp, cfg->fin_tx.data, NULL,
-		&cfg->fin_tx.info.tcp_pseudo_header_checksum,
-		MVM_TCP_TX_FIN);
-	cfg->fin_tx.info.tcp_payload_length = 0;
-
-	ret = iwl_mvm_send_cmd(mvm, &cmd);
-	kfree(cfg);
-
-	return ret;
-}
-
 static int iwl_mvm_d3_reprogram(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
 				struct ieee80211_sta *ap_sta)
 {
@@ -1082,12 +857,7 @@ iwl_mvm_wowlan_config(struct iwl_mvm *mvm,
 	if (ret)
 		return ret;
 
-	ret = iwl_mvm_send_proto_offload(mvm, vif, false, true, 0);
-	if (ret)
-		return ret;
-
-	ret = iwl_mvm_send_remote_wake_cfg(mvm, vif, wowlan->tcp);
-	return ret;
+	return iwl_mvm_send_proto_offload(mvm, vif, false, true, 0);
 }
 
 static int
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c b/drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c
index 2ff594f11259..a7892c1254a2 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c
@@ -425,6 +425,50 @@ static ssize_t iwl_dbgfs_stations_read(struct file *file, char __user *user_buf,
 	return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
 }
 
+static ssize_t iwl_dbgfs_rs_data_read(struct file *file, char __user *user_buf,
+				      size_t count, loff_t *ppos)
+{
+	struct ieee80211_sta *sta = file->private_data;
+	struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
+	struct iwl_lq_sta_rs_fw *lq_sta = &mvmsta->lq_sta.rs_fw;
+	struct iwl_mvm *mvm = lq_sta->pers.drv;
+	static const size_t bufsz = 2048;
+	char *buff;
+	int desc = 0;
+	ssize_t ret;
+
+	buff = kmalloc(bufsz, GFP_KERNEL);
+	if (!buff)
+		return -ENOMEM;
+
+	mutex_lock(&mvm->mutex);
+
+	desc += scnprintf(buff + desc, bufsz - desc, "sta_id %d\n",
+			  lq_sta->pers.sta_id);
+	desc += scnprintf(buff + desc, bufsz - desc,
+			  "fixed rate 0x%X\n",
+			  lq_sta->pers.dbg_fixed_rate);
+	desc += scnprintf(buff + desc, bufsz - desc,
+			  "A-MPDU size limit %d\n",
+			  lq_sta->pers.dbg_agg_frame_count_lim);
+	desc += scnprintf(buff + desc, bufsz - desc,
+			  "valid_tx_ant %s%s%s\n",
+		(iwl_mvm_get_valid_tx_ant(mvm) & ANT_A) ? "ANT_A," : "",
+		(iwl_mvm_get_valid_tx_ant(mvm) & ANT_B) ? "ANT_B," : "",
+		(iwl_mvm_get_valid_tx_ant(mvm) & ANT_C) ? "ANT_C" : "");
+	desc += scnprintf(buff + desc, bufsz - desc,
+			  "last tx rate=0x%X ",
+			  lq_sta->last_rate_n_flags);
+
+	desc += rs_pretty_print_rate(buff + desc, bufsz - desc,
+				     lq_sta->last_rate_n_flags);
+	mutex_unlock(&mvm->mutex);
+
+	ret = simple_read_from_buffer(user_buf, count, ppos, buff, desc);
+	kfree(buff);
+	return ret;
+}
+
 static ssize_t iwl_dbgfs_disable_power_off_read(struct file *file,
 						char __user *user_buf,
 						size_t count, loff_t *ppos)
@@ -470,8 +514,7 @@ static ssize_t iwl_dbgfs_disable_power_off_write(struct iwl_mvm *mvm, char *buf,
 }
 
 static
-int iwl_mvm_coex_dump_mbox(struct iwl_mvm *mvm,
-			   struct iwl_bt_coex_profile_notif *notif, char *buf,
+int iwl_mvm_coex_dump_mbox(struct iwl_bt_coex_profile_notif *notif, char *buf,
 			   int pos, int bufsz)
 {
 	pos += scnprintf(buf+pos, bufsz-pos, "MBOX dw0:\n");
@@ -525,12 +568,7 @@ int iwl_mvm_coex_dump_mbox(struct iwl_mvm *mvm,
 	BT_MBOX_PRINT(3, INBAND_P, false);
 	BT_MBOX_PRINT(3, MSG_TYPE_2, false);
 	BT_MBOX_PRINT(3, SSN_2, false);
-	BT_MBOX_PRINT(3, UPDATE_REQUEST, !iwl_mvm_has_new_ats_coex_api(mvm));
-
-	if (iwl_mvm_has_new_ats_coex_api(mvm)) {
-		BT_MBOX_PRINT(4, ATS_BT_INTERVAL, false);
-		BT_MBOX_PRINT(4, ATS_BT_ACTIVE_MAX_TH, true);
-	}
+	BT_MBOX_PRINT(3, UPDATE_REQUEST, true);
 
 	return pos;
 }
@@ -549,7 +587,7 @@ static ssize_t iwl_dbgfs_bt_notif_read(struct file *file, char __user *user_buf,
 
 	mutex_lock(&mvm->mutex);
 
-	pos += iwl_mvm_coex_dump_mbox(mvm, notif, buf, pos, bufsz);
+	pos += iwl_mvm_coex_dump_mbox(notif, buf, pos, bufsz);
 
 	pos += scnprintf(buf + pos, bufsz - pos, "bt_ci_compliance = %d\n",
 			 notif->bt_ci_compliance);
@@ -721,6 +759,9 @@ static ssize_t iwl_dbgfs_fw_rx_stats_read(struct file *file,
 
 	mutex_lock(&mvm->mutex);
 
+	if (iwl_mvm_firmware_running(mvm))
+		iwl_mvm_request_statistics(mvm, false);
+
 	pos += scnprintf(buf + pos, bufsz - pos, fmt_header,
 			 "Statistics_Rx - OFDM");
 	if (!iwl_mvm_has_new_rx_stats_api(mvm)) {
@@ -936,7 +977,8 @@ static ssize_t iwl_dbgfs_frame_stats_read(struct iwl_mvm *mvm,
 			continue;
 		pos += scnprintf(pos, endpos - pos, "Rate[%d]: ",
 				 (int)(ARRAY_SIZE(stats->last_rates) - i));
-		pos += rs_pretty_print_rate(pos, stats->last_rates[idx]);
+		pos += rs_pretty_print_rate(pos, endpos - pos,
+					    stats->last_rates[idx]);
 	}
 	spin_unlock_bh(&mvm->drv_stats_lock);
 
@@ -1179,7 +1221,7 @@ static ssize_t iwl_dbgfs_cont_recording_write(struct iwl_mvm *mvm,
 					      loff_t *ppos)
 {
 	struct iwl_trans *trans = mvm->trans;
-	const struct iwl_fw_dbg_dest_tlv *dest = trans->dbg_dest_tlv;
+	const struct iwl_fw_dbg_dest_tlv_v1 *dest = trans->dbg_dest_tlv;
 	struct iwl_continuous_record_cmd cont_rec = {};
 	int ret, rec_mode;
 
@@ -1603,6 +1645,19 @@ static ssize_t iwl_dbgfs_d0i3_refs_write(struct iwl_mvm *mvm, char *buf,
 #define MVM_DEBUGFS_ADD_FILE(name, parent, mode) \
 	MVM_DEBUGFS_ADD_FILE_ALIAS(#name, name, parent, mode)
 
+#define MVM_DEBUGFS_WRITE_STA_FILE_OPS(name, bufsz) \
+	_MVM_DEBUGFS_WRITE_FILE_OPS(name, bufsz, struct ieee80211_sta)
+#define MVM_DEBUGFS_READ_WRITE_STA_FILE_OPS(name, bufsz) \
+	_MVM_DEBUGFS_READ_WRITE_FILE_OPS(name, bufsz, struct ieee80211_sta)
+
+#define MVM_DEBUGFS_ADD_STA_FILE_ALIAS(alias, name, parent, mode) do {	\
+		if (!debugfs_create_file(alias, mode, parent, sta,	\
+					 &iwl_dbgfs_##name##_ops))	\
+			goto err;					\
+	} while (0)
+#define MVM_DEBUGFS_ADD_STA_FILE(name, parent, mode) \
+	MVM_DEBUGFS_ADD_STA_FILE_ALIAS(#name, name, parent, mode)
+
 static ssize_t
 iwl_dbgfs_prph_reg_read(struct file *file,
 			char __user *user_buf,
@@ -1687,6 +1742,7 @@ MVM_DEBUGFS_READ_WRITE_FILE_OPS(sram, 64);
 MVM_DEBUGFS_READ_WRITE_FILE_OPS(set_nic_temperature, 64);
 MVM_DEBUGFS_READ_FILE_OPS(nic_temp);
 MVM_DEBUGFS_READ_FILE_OPS(stations);
+MVM_DEBUGFS_READ_FILE_OPS(rs_data);
 MVM_DEBUGFS_READ_FILE_OPS(bt_notif);
 MVM_DEBUGFS_READ_FILE_OPS(bt_cmd);
 MVM_DEBUGFS_READ_WRITE_FILE_OPS(disable_power_off, 64);
@@ -1851,6 +1907,21 @@ static const struct file_operations iwl_dbgfs_mem_ops = {
 	.llseek = default_llseek,
 };
 
+void iwl_mvm_sta_add_debugfs(struct ieee80211_hw *hw,
+			     struct ieee80211_vif *vif,
+			     struct ieee80211_sta *sta,
+			     struct dentry *dir)
+{
+	struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
+
+	if (iwl_mvm_has_tlc_offload(mvm))
+		MVM_DEBUGFS_ADD_STA_FILE(rs_data, dir, S_IRUSR);
+
+	return;
+err:
+	IWL_ERR(mvm, "Can't create the mvm station debugfs entry\n");
+}
+
 int iwl_mvm_dbgfs_register(struct iwl_mvm *mvm, struct dentry *dbgfs_dir)
 {
 	struct dentry *bcast_dir __maybe_unused;
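
Editor's note: the new rs_data handler, like the reworked rate printers later in this series, builds its output with the desc += scnprintf(buff + desc, bufsz - desc, ...) idiom. scnprintf() returns the number of characters actually stored (never more than the size passed in), so the running offset can be advanced blindly: once the buffer fills, bufsz - desc hits zero and further appends become no-ops instead of overruns, which is why the series converts the older sprintf() calls. A userspace-compilable sketch, with a small shim standing in for the kernel's scnprintf():

#include <stdarg.h>
#include <stdio.h>

/* userspace shim for the kernel's scnprintf(): returns the number of
 * characters actually stored, never more than size - 1 */
static int scnprintf_shim(char *buf, size_t size, const char *fmt, ...)
{
	va_list ap;
	int n;

	if (size == 0)
		return 0;
	va_start(ap, fmt);
	n = vsnprintf(buf, size, fmt, ap);
	va_end(ap);
	if (n < 0)
		return 0;
	return (size_t)n >= size ? (int)size - 1 : n;
}

int main(void)
{
	char buf[32];
	int desc = 0;

	/* each append advances desc by what was actually written, so the
	 * pattern stays safe even after the buffer fills up */
	desc += scnprintf_shim(buf + desc, sizeof(buf) - desc,
			       "sta_id %d\n", 3);
	desc += scnprintf_shim(buf + desc, sizeof(buf) - desc,
			       "last tx rate=0x%X\n", 0x4103);
	printf("%.*s", desc, buf);
	return 0;
}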
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/fw.c b/drivers/net/wireless/intel/iwlwifi/mvm/fw.c
index c0de7bb86cf7..0920be637b57 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/fw.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/fw.c
@@ -297,7 +297,7 @@ static int iwl_mvm_load_ucode_wait_alive(struct iwl_mvm *mvm,
 	if (ret) {
 		struct iwl_trans *trans = mvm->trans;
 
-		if (trans->cfg->device_family == IWL_DEVICE_FAMILY_A000)
+		if (trans->cfg->device_family == IWL_DEVICE_FAMILY_22000)
 			IWL_ERR(mvm,
 				"SecBoot CPU1 Status: 0x%x, CPU2 Status: 0x%x\n",
 				iwl_read_prph(trans, UMAG_SB_CPU_1_STATUS),
@@ -923,11 +923,11 @@ static int iwl_mvm_load_rt_fw(struct iwl_mvm *mvm)
 
 	ret = iwl_run_init_mvm_ucode(mvm, false);
 
-	if (iwlmvm_mod_params.init_dbg)
-		return 0;
-
 	if (ret) {
 		IWL_ERR(mvm, "Failed to run INIT ucode: %d\n", ret);
+
+		if (iwlmvm_mod_params.init_dbg)
+			return 0;
 		return ret;
 	}
 
@@ -998,9 +998,9 @@ int iwl_mvm_up(struct iwl_mvm *mvm)
 		goto error;
 
 	/* Init RSS configuration */
-	/* TODO - remove a000 disablement when we have RXQ config API */
+	/* TODO - remove 22000 disablement when we have RXQ config API */
 	if (iwl_mvm_has_new_rx_api(mvm) &&
-	    mvm->trans->cfg->device_family != IWL_DEVICE_FAMILY_A000) {
+	    mvm->trans->cfg->device_family != IWL_DEVICE_FAMILY_22000) {
 		ret = iwl_send_rss_cfg_cmd(mvm);
 		if (ret) {
 			IWL_ERR(mvm, "Failed to configure RSS queues: %d\n",
@@ -1111,7 +1111,7 @@ int iwl_mvm_up(struct iwl_mvm *mvm)
 	IWL_DEBUG_INFO(mvm, "RT uCode started.\n");
 	return 0;
  error:
-	if (!iwlmvm_mod_params.init_dbg)
+	if (!iwlmvm_mod_params.init_dbg || !ret)
 		iwl_mvm_stop_device(mvm);
 	return ret;
 }
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c b/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c
index 3e92a117c0b8..8aed40a8bc38 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c
@@ -114,29 +114,6 @@ static const struct ieee80211_iface_combination iwl_mvm_iface_combinations[] = {
 	},
 };
 
-#ifdef CONFIG_PM_SLEEP
-static const struct nl80211_wowlan_tcp_data_token_feature
-iwl_mvm_wowlan_tcp_token_feature = {
-	.min_len = 0,
-	.max_len = 255,
-	.bufsize = IWL_WOWLAN_REMOTE_WAKE_MAX_TOKENS,
-};
-
-static const struct wiphy_wowlan_tcp_support iwl_mvm_wowlan_tcp_support = {
-	.tok = &iwl_mvm_wowlan_tcp_token_feature,
-	.data_payload_max = IWL_WOWLAN_TCP_MAX_PACKET_LEN -
-			    sizeof(struct ethhdr) -
-			    sizeof(struct iphdr) -
-			    sizeof(struct tcphdr),
-	.data_interval_max = 65535, /* __le16 in API */
-	.wake_payload_max = IWL_WOWLAN_REMOTE_WAKE_MAX_PACKET_LEN -
-			    sizeof(struct ethhdr) -
-			    sizeof(struct iphdr) -
-			    sizeof(struct tcphdr),
-	.seq = true,
-};
-#endif
-
 #ifdef CONFIG_IWLWIFI_BCAST_FILTERING
 /*
  * Use the reserved field to indicate magic values.
@@ -443,6 +420,12 @@ int iwl_mvm_mac_setup_register(struct iwl_mvm *mvm)
 	ieee80211_hw_set(hw, SUPPORTS_CLONED_SKBS);
 	ieee80211_hw_set(hw, SUPPORTS_AMSDU_IN_AMPDU);
 	ieee80211_hw_set(hw, NEEDS_UNIQUE_STA_ADDR);
+
+	if (iwl_mvm_has_tlc_offload(mvm)) {
+		ieee80211_hw_set(hw, TX_AMPDU_SETUP_IN_HW);
+		ieee80211_hw_set(hw, HAS_RATE_CONTROL);
+	}
+
 	if (iwl_mvm_has_new_rx_api(mvm))
 		ieee80211_hw_set(hw, SUPPORTS_REORDERING_BUFFER);
 
@@ -477,7 +460,9 @@ int iwl_mvm_mac_setup_register(struct iwl_mvm *mvm)
 	/* this is the case for CCK frames, it's better (only 8) for OFDM */
 	hw->radiotap_timestamp.accuracy = 22;
 
-	hw->rate_control_algorithm = "iwl-mvm-rs";
+	if (!iwl_mvm_has_tlc_offload(mvm))
+		hw->rate_control_algorithm = RS_NAME;
+
 	hw->uapsd_queues = IWL_MVM_UAPSD_QUEUES;
 	hw->uapsd_max_sp_len = IWL_UAPSD_MAX_SP;
 
@@ -702,7 +687,6 @@ int iwl_mvm_mac_setup_register(struct iwl_mvm *mvm)
 		mvm->wowlan.pattern_min_len = IWL_WOWLAN_MIN_PATTERN_LEN;
 		mvm->wowlan.pattern_max_len = IWL_WOWLAN_MAX_PATTERN_LEN;
 		mvm->wowlan.max_nd_match_sets = IWL_SCAN_MAX_PROFILES;
-		mvm->wowlan.tcp = &iwl_mvm_wowlan_tcp_support;
 		hw->wiphy->wowlan = &mvm->wowlan;
 	}
 #endif
@@ -3216,6 +3200,10 @@ static int iwl_mvm_roc(struct ieee80211_hw *hw,
 	IWL_DEBUG_MAC80211(mvm, "enter (%d, %d, %d)\n", channel->hw_value,
 			   duration, type);
 
+	/*
+	 * Flush the done work, just in case it's still pending, so that
+	 * the work it does can complete and we can accept new frames.
+	 */
 	flush_work(&mvm->roc_done_wk);
 
 	mutex_lock(&mvm->mutex);
@@ -3813,7 +3801,7 @@ static int __iwl_mvm_mac_testmode_cmd(struct iwl_mvm *mvm,
 		mvm->noa_duration = noa_duration;
 		mvm->noa_vif = vif;
 
-		return iwl_mvm_update_quotas(mvm, false, NULL);
+		return iwl_mvm_update_quotas(mvm, true, NULL);
 	case IWL_MVM_TM_CMD_SET_BEACON_FILTER:
 		/* must be associated client vif - ignore authorized */
 		if (!vif || vif->type != NL80211_IFTYPE_STATION ||
@@ -4301,7 +4289,7 @@ void iwl_mvm_sync_rx_queues_internal(struct iwl_mvm *mvm,
 			   mvm->trans->num_rx_queues);
 
 	/* TODO - remove this when we have RXQ config API */
-	if (mvm->trans->cfg->device_family == IWL_DEVICE_FAMILY_A000) {
+	if (mvm->trans->cfg->device_family == IWL_DEVICE_FAMILY_22000) {
 		qmask = BIT(0);
 		if (notif->sync)
 			atomic_set(&mvm->queue_sync_counter, 1);
@@ -4414,4 +4402,7 @@ const struct ieee80211_ops iwl_mvm_hw_ops = {
 #endif
 	.get_survey = iwl_mvm_mac_get_survey,
 	.sta_statistics = iwl_mvm_mac_sta_statistics,
+#ifdef CONFIG_IWLWIFI_DEBUGFS
+	.sta_add_debugfs = iwl_mvm_sta_add_debugfs,
+#endif
 };
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h b/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h
index 55ab5349dd40..2d28e0804218 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h
@@ -1248,7 +1248,7 @@ static inline bool iwl_mvm_has_new_tx_api(struct iwl_mvm *mvm)
 static inline bool iwl_mvm_has_unified_ucode(struct iwl_mvm *mvm)
 {
 	/* TODO - better define this */
-	return mvm->trans->cfg->device_family >= IWL_DEVICE_FAMILY_A000;
+	return mvm->trans->cfg->device_family >= IWL_DEVICE_FAMILY_22000;
 }
 
 static inline bool iwl_mvm_is_cdb_supported(struct iwl_mvm *mvm)
@@ -1272,16 +1272,16 @@ static inline bool iwl_mvm_has_new_rx_stats_api(struct iwl_mvm *mvm)
 			  IWL_UCODE_TLV_API_NEW_RX_STATS);
 }
 
-static inline bool iwl_mvm_has_new_ats_coex_api(struct iwl_mvm *mvm)
+static inline bool iwl_mvm_has_quota_low_latency(struct iwl_mvm *mvm)
 {
 	return fw_has_api(&mvm->fw->ucode_capa,
-			  IWL_UCODE_TLV_API_COEX_ATS_EXTERNAL);
+			  IWL_UCODE_TLV_API_QUOTA_LOW_LATENCY);
 }
 
-static inline bool iwl_mvm_has_quota_low_latency(struct iwl_mvm *mvm)
+static inline bool iwl_mvm_has_tlc_offload(const struct iwl_mvm *mvm)
 {
-	return fw_has_api(&mvm->fw->ucode_capa,
-			  IWL_UCODE_TLV_API_QUOTA_LOW_LATENCY);
+	return fw_has_capa(&mvm->fw->ucode_capa,
+			   IWL_UCODE_TLV_CAPA_TLC_OFFLOAD);
 }
 
 static inline struct agg_tx_status *
@@ -1600,9 +1600,9 @@ iwl_mvm_vif_dbgfs_clean(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
 /* rate scaling */
 int iwl_mvm_send_lq_cmd(struct iwl_mvm *mvm, struct iwl_lq_cmd *lq, bool init);
 void iwl_mvm_update_frame_stats(struct iwl_mvm *mvm, u32 rate, bool agg);
-int rs_pretty_print_rate(char *buf, const u32 rate);
+int rs_pretty_print_rate(char *buf, int bufsz, const u32 rate);
 void rs_update_last_rssi(struct iwl_mvm *mvm,
-			 struct iwl_lq_sta *lq_sta,
+			 struct iwl_mvm_sta *mvmsta,
 			 struct ieee80211_rx_status *rx_status);
 
 /* power management */
@@ -1882,5 +1882,11 @@ void iwl_mvm_event_frame_timeout_callback(struct iwl_mvm *mvm,
 
 int iwl_mvm_sar_select_profile(struct iwl_mvm *mvm, int prof_a, int prof_b);
 int iwl_mvm_get_sar_geo_profile(struct iwl_mvm *mvm);
+#ifdef CONFIG_IWLWIFI_DEBUGFS
+void iwl_mvm_sta_add_debugfs(struct ieee80211_hw *hw,
+			     struct ieee80211_vif *vif,
+			     struct ieee80211_sta *sta,
+			     struct dentry *dir);
+#endif
 
 #endif /* __IWL_MVM_H__ */
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/ops.c b/drivers/net/wireless/intel/iwlwifi/mvm/ops.c
index 45470b6b351a..5d525a0023dc 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/ops.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/ops.c
@@ -127,11 +127,8 @@ static int __init iwl_mvm_init(void)
 	}
 
 	ret = iwl_opmode_register("iwlmvm", &iwl_mvm_ops);
-
-	if (ret) {
+	if (ret)
 		pr_err("Unable to register MVM op_mode: %d\n", ret);
-		iwl_mvm_rate_control_unregister();
-	}
 
 	return ret;
 }
@@ -605,7 +602,8 @@ iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg,
 	mvm->fw = fw;
 	mvm->hw = hw;
 
-	iwl_fw_runtime_init(&mvm->fwrt, trans, fw, &iwl_mvm_fwrt_ops, mvm);
+	iwl_fw_runtime_init(&mvm->fwrt, trans, fw, &iwl_mvm_fwrt_ops, mvm,
+			    dbgfs_dir);
 
 	mvm->init_status = 0;
 
@@ -750,7 +748,7 @@ iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg,
 	mutex_lock(&mvm->mutex);
 	iwl_mvm_ref(mvm, IWL_MVM_REF_INIT_UCODE);
 	err = iwl_run_init_mvm_ucode(mvm, true);
-	if (!iwlmvm_mod_params.init_dbg)
+	if (!iwlmvm_mod_params.init_dbg || !err)
 		iwl_mvm_stop_device(mvm);
 	iwl_mvm_unref(mvm, IWL_MVM_REF_INIT_UCODE);
 	mutex_unlock(&mvm->mutex);
@@ -804,6 +802,7 @@ iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg,
 	iwl_mvm_leds_exit(mvm);
 	iwl_mvm_thermal_exit(mvm);
  out_free:
+	iwl_fw_runtime_exit(&mvm->fwrt);
 	iwl_fw_flush_dump(&mvm->fwrt);
 
 	if (iwlmvm_mod_params.init_dbg)
@@ -844,7 +843,7 @@ static void iwl_op_mode_mvm_stop(struct iwl_op_mode *op_mode)
 #if defined(CONFIG_PM_SLEEP) && defined(CONFIG_IWLWIFI_DEBUGFS)
 	kfree(mvm->d3_resume_sram);
 #endif
-
+	iwl_fw_runtime_exit(&mvm->fwrt);
 	iwl_trans_op_mode_leave(mvm->trans);
 
 	iwl_phy_db_free(mvm->phy_db);
@@ -1021,6 +1020,8 @@ static void iwl_mvm_rx_mq(struct iwl_op_mode *op_mode,
 		iwl_mvm_rx_queue_notif(mvm, rxb, 0);
 	else if (cmd == WIDE_ID(LEGACY_GROUP, FRAME_RELEASE))
 		iwl_mvm_rx_frame_release(mvm, napi, rxb, 0);
+	else if (cmd == WIDE_ID(DATA_PATH_GROUP, TLC_MNG_UPDATE_NOTIF))
+		iwl_mvm_tlc_update_notif(mvm, pkt);
 	else
 		iwl_mvm_rx_common(mvm, rxb, pkt);
 }
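
Editor's note: the new dispatch line routes the TLC notification by its wide command id; a wide id packs the command group into the high byte and the opcode into the low byte, so a plain integer compare selects the handler. A compact sketch of the scheme (the group and opcode values below are illustrative, not the firmware's real numbering):

#include <stdint.h>
#include <stdio.h>

/* wide id: command group in the high byte, opcode in the low byte */
#define WIDE_ID(grp, opcode)	((uint16_t)(((grp) << 8) | (opcode)))

/* illustrative values only */
enum { LEGACY_GROUP = 0x0, DATA_PATH_GROUP = 0x5 };
enum { FRAME_RELEASE = 0xc3, TLC_MNG_UPDATE_NOTIF = 0xf7 };

static void rx_dispatch(uint8_t group, uint8_t opcode)
{
	uint16_t cmd = WIDE_ID(group, opcode);

	if (cmd == WIDE_ID(LEGACY_GROUP, FRAME_RELEASE))
		puts("frame release");
	else if (cmd == WIDE_ID(DATA_PATH_GROUP, TLC_MNG_UPDATE_NOTIF))
		puts("TLC update notification");
	else
		puts("common handler");
}

int main(void)
{
	rx_dispatch(DATA_PATH_GROUP, TLC_MNG_UPDATE_NOTIF);
	rx_dispatch(LEGACY_GROUP, 0x01);
	return 0;
}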
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/quota.c b/drivers/net/wireless/intel/iwlwifi/mvm/quota.c
index b4a0264329b7..690559bdf421 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/quota.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/quota.c
@@ -202,6 +202,10 @@ int iwl_mvm_update_quotas(struct iwl_mvm *mvm,
 
 	lockdep_assert_held(&mvm->mutex);
 
+	if (fw_has_capa(&mvm->fw->ucode_capa,
+			IWL_UCODE_TLV_CAPA_DYNAMIC_QUOTA))
+		return 0;
+
 	/* update all upon completion */
 	if (test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status))
 		return 0;
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/rs-fw.c b/drivers/net/wireless/intel/iwlwifi/mvm/rs-fw.c
new file mode 100644
index 000000000000..55d1274c6092
--- /dev/null
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/rs-fw.c
@@ -0,0 +1,314 @@
+/******************************************************************************
+ *
+ * This file is provided under a dual BSD/GPLv2 license.  When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2017        Intel Deutschland GmbH
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * The full GNU General Public License is included in this distribution
+ * in the file called COPYING.
+ *
+ * Contact Information:
+ *  Intel Linux Wireless <linuxwifi@intel.com>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ * BSD LICENSE
+ *
+ * Copyright(c) 2017        Intel Deutschland GmbH
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ *  * Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ *  * Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in
+ *    the documentation and/or other materials provided with the
+ *    distribution.
+ *  * Neither the name Intel Corporation nor the names of its
+ *    contributors may be used to endorse or promote products derived
+ *    from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ *****************************************************************************/
+#include "rs.h"
+#include "fw-api.h"
+#include "sta.h"
+#include "iwl-op-mode.h"
+#include "mvm.h"
+
+static u8 rs_fw_bw_from_sta_bw(struct ieee80211_sta *sta)
+{
+	switch (sta->bandwidth) {
+	case IEEE80211_STA_RX_BW_160:
+		return IWL_TLC_MNG_MAX_CH_WIDTH_160MHZ;
+	case IEEE80211_STA_RX_BW_80:
+		return IWL_TLC_MNG_MAX_CH_WIDTH_80MHZ;
+	case IEEE80211_STA_RX_BW_40:
+		return IWL_TLC_MNG_MAX_CH_WIDTH_40MHZ;
+	case IEEE80211_STA_RX_BW_20:
+	default:
+		return IWL_TLC_MNG_MAX_CH_WIDTH_20MHZ;
+	}
+}
+
+static u8 rs_fw_set_active_chains(u8 chains)
+{
+	u8 fw_chains = 0;
+
+	if (chains & ANT_A)
+		fw_chains |= IWL_TLC_MNG_CHAIN_A_MSK;
+	if (chains & ANT_B)
+		fw_chains |= IWL_TLC_MNG_CHAIN_B_MSK;
+	if (chains & ANT_C)
+		fw_chains |= IWL_TLC_MNG_CHAIN_C_MSK;
+
+	return fw_chains;
+}
+
+static u8 rs_fw_sgi_cw_support(struct ieee80211_sta *sta)
+{
+	struct ieee80211_sta_ht_cap *ht_cap = &sta->ht_cap;
+	struct ieee80211_sta_vht_cap *vht_cap = &sta->vht_cap;
+	u8 supp = 0;
+
+	if (ht_cap->cap & IEEE80211_HT_CAP_SGI_20)
+		supp |= IWL_TLC_MNG_SGI_20MHZ_MSK;
+	if (ht_cap->cap & IEEE80211_HT_CAP_SGI_40)
+		supp |= IWL_TLC_MNG_SGI_40MHZ_MSK;
+	if (vht_cap->cap & IEEE80211_VHT_CAP_SHORT_GI_80)
+		supp |= IWL_TLC_MNG_SGI_80MHZ_MSK;
+	if (vht_cap->cap & IEEE80211_VHT_CAP_SHORT_GI_160)
+		supp |= IWL_TLC_MNG_SGI_160MHZ_MSK;
+
+	return supp;
+}
+
+static u16 rs_fw_set_config_flags(struct iwl_mvm *mvm,
+				  struct ieee80211_sta *sta)
+{
+	struct ieee80211_sta_ht_cap *ht_cap = &sta->ht_cap;
+	struct ieee80211_sta_vht_cap *vht_cap = &sta->vht_cap;
+	bool vht_ena = vht_cap && vht_cap->vht_supported;
+	u16 flags = IWL_TLC_MNG_CFG_FLAGS_CCK_MSK |
+		    IWL_TLC_MNG_CFG_FLAGS_DCM_MSK |
+		    IWL_TLC_MNG_CFG_FLAGS_DD_MSK;
+
+	if (mvm->cfg->ht_params->stbc &&
+	    (num_of_ant(iwl_mvm_get_valid_tx_ant(mvm)) > 1) &&
+	    ((ht_cap && (ht_cap->cap & IEEE80211_HT_CAP_RX_STBC)) ||
+	     (vht_ena && (vht_cap->cap & IEEE80211_VHT_CAP_RXSTBC_MASK))))
+		flags |= IWL_TLC_MNG_CFG_FLAGS_STBC_MSK;
+
+	if (mvm->cfg->ht_params->ldpc &&
+	    ((ht_cap && (ht_cap->cap & IEEE80211_HT_CAP_LDPC_CODING)) ||
+	     (vht_ena && (vht_cap->cap & IEEE80211_VHT_CAP_RXLDPC))))
+		flags |= IWL_TLC_MNG_CFG_FLAGS_LDPC_MSK;
+
+	if (fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_BEAMFORMER) &&
+	    (num_of_ant(iwl_mvm_get_valid_tx_ant(mvm)) > 1) &&
+	    (vht_cap->cap & IEEE80211_VHT_CAP_SU_BEAMFORMEE_CAPABLE))
+		flags |= IWL_TLC_MNG_CFG_FLAGS_BF_MSK;
+
+	return flags;
+}
+
+static
+int rs_fw_vht_highest_rx_mcs_index(struct ieee80211_sta_vht_cap *vht_cap,
+				   int nss)
+{
+	u16 rx_mcs = le16_to_cpu(vht_cap->vht_mcs.rx_mcs_map) &
+		(0x3 << (2 * (nss - 1)));
+	rx_mcs >>= (2 * (nss - 1));
+
+	switch (rx_mcs) {
+	case IEEE80211_VHT_MCS_SUPPORT_0_7:
+		return IWL_TLC_MNG_HT_RATE_MCS7;
+	case IEEE80211_VHT_MCS_SUPPORT_0_8:
+		return IWL_TLC_MNG_HT_RATE_MCS8;
+	case IEEE80211_VHT_MCS_SUPPORT_0_9:
+		return IWL_TLC_MNG_HT_RATE_MCS9;
+	default:
+		WARN_ON_ONCE(1);
+		break;
+	}
+
+	return 0;
+}
+
+static void rs_fw_vht_set_enabled_rates(struct ieee80211_sta *sta,
+					struct ieee80211_sta_vht_cap *vht_cap,
+					struct iwl_tlc_config_cmd *cmd)
+{
+	u16 supp;
+	int i, highest_mcs;
+
+	for (i = 0; i < sta->rx_nss; i++) {
+		if (i == MAX_RS_ANT_NUM)
+			break;
+
+		highest_mcs = rs_fw_vht_highest_rx_mcs_index(vht_cap, i + 1);
+		if (!highest_mcs)
+			continue;
+
+		supp = BIT(highest_mcs + 1) - 1;
+		if (sta->bandwidth == IEEE80211_STA_RX_BW_20)
+			supp &= ~BIT(IWL_TLC_MNG_HT_RATE_MCS9);
+
+		cmd->ht_supp_rates[i] = cpu_to_le16(supp);
+	}
+}
+
+static void rs_fw_set_supp_rates(struct ieee80211_sta *sta,
+				 struct ieee80211_supported_band *sband,
+				 struct iwl_tlc_config_cmd *cmd)
+{
+	int i;
+	unsigned long tmp;
+	unsigned long supp; /* must be unsigned long for for_each_set_bit */
+	struct ieee80211_sta_ht_cap *ht_cap = &sta->ht_cap;
+	struct ieee80211_sta_vht_cap *vht_cap = &sta->vht_cap;
+
+	/* non HT rates */
+	supp = 0;
+	tmp = sta->supp_rates[sband->band];
+	for_each_set_bit(i, &tmp, BITS_PER_LONG)
+		supp |= BIT(sband->bitrates[i].hw_value);
+
+	cmd->non_ht_supp_rates = cpu_to_le16(supp);
+	cmd->mode = IWL_TLC_MNG_MODE_NON_HT;
+
+	/* HT/VHT rates */
+	if (vht_cap && vht_cap->vht_supported) {
+		cmd->mode = IWL_TLC_MNG_MODE_VHT;
+		rs_fw_vht_set_enabled_rates(sta, vht_cap, cmd);
+	} else if (ht_cap && ht_cap->ht_supported) {
+		cmd->mode = IWL_TLC_MNG_MODE_HT;
+		cmd->ht_supp_rates[0] = cpu_to_le16(ht_cap->mcs.rx_mask[0]);
+		cmd->ht_supp_rates[1] = cpu_to_le16(ht_cap->mcs.rx_mask[1]);
+	}
+}
+
+static void rs_fw_tlc_mng_notif_req_config(struct iwl_mvm *mvm, u8 sta_id)
+{
+	u32 cmd_id = iwl_cmd_id(TLC_MNG_NOTIF_REQ_CMD, DATA_PATH_GROUP, 0);
+	struct iwl_tlc_notif_req_config_cmd cfg_cmd = {
+		.sta_id = sta_id,
+		.flags = cpu_to_le16(IWL_TLC_NOTIF_INIT_RATE_MSK),
+		.interval = cpu_to_le16(IWL_TLC_NOTIF_REQ_INTERVAL),
+	};
+	int ret;
+
+	ret = iwl_mvm_send_cmd_pdu(mvm, cmd_id, 0, sizeof(cfg_cmd), &cfg_cmd);
+	if (ret)
+		IWL_ERR(mvm, "Failed to send TLC notif request (%d)\n", ret);
+}
+
+void iwl_mvm_tlc_update_notif(struct iwl_mvm *mvm, struct iwl_rx_packet *pkt)
+{
+	struct iwl_tlc_update_notif *notif;
+	struct iwl_mvm_sta *mvmsta;
+	struct iwl_lq_sta_rs_fw *lq_sta;
+
+	notif = (void *)pkt->data;
+	mvmsta = iwl_mvm_sta_from_staid_rcu(mvm, notif->sta_id);
+
+	if (!mvmsta) {
+		IWL_ERR(mvm, "Invalid sta id (%d) in FW TLC notification\n",
+			notif->sta_id);
+		return;
+	}
+
+	lq_sta = &mvmsta->lq_sta.rs_fw;
+
+	if (le16_to_cpu(notif->flags) & IWL_TLC_NOTIF_INIT_RATE_MSK) {
+		lq_sta->last_rate_n_flags =
+			le32_to_cpu(notif->values[IWL_TLC_NOTIF_INIT_RATE_POS]);
+		IWL_DEBUG_RATE(mvm, "new rate_n_flags: 0x%X\n",
+			       lq_sta->last_rate_n_flags);
+	}
+}
+
+void rs_fw_rate_init(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
+		     enum nl80211_band band)
+{
+	struct ieee80211_hw *hw = mvm->hw;
+	struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
+	struct iwl_lq_sta_rs_fw *lq_sta = &mvmsta->lq_sta.rs_fw;
+	u32 cmd_id = iwl_cmd_id(TLC_MNG_CONFIG_CMD, DATA_PATH_GROUP, 0);
+	struct ieee80211_supported_band *sband;
+	struct iwl_tlc_config_cmd cfg_cmd = {
+		.sta_id = mvmsta->sta_id,
+		.max_supp_ch_width = rs_fw_bw_from_sta_bw(sta),
+		.flags = cpu_to_le16(rs_fw_set_config_flags(mvm, sta)),
+		.chains = rs_fw_set_active_chains(iwl_mvm_get_valid_tx_ant(mvm)),
+		.max_supp_ss = sta->rx_nss,
+		.max_ampdu_cnt = cpu_to_le32(mvmsta->max_agg_bufsize),
+		.sgi_ch_width_supp = rs_fw_sgi_cw_support(sta),
+	};
+	int ret;
+
+	memset(lq_sta, 0, offsetof(typeof(*lq_sta), pers));
+
+#ifdef CONFIG_IWLWIFI_DEBUGFS
+	iwl_mvm_reset_frame_stats(mvm);
+#endif
+	sband = hw->wiphy->bands[band];
+	rs_fw_set_supp_rates(sta, sband, &cfg_cmd);
+
+	ret = iwl_mvm_send_cmd_pdu(mvm, cmd_id, 0, sizeof(cfg_cmd), &cfg_cmd);
+	if (ret)
+		IWL_ERR(mvm, "Failed to send rate scale config (%d)\n", ret);
+
+	rs_fw_tlc_mng_notif_req_config(mvm, cfg_cmd.sta_id);
+}
+
+int rs_fw_tx_protection(struct iwl_mvm *mvm, struct iwl_mvm_sta *mvmsta,
+			bool enable)
+{
+	/* TODO: need to introduce a new FW cmd since LQ cmd is not relevant */
+	IWL_DEBUG_RATE(mvm, "tx protection - not implemented yet.\n");
+	return 0;
+}
+
+void iwl_mvm_rs_add_sta(struct iwl_mvm *mvm, struct iwl_mvm_sta *mvmsta)
+{
+	struct iwl_lq_sta_rs_fw *lq_sta = &mvmsta->lq_sta.rs_fw;
+
+	IWL_DEBUG_RATE(mvm, "create station rate scale window\n");
+
+	lq_sta->pers.drv = mvm;
+	lq_sta->pers.sta_id = mvmsta->sta_id;
+	lq_sta->pers.chains = 0;
+	memset(lq_sta->pers.chain_signal, 0, sizeof(lq_sta->pers.chain_signal));
+	lq_sta->pers.last_rssi = S8_MIN;
+	lq_sta->last_rate_n_flags = 0;
+
+#ifdef CONFIG_MAC80211_DEBUGFS
+	lq_sta->pers.dbg_fixed_rate = 0;
+#endif
+}
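
Editor's note: rs_fw_vht_highest_rx_mcs_index() in the new file reads a 2-bit field out of the 16-bit VHT RX MCS map, where bits 2*(nss-1) and 2*(nss-1)+1 encode the highest MCS supported at that stream count (0 = MCS 0-7, 1 = MCS 0-8, 2 = MCS 0-9, 3 = unsupported). A standalone sketch of the same extraction:

#include <stdint.h>
#include <stdio.h>

/* 2-bit per-NSS codes from the VHT MCS map */
enum { VHT_MCS_0_7 = 0, VHT_MCS_0_8 = 1, VHT_MCS_0_9 = 2, VHT_MCS_NONE = 3 };

/* Highest RX MCS for a given stream count, or -1 if unsupported. */
static int highest_mcs_for_nss(uint16_t rx_mcs_map, int nss)
{
	unsigned int code = (rx_mcs_map >> (2 * (nss - 1))) & 0x3;

	switch (code) {
	case VHT_MCS_0_7:
		return 7;
	case VHT_MCS_0_8:
		return 8;
	case VHT_MCS_0_9:
		return 9;
	default:
		return -1;
	}
}

int main(void)
{
	/* example map: NSS 1 -> MCS 0-9, NSS 2 -> MCS 0-8, rest unsupported */
	uint16_t map = 0xfff6;

	printf("nss1 highest mcs: %d\n", highest_mcs_for_nss(map, 1)); /* 9 */
	printf("nss2 highest mcs: %d\n", highest_mcs_for_nss(map, 2)); /* 8 */
	printf("nss3 highest mcs: %d\n", highest_mcs_for_nss(map, 3)); /* -1 */
	return 0;
}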
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/rs.c b/drivers/net/wireless/intel/iwlwifi/mvm/rs.c
index c69515ed72df..60abb0084ee5 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/rs.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/rs.c
@@ -42,8 +42,6 @@
 #include "mvm.h"
 #include "debugfs.h"
 
-#define RS_NAME "iwl-mvm-rs"
-
 #define IWL_RATE_MAX_WINDOW		62	/* # tx in history window */
 
 /* Calculations of success ratio are done in fixed point where 12800 is 100%.
@@ -809,7 +807,7 @@ static int rs_collect_tlc_data(struct iwl_mvm *mvm,
 		return -EINVAL;
 
 	if (tbl->column != RS_COLUMN_INVALID) {
-		struct lq_sta_pers *pers = &mvmsta->lq_sta.pers;
+		struct lq_sta_pers *pers = &mvmsta->lq_sta.rs_drv.pers;
 
 		pers->tx_stats[tbl->column][scale_index].total += attempts;
 		pers->tx_stats[tbl->column][scale_index].success += successes;
@@ -1206,7 +1204,7 @@ void iwl_mvm_rs_tx_status(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
 	u8 lq_color = RS_DRV_DATA_LQ_COLOR_GET(tlc_info);
 	u32 tx_resp_hwrate = (uintptr_t)info->status.status_driver_data[1];
 	struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
-	struct iwl_lq_sta *lq_sta = &mvmsta->lq_sta;
+	struct iwl_lq_sta *lq_sta = &mvmsta->lq_sta.rs_drv;
 
 	/* Treat uninitialized rate scaling data same as non-existing. */
 	if (!lq_sta) {
@@ -1416,13 +1414,13 @@ done:
 /*
  * mac80211 sends us Tx status
  */
-static void rs_mac80211_tx_status(void *mvm_r,
-				  struct ieee80211_supported_band *sband,
-				  struct ieee80211_sta *sta, void *priv_sta,
-				  struct sk_buff *skb)
+static void rs_drv_mac80211_tx_status(void *mvm_r,
+				      struct ieee80211_supported_band *sband,
+				      struct ieee80211_sta *sta, void *priv_sta,
+				      struct sk_buff *skb)
 {
 	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
-	struct iwl_op_mode *op_mode = (struct iwl_op_mode *)mvm_r;
+	struct iwl_op_mode *op_mode = mvm_r;
 	struct iwl_mvm *mvm = IWL_OP_MODE_GET_MVM(op_mode);
 	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
 
@@ -1877,12 +1875,10 @@ static int rs_switch_to_column(struct iwl_mvm *mvm,
 	struct rs_rate *rate = &search_tbl->rate;
 	const struct rs_tx_column *column = &rs_tx_columns[col_id];
 	const struct rs_tx_column *curr_column = &rs_tx_columns[tbl->column];
-	u32 sz = (sizeof(struct iwl_scale_tbl_info) -
-		  (sizeof(struct iwl_rate_scale_data) * IWL_RATE_COUNT));
 	unsigned long rate_mask = 0;
 	u32 rate_idx = 0;
 
-	memcpy(search_tbl, tbl, sz);
+	memcpy(search_tbl, tbl, offsetof(struct iwl_scale_tbl_info, win));
 
 	rate->sgi = column->sgi;
 	rate->ant = column->ant;
@@ -2787,9 +2783,10 @@ out:
 
 /* Save info about RSSI of last Rx */
 void rs_update_last_rssi(struct iwl_mvm *mvm,
-			 struct iwl_lq_sta *lq_sta,
+			 struct iwl_mvm_sta *mvmsta,
 			 struct ieee80211_rx_status *rx_status)
 {
+	struct iwl_lq_sta *lq_sta = &mvmsta->lq_sta.rs_drv;
 	int i;
 
 	lq_sta->pers.chains = rx_status->chains;
@@ -2858,15 +2855,15 @@ static void rs_initialize_lq(struct iwl_mvm *mvm,
 	iwl_mvm_send_lq_cmd(mvm, &lq_sta->lq, init);
 }
 
-static void rs_get_rate(void *mvm_r, struct ieee80211_sta *sta, void *mvm_sta,
-			struct ieee80211_tx_rate_control *txrc)
+static void rs_drv_get_rate(void *mvm_r, struct ieee80211_sta *sta,
+			    void *mvm_sta,
+			    struct ieee80211_tx_rate_control *txrc)
 {
-	struct sk_buff *skb = txrc->skb;
-	struct iwl_op_mode *op_mode __maybe_unused =
-			(struct iwl_op_mode *)mvm_r;
+	struct iwl_op_mode *op_mode = mvm_r;
 	struct iwl_mvm *mvm __maybe_unused = IWL_OP_MODE_GET_MVM(op_mode);
+	struct sk_buff *skb = txrc->skb;
 	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
-	struct iwl_lq_sta *lq_sta = mvm_sta;
+	struct iwl_lq_sta *lq_sta;
 	struct rs_rate *optimal_rate;
 	u32 last_ucode_rate;
 
@@ -2878,18 +2875,14 @@ static void rs_get_rate(void *mvm_r, struct ieee80211_sta *sta, void *mvm_sta,
 		mvm_sta = NULL;
 	}
 
-	/* TODO: handle rate_idx_mask and rate_idx_mcs_mask */
-
-	/* Treat uninitialized rate scaling data same as non-existing. */
-	if (lq_sta && !lq_sta->pers.drv) {
-		IWL_DEBUG_RATE(mvm, "Rate scaling not initialized yet.\n");
-		mvm_sta = NULL;
-	}
-
 	/* Send management frames and NO_ACK data using lowest rate. */
 	if (rate_control_send_low(sta, mvm_sta, txrc))
 		return;
 
+	if (!mvm_sta)
+		return;
+
+	lq_sta = mvm_sta;
 	iwl_mvm_hwrate_to_tx_rate(lq_sta->last_rate_n_flags,
 				  info->band, &info->control.rates[0]);
 	info->control.rates[0].count = 1;
@@ -2906,13 +2899,13 @@ static void rs_get_rate(void *mvm_r, struct ieee80211_sta *sta, void *mvm_sta,
 	}
 }
 
-static void *rs_alloc_sta(void *mvm_rate, struct ieee80211_sta *sta,
-			  gfp_t gfp)
+static void *rs_drv_alloc_sta(void *mvm_rate, struct ieee80211_sta *sta,
+			      gfp_t gfp)
 {
 	struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
 	struct iwl_op_mode *op_mode = (struct iwl_op_mode *)mvm_rate;
 	struct iwl_mvm *mvm  = IWL_OP_MODE_GET_MVM(op_mode);
-	struct iwl_lq_sta *lq_sta = &mvmsta->lq_sta;
+	struct iwl_lq_sta *lq_sta = &mvmsta->lq_sta.rs_drv;
 
 	IWL_DEBUG_RATE(mvm, "create station rate scale window\n");
 
@@ -2926,7 +2919,7 @@ static void *rs_alloc_sta(void *mvm_rate, struct ieee80211_sta *sta,
 	memset(lq_sta->pers.chain_signal, 0, sizeof(lq_sta->pers.chain_signal));
 	lq_sta->pers.last_rssi = S8_MIN;
 
-	return &mvmsta->lq_sta;
+	return lq_sta;
 }
 
 static int rs_vht_highest_rx_mcs_index(struct ieee80211_sta_vht_cap *vht_cap,
@@ -3043,7 +3036,7 @@ static void rs_vht_init(struct iwl_mvm *mvm,
 }
 
 #ifdef CONFIG_IWLWIFI_DEBUGFS
-static void iwl_mvm_reset_frame_stats(struct iwl_mvm *mvm)
+void iwl_mvm_reset_frame_stats(struct iwl_mvm *mvm)
 {
 	spin_lock_bh(&mvm->drv_stats_lock);
 	memset(&mvm->drv_rx_stats, 0, sizeof(mvm->drv_rx_stats));
@@ -3111,15 +3104,15 @@ void iwl_mvm_update_frame_stats(struct iwl_mvm *mvm, u32 rate, bool agg)
 /*
  * Called after adding a new station to initialize rate scaling
  */
-void iwl_mvm_rs_rate_init(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
-			  enum nl80211_band band, bool init)
+static void rs_drv_rate_init(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
+			     enum nl80211_band band, bool init)
 {
 	int i, j;
 	struct ieee80211_hw *hw = mvm->hw;
 	struct ieee80211_sta_ht_cap *ht_cap = &sta->ht_cap;
 	struct ieee80211_sta_vht_cap *vht_cap = &sta->vht_cap;
 	struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
-	struct iwl_lq_sta *lq_sta = &mvmsta->lq_sta;
+	struct iwl_lq_sta *lq_sta = &mvmsta->lq_sta.rs_drv;
 	struct ieee80211_supported_band *sband;
 	unsigned long supp; /* must be unsigned long for for_each_set_bit */
 
@@ -3194,16 +3187,15 @@ void iwl_mvm_rs_rate_init(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
 	rs_initialize_lq(mvm, sta, lq_sta, band, init);
 }
 
-static void rs_rate_update(void *mvm_r,
-			   struct ieee80211_supported_band *sband,
-			   struct cfg80211_chan_def *chandef,
-			   struct ieee80211_sta *sta, void *priv_sta,
-			   u32 changed)
+static void rs_drv_rate_update(void *mvm_r,
+			       struct ieee80211_supported_band *sband,
+			       struct cfg80211_chan_def *chandef,
+			       struct ieee80211_sta *sta,
+			       void *priv_sta, u32 changed)
 {
+	struct iwl_op_mode *op_mode = mvm_r;
+	struct iwl_mvm *mvm __maybe_unused = IWL_OP_MODE_GET_MVM(op_mode);
 	u8 tid;
-	struct iwl_op_mode *op_mode  =
-			(struct iwl_op_mode *)mvm_r;
-	struct iwl_mvm *mvm = IWL_OP_MODE_GET_MVM(op_mode);
 
 	if (!iwl_mvm_sta_from_mac80211(sta)->vif)
 		return;
@@ -3385,7 +3377,7 @@ static void rs_bfer_active_iter(void *_data,
 {
 	struct rs_bfer_active_iter_data *data = _data;
 	struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
-	struct iwl_lq_cmd *lq_cmd = &mvmsta->lq_sta.lq;
+	struct iwl_lq_cmd *lq_cmd = &mvmsta->lq_sta.rs_drv.lq;
 	u32 ss_params = le32_to_cpu(lq_cmd->ss_params);
 
 	if (sta == data->exclude_sta)
@@ -3497,7 +3489,8 @@ static void rs_set_lq_ss_params(struct iwl_mvm *mvm,
 
 	/* Disallow BFER on another STA if active and we're a higher priority */
 	if (rs_bfer_priority_cmp(mvmsta, bfer_mvmsta) > 0) {
-		struct iwl_lq_cmd *bfersta_lq_cmd = &bfer_mvmsta->lq_sta.lq;
+		struct iwl_lq_cmd *bfersta_lq_cmd =
+			&bfer_mvmsta->lq_sta.rs_drv.lq;
 		u32 bfersta_ss_params = le32_to_cpu(bfersta_lq_cmd->ss_params);
 
 		bfersta_ss_params &= ~LQ_SS_BFER_ALLOWED;
@@ -3569,14 +3562,14 @@ static void *rs_alloc(struct ieee80211_hw *hw, struct dentry *debugfsdir)
 {
 	return hw->priv;
 }
+
 /* rate scale requires free function to be implemented */
 static void rs_free(void *mvm_rate)
 {
 	return;
 }
 
-static void rs_free_sta(void *mvm_r, struct ieee80211_sta *sta,
-			void *mvm_sta)
+static void rs_free_sta(void *mvm_r, struct ieee80211_sta *sta, void *mvm_sta)
 {
 	struct iwl_op_mode *op_mode __maybe_unused = mvm_r;
 	struct iwl_mvm *mvm __maybe_unused = IWL_OP_MODE_GET_MVM(op_mode);
@@ -3586,7 +3579,7 @@ static void rs_free_sta(void *mvm_r, struct ieee80211_sta *sta,
 }
 
 #ifdef CONFIG_MAC80211_DEBUGFS
-int rs_pretty_print_rate(char *buf, const u32 rate)
+int rs_pretty_print_rate(char *buf, int bufsz, const u32 rate)
 {
 
 	char *type, *bw;
@@ -3597,10 +3590,10 @@ int rs_pretty_print_rate(char *buf, const u32 rate)
 	    !(rate & RATE_MCS_VHT_MSK)) {
 		int index = iwl_hwrate_to_plcp_idx(rate);
 
-		return sprintf(buf, "Legacy | ANT: %s Rate: %s Mbps\n",
-			       rs_pretty_ant(ant),
-			       index == IWL_RATE_INVALID ? "BAD" :
-			       iwl_rate_mcs[index].mbps);
+		return scnprintf(buf, bufsz, "Legacy | ANT: %s Rate: %s Mbps\n",
+				 rs_pretty_ant(ant),
+				 index == IWL_RATE_INVALID ? "BAD" :
+				 iwl_rate_mcs[index].mbps);
 	}
 
 	if (rate & RATE_MCS_VHT_MSK) {
@@ -3634,12 +3627,13 @@ int rs_pretty_print_rate(char *buf, const u32 rate)
 		bw = "BAD BW";
 	}
 
-	return sprintf(buf, "%s | ANT: %s BW: %s MCS: %d NSS: %d %s%s%s%s\n",
-		       type, rs_pretty_ant(ant), bw, mcs, nss,
-		       (rate & RATE_MCS_SGI_MSK) ? "SGI " : "NGI ",
-		       (rate & RATE_MCS_STBC_MSK) ? "STBC " : "",
-		       (rate & RATE_MCS_LDPC_MSK) ? "LDPC " : "",
-		       (rate & RATE_MCS_BF_MSK) ? "BF " : "");
+	return scnprintf(buf, bufsz,
+			 "%s | ANT: %s BW: %s MCS: %d NSS: %d %s%s%s%s\n",
+			 type, rs_pretty_ant(ant), bw, mcs, nss,
+			 (rate & RATE_MCS_SGI_MSK) ? "SGI " : "NGI ",
+			 (rate & RATE_MCS_STBC_MSK) ? "STBC " : "",
+			 (rate & RATE_MCS_LDPC_MSK) ? "LDPC " : "",
+			 (rate & RATE_MCS_BF_MSK) ? "BF " : "");
 }
 
 /**
@@ -3696,65 +3690,70 @@ static ssize_t rs_sta_dbgfs_scale_table_read(struct file *file,
 	int desc = 0;
 	int i = 0;
 	ssize_t ret;
+	static const size_t bufsz = 2048;
 
 	struct iwl_lq_sta *lq_sta = file->private_data;
 	struct iwl_mvm_sta *mvmsta =
-		container_of(lq_sta, struct iwl_mvm_sta, lq_sta);
+		container_of(lq_sta, struct iwl_mvm_sta, lq_sta.rs_drv);
 	struct iwl_mvm *mvm;
 	struct iwl_scale_tbl_info *tbl = &(lq_sta->lq_info[lq_sta->active_tbl]);
 	struct rs_rate *rate = &tbl->rate;
 	u32 ss_params;
 
 	mvm = lq_sta->pers.drv;
-	buff = kmalloc(2048, GFP_KERNEL);
+	buff = kmalloc(bufsz, GFP_KERNEL);
 	if (!buff)
 		return -ENOMEM;
 
-	desc += sprintf(buff+desc, "sta_id %d\n", lq_sta->lq.sta_id);
-	desc += sprintf(buff+desc, "failed=%d success=%d rate=0%lX\n",
-			lq_sta->total_failed, lq_sta->total_success,
-			lq_sta->active_legacy_rate);
-	desc += sprintf(buff+desc, "fixed rate 0x%X\n",
-			lq_sta->pers.dbg_fixed_rate);
-	desc += sprintf(buff+desc, "valid_tx_ant %s%s%s\n",
+	desc += scnprintf(buff + desc, bufsz - desc,
+			  "sta_id %d\n", lq_sta->lq.sta_id);
+	desc += scnprintf(buff + desc, bufsz - desc,
+			  "failed=%d success=%d rate=0%lX\n",
+			  lq_sta->total_failed, lq_sta->total_success,
+			  lq_sta->active_legacy_rate);
+	desc += scnprintf(buff + desc, bufsz - desc, "fixed rate 0x%X\n",
+			  lq_sta->pers.dbg_fixed_rate);
+	desc += scnprintf(buff + desc, bufsz - desc, "valid_tx_ant %s%s%s\n",
 	    (iwl_mvm_get_valid_tx_ant(mvm) & ANT_A) ? "ANT_A," : "",
 	    (iwl_mvm_get_valid_tx_ant(mvm) & ANT_B) ? "ANT_B," : "",
 	    (iwl_mvm_get_valid_tx_ant(mvm) & ANT_C) ? "ANT_C" : "");
-	desc += sprintf(buff+desc, "lq type %s\n",
-			(is_legacy(rate)) ? "legacy" :
-			is_vht(rate) ? "VHT" : "HT");
+	desc += scnprintf(buff + desc, bufsz - desc, "lq type %s\n",
+			  (is_legacy(rate)) ? "legacy" :
+			  is_vht(rate) ? "VHT" : "HT");
 	if (!is_legacy(rate)) {
-		desc += sprintf(buff + desc, " %s",
+		desc += scnprintf(buff + desc, bufsz - desc, " %s",
 		   (is_siso(rate)) ? "SISO" : "MIMO2");
-		desc += sprintf(buff + desc, " %s",
+		desc += scnprintf(buff + desc, bufsz - desc, " %s",
 				(is_ht20(rate)) ? "20MHz" :
 				(is_ht40(rate)) ? "40MHz" :
 				(is_ht80(rate)) ? "80MHz" :
 				(is_ht160(rate)) ? "160MHz" : "BAD BW");
-		desc += sprintf(buff + desc, " %s %s %s %s\n",
+		desc += scnprintf(buff + desc, bufsz - desc, " %s %s %s %s\n",
 				(rate->sgi) ? "SGI" : "NGI",
 				(rate->ldpc) ? "LDPC" : "BCC",
 				(lq_sta->is_agg) ? "AGG on" : "",
 				(mvmsta->tlc_amsdu) ? "AMSDU on" : "");
 	}
-	desc += sprintf(buff+desc, "last tx rate=0x%X\n",
+	desc += scnprintf(buff + desc, bufsz - desc, "last tx rate=0x%X\n",
 			lq_sta->last_rate_n_flags);
-	desc += sprintf(buff+desc,
+	desc += scnprintf(buff + desc, bufsz - desc,
 			"general: flags=0x%X mimo-d=%d s-ant=0x%x d-ant=0x%x\n",
 			lq_sta->lq.flags,
 			lq_sta->lq.mimo_delim,
 			lq_sta->lq.single_stream_ant_msk,
 			lq_sta->lq.dual_stream_ant_msk);
 
-	desc += sprintf(buff+desc,
+	desc += scnprintf(buff + desc, bufsz - desc,
 			"agg: time_limit=%d dist_start_th=%d frame_cnt_limit=%d\n",
 			le16_to_cpu(lq_sta->lq.agg_time_limit),
 			lq_sta->lq.agg_disable_start_th,
 			lq_sta->lq.agg_frame_cnt_limit);
 
-	desc += sprintf(buff+desc, "reduced tpc=%d\n", lq_sta->lq.reduced_tpc);
+	desc += scnprintf(buff + desc, bufsz - desc, "reduced tpc=%d\n",
+			  lq_sta->lq.reduced_tpc);
 	ss_params = le32_to_cpu(lq_sta->lq.ss_params);
-	desc += sprintf(buff+desc, "single stream params: %s%s%s%s\n",
+	desc += scnprintf(buff + desc, bufsz - desc,
+			"single stream params: %s%s%s%s\n",
 			(ss_params & LQ_SS_PARAMS_VALID) ?
 			"VALID" : "INVALID",
 			(ss_params & LQ_SS_BFER_ALLOWED) ?
@@ -3763,7 +3762,7 @@ static ssize_t rs_sta_dbgfs_scale_table_read(struct file *file,
 			", STBC" : "",
 			(ss_params & LQ_SS_FORCE) ?
 			", FORCE" : "");
-	desc += sprintf(buff+desc,
+	desc += scnprintf(buff + desc, bufsz - desc,
 			"Start idx [0]=0x%x [1]=0x%x [2]=0x%x [3]=0x%x\n",
 			lq_sta->lq.initial_rate_index[0],
 			lq_sta->lq.initial_rate_index[1],
@@ -3773,8 +3772,9 @@ static ssize_t rs_sta_dbgfs_scale_table_read(struct file *file,
 	for (i = 0; i < LINK_QUAL_MAX_RETRY_NUM; i++) {
 		u32 r = le32_to_cpu(lq_sta->lq.rs_table[i]);
 
-		desc += sprintf(buff+desc, " rate[%d] 0x%X ", i, r);
-		desc += rs_pretty_print_rate(buff+desc, r);
+		desc += scnprintf(buff + desc, bufsz - desc,
+				  " rate[%d] 0x%X ", i, r);
+		desc += rs_pretty_print_rate(buff + desc, bufsz - desc, r);
 	}
 
 	ret = simple_read_from_buffer(user_buf, count, ppos, buff, desc);
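The hunk above swaps every sprintf() for scnprintf() so that `desc` can never advance past `bufsz`: scnprintf() returns the number of characters actually copied (excluding the trailing NUL), never the number that merely would have been needed. A minimal userspace sketch of the same bounded-accumulation pattern, with an append() helper standing in for scnprintf():

#include <stdio.h>

/* Mimic scnprintf(): snprintf() returns the length that *would* have
 * been written, so clamp it to what actually fit in the buffer. */
static size_t append(char *buf, size_t desc, size_t bufsz, const char *s)
{
	int n = snprintf(buf + desc, bufsz - desc, "%s", s);

	if (n < 0)
		return desc;
	if ((size_t)n > bufsz - desc - 1)
		n = bufsz - desc - 1;	/* clamped, like scnprintf() */
	return desc + (size_t)n;
}

int main(void)
{
	char buf[32];
	size_t desc = 0;

	desc = append(buf, desc, sizeof(buf), "sta_id 3\n");
	desc = append(buf, desc, sizeof(buf), "fixed rate 0x0\n");
	desc = append(buf, desc, sizeof(buf), "this line will be truncated\n");
	printf("%zu bytes used of %zu\n%s", desc, sizeof(buf), buf);
	return 0;
}

With sprintf() the third call would have written past the 32-byte buffer; here `desc` stops at 31 and the output is silently truncated, which is exactly the failure mode the conversion to a sized `bufsz` guards against.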
@@ -3987,12 +3987,13 @@ static ssize_t iwl_dbgfs_ss_force_write(struct iwl_lq_sta *lq_sta, char *buf,
 
 MVM_DEBUGFS_READ_WRITE_FILE_OPS(ss_force, 32);
 
-static void rs_add_debugfs(void *mvm, void *priv_sta, struct dentry *dir)
+static void rs_drv_add_sta_debugfs(void *mvm, void *priv_sta,
+				   struct dentry *dir)
 {
 	struct iwl_lq_sta *lq_sta = priv_sta;
 	struct iwl_mvm_sta *mvmsta;
 
-	mvmsta = container_of(lq_sta, struct iwl_mvm_sta, lq_sta);
+	mvmsta = container_of(lq_sta, struct iwl_mvm_sta, lq_sta.rs_drv);
 
 	if (!mvmsta->vif)
 		return;
@@ -4014,7 +4015,7 @@ err:
 	IWL_ERR((struct iwl_mvm *)mvm, "Can't create debugfs entity\n");
 }
 
-static void rs_remove_debugfs(void *mvm, void *mvm_sta)
+void rs_remove_sta_debugfs(void *mvm, void *mvm_sta)
 {
 }
 #endif
@@ -4024,50 +4025,53 @@ static void rs_remove_debugfs(void *mvm, void *mvm_sta)
  * the station is added. Since mac80211 calls this function before a
  * station is added, we ignore it.
  */
-static void rs_rate_init_stub(void *mvm_r,
-			      struct ieee80211_supported_band *sband,
-			      struct cfg80211_chan_def *chandef,
-			      struct ieee80211_sta *sta, void *mvm_sta)
+static void rs_rate_init_ops(void *mvm_r,
+			     struct ieee80211_supported_band *sband,
+			     struct cfg80211_chan_def *chandef,
+			     struct ieee80211_sta *sta, void *mvm_sta)
 {
 }
 
-static const struct rate_control_ops rs_mvm_ops = {
+/* ops for rate scaling implemented in the driver */
+static const struct rate_control_ops rs_mvm_ops_drv = {
 	.name = RS_NAME,
-	.tx_status = rs_mac80211_tx_status,
-	.get_rate = rs_get_rate,
-	.rate_init = rs_rate_init_stub,
+	.tx_status = rs_drv_mac80211_tx_status,
+	.get_rate = rs_drv_get_rate,
+	.rate_init = rs_rate_init_ops,
 	.alloc = rs_alloc,
 	.free = rs_free,
-	.alloc_sta = rs_alloc_sta,
+	.alloc_sta = rs_drv_alloc_sta,
 	.free_sta = rs_free_sta,
-	.rate_update = rs_rate_update,
+	.rate_update = rs_drv_rate_update,
 #ifdef CONFIG_MAC80211_DEBUGFS
-	.add_sta_debugfs = rs_add_debugfs,
-	.remove_sta_debugfs = rs_remove_debugfs,
+	.add_sta_debugfs = rs_drv_add_sta_debugfs,
+	.remove_sta_debugfs = rs_remove_sta_debugfs,
 #endif
 };
 
+void iwl_mvm_rs_rate_init(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
+			  enum nl80211_band band, bool init)
+{
+	if (iwl_mvm_has_tlc_offload(mvm))
+		rs_fw_rate_init(mvm, sta, band);
+	else
+		rs_drv_rate_init(mvm, sta, band, init);
+}
+
 int iwl_mvm_rate_control_register(void)
 {
-	return ieee80211_rate_control_register(&rs_mvm_ops);
+	return ieee80211_rate_control_register(&rs_mvm_ops_drv);
 }
 
 void iwl_mvm_rate_control_unregister(void)
 {
-	ieee80211_rate_control_unregister(&rs_mvm_ops);
+	ieee80211_rate_control_unregister(&rs_mvm_ops_drv);
 }
 
-/**
- * iwl_mvm_tx_protection - Gets LQ command, change it to enable/disable
- * Tx protection, according to this request and previous requests,
- * and send the LQ command.
- * @mvmsta: The station
- * @enable: Enable Tx protection?
- */
-int iwl_mvm_tx_protection(struct iwl_mvm *mvm, struct iwl_mvm_sta *mvmsta,
-			  bool enable)
+static int rs_drv_tx_protection(struct iwl_mvm *mvm, struct iwl_mvm_sta *mvmsta,
+				bool enable)
 {
-	struct iwl_lq_cmd *lq = &mvmsta->lq_sta.lq;
+	struct iwl_lq_cmd *lq = &mvmsta->lq_sta.rs_drv.lq;
 
 	lockdep_assert_held(&mvm->mutex);
 
@@ -4083,3 +4087,17 @@ int iwl_mvm_tx_protection(struct iwl_mvm *mvm, struct iwl_mvm_sta *mvmsta,
 
 	return iwl_mvm_send_lq_cmd(mvm, lq, false);
 }
+
+/**
+ * iwl_mvm_tx_protection - ask FW to enable/disable RTS/CTS protection
+ * @mvm: the mvm object
+ * @mvmsta: the station
+ * @enable: enable Tx protection?
+ */
+int iwl_mvm_tx_protection(struct iwl_mvm *mvm, struct iwl_mvm_sta *mvmsta,
+			  bool enable)
+{
+	if (iwl_mvm_has_tlc_offload(mvm))
+		return rs_fw_tx_protection(mvm, mvmsta, enable);
+	else
+		return rs_drv_tx_protection(mvm, mvmsta, enable);
+}
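This hunk completes the split of rate scaling into two selectable backends: the existing driver implementation (renamed rs_drv_*) and the firmware TLC offload path (rs_fw_*), with iwl_mvm_rs_rate_init() and iwl_mvm_tx_protection() reduced to thin dispatchers on iwl_mvm_has_tlc_offload(). A sketch of that dispatch idiom in plain C (all names below are illustrative stand-ins, not driver API):

#include <stdbool.h>
#include <stdio.h>

struct mvm { bool tlc_offload; };	/* stand-in for the capability check */

static int drv_tx_protection(struct mvm *mvm, bool enable)
{
	printf("driver RS: send LQ command, enable=%d\n", enable);
	return 0;
}

static int fw_tx_protection(struct mvm *mvm, bool enable)
{
	printf("FW RS: update TLC config, enable=%d\n", enable);
	return 0;
}

/* Same shape as iwl_mvm_tx_protection(): one exported entry point,
 * two implementations chosen by a firmware capability bit. */
static int tx_protection(struct mvm *mvm, bool enable)
{
	if (mvm->tlc_offload)
		return fw_tx_protection(mvm, enable);
	return drv_tx_protection(mvm, enable);
}

int main(void)
{
	struct mvm old_fw = { .tlc_offload = false };
	struct mvm new_fw = { .tlc_offload = true };

	tx_protection(&old_fw, true);
	tx_protection(&new_fw, true);
	return 0;
}

Keeping the branch inside the wrapper means callers such as the mac80211 ops table never need to know which backend is active.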
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/rs.h b/drivers/net/wireless/intel/iwlwifi/mvm/rs.h
index 32b4d66debea..fb18cb8c233d 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/rs.h
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/rs.h
@@ -36,6 +36,8 @@
 #include "fw-api.h"
 #include "iwl-trans.h"
 
+#define RS_NAME "iwl-mvm-rs"
+
 struct iwl_rs_rate_info {
 	u8 plcp;	  /* uCode API:  IWL_RATE_6M_PLCP, etc. */
 	u8 plcp_ht_siso;  /* uCode API:  IWL_RATE_SISO_6M_PLCP, etc. */
@@ -218,6 +220,38 @@ struct iwl_rate_mcs_info {
 };
 
 /**
+ * struct iwl_lq_sta_rs_fw - rate and related statistics for RS in FW
+ * @last_rate_n_flags: last rate reported by FW
+ * @sta_id: the id of the station
+ * @dbg_fixed_rate: for debug, use fixed rate if not 0
+ *	(only when CONFIG_MAC80211_DEBUGFS is set)
+ * @dbg_agg_frame_count_lim: for debug, max number of frames in A-MPDU
+ *	(only when CONFIG_MAC80211_DEBUGFS is set)
+ * @chains: bitmask of chains reported in %chain_signal
+ * @chain_signal: per chain signal strength
+ * @last_rssi: last rssi reported
+ * @drv: pointer back to the driver data
+ */
+struct iwl_lq_sta_rs_fw {
+	/* last tx rate_n_flags */
+	u32 last_rate_n_flags;
+
+	/* persistent fields - initialized only once - keep last! */
+	struct lq_sta_pers_rs_fw {
+		u32 sta_id;
+#ifdef CONFIG_MAC80211_DEBUGFS
+		u32 dbg_fixed_rate;
+		u16 dbg_agg_frame_count_lim;
+#endif
+		u8 chains;
+		s8 chain_signal[IEEE80211_MAX_CHAINS];
+		s8 last_rssi;
+		struct iwl_mvm *drv;
+	} pers;
+};
+
+/**
  * struct iwl_rate_scale_data -- tx success history for one rate
  */
 struct iwl_rate_scale_data {
@@ -407,4 +441,18 @@ struct iwl_mvm_sta;
 int iwl_mvm_tx_protection(struct iwl_mvm *mvm, struct iwl_mvm_sta *mvmsta,
 			  bool enable);
 
+#ifdef CONFIG_IWLWIFI_DEBUGFS
+void iwl_mvm_reset_frame_stats(struct iwl_mvm *mvm);
+#endif
+
+#ifdef CONFIG_MAC80211_DEBUGFS
+void rs_remove_sta_debugfs(void *mvm, void *mvm_sta);
+#endif
+
+void iwl_mvm_rs_add_sta(struct iwl_mvm *mvm, struct iwl_mvm_sta *mvmsta);
+void rs_fw_rate_init(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
+		     enum nl80211_band band);
+int rs_fw_tx_protection(struct iwl_mvm *mvm, struct iwl_mvm_sta *mvmsta,
+			bool enable);
+void iwl_mvm_tlc_update_notif(struct iwl_mvm *mvm, struct iwl_rx_packet *pkt);
 #endif /* __rs__ */
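The new struct iwl_lq_sta_rs_fw ends with a `pers` sub-struct marked "initialized only once - keep last!". Keeping the persistent fields at the tail lets (re)initialization zero only the leading part of the structure with a single memset() up to offsetof(..., pers), a common iwlwifi idiom. A sketch of that layout trick (field names here are illustrative):

#include <stddef.h>
#include <stdio.h>
#include <string.h>

struct lq_sta {
	unsigned int last_rate;		/* reset on every re-init */
	/* persistent fields - initialized once - keep last! */
	struct {
		unsigned int sta_id;
		void *drv;
	} pers;
};

/* Zero everything *before* pers; the persistent tail survives. */
static void lq_sta_reinit(struct lq_sta *lq)
{
	memset(lq, 0, offsetof(struct lq_sta, pers));
}

int main(void)
{
	struct lq_sta lq = { .last_rate = 7, .pers = { .sta_id = 3 } };

	lq_sta_reinit(&lq);
	printf("last_rate=%u sta_id=%u\n", lq.last_rate, lq.pers.sta_id);
	return 0;
}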
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/rx.c b/drivers/net/wireless/intel/iwlwifi/mvm/rx.c
index d1a40688d5e1..d26833c5ce1f 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/rx.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/rx.c
@@ -222,7 +222,9 @@ static u32 iwl_mvm_set_mac80211_rx_flag(struct iwl_mvm *mvm,
 
 	case RX_MPDU_RES_STATUS_SEC_TKIP_ENC:
 		/* Don't drop the frame and decrypt it in SW */
-		if (!(rx_pkt_status & RX_MPDU_RES_STATUS_TTAK_OK))
+		if (!fw_has_api(&mvm->fw->ucode_capa,
+				IWL_UCODE_TLV_API_DEPRECATE_TTAK) &&
+		    !(rx_pkt_status & RX_MPDU_RES_STATUS_TTAK_OK))
 			return 0;
 		*crypt_len = IEEE80211_TKIP_IV_LEN;
 		/* fall through if TTAK OK */
@@ -383,7 +385,7 @@ void iwl_mvm_rx_rx_mpdu(struct iwl_mvm *mvm, struct napi_struct *napi,
 								 false);
 		}
 
-		rs_update_last_rssi(mvm, &mvmsta->lq_sta, rx_status);
+		rs_update_last_rssi(mvm, mvmsta, rx_status);
 
 		if (iwl_fw_dbg_trigger_enabled(mvm->fw, FW_DBG_TRIGGER_RSSI) &&
 		    ieee80211_is_beacon(hdr->frame_control)) {
@@ -439,7 +441,8 @@ void iwl_mvm_rx_rx_mpdu(struct iwl_mvm *mvm, struct napi_struct *napi,
 		rx_status->bw = RATE_INFO_BW_160;
 		break;
 	}
-	if (rate_n_flags & RATE_MCS_SGI_MSK)
+	if (!(rate_n_flags & RATE_MCS_CCK_MSK) &&
+	    rate_n_flags & RATE_MCS_SGI_MSK)
 		rx_status->enc_flags |= RX_ENC_FLAG_SHORT_GI;
 	if (rate_n_flags & RATE_HT_MCS_GF_MSK)
 		rx_status->enc_flags |= RX_ENC_FLAG_HT_GF;
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c b/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c
index 3b8d44361380..a3f7c1bf3cc8 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c
@@ -261,7 +261,9 @@ static int iwl_mvm_rx_crypto(struct iwl_mvm *mvm, struct ieee80211_hdr *hdr,
 		return 0;
 	case IWL_RX_MPDU_STATUS_SEC_TKIP:
 		/* Don't drop the frame and decrypt it in SW */
-		if (!(status & IWL_RX_MPDU_RES_STATUS_TTAK_OK))
+		if (!fw_has_api(&mvm->fw->ucode_capa,
+				IWL_UCODE_TLV_API_DEPRECATE_TTAK) &&
+		    !(status & IWL_RX_MPDU_RES_STATUS_TTAK_OK))
 			return 0;
 
 		*crypt_len = IEEE80211_TKIP_IV_LEN;
@@ -943,7 +945,7 @@ void iwl_mvm_rx_mpdu_mq(struct iwl_mvm *mvm, struct napi_struct *napi,
 								 false);
 		}
 
-		rs_update_last_rssi(mvm, &mvmsta->lq_sta, rx_status);
+		rs_update_last_rssi(mvm, mvmsta, rx_status);
 
 		if (iwl_fw_dbg_trigger_enabled(mvm->fw, FW_DBG_TRIGGER_RSSI) &&
 		    ieee80211_is_beacon(hdr->frame_control)) {
@@ -1020,7 +1022,9 @@ void iwl_mvm_rx_mpdu_mq(struct iwl_mvm *mvm, struct napi_struct *napi,
 		rx_status->bw = RATE_INFO_BW_160;
 		break;
 	}
-	if (rate_n_flags & RATE_MCS_SGI_MSK)
+
+	if (!(rate_n_flags & RATE_MCS_CCK_MSK) &&
+	    rate_n_flags & RATE_MCS_SGI_MSK)
 		rx_status->enc_flags |= RX_ENC_FLAG_SHORT_GI;
 	if (rate_n_flags & RATE_HT_MCS_GF_MSK)
 		rx_status->enc_flags |= RX_ENC_FLAG_HT_GF;
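Both RX paths (rx.c and rxmq.c) gain the same two guards: TTAK validation is skipped on firmware that deprecates it, and the short-GI flag is only honored for non-CCK rates, since that bit position carries no SGI meaning for CCK frames. A small sketch of the second guard (the bit positions are illustrative, not the real rate_n_flags layout):

#include <stdbool.h>
#include <stdio.h>

#define RATE_CCK_MSK	(1u << 9)	/* illustrative */
#define RATE_SGI_MSK	(1u << 13)	/* illustrative */

static bool short_gi(unsigned int rate_n_flags)
{
	/* Same shape as the patch: never report SGI for CCK rates. */
	return !(rate_n_flags & RATE_CCK_MSK) &&
	       (rate_n_flags & RATE_SGI_MSK);
}

int main(void)
{
	printf("%d\n", short_gi(RATE_SGI_MSK));			/* 1 */
	printf("%d\n", short_gi(RATE_SGI_MSK | RATE_CCK_MSK));	/* 0 */
	return 0;
}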
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/scan.c b/drivers/net/wireless/intel/iwlwifi/mvm/scan.c
index e4fd476e9ccb..356b16f40e78 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/scan.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/scan.c
@@ -664,6 +664,22 @@ static u8 *iwl_mvm_copy_and_insert_ds_elem(struct iwl_mvm *mvm, const u8 *ies,
 	return newpos;
 }
 
+#define WFA_TPC_IE_LEN	9
+
+static void iwl_mvm_add_tpc_report_ie(u8 *pos)
+{
+	pos[0] = WLAN_EID_VENDOR_SPECIFIC;
+	pos[1] = WFA_TPC_IE_LEN - 2;
+	pos[2] = (WLAN_OUI_MICROSOFT >> 16) & 0xff;
+	pos[3] = (WLAN_OUI_MICROSOFT >> 8) & 0xff;
+	pos[4] = WLAN_OUI_MICROSOFT & 0xff;
+	pos[5] = WLAN_OUI_TYPE_MICROSOFT_TPC;
+	pos[6] = 0;
+	/* pos[7] - tx power will be inserted by the FW */
+	pos[7] = 0;
+	pos[8] = 0;
+}
+
 static void
 iwl_mvm_build_scan_probe(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
 			 struct ieee80211_scan_ies *ies,
@@ -716,7 +732,16 @@ iwl_mvm_build_scan_probe(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
 
 	memcpy(pos, ies->common_ies, ies->common_ie_len);
 	params->preq.common_data.offset = cpu_to_le16(pos - params->preq.buf);
-	params->preq.common_data.len = cpu_to_le16(ies->common_ie_len);
+
+	if (iwl_mvm_rrm_scan_needed(mvm) &&
+	    !fw_has_capa(&mvm->fw->ucode_capa,
+			 IWL_UCODE_TLV_CAPA_WFA_TPC_REP_IE_SUPPORT)) {
+		iwl_mvm_add_tpc_report_ie(pos + ies->common_ie_len);
+		params->preq.common_data.len = cpu_to_le16(ies->common_ie_len +
+							   WFA_TPC_IE_LEN);
+	} else {
+		params->preq.common_data.len = cpu_to_le16(ies->common_ie_len);
+	}
 }
 
 static void iwl_mvm_scan_lmac_dwell(struct iwl_mvm *mvm,
@@ -781,7 +806,9 @@ static int iwl_mvm_scan_lmac_flags(struct iwl_mvm *mvm,
 	if (params->type == IWL_SCAN_TYPE_FRAGMENTED)
 		flags |= IWL_MVM_LMAC_SCAN_FLAG_FRAGMENTED;
 
-	if (iwl_mvm_rrm_scan_needed(mvm))
+	if (iwl_mvm_rrm_scan_needed(mvm) &&
+	    fw_has_capa(&mvm->fw->ucode_capa,
+			IWL_UCODE_TLV_CAPA_WFA_TPC_REP_IE_SUPPORT))
 		flags |= IWL_MVM_LMAC_SCAN_FLAGS_RRM_ENABLED;
 
 	if (params->pass_all)
@@ -1183,7 +1210,9 @@ static u16 iwl_mvm_scan_umac_flags(struct iwl_mvm *mvm,
 			flags |= IWL_UMAC_SCAN_GEN_FLAGS_LMAC2_FRAGMENTED;
 	}
 
-	if (iwl_mvm_rrm_scan_needed(mvm))
+	if (iwl_mvm_rrm_scan_needed(mvm) &&
+	    fw_has_capa(&mvm->fw->ucode_capa,
+			IWL_UCODE_TLV_CAPA_WFA_TPC_REP_IE_SUPPORT))
 		flags |= IWL_UMAC_SCAN_GEN_FLAGS_RRM_ENABLED;
 
 	if (params->pass_all)
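When the firmware cannot insert the WFA TPC report element itself (no IWL_UCODE_TLV_CAPA_WFA_TPC_REP_IE_SUPPORT), the driver now appends the 9-byte vendor IE to the probe request and enlarges common_data.len to match. A standalone sketch of that byte layout; the OUI type value is an assumption standing in for the kernel's WLAN_OUI_TYPE_MICROSOFT_TPC:

#include <stdio.h>

#define WLAN_EID_VENDOR_SPECIFIC	221
#define WLAN_OUI_MICROSOFT		0x0050f2
#define OUI_TYPE_TPC			8	/* assumed value */

int main(void)
{
	unsigned char ie[9];

	ie[0] = WLAN_EID_VENDOR_SPECIFIC;
	ie[1] = sizeof(ie) - 2;	/* IE length excludes EID and len bytes */
	ie[2] = (WLAN_OUI_MICROSOFT >> 16) & 0xff;
	ie[3] = (WLAN_OUI_MICROSOFT >> 8) & 0xff;
	ie[4] = WLAN_OUI_MICROSOFT & 0xff;
	ie[5] = OUI_TYPE_TPC;
	ie[6] = 0;	/* subtype/version */
	ie[7] = 0;	/* tx power, filled in by the FW */
	ie[8] = 0;	/* link margin */

	for (int i = 0; i < (int)sizeof(ie); i++)
		printf("%02x ", ie[i]);
	printf("\n");
	return 0;
}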
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/sta.c b/drivers/net/wireless/intel/iwlwifi/mvm/sta.c
index 1add5615fc3a..6b2674e02606 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/sta.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/sta.c
@@ -1439,6 +1439,13 @@ int iwl_mvm_add_sta(struct iwl_mvm *mvm,
 			goto err;
 	}
 
+	/*
+	 * If rs is registered with mac80211, then "add station" will be
+	 * handled via the corresponding ops; otherwise we need to notify
+	 * rate scaling here.
+	 */
+	if (iwl_mvm_has_tlc_offload(mvm))
+		iwl_mvm_rs_add_sta(mvm, mvm_sta);
+
 update_fw:
 	ret = iwl_mvm_sta_send_to_fw(mvm, sta, sta_update, sta_flags);
 	if (ret)
@@ -1762,7 +1769,7 @@ int iwl_mvm_add_aux_sta(struct iwl_mvm *mvm)
 	}
 
 	/*
-	 * For a000 firmware and on we cannot add queue to a station unknown
+	 * For 22000 firmware and on we cannot add queue to a station unknown
 	 * to firmware so enable queue here - after the station was added
 	 */
 	if (iwl_mvm_has_new_tx_api(mvm))
@@ -1885,7 +1892,7 @@ int iwl_mvm_send_add_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
 		return ret;
 
 	/*
-	 * For a000 firmware and on we cannot add queue to a station unknown
+	 * For 22000 firmware and on we cannot add queue to a station unknown
 	 * to firmware so enable queue here - after the station was added
 	 */
 	if (iwl_mvm_has_new_tx_api(mvm)) {
@@ -2064,7 +2071,7 @@ int iwl_mvm_add_mcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
 
 	/*
 	 * Enable cab queue after the ADD_STA command is sent.
-	 * This is needed for a000 firmware which won't accept SCD_QUEUE_CFG
+	 * This is needed for 22000 firmware which won't accept SCD_QUEUE_CFG
 	 * command with unknown station id, and for FW that doesn't support
 	 * station API since the cab queue is not included in the
 	 * tfd_queue_mask.
@@ -2530,7 +2537,7 @@ int iwl_mvm_sta_tx_agg_start(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
 			    tid_data->next_reclaimed);
 
 	/*
-	 * In A000 HW, the next_reclaimed index is only 8 bit, so we'll need
+	 * In 22000 HW, the next_reclaimed index is only 8 bit, so we'll need
 	 * to align the wrap around of ssn so we compare relevant values.
 	 */
 	normalized_ssn = tid_data->ssn;
@@ -2575,6 +2582,13 @@ int iwl_mvm_sta_tx_agg_oper(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
 		.aggregate = true,
 	};
 
+	/*
+	 * When the FW supports TLC_OFFLOAD, it also implements the Tx
+	 * aggregation manager, so this function should never be called.
+	 */
+	if (WARN_ON_ONCE(iwl_mvm_has_tlc_offload(mvm)))
+		return -EINVAL;
+
 	BUILD_BUG_ON((sizeof(mvmsta->agg_tids) * BITS_PER_BYTE)
 		     != IWL_MAX_TID_COUNT);
 
@@ -2672,12 +2686,12 @@ out:
 	 */
 	mvmsta->max_agg_bufsize =
 		min(mvmsta->max_agg_bufsize, buf_size);
-	mvmsta->lq_sta.lq.agg_frame_cnt_limit = mvmsta->max_agg_bufsize;
+	mvmsta->lq_sta.rs_drv.lq.agg_frame_cnt_limit = mvmsta->max_agg_bufsize;
 
 	IWL_DEBUG_HT(mvm, "Tx aggregation enabled on ra = %pM tid = %d\n",
 		     sta->addr, tid);
 
-	return iwl_mvm_send_lq_cmd(mvm, &mvmsta->lq_sta.lq, false);
+	return iwl_mvm_send_lq_cmd(mvm, &mvmsta->lq_sta.rs_drv.lq, false);
 }
 
 static void iwl_mvm_unreserve_agg_queue(struct iwl_mvm *mvm,
@@ -3615,7 +3629,7 @@ u16 iwl_mvm_tid_queued(struct iwl_mvm *mvm, struct iwl_mvm_tid_data *tid_data)
 	u16 sn = IEEE80211_SEQ_TO_SN(tid_data->seq_number);
 
 	/*
-	 * In A000 HW, the next_reclaimed index is only 8 bit, so we'll need
+	 * In 22000 HW, the next_reclaimed index is only 8 bit, so we'll need
 	 * to align the wrap around of ssn so we compare relevant values.
 	 */
 	if (mvm->trans->cfg->gen2)
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/sta.h b/drivers/net/wireless/intel/iwlwifi/mvm/sta.h
index aedabe101cf0..5ffd6adbc383 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/sta.h
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/sta.h
@@ -383,6 +383,8 @@ struct iwl_mvm_rxq_dup_data {
  * and from Tx response flow, it needs a spinlock.
  * @tid_data: per tid data + mgmt. Look at %iwl_mvm_tid_data.
  * @tid_to_baid: a simple map of TID to baid
+ * @lq_sta: holds rate scaling data, either for the case when RS is done in
+ *	the driver - %rs_drv or in the FW - %rs_fw.
  * @reserved_queue: the queue reserved for this STA for DQA purposes
  *	Every STA has is given one reserved queue to allow it to operate. If no
  *	such queue can be guaranteed, the STA addition will fail.
@@ -417,7 +419,10 @@ struct iwl_mvm_sta {
 	spinlock_t lock;
 	struct iwl_mvm_tid_data tid_data[IWL_MAX_TID_COUNT + 1];
 	u8 tid_to_baid[IWL_MAX_TID_COUNT];
-	struct iwl_lq_sta lq_sta;
+	union {
+		struct iwl_lq_sta_rs_fw rs_fw;
+		struct iwl_lq_sta rs_drv;
+	} lq_sta;
 	struct ieee80211_vif *vif;
 	struct iwl_mvm_key_pn __rcu *ptk_pn[4];
 	struct iwl_mvm_rxq_dup_data *dup_data;
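Because a station only ever uses one rate-scaling backend, the per-station data becomes a union, which also shrinks struct iwl_mvm_sta when the FW variant is active. The price is that every container_of() back-reference must now name the active union member, as the rs.c hunks above do with `lq_sta.rs_drv`. A sketch of that pattern (toy types, with a local container_of definition for userspace):

#include <stddef.h>
#include <stdio.h>

struct rs_fw  { unsigned int last_rate; };
struct rs_drv { unsigned int rate_table[16]; };

struct sta {
	int id;
	union {
		struct rs_fw rs_fw;
		struct rs_drv rs_drv;
	} lq_sta;
};

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

int main(void)
{
	struct sta s = { .id = 5 };
	struct rs_drv *drv = &s.lq_sta.rs_drv;
	/* The member designator must spell out the union path now. */
	struct sta *back = container_of(drv, struct sta, lq_sta.rs_drv);

	printf("id=%d same=%d\n", back->id, back == &s);
	return 0;
}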
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/time-event.c b/drivers/net/wireless/intel/iwlwifi/mvm/time-event.c
index e25cda9fbf6c..200ab50ec86b 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/time-event.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/time-event.c
@@ -101,7 +101,6 @@ void iwl_mvm_te_clear_data(struct iwl_mvm *mvm,
 void iwl_mvm_roc_done_wk(struct work_struct *wk)
 {
 	struct iwl_mvm *mvm = container_of(wk, struct iwl_mvm, roc_done_wk);
-	u32 queues = 0;
 
 	/*
 	 * Clear the ROC_RUNNING / ROC_AUX_RUNNING status bits.
@@ -110,14 +109,10 @@ void iwl_mvm_roc_done_wk(struct work_struct *wk)
 	 * in the case that the time event actually completed in the firmware
 	 * (which is handled in iwl_mvm_te_handle_notif).
 	 */
-	if (test_and_clear_bit(IWL_MVM_STATUS_ROC_RUNNING, &mvm->status)) {
-		queues |= BIT(IWL_MVM_OFFCHANNEL_QUEUE);
+	if (test_and_clear_bit(IWL_MVM_STATUS_ROC_RUNNING, &mvm->status))
 		iwl_mvm_unref(mvm, IWL_MVM_REF_ROC);
-	}
-	if (test_and_clear_bit(IWL_MVM_STATUS_ROC_AUX_RUNNING, &mvm->status)) {
-		queues |= BIT(mvm->aux_queue);
+	if (test_and_clear_bit(IWL_MVM_STATUS_ROC_AUX_RUNNING, &mvm->status))
 		iwl_mvm_unref(mvm, IWL_MVM_REF_ROC_AUX);
-	}
 
 	synchronize_net();
 
@@ -777,12 +772,6 @@ int iwl_mvm_start_p2p_roc(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
 		return -EBUSY;
 	}
 
-	/*
-	 * Flush the done work, just in case it's still pending, so that
-	 * the work it does can complete and we can accept new frames.
-	 */
-	flush_work(&mvm->roc_done_wk);
-
 	time_cmd.action = cpu_to_le32(FW_CTXT_ACTION_ADD);
 	time_cmd.id_and_color =
 		cpu_to_le32(FW_CMD_ID_AND_COLOR(mvmvif->id, mvmvif->color));
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/tx.c b/drivers/net/wireless/intel/iwlwifi/mvm/tx.c
index 333bcb75b8af..dda77b327c98 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/tx.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/tx.c
@@ -888,10 +888,9 @@ static void iwl_mvm_tx_add_stream(struct iwl_mvm *mvm,
 	/*
 	 * The first deferred frame should've stopped the MAC queues, so we
 	 * should never get a second deferred frame for the RA/TID.
+	 * In case of GSO the first packet may have been split, so don't warn.
 	 */
-	if (!WARN(skb_queue_len(deferred_tx_frames) != 1,
-		  "RATID %d/%d has %d deferred frames\n", mvm_sta->sta_id, tid,
-		  skb_queue_len(deferred_tx_frames))) {
+	if (skb_queue_len(deferred_tx_frames) == 1) {
 		iwl_mvm_stop_mac_queues(mvm, BIT(mac_queue));
 		schedule_work(&mvm->add_stream_wk);
 	}
@@ -1132,7 +1131,7 @@ static void iwl_mvm_check_ratid_empty(struct iwl_mvm *mvm,
 	}
 
 	/*
-	 * In A000 HW, the next_reclaimed index is only 8 bit, so we'll need
+	 * In 22000 HW, the next_reclaimed index is only 8 bit, so we'll need
 	 * to align the wrap around of ssn so we compare relevant values.
 	 */
 	normalized_ssn = tid_data->ssn;
@@ -1624,7 +1623,7 @@ static void iwl_mvm_tx_reclaim(struct iwl_mvm *mvm, int sta_id, int tid,
 	int freed;
 
 	if (WARN_ONCE(sta_id >= IWL_MVM_STATION_COUNT ||
-		      tid >= IWL_MAX_TID_COUNT,
+		      tid > IWL_MAX_TID_COUNT,
 		      "sta_id %d tid %d", sta_id, tid))
 		return;
 
@@ -1679,7 +1678,7 @@ static void iwl_mvm_tx_reclaim(struct iwl_mvm *mvm, int sta_id, int tid,
 		if (ieee80211_is_data_qos(hdr->frame_control))
 			freed++;
 		else
-			WARN_ON_ONCE(1);
+			WARN_ON_ONCE(tid != IWL_MAX_TID_COUNT);
 
 		iwl_trans_free_tx_cmd(mvm->trans, info->driver_data[1]);
 
@@ -1719,8 +1718,11 @@ static void iwl_mvm_tx_reclaim(struct iwl_mvm *mvm, int sta_id, int tid,
 		ba_info->band = chanctx_conf->def.chan->band;
 		iwl_mvm_hwrate_to_tx_status(rate, ba_info);
 
-		IWL_DEBUG_TX_REPLY(mvm, "No reclaim. Update rs directly\n");
-		iwl_mvm_rs_tx_status(mvm, sta, tid, ba_info, false);
+		if (!iwl_mvm_has_tlc_offload(mvm)) {
+			IWL_DEBUG_TX_REPLY(mvm,
+					   "No reclaim. Update rs directly\n");
+			iwl_mvm_rs_tx_status(mvm, sta, tid, ba_info, false);
+		}
 	}
 
 out:
@@ -1771,8 +1773,12 @@ void iwl_mvm_rx_ba_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb)
 			struct iwl_mvm_compressed_ba_tfd *ba_tfd =
 				&ba_res->tfd[i];
 
+			tid = ba_tfd->tid;
+			if (tid == IWL_MGMT_TID)
+				tid = IWL_MAX_TID_COUNT;
+
 			mvmsta->tid_data[i].lq_color = lq_color;
-			iwl_mvm_tx_reclaim(mvm, sta_id, ba_tfd->tid,
+			iwl_mvm_tx_reclaim(mvm, sta_id, tid,
 					   (int)(le16_to_cpu(ba_tfd->q_num)),
 					   le16_to_cpu(ba_tfd->tfd_index),
 					   &ba_info,
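The BA-notification fix above remaps the firmware's out-of-band management TID onto the driver's last TID slot before reclaiming, and the bounds check in iwl_mvm_tx_reclaim() is relaxed from `>=` to `>` to admit that slot. The remap in isolation (constants chosen to mirror the driver's 8 data TIDs plus one management slot; treat them as illustrative):

#include <stdio.h>

#define MAX_TID_COUNT	8	/* data TIDs 0..7; slot 8 is mgmt */
#define MGMT_TID	15	/* FW's special TID for mgmt frames */

static int normalize_tid(int fw_tid)
{
	return fw_tid == MGMT_TID ? MAX_TID_COUNT : fw_tid;
}

int main(void)
{
	const int tids[] = { 0, 5, MGMT_TID };

	for (unsigned int i = 0; i < sizeof(tids) / sizeof(tids[0]); i++)
		printf("fw tid %2d -> reclaim tid %d\n",
		       tids[i], normalize_tid(tids[i]));
	return 0;
}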
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/utils.c b/drivers/net/wireless/intel/iwlwifi/mvm/utils.c
index 03ffd84786ca..d65e1db7c097 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/utils.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/utils.c
@@ -278,8 +278,8 @@ u8 iwl_mvm_next_antenna(struct iwl_mvm *mvm, u8 valid, u8 last_idx)
 	u8 ind = last_idx;
 	int i;
 
-	for (i = 0; i < RATE_MCS_ANT_NUM; i++) {
-		ind = (ind + 1) % RATE_MCS_ANT_NUM;
+	for (i = 0; i < MAX_RS_ANT_NUM; i++) {
+		ind = (ind + 1) % MAX_RS_ANT_NUM;
 		if (valid & BIT(ind))
 			return ind;
 	}
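iwl_mvm_next_antenna() is unchanged in behavior here - the hunk only renames the loop bound - but the round-robin-over-a-bitmask idiom is worth seeing on its own. A self-contained version (antenna count and mask values are arbitrary examples):

#include <stdio.h>

#define MAX_ANT 3

/* Return the next antenna index after last_idx whose bit is set in
 * valid_msk, wrapping around; fall back to last_idx if none is. */
static int next_antenna(unsigned int valid_msk, int last_idx)
{
	int ind = last_idx;

	for (int i = 0; i < MAX_ANT; i++) {
		ind = (ind + 1) % MAX_ANT;
		if (valid_msk & (1u << ind))
			return ind;
	}
	return last_idx;
}

int main(void)
{
	int idx = 0;

	/* Antennas A and C valid (bits 0 and 2): alternates 2, 0, 2, 0 */
	for (int i = 0; i < 4; i++) {
		idx = next_antenna(0x5, idx);
		printf("%d ", idx);
	}
	printf("\n");
	return 0;
}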
@@ -516,8 +516,7 @@ static void iwl_mvm_dump_lmac_error_log(struct iwl_mvm *mvm, u32 base)
 		IWL_ERR(trans, "HW error, resetting before reading\n");
 
 		/* reset the device */
-		iwl_set_bit(trans, CSR_RESET, CSR_RESET_REG_FLAG_SW_RESET);
-		usleep_range(5000, 6000);
+		iwl_trans_sw_reset(trans);
 
 		/* set INIT_DONE flag */
 		iwl_set_bit(trans, CSR_GP_CNTRL,
@@ -595,6 +594,12 @@ static void iwl_mvm_dump_lmac_error_log(struct iwl_mvm *mvm, u32 base)
 
 void iwl_mvm_dump_nic_error_log(struct iwl_mvm *mvm)
 {
+	if (!test_bit(STATUS_DEVICE_ENABLED, &mvm->trans->status)) {
+		IWL_ERR(mvm,
+			"DEVICE_ENABLED bit is not set. Aborting dump.\n");
+		return;
+	}
+
 	iwl_mvm_dump_lmac_error_log(mvm, mvm->error_event_table[0]);
 
 	if (mvm->error_event_table[1])
@@ -906,7 +911,8 @@ int iwl_mvm_send_lq_cmd(struct iwl_mvm *mvm, struct iwl_lq_cmd *lq, bool init)
 		.data = { lq, },
 	};
 
-	if (WARN_ON(lq->sta_id == IWL_MVM_INVALID_STA))
+	if (WARN_ON(lq->sta_id == IWL_MVM_INVALID_STA ||
+		    iwl_mvm_has_tlc_offload(mvm)))
 		return -EINVAL;
 
 	return iwl_mvm_send_cmd(mvm, &cmd);
@@ -1025,12 +1031,34 @@ int iwl_mvm_update_low_latency(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
 {
 	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
 	int res;
+	bool low_latency;
 
 	lockdep_assert_held(&mvm->mutex);
 
-	if (iwl_mvm_vif_low_latency(mvmvif) == prev)
+	low_latency = iwl_mvm_vif_low_latency(mvmvif);
+
+	if (low_latency == prev)
 		return 0;
 
+	if (fw_has_capa(&mvm->fw->ucode_capa,
+			IWL_UCODE_TLV_CAPA_DYNAMIC_QUOTA)) {
+		struct iwl_mac_low_latency_cmd cmd = {
+			.mac_id = cpu_to_le32(mvmvif->id)
+		};
+
+		if (low_latency) {
+			/* currently we don't care about the direction */
+			cmd.low_latency_rx = 1;
+			cmd.low_latency_tx = 1;
+		}
+		res = iwl_mvm_send_cmd_pdu(mvm,
+					   iwl_cmd_id(LOW_LATENCY_CMD,
+						      MAC_CONF_GROUP, 0),
+					   0, sizeof(cmd), &cmd);
+		if (res)
+			IWL_ERR(mvm, "Failed to send low latency command\n");
+	}
+
 	res = iwl_mvm_update_quotas(mvm, false, NULL);
 	if (res)
 		return res;
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/drv.c b/drivers/net/wireless/intel/iwlwifi/pcie/drv.c
index ccd7c33c4c28..56fc28750a41 100644
--- a/drivers/net/wireless/intel/iwlwifi/pcie/drv.c
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/drv.c
@@ -652,20 +652,20 @@ static const struct pci_device_id iwl_hw_card_ids[] = {
 	{IWL_PCI_DEVICE(0xA370, 0x4034, iwl9560_2ac_cfg_soc)},
 	{IWL_PCI_DEVICE(0xA370, 0x40A4, iwl9462_2ac_cfg_soc)},
 
-/* a000 Series */
-	{IWL_PCI_DEVICE(0x2720, 0x0A10, iwla000_2ac_cfg_hr_cdb)},
-	{IWL_PCI_DEVICE(0x34F0, 0x0310, iwla000_2ac_cfg_jf)},
-	{IWL_PCI_DEVICE(0x2720, 0x0000, iwla000_2ax_cfg_hr)},
-	{IWL_PCI_DEVICE(0x34F0, 0x0070, iwla000_2ax_cfg_hr)},
-	{IWL_PCI_DEVICE(0x2720, 0x0078, iwla000_2ax_cfg_hr)},
-	{IWL_PCI_DEVICE(0x2720, 0x0070, iwla000_2ac_cfg_hr_cdb)},
-	{IWL_PCI_DEVICE(0x2720, 0x0030, iwla000_2ac_cfg_hr_cdb)},
-	{IWL_PCI_DEVICE(0x2720, 0x1080, iwla000_2ax_cfg_hr)},
-	{IWL_PCI_DEVICE(0x2720, 0x0090, iwla000_2ac_cfg_hr_cdb)},
-	{IWL_PCI_DEVICE(0x2720, 0x0310, iwla000_2ac_cfg_hr_cdb)},
-	{IWL_PCI_DEVICE(0x40C0, 0x0000, iwla000_2ax_cfg_hr)},
-	{IWL_PCI_DEVICE(0x40C0, 0x0A10, iwla000_2ax_cfg_hr)},
-	{IWL_PCI_DEVICE(0xA0F0, 0x0000, iwla000_2ax_cfg_hr)},
+/* 22000 Series */
+	{IWL_PCI_DEVICE(0x2720, 0x0A10, iwl22000_2ac_cfg_hr_cdb)},
+	{IWL_PCI_DEVICE(0x34F0, 0x0310, iwl22000_2ac_cfg_jf)},
+	{IWL_PCI_DEVICE(0x2720, 0x0000, iwl22000_2ax_cfg_hr)},
+	{IWL_PCI_DEVICE(0x34F0, 0x0070, iwl22000_2ax_cfg_hr)},
+	{IWL_PCI_DEVICE(0x2720, 0x0078, iwl22000_2ax_cfg_hr)},
+	{IWL_PCI_DEVICE(0x2720, 0x0070, iwl22000_2ac_cfg_hr_cdb)},
+	{IWL_PCI_DEVICE(0x2720, 0x0030, iwl22000_2ac_cfg_hr_cdb)},
+	{IWL_PCI_DEVICE(0x2720, 0x1080, iwl22000_2ax_cfg_hr)},
+	{IWL_PCI_DEVICE(0x2720, 0x0090, iwl22000_2ac_cfg_hr_cdb)},
+	{IWL_PCI_DEVICE(0x2720, 0x0310, iwl22000_2ac_cfg_hr_cdb)},
+	{IWL_PCI_DEVICE(0x40C0, 0x0000, iwl22000_2ax_cfg_hr)},
+	{IWL_PCI_DEVICE(0x40C0, 0x0A10, iwl22000_2ax_cfg_hr)},
+	{IWL_PCI_DEVICE(0xA0F0, 0x0000, iwl22000_2ax_cfg_hr)},
 
 #endif /* CONFIG_IWLMVM */
 
@@ -707,7 +707,7 @@ static int iwl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
 		iwl_trans->cfg = cfg_7265d;
 	}
 
-	if (iwl_trans->cfg->rf_id && cfg == &iwla000_2ac_cfg_hr_cdb &&
+	if (iwl_trans->cfg->rf_id && cfg == &iwl22000_2ac_cfg_hr_cdb &&
 	    iwl_trans->hw_rev != CSR_HW_REV_TYPE_HR_CDB) {
 		u32 rf_id_chp = CSR_HW_RF_ID_TYPE_CHIP_ID(iwl_trans->hw_rf_id);
 		u32 jf_chp_id = CSR_HW_RF_ID_TYPE_CHIP_ID(CSR_HW_RF_ID_TYPE_JF);
@@ -715,14 +715,14 @@ static int iwl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
 
 		if (rf_id_chp == jf_chp_id) {
 			if (iwl_trans->hw_rev == CSR_HW_REV_TYPE_QNJ)
-				cfg = &iwla000_2ax_cfg_qnj_jf_b0;
+				cfg = &iwl22000_2ax_cfg_qnj_jf_b0;
 			else
-				cfg = &iwla000_2ac_cfg_jf;
+				cfg = &iwl22000_2ac_cfg_jf;
 		} else if (rf_id_chp == hr_chp_id) {
 			if (iwl_trans->hw_rev == CSR_HW_REV_TYPE_QNJ)
-				cfg = &iwla000_2ax_cfg_qnj_hr_a0;
+				cfg = &iwl22000_2ax_cfg_qnj_hr_a0;
 			else
-				cfg = &iwla000_2ac_cfg_hr;
+				cfg = &iwl22000_2ac_cfg_hr;
 		}
 		iwl_trans->cfg = cfg;
 	}
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/internal.h b/drivers/net/wireless/intel/iwlwifi/pcie/internal.h
index 403e65c309d0..ca3b64ff4dad 100644
--- a/drivers/net/wireless/intel/iwlwifi/pcie/internal.h
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/internal.h
@@ -658,13 +658,6 @@ static inline void iwl_enable_fw_load_int(struct iwl_trans *trans)
 	}
 }
 
-static inline void iwl_pcie_sw_reset(struct iwl_trans *trans)
-{
-	/* Reset entire device - do controller reset (results in SHRD_HW_RST) */
-	iwl_set_bit(trans, CSR_RESET, CSR_RESET_REG_FLAG_SW_RESET);
-	usleep_range(5000, 6000);
-}
-
 static inline u8 iwl_pcie_get_cmd_index(struct iwl_txq *q, u32 index)
 {
 	return index & (q->n_window - 1);
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/trans-gen2.c b/drivers/net/wireless/intel/iwlwifi/pcie/trans-gen2.c
index ac05fd1e74c4..cb4012541f45 100644
--- a/drivers/net/wireless/intel/iwlwifi/pcie/trans-gen2.c
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/trans-gen2.c
@@ -137,7 +137,7 @@ static void iwl_pcie_gen2_apm_stop(struct iwl_trans *trans, bool op_mode_leave)
 	/* Stop device's DMA activity */
 	iwl_pcie_apm_stop_master(trans);
 
-	iwl_pcie_sw_reset(trans);
+	iwl_trans_sw_reset(trans);
 
 	/*
 	 * Clear "initialization complete" bit to move adapter from
@@ -192,7 +192,7 @@ void _iwl_trans_pcie_gen2_stop_device(struct iwl_trans *trans, bool low_power)
 	/* Stop the device, and put it in low power state */
 	iwl_pcie_gen2_apm_stop(trans, false);
 
-	iwl_pcie_sw_reset(trans);
+	iwl_trans_sw_reset(trans);
 
 	/*
 	 * Upon stop, the IVAR table gets erased, so msi-x won't
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/trans.c b/drivers/net/wireless/intel/iwlwifi/pcie/trans.c
index 4541c86881d6..b406b536c850 100644
--- a/drivers/net/wireless/intel/iwlwifi/pcie/trans.c
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/trans.c
@@ -176,6 +176,13 @@ out:
 	kfree(buf);
 }
 
+static void iwl_trans_pcie_sw_reset(struct iwl_trans *trans)
+{
+	/* Reset entire device - do controller reset (results in SHRD_HW_RST) */
+	iwl_set_bit(trans, CSR_RESET, CSR_RESET_REG_FLAG_SW_RESET);
+	usleep_range(5000, 6000);
+}
+
 static void iwl_pcie_free_fw_monitor(struct iwl_trans *trans)
 {
 	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
@@ -446,7 +453,7 @@ static void iwl_pcie_apm_lp_xtal_enable(struct iwl_trans *trans)
 	__iwl_trans_pcie_set_bit(trans, CSR_GP_CNTRL,
 				 CSR_GP_CNTRL_REG_FLAG_XTAL_ON);
 
-	iwl_pcie_sw_reset(trans);
+	iwl_trans_pcie_sw_reset(trans);
 
 	/*
 	 * Set "initialization complete" bit to move adapter from
@@ -487,7 +494,7 @@ static void iwl_pcie_apm_lp_xtal_enable(struct iwl_trans *trans)
 				 apmg_xtal_cfg_reg |
 				 SHR_APMG_XTAL_CFG_XTAL_ON_REQ);
 
-	iwl_pcie_sw_reset(trans);
+	iwl_trans_pcie_sw_reset(trans);
 
 	/* Enable LP XTAL by indirect access through CSR */
 	apmg_gp1_reg = iwl_trans_pcie_read_shr(trans, SHR_APMG_GP1_REG);
@@ -580,7 +587,7 @@ static void iwl_pcie_apm_stop(struct iwl_trans *trans, bool op_mode_leave)
 		return;
 	}
 
-	iwl_pcie_sw_reset(trans);
+	iwl_trans_pcie_sw_reset(trans);
 
 	/*
 	 * Clear "initialization complete" bit to move adapter from
@@ -915,14 +922,9 @@ static int iwl_pcie_load_cpu_sections(struct iwl_trans *trans,
 void iwl_pcie_apply_destination(struct iwl_trans *trans)
 {
 	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
-	const struct iwl_fw_dbg_dest_tlv *dest = trans->dbg_dest_tlv;
+	const struct iwl_fw_dbg_dest_tlv_v1 *dest = trans->dbg_dest_tlv;
 	int i;
 
-	if (dest->version)
-		IWL_ERR(trans,
-			"DBG DEST version is %d - expect issues\n",
-			dest->version);
-
 	IWL_INFO(trans, "Applying debug destination %s\n",
 		 get_fw_dbg_mode_string(dest->monitor_mode));
 
@@ -1270,7 +1272,7 @@ static void _iwl_trans_pcie_stop_device(struct iwl_trans *trans, bool low_power)
 	/* Stop the device, and put it in low power state */
 	iwl_pcie_apm_stop(trans, false);
 
-	iwl_pcie_sw_reset(trans);
+	iwl_trans_pcie_sw_reset(trans);
 
 	/*
 	 * Upon stop, the IVAR table gets erased, so msi-x won't
@@ -1744,7 +1746,7 @@ static int _iwl_trans_pcie_start_hw(struct iwl_trans *trans, bool low_power)
 		return err;
 	}
 
-	iwl_pcie_sw_reset(trans);
+	iwl_trans_pcie_sw_reset(trans);
 
 	err = iwl_pcie_apm_init(trans);
 	if (err)
@@ -2816,8 +2818,17 @@ iwl_trans_pcie_dump_monitor(struct iwl_trans *trans,
 			 * Update pointers to reflect actual values after
 			 * shifting
 			 */
-			base = iwl_read_prph(trans, base) <<
-			       trans->dbg_dest_tlv->base_shift;
+			if (trans->dbg_dest_tlv->version) {
+				base = (iwl_read_prph(trans, base) &
+					IWL_LDBG_M2S_BUF_BA_MSK) <<
+				       trans->dbg_dest_tlv->base_shift;
+				base *= IWL_M2S_UNIT_SIZE;
+				base += trans->cfg->smem_offset;
+			} else {
+				base = iwl_read_prph(trans, base) <<
+				       trans->dbg_dest_tlv->base_shift;
+			}
+
 			iwl_trans_read_mem(trans, base, fw_mon_data->data,
 					   monitor_len / sizeof(u32));
 		} else if (trans->dbg_dest_tlv->monitor_mode == MARBH_MODE) {
@@ -2865,21 +2876,36 @@ static struct iwl_trans_dump_data
 		       trans_pcie->fw_mon_size;
 		monitor_len = trans_pcie->fw_mon_size;
 	} else if (trans->dbg_dest_tlv) {
-		u32 base, end;
+		u32 base, end, cfg_reg;
 
-		base = le32_to_cpu(trans->dbg_dest_tlv->base_reg);
-		end = le32_to_cpu(trans->dbg_dest_tlv->end_reg);
+		if (trans->dbg_dest_tlv->version == 1) {
+			cfg_reg = le32_to_cpu(trans->dbg_dest_tlv->base_reg);
+			cfg_reg = iwl_read_prph(trans, cfg_reg);
+			base = (cfg_reg & IWL_LDBG_M2S_BUF_BA_MSK) <<
+				trans->dbg_dest_tlv->base_shift;
+			base *= IWL_M2S_UNIT_SIZE;
+			base += trans->cfg->smem_offset;
 
-		base = iwl_read_prph(trans, base) <<
-		       trans->dbg_dest_tlv->base_shift;
-		end = iwl_read_prph(trans, end) <<
-		      trans->dbg_dest_tlv->end_shift;
+			monitor_len =
+				(cfg_reg & IWL_LDBG_M2S_BUF_SIZE_MSK) >>
+				trans->dbg_dest_tlv->end_shift;
+			monitor_len *= IWL_M2S_UNIT_SIZE;
+		} else {
+			base = le32_to_cpu(trans->dbg_dest_tlv->base_reg);
+			end = le32_to_cpu(trans->dbg_dest_tlv->end_reg);
 
-		/* Make "end" point to the actual end */
-		if (trans->cfg->device_family >= IWL_DEVICE_FAMILY_8000 ||
-		    trans->dbg_dest_tlv->monitor_mode == MARBH_MODE)
-			end += (1 << trans->dbg_dest_tlv->end_shift);
-		monitor_len = end - base;
+			base = iwl_read_prph(trans, base) <<
+			       trans->dbg_dest_tlv->base_shift;
+			end = iwl_read_prph(trans, end) <<
+			      trans->dbg_dest_tlv->end_shift;
+
+			/* Make "end" point to the actual end */
+			if (trans->cfg->device_family >=
+			    IWL_DEVICE_FAMILY_8000 ||
+			    trans->dbg_dest_tlv->monitor_mode == MARBH_MODE)
+				end += (1 << trans->dbg_dest_tlv->end_shift);
+			monitor_len = end - base;
+		}
 		len += sizeof(*data) + sizeof(struct iwl_fw_error_dump_fw_mon) +
 		       monitor_len;
 	} else {
@@ -3025,6 +3051,7 @@ static void iwl_trans_pcie_resume(struct iwl_trans *trans)
 	.write_mem = iwl_trans_pcie_write_mem,				\
 	.configure = iwl_trans_pcie_configure,				\
 	.set_pmi = iwl_trans_pcie_set_pmi,				\
+	.sw_reset = iwl_trans_pcie_sw_reset,				\
 	.grab_nic_access = iwl_trans_pcie_grab_nic_access,		\
 	.release_nic_access = iwl_trans_pcie_release_nic_access,	\
 	.set_bits_mask = iwl_trans_pcie_set_bits_mask,			\
@@ -3250,9 +3277,9 @@ struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev,
 
 		hw_status = iwl_read_prph(trans, UMAG_GEN_HW_STATUS);
 		if (hw_status & UMAG_GEN_HW_IS_FPGA)
-			trans->cfg = &iwla000_2ax_cfg_qnj_hr_f0;
+			trans->cfg = &iwl22000_2ax_cfg_qnj_hr_f0;
 		else
-			trans->cfg = &iwla000_2ac_cfg_hr;
+			trans->cfg = &iwl22000_2ac_cfg_hr;
 	}
 #endif
 
diff --git a/drivers/net/wireless/mac80211_hwsim.c b/drivers/net/wireless/mac80211_hwsim.c
index f6d4a50f1bdb..1cf22e62e3dd 100644
--- a/drivers/net/wireless/mac80211_hwsim.c
+++ b/drivers/net/wireless/mac80211_hwsim.c
@@ -32,6 +32,7 @@
 #include <net/genetlink.h>
 #include <net/net_namespace.h>
 #include <net/netns/generic.h>
+#include <linux/rhashtable.h>
 #include "mac80211_hwsim.h"
 
 #define WARN_QUEUE 100
@@ -490,6 +491,7 @@ static const struct ieee80211_iface_combination hwsim_if_comb_p2p_dev[] = {
 static spinlock_t hwsim_radio_lock;
 static LIST_HEAD(hwsim_radios);
 static struct workqueue_struct *hwsim_wq;
+static struct rhashtable hwsim_radios_rht;
 static int hwsim_radio_idx;
 
 static struct platform_driver mac80211_hwsim_driver = {
@@ -500,6 +502,7 @@ static struct platform_driver mac80211_hwsim_driver = {
 
 struct mac80211_hwsim_data {
 	struct list_head list;
+	struct rhash_head rht;
 	struct ieee80211_hw *hw;
 	struct device *dev;
 	struct ieee80211_supported_band bands[NUM_NL80211_BANDS];
@@ -574,6 +577,13 @@ struct mac80211_hwsim_data {
 	u64 tx_failed;
 };
 
+static const struct rhashtable_params hwsim_rht_params = {
+	.nelem_hint = 2,
+	.automatic_shrinking = true,
+	.key_len = ETH_ALEN,
+	.key_offset = offsetof(struct mac80211_hwsim_data, addresses[1]),
+	.head_offset = offsetof(struct mac80211_hwsim_data, rht),
+};
 
 struct hwsim_radiotap_hdr {
 	struct ieee80211_radiotap_header hdr;
@@ -729,16 +739,21 @@ static int hwsim_fops_ps_write(void *dat, u64 val)
 	    val != PS_MANUAL_POLL)
 		return -EINVAL;
 
-	old_ps = data->ps;
-	data->ps = val;
-
-	local_bh_disable();
 	if (val == PS_MANUAL_POLL) {
+		if (data->ps != PS_ENABLED)
+			return -EINVAL;
+		local_bh_disable();
 		ieee80211_iterate_active_interfaces_atomic(
 			data->hw, IEEE80211_IFACE_ITER_NORMAL,
 			hwsim_send_ps_poll, data);
-		data->ps_poll_pending = true;
-	} else if (old_ps == PS_DISABLED && val != PS_DISABLED) {
+		local_bh_enable();
+		return 0;
+	}
+	old_ps = data->ps;
+	data->ps = val;
+
+	local_bh_disable();
+	if (old_ps == PS_DISABLED && val != PS_DISABLED) {
 		ieee80211_iterate_active_interfaces_atomic(
 			data->hw, IEEE80211_IFACE_ITER_NORMAL,
 			hwsim_send_nullfunc_ps, data);
@@ -1004,6 +1019,36 @@ static int hwsim_unicast_netgroup(struct mac80211_hwsim_data *data,
 	return res;
 }
 
+static inline u16 trans_tx_rate_flags_ieee2hwsim(struct ieee80211_tx_rate *rate)
+{
+	u16 result = 0;
+
+	if (rate->flags & IEEE80211_TX_RC_USE_RTS_CTS)
+		result |= MAC80211_HWSIM_TX_RC_USE_RTS_CTS;
+	if (rate->flags & IEEE80211_TX_RC_USE_CTS_PROTECT)
+		result |= MAC80211_HWSIM_TX_RC_USE_CTS_PROTECT;
+	if (rate->flags & IEEE80211_TX_RC_USE_SHORT_PREAMBLE)
+		result |= MAC80211_HWSIM_TX_RC_USE_SHORT_PREAMBLE;
+	if (rate->flags & IEEE80211_TX_RC_MCS)
+		result |= MAC80211_HWSIM_TX_RC_MCS;
+	if (rate->flags & IEEE80211_TX_RC_GREEN_FIELD)
+		result |= MAC80211_HWSIM_TX_RC_GREEN_FIELD;
+	if (rate->flags & IEEE80211_TX_RC_40_MHZ_WIDTH)
+		result |= MAC80211_HWSIM_TX_RC_40_MHZ_WIDTH;
+	if (rate->flags & IEEE80211_TX_RC_DUP_DATA)
+		result |= MAC80211_HWSIM_TX_RC_DUP_DATA;
+	if (rate->flags & IEEE80211_TX_RC_SHORT_GI)
+		result |= MAC80211_HWSIM_TX_RC_SHORT_GI;
+	if (rate->flags & IEEE80211_TX_RC_VHT_MCS)
+		result |= MAC80211_HWSIM_TX_RC_VHT_MCS;
+	if (rate->flags & IEEE80211_TX_RC_80_MHZ_WIDTH)
+		result |= MAC80211_HWSIM_TX_RC_80_MHZ_WIDTH;
+	if (rate->flags & IEEE80211_TX_RC_160_MHZ_WIDTH)
+		result |= MAC80211_HWSIM_TX_RC_160_MHZ_WIDTH;
+
+	return result;
+}
+
 static void mac80211_hwsim_tx_frame_nl(struct ieee80211_hw *hw,
 				       struct sk_buff *my_skb,
 				       int dst_portid)
@@ -1016,6 +1061,7 @@ static void mac80211_hwsim_tx_frame_nl(struct ieee80211_hw *hw,
 	unsigned int hwsim_flags = 0;
 	int i;
 	struct hwsim_tx_rate tx_attempts[IEEE80211_TX_MAX_RATES];
+	struct hwsim_tx_rate_flag tx_attempts_flags[IEEE80211_TX_MAX_RATES];
 	uintptr_t cookie;
 
 	if (data->ps != PS_DISABLED)
@@ -1067,7 +1113,11 @@ static void mac80211_hwsim_tx_frame_nl(struct ieee80211_hw *hw,
 
 	for (i = 0; i < IEEE80211_TX_MAX_RATES; i++) {
 		tx_attempts[i].idx = info->status.rates[i].idx;
+		tx_attempts_flags[i].idx = info->status.rates[i].idx;
 		tx_attempts[i].count = info->status.rates[i].count;
+		tx_attempts_flags[i].flags =
+				trans_tx_rate_flags_ieee2hwsim(
+						&info->status.rates[i]);
 	}
 
 	if (nla_put(skb, HWSIM_ATTR_TX_INFO,
@@ -1075,6 +1125,11 @@ static void mac80211_hwsim_tx_frame_nl(struct ieee80211_hw *hw,
 		    tx_attempts))
 		goto nla_put_failure;
 
+	if (nla_put(skb, HWSIM_ATTR_TX_INFO_FLAGS,
+		    sizeof(struct hwsim_tx_rate_flag) * IEEE80211_TX_MAX_RATES,
+		    tx_attempts_flags))
+		goto nla_put_failure;
+
 	/* We create a cookie to identify this skb */
 	data->pending_cookie++;
 	cookie = data->pending_cookie;
@@ -2727,6 +2782,15 @@ static int mac80211_hwsim_new_radio(struct genl_info *info,
 			     CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
 
 	spin_lock_bh(&hwsim_radio_lock);
+	err = rhashtable_insert_fast(&hwsim_radios_rht, &data->rht,
+				     hwsim_rht_params);
+	if (err < 0) {
+		pr_debug("mac80211_hwsim: radio index %d already present\n",
+			 idx);
+		spin_unlock_bh(&hwsim_radio_lock);
+		goto failed_final_insert;
+	}
+
 	list_add_tail(&data->list, &hwsim_radios);
 	spin_unlock_bh(&hwsim_radio_lock);
 
@@ -2735,6 +2799,9 @@ static int mac80211_hwsim_new_radio(struct genl_info *info,
 
 	return idx;
 
+failed_final_insert:
+	debugfs_remove_recursive(data->debugfs);
+	ieee80211_unregister_hw(data->hw);
 failed_hw:
 	device_release_driver(data->dev);
 failed_bind:
@@ -2870,22 +2937,9 @@ static void hwsim_mon_setup(struct net_device *dev)
 
 static struct mac80211_hwsim_data *get_hwsim_data_ref_from_addr(const u8 *addr)
 {
-	struct mac80211_hwsim_data *data;
-	bool _found = false;
-
-	spin_lock_bh(&hwsim_radio_lock);
-	list_for_each_entry(data, &hwsim_radios, list) {
-		if (memcmp(data->addresses[1].addr, addr, ETH_ALEN) == 0) {
-			_found = true;
-			break;
-		}
-	}
-	spin_unlock_bh(&hwsim_radio_lock);
-
-	if (!_found)
-		return NULL;
-
-	return data;
+	return rhashtable_lookup_fast(&hwsim_radios_rht,
+				      addr,
+				      hwsim_rht_params);
 }
 
 static void hwsim_register_wmediumd(struct net *net, u32 portid)
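get_hwsim_data_ref_from_addr() drops the locked list walk for an rhashtable lookup keyed on the radio's second MAC address; hwsim_rht_params above tells the hash table where inside each mac80211_hwsim_data the key and hash head live. A userspace illustration of what those offsetof()-based parameters resolve to (toy struct, not the driver's):

#include <stddef.h>
#include <stdio.h>

#define ETH_ALEN 6

struct addr { unsigned char addr[ETH_ALEN]; };

struct radio {
	struct addr addresses[2];	/* [1] is the lookup key */
	int idx;
};

int main(void)
{
	struct radio r = {
		.addresses = { { { 0x02, 0, 0, 0, 0, 1 } },
			       { { 0x42, 0, 0, 0, 0, 1 } } },
		.idx = 7,
	};
	/* key_offset = offsetof(..., addresses[1]), key_len = ETH_ALEN */
	const unsigned char *key = (const unsigned char *)&r +
				   offsetof(struct radio, addresses[1]);

	printf("key[0]=0x%02x len=%d idx=%d\n", key[0], ETH_ALEN, r.idx);
	return 0;
}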
@@ -2970,7 +3024,6 @@ static int hwsim_tx_info_frame_received_nl(struct sk_buff *skb_2,
 	for (i = 0; i < IEEE80211_TX_MAX_RATES; i++) {
 		txi->status.rates[i].idx = tx_attempts[i].idx;
 		txi->status.rates[i].count = tx_attempts[i].count;
-		/*txi->status.rates[i].flags = 0;*/
 	}
 
 	txi->status.ack_signal = nla_get_u32(info->attrs[HWSIM_ATTR_SIGNAL]);
@@ -3150,8 +3203,10 @@ static int hwsim_new_radio_nl(struct sk_buff *msg, struct genl_info *info)
 	if (info->attrs[HWSIM_ATTR_REG_CUSTOM_REG]) {
 		u32 idx = nla_get_u32(info->attrs[HWSIM_ATTR_REG_CUSTOM_REG]);
 
-		if (idx >= ARRAY_SIZE(hwsim_world_regdom_custom))
+		if (idx >= ARRAY_SIZE(hwsim_world_regdom_custom)) {
+			kfree(hwname);
 			return -EINVAL;
+		}
 		param.regd = hwsim_world_regdom_custom[idx];
 	}
 
@@ -3192,6 +3247,8 @@ static int hwsim_del_radio_nl(struct sk_buff *msg, struct genl_info *info)
 			continue;
 
 		list_del(&data->list);
+		rhashtable_remove_fast(&hwsim_radios_rht, &data->rht,
+				       hwsim_rht_params);
 		spin_unlock_bh(&hwsim_radio_lock);
 		mac80211_hwsim_del_radio(data, wiphy_name(data->hw->wiphy),
 					 info);
@@ -3347,6 +3404,8 @@ static void remove_user_radios(u32 portid)
 	list_for_each_entry_safe(entry, tmp, &hwsim_radios, list) {
 		if (entry->destroy_on_close && entry->portid == portid) {
 			list_del(&entry->list);
+			rhashtable_remove_fast(&hwsim_radios_rht, &entry->rht,
+					       hwsim_rht_params);
 			INIT_WORK(&entry->destroy_work, destroy_radio);
 			queue_work(hwsim_wq, &entry->destroy_work);
 		}
@@ -3422,6 +3481,8 @@ static void __net_exit hwsim_exit_net(struct net *net)
 			continue;
 
 		list_del(&data->list);
+		rhashtable_remove_fast(&hwsim_radios_rht, &data->rht,
+				       hwsim_rht_params);
 		INIT_WORK(&data->destroy_work, destroy_radio);
 		queue_work(hwsim_wq, &data->destroy_work);
 	}
@@ -3458,6 +3519,7 @@ static int __init init_mac80211_hwsim(void)
 	hwsim_wq = alloc_workqueue("hwsim_wq", WQ_MEM_RECLAIM, 0);
 	if (!hwsim_wq)
 		return -ENOMEM;
+	rhashtable_init(&hwsim_radios_rht, &hwsim_rht_params);
 
 	err = register_pernet_device(&hwsim_net_ops);
 	if (err)
@@ -3599,6 +3661,7 @@ static void __exit exit_mac80211_hwsim(void)
 	mac80211_hwsim_free();
 	flush_workqueue(hwsim_wq);
 
+	rhashtable_destroy(&hwsim_radios_rht);
 	unregister_netdev(hwsim_mon);
 	platform_driver_unregister(&mac80211_hwsim_driver);
 	unregister_pernet_device(&hwsim_net_ops);
diff --git a/drivers/net/wireless/mac80211_hwsim.h b/drivers/net/wireless/mac80211_hwsim.h
index 3f5eda591dba..a96a79c1eff5 100644
--- a/drivers/net/wireless/mac80211_hwsim.h
+++ b/drivers/net/wireless/mac80211_hwsim.h
@@ -64,7 +64,8 @@ enum hwsim_tx_control_flags {
  * @HWSIM_CMD_TX_INFO_FRAME: Transmission info report from user space to
  *	kernel, uses:
  *	%HWSIM_ATTR_ADDR_TRANSMITTER, %HWSIM_ATTR_FLAGS,
- *	%HWSIM_ATTR_TX_INFO, %HWSIM_ATTR_SIGNAL, %HWSIM_ATTR_COOKIE
+ *	%HWSIM_ATTR_TX_INFO, %HWSIM_ATTR_TX_INFO_FLAGS,
+ *	%HWSIM_ATTR_SIGNAL, %HWSIM_ATTR_COOKIE
  * @HWSIM_CMD_NEW_RADIO: create a new radio with the given parameters,
  *	returns the radio ID (>= 0) or negative on errors, if successful
  *	then multicast the result
@@ -123,6 +124,8 @@ enum {
  * @HWSIM_ATTR_RADIO_NAME: Name of radio, e.g. phy666
  * @HWSIM_ATTR_NO_VIF:  Do not create vif (wlanX) when creating radio.
  * @HWSIM_ATTR_FREQ: Frequency at which packet is transmitted or received.
+ * @HWSIM_ATTR_TX_INFO_FLAGS: additional flags for corresponding
+ *	rates of %HWSIM_ATTR_TX_INFO
  * @__HWSIM_ATTR_MAX: enum limit
  */
 
@@ -149,6 +152,7 @@ enum {
 	HWSIM_ATTR_NO_VIF,
 	HWSIM_ATTR_FREQ,
 	HWSIM_ATTR_PAD,
+	HWSIM_ATTR_TX_INFO_FLAGS,
 	__HWSIM_ATTR_MAX,
 };
 #define HWSIM_ATTR_MAX (__HWSIM_ATTR_MAX - 1)
@@ -171,4 +175,66 @@ struct hwsim_tx_rate {
 	u8 count;
 } __packed;
 
+/**
+ * enum hwsim_tx_rate_flags - per-rate flags set by the rate control algorithm.
+ *	Inspired by &enum mac80211_rate_control_flags. New flags may be
+ *	appended, but old flags must not be deleted, to preserve
+ *	compatibility with userspace.
+ *
+ * These flags are set by the Rate control algorithm for each rate during tx,
+ * in the @flags member of struct ieee80211_tx_rate.
+ *
+ * @MAC80211_HWSIM_TX_RC_USE_RTS_CTS: Use RTS/CTS exchange for this rate.
+ * @MAC80211_HWSIM_TX_RC_USE_CTS_PROTECT: CTS-to-self protection is required.
+ *	This is set if the current BSS requires ERP protection.
+ * @MAC80211_HWSIM_TX_RC_USE_SHORT_PREAMBLE: Use short preamble.
+ * @MAC80211_HWSIM_TX_RC_MCS: HT rate.
+ * @MAC80211_HWSIM_TX_RC_VHT_MCS: VHT MCS rate, in this case the idx field is
+ *	split into a higher 4 bits (Nss) and lower 4 bits (MCS number)
+ * @MAC80211_HWSIM_TX_RC_GREEN_FIELD: Indicates whether this rate should be used
+ *	in Greenfield mode.
+ * @MAC80211_HWSIM_TX_RC_40_MHZ_WIDTH: Indicates if the Channel Width should be
+ *	40 MHz.
+ * @MAC80211_HWSIM_TX_RC_80_MHZ_WIDTH: Indicates 80 MHz transmission
+ * @MAC80211_HWSIM_TX_RC_160_MHZ_WIDTH: Indicates 160 MHz transmission
+ *	(80+80 isn't supported yet)
+ * @MAC80211_HWSIM_TX_RC_DUP_DATA: The frame should be transmitted on both of
+ *	the adjacent 20 MHz channels, if the current channel type is
+ *	NL80211_CHAN_HT40MINUS or NL80211_CHAN_HT40PLUS.
+ * @MAC80211_HWSIM_TX_RC_SHORT_GI: Short Guard interval should be used for this
+ *	rate.
+ */
+enum hwsim_tx_rate_flags {
+	MAC80211_HWSIM_TX_RC_USE_RTS_CTS		= BIT(0),
+	MAC80211_HWSIM_TX_RC_USE_CTS_PROTECT		= BIT(1),
+	MAC80211_HWSIM_TX_RC_USE_SHORT_PREAMBLE	= BIT(2),
+
+	/* rate index is an HT/VHT MCS instead of an index */
+	MAC80211_HWSIM_TX_RC_MCS			= BIT(3),
+	MAC80211_HWSIM_TX_RC_GREEN_FIELD		= BIT(4),
+	MAC80211_HWSIM_TX_RC_40_MHZ_WIDTH		= BIT(5),
+	MAC80211_HWSIM_TX_RC_DUP_DATA		= BIT(6),
+	MAC80211_HWSIM_TX_RC_SHORT_GI		= BIT(7),
+	MAC80211_HWSIM_TX_RC_VHT_MCS			= BIT(8),
+	MAC80211_HWSIM_TX_RC_80_MHZ_WIDTH		= BIT(9),
+	MAC80211_HWSIM_TX_RC_160_MHZ_WIDTH		= BIT(10),
+};
+
+/**
+ * struct hwsim_tx_rate_flag - rate selection flags for tx status reporting
+ *
+ * @idx: rate index the flags apply to, parallel to the entries in
+ *	%HWSIM_ATTR_TX_INFO
+ * @flags: the rate flags, per &enum hwsim_tx_rate_flags
+ *
+ * A value of -1 for @idx indicates an invalid rate and, if used
+ * in an array of retry rates, that no more rates should be tried.
+ *
+ * When used for transmit status reporting, the driver should
+ * always report the rate and flags used for each attempt.
+ */
+struct hwsim_tx_rate_flag {
+	s8 idx;
+	u16 flags;
+} __packed;
 #endif /* __MAC80211_HWSIM_H */
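HWSIM_ATTR_TX_INFO_FLAGS carries one hwsim_tx_rate_flag record per retry-table slot, parallel to the existing HWSIM_ATTR_TX_INFO array; since the struct is __packed, each record is exactly 3 bytes on the wire. A sketch of the payload sizing (GCC packed attribute, illustrative constants):

#include <stdint.h>
#include <stdio.h>

#define TX_MAX_RATES 4

struct tx_rate_flag {
	int8_t idx;
	uint16_t flags;
} __attribute__((packed));

int main(void)
{
	struct tx_rate_flag flags[TX_MAX_RATES] = {
		{ .idx = 7, .flags = 1 << 3 },	/* BIT(3): MCS rate */
		{ .idx = -1 }, { .idx = -1 }, { .idx = -1 },
	};

	/* nla_put() in the patch sends sizeof(struct) * max-rates bytes. */
	printf("payload %zu bytes, %zu per rate\n",
	       sizeof(flags), sizeof(flags[0]));
	return 0;
}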
diff --git a/drivers/net/wireless/marvell/mwifiex/cfg80211.c b/drivers/net/wireless/marvell/mwifiex/cfg80211.c
index 6e0d9a9c5cfb..ce4432c535f0 100644
--- a/drivers/net/wireless/marvell/mwifiex/cfg80211.c
+++ b/drivers/net/wireless/marvell/mwifiex/cfg80211.c
@@ -1116,6 +1116,12 @@ mwifiex_cfg80211_change_virtual_intf(struct wiphy *wiphy,
 	struct mwifiex_private *priv = mwifiex_netdev_get_priv(dev);
 	enum nl80211_iftype curr_iftype = dev->ieee80211_ptr->iftype;
 
+	if (priv->scan_request) {
+		mwifiex_dbg(priv->adapter, ERROR,
+			    "change virtual interface: scan in process\n");
+		return -EBUSY;
+	}
+
 	switch (curr_iftype) {
 	case NL80211_IFTYPE_ADHOC:
 		switch (type) {
@@ -1180,7 +1186,6 @@ mwifiex_cfg80211_change_virtual_intf(struct wiphy *wiphy,
 	case NL80211_IFTYPE_AP:
 		switch (type) {
 		case NL80211_IFTYPE_ADHOC:
-		case NL80211_IFTYPE_STATION:
 			return mwifiex_change_vif_to_sta_adhoc(dev, curr_iftype,
 							       type, params);
 			break;
diff --git a/drivers/net/wireless/marvell/mwifiex/cmdevt.c b/drivers/net/wireless/marvell/mwifiex/cmdevt.c
index dcc529e9c0ef..874660052055 100644
--- a/drivers/net/wireless/marvell/mwifiex/cmdevt.c
+++ b/drivers/net/wireless/marvell/mwifiex/cmdevt.c
@@ -290,13 +290,16 @@ static int mwifiex_dnld_cmd_to_fw(struct mwifiex_private *priv,
 	adapter->dbg.last_cmd_act[adapter->dbg.last_cmd_index] =
 			get_unaligned_le16((u8 *)host_cmd + S_DS_GEN);
 
+	/* Set up the timer after transmitting the command, except for
+	 * commands that have no command response (e.g. FW_DUMP_EVENT).
+	 */
+	if (cmd_code != HostCmd_CMD_FW_DUMP_EVENT)
+		mod_timer(&adapter->cmd_timer,
+			  jiffies + msecs_to_jiffies(MWIFIEX_TIMER_10S));
+
 	/* Clear BSS_NO_BITS from HostCmd */
 	cmd_code &= HostCmd_CMD_ID_MASK;
 
-	/* Setup the timer after transmit command */
-	mod_timer(&adapter->cmd_timer,
-		  jiffies + msecs_to_jiffies(MWIFIEX_TIMER_10S));
-
 	return 0;
 }
 
diff --git a/drivers/net/wireless/marvell/mwifiex/debugfs.c b/drivers/net/wireless/marvell/mwifiex/debugfs.c
index 6f4239be609d..db2872daae97 100644
--- a/drivers/net/wireless/marvell/mwifiex/debugfs.c
+++ b/drivers/net/wireless/marvell/mwifiex/debugfs.c
@@ -168,10 +168,15 @@ mwifiex_device_dump_read(struct file *file, char __user *ubuf,
 {
 	struct mwifiex_private *priv = file->private_data;
 
-	if (!priv->adapter->if_ops.device_dump)
-		return -EIO;
-
-	priv->adapter->if_ops.device_dump(priv->adapter);
+	/* For command timeouts, USB firmware will automatically emit
+	 * firmware dump events, so we don't implement device_dump().
+	 * For user-initiated dumps, we trigger it ourselves.
+	 */
+	if (priv->adapter->iface_type == MWIFIEX_USB)
+		mwifiex_send_cmd(priv, HostCmd_CMD_FW_DUMP_EVENT,
+				 HostCmd_ACT_GEN_SET, 0, NULL, true);
+	else
+		priv->adapter->if_ops.device_dump(priv->adapter);
 
 	return 0;
 }
diff --git a/drivers/net/wireless/marvell/mwifiex/fw.h b/drivers/net/wireless/marvell/mwifiex/fw.h
index 13cd58e963b3..9c2cdef54074 100644
--- a/drivers/net/wireless/marvell/mwifiex/fw.h
+++ b/drivers/net/wireless/marvell/mwifiex/fw.h
@@ -56,6 +56,15 @@ struct mwifiex_fw_data {
 	u8 data[1];
 } __packed;
 
+struct mwifiex_fw_dump_header {
+	__le16          seq_num;
+	__le16          reserved;
+	__le16          type;
+	__le16          len;
+} __packed;
+
+#define FW_DUMP_INFO_ENDED 0x0002
+
 #define MWIFIEX_FW_DNLD_CMD_1 0x1
 #define MWIFIEX_FW_DNLD_CMD_5 0x5
 #define MWIFIEX_FW_DNLD_CMD_6 0x6
@@ -400,6 +409,7 @@ enum MWIFIEX_802_11_PRIVACY_FILTER {
 #define HostCmd_CMD_TDLS_CONFIG                       0x0100
 #define HostCmd_CMD_MC_POLICY                         0x0121
 #define HostCmd_CMD_TDLS_OPER                         0x0122
+#define HostCmd_CMD_FW_DUMP_EVENT		      0x0125
 #define HostCmd_CMD_SDIO_SP_RX_AGGR_CFG               0x0223
 #define HostCmd_CMD_CHAN_REGION_CFG		      0x0242
 #define HostCmd_CMD_PACKET_AGGR_CTRL		      0x0251
@@ -570,6 +580,7 @@ enum mwifiex_channel_flags {
 #define EVENT_BG_SCAN_STOPPED           0x00000065
 #define EVENT_REMAIN_ON_CHAN_EXPIRED    0x0000005f
 #define EVENT_MULTI_CHAN_INFO           0x0000006a
+#define EVENT_FW_DUMP_INFO		0x00000073
 #define EVENT_TX_STATUS_REPORT		0x00000074
 #define EVENT_BT_COEX_WLAN_PARA_CHANGE	0X00000076
 
diff --git a/drivers/net/wireless/marvell/mwifiex/init.c b/drivers/net/wireless/marvell/mwifiex/init.c
index e1aa86042469..d239e9248c05 100644
--- a/drivers/net/wireless/marvell/mwifiex/init.c
+++ b/drivers/net/wireless/marvell/mwifiex/init.c
@@ -64,6 +64,13 @@ static void wakeup_timer_fn(struct timer_list *t)
 		adapter->if_ops.card_reset(adapter);
 }
 
+static void fw_dump_timer_fn(struct timer_list *t)
+{
+	struct mwifiex_adapter *adapter = from_timer(adapter, t, devdump_timer);
+
+	mwifiex_upload_device_dump(adapter);
+}
+
 /*
  * This function initializes the private structure and sets default
  * values to the members.
@@ -314,6 +321,8 @@ static void mwifiex_init_adapter(struct mwifiex_adapter *adapter)
 	adapter->iface_limit.p2p_intf = MWIFIEX_MAX_P2P_NUM;
 	adapter->active_scan_triggered = false;
 	timer_setup(&adapter->wakeup_timer, wakeup_timer_fn, 0);
+	adapter->devdump_len = 0;
+	timer_setup(&adapter->devdump_timer, fw_dump_timer_fn, 0);
 }
 
 /*
@@ -396,6 +405,7 @@ static void
 mwifiex_adapter_cleanup(struct mwifiex_adapter *adapter)
 {
 	del_timer(&adapter->wakeup_timer);
+	del_timer_sync(&adapter->devdump_timer);
 	mwifiex_cancel_all_pending_cmd(adapter);
 	wake_up_interruptible(&adapter->cmd_wait_q.wait);
 	wake_up_interruptible(&adapter->hs_activate_wait_q);
diff --git a/drivers/net/wireless/marvell/mwifiex/main.c b/drivers/net/wireless/marvell/mwifiex/main.c
index a96bd7e653bf..12e739950332 100644
--- a/drivers/net/wireless/marvell/mwifiex/main.c
+++ b/drivers/net/wireless/marvell/mwifiex/main.c
@@ -1051,9 +1051,30 @@ void mwifiex_multi_chan_resync(struct mwifiex_adapter *adapter)
 }
 EXPORT_SYMBOL_GPL(mwifiex_multi_chan_resync);
 
-int mwifiex_drv_info_dump(struct mwifiex_adapter *adapter, void **drv_info)
+void mwifiex_upload_device_dump(struct mwifiex_adapter *adapter)
 {
-	void *p;
+	/* Dump all the memory data into a single file; a userspace script
+	 * will split it back into multiple files.
+	 */
+	mwifiex_dbg(adapter, MSG,
+		    "== mwifiex dump information to /sys/class/devcoredump start\n");
+	dev_coredumpv(adapter->dev, adapter->devdump_data, adapter->devdump_len,
+		      GFP_KERNEL);
+	mwifiex_dbg(adapter, MSG,
+		    "== mwifiex dump information to /sys/class/devcoredump end\n");
+
+	/* The device dump data will be freed by the device coredump release
+	 * function after 5 minutes. Reset adapter->devdump_data and
+	 * ->devdump_len here so they cannot be accidentally reused.
+	 */
+	adapter->devdump_data = NULL;
+	adapter->devdump_len = 0;
+}
+EXPORT_SYMBOL_GPL(mwifiex_upload_device_dump);
+
+void mwifiex_drv_info_dump(struct mwifiex_adapter *adapter)
+{
+	char *p;
 	char drv_version[64];
 	struct usb_card_rec *cardp;
 	struct sdio_mmc_card *sdio_card;
@@ -1061,17 +1082,12 @@ int mwifiex_drv_info_dump(struct mwifiex_adapter *adapter, void **drv_info)
 	int i, idx;
 	struct netdev_queue *txq;
 	struct mwifiex_debug_info *debug_info;
-	void *drv_info_dump;
 
 	mwifiex_dbg(adapter, MSG, "===mwifiex driverinfo dump start===\n");
 
-	/* memory allocate here should be free in mwifiex_upload_device_dump*/
-	drv_info_dump = vzalloc(MWIFIEX_DRV_INFO_SIZE_MAX);
-
-	if (!drv_info_dump)
-		return 0;
-
-	p = (char *)(drv_info_dump);
+	p = adapter->devdump_data;
+	strcpy(p, "========Start dump driverinfo========\n");
+	p += strlen("========Start dump driverinfo========\n");
 	p += sprintf(p, "driver_name = " "\"mwifiex\"\n");
 
 	mwifiex_drv_get_driver_version(adapter, drv_version,
@@ -1155,21 +1171,18 @@ int mwifiex_drv_info_dump(struct mwifiex_adapter *adapter, void **drv_info)
 		kfree(debug_info);
 	}
 
+	strcpy(p, "\n========End dump========\n");
+	p += strlen("\n========End dump========\n");
 	mwifiex_dbg(adapter, MSG, "===mwifiex driverinfo dump end===\n");
-	*drv_info = drv_info_dump;
-	return p - drv_info_dump;
+	adapter->devdump_len = p - (char *)adapter->devdump_data;
 }
 EXPORT_SYMBOL_GPL(mwifiex_drv_info_dump);
 
-void mwifiex_upload_device_dump(struct mwifiex_adapter *adapter, void *drv_info,
-				int drv_info_size)
+void mwifiex_prepare_fw_dump_info(struct mwifiex_adapter *adapter)
 {
-	u8 idx, *dump_data, *fw_dump_ptr;
-	u32 dump_len;
-
-	dump_len = (strlen("========Start dump driverinfo========\n") +
-		       drv_info_size +
-		       strlen("\n========End dump========\n"));
+	u8 idx;
+	char *fw_dump_ptr;
+	u32 dump_len = 0;
 
 	for (idx = 0; idx < adapter->num_mem_types; idx++) {
 		struct memory_type_mapping *entry =
@@ -1184,24 +1197,24 @@ void mwifiex_upload_device_dump(struct mwifiex_adapter *adapter, void *drv_info,
 		}
 	}
 
-	dump_data = vzalloc(dump_len + 1);
-	if (!dump_data)
-		goto done;
-
-	fw_dump_ptr = dump_data;
+	if (dump_len + 1 + adapter->devdump_len > MWIFIEX_FW_DUMP_SIZE) {
+		/* Reallocate in case the dump would overflow the buffer */
+		fw_dump_ptr = vzalloc(dump_len + 1 + adapter->devdump_len);
+		mwifiex_dbg(adapter, MSG, "Realloc device dump data.\n");
+		if (!fw_dump_ptr) {
+			vfree(adapter->devdump_data);
+			mwifiex_dbg(adapter, ERROR,
+				    "vzalloc devdump data failure!\n");
+			return;
+		}
 
-	/* Dump all the memory data into single file, a userspace script will
-	 * be used to split all the memory data to multiple files
-	 */
-	mwifiex_dbg(adapter, MSG,
-		    "== mwifiex dump information to /sys/class/devcoredump start");
+		memmove(fw_dump_ptr, adapter->devdump_data,
+			adapter->devdump_len);
+		vfree(adapter->devdump_data);
+		adapter->devdump_data = fw_dump_ptr;
+	}
 
-	strcpy(fw_dump_ptr, "========Start dump driverinfo========\n");
-	fw_dump_ptr += strlen("========Start dump driverinfo========\n");
-	memcpy(fw_dump_ptr, drv_info, drv_info_size);
-	fw_dump_ptr += drv_info_size;
-	strcpy(fw_dump_ptr, "\n========End dump========\n");
-	fw_dump_ptr += strlen("\n========End dump========\n");
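+	/* Append the firmware memory dumps right after the driver info
+	 * already written into the buffer.
+	 */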
+	fw_dump_ptr = (char *)adapter->devdump_data + adapter->devdump_len;
 
 	for (idx = 0; idx < adapter->num_mem_types; idx++) {
 		struct memory_type_mapping *entry =
@@ -1225,14 +1238,8 @@ void mwifiex_upload_device_dump(struct mwifiex_adapter *adapter, void *drv_info,
 		}
 	}
 
-	/* device dump data will be free in device coredump release function
-	 * after 5 min
-	 */
-	dev_coredumpv(adapter->dev, dump_data, dump_len, GFP_KERNEL);
-	mwifiex_dbg(adapter, MSG,
-		    "== mwifiex dump information to /sys/class/devcoredump end");
+	adapter->devdump_len = fw_dump_ptr - (char *)adapter->devdump_data;
 
-done:
 	for (idx = 0; idx < adapter->num_mem_types; idx++) {
 		struct memory_type_mapping *entry =
 			&adapter->mem_type_mapping_tbl[idx];
@@ -1241,10 +1248,8 @@ done:
 		entry->mem_ptr = NULL;
 		entry->mem_size = 0;
 	}
-
-	vfree(drv_info);
 }
-EXPORT_SYMBOL_GPL(mwifiex_upload_device_dump);
+EXPORT_SYMBOL_GPL(mwifiex_prepare_fw_dump_info);
 
 /*
  * CFG802.11 network device handler for statistics retrieval.
diff --git a/drivers/net/wireless/marvell/mwifiex/main.h b/drivers/net/wireless/marvell/mwifiex/main.h
index 154c0796c0c5..6b5539b1f4d8 100644
--- a/drivers/net/wireless/marvell/mwifiex/main.h
+++ b/drivers/net/wireless/marvell/mwifiex/main.h
@@ -94,6 +94,8 @@ enum {
 
 #define MAX_EVENT_SIZE                  2048
 
+#define MWIFIEX_FW_DUMP_SIZE       (2 * 1024 * 1024)
+
 #define ARP_FILTER_MAX_BUF_SIZE         68
 
 #define MWIFIEX_KEY_BUFFER_SIZE			16
@@ -1032,6 +1034,10 @@ struct mwifiex_adapter {
 	bool wake_by_wifi;
 	/* Aggregation parameters*/
 	struct bus_aggr_params bus_aggr;
+	/* Device dump data/length */
+	void *devdump_data;
+	int devdump_len;
+	struct timer_list devdump_timer;
 };
 
 void mwifiex_process_tx_queue(struct mwifiex_adapter *adapter);
@@ -1656,9 +1662,9 @@ void mwifiex_hist_data_add(struct mwifiex_private *priv,
 u8 mwifiex_adjust_data_rate(struct mwifiex_private *priv,
 			    u8 rx_rate, u8 ht_info);
 
-int mwifiex_drv_info_dump(struct mwifiex_adapter *adapter, void **drv_info);
-void mwifiex_upload_device_dump(struct mwifiex_adapter *adapter, void *drv_info,
-				int drv_info_size);
+void mwifiex_drv_info_dump(struct mwifiex_adapter *adapter);
+void mwifiex_prepare_fw_dump_info(struct mwifiex_adapter *adapter);
+void mwifiex_upload_device_dump(struct mwifiex_adapter *adapter);
 void *mwifiex_alloc_dma_align_buf(int rx_len, gfp_t flags);
 void mwifiex_queue_main_work(struct mwifiex_adapter *adapter);
 int mwifiex_get_wakeup_reason(struct mwifiex_private *priv, u16 action,
@@ -1677,6 +1683,7 @@ void mwifiex_process_multi_chan_event(struct mwifiex_private *priv,
 void mwifiex_multi_chan_resync(struct mwifiex_adapter *adapter);
 int mwifiex_set_mac_address(struct mwifiex_private *priv,
 			    struct net_device *dev);
+void mwifiex_devdump_tmo_func(unsigned long function_context);
 
 #ifdef CONFIG_DEBUG_FS
 void mwifiex_debugfs_init(void);
diff --git a/drivers/net/wireless/marvell/mwifiex/pcie.c b/drivers/net/wireless/marvell/mwifiex/pcie.c
index cd314946452c..97a6199692ab 100644
--- a/drivers/net/wireless/marvell/mwifiex/pcie.c
+++ b/drivers/net/wireless/marvell/mwifiex/pcie.c
@@ -2769,19 +2769,27 @@ static void mwifiex_pcie_fw_dump(struct mwifiex_adapter *adapter)
 
 static void mwifiex_pcie_device_dump_work(struct mwifiex_adapter *adapter)
 {
-	int drv_info_size;
-	void *drv_info;
+	adapter->devdump_data = vzalloc(MWIFIEX_FW_DUMP_SIZE);
+	if (!adapter->devdump_data) {
+		mwifiex_dbg(adapter, ERROR,
+			    "vzalloc devdump data failure!\n");
+		return;
+	}
 
-	drv_info_size = mwifiex_drv_info_dump(adapter, &drv_info);
+	mwifiex_drv_info_dump(adapter);
 	mwifiex_pcie_fw_dump(adapter);
-	mwifiex_upload_device_dump(adapter, drv_info, drv_info_size);
+	mwifiex_prepare_fw_dump_info(adapter);
+	mwifiex_upload_device_dump(adapter);
 }
 
 static void mwifiex_pcie_card_reset_work(struct mwifiex_adapter *adapter)
 {
 	struct pcie_service_card *card = adapter->card;
 
-	pci_reset_function(card->dev);
+	/* We can't afford to wait here; remove() might be waiting on us. If we
+	 * can't grab the device lock, maybe we'll get another chance later.
+	 */
+	pci_try_reset_function(card->dev);
 }
 
 static void mwifiex_pcie_work(struct work_struct *work)
diff --git a/drivers/net/wireless/marvell/mwifiex/sdio.c b/drivers/net/wireless/marvell/mwifiex/sdio.c
index fd5183c10c4e..a82880132af4 100644
--- a/drivers/net/wireless/marvell/mwifiex/sdio.c
+++ b/drivers/net/wireless/marvell/mwifiex/sdio.c
@@ -2505,15 +2505,21 @@ done:
 static void mwifiex_sdio_device_dump_work(struct mwifiex_adapter *adapter)
 {
 	struct sdio_mmc_card *card = adapter->card;
-	int drv_info_size;
-	void *drv_info;
 
-	drv_info_size = mwifiex_drv_info_dump(adapter, &drv_info);
+	adapter->devdump_data = vzalloc(MWIFIEX_FW_DUMP_SIZE);
+	if (!adapter->devdump_data) {
+		mwifiex_dbg(adapter, ERROR,
+			    "vzalloc devdump data failure!\n");
+		return;
+	}
+
+	mwifiex_drv_info_dump(adapter);
 	if (card->fw_dump_enh)
 		mwifiex_sdio_generic_fw_dump(adapter);
 	else
 		mwifiex_sdio_fw_dump(adapter);
-	mwifiex_upload_device_dump(adapter, drv_info, drv_info_size);
+	mwifiex_prepare_fw_dump_info(adapter);
+	mwifiex_upload_device_dump(adapter);
 }
 
 static void mwifiex_sdio_work(struct work_struct *work)
diff --git a/drivers/net/wireless/marvell/mwifiex/sta_cmd.c b/drivers/net/wireless/marvell/mwifiex/sta_cmd.c
index fb090144a6d8..211e47d8b318 100644
--- a/drivers/net/wireless/marvell/mwifiex/sta_cmd.c
+++ b/drivers/net/wireless/marvell/mwifiex/sta_cmd.c
@@ -2206,6 +2206,10 @@ int mwifiex_sta_prepare_cmd(struct mwifiex_private *priv, uint16_t cmd_no,
 	case HostCmd_CMD_CHAN_REGION_CFG:
 		ret = mwifiex_cmd_chan_region_cfg(priv, cmd_ptr, cmd_action);
 		break;
+	case HostCmd_CMD_FW_DUMP_EVENT:
+		cmd_ptr->command = cpu_to_le16(cmd_no);
+		cmd_ptr->size = cpu_to_le16(S_DS_GEN);
+		break;
 	default:
 		mwifiex_dbg(priv->adapter, ERROR,
 			    "PREP_CMD: unknown cmd- %#x\n", cmd_no);
diff --git a/drivers/net/wireless/marvell/mwifiex/sta_event.c b/drivers/net/wireless/marvell/mwifiex/sta_event.c
index d8db412b76c6..93dfb76cd8a6 100644
--- a/drivers/net/wireless/marvell/mwifiex/sta_event.c
+++ b/drivers/net/wireless/marvell/mwifiex/sta_event.c
@@ -584,6 +584,62 @@ void mwifiex_bt_coex_wlan_param_update_event(struct mwifiex_private *priv,
 		adapter->coex_rx_win_size);
 }
 
+static void
+mwifiex_fw_dump_info_event(struct mwifiex_private *priv,
+			   struct sk_buff *event_skb)
+{
+	struct mwifiex_adapter *adapter = priv->adapter;
+	struct mwifiex_fw_dump_header *fw_dump_hdr =
+				(void *)adapter->event_body;
+
+	if (adapter->iface_type != MWIFIEX_USB) {
+		mwifiex_dbg(adapter, MSG,
+			    "event is not on usb interface, ignore it\n");
+		return;
+	}
+
+	if (!adapter->devdump_data) {
+		/* On receiving the first event, allocate the device dump
+		 * buffer and dump the driver info into it.
+		 */
+		adapter->devdump_data = vzalloc(MWIFIEX_FW_DUMP_SIZE);
+		if (!adapter->devdump_data) {
+			mwifiex_dbg(adapter, ERROR,
+				    "vzalloc devdump data failure!\n");
+			return;
+		}
+
+		mwifiex_drv_info_dump(adapter);
+
+		/* If no further event arrives within 10s, upload the
+		 * device dump data anyway. This is useful when the
+		 * end-of-transmission event gets lost; in that corner
+		 * case the user still receives a partial dump.
+		 */
+		mod_timer(&adapter->devdump_timer,
+			  jiffies + msecs_to_jiffies(MWIFIEX_TIMER_10S));
+	}
+
+	/* Overflow check */
+	if (adapter->devdump_len + event_skb->len >= MWIFIEX_FW_DUMP_SIZE)
+		goto upload_dump;
+
+	memmove(adapter->devdump_data + adapter->devdump_len,
+		event_skb->data, event_skb->len);
+	adapter->devdump_len += event_skb->len;
+
+	if (le16_to_cpu(fw_dump_hdr->type) == FW_DUMP_INFO_ENDED) {
+		mwifiex_dbg(adapter, MSG,
+			    "received end-of-transmission flag event!\n");
+		goto upload_dump;
+	}
+	return;
+
+upload_dump:
+	del_timer_sync(&adapter->devdump_timer);
+	mwifiex_upload_device_dump(adapter);
+}
+
 /*
  * This function handles events generated by firmware.
  *
@@ -636,6 +692,7 @@ void mwifiex_bt_coex_wlan_param_update_event(struct mwifiex_private *priv,
  *      - EVENT_DELBA
  *      - EVENT_BA_STREAM_TIEMOUT
  *      - EVENT_AMSDU_AGGR_CTRL
+ *      - EVENT_FW_DUMP_INFO
  */
 int mwifiex_process_sta_event(struct mwifiex_private *priv)
 {
@@ -1007,6 +1064,10 @@ int mwifiex_process_sta_event(struct mwifiex_private *priv)
 					    adapter->event_skb->len -
 					    sizeof(eventcause));
 		break;
+	case EVENT_FW_DUMP_INFO:
+		mwifiex_dbg(adapter, EVENT, "event: firmware debug info\n");
+		mwifiex_fw_dump_info_event(priv, adapter->event_skb);
+		break;
 	/* Debugging event; not used, but let's not print an ERROR for it. */
 	case EVENT_UNKNOWN_DEBUG:
 		mwifiex_dbg(adapter, EVENT, "event: debug\n");
diff --git a/drivers/net/wireless/marvell/mwl8k.c b/drivers/net/wireless/marvell/mwl8k.c
index e813b2ca740c..8e4e9b6919e0 100644
--- a/drivers/net/wireless/marvell/mwl8k.c
+++ b/drivers/net/wireless/marvell/mwl8k.c
@@ -199,7 +199,7 @@ struct mwl8k_priv {
 	struct ieee80211_channel channels_24[14];
 	struct ieee80211_rate rates_24[13];
 	struct ieee80211_supported_band band_50;
-	struct ieee80211_channel channels_50[4];
+	struct ieee80211_channel channels_50[9];
 	struct ieee80211_rate rates_50[8];
 	u32 ap_macids_supported;
 	u32 sta_macids_supported;
@@ -383,6 +383,11 @@ static const struct ieee80211_channel mwl8k_channels_50[] = {
 	{ .band = NL80211_BAND_5GHZ, .center_freq = 5200, .hw_value = 40, },
 	{ .band = NL80211_BAND_5GHZ, .center_freq = 5220, .hw_value = 44, },
 	{ .band = NL80211_BAND_5GHZ, .center_freq = 5240, .hw_value = 48, },
+	{ .band = NL80211_BAND_5GHZ, .center_freq = 5745, .hw_value = 149, },
+	{ .band = NL80211_BAND_5GHZ, .center_freq = 5765, .hw_value = 153, },
+	{ .band = NL80211_BAND_5GHZ, .center_freq = 5785, .hw_value = 157, },
+	{ .band = NL80211_BAND_5GHZ, .center_freq = 5805, .hw_value = 161, },
+	{ .band = NL80211_BAND_5GHZ, .center_freq = 5825, .hw_value = 165, },
 };
 
 static const struct ieee80211_rate mwl8k_rates_50[] = {
diff --git a/drivers/net/wireless/mediatek/Kconfig b/drivers/net/wireless/mediatek/Kconfig
index 28843fed750a..92ce4062f307 100644
--- a/drivers/net/wireless/mediatek/Kconfig
+++ b/drivers/net/wireless/mediatek/Kconfig
@@ -11,4 +11,5 @@ config WLAN_VENDOR_MEDIATEK
 
 if WLAN_VENDOR_MEDIATEK
 source "drivers/net/wireless/mediatek/mt7601u/Kconfig"
+source "drivers/net/wireless/mediatek/mt76/Kconfig"
 endif # WLAN_VENDOR_MEDIATEK
diff --git a/drivers/net/wireless/mediatek/Makefile b/drivers/net/wireless/mediatek/Makefile
index 9d5f182fd7fd..00f945f59b38 100644
--- a/drivers/net/wireless/mediatek/Makefile
+++ b/drivers/net/wireless/mediatek/Makefile
@@ -1 +1,2 @@
 obj-$(CONFIG_MT7601U)	+= mt7601u/
+obj-$(CONFIG_MT76_CORE)	+= mt76/
diff --git a/drivers/net/wireless/mediatek/mt76/Kconfig b/drivers/net/wireless/mediatek/mt76/Kconfig
new file mode 100644
index 000000000000..fc05d79c80d0
--- /dev/null
+++ b/drivers/net/wireless/mediatek/mt76/Kconfig
@@ -0,0 +1,10 @@
+config MT76_CORE
+	tristate
+
+config MT76x2E
+	tristate "MediaTek MT76x2E (PCIe) support"
+	select MT76_CORE
+	depends on MAC80211
+	depends on PCI
+	---help---
+	  This adds support for MT7612/MT7602/MT7662-based wireless PCIe devices.
diff --git a/drivers/net/wireless/mediatek/mt76/Makefile b/drivers/net/wireless/mediatek/mt76/Makefile
new file mode 100644
index 000000000000..a0156bc01dea
--- /dev/null
+++ b/drivers/net/wireless/mediatek/mt76/Makefile
@@ -0,0 +1,15 @@
+obj-$(CONFIG_MT76_CORE) += mt76.o
+obj-$(CONFIG_MT76x2E) += mt76x2e.o
+
+mt76-y := \
+	mmio.o util.o trace.o dma.o mac80211.o debugfs.o eeprom.o tx.o agg-rx.o
+
+CFLAGS_trace.o := -I$(src)
+
+mt76x2e-y := \
+	mt76x2_pci.o mt76x2_dma.o \
+	mt76x2_main.o mt76x2_init.o mt76x2_debugfs.o mt76x2_tx.o \
+	mt76x2_core.o mt76x2_mac.o mt76x2_eeprom.o mt76x2_mcu.o mt76x2_phy.o \
+	mt76x2_dfs.o mt76x2_trace.o
+
+CFLAGS_mt76x2_trace.o := -I$(src)
diff --git a/drivers/net/wireless/mediatek/mt76/agg-rx.c b/drivers/net/wireless/mediatek/mt76/agg-rx.c
new file mode 100644
index 000000000000..8027bb7c03c2
--- /dev/null
+++ b/drivers/net/wireless/mediatek/mt76/agg-rx.c
@@ -0,0 +1,258 @@
+/*
+ * Copyright (C) 2018 Felix Fietkau <nbd@nbd.name>
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+#include "mt76.h"
+
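+/* Longest time a frame may wait in the reorder buffer before it is released */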
+#define REORDER_TIMEOUT (HZ / 10)
+
+static void
+mt76_aggr_release(struct mt76_rx_tid *tid, struct sk_buff_head *frames, int idx)
+{
+	struct sk_buff *skb;
+
+	tid->head = ieee80211_sn_inc(tid->head);
+
+	skb = tid->reorder_buf[idx];
+	if (!skb)
+		return;
+
+	tid->reorder_buf[idx] = NULL;
+	tid->nframes--;
+	__skb_queue_tail(frames, skb);
+}
+
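+/* Release all buffered frames with sequence numbers before @head,
+ * advancing tid->head as we go.
+ */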
+static void
+mt76_rx_aggr_release_frames(struct mt76_rx_tid *tid,
+			    struct sk_buff_head *frames, u16 head)
+{
+	int idx;
+
+	while (ieee80211_sn_less(tid->head, head)) {
+		idx = tid->head % tid->size;
+		mt76_aggr_release(tid, frames, idx);
+	}
+}
+
+static void
+mt76_rx_aggr_release_head(struct mt76_rx_tid *tid, struct sk_buff_head *frames)
+{
+	int idx = tid->head % tid->size;
+
+	while (tid->reorder_buf[idx]) {
+		mt76_aggr_release(tid, frames, idx);
+		idx = tid->head % tid->size;
+	}
+}
+
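+/* Flush the in-order head of the reorder window, then release any frames
+ * that have waited in the buffer longer than REORDER_TIMEOUT.
+ */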
+static void
+mt76_rx_aggr_check_release(struct mt76_rx_tid *tid, struct sk_buff_head *frames)
+{
+	struct mt76_rx_status *status;
+	struct sk_buff *skb;
+	int start, idx, nframes;
+
+	if (!tid->nframes)
+		return;
+
+	mt76_rx_aggr_release_head(tid, frames);
+
+	start = tid->head % tid->size;
+	nframes = tid->nframes;
+
+	for (idx = (tid->head + 1) % tid->size;
+	     idx != start && nframes;
+	     idx = (idx + 1) % tid->size) {
+
+		skb = tid->reorder_buf[idx];
+		if (!skb)
+			continue;
+
+		nframes--;
+		status = (struct mt76_rx_status *) skb->cb;
+		if (!time_after(jiffies, status->reorder_time +
+					 REORDER_TIMEOUT))
+			continue;
+
+		mt76_rx_aggr_release_frames(tid, frames, status->seqno);
+	}
+
+	mt76_rx_aggr_release_head(tid, frames);
+}
+
+static void
+mt76_rx_aggr_reorder_work(struct work_struct *work)
+{
+	struct mt76_rx_tid *tid = container_of(work, struct mt76_rx_tid,
+					       reorder_work.work);
+	struct mt76_dev *dev = tid->dev;
+	struct sk_buff_head frames;
+
+	__skb_queue_head_init(&frames);
+
+	local_bh_disable();
+
+	spin_lock(&tid->lock);
+	mt76_rx_aggr_check_release(tid, &frames);
+	spin_unlock(&tid->lock);
+
+	ieee80211_queue_delayed_work(tid->dev->hw, &tid->reorder_work,
+				     REORDER_TIMEOUT);
+	mt76_rx_complete(dev, &frames, -1);
+
+	local_bh_enable();
+}
+
+void mt76_rx_aggr_reorder(struct sk_buff *skb, struct sk_buff_head *frames)
+{
+	struct mt76_rx_status *status = (struct mt76_rx_status *) skb->cb;
+	struct mt76_wcid *wcid = status->wcid;
+	struct ieee80211_sta *sta;
+	struct mt76_rx_tid *tid;
+	bool sn_less;
+	u16 seqno, head, size;
+	u8 idx;
+
+	__skb_queue_tail(frames, skb);
+
+	sta = wcid_to_sta(wcid);
+	if (!sta || !status->aggr)
+		return;
+
+	tid = rcu_dereference(wcid->aggr[status->tid]);
+	if (!tid)
+		return;
+
+	spin_lock_bh(&tid->lock);
+
+	if (tid->stopped)
+		goto out;
+
+	head = tid->head;
+	seqno = status->seqno;
+	size = tid->size;
+	sn_less = ieee80211_sn_less(seqno, head);
+
+	if (!tid->started) {
+		if (sn_less)
+			goto out;
+
+		tid->started = true;
+	}
+
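+	/* Frame is older than the window head: a duplicate or late retry */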
+	if (sn_less) {
+		__skb_unlink(skb, frames);
+		dev_kfree_skb(skb);
+		goto out;
+	}
+
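+	/* In-order frame: advance the head and flush any consecutive
+	 * buffered frames.
+	 */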
+	if (seqno == head) {
+		tid->head = ieee80211_sn_inc(head);
+		if (tid->nframes)
+			mt76_rx_aggr_release_head(tid, frames);
+		goto out;
+	}
+
+	__skb_unlink(skb, frames);
+
+	/*
+	 * The frame sequence number exceeds the buffering window: free up
+	 * some space by releasing older frames.
+	 */
+	if (!ieee80211_sn_less(seqno, head + size)) {
+		head = ieee80211_sn_inc(ieee80211_sn_sub(seqno, size));
+		mt76_rx_aggr_release_frames(tid, frames, head);
+	}
+
+	idx = seqno % size;
+
+	/* Discard if the current slot is already in use */
+	if (tid->reorder_buf[idx]) {
+		dev_kfree_skb(skb);
+		goto out;
+	}
+
+	status->reorder_time = jiffies;
+	tid->reorder_buf[idx] = skb;
+	tid->nframes++;
+	mt76_rx_aggr_release_head(tid, frames);
+
+	ieee80211_queue_delayed_work(tid->dev->hw, &tid->reorder_work,
+				     REORDER_TIMEOUT);
+
+out:
+	spin_unlock_bh(&tid->lock);
+}
+
+int mt76_rx_aggr_start(struct mt76_dev *dev, struct mt76_wcid *wcid, u8 tidno,
+		       u16 ssn, u8 size)
+{
+	struct mt76_rx_tid *tid;
+
+	mt76_rx_aggr_stop(dev, wcid, tidno);
+
+	tid = kzalloc(sizeof(*tid) + size * sizeof(tid->reorder_buf[0]),
+		      GFP_KERNEL);
+	if (!tid)
+		return -ENOMEM;
+
+	tid->dev = dev;
+	tid->head = ssn;
+	tid->size = size;
+	INIT_DELAYED_WORK(&tid->reorder_work, mt76_rx_aggr_reorder_work);
+	spin_lock_init(&tid->lock);
+
+	rcu_assign_pointer(wcid->aggr[tidno], tid);
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(mt76_rx_aggr_start);
+
+static void mt76_rx_aggr_shutdown(struct mt76_dev *dev, struct mt76_rx_tid *tid)
+{
+	u8 size = tid->size;
+	int i;
+
+	spin_lock_bh(&tid->lock);
+
+	tid->stopped = true;
+	for (i = 0; tid->nframes && i < size; i++) {
+		struct sk_buff *skb = tid->reorder_buf[i];
+
+		if (!skb)
+			continue;
+
+		tid->nframes--;
+		dev_kfree_skb(skb);
+	}
+
+	spin_unlock_bh(&tid->lock);
+
+	cancel_delayed_work_sync(&tid->reorder_work);
+}
+
+void mt76_rx_aggr_stop(struct mt76_dev *dev, struct mt76_wcid *wcid, u8 tidno)
+{
+	struct mt76_rx_tid *tid;
+
+	rcu_read_lock();
+
+	tid = rcu_dereference(wcid->aggr[tidno]);
+	if (tid) {
+		rcu_assign_pointer(wcid->aggr[tidno], NULL);
+		mt76_rx_aggr_shutdown(dev, tid);
+		kfree_rcu(tid, rcu_head);
+	}
+
+	rcu_read_unlock();
+}
+EXPORT_SYMBOL_GPL(mt76_rx_aggr_stop);
diff --git a/drivers/net/wireless/mediatek/mt76/debugfs.c b/drivers/net/wireless/mediatek/mt76/debugfs.c
new file mode 100644
index 000000000000..c121b502a462
--- /dev/null
+++ b/drivers/net/wireless/mediatek/mt76/debugfs.c
@@ -0,0 +1,78 @@
+/*
+ * Copyright (C) 2016 Felix Fietkau <nbd@nbd.name>
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+#include "mt76.h"
+
+static int
+mt76_reg_set(void *data, u64 val)
+{
+	struct mt76_dev *dev = data;
+
+	dev->bus->wr(dev, dev->debugfs_reg, val);
+	return 0;
+}
+
+static int
+mt76_reg_get(void *data, u64 *val)
+{
+	struct mt76_dev *dev = data;
+
+	*val = dev->bus->rr(dev, dev->debugfs_reg);
+	return 0;
+}
+
+DEFINE_DEBUGFS_ATTRIBUTE(fops_regval, mt76_reg_get, mt76_reg_set,
+			 "0x%08llx\n");
+
+static int
+mt76_queues_read(struct seq_file *s, void *data)
+{
+	struct mt76_dev *dev = dev_get_drvdata(s->private);
+	int i;
+
+	for (i = 0; i < ARRAY_SIZE(dev->q_tx); i++) {
+		struct mt76_queue *q = &dev->q_tx[i];
+
+		if (!q->ndesc)
+			continue;
+
+		seq_printf(s,
+			   "%d:	queued=%d head=%d tail=%d swq_queued=%d\n",
+			   i, q->queued, q->head, q->tail, q->swq_queued);
+	}
+
+	return 0;
+}
+
+struct dentry *mt76_register_debugfs(struct mt76_dev *dev)
+{
+	struct dentry *dir;
+
+	dir = debugfs_create_dir("mt76", dev->hw->wiphy->debugfsdir);
+	if (!dir)
+		return NULL;
+
+	debugfs_create_u8("led_pin", S_IRUSR | S_IWUSR, dir, &dev->led_pin);
+	debugfs_create_u32("regidx", S_IRUSR | S_IWUSR, dir, &dev->debugfs_reg);
+	debugfs_create_file_unsafe("regval", S_IRUSR | S_IWUSR, dir, dev,
+				   &fops_regval);
+	debugfs_create_blob("eeprom", S_IRUSR, dir, &dev->eeprom);
+	if (dev->otp.data)
+		debugfs_create_blob("otp", S_IRUSR, dir, &dev->otp);
+	debugfs_create_devm_seqfile(dev->dev, "queues", dir, mt76_queues_read);
+
+	return dir;
+}
+EXPORT_SYMBOL_GPL(mt76_register_debugfs);
diff --git a/drivers/net/wireless/mediatek/mt76/dma.c b/drivers/net/wireless/mediatek/mt76/dma.c
new file mode 100644
index 000000000000..3518703524e7
--- /dev/null
+++ b/drivers/net/wireless/mediatek/mt76/dma.c
@@ -0,0 +1,459 @@
+/*
+ * Copyright (C) 2016 Felix Fietkau <nbd@nbd.name>
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include <linux/dma-mapping.h>
+#include "mt76.h"
+#include "dma.h"
+
+#define DMA_DUMMY_TXWI	((void *) ~0)
+
+static int
+mt76_dma_alloc_queue(struct mt76_dev *dev, struct mt76_queue *q)
+{
+	int size;
+	int i;
+
+	spin_lock_init(&q->lock);
+	INIT_LIST_HEAD(&q->swq);
+
+	size = q->ndesc * sizeof(struct mt76_desc);
+	q->desc = dmam_alloc_coherent(dev->dev, size, &q->desc_dma, GFP_KERNEL);
+	if (!q->desc)
+		return -ENOMEM;
+
+	size = q->ndesc * sizeof(*q->entry);
+	q->entry = devm_kzalloc(dev->dev, size, GFP_KERNEL);
+	if (!q->entry)
+		return -ENOMEM;
+
+	/* clear descriptors */
+	for (i = 0; i < q->ndesc; i++)
+		q->desc[i].ctrl = cpu_to_le32(MT_DMA_CTL_DMA_DONE);
+
+	iowrite32(q->desc_dma, &q->regs->desc_base);
+	iowrite32(0, &q->regs->cpu_idx);
+	iowrite32(0, &q->regs->dma_idx);
+	iowrite32(q->ndesc, &q->regs->ring_size);
+
+	return 0;
+}
+
+static int
+mt76_dma_add_buf(struct mt76_dev *dev, struct mt76_queue *q,
+		 struct mt76_queue_buf *buf, int nbufs, u32 info,
+		 struct sk_buff *skb, void *txwi)
+{
+	struct mt76_desc *desc;
+	u32 ctrl;
+	int i, idx = -1;
+
+	if (txwi)
+		q->entry[q->head].txwi = DMA_DUMMY_TXWI;
+
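+	/* Each DMA descriptor carries up to two buffers (buf0/buf1), so
+	 * consume the buffer list in pairs.
+	 */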
+	for (i = 0; i < nbufs; i += 2, buf += 2) {
+		u32 buf0 = buf[0].addr, buf1 = 0;
+
+		ctrl = FIELD_PREP(MT_DMA_CTL_SD_LEN0, buf[0].len);
+		if (i < nbufs - 1) {
+			buf1 = buf[1].addr;
+			ctrl |= FIELD_PREP(MT_DMA_CTL_SD_LEN1, buf[1].len);
+		}
+
+		if (i == nbufs - 1)
+			ctrl |= MT_DMA_CTL_LAST_SEC0;
+		else if (i == nbufs - 2)
+			ctrl |= MT_DMA_CTL_LAST_SEC1;
+
+		idx = q->head;
+		q->head = (q->head + 1) % q->ndesc;
+
+		desc = &q->desc[idx];
+
+		WRITE_ONCE(desc->buf0, cpu_to_le32(buf0));
+		WRITE_ONCE(desc->buf1, cpu_to_le32(buf1));
+		WRITE_ONCE(desc->info, cpu_to_le32(info));
+		WRITE_ONCE(desc->ctrl, cpu_to_le32(ctrl));
+
+		q->queued++;
+	}
+
+	q->entry[idx].txwi = txwi;
+	q->entry[idx].skb = skb;
+
+	return idx;
+}
+
+static void
+mt76_dma_tx_cleanup_idx(struct mt76_dev *dev, struct mt76_queue *q, int idx,
+			struct mt76_queue_entry *prev_e)
+{
+	struct mt76_queue_entry *e = &q->entry[idx];
+	__le32 __ctrl = READ_ONCE(q->desc[idx].ctrl);
+	u32 ctrl = le32_to_cpu(__ctrl);
+
+	if (!e->txwi || !e->skb) {
+		__le32 addr = READ_ONCE(q->desc[idx].buf0);
+		u32 len = FIELD_GET(MT_DMA_CTL_SD_LEN0, ctrl);
+
+		dma_unmap_single(dev->dev, le32_to_cpu(addr), len,
+				 DMA_TO_DEVICE);
+	}
+
+	if (!(ctrl & MT_DMA_CTL_LAST_SEC0)) {
+		__le32 addr = READ_ONCE(q->desc[idx].buf1);
+		u32 len = FIELD_GET(MT_DMA_CTL_SD_LEN1, ctrl);
+
+		dma_unmap_single(dev->dev, le32_to_cpu(addr), len,
+				 DMA_TO_DEVICE);
+	}
+
+	if (e->txwi == DMA_DUMMY_TXWI)
+		e->txwi = NULL;
+
+	*prev_e = *e;
+	memset(e, 0, sizeof(*e));
+}
+
+static void
+mt76_dma_sync_idx(struct mt76_dev *dev, struct mt76_queue *q)
+{
+	q->head = ioread32(&q->regs->dma_idx);
+	q->tail = q->head;
+	iowrite32(q->head, &q->regs->cpu_idx);
+}
+
+static void
+mt76_dma_tx_cleanup(struct mt76_dev *dev, enum mt76_txq_id qid, bool flush)
+{
+	struct mt76_queue *q = &dev->q_tx[qid];
+	struct mt76_queue_entry entry;
+	bool wake = false;
+	int last;
+
+	if (!q->ndesc)
+		return;
+
+	spin_lock_bh(&q->lock);
+	if (flush)
+		last = -1;
+	else
+		last = ioread32(&q->regs->dma_idx);
+
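+	/* Complete entries up to the hardware DMA index, re-reading the
+	 * index as we go in case more frames finished in the meantime.
+	 */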
+	while (q->queued && q->tail != last) {
+		mt76_dma_tx_cleanup_idx(dev, q, q->tail, &entry);
+		if (entry.schedule)
+			q->swq_queued--;
+
+		if (entry.skb)
+			dev->drv->tx_complete_skb(dev, q, &entry, flush);
+
+		if (entry.txwi) {
+			mt76_put_txwi(dev, entry.txwi);
+			wake = true;
+		}
+
+		q->tail = (q->tail + 1) % q->ndesc;
+		q->queued--;
+
+		if (!flush && q->tail == last)
+			last = ioread32(&q->regs->dma_idx);
+	}
+
+	if (!flush)
+		mt76_txq_schedule(dev, q);
+	else
+		mt76_dma_sync_idx(dev, q);
+
+	wake = wake && qid < IEEE80211_NUM_ACS && q->queued < q->ndesc - 8;
+	spin_unlock_bh(&q->lock);
+
+	if (wake)
+		ieee80211_wake_queue(dev->hw, qid);
+}
+
+static void *
+mt76_dma_get_buf(struct mt76_dev *dev, struct mt76_queue *q, int idx,
+		 int *len, u32 *info, bool *more)
+{
+	struct mt76_queue_entry *e = &q->entry[idx];
+	struct mt76_desc *desc = &q->desc[idx];
+	dma_addr_t buf_addr;
+	void *buf = e->buf;
+	int buf_len = SKB_WITH_OVERHEAD(q->buf_size);
+
+	buf_addr = le32_to_cpu(READ_ONCE(desc->buf0));
+	if (len) {
+		u32 ctl = le32_to_cpu(READ_ONCE(desc->ctrl));
+		*len = FIELD_GET(MT_DMA_CTL_SD_LEN0, ctl);
+		*more = !(ctl & MT_DMA_CTL_LAST_SEC0);
+	}
+
+	if (info)
+		*info = le32_to_cpu(desc->info);
+
+	dma_unmap_single(dev->dev, buf_addr, buf_len, DMA_FROM_DEVICE);
+	e->buf = NULL;
+
+	return buf;
+}
+
+static void *
+mt76_dma_dequeue(struct mt76_dev *dev, struct mt76_queue *q, bool flush,
+		 int *len, u32 *info, bool *more)
+{
+	int idx = q->tail;
+
+	*more = false;
+	if (!q->queued)
+		return NULL;
+
+	if (!flush && !(q->desc[idx].ctrl & cpu_to_le32(MT_DMA_CTL_DMA_DONE)))
+		return NULL;
+
+	q->tail = (q->tail + 1) % q->ndesc;
+	q->queued--;
+
+	return mt76_dma_get_buf(dev, q, idx, len, info, more);
+}
+
+static void
+mt76_dma_kick_queue(struct mt76_dev *dev, struct mt76_queue *q)
+{
+	iowrite32(q->head, &q->regs->cpu_idx);
+}
+
+static int
+mt76_dma_rx_fill(struct mt76_dev *dev, struct mt76_queue *q, bool napi)
+{
+	dma_addr_t addr;
+	void *buf;
+	int frames = 0;
+	int len = SKB_WITH_OVERHEAD(q->buf_size);
+	int offset = q->buf_offset;
+	int idx;
+	void *(*alloc)(unsigned int fragsz);
+
+	if (napi)
+		alloc = napi_alloc_frag;
+	else
+		alloc = netdev_alloc_frag;
+
+	spin_lock_bh(&q->lock);
+
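+	/* Keep one descriptor unused so that a full ring can be told
+	 * apart from an empty one.
+	 */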
+	while (q->queued < q->ndesc - 1) {
+		struct mt76_queue_buf qbuf;
+
+		buf = alloc(q->buf_size);
+		if (!buf)
+			break;
+
+		addr = dma_map_single(dev->dev, buf, len, DMA_FROM_DEVICE);
+		if (dma_mapping_error(dev->dev, addr)) {
+			skb_free_frag(buf);
+			break;
+		}
+
+		qbuf.addr = addr + offset;
+		qbuf.len = len - offset;
+		idx = mt76_dma_add_buf(dev, q, &qbuf, 1, 0, buf, NULL);
+		frames++;
+	}
+
+	if (frames)
+		mt76_dma_kick_queue(dev, q);
+
+	spin_unlock_bh(&q->lock);
+
+	return frames;
+}
+
+static void
+mt76_dma_rx_cleanup(struct mt76_dev *dev, struct mt76_queue *q)
+{
+	void *buf;
+	bool more;
+
+	spin_lock_bh(&q->lock);
+	do {
+		buf = mt76_dma_dequeue(dev, q, true, NULL, NULL, &more);
+		if (!buf)
+			break;
+
+		skb_free_frag(buf);
+	} while (1);
+	spin_unlock_bh(&q->lock);
+}
+
+static void
+mt76_dma_rx_reset(struct mt76_dev *dev, enum mt76_rxq_id qid)
+{
+	struct mt76_queue *q = &dev->q_rx[qid];
+	int i;
+
+	for (i = 0; i < q->ndesc; i++)
+		q->desc[i].ctrl &= ~cpu_to_le32(MT_DMA_CTL_DMA_DONE);
+
+	mt76_dma_rx_cleanup(dev, q);
+	mt76_dma_sync_idx(dev, q);
+	mt76_dma_rx_fill(dev, q, false);
+}
+
+static void
+mt76_add_fragment(struct mt76_dev *dev, struct mt76_queue *q, void *data,
+		  int len, bool more)
+{
+	struct page *page = virt_to_head_page(data);
+	int offset = data - page_address(page);
+	struct sk_buff *skb = q->rx_head;
+
+	offset += q->buf_offset;
+	skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page, offset, len,
+			q->buf_size);
+
+	if (more)
+		return;
+
+	q->rx_head = NULL;
+	dev->drv->rx_skb(dev, q - dev->q_rx, skb);
+}
+
+static int
+mt76_dma_rx_process(struct mt76_dev *dev, struct mt76_queue *q, int budget)
+{
+	struct sk_buff *skb;
+	unsigned char *data;
+	int len;
+	int done = 0;
+	bool more;
+
+	while (done < budget) {
+		u32 info;
+
+		data = mt76_dma_dequeue(dev, q, false, &len, &info, &more);
+		if (!data)
+			break;
+
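+		/* Continue reassembling a multi-buffer frame */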
+		if (q->rx_head) {
+			mt76_add_fragment(dev, q, data, len, more);
+			continue;
+		}
+
+		skb = build_skb(data, q->buf_size);
+		if (!skb) {
+			skb_free_frag(data);
+			continue;
+		}
+
+		skb_reserve(skb, q->buf_offset);
+		if (skb->tail + len > skb->end) {
+			dev_kfree_skb(skb);
+			continue;
+		}
+
+		if (q == &dev->q_rx[MT_RXQ_MCU]) {
+			u32 *rxfce = (u32 *) skb->cb;
+			*rxfce = info;
+		}
+
+		__skb_put(skb, len);
+		done++;
+
+		if (more) {
+			q->rx_head = skb;
+			continue;
+		}
+
+		dev->drv->rx_skb(dev, q - dev->q_rx, skb);
+	}
+
+	mt76_dma_rx_fill(dev, q, true);
+	return done;
+}
+
+static int
+mt76_dma_rx_poll(struct napi_struct *napi, int budget)
+{
+	struct mt76_dev *dev;
+	int qid, done = 0, cur;
+
+	dev = container_of(napi->dev, struct mt76_dev, napi_dev);
+	qid = napi - dev->napi;
+
+	rcu_read_lock();
+
+	do {
+		cur = mt76_dma_rx_process(dev, &dev->q_rx[qid], budget - done);
+		mt76_rx_poll_complete(dev, qid);
+		done += cur;
+	} while (cur && done < budget);
+
+	rcu_read_unlock();
+
+	if (done < budget) {
+		napi_complete(napi);
+		dev->drv->rx_poll_complete(dev, qid);
+	}
+
+	return done;
+}
+
+static int
+mt76_dma_init(struct mt76_dev *dev)
+{
+	int i;
+
+	init_dummy_netdev(&dev->napi_dev);
+
+	for (i = 0; i < ARRAY_SIZE(dev->q_rx); i++) {
+		netif_napi_add(&dev->napi_dev, &dev->napi[i], mt76_dma_rx_poll,
+			       64);
+		mt76_dma_rx_fill(dev, &dev->q_rx[i], false);
+		skb_queue_head_init(&dev->rx_skb[i]);
+		napi_enable(&dev->napi[i]);
+	}
+
+	return 0;
+}
+
+static const struct mt76_queue_ops mt76_dma_ops = {
+	.init = mt76_dma_init,
+	.alloc = mt76_dma_alloc_queue,
+	.add_buf = mt76_dma_add_buf,
+	.tx_cleanup = mt76_dma_tx_cleanup,
+	.rx_reset = mt76_dma_rx_reset,
+	.kick = mt76_dma_kick_queue,
+};
+
+int mt76_dma_attach(struct mt76_dev *dev)
+{
+	dev->queue_ops = &mt76_dma_ops;
+	return 0;
+}
+EXPORT_SYMBOL_GPL(mt76_dma_attach);
+
+void mt76_dma_cleanup(struct mt76_dev *dev)
+{
+	int i;
+
+	for (i = 0; i < ARRAY_SIZE(dev->q_tx); i++)
+		mt76_dma_tx_cleanup(dev, i, true);
+
+	for (i = 0; i < ARRAY_SIZE(dev->q_rx); i++) {
+		netif_napi_del(&dev->napi[i]);
+		mt76_dma_rx_cleanup(dev, &dev->q_rx[i]);
+	}
+}
+EXPORT_SYMBOL_GPL(mt76_dma_cleanup);
diff --git a/drivers/net/wireless/mediatek/mt76/dma.h b/drivers/net/wireless/mediatek/mt76/dma.h
new file mode 100644
index 000000000000..1dad39697929
--- /dev/null
+++ b/drivers/net/wireless/mediatek/mt76/dma.h
@@ -0,0 +1,38 @@
+/*
+ * Copyright (C) 2016 Felix Fietkau <nbd@nbd.name>
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+#ifndef __MT76_DMA_H
+#define __MT76_DMA_H
+
+#define MT_RING_SIZE			0x10
+
+#define MT_DMA_CTL_SD_LEN1		GENMASK(13, 0)
+#define MT_DMA_CTL_LAST_SEC1		BIT(14)
+#define MT_DMA_CTL_BURST		BIT(15)
+#define MT_DMA_CTL_SD_LEN0		GENMASK(29, 16)
+#define MT_DMA_CTL_LAST_SEC0		BIT(30)
+#define MT_DMA_CTL_DMA_DONE		BIT(31)
+
+struct mt76_desc {
+	__le32 buf0;
+	__le32 ctrl;
+	__le32 buf1;
+	__le32 info;
+} __packed __aligned(4);
+
+int mt76_dma_attach(struct mt76_dev *dev);
+void mt76_dma_cleanup(struct mt76_dev *dev);
+
+#endif
diff --git a/drivers/net/wireless/mediatek/mt76/eeprom.c b/drivers/net/wireless/mediatek/mt76/eeprom.c
new file mode 100644
index 000000000000..530e5593765c
--- /dev/null
+++ b/drivers/net/wireless/mediatek/mt76/eeprom.c
@@ -0,0 +1,112 @@
+/*
+ * Copyright (C) 2016 Felix Fietkau <nbd@nbd.name>
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+#include <linux/of.h>
+#include <linux/of_net.h>
+#include <linux/mtd/mtd.h>
+#include <linux/mtd/partitions.h>
+#include <linux/etherdevice.h>
+#include "mt76.h"
+
+static int
+mt76_get_of_eeprom(struct mt76_dev *dev, int len)
+{
+#if defined(CONFIG_OF) && defined(CONFIG_MTD)
+	struct device_node *np = dev->dev->of_node;
+	struct mtd_info *mtd;
+	const __be32 *list;
+	const char *part;
+	phandle phandle;
+	int offset = 0;
+	int size;
+	size_t retlen;
+	int ret;
+
+	if (!np)
+		return -ENOENT;
+
+	list = of_get_property(np, "mediatek,mtd-eeprom", &size);
+	if (!list)
+		return -ENOENT;
+
+	phandle = be32_to_cpup(list++);
+	if (!phandle)
+		return -ENOENT;
+
+	np = of_find_node_by_phandle(phandle);
+	if (!np)
+		return -EINVAL;
+
+	part = of_get_property(np, "label", NULL);
+	if (!part)
+		part = np->name;
+
+	mtd = get_mtd_device_nm(part);
+	if (IS_ERR(mtd))
+		return PTR_ERR(mtd);
+
+	if (size <= sizeof(*list)) {
+		put_mtd_device(mtd);
+		return -EINVAL;
+	}
+
+	offset = be32_to_cpup(list);
+	ret = mtd_read(mtd, offset, len, &retlen, dev->eeprom.data);
+	put_mtd_device(mtd);
+	if (ret)
+		return ret;
+
+	if (retlen < len)
+		return -EINVAL;
+
+	return 0;
+#else
+	return -ENOENT;
+#endif
+}
+
+void
+mt76_eeprom_override(struct mt76_dev *dev)
+{
+#ifdef CONFIG_OF
+	struct device_node *np = dev->dev->of_node;
+	const u8 *mac;
+
+	if (!np)
+		return;
+
+	mac = of_get_mac_address(np);
+	if (mac)
+		memcpy(dev->macaddr, mac, ETH_ALEN);
+#endif
+
+	if (!is_valid_ether_addr(dev->macaddr)) {
+		eth_random_addr(dev->macaddr);
+		dev_info(dev->dev,
+			 "Invalid MAC address, using random address %pM\n",
+			 dev->macaddr);
+	}
+}
+EXPORT_SYMBOL_GPL(mt76_eeprom_override);
+
+int
+mt76_eeprom_init(struct mt76_dev *dev, int len)
+{
+	dev->eeprom.size = len;
+	dev->eeprom.data = devm_kzalloc(dev->dev, len, GFP_KERNEL);
+	if (!dev->eeprom.data)
+		return -ENOMEM;
+
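+	/* Returns 1 if the EEPROM was populated from the OF MTD partition,
+	 * 0 otherwise.
+	 */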
+	return !mt76_get_of_eeprom(dev, len);
+}
+EXPORT_SYMBOL_GPL(mt76_eeprom_init);
diff --git a/drivers/net/wireless/mediatek/mt76/mac80211.c b/drivers/net/wireless/mediatek/mt76/mac80211.c
new file mode 100644
index 000000000000..5fcb2deb89a2
--- /dev/null
+++ b/drivers/net/wireless/mediatek/mt76/mac80211.c
@@ -0,0 +1,505 @@
+/*
+ * Copyright (C) 2016 Felix Fietkau <nbd@nbd.name>
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+#include <linux/of.h>
+#include "mt76.h"
+
+#define CHAN2G(_idx, _freq) {			\
+	.band = NL80211_BAND_2GHZ,		\
+	.center_freq = (_freq),			\
+	.hw_value = (_idx),			\
+	.max_power = 30,			\
+}
+
+#define CHAN5G(_idx, _freq) {			\
+	.band = NL80211_BAND_5GHZ,		\
+	.center_freq = (_freq),			\
+	.hw_value = (_idx),			\
+	.max_power = 30,			\
+}
+
+static const struct ieee80211_channel mt76_channels_2ghz[] = {
+	CHAN2G(1, 2412),
+	CHAN2G(2, 2417),
+	CHAN2G(3, 2422),
+	CHAN2G(4, 2427),
+	CHAN2G(5, 2432),
+	CHAN2G(6, 2437),
+	CHAN2G(7, 2442),
+	CHAN2G(8, 2447),
+	CHAN2G(9, 2452),
+	CHAN2G(10, 2457),
+	CHAN2G(11, 2462),
+	CHAN2G(12, 2467),
+	CHAN2G(13, 2472),
+	CHAN2G(14, 2484),
+};
+
+static const struct ieee80211_channel mt76_channels_5ghz[] = {
+	CHAN5G(36, 5180),
+	CHAN5G(40, 5200),
+	CHAN5G(44, 5220),
+	CHAN5G(48, 5240),
+
+	CHAN5G(52, 5260),
+	CHAN5G(56, 5280),
+	CHAN5G(60, 5300),
+	CHAN5G(64, 5320),
+
+	CHAN5G(100, 5500),
+	CHAN5G(104, 5520),
+	CHAN5G(108, 5540),
+	CHAN5G(112, 5560),
+	CHAN5G(116, 5580),
+	CHAN5G(120, 5600),
+	CHAN5G(124, 5620),
+	CHAN5G(128, 5640),
+	CHAN5G(132, 5660),
+	CHAN5G(136, 5680),
+	CHAN5G(140, 5700),
+
+	CHAN5G(149, 5745),
+	CHAN5G(153, 5765),
+	CHAN5G(157, 5785),
+	CHAN5G(161, 5805),
+	CHAN5G(165, 5825),
+};
+
+static const struct ieee80211_tpt_blink mt76_tpt_blink[] = {
+	{ .throughput =   0 * 1024, .blink_time = 334 },
+	{ .throughput =   1 * 1024, .blink_time = 260 },
+	{ .throughput =   5 * 1024, .blink_time = 220 },
+	{ .throughput =  10 * 1024, .blink_time = 190 },
+	{ .throughput =  20 * 1024, .blink_time = 170 },
+	{ .throughput =  50 * 1024, .blink_time = 150 },
+	{ .throughput =  70 * 1024, .blink_time = 130 },
+	{ .throughput = 100 * 1024, .blink_time = 110 },
+	{ .throughput = 200 * 1024, .blink_time =  80 },
+	{ .throughput = 300 * 1024, .blink_time =  50 },
+};
+
+static int mt76_led_init(struct mt76_dev *dev)
+{
+	struct device_node *np = dev->dev->of_node;
+	struct ieee80211_hw *hw = dev->hw;
+	u32 led_pin;
+
+	if (!dev->led_cdev.brightness_set && !dev->led_cdev.blink_set)
+		return 0;
+
+	snprintf(dev->led_name, sizeof(dev->led_name),
+		 "mt76-%s", wiphy_name(hw->wiphy));
+
+	dev->led_cdev.name = dev->led_name;
+	dev->led_cdev.default_trigger =
+		ieee80211_create_tpt_led_trigger(hw,
+					IEEE80211_TPT_LEDTRIG_FL_RADIO,
+					mt76_tpt_blink,
+					ARRAY_SIZE(mt76_tpt_blink));
+
+	np = of_get_child_by_name(np, "led");
+	if (np) {
+		if (!of_property_read_u32(np, "led-sources", &led_pin))
+			dev->led_pin = led_pin;
+		dev->led_al = of_property_read_bool(np, "led-active-low");
+	}
+
+	return devm_led_classdev_register(dev->dev, &dev->led_cdev);
+}
+
+static int
+mt76_init_sband(struct mt76_dev *dev, struct mt76_sband *msband,
+		const struct ieee80211_channel *chan, int n_chan,
+		struct ieee80211_rate *rates, int n_rates, bool vht)
+{
+	struct ieee80211_supported_band *sband = &msband->sband;
+	struct ieee80211_sta_ht_cap *ht_cap;
+	struct ieee80211_sta_vht_cap *vht_cap;
+	void *chanlist;
+	u16 mcs_map;
+	int size;
+
+	size = n_chan * sizeof(*chan);
+	chanlist = devm_kmemdup(dev->dev, chan, size, GFP_KERNEL);
+	if (!chanlist)
+		return -ENOMEM;
+
+	msband->chan = devm_kzalloc(dev->dev, n_chan * sizeof(*msband->chan),
+				    GFP_KERNEL);
+	if (!msband->chan)
+		return -ENOMEM;
+
+	sband->channels = chanlist;
+	sband->n_channels = n_chan;
+	sband->bitrates = rates;
+	sband->n_bitrates = n_rates;
+	dev->chandef.chan = &sband->channels[0];
+
+	ht_cap = &sband->ht_cap;
+	ht_cap->ht_supported = true;
+	ht_cap->cap |= IEEE80211_HT_CAP_SUP_WIDTH_20_40 |
+		       IEEE80211_HT_CAP_GRN_FLD |
+		       IEEE80211_HT_CAP_SGI_20 |
+		       IEEE80211_HT_CAP_SGI_40 |
+		       IEEE80211_HT_CAP_TX_STBC |
+		       (1 << IEEE80211_HT_CAP_RX_STBC_SHIFT);
+
+	ht_cap->mcs.rx_mask[0] = 0xff;
+	ht_cap->mcs.rx_mask[1] = 0xff;
+	ht_cap->mcs.tx_params = IEEE80211_HT_MCS_TX_DEFINED;
+	ht_cap->ampdu_factor = IEEE80211_HT_MAX_AMPDU_64K;
+	ht_cap->ampdu_density = IEEE80211_HT_MPDU_DENSITY_4;
+
+	if (!vht)
+		return 0;
+
+	vht_cap = &sband->vht_cap;
+	vht_cap->vht_supported = true;
+
+	mcs_map = (IEEE80211_VHT_MCS_SUPPORT_0_9 << (0 * 2)) |
+		  (IEEE80211_VHT_MCS_SUPPORT_0_9 << (1 * 2)) |
+		  (IEEE80211_VHT_MCS_NOT_SUPPORTED << (2 * 2)) |
+		  (IEEE80211_VHT_MCS_NOT_SUPPORTED << (3 * 2)) |
+		  (IEEE80211_VHT_MCS_NOT_SUPPORTED << (4 * 2)) |
+		  (IEEE80211_VHT_MCS_NOT_SUPPORTED << (5 * 2)) |
+		  (IEEE80211_VHT_MCS_NOT_SUPPORTED << (6 * 2)) |
+		  (IEEE80211_VHT_MCS_NOT_SUPPORTED << (7 * 2));
+
+	vht_cap->vht_mcs.rx_mcs_map = cpu_to_le16(mcs_map);
+	vht_cap->vht_mcs.tx_mcs_map = cpu_to_le16(mcs_map);
+	vht_cap->cap |= IEEE80211_VHT_CAP_RXLDPC |
+			IEEE80211_VHT_CAP_TXSTBC |
+			IEEE80211_VHT_CAP_RXSTBC_1 |
+			IEEE80211_VHT_CAP_SHORT_GI_80;
+
+	return 0;
+}
+
+static int
+mt76_init_sband_2g(struct mt76_dev *dev, struct ieee80211_rate *rates,
+		   int n_rates)
+{
+	dev->hw->wiphy->bands[NL80211_BAND_2GHZ] = &dev->sband_2g.sband;
+
+	return mt76_init_sband(dev, &dev->sband_2g,
+			       mt76_channels_2ghz,
+			       ARRAY_SIZE(mt76_channels_2ghz),
+			       rates, n_rates, false);
+}
+
+static int
+mt76_init_sband_5g(struct mt76_dev *dev, struct ieee80211_rate *rates,
+		   int n_rates, bool vht)
+{
+	dev->hw->wiphy->bands[NL80211_BAND_5GHZ] = &dev->sband_5g.sband;
+
+	return mt76_init_sband(dev, &dev->sband_5g,
+			       mt76_channels_5ghz,
+			       ARRAY_SIZE(mt76_channels_5ghz),
+			       rates, n_rates, vht);
+}
+
+static void
+mt76_check_sband(struct mt76_dev *dev, int band)
+{
+	struct ieee80211_supported_band *sband = dev->hw->wiphy->bands[band];
+	bool found = false;
+	int i;
+
+	if (!sband)
+		return;
+
+	for (i = 0; i < sband->n_channels; i++) {
+		if (sband->channels[i].flags & IEEE80211_CHAN_DISABLED)
+			continue;
+
+		found = true;
+		break;
+	}
+
+	if (found)
+		return;
+
+	sband->n_channels = 0;
+	dev->hw->wiphy->bands[band] = NULL;
+}
+
+int mt76_register_device(struct mt76_dev *dev, bool vht,
+			 struct ieee80211_rate *rates, int n_rates)
+{
+	struct ieee80211_hw *hw = dev->hw;
+	struct wiphy *wiphy = hw->wiphy;
+	int ret;
+
+	dev_set_drvdata(dev->dev, dev);
+
+	spin_lock_init(&dev->lock);
+	spin_lock_init(&dev->cc_lock);
+	INIT_LIST_HEAD(&dev->txwi_cache);
+
+	SET_IEEE80211_DEV(hw, dev->dev);
+	SET_IEEE80211_PERM_ADDR(hw, dev->macaddr);
+
+	wiphy->interface_modes =
+		BIT(NL80211_IFTYPE_STATION) |
+		BIT(NL80211_IFTYPE_AP) |
+#ifdef CONFIG_MAC80211_MESH
+		BIT(NL80211_IFTYPE_MESH_POINT) |
+#endif
+		BIT(NL80211_IFTYPE_ADHOC);
+
+	wiphy->features |= NL80211_FEATURE_ACTIVE_MONITOR;
+
+	hw->txq_data_size = sizeof(struct mt76_txq);
+	hw->max_tx_fragments = 16;
+
+	ieee80211_hw_set(hw, SIGNAL_DBM);
+	ieee80211_hw_set(hw, PS_NULLFUNC_STACK);
+	ieee80211_hw_set(hw, HOST_BROADCAST_PS_BUFFERING);
+	ieee80211_hw_set(hw, AMPDU_AGGREGATION);
+	ieee80211_hw_set(hw, SUPPORTS_RC_TABLE);
+	ieee80211_hw_set(hw, SUPPORT_FAST_XMIT);
+	ieee80211_hw_set(hw, SUPPORTS_CLONED_SKBS);
+	ieee80211_hw_set(hw, SUPPORTS_AMSDU_IN_AMPDU);
+	ieee80211_hw_set(hw, TX_AMSDU);
+	ieee80211_hw_set(hw, TX_FRAG_LIST);
+	ieee80211_hw_set(hw, MFP_CAPABLE);
+
+	wiphy->flags |= WIPHY_FLAG_IBSS_RSN;
+
+	if (dev->cap.has_2ghz) {
+		ret = mt76_init_sband_2g(dev, rates, n_rates);
+		if (ret)
+			return ret;
+	}
+
+	if (dev->cap.has_5ghz) {
+		ret = mt76_init_sband_5g(dev, rates + 4, n_rates - 4, vht);
+		if (ret)
+			return ret;
+	}
+
+	wiphy_read_of_freq_limits(dev->hw->wiphy);
+	mt76_check_sband(dev, NL80211_BAND_2GHZ);
+	mt76_check_sband(dev, NL80211_BAND_5GHZ);
+
+	ret = mt76_led_init(dev);
+	if (ret)
+		return ret;
+
+	return ieee80211_register_hw(hw);
+}
+EXPORT_SYMBOL_GPL(mt76_register_device);
+
+void mt76_unregister_device(struct mt76_dev *dev)
+{
+	struct ieee80211_hw *hw = dev->hw;
+
+	ieee80211_unregister_hw(hw);
+	mt76_tx_free(dev);
+}
+EXPORT_SYMBOL_GPL(mt76_unregister_device);
+
+void mt76_rx(struct mt76_dev *dev, enum mt76_rxq_id q, struct sk_buff *skb)
+{
+	if (!test_bit(MT76_STATE_RUNNING, &dev->state)) {
+		dev_kfree_skb(skb);
+		return;
+	}
+
+	__skb_queue_tail(&dev->rx_skb[q], skb);
+}
+EXPORT_SYMBOL_GPL(mt76_rx);
+
+void mt76_set_channel(struct mt76_dev *dev)
+{
+	struct ieee80211_hw *hw = dev->hw;
+	struct cfg80211_chan_def *chandef = &hw->conf.chandef;
+	struct mt76_channel_state *state;
+	bool offchannel = hw->conf.flags & IEEE80211_CONF_OFFCHANNEL;
+
+	if (dev->drv->update_survey)
+		dev->drv->update_survey(dev);
+
+	dev->chandef = *chandef;
+
+	if (!offchannel)
+		dev->main_chan = chandef->chan;
+
+	if (chandef->chan != dev->main_chan) {
+		state = mt76_channel_state(dev, chandef->chan);
+		memset(state, 0, sizeof(*state));
+	}
+}
+EXPORT_SYMBOL_GPL(mt76_set_channel);
+
+int mt76_get_survey(struct ieee80211_hw *hw, int idx,
+		    struct survey_info *survey)
+{
+	struct mt76_dev *dev = hw->priv;
+	struct mt76_sband *sband;
+	struct ieee80211_channel *chan;
+	struct mt76_channel_state *state;
+	int ret = 0;
+
+	if (idx == 0 && dev->drv->update_survey)
+		dev->drv->update_survey(dev);
+
+	sband = &dev->sband_2g;
+	if (idx >= sband->sband.n_channels) {
+		idx -= sband->sband.n_channels;
+		sband = &dev->sband_5g;
+	}
+
+	if (idx >= sband->sband.n_channels)
+		return -ENOENT;
+
+	chan = &sband->sband.channels[idx];
+	state = mt76_channel_state(dev, chan);
+
+	memset(survey, 0, sizeof(*survey));
+	survey->channel = chan;
+	survey->filled = SURVEY_INFO_TIME | SURVEY_INFO_TIME_BUSY;
+	if (chan == dev->main_chan)
+		survey->filled |= SURVEY_INFO_IN_USE;
+
+	spin_lock_bh(&dev->cc_lock);
+	survey->time = div_u64(state->cc_active, 1000);
+	survey->time_busy = div_u64(state->cc_busy, 1000);
+	spin_unlock_bh(&dev->cc_lock);
+
+	return ret;
+}
+EXPORT_SYMBOL_GPL(mt76_get_survey);
+
+void mt76_wcid_key_setup(struct mt76_dev *dev, struct mt76_wcid *wcid,
+			 struct ieee80211_key_conf *key)
+{
+	struct ieee80211_key_seq seq;
+	int i;
+
+	wcid->rx_check_pn = false;
+
+	if (!key)
+		return;
+
+	if (key->cipher == WLAN_CIPHER_SUITE_CCMP)
+		wcid->rx_check_pn = true;
+
+	for (i = 0; i < IEEE80211_NUM_TIDS; i++) {
+		ieee80211_get_key_rx_seq(key, i, &seq);
+		memcpy(wcid->rx_key_pn[i], seq.ccmp.pn, sizeof(seq.ccmp.pn));
+	}
+}
+EXPORT_SYMBOL(mt76_wcid_key_setup);
+
+static struct ieee80211_sta *mt76_rx_convert(struct sk_buff *skb)
+{
+	struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
+	struct mt76_rx_status mstat;
+
+	mstat = *((struct mt76_rx_status *) skb->cb);
+	memset(status, 0, sizeof(*status));
+
+	status->flag = mstat.flag;
+	status->freq = mstat.freq;
+	status->enc_flags = mstat.enc_flags;
+	status->encoding = mstat.encoding;
+	status->bw = mstat.bw;
+	status->rate_idx = mstat.rate_idx;
+	status->nss = mstat.nss;
+	status->band = mstat.band;
+	status->signal = mstat.signal;
+	status->chains = mstat.chains;
+
+	BUILD_BUG_ON(sizeof(mstat) > sizeof(skb->cb));
+	BUILD_BUG_ON(sizeof(status->chain_signal) != sizeof(mstat.chain_signal));
+	memcpy(status->chain_signal, mstat.chain_signal, sizeof(mstat.chain_signal));
+
+	return wcid_to_sta(mstat.wcid);
+}
+
+static int
+mt76_check_ccmp_pn(struct sk_buff *skb)
+{
+	struct mt76_rx_status *status = (struct mt76_rx_status *) skb->cb;
+	struct mt76_wcid *wcid = status->wcid;
+	struct ieee80211_hdr *hdr;
+	int ret;
+
+	if (!(status->flag & RX_FLAG_DECRYPTED))
+		return 0;
+
+	if (!wcid || !wcid->rx_check_pn)
+		return 0;
+
+	if (!(status->flag & RX_FLAG_IV_STRIPPED)) {
+		/*
+		 * Validate the first fragment both here and in mac80211;
+		 * all further fragments will be validated by mac80211 only.
+		 */
+		hdr = (struct ieee80211_hdr *) skb->data;
+		if (ieee80211_is_frag(hdr) &&
+		    !ieee80211_is_first_frag(hdr->frame_control))
+			return 0;
+	}
+
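+	/* Anything not strictly greater than the last validated packet
+	 * number is a replay.
+	 */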
+	BUILD_BUG_ON(sizeof(status->iv) != sizeof(wcid->rx_key_pn[0]));
+	ret = memcmp(status->iv, wcid->rx_key_pn[status->tid],
+		     sizeof(status->iv));
+	if (ret <= 0)
+		return -EINVAL; /* replay */
+
+	memcpy(wcid->rx_key_pn[status->tid], status->iv, sizeof(status->iv));
+
+	if (status->flag & RX_FLAG_IV_STRIPPED)
+		status->flag |= RX_FLAG_PN_VALIDATED;
+
+	return 0;
+}
+
+void mt76_rx_complete(struct mt76_dev *dev, struct sk_buff_head *frames,
+		      int queue)
+{
+	struct napi_struct *napi = NULL;
+	struct ieee80211_sta *sta;
+	struct sk_buff *skb;
+
+	if (queue >= 0)
+		napi = &dev->napi[queue];
+
+	while ((skb = __skb_dequeue(frames)) != NULL) {
+		if (mt76_check_ccmp_pn(skb)) {
+			dev_kfree_skb(skb);
+			continue;
+		}
+
+		sta = mt76_rx_convert(skb);
+		ieee80211_rx_napi(dev->hw, sta, skb, napi);
+	}
+}
+
+void mt76_rx_poll_complete(struct mt76_dev *dev, enum mt76_rxq_id q)
+{
+	struct sk_buff_head frames;
+	struct sk_buff *skb;
+
+	__skb_queue_head_init(&frames);
+
+	while ((skb = __skb_dequeue(&dev->rx_skb[q])) != NULL)
+		mt76_rx_aggr_reorder(skb, &frames);
+
+	mt76_rx_complete(dev, &frames, q);
+}
diff --git a/drivers/net/wireless/mediatek/mt76/mmio.c b/drivers/net/wireless/mediatek/mt76/mmio.c
new file mode 100644
index 000000000000..09a14dead6e3
--- /dev/null
+++ b/drivers/net/wireless/mediatek/mt76/mmio.c
@@ -0,0 +1,61 @@
+/*
+ * Copyright (C) 2016 Felix Fietkau <nbd@nbd.name>
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include "mt76.h"
+#include "trace.h"
+
+static u32 mt76_mmio_rr(struct mt76_dev *dev, u32 offset)
+{
+	u32 val;
+
+	val = ioread32(dev->regs + offset);
+	trace_reg_rr(dev, offset, val);
+
+	return val;
+}
+
+static void mt76_mmio_wr(struct mt76_dev *dev, u32 offset, u32 val)
+{
+	trace_reg_wr(dev, offset, val);
+	iowrite32(val, dev->regs + offset);
+}
+
+static u32 mt76_mmio_rmw(struct mt76_dev *dev, u32 offset, u32 mask, u32 val)
+{
+	val |= mt76_mmio_rr(dev, offset) & ~mask;
+	mt76_mmio_wr(dev, offset, val);
+	return val;
+}
+
+static void mt76_mmio_copy(struct mt76_dev *dev, u32 offset, const void *data,
+			   int len)
+{
+	__iowrite32_copy(dev->regs + offset, data, len >> 2);
+}
+
+void mt76_mmio_init(struct mt76_dev *dev, void __iomem *regs)
+{
+	static const struct mt76_bus_ops mt76_mmio_ops = {
+		.rr = mt76_mmio_rr,
+		.rmw = mt76_mmio_rmw,
+		.wr = mt76_mmio_wr,
+		.copy = mt76_mmio_copy,
+	};
+
+	dev->bus = &mt76_mmio_ops;
+	dev->regs = regs;
+}
+EXPORT_SYMBOL_GPL(mt76_mmio_init);
diff --git a/drivers/net/wireless/mediatek/mt76/mt76.h b/drivers/net/wireless/mediatek/mt76/mt76.h
new file mode 100644
index 000000000000..129015c9d116
--- /dev/null
+++ b/drivers/net/wireless/mediatek/mt76/mt76.h
@@ -0,0 +1,432 @@
+/*
+ * Copyright (C) 2016 Felix Fietkau <nbd@nbd.name>
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#ifndef __MT76_H
+#define __MT76_H
+
+#include <linux/kernel.h>
+#include <linux/io.h>
+#include <linux/spinlock.h>
+#include <linux/skbuff.h>
+#include <linux/leds.h>
+#include <net/mac80211.h>
+#include "util.h"
+
+#define MT_TX_RING_SIZE     256
+#define MT_MCU_RING_SIZE    32
+#define MT_RX_BUF_SIZE      2048
+
+struct mt76_dev;
+
+struct mt76_bus_ops {
+	u32 (*rr)(struct mt76_dev *dev, u32 offset);
+	void (*wr)(struct mt76_dev *dev, u32 offset, u32 val);
+	u32 (*rmw)(struct mt76_dev *dev, u32 offset, u32 mask, u32 val);
+	void (*copy)(struct mt76_dev *dev, u32 offset, const void *data,
+		     int len);
+};
+
+enum mt76_txq_id {
+	MT_TXQ_VO = IEEE80211_AC_VO,
+	MT_TXQ_VI = IEEE80211_AC_VI,
+	MT_TXQ_BE = IEEE80211_AC_BE,
+	MT_TXQ_BK = IEEE80211_AC_BK,
+	MT_TXQ_PSD,
+	MT_TXQ_MCU,
+	MT_TXQ_BEACON,
+	MT_TXQ_CAB,
+	__MT_TXQ_MAX
+};
+
+enum mt76_rxq_id {
+	MT_RXQ_MAIN,
+	MT_RXQ_MCU,
+	__MT_RXQ_MAX
+};
+
+struct mt76_queue_buf {
+	dma_addr_t addr;
+	int len;
+};
+
+struct mt76_queue_entry {
+	union {
+		void *buf;
+		struct sk_buff *skb;
+	};
+	struct mt76_txwi_cache *txwi;
+	bool schedule;
+};
+
+struct mt76_queue_regs {
+	u32 desc_base;
+	u32 ring_size;
+	u32 cpu_idx;
+	u32 dma_idx;
+} __packed __aligned(4);
+
+struct mt76_queue {
+	struct mt76_queue_regs __iomem *regs;
+
+	spinlock_t lock;
+	struct mt76_queue_entry *entry;
+	struct mt76_desc *desc;
+
+	struct list_head swq;
+	int swq_queued;
+
+	u16 head;
+	u16 tail;
+	int ndesc;
+	int queued;
+	int buf_size;
+
+	u8 buf_offset;
+	u8 hw_idx;
+
+	dma_addr_t desc_dma;
+	struct sk_buff *rx_head;
+};
+
+struct mt76_queue_ops {
+	int (*init)(struct mt76_dev *dev);
+
+	int (*alloc)(struct mt76_dev *dev, struct mt76_queue *q);
+
+	int (*add_buf)(struct mt76_dev *dev, struct mt76_queue *q,
+		       struct mt76_queue_buf *buf, int nbufs, u32 info,
+		       struct sk_buff *skb, void *txwi);
+
+	void *(*dequeue)(struct mt76_dev *dev, struct mt76_queue *q, bool flush,
+			 int *len, u32 *info, bool *more);
+
+	void (*rx_reset)(struct mt76_dev *dev, enum mt76_rxq_id qid);
+
+	void (*tx_cleanup)(struct mt76_dev *dev, enum mt76_txq_id qid,
+			   bool flush);
+
+	void (*kick)(struct mt76_dev *dev, struct mt76_queue *q);
+};
+
+struct mt76_wcid {
+	struct mt76_rx_tid __rcu *aggr[IEEE80211_NUM_TIDS];
+
+	struct work_struct aggr_work;
+
+	u8 idx;
+	u8 hw_key_idx;
+
+	u8 sta:1;
+
+	u8 rx_check_pn;
+	u8 rx_key_pn[IEEE80211_NUM_TIDS][6];
+
+	__le16 tx_rate;
+	bool tx_rate_set;
+	u8 tx_rate_nss;
+	s8 max_txpwr_adj;
+	bool sw_iv;
+};
+
+struct mt76_txq {
+	struct list_head list;
+	struct mt76_queue *hwq;
+	struct mt76_wcid *wcid;
+
+	struct sk_buff_head retry_q;
+
+	u16 agg_ssn;
+	bool send_bar;
+	bool aggr;
+};
+
+struct mt76_txwi_cache {
+	u32 txwi[8];
+	dma_addr_t dma_addr;
+	struct list_head list;
+};
+
+
+	struct rcu_head rcu_head;
+
+	struct mt76_dev *dev;
+
+	spinlock_t lock;
+	struct delayed_work reorder_work;
+
+	u16 head;
+	u8 size;
+	u8 nframes;
+
+	u8 started:1, stopped:1, timer_pending:1;
+
+	struct sk_buff *reorder_buf[];
+};
+
+enum {
+	MT76_STATE_INITIALIZED,
+	MT76_STATE_RUNNING,
+	MT76_SCANNING,
+	MT76_RESET,
+};
+
+struct mt76_hw_cap {
+	bool has_2ghz;
+	bool has_5ghz;
+};
+
+struct mt76_driver_ops {
+	u16 txwi_size;
+
+	void (*update_survey)(struct mt76_dev *dev);
+
+	int (*tx_prepare_skb)(struct mt76_dev *dev, void *txwi_ptr,
+			      struct sk_buff *skb, struct mt76_queue *q,
+			      struct mt76_wcid *wcid,
+			      struct ieee80211_sta *sta, u32 *tx_info);
+
+	void (*tx_complete_skb)(struct mt76_dev *dev, struct mt76_queue *q,
+				struct mt76_queue_entry *e, bool flush);
+
+	void (*rx_skb)(struct mt76_dev *dev, enum mt76_rxq_id q,
+		       struct sk_buff *skb);
+
+	void (*rx_poll_complete)(struct mt76_dev *dev, enum mt76_rxq_id q);
+};
+
+struct mt76_channel_state {
+	u64 cc_active;
+	u64 cc_busy;
+};
+
+struct mt76_sband {
+	struct ieee80211_supported_band sband;
+	struct mt76_channel_state *chan;
+};
+
+struct mt76_dev {
+	struct ieee80211_hw *hw;
+	struct cfg80211_chan_def chandef;
+	struct ieee80211_channel *main_chan;
+
+	spinlock_t lock;
+	spinlock_t cc_lock;
+	const struct mt76_bus_ops *bus;
+	const struct mt76_driver_ops *drv;
+	void __iomem *regs;
+	struct device *dev;
+
+	struct net_device napi_dev;
+	struct napi_struct napi[__MT_RXQ_MAX];
+	struct sk_buff_head rx_skb[__MT_RXQ_MAX];
+
+	struct list_head txwi_cache;
+	struct mt76_queue q_tx[__MT_TXQ_MAX];
+	struct mt76_queue q_rx[__MT_RXQ_MAX];
+	const struct mt76_queue_ops *queue_ops;
+
+	u8 macaddr[ETH_ALEN];
+	u32 rev;
+	unsigned long state;
+
+	struct mt76_sband sband_2g;
+	struct mt76_sband sband_5g;
+	struct debugfs_blob_wrapper eeprom;
+	struct debugfs_blob_wrapper otp;
+	struct mt76_hw_cap cap;
+
+	u32 debugfs_reg;
+
+	struct led_classdev led_cdev;
+	char led_name[32];
+	bool led_al;
+	u8 led_pin;
+};
+
+enum mt76_phy_type {
+	MT_PHY_TYPE_CCK,
+	MT_PHY_TYPE_OFDM,
+	MT_PHY_TYPE_HT,
+	MT_PHY_TYPE_HT_GF,
+	MT_PHY_TYPE_VHT,
+};
+
+struct mt76_rate_power {
+	union {
+		struct {
+			s8 cck[4];
+			s8 ofdm[8];
+			s8 ht[16];
+			s8 vht[10];
+		};
+		s8 all[38];
+	};
+};
+
+struct mt76_rx_status {
+	struct mt76_wcid *wcid;
+
+	unsigned long reorder_time;
+
+	u8 iv[6];
+
+	u8 aggr:1;
+	u8 tid;
+	u16 seqno;
+
+	u16 freq;
+	u32 flag;
+	u8 enc_flags;
+	u8 encoding:2, bw:3;
+	u8 rate_idx;
+	u8 nss;
+	u8 band;
+	u8 signal;
+	u8 chains;
+	s8 chain_signal[IEEE80211_MAX_CHAINS];
+};
+
+#define mt76_rr(dev, ...)	(dev)->mt76.bus->rr(&((dev)->mt76), __VA_ARGS__)
+#define mt76_wr(dev, ...)	(dev)->mt76.bus->wr(&((dev)->mt76), __VA_ARGS__)
+#define mt76_rmw(dev, ...)	(dev)->mt76.bus->rmw(&((dev)->mt76), __VA_ARGS__)
+#define mt76_wr_copy(dev, ...)	(dev)->mt76.bus->copy(&((dev)->mt76), __VA_ARGS__)
+
+#define mt76_set(dev, offset, val)	mt76_rmw(dev, offset, 0, val)
+#define mt76_clear(dev, offset, val)	mt76_rmw(dev, offset, val, 0)
+
+#define mt76_get_field(_dev, _reg, _field)		\
+	FIELD_GET(_field, mt76_rr(_dev, _reg))
+
+#define mt76_rmw_field(_dev, _reg, _field, _val)	\
+	mt76_rmw(_dev, _reg, _field, FIELD_PREP(_field, _val))
+
+#define mt76_hw(dev) (dev)->mt76.hw
+
+bool __mt76_poll(struct mt76_dev *dev, u32 offset, u32 mask, u32 val,
+		 int timeout);
+
+#define mt76_poll(dev, ...) __mt76_poll(&((dev)->mt76), __VA_ARGS__)
+
+bool __mt76_poll_msec(struct mt76_dev *dev, u32 offset, u32 mask, u32 val,
+		      int timeout);
+
+#define mt76_poll_msec(dev, ...) __mt76_poll_msec(&((dev)->mt76), __VA_ARGS__)
+
+void mt76_mmio_init(struct mt76_dev *dev, void __iomem *regs);
+
+static inline u16 mt76_chip(struct mt76_dev *dev)
+{
+	return dev->rev >> 16;
+}
+
+static inline u16 mt76_rev(struct mt76_dev *dev)
+{
+	return dev->rev & 0xffff;
+}
+
+#define mt76xx_chip(dev) mt76_chip(&((dev)->mt76))
+#define mt76xx_rev(dev) mt76_rev(&((dev)->mt76))
+
+#define mt76_init_queues(dev)		(dev)->mt76.queue_ops->init(&((dev)->mt76))
+#define mt76_queue_alloc(dev, ...)	(dev)->mt76.queue_ops->alloc(&((dev)->mt76), __VA_ARGS__)
+#define mt76_queue_add_buf(dev, ...)	(dev)->mt76.queue_ops->add_buf(&((dev)->mt76), __VA_ARGS__)
+#define mt76_queue_rx_reset(dev, ...)	(dev)->mt76.queue_ops->rx_reset(&((dev)->mt76), __VA_ARGS__)
+#define mt76_queue_tx_cleanup(dev, ...)	(dev)->mt76.queue_ops->tx_cleanup(&((dev)->mt76), __VA_ARGS__)
+#define mt76_queue_kick(dev, ...)	(dev)->mt76.queue_ops->kick(&((dev)->mt76), __VA_ARGS__)
+
+static inline struct mt76_channel_state *
+mt76_channel_state(struct mt76_dev *dev, struct ieee80211_channel *c)
+{
+	struct mt76_sband *msband;
+	int idx;
+
+	if (c->band == NL80211_BAND_2GHZ)
+		msband = &dev->sband_2g;
+	else
+		msband = &dev->sband_5g;
+
+	idx = c - &msband->sband.channels[0];
+	return &msband->chan[idx];
+}
+
+int mt76_register_device(struct mt76_dev *dev, bool vht,
+			 struct ieee80211_rate *rates, int n_rates);
+void mt76_unregister_device(struct mt76_dev *dev);
+
+struct dentry *mt76_register_debugfs(struct mt76_dev *dev);
+
+int mt76_eeprom_init(struct mt76_dev *dev, int len);
+void mt76_eeprom_override(struct mt76_dev *dev);
+
+static inline struct ieee80211_txq *
+mtxq_to_txq(struct mt76_txq *mtxq)
+{
+	void *ptr = mtxq;
+
+	return container_of(ptr, struct ieee80211_txq, drv_priv);
+}
+
+static inline struct ieee80211_sta *
+wcid_to_sta(struct mt76_wcid *wcid)
+{
+	void *ptr = wcid;
+
+	if (!wcid || !wcid->sta)
+		return NULL;
+
+	return container_of(ptr, struct ieee80211_sta, drv_priv);
+}
+
+int mt76_tx_queue_skb(struct mt76_dev *dev, struct mt76_queue *q,
+		      struct sk_buff *skb, struct mt76_wcid *wcid,
+		      struct ieee80211_sta *sta);
+
+void mt76_rx(struct mt76_dev *dev, enum mt76_rxq_id q, struct sk_buff *skb);
+void mt76_tx(struct mt76_dev *dev, struct ieee80211_sta *sta,
+	     struct mt76_wcid *wcid, struct sk_buff *skb);
+void mt76_txq_init(struct mt76_dev *dev, struct ieee80211_txq *txq);
+void mt76_txq_remove(struct mt76_dev *dev, struct ieee80211_txq *txq);
+void mt76_wake_tx_queue(struct ieee80211_hw *hw, struct ieee80211_txq *txq);
+void mt76_stop_tx_queues(struct mt76_dev *dev, struct ieee80211_sta *sta,
+			 bool send_bar);
+void mt76_txq_schedule(struct mt76_dev *dev, struct mt76_queue *hwq);
+void mt76_txq_schedule_all(struct mt76_dev *dev);
+void mt76_release_buffered_frames(struct ieee80211_hw *hw,
+				  struct ieee80211_sta *sta,
+				  u16 tids, int nframes,
+				  enum ieee80211_frame_release_type reason,
+				  bool more_data);
+void mt76_set_channel(struct mt76_dev *dev);
+int mt76_get_survey(struct ieee80211_hw *hw, int idx,
+		    struct survey_info *survey);
+
+int mt76_rx_aggr_start(struct mt76_dev *dev, struct mt76_wcid *wcid, u8 tid,
+		       u16 ssn, u8 size);
+void mt76_rx_aggr_stop(struct mt76_dev *dev, struct mt76_wcid *wcid, u8 tid);
+
+void mt76_wcid_key_setup(struct mt76_dev *dev, struct mt76_wcid *wcid,
+			 struct ieee80211_key_conf *key);
+
+/* internal */
+void mt76_tx_free(struct mt76_dev *dev);
+void mt76_put_txwi(struct mt76_dev *dev, struct mt76_txwi_cache *t);
+void mt76_rx_complete(struct mt76_dev *dev, struct sk_buff_head *frames,
+		      int queue);
+void mt76_rx_poll_complete(struct mt76_dev *dev, enum mt76_rxq_id q);
+void mt76_rx_aggr_reorder(struct sk_buff *skb, struct sk_buff_head *frames);
+
+#endif
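The accessor macros above are the only way core and chip code touch registers: every access funnels through the per-bus mt76_bus_ops, and mt76_set()/mt76_clear() reduce bit twiddling to a single read-modify-write. A minimal sketch of an MMIO backend satisfying this interface (the demo_* names are illustrative, not part of the patch; mt76_mmio_init() installs the real one, and .copy is omitted for brevity):

	/* Sketch only: a trivial MMIO mt76_bus_ops.  With this installed,
	 * mt76_set(dev, reg, bits) expands to rmw(reg, 0, bits) and
	 * mt76_clear(dev, reg, bits) to rmw(reg, bits, 0).
	 */
	static u32 demo_rr(struct mt76_dev *dev, u32 offset)
	{
		return readl(dev->regs + offset);
	}

	static void demo_wr(struct mt76_dev *dev, u32 offset, u32 val)
	{
		writel(val, dev->regs + offset);
	}

	static u32 demo_rmw(struct mt76_dev *dev, u32 offset, u32 mask, u32 val)
	{
		val |= demo_rr(dev, offset) & ~mask;
		demo_wr(dev, offset, val);
		return val;
	}

	static const struct mt76_bus_ops demo_mmio_ops = {
		.rr = demo_rr,
		.wr = demo_wr,
		.rmw = demo_rmw,
	};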
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2.h b/drivers/net/wireless/mediatek/mt76/mt76x2.h
new file mode 100644
index 000000000000..17df17afd9bf
--- /dev/null
+++ b/drivers/net/wireless/mediatek/mt76/mt76x2.h
@@ -0,0 +1,228 @@
+/*
+ * Copyright (C) 2016 Felix Fietkau <nbd@nbd.name>
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#ifndef __MT76x2_H
+#define __MT76x2_H
+
+#include <linux/device.h>
+#include <linux/dma-mapping.h>
+#include <linux/spinlock.h>
+#include <linux/skbuff.h>
+#include <linux/netdevice.h>
+#include <linux/irq.h>
+#include <linux/interrupt.h>
+#include <linux/mutex.h>
+#include <linux/bitops.h>
+#include <linux/kfifo.h>
+
+#define MT7662_FIRMWARE		"mt7662.bin"
+#define MT7662_ROM_PATCH	"mt7662_rom_patch.bin"
+#define MT7662_EEPROM_SIZE	512
+
+#define MT76x2_RX_RING_SIZE	256
+#define MT_RX_HEADROOM		32
+
+#define MT_MAX_CHAINS		2
+
+#define MT_CALIBRATE_INTERVAL	HZ
+
+#include "mt76.h"
+#include "mt76x2_regs.h"
+#include "mt76x2_mac.h"
+#include "mt76x2_dfs.h"
+
+struct mt76x2_mcu {
+	struct mutex mutex;
+
+	wait_queue_head_t wait;
+	struct sk_buff_head res_q;
+
+	u32 msg_seq;
+};
+
+struct mt76x2_rx_freq_cal {
+	s8 high_gain[MT_MAX_CHAINS];
+	s8 rssi_offset[MT_MAX_CHAINS];
+	s8 lna_gain;
+	u32 mcu_gain;
+};
+
+struct mt76x2_calibration {
+	struct mt76x2_rx_freq_cal rx;
+
+	u8 agc_gain_init[MT_MAX_CHAINS];
+	u8 agc_gain_cur[MT_MAX_CHAINS];
+
+	int avg_rssi[MT_MAX_CHAINS];
+	int avg_rssi_all;
+
+	s8 agc_gain_adjust;
+	s8 low_gain;
+
+	u8 temp;
+
+	bool init_cal_done;
+	bool tssi_cal_done;
+	bool tssi_comp_pending;
+	bool dpd_cal_done;
+	bool channel_cal_done;
+};
+
+struct mt76x2_dev {
+	struct mt76_dev mt76; /* must be first */
+
+	struct mac_address macaddr_list[8];
+
+	struct mutex mutex;
+
+	const u16 *beacon_offsets;
+	unsigned long wcid_mask[128 / BITS_PER_LONG];
+
+	int txpower_conf;
+	int txpower_cur;
+
+	u8 txdone_seq;
+	DECLARE_KFIFO_PTR(txstatus_fifo, struct mt76x2_tx_status);
+
+	struct mt76x2_mcu mcu;
+	struct sk_buff *rx_head;
+
+	struct tasklet_struct tx_tasklet;
+	struct tasklet_struct pre_tbtt_tasklet;
+	struct delayed_work cal_work;
+	struct delayed_work mac_work;
+
+	u32 aggr_stats[32];
+
+	struct mt76_wcid global_wcid;
+	struct mt76_wcid __rcu *wcid[128];
+
+	spinlock_t irq_lock;
+	u32 irqmask;
+
+	struct sk_buff *beacons[8];
+	u8 beacon_mask;
+	u8 beacon_data_mask;
+
+	u32 rev;
+	u32 rxfilter;
+
+	u16 chainmask;
+
+	struct mt76x2_calibration cal;
+
+	s8 target_power;
+	s8 target_power_delta[2];
+	struct mt76_rate_power rate_power;
+	bool enable_tpc;
+
+	u8 coverage_class;
+	u8 slottime;
+
+	struct mt76x2_dfs_pattern_detector dfs_pd;
+};
+
+struct mt76x2_vif {
+	u8 idx;
+
+	struct mt76_wcid group_wcid;
+};
+
+struct mt76x2_sta {
+	struct mt76_wcid wcid; /* must be first */
+
+	struct mt76x2_vif *vif;
+	struct mt76x2_tx_status status;
+	int n_frames;
+};
+
+static inline bool is_mt7612(struct mt76x2_dev *dev)
+{
+	return (dev->rev >> 16) == 0x7612;
+}
+
+void mt76x2_set_irq_mask(struct mt76x2_dev *dev, u32 clear, u32 set);
+
+static inline void mt76x2_irq_enable(struct mt76x2_dev *dev, u32 mask)
+{
+	mt76x2_set_irq_mask(dev, 0, mask);
+}
+
+static inline void mt76x2_irq_disable(struct mt76x2_dev *dev, u32 mask)
+{
+	mt76x2_set_irq_mask(dev, mask, 0);
+}
+
+extern const struct ieee80211_ops mt76x2_ops;
+
+struct mt76x2_dev *mt76x2_alloc_device(struct device *pdev);
+int mt76x2_register_device(struct mt76x2_dev *dev);
+void mt76x2_init_debugfs(struct mt76x2_dev *dev);
+
+irqreturn_t mt76x2_irq_handler(int irq, void *dev_instance);
+void mt76x2_phy_power_on(struct mt76x2_dev *dev);
+int mt76x2_init_hardware(struct mt76x2_dev *dev);
+void mt76x2_stop_hardware(struct mt76x2_dev *dev);
+int mt76x2_eeprom_init(struct mt76x2_dev *dev);
+int mt76x2_apply_calibration_data(struct mt76x2_dev *dev, int channel);
+void mt76x2_set_tx_ackto(struct mt76x2_dev *dev);
+
+int mt76x2_phy_start(struct mt76x2_dev *dev);
+int mt76x2_phy_set_channel(struct mt76x2_dev *dev,
+			 struct cfg80211_chan_def *chandef);
+int mt76x2_phy_get_rssi(struct mt76x2_dev *dev, s8 rssi, int chain);
+void mt76x2_phy_calibrate(struct work_struct *work);
+void mt76x2_phy_set_txpower(struct mt76x2_dev *dev);
+
+int mt76x2_mcu_init(struct mt76x2_dev *dev);
+int mt76x2_mcu_set_channel(struct mt76x2_dev *dev, u8 channel, u8 bw,
+			   u8 bw_index, bool scan);
+int mt76x2_mcu_set_radio_state(struct mt76x2_dev *dev, bool on);
+int mt76x2_mcu_load_cr(struct mt76x2_dev *dev, u8 type, u8 temp_level,
+		       u8 channel);
+int mt76x2_mcu_cleanup(struct mt76x2_dev *dev);
+
+int mt76x2_dma_init(struct mt76x2_dev *dev);
+void mt76x2_dma_cleanup(struct mt76x2_dev *dev);
+
+void mt76x2_cleanup(struct mt76x2_dev *dev);
+
+int mt76x2_tx_queue_mcu(struct mt76x2_dev *dev, enum mt76_txq_id qid,
+			struct sk_buff *skb, int cmd, int seq);
+void mt76x2_tx(struct ieee80211_hw *hw, struct ieee80211_tx_control *control,
+	       struct sk_buff *skb);
+void mt76x2_tx_complete(struct mt76x2_dev *dev, struct sk_buff *skb);
+int mt76x2_tx_prepare_skb(struct mt76_dev *mdev, void *txwi,
+			  struct sk_buff *skb, struct mt76_queue *q,
+			  struct mt76_wcid *wcid, struct ieee80211_sta *sta,
+			  u32 *tx_info);
+void mt76x2_tx_complete_skb(struct mt76_dev *mdev, struct mt76_queue *q,
+			    struct mt76_queue_entry *e, bool flush);
+
+void mt76x2_pre_tbtt_tasklet(unsigned long arg);
+
+void mt76x2_rx_poll_complete(struct mt76_dev *mdev, enum mt76_rxq_id q);
+void mt76x2_queue_rx_skb(struct mt76_dev *mdev, enum mt76_rxq_id q,
+			 struct sk_buff *skb);
+
+void mt76x2_update_channel(struct mt76_dev *mdev);
+
+s8 mt76x2_tx_get_max_txpwr_adj(struct mt76x2_dev *dev,
+			       const struct ieee80211_tx_rate *rate);
+s8 mt76x2_tx_get_txpwr_adj(struct mt76x2_dev *dev, s8 txpwr, s8 max_txpwr_adj);
+void mt76x2_tx_set_txpwr_auto(struct mt76x2_dev *dev, s8 txpwr);
+
+#endif
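The "must be first" constraint on the embedded struct mt76_dev is what makes the core-to-driver callbacks work: the core only ever passes a struct mt76_dev *, and the chip code recovers its private device with container_of(), as mt76x2_rx_poll_complete() and the other mt76x2 callbacks do. A sketch of the pattern (the demo_ name is illustrative):

	static inline struct mt76x2_dev *
	demo_to_mt76x2(struct mt76_dev *mdev)
	{
		/* valid only because mt76 is the first member */
		return container_of(mdev, struct mt76x2_dev, mt76);
	}

The same layout rule holds for struct mt76x2_sta, whose leading struct mt76_wcid sits at the start of mac80211's drv_priv area so that wcid_to_sta() can walk back to the enclosing ieee80211_sta.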
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2_core.c b/drivers/net/wireless/mediatek/mt76/mt76x2_core.c
new file mode 100644
index 000000000000..2629779e8d3e
--- /dev/null
+++ b/drivers/net/wireless/mediatek/mt76/mt76x2_core.c
@@ -0,0 +1,88 @@
+/*
+ * Copyright (C) 2016 Felix Fietkau <nbd@nbd.name>
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include <linux/delay.h>
+#include "mt76x2.h"
+#include "mt76x2_trace.h"
+
+void mt76x2_set_irq_mask(struct mt76x2_dev *dev, u32 clear, u32 set)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&dev->irq_lock, flags);
+	dev->irqmask &= ~clear;
+	dev->irqmask |= set;
+	mt76_wr(dev, MT_INT_MASK_CSR, dev->irqmask);
+	spin_unlock_irqrestore(&dev->irq_lock, flags);
+}
+
+void mt76x2_rx_poll_complete(struct mt76_dev *mdev, enum mt76_rxq_id q)
+{
+	struct mt76x2_dev *dev = container_of(mdev, struct mt76x2_dev, mt76);
+
+	mt76x2_irq_enable(dev, MT_INT_RX_DONE(q));
+}
+
+irqreturn_t mt76x2_irq_handler(int irq, void *dev_instance)
+{
+	struct mt76x2_dev *dev = dev_instance;
+	u32 intr;
+
+	intr = mt76_rr(dev, MT_INT_SOURCE_CSR);
+	mt76_wr(dev, MT_INT_SOURCE_CSR, intr);
+
+	if (!test_bit(MT76_STATE_INITIALIZED, &dev->mt76.state))
+		return IRQ_NONE;
+
+	trace_dev_irq(dev, intr, dev->irqmask);
+
+	intr &= dev->irqmask;
+
+	if (intr & MT_INT_TX_DONE_ALL) {
+		mt76x2_irq_disable(dev, MT_INT_TX_DONE_ALL);
+		tasklet_schedule(&dev->tx_tasklet);
+	}
+
+	if (intr & MT_INT_RX_DONE(0)) {
+		mt76x2_irq_disable(dev, MT_INT_RX_DONE(0));
+		napi_schedule(&dev->mt76.napi[0]);
+	}
+
+	if (intr & MT_INT_RX_DONE(1)) {
+		mt76x2_irq_disable(dev, MT_INT_RX_DONE(1));
+		napi_schedule(&dev->mt76.napi[1]);
+	}
+
+	if (intr & MT_INT_PRE_TBTT)
+		tasklet_schedule(&dev->pre_tbtt_tasklet);
+
+	/* send buffered multicast frames now */
+	if (intr & MT_INT_TBTT)
+		mt76_queue_kick(dev, &dev->mt76.q_tx[MT_TXQ_PSD]);
+
+	if (intr & MT_INT_TX_STAT) {
+		mt76x2_mac_poll_tx_status(dev, true);
+		tasklet_schedule(&dev->tx_tasklet);
+	}
+
+	if (intr & MT_INT_GPTIMER) {
+		mt76x2_irq_disable(dev, MT_INT_GPTIMER);
+		tasklet_schedule(&dev->dfs_pd.dfs_tasklet);
+	}
+
+	return IRQ_HANDLED;
+}
+
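The handler above follows a strict top-half discipline: acknowledge everything first (the pending bits are written back to MT_INT_SOURCE_CSR to ack them), then mask each active source and defer the real work to NAPI or a tasklet; the bottom half re-arms the source once the ring is drained, as mt76x2_rx_poll_complete() does for RX. Condensed as a sketch (the demo_ name is illustrative):

	static void demo_kick_rx(struct mt76x2_dev *dev, enum mt76_rxq_id q)
	{
		/* mask first so the source cannot refire ... */
		mt76x2_irq_disable(dev, MT_INT_RX_DONE(q));
		/* ... then drain in softirq context; the NAPI poll loop
		 * re-enables the source via drv->rx_poll_complete()
		 */
		napi_schedule(&dev->mt76.napi[q]);
	}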
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2_debugfs.c b/drivers/net/wireless/mediatek/mt76/mt76x2_debugfs.c
new file mode 100644
index 000000000000..612feb593d7d
--- /dev/null
+++ b/drivers/net/wireless/mediatek/mt76/mt76x2_debugfs.c
@@ -0,0 +1,133 @@
+/*
+ * Copyright (C) 2016 Felix Fietkau <nbd@nbd.name>
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include <linux/debugfs.h>
+#include "mt76x2.h"
+
+static int
+mt76x2_ampdu_stat_read(struct seq_file *file, void *data)
+{
+	struct mt76x2_dev *dev = file->private;
+	int i, j;
+
+	for (i = 0; i < 4; i++) {
+		seq_puts(file, "Length: ");
+		for (j = 0; j < 8; j++)
+			seq_printf(file, "%8d | ", i * 8 + j + 1);
+		seq_puts(file, "\n");
+		seq_puts(file, "Count:  ");
+		for (j = 0; j < 8; j++)
+			seq_printf(file, "%8d | ", dev->aggr_stats[i * 8 + j]);
+		seq_puts(file, "\n");
+		seq_puts(file, "--------");
+		for (j = 0; j < 8; j++)
+			seq_puts(file, "-----------");
+		seq_puts(file, "\n");
+	}
+
+	return 0;
+}
+
+static int
+mt76x2_ampdu_stat_open(struct inode *inode, struct file *f)
+{
+	return single_open(f, mt76x2_ampdu_stat_read, inode->i_private);
+}
+
+static void
+seq_puts_array(struct seq_file *file, const char *str, s8 *val, int len)
+{
+	int i;
+
+	seq_printf(file, "%10s:", str);
+	for (i = 0; i < len; i++)
+		seq_printf(file, " %2d", val[i]);
+	seq_puts(file, "\n");
+}
+
+static int read_txpower(struct seq_file *file, void *data)
+{
+	struct mt76x2_dev *dev = dev_get_drvdata(file->private);
+
+	seq_printf(file, "Target power: %d\n", dev->target_power);
+
+	seq_puts_array(file, "Delta", dev->target_power_delta,
+		       ARRAY_SIZE(dev->target_power_delta));
+	seq_puts_array(file, "CCK", dev->rate_power.cck,
+		       ARRAY_SIZE(dev->rate_power.cck));
+	seq_puts_array(file, "OFDM", dev->rate_power.ofdm,
+		       ARRAY_SIZE(dev->rate_power.ofdm));
+	seq_puts_array(file, "HT", dev->rate_power.ht,
+		       ARRAY_SIZE(dev->rate_power.ht));
+	seq_puts_array(file, "VHT", dev->rate_power.vht,
+		       ARRAY_SIZE(dev->rate_power.vht));
+	return 0;
+}
+
+static const struct file_operations fops_ampdu_stat = {
+	.open = mt76x2_ampdu_stat_open,
+	.read = seq_read,
+	.llseek = seq_lseek,
+	.release = single_release,
+};
+
+static int
+mt76x2_dfs_stat_read(struct seq_file *file, void *data)
+{
+	int i;
+	struct mt76x2_dev *dev = file->private;
+	struct mt76x2_dfs_pattern_detector *dfs_pd = &dev->dfs_pd;
+
+	for (i = 0; i < MT_DFS_NUM_ENGINES; i++) {
+		seq_printf(file, "engine: %d\n", i);
+		seq_printf(file, "  hw pattern detected:\t%d\n",
+			   dfs_pd->stats[i].hw_pattern);
+		seq_printf(file, "  hw pulse discarded:\t%d\n",
+			   dfs_pd->stats[i].hw_pulse_discarded);
+	}
+
+	return 0;
+}
+
+static int
+mt76x2_dfs_stat_open(struct inode *inode, struct file *f)
+{
+	return single_open(f, mt76x2_dfs_stat_read, inode->i_private);
+}
+
+static const struct file_operations fops_dfs_stat = {
+	.open = mt76x2_dfs_stat_open,
+	.read = seq_read,
+	.llseek = seq_lseek,
+	.release = single_release,
+};
+
+void mt76x2_init_debugfs(struct mt76x2_dev *dev)
+{
+	struct dentry *dir;
+
+	dir = mt76_register_debugfs(&dev->mt76);
+	if (!dir)
+		return;
+
+	debugfs_create_u8("temperature", S_IRUSR, dir, &dev->cal.temp);
+	debugfs_create_bool("tpc", S_IRUSR | S_IWUSR, dir, &dev->enable_tpc);
+
+	debugfs_create_file("ampdu_stat", S_IRUSR, dir, dev, &fops_ampdu_stat);
+	debugfs_create_file("dfs_stats", S_IRUSR, dir, dev, &fops_dfs_stat);
+	debugfs_create_devm_seqfile(dev->mt76.dev, "txpower", dir,
+				    read_txpower);
+}
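Once mt76_register_debugfs() succeeds, these entries appear in the driver's "mt76" directory under the per-phy mac80211 debugfs tree (typically /sys/kernel/debug/ieee80211/phyN/mt76/, depending on where debugfs is mounted), so the A-MPDU length histogram, the DFS counters and the per-rate tx power can all be inspected at runtime with a plain cat.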
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2_dfs.c b/drivers/net/wireless/mediatek/mt76/mt76x2_dfs.c
new file mode 100644
index 000000000000..f936dc9a5476
--- /dev/null
+++ b/drivers/net/wireless/mediatek/mt76/mt76x2_dfs.c
@@ -0,0 +1,506 @@
+/*
+ * Copyright (C) 2016 Lorenzo Bianconi <lorenzo.bianconi83@gmail.com>
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include "mt76x2.h"
+
+#define RADAR_SPEC(m, len, el, eh, wl, wh,		\
+		   w_tolerance, tl, th, t_tolerance,	\
+		   bl, bh, event_exp, power_jmp)	\
+{							\
+	.mode = m,					\
+	.avg_len = len,					\
+	.e_low = el,					\
+	.e_high = eh,					\
+	.w_low = wl,					\
+	.w_high = wh,					\
+	.w_margin = w_tolerance,			\
+	.t_low = tl,					\
+	.t_high = th,					\
+	.t_margin = t_tolerance,			\
+	.b_low = bl,					\
+	.b_high = bh,					\
+	.event_expiration = event_exp,			\
+	.pwr_jmp = power_jmp				\
+}
+
+static const struct mt76x2_radar_specs etsi_radar_specs[] = {
+	/* 20MHz */
+	RADAR_SPEC(0, 8, 2, 15, 106, 150, 10, 4900, 100096, 10, 0,
+		   0x7fffffff, 0x155cc0, 0x19cc),
+	RADAR_SPEC(0, 40, 4, 59, 96, 380, 150, 4900, 100096, 40, 0,
+		   0x7fffffff, 0x155cc0, 0x19cc),
+	RADAR_SPEC(3, 60, 20, 46, 300, 640, 80, 4900, 10100, 80, 0,
+		   0x7fffffff, 0x155cc0, 0x19dd),
+	RADAR_SPEC(8, 8, 2, 9, 106, 150, 32, 4900, 296704, 32, 0,
+		   0x7fffffff, 0x2191c0, 0x15cc),
+	/* 40MHz */
+	RADAR_SPEC(0, 8, 2, 15, 106, 150, 10, 4900, 100096, 10, 0,
+		   0x7fffffff, 0x155cc0, 0x19cc),
+	RADAR_SPEC(0, 40, 4, 59, 96, 380, 150, 4900, 100096, 40, 0,
+		   0x7fffffff, 0x155cc0, 0x19cc),
+	RADAR_SPEC(3, 60, 20, 46, 300, 640, 80, 4900, 10100, 80, 0,
+		   0x7fffffff, 0x155cc0, 0x19dd),
+	RADAR_SPEC(8, 8, 2, 9, 106, 150, 32, 4900, 296704, 32, 0,
+		   0x7fffffff, 0x2191c0, 0x15cc),
+	/* 80MHz */
+	RADAR_SPEC(0, 8, 2, 15, 106, 150, 10, 4900, 100096, 10, 0,
+		   0x7fffffff, 0x155cc0, 0x19cc),
+	RADAR_SPEC(0, 40, 4, 59, 96, 380, 150, 4900, 100096, 40, 0,
+		   0x7fffffff, 0x155cc0, 0x19cc),
+	RADAR_SPEC(3, 60, 20, 46, 300, 640, 80, 4900, 10100, 80, 0,
+		   0x7fffffff, 0x155cc0, 0x19dd),
+	RADAR_SPEC(8, 8, 2, 9, 106, 150, 32, 4900, 296704, 32, 0,
+		   0x7fffffff, 0x2191c0, 0x15cc)
+};
+
+static const struct mt76x2_radar_specs fcc_radar_specs[] = {
+	/* 20MHz */
+	RADAR_SPEC(0, 8, 2, 12, 106, 150, 5, 2900, 80100, 5, 0,
+		   0x7fffffff, 0xfe808, 0x13dc),
+	RADAR_SPEC(0, 8, 2, 7, 106, 140, 5, 27600, 27900, 5, 0,
+		   0x7fffffff, 0xfe808, 0x19dd),
+	RADAR_SPEC(0, 40, 4, 54, 96, 480, 150, 2900, 80100, 40, 0,
+		   0x7fffffff, 0xfe808, 0x12cc),
+	RADAR_SPEC(2, 60, 15, 63, 640, 2080, 32, 19600, 40200, 32, 0,
+		   0x3938700, 0x57bcf00, 0x1289),
+	/* 40MHz */
+	RADAR_SPEC(0, 8, 2, 12, 106, 150, 5, 2900, 80100, 5, 0,
+		   0x7fffffff, 0xfe808, 0x13dc),
+	RADAR_SPEC(0, 8, 2, 7, 106, 140, 5, 27600, 27900, 5, 0,
+		   0x7fffffff, 0xfe808, 0x19dd),
+	RADAR_SPEC(0, 40, 4, 54, 96, 480, 150, 2900, 80100, 40, 0,
+		   0x7fffffff, 0xfe808, 0x12cc),
+	RADAR_SPEC(2, 60, 15, 63, 640, 2080, 32, 19600, 40200, 32, 0,
+		   0x3938700, 0x57bcf00, 0x1289),
+	/* 80MHz */
+	RADAR_SPEC(0, 8, 2, 14, 106, 150, 15, 2900, 80100, 15, 0,
+		   0x7fffffff, 0xfe808, 0x16cc),
+	RADAR_SPEC(0, 8, 2, 7, 106, 140, 5, 27600, 27900, 5, 0,
+		   0x7fffffff, 0xfe808, 0x19dd),
+	RADAR_SPEC(0, 40, 4, 54, 96, 480, 150, 2900, 80100, 40, 0,
+		   0x7fffffff, 0xfe808, 0x12cc),
+	RADAR_SPEC(2, 60, 15, 63, 640, 2080, 32, 19600, 40200, 32, 0,
+		   0x3938700, 0x57bcf00, 0x1289)
+};
+
+static const struct mt76x2_radar_specs jp_w56_radar_specs[] = {
+	/* 20MHz */
+	RADAR_SPEC(0, 8, 2, 7, 106, 150, 5, 2900, 80100, 5, 0,
+		   0x7fffffff, 0x14c080, 0x13dc),
+	RADAR_SPEC(0, 8, 2, 7, 106, 140, 5, 27600, 27900, 5, 0,
+		   0x7fffffff, 0x14c080, 0x19dd),
+	RADAR_SPEC(0, 40, 4, 44, 96, 480, 150, 2900, 80100, 40, 0,
+		   0x7fffffff, 0x14c080, 0x12cc),
+	RADAR_SPEC(2, 60, 15, 48, 940, 2080, 32, 19600, 40200, 32, 0,
+		   0x3938700, 0x57bcf00, 0x1289),
+	/* 40MHz */
+	RADAR_SPEC(0, 8, 2, 7, 106, 150, 5, 2900, 80100, 5, 0,
+		   0x7fffffff, 0x14c080, 0x13dc),
+	RADAR_SPEC(0, 8, 2, 7, 106, 140, 5, 27600, 27900, 5, 0,
+		   0x7fffffff, 0x14c080, 0x19dd),
+	RADAR_SPEC(0, 40, 4, 44, 96, 480, 150, 2900, 80100, 40, 0,
+		   0x7fffffff, 0x14c080, 0x12cc),
+	RADAR_SPEC(2, 60, 15, 48, 940, 2080, 32, 19600, 40200, 32, 0,
+		   0x3938700, 0x57bcf00, 0x1289),
+	/* 80MHz */
+	RADAR_SPEC(0, 8, 2, 9, 106, 150, 15, 2900, 80100, 15, 0,
+		   0x7fffffff, 0x14c080, 0x16cc),
+	RADAR_SPEC(0, 8, 2, 7, 106, 140, 5, 27600, 27900, 5, 0,
+		   0x7fffffff, 0x14c080, 0x19dd),
+	RADAR_SPEC(0, 40, 4, 44, 96, 480, 150, 2900, 80100, 40, 0,
+		   0x7fffffff, 0x14c080, 0x12cc),
+	RADAR_SPEC(2, 60, 15, 48, 940, 2080, 32, 19600, 40200, 32, 0,
+		   0x3938700, 0x57bcf00, 0x1289)
+};
+
+static const struct mt76x2_radar_specs jp_w53_radar_specs[] = {
+	/* 20MHz */
+	RADAR_SPEC(0, 8, 2, 9, 106, 150, 20, 28400, 77000, 20, 0,
+		   0x7fffffff, 0x14c080, 0x16cc),
+	{ 0 },
+	RADAR_SPEC(0, 40, 4, 44, 96, 200, 150, 28400, 77000, 60, 0,
+		   0x7fffffff, 0x14c080, 0x16cc),
+	{ 0 },
+	/* 40MHz */
+	RADAR_SPEC(0, 8, 2, 9, 106, 150, 20, 28400, 77000, 20, 0,
+		   0x7fffffff, 0x14c080, 0x16cc),
+	{ 0 },
+	RADAR_SPEC(0, 40, 4, 44, 96, 200, 150, 28400, 77000, 60, 0,
+		   0x7fffffff, 0x14c080, 0x16cc),
+	{ 0 },
+	/* 80MHz */
+	RADAR_SPEC(0, 8, 2, 9, 106, 150, 20, 28400, 77000, 20, 0,
+		   0x7fffffff, 0x14c080, 0x16cc),
+	{ 0 },
+	RADAR_SPEC(0, 40, 4, 44, 96, 200, 150, 28400, 77000, 60, 0,
+		   0x7fffffff, 0x14c080, 0x16cc),
+	{ 0 }
+};
+
+static void mt76x2_dfs_set_capture_mode_ctrl(struct mt76x2_dev *dev,
+					     u8 enable)
+{
+	u32 data;
+
+	data = (1 << 1) | enable;
+	mt76_wr(dev, MT_BBP(DFS, 36), data);
+}
+
+static bool mt76x2_dfs_check_chirp(struct mt76x2_dev *dev)
+{
+	bool ret = false;
+	u32 current_ts, delta_ts;
+	struct mt76x2_dfs_pattern_detector *dfs_pd = &dev->dfs_pd;
+
+	current_ts = mt76_rr(dev, MT_PBF_LIFE_TIMER);
+	delta_ts = current_ts - dfs_pd->chirp_pulse_ts;
+	dfs_pd->chirp_pulse_ts = current_ts;
+
+	/* 12 sec */
+	if (delta_ts <= (12 * (1 << 20))) {
+		if (++dfs_pd->chirp_pulse_cnt > 8)
+			ret = true;
+	} else {
+		dfs_pd->chirp_pulse_cnt = 1;
+	}
+
+	return ret;
+}
+
+static void mt76x2_dfs_get_hw_pulse(struct mt76x2_dev *dev,
+				    struct mt76x2_dfs_hw_pulse *pulse)
+{
+	u32 data;
+
+	/* select channel */
+	data = (MT_DFS_CH_EN << 16) | pulse->engine;
+	mt76_wr(dev, MT_BBP(DFS, 0), data);
+
+	/* reported period */
+	pulse->period = mt76_rr(dev, MT_BBP(DFS, 19));
+
+	/* reported width */
+	pulse->w1 = mt76_rr(dev, MT_BBP(DFS, 20));
+	pulse->w2 = mt76_rr(dev, MT_BBP(DFS, 23));
+
+	/* reported burst number */
+	pulse->burst = mt76_rr(dev, MT_BBP(DFS, 22));
+}
+
+static bool mt76x2_dfs_check_hw_pulse(struct mt76x2_dev *dev,
+				      struct mt76x2_dfs_hw_pulse *pulse)
+{
+	bool ret = false;
+
+	if (!pulse->period || !pulse->w1)
+		return false;
+
+	switch (dev->dfs_pd.region) {
+	case NL80211_DFS_FCC:
+		if (pulse->engine > 3)
+			break;
+
+		if (pulse->engine == 3) {
+			ret = mt76x2_dfs_check_chirp(dev);
+			break;
+		}
+
+		/* check short pulse */
+		if (pulse->w1 < 120)
+			ret = (pulse->period >= 2900 &&
+			       (pulse->period <= 4700 ||
+				pulse->period >= 6400) &&
+			       (pulse->period <= 6800 ||
+				pulse->period >= 10200) &&
+			       pulse->period <= 61600);
+		else if (pulse->w1 < 130) /* 120 - 130 */
+			ret = (pulse->period >= 2900 &&
+			       pulse->period <= 61600);
+		else
+			ret = (pulse->period >= 3500 &&
+			       pulse->period <= 10100);
+		break;
+	case NL80211_DFS_ETSI:
+		if (pulse->engine >= 3)
+			break;
+
+		ret = (pulse->period >= 4900 &&
+		       (pulse->period <= 10200 ||
+			pulse->period >= 12400) &&
+		       pulse->period <= 100100);
+		break;
+	case NL80211_DFS_JP:
+		if (dev->mt76.chandef.chan->center_freq >= 5250 &&
+		    dev->mt76.chandef.chan->center_freq <= 5350) {
+			/* JPW53 */
+			if (pulse->w1 <= 130)
+				ret = (pulse->period >= 28360 &&
+				       (pulse->period <= 28700 ||
+					pulse->period >= 76900) &&
+				       pulse->period <= 76940);
+			break;
+		}
+
+		if (pulse->engine > 3)
+			break;
+
+		if (pulse->engine == 3) {
+			ret = mt76x2_dfs_check_chirp(dev);
+			break;
+		}
+
+		/* check short pulse */
+		if (pulse->w1 < 120)
+			ret = (pulse->period >= 2900 &&
+			       (pulse->period <= 4700 ||
+				pulse->period >= 6400) &&
+			       (pulse->period <= 6800 ||
+				pulse->period >= 27560) &&
+			       (pulse->period <= 27960 ||
+				pulse->period >= 28360) &&
+			       (pulse->period <= 28700 ||
+				pulse->period >= 79900) &&
+			       pulse->period <= 80100);
+		else if (pulse->w1 < 130) /* 120 - 130 */
+			ret = (pulse->period >= 2900 &&
+			       (pulse->period <= 10100 ||
+				pulse->period >= 27560) &&
+			       (pulse->period <= 27960 ||
+				pulse->period >= 28360) &&
+			       (pulse->period <= 28700 ||
+				pulse->period >= 79900) &&
+			       pulse->period <= 80100);
+		else
+			ret = (pulse->period >= 3900 &&
+			       pulse->period <= 10100);
+		break;
+	case NL80211_DFS_UNSET:
+	default:
+		return false;
+	}
+
+	return ret;
+}
+
+static void mt76x2_dfs_tasklet(unsigned long arg)
+{
+	struct mt76x2_dev *dev = (struct mt76x2_dev *)arg;
+	struct mt76x2_dfs_pattern_detector *dfs_pd = &dev->dfs_pd;
+	u32 engine_mask;
+	int i;
+
+	if (test_bit(MT76_SCANNING, &dev->mt76.state))
+		goto out;
+
+	engine_mask = mt76_rr(dev, MT_BBP(DFS, 1));
+	if (!(engine_mask & 0xf))
+		goto out;
+
+	for (i = 0; i < MT_DFS_NUM_ENGINES; i++) {
+		struct mt76x2_dfs_hw_pulse pulse;
+
+		if (!(engine_mask & (1 << i)))
+			continue;
+
+		pulse.engine = i;
+		mt76x2_dfs_get_hw_pulse(dev, &pulse);
+
+		if (!mt76x2_dfs_check_hw_pulse(dev, &pulse)) {
+			dfs_pd->stats[i].hw_pulse_discarded++;
+			continue;
+		}
+
+		/* hw detector received a radar pattern */
+		dfs_pd->stats[i].hw_pattern++;
+		ieee80211_radar_detected(dev->mt76.hw);
+
+		/* reset hw detector */
+		mt76_wr(dev, MT_BBP(DFS, 1), 0xf);
+
+		return;
+	}
+
+	/* reset hw detector */
+	mt76_wr(dev, MT_BBP(DFS, 1), 0xf);
+
+out:
+	mt76x2_irq_enable(dev, MT_INT_GPTIMER);
+}
+
+static void mt76x2_dfs_set_bbp_params(struct mt76x2_dev *dev)
+{
+	u32 data;
+	u8 i, shift;
+	const struct mt76x2_radar_specs *radar_specs;
+
+	switch (dev->mt76.chandef.width) {
+	case NL80211_CHAN_WIDTH_40:
+		shift = MT_DFS_NUM_ENGINES;
+		break;
+	case NL80211_CHAN_WIDTH_80:
+		shift = 2 * MT_DFS_NUM_ENGINES;
+		break;
+	default:
+		shift = 0;
+		break;
+	}
+
+	switch (dev->dfs_pd.region) {
+	case NL80211_DFS_FCC:
+		radar_specs = &fcc_radar_specs[shift];
+		break;
+	case NL80211_DFS_ETSI:
+		radar_specs = &etsi_radar_specs[shift];
+		break;
+	case NL80211_DFS_JP:
+		if (dev->mt76.chandef.chan->center_freq >= 5250 &&
+		    dev->mt76.chandef.chan->center_freq <= 5350)
+			radar_specs = &jp_w53_radar_specs[shift];
+		else
+			radar_specs = &jp_w56_radar_specs[shift];
+		break;
+	case NL80211_DFS_UNSET:
+	default:
+		return;
+	}
+
+	data = (MT_DFS_VGA_MASK << 16) |
+	       (MT_DFS_PWR_GAIN_OFFSET << 12) |
+	       (MT_DFS_PWR_DOWN_TIME << 8) |
+	       (MT_DFS_SYM_ROUND << 4) |
+	       (MT_DFS_DELTA_DELAY & 0xf);
+	mt76_wr(dev, MT_BBP(DFS, 2), data);
+
+	data = (MT_DFS_RX_PE_MASK << 16) | MT_DFS_PKT_END_MASK;
+	mt76_wr(dev, MT_BBP(DFS, 3), data);
+
+	for (i = 0; i < MT_DFS_NUM_ENGINES; i++) {
+		/* configure engine */
+		mt76_wr(dev, MT_BBP(DFS, 0), i);
+
+		/* detection mode + avg_len */
+		data = ((radar_specs[i].avg_len & 0x1ff) << 16) |
+		       (radar_specs[i].mode & 0xf);
+		mt76_wr(dev, MT_BBP(DFS, 4), data);
+
+		/* dfs energy */
+		data = ((radar_specs[i].e_high & 0x0fff) << 16) |
+		       (radar_specs[i].e_low & 0x0fff);
+		mt76_wr(dev, MT_BBP(DFS, 5), data);
+
+		/* dfs period */
+		mt76_wr(dev, MT_BBP(DFS, 7), radar_specs[i].t_low);
+		mt76_wr(dev, MT_BBP(DFS, 9), radar_specs[i].t_high);
+
+		/* dfs burst */
+		mt76_wr(dev, MT_BBP(DFS, 11), radar_specs[i].b_low);
+		mt76_wr(dev, MT_BBP(DFS, 13), radar_specs[i].b_high);
+
+		/* dfs width */
+		data = ((radar_specs[i].w_high & 0x0fff) << 16) |
+		       (radar_specs[i].w_low & 0x0fff);
+		mt76_wr(dev, MT_BBP(DFS, 14), data);
+
+		/* dfs margins */
+		data = (radar_specs[i].w_margin << 16) |
+		       radar_specs[i].t_margin;
+		mt76_wr(dev, MT_BBP(DFS, 15), data);
+
+		/* dfs event expiration */
+		mt76_wr(dev, MT_BBP(DFS, 17), radar_specs[i].event_expiration);
+
+		/* dfs pwr adj */
+		mt76_wr(dev, MT_BBP(DFS, 30), radar_specs[i].pwr_jmp);
+	}
+
+	/* reset status */
+	mt76_wr(dev, MT_BBP(DFS, 1), 0xf);
+	mt76_wr(dev, MT_BBP(DFS, 36), 0x3);
+
+	/* enable detection */
+	mt76_wr(dev, MT_BBP(DFS, 0), MT_DFS_CH_EN << 16);
+	mt76_wr(dev, 0x212c, 0x0c350001);
+}
+
+void mt76x2_dfs_adjust_agc(struct mt76x2_dev *dev)
+{
+	u32 agc_r8, agc_r4, val_r8, val_r4, dfs_r31;
+
+	agc_r8 = mt76_rr(dev, MT_BBP(AGC, 8));
+	agc_r4 = mt76_rr(dev, MT_BBP(AGC, 4));
+
+	val_r8 = (agc_r8 & 0x00007e00) >> 9;
+	val_r4 = agc_r4 & ~0x1f000000;
+	val_r4 += (((val_r8 + 1) >> 1) << 24);
+	mt76_wr(dev, MT_BBP(AGC, 4), val_r4);
+
+	dfs_r31 = FIELD_GET(MT_BBP_AGC_LNA_HIGH_GAIN, val_r4);
+	dfs_r31 += val_r8;
+	dfs_r31 -= (agc_r8 & 0x00000038) >> 3;
+	dfs_r31 = (dfs_r31 << 16) | 0x00000307;
+	mt76_wr(dev, MT_BBP(DFS, 31), dfs_r31);
+
+	mt76_wr(dev, MT_BBP(DFS, 32), 0x00040071);
+}
+
+void mt76x2_dfs_init_params(struct mt76x2_dev *dev)
+{
+	struct cfg80211_chan_def *chandef = &dev->mt76.chandef;
+
+	if ((chandef->chan->flags & IEEE80211_CHAN_RADAR) &&
+	    dev->dfs_pd.region != NL80211_DFS_UNSET) {
+		mt76x2_dfs_set_bbp_params(dev);
+		/* enable debug mode */
+		mt76x2_dfs_set_capture_mode_ctrl(dev, true);
+
+		mt76x2_irq_enable(dev, MT_INT_GPTIMER);
+		mt76_rmw_field(dev, MT_INT_TIMER_EN,
+			       MT_INT_TIMER_EN_GP_TIMER_EN, 1);
+	} else {
+		/* disable hw detector */
+		mt76_wr(dev, MT_BBP(DFS, 0), 0);
+		/* clear detector status */
+		mt76_wr(dev, MT_BBP(DFS, 1), 0xf);
+		mt76_wr(dev, 0x212c, 0);
+
+		mt76x2_irq_disable(dev, MT_INT_GPTIMER);
+		mt76_rmw_field(dev, MT_INT_TIMER_EN,
+			       MT_INT_TIMER_EN_GP_TIMER_EN, 0);
+	}
+}
+
+void mt76x2_dfs_init_detector(struct mt76x2_dev *dev)
+{
+	struct mt76x2_dfs_pattern_detector *dfs_pd = &dev->dfs_pd;
+
+	dfs_pd->region = NL80211_DFS_UNSET;
+	tasklet_init(&dfs_pd->dfs_tasklet, mt76x2_dfs_tasklet,
+		     (unsigned long)dev);
+}
+
+void mt76x2_dfs_set_domain(struct mt76x2_dev *dev,
+			   enum nl80211_dfs_regions region)
+{
+	struct mt76x2_dfs_pattern_detector *dfs_pd = &dev->dfs_pd;
+
+	if (dfs_pd->region != region) {
+		tasklet_disable(&dfs_pd->dfs_tasklet);
+		dfs_pd->region = region;
+		mt76x2_dfs_init_params(dev);
+		tasklet_enable(&dfs_pd->dfs_tasklet);
+	}
+}
+
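The chirp check in mt76x2_dfs_check_chirp() treats MT_PBF_LIFE_TIMER as a free-running timestamp; assuming it ticks roughly once per microsecond, 12 * (1 << 20) ticks is the ~12 s window the inline comment refers to, and radar is only reported after more than eight chirp pulses inside one window. The accounting reduces to this sketch (names illustrative):

	static bool demo_chirp_seen(u32 delta_ts, u8 *cnt)
	{
		if (delta_ts <= 12 * (1 << 20))	/* inside the ~12 s window */
			return ++(*cnt) > 8;	/* ninth pulse -> radar */

		*cnt = 1;			/* window expired, restart */
		return false;
	}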
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2_dfs.h b/drivers/net/wireless/mediatek/mt76/mt76x2_dfs.h
new file mode 100644
index 000000000000..8dbc783cc6bc
--- /dev/null
+++ b/drivers/net/wireless/mediatek/mt76/mt76x2_dfs.h
@@ -0,0 +1,82 @@
+/*
+ * Copyright (C) 2016 Lorenzo Bianconi <lorenzo.bianconi83@gmail.com>
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#ifndef __MT76x2_DFS_H
+#define __MT76x2_DFS_H
+
+#include <linux/types.h>
+#include <linux/nl80211.h>
+
+#define MT_DFS_GP_INTERVAL		(10 << 4) /* 64 us unit */
+#define MT_DFS_NUM_ENGINES		4
+
+/* bbp params */
+#define MT_DFS_SYM_ROUND		0
+#define MT_DFS_DELTA_DELAY		2
+#define MT_DFS_VGA_MASK			0
+#define MT_DFS_PWR_GAIN_OFFSET		3
+#define MT_DFS_PWR_DOWN_TIME		0xf
+#define MT_DFS_RX_PE_MASK		0xff
+#define MT_DFS_PKT_END_MASK		0
+#define MT_DFS_CH_EN			0xf
+
+struct mt76x2_radar_specs {
+	u8 mode;
+	u16 avg_len;
+	u16 e_low;
+	u16 e_high;
+	u16 w_low;
+	u16 w_high;
+	u16 w_margin;
+	u32 t_low;
+	u32 t_high;
+	u16 t_margin;
+	u32 b_low;
+	u32 b_high;
+	u32 event_expiration;
+	u16 pwr_jmp;
+};
+
+struct mt76x2_dfs_hw_pulse {
+	u8 engine;
+	u32 period;
+	u32 w1;
+	u32 w2;
+	u32 burst;
+};
+
+struct mt76x2_dfs_engine_stats {
+	u32 hw_pattern;
+	u32 hw_pulse_discarded;
+};
+
+struct mt76x2_dfs_pattern_detector {
+	enum nl80211_dfs_regions region;
+
+	u8 chirp_pulse_cnt;
+	u32 chirp_pulse_ts;
+
+	struct mt76x2_dfs_engine_stats stats[MT_DFS_NUM_ENGINES];
+	struct tasklet_struct dfs_tasklet;
+};
+
+void mt76x2_dfs_init_params(struct mt76x2_dev *dev);
+void mt76x2_dfs_init_detector(struct mt76x2_dev *dev);
+void mt76x2_dfs_adjust_agc(struct mt76x2_dev *dev);
+void mt76x2_dfs_set_domain(struct mt76x2_dev *dev,
+			   enum nl80211_dfs_regions region);
+
+#endif /* __MT76x2_DFS_H */
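MT_DFS_NUM_ENGINES also fixes the shape of the radar spec tables in mt76x2_dfs.c: each table holds three bandwidth blocks (20/40/80 MHz) of four engine entries, which is why mt76x2_dfs_set_bbp_params() selects a block with shift = 0, MT_DFS_NUM_ENGINES or 2 * MT_DFS_NUM_ENGINES before indexing per engine. As a one-line sketch (name illustrative):

	/* flat index of engine e in bandwidth block bw (0=20, 1=40, 2=80 MHz) */
	static inline int demo_radar_spec_idx(int bw, int e)
	{
		return bw * MT_DFS_NUM_ENGINES + e;
	}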
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2_dma.c b/drivers/net/wireless/mediatek/mt76/mt76x2_dma.c
new file mode 100644
index 000000000000..fd1ec4743e0b
--- /dev/null
+++ b/drivers/net/wireless/mediatek/mt76/mt76x2_dma.c
@@ -0,0 +1,184 @@
+/*
+ * Copyright (C) 2016 Felix Fietkau <nbd@nbd.name>
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include "mt76x2.h"
+#include "mt76x2_dma.h"
+
+int
+mt76x2_tx_queue_mcu(struct mt76x2_dev *dev, enum mt76_txq_id qid,
+		    struct sk_buff *skb, int cmd, int seq)
+{
+	struct mt76_queue *q = &dev->mt76.q_tx[qid];
+	struct mt76_queue_buf buf;
+	dma_addr_t addr;
+	u32 tx_info;
+
+	tx_info = MT_MCU_MSG_TYPE_CMD |
+		  FIELD_PREP(MT_MCU_MSG_CMD_TYPE, cmd) |
+		  FIELD_PREP(MT_MCU_MSG_CMD_SEQ, seq) |
+		  FIELD_PREP(MT_MCU_MSG_PORT, CPU_TX_PORT) |
+		  FIELD_PREP(MT_MCU_MSG_LEN, skb->len);
+
+	addr = dma_map_single(dev->mt76.dev, skb->data, skb->len,
+			      DMA_TO_DEVICE);
+	if (dma_mapping_error(dev->mt76.dev, addr))
+		return -ENOMEM;
+
+	buf.addr = addr;
+	buf.len = skb->len;
+	spin_lock_bh(&q->lock);
+	mt76_queue_add_buf(dev, q, &buf, 1, tx_info, skb, NULL);
+	mt76_queue_kick(dev, q);
+	spin_unlock_bh(&q->lock);
+
+	return 0;
+}
+
+static int
+mt76x2_init_tx_queue(struct mt76x2_dev *dev, struct mt76_queue *q,
+		     int idx, int n_desc)
+{
+	int ret;
+
+	q->regs = dev->mt76.regs + MT_TX_RING_BASE + idx * MT_RING_SIZE;
+	q->ndesc = n_desc;
+	q->hw_idx = idx;
+
+	ret = mt76_queue_alloc(dev, q);
+	if (ret)
+		return ret;
+
+	mt76x2_irq_enable(dev, MT_INT_TX_DONE(idx));
+
+	return 0;
+}
+
+void mt76x2_queue_rx_skb(struct mt76_dev *mdev, enum mt76_rxq_id q,
+			 struct sk_buff *skb)
+{
+	struct mt76x2_dev *dev = container_of(mdev, struct mt76x2_dev, mt76);
+	void *rxwi = skb->data;
+
+	if (q == MT_RXQ_MCU) {
+		skb_queue_tail(&dev->mcu.res_q, skb);
+		wake_up(&dev->mcu.wait);
+		return;
+	}
+
+	skb_pull(skb, sizeof(struct mt76x2_rxwi));
+	if (mt76x2_mac_process_rx(dev, skb, rxwi)) {
+		dev_kfree_skb(skb);
+		return;
+	}
+
+	mt76_rx(&dev->mt76, q, skb);
+}
+
+static int
+mt76x2_init_rx_queue(struct mt76x2_dev *dev, struct mt76_queue *q,
+		     int idx, int n_desc, int bufsize)
+{
+	int ret;
+
+	q->regs = dev->mt76.regs + MT_RX_RING_BASE + idx * MT_RING_SIZE;
+	q->ndesc = n_desc;
+	q->buf_size = bufsize;
+
+	ret = mt76_queue_alloc(dev, q);
+	if (ret)
+		return ret;
+
+	mt76x2_irq_enable(dev, MT_INT_RX_DONE(idx));
+
+	return 0;
+}
+
+static void
+mt76x2_tx_tasklet(unsigned long data)
+{
+	struct mt76x2_dev *dev = (struct mt76x2_dev *) data;
+	int i;
+
+	mt76x2_mac_process_tx_status_fifo(dev);
+
+	for (i = MT_TXQ_MCU; i >= 0; i--)
+		mt76_queue_tx_cleanup(dev, i, false);
+
+	mt76x2_mac_poll_tx_status(dev, false);
+	mt76x2_irq_enable(dev, MT_INT_TX_DONE_ALL);
+}
+
+int mt76x2_dma_init(struct mt76x2_dev *dev)
+{
+	static const u8 wmm_queue_map[] = {
+		[IEEE80211_AC_BE] = 0,
+		[IEEE80211_AC_BK] = 1,
+		[IEEE80211_AC_VI] = 2,
+		[IEEE80211_AC_VO] = 3,
+	};
+	int ret;
+	int i;
+	struct mt76_txwi_cache __maybe_unused *t;
+	struct mt76_queue *q;
+
+	BUILD_BUG_ON(sizeof(t->txwi) < sizeof(struct mt76x2_txwi));
+	BUILD_BUG_ON(sizeof(struct mt76x2_rxwi) > MT_RX_HEADROOM);
+
+	mt76_dma_attach(&dev->mt76);
+
+	init_waitqueue_head(&dev->mcu.wait);
+	skb_queue_head_init(&dev->mcu.res_q);
+
+	tasklet_init(&dev->tx_tasklet, mt76x2_tx_tasklet, (unsigned long) dev);
+
+	mt76_wr(dev, MT_WPDMA_RST_IDX, ~0);
+
+	for (i = 0; i < ARRAY_SIZE(wmm_queue_map); i++) {
+		ret = mt76x2_init_tx_queue(dev, &dev->mt76.q_tx[i],
+					   wmm_queue_map[i], MT_TX_RING_SIZE);
+		if (ret)
+			return ret;
+	}
+
+	ret = mt76x2_init_tx_queue(dev, &dev->mt76.q_tx[MT_TXQ_PSD],
+				   MT_TX_HW_QUEUE_MGMT, MT_TX_RING_SIZE);
+	if (ret)
+		return ret;
+
+	ret = mt76x2_init_tx_queue(dev, &dev->mt76.q_tx[MT_TXQ_MCU],
+				   MT_TX_HW_QUEUE_MCU, MT_MCU_RING_SIZE);
+	if (ret)
+		return ret;
+
+	ret = mt76x2_init_rx_queue(dev, &dev->mt76.q_rx[MT_RXQ_MCU], 1,
+				   MT_MCU_RING_SIZE, MT_RX_BUF_SIZE);
+	if (ret)
+		return ret;
+
+	q = &dev->mt76.q_rx[MT_RXQ_MAIN];
+	q->buf_offset = MT_RX_HEADROOM - sizeof(struct mt76x2_rxwi);
+	ret = mt76x2_init_rx_queue(dev, q, 0, MT76x2_RX_RING_SIZE, MT_RX_BUF_SIZE);
+	if (ret)
+		return ret;
+
+	return mt76_init_queues(dev);
+}
+
+void mt76x2_dma_cleanup(struct mt76x2_dev *dev)
+{
+	tasklet_kill(&dev->tx_tasklet);
+	mt76_dma_cleanup(&dev->mt76);
+}
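Two details in the DMA setup above are easy to miss: mac80211's AC numbering (IEEE80211_AC_VO = 0 through IEEE80211_AC_BK = 3) does not match the hardware WMM queue indices, hence the explicit wmm_queue_map; and the main RX ring's buf_offset leaves room inside MT_RX_HEADROOM so each received frame arrives prefixed by its RXWI descriptor, which mt76x2_queue_rx_skb() strips with skb_pull() before handing the frame to mt76_rx().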
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2_dma.h b/drivers/net/wireless/mediatek/mt76/mt76x2_dma.h
new file mode 100644
index 000000000000..47f79d83fcb4
--- /dev/null
+++ b/drivers/net/wireless/mediatek/mt76/mt76x2_dma.h
@@ -0,0 +1,68 @@
+/*
+ * Copyright (C) 2016 Felix Fietkau <nbd@nbd.name>
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#ifndef __MT76x2_DMA_H
+#define __MT76x2_DMA_H
+
+#include "dma.h"
+
+#define MT_TXD_INFO_LEN			GENMASK(13, 0)
+#define MT_TXD_INFO_NEXT_VLD		BIT(16)
+#define MT_TXD_INFO_TX_BURST		BIT(17)
+#define MT_TXD_INFO_80211		BIT(19)
+#define MT_TXD_INFO_TSO			BIT(20)
+#define MT_TXD_INFO_CSO			BIT(21)
+#define MT_TXD_INFO_WIV			BIT(24)
+#define MT_TXD_INFO_QSEL		GENMASK(26, 25)
+#define MT_TXD_INFO_TCO			BIT(29)
+#define MT_TXD_INFO_UCO			BIT(30)
+#define MT_TXD_INFO_ICO			BIT(31)
+
+#define MT_RX_FCE_INFO_LEN		GENMASK(13, 0)
+#define MT_RX_FCE_INFO_SELF_GEN		BIT(15)
+#define MT_RX_FCE_INFO_CMD_SEQ		GENMASK(19, 16)
+#define MT_RX_FCE_INFO_EVT_TYPE		GENMASK(23, 20)
+#define MT_RX_FCE_INFO_PCIE_INTR	BIT(24)
+#define MT_RX_FCE_INFO_QSEL		GENMASK(26, 25)
+#define MT_RX_FCE_INFO_D_PORT		GENMASK(29, 27)
+#define MT_RX_FCE_INFO_TYPE		GENMASK(31, 30)
+
+/* MCU request message header  */
+#define MT_MCU_MSG_LEN			GENMASK(15, 0)
+#define MT_MCU_MSG_CMD_SEQ		GENMASK(19, 16)
+#define MT_MCU_MSG_CMD_TYPE		GENMASK(26, 20)
+#define MT_MCU_MSG_PORT			GENMASK(29, 27)
+#define MT_MCU_MSG_TYPE			GENMASK(31, 30)
+#define MT_MCU_MSG_TYPE_CMD		BIT(30)
+
+enum mt76x2_qsel {
+	MT_QSEL_MGMT,
+	MT_QSEL_HCCA,
+	MT_QSEL_EDCA,
+	MT_QSEL_EDCA_2,
+};
+
+enum dma_msg_port {
+	WLAN_PORT,
+	CPU_RX_PORT,
+	CPU_TX_PORT,
+	HOST_PORT,
+	VIRTUAL_CPU_RX_PORT,
+	VIRTUAL_CPU_TX_PORT,
+	DISCARD,
+};
+
+#endif
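These GENMASK layouts pair with FIELD_PREP()/FIELD_GET() from <linux/bitfield.h>: mt76x2_tx_queue_mcu() composes its TX info word with FIELD_PREP, and an RX FCE info word can be taken apart the same way. A sketch of the decode direction (the demo_ struct and function are illustrative only):

	#include <linux/bitfield.h>

	struct demo_fce_hdr {
		u16 len;
		u8 cmd_seq;
		u8 evt_type;
	};

	static void demo_parse_fce_info(u32 info, struct demo_fce_hdr *h)
	{
		h->len = FIELD_GET(MT_RX_FCE_INFO_LEN, info);
		h->cmd_seq = FIELD_GET(MT_RX_FCE_INFO_CMD_SEQ, info);
		h->evt_type = FIELD_GET(MT_RX_FCE_INFO_EVT_TYPE, info);
	}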
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2_eeprom.c b/drivers/net/wireless/mediatek/mt76/mt76x2_eeprom.c
new file mode 100644
index 000000000000..9c9bf3e785ba
--- /dev/null
+++ b/drivers/net/wireless/mediatek/mt76/mt76x2_eeprom.c
@@ -0,0 +1,664 @@
+/*
+ * Copyright (C) 2016 Felix Fietkau <nbd@nbd.name>
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include <asm/unaligned.h>
+#include "mt76x2.h"
+#include "mt76x2_eeprom.h"
+
+#define EE_FIELD(_name, _value) [MT_EE_##_name] = (_value) | 1
+
+static int
+mt76x2_eeprom_copy(struct mt76x2_dev *dev, enum mt76x2_eeprom_field field,
+		   void *dest, int len)
+{
+	if (field + len > dev->mt76.eeprom.size)
+		return -1;
+
+	memcpy(dest, dev->mt76.eeprom.data + field, len);
+	return 0;
+}
+
+static int
+mt76x2_eeprom_get_macaddr(struct mt76x2_dev *dev)
+{
+	void *src = dev->mt76.eeprom.data + MT_EE_MAC_ADDR;
+
+	memcpy(dev->mt76.macaddr, src, ETH_ALEN);
+	return 0;
+}
+
+static void
+mt76x2_eeprom_parse_hw_cap(struct mt76x2_dev *dev)
+{
+	u16 val = mt76x2_eeprom_get(dev, MT_EE_NIC_CONF_0);
+
+	switch (FIELD_GET(MT_EE_NIC_CONF_0_BOARD_TYPE, val)) {
+	case BOARD_TYPE_5GHZ:
+		dev->mt76.cap.has_5ghz = true;
+		break;
+	case BOARD_TYPE_2GHZ:
+		dev->mt76.cap.has_2ghz = true;
+		break;
+	default:
+		dev->mt76.cap.has_2ghz = true;
+		dev->mt76.cap.has_5ghz = true;
+		break;
+	}
+}
+
+static int
+mt76x2_efuse_read(struct mt76x2_dev *dev, u16 addr, u8 *data)
+{
+	u32 val;
+	int i;
+
+	val = mt76_rr(dev, MT_EFUSE_CTRL);
+	val &= ~(MT_EFUSE_CTRL_AIN |
+		 MT_EFUSE_CTRL_MODE);
+	val |= FIELD_PREP(MT_EFUSE_CTRL_AIN, addr & ~0xf);
+	val |= MT_EFUSE_CTRL_KICK;
+	mt76_wr(dev, MT_EFUSE_CTRL, val);
+
+	if (!mt76_poll(dev, MT_EFUSE_CTRL, MT_EFUSE_CTRL_KICK, 0, 1000))
+		return -ETIMEDOUT;
+
+	udelay(2);
+
+	val = mt76_rr(dev, MT_EFUSE_CTRL);
+	if ((val & MT_EFUSE_CTRL_AOUT) == MT_EFUSE_CTRL_AOUT) {
+		memset(data, 0xff, 16);
+		return 0;
+	}
+
+	for (i = 0; i < 4; i++) {
+		val = mt76_rr(dev, MT_EFUSE_DATA(i));
+		put_unaligned_le32(val, data + 4 * i);
+	}
+
+	return 0;
+}
+
+static int
+mt76x2_get_efuse_data(struct mt76x2_dev *dev, void *buf, int len)
+{
+	int ret, i;
+
+	for (i = 0; i + 16 <= len; i += 16) {
+		ret = mt76x2_efuse_read(dev, i, buf + i);
+		if (ret)
+			return ret;
+	}
+
+	return 0;
+}
+
+static bool
+mt76x2_has_cal_free_data(struct mt76x2_dev *dev, u8 *efuse)
+{
+	u16 *efuse_w = (u16 *) efuse;
+
+	if (efuse_w[MT_EE_NIC_CONF_0] != 0)
+		return false;
+
+	if (efuse_w[MT_EE_XTAL_TRIM_1] == 0xffff)
+		return false;
+
+	if (efuse_w[MT_EE_TX_POWER_DELTA_BW40] != 0)
+		return false;
+
+	if (efuse_w[MT_EE_TX_POWER_0_START_2G] == 0xffff)
+		return false;
+
+	if (efuse_w[MT_EE_TX_POWER_0_GRP3_TX_POWER_DELTA] != 0)
+		return false;
+
+	if (efuse_w[MT_EE_TX_POWER_0_GRP4_TSSI_SLOPE] == 0xffff)
+		return false;
+
+	return true;
+}
+
+static void
+mt76x2_apply_cal_free_data(struct mt76x2_dev *dev, u8 *efuse)
+{
+#define GROUP_5G(_id)							   \
+	MT_EE_TX_POWER_0_START_5G + MT_TX_POWER_GROUP_SIZE_5G * (_id),	   \
+	MT_EE_TX_POWER_0_START_5G + MT_TX_POWER_GROUP_SIZE_5G * (_id) + 1, \
+	MT_EE_TX_POWER_1_START_5G + MT_TX_POWER_GROUP_SIZE_5G * (_id),	   \
+	MT_EE_TX_POWER_1_START_5G + MT_TX_POWER_GROUP_SIZE_5G * (_id) + 1
+
+	static const u8 cal_free_bytes[] = {
+		MT_EE_XTAL_TRIM_1,
+		MT_EE_TX_POWER_EXT_PA_5G + 1,
+		MT_EE_TX_POWER_0_START_2G,
+		MT_EE_TX_POWER_0_START_2G + 1,
+		MT_EE_TX_POWER_1_START_2G,
+		MT_EE_TX_POWER_1_START_2G + 1,
+		GROUP_5G(0),
+		GROUP_5G(1),
+		GROUP_5G(2),
+		GROUP_5G(3),
+		GROUP_5G(4),
+		GROUP_5G(5),
+		MT_EE_RF_2G_TSSI_OFF_TXPOWER,
+		MT_EE_RF_2G_RX_HIGH_GAIN + 1,
+		MT_EE_RF_5G_GRP0_1_RX_HIGH_GAIN,
+		MT_EE_RF_5G_GRP0_1_RX_HIGH_GAIN + 1,
+		MT_EE_RF_5G_GRP2_3_RX_HIGH_GAIN,
+		MT_EE_RF_5G_GRP2_3_RX_HIGH_GAIN + 1,
+		MT_EE_RF_5G_GRP4_5_RX_HIGH_GAIN,
+		MT_EE_RF_5G_GRP4_5_RX_HIGH_GAIN + 1,
+	};
+	u8 *eeprom = dev->mt76.eeprom.data;
+	u8 prev_grp0[4] = {
+		eeprom[MT_EE_TX_POWER_0_START_5G],
+		eeprom[MT_EE_TX_POWER_0_START_5G + 1],
+		eeprom[MT_EE_TX_POWER_1_START_5G],
+		eeprom[MT_EE_TX_POWER_1_START_5G + 1]
+	};
+	u16 val;
+	int i;
+
+	if (!mt76x2_has_cal_free_data(dev, efuse))
+		return;
+
+	for (i = 0; i < ARRAY_SIZE(cal_free_bytes); i++) {
+		int offset = cal_free_bytes[i];
+
+		eeprom[offset] = efuse[offset];
+	}
+
+	if (!(efuse[MT_EE_TX_POWER_0_START_5G] |
+	      efuse[MT_EE_TX_POWER_0_START_5G + 1]))
+		memcpy(eeprom + MT_EE_TX_POWER_0_START_5G, prev_grp0, 2);
+	if (!(efuse[MT_EE_TX_POWER_1_START_5G] |
+	      efuse[MT_EE_TX_POWER_1_START_5G + 1]))
+		memcpy(eeprom + MT_EE_TX_POWER_1_START_5G, prev_grp0 + 2, 2);
+
+	val = get_unaligned_le16(efuse + MT_EE_BT_RCAL_RESULT);
+	if (val != 0xffff)
+		eeprom[MT_EE_BT_RCAL_RESULT] = val & 0xff;
+
+	val = get_unaligned_le16(efuse + MT_EE_BT_VCDL_CALIBRATION);
+	if (val != 0xffff)
+		eeprom[MT_EE_BT_VCDL_CALIBRATION + 1] = val >> 8;
+
+	val = get_unaligned_le16(efuse + MT_EE_BT_PMUCFG);
+	if (val != 0xffff)
+		eeprom[MT_EE_BT_PMUCFG] = val & 0xff;
+}
+
+static int mt76x2_check_eeprom(struct mt76x2_dev *dev)
+{
+	u16 val = get_unaligned_le16(dev->mt76.eeprom.data);
+
+	if (!val)
+		val = get_unaligned_le16(dev->mt76.eeprom.data + MT_EE_PCI_ID);
+
+	switch (val) {
+	case 0x7662:
+	case 0x7612:
+		return 0;
+	default:
+		dev_err(dev->mt76.dev, "EEPROM data check failed: %04x\n", val);
+		return -EINVAL;
+	}
+}
+
+static int
+mt76x2_eeprom_load(struct mt76x2_dev *dev)
+{
+	void *efuse;
+	int len = MT7662_EEPROM_SIZE;
+	bool found;
+	int ret;
+
+	ret = mt76_eeprom_init(&dev->mt76, len);
+	if (ret < 0)
+		return ret;
+
+	found = ret;
+	if (found)
+		found = !mt76x2_check_eeprom(dev);
+
+	dev->mt76.otp.data = devm_kzalloc(dev->mt76.dev, len, GFP_KERNEL);
+	dev->mt76.otp.size = len;
+	if (!dev->mt76.otp.data)
+		return -ENOMEM;
+
+	efuse = dev->mt76.otp.data;
+
+	if (mt76x2_get_efuse_data(dev, efuse, len))
+		goto out;
+
+	if (found) {
+		mt76x2_apply_cal_free_data(dev, efuse);
+	} else {
+		/* FIXME: check if efuse data is complete */
+		found = true;
+		memcpy(dev->mt76.eeprom.data, efuse, len);
+	}
+
+out:
+	if (!found)
+		return -ENOENT;
+
+	return 0;
+}
+
+static inline int
+mt76x2_sign_extend(u32 val, unsigned int size)
+{
+	bool sign = val & BIT(size - 1);
+
+	val &= BIT(size - 1) - 1;
+
+	return sign ? val : -val;
+}
+
+static inline int
+mt76x2_sign_extend_optional(u32 val, unsigned int size)
+{
+	bool enable = val & BIT(size);
+
+	return enable ? mt76x2_sign_extend(val, size) : 0;
+}
+
+static bool
+field_valid(u8 val)
+{
+	return val != 0 && val != 0xff;
+}
+
+static void
+mt76x2_set_rx_gain_group(struct mt76x2_dev *dev, u8 val)
+{
+	s8 *dest = dev->cal.rx.high_gain;
+
+	if (!field_valid(val)) {
+		dest[0] = 0;
+		dest[1] = 0;
+		return;
+	}
+
+	dest[0] = mt76x2_sign_extend(val, 4);
+	dest[1] = mt76x2_sign_extend(val >> 4, 4);
+}
+
+static void
+mt76x2_set_rssi_offset(struct mt76x2_dev *dev, int chain, u8 val)
+{
+	s8 *dest = dev->cal.rx.rssi_offset;
+
+	if (!field_valid(val)) {
+		dest[chain] = 0;
+		return;
+	}
+
+	dest[chain] = mt76x2_sign_extend_optional(val, 7);
+}
+
+static enum mt76x2_cal_channel_group
+mt76x2_get_cal_channel_group(int channel)
+{
+	if (channel >= 184 && channel <= 196)
+		return MT_CH_5G_JAPAN;
+	if (channel <= 48)
+		return MT_CH_5G_UNII_1;
+	if (channel <= 64)
+		return MT_CH_5G_UNII_2;
+	if (channel <= 114)
+		return MT_CH_5G_UNII_2E_1;
+	if (channel <= 144)
+		return MT_CH_5G_UNII_2E_2;
+	return MT_CH_5G_UNII_3;
+}
+
+static u8
+mt76x2_get_5g_rx_gain(struct mt76x2_dev *dev, u8 channel)
+{
+	enum mt76x2_cal_channel_group group;
+
+	group = mt76x2_get_cal_channel_group(channel);
+	switch (group) {
+	case MT_CH_5G_JAPAN:
+		return mt76x2_eeprom_get(dev, MT_EE_RF_5G_GRP0_1_RX_HIGH_GAIN);
+	case MT_CH_5G_UNII_1:
+		return mt76x2_eeprom_get(dev, MT_EE_RF_5G_GRP0_1_RX_HIGH_GAIN) >> 8;
+	case MT_CH_5G_UNII_2:
+		return mt76x2_eeprom_get(dev, MT_EE_RF_5G_GRP2_3_RX_HIGH_GAIN);
+	case MT_CH_5G_UNII_2E_1:
+		return mt76x2_eeprom_get(dev, MT_EE_RF_5G_GRP2_3_RX_HIGH_GAIN) >> 8;
+	case MT_CH_5G_UNII_2E_2:
+		return mt76x2_eeprom_get(dev, MT_EE_RF_5G_GRP4_5_RX_HIGH_GAIN);
+	default:
+		return mt76x2_eeprom_get(dev, MT_EE_RF_5G_GRP4_5_RX_HIGH_GAIN) >> 8;
+	}
+}
+
+void mt76x2_read_rx_gain(struct mt76x2_dev *dev)
+{
+	struct ieee80211_channel *chan = dev->mt76.chandef.chan;
+	int channel = chan->hw_value;
+	s8 lna_5g[3], lna_2g;
+	u8 lna;
+	u16 val;
+
+	if (chan->band == NL80211_BAND_2GHZ)
+		val = mt76x2_eeprom_get(dev, MT_EE_RF_2G_RX_HIGH_GAIN) >> 8;
+	else
+		val = mt76x2_get_5g_rx_gain(dev, channel);
+
+	mt76x2_set_rx_gain_group(dev, val);
+
+	if (chan->band == NL80211_BAND_2GHZ) {
+		val = mt76x2_eeprom_get(dev, MT_EE_RSSI_OFFSET_2G_0);
+		mt76x2_set_rssi_offset(dev, 0, val);
+		mt76x2_set_rssi_offset(dev, 1, val >> 8);
+	} else {
+		val = mt76x2_eeprom_get(dev, MT_EE_RSSI_OFFSET_5G_0);
+		mt76x2_set_rssi_offset(dev, 0, val);
+		mt76x2_set_rssi_offset(dev, 1, val >> 8);
+	}
+
+	val = mt76x2_eeprom_get(dev, MT_EE_LNA_GAIN);
+	lna_2g = val & 0xff;
+	lna_5g[0] = val >> 8;
+
+	val = mt76x2_eeprom_get(dev, MT_EE_RSSI_OFFSET_2G_1);
+	lna_5g[1] = val >> 8;
+
+	val = mt76x2_eeprom_get(dev, MT_EE_RSSI_OFFSET_5G_1);
+	lna_5g[2] = val >> 8;
+
+	if (!field_valid(lna_5g[1]))
+		lna_5g[1] = lna_5g[0];
+
+	if (!field_valid(lna_5g[2]))
+		lna_5g[2] = lna_5g[0];
+
+	dev->cal.rx.mcu_gain =  (lna_2g & 0xff);
+	dev->cal.rx.mcu_gain |= (lna_5g[0] & 0xff) << 8;
+	dev->cal.rx.mcu_gain |= (lna_5g[1] & 0xff) << 16;
+	dev->cal.rx.mcu_gain |= (lna_5g[2] & 0xff) << 24;
+
+	val = mt76x2_eeprom_get(dev, MT_EE_NIC_CONF_1);
+	if (val & MT_EE_NIC_CONF_1_LNA_EXT_2G)
+		lna_2g = 0;
+	if (val & MT_EE_NIC_CONF_1_LNA_EXT_5G)
+		memset(lna_5g, 0, sizeof(lna_5g));
+
+	if (chan->band == NL80211_BAND_2GHZ)
+		lna = lna_2g;
+	else if (channel <= 64)
+		lna = lna_5g[0];
+	else if (channel <= 128)
+		lna = lna_5g[1];
+	else
+		lna = lna_5g[2];
+
+	if (lna == 0xff)
+		lna = 0;
+
+	dev->cal.rx.lna_gain = mt76x2_sign_extend(lna, 8);
+}
+
+static s8
+mt76x2_rate_power_val(u8 val)
+{
+	if (!field_valid(val))
+		return 0;
+
+	return mt76x2_sign_extend_optional(val, 7);
+}
+
+void mt76x2_get_rate_power(struct mt76x2_dev *dev, struct mt76_rate_power *t,
+			   struct ieee80211_channel *chan)
+{
+	bool is_5ghz;
+	u16 val;
+
+	is_5ghz = chan->band == NL80211_BAND_5GHZ;
+
+	memset(t, 0, sizeof(*t));
+
+	val = mt76x2_eeprom_get(dev, MT_EE_TX_POWER_CCK);
+	t->cck[0] = t->cck[1] = mt76x2_rate_power_val(val);
+	t->cck[2] = t->cck[3] = mt76x2_rate_power_val(val >> 8);
+
+	if (is_5ghz)
+		val = mt76x2_eeprom_get(dev, MT_EE_TX_POWER_OFDM_5G_6M);
+	else
+		val = mt76x2_eeprom_get(dev, MT_EE_TX_POWER_OFDM_2G_6M);
+	t->ofdm[0] = t->ofdm[1] = mt76x2_rate_power_val(val);
+	t->ofdm[2] = t->ofdm[3] = mt76x2_rate_power_val(val >> 8);
+
+	if (is_5ghz)
+		val = mt76x2_eeprom_get(dev, MT_EE_TX_POWER_OFDM_5G_24M);
+	else
+		val = mt76x2_eeprom_get(dev, MT_EE_TX_POWER_OFDM_2G_24M);
+	t->ofdm[4] = t->ofdm[5] = mt76x2_rate_power_val(val);
+	t->ofdm[6] = t->ofdm[7] = mt76x2_rate_power_val(val >> 8);
+
+	val = mt76x2_eeprom_get(dev, MT_EE_TX_POWER_HT_MCS0);
+	t->ht[0] = t->ht[1] = mt76x2_rate_power_val(val);
+	t->ht[2] = t->ht[3] = mt76x2_rate_power_val(val >> 8);
+
+	val = mt76x2_eeprom_get(dev, MT_EE_TX_POWER_HT_MCS4);
+	t->ht[4] = t->ht[5] = mt76x2_rate_power_val(val);
+	t->ht[6] = t->ht[7] = mt76x2_rate_power_val(val >> 8);
+
+	val = mt76x2_eeprom_get(dev, MT_EE_TX_POWER_HT_MCS8);
+	t->ht[8] = t->ht[9] = mt76x2_rate_power_val(val);
+	t->ht[10] = t->ht[11] = mt76x2_rate_power_val(val >> 8);
+
+	val = mt76x2_eeprom_get(dev, MT_EE_TX_POWER_HT_MCS12);
+	t->ht[12] = t->ht[13] = mt76x2_rate_power_val(val);
+	t->ht[14] = t->ht[15] = mt76x2_rate_power_val(val >> 8);
+
+	val = mt76x2_eeprom_get(dev, MT_EE_TX_POWER_VHT_MCS0);
+	t->vht[0] = t->vht[1] = mt76x2_rate_power_val(val);
+	t->vht[2] = t->vht[3] = mt76x2_rate_power_val(val >> 8);
+
+	val = mt76x2_eeprom_get(dev, MT_EE_TX_POWER_VHT_MCS4);
+	t->vht[4] = t->vht[5] = mt76x2_rate_power_val(val);
+	t->vht[6] = t->vht[7] = mt76x2_rate_power_val(val >> 8);
+
+	val = mt76x2_eeprom_get(dev, MT_EE_TX_POWER_VHT_MCS8);
+	/* 5 GHz value in the low byte, 2.4 GHz in the high byte */
+	if (!is_5ghz)
+		val >>= 8;
+	t->vht[8] = t->vht[9] = mt76x2_rate_power_val(val);
+}
+
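+/* Highest entry across all per-rate power tables. */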
+int mt76x2_get_max_rate_power(struct mt76_rate_power *r)
+{
+	int i;
+	s8 ret = 0;
+
+	for (i = 0; i < sizeof(r->all); i++)
+		ret = max(ret, r->all[i]);
+
+	return ret;
+}
+
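+/*
+ * Per-chain 2.4 GHz power calibration: TSSI slope/offset, target power
+ * and a channel dependent delta, plus the TSSI-off target power word.
+ */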
+static void
+mt76x2_get_power_info_2g(struct mt76x2_dev *dev, struct mt76x2_tx_power_info *t,
+			 struct ieee80211_channel *chan, int chain, int offset)
+{
+	int channel = chan->hw_value;
+	int delta_idx;
+	u8 data[6];
+	u16 val;
+
+	if (channel < 6)
+		delta_idx = 3;
+	else if (channel < 11)
+		delta_idx = 4;
+	else
+		delta_idx = 5;
+
+	mt76x2_eeprom_copy(dev, offset, data, sizeof(data));
+
+	t->chain[chain].tssi_slope = data[0];
+	t->chain[chain].tssi_offset = data[1];
+	t->chain[chain].target_power = data[2];
+	t->chain[chain].delta = mt76x2_sign_extend_optional(data[delta_idx], 7);
+
+	val = mt76x2_eeprom_get(dev, MT_EE_RF_2G_TSSI_OFF_TXPOWER);
+	t->target_power = val >> 8;
+}
+
+static void
+mt76x2_get_power_info_5g(struct mt76x2_dev *dev, struct mt76x2_tx_power_info *t,
+			 struct ieee80211_channel *chan, int chain, int offset)
+{
+	int channel = chan->hw_value;
+	enum mt76x2_cal_channel_group group;
+	int delta_idx;
+	u16 val;
+	u8 data[5];
+
+	group = mt76x2_get_cal_channel_group(channel);
+	offset += group * MT_TX_POWER_GROUP_SIZE_5G;
+
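+	/* select the byte in the group data that holds this channel's delta */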
+	if (channel >= 192)
+		delta_idx = 4;
+	else if (channel >= 184)
+		delta_idx = 3;
+	else if (channel < 44)
+		delta_idx = 3;
+	else if (channel < 52)
+		delta_idx = 4;
+	else if (channel < 58)
+		delta_idx = 3;
+	else if (channel < 98)
+		delta_idx = 4;
+	else if (channel < 106)
+		delta_idx = 3;
+	else if (channel < 116)
+		delta_idx = 4;
+	else if (channel < 130)
+		delta_idx = 3;
+	else if (channel < 149)
+		delta_idx = 4;
+	else if (channel < 157)
+		delta_idx = 3;
+	else
+		delta_idx = 4;
+
+	mt76x2_eeprom_copy(dev, offset, data, sizeof(data));
+
+	t->chain[chain].tssi_slope = data[0];
+	t->chain[chain].tssi_offset = data[1];
+	t->chain[chain].target_power = data[2];
+	t->chain[chain].delta = mt76x2_sign_extend_optional(data[delta_idx], 7);
+
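+	/* the low byte of this word carries the 5 GHz TSSI-off target power */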
+	val = mt76x2_eeprom_get(dev, MT_EE_RF_2G_RX_HIGH_GAIN);
+	t->target_power = val & 0xff;
+}
+
+void mt76x2_get_power_info(struct mt76x2_dev *dev,
+			   struct mt76x2_tx_power_info *t,
+			   struct ieee80211_channel *chan)
+{
+	u16 bw40, bw80;
+
+	memset(t, 0, sizeof(*t));
+
+	bw40 = mt76x2_eeprom_get(dev, MT_EE_TX_POWER_DELTA_BW40);
+	bw80 = mt76x2_eeprom_get(dev, MT_EE_TX_POWER_DELTA_BW80);
+
+	if (chan->band == NL80211_BAND_5GHZ) {
+		bw40 >>= 8;
+		mt76x2_get_power_info_5g(dev, t, chan, 0,
+					 MT_EE_TX_POWER_0_START_5G);
+		mt76x2_get_power_info_5g(dev, t, chan, 1,
+					 MT_EE_TX_POWER_1_START_5G);
+	} else {
+		mt76x2_get_power_info_2g(dev, t, chan, 0,
+					 MT_EE_TX_POWER_0_START_2G);
+		mt76x2_get_power_info_2g(dev, t, chan, 1,
+					 MT_EE_TX_POWER_1_START_2G);
+	}
+
+	if (mt76x2_tssi_enabled(dev) || !field_valid(t->target_power))
+		t->target_power = t->chain[0].target_power;
+
+	t->delta_bw40 = mt76x2_rate_power_val(bw40);
+	t->delta_bw80 = mt76x2_rate_power_val(bw80);
+}
+
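+/*
+ * Read temperature compensation parameters; returns -EINVAL when
+ * temperature based TX ALC is disabled in EEPROM or no external PA
+ * is in use.
+ */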
+int mt76x2_get_temp_comp(struct mt76x2_dev *dev, struct mt76x2_temp_comp *t)
+{
+	enum nl80211_band band = dev->mt76.chandef.chan->band;
+	u16 val, slope;
+	u8 bounds;
+
+	memset(t, 0, sizeof(*t));
+
+	val = mt76x2_eeprom_get(dev, MT_EE_NIC_CONF_1);
+	if (!(val & MT_EE_NIC_CONF_1_TEMP_TX_ALC))
+		return -EINVAL;
+
+	if (!mt76x2_ext_pa_enabled(dev, band))
+		return -EINVAL;
+
+	val = mt76x2_eeprom_get(dev, MT_EE_TX_POWER_EXT_PA_5G) >> 8;
+	if (!(val & BIT(7)))
+		return -EINVAL;
+
+	t->temp_25_ref = val & 0x7f;
+	if (band == NL80211_BAND_5GHZ) {
+		slope = mt76x2_eeprom_get(dev, MT_EE_RF_TEMP_COMP_SLOPE_5G);
+		bounds = mt76x2_eeprom_get(dev, MT_EE_TX_POWER_EXT_PA_5G);
+	} else {
+		slope = mt76x2_eeprom_get(dev, MT_EE_RF_TEMP_COMP_SLOPE_2G);
+		bounds = mt76x2_eeprom_get(dev, MT_EE_TX_POWER_DELTA_BW80) >> 8;
+	}
+
+	t->high_slope = slope & 0xff;
+	t->low_slope = slope >> 8;
+	t->lower_bound = 0 - (bounds & 0xf);
+	t->upper_bound = (bounds >> 4) & 0xf;
+
+	return 0;
+}
+
+bool mt76x2_ext_pa_enabled(struct mt76x2_dev *dev, enum nl80211_band band)
+{
+	u16 conf0 = mt76x2_eeprom_get(dev, MT_EE_NIC_CONF_0);
+
+	if (band == NL80211_BAND_5GHZ)
+		return !(conf0 & MT_EE_NIC_CONF_0_PA_INT_5G);
+	else
+		return !(conf0 & MT_EE_NIC_CONF_0_PA_INT_2G);
+}
+
+int mt76x2_eeprom_init(struct mt76x2_dev *dev)
+{
+	int ret;
+
+	ret = mt76x2_eeprom_load(dev);
+	if (ret)
+		return ret;
+
+	mt76x2_eeprom_parse_hw_cap(dev);
+	mt76x2_eeprom_get_macaddr(dev);
+	mt76_eeprom_override(&dev->mt76);
+	dev->mt76.macaddr[0] &= ~BIT(1);
+
+	return 0;
+}
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2_eeprom.h b/drivers/net/wireless/mediatek/mt76/mt76x2_eeprom.h
new file mode 100644
index 000000000000..d79122728dca
--- /dev/null
+++ b/drivers/net/wireless/mediatek/mt76/mt76x2_eeprom.h
@@ -0,0 +1,185 @@
+/*
+ * Copyright (C) 2016 Felix Fietkau <nbd@nbd.name>
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#ifndef __MT76x2_EEPROM_H
+#define __MT76x2_EEPROM_H
+
+#include "mt76x2.h"
+
+enum mt76x2_eeprom_field {
+	MT_EE_CHIP_ID =				0x000,
+	MT_EE_VERSION =				0x002,
+	MT_EE_MAC_ADDR =			0x004,
+	MT_EE_PCI_ID =				0x00A,
+	MT_EE_NIC_CONF_0 =			0x034,
+	MT_EE_NIC_CONF_1 =			0x036,
+	MT_EE_NIC_CONF_2 =			0x042,
+
+	MT_EE_XTAL_TRIM_1 =			0x03a,
+	MT_EE_XTAL_TRIM_2 =			0x09e,
+
+	MT_EE_LNA_GAIN =			0x044,
+	MT_EE_RSSI_OFFSET_2G_0 =		0x046,
+	MT_EE_RSSI_OFFSET_2G_1 =		0x048,
+	MT_EE_RSSI_OFFSET_5G_0 =		0x04a,
+	MT_EE_RSSI_OFFSET_5G_1 =		0x04c,
+
+	MT_EE_TX_POWER_DELTA_BW40 =		0x050,
+	MT_EE_TX_POWER_DELTA_BW80 =		0x052,
+
+	MT_EE_TX_POWER_EXT_PA_5G =		0x054,
+
+	MT_EE_TX_POWER_0_START_2G =		0x056,
+	MT_EE_TX_POWER_1_START_2G =		0x05c,
+
+	/* used as byte arrays */
+#define MT_TX_POWER_GROUP_SIZE_5G		5
+#define MT_TX_POWER_GROUPS_5G			6
+	MT_EE_TX_POWER_0_START_5G =		0x062,
+
+	MT_EE_TX_POWER_0_GRP3_TX_POWER_DELTA =	0x074,
+	MT_EE_TX_POWER_0_GRP4_TSSI_SLOPE =	0x076,
+
+	MT_EE_TX_POWER_1_START_5G =		0x080,
+
+	MT_EE_TX_POWER_CCK =			0x0a0,
+	MT_EE_TX_POWER_OFDM_2G_6M =		0x0a2,
+	MT_EE_TX_POWER_OFDM_2G_24M =		0x0a4,
+	MT_EE_TX_POWER_OFDM_5G_6M =		0x0b2,
+	MT_EE_TX_POWER_OFDM_5G_24M =		0x0b4,
+	MT_EE_TX_POWER_HT_MCS0 =		0x0a6,
+	MT_EE_TX_POWER_HT_MCS4 =		0x0a8,
+	MT_EE_TX_POWER_HT_MCS8 =		0x0aa,
+	MT_EE_TX_POWER_HT_MCS12 =		0x0ac,
+	MT_EE_TX_POWER_VHT_MCS0 =		0x0ba,
+	MT_EE_TX_POWER_VHT_MCS4 =		0x0bc,
+	MT_EE_TX_POWER_VHT_MCS8 =		0x0be,
+
+	MT_EE_RF_TEMP_COMP_SLOPE_5G =		0x0f2,
+	MT_EE_RF_TEMP_COMP_SLOPE_2G =		0x0f4,
+
+	MT_EE_RF_2G_TSSI_OFF_TXPOWER =		0x0f6,
+	MT_EE_RF_2G_RX_HIGH_GAIN =		0x0f8,
+	MT_EE_RF_5G_GRP0_1_RX_HIGH_GAIN =	0x0fa,
+	MT_EE_RF_5G_GRP2_3_RX_HIGH_GAIN =	0x0fc,
+	MT_EE_RF_5G_GRP4_5_RX_HIGH_GAIN =	0x0fe,
+
+	MT_EE_BT_RCAL_RESULT =			0x138,
+	MT_EE_BT_VCDL_CALIBRATION =		0x13c,
+	MT_EE_BT_PMUCFG =			0x13e,
+
+	__MT_EE_MAX
+};
+
+#define MT_EE_NIC_CONF_0_PA_INT_2G		BIT(8)
+#define MT_EE_NIC_CONF_0_PA_INT_5G		BIT(9)
+#define MT_EE_NIC_CONF_0_BOARD_TYPE		GENMASK(13, 12)
+
+#define MT_EE_NIC_CONF_1_TEMP_TX_ALC		BIT(1)
+#define MT_EE_NIC_CONF_1_LNA_EXT_2G		BIT(2)
+#define MT_EE_NIC_CONF_1_LNA_EXT_5G		BIT(3)
+#define MT_EE_NIC_CONF_1_TX_ALC_EN		BIT(13)
+
+#define MT_EE_NIC_CONF_2_RX_STREAM		GENMASK(3, 0)
+#define MT_EE_NIC_CONF_2_TX_STREAM		GENMASK(7, 4)
+#define MT_EE_NIC_CONF_2_HW_ANTDIV		BIT(8)
+#define MT_EE_NIC_CONF_2_XTAL_OPTION		GENMASK(10, 9)
+#define MT_EE_NIC_CONF_2_TEMP_DISABLE		BIT(11)
+#define MT_EE_NIC_CONF_2_COEX_METHOD		GENMASK(15, 13)
+
+enum mt76x2_board_type {
+	BOARD_TYPE_2GHZ = 1,
+	BOARD_TYPE_5GHZ = 2,
+};
+
+enum mt76x2_cal_channel_group {
+	MT_CH_5G_JAPAN,
+	MT_CH_5G_UNII_1,
+	MT_CH_5G_UNII_2,
+	MT_CH_5G_UNII_2E_1,
+	MT_CH_5G_UNII_2E_2,
+	MT_CH_5G_UNII_3,
+	__MT_CH_MAX
+};
+
+struct mt76x2_tx_power_info {
+	u8 target_power;
+
+	s8 delta_bw40;
+	s8 delta_bw80;
+
+	struct {
+		s8 tssi_slope;
+		s8 tssi_offset;
+		s8 target_power;
+		s8 delta;
+	} chain[MT_MAX_CHAINS];
+};
+
+struct mt76x2_temp_comp {
+	u8 temp_25_ref;
+	int lower_bound; /* J */
+	int upper_bound; /* J */
+	unsigned int high_slope; /* J / dB */
+	unsigned int low_slope; /* J / dB */
+};
+
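+/*
+ * Read one 16-bit little-endian EEPROM word; odd or out-of-range offsets
+ * return -1 (0xffff), which decodes as an invalid field.
+ */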
+static inline int
+mt76x2_eeprom_get(struct mt76x2_dev *dev, enum mt76x2_eeprom_field field)
+{
+	if ((field & 1) || field >= __MT_EE_MAX)
+		return -1;
+
+	return get_unaligned_le16(dev->mt76.eeprom.data + field);
+}
+
+void mt76x2_get_rate_power(struct mt76x2_dev *dev, struct mt76_rate_power *t,
+			   struct ieee80211_channel *chan);
+int mt76x2_get_max_rate_power(struct mt76_rate_power *r);
+void mt76x2_get_power_info(struct mt76x2_dev *dev,
+			   struct mt76x2_tx_power_info *t,
+			   struct ieee80211_channel *chan);
+int mt76x2_get_temp_comp(struct mt76x2_dev *dev, struct mt76x2_temp_comp *t);
+bool mt76x2_ext_pa_enabled(struct mt76x2_dev *dev, enum nl80211_band band);
+void mt76x2_read_rx_gain(struct mt76x2_dev *dev);
+
+static inline bool
+mt76x2_temp_tx_alc_enabled(struct mt76x2_dev *dev)
+{
+	return mt76x2_eeprom_get(dev, MT_EE_NIC_CONF_1) &
+	       MT_EE_NIC_CONF_1_TEMP_TX_ALC;
+}
+
+static inline bool
+mt76x2_tssi_enabled(struct mt76x2_dev *dev)
+{
+	return !mt76x2_temp_tx_alc_enabled(dev) &&
+	       (mt76x2_eeprom_get(dev, MT_EE_NIC_CONF_1) &
+		MT_EE_NIC_CONF_1_TX_ALC_EN);
+}
+
+static inline bool
+mt76x2_has_ext_lna(struct mt76x2_dev *dev)
+{
+	u32 val = mt76x2_eeprom_get(dev, MT_EE_NIC_CONF_1);
+
+	if (dev->mt76.chandef.chan->band == NL80211_BAND_2GHZ)
+		return val & MT_EE_NIC_CONF_1_LNA_EXT_2G;
+	else
+		return val & MT_EE_NIC_CONF_1_LNA_EXT_5G;
+}
+
+#endif
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2_init.c b/drivers/net/wireless/mediatek/mt76/mt76x2_init.c
new file mode 100644
index 000000000000..1b00ae4465a2
--- /dev/null
+++ b/drivers/net/wireless/mediatek/mt76/mt76x2_init.c
@@ -0,0 +1,875 @@
+/*
+ * Copyright (C) 2016 Felix Fietkau <nbd@nbd.name>
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include <linux/delay.h>
+#include "mt76x2.h"
+#include "mt76x2_eeprom.h"
+#include "mt76x2_mcu.h"
+
+struct mt76x2_reg_pair {
+	u32 reg;
+	u32 value;
+};
+
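+/* Poll MAC_CSR0 until the chip reports a valid (non-0, non-~0) ID. */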
+static bool
+mt76x2_wait_for_mac(struct mt76x2_dev *dev)
+{
+	int i;
+
+	for (i = 0; i < 500; i++) {
+		switch (mt76_rr(dev, MT_MAC_CSR0)) {
+		case 0:
+		case ~0:
+			break;
+		default:
+			return true;
+		}
+		usleep_range(5000, 10000);
+	}
+
+	return false;
+}
+
+static bool
+wait_for_wpdma(struct mt76x2_dev *dev)
+{
+	return mt76_poll(dev, MT_WPDMA_GLO_CFG,
+			 MT_WPDMA_GLO_CFG_TX_DMA_BUSY |
+			 MT_WPDMA_GLO_CFG_RX_DMA_BUSY,
+			 0, 1000);
+}
+
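+/* Reset the packet buffer engine and set the TX/RX maximum page counts. */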
+static void
+mt76x2_mac_pbf_init(struct mt76x2_dev *dev)
+{
+	u32 val;
+
+	val = MT_PBF_SYS_CTRL_MCU_RESET |
+	      MT_PBF_SYS_CTRL_DMA_RESET |
+	      MT_PBF_SYS_CTRL_MAC_RESET |
+	      MT_PBF_SYS_CTRL_PBF_RESET |
+	      MT_PBF_SYS_CTRL_ASY_RESET;
+
+	mt76_set(dev, MT_PBF_SYS_CTRL, val);
+	mt76_clear(dev, MT_PBF_SYS_CTRL, val);
+
+	mt76_wr(dev, MT_PBF_TX_MAX_PCNT, 0xefef3f1f);
+	mt76_wr(dev, MT_PBF_RX_MAX_PCNT, 0xfebf);
+}
+
+static void
+mt76x2_write_reg_pairs(struct mt76x2_dev *dev,
+		       const struct mt76x2_reg_pair *data, int len)
+{
+	while (len > 0) {
+		mt76_wr(dev, data->reg, data->value);
+		len--;
+		data++;
+	}
+}
+
+static void
+mt76_write_mac_initvals(struct mt76x2_dev *dev)
+{
+#define DEFAULT_PROT_CFG				\
+	(FIELD_PREP(MT_PROT_CFG_RATE, 0x2004) |		\
+	 FIELD_PREP(MT_PROT_CFG_NAV, 1) |			\
+	 FIELD_PREP(MT_PROT_CFG_TXOP_ALLOW, 0x3f) |	\
+	 MT_PROT_CFG_RTS_THRESH)
+
+#define DEFAULT_PROT_CFG_20				\
+	(FIELD_PREP(MT_PROT_CFG_RATE, 0x2004) |		\
+	 FIELD_PREP(MT_PROT_CFG_CTRL, 1) |		\
+	 FIELD_PREP(MT_PROT_CFG_NAV, 1) |			\
+	 FIELD_PREP(MT_PROT_CFG_TXOP_ALLOW, 0x17))
+
+#define DEFAULT_PROT_CFG_40				\
+	(FIELD_PREP(MT_PROT_CFG_RATE, 0x2084) |		\
+	 FIELD_PREP(MT_PROT_CFG_CTRL, 1) |		\
+	 FIELD_PREP(MT_PROT_CFG_NAV, 1) |			\
+	 FIELD_PREP(MT_PROT_CFG_TXOP_ALLOW, 0x3f))
+
+	static const struct mt76x2_reg_pair vals[] = {
+		/* Copied from MediaTek reference source */
+		{ MT_PBF_SYS_CTRL,		0x00080c00 },
+		{ MT_PBF_CFG,			0x1efebcff },
+		{ MT_FCE_PSE_CTRL,		0x00000001 },
+		{ MT_MAC_SYS_CTRL,		0x0000000c },
+		{ MT_MAX_LEN_CFG,		0x003e3f00 },
+		{ MT_AMPDU_MAX_LEN_20M1S,	0xaaa99887 },
+		{ MT_AMPDU_MAX_LEN_20M2S,	0x000000aa },
+		{ MT_XIFS_TIME_CFG,		0x33a40d0a },
+		{ MT_BKOFF_SLOT_CFG,		0x00000209 },
+		{ MT_TBTT_SYNC_CFG,		0x00422010 },
+		{ MT_PWR_PIN_CFG,		0x00000000 },
+		{ 0x1238,			0x001700c8 },
+		{ MT_TX_SW_CFG0,		0x00101001 },
+		{ MT_TX_SW_CFG1,		0x00010000 },
+		{ MT_TX_SW_CFG2,		0x00000000 },
+		{ MT_TXOP_CTRL_CFG,		0x0400583f },
+		{ MT_TX_RTS_CFG,		0x00100020 },
+		{ MT_TX_TIMEOUT_CFG,		0x000a2290 },
+		{ MT_TX_RETRY_CFG,		0x47f01f0f },
+		{ MT_EXP_ACK_TIME,		0x002c00dc },
+		{ MT_TX_PROT_CFG6,		0xe3f42004 },
+		{ MT_TX_PROT_CFG7,		0xe3f42084 },
+		{ MT_TX_PROT_CFG8,		0xe3f42104 },
+		{ MT_PIFS_TX_CFG,		0x00060fff },
+		{ MT_RX_FILTR_CFG,		0x00015f97 },
+		{ MT_LEGACY_BASIC_RATE,		0x0000017f },
+		{ MT_HT_BASIC_RATE,		0x00004003 },
+		{ MT_PN_PAD_MODE,		0x00000003 },
+		{ MT_TXOP_HLDR_ET,		0x00000002 },
+		{ 0xa44,			0x00000000 },
+		{ MT_HEADER_TRANS_CTRL_REG,	0x00000000 },
+		{ MT_TSO_CTRL,			0x00000000 },
+		{ MT_AUX_CLK_CFG,		0x00000000 },
+		{ MT_DACCLK_EN_DLY_CFG,		0x00000000 },
+		{ MT_TX_ALC_CFG_4,		0x00000000 },
+		{ MT_TX_ALC_VGA3,		0x00000000 },
+		{ MT_TX_PWR_CFG_0,		0x3a3a3a3a },
+		{ MT_TX_PWR_CFG_1,		0x3a3a3a3a },
+		{ MT_TX_PWR_CFG_2,		0x3a3a3a3a },
+		{ MT_TX_PWR_CFG_3,		0x3a3a3a3a },
+		{ MT_TX_PWR_CFG_4,		0x3a3a3a3a },
+		{ MT_TX_PWR_CFG_7,		0x3a3a3a3a },
+		{ MT_TX_PWR_CFG_8,		0x0000003a },
+		{ MT_TX_PWR_CFG_9,		0x0000003a },
+		{ MT_EFUSE_CTRL,		0x0000d000 },
+		{ MT_PAUSE_ENABLE_CONTROL1,	0x0000000a },
+		{ MT_FCE_WLAN_FLOW_CONTROL1,	0x60401c18 },
+		{ MT_WPDMA_DELAY_INT_CFG,	0x94ff0000 },
+		{ MT_TX_SW_CFG3,		0x00000004 },
+		{ MT_HT_FBK_TO_LEGACY,		0x00001818 },
+		{ MT_VHT_HT_FBK_CFG1,		0xedcba980 },
+		{ MT_PROT_AUTO_TX_CFG,		0x00830083 },
+		{ MT_HT_CTRL_CFG,		0x000001ff },
+	};
+	struct mt76x2_reg_pair prot_vals[] = {
+		{ MT_CCK_PROT_CFG,		DEFAULT_PROT_CFG },
+		{ MT_OFDM_PROT_CFG,		DEFAULT_PROT_CFG },
+		{ MT_MM20_PROT_CFG,		DEFAULT_PROT_CFG_20 },
+		{ MT_MM40_PROT_CFG,		DEFAULT_PROT_CFG_40 },
+		{ MT_GF20_PROT_CFG,		DEFAULT_PROT_CFG_20 },
+		{ MT_GF40_PROT_CFG,		DEFAULT_PROT_CFG_40 },
+	};
+
+	mt76x2_write_reg_pairs(dev, vals, ARRAY_SIZE(vals));
+	mt76x2_write_reg_pairs(dev, prot_vals, ARRAY_SIZE(prot_vals));
+}
+
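+/* Apply the crystal trim and XO option stored in EEPROM. */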
+static void
+mt76x2_fixup_xtal(struct mt76x2_dev *dev)
+{
+	u16 eep_val;
+	s8 offset = 0;
+
+	eep_val = mt76x2_eeprom_get(dev, MT_EE_XTAL_TRIM_2);
+
+	offset = eep_val & 0x7f;
+	if ((eep_val & 0xff) == 0xff)
+		offset = 0;
+	else if (eep_val & 0x80)
+		offset = 0 - offset;
+
+	eep_val >>= 8;
+	if (eep_val == 0x00 || eep_val == 0xff) {
+		eep_val = mt76x2_eeprom_get(dev, MT_EE_XTAL_TRIM_1);
+		eep_val &= 0xff;
+
+		if (eep_val == 0x00 || eep_val == 0xff)
+			eep_val = 0x14;
+	}
+
+	eep_val &= 0x7f;
+	mt76_rmw_field(dev, MT_XO_CTRL5, MT_XO_CTRL5_C2_VAL, eep_val + offset);
+	mt76_set(dev, MT_XO_CTRL6, MT_XO_CTRL6_C2_CTRL);
+
+	eep_val = mt76x2_eeprom_get(dev, MT_EE_NIC_CONF_2);
+	switch (FIELD_GET(MT_EE_NIC_CONF_2_XTAL_OPTION, eep_val)) {
+	case 0:
+		mt76_wr(dev, MT_XO_CTRL7, 0x5c1fee80);
+		break;
+	case 1:
+		mt76_wr(dev, MT_XO_CTRL7, 0x5c1feed0);
+		break;
+	default:
+		break;
+	}
+}
+
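+/*
+ * Pack the 16 beacon slot offsets, in 64-byte units relative to
+ * MT_BEACON_BASE, into the four MT_BCN_OFFSET registers.
+ */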
+static void
+mt76x2_init_beacon_offsets(struct mt76x2_dev *dev)
+{
+	u16 base = MT_BEACON_BASE;
+	u32 regs[4] = {};
+	int i;
+
+	for (i = 0; i < 16; i++) {
+		u16 addr = dev->beacon_offsets[i];
+
+		regs[i / 4] |= ((addr - base) / 64) << (8 * (i % 4));
+	}
+
+	for (i = 0; i < 4; i++)
+		mt76_wr(dev, MT_BCN_OFFSET(i), regs[i]);
+}
+
+int mt76x2_mac_reset(struct mt76x2_dev *dev, bool hard)
+{
+	static const u8 null_addr[ETH_ALEN] = {};
+	const u8 *macaddr = dev->mt76.macaddr;
+	u32 val;
+	int i, k;
+
+	if (!mt76x2_wait_for_mac(dev))
+		return -ETIMEDOUT;
+
+	val = mt76_rr(dev, MT_WPDMA_GLO_CFG);
+
+	val &= ~(MT_WPDMA_GLO_CFG_TX_DMA_EN |
+		 MT_WPDMA_GLO_CFG_TX_DMA_BUSY |
+		 MT_WPDMA_GLO_CFG_RX_DMA_EN |
+		 MT_WPDMA_GLO_CFG_RX_DMA_BUSY |
+		 MT_WPDMA_GLO_CFG_DMA_BURST_SIZE);
+	val |= FIELD_PREP(MT_WPDMA_GLO_CFG_DMA_BURST_SIZE, 3);
+
+	mt76_wr(dev, MT_WPDMA_GLO_CFG, val);
+
+	mt76x2_mac_pbf_init(dev);
+	mt76_write_mac_initvals(dev);
+	mt76x2_fixup_xtal(dev);
+
+	mt76_clear(dev, MT_MAC_SYS_CTRL,
+		   MT_MAC_SYS_CTRL_RESET_CSR |
+		   MT_MAC_SYS_CTRL_RESET_BBP);
+
+	if (is_mt7612(dev))
+		mt76_clear(dev, MT_COEXCFG0, MT_COEXCFG0_COEX_EN);
+
+	mt76_set(dev, MT_EXT_CCA_CFG, 0x0000f000);
+	mt76_clear(dev, MT_TX_ALC_CFG_4, BIT(31));
+
+	mt76_wr(dev, MT_RF_BYPASS_0, 0x06000000);
+	mt76_wr(dev, MT_RF_SETTING_0, 0x08800000);
+	usleep_range(5000, 10000);
+	mt76_wr(dev, MT_RF_BYPASS_0, 0x00000000);
+
+	mt76_wr(dev, MT_MCU_CLOCK_CTL, 0x1401);
+	mt76_clear(dev, MT_FCE_L2_STUFF, MT_FCE_L2_STUFF_WR_MPDU_LEN_EN);
+
+	mt76_wr(dev, MT_MAC_ADDR_DW0, get_unaligned_le32(macaddr));
+	mt76_wr(dev, MT_MAC_ADDR_DW1, get_unaligned_le16(macaddr + 4));
+
+	mt76_wr(dev, MT_MAC_BSSID_DW0, get_unaligned_le32(macaddr));
+	mt76_wr(dev, MT_MAC_BSSID_DW1, get_unaligned_le16(macaddr + 4) |
+		FIELD_PREP(MT_MAC_BSSID_DW1_MBSS_MODE, 3) | /* 8 beacons */
+		MT_MAC_BSSID_DW1_MBSS_LOCAL_BIT);
+
+	/* Fire a pre-TBTT interrupt 8 ms before TBTT */
+	mt76_rmw_field(dev, MT_INT_TIMER_CFG, MT_INT_TIMER_CFG_PRE_TBTT,
+		       8 << 4);
+	mt76_rmw_field(dev, MT_INT_TIMER_CFG, MT_INT_TIMER_CFG_GP_TIMER,
+		       MT_DFS_GP_INTERVAL);
+	mt76_wr(dev, MT_INT_TIMER_EN, 0);
+
+	mt76_wr(dev, MT_BCN_BYPASS_MASK, 0xffff);
+	if (!hard)
+		return 0;
+
+	for (i = 0; i < 256 / 32; i++)
+		mt76_wr(dev, MT_WCID_DROP_BASE + i * 4, 0);
+
+	for (i = 0; i < 256; i++)
+		mt76x2_mac_wcid_setup(dev, i, 0, NULL);
+
+	for (i = 0; i < 16; i++)
+		for (k = 0; k < 4; k++)
+			mt76x2_mac_shared_key_setup(dev, i, k, NULL);
+
+	for (i = 0; i < 8; i++) {
+		mt76x2_mac_set_bssid(dev, i, null_addr);
+		mt76x2_mac_set_beacon(dev, i, NULL);
+	}
+
+	for (i = 0; i < 16; i++)
+		mt76_rr(dev, MT_TX_STAT_FIFO);
+
+	mt76_wr(dev, MT_CH_TIME_CFG,
+		MT_CH_TIME_CFG_TIMER_EN |
+		MT_CH_TIME_CFG_TX_AS_BUSY |
+		MT_CH_TIME_CFG_RX_AS_BUSY |
+		MT_CH_TIME_CFG_NAV_AS_BUSY |
+		MT_CH_TIME_CFG_EIFS_AS_BUSY |
+		FIELD_PREP(MT_CH_TIME_CFG_CH_TIMER_CLR, 1));
+
+	mt76x2_init_beacon_offsets(dev);
+
+	mt76x2_set_tx_ackto(dev);
+
+	return 0;
+}
+
+int mt76x2_mac_start(struct mt76x2_dev *dev)
+{
+	int i;
+
+	for (i = 0; i < 16; i++)
+		mt76_rr(dev, MT_TX_AGG_CNT(i));
+
+	for (i = 0; i < 16; i++)
+		mt76_rr(dev, MT_TX_STAT_FIFO);
+
+	memset(dev->aggr_stats, 0, sizeof(dev->aggr_stats));
+
+	mt76_wr(dev, MT_MAC_SYS_CTRL, MT_MAC_SYS_CTRL_ENABLE_TX);
+	wait_for_wpdma(dev);
+	usleep_range(50, 100);
+
+	mt76_set(dev, MT_WPDMA_GLO_CFG,
+		 MT_WPDMA_GLO_CFG_TX_DMA_EN |
+		 MT_WPDMA_GLO_CFG_RX_DMA_EN);
+
+	mt76_clear(dev, MT_WPDMA_GLO_CFG, MT_WPDMA_GLO_CFG_TX_WRITEBACK_DONE);
+
+	mt76_wr(dev, MT_RX_FILTR_CFG, dev->rxfilter);
+
+	mt76_wr(dev, MT_MAC_SYS_CTRL,
+		MT_MAC_SYS_CTRL_ENABLE_TX |
+		MT_MAC_SYS_CTRL_ENABLE_RX);
+
+	mt76x2_irq_enable(dev, MT_INT_RX_DONE_ALL | MT_INT_TX_DONE_ALL |
+			       MT_INT_TX_STAT);
+
+	return 0;
+}
+
+void mt76x2_mac_stop(struct mt76x2_dev *dev, bool force)
+{
+	bool stopped = false;
+	u32 rts_cfg;
+	int i;
+
+	mt76_wr(dev, MT_MAC_SYS_CTRL, 0);
+
+	rts_cfg = mt76_rr(dev, MT_TX_RTS_CFG);
+	mt76_wr(dev, MT_TX_RTS_CFG, rts_cfg & ~MT_TX_RTS_CFG_RETRY_LIMIT);
+
+	/* Wait for MAC to become idle */
+	for (i = 0; i < 300; i++) {
+		if (mt76_rr(dev, MT_MAC_STATUS) &
+		    (MT_MAC_STATUS_RX | MT_MAC_STATUS_TX))
+			continue;
+
+		if (mt76_rr(dev, MT_BBP(IBI, 12)))
+			continue;
+
+		stopped = true;
+		break;
+	}
+
+	if (force && !stopped) {
+		mt76_set(dev, MT_BBP(CORE, 4), BIT(1));
+		mt76_clear(dev, MT_BBP(CORE, 4), BIT(1));
+
+		mt76_set(dev, MT_BBP(CORE, 4), BIT(0));
+		mt76_clear(dev, MT_BBP(CORE, 4), BIT(0));
+	}
+
+	mt76_wr(dev, MT_TX_RTS_CFG, rts_cfg);
+}
+
+void mt76x2_mac_resume(struct mt76x2_dev *dev)
+{
+	mt76_wr(dev, MT_MAC_SYS_CTRL,
+		MT_MAC_SYS_CTRL_ENABLE_TX |
+		MT_MAC_SYS_CTRL_ENABLE_RX);
+}
+
+static void
+mt76x2_power_on_rf_patch(struct mt76x2_dev *dev)
+{
+	mt76_set(dev, 0x10130, BIT(0) | BIT(16));
+	udelay(1);
+
+	mt76_clear(dev, 0x1001c, 0xff);
+	mt76_set(dev, 0x1001c, 0x30);
+
+	mt76_wr(dev, 0x10014, 0x484f);
+	udelay(1);
+
+	mt76_set(dev, 0x10130, BIT(17));
+	udelay(125);
+
+	mt76_clear(dev, 0x10130, BIT(16));
+	udelay(50);
+
+	mt76_set(dev, 0x1014c, BIT(19) | BIT(20));
+}
+
+static void
+mt76x2_power_on_rf(struct mt76x2_dev *dev, int unit)
+{
+	int shift = unit ? 8 : 0;
+
+	/* Enable RF BG */
+	mt76_set(dev, 0x10130, BIT(0) << shift);
+	udelay(10);
+
+	/* Enable RFDIG LDO/AFE/ABB/ADDA */
+	mt76_set(dev, 0x10130, (BIT(1) | BIT(3) | BIT(4) | BIT(5)) << shift);
+	udelay(10);
+
+	/* Switch RFDIG power to internal LDO */
+	mt76_clear(dev, 0x10130, BIT(2) << shift);
+	udelay(10);
+
+	mt76x2_power_on_rf_patch(dev);
+
+	mt76_set(dev, 0x530, 0xf);
+}
+
+static void
+mt76x2_power_on(struct mt76x2_dev *dev)
+{
+	u32 val;
+
+	/* Turn on WL MTCMOS */
+	mt76_set(dev, MT_WLAN_MTC_CTRL, MT_WLAN_MTC_CTRL_MTCMOS_PWR_UP);
+
+	val = MT_WLAN_MTC_CTRL_STATE_UP |
+	      MT_WLAN_MTC_CTRL_PWR_ACK |
+	      MT_WLAN_MTC_CTRL_PWR_ACK_S;
+
+	mt76_poll(dev, MT_WLAN_MTC_CTRL, val, val, 1000);
+
+	mt76_clear(dev, MT_WLAN_MTC_CTRL, 0x7f << 16);
+	udelay(10);
+
+	mt76_clear(dev, MT_WLAN_MTC_CTRL, 0xf << 24);
+	udelay(10);
+
+	mt76_set(dev, MT_WLAN_MTC_CTRL, 0xf << 24);
+	mt76_clear(dev, MT_WLAN_MTC_CTRL, 0xfff);
+
+	/* Release AD/DA power down */
+	mt76_clear(dev, 0x11204, BIT(3));
+
+	/* WLAN function enable */
+	mt76_set(dev, 0x10080, BIT(0));
+
+	/* Release BBP software reset */
+	mt76_clear(dev, 0x10064, BIT(18));
+
+	mt76x2_power_on_rf(dev, 0);
+	mt76x2_power_on_rf(dev, 1);
+}
+
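+/* ACK timeout = slot time (plus coverage class extension) + OFDM SIFS. */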
+void mt76x2_set_tx_ackto(struct mt76x2_dev *dev)
+{
+	u8 ackto, sifs, slottime = dev->slottime;
+
+	slottime += 3 * dev->coverage_class;
+
+	sifs = mt76_get_field(dev, MT_XIFS_TIME_CFG,
+			      MT_XIFS_TIME_CFG_OFDM_SIFS);
+
+	ackto = slottime + sifs;
+	mt76_rmw_field(dev, MT_TX_TIMEOUT_CFG,
+		       MT_TX_TIMEOUT_CFG_ACKTO, ackto);
+}
+
+static void
+mt76x2_set_wlan_state(struct mt76x2_dev *dev, bool enable)
+{
+	u32 val = mt76_rr(dev, MT_WLAN_FUN_CTRL);
+
+	if (enable)
+		val |= (MT_WLAN_FUN_CTRL_WLAN_EN |
+			MT_WLAN_FUN_CTRL_WLAN_CLK_EN);
+	else
+		val &= ~(MT_WLAN_FUN_CTRL_WLAN_EN |
+			 MT_WLAN_FUN_CTRL_WLAN_CLK_EN);
+
+	mt76_wr(dev, MT_WLAN_FUN_CTRL, val);
+	udelay(20);
+}
+
+static void
+mt76x2_reset_wlan(struct mt76x2_dev *dev, bool enable)
+{
+	u32 val;
+
+	val = mt76_rr(dev, MT_WLAN_FUN_CTRL);
+
+	val &= ~MT_WLAN_FUN_CTRL_FRC_WL_ANT_SEL;
+
+	if (val & MT_WLAN_FUN_CTRL_WLAN_EN) {
+		val |= MT_WLAN_FUN_CTRL_WLAN_RESET_RF;
+		mt76_wr(dev, MT_WLAN_FUN_CTRL, val);
+		udelay(20);
+
+		val &= ~MT_WLAN_FUN_CTRL_WLAN_RESET_RF;
+	}
+
+	mt76_wr(dev, MT_WLAN_FUN_CTRL, val);
+	udelay(20);
+
+	mt76x2_set_wlan_state(dev, enable);
+}
+
+int mt76x2_init_hardware(struct mt76x2_dev *dev)
+{
+	static const u16 beacon_offsets[16] = {
+		/* 1024 byte per beacon */
+		0xc000,
+		0xc400,
+		0xc800,
+		0xcc00,
+		0xd000,
+		0xd400,
+		0xd800,
+		0xdc00,
+
+		/* BSS idx 8-15 not used for beacons */
+		0xc000,
+		0xc000,
+		0xc000,
+		0xc000,
+		0xc000,
+		0xc000,
+		0xc000,
+		0xc000,
+	};
+	u32 val;
+	int ret;
+
+	dev->beacon_offsets = beacon_offsets;
+	tasklet_init(&dev->pre_tbtt_tasklet, mt76x2_pre_tbtt_tasklet,
+		     (unsigned long) dev);
+
+	dev->chainmask = 0x202;
+	dev->global_wcid.idx = 255;
+	dev->global_wcid.hw_key_idx = -1;
+	dev->slottime = 9;
+
+	val = mt76_rr(dev, MT_WPDMA_GLO_CFG);
+	val &= MT_WPDMA_GLO_CFG_DMA_BURST_SIZE |
+	       MT_WPDMA_GLO_CFG_BIG_ENDIAN |
+	       MT_WPDMA_GLO_CFG_HDR_SEG_LEN;
+	val |= MT_WPDMA_GLO_CFG_TX_WRITEBACK_DONE;
+	mt76_wr(dev, MT_WPDMA_GLO_CFG, val);
+
+	mt76x2_reset_wlan(dev, true);
+	mt76x2_power_on(dev);
+
+	ret = mt76x2_eeprom_init(dev);
+	if (ret)
+		return ret;
+
+	ret = mt76x2_mac_reset(dev, true);
+	if (ret)
+		return ret;
+
+	dev->rxfilter = mt76_rr(dev, MT_RX_FILTR_CFG);
+
+	ret = mt76x2_dma_init(dev);
+	if (ret)
+		return ret;
+
+	set_bit(MT76_STATE_INITIALIZED, &dev->mt76.state);
+	ret = mt76x2_mac_start(dev);
+	if (ret)
+		return ret;
+
+	ret = mt76x2_mcu_init(dev);
+	if (ret)
+		return ret;
+
+	mt76x2_mac_stop(dev, false);
+
+	return 0;
+}
+
+void mt76x2_stop_hardware(struct mt76x2_dev *dev)
+{
+	cancel_delayed_work_sync(&dev->cal_work);
+	cancel_delayed_work_sync(&dev->mac_work);
+	mt76x2_mcu_set_radio_state(dev, false);
+	mt76x2_mac_stop(dev, false);
+}
+
+void mt76x2_cleanup(struct mt76x2_dev *dev)
+{
+	tasklet_disable(&dev->dfs_pd.dfs_tasklet);
+	tasklet_disable(&dev->pre_tbtt_tasklet);
+	mt76x2_stop_hardware(dev);
+	mt76x2_dma_cleanup(dev);
+	mt76x2_mcu_cleanup(dev);
+}
+
+struct mt76x2_dev *mt76x2_alloc_device(struct device *pdev)
+{
+	static const struct mt76_driver_ops drv_ops = {
+		.txwi_size = sizeof(struct mt76x2_txwi),
+		.update_survey = mt76x2_update_channel,
+		.tx_prepare_skb = mt76x2_tx_prepare_skb,
+		.tx_complete_skb = mt76x2_tx_complete_skb,
+		.rx_skb = mt76x2_queue_rx_skb,
+		.rx_poll_complete = mt76x2_rx_poll_complete,
+	};
+	struct ieee80211_hw *hw;
+	struct mt76x2_dev *dev;
+
+	hw = ieee80211_alloc_hw(sizeof(*dev), &mt76x2_ops);
+	if (!hw)
+		return NULL;
+
+	dev = hw->priv;
+	dev->mt76.dev = pdev;
+	dev->mt76.hw = hw;
+	dev->mt76.drv = &drv_ops;
+	mutex_init(&dev->mutex);
+	spin_lock_init(&dev->irq_lock);
+
+	return dev;
+}
+
+static void mt76x2_regd_notifier(struct wiphy *wiphy,
+				 struct regulatory_request *request)
+{
+	struct ieee80211_hw *hw = wiphy_to_ieee80211_hw(wiphy);
+	struct mt76x2_dev *dev = hw->priv;
+
+	mt76x2_dfs_set_domain(dev, request->dfs_region);
+}
+
+#define CCK_RATE(_idx, _rate) {					\
+	.bitrate = _rate,					\
+	.flags = IEEE80211_RATE_SHORT_PREAMBLE,			\
+	.hw_value = (MT_PHY_TYPE_CCK << 8) | _idx,		\
+	.hw_value_short = (MT_PHY_TYPE_CCK << 8) | (8 + _idx),	\
+}
+
+#define OFDM_RATE(_idx, _rate) {				\
+	.bitrate = _rate,					\
+	.hw_value = (MT_PHY_TYPE_OFDM << 8) | _idx,		\
+	.hw_value_short = (MT_PHY_TYPE_OFDM << 8) | _idx,	\
+}
+
+static struct ieee80211_rate mt76x2_rates[] = {
+	CCK_RATE(0, 10),
+	CCK_RATE(1, 20),
+	CCK_RATE(2, 55),
+	CCK_RATE(3, 110),
+	OFDM_RATE(0, 60),
+	OFDM_RATE(1, 90),
+	OFDM_RATE(2, 120),
+	OFDM_RATE(3, 180),
+	OFDM_RATE(4, 240),
+	OFDM_RATE(5, 360),
+	OFDM_RATE(6, 480),
+	OFDM_RATE(7, 540),
+};
+
+static const struct ieee80211_iface_limit if_limits[] = {
+	{
+		.max = 1,
+		.types = BIT(NL80211_IFTYPE_ADHOC)
+	}, {
+		.max = 8,
+		.types = BIT(NL80211_IFTYPE_STATION) |
+#ifdef CONFIG_MAC80211_MESH
+			 BIT(NL80211_IFTYPE_MESH_POINT) |
+#endif
+			 BIT(NL80211_IFTYPE_AP)
+	 },
+};
+
+static const struct ieee80211_iface_combination if_comb[] = {
+	{
+		.limits = if_limits,
+		.n_limits = ARRAY_SIZE(if_limits),
+		.max_interfaces = 8,
+		.num_different_channels = 1,
+		.beacon_int_infra_match = true,
+		.radar_detect_widths = BIT(NL80211_CHAN_WIDTH_20_NOHT) |
+				       BIT(NL80211_CHAN_WIDTH_20) |
+				       BIT(NL80211_CHAN_WIDTH_40) |
+				       BIT(NL80211_CHAN_WIDTH_80),
+	}
+};
+
+static void mt76x2_led_set_config(struct mt76_dev *mt76, u8 delay_on,
+				  u8 delay_off)
+{
+	struct mt76x2_dev *dev = container_of(mt76, struct mt76x2_dev,
+					      mt76);
+	u32 val;
+
+	val = MT_LED_STATUS_DURATION(0xff) |
+	      MT_LED_STATUS_OFF(delay_off) |
+	      MT_LED_STATUS_ON(delay_on);
+
+	mt76_wr(dev, MT_LED_S0(mt76->led_pin), val);
+	mt76_wr(dev, MT_LED_S1(mt76->led_pin), val);
+
+	val = MT_LED_CTRL_REPLAY(mt76->led_pin) |
+	      MT_LED_CTRL_KICK(mt76->led_pin);
+	if (mt76->led_al)
+		val |= MT_LED_CTRL_POLARITY(mt76->led_pin);
+	mt76_wr(dev, MT_LED_CTRL, val);
+}
+
+static int mt76x2_led_set_blink(struct led_classdev *led_cdev,
+				unsigned long *delay_on,
+				unsigned long *delay_off)
+{
+	struct mt76_dev *mt76 = container_of(led_cdev, struct mt76_dev,
+					     led_cdev);
+	u8 delta_on, delta_off;
+
+	delta_off = max_t(u8, *delay_off / 10, 1);
+	delta_on = max_t(u8, *delay_on / 10, 1);
+
+	mt76x2_led_set_config(mt76, delta_on, delta_off);
+	return 0;
+}
+
+static void mt76x2_led_set_brightness(struct led_classdev *led_cdev,
+				      enum led_brightness brightness)
+{
+	struct mt76_dev *mt76 = container_of(led_cdev, struct mt76_dev,
+					     led_cdev);
+
+	if (!brightness)
+		mt76x2_led_set_config(mt76, 0, 0xff);
+	else
+		mt76x2_led_set_config(mt76, 0xff, 0);
+}
+
+static void
+mt76x2_init_txpower(struct mt76x2_dev *dev,
+		    struct ieee80211_supported_band *sband)
+{
+	struct ieee80211_channel *chan;
+	struct mt76x2_tx_power_info txp;
+	struct mt76_rate_power t = {};
+	int target_power;
+	int i;
+
+	for (i = 0; i < sband->n_channels; i++) {
+		chan = &sband->channels[i];
+
+		mt76x2_get_power_info(dev, &txp, chan);
+
+		target_power = max_t(int, (txp.chain[0].target_power +
+					   txp.chain[0].delta),
+					  (txp.chain[1].target_power +
+					   txp.chain[1].delta));
+
+		mt76x2_get_rate_power(dev, &t, chan);
+
+		chan->max_power = mt76x2_get_max_rate_power(&t) +
+				  target_power;
+		chan->max_power /= 2;
+
+		/* convert to combined output power on 2x2 devices */
+		chan->max_power += 3;
+	}
+}
+
+int mt76x2_register_device(struct mt76x2_dev *dev)
+{
+	struct ieee80211_hw *hw = mt76_hw(dev);
+	struct wiphy *wiphy = hw->wiphy;
+	void *status_fifo;
+	int fifo_size;
+	int i, ret;
+
+	fifo_size = roundup_pow_of_two(32 * sizeof(struct mt76x2_tx_status));
+	status_fifo = devm_kzalloc(dev->mt76.dev, fifo_size, GFP_KERNEL);
+	if (!status_fifo)
+		return -ENOMEM;
+
+	kfifo_init(&dev->txstatus_fifo, status_fifo, fifo_size);
+
+	ret = mt76x2_init_hardware(dev);
+	if (ret)
+		return ret;
+
+	hw->queues = 4;
+	hw->max_rates = 1;
+	hw->max_report_rates = 7;
+	hw->max_rate_tries = 1;
+	hw->extra_tx_headroom = 2;
+
+	hw->sta_data_size = sizeof(struct mt76x2_sta);
+	hw->vif_data_size = sizeof(struct mt76x2_vif);
+
+	for (i = 0; i < ARRAY_SIZE(dev->macaddr_list); i++) {
+		u8 *addr = dev->macaddr_list[i].addr;
+
+		memcpy(addr, dev->mt76.macaddr, ETH_ALEN);
+
+		if (!i)
+			continue;
+
+		addr[0] |= BIT(1);
+		addr[0] ^= ((i - 1) << 2);
+	}
+	wiphy->addresses = dev->macaddr_list;
+	wiphy->n_addresses = ARRAY_SIZE(dev->macaddr_list);
+
+	wiphy->iface_combinations = if_comb;
+	wiphy->n_iface_combinations = ARRAY_SIZE(if_comb);
+
+	wiphy->reg_notifier = mt76x2_regd_notifier;
+
+	wiphy_ext_feature_set(wiphy, NL80211_EXT_FEATURE_VHT_IBSS);
+
+	ieee80211_hw_set(hw, SUPPORTS_HT_CCK_RATES);
+	ieee80211_hw_set(hw, SUPPORTS_REORDERING_BUFFER);
+
+	INIT_DELAYED_WORK(&dev->cal_work, mt76x2_phy_calibrate);
+	INIT_DELAYED_WORK(&dev->mac_work, mt76x2_mac_work);
+
+	dev->mt76.sband_2g.sband.ht_cap.cap |= IEEE80211_HT_CAP_LDPC_CODING;
+	dev->mt76.sband_5g.sband.ht_cap.cap |= IEEE80211_HT_CAP_LDPC_CODING;
+
+	mt76x2_dfs_init_detector(dev);
+
+	/* init led callbacks */
+	dev->mt76.led_cdev.brightness_set = mt76x2_led_set_brightness;
+	dev->mt76.led_cdev.blink_set = mt76x2_led_set_blink;
+
+	ret = mt76_register_device(&dev->mt76, true, mt76x2_rates,
+				   ARRAY_SIZE(mt76x2_rates));
+	if (ret)
+		goto fail;
+
+	mt76x2_init_debugfs(dev);
+	mt76x2_init_txpower(dev, &dev->mt76.sband_2g.sband);
+	mt76x2_init_txpower(dev, &dev->mt76.sband_5g.sband);
+
+	return 0;
+
+fail:
+	mt76x2_stop_hardware(dev);
+	return ret;
+}
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2_mac.c b/drivers/net/wireless/mediatek/mt76/mt76x2_mac.c
new file mode 100644
index 000000000000..6c30b5eaa9ca
--- /dev/null
+++ b/drivers/net/wireless/mediatek/mt76/mt76x2_mac.c
@@ -0,0 +1,839 @@
+/*
+ * Copyright (C) 2016 Felix Fietkau <nbd@nbd.name>
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include <linux/delay.h>
+#include "mt76x2.h"
+#include "mt76x2_mcu.h"
+#include "mt76x2_eeprom.h"
+#include "mt76x2_trace.h"
+
+void mt76x2_mac_set_bssid(struct mt76x2_dev *dev, u8 idx, const u8 *addr)
+{
+	idx &= 7;
+	mt76_wr(dev, MT_MAC_APC_BSSID_L(idx), get_unaligned_le32(addr));
+	mt76_rmw_field(dev, MT_MAC_APC_BSSID_H(idx), MT_MAC_APC_BSSID_H_ADDR,
+		       get_unaligned_le16(addr + 4));
+}
+
+static int
+mt76x2_mac_process_rate(struct mt76_rx_status *status, u16 rate)
+{
+	u8 idx = FIELD_GET(MT_RXWI_RATE_INDEX, rate);
+
+	switch (FIELD_GET(MT_RXWI_RATE_PHY, rate)) {
+	case MT_PHY_TYPE_OFDM:
+		if (idx >= 8)
+			idx = 0;
+
+		if (status->band == NL80211_BAND_2GHZ)
+			idx += 4;
+
+		status->rate_idx = idx;
+		return 0;
+	case MT_PHY_TYPE_CCK:
+		if (idx >= 8) {
+			idx -= 8;
+			status->enc_flags |= RX_ENC_FLAG_SHORTPRE;
+		}
+
+		if (idx >= 4)
+			idx = 0;
+
+		status->rate_idx = idx;
+		return 0;
+	case MT_PHY_TYPE_HT_GF:
+		status->enc_flags |= RX_ENC_FLAG_HT_GF;
+		/* fall through */
+	case MT_PHY_TYPE_HT:
+		status->encoding = RX_ENC_HT;
+		status->rate_idx = idx;
+		break;
+	case MT_PHY_TYPE_VHT:
+		status->encoding = RX_ENC_VHT;
+		status->rate_idx = FIELD_GET(MT_RATE_INDEX_VHT_IDX, idx);
+		status->nss = FIELD_GET(MT_RATE_INDEX_VHT_NSS, idx) + 1;
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	if (rate & MT_RXWI_RATE_LDPC)
+		status->enc_flags |= RX_ENC_FLAG_LDPC;
+
+	if (rate & MT_RXWI_RATE_SGI)
+		status->enc_flags |= RX_ENC_FLAG_SHORT_GI;
+
+	if (rate & MT_RXWI_RATE_STBC)
+		status->enc_flags |= 1 << RX_ENC_FLAG_STBC_SHIFT;
+
+	switch (FIELD_GET(MT_RXWI_RATE_BW, rate)) {
+	case MT_PHY_BW_20:
+		break;
+	case MT_PHY_BW_40:
+		status->bw = RATE_INFO_BW_40;
+		break;
+	case MT_PHY_BW_80:
+		status->bw = RATE_INFO_BW_80;
+		break;
+	default:
+		break;
+	}
+
+	return 0;
+}
+
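+/*
+ * Encode a mac80211 TX rate (CCK/OFDM/HT/VHT, bandwidth, short GI) into
+ * the hardware rate value used in the TXWI and report the implied NSS.
+ */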
+static __le16
+mt76x2_mac_tx_rate_val(struct mt76x2_dev *dev,
+		       const struct ieee80211_tx_rate *rate, u8 *nss_val)
+{
+	u16 rateval;
+	u8 phy, rate_idx;
+	u8 nss = 1;
+	u8 bw = 0;
+
+	if (rate->flags & IEEE80211_TX_RC_VHT_MCS) {
+		rate_idx = rate->idx;
+		nss = 1 + (rate->idx >> 4);
+		phy = MT_PHY_TYPE_VHT;
+		if (rate->flags & IEEE80211_TX_RC_80_MHZ_WIDTH)
+			bw = 2;
+		else if (rate->flags & IEEE80211_TX_RC_40_MHZ_WIDTH)
+			bw = 1;
+	} else if (rate->flags & IEEE80211_TX_RC_MCS) {
+		rate_idx = rate->idx;
+		nss = 1 + (rate->idx >> 3);
+		phy = MT_PHY_TYPE_HT;
+		if (rate->flags & IEEE80211_TX_RC_GREEN_FIELD)
+			phy = MT_PHY_TYPE_HT_GF;
+		if (rate->flags & IEEE80211_TX_RC_40_MHZ_WIDTH)
+			bw = 1;
+	} else {
+		const struct ieee80211_rate *r;
+		int band = dev->mt76.chandef.chan->band;
+		u16 val;
+
+		r = &mt76_hw(dev)->wiphy->bands[band]->bitrates[rate->idx];
+		if (rate->flags & IEEE80211_TX_RC_USE_SHORT_PREAMBLE)
+			val = r->hw_value_short;
+		else
+			val = r->hw_value;
+
+		phy = val >> 8;
+		rate_idx = val & 0xff;
+		bw = 0;
+	}
+
+	rateval = FIELD_PREP(MT_RXWI_RATE_INDEX, rate_idx);
+	rateval |= FIELD_PREP(MT_RXWI_RATE_PHY, phy);
+	rateval |= FIELD_PREP(MT_RXWI_RATE_BW, bw);
+	if (rate->flags & IEEE80211_TX_RC_SHORT_GI)
+		rateval |= MT_RXWI_RATE_SGI;
+
+	*nss_val = nss;
+	return cpu_to_le16(rateval);
+}
+
+void mt76x2_mac_wcid_set_drop(struct mt76x2_dev *dev, u8 idx, bool drop)
+{
+	u32 val = mt76_rr(dev, MT_WCID_DROP(idx));
+	u32 bit = MT_WCID_DROP_MASK(idx);
+
+	/* prevent unnecessary writes */
+	if ((val & bit) != (bit * drop))
+		mt76_wr(dev, MT_WCID_DROP(idx), (val & ~bit) | (bit * drop));
+}
+
+void mt76x2_mac_wcid_set_rate(struct mt76x2_dev *dev, struct mt76_wcid *wcid,
+			      const struct ieee80211_tx_rate *rate)
+{
+	spin_lock_bh(&dev->mt76.lock);
+	wcid->tx_rate = mt76x2_mac_tx_rate_val(dev, rate, &wcid->tx_rate_nss);
+	wcid->tx_rate_set = true;
+	spin_unlock_bh(&dev->mt76.lock);
+}
+
+void mt76x2_mac_write_txwi(struct mt76x2_dev *dev, struct mt76x2_txwi *txwi,
+			   struct sk_buff *skb, struct mt76_wcid *wcid,
+			   struct ieee80211_sta *sta)
+{
+	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
+	struct ieee80211_tx_rate *rate = &info->control.rates[0];
+	struct ieee80211_key_conf *key = info->control.hw_key;
+	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
+	u16 rate_ht_mask = FIELD_PREP(MT_RXWI_RATE_PHY, BIT(1) | BIT(2));
+	u16 txwi_flags = 0;
+	u8 nss;
+	s8 txpwr_adj, max_txpwr_adj;
+	u8 ccmp_pn[8];
+
+	memset(txwi, 0, sizeof(*txwi));
+
+	if (wcid)
+		txwi->wcid = wcid->idx;
+	else
+		txwi->wcid = 0xff;
+
+	txwi->pktid = 1;
+
+	if (wcid && wcid->sw_iv && key) {
+		u64 pn = atomic64_inc_return(&key->tx_pn);
+		ccmp_pn[0] = pn;
+		ccmp_pn[1] = pn >> 8;
+		ccmp_pn[2] = 0;
+		ccmp_pn[3] = 0x20 | (key->keyidx << 6);
+		ccmp_pn[4] = pn >> 16;
+		ccmp_pn[5] = pn >> 24;
+		ccmp_pn[6] = pn >> 32;
+		ccmp_pn[7] = pn >> 40;
+		txwi->iv = *((u32 *) &ccmp_pn[0]);
+		/* EIV holds PN bytes 2..5, i.e. ccmp_pn[4..7] */
+		txwi->eiv = *((u32 *) &ccmp_pn[4]);
+	}
+
+	spin_lock_bh(&dev->mt76.lock);
+	if (wcid && (rate->idx < 0 || !rate->count)) {
+		txwi->rate = wcid->tx_rate;
+		max_txpwr_adj = wcid->max_txpwr_adj;
+		nss = wcid->tx_rate_nss;
+	} else {
+		txwi->rate = mt76x2_mac_tx_rate_val(dev, rate, &nss);
+		max_txpwr_adj = mt76x2_tx_get_max_txpwr_adj(dev, rate);
+	}
+	spin_unlock_bh(&dev->mt76.lock);
+
+	txpwr_adj = mt76x2_tx_get_txpwr_adj(dev, dev->txpower_conf,
+					    max_txpwr_adj);
+	txwi->ctl2 = FIELD_PREP(MT_TX_PWR_ADJ, txpwr_adj);
+
+	if (mt76xx_rev(dev) >= MT76XX_REV_E4)
+		txwi->txstream = 0x13;
+	else if (mt76xx_rev(dev) >= MT76XX_REV_E3 &&
+		 !(txwi->rate & cpu_to_le16(rate_ht_mask)))
+		txwi->txstream = 0x93;
+
+	if (info->flags & IEEE80211_TX_CTL_LDPC)
+		txwi->rate |= cpu_to_le16(MT_RXWI_RATE_LDPC);
+	if ((info->flags & IEEE80211_TX_CTL_STBC) && nss == 1)
+		txwi->rate |= cpu_to_le16(MT_RXWI_RATE_STBC);
+	if (nss > 1 && sta && sta->smps_mode == IEEE80211_SMPS_DYNAMIC)
+		txwi_flags |= MT_TXWI_FLAGS_MMPS;
+	if (!(info->flags & IEEE80211_TX_CTL_NO_ACK))
+		txwi->ack_ctl |= MT_TXWI_ACK_CTL_REQ;
+	if (info->flags & IEEE80211_TX_CTL_ASSIGN_SEQ)
+		txwi->ack_ctl |= MT_TXWI_ACK_CTL_NSEQ;
+	if (info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE)
+		txwi->pktid |= MT_TXWI_PKTID_PROBE;
+	if ((info->flags & IEEE80211_TX_CTL_AMPDU) && sta) {
+		u8 ba_size = IEEE80211_MIN_AMPDU_BUF;
+
+		ba_size <<= sta->ht_cap.ampdu_factor;
+		ba_size = min_t(int, 63, ba_size - 1);
+		if (info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE)
+			ba_size = 0;
+		txwi->ack_ctl |= FIELD_PREP(MT_TXWI_ACK_CTL_BA_WINDOW, ba_size);
+
+		txwi_flags |= MT_TXWI_FLAGS_AMPDU |
+			 FIELD_PREP(MT_TXWI_FLAGS_MPDU_DENSITY,
+				    sta->ht_cap.ampdu_density);
+	}
+
+	if (ieee80211_is_probe_resp(hdr->frame_control) ||
+	    ieee80211_is_beacon(hdr->frame_control))
+		txwi_flags |= MT_TXWI_FLAGS_TS;
+
+	txwi->flags |= cpu_to_le16(txwi_flags);
+	txwi->len_ctl = cpu_to_le16(skb->len);
+}
+
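+/* Strip the alignment pad between the 802.11 header and the payload. */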
+static void mt76x2_remove_hdr_pad(struct sk_buff *skb, int len)
+{
+	int hdrlen;
+
+	if (!len)
+		return;
+
+	hdrlen = ieee80211_get_hdrlen_from_skb(skb);
+	memmove(skb->data + len, skb->data, hdrlen);
+	skb_pull(skb, len);
+}
+
+static struct mt76_wcid *
+mt76x2_rx_get_sta_wcid(struct mt76x2_dev *dev, u8 idx, bool unicast)
+{
+	struct mt76x2_sta *sta;
+	struct mt76_wcid *wcid;
+
+	if (idx >= ARRAY_SIZE(dev->wcid))
+		return NULL;
+
+	wcid = rcu_dereference(dev->wcid[idx]);
+	if (unicast || !wcid)
+		return wcid;
+
+	sta = container_of(wcid, struct mt76x2_sta, wcid);
+	return &sta->vif->group_wcid;
+}
+
+int mt76x2_mac_process_rx(struct mt76x2_dev *dev, struct sk_buff *skb,
+			  void *rxi)
+{
+	struct mt76_rx_status *status = (struct mt76_rx_status *) skb->cb;
+	struct mt76x2_rxwi *rxwi = rxi;
+	u32 rxinfo = le32_to_cpu(rxwi->rxinfo);
+	u32 ctl = le32_to_cpu(rxwi->ctl);
+	u16 rate = le16_to_cpu(rxwi->rate);
+	u16 tid_sn = le16_to_cpu(rxwi->tid_sn);
+	bool unicast = rxwi->rxinfo & cpu_to_le32(MT_RXINFO_UNICAST);
+	int pad_len = 0;
+	u8 pn_len;
+	u8 wcid;
+	int len;
+
+	if (rxinfo & MT_RXINFO_L2PAD)
+		pad_len += 2;
+
+	if (rxinfo & MT_RXINFO_DECRYPT) {
+		status->flag |= RX_FLAG_DECRYPTED;
+		status->flag |= RX_FLAG_MMIC_STRIPPED;
+		status->flag |= RX_FLAG_MIC_STRIPPED;
+		status->flag |= RX_FLAG_IV_STRIPPED;
+	}
+
+	wcid = FIELD_GET(MT_RXWI_CTL_WCID, ctl);
+	status->wcid = mt76x2_rx_get_sta_wcid(dev, wcid, unicast);
+
+	len = FIELD_GET(MT_RXWI_CTL_MPDU_LEN, ctl);
+	pn_len = FIELD_GET(MT_RXINFO_PN_LEN, rxinfo);
+	if (pn_len) {
+		int offset = ieee80211_get_hdrlen_from_skb(skb) + pad_len;
+		u8 *data = skb->data + offset;
+
+		status->iv[0] = data[7];
+		status->iv[1] = data[6];
+		status->iv[2] = data[5];
+		status->iv[3] = data[4];
+		status->iv[4] = data[1];
+		status->iv[5] = data[0];
+
+		/*
+		 * Driver CCMP validation can't deal with fragments.
+		 * Let mac80211 take care of it.
+		 */
+		if (rxinfo & MT_RXINFO_FRAG) {
+			status->flag &= ~RX_FLAG_IV_STRIPPED;
+		} else {
+			pad_len += pn_len << 2;
+			len -= pn_len << 2;
+		}
+	}
+
+	mt76x2_remove_hdr_pad(skb, pad_len);
+
+	if (rxinfo & MT_RXINFO_BA)
+		status->aggr = true;
+
+	if (WARN_ON_ONCE(len > skb->len))
+		return -EINVAL;
+
+	pskb_trim(skb, len);
+	status->chains = BIT(0) | BIT(1);
+	status->chain_signal[0] = mt76x2_phy_get_rssi(dev, rxwi->rssi[0], 0);
+	status->chain_signal[1] = mt76x2_phy_get_rssi(dev, rxwi->rssi[1], 1);
+	status->signal = max(status->chain_signal[0], status->chain_signal[1]);
+	status->freq = dev->mt76.chandef.chan->center_freq;
+	status->band = dev->mt76.chandef.chan->band;
+
+	status->tid = FIELD_GET(MT_RXWI_TID, tid_sn);
+	status->seqno = FIELD_GET(MT_RXWI_SN, tid_sn);
+
+	return mt76x2_mac_process_rate(status, rate);
+}
+
+static int
+mt76x2_mac_process_tx_rate(struct ieee80211_tx_rate *txrate, u16 rate,
+			   enum nl80211_band band)
+{
+	u8 idx = FIELD_GET(MT_RXWI_RATE_INDEX, rate);
+
+	txrate->idx = 0;
+	txrate->flags = 0;
+	txrate->count = 1;
+
+	switch (FIELD_GET(MT_RXWI_RATE_PHY, rate)) {
+	case MT_PHY_TYPE_OFDM:
+		if (band == NL80211_BAND_2GHZ)
+			idx += 4;
+
+		txrate->idx = idx;
+		return 0;
+	case MT_PHY_TYPE_CCK:
+		if (idx >= 8)
+			idx -= 8;
+
+		txrate->idx = idx;
+		return 0;
+	case MT_PHY_TYPE_HT_GF:
+		txrate->flags |= IEEE80211_TX_RC_GREEN_FIELD;
+		/* fall through */
+	case MT_PHY_TYPE_HT:
+		txrate->flags |= IEEE80211_TX_RC_MCS;
+		txrate->idx = idx;
+		break;
+	case MT_PHY_TYPE_VHT:
+		txrate->flags |= IEEE80211_TX_RC_VHT_MCS;
+		txrate->idx = idx;
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	switch (FIELD_GET(MT_RXWI_RATE_BW, rate)) {
+	case MT_PHY_BW_20:
+		break;
+	case MT_PHY_BW_40:
+		txrate->flags |= IEEE80211_TX_RC_40_MHZ_WIDTH;
+		break;
+	case MT_PHY_BW_80:
+		txrate->flags |= IEEE80211_TX_RC_80_MHZ_WIDTH;
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	if (rate & MT_RXWI_RATE_SGI)
+		txrate->flags |= IEEE80211_TX_RC_SHORT_GI;
+
+	return 0;
+}
+
+static void
+mt76x2_mac_fill_tx_status(struct mt76x2_dev *dev,
+			  struct ieee80211_tx_info *info,
+			  struct mt76x2_tx_status *st, int n_frames)
+{
+	struct ieee80211_tx_rate *rate = info->status.rates;
+	int cur_idx, last_rate;
+	int i;
+
+	if (!n_frames)
+		return;
+
+	last_rate = min_t(int, st->retry, IEEE80211_TX_MAX_RATES - 1);
+	mt76x2_mac_process_tx_rate(&rate[last_rate], st->rate,
+				 dev->mt76.chandef.chan->band);
+	if (last_rate < IEEE80211_TX_MAX_RATES - 1)
+		rate[last_rate + 1].idx = -1;
+
+	cur_idx = rate[last_rate].idx + st->retry;
+	for (i = 0; i <= last_rate; i++) {
+		rate[i].flags = rate[last_rate].flags;
+		rate[i].idx = max_t(int, 0, cur_idx - i);
+		rate[i].count = 1;
+	}
+
+	if (last_rate > 0)
+		rate[last_rate - 1].count = st->retry + 1 - last_rate;
+
+	info->status.ampdu_len = n_frames;
+	info->status.ampdu_ack_len = st->success ? n_frames : 0;
+
+	if (st->pktid & MT_TXWI_PKTID_PROBE)
+		info->flags |= IEEE80211_TX_CTL_RATE_CTRL_PROBE;
+
+	if (st->aggr)
+		info->flags |= IEEE80211_TX_CTL_AMPDU |
+			       IEEE80211_TX_STAT_AMPDU;
+
+	if (!st->ack_req)
+		info->flags |= IEEE80211_TX_CTL_NO_ACK;
+	else if (st->success)
+		info->flags |= IEEE80211_TX_STAT_ACK;
+}
+
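+/*
+ * Report TX status to mac80211. Consecutive entries for the same station
+ * and rate are merged into one A-MPDU report of up to 32 frames before
+ * being flushed.
+ */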
+static void
+mt76x2_send_tx_status(struct mt76x2_dev *dev, struct mt76x2_tx_status *stat,
+		      u8 *update)
+{
+	struct ieee80211_tx_info info = {};
+	struct ieee80211_sta *sta = NULL;
+	struct mt76_wcid *wcid = NULL;
+	struct mt76x2_sta *msta = NULL;
+
+	rcu_read_lock();
+	if (stat->wcid < ARRAY_SIZE(dev->wcid))
+		wcid = rcu_dereference(dev->wcid[stat->wcid]);
+
+	if (wcid) {
+		void *priv;
+
+		priv = msta = container_of(wcid, struct mt76x2_sta, wcid);
+		sta = container_of(priv, struct ieee80211_sta,
+				   drv_priv);
+	}
+
+	if (msta && stat->aggr) {
+		u32 stat_val, stat_cache;
+
+		stat_val = stat->rate;
+		stat_val |= ((u32) stat->retry) << 16;
+		stat_cache = msta->status.rate;
+		stat_cache |= ((u32) msta->status.retry) << 16;
+
+		if (*update == 0 && stat_val == stat_cache &&
+		    stat->wcid == msta->status.wcid && msta->n_frames < 32) {
+			msta->n_frames++;
+			goto out;
+		}
+
+		mt76x2_mac_fill_tx_status(dev, &info, &msta->status,
+					  msta->n_frames);
+
+		msta->status = *stat;
+		msta->n_frames = 1;
+		*update = 0;
+	} else {
+		mt76x2_mac_fill_tx_status(dev, &info, stat, 1);
+		*update = 1;
+	}
+
+	ieee80211_tx_status_noskb(mt76_hw(dev), sta, &info);
+
+out:
+	rcu_read_unlock();
+}
+
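+/*
+ * Drain the hardware TX status FIFO. In IRQ context entries are pushed
+ * to the software kfifo for deferred processing, otherwise they are
+ * reported immediately.
+ */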
+void mt76x2_mac_poll_tx_status(struct mt76x2_dev *dev, bool irq)
+{
+	struct mt76x2_tx_status stat = {};
+	unsigned long flags;
+	u8 update = 1;
+
+	if (!test_bit(MT76_STATE_RUNNING, &dev->mt76.state))
+		return;
+
+	trace_mac_txstat_poll(dev);
+
+	while (!irq || !kfifo_is_full(&dev->txstatus_fifo)) {
+		u32 stat1, stat2;
+
+		spin_lock_irqsave(&dev->irq_lock, flags);
+		stat2 = mt76_rr(dev, MT_TX_STAT_FIFO_EXT);
+		stat1 = mt76_rr(dev, MT_TX_STAT_FIFO);
+		if (!(stat1 & MT_TX_STAT_FIFO_VALID)) {
+			spin_unlock_irqrestore(&dev->irq_lock, flags);
+			break;
+		}
+
+		spin_unlock_irqrestore(&dev->irq_lock, flags);
+
+		stat.valid = 1;
+		stat.success = !!(stat1 & MT_TX_STAT_FIFO_SUCCESS);
+		stat.aggr = !!(stat1 & MT_TX_STAT_FIFO_AGGR);
+		stat.ack_req = !!(stat1 & MT_TX_STAT_FIFO_ACKREQ);
+		stat.wcid = FIELD_GET(MT_TX_STAT_FIFO_WCID, stat1);
+		stat.rate = FIELD_GET(MT_TX_STAT_FIFO_RATE, stat1);
+		stat.retry = FIELD_GET(MT_TX_STAT_FIFO_EXT_RETRY, stat2);
+		stat.pktid = FIELD_GET(MT_TX_STAT_FIFO_EXT_PKTID, stat2);
+		trace_mac_txstat_fetch(dev, &stat);
+
+		if (!irq) {
+			mt76x2_send_tx_status(dev, &stat, &update);
+			continue;
+		}
+
+		kfifo_put(&dev->txstatus_fifo, stat);
+	}
+}
+
+static void
+mt76x2_mac_queue_txdone(struct mt76x2_dev *dev, struct sk_buff *skb,
+			void *txwi_ptr)
+{
+	struct mt76x2_tx_info *txi = mt76x2_skb_tx_info(skb);
+	struct mt76x2_txwi *txwi = txwi_ptr;
+
+	mt76x2_mac_poll_tx_status(dev, false);
+
+	txi->tries = 0;
+	txi->jiffies = jiffies;
+	txi->wcid = txwi->wcid;
+	txi->pktid = txwi->pktid;
+	trace_mac_txdone_add(dev, txwi->wcid, txwi->pktid);
+	mt76x2_tx_complete(dev, skb);
+}
+
+void mt76x2_mac_process_tx_status_fifo(struct mt76x2_dev *dev)
+{
+	struct mt76x2_tx_status stat;
+	u8 update = 1;
+
+	while (kfifo_get(&dev->txstatus_fifo, &stat))
+		mt76x2_send_tx_status(dev, &stat, &update);
+}
+
+void mt76x2_tx_complete_skb(struct mt76_dev *mdev, struct mt76_queue *q,
+			    struct mt76_queue_entry *e, bool flush)
+{
+	struct mt76x2_dev *dev = container_of(mdev, struct mt76x2_dev, mt76);
+
+	if (e->txwi)
+		mt76x2_mac_queue_txdone(dev, e->skb, &e->txwi->txwi);
+	else
+		dev_kfree_skb_any(e->skb);
+}
+
+static enum mt76x2_cipher_type
+mt76x2_mac_get_key_info(struct ieee80211_key_conf *key, u8 *key_data)
+{
+	memset(key_data, 0, 32);
+	if (!key)
+		return MT_CIPHER_NONE;
+
+	if (key->keylen > 32)
+		return MT_CIPHER_NONE;
+
+	memcpy(key_data, key->key, key->keylen);
+
+	switch (key->cipher) {
+	case WLAN_CIPHER_SUITE_WEP40:
+		return MT_CIPHER_WEP40;
+	case WLAN_CIPHER_SUITE_WEP104:
+		return MT_CIPHER_WEP104;
+	case WLAN_CIPHER_SUITE_TKIP:
+		return MT_CIPHER_TKIP;
+	case WLAN_CIPHER_SUITE_CCMP:
+		return MT_CIPHER_AES_CCMP;
+	default:
+		return MT_CIPHER_NONE;
+	}
+}
+
+void mt76x2_mac_wcid_setup(struct mt76x2_dev *dev, u8 idx, u8 vif_idx, u8 *mac)
+{
+	struct mt76_wcid_addr addr = {};
+	u32 attr;
+
+	attr = FIELD_PREP(MT_WCID_ATTR_BSS_IDX, vif_idx & 7) |
+	       FIELD_PREP(MT_WCID_ATTR_BSS_IDX_EXT, !!(vif_idx & 8));
+
+	mt76_wr(dev, MT_WCID_ATTR(idx), attr);
+
+	mt76_wr(dev, MT_WCID_TX_RATE(idx), 0);
+	mt76_wr(dev, MT_WCID_TX_RATE(idx) + 4, 0);
+
+	if (idx >= 128)
+		return;
+
+	if (mac)
+		memcpy(addr.macaddr, mac, ETH_ALEN);
+
+	mt76_wr_copy(dev, MT_WCID_ADDR(idx), &addr, sizeof(addr));
+}
+
+int mt76x2_mac_wcid_set_key(struct mt76x2_dev *dev, u8 idx,
+			    struct ieee80211_key_conf *key)
+{
+	enum mt76x2_cipher_type cipher;
+	u8 key_data[32];
+	u8 iv_data[8];
+
+	cipher = mt76x2_mac_get_key_info(key, key_data);
+	if (cipher == MT_CIPHER_NONE && key)
+		return -EOPNOTSUPP;
+
+	mt76_rmw_field(dev, MT_WCID_ATTR(idx), MT_WCID_ATTR_PKEY_MODE, cipher);
+	mt76_wr_copy(dev, MT_WCID_KEY(idx), key_data, sizeof(key_data));
+
+	memset(iv_data, 0, sizeof(iv_data));
+	if (key) {
+		mt76_rmw_field(dev, MT_WCID_ATTR(idx), MT_WCID_ATTR_PAIRWISE,
+			       !!(key->flags & IEEE80211_KEY_FLAG_PAIRWISE));
+		iv_data[3] = key->keyidx << 6;
+		if (cipher >= MT_CIPHER_TKIP)
+			iv_data[3] |= 0x20;
+	}
+
+	mt76_wr_copy(dev, MT_WCID_IV(idx), iv_data, sizeof(iv_data));
+
+	return 0;
+}
+
+int mt76x2_mac_shared_key_setup(struct mt76x2_dev *dev, u8 vif_idx, u8 key_idx,
+			      struct ieee80211_key_conf *key)
+{
+	enum mt76x2_cipher_type cipher;
+	u8 key_data[32];
+	u32 val;
+
+	cipher = mt76x2_mac_get_key_info(key, key_data);
+	if (cipher == MT_CIPHER_NONE && key)
+		return -EOPNOTSUPP;
+
+	val = mt76_rr(dev, MT_SKEY_MODE(vif_idx));
+	val &= ~(MT_SKEY_MODE_MASK << MT_SKEY_MODE_SHIFT(vif_idx, key_idx));
+	val |= cipher << MT_SKEY_MODE_SHIFT(vif_idx, key_idx);
+	mt76_wr(dev, MT_SKEY_MODE(vif_idx), val);
+
+	mt76_wr_copy(dev, MT_SKEY(vif_idx, key_idx), key_data,
+		     sizeof(key_data));
+
+	return 0;
+}
+
+static int
+mt76_write_beacon(struct mt76x2_dev *dev, int offset, struct sk_buff *skb)
+{
+	int beacon_len = dev->beacon_offsets[1] - dev->beacon_offsets[0];
+	struct mt76x2_txwi txwi;
+
+	if (WARN_ON_ONCE(beacon_len < skb->len + sizeof(struct mt76x2_txwi)))
+		return -ENOSPC;
+
+	mt76x2_mac_write_txwi(dev, &txwi, skb, NULL, NULL);
+
+	mt76_wr_copy(dev, offset, &txwi, sizeof(txwi));
+	offset += sizeof(txwi);
+
+	mt76_wr_copy(dev, offset, skb->data, skb->len);
+	return 0;
+}
+
+static int
+__mt76x2_mac_set_beacon(struct mt76x2_dev *dev, u8 bcn_idx, struct sk_buff *skb)
+{
+	int beacon_len = dev->beacon_offsets[1] - dev->beacon_offsets[0];
+	int beacon_addr = dev->beacon_offsets[bcn_idx];
+	int ret = 0;
+	int i;
+
+	/* Prevent corrupt transmissions during update */
+	mt76_set(dev, MT_BCN_BYPASS_MASK, BIT(bcn_idx));
+
+	if (skb) {
+		ret = mt76_write_beacon(dev, beacon_addr, skb);
+		if (!ret)
+			dev->beacon_data_mask |= BIT(bcn_idx) &
+						 dev->beacon_mask;
+	} else {
+		dev->beacon_data_mask &= ~BIT(bcn_idx);
+		for (i = 0; i < beacon_len; i += 4)
+			mt76_wr(dev, beacon_addr + i, 0);
+	}
+
+	mt76_wr(dev, MT_BCN_BYPASS_MASK, 0xff00 | ~dev->beacon_data_mask);
+
+	return ret;
+}
+
+int mt76x2_mac_set_beacon(struct mt76x2_dev *dev, u8 vif_idx,
+			  struct sk_buff *skb)
+{
+	bool force_update = false;
+	int bcn_idx = 0;
+	int i;
+
+	for (i = 0; i < ARRAY_SIZE(dev->beacons); i++) {
+		if (vif_idx == i) {
+			force_update = !!dev->beacons[i] ^ !!skb;
+
+			if (dev->beacons[i])
+				dev_kfree_skb(dev->beacons[i]);
+
+			dev->beacons[i] = skb;
+			__mt76x2_mac_set_beacon(dev, bcn_idx, skb);
+		} else if (force_update && dev->beacons[i]) {
+			__mt76x2_mac_set_beacon(dev, bcn_idx, dev->beacons[i]);
+		}
+
+		bcn_idx += !!dev->beacons[i];
+	}
+
+	for (i = bcn_idx; i < ARRAY_SIZE(dev->beacons); i++) {
+		if (!(dev->beacon_data_mask & BIT(i)))
+			break;
+
+		__mt76x2_mac_set_beacon(dev, i, NULL);
+	}
+
+	mt76_rmw_field(dev, MT_MAC_BSSID_DW1, MT_MAC_BSSID_DW1_MBEACON_N,
+		       bcn_idx - 1);
+	return 0;
+}
+
+void mt76x2_mac_set_beacon_enable(struct mt76x2_dev *dev, u8 vif_idx, bool val)
+{
+	u8 old_mask = dev->beacon_mask;
+	bool en;
+	u32 reg;
+
+	if (val) {
+		dev->beacon_mask |= BIT(vif_idx);
+	} else {
+		dev->beacon_mask &= ~BIT(vif_idx);
+		mt76x2_mac_set_beacon(dev, vif_idx, NULL);
+	}
+
+	if (!!old_mask == !!dev->beacon_mask)
+		return;
+
+	en = dev->beacon_mask;
+
+	mt76_rmw_field(dev, MT_INT_TIMER_EN, MT_INT_TIMER_EN_PRE_TBTT_EN, en);
+	reg = MT_BEACON_TIME_CFG_BEACON_TX |
+	      MT_BEACON_TIME_CFG_TBTT_EN |
+	      MT_BEACON_TIME_CFG_TIMER_EN;
+	mt76_rmw(dev, MT_BEACON_TIME_CFG, reg, reg * en);
+
+	if (en)
+		mt76x2_irq_enable(dev, MT_INT_PRE_TBTT | MT_INT_TBTT);
+	else
+		mt76x2_irq_disable(dev, MT_INT_PRE_TBTT | MT_INT_TBTT);
+}
+
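+/* Accumulate channel busy/active time counters for survey reporting. */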
+void mt76x2_update_channel(struct mt76_dev *mdev)
+{
+	struct mt76x2_dev *dev = container_of(mdev, struct mt76x2_dev, mt76);
+	struct mt76_channel_state *state;
+	u32 active, busy;
+
+	state = mt76_channel_state(&dev->mt76, dev->mt76.chandef.chan);
+
+	busy = mt76_rr(dev, MT_CH_BUSY);
+	active = busy + mt76_rr(dev, MT_CH_IDLE);
+
+	spin_lock_bh(&dev->mt76.cc_lock);
+	state->cc_busy += busy;
+	state->cc_active += active;
+	spin_unlock_bh(&dev->mt76.cc_lock);
+}
+
+void mt76x2_mac_work(struct work_struct *work)
+{
+	struct mt76x2_dev *dev = container_of(work, struct mt76x2_dev,
+					    mac_work.work);
+	int i, idx;
+
+	mt76x2_update_channel(&dev->mt76);
+	for (i = 0, idx = 0; i < 16; i++) {
+		u32 val = mt76_rr(dev, MT_TX_AGG_CNT(i));
+
+		dev->aggr_stats[idx++] += val & 0xffff;
+		dev->aggr_stats[idx++] += val >> 16;
+	}
+
+	ieee80211_queue_delayed_work(mt76_hw(dev), &dev->mac_work,
+				     MT_CALIBRATE_INTERVAL);
+}
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2_mac.h b/drivers/net/wireless/mediatek/mt76/mt76x2_mac.h
new file mode 100644
index 000000000000..8a8a25e32d5f
--- /dev/null
+++ b/drivers/net/wireless/mediatek/mt76/mt76x2_mac.h
@@ -0,0 +1,190 @@
+/*
+ * Copyright (C) 2016 Felix Fietkau <nbd@nbd.name>
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#ifndef __MT76x2_MAC_H
+#define __MT76x2_MAC_H
+
+#include "mt76.h"
+
+struct mt76x2_dev;
+struct mt76x2_sta;
+struct mt76x2_vif;
+struct mt76x2_txwi;
+
+struct mt76x2_tx_status {
+	u8 valid:1;
+	u8 success:1;
+	u8 aggr:1;
+	u8 ack_req:1;
+	u8 wcid;
+	u8 pktid;
+	u8 retry;
+	u16 rate;
+} __packed __aligned(2);
+
+struct mt76x2_tx_info {
+	unsigned long jiffies;
+	u8 tries;
+
+	u8 wcid;
+	u8 pktid;
+	u8 retry;
+};
+
+struct mt76x2_rxwi {
+	__le32 rxinfo;
+
+	__le32 ctl;
+
+	__le16 tid_sn;
+	__le16 rate;
+
+	u8 rssi[4];
+
+	__le32 bbp_rxinfo[4];
+};
+
+#define MT_RXINFO_BA			BIT(0)
+#define MT_RXINFO_DATA			BIT(1)
+#define MT_RXINFO_NULL			BIT(2)
+#define MT_RXINFO_FRAG			BIT(3)
+#define MT_RXINFO_UNICAST		BIT(4)
+#define MT_RXINFO_MULTICAST		BIT(5)
+#define MT_RXINFO_BROADCAST		BIT(6)
+#define MT_RXINFO_MYBSS			BIT(7)
+#define MT_RXINFO_CRCERR		BIT(8)
+#define MT_RXINFO_ICVERR		BIT(9)
+#define MT_RXINFO_MICERR		BIT(10)
+#define MT_RXINFO_AMSDU			BIT(11)
+#define MT_RXINFO_HTC			BIT(12)
+#define MT_RXINFO_RSSI			BIT(13)
+#define MT_RXINFO_L2PAD			BIT(14)
+#define MT_RXINFO_AMPDU			BIT(15)
+#define MT_RXINFO_DECRYPT		BIT(16)
+#define MT_RXINFO_BSSIDX3		BIT(17)
+#define MT_RXINFO_WAPI_KEY		BIT(18)
+#define MT_RXINFO_PN_LEN		GENMASK(21, 19)
+#define MT_RXINFO_SW_FTYPE0		BIT(22)
+#define MT_RXINFO_SW_FTYPE1		BIT(23)
+#define MT_RXINFO_PROBE_RESP		BIT(24)
+#define MT_RXINFO_BEACON		BIT(25)
+#define MT_RXINFO_DISASSOC		BIT(26)
+#define MT_RXINFO_DEAUTH		BIT(27)
+#define MT_RXINFO_ACTION		BIT(28)
+#define MT_RXINFO_TCP_SUM_ERR		BIT(30)
+#define MT_RXINFO_IP_SUM_ERR		BIT(31)
+
+#define MT_RXWI_CTL_WCID		GENMASK(7, 0)
+#define MT_RXWI_CTL_KEY_IDX		GENMASK(9, 8)
+#define MT_RXWI_CTL_BSS_IDX		GENMASK(12, 10)
+#define MT_RXWI_CTL_UDF			GENMASK(15, 13)
+#define MT_RXWI_CTL_MPDU_LEN		GENMASK(29, 16)
+#define MT_RXWI_CTL_EOF			BIT(31)
+
+#define MT_RXWI_TID			GENMASK(3, 0)
+#define MT_RXWI_SN			GENMASK(15, 4)
+
+#define MT_RXWI_RATE_INDEX		GENMASK(5, 0)
+#define MT_RXWI_RATE_LDPC		BIT(6)
+#define MT_RXWI_RATE_BW			GENMASK(8, 7)
+#define MT_RXWI_RATE_SGI		BIT(9)
+#define MT_RXWI_RATE_STBC		BIT(10)
+#define MT_RXWI_RATE_LDPC_EXSYM		BIT(11)
+#define MT_RXWI_RATE_PHY		GENMASK(15, 13)
+
+#define MT_RATE_INDEX_VHT_IDX		GENMASK(3, 0)
+#define MT_RATE_INDEX_VHT_NSS		GENMASK(5, 4)
+
+#define MT_TX_PWR_ADJ			GENMASK(3, 0)
+
+enum mt76x2_phy_bandwidth {
+	MT_PHY_BW_20,
+	MT_PHY_BW_40,
+	MT_PHY_BW_80,
+};
+
+#define MT_TXWI_FLAGS_FRAG		BIT(0)
+#define MT_TXWI_FLAGS_MMPS		BIT(1)
+#define MT_TXWI_FLAGS_CFACK		BIT(2)
+#define MT_TXWI_FLAGS_TS		BIT(3)
+#define MT_TXWI_FLAGS_AMPDU		BIT(4)
+#define MT_TXWI_FLAGS_MPDU_DENSITY	GENMASK(7, 5)
+#define MT_TXWI_FLAGS_TXOP		GENMASK(9, 8)
+#define MT_TXWI_FLAGS_NDPS		BIT(10)
+#define MT_TXWI_FLAGS_RTSBWSIG		BIT(11)
+#define MT_TXWI_FLAGS_NDP_BW		GENMASK(13, 12)
+#define MT_TXWI_FLAGS_SOUND		BIT(14)
+#define MT_TXWI_FLAGS_TX_RATE_LUT	BIT(15)
+
+#define MT_TXWI_ACK_CTL_REQ		BIT(0)
+#define MT_TXWI_ACK_CTL_NSEQ		BIT(1)
+#define MT_TXWI_ACK_CTL_BA_WINDOW	GENMASK(7, 2)
+
+#define MT_TXWI_PKTID_PROBE		BIT(7)
+
+struct mt76x2_txwi {
+	__le16 flags;
+	__le16 rate;
+	u8 ack_ctl;
+	u8 wcid;
+	__le16 len_ctl;
+	__le32 iv;
+	__le32 eiv;
+	u8 aid;
+	u8 txstream;
+	u8 ctl2;
+	u8 pktid;
+} __packed __aligned(4);
+
+static inline struct mt76x2_tx_info *
+mt76x2_skb_tx_info(struct sk_buff *skb)
+{
+	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
+
+	return (void *) info->status.status_driver_data;
+}
+
+int mt76x2_mac_reset(struct mt76x2_dev *dev, bool hard);
+int mt76x2_mac_start(struct mt76x2_dev *dev);
+void mt76x2_mac_stop(struct mt76x2_dev *dev, bool force);
+void mt76x2_mac_resume(struct mt76x2_dev *dev);
+void mt76x2_mac_set_bssid(struct mt76x2_dev *dev, u8 idx, const u8 *addr);
+
+int mt76x2_mac_process_rx(struct mt76x2_dev *dev, struct sk_buff *skb,
+			  void *rxi);
+void mt76x2_mac_write_txwi(struct mt76x2_dev *dev, struct mt76x2_txwi *txwi,
+			   struct sk_buff *skb, struct mt76_wcid *wcid,
+			   struct ieee80211_sta *sta);
+void mt76x2_mac_wcid_setup(struct mt76x2_dev *dev, u8 idx, u8 vif_idx, u8 *mac);
+int mt76x2_mac_wcid_set_key(struct mt76x2_dev *dev, u8 idx,
+			    struct ieee80211_key_conf *key);
+void mt76x2_mac_wcid_set_rate(struct mt76x2_dev *dev, struct mt76_wcid *wcid,
+			      const struct ieee80211_tx_rate *rate);
+void mt76x2_mac_wcid_set_drop(struct mt76x2_dev *dev, u8 idx, bool drop);
+
+int mt76x2_mac_shared_key_setup(struct mt76x2_dev *dev, u8 vif_idx, u8 key_idx,
+				struct ieee80211_key_conf *key);
+
+int mt76x2_mac_set_beacon(struct mt76x2_dev *dev, u8 vif_idx,
+			  struct sk_buff *skb);
+void mt76x2_mac_set_beacon_enable(struct mt76x2_dev *dev, u8 vif_idx, bool val);
+
+void mt76x2_mac_poll_tx_status(struct mt76x2_dev *dev, bool irq);
+void mt76x2_mac_process_tx_status_fifo(struct mt76x2_dev *dev);
+
+void mt76x2_mac_work(struct work_struct *work);
+
+#endif
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2_main.c b/drivers/net/wireless/mediatek/mt76/mt76x2_main.c
new file mode 100644
index 000000000000..bf26284b9989
--- /dev/null
+++ b/drivers/net/wireless/mediatek/mt76/mt76x2_main.c
@@ -0,0 +1,577 @@
+/*
+ * Copyright (C) 2016 Felix Fietkau <nbd@nbd.name>
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include "mt76x2.h"
+
+static int
+mt76x2_start(struct ieee80211_hw *hw)
+{
+	struct mt76x2_dev *dev = hw->priv;
+	int ret;
+
+	mutex_lock(&dev->mutex);
+
+	ret = mt76x2_mac_start(dev);
+	if (ret)
+		goto out;
+
+	ret = mt76x2_phy_start(dev);
+	if (ret)
+		goto out;
+
+	ieee80211_queue_delayed_work(mt76_hw(dev), &dev->mac_work,
+				     MT_CALIBRATE_INTERVAL);
+
+	set_bit(MT76_STATE_RUNNING, &dev->mt76.state);
+
+out:
+	mutex_unlock(&dev->mutex);
+	return ret;
+}
+
+static void
+mt76x2_stop(struct ieee80211_hw *hw)
+{
+	struct mt76x2_dev *dev = hw->priv;
+
+	mutex_lock(&dev->mutex);
+	clear_bit(MT76_STATE_RUNNING, &dev->mt76.state);
+	mt76x2_stop_hardware(dev);
+	mutex_unlock(&dev->mutex);
+}
+
+static void
+mt76x2_txq_init(struct mt76x2_dev *dev, struct ieee80211_txq *txq)
+{
+	struct mt76_txq *mtxq;
+
+	if (!txq)
+		return;
+
+	mtxq = (struct mt76_txq *) txq->drv_priv;
+	if (txq->sta) {
+		struct mt76x2_sta *sta;
+
+		sta = (struct mt76x2_sta *) txq->sta->drv_priv;
+		mtxq->wcid = &sta->wcid;
+	} else {
+		struct mt76x2_vif *mvif;
+
+		mvif = (struct mt76x2_vif *) txq->vif->drv_priv;
+		mtxq->wcid = &mvif->group_wcid;
+	}
+
+	mt76_txq_init(&dev->mt76, txq);
+}
+
+static int
+mt76x2_add_interface(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
+{
+	struct mt76x2_dev *dev = hw->priv;
+	struct mt76x2_vif *mvif = (struct mt76x2_vif *) vif->drv_priv;
+	unsigned int idx = 0;
+
+	if (vif->addr[0] & BIT(1))
+		idx = 1 + (((dev->mt76.macaddr[0] ^ vif->addr[0]) >> 2) & 7);
+
+	/*
+	 * Client mode typically only has one configurable BSSID register,
+	 * which is used for bssidx=0. This is linked to the MAC address.
+	 * Since mac80211 allows changing interface types, and we cannot
+	 * force the use of the primary MAC address for a station mode
+	 * interface, we need some other way of configuring a per-interface
+	 * remote BSSID.
+	 * The hardware provides an AP-Client feature, where bssidx 0-7 are
+	 * used for AP mode and bssidx 8-15 for client mode.
+	 * We shift the station interface bss index by 8 to force the
+	 * hardware to recognize the BSSID.
+	 * The resulting bssidx mismatch for unicast frames is ignored by hw.
+	 */
+	if (vif->type == NL80211_IFTYPE_STATION)
+		idx += 8;
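+
+	/*
+	 * Worked example (illustrative only): a STA vif whose address
+	 * differs from the primary MAC by 3 in bits 2-4 gets
+	 * idx = 1 + 3 = 4 above; idx += 8 then maps it to client-mode
+	 * bssidx 12, with group_wcid.idx = 254 - 12 = 242.
+	 */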
+
+	mvif->idx = idx;
+	mvif->group_wcid.idx = 254 - idx;
+	mvif->group_wcid.hw_key_idx = -1;
+	mt76x2_txq_init(dev, vif->txq);
+
+	return 0;
+}
+
+static void
+mt76x2_remove_interface(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
+{
+	struct mt76x2_dev *dev = hw->priv;
+
+	mt76_txq_remove(&dev->mt76, vif->txq);
+}
+
+static int
+mt76x2_set_channel(struct mt76x2_dev *dev, struct cfg80211_chan_def *chandef)
+{
+	int ret;
+
+	mt76_set_channel(&dev->mt76);
+
+	tasklet_disable(&dev->pre_tbtt_tasklet);
+	tasklet_disable(&dev->dfs_pd.dfs_tasklet);
+	cancel_delayed_work_sync(&dev->cal_work);
+
+	mt76x2_mac_stop(dev, true);
+	ret = mt76x2_phy_set_channel(dev, chandef);
+
+	/* channel cycle counters read-and-clear */
+	mt76_rr(dev, MT_CH_IDLE);
+	mt76_rr(dev, MT_CH_BUSY);
+
+	mt76x2_dfs_init_params(dev);
+
+	mt76x2_mac_resume(dev);
+	tasklet_enable(&dev->dfs_pd.dfs_tasklet);
+	tasklet_enable(&dev->pre_tbtt_tasklet);
+
+	return ret;
+}
+
+static int
+mt76x2_config(struct ieee80211_hw *hw, u32 changed)
+{
+	struct mt76x2_dev *dev = hw->priv;
+	int ret = 0;
+
+	mutex_lock(&dev->mutex);
+
+	if (changed & IEEE80211_CONF_CHANGE_MONITOR) {
+		if (!(hw->conf.flags & IEEE80211_CONF_MONITOR))
+			dev->rxfilter |= MT_RX_FILTR_CFG_PROMISC;
+		else
+			dev->rxfilter &= ~MT_RX_FILTR_CFG_PROMISC;
+
+		mt76_wr(dev, MT_RX_FILTR_CFG, dev->rxfilter);
+	}
+
+	if (changed & IEEE80211_CONF_CHANGE_POWER) {
+		dev->txpower_conf = hw->conf.power_level * 2;
+
+		/* convert to per-chain power for 2x2 devices:
+		 * -3 dB, i.e. 6 steps of 0.5 dB
+		 */
+		dev->txpower_conf -= 6;
+
+		if (test_bit(MT76_STATE_RUNNING, &dev->mt76.state)) {
+			mt76x2_phy_set_txpower(dev);
+			mt76x2_tx_set_txpwr_auto(dev, dev->txpower_conf);
+		}
+	}
+
+	if (changed & IEEE80211_CONF_CHANGE_CHANNEL) {
+		ieee80211_stop_queues(hw);
+		ret = mt76x2_set_channel(dev, &hw->conf.chandef);
+		ieee80211_wake_queues(hw);
+	}
+
+	mutex_unlock(&dev->mutex);
+
+	return ret;
+}
+
+static void
+mt76x2_configure_filter(struct ieee80211_hw *hw, unsigned int changed_flags,
+			unsigned int *total_flags, u64 multicast)
+{
+	struct mt76x2_dev *dev = hw->priv;
+	u32 flags = 0;
+
+#define MT76_FILTER(_flag, _hw) do { \
+		flags |= *total_flags & FIF_##_flag;			\
+		dev->rxfilter &= ~(_hw);				\
+		dev->rxfilter |= !(flags & FIF_##_flag) * (_hw);	\
+	} while (0)
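+
+	/*
+	 * Note on MT76_FILTER(): the MT_RX_FILTR_CFG bits are drop
+	 * filters, so a requested FIF_* flag ("pass these frames") must
+	 * clear the matching hardware bit. Multiplying by the negated
+	 * flag test sets the drop bit only when the flag is absent,
+	 * e.g. with FIF_FCSFAIL unset, MT_RX_FILTR_CFG_CRC_ERR stays
+	 * set and frames with CRC errors are dropped.
+	 */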
+
+	mutex_lock(&dev->mutex);
+
+	dev->rxfilter &= ~MT_RX_FILTR_CFG_OTHER_BSS;
+
+	MT76_FILTER(FCSFAIL, MT_RX_FILTR_CFG_CRC_ERR);
+	MT76_FILTER(PLCPFAIL, MT_RX_FILTR_CFG_PHY_ERR);
+	MT76_FILTER(CONTROL, MT_RX_FILTR_CFG_ACK |
+			     MT_RX_FILTR_CFG_CTS |
+			     MT_RX_FILTR_CFG_CFEND |
+			     MT_RX_FILTR_CFG_CFACK |
+			     MT_RX_FILTR_CFG_BA |
+			     MT_RX_FILTR_CFG_CTRL_RSV);
+	MT76_FILTER(PSPOLL, MT_RX_FILTR_CFG_PSPOLL);
+
+	*total_flags = flags;
+	mt76_wr(dev, MT_RX_FILTR_CFG, dev->rxfilter);
+
+	mutex_unlock(&dev->mutex);
+}
+
+static void
+mt76x2_bss_info_changed(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+			struct ieee80211_bss_conf *info, u32 changed)
+{
+	struct mt76x2_dev *dev = hw->priv;
+	struct mt76x2_vif *mvif = (struct mt76x2_vif *) vif->drv_priv;
+
+	mutex_lock(&dev->mutex);
+
+	if (changed & BSS_CHANGED_BSSID)
+		mt76x2_mac_set_bssid(dev, mvif->idx, info->bssid);
+
+	if (changed & BSS_CHANGED_BEACON_INT)
+		mt76_rmw_field(dev, MT_BEACON_TIME_CFG,
+			       MT_BEACON_TIME_CFG_INTVAL,
+			       info->beacon_int << 4);
+
+	if (changed & BSS_CHANGED_BEACON_ENABLED) {
+		tasklet_disable(&dev->pre_tbtt_tasklet);
+		mt76x2_mac_set_beacon_enable(dev, mvif->idx,
+					     info->enable_beacon);
+		tasklet_enable(&dev->pre_tbtt_tasklet);
+	}
+
+	if (changed & BSS_CHANGED_ERP_SLOT) {
+		int slottime = info->use_short_slot ? 9 : 20;
+
+		dev->slottime = slottime;
+		mt76_rmw_field(dev, MT_BKOFF_SLOT_CFG,
+			       MT_BKOFF_SLOT_CFG_SLOTTIME, slottime);
+	}
+
+	mutex_unlock(&dev->mutex);
+}
+
+static int
+mt76x2_sta_add(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+	       struct ieee80211_sta *sta)
+{
+	struct mt76x2_dev *dev = hw->priv;
+	struct mt76x2_sta *msta = (struct mt76x2_sta *) sta->drv_priv;
+	struct mt76x2_vif *mvif = (struct mt76x2_vif *) vif->drv_priv;
+	int ret = 0;
+	int idx = 0;
+	int i;
+
+	mutex_lock(&dev->mutex);
+
+	idx = mt76_wcid_alloc(dev->wcid_mask, ARRAY_SIZE(dev->wcid));
+	if (idx < 0) {
+		ret = -ENOSPC;
+		goto out;
+	}
+
+	msta->vif = mvif;
+	msta->wcid.sta = 1;
+	msta->wcid.idx = idx;
+	msta->wcid.hw_key_idx = -1;
+	mt76x2_mac_wcid_setup(dev, idx, mvif->idx, sta->addr);
+	mt76x2_mac_wcid_set_drop(dev, idx, false);
+	for (i = 0; i < ARRAY_SIZE(sta->txq); i++)
+		mt76x2_txq_init(dev, sta->txq[i]);
+
+	rcu_assign_pointer(dev->wcid[idx], &msta->wcid);
+
+out:
+	mutex_unlock(&dev->mutex);
+
+	return ret;
+}
+
+static int
+mt76x2_sta_remove(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+		  struct ieee80211_sta *sta)
+{
+	struct mt76x2_dev *dev = hw->priv;
+	struct mt76x2_sta *msta = (struct mt76x2_sta *) sta->drv_priv;
+	int idx = msta->wcid.idx;
+	int i;
+
+	mutex_lock(&dev->mutex);
+	rcu_assign_pointer(dev->wcid[idx], NULL);
+	for (i = 0; i < ARRAY_SIZE(sta->txq); i++)
+		mt76_txq_remove(&dev->mt76, sta->txq[i]);
+	mt76x2_mac_wcid_set_drop(dev, idx, true);
+	mt76_wcid_free(dev->wcid_mask, idx);
+	mt76x2_mac_wcid_setup(dev, idx, 0, NULL);
+	mutex_unlock(&dev->mutex);
+
+	return 0;
+}
+
+static void
+mt76x2_sta_notify(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+		  enum sta_notify_cmd cmd, struct ieee80211_sta *sta)
+{
+	struct mt76x2_sta *msta = (struct mt76x2_sta *) sta->drv_priv;
+	struct mt76x2_dev *dev = hw->priv;
+	int idx = msta->wcid.idx;
+
+	switch (cmd) {
+	case STA_NOTIFY_SLEEP:
+		mt76x2_mac_wcid_set_drop(dev, idx, true);
+		mt76_stop_tx_queues(&dev->mt76, sta, true);
+		break;
+	case STA_NOTIFY_AWAKE:
+		mt76x2_mac_wcid_set_drop(dev, idx, false);
+		break;
+	}
+}
+
+static int
+mt76x2_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
+	       struct ieee80211_vif *vif, struct ieee80211_sta *sta,
+	       struct ieee80211_key_conf *key)
+{
+	struct mt76x2_dev *dev = hw->priv;
+	struct mt76x2_vif *mvif = (struct mt76x2_vif *) vif->drv_priv;
+	struct mt76x2_sta *msta;
+	struct mt76_wcid *wcid;
+	int idx = key->keyidx;
+	int ret;
+
+	/*
+	 * The hardware does not support per-STA RX GTK; fall back
+	 * to software mode for these.
+	 */
+	if ((vif->type == NL80211_IFTYPE_ADHOC ||
+	     vif->type == NL80211_IFTYPE_MESH_POINT) &&
+	    (key->cipher == WLAN_CIPHER_SUITE_TKIP ||
+	     key->cipher == WLAN_CIPHER_SUITE_CCMP) &&
+	    !(key->flags & IEEE80211_KEY_FLAG_PAIRWISE))
+		return -EOPNOTSUPP;
+
+	msta = sta ? (struct mt76x2_sta *) sta->drv_priv : NULL;
+	wcid = msta ? &msta->wcid : &mvif->group_wcid;
+
+	if (cmd == SET_KEY) {
+		key->hw_key_idx = wcid->idx;
+		wcid->hw_key_idx = idx;
+		if (key->flags & IEEE80211_KEY_FLAG_RX_MGMT) {
+			key->flags |= IEEE80211_KEY_FLAG_SW_MGMT_TX;
+			wcid->sw_iv = true;
+		}
+	} else {
+		if (idx == wcid->hw_key_idx) {
+			wcid->hw_key_idx = -1;
+			wcid->sw_iv = true;
+		}
+
+		key = NULL;
+	}
+	mt76_wcid_key_setup(&dev->mt76, wcid, key);
+
+	if (!msta) {
+		if (key || wcid->hw_key_idx == idx) {
+			ret = mt76x2_mac_wcid_set_key(dev, wcid->idx, key);
+			if (ret)
+				return ret;
+		}
+
+		return mt76x2_mac_shared_key_setup(dev, mvif->idx, idx, key);
+	}
+
+	return mt76x2_mac_wcid_set_key(dev, msta->wcid.idx, key);
+}
+
+static int
+mt76x2_conf_tx(struct ieee80211_hw *hw, struct ieee80211_vif *vif, u16 queue,
+	       const struct ieee80211_tx_queue_params *params)
+{
+	struct mt76x2_dev *dev = hw->priv;
+	u8 cw_min = 5, cw_max = 10, qid;
+	u32 val;
+
+	qid = dev->mt76.q_tx[queue].hw_idx;
+
+	if (params->cw_min)
+		cw_min = fls(params->cw_min);
+	if (params->cw_max)
+		cw_max = fls(params->cw_max);
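+
+	/*
+	 * mac80211 hands over cw_min/cw_max as (2^n - 1) style values,
+	 * while the EDCA registers take the exponent n, so fls()
+	 * converts: e.g. cw_min = 15 gives fls(15) = 4, i.e. 2^4 - 1.
+	 */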
+
+	val = FIELD_PREP(MT_EDCA_CFG_TXOP, params->txop) |
+	      FIELD_PREP(MT_EDCA_CFG_AIFSN, params->aifs) |
+	      FIELD_PREP(MT_EDCA_CFG_CWMIN, cw_min) |
+	      FIELD_PREP(MT_EDCA_CFG_CWMAX, cw_max);
+	mt76_wr(dev, MT_EDCA_CFG_AC(qid), val);
+
+	val = mt76_rr(dev, MT_WMM_TXOP(qid));
+	val &= ~(MT_WMM_TXOP_MASK << MT_WMM_TXOP_SHIFT(qid));
+	val |= params->txop << MT_WMM_TXOP_SHIFT(qid);
+	mt76_wr(dev, MT_WMM_TXOP(qid), val);
+
+	val = mt76_rr(dev, MT_WMM_AIFSN);
+	val &= ~(MT_WMM_AIFSN_MASK << MT_WMM_AIFSN_SHIFT(qid));
+	val |= params->aifs << MT_WMM_AIFSN_SHIFT(qid);
+	mt76_wr(dev, MT_WMM_AIFSN, val);
+
+	val = mt76_rr(dev, MT_WMM_CWMIN);
+	val &= ~(MT_WMM_CWMIN_MASK << MT_WMM_CWMIN_SHIFT(qid));
+	val |= cw_min << MT_WMM_CWMIN_SHIFT(qid);
+	mt76_wr(dev, MT_WMM_CWMIN, val);
+
+	val = mt76_rr(dev, MT_WMM_CWMAX);
+	val &= ~(MT_WMM_CWMAX_MASK << MT_WMM_CWMAX_SHIFT(qid));
+	val |= cw_max << MT_WMM_CWMAX_SHIFT(qid);
+	mt76_wr(dev, MT_WMM_CWMAX, val);
+
+	return 0;
+}
+
+static void
+mt76x2_sw_scan(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+	       const u8 *mac)
+{
+	struct mt76x2_dev *dev = hw->priv;
+
+	tasklet_disable(&dev->pre_tbtt_tasklet);
+	set_bit(MT76_SCANNING, &dev->mt76.state);
+}
+
+static void
+mt76x2_sw_scan_complete(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
+{
+	struct mt76x2_dev *dev = hw->priv;
+
+	clear_bit(MT76_SCANNING, &dev->mt76.state);
+	tasklet_enable(&dev->pre_tbtt_tasklet);
+	mt76_txq_schedule_all(&dev->mt76);
+}
+
+static void
+mt76x2_flush(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+	     u32 queues, bool drop)
+{
+}
+
+static int
+mt76x2_get_txpower(struct ieee80211_hw *hw, struct ieee80211_vif *vif, int *dbm)
+{
+	struct mt76x2_dev *dev = hw->priv;
+
+	*dbm = dev->txpower_cur / 2;
+
+	/* convert from per-chain power to combined output on 2x2 devices */
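+	/* (two equal-power chains give ~2x output, i.e. +3 dB) */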
+	*dbm += 3;
+
+	return 0;
+}
+
+static int
+mt76x2_ampdu_action(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+		    struct ieee80211_ampdu_params *params)
+{
+	enum ieee80211_ampdu_mlme_action action = params->action;
+	struct ieee80211_sta *sta = params->sta;
+	struct mt76x2_dev *dev = hw->priv;
+	struct mt76x2_sta *msta = (struct mt76x2_sta *) sta->drv_priv;
+	struct ieee80211_txq *txq = sta->txq[params->tid];
+	u16 tid = params->tid;
+	u16 *ssn = &params->ssn;
+	struct mt76_txq *mtxq;
+
+	if (!txq)
+		return -EINVAL;
+
+	mtxq = (struct mt76_txq *)txq->drv_priv;
+
+	switch (action) {
+	case IEEE80211_AMPDU_RX_START:
+		mt76_rx_aggr_start(&dev->mt76, &msta->wcid, tid, *ssn,
+				   params->buf_size);
+		mt76_set(dev, MT_WCID_ADDR(msta->wcid.idx) + 4, BIT(16 + tid));
+		break;
+	case IEEE80211_AMPDU_RX_STOP:
+		mt76_rx_aggr_stop(&dev->mt76, &msta->wcid, tid);
+		mt76_clear(dev, MT_WCID_ADDR(msta->wcid.idx) + 4,
+			   BIT(16 + tid));
+		break;
+	case IEEE80211_AMPDU_TX_OPERATIONAL:
+		mtxq->aggr = true;
+		mtxq->send_bar = false;
+		ieee80211_send_bar(vif, sta->addr, tid, mtxq->agg_ssn);
+		break;
+	case IEEE80211_AMPDU_TX_STOP_FLUSH:
+	case IEEE80211_AMPDU_TX_STOP_FLUSH_CONT:
+		mtxq->aggr = false;
+		ieee80211_send_bar(vif, sta->addr, tid, mtxq->agg_ssn);
+		break;
+	case IEEE80211_AMPDU_TX_START:
+		mtxq->agg_ssn = *ssn << 4;
+		ieee80211_start_tx_ba_cb_irqsafe(vif, sta->addr, tid);
+		break;
+	case IEEE80211_AMPDU_TX_STOP_CONT:
+		mtxq->aggr = false;
+		ieee80211_stop_tx_ba_cb_irqsafe(vif, sta->addr, tid);
+		break;
+	}
+
+	return 0;
+}
+
+static void
+mt76x2_sta_rate_tbl_update(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+			   struct ieee80211_sta *sta)
+{
+	struct mt76x2_dev *dev = hw->priv;
+	struct mt76x2_sta *msta = (struct mt76x2_sta *) sta->drv_priv;
+	struct ieee80211_sta_rates *rates = rcu_dereference(sta->rates);
+	struct ieee80211_tx_rate rate = {};
+
+	if (!rates)
+		return;
+
+	rate.idx = rates->rate[0].idx;
+	rate.flags = rates->rate[0].flags;
+	mt76x2_mac_wcid_set_rate(dev, &msta->wcid, &rate);
+	msta->wcid.max_txpwr_adj = mt76x2_tx_get_max_txpwr_adj(dev, &rate);
+}
+
+static void mt76x2_set_coverage_class(struct ieee80211_hw *hw,
+				      s16 coverage_class)
+{
+	struct mt76x2_dev *dev = hw->priv;
+
+	mutex_lock(&dev->mutex);
+	dev->coverage_class = coverage_class;
+	mt76x2_set_tx_ackto(dev);
+	mutex_unlock(&dev->mutex);
+}
+
+const struct ieee80211_ops mt76x2_ops = {
+	.tx = mt76x2_tx,
+	.start = mt76x2_start,
+	.stop = mt76x2_stop,
+	.add_interface = mt76x2_add_interface,
+	.remove_interface = mt76x2_remove_interface,
+	.config = mt76x2_config,
+	.configure_filter = mt76x2_configure_filter,
+	.bss_info_changed = mt76x2_bss_info_changed,
+	.sta_add = mt76x2_sta_add,
+	.sta_remove = mt76x2_sta_remove,
+	.sta_notify = mt76x2_sta_notify,
+	.set_key = mt76x2_set_key,
+	.conf_tx = mt76x2_conf_tx,
+	.sw_scan_start = mt76x2_sw_scan,
+	.sw_scan_complete = mt76x2_sw_scan_complete,
+	.flush = mt76x2_flush,
+	.ampdu_action = mt76x2_ampdu_action,
+	.get_txpower = mt76x2_get_txpower,
+	.wake_tx_queue = mt76_wake_tx_queue,
+	.sta_rate_tbl_update = mt76x2_sta_rate_tbl_update,
+	.release_buffered_frames = mt76_release_buffered_frames,
+	.set_coverage_class = mt76x2_set_coverage_class,
+	.get_survey = mt76_get_survey,
+};
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2_mcu.c b/drivers/net/wireless/mediatek/mt76/mt76x2_mcu.c
new file mode 100644
index 000000000000..15820b11f9db
--- /dev/null
+++ b/drivers/net/wireless/mediatek/mt76/mt76x2_mcu.c
@@ -0,0 +1,453 @@
+/*
+ * Copyright (C) 2016 Felix Fietkau <nbd@nbd.name>
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include <linux/kernel.h>
+#include <linux/firmware.h>
+#include <linux/delay.h>
+
+#include "mt76x2.h"
+#include "mt76x2_mcu.h"
+#include "mt76x2_dma.h"
+#include "mt76x2_eeprom.h"
+
+struct mt76x2_fw_header {
+	__le32 ilm_len;
+	__le32 dlm_len;
+	__le16 build_ver;
+	__le16 fw_ver;
+	u8 pad[4];
+	char build_time[16];
+};
+
+struct mt76x2_patch_header {
+	char build_time[16];
+	char platform[4];
+	char hw_version[4];
+	char patch_version[4];
+	u8 pad[2];
+};
+
+static struct sk_buff *mt76x2_mcu_msg_alloc(const void *data, int len)
+{
+	struct sk_buff *skb;
+
+	skb = alloc_skb(len, GFP_KERNEL);
+	if (!skb)
+		return NULL;
+	memcpy(skb_put(skb, len), data, len);
+
+	return skb;
+}
+
+static struct sk_buff *
+mt76x2_mcu_get_response(struct mt76x2_dev *dev, unsigned long expires)
+{
+	unsigned long timeout;
+
+	if (!time_is_after_jiffies(expires))
+		return NULL;
+
+	timeout = expires - jiffies;
+	wait_event_timeout(dev->mcu.wait, !skb_queue_empty(&dev->mcu.res_q),
+			   timeout);
+	return skb_dequeue(&dev->mcu.res_q);
+}
+
+static int
+mt76x2_mcu_msg_send(struct mt76x2_dev *dev, struct sk_buff *skb,
+		    enum mcu_cmd cmd)
+{
+	unsigned long expires = jiffies + HZ;
+	int ret;
+	u8 seq;
+
+	if (!skb)
+		return -EINVAL;
+
+	mutex_lock(&dev->mcu.mutex);
+
+	seq = ++dev->mcu.msg_seq & 0xf;
+	if (!seq)
+		seq = ++dev->mcu.msg_seq & 0xf;
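+	/* seq 0 is skipped; it appears to be reserved (e.g. for responses
+	 * carrying no sequence), keeping valid sequence numbers in 1-15
+	 */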
+
+	ret = mt76x2_tx_queue_mcu(dev, MT_TXQ_MCU, skb, cmd, seq);
+	if (ret)
+		goto out;
+
+	while (1) {
+		u32 *rxfce;
+		bool check_seq = false;
+
+		skb = mt76x2_mcu_get_response(dev, expires);
+		if (!skb) {
+			dev_err(dev->mt76.dev,
+				"MCU message %d (seq %d) timed out\n", cmd,
+				seq);
+			ret = -ETIMEDOUT;
+			break;
+		}
+
+		rxfce = (u32 *) skb->cb;
+
+		if (seq == FIELD_GET(MT_RX_FCE_INFO_CMD_SEQ, *rxfce))
+			check_seq = true;
+
+		dev_kfree_skb(skb);
+		if (check_seq)
+			break;
+	}
+
+out:
+	mutex_unlock(&dev->mcu.mutex);
+
+	return ret;
+}
+
+static int
+mt76pci_load_rom_patch(struct mt76x2_dev *dev)
+{
+	const struct firmware *fw = NULL;
+	struct mt76x2_patch_header *hdr;
+	bool rom_protect = !is_mt7612(dev);
+	int len, ret = 0;
+	__le32 *cur;
+	u32 patch_mask, patch_reg;
+
+	if (rom_protect && !mt76_poll(dev, MT_MCU_SEMAPHORE_03, 1, 1, 600)) {
+		dev_err(dev->mt76.dev,
+			"Could not get hardware semaphore for ROM PATCH\n");
+		return -ETIMEDOUT;
+	}
+
+	if (mt76xx_rev(dev) >= MT76XX_REV_E3) {
+		patch_mask = BIT(0);
+		patch_reg = MT_MCU_CLOCK_CTL;
+	} else {
+		patch_mask = BIT(1);
+		patch_reg = MT_MCU_COM_REG0;
+	}
+
+	if (rom_protect && (mt76_rr(dev, patch_reg) & patch_mask)) {
+		dev_info(dev->mt76.dev, "ROM patch already applied\n");
+		goto out;
+	}
+
+	ret = request_firmware(&fw, MT7662_ROM_PATCH, dev->mt76.dev);
+	if (ret)
+		goto out;
+
+	if (!fw || !fw->data || fw->size <= sizeof(*hdr)) {
+		ret = -EIO;
+		dev_err(dev->mt76.dev, "Failed to load firmware\n");
+		goto out;
+	}
+
+	hdr = (struct mt76x2_patch_header *) fw->data;
+	dev_info(dev->mt76.dev, "ROM patch build: %.15s\n", hdr->build_time);
+
+	mt76_wr(dev, MT_MCU_PCIE_REMAP_BASE4, MT_MCU_ROM_PATCH_OFFSET);
+
+	cur = (__le32 *) (fw->data + sizeof(*hdr));
+	len = fw->size - sizeof(*hdr);
+	mt76_wr_copy(dev, MT_MCU_ROM_PATCH_ADDR, cur, len);
+
+	mt76_wr(dev, MT_MCU_PCIE_REMAP_BASE4, 0);
+
+	/* Trigger ROM */
+	mt76_wr(dev, MT_MCU_INT_LEVEL, 4);
+
+	if (!mt76_poll_msec(dev, patch_reg, patch_mask, patch_mask, 2000)) {
+		dev_err(dev->mt76.dev, "Failed to load ROM patch\n");
+		ret = -ETIMEDOUT;
+	}
+
+out:
+	/* release semaphore */
+	if (rom_protect)
+		mt76_wr(dev, MT_MCU_SEMAPHORE_03, 1);
+	release_firmware(fw);
+	return ret;
+}
+
+static int
+mt76pci_load_firmware(struct mt76x2_dev *dev)
+{
+	const struct firmware *fw;
+	const struct mt76x2_fw_header *hdr;
+	int i, len, ret;
+	__le32 *cur;
+	u32 offset, val;
+
+	ret = request_firmware(&fw, MT7662_FIRMWARE, dev->mt76.dev);
+	if (ret)
+		return ret;
+
+	if (!fw || !fw->data || fw->size < sizeof(*hdr))
+		goto error;
+
+	hdr = (const struct mt76x2_fw_header *) fw->data;
+
+	len = sizeof(*hdr);
+	len += le32_to_cpu(hdr->ilm_len);
+	len += le32_to_cpu(hdr->dlm_len);
+
+	if (fw->size != len)
+		goto error;
+
+	val = le16_to_cpu(hdr->fw_ver);
+	dev_info(dev->mt76.dev, "Firmware Version: %d.%d.%02d\n",
+		 (val >> 12) & 0xf, (val >> 8) & 0xf, val & 0xf);
+
+	val = le16_to_cpu(hdr->build_ver);
+	dev_info(dev->mt76.dev, "Build: %x\n", val);
+	dev_info(dev->mt76.dev, "Build Time: %.16s\n", hdr->build_time);
+
+	cur = (__le32 *) (fw->data + sizeof(*hdr));
+	len = le32_to_cpu(hdr->ilm_len);
+
+	mt76_wr(dev, MT_MCU_PCIE_REMAP_BASE4, MT_MCU_ILM_OFFSET);
+	mt76_wr_copy(dev, MT_MCU_ILM_ADDR, cur, len);
+
+	cur += len / sizeof(*cur);
+	len = le32_to_cpu(hdr->dlm_len);
+
+	if (mt76xx_rev(dev) >= MT76XX_REV_E3)
+		offset = MT_MCU_DLM_ADDR_E3;
+	else
+		offset = MT_MCU_DLM_ADDR;
+
+	mt76_wr(dev, MT_MCU_PCIE_REMAP_BASE4, MT_MCU_DLM_OFFSET);
+	mt76_wr_copy(dev, offset, cur, len);
+
+	mt76_wr(dev, MT_MCU_PCIE_REMAP_BASE4, 0);
+
+	val = mt76x2_eeprom_get(dev, MT_EE_NIC_CONF_2);
+	if (FIELD_GET(MT_EE_NIC_CONF_2_XTAL_OPTION, val) == 1)
+		mt76_set(dev, MT_MCU_COM_REG0, BIT(30));
+
+	/* trigger firmware */
+	mt76_wr(dev, MT_MCU_INT_LEVEL, 2);
+	for (i = 200; i > 0; i--) {
+		val = mt76_rr(dev, MT_MCU_COM_REG0);
+
+		if (val & 1)
+			break;
+
+		msleep(10);
+	}
+
+	if (!i) {
+		dev_err(dev->mt76.dev, "Firmware failed to start\n");
+		release_firmware(fw);
+		return -ETIMEDOUT;
+	}
+
+	dev_info(dev->mt76.dev, "Firmware running!\n");
+
+	release_firmware(fw);
+
+	return ret;
+
+error:
+	dev_err(dev->mt76.dev, "Invalid firmware\n");
+	release_firmware(fw);
+	return -ENOENT;
+}
+
+static int
+mt76x2_mcu_function_select(struct mt76x2_dev *dev, enum mcu_function func,
+			   u32 val)
+{
+	struct sk_buff *skb;
+	struct {
+	    __le32 id;
+	    __le32 value;
+	} __packed __aligned(4) msg = {
+	    .id = cpu_to_le32(func),
+	    .value = cpu_to_le32(val),
+	};
+
+	skb = mt76x2_mcu_msg_alloc(&msg, sizeof(msg));
+	return mt76x2_mcu_msg_send(dev, skb, CMD_FUN_SET_OP);
+}
+
+int mt76x2_mcu_load_cr(struct mt76x2_dev *dev, u8 type, u8 temp_level,
+		       u8 channel)
+{
+	struct sk_buff *skb;
+	struct {
+		u8 cr_mode;
+		u8 temp;
+		u8 ch;
+		u8 _pad0;
+
+		__le32 cfg;
+	} __packed __aligned(4) msg = {
+		.cr_mode = type,
+		.temp = temp_level,
+		.ch = channel,
+	};
+	u32 val;
+
+	val = BIT(31);
+	val |= (mt76x2_eeprom_get(dev, MT_EE_NIC_CONF_0) >> 8) & 0x00ff;
+	val |= (mt76x2_eeprom_get(dev, MT_EE_NIC_CONF_1) << 8) & 0xff00;
+	msg.cfg = cpu_to_le32(val);
+
+	skb = mt76x2_mcu_msg_alloc(&msg, sizeof(msg));
+	return mt76x2_mcu_msg_send(dev, skb, CMD_LOAD_CR);
+}
+
+int mt76x2_mcu_set_channel(struct mt76x2_dev *dev, u8 channel, u8 bw,
+			   u8 bw_index, bool scan)
+{
+	struct sk_buff *skb;
+	struct {
+		u8 idx;
+		u8 scan;
+		u8 bw;
+		u8 _pad0;
+
+		__le16 chainmask;
+		u8 ext_chan;
+		u8 _pad1;
+
+	} __packed __aligned(4) msg = {
+		.idx = channel,
+		.scan = scan,
+		.bw = bw,
+		.chainmask = cpu_to_le16(dev->chainmask),
+	};
+
+	/* first set the channel without the extension channel info */
+	skb = mt76x2_mcu_msg_alloc(&msg, sizeof(msg));
+	mt76x2_mcu_msg_send(dev, skb, CMD_SWITCH_CHANNEL_OP);
+
+	usleep_range(5000, 10000);
+
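+	/*
+	 * The second pass repeats the command with the extension channel
+	 * index filled in; the 0xe0 base looks like a firmware marker
+	 * for "extension channel info valid" (an assumption, this is
+	 * not documented in the patch).
+	 */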
+	msg.ext_chan = 0xe0 + bw_index;
+	skb = mt76x2_mcu_msg_alloc(&msg, sizeof(msg));
+	return mt76x2_mcu_msg_send(dev, skb, CMD_SWITCH_CHANNEL_OP);
+}
+
+int mt76x2_mcu_set_radio_state(struct mt76x2_dev *dev, bool on)
+{
+	struct sk_buff *skb;
+	struct {
+		__le32 mode;
+		__le32 level;
+	} __packed __aligned(4) msg = {
+		.mode = cpu_to_le32(on ? RADIO_ON : RADIO_OFF),
+		.level = cpu_to_le32(0),
+	};
+
+	skb = mt76x2_mcu_msg_alloc(&msg, sizeof(msg));
+	return mt76x2_mcu_msg_send(dev, skb, CMD_POWER_SAVING_OP);
+}
+
+int mt76x2_mcu_calibrate(struct mt76x2_dev *dev, enum mcu_calibration type,
+			 u32 param)
+{
+	struct sk_buff *skb;
+	struct {
+		__le32 id;
+		__le32 value;
+	} __packed __aligned(4) msg = {
+		.id = cpu_to_le32(type),
+		.value = cpu_to_le32(param),
+	};
+	int ret;
+
+	mt76_clear(dev, MT_MCU_COM_REG0, BIT(31));
+
+	skb = mt76x2_mcu_msg_alloc(&msg, sizeof(msg));
+	ret = mt76x2_mcu_msg_send(dev, skb, CMD_CALIBRATION_OP);
+	if (ret)
+		return ret;
+
+	if (WARN_ON(!mt76_poll_msec(dev, MT_MCU_COM_REG0,
+				    BIT(31), BIT(31), 100)))
+		return -ETIMEDOUT;
+
+	return 0;
+}
+
+int mt76x2_mcu_tssi_comp(struct mt76x2_dev *dev,
+			 struct mt76x2_tssi_comp *tssi_data)
+{
+	struct sk_buff *skb;
+	struct {
+		__le32 id;
+		struct mt76x2_tssi_comp data;
+	} __packed __aligned(4) msg = {
+		.id = cpu_to_le32(MCU_CAL_TSSI_COMP),
+		.data = *tssi_data,
+	};
+
+	skb = mt76x2_mcu_msg_alloc(&msg, sizeof(msg));
+	return mt76x2_mcu_msg_send(dev, skb, CMD_CALIBRATION_OP);
+}
+
+int mt76x2_mcu_init_gain(struct mt76x2_dev *dev, u8 channel, u32 gain,
+			 bool force)
+{
+	struct sk_buff *skb;
+	struct {
+		__le32 channel;
+		__le32 gain_val;
+	} __packed __aligned(4) msg = {
+		.channel = cpu_to_le32(channel),
+		.gain_val = cpu_to_le32(gain),
+	};
+
+	if (force)
+		msg.channel |= cpu_to_le32(BIT(31));
+
+	skb = mt76x2_mcu_msg_alloc(&msg, sizeof(msg));
+	return mt76x2_mcu_msg_send(dev, skb, CMD_INIT_GAIN_OP);
+}
+
+int mt76x2_mcu_init(struct mt76x2_dev *dev)
+{
+	int ret;
+
+	mutex_init(&dev->mcu.mutex);
+
+	ret = mt76pci_load_rom_patch(dev);
+	if (ret)
+		return ret;
+
+	ret = mt76pci_load_firmware(dev);
+	if (ret)
+		return ret;
+
+	mt76x2_mcu_function_select(dev, Q_SELECT, 1);
+	return 0;
+}
+
+int mt76x2_mcu_cleanup(struct mt76x2_dev *dev)
+{
+	struct sk_buff *skb;
+
+	mt76_wr(dev, MT_MCU_INT_LEVEL, 1);
+	usleep_range(20000, 30000);
+
+	while ((skb = skb_dequeue(&dev->mcu.res_q)) != NULL)
+		dev_kfree_skb(skb);
+
+	return 0;
+}
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2_mcu.h b/drivers/net/wireless/mediatek/mt76/mt76x2_mcu.h
new file mode 100644
index 000000000000..d7a7e83262ce
--- /dev/null
+++ b/drivers/net/wireless/mediatek/mt76/mt76x2_mcu.h
@@ -0,0 +1,155 @@
+/*
+ * Copyright (C) 2016 Felix Fietkau <nbd@nbd.name>
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#ifndef __MT76x2_MCU_H
+#define __MT76x2_MCU_H
+
+/* Register definitions */
+#define MT_MCU_CPU_CTL			0x0704
+#define MT_MCU_CLOCK_CTL		0x0708
+#define MT_MCU_RESET_CTL		0x070C
+#define MT_MCU_INT_LEVEL		0x0718
+#define MT_MCU_COM_REG0			0x0730
+#define MT_MCU_COM_REG1			0x0734
+#define MT_MCU_COM_REG2			0x0738
+#define MT_MCU_COM_REG3			0x073C
+#define MT_MCU_PCIE_REMAP_BASE1		0x0740
+#define MT_MCU_PCIE_REMAP_BASE2		0x0744
+#define MT_MCU_PCIE_REMAP_BASE3		0x0748
+#define MT_MCU_PCIE_REMAP_BASE4		0x074C
+
+#define MT_LED_CTRL			0x0770
+#define MT_LED_CTRL_REPLAY(_n)		BIT(0 + (8 * (_n)))
+#define MT_LED_CTRL_POLARITY(_n)	BIT(1 + (8 * (_n)))
+#define MT_LED_CTRL_TX_BLINK_MODE(_n)	BIT(2 + (8 * (_n)))
+#define MT_LED_CTRL_KICK(_n)		BIT(7 + (8 * (_n)))
+
+#define MT_LED_TX_BLINK_0		0x0774
+#define MT_LED_TX_BLINK_1		0x0778
+
+#define MT_LED_S0_BASE			0x077C
+#define MT_LED_S0(_n)			(MT_LED_S0_BASE + 8 * (_n))
+#define MT_LED_S1_BASE			0x0780
+#define MT_LED_S1(_n)			(MT_LED_S1_BASE + 8 * (_n))
+#define MT_LED_STATUS_OFF_MASK		GENMASK(31, 24)
+#define MT_LED_STATUS_OFF(_v)		(((_v) << __ffs(MT_LED_STATUS_OFF_MASK)) & \
+					 MT_LED_STATUS_OFF_MASK)
+#define MT_LED_STATUS_ON_MASK		GENMASK(23, 16)
+#define MT_LED_STATUS_ON(_v)		(((_v) << __ffs(MT_LED_STATUS_ON_MASK)) & \
+					 MT_LED_STATUS_ON_MASK)
+#define MT_LED_STATUS_DURATION_MASK	GENMASK(15, 8)
+#define MT_LED_STATUS_DURATION(_v)	(((_v) << __ffs(MT_LED_STATUS_DURATION_MASK)) & \
+					 MT_LED_STATUS_DURATION_MASK)
+
+#define MT_MCU_SEMAPHORE_00		0x07B0
+#define MT_MCU_SEMAPHORE_01		0x07B4
+#define MT_MCU_SEMAPHORE_02		0x07B8
+#define MT_MCU_SEMAPHORE_03		0x07BC
+
+#define MT_MCU_ROM_PATCH_OFFSET		0x80000
+#define MT_MCU_ROM_PATCH_ADDR		0x90000
+
+#define MT_MCU_ILM_OFFSET		0x80000
+#define MT_MCU_ILM_ADDR			0x80000
+
+#define MT_MCU_DLM_OFFSET		0x100000
+#define MT_MCU_DLM_ADDR			0x90000
+#define MT_MCU_DLM_ADDR_E3		0x90800
+
+enum mcu_cmd {
+	CMD_FUN_SET_OP = 1,
+	CMD_LOAD_CR = 2,
+	CMD_INIT_GAIN_OP = 3,
+	CMD_DYNC_VGA_OP = 6,
+	CMD_TDLS_CH_SW = 7,
+	CMD_BURST_WRITE = 8,
+	CMD_READ_MODIFY_WRITE = 9,
+	CMD_RANDOM_READ = 10,
+	CMD_BURST_READ = 11,
+	CMD_RANDOM_WRITE = 12,
+	CMD_LED_MODE_OP = 16,
+	CMD_POWER_SAVING_OP = 20,
+	CMD_WOW_CONFIG = 21,
+	CMD_WOW_QUERY = 22,
+	CMD_WOW_FEATURE = 24,
+	CMD_CARRIER_DETECT_OP = 28,
+	CMD_RADOR_DETECT_OP = 29,
+	CMD_SWITCH_CHANNEL_OP = 30,
+	CMD_CALIBRATION_OP = 31,
+	CMD_BEACON_OP = 32,
+	CMD_ANTENNA_OP = 33,
+};
+
+enum mcu_function {
+	Q_SELECT = 1,
+	BW_SETTING = 2,
+	USB2_SW_DISCONNECT = 2,
+	USB3_SW_DISCONNECT = 3,
+	LOG_FW_DEBUG_MSG = 4,
+	GET_FW_VERSION = 5,
+};
+
+enum mcu_power_mode {
+	RADIO_OFF = 0x30,
+	RADIO_ON = 0x31,
+	RADIO_OFF_AUTO_WAKEUP = 0x32,
+	RADIO_OFF_ADVANCE = 0x33,
+	RADIO_ON_ADVANCE = 0x34,
+};
+
+enum mcu_calibration {
+	MCU_CAL_R = 1,
+	MCU_CAL_TEMP_SENSOR,
+	MCU_CAL_RXDCOC,
+	MCU_CAL_RC,
+	MCU_CAL_SX_LOGEN,
+	MCU_CAL_LC,
+	MCU_CAL_TX_LOFT,
+	MCU_CAL_TXIQ,
+	MCU_CAL_TSSI,
+	MCU_CAL_TSSI_COMP,
+	MCU_CAL_DPD,
+	MCU_CAL_RXIQC_FI,
+	MCU_CAL_RXIQC_FD,
+	MCU_CAL_PWRON,
+	MCU_CAL_TX_SHAPING,
+};
+
+enum mt76x2_mcu_cr_mode {
+	MT_RF_CR,
+	MT_BBP_CR,
+	MT_RF_BBP_CR,
+	MT_HL_TEMP_CR_UPDATE,
+};
+
+struct mt76x2_tssi_comp {
+	u8 pa_mode;
+	u8 cal_mode;
+	u16 pad;
+
+	u8 slope0;
+	u8 slope1;
+	u8 offset0;
+	u8 offset1;
+} __packed __aligned(4);
+
+int mt76x2_mcu_calibrate(struct mt76x2_dev *dev, enum mcu_calibration type,
+			 u32 param);
+int mt76x2_mcu_tssi_comp(struct mt76x2_dev *dev,
+			 struct mt76x2_tssi_comp *tssi_data);
+int mt76x2_mcu_init_gain(struct mt76x2_dev *dev, u8 channel, u32 gain,
+			 bool force);
+
+#endif
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2_pci.c b/drivers/net/wireless/mediatek/mt76/mt76x2_pci.c
new file mode 100644
index 000000000000..e66f047ea448
--- /dev/null
+++ b/drivers/net/wireless/mediatek/mt76/mt76x2_pci.c
@@ -0,0 +1,110 @@
+/*
+ * Copyright (C) 2016 Felix Fietkau <nbd@nbd.name>
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+
+#include "mt76x2.h"
+#include "mt76x2_trace.h"
+
+static const struct pci_device_id mt76pci_device_table[] = {
+	{ PCI_DEVICE(0x14c3, 0x7662) },
+	{ PCI_DEVICE(0x14c3, 0x7612) },
+	{ PCI_DEVICE(0x14c3, 0x7602) },
+	{ },
+};
+
+static int
+mt76pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
+{
+	struct mt76x2_dev *dev;
+	int ret;
+
+	ret = pcim_enable_device(pdev);
+	if (ret)
+		return ret;
+
+	ret = pcim_iomap_regions(pdev, BIT(0), pci_name(pdev));
+	if (ret)
+		return ret;
+
+	pci_set_master(pdev);
+
+	ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
+	if (ret)
+		return ret;
+
+	dev = mt76x2_alloc_device(&pdev->dev);
+	if (!dev)
+		return -ENOMEM;
+
+	mt76_mmio_init(&dev->mt76, pcim_iomap_table(pdev)[0]);
+
+	dev->mt76.rev = mt76_rr(dev, MT_ASIC_VERSION);
+	dev_info(dev->mt76.dev, "ASIC revision: %08x\n", dev->mt76.rev);
+
+	ret = devm_request_irq(dev->mt76.dev, pdev->irq, mt76x2_irq_handler,
+			       IRQF_SHARED, KBUILD_MODNAME, dev);
+	if (ret)
+		goto error;
+
+	ret = mt76x2_register_device(dev);
+	if (ret)
+		goto error;
+
+	/* Fix up ASPM configuration */
+
+	/* RG_SSUSB_G1_CDR_BIR_LTR = 0x9 */
+	mt76_rmw_field(dev, 0x15a10, 0x1f << 16, 0x9);
+
+	/* RG_SSUSB_G1_CDR_BIC_LTR = 0xf */
+	mt76_rmw_field(dev, 0x15a0c, 0xf << 28, 0xf);
+
+	/* RG_SSUSB_CDR_BR_PE1D = 0x3 */
+	mt76_rmw_field(dev, 0x15c58, 0x3 << 6, 0x3);
+
+	return 0;
+
+error:
+	ieee80211_free_hw(mt76_hw(dev));
+	return ret;
+}
+
+static void
+mt76pci_remove(struct pci_dev *pdev)
+{
+	struct mt76_dev *mdev = pci_get_drvdata(pdev);
+	struct mt76x2_dev *dev = container_of(mdev, struct mt76x2_dev, mt76);
+
+	mt76_unregister_device(mdev);
+	mt76x2_cleanup(dev);
+	ieee80211_free_hw(mdev->hw);
+}
+
+MODULE_DEVICE_TABLE(pci, mt76pci_device_table);
+MODULE_FIRMWARE(MT7662_FIRMWARE);
+MODULE_FIRMWARE(MT7662_ROM_PATCH);
+MODULE_LICENSE("Dual BSD/GPL");
+
+static struct pci_driver mt76pci_driver = {
+	.name		= KBUILD_MODNAME,
+	.id_table	= mt76pci_device_table,
+	.probe		= mt76pci_probe,
+	.remove		= mt76pci_remove,
+};
+
+module_pci_driver(mt76pci_driver);
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2_phy.c b/drivers/net/wireless/mediatek/mt76/mt76x2_phy.c
new file mode 100644
index 000000000000..5b742749d5de
--- /dev/null
+++ b/drivers/net/wireless/mediatek/mt76/mt76x2_phy.c
@@ -0,0 +1,740 @@
+/*
+ * Copyright (C) 2016 Felix Fietkau <nbd@nbd.name>
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include <linux/delay.h>
+#include "mt76x2.h"
+#include "mt76x2_mcu.h"
+#include "mt76x2_eeprom.h"
+
+static void
+mt76x2_adjust_high_lna_gain(struct mt76x2_dev *dev, int reg, s8 offset)
+{
+	s8 gain;
+
+	gain = FIELD_GET(MT_BBP_AGC_LNA_HIGH_GAIN,
+			 mt76_rr(dev, MT_BBP(AGC, reg)));
+	gain -= offset / 2;
+	mt76_rmw_field(dev, MT_BBP(AGC, reg), MT_BBP_AGC_LNA_HIGH_GAIN, gain);
+}
+
+static void
+mt76x2_adjust_agc_gain(struct mt76x2_dev *dev, int reg, s8 offset)
+{
+	s8 gain;
+
+	gain = FIELD_GET(MT_BBP_AGC_GAIN, mt76_rr(dev, MT_BBP(AGC, reg)));
+	gain += offset;
+	mt76_rmw_field(dev, MT_BBP(AGC, reg), MT_BBP_AGC_GAIN, gain);
+}
+
+static void
+mt76x2_apply_gain_adj(struct mt76x2_dev *dev)
+{
+	s8 *gain_adj = dev->cal.rx.high_gain;
+
+	mt76x2_adjust_high_lna_gain(dev, 4, gain_adj[0]);
+	mt76x2_adjust_high_lna_gain(dev, 5, gain_adj[1]);
+
+	mt76x2_adjust_agc_gain(dev, 8, gain_adj[0]);
+	mt76x2_adjust_agc_gain(dev, 9, gain_adj[1]);
+}
+
+static u32
+mt76x2_tx_power_mask(u8 v1, u8 v2, u8 v3, u8 v4)
+{
+	u32 val = 0;
+
+	val |= (v1 & (BIT(6) - 1)) << 0;
+	val |= (v2 & (BIT(6) - 1)) << 8;
+	val |= (v3 & (BIT(6) - 1)) << 16;
+	val |= (v4 & (BIT(6) - 1)) << 24;
+	return val;
+}
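+
+/*
+ * Example of the packing above (illustrative): the four per-rate power
+ * values are 6-bit numbers placed one per byte lane, so
+ * mt76x2_tx_power_mask(0x10, 0x12, 0x14, 0x16) yields 0x16141210.
+ * Judging by the txpower handling elsewhere in this patch, the unit is
+ * 0.5 dB steps.
+ */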
+
+int mt76x2_phy_get_rssi(struct mt76x2_dev *dev, s8 rssi, int chain)
+{
+	struct mt76x2_rx_freq_cal *cal = &dev->cal.rx;
+
+	rssi += cal->rssi_offset[chain];
+	rssi -= cal->lna_gain;
+
+	return rssi;
+}
+
+static u8
+mt76x2_txpower_check(int value)
+{
+	if (value < 0)
+		return 0;
+	if (value > 0x2f)
+		return 0x2f;
+	return value;
+}
+
+static void
+mt76x2_add_rate_power_offset(struct mt76_rate_power *r, int offset)
+{
+	int i;
+
+	for (i = 0; i < sizeof(r->all); i++)
+		r->all[i] += offset;
+}
+
+static void
+mt76x2_limit_rate_power(struct mt76_rate_power *r, int limit)
+{
+	int i;
+
+	for (i = 0; i < sizeof(r->all); i++)
+		if (r->all[i] > limit)
+			r->all[i] = limit;
+}
+
+void mt76x2_phy_set_txpower(struct mt76x2_dev *dev)
+{
+	enum nl80211_chan_width width = dev->mt76.chandef.width;
+	struct ieee80211_channel *chan = dev->mt76.chandef.chan;
+	struct mt76x2_tx_power_info txp;
+	int txp_0, txp_1, delta = 0;
+	struct mt76_rate_power t = {};
+
+	mt76x2_get_power_info(dev, &txp, chan);
+
+	if (width == NL80211_CHAN_WIDTH_40)
+		delta = txp.delta_bw40;
+	else if (width == NL80211_CHAN_WIDTH_80)
+		delta = txp.delta_bw80;
+
+	if (txp.target_power > dev->txpower_conf)
+		delta -= txp.target_power - dev->txpower_conf;
+
+	mt76x2_get_rate_power(dev, &t, chan);
+	mt76x2_add_rate_power_offset(&t, txp.chain[0].target_power +
+				   txp.chain[0].delta);
+	mt76x2_limit_rate_power(&t, dev->txpower_conf);
+	dev->txpower_cur = mt76x2_get_max_rate_power(&t);
+	mt76x2_add_rate_power_offset(&t, -(txp.chain[0].target_power +
+					 txp.chain[0].delta + delta));
+	dev->target_power = txp.chain[0].target_power;
+	dev->target_power_delta[0] = txp.chain[0].delta + delta;
+	dev->target_power_delta[1] = txp.chain[1].delta + delta;
+	dev->rate_power = t;
+
+	txp_0 = mt76x2_txpower_check(txp.chain[0].target_power +
+				   txp.chain[0].delta + delta);
+
+	txp_1 = mt76x2_txpower_check(txp.chain[1].target_power +
+				   txp.chain[1].delta + delta);
+
+	mt76_rmw_field(dev, MT_TX_ALC_CFG_0, MT_TX_ALC_CFG_0_CH_INIT_0, txp_0);
+	mt76_rmw_field(dev, MT_TX_ALC_CFG_0, MT_TX_ALC_CFG_0_CH_INIT_1, txp_1);
+
+	mt76_wr(dev, MT_TX_PWR_CFG_0,
+		mt76x2_tx_power_mask(t.cck[0], t.cck[2], t.ofdm[0], t.ofdm[2]));
+	mt76_wr(dev, MT_TX_PWR_CFG_1,
+		mt76x2_tx_power_mask(t.ofdm[4], t.ofdm[6], t.ht[0], t.ht[2]));
+	mt76_wr(dev, MT_TX_PWR_CFG_2,
+		mt76x2_tx_power_mask(t.ht[4], t.ht[6], t.ht[8], t.ht[10]));
+	mt76_wr(dev, MT_TX_PWR_CFG_3,
+		mt76x2_tx_power_mask(t.ht[12], t.ht[14], t.ht[0], t.ht[2]));
+	mt76_wr(dev, MT_TX_PWR_CFG_4,
+		mt76x2_tx_power_mask(t.ht[4], t.ht[6], 0, 0));
+	mt76_wr(dev, MT_TX_PWR_CFG_7,
+		mt76x2_tx_power_mask(t.ofdm[6], t.vht[8], t.ht[6], t.vht[8]));
+	mt76_wr(dev, MT_TX_PWR_CFG_8,
+		mt76x2_tx_power_mask(t.ht[14], t.vht[8], t.vht[8], 0));
+	mt76_wr(dev, MT_TX_PWR_CFG_9,
+		mt76x2_tx_power_mask(t.ht[6], t.vht[8], t.vht[8], 0));
+}
+
+static bool
+mt76x2_channel_silent(struct mt76x2_dev *dev)
+{
+	struct ieee80211_channel *chan = dev->mt76.chandef.chan;
+
+	return ((chan->flags & IEEE80211_CHAN_RADAR) &&
+		chan->dfs_state != NL80211_DFS_AVAILABLE);
+}
+
+static bool
+mt76x2_phy_tssi_init_cal(struct mt76x2_dev *dev)
+{
+	struct ieee80211_channel *chan = dev->mt76.chandef.chan;
+	u32 flag = 0;
+
+	if (!mt76x2_tssi_enabled(dev))
+		return false;
+
+	if (mt76x2_channel_silent(dev))
+		return false;
+
+	if (chan->band == NL80211_BAND_2GHZ)
+		flag |= BIT(0);
+
+	if (mt76x2_ext_pa_enabled(dev, chan->band))
+		flag |= BIT(8);
+
+	mt76x2_mcu_calibrate(dev, MCU_CAL_TSSI, flag);
+	dev->cal.tssi_cal_done = true;
+	return true;
+}
+
+static void
+mt76x2_phy_channel_calibrate(struct mt76x2_dev *dev, bool mac_stopped)
+{
+	struct ieee80211_channel *chan = dev->mt76.chandef.chan;
+	bool is_5ghz = chan->band == NL80211_BAND_5GHZ;
+
+	if (dev->cal.channel_cal_done)
+		return;
+
+	if (mt76x2_channel_silent(dev))
+		return;
+
+	if (!dev->cal.tssi_cal_done)
+		mt76x2_phy_tssi_init_cal(dev);
+
+	if (!mac_stopped)
+		mt76x2_mac_stop(dev, false);
+
+	if (is_5ghz)
+		mt76x2_mcu_calibrate(dev, MCU_CAL_LC, 0);
+
+	mt76x2_mcu_calibrate(dev, MCU_CAL_TX_LOFT, is_5ghz);
+	mt76x2_mcu_calibrate(dev, MCU_CAL_TXIQ, is_5ghz);
+	mt76x2_mcu_calibrate(dev, MCU_CAL_RXIQC_FI, is_5ghz);
+	mt76x2_mcu_calibrate(dev, MCU_CAL_TEMP_SENSOR, 0);
+	mt76x2_mcu_calibrate(dev, MCU_CAL_TX_SHAPING, 0);
+
+	if (!mac_stopped)
+		mt76x2_mac_resume(dev);
+
+	mt76x2_apply_gain_adj(dev);
+
+	dev->cal.channel_cal_done = true;
+}
+
+static void
+mt76x2_phy_set_txpower_regs(struct mt76x2_dev *dev, enum nl80211_band band)
+{
+	u32 pa_mode[2];
+	u32 pa_mode_adj;
+
+	if (band == NL80211_BAND_2GHZ) {
+		pa_mode[0] = 0x010055ff;
+		pa_mode[1] = 0x00550055;
+
+		mt76_wr(dev, MT_TX_ALC_CFG_2, 0x35160a00);
+		mt76_wr(dev, MT_TX_ALC_CFG_3, 0x35160a06);
+
+		if (mt76x2_ext_pa_enabled(dev, band)) {
+			mt76_wr(dev, MT_RF_PA_MODE_ADJ0, 0x0000ec00);
+			mt76_wr(dev, MT_RF_PA_MODE_ADJ1, 0x0000ec00);
+		} else {
+			mt76_wr(dev, MT_RF_PA_MODE_ADJ0, 0xf4000200);
+			mt76_wr(dev, MT_RF_PA_MODE_ADJ1, 0xfa000200);
+		}
+	} else {
+		pa_mode[0] = 0x0000ffff;
+		pa_mode[1] = 0x00ff00ff;
+
+		if (mt76x2_ext_pa_enabled(dev, band)) {
+			mt76_wr(dev, MT_TX_ALC_CFG_2, 0x2f0f0400);
+			mt76_wr(dev, MT_TX_ALC_CFG_3, 0x2f0f0476);
+		} else {
+			mt76_wr(dev, MT_TX_ALC_CFG_2, 0x1b0f0400);
+			mt76_wr(dev, MT_TX_ALC_CFG_3, 0x1b0f0476);
+		}
+		mt76_wr(dev, MT_TX_ALC_CFG_4, 0);
+
+		if (mt76x2_ext_pa_enabled(dev, band))
+			pa_mode_adj = 0x04000000;
+		else
+			pa_mode_adj = 0;
+
+		mt76_wr(dev, MT_RF_PA_MODE_ADJ0, pa_mode_adj);
+		mt76_wr(dev, MT_RF_PA_MODE_ADJ1, pa_mode_adj);
+	}
+
+	mt76_wr(dev, MT_BB_PA_MODE_CFG0, pa_mode[0]);
+	mt76_wr(dev, MT_BB_PA_MODE_CFG1, pa_mode[1]);
+	mt76_wr(dev, MT_RF_PA_MODE_CFG0, pa_mode[0]);
+	mt76_wr(dev, MT_RF_PA_MODE_CFG1, pa_mode[1]);
+
+	if (mt76x2_ext_pa_enabled(dev, band)) {
+		u32 val;
+
+		if (band == NL80211_BAND_2GHZ)
+			val = 0x3c3c023c;
+		else
+			val = 0x363c023c;
+
+		mt76_wr(dev, MT_TX0_RF_GAIN_CORR, val);
+		mt76_wr(dev, MT_TX1_RF_GAIN_CORR, val);
+		mt76_wr(dev, MT_TX_ALC_CFG_4, 0x00001818);
+	} else {
+		if (band == NL80211_BAND_2GHZ) {
+			u32 val = 0x0f3c3c3c;
+
+			mt76_wr(dev, MT_TX0_RF_GAIN_CORR, val);
+			mt76_wr(dev, MT_TX1_RF_GAIN_CORR, val);
+			mt76_wr(dev, MT_TX_ALC_CFG_4, 0x00000606);
+		} else {
+			mt76_wr(dev, MT_TX0_RF_GAIN_CORR, 0x383c023c);
+			mt76_wr(dev, MT_TX1_RF_GAIN_CORR, 0x24282e28);
+			mt76_wr(dev, MT_TX_ALC_CFG_4, 0);
+		}
+	}
+}
+
+static void
+mt76x2_configure_tx_delay(struct mt76x2_dev *dev, enum nl80211_band band, u8 bw)
+{
+	u32 cfg0, cfg1;
+
+	if (mt76x2_ext_pa_enabled(dev, band)) {
+		cfg0 = bw ? 0x000b0c01 : 0x00101101;
+		cfg1 = 0x00011414;
+	} else {
+		cfg0 = bw ? 0x000b0b01 : 0x00101001;
+		cfg1 = 0x00021414;
+	}
+	mt76_wr(dev, MT_TX_SW_CFG0, cfg0);
+	mt76_wr(dev, MT_TX_SW_CFG1, cfg1);
+
+	mt76_rmw_field(dev, MT_XIFS_TIME_CFG, MT_XIFS_TIME_CFG_OFDM_SIFS, 15);
+}
+
+static void
+mt76x2_phy_set_bw(struct mt76x2_dev *dev, int width, u8 ctrl)
+{
+	int core_val, agc_val;
+
+	switch (width) {
+	case NL80211_CHAN_WIDTH_80:
+		core_val = 3;
+		agc_val = 7;
+		break;
+	case NL80211_CHAN_WIDTH_40:
+		core_val = 2;
+		agc_val = 3;
+		break;
+	default:
+		core_val = 0;
+		agc_val = 1;
+		break;
+	}
+
+	mt76_rmw_field(dev, MT_BBP(CORE, 1), MT_BBP_CORE_R1_BW, core_val);
+	mt76_rmw_field(dev, MT_BBP(AGC, 0), MT_BBP_AGC_R0_BW, agc_val);
+	mt76_rmw_field(dev, MT_BBP(AGC, 0), MT_BBP_AGC_R0_CTRL_CHAN, ctrl);
+	mt76_rmw_field(dev, MT_BBP(TXBE, 0), MT_BBP_TXBE_R0_CTRL_CHAN, ctrl);
+}
+
+static void
+mt76x2_phy_set_band(struct mt76x2_dev *dev, int band, bool primary_upper)
+{
+	switch (band) {
+	case NL80211_BAND_2GHZ:
+		mt76_set(dev, MT_TX_BAND_CFG, MT_TX_BAND_CFG_2G);
+		mt76_clear(dev, MT_TX_BAND_CFG, MT_TX_BAND_CFG_5G);
+		break;
+	case NL80211_BAND_5GHZ:
+		mt76_clear(dev, MT_TX_BAND_CFG, MT_TX_BAND_CFG_2G);
+		mt76_set(dev, MT_TX_BAND_CFG, MT_TX_BAND_CFG_5G);
+		break;
+	}
+
+	mt76_rmw_field(dev, MT_TX_BAND_CFG, MT_TX_BAND_CFG_UPPER_40M,
+		       primary_upper);
+}
+
+static void
+mt76x2_set_rx_chains(struct mt76x2_dev *dev)
+{
+	u32 val;
+
+	val = mt76_rr(dev, MT_BBP(AGC, 0));
+	val &= ~(BIT(3) | BIT(4));
+
+	if (dev->chainmask & BIT(1))
+		val |= BIT(3);
+
+	mt76_wr(dev, MT_BBP(AGC, 0), val);
+}
+
+static void
+mt76x2_set_tx_dac(struct mt76x2_dev *dev)
+{
+	if (dev->chainmask & BIT(1))
+		mt76_set(dev, MT_BBP(TXBE, 5), 3);
+	else
+		mt76_clear(dev, MT_BBP(TXBE, 5), 3);
+}
+
+static void
+mt76x2_get_agc_gain(struct mt76x2_dev *dev, u8 *dest)
+{
+	dest[0] = mt76_get_field(dev, MT_BBP(AGC, 8), MT_BBP_AGC_GAIN);
+	dest[1] = mt76_get_field(dev, MT_BBP(AGC, 9), MT_BBP_AGC_GAIN);
+}
+
+static int
+mt76x2_get_rssi_gain_thresh(struct mt76x2_dev *dev)
+{
+	switch (dev->mt76.chandef.width) {
+	case NL80211_CHAN_WIDTH_80:
+		return -62;
+	case NL80211_CHAN_WIDTH_40:
+		return -65;
+	default:
+		return -68;
+	}
+}
+
+static int
+mt76x2_get_low_rssi_gain_thresh(struct mt76x2_dev *dev)
+{
+	switch (dev->mt76.chandef.width) {
+	case NL80211_CHAN_WIDTH_80:
+		return -76;
+	case NL80211_CHAN_WIDTH_40:
+		return -79;
+	default:
+		return -82;
+	}
+}
+
+static void
+mt76x2_phy_set_gain_val(struct mt76x2_dev *dev)
+{
+	u32 val;
+	u8 gain_val[2];
+
+	gain_val[0] = dev->cal.agc_gain_cur[0] - dev->cal.agc_gain_adjust;
+	gain_val[1] = dev->cal.agc_gain_cur[1] - dev->cal.agc_gain_adjust;
+
+	if (dev->mt76.chandef.width >= NL80211_CHAN_WIDTH_40)
+		val = 0x1e42 << 16;
+	else
+		val = 0x1836 << 16;
+
+	val |= 0xf8;
+
+	mt76_wr(dev, MT_BBP(AGC, 8),
+		val | FIELD_PREP(MT_BBP_AGC_GAIN, gain_val[0]));
+	mt76_wr(dev, MT_BBP(AGC, 9),
+		val | FIELD_PREP(MT_BBP_AGC_GAIN, gain_val[1]));
+
+	if (dev->mt76.chandef.chan->flags & IEEE80211_CHAN_RADAR)
+		mt76x2_dfs_adjust_agc(dev);
+}
+
+static void
+mt76x2_phy_adjust_vga_gain(struct mt76x2_dev *dev)
+{
+	u32 false_cca;
+	u8 limit = dev->cal.low_gain > 1 ? 4 : 16;
+
+	false_cca = FIELD_GET(MT_RX_STAT_1_CCA_ERRORS,
+			      mt76_rr(dev, MT_RX_STAT_1));
+	if (false_cca > 800 && dev->cal.agc_gain_adjust < limit)
+		dev->cal.agc_gain_adjust += 2;
+	else if (false_cca < 10 && dev->cal.agc_gain_adjust > 0)
+		dev->cal.agc_gain_adjust -= 2;
+	else
+		return;
+
+	mt76x2_phy_set_gain_val(dev);
+}
+
+static void
+mt76x2_phy_update_channel_gain(struct mt76x2_dev *dev)
+{
+	u32 val = mt76_rr(dev, MT_BBP(AGC, 20));
+	int rssi0 = (s8) FIELD_GET(MT_BBP_AGC20_RSSI0, val);
+	int rssi1 = (s8) FIELD_GET(MT_BBP_AGC20_RSSI1, val);
+	u8 *gain = dev->cal.agc_gain_init;
+	u8 gain_delta;
+	int low_gain;
+
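+	/*
+	 * The averages are kept in 8.8 fixed point (samples added with
+	 * 8 fractional bits, EWMA weight 1/16); dividing by 512 both
+	 * averages the two chains (/2) and drops the fraction (/256).
+	 */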
+	dev->cal.avg_rssi[0] = (dev->cal.avg_rssi[0] * 15) / 16 +
+			       (rssi0 << 8) / 16;
+	dev->cal.avg_rssi[1] = (dev->cal.avg_rssi[1] * 15) / 16 +
+			       (rssi1 << 8) / 16;
+	dev->cal.avg_rssi_all = (dev->cal.avg_rssi[0] +
+				 dev->cal.avg_rssi[1]) / 512;
+
+	low_gain = (dev->cal.avg_rssi_all > mt76x2_get_rssi_gain_thresh(dev)) +
+		   (dev->cal.avg_rssi_all > mt76x2_get_low_rssi_gain_thresh(dev));
+
+	if (dev->cal.low_gain == low_gain) {
+		mt76x2_phy_adjust_vga_gain(dev);
+		return;
+	}
+
+	dev->cal.low_gain = low_gain;
+
+	if (dev->mt76.chandef.width == NL80211_CHAN_WIDTH_80)
+		mt76_wr(dev, MT_BBP(RXO, 14), 0x00560211);
+	else
+		mt76_wr(dev, MT_BBP(RXO, 14), 0x00560423);
+
+	if (low_gain) {
+		mt76_wr(dev, MT_BBP(RXO, 18), 0xf000a991);
+		mt76_wr(dev, MT_BBP(AGC, 35), 0x08080808);
+		mt76_wr(dev, MT_BBP(AGC, 37), 0x08080808);
+		if (mt76x2_has_ext_lna(dev))
+			gain_delta = 10;
+		else
+			gain_delta = 14;
+	} else {
+		mt76_wr(dev, MT_BBP(RXO, 18), 0xf000a990);
+		if (dev->mt76.chandef.width == NL80211_CHAN_WIDTH_80)
+			mt76_wr(dev, MT_BBP(AGC, 35), 0x10101014);
+		else
+			mt76_wr(dev, MT_BBP(AGC, 35), 0x11111116);
+		mt76_wr(dev, MT_BBP(AGC, 37), 0x2121262C);
+		gain_delta = 0;
+	}
+
+	dev->cal.agc_gain_cur[0] = gain[0] - gain_delta;
+	dev->cal.agc_gain_cur[1] = gain[1] - gain_delta;
+	dev->cal.agc_gain_adjust = 0;
+	mt76x2_phy_set_gain_val(dev);
+}
+
+int mt76x2_phy_set_channel(struct mt76x2_dev *dev,
+			   struct cfg80211_chan_def *chandef)
+{
+	struct ieee80211_channel *chan = chandef->chan;
+	bool scan = test_bit(MT76_SCANNING, &dev->mt76.state);
+	enum nl80211_band band = chan->band;
+	u8 channel;
+
+	u32 ext_cca_chan[4] = {
+		[0] = FIELD_PREP(MT_EXT_CCA_CFG_CCA0, 0) |
+		      FIELD_PREP(MT_EXT_CCA_CFG_CCA1, 1) |
+		      FIELD_PREP(MT_EXT_CCA_CFG_CCA2, 2) |
+		      FIELD_PREP(MT_EXT_CCA_CFG_CCA3, 3) |
+		      FIELD_PREP(MT_EXT_CCA_CFG_CCA_MASK, BIT(0)),
+		[1] = FIELD_PREP(MT_EXT_CCA_CFG_CCA0, 1) |
+		      FIELD_PREP(MT_EXT_CCA_CFG_CCA1, 0) |
+		      FIELD_PREP(MT_EXT_CCA_CFG_CCA2, 2) |
+		      FIELD_PREP(MT_EXT_CCA_CFG_CCA3, 3) |
+		      FIELD_PREP(MT_EXT_CCA_CFG_CCA_MASK, BIT(1)),
+		[2] = FIELD_PREP(MT_EXT_CCA_CFG_CCA0, 2) |
+		      FIELD_PREP(MT_EXT_CCA_CFG_CCA1, 3) |
+		      FIELD_PREP(MT_EXT_CCA_CFG_CCA2, 1) |
+		      FIELD_PREP(MT_EXT_CCA_CFG_CCA3, 0) |
+		      FIELD_PREP(MT_EXT_CCA_CFG_CCA_MASK, BIT(2)),
+		[3] = FIELD_PREP(MT_EXT_CCA_CFG_CCA0, 3) |
+		      FIELD_PREP(MT_EXT_CCA_CFG_CCA1, 2) |
+		      FIELD_PREP(MT_EXT_CCA_CFG_CCA2, 1) |
+		      FIELD_PREP(MT_EXT_CCA_CFG_CCA3, 0) |
+		      FIELD_PREP(MT_EXT_CCA_CFG_CCA_MASK, BIT(3)),
+	};
+	int ch_group_index;
+	u8 bw, bw_index;
+	int freq, freq1;
+	int ret;
+
+	dev->cal.channel_cal_done = false;
+	freq = chandef->chan->center_freq;
+	freq1 = chandef->center_freq1;
+	channel = chan->hw_value;
+
+	switch (chandef->width) {
+	case NL80211_CHAN_WIDTH_40:
+		bw = 1;
+		if (freq1 > freq) {
+			bw_index = 1;
+			ch_group_index = 0;
+		} else {
+			bw_index = 3;
+			ch_group_index = 1;
+		}
+		channel += 2 - ch_group_index * 4;
+		break;
+	case NL80211_CHAN_WIDTH_80:
+		ch_group_index = (freq - freq1 + 30) / 20;
+		if (WARN_ON(ch_group_index < 0 || ch_group_index > 3))
+			ch_group_index = 0;
+		bw = 2;
+		bw_index = ch_group_index;
+		channel += 6 - ch_group_index * 4;
+		break;
+	default:
+		bw = 0;
+		bw_index = 0;
+		ch_group_index = 0;
+		break;
+	}
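+
+	/*
+	 * The adjustments above recenter "channel" on the middle of the
+	 * 40/80 MHz group before it is passed to the MCU: e.g. an 80 MHz
+	 * chandef whose primary 20 MHz slot is lowest in the group
+	 * (ch_group_index 0) bumps the channel number up by 6.
+	 */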
+
+	mt76x2_read_rx_gain(dev);
+	mt76x2_phy_set_txpower_regs(dev, band);
+	mt76x2_configure_tx_delay(dev, band, bw);
+	mt76x2_phy_set_txpower(dev);
+
+	mt76x2_set_rx_chains(dev);
+	mt76x2_phy_set_band(dev, chan->band, ch_group_index & 1);
+	mt76x2_phy_set_bw(dev, chandef->width, ch_group_index);
+	mt76x2_set_tx_dac(dev);
+
+	mt76_rmw(dev, MT_EXT_CCA_CFG,
+		 (MT_EXT_CCA_CFG_CCA0 |
+		  MT_EXT_CCA_CFG_CCA1 |
+		  MT_EXT_CCA_CFG_CCA2 |
+		  MT_EXT_CCA_CFG_CCA3 |
+		  MT_EXT_CCA_CFG_CCA_MASK),
+		 ext_cca_chan[ch_group_index]);
+
+	ret = mt76x2_mcu_set_channel(dev, channel, bw, bw_index, scan);
+	if (ret)
+		return ret;
+
+	mt76x2_mcu_init_gain(dev, channel, dev->cal.rx.mcu_gain, true);
+
+	/* Enable LDPC Rx */
+	if (mt76xx_rev(dev) >= MT76XX_REV_E3)
+		mt76_set(dev, MT_BBP(RXO, 13), BIT(10));
+
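+	/* One-shot calibrations on the first channel set; R calibration is
+	 * presumably only valid when the EEPROM carries a BT RCAL result
+	 * (0xff means unprogrammed).
+	 */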
+	if (!dev->cal.init_cal_done) {
+		u8 val = mt76x2_eeprom_get(dev, MT_EE_BT_RCAL_RESULT);
+
+		if (val != 0xff)
+			mt76x2_mcu_calibrate(dev, MCU_CAL_R, 0);
+	}
+
+	mt76x2_mcu_calibrate(dev, MCU_CAL_RXDCOC, channel);
+
+	/* Rx LPF calibration */
+	if (!dev->cal.init_cal_done)
+		mt76x2_mcu_calibrate(dev, MCU_CAL_RC, 0);
+
+	dev->cal.init_cal_done = true;
+
+	mt76_wr(dev, MT_BBP(AGC, 61), 0xFF64A4E2);
+	mt76_wr(dev, MT_BBP(AGC, 7), 0x08081010);
+	mt76_wr(dev, MT_BBP(AGC, 11), 0x00000404);
+	mt76_wr(dev, MT_BBP(AGC, 2), 0x00007070);
+	mt76_wr(dev, MT_TXOP_CTRL_CFG, 0x04101B3F);
+
+	if (scan)
+		return 0;
+
+	dev->cal.low_gain = -1;
+	mt76x2_phy_channel_calibrate(dev, true);
+	mt76x2_get_agc_gain(dev, dev->cal.agc_gain_init);
+	memcpy(dev->cal.agc_gain_cur, dev->cal.agc_gain_init,
+	       sizeof(dev->cal.agc_gain_cur));
+
+	ieee80211_queue_delayed_work(mt76_hw(dev), &dev->cal_work,
+				     MT_CALIBRATE_INTERVAL);
+
+	return 0;
+}
+
+static void
+mt76x2_phy_tssi_compensate(struct mt76x2_dev *dev)
+{
+	struct ieee80211_channel *chan = dev->mt76.chandef.chan;
+	struct mt76x2_tx_power_info txp;
+	struct mt76x2_tssi_comp t = {};
+
+	if (!dev->cal.tssi_cal_done)
+		return;
+
+	if (!dev->cal.tssi_comp_pending) {
+		/* TSSI trigger */
+		t.cal_mode = BIT(0);
+		mt76x2_mcu_tssi_comp(dev, &t);
+		dev->cal.tssi_comp_pending = true;
+	} else {
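+		/* CORE R34 bit 4 seems to read back as busy while the
+		 * hardware is still measuring; try again on the next run.
+		 */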
+		if (mt76_rr(dev, MT_BBP(CORE, 34)) & BIT(4))
+			return;
+
+		dev->cal.tssi_comp_pending = false;
+		mt76x2_get_power_info(dev, &txp, chan);
+
+		if (mt76x2_ext_pa_enabled(dev, chan->band))
+			t.pa_mode = 1;
+
+		t.cal_mode = BIT(1);
+		t.slope0 = txp.chain[0].tssi_slope;
+		t.offset0 = txp.chain[0].tssi_offset;
+		t.slope1 = txp.chain[1].tssi_slope;
+		t.offset1 = txp.chain[1].tssi_offset;
+		mt76x2_mcu_tssi_comp(dev, &t);
+
+		if (t.pa_mode || dev->cal.dpd_cal_done)
+			return;
+
+		usleep_range(10000, 20000);
+		mt76x2_mcu_calibrate(dev, MCU_CAL_DPD, chan->hw_value);
+		dev->cal.dpd_cal_done = true;
+	}
+}
+
+static void
+mt76x2_phy_temp_compensate(struct mt76x2_dev *dev)
+{
+	struct mt76x2_temp_comp t;
+	int temp, db_diff;
+
+	if (mt76x2_get_temp_comp(dev, &t))
+		return;
+
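+	/* Scale the raw sensor delta to degrees Celsius; the 1789/1000
+	 * factor looks like the vendor-calibrated sensor slope.
+	 */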
+	temp = mt76_get_field(dev, MT_TEMP_SENSOR, MT_TEMP_SENSOR_VAL);
+	temp -= t.temp_25_ref;
+	temp = (temp * 1789) / 1000 + 25;
+	dev->cal.temp = temp;
+
+	if (temp > 25)
+		db_diff = (temp - 25) / t.high_slope;
+	else
+		db_diff = (25 - temp) / t.low_slope;
+
+	db_diff = min(db_diff, t.upper_bound);
+	db_diff = max(db_diff, t.lower_bound);
+
+	mt76_rmw_field(dev, MT_TX_ALC_CFG_1, MT_TX_ALC_CFG_1_TEMP_COMP,
+		       db_diff * 2);
+	mt76_rmw_field(dev, MT_TX_ALC_CFG_2, MT_TX_ALC_CFG_2_TEMP_COMP,
+		       db_diff * 2);
+}
+
+void mt76x2_phy_calibrate(struct work_struct *work)
+{
+	struct mt76x2_dev *dev;
+
+	dev = container_of(work, struct mt76x2_dev, cal_work.work);
+	mt76x2_phy_channel_calibrate(dev, false);
+	mt76x2_phy_tssi_compensate(dev);
+	mt76x2_phy_temp_compensate(dev);
+	mt76x2_phy_update_channel_gain(dev);
+	ieee80211_queue_delayed_work(mt76_hw(dev), &dev->cal_work,
+				     MT_CALIBRATE_INTERVAL);
+}
+
+int mt76x2_phy_start(struct mt76x2_dev *dev)
+{
+	int ret;
+
+	ret = mt76x2_mcu_set_radio_state(dev, true);
+	if (ret)
+		return ret;
+
+	mt76x2_mcu_load_cr(dev, MT_RF_BBP_CR, 0, 0);
+
+	return ret;
+}
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2_regs.h b/drivers/net/wireless/mediatek/mt76/mt76x2_regs.h
new file mode 100644
index 000000000000..ce3ab85c8b0f
--- /dev/null
+++ b/drivers/net/wireless/mediatek/mt76/mt76x2_regs.h
@@ -0,0 +1,587 @@
+/*
+ * Copyright (C) 2016 Felix Fietkau <nbd@nbd.name>
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#ifndef __MT76x2_REGS_H
+#define __MT76x2_REGS_H
+
+#define MT_ASIC_VERSION			0x0000
+
+#define MT76XX_REV_E3		0x22
+#define MT76XX_REV_E4		0x33
+
+#define MT_CMB_CTRL			0x0020
+#define MT_CMB_CTRL_XTAL_RDY		BIT(22)
+#define MT_CMB_CTRL_PLL_LD		BIT(23)
+
+#define MT_EFUSE_CTRL			0x0024
+#define MT_EFUSE_CTRL_AOUT		GENMASK(5, 0)
+#define MT_EFUSE_CTRL_MODE		GENMASK(7, 6)
+#define MT_EFUSE_CTRL_LDO_OFF_TIME	GENMASK(13, 8)
+#define MT_EFUSE_CTRL_LDO_ON_TIME	GENMASK(15, 14)
+#define MT_EFUSE_CTRL_AIN		GENMASK(25, 16)
+#define MT_EFUSE_CTRL_KICK		BIT(30)
+#define MT_EFUSE_CTRL_SEL		BIT(31)
+
+#define MT_EFUSE_DATA_BASE		0x0028
+#define MT_EFUSE_DATA(_n)		(MT_EFUSE_DATA_BASE + ((_n) << 2))
+
+#define MT_COEXCFG0			0x0040
+#define MT_COEXCFG0_COEX_EN		BIT(0)
+
+#define MT_WLAN_FUN_CTRL		0x0080
+#define MT_WLAN_FUN_CTRL_WLAN_EN	BIT(0)
+#define MT_WLAN_FUN_CTRL_WLAN_CLK_EN	BIT(1)
+#define MT_WLAN_FUN_CTRL_WLAN_RESET_RF	BIT(2)
+
+#define MT_WLAN_FUN_CTRL_WLAN_RESET	BIT(3) /* MT76x0 */
+#define MT_WLAN_FUN_CTRL_CSR_F20M_CKEN	BIT(3) /* MT76x2 */
+
+#define MT_WLAN_FUN_CTRL_PCIE_CLK_REQ	BIT(4)
+#define MT_WLAN_FUN_CTRL_FRC_WL_ANT_SEL	BIT(5)
+#define MT_WLAN_FUN_CTRL_INV_ANT_SEL	BIT(6)
+#define MT_WLAN_FUN_CTRL_WAKE_HOST	BIT(7)
+
+#define MT_WLAN_FUN_CTRL_THERM_RST	BIT(8) /* MT76x2 */
+#define MT_WLAN_FUN_CTRL_THERM_CKEN	BIT(9) /* MT76x2 */
+
+#define MT_WLAN_FUN_CTRL_GPIO_IN	GENMASK(15, 8) /* MT76x0 */
+#define MT_WLAN_FUN_CTRL_GPIO_OUT	GENMASK(23, 16) /* MT76x0 */
+#define MT_WLAN_FUN_CTRL_GPIO_OUT_EN	GENMASK(31, 24) /* MT76x0 */
+
+#define MT_XO_CTRL0			0x0100
+#define MT_XO_CTRL1			0x0104
+#define MT_XO_CTRL2			0x0108
+#define MT_XO_CTRL3			0x010c
+#define MT_XO_CTRL4			0x0110
+
+#define MT_XO_CTRL5			0x0114
+#define MT_XO_CTRL5_C2_VAL		GENMASK(14, 8)
+
+#define MT_XO_CTRL6			0x0118
+#define MT_XO_CTRL6_C2_CTRL		GENMASK(14, 8)
+
+#define MT_XO_CTRL7			0x011c
+
+#define MT_WLAN_MTC_CTRL		0x10148
+#define MT_WLAN_MTC_CTRL_MTCMOS_PWR_UP	BIT(0)
+#define MT_WLAN_MTC_CTRL_PWR_ACK	BIT(12)
+#define MT_WLAN_MTC_CTRL_PWR_ACK_S	BIT(13)
+#define MT_WLAN_MTC_CTRL_BBP_MEM_PD	GENMASK(19, 16)
+#define MT_WLAN_MTC_CTRL_PBF_MEM_PD	BIT(20)
+#define MT_WLAN_MTC_CTRL_FCE_MEM_PD	BIT(21)
+#define MT_WLAN_MTC_CTRL_TSO_MEM_PD	BIT(22)
+#define MT_WLAN_MTC_CTRL_BBP_MEM_RB	BIT(24)
+#define MT_WLAN_MTC_CTRL_PBF_MEM_RB	BIT(25)
+#define MT_WLAN_MTC_CTRL_FCE_MEM_RB	BIT(26)
+#define MT_WLAN_MTC_CTRL_TSO_MEM_RB	BIT(27)
+#define MT_WLAN_MTC_CTRL_STATE_UP	BIT(28)
+
+#define MT_INT_SOURCE_CSR		0x0200
+#define MT_INT_MASK_CSR			0x0204
+
+#define MT_INT_RX_DONE(_n)		BIT(_n)
+#define MT_INT_RX_DONE_ALL		GENMASK(1, 0)
+#define MT_INT_TX_DONE_ALL		GENMASK(13, 4)
+#define MT_INT_TX_DONE(_n)		BIT((_n) + 4)
+#define MT_INT_RX_COHERENT		BIT(16)
+#define MT_INT_TX_COHERENT		BIT(17)
+#define MT_INT_ANY_COHERENT		BIT(18)
+#define MT_INT_MCU_CMD			BIT(19)
+#define MT_INT_TBTT			BIT(20)
+#define MT_INT_PRE_TBTT			BIT(21)
+#define MT_INT_TX_STAT			BIT(22)
+#define MT_INT_AUTO_WAKEUP		BIT(23)
+#define MT_INT_GPTIMER			BIT(24)
+#define MT_INT_RXDELAYINT		BIT(26)
+#define MT_INT_TXDELAYINT		BIT(27)
+
+#define MT_WPDMA_GLO_CFG		0x0208
+#define MT_WPDMA_GLO_CFG_TX_DMA_EN	BIT(0)
+#define MT_WPDMA_GLO_CFG_TX_DMA_BUSY	BIT(1)
+#define MT_WPDMA_GLO_CFG_RX_DMA_EN	BIT(2)
+#define MT_WPDMA_GLO_CFG_RX_DMA_BUSY	BIT(3)
+#define MT_WPDMA_GLO_CFG_DMA_BURST_SIZE	GENMASK(5, 4)
+#define MT_WPDMA_GLO_CFG_TX_WRITEBACK_DONE	BIT(6)
+#define MT_WPDMA_GLO_CFG_BIG_ENDIAN	BIT(7)
+#define MT_WPDMA_GLO_CFG_HDR_SEG_LEN	GENMASK(15, 8)
+#define MT_WPDMA_GLO_CFG_CLK_GATE_DIS	BIT(30)
+#define MT_WPDMA_GLO_CFG_RX_2B_OFFSET	BIT(31)
+
+#define MT_WPDMA_RST_IDX		0x020c
+
+#define MT_WPDMA_DELAY_INT_CFG		0x0210
+
+#define MT_WMM_AIFSN		0x0214
+#define MT_WMM_AIFSN_MASK		GENMASK(3, 0)
+#define MT_WMM_AIFSN_SHIFT(_n)		((_n) * 4)
+
+#define MT_WMM_CWMIN		0x0218
+#define MT_WMM_CWMIN_MASK		GENMASK(3, 0)
+#define MT_WMM_CWMIN_SHIFT(_n)		((_n) * 4)
+
+#define MT_WMM_CWMAX		0x021c
+#define MT_WMM_CWMAX_MASK		GENMASK(3, 0)
+#define MT_WMM_CWMAX_SHIFT(_n)		((_n) * 4)
+
+#define MT_WMM_TXOP_BASE		0x0220
+#define MT_WMM_TXOP(_n)			(MT_WMM_TXOP_BASE + (((_n) / 2) << 2))
+#define MT_WMM_TXOP_SHIFT(_n)		(((_n) & 1) * 16)
+#define MT_WMM_TXOP_MASK		GENMASK(15, 0)
+
+#define MT_TSO_CTRL			0x0250
+#define MT_HEADER_TRANS_CTRL_REG	0x0260
+
+#define MT_TX_RING_BASE			0x0300
+#define MT_RX_RING_BASE			0x03c0
+
+#define MT_TX_HW_QUEUE_MCU		8
+#define MT_TX_HW_QUEUE_MGMT		9
+
+#define MT_PBF_SYS_CTRL			0x0400
+#define MT_PBF_SYS_CTRL_MCU_RESET	BIT(0)
+#define MT_PBF_SYS_CTRL_DMA_RESET	BIT(1)
+#define MT_PBF_SYS_CTRL_MAC_RESET	BIT(2)
+#define MT_PBF_SYS_CTRL_PBF_RESET	BIT(3)
+#define MT_PBF_SYS_CTRL_ASY_RESET	BIT(4)
+
+#define MT_PBF_CFG			0x0404
+#define MT_PBF_CFG_TX0Q_EN		BIT(0)
+#define MT_PBF_CFG_TX1Q_EN		BIT(1)
+#define MT_PBF_CFG_TX2Q_EN		BIT(2)
+#define MT_PBF_CFG_TX3Q_EN		BIT(3)
+#define MT_PBF_CFG_RX0Q_EN		BIT(4)
+#define MT_PBF_CFG_RX_DROP_EN		BIT(8)
+
+#define MT_PBF_TX_MAX_PCNT		0x0408
+#define MT_PBF_RX_MAX_PCNT		0x040c
+
+#define MT_BCN_OFFSET_BASE		0x041c
+#define MT_BCN_OFFSET(_n)		(MT_BCN_OFFSET_BASE + ((_n) << 2))
+
+#define MT_RF_BYPASS_0			0x0504
+#define MT_RF_BYPASS_1			0x0508
+#define MT_RF_SETTING_0			0x050c
+
+#define MT_RF_DATA_WRITE		0x0524
+
+#define MT_RF_CTRL			0x0528
+#define MT_RF_CTRL_ADDR			GENMASK(11, 0)
+#define MT_RF_CTRL_WRITE		BIT(12)
+#define MT_RF_CTRL_BUSY			BIT(13)
+#define MT_RF_CTRL_IDX			BIT(16)
+
+#define MT_RF_DATA_READ			0x052c
+
+#define MT_FCE_PSE_CTRL			0x0800
+#define MT_FCE_PARAMETERS		0x0804
+#define MT_FCE_CSO			0x0808
+
+#define MT_FCE_L2_STUFF			0x080c
+#define MT_FCE_L2_STUFF_HT_L2_EN	BIT(0)
+#define MT_FCE_L2_STUFF_QOS_L2_EN	BIT(1)
+#define MT_FCE_L2_STUFF_RX_STUFF_EN	BIT(2)
+#define MT_FCE_L2_STUFF_TX_STUFF_EN	BIT(3)
+#define MT_FCE_L2_STUFF_WR_MPDU_LEN_EN	BIT(4)
+#define MT_FCE_L2_STUFF_MVINV_BSWAP	BIT(5)
+#define MT_FCE_L2_STUFF_TS_CMD_QSEL_EN	GENMASK(15, 8)
+#define MT_FCE_L2_STUFF_TS_LEN_EN	GENMASK(23, 16)
+#define MT_FCE_L2_STUFF_OTHER_PORT	GENMASK(25, 24)
+
+#define MT_FCE_WLAN_FLOW_CONTROL1	0x0824
+
+#define MT_PAUSE_ENABLE_CONTROL1	0x0a38
+
+#define MT_MAC_CSR0			0x1000
+
+#define MT_MAC_SYS_CTRL			0x1004
+#define MT_MAC_SYS_CTRL_RESET_CSR	BIT(0)
+#define MT_MAC_SYS_CTRL_RESET_BBP	BIT(1)
+#define MT_MAC_SYS_CTRL_ENABLE_TX	BIT(2)
+#define MT_MAC_SYS_CTRL_ENABLE_RX	BIT(3)
+
+#define MT_MAC_ADDR_DW0			0x1008
+#define MT_MAC_ADDR_DW1			0x100c
+
+#define MT_MAC_BSSID_DW0		0x1010
+#define MT_MAC_BSSID_DW1		0x1014
+#define MT_MAC_BSSID_DW1_ADDR		GENMASK(15, 0)
+#define MT_MAC_BSSID_DW1_MBSS_MODE	GENMASK(17, 16)
+#define MT_MAC_BSSID_DW1_MBEACON_N	GENMASK(20, 18)
+#define MT_MAC_BSSID_DW1_MBSS_LOCAL_BIT	BIT(21)
+#define MT_MAC_BSSID_DW1_MBSS_MODE_B2	BIT(22)
+#define MT_MAC_BSSID_DW1_MBEACON_N_B3	BIT(23)
+#define MT_MAC_BSSID_DW1_MBSS_IDX_BYTE	GENMASK(26, 24)
+
+#define MT_MAX_LEN_CFG			0x1018
+
+#define MT_AMPDU_MAX_LEN_20M1S		0x1030
+#define MT_AMPDU_MAX_LEN_20M2S		0x1034
+#define MT_AMPDU_MAX_LEN_40M1S		0x1038
+#define MT_AMPDU_MAX_LEN_40M2S		0x103c
+#define MT_AMPDU_MAX_LEN		0x1040
+
+#define MT_WCID_DROP_BASE		0x106c
+#define MT_WCID_DROP(_n)		(MT_WCID_DROP_BASE + ((_n) >> 5) * 4)
+#define MT_WCID_DROP_MASK(_n)		BIT((_n) % 32)
+
+#define MT_BCN_BYPASS_MASK		0x108c
+
+#define MT_MAC_APC_BSSID_BASE		0x1090
+#define MT_MAC_APC_BSSID_L(_n)		(MT_MAC_APC_BSSID_BASE + ((_n) * 8))
+#define MT_MAC_APC_BSSID_H(_n)		(MT_MAC_APC_BSSID_BASE + ((_n) * 8 + 4))
+#define MT_MAC_APC_BSSID_H_ADDR		GENMASK(15, 0)
+#define MT_MAC_APC_BSSID0_H_EN		BIT(16)
+
+#define MT_XIFS_TIME_CFG		0x1100
+#define MT_XIFS_TIME_CFG_CCK_SIFS	GENMASK(7, 0)
+#define MT_XIFS_TIME_CFG_OFDM_SIFS	GENMASK(15, 8)
+#define MT_XIFS_TIME_CFG_OFDM_XIFS	GENMASK(19, 16)
+#define MT_XIFS_TIME_CFG_EIFS		GENMASK(28, 20)
+#define MT_XIFS_TIME_CFG_BB_RXEND_EN	BIT(29)
+
+#define MT_BKOFF_SLOT_CFG		0x1104
+#define MT_BKOFF_SLOT_CFG_SLOTTIME	GENMASK(7, 0)
+#define MT_BKOFF_SLOT_CFG_CC_DELAY	GENMASK(11, 8)
+
+#define MT_CH_TIME_CFG			0x110c
+#define MT_CH_TIME_CFG_TIMER_EN		BIT(0)
+#define MT_CH_TIME_CFG_TX_AS_BUSY	BIT(1)
+#define MT_CH_TIME_CFG_RX_AS_BUSY	BIT(2)
+#define MT_CH_TIME_CFG_NAV_AS_BUSY	BIT(3)
+#define MT_CH_TIME_CFG_EIFS_AS_BUSY	BIT(4)
+#define MT_CH_TIME_CFG_MDRDY_CNT_EN	BIT(5)
+#define MT_CH_TIME_CFG_CH_TIMER_CLR	GENMASK(9, 8)
+#define MT_CH_TIME_CFG_MDRDY_CLR	GENMASK(11, 10)
+
+#define MT_PBF_LIFE_TIMER		0x1110
+
+#define MT_BEACON_TIME_CFG		0x1114
+#define MT_BEACON_TIME_CFG_INTVAL	GENMASK(15, 0)
+#define MT_BEACON_TIME_CFG_TIMER_EN	BIT(16)
+#define MT_BEACON_TIME_CFG_SYNC_MODE	GENMASK(18, 17)
+#define MT_BEACON_TIME_CFG_TBTT_EN	BIT(19)
+#define MT_BEACON_TIME_CFG_BEACON_TX	BIT(20)
+#define MT_BEACON_TIME_CFG_TSF_COMP	GENMASK(31, 24)
+
+#define MT_TBTT_SYNC_CFG		0x1118
+#define MT_TBTT_TIMER_CFG		0x1124
+
+#define MT_INT_TIMER_CFG		0x1128
+#define MT_INT_TIMER_CFG_PRE_TBTT	GENMASK(15, 0)
+#define MT_INT_TIMER_CFG_GP_TIMER	GENMASK(31, 16)
+
+#define MT_INT_TIMER_EN			0x112c
+#define MT_INT_TIMER_EN_PRE_TBTT_EN	BIT(0)
+#define MT_INT_TIMER_EN_GP_TIMER_EN	BIT(1)
+
+#define MT_CH_IDLE			0x1130
+#define MT_CH_BUSY			0x1134
+#define MT_EXT_CH_BUSY			0x1138
+#define MT_ED_CCA_TIMER			0x1140
+
+#define MT_MAC_STATUS			0x1200
+#define MT_MAC_STATUS_TX		BIT(0)
+#define MT_MAC_STATUS_RX		BIT(1)
+
+#define MT_PWR_PIN_CFG			0x1204
+#define MT_AUX_CLK_CFG			0x120c
+
+#define MT_BB_PA_MODE_CFG0		0x1214
+#define MT_BB_PA_MODE_CFG1		0x1218
+#define MT_RF_PA_MODE_CFG0		0x121c
+#define MT_RF_PA_MODE_CFG1		0x1220
+
+#define MT_RF_PA_MODE_ADJ0		0x1228
+#define MT_RF_PA_MODE_ADJ1		0x122c
+
+#define MT_DACCLK_EN_DLY_CFG		0x1264
+
+#define MT_EDCA_CFG_BASE		0x1300
+#define MT_EDCA_CFG_AC(_n)		(MT_EDCA_CFG_BASE + ((_n) << 2))
+#define MT_EDCA_CFG_TXOP		GENMASK(7, 0)
+#define MT_EDCA_CFG_AIFSN		GENMASK(11, 8)
+#define MT_EDCA_CFG_CWMIN		GENMASK(15, 12)
+#define MT_EDCA_CFG_CWMAX		GENMASK(19, 16)
+
+#define MT_TX_PWR_CFG_0			0x1314
+#define MT_TX_PWR_CFG_1			0x1318
+#define MT_TX_PWR_CFG_2			0x131c
+#define MT_TX_PWR_CFG_3			0x1320
+#define MT_TX_PWR_CFG_4			0x1324
+
+#define MT_TX_BAND_CFG			0x132c
+#define MT_TX_BAND_CFG_UPPER_40M	BIT(0)
+#define MT_TX_BAND_CFG_5G		BIT(1)
+#define MT_TX_BAND_CFG_2G		BIT(2)
+
+#define MT_HT_FBK_TO_LEGACY		0x1384
+#define MT_TX_MPDU_ADJ_INT		0x1388
+
+#define MT_TX_PWR_CFG_7			0x13d4
+#define MT_TX_PWR_CFG_8			0x13d8
+#define MT_TX_PWR_CFG_9			0x13dc
+
+#define MT_TX_SW_CFG0			0x1330
+#define MT_TX_SW_CFG1			0x1334
+#define MT_TX_SW_CFG2			0x1338
+
+#define MT_TXOP_CTRL_CFG		0x1340
+
+#define MT_TX_RTS_CFG			0x1344
+#define MT_TX_RTS_CFG_RETRY_LIMIT	GENMASK(7, 0)
+#define MT_TX_RTS_CFG_THRESH		GENMASK(23, 8)
+#define MT_TX_RTS_FALLBACK		BIT(24)
+
+#define MT_TX_TIMEOUT_CFG		0x1348
+#define MT_TX_TIMEOUT_CFG_ACKTO		GENMASK(15, 8)
+
+#define MT_TX_RETRY_CFG			0x134c
+#define MT_VHT_HT_FBK_CFG1		0x1358
+
+#define MT_PROT_CFG_RATE		GENMASK(15, 0)
+#define MT_PROT_CFG_CTRL		GENMASK(17, 16)
+#define MT_PROT_CFG_NAV			GENMASK(19, 18)
+#define MT_PROT_CFG_TXOP_ALLOW		GENMASK(25, 20)
+#define MT_PROT_CFG_RTS_THRESH		BIT(26)
+
+#define MT_CCK_PROT_CFG			0x1364
+#define MT_OFDM_PROT_CFG		0x1368
+#define MT_MM20_PROT_CFG		0x136c
+#define MT_MM40_PROT_CFG		0x1370
+#define MT_GF20_PROT_CFG		0x1374
+#define MT_GF40_PROT_CFG		0x1378
+
+#define MT_EXP_ACK_TIME			0x1380
+
+#define MT_TX_PWR_CFG_0_EXT		0x1390
+#define MT_TX_PWR_CFG_1_EXT		0x1394
+
+#define MT_TX_FBK_LIMIT			0x1398
+#define MT_TX_FBK_LIMIT_MPDU_FBK	GENMASK(7, 0)
+#define MT_TX_FBK_LIMIT_AMPDU_FBK	GENMASK(15, 8)
+#define MT_TX_FBK_LIMIT_MPDU_UP_CLEAR	BIT(16)
+#define MT_TX_FBK_LIMIT_AMPDU_UP_CLEAR	BIT(17)
+#define MT_TX_FBK_LIMIT_RATE_LUT	BIT(18)
+
+#define MT_TX0_RF_GAIN_CORR		0x13a0
+#define MT_TX1_RF_GAIN_CORR		0x13a4
+
+#define MT_TX_ALC_CFG_0			0x13b0
+#define MT_TX_ALC_CFG_0_CH_INIT_0	GENMASK(5, 0)
+#define MT_TX_ALC_CFG_0_CH_INIT_1	GENMASK(13, 8)
+#define MT_TX_ALC_CFG_0_LIMIT_0		GENMASK(21, 16)
+#define MT_TX_ALC_CFG_0_LIMIT_1		GENMASK(29, 24)
+
+#define MT_TX_ALC_CFG_1			0x13b4
+#define MT_TX_ALC_CFG_1_TEMP_COMP	GENMASK(5, 0)
+
+#define MT_TX_ALC_CFG_2			0x13a8
+#define MT_TX_ALC_CFG_2_TEMP_COMP	GENMASK(5, 0)
+
+#define MT_TX_ALC_CFG_3			0x13ac
+#define MT_TX_ALC_CFG_4			0x13c0
+#define MT_TX_ALC_CFG_4_LOWGAIN_CH_EN	BIT(31)
+
+#define MT_TX_ALC_VGA3			0x13c8
+
+#define MT_TX_PROT_CFG6			0x13e0
+#define MT_TX_PROT_CFG7			0x13e4
+#define MT_TX_PROT_CFG8			0x13e8
+
+#define MT_PIFS_TX_CFG			0x13ec
+
+#define MT_RX_FILTR_CFG			0x1400
+
+#define MT_RX_FILTR_CFG_CRC_ERR		BIT(0)
+#define MT_RX_FILTR_CFG_PHY_ERR		BIT(1)
+#define MT_RX_FILTR_CFG_PROMISC		BIT(2)
+#define MT_RX_FILTR_CFG_OTHER_BSS	BIT(3)
+#define MT_RX_FILTR_CFG_VER_ERR		BIT(4)
+#define MT_RX_FILTR_CFG_MCAST		BIT(5)
+#define MT_RX_FILTR_CFG_BCAST		BIT(6)
+#define MT_RX_FILTR_CFG_DUP		BIT(7)
+#define MT_RX_FILTR_CFG_CFACK		BIT(8)
+#define MT_RX_FILTR_CFG_CFEND		BIT(9)
+#define MT_RX_FILTR_CFG_ACK		BIT(10)
+#define MT_RX_FILTR_CFG_CTS		BIT(11)
+#define MT_RX_FILTR_CFG_RTS		BIT(12)
+#define MT_RX_FILTR_CFG_PSPOLL		BIT(13)
+#define MT_RX_FILTR_CFG_BA		BIT(14)
+#define MT_RX_FILTR_CFG_BAR		BIT(15)
+#define MT_RX_FILTR_CFG_CTRL_RSV	BIT(16)
+
+#define MT_LEGACY_BASIC_RATE		0x1408
+#define MT_HT_BASIC_RATE		0x140c
+
+#define MT_HT_CTRL_CFG			0x1410
+
+#define MT_EXT_CCA_CFG			0x141c
+#define MT_EXT_CCA_CFG_CCA0		GENMASK(1, 0)
+#define MT_EXT_CCA_CFG_CCA1		GENMASK(3, 2)
+#define MT_EXT_CCA_CFG_CCA2		GENMASK(5, 4)
+#define MT_EXT_CCA_CFG_CCA3		GENMASK(7, 6)
+#define MT_EXT_CCA_CFG_CCA_MASK		GENMASK(11, 8)
+#define MT_EXT_CCA_CFG_ED_CCA_MASK	GENMASK(15, 12)
+
+#define MT_TX_SW_CFG3			0x1478
+
+#define MT_PN_PAD_MODE			0x150c
+
+#define MT_TXOP_HLDR_ET			0x1608
+
+#define MT_PROT_AUTO_TX_CFG		0x1648
+#define MT_PROT_AUTO_TX_CFG_PROT_PADJ	GENMASK(11, 8)
+#define MT_PROT_AUTO_TX_CFG_AUTO_PADJ	GENMASK(27, 24)
+
+#define MT_RX_STAT_0			0x1700
+#define MT_RX_STAT_0_CRC_ERRORS		GENMASK(15, 0)
+#define MT_RX_STAT_0_PHY_ERRORS		GENMASK(31, 16)
+
+#define MT_RX_STAT_1			0x1704
+#define MT_RX_STAT_1_CCA_ERRORS		GENMASK(15, 0)
+#define MT_RX_STAT_1_PLCP_ERRORS	GENMASK(31, 16)
+
+#define MT_RX_STAT_2			0x1708
+#define MT_RX_STAT_2_DUP_ERRORS		GENMASK(15, 0)
+#define MT_RX_STAT_2_OVERFLOW_ERRORS	GENMASK(31, 16)
+
+#define MT_TX_STAT_FIFO			0x1718
+#define MT_TX_STAT_FIFO_VALID		BIT(0)
+#define MT_TX_STAT_FIFO_SUCCESS		BIT(5)
+#define MT_TX_STAT_FIFO_AGGR		BIT(6)
+#define MT_TX_STAT_FIFO_ACKREQ		BIT(7)
+#define MT_TX_STAT_FIFO_WCID		GENMASK(15, 8)
+#define MT_TX_STAT_FIFO_RATE		GENMASK(31, 16)
+
+#define MT_TX_AGG_CNT_BASE0		0x1720
+#define MT_TX_AGG_CNT_BASE1		0x174c
+
+#define MT_TX_AGG_CNT(_id)		((_id) < 8 ?			\
+					 MT_TX_AGG_CNT_BASE0 + ((_id) << 2) : \
+					 MT_TX_AGG_CNT_BASE1 + (((_id) - 8) << 2))
+
+#define MT_TX_STAT_FIFO_EXT		0x1798
+#define MT_TX_STAT_FIFO_EXT_RETRY	GENMASK(7, 0)
+#define MT_TX_STAT_FIFO_EXT_PKTID	GENMASK(15, 8)
+
+#define MT_WCID_TX_RATE_BASE		0x1c00
+#define MT_WCID_TX_RATE(_i)		(MT_WCID_TX_RATE_BASE + ((_i) << 3))
+
+#define MT_BBP_CORE_BASE		0x2000
+#define MT_BBP_IBI_BASE			0x2100
+#define MT_BBP_AGC_BASE			0x2300
+#define MT_BBP_TXC_BASE			0x2400
+#define MT_BBP_RXC_BASE			0x2500
+#define MT_BBP_TXO_BASE			0x2600
+#define MT_BBP_TXBE_BASE		0x2700
+#define MT_BBP_RXFE_BASE		0x2800
+#define MT_BBP_RXO_BASE			0x2900
+#define MT_BBP_DFS_BASE			0x2a00
+#define MT_BBP_TR_BASE			0x2b00
+#define MT_BBP_CAL_BASE			0x2c00
+#define MT_BBP_DSC_BASE			0x2e00
+#define MT_BBP_PFMU_BASE		0x2f00
+
+#define MT_BBP(_type, _n)		(MT_BBP_##_type##_BASE + ((_n) << 2))
+
+#define MT_BBP_CORE_R1_BW		GENMASK(4, 3)
+
+#define MT_BBP_AGC_R0_CTRL_CHAN		GENMASK(9, 8)
+#define MT_BBP_AGC_R0_BW		GENMASK(14, 12)
+
+/* AGC, R4/R5 */
+#define MT_BBP_AGC_LNA_HIGH_GAIN	GENMASK(21, 16)
+#define MT_BBP_AGC_LNA_MID_GAIN		GENMASK(13, 8)
+#define MT_BBP_AGC_LNA_LOW_GAIN		GENMASK(5, 0)
+
+/* AGC, R6/R7 */
+#define MT_BBP_AGC_LNA_ULOW_GAIN	GENMASK(5, 0)
+
+/* AGC, R8/R9 */
+#define MT_BBP_AGC_LNA_GAIN_MODE	GENMASK(7, 6)
+#define MT_BBP_AGC_GAIN			GENMASK(14, 8)
+
+#define MT_BBP_AGC20_RSSI0		GENMASK(7, 0)
+#define MT_BBP_AGC20_RSSI1		GENMASK(15, 8)
+
+#define MT_BBP_TXBE_R0_CTRL_CHAN	GENMASK(1, 0)
+
+#define MT_WCID_ADDR_BASE		0x1800
+#define MT_WCID_ADDR(_n)		(MT_WCID_ADDR_BASE + (_n) * 8)
+
+#define MT_SRAM_BASE			0x4000
+
+#define MT_WCID_KEY_BASE		0x8000
+#define MT_WCID_KEY(_n)			(MT_WCID_KEY_BASE + (_n) * 32)
+
+#define MT_WCID_IV_BASE			0xa000
+#define MT_WCID_IV(_n)			(MT_WCID_IV_BASE + (_n) * 8)
+
+#define MT_WCID_ATTR_BASE		0xa800
+#define MT_WCID_ATTR(_n)		(MT_WCID_ATTR_BASE + (_n) * 4)
+
+#define MT_WCID_ATTR_PAIRWISE		BIT(0)
+#define MT_WCID_ATTR_PKEY_MODE		GENMASK(3, 1)
+#define MT_WCID_ATTR_BSS_IDX		GENMASK(6, 4)
+#define MT_WCID_ATTR_RXWI_UDF		GENMASK(9, 7)
+#define MT_WCID_ATTR_PKEY_MODE_EXT	BIT(10)
+#define MT_WCID_ATTR_BSS_IDX_EXT	BIT(11)
+#define MT_WCID_ATTR_WAPI_MCBC		BIT(15)
+#define MT_WCID_ATTR_WAPI_KEYID		GENMASK(31, 24)
+
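+/* Shared keys are split across two register blocks: BSS 0-7 under base 0,
+ * BSS 8-15 under base 1, hence the bit-3 test in MT_SKEY.
+ */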
+#define MT_SKEY_BASE_0			0xac00
+#define MT_SKEY_BASE_1			0xb400
+#define MT_SKEY_0(_bss, _idx)		(MT_SKEY_BASE_0 + (4 * (_bss) + (_idx)) * 32)
+#define MT_SKEY_1(_bss, _idx)		(MT_SKEY_BASE_1 + (4 * ((_bss) & 7) + (_idx)) * 32)
+#define MT_SKEY(_bss, _idx)		(((_bss) & 8) ? MT_SKEY_1(_bss, _idx) : MT_SKEY_0(_bss, _idx))
+
+#define MT_SKEY_MODE_BASE_0		0xb000
+#define MT_SKEY_MODE_BASE_1		0xb3f0
+#define MT_SKEY_MODE_0(_bss)		(MT_SKEY_MODE_BASE_0 + (((_bss) / 2) << 2))
+#define MT_SKEY_MODE_1(_bss)		(MT_SKEY_MODE_BASE_1 + ((((_bss) & 7) / 2) << 2))
+#define MT_SKEY_MODE(_bss)		(((_bss) & 8) ? MT_SKEY_MODE_1(_bss) : MT_SKEY_MODE_0(_bss))
+#define MT_SKEY_MODE_MASK		GENMASK(3, 0)
+#define MT_SKEY_MODE_SHIFT(_bss, _idx)	(4 * ((_idx) + 4 * ((_bss) & 1)))
+
+#define MT_BEACON_BASE			0xc000
+
+#define MT_TEMP_SENSOR			0x1d000
+#define MT_TEMP_SENSOR_VAL		GENMASK(6, 0)
+
+struct mt76_wcid_addr {
+	u8 macaddr[6];
+	__le16 ba_mask;
+} __packed __aligned(4);
+
+struct mt76_wcid_key {
+	u8 key[16];
+	u8 tx_mic[8];
+	u8 rx_mic[8];
+} __packed __aligned(4);
+
+enum mt76x2_cipher_type {
+	MT_CIPHER_NONE,
+	MT_CIPHER_WEP40,
+	MT_CIPHER_WEP104,
+	MT_CIPHER_TKIP,
+	MT_CIPHER_AES_CCMP,
+	MT_CIPHER_CKIP40,
+	MT_CIPHER_CKIP104,
+	MT_CIPHER_CKIP128,
+	MT_CIPHER_WAPI,
+};
+
+#endif
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2_trace.c b/drivers/net/wireless/mediatek/mt76/mt76x2_trace.c
new file mode 100644
index 000000000000..a09f117848d6
--- /dev/null
+++ b/drivers/net/wireless/mediatek/mt76/mt76x2_trace.c
@@ -0,0 +1,23 @@
+/*
+ * Copyright (C) 2016 Felix Fietkau <nbd@nbd.name>
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include <linux/module.h>
+
+#ifndef __CHECKER__
+#define CREATE_TRACE_POINTS
+#include "mt76x2_trace.h"
+
+#endif
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2_trace.h b/drivers/net/wireless/mediatek/mt76/mt76x2_trace.h
new file mode 100644
index 000000000000..4cd424148d4b
--- /dev/null
+++ b/drivers/net/wireless/mediatek/mt76/mt76x2_trace.h
@@ -0,0 +1,144 @@
+/*
+ * Copyright (C) 2016 Felix Fietkau <nbd@nbd.name>
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#if !defined(__MT76x2_TRACE_H) || defined(TRACE_HEADER_MULTI_READ)
+#define __MT76x2_TRACE_H
+
+#include <linux/tracepoint.h>
+#include "mt76x2.h"
+
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM mt76x2
+
+#define MAXNAME		32
+#define DEV_ENTRY	__array(char, wiphy_name, MAXNAME)
+#define DEV_ASSIGN	strlcpy(__entry->wiphy_name, wiphy_name(mt76_hw(dev)->wiphy), MAXNAME)
+#define DEV_PR_FMT	"%s"
+#define DEV_PR_ARG	__entry->wiphy_name
+
+#define TXID_ENTRY	__field(u8, wcid) __field(u8, pktid)
+#define TXID_ASSIGN	__entry->wcid = wcid; __entry->pktid = pktid
+#define TXID_PR_FMT	" [%d:%d]"
+#define TXID_PR_ARG	__entry->wcid, __entry->pktid
+
+DECLARE_EVENT_CLASS(dev_evt,
+	TP_PROTO(struct mt76x2_dev *dev),
+	TP_ARGS(dev),
+	TP_STRUCT__entry(
+		DEV_ENTRY
+	),
+	TP_fast_assign(
+		DEV_ASSIGN;
+	),
+	TP_printk(DEV_PR_FMT, DEV_PR_ARG)
+);
+
+DECLARE_EVENT_CLASS(dev_txid_evt,
+	TP_PROTO(struct mt76x2_dev *dev, u8 wcid, u8 pktid),
+	TP_ARGS(dev, wcid, pktid),
+	TP_STRUCT__entry(
+		DEV_ENTRY
+		TXID_ENTRY
+	),
+	TP_fast_assign(
+		DEV_ASSIGN;
+		TXID_ASSIGN;
+	),
+	TP_printk(
+		DEV_PR_FMT TXID_PR_FMT,
+		DEV_PR_ARG, TXID_PR_ARG
+	)
+);
+
+DEFINE_EVENT(dev_evt, mac_txstat_poll,
+	TP_PROTO(struct mt76x2_dev *dev),
+	TP_ARGS(dev)
+);
+
+DEFINE_EVENT(dev_txid_evt, mac_txdone_add,
+	TP_PROTO(struct mt76x2_dev *dev, u8 wcid, u8 pktid),
+	TP_ARGS(dev, wcid, pktid)
+);
+
+TRACE_EVENT(mac_txstat_fetch,
+	TP_PROTO(struct mt76x2_dev *dev,
+		 struct mt76x2_tx_status *stat),
+
+	TP_ARGS(dev, stat),
+
+	TP_STRUCT__entry(
+		DEV_ENTRY
+		TXID_ENTRY
+		__field(bool, success)
+		__field(bool, aggr)
+		__field(bool, ack_req)
+		__field(u16, rate)
+		__field(u8, retry)
+	),
+
+	TP_fast_assign(
+		DEV_ASSIGN;
+		__entry->success = stat->success;
+		__entry->aggr = stat->aggr;
+		__entry->ack_req = stat->ack_req;
+		__entry->wcid = stat->wcid;
+		__entry->pktid = stat->pktid;
+		__entry->rate = stat->rate;
+		__entry->retry = stat->retry;
+	),
+
+	TP_printk(
+		DEV_PR_FMT TXID_PR_FMT
+		" success:%d aggr:%d ack_req:%d"
+		" rate:%04x retry:%d",
+		DEV_PR_ARG, TXID_PR_ARG,
+		__entry->success, __entry->aggr, __entry->ack_req,
+		__entry->rate, __entry->retry
+	)
+);
+
+TRACE_EVENT(dev_irq,
+	TP_PROTO(struct mt76x2_dev *dev, u32 val, u32 mask),
+
+	TP_ARGS(dev, val, mask),
+
+	TP_STRUCT__entry(
+		DEV_ENTRY
+		__field(u32, val)
+		__field(u32, mask)
+	),
+
+	TP_fast_assign(
+		DEV_ASSIGN;
+		__entry->val = val;
+		__entry->mask = mask;
+	),
+
+	TP_printk(
+		DEV_PR_FMT " %08x & %08x",
+		DEV_PR_ARG, __entry->val, __entry->mask
+	)
+);
+
+#endif
+
+#undef TRACE_INCLUDE_PATH
+#define TRACE_INCLUDE_PATH .
+#undef TRACE_INCLUDE_FILE
+#define TRACE_INCLUDE_FILE mt76x2_trace
+
+#include <trace/define_trace.h>
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2_tx.c b/drivers/net/wireless/mediatek/mt76/mt76x2_tx.c
new file mode 100644
index 000000000000..534e4bf9a34c
--- /dev/null
+++ b/drivers/net/wireless/mediatek/mt76/mt76x2_tx.c
@@ -0,0 +1,260 @@
+/*
+ * Copyright (C) 2016 Felix Fietkau <nbd@nbd.name>
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include "mt76x2.h"
+#include "mt76x2_dma.h"
+
+struct beacon_bc_data {
+	struct mt76x2_dev *dev;
+	struct sk_buff_head q;
+	struct sk_buff *tail[8];
+};
+
+void mt76x2_tx(struct ieee80211_hw *hw, struct ieee80211_tx_control *control,
+	     struct sk_buff *skb)
+{
+	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
+	struct mt76x2_dev *dev = hw->priv;
+	struct ieee80211_vif *vif = info->control.vif;
+	struct mt76_wcid *wcid = &dev->global_wcid;
+
+	if (control->sta) {
+		struct mt76x2_sta *msta;
+
+		msta = (struct mt76x2_sta *) control->sta->drv_priv;
+		wcid = &msta->wcid;
+	}
+
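+	/* Fall back to the vif group wcid when the frame carries no hw key
+	 * but the wcid has one programmed, presumably so the hardware does
+	 * not encrypt with a stale key.
+	 */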
+	if (vif && !info->control.hw_key && wcid->hw_key_idx != 0xff) {
+		struct mt76x2_vif *mvif;
+
+		mvif = (struct mt76x2_vif *) vif->drv_priv;
+		wcid = &mvif->group_wcid;
+	}
+
+	mt76_tx(&dev->mt76, control->sta, wcid, skb);
+}
+
+void mt76x2_tx_complete(struct mt76x2_dev *dev, struct sk_buff *skb)
+{
+	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
+
+	if (info->flags & IEEE80211_TX_CTL_AMPDU) {
+		ieee80211_free_txskb(mt76_hw(dev), skb);
+	} else {
+		ieee80211_tx_info_clear_status(info);
+		info->status.rates[0].idx = -1;
+		info->flags |= IEEE80211_TX_STAT_ACK;
+		ieee80211_tx_status(mt76_hw(dev), skb);
+	}
+}
+
+s8 mt76x2_tx_get_max_txpwr_adj(struct mt76x2_dev *dev,
+			       const struct ieee80211_tx_rate *rate)
+{
+	s8 max_txpwr;
+
+	if (rate->flags & IEEE80211_TX_RC_VHT_MCS) {
+		u8 mcs = ieee80211_rate_get_vht_mcs(rate);
+
+		if (mcs == 8 || mcs == 9) {
+			max_txpwr = dev->rate_power.vht[8];
+		} else {
+			u8 nss, idx;
+
+			nss = ieee80211_rate_get_vht_nss(rate);
+			idx = ((nss - 1) << 3) + mcs;
+			max_txpwr = dev->rate_power.ht[idx & 0xf];
+		}
+	} else if (rate->flags & IEEE80211_TX_RC_MCS) {
+		max_txpwr = dev->rate_power.ht[rate->idx & 0xf];
+	} else {
+		enum nl80211_band band = dev->mt76.chandef.chan->band;
+
+		if (band == NL80211_BAND_2GHZ) {
+			const struct ieee80211_rate *r;
+			struct wiphy *wiphy = mt76_hw(dev)->wiphy;
+			struct mt76_rate_power *rp = &dev->rate_power;
+
+			r = &wiphy->bands[band]->bitrates[rate->idx];
+			if (r->flags & IEEE80211_RATE_SHORT_PREAMBLE)
+				max_txpwr = rp->cck[r->hw_value & 0x3];
+			else
+				max_txpwr = rp->ofdm[r->hw_value & 0x7];
+		} else {
+			max_txpwr = dev->rate_power.ofdm[rate->idx & 0x7];
+		}
+	}
+
+	return max_txpwr;
+}
+
+s8 mt76x2_tx_get_txpwr_adj(struct mt76x2_dev *dev, s8 txpwr, s8 max_txpwr_adj)
+{
+	txpwr = min_t(s8, txpwr, dev->txpower_conf);
+	txpwr -= (dev->target_power + dev->target_power_delta[0]);
+	txpwr = min_t(s8, txpwr, max_txpwr_adj);
+
+	if (!dev->enable_tpc)
+		return 0;
+	else if (txpwr >= 0)
+		return min_t(s8, txpwr, 7);
+	else
+		return (txpwr < -16) ? 8 : (txpwr + 32) / 2;
+}
+
+void mt76x2_tx_set_txpwr_auto(struct mt76x2_dev *dev, s8 txpwr)
+{
+	s8 txpwr_adj;
+
+	txpwr_adj = mt76x2_tx_get_txpwr_adj(dev, txpwr,
+					    dev->rate_power.ofdm[4]);
+	mt76_rmw_field(dev, MT_PROT_AUTO_TX_CFG,
+		       MT_PROT_AUTO_TX_CFG_PROT_PADJ, txpwr_adj);
+	mt76_rmw_field(dev, MT_PROT_AUTO_TX_CFG,
+		       MT_PROT_AUTO_TX_CFG_AUTO_PADJ, txpwr_adj);
+}
+
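+/* The hardware appears to require the 802.11 header to be 4-byte aligned;
+ * shift it up and zero two pad bytes between header and payload when the
+ * header length is not a multiple of four.
+ */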
+static int mt76x2_insert_hdr_pad(struct sk_buff *skb)
+{
+	int len = ieee80211_get_hdrlen_from_skb(skb);
+
+	if (len % 4 == 0)
+		return 0;
+
+	skb_push(skb, 2);
+	memmove(skb->data, skb->data + 2, len);
+
+	skb->data[len] = 0;
+	skb->data[len + 1] = 0;
+	return 2;
+}
+
+int mt76x2_tx_prepare_skb(struct mt76_dev *mdev, void *txwi,
+			  struct sk_buff *skb, struct mt76_queue *q,
+			  struct mt76_wcid *wcid, struct ieee80211_sta *sta,
+			  u32 *tx_info)
+{
+	struct mt76x2_dev *dev = container_of(mdev, struct mt76x2_dev, mt76);
+	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
+	int qsel = MT_QSEL_EDCA;
+	int ret;
+
+	if (q == &dev->mt76.q_tx[MT_TXQ_PSD] && wcid && wcid->idx < 128)
+		mt76x2_mac_wcid_set_drop(dev, wcid->idx, false);
+
+	mt76x2_mac_write_txwi(dev, txwi, skb, wcid, sta);
+
+	ret = mt76x2_insert_hdr_pad(skb);
+	if (ret < 0)
+		return ret;
+
+	if (info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE)
+		qsel = MT_QSEL_MGMT;
+
+	*tx_info = FIELD_PREP(MT_TXD_INFO_QSEL, qsel) |
+		   MT_TXD_INFO_80211;
+
+	if (!wcid || wcid->hw_key_idx == 0xff || wcid->sw_iv)
+		*tx_info |= MT_TXD_INFO_WIV;
+
+	return 0;
+}
+
+static void
+mt76x2_update_beacon_iter(void *priv, u8 *mac, struct ieee80211_vif *vif)
+{
+	struct mt76x2_dev *dev = (struct mt76x2_dev *) priv;
+	struct mt76x2_vif *mvif = (struct mt76x2_vif *) vif->drv_priv;
+	struct sk_buff *skb = NULL;
+
+	if (!(dev->beacon_mask & BIT(mvif->idx)))
+		return;
+
+	skb = ieee80211_beacon_get(mt76_hw(dev), vif);
+	if (!skb)
+		return;
+
+	mt76x2_mac_set_beacon(dev, mvif->idx, skb);
+}
+
+static void
+mt76x2_add_buffered_bc(void *priv, u8 *mac, struct ieee80211_vif *vif)
+{
+	struct beacon_bc_data *data = priv;
+	struct mt76x2_dev *dev = data->dev;
+	struct mt76x2_vif *mvif = (struct mt76x2_vif *) vif->drv_priv;
+	struct ieee80211_tx_info *info;
+	struct sk_buff *skb;
+
+	if (!(dev->beacon_mask & BIT(mvif->idx)))
+		return;
+
+	skb = ieee80211_get_buffered_bc(mt76_hw(dev), vif);
+	if (!skb)
+		return;
+
+	info = IEEE80211_SKB_CB(skb);
+	info->control.vif = vif;
+	info->flags |= IEEE80211_TX_CTL_ASSIGN_SEQ;
+	mt76_skb_set_moredata(skb, true);
+	__skb_queue_tail(&data->q, skb);
+	data->tail[mvif->idx] = skb;
+}
+
+void mt76x2_pre_tbtt_tasklet(unsigned long arg)
+{
+	struct mt76x2_dev *dev = (struct mt76x2_dev *) arg;
+	struct mt76_queue *q = &dev->mt76.q_tx[MT_TXQ_PSD];
+	struct beacon_bc_data data = {};
+	struct sk_buff *skb;
+	int i, nframes;
+
+	data.dev = dev;
+	__skb_queue_head_init(&data.q);
+
+	ieee80211_iterate_active_interfaces_atomic(mt76_hw(dev),
+		IEEE80211_IFACE_ITER_RESUME_ALL,
+		mt76x2_update_beacon_iter, dev);
+
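+	/* Keep iterating while new buffered multicast frames show up; stop
+	 * once a pass adds nothing to the queue.
+	 */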
+	do {
+		nframes = skb_queue_len(&data.q);
+		ieee80211_iterate_active_interfaces_atomic(mt76_hw(dev),
+			IEEE80211_IFACE_ITER_RESUME_ALL,
+			mt76x2_add_buffered_bc, &data);
+	} while (nframes != skb_queue_len(&data.q));
+
+	if (!nframes)
+		return;
+
+	for (i = 0; i < ARRAY_SIZE(data.tail); i++) {
+		if (!data.tail[i])
+			continue;
+
+		mt76_skb_set_moredata(data.tail[i], false);
+	}
+
+	spin_lock_bh(&q->lock);
+	while ((skb = __skb_dequeue(&data.q)) != NULL) {
+		struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
+		struct ieee80211_vif *vif = info->control.vif;
+		struct mt76x2_vif *mvif = (struct mt76x2_vif *) vif->drv_priv;
+
+		mt76_tx_queue_skb(&dev->mt76, q, skb, &mvif->group_wcid, NULL);
+	}
+	spin_unlock_bh(&q->lock);
+}
diff --git a/drivers/net/wireless/mediatek/mt76/trace.c b/drivers/net/wireless/mediatek/mt76/trace.c
new file mode 100644
index 000000000000..ea4ab8729ae4
--- /dev/null
+++ b/drivers/net/wireless/mediatek/mt76/trace.c
@@ -0,0 +1,23 @@
+/*
+ * Copyright (C) 2016 Felix Fietkau <nbd@nbd.name>
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include <linux/module.h>
+
+#ifndef __CHECKER__
+#define CREATE_TRACE_POINTS
+#include "trace.h"
+
+#endif
diff --git a/drivers/net/wireless/mediatek/mt76/trace.h b/drivers/net/wireless/mediatek/mt76/trace.h
new file mode 100644
index 000000000000..ea30895933c5
--- /dev/null
+++ b/drivers/net/wireless/mediatek/mt76/trace.h
@@ -0,0 +1,71 @@
+/*
+ * Copyright (C) 2016 Felix Fietkau <nbd@nbd.name>
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#if !defined(__MT76_TRACE_H) || defined(TRACE_HEADER_MULTI_READ)
+#define __MT76_TRACE_H
+
+#include <linux/tracepoint.h>
+#include "mt76.h"
+
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM mt76
+
+#define MAXNAME		32
+#define DEV_ENTRY   __array(char, wiphy_name, MAXNAME)
+#define DEV_ASSIGN  strlcpy(__entry->wiphy_name, wiphy_name(dev->hw->wiphy), MAXNAME)
+#define DEV_PR_FMT  "%s"
+#define DEV_PR_ARG  __entry->wiphy_name
+
+#define REG_ENTRY	__field(u32, reg) __field(u32, val)
+#define REG_ASSIGN	__entry->reg = reg; __entry->val = val
+#define REG_PR_FMT	" %04x=%08x"
+#define REG_PR_ARG	__entry->reg, __entry->val
+
+DECLARE_EVENT_CLASS(dev_reg_evt,
+	TP_PROTO(struct mt76_dev *dev, u32 reg, u32 val),
+	TP_ARGS(dev, reg, val),
+	TP_STRUCT__entry(
+		DEV_ENTRY
+		REG_ENTRY
+	),
+	TP_fast_assign(
+		DEV_ASSIGN;
+		REG_ASSIGN;
+	),
+	TP_printk(
+		DEV_PR_FMT REG_PR_FMT,
+		DEV_PR_ARG, REG_PR_ARG
+	)
+);
+
+DEFINE_EVENT(dev_reg_evt, reg_rr,
+	TP_PROTO(struct mt76_dev *dev, u32 reg, u32 val),
+	TP_ARGS(dev, reg, val)
+);
+
+DEFINE_EVENT(dev_reg_evt, reg_wr,
+	TP_PROTO(struct mt76_dev *dev, u32 reg, u32 val),
+	TP_ARGS(dev, reg, val)
+);
+
+#endif
+
+#undef TRACE_INCLUDE_PATH
+#define TRACE_INCLUDE_PATH .
+#undef TRACE_INCLUDE_FILE
+#define TRACE_INCLUDE_FILE trace
+
+#include <trace/define_trace.h>
diff --git a/drivers/net/wireless/mediatek/mt76/tx.c b/drivers/net/wireless/mediatek/mt76/tx.c
new file mode 100644
index 000000000000..4eef69bd8a9e
--- /dev/null
+++ b/drivers/net/wireless/mediatek/mt76/tx.c
@@ -0,0 +1,511 @@
+/*
+ * Copyright (C) 2016 Felix Fietkau <nbd@nbd.name>
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include "mt76.h"
+
+static struct mt76_txwi_cache *
+mt76_alloc_txwi(struct mt76_dev *dev)
+{
+	struct mt76_txwi_cache *t;
+	dma_addr_t addr;
+	int size;
+
+	size = (sizeof(*t) + L1_CACHE_BYTES - 1) & ~(L1_CACHE_BYTES - 1);
+	t = devm_kzalloc(dev->dev, size, GFP_ATOMIC);
+	if (!t)
+		return NULL;
+
+	addr = dma_map_single(dev->dev, &t->txwi, sizeof(t->txwi),
+			      DMA_TO_DEVICE);
+	t->dma_addr = addr;
+
+	return t;
+}
+
+static struct mt76_txwi_cache *
+__mt76_get_txwi(struct mt76_dev *dev)
+{
+	struct mt76_txwi_cache *t = NULL;
+
+	spin_lock_bh(&dev->lock);
+	if (!list_empty(&dev->txwi_cache)) {
+		t = list_first_entry(&dev->txwi_cache, struct mt76_txwi_cache,
+				     list);
+		list_del(&t->list);
+	}
+	spin_unlock_bh(&dev->lock);
+
+	return t;
+}
+
+static struct mt76_txwi_cache *
+mt76_get_txwi(struct mt76_dev *dev)
+{
+	struct mt76_txwi_cache *t = __mt76_get_txwi(dev);
+
+	if (t)
+		return t;
+
+	return mt76_alloc_txwi(dev);
+}
+
+void
+mt76_put_txwi(struct mt76_dev *dev, struct mt76_txwi_cache *t)
+{
+	if (!t)
+		return;
+
+	spin_lock_bh(&dev->lock);
+	list_add(&t->list, &dev->txwi_cache);
+	spin_unlock_bh(&dev->lock);
+}
+
+void mt76_tx_free(struct mt76_dev *dev)
+{
+	struct mt76_txwi_cache *t;
+
+	while ((t = __mt76_get_txwi(dev)) != NULL)
+		dma_unmap_single(dev->dev, t->dma_addr, sizeof(t->txwi),
+				 DMA_TO_DEVICE);
+}
+
+static int
+mt76_txq_get_qid(struct ieee80211_txq *txq)
+{
+	if (!txq->sta)
+		return MT_TXQ_BE;
+
+	return txq->ac;
+}
+
+int mt76_tx_queue_skb(struct mt76_dev *dev, struct mt76_queue *q,
+		      struct sk_buff *skb, struct mt76_wcid *wcid,
+		      struct ieee80211_sta *sta)
+{
+	struct mt76_queue_entry e;
+	struct mt76_txwi_cache *t;
+	struct mt76_queue_buf buf[32];
+	struct sk_buff *iter;
+	dma_addr_t addr;
+	int len;
+	u32 tx_info = 0;
+	int n, ret;
+
+	t = mt76_get_txwi(dev);
+	if (!t) {
+		ieee80211_free_txskb(dev->hw, skb);
+		return -ENOMEM;
+	}
+
+	dma_sync_single_for_cpu(dev->dev, t->dma_addr, sizeof(t->txwi),
+				DMA_TO_DEVICE);
+	ret = dev->drv->tx_prepare_skb(dev, &t->txwi, skb, q, wcid, sta,
+				       &tx_info);
+	dma_sync_single_for_device(dev->dev, t->dma_addr, sizeof(t->txwi),
+				   DMA_TO_DEVICE);
+	if (ret < 0)
+		goto free;
+
+	len = skb->len - skb->data_len;
+	addr = dma_map_single(dev->dev, skb->data, len, DMA_TO_DEVICE);
+	if (dma_mapping_error(dev->dev, addr)) {
+		ret = -ENOMEM;
+		goto free;
+	}
+
+	n = 0;
+	buf[n].addr = t->dma_addr;
+	buf[n++].len = dev->drv->txwi_size;
+	buf[n].addr = addr;
+	buf[n++].len = len;
+
+	skb_walk_frags(skb, iter) {
+		if (n == ARRAY_SIZE(buf))
+			goto unmap;
+
+		addr = dma_map_single(dev->dev, iter->data, iter->len,
+				      DMA_TO_DEVICE);
+		if (dma_mapping_error(dev->dev, addr))
+			goto unmap;
+
+		buf[n].addr = addr;
+		buf[n++].len = iter->len;
+	}
+
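+	/* Each hardware descriptor seems to hold two buffers, hence the
+	 * (n + 1) / 2 rounding when checking for ring space.
+	 */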
+	if (q->queued + (n + 1) / 2 >= q->ndesc - 1)
+		goto unmap;
+
+	return dev->queue_ops->add_buf(dev, q, buf, n, tx_info, skb, t);
+
+unmap:
+	ret = -ENOMEM;
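+	/* buf[0] is the cached txwi mapping, which stays mapped for reuse,
+	 * so only the skb mappings are undone here.
+	 */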
+	for (n--; n > 0; n--)
+		dma_unmap_single(dev->dev, buf[n].addr, buf[n].len,
+				 DMA_TO_DEVICE);
+
+free:
+	e.skb = skb;
+	e.txwi = t;
+	dev->drv->tx_complete_skb(dev, q, &e, true);
+	mt76_put_txwi(dev, t);
+	return ret;
+}
+EXPORT_SYMBOL_GPL(mt76_tx_queue_skb);
+
+void
+mt76_tx(struct mt76_dev *dev, struct ieee80211_sta *sta,
+	struct mt76_wcid *wcid, struct sk_buff *skb)
+{
+	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
+	struct mt76_queue *q;
+	int qid = skb_get_queue_mapping(skb);
+
+	if (WARN_ON(qid >= MT_TXQ_PSD)) {
+		qid = MT_TXQ_BE;
+		skb_set_queue_mapping(skb, qid);
+	}
+
+	if (!wcid->tx_rate_set)
+		ieee80211_get_tx_rates(info->control.vif, sta, skb,
+				       info->control.rates, 1);
+
+	q = &dev->q_tx[qid];
+
+	spin_lock_bh(&q->lock);
+	mt76_tx_queue_skb(dev, q, skb, wcid, sta);
+	dev->queue_ops->kick(dev, q);
+
+	if (q->queued > q->ndesc - 8)
+		ieee80211_stop_queue(dev->hw, skb_get_queue_mapping(skb));
+	spin_unlock_bh(&q->lock);
+}
+EXPORT_SYMBOL_GPL(mt76_tx);
+
+static struct sk_buff *
+mt76_txq_dequeue(struct mt76_dev *dev, struct mt76_txq *mtxq, bool ps)
+{
+	struct ieee80211_txq *txq = mtxq_to_txq(mtxq);
+	struct sk_buff *skb;
+
+	skb = skb_dequeue(&mtxq->retry_q);
+	if (skb) {
+		u8 tid = skb->priority & IEEE80211_QOS_CTL_TID_MASK;
+
+		if (ps && skb_queue_empty(&mtxq->retry_q))
+			ieee80211_sta_set_buffered(txq->sta, tid, false);
+
+		return skb;
+	}
+
+	return ieee80211_tx_dequeue(dev->hw, txq);
+}
+
+static void
+mt76_check_agg_ssn(struct mt76_txq *mtxq, struct sk_buff *skb)
+{
+	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
+
+	if (!ieee80211_is_data_qos(hdr->frame_control))
+		return;
+
+	mtxq->agg_ssn = le16_to_cpu(hdr->seq_ctrl) + 0x10;
+}
+
+static void
+mt76_queue_ps_skb(struct mt76_dev *dev, struct ieee80211_sta *sta,
+		  struct sk_buff *skb, bool last)
+{
+	struct mt76_wcid *wcid = (struct mt76_wcid *) sta->drv_priv;
+	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
+	struct mt76_queue *hwq = &dev->q_tx[MT_TXQ_PSD];
+
+	info->control.flags |= IEEE80211_TX_CTRL_PS_RESPONSE;
+	if (last)
+		info->flags |= IEEE80211_TX_STATUS_EOSP;
+
+	mt76_skb_set_moredata(skb, !last);
+	mt76_tx_queue_skb(dev, hwq, skb, wcid, sta);
+}
+
+void
+mt76_release_buffered_frames(struct ieee80211_hw *hw, struct ieee80211_sta *sta,
+			     u16 tids, int nframes,
+			     enum ieee80211_frame_release_type reason,
+			     bool more_data)
+{
+	struct mt76_dev *dev = hw->priv;
+	struct sk_buff *last_skb = NULL;
+	struct mt76_queue *hwq = &dev->q_tx[MT_TXQ_PSD];
+	int i;
+
+	spin_lock_bh(&hwq->lock);
+	for (i = 0; tids && nframes; i++, tids >>= 1) {
+		struct ieee80211_txq *txq = sta->txq[i];
+		struct mt76_txq *mtxq = (struct mt76_txq *) txq->drv_priv;
+		struct sk_buff *skb;
+
+		if (!(tids & 1))
+			continue;
+
+		do {
+			skb = mt76_txq_dequeue(dev, mtxq, true);
+			if (!skb)
+				break;
+
+			if (mtxq->aggr)
+				mt76_check_agg_ssn(mtxq, skb);
+
+			nframes--;
+			if (last_skb)
+				mt76_queue_ps_skb(dev, sta, last_skb, false);
+
+			last_skb = skb;
+		} while (nframes);
+	}
+
+	if (last_skb) {
+		mt76_queue_ps_skb(dev, sta, last_skb, true);
+		dev->queue_ops->kick(dev, hwq);
+	}
+	spin_unlock_bh(&hwq->lock);
+}
+EXPORT_SYMBOL_GPL(mt76_release_buffered_frames);
+
+static int
+mt76_txq_send_burst(struct mt76_dev *dev, struct mt76_queue *hwq,
+		    struct mt76_txq *mtxq, bool *empty)
+{
+	struct ieee80211_txq *txq = mtxq_to_txq(mtxq);
+	struct ieee80211_tx_info *info;
+	struct mt76_wcid *wcid = mtxq->wcid;
+	struct sk_buff *skb;
+	int n_frames = 1, limit;
+	struct ieee80211_tx_rate tx_rate;
+	bool ampdu;
+	bool probe;
+	int idx;
+
+	skb = mt76_txq_dequeue(dev, mtxq, false);
+	if (!skb) {
+		*empty = true;
+		return 0;
+	}
+
+	info = IEEE80211_SKB_CB(skb);
+	if (!wcid->tx_rate_set)
+		ieee80211_get_tx_rates(txq->vif, txq->sta, skb,
+				       info->control.rates, 1);
+	tx_rate = info->control.rates[0];
+
+	probe = (info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE);
+	ampdu = IEEE80211_SKB_CB(skb)->flags & IEEE80211_TX_CTL_AMPDU;
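+	/* Burst more frames when aggregation is active; the limits look
+	 * like heuristics.
+	 */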
+	limit = ampdu ? 16 : 3;
+
+	if (ampdu)
+		mt76_check_agg_ssn(mtxq, skb);
+
+	idx = mt76_tx_queue_skb(dev, hwq, skb, wcid, txq->sta);
+
+	if (idx < 0)
+		return idx;
+
+	do {
+		bool cur_ampdu;
+
+		if (probe)
+			break;
+
+		if (test_bit(MT76_SCANNING, &dev->state) ||
+		    test_bit(MT76_RESET, &dev->state))
+			return -EBUSY;
+
+		skb = mt76_txq_dequeue(dev, mtxq, false);
+		if (!skb) {
+			*empty = true;
+			break;
+		}
+
+		info = IEEE80211_SKB_CB(skb);
+		cur_ampdu = info->flags & IEEE80211_TX_CTL_AMPDU;
+
+		if (ampdu != cur_ampdu ||
+		    (info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE)) {
+			skb_queue_tail(&mtxq->retry_q, skb);
+			break;
+		}
+
+		info->control.rates[0] = tx_rate;
+
+		if (cur_ampdu)
+			mt76_check_agg_ssn(mtxq, skb);
+
+		idx = mt76_tx_queue_skb(dev, hwq, skb, wcid, txq->sta);
+		if (idx < 0)
+			return idx;
+
+		n_frames++;
+	} while (n_frames < limit);
+
+	if (!probe) {
+		hwq->swq_queued++;
+		hwq->entry[idx].schedule = true;
+	}
+
+	dev->queue_ops->kick(dev, hwq);
+
+	return n_frames;
+}
+
+static int
+mt76_txq_schedule_list(struct mt76_dev *dev, struct mt76_queue *hwq)
+{
+	struct mt76_txq *mtxq, *mtxq_last;
+	int len = 0;
+
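+	/* Sending a BAR drops the queue lock, so the list walk restarts
+	 * from the head afterwards.
+	 */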
+restart:
+	mtxq_last = list_last_entry(&hwq->swq, struct mt76_txq, list);
+	while (!list_empty(&hwq->swq)) {
+		bool empty = false;
+		int cur;
+
+		mtxq = list_first_entry(&hwq->swq, struct mt76_txq, list);
+		if (mtxq->send_bar && mtxq->aggr) {
+			struct ieee80211_txq *txq = mtxq_to_txq(mtxq);
+			struct ieee80211_sta *sta = txq->sta;
+			struct ieee80211_vif *vif = txq->vif;
+			u16 agg_ssn = mtxq->agg_ssn;
+			u8 tid = txq->tid;
+
+			mtxq->send_bar = false;
+			spin_unlock_bh(&hwq->lock);
+			ieee80211_send_bar(vif, sta->addr, tid, agg_ssn);
+			spin_lock_bh(&hwq->lock);
+			goto restart;
+		}
+
+		list_del_init(&mtxq->list);
+
+		cur = mt76_txq_send_burst(dev, hwq, mtxq, &empty);
+		if (!empty)
+			list_add_tail(&mtxq->list, &hwq->swq);
+
+		if (cur < 0)
+			return cur;
+
+		len += cur;
+
+		if (mtxq == mtxq_last)
+			break;
+	}
+
+	return len;
+}
+
+void mt76_txq_schedule(struct mt76_dev *dev, struct mt76_queue *hwq)
+{
+	int len;
+
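+	/* Refill in bursts, stopping once enough software queues are
+	 * pending in hardware; the limit of 4 looks heuristic.
+	 */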
+	do {
+		if (hwq->swq_queued >= 4 || list_empty(&hwq->swq))
+			break;
+
+		len = mt76_txq_schedule_list(dev, hwq);
+	} while (len > 0);
+}
+EXPORT_SYMBOL_GPL(mt76_txq_schedule);
+
+void mt76_txq_schedule_all(struct mt76_dev *dev)
+{
+	int i;
+
+	for (i = 0; i <= MT_TXQ_BK; i++) {
+		struct mt76_queue *q = &dev->q_tx[i];
+
+		spin_lock_bh(&q->lock);
+		mt76_txq_schedule(dev, q);
+		spin_unlock_bh(&q->lock);
+	}
+}
+EXPORT_SYMBOL_GPL(mt76_txq_schedule_all);
+
+void mt76_stop_tx_queues(struct mt76_dev *dev, struct ieee80211_sta *sta,
+			 bool send_bar)
+{
+	int i;
+
+	for (i = 0; i < ARRAY_SIZE(sta->txq); i++) {
+		struct ieee80211_txq *txq = sta->txq[i];
+		struct mt76_txq *mtxq = (struct mt76_txq *) txq->drv_priv;
+
+		spin_lock_bh(&mtxq->hwq->lock);
+		mtxq->send_bar = mtxq->aggr && send_bar;
+		if (!list_empty(&mtxq->list))
+			list_del_init(&mtxq->list);
+		spin_unlock_bh(&mtxq->hwq->lock);
+	}
+}
+EXPORT_SYMBOL_GPL(mt76_stop_tx_queues);
+
+void mt76_wake_tx_queue(struct ieee80211_hw *hw, struct ieee80211_txq *txq)
+{
+	struct mt76_dev *dev = hw->priv;
+	struct mt76_txq *mtxq = (struct mt76_txq *) txq->drv_priv;
+	struct mt76_queue *hwq = mtxq->hwq;
+
+	spin_lock_bh(&hwq->lock);
+	if (list_empty(&mtxq->list))
+		list_add_tail(&mtxq->list, &hwq->swq);
+	mt76_txq_schedule(dev, hwq);
+	spin_unlock_bh(&hwq->lock);
+}
+EXPORT_SYMBOL_GPL(mt76_wake_tx_queue);
+
+void mt76_txq_remove(struct mt76_dev *dev, struct ieee80211_txq *txq)
+{
+	struct mt76_txq *mtxq;
+	struct mt76_queue *hwq;
+	struct sk_buff *skb;
+
+	if (!txq)
+		return;
+
+	mtxq = (struct mt76_txq *) txq->drv_priv;
+	hwq = mtxq->hwq;
+
+	spin_lock_bh(&hwq->lock);
+	if (!list_empty(&mtxq->list))
+		list_del(&mtxq->list);
+	spin_unlock_bh(&hwq->lock);
+
+	while ((skb = skb_dequeue(&mtxq->retry_q)) != NULL)
+		ieee80211_free_txskb(dev->hw, skb);
+}
+EXPORT_SYMBOL_GPL(mt76_txq_remove);
+
+void mt76_txq_init(struct mt76_dev *dev, struct ieee80211_txq *txq)
+{
+	struct mt76_txq *mtxq = (struct mt76_txq *) txq->drv_priv;
+
+	INIT_LIST_HEAD(&mtxq->list);
+	skb_queue_head_init(&mtxq->retry_q);
+
+	mtxq->hwq = &dev->q_tx[mt76_txq_get_qid(txq)];
+}
+EXPORT_SYMBOL_GPL(mt76_txq_init);
diff --git a/drivers/net/wireless/mediatek/mt76/util.c b/drivers/net/wireless/mediatek/mt76/util.c
new file mode 100644
index 000000000000..0c35b8db58cd
--- /dev/null
+++ b/drivers/net/wireless/mediatek/mt76/util.c
@@ -0,0 +1,78 @@
+/*
+ * Copyright (C) 2016 Felix Fietkau <nbd@nbd.name>
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include <linux/module.h>
+#include "mt76.h"
+
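+/* Busy-poll a register until (value & mask) == val; the timeout is in
+ * microseconds, checked in 10 us steps.
+ */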
+bool __mt76_poll(struct mt76_dev *dev, u32 offset, u32 mask, u32 val,
+		 int timeout)
+{
+	u32 cur;
+
+	timeout /= 10;
+	do {
+		cur = dev->bus->rr(dev, offset) & mask;
+		if (cur == val)
+			return true;
+
+		udelay(10);
+	} while (timeout-- > 0);
+
+	return false;
+}
+EXPORT_SYMBOL_GPL(__mt76_poll);
+
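+/* Same as __mt76_poll, but with the timeout in milliseconds and roughly
+ * 10 ms sleeps between reads.
+ */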
+bool __mt76_poll_msec(struct mt76_dev *dev, u32 offset, u32 mask, u32 val,
+		      int timeout)
+{
+	u32 cur;
+
+	timeout /= 10;
+	do {
+		cur = dev->bus->rr(dev, offset) & mask;
+		if (cur == val)
+			return true;
+
+		usleep_range(10000, 20000);
+	} while (timeout-- > 0);
+
+	return false;
+}
+EXPORT_SYMBOL_GPL(__mt76_poll_msec);
+
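+/* Find and claim the first clear bit in the WCID bitmap; returns the
+ * allocated index, or -1 when the table is full.
+ */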
+int mt76_wcid_alloc(unsigned long *mask, int size)
+{
+	int i, idx = 0, cur;
+
+	for (i = 0; i < size / BITS_PER_LONG; i++) {
+		idx = ffs(~mask[i]);
+		if (!idx)
+			continue;
+
+		idx--;
+		cur = i * BITS_PER_LONG + idx;
+		if (cur >= size)
+			break;
+
+		mask[i] |= BIT(idx);
+		return cur;
+	}
+
+	return -1;
+}
+EXPORT_SYMBOL_GPL(mt76_wcid_alloc);
+
+MODULE_LICENSE("Dual BSD/GPL");
diff --git a/drivers/net/wireless/mediatek/mt76/util.h b/drivers/net/wireless/mediatek/mt76/util.h
new file mode 100644
index 000000000000..018d475504a2
--- /dev/null
+++ b/drivers/net/wireless/mediatek/mt76/util.h
@@ -0,0 +1,44 @@
+/*
+ * Copyright (C) 2016 Felix Fietkau <nbd@nbd.name>
+ * Copyright (C) 2004 - 2009 Ivo van Doorn <IvDoorn@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __MT76_UTIL_H
+#define __MT76_UTIL_H
+
+#include <linux/skbuff.h>
+#include <linux/bitops.h>
+#include <linux/bitfield.h>
+
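+/* Advance a ring index with wrap-around. */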
+#define MT76_INCR(_var, _size) \
+	_var = (((_var) + 1) % _size)
+
+int mt76_wcid_alloc(unsigned long *mask, int size);
+
+static inline void
+mt76_wcid_free(unsigned long *mask, int idx)
+{
+	mask[idx / BITS_PER_LONG] &= ~BIT(idx % BITS_PER_LONG);
+}
+
+static inline void
+mt76_skb_set_moredata(struct sk_buff *skb, bool enable)
+{
+	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
+
+	if (enable)
+		hdr->frame_control |= cpu_to_le16(IEEE80211_FCTL_MOREDATA);
+	else
+		hdr->frame_control &= ~cpu_to_le16(IEEE80211_FCTL_MOREDATA);
+}
+
+#endif
diff --git a/drivers/net/wireless/quantenna/qtnfmac/cfg80211.c b/drivers/net/wireless/quantenna/qtnfmac/cfg80211.c
index 6711e7fb6926..0398bece5782 100644
--- a/drivers/net/wireless/quantenna/qtnfmac/cfg80211.c
+++ b/drivers/net/wireless/quantenna/qtnfmac/cfg80211.c
@@ -81,6 +81,41 @@ qtnf_mgmt_stypes[NUM_NL80211_IFTYPES] = {
 };
 
 static int
+qtnf_validate_iface_combinations(struct wiphy *wiphy,
+				 struct qtnf_vif *change_vif,
+				 enum nl80211_iftype new_type)
+{
+	struct qtnf_wmac *mac;
+	struct qtnf_vif *vif;
+	int i;
+	struct iface_combination_params params = {
+		.num_different_channels = 1,
+	};
+
+	mac = wiphy_priv(wiphy);
+	if (!mac)
+		return -EFAULT;
+
+	for (i = 0; i < QTNF_MAX_INTF; i++) {
+		vif = &mac->iflist[i];
+		if (vif->wdev.iftype != NL80211_IFTYPE_UNSPECIFIED)
+			params.iftype_num[vif->wdev.iftype]++;
+	}
+
+	params.iftype_num[new_type]++;
+	if (change_vif)
+		params.iftype_num[change_vif->wdev.iftype]--;
+
+	return cfg80211_check_combinations(wiphy, &params);
+}
+
+static int
 qtnf_change_virtual_intf(struct wiphy *wiphy,
 			 struct net_device *dev,
 			 enum nl80211_iftype type,
@@ -90,6 +125,13 @@ qtnf_change_virtual_intf(struct wiphy *wiphy,
 	u8 *mac_addr;
 	int ret;
 
+	ret = qtnf_validate_iface_combinations(wiphy, vif, type);
+	if (ret) {
+		pr_err("VIF%u.%u combination check: failed to set type %d\n",
+		       vif->mac->macid, vif->vifid, type);
+		return ret;
+	}
+
 	if (params)
 		mac_addr = params->macaddr;
 	else
@@ -120,10 +162,6 @@ int qtnf_del_virtual_intf(struct wiphy *wiphy, struct wireless_dev *wdev)
 
 	qtnf_scan_done(vif->mac, true);
 
-	if (qtnf_cmd_send_del_intf(vif))
-		pr_err("VIF%u.%u: failed to delete VIF\n", vif->mac->macid,
-		       vif->vifid);
-
 	/* Stop data */
 	netif_tx_stop_all_queues(netdev);
 	if (netif_carrier_ok(netdev))
@@ -132,6 +170,10 @@ int qtnf_del_virtual_intf(struct wiphy *wiphy, struct wireless_dev *wdev)
 	if (netdev->reg_state == NETREG_REGISTERED)
 		unregister_netdevice(netdev);
 
+	if (qtnf_cmd_send_del_intf(vif))
+		pr_err("VIF%u.%u: failed to delete VIF\n", vif->mac->macid,
+		       vif->vifid);
+
 	vif->netdev->ieee80211_ptr = NULL;
 	vif->netdev = NULL;
 	vif->wdev.iftype = NL80211_IFTYPE_UNSPECIFIED;
@@ -150,12 +192,20 @@ static struct wireless_dev *qtnf_add_virtual_intf(struct wiphy *wiphy,
 	struct qtnf_wmac *mac;
 	struct qtnf_vif *vif;
 	u8 *mac_addr = NULL;
+	int ret;
 
 	mac = wiphy_priv(wiphy);
 
 	if (!mac)
 		return ERR_PTR(-EFAULT);
 
+	ret = qtnf_validate_iface_combinations(wiphy, NULL, type);
+	if (ret) {
+		pr_err("MAC%u invalid combination: failed to add type %d\n",
+		       mac->macid, type);
+		return ERR_PTR(ret);
+	}
+
 	switch (type) {
 	case NL80211_IFTYPE_STATION:
 	case NL80211_IFTYPE_AP:
@@ -190,7 +240,7 @@ static struct wireless_dev *qtnf_add_virtual_intf(struct wiphy *wiphy,
 		goto err_mac;
 	}
 
-	if (qtnf_core_net_attach(mac, vif, name, name_assign_t, type)) {
+	if (qtnf_core_net_attach(mac, vif, name, name_assign_t)) {
 		pr_err("VIF%u.%u: failed to attach netdev\n", mac->macid,
 		       vif->vifid);
 		goto err_net;
@@ -381,6 +431,7 @@ qtnf_mgmt_tx(struct wiphy *wiphy, struct wireless_dev *wdev,
 	const struct ieee80211_mgmt *mgmt_frame = (void *)params->buf;
 	u32 short_cookie = prandom_u32();
 	u16 flags = 0;
+	u16 freq;
 
 	*cookie = short_cookie;
 
@@ -393,13 +444,21 @@ qtnf_mgmt_tx(struct wiphy *wiphy, struct wireless_dev *wdev,
 	if (params->dont_wait_for_ack)
 		flags |= QLINK_MGMT_FRAME_TX_FLAG_ACK_NOWAIT;
 
+	/* If channel is not specified, pass "freq = 0" to tell device
+	 * firmware to use current channel.
+	 */
+	if (params->chan)
+		freq = params->chan->center_freq;
+	else
+		freq = 0;
+
 	pr_debug("%s freq:%u; FC:%.4X; DA:%pM; len:%zu; C:%.8X; FL:%.4X\n",
-		 wdev->netdev->name, params->chan->center_freq,
+		 wdev->netdev->name, freq,
 		 le16_to_cpu(mgmt_frame->frame_control), mgmt_frame->da,
 		 params->len, short_cookie, flags);
 
 	return qtnf_cmd_send_mgmt_frame(vif, short_cookie, flags,
-					params->chan->center_freq,
+					freq,
 					params->buf, params->len);
 }
 
@@ -409,6 +468,7 @@ qtnf_get_station(struct wiphy *wiphy, struct net_device *dev,
 {
 	struct qtnf_vif *vif = qtnf_netdev_get_priv(dev);
 
+	sinfo->generation = vif->generation;
 	return qtnf_cmd_get_sta_info(vif, mac, sinfo);
 }
 
@@ -430,11 +490,13 @@ qtnf_dump_station(struct wiphy *wiphy, struct net_device *dev,
 	ret = qtnf_cmd_get_sta_info(vif, sta_node->mac_addr, sinfo);
 
 	if (unlikely(ret == -ENOENT)) {
-		qtnf_sta_list_del(&vif->sta_list, mac);
+		qtnf_sta_list_del(vif, mac);
 		cfg80211_del_sta(vif->netdev, mac, GFP_KERNEL);
 		sinfo->filled = 0;
 	}
 
+	sinfo->generation = vif->generation;
+
 	return ret;
 }
 
@@ -533,19 +595,13 @@ qtnf_del_station(struct wiphy *wiphy, struct net_device *dev,
 	return ret;
 }
 
-static void qtnf_scan_timeout(struct timer_list *t)
-{
-	struct qtnf_wmac *mac = from_timer(mac, t, scan_timeout);
-
-	pr_warn("mac%d scan timed out\n", mac->macid);
-	qtnf_scan_done(mac, true);
-}
-
 static int
 qtnf_scan(struct wiphy *wiphy, struct cfg80211_scan_request *request)
 {
 	struct qtnf_wmac *mac = wiphy_priv(wiphy);
 
+	cancel_delayed_work_sync(&mac->scan_timeout);
+
 	mac->scan_req = request;
 
 	if (qtnf_cmd_send_scan(mac)) {
@@ -554,9 +610,8 @@ qtnf_scan(struct wiphy *wiphy, struct cfg80211_scan_request *request)
 		return -EFAULT;
 	}
 
-	mac->scan_timeout.function = qtnf_scan_timeout;
-	mod_timer(&mac->scan_timeout,
-		  jiffies + QTNF_SCAN_TIMEOUT_SEC * HZ);
+	queue_delayed_work(mac->bus->workqueue, &mac->scan_timeout,
+			   QTNF_SCAN_TIMEOUT_SEC * HZ);
 
 	return 0;
 }
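
Converting the scan timeout from a timer to delayed work lets the handler sleep (qtnf_scan_done() takes mac_lock), and the cancel_delayed_work_sync() at the top of qtnf_scan() prevents a stale timeout from firing against a fresh request. The lifecycle, condensed from this patch:

/* setup, once per MAC (see qtnf_core_mac_alloc() below) */
INIT_DELAYED_WORK(&mac->scan_timeout, qtnf_mac_scan_timeout);

/* arm when a scan is issued */
queue_delayed_work(mac->bus->workqueue, &mac->scan_timeout,
		   QTNF_SCAN_TIMEOUT_SEC * HZ);

/* disarm on completion; also safe if the work already ran */
cancel_delayed_work_sync(&mac->scan_timeout);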
@@ -617,7 +672,6 @@ qtnf_disconnect(struct wiphy *wiphy, struct net_device *dev,
 		return ret;
 	}
 
-	vif->sta_state = QTNF_STA_DISCONNECTED;
 	return 0;
 }
 
@@ -717,7 +771,8 @@ qtnf_get_channel(struct wiphy *wiphy, struct wireless_dev *wdev,
 	}
 
 	if (!cfg80211_chandef_valid(chandef)) {
-		pr_err("%s: bad chan freq1=%u freq2=%u bw=%u\n", ndev->name,
+		pr_err("%s: bad channel freq=%u cf1=%u cf2=%u bw=%u\n",
+		       ndev->name, chandef->chan->center_freq,
 		       chandef->center_freq1, chandef->center_freq2,
 		       chandef->width);
 		ret = -ENODATA;
@@ -750,6 +805,35 @@ static int qtnf_channel_switch(struct wiphy *wiphy, struct net_device *dev,
 	return ret;
 }
 
+static int qtnf_start_radar_detection(struct wiphy *wiphy,
+				      struct net_device *ndev,
+				      struct cfg80211_chan_def *chandef,
+				      u32 cac_time_ms)
+{
+	struct qtnf_vif *vif = qtnf_netdev_get_priv(ndev);
+	int ret;
+
+	ret = qtnf_cmd_start_cac(vif, chandef, cac_time_ms);
+	if (ret)
+		pr_err("%s: failed to start CAC ret=%d\n", ndev->name, ret);
+
+	return ret;
+}
+
+static int qtnf_set_mac_acl(struct wiphy *wiphy,
+			    struct net_device *dev,
+			    const struct cfg80211_acl_data *params)
+{
+	struct qtnf_vif *vif = qtnf_netdev_get_priv(dev);
+	int ret;
+
+	ret = qtnf_cmd_set_mac_acl(vif, params);
+	if (ret)
+		pr_err("%s: failed to set mac ACL ret=%d\n", dev->name, ret);
+
+	return ret;
+}
+
 static struct cfg80211_ops qtn_cfg80211_ops = {
 	.add_virtual_intf	= qtnf_add_virtual_intf,
 	.change_virtual_intf	= qtnf_change_virtual_intf,
@@ -773,7 +857,9 @@ static struct cfg80211_ops qtn_cfg80211_ops = {
 	.disconnect		= qtnf_disconnect,
 	.dump_survey		= qtnf_dump_survey,
 	.get_channel		= qtnf_get_channel,
-	.channel_switch		= qtnf_channel_switch
+	.channel_switch		= qtnf_channel_switch,
+	.start_radar_detection	= qtnf_start_radar_detection,
+	.set_mac_acl		= qtnf_set_mac_acl,
 };
 
 static void qtnf_cfg80211_reg_notifier(struct wiphy *wiphy_in,
@@ -802,6 +888,9 @@ static void qtnf_cfg80211_reg_notifier(struct wiphy *wiphy_in,
 			continue;
 
 		mac = bus->mac[mac_idx];
+		if (!mac)
+			continue;
+
 		wiphy = priv_to_wiphy(mac);
 
 		for (band = 0; band < NUM_NL80211_BANDS; ++band) {
@@ -829,29 +918,29 @@ struct wiphy *qtnf_wiphy_allocate(struct qtnf_bus *bus)
 	return wiphy;
 }
 
-static int qtnf_wiphy_setup_if_comb(struct wiphy *wiphy,
-				    struct ieee80211_iface_combination *if_comb,
-				    const struct qtnf_mac_info *mac_info)
+static int
+qtnf_wiphy_setup_if_comb(struct wiphy *wiphy, struct qtnf_mac_info *mac_info)
 {
-	size_t max_interfaces = 0;
+	struct ieee80211_iface_combination *if_comb;
+	size_t n_if_comb;
 	u16 interface_modes = 0;
-	size_t i;
+	size_t i, j;
+
+	if_comb = mac_info->if_comb;
+	n_if_comb = mac_info->n_if_comb;
 
-	if (unlikely(!mac_info->limits || !mac_info->n_limits))
+	if (!if_comb || !n_if_comb)
 		return -ENOENT;
 
-	if_comb->limits = mac_info->limits;
-	if_comb->n_limits = mac_info->n_limits;
+	for (i = 0; i < n_if_comb; i++) {
+		if_comb[i].radar_detect_widths = mac_info->radar_detect_widths;
 
-	for (i = 0; i < mac_info->n_limits; i++) {
-		max_interfaces += mac_info->limits[i].max;
-		interface_modes |= mac_info->limits[i].types;
+		for (j = 0; j < if_comb[i].n_limits; j++)
+			interface_modes |= if_comb[i].limits[j].types;
 	}
 
-	if_comb->num_different_channels = 1;
-	if_comb->beacon_int_infra_match = true;
-	if_comb->max_interfaces = max_interfaces;
-	if_comb->radar_detect_widths = mac_info->radar_detect_widths;
+	wiphy->iface_combinations = if_comb;
+	wiphy->n_iface_combinations = n_if_comb;
 	wiphy->interface_modes = interface_modes;
 
 	return 0;
@@ -860,7 +949,6 @@ static int qtnf_wiphy_setup_if_comb(struct wiphy *wiphy,
 int qtnf_wiphy_register(struct qtnf_hw_info *hw_info, struct qtnf_wmac *mac)
 {
 	struct wiphy *wiphy = priv_to_wiphy(mac);
-	struct ieee80211_iface_combination *iface_comb = NULL;
 	int ret;
 
 	if (!wiphy) {
@@ -868,14 +956,6 @@ int qtnf_wiphy_register(struct qtnf_hw_info *hw_info, struct qtnf_wmac *mac)
 		return -EFAULT;
 	}
 
-	iface_comb = kzalloc(sizeof(*iface_comb), GFP_KERNEL);
-	if (!iface_comb)
-		return -ENOMEM;
-
-	ret = qtnf_wiphy_setup_if_comb(wiphy, iface_comb, &mac->macinfo);
-	if (ret)
-		goto out;
-
 	wiphy->frag_threshold = mac->macinfo.frag_thr;
 	wiphy->rts_threshold = mac->macinfo.rts_thr;
 	wiphy->retry_short = mac->macinfo.sretry_limit;
@@ -886,11 +966,13 @@ int qtnf_wiphy_register(struct qtnf_hw_info *hw_info, struct qtnf_wmac *mac)
 	wiphy->max_scan_ie_len = QTNF_MAX_VSIE_LEN;
 	wiphy->mgmt_stypes = qtnf_mgmt_stypes;
 	wiphy->max_remain_on_channel_duration = 5000;
-
-	wiphy->iface_combinations = iface_comb;
-	wiphy->n_iface_combinations = 1;
+	wiphy->max_acl_mac_addrs = mac->macinfo.max_acl_mac_addrs;
 	wiphy->max_num_csa_counters = 2;
 
+	ret = qtnf_wiphy_setup_if_comb(wiphy, &mac->macinfo);
+	if (ret)
+		goto out;
+
 	/* Initialize cipher suits */
 	wiphy->cipher_suites = qtnf_cipher_suites;
 	wiphy->n_cipher_suites = ARRAY_SIZE(qtnf_cipher_suites);
@@ -924,6 +1006,10 @@ int qtnf_wiphy_register(struct qtnf_hw_info *hw_info, struct qtnf_wmac *mac)
 		wiphy->regulatory_flags |= REGULATORY_WIPHY_SELF_MANAGED;
 	}
 
+	strlcpy(wiphy->fw_version, hw_info->fw_version,
+		sizeof(wiphy->fw_version));
+	wiphy->hw_version = hw_info->hw_version;
+
 	ret = wiphy_register(wiphy);
 	if (ret < 0)
 		goto out;
@@ -935,12 +1021,7 @@ int qtnf_wiphy_register(struct qtnf_hw_info *hw_info, struct qtnf_wmac *mac)
 		ret = regulatory_hint(wiphy, hw_info->rd->alpha2);
 
 out:
-	if (ret) {
-		kfree(iface_comb);
-		return ret;
-	}
-
-	return 0;
+	return ret;
 }
 
 void qtnf_netdev_updown(struct net_device *ndev, bool up)
diff --git a/drivers/net/wireless/quantenna/qtnfmac/cfg80211.h b/drivers/net/wireless/quantenna/qtnfmac/cfg80211.h
index 66db26613b1f..b73425122a10 100644
--- a/drivers/net/wireless/quantenna/qtnfmac/cfg80211.h
+++ b/drivers/net/wireless/quantenna/qtnfmac/cfg80211.h
@@ -28,23 +28,4 @@ void qtnf_band_init_rates(struct ieee80211_supported_band *band);
 void qtnf_band_setup_htvht_caps(struct qtnf_mac_info *macinfo,
 				struct ieee80211_supported_band *band);
 
-static inline void qtnf_scan_done(struct qtnf_wmac *mac, bool aborted)
-{
-	struct cfg80211_scan_info info = {
-		.aborted = aborted,
-	};
-
-	if (timer_pending(&mac->scan_timeout))
-		del_timer_sync(&mac->scan_timeout);
-
-	mutex_lock(&mac->mac_lock);
-
-	if (mac->scan_req) {
-		cfg80211_scan_done(mac->scan_req, &info);
-		mac->scan_req = NULL;
-	}
-
-	mutex_unlock(&mac->mac_lock);
-}
-
 #endif /* _QTN_FMAC_CFG80211_H_ */
diff --git a/drivers/net/wireless/quantenna/qtnfmac/commands.c b/drivers/net/wireless/quantenna/qtnfmac/commands.c
index 8bc8dd637315..deca0060eb27 100644
--- a/drivers/net/wireless/quantenna/qtnfmac/commands.c
+++ b/drivers/net/wireless/quantenna/qtnfmac/commands.c
@@ -162,6 +162,14 @@ static void qtnf_cmd_tlv_ie_set_add(struct sk_buff *cmd_skb, u8 frame_type,
 		memcpy(tlv->ie_data, buf, len);
 }
 
+static inline size_t qtnf_cmd_acl_data_size(const struct cfg80211_acl_data *acl)
+{
+	size_t size = sizeof(struct qlink_acl_data) +
+		      acl->n_acl_entries * sizeof(struct qlink_mac_address);
+
+	return size;
+}
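
qtnf_cmd_acl_data_size() sizes the variable-length ACL payload: a fixed qlink_acl_data header followed by n_acl_entries addresses. A worked example, assuming struct qlink_mac_address is a plain 6-byte MAC (not confirmed by this hunk):

/* ACL with n_acl_entries == 3:
 *   sizeof(struct qlink_acl_data) + 3 * 6 bytes
 * The same size is used for the start-AP TLV below and for the
 * standalone QLINK_CMD_SET_MAC_ACL payload.
 */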
+
 static bool qtnf_cmd_start_ap_can_fit(const struct qtnf_vif *vif,
 				      const struct cfg80211_ap_settings *s)
 {
@@ -178,6 +186,10 @@ static bool qtnf_cmd_start_ap_can_fit(const struct qtnf_vif *vif,
 	if (cfg80211_chandef_valid(&s->chandef))
 		len += sizeof(struct qlink_tlv_chandef);
 
+	if (s->acl)
+		len += sizeof(struct qlink_tlv_hdr) +
+		       qtnf_cmd_acl_data_size(s->acl);
+
 	if (len > (sizeof(struct qlink_cmd) + QTNF_MAX_CMD_BUF_SIZE)) {
 		pr_err("VIF%u.%u: can not fit AP settings: %u\n",
 		       vif->mac->macid, vif->vifid, len);
@@ -203,7 +215,7 @@ int qtnf_cmd_send_start_ap(struct qtnf_vif *vif,
 	cmd_skb = qtnf_cmd_alloc_new_cmdskb(vif->mac->macid, vif->vifid,
 					    QLINK_CMD_START_AP,
 					    sizeof(*cmd));
-	if (unlikely(!cmd_skb))
+	if (!cmd_skb)
 		return -ENOMEM;
 
 	cmd = (struct qlink_cmd_start_ap *)cmd_skb->data;
@@ -247,7 +259,7 @@ int qtnf_cmd_send_start_ap(struct qtnf_vif *vif,
 		chtlv->hdr.type = cpu_to_le16(QTN_TLV_ID_CHANDEF);
 		chtlv->hdr.len = cpu_to_le16(sizeof(*chtlv) -
 					     sizeof(chtlv->hdr));
-		qlink_chandef_cfg2q(&s->chandef, &chtlv->chan);
+		qlink_chandef_cfg2q(&s->chandef, &chtlv->chdef);
 	}
 
 	qtnf_cmd_tlv_ie_set_add(cmd_skb, QLINK_IE_SET_BEACON_HEAD,
@@ -283,6 +295,16 @@ int qtnf_cmd_send_start_ap(struct qtnf_vif *vif,
 		memcpy(tlv->val, s->vht_cap, sizeof(*s->vht_cap));
 	}
 
+	if (s->acl) {
+		size_t acl_size = qtnf_cmd_acl_data_size(s->acl);
+		struct qlink_tlv_hdr *tlv =
+			skb_put(cmd_skb, sizeof(*tlv) + acl_size);
+
+		tlv->type = cpu_to_le16(QTN_TLV_ID_ACL_DATA);
+		tlv->len = cpu_to_le16(acl_size);
+		qlink_acl_data_cfg2q(s->acl, (struct qlink_acl_data *)tlv->val);
+	}
+
 	qtnf_bus_lock(vif->mac->bus);
 
 	ret = qtnf_cmd_send(vif->mac->bus, cmd_skb, &res_code);
@@ -313,7 +335,7 @@ int qtnf_cmd_send_stop_ap(struct qtnf_vif *vif)
 	cmd_skb = qtnf_cmd_alloc_new_cmdskb(vif->mac->macid, vif->vifid,
 					    QLINK_CMD_STOP_AP,
 					    sizeof(struct qlink_cmd));
-	if (unlikely(!cmd_skb))
+	if (!cmd_skb)
 		return -ENOMEM;
 
 	qtnf_bus_lock(vif->mac->bus);
@@ -347,7 +369,7 @@ int qtnf_cmd_send_register_mgmt(struct qtnf_vif *vif, u16 frame_type, bool reg)
 	cmd_skb = qtnf_cmd_alloc_new_cmdskb(vif->mac->macid, vif->vifid,
 					    QLINK_CMD_REGISTER_MGMT,
 					    sizeof(*cmd));
-	if (unlikely(!cmd_skb))
+	if (!cmd_skb)
 		return -ENOMEM;
 
 	qtnf_bus_lock(vif->mac->bus);
@@ -390,7 +412,7 @@ int qtnf_cmd_send_mgmt_frame(struct qtnf_vif *vif, u32 cookie, u16 flags,
 	cmd_skb = qtnf_cmd_alloc_new_cmdskb(vif->mac->macid, vif->vifid,
 					    QLINK_CMD_SEND_MGMT_FRAME,
 					    sizeof(*cmd));
-	if (unlikely(!cmd_skb))
+	if (!cmd_skb)
 		return -ENOMEM;
 
 	qtnf_bus_lock(vif->mac->bus);
@@ -436,7 +458,7 @@ int qtnf_cmd_send_mgmt_set_appie(struct qtnf_vif *vif, u8 frame_type,
 	cmd_skb = qtnf_cmd_alloc_new_cmdskb(vif->mac->macid, vif->vifid,
 					    QLINK_CMD_MGMT_SET_APPIE,
 					    sizeof(struct qlink_cmd));
-	if (unlikely(!cmd_skb))
+	if (!cmd_skb)
 		return -ENOMEM;
 
 	qtnf_cmd_tlv_ie_set_add(cmd_skb, frame_type, buf, len);
@@ -461,30 +483,8 @@ out:
 }
 
 static void
-qtnf_sta_info_parse_basic_counters(struct station_info *sinfo,
-		const struct qlink_sta_stat_basic_counters *counters)
-{
-	sinfo->filled |= BIT(NL80211_STA_INFO_RX_BYTES) |
-			 BIT(NL80211_STA_INFO_TX_BYTES);
-	sinfo->rx_bytes = get_unaligned_le64(&counters->rx_bytes);
-	sinfo->tx_bytes = get_unaligned_le64(&counters->tx_bytes);
-
-	sinfo->filled |= BIT(NL80211_STA_INFO_RX_PACKETS) |
-			 BIT(NL80211_STA_INFO_TX_PACKETS) |
-			 BIT(NL80211_STA_INFO_BEACON_RX);
-	sinfo->rx_packets = get_unaligned_le32(&counters->rx_packets);
-	sinfo->tx_packets = get_unaligned_le32(&counters->tx_packets);
-	sinfo->rx_beacon = get_unaligned_le64(&counters->rx_beacons);
-
-	sinfo->filled |= BIT(NL80211_STA_INFO_RX_DROP_MISC) |
-			 BIT(NL80211_STA_INFO_TX_FAILED);
-	sinfo->rx_dropped_misc = get_unaligned_le32(&counters->rx_dropped);
-	sinfo->tx_failed = get_unaligned_le32(&counters->tx_failed);
-}
-
-static void
 qtnf_sta_info_parse_rate(struct rate_info *rate_dst,
-			 const struct  qlink_sta_info_rate *rate_src)
+			 const struct qlink_sta_info_rate *rate_src)
 {
 	rate_dst->legacy = get_unaligned_le16(&rate_src->rate) * 10;
 
@@ -493,22 +493,23 @@ qtnf_sta_info_parse_rate(struct rate_info *rate_dst,
 	rate_dst->flags = 0;
 
 	switch (rate_src->bw) {
-	case QLINK_STA_INFO_RATE_BW_5:
+	case QLINK_CHAN_WIDTH_5:
 		rate_dst->bw = RATE_INFO_BW_5;
 		break;
-	case QLINK_STA_INFO_RATE_BW_10:
+	case QLINK_CHAN_WIDTH_10:
 		rate_dst->bw = RATE_INFO_BW_10;
 		break;
-	case QLINK_STA_INFO_RATE_BW_20:
+	case QLINK_CHAN_WIDTH_20:
+	case QLINK_CHAN_WIDTH_20_NOHT:
 		rate_dst->bw = RATE_INFO_BW_20;
 		break;
-	case QLINK_STA_INFO_RATE_BW_40:
+	case QLINK_CHAN_WIDTH_40:
 		rate_dst->bw = RATE_INFO_BW_40;
 		break;
-	case QLINK_STA_INFO_RATE_BW_80:
+	case QLINK_CHAN_WIDTH_80:
 		rate_dst->bw = RATE_INFO_BW_80;
 		break;
-	case QLINK_STA_INFO_RATE_BW_160:
+	case QLINK_CHAN_WIDTH_160:
 		rate_dst->bw = RATE_INFO_BW_160;
 		break;
 	default:
@@ -578,87 +579,125 @@ qtnf_sta_info_parse_flags(struct nl80211_sta_flag_update *dst,
 }
 
 static void
-qtnf_sta_info_parse_generic_info(struct station_info *sinfo,
-				 const struct qlink_sta_info_generic *info)
+qtnf_cmd_sta_info_parse(struct station_info *sinfo,
+			const struct qlink_tlv_hdr *tlv,
+			size_t resp_size)
 {
-	sinfo->filled |= BIT(NL80211_STA_INFO_CONNECTED_TIME) |
-			 BIT(NL80211_STA_INFO_INACTIVE_TIME);
-	sinfo->connected_time = get_unaligned_le32(&info->connected_time);
-	sinfo->inactive_time = get_unaligned_le32(&info->inactive_time);
+	const struct qlink_sta_stats *stats = NULL;
+	const u8 *map = NULL;
+	unsigned int map_len = 0;
+	unsigned int stats_len = 0;
+	u16 tlv_len;
+
+#define qtnf_sta_stat_avail(stat_name, bitn)	\
+	(qtnf_utils_is_bit_set(map, bitn, map_len) && \
+	 (offsetofend(struct qlink_sta_stats, stat_name) <= stats_len))
+
+	while (resp_size >= sizeof(*tlv)) {
+		tlv_len = le16_to_cpu(tlv->len);
+
+		switch (le16_to_cpu(tlv->type)) {
+		case QTN_TLV_ID_STA_STATS_MAP:
+			map_len = tlv_len;
+			map = tlv->val;
+			break;
+		case QTN_TLV_ID_STA_STATS:
+			stats_len = tlv_len;
+			stats = (const struct qlink_sta_stats *)tlv->val;
+			break;
+		default:
+			break;
+		}
 
-	sinfo->filled |= BIT(NL80211_STA_INFO_SIGNAL) |
-			 BIT(NL80211_STA_INFO_SIGNAL_AVG);
-	sinfo->signal = info->rssi - 120;
-	sinfo->signal_avg = info->rssi_avg - QLINK_RSSI_OFFSET;
+		resp_size -= tlv_len + sizeof(*tlv);
+		tlv = (const struct qlink_tlv_hdr *)(tlv->val + tlv_len);
+	}
 
-	if (info->rx_rate.rate) {
+	if (!map || !stats)
+		return;
+
+	if (qtnf_sta_stat_avail(inactive_time, QLINK_STA_INFO_INACTIVE_TIME)) {
+		sinfo->filled |= BIT(NL80211_STA_INFO_INACTIVE_TIME);
+		sinfo->inactive_time = le32_to_cpu(stats->inactive_time);
+	}
+
+	if (qtnf_sta_stat_avail(connected_time,
+				QLINK_STA_INFO_CONNECTED_TIME)) {
+		sinfo->filled |= BIT(NL80211_STA_INFO_CONNECTED_TIME);
+		sinfo->connected_time = le32_to_cpu(stats->connected_time);
+	}
+
+	if (qtnf_sta_stat_avail(signal, QLINK_STA_INFO_SIGNAL)) {
+		sinfo->filled |= BIT(NL80211_STA_INFO_SIGNAL);
+		sinfo->signal = stats->signal - QLINK_RSSI_OFFSET;
+	}
+
+	if (qtnf_sta_stat_avail(signal_avg, QLINK_STA_INFO_SIGNAL_AVG)) {
+		sinfo->filled |= BIT(NL80211_STA_INFO_SIGNAL_AVG);
+		sinfo->signal_avg = stats->signal_avg - QLINK_RSSI_OFFSET;
+	}
+
+	if (qtnf_sta_stat_avail(rxrate, QLINK_STA_INFO_RX_BITRATE)) {
 		sinfo->filled |= BIT(NL80211_STA_INFO_RX_BITRATE);
-		qtnf_sta_info_parse_rate(&sinfo->rxrate, &info->rx_rate);
+		qtnf_sta_info_parse_rate(&sinfo->rxrate, &stats->rxrate);
 	}
 
-	if (info->tx_rate.rate) {
+	if (qtnf_sta_stat_avail(txrate, QLINK_STA_INFO_TX_BITRATE)) {
 		sinfo->filled |= BIT(NL80211_STA_INFO_TX_BITRATE);
-		qtnf_sta_info_parse_rate(&sinfo->txrate, &info->tx_rate);
+		qtnf_sta_info_parse_rate(&sinfo->txrate, &stats->txrate);
 	}
 
-	sinfo->filled |= BIT(NL80211_STA_INFO_STA_FLAGS);
-	qtnf_sta_info_parse_flags(&sinfo->sta_flags, &info->state);
-}
+	if (qtnf_sta_stat_avail(sta_flags, QLINK_STA_INFO_STA_FLAGS)) {
+		sinfo->filled |= BIT(NL80211_STA_INFO_STA_FLAGS);
+		qtnf_sta_info_parse_flags(&sinfo->sta_flags, &stats->sta_flags);
+	}
 
-static int qtnf_cmd_sta_info_parse(struct station_info *sinfo,
-				   const u8 *payload, size_t payload_size)
-{
-	const struct qlink_sta_stat_basic_counters *counters;
-	const struct qlink_sta_info_generic *sta_info;
-	u16 tlv_type;
-	u16 tlv_value_len;
-	size_t tlv_full_len;
-	const struct qlink_tlv_hdr *tlv;
+	if (qtnf_sta_stat_avail(rx_bytes, QLINK_STA_INFO_RX_BYTES)) {
+		sinfo->filled |= BIT(NL80211_STA_INFO_RX_BYTES);
+		sinfo->rx_bytes = le64_to_cpu(stats->rx_bytes);
+	}
 
-	sinfo->filled = 0;
+	if (qtnf_sta_stat_avail(tx_bytes, QLINK_STA_INFO_TX_BYTES)) {
+		sinfo->filled |= BIT(NL80211_STA_INFO_TX_BYTES);
+		sinfo->tx_bytes = le64_to_cpu(stats->tx_bytes);
+	}
 
-	tlv = (const struct qlink_tlv_hdr *)payload;
-	while (payload_size >= sizeof(struct qlink_tlv_hdr)) {
-		tlv_type = le16_to_cpu(tlv->type);
-		tlv_value_len = le16_to_cpu(tlv->len);
-		tlv_full_len = tlv_value_len + sizeof(struct qlink_tlv_hdr);
-		if (tlv_full_len > payload_size) {
-			pr_warn("malformed TLV 0x%.2X; LEN: %u\n",
-				tlv_type, tlv_value_len);
-			return -EINVAL;
-		}
-		switch (tlv_type) {
-		case QTN_TLV_ID_STA_BASIC_COUNTERS:
-			if (unlikely(tlv_value_len < sizeof(*counters))) {
-				pr_err("invalid TLV size %.4X: %u\n",
-				       tlv_type, tlv_value_len);
-				break;
-			}
+	if (qtnf_sta_stat_avail(rx_bytes, QLINK_STA_INFO_RX_BYTES64)) {
+		sinfo->filled |= BIT(NL80211_STA_INFO_RX_BYTES64);
+		sinfo->rx_bytes = le64_to_cpu(stats->rx_bytes);
+	}
 
-			counters = (void *)tlv->val;
-			qtnf_sta_info_parse_basic_counters(sinfo, counters);
-			break;
-		case QTN_TLV_ID_STA_GENERIC_INFO:
-			if (unlikely(tlv_value_len < sizeof(*sta_info)))
-				break;
+	if (qtnf_sta_stat_avail(tx_bytes, QLINK_STA_INFO_TX_BYTES64)) {
+		sinfo->filled |= BIT(NL80211_STA_INFO_TX_BYTES64);
+		sinfo->tx_bytes = le64_to_cpu(stats->tx_bytes);
+	}
 
-			sta_info = (void *)tlv->val;
-			qtnf_sta_info_parse_generic_info(sinfo, sta_info);
-			break;
-		default:
-			pr_warn("unexpected TLV type: %.4X\n", tlv_type);
-			break;
-		}
-		payload_size -= tlv_full_len;
-		tlv = (struct qlink_tlv_hdr *)(tlv->val + tlv_value_len);
+	if (qtnf_sta_stat_avail(rx_packets, QLINK_STA_INFO_RX_PACKETS)) {
+		sinfo->filled |= BIT(NL80211_STA_INFO_RX_PACKETS);
+		sinfo->rx_packets = le32_to_cpu(stats->rx_packets);
 	}
 
-	if (payload_size) {
-		pr_warn("malformed TLV buf; bytes left: %zu\n", payload_size);
-		return -EINVAL;
+	if (qtnf_sta_stat_avail(tx_packets, QLINK_STA_INFO_TX_PACKETS)) {
+		sinfo->filled |= BIT(NL80211_STA_INFO_TX_PACKETS);
+		sinfo->tx_packets = le32_to_cpu(stats->tx_packets);
 	}
 
-	return 0;
+	if (qtnf_sta_stat_avail(rx_beacon, QLINK_STA_INFO_BEACON_RX)) {
+		sinfo->filled |= BIT(NL80211_STA_INFO_BEACON_RX);
+		sinfo->rx_beacon = le64_to_cpu(stats->rx_beacon);
+	}
+
+	if (qtnf_sta_stat_avail(rx_dropped_misc, QLINK_STA_INFO_RX_DROP_MISC)) {
+		sinfo->filled |= BIT(NL80211_STA_INFO_RX_DROP_MISC);
+		sinfo->rx_dropped_misc = le32_to_cpu(stats->rx_dropped_misc);
+	}
+
+	if (qtnf_sta_stat_avail(tx_failed, QLINK_STA_INFO_TX_FAILED)) {
+		sinfo->filled |= BIT(NL80211_STA_INFO_TX_FAILED);
+		sinfo->tx_failed = le32_to_cpu(stats->tx_failed);
+	}
+
+#undef qtnf_sta_stat_avail
 }
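
Each field in the new parser is gated twice: the firmware's bitmap must flag the statistic as valid, and the received structure must be long enough to contain it, so older firmware with a shorter qlink_sta_stats still parses safely. The same idiom as a standalone sketch:

/* "Is field f of versioned wire struct S present?" (sketch;
 * qtnf_utils_is_bit_set() is the driver's bitmap helper):
 */
#define FIELD_AVAIL(S, f, map, map_len, data_len, bit)	\
	(qtnf_utils_is_bit_set(map, bit, map_len) &&	\
	 offsetofend(S, f) <= (data_len))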
 
 int qtnf_cmd_get_sta_info(struct qtnf_vif *vif, const u8 *sta_mac,
@@ -674,8 +713,7 @@ int qtnf_cmd_get_sta_info(struct qtnf_vif *vif, const u8 *sta_mac,
 	cmd_skb = qtnf_cmd_alloc_new_cmdskb(vif->mac->macid, vif->vifid,
 					    QLINK_CMD_GET_STA_INFO,
 					    sizeof(*cmd));
-
-	if (unlikely(!cmd_skb))
+	if (!cmd_skb)
 		return -ENOMEM;
 
 	qtnf_bus_lock(vif->mac->bus);
@@ -715,7 +753,9 @@ int qtnf_cmd_get_sta_info(struct qtnf_vif *vif, const u8 *sta_mac,
 		goto out;
 	}
 
-	ret = qtnf_cmd_sta_info_parse(sinfo, resp->info, var_resp_len);
+	qtnf_cmd_sta_info_parse(sinfo,
+				(const struct qlink_tlv_hdr *)resp->info,
+				var_resp_len);
 
 out:
 	qtnf_bus_unlock(vif->mac->bus);
@@ -738,7 +778,7 @@ static int qtnf_cmd_send_add_change_intf(struct qtnf_vif *vif,
 	cmd_skb = qtnf_cmd_alloc_new_cmdskb(vif->mac->macid, vif->vifid,
 					    cmd_type,
 					    sizeof(*cmd));
-	if (unlikely(!cmd_skb))
+	if (!cmd_skb)
 		return -ENOMEM;
 
 	qtnf_bus_lock(vif->mac->bus);
@@ -811,7 +851,7 @@ int qtnf_cmd_send_del_intf(struct qtnf_vif *vif)
 	cmd_skb = qtnf_cmd_alloc_new_cmdskb(vif->mac->macid, vif->vifid,
 					    QLINK_CMD_DEL_INTF,
 					    sizeof(*cmd));
-	if (unlikely(!cmd_skb))
+	if (!cmd_skb)
 		return -ENOMEM;
 
 	qtnf_bus_lock(vif->mac->bus);
@@ -908,6 +948,16 @@ qtnf_cmd_resp_proc_hw_info(struct qtnf_bus *bus,
 	struct qtnf_hw_info *hwinfo = &bus->hw_info;
 	const struct qlink_tlv_hdr *tlv;
 	const struct qlink_tlv_reg_rule *tlv_rule;
+	const char *bld_name = NULL;
+	const char *bld_rev = NULL;
+	const char *bld_type = NULL;
+	const char *bld_label = NULL;
+	u32 bld_tmstamp = 0;
+	u32 plat_id = 0;
+	const char *hw_id = NULL;
+	const char *calibration_ver = NULL;
+	const char *uboot_ver = NULL;
+	u32 hw_ver = 0;
 	struct ieee80211_reg_rule *rule;
 	u16 tlv_type;
 	u16 tlv_value_len;
@@ -934,6 +984,10 @@ qtnf_cmd_resp_proc_hw_info(struct qtnf_bus *bus,
 	hwinfo->rd->alpha2[0] = resp->alpha2[0];
 	hwinfo->rd->alpha2[1] = resp->alpha2[1];
 
+	bld_tmstamp = le32_to_cpu(resp->bld_tmstamp);
+	plat_id = le32_to_cpu(resp->plat_id);
+	hw_ver = le32_to_cpu(resp->hw_ver);
+
 	switch (resp->dfs_region) {
 	case QLINK_DFS_FCC:
 		hwinfo->rd->dfs_region = NL80211_DFS_FCC;
@@ -994,6 +1048,27 @@ qtnf_cmd_resp_proc_hw_info(struct qtnf_bus *bus,
 			rule->flags = qtnf_cmd_resp_reg_rule_flags_parse(
 					le32_to_cpu(tlv_rule->flags));
 			break;
+		case QTN_TLV_ID_BUILD_NAME:
+			bld_name = (const void *)tlv->val;
+			break;
+		case QTN_TLV_ID_BUILD_REV:
+			bld_rev = (const void *)tlv->val;
+			break;
+		case QTN_TLV_ID_BUILD_TYPE:
+			bld_type = (const void *)tlv->val;
+			break;
+		case QTN_TLV_ID_BUILD_LABEL:
+			bld_label = (const void *)tlv->val;
+			break;
+		case QTN_TLV_ID_HW_ID:
+			hw_id = (const void *)tlv->val;
+			break;
+		case QTN_TLV_ID_CALIBRATION_VER:
+			calibration_ver = (const void *)tlv->val;
+			break;
+		case QTN_TLV_ID_UBOOT_VER:
+			uboot_ver = (const void *)tlv->val;
+			break;
 		default:
 			break;
 		}
@@ -1016,21 +1091,46 @@ qtnf_cmd_resp_proc_hw_info(struct qtnf_bus *bus,
 		hwinfo->total_tx_chain, hwinfo->total_rx_chain,
 		hwinfo->hw_capab);
 
+	pr_info("\nBuild name:            %s"  \
+		"\nBuild revision:        %s"  \
+		"\nBuild type:            %s"  \
+		"\nBuild label:           %s"  \
+		"\nBuild timestamp:       %lu" \
+		"\nPlatform ID:           %lu" \
+		"\nHardware ID:           %s"  \
+		"\nCalibration version:   %s"  \
+		"\nU-Boot version:        %s"  \
+		"\nHardware version:      0x%08x",
+		bld_name, bld_rev, bld_type, bld_label,
+		(unsigned long)bld_tmstamp,
+		(unsigned long)plat_id,
+		hw_id, calibration_ver, uboot_ver, hw_ver);
+
+	strlcpy(hwinfo->fw_version, bld_label, sizeof(hwinfo->fw_version));
+	hwinfo->hw_version = hw_ver;
+
 	return 0;
 }
 
 static int qtnf_parse_variable_mac_info(struct qtnf_wmac *mac,
 					const u8 *tlv_buf, size_t tlv_buf_size)
 {
-	struct ieee80211_iface_limit *limits = NULL;
-	const struct qlink_iface_limit *limit_record;
-	size_t record_count = 0, rec = 0;
-	u16 tlv_type, tlv_value_len;
-	struct qlink_iface_comb_num *comb;
+	struct ieee80211_iface_combination *comb = NULL;
+	size_t n_comb = 0;
+	struct ieee80211_iface_limit *limits;
+	const struct qlink_iface_comb_num *comb_num;
+	const struct qlink_iface_limit_record *rec;
+	const struct qlink_iface_limit *lim;
+	u16 rec_len;
+	u16 tlv_type;
+	u16 tlv_value_len;
 	size_t tlv_full_len;
 	const struct qlink_tlv_hdr *tlv;
-
-	mac->macinfo.n_limits = 0;
+	u8 *ext_capa = NULL;
+	u8 *ext_capa_mask = NULL;
+	u8 ext_capa_len = 0;
+	u8 ext_capa_mask_len = 0;
+	int i = 0;
 
 	tlv = (const struct qlink_tlv_hdr *)tlv_buf;
 	while (tlv_buf_size >= sizeof(struct qlink_tlv_hdr)) {
@@ -1045,52 +1145,89 @@ static int qtnf_parse_variable_mac_info(struct qtnf_wmac *mac,
 
 		switch (tlv_type) {
 		case QTN_TLV_ID_NUM_IFACE_COMB:
-			if (unlikely(tlv_value_len != sizeof(*comb)))
+			if (tlv_value_len != sizeof(*comb_num))
 				return -EINVAL;
 
-			comb = (void *)tlv->val;
-			record_count = le16_to_cpu(comb->iface_comb_num);
+			comb_num = (void *)tlv->val;
+
+			/* free earlier iface comb memory */
+			qtnf_mac_iface_comb_free(mac);
 
-			mac->macinfo.n_limits = record_count;
-			/* free earlier iface limits memory */
-			kfree(mac->macinfo.limits);
-			mac->macinfo.limits =
-				kzalloc(sizeof(*mac->macinfo.limits) *
-					record_count, GFP_KERNEL);
+			mac->macinfo.n_if_comb =
+				le32_to_cpu(comb_num->iface_comb_num);
 
-			if (unlikely(!mac->macinfo.limits))
+			mac->macinfo.if_comb =
+				kcalloc(mac->macinfo.n_if_comb,
+					sizeof(*mac->macinfo.if_comb),
+					GFP_KERNEL);
+
+			if (!mac->macinfo.if_comb)
 				return -ENOMEM;
 
-			limits = mac->macinfo.limits;
+			comb = mac->macinfo.if_comb;
+
+			pr_debug("MAC%u: %zu iface combinations\n",
+				 mac->macid, mac->macinfo.n_if_comb);
+
 			break;
 		case QTN_TLV_ID_IFACE_LIMIT:
-			if (unlikely(!limits)) {
-				pr_warn("MAC%u: limits are not inited\n",
+			if (unlikely(!comb)) {
+				pr_warn("MAC%u: no combinations advertised\n",
 					mac->macid);
 				return -EINVAL;
 			}
 
-			if (unlikely(tlv_value_len != sizeof(*limit_record))) {
-				pr_warn("MAC%u: record size mismatch\n",
+			if (n_comb >= mac->macinfo.n_if_comb) {
+				pr_warn("MAC%u: combinations count exceeded\n",
 					mac->macid);
-				return -EINVAL;
+				n_comb++;
+				break;
 			}
 
-			limit_record = (void *)tlv->val;
-			limits[rec].max = le16_to_cpu(limit_record->max_num);
-			limits[rec].types = qlink_iface_type_to_nl_mask(
-				le16_to_cpu(limit_record->type));
+			rec = (void *)tlv->val;
+			rec_len = sizeof(*rec) + rec->n_limits * sizeof(*lim);
+
+			if (unlikely(tlv_value_len != rec_len)) {
+				pr_warn("MAC%u: record %zu size mismatch\n",
+					mac->macid, n_comb);
+				return -EINVAL;
+			}
 
-			/* supported modes: STA, AP */
-			limits[rec].types &= BIT(NL80211_IFTYPE_AP) |
-					     BIT(NL80211_IFTYPE_AP_VLAN) |
-					     BIT(NL80211_IFTYPE_STATION);
+			limits = kcalloc(rec->n_limits, sizeof(*limits),
+					 GFP_KERNEL);
+			if (!limits)
+				return -ENOMEM;
 
-			pr_debug("MAC%u: MAX: %u; TYPES: %.4X\n", mac->macid,
-				 limits[rec].max, limits[rec].types);
+			comb[n_comb].num_different_channels =
+				rec->num_different_channels;
+			comb[n_comb].max_interfaces =
+				le16_to_cpu(rec->max_interfaces);
+			comb[n_comb].n_limits = rec->n_limits;
+			comb[n_comb].limits = limits;
+
+			for (i = 0; i < rec->n_limits; i++) {
+				lim = &rec->limits[i];
+				limits[i].max = le16_to_cpu(lim->max_num);
+				limits[i].types =
+					qlink_iface_type_to_nl_mask(le16_to_cpu(lim->type));
+				pr_debug("MAC%u: comb[%zu]: MAX:%u TYPES:%.4X\n",
+					 mac->macid, n_comb,
+					 limits[i].max, limits[i].types);
+			}
 
-			if (limits[rec].types)
-				rec++;
+			n_comb++;
+			break;
+		case WLAN_EID_EXT_CAPABILITY:
+			if (unlikely(tlv_value_len > U8_MAX))
+				return -EINVAL;
+			ext_capa = (u8 *)tlv->val;
+			ext_capa_len = tlv_value_len;
+			break;
+		case QTN_TLV_ID_EXT_CAPABILITY_MASK:
+			if (unlikely(tlv_value_len > U8_MAX))
+				return -EINVAL;
+			ext_capa_mask = (u8 *)tlv->val;
+			ext_capa_mask_len = tlv_value_len;
 			break;
 		default:
 			break;
@@ -1106,12 +1243,40 @@ static int qtnf_parse_variable_mac_info(struct qtnf_wmac *mac,
 		return -EINVAL;
 	}
 
-	if (mac->macinfo.n_limits != rec) {
+	if (mac->macinfo.n_if_comb != n_comb) {
 		pr_err("MAC%u: combination mismatch: reported=%zu parsed=%zu\n",
-		       mac->macid, mac->macinfo.n_limits, rec);
+		       mac->macid, mac->macinfo.n_if_comb, n_comb);
+		return -EINVAL;
+	}
+
+	if (ext_capa_len != ext_capa_mask_len) {
+		pr_err("MAC%u: ext_capa/_mask lengths mismatch: %u != %u\n",
+		       mac->macid, ext_capa_len, ext_capa_mask_len);
 		return -EINVAL;
 	}
 
+	if (ext_capa_len > 0) {
+		ext_capa = kmemdup(ext_capa, ext_capa_len, GFP_KERNEL);
+		if (!ext_capa)
+			return -ENOMEM;
+
+		ext_capa_mask =
+			kmemdup(ext_capa_mask, ext_capa_mask_len, GFP_KERNEL);
+		if (!ext_capa_mask) {
+			kfree(ext_capa);
+			return -ENOMEM;
+		}
+	} else {
+		ext_capa = NULL;
+		ext_capa_mask = NULL;
+	}
+
+	kfree(mac->macinfo.extended_capabilities);
+	kfree(mac->macinfo.extended_capabilities_mask);
+	mac->macinfo.extended_capabilities = ext_capa;
+	mac->macinfo.extended_capabilities_mask = ext_capa_mask;
+	mac->macinfo.extended_capabilities_len = ext_capa_len;
+
 	return 0;
 }
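
Note the ordering for the extended-capability buffers: the new copies are duplicated first, and the old pointers are freed and replaced only once both kmemdup() calls have succeeded, so an allocation failure leaves the previous data intact. The pattern, reduced:

/* replace-on-success (sketch): */
new = kmemdup(src, len, GFP_KERNEL);
if (!new)
	return -ENOMEM;		/* old buffer untouched */

kfree(mac->macinfo.extended_capabilities);
mac->macinfo.extended_capabilities = new;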
 
@@ -1143,6 +1308,7 @@ qtnf_cmd_resp_proc_mac_info(struct qtnf_wmac *mac,
 	mac_info->radar_detect_widths =
 			qlink_chan_width_mask_to_nl(le16_to_cpu(
 					resp_info->radar_detect_widths));
+	mac_info->max_acl_mac_addrs = le32_to_cpu(resp_info->max_acl_mac_addrs);
 
 	memcpy(&mac_info->ht_cap_mod_mask, &resp_info->ht_cap_mod_mask,
 	       sizeof(mac_info->ht_cap_mod_mask));
@@ -1186,7 +1352,7 @@ qtnf_cmd_resp_fill_band_info(struct ieee80211_supported_band *band,
 	size_t tlv_len;
 	size_t tlv_dlen;
 	const struct qlink_tlv_hdr *tlv;
-	const struct qlink_tlv_channel *qchan;
+	const struct qlink_channel *qchan;
 	struct ieee80211_channel *chan;
 	unsigned int chidx = 0;
 	u32 qflags;
@@ -1232,7 +1398,7 @@ qtnf_cmd_resp_fill_band_info(struct ieee80211_supported_band *band,
 
 		switch (tlv_type) {
 		case QTN_TLV_ID_CHANNEL:
-			if (unlikely(tlv_len != sizeof(*qchan))) {
+			if (unlikely(tlv_dlen != sizeof(*qchan))) {
 				pr_err("invalid channel TLV len %zu\n",
 				       tlv_len);
 				goto error_ret;
@@ -1243,7 +1409,7 @@ qtnf_cmd_resp_fill_band_info(struct ieee80211_supported_band *band,
 				goto error_ret;
 			}
 
-			qchan = (const struct qlink_tlv_channel *)tlv;
+			qchan = (const struct qlink_channel *)tlv->val;
 			chan = &band->channels[chidx++];
 			qflags = le32_to_cpu(qchan->flags);
 
@@ -1490,7 +1656,7 @@ int qtnf_cmd_get_mac_info(struct qtnf_wmac *mac)
 	cmd_skb = qtnf_cmd_alloc_new_cmdskb(mac->macid, QLINK_VIFID_RSVD,
 					    QLINK_CMD_MAC_INFO,
 					    sizeof(struct qlink_cmd));
-	if (unlikely(!cmd_skb))
+	if (!cmd_skb)
 		return -ENOMEM;
 
 	qtnf_bus_lock(mac->bus);
@@ -1528,7 +1694,7 @@ int qtnf_cmd_get_hw_info(struct qtnf_bus *bus)
 	cmd_skb = qtnf_cmd_alloc_new_cmdskb(QLINK_MACID_RSVD, QLINK_VIFID_RSVD,
 					    QLINK_CMD_GET_HW_INFO,
 					    sizeof(struct qlink_cmd));
-	if (unlikely(!cmd_skb))
+	if (!cmd_skb)
 		return -ENOMEM;
 
 	qtnf_bus_lock(bus);
@@ -1708,7 +1874,7 @@ int qtnf_cmd_send_init_fw(struct qtnf_bus *bus)
 	cmd_skb = qtnf_cmd_alloc_new_cmdskb(QLINK_MACID_RSVD, QLINK_VIFID_RSVD,
 					    QLINK_CMD_FW_INIT,
 					    sizeof(struct qlink_cmd));
-	if (unlikely(!cmd_skb))
+	if (!cmd_skb)
 		return -ENOMEM;
 
 	qtnf_bus_lock(bus);
@@ -1757,7 +1923,7 @@ int qtnf_cmd_send_add_key(struct qtnf_vif *vif, u8 key_index, bool pairwise,
 	cmd_skb = qtnf_cmd_alloc_new_cmdskb(vif->mac->macid, vif->vifid,
 					    QLINK_CMD_ADD_KEY,
 					    sizeof(*cmd));
-	if (unlikely(!cmd_skb))
+	if (!cmd_skb)
 		return -ENOMEM;
 
 	qtnf_bus_lock(vif->mac->bus);
@@ -1810,7 +1976,7 @@ int qtnf_cmd_send_del_key(struct qtnf_vif *vif, u8 key_index, bool pairwise,
 	cmd_skb = qtnf_cmd_alloc_new_cmdskb(vif->mac->macid, vif->vifid,
 					    QLINK_CMD_DEL_KEY,
 					    sizeof(*cmd));
-	if (unlikely(!cmd_skb))
+	if (!cmd_skb)
 		return -ENOMEM;
 
 	qtnf_bus_lock(vif->mac->bus);
@@ -1851,7 +2017,7 @@ int qtnf_cmd_send_set_default_key(struct qtnf_vif *vif, u8 key_index,
 	cmd_skb = qtnf_cmd_alloc_new_cmdskb(vif->mac->macid, vif->vifid,
 					    QLINK_CMD_SET_DEFAULT_KEY,
 					    sizeof(*cmd));
-	if (unlikely(!cmd_skb))
+	if (!cmd_skb)
 		return -ENOMEM;
 
 	qtnf_bus_lock(vif->mac->bus);
@@ -1886,7 +2052,7 @@ int qtnf_cmd_send_set_default_mgmt_key(struct qtnf_vif *vif, u8 key_index)
 	cmd_skb = qtnf_cmd_alloc_new_cmdskb(vif->mac->macid, vif->vifid,
 					    QLINK_CMD_SET_DEFAULT_MGMT_KEY,
 					    sizeof(*cmd));
-	if (unlikely(!cmd_skb))
+	if (!cmd_skb)
 		return -ENOMEM;
 
 	qtnf_bus_lock(vif->mac->bus);
@@ -1941,28 +2107,24 @@ int qtnf_cmd_send_change_sta(struct qtnf_vif *vif, const u8 *mac,
 	cmd_skb = qtnf_cmd_alloc_new_cmdskb(vif->mac->macid, vif->vifid,
 					    QLINK_CMD_CHANGE_STA,
 					    sizeof(*cmd));
-	if (unlikely(!cmd_skb))
+	if (!cmd_skb)
 		return -ENOMEM;
 
 	qtnf_bus_lock(vif->mac->bus);
 
 	cmd = (struct qlink_cmd_change_sta *)cmd_skb->data;
 	ether_addr_copy(cmd->sta_addr, mac);
+	cmd->flag_update.mask =
+		cpu_to_le32(qtnf_encode_sta_flags(params->sta_flags_mask));
+	cmd->flag_update.value =
+		cpu_to_le32(qtnf_encode_sta_flags(params->sta_flags_set));
 
 	switch (vif->wdev.iftype) {
 	case NL80211_IFTYPE_AP:
 		cmd->if_type = cpu_to_le16(QLINK_IFTYPE_AP);
-		cmd->sta_flags_mask = cpu_to_le32(qtnf_encode_sta_flags(
-						  params->sta_flags_mask));
-		cmd->sta_flags_set = cpu_to_le32(qtnf_encode_sta_flags(
-						 params->sta_flags_set));
 		break;
 	case NL80211_IFTYPE_STATION:
 		cmd->if_type = cpu_to_le16(QLINK_IFTYPE_STATION);
-		cmd->sta_flags_mask = cpu_to_le32(qtnf_encode_sta_flags(
-						  params->sta_flags_mask));
-		cmd->sta_flags_set = cpu_to_le32(qtnf_encode_sta_flags(
-						 params->sta_flags_set));
 		break;
 	default:
 		pr_err("unsupported iftype %d\n", vif->wdev.iftype);
@@ -1997,7 +2159,7 @@ int qtnf_cmd_send_del_sta(struct qtnf_vif *vif,
 	cmd_skb = qtnf_cmd_alloc_new_cmdskb(vif->mac->macid, vif->vifid,
 					    QLINK_CMD_DEL_STA,
 					    sizeof(*cmd));
-	if (unlikely(!cmd_skb))
+	if (!cmd_skb)
 		return -ENOMEM;
 
 	qtnf_bus_lock(vif->mac->bus);
@@ -2037,8 +2199,8 @@ static void qtnf_cmd_channel_tlv_add(struct sk_buff *cmd_skb,
 	qchan = skb_put_zero(cmd_skb, sizeof(*qchan));
 	qchan->hdr.type = cpu_to_le16(QTN_TLV_ID_CHANNEL);
 	qchan->hdr.len = cpu_to_le16(sizeof(*qchan) - sizeof(qchan->hdr));
-	qchan->center_freq = cpu_to_le16(sc->center_freq);
-	qchan->hw_value = cpu_to_le16(sc->hw_value);
+	qchan->chan.center_freq = cpu_to_le16(sc->center_freq);
+	qchan->chan.hw_value = cpu_to_le16(sc->hw_value);
 
 	if (sc->flags & IEEE80211_CHAN_NO_IR)
 		flags |= QLINK_CHAN_NO_IR;
@@ -2046,7 +2208,7 @@ static void qtnf_cmd_channel_tlv_add(struct sk_buff *cmd_skb,
 	if (sc->flags & IEEE80211_CHAN_RADAR)
 		flags |= QLINK_CHAN_RADAR;
 
-	qchan->flags = cpu_to_le32(flags);
+	qchan->chan.flags = cpu_to_le32(flags);
 }
 
 int qtnf_cmd_send_scan(struct qtnf_wmac *mac)
@@ -2067,7 +2229,7 @@ int qtnf_cmd_send_scan(struct qtnf_wmac *mac)
 	cmd_skb = qtnf_cmd_alloc_new_cmdskb(mac->macid, QLINK_VIFID_RSVD,
 					    QLINK_CMD_SCAN,
 					    sizeof(struct qlink_cmd));
-	if (unlikely(!cmd_skb))
+	if (!cmd_skb)
 		return -ENOMEM;
 
 	qtnf_bus_lock(mac->bus);
@@ -2137,7 +2299,7 @@ int qtnf_cmd_send_connect(struct qtnf_vif *vif,
 	cmd_skb = qtnf_cmd_alloc_new_cmdskb(vif->mac->macid, vif->vifid,
 					    QLINK_CMD_CONNECT,
 					    sizeof(*cmd));
-	if (unlikely(!cmd_skb))
+	if (!cmd_skb)
 		return -ENOMEM;
 
 	cmd = (struct qlink_cmd_connect *)cmd_skb->data;
@@ -2239,7 +2401,7 @@ int qtnf_cmd_send_disconnect(struct qtnf_vif *vif, u16 reason_code)
 	cmd_skb = qtnf_cmd_alloc_new_cmdskb(vif->mac->macid, vif->vifid,
 					    QLINK_CMD_DISCONNECT,
 					    sizeof(*cmd));
-	if (unlikely(!cmd_skb))
+	if (!cmd_skb)
 		return -ENOMEM;
 
 	qtnf_bus_lock(vif->mac->bus);
@@ -2273,7 +2435,7 @@ int qtnf_cmd_send_updown_intf(struct qtnf_vif *vif, bool up)
 	cmd_skb = qtnf_cmd_alloc_new_cmdskb(vif->mac->macid, vif->vifid,
 					    QLINK_CMD_UPDOWN_INTF,
 					    sizeof(*cmd));
-	if (unlikely(!cmd_skb))
+	if (!cmd_skb)
 		return -ENOMEM;
 
 	cmd = (struct qlink_cmd_updown *)cmd_skb->data;
@@ -2434,8 +2596,7 @@ int qtnf_cmd_send_chan_switch(struct qtnf_vif *vif,
 	cmd_skb = qtnf_cmd_alloc_new_cmdskb(mac->macid, vif->vifid,
 					    QLINK_CMD_CHAN_SWITCH,
 					    sizeof(*cmd));
-
-	if (unlikely(!cmd_skb))
+	if (!cmd_skb)
 		return -ENOMEM;
 
 	qtnf_bus_lock(mac->bus);
@@ -2487,7 +2648,7 @@ int qtnf_cmd_get_channel(struct qtnf_vif *vif, struct cfg80211_chan_def *chdef)
 	cmd_skb = qtnf_cmd_alloc_new_cmdskb(vif->mac->macid, vif->vifid,
 					    QLINK_CMD_CHAN_GET,
 					    sizeof(struct qlink_cmd));
-	if (unlikely(!cmd_skb))
+	if (!cmd_skb)
 		return -ENOMEM;
 
 	qtnf_bus_lock(bus);
@@ -2512,3 +2673,83 @@ out:
 	consume_skb(resp_skb);
 	return ret;
 }
+
+int qtnf_cmd_start_cac(const struct qtnf_vif *vif,
+		       const struct cfg80211_chan_def *chdef,
+		       u32 cac_time_ms)
+{
+	struct qtnf_bus *bus = vif->mac->bus;
+	struct sk_buff *cmd_skb;
+	struct qlink_cmd_start_cac *cmd;
+	int ret;
+	u16 res_code;
+
+	cmd_skb = qtnf_cmd_alloc_new_cmdskb(vif->mac->macid, vif->vifid,
+					    QLINK_CMD_START_CAC,
+					    sizeof(*cmd));
+	if (!cmd_skb)
+		return -ENOMEM;
+
+	cmd = (struct qlink_cmd_start_cac *)cmd_skb->data;
+	cmd->cac_time_ms = cpu_to_le32(cac_time_ms);
+	qlink_chandef_cfg2q(chdef, &cmd->chan);
+
+	qtnf_bus_lock(bus);
+	ret = qtnf_cmd_send(bus, cmd_skb, &res_code);
+	qtnf_bus_unlock(bus);
+
+	if (ret)
+		return ret;
+
+	switch (res_code) {
+	case QLINK_CMD_RESULT_OK:
+		break;
+	default:
+		ret = -EOPNOTSUPP;
+		break;
+	}
+
+	return ret;
+}
+
+int qtnf_cmd_set_mac_acl(const struct qtnf_vif *vif,
+			 const struct cfg80211_acl_data *params)
+{
+	struct qtnf_bus *bus = vif->mac->bus;
+	struct sk_buff *cmd_skb;
+	struct qlink_tlv_hdr *tlv;
+	size_t acl_size = qtnf_cmd_acl_data_size(params);
+	u16 res_code;
+	int ret;
+
+	cmd_skb = qtnf_cmd_alloc_new_cmdskb(vif->mac->macid, vif->vifid,
+					    QLINK_CMD_SET_MAC_ACL,
+					    sizeof(struct qlink_cmd));
+	if (!cmd_skb)
+		return -ENOMEM;
+
+	tlv = skb_put(cmd_skb, sizeof(*tlv) + acl_size);
+	tlv->type = cpu_to_le16(QTN_TLV_ID_ACL_DATA);
+	tlv->len = cpu_to_le16(acl_size);
+	qlink_acl_data_cfg2q(params, (struct qlink_acl_data *)tlv->val);
+
+	qtnf_bus_lock(bus);
+	ret = qtnf_cmd_send(bus, cmd_skb, &res_code);
+	qtnf_bus_unlock(bus);
+
+	if (ret)
+		return ret;
+
+	switch (res_code) {
+	case QLINK_CMD_RESULT_OK:
+		break;
+	case QLINK_CMD_RESULT_INVALID:
+		ret = -EINVAL;
+		break;
+	default:
+		ret = -EOPNOTSUPP;
+		break;
+	}
+
+	return ret;
+}
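
Both new commands map the firmware's QLINK result code onto an errno, with unrecognized codes surfacing as -EOPNOTSUPP so cfg80211 treats the operation as unsupported:

/* QLINK result -> errno, as implemented above:
 *   QLINK_CMD_RESULT_OK       ->  0
 *   QLINK_CMD_RESULT_INVALID  -> -EINVAL      (SET_MAC_ACL only)
 *   anything else             -> -EOPNOTSUPP
 */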
diff --git a/drivers/net/wireless/quantenna/qtnfmac/commands.h b/drivers/net/wireless/quantenna/qtnfmac/commands.h
index d981a76e5835..69a7d56f7e58 100644
--- a/drivers/net/wireless/quantenna/qtnfmac/commands.h
+++ b/drivers/net/wireless/quantenna/qtnfmac/commands.h
@@ -76,5 +76,10 @@ int qtnf_cmd_get_chan_stats(struct qtnf_wmac *mac, u16 channel,
 int qtnf_cmd_send_chan_switch(struct qtnf_vif *vif,
 			      struct cfg80211_csa_settings *params);
 int qtnf_cmd_get_channel(struct qtnf_vif *vif, struct cfg80211_chan_def *chdef);
+int qtnf_cmd_start_cac(const struct qtnf_vif *vif,
+		       const struct cfg80211_chan_def *chdef,
+		       u32 cac_time_ms);
+int qtnf_cmd_set_mac_acl(const struct qtnf_vif *vif,
+			 const struct cfg80211_acl_data *params);
 
 #endif /* QLINK_COMMANDS_H_ */
diff --git a/drivers/net/wireless/quantenna/qtnfmac/core.c b/drivers/net/wireless/quantenna/qtnfmac/core.c
index 3423dc51198b..cf26c15a84f8 100644
--- a/drivers/net/wireless/quantenna/qtnfmac/core.c
+++ b/drivers/net/wireless/quantenna/qtnfmac/core.c
@@ -119,9 +119,38 @@ qtnf_netdev_hard_start_xmit(struct sk_buff *skb, struct net_device *ndev)
 
 /* Netdev handler for getting stats.
  */
-static struct net_device_stats *qtnf_netdev_get_stats(struct net_device *dev)
+static void qtnf_netdev_get_stats64(struct net_device *ndev,
+				    struct rtnl_link_stats64 *stats)
 {
-	return &dev->stats;
+	struct qtnf_vif *vif = qtnf_netdev_get_priv(ndev);
+	unsigned int start;
+	int cpu;
+
+	netdev_stats_to_stats64(stats, &ndev->stats);
+
+	if (!vif->stats64)
+		return;
+
+	for_each_possible_cpu(cpu) {
+		struct pcpu_sw_netstats *stats64;
+		u64 rx_packets, rx_bytes;
+		u64 tx_packets, tx_bytes;
+
+		stats64 = per_cpu_ptr(vif->stats64, cpu);
+
+		do {
+			start = u64_stats_fetch_begin_irq(&stats64->syncp);
+			rx_packets = stats64->rx_packets;
+			rx_bytes = stats64->rx_bytes;
+			tx_packets = stats64->tx_packets;
+			tx_bytes = stats64->tx_bytes;
+		} while (u64_stats_fetch_retry_irq(&stats64->syncp, start));
+
+		stats->rx_packets += rx_packets;
+		stats->rx_bytes += rx_bytes;
+		stats->tx_packets += tx_packets;
+		stats->tx_bytes += tx_bytes;
+	}
 }
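
The reader above sums per-CPU counters inside a u64_stats retry loop; the matching writers are qtnf_update_rx_stats()/qtnf_update_tx_stats() later in this patch. Reduced to its core, the pairing is:

/* writer, per packet, on the local CPU */
u64_stats_update_begin(&s->syncp);
s->rx_packets++;
s->rx_bytes += len;
u64_stats_update_end(&s->syncp);

/* reader, any CPU: retry if a writer was active meanwhile */
do {
	start = u64_stats_fetch_begin_irq(&s->syncp);
	packets = s->rx_packets;
	bytes = s->rx_bytes;
} while (u64_stats_fetch_retry_irq(&s->syncp, start));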
 
 /* Netdev handler for transmission timeout.
@@ -156,7 +185,7 @@ const struct net_device_ops qtnf_netdev_ops = {
 	.ndo_stop = qtnf_netdev_close,
 	.ndo_start_xmit = qtnf_netdev_hard_start_xmit,
 	.ndo_tx_timeout = qtnf_netdev_tx_timeout,
-	.ndo_get_stats = qtnf_netdev_get_stats,
+	.ndo_get_stats64 = qtnf_netdev_get_stats64,
 };
 
 static int qtnf_mac_init_single_band(struct wiphy *wiphy,
@@ -233,6 +262,23 @@ struct qtnf_vif *qtnf_mac_get_base_vif(struct qtnf_wmac *mac)
 	return vif;
 }
 
+void qtnf_mac_iface_comb_free(struct qtnf_wmac *mac)
+{
+	struct ieee80211_iface_combination *comb;
+	int i;
+
+	if (mac->macinfo.if_comb) {
+		for (i = 0; i < mac->macinfo.n_if_comb; i++) {
+			comb = &mac->macinfo.if_comb[i];
+			kfree(comb->limits);
+			comb->limits = NULL;
+		}
+
+		kfree(mac->macinfo.if_comb);
+		mac->macinfo.if_comb = NULL;
+	}
+}
+
 static void qtnf_vif_reset_handler(struct work_struct *work)
 {
 	struct qtnf_vif *vif = container_of(work, struct qtnf_vif, reset_work);
@@ -258,13 +304,44 @@ static void qtnf_mac_init_primary_intf(struct qtnf_wmac *mac)
 {
 	struct qtnf_vif *vif = &mac->iflist[QTNF_PRIMARY_VIF_IDX];
 
-	vif->wdev.iftype = NL80211_IFTYPE_AP;
+	vif->wdev.iftype = NL80211_IFTYPE_STATION;
 	vif->bss_priority = QTNF_DEF_BSS_PRIORITY;
 	vif->wdev.wiphy = priv_to_wiphy(mac);
 	INIT_WORK(&vif->reset_work, qtnf_vif_reset_handler);
 	vif->cons_tx_timeout_cnt = 0;
 }
 
+static void qtnf_mac_scan_finish(struct qtnf_wmac *mac, bool aborted)
+{
+	struct cfg80211_scan_info info = {
+		.aborted = aborted,
+	};
+
+	mutex_lock(&mac->mac_lock);
+
+	if (mac->scan_req) {
+		cfg80211_scan_done(mac->scan_req, &info);
+		mac->scan_req = NULL;
+	}
+
+	mutex_unlock(&mac->mac_lock);
+}
+
+void qtnf_scan_done(struct qtnf_wmac *mac, bool aborted)
+{
+	cancel_delayed_work_sync(&mac->scan_timeout);
+	qtnf_mac_scan_finish(mac, aborted);
+}
+
+static void qtnf_mac_scan_timeout(struct work_struct *work)
+{
+	struct qtnf_wmac *mac =
+		container_of(work, struct qtnf_wmac, scan_timeout.work);
+
+	pr_warn("MAC%d: scan timed out\n", mac->macid);
+	qtnf_mac_scan_finish(mac, true);
+}
+
 static struct qtnf_wmac *qtnf_core_mac_alloc(struct qtnf_bus *bus,
 					     unsigned int macid)
 {
@@ -288,7 +365,12 @@ static struct qtnf_wmac *qtnf_core_mac_alloc(struct qtnf_bus *bus,
 		mac->iflist[i].vifid = i;
 		qtnf_sta_list_init(&mac->iflist[i].sta_list);
 		mutex_init(&mac->mac_lock);
-		timer_setup(&mac->scan_timeout, NULL, 0);
+		INIT_DELAYED_WORK(&mac->scan_timeout, qtnf_mac_scan_timeout);
+		mac->iflist[i].stats64 =
+			netdev_alloc_pcpu_stats(struct pcpu_sw_netstats);
+		if (!mac->iflist[i].stats64)
+			pr_warn("VIF%u.%u: per cpu stats allocation failed\n",
+				macid, i);
 	}
 
 	qtnf_mac_init_primary_intf(mac);
@@ -297,9 +379,12 @@ static struct qtnf_wmac *qtnf_core_mac_alloc(struct qtnf_bus *bus,
 	return mac;
 }
 
+static const struct ethtool_ops qtnf_ethtool_ops = {
+	.get_drvinfo = cfg80211_get_drvinfo,
+};
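
cfg80211_get_drvinfo() fills `ethtool -i` output from the wiphy, which is why qtnf_wiphy_register() above copies the firmware build label into wiphy->fw_version. Roughly (field sources as commonly reported, not verified against this tree):

/* ethtool -i wlanX:
 *   driver:           wiphy device driver name
 *   version:          running kernel release
 *   firmware-version: wiphy->fw_version (the build label here)
 */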
+
 int qtnf_core_net_attach(struct qtnf_wmac *mac, struct qtnf_vif *vif,
-			 const char *name, unsigned char name_assign_type,
-			 enum nl80211_iftype iftype)
+			 const char *name, unsigned char name_assign_type)
 {
 	struct wiphy *wiphy = priv_to_wiphy(mac);
 	struct net_device *dev;
@@ -320,12 +405,12 @@ int qtnf_core_net_attach(struct qtnf_wmac *mac, struct qtnf_vif *vif,
 	dev->needs_free_netdev = true;
 	dev_net_set(dev, wiphy_net(wiphy));
 	dev->ieee80211_ptr = &vif->wdev;
-	dev->ieee80211_ptr->iftype = iftype;
 	ether_addr_copy(dev->dev_addr, vif->mac_addr);
 	SET_NETDEV_DEV(dev, wiphy_dev(wiphy));
 	dev->flags |= IFF_BROADCAST | IFF_MULTICAST;
 	dev->watchdog_timeo = QTNF_DEF_WDOG_TIMEOUT;
 	dev->tx_queue_len = 100;
+	dev->ethtool_ops = &qtnf_ethtool_ops;
 
 	qdev_vif = netdev_priv(dev);
 	*((void **)qdev_vif) = vif;
@@ -366,6 +451,7 @@ static void qtnf_core_mac_detach(struct qtnf_bus *bus, unsigned int macid)
 		}
 		rtnl_unlock();
 		qtnf_sta_list_free(&vif->sta_list);
+		free_percpu(vif->stats64);
 	}
 
 	if (mac->wiphy_registered)
@@ -382,8 +468,9 @@ static void qtnf_core_mac_detach(struct qtnf_bus *bus, unsigned int macid)
 		wiphy->bands[band] = NULL;
 	}
 
-	kfree(mac->macinfo.limits);
-	kfree(wiphy->iface_combinations);
+	qtnf_mac_iface_comb_free(mac);
+	kfree(mac->macinfo.extended_capabilities);
+	kfree(mac->macinfo.extended_capabilities_mask);
 	wiphy_free(wiphy);
 	bus->mac[macid] = NULL;
 }
@@ -418,7 +505,7 @@ static int qtnf_core_mac_attach(struct qtnf_bus *bus, unsigned int macid)
 		goto error;
 	}
 
-	ret = qtnf_cmd_send_add_intf(vif, NL80211_IFTYPE_AP, vif->mac_addr);
+	ret = qtnf_cmd_send_add_intf(vif, vif->wdev.iftype, vif->mac_addr);
 	if (ret) {
 		pr_err("MAC%u: failed to add VIF\n", macid);
 		goto error;
@@ -446,8 +533,7 @@ static int qtnf_core_mac_attach(struct qtnf_bus *bus, unsigned int macid)
 
 	rtnl_lock();
 
-	ret = qtnf_core_net_attach(mac, vif, "wlan%d", NET_NAME_ENUM,
-				   NL80211_IFTYPE_AP);
+	ret = qtnf_core_net_attach(mac, vif, "wlan%d", NET_NAME_ENUM);
 	rtnl_unlock();
 
 	if (ret) {
@@ -644,6 +730,46 @@ void qtnf_wake_all_queues(struct net_device *ndev)
 }
 EXPORT_SYMBOL_GPL(qtnf_wake_all_queues);
 
+void qtnf_update_rx_stats(struct net_device *ndev, const struct sk_buff *skb)
+{
+	struct qtnf_vif *vif = qtnf_netdev_get_priv(ndev);
+	struct pcpu_sw_netstats *stats64;
+
+	if (unlikely(!vif || !vif->stats64)) {
+		ndev->stats.rx_packets++;
+		ndev->stats.rx_bytes += skb->len;
+		return;
+	}
+
+	stats64 = this_cpu_ptr(vif->stats64);
+
+	u64_stats_update_begin(&stats64->syncp);
+	stats64->rx_packets++;
+	stats64->rx_bytes += skb->len;
+	u64_stats_update_end(&stats64->syncp);
+}
+EXPORT_SYMBOL_GPL(qtnf_update_rx_stats);
+
+void qtnf_update_tx_stats(struct net_device *ndev, const struct sk_buff *skb)
+{
+	struct qtnf_vif *vif = qtnf_netdev_get_priv(ndev);
+	struct pcpu_sw_netstats *stats64;
+
+	if (unlikely(!vif || !vif->stats64)) {
+		ndev->stats.tx_packets++;
+		ndev->stats.tx_bytes += skb->len;
+		return;
+	}
+
+	stats64 = this_cpu_ptr(vif->stats64);
+
+	u64_stats_update_begin(&stats64->syncp);
+	stats64->tx_packets++;
+	stats64->tx_bytes += skb->len;
+	u64_stats_update_end(&stats64->syncp);
+}
+EXPORT_SYMBOL_GPL(qtnf_update_tx_stats);
+
 MODULE_AUTHOR("Quantenna Communications");
 MODULE_DESCRIPTION("Quantenna 802.11 wireless LAN FullMAC driver.");
 MODULE_LICENSE("GPL");
diff --git a/drivers/net/wireless/quantenna/qtnfmac/core.h b/drivers/net/wireless/quantenna/qtnfmac/core.h
index 1b7bc0318f3e..3b884c80b6ab 100644
--- a/drivers/net/wireless/quantenna/qtnfmac/core.h
+++ b/drivers/net/wireless/quantenna/qtnfmac/core.h
@@ -88,6 +88,9 @@ struct qtnf_vif {
 	struct work_struct reset_work;
 	struct qtnf_sta_list sta_list;
 	unsigned long cons_tx_timeout_cnt;
+	int generation;
+
+	struct pcpu_sw_netstats __percpu *stats64;
 };
 
 struct qtnf_mac_info {
@@ -102,10 +105,14 @@ struct qtnf_mac_info {
 	u8 sretry_limit;
 	u8 coverage_class;
 	u8 radar_detect_widths;
+	u32 max_acl_mac_addrs;
 	struct ieee80211_ht_cap ht_cap_mod_mask;
 	struct ieee80211_vht_cap vht_cap_mod_mask;
-	struct ieee80211_iface_limit *limits;
-	size_t n_limits;
+	struct ieee80211_iface_combination *if_comb;
+	size_t n_if_comb;
+	u8 *extended_capabilities;
+	u8 *extended_capabilities_mask;
+	u8 extended_capabilities_len;
 };
 
 struct qtnf_chan_stats {
@@ -126,7 +133,7 @@ struct qtnf_wmac {
 	struct qtnf_vif iflist[QTNF_MAX_INTF];
 	struct cfg80211_scan_request *scan_req;
 	struct mutex mac_lock;	/* lock during wmac specific ops */
-	struct timer_list scan_timeout;
+	struct delayed_work scan_timeout;
 };
 
 struct qtnf_hw_info {
@@ -138,14 +145,16 @@ struct qtnf_hw_info {
 	struct ieee80211_regdomain *rd;
 	u8 total_tx_chain;
 	u8 total_rx_chain;
+	char fw_version[ETHTOOL_FWVERS_LEN];
+	u32 hw_version;
 };
 
 struct qtnf_vif *qtnf_mac_get_free_vif(struct qtnf_wmac *mac);
 struct qtnf_vif *qtnf_mac_get_base_vif(struct qtnf_wmac *mac);
+void qtnf_mac_iface_comb_free(struct qtnf_wmac *mac);
 struct wiphy *qtnf_wiphy_allocate(struct qtnf_bus *bus);
 int qtnf_core_net_attach(struct qtnf_wmac *mac, struct qtnf_vif *priv,
-			 const char *name, unsigned char name_assign_type,
-			 enum nl80211_iftype iftype);
+			 const char *name, unsigned char name_assign_type);
 void qtnf_main_work_queue(struct work_struct *work);
 int qtnf_cmd_send_update_phy_params(struct qtnf_wmac *mac, u32 changed);
 int qtnf_cmd_send_get_phy_params(struct qtnf_wmac *mac);
@@ -153,9 +162,13 @@ int qtnf_cmd_send_get_phy_params(struct qtnf_wmac *mac);
 struct qtnf_wmac *qtnf_core_get_mac(const struct qtnf_bus *bus, u8 macid);
 struct net_device *qtnf_classify_skb(struct qtnf_bus *bus, struct sk_buff *skb);
 void qtnf_wake_all_queues(struct net_device *ndev);
+void qtnf_update_rx_stats(struct net_device *ndev, const struct sk_buff *skb);
+void qtnf_update_tx_stats(struct net_device *ndev, const struct sk_buff *skb);
+
 void qtnf_virtual_intf_cleanup(struct net_device *ndev);
 
 void qtnf_netdev_updown(struct net_device *ndev, bool up);
+void qtnf_scan_done(struct qtnf_wmac *mac, bool aborted);
 
 static inline struct qtnf_vif *qtnf_netdev_get_priv(struct net_device *dev)
 {
diff --git a/drivers/net/wireless/quantenna/qtnfmac/event.c b/drivers/net/wireless/quantenna/qtnfmac/event.c
index 4abc6d9ed560..bcd415f96412 100644
--- a/drivers/net/wireless/quantenna/qtnfmac/event.c
+++ b/drivers/net/wireless/quantenna/qtnfmac/event.c
@@ -59,10 +59,11 @@ qtnf_event_handle_sta_assoc(struct qtnf_wmac *mac, struct qtnf_vif *vif,
 	pr_debug("VIF%u.%u: MAC:%pM FC:%x\n", mac->macid, vif->vifid, sta_addr,
 		 frame_control);
 
-	qtnf_sta_list_add(&vif->sta_list, sta_addr);
+	qtnf_sta_list_add(vif, sta_addr);
 
 	sinfo.assoc_req_ies = NULL;
 	sinfo.assoc_req_ies_len = 0;
+	sinfo.generation = vif->generation;
 
 	payload_len = len - sizeof(*sta_assoc);
 	tlv = (const struct qlink_tlv_hdr *)sta_assoc->ies;
@@ -132,7 +133,7 @@ qtnf_event_handle_sta_deauth(struct qtnf_wmac *mac, struct qtnf_vif *vif,
 	pr_debug("VIF%u.%u: MAC:%pM reason:%x\n", mac->macid, vif->vifid,
 		 sta_addr, reason);
 
-	if (qtnf_sta_list_del(&vif->sta_list, sta_addr))
+	if (qtnf_sta_list_del(vif, sta_addr))
 		cfg80211_del_sta(vif->netdev, sta_deauth->sta_addr,
 				 GFP_KERNEL);
 
@@ -237,9 +238,8 @@ qtnf_event_handle_mgmt_received(struct qtnf_vif *vif,
 	pr_debug("%s LEN:%u FC:%.4X SA:%pM\n", vif->netdev->name, frame_len,
 		 le16_to_cpu(frame->frame_control), frame->addr2);
 
-	cfg80211_rx_mgmt(&vif->wdev, le32_to_cpu(rxmgmt->freq),
-			 le32_to_cpu(rxmgmt->sig_dbm), rxmgmt->frame_data,
-			 frame_len, flags);
+	cfg80211_rx_mgmt(&vif->wdev, le32_to_cpu(rxmgmt->freq), rxmgmt->sig_dbm,
+			 rxmgmt->frame_data, frame_len, flags);
 
 	return 0;
 }
@@ -324,7 +324,7 @@ qtnf_event_handle_scan_results(struct qtnf_vif *vif,
 				  sr->bssid, get_unaligned_le64(&sr->tsf),
 				  le16_to_cpu(sr->capab),
 				  le16_to_cpu(sr->bintval), ies, ies_len,
-				  sr->signal, GFP_KERNEL);
+				  DBM_TO_MBM(sr->sig_dbm), GFP_KERNEL);
 	if (!bss)
 		return -ENOMEM;
 
@@ -369,7 +369,8 @@ qtnf_event_handle_freq_change(struct qtnf_wmac *mac,
 	qlink_chandef_q2cfg(wiphy, &data->chan, &chandef);
 
 	if (!cfg80211_chandef_valid(&chandef)) {
-		pr_err("MAC%u: bad channel f1=%u f2=%u bw=%u\n", mac->macid,
+		pr_err("MAC%u: bad channel freq=%u cf1=%u cf2=%u bw=%u\n",
+		       mac->macid, chandef.chan->center_freq,
 		       chandef.center_freq1, chandef.center_freq2,
 		       chandef.width);
 		return -EINVAL;
@@ -394,6 +395,63 @@ qtnf_event_handle_freq_change(struct qtnf_wmac *mac,
 	return 0;
 }
 
+static int qtnf_event_handle_radar(struct qtnf_vif *vif,
+				   const struct qlink_event_radar *ev,
+				   u16 len)
+{
+	struct wiphy *wiphy = priv_to_wiphy(vif->mac);
+	struct cfg80211_chan_def chandef;
+
+	if (len < sizeof(*ev)) {
+		pr_err("MAC%u: payload is too short\n", vif->mac->macid);
+		return -EINVAL;
+	}
+
+	if (!wiphy->registered || !vif->netdev)
+		return 0;
+
+	qlink_chandef_q2cfg(wiphy, &ev->chan, &chandef);
+
+	if (!cfg80211_chandef_valid(&chandef)) {
+		pr_err("MAC%u: bad channel f1=%u f2=%u bw=%u\n",
+		       vif->mac->macid,
+		       chandef.center_freq1, chandef.center_freq2,
+		       chandef.width);
+		return -EINVAL;
+	}
+
+	pr_info("%s: radar event=%u f1=%u f2=%u bw=%u\n",
+		vif->netdev->name, ev->event,
+		chandef.center_freq1, chandef.center_freq2,
+		chandef.width);
+
+	switch (ev->event) {
+	case QLINK_RADAR_DETECTED:
+		cfg80211_radar_event(wiphy, &chandef, GFP_KERNEL);
+		break;
+	case QLINK_RADAR_CAC_FINISHED:
+		if (!vif->wdev.cac_started)
+			break;
+
+		cfg80211_cac_event(vif->netdev, &chandef,
+				   NL80211_RADAR_CAC_FINISHED, GFP_KERNEL);
+		break;
+	case QLINK_RADAR_CAC_ABORTED:
+		if (!vif->wdev.cac_started)
+			break;
+
+		cfg80211_cac_event(vif->netdev, &chandef,
+				   NL80211_RADAR_CAC_ABORTED, GFP_KERNEL);
+		break;
+	default:
+		pr_warn("%s: unhandled radar event %u\n",
+			vif->netdev->name, ev->event);
+		break;
+	}
+
+	return 0;
+}
+
 static int qtnf_event_parse(struct qtnf_wmac *mac,
 			    const struct sk_buff *event_skb)
 {
@@ -448,6 +506,10 @@ static int qtnf_event_parse(struct qtnf_wmac *mac,
 		ret = qtnf_event_handle_freq_change(mac, (const void *)event,
 						    event_len);
 		break;
+	case QLINK_EVENT_RADAR:
+		ret = qtnf_event_handle_radar(vif, (const void *)event,
+					      event_len);
+		break;
 	default:
 		pr_warn("unknown event type: %x\n", event_id);
 		break;
@@ -479,9 +541,9 @@ static int qtnf_event_process_skb(struct qtnf_bus *bus,
 	if (unlikely(!mac))
 		return -ENXIO;
 
-	qtnf_bus_lock(bus);
+	rtnl_lock();
 	res = qtnf_event_parse(mac, skb);
-	qtnf_bus_unlock(bus);
+	rtnl_unlock();
 
 	return res;
 }
diff --git a/drivers/net/wireless/quantenna/qtnfmac/pearl/pcie.c b/drivers/net/wireless/quantenna/qtnfmac/pearl/pcie.c
index 7e487622d87d..6f6190964320 100644
--- a/drivers/net/wireless/quantenna/qtnfmac/pearl/pcie.c
+++ b/drivers/net/wireless/quantenna/qtnfmac/pearl/pcie.c
@@ -615,8 +615,7 @@ static void qtnf_pcie_data_tx_reclaim(struct qtnf_pcie_bus_priv *priv)
 					 PCI_DMA_TODEVICE);
 
 			if (skb->dev) {
-				skb->dev->stats.tx_packets++;
-				skb->dev->stats.tx_bytes += skb->len;
+				qtnf_update_tx_stats(skb->dev, skb);
 				if (unlikely(priv->tx_stopped)) {
 					qtnf_wake_all_queues(skb->dev);
 					priv->tx_stopped = 0;
@@ -855,9 +854,7 @@ static int qtnf_rx_poll(struct napi_struct *napi, int budget)
 			skb_put(skb, psize);
 			ndev = qtnf_classify_skb(bus, skb);
 			if (likely(ndev)) {
-				ndev->stats.rx_packets++;
-				ndev->stats.rx_bytes += skb->len;
-
+				qtnf_update_rx_stats(ndev, skb);
 				skb->protocol = eth_type_trans(skb, ndev);
 				napi_gro_receive(napi, skb);
 			} else {
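
qtnf_update_tx_stats()/qtnf_update_rx_stats() replace the open-coded counter bumps here; their bodies are outside this hunk, so the sketch below is an assumption of what such a helper minimally does, not the driver's actual implementation:

	static inline void update_tx_stats_sketch(struct net_device *ndev,
						  const struct sk_buff *skb)
	{
		/* same two increments the old inline code performed */
		ndev->stats.tx_packets++;
		ndev->stats.tx_bytes += skb->len;
	}
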
diff --git a/drivers/net/wireless/quantenna/qtnfmac/qlink.h b/drivers/net/wireless/quantenna/qtnfmac/qlink.h
index a432fb001c41..9bf3ae4d1b3b 100644
--- a/drivers/net/wireless/quantenna/qtnfmac/qlink.h
+++ b/drivers/net/wireless/quantenna/qtnfmac/qlink.h
@@ -19,7 +19,7 @@
 
 #include <linux/ieee80211.h>
 
-#define QLINK_PROTO_VER		6
+#define QLINK_PROTO_VER		11
 
 #define QLINK_MACID_RSVD		0xFF
 #define QLINK_VIFID_RSVD		0xFF
@@ -122,17 +122,49 @@ enum qlink_channel_width {
 };
 
 /**
+ * struct qlink_channel - qlink control channel definition
+ *
+ * @hw_value: hardware-specific value for the channel
+ * @center_freq: center frequency in MHz
+ * @flags: channel flags from &enum qlink_channel_flags
+ * @band: band this channel belongs to
+ * @max_antenna_gain: maximum antenna gain in dBi
+ * @max_power: maximum transmission power (in dBm)
+ * @max_reg_power: maximum regulatory transmission power (in dBm)
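+ * @dfs_cac_ms: DFS CAC time in milliseconds.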
+ * @dfs_state: current state of this channel.
+ *      Only relevant if radar is required on this channel.
+ * @beacon_found: helper to regulatory code to indicate when a beacon
+ *	has been found on this channel. Use regulatory_hint_found_beacon()
+ *	to enable this, this is useful only on 5 GHz band.
+ */
+struct qlink_channel {
+	__le16 hw_value;
+	__le16 center_freq;
+	__le32 flags;
+	u8 band;
+	u8 max_antenna_gain;
+	u8 max_power;
+	u8 max_reg_power;
+	__le32 dfs_cac_ms;
+	u8 dfs_state;
+	u8 beacon_found;
+	u8 rsvd[2];
+} __packed;
+
+/**
  * struct qlink_chandef - qlink channel definition
  *
+ * @chan: primary channel definition
  * @center_freq1: center frequency of first segment
  * @center_freq2: center frequency of second segment (80+80 only)
 * @width: channel width, one of &enum qlink_channel_width
  */
 struct qlink_chandef {
+	struct qlink_channel chan;
 	__le16 center_freq1;
 	__le16 center_freq2;
 	u8 width;
-	u8 rsvd[3];
+	u8 rsvd;
 } __packed;
 
 #define QLINK_MAX_NR_CIPHER_SUITES            5
@@ -153,6 +185,17 @@ struct qlink_auth_encr {
 	u8 rsvd[2];
 } __packed;
 
+/**
+ * struct qlink_sta_info_state - station flags mask/value
+ *
+ * @mask: STA flags mask, bitmap of &enum qlink_sta_flags
+ * @value: STA flags values, bitmap of &enum qlink_sta_flags
+ */
+struct qlink_sta_info_state {
+	__le32 mask;
+	__le32 value;
+} __packed;
+
 /* QLINK Command messages related definitions
  */
 
@@ -173,6 +216,7 @@ struct qlink_auth_encr {
  * @QLINK_CMD_REG_NOTIFY: notify device about regulatory domain change. This
  *	command is supported only if device reports QLINK_HW_SUPPORTS_REG_UPDATE
  *	capability.
+ * @QLINK_CMD_START_CAC: start radar detection procedure on a specified channel.
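+ * @QLINK_CMD_SET_MAC_ACL: set MAC address access control list, see
+ *	&struct qlink_acl_data.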
  */
 enum qlink_cmd_type {
 	QLINK_CMD_FW_INIT		= 0x0001,
@@ -192,8 +236,10 @@ enum qlink_cmd_type {
 	QLINK_CMD_BAND_INFO_GET		= 0x001A,
 	QLINK_CMD_CHAN_SWITCH		= 0x001B,
 	QLINK_CMD_CHAN_GET		= 0x001C,
+	QLINK_CMD_START_CAC		= 0x001D,
 	QLINK_CMD_START_AP		= 0x0021,
 	QLINK_CMD_STOP_AP		= 0x0022,
+	QLINK_CMD_SET_MAC_ACL		= 0x0023,
 	QLINK_CMD_GET_STA_INFO		= 0x0030,
 	QLINK_CMD_ADD_KEY		= 0x0040,
 	QLINK_CMD_DEL_KEY		= 0x0041,
@@ -368,16 +414,14 @@ struct qlink_cmd_set_def_mgmt_key {
 /**
  * struct qlink_cmd_change_sta - data for QLINK_CMD_CHANGE_STA command
  *
- * @sta_flags_mask: STA flags mask, bitmap of &enum qlink_sta_flags
- * @sta_flags_set: STA flags values, bitmap of &enum qlink_sta_flags
+ * @flag_update: STA flags to update
  * @if_type: Mode of interface operation, one of &enum qlink_iface_type
  * @vlanid: VLAN ID to assign to specific STA
  * @sta_addr: address of the STA for which parameters are set.
  */
 struct qlink_cmd_change_sta {
 	struct qlink_cmd chdr;
-	__le32 sta_flags_mask;
-	__le32 sta_flags_set;
+	struct qlink_sta_info_state flag_update;
 	__le16 if_type;
 	__le16 vlanid;
 	u8 sta_addr[ETH_ALEN];
@@ -585,6 +629,40 @@ struct qlink_cmd_start_ap {
 	u8 info[0];
 } __packed;
 
+/**
+ * struct qlink_cmd_start_cac - data for QLINK_CMD_START_CAC command
+ *
+ * @chan: a channel to start a radar detection procedure on.
+ * @cac_time_ms: CAC time in milliseconds.
+ */
+struct qlink_cmd_start_cac {
+	struct qlink_cmd chdr;
+	struct qlink_chandef chan;
+	__le32 cac_time_ms;
+} __packed;
+
+enum qlink_acl_policy {
+	QLINK_ACL_POLICY_ACCEPT_UNLESS_LISTED,
+	QLINK_ACL_POLICY_DENY_UNLESS_LISTED,
+};
+
+struct qlink_mac_address {
+	u8 addr[ETH_ALEN];
+} __packed;
+
+/**
+ * struct qlink_acl_data - ACL data
+ *
+ * @policy: filter policy, one of &enum qlink_acl_policy.
+ * @num_entries: number of MAC addresses in array.
+ * @mac_addrs: array of MAC addresses.
+ */
+struct qlink_acl_data {
+	__le32 policy;
+	__le32 num_entries;
+	struct qlink_mac_address mac_addrs[0];
+} __packed;
+
 /* QLINK Command Responses messages related definitions
  */
 
@@ -646,6 +724,7 @@ struct qlink_resp_get_mac_info {
 	struct ieee80211_ht_cap ht_cap_mod_mask;
 	__le16 max_ap_assoc_sta;
 	__le16 radar_detect_widths;
+	__le32 max_acl_mac_addrs;
 	u8 bands_cap;
 	u8 rsvd[1];
 	u8 var_info[0];
@@ -685,6 +764,9 @@ struct qlink_resp_get_hw_info {
 	struct qlink_resp rhdr;
 	__le32 fw_ver;
 	__le32 hw_capab;
+	__le32 bld_tmstamp;
+	__le32 plat_id;
+	__le32 hw_ver;
 	__le16 ql_proto_ver;
 	u8 num_mac;
 	u8 mac_bitmap;
@@ -709,17 +791,27 @@ struct qlink_resp_manage_intf {
 	struct qlink_intf_info intf_info;
 } __packed;
 
+enum qlink_sta_info_rate_flags {
+	QLINK_STA_INFO_RATE_FLAG_HT_MCS		= BIT(0),
+	QLINK_STA_INFO_RATE_FLAG_VHT_MCS	= BIT(1),
+	QLINK_STA_INFO_RATE_FLAG_SHORT_GI	= BIT(2),
+	QLINK_STA_INFO_RATE_FLAG_60G		= BIT(3),
+};
+
 /**
  * struct qlink_resp_get_sta_info - response for QLINK_CMD_GET_STA_INFO command
  *
  * Response data containing statistics for specified STA.
  *
+ * Validity of statistics carried in @info is indicated by a bitmap of
+ * &enum qlink_sta_info carried in a QTN_TLV_ID_STA_STATS_MAP TLV.
+ *
  * @sta_addr: MAC address of STA the response carries statistic for.
- * @info: statistics for specified STA.
+ * @info: variable statistics for specified STA.
  */
 struct qlink_resp_get_sta_info {
 	struct qlink_resp rhdr;
 	u8 sta_addr[ETH_ALEN];
+	u8 rsvd[2];
 	u8 info[0];
 } __packed;
 
@@ -782,6 +874,7 @@ enum qlink_event_type {
 	QLINK_EVENT_BSS_JOIN		= 0x0026,
 	QLINK_EVENT_BSS_LEAVE		= 0x0027,
 	QLINK_EVENT_FREQ_CHANGE		= 0x0028,
+	QLINK_EVENT_RADAR		= 0x0029,
 };
 
 /**
@@ -869,15 +962,16 @@ enum qlink_rxmgmt_flags {
  * struct qlink_event_rxmgmt - data for QLINK_EVENT_MGMT_RECEIVED event
  *
  * @freq: Frequency on which the frame was received in MHz.
- * @sig_dbm: signal strength in dBm.
  * @flags: bitmap of &enum qlink_rxmgmt_flags.
+ * @sig_dbm: signal strength in dBm.
  * @frame_data: data of Rx'd frame itself.
  */
 struct qlink_event_rxmgmt {
 	struct qlink_event ehdr;
 	__le32 freq;
-	__le32 sig_dbm;
 	__le32 flags;
+	s8 sig_dbm;
+	u8 rsvd[3];
 	u8 frame_data[0];
 } __packed;
 
@@ -889,7 +983,7 @@ struct qlink_event_rxmgmt {
  *	event was generated was discovered.
  * @capab: capabilities field.
  * @bintval: beacon interval announced by discovered BSS.
- * @signal: signal strength.
+ * @sig_dbm: signal strength in dBm.
  * @bssid: BSSID announced by discovered BSS.
  * @ssid_len: length of SSID announced by BSS.
  * @ssid: SSID announced by discovered BSS.
@@ -901,7 +995,7 @@ struct qlink_event_scan_result {
 	__le16 freq;
 	__le16 capab;
 	__le16 bintval;
-	s8 signal;
+	s8 sig_dbm;
 	u8 ssid_len;
 	u8 ssid[IEEE80211_MAX_SSID_LEN];
 	u8 bssid[ETH_ALEN];
@@ -931,9 +1025,39 @@ struct qlink_event_scan_complete {
 	__le32 flags;
 } __packed;
 
+enum qlink_radar_event {
+	QLINK_RADAR_DETECTED,
+	QLINK_RADAR_CAC_FINISHED,
+	QLINK_RADAR_CAC_ABORTED,
+	QLINK_RADAR_NOP_FINISHED,
+	QLINK_RADAR_PRE_CAC_EXPIRED,
+};
+
+/**
+ * struct qlink_event_radar - data for QLINK_EVENT_RADAR event
+ *
+ * @chan: channel on which radar event happened.
+ * @event: radar event type, one of &enum qlink_radar_event.
+ */
+struct qlink_event_radar {
+	struct qlink_event ehdr;
+	struct qlink_chandef chan;
+	u8 event;
+	u8 rsvd[3];
+} __packed;
+
 /* QLINK TLVs (Type-Length Values) definitions
  */
 
+/**
+ * enum qlink_tlv_id - list of TLVs that Qlink messages can carry
+ *
+ * @QTN_TLV_ID_STA_STATS_MAP: a bitmap of &enum qlink_sta_info, used to
+ *	indicate which statistics carried in QTN_TLV_ID_STA_STATS are valid.
+ * @QTN_TLV_ID_STA_STATS: per-STA statistics as defined by
+ *	&struct qlink_sta_stats. Valid values are marked as such in a bitmap
+ *	carried by QTN_TLV_ID_STA_STATS_MAP.
+ */
 enum qlink_tlv_id {
 	QTN_TLV_ID_FRAG_THRESH		= 0x0201,
 	QTN_TLV_ID_RTS_THRESH		= 0x0202,
@@ -942,15 +1066,24 @@ enum qlink_tlv_id {
 	QTN_TLV_ID_REG_RULE		= 0x0207,
 	QTN_TLV_ID_CHANNEL		= 0x020F,
 	QTN_TLV_ID_CHANDEF		= 0x0210,
+	QTN_TLV_ID_STA_STATS_MAP	= 0x0211,
+	QTN_TLV_ID_STA_STATS		= 0x0212,
 	QTN_TLV_ID_COVERAGE_CLASS	= 0x0213,
 	QTN_TLV_ID_IFACE_LIMIT		= 0x0214,
 	QTN_TLV_ID_NUM_IFACE_COMB	= 0x0215,
 	QTN_TLV_ID_CHANNEL_STATS	= 0x0216,
-	QTN_TLV_ID_STA_BASIC_COUNTERS	= 0x0300,
-	QTN_TLV_ID_STA_GENERIC_INFO	= 0x0301,
 	QTN_TLV_ID_KEY			= 0x0302,
 	QTN_TLV_ID_SEQ			= 0x0303,
 	QTN_TLV_ID_IE_SET		= 0x0305,
+	QTN_TLV_ID_EXT_CAPABILITY_MASK	= 0x0306,
+	QTN_TLV_ID_ACL_DATA		= 0x0307,
+	QTN_TLV_ID_BUILD_NAME		= 0x0401,
+	QTN_TLV_ID_BUILD_REV		= 0x0402,
+	QTN_TLV_ID_BUILD_TYPE		= 0x0403,
+	QTN_TLV_ID_BUILD_LABEL		= 0x0404,
+	QTN_TLV_ID_HW_ID		= 0x0405,
+	QTN_TLV_ID_CALIBRATION_VER	= 0x0406,
+	QTN_TLV_ID_UBOOT_VER		= 0x0407,
 };
 
 struct qlink_tlv_hdr {
@@ -959,76 +1092,24 @@ struct qlink_tlv_hdr {
 	u8 val[0];
 } __packed;
 
-struct qlink_iface_limit {
-	__le16 max_num;
-	__le16 type;
-} __packed;
-
 struct qlink_iface_comb_num {
-	__le16 iface_comb_num;
+	__le32 iface_comb_num;
 } __packed;
 
-struct qlink_sta_stat_basic_counters {
-	__le64 rx_bytes;
-	__le64 tx_bytes;
-	__le64 rx_beacons;
-	__le32 rx_packets;
-	__le32 tx_packets;
-	__le32 rx_dropped;
-	__le32 tx_failed;
-} __packed;
-
-enum qlink_sta_info_rate_flags {
-	QLINK_STA_INFO_RATE_FLAG_INVALID	= 0,
-	QLINK_STA_INFO_RATE_FLAG_HT_MCS		= BIT(0),
-	QLINK_STA_INFO_RATE_FLAG_VHT_MCS	= BIT(1),
-	QLINK_STA_INFO_RATE_FLAG_SHORT_GI	= BIT(2),
-	QLINK_STA_INFO_RATE_FLAG_60G		= BIT(3),
-};
-
-enum qlink_sta_info_rate_bw {
-	QLINK_STA_INFO_RATE_BW_5		= 0,
-	QLINK_STA_INFO_RATE_BW_10		= 1,
-	QLINK_STA_INFO_RATE_BW_20		= 2,
-	QLINK_STA_INFO_RATE_BW_40		= 3,
-	QLINK_STA_INFO_RATE_BW_80		= 4,
-	QLINK_STA_INFO_RATE_BW_160		= 5,
-};
-
-/**
- * struct qlink_sta_info_rate - STA rate statistics
- *
- * @rate: data rate in Mbps.
- * @flags: bitmap of &enum qlink_sta_flags.
- * @mcs: 802.11-defined MCS index.
- * nss: Number of Spatial Streams.
- * @bw: bandwidth, one of &enum qlink_sta_info_rate_bw.
- */
-struct qlink_sta_info_rate {
-	__le16 rate;
-	u8 flags;
-	u8 mcs;
-	u8 nss;
-	u8 bw;
+struct qlink_iface_limit {
+	__le16 max_num;
+	__le16 type;
 } __packed;
 
-struct qlink_sta_info_state {
-	__le32 mask;
-	__le32 value;
+struct qlink_iface_limit_record {
+	__le16 max_interfaces;
+	u8 num_different_channels;
+	u8 n_limits;
+	struct qlink_iface_limit limits[0];
 } __packed;
 
 #define QLINK_RSSI_OFFSET	120
 
-struct qlink_sta_info_generic {
-	struct qlink_sta_info_state state;
-	__le32 connected_time;
-	__le32 inactive_time;
-	struct qlink_sta_info_rate rx_rate;
-	struct qlink_sta_info_rate tx_rate;
-	u8 rssi;
-	u8 rssi_avg;
-} __packed;
-
 struct qlink_tlv_frag_rts_thr {
 	struct qlink_tlv_hdr hdr;
 	__le16 thr;
@@ -1113,19 +1194,16 @@ enum qlink_dfs_state {
 	QLINK_DFS_AVAILABLE,
 };
 
+/**
+ * struct qlink_tlv_channel - data for QTN_TLV_ID_CHANNEL TLV
+ *
+ * Channel settings.
+ *
+ * @channel: ieee80211 channel settings.
+ */
 struct qlink_tlv_channel {
 	struct qlink_tlv_hdr hdr;
-	__le16 hw_value;
-	__le16 center_freq;
-	__le32 flags;
-	u8 band;
-	u8 max_antenna_gain;
-	u8 max_power;
-	u8 max_reg_power;
-	__le32 dfs_cac_ms;
-	u8 dfs_state;
-	u8 beacon_found;
-	u8 rsvd[2];
+	struct qlink_channel chan;
 } __packed;
 
 /**
@@ -1137,7 +1215,7 @@ struct qlink_tlv_channel {
  */
 struct qlink_tlv_chandef {
 	struct qlink_tlv_hdr hdr;
-	struct qlink_chandef chan;
+	struct qlink_chandef chdef;
 } __packed;
 
 enum qlink_ie_set_type {
@@ -1176,4 +1254,105 @@ struct qlink_chan_stats {
 	s8 chan_noise;
 } __packed;
 
+/**
+ * enum qlink_sta_info - station information bitmap
+ *
+ * Used to indicate which statistics values in &struct qlink_sta_stats
+ * are valid. Individual values are used to fill a bitmap carried in a
+ * payload of QTN_TLV_ID_STA_STATS_MAP.
+ *
+ * @QLINK_STA_INFO_CONNECTED_TIME: connected_time value is valid.
+ * @QLINK_STA_INFO_INACTIVE_TIME: inactive_time value is valid.
+ * @QLINK_STA_INFO_RX_BYTES: lower 32 bits of rx_bytes value are valid.
+ * @QLINK_STA_INFO_TX_BYTES: lower 32 bits of tx_bytes value are valid.
+ * @QLINK_STA_INFO_RX_BYTES64: rx_bytes value is valid.
+ * @QLINK_STA_INFO_TX_BYTES64: tx_bytes value is valid.
+ * @QLINK_STA_INFO_RX_DROP_MISC: rx_dropped_misc value is valid.
+ * @QLINK_STA_INFO_BEACON_RX: rx_beacon value is valid.
+ * @QLINK_STA_INFO_SIGNAL: signal value is valid.
+ * @QLINK_STA_INFO_SIGNAL_AVG: signal_avg value is valid.
+ * @QLINK_STA_INFO_RX_BITRATE: rxrate value is valid.
+ * @QLINK_STA_INFO_TX_BITRATE: txrate value is valid.
+ * @QLINK_STA_INFO_RX_PACKETS: rx_packets value is valid.
+ * @QLINK_STA_INFO_TX_PACKETS: tx_packets value is valid.
+ * @QLINK_STA_INFO_TX_RETRIES: tx_retries value is valid.
+ * @QLINK_STA_INFO_TX_FAILED: tx_failed value is valid.
+ * @QLINK_STA_INFO_STA_FLAGS: sta_flags value is valid.
+ */
+enum qlink_sta_info {
+	QLINK_STA_INFO_CONNECTED_TIME,
+	QLINK_STA_INFO_INACTIVE_TIME,
+	QLINK_STA_INFO_RX_BYTES,
+	QLINK_STA_INFO_TX_BYTES,
+	QLINK_STA_INFO_RX_BYTES64,
+	QLINK_STA_INFO_TX_BYTES64,
+	QLINK_STA_INFO_RX_DROP_MISC,
+	QLINK_STA_INFO_BEACON_RX,
+	QLINK_STA_INFO_SIGNAL,
+	QLINK_STA_INFO_SIGNAL_AVG,
+	QLINK_STA_INFO_RX_BITRATE,
+	QLINK_STA_INFO_TX_BITRATE,
+	QLINK_STA_INFO_RX_PACKETS,
+	QLINK_STA_INFO_TX_PACKETS,
+	QLINK_STA_INFO_TX_RETRIES,
+	QLINK_STA_INFO_TX_FAILED,
+	QLINK_STA_INFO_STA_FLAGS,
+	QLINK_STA_INFO_NUM,
+};
+
+/**
+ * struct qlink_sta_info_rate - STA rate statistics
+ *
+ * @rate: data rate in Mbps.
+ * @flags: bitmap of &enum qlink_sta_info_rate_flags.
+ * @mcs: 802.11-defined MCS index.
+ * @nss: Number of Spatial Streams.
+ * @bw: bandwidth, one of &enum qlink_channel_width.
+ */
+struct qlink_sta_info_rate {
+	__le16 rate;
+	u8 flags;
+	u8 mcs;
+	u8 nss;
+	u8 bw;
+} __packed;
+
+/**
+ * struct qlink_sta_stats - data for QTN_TLV_ID_STA_STATS
+ *
+ * Carries statistics of a STA. Not all fields may be filled with
+ * valid values. Valid fields should be indicated as such using a bitmap of
+ * &enum qlink_sta_info. Bitmap is carried separately in a payload of
+ * QTN_TLV_ID_STA_STATS_MAP.
+ */
+struct qlink_sta_stats {
+	__le64 rx_bytes;
+	__le64 tx_bytes;
+	__le64 rx_beacon;
+	__le64 rx_duration;
+	__le64 t_offset;
+	__le32 connected_time;
+	__le32 inactive_time;
+	__le32 rx_packets;
+	__le32 tx_packets;
+	__le32 tx_retries;
+	__le32 tx_failed;
+	__le32 rx_dropped_misc;
+	__le32 beacon_loss_count;
+	__le32 expected_throughput;
+	struct qlink_sta_info_state sta_flags;
+	struct qlink_sta_info_rate txrate;
+	struct qlink_sta_info_rate rxrate;
+	__le16 llid;
+	__le16 plid;
+	u8 local_pm;
+	u8 peer_pm;
+	u8 nonpeer_pm;
+	u8 rx_beacon_signal_avg;
+	u8 plink_state;
+	u8 signal;
+	u8 signal_avg;
+	u8 rsvd[1];
+} __packed;
+
 #endif /* _QTN_QLINK_H_ */
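
Every structure qlink puts on the wire is __packed and uses explicit __le16/__le32 fields, so the layout is independent of host endianness and compiler padding. An illustrative sketch (not driver code) of filling one such structure before handing it to firmware:

	static void fill_chandef_sketch(struct qlink_chandef *qch,
					u16 freq_mhz, u16 cf1_mhz,
					u8 qlink_width)
	{
		qch->chan.center_freq = cpu_to_le16(freq_mhz);
		qch->center_freq1 = cpu_to_le16(cf1_mhz);
		qch->center_freq2 = 0;		/* unused outside 80+80 */
		qch->width = qlink_width;	/* single byte, no byte swap */
	}
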
diff --git a/drivers/net/wireless/quantenna/qtnfmac/qlink_util.c b/drivers/net/wireless/quantenna/qtnfmac/qlink_util.c
index 61d999affb09..aeeda81b09ea 100644
--- a/drivers/net/wireless/quantenna/qtnfmac/qlink_util.c
+++ b/drivers/net/wireless/quantenna/qtnfmac/qlink_util.c
@@ -100,34 +100,6 @@ static enum nl80211_chan_width qlink_chanwidth_to_nl(u8 qlw)
 	}
 }
 
-void qlink_chandef_q2cfg(struct wiphy *wiphy,
-			 const struct qlink_chandef *qch,
-			 struct cfg80211_chan_def *chdef)
-{
-	chdef->center_freq1 = le16_to_cpu(qch->center_freq1);
-	chdef->center_freq2 = le16_to_cpu(qch->center_freq2);
-	chdef->width = qlink_chanwidth_to_nl(qch->width);
-
-	switch (chdef->width) {
-	case NL80211_CHAN_WIDTH_20_NOHT:
-	case NL80211_CHAN_WIDTH_20:
-	case NL80211_CHAN_WIDTH_5:
-	case NL80211_CHAN_WIDTH_10:
-		chdef->chan = ieee80211_get_channel(wiphy, chdef->center_freq1);
-		break;
-	case NL80211_CHAN_WIDTH_40:
-	case NL80211_CHAN_WIDTH_80:
-	case NL80211_CHAN_WIDTH_80P80:
-	case NL80211_CHAN_WIDTH_160:
-		chdef->chan = ieee80211_get_channel(wiphy,
-						    chdef->center_freq1 - 10);
-		break;
-	default:
-		chdef->chan = NULL;
-		break;
-	}
-}
-
 static u8 qlink_chanwidth_nl_to_qlink(enum nl80211_chan_width nlwidth)
 {
 	switch (nlwidth) {
@@ -152,9 +124,29 @@ static u8 qlink_chanwidth_nl_to_qlink(enum nl80211_chan_width nlwidth)
 	}
 }
 
+void qlink_chandef_q2cfg(struct wiphy *wiphy,
+			 const struct qlink_chandef *qch,
+			 struct cfg80211_chan_def *chdef)
+{
+	struct ieee80211_channel *chan;
+
+	chan = ieee80211_get_channel(wiphy, le16_to_cpu(qch->chan.center_freq));
+
+	chdef->chan = chan;
+	chdef->center_freq1 = le16_to_cpu(qch->center_freq1);
+	chdef->center_freq2 = le16_to_cpu(qch->center_freq2);
+	chdef->width = qlink_chanwidth_to_nl(qch->width);
+}
+
 void qlink_chandef_cfg2q(const struct cfg80211_chan_def *chdef,
 			 struct qlink_chandef *qch)
 {
+	struct ieee80211_channel *chan = chdef->chan;
+
+	qch->chan.hw_value = cpu_to_le16(chan->hw_value);
+	qch->chan.center_freq = cpu_to_le16(chan->center_freq);
+	qch->chan.flags = cpu_to_le32(chan->flags);
+
 	qch->center_freq1 = cpu_to_le16(chdef->center_freq1);
 	qch->center_freq2 = cpu_to_le16(chdef->center_freq2);
 	qch->width = qlink_chanwidth_nl_to_qlink(chdef->width);
@@ -172,3 +164,33 @@ enum qlink_hidden_ssid qlink_hidden_ssid_nl2q(enum nl80211_hidden_ssid nl_val)
 		return QLINK_HIDDEN_SSID_NOT_IN_USE;
 	}
 }
+
+bool qtnf_utils_is_bit_set(const u8 *arr, unsigned int bit,
+			   unsigned int arr_max_len)
+{
+	unsigned int idx = bit / BITS_PER_BYTE;
+	u8 mask = 1 << (bit - (idx * BITS_PER_BYTE));
+
+	if (idx >= arr_max_len)
+		return false;
+
+	return arr[idx] & mask;
+}
+
+void qlink_acl_data_cfg2q(const struct cfg80211_acl_data *acl,
+			  struct qlink_acl_data *qacl)
+{
+	switch (acl->acl_policy) {
+	case NL80211_ACL_POLICY_ACCEPT_UNLESS_LISTED:
+		qacl->policy =
+			cpu_to_le32(QLINK_ACL_POLICY_ACCEPT_UNLESS_LISTED);
+		break;
+	case NL80211_ACL_POLICY_DENY_UNLESS_LISTED:
+		qacl->policy = cpu_to_le32(QLINK_ACL_POLICY_DENY_UNLESS_LISTED);
+		break;
+	}
+
+	qacl->num_entries = cpu_to_le32(acl->n_acl_entries);
+	memcpy(qacl->mac_addrs, acl->mac_addrs,
+	       acl->n_acl_entries * sizeof(*qacl->mac_addrs));
+}
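
qtnf_utils_is_bit_set() selects the byte with bit / BITS_PER_BYTE and masks the bit position inside it, treating anything beyond the array as cleared. A usage sketch under those semantics:

	u8 map[4] = { 0x00, 0x00, 0x02, 0x00 };	/* only bit 17 set */

	/* bit 17 -> byte 2, mask 1 << 1 == 0x02: returns true */
	bool set = qtnf_utils_is_bit_set(map, 17, sizeof(map));

	/* byte index 4 >= sizeof(map): returns false, no out-of-bounds read */
	bool oob = qtnf_utils_is_bit_set(map, 32, sizeof(map));
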
diff --git a/drivers/net/wireless/quantenna/qtnfmac/qlink_util.h b/drivers/net/wireless/quantenna/qtnfmac/qlink_util.h
index 260383d6d109..54caeb38917c 100644
--- a/drivers/net/wireless/quantenna/qtnfmac/qlink_util.h
+++ b/drivers/net/wireless/quantenna/qtnfmac/qlink_util.h
@@ -69,5 +69,9 @@ void qlink_chandef_q2cfg(struct wiphy *wiphy,
 void qlink_chandef_cfg2q(const struct cfg80211_chan_def *chdef,
 			 struct qlink_chandef *qch);
 enum qlink_hidden_ssid qlink_hidden_ssid_nl2q(enum nl80211_hidden_ssid nl_val);
+bool qtnf_utils_is_bit_set(const u8 *arr, unsigned int bit,
+			   unsigned int arr_max_len);
+void qlink_acl_data_cfg2q(const struct cfg80211_acl_data *acl,
+			  struct qlink_acl_data *qacl);
 
 #endif /* _QTN_FMAC_QLINK_UTIL_H_ */
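
Since struct qlink_acl_data ends in a flexible array, a caller of qlink_acl_data_cfg2q() has to size the destination for the full entry count first. A hedged allocation sketch (error handling elided):

	size_t acl_size = sizeof(struct qlink_acl_data) +
			  acl->n_acl_entries * sizeof(struct qlink_mac_address);
	struct qlink_acl_data *qacl = kzalloc(acl_size, GFP_KERNEL);

	if (qacl)
		qlink_acl_data_cfg2q(acl, qacl);
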
diff --git a/drivers/net/wireless/quantenna/qtnfmac/util.c b/drivers/net/wireless/quantenna/qtnfmac/util.c
index ed38e87471bf..e745733ba417 100644
--- a/drivers/net/wireless/quantenna/qtnfmac/util.c
+++ b/drivers/net/wireless/quantenna/qtnfmac/util.c
@@ -57,9 +57,10 @@ struct qtnf_sta_node *qtnf_sta_list_lookup_index(struct qtnf_sta_list *list,
 	return NULL;
 }
 
-struct qtnf_sta_node *qtnf_sta_list_add(struct qtnf_sta_list *list,
+struct qtnf_sta_node *qtnf_sta_list_add(struct qtnf_vif *vif,
 					const u8 *mac)
 {
+	struct qtnf_sta_list *list = &vif->sta_list;
 	struct qtnf_sta_node *node;
 
 	if (unlikely(!mac))
@@ -77,13 +78,15 @@ struct qtnf_sta_node *qtnf_sta_list_add(struct qtnf_sta_list *list,
 	ether_addr_copy(node->mac_addr, mac);
 	list_add_tail(&node->list, &list->head);
 	atomic_inc(&list->size);
+	++vif->generation;
 
 done:
 	return node;
 }
 
-bool qtnf_sta_list_del(struct qtnf_sta_list *list, const u8 *mac)
+bool qtnf_sta_list_del(struct qtnf_vif *vif, const u8 *mac)
 {
+	struct qtnf_sta_list *list = &vif->sta_list;
 	struct qtnf_sta_node *node;
 	bool ret = false;
 
@@ -93,6 +96,7 @@ bool qtnf_sta_list_del(struct qtnf_sta_list *list, const u8 *mac)
 		list_del(&node->list);
 		atomic_dec(&list->size);
 		kfree(node);
+		++vif->generation;
 		ret = true;
 	}
 
diff --git a/drivers/net/wireless/quantenna/qtnfmac/util.h b/drivers/net/wireless/quantenna/qtnfmac/util.h
index 0359eae8c24b..0d4d92b11540 100644
--- a/drivers/net/wireless/quantenna/qtnfmac/util.h
+++ b/drivers/net/wireless/quantenna/qtnfmac/util.h
@@ -26,9 +26,9 @@ struct qtnf_sta_node *qtnf_sta_list_lookup(struct qtnf_sta_list *list,
 					   const u8 *mac);
 struct qtnf_sta_node *qtnf_sta_list_lookup_index(struct qtnf_sta_list *list,
 						 size_t index);
-struct qtnf_sta_node *qtnf_sta_list_add(struct qtnf_sta_list *list,
+struct qtnf_sta_node *qtnf_sta_list_add(struct qtnf_vif *vif,
 					const u8 *mac);
-bool qtnf_sta_list_del(struct qtnf_sta_list *list, const u8 *mac);
+bool qtnf_sta_list_del(struct qtnf_vif *vif, const u8 *mac);
 
 void qtnf_sta_list_free(struct qtnf_sta_list *list);
 
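
The STA list helpers now take the vif so they can bump vif->generation on every add and delete; an indexed reader can then compare generations between lookups to notice a concurrent change instead of holding a lock across the whole walk. The idea in isolation, with hypothetical names:

	struct gen_list {
		struct list_head head;
		unsigned int generation;	/* bumped on add/delete */
	};

	/* reader: has the list mutated since this snapshot was taken? */
	static bool gen_list_is_stale(const struct gen_list *list,
				      unsigned int snapshot)
	{
		return list->generation != snapshot;
	}
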
diff --git a/drivers/net/wireless/ralink/rt2x00/rt2800lib.c b/drivers/net/wireless/ralink/rt2x00/rt2800lib.c
index d2c289446c00..429d07b651dd 100644
--- a/drivers/net/wireless/ralink/rt2x00/rt2800lib.c
+++ b/drivers/net/wireless/ralink/rt2x00/rt2800lib.c
@@ -4903,7 +4903,7 @@ void rt2800_vco_calibration(struct rt2x00_dev *rt2x00dev)
 		min_sleep = 2000;
 		break;
 	default:
-		WARN_ONCE(1, "Not supported RF chipet %x for VCO recalibration",
+		WARN_ONCE(1, "Not supported RF chipset %x for VCO recalibration",
 			  rt2x00dev->chip.rf);
 		return;
 	}
diff --git a/drivers/net/wireless/ralink/rt2x00/rt2x00mac.c b/drivers/net/wireless/ralink/rt2x00/rt2x00mac.c
index ecc96312a370..a971bc7a6b63 100644
--- a/drivers/net/wireless/ralink/rt2x00/rt2x00mac.c
+++ b/drivers/net/wireless/ralink/rt2x00/rt2x00mac.c
@@ -142,32 +142,28 @@ void rt2x00mac_tx(struct ieee80211_hw *hw,
 	if (!rt2x00dev->ops->hw->set_rts_threshold &&
 	    (tx_info->control.rates[0].flags & (IEEE80211_TX_RC_USE_RTS_CTS |
 						IEEE80211_TX_RC_USE_CTS_PROTECT))) {
-		if (rt2x00queue_available(queue) <= 1)
-			goto exit_fail;
+		if (rt2x00queue_available(queue) <= 1) {
+			/*
+			 * Recheck for full queue under lock to avoid race
+			 * conditions with rt2x00lib_txdone().
+			 */
+			spin_lock(&queue->tx_lock);
+			if (rt2x00queue_threshold(queue))
+				rt2x00queue_pause_queue(queue);
+			spin_unlock(&queue->tx_lock);
+
+			goto exit_free_skb;
+		}
 
 		if (rt2x00mac_tx_rts_cts(rt2x00dev, queue, skb))
-			goto exit_fail;
+			goto exit_free_skb;
 	}
 
 	if (unlikely(rt2x00queue_write_tx_frame(queue, skb, control->sta, false)))
-		goto exit_fail;
-
-	/*
-	 * Pausing queue has to be serialized with rt2x00lib_txdone(). Note
-	 * we should not use spin_lock_bh variant as bottom halve was already
-	 * disabled before ieee80211_xmit() call.
-	 */
-	spin_lock(&queue->tx_lock);
-	if (rt2x00queue_threshold(queue))
-		rt2x00queue_pause_queue(queue);
-	spin_unlock(&queue->tx_lock);
+		goto exit_free_skb;
 
 	return;
 
- exit_fail:
-	spin_lock(&queue->tx_lock);
-	rt2x00queue_pause_queue(queue);
-	spin_unlock(&queue->tx_lock);
  exit_free_skb:
 	ieee80211_free_txskb(hw, skb);
 }
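
The core of this fix is that the threshold check and the pause must be one atomic step with respect to rt2x00lib_txdone(): with an unlocked check, txdone can drain and unpause the queue between the test and the pause, leaving it paused forever. The pattern reduced to its essentials (generic names, not the driver's API):

	spin_lock(&q->tx_lock);
	if (queue_above_threshold(q))	/* check ... */
		queue_pause(q);		/* ... and act under the same lock */
	spin_unlock(&q->tx_lock);
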
diff --git a/drivers/net/wireless/ralink/rt2x00/rt2x00queue.c b/drivers/net/wireless/ralink/rt2x00/rt2x00queue.c
index a2c1ca5c76d1..a6884e73d2ab 100644
--- a/drivers/net/wireless/ralink/rt2x00/rt2x00queue.c
+++ b/drivers/net/wireless/ralink/rt2x00/rt2x00queue.c
@@ -715,6 +715,14 @@ int rt2x00queue_write_tx_frame(struct data_queue *queue, struct sk_buff *skb,
 	rt2x00queue_kick_tx_queue(queue, &txdesc);
 
 out:
+	/*
+	 * Pausing queue has to be serialized with rt2x00lib_txdone(), so we
+	 * do this under queue->tx_lock. Bottom halves were already disabled
+	 * before the ieee80211_xmit() call.
+	 */
+	if (rt2x00queue_threshold(queue))
+		rt2x00queue_pause_queue(queue);
+
 	spin_unlock(&queue->tx_lock);
 	return ret;
 }
@@ -1244,10 +1252,8 @@ int rt2x00queue_allocate(struct rt2x00_dev *rt2x00dev)
 	rt2x00dev->data_queues = 2 + rt2x00dev->ops->tx_queues + req_atim;
 
 	queue = kcalloc(rt2x00dev->data_queues, sizeof(*queue), GFP_KERNEL);
-	if (!queue) {
-		rt2x00_err(rt2x00dev, "Queue allocation failed\n");
+	if (!queue)
 		return -ENOMEM;
-	}
 
 	/*
 	 * Initialize pointers
diff --git a/drivers/net/wireless/realtek/rtl818x/rtl8187/rtl8225.c b/drivers/net/wireless/realtek/rtl818x/rtl8187/rtl8225.c
index e6668ffb77e6..ff0971f1e2c8 100644
--- a/drivers/net/wireless/realtek/rtl818x/rtl8187/rtl8225.c
+++ b/drivers/net/wireless/realtek/rtl818x/rtl8187/rtl8225.c
@@ -688,10 +688,7 @@ static void rtl8225z2_b_rf_set_tx_power(struct ieee80211_hw *dev, int channel)
 	cck_power = priv->channels[channel - 1].hw_value & 0xF;
 	ofdm_power = priv->channels[channel - 1].hw_value >> 4;
 
-	if (cck_power > 15)
-		cck_power = (priv->hw_rev == RTL8187BvB) ? 15 : 22;
-	else
-		cck_power += (priv->hw_rev == RTL8187BvB) ? 0 : 7;
+	cck_power += (priv->hw_rev == RTL8187BvB) ? 0 : 7;
 	cck_power += priv->txpwr_base & 0xF;
 	cck_power = min(cck_power, (u8)35);
 
diff --git a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.h b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.h
index 95e3993d8a33..8828baf26e7b 100644
--- a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.h
+++ b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.h
@@ -1172,7 +1172,7 @@ struct rtl8723bu_c2h {
 
 			u8 basic_rate:1;
 			u8 bt_has_reset:1;
-			u8 dummy4_1:1;;
+			u8 dummy4_1:1;
 			u8 ignore_wlan:1;
 			u8 auto_report:1;
 			u8 dummy4_2:3;
diff --git a/drivers/net/wireless/realtek/rtlwifi/base.c b/drivers/net/wireless/realtek/rtlwifi/base.c
index cad2272ae21b..d6c03bd5cc65 100644
--- a/drivers/net/wireless/realtek/rtlwifi/base.c
+++ b/drivers/net/wireless/realtek/rtlwifi/base.c
@@ -244,7 +244,8 @@ static void _rtl_init_hw_vht_capab(struct ieee80211_hw *hw,
 	struct rtl_priv *rtlpriv = rtl_priv(hw);
 	struct rtl_hal *rtlhal = rtl_hal(rtlpriv);
 
-	if (rtlhal->hw_type == HARDWARE_TYPE_RTL8812AE) {
+	if (rtlhal->hw_type == HARDWARE_TYPE_RTL8812AE ||
+	    rtlhal->hw_type == HARDWARE_TYPE_RTL8822BE) {
 		u16 mcs_map;
 
 		vht_cap->vht_supported = true;
@@ -395,6 +396,7 @@ static void _rtl_init_mac80211(struct ieee80211_hw *hw)
 	ieee80211_hw_set(hw, CONNECTION_MONITOR);
 	ieee80211_hw_set(hw, MFP_CAPABLE);
 	ieee80211_hw_set(hw, REPORTS_TX_ACK_STATUS);
+	ieee80211_hw_set(hw, SUPPORTS_AMSDU_IN_AMPDU);
 
 	/* swlps or hwlps has been set in diff chip in init_sw_vars */
 	if (rtlpriv->psc.swctrl_lps) {
@@ -551,7 +553,8 @@ int rtl_init_core(struct ieee80211_hw *hw)
 
 	/* <4> locks */
 	mutex_init(&rtlpriv->locks.conf_mutex);
-	spin_lock_init(&rtlpriv->locks.ips_lock);
+	mutex_init(&rtlpriv->locks.ips_mutex);
+	mutex_init(&rtlpriv->locks.lps_mutex);
 	spin_lock_init(&rtlpriv->locks.irq_th_lock);
 	spin_lock_init(&rtlpriv->locks.h2c_lock);
 	spin_lock_init(&rtlpriv->locks.rf_ps_lock);
@@ -561,9 +564,7 @@ int rtl_init_core(struct ieee80211_hw *hw)
 	spin_lock_init(&rtlpriv->locks.c2hcmd_lock);
 	spin_lock_init(&rtlpriv->locks.scan_list_lock);
 	spin_lock_init(&rtlpriv->locks.cck_and_rw_pagea_lock);
-	spin_lock_init(&rtlpriv->locks.check_sendpkt_lock);
 	spin_lock_init(&rtlpriv->locks.fw_ps_lock);
-	spin_lock_init(&rtlpriv->locks.lps_lock);
 	spin_lock_init(&rtlpriv->locks.iqk_lock);
 	/* <5> init list */
 	INIT_LIST_HEAD(&rtlpriv->entry_list);
@@ -697,14 +698,94 @@ static void _rtl_query_protection_mode(struct ieee80211_hw *hw,
 	}
 }
 
+u8 rtl_mrate_idx_to_arfr_id(struct ieee80211_hw *hw, u8 rate_index,
+			    enum wireless_mode wirelessmode)
+{
+	struct rtl_priv *rtlpriv = rtl_priv(hw);
+	struct rtl_phy *rtlphy = &rtlpriv->phy;
+	u8 ret = 0;
+
+	switch (rate_index) {
+	case RATR_INX_WIRELESS_NGB:
+		if (rtlphy->rf_type == RF_1T1R)
+			ret = RATEID_IDX_BGN_40M_1SS;
+		else
+			ret = RATEID_IDX_BGN_40M_2SS;
+		break;
+	case RATR_INX_WIRELESS_N:
+	case RATR_INX_WIRELESS_NG:
+		if (rtlphy->rf_type == RF_1T1R)
+			ret = RATEID_IDX_GN_N1SS;
+		else
+			ret = RATEID_IDX_GN_N2SS;
+		break;
+	case RATR_INX_WIRELESS_NB:
+		if (rtlphy->rf_type == RF_1T1R)
+			ret = RATEID_IDX_BGN_20M_1SS_BN;
+		else
+			ret = RATEID_IDX_BGN_20M_2SS_BN;
+		break;
+	case RATR_INX_WIRELESS_GB:
+		ret = RATEID_IDX_BG;
+		break;
+	case RATR_INX_WIRELESS_G:
+		ret = RATEID_IDX_G;
+		break;
+	case RATR_INX_WIRELESS_B:
+		ret = RATEID_IDX_B;
+		break;
+	case RATR_INX_WIRELESS_MC:
+		if (wirelessmode == WIRELESS_MODE_B ||
+		    wirelessmode == WIRELESS_MODE_G ||
+		    wirelessmode == WIRELESS_MODE_N_24G ||
+		    wirelessmode == WIRELESS_MODE_AC_24G)
+			ret = RATEID_IDX_BG;
+		else
+			ret = RATEID_IDX_G;
+		break;
+	case RATR_INX_WIRELESS_AC_5N:
+		if (rtlphy->rf_type == RF_1T1R)
+			ret = RATEID_IDX_VHT_1SS;
+		else
+			ret = RATEID_IDX_VHT_2SS;
+		break;
+	case RATR_INX_WIRELESS_AC_24N:
+		if (rtlphy->current_chan_bw == HT_CHANNEL_WIDTH_80) {
+			if (rtlphy->rf_type == RF_1T1R)
+				ret = RATEID_IDX_VHT_1SS;
+			else
+				ret = RATEID_IDX_VHT_2SS;
+		} else {
+			if (rtlphy->rf_type == RF_1T1R)
+				ret = RATEID_IDX_MIX1;
+			else
+				ret = RATEID_IDX_MIX2;
+		}
+		break;
+	default:
+		ret = RATEID_IDX_BGN_40M_2SS;
+		break;
+	}
+	return ret;
+}
+EXPORT_SYMBOL(rtl_mrate_idx_to_arfr_id);
+
 static void _rtl_txrate_selectmode(struct ieee80211_hw *hw,
 				   struct ieee80211_sta *sta,
 				   struct rtl_tcb_desc *tcb_desc)
 {
+#define SET_RATE_ID(rate_id)					\
+	({typeof(rate_id) _id = rate_id;			\
+	  ((rtlpriv->cfg->spec_ver & RTL_SPEC_NEW_RATEID) ?	\
+		rtl_mrate_idx_to_arfr_id(hw, _id,		\
+			(sta_entry ? sta_entry->wireless_mode :	\
+			 WIRELESS_MODE_G)) :			\
+		_id); })
+
 	struct rtl_priv *rtlpriv = rtl_priv(hw);
 	struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
 	struct rtl_sta_info *sta_entry = NULL;
-	u8 ratr_index = 7;
+	u8 ratr_index = SET_RATE_ID(RATR_INX_WIRELESS_MC);
 
 	if (sta) {
 		sta_entry = (struct rtl_sta_info *) sta->drv_priv;
@@ -719,7 +800,8 @@ static void _rtl_txrate_selectmode(struct ieee80211_hw *hw,
 				tcb_desc->hw_rate =
 				    rtlpriv->cfg->maps[RTL_RC_CCK_RATE2M];
 				tcb_desc->use_driver_rate = 1;
-				tcb_desc->ratr_index = RATR_INX_WIRELESS_MC;
+				tcb_desc->ratr_index =
+					SET_RATE_ID(RATR_INX_WIRELESS_MC);
 			} else {
 				tcb_desc->ratr_index = ratr_index;
 			}
@@ -735,22 +817,30 @@ static void _rtl_txrate_selectmode(struct ieee80211_hw *hw,
 		    mac->opmode == NL80211_IFTYPE_MESH_POINT) {
 			tcb_desc->mac_id = 0;
 
-			if (mac->mode == WIRELESS_MODE_AC_5G)
+			if (sta &&
+			    (rtlpriv->cfg->spec_ver & RTL_SPEC_NEW_RATEID))
+				;	/* use sta_entry->ratr_index */
+			else if (mac->mode == WIRELESS_MODE_AC_5G)
 				tcb_desc->ratr_index =
-					RATR_INX_WIRELESS_AC_5N;
+					SET_RATE_ID(RATR_INX_WIRELESS_AC_5N);
 			else if (mac->mode == WIRELESS_MODE_AC_24G)
 				tcb_desc->ratr_index =
-					RATR_INX_WIRELESS_AC_24N;
+					SET_RATE_ID(RATR_INX_WIRELESS_AC_24N);
 			else if (mac->mode == WIRELESS_MODE_N_24G)
-				tcb_desc->ratr_index = RATR_INX_WIRELESS_NGB;
+				tcb_desc->ratr_index =
+					SET_RATE_ID(RATR_INX_WIRELESS_NGB);
 			else if (mac->mode == WIRELESS_MODE_N_5G)
-				tcb_desc->ratr_index = RATR_INX_WIRELESS_NG;
+				tcb_desc->ratr_index =
+					SET_RATE_ID(RATR_INX_WIRELESS_NG);
 			else if (mac->mode & WIRELESS_MODE_G)
-				tcb_desc->ratr_index = RATR_INX_WIRELESS_GB;
+				tcb_desc->ratr_index =
+					SET_RATE_ID(RATR_INX_WIRELESS_GB);
 			else if (mac->mode & WIRELESS_MODE_B)
-				tcb_desc->ratr_index = RATR_INX_WIRELESS_B;
+				tcb_desc->ratr_index =
+					SET_RATE_ID(RATR_INX_WIRELESS_B);
 			else if (mac->mode & WIRELESS_MODE_A)
-				tcb_desc->ratr_index = RATR_INX_WIRELESS_G;
+				tcb_desc->ratr_index =
+					SET_RATE_ID(RATR_INX_WIRELESS_G);
 
 		} else if (mac->opmode == NL80211_IFTYPE_AP ||
 			mac->opmode == NL80211_IFTYPE_ADHOC) {
@@ -764,6 +854,7 @@ static void _rtl_txrate_selectmode(struct ieee80211_hw *hw,
 			}
 		}
 	}
+#undef SET_RATE_ID
 }
 
 static void _rtl_query_bandwidth_mode(struct ieee80211_hw *hw,
@@ -859,8 +950,8 @@ static u8 _rtl_get_highest_n_rate(struct ieee80211_hw *hw,
 	struct rtl_phy *rtlphy = &rtlpriv->phy;
 	u8 hw_rate;
 
-	if ((get_rf_type(rtlphy) == RF_2T2R) &&
-	    (sta->ht_cap.mcs.rx_mask[1] != 0))
+	if (get_rf_type(rtlphy) == RF_2T2R &&
+	    sta->ht_cap.mcs.rx_mask[1] != 0)
 		hw_rate = rtlpriv->cfg->maps[RTL_RC_HT_RATEMCS15];
 	else
 		hw_rate = rtlpriv->cfg->maps[RTL_RC_HT_RATEMCS7];
@@ -1140,9 +1231,19 @@ void rtl_get_tcb_desc(struct ieee80211_hw *hw,
 		      struct ieee80211_sta *sta,
 		      struct sk_buff *skb, struct rtl_tcb_desc *tcb_desc)
 {
+#define SET_RATE_ID(rate_id)					\
+	({typeof(rate_id) _id = rate_id;			\
+	  ((rtlpriv->cfg->spec_ver & RTL_SPEC_NEW_RATEID) ?	\
+		rtl_mrate_idx_to_arfr_id(hw, _id,		\
+			(sta_entry ? sta_entry->wireless_mode :	\
+			 WIRELESS_MODE_G)) :			\
+		_id); })
+
 	struct rtl_priv *rtlpriv = rtl_priv(hw);
 	struct rtl_mac *rtlmac = rtl_mac(rtl_priv(hw));
 	struct ieee80211_hdr *hdr = rtl_get_hdr(skb);
+	struct rtl_sta_info *sta_entry =
+		(sta ? (struct rtl_sta_info *)sta->drv_priv : NULL);
 
 	__le16 fc = rtl_get_fc(skb);
 
@@ -1165,7 +1266,8 @@ void rtl_get_tcb_desc(struct ieee80211_hw *hw,
 		if (info->control.rates[0].idx == 0 ||
 				ieee80211_is_nullfunc(fc)) {
 			tcb_desc->use_driver_rate = true;
-			tcb_desc->ratr_index = RATR_INX_WIRELESS_MC;
+			tcb_desc->ratr_index =
+					SET_RATE_ID(RATR_INX_WIRELESS_MC);
 
 			tcb_desc->disable_ratefallback = 1;
 		} else {
@@ -1180,7 +1282,7 @@ void rtl_get_tcb_desc(struct ieee80211_hw *hw,
 				tcb_desc->hw_rate =
 				_rtl_get_vht_highest_n_rate(hw, sta);
 			} else {
-				if (sta && (sta->ht_cap.ht_supported)) {
+				if (sta && sta->ht_cap.ht_supported) {
 					tcb_desc->hw_rate =
 						_rtl_get_highest_n_rate(hw, sta);
 				} else {
@@ -1207,11 +1309,12 @@ void rtl_get_tcb_desc(struct ieee80211_hw *hw,
 		_rtl_query_protection_mode(hw, tcb_desc, info);
 	} else {
 		tcb_desc->use_driver_rate = true;
-		tcb_desc->ratr_index = RATR_INX_WIRELESS_MC;
+		tcb_desc->ratr_index = SET_RATE_ID(RATR_INX_WIRELESS_MC);
 		tcb_desc->disable_ratefallback = 1;
 		tcb_desc->mac_id = 0;
 		tcb_desc->packet_bw = false;
 	}
+#undef SET_RATE_ID
 }
 EXPORT_SYMBOL(rtl_get_tcb_desc);
 
@@ -1229,7 +1332,6 @@ bool rtl_tx_mgmt_proc(struct ieee80211_hw *hw, struct sk_buff *skb)
 	}
 	if (ieee80211_is_auth(fc)) {
 		RT_TRACE(rtlpriv, COMP_SEND, DBG_DMESG, "MAC80211_LINKING\n");
-		rtl_ips_nic_on(hw);
 
 		mac->link_state = MAC80211_LINKING;
 		/* Dul mac */
@@ -1321,6 +1423,10 @@ bool rtl_action_proc(struct ieee80211_hw *hw, struct sk_buff *skb, u8 is_tx)
 				  le16_to_cpu(mgmt->u.action.u.addba_req.capab);
 				tid = (capab &
 				       IEEE80211_ADDBA_PARAM_TID_MASK) >> 2;
+				if (tid >= MAX_TID_COUNT) {
+					rcu_read_unlock();
+					return true;
+				}
 				tid_data = &sta_entry->tids[tid];
 				if (tid_data->agg.rx_agg_state ==
 				    RTL_RX_AGG_START)
@@ -1726,7 +1832,7 @@ int rtl_tx_agg_oper(struct ieee80211_hw *hw,
 void rtl_rx_ampdu_apply(struct rtl_priv *rtlpriv)
 {
 	struct rtl_btc_ops *btc_ops = rtlpriv->btcoexist.btc_ops;
-	u8 reject_agg, ctrl_agg_size = 0, agg_size;
+	u8 reject_agg = 0, ctrl_agg_size = 0, agg_size = 0;
 
 	if (rtlpriv->cfg->ops->get_btc_status())
 		btc_ops->btc_get_ampdu_cfg(rtlpriv, &reject_agg,
@@ -1972,9 +2078,9 @@ void rtl_watchdog_wq_callback(void *data)
 		    rtlpriv->btcoexist.btc_ops->btc_is_bt_ctrl_lps(rtlpriv))
 			goto label_lps_done;
 
-		if (((rtlpriv->link_info.num_rx_inperiod +
-		      rtlpriv->link_info.num_tx_inperiod) > 8) ||
-		    (rtlpriv->link_info.num_rx_inperiod > 2))
+		if (rtlpriv->link_info.num_rx_inperiod +
+		      rtlpriv->link_info.num_tx_inperiod > 8 ||
+		    rtlpriv->link_info.num_rx_inperiod > 2)
 			rtl_lps_leave(hw);
 		else
 			rtl_lps_enter(hw);
@@ -2225,9 +2331,7 @@ static struct sk_buff *rtl_make_smps_action(struct ieee80211_hw *hw,
 	case IEEE80211_SMPS_AUTOMATIC:/* 0 */
 	case IEEE80211_SMPS_NUM_MODES:/* 4 */
 		WARN_ON(1);
-	/* Here will get a 'MISSING_BREAK' in Coverity Test, just ignore it.
-	 * According to Kernel Code, here is right.
-	 */
+	/* fall through */
 	case IEEE80211_SMPS_OFF:/* 1 */ /*MIMO_PS_NOLIMIT*/
 		action_frame->u.action.u.ht_smps.smps_control =
 				WLAN_HT_SMPS_CONTROL_DISABLED;/* 0 */
@@ -2528,6 +2632,9 @@ static int __init rtl_core_module_init(void)
 	if (rtl_rate_control_register())
 		pr_err("rtl: Unable to register rtl_rc, use default RC !!\n");
 
+	/* add debugfs */
+	rtl_debugfs_add_topdir();
+
 	/* init some global vars */
 	INIT_LIST_HEAD(&rtl_global_var.glb_priv_list);
 	spin_lock_init(&rtl_global_var.glb_list_lock);
@@ -2539,6 +2646,9 @@ static void __exit rtl_core_module_exit(void)
 {
 	/*RC*/
 	rtl_rate_control_unregister();
+
+	/* remove debugfs */
+	rtl_debugfs_remove_topdir();
 }
 
 module_init(rtl_core_module_init);
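
SET_RATE_ID uses a GNU C statement expression so its argument is evaluated exactly once and the ({ ... }) block itself yields the mapped value. A reduced illustration of the construct (hypothetical macro, not the driver's):

	#define MAP_RATE_ID(id, use_new_ids, map_fn)		\
		({ typeof(id) _id = (id);			\
		   (use_new_ids) ? map_fn(_id) : _id; })
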
diff --git a/drivers/net/wireless/realtek/rtlwifi/base.h b/drivers/net/wireless/realtek/rtlwifi/base.h
index 26735319b38f..acc924635818 100644
--- a/drivers/net/wireless/realtek/rtlwifi/base.h
+++ b/drivers/net/wireless/realtek/rtlwifi/base.h
@@ -162,6 +162,8 @@ void rtl_c2hcmd_wq_callback(void *data);
 void rtl_c2hcmd_launcher(struct ieee80211_hw *hw, int exec);
 void rtl_c2hcmd_enqueue(struct ieee80211_hw *hw, u8 tag, u8 len, u8 *val);
 
+u8 rtl_mrate_idx_to_arfr_id(struct ieee80211_hw *hw, u8 rate_index,
+			    enum wireless_mode wirelessmode);
 void rtl_get_tcb_desc(struct ieee80211_hw *hw,
 		      struct ieee80211_tx_info *info,
 		      struct ieee80211_sta *sta,
diff --git a/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8192e2ant.c b/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8192e2ant.c
index 44c25724529e..8fce371749d3 100644
--- a/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8192e2ant.c
+++ b/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8192e2ant.c
@@ -2704,11 +2704,11 @@ void ex_btc8192e2ant_init_coex_dm(struct btc_coexist *btcoexist)
 	btc8192e2ant_init_coex_dm(btcoexist);
 }
 
-void ex_btc8192e2ant_display_coex_info(struct btc_coexist *btcoexist)
+void ex_btc8192e2ant_display_coex_info(struct btc_coexist *btcoexist,
+				       struct seq_file *m)
 {
 	struct btc_board_info *board_info = &btcoexist->board_info;
 	struct btc_stack_info *stack_info = &btcoexist->stack_info;
-	struct rtl_priv *rtlpriv = btcoexist->adapter;
 	u8 u8tmp[4], i, bt_info_ext, ps_tdma_case = 0;
 	u16 u16tmp[4];
 	u32 u32tmp[4];
@@ -2719,75 +2719,64 @@ void ex_btc8192e2ant_display_coex_info(struct btc_coexist *btcoexist)
 	u8 wifi_dot11_chnl, wifi_hs_chnl;
 	u32 fw_ver = 0, bt_patch_ver = 0;
 
-	RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
-		 "\r\n ============[BT Coexist info]============");
+	seq_puts(m, "\n ============[BT Coexist info]============");
 
 	if (btcoexist->manual_control) {
-		RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
-			 "\r\n ===========[Under Manual Control]===========");
-		RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
-			 "\r\n ==========================================");
-	}
-
-	if (!board_info->bt_exist) {
-		RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n BT not exists !!!");
-		return;
+		seq_puts(m, "\n ===========[Under Manual Control]===========");
+		seq_puts(m, "\n ==========================================");
 	}
 
-	RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
-		 "\r\n %-35s = %d/ %d ", "Ant PG number/ Ant mechanism:",
+	seq_printf(m, "\n %-35s = %d/ %d ", "Ant PG number/ Ant mechanism:",
 		   board_info->pg_ant_num, board_info->btdm_ant_num);
 
-	RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s = %s / %d",
-		 "BT stack/ hci ext ver",
+	seq_printf(m, "\n %-35s = %s / %d", "BT stack/ hci ext ver",
 		   ((stack_info->profile_notified) ? "Yes" : "No"),
 		   stack_info->hci_version);
 
 	btcoexist->btc_get(btcoexist, BTC_GET_U4_BT_PATCH_VER, &bt_patch_ver);
 	btcoexist->btc_get(btcoexist, BTC_GET_U4_WIFI_FW_VER, &fw_ver);
-	RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
-		 "\r\n %-35s = %d_%d/ 0x%x/ 0x%x(%d)",
-		 "CoexVer/ FwVer/ PatchVer",
-		 glcoex_ver_date_8192e_2ant, glcoex_ver_8192e_2ant,
-		 fw_ver, bt_patch_ver, bt_patch_ver);
+	seq_printf(m, "\n %-35s = %d_%d/ 0x%x/ 0x%x(%d)",
+		   "CoexVer/ FwVer/ PatchVer",
+		   glcoex_ver_date_8192e_2ant, glcoex_ver_8192e_2ant,
+		   fw_ver, bt_patch_ver, bt_patch_ver);
 
 	btcoexist->btc_get(btcoexist, BTC_GET_BL_HS_OPERATION, &bt_hs_on);
 	btcoexist->btc_get(btcoexist, BTC_GET_U1_WIFI_DOT11_CHNL,
 			   &wifi_dot11_chnl);
 	btcoexist->btc_get(btcoexist, BTC_GET_U1_WIFI_HS_CHNL, &wifi_hs_chnl);
-	RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s = %d / %d(%d)",
-		 "Dot11 channel / HsMode(HsChnl)",
-		 wifi_dot11_chnl, bt_hs_on, wifi_hs_chnl);
+	seq_printf(m, "\n %-35s = %d / %d(%d)",
+		   "Dot11 channel / HsMode(HsChnl)",
+		   wifi_dot11_chnl, bt_hs_on, wifi_hs_chnl);
 
-	RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s = %3ph ",
-		 "H2C Wifi inform bt chnl Info", coex_dm->wifi_chnl_info);
+	seq_printf(m, "\n %-35s = %3ph ",
+		   "H2C Wifi inform bt chnl Info", coex_dm->wifi_chnl_info);
 
 	btcoexist->btc_get(btcoexist, BTC_GET_S4_WIFI_RSSI, &wifi_rssi);
 	btcoexist->btc_get(btcoexist, BTC_GET_S4_HS_RSSI, &bt_hs_rssi);
-	RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s = %d/ %d",
-		 "Wifi rssi/ HS rssi", wifi_rssi, bt_hs_rssi);
+	seq_printf(m, "\n %-35s = %d/ %d",
+		   "Wifi rssi/ HS rssi", wifi_rssi, bt_hs_rssi);
 
 	btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_SCAN, &scan);
 	btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_LINK, &link);
 	btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_ROAM, &roam);
-	RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s = %d/ %d/ %d ",
-		 "Wifi link/ roam/ scan", link, roam, scan);
+	seq_printf(m, "\n %-35s = %d/ %d/ %d ",
+		   "Wifi link/ roam/ scan", link, roam, scan);
 
 	btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_UNDER_5G, &wifi_under_5g);
 	btcoexist->btc_get(btcoexist, BTC_GET_U4_WIFI_BW, &wifi_bw);
 	btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_BUSY, &wifi_busy);
 	btcoexist->btc_get(btcoexist, BTC_GET_U4_WIFI_TRAFFIC_DIRECTION,
 			   &wifi_traffic_dir);
-	RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s = %s / %s/ %s ",
-		 "Wifi status", (wifi_under_5g ? "5G" : "2.4G"),
+	seq_printf(m, "\n %-35s = %s / %s/ %s ",
+		   "Wifi status", (wifi_under_5g ? "5G" : "2.4G"),
 		   ((BTC_WIFI_BW_LEGACY == wifi_bw) ? "Legacy" :
 			(((BTC_WIFI_BW_HT40 == wifi_bw) ? "HT40" : "HT20"))),
 		   ((!wifi_busy) ? "idle" :
 			((BTC_WIFI_TRAFFIC_TX == wifi_traffic_dir) ?
 				"uplink" : "downlink")));
 
-	RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s = [%s/ %d/ %d] ",
-		 "BT [status/ rssi/ retryCnt]",
+	seq_printf(m, "\n %-35s = [%s/ %d/ %d] ",
+		   "BT [status/ rssi/ retryCnt]",
 		   ((btcoexist->bt_info.bt_disabled) ? ("disabled") :
 		    ((coex_sta->c2h_bt_inquiry_page) ?
 		     ("inquiry/page scan") :
@@ -2797,131 +2786,129 @@ void ex_btc8192e2ant_display_coex_info(struct btc_coexist *btcoexist)
 			   coex_dm->bt_status) ? "connected-idle" : "busy")))),
 		   coex_sta->bt_rssi, coex_sta->bt_retry_cnt);
 
-	RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s = %d / %d / %d / %d",
-		 "SCO/HID/PAN/A2DP", stack_info->sco_exist,
+	seq_printf(m, "\n %-35s = %d / %d / %d / %d",
+		   "SCO/HID/PAN/A2DP", stack_info->sco_exist,
 		   stack_info->hid_exist, stack_info->pan_exist,
 		   stack_info->a2dp_exist);
-	btcoexist->btc_disp_dbg_msg(btcoexist, BTC_DBG_DISP_BT_LINK_INFO);
+	btcoexist->btc_disp_dbg_msg(btcoexist, BTC_DBG_DISP_BT_LINK_INFO, m);
 
 	bt_info_ext = coex_sta->bt_info_ext;
-	RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s = %s",
-		 "BT Info A2DP rate",
+	seq_printf(m, "\n %-35s = %s",
+		   "BT Info A2DP rate",
 		   (bt_info_ext&BIT0) ? "Basic rate" : "EDR rate");
 
 	for (i = 0; i < BT_INFO_SRC_8192E_2ANT_MAX; i++) {
 		if (coex_sta->bt_info_c2h_cnt[i]) {
-			RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
-				 "\r\n %-35s = %7ph(%d)",
-				 glbt_info_src_8192e_2ant[i],
-				 coex_sta->bt_info_c2h[i],
-				 coex_sta->bt_info_c2h_cnt[i]);
+			seq_printf(m, "\n %-35s = %7ph(%d)",
+				   glbt_info_src_8192e_2ant[i],
+				   coex_sta->bt_info_c2h[i],
+				   coex_sta->bt_info_c2h_cnt[i]);
 		}
 	}
 
-	RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s = %s/%s",
-		 "PS state, IPS/LPS",
-		 ((coex_sta->under_ips ? "IPS ON" : "IPS OFF")),
-		 ((coex_sta->under_lps ? "LPS ON" : "LPS OFF")));
-	btcoexist->btc_disp_dbg_msg(btcoexist, BTC_DBG_DISP_FW_PWR_MODE_CMD);
+	seq_printf(m, "\n %-35s = %s/%s",
+		   "PS state, IPS/LPS",
+		   ((coex_sta->under_ips ? "IPS ON" : "IPS OFF")),
+		   ((coex_sta->under_lps ? "LPS ON" : "LPS OFF")));
+	btcoexist->btc_disp_dbg_msg(btcoexist, BTC_DBG_DISP_FW_PWR_MODE_CMD, m);
 
-	RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s = 0x%x ", "SS Type",
-		 coex_dm->cur_ss_type);
+	seq_printf(m, "\n %-35s = 0x%x ", "SS Type",
+		   coex_dm->cur_ss_type);
 
 	/* Sw mechanism	*/
-	RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s",
-		 "============[Sw mechanism]============");
-	RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s = %d/ %d/ %d ",
-		 "SM1[ShRf/ LpRA/ LimDig]", coex_dm->cur_rf_rx_lpf_shrink,
-		 coex_dm->cur_low_penalty_ra, coex_dm->limited_dig);
-	RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s = %d/ %d/ %d(0x%x) ",
-		 "SM2[AgcT/ AdcB/ SwDacSwing(lvl)]",
-		 coex_dm->cur_agc_table_en, coex_dm->cur_adc_back_off,
-		 coex_dm->cur_dac_swing_on, coex_dm->cur_dac_swing_lvl);
-
-	RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s = 0x%x ", "Rate Mask",
-		 btcoexist->bt_info.ra_mask);
+	seq_printf(m, "\n %-35s",
+		   "============[Sw mechanism]============");
+	seq_printf(m, "\n %-35s = %d/ %d/ %d ",
+		   "SM1[ShRf/ LpRA/ LimDig]", coex_dm->cur_rf_rx_lpf_shrink,
+		   coex_dm->cur_low_penalty_ra, coex_dm->limited_dig);
+	seq_printf(m, "\n %-35s = %d/ %d/ %d(0x%x) ",
+		   "SM2[AgcT/ AdcB/ SwDacSwing(lvl)]",
+		   coex_dm->cur_agc_table_en, coex_dm->cur_adc_back_off,
+		   coex_dm->cur_dac_swing_on, coex_dm->cur_dac_swing_lvl);
+
+	seq_printf(m, "\n %-35s = 0x%x ", "Rate Mask",
+		   btcoexist->bt_info.ra_mask);
 
 	/* Fw mechanism	*/
-	RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s",
-		 "============[Fw mechanism]============");
+	seq_printf(m, "\n %-35s",
+		   "============[Fw mechanism]============");
 
 	ps_tdma_case = coex_dm->cur_ps_tdma;
-	RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
-		 "\r\n %-35s = %5ph case-%d (auto:%d)",
-		 "PS TDMA", coex_dm->ps_tdma_para,
-		 ps_tdma_case, coex_dm->auto_tdma_adjust);
+	seq_printf(m,
+		   "\n %-35s = %5ph case-%d (auto:%d)",
+		   "PS TDMA", coex_dm->ps_tdma_para,
+		   ps_tdma_case, coex_dm->auto_tdma_adjust);
 
-	RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s = %d/ %d ",
-		 "DecBtPwr/ IgnWlanAct",
-		 coex_dm->cur_dec_bt_pwr, coex_dm->cur_ignore_wlan_act);
+	seq_printf(m, "\n %-35s = %d/ %d ",
+		   "DecBtPwr/ IgnWlanAct",
+		   coex_dm->cur_dec_bt_pwr, coex_dm->cur_ignore_wlan_act);
 
 	/* Hw setting */
-	RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s",
-		 "============[Hw setting]============");
+	seq_printf(m, "\n %-35s",
+		   "============[Hw setting]============");
 
-	RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s = 0x%x",
-		 "RF-A, 0x1e initVal", coex_dm->bt_rf0x1e_backup);
+	seq_printf(m, "\n %-35s = 0x%x",
+		   "RF-A, 0x1e initVal", coex_dm->bt_rf0x1e_backup);
 
-	RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s = 0x%x/0x%x/0x%x/0x%x",
-		 "backup ARFR1/ARFR2/RL/AMaxTime", coex_dm->backup_arfr_cnt1,
-		 coex_dm->backup_arfr_cnt2, coex_dm->backup_retry_limit,
-		 coex_dm->backup_ampdu_maxtime);
+	seq_printf(m, "\n %-35s = 0x%x/0x%x/0x%x/0x%x",
+		   "backup ARFR1/ARFR2/RL/AMaxTime", coex_dm->backup_arfr_cnt1,
+		   coex_dm->backup_arfr_cnt2, coex_dm->backup_retry_limit,
+		   coex_dm->backup_ampdu_maxtime);
 
 	u32tmp[0] = btcoexist->btc_read_4byte(btcoexist, 0x430);
 	u32tmp[1] = btcoexist->btc_read_4byte(btcoexist, 0x434);
 	u16tmp[0] = btcoexist->btc_read_2byte(btcoexist, 0x42a);
 	u8tmp[0] = btcoexist->btc_read_1byte(btcoexist, 0x456);
-	RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s = 0x%x/0x%x/0x%x/0x%x",
-		 "0x430/0x434/0x42a/0x456",
-		 u32tmp[0], u32tmp[1], u16tmp[0], u8tmp[0]);
+	seq_printf(m, "\n %-35s = 0x%x/0x%x/0x%x/0x%x",
+		   "0x430/0x434/0x42a/0x456",
+		   u32tmp[0], u32tmp[1], u16tmp[0], u8tmp[0]);
 
 	u32tmp[0] = btcoexist->btc_read_4byte(btcoexist, 0xc04);
 	u32tmp[1] = btcoexist->btc_read_4byte(btcoexist, 0xd04);
 	u32tmp[2] = btcoexist->btc_read_4byte(btcoexist, 0x90c);
-	RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s = 0x%x/ 0x%x/ 0x%x",
-		 "0xc04/ 0xd04/ 0x90c", u32tmp[0], u32tmp[1], u32tmp[2]);
+	seq_printf(m, "\n %-35s = 0x%x/ 0x%x/ 0x%x",
+		   "0xc04/ 0xd04/ 0x90c", u32tmp[0], u32tmp[1], u32tmp[2]);
 
 	u8tmp[0] = btcoexist->btc_read_1byte(btcoexist, 0x778);
-	RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s = 0x%x", "0x778",
-		 u8tmp[0]);
+	seq_printf(m, "\n %-35s = 0x%x", "0x778", u8tmp[0]);
 
 	u8tmp[0] = btcoexist->btc_read_1byte(btcoexist, 0x92c);
 	u32tmp[0] = btcoexist->btc_read_4byte(btcoexist, 0x930);
-	RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s = 0x%x/ 0x%x",
-		 "0x92c/ 0x930", (u8tmp[0]), u32tmp[0]);
+	seq_printf(m, "\n %-35s = 0x%x/ 0x%x",
+		   "0x92c/ 0x930", (u8tmp[0]), u32tmp[0]);
 
 	u8tmp[0] = btcoexist->btc_read_1byte(btcoexist, 0x40);
 	u8tmp[1] = btcoexist->btc_read_1byte(btcoexist, 0x4f);
-	RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s = 0x%x/ 0x%x",
-		 "0x40/ 0x4f", u8tmp[0], u8tmp[1]);
+	seq_printf(m, "\n %-35s = 0x%x/ 0x%x",
+		   "0x40/ 0x4f", u8tmp[0], u8tmp[1]);
 
 	u32tmp[0] = btcoexist->btc_read_4byte(btcoexist, 0x550);
 	u8tmp[0] = btcoexist->btc_read_1byte(btcoexist, 0x522);
-	RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s = 0x%x/ 0x%x",
-		 "0x550(bcn ctrl)/0x522", u32tmp[0], u8tmp[0]);
+	seq_printf(m, "\n %-35s = 0x%x/ 0x%x",
+		   "0x550(bcn ctrl)/0x522", u32tmp[0], u8tmp[0]);
 
 	u32tmp[0] = btcoexist->btc_read_4byte(btcoexist, 0xc50);
-	RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s = 0x%x", "0xc50(dig)",
-		 u32tmp[0]);
+	seq_printf(m, "\n %-35s = 0x%x", "0xc50(dig)",
+		   u32tmp[0]);
 
 	u32tmp[0] = btcoexist->btc_read_4byte(btcoexist, 0x6c0);
 	u32tmp[1] = btcoexist->btc_read_4byte(btcoexist, 0x6c4);
 	u32tmp[2] = btcoexist->btc_read_4byte(btcoexist, 0x6c8);
 	u8tmp[0] = btcoexist->btc_read_1byte(btcoexist, 0x6cc);
-	RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
-		 "\r\n %-35s = 0x%x/ 0x%x/ 0x%x/ 0x%x",
-		 "0x6c0/0x6c4/0x6c8/0x6cc(coexTable)",
-		 u32tmp[0], u32tmp[1], u32tmp[2], u8tmp[0]);
-
-	RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s = %d/ %d",
-		 "0x770(hp rx[31:16]/tx[15:0])",
-		 coex_sta->high_priority_rx, coex_sta->high_priority_tx);
-	RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s = %d/ %d",
-		 "0x774(lp rx[31:16]/tx[15:0])",
-		 coex_sta->low_priority_rx, coex_sta->low_priority_tx);
+	seq_printf(m,
+		   "\n %-35s = 0x%x/ 0x%x/ 0x%x/ 0x%x",
+		   "0x6c0/0x6c4/0x6c8/0x6cc(coexTable)",
+		   u32tmp[0], u32tmp[1], u32tmp[2], u8tmp[0]);
+
+	seq_printf(m, "\n %-35s = %d/ %d",
+		   "0x770(hp rx[31:16]/tx[15:0])",
+		   coex_sta->high_priority_rx, coex_sta->high_priority_tx);
+	seq_printf(m, "\n %-35s = %d/ %d",
+		   "0x774(lp rx[31:16]/tx[15:0])",
+		   coex_sta->low_priority_rx, coex_sta->low_priority_tx);
 	if (btcoexist->auto_report_2ant)
 		btc8192e2ant_monitor_bt_ctr(btcoexist);
-	btcoexist->btc_disp_dbg_msg(btcoexist, BTC_DBG_DISP_COEX_STATISTICS);
+	btcoexist->btc_disp_dbg_msg(btcoexist, BTC_DBG_DISP_COEX_STATISTICS, m);
 }
 
 void ex_btc8192e2ant_ips_notify(struct btc_coexist *btcoexist, u8 type)
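
Switching the coex dump from RT_TRACE to seq_printf() lets the same function back a debugfs file instead of writing to the kernel log. A minimal single_open() sketch of the wiring this signature enables (illustrative; the driver's actual debugfs plumbing lives elsewhere):

	static int coex_info_show(struct seq_file *m, void *v)
	{
		struct btc_coexist *btcoexist = m->private;

		ex_btc8192e2ant_display_coex_info(btcoexist, m);
		return 0;
	}

	static int coex_info_open(struct inode *inode, struct file *file)
	{
		return single_open(file, coex_info_show, inode->i_private);
	}

	static const struct file_operations coex_info_fops = {
		.open = coex_info_open,
		.read = seq_read,
		.llseek = seq_lseek,
		.release = single_release,
	};
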
diff --git a/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8192e2ant.h b/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8192e2ant.h
index 65502acee52c..b8c95c7afc06 100644
--- a/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8192e2ant.h
+++ b/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8192e2ant.h
@@ -180,4 +180,5 @@ void ex_btc8192e2ant_stack_operation_notify(struct btc_coexist *btcoexist,
 					    u8 type);
 void ex_btc8192e2ant_halt_notify(struct btc_coexist *btcoexist);
 void ex_btc8192e2ant_periodical(struct btc_coexist *btcoexist);
-void ex_btc8192e2ant_display_coex_info(struct btc_coexist *btcoexist);
+void ex_btc8192e2ant_display_coex_info(struct btc_coexist *btcoexist,
+				       struct seq_file *m);
diff --git a/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8723b1ant.c b/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8723b1ant.c
index 5f726f6d3567..fd3b1fb35dff 100644
--- a/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8723b1ant.c
+++ b/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8723b1ant.c
@@ -2474,12 +2474,12 @@ void ex_btc8723b1ant_init_coex_dm(struct btc_coexist *btcoexist)
 	halbtc8723b1ant_query_bt_info(btcoexist);
 }
 
-void ex_btc8723b1ant_display_coex_info(struct btc_coexist *btcoexist)
+void ex_btc8723b1ant_display_coex_info(struct btc_coexist *btcoexist,
+				       struct seq_file *m)
 {
 	struct btc_board_info *board_info = &btcoexist->board_info;
 	struct btc_stack_info *stack_info = &btcoexist->stack_info;
 	struct btc_bt_link_info *bt_link_info = &btcoexist->bt_link_info;
-	struct rtl_priv *rtlpriv = btcoexist->adapter;
 	u8 u8tmp[4], i, bt_info_ext, pstdmacase = 0;
 	u16 u16tmp[4];
 	u32 u32tmp[4];
@@ -2491,62 +2491,56 @@ void ex_btc8723b1ant_display_coex_info(struct btc_coexist *btcoexist)
 	u8 wifi_dot11_chnl, wifi_hs_chnl;
 	u32 fw_ver = 0, bt_patch_ver = 0;
 
-	RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
-		 "\r\n ============[BT Coexist info]============");
+	seq_puts(m, "\n ============[BT Coexist info]============");
 
 	if (btcoexist->manual_control) {
-		RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
-			 "\r\n ============[Under Manual Control]==========");
-		RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
-			 "\r\n ==========================================");
+		seq_puts(m, "\n ============[Under Manual Control]==========");
+		seq_puts(m, "\n ==========================================");
 	}
 	if (btcoexist->stop_coex_dm) {
-		RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
-			 "\r\n ============[Coex is STOPPED]============");
-		RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
-			 "\r\n ==========================================");
+		seq_puts(m, "\n ============[Coex is STOPPED]============");
+		seq_puts(m, "\n ==========================================");
 	}
 
-	RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s = %d/ %d/ %d",
-		 "Ant PG Num/ Ant Mech/ Ant Pos:",
-		 board_info->pg_ant_num, board_info->btdm_ant_num,
-		 board_info->btdm_ant_pos);
+	seq_printf(m, "\n %-35s = %d/ %d/ %d",
+		   "Ant PG Num/ Ant Mech/ Ant Pos:",
+		   board_info->pg_ant_num, board_info->btdm_ant_num,
+		   board_info->btdm_ant_pos);
 
-	RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s = %s / %d",
-		 "BT stack/ hci ext ver",
-		 ((stack_info->profile_notified) ? "Yes" : "No"),
-		 stack_info->hci_version);
+	seq_printf(m, "\n %-35s = %s / %d",
+		   "BT stack/ hci ext ver",
+		   ((stack_info->profile_notified) ? "Yes" : "No"),
+		   stack_info->hci_version);
 
 	btcoexist->btc_get(btcoexist, BTC_GET_U4_BT_PATCH_VER, &bt_patch_ver);
 	btcoexist->btc_get(btcoexist, BTC_GET_U4_WIFI_FW_VER, &fw_ver);
-	RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
-		 "\r\n %-35s = %d_%x/ 0x%x/ 0x%x(%d)",
-		 "CoexVer/ FwVer/ PatchVer",
-		 glcoex_ver_date_8723b_1ant, glcoex_ver_8723b_1ant,
-		 fw_ver, bt_patch_ver, bt_patch_ver);
+	seq_printf(m, "\n %-35s = %d_%x/ 0x%x/ 0x%x(%d)",
+		   "CoexVer/ FwVer/ PatchVer",
+		   glcoex_ver_date_8723b_1ant, glcoex_ver_8723b_1ant,
+		   fw_ver, bt_patch_ver, bt_patch_ver);
 
 	btcoexist->btc_get(btcoexist, BTC_GET_BL_HS_OPERATION, &bt_hs_on);
 	btcoexist->btc_get(btcoexist, BTC_GET_U1_WIFI_DOT11_CHNL,
 			   &wifi_dot11_chnl);
 	btcoexist->btc_get(btcoexist, BTC_GET_U1_WIFI_HS_CHNL, &wifi_hs_chnl);
-	RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s = %d / %d(%d)",
-		 "Dot11 channel / HsChnl(HsMode)",
-		 wifi_dot11_chnl, wifi_hs_chnl, bt_hs_on);
+	seq_printf(m, "\n %-35s = %d / %d(%d)",
+		   "Dot11 channel / HsChnl(HsMode)",
+		   wifi_dot11_chnl, wifi_hs_chnl, bt_hs_on);
 
-	RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s = %3ph ",
-		 "H2C Wifi inform bt chnl Info",
-		 coex_dm->wifi_chnl_info);
+	seq_printf(m, "\n %-35s = %3ph ",
+		   "H2C Wifi inform bt chnl Info",
+		   coex_dm->wifi_chnl_info);
 
 	btcoexist->btc_get(btcoexist, BTC_GET_S4_WIFI_RSSI, &wifi_rssi);
 	btcoexist->btc_get(btcoexist, BTC_GET_S4_HS_RSSI, &bt_hs_rssi);
-	RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s = %d/ %d",
-		 "Wifi rssi/ HS rssi", wifi_rssi, bt_hs_rssi);
+	seq_printf(m, "\n %-35s = %d/ %d",
+		   "Wifi rssi/ HS rssi", wifi_rssi, bt_hs_rssi);
 
 	btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_SCAN, &scan);
 	btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_LINK, &link);
 	btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_ROAM, &roam);
-	RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s = %d/ %d/ %d ",
-		 "Wifi link/ roam/ scan", link, roam, scan);
+	seq_printf(m, "\n %-35s = %d/ %d/ %d ",
+		   "Wifi link/ roam/ scan", link, roam, scan);
 
 	btcoexist->btc_get(btcoexist , BTC_GET_BL_WIFI_UNDER_5G,
 			   &wifi_under_5g);
@@ -2555,110 +2549,106 @@ void ex_btc8723b1ant_display_coex_info(struct btc_coexist *btcoexist)
 	btcoexist->btc_get(btcoexist, BTC_GET_U4_WIFI_TRAFFIC_DIRECTION,
 			   &wifi_traffic_dir);
 
-	RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s = %s / %s/ %s ",
-		 "Wifi status", (wifi_under_5g ? "5G" : "2.4G"),
-		 ((wifi_bw == BTC_WIFI_BW_LEGACY) ? "Legacy" :
-		  ((wifi_bw == BTC_WIFI_BW_HT40) ? "HT40" : "HT20")),
-		  ((!wifi_busy) ? "idle" :
-		   ((wifi_traffic_dir == BTC_WIFI_TRAFFIC_TX) ?
-		   "uplink" : "downlink")));
+	seq_printf(m, "\n %-35s = %s / %s/ %s ",
+		   "Wifi status", (wifi_under_5g ? "5G" : "2.4G"),
+		   ((wifi_bw == BTC_WIFI_BW_LEGACY) ? "Legacy" :
+		    ((wifi_bw == BTC_WIFI_BW_HT40) ? "HT40" : "HT20")),
+		    ((!wifi_busy) ? "idle" :
+		     ((wifi_traffic_dir == BTC_WIFI_TRAFFIC_TX) ?
+		     "uplink" : "downlink")));
 
 	btcoexist->btc_get(btcoexist, BTC_GET_U4_WIFI_LINK_STATUS,
 			   &wifi_link_status);
-	RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s = %d/ %d/ %d/ %d/ %d",
-		 "sta/vwifi/hs/p2pGo/p2pGc",
-		 ((wifi_link_status & WIFI_STA_CONNECTED) ? 1 : 0),
-		 ((wifi_link_status & WIFI_AP_CONNECTED) ? 1 : 0),
-		 ((wifi_link_status & WIFI_HS_CONNECTED) ? 1 : 0),
-		 ((wifi_link_status & WIFI_P2P_GO_CONNECTED) ? 1 : 0),
-		 ((wifi_link_status & WIFI_P2P_GC_CONNECTED) ? 1 : 0));
-
-	RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s = [%s/ %d/ %d] ",
-		 "BT [status/ rssi/ retryCnt]",
-		 ((coex_sta->bt_disabled) ? ("disabled") :
-		  ((coex_sta->c2h_bt_inquiry_page) ? ("inquiry/page scan") :
-		   ((BT_8723B_1ANT_BT_STATUS_NON_CONNECTED_IDLE ==
-		     coex_dm->bt_status) ?
-		    "non-connected idle" :
-		    ((BT_8723B_1ANT_BT_STATUS_CONNECTED_IDLE ==
-		      coex_dm->bt_status) ?
-		     "connected-idle" : "busy")))),
-		     coex_sta->bt_rssi, coex_sta->bt_retry_cnt);
-
-	RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
-		 "\r\n %-35s = %d / %d / %d / %d",
-		 "SCO/HID/PAN/A2DP", bt_link_info->sco_exist,
-		 bt_link_info->hid_exist, bt_link_info->pan_exist,
-		 bt_link_info->a2dp_exist);
-	btcoexist->btc_disp_dbg_msg(btcoexist, BTC_DBG_DISP_BT_LINK_INFO);
+	seq_printf(m, "\n %-35s = %d/ %d/ %d/ %d/ %d",
+		   "sta/vwifi/hs/p2pGo/p2pGc",
+		   ((wifi_link_status & WIFI_STA_CONNECTED) ? 1 : 0),
+		   ((wifi_link_status & WIFI_AP_CONNECTED) ? 1 : 0),
+		   ((wifi_link_status & WIFI_HS_CONNECTED) ? 1 : 0),
+		   ((wifi_link_status & WIFI_P2P_GO_CONNECTED) ? 1 : 0),
+		   ((wifi_link_status & WIFI_P2P_GC_CONNECTED) ? 1 : 0));
+
+	seq_printf(m, "\n %-35s = [%s/ %d/ %d] ",
+		   "BT [status/ rssi/ retryCnt]",
+		   ((coex_sta->bt_disabled) ? ("disabled") :
+		    ((coex_sta->c2h_bt_inquiry_page) ? ("inquiry/page scan") :
+		     ((BT_8723B_1ANT_BT_STATUS_NON_CONNECTED_IDLE ==
+		       coex_dm->bt_status) ?
+		      "non-connected idle" :
+		      ((BT_8723B_1ANT_BT_STATUS_CONNECTED_IDLE ==
+			coex_dm->bt_status) ?
+		       "connected-idle" : "busy")))),
+		       coex_sta->bt_rssi, coex_sta->bt_retry_cnt);
+
+	seq_printf(m, "\n %-35s = %d / %d / %d / %d",
+		   "SCO/HID/PAN/A2DP", bt_link_info->sco_exist,
+		   bt_link_info->hid_exist, bt_link_info->pan_exist,
+		   bt_link_info->a2dp_exist);
+	btcoexist->btc_disp_dbg_msg(btcoexist, BTC_DBG_DISP_BT_LINK_INFO, m);
 
 	bt_info_ext = coex_sta->bt_info_ext;
-	RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s = %s",
-		 "BT Info A2DP rate",
-		 (bt_info_ext & BIT0) ? "Basic rate" : "EDR rate");
+	seq_printf(m, "\n %-35s = %s",
+		   "BT Info A2DP rate",
+		   (bt_info_ext & BIT0) ? "Basic rate" : "EDR rate");
 
 	for (i = 0; i < BT_INFO_SRC_8723B_1ANT_MAX; i++) {
 		if (coex_sta->bt_info_c2h_cnt[i]) {
-			RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
-				 "\r\n %-35s = %7ph(%d)",
-				 glbt_info_src_8723b_1ant[i],
-				 coex_sta->bt_info_c2h[i],
-				 coex_sta->bt_info_c2h_cnt[i]);
+			seq_printf(m, "\n %-35s = %7ph(%d)",
+				   glbt_info_src_8723b_1ant[i],
+				   coex_sta->bt_info_c2h[i],
+				   coex_sta->bt_info_c2h_cnt[i]);
 		}
 	}
-	RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
-		 "\r\n %-35s = %s/%s, (0x%x/0x%x)",
-		 "PS state, IPS/LPS, (lps/rpwm)",
-		 ((coex_sta->under_ips ? "IPS ON" : "IPS OFF")),
-		 ((coex_sta->under_lps ? "LPS ON" : "LPS OFF")),
-		 btcoexist->bt_info.lps_val,
-		 btcoexist->bt_info.rpwm_val);
-	btcoexist->btc_disp_dbg_msg(btcoexist, BTC_DBG_DISP_FW_PWR_MODE_CMD);
+	seq_printf(m, "\n %-35s = %s/%s, (0x%x/0x%x)",
+		   "PS state, IPS/LPS, (lps/rpwm)",
+		   ((coex_sta->under_ips ? "IPS ON" : "IPS OFF")),
+		   ((coex_sta->under_lps ? "LPS ON" : "LPS OFF")),
+		   btcoexist->bt_info.lps_val,
+		   btcoexist->bt_info.rpwm_val);
+	btcoexist->btc_disp_dbg_msg(btcoexist, BTC_DBG_DISP_FW_PWR_MODE_CMD, m);
 
 	if (!btcoexist->manual_control) {
 		/* Sw mechanism	*/
-		RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s",
-			 "============[Sw mechanism]============");
+		seq_printf(m, "\n %-35s",
+			   "============[Sw mechanism]============");
 
-		RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s = %d/",
-			 "SM[LowPenaltyRA]", coex_dm->cur_low_penalty_ra);
+		seq_printf(m, "\n %-35s = %d/",
+			   "SM[LowPenaltyRA]", coex_dm->cur_low_penalty_ra);
 
-		RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s = %s/ %s/ %d ",
-			 "DelBA/ BtCtrlAgg/ AggSize",
+		seq_printf(m, "\n %-35s = %s/ %s/ %d ",
+			   "DelBA/ BtCtrlAgg/ AggSize",
 			   (btcoexist->bt_info.reject_agg_pkt ? "Yes" : "No"),
 			   (btcoexist->bt_info.bt_ctrl_buf_size ? "Yes" : "No"),
 			   btcoexist->bt_info.agg_buf_size);
 
-		RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s = 0x%x ",
-			 "Rate Mask", btcoexist->bt_info.ra_mask);
+		seq_printf(m, "\n %-35s = 0x%x ",
+			   "Rate Mask", btcoexist->bt_info.ra_mask);
 
 		/* Fw mechanism	*/
-		RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s",
-			 "============[Fw mechanism]============");
+		seq_printf(m, "\n %-35s",
+			   "============[Fw mechanism]============");
 
 		pstdmacase = coex_dm->cur_ps_tdma;
-		RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
-			 "\r\n %-35s = %5ph case-%d (auto:%d)",
+		seq_printf(m, "\n %-35s = %5ph case-%d (auto:%d)",
 			   "PS TDMA", coex_dm->ps_tdma_para,
 			   pstdmacase, coex_dm->auto_tdma_adjust);
 
-		RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s = %d ",
-			 "IgnWlanAct", coex_dm->cur_ignore_wlan_act);
+		seq_printf(m, "\n %-35s = %d ",
+			   "IgnWlanAct", coex_dm->cur_ignore_wlan_act);
 
-		RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s = 0x%x ",
-			 "Latest error condition(should be 0)",
+		seq_printf(m, "\n %-35s = 0x%x ",
+			   "Latest error condition(should be 0)",
 			   coex_dm->error_condition);
 	}
 
-	RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s = %d",
-		 "Coex Table Type", coex_sta->coex_table_type);
+	seq_printf(m, "\n %-35s = %d",
+		   "Coex Table Type", coex_sta->coex_table_type);
 
 	/* Hw setting */
-	RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s",
-		 "============[Hw setting]============");
+	seq_printf(m, "\n %-35s",
+		   "============[Hw setting]============");
 
-	RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s = 0x%x/0x%x/0x%x/0x%x",
-		 "backup ARFR1/ARFR2/RL/AMaxTime", coex_dm->backup_arfr_cnt1,
+	seq_printf(m, "\n %-35s = 0x%x/0x%x/0x%x/0x%x",
+		   "backup ARFR1/ARFR2/RL/AMaxTime", coex_dm->backup_arfr_cnt1,
 		   coex_dm->backup_arfr_cnt2, coex_dm->backup_retry_limit,
 		   coex_dm->backup_ampdu_max_time);
 
@@ -2666,50 +2656,49 @@ void ex_btc8723b1ant_display_coex_info(struct btc_coexist *btcoexist)
 	u32tmp[1] = btcoexist->btc_read_4byte(btcoexist, 0x434);
 	u16tmp[0] = btcoexist->btc_read_2byte(btcoexist, 0x42a);
 	u8tmp[0] = btcoexist->btc_read_1byte(btcoexist, 0x456);
-	RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s = 0x%x/0x%x/0x%x/0x%x",
-		 "0x430/0x434/0x42a/0x456",
-		 u32tmp[0], u32tmp[1], u16tmp[0], u8tmp[0]);
+	seq_printf(m, "\n %-35s = 0x%x/0x%x/0x%x/0x%x",
+		   "0x430/0x434/0x42a/0x456",
+		   u32tmp[0], u32tmp[1], u16tmp[0], u8tmp[0]);
 
 	u8tmp[0] = btcoexist->btc_read_1byte(btcoexist, 0x778);
 	u32tmp[0] = btcoexist->btc_read_4byte(btcoexist, 0x6cc);
 	u32tmp[1] = btcoexist->btc_read_4byte(btcoexist, 0x880);
-	RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s = 0x%x/ 0x%x/ 0x%x",
-		 "0x778/0x6cc/0x880[29:25]", u8tmp[0], u32tmp[0],
-		 (u32tmp[1] & 0x3e000000) >> 25);
+	seq_printf(m, "\n %-35s = 0x%x/ 0x%x/ 0x%x",
+		   "0x778/0x6cc/0x880[29:25]", u8tmp[0], u32tmp[0],
+		   (u32tmp[1] & 0x3e000000) >> 25);
 
 	u32tmp[0] = btcoexist->btc_read_4byte(btcoexist, 0x948);
 	u8tmp[0] = btcoexist->btc_read_1byte(btcoexist, 0x67);
 	u8tmp[1] = btcoexist->btc_read_1byte(btcoexist, 0x765);
-	RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s = 0x%x/ 0x%x/ 0x%x",
-		 "0x948/ 0x67[5] / 0x765",
-		 u32tmp[0], ((u8tmp[0] & 0x20) >> 5), u8tmp[1]);
+	seq_printf(m, "\n %-35s = 0x%x/ 0x%x/ 0x%x",
+		   "0x948/ 0x67[5] / 0x765",
+		   u32tmp[0], ((u8tmp[0] & 0x20) >> 5), u8tmp[1]);
 
 	u32tmp[0] = btcoexist->btc_read_4byte(btcoexist, 0x92c);
 	u32tmp[1] = btcoexist->btc_read_4byte(btcoexist, 0x930);
 	u32tmp[2] = btcoexist->btc_read_4byte(btcoexist, 0x944);
-	RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s = 0x%x/ 0x%x/ 0x%x",
-		 "0x92c[1:0]/ 0x930[7:0]/0x944[1:0]",
-		 u32tmp[0] & 0x3, u32tmp[1] & 0xff, u32tmp[2] & 0x3);
+	seq_printf(m, "\n %-35s = 0x%x/ 0x%x/ 0x%x",
+		   "0x92c[1:0]/ 0x930[7:0]/0x944[1:0]",
+		   u32tmp[0] & 0x3, u32tmp[1] & 0xff, u32tmp[2] & 0x3);
 
 	u8tmp[0] = btcoexist->btc_read_1byte(btcoexist, 0x39);
 	u8tmp[1] = btcoexist->btc_read_1byte(btcoexist, 0x40);
 	u32tmp[0] = btcoexist->btc_read_4byte(btcoexist, 0x4c);
 	u8tmp[2] = btcoexist->btc_read_1byte(btcoexist, 0x64);
-	RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
-		 "\r\n %-35s = 0x%x/ 0x%x/ 0x%x/ 0x%x",
-		 "0x38[11]/0x40/0x4c[24:23]/0x64[0]",
-		 ((u8tmp[0] & 0x8) >> 3), u8tmp[1],
-		  ((u32tmp[0] & 0x01800000) >> 23), u8tmp[2] & 0x1);
+	seq_printf(m, "\n %-35s = 0x%x/ 0x%x/ 0x%x/ 0x%x",
+		   "0x38[11]/0x40/0x4c[24:23]/0x64[0]",
+		   ((u8tmp[0] & 0x8) >> 3), u8tmp[1],
+		    ((u32tmp[0] & 0x01800000) >> 23), u8tmp[2] & 0x1);
 
 	u32tmp[0] = btcoexist->btc_read_4byte(btcoexist, 0x550);
 	u8tmp[0] = btcoexist->btc_read_1byte(btcoexist, 0x522);
-	RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s = 0x%x/ 0x%x",
-		 "0x550(bcn ctrl)/0x522", u32tmp[0], u8tmp[0]);
+	seq_printf(m, "\n %-35s = 0x%x/ 0x%x",
+		   "0x550(bcn ctrl)/0x522", u32tmp[0], u8tmp[0]);
 
 	u32tmp[0] = btcoexist->btc_read_4byte(btcoexist, 0xc50);
 	u8tmp[0] = btcoexist->btc_read_1byte(btcoexist, 0x49c);
-	RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s = 0x%x/ 0x%x",
-		 "0xc50(dig)/0x49c(null-drop)", u32tmp[0] & 0xff, u8tmp[0]);
+	seq_printf(m, "\n %-35s = 0x%x/ 0x%x",
+		   "0xc50(dig)/0x49c(null-drop)", u32tmp[0] & 0xff, u8tmp[0]);
 
 	u32tmp[0] = btcoexist->btc_read_4byte(btcoexist, 0xda0);
 	u32tmp[1] = btcoexist->btc_read_4byte(btcoexist, 0xda4);
@@ -2727,26 +2716,26 @@ void ex_btc8723b1ant_display_coex_info(struct btc_coexist *btcoexist)
 		   (u32tmp[3] & 0xffff);
 	fa_cck = (u8tmp[0] << 8) + u8tmp[1];
 
-	RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s = 0x%x/ 0x%x/ 0x%x",
-		 "OFDM-CCA/OFDM-FA/CCK-FA",
+	seq_printf(m, "\n %-35s = 0x%x/ 0x%x/ 0x%x",
+		   "OFDM-CCA/OFDM-FA/CCK-FA",
 		 u32tmp[0] & 0xffff, fa_ofdm, fa_cck);
 
 	u32tmp[0] = btcoexist->btc_read_4byte(btcoexist, 0x6c0);
 	u32tmp[1] = btcoexist->btc_read_4byte(btcoexist, 0x6c4);
 	u32tmp[2] = btcoexist->btc_read_4byte(btcoexist, 0x6c8);
-	RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s = 0x%x/ 0x%x/ 0x%x",
-		 "0x6c0/0x6c4/0x6c8(coexTable)",
-		 u32tmp[0], u32tmp[1], u32tmp[2]);
-
-	RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s = %d/ %d",
-		 "0x770(high-pri rx/tx)", coex_sta->high_priority_rx,
-		 coex_sta->high_priority_tx);
-	RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s = %d/ %d",
-		 "0x774(low-pri rx/tx)", coex_sta->low_priority_rx,
-		 coex_sta->low_priority_tx);
+	seq_printf(m, "\n %-35s = 0x%x/ 0x%x/ 0x%x",
+		   "0x6c0/0x6c4/0x6c8(coexTable)",
+		   u32tmp[0], u32tmp[1], u32tmp[2]);
+
+	seq_printf(m, "\n %-35s = %d/ %d",
+		   "0x770(high-pri rx/tx)", coex_sta->high_priority_rx,
+		   coex_sta->high_priority_tx);
+	seq_printf(m, "\n %-35s = %d/ %d",
+		   "0x774(low-pri rx/tx)", coex_sta->low_priority_rx,
+		   coex_sta->low_priority_tx);
 	if (btcoexist->auto_report_1ant)
 		halbtc8723b1ant_monitor_bt_ctr(btcoexist);
-	btcoexist->btc_disp_dbg_msg(btcoexist, BTC_DBG_DISP_COEX_STATISTICS);
+	btcoexist->btc_disp_dbg_msg(btcoexist, BTC_DBG_DISP_COEX_STATISTICS, m);
 }
 
 void ex_btc8723b1ant_ips_notify(struct btc_coexist *btcoexist, u8 type)
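The hunks above convert the 8723B single-antenna coexistence report from RT_TRACE() logging to seq_file output: the display entry point now takes a struct seq_file *m and writes through seq_printf()/seq_puts(), so the report can be read back as a file instead of being scattered through the kernel log. A minimal sketch of how such a writer could be wired up through debugfs follows; the node name, the debugfs directory, and every identifier suffixed _example are assumptions for illustration, not part of this patch.

/*
 * Hypothetical glue exposing the new seq_file-based writer via debugfs.
 * Only the seq_file/debugfs calls are real kernel API; everything
 * suffixed _example is made up for this sketch.
 */
#include <linux/debugfs.h>
#include <linux/fs.h>
#include <linux/module.h>
#include <linux/seq_file.h>

#include "halbtc8723b1ant.h"	/* ex_btc8723b1ant_display_coex_info() */

static int coex_info_show_example(struct seq_file *m, void *v)
{
	struct btc_coexist *btcoexist = m->private;

	/* The writer converted by this patch: */
	ex_btc8723b1ant_display_coex_info(btcoexist, m);
	return 0;
}

static int coex_info_open_example(struct inode *inode, struct file *file)
{
	/* single_open() stores inode->i_private in m->private. */
	return single_open(file, coex_info_show_example, inode->i_private);
}

static const struct file_operations coex_info_fops_example = {
	.owner		= THIS_MODULE,
	.open		= coex_info_open_example,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

/* Called from driver init with some driver-owned debugfs directory. */
static void coex_debugfs_init_example(struct btc_coexist *btcoexist,
				      struct dentry *dir)
{
	debugfs_create_file("btcoex_info", 0444, dir, btcoexist,
			    &coex_info_fops_example);
}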
diff --git a/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8723b1ant.h b/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8723b1ant.h
index 8d4fde235e11..934f27893c16 100644
--- a/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8723b1ant.h
+++ b/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8723b1ant.h
@@ -220,5 +220,6 @@ void ex_btc8723b1ant_halt_notify(struct btc_coexist *btcoexist);
 void ex_btc8723b1ant_pnp_notify(struct btc_coexist *btcoexist, u8 pnpstate);
 void ex_btc8723b1ant_coex_dm_reset(struct btc_coexist *btcoexist);
 void ex_btc8723b1ant_periodical(struct btc_coexist *btcoexist);
-void ex_btc8723b1ant_display_coex_info(struct btc_coexist *btcoexist);
+void ex_btc8723b1ant_display_coex_info(struct btc_coexist *btcoexist,
+				       struct seq_file *m);
 void ex_btc8723b1ant_pnp_notify(struct btc_coexist *btcoexist, u8 pnp_state);
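The converted call sites above also pass m through btcoexist->btc_disp_dbg_msg(..., m), which implies the function-pointer type behind that member grew a struct seq_file * parameter in the same series (its real definition lives in halbtcoutsrc.h and is not shown here). A plausible shape of that change, with assumed typedef and struct names:

#include <linux/seq_file.h>
#include <linux/types.h>

/*
 * Before, as implied by the removed two-argument call sites:
 *   typedef void (*disp_dbg_msg_fn)(void *btc_context, u8 disp_type);
 * After, the seq_file destination is threaded through so helpers like
 * the BTC_DBG_DISP_BT_LINK_INFO dump write into the same buffer as
 * display_coex_info():
 */
typedef void (*disp_dbg_msg_fn_example)(void *btc_context, u8 disp_type,
					struct seq_file *m);

struct btc_coexist_example {
	/* ...other callbacks elided... */
	disp_dbg_msg_fn_example btc_disp_dbg_msg;
};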
diff --git a/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8723b2ant.c b/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8723b2ant.c
index e8f07573aed9..4907c2ffadfe 100644
--- a/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8723b2ant.c
+++ b/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8723b2ant.c
@@ -3780,12 +3780,12 @@ void ex_btc8723b2ant_init_coex_dm(struct btc_coexist *btcoexist)
 	btc8723b2ant_init_coex_dm(btcoexist);
 }
 
-void ex_btc8723b2ant_display_coex_info(struct btc_coexist *btcoexist)
+void ex_btc8723b2ant_display_coex_info(struct btc_coexist *btcoexist,
+				       struct seq_file *m)
 {
 	struct btc_board_info *board_info = &btcoexist->board_info;
 	struct btc_stack_info *stack_info = &btcoexist->stack_info;
 	struct btc_bt_link_info *bt_link_info = &btcoexist->bt_link_info;
-	struct rtl_priv *rtlpriv = btcoexist->adapter;
 	u8 u8tmp[4], i, bt_info_ext, ps_tdma_case = 0;
 	u32 u32tmp[4];
 	bool roam = false, scan = false;
@@ -3797,173 +3797,161 @@ void ex_btc8723b2ant_display_coex_info(struct btc_coexist *btcoexist)
 	u32 fw_ver = 0, bt_patch_ver = 0;
 	u8 ap_num = 0;
 
-	RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
-		 "\r\n ============[BT Coexist info]============");
+	seq_puts(m, "\n ============[BT Coexist info]============");
 
 	if (btcoexist->manual_control) {
-		RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
-			 "\r\n ==========[Under Manual Control]============");
-		RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
-			 "\r\n ==========================================");
-	}
-
-	if (!board_info->bt_exist) {
-		RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n BT not exists !!!");
-		return;
+		seq_puts(m, "\n ==========[Under Manual Control]============");
+		seq_puts(m, "\n ==========================================");
 	}
 
-	RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s = %d/ %d ",
-		 "Ant PG number/ Ant mechanism:",
-		 board_info->pg_ant_num, board_info->btdm_ant_num);
+	seq_printf(m, "\n %-35s = %d/ %d ",
+		   "Ant PG number/ Ant mechanism:",
+		   board_info->pg_ant_num, board_info->btdm_ant_num);
 
-	RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s = %s / %d",
-		 "BT stack/ hci ext ver",
-		 ((stack_info->profile_notified) ? "Yes" : "No"),
-		 stack_info->hci_version);
+	seq_printf(m, "\n %-35s = %s / %d",
+		   "BT stack/ hci ext ver",
+		   ((stack_info->profile_notified) ? "Yes" : "No"),
+		   stack_info->hci_version);
 
 	btcoexist->btc_get(btcoexist, BTC_GET_U4_BT_PATCH_VER, &bt_patch_ver);
 	btcoexist->btc_get(btcoexist, BTC_GET_U4_WIFI_FW_VER, &fw_ver);
-	RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
-		 "\r\n %-35s = %d_%x/ 0x%x/ 0x%x(%d)",
-		 "CoexVer/ FwVer/ PatchVer",
-		 glcoex_ver_date_8723b_2ant, glcoex_ver_8723b_2ant,
-		 fw_ver, bt_patch_ver, bt_patch_ver);
+	seq_printf(m, "\n %-35s = %d_%x/ 0x%x/ 0x%x(%d)",
+		   "CoexVer/ FwVer/ PatchVer",
+		   glcoex_ver_date_8723b_2ant, glcoex_ver_8723b_2ant,
+		   fw_ver, bt_patch_ver, bt_patch_ver);
 
 	btcoexist->btc_get(btcoexist, BTC_GET_BL_HS_OPERATION, &bt_hs_on);
 	btcoexist->btc_get(btcoexist, BTC_GET_U1_WIFI_DOT11_CHNL,
 			   &wifi_dot11_chnl);
 	btcoexist->btc_get(btcoexist, BTC_GET_U1_WIFI_HS_CHNL, &wifi_hs_chnl);
 
-	RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s = %d / %d(%d)",
-		 "Dot11 channel / HsChnl(HsMode)",
-		 wifi_dot11_chnl, wifi_hs_chnl, bt_hs_on);
+	seq_printf(m, "\n %-35s = %d / %d(%d)",
+		   "Dot11 channel / HsChnl(HsMode)",
+		   wifi_dot11_chnl, wifi_hs_chnl, bt_hs_on);
 
-	RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s = %3ph ",
-		 "H2C Wifi inform bt chnl Info", coex_dm->wifi_chnl_info);
+	seq_printf(m, "\n %-35s = %3ph ",
+		   "H2C Wifi inform bt chnl Info", coex_dm->wifi_chnl_info);
 
 	btcoexist->btc_get(btcoexist, BTC_GET_S4_WIFI_RSSI, &wifi_rssi);
 	btcoexist->btc_get(btcoexist, BTC_GET_S4_HS_RSSI, &bt_hs_rssi);
 	btcoexist->btc_get(btcoexist, BTC_GET_U1_AP_NUM, &ap_num);
-	RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s = %d/ %d/ %d",
-		 "Wifi rssi/ HS rssi/ AP#", wifi_rssi, bt_hs_rssi, ap_num);
+	seq_printf(m, "\n %-35s = %d/ %d/ %d",
+		   "Wifi rssi/ HS rssi/ AP#", wifi_rssi, bt_hs_rssi, ap_num);
 
 	btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_SCAN, &scan);
 	btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_LINK, &link);
 	btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_ROAM, &roam);
-	RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s = %d/ %d/ %d ",
-		 "Wifi link/ roam/ scan", link, roam, scan);
+	seq_printf(m, "\n %-35s = %d/ %d/ %d ",
+		   "Wifi link/ roam/ scan", link, roam, scan);
 
 	btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_UNDER_5G, &wifi_under_5g);
 	btcoexist->btc_get(btcoexist, BTC_GET_U4_WIFI_BW, &wifi_bw);
 	btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_BUSY, &wifi_busy);
 	btcoexist->btc_get(btcoexist, BTC_GET_U4_WIFI_TRAFFIC_DIRECTION,
 			   &wifi_traffic_dir);
-	RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s = %s / %s/ %s ",
-		 "Wifi status", (wifi_under_5g ? "5G" : "2.4G"),
+	seq_printf(m, "\n %-35s = %s / %s/ %s ",
+		   "Wifi status", (wifi_under_5g ? "5G" : "2.4G"),
 		 ((wifi_bw == BTC_WIFI_BW_LEGACY) ? "Legacy" :
 		 (((wifi_bw == BTC_WIFI_BW_HT40) ? "HT40" : "HT20"))),
 		 ((!wifi_busy) ? "idle" :
 		 ((wifi_traffic_dir == BTC_WIFI_TRAFFIC_TX) ?
 		  "uplink" : "downlink")));
 
-	RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s = %d / %d / %d / %d",
-		 "SCO/HID/PAN/A2DP",
-		 bt_link_info->sco_exist, bt_link_info->hid_exist,
-		 bt_link_info->pan_exist, bt_link_info->a2dp_exist);
-	btcoexist->btc_disp_dbg_msg(btcoexist, BTC_DBG_DISP_BT_LINK_INFO);
+	seq_printf(m, "\n %-35s = %d / %d / %d / %d",
+		   "SCO/HID/PAN/A2DP",
+		   bt_link_info->sco_exist, bt_link_info->hid_exist,
+		   bt_link_info->pan_exist, bt_link_info->a2dp_exist);
+	btcoexist->btc_disp_dbg_msg(btcoexist, BTC_DBG_DISP_BT_LINK_INFO, m);
 
 	bt_info_ext = coex_sta->bt_info_ext;
-	RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s = %s",
-		 "BT Info A2DP rate",
-		 (bt_info_ext & BIT0) ? "Basic rate" : "EDR rate");
+	seq_printf(m, "\n %-35s = %s",
+		   "BT Info A2DP rate",
+		   (bt_info_ext & BIT0) ? "Basic rate" : "EDR rate");
 
 	for (i = 0; i < BT_INFO_SRC_8723B_2ANT_MAX; i++) {
 		if (coex_sta->bt_info_c2h_cnt[i]) {
-			RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
-				 "\r\n %-35s = %7ph(%d)",
-				 glbt_info_src_8723b_2ant[i],
-				 coex_sta->bt_info_c2h[i],
-				 coex_sta->bt_info_c2h_cnt[i]);
+			seq_printf(m, "\n %-35s = %7ph(%d)",
+				   glbt_info_src_8723b_2ant[i],
+				   coex_sta->bt_info_c2h[i],
+				   coex_sta->bt_info_c2h_cnt[i]);
 		}
 	}
 
-	RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s = %s/%s",
-		 "PS state, IPS/LPS",
-		 ((coex_sta->under_ips ? "IPS ON" : "IPS OFF")),
-		 ((coex_sta->under_lps ? "LPS ON" : "LPS OFF")));
-	btcoexist->btc_disp_dbg_msg(btcoexist, BTC_DBG_DISP_FW_PWR_MODE_CMD);
+	seq_printf(m, "\n %-35s = %s/%s",
+		   "PS state, IPS/LPS",
+		   ((coex_sta->under_ips ? "IPS ON" : "IPS OFF")),
+		   ((coex_sta->under_lps ? "LPS ON" : "LPS OFF")));
+	btcoexist->btc_disp_dbg_msg(btcoexist, BTC_DBG_DISP_FW_PWR_MODE_CMD, m);
 
 	/* Sw mechanism	*/
-	RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
-		 "\r\n %-35s", "============[Sw mechanism]============");
-	RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s = %d/ %d/ %d ",
-		 "SM1[ShRf/ LpRA/ LimDig]", coex_dm->cur_rf_rx_lpf_shrink,
-		 coex_dm->cur_low_penalty_ra, coex_dm->limited_dig);
-	RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s = %d/ %d/ %d(0x%x) ",
-		 "SM2[AgcT/ AdcB/ SwDacSwing(lvl)]",
-		 coex_dm->cur_agc_table_en, coex_dm->cur_adc_back_off,
-		 coex_dm->cur_dac_swing_on, coex_dm->cur_dac_swing_lvl);
+	seq_printf(m,
+		   "\n %-35s", "============[Sw mechanism]============");
+	seq_printf(m, "\n %-35s = %d/ %d/ %d ",
+		   "SM1[ShRf/ LpRA/ LimDig]", coex_dm->cur_rf_rx_lpf_shrink,
+		   coex_dm->cur_low_penalty_ra, coex_dm->limited_dig);
+	seq_printf(m, "\n %-35s = %d/ %d/ %d(0x%x) ",
+		   "SM2[AgcT/ AdcB/ SwDacSwing(lvl)]",
+		   coex_dm->cur_agc_table_en, coex_dm->cur_adc_back_off,
+		   coex_dm->cur_dac_swing_on, coex_dm->cur_dac_swing_lvl);
 
 	/* Fw mechanism	*/
-	RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s",
-		 "============[Fw mechanism]============");
+	seq_printf(m, "\n %-35s",
+		   "============[Fw mechanism]============");
 
 	ps_tdma_case = coex_dm->cur_ps_tdma;
-	RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
-		 "\r\n %-35s = %5ph case-%d (auto:%d)",
-		 "PS TDMA", coex_dm->ps_tdma_para,
-		 ps_tdma_case, coex_dm->auto_tdma_adjust);
+	seq_printf(m, "\n %-35s = %5ph case-%d (auto:%d)",
+		   "PS TDMA", coex_dm->ps_tdma_para,
+		   ps_tdma_case, coex_dm->auto_tdma_adjust);
 
-	RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s = %d/ %d ",
-		 "DecBtPwr/ IgnWlanAct", coex_dm->cur_dec_bt_pwr_lvl,
-		 coex_dm->cur_ignore_wlan_act);
+	seq_printf(m, "\n %-35s = %d/ %d ",
+		   "DecBtPwr/ IgnWlanAct", coex_dm->cur_dec_bt_pwr_lvl,
+		   coex_dm->cur_ignore_wlan_act);
 
 	/* Hw setting */
-	RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s",
-		 "============[Hw setting]============");
+	seq_printf(m, "\n %-35s",
+		   "============[Hw setting]============");
 
-	RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s = 0x%x",
-		 "RF-A, 0x1e initVal", coex_dm->bt_rf0x1e_backup);
+	seq_printf(m, "\n %-35s = 0x%x",
+		   "RF-A, 0x1e initVal", coex_dm->bt_rf0x1e_backup);
 
 	u8tmp[0] = btcoexist->btc_read_1byte(btcoexist, 0x778);
 	u32tmp[0] = btcoexist->btc_read_4byte(btcoexist, 0x880);
-	RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s = 0x%x/ 0x%x",
-		 "0x778/0x880[29:25]", u8tmp[0],
-		 (u32tmp[0] & 0x3e000000) >> 25);
+	seq_printf(m, "\n %-35s = 0x%x/ 0x%x",
+		   "0x778/0x880[29:25]", u8tmp[0],
+		   (u32tmp[0] & 0x3e000000) >> 25);
 
 	u32tmp[0] = btcoexist->btc_read_4byte(btcoexist, 0x948);
 	u8tmp[0] = btcoexist->btc_read_1byte(btcoexist, 0x67);
 	u8tmp[1] = btcoexist->btc_read_1byte(btcoexist, 0x765);
-	RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s = 0x%x/ 0x%x/ 0x%x",
-		 "0x948/ 0x67[5] / 0x765",
-		 u32tmp[0], ((u8tmp[0] & 0x20) >> 5), u8tmp[1]);
+	seq_printf(m, "\n %-35s = 0x%x/ 0x%x/ 0x%x",
+		   "0x948/ 0x67[5] / 0x765",
+		   u32tmp[0], ((u8tmp[0] & 0x20) >> 5), u8tmp[1]);
 
 	u32tmp[0] = btcoexist->btc_read_4byte(btcoexist, 0x92c);
 	u32tmp[1] = btcoexist->btc_read_4byte(btcoexist, 0x930);
 	u32tmp[2] = btcoexist->btc_read_4byte(btcoexist, 0x944);
-	RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s = 0x%x/ 0x%x/ 0x%x",
-		 "0x92c[1:0]/ 0x930[7:0]/0x944[1:0]",
-		 u32tmp[0] & 0x3, u32tmp[1] & 0xff, u32tmp[2] & 0x3);
+	seq_printf(m, "\n %-35s = 0x%x/ 0x%x/ 0x%x",
+		   "0x92c[1:0]/ 0x930[7:0]/0x944[1:0]",
+		   u32tmp[0] & 0x3, u32tmp[1] & 0xff, u32tmp[2] & 0x3);
 
 	u8tmp[0] = btcoexist->btc_read_1byte(btcoexist, 0x39);
 	u8tmp[1] = btcoexist->btc_read_1byte(btcoexist, 0x40);
 	u32tmp[0] = btcoexist->btc_read_4byte(btcoexist, 0x4c);
 	u8tmp[2] = btcoexist->btc_read_1byte(btcoexist, 0x64);
-	RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
-		 "\r\n %-35s = 0x%x/ 0x%x/ 0x%x/ 0x%x",
-		 "0x38[11]/0x40/0x4c[24:23]/0x64[0]",
-		 ((u8tmp[0] & 0x8) >> 3), u8tmp[1],
-		 ((u32tmp[0] & 0x01800000) >> 23), u8tmp[2] & 0x1);
+	seq_printf(m, "\n %-35s = 0x%x/ 0x%x/ 0x%x/ 0x%x",
+		   "0x38[11]/0x40/0x4c[24:23]/0x64[0]",
+		   ((u8tmp[0] & 0x8) >> 3), u8tmp[1],
+		   ((u32tmp[0] & 0x01800000) >> 23), u8tmp[2] & 0x1);
 
 	u32tmp[0] = btcoexist->btc_read_4byte(btcoexist, 0x550);
 	u8tmp[0] = btcoexist->btc_read_1byte(btcoexist, 0x522);
-	RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s = 0x%x/ 0x%x",
-		 "0x550(bcn ctrl)/0x522", u32tmp[0], u8tmp[0]);
+	seq_printf(m, "\n %-35s = 0x%x/ 0x%x",
+		   "0x550(bcn ctrl)/0x522", u32tmp[0], u8tmp[0]);
 
 	u32tmp[0] = btcoexist->btc_read_4byte(btcoexist, 0xc50);
 	u8tmp[0] = btcoexist->btc_read_1byte(btcoexist, 0x49c);
-	RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s = 0x%x/ 0x%x",
-		 "0xc50(dig)/0x49c(null-drop)", u32tmp[0] & 0xff, u8tmp[0]);
+	seq_printf(m, "\n %-35s = 0x%x/ 0x%x",
+		   "0xc50(dig)/0x49c(null-drop)", u32tmp[0] & 0xff, u8tmp[0]);
 
 	u32tmp[0] = btcoexist->btc_read_4byte(btcoexist, 0xda0);
 	u32tmp[1] = btcoexist->btc_read_4byte(btcoexist, 0xda4);
@@ -3981,29 +3969,27 @@ void ex_btc8723b2ant_display_coex_info(struct btc_coexist *btcoexist)
 		   (u32tmp[3] & 0xffff);
 	fa_cck = (u8tmp[0] << 8) + u8tmp[1];
 
-	RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s = 0x%x/ 0x%x/ 0x%x",
-		 "OFDM-CCA/OFDM-FA/CCK-FA",
-		 u32tmp[0] & 0xffff, fa_ofdm, fa_cck);
+	seq_printf(m, "\n %-35s = 0x%x/ 0x%x/ 0x%x",
+		   "OFDM-CCA/OFDM-FA/CCK-FA",
+		   u32tmp[0] & 0xffff, fa_ofdm, fa_cck);
 
 	u32tmp[0] = btcoexist->btc_read_4byte(btcoexist, 0x6c0);
 	u32tmp[1] = btcoexist->btc_read_4byte(btcoexist, 0x6c4);
 	u32tmp[2] = btcoexist->btc_read_4byte(btcoexist, 0x6c8);
 	u8tmp[0] = btcoexist->btc_read_1byte(btcoexist, 0x6cc);
-	RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
-		 "\r\n %-35s = 0x%x/ 0x%x/ 0x%x/ 0x%x",
-		 "0x6c0/0x6c4/0x6c8/0x6cc(coexTable)",
-		 u32tmp[0], u32tmp[1], u32tmp[2], u8tmp[0]);
-
-	RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s = %d/ %d",
-		 "0x770(high-pri rx/tx)",
-		 coex_sta->high_priority_rx, coex_sta->high_priority_tx);
-	RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s = %d/ %d",
-		 "0x774(low-pri rx/tx)", coex_sta->low_priority_rx,
-		 coex_sta->low_priority_tx);
+	seq_printf(m, "\n %-35s = 0x%x/ 0x%x/ 0x%x/ 0x%x",
+		   "0x6c0/0x6c4/0x6c8/0x6cc(coexTable)",
+		   u32tmp[0], u32tmp[1], u32tmp[2], u8tmp[0]);
+
+	seq_printf(m, "\n %-35s = %d/ %d",
+		   "0x770(high-pri rx/tx)",
+		   coex_sta->high_priority_rx, coex_sta->high_priority_tx);
+	seq_printf(m, "\n %-35s = %d/ %d",
+		   "0x774(low-pri rx/tx)", coex_sta->low_priority_rx,
+		   coex_sta->low_priority_tx);
 	if (btcoexist->auto_report_2ant)
 		btc8723b2ant_monitor_bt_ctr(btcoexist);
-	btcoexist->btc_disp_dbg_msg(btcoexist,
-	BTC_DBG_DISP_COEX_STATISTICS);
+	btcoexist->btc_disp_dbg_msg(btcoexist, BTC_DBG_DISP_COEX_STATISTICS, m);
 }
 
 void ex_btc8723b2ant_ips_notify(struct btc_coexist *btcoexist, u8 type)
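A small pattern worth noting across these conversions: banners with no format arguments become seq_puts(), while the section headers that need %-35s column padding remain seq_printf(), since only the format path can do the width alignment. A trivial sketch of the distinction (real seq_file API, hypothetical function name):

#include <linux/seq_file.h>

static void coex_banner_example(struct seq_file *m)
{
	/* No conversions in the string: seq_puts() suffices. */
	seq_puts(m, "\n ============[BT Coexist info]============");

	/* Left-justified, 35-column field: seq_printf() is needed. */
	seq_printf(m, "\n %-35s",
		   "============[Sw mechanism]============");
}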
diff --git a/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8723b2ant.h b/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8723b2ant.h
index bc1e3042e271..aa24da402fb9 100644
--- a/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8723b2ant.h
+++ b/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8723b2ant.h
@@ -195,7 +195,8 @@ void ex_btc8723b2ant_bt_info_notify(struct btc_coexist *btcoexist,
 				    u8 *tmpbuf, u8 length);
 void ex_btc8723b2ant_halt_notify(struct btc_coexist *btcoexist);
 void ex_btc8723b2ant_periodical(struct btc_coexist *btcoexist);
-void ex_btc8723b2ant_display_coex_info(struct btc_coexist *btcoexist);
+void ex_btc8723b2ant_display_coex_info(struct btc_coexist *btcoexist,
+				       struct seq_file *m);
 void ex_btc8723b2ant_pnp_notify(struct btc_coexist *btcoexist, u8 pnp_state);
 void ex_btc8723b2ant_pre_load_firmware(struct btc_coexist *btcoexist);
 void ex_btc8723b2ant_power_on_setting(struct btc_coexist *btcoexist);
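The two-antenna variant above (and the 8821A one-antenna variant below) also drops the in-function if (!board_info->bt_exist) early return along with its "BT not exists !!!" banner. Presumably that check is now made once by the caller before dispatching to any per-chip writer; a hedged sketch of such a caller-side guard, with a hypothetical dispatch function name:

#include <linux/seq_file.h>

#include "halbt_precomp.h"	/* struct btc_coexist, per the driver */

static void halbtc_display_coex_info_example(struct btc_coexist *btcoexist,
					     struct seq_file *m)
{
	/* Guard once here instead of inside every chip variant. */
	if (!btcoexist->board_info.bt_exist) {
		seq_puts(m, "\n BT not exists !!!");
		return;
	}

	ex_btc8723b2ant_display_coex_info(btcoexist, m);
}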
diff --git a/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8821a1ant.c b/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8821a1ant.c
index 4efac5fe9982..0b26419881c0 100644
--- a/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8821a1ant.c
+++ b/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8821a1ant.c
@@ -2172,12 +2172,12 @@ void ex_btc8821a1ant_init_coex_dm(struct btc_coexist *btcoexist)
 	btc8821a1ant_query_bt_info(btcoexist);
 }
 
-void ex_btc8821a1ant_display_coex_info(struct btc_coexist *btcoexist)
+void ex_btc8821a1ant_display_coex_info(struct btc_coexist *btcoexist,
+				       struct seq_file *m)
 {
 	struct btc_board_info *board_info = &btcoexist->board_info;
 	struct btc_stack_info *stack_info = &btcoexist->stack_info;
 	struct btc_bt_link_info *bt_link_info = &btcoexist->bt_link_info;
-	struct rtl_priv *rtlpriv = btcoexist->adapter;
 	u8 u1_tmp[4], i, bt_info_ext, ps_tdma_case = 0;
 	u16 u2_tmp[4];
 	u32 u4_tmp[4];
@@ -2188,49 +2188,36 @@ void ex_btc8821a1ant_display_coex_info(struct btc_coexist *btcoexist)
 	u8 wifi_dot11_chnl, wifi_hs_chnl;
 	u32 fw_ver = 0, bt_patch_ver = 0;
 
-	RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
-		 "\r\n ============[BT Coexist info]============");
+	seq_puts(m, "\n ============[BT Coexist info]============");
 
 	if (btcoexist->manual_control) {
-		RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
-			 "\r\n ============[Under Manual Control]============");
-		RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
-			 "\r\n ==========================================");
+		seq_puts(m, "\n ============[Under Manual Control]============");
+		seq_puts(m, "\n ==========================================");
 	}
 	if (btcoexist->stop_coex_dm) {
-		RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
-			 "\r\n ============[Coex is STOPPED]============");
-		RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
-			 "\r\n ==========================================");
-	}
-
-	if (!board_info->bt_exist) {
-		RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n BT not exists !!!");
-		return;
+		seq_puts(m, "\n ============[Coex is STOPPED]============");
+		seq_puts(m, "\n ==========================================");
 	}
 
-	RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
-		 "\r\n %-35s = %d/ %d/ %d",
-		 "Ant PG Num/ Ant Mech/ Ant Pos:",
-		 board_info->pg_ant_num,
-		 board_info->btdm_ant_num,
-		 board_info->btdm_ant_pos);
+	seq_printf(m, "\n %-35s = %d/ %d/ %d",
+		   "Ant PG Num/ Ant Mech/ Ant Pos:",
+		   board_info->pg_ant_num,
+		   board_info->btdm_ant_num,
+		   board_info->btdm_ant_pos);
 
-	RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
-		 "\r\n %-35s = %s / %d", "BT stack/ hci ext ver",
-		 ((stack_info->profile_notified) ? "Yes" : "No"),
-		 stack_info->hci_version);
+	seq_printf(m, "\n %-35s = %s / %d", "BT stack/ hci ext ver",
+		   ((stack_info->profile_notified) ? "Yes" : "No"),
+		   stack_info->hci_version);
 
 	btcoexist->btc_get(btcoexist, BTC_GET_U4_BT_PATCH_VER,
 			   &bt_patch_ver);
 	btcoexist->btc_get(btcoexist, BTC_GET_U4_WIFI_FW_VER, &fw_ver);
-	RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
-		 "\r\n %-35s = %d_%x/ 0x%x/ 0x%x(%d)",
-		 "CoexVer/ FwVer/ PatchVer",
-		 glcoex_ver_date_8821a_1ant,
-		 glcoex_ver_8821a_1ant,
-		 fw_ver, bt_patch_ver,
-		 bt_patch_ver);
+	seq_printf(m, "\n %-35s = %d_%x/ 0x%x/ 0x%x(%d)",
+		   "CoexVer/ FwVer/ PatchVer",
+		   glcoex_ver_date_8821a_1ant,
+		   glcoex_ver_8821a_1ant,
+		   fw_ver, bt_patch_ver,
+		   bt_patch_ver);
 
 	btcoexist->btc_get(btcoexist, BTC_GET_BL_HS_OPERATION,
 			   &bt_hs_on);
@@ -2238,28 +2225,24 @@ void ex_btc8821a1ant_display_coex_info(struct btc_coexist *btcoexist)
 			   &wifi_dot11_chnl);
 	btcoexist->btc_get(btcoexist, BTC_GET_U1_WIFI_HS_CHNL,
 			   &wifi_hs_chnl);
-	RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
-		 "\r\n %-35s = %d / %d(%d)",
-		 "Dot11 channel / HsChnl(HsMode)",
-		 wifi_dot11_chnl, wifi_hs_chnl, bt_hs_on);
+	seq_printf(m, "\n %-35s = %d / %d(%d)",
+		   "Dot11 channel / HsChnl(HsMode)",
+		   wifi_dot11_chnl, wifi_hs_chnl, bt_hs_on);
 
-	RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
-		 "\r\n %-35s = %3ph ",
-		 "H2C Wifi inform bt chnl Info",
-		 coex_dm->wifi_chnl_info);
+	seq_printf(m, "\n %-35s = %3ph ",
+		   "H2C Wifi inform bt chnl Info",
+		   coex_dm->wifi_chnl_info);
 
 	btcoexist->btc_get(btcoexist, BTC_GET_S4_WIFI_RSSI, &wifi_rssi);
 	btcoexist->btc_get(btcoexist, BTC_GET_S4_HS_RSSI, &bt_hs_rssi);
-	RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
-		 "\r\n %-35s = %d/ %d", "Wifi rssi/ HS rssi",
-		 (int)wifi_rssi, (int)bt_hs_rssi);
+	seq_printf(m, "\n %-35s = %d/ %d", "Wifi rssi/ HS rssi",
+		   (int)wifi_rssi, (int)bt_hs_rssi);
 
 	btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_SCAN, &scan);
 	btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_LINK, &link);
 	btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_ROAM, &roam);
-	RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
-		 "\r\n %-35s = %d/ %d/ %d ", "Wifi link/ roam/ scan",
-		 link, roam, scan);
+	seq_printf(m, "\n %-35s = %d/ %d/ %d ", "Wifi link/ roam/ scan",
+		   link, roam, scan);
 
 	btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_UNDER_5G,
 			   &wifi_under_5g);
@@ -2269,16 +2252,15 @@ void ex_btc8821a1ant_display_coex_info(struct btc_coexist *btcoexist)
 			   &wifi_busy);
 	btcoexist->btc_get(btcoexist, BTC_GET_U4_WIFI_TRAFFIC_DIRECTION,
 			   &wifi_traffic_dir);
-	RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
-		 "\r\n %-35s = %s / %s/ %s ", "Wifi status",
-		 (wifi_under_5g ? "5G" : "2.4G"),
-		 ((wifi_bw == BTC_WIFI_BW_LEGACY) ? "Legacy" :
-		 (((wifi_bw == BTC_WIFI_BW_HT40) ? "HT40" : "HT20"))),
-		 ((!wifi_busy) ? "idle" :
-		 ((wifi_traffic_dir == BTC_WIFI_TRAFFIC_TX) ?
-		 "uplink" : "downlink")));
-	RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
-		   "\r\n %-35s = [%s/ %d/ %d] ", "BT [status/ rssi/ retryCnt]",
+	seq_printf(m, "\n %-35s = %s / %s/ %s ", "Wifi status",
+		   (wifi_under_5g ? "5G" : "2.4G"),
+		   ((wifi_bw == BTC_WIFI_BW_LEGACY) ? "Legacy" :
+		   (((wifi_bw == BTC_WIFI_BW_HT40) ? "HT40" : "HT20"))),
+		   ((!wifi_busy) ? "idle" :
+		   ((wifi_traffic_dir == BTC_WIFI_TRAFFIC_TX) ?
+		   "uplink" : "downlink")));
+	seq_printf(m, "\n %-35s = [%s/ %d/ %d] ",
+		   "BT [status/ rssi/ retryCnt]",
 		   ((coex_sta->bt_disabled) ? ("disabled") :
 		   ((coex_sta->c2h_bt_inquiry_page) ? ("inquiry/page scan") :
 		   ((BT_8821A_1ANT_BT_STATUS_NON_CONNECTED_IDLE ==
@@ -2289,166 +2271,143 @@ void ex_btc8821a1ant_display_coex_info(struct btc_coexist *btcoexist)
 		   "connected-idle" : "busy")))),
 		   coex_sta->bt_rssi, coex_sta->bt_retry_cnt);
 
-	RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
-		 "\r\n %-35s = %d / %d / %d / %d", "SCO/HID/PAN/A2DP",
-		 bt_link_info->sco_exist,
-		 bt_link_info->hid_exist,
-		 bt_link_info->pan_exist,
-		 bt_link_info->a2dp_exist);
-	btcoexist->btc_disp_dbg_msg(btcoexist, BTC_DBG_DISP_BT_LINK_INFO);
+	seq_printf(m, "\n %-35s = %d / %d / %d / %d", "SCO/HID/PAN/A2DP",
+		   bt_link_info->sco_exist,
+		   bt_link_info->hid_exist,
+		   bt_link_info->pan_exist,
+		   bt_link_info->a2dp_exist);
+	btcoexist->btc_disp_dbg_msg(btcoexist, BTC_DBG_DISP_BT_LINK_INFO, m);
 
 	bt_info_ext = coex_sta->bt_info_ext;
-	RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
-		 "\r\n %-35s = %s",
-		 "BT Info A2DP rate",
-		 (bt_info_ext & BIT0) ?
-		 "Basic rate" : "EDR rate");
+	seq_printf(m, "\n %-35s = %s",
+		   "BT Info A2DP rate",
+		   (bt_info_ext & BIT0) ?
+		   "Basic rate" : "EDR rate");
 
 	for (i = 0; i < BT_INFO_SRC_8821A_1ANT_MAX; i++) {
 		if (coex_sta->bt_info_c2h_cnt[i]) {
-			RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
-				 "\r\n %-35s = %7ph(%d)",
-				 glbt_info_src_8821a_1ant[i],
-				 coex_sta->bt_info_c2h[i],
-				 coex_sta->bt_info_c2h_cnt[i]);
+			seq_printf(m, "\n %-35s = %7ph(%d)",
+				   glbt_info_src_8821a_1ant[i],
+				   coex_sta->bt_info_c2h[i],
+				   coex_sta->bt_info_c2h_cnt[i]);
 		}
 	}
-	RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
-		 "\r\n %-35s = %s/%s, (0x%x/0x%x)",
-		 "PS state, IPS/LPS, (lps/rpwm)",
-		 ((coex_sta->under_ips ? "IPS ON" : "IPS OFF")),
-		 ((coex_sta->under_lps ? "LPS ON" : "LPS OFF")),
-		 btcoexist->bt_info.lps_val,
-		 btcoexist->bt_info.rpwm_val);
-	btcoexist->btc_disp_dbg_msg(btcoexist, BTC_DBG_DISP_FW_PWR_MODE_CMD);
+	seq_printf(m, "\n %-35s = %s/%s, (0x%x/0x%x)",
+		   "PS state, IPS/LPS, (lps/rpwm)",
+		   ((coex_sta->under_ips ? "IPS ON" : "IPS OFF")),
+		   ((coex_sta->under_lps ? "LPS ON" : "LPS OFF")),
+		   btcoexist->bt_info.lps_val,
+		   btcoexist->bt_info.rpwm_val);
+	btcoexist->btc_disp_dbg_msg(btcoexist, BTC_DBG_DISP_FW_PWR_MODE_CMD, m);
 
 	if (!btcoexist->manual_control) {
 		/* Sw mechanism*/
-		RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
-			 "\r\n %-35s",
-			 "============[Sw mechanism]============");
-
-		RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
-			 "\r\n %-35s = %d", "SM[LowPenaltyRA]",
-			 coex_dm->cur_low_penalty_ra);
-
-		RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
-			 "\r\n %-35s = %s/ %s/ %d ",
-			 "DelBA/ BtCtrlAgg/ AggSize",
-			 (btcoexist->bt_info.reject_agg_pkt ? "Yes" : "No"),
-			 (btcoexist->bt_info.bt_ctrl_buf_size ? "Yes" : "No"),
-			 btcoexist->bt_info.agg_buf_size);
-		RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
-			 "\r\n %-35s = 0x%x ", "Rate Mask",
-			 btcoexist->bt_info.ra_mask);
+		seq_printf(m, "\n %-35s",
+			   "============[Sw mechanism]============");
+
+		seq_printf(m, "\n %-35s = %d", "SM[LowPenaltyRA]",
+			   coex_dm->cur_low_penalty_ra);
+
+		seq_printf(m, "\n %-35s = %s/ %s/ %d ",
+			   "DelBA/ BtCtrlAgg/ AggSize",
+			   (btcoexist->bt_info.reject_agg_pkt ? "Yes" : "No"),
+			   (btcoexist->bt_info.bt_ctrl_buf_size ? "Yes" : "No"),
+			   btcoexist->bt_info.agg_buf_size);
+		seq_printf(m, "\n %-35s = 0x%x ", "Rate Mask",
+			   btcoexist->bt_info.ra_mask);
 
 		/* Fw mechanism */
-		RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s",
-			 "============[Fw mechanism]============");
+		seq_printf(m, "\n %-35s",
+			   "============[Fw mechanism]============");
 
 		ps_tdma_case = coex_dm->cur_ps_tdma;
-		RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
-			 "\r\n %-35s = %5ph case-%d (auto:%d)",
-			 "PS TDMA",
-			 coex_dm->ps_tdma_para,
-			 ps_tdma_case,
-			 coex_dm->auto_tdma_adjust);
-
-		RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
-			 "\r\n %-35s = 0x%x ",
-			 "Latest error condition(should be 0)",
+		seq_printf(m, "\n %-35s = %5ph case-%d (auto:%d)",
+			   "PS TDMA",
+			   coex_dm->ps_tdma_para,
+			   ps_tdma_case,
+			   coex_dm->auto_tdma_adjust);
+
+		seq_printf(m, "\n %-35s = 0x%x ",
+			   "Latest error condition(should be 0)",
 			   coex_dm->error_condition);
 
-		RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
-			 "\r\n %-35s = %d ", "IgnWlanAct",
-			 coex_dm->cur_ignore_wlan_act);
+		seq_printf(m, "\n %-35s = %d ", "IgnWlanAct",
+			   coex_dm->cur_ignore_wlan_act);
 	}
 
 	/* Hw setting */
-	RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
-		 "\r\n %-35s", "============[Hw setting]============");
+	seq_printf(m, "\n %-35s", "============[Hw setting]============");
 
-	RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
-		 "\r\n %-35s = 0x%x/0x%x/0x%x/0x%x",
-		 "backup ARFR1/ARFR2/RL/AMaxTime",
-		 coex_dm->backup_arfr_cnt1,
-		 coex_dm->backup_arfr_cnt2,
-		 coex_dm->backup_retry_limit,
-		 coex_dm->backup_ampdu_max_time);
+	seq_printf(m, "\n %-35s = 0x%x/0x%x/0x%x/0x%x",
+		   "backup ARFR1/ARFR2/RL/AMaxTime",
+		   coex_dm->backup_arfr_cnt1,
+		   coex_dm->backup_arfr_cnt2,
+		   coex_dm->backup_retry_limit,
+		   coex_dm->backup_ampdu_max_time);
 
 	u4_tmp[0] = btcoexist->btc_read_4byte(btcoexist, 0x430);
 	u4_tmp[1] = btcoexist->btc_read_4byte(btcoexist, 0x434);
 	u2_tmp[0] = btcoexist->btc_read_2byte(btcoexist, 0x42a);
 	u1_tmp[0] = btcoexist->btc_read_1byte(btcoexist, 0x456);
-	RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
-		 "\r\n %-35s = 0x%x/0x%x/0x%x/0x%x",
-		 "0x430/0x434/0x42a/0x456",
-		 u4_tmp[0], u4_tmp[1], u2_tmp[0], u1_tmp[0]);
+	seq_printf(m, "\n %-35s = 0x%x/0x%x/0x%x/0x%x",
+		   "0x430/0x434/0x42a/0x456",
+		   u4_tmp[0], u4_tmp[1], u2_tmp[0], u1_tmp[0]);
 
 	u1_tmp[0] = btcoexist->btc_read_1byte(btcoexist, 0x778);
 	u4_tmp[0] = btcoexist->btc_read_4byte(btcoexist, 0xc58);
-	RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
-		 "\r\n %-35s = 0x%x/ 0x%x", "0x778/ 0xc58[29:25]",
-		 u1_tmp[0], (u4_tmp[0] & 0x3e000000) >> 25);
+	seq_printf(m, "\n %-35s = 0x%x/ 0x%x", "0x778/ 0xc58[29:25]",
+		   u1_tmp[0], (u4_tmp[0] & 0x3e000000) >> 25);
 
 	u1_tmp[0] = btcoexist->btc_read_1byte(btcoexist, 0x8db);
-	RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
-		 "\r\n %-35s = 0x%x", "0x8db[6:5]",
-		 ((u1_tmp[0] & 0x60) >> 5));
+	seq_printf(m, "\n %-35s = 0x%x", "0x8db[6:5]",
+		   ((u1_tmp[0] & 0x60) >> 5));
 
 	u1_tmp[0] = btcoexist->btc_read_1byte(btcoexist, 0x975);
 	u4_tmp[0] = btcoexist->btc_read_4byte(btcoexist, 0xcb4);
-	RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
-		 "\r\n %-35s = 0x%x/ 0x%x/ 0x%x",
-		 "0xcb4[29:28]/0xcb4[7:0]/0x974[9:8]",
-		 (u4_tmp[0] & 0x30000000) >> 28,
-		  u4_tmp[0] & 0xff,
-		  u1_tmp[0] & 0x3);
+	seq_printf(m, "\n %-35s = 0x%x/ 0x%x/ 0x%x",
+		   "0xcb4[29:28]/0xcb4[7:0]/0x974[9:8]",
+		   (u4_tmp[0] & 0x30000000) >> 28,
+		    u4_tmp[0] & 0xff,
+		    u1_tmp[0] & 0x3);
 
 	u1_tmp[0] = btcoexist->btc_read_1byte(btcoexist, 0x40);
 	u4_tmp[0] = btcoexist->btc_read_4byte(btcoexist, 0x4c);
 	u1_tmp[1] = btcoexist->btc_read_1byte(btcoexist, 0x64);
-	RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
-		 "\r\n %-35s = 0x%x/ 0x%x/ 0x%x",
-		 "0x40/0x4c[24:23]/0x64[0]",
-		 u1_tmp[0], ((u4_tmp[0] & 0x01800000) >> 23), u1_tmp[1] & 0x1);
+	seq_printf(m, "\n %-35s = 0x%x/ 0x%x/ 0x%x",
+		   "0x40/0x4c[24:23]/0x64[0]",
+		   u1_tmp[0], ((u4_tmp[0] & 0x01800000) >> 23),
+		   u1_tmp[1] & 0x1);
 
 	u4_tmp[0] = btcoexist->btc_read_4byte(btcoexist, 0x550);
 	u1_tmp[0] = btcoexist->btc_read_1byte(btcoexist, 0x522);
-	RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
-		 "\r\n %-35s = 0x%x/ 0x%x", "0x550(bcn ctrl)/0x522",
-		 u4_tmp[0], u1_tmp[0]);
+	seq_printf(m, "\n %-35s = 0x%x/ 0x%x", "0x550(bcn ctrl)/0x522",
+		   u4_tmp[0], u1_tmp[0]);
 
 	u4_tmp[0] = btcoexist->btc_read_4byte(btcoexist, 0xc50);
-	RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
-		 "\r\n %-35s = 0x%x", "0xc50(dig)",
-		 u4_tmp[0] & 0xff);
+	seq_printf(m, "\n %-35s = 0x%x", "0xc50(dig)",
+		   u4_tmp[0] & 0xff);
 
 	u4_tmp[0] = btcoexist->btc_read_4byte(btcoexist, 0xf48);
 	u1_tmp[0] = btcoexist->btc_read_1byte(btcoexist, 0xa5d);
 	u1_tmp[1] = btcoexist->btc_read_1byte(btcoexist, 0xa5c);
-	RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
-		 "\r\n %-35s = 0x%x/ 0x%x", "OFDM-FA/ CCK-FA",
-		 u4_tmp[0], (u1_tmp[0] << 8) + u1_tmp[1]);
+	seq_printf(m, "\n %-35s = 0x%x/ 0x%x", "OFDM-FA/ CCK-FA",
+		   u4_tmp[0], (u1_tmp[0] << 8) + u1_tmp[1]);
 
 	u4_tmp[0] = btcoexist->btc_read_4byte(btcoexist, 0x6c0);
 	u4_tmp[1] = btcoexist->btc_read_4byte(btcoexist, 0x6c4);
 	u4_tmp[2] = btcoexist->btc_read_4byte(btcoexist, 0x6c8);
 	u1_tmp[0] = btcoexist->btc_read_1byte(btcoexist, 0x6cc);
-	RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
-		 "\r\n %-35s = 0x%x/ 0x%x/ 0x%x/ 0x%x",
-		 "0x6c0/0x6c4/0x6c8/0x6cc(coexTable)",
+	seq_printf(m, "\n %-35s = 0x%x/ 0x%x/ 0x%x/ 0x%x",
+		   "0x6c0/0x6c4/0x6c8/0x6cc(coexTable)",
 		   u4_tmp[0], u4_tmp[1], u4_tmp[2], u1_tmp[0]);
 
-	RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
-		 "\r\n %-35s = %d/ %d", "0x770(high-pri rx/tx)",
-		 coex_sta->high_priority_rx, coex_sta->high_priority_tx);
-	RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
-		 "\r\n %-35s = %d/ %d", "0x774(low-pri rx/tx)",
-		 coex_sta->low_priority_rx, coex_sta->low_priority_tx);
+	seq_printf(m, "\n %-35s = %d/ %d", "0x770(high-pri rx/tx)",
+		   coex_sta->high_priority_rx, coex_sta->high_priority_tx);
+	seq_printf(m, "\n %-35s = %d/ %d", "0x774(low-pri rx/tx)",
+		   coex_sta->low_priority_rx, coex_sta->low_priority_tx);
 	if (btcoexist->auto_report_1ant)
 		btc8821a1ant_monitor_bt_ctr(btcoexist);
-	btcoexist->btc_disp_dbg_msg(btcoexist, BTC_DBG_DISP_COEX_STATISTICS);
+	btcoexist->btc_disp_dbg_msg(btcoexist, BTC_DBG_DISP_COEX_STATISTICS, m);
 }
 
 void ex_btc8821a1ant_ips_notify(struct btc_coexist *btcoexist, u8 type)
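One property all of these long reports rely on: seq_printf() needs no per-call error handling. If the output outgrows the seq_file buffer, the core detects it via seq_has_overflowed(), enlarges the buffer, and invokes the show() method again from the top, so a writer may emit dozens of lines unconditionally, as above. A condensed illustration (the seq_file behavior described is real; the show function is hypothetical):

#include <linux/seq_file.h>

static int long_report_show_example(struct seq_file *m, void *v)
{
	int i;

	/*
	 * Emit freely: on overflow the seq_file core grows the buffer
	 * and re-runs this callback, so no return-value checks are
	 * needed at each call site.
	 */
	for (i = 0; i < 1024; i++)
		seq_printf(m, "\n line %d", i);

	return 0;
}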
diff --git a/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8821a1ant.h b/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8821a1ant.h
index b0a6626fbb66..a498ff5b1b9c 100644
--- a/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8821a1ant.h
+++ b/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8821a1ant.h
@@ -186,7 +186,8 @@ void ex_btc8821a1ant_bt_info_notify(struct btc_coexist *btcoexist,
 void ex_btc8821a1ant_halt_notify(struct btc_coexist *btcoexist);
 void ex_btc8821a1ant_pnp_notify(struct btc_coexist *btcoexist, u8 pnpstate);
 void ex_btc8821a1ant_periodical(struct btc_coexist *btcoexist);
-void ex_btc8821a1ant_display_coex_info(struct btc_coexist *btcoexist);
+void ex_btc8821a1ant_display_coex_info(struct btc_coexist *btcoexist,
+				       struct seq_file *m);
 void ex_btc8821a1ant_dbg_control(struct btc_coexist *btcoexist, u8 op_code,
 				 u8 op_len, u8 *data);
 void ex_btc8821a1ant_pnp_notify(struct btc_coexist *btcoexist, u8 pnp_state);
diff --git a/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8821a2ant.c b/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8821a2ant.c
index 41943c34edff..d5f282cb9d24 100644
--- a/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8821a2ant.c
+++ b/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8821a2ant.c
@@ -3657,11 +3657,11 @@ void ex_btc8821a2ant_init_coex_dm(struct btc_coexist *btcoexist)
 	btc8821a2ant_init_coex_dm(btcoexist);
 }
 
-void ex_btc8821a2ant_display_coex_info(struct btc_coexist *btcoexist)
+void ex_btc8821a2ant_display_coex_info(struct btc_coexist *btcoexist,
+				       struct seq_file *m)
 {
 	struct btc_board_info *board_info = &btcoexist->board_info;
 	struct btc_stack_info *stack_info = &btcoexist->stack_info;
-	struct rtl_priv *rtlpriv = btcoexist->adapter;
 	u8 u1tmp[4], i, bt_info_ext, ps_tdma_case = 0;
 	u32 u4tmp[4];
 	bool roam = false, scan = false, link = false, wifi_under_5g = false;
@@ -3671,32 +3671,22 @@ void ex_btc8821a2ant_display_coex_info(struct btc_coexist *btcoexist)
 	u8 wifi_dot_11_chnl, wifi_hs_chnl;
 	u32 fw_ver = 0, bt_patch_ver = 0;
 
-	RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
-		 "\r\n ============[BT Coexist info]============");
-
-	if (!board_info->bt_exist) {
-		RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n BT not exists !!!");
-		return;
-	}
+	seq_puts(m, "\n ============[BT Coexist info]============");
 
-	RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
-		 "\r\n %-35s = %d/ %d ", "Ant PG number/ Ant mechanism:",
-		 board_info->pg_ant_num, board_info->btdm_ant_num);
+	seq_printf(m, "\n %-35s = %d/ %d ", "Ant PG number/ Ant mechanism:",
+		   board_info->pg_ant_num, board_info->btdm_ant_num);
 
 	if (btcoexist->manual_control) {
-		RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
-			 "\r\n %-35s", "[Action Manual control]!!");
+		seq_printf(m, "\n %-35s", "[Action Manual control]!!");
 	}
 
-	RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
-		 "\r\n %-35s = %s / %d", "BT stack/ hci ext ver",
+	seq_printf(m, "\n %-35s = %s / %d", "BT stack/ hci ext ver",
 		   ((stack_info->profile_notified) ? "Yes" : "No"),
 		   stack_info->hci_version);
 
 	btcoexist->btc_get(btcoexist, BTC_GET_U4_BT_PATCH_VER, &bt_patch_ver);
 	btcoexist->btc_get(btcoexist, BTC_GET_U4_WIFI_FW_VER, &fw_ver);
-	RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
-		 "\r\n %-35s = %d_%d/ 0x%x/ 0x%x(%d)",
+	seq_printf(m, "\n %-35s = %d_%d/ 0x%x/ 0x%x(%d)",
 		   "CoexVer/ FwVer/ PatchVer",
 		   glcoex_ver_date_8821a_2ant, glcoex_ver_8821a_2ant,
 		   fw_ver, bt_patch_ver, bt_patch_ver);
@@ -3707,27 +3697,23 @@ void ex_btc8821a2ant_display_coex_info(struct btc_coexist *btcoexist)
 		BTC_GET_U1_WIFI_DOT11_CHNL, &wifi_dot_11_chnl);
 	btcoexist->btc_get(btcoexist,
 		BTC_GET_U1_WIFI_HS_CHNL, &wifi_hs_chnl);
-	RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
-		 "\r\n %-35s = %d / %d(%d)",
+	seq_printf(m, "\n %-35s = %d / %d(%d)",
 		   "Dot11 channel / HsMode(HsChnl)",
 		   wifi_dot_11_chnl, bt_hs_on, wifi_hs_chnl);
 
-	RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
-		 "\r\n %-35s = %3ph ",
+	seq_printf(m, "\n %-35s = %3ph ",
 		   "H2C Wifi inform bt chnl Info",
 		   coex_dm->wifi_chnl_info);
 
 	btcoexist->btc_get(btcoexist, BTC_GET_S4_WIFI_RSSI, &wifi_rssi);
 	btcoexist->btc_get(btcoexist, BTC_GET_S4_HS_RSSI, &bt_hs_rssi);
-	RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
-		 "\r\n %-35s = %ld/ %ld", "Wifi rssi/ HS rssi",
+	seq_printf(m, "\n %-35s = %ld/ %ld", "Wifi rssi/ HS rssi",
 		   wifi_rssi, bt_hs_rssi);
 
 	btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_SCAN, &scan);
 	btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_LINK, &link);
 	btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_ROAM, &roam);
-	RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
-		 "\r\n %-35s = %d/ %d/ %d ", "Wifi link/ roam/ scan",
+	seq_printf(m, "\n %-35s = %d/ %d/ %d ", "Wifi link/ roam/ scan",
 		   link, roam, scan);
 
 	btcoexist->btc_get(btcoexist,
@@ -3738,8 +3724,7 @@ void ex_btc8821a2ant_display_coex_info(struct btc_coexist *btcoexist)
 		BTC_GET_BL_WIFI_BUSY, &wifi_busy);
 	btcoexist->btc_get(btcoexist,
 		BTC_GET_U4_WIFI_TRAFFIC_DIRECTION, &wifi_traffic_dir);
-	RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
-		 "\r\n %-35s = %s / %s/ %s ", "Wifi status",
+	seq_printf(m, "\n %-35s = %s / %s/ %s ", "Wifi status",
 		   (wifi_under_5g ? "5G" : "2.4G"),
 		   ((BTC_WIFI_BW_LEGACY == wifi_bw) ? "Legacy" :
 		    (((BTC_WIFI_BW_HT40 == wifi_bw) ? "HT40" : "HT20"))),
@@ -3748,134 +3733,128 @@ void ex_btc8821a2ant_display_coex_info(struct btc_coexist *btcoexist)
 		     "uplink" : "downlink")));
 
 	if (stack_info->profile_notified) {
-		RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
-			 "\r\n %-35s = %d / %d / %d / %d", "SCO/HID/PAN/A2DP",
+		seq_printf(m, "\n %-35s = %d / %d / %d / %d",
+			   "SCO/HID/PAN/A2DP",
 			   stack_info->sco_exist, stack_info->hid_exist,
 			   stack_info->pan_exist, stack_info->a2dp_exist);
 
 		btcoexist->btc_disp_dbg_msg(btcoexist,
-					    BTC_DBG_DISP_BT_LINK_INFO);
+					    BTC_DBG_DISP_BT_LINK_INFO,
+					    m);
 	}
 
 	bt_info_ext = coex_sta->bt_info_ext;
-	RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s = %s",
-		 "BT Info A2DP rate",
+	seq_printf(m, "\n %-35s = %s", "BT Info A2DP rate",
 		   (bt_info_ext&BIT0) ? "Basic rate" : "EDR rate");
 
 	for (i = 0; i < BT_INFO_SRC_8821A_2ANT_MAX; i++) {
 		if (coex_sta->bt_info_c2h_cnt[i]) {
-			RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
-				 "\r\n %-35s = %7ph(%d)",
-				 glbt_info_src_8821a_2ant[i],
-				 coex_sta->bt_info_c2h[i],
-				 coex_sta->bt_info_c2h_cnt[i]);
+			seq_printf(m, "\n %-35s = %7ph(%d)",
+				   glbt_info_src_8821a_2ant[i],
+				   coex_sta->bt_info_c2h[i],
+				   coex_sta->bt_info_c2h_cnt[i]);
 		}
 	}
 
-	RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s = %s/%s",
-		 "PS state, IPS/LPS",
-		 ((coex_sta->under_ips ? "IPS ON" : "IPS OFF")),
-		 ((coex_sta->under_lps ? "LPS ON" : "LPS OFF")));
-	btcoexist->btc_disp_dbg_msg(btcoexist, BTC_DBG_DISP_FW_PWR_MODE_CMD);
+	seq_printf(m, "\n %-35s = %s/%s",
+		   "PS state, IPS/LPS",
+		   ((coex_sta->under_ips ? "IPS ON" : "IPS OFF")),
+		   ((coex_sta->under_lps ? "LPS ON" : "LPS OFF")));
+	btcoexist->btc_disp_dbg_msg(btcoexist, BTC_DBG_DISP_FW_PWR_MODE_CMD, m);
 
 	/* Sw mechanism*/
-	RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s",
-		 "============[Sw mechanism]============");
-	RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
-		 "\r\n %-35s = %d/ %d/ %d(0x%x) ",
-		 "SM2[AgcT/ AdcB/ SwDacSwing(lvl)]",
-		 coex_dm->cur_agc_table_en, coex_dm->cur_adc_back_off,
-		 coex_dm->cur_dac_swing_on, coex_dm->cur_dac_swing_lvl);
+	seq_printf(m, "\n %-35s",
+		   "============[Sw mechanism]============");
+	seq_printf(m, "\n %-35s = %d/ %d/ %d(0x%x) ",
+		   "SM2[AgcT/ AdcB/ SwDacSwing(lvl)]",
+		   coex_dm->cur_agc_table_en, coex_dm->cur_adc_back_off,
+		   coex_dm->cur_dac_swing_on, coex_dm->cur_dac_swing_lvl);
 
 	/* Fw mechanism*/
-	RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s",
-		 "============[Fw mechanism]============");
+	seq_printf(m, "\n %-35s",
+		   "============[Fw mechanism]============");
 
 	if (!btcoexist->manual_control) {
 		ps_tdma_case = coex_dm->cur_ps_tdma;
-		RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
-			 "\r\n %-35s = %5ph case-%d",
-			 "PS TDMA",
-			 coex_dm->ps_tdma_para, ps_tdma_case);
-
-		RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
-			 "\r\n %-35s = %d/ %d ", "DecBtPwr/ IgnWlanAct",
-			 coex_dm->cur_dec_bt_pwr_lvl,
-			 coex_dm->cur_ignore_wlan_act);
+		seq_printf(m, "\n %-35s = %5ph case-%d",
+			   "PS TDMA",
+			   coex_dm->ps_tdma_para, ps_tdma_case);
+
+		seq_printf(m, "\n %-35s = %d/ %d ", "DecBtPwr/ IgnWlanAct",
+			   coex_dm->cur_dec_bt_pwr_lvl,
+			   coex_dm->cur_ignore_wlan_act);
 	}
 
 	/* Hw setting*/
-	RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
-		 "\r\n %-35s", "============[Hw setting]============");
+	seq_printf(m, "\n %-35s", "============[Hw setting]============");
 
-	RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
-		 "\r\n %-35s = 0x%x", "RF-A, 0x1e initVal",
-		 coex_dm->bt_rf0x1e_backup);
+	seq_printf(m, "\n %-35s = 0x%x", "RF-A, 0x1e initVal",
+		   coex_dm->bt_rf0x1e_backup);
 
 	u1tmp[0] = btcoexist->btc_read_1byte(btcoexist, 0x778);
 	u1tmp[1] = btcoexist->btc_read_1byte(btcoexist, 0x6cc);
-	RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s = 0x%x/ 0x%x ",
-		 "0x778 (W_Act)/ 0x6cc (CoTab Sel)",
-		 u1tmp[0], u1tmp[1]);
+	seq_printf(m, "\n %-35s = 0x%x/ 0x%x ",
+		   "0x778 (W_Act)/ 0x6cc (CoTab Sel)",
+		   u1tmp[0], u1tmp[1]);
 
 	u1tmp[0] = btcoexist->btc_read_1byte(btcoexist, 0x8db);
 	u1tmp[1] = btcoexist->btc_read_1byte(btcoexist, 0xc5b);
-	RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s = 0x%x/ 0x%x",
-		 "0x8db(ADC)/0xc5b[29:25](DAC)",
-		 ((u1tmp[0] & 0x60) >> 5), ((u1tmp[1] & 0x3e) >> 1));
+	seq_printf(m, "\n %-35s = 0x%x/ 0x%x",
+		   "0x8db(ADC)/0xc5b[29:25](DAC)",
+		   ((u1tmp[0] & 0x60) >> 5), ((u1tmp[1] & 0x3e) >> 1));
 
 	u4tmp[0] = btcoexist->btc_read_4byte(btcoexist, 0xcb4);
-	RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s = 0x%x/ 0x%x",
-		 "0xcb4[7:0](ctrl)/ 0xcb4[29:28](val)",
-		 u4tmp[0] & 0xff, ((u4tmp[0] & 0x30000000) >> 28));
+	seq_printf(m, "\n %-35s = 0x%x/ 0x%x",
+		   "0xcb4[7:0](ctrl)/ 0xcb4[29:28](val)",
+		   u4tmp[0] & 0xff, ((u4tmp[0] & 0x30000000) >> 28));
 
 	u1tmp[0] = btcoexist->btc_read_1byte(btcoexist, 0x40);
 	u4tmp[0] = btcoexist->btc_read_4byte(btcoexist, 0x4c);
 	u4tmp[1] = btcoexist->btc_read_4byte(btcoexist, 0x974);
-	RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s = 0x%x/ 0x%x/ 0x%x",
-		 "0x40/ 0x4c[24:23]/ 0x974",
-		 u1tmp[0], ((u4tmp[0] & 0x01800000) >> 23), u4tmp[1]);
+	seq_printf(m, "\n %-35s = 0x%x/ 0x%x/ 0x%x",
+		   "0x40/ 0x4c[24:23]/ 0x974",
+		   u1tmp[0], ((u4tmp[0] & 0x01800000) >> 23), u4tmp[1]);
 
 	u4tmp[0] = btcoexist->btc_read_4byte(btcoexist, 0x550);
 	u1tmp[0] = btcoexist->btc_read_1byte(btcoexist, 0x522);
-	RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s = 0x%x/ 0x%x",
-		 "0x550(bcn ctrl)/0x522",
-		 u4tmp[0], u1tmp[0]);
+	seq_printf(m, "\n %-35s = 0x%x/ 0x%x",
+		   "0x550(bcn ctrl)/0x522",
+		   u4tmp[0], u1tmp[0]);
 
 	u4tmp[0] = btcoexist->btc_read_4byte(btcoexist, 0xc50);
 	u1tmp[0] = btcoexist->btc_read_1byte(btcoexist, 0xa0a);
-	RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s = 0x%x/ 0x%x",
-		 "0xc50(DIG)/0xa0a(CCK-TH)",
-		 u4tmp[0], u1tmp[0]);
+	seq_printf(m, "\n %-35s = 0x%x/ 0x%x",
+		   "0xc50(DIG)/0xa0a(CCK-TH)",
+		   u4tmp[0], u1tmp[0]);
 
 	u4tmp[0] = btcoexist->btc_read_4byte(btcoexist, 0xf48);
 	u1tmp[0] = btcoexist->btc_read_1byte(btcoexist, 0xa5b);
 	u1tmp[1] = btcoexist->btc_read_1byte(btcoexist, 0xa5c);
-	RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s = 0x%x/ 0x%x",
-		 "OFDM-FA/ CCK-FA",
-		 u4tmp[0], (u1tmp[0] << 8) + u1tmp[1]);
+	seq_printf(m, "\n %-35s = 0x%x/ 0x%x",
+		   "OFDM-FA/ CCK-FA",
+		   u4tmp[0], (u1tmp[0] << 8) + u1tmp[1]);
 
 	u4tmp[0] = btcoexist->btc_read_4byte(btcoexist, 0x6c0);
 	u4tmp[1] = btcoexist->btc_read_4byte(btcoexist, 0x6c4);
 	u4tmp[2] = btcoexist->btc_read_4byte(btcoexist, 0x6c8);
-	RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s = 0x%x/ 0x%x/ 0x%x",
-		 "0x6c0/0x6c4/0x6c8",
-		 u4tmp[0], u4tmp[1], u4tmp[2]);
-
-	RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s = %d/ %d",
-		 "0x770 (hi-pri Rx/Tx)",
-		 coex_sta->high_priority_rx, coex_sta->high_priority_tx);
-	RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s = %d/ %d",
+	seq_printf(m, "\n %-35s = 0x%x/ 0x%x/ 0x%x",
+		   "0x6c0/0x6c4/0x6c8",
+		   u4tmp[0], u4tmp[1], u4tmp[2]);
+
+	seq_printf(m, "\n %-35s = %d/ %d",
+		   "0x770 (hi-pri Rx/Tx)",
+		   coex_sta->high_priority_rx, coex_sta->high_priority_tx);
+	seq_printf(m, "\n %-35s = %d/ %d",
 		   "0x774(low-pri Rx/Tx)",
 		   coex_sta->low_priority_rx, coex_sta->low_priority_tx);
 
 	/* Tx mgnt queue hang or not, 0x41b should = 0xf, ex: 0xd ==>hang*/
 	u1tmp[0] = btcoexist->btc_read_1byte(btcoexist, 0x41b);
-	RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s = 0x%x",
-		 "0x41b (mgntQ hang chk == 0xf)",
-		 u1tmp[0]);
+	seq_printf(m, "\n %-35s = 0x%x",
+		   "0x41b (mgntQ hang chk == 0xf)",
+		   u1tmp[0]);
 
-	btcoexist->btc_disp_dbg_msg(btcoexist, BTC_DBG_DISP_COEX_STATISTICS);
+	btcoexist->btc_disp_dbg_msg(btcoexist, BTC_DBG_DISP_COEX_STATISTICS, m);
 }
 
 void ex_btc8821a2ant_ips_notify(struct btc_coexist *btcoexist, u8 type)
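
Aside, an illustrative sketch rather than part of the patch: threading a
struct seq_file * through the display routines above lets the same dump back
a debugfs read instead of spraying RT_TRACE lines into dmesg. Assuming a
hypothetical "btcoex" debugfs node and the DEFINE_SHOW_ATTRIBUTE() helper,
the wiring would look roughly like this:

#include <linux/debugfs.h>
#include <linux/seq_file.h>

static int btcoex_show(struct seq_file *m, void *v)
{
	struct btc_coexist *btcoexist = m->private;

	/* same entry point this patch passes 'm' into */
	exhalbtc_display_bt_coex_info(btcoexist, m);
	seq_putc(m, '\n');
	return 0;
}
DEFINE_SHOW_ATTRIBUTE(btcoex);	/* generates btcoex_open() and btcoex_fops */

static void btcoex_debugfs_init(struct btc_coexist *btcoexist)
{
	debugfs_create_file("btcoex", 0444, NULL, btcoexist, &btcoex_fops);
}

A read of the node then renders every seq_printf() line above in one pass.
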
diff --git a/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8821a2ant.h b/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8821a2ant.h
index a839d5574422..ce3e58c4e660 100644
--- a/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8821a2ant.h
+++ b/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8821a2ant.h
@@ -226,7 +226,8 @@ ex_btc8821a2ant_periodical(
 	);
 void
 ex_btc8821a2ant_display_coex_info(
-	struct btc_coexist *btcoexist
+	struct btc_coexist *btcoexist,
+	struct seq_file *m
 	);
 void ex_btc8821a2ant_pnp_notify(struct btc_coexist *btcoexist, u8 pnp_state);
 void ex_btc8821a2ant_pre_load_firmware(struct btc_coexist *btcoexist);
diff --git a/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtcoutsrc.c b/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtcoutsrc.c
index b5e9877d935c..1404729441a2 100644
--- a/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtcoutsrc.c
+++ b/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtcoutsrc.c
@@ -25,19 +25,11 @@
 
 #include "halbt_precomp.h"
 
-/***********************************************
- *		Global variables
- ***********************************************/
-
-struct btc_coexist gl_bt_coexist;
-
-u32 btc_dbg_type[BTC_MSG_MAX];
-
 /***************************************************
  *		Debug related function
  ***************************************************/
 
-const char *const gl_btc_wifi_bw_string[] = {
+static const char *const gl_btc_wifi_bw_string[] = {
 	"11bg",
 	"HT20",
 	"HT40",
@@ -45,7 +37,7 @@ const char *const gl_btc_wifi_bw_string[] = {
 	"HT160"
 };
 
-const char *const gl_btc_wifi_freq_string[] = {
+static const char *const gl_btc_wifi_freq_string[] = {
 	"2.4G",
 	"5G"
 };
@@ -103,21 +95,6 @@ static bool is_any_client_connect_to_ap(struct btc_coexist *btcoexist)
 		return false;
 }
 
-static bool halbtc_is_bt40(struct rtl_priv *adapter)
-{
-	struct rtl_priv *rtlpriv = adapter;
-	struct rtl_phy *rtlphy = &(rtlpriv->phy);
-	bool is_ht40 = true;
-	enum ht_channel_width bw = rtlphy->current_chan_bw;
-
-	if (bw == HT_CHANNEL_WIDTH_20)
-		is_ht40 = false;
-	else if (bw == HT_CHANNEL_WIDTH_20_40)
-		is_ht40 = true;
-
-	return is_ht40;
-}
-
 static bool halbtc_legacy(struct rtl_priv *adapter)
 {
 	struct rtl_priv *rtlpriv = adapter;
@@ -143,18 +120,26 @@ bool halbtc_is_wifi_uplink(struct rtl_priv *adapter)
 
 static u32 halbtc_get_wifi_bw(struct btc_coexist *btcoexist)
 {
-	struct rtl_priv *rtlpriv =
-		(struct rtl_priv *)btcoexist->adapter;
+	struct rtl_priv *rtlpriv = btcoexist->adapter;
+	struct rtl_phy *rtlphy = &rtlpriv->phy;
 	u32 wifi_bw = BTC_WIFI_BW_HT20;
 
-	if (halbtc_is_bt40(rtlpriv)) {
-		wifi_bw = BTC_WIFI_BW_HT40;
+	if (halbtc_legacy(rtlpriv)) {
+		wifi_bw = BTC_WIFI_BW_LEGACY;
 	} else {
-		if (halbtc_legacy(rtlpriv))
-			wifi_bw = BTC_WIFI_BW_LEGACY;
-		else
+		switch (rtlphy->current_chan_bw) {
+		case HT_CHANNEL_WIDTH_20:
 			wifi_bw = BTC_WIFI_BW_HT20;
+			break;
+		case HT_CHANNEL_WIDTH_20_40:
+			wifi_bw = BTC_WIFI_BW_HT40;
+			break;
+		case HT_CHANNEL_WIDTH_80:
+			wifi_bw = BTC_WIFI_BW_HT80;
+			break;
+		}
 	}
+
 	return wifi_bw;
 }
 
@@ -171,7 +156,7 @@ static u8 halbtc_get_wifi_central_chnl(struct btc_coexist *btcoexist)
 	return chnl;
 }
 
-u8 rtl_get_hwpg_single_ant_path(struct rtl_priv *rtlpriv)
+static u8 rtl_get_hwpg_single_ant_path(struct rtl_priv *rtlpriv)
 {
 	struct rtl_mod_params *mod_params = rtlpriv->cfg->mod_params;
 
@@ -186,12 +171,12 @@ u8 rtl_get_hwpg_single_ant_path(struct rtl_priv *rtlpriv)
 	return rtlpriv->btcoexist.btc_info.single_ant_path;
 }
 
-u8 rtl_get_hwpg_bt_type(struct rtl_priv *rtlpriv)
+static u8 rtl_get_hwpg_bt_type(struct rtl_priv *rtlpriv)
 {
 	return rtlpriv->btcoexist.btc_info.bt_type;
 }
 
-u8 rtl_get_hwpg_ant_num(struct rtl_priv *rtlpriv)
+static u8 rtl_get_hwpg_ant_num(struct rtl_priv *rtlpriv)
 {
 	struct rtl_mod_params *mod_params = rtlpriv->cfg->mod_params;
 	u8 num;
@@ -208,13 +193,117 @@ u8 rtl_get_hwpg_ant_num(struct rtl_priv *rtlpriv)
 	return num;
 }
 
-u8 rtl_get_hwpg_package_type(struct rtl_priv *rtlpriv)
+static u8 rtl_get_hwpg_package_type(struct rtl_priv *rtlpriv)
 {
 	struct rtl_hal *rtlhal = rtl_hal(rtlpriv);
 
 	return rtlhal->package_type;
 }
 
+static
+u8 rtl_get_hwpg_rfe_type(struct rtl_priv *rtlpriv)
+{
+	struct rtl_hal *rtlhal = rtl_hal(rtlpriv);
+
+	return rtlhal->rfe_type;
+}
+
+static
+bool halbtc_is_hw_mailbox_exist(struct btc_coexist *btcoexist)
+{
+	if (IS_HARDWARE_TYPE_8812(btcoexist->adapter))
+		return false;
+	else
+		return true;
+}
+
+static
+bool halbtc_send_bt_mp_operation(struct btc_coexist *btcoexist, u8 op_code,
+				 u8 *cmd, u32 len, unsigned long wait_ms)
+{
+	struct rtl_priv *rtlpriv;
+	const u8 oper_ver = 0;
+	u8 req_num;
+
+	if (!halbtc_is_hw_mailbox_exist(btcoexist))
+		return false;
+
+	if (wait_ms)	/* reinit before the H2C to avoid racing the reply */
+		reinit_completion(&btcoexist->bt_mp_comp);
+
+	rtlpriv = btcoexist->adapter;
+
+	/* fill req_num by op_code; rtl_btc_btmpinfo_notify() uses it
+	 * to determine the message type
+	 */
+	switch (op_code) {
+	case BT_OP_GET_BT_VERSION:
+		req_num = BT_SEQ_GET_BT_VERSION;
+		break;
+	case BT_OP_GET_AFH_MAP_L:
+		req_num = BT_SEQ_GET_AFH_MAP_L;
+		break;
+	case BT_OP_GET_AFH_MAP_M:
+		req_num = BT_SEQ_GET_AFH_MAP_M;
+		break;
+	case BT_OP_GET_AFH_MAP_H:
+		req_num = BT_SEQ_GET_AFH_MAP_H;
+		break;
+	case BT_OP_GET_BT_COEX_SUPPORTED_FEATURE:
+		req_num = BT_SEQ_GET_BT_COEX_SUPPORTED_FEATURE;
+		break;
+	case BT_OP_GET_BT_COEX_SUPPORTED_VERSION:
+		req_num = BT_SEQ_GET_BT_COEX_SUPPORTED_VERSION;
+		break;
+	case BT_OP_GET_BT_ANT_DET_VAL:
+		req_num = BT_SEQ_GET_BT_ANT_DET_VAL;
+		break;
+	case BT_OP_GET_BT_BLE_SCAN_PARA:
+		req_num = BT_SEQ_GET_BT_BLE_SCAN_PARA;
+		break;
+	case BT_OP_GET_BT_BLE_SCAN_TYPE:
+		req_num = BT_SEQ_GET_BT_BLE_SCAN_TYPE;
+		break;
+	case BT_OP_GET_BT_DEVICE_INFO:
+		req_num = BT_SEQ_GET_BT_DEVICE_INFO;
+		break;
+	case BT_OP_GET_BT_FORBIDDEN_SLOT_VAL:
+		req_num = BT_SEQ_GET_BT_FORB_SLOT_VAL;
+		break;
+	case BT_OP_WRITE_REG_ADDR:
+	case BT_OP_WRITE_REG_VALUE:
+	case BT_OP_READ_REG:
+	default:
+		req_num = BT_SEQ_DONT_CARE;
+		break;
+	}
+
+	cmd[0] |= (oper_ver & 0x0f);		/* Set OperVer */
+	cmd[0] |= ((req_num << 4) & 0xf0);	/* Set ReqNum */
+	cmd[1] = op_code;
+	rtlpriv->cfg->ops->fill_h2c_cmd(rtlpriv->mac80211.hw, 0x67, len, cmd);
+
+	/* fire-and-forget if the caller did not ask to wait */
+	if (!wait_ms)
+		return true;
+
+	RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+		 "btmpinfo wait req_num=%d wait=%ld\n", req_num, wait_ms);
+
+	if (in_interrupt())	/* cannot sleep in atomic context */
+		return false;
+
+	if (wait_for_completion_timeout(&btcoexist->bt_mp_comp,
+					msecs_to_jiffies(wait_ms)) == 0) {
+		RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_DMESG,
+			 "btmpinfo wait (req_num=%d) timeout\n", req_num);
+
+		return false;	/* timeout */
+	}
+
+	return true;
+}
+
 static void halbtc_leave_lps(struct btc_coexist *btcoexist)
 {
 	struct rtl_priv *rtlpriv;
@@ -342,25 +431,80 @@ static void halbtc_aggregation_check(struct btc_coexist *btcoexist)
 
 static u32 halbtc_get_bt_patch_version(struct btc_coexist *btcoexist)
 {
-	struct rtl_priv *rtlpriv = btcoexist->adapter;
 	u8 cmd_buffer[4] = {0};
-	u8 oper_ver = 0;
-	u8 req_num = 0x0E;
 
 	if (btcoexist->bt_info.bt_real_fw_ver)
 		goto label_done;
 
-	cmd_buffer[0] |= (oper_ver & 0x0f);	/* Set OperVer */
-	cmd_buffer[0] |= ((req_num << 4) & 0xf0);	/* Set ReqNum */
-	cmd_buffer[1] = 0; /* BT_OP_GET_BT_VERSION = 0 */
-	rtlpriv->cfg->ops->fill_h2c_cmd(rtlpriv->mac80211.hw, 0x67, 4,
-					&cmd_buffer[0]);
+	/* cmd_buffer[0] and [1] are filled by halbtc_send_bt_mp_operation() */
+	halbtc_send_bt_mp_operation(btcoexist, BT_OP_GET_BT_VERSION,
+				    cmd_buffer, 4, 200);
 
 label_done:
 	return btcoexist->bt_info.bt_real_fw_ver;
 }
 
-u32 halbtc_get_wifi_link_status(struct btc_coexist *btcoexist)
+static u32 halbtc_get_bt_coex_supported_feature(void *btc_context)
+{
+	struct btc_coexist *btcoexist = (struct btc_coexist *)btc_context;
+	u8 cmd_buffer[4] = {0};
+
+	if (btcoexist->bt_info.bt_supported_feature)
+		goto label_done;
+
+	/* cmd_buffer[0] and [1] are filled by halbtc_send_bt_mp_operation() */
+	halbtc_send_bt_mp_operation(btcoexist,
+				    BT_OP_GET_BT_COEX_SUPPORTED_FEATURE,
+				    cmd_buffer, 4, 200);
+
+label_done:
+	return btcoexist->bt_info.bt_supported_feature;
+}
+
+static u32 halbtc_get_bt_coex_supported_version(void *btc_context)
+{
+	struct btc_coexist *btcoexist = (struct btc_coexist *)btc_context;
+	u8 cmd_buffer[4] = {0};
+
+	if (btcoexist->bt_info.bt_supported_version)
+		goto label_done;
+
+	/* cmd_buffer[0] and [1] are filled by halbtc_send_bt_mp_operation() */
+	halbtc_send_bt_mp_operation(btcoexist,
+				    BT_OP_GET_BT_COEX_SUPPORTED_VERSION,
+				    cmd_buffer, 4, 200);
+
+label_done:
+	return btcoexist->bt_info.bt_supported_version;
+}
+
+static u32 halbtc_get_bt_device_info(void *btc_context)
+{
+	struct btc_coexist *btcoexist = (struct btc_coexist *)btc_context;
+	u8 cmd_buffer[4] = {0};
+
+	/* cmd_buffer[0] and [1] are filled by halbtc_send_bt_mp_operation() */
+	halbtc_send_bt_mp_operation(btcoexist,
+				    BT_OP_GET_BT_DEVICE_INFO,
+				    cmd_buffer, 4, 200);
+
+	return btcoexist->bt_info.bt_device_info;
+}
+
+static u32 halbtc_get_bt_forbidden_slot_val(void *btc_context)
+{
+	struct btc_coexist *btcoexist = (struct btc_coexist *)btc_context;
+	u8 cmd_buffer[4] = {0};
+
+	/* cmd_buffer[0] and [1] are filled by halbtc_send_bt_mp_operation() */
+	halbtc_send_bt_mp_operation(btcoexist,
+				    BT_OP_GET_BT_FORBIDDEN_SLOT_VAL,
+				    cmd_buffer, 4, 200);
+
+	return btcoexist->bt_info.bt_forb_slot_val;
+}
+
+static u32 halbtc_get_wifi_link_status(struct btc_coexist *btcoexist)
 {
 	/* return value:
 	 * [31:16] => connected port number
@@ -521,6 +665,18 @@ static bool halbtc_get(void *void_btcoexist, u8 get_type, void *out_buf)
 	case BTC_GET_U4_VENDOR:
 		*u32_tmp = BTC_VENDOR_OTHER;
 		break;
+	case BTC_GET_U4_SUPPORTED_VERSION:
+		*u32_tmp = halbtc_get_bt_coex_supported_version(btcoexist);
+		break;
+	case BTC_GET_U4_SUPPORTED_FEATURE:
+		*u32_tmp = halbtc_get_bt_coex_supported_feature(btcoexist);
+		break;
+	case BTC_GET_U4_BT_DEVICE_INFO:
+		*u32_tmp = halbtc_get_bt_device_info(btcoexist);
+		break;
+	case BTC_GET_U4_BT_FORBIDDEN_SLOT_VAL:
+		*u32_tmp = halbtc_get_bt_forbidden_slot_val(btcoexist);
+		break;
 	case BTC_GET_U1_WIFI_DOT11_CHNL:
 		*u8_tmp = rtlphy->current_channel;
 		break;
@@ -653,6 +809,104 @@ static bool halbtc_set(void *void_btcoexist, u8 set_type, void *in_buf)
 	return ret;
 }
 
+static void halbtc_display_coex_statistics(struct btc_coexist *btcoexist,
+					   struct seq_file *m)
+{
+}
+
+static void halbtc_display_bt_link_info(struct btc_coexist *btcoexist,
+					struct seq_file *m)
+{
+}
+
+static void halbtc_display_wifi_status(struct btc_coexist *btcoexist,
+				       struct seq_file *m)
+{
+	struct rtl_priv *rtlpriv = btcoexist->adapter;
+	s32 wifi_rssi = 0, bt_hs_rssi = 0;
+	bool scan = false, link = false, roam = false, wifi_busy = false;
+	bool wifi_under_b_mode = false;
+	bool wifi_under_5g = false;
+	u32 wifi_bw = BTC_WIFI_BW_HT20;
+	u32 wifi_traffic_dir = BTC_WIFI_TRAFFIC_TX;
+	u32 wifi_freq = BTC_FREQ_2_4G;
+	u32 wifi_link_status = 0x0;
+	bool bt_hs_on = false, under_ips = false, under_lps = false;
+	bool low_power = false, dc_mode = false;
+	u8 wifi_chnl = 0, wifi_hs_chnl = 0;
+	u8 ap_num = 0;
+
+	wifi_link_status = halbtc_get_wifi_link_status(btcoexist);
+	seq_printf(m, "\n %-35s = %d/ %d/ %d/ %d/ %d",
+		   "STA/vWifi/HS/p2pGo/p2pGc",
+		   ((wifi_link_status & WIFI_STA_CONNECTED) ? 1 : 0),
+		   ((wifi_link_status & WIFI_AP_CONNECTED) ? 1 : 0),
+		   ((wifi_link_status & WIFI_HS_CONNECTED) ? 1 : 0),
+		   ((wifi_link_status & WIFI_P2P_GO_CONNECTED) ? 1 : 0),
+		   ((wifi_link_status & WIFI_P2P_GC_CONNECTED) ? 1 : 0));
+
+	btcoexist->btc_get(btcoexist, BTC_GET_BL_HS_OPERATION, &bt_hs_on);
+	btcoexist->btc_get(btcoexist, BTC_GET_U1_WIFI_DOT11_CHNL, &wifi_chnl);
+	btcoexist->btc_get(btcoexist, BTC_GET_U1_WIFI_HS_CHNL, &wifi_hs_chnl);
+	seq_printf(m, "\n %-35s = %d / %d(%d)",
+		   "Dot11 channel / HsChnl(High Speed)",
+		   wifi_chnl, wifi_hs_chnl, bt_hs_on);
+
+	btcoexist->btc_get(btcoexist, BTC_GET_S4_WIFI_RSSI, &wifi_rssi);
+	btcoexist->btc_get(btcoexist, BTC_GET_S4_HS_RSSI, &bt_hs_rssi);
+	seq_printf(m, "\n %-35s = %d/ %d",
+		   "Wifi rssi/ HS rssi",
+		   wifi_rssi - 100, bt_hs_rssi - 100);
+
+	btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_SCAN, &scan);
+	btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_LINK, &link);
+	btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_ROAM, &roam);
+	seq_printf(m, "\n %-35s = %d/ %d/ %d ",
+		   "Wifi link/ roam/ scan",
+		   link, roam, scan);
+
+	btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_UNDER_5G, &wifi_under_5g);
+	btcoexist->btc_get(btcoexist, BTC_GET_U4_WIFI_BW, &wifi_bw);
+	btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_BUSY, &wifi_busy);
+	btcoexist->btc_get(btcoexist, BTC_GET_U4_WIFI_TRAFFIC_DIRECTION,
+			   &wifi_traffic_dir);
+	btcoexist->btc_get(btcoexist, BTC_GET_U1_AP_NUM, &ap_num);
+	wifi_freq = (wifi_under_5g ? BTC_FREQ_5G : BTC_FREQ_2_4G);
+	btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_UNDER_B_MODE,
+			   &wifi_under_b_mode);
+
+	seq_printf(m, "\n %-35s = %s / %s/ %s/ AP=%d ",
+		   "Wifi freq/ bw/ traffic",
+		   gl_btc_wifi_freq_string[wifi_freq],
+		   ((wifi_under_b_mode) ? "11b" :
+		    gl_btc_wifi_bw_string[wifi_bw]),
+		   ((!wifi_busy) ? "idle" :
+		    ((wifi_traffic_dir == BTC_WIFI_TRAFFIC_TX) ?
+		     "uplink" : "downlink")),
+		   ap_num);
+
+	/* power status */
+	dc_mode = true; /* TODO */
+	under_ips = rtlpriv->psc.inactive_pwrstate == ERFOFF;
+	under_lps = rtlpriv->psc.dot11_psmode != EACTIVE;
+	low_power = false; /* TODO */
+	seq_printf(m, "\n %-35s = %s%s%s%s",
+		   "Power Status",
+		   (dc_mode ? "DC mode" : "AC mode"),
+		   (under_ips ? ", IPS ON" : ""),
+		   (under_lps ? ", LPS ON" : ""),
+		   (low_power ? ", 32k" : ""));
+
+	seq_printf(m,
+		   "\n %-35s = %02x %02x %02x %02x %02x %02x (0x%x/0x%x)",
+		   "Power mode cmd(lps/rpwm)",
+		   btcoexist->pwr_mode_val[0], btcoexist->pwr_mode_val[1],
+		   btcoexist->pwr_mode_val[2], btcoexist->pwr_mode_val[3],
+		   btcoexist->pwr_mode_val[4], btcoexist->pwr_mode_val[5],
+		   btcoexist->bt_info.lps_val,
+		   btcoexist->bt_info.rpwm_val);
+}
+
 /************************************************************
  *		IO related function
  ************************************************************/
@@ -726,7 +980,8 @@ static void halbtc_write_4byte(void *bt_context, u32 reg_addr, u32 data)
 	rtl_write_dword(rtlpriv, reg_addr, data);
 }
 
-void halbtc_write_local_reg_1byte(void *btc_context, u32 reg_addr, u8 data)
+static void halbtc_write_local_reg_1byte(void *btc_context, u32 reg_addr,
+					 u8 data)
 {
 	struct btc_coexist *btcoexist = (struct btc_coexist *)btc_context;
 	struct rtl_priv *rtlpriv = btcoexist->adapter;
@@ -739,22 +994,6 @@ void halbtc_write_local_reg_1byte(void *btc_context, u32 reg_addr, u8 data)
 		rtl_write_byte(rtlpriv, reg_addr, data);
 }
 
-void halbtc_set_macreg(void *btc_context, u32 reg_addr, u32 bit_mask, u32 data)
-{
-	struct btc_coexist *btcoexist = (struct btc_coexist *)btc_context;
-	struct rtl_priv *rtlpriv = btcoexist->adapter;
-
-	rtl_set_bbreg(rtlpriv->mac80211.hw, reg_addr, bit_mask, data);
-}
-
-u32 halbtc_get_macreg(void *btc_context, u32 reg_addr, u32 bit_mask)
-{
-	struct btc_coexist *btcoexist = (struct btc_coexist *)btc_context;
-	struct rtl_priv *rtlpriv = btcoexist->adapter;
-
-	return rtl_get_bbreg(rtlpriv->mac80211.hw, reg_addr, bit_mask);
-}
-
 static void halbtc_set_bbreg(void *bt_context, u32 reg_addr, u32 bit_mask,
 			     u32 data)
 {
@@ -800,38 +1039,47 @@ static void halbtc_fill_h2c_cmd(void *bt_context, u8 element_id,
 					cmd_len, cmd_buf);
 }
 
+static
 void halbtc_set_bt_reg(void *btc_context, u8 reg_type, u32 offset, u32 set_val)
 {
 	struct btc_coexist *btcoexist = (struct btc_coexist *)btc_context;
-	struct rtl_priv *rtlpriv = btcoexist->adapter;
 	u8 cmd_buffer1[4] = {0};
 	u8 cmd_buffer2[4] = {0};
-	u8 *addr_to_set = (u8 *)&offset;
-	u8 *value_to_set = (u8 *)&set_val;
-	u8 oper_ver = 0;
-	u8 req_num = 0;
 
-	if (IS_HARDWARE_TYPE_8723B(btcoexist->adapter)) {
-		cmd_buffer1[0] |= (oper_ver & 0x0f);	/* Set OperVer */
-		cmd_buffer1[0] |= ((req_num << 4) & 0xf0);	/* Set ReqNum */
-		cmd_buffer1[1] = 0x0d;	/* OpCode: BT_LO_OP_WRITE_REG_VALUE */
-		cmd_buffer1[2] = value_to_set[0];	/* Set WriteRegValue */
-		rtlpriv->cfg->ops->fill_h2c_cmd(rtlpriv->mac80211.hw, 0x67, 4,
-						&cmd_buffer1[0]);
-
-		msleep(200);
-		req_num++;
-
-		cmd_buffer2[0] |= (oper_ver & 0x0f);	/* Set OperVer */
-		cmd_buffer2[0] |= ((req_num << 4) & 0xf0);	/* Set ReqNum */
-		cmd_buffer2[1] = 0x0c;	/* OpCode: BT_LO_OP_WRITE_REG_ADDR */
-		cmd_buffer2[3] = addr_to_set[0];	/* Set WriteRegAddr */
-		rtlpriv->cfg->ops->fill_h2c_cmd(rtlpriv->mac80211.hw, 0x67, 4,
-						&cmd_buffer2[0]);
+	/* cmd_buffer[0] and [1] are filled by halbtc_send_bt_mp_operation() */
+	*((__le16 *)&cmd_buffer1[2]) = cpu_to_le16((u16)set_val);
+	if (!halbtc_send_bt_mp_operation(btcoexist, BT_OP_WRITE_REG_VALUE,
+					 cmd_buffer1, 4, 200))
+		return;
+
+	/* cmd_buffer[0] and [1] are filled by halbtc_send_bt_mp_operation() */
+	cmd_buffer2[2] = reg_type;
+	cmd_buffer2[3] = (u8)offset;
+	halbtc_send_bt_mp_operation(btcoexist, BT_OP_WRITE_REG_ADDR,
+				    cmd_buffer2, 4, 200);
+}
+
+static void halbtc_display_dbg_msg(void *bt_context, u8 disp_type,
+				   struct seq_file *m)
+{
+	struct btc_coexist *btcoexist = (struct btc_coexist *)bt_context;
+
+	switch (disp_type) {
+	case BTC_DBG_DISP_COEX_STATISTICS:
+		halbtc_display_coex_statistics(btcoexist, m);
+		break;
+	case BTC_DBG_DISP_BT_LINK_INFO:
+		halbtc_display_bt_link_info(btcoexist, m);
+		break;
+	case BTC_DBG_DISP_WIFI_STATUS:
+		halbtc_display_wifi_status(btcoexist, m);
+		break;
+	default:
+		break;
 	}
 }
 
-bool halbtc_under_ips(struct btc_coexist *btcoexist)
+static bool halbtc_under_ips(struct btc_coexist *btcoexist)
 {
 	struct rtl_priv *rtlpriv = btcoexist->adapter;
 	struct rtl_ps_ctl *ppsc = rtl_psc(rtlpriv);
@@ -849,12 +1097,95 @@ bool halbtc_under_ips(struct btc_coexist *btcoexist)
 	return false;
 }
 
+static u8 halbtc_get_ant_det_val_from_bt(void *btc_context)
+{
+	struct btc_coexist *btcoexist = (struct btc_coexist *)btc_context;
+	u8 cmd_buffer[4] = {0};
+
+	/* cmd_buffer[0] and [1] are filled by halbtc_send_bt_mp_operation() */
+	halbtc_send_bt_mp_operation(btcoexist, BT_OP_GET_BT_ANT_DET_VAL,
+				    cmd_buffer, 4, 200);
+
+	/* must wait for the completion to read back a valid value */
+
+	return btcoexist->bt_info.bt_ant_det_val;
+}
+
+static u8 halbtc_get_ble_scan_type_from_bt(void *btc_context)
+{
+	struct btc_coexist *btcoexist = (struct btc_coexist *)btc_context;
+	u8 cmd_buffer[4] = {0};
+
+	/* cmd_buffer[0] and [1] are filled by halbtc_send_bt_mp_operation() */
+	halbtc_send_bt_mp_operation(btcoexist, BT_OP_GET_BT_BLE_SCAN_TYPE,
+				    cmd_buffer, 4, 200);
+
+	/* must wait for the completion to read back a valid value */
+
+	return btcoexist->bt_info.bt_ble_scan_type;
+}
+
+static u32 halbtc_get_ble_scan_para_from_bt(void *btc_context, u8 scan_type)
+{
+	struct btc_coexist *btcoexist = (struct btc_coexist *)btc_context;
+	u8 cmd_buffer[4] = {0};
+
+	/* cmd_buffer[0] and [1] are filled by halbtc_send_bt_mp_operation() */
+	halbtc_send_bt_mp_operation(btcoexist, BT_OP_GET_BT_BLE_SCAN_PARA,
+				    cmd_buffer, 4, 200);
+
+	/* must wait for the completion to read back a valid value */
+
+	return btcoexist->bt_info.bt_ble_scan_para;
+}
+
+static bool halbtc_get_bt_afh_map_from_bt(void *btc_context, u8 map_type,
+					  u8 *afh_map)
+{
+	struct btc_coexist *btcoexist = (struct btc_coexist *)btc_context;
+	u8 cmd_buffer[2] = {0};
+	bool ret;
+	u32 *afh_map_l = (u32 *)afh_map;
+	u32 *afh_map_m = (u32 *)(afh_map + 4);
+	u16 *afh_map_h = (u16 *)(afh_map + 8);
+
+	/* cmd_buffer[0] and [1] are filled by halbtc_send_bt_mp_operation() */
+	ret = halbtc_send_bt_mp_operation(btcoexist, BT_OP_GET_AFH_MAP_L,
+					  cmd_buffer, 2, 200);
+	if (!ret)
+		goto exit;
+
+	*afh_map_l = btcoexist->bt_info.afh_map_l;
+
+	/* cmd_buffer[0] and [1] are filled by halbtc_send_bt_mp_operation() */
+	ret = halbtc_send_bt_mp_operation(btcoexist, BT_OP_GET_AFH_MAP_M,
+					  cmd_buffer, 2, 200);
+	if (!ret)
+		goto exit;
+
+	*afh_map_m = btcoexist->bt_info.afh_map_m;
+
+	/* cmd_buffer[0] and [1] are filled by halbtc_send_bt_mp_operation() */
+	ret = halbtc_send_bt_mp_operation(btcoexist, BT_OP_GET_AFH_MAP_H,
+					  cmd_buffer, 2, 200);
+	if (!ret)
+		goto exit;
+
+	*afh_map_h = btcoexist->bt_info.afh_map_h;
+
+exit:
+	return ret;
+}
+
 /*****************************************************************
  *         Extern functions called by other module
  *****************************************************************/
-bool exhalbtc_initlize_variables(void)
+bool exhalbtc_initlize_variables(struct rtl_priv *rtlpriv)
 {
-	struct btc_coexist *btcoexist = &gl_bt_coexist;
+	struct btc_coexist *btcoexist = rtl_btc_coexist(rtlpriv);
+
+	if (!btcoexist)
+		return false;
 
 	halbtc_dbg_init();
 
@@ -874,24 +1205,76 @@ bool exhalbtc_initlize_variables(void)
 	btcoexist->btc_get_rf_reg = halbtc_get_rfreg;
 
 	btcoexist->btc_fill_h2c = halbtc_fill_h2c_cmd;
+	btcoexist->btc_disp_dbg_msg = halbtc_display_dbg_msg;
 
 	btcoexist->btc_get = halbtc_get;
 	btcoexist->btc_set = halbtc_set;
 	btcoexist->btc_set_bt_reg = halbtc_set_bt_reg;
 
-
 	btcoexist->bt_info.bt_ctrl_buf_size = false;
 	btcoexist->bt_info.agg_buf_size = 5;
 
 	btcoexist->bt_info.increase_scan_dev_num = false;
+
+	btcoexist->btc_get_bt_coex_supported_feature =
+					halbtc_get_bt_coex_supported_feature;
+	btcoexist->btc_get_bt_coex_supported_version =
+					halbtc_get_bt_coex_supported_version;
+	btcoexist->btc_get_ant_det_val_from_bt = halbtc_get_ant_det_val_from_bt;
+	btcoexist->btc_get_ble_scan_type_from_bt =
+					halbtc_get_ble_scan_type_from_bt;
+	btcoexist->btc_get_ble_scan_para_from_bt =
+					halbtc_get_ble_scan_para_from_bt;
+	btcoexist->btc_get_bt_afh_map_from_bt =
+					halbtc_get_bt_afh_map_from_bt;
+
+	init_completion(&btcoexist->bt_mp_comp);
+
+	return true;
+}
+
+bool exhalbtc_initlize_variables_wifi_only(struct rtl_priv *rtlpriv)
+{
+	struct wifi_only_cfg *wifionly_cfg = rtl_btc_wifi_only(rtlpriv);
+	struct wifi_only_haldata *wifionly_haldata;
+
+	if (!wifionly_cfg)
+		return false;
+
+	wifionly_cfg->adapter = rtlpriv;
+
+	switch (rtlpriv->rtlhal.interface) {
+	case INTF_PCI:
+		wifionly_cfg->chip_interface = BTC_INTF_PCI;
+		break;
+	case INTF_USB:
+		wifionly_cfg->chip_interface = BTC_INTF_USB;
+		break;
+	default:
+		wifionly_cfg->chip_interface = BTC_INTF_UNKNOWN;
+		break;
+	}
+
+	wifionly_haldata = &wifionly_cfg->haldata_info;
+
+	wifionly_haldata->customer_id = CUSTOMER_NORMAL;
+	wifionly_haldata->efuse_pg_antnum = rtl_get_hwpg_ant_num(rtlpriv);
+	wifionly_haldata->efuse_pg_antpath =
+					rtl_get_hwpg_single_ant_path(rtlpriv);
+	wifionly_haldata->rfe_type = rtl_get_hwpg_rfe_type(rtlpriv);
+	wifionly_haldata->ant_div_cfg = 0;
+
 	return true;
 }
 
 bool exhalbtc_bind_bt_coex_withadapter(void *adapter)
 {
-	struct btc_coexist *btcoexist = &gl_bt_coexist;
 	struct rtl_priv *rtlpriv = adapter;
-	u8 ant_num = 2, chip_type;
+	struct btc_coexist *btcoexist = rtl_btc_coexist(rtlpriv);
+	u8 ant_num = 2, chip_type, single_ant_path = 0;
+
+	if (!btcoexist)
+		return false;
 
 	if (btcoexist->binded)
 		return false;
@@ -922,10 +1305,16 @@ bool exhalbtc_bind_bt_coex_withadapter(void *adapter)
 	btcoexist->bt_info.miracast_plus_bt = false;
 
 	chip_type = rtl_get_hwpg_bt_type(rtlpriv);
-	exhalbtc_set_chip_type(chip_type);
+	exhalbtc_set_chip_type(btcoexist, chip_type);
 	ant_num = rtl_get_hwpg_ant_num(rtlpriv);
 	exhalbtc_set_ant_num(rtlpriv, BT_COEX_ANT_TYPE_PG, ant_num);
 
+	/* set default antenna position to main port */
+	btcoexist->board_info.btdm_ant_pos = BTC_ANTENNA_AT_MAIN_PORT;
+
+	single_ant_path = rtl_get_hwpg_single_ant_path(rtlpriv);
+	exhalbtc_set_single_ant_path(btcoexist, single_ant_path);
+
 	if (rtl_get_hwpg_package_type(rtlpriv) == 0)
 		btcoexist->board_info.tfbga_package = false;
 	else if (rtl_get_hwpg_package_type(rtlpriv) == 1)
@@ -940,6 +1329,9 @@ bool exhalbtc_bind_bt_coex_withadapter(void *adapter)
 		RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
 			 "[BTCoex], Package Type = Non-TFBGA\n");
 
+	btcoexist->board_info.rfe_type = rtl_get_hwpg_rfe_type(rtlpriv);
+	btcoexist->board_info.ant_div_cfg = 0;
+
 	return true;
 }
 
@@ -996,6 +1388,10 @@ void exhalbtc_init_hw_config(struct btc_coexist *btcoexist, bool wifi_only)
 	}
 }
 
+void exhalbtc_init_hw_config_wifi_only(struct wifi_only_cfg *wifionly_cfg)
+{
+}
+
 void exhalbtc_init_coex_dm(struct btc_coexist *btcoexist)
 {
 	if (!halbtc_is_bt_coexist_available(btcoexist))
@@ -1122,6 +1518,11 @@ void exhalbtc_scan_notify(struct btc_coexist *btcoexist, u8 type)
 	halbtc_normal_low_power(btcoexist);
 }
 
+void exhalbtc_scan_notify_wifi_only(struct wifi_only_cfg *wifionly_cfg,
+				    u8 is_5g)
+{
+}
+
 void exhalbtc_connect_notify(struct btc_coexist *btcoexist, u8 action)
 {
 	u8 asso_type;
@@ -1430,30 +1831,25 @@ void exhalbtc_stack_update_profile_info(void)
 {
 }
 
-void exhalbtc_update_min_bt_rssi(s8 bt_rssi)
+void exhalbtc_update_min_bt_rssi(struct btc_coexist *btcoexist, s8 bt_rssi)
 {
-	struct btc_coexist *btcoexist = &gl_bt_coexist;
-
 	if (!halbtc_is_bt_coexist_available(btcoexist))
 		return;
 
 	btcoexist->stack_info.min_bt_rssi = bt_rssi;
 }
 
-void exhalbtc_set_hci_version(u16 hci_version)
+void exhalbtc_set_hci_version(struct btc_coexist *btcoexist, u16 hci_version)
 {
-	struct btc_coexist *btcoexist = &gl_bt_coexist;
-
 	if (!halbtc_is_bt_coexist_available(btcoexist))
 		return;
 
 	btcoexist->stack_info.hci_version = hci_version;
 }
 
-void exhalbtc_set_bt_patch_version(u16 bt_hci_version, u16 bt_patch_version)
+void exhalbtc_set_bt_patch_version(struct btc_coexist *btcoexist,
+				   u16 bt_hci_version, u16 bt_patch_version)
 {
-	struct btc_coexist *btcoexist = &gl_bt_coexist;
-
 	if (!halbtc_is_bt_coexist_available(btcoexist))
 		return;
 
@@ -1461,7 +1857,7 @@ void exhalbtc_set_bt_patch_version(u16 bt_hci_version, u16 bt_patch_version)
 	btcoexist->bt_info.bt_hci_ver = bt_hci_version;
 }
 
-void exhalbtc_set_chip_type(u8 chip_type)
+void exhalbtc_set_chip_type(struct btc_coexist *btcoexist, u8 chip_type)
 {
 	switch (chip_type) {
 	default:
@@ -1469,51 +1865,58 @@ void exhalbtc_set_chip_type(u8 chip_type)
 	case BT_ISSC_3WIRE:
 	case BT_ACCEL:
 	case BT_RTL8756:
-		gl_bt_coexist.board_info.bt_chip_type = BTC_CHIP_UNDEF;
+		btcoexist->board_info.bt_chip_type = BTC_CHIP_UNDEF;
 		break;
 	case BT_CSR_BC4:
-		gl_bt_coexist.board_info.bt_chip_type = BTC_CHIP_CSR_BC4;
+		btcoexist->board_info.bt_chip_type = BTC_CHIP_CSR_BC4;
 		break;
 	case BT_CSR_BC8:
-		gl_bt_coexist.board_info.bt_chip_type = BTC_CHIP_CSR_BC8;
+		btcoexist->board_info.bt_chip_type = BTC_CHIP_CSR_BC8;
 		break;
 	case BT_RTL8723A:
-		gl_bt_coexist.board_info.bt_chip_type = BTC_CHIP_RTL8723A;
+		btcoexist->board_info.bt_chip_type = BTC_CHIP_RTL8723A;
 		break;
 	case BT_RTL8821A:
-		gl_bt_coexist.board_info.bt_chip_type = BTC_CHIP_RTL8821;
+		btcoexist->board_info.bt_chip_type = BTC_CHIP_RTL8821;
 		break;
 	case BT_RTL8723B:
-		gl_bt_coexist.board_info.bt_chip_type = BTC_CHIP_RTL8723B;
+		btcoexist->board_info.bt_chip_type = BTC_CHIP_RTL8723B;
 		break;
 	}
 }
 
 void exhalbtc_set_ant_num(struct rtl_priv *rtlpriv, u8 type, u8 ant_num)
 {
+	struct btc_coexist *btcoexist = rtl_btc_coexist(rtlpriv);
+
+	if (!btcoexist)
+		return;
+
 	if (BT_COEX_ANT_TYPE_PG == type) {
-		gl_bt_coexist.board_info.pg_ant_num = ant_num;
-		gl_bt_coexist.board_info.btdm_ant_num = ant_num;
+		btcoexist->board_info.pg_ant_num = ant_num;
+		btcoexist->board_info.btdm_ant_num = ant_num;
 	} else if (BT_COEX_ANT_TYPE_ANTDIV == type) {
-		gl_bt_coexist.board_info.btdm_ant_num = ant_num;
+		btcoexist->board_info.btdm_ant_num = ant_num;
 	} else if (type == BT_COEX_ANT_TYPE_DETECTED) {
-		gl_bt_coexist.board_info.btdm_ant_num = ant_num;
+		btcoexist->board_info.btdm_ant_num = ant_num;
 		if (rtlpriv->cfg->mod_params->ant_sel == 1)
-			gl_bt_coexist.board_info.btdm_ant_pos =
+			btcoexist->board_info.btdm_ant_pos =
 				BTC_ANTENNA_AT_AUX_PORT;
 		else
-			gl_bt_coexist.board_info.btdm_ant_pos =
+			btcoexist->board_info.btdm_ant_pos =
 				BTC_ANTENNA_AT_MAIN_PORT;
 	}
 }
 
 /* Currently used by 8723b only, S0 or S1 */
-void exhalbtc_set_single_ant_path(u8 single_ant_path)
+void exhalbtc_set_single_ant_path(struct btc_coexist *btcoexist,
+				  u8 single_ant_path)
 {
-	gl_bt_coexist.board_info.single_ant_path = single_ant_path;
+	btcoexist->board_info.single_ant_path = single_ant_path;
 }
 
-void exhalbtc_display_bt_coex_info(struct btc_coexist *btcoexist)
+void exhalbtc_display_bt_coex_info(struct btc_coexist *btcoexist,
+				   struct seq_file *m)
 {
 	if (!halbtc_is_bt_coexist_available(btcoexist))
 		return;
@@ -1522,18 +1925,36 @@ void exhalbtc_display_bt_coex_info(struct btc_coexist *btcoexist)
 
 	if (IS_HARDWARE_TYPE_8821(btcoexist->adapter)) {
 		if (btcoexist->board_info.btdm_ant_num == 2)
-			ex_btc8821a2ant_display_coex_info(btcoexist);
+			ex_btc8821a2ant_display_coex_info(btcoexist, m);
 		else if (btcoexist->board_info.btdm_ant_num == 1)
-			ex_btc8821a1ant_display_coex_info(btcoexist);
+			ex_btc8821a1ant_display_coex_info(btcoexist, m);
 	} else if (IS_HARDWARE_TYPE_8723B(btcoexist->adapter)) {
 		if (btcoexist->board_info.btdm_ant_num == 2)
-			ex_btc8723b2ant_display_coex_info(btcoexist);
+			ex_btc8723b2ant_display_coex_info(btcoexist, m);
 		else if (btcoexist->board_info.btdm_ant_num == 1)
-			ex_btc8723b1ant_display_coex_info(btcoexist);
+			ex_btc8723b1ant_display_coex_info(btcoexist, m);
 	} else if (IS_HARDWARE_TYPE_8192E(btcoexist->adapter)) {
 		if (btcoexist->board_info.btdm_ant_num == 2)
-			ex_btc8192e2ant_display_coex_info(btcoexist);
+			ex_btc8192e2ant_display_coex_info(btcoexist, m);
 	}
 
 	halbtc_normal_low_power(btcoexist);
 }
+
+void exhalbtc_switch_band_notify(struct btc_coexist *btcoexist, u8 type)
+{
+	if (!halbtc_is_bt_coexist_available(btcoexist))
+		return;
+
+	if (btcoexist->manual_control)
+		return;
+
+	halbtc_leave_low_power(btcoexist);
+
+	halbtc_normal_low_power(btcoexist);
+}
+
+void exhalbtc_switch_band_notify_wifi_only(struct wifi_only_cfg *wifionly_cfg,
+					   u8 is_5g)
+{
+}
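
Aside (sketch only, not part of the patch): halbtc_send_bt_mp_operation()
packs a two-byte header in front of every BT MP H2C -- OperVer in the low
nibble of byte 0, ReqNum in the high nibble, and the op code in byte 1.
The firmware echoes ReqNum back, which is how rtl_btc_btmpinfo_notify()
matches replies to requests. A standalone model of that packing:

#include <assert.h>
#include <stdint.h>

static void bt_mp_pack_hdr(uint8_t *cmd, uint8_t oper_ver,
			   uint8_t req_num, uint8_t op_code)
{
	cmd[0] = (oper_ver & 0x0f) | ((req_num << 4) & 0xf0);
	cmd[1] = op_code;
}

int main(void)
{
	uint8_t cmd[4] = {0};

	/* BT_OP_GET_BT_VERSION (0) tagged with BT_SEQ_GET_BT_VERSION (0xe) */
	bt_mp_pack_hdr(cmd, 0, 0x0e, 0);
	assert(cmd[0] == 0xe0 && cmd[1] == 0x00);

	/* ReqNum is 4 bits wide, which is why each BT_SEQ_* value must
	 * fit in 0x0..0xf
	 */
	return 0;
}
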
diff --git a/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtcoutsrc.h b/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtcoutsrc.h
index f9b87c12db09..8ed217656539 100644
--- a/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtcoutsrc.h
+++ b/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtcoutsrc.h
@@ -103,8 +103,6 @@ enum btc_msg_type {
 	BTC_MSG_MAX
 };
 
-extern u32 btc_dbg_type[];
-
 /* following is for BTC_MSG_INTERFACE */
 #define		INTF_INIT				BIT0
 #define		INTF_NOTIFY				BIT2
@@ -152,8 +150,10 @@ struct btc_board_info {
 	u8 btdm_ant_num;	/* ant number for btdm */
 	u8 btdm_ant_pos;
 	u8 single_ant_path; /* current used for 8723b only, 1=>s0,  0=>s1 */
-	bool bt_exist;
 	bool tfbga_package;
+
+	u8 rfe_type;
+	u8 ant_div_cfg;
 };
 
 enum btc_dbg_opcode {
@@ -181,10 +181,17 @@ enum btc_wifi_role {
 	BTC_ROLE_MAX
 };
 
+enum btc_wireless_freq {
+	BTC_FREQ_2_4G = 0x0,
+	BTC_FREQ_5G = 0x1,
+	BTC_FREQ_MAX
+};
+
 enum btc_wifi_bw_mode {
 	BTC_WIFI_BW_LEGACY = 0x0,
 	BTC_WIFI_BW_HT20 = 0x1,
 	BTC_WIFI_BW_HT40 = 0x2,
+	BTC_WIFI_BW_HT80 = 0x3,
 	BTC_WIFI_BW_MAX
 };
 
@@ -275,6 +282,8 @@ enum btc_get_type {
 	BTC_GET_U4_VENDOR,
 	BTC_GET_U4_SUPPORTED_VERSION,
 	BTC_GET_U4_SUPPORTED_FEATURE,
+	BTC_GET_U4_BT_DEVICE_INFO,
+	BTC_GET_U4_BT_FORBIDDEN_SLOT_VAL,
 	BTC_GET_U4_WIFI_IQK_TOTAL,
 	BTC_GET_U4_WIFI_IQK_OK,
 	BTC_GET_U4_WIFI_IQK_FAIL,
@@ -355,6 +364,7 @@ enum btc_dbg_disp_type {
 	BTC_DBG_DISP_BT_LINK_INFO = 0x1,
 	BTC_DBG_DISP_BT_FW_VER = 0x2,
 	BTC_DBG_DISP_FW_PWR_MODE_CMD = 0x3,
+	BTC_DBG_DISP_WIFI_STATUS = 0x04,
 	BTC_DBG_DISP_MAX
 };
 
@@ -376,6 +386,14 @@ enum btc_notify_type_scan {
 	BTC_SCAN_MAX
 };
 
+enum btc_notify_type_switchband {
+	BTC_NOT_SWITCH = 0x0,
+	BTC_SWITCH_TO_24G = 0x1,
+	BTC_SWITCH_TO_5G = 0x2,
+	BTC_SWITCH_TO_24G_NOFORSCAN = 0x3,
+	BTC_SWITCH_MAX
+};
+
 enum btc_notify_type_associate {
 	BTC_ASSOCIATE_FINISH = 0x0,
 	BTC_ASSOCIATE_START = 0x1,
@@ -417,49 +435,6 @@ enum btc_notify_type_stack_operation {
 	BTC_STACK_OP_MAX
 };
 
-typedef u8 (*bfp_btc_r1)(void *btc_context, u32 reg_addr);
-
-typedef u16 (*bfp_btc_r2)(void *btc_context, u32 reg_addr);
-
-typedef u32 (*bfp_btc_r4)(void *btc_context, u32 reg_addr);
-
-typedef void (*bfp_btc_w1)(void *btc_context, u32 reg_addr, u32 data);
-
-typedef void (*bfp_btc_w1_bit_mak)(void *btc_context, u32 reg_addr,
-				   u32 bit_mask, u8 data1b);
-
-typedef void (*bfp_btc_w2)(void *btc_context, u32 reg_addr, u16 data);
-
-typedef void (*bfp_btc_w4)(void *btc_context, u32 reg_addr, u32 data);
-
-typedef void (*bfp_btc_local_reg_w1)(void *btc_context, u32 reg_addr, u8 data);
-typedef void (*bfp_btc_wr_1byte_bit_mask)(void *btc_context, u32 reg_addr,
-					  u8 bit_mask, u8 data);
-
-typedef void (*bfp_btc_set_bb_reg)(void *btc_context, u32 reg_addr,
-				   u32 bit_mask, u32 data);
-
-typedef u32 (*bfp_btc_get_bb_reg)(void *btc_context, u32 reg_addr,
-				  u32 bit_mask);
-
-typedef void (*bfp_btc_set_rf_reg)(void *btc_context, u8 rf_path, u32 reg_addr,
-				   u32 bit_mask, u32 data);
-
-typedef u32 (*bfp_btc_get_rf_reg)(void *btc_context, u8 rf_path,
-				  u32 reg_addr, u32 bit_mask);
-
-typedef void (*bfp_btc_fill_h2c)(void *btc_context, u8 element_id,
-				 u32 cmd_len, u8 *cmd_buffer);
-
-typedef	bool (*bfp_btc_get)(void *btcoexist, u8 get_type, void *out_buf);
-
-typedef	bool (*bfp_btc_set)(void *btcoexist, u8 set_type, void *in_buf);
-
-typedef void (*bfp_btc_set_bt_reg)(void *btc_context, u8 reg_type, u32 offset,
-				   u32 value);
-
-typedef void (*bfp_btc_disp_dbg_msg)(void *btcoexist, u8 disp_type);
-
 struct btc_bt_info {
 	bool bt_disabled;
 	u8 rssi_adjust_for_agc_table_on;
@@ -491,6 +466,17 @@ struct btc_bt_info {
 	u8 lps_val;
 	u8 rpwm_val;
 	u32 ra_mask;
+
+	u32 afh_map_l;
+	u32 afh_map_m;
+	u16 afh_map_h;
+	u32 bt_supported_feature;
+	u32 bt_supported_version;
+	u32 bt_device_info;
+	u32 bt_forb_slot_val;
+	u8 bt_ant_det_val;
+	u8 bt_ble_scan_type;
+	u32 bt_ble_scan_para;
 };
 
 struct btc_stack_info {
@@ -546,6 +532,40 @@ enum btc_antenna_pos {
 	BTC_ANTENNA_AT_AUX_PORT = 0x2,
 };
 
+enum btc_mp_h2c_op_code {
+	BT_OP_GET_BT_VERSION			= 0,
+	BT_OP_WRITE_REG_ADDR			= 12,
+	BT_OP_WRITE_REG_VALUE			= 13,
+	BT_OP_READ_REG				= 17,
+	BT_OP_GET_AFH_MAP_L			= 30,
+	BT_OP_GET_AFH_MAP_M			= 31,
+	BT_OP_GET_AFH_MAP_H			= 32,
+	BT_OP_GET_BT_COEX_SUPPORTED_FEATURE	= 42,
+	BT_OP_GET_BT_COEX_SUPPORTED_VERSION	= 43,
+	BT_OP_GET_BT_ANT_DET_VAL		= 44,
+	BT_OP_GET_BT_BLE_SCAN_PARA		= 45,
+	BT_OP_GET_BT_BLE_SCAN_TYPE		= 46,
+	BT_OP_GET_BT_DEVICE_INFO		= 48,
+	BT_OP_GET_BT_FORBIDDEN_SLOT_VAL		= 49,
+	BT_OP_MAX
+};
+
+enum btc_mp_h2c_req_num {
+	/* 4 bits only */
+	BT_SEQ_DONT_CARE			= 0,
+	BT_SEQ_GET_BT_VERSION			= 0xE,
+	BT_SEQ_GET_AFH_MAP_L			= 0x5,
+	BT_SEQ_GET_AFH_MAP_M			= 0x6,
+	BT_SEQ_GET_AFH_MAP_H			= 0x9,
+	BT_SEQ_GET_BT_COEX_SUPPORTED_FEATURE	= 0x7,
+	BT_SEQ_GET_BT_COEX_SUPPORTED_VERSION	= 0x8,
+	BT_SEQ_GET_BT_ANT_DET_VAL		= 0x2,
+	BT_SEQ_GET_BT_BLE_SCAN_PARA		= 0x3,
+	BT_SEQ_GET_BT_BLE_SCAN_TYPE		= 0x4,
+	BT_SEQ_GET_BT_DEVICE_INFO		= 0xA,
+	BT_SEQ_GET_BT_FORB_SLOT_VAL		= 0xB,
+};
+
 struct btc_coexist {
 	/* make sure only one adapter can bind the data context  */
 	bool binded;
@@ -563,55 +583,86 @@ struct btc_coexist {
 	 */
 	bool auto_report_1ant;
 	bool auto_report_2ant;
+	bool dbg_mode_1ant;
+	bool dbg_mode_2ant;
 	bool initilized;
 	bool stop_coex_dm;
 	bool manual_control;
 	struct btc_statistics statistics;
 	u8 pwr_mode_val[10];
 
-	/* function pointers - io related */
-	bfp_btc_r1 btc_read_1byte;
-	bfp_btc_w1 btc_write_1byte;
-	bfp_btc_w1_bit_mak btc_write_1byte_bitmask;
-	bfp_btc_r2 btc_read_2byte;
-	bfp_btc_w2 btc_write_2byte;
-	bfp_btc_r4 btc_read_4byte;
-	bfp_btc_w4 btc_write_4byte;
-	bfp_btc_local_reg_w1 btc_write_local_reg_1byte;
-
-	bfp_btc_set_bb_reg btc_set_bb_reg;
-	bfp_btc_get_bb_reg btc_get_bb_reg;
+	struct completion bt_mp_comp;
 
-	bfp_btc_set_rf_reg btc_set_rf_reg;
-	bfp_btc_get_rf_reg btc_get_rf_reg;
-
-	bfp_btc_fill_h2c btc_fill_h2c;
-
-	bfp_btc_disp_dbg_msg btc_disp_dbg_msg;
-
-	bfp_btc_get btc_get;
-	bfp_btc_set btc_set;
-
-	bfp_btc_set_bt_reg btc_set_bt_reg;
+	/* function pointers - io related */
+	u8 (*btc_read_1byte)(void *btc_context, u32 reg_addr);
+	void (*btc_write_1byte)(void *btc_context, u32 reg_addr, u32 data);
+	void (*btc_write_1byte_bitmask)(void *btc_context, u32 reg_addr,
+					u32 bit_mask, u8 data1b);
+	u16 (*btc_read_2byte)(void *btc_context, u32 reg_addr);
+	void (*btc_write_2byte)(void *btc_context, u32 reg_addr, u16 data);
+	u32 (*btc_read_4byte)(void *btc_context, u32 reg_addr);
+	void (*btc_write_4byte)(void *btc_context, u32 reg_addr, u32 data);
+
+	void (*btc_write_local_reg_1byte)(void *btc_context, u32 reg_addr,
+					  u8 data);
+	void (*btc_set_bb_reg)(void *btc_context, u32 reg_addr,
+			       u32 bit_mask, u32 data);
+	u32 (*btc_get_bb_reg)(void *btc_context, u32 reg_addr,
+			      u32 bit_mask);
+	void (*btc_set_rf_reg)(void *btc_context, u8 rf_path, u32 reg_addr,
+			       u32 bit_mask, u32 data);
+	u32 (*btc_get_rf_reg)(void *btc_context, u8 rf_path,
+			      u32 reg_addr, u32 bit_mask);
+
+	void (*btc_fill_h2c)(void *btc_context, u8 element_id,
+			     u32 cmd_len, u8 *cmd_buffer);
+
+	void (*btc_disp_dbg_msg)(void *btcoexist, u8 disp_type,
+				 struct seq_file *m);
+
+	bool (*btc_get)(void *btcoexist, u8 get_type, void *out_buf);
+	bool (*btc_set)(void *btcoexist, u8 set_type, void *in_buf);
+
+	void (*btc_set_bt_reg)(void *btc_context, u8 reg_type, u32 offset,
+			       u32 value);
+	u32 (*btc_get_bt_coex_supported_feature)(void *btcoexist);
+	u32 (*btc_get_bt_coex_supported_version)(void *btcoexist);
+	u8 (*btc_get_ant_det_val_from_bt)(void *btcoexist);
+	u8 (*btc_get_ble_scan_type_from_bt)(void *btcoexist);
+	u32 (*btc_get_ble_scan_para_from_bt)(void *btcoexist, u8 scan_type);
+	bool (*btc_get_bt_afh_map_from_bt)(void *btcoexist, u8 map_type,
+					   u8 *afh_map);
 };
 
 bool halbtc_is_wifi_uplink(struct rtl_priv *adapter);
 
-extern struct btc_coexist gl_bt_coexist;
+#define rtl_btc_coexist(rtlpriv)				\
+	((struct btc_coexist *)((rtlpriv)->btcoexist.btc_context))
+#define rtl_btc_wifi_only(rtlpriv)				\
+	((struct wifi_only_cfg *)((rtlpriv)->btcoexist.wifi_only_context))
 
-bool exhalbtc_initlize_variables(void);
+struct wifi_only_cfg;
+
+bool exhalbtc_initlize_variables(struct rtl_priv *rtlpriv);
+bool exhalbtc_initlize_variables_wifi_only(struct rtl_priv *rtlpriv);
 bool exhalbtc_bind_bt_coex_withadapter(void *adapter);
+void exhalbtc_power_on_setting(struct btc_coexist *btcoexist);
+void exhalbtc_pre_load_firmware(struct btc_coexist *btcoexist);
 void exhalbtc_init_hw_config(struct btc_coexist *btcoexist, bool wifi_only);
+void exhalbtc_init_hw_config_wifi_only(struct wifi_only_cfg *wifionly_cfg);
 void exhalbtc_init_coex_dm(struct btc_coexist *btcoexist);
 void exhalbtc_ips_notify(struct btc_coexist *btcoexist, u8 type);
 void exhalbtc_lps_notify(struct btc_coexist *btcoexist, u8 type);
 void exhalbtc_scan_notify(struct btc_coexist *btcoexist, u8 type);
+void exhalbtc_scan_notify_wifi_only(struct wifi_only_cfg *wifionly_cfg,
+				    u8 is_5g);
 void exhalbtc_connect_notify(struct btc_coexist *btcoexist, u8 action);
 void exhalbtc_mediastatus_notify(struct btc_coexist *btcoexist,
 				 enum rt_media_status media_status);
 void exhalbtc_special_packet_notify(struct btc_coexist *btcoexist, u8 pkt_type);
 void exhalbtc_bt_info_notify(struct btc_coexist *btcoexist, u8 *tmp_buf,
 			     u8 length);
+void exhalbtc_rf_status_notify(struct btc_coexist *btcoexist, u8 type);
 void exhalbtc_stack_operation_notify(struct btc_coexist *btcoexist, u8 type);
 void exhalbtc_halt_notify(struct btc_coexist *btcoexist);
 void exhalbtc_pnp_notify(struct btc_coexist *btcoexist, u8 pnp_state);
@@ -619,18 +670,63 @@ void exhalbtc_coex_dm_switch(struct btc_coexist *btcoexist);
 void exhalbtc_periodical(struct btc_coexist *btcoexist);
 void exhalbtc_dbg_control(struct btc_coexist *btcoexist, u8 code, u8 len,
 			  u8 *data);
+void exhalbtc_antenna_detection(struct btc_coexist *btcoexist, u32 cent_freq,
+				u32 offset, u32 span, u32 seconds);
 void exhalbtc_stack_update_profile_info(void);
-void exhalbtc_set_hci_version(u16 hci_version);
-void exhalbtc_set_bt_patch_version(u16 bt_hci_version, u16 bt_patch_version);
-void exhalbtc_update_min_bt_rssi(s8 bt_rssi);
-void exhalbtc_set_bt_exist(bool bt_exist);
-void exhalbtc_set_chip_type(u8 chip_type);
+void exhalbtc_set_hci_version(struct btc_coexist *btcoexist, u16 hci_version);
+void exhalbtc_set_bt_patch_version(struct btc_coexist *btcoexist,
+				   u16 bt_hci_version, u16 bt_patch_version);
+void exhalbtc_update_min_bt_rssi(struct btc_coexist *btcoexist, s8 bt_rssi);
+void exhalbtc_set_bt_exist(struct btc_coexist *btcoexist, bool bt_exist);
+void exhalbtc_set_chip_type(struct btc_coexist *btcoexist, u8 chip_type);
 void exhalbtc_set_ant_num(struct rtl_priv *rtlpriv, u8 type, u8 ant_num);
-void exhalbtc_display_bt_coex_info(struct btc_coexist *btcoexist);
+void exhalbtc_display_bt_coex_info(struct btc_coexist *btcoexist,
+				   struct seq_file *m);
+void exhalbtc_switch_band_notify(struct btc_coexist *btcoexist, u8 type);
+void exhalbtc_switch_band_notify_wifi_only(struct wifi_only_cfg *wifionly_cfg,
+					   u8 is_5g);
 void exhalbtc_signal_compensation(struct btc_coexist *btcoexist,
 				  u8 *rssi_wifi, u8 *rssi_bt);
 void exhalbtc_lps_leave(struct btc_coexist *btcoexist);
 void exhalbtc_low_wifi_traffic_notify(struct btc_coexist *btcoexist);
-void exhalbtc_set_single_ant_path(u8 single_ant_path);
+void exhalbtc_set_single_ant_path(struct btc_coexist *btcoexist,
+				  u8 single_ant_path);
+
+/* The following are used by wifi_only case */
+enum wifionly_chip_interface {
+	WIFIONLY_INTF_UNKNOWN	= 0,
+	WIFIONLY_INTF_PCI		= 1,
+	WIFIONLY_INTF_USB		= 2,
+	WIFIONLY_INTF_SDIO		= 3,
+	WIFIONLY_INTF_MAX
+};
+
+enum wifionly_customer_id {
+	CUSTOMER_NORMAL			= 0,
+	CUSTOMER_HP_1			= 1,
+};
+
+struct wifi_only_haldata {
+	u16		customer_id;
+	u8		efuse_pg_antnum;
+	u8		efuse_pg_antpath;
+	u8		rfe_type;
+	u8		ant_div_cfg;
+};
+
+struct wifi_only_cfg {
+	void				*adapter;
+	struct wifi_only_haldata	haldata_info;
+	enum wifionly_chip_interface	chip_interface;
+};
+
+static inline
+void halwifionly_phy_set_bb_reg(struct wifi_only_cfg *wifionly_cfg,
+				u32 regaddr, u32 bitmask, u32 data)
+{
+	struct rtl_priv *rtlpriv = (struct rtl_priv *)wifionly_cfg->adapter;
+
+	rtl_set_bbreg(rtlpriv->hw, regaddr, bitmask, data);
+}
 
 #endif
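
Aside (sketch only, not part of the patch): the new bt_mp_comp member backs
a simple synchronous request/response over the firmware mailbox. Reduced to
its essentials -- send_h2c() and parse_response() below are hypothetical
stand-ins for fill_h2c_cmd() and rtl_btc_btmpinfo_notify() -- the pattern is:

#include <linux/completion.h>
#include <linux/jiffies.h>
#include <linux/types.h>

void send_h2c(struct btc_coexist *btcoexist, u8 *cmd, u32 len);
void parse_response(struct btc_coexist *btcoexist, u8 *buf, u8 len);

static bool bt_mp_request(struct btc_coexist *btcoexist, u8 *cmd, u32 len)
{
	/* re-arm before sending so a fast reply is not lost */
	reinit_completion(&btcoexist->bt_mp_comp);

	send_h2c(btcoexist, cmd, len);

	/* process context only; the real code bails out via in_interrupt() */
	return wait_for_completion_timeout(&btcoexist->bt_mp_comp,
					   msecs_to_jiffies(200)) != 0;
}

static void bt_mp_response(struct btc_coexist *btcoexist, u8 *buf, u8 len)
{
	parse_response(btcoexist, buf, len);	/* store the payload first */
	complete(&btcoexist->bt_mp_comp);	/* then wake the requester */
}
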
diff --git a/drivers/net/wireless/realtek/rtlwifi/btcoexist/rtl_btc.c b/drivers/net/wireless/realtek/rtlwifi/btcoexist/rtl_btc.c
index 7d296a401b6f..cce4a37ea408 100644
--- a/drivers/net/wireless/realtek/rtlwifi/btcoexist/rtl_btc.c
+++ b/drivers/net/wireless/realtek/rtlwifi/btcoexist/rtl_btc.c
@@ -31,11 +31,16 @@
 
 static struct rtl_btc_ops rtl_btc_operation = {
 	.btc_init_variables = rtl_btc_init_variables,
+	.btc_init_variables_wifi_only = rtl_btc_init_variables_wifi_only,
+	.btc_deinit_variables = rtl_btc_deinit_variables,
 	.btc_init_hal_vars = rtl_btc_init_hal_vars,
+	.btc_power_on_setting = rtl_btc_power_on_setting,
 	.btc_init_hw_config = rtl_btc_init_hw_config,
+	.btc_init_hw_config_wifi_only = rtl_btc_init_hw_config_wifi_only,
 	.btc_ips_notify = rtl_btc_ips_notify,
 	.btc_lps_notify = rtl_btc_lps_notify,
 	.btc_scan_notify = rtl_btc_scan_notify,
+	.btc_scan_notify_wifi_only = rtl_btc_scan_notify_wifi_only,
 	.btc_connect_notify = rtl_btc_connect_notify,
 	.btc_mediastatus_notify = rtl_btc_mediastatus_notify,
 	.btc_periodical = rtl_btc_periodical,
@@ -46,63 +51,143 @@ static struct rtl_btc_ops rtl_btc_operation = {
 	.btc_is_disable_edca_turbo = rtl_btc_is_disable_edca_turbo,
 	.btc_is_bt_disabled = rtl_btc_is_bt_disabled,
 	.btc_special_packet_notify = rtl_btc_special_packet_notify,
+	.btc_switch_band_notify = rtl_btc_switch_band_notify,
+	.btc_switch_band_notify_wifi_only = rtl_btc_switch_band_notify_wifionly,
 	.btc_record_pwr_mode = rtl_btc_record_pwr_mode,
 	.btc_get_lps_val = rtl_btc_get_lps_val,
 	.btc_get_rpwm_val = rtl_btc_get_rpwm_val,
 	.btc_is_bt_ctrl_lps = rtl_btc_is_bt_ctrl_lps,
 	.btc_is_bt_lps_on = rtl_btc_is_bt_lps_on,
 	.btc_get_ampdu_cfg = rtl_btc_get_ampdu_cfg,
+	.btc_display_bt_coex_info = rtl_btc_display_bt_coex_info,
 };
 
+void rtl_btc_display_bt_coex_info(struct rtl_priv *rtlpriv, struct seq_file *m)
+{
+	struct btc_coexist *btcoexist = rtl_btc_coexist(rtlpriv);
+
+	if (!btcoexist) {
+		seq_puts(m, "btc_coexist context is NULL!\n");
+		return;
+	}
+
+	exhalbtc_display_bt_coex_info(btcoexist, m);
+}
+
 void rtl_btc_record_pwr_mode(struct rtl_priv *rtlpriv, u8 *buf, u8 len)
 {
+	struct btc_coexist *btcoexist = rtl_btc_coexist(rtlpriv);
 	u8 safe_len;
 
-	safe_len = sizeof(gl_bt_coexist.pwr_mode_val);
+	if (!btcoexist)
+		return;
+
+	safe_len = sizeof(btcoexist->pwr_mode_val);
 
 	if (safe_len > len)
 		safe_len = len;
 
-	memcpy(gl_bt_coexist.pwr_mode_val, buf, safe_len);
+	memcpy(btcoexist->pwr_mode_val, buf, safe_len);
 }
 
 u8 rtl_btc_get_lps_val(struct rtl_priv *rtlpriv)
 {
-	return gl_bt_coexist.bt_info.lps_val;
+	struct btc_coexist *btcoexist = rtl_btc_coexist(rtlpriv);
+
+	if (!btcoexist)
+		return 0;
+
+	return btcoexist->bt_info.lps_val;
 }
 
 u8 rtl_btc_get_rpwm_val(struct rtl_priv *rtlpriv)
 {
-	return gl_bt_coexist.bt_info.rpwm_val;
+	struct btc_coexist *btcoexist = rtl_btc_coexist(rtlpriv);
+
+	if (!btcoexist)
+		return 0;
+
+	return btcoexist->bt_info.rpwm_val;
 }
 
 bool rtl_btc_is_bt_ctrl_lps(struct rtl_priv *rtlpriv)
 {
-	return gl_bt_coexist.bt_info.bt_ctrl_lps;
+	struct btc_coexist *btcoexist = rtl_btc_coexist(rtlpriv);
+
+	if (!btcoexist)
+		return false;
+
+	return btcoexist->bt_info.bt_ctrl_lps;
 }
 
 bool rtl_btc_is_bt_lps_on(struct rtl_priv *rtlpriv)
 {
-	return gl_bt_coexist.bt_info.bt_lps_on;
+	struct btc_coexist *btcoexist = rtl_btc_coexist(rtlpriv);
+
+	if (!btcoexist)
+		return false;
+
+	return btcoexist->bt_info.bt_lps_on;
 }
 
 void rtl_btc_get_ampdu_cfg(struct rtl_priv *rtlpriv, u8 *reject_agg,
 			   u8 *ctrl_agg_size, u8 *agg_size)
 {
+	struct btc_coexist *btcoexist = rtl_btc_coexist(rtlpriv);
+
+	if (!btcoexist) {
+		if (reject_agg)
+			*reject_agg = false;
+		if (ctrl_agg_size)
+			*ctrl_agg_size = false;
+		return;
+	}
+
 	if (reject_agg)
-		*reject_agg = gl_bt_coexist.bt_info.reject_agg_pkt;
+		*reject_agg = btcoexist->bt_info.reject_agg_pkt;
 	if (ctrl_agg_size)
-		*ctrl_agg_size = gl_bt_coexist.bt_info.bt_ctrl_agg_buf_size;
+		*ctrl_agg_size = btcoexist->bt_info.bt_ctrl_agg_buf_size;
 	if (agg_size)
-		*agg_size = gl_bt_coexist.bt_info.agg_buf_size;
+		*agg_size = btcoexist->bt_info.agg_buf_size;
+}
+
+static void rtl_btc_alloc_variable(struct rtl_priv *rtlpriv, bool wifi_only)
+{
+	if (wifi_only)
+		rtlpriv->btcoexist.wifi_only_context =
+			kzalloc(sizeof(struct wifi_only_cfg), GFP_KERNEL);
+	else
+		rtlpriv->btcoexist.btc_context =
+			kzalloc(sizeof(struct btc_coexist), GFP_KERNEL);
+}
+
+static void rtl_btc_free_variable(struct rtl_priv *rtlpriv)
+{
+	kfree(rtlpriv->btcoexist.btc_context);
+	rtlpriv->btcoexist.btc_context = NULL;
+
+	kfree(rtlpriv->btcoexist.wifi_only_context);
+	rtlpriv->btcoexist.wifi_only_context = NULL;
 }
 
 void rtl_btc_init_variables(struct rtl_priv *rtlpriv)
 {
-	exhalbtc_initlize_variables();
+	rtl_btc_alloc_variable(rtlpriv, false);
+
+	exhalbtc_initlize_variables(rtlpriv);
 	exhalbtc_bind_bt_coex_withadapter(rtlpriv);
 }
 
+void rtl_btc_init_variables_wifi_only(struct rtl_priv *rtlpriv)
+{
+	rtl_btc_alloc_variable(rtlpriv, true);
+
+	exhalbtc_initlize_variables_wifi_only(rtlpriv);
+}
+
+void rtl_btc_deinit_variables(struct rtl_priv *rtlpriv)
+{
+	rtl_btc_free_variable(rtlpriv);
+}
+
 void rtl_btc_init_hal_vars(struct rtl_priv *rtlpriv)
 {
 	/* move ant_num, bt_type and single_ant_path to
@@ -110,67 +195,155 @@ void rtl_btc_init_hal_vars(struct rtl_priv *rtlpriv)
 	 */
 }
 
+void rtl_btc_power_on_setting(struct rtl_priv *rtlpriv)
+{
+	struct btc_coexist *btcoexist = rtl_btc_coexist(rtlpriv);
+
+	if (!btcoexist)
+		return;
+
+	exhalbtc_power_on_setting(btcoexist);
+}
+
 void rtl_btc_init_hw_config(struct rtl_priv *rtlpriv)
 {
+	struct btc_coexist *btcoexist = rtl_btc_coexist(rtlpriv);
+
 	u8 bt_exist;
 
 	bt_exist = rtl_get_hwpg_bt_exist(rtlpriv);
 	RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
 		"%s, bt_exist is %d\n", __func__, bt_exist);
 
-	exhalbtc_init_hw_config(&gl_bt_coexist, !bt_exist);
-	exhalbtc_init_coex_dm(&gl_bt_coexist);
+	if (!btcoexist)
+		return;
+
+	exhalbtc_init_hw_config(btcoexist, !bt_exist);
+	exhalbtc_init_coex_dm(btcoexist);
+}
+
+void rtl_btc_init_hw_config_wifi_only(struct rtl_priv *rtlpriv)
+{
+	struct wifi_only_cfg *wifionly_cfg = rtl_btc_wifi_only(rtlpriv);
+
+	if (!wifionly_cfg)
+		return;
+
+	exhalbtc_init_hw_config_wifi_only(wifionly_cfg);
 }
 
 void rtl_btc_ips_notify(struct rtl_priv *rtlpriv, u8 type)
 {
-	exhalbtc_ips_notify(&gl_bt_coexist, type);
+	struct btc_coexist *btcoexist = rtl_btc_coexist(rtlpriv);
+
+	if (!btcoexist)
+		return;
+
+	exhalbtc_ips_notify(btcoexist, type);
+
+	if (type == ERFON) {
+		/* In some situations the driver does not scan after
+		 * leaving IPS, which would leave btcoex in a wrong state.
+		 */
+		exhalbtc_scan_notify(btcoexist, 1);
+		exhalbtc_scan_notify(btcoexist, 0);
+	}
 }
 
 void rtl_btc_lps_notify(struct rtl_priv *rtlpriv, u8 type)
 {
-	exhalbtc_lps_notify(&gl_bt_coexist, type);
+	struct btc_coexist *btcoexist = rtl_btc_coexist(rtlpriv);
+
+	if (!btcoexist)
+		return;
+
+	exhalbtc_lps_notify(btcoexist, type);
 }
 
 void rtl_btc_scan_notify(struct rtl_priv *rtlpriv, u8 scantype)
 {
-	exhalbtc_scan_notify(&gl_bt_coexist, scantype);
+	struct btc_coexist *btcoexist = rtl_btc_coexist(rtlpriv);
+
+	if (!btcoexist)
+		return;
+
+	exhalbtc_scan_notify(btcoexist, scantype);
+}
+
+void rtl_btc_scan_notify_wifi_only(struct rtl_priv *rtlpriv, u8 scantype)
+{
+	struct rtl_hal *rtlhal = rtl_hal(rtlpriv);
+	struct wifi_only_cfg *wifionly_cfg = rtl_btc_wifi_only(rtlpriv);
+	u8 is_5g = (rtlhal->current_bandtype == BAND_ON_5G);
+
+	if (!wifionly_cfg)
+		return;
+
+	exhalbtc_scan_notify_wifi_only(wifionly_cfg, is_5g);
 }
 
 void rtl_btc_connect_notify(struct rtl_priv *rtlpriv, u8 action)
 {
-	exhalbtc_connect_notify(&gl_bt_coexist, action);
+	struct btc_coexist *btcoexist = rtl_btc_coexist(rtlpriv);
+
+	if (!btcoexist)
+		return;
+
+	exhalbtc_connect_notify(btcoexist, action);
 }
 
 void rtl_btc_mediastatus_notify(struct rtl_priv *rtlpriv,
 				enum rt_media_status mstatus)
 {
-	exhalbtc_mediastatus_notify(&gl_bt_coexist, mstatus);
+	struct btc_coexist *btcoexist = rtl_btc_coexist(rtlpriv);
+
+	if (!btcoexist)
+		return;
+
+	exhalbtc_mediastatus_notify(btcoexist, mstatus);
 }
 
 void rtl_btc_periodical(struct rtl_priv *rtlpriv)
 {
+	struct btc_coexist *btcoexist = rtl_btc_coexist(rtlpriv);
+
+	if (!btcoexist)
+		return;
+
 	/*rtl_bt_dm_monitor();*/
-	exhalbtc_periodical(&gl_bt_coexist);
+	exhalbtc_periodical(btcoexist);
 }
 
-void rtl_btc_halt_notify(void)
+void rtl_btc_halt_notify(struct rtl_priv *rtlpriv)
 {
-	struct btc_coexist *btcoexist = &gl_bt_coexist;
+	struct btc_coexist *btcoexist = rtl_btc_coexist(rtlpriv);
+
+	if (!btcoexist)
+		return;
 
 	exhalbtc_halt_notify(btcoexist);
 }
 
 void rtl_btc_btinfo_notify(struct rtl_priv *rtlpriv, u8 *tmp_buf, u8 length)
 {
-	exhalbtc_bt_info_notify(&gl_bt_coexist, tmp_buf, length);
+	struct btc_coexist *btcoexist = rtl_btc_coexist(rtlpriv);
+
+	if (!btcoexist)
+		return;
+
+	exhalbtc_bt_info_notify(btcoexist, tmp_buf, length);
 }
 
 void rtl_btc_btmpinfo_notify(struct rtl_priv *rtlpriv, u8 *tmp_buf, u8 length)
 {
+	struct btc_coexist *btcoexist = rtl_btc_coexist(rtlpriv);
 	u8 extid, seq, len;
 	u16 bt_real_fw_ver;
 	u8 bt_fw_ver;
+	u8 *data;
+
+	if (!btcoexist)
+		return;
 
 	if ((length < 4) || (!tmp_buf))
 		return;
@@ -182,20 +355,70 @@ void rtl_btc_btmpinfo_notify(struct rtl_priv *rtlpriv, u8 *tmp_buf, u8 length)
 
 	len = tmp_buf[1] >> 4;
 	seq = tmp_buf[2] >> 4;
+	data = &tmp_buf[3];
 
 	/* BT Firmware version response */
-	if (seq == 0x0E) {
+	switch (seq) {
+	case BT_SEQ_GET_BT_VERSION:
 		bt_real_fw_ver = tmp_buf[3] | (tmp_buf[4] << 8);
 		bt_fw_ver = tmp_buf[5];
 
-		gl_bt_coexist.bt_info.bt_real_fw_ver = bt_real_fw_ver;
-		gl_bt_coexist.bt_info.bt_fw_ver = bt_fw_ver;
+		btcoexist->bt_info.bt_real_fw_ver = bt_real_fw_ver;
+		btcoexist->bt_info.bt_fw_ver = bt_fw_ver;
+		break;
+	case BT_SEQ_GET_AFH_MAP_L:
+		btcoexist->bt_info.afh_map_l = le32_to_cpu(*(__le32 *)data);
+		break;
+	case BT_SEQ_GET_AFH_MAP_M:
+		btcoexist->bt_info.afh_map_m = le32_to_cpu(*(__le32 *)data);
+		break;
+	case BT_SEQ_GET_AFH_MAP_H:
+		btcoexist->bt_info.afh_map_h = le16_to_cpu(*(__le16 *)data);
+		break;
+	case BT_SEQ_GET_BT_COEX_SUPPORTED_FEATURE:
+		btcoexist->bt_info.bt_supported_feature = tmp_buf[3] |
+							  (tmp_buf[4] << 8);
+		break;
+	case BT_SEQ_GET_BT_COEX_SUPPORTED_VERSION:
+		btcoexist->bt_info.bt_supported_version = tmp_buf[3] |
+							  (tmp_buf[4] << 8);
+		break;
+	case BT_SEQ_GET_BT_ANT_DET_VAL:
+		btcoexist->bt_info.bt_ant_det_val = tmp_buf[3];
+		break;
+	case BT_SEQ_GET_BT_BLE_SCAN_PARA:
+		btcoexist->bt_info.bt_ble_scan_para = tmp_buf[3] |
+						      (tmp_buf[4] << 8) |
+						      (tmp_buf[5] << 16) |
+						      (tmp_buf[6] << 24);
+		break;
+	case BT_SEQ_GET_BT_BLE_SCAN_TYPE:
+		btcoexist->bt_info.bt_ble_scan_type = tmp_buf[3];
+		break;
+	case BT_SEQ_GET_BT_DEVICE_INFO:
+		btcoexist->bt_info.bt_device_info =
+						le32_to_cpu(*(__le32 *)data);
+		break;
+	case BT_SEQ_GET_BT_FORB_SLOT_VAL:
+		btcoexist->bt_info.bt_forb_slot_val =
+						le32_to_cpu(*(__le32 *)data);
+		break;
 	}
+
+	RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+		 "btmpinfo complete req_num=%d\n", seq);
+
+	complete(&btcoexist->bt_mp_comp);
 }
 
 bool rtl_btc_is_limited_dig(struct rtl_priv *rtlpriv)
 {
-	return gl_bt_coexist.bt_info.limited_dig;
+	struct btc_coexist *btcoexist = rtl_btc_coexist(rtlpriv);
+
+	if (!btcoexist)
+		return false;
+
+	return btcoexist->bt_info.limited_dig;
 }
 
 bool rtl_btc_is_disable_edca_turbo(struct rtl_priv *rtlpriv)
@@ -227,8 +450,13 @@ bool rtl_btc_is_disable_edca_turbo(struct rtl_priv *rtlpriv)
 
 bool rtl_btc_is_bt_disabled(struct rtl_priv *rtlpriv)
 {
+	struct btc_coexist *btcoexist = rtl_btc_coexist(rtlpriv);
+
+	if (!btcoexist)
+		return true;
+
 	/* It seems 'bt_disabled' is never be initialized or set. */
-	if (gl_bt_coexist.bt_info.bt_disabled)
+	if (btcoexist->bt_info.bt_disabled)
 		return true;
 	else
 		return false;
@@ -236,7 +464,50 @@ bool rtl_btc_is_bt_disabled(struct rtl_priv *rtlpriv)
 
 void rtl_btc_special_packet_notify(struct rtl_priv *rtlpriv, u8 pkt_type)
 {
-	return exhalbtc_special_packet_notify(&gl_bt_coexist, pkt_type);
+	struct btc_coexist *btcoexist = rtl_btc_coexist(rtlpriv);
+
+	if (!btcoexist)
+		return;
+
+	return exhalbtc_special_packet_notify(btcoexist, pkt_type);
+}
+
+void rtl_btc_switch_band_notify(struct rtl_priv *rtlpriv, u8 band_type,
+				bool scanning)
+{
+	struct btc_coexist *btcoexist = rtl_btc_coexist(rtlpriv);
+	u8 type = BTC_NOT_SWITCH;
+
+	if (!btcoexist)
+		return;
+
+	switch (band_type) {
+	case BAND_ON_2_4G:
+		if (scanning)
+			type = BTC_SWITCH_TO_24G;
+		else
+			type = BTC_SWITCH_TO_24G_NOFORSCAN;
+		break;
+
+	case BAND_ON_5G:
+		type = BTC_SWITCH_TO_5G;
+		break;
+	}
+
+	if (type != BTC_NOT_SWITCH)
+		exhalbtc_switch_band_notify(btcoexist, type);
+}
+
+void rtl_btc_switch_band_notify_wifionly(struct rtl_priv *rtlpriv, u8 band_type,
+					 bool scanning)
+{
+	struct wifi_only_cfg *wifionly_cfg = rtl_btc_wifi_only(rtlpriv);
+	u8 is_5g = (band_type == BAND_ON_5G);
+
+	if (!wifionly_cfg)
+		return;
+
+	exhalbtc_switch_band_notify_wifi_only(wifionly_cfg, is_5g);
 }
 
 struct rtl_btc_ops *rtl_btc_get_ops_pointer(void)
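The rtl_btc.c hunks above replace the file-scope gl_bt_coexist global with a per-adapter context fetched through rtl_btc_coexist(), and every exported callback now bails out when no context is attached. A minimal sketch of that accessor pattern, assuming a hypothetical btc_context field on the driver private data (the authoritative layout lives in wifi.h and is not part of this hunk):

/* Sketch only: the field name btc_context is an assumption made for
 * illustration; the real definition is in wifi.h.
 */
static struct btc_coexist *rtl_btc_coexist(struct rtl_priv *rtlpriv)
{
	return rtlpriv ? rtlpriv->btcoexist.btc_context : NULL;
}

void rtl_btc_example_notify(struct rtl_priv *rtlpriv, u8 type)
{
	struct btc_coexist *btcoexist = rtl_btc_coexist(rtlpriv);

	if (!btcoexist)		/* wifi-only firmware or not yet set up */
		return;

	exhalbtc_example_notify(btcoexist, type);	/* hypothetical callee */
}

Holding the coexistence state per rtl_priv rather than in one global is what lets two rtlwifi adapters in the same machine keep independent BT-coex state.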
diff --git a/drivers/net/wireless/realtek/rtlwifi/btcoexist/rtl_btc.h b/drivers/net/wireless/realtek/rtlwifi/btcoexist/rtl_btc.h
index ac1253c46f44..8c996055de71 100644
--- a/drivers/net/wireless/realtek/rtlwifi/btcoexist/rtl_btc.h
+++ b/drivers/net/wireless/realtek/rtlwifi/btcoexist/rtl_btc.h
@@ -28,22 +28,32 @@
 #include "halbt_precomp.h"
 
 void rtl_btc_init_variables(struct rtl_priv *rtlpriv);
+void rtl_btc_init_variables_wifi_only(struct rtl_priv *rtlpriv);
+void rtl_btc_deinit_variables(struct rtl_priv *rtlpriv);
 void rtl_btc_init_hal_vars(struct rtl_priv *rtlpriv);
+void rtl_btc_power_on_setting(struct rtl_priv *rtlpriv);
 void rtl_btc_init_hw_config(struct rtl_priv *rtlpriv);
+void rtl_btc_init_hw_config_wifi_only(struct rtl_priv *rtlpriv);
 void rtl_btc_ips_notify(struct rtl_priv *rtlpriv, u8 type);
 void rtl_btc_lps_notify(struct rtl_priv *rtlpriv, u8 type);
 void rtl_btc_scan_notify(struct rtl_priv *rtlpriv, u8 scantype);
+void rtl_btc_scan_notify_wifi_only(struct rtl_priv *rtlpriv, u8 scantype);
 void rtl_btc_connect_notify(struct rtl_priv *rtlpriv, u8 action);
 void rtl_btc_mediastatus_notify(struct rtl_priv *rtlpriv,
 				enum rt_media_status mstatus);
 void rtl_btc_periodical(struct rtl_priv *rtlpriv);
-void rtl_btc_halt_notify(void);
+void rtl_btc_halt_notify(struct rtl_priv *rtlpriv);
 void rtl_btc_btinfo_notify(struct rtl_priv *rtlpriv, u8 *tmpbuf, u8 length);
 void rtl_btc_btmpinfo_notify(struct rtl_priv *rtlpriv, u8 *tmp_buf, u8 length);
 bool rtl_btc_is_limited_dig(struct rtl_priv *rtlpriv);
 bool rtl_btc_is_disable_edca_turbo(struct rtl_priv *rtlpriv);
 bool rtl_btc_is_bt_disabled(struct rtl_priv *rtlpriv);
 void rtl_btc_special_packet_notify(struct rtl_priv *rtlpriv, u8 pkt_type);
+void rtl_btc_switch_band_notify(struct rtl_priv *rtlpriv, u8 band_type,
+				bool scanning);
+void rtl_btc_switch_band_notify_wifionly(struct rtl_priv *rtlpriv, u8 band_type,
+					 bool scanning);
+void rtl_btc_display_bt_coex_info(struct rtl_priv *rtlpriv, struct seq_file *m);
 void rtl_btc_record_pwr_mode(struct rtl_priv *rtlpriv, u8 *buf, u8 len);
 u8   rtl_btc_get_lps_val(struct rtl_priv *rtlpriv);
 u8   rtl_btc_get_rpwm_val(struct rtl_priv *rtlpriv);
diff --git a/drivers/net/wireless/realtek/rtlwifi/core.c b/drivers/net/wireless/realtek/rtlwifi/core.c
index 3cb88825473e..cfea57efa7f4 100644
--- a/drivers/net/wireless/realtek/rtlwifi/core.c
+++ b/drivers/net/wireless/realtek/rtlwifi/core.c
@@ -345,9 +345,9 @@ static void rtl_op_remove_interface(struct ieee80211_hw *hw,
 	mutex_lock(&rtlpriv->locks.conf_mutex);
 
 	/* Free beacon resources */
-	if ((vif->type == NL80211_IFTYPE_AP) ||
-	    (vif->type == NL80211_IFTYPE_ADHOC) ||
-	    (vif->type == NL80211_IFTYPE_MESH_POINT)) {
+	if (vif->type == NL80211_IFTYPE_AP ||
+	    vif->type == NL80211_IFTYPE_ADHOC ||
+	    vif->type == NL80211_IFTYPE_MESH_POINT) {
 		if (mac->beacon_enabled == 1) {
 			mac->beacon_enabled = 0;
 			rtlpriv->cfg->ops->update_interrupt_mask(hw, 0,
@@ -858,8 +858,8 @@ static void rtl_op_configure_filter(struct ieee80211_hw *hw,
 	 * here just used for linked scanning, & linked
 	 * and nolink check bssid is set in set network_type
 	 */
-	if ((changed_flags & FIF_BCN_PRBRESP_PROMISC) &&
-	    (mac->link_state >= MAC80211_LINKED)) {
+	if (changed_flags & FIF_BCN_PRBRESP_PROMISC &&
+	    mac->link_state >= MAC80211_LINKED) {
 		if (mac->opmode != NL80211_IFTYPE_AP &&
 		    mac->opmode != NL80211_IFTYPE_MESH_POINT) {
 			if (*new_flags & FIF_BCN_PRBRESP_PROMISC)
@@ -1044,10 +1044,10 @@ static void rtl_op_bss_info_changed(struct ieee80211_hw *hw,
 	struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw));
 
 	mutex_lock(&rtlpriv->locks.conf_mutex);
-	if ((vif->type == NL80211_IFTYPE_ADHOC) ||
-	    (vif->type == NL80211_IFTYPE_AP) ||
-	    (vif->type == NL80211_IFTYPE_MESH_POINT)) {
-		if ((changed & BSS_CHANGED_BEACON) ||
+	if (vif->type == NL80211_IFTYPE_ADHOC ||
+	    vif->type == NL80211_IFTYPE_AP ||
+	    vif->type == NL80211_IFTYPE_MESH_POINT) {
+		if (changed & BSS_CHANGED_BEACON ||
 		    (changed & BSS_CHANGED_BEACON_ENABLED &&
 		     bss_conf->enable_beacon)) {
 			if (mac->beacon_enabled == 0) {
@@ -1162,6 +1162,8 @@ static void rtl_op_bss_info_changed(struct ieee80211_hw *hw,
 			RT_TRACE(rtlpriv, COMP_MAC80211, DBG_DMESG,
 				 "BSS_CHANGED_ASSOC\n");
 		} else {
+			struct cfg80211_bss *bss = NULL;
+
 			mstatus = RT_MEDIA_DISCONNECT;
 
 			if (mac->link_state == MAC80211_LINKED)
@@ -1169,6 +1171,22 @@ static void rtl_op_bss_info_changed(struct ieee80211_hw *hw,
 			if (ppsc->p2p_ps_info.p2p_ps_mode > P2P_PS_NONE)
 				rtl_p2p_ps_cmd(hw, P2P_PS_DISABLE);
 			mac->link_state = MAC80211_NOLINK;
+
+			bss = cfg80211_get_bss(hw->wiphy, NULL,
+					       (u8 *)mac->bssid, NULL, 0,
+					       IEEE80211_BSS_TYPE_ESS,
+					       IEEE80211_PRIVACY_OFF);
+
+			RT_TRACE(rtlpriv, COMP_MAC80211, DBG_DMESG,
+				 "bssid = %pMF\n", mac->bssid);
+
+			if (bss) {
+				cfg80211_unlink_bss(hw->wiphy, bss);
+				cfg80211_put_bss(hw->wiphy, bss);
+				RT_TRACE(rtlpriv, COMP_MAC80211, DBG_DMESG,
+					 "cfg80211_unlink !!\n");
+			}
+
 			eth_zero_addr(mac->bssid);
 			mac->vendor = PEER_UNKNOWN;
 			mac->mode = 0;
@@ -1435,6 +1453,9 @@ static void rtl_op_sw_scan_start(struct ieee80211_hw *hw,
 
 	if (rtlpriv->cfg->ops->get_btc_status())
 		rtlpriv->btcoexist.btc_ops->btc_scan_notify(rtlpriv, 1);
+	else if (rtlpriv->btcoexist.btc_ops)
+		rtlpriv->btcoexist.btc_ops->btc_scan_notify_wifi_only(rtlpriv,
+								      1);
 
 	if (rtlpriv->dm.supp_phymode_switch) {
 		if (rtlpriv->cfg->ops->chk_switch_dmdp)
@@ -1490,6 +1511,9 @@ static void rtl_op_sw_scan_complete(struct ieee80211_hw *hw,
 	rtlpriv->cfg->ops->scan_operation_backup(hw, SCAN_OPT_RESTORE);
 	if (rtlpriv->cfg->ops->get_btc_status())
 		rtlpriv->btcoexist.btc_ops->btc_scan_notify(rtlpriv, 0);
+	else if (rtlpriv->btcoexist.btc_ops)
+		rtlpriv->btcoexist.btc_ops->btc_scan_notify_wifi_only(rtlpriv,
+								      0);
 }
 
 static int rtl_op_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
@@ -1513,9 +1537,9 @@ static int rtl_op_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
 		return -ENOSPC;	/*User disabled HW-crypto */
 	}
 	/* To support IBSS, use sw-crypto for GTK */
-	if (((vif->type == NL80211_IFTYPE_ADHOC) ||
-	    (vif->type == NL80211_IFTYPE_MESH_POINT)) &&
-	   !(key->flags & IEEE80211_KEY_FLAG_PAIRWISE))
+	if ((vif->type == NL80211_IFTYPE_ADHOC ||
+	     vif->type == NL80211_IFTYPE_MESH_POINT) &&
+	    !(key->flags & IEEE80211_KEY_FLAG_PAIRWISE))
 		return -ENOSPC;
 	RT_TRACE(rtlpriv, COMP_SEC, DBG_DMESG,
 		 "%s hardware based encryption for keyidx: %d, mac: %pM\n",
@@ -1588,7 +1612,7 @@ static int rtl_op_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
 			rtlpriv->cfg->ops->enable_hw_sec(hw);
 		}
 	} else {
-		if ((!group_key) || (vif->type == NL80211_IFTYPE_ADHOC) ||
+		if (!group_key || vif->type == NL80211_IFTYPE_ADHOC ||
 		    rtlpriv->sec.pairwise_enc_algorithm == NO_ENCRYPTION) {
 			if (rtlpriv->sec.pairwise_enc_algorithm ==
 			    NO_ENCRYPTION &&
@@ -1775,7 +1799,7 @@ bool rtl_hal_pwrseqcmdparsing(struct rtl_priv *rtlpriv, u8 cut_version,
 				break;
 			case PWR_CMD_WRITE:
 				RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
-					"rtl_hal_pwrseqcmdparsing(): PWR_CMD_WRITE\n");
+					 "%s(): PWR_CMD_WRITE\n", __func__);
 				offset = GET_PWR_CFG_OFFSET(cfg_cmd);
 
 				/*Read the value from system register*/
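On disassociation, the core.c hunk above looks up the cached cfg80211 BSS entry for the old AP and unlinks it; the plausible intent is to force a fresh scan result before any reconnect rather than trusting a stale cached beacon. The lookup/unlink/release idiom in isolation, as a sketch:

#include <net/cfg80211.h>

/* Sketch of the idiom used above; wiphy and bssid come from the caller. */
static void forget_bss(struct wiphy *wiphy, const u8 *bssid)
{
	struct cfg80211_bss *bss;

	bss = cfg80211_get_bss(wiphy, NULL, bssid, NULL, 0,
			       IEEE80211_BSS_TYPE_ESS, IEEE80211_PRIVACY_OFF);
	if (!bss)
		return;

	cfg80211_unlink_bss(wiphy, bss);	/* drop it from the scan cache */
	cfg80211_put_bss(wiphy, bss);		/* release the reference from _get_bss */
}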
diff --git a/drivers/net/wireless/realtek/rtlwifi/debug.c b/drivers/net/wireless/realtek/rtlwifi/debug.c
index 38fef6dbb44b..d70385be9976 100644
--- a/drivers/net/wireless/realtek/rtlwifi/debug.c
+++ b/drivers/net/wireless/realtek/rtlwifi/debug.c
@@ -23,15 +23,17 @@
  *****************************************************************************/
 
 #include "wifi.h"
+#include "cam.h"
 
 #include <linux/moduleparam.h>
+#include <linux/vmalloc.h>
 
 #ifdef CONFIG_RTLWIFI_DEBUG
 void _rtl_dbg_trace(struct rtl_priv *rtlpriv, u64 comp, int level,
 		    const char *fmt, ...)
 {
 	if (unlikely((comp & rtlpriv->cfg->mod_params->debug_mask) &&
-		     (level <= rtlpriv->cfg->mod_params->debug_level))) {
+		     level <= rtlpriv->cfg->mod_params->debug_level)) {
 		struct va_format vaf;
 		va_list args;
 
@@ -51,7 +53,7 @@ void _rtl_dbg_print(struct rtl_priv *rtlpriv, u64 comp, int level,
 		    const char *fmt, ...)
 {
 	if (unlikely((comp & rtlpriv->cfg->mod_params->debug_mask) &&
-		     (level <= rtlpriv->cfg->mod_params->debug_level))) {
+		     level <= rtlpriv->cfg->mod_params->debug_level)) {
 		struct va_format vaf;
 		va_list args;
 
@@ -81,4 +83,481 @@ void _rtl_dbg_print_data(struct rtl_priv *rtlpriv, u64 comp, int level,
 }
 EXPORT_SYMBOL_GPL(_rtl_dbg_print_data);
 
+struct rtl_debugfs_priv {
+	struct rtl_priv *rtlpriv;
+	int (*cb_read)(struct seq_file *m, void *v);
+	ssize_t (*cb_write)(struct file *filp, const char __user *buffer,
+			    size_t count, loff_t *loff);
+	u32 cb_data;
+};
+
+static struct dentry *debugfs_topdir;
+
+static int rtl_debug_get_common(struct seq_file *m, void *v)
+{
+	struct rtl_debugfs_priv *debugfs_priv = m->private;
+
+	return debugfs_priv->cb_read(m, v);
+}
+
+static int dl_debug_open_common(struct inode *inode, struct file *file)
+{
+	return single_open(file, rtl_debug_get_common, inode->i_private);
+}
+
+static const struct file_operations file_ops_common = {
+	.open = dl_debug_open_common,
+	.read = seq_read,
+	.llseek = seq_lseek,
+	.release = seq_release,
+};
+
+static int rtl_debug_get_mac_page(struct seq_file *m, void *v)
+{
+	struct rtl_debugfs_priv *debugfs_priv = m->private;
+	struct rtl_priv *rtlpriv = debugfs_priv->rtlpriv;
+	u32 page = debugfs_priv->cb_data;
+	int i, n;
+	int max = 0xff;
+
+	for (n = 0; n <= max; ) {
+		seq_printf(m, "\n%8.8x  ", n + page);
+		for (i = 0; i < 4 && n <= max; i++, n += 4)
+			seq_printf(m, "%8.8x    ",
+				   rtl_read_dword(rtlpriv, (page | n)));
+	}
+	seq_puts(m, "\n");
+	return 0;
+}
+
+#define RTL_DEBUG_IMPL_MAC_SERIES(page, addr)			\
+static struct rtl_debugfs_priv rtl_debug_priv_mac_ ##page = {	\
+	.cb_read = rtl_debug_get_mac_page,			\
+	.cb_data = addr,					\
+}
+
+RTL_DEBUG_IMPL_MAC_SERIES(0, 0x0000);
+RTL_DEBUG_IMPL_MAC_SERIES(1, 0x0100);
+RTL_DEBUG_IMPL_MAC_SERIES(2, 0x0200);
+RTL_DEBUG_IMPL_MAC_SERIES(3, 0x0300);
+RTL_DEBUG_IMPL_MAC_SERIES(4, 0x0400);
+RTL_DEBUG_IMPL_MAC_SERIES(5, 0x0500);
+RTL_DEBUG_IMPL_MAC_SERIES(6, 0x0600);
+RTL_DEBUG_IMPL_MAC_SERIES(7, 0x0700);
+RTL_DEBUG_IMPL_MAC_SERIES(10, 0x1000);
+RTL_DEBUG_IMPL_MAC_SERIES(11, 0x1100);
+RTL_DEBUG_IMPL_MAC_SERIES(12, 0x1200);
+RTL_DEBUG_IMPL_MAC_SERIES(13, 0x1300);
+RTL_DEBUG_IMPL_MAC_SERIES(14, 0x1400);
+RTL_DEBUG_IMPL_MAC_SERIES(15, 0x1500);
+RTL_DEBUG_IMPL_MAC_SERIES(16, 0x1600);
+RTL_DEBUG_IMPL_MAC_SERIES(17, 0x1700);
+
+static int rtl_debug_get_bb_page(struct seq_file *m, void *v)
+{
+	struct rtl_debugfs_priv *debugfs_priv = m->private;
+	struct rtl_priv *rtlpriv = debugfs_priv->rtlpriv;
+	struct ieee80211_hw *hw = rtlpriv->hw;
+	u32 page = debugfs_priv->cb_data;
+	int i, n;
+	int max = 0xff;
+
+	for (n = 0; n <= max; ) {
+		seq_printf(m, "\n%8.8x  ", n + page);
+		for (i = 0; i < 4 && n <= max; i++, n += 4)
+			seq_printf(m, "%8.8x    ",
+				   rtl_get_bbreg(hw, (page | n), 0xffffffff));
+	}
+	seq_puts(m, "\n");
+	return 0;
+}
+
+#define RTL_DEBUG_IMPL_BB_SERIES(page, addr)			\
+static struct rtl_debugfs_priv rtl_debug_priv_bb_ ##page = {	\
+	.cb_read = rtl_debug_get_bb_page,			\
+	.cb_data = addr,					\
+}
+
+RTL_DEBUG_IMPL_BB_SERIES(8, 0x0800);
+RTL_DEBUG_IMPL_BB_SERIES(9, 0x0900);
+RTL_DEBUG_IMPL_BB_SERIES(a, 0x0a00);
+RTL_DEBUG_IMPL_BB_SERIES(b, 0x0b00);
+RTL_DEBUG_IMPL_BB_SERIES(c, 0x0c00);
+RTL_DEBUG_IMPL_BB_SERIES(d, 0x0d00);
+RTL_DEBUG_IMPL_BB_SERIES(e, 0x0e00);
+RTL_DEBUG_IMPL_BB_SERIES(f, 0x0f00);
+RTL_DEBUG_IMPL_BB_SERIES(18, 0x1800);
+RTL_DEBUG_IMPL_BB_SERIES(19, 0x1900);
+RTL_DEBUG_IMPL_BB_SERIES(1a, 0x1a00);
+RTL_DEBUG_IMPL_BB_SERIES(1b, 0x1b00);
+RTL_DEBUG_IMPL_BB_SERIES(1c, 0x1c00);
+RTL_DEBUG_IMPL_BB_SERIES(1d, 0x1d00);
+RTL_DEBUG_IMPL_BB_SERIES(1e, 0x1e00);
+RTL_DEBUG_IMPL_BB_SERIES(1f, 0x1f00);
+
+static int rtl_debug_get_reg_rf(struct seq_file *m, void *v)
+{
+	struct rtl_debugfs_priv *debugfs_priv = m->private;
+	struct rtl_priv *rtlpriv = debugfs_priv->rtlpriv;
+	struct ieee80211_hw *hw = rtlpriv->hw;
+	enum radio_path rfpath = debugfs_priv->cb_data;
+	int i, n;
+	int max = 0x40;
+
+	if (IS_HARDWARE_TYPE_8822B(rtlpriv))
+		max = 0xff;
+
+	seq_printf(m, "\nPATH(%d)", rfpath);
+
+	for (n = 0; n <= max; ) {
+		seq_printf(m, "\n%8.8x  ", n);
+		for (i = 0; i < 4 && n <= max; n += 1, i++)
+			seq_printf(m, "%8.8x    ",
+				   rtl_get_rfreg(hw, rfpath, n, 0xffffffff));
+	}
+	seq_puts(m, "\n");
+	return 0;
+}
+
+#define RTL_DEBUG_IMPL_RF_SERIES(page, addr)			\
+static struct rtl_debugfs_priv rtl_debug_priv_rf_ ##page = {	\
+	.cb_read = rtl_debug_get_reg_rf,			\
+	.cb_data = addr,					\
+}
+
+RTL_DEBUG_IMPL_RF_SERIES(a, RF90_PATH_A);
+RTL_DEBUG_IMPL_RF_SERIES(b, RF90_PATH_B);
+
+static int rtl_debug_get_cam_register(struct seq_file *m, void *v)
+{
+	struct rtl_debugfs_priv *debugfs_priv = m->private;
+	struct rtl_priv *rtlpriv = debugfs_priv->rtlpriv;
+	int start = debugfs_priv->cb_data;
+	u32 target_cmd = 0;
+	u32 target_val = 0;
+	u8 entry_i = 0;
+	u32 ulstatus;
+	int i = 100, j = 0;
+	int end = (start + 11 > TOTAL_CAM_ENTRY ? TOTAL_CAM_ENTRY : start + 11);
+
+	/* Dump the security CAM entries for this page */
+	seq_printf(m,
+		   "\n#################### SECURITY CAM (%d-%d) ##################\n",
+		   start, end - 1);
+
+	for (j = start; j < end; j++) {
+		seq_printf(m, "\nD:  %2x > ", j);
+		for (entry_i = 0; entry_i < CAM_CONTENT_COUNT; entry_i++) {
+			/* polling bit, no write enable, and the address */
+			target_cmd = entry_i + CAM_CONTENT_COUNT * j;
+			target_cmd = target_cmd | BIT(31);
+
+			/* Check polling bit is clear */
+			while ((i--) >= 0) {
+				ulstatus =
+				    rtl_read_dword(rtlpriv,
+						   rtlpriv->cfg->maps[RWCAM]);
+				if (ulstatus & BIT(31))
+					continue;
+				else
+					break;
+			}
+
+			rtl_write_dword(rtlpriv, rtlpriv->cfg->maps[RWCAM],
+					target_cmd);
+			target_val = rtl_read_dword(rtlpriv,
+						    rtlpriv->cfg->maps[RCAMO]);
+			seq_printf(m, "%8.8x ", target_val);
+		}
+	}
+	seq_puts(m, "\n");
+	return 0;
+}
+
+#define RTL_DEBUG_IMPL_CAM_SERIES(page, addr)			\
+static struct rtl_debugfs_priv rtl_debug_priv_cam_ ##page = {	\
+	.cb_read = rtl_debug_get_cam_register,			\
+	.cb_data = addr,					\
+}
+
+RTL_DEBUG_IMPL_CAM_SERIES(1, 0);
+RTL_DEBUG_IMPL_CAM_SERIES(2, 11);
+RTL_DEBUG_IMPL_CAM_SERIES(3, 22);
+
+static int rtl_debug_get_btcoex(struct seq_file *m, void *v)
+{
+	struct rtl_debugfs_priv *debugfs_priv = m->private;
+	struct rtl_priv *rtlpriv = debugfs_priv->rtlpriv;
+
+	if (rtlpriv->cfg->ops->get_btc_status())
+		rtlpriv->btcoexist.btc_ops->btc_display_bt_coex_info(rtlpriv,
+								     m);
+
+	seq_puts(m, "\n");
+
+	return 0;
+}
+
+static struct rtl_debugfs_priv rtl_debug_priv_btcoex = {
+	.cb_read = rtl_debug_get_btcoex,
+	.cb_data = 0,
+};
+
+static ssize_t rtl_debugfs_set_write_reg(struct file *filp,
+					 const char __user *buffer,
+					 size_t count, loff_t *loff)
+{
+	struct rtl_debugfs_priv *debugfs_priv = filp->private_data;
+	struct rtl_priv *rtlpriv = debugfs_priv->rtlpriv;
+	char tmp[32 + 1];
+	int tmp_len;
+	u32 addr, val, len;
+	int num;
+
+	if (count < 3)
+		return -EFAULT;
+
+	tmp_len = (count > sizeof(tmp) - 1 ? sizeof(tmp) - 1 : count);
+
+	if (!buffer || copy_from_user(tmp, buffer, tmp_len))
+		return count;
+
+	tmp[tmp_len] = '\0';
+
+	/* write BB/MAC register */
+	num = sscanf(tmp, "%x %x %x", &addr, &val, &len);
+
+	if (num != 3)
+		return count;
+
+	switch (len) {
+	case 1:
+		rtl_write_byte(rtlpriv, addr, (u8)val);
+		break;
+	case 2:
+		rtl_write_word(rtlpriv, addr, (u16)val);
+		break;
+	case 4:
+		rtl_write_dword(rtlpriv, addr, val);
+		break;
+	default:
+		/*printk("error write length=%d", len);*/
+		break;
+	}
+
+	return count;
+}
+
+static struct rtl_debugfs_priv rtl_debug_priv_write_reg = {
+	.cb_write = rtl_debugfs_set_write_reg,
+};
+
+static ssize_t rtl_debugfs_set_write_h2c(struct file *filp,
+					 const char __user *buffer,
+					 size_t count, loff_t *loff)
+{
+	struct rtl_debugfs_priv *debugfs_priv = filp->private_data;
+	struct rtl_priv *rtlpriv = debugfs_priv->rtlpriv;
+	struct ieee80211_hw *hw = rtlpriv->hw;
+	char tmp[32 + 1];
+	int tmp_len;
+	u8 h2c_len, h2c_data_packed[8];
+	int h2c_data[8];	/* idx 0: cmd */
+	int i;
+
+	if (count < 3)
+		return -EFAULT;
+
+	tmp_len = (count > sizeof(tmp) - 1 ? sizeof(tmp) - 1 : count);
+
+	if (!buffer || copy_from_user(tmp, buffer, tmp_len))
+		return count;
+
+	tmp[tmp_len] = '\0';
+
+	h2c_len = sscanf(tmp, "%X %X %X %X %X %X %X %X",
+			 &h2c_data[0], &h2c_data[1],
+			 &h2c_data[2], &h2c_data[3],
+			 &h2c_data[4], &h2c_data[5],
+			 &h2c_data[6], &h2c_data[7]);
+
+	if (h2c_len <= 0)
+		return count;
+
+	for (i = 0; i < h2c_len; i++)
+		h2c_data_packed[i] = (u8)h2c_data[i];
+
+	rtlpriv->cfg->ops->fill_h2c_cmd(hw, h2c_data_packed[0],
+					h2c_len - 1,
+					&h2c_data_packed[1]);
+
+	return count;
+}
+
+static struct rtl_debugfs_priv rtl_debug_priv_write_h2c = {
+	.cb_write = rtl_debugfs_set_write_h2c,
+};
+
+static ssize_t rtl_debugfs_set_write_rfreg(struct file *filp,
+					   const char __user *buffer,
+					    size_t count, loff_t *loff)
+{
+	struct rtl_debugfs_priv *debugfs_priv = filp->private_data;
+	struct rtl_priv *rtlpriv = debugfs_priv->rtlpriv;
+	struct ieee80211_hw *hw = rtlpriv->hw;
+	char tmp[32 + 1];
+	int tmp_len;
+	int num;
+	int path;
+	u32 addr, bitmask, data;
+
+	if (count < 3)
+		return -EFAULT;
+
+	tmp_len = (count > sizeof(tmp) - 1 ? sizeof(tmp) - 1 : count);
+
+	if (!buffer || copy_from_user(tmp, buffer, tmp_len))
+		return count;
+
+	tmp[tmp_len] = '\0';
+
+	num = sscanf(tmp, "%X %X %X %X",
+		     &path, &addr, &bitmask, &data);
+
+	if (num != 4) {
+		RT_TRACE(rtlpriv, COMP_ERR, DBG_DMESG,
+			 "Format is <path> <addr> <mask> <data>\n");
+		return count;
+	}
+
+	rtl_set_rfreg(hw, path, addr, bitmask, data);
+
+	return count;
+}
+
+static struct rtl_debugfs_priv rtl_debug_priv_write_rfreg = {
+	.cb_write = rtl_debugfs_set_write_rfreg,
+};
+
+static int rtl_debugfs_close(struct inode *inode, struct file *filp)
+{
+	return 0;
+}
+
+static ssize_t rtl_debugfs_common_write(struct file *filp,
+					const char __user *buffer,
+					size_t count, loff_t *loff)
+{
+	struct rtl_debugfs_priv *debugfs_priv = filp->private_data;
+
+	return debugfs_priv->cb_write(filp, buffer, count, loff);
+}
+
+static const struct file_operations file_ops_common_write = {
+	.owner = THIS_MODULE,
+	.write = rtl_debugfs_common_write,
+	.open = simple_open,
+	.release = rtl_debugfs_close,
+};
+
+#define RTL_DEBUGFS_ADD_CORE(name, mode, fopname)			   \
+	do {								   \
+		rtl_debug_priv_ ##name.rtlpriv = rtlpriv;		   \
+		if (!debugfs_create_file(#name, mode,			   \
+					 parent, &rtl_debug_priv_ ##name,  \
+					 &file_ops_ ##fopname))		   \
+			pr_err("Unable to initialize debugfs:%s/%s\n",	   \
+			       rtlpriv->dbg.debugfs_name,		   \
+			       #name);					   \
+	} while (0)
+
+#define RTL_DEBUGFS_ADD(name)						   \
+		RTL_DEBUGFS_ADD_CORE(name, S_IFREG | 0444, common)
+#define RTL_DEBUGFS_ADD_W(name)						   \
+		RTL_DEBUGFS_ADD_CORE(name, S_IFREG | 0222, common_write)
+
+void rtl_debug_add_one(struct ieee80211_hw *hw)
+{
+	struct rtl_priv *rtlpriv = rtl_priv(hw);
+	struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw));
+	struct dentry *parent;
+
+	snprintf(rtlpriv->dbg.debugfs_name, 18, "%pMF", rtlefuse->dev_addr);
+
+	rtlpriv->dbg.debugfs_dir =
+		debugfs_create_dir(rtlpriv->dbg.debugfs_name, debugfs_topdir);
+	if (!rtlpriv->dbg.debugfs_dir) {
+		pr_err("Unable to init debugfs:/%s/%s\n", rtlpriv->cfg->name,
+		       rtlpriv->dbg.debugfs_name);
+		return;
+	}
+
+	parent = rtlpriv->dbg.debugfs_dir;
+
+	RTL_DEBUGFS_ADD(mac_0);
+	RTL_DEBUGFS_ADD(mac_1);
+	RTL_DEBUGFS_ADD(mac_2);
+	RTL_DEBUGFS_ADD(mac_3);
+	RTL_DEBUGFS_ADD(mac_4);
+	RTL_DEBUGFS_ADD(mac_5);
+	RTL_DEBUGFS_ADD(mac_6);
+	RTL_DEBUGFS_ADD(mac_7);
+	RTL_DEBUGFS_ADD(bb_8);
+	RTL_DEBUGFS_ADD(bb_9);
+	RTL_DEBUGFS_ADD(bb_a);
+	RTL_DEBUGFS_ADD(bb_b);
+	RTL_DEBUGFS_ADD(bb_c);
+	RTL_DEBUGFS_ADD(bb_d);
+	RTL_DEBUGFS_ADD(bb_e);
+	RTL_DEBUGFS_ADD(bb_f);
+	RTL_DEBUGFS_ADD(mac_10);
+	RTL_DEBUGFS_ADD(mac_11);
+	RTL_DEBUGFS_ADD(mac_12);
+	RTL_DEBUGFS_ADD(mac_13);
+	RTL_DEBUGFS_ADD(mac_14);
+	RTL_DEBUGFS_ADD(mac_15);
+	RTL_DEBUGFS_ADD(mac_16);
+	RTL_DEBUGFS_ADD(mac_17);
+	RTL_DEBUGFS_ADD(bb_18);
+	RTL_DEBUGFS_ADD(bb_19);
+	RTL_DEBUGFS_ADD(bb_1a);
+	RTL_DEBUGFS_ADD(bb_1b);
+	RTL_DEBUGFS_ADD(bb_1c);
+	RTL_DEBUGFS_ADD(bb_1d);
+	RTL_DEBUGFS_ADD(bb_1e);
+	RTL_DEBUGFS_ADD(bb_1f);
+	RTL_DEBUGFS_ADD(rf_a);
+	RTL_DEBUGFS_ADD(rf_b);
+
+	RTL_DEBUGFS_ADD(cam_1);
+	RTL_DEBUGFS_ADD(cam_2);
+	RTL_DEBUGFS_ADD(cam_3);
+
+	RTL_DEBUGFS_ADD(btcoex);
+
+	RTL_DEBUGFS_ADD_W(write_reg);
+	RTL_DEBUGFS_ADD_W(write_h2c);
+	RTL_DEBUGFS_ADD_W(write_rfreg);
+}
+EXPORT_SYMBOL_GPL(rtl_debug_add_one);
+
+void rtl_debug_remove_one(struct ieee80211_hw *hw)
+{
+	struct rtl_priv *rtlpriv = rtl_priv(hw);
+
+	debugfs_remove_recursive(rtlpriv->dbg.debugfs_dir);
+	rtlpriv->dbg.debugfs_dir = NULL;
+}
+EXPORT_SYMBOL_GPL(rtl_debug_remove_one);
+
+void rtl_debugfs_add_topdir(void)
+{
+	debugfs_topdir = debugfs_create_dir("rtlwifi", NULL);
+}
+
+void rtl_debugfs_remove_topdir(void)
+{
+	debugfs_remove_recursive(debugfs_topdir);
+}
+
 #endif
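The debugfs entries added above live under /sys/kernel/debug/rtlwifi/<dev>/, where <dev> is the MAC-derived directory created in rtl_debug_add_one(). A small userspace sketch, assuming debugfs is mounted at the usual path and the directory name matches your adapter; write_reg takes the "<addr> <value> <len>" triple parsed by rtl_debugfs_set_write_reg(), values in hex and len in {1,2,4}:

#include <stdio.h>

int main(void)
{
	/* The directory name is an assumption; substitute the one that
	 * rtl_debug_add_one() created for your adapter. */
	const char *base = "/sys/kernel/debug/rtlwifi/aa-bb-cc-dd-ee-ff";
	char path[256], line[256];
	FILE *f;

	/* Dump MAC registers 0x000-0x0ff. */
	snprintf(path, sizeof(path), "%s/mac_0", base);
	f = fopen(path, "r");
	if (!f)
		return 1;
	while (fgets(line, sizeof(line), f))
		fputs(line, stdout);
	fclose(f);

	/* Write the byte 0x01 to register 0x522. */
	snprintf(path, sizeof(path), "%s/write_reg", base);
	f = fopen(path, "w");
	if (!f)
		return 1;
	fprintf(f, "522 1 1\n");
	fclose(f);
	return 0;
}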
diff --git a/drivers/net/wireless/realtek/rtlwifi/debug.h b/drivers/net/wireless/realtek/rtlwifi/debug.h
index 947718001457..ad6834af618b 100644
--- a/drivers/net/wireless/realtek/rtlwifi/debug.h
+++ b/drivers/net/wireless/realtek/rtlwifi/debug.h
@@ -219,4 +219,16 @@ static inline void RT_PRINT_DATA(struct rtl_priv *rtlpriv,
 }
 
 #endif
+
+#ifdef CONFIG_RTLWIFI_DEBUG
+void rtl_debug_add_one(struct ieee80211_hw *hw);
+void rtl_debug_remove_one(struct ieee80211_hw *hw);
+void rtl_debugfs_add_topdir(void);
+void rtl_debugfs_remove_topdir(void);
+#else
+#define rtl_debug_add_one(hw)
+#define rtl_debug_remove_one(hw)
+#define rtl_debugfs_add_topdir()
+#define rtl_debugfs_remove_topdir()
+#endif
 #endif
diff --git a/drivers/net/wireless/realtek/rtlwifi/efuse.c b/drivers/net/wireless/realtek/rtlwifi/efuse.c
index ef9acd466cca..35b50be633f1 100644
--- a/drivers/net/wireless/realtek/rtlwifi/efuse.c
+++ b/drivers/net/wireless/realtek/rtlwifi/efuse.c
@@ -257,11 +257,11 @@ void read_efuse(struct ieee80211_hw *hw, u16 _offset, u16 _size_byte, u8 *pbuf)
 			    sizeof(u8), GFP_ATOMIC);
 	if (!efuse_tbl)
 		return;
-	efuse_word = kzalloc(EFUSE_MAX_WORD_UNIT * sizeof(u16 *), GFP_ATOMIC);
+	efuse_word = kcalloc(EFUSE_MAX_WORD_UNIT, sizeof(u16 *), GFP_ATOMIC);
 	if (!efuse_word)
 		goto out;
 	for (i = 0; i < EFUSE_MAX_WORD_UNIT; i++) {
-		efuse_word[i] = kzalloc(efuse_max_section * sizeof(u16),
+		efuse_word[i] = kcalloc(efuse_max_section, sizeof(u16),
 					GFP_ATOMIC);
 		if (!efuse_word[i])
 			goto done;
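The efuse.c change swaps open-coded kzalloc(n * size) for kcalloc(n, size): the two zero the same memory, but kcalloc checks the multiplication for overflow and returns NULL instead of a short buffer. A self-contained sketch of the same two-level table allocation, assuming ordinary kernel context:

#include <linux/slab.h>

static u16 **alloc_word_table(size_t units, size_t sections)
{
	u16 **tbl;
	size_t i;

	tbl = kcalloc(units, sizeof(*tbl), GFP_ATOMIC);
	if (!tbl)
		return NULL;

	for (i = 0; i < units; i++) {
		tbl[i] = kcalloc(sections, sizeof(**tbl), GFP_ATOMIC);
		if (!tbl[i])
			goto err;
	}
	return tbl;

err:
	while (i--)
		kfree(tbl[i]);
	kfree(tbl);
	return NULL;
}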
diff --git a/drivers/net/wireless/realtek/rtlwifi/pci.c b/drivers/net/wireless/realtek/rtlwifi/pci.c
index c2575b0b9440..01ccf8884831 100644
--- a/drivers/net/wireless/realtek/rtlwifi/pci.c
+++ b/drivers/net/wireless/realtek/rtlwifi/pci.c
@@ -59,6 +59,7 @@ static u8 _rtl_mac_to_hwqueue(struct ieee80211_hw *hw, struct sk_buff *skb)
 	struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
 	__le16 fc = rtl_get_fc(skb);
 	u8 queue_index = skb_get_queue_mapping(skb);
+	struct ieee80211_hdr *hdr;
 
 	if (unlikely(ieee80211_is_beacon(fc)))
 		return BEACON_QUEUE;
@@ -67,6 +68,13 @@ static u8 _rtl_mac_to_hwqueue(struct ieee80211_hw *hw, struct sk_buff *skb)
 	if (rtlhal->hw_type == HARDWARE_TYPE_RTL8192SE)
 		if (ieee80211_is_nullfunc(fc))
 			return HIGH_QUEUE;
+	if (rtlhal->hw_type == HARDWARE_TYPE_RTL8822BE) {
+		hdr = rtl_get_hdr(skb);
+
+		if (is_multicast_ether_addr(hdr->addr1) ||
+		    is_broadcast_ether_addr(hdr->addr1))
+			return HIGH_QUEUE;
+	}
 
 	return ac_to_hwq[queue_index];
 }
@@ -557,13 +565,6 @@ static void _rtl_pci_tx_isr(struct ieee80211_hw *hw, int prio)
 		else
 			entry = (u8 *)(&ring->desc[ring->idx]);
 
-		if (rtlpriv->cfg->ops->get_available_desc &&
-		    rtlpriv->cfg->ops->get_available_desc(hw, prio) <= 1) {
-			RT_TRACE(rtlpriv, (COMP_INTR | COMP_SEND), DBG_DMESG,
-				 "no available desc!\n");
-			return;
-		}
-
 		if (!rtlpriv->cfg->ops->is_tx_desc_closed(hw, prio, ring->idx))
 			return;
 		ring->idx = (ring->idx + 1) % ring->entries;
@@ -747,7 +748,7 @@ static void _rtl_pci_rx_interrupt(struct ieee80211_hw *hw)
 	u8 tmp_one;
 	bool unicast = false;
 	u8 hw_queue = 0;
-	unsigned int rx_remained_cnt;
+	unsigned int rx_remained_cnt = 0;
 	struct rtl_stats stats = {
 		.signal = 0,
 		.rate = 0,
@@ -768,7 +769,8 @@ static void _rtl_pci_rx_interrupt(struct ieee80211_hw *hw)
 		struct sk_buff *new_skb;
 
 		if (rtlpriv->use_new_trx_flow) {
-			rx_remained_cnt =
+			if (rx_remained_cnt == 0)
+				rx_remained_cnt =
 				rtlpriv->cfg->ops->rx_desc_buff_remained_cnt(hw,
 								      hw_queue);
 			if (rx_remained_cnt == 0)
@@ -924,10 +926,8 @@ static irqreturn_t _rtl_pci_interrupt(int irq, void *dev_id)
 	struct rtl_priv *rtlpriv = rtl_priv(hw);
 	struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
 	unsigned long flags;
-	u32 inta = 0;
-	u32 intb = 0;
-	u32 intc = 0;
-	u32 intd = 0;
+	struct rtl_int intvec = {0};
+
 	irqreturn_t ret = IRQ_HANDLED;
 
 	if (rtlpci->irq_enabled == 0)
@@ -937,47 +937,47 @@ static irqreturn_t _rtl_pci_interrupt(int irq, void *dev_id)
 	rtlpriv->cfg->ops->disable_interrupt(hw);
 
 	/*read ISR: 4/8bytes */
-	rtlpriv->cfg->ops->interrupt_recognized(hw, &inta, &intb, &intc, &intd);
+	rtlpriv->cfg->ops->interrupt_recognized(hw, &intvec);
 
 	/*Shared IRQ or HW disappeared */
-	if (!inta || inta == 0xffff)
+	if (!intvec.inta || intvec.inta == 0xffff)
 		goto done;
 
 	/*<1> beacon related */
-	if (inta & rtlpriv->cfg->maps[RTL_IMR_TBDOK])
+	if (intvec.inta & rtlpriv->cfg->maps[RTL_IMR_TBDOK])
 		RT_TRACE(rtlpriv, COMP_INTR, DBG_TRACE,
 			 "beacon ok interrupt!\n");
 
-	if (unlikely(inta & rtlpriv->cfg->maps[RTL_IMR_TBDER]))
+	if (unlikely(intvec.inta & rtlpriv->cfg->maps[RTL_IMR_TBDER]))
 		RT_TRACE(rtlpriv, COMP_INTR, DBG_TRACE,
 			 "beacon err interrupt!\n");
 
-	if (inta & rtlpriv->cfg->maps[RTL_IMR_BDOK])
+	if (intvec.inta & rtlpriv->cfg->maps[RTL_IMR_BDOK])
 		RT_TRACE(rtlpriv, COMP_INTR, DBG_TRACE, "beacon interrupt!\n");
 
-	if (inta & rtlpriv->cfg->maps[RTL_IMR_BCNINT]) {
+	if (intvec.inta & rtlpriv->cfg->maps[RTL_IMR_BCNINT]) {
 		RT_TRACE(rtlpriv, COMP_INTR, DBG_TRACE,
 			 "prepare beacon for interrupt!\n");
 		tasklet_schedule(&rtlpriv->works.irq_prepare_bcn_tasklet);
 	}
 
 	/*<2> Tx related */
-	if (unlikely(intb & rtlpriv->cfg->maps[RTL_IMR_TXFOVW]))
+	if (unlikely(intvec.intb & rtlpriv->cfg->maps[RTL_IMR_TXFOVW]))
 		RT_TRACE(rtlpriv, COMP_ERR, DBG_WARNING, "IMR_TXFOVW!\n");
 
-	if (inta & rtlpriv->cfg->maps[RTL_IMR_MGNTDOK]) {
+	if (intvec.inta & rtlpriv->cfg->maps[RTL_IMR_MGNTDOK]) {
 		RT_TRACE(rtlpriv, COMP_INTR, DBG_TRACE,
 			 "Manage ok interrupt!\n");
 		_rtl_pci_tx_isr(hw, MGNT_QUEUE);
 	}
 
-	if (inta & rtlpriv->cfg->maps[RTL_IMR_HIGHDOK]) {
+	if (intvec.inta & rtlpriv->cfg->maps[RTL_IMR_HIGHDOK]) {
 		RT_TRACE(rtlpriv, COMP_INTR, DBG_TRACE,
 			 "HIGH_QUEUE ok interrupt!\n");
 		_rtl_pci_tx_isr(hw, HIGH_QUEUE);
 	}
 
-	if (inta & rtlpriv->cfg->maps[RTL_IMR_BKDOK]) {
+	if (intvec.inta & rtlpriv->cfg->maps[RTL_IMR_BKDOK]) {
 		rtlpriv->link_info.num_tx_inperiod++;
 
 		RT_TRACE(rtlpriv, COMP_INTR, DBG_TRACE,
@@ -985,7 +985,7 @@ static irqreturn_t _rtl_pci_interrupt(int irq, void *dev_id)
 		_rtl_pci_tx_isr(hw, BK_QUEUE);
 	}
 
-	if (inta & rtlpriv->cfg->maps[RTL_IMR_BEDOK]) {
+	if (intvec.inta & rtlpriv->cfg->maps[RTL_IMR_BEDOK]) {
 		rtlpriv->link_info.num_tx_inperiod++;
 
 		RT_TRACE(rtlpriv, COMP_INTR, DBG_TRACE,
@@ -993,7 +993,7 @@ static irqreturn_t _rtl_pci_interrupt(int irq, void *dev_id)
 		_rtl_pci_tx_isr(hw, BE_QUEUE);
 	}
 
-	if (inta & rtlpriv->cfg->maps[RTL_IMR_VIDOK]) {
+	if (intvec.inta & rtlpriv->cfg->maps[RTL_IMR_VIDOK]) {
 		rtlpriv->link_info.num_tx_inperiod++;
 
 		RT_TRACE(rtlpriv, COMP_INTR, DBG_TRACE,
@@ -1001,7 +1001,7 @@ static irqreturn_t _rtl_pci_interrupt(int irq, void *dev_id)
 		_rtl_pci_tx_isr(hw, VI_QUEUE);
 	}
 
-	if (inta & rtlpriv->cfg->maps[RTL_IMR_VODOK]) {
+	if (intvec.inta & rtlpriv->cfg->maps[RTL_IMR_VODOK]) {
 		rtlpriv->link_info.num_tx_inperiod++;
 
 		RT_TRACE(rtlpriv, COMP_INTR, DBG_TRACE,
@@ -1010,7 +1010,7 @@ static irqreturn_t _rtl_pci_interrupt(int irq, void *dev_id)
 	}
 
 	if (rtlhal->hw_type == HARDWARE_TYPE_RTL8822BE) {
-		if (intd & rtlpriv->cfg->maps[RTL_IMR_H2CDOK]) {
+		if (intvec.intd & rtlpriv->cfg->maps[RTL_IMR_H2CDOK]) {
 			rtlpriv->link_info.num_tx_inperiod++;
 
 			RT_TRACE(rtlpriv, COMP_INTR, DBG_TRACE,
@@ -1020,7 +1020,7 @@ static irqreturn_t _rtl_pci_interrupt(int irq, void *dev_id)
 	}
 
 	if (rtlhal->hw_type == HARDWARE_TYPE_RTL8192SE) {
-		if (inta & rtlpriv->cfg->maps[RTL_IMR_COMDOK]) {
+		if (intvec.inta & rtlpriv->cfg->maps[RTL_IMR_COMDOK]) {
 			rtlpriv->link_info.num_tx_inperiod++;
 
 			RT_TRACE(rtlpriv, COMP_INTR, DBG_TRACE,
@@ -1030,25 +1030,25 @@ static irqreturn_t _rtl_pci_interrupt(int irq, void *dev_id)
 	}
 
 	/*<3> Rx related */
-	if (inta & rtlpriv->cfg->maps[RTL_IMR_ROK]) {
+	if (intvec.inta & rtlpriv->cfg->maps[RTL_IMR_ROK]) {
 		RT_TRACE(rtlpriv, COMP_INTR, DBG_TRACE, "Rx ok interrupt!\n");
 		_rtl_pci_rx_interrupt(hw);
 	}
 
-	if (unlikely(inta & rtlpriv->cfg->maps[RTL_IMR_RDU])) {
+	if (unlikely(intvec.inta & rtlpriv->cfg->maps[RTL_IMR_RDU])) {
 		RT_TRACE(rtlpriv, COMP_ERR, DBG_WARNING,
 			 "rx descriptor unavailable!\n");
 		_rtl_pci_rx_interrupt(hw);
 	}
 
-	if (unlikely(intb & rtlpriv->cfg->maps[RTL_IMR_RXFOVW])) {
+	if (unlikely(intvec.intb & rtlpriv->cfg->maps[RTL_IMR_RXFOVW])) {
 		RT_TRACE(rtlpriv, COMP_ERR, DBG_WARNING, "rx overflow !\n");
 		_rtl_pci_rx_interrupt(hw);
 	}
 
 	/*<4> fw related*/
 	if (rtlhal->hw_type == HARDWARE_TYPE_RTL8723AE) {
-		if (inta & rtlpriv->cfg->maps[RTL_IMR_C2HCMD]) {
+		if (intvec.inta & rtlpriv->cfg->maps[RTL_IMR_C2HCMD]) {
 			RT_TRACE(rtlpriv, COMP_INTR, DBG_TRACE,
 				 "firmware interrupt!\n");
 			queue_delayed_work(rtlpriv->works.rtl_wq,
@@ -1064,7 +1064,8 @@ static irqreturn_t _rtl_pci_interrupt(int irq, void *dev_id)
 	 */
 	if (rtlhal->hw_type == HARDWARE_TYPE_RTL8188EE ||
 	    rtlhal->hw_type == HARDWARE_TYPE_RTL8723BE) {
-		if (unlikely(inta & rtlpriv->cfg->maps[RTL_IMR_HSISR_IND])) {
+		if (unlikely(intvec.inta &
+		    rtlpriv->cfg->maps[RTL_IMR_HSISR_IND])) {
 			RT_TRACE(rtlpriv, COMP_INTR, DBG_TRACE,
 				 "hsisr interrupt!\n");
 			_rtl_pci_hs_interrupt(hw);
@@ -1250,7 +1251,6 @@ static int _rtl_pci_init_tx_ring(struct ieee80211_hw *hw,
 
 		rtlpci->tx_ring[prio].cur_tx_rp = 0;
 		rtlpci->tx_ring[prio].cur_tx_wp = 0;
-		rtlpci->tx_ring[prio].avl_desc = entries;
 	}
 
 	/* alloc dma for this ring */
@@ -1555,7 +1555,14 @@ int rtl_pci_reset_trx_ring(struct ieee80211_hw *hw)
 				dev_kfree_skb_irq(skb);
 				ring->idx = (ring->idx + 1) % ring->entries;
 			}
+
+			if (rtlpriv->use_new_trx_flow) {
+				rtlpci->tx_ring[i].cur_tx_rp = 0;
+				rtlpci->tx_ring[i].cur_tx_wp = 0;
+			}
+
 			ring->idx = 0;
+			ring->entries = rtlpci->txringcount[i];
 		}
 	}
 	spin_unlock_irqrestore(&rtlpriv->locks.irq_th_lock, flags);
@@ -1787,6 +1794,7 @@ static int rtl_pci_start(struct ieee80211_hw *hw)
 	struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
 	struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw));
 	struct rtl_mac *rtlmac = rtl_mac(rtl_priv(hw));
+	struct rtl_btc_ops *btc_ops = rtlpriv->btcoexist.btc_ops;
 
 	int err;
 
@@ -1796,9 +1804,12 @@ static int rtl_pci_start(struct ieee80211_hw *hw)
 	if (rtlpriv->cfg->ops->get_btc_status &&
 	    rtlpriv->cfg->ops->get_btc_status()) {
 		rtlpriv->btcoexist.btc_info.ap_num = 36;
-		rtlpriv->btcoexist.btc_ops->btc_init_variables(rtlpriv);
-		rtlpriv->btcoexist.btc_ops->btc_init_hal_vars(rtlpriv);
+		btc_ops->btc_init_variables(rtlpriv);
+		btc_ops->btc_init_hal_vars(rtlpriv);
+	} else if (btc_ops) {
+		btc_ops->btc_init_variables_wifi_only(rtlpriv);
 	}
+
 	err = rtlpriv->cfg->ops->hw_init(hw);
 	if (err) {
 		RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
@@ -1834,7 +1845,10 @@ static void rtl_pci_stop(struct ieee80211_hw *hw)
 	u8 rf_timeout = 0;
 
 	if (rtlpriv->cfg->ops->get_btc_status())
-		rtlpriv->btcoexist.btc_ops->btc_halt_notify();
+		rtlpriv->btcoexist.btc_ops->btc_halt_notify(rtlpriv);
+
+	if (rtlpriv->btcoexist.btc_ops)
+		rtlpriv->btcoexist.btc_ops->btc_deinit_variables(rtlpriv);
 
 	/*should be before disable interrupt&adapter
 	 *and will do it immediately.
@@ -2302,6 +2316,9 @@ int rtl_pci_probe(struct pci_dev *pdev,
 	}
 	rtlpriv->mac80211.mac80211_registered = 1;
 
+	/* add debugfs entries */
+	rtl_debug_add_one(hw);
+
 	/*init rfkill */
 	rtl_init_rfkill(hw);	/* Init PCI sw */
 
@@ -2350,6 +2367,9 @@ void rtl_pci_disconnect(struct pci_dev *pdev)
 	wait_for_completion(&rtlpriv->firmware_loading_complete);
 	clear_bit(RTL_STATUS_INTERFACE_START, &rtlpriv->status);
 
+	/* remove debugfs entries */
+	rtl_debug_remove_one(hw);
+
 	/*ieee80211_unregister_hw will call ops_stop */
 	if (rtlmac->mac80211_registered == 1) {
 		ieee80211_unregister_hw(hw);
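The pci.c hunks collapse the four u32 out-parameters of interrupt_recognized() into one struct rtl_int. The struct definition belongs to wifi.h and is not visible in this diff; judging from the intvec.inta/intb/intd uses it is presumably along these lines:

/* Assumed layout, inferred from the call sites above; the
 * authoritative definition lives in wifi.h. */
struct rtl_int {
	u32 inta;
	u32 intb;
	u32 intc;
	u32 intd;
};

Passing one aggregate means a chip that only latches ISR A and B (most of the older parts) simply leaves intc/intd at zero, and 8822BE can grow an intd consumer without every other chip's prototype changing again.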
diff --git a/drivers/net/wireless/realtek/rtlwifi/pci.h b/drivers/net/wireless/realtek/rtlwifi/pci.h
index e7d070e8da2d..3fb56c845a61 100644
--- a/drivers/net/wireless/realtek/rtlwifi/pci.h
+++ b/drivers/net/wireless/realtek/rtlwifi/pci.h
@@ -173,7 +173,6 @@ struct rtl8192_tx_ring {
 	/*add for new trx flow*/
 	struct rtl_tx_buffer_desc *buffer_desc; /*tx buffer descriptor*/
 	dma_addr_t buffer_desc_dma; /*tx bufferd desc dma memory*/
-	u16 avl_desc; /* available_desc_to_write */
 	u16 cur_tx_wp; /* current_tx_write_point */
 	u16 cur_tx_rp; /* current_tx_read_point */
 };
@@ -320,10 +319,10 @@ static inline void pci_write32_async(struct rtl_priv *rtlpriv,
 	writel(val, (u8 __iomem *)rtlpriv->io.pci_mem_start + addr);
 }
 
-static inline u16 calc_fifo_space(u16 rp, u16 wp)
+static inline u16 calc_fifo_space(u16 rp, u16 wp, u16 size)
 {
 	if (rp <= wp)
-		return RTL_PCI_MAX_RX_COUNT - 1 + rp - wp;
+		return size - 1 + rp - wp;
 	return rp - wp - 1;
 }
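calc_fifo_space() now takes the ring size instead of hard-coding RTL_PCI_MAX_RX_COUNT, so one helper serves both the RX ring and the 92EE TX ring (TX_DESC_NUM_92E). The arithmetic keeps one slot unused so that rp == wp can only mean "empty"; a standalone check of the formula on a hypothetical 256-entry ring:

#include <assert.h>
#include <stdint.h>

static uint16_t calc_fifo_space(uint16_t rp, uint16_t wp, uint16_t size)
{
	if (rp <= wp)
		return size - 1 + rp - wp;
	return rp - wp - 1;
}

int main(void)
{
	assert(calc_fifo_space(0, 0, 256) == 255);	/* empty ring */
	assert(calc_fifo_space(10, 250, 256) == 15);	/* writer wrapped close to reader */
	assert(calc_fifo_space(250, 10, 256) == 239);	/* reader ahead of writer */
	return 0;
}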
 
diff --git a/drivers/net/wireless/realtek/rtlwifi/ps.c b/drivers/net/wireless/realtek/rtlwifi/ps.c
index 24c87fae5382..71af24e2e051 100644
--- a/drivers/net/wireless/realtek/rtlwifi/ps.c
+++ b/drivers/net/wireless/realtek/rtlwifi/ps.c
@@ -51,6 +51,11 @@ bool rtl_ps_enable_nic(struct ieee80211_hw *hw)
 			&rtlmac->retry_long);
 	RT_CLEAR_PS_LEVEL(ppsc, RT_RF_OFF_LEVL_HALT_NIC);
 
+	rtlpriv->cfg->ops->switch_channel(hw);
+	rtlpriv->cfg->ops->set_channel_access(hw);
+	rtlpriv->cfg->ops->set_bw_mode(hw,
+			cfg80211_get_chandef_type(&hw->conf.chandef));
+
 	/*<3> Enable Interrupt */
 	rtlpriv->cfg->ops->enable_interrupt(hw);
 
@@ -289,7 +294,7 @@ void rtl_ips_nic_on(struct ieee80211_hw *hw)
 
 	cancel_delayed_work(&rtlpriv->works.ips_nic_off_wq);
 
-	spin_lock(&rtlpriv->locks.ips_lock);
+	mutex_lock(&rtlpriv->locks.ips_mutex);
 	if (ppsc->inactiveps) {
 		rtstate = ppsc->rfpwr_state;
 
@@ -306,7 +311,7 @@ void rtl_ips_nic_on(struct ieee80211_hw *hw)
 									ppsc->inactive_pwrstate);
 		}
 	}
-	spin_unlock(&rtlpriv->locks.ips_lock);
+	mutex_unlock(&rtlpriv->locks.ips_mutex);
 }
 EXPORT_SYMBOL_GPL(rtl_ips_nic_on);
 
@@ -415,7 +420,6 @@ static void rtl_lps_enter_core(struct ieee80211_hw *hw)
 	struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
 	struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw));
 	struct rtl_priv *rtlpriv = rtl_priv(hw);
-	unsigned long flag;
 
 	if (!ppsc->fwctrl_lps)
 		return;
@@ -436,7 +440,7 @@ static void rtl_lps_enter_core(struct ieee80211_hw *hw)
 	if (mac->link_state != MAC80211_LINKED)
 		return;
 
-	spin_lock_irqsave(&rtlpriv->locks.lps_lock, flag);
+	mutex_lock(&rtlpriv->locks.lps_mutex);
 
 	/* Don't need to check (ppsc->dot11_psmode == EACTIVE), because
 	 * bt_ccoexist may ask to enter lps.
@@ -446,7 +450,7 @@ static void rtl_lps_enter_core(struct ieee80211_hw *hw)
 		 "Enter 802.11 power save mode...\n");
 	rtl_lps_set_psmode(hw, EAUTOPS);
 
-	spin_unlock_irqrestore(&rtlpriv->locks.lps_lock, flag);
+	mutex_unlock(&rtlpriv->locks.lps_mutex);
 }
 
 /* Interrupt safe routine to leave the leisure power save mode.*/
@@ -455,9 +459,8 @@ static void rtl_lps_leave_core(struct ieee80211_hw *hw)
 	struct rtl_priv *rtlpriv = rtl_priv(hw);
 	struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw));
 	struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
-	unsigned long flag;
 
-	spin_lock_irqsave(&rtlpriv->locks.lps_lock, flag);
+	mutex_lock(&rtlpriv->locks.lps_mutex);
 
 	if (ppsc->fwctrl_lps) {
 		if (ppsc->dot11_psmode != EACTIVE) {
@@ -478,7 +481,7 @@ static void rtl_lps_leave_core(struct ieee80211_hw *hw)
 			rtl_lps_set_psmode(hw, EACTIVE);
 		}
 	}
-	spin_unlock_irqrestore(&rtlpriv->locks.lps_lock, flag);
+	mutex_unlock(&rtlpriv->locks.lps_mutex);
 }
 
 /* For sw LPS*/
@@ -568,7 +571,6 @@ void rtl_swlps_rf_awake(struct ieee80211_hw *hw)
 	struct rtl_priv *rtlpriv = rtl_priv(hw);
 	struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw));
 	struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
-	unsigned long flag;
 
 	if (!rtlpriv->psc.swctrl_lps)
 		return;
@@ -581,9 +583,9 @@ void rtl_swlps_rf_awake(struct ieee80211_hw *hw)
 		RT_CLEAR_PS_LEVEL(ppsc, RT_PS_LEVEL_ASPM);
 	}
 
-	spin_lock_irqsave(&rtlpriv->locks.lps_lock, flag);
+	mutex_lock(&rtlpriv->locks.lps_mutex);
 	rtl_ps_set_rf_state(hw, ERFON, RF_CHANGE_BY_PS);
-	spin_unlock_irqrestore(&rtlpriv->locks.lps_lock, flag);
+	mutex_unlock(&rtlpriv->locks.lps_mutex);
 }
 
 void rtl_swlps_rfon_wq_callback(void *data)
@@ -600,7 +602,6 @@ void rtl_swlps_rf_sleep(struct ieee80211_hw *hw)
 	struct rtl_priv *rtlpriv = rtl_priv(hw);
 	struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
 	struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw));
-	unsigned long flag;
 	u8 sleep_intv;
 
 	if (!rtlpriv->psc.sw_ps_enabled)
@@ -624,9 +625,9 @@ void rtl_swlps_rf_sleep(struct ieee80211_hw *hw)
 	}
 	spin_unlock(&rtlpriv->locks.rf_ps_lock);
 
-	spin_lock_irqsave(&rtlpriv->locks.lps_lock, flag);
+	mutex_lock(&rtlpriv->locks.lps_mutex);
 	rtl_ps_set_rf_state(hw, ERFSLEEP, RF_CHANGE_BY_PS);
-	spin_unlock_irqrestore(&rtlpriv->locks.lps_lock, flag);
+	mutex_unlock(&rtlpriv->locks.lps_mutex);
 
 	if (ppsc->reg_rfps_level & RT_RF_OFF_LEVL_ASPM &&
 	    !RT_IN_PS_LEVEL(ppsc, RT_PS_LEVEL_ASPM)) {
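The ps.c hunks trade the ips/lps spinlocks for mutexes. The hedged reading is that the protected sections end up calling operations that can sleep (firmware commands, power-sequencing register writes), which a spinlock forbids; the cost is that the mutex paths may no longer be entered from interrupt context, so every caller has to run in process context. The shape of the conversion, as a sketch:

#include <linux/mutex.h>
#include <linux/spinlock.h>

struct example_locks {
	spinlock_t lps_lock;	/* old: critical section must not sleep */
	struct mutex lps_mutex;	/* new: critical section may sleep */
};

static void old_style(struct example_locks *l)
{
	unsigned long flags;

	spin_lock_irqsave(&l->lps_lock, flags);
	/* nothing here may sleep */
	spin_unlock_irqrestore(&l->lps_lock, flags);
}

static void new_style(struct example_locks *l)
{
	mutex_lock(&l->lps_mutex);
	/* may sleep: H2C commands, usleep_range(), etc. */
	mutex_unlock(&l->lps_mutex);
}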
diff --git a/drivers/net/wireless/realtek/rtlwifi/rc.c b/drivers/net/wireless/realtek/rtlwifi/rc.c
index 02811eda57cd..d1cb7d405618 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rc.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rc.c
@@ -123,7 +123,7 @@ static void _rtl_rc_rate_set_series(struct rtl_priv *rtlpriv,
 			if (sta && (sta->ht_cap.cap &
 				    IEEE80211_HT_CAP_SUP_WIDTH_20_40))
 				rate->flags |= IEEE80211_TX_RC_40_MHZ_WIDTH;
-			if (sta && (sta->vht_cap.vht_supported))
+			if (sta && sta->vht_cap.vht_supported)
 				rate->flags |= IEEE80211_TX_RC_80_MHZ_WIDTH;
 		} else {
 			if (mac->bw_40)
@@ -135,8 +135,8 @@ static void _rtl_rc_rate_set_series(struct rtl_priv *rtlpriv,
 		if (sgi_20 || sgi_40 || sgi_80)
 			rate->flags |= IEEE80211_TX_RC_SHORT_GI;
 		if (sta && sta->ht_cap.ht_supported &&
-		    ((wireless_mode == WIRELESS_MODE_N_5G) ||
-		     (wireless_mode == WIRELESS_MODE_N_24G)))
+		    (wireless_mode == WIRELESS_MODE_N_5G ||
+		     wireless_mode == WIRELESS_MODE_N_24G))
 			rate->flags |= IEEE80211_TX_RC_MCS;
 		if (sta && sta->vht_cap.vht_supported &&
 		    (wireless_mode == WIRELESS_MODE_AC_5G ||
@@ -216,8 +216,8 @@ static void rtl_tx_status(void *ppriv,
 
 	if (sta) {
 		/* Check if aggregation has to be enabled for this tid */
-		sta_entry = (struct rtl_sta_info *) sta->drv_priv;
-		if ((sta->ht_cap.ht_supported) &&
+		sta_entry = (struct rtl_sta_info *)sta->drv_priv;
+		if (sta->ht_cap.ht_supported &&
 		    !(skb->protocol == cpu_to_be16(ETH_P_PAE))) {
 			if (ieee80211_is_data_qos(fc)) {
 				u8 tid = rtl_get_tid(skb);
@@ -265,11 +265,9 @@ static void *rtl_rate_alloc_sta(void *ppriv,
 	struct rtl_priv *rtlpriv = ppriv;
 	struct rtl_rate_priv *rate_priv;
 
-	rate_priv = kzalloc(sizeof(struct rtl_rate_priv), gfp);
-	if (!rate_priv) {
-		pr_err("Unable to allocate private rc structure\n");
+	rate_priv = kzalloc(sizeof(*rate_priv), gfp);
+	if (!rate_priv)
 		return NULL;
-	}
 
 	rtlpriv->rate_priv = rate_priv;
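Two small idioms in the rc.c hunk are worth naming: allocating with sizeof(*rate_priv) keeps the size correct if the pointer's type ever changes, and the out-of-memory printk is dropped because the allocator already logs a warning with a backtrace on failure. A sketch with a made-up type:

#include <linux/slab.h>

struct widget {			/* hypothetical example type */
	int a;
	int b;
};

static struct widget *widget_alloc(gfp_t gfp)
{
	/* sizeof(*w) tracks the pointee type automatically, and no
	 * pr_err() is needed on failure -- the mm core already warns. */
	struct widget *w = kzalloc(sizeof(*w), gfp);

	return w;	/* NULL on allocation failure */
}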
 
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/fw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/fw.c
index a2eca669873b..63874512598b 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/fw.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/fw.c
@@ -141,6 +141,8 @@ int rtl88e_download_fw(struct ieee80211_hw *hw,
 		return 1;
 
 	pfwheader = (struct rtlwifi_firmware_header *)rtlhal->pfirmware;
+	rtlhal->fw_version = le16_to_cpu(pfwheader->version);
+	rtlhal->fw_subversion = pfwheader->subversion;
 	pfwdata = rtlhal->pfirmware;
 	fwsize = rtlhal->fwsize;
 	RT_TRACE(rtlpriv, COMP_FW, DBG_DMESG,
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/hw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/hw.c
index e30a18e64ff5..988d5ac57d02 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/hw.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/hw.c
@@ -1472,17 +1472,16 @@ void rtl88ee_card_disable(struct ieee80211_hw *hw)
 }
 
 void rtl88ee_interrupt_recognized(struct ieee80211_hw *hw,
-				  u32 *p_inta, u32 *p_intb,
-				  u32 *p_intc, u32 *p_intd)
+				  struct rtl_int *intvec)
 {
 	struct rtl_priv *rtlpriv = rtl_priv(hw);
 	struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
 
-	*p_inta = rtl_read_dword(rtlpriv, ISR) & rtlpci->irq_mask[0];
-	rtl_write_dword(rtlpriv, ISR, *p_inta);
+	intvec->inta = rtl_read_dword(rtlpriv, ISR) & rtlpci->irq_mask[0];
+	rtl_write_dword(rtlpriv, ISR, intvec->inta);
 
-	*p_intb = rtl_read_dword(rtlpriv, REG_HISRE) & rtlpci->irq_mask[1];
-	rtl_write_dword(rtlpriv, REG_HISRE, *p_intb);
+	intvec->intb = rtl_read_dword(rtlpriv, REG_HISRE) & rtlpci->irq_mask[1];
+	rtl_write_dword(rtlpriv, REG_HISRE, intvec->intb);
 
 }
 
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/hw.h b/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/hw.h
index cdf49de1e6ed..214cd2a32018 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/hw.h
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/hw.h
@@ -29,8 +29,7 @@
 void rtl88ee_get_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val);
 void rtl88ee_read_eeprom_info(struct ieee80211_hw *hw);
 void rtl88ee_interrupt_recognized(struct ieee80211_hw *hw,
-				  u32 *p_inta, u32 *p_intb,
-				  u32 *p_intc, u32 *p_intd);
+				  struct rtl_int *int_vec);
 int rtl88ee_hw_init(struct ieee80211_hw *hw);
 void rtl88ee_card_disable(struct ieee80211_hw *hw);
 void rtl88ee_enable_interrupt(struct ieee80211_hw *hw);
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/hw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/hw.c
index 0f4c86a28716..4a81e0ef4b8e 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/hw.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/hw.c
@@ -1375,19 +1375,13 @@ void rtl92ce_card_disable(struct ieee80211_hw *hw)
 }
 
 void rtl92ce_interrupt_recognized(struct ieee80211_hw *hw,
-				  u32 *p_inta, u32 *p_intb,
-				  u32 *p_intc, u32 *p_intd)
+				  struct rtl_int *intvec)
 {
 	struct rtl_priv *rtlpriv = rtl_priv(hw);
 	struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
 
-	*p_inta = rtl_read_dword(rtlpriv, ISR) & rtlpci->irq_mask[0];
-	rtl_write_dword(rtlpriv, ISR, *p_inta);
-
-	/*
-	 * *p_intb = rtl_read_dword(rtlpriv, REG_HISRE) & rtlpci->irq_mask[1];
-	 * rtl_write_dword(rtlpriv, ISR + 4, *p_intb);
-	 */
+	intvec->inta = rtl_read_dword(rtlpriv, ISR) & rtlpci->irq_mask[0];
+	rtl_write_dword(rtlpriv, ISR, intvec->inta);
 }
 
 void rtl92ce_set_beacon_related_registers(struct ieee80211_hw *hw)
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/hw.h b/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/hw.h
index b5c8e2fc1ba2..6711ea1a75d9 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/hw.h
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/hw.h
@@ -42,8 +42,7 @@ static inline u8 rtl92c_get_chnl_group(u8 chnl)
 void rtl92ce_get_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val);
 void rtl92ce_read_eeprom_info(struct ieee80211_hw *hw);
 void rtl92ce_interrupt_recognized(struct ieee80211_hw *hw,
-				  u32 *p_inta, u32 *p_intb,
-				  u32 *p_intc, u32 *p_intd);
+				  struct rtl_int *int_vec);
 int rtl92ce_hw_init(struct ieee80211_hw *hw);
 void rtl92ce_card_disable(struct ieee80211_hw *hw);
 void rtl92ce_enable_interrupt(struct ieee80211_hw *hw);
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192de/hw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192de/hw.c
index 0da6c0136857..80123fd97221 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192de/hw.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192de/hw.c
@@ -1356,19 +1356,13 @@ void rtl92de_card_disable(struct ieee80211_hw *hw)
 }
 
 void rtl92de_interrupt_recognized(struct ieee80211_hw *hw,
-				  u32 *p_inta, u32 *p_intb,
-				  u32 *p_intc, u32 *p_intd)
+				  struct rtl_int *intvec)
 {
 	struct rtl_priv *rtlpriv = rtl_priv(hw);
 	struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
 
-	*p_inta = rtl_read_dword(rtlpriv, ISR) & rtlpci->irq_mask[0];
-	rtl_write_dword(rtlpriv, ISR, *p_inta);
-
-	/*
-	 * *p_intb = rtl_read_dword(rtlpriv, REG_HISRE) & rtlpci->irq_mask[1];
-	 * rtl_write_dword(rtlpriv, ISR + 4, *p_intb);
-	 */
+	intvec->inta = rtl_read_dword(rtlpriv, ISR) & rtlpci->irq_mask[0];
+	rtl_write_dword(rtlpriv, ISR, intvec->inta);
 }
 
 void rtl92de_set_beacon_related_registers(struct ieee80211_hw *hw)
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192de/hw.h b/drivers/net/wireless/realtek/rtlwifi/rtl8192de/hw.h
index 9236aa91273d..e6c702e69ecf 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192de/hw.h
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192de/hw.h
@@ -29,8 +29,7 @@
 void rtl92de_get_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val);
 void rtl92de_read_eeprom_info(struct ieee80211_hw *hw);
 void rtl92de_interrupt_recognized(struct ieee80211_hw *hw,
-				  u32 *p_inta, u32 *p_intb,
-				  u32 *p_intc, u32 *p_intd);
+				  struct rtl_int *int_vec);
 int rtl92de_hw_init(struct ieee80211_hw *hw);
 void rtl92de_card_disable(struct ieee80211_hw *hw);
 void rtl92de_enable_interrupt(struct ieee80211_hw *hw);
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/hw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/hw.c
index fe5da637e77a..fd7928fdbd1a 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/hw.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/hw.c
@@ -1694,17 +1694,16 @@ void rtl92ee_card_disable(struct ieee80211_hw *hw)
 }
 
 void rtl92ee_interrupt_recognized(struct ieee80211_hw *hw,
-				  u32 *p_inta, u32 *p_intb,
-				  u32 *p_intc, u32 *p_intd)
+				  struct rtl_int *intvec)
 {
 	struct rtl_priv *rtlpriv = rtl_priv(hw);
 	struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
 
-	*p_inta = rtl_read_dword(rtlpriv, ISR) & rtlpci->irq_mask[0];
-	rtl_write_dword(rtlpriv, ISR, *p_inta);
+	intvec->inta = rtl_read_dword(rtlpriv, ISR) & rtlpci->irq_mask[0];
+	rtl_write_dword(rtlpriv, ISR, intvec->inta);
 
-	*p_intb = rtl_read_dword(rtlpriv, REG_HISRE) & rtlpci->irq_mask[1];
-	rtl_write_dword(rtlpriv, REG_HISRE, *p_intb);
+	intvec->intb = rtl_read_dword(rtlpriv, REG_HISRE) & rtlpci->irq_mask[1];
+	rtl_write_dword(rtlpriv, REG_HISRE, intvec->intb);
 }
 
 void rtl92ee_set_beacon_related_registers(struct ieee80211_hw *hw)
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/hw.h b/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/hw.h
index cd6d3322f033..3a63bec9b0cc 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/hw.h
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/hw.h
@@ -29,8 +29,7 @@
 void rtl92ee_get_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val);
 void rtl92ee_read_eeprom_info(struct ieee80211_hw *hw);
 void rtl92ee_interrupt_recognized(struct ieee80211_hw *hw,
-				  u32 *p_inta, u32 *p_intb,
-				  u32 *p_intc, u32 *p_intd);
+				  struct rtl_int *int_vec);
 int rtl92ee_hw_init(struct ieee80211_hw *hw);
 void rtl92ee_card_disable(struct ieee80211_hw *hw);
 void rtl92ee_enable_interrupt(struct ieee80211_hw *hw);
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/trx.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/trx.c
index 12255682e890..4f7444331b07 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/trx.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/trx.c
@@ -498,7 +498,8 @@ u16 rtl92ee_rx_desc_buff_remained_cnt(struct ieee80211_hw *hw, u8 queue_index)
 	if (!start_rx)
 		return 0;
 
-	remind_cnt = calc_fifo_space(read_point, write_point);
+	remind_cnt = calc_fifo_space(read_point, write_point,
+				     RTL_PCI_MAX_RX_COUNT);
 
 	if (remind_cnt == 0)
 		return 0;
@@ -548,7 +549,6 @@ static u16 get_desc_addr_fr_q_idx(u16 queue_index)
 
 u16 rtl92ee_get_available_desc(struct ieee80211_hw *hw, u8 q_idx)
 {
-	struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
 	struct rtl_priv *rtlpriv = rtl_priv(hw);
 	u16 point_diff = 0;
 	u16 current_tx_read_point = 0, current_tx_write_point = 0;
@@ -560,9 +560,9 @@ u16 rtl92ee_get_available_desc(struct ieee80211_hw *hw, u8 q_idx)
 	current_tx_write_point = (u16)((tmp_4byte) & 0x0fff);
 
 	point_diff = calc_fifo_space(current_tx_read_point,
-				     current_tx_write_point);
+				     current_tx_write_point,
+				     TX_DESC_NUM_92E);
 
-	rtlpci->tx_ring[q_idx].avl_desc = point_diff;
 	return point_diff;
 }
 
@@ -907,10 +907,6 @@ void rtl92ee_set_desc(struct ieee80211_hw *hw, u8 *pdesc, bool istx,
 		      u8 desc_name, u8 *val)
 {
 	struct rtl_priv *rtlpriv = rtl_priv(hw);
-	u16 cur_tx_rp = 0;
-	u16 cur_tx_wp = 0;
-	static bool over_run;
-	u32 tmp = 0;
 	u8 q_idx = *val;
 	bool dma64 = rtlpriv->cfg->mod_params->dma64;
 
@@ -931,38 +927,12 @@ void rtl92ee_set_desc(struct ieee80211_hw *hw, u8 *pdesc, bool istx,
 				return;
 			}
 
+			/* the caller must ensure a tx desc is available */
 			ring->cur_tx_wp = ((ring->cur_tx_wp + 1) % max_tx_desc);
 
-			if (over_run) {
-				ring->cur_tx_wp = 0;
-				over_run = false;
-			}
-			if (ring->avl_desc > 1) {
-				ring->avl_desc--;
-
-				rtl_write_word(rtlpriv,
-					       get_desc_addr_fr_q_idx(q_idx),
-					       ring->cur_tx_wp);
-			}
-
-			if (ring->avl_desc < (max_tx_desc - 15)) {
-				u16 point_diff = 0;
-
-				tmp =
-				  rtl_read_dword(rtlpriv,
-						 get_desc_addr_fr_q_idx(q_idx));
-				cur_tx_rp = (u16)((tmp >> 16) & 0x0fff);
-				cur_tx_wp = (u16)(tmp & 0x0fff);
-
-				ring->cur_tx_wp = cur_tx_wp;
-				ring->cur_tx_rp = cur_tx_rp;
-				point_diff = ((cur_tx_rp > cur_tx_wp) ?
-					      (cur_tx_rp - cur_tx_wp) :
-					      (TX_DESC_NUM_92E - 1 -
-					       cur_tx_wp + cur_tx_rp));
-
-				ring->avl_desc = point_diff;
-			}
+			rtl_write_word(rtlpriv,
+				       get_desc_addr_fr_q_idx(q_idx),
+				       ring->cur_tx_wp);
 		}
 		break;
 		}
@@ -1044,13 +1014,12 @@ bool rtl92ee_is_tx_desc_closed(struct ieee80211_hw *hw, u8 hw_queue, u16 index)
 {
 	struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
 	struct rtl_priv *rtlpriv = rtl_priv(hw);
-	u16 read_point, write_point, available_desc_num;
+	u16 read_point, write_point;
 	bool ret = false;
 	static u8 stop_report_cnt;
 	struct rtl8192_tx_ring *ring = &rtlpci->tx_ring[hw_queue];
 
 	{
-		u16 point_diff = 0;
 		u16 cur_tx_rp, cur_tx_wp;
 		u32 tmpu32 = 0;
 
@@ -1060,18 +1029,12 @@ bool rtl92ee_is_tx_desc_closed(struct ieee80211_hw *hw, u8 hw_queue, u16 index)
 		cur_tx_rp = (u16)((tmpu32 >> 16) & 0x0fff);
 		cur_tx_wp = (u16)(tmpu32 & 0x0fff);
 
-		ring->cur_tx_wp = cur_tx_wp;
+		/* no need to update ring->cur_tx_wp here */
 		ring->cur_tx_rp = cur_tx_rp;
-		point_diff = ((cur_tx_rp > cur_tx_wp) ?
-			      (cur_tx_rp - cur_tx_wp) :
-			      (TX_DESC_NUM_92E - cur_tx_wp + cur_tx_rp));
-
-		ring->avl_desc = point_diff;
 	}
 
 	read_point = ring->cur_tx_rp;
 	write_point = ring->cur_tx_wp;
-	available_desc_num = ring->avl_desc;
 
 	if (write_point > read_point) {
 		if (index < write_point && index >= read_point)
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192se/hw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192se/hw.c
index 76bf089cced4..30dea7b9bc17 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192se/hw.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192se/hw.c
@@ -1558,17 +1558,17 @@ void rtl92se_card_disable(struct ieee80211_hw *hw)
 	udelay(100);
 }
 
-void rtl92se_interrupt_recognized(struct ieee80211_hw *hw, u32 *p_inta,
-			     u32 *p_intb, u32 *p_intc, u32 *p_intd)
+void rtl92se_interrupt_recognized(struct ieee80211_hw *hw,
+				  struct rtl_int *intvec)
 {
 	struct rtl_priv *rtlpriv = rtl_priv(hw);
 	struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
 
-	*p_inta = rtl_read_dword(rtlpriv, ISR) & rtlpci->irq_mask[0];
-	rtl_write_dword(rtlpriv, ISR, *p_inta);
+	intvec->inta = rtl_read_dword(rtlpriv, ISR) & rtlpci->irq_mask[0];
+	rtl_write_dword(rtlpriv, ISR, intvec->inta);
 
-	*p_intb = rtl_read_dword(rtlpriv, ISR + 4) & rtlpci->irq_mask[1];
-	rtl_write_dword(rtlpriv, ISR + 4, *p_intb);
+	intvec->intb = rtl_read_dword(rtlpriv, ISR + 4) & rtlpci->irq_mask[1];
+	rtl_write_dword(rtlpriv, ISR + 4, intvec->intb);
 }
 
 void rtl92se_set_beacon_related_registers(struct ieee80211_hw *hw)
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192se/hw.h b/drivers/net/wireless/realtek/rtlwifi/rtl8192se/hw.h
index 607056010974..fa836ceefc8f 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192se/hw.h
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192se/hw.h
@@ -42,8 +42,7 @@ void rtl92se_get_hw_reg(struct ieee80211_hw *hw,
 			u8 variable, u8 *val);
 void rtl92se_read_eeprom_info(struct ieee80211_hw *hw);
 void rtl92se_interrupt_recognized(struct ieee80211_hw *hw,
-				  u32 *p_inta, u32 *p_intb,
-				  u32 *p_intc, u32 *p_intd);
+				  struct rtl_int *int_vec);
 int rtl92se_hw_init(struct ieee80211_hw *hw);
 void rtl92se_card_disable(struct ieee80211_hw *hw);
 void rtl92se_enable_interrupt(struct ieee80211_hw *hw);
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/hw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/hw.c
index c3f98d58124c..545115db507e 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/hw.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/hw.c
@@ -1340,14 +1340,13 @@ void rtl8723e_card_disable(struct ieee80211_hw *hw)
 }
 
 void rtl8723e_interrupt_recognized(struct ieee80211_hw *hw,
-				   u32 *p_inta, u32 *p_intb,
-				   u32 *p_intc, u32 *p_intd)
+				   struct rtl_int *intvec)
 {
 	struct rtl_priv *rtlpriv = rtl_priv(hw);
 	struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
 
-	*p_inta = rtl_read_dword(rtlpriv, 0x3a0) & rtlpci->irq_mask[0];
-	rtl_write_dword(rtlpriv, 0x3a0, *p_inta);
+	intvec->inta = rtl_read_dword(rtlpriv, 0x3a0) & rtlpci->irq_mask[0];
+	rtl_write_dword(rtlpriv, 0x3a0, intvec->inta);
 }
 
 void rtl8723e_set_beacon_related_registers(struct ieee80211_hw *hw)
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/hw.h b/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/hw.h
index 19e467a37c72..c76e453f4f43 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/hw.h
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/hw.h
@@ -34,8 +34,7 @@ void rtl8723e_get_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val);
 void rtl8723e_read_eeprom_info(struct ieee80211_hw *hw);
 
 void rtl8723e_interrupt_recognized(struct ieee80211_hw *hw,
-				   u32 *p_inta, u32 *p_intb,
-				   u32 *p_intc, u32 *p_intd);
+				   struct rtl_int *int_vec);
 int rtl8723e_hw_init(struct ieee80211_hw *hw);
 void rtl8723e_card_disable(struct ieee80211_hw *hw);
 void rtl8723e_enable_interrupt(struct ieee80211_hw *hw);
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8723be/hw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8723be/hw.c
index 7cd1ffa7d4a7..f9ccd13c79f9 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8723be/hw.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8723be/hw.c
@@ -43,6 +43,7 @@
 #include "../pwrseqcmd.h"
 #include "pwrseq.h"
 #include "../btcoexist/rtl_btc.h"
+#include <linux/kernel.h>
 
 #define LLT_CONFIG	5
 
@@ -1682,18 +1683,17 @@ void rtl8723be_card_disable(struct ieee80211_hw *hw)
 }
 
 void rtl8723be_interrupt_recognized(struct ieee80211_hw *hw,
-				    u32 *p_inta, u32 *p_intb,
-				    u32 *p_intc, u32 *p_intd)
+				    struct rtl_int *intvec)
 {
 	struct rtl_priv *rtlpriv = rtl_priv(hw);
 	struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
 
-	*p_inta = rtl_read_dword(rtlpriv, ISR) & rtlpci->irq_mask[0];
-	rtl_write_dword(rtlpriv, ISR, *p_inta);
+	intvec->inta = rtl_read_dword(rtlpriv, ISR) & rtlpci->irq_mask[0];
+	rtl_write_dword(rtlpriv, ISR, intvec->inta);
 
-	*p_intb = rtl_read_dword(rtlpriv, REG_HISRE) &
-					rtlpci->irq_mask[1];
-	rtl_write_dword(rtlpriv, REG_HISRE, *p_intb);
+	intvec->intb = rtl_read_dword(rtlpriv, REG_HISRE) &
+				      rtlpci->irq_mask[1];
+	rtl_write_dword(rtlpriv, REG_HISRE, intvec->intb);
 }
 
 void rtl8723be_set_beacon_related_registers(struct ieee80211_hw *hw)
@@ -2127,28 +2127,28 @@ static void _rtl8723be_read_adapter_info(struct ieee80211_hw *hw,
 
 	if (rtlhal->oem_id == RT_CID_DEFAULT) {
 		/* Does this one have a Toshiba SMID from group 1? */
-		for (i = 0; i < sizeof(toshiba_smid1) / sizeof(u16); i++) {
+		for (i = 0; i < ARRAY_SIZE(toshiba_smid1); i++) {
 			if (rtlefuse->eeprom_smid == toshiba_smid1[i]) {
 				is_toshiba_smid1 = true;
 				break;
 			}
 		}
 		/* Does this one have a Toshiba SMID from group 2? */
-		for (i = 0; i < sizeof(toshiba_smid2) / sizeof(u16); i++) {
+		for (i = 0; i < ARRAY_SIZE(toshiba_smid2); i++) {
 			if (rtlefuse->eeprom_smid == toshiba_smid2[i]) {
 				is_toshiba_smid2 = true;
 				break;
 			}
 		}
 		/* Does this one have a Samsung SMID? */
-		for (i = 0; i < sizeof(samsung_smid) / sizeof(u16); i++) {
+		for (i = 0; i < ARRAY_SIZE(samsung_smid); i++) {
 			if (rtlefuse->eeprom_smid == samsung_smid[i]) {
 				is_samsung_smid = true;
 				break;
 			}
 		}
 		/* Does this one have a Lenovo SMID? */
-		for (i = 0; i < sizeof(lenovo_smid) / sizeof(u16); i++) {
+		for (i = 0; i < ARRAY_SIZE(lenovo_smid); i++) {
 			if (rtlefuse->eeprom_smid == lenovo_smid[i]) {
 				is_lenovo_smid = true;
 				break;
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8723be/hw.h b/drivers/net/wireless/realtek/rtlwifi/rtl8723be/hw.h
index 2215a792f6bf..ae856a19e81a 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8723be/hw.h
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8723be/hw.h
@@ -30,8 +30,7 @@ void rtl8723be_get_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val);
 void rtl8723be_read_eeprom_info(struct ieee80211_hw *hw);
 
 void rtl8723be_interrupt_recognized(struct ieee80211_hw *hw,
-				    u32 *p_inta, u32 *p_intb,
-				    u32 *p_intc, u32 *p_intd);
+				    struct rtl_int *int_vec);
 int rtl8723be_hw_init(struct ieee80211_hw *hw);
 void rtl8723be_card_disable(struct ieee80211_hw *hw);
 void rtl8723be_enable_interrupt(struct ieee80211_hw *hw);
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8723be/phy.c b/drivers/net/wireless/realtek/rtlwifi/rtl8723be/phy.c
index 9606641519e7..1263b12db5dc 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8723be/phy.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8723be/phy.c
@@ -35,6 +35,7 @@
 #include "../rtl8723com/dm_common.h"
 #include "table.h"
 #include "trx.h"
+#include <linux/kernel.h>
 
 static bool _rtl8723be_phy_bb8723b_config_parafile(struct ieee80211_hw *hw);
 static bool _rtl8723be_phy_config_mac_with_headerfile(struct ieee80211_hw *hw);
@@ -1143,14 +1144,13 @@ void rtl8723be_phy_set_txpower_level(struct ieee80211_hw *hw, u8 channel)
 			     DESC92C_RATEMCS2, DESC92C_RATEMCS3,
 			     DESC92C_RATEMCS4, DESC92C_RATEMCS5,
 			     DESC92C_RATEMCS6, DESC92C_RATEMCS7};
-	u8 i, size;
+	u8 i;
 	u8 power_index;
 
 	if (!rtlefuse->txpwr_fromeprom)
 		return;
 
-	size = sizeof(cck_rates) / sizeof(u8);
-	for (i = 0; i < size; i++) {
+	for (i = 0; i < ARRAY_SIZE(cck_rates); i++) {
 		power_index = _rtl8723be_get_txpower_index(hw, RF90_PATH_A,
 					cck_rates[i],
 					rtl_priv(hw)->phy.current_chan_bw,
@@ -1158,8 +1158,7 @@ void rtl8723be_phy_set_txpower_level(struct ieee80211_hw *hw, u8 channel)
 		_rtl8723be_phy_set_txpower_index(hw, power_index, RF90_PATH_A,
 						 cck_rates[i]);
 	}
-	size = sizeof(ofdm_rates) / sizeof(u8);
-	for (i = 0; i < size; i++) {
+	for (i = 0; i < ARRAY_SIZE(ofdm_rates); i++) {
 		power_index = _rtl8723be_get_txpower_index(hw, RF90_PATH_A,
 					ofdm_rates[i],
 					rtl_priv(hw)->phy.current_chan_bw,
@@ -1167,8 +1166,7 @@ void rtl8723be_phy_set_txpower_level(struct ieee80211_hw *hw, u8 channel)
 		_rtl8723be_phy_set_txpower_index(hw, power_index, RF90_PATH_A,
 						 ofdm_rates[i]);
 	}
-	size = sizeof(ht_rates_1t) / sizeof(u8);
-	for (i = 0; i < size; i++) {
+	for (i = 0; i < ARRAY_SIZE(ht_rates_1t); i++) {
 		power_index = _rtl8723be_get_txpower_index(hw, RF90_PATH_A,
 					ht_rates_1t[i],
 					rtl_priv(hw)->phy.current_chan_bw,
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8723be/table.c b/drivers/net/wireless/realtek/rtlwifi/rtl8723be/table.c
index 381c16b9b3a9..160fee8333ae 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8723be/table.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8723be/table.c
@@ -25,6 +25,7 @@
  *
  *****************************************************************************/
 
+#include <linux/kernel.h>
 #include "table.h"
 
 u32 RTL8723BEPHY_REG_1TARRAY[] = {
@@ -224,8 +225,7 @@ u32 RTL8723BEPHY_REG_1TARRAY[] = {
 
 };
 
-u32 RTL8723BEPHY_REG_1TARRAYLEN =
-	sizeof(RTL8723BEPHY_REG_1TARRAY) / sizeof(u32);
+u32 RTL8723BEPHY_REG_1TARRAYLEN = ARRAY_SIZE(RTL8723BEPHY_REG_1TARRAY);
 
 u32 RTL8723BEPHY_REG_ARRAY_PG[] = {
 	0, 0, 0, 0x00000e08, 0x0000ff00, 0x00003800,
@@ -236,8 +236,7 @@ u32 RTL8723BEPHY_REG_ARRAY_PG[] = {
 	0, 0, 0, 0x00000e14, 0xffffffff, 0x26303436
 };
 
-u32 RTL8723BEPHY_REG_ARRAY_PGLEN =
-		sizeof(RTL8723BEPHY_REG_ARRAY_PG) / sizeof(u32);
+u32 RTL8723BEPHY_REG_ARRAY_PGLEN = ARRAY_SIZE(RTL8723BEPHY_REG_ARRAY_PG);
 
 u32 RTL8723BE_RADIOA_1TARRAY[] = {
 		0x000, 0x00010000,
@@ -373,8 +372,7 @@ u32 RTL8723BE_RADIOA_1TARRAY[] = {
 
 };
 
-u32 RTL8723BE_RADIOA_1TARRAYLEN =
-	sizeof(RTL8723BE_RADIOA_1TARRAY) / sizeof(u32);
+u32 RTL8723BE_RADIOA_1TARRAYLEN = ARRAY_SIZE(RTL8723BE_RADIOA_1TARRAY);
 
 u32 RTL8723BEMAC_1T_ARRAY[] = {
 		0x02F, 0x00000030,
@@ -483,7 +481,7 @@ u32 RTL8723BEMAC_1T_ARRAY[] = {
 
 };
 
-u32 RTL8723BEMAC_1T_ARRAYLEN = sizeof(RTL8723BEMAC_1T_ARRAY) / sizeof(u32);
+u32 RTL8723BEMAC_1T_ARRAYLEN = ARRAY_SIZE(RTL8723BEMAC_1T_ARRAY);
 
 u32 RTL8723BEAGCTAB_1TARRAY[] = {
 		0xC78, 0xFD000001,
@@ -620,4 +618,4 @@ u32 RTL8723BEAGCTAB_1TARRAY[] = {
 
 };
 
-u32 RTL8723BEAGCTAB_1TARRAYLEN = sizeof(RTL8723BEAGCTAB_1TARRAY) / sizeof(u32);
+u32 RTL8723BEAGCTAB_1TARRAYLEN = ARRAY_SIZE(RTL8723BEAGCTAB_1TARRAY);
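
All of these length globals now use ARRAY_SIZE() from <linux/kernel.h> (hence
the include added above) instead of open-coded sizeof divisions. A small
sketch of the difference, using a hypothetical table:

	#include <linux/kernel.h>	/* ARRAY_SIZE() */

	static const u32 example_tbl[] = { 0x800, 0x8020D010, 0x804, 0x080112E0 };

	/* ARRAY_SIZE(x) expands to sizeof(x) / sizeof((x)[0]) plus a
	 * build-time check that x really is an array, so the count can
	 * neither go stale when the element type changes (the open-coded
	 * "/ sizeof(u32)" would) nor compile against a plain pointer.
	 */
	static const u32 example_tbl_len = ARRAY_SIZE(example_tbl);
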
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8723com/fw_common.c b/drivers/net/wireless/realtek/rtlwifi/rtl8723com/fw_common.c
index efa7e1262461..521039c5d4ce 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8723com/fw_common.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8723com/fw_common.c
@@ -196,10 +196,12 @@ int rtl8723_download_fw(struct ieee80211_hw *hw,
 	enum version_8723e version = rtlhal->version;
 	int max_page;
 
-	if (!rtlhal->pfirmware)
+	if (rtlpriv->max_fw_size == 0 || !rtlhal->pfirmware)
 		return 1;
 
 	pfwheader = (struct rtlwifi_firmware_header *)rtlhal->pfirmware;
+	rtlhal->fw_version = le16_to_cpu(pfwheader->version);
+	rtlhal->fw_subversion = pfwheader->subversion;
 	pfwdata = rtlhal->pfirmware;
 	fwsize = rtlhal->fwsize;
 
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/hw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/hw.c
index 43e18c4c1e68..f20e77b4bb65 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/hw.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/hw.c
@@ -2483,17 +2483,16 @@ void rtl8821ae_card_disable(struct ieee80211_hw *hw)
 }
 
 void rtl8821ae_interrupt_recognized(struct ieee80211_hw *hw,
-				    u32 *p_inta, u32 *p_intb,
-				    u32 *p_intc, u32 *p_intd)
+				    struct rtl_int *intvec)
 {
 	struct rtl_priv *rtlpriv = rtl_priv(hw);
 	struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
 
-	*p_inta = rtl_read_dword(rtlpriv, ISR) & rtlpci->irq_mask[0];
-	rtl_write_dword(rtlpriv, ISR, *p_inta);
+	intvec->inta = rtl_read_dword(rtlpriv, ISR) & rtlpci->irq_mask[0];
+	rtl_write_dword(rtlpriv, ISR, intvec->inta);
 
-	*p_intb = rtl_read_dword(rtlpriv, REG_HISRE) & rtlpci->irq_mask[1];
-	rtl_write_dword(rtlpriv, REG_HISRE, *p_intb);
+	intvec->intb = rtl_read_dword(rtlpriv, REG_HISRE) & rtlpci->irq_mask[1];
+	rtl_write_dword(rtlpriv, REG_HISRE, intvec->intb);
 }
 
 void rtl8821ae_set_beacon_related_registers(struct ieee80211_hw *hw)
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/hw.h b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/hw.h
index 284d259fe557..e2ab783a2ad9 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/hw.h
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/hw.h
@@ -30,8 +30,7 @@ void rtl8821ae_get_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val);
 void rtl8821ae_read_eeprom_info(struct ieee80211_hw *hw);
 
 void rtl8821ae_interrupt_recognized(struct ieee80211_hw *hw,
-				    u32 *p_inta, u32 *p_intb,
-				    u32 *p_intc, u32 *p_intd);
+				    struct rtl_int *int_vec);
 int rtl8821ae_hw_init(struct ieee80211_hw *hw);
 void rtl8821ae_card_disable(struct ieee80211_hw *hw);
 void rtl8821ae_enable_interrupt(struct ieee80211_hw *hw);
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/table.c b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/table.c
index 408c4611e5de..f87f9d03b9fa 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/table.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/table.c
@@ -24,7 +24,7 @@
  * Larry Finger <Larry.Finger@lwfinger.net>
  *
  *****************************************************************************/
-
+#include <linux/kernel.h>
 #include "table.h"
 u32 RTL8812AE_PHY_REG_ARRAY[] = {
 		0x800, 0x8020D010,
@@ -258,8 +258,7 @@ u32 RTL8812AE_PHY_REG_ARRAY[] = {
 		0xEB8, 0x00508242,
 };
 
-u32 RTL8812AE_PHY_REG_1TARRAYLEN =
-	sizeof(RTL8812AE_PHY_REG_ARRAY) / sizeof(u32);
+u32 RTL8812AE_PHY_REG_1TARRAYLEN = ARRAY_SIZE(RTL8812AE_PHY_REG_ARRAY);
 
 u32 RTL8821AE_PHY_REG_ARRAY[] = {
 	0x800, 0x0020D090,
@@ -436,8 +435,7 @@ u32 RTL8821AE_PHY_REG_ARRAY[] = {
 	0xCB8, 0x00508240,
 };
 
-u32 RTL8821AE_PHY_REG_1TARRAYLEN =
-	sizeof(RTL8821AE_PHY_REG_ARRAY) / sizeof(u32);
+u32 RTL8821AE_PHY_REG_1TARRAYLEN = ARRAY_SIZE(RTL8821AE_PHY_REG_ARRAY);
 
 u32 RTL8812AE_PHY_REG_ARRAY_PG[] = {
 	0, 0, 0, 0x00000c20, 0xffffffff, 0x34363840,
@@ -488,8 +486,7 @@ u32 RTL8812AE_PHY_REG_ARRAY_PG[] = {
 	1, 1, 1, 0x00000e4c, 0xffffffff, 0x22242628
 };
 
-u32 RTL8812AE_PHY_REG_ARRAY_PGLEN =
-		sizeof(RTL8812AE_PHY_REG_ARRAY_PG) / sizeof(u32);
+u32 RTL8812AE_PHY_REG_ARRAY_PGLEN = ARRAY_SIZE(RTL8812AE_PHY_REG_ARRAY_PG);
 
 u32 RTL8821AE_PHY_REG_ARRAY_PG[] = {
 	0, 0, 0, 0x00000c20, 0xffffffff, 0x32343638,
@@ -509,8 +506,7 @@ u32 RTL8821AE_PHY_REG_ARRAY_PG[] = {
 	1, 0, 0, 0x00000c44, 0x0000ffff, 0x00002022
 };
 
-u32 RTL8821AE_PHY_REG_ARRAY_PGLEN =
-		sizeof(RTL8821AE_PHY_REG_ARRAY_PG) / sizeof(u32);
+u32 RTL8821AE_PHY_REG_ARRAY_PGLEN = ARRAY_SIZE(RTL8821AE_PHY_REG_ARRAY_PG);
 
 u32 RTL8812AE_RADIOA_ARRAY[] = {
 		0x000, 0x00010000,
@@ -927,7 +923,7 @@ u32 RTL8812AE_RADIOA_ARRAY[] = {
 		0x018, 0x0001712A,
 };
 
-u32 RTL8812AE_RADIOA_1TARRAYLEN = sizeof(RTL8812AE_RADIOA_ARRAY) / sizeof(u32);
+u32 RTL8812AE_RADIOA_1TARRAYLEN = ARRAY_SIZE(RTL8812AE_RADIOA_ARRAY);
 
 u32 RTL8812AE_RADIOB_ARRAY[] = {
 		0x056, 0x00051CF2,
@@ -1335,7 +1331,7 @@ u32 RTL8812AE_RADIOB_ARRAY[] = {
 		0x008, 0x00008400,
 };
 
-u32 RTL8812AE_RADIOB_1TARRAYLEN = sizeof(RTL8812AE_RADIOB_ARRAY) / sizeof(u32);
+u32 RTL8812AE_RADIOB_1TARRAYLEN = ARRAY_SIZE(RTL8812AE_RADIOB_ARRAY);
 
 u32 RTL8821AE_RADIOA_ARRAY[] = {
 		0x018, 0x0001712A,
@@ -1929,7 +1925,7 @@ u32 RTL8821AE_RADIOA_ARRAY[] = {
 
 };
 
-u32 RTL8821AE_RADIOA_1TARRAYLEN = sizeof(RTL8821AE_RADIOA_ARRAY) / sizeof(u32);
+u32 RTL8821AE_RADIOA_1TARRAYLEN = ARRAY_SIZE(RTL8821AE_RADIOA_ARRAY);
 
 u32 RTL8812AE_MAC_REG_ARRAY[] = {
 		0x010, 0x0000000C,
@@ -2041,7 +2037,7 @@ u32 RTL8812AE_MAC_REG_ARRAY[] = {
 		0x718, 0x00000040,
 };
 
-u32 RTL8812AE_MAC_1T_ARRAYLEN = sizeof(RTL8812AE_MAC_REG_ARRAY) / sizeof(u32);
+u32 RTL8812AE_MAC_1T_ARRAYLEN = ARRAY_SIZE(RTL8812AE_MAC_REG_ARRAY);
 
 u32 RTL8821AE_MAC_REG_ARRAY[] = {
 		0x428, 0x0000000A,
@@ -2143,7 +2139,7 @@ u32 RTL8821AE_MAC_REG_ARRAY[] = {
 		0x718, 0x00000040,
 };
 
-u32 RTL8821AE_MAC_1T_ARRAYLEN = sizeof(RTL8821AE_MAC_REG_ARRAY) / sizeof(u32);
+u32 RTL8821AE_MAC_1T_ARRAYLEN = ARRAY_SIZE(RTL8821AE_MAC_REG_ARRAY);
 
 u32 RTL8812AE_AGC_TAB_ARRAY[] = {
 	0x80000001, 0x00000000, 0x40000000, 0x00000000,
@@ -2479,8 +2475,7 @@ u32 RTL8812AE_AGC_TAB_ARRAY[] = {
 		0xE50, 0x00000020,
 };
 
-u32 RTL8812AE_AGC_TAB_1TARRAYLEN =
-	sizeof(RTL8812AE_AGC_TAB_ARRAY) / sizeof(u32);
+u32 RTL8812AE_AGC_TAB_1TARRAYLEN = ARRAY_SIZE(RTL8812AE_AGC_TAB_ARRAY);
 
 u32 RTL8821AE_AGC_TAB_ARRAY[] = {
 		0x81C, 0xBF000001,
@@ -2676,8 +2671,7 @@ u32 RTL8821AE_AGC_TAB_ARRAY[] = {
 		0xC50, 0x00000020,
 };
 
-u32 RTL8821AE_AGC_TAB_1TARRAYLEN =
-	sizeof(RTL8821AE_AGC_TAB_ARRAY) / sizeof(u32);
+u32 RTL8821AE_AGC_TAB_1TARRAYLEN = ARRAY_SIZE(RTL8821AE_AGC_TAB_ARRAY);
 
 /******************************************************************************
 *                           TXPWR_LMT.TXT
@@ -3250,7 +3244,7 @@ u8 *RTL8812AE_TXPWR_LMT[] = {
 	"MKK", "5G", "80M", "VHT", "2T", "155", "63"
 };
 
-u32 RTL8812AE_TXPWR_LMT_ARRAY_LEN = sizeof(RTL8812AE_TXPWR_LMT) / sizeof(u8 *);
+u32 RTL8812AE_TXPWR_LMT_ARRAY_LEN = ARRAY_SIZE(RTL8812AE_TXPWR_LMT);
 
 u8 *RTL8821AE_TXPWR_LMT[] = {
 	"FCC", "2.4G", "20M", "CCK", "1T", "01", "32",
@@ -3819,4 +3813,4 @@ u8 *RTL8821AE_TXPWR_LMT[] = {
 	"MKK", "5G", "80M", "VHT", "2T", "155", "63"
 };
 
-u32 RTL8821AE_TXPWR_LMT_ARRAY_LEN = sizeof(RTL8821AE_TXPWR_LMT) / sizeof(u8 *);
+u32 RTL8821AE_TXPWR_LMT_ARRAY_LEN = ARRAY_SIZE(RTL8821AE_TXPWR_LMT);
diff --git a/drivers/net/wireless/realtek/rtlwifi/usb.c b/drivers/net/wireless/realtek/rtlwifi/usb.c
index 39b033b3b53a..ce3103bb8ebb 100644
--- a/drivers/net/wireless/realtek/rtlwifi/usb.c
+++ b/drivers/net/wireless/realtek/rtlwifi/usb.c
@@ -962,7 +962,6 @@ static void _rtl_usb_tx_preprocess(struct ieee80211_hw *hw,
 	memset(&tcb_desc, 0, sizeof(struct rtl_tcb_desc));
 	if (ieee80211_is_auth(fc)) {
 		RT_TRACE(rtlpriv, COMP_SEND, DBG_DMESG, "MAC80211_LINKING\n");
-		rtl_ips_nic_on(hw);
 	}
 
 	if (rtlpriv->psc.sw_ps_enabled) {
diff --git a/drivers/net/wireless/realtek/rtlwifi/wifi.h b/drivers/net/wireless/realtek/rtlwifi/wifi.h
index 92d4859ec906..a7aacbc3984e 100644
--- a/drivers/net/wireless/realtek/rtlwifi/wifi.h
+++ b/drivers/net/wireless/realtek/rtlwifi/wifi.h
@@ -873,6 +873,24 @@ enum ratr_table_mode {
 	RATR_INX_WIRELESS_AC_24N = 9,
 };
 
+enum ratr_table_mode_new {
+	RATEID_IDX_BGN_40M_2SS = 0,
+	RATEID_IDX_BGN_40M_1SS = 1,
+	RATEID_IDX_BGN_20M_2SS_BN = 2,
+	RATEID_IDX_BGN_20M_1SS_BN = 3,
+	RATEID_IDX_GN_N2SS = 4,
+	RATEID_IDX_GN_N1SS = 5,
+	RATEID_IDX_BG = 6,
+	RATEID_IDX_G = 7,
+	RATEID_IDX_B = 8,
+	RATEID_IDX_VHT_2SS = 9,
+	RATEID_IDX_VHT_1SS = 10,
+	RATEID_IDX_MIX1 = 11,
+	RATEID_IDX_MIX2 = 12,
+	RATEID_IDX_VHT_3SS = 13,
+	RATEID_IDX_BGN_3SS = 14,
+};
+
 enum rtl_link_state {
 	MAC80211_NOLINK = 0,
 	MAC80211_LINKING = 1,
@@ -931,6 +949,10 @@ enum package_type {
 	PACKAGE_TFBGA79
 };
 
+enum rtl_spec_ver {
+	RTL_SPEC_NEW_RATEID = BIT(0),	/* use ratr_table_mode_new */
+};
+
 struct octet_string {
 	u8 *octet;
 	u16 length;
@@ -2093,14 +2115,21 @@ struct rtl_wow_pattern {
 	u32 mask[4];
 };
 
+/* struct to store contents of interrupt vectors */
+struct rtl_int {
+	u32 inta;
+	u32 intb;
+	u32 intc;
+	u32 intd;
+};
+
 struct rtl_hal_ops {
 	int (*init_sw_vars) (struct ieee80211_hw *hw);
 	void (*deinit_sw_vars) (struct ieee80211_hw *hw);
 	void (*read_chip_version)(struct ieee80211_hw *hw);
 	void (*read_eeprom_info) (struct ieee80211_hw *hw);
 	void (*interrupt_recognized) (struct ieee80211_hw *hw,
-				      u32 *p_inta, u32 *p_intb,
-				      u32 *p_intc, u32 *p_intd);
+				      struct rtl_int *intvec);
 	int (*hw_init) (struct ieee80211_hw *hw);
 	void (*hw_disable) (struct ieee80211_hw *hw);
 	void (*hw_suspend) (struct ieee80211_hw *hw);
@@ -2308,6 +2337,7 @@ struct rtl_hal_cfg {
 	struct rtl_hal_ops *ops;
 	struct rtl_mod_params *mod_params;
 	struct rtl_hal_usbint_cfg *usb_interface_cfg;
+	enum rtl_spec_ver spec_ver;
 
 	/*this map used for some registers or vars
 	   defined int HAL but used in MAIN */
@@ -2318,17 +2348,14 @@ struct rtl_hal_cfg {
 struct rtl_locks {
 	/* mutex */
 	struct mutex conf_mutex;
-	struct mutex ps_mutex;
+	struct mutex ips_mutex;	/* mutex for enter/leave IPS */
+	struct mutex lps_mutex;	/* mutex for enter/leave LPS */
 
 	/*spin lock */
-	spinlock_t ips_lock;
 	spinlock_t irq_th_lock;
-	spinlock_t irq_pci_lock;
-	spinlock_t tx_lock;
 	spinlock_t h2c_lock;
 	spinlock_t rf_ps_lock;
 	spinlock_t rf_lock;
-	spinlock_t lps_lock;
 	spinlock_t waitq_lock;
 	spinlock_t entry_list_lock;
 	spinlock_t usb_lock;
@@ -2341,9 +2368,6 @@ struct rtl_locks {
 	/*Dual mac*/
 	spinlock_t cck_and_rw_pagea_lock;
 
-	/*Easy concurrent*/
-	spinlock_t check_sendpkt_lock;
-
 	spinlock_t iqk_lock;
 };
 
@@ -2374,6 +2398,12 @@ struct rtl_works {
 	struct work_struct fill_h2c_cmd;
 };
 
+struct rtl_debug {
+	/* add for debug */
+	struct dentry *debugfs_dir;
+	char debugfs_name[20];
+};
+
 #define MIMO_PS_STATIC			0
 #define MIMO_PS_DYNAMIC			1
 #define MIMO_PS_NOLIMIT			3
@@ -2493,6 +2523,9 @@ struct rtl_btc_info {
 struct bt_coexist_info {
 	struct rtl_btc_ops *btc_ops;
 	struct rtl_btc_info btc_info;
+	/* btc context */
+	void *btc_context;
+	void *wifi_only_context;
 	/* EEPROM BT info. */
 	u8 eeprom_bt_coexist;
 	u8 eeprom_bt_type;
@@ -2549,16 +2582,22 @@ struct bt_coexist_info {
 
 struct rtl_btc_ops {
 	void (*btc_init_variables) (struct rtl_priv *rtlpriv);
+	void (*btc_init_variables_wifi_only)(struct rtl_priv *rtlpriv);
+	void (*btc_deinit_variables)(struct rtl_priv *rtlpriv);
 	void (*btc_init_hal_vars) (struct rtl_priv *rtlpriv);
+	void (*btc_power_on_setting)(struct rtl_priv *rtlpriv);
 	void (*btc_init_hw_config) (struct rtl_priv *rtlpriv);
+	void (*btc_init_hw_config_wifi_only)(struct rtl_priv *rtlpriv);
 	void (*btc_ips_notify) (struct rtl_priv *rtlpriv, u8 type);
 	void (*btc_lps_notify)(struct rtl_priv *rtlpriv, u8 type);
 	void (*btc_scan_notify) (struct rtl_priv *rtlpriv, u8 scantype);
+	void (*btc_scan_notify_wifi_only)(struct rtl_priv *rtlpriv,
+					  u8 scantype);
 	void (*btc_connect_notify) (struct rtl_priv *rtlpriv, u8 action);
 	void (*btc_mediastatus_notify) (struct rtl_priv *rtlpriv,
 					enum rt_media_status mstatus);
 	void (*btc_periodical) (struct rtl_priv *rtlpriv);
-	void (*btc_halt_notify) (void);
+	void (*btc_halt_notify)(struct rtl_priv *rtlpriv);
 	void (*btc_btinfo_notify) (struct rtl_priv *rtlpriv,
 				   u8 *tmp_buf, u8 length);
 	void (*btc_btmpinfo_notify)(struct rtl_priv *rtlpriv,
@@ -2568,6 +2607,12 @@ struct rtl_btc_ops {
 	bool (*btc_is_bt_disabled) (struct rtl_priv *rtlpriv);
 	void (*btc_special_packet_notify)(struct rtl_priv *rtlpriv,
 					  u8 pkt_type);
+	void (*btc_switch_band_notify)(struct rtl_priv *rtlpriv, u8 type,
+				       bool scanning);
+	void (*btc_switch_band_notify_wifi_only)(struct rtl_priv *rtlpriv,
+						 u8 type, bool scanning);
+	void (*btc_display_bt_coex_info)(struct rtl_priv *rtlpriv,
+					 struct seq_file *m);
 	void (*btc_record_pwr_mode)(struct rtl_priv *rtlpriv, u8 *buf, u8 len);
 	u8   (*btc_get_lps_val)(struct rtl_priv *rtlpriv);
 	u8   (*btc_get_rpwm_val)(struct rtl_priv *rtlpriv);
@@ -2642,6 +2687,7 @@ struct rtl_priv {
 	/* c2hcmd list for kthread level access */
 	struct list_head c2hcmd_list;
 
+	struct rtl_debug dbg;
 	int max_fw_size;
 
 	/*
diff --git a/drivers/net/wireless/ti/wl1251/init.c b/drivers/net/wireless/ti/wl1251/init.c
index 1d799bffaa9f..e7d77acdf18c 100644
--- a/drivers/net/wireless/ti/wl1251/init.c
+++ b/drivers/net/wireless/ti/wl1251/init.c
@@ -310,10 +310,8 @@ static int wl1251_hw_init_data_path_config(struct wl1251 *wl)
 	/* asking for the data path parameters */
 	wl->data_path = kzalloc(sizeof(struct acx_data_path_params_resp),
 				GFP_KERNEL);
-	if (!wl->data_path) {
-		wl1251_error("Couldnt allocate data path parameters");
+	if (!wl->data_path)
 		return -ENOMEM;
-	}
 
 	ret = wl1251_acx_data_path_params(wl, wl->data_path);
 	if (ret < 0) {
diff --git a/drivers/net/wireless/ti/wl1251/main.c b/drivers/net/wireless/ti/wl1251/main.c
index 6d02c660b4ab..037defd10b91 100644
--- a/drivers/net/wireless/ti/wl1251/main.c
+++ b/drivers/net/wireless/ti/wl1251/main.c
@@ -1200,8 +1200,7 @@ static void wl1251_op_bss_info_changed(struct ieee80211_hw *hw,
 		WARN_ON(wl->bss_type != BSS_TYPE_STA_BSS);
 
 		enable = bss_conf->arp_addr_cnt == 1 && bss_conf->assoc;
-		wl1251_acx_arp_ip_filter(wl, enable, addr);
-
+		ret = wl1251_acx_arp_ip_filter(wl, enable, addr);
 		if (ret < 0)
 			goto out_sleep;
 	}
diff --git a/drivers/net/wireless/ti/wlcore/acx.c b/drivers/net/wireless/ti/wlcore/acx.c
index a4859993db3c..3ca9167d6146 100644
--- a/drivers/net/wireless/ti/wlcore/acx.c
+++ b/drivers/net/wireless/ti/wlcore/acx.c
@@ -146,7 +146,7 @@ int wl1271_acx_feature_cfg(struct wl1271 *wl, struct wl12xx_vif *wlvif)
 	ret = wl1271_cmd_configure(wl, ACX_FEATURE_CFG,
 				   feature, sizeof(*feature));
 	if (ret < 0) {
-		wl1271_error("Couldnt set HW encryption");
+		wl1271_error("Couldn't set HW encryption");
 		goto out;
 	}
 
diff --git a/drivers/net/wireless/ti/wlcore/acx.h b/drivers/net/wireless/ti/wlcore/acx.h
index f46d7fdf9a00..7011c5d9599f 100644
--- a/drivers/net/wireless/ti/wlcore/acx.h
+++ b/drivers/net/wireless/ti/wlcore/acx.h
@@ -1129,10 +1129,8 @@ int wl12xx_acx_config_hangover(struct wl1271 *wl);
 int wlcore_acx_average_rssi(struct wl1271 *wl, struct wl12xx_vif *wlvif,
 			    s8 *avg_rssi);
 
-#ifdef CONFIG_PM
 int wl1271_acx_default_rx_filter_enable(struct wl1271 *wl, bool enable,
 					enum rx_filter_action action);
 int wl1271_acx_set_rx_filter(struct wl1271 *wl, u8 index, bool enable,
 			     struct wl12xx_rx_filter *filter);
-#endif /* CONFIG_PM */
 #endif /* __WL1271_ACX_H__ */
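
Dropping the CONFIG_PM guard here pairs with the __maybe_unused annotations in
the main.c hunks below: the suspend/resume handlers are always compiled, and
the attribute silences the unused-function warning in !CONFIG_PM builds. A
hedged sketch of the pattern with a hypothetical handler:

	/* built unconditionally; referenced only when PM is configured */
	static int __maybe_unused example_op_suspend(struct ieee80211_hw *hw,
						     struct cfg80211_wowlan *wow)
	{
		return 0;	/* real body elided */
	}
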
diff --git a/drivers/net/wireless/ti/wlcore/main.c b/drivers/net/wireless/ti/wlcore/main.c
index d47921a84509..09714034dbf1 100644
--- a/drivers/net/wireless/ti/wlcore/main.c
+++ b/drivers/net/wireless/ti/wlcore/main.c
@@ -42,6 +42,7 @@
 #include "sysfs.h"
 
 #define WL1271_BOOT_RETRIES 3
+#define WL1271_SUSPEND_SLEEP 100
 
 static char *fwlog_param;
 static int fwlog_mem_blocks = -1;
@@ -388,7 +389,6 @@ static void wl12xx_irq_update_links_status(struct wl1271 *wl,
 static int wlcore_fw_status(struct wl1271 *wl, struct wl_fw_status *status)
 {
 	struct wl12xx_vif *wlvif;
-	struct timespec ts;
 	u32 old_tx_blk_count = wl->tx_blocks_available;
 	int avail, freed_blocks;
 	int i;
@@ -485,8 +485,7 @@ static int wlcore_fw_status(struct wl1271 *wl, struct wl_fw_status *status)
 	}
 
 	/* update the host-chipset time offset */
-	getnstimeofday(&ts);
-	wl->time_offset = (timespec_to_ns(&ts) >> 10) -
+	wl->time_offset = (ktime_get_boot_ns() >> 10) -
 		(s64)(status->fw_localtime);
 
 	wl->fw_fast_lnk_map = status->link_fast_bitmap;
@@ -979,6 +978,24 @@ static int wlcore_fw_wakeup(struct wl1271 *wl)
 	return wlcore_raw_write32(wl, HW_ACCESS_ELP_CTRL_REG, ELPCTRL_WAKE_UP);
 }
 
+static int wlcore_fw_sleep(struct wl1271 *wl)
+{
+	int ret;
+
+	mutex_lock(&wl->mutex);
+	ret = wlcore_raw_write32(wl, HW_ACCESS_ELP_CTRL_REG, ELPCTRL_SLEEP);
+	if (ret < 0) {
+		wl12xx_queue_recovery_work(wl);
+		goto out;
+	}
+	set_bit(WL1271_FLAG_IN_ELP, &wl->flags);
+out:
+	mutex_unlock(&wl->mutex);
+	mdelay(WL1271_SUSPEND_SLEEP);
+
+	return 0;
+}
+
 static int wl1271_setup(struct wl1271 *wl)
 {
 	wl->raw_fw_status = kzalloc(wl->fw_status_len, GFP_KERNEL);
@@ -1326,7 +1343,6 @@ static struct sk_buff *wl12xx_alloc_dummy_packet(struct wl1271 *wl)
 }
 
 
-#ifdef CONFIG_PM
 static int
 wl1271_validate_wowlan_pattern(struct cfg80211_pkt_pattern *p)
 {
@@ -1698,8 +1714,8 @@ static void wl1271_configure_resume(struct wl1271 *wl, struct wl12xx_vif *wlvif)
 	}
 }
 
-static int wl1271_op_suspend(struct ieee80211_hw *hw,
-			    struct cfg80211_wowlan *wow)
+static int __maybe_unused wl1271_op_suspend(struct ieee80211_hw *hw,
+					    struct cfg80211_wowlan *wow)
 {
 	struct wl1271 *wl = hw->priv;
 	struct wl12xx_vif *wlvif;
@@ -1749,7 +1765,6 @@ static int wl1271_op_suspend(struct ieee80211_hw *hw,
 		goto out_sleep;
 
 out_sleep:
-	wl1271_ps_elp_sleep(wl);
 	mutex_unlock(&wl->mutex);
 
 	if (ret < 0) {
@@ -1782,10 +1797,19 @@ out_sleep:
 	 */
 	cancel_delayed_work(&wl->tx_watchdog_work);
 
+	/*
+	 * Use an immediate call to let the firmware enter power save
+	 * during suspend.
+	 * Using a workqueue for this last write meant it only happened on
+	 * resume, leaving the firmware with power save disabled during
+	 * suspend while consuming full power during wowlan suspend.
+	 */
+	wlcore_fw_sleep(wl);
+
 	return 0;
 }
 
-static int wl1271_op_resume(struct ieee80211_hw *hw)
+static int __maybe_unused wl1271_op_resume(struct ieee80211_hw *hw)
 {
 	struct wl1271 *wl = hw->priv;
 	struct wl12xx_vif *wlvif;
@@ -1869,7 +1893,6 @@ out:
 
 	return 0;
 }
-#endif
 
 static int wl1271_op_start(struct ieee80211_hw *hw)
 {
diff --git a/drivers/net/wireless/ti/wlcore/tx.c b/drivers/net/wireless/ti/wlcore/tx.c
index a3f5e9ca492a..00e9b4624dcf 100644
--- a/drivers/net/wireless/ti/wlcore/tx.c
+++ b/drivers/net/wireless/ti/wlcore/tx.c
@@ -264,7 +264,6 @@ static void wl1271_tx_fill_hdr(struct wl1271 *wl, struct wl12xx_vif *wlvif,
 			       struct sk_buff *skb, u32 extra,
 			       struct ieee80211_tx_info *control, u8 hlid)
 {
-	struct timespec ts;
 	struct wl1271_tx_hw_descr *desc;
 	int ac, rate_idx;
 	s64 hosttime;
@@ -287,8 +286,7 @@ static void wl1271_tx_fill_hdr(struct wl1271 *wl, struct wl12xx_vif *wlvif,
 	}
 
 	/* configure packet life time */
-	getnstimeofday(&ts);
-	hosttime = (timespec_to_ns(&ts) >> 10);
+	hosttime = (ktime_get_boot_ns() >> 10);
 	desc->start_time = cpu_to_le32(hosttime - wl->time_offset);
 
 	is_dummy = wl12xx_is_dummy_packet(wl, skb);
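
Both timestamp sites (fw status and the TX descriptor) switch from a struct
timespec round-trip to ktime_get_boot_ns(). A minimal sketch of the
conversion, with a hypothetical wrapper:

	#include <linux/timekeeping.h>

	/* firmware time is tracked in ~microsecond units: ns >> 10
	 * divides by 1024, close enough to /1000 for an offset that is
	 * only ever compared against itself.
	 */
	static s64 example_host_time(void)
	{
		/* boottime keeps counting across suspend and never jumps
		 * when the wall clock is set, unlike the old
		 * getnstimeofday() value.
		 */
		return ktime_get_boot_ns() >> 10;
	}
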
diff --git a/drivers/of/of_mdio.c b/drivers/of/of_mdio.c
index a327be1d264b..8c0c92712fc9 100644
--- a/drivers/of/of_mdio.c
+++ b/drivers/of/of_mdio.c
@@ -77,6 +77,11 @@ static int of_mdiobus_register_phy(struct mii_bus *mdio,
 	if (of_property_read_bool(child, "broken-turn-around"))
 		mdio->phy_ignore_ta_mask |= 1 << addr;
 
+	of_property_read_u32(child, "reset-assert-us",
+			     &phy->mdio.reset_assert_delay);
+	of_property_read_u32(child, "reset-deassert-us",
+			     &phy->mdio.reset_deassert_delay);
+
 	/* Associate the OF node with the device structure so it
 	 * can be looked up later */
 	of_node_get(child);
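
The two of_property_read_u32() calls can ignore their return value: on a
missing property the output is left untouched, so PHY nodes without reset
timing keep a zero delay. A sketch under that assumption, using the
mdio_device field names seen above:

	#include <linux/of.h>

	static void example_read_reset_delays(struct device_node *child,
					      struct mdio_device *mdiodev)
	{
		mdiodev->reset_assert_delay = 0;	/* default: no delay */
		mdiodev->reset_deassert_delay = 0;

		/* -EINVAL/-ENODATA from a missing property leaves the
		 * output unchanged, so the defaults survive.
		 */
		of_property_read_u32(child, "reset-assert-us",
				     &mdiodev->reset_assert_delay);
		of_property_read_u32(child, "reset-deassert-us",
				     &mdiodev->reset_deassert_delay);
	}
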
diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c
index 4a7c6864fdf4..764ca7b8840d 100644
--- a/drivers/pci/pci.c
+++ b/drivers/pci/pci.c
@@ -1458,6 +1458,7 @@ struct pci_devres {
 	unsigned int pinned:1;
 	unsigned int orig_intx:1;
 	unsigned int restore_intx:1;
+	unsigned int mwi:1;
 	u32 region_mask;
 };
 
@@ -1476,6 +1477,9 @@ static void pcim_release(struct device *gendev, void *res)
 		if (this->region_mask & (1 << i))
 			pci_release_region(dev, i);
 
+	if (this->mwi)
+		pci_clear_mwi(dev);
+
 	if (this->restore_intx)
 		pci_intx(dev, this->orig_intx);
 
@@ -3761,6 +3765,27 @@ int pci_set_mwi(struct pci_dev *dev)
 EXPORT_SYMBOL(pci_set_mwi);
 
 /**
+ * pcim_set_mwi - a device-managed pci_set_mwi()
+ * @dev: the PCI device for which MWI is enabled
+ *
+ * Managed pci_set_mwi().
+ *
+ * RETURNS: An appropriate -ERRNO error value on error, or zero for success.
+ */
+int pcim_set_mwi(struct pci_dev *dev)
+{
+	struct pci_devres *dr;
+
+	dr = find_pci_dr(dev);
+	if (!dr)
+		return -ENOMEM;
+
+	dr->mwi = 1;
+	return pci_set_mwi(dev);
+}
+EXPORT_SYMBOL(pcim_set_mwi);
+
+/**
  * pci_try_set_mwi - enables memory-write-invalidate PCI transaction
  * @dev: the PCI device for which MWI is enabled
  *
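
A sketch of how a driver might adopt the managed variant; the probe function
and its caller are hypothetical:

	static int example_probe(struct pci_dev *pdev,
				 const struct pci_device_id *id)
	{
		int err;

		err = pcim_enable_device(pdev);	/* managed enable first */
		if (err)
			return err;

		/* MWI is undone by pcim_release() on detach, so no
		 * pci_clear_mwi() in remove or error paths.
		 */
		err = pcim_set_mwi(pdev);
		if (err)
			return err;

		return 0;
	}
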
diff --git a/drivers/s390/net/Kconfig b/drivers/s390/net/Kconfig
index a782a207ad31..c7e484f70654 100644
--- a/drivers/s390/net/Kconfig
+++ b/drivers/s390/net/Kconfig
@@ -91,9 +91,6 @@ config QETH_L3
 	  To compile as a module choose M. The module name is qeth_l3.
 	  If unsure, choose Y.
 
-config QETH_IPV6
-	def_bool y if (QETH_L3 = IPV6) || (QETH_L3 && IPV6 = 'y')
-
 config CCWGROUP
 	tristate
 	default (LCS || CTCM || QETH)
diff --git a/drivers/s390/net/lcs.c b/drivers/s390/net/lcs.c
index 92ae84a927fc..0ee8f33efb54 100644
--- a/drivers/s390/net/lcs.c
+++ b/drivers/s390/net/lcs.c
@@ -756,18 +756,14 @@ lcs_get_lancmd(struct lcs_card *card, int count)
 static void
 lcs_get_reply(struct lcs_reply *reply)
 {
-	WARN_ON(atomic_read(&reply->refcnt) <= 0);
-	atomic_inc(&reply->refcnt);
+	refcount_inc(&reply->refcnt);
 }
 
 static void
 lcs_put_reply(struct lcs_reply *reply)
 {
-        WARN_ON(atomic_read(&reply->refcnt) <= 0);
-        if (atomic_dec_and_test(&reply->refcnt)) {
+	if (refcount_dec_and_test(&reply->refcnt))
 		kfree(reply);
-	}
-
 }
 
 static struct lcs_reply *
@@ -780,7 +776,7 @@ lcs_alloc_reply(struct lcs_cmd *cmd)
 	reply = kzalloc(sizeof(struct lcs_reply), GFP_ATOMIC);
 	if (!reply)
 		return NULL;
-	atomic_set(&reply->refcnt,1);
+	refcount_set(&reply->refcnt, 1);
 	reply->sequence_no = cmd->sequence_no;
 	reply->received = 0;
 	reply->rc = 0;
diff --git a/drivers/s390/net/lcs.h b/drivers/s390/net/lcs.h
index fbc8b90b1f85..bd52caa3b11b 100644
--- a/drivers/s390/net/lcs.h
+++ b/drivers/s390/net/lcs.h
@@ -5,6 +5,7 @@
 #include <linux/netdevice.h>
 #include <linux/skbuff.h>
 #include <linux/workqueue.h>
+#include <linux/refcount.h>
 #include <asm/ccwdev.h>
 
 #define LCS_DBF_TEXT(level, name, text) \
@@ -271,7 +272,7 @@ struct lcs_buffer {
 struct lcs_reply {
 	struct list_head list;
 	__u16 sequence_no;
-	atomic_t refcnt;
+	refcount_t refcnt;
 	/* Callback for completion notification. */
 	void (*callback)(struct lcs_card *, struct lcs_cmd *);
 	wait_queue_head_t wait_q;
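
A sketch of the converted pattern with a hypothetical object: refcount_t
saturates instead of wrapping and WARNs on inc-from-zero, which is why the
open-coded WARN_ON(atomic_read(...) <= 0) checks above could go away.

	#include <linux/refcount.h>
	#include <linux/slab.h>

	struct example_obj {
		refcount_t refcnt;
	};

	static void example_get(struct example_obj *obj)
	{
		refcount_inc(&obj->refcnt);	/* WARNs if refcnt was 0 */
	}

	static void example_put(struct example_obj *obj)
	{
		if (refcount_dec_and_test(&obj->refcnt))
			kfree(obj);
	}
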
diff --git a/drivers/s390/net/qeth_core.h b/drivers/s390/net/qeth_core.h
index badf42acbf95..db42107bf2f5 100644
--- a/drivers/s390/net/qeth_core.h
+++ b/drivers/s390/net/qeth_core.h
@@ -21,6 +21,7 @@
 #include <linux/ethtool.h>
 #include <linux/hashtable.h>
 #include <linux/ip.h>
+#include <linux/refcount.h>
 
 #include <net/ipv6.h>
 #include <net/if_inet6.h>
@@ -296,8 +297,23 @@ struct qeth_hdr_layer3 {
 	__u8  ext_flags;
 	__u16 vlan_id;
 	__u16 frame_offset;
-	__u8  dest_addr[16];
-} __attribute__ ((packed));
+	union {
+		/* TX: */
+		u8 ipv6_addr[16];
+		struct ipv4 {
+			u8 res[12];
+			u32 addr;
+		} ipv4;
+		/* RX: */
+		struct rx {
+			u8 res1[2];
+			u8 src_mac[6];
+			u8 res2[4];
+			u16 vlan_id;
+			u8 res3[2];
+		} rx;
+	} next_hop;
+};
 
 struct qeth_hdr_layer2 {
 	__u8 id;
@@ -504,12 +520,6 @@ struct qeth_qdio_info {
 	int default_out_queue;
 };
 
-#define QETH_ETH_MAC_V4      0x0100 /* like v4 */
-#define QETH_ETH_MAC_V6      0x3333 /* like v6 */
-/* tr mc mac is longer, but that will be enough to detect mc frames */
-#define QETH_TR_MAC_NC       0xc000 /* non-canonical */
-#define QETH_TR_MAC_C        0x0300 /* canonical */
-
 /**
  * buffer stuff for read channel
  */
@@ -632,7 +642,7 @@ struct qeth_reply {
 	int rc;
 	void *param;
 	struct qeth_card *card;
-	atomic_t refcnt;
+	refcount_t refcnt;
 };
 
 struct qeth_card_blkt {
@@ -846,14 +856,16 @@ static inline int qeth_get_micros(void)
 
 static inline int qeth_get_ip_version(struct sk_buff *skb)
 {
-	__be16 *p = &((struct ethhdr *)skb->data)->h_proto;
+	struct vlan_ethhdr *veth = vlan_eth_hdr(skb);
+	__be16 prot = veth->h_vlan_proto;
+
+	if (prot == htons(ETH_P_8021Q))
+		prot = veth->h_vlan_encapsulated_proto;
 
-	if (be16_to_cpu(*p) == ETH_P_8021Q)
-		p += 2;
-	switch (be16_to_cpu(*p)) {
-	case ETH_P_IPV6:
+	switch (prot) {
+	case htons(ETH_P_IPV6):
 		return 6;
-	case ETH_P_IP:
+	case htons(ETH_P_IP):
 		return 4;
 	default:
 		return 0;
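
The dest_addr byte array becomes a union of typed TX and RX views, so callers
can name the field they mean instead of casting at offsets. A minimal TX-side
sketch; the next-hop helpers are hypothetical:

	u32 example_nexthop_v4(struct sk_buff *skb);		/* hypothetical */
	const u8 *example_nexthop_v6(struct sk_buff *skb);	/* hypothetical */

	static void example_fill_l3_hdr(struct qeth_hdr_layer3 *l3,
					struct sk_buff *skb, int ipv)
	{
		if (ipv == 4)
			l3->next_hop.ipv4.addr = example_nexthop_v4(skb);
		else
			memcpy(l3->next_hop.ipv6_addr,
			       example_nexthop_v6(skb), 16);
	}
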
diff --git a/drivers/s390/net/qeth_core_main.c b/drivers/s390/net/qeth_core_main.c
index 3614df68830f..6abd3bc285e4 100644
--- a/drivers/s390/net/qeth_core_main.c
+++ b/drivers/s390/net/qeth_core_main.c
@@ -36,6 +36,7 @@
 #include <asm/diag.h>
 #include <asm/cio.h>
 #include <asm/ccwdev.h>
+#include <asm/cpcmd.h>
 
 #include "qeth_core.h"
 
@@ -564,7 +565,7 @@ static struct qeth_reply *qeth_alloc_reply(struct qeth_card *card)
 
 	reply = kzalloc(sizeof(struct qeth_reply), GFP_ATOMIC);
 	if (reply) {
-		atomic_set(&reply->refcnt, 1);
+		refcount_set(&reply->refcnt, 1);
 		atomic_set(&reply->received, 0);
 		reply->card = card;
 	}
@@ -573,14 +574,12 @@ static struct qeth_reply *qeth_alloc_reply(struct qeth_card *card)
 
 static void qeth_get_reply(struct qeth_reply *reply)
 {
-	WARN_ON(atomic_read(&reply->refcnt) <= 0);
-	atomic_inc(&reply->refcnt);
+	refcount_inc(&reply->refcnt);
 }
 
 static void qeth_put_reply(struct qeth_reply *reply)
 {
-	WARN_ON(atomic_read(&reply->refcnt) <= 0);
-	if (atomic_dec_and_test(&reply->refcnt))
+	if (refcount_dec_and_test(&reply->refcnt))
 		kfree(reply);
 }
 
@@ -1717,23 +1716,87 @@ static void qeth_configure_unitaddr(struct qeth_card *card, char *prcd)
 			       (prcd[0x11] == _ascebc['M']));
 }
 
+static enum qeth_discipline_id qeth_vm_detect_layer(struct qeth_card *card)
+{
+	enum qeth_discipline_id disc = QETH_DISCIPLINE_UNDETERMINED;
+	struct diag26c_vnic_resp *response = NULL;
+	struct diag26c_vnic_req *request = NULL;
+	struct ccw_dev_id id;
+	char userid[80];
+	int rc = 0;
+
+	QETH_DBF_TEXT(SETUP, 2, "vmlayer");
+
+	cpcmd("QUERY USERID", userid, sizeof(userid), &rc);
+	if (rc)
+		goto out;
+
+	request = kzalloc(sizeof(*request), GFP_KERNEL | GFP_DMA);
+	response = kzalloc(sizeof(*response), GFP_KERNEL | GFP_DMA);
+	if (!request || !response) {
+		rc = -ENOMEM;
+		goto out;
+	}
+
+	ccw_device_get_id(CARD_RDEV(card), &id);
+	request->resp_buf_len = sizeof(*response);
+	request->resp_version = DIAG26C_VERSION6_VM65918;
+	request->req_format = DIAG26C_VNIC_INFO;
+	ASCEBC(userid, 8);
+	memcpy(&request->sys_name, userid, 8);
+	request->devno = id.devno;
+
+	QETH_DBF_HEX(CTRL, 2, request, sizeof(*request));
+	rc = diag26c(request, response, DIAG26C_PORT_VNIC);
+	QETH_DBF_HEX(CTRL, 2, request, sizeof(*request));
+	if (rc)
+		goto out;
+	QETH_DBF_HEX(CTRL, 2, response, sizeof(*response));
+
+	if (request->resp_buf_len < sizeof(*response) ||
+	    response->version != request->resp_version) {
+		rc = -EIO;
+		goto out;
+	}
+
+	if (response->protocol == VNIC_INFO_PROT_L2)
+		disc = QETH_DISCIPLINE_LAYER2;
+	else if (response->protocol == VNIC_INFO_PROT_L3)
+		disc = QETH_DISCIPLINE_LAYER3;
+
+out:
+	kfree(response);
+	kfree(request);
+	if (rc)
+		QETH_DBF_TEXT_(SETUP, 2, "err%x", rc);
+	return disc;
+}
+
 /* Determine whether the device requires a specific layer discipline */
 static enum qeth_discipline_id qeth_enforce_discipline(struct qeth_card *card)
 {
+	enum qeth_discipline_id disc = QETH_DISCIPLINE_UNDETERMINED;
+
 	if (card->info.type == QETH_CARD_TYPE_OSM ||
-	    card->info.type == QETH_CARD_TYPE_OSN) {
+	    card->info.type == QETH_CARD_TYPE_OSN)
+		disc = QETH_DISCIPLINE_LAYER2;
+	else if (card->info.guestlan)
+		disc = (card->info.type == QETH_CARD_TYPE_IQD) ?
+				QETH_DISCIPLINE_LAYER3 :
+				qeth_vm_detect_layer(card);
+
+	switch (disc) {
+	case QETH_DISCIPLINE_LAYER2:
 		QETH_DBF_TEXT(SETUP, 3, "force l2");
-		return QETH_DISCIPLINE_LAYER2;
-	}
-
-	/* virtual HiperSocket is L3 only: */
-	if (card->info.guestlan && card->info.type == QETH_CARD_TYPE_IQD) {
+		break;
+	case QETH_DISCIPLINE_LAYER3:
 		QETH_DBF_TEXT(SETUP, 3, "force l3");
-		return QETH_DISCIPLINE_LAYER3;
+		break;
+	default:
+		QETH_DBF_TEXT(SETUP, 3, "force no");
 	}
 
-	QETH_DBF_TEXT(SETUP, 3, "force no");
-	return QETH_DISCIPLINE_UNDETERMINED;
+	return disc;
 }
 
 static void qeth_configure_blkt_default(struct qeth_card *card, char *prcd)
@@ -4218,9 +4281,8 @@ static int qeth_setadpparms_change_macaddr_cb(struct qeth_card *card,
 	cmd = (struct qeth_ipa_cmd *) data;
 	if (!card->options.layer2 ||
 	    !(card->info.mac_bits & QETH_LAYER2_MAC_READ)) {
-		memcpy(card->dev->dev_addr,
-		       &cmd->data.setadapterparms.data.change_addr.addr,
-		       OSA_ADDR_LEN);
+		ether_addr_copy(card->dev->dev_addr,
+				cmd->data.setadapterparms.data.change_addr.addr);
 		card->info.mac_bits |= QETH_LAYER2_MAC_READ;
 	}
 	qeth_default_setadapterparms_cb(card, reply, (unsigned long) cmd);
@@ -4242,9 +4304,9 @@ int qeth_setadpparms_change_macaddr(struct qeth_card *card)
 		return -ENOMEM;
 	cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE);
 	cmd->data.setadapterparms.data.change_addr.cmd = CHANGE_ADDR_READ_MAC;
-	cmd->data.setadapterparms.data.change_addr.addr_size = OSA_ADDR_LEN;
-	memcpy(&cmd->data.setadapterparms.data.change_addr.addr,
-	       card->dev->dev_addr, OSA_ADDR_LEN);
+	cmd->data.setadapterparms.data.change_addr.addr_size = ETH_ALEN;
+	ether_addr_copy(cmd->data.setadapterparms.data.change_addr.addr,
+			card->dev->dev_addr);
 	rc = qeth_send_ipa_cmd(card, iob, qeth_setadpparms_change_macaddr_cb,
 			       NULL);
 	return rc;
@@ -4789,9 +4851,12 @@ int qeth_vm_request_mac(struct qeth_card *card)
 	request->op_code = DIAG26C_GET_MAC;
 	request->devno = id.devno;
 
+	QETH_DBF_HEX(CTRL, 2, request, sizeof(*request));
 	rc = diag26c(request, response, DIAG26C_MAC_SERVICES);
+	QETH_DBF_HEX(CTRL, 2, request, sizeof(*request));
 	if (rc)
 		goto out;
+	QETH_DBF_HEX(CTRL, 2, response, sizeof(*response));
 
 	if (request->resp_buf_len < sizeof(*response) ||
 	    response->version != request->resp_version) {
diff --git a/drivers/s390/net/qeth_core_mpc.h b/drivers/s390/net/qeth_core_mpc.h
index ff6877f7b6f8..619f897b4bb0 100644
--- a/drivers/s390/net/qeth_core_mpc.h
+++ b/drivers/s390/net/qeth_core_mpc.h
@@ -10,6 +10,7 @@
 #define __QETH_CORE_MPC_H__
 
 #include <asm/qeth.h>
+#include <uapi/linux/if_ether.h>
 
 #define IPA_PDU_HEADER_SIZE	0x40
 #define QETH_IPA_PDU_LEN_TOTAL(buffer) (buffer + 0x0e)
@@ -25,7 +26,6 @@ extern unsigned char IPA_PDU_HEADER[];
 #define QETH_SEQ_NO_LENGTH	4
 #define QETH_MPC_TOKEN_LENGTH	4
 #define QETH_MCL_LENGTH		4
-#define OSA_ADDR_LEN		6
 
 #define QETH_TIMEOUT		(10 * HZ)
 #define QETH_IPA_TIMEOUT	(45 * HZ)
@@ -416,12 +416,11 @@ struct qeth_query_cmds_supp {
 } __attribute__ ((packed));
 
 struct qeth_change_addr {
-	__u32 cmd;
-	__u32 addr_size;
-	__u32 no_macs;
-	__u8 addr[OSA_ADDR_LEN];
-} __attribute__ ((packed));
-
+	u32 cmd;
+	u32 addr_size;
+	u32 no_macs;
+	u8 addr[ETH_ALEN];
+};
 
 struct qeth_snmp_cmd {
 	__u8  token[16];
diff --git a/drivers/s390/net/qeth_l2.h b/drivers/s390/net/qeth_l2.h
index 09b1c4ef3dc9..f2130051ca11 100644
--- a/drivers/s390/net/qeth_l2.h
+++ b/drivers/s390/net/qeth_l2.h
@@ -22,8 +22,7 @@ int qeth_l2_vnicc_get_timeout(struct qeth_card *card, u32 *timeout);
 bool qeth_l2_vnicc_is_in_use(struct qeth_card *card);
 
 struct qeth_mac {
-	u8 mac_addr[OSA_ADDR_LEN];
-	u8 is_uc:1;
+	u8 mac_addr[ETH_ALEN];
 	u8 disp_flag:2;
 	struct hlist_node hnode;
 };
diff --git a/drivers/s390/net/qeth_l2_main.c b/drivers/s390/net/qeth_l2_main.c
index 5863ea170ff2..7f236440483f 100644
--- a/drivers/s390/net/qeth_l2_main.c
+++ b/drivers/s390/net/qeth_l2_main.c
@@ -109,8 +109,8 @@ static int qeth_l2_send_setdelmac(struct qeth_card *card, __u8 *mac,
 	if (!iob)
 		return -ENOMEM;
 	cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE);
-	cmd->data.setdelmac.mac_length = OSA_ADDR_LEN;
-	memcpy(&cmd->data.setdelmac.mac, mac, OSA_ADDR_LEN);
+	cmd->data.setdelmac.mac_length = ETH_ALEN;
+	ether_addr_copy(cmd->data.setdelmac.mac, mac);
 	return qeth_setdelmac_makerc(card, qeth_send_ipa_cmd(card, iob,
 					   NULL, NULL));
 }
@@ -123,7 +123,7 @@ static int qeth_l2_send_setmac(struct qeth_card *card, __u8 *mac)
 	rc = qeth_l2_send_setdelmac(card, mac, IPA_CMD_SETVMAC);
 	if (rc == 0) {
 		card->info.mac_bits |= QETH_LAYER2_MAC_REGISTERED;
-		memcpy(card->dev->dev_addr, mac, OSA_ADDR_LEN);
+		ether_addr_copy(card->dev->dev_addr, mac);
 		dev_info(&card->gdev->dev,
 			"MAC address %pM successfully registered on device %s\n",
 			card->dev->dev_addr, card->dev->name);
@@ -156,54 +156,37 @@ static int qeth_l2_send_delmac(struct qeth_card *card, __u8 *mac)
 	return rc;
 }
 
-static int qeth_l2_send_setgroupmac(struct qeth_card *card, __u8 *mac)
+static int qeth_l2_write_mac(struct qeth_card *card, u8 *mac)
 {
+	enum qeth_ipa_cmds cmd = is_multicast_ether_addr_64bits(mac) ?
+					IPA_CMD_SETGMAC : IPA_CMD_SETVMAC;
 	int rc;
 
-	QETH_CARD_TEXT(card, 2, "L2Sgmac");
-	rc = qeth_l2_send_setdelmac(card, mac, IPA_CMD_SETGMAC);
+	QETH_CARD_TEXT(card, 2, "L2Wmac");
+	rc = qeth_l2_send_setdelmac(card, mac, cmd);
 	if (rc == -EEXIST)
-		QETH_DBF_MESSAGE(2, "Group MAC %pM already existing on %s\n",
-			mac, QETH_CARD_IFNAME(card));
+		QETH_DBF_MESSAGE(2, "MAC %pM already registered on %s\n",
+				 mac, QETH_CARD_IFNAME(card));
 	else if (rc)
-		QETH_DBF_MESSAGE(2, "Could not set group MAC %pM on %s: %d\n",
-			mac, QETH_CARD_IFNAME(card), rc);
+		QETH_DBF_MESSAGE(2, "Failed to register MAC %pM on %s: %d\n",
+				 mac, QETH_CARD_IFNAME(card), rc);
 	return rc;
 }
 
-static int qeth_l2_send_delgroupmac(struct qeth_card *card, __u8 *mac)
+static int qeth_l2_remove_mac(struct qeth_card *card, u8 *mac)
 {
+	enum qeth_ipa_cmds cmd = is_multicast_ether_addr_64bits(mac) ?
+					IPA_CMD_DELGMAC : IPA_CMD_DELVMAC;
 	int rc;
 
-	QETH_CARD_TEXT(card, 2, "L2Dgmac");
-	rc = qeth_l2_send_setdelmac(card, mac, IPA_CMD_DELGMAC);
+	QETH_CARD_TEXT(card, 2, "L2Rmac");
+	rc = qeth_l2_send_setdelmac(card, mac, cmd);
 	if (rc)
-		QETH_DBF_MESSAGE(2,
-			"Could not delete group MAC %pM on %s: %d\n",
-			mac, QETH_CARD_IFNAME(card), rc);
+		QETH_DBF_MESSAGE(2, "Failed to delete MAC %pM on %s: %d\n",
+				 mac, QETH_CARD_IFNAME(card), rc);
 	return rc;
 }
 
-static int qeth_l2_write_mac(struct qeth_card *card, struct qeth_mac *mac)
-{
-	if (mac->is_uc) {
-		return qeth_l2_send_setdelmac(card, mac->mac_addr,
-						IPA_CMD_SETVMAC);
-	} else {
-		return qeth_l2_send_setgroupmac(card, mac->mac_addr);
-	}
-}
-
-static int qeth_l2_remove_mac(struct qeth_card *card, struct qeth_mac *mac)
-{
-	if (mac->is_uc) {
-		return qeth_l2_send_setdelmac(card, mac->mac_addr,
-						IPA_CMD_DELVMAC);
-	} else {
-		return qeth_l2_send_delgroupmac(card, mac->mac_addr);
-	}
-}
-
 static void qeth_l2_del_all_macs(struct qeth_card *card)
 {
 	struct qeth_mac *mac;
@@ -549,7 +532,7 @@ static int qeth_l2_set_mac_address(struct net_device *dev, void *p)
 		QETH_CARD_TEXT(card, 3, "setmcTYP");
 		return -EOPNOTSUPP;
 	}
-	QETH_CARD_HEX(card, 3, addr->sa_data, OSA_ADDR_LEN);
+	QETH_CARD_HEX(card, 3, addr->sa_data, ETH_ALEN);
 	if (qeth_wait_for_threads(card, QETH_RECOVER_THREAD)) {
 		QETH_CARD_TEXT(card, 3, "setmcREC");
 		return -ERESTARTSYS;
@@ -597,27 +580,23 @@ static void qeth_promisc_to_bridge(struct qeth_card *card)
  * only if there is not in the hash table storage already
  *
 */
-static void qeth_l2_add_mac(struct qeth_card *card, struct netdev_hw_addr *ha,
-			    u8 is_uc)
+static void qeth_l2_add_mac(struct qeth_card *card, struct netdev_hw_addr *ha)
 {
 	u32 mac_hash = get_unaligned((u32 *)(&ha->addr[2]));
 	struct qeth_mac *mac;
 
 	hash_for_each_possible(card->mac_htable, mac, hnode, mac_hash) {
-		if (is_uc == mac->is_uc &&
-		    !memcmp(ha->addr, mac->mac_addr, OSA_ADDR_LEN)) {
+		if (ether_addr_equal_64bits(ha->addr, mac->mac_addr)) {
 			mac->disp_flag = QETH_DISP_ADDR_DO_NOTHING;
 			return;
 		}
 	}
 
 	mac = kzalloc(sizeof(struct qeth_mac), GFP_ATOMIC);
-
 	if (!mac)
 		return;
 
-	memcpy(mac->mac_addr, ha->addr, OSA_ADDR_LEN);
-	mac->is_uc = is_uc;
+	ether_addr_copy(mac->mac_addr, ha->addr);
 	mac->disp_flag = QETH_DISP_ADDR_ADD;
 
 	hash_add(card->mac_htable, &mac->hnode, mac_hash);
@@ -643,26 +622,29 @@ static void qeth_l2_set_rx_mode(struct net_device *dev)
 	spin_lock_bh(&card->mclock);
 
 	netdev_for_each_mc_addr(ha, dev)
-		qeth_l2_add_mac(card, ha, 0);
-
+		qeth_l2_add_mac(card, ha);
 	netdev_for_each_uc_addr(ha, dev)
-		qeth_l2_add_mac(card, ha, 1);
+		qeth_l2_add_mac(card, ha);
 
 	hash_for_each_safe(card->mac_htable, i, tmp, mac, hnode) {
-		if (mac->disp_flag == QETH_DISP_ADDR_DELETE) {
-			qeth_l2_remove_mac(card, mac);
+		switch (mac->disp_flag) {
+		case QETH_DISP_ADDR_DELETE:
+			qeth_l2_remove_mac(card, mac->mac_addr);
 			hash_del(&mac->hnode);
 			kfree(mac);
-
-		} else if (mac->disp_flag == QETH_DISP_ADDR_ADD) {
-			rc = qeth_l2_write_mac(card, mac);
+			break;
+		case QETH_DISP_ADDR_ADD:
+			rc = qeth_l2_write_mac(card, mac->mac_addr);
 			if (rc) {
 				hash_del(&mac->hnode);
 				kfree(mac);
-			} else
-				mac->disp_flag = QETH_DISP_ADDR_DELETE;
-		} else
+				break;
+			}
+			/* fall through */
+		default:
+			/* for next call to set_rx_mode(): */
 			mac->disp_flag = QETH_DISP_ADDR_DELETE;
+		}
 	}
 
 	spin_unlock_bh(&card->mclock);
diff --git a/drivers/s390/net/qeth_l3.h b/drivers/s390/net/qeth_l3.h
index e5833837b799..bdd45f4dcace 100644
--- a/drivers/s390/net/qeth_l3.h
+++ b/drivers/s390/net/qeth_l3.h
@@ -29,7 +29,7 @@ struct qeth_ipaddr {
 	 */
 	int  ref_counter;
 	enum qeth_prot_versions proto;
-	unsigned char mac[OSA_ADDR_LEN];
+	unsigned char mac[ETH_ALEN];
 	union {
 		struct {
 			unsigned int addr;
@@ -69,19 +69,20 @@ struct qeth_ipato_entry {
 extern const struct attribute_group *qeth_l3_attr_groups[];
 
 void qeth_l3_ipaddr_to_string(enum qeth_prot_versions, const __u8 *, char *);
-int qeth_l3_string_to_ipaddr(const char *, enum qeth_prot_versions, __u8 *);
 int qeth_l3_create_device_attributes(struct device *);
 void qeth_l3_remove_device_attributes(struct device *);
 int qeth_l3_setrouting_v4(struct qeth_card *);
 int qeth_l3_setrouting_v6(struct qeth_card *);
 int qeth_l3_add_ipato_entry(struct qeth_card *, struct qeth_ipato_entry *);
-void qeth_l3_del_ipato_entry(struct qeth_card *, enum qeth_prot_versions,
-			u8 *, int);
+int qeth_l3_del_ipato_entry(struct qeth_card *card,
+			    enum qeth_prot_versions proto, u8 *addr,
+			    int mask_bits);
 int qeth_l3_add_vipa(struct qeth_card *, enum qeth_prot_versions, const u8 *);
-void qeth_l3_del_vipa(struct qeth_card *, enum qeth_prot_versions, const u8 *);
+int qeth_l3_del_vipa(struct qeth_card *card, enum qeth_prot_versions proto,
+		     const u8 *addr);
 int qeth_l3_add_rxip(struct qeth_card *, enum qeth_prot_versions, const u8 *);
-void qeth_l3_del_rxip(struct qeth_card *card, enum qeth_prot_versions,
-			const u8 *);
+int qeth_l3_del_rxip(struct qeth_card *card, enum qeth_prot_versions proto,
+		     const u8 *addr);
 void qeth_l3_update_ipato(struct qeth_card *card);
 struct qeth_ipaddr *qeth_l3_get_addr_buffer(enum qeth_prot_versions);
 int qeth_l3_add_ip(struct qeth_card *, struct qeth_ipaddr *);
diff --git a/drivers/s390/net/qeth_l3_main.c b/drivers/s390/net/qeth_l3_main.c
index ef0961e18686..b0c888e86cd4 100644
--- a/drivers/s390/net/qeth_l3_main.c
+++ b/drivers/s390/net/qeth_l3_main.c
@@ -18,15 +18,20 @@
 #include <linux/kernel.h>
 #include <linux/etherdevice.h>
 #include <linux/ip.h>
+#include <linux/in.h>
 #include <linux/ipv6.h>
 #include <linux/inetdevice.h>
 #include <linux/igmp.h>
 #include <linux/slab.h>
+#include <linux/if_ether.h>
 #include <linux/if_vlan.h>
+#include <linux/skbuff.h>
 
 #include <net/ip.h>
 #include <net/arp.h>
 #include <net/route.h>
+#include <net/ipv6.h>
+#include <net/ip6_route.h>
 #include <net/ip6_fib.h>
 #include <net/ip6_checksum.h>
 #include <net/iucv/af_iucv.h>
@@ -37,99 +42,22 @@
 
 static int qeth_l3_set_offline(struct ccwgroup_device *);
 static int qeth_l3_stop(struct net_device *);
-static void qeth_l3_set_multicast_list(struct net_device *);
+static void qeth_l3_set_rx_mode(struct net_device *dev);
 static int qeth_l3_register_addr_entry(struct qeth_card *,
 		struct qeth_ipaddr *);
 static int qeth_l3_deregister_addr_entry(struct qeth_card *,
 		struct qeth_ipaddr *);
 
-static int qeth_l3_isxdigit(char *buf)
-{
-	while (*buf) {
-		if (!isxdigit(*buf++))
-			return 0;
-	}
-	return 1;
-}
-
 static void qeth_l3_ipaddr4_to_string(const __u8 *addr, char *buf)
 {
 	sprintf(buf, "%pI4", addr);
 }
 
-static int qeth_l3_string_to_ipaddr4(const char *buf, __u8 *addr)
-{
-	int count = 0, rc = 0;
-	unsigned int in[4];
-	char c;
-
-	rc = sscanf(buf, "%u.%u.%u.%u%c",
-		    &in[0], &in[1], &in[2], &in[3], &c);
-	if (rc != 4 && (rc != 5 || c != '\n'))
-		return -EINVAL;
-	for (count = 0; count < 4; count++) {
-		if (in[count] > 255)
-			return -EINVAL;
-		addr[count] = in[count];
-	}
-	return 0;
-}
-
 static void qeth_l3_ipaddr6_to_string(const __u8 *addr, char *buf)
 {
 	sprintf(buf, "%pI6", addr);
 }
 
-static int qeth_l3_string_to_ipaddr6(const char *buf, __u8 *addr)
-{
-	const char *end, *end_tmp, *start;
-	__u16 *in;
-	char num[5];
-	int num2, cnt, out, found, save_cnt;
-	unsigned short in_tmp[8] = {0, };
-
-	cnt = out = found = save_cnt = num2 = 0;
-	end = start = buf;
-	in = (__u16 *) addr;
-	memset(in, 0, 16);
-	while (*end) {
-		end = strchr(start, ':');
-		if (end == NULL) {
-			end = buf + strlen(buf);
-			end_tmp = strchr(start, '\n');
-			if (end_tmp != NULL)
-				end = end_tmp;
-			out = 1;
-		}
-		if ((end - start)) {
-			memset(num, 0, 5);
-			if ((end - start) > 4)
-				return -EINVAL;
-			memcpy(num, start, end - start);
-			if (!qeth_l3_isxdigit(num))
-				return -EINVAL;
-			sscanf(start, "%x", &num2);
-			if (found)
-				in_tmp[save_cnt++] = num2;
-			else
-				in[cnt++] = num2;
-			if (out)
-				break;
-		} else {
-			if (found)
-				return -EINVAL;
-			found = 1;
-		}
-		start = ++end;
-	}
-	if (cnt + save_cnt > 8)
-		return -EINVAL;
-	cnt = 7;
-	while (save_cnt)
-		in[cnt--] = in_tmp[--save_cnt];
-	return 0;
-}
-
 void qeth_l3_ipaddr_to_string(enum qeth_prot_versions proto, const __u8 *addr,
 				char *buf)
 {
@@ -139,17 +67,6 @@ void qeth_l3_ipaddr_to_string(enum qeth_prot_versions proto, const __u8 *addr,
 		qeth_l3_ipaddr6_to_string(addr, buf);
 }
 
-int qeth_l3_string_to_ipaddr(const char *buf, enum qeth_prot_versions proto,
-				__u8 *addr)
-{
-	if (proto == QETH_PROT_IPV4)
-		return qeth_l3_string_to_ipaddr4(buf, addr);
-	else if (proto == QETH_PROT_IPV6)
-		return qeth_l3_string_to_ipaddr6(buf, addr);
-	else
-		return -EINVAL;
-}
-
 static void qeth_l3_convert_addr_to_bits(u8 *addr, u8 *bits, int len)
 {
 	int i, j;
@@ -207,8 +124,8 @@ inline int
 qeth_l3_ipaddrs_is_equal(struct qeth_ipaddr *addr1, struct qeth_ipaddr *addr2)
 {
 	return addr1->proto == addr2->proto &&
-		!memcmp(&addr1->u, &addr2->u, sizeof(addr1->u))  &&
-		!memcmp(&addr1->mac, &addr2->mac, sizeof(addr1->mac));
+	       !memcmp(&addr1->u, &addr2->u, sizeof(addr1->u)) &&
+	       ether_addr_equal_64bits(addr1->mac, addr2->mac);
 }
 
 static struct qeth_ipaddr *
@@ -446,7 +363,7 @@ static int qeth_l3_send_setdelmc(struct qeth_card *card,
 	if (!iob)
 		return -ENOMEM;
 	cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE);
-	memcpy(&cmd->data.setdelipm.mac, addr->mac, OSA_ADDR_LEN);
+	ether_addr_copy(cmd->data.setdelipm.mac, addr->mac);
 	if (addr->proto == QETH_PROT_IPV6)
 		memcpy(cmd->data.setdelipm.ip6, &addr->u.a6.addr,
 		       sizeof(struct in6_addr));
@@ -582,7 +499,6 @@ int qeth_l3_setrouting_v6(struct qeth_card *card)
 	int rc = 0;
 
 	QETH_CARD_TEXT(card, 3, "setrtg6");
-#ifdef CONFIG_QETH_IPV6
 
 	if (!qeth_is_supported(card, IPA_IPV6))
 		return 0;
@@ -599,7 +515,6 @@ int qeth_l3_setrouting_v6(struct qeth_card *card)
 			" on %s. Type set to 'no router'.\n", rc,
 			QETH_CARD_IFNAME(card));
 	}
-#endif
 	return rc;
 }
 
@@ -673,10 +588,12 @@ int qeth_l3_add_ipato_entry(struct qeth_card *card,
 	return rc;
 }
 
-void qeth_l3_del_ipato_entry(struct qeth_card *card,
-		enum qeth_prot_versions proto, u8 *addr, int mask_bits)
+int qeth_l3_del_ipato_entry(struct qeth_card *card,
+			    enum qeth_prot_versions proto, u8 *addr,
+			    int mask_bits)
 {
 	struct qeth_ipato_entry *ipatoe, *tmp;
+	int rc = -ENOENT;
 
 	QETH_CARD_TEXT(card, 2, "delipato");
 
@@ -691,10 +608,12 @@ void qeth_l3_del_ipato_entry(struct qeth_card *card,
 			list_del(&ipatoe->entry);
 			qeth_l3_update_ipato(card);
 			kfree(ipatoe);
+			rc = 0;
 		}
 	}
 
 	spin_unlock_bh(&card->ip_lock);
+	return rc;
 }
 
 /*
@@ -704,7 +623,7 @@ int qeth_l3_add_vipa(struct qeth_card *card, enum qeth_prot_versions proto,
 	      const u8 *addr)
 {
 	struct qeth_ipaddr *ipaddr;
-	int rc = 0;
+	int rc;
 
 	ipaddr = qeth_l3_get_addr_buffer(proto);
 	if (ipaddr) {
@@ -728,7 +647,7 @@ int qeth_l3_add_vipa(struct qeth_card *card, enum qeth_prot_versions proto,
 	if (qeth_l3_ip_from_hash(card, ipaddr))
 		rc = -EEXIST;
 	else
-		qeth_l3_add_ip(card, ipaddr);
+		rc = qeth_l3_add_ip(card, ipaddr);
 
 	spin_unlock_bh(&card->ip_lock);
 
@@ -737,10 +656,11 @@ int qeth_l3_add_vipa(struct qeth_card *card, enum qeth_prot_versions proto,
 	return rc;
 }
 
-void qeth_l3_del_vipa(struct qeth_card *card, enum qeth_prot_versions proto,
-	      const u8 *addr)
+int qeth_l3_del_vipa(struct qeth_card *card, enum qeth_prot_versions proto,
+		     const u8 *addr)
 {
 	struct qeth_ipaddr *ipaddr;
+	int rc;
 
 	ipaddr = qeth_l3_get_addr_buffer(proto);
 	if (ipaddr) {
@@ -755,13 +675,14 @@ void qeth_l3_del_vipa(struct qeth_card *card, enum qeth_prot_versions proto,
 		}
 		ipaddr->type = QETH_IP_TYPE_VIPA;
 	} else
-		return;
+		return -ENOMEM;
 
 	spin_lock_bh(&card->ip_lock);
-	qeth_l3_delete_ip(card, ipaddr);
+	rc = qeth_l3_delete_ip(card, ipaddr);
 	spin_unlock_bh(&card->ip_lock);
 
 	kfree(ipaddr);
+	return rc;
 }
 
 /*
@@ -771,7 +692,7 @@ int qeth_l3_add_rxip(struct qeth_card *card, enum qeth_prot_versions proto,
 	      const u8 *addr)
 {
 	struct qeth_ipaddr *ipaddr;
-	int rc = 0;
+	int rc;
 
 	ipaddr = qeth_l3_get_addr_buffer(proto);
 	if (ipaddr) {
@@ -796,7 +717,7 @@ int qeth_l3_add_rxip(struct qeth_card *card, enum qeth_prot_versions proto,
 	if (qeth_l3_ip_from_hash(card, ipaddr))
 		rc = -EEXIST;
 	else
-		qeth_l3_add_ip(card, ipaddr);
+		rc = qeth_l3_add_ip(card, ipaddr);
 
 	spin_unlock_bh(&card->ip_lock);
 
@@ -805,10 +726,11 @@ int qeth_l3_add_rxip(struct qeth_card *card, enum qeth_prot_versions proto,
 	return rc;
 }
 
-void qeth_l3_del_rxip(struct qeth_card *card, enum qeth_prot_versions proto,
-			const u8 *addr)
+int qeth_l3_del_rxip(struct qeth_card *card, enum qeth_prot_versions proto,
+		     const u8 *addr)
 {
 	struct qeth_ipaddr *ipaddr;
+	int rc;
 
 	ipaddr = qeth_l3_get_addr_buffer(proto);
 	if (ipaddr) {
@@ -823,13 +745,14 @@ void qeth_l3_del_rxip(struct qeth_card *card, enum qeth_prot_versions proto,
 		}
 		ipaddr->type = QETH_IP_TYPE_RXIP;
 	} else
-		return;
+		return -ENOMEM;
 
 	spin_lock_bh(&card->ip_lock);
-	qeth_l3_delete_ip(card, ipaddr);
+	rc = qeth_l3_delete_ip(card, ipaddr);
 	spin_unlock_bh(&card->ip_lock);
 
 	kfree(ipaddr);
+	return rc;
 }
 
 static int qeth_l3_register_addr_entry(struct qeth_card *card,
@@ -896,27 +819,6 @@ static int qeth_l3_deregister_addr_entry(struct qeth_card *card,
 	return rc;
 }
 
-static u8 qeth_l3_get_qeth_hdr_flags4(int cast_type)
-{
-	if (cast_type == RTN_MULTICAST)
-		return QETH_CAST_MULTICAST;
-	if (cast_type == RTN_BROADCAST)
-		return QETH_CAST_BROADCAST;
-	return QETH_CAST_UNICAST;
-}
-
-static u8 qeth_l3_get_qeth_hdr_flags6(int cast_type)
-{
-	u8 ct = QETH_HDR_PASSTHRU | QETH_HDR_IPV6;
-	if (cast_type == RTN_MULTICAST)
-		return ct | QETH_CAST_MULTICAST;
-	if (cast_type == RTN_ANYCAST)
-		return ct | QETH_CAST_ANYCAST;
-	if (cast_type == RTN_BROADCAST)
-		return ct | QETH_CAST_BROADCAST;
-	return ct | QETH_CAST_UNICAST;
-}
-
 static int qeth_l3_setadapter_parms(struct qeth_card *card)
 {
 	int rc = 0;
@@ -933,7 +835,6 @@ static int qeth_l3_setadapter_parms(struct qeth_card *card)
 	return rc;
 }
 
-#ifdef CONFIG_QETH_IPV6
 static int qeth_l3_send_simple_setassparms_ipv6(struct qeth_card *card,
 		enum qeth_ipa_funcs ipa_func, __u16 cmd_code)
 {
@@ -949,7 +850,6 @@ static int qeth_l3_send_simple_setassparms_ipv6(struct qeth_card *card,
 				   qeth_setassparms_cb, NULL);
 	return rc;
 }
-#endif
 
 static int qeth_l3_start_ipa_arp_processing(struct qeth_card *card)
 {
@@ -1045,7 +945,6 @@ static int qeth_l3_start_ipa_multicast(struct qeth_card *card)
 	return rc;
 }
 
-#ifdef CONFIG_QETH_IPV6
 static int qeth_l3_softsetup_ipv6(struct qeth_card *card)
 {
 	int rc;
@@ -1091,12 +990,9 @@ out:
 	dev_info(&card->gdev->dev, "IPV6 enabled\n");
 	return 0;
 }
-#endif
 
 static int qeth_l3_start_ipa_ipv6(struct qeth_card *card)
 {
-	int rc = 0;
-
 	QETH_CARD_TEXT(card, 3, "strtipv6");
 
 	if (!qeth_is_supported(card, IPA_IPV6)) {
@@ -1104,10 +1000,7 @@ static int qeth_l3_start_ipa_ipv6(struct qeth_card *card)
 			"IPv6 not supported on %s\n", QETH_CARD_IFNAME(card));
 		return 0;
 	}
-#ifdef CONFIG_QETH_IPV6
-	rc = qeth_l3_softsetup_ipv6(card);
-#endif
-	return rc ;
+	return qeth_l3_softsetup_ipv6(card);
 }
 
 static int qeth_l3_start_ipa_broadcast(struct qeth_card *card)
@@ -1179,8 +1072,8 @@ static int qeth_l3_iqd_read_initial_mac_cb(struct qeth_card *card,
 
 	cmd = (struct qeth_ipa_cmd *) data;
 	if (cmd->hdr.return_code == 0)
-		memcpy(card->dev->dev_addr,
-			cmd->data.create_destroy_addr.unique_id, ETH_ALEN);
+		ether_addr_copy(card->dev->dev_addr,
+				cmd->data.create_destroy_addr.unique_id);
 	else
 		eth_random_addr(card->dev->dev_addr);
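The memcpy -> ether_addr_copy() conversions in this hunk are behaviour-preserving: the helper copies exactly ETH_ALEN (6) bytes but, per its kerneldoc, wants both buffers 16-bit aligned, which is assumed to hold for dev_addr and the IPA reply field here. Minimal standalone sketch:

	u8 dst[ETH_ALEN] __aligned(2);
	u8 src[ETH_ALEN] __aligned(2) = { 0x02, 0, 0, 0, 0, 0x01 };

	ether_addr_copy(dst, src);	/* copies all six bytes at once */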
 
@@ -1328,81 +1221,22 @@ qeth_diags_trace(struct qeth_card *card, enum qeth_diags_trace_cmds diags_cmd)
 	return qeth_send_ipa_cmd(card, iob, qeth_diags_trace_cb, NULL);
 }
 
-static void qeth_l3_get_mac_for_ipm(__be32 ipm, char *mac)
-{
-	ip_eth_mc_map(ipm, mac);
-}
-
-static void qeth_l3_mark_all_mc_to_be_deleted(struct qeth_card *card)
-{
-	struct qeth_ipaddr *addr;
-	int i;
-
-	hash_for_each(card->ip_mc_htable, i, addr, hnode)
-		addr->disp_flag = QETH_DISP_ADDR_DELETE;
-
-}
-
-static void qeth_l3_add_all_new_mc(struct qeth_card *card)
-{
-	struct qeth_ipaddr *addr;
-	struct hlist_node *tmp;
-	int i;
-	int rc;
-
-	hash_for_each_safe(card->ip_mc_htable, i, tmp, addr, hnode) {
-		if (addr->disp_flag == QETH_DISP_ADDR_ADD) {
-			rc = qeth_l3_register_addr_entry(card, addr);
-			if (!rc || (rc == IPA_RC_LAN_OFFLINE))
-				addr->ref_counter = 1;
-			else {
-				hash_del(&addr->hnode);
-				kfree(addr);
-			}
-		}
-	}
-
-}
-
-static void qeth_l3_delete_nonused_mc(struct qeth_card *card)
-{
-	struct qeth_ipaddr *addr;
-	struct hlist_node *tmp;
-	int i;
-	int rc;
-
-	hash_for_each_safe(card->ip_mc_htable, i, tmp, addr, hnode) {
-		if (addr->disp_flag == QETH_DISP_ADDR_DELETE) {
-			rc = qeth_l3_deregister_addr_entry(card, addr);
-			if (!rc || (rc == IPA_RC_MC_ADDR_NOT_FOUND)) {
-				hash_del(&addr->hnode);
-				kfree(addr);
-			}
-		}
-	}
-
-}
-
-
 static void
 qeth_l3_add_mc_to_hash(struct qeth_card *card, struct in_device *in4_dev)
 {
 	struct ip_mc_list *im4;
 	struct qeth_ipaddr *tmp, *ipm;
-	char buf[MAX_ADDR_LEN];
 
 	QETH_CARD_TEXT(card, 4, "addmc");
 
 	tmp = qeth_l3_get_addr_buffer(QETH_PROT_IPV4);
-		if (!tmp)
-			return;
+	if (!tmp)
+		return;
 
 	for (im4 = rcu_dereference(in4_dev->mc_list); im4 != NULL;
 	     im4 = rcu_dereference(im4->next_rcu)) {
-		qeth_l3_get_mac_for_ipm(im4->multiaddr, buf);
-
+		ip_eth_mc_map(im4->multiaddr, tmp->mac);
 		tmp->u.a4.addr = be32_to_cpu(im4->multiaddr);
-		memcpy(tmp->mac, buf, sizeof(tmp->mac));
 		tmp->is_multicast = 1;
 
 		ipm = qeth_l3_ip_from_hash(card, tmp);
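ip_eth_mc_map() and ipv6_eth_mc_map() derive the multicast MAC straight from the group address, so the old MAX_ADDR_LEN scratch buffer and the qeth_l3_get_mac_for_ipm() wrapper can go. A sketch of the IPv4 mapping (the group address is illustrative):

	char mac[ETH_ALEN];

	/* 224.0.0.1 maps to 01:00:5e:00:00:01 (low 23 bits of the group) */
	ip_eth_mc_map(htonl(0xe0000001), mac);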
@@ -1412,7 +1246,7 @@ qeth_l3_add_mc_to_hash(struct qeth_card *card, struct in_device *in4_dev)
 			ipm = qeth_l3_get_addr_buffer(QETH_PROT_IPV4);
 			if (!ipm)
 				continue;
-			memcpy(ipm->mac, buf, sizeof(tmp->mac));
+			ether_addr_copy(ipm->mac, tmp->mac);
 			ipm->u.a4.addr = be32_to_cpu(im4->multiaddr);
 			ipm->is_multicast = 1;
 			ipm->disp_flag = QETH_DISP_ADDR_ADD;
@@ -1466,25 +1300,21 @@ unlock:
 	rcu_read_unlock();
 }
 
-#ifdef CONFIG_QETH_IPV6
-static void
-qeth_l3_add_mc6_to_hash(struct qeth_card *card, struct inet6_dev *in6_dev)
+static void qeth_l3_add_mc6_to_hash(struct qeth_card *card,
+				    struct inet6_dev *in6_dev)
 {
 	struct qeth_ipaddr *ipm;
 	struct ifmcaddr6 *im6;
 	struct qeth_ipaddr *tmp;
-	char buf[MAX_ADDR_LEN];
 
 	QETH_CARD_TEXT(card, 4, "addmc6");
 
 	tmp = qeth_l3_get_addr_buffer(QETH_PROT_IPV6);
-		if (!tmp)
-			return;
+	if (!tmp)
+		return;
 
 	for (im6 = in6_dev->mc_list; im6 != NULL; im6 = im6->next) {
-		ndisc_mc_map(&im6->mca_addr, buf, in6_dev->dev, 0);
-
-		memcpy(tmp->mac, buf, sizeof(tmp->mac));
+		ipv6_eth_mc_map(&im6->mca_addr, tmp->mac);
 		memcpy(&tmp->u.a6.addr, &im6->mca_addr.s6_addr,
 		       sizeof(struct in6_addr));
 		tmp->is_multicast = 1;
@@ -1499,7 +1329,7 @@ qeth_l3_add_mc6_to_hash(struct qeth_card *card, struct inet6_dev *in6_dev)
 		if (!ipm)
 			continue;
 
-		memcpy(ipm->mac, buf, OSA_ADDR_LEN);
+		ether_addr_copy(ipm->mac, tmp->mac);
 		memcpy(&ipm->u.a6.addr, &im6->mca_addr.s6_addr,
 		       sizeof(struct in6_addr));
 		ipm->is_multicast = 1;
@@ -1560,7 +1390,6 @@ static void qeth_l3_add_multicast_ipv6(struct qeth_card *card)
 	rcu_read_unlock();
 	in6_dev_put(in6_dev);
 }
-#endif /* CONFIG_QETH_IPV6 */
 
 static void qeth_l3_free_vlan_addresses4(struct qeth_card *card,
 			unsigned short vid)
@@ -1600,9 +1429,8 @@ out:
 }
 
 static void qeth_l3_free_vlan_addresses6(struct qeth_card *card,
-			unsigned short vid)
+					 unsigned short vid)
 {
-#ifdef CONFIG_QETH_IPV6
 	struct inet6_dev *in6_dev;
 	struct inet6_ifaddr *ifa;
 	struct qeth_ipaddr *addr;
@@ -1637,7 +1465,6 @@ static void qeth_l3_free_vlan_addresses6(struct qeth_card *card,
 	kfree(addr);
 out:
 	in6_dev_put(in6_dev);
-#endif /* CONFIG_QETH_IPV6 */
 }
 
 static void qeth_l3_free_vlan_addresses(struct qeth_card *card,
@@ -1672,44 +1499,31 @@ static int qeth_l3_vlan_rx_kill_vid(struct net_device *dev,
 	/* unregister IP addresses of vlan device */
 	qeth_l3_free_vlan_addresses(card, vid);
 	clear_bit(vid, card->active_vlans);
-	qeth_l3_set_multicast_list(card->dev);
+	qeth_l3_set_rx_mode(dev);
 	return 0;
 }
 
 static void qeth_l3_rebuild_skb(struct qeth_card *card, struct sk_buff *skb,
 				struct qeth_hdr *hdr)
 {
-	__u16 prot;
-	struct iphdr *ip_hdr;
-	unsigned char tg_addr[MAX_ADDR_LEN];
-
 	if (!(hdr->hdr.l3.flags & QETH_HDR_PASSTHRU)) {
-		prot = (hdr->hdr.l3.flags & QETH_HDR_IPV6) ? ETH_P_IPV6 :
-			      ETH_P_IP;
+		u16 prot = (hdr->hdr.l3.flags & QETH_HDR_IPV6) ? ETH_P_IPV6 :
+								 ETH_P_IP;
+		unsigned char tg_addr[ETH_ALEN];
+
+		skb_reset_network_header(skb);
 		switch (hdr->hdr.l3.flags & QETH_HDR_CAST_MASK) {
 		case QETH_CAST_MULTICAST:
-			switch (prot) {
-#ifdef CONFIG_QETH_IPV6
-			case ETH_P_IPV6:
-				ndisc_mc_map((struct in6_addr *)
-				     skb->data + 24,
-				     tg_addr, card->dev, 0);
-				break;
-#endif
-			case ETH_P_IP:
-				ip_hdr = (struct iphdr *)skb->data;
-				ip_eth_mc_map(ip_hdr->daddr, tg_addr);
-				break;
-			default:
-				memcpy(tg_addr, card->dev->broadcast,
-					card->dev->addr_len);
-			}
+			if (prot == ETH_P_IP)
+				ip_eth_mc_map(ip_hdr(skb)->daddr, tg_addr);
+			else
+				ipv6_eth_mc_map(&ipv6_hdr(skb)->daddr, tg_addr);
+
 			card->stats.multicast++;
 			skb->pkt_type = PACKET_MULTICAST;
 			break;
 		case QETH_CAST_BROADCAST:
-			memcpy(tg_addr, card->dev->broadcast,
-				card->dev->addr_len);
+			ether_addr_copy(tg_addr, card->dev->broadcast);
 			card->stats.multicast++;
 			skb->pkt_type = PACKET_BROADCAST;
 			break;
@@ -1721,12 +1535,11 @@ static void qeth_l3_rebuild_skb(struct qeth_card *card, struct sk_buff *skb,
 				skb->pkt_type = PACKET_OTHERHOST;
 			else
 				skb->pkt_type = PACKET_HOST;
-			memcpy(tg_addr, card->dev->dev_addr,
-				card->dev->addr_len);
+			ether_addr_copy(tg_addr, card->dev->dev_addr);
 		}
 		if (hdr->hdr.l3.ext_flags & QETH_HDR_EXT_SRC_MAC_ADDR)
 			card->dev->header_ops->create(skb, card->dev, prot,
-				tg_addr, &hdr->hdr.l3.dest_addr[2],
+				tg_addr, &hdr->hdr.l3.next_hop.rx.src_mac,
 				card->dev->addr_len);
 		else
 			card->dev->header_ops->create(skb, card->dev, prot,
@@ -1741,7 +1554,7 @@ static void qeth_l3_rebuild_skb(struct qeth_card *card, struct sk_buff *skb,
 				      QETH_HDR_EXT_INCLUDE_VLAN_TAG))) {
 		u16 tag = (hdr->hdr.l3.ext_flags & QETH_HDR_EXT_VLAN_FRAME) ?
 				hdr->hdr.l3.vlan_id :
-				*((u16 *)&hdr->hdr.l3.dest_addr[12]);
+				hdr->hdr.l3.next_hop.rx.vlan_id;
 		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), tag);
 	}
 
@@ -1949,26 +1762,46 @@ qeth_l3_handle_promisc_mode(struct qeth_card *card)
 	}
 }
 
-static void qeth_l3_set_multicast_list(struct net_device *dev)
+static void qeth_l3_set_rx_mode(struct net_device *dev)
 {
 	struct qeth_card *card = dev->ml_priv;
+	struct qeth_ipaddr *addr;
+	struct hlist_node *tmp;
+	int i, rc;
 
 	QETH_CARD_TEXT(card, 3, "setmulti");
 	if (qeth_threads_running(card, QETH_RECOVER_THREAD) &&
 	    (card->state != CARD_STATE_UP))
 		return;
 	if (!card->options.sniffer) {
-
 		spin_lock_bh(&card->mclock);
 
-		qeth_l3_mark_all_mc_to_be_deleted(card);
-
 		qeth_l3_add_multicast_ipv4(card);
-#ifdef CONFIG_QETH_IPV6
 		qeth_l3_add_multicast_ipv6(card);
-#endif
-		qeth_l3_delete_nonused_mc(card);
-		qeth_l3_add_all_new_mc(card);
+
+		hash_for_each_safe(card->ip_mc_htable, i, tmp, addr, hnode) {
+			switch (addr->disp_flag) {
+			case QETH_DISP_ADDR_DELETE:
+				rc = qeth_l3_deregister_addr_entry(card, addr);
+				if (!rc || rc == IPA_RC_MC_ADDR_NOT_FOUND) {
+					hash_del(&addr->hnode);
+					kfree(addr);
+				}
+				break;
+			case QETH_DISP_ADDR_ADD:
+				rc = qeth_l3_register_addr_entry(card, addr);
+				if (rc && rc != IPA_RC_LAN_OFFLINE) {
+					hash_del(&addr->hnode);
+					kfree(addr);
+					break;
+				}
+				addr->ref_counter = 1;
+				/* fall through */
+			default:
+				/* for next call to set_rx_mode(): */
+				addr->disp_flag = QETH_DISP_ADDR_DELETE;
+			}
+		}
 
 		spin_unlock_bh(&card->mclock);
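The rework above folds the old mark-all/add-new/delete-unused helpers into one mark-and-sweep pass: the add functions tag fresh entries QETH_DISP_ADDR_ADD, and every surviving entry is re-armed to QETH_DISP_ADDR_DELETE so that anything not re-reported on the next call gets purged. The same pattern, sketched with generic names (entry, state, register_entry() etc. are illustrative, not driver API):

	hash_for_each_safe(table, bkt, n, entry, node) {
		switch (entry->state) {
		case STATE_DELETE:	/* not seen this round: purge */
			unregister_entry(entry);
			hash_del(&entry->node);
			kfree(entry);
			break;
		case STATE_ADD:		/* newly reported: activate */
			if (register_entry(entry) < 0) {
				hash_del(&entry->node);
				kfree(entry);
				break;
			}
			/* fall through */
		default:
			entry->state = STATE_DELETE;	/* re-arm */
		}
	}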
 
@@ -2237,12 +2070,10 @@ static int qeth_l3_arp_query(struct qeth_card *card, char __user *udata)
 			rc = -EFAULT;
 		goto free_and_out;
 	}
-#ifdef CONFIG_QETH_IPV6
 	if (qinfo.mask_bits & QETH_QARP_WITH_IPV6) {
 		/* fails in case of GuestLAN QDIO mode */
 		qeth_l3_query_arp_cache_info(card, QETH_PROT_IPV6, &qinfo);
 	}
-#endif
 	if (copy_to_user(udata, qinfo.udata, qinfo.udata_len)) {
 		QETH_CARD_TEXT(card, 4, "qactf");
 		rc = -EFAULT;
@@ -2422,9 +2253,8 @@ static int qeth_l3_do_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
 	return rc;
 }
 
-static int qeth_l3_get_cast_type(struct qeth_card *card, struct sk_buff *skb)
+static int qeth_l3_get_cast_type(struct sk_buff *skb)
 {
-	int cast_type = RTN_UNSPEC;
 	struct neighbour *n = NULL;
 	struct dst_entry *dst;
 
@@ -2433,48 +2263,34 @@ static int qeth_l3_get_cast_type(struct qeth_card *card, struct sk_buff *skb)
 	if (dst)
 		n = dst_neigh_lookup_skb(dst, skb);
 	if (n) {
-		cast_type = n->type;
+		int cast_type = n->type;
+
 		rcu_read_unlock();
 		neigh_release(n);
 		if ((cast_type == RTN_BROADCAST) ||
 		    (cast_type == RTN_MULTICAST) ||
 		    (cast_type == RTN_ANYCAST))
 			return cast_type;
-		else
-			return RTN_UNSPEC;
+		return RTN_UNSPEC;
 	}
 	rcu_read_unlock();
 
-	/* try something else */
+	/* no neighbour (eg AF_PACKET), fall back to target's IP address ... */
 	if (be16_to_cpu(skb->protocol) == ETH_P_IPV6)
-		return (skb_network_header(skb)[24] == 0xff) ?
-				RTN_MULTICAST : 0;
+		return ipv6_addr_is_multicast(&ipv6_hdr(skb)->daddr) ?
+				RTN_MULTICAST : RTN_UNSPEC;
 	else if (be16_to_cpu(skb->protocol) == ETH_P_IP)
-		return ((skb_network_header(skb)[16] & 0xf0) == 0xe0) ?
-				RTN_MULTICAST : 0;
-	/* ... */
-	if (!memcmp(skb->data, skb->dev->broadcast, 6))
+		return ipv4_is_multicast(ip_hdr(skb)->daddr) ?
+				RTN_MULTICAST : RTN_UNSPEC;
+
+	/* ... and MAC address */
+	if (ether_addr_equal_64bits(eth_hdr(skb)->h_dest, skb->dev->broadcast))
 		return RTN_BROADCAST;
-	else {
-		u16 hdr_mac;
-
-		hdr_mac = *((u16 *)skb->data);
-		/* tr multicast? */
-		switch (card->info.link_type) {
-		case QETH_LINK_TYPE_HSTR:
-		case QETH_LINK_TYPE_LANE_TR:
-			if ((hdr_mac == QETH_TR_MAC_NC) ||
-			    (hdr_mac == QETH_TR_MAC_C))
-				return RTN_MULTICAST;
-			break;
-		/* eth or so multicast? */
-		default:
-		if ((hdr_mac == QETH_ETH_MAC_V4) ||
-			    (hdr_mac == QETH_ETH_MAC_V6))
-				return RTN_MULTICAST;
-		}
-	}
-	return cast_type;
+	if (is_multicast_ether_addr(eth_hdr(skb)->h_dest))
+		return RTN_MULTICAST;
+
+	/* default to unicast */
+	return RTN_UNSPEC;
 }
 
 static void qeth_l3_fill_af_iucv_hdr(struct qeth_card *card,
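With the neighbour lookup unchanged, the fallback now reads the packet headers through the generic helpers instead of poking raw byte offsets (and drops the dead token-ring cases). The resulting classification, sketched standalone (the function name is illustrative):

	static int classify_cast_type(struct sk_buff *skb)
	{
		if (skb->protocol == htons(ETH_P_IPV6))
			return ipv6_addr_is_multicast(&ipv6_hdr(skb)->daddr) ?
					RTN_MULTICAST : RTN_UNSPEC;
		if (skb->protocol == htons(ETH_P_IP))
			return ipv4_is_multicast(ip_hdr(skb)->daddr) ?
					RTN_MULTICAST : RTN_UNSPEC;
		if (ether_addr_equal_64bits(eth_hdr(skb)->h_dest,
					    skb->dev->broadcast))
			return RTN_BROADCAST;
		if (is_multicast_ether_addr(eth_hdr(skb)->h_dest))
			return RTN_MULTICAST;
		return RTN_UNSPEC;
	}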
@@ -2494,17 +2310,27 @@ static void qeth_l3_fill_af_iucv_hdr(struct qeth_card *card,
 	daddr[0] = 0xfe;
 	daddr[1] = 0x80;
 	memcpy(&daddr[8], iucv_hdr->destUserID, 8);
-	memcpy(hdr->hdr.l3.dest_addr, daddr, 16);
+	memcpy(hdr->hdr.l3.next_hop.ipv6_addr, daddr, 16);
 }
 
-static void qeth_l3_fill_header(struct qeth_card *card, struct qeth_hdr *hdr,
-		struct sk_buff *skb, int ipv, int cast_type)
+static u8 qeth_l3_cast_type_to_flag(int cast_type)
 {
-	struct dst_entry *dst;
+	if (cast_type == RTN_MULTICAST)
+		return QETH_CAST_MULTICAST;
+	if (cast_type == RTN_ANYCAST)
+		return QETH_CAST_ANYCAST;
+	if (cast_type == RTN_BROADCAST)
+		return QETH_CAST_BROADCAST;
+	return QETH_CAST_UNICAST;
+}
 
+static void qeth_l3_fill_header(struct qeth_card *card, struct qeth_hdr *hdr,
+				struct sk_buff *skb, int ipv, int cast_type,
+				unsigned int data_len)
+{
 	memset(hdr, 0, sizeof(struct qeth_hdr));
 	hdr->hdr.l3.id = QETH_HEADER_TYPE_LAYER3;
-	hdr->hdr.l3.ext_flags = 0;
+	hdr->hdr.l3.length = data_len;
 
 	/*
 	 * before we're going to overwrite this location with next hop ip.
@@ -2518,44 +2344,40 @@ static void qeth_l3_fill_header(struct qeth_card *card, struct qeth_hdr *hdr,
 		hdr->hdr.l3.vlan_id = skb_vlan_tag_get(skb);
 	}
 
-	hdr->hdr.l3.length = skb->len - sizeof(struct qeth_hdr);
+	/* OSA only: */
+	if (!ipv) {
+		hdr->hdr.l3.flags = QETH_HDR_PASSTHRU;
+		if (ether_addr_equal_64bits(eth_hdr(skb)->h_dest,
+					    skb->dev->broadcast))
+			hdr->hdr.l3.flags |= QETH_CAST_BROADCAST;
+		else
+			hdr->hdr.l3.flags |= (cast_type == RTN_MULTICAST) ?
+				QETH_CAST_MULTICAST : QETH_CAST_UNICAST;
+		return;
+	}
 
+	hdr->hdr.l3.flags = qeth_l3_cast_type_to_flag(cast_type);
 	rcu_read_lock();
-	dst = skb_dst(skb);
 	if (ipv == 4) {
-		struct rtable *rt = (struct rtable *) dst;
-		__be32 *pkey = &ip_hdr(skb)->daddr;
-
-		if (rt && rt->rt_gateway)
-			pkey = &rt->rt_gateway;
+		struct rtable *rt = skb_rtable(skb);
 
-		/* IPv4 */
-		hdr->hdr.l3.flags = qeth_l3_get_qeth_hdr_flags4(cast_type);
-		memset(hdr->hdr.l3.dest_addr, 0, 12);
-		*((__be32 *) (&hdr->hdr.l3.dest_addr[12])) = *pkey;
-	} else if (ipv == 6) {
-		struct rt6_info *rt = (struct rt6_info *) dst;
-		struct in6_addr *pkey = &ipv6_hdr(skb)->daddr;
+		*((__be32 *) &hdr->hdr.l3.next_hop.ipv4.addr) = (rt) ?
+				rt_nexthop(rt, ip_hdr(skb)->daddr) :
+				ip_hdr(skb)->daddr;
+	} else {
+		/* IPv6 */
+		const struct rt6_info *rt = skb_rt6_info(skb);
+		const struct in6_addr *next_hop;
 
 		if (rt && !ipv6_addr_any(&rt->rt6i_gateway))
-			pkey = &rt->rt6i_gateway;
+			next_hop = &rt->rt6i_gateway;
+		else
+			next_hop = &ipv6_hdr(skb)->daddr;
+		memcpy(hdr->hdr.l3.next_hop.ipv6_addr, next_hop, 16);
 
-		/* IPv6 */
-		hdr->hdr.l3.flags = qeth_l3_get_qeth_hdr_flags6(cast_type);
-		if (card->info.type == QETH_CARD_TYPE_IQD)
-			hdr->hdr.l3.flags &= ~QETH_HDR_PASSTHRU;
-		memcpy(hdr->hdr.l3.dest_addr, pkey, 16);
-	} else {
-		if (!memcmp(skb->data + sizeof(struct qeth_hdr),
-			    skb->dev->broadcast, 6)) {
-			/* broadcast? */
-			hdr->hdr.l3.flags = QETH_CAST_BROADCAST |
-						QETH_HDR_PASSTHRU;
-		} else {
-			hdr->hdr.l3.flags = (cast_type == RTN_MULTICAST) ?
-				QETH_CAST_MULTICAST | QETH_HDR_PASSTHRU :
-				QETH_CAST_UNICAST | QETH_HDR_PASSTHRU;
-		}
+		hdr->hdr.l3.flags |= QETH_HDR_IPV6;
+		if (card->info.type != QETH_CARD_TYPE_IQD)
+			hdr->hdr.l3.flags |= QETH_HDR_PASSTHRU;
 	}
 	rcu_read_unlock();
 }
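Next-hop selection now goes through the routing helpers: rt_nexthop() yields the route's gateway when one is set and falls back to the packet's destination otherwise, matching the removed open-coded rt_gateway checks. IPv4 sketch (helper name illustrative):

	static __be32 next_hop_v4(struct sk_buff *skb)
	{
		struct rtable *rt = skb_rtable(skb);

		return rt ? rt_nexthop(rt, ip_hdr(skb)->daddr)
			  : ip_hdr(skb)->daddr;
	}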
@@ -2587,7 +2409,6 @@ static void qeth_tso_fill_header(struct qeth_card *card,
 
 	/* fix header to TSO values ... */
 	hdr->hdr.hdr.l3.id = QETH_HEADER_TYPE_TSO;
-	hdr->hdr.hdr.l3.length = skb->len - sizeof(struct qeth_hdr_tso);
 	/* set values which are fixed for the first approach ... */
 	hdr->ext.hdr_tot_len = (__u16) sizeof(struct qeth_hdr_ext_tso);
 	hdr->ext.imb_hdr_no  = 1;
@@ -2655,7 +2476,7 @@ static netdev_tx_t qeth_l3_hard_start_xmit(struct sk_buff *skb,
 	struct qeth_card *card = dev->ml_priv;
 	struct sk_buff *new_skb = NULL;
 	int ipv = qeth_get_ip_version(skb);
-	int cast_type = qeth_l3_get_cast_type(card, skb);
+	int cast_type = qeth_l3_get_cast_type(skb);
 	struct qeth_qdio_out_q *queue =
 		card->qdio.out_qs[card->qdio.do_prio_queueing
 			|| (cast_type && card->info.is_multicast_different) ?
@@ -2748,21 +2569,23 @@ static netdev_tx_t qeth_l3_hard_start_xmit(struct sk_buff *skb,
 	if (use_tso) {
 		hdr = skb_push(new_skb, sizeof(struct qeth_hdr_tso));
 		memset(hdr, 0, sizeof(struct qeth_hdr_tso));
-		qeth_l3_fill_header(card, hdr, new_skb, ipv, cast_type);
+		qeth_l3_fill_header(card, hdr, new_skb, ipv, cast_type,
+				    new_skb->len - sizeof(struct qeth_hdr_tso));
 		qeth_tso_fill_header(card, hdr, new_skb);
 		hdr_elements++;
 	} else {
 		if (data_offset < 0) {
 			hdr = skb_push(new_skb, sizeof(struct qeth_hdr));
-			qeth_l3_fill_header(card, hdr, new_skb, ipv,
-						cast_type);
+			qeth_l3_fill_header(card, hdr, new_skb, ipv, cast_type,
+					    new_skb->len -
+					    sizeof(struct qeth_hdr));
 		} else {
 			if (be16_to_cpu(new_skb->protocol) == ETH_P_AF_IUCV)
 				qeth_l3_fill_af_iucv_hdr(card, hdr, new_skb);
 			else {
 				qeth_l3_fill_header(card, hdr, new_skb, ipv,
-							cast_type);
-				hdr->hdr.l3.length = new_skb->len - data_offset;
+						    cast_type,
+						    new_skb->len - data_offset);
 			}
 		}
 
@@ -2930,7 +2753,7 @@ static const struct net_device_ops qeth_l3_netdev_ops = {
 	.ndo_get_stats		= qeth_get_stats,
 	.ndo_start_xmit		= qeth_l3_hard_start_xmit,
 	.ndo_validate_addr	= eth_validate_addr,
-	.ndo_set_rx_mode	= qeth_l3_set_multicast_list,
+	.ndo_set_rx_mode	= qeth_l3_set_rx_mode,
 	.ndo_do_ioctl		= qeth_do_ioctl,
 	.ndo_change_mtu		= qeth_change_mtu,
 	.ndo_fix_features	= qeth_fix_features,
@@ -2947,7 +2770,7 @@ static const struct net_device_ops qeth_l3_osa_netdev_ops = {
 	.ndo_start_xmit		= qeth_l3_hard_start_xmit,
 	.ndo_features_check	= qeth_features_check,
 	.ndo_validate_addr	= eth_validate_addr,
-	.ndo_set_rx_mode	= qeth_l3_set_multicast_list,
+	.ndo_set_rx_mode	= qeth_l3_set_rx_mode,
 	.ndo_do_ioctl		= qeth_do_ioctl,
 	.ndo_change_mtu		= qeth_change_mtu,
 	.ndo_fix_features	= qeth_fix_features,
@@ -3145,7 +2968,7 @@ static int __qeth_l3_set_online(struct ccwgroup_device *gdev, int recovery_mode)
 			__qeth_l3_open(card->dev);
 		else
 			dev_open(card->dev);
-		qeth_l3_set_multicast_list(card->dev);
+		qeth_l3_set_rx_mode(card->dev);
 		qeth_recover_features(card->dev);
 		rtnl_unlock();
 	}
@@ -3371,10 +3194,6 @@ static struct notifier_block qeth_l3_ip_notifier = {
 	NULL,
 };
 
-#ifdef CONFIG_QETH_IPV6
-/**
- * IPv6 event handler
- */
 static int qeth_l3_ip6_event(struct notifier_block *this,
 			     unsigned long event, void *ptr)
 {
@@ -3419,7 +3238,6 @@ static struct notifier_block qeth_l3_ip6_notifier = {
 	qeth_l3_ip6_event,
 	NULL,
 };
-#endif
 
 static int qeth_l3_register_notifiers(void)
 {
@@ -3429,35 +3247,25 @@ static int qeth_l3_register_notifiers(void)
 	rc = register_inetaddr_notifier(&qeth_l3_ip_notifier);
 	if (rc)
 		return rc;
-#ifdef CONFIG_QETH_IPV6
 	rc = register_inet6addr_notifier(&qeth_l3_ip6_notifier);
 	if (rc) {
 		unregister_inetaddr_notifier(&qeth_l3_ip_notifier);
 		return rc;
 	}
-#else
-	pr_warn("There is no IPv6 support for the layer 3 discipline\n");
-#endif
 	return 0;
 }
 
 static void qeth_l3_unregister_notifiers(void)
 {
-
 	QETH_DBF_TEXT(SETUP, 5, "unregnot");
 	WARN_ON(unregister_inetaddr_notifier(&qeth_l3_ip_notifier));
-#ifdef CONFIG_QETH_IPV6
 	WARN_ON(unregister_inet6addr_notifier(&qeth_l3_ip6_notifier));
-#endif /* QETH_IPV6 */
 }
 
 static int __init qeth_l3_init(void)
 {
-	int rc = 0;
-
 	pr_info("register layer 3 discipline\n");
-	rc = qeth_l3_register_notifiers();
-	return rc;
+	return qeth_l3_register_notifiers();
 }
 
 static void __exit qeth_l3_exit(void)
diff --git a/drivers/s390/net/qeth_l3_sys.c b/drivers/s390/net/qeth_l3_sys.c
index 6ea2b528a64e..a645cfe66ddf 100644
--- a/drivers/s390/net/qeth_l3_sys.c
+++ b/drivers/s390/net/qeth_l3_sys.c
@@ -10,11 +10,23 @@
 #include <linux/slab.h>
 #include <asm/ebcdic.h>
 #include <linux/hashtable.h>
+#include <linux/inet.h>
 #include "qeth_l3.h"
 
 #define QETH_DEVICE_ATTR(_id, _name, _mode, _show, _store) \
 struct device_attribute dev_attr_##_id = __ATTR(_name, _mode, _show, _store)
 
+static int qeth_l3_string_to_ipaddr(const char *buf,
+				    enum qeth_prot_versions proto, u8 *addr)
+{
+	const char *end;
+
+	if ((proto == QETH_PROT_IPV4 && !in4_pton(buf, -1, addr, -1, &end)) ||
+	    (proto == QETH_PROT_IPV6 && !in6_pton(buf, -1, addr, -1, &end)))
+		return -EINVAL;
+	return 0;
+}
+
 static ssize_t qeth_l3_dev_route_show(struct qeth_card *card,
 			struct qeth_routing_info *route, char *buf)
 {
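The new qeth_l3_string_to_ipaddr() leans on in4_pton()/in6_pton() from <linux/inet.h>, which return non-zero on success; a srclen of -1 means scan to the terminator and a delim of -1 means no extra delimiter is accepted. A hypothetical call against the helper:

	u8 addr[16] = {};

	/* fills the first 4 bytes of addr; returns 0 or -EINVAL */
	if (qeth_l3_string_to_ipaddr("192.168.0.1", QETH_PROT_IPV4, addr))
		pr_warn("malformed address\n");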
@@ -262,7 +274,7 @@ static ssize_t qeth_l3_dev_hsuid_store(struct device *dev,
 	struct qeth_card *card = dev_get_drvdata(dev);
 	struct qeth_ipaddr *addr;
 	char *tmp;
-	int i;
+	int rc, i;
 
 	if (!card)
 		return -EINVAL;
@@ -331,11 +343,11 @@ static ssize_t qeth_l3_dev_hsuid_store(struct device *dev,
 		return -ENOMEM;
 
 	spin_lock_bh(&card->ip_lock);
-	qeth_l3_add_ip(card, addr);
+	rc = qeth_l3_add_ip(card, addr);
 	spin_unlock_bh(&card->ip_lock);
 	kfree(addr);
 
-	return count;
+	return rc ? rc : count;
 }
 
 static DEVICE_ATTR(hsuid, 0644, qeth_l3_dev_hsuid_show,
@@ -573,7 +585,7 @@ static ssize_t qeth_l3_dev_ipato_del_store(const char *buf, size_t count,
 	mutex_lock(&card->conf_mutex);
 	rc = qeth_l3_parse_ipatoe(buf, proto, addr, &mask_bits);
 	if (!rc)
-		qeth_l3_del_ipato_entry(card, proto, addr, mask_bits);
+		rc = qeth_l3_del_ipato_entry(card, proto, addr, mask_bits);
 	mutex_unlock(&card->conf_mutex);
 	return rc ? rc : count;
 }
@@ -693,22 +705,25 @@ static const struct attribute_group qeth_device_ipato_group = {
 	.attrs = qeth_ipato_device_attrs,
 };
 
-static ssize_t qeth_l3_dev_vipa_add_show(char *buf, struct qeth_card *card,
-			enum qeth_prot_versions proto)
+static ssize_t qeth_l3_dev_ip_add_show(struct device *dev, char *buf,
+				       enum qeth_prot_versions proto,
+				       enum qeth_ip_types type)
 {
+	struct qeth_card *card = dev_get_drvdata(dev);
 	struct qeth_ipaddr *ipaddr;
 	char addr_str[40];
 	int str_len = 0;
 	int entry_len; /* length of 1 entry string, differs between v4 and v6 */
 	int i;
 
+	if (!card)
+		return -EINVAL;
+
 	entry_len = (proto == QETH_PROT_IPV4)? 12 : 40;
 	entry_len += 2; /* \n + terminator */
 	spin_lock_bh(&card->ip_lock);
 	hash_for_each(card->ip_htable, i, ipaddr, hnode) {
-		if (ipaddr->proto != proto)
-			continue;
-		if (ipaddr->type != QETH_IP_TYPE_VIPA)
+		if (ipaddr->proto != proto || ipaddr->type != type)
 			continue;
 		/* String must not be longer than PAGE_SIZE. So we check if
 		 * string length gets near PAGE_SIZE. Then we can safely display
@@ -727,14 +742,11 @@ static ssize_t qeth_l3_dev_vipa_add_show(char *buf, struct qeth_card *card,
 }
 
 static ssize_t qeth_l3_dev_vipa_add4_show(struct device *dev,
-			struct device_attribute *attr, char *buf)
+					  struct device_attribute *attr,
+					  char *buf)
 {
-	struct qeth_card *card = dev_get_drvdata(dev);
-
-	if (!card)
-		return -EINVAL;
-
-	return qeth_l3_dev_vipa_add_show(buf, card, QETH_PROT_IPV4);
+	return qeth_l3_dev_ip_add_show(dev, buf, QETH_PROT_IPV4,
+				       QETH_IP_TYPE_VIPA);
 }
 
 static int qeth_l3_parse_vipae(const char *buf, enum qeth_prot_versions proto,
@@ -784,7 +796,7 @@ static ssize_t qeth_l3_dev_vipa_del_store(const char *buf, size_t count,
 	mutex_lock(&card->conf_mutex);
 	rc = qeth_l3_parse_vipae(buf, proto, addr);
 	if (!rc)
-		qeth_l3_del_vipa(card, proto, addr);
+		rc = qeth_l3_del_vipa(card, proto, addr);
 	mutex_unlock(&card->conf_mutex);
 	return rc ? rc : count;
 }
@@ -804,14 +816,11 @@ static QETH_DEVICE_ATTR(vipa_del4, del4, 0200, NULL,
 			qeth_l3_dev_vipa_del4_store);
 
 static ssize_t qeth_l3_dev_vipa_add6_show(struct device *dev,
-				struct device_attribute *attr, char *buf)
+					  struct device_attribute *attr,
+					  char *buf)
 {
-	struct qeth_card *card = dev_get_drvdata(dev);
-
-	if (!card)
-		return -EINVAL;
-
-	return qeth_l3_dev_vipa_add_show(buf, card, QETH_PROT_IPV6);
+	return qeth_l3_dev_ip_add_show(dev, buf, QETH_PROT_IPV6,
+				       QETH_IP_TYPE_VIPA);
 }
 
 static ssize_t qeth_l3_dev_vipa_add6_store(struct device *dev,
@@ -856,48 +865,12 @@ static const struct attribute_group qeth_device_vipa_group = {
 	.attrs = qeth_vipa_device_attrs,
 };
 
-static ssize_t qeth_l3_dev_rxip_add_show(char *buf, struct qeth_card *card,
-		       enum qeth_prot_versions proto)
-{
-	struct qeth_ipaddr *ipaddr;
-	char addr_str[40];
-	int str_len = 0;
-	int entry_len; /* length of 1 entry string, differs between v4 and v6 */
-	int i;
-
-	entry_len = (proto == QETH_PROT_IPV4)? 12 : 40;
-	entry_len += 2; /* \n + terminator */
-	spin_lock_bh(&card->ip_lock);
-	hash_for_each(card->ip_htable, i, ipaddr, hnode) {
-		if (ipaddr->proto != proto)
-			continue;
-		if (ipaddr->type != QETH_IP_TYPE_RXIP)
-			continue;
-		/* String must not be longer than PAGE_SIZE. So we check if
-		 * string length gets near PAGE_SIZE. Then we can savely display
-		 * the next IPv6 address (worst case, compared to IPv4) */
-		if ((PAGE_SIZE - str_len) <= entry_len)
-			break;
-		qeth_l3_ipaddr_to_string(proto, (const u8 *)&ipaddr->u,
-			addr_str);
-		str_len += snprintf(buf + str_len, PAGE_SIZE - str_len, "%s\n",
-				    addr_str);
-	}
-	spin_unlock_bh(&card->ip_lock);
-	str_len += snprintf(buf + str_len, PAGE_SIZE - str_len, "\n");
-
-	return str_len;
-}
-
 static ssize_t qeth_l3_dev_rxip_add4_show(struct device *dev,
-			struct device_attribute *attr, char *buf)
+					  struct device_attribute *attr,
+					  char *buf)
 {
-	struct qeth_card *card = dev_get_drvdata(dev);
-
-	if (!card)
-		return -EINVAL;
-
-	return qeth_l3_dev_rxip_add_show(buf, card, QETH_PROT_IPV4);
+	return qeth_l3_dev_ip_add_show(dev, buf, QETH_PROT_IPV4,
+				       QETH_IP_TYPE_RXIP);
 }
 
 static int qeth_l3_parse_rxipe(const char *buf, enum qeth_prot_versions proto,
@@ -964,7 +937,7 @@ static ssize_t qeth_l3_dev_rxip_del_store(const char *buf, size_t count,
 	mutex_lock(&card->conf_mutex);
 	rc = qeth_l3_parse_rxipe(buf, proto, addr);
 	if (!rc)
-		qeth_l3_del_rxip(card, proto, addr);
+		rc = qeth_l3_del_rxip(card, proto, addr);
 	mutex_unlock(&card->conf_mutex);
 	return rc ? rc : count;
 }
@@ -984,14 +957,11 @@ static QETH_DEVICE_ATTR(rxip_del4, del4, 0200, NULL,
 			qeth_l3_dev_rxip_del4_store);
 
 static ssize_t qeth_l3_dev_rxip_add6_show(struct device *dev,
-		struct device_attribute *attr, char *buf)
+					  struct device_attribute *attr,
+					  char *buf)
 {
-	struct qeth_card *card = dev_get_drvdata(dev);
-
-	if (!card)
-		return -EINVAL;
-
-	return qeth_l3_dev_rxip_add_show(buf, card, QETH_PROT_IPV6);
+	return qeth_l3_dev_ip_add_show(dev, buf, QETH_PROT_IPV6,
+				       QETH_IP_TYPE_RXIP);
 }
 
 static ssize_t qeth_l3_dev_rxip_add6_store(struct device *dev,
diff --git a/drivers/scsi/qedf/drv_fcoe_fw_funcs.c b/drivers/scsi/qedf/drv_fcoe_fw_funcs.c
index 7d91e53562f8..a980ef756a67 100644
--- a/drivers/scsi/qedf/drv_fcoe_fw_funcs.c
+++ b/drivers/scsi/qedf/drv_fcoe_fw_funcs.c
@@ -25,15 +25,17 @@ int init_initiator_rw_fcoe_task(struct fcoe_task_params *task_params,
 				u32 task_retry_id,
 				u8 fcp_cmd_payload[32])
 {
-	struct fcoe_task_context *ctx = task_params->context;
+	struct e4_fcoe_task_context *ctx = task_params->context;
+	const u8 val_byte = ctx->ystorm_ag_context.byte0;
+	struct e4_ustorm_fcoe_task_ag_ctx *u_ag_ctx;
 	struct ystorm_fcoe_task_st_ctx *y_st_ctx;
 	struct tstorm_fcoe_task_st_ctx *t_st_ctx;
-	struct ustorm_fcoe_task_ag_ctx *u_ag_ctx;
 	struct mstorm_fcoe_task_st_ctx *m_st_ctx;
 	u32 io_size, val;
 	bool slow_sgl;
 
 	memset(ctx, 0, sizeof(*(ctx)));
+	ctx->ystorm_ag_context.byte0 = val_byte;
 	slow_sgl = scsi_is_slow_sgl(sgl_task_params->num_sges,
 				    sgl_task_params->small_mid_sge);
 	io_size = (task_params->task_type == FCOE_TASK_TYPE_WRITE_INITIATOR ?
@@ -43,20 +45,20 @@ int init_initiator_rw_fcoe_task(struct fcoe_task_params *task_params,
 	y_st_ctx = &ctx->ystorm_st_context;
 	y_st_ctx->data_2_trns_rem = cpu_to_le32(io_size);
 	y_st_ctx->task_rety_identifier = cpu_to_le32(task_retry_id);
-	y_st_ctx->task_type = task_params->task_type;
+	y_st_ctx->task_type = (u8)task_params->task_type;
 	memcpy(&y_st_ctx->tx_info_union.fcp_cmd_payload,
 	       fcp_cmd_payload, sizeof(struct fcoe_fcp_cmd_payload));
 
 	/* Tstorm ctx */
 	t_st_ctx = &ctx->tstorm_st_context;
-	t_st_ctx->read_only.dev_type = (task_params->is_tape_device == 1 ?
-					FCOE_TASK_DEV_TYPE_TAPE :
-					FCOE_TASK_DEV_TYPE_DISK);
+	t_st_ctx->read_only.dev_type = (u8)(task_params->is_tape_device == 1 ?
+					    FCOE_TASK_DEV_TYPE_TAPE :
+					    FCOE_TASK_DEV_TYPE_DISK);
 	t_st_ctx->read_only.cid = cpu_to_le32(task_params->conn_cid);
 	val = cpu_to_le32(task_params->cq_rss_number);
 	t_st_ctx->read_only.glbl_q_num = val;
 	t_st_ctx->read_only.fcp_cmd_trns_size = cpu_to_le32(io_size);
-	t_st_ctx->read_only.task_type = task_params->task_type;
+	t_st_ctx->read_only.task_type = (u8)task_params->task_type;
 	SET_FIELD(t_st_ctx->read_write.flags,
 		  FCOE_TSTORM_FCOE_TASK_ST_CTX_READ_WRITE_EXP_FIRST_FRAME, 1);
 	t_st_ctx->read_write.rx_id = cpu_to_le16(FCOE_RX_ID);
@@ -88,6 +90,8 @@ int init_initiator_rw_fcoe_task(struct fcoe_task_params *task_params,
 		SET_FIELD(m_st_ctx->flags,
 			  MSTORM_FCOE_TASK_ST_CTX_TX_SGL_MODE,
 			  (slow_sgl ? SCSI_TX_SLOW_SGL : SCSI_FAST_SGL));
+		m_st_ctx->sgl_params.sgl_num_sges =
+			cpu_to_le16(sgl_task_params->num_sges);
 	} else {
 		/* Tstorm ctx */
 		SET_FIELD(t_st_ctx->read_write.flags,
@@ -101,7 +105,9 @@ int init_initiator_rw_fcoe_task(struct fcoe_task_params *task_params,
 				      sgl_task_params);
 	}
 
+	/* Init Sqe */
 	init_common_sqe(task_params, SEND_FCOE_CMD);
+
 	return 0;
 }
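Both task-init paths now preserve ystorm_ag_context.byte0 across the full context memset; on the E4 HSI this byte appears to carry validation state owned by the firmware (the exact semantics are an assumption here), so wiping it would be destructive. The idiom in isolation:

	const u8 val_byte = ctx->ystorm_ag_context.byte0;

	memset(ctx, 0, sizeof(*ctx));		/* wipe the whole context */
	ctx->ystorm_ag_context.byte0 = val_byte;	/* restore fw-owned byte */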
 
@@ -112,14 +118,16 @@ int init_initiator_midpath_unsolicited_fcoe_task(
 	struct scsi_sgl_task_params *rx_sgl_task_params,
 	u8 fw_to_place_fc_header)
 {
-	struct fcoe_task_context *ctx = task_params->context;
+	struct e4_fcoe_task_context *ctx = task_params->context;
+	const u8 val_byte = ctx->ystorm_ag_context.byte0;
+	struct e4_ustorm_fcoe_task_ag_ctx *u_ag_ctx;
 	struct ystorm_fcoe_task_st_ctx *y_st_ctx;
 	struct tstorm_fcoe_task_st_ctx *t_st_ctx;
-	struct ustorm_fcoe_task_ag_ctx *u_ag_ctx;
 	struct mstorm_fcoe_task_st_ctx *m_st_ctx;
 	u32 val;
 
 	memset(ctx, 0, sizeof(*(ctx)));
+	ctx->ystorm_ag_context.byte0 = val_byte;
 
 	/* Init Ystorm */
 	y_st_ctx = &ctx->ystorm_st_context;
@@ -129,7 +137,7 @@ int init_initiator_midpath_unsolicited_fcoe_task(
 	SET_FIELD(y_st_ctx->sgl_mode,
 		  YSTORM_FCOE_TASK_ST_CTX_TX_SGL_MODE, SCSI_FAST_SGL);
 	y_st_ctx->data_2_trns_rem = cpu_to_le32(task_params->tx_io_size);
-	y_st_ctx->task_type = task_params->task_type;
+	y_st_ctx->task_type = (u8)task_params->task_type;
 	memcpy(&y_st_ctx->tx_info_union.tx_params.mid_path,
 	       mid_path_fc_header, sizeof(struct fcoe_tx_mid_path_params));
 
@@ -148,7 +156,7 @@ int init_initiator_midpath_unsolicited_fcoe_task(
 	t_st_ctx->read_only.cid = cpu_to_le32(task_params->conn_cid);
 	val = cpu_to_le32(task_params->cq_rss_number);
 	t_st_ctx->read_only.glbl_q_num = val;
-	t_st_ctx->read_only.task_type = task_params->task_type;
+	t_st_ctx->read_only.task_type = (u8)task_params->task_type;
 	SET_FIELD(t_st_ctx->read_write.flags,
 		  FCOE_TSTORM_FCOE_TASK_ST_CTX_READ_WRITE_EXP_FIRST_FRAME, 1);
 	t_st_ctx->read_write.rx_id = cpu_to_le16(FCOE_RX_ID);
@@ -182,9 +190,10 @@ int init_initiator_cleanup_fcoe_task(struct fcoe_task_params *task_params)
 }
 
 int init_initiator_sequence_recovery_fcoe_task(
-	struct fcoe_task_params *task_params, u32 off)
+	struct fcoe_task_params *task_params, u32 desired_offset)
 {
 	init_common_sqe(task_params, FCOE_SEQUENCE_RECOVERY);
-	task_params->sqe->additional_info_union.seq_rec_updated_offset = off;
+	task_params->sqe->additional_info_union.seq_rec_updated_offset =
+								desired_offset;
 	return 0;
 }
diff --git a/drivers/scsi/qedf/drv_fcoe_fw_funcs.h b/drivers/scsi/qedf/drv_fcoe_fw_funcs.h
index f9c50faa748e..b5c236efd465 100644
--- a/drivers/scsi/qedf/drv_fcoe_fw_funcs.h
+++ b/drivers/scsi/qedf/drv_fcoe_fw_funcs.h
@@ -13,7 +13,7 @@
 
 struct fcoe_task_params {
 	/* Output parameter [set/filled by the HSI function] */
-	struct fcoe_task_context *context;
+	struct e4_fcoe_task_context *context;
 
 	/* Output parameter [set/filled by the HSI function] */
 	struct fcoe_wqe *sqe;
diff --git a/drivers/scsi/qedf/qedf.h b/drivers/scsi/qedf/qedf.h
index 9bf7b227e69a..c105a2e48ac1 100644
--- a/drivers/scsi/qedf/qedf.h
+++ b/drivers/scsi/qedf/qedf.h
@@ -129,7 +129,7 @@ struct qedf_ioreq {
 	struct delayed_work timeout_work;
 	struct completion tm_done;
 	struct completion abts_done;
-	struct fcoe_task_context *task;
+	struct e4_fcoe_task_context *task;
 	struct fcoe_task_params *task_params;
 	struct scsi_sgl_task_params *sgl_task_params;
 	int idx;
@@ -465,7 +465,7 @@ extern void qedf_cmd_timer_set(struct qedf_ctx *qedf, struct qedf_ioreq *io_req,
 	unsigned int timer_msec);
 extern int qedf_init_mp_req(struct qedf_ioreq *io_req);
 extern void qedf_init_mp_task(struct qedf_ioreq *io_req,
-	struct fcoe_task_context *task_ctx, struct fcoe_wqe *wqe);
+	struct e4_fcoe_task_context *task_ctx, struct fcoe_wqe *sqe);
 extern u16 qedf_get_sqe_idx(struct qedf_rport *fcport);
 extern void qedf_ring_doorbell(struct qedf_rport *fcport);
 extern void qedf_process_els_compl(struct qedf_ctx *qedf, struct fcoe_cqe *cqe,
diff --git a/drivers/scsi/qedf/qedf_els.c b/drivers/scsi/qedf/qedf_els.c
index 59c18ca4cda9..aa22b11436ba 100644
--- a/drivers/scsi/qedf/qedf_els.c
+++ b/drivers/scsi/qedf/qedf_els.c
@@ -19,7 +19,7 @@ static int qedf_initiate_els(struct qedf_rport *fcport, unsigned int op,
 	struct qedf_ioreq *els_req;
 	struct qedf_mp_req *mp_req;
 	struct fc_frame_header *fc_hdr;
-	struct fcoe_task_context *task;
+	struct e4_fcoe_task_context *task;
 	int rc = 0;
 	uint32_t did, sid;
 	uint16_t xid;
diff --git a/drivers/scsi/qedf/qedf_hsi.h b/drivers/scsi/qedf/qedf_hsi.h
index 7faef80c5f7a..503c1ae3ccd0 100644
--- a/drivers/scsi/qedf/qedf_hsi.h
+++ b/drivers/scsi/qedf/qedf_hsi.h
@@ -225,19 +225,6 @@ enum fcoe_cqe_type {
 	MAX_FCOE_CQE_TYPE
 };
 
-
-/*
- * FCoE device type
- */
-enum fcoe_device_type {
-	FCOE_TASK_DEV_TYPE_DISK,
-	FCOE_TASK_DEV_TYPE_TAPE,
-	MAX_FCOE_DEVICE_TYPE
-};
-
-
-
-
 /*
  * FCoE fast path error codes
  */
@@ -332,31 +319,6 @@ enum fcoe_sp_error_code {
 	MAX_FCOE_SP_ERROR_CODE
 };
 
-
-/*
- * FCoE SQE request type
- */
-enum fcoe_sqe_request_type {
-	SEND_FCOE_CMD,
-	SEND_FCOE_MIDPATH,
-	SEND_FCOE_ABTS_REQUEST,
-	FCOE_EXCHANGE_CLEANUP,
-	FCOE_SEQUENCE_RECOVERY,
-	SEND_FCOE_XFER_RDY,
-	SEND_FCOE_RSP,
-	SEND_FCOE_RSP_WITH_SENSE_DATA,
-	SEND_FCOE_TARGET_DATA,
-	SEND_FCOE_INITIATOR_DATA,
-	/*
-	 * Xfer Continuation (==1) ready to be sent. Previous XFERs data
-	 * received successfully.
-	 */
-	SEND_FCOE_XFER_CONTINUATION_RDY,
-	SEND_FCOE_TARGET_ABTS_RSP,
-	MAX_FCOE_SQE_REQUEST_TYPE
-};
-
-
 /*
  * FCoE task TX state
  */
@@ -389,34 +351,4 @@ enum fcoe_task_tx_state {
 	MAX_FCOE_TASK_TX_STATE
 };
 
-
-/*
- * FCoE task type
- */
-enum fcoe_task_type {
-	FCOE_TASK_TYPE_WRITE_INITIATOR,
-	FCOE_TASK_TYPE_READ_INITIATOR,
-	FCOE_TASK_TYPE_MIDPATH,
-	FCOE_TASK_TYPE_UNSOLICITED,
-	FCOE_TASK_TYPE_ABTS,
-	FCOE_TASK_TYPE_EXCHANGE_CLEANUP,
-	FCOE_TASK_TYPE_SEQUENCE_CLEANUP,
-	FCOE_TASK_TYPE_WRITE_TARGET,
-	FCOE_TASK_TYPE_READ_TARGET,
-	FCOE_TASK_TYPE_RSP,
-	FCOE_TASK_TYPE_RSP_SENSE_DATA,
-	FCOE_TASK_TYPE_ABTS_TARGET,
-	FCOE_TASK_TYPE_ENUM_SIZE,
-	MAX_FCOE_TASK_TYPE
-};
-
-struct scsi_glbl_queue_entry {
-	/* Start physical address for the RQ (receive queue) PBL. */
-	struct regpair rq_pbl_addr;
-	/* Start physical address for the CQ (completion queue) PBL. */
-	struct regpair cq_pbl_addr;
-	/* Start physical address for the CMDQ (command queue) PBL. */
-	struct regpair cmdq_pbl_addr;
-};
-
 #endif /* __QEDF_HSI__ */
diff --git a/drivers/scsi/qedf/qedf_io.c b/drivers/scsi/qedf/qedf_io.c
index ded386036c27..b15e69586a36 100644
--- a/drivers/scsi/qedf/qedf_io.c
+++ b/drivers/scsi/qedf/qedf_io.c
@@ -579,7 +579,7 @@ static void qedf_build_fcp_cmnd(struct qedf_ioreq *io_req,
 }
 
 static void  qedf_init_task(struct qedf_rport *fcport, struct fc_lport *lport,
-	struct qedf_ioreq *io_req, struct fcoe_task_context *task_ctx,
+	struct qedf_ioreq *io_req, struct e4_fcoe_task_context *task_ctx,
 	struct fcoe_wqe *sqe)
 {
 	enum fcoe_task_type task_type;
@@ -597,7 +597,7 @@ static void  qedf_init_task(struct qedf_rport *fcport, struct fc_lport *lport,
 
 	/* Note init_initiator_rw_fcoe_task memsets the task context */
 	io_req->task = task_ctx;
-	memset(task_ctx, 0, sizeof(struct fcoe_task_context));
+	memset(task_ctx, 0, sizeof(struct e4_fcoe_task_context));
 	memset(io_req->task_params, 0, sizeof(struct fcoe_task_params));
 	memset(io_req->sgl_task_params, 0, sizeof(struct scsi_sgl_task_params));
 
@@ -673,7 +673,7 @@ static void  qedf_init_task(struct qedf_rport *fcport, struct fc_lport *lport,
 }
 
 void qedf_init_mp_task(struct qedf_ioreq *io_req,
-	struct fcoe_task_context *task_ctx, struct fcoe_wqe *sqe)
+	struct e4_fcoe_task_context *task_ctx, struct fcoe_wqe *sqe)
 {
 	struct qedf_mp_req *mp_req = &(io_req->mp_req);
 	struct qedf_rport *fcport = io_req->fcport;
@@ -691,7 +691,7 @@ void qedf_init_mp_task(struct qedf_ioreq *io_req,
 
 	memset(&tx_sgl_task_params, 0, sizeof(struct scsi_sgl_task_params));
 	memset(&rx_sgl_task_params, 0, sizeof(struct scsi_sgl_task_params));
-	memset(task_ctx, 0, sizeof(struct fcoe_task_context));
+	memset(task_ctx, 0, sizeof(struct e4_fcoe_task_context));
 	memset(&task_fc_hdr, 0, sizeof(struct fcoe_tx_mid_path_params));
 
 	/* Setup the task from io_req for easy reference */
@@ -844,7 +844,7 @@ int qedf_post_io_req(struct qedf_rport *fcport, struct qedf_ioreq *io_req)
 	struct Scsi_Host *host = sc_cmd->device->host;
 	struct fc_lport *lport = shost_priv(host);
 	struct qedf_ctx *qedf = lport_priv(lport);
-	struct fcoe_task_context *task_ctx;
+	struct e4_fcoe_task_context *task_ctx;
 	u16 xid;
 	enum fcoe_task_type req_type = 0;
 	struct fcoe_wqe *sqe;
@@ -1065,7 +1065,7 @@ void qedf_scsi_completion(struct qedf_ctx *qedf, struct fcoe_cqe *cqe,
 	struct qedf_ioreq *io_req)
 {
 	u16 xid, rval;
-	struct fcoe_task_context *task_ctx;
+	struct e4_fcoe_task_context *task_ctx;
 	struct scsi_cmnd *sc_cmd;
 	struct fcoe_cqe_rsp_info *fcp_rsp;
 	struct qedf_rport *fcport;
@@ -1722,7 +1722,7 @@ int qedf_initiate_cleanup(struct qedf_ioreq *io_req,
 	struct qedf_rport *fcport;
 	struct qedf_ctx *qedf;
 	uint16_t xid;
-	struct fcoe_task_context *task;
+	struct e4_fcoe_task_context *task;
 	int tmo = 0;
 	int rc = SUCCESS;
 	unsigned long flags;
@@ -1835,7 +1835,7 @@ static int qedf_execute_tmf(struct qedf_rport *fcport, struct scsi_cmnd *sc_cmd,
 	uint8_t tm_flags)
 {
 	struct qedf_ioreq *io_req;
-	struct fcoe_task_context *task;
+	struct e4_fcoe_task_context *task;
 	struct qedf_ctx *qedf = fcport->qedf;
 	struct fc_lport *lport = qedf->lport;
 	int rc = 0;
@@ -2005,17 +2005,18 @@ void qedf_process_unsol_compl(struct qedf_ctx *qedf, uint16_t que_idx,
 	struct qedf_io_work *io_work;
 	u32 bdq_idx;
 	void *bdq_addr;
+	struct scsi_bd *p_bd_info;
 
+	p_bd_info = &cqe->cqe_info.unsolic_info.bd_info;
 	QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_UNSOL,
-	    "address.hi=%x address.lo=%x opaque_data.hi=%x "
-	    "opaque_data.lo=%x bdq_prod_idx=%u len=%u.\n",
-	    le32_to_cpu(cqe->cqe_info.unsolic_info.bd_info.address.hi),
-	    le32_to_cpu(cqe->cqe_info.unsolic_info.bd_info.address.lo),
-	    le32_to_cpu(cqe->cqe_info.unsolic_info.bd_info.opaque.hi),
-	    le32_to_cpu(cqe->cqe_info.unsolic_info.bd_info.opaque.lo),
-	    qedf->bdq_prod_idx, pktlen);
-
-	bdq_idx = le32_to_cpu(cqe->cqe_info.unsolic_info.bd_info.opaque.lo);
+		  "address.hi=%x, address.lo=%x, opaque_data.hi=%x, opaque_data.lo=%x, bdq_prod_idx=%u, len=%u\n",
+		  le32_to_cpu(p_bd_info->address.hi),
+		  le32_to_cpu(p_bd_info->address.lo),
+		  le32_to_cpu(p_bd_info->opaque.fcoe_opaque.hi),
+		  le32_to_cpu(p_bd_info->opaque.fcoe_opaque.lo),
+		  qedf->bdq_prod_idx, pktlen);
+
+	bdq_idx = le32_to_cpu(p_bd_info->opaque.fcoe_opaque.lo);
 	if (bdq_idx >= QEDF_BDQ_SIZE) {
 		QEDF_ERR(&(qedf->dbg_ctx), "bdq_idx is out of range %d.\n",
 		    bdq_idx);
diff --git a/drivers/scsi/qedf/qedf_main.c b/drivers/scsi/qedf/qedf_main.c
index 4809debc6110..ccd9a08ea030 100644
--- a/drivers/scsi/qedf/qedf_main.c
+++ b/drivers/scsi/qedf/qedf_main.c
@@ -1860,7 +1860,7 @@ static bool qedf_fp_has_work(struct qedf_fastpath *fp)
 	struct qedf_ctx *qedf = fp->qedf;
 	struct global_queue *que;
 	struct qed_sb_info *sb_info = fp->sb_info;
-	struct status_block *sb = sb_info->sb_virt;
+	struct status_block_e4 *sb = sb_info->sb_virt;
 	u16 prod_idx;
 
 	/* Get the pointer to the global CQ this completion is on */
@@ -1887,7 +1887,7 @@ static bool qedf_process_completions(struct qedf_fastpath *fp)
 {
 	struct qedf_ctx *qedf = fp->qedf;
 	struct qed_sb_info *sb_info = fp->sb_info;
-	struct status_block *sb = sb_info->sb_virt;
+	struct status_block_e4 *sb = sb_info->sb_virt;
 	struct global_queue *que;
 	u16 prod_idx;
 	struct fcoe_cqe *cqe;
@@ -2352,12 +2352,12 @@ void qedf_fp_io_handler(struct work_struct *work)
 static int qedf_alloc_and_init_sb(struct qedf_ctx *qedf,
 	struct qed_sb_info *sb_info, u16 sb_id)
 {
-	struct status_block *sb_virt;
+	struct status_block_e4 *sb_virt;
 	dma_addr_t sb_phys;
 	int ret;
 
 	sb_virt = dma_alloc_coherent(&qedf->pdev->dev,
-	    sizeof(struct status_block), &sb_phys, GFP_KERNEL);
+	    sizeof(struct status_block_e4), &sb_phys, GFP_KERNEL);
 
 	if (!sb_virt) {
 		QEDF_ERR(&(qedf->dbg_ctx), "Status block allocation failed "
@@ -2623,9 +2623,9 @@ static int qedf_alloc_bdq(struct qedf_ctx *qedf)
 	for (i = 0; i < QEDF_BDQ_SIZE; i++) {
 		pbl->address.hi = cpu_to_le32(U64_HI(qedf->bdq[i].buf_dma));
 		pbl->address.lo = cpu_to_le32(U64_LO(qedf->bdq[i].buf_dma));
-		pbl->opaque.hi = 0;
+		pbl->opaque.fcoe_opaque.hi = 0;
 		/* Opaque lo data is an index into the BDQ array */
-		pbl->opaque.lo = cpu_to_le32(i);
+		pbl->opaque.fcoe_opaque.lo = cpu_to_le32(i);
 		pbl++;
 	}
 
diff --git a/drivers/scsi/qedf/qedf_version.h b/drivers/scsi/qedf/qedf_version.h
index 397b3b8ee51a..c2478056356a 100644
--- a/drivers/scsi/qedf/qedf_version.h
+++ b/drivers/scsi/qedf/qedf_version.h
@@ -7,9 +7,9 @@
  *  this source tree.
  */
 
-#define QEDF_VERSION		"8.20.5.0"
+#define QEDF_VERSION		"8.33.0.20"
 #define QEDF_DRIVER_MAJOR_VER		8
-#define QEDF_DRIVER_MINOR_VER		20
-#define QEDF_DRIVER_REV_VER		5
-#define QEDF_DRIVER_ENG_VER		0
+#define QEDF_DRIVER_MINOR_VER		33
+#define QEDF_DRIVER_REV_VER		0
+#define QEDF_DRIVER_ENG_VER		20
 
diff --git a/drivers/scsi/qedi/qedi_debugfs.c b/drivers/scsi/qedi/qedi_debugfs.c
index 39d77818a677..fd8a1eea3163 100644
--- a/drivers/scsi/qedi/qedi_debugfs.c
+++ b/drivers/scsi/qedi/qedi_debugfs.c
@@ -152,7 +152,7 @@ qedi_gbl_ctx_show(struct seq_file *s, void *unused)
 {
 	struct qedi_fastpath *fp = NULL;
 	struct qed_sb_info *sb_info = NULL;
-	struct status_block *sb = NULL;
+	struct status_block_e4 *sb = NULL;
 	struct global_queue *que = NULL;
 	int id;
 	u16 prod_idx;
@@ -168,7 +168,7 @@ qedi_gbl_ctx_show(struct seq_file *s, void *unused)
 		sb_info = fp->sb_info;
 		sb = sb_info->sb_virt;
 		prod_idx = (sb->pi_array[QEDI_PROTO_CQ_PROD_IDX] &
-			    STATUS_BLOCK_PROD_INDEX_MASK);
+			    STATUS_BLOCK_E4_PROD_INDEX_MASK);
 		seq_printf(s, "SB PROD IDX: %d\n", prod_idx);
 		que = qedi->global_queues[fp->sb_id];
 		seq_printf(s, "DRV CONS IDX: %d\n", que->cq_cons_idx);
diff --git a/drivers/scsi/qedi/qedi_fw.c b/drivers/scsi/qedi/qedi_fw.c
index 20a9259304f2..667d7697ba01 100644
--- a/drivers/scsi/qedi/qedi_fw.c
+++ b/drivers/scsi/qedi/qedi_fw.c
@@ -87,7 +87,7 @@ static void qedi_process_text_resp(struct qedi_ctx *qedi,
 {
 	struct iscsi_conn *conn = qedi_conn->cls_conn->dd_data;
 	struct iscsi_session *session = conn->session;
-	struct iscsi_task_context *task_ctx;
+	struct e4_iscsi_task_context *task_ctx;
 	struct iscsi_text_rsp *resp_hdr_ptr;
 	struct iscsi_text_response_hdr *cqe_text_response;
 	struct qedi_cmd *cmd;
@@ -260,7 +260,7 @@ static void qedi_process_login_resp(struct qedi_ctx *qedi,
 {
 	struct iscsi_conn *conn = qedi_conn->cls_conn->dd_data;
 	struct iscsi_session *session = conn->session;
-	struct iscsi_task_context *task_ctx;
+	struct e4_iscsi_task_context *task_ctx;
 	struct iscsi_login_rsp *resp_hdr_ptr;
 	struct iscsi_login_response_hdr *cqe_login_response;
 	struct qedi_cmd *cmd;
@@ -326,7 +326,7 @@ static void qedi_get_rq_bdq_buf(struct qedi_ctx *qedi,
 		  (qedi->bdq_prod_idx % qedi->rq_num_entries));
 
 	/* Obtain buffer address from rqe_opaque */
-	idx = cqe->rqe_opaque.lo;
+	idx = cqe->rqe_opaque;
 	if (idx > (QEDI_BDQ_NUM - 1)) {
 		QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_CONN,
 			  "wrong idx %d returned by FW, dropping the unsolicited pkt\n",
@@ -335,8 +335,7 @@ static void qedi_get_rq_bdq_buf(struct qedi_ctx *qedi,
 	}
 
 	QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_CONN,
-		  "rqe_opaque.lo [0x%p], rqe_opaque.hi [0x%p], idx [%d]\n",
-		  cqe->rqe_opaque.lo, cqe->rqe_opaque.hi, idx);
+		  "rqe_opaque [0x%p], idx [%d]\n", cqe->rqe_opaque, idx);
 
 	QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_CONN,
 		  "unsol_cqe_type = %d\n", cqe->unsol_cqe_type);
@@ -363,7 +362,7 @@ static void qedi_put_rq_bdq_buf(struct qedi_ctx *qedi,
 	struct scsi_bd *pbl;
 
 	/* Obtain buffer address from rqe_opaque */
-	idx = cqe->rqe_opaque.lo;
+	idx = cqe->rqe_opaque;
 	if (idx > (QEDI_BDQ_NUM - 1)) {
 		QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_CONN,
 			  "wrong idx %d returned by FW, dropping the unsolicited pkt\n",
@@ -378,8 +377,10 @@ static void qedi_put_rq_bdq_buf(struct qedi_ctx *qedi,
 	QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_CONN,
 		  "pbl [0x%p] pbl->address hi [0x%llx] lo [0x%llx] idx [%d]\n",
 		  pbl, pbl->address.hi, pbl->address.lo, idx);
-	pbl->opaque.hi = 0;
-	pbl->opaque.lo = cpu_to_le32(QEDI_U64_LO(idx));
+	pbl->opaque.iscsi_opaque.reserved_zero[0] = 0;
+	pbl->opaque.iscsi_opaque.reserved_zero[1] = 0;
+	pbl->opaque.iscsi_opaque.reserved_zero[2] = 0;
+	pbl->opaque.iscsi_opaque.opaque = cpu_to_le32(idx);
 
 	/* Increment producer to let f/w know we've handled the frame */
 	qedi->bdq_prod_idx += count;
@@ -1017,7 +1018,7 @@ int qedi_send_iscsi_login(struct qedi_conn *qedi_conn,
 	struct scsi_sgl_task_params tx_sgl_task_params;
 	struct scsi_sgl_task_params rx_sgl_task_params;
 	struct iscsi_task_params task_params;
-	struct iscsi_task_context *fw_task_ctx;
+	struct e4_iscsi_task_context *fw_task_ctx;
 	struct qedi_ctx *qedi = qedi_conn->qedi;
 	struct iscsi_login_req *login_hdr;
 	struct scsi_sge *resp_sge = NULL;
@@ -1037,8 +1038,9 @@ int qedi_send_iscsi_login(struct qedi_conn *qedi_conn,
 		return -ENOMEM;
 
 	fw_task_ctx =
-	     (struct iscsi_task_context *)qedi_get_task_mem(&qedi->tasks, tid);
-	memset(fw_task_ctx, 0, sizeof(struct iscsi_task_context));
+	     (struct e4_iscsi_task_context *)qedi_get_task_mem(&qedi->tasks,
+							       tid);
+	memset(fw_task_ctx, 0, sizeof(struct e4_iscsi_task_context));
 
 	qedi_cmd->task_id = tid;
 
@@ -1119,7 +1121,7 @@ int qedi_send_iscsi_logout(struct qedi_conn *qedi_conn,
 	struct scsi_sgl_task_params tx_sgl_task_params;
 	struct scsi_sgl_task_params rx_sgl_task_params;
 	struct iscsi_task_params task_params;
-	struct iscsi_task_context *fw_task_ctx;
+	struct e4_iscsi_task_context *fw_task_ctx;
 	struct iscsi_logout *logout_hdr = NULL;
 	struct qedi_ctx *qedi = qedi_conn->qedi;
 	struct qedi_cmd *qedi_cmd;
@@ -1137,8 +1139,9 @@ int qedi_send_iscsi_logout(struct qedi_conn *qedi_conn,
 		return -ENOMEM;
 
 	fw_task_ctx =
-	     (struct iscsi_task_context *)qedi_get_task_mem(&qedi->tasks, tid);
-	memset(fw_task_ctx, 0, sizeof(struct iscsi_task_context));
+	     (struct e4_iscsi_task_context *)qedi_get_task_mem(&qedi->tasks,
+							       tid);
+	memset(fw_task_ctx, 0, sizeof(struct e4_iscsi_task_context));
 
 	qedi_cmd->task_id = tid;
 
@@ -1467,7 +1470,7 @@ static int qedi_send_iscsi_tmf(struct qedi_conn *qedi_conn,
 	struct iscsi_tmf_request_hdr tmf_pdu_header;
 	struct iscsi_task_params task_params;
 	struct qedi_ctx *qedi = qedi_conn->qedi;
-	struct iscsi_task_context *fw_task_ctx;
+	struct e4_iscsi_task_context *fw_task_ctx;
 	struct iscsi_conn *conn = qedi_conn->cls_conn->dd_data;
 	struct iscsi_task *ctask;
 	struct iscsi_tm *tmf_hdr;
@@ -1490,8 +1493,9 @@ static int qedi_send_iscsi_tmf(struct qedi_conn *qedi_conn,
 		return -ENOMEM;
 
 	fw_task_ctx =
-	     (struct iscsi_task_context *)qedi_get_task_mem(&qedi->tasks, tid);
-	memset(fw_task_ctx, 0, sizeof(struct iscsi_task_context));
+	     (struct e4_iscsi_task_context *)qedi_get_task_mem(&qedi->tasks,
+							       tid);
+	memset(fw_task_ctx, 0, sizeof(struct e4_iscsi_task_context));
 
 	qedi_cmd->task_id = tid;
 
@@ -1605,7 +1609,7 @@ int qedi_send_iscsi_text(struct qedi_conn *qedi_conn,
 	struct scsi_sgl_task_params tx_sgl_task_params;
 	struct scsi_sgl_task_params rx_sgl_task_params;
 	struct iscsi_task_params task_params;
-	struct iscsi_task_context *fw_task_ctx;
+	struct e4_iscsi_task_context *fw_task_ctx;
 	struct qedi_ctx *qedi = qedi_conn->qedi;
 	struct iscsi_text *text_hdr;
 	struct scsi_sge *req_sge = NULL;
@@ -1627,8 +1631,9 @@ int qedi_send_iscsi_text(struct qedi_conn *qedi_conn,
 		return -ENOMEM;
 
 	fw_task_ctx =
-	     (struct iscsi_task_context *)qedi_get_task_mem(&qedi->tasks, tid);
-	memset(fw_task_ctx, 0, sizeof(struct iscsi_task_context));
+	     (struct e4_iscsi_task_context *)qedi_get_task_mem(&qedi->tasks,
+							       tid);
+	memset(fw_task_ctx, 0, sizeof(struct e4_iscsi_task_context));
 
 	qedi_cmd->task_id = tid;
 
@@ -1705,7 +1710,7 @@ int qedi_send_iscsi_nopout(struct qedi_conn *qedi_conn,
 	struct scsi_sgl_task_params rx_sgl_task_params;
 	struct iscsi_task_params task_params;
 	struct qedi_ctx *qedi = qedi_conn->qedi;
-	struct iscsi_task_context *fw_task_ctx;
+	struct e4_iscsi_task_context *fw_task_ctx;
 	struct iscsi_nopout *nopout_hdr;
 	struct scsi_sge *resp_sge = NULL;
 	struct qedi_cmd *qedi_cmd;
@@ -1725,8 +1730,9 @@ int qedi_send_iscsi_nopout(struct qedi_conn *qedi_conn,
 		return -ENOMEM;
 
 	fw_task_ctx =
-	     (struct iscsi_task_context *)qedi_get_task_mem(&qedi->tasks, tid);
-	memset(fw_task_ctx, 0, sizeof(struct iscsi_task_context));
+	     (struct e4_iscsi_task_context *)qedi_get_task_mem(&qedi->tasks,
+							       tid);
+	memset(fw_task_ctx, 0, sizeof(struct e4_iscsi_task_context));
 
 	qedi_cmd->task_id = tid;
 
@@ -2046,7 +2052,7 @@ int qedi_iscsi_send_ioreq(struct iscsi_task *task)
 	struct iscsi_task_params task_params;
 	struct iscsi_conn_params conn_params;
 	struct scsi_initiator_cmd_params cmd_params;
-	struct iscsi_task_context *fw_task_ctx;
+	struct e4_iscsi_task_context *fw_task_ctx;
 	struct iscsi_cls_conn *cls_conn;
 	struct iscsi_scsi_req *hdr = (struct iscsi_scsi_req *)task->hdr;
 	enum iscsi_task_type task_type = MAX_ISCSI_TASK_TYPE;
@@ -2069,8 +2075,9 @@ int qedi_iscsi_send_ioreq(struct iscsi_task *task)
 		return -ENOMEM;
 
 	fw_task_ctx =
-	     (struct iscsi_task_context *)qedi_get_task_mem(&qedi->tasks, tid);
-	memset(fw_task_ctx, 0, sizeof(struct iscsi_task_context));
+	     (struct e4_iscsi_task_context *)qedi_get_task_mem(&qedi->tasks,
+							       tid);
+	memset(fw_task_ctx, 0, sizeof(struct e4_iscsi_task_context));
 
 	cmd->task_id = tid;
 
diff --git a/drivers/scsi/qedi/qedi_fw_api.c b/drivers/scsi/qedi/qedi_fw_api.c
index 7df32a68bd54..a269da1a6c75 100644
--- a/drivers/scsi/qedi/qedi_fw_api.c
+++ b/drivers/scsi/qedi/qedi_fw_api.c
@@ -203,12 +203,15 @@ static void init_default_iscsi_task(struct iscsi_task_params *task_params,
 				    struct data_hdr *pdu_header,
 				    enum iscsi_task_type task_type)
 {
-	struct iscsi_task_context *context;
-	u16 index;
+	struct e4_iscsi_task_context *context;
 	u32 val;
+	u16 index;
+	u8 val_byte;
 
 	context = task_params->context;
+	val_byte = context->mstorm_ag_context.cdu_validation;
 	memset(context, 0, sizeof(*context));
+	context->mstorm_ag_context.cdu_validation = val_byte;
 
 	for (index = 0; index <
 	     ARRAY_SIZE(context->ystorm_st_context.pdu_hdr.data.data);
@@ -222,7 +225,7 @@ static void init_default_iscsi_task(struct iscsi_task_params *task_params,
 					    cpu_to_le16(task_params->conn_icid);
 
 	SET_FIELD(context->ustorm_ag_context.flags1,
-		  USTORM_ISCSI_TASK_AG_CTX_R2T2RECV, 1);
+		  E4_USTORM_ISCSI_TASK_AG_CTX_R2T2RECV, 1);
 
 	context->ustorm_st_context.task_type = task_type;
 	context->ustorm_st_context.cq_rss_number = task_params->cq_rss_number;
@@ -252,10 +255,9 @@ void init_initiator_rw_cdb_ystorm_context(struct ystorm_iscsi_task_st_ctx *ystc,
 
 static
 void init_ustorm_task_contexts(struct ustorm_iscsi_task_st_ctx *ustorm_st_cxt,
-			       struct ustorm_iscsi_task_ag_ctx *ustorm_ag_cxt,
-			       u32 remaining_recv_len,
-			       u32 expected_data_transfer_len,
-			       u8 num_sges, bool tx_dif_conn_err_en)
+			struct e4_ustorm_iscsi_task_ag_ctx *ustorm_ag_cxt,
+			u32 remaining_recv_len, u32 expected_data_transfer_len,
+			u8 num_sges, bool tx_dif_conn_err_en)
 {
 	u32 val;
 
@@ -265,12 +267,12 @@ void init_ustorm_task_contexts(struct ustorm_iscsi_task_st_ctx *ustorm_st_cxt,
 	ustorm_st_cxt->exp_data_transfer_len = val;
 	SET_FIELD(ustorm_st_cxt->reg1.reg1_map, ISCSI_REG1_NUM_SGES, num_sges);
 	SET_FIELD(ustorm_ag_cxt->flags2,
-		  USTORM_ISCSI_TASK_AG_CTX_DIF_ERROR_CF_EN,
+		  E4_USTORM_ISCSI_TASK_AG_CTX_DIF_ERROR_CF_EN,
 		  tx_dif_conn_err_en ? 1 : 0);
 }
 
 static
-void set_rw_exp_data_acked_and_cont_len(struct iscsi_task_context *context,
+void set_rw_exp_data_acked_and_cont_len(struct e4_iscsi_task_context *context,
 					struct iscsi_conn_params  *conn_params,
 					enum iscsi_task_type task_type,
 					u32 task_size,
@@ -342,56 +344,57 @@ void init_rtdif_task_context(struct rdif_task_context *rdif_context,
 			     cpu_to_le16(dif_task_params->application_tag_mask);
 		SET_FIELD(rdif_context->flags0, RDIF_TASK_CONTEXT_CRC_SEED,
 			  dif_task_params->crc_seed ? 1 : 0);
-		SET_FIELD(rdif_context->flags0, RDIF_TASK_CONTEXT_HOSTGUARDTYPE,
+		SET_FIELD(rdif_context->flags0,
+			  RDIF_TASK_CONTEXT_HOST_GUARD_TYPE,
 			  dif_task_params->host_guard_type);
 		SET_FIELD(rdif_context->flags0,
-			  RDIF_TASK_CONTEXT_PROTECTIONTYPE,
+			  RDIF_TASK_CONTEXT_PROTECTION_TYPE,
 			  dif_task_params->protection_type);
 		SET_FIELD(rdif_context->flags0,
-			  RDIF_TASK_CONTEXT_INITIALREFTAGVALID, 1);
+			  RDIF_TASK_CONTEXT_INITIAL_REF_TAG_VALID, 1);
 		SET_FIELD(rdif_context->flags0,
-			  RDIF_TASK_CONTEXT_KEEPREFTAGCONST,
+			  RDIF_TASK_CONTEXT_KEEP_REF_TAG_CONST,
 			  dif_task_params->keep_ref_tag_const ? 1 : 0);
 		SET_FIELD(rdif_context->flags1,
-			  RDIF_TASK_CONTEXT_VALIDATEAPPTAG,
+			  RDIF_TASK_CONTEXT_VALIDATE_APP_TAG,
 			  (dif_task_params->validate_app_tag &&
 			  dif_task_params->dif_on_network) ? 1 : 0);
 		SET_FIELD(rdif_context->flags1,
-			  RDIF_TASK_CONTEXT_VALIDATEGUARD,
+			  RDIF_TASK_CONTEXT_VALIDATE_GUARD,
 			  (dif_task_params->validate_guard &&
 			  dif_task_params->dif_on_network) ? 1 : 0);
 		SET_FIELD(rdif_context->flags1,
-			  RDIF_TASK_CONTEXT_VALIDATEREFTAG,
+			  RDIF_TASK_CONTEXT_VALIDATE_REF_TAG,
 			  (dif_task_params->validate_ref_tag &&
 			  dif_task_params->dif_on_network) ? 1 : 0);
 		SET_FIELD(rdif_context->flags1,
-			  RDIF_TASK_CONTEXT_HOSTINTERFACE,
+			  RDIF_TASK_CONTEXT_HOST_INTERFACE,
 			  dif_task_params->dif_on_host ? 1 : 0);
 		SET_FIELD(rdif_context->flags1,
-			  RDIF_TASK_CONTEXT_NETWORKINTERFACE,
+			  RDIF_TASK_CONTEXT_NETWORK_INTERFACE,
 			  dif_task_params->dif_on_network ? 1 : 0);
 		SET_FIELD(rdif_context->flags1,
-			  RDIF_TASK_CONTEXT_FORWARDGUARD,
+			  RDIF_TASK_CONTEXT_FORWARD_GUARD,
 			  dif_task_params->forward_guard ? 1 : 0);
 		SET_FIELD(rdif_context->flags1,
-			  RDIF_TASK_CONTEXT_FORWARDAPPTAG,
+			  RDIF_TASK_CONTEXT_FORWARD_APP_TAG,
 			  dif_task_params->forward_app_tag ? 1 : 0);
 		SET_FIELD(rdif_context->flags1,
-			  RDIF_TASK_CONTEXT_FORWARDREFTAG,
+			  RDIF_TASK_CONTEXT_FORWARD_REF_TAG,
 			  dif_task_params->forward_ref_tag ? 1 : 0);
 		SET_FIELD(rdif_context->flags1,
-			  RDIF_TASK_CONTEXT_FORWARDAPPTAGWITHMASK,
+			  RDIF_TASK_CONTEXT_FORWARD_APP_TAG_WITH_MASK,
 			  dif_task_params->forward_app_tag_with_mask ? 1 : 0);
 		SET_FIELD(rdif_context->flags1,
-			  RDIF_TASK_CONTEXT_FORWARDREFTAGWITHMASK,
+			  RDIF_TASK_CONTEXT_FORWARD_REF_TAG_WITH_MASK,
 			  dif_task_params->forward_ref_tag_with_mask ? 1 : 0);
 		SET_FIELD(rdif_context->flags1,
-			  RDIF_TASK_CONTEXT_INTERVALSIZE,
+			  RDIF_TASK_CONTEXT_INTERVAL_SIZE,
 			  dif_task_params->dif_block_size_log - 9);
 		SET_FIELD(rdif_context->state,
-			  RDIF_TASK_CONTEXT_REFTAGMASK,
+			  RDIF_TASK_CONTEXT_REF_TAG_MASK,
 			  dif_task_params->ref_tag_mask);
-		SET_FIELD(rdif_context->state, RDIF_TASK_CONTEXT_IGNOREAPPTAG,
+		SET_FIELD(rdif_context->state, RDIF_TASK_CONTEXT_IGNORE_APP_TAG,
 			  dif_task_params->ignore_app_tag);
 	}
 
@@ -399,7 +402,7 @@ void init_rtdif_task_context(struct rdif_task_context *rdif_context,
 	    task_type == ISCSI_TASK_TYPE_INITIATOR_WRITE) {
 		tdif_context->app_tag_value =
 				  cpu_to_le16(dif_task_params->application_tag);
-		tdif_context->partial_crc_valueB =
+		tdif_context->partial_crc_value_b =
 		       cpu_to_le16(dif_task_params->crc_seed ? 0xffff : 0x0000);
 		tdif_context->partial_crc_value_a =
 		       cpu_to_le16(dif_task_params->crc_seed ? 0xffff : 0x0000);
@@ -407,64 +410,68 @@ void init_rtdif_task_context(struct rdif_task_context *rdif_context,
 			  dif_task_params->crc_seed ? 1 : 0);
 
 		SET_FIELD(tdif_context->flags0,
-			  TDIF_TASK_CONTEXT_SETERRORWITHEOP,
+			  TDIF_TASK_CONTEXT_SET_ERROR_WITH_EOP,
 			  dif_task_params->tx_dif_conn_err_en ? 1 : 0);
-		SET_FIELD(tdif_context->flags1, TDIF_TASK_CONTEXT_FORWARDGUARD,
+		SET_FIELD(tdif_context->flags1, TDIF_TASK_CONTEXT_FORWARD_GUARD,
 			  dif_task_params->forward_guard   ? 1 : 0);
-		SET_FIELD(tdif_context->flags1, TDIF_TASK_CONTEXT_FORWARDAPPTAG,
+		SET_FIELD(tdif_context->flags1,
+			  TDIF_TASK_CONTEXT_FORWARD_APP_TAG,
 			  dif_task_params->forward_app_tag ? 1 : 0);
-		SET_FIELD(tdif_context->flags1, TDIF_TASK_CONTEXT_FORWARDREFTAG,
+		SET_FIELD(tdif_context->flags1,
+			  TDIF_TASK_CONTEXT_FORWARD_REF_TAG,
 			  dif_task_params->forward_ref_tag ? 1 : 0);
-		SET_FIELD(tdif_context->flags1, TDIF_TASK_CONTEXT_INTERVALSIZE,
+		SET_FIELD(tdif_context->flags1, TDIF_TASK_CONTEXT_INTERVAL_SIZE,
 			  dif_task_params->dif_block_size_log - 9);
-		SET_FIELD(tdif_context->flags1, TDIF_TASK_CONTEXT_HOSTINTERFACE,
+		SET_FIELD(tdif_context->flags1,
+			  TDIF_TASK_CONTEXT_HOST_INTERFACE,
 			  dif_task_params->dif_on_host    ? 1 : 0);
 		SET_FIELD(tdif_context->flags1,
-			  TDIF_TASK_CONTEXT_NETWORKINTERFACE,
+			  TDIF_TASK_CONTEXT_NETWORK_INTERFACE,
 			  dif_task_params->dif_on_network ? 1 : 0);
 		val = cpu_to_le32(dif_task_params->initial_ref_tag);
 		tdif_context->initial_ref_tag = val;
 		tdif_context->app_tag_mask =
 			     cpu_to_le16(dif_task_params->application_tag_mask);
 		SET_FIELD(tdif_context->flags0,
-			  TDIF_TASK_CONTEXT_HOSTGUARDTYPE,
+			  TDIF_TASK_CONTEXT_HOST_GUARD_TYPE,
 			  dif_task_params->host_guard_type);
 		SET_FIELD(tdif_context->flags0,
-			  TDIF_TASK_CONTEXT_PROTECTIONTYPE,
+			  TDIF_TASK_CONTEXT_PROTECTION_TYPE,
 			  dif_task_params->protection_type);
 		SET_FIELD(tdif_context->flags0,
-			  TDIF_TASK_CONTEXT_INITIALREFTAGVALID,
+			  TDIF_TASK_CONTEXT_INITIAL_REF_TAG_VALID,
 			  dif_task_params->initial_ref_tag_is_valid ? 1 : 0);
 		SET_FIELD(tdif_context->flags0,
-			  TDIF_TASK_CONTEXT_KEEPREFTAGCONST,
+			  TDIF_TASK_CONTEXT_KEEP_REF_TAG_CONST,
 			  dif_task_params->keep_ref_tag_const ? 1 : 0);
-		SET_FIELD(tdif_context->flags1, TDIF_TASK_CONTEXT_VALIDATEGUARD,
+		SET_FIELD(tdif_context->flags1,
+			  TDIF_TASK_CONTEXT_VALIDATE_GUARD,
 			  (dif_task_params->validate_guard &&
 			   dif_task_params->dif_on_host) ? 1 : 0);
 		SET_FIELD(tdif_context->flags1,
-			  TDIF_TASK_CONTEXT_VALIDATEAPPTAG,
+			  TDIF_TASK_CONTEXT_VALIDATE_APP_TAG,
 			  (dif_task_params->validate_app_tag &&
 			  dif_task_params->dif_on_host) ? 1 : 0);
 		SET_FIELD(tdif_context->flags1,
-			  TDIF_TASK_CONTEXT_VALIDATEREFTAG,
+			  TDIF_TASK_CONTEXT_VALIDATE_REF_TAG,
 			  (dif_task_params->validate_ref_tag &&
 			   dif_task_params->dif_on_host) ? 1 : 0);
 		SET_FIELD(tdif_context->flags1,
-			  TDIF_TASK_CONTEXT_FORWARDAPPTAGWITHMASK,
+			  TDIF_TASK_CONTEXT_FORWARD_APP_TAG_WITH_MASK,
 			  dif_task_params->forward_app_tag_with_mask ? 1 : 0);
 		SET_FIELD(tdif_context->flags1,
-			  TDIF_TASK_CONTEXT_FORWARDREFTAGWITHMASK,
+			  TDIF_TASK_CONTEXT_FORWARD_REF_TAG_WITH_MASK,
 			  dif_task_params->forward_ref_tag_with_mask ? 1 : 0);
 		SET_FIELD(tdif_context->flags1,
-			  TDIF_TASK_CONTEXT_REFTAGMASK,
+			  TDIF_TASK_CONTEXT_REF_TAG_MASK,
 			  dif_task_params->ref_tag_mask);
 		SET_FIELD(tdif_context->flags0,
-			  TDIF_TASK_CONTEXT_IGNOREAPPTAG,
+			  TDIF_TASK_CONTEXT_IGNORE_APP_TAG,
 			  dif_task_params->ignore_app_tag ? 1 : 0);
 	}
 }
 
-static void set_local_completion_context(struct iscsi_task_context *context)
+static void set_local_completion_context(struct e4_iscsi_task_context *context)
 {
 	SET_FIELD(context->ystorm_st_context.state.flags,
 		  YSTORM_ISCSI_TASK_STATE_LOCAL_COMP, 1);
@@ -481,7 +488,7 @@ static int init_rw_iscsi_task(struct iscsi_task_params *task_params,
 			      struct scsi_dif_task_params *dif_task_params)
 {
 	u32 exp_data_transfer_len = conn_params->max_burst_length;
-	struct iscsi_task_context *cxt;
+	struct e4_iscsi_task_context *cxt;
 	bool slow_io = false;
 	u32 task_size, val;
 	u8 num_sges = 0;
@@ -494,19 +501,33 @@ static int init_rw_iscsi_task(struct iscsi_task_params *task_params,
 
 	cxt = task_params->context;
 
-	val = cpu_to_le32(task_size);
-	cxt->ystorm_st_context.pdu_hdr.cmd.expected_transfer_length = val;
-	init_initiator_rw_cdb_ystorm_context(&cxt->ystorm_st_context,
-					     cmd_params);
-	val = cpu_to_le32(cmd_params->sense_data_buffer_phys_addr.lo);
-	cxt->mstorm_st_context.sense_db.lo = val;
 
-	val = cpu_to_le32(cmd_params->sense_data_buffer_phys_addr.hi);
-	cxt->mstorm_st_context.sense_db.hi = val;
+	if (task_type == ISCSI_TASK_TYPE_TARGET_READ) {
+		set_local_completion_context(cxt);
+	} else if (task_type == ISCSI_TASK_TYPE_TARGET_WRITE) {
+		val = cpu_to_le32(task_size +
+			   ((struct iscsi_r2t_hdr *)pdu_header)->buffer_offset);
+		cxt->ystorm_st_context.pdu_hdr.r2t.desired_data_trns_len = val;
+		cxt->mstorm_st_context.expected_itt =
+						   cpu_to_le32(pdu_header->itt);
+	} else {
+		val = cpu_to_le32(task_size);
+		cxt->ystorm_st_context.pdu_hdr.cmd.expected_transfer_length =
+									    val;
+		init_initiator_rw_cdb_ystorm_context(&cxt->ystorm_st_context,
+						     cmd_params);
+		val = cpu_to_le32(cmd_params->sense_data_buffer_phys_addr.lo);
+		cxt->mstorm_st_context.sense_db.lo = val;
+
+		val = cpu_to_le32(cmd_params->sense_data_buffer_phys_addr.hi);
+		cxt->mstorm_st_context.sense_db.hi = val;
+	}
 
 	if (task_params->tx_io_size) {
 		init_dif_context_flags(&cxt->ystorm_st_context.state.dif_flags,
 				       dif_task_params);
+		init_dif_context_flags(&cxt->ustorm_st_context.dif_flags,
+				       dif_task_params);
 		init_scsi_sgl_context(&cxt->ystorm_st_context.state.sgl_params,
 				      &cxt->ystorm_st_context.state.data_desc,
 				      sgl_task_params);
@@ -595,7 +616,7 @@ int init_initiator_login_request_task(struct iscsi_task_params *task_params,
 				      struct scsi_sgl_task_params *tx_params,
 				      struct scsi_sgl_task_params *rx_params)
 {
-	struct iscsi_task_context *cxt;
+	struct e4_iscsi_task_context *cxt;
 
 	cxt = task_params->context;
 
@@ -637,7 +658,7 @@ int init_initiator_nop_out_task(struct iscsi_task_params *task_params,
 				struct scsi_sgl_task_params *tx_sgl_task_params,
 				struct scsi_sgl_task_params *rx_sgl_task_params)
 {
-	struct iscsi_task_context *cxt;
+	struct e4_iscsi_task_context *cxt;
 
 	cxt = task_params->context;
 
@@ -683,7 +704,7 @@ int init_initiator_logout_request_task(struct iscsi_task_params *task_params,
 				       struct scsi_sgl_task_params *tx_params,
 				       struct scsi_sgl_task_params *rx_params)
 {
-	struct iscsi_task_context *cxt;
+	struct e4_iscsi_task_context *cxt;
 
 	cxt = task_params->context;
 
@@ -738,7 +759,7 @@ int init_initiator_text_request_task(struct iscsi_task_params *task_params,
 				     struct scsi_sgl_task_params *tx_params,
 				     struct scsi_sgl_task_params *rx_params)
 {
-	struct iscsi_task_context *cxt;
+	struct e4_iscsi_task_context *cxt;
 
 	cxt = task_params->context;
 
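
Every SET_FIELD() call in the init_rtdif_task_context() hunks above changes only the spelling of the field name (RDIF_TASK_CONTEXT_FORWARDGUARD becomes RDIF_TASK_CONTEXT_FORWARD_GUARD, and so on), so the paired _MASK/_SHIFT constants in the qed HSI headers have to be renamed in lockstep. A minimal sketch of the contract these names satisfy, assuming the usual definition from the qed common HSI header:

#include <linux/types.h>

/* Each FIELD name carries matching FIELD_MASK and FIELD_SHIFT
 * constants; SET_FIELD clears the field, then writes the new value. */
#define SET_FIELD(value, name, flag)				\
	do {							\
		(value) &= ~(name##_MASK << name##_SHIFT);	\
		(value) |= (((u64)(flag)) & (u64)name##_MASK)	\
			   << name##_SHIFT;			\
	} while (0)
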
diff --git a/drivers/scsi/qedi/qedi_fw_iscsi.h b/drivers/scsi/qedi/qedi_fw_iscsi.h
index b6f24f91849d..c3deb77ac388 100644
--- a/drivers/scsi/qedi/qedi_fw_iscsi.h
+++ b/drivers/scsi/qedi/qedi_fw_iscsi.h
@@ -13,7 +13,7 @@
 #include "qedi_fw_scsi.h"
 
 struct iscsi_task_params {
-	struct iscsi_task_context *context;
+	struct e4_iscsi_task_context *context;
 	struct iscsi_wqe	  *sqe;
 	u32			  tx_io_size;
 	u32			  rx_io_size;
diff --git a/drivers/scsi/qedi/qedi_gbl.h b/drivers/scsi/qedi/qedi_gbl.h
index 63d793f46064..f5b5a31999aa 100644
--- a/drivers/scsi/qedi/qedi_gbl.h
+++ b/drivers/scsi/qedi/qedi_gbl.h
@@ -52,11 +52,12 @@ void qedi_update_itt_map(struct qedi_ctx *qedi, u32 tid, u32 proto_itt,
 void qedi_get_proto_itt(struct qedi_ctx *qedi, u32 tid, u32 *proto_itt);
 void qedi_get_task_tid(struct qedi_ctx *qedi, u32 itt, int16_t *tid);
 void qedi_process_iscsi_error(struct qedi_endpoint *ep,
-			      struct async_data *data);
+			      struct iscsi_eqe_data *data);
 void qedi_start_conn_recovery(struct qedi_ctx *qedi,
 			      struct qedi_conn *qedi_conn);
 struct qedi_conn *qedi_get_conn_from_id(struct qedi_ctx *qedi, u32 iscsi_cid);
-void qedi_process_tcp_error(struct qedi_endpoint *ep, struct async_data *data);
+void qedi_process_tcp_error(struct qedi_endpoint *ep,
+			    struct iscsi_eqe_data *data);
 void qedi_mark_device_missing(struct iscsi_cls_session *cls_session);
 void qedi_mark_device_available(struct iscsi_cls_session *cls_session);
 void qedi_reset_host_mtu(struct qedi_ctx *qedi, u16 mtu);
diff --git a/drivers/scsi/qedi/qedi_iscsi.c b/drivers/scsi/qedi/qedi_iscsi.c
index a02b34ea5cab..7ec7f6e00fb8 100644
--- a/drivers/scsi/qedi/qedi_iscsi.c
+++ b/drivers/scsi/qedi/qedi_iscsi.c
@@ -539,7 +539,6 @@ static int qedi_iscsi_offload_conn(struct qedi_endpoint *qedi_ep)
 	conn_info->ka_max_probe_cnt = DEF_KA_MAX_PROBE_COUNT;
 	conn_info->dup_ack_theshold = 3;
 	conn_info->rcv_wnd = 65535;
-	conn_info->cwnd = DEF_MAX_CWND;
 
 	conn_info->ss_thresh = 65535;
 	conn_info->srtt = 300;
@@ -557,8 +556,8 @@ static int qedi_iscsi_offload_conn(struct qedi_endpoint *qedi_ep)
 				       (qedi_ep->ip_type == TCP_IPV6),
 				       1, (qedi_ep->vlan_id != 0));
 
+	conn_info->cwnd = DEF_MAX_CWND * conn_info->mss;
 	conn_info->rcv_wnd_scale = 4;
-	conn_info->ts_ticks_per_second = 1000;
 	conn_info->da_timeout_value = 200;
 	conn_info->ack_frequency = 2;
 
@@ -1557,7 +1556,8 @@ char *qedi_get_iscsi_error(enum iscsi_error_types err_code)
 	return msg;
 }
 
-void qedi_process_iscsi_error(struct qedi_endpoint *ep, struct async_data *data)
+void qedi_process_iscsi_error(struct qedi_endpoint *ep,
+			      struct iscsi_eqe_data *data)
 {
 	struct qedi_conn *qedi_conn;
 	struct qedi_ctx *qedi;
@@ -1603,7 +1603,8 @@ void qedi_process_iscsi_error(struct qedi_endpoint *ep, struct async_data *data)
 		qedi_start_conn_recovery(qedi_conn->qedi, qedi_conn);
 }
 
-void qedi_process_tcp_error(struct qedi_endpoint *ep, struct async_data *data)
+void qedi_process_tcp_error(struct qedi_endpoint *ep,
+			    struct iscsi_eqe_data *data)
 {
 	struct qedi_conn *qedi_conn;
 
diff --git a/drivers/scsi/qedi/qedi_iscsi.h b/drivers/scsi/qedi/qedi_iscsi.h
index 3247287cb0e7..ea1315189922 100644
--- a/drivers/scsi/qedi/qedi_iscsi.h
+++ b/drivers/scsi/qedi/qedi_iscsi.h
@@ -182,7 +182,7 @@ struct qedi_cmd {
 	struct scsi_cmnd *scsi_cmd;
 	struct scatterlist *sg;
 	struct qedi_io_bdt io_tbl;
-	struct iscsi_task_context request;
+	struct e4_iscsi_task_context request;
 	unsigned char *sense_buffer;
 	dma_addr_t sense_buffer_dma;
 	u16 task_id;
diff --git a/drivers/scsi/qedi/qedi_main.c b/drivers/scsi/qedi/qedi_main.c
index 58596d17f7d9..029e2e69b29f 100644
--- a/drivers/scsi/qedi/qedi_main.c
+++ b/drivers/scsi/qedi/qedi_main.c
@@ -60,7 +60,7 @@ static int qedi_iscsi_event_cb(void *context, u8 fw_event_code, void *fw_handle)
 {
 	struct qedi_ctx *qedi;
 	struct qedi_endpoint *qedi_ep;
-	struct async_data *data;
+	struct iscsi_eqe_data *data;
 	int rval = 0;
 
 	if (!context || !fw_handle) {
@@ -72,18 +72,18 @@ static int qedi_iscsi_event_cb(void *context, u8 fw_event_code, void *fw_handle)
 	QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_INFO,
 		  "Recv Event %d fw_handle %p\n", fw_event_code, fw_handle);
 
-	data = (struct async_data *)fw_handle;
+	data = (struct iscsi_eqe_data *)fw_handle;
 	QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_INFO,
-		  "cid=0x%x tid=0x%x err-code=0x%x fw-dbg-param=0x%x\n",
-		   data->cid, data->itid, data->error_code,
-		   data->fw_debug_param);
+		  "icid=0x%x conn_id=0x%x err-code=0x%x error-pdu-opcode-reserved=0x%x\n",
+		   data->icid, data->conn_id, data->error_code,
+		   data->error_pdu_opcode_reserved);
 
-	qedi_ep = qedi->ep_tbl[data->cid];
+	qedi_ep = qedi->ep_tbl[data->icid];
 
 	if (!qedi_ep) {
 		QEDI_WARN(&qedi->dbg_ctx,
 			  "Cannot process event, ep already disconnected, cid=0x%x\n",
-			   data->cid);
+			   data->icid);
 		WARN_ON(1);
 		return -ENODEV;
 	}
@@ -339,12 +339,12 @@ static int qedi_init_uio(struct qedi_ctx *qedi)
 static int qedi_alloc_and_init_sb(struct qedi_ctx *qedi,
 				  struct qed_sb_info *sb_info, u16 sb_id)
 {
-	struct status_block *sb_virt;
+	struct status_block_e4 *sb_virt;
 	dma_addr_t sb_phys;
 	int ret;
 
 	sb_virt = dma_alloc_coherent(&qedi->pdev->dev,
-				     sizeof(struct status_block), &sb_phys,
+				     sizeof(struct status_block_e4), &sb_phys,
 				     GFP_KERNEL);
 	if (!sb_virt) {
 		QEDI_ERR(&qedi->dbg_ctx,
@@ -858,7 +858,6 @@ static int qedi_set_iscsi_pf_param(struct qedi_ctx *qedi)
 
 	qedi->pf_params.iscsi_pf_params.gl_rq_pi = QEDI_PROTO_CQ_PROD_IDX;
 	qedi->pf_params.iscsi_pf_params.gl_cmd_pi = 1;
-	qedi->pf_params.iscsi_pf_params.ooo_enable = 1;
 
 err_alloc_mem:
 	return rval;
@@ -961,7 +960,7 @@ static bool qedi_process_completions(struct qedi_fastpath *fp)
 {
 	struct qedi_ctx *qedi = fp->qedi;
 	struct qed_sb_info *sb_info = fp->sb_info;
-	struct status_block *sb = sb_info->sb_virt;
+	struct status_block_e4 *sb = sb_info->sb_virt;
 	struct qedi_percpu_s *p = NULL;
 	struct global_queue *que;
 	u16 prod_idx;
@@ -1017,7 +1016,7 @@ static bool qedi_fp_has_work(struct qedi_fastpath *fp)
 	struct qedi_ctx *qedi = fp->qedi;
 	struct global_queue *que;
 	struct qed_sb_info *sb_info = fp->sb_info;
-	struct status_block *sb = sb_info->sb_virt;
+	struct status_block_e4 *sb = sb_info->sb_virt;
 	u16 prod_idx;
 
 	barrier();
@@ -1264,8 +1263,10 @@ static int qedi_alloc_bdq(struct qedi_ctx *qedi)
 		QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_CONN,
 			  "pbl [0x%p] pbl->address hi [0x%llx] lo [0x%llx], idx [%d]\n",
 			  pbl, pbl->address.hi, pbl->address.lo, i);
-		pbl->opaque.hi = 0;
-		pbl->opaque.lo = cpu_to_le32(QEDI_U64_LO(i));
+		pbl->opaque.iscsi_opaque.reserved_zero[0] = 0;
+		pbl->opaque.iscsi_opaque.reserved_zero[1] = 0;
+		pbl->opaque.iscsi_opaque.reserved_zero[2] = 0;
+		pbl->opaque.iscsi_opaque.opaque = cpu_to_le16(i);
 		pbl++;
 	}
 
diff --git a/drivers/scsi/qedi/qedi_version.h b/drivers/scsi/qedi/qedi_version.h
index d61e3ac22e67..8a0e523fc089 100644
--- a/drivers/scsi/qedi/qedi_version.h
+++ b/drivers/scsi/qedi/qedi_version.h
@@ -7,8 +7,8 @@
  * this source tree.
  */
 
-#define QEDI_MODULE_VERSION	"8.10.4.0"
+#define QEDI_MODULE_VERSION	"8.33.0.20"
 #define QEDI_DRIVER_MAJOR_VER		8
-#define QEDI_DRIVER_MINOR_VER		10
-#define QEDI_DRIVER_REV_VER		4
-#define QEDI_DRIVER_ENG_VER		0
+#define QEDI_DRIVER_MINOR_VER		33
+#define QEDI_DRIVER_REV_VER		0
+#define QEDI_DRIVER_ENG_VER		20
diff --git a/drivers/ssb/Kconfig b/drivers/ssb/Kconfig
index 71c73766ee22..ee18428a051f 100644
--- a/drivers/ssb/Kconfig
+++ b/drivers/ssb/Kconfig
@@ -3,10 +3,7 @@ config SSB_POSSIBLE
 	depends on HAS_IOMEM && HAS_DMA
 	default y
 
-menu "Sonics Silicon Backplane"
-	depends on SSB_POSSIBLE
-
-config SSB
+menuconfig SSB
 	tristate "Sonics Silicon Backplane support"
 	depends on SSB_POSSIBLE
 	help
@@ -21,6 +18,8 @@ config SSB
 
 	  If unsure, say N.
 
+if SSB
+
 # Common SPROM support routines
 config SSB_SPROM
 	bool
@@ -185,4 +184,4 @@ config SSB_DRIVER_GPIO
 
 	  If unsure, say N
 
-endmenu
+endif # SSB
diff --git a/drivers/tty/serdev/core.c b/drivers/tty/serdev/core.c
index 28133dbd2808..5a135ed46159 100644
--- a/drivers/tty/serdev/core.c
+++ b/drivers/tty/serdev/core.c
@@ -252,6 +252,18 @@ void serdev_device_set_flow_control(struct serdev_device *serdev, bool enable)
 }
 EXPORT_SYMBOL_GPL(serdev_device_set_flow_control);
 
+int serdev_device_set_parity(struct serdev_device *serdev,
+			     enum serdev_parity parity)
+{
+	struct serdev_controller *ctrl = serdev->ctrl;
+
+	if (!ctrl || !ctrl->ops->set_parity)
+		return -ENOTSUPP;
+
+	return ctrl->ops->set_parity(ctrl, parity);
+}
+EXPORT_SYMBOL_GPL(serdev_device_set_parity);
+
 void serdev_device_wait_until_sent(struct serdev_device *serdev, long timeout)
 {
 	struct serdev_controller *ctrl = serdev->ctrl;
diff --git a/drivers/tty/serdev/serdev-ttyport.c b/drivers/tty/serdev/serdev-ttyport.c
index 247788a16f0b..1bdf2959ac5c 100644
--- a/drivers/tty/serdev/serdev-ttyport.c
+++ b/drivers/tty/serdev/serdev-ttyport.c
@@ -190,6 +190,29 @@ static void ttyport_set_flow_control(struct serdev_controller *ctrl, bool enable
 	tty_set_termios(tty, &ktermios);
 }
 
+static int ttyport_set_parity(struct serdev_controller *ctrl,
+			      enum serdev_parity parity)
+{
+	struct serport *serport = serdev_controller_get_drvdata(ctrl);
+	struct tty_struct *tty = serport->tty;
+	struct ktermios ktermios = tty->termios;
+
+	ktermios.c_cflag &= ~(PARENB | PARODD | CMSPAR);
+	if (parity != SERDEV_PARITY_NONE) {
+		ktermios.c_cflag |= PARENB;
+		if (parity == SERDEV_PARITY_ODD)
+			ktermios.c_cflag |= PARODD;
+	}
+
+	tty_set_termios(tty, &ktermios);
+
+	if ((tty->termios.c_cflag & (PARENB | PARODD | CMSPAR)) !=
+	    (ktermios.c_cflag & (PARENB | PARODD | CMSPAR)))
+		return -EINVAL;
+
+	return 0;
+}
+
 static void ttyport_wait_until_sent(struct serdev_controller *ctrl, long timeout)
 {
 	struct serport *serport = serdev_controller_get_drvdata(ctrl);
@@ -227,6 +250,7 @@ static const struct serdev_controller_ops ctrl_ops = {
 	.open = ttyport_open,
 	.close = ttyport_close,
 	.set_flow_control = ttyport_set_flow_control,
+	.set_parity = ttyport_set_parity,
 	.set_baudrate = ttyport_set_baudrate,
 	.wait_until_sent = ttyport_wait_until_sent,
 	.get_tiocm = ttyport_get_tiocm,
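
With .set_parity wired into the ttyport controller ops, serdev client drivers can request parity explicitly. A minimal caller sketch, assuming the SERDEV_PARITY_NONE/EVEN/ODD enumerators added alongside serdev_device_set_parity(); the demo_* names are illustrative:

#include <linux/serdev.h>

/* Hypothetical client probe: 115200 8E1, no flow control. */
static int demo_serdev_probe(struct serdev_device *serdev)
{
	int ret;

	ret = serdev_device_open(serdev);
	if (ret)
		return ret;

	serdev_device_set_baudrate(serdev, 115200);
	serdev_device_set_flow_control(serdev, false);

	ret = serdev_device_set_parity(serdev, SERDEV_PARITY_EVEN);
	if (ret)  /* -ENOTSUPP without the op, -EINVAL if the tty refused */
		dev_warn(&serdev->dev, "even parity not available\n");

	return 0;
}
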
diff --git a/drivers/vhost/net.c b/drivers/vhost/net.c
index 9524ee16878a..9c3f8160ef24 100644
--- a/drivers/vhost/net.c
+++ b/drivers/vhost/net.c
@@ -89,7 +89,7 @@ struct vhost_net_ubuf_ref {
 
 #define VHOST_RX_BATCH 64
 struct vhost_net_buf {
-	struct sk_buff **queue;
+	void **queue;
 	int tail;
 	int head;
 };
@@ -108,7 +108,7 @@ struct vhost_net_virtqueue {
 	/* Reference counting for outstanding ubufs.
 	 * Protected by vq mutex. Writers must also take device mutex. */
 	struct vhost_net_ubuf_ref *ubufs;
-	struct skb_array *rx_array;
+	struct ptr_ring *rx_ring;
 	struct vhost_net_buf rxq;
 };
 
@@ -158,7 +158,7 @@ static int vhost_net_buf_produce(struct vhost_net_virtqueue *nvq)
 	struct vhost_net_buf *rxq = &nvq->rxq;
 
 	rxq->head = 0;
-	rxq->tail = skb_array_consume_batched(nvq->rx_array, rxq->queue,
+	rxq->tail = ptr_ring_consume_batched(nvq->rx_ring, rxq->queue,
 					      VHOST_RX_BATCH);
 	return rxq->tail;
 }
@@ -167,13 +167,25 @@ static void vhost_net_buf_unproduce(struct vhost_net_virtqueue *nvq)
 {
 	struct vhost_net_buf *rxq = &nvq->rxq;
 
-	if (nvq->rx_array && !vhost_net_buf_is_empty(rxq)) {
-		skb_array_unconsume(nvq->rx_array, rxq->queue + rxq->head,
-				    vhost_net_buf_get_size(rxq));
+	if (nvq->rx_ring && !vhost_net_buf_is_empty(rxq)) {
+		ptr_ring_unconsume(nvq->rx_ring, rxq->queue + rxq->head,
+				   vhost_net_buf_get_size(rxq),
+				   __skb_array_destroy_skb);
 		rxq->head = rxq->tail = 0;
 	}
 }
 
+static int vhost_net_buf_peek_len(void *ptr)
+{
+	if (tun_is_xdp_buff(ptr)) {
+		struct xdp_buff *xdp = tun_ptr_to_xdp(ptr);
+
+		return xdp->data_end - xdp->data;
+	}
+
+	return __skb_array_len_with_tag(ptr);
+}
+
 static int vhost_net_buf_peek(struct vhost_net_virtqueue *nvq)
 {
 	struct vhost_net_buf *rxq = &nvq->rxq;
@@ -185,7 +197,7 @@ static int vhost_net_buf_peek(struct vhost_net_virtqueue *nvq)
 		return 0;
 
 out:
-	return __skb_array_len_with_tag(vhost_net_buf_get_ptr(rxq));
+	return vhost_net_buf_peek_len(vhost_net_buf_get_ptr(rxq));
 }
 
 static void vhost_net_buf_init(struct vhost_net_buf *rxq)
@@ -583,7 +595,7 @@ static int peek_head_len(struct vhost_net_virtqueue *rvq, struct sock *sk)
 	int len = 0;
 	unsigned long flags;
 
-	if (rvq->rx_array)
+	if (rvq->rx_ring)
 		return vhost_net_buf_peek(rvq);
 
 	spin_lock_irqsave(&sk->sk_receive_queue.lock, flags);
@@ -744,7 +756,7 @@ static void handle_rx(struct vhost_net *net)
 	};
 	size_t total_len = 0;
 	int err, mergeable;
-	s16 headcount;
+	s16 headcount, nheads = 0;
 	size_t vhost_hlen, sock_hlen;
 	size_t vhost_len, sock_len;
 	struct socket *sock;
@@ -772,7 +784,7 @@ static void handle_rx(struct vhost_net *net)
 	while ((sock_len = vhost_net_rx_peek_head_len(net, sock->sk))) {
 		sock_len += sock_hlen;
 		vhost_len = sock_len + vhost_hlen;
-		headcount = get_rx_bufs(vq, vq->heads, vhost_len,
+		headcount = get_rx_bufs(vq, vq->heads + nheads, vhost_len,
 					&in, vq_log, &log,
 					likely(mergeable) ? UIO_MAXIOV : 1);
 		/* On error, stop handling until the next kick. */
@@ -790,7 +802,7 @@ static void handle_rx(struct vhost_net *net)
 			 * they refilled. */
 			goto out;
 		}
-		if (nvq->rx_array)
+		if (nvq->rx_ring)
 			msg.msg_control = vhost_net_buf_consume(&nvq->rxq);
 		/* On overrun, truncate and discard */
 		if (unlikely(headcount > UIO_MAXIOV)) {
@@ -844,8 +856,12 @@ static void handle_rx(struct vhost_net *net)
 			vhost_discard_vq_desc(vq, headcount);
 			goto out;
 		}
-		vhost_add_used_and_signal_n(&net->dev, vq, vq->heads,
-					    headcount);
+		nheads += headcount;
+		if (nheads > VHOST_RX_BATCH) {
+			vhost_add_used_and_signal_n(&net->dev, vq, vq->heads,
+						    nheads);
+			nheads = 0;
+		}
 		if (unlikely(vq_log))
 			vhost_log_write(vq, vq_log, log, vhost_len);
 		total_len += vhost_len;
@@ -856,6 +872,9 @@ static void handle_rx(struct vhost_net *net)
 	}
 	vhost_net_enable_vq(net, vq);
 out:
+	if (nheads)
+		vhost_add_used_and_signal_n(&net->dev, vq, vq->heads,
+					    nheads);
 	mutex_unlock(&vq->mutex);
 }
 
@@ -896,7 +915,7 @@ static int vhost_net_open(struct inode *inode, struct file *f)
 	struct vhost_net *n;
 	struct vhost_dev *dev;
 	struct vhost_virtqueue **vqs;
-	struct sk_buff **queue;
+	void **queue;
 	int i;
 
 	n = kvmalloc(sizeof *n, GFP_KERNEL | __GFP_RETRY_MAYFAIL);
@@ -908,7 +927,7 @@ static int vhost_net_open(struct inode *inode, struct file *f)
 		return -ENOMEM;
 	}
 
-	queue = kmalloc_array(VHOST_RX_BATCH, sizeof(struct sk_buff *),
+	queue = kmalloc_array(VHOST_RX_BATCH, sizeof(void *),
 			      GFP_KERNEL);
 	if (!queue) {
 		kfree(vqs);
@@ -1046,23 +1065,23 @@ err:
 	return ERR_PTR(r);
 }
 
-static struct skb_array *get_tap_skb_array(int fd)
+static struct ptr_ring *get_tap_ptr_ring(int fd)
 {
-	struct skb_array *array;
+	struct ptr_ring *ring;
 	struct file *file = fget(fd);
 
 	if (!file)
 		return NULL;
-	array = tun_get_skb_array(file);
-	if (!IS_ERR(array))
+	ring = tun_get_tx_ring(file);
+	if (!IS_ERR(ring))
 		goto out;
-	array = tap_get_skb_array(file);
-	if (!IS_ERR(array))
+	ring = tap_get_ptr_ring(file);
+	if (!IS_ERR(ring))
 		goto out;
-	array = NULL;
+	ring = NULL;
 out:
 	fput(file);
-	return array;
+	return ring;
 }
 
 static struct socket *get_tap_socket(int fd)
@@ -1143,7 +1162,7 @@ static long vhost_net_set_backend(struct vhost_net *n, unsigned index, int fd)
 		vq->private_data = sock;
 		vhost_net_buf_unproduce(nvq);
 		if (index == VHOST_NET_VQ_RX)
-			nvq->rx_array = get_tap_skb_array(fd);
+			nvq->rx_ring = get_tap_ptr_ring(fd);
 		r = vhost_vq_init_access(vq);
 		if (r)
 			goto err_used;
@@ -1208,6 +1227,7 @@ static long vhost_net_reset_owner(struct vhost_net *n)
 	}
 	vhost_net_stop(n, &tx_sock, &rx_sock);
 	vhost_net_flush(n);
+	vhost_dev_stop(&n->dev);
 	vhost_dev_reset_owner(&n->dev, umem);
 	vhost_net_vq_reset(n);
 done:
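
The move from skb_array to ptr_ring means the rx batch queue can now carry two pointer types at once: sk_buffs from the socket path and tagged xdp_buff pointers from the tun fast path, distinguished by the tun helpers declared further down in if_tun.h. A sketch of the consumer pattern this enables, under the same assumptions as vhost_net_buf_peek_len() above (DEMO_BATCH and demo_drain() are illustrative):

#include <linux/ptr_ring.h>
#include <linux/skb_array.h>
#include <linux/if_tun.h>
#include <linux/filter.h>

#define DEMO_BATCH 64

static int demo_drain(struct ptr_ring *ring)
{
	void *batch[DEMO_BATCH];
	int n, i, total = 0;

	n = ptr_ring_consume_batched(ring, batch, DEMO_BATCH);
	for (i = 0; i < n; i++) {
		if (tun_is_xdp_buff(batch[i])) {
			struct xdp_buff *xdp = tun_ptr_to_xdp(batch[i]);

			total += xdp->data_end - xdp->data;
		} else {
			total += __skb_array_len_with_tag(batch[i]);
		}
	}
	/* Push unhandled entries back; anything that no longer fits is
	 * freed via the callback, as vhost_net_buf_unproduce() does. */
	ptr_ring_unconsume(ring, batch, n, __skb_array_destroy_skb);
	return total;
}
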
diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c
index ed095202942f..21f34ad0d411 100644
--- a/fs/btrfs/disk-io.c
+++ b/fs/btrfs/disk-io.c
@@ -30,6 +30,7 @@
 #include <linux/ratelimit.h>
 #include <linux/uuid.h>
 #include <linux/semaphore.h>
+#include <linux/error-injection.h>
 #include <asm/unaligned.h>
 #include "ctree.h"
 #include "disk-io.h"
@@ -3115,6 +3116,7 @@ recovery_tree_root:
 		goto fail_block_groups;
 	goto retry_root_backup;
 }
+ALLOW_ERROR_INJECTION(open_ctree, ERRNO);
 
 static void btrfs_end_buffer_write_sync(struct buffer_head *bh, int uptodate)
 {
diff --git a/fs/btrfs/free-space-cache.c b/fs/btrfs/free-space-cache.c
index 014f3c090231..a9f22ac50d6a 100644
--- a/fs/btrfs/free-space-cache.c
+++ b/fs/btrfs/free-space-cache.c
@@ -22,6 +22,7 @@
 #include <linux/slab.h>
 #include <linux/math64.h>
 #include <linux/ratelimit.h>
+#include <linux/error-injection.h>
 #include "ctree.h"
 #include "free-space-cache.h"
 #include "transaction.h"
@@ -332,6 +333,7 @@ static int io_ctl_init(struct btrfs_io_ctl *io_ctl, struct inode *inode,
 
 	return 0;
 }
+ALLOW_ERROR_INJECTION(io_ctl_init, ERRNO);
 
 static void io_ctl_free(struct btrfs_io_ctl *io_ctl)
 {
diff --git a/fs/gfs2/glock.c b/fs/gfs2/glock.c
index 11066d8647d2..90af87ff29ba 100644
--- a/fs/gfs2/glock.c
+++ b/fs/gfs2/glock.c
@@ -1549,16 +1549,13 @@ static void glock_hash_walk(glock_examiner examiner, const struct gfs2_sbd *sdp)
 	rhashtable_walk_enter(&gl_hash_table, &iter);
 
 	do {
-		gl = ERR_PTR(rhashtable_walk_start(&iter));
-		if (IS_ERR(gl))
-			goto walk_stop;
+		rhashtable_walk_start(&iter);
 
 		while ((gl = rhashtable_walk_next(&iter)) && !IS_ERR(gl))
 			if (gl->gl_name.ln_sbd == sdp &&
 			    lockref_get_not_dead(&gl->gl_lockref))
 				examiner(gl);
 
-walk_stop:
 		rhashtable_walk_stop(&iter);
 	} while (cond_resched(), gl == ERR_PTR(-EAGAIN));
 
@@ -1947,7 +1944,7 @@ static void *gfs2_glock_seq_start(struct seq_file *seq, loff_t *pos)
 	loff_t n = *pos;
 
 	rhashtable_walk_enter(&gl_hash_table, &gi->hti);
-	if (rhashtable_walk_start(&gi->hti) != 0)
+	if (rhashtable_walk_start_check(&gi->hti) != 0)
 		return NULL;
 
 	do {
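
Both gfs2 call sites track the same rhashtable API change: rhashtable_walk_start() no longer returns an error, and callers that genuinely care about a resize race use rhashtable_walk_start_check() instead. The canonical walk then reduces to a sketch like this (the examiner callback is illustrative):

#include <linux/rhashtable.h>
#include <linux/err.h>
#include <linux/sched.h>

static void demo_walk(struct rhashtable *ht, void (*examiner)(void *obj))
{
	struct rhashtable_iter iter;
	void *obj;

	rhashtable_walk_enter(ht, &iter);
	do {
		rhashtable_walk_start(&iter);	/* void as of this change */

		while ((obj = rhashtable_walk_next(&iter)) && !IS_ERR(obj))
			examiner(obj);

		rhashtable_walk_stop(&iter);
		/* ERR_PTR(-EAGAIN) from walk_next means a resize raced
		 * with us: drop out of the walk, reschedule, and retry. */
	} while (cond_resched(), obj == ERR_PTR(-EAGAIN));
	rhashtable_walk_exit(&iter);
}
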
diff --git a/fs/nsfs.c b/fs/nsfs.c
index 7c6f76d29f56..36b0772701a0 100644
--- a/fs/nsfs.c
+++ b/fs/nsfs.c
@@ -103,14 +103,14 @@ slow:
 	goto got_it;
 }
 
-void *ns_get_path(struct path *path, struct task_struct *task,
-			const struct proc_ns_operations *ns_ops)
+void *ns_get_path_cb(struct path *path, ns_get_path_helper_t *ns_get_cb,
+		     void *private_data)
 {
 	struct ns_common *ns;
 	void *ret;
 
 again:
-	ns = ns_ops->get(task);
+	ns = ns_get_cb(private_data);
 	if (!ns)
 		return ERR_PTR(-ENOENT);
 
@@ -120,6 +120,29 @@ again:
 	return ret;
 }
 
+struct ns_get_path_task_args {
+	const struct proc_ns_operations *ns_ops;
+	struct task_struct *task;
+};
+
+static struct ns_common *ns_get_path_task(void *private_data)
+{
+	struct ns_get_path_task_args *args = private_data;
+
+	return args->ns_ops->get(args->task);
+}
+
+void *ns_get_path(struct path *path, struct task_struct *task,
+		  const struct proc_ns_operations *ns_ops)
+{
+	struct ns_get_path_task_args args = {
+		.ns_ops	= ns_ops,
+		.task	= task,
+	};
+
+	return ns_get_path_cb(path, ns_get_path_task, &args);
+}
+
 int open_related_ns(struct ns_common *ns,
 		   struct ns_common *(*get_ns)(struct ns_common *ns))
 {
diff --git a/include/asm-generic/error-injection.h b/include/asm-generic/error-injection.h
new file mode 100644
index 000000000000..296c65442f00
--- /dev/null
+++ b/include/asm-generic/error-injection.h
@@ -0,0 +1,35 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_GENERIC_ERROR_INJECTION_H
+#define _ASM_GENERIC_ERROR_INJECTION_H
+
+#if defined(__KERNEL__) && !defined(__ASSEMBLY__)
+enum {
+	EI_ETYPE_NONE,		/* Dummy value for undefined case */
+	EI_ETYPE_NULL,		/* Return NULL if failure */
+	EI_ETYPE_ERRNO,		/* Return -ERRNO if failure */
+	EI_ETYPE_ERRNO_NULL,	/* Return -ERRNO or NULL if failure */
+};
+
+struct error_injection_entry {
+	unsigned long	addr;
+	int		etype;
+};
+
+#ifdef CONFIG_FUNCTION_ERROR_INJECTION
+/*
+ * Whitelist generating macro. Specify functions which can be
+ * error-injectable using this macro.
+ */
+#define ALLOW_ERROR_INJECTION(fname, _etype)				\
+static struct error_injection_entry __used				\
+	__attribute__((__section__("_error_injection_whitelist")))	\
+	_eil_addr_##fname = {						\
+		.addr = (unsigned long)fname,				\
+		.etype = EI_ETYPE_##_etype,				\
+	};
+#else
+#define ALLOW_ERROR_INJECTION(fname, _etype)
+#endif
+#endif
+
+#endif /* _ASM_GENERIC_ERROR_INJECTION_H */
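
The btrfs hunks above show the intended use of this macro: annotate a function whose failure path should be reachable from the error-injection framework, naming how it reports errors. A condensed sketch (demo_open() is illustrative):

#include <linux/error-injection.h>

/* Returns 0 or a negative errno, hence the ERRNO injection type. */
static int demo_open(void *ctx)
{
	if (!ctx)
		return -EINVAL;
	return 0;
}
/* Drops a struct error_injection_entry into the
 * _error_injection_whitelist section gathered by the linker-script
 * change below, which the runtime whitelist check consults. */
ALLOW_ERROR_INJECTION(demo_open, ERRNO);
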
diff --git a/include/asm-generic/vmlinux.lds.h b/include/asm-generic/vmlinux.lds.h
index a564b83bf013..1ab0e520d6fc 100644
--- a/include/asm-generic/vmlinux.lds.h
+++ b/include/asm-generic/vmlinux.lds.h
@@ -136,6 +136,15 @@
 #define KPROBE_BLACKLIST()
 #endif
 
+#ifdef CONFIG_FUNCTION_ERROR_INJECTION
+#define ERROR_INJECT_WHITELIST()	STRUCT_ALIGN();			      \
+			VMLINUX_SYMBOL(__start_error_injection_whitelist) = .;\
+			KEEP(*(_error_injection_whitelist))		      \
+			VMLINUX_SYMBOL(__stop_error_injection_whitelist) = .;
+#else
+#define ERROR_INJECT_WHITELIST()
+#endif
+
 #ifdef CONFIG_EVENT_TRACING
 #define FTRACE_EVENTS()	. = ALIGN(8);					\
 			VMLINUX_SYMBOL(__start_ftrace_events) = .;	\
@@ -568,6 +577,7 @@
 	FTRACE_EVENTS()							\
 	TRACE_SYSCALLS()						\
 	KPROBE_BLACKLIST()						\
+	ERROR_INJECT_WHITELIST()					\
 	MEM_DISCARD(init.rodata)					\
 	CLK_OF_TABLES()							\
 	RESERVEDMEM_OF_TABLES()						\
diff --git a/include/linux/acpi.h b/include/linux/acpi.h
index f0ea50ba0550..e6d41b65d396 100644
--- a/include/linux/acpi.h
+++ b/include/linux/acpi.h
@@ -56,6 +56,8 @@ static inline acpi_handle acpi_device_handle(struct acpi_device *adev)
 #define ACPI_COMPANION_SET(dev, adev)	set_primary_fwnode(dev, (adev) ? \
 	acpi_fwnode_handle(adev) : NULL)
 #define ACPI_HANDLE(dev)		acpi_device_handle(ACPI_COMPANION(dev))
+#define ACPI_HANDLE_FWNODE(fwnode)	\
+				acpi_device_handle(to_acpi_device_node(fwnode))
 
 static inline struct fwnode_handle *acpi_alloc_fwnode_static(void)
 {
@@ -628,6 +630,7 @@ int acpi_arch_timer_mem_init(struct arch_timer_mem *timer_mem, int *timer_count)
 #define ACPI_COMPANION(dev)		(NULL)
 #define ACPI_COMPANION_SET(dev, adev)	do { } while (0)
 #define ACPI_HANDLE(dev)		(NULL)
+#define ACPI_HANDLE_FWNODE(fwnode)	(NULL)
 #define ACPI_DEVICE_CLASS(_cls, _msk)	.cls = (0), .cls_msk = (0),
 
 struct fwnode_handle;
diff --git a/include/linux/bpf.h b/include/linux/bpf.h
index 0b25cf87b6d6..66df387106de 100644
--- a/include/linux/bpf.h
+++ b/include/linux/bpf.h
@@ -17,6 +17,7 @@
 #include <linux/numa.h>
 #include <linux/wait.h>
 
+struct bpf_verifier_env;
 struct perf_event;
 struct bpf_prog;
 struct bpf_map;
@@ -24,6 +25,7 @@ struct bpf_map;
 /* map is generic key/value storage optionally accessible by eBPF programs */
 struct bpf_map_ops {
 	/* funcs callable from userspace (via syscall) */
+	int (*map_alloc_check)(union bpf_attr *attr);
 	struct bpf_map *(*map_alloc)(union bpf_attr *attr);
 	void (*map_release)(struct bpf_map *map, struct file *map_file);
 	void (*map_free)(struct bpf_map *map);
@@ -72,6 +74,33 @@ struct bpf_map {
 	char name[BPF_OBJ_NAME_LEN];
 };
 
+struct bpf_offloaded_map;
+
+struct bpf_map_dev_ops {
+	int (*map_get_next_key)(struct bpf_offloaded_map *map,
+				void *key, void *next_key);
+	int (*map_lookup_elem)(struct bpf_offloaded_map *map,
+			       void *key, void *value);
+	int (*map_update_elem)(struct bpf_offloaded_map *map,
+			       void *key, void *value, u64 flags);
+	int (*map_delete_elem)(struct bpf_offloaded_map *map, void *key);
+};
+
+struct bpf_offloaded_map {
+	struct bpf_map map;
+	struct net_device *netdev;
+	const struct bpf_map_dev_ops *dev_ops;
+	void *dev_priv;
+	struct list_head offloads;
+};
+
+static inline struct bpf_offloaded_map *map_to_offmap(struct bpf_map *map)
+{
+	return container_of(map, struct bpf_offloaded_map, map);
+}
+
+extern const struct bpf_map_ops bpf_map_offload_ops;
+
 /* function argument constraints */
 enum bpf_arg_type {
 	ARG_DONTCARE = 0,	/* unused argument in helper function */
@@ -193,14 +222,20 @@ struct bpf_verifier_ops {
 				  struct bpf_prog *prog, u32 *target_size);
 };
 
-struct bpf_dev_offload {
+struct bpf_prog_offload_ops {
+	int (*insn_hook)(struct bpf_verifier_env *env,
+			 int insn_idx, int prev_insn_idx);
+};
+
+struct bpf_prog_offload {
 	struct bpf_prog		*prog;
 	struct net_device	*netdev;
 	void			*dev_priv;
 	struct list_head	offloads;
 	bool			dev_state;
-	bool			verifier_running;
-	wait_queue_head_t	verifier_done;
+	const struct bpf_prog_offload_ops *dev_ops;
+	void			*jited_image;
+	u32			jited_len;
 };
 
 struct bpf_prog_aux {
@@ -209,6 +244,10 @@ struct bpf_prog_aux {
 	u32 max_ctx_offset;
 	u32 stack_depth;
 	u32 id;
+	u32 func_cnt;
+	bool offload_requested;
+	struct bpf_prog **func;
+	void *jit_data; /* JIT specific data. arch dependent */
 	struct latch_tree_node ksym_tnode;
 	struct list_head ksym_lnode;
 	const struct bpf_prog_ops *ops;
@@ -220,7 +259,7 @@ struct bpf_prog_aux {
 #ifdef CONFIG_SECURITY
 	void *security;
 #endif
-	struct bpf_dev_offload *offload;
+	struct bpf_prog_offload *offload;
 	union {
 		struct work_struct work;
 		struct rcu_head	rcu;
@@ -295,6 +334,9 @@ int bpf_prog_array_copy_to_user(struct bpf_prog_array __rcu *progs,
 
 void bpf_prog_array_delete_safe(struct bpf_prog_array __rcu *progs,
 				struct bpf_prog *old_prog);
+int bpf_prog_array_copy_info(struct bpf_prog_array __rcu *array,
+			     __u32 __user *prog_ids, u32 request_cnt,
+			     __u32 __user *prog_cnt);
 int bpf_prog_array_copy(struct bpf_prog_array __rcu *old_array,
 			struct bpf_prog *exclude_prog,
 			struct bpf_prog *include_prog,
@@ -355,6 +397,9 @@ void bpf_prog_put(struct bpf_prog *prog);
 int __bpf_prog_charge(struct user_struct *user, u32 pages);
 void __bpf_prog_uncharge(struct user_struct *user, u32 pages);
 
+void bpf_prog_free_id(struct bpf_prog *prog, bool do_idr_lock);
+void bpf_map_free_id(struct bpf_map *map, bool do_idr_lock);
+
 struct bpf_map *bpf_map_get_with_uref(u32 ufd);
 struct bpf_map *__bpf_map_get(struct fd f);
 struct bpf_map * __must_check bpf_map_inc(struct bpf_map *map, bool uref);
@@ -363,6 +408,7 @@ void bpf_map_put(struct bpf_map *map);
 int bpf_map_precharge_memlock(u32 pages);
 void *bpf_map_area_alloc(size_t size, int numa_node);
 void bpf_map_area_free(void *base);
+void bpf_map_init_from_attr(struct bpf_map *map, union bpf_attr *attr);
 
 extern int sysctl_unprivileged_bpf_disabled;
 
@@ -409,6 +455,7 @@ static inline void bpf_long_memcpy(void *dst, const void *src, u32 size)
 
 /* verify correctness of eBPF program */
 int bpf_check(struct bpf_prog **fp, union bpf_attr *attr);
+void bpf_patch_call_args(struct bpf_insn *insn, u32 stack_depth);
 
 /* Map specifics */
 struct net_device  *__dev_map_lookup_elem(struct bpf_map *map, u32 key);
@@ -536,14 +583,35 @@ bool bpf_prog_get_ok(struct bpf_prog *, enum bpf_prog_type *, bool);
 
 int bpf_prog_offload_compile(struct bpf_prog *prog);
 void bpf_prog_offload_destroy(struct bpf_prog *prog);
+int bpf_prog_offload_info_fill(struct bpf_prog_info *info,
+			       struct bpf_prog *prog);
+
+int bpf_map_offload_info_fill(struct bpf_map_info *info, struct bpf_map *map);
+
+int bpf_map_offload_lookup_elem(struct bpf_map *map, void *key, void *value);
+int bpf_map_offload_update_elem(struct bpf_map *map,
+				void *key, void *value, u64 flags);
+int bpf_map_offload_delete_elem(struct bpf_map *map, void *key);
+int bpf_map_offload_get_next_key(struct bpf_map *map,
+				 void *key, void *next_key);
+
+bool bpf_offload_dev_match(struct bpf_prog *prog, struct bpf_map *map);
 
 #if defined(CONFIG_NET) && defined(CONFIG_BPF_SYSCALL)
 int bpf_prog_offload_init(struct bpf_prog *prog, union bpf_attr *attr);
 
 static inline bool bpf_prog_is_dev_bound(struct bpf_prog_aux *aux)
 {
-	return aux->offload;
+	return aux->offload_requested;
 }
+
+static inline bool bpf_map_is_dev_bound(struct bpf_map *map)
+{
+	return unlikely(map->ops == &bpf_map_offload_ops);
+}
+
+struct bpf_map *bpf_map_offload_map_alloc(union bpf_attr *attr);
+void bpf_map_offload_map_free(struct bpf_map *map);
 #else
 static inline int bpf_prog_offload_init(struct bpf_prog *prog,
 					union bpf_attr *attr)
@@ -555,9 +623,23 @@ static inline bool bpf_prog_is_dev_bound(struct bpf_prog_aux *aux)
 {
 	return false;
 }
+
+static inline bool bpf_map_is_dev_bound(struct bpf_map *map)
+{
+	return false;
+}
+
+static inline struct bpf_map *bpf_map_offload_map_alloc(union bpf_attr *attr)
+{
+	return ERR_PTR(-EOPNOTSUPP);
+}
+
+static inline void bpf_map_offload_map_free(struct bpf_map *map)
+{
+}
 #endif /* CONFIG_NET && CONFIG_BPF_SYSCALL */
 
-#if defined(CONFIG_STREAM_PARSER) && defined(CONFIG_BPF_SYSCALL)
+#if defined(CONFIG_STREAM_PARSER) && defined(CONFIG_BPF_SYSCALL) && defined(CONFIG_INET)
 struct sock  *__sock_map_lookup_elem(struct bpf_map *map, u32 key);
 int sock_map_prog(struct bpf_map *map, struct bpf_prog *prog, u32 type);
 #else
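
Offloaded maps are recognized purely by their ops pointer, which is why bpf_map_is_dev_bound() can be a single unlikely() comparison against bpf_map_offload_ops. The syscall paths are then expected to branch before ever touching host-side storage; roughly (a sketch of the dispatch, not the full syscall code):

#include <linux/bpf.h>

static int demo_map_lookup(struct bpf_map *map, void *key, void *value)
{
	if (bpf_map_is_dev_bound(map))
		return bpf_map_offload_lookup_elem(map, key, value);

	/* ...normal host-side lookup via map->ops->map_lookup_elem... */
	return -ENOENT;
}
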
diff --git a/include/linux/bpf_types.h b/include/linux/bpf_types.h
index 978c1d9c9383..19b8349a3809 100644
--- a/include/linux/bpf_types.h
+++ b/include/linux/bpf_types.h
@@ -42,7 +42,7 @@ BPF_MAP_TYPE(BPF_MAP_TYPE_ARRAY_OF_MAPS, array_of_maps_map_ops)
 BPF_MAP_TYPE(BPF_MAP_TYPE_HASH_OF_MAPS, htab_of_maps_map_ops)
 #ifdef CONFIG_NET
 BPF_MAP_TYPE(BPF_MAP_TYPE_DEVMAP, dev_map_ops)
-#ifdef CONFIG_STREAM_PARSER
+#if defined(CONFIG_STREAM_PARSER) && defined(CONFIG_INET)
 BPF_MAP_TYPE(BPF_MAP_TYPE_SOCKMAP, sock_map_ops)
 #endif
 BPF_MAP_TYPE(BPF_MAP_TYPE_CPUMAP, cpu_map_ops)
diff --git a/include/linux/bpf_verifier.h b/include/linux/bpf_verifier.h
index 1632bb13ad8a..6b66cd1aa0b9 100644
--- a/include/linux/bpf_verifier.h
+++ b/include/linux/bpf_verifier.h
@@ -76,6 +76,14 @@ struct bpf_reg_state {
 	s64 smax_value; /* maximum possible (s64)value */
 	u64 umin_value; /* minimum possible (u64)value */
 	u64 umax_value; /* maximum possible (u64)value */
+	/* Inside the callee two registers can be both PTR_TO_STACK like
+	 * R1=fp-8 and R2=fp-8, but one of them points to this function stack
+	 * while another to the caller's stack. To differentiate them 'frameno'
+	 * is used which is an index in bpf_verifier_state->frame[] array
+	 * pointing to bpf_func_state.
+	 * This field must be second to last, for states_equal() reasons.
+	 */
+	u32 frameno;
 	/* This field must be last, for states_equal() reasons. */
 	enum bpf_reg_liveness live;
 };
@@ -83,7 +91,8 @@ struct bpf_reg_state {
 enum bpf_stack_slot_type {
 	STACK_INVALID,    /* nothing was stored in this stack slot */
 	STACK_SPILL,      /* register spilled into stack */
-	STACK_MISC	  /* BPF program wrote some data into this slot */
+	STACK_MISC,	  /* BPF program wrote some data into this slot */
+	STACK_ZERO,	  /* BPF program wrote constant zero */
 };
 
 #define BPF_REG_SIZE 8	/* size of eBPF register in bytes */
@@ -96,13 +105,34 @@ struct bpf_stack_state {
 /* state of the program:
  * type of all registers and stack info
  */
-struct bpf_verifier_state {
+struct bpf_func_state {
 	struct bpf_reg_state regs[MAX_BPF_REG];
 	struct bpf_verifier_state *parent;
+	/* index of call instruction that called into this func */
+	int callsite;
+	/* stack frame number of this function state from pov of
+	 * enclosing bpf_verifier_state.
+	 * 0 = main function, 1 = first callee.
+	 */
+	u32 frameno;
+	/* subprog number == index within subprog_stack_depth
+	 * zero == main subprog
+	 */
+	u32 subprogno;
+
+	/* should be second to last. See copy_func_state() */
 	int allocated_stack;
 	struct bpf_stack_state *stack;
 };
 
+#define MAX_CALL_FRAMES 8
+struct bpf_verifier_state {
+	/* call stack tracking */
+	struct bpf_func_state *frame[MAX_CALL_FRAMES];
+	struct bpf_verifier_state *parent;
+	u32 curframe;
+};
+
 /* linked list of verifier states used to prune search */
 struct bpf_verifier_state_list {
 	struct bpf_verifier_state state;
@@ -113,6 +143,7 @@ struct bpf_insn_aux_data {
 	union {
 		enum bpf_reg_type ptr_type;	/* pointer type for load/store insns */
 		struct bpf_map *map_ptr;	/* pointer for call insn into lookup_elem */
+		s32 call_imm;			/* saved imm field of call insn */
 	};
 	int ctx_field_size; /* the ctx field size for load insn, maybe 0 */
 	bool seen; /* this insn was processed by the verifier */
@@ -135,11 +166,7 @@ static inline bool bpf_verifier_log_full(const struct bpf_verifer_log *log)
 	return log->len_used >= log->len_total - 1;
 }
 
-struct bpf_verifier_env;
-struct bpf_ext_analyzer_ops {
-	int (*insn_hook)(struct bpf_verifier_env *env,
-			 int insn_idx, int prev_insn_idx);
-};
+#define BPF_MAX_SUBPROGS 256
 
 /* single container for all structs
  * one verifier_env per bpf_check() call
@@ -152,29 +179,31 @@ struct bpf_verifier_env {
 	bool strict_alignment;		/* perform strict pointer alignment checks */
 	struct bpf_verifier_state *cur_state; /* current verifier state */
 	struct bpf_verifier_state_list **explored_states; /* search pruning optimization */
-	const struct bpf_ext_analyzer_ops *dev_ops; /* device analyzer ops */
 	struct bpf_map *used_maps[MAX_USED_MAPS]; /* array of map's used by eBPF program */
 	u32 used_map_cnt;		/* number of used maps */
 	u32 id_gen;			/* used to generate unique reg IDs */
 	bool allow_ptr_leaks;
 	bool seen_direct_write;
 	struct bpf_insn_aux_data *insn_aux_data; /* array of per-insn state */
-
 	struct bpf_verifer_log log;
+	u32 subprog_starts[BPF_MAX_SUBPROGS];
+	/* computes the stack depth of each bpf function */
+	u16 subprog_stack_depth[BPF_MAX_SUBPROGS + 1];
+	u32 subprog_cnt;
 };
 
+__printf(2, 3) void bpf_verifier_log_write(struct bpf_verifier_env *env,
+					   const char *fmt, ...);
+
 static inline struct bpf_reg_state *cur_regs(struct bpf_verifier_env *env)
 {
-	return env->cur_state->regs;
+	struct bpf_verifier_state *cur = env->cur_state;
+
+	return cur->frame[cur->curframe]->regs;
 }
 
-#if defined(CONFIG_NET) && defined(CONFIG_BPF_SYSCALL)
 int bpf_prog_offload_verifier_prep(struct bpf_verifier_env *env);
-#else
-static inline int bpf_prog_offload_verifier_prep(struct bpf_verifier_env *env)
-{
-	return -EOPNOTSUPP;
-}
-#endif
+int bpf_prog_offload_verify_insn(struct bpf_verifier_env *env,
+				 int insn_idx, int prev_insn_idx);
 
 #endif /* _LINUX_BPF_VERIFIER_H */
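
With bpf_verifier_state split into per-call bpf_func_state frames, register access indirects through frame[curframe], as the new cur_regs() shows. The same arithmetic reaches a caller's registers when the verifier unwinds from a callee; a sketch of that frame indexing (the helper name is illustrative):

#include <linux/bpf_verifier.h>

static struct bpf_reg_state *demo_caller_regs(struct bpf_verifier_env *env)
{
	struct bpf_verifier_state *cur = env->cur_state;

	if (cur->curframe == 0)		/* the main prog has no caller */
		return NULL;
	return cur->frame[cur->curframe - 1]->regs;
}
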
diff --git a/include/linux/brcmphy.h b/include/linux/brcmphy.h
index 8ff86b4c1b8a..d3339dd48b1a 100644
--- a/include/linux/brcmphy.h
+++ b/include/linux/brcmphy.h
@@ -14,6 +14,7 @@
 #define PHY_ID_BCM5241			0x0143bc30
 #define PHY_ID_BCMAC131			0x0143bc70
 #define PHY_ID_BCM5481			0x0143bca0
+#define PHY_ID_BCM5395			0x0143bcf0
 #define PHY_ID_BCM54810			0x03625d00
 #define PHY_ID_BCM5482			0x0143bcb0
 #define PHY_ID_BCM5411			0x00206070
diff --git a/include/linux/can/dev.h b/include/linux/can/dev.h
index 61f1cf2d9f44..055aaf5ed9af 100644
--- a/include/linux/can/dev.h
+++ b/include/linux/can/dev.h
@@ -46,6 +46,7 @@ struct can_priv {
 	unsigned int bitrate_const_cnt;
 	const u32 *data_bitrate_const;
 	unsigned int data_bitrate_const_cnt;
+	u32 bitrate_max;
 	struct can_clock clock;
 
 	enum can_state state;
@@ -166,6 +167,12 @@ void can_put_echo_skb(struct sk_buff *skb, struct net_device *dev,
 unsigned int can_get_echo_skb(struct net_device *dev, unsigned int idx);
 void can_free_echo_skb(struct net_device *dev, unsigned int idx);
 
+#ifdef CONFIG_OF
+void of_can_transceiver(struct net_device *dev);
+#else
+static inline void of_can_transceiver(struct net_device *dev) { }
+#endif
+
 struct sk_buff *alloc_can_skb(struct net_device *dev, struct can_frame **cf);
 struct sk_buff *alloc_canfd_skb(struct net_device *dev,
 				struct canfd_frame **cfd);
diff --git a/include/linux/dsa/lan9303.h b/include/linux/dsa/lan9303.h
index f48a85c377de..b4f22112ba75 100644
--- a/include/linux/dsa/lan9303.h
+++ b/include/linux/dsa/lan9303.h
@@ -23,9 +23,10 @@ struct lan9303 {
 	struct regmap_irq_chip_data *irq_data;
 	struct gpio_desc *reset_gpio;
 	u32 reset_duration; /* in [ms] */
-	bool phy_addr_sel_strap;
+	int phy_addr_base;
 	struct dsa_switch *ds;
 	struct mutex indirect_mutex; /* protect indexed register access */
+	struct mutex alr_mutex; /* protect ALR access */
 	const struct lan9303_phy_ops *ops;
 	bool is_bridged; /* true if port 1 and 2 are bridged */
 
diff --git a/include/linux/error-injection.h b/include/linux/error-injection.h
new file mode 100644
index 000000000000..280c61ecbf20
--- /dev/null
+++ b/include/linux/error-injection.h
@@ -0,0 +1,27 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _LINUX_ERROR_INJECTION_H
+#define _LINUX_ERROR_INJECTION_H
+
+#ifdef CONFIG_FUNCTION_ERROR_INJECTION
+
+#include <asm/error-injection.h>
+
+extern bool within_error_injection_list(unsigned long addr);
+extern int get_injectable_error_type(unsigned long addr);
+
+#else /* !CONFIG_FUNCTION_ERROR_INJECTION */
+
+#include <asm-generic/error-injection.h>
+static inline bool within_error_injection_list(unsigned long addr)
+{
+	return false;
+}
+
+static inline int get_injectable_error_type(unsigned long addr)
+{
+	return EI_ETYPE_NONE;
+}
+
+#endif
+
+#endif /* _LINUX_ERROR_INJECTION_H */
diff --git a/include/linux/filter.h b/include/linux/filter.h
index 80b5b482cb46..276932d75975 100644
--- a/include/linux/filter.h
+++ b/include/linux/filter.h
@@ -18,7 +18,9 @@
 #include <linux/capability.h>
 #include <linux/cryptohash.h>
 #include <linux/set_memory.h>
+#include <linux/kallsyms.h>
 
+#include <net/xdp.h>
 #include <net/sch_generic.h>
 
 #include <uapi/linux/filter.h>
@@ -58,6 +60,9 @@ struct bpf_prog_aux;
 /* unused opcode to mark special call to bpf_tail_call() helper */
 #define BPF_TAIL_CALL	0xf0
 
+/* unused opcode to mark call to interpreter with arguments */
+#define BPF_CALL_ARGS	0xe0
+
 /* As per nm, we expose JITed images as text (code) section for
  * kallsyms. That way, tools like perf can find it to match
  * addresses.
@@ -455,10 +460,14 @@ struct bpf_binary_header {
 struct bpf_prog {
 	u16			pages;		/* Number of allocated pages */
 	u16			jited:1,	/* Is our filter JIT'ed? */
+				jit_requested:1,/* archs need to JIT the prog */
 				locked:1,	/* Program image locked? */
 				gpl_compatible:1, /* Is filter GPL compatible? */
 				cb_access:1,	/* Is control block accessed? */
-				dst_needed:1;	/* Do we need dst entry? */
+				dst_needed:1,	/* Do we need dst entry? */
+				blinded:1,	/* Was blinded */
+				is_func:1,	/* program is a bpf function */
+				kprobe_override:1; /* Do we override a kprobe? */
 	enum bpf_prog_type	type;		/* Type of BPF program */
 	u32			len;		/* Number of filter blocks */
 	u32			jited_len;	/* Size of jited insns in bytes */
@@ -495,6 +504,7 @@ struct xdp_buff {
 	void *data_end;
 	void *data_meta;
 	void *data_hard_start;
+	struct xdp_rxq_info *rxq;
 };
 
 /* Compute the linear packet data range [data, data_end) which
@@ -678,6 +688,8 @@ static inline int sk_filter(struct sock *sk, struct sk_buff *skb)
 struct bpf_prog *bpf_prog_select_runtime(struct bpf_prog *fp, int *err);
 void bpf_prog_free(struct bpf_prog *fp);
 
+bool bpf_opcode_in_insntable(u8 code);
+
 struct bpf_prog *bpf_prog_alloc(unsigned int size, gfp_t gfp_extra_flags);
 struct bpf_prog *bpf_prog_realloc(struct bpf_prog *fp_old, unsigned int size,
 				  gfp_t gfp_extra_flags);
@@ -709,11 +721,22 @@ bool sk_filter_charge(struct sock *sk, struct sk_filter *fp);
 void sk_filter_uncharge(struct sock *sk, struct sk_filter *fp);
 
 u64 __bpf_call_base(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5);
+#define __bpf_call_base_args \
+	((u64 (*)(u64, u64, u64, u64, u64, const struct bpf_insn *)) \
+	 __bpf_call_base)
 
 struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog);
 void bpf_jit_compile(struct bpf_prog *prog);
 bool bpf_helper_changes_pkt_data(void *func);
 
+static inline bool bpf_dump_raw_ok(void)
+{
+	/* Reconstruction of call-sites is dependent on kallsyms,
+	 * thus apply the same restriction to the dump.
+	 */
+	return kallsyms_show_value() == 1;
+}
+
 struct bpf_prog *bpf_patch_insn_single(struct bpf_prog *prog, u32 off,
 				       const struct bpf_insn *patch, u32 len);
 
@@ -797,7 +820,7 @@ static inline bool bpf_prog_ebpf_jited(const struct bpf_prog *fp)
 	return fp->jited && bpf_jit_is_ebpf();
 }
 
-static inline bool bpf_jit_blinding_enabled(void)
+static inline bool bpf_jit_blinding_enabled(struct bpf_prog *prog)
 {
 	/* These are the prerequisites, should someone ever have the
 	 * idea to call blinding outside of them, we make sure to
@@ -805,7 +828,7 @@ static inline bool bpf_jit_blinding_enabled(void)
 	 */
 	if (!bpf_jit_is_ebpf())
 		return false;
-	if (!bpf_jit_enable)
+	if (!prog->jit_requested)
 		return false;
 	if (!bpf_jit_harden)
 		return false;
@@ -982,9 +1005,20 @@ struct bpf_sock_ops_kern {
 	struct	sock *sk;
 	u32	op;
 	union {
+		u32 args[4];
 		u32 reply;
 		u32 replylong[4];
 	};
+	u32	is_fullsock;
+	u64	temp;			/* temp and everything after is not
+					 * initialized to 0 before calling
+					 * the BPF program. New fields that
+					 * should be initialized to 0 should
+					 * be inserted before temp.
+					 * temp is scratch storage used by
+					 * sock_ops_convert_ctx_access
+					 * as temporary storage of a register.
+					 */
 };
 
 #endif /* __LINUX_FILTER_H__ */
diff --git a/include/linux/hyperv.h b/include/linux/hyperv.h
index 6c9336626592..93bd6fcd6e62 100644
--- a/include/linux/hyperv.h
+++ b/include/linux/hyperv.h
@@ -127,28 +127,6 @@ struct hv_ring_buffer_info {
 	u32 priv_read_index;
 };
 
-/*
- *
- * hv_get_ringbuffer_availbytes()
- *
- * Get number of bytes available to read and to write to
- * for the specified ring buffer
- */
-static inline void
-hv_get_ringbuffer_availbytes(const struct hv_ring_buffer_info *rbi,
-			     u32 *read, u32 *write)
-{
-	u32 read_loc, write_loc, dsize;
-
-	/* Capture the read/write indices before they changed */
-	read_loc = rbi->ring_buffer->read_index;
-	write_loc = rbi->ring_buffer->write_index;
-	dsize = rbi->ring_datasize;
-
-	*write = write_loc >= read_loc ? dsize - (write_loc - read_loc) :
-		read_loc - write_loc;
-	*read = dsize - *write;
-}
 
 static inline u32 hv_get_bytes_to_read(const struct hv_ring_buffer_info *rbi)
 {
diff --git a/include/linux/if_link.h b/include/linux/if_link.h
index 4c54611e03e9..622658dfbf0a 100644
--- a/include/linux/if_link.h
+++ b/include/linux/if_link.h
@@ -13,6 +13,8 @@ struct ifla_vf_stats {
 	__u64 tx_bytes;
 	__u64 broadcast;
 	__u64 multicast;
+	__u64 rx_dropped;
+	__u64 tx_dropped;
 };
 
 struct ifla_vf_info {
diff --git a/include/linux/if_macvlan.h b/include/linux/if_macvlan.h
index bedf54b6f943..4cb7aeeafce0 100644
--- a/include/linux/if_macvlan.h
+++ b/include/linux/if_macvlan.h
@@ -30,10 +30,10 @@ struct macvlan_dev {
 	enum macvlan_mode	mode;
 	u16			flags;
 	int			nest_level;
+	unsigned int		macaddr_count;
 #ifdef CONFIG_NET_POLL_CONTROLLER
 	struct netpoll		*netpoll;
 #endif
-	unsigned int		macaddr_count;
 };
 
 static inline void macvlan_count_rx(const struct macvlan_dev *vlan,
diff --git a/include/linux/if_tap.h b/include/linux/if_tap.h
index 3ecef57c31e3..8e66866c11be 100644
--- a/include/linux/if_tap.h
+++ b/include/linux/if_tap.h
@@ -4,7 +4,7 @@
 
 #if IS_ENABLED(CONFIG_TAP)
 struct socket *tap_get_socket(struct file *);
-struct skb_array *tap_get_skb_array(struct file *file);
+struct ptr_ring *tap_get_ptr_ring(struct file *file);
 #else
 #include <linux/err.h>
 #include <linux/errno.h>
@@ -14,7 +14,7 @@ static inline struct socket *tap_get_socket(struct file *f)
 {
 	return ERR_PTR(-EINVAL);
 }
-static inline struct skb_array *tap_get_skb_array(struct file *f)
+static inline struct ptr_ring *tap_get_ptr_ring(struct file *f)
 {
 	return ERR_PTR(-EINVAL);
 }
@@ -70,7 +70,7 @@ struct tap_queue {
 	u16 queue_index;
 	bool enabled;
 	struct list_head next;
-	struct skb_array skb_array;
+	struct ptr_ring ring;
 };
 
 rx_handler_result_t tap_handle_frame(struct sk_buff **pskb);
diff --git a/include/linux/if_tun.h b/include/linux/if_tun.h
index bf9bdf42d577..c5b0a75a7812 100644
--- a/include/linux/if_tun.h
+++ b/include/linux/if_tun.h
@@ -17,9 +17,14 @@
 
 #include <uapi/linux/if_tun.h>
 
+#define TUN_XDP_FLAG 0x1UL
+
 #if defined(CONFIG_TUN) || defined(CONFIG_TUN_MODULE)
 struct socket *tun_get_socket(struct file *);
-struct skb_array *tun_get_skb_array(struct file *file);
+struct ptr_ring *tun_get_tx_ring(struct file *file);
+bool tun_is_xdp_buff(void *ptr);
+void *tun_xdp_to_ptr(void *ptr);
+void *tun_ptr_to_xdp(void *ptr);
 #else
 #include <linux/err.h>
 #include <linux/errno.h>
@@ -29,9 +34,21 @@ static inline struct socket *tun_get_socket(struct file *f)
 {
 	return ERR_PTR(-EINVAL);
 }
-static inline struct skb_array *tun_get_skb_array(struct file *f)
+static inline struct ptr_ring *tun_get_tx_ring(struct file *f)
 {
 	return ERR_PTR(-EINVAL);
 }
+static inline bool tun_is_xdp_buff(void *ptr)
+{
+	return false;
+}
+static inline void *tun_xdp_to_ptr(void *ptr)
+{
+	return NULL;
+}
+static inline void *tun_ptr_to_xdp(void *ptr)
+{
+	return NULL;
+}
 #endif /* CONFIG_TUN */
 #endif /* __IF_TUN_H */
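
The three tun helpers declared here implement low-bit pointer tagging: an xdp_buff travels through the same ptr_ring as sk_buffs with TUN_XDP_FLAG or'ed into bit 0 of the pointer (pointers are at least word aligned, so the bit is free). A sketch of the expected definitions, matching how drivers/net/tun.c implements them in this series:

#include <linux/if_tun.h>

bool tun_is_xdp_buff(void *ptr)
{
	return (unsigned long)ptr & TUN_XDP_FLAG;
}

void *tun_xdp_to_ptr(void *ptr)
{
	/* Tag an xdp_buff before queueing it on the ring. */
	return (void *)((unsigned long)ptr | TUN_XDP_FLAG);
}

void *tun_ptr_to_xdp(void *ptr)
{
	/* Strip the tag to recover the xdp_buff pointer. */
	return (void *)((unsigned long)ptr & ~TUN_XDP_FLAG);
}
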
diff --git a/include/linux/inetdevice.h b/include/linux/inetdevice.h
index 1ac5bf95bfdd..e16fe7d44a71 100644
--- a/include/linux/inetdevice.h
+++ b/include/linux/inetdevice.h
@@ -173,7 +173,7 @@ static inline struct net_device *ip_dev_find(struct net *net, __be32 addr)
 }
 
 int inet_addr_onlink(struct in_device *in_dev, __be32 a, __be32 b);
-int devinet_ioctl(struct net *net, unsigned int cmd, void __user *);
+int devinet_ioctl(struct net *net, unsigned int cmd, struct ifreq *);
 void devinet_init(void);
 struct in_device *inetdev_by_index(struct net *, int);
 __be32 inet_select_addr(const struct net_device *dev, __be32 dst, int scope);
diff --git a/include/linux/mdio.h b/include/linux/mdio.h
index ca08ab16ecdc..2cfffe586885 100644
--- a/include/linux/mdio.h
+++ b/include/linux/mdio.h
@@ -12,6 +12,7 @@
 #include <uapi/linux/mdio.h>
 #include <linux/mod_devicetable.h>
 
+struct gpio_desc;
 struct mii_bus;
 
 /* Multiple levels of nesting are possible. However typically this is
@@ -39,6 +40,9 @@ struct mdio_device {
 	/* Bus address of the MDIO device (0-31) */
 	int addr;
 	int flags;
+	struct gpio_desc *reset;
+	unsigned int reset_assert_delay;
+	unsigned int reset_deassert_delay;
 };
 #define to_mdio_device(d) container_of(d, struct mdio_device, dev)
 
@@ -71,6 +75,7 @@ void mdio_device_free(struct mdio_device *mdiodev);
 struct mdio_device *mdio_device_create(struct mii_bus *bus, int addr);
 int mdio_device_register(struct mdio_device *mdiodev);
 void mdio_device_remove(struct mdio_device *mdiodev);
+void mdio_device_reset(struct mdio_device *mdiodev, int value);
 int mdio_driver_register(struct mdio_driver *drv);
 void mdio_driver_unregister(struct mdio_driver *drv);
 int mdio_device_bus_match(struct device *dev, struct device_driver *drv);
@@ -257,6 +262,9 @@ static inline u16 ethtool_adv_to_mmd_eee_adv_t(u32 adv)
 	return reg;
 }
 
+int __mdiobus_read(struct mii_bus *bus, int addr, u32 regnum);
+int __mdiobus_write(struct mii_bus *bus, int addr, u32 regnum, u16 val);
+
 int mdiobus_read(struct mii_bus *bus, int addr, u32 regnum);
 int mdiobus_read_nested(struct mii_bus *bus, int addr, u32 regnum);
 int mdiobus_write(struct mii_bus *bus, int addr, u32 regnum, u16 val);
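
The new reset fields let the PHY/MDIO core drive a device's hard-reset GPIO with settle times taken from the device tree. mdio_device_reset() itself only asserts or releases the line; a sketch of how a caller might sequence it (the delay handling here is an assumption, and the names are illustrative):

#include <linux/mdio.h>
#include <linux/delay.h>

static void demo_phy_hard_reset(struct mdio_device *mdiodev)
{
	mdio_device_reset(mdiodev, 1);			/* assert */
	if (mdiodev->reset_assert_delay)
		usleep_range(mdiodev->reset_assert_delay,
			     mdiodev->reset_assert_delay + 100);

	mdio_device_reset(mdiodev, 0);			/* release */
	if (mdiodev->reset_deassert_delay)
		usleep_range(mdiodev->reset_deassert_delay,
			     mdiodev->reset_deassert_delay + 100);
}
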
diff --git a/include/linux/mlx5/fs.h b/include/linux/mlx5/fs.h
index b25e7baa273e..a0b48afcb422 100644
--- a/include/linux/mlx5/fs.h
+++ b/include/linux/mlx5/fs.h
@@ -95,6 +95,10 @@ struct mlx5_flow_destination {
 struct mlx5_flow_namespace *
 mlx5_get_flow_namespace(struct mlx5_core_dev *dev,
 			enum mlx5_flow_namespace_type type);
+struct mlx5_flow_namespace *
+mlx5_get_flow_vport_acl_namespace(struct mlx5_core_dev *dev,
+				  enum mlx5_flow_namespace_type type,
+				  int vport);
 
 struct mlx5_flow_table *
 mlx5_create_auto_grouped_flow_table(struct mlx5_flow_namespace *ns,
diff --git a/include/linux/mlx5/mlx5_ifc.h b/include/linux/mlx5/mlx5_ifc.h
index 7ac7bd76c7af..f4e417686f62 100644
--- a/include/linux/mlx5/mlx5_ifc.h
+++ b/include/linux/mlx5/mlx5_ifc.h
@@ -1026,13 +1026,21 @@ struct mlx5_ifc_cmd_hca_cap_bits {
 	u8         reserved_at_3b8[0x3];
 	u8         log_min_stride_sz_sq[0x5];
 
-	u8         reserved_at_3c0[0x1b];
+	u8         hairpin[0x1];
+	u8         reserved_at_3c1[0x2];
+	u8         log_max_hairpin_queues[0x5];
+	u8         reserved_at_3c8[0x3];
+	u8         log_max_hairpin_wq_data_sz[0x5];
+	u8         reserved_at_3d0[0x3];
+	u8         log_max_hairpin_num_packets[0x5];
+	u8         reserved_at_3d8[0x3];
 	u8         log_max_wq_sz[0x5];
 
 	u8         nic_vport_change_event[0x1];
 	u8         disable_local_lb_uc[0x1];
 	u8         disable_local_lb_mc[0x1];
-	u8         reserved_at_3e3[0x8];
+	u8         log_min_hairpin_wq_data_sz[0x5];
+	u8         reserved_at_3e8[0x3];
 	u8         log_max_vlan_list[0x5];
 	u8         reserved_at_3f0[0x3];
 	u8         log_max_current_mc_list[0x5];
@@ -1171,7 +1179,12 @@ struct mlx5_ifc_wq_bits {
 	u8         reserved_at_118[0x3];
 	u8         log_wq_sz[0x5];
 
-	u8         reserved_at_120[0x15];
+	u8         reserved_at_120[0x3];
+	u8         log_hairpin_num_packets[0x5];
+	u8         reserved_at_128[0x3];
+	u8         log_hairpin_data_sz[0x5];
+	u8         reserved_at_130[0x5];
+
 	u8         log_wqe_num_of_strides[0x3];
 	u8         two_byte_shift_en[0x1];
 	u8         reserved_at_139[0x4];
@@ -2491,7 +2504,8 @@ struct mlx5_ifc_sqc_bits {
 	u8         state[0x4];
 	u8         reg_umr[0x1];
 	u8         allow_swp[0x1];
-	u8         reserved_at_e[0x12];
+	u8         hairpin[0x1];
+	u8         reserved_at_f[0x11];
 
 	u8         reserved_at_20[0x8];
 	u8         user_index[0x18];
@@ -2499,7 +2513,13 @@ struct mlx5_ifc_sqc_bits {
 	u8         reserved_at_40[0x8];
 	u8         cqn[0x18];
 
-	u8         reserved_at_60[0x90];
+	u8         reserved_at_60[0x8];
+	u8         hairpin_peer_rq[0x18];
+
+	u8         reserved_at_80[0x10];
+	u8         hairpin_peer_vhca[0x10];
+
+	u8         reserved_at_a0[0x50];
 
 	u8         packet_pacing_rate_limit_index[0x10];
 	u8         tis_lst_sz[0x10];
@@ -2571,7 +2591,8 @@ struct mlx5_ifc_rqc_bits {
 	u8         state[0x4];
 	u8         reserved_at_c[0x1];
 	u8         flush_in_error_en[0x1];
-	u8         reserved_at_e[0x12];
+	u8         hairpin[0x1];
+	u8         reserved_at_f[0x11];
 
 	u8         reserved_at_20[0x8];
 	u8         user_index[0x18];
@@ -2585,7 +2606,13 @@ struct mlx5_ifc_rqc_bits {
 	u8         reserved_at_80[0x8];
 	u8         rmpn[0x18];
 
-	u8         reserved_at_a0[0xe0];
+	u8         reserved_at_a0[0x8];
+	u8         hairpin_peer_sq[0x18];
+
+	u8         reserved_at_c0[0x10];
+	u8         hairpin_peer_vhca[0x10];
+
+	u8         reserved_at_e0[0xa0];
 
 	struct mlx5_ifc_wq_bits wq;
 };
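The new hairpin fields are programmed through the MLX5_SET() accessors generated from these *_bits layouts. A hedged sketch of marking an RQ as one half of a hairpin pair (the helper name is illustrative; the real command construction lives in the transobj code below):

#include <linux/mlx5/device.h>
#include <linux/mlx5/mlx5_ifc.h>

/* Illustrative: flag an RQ context as hairpin and point it at its
 * peer SQ on the peer function. */
static void my_set_hairpin_peer(void *rqc, u32 peer_sqn, u16 peer_vhca)
{
	MLX5_SET(rqc, rqc, hairpin, 1);
	MLX5_SET(rqc, rqc, hairpin_peer_sq, peer_sqn);
	MLX5_SET(rqc, rqc, hairpin_peer_vhca, peer_vhca);
}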
diff --git a/include/linux/mlx5/transobj.h b/include/linux/mlx5/transobj.h
index 88441f5ece25..7e8f281f8c00 100644
--- a/include/linux/mlx5/transobj.h
+++ b/include/linux/mlx5/transobj.h
@@ -75,4 +75,27 @@ int mlx5_core_modify_rqt(struct mlx5_core_dev *dev, u32 rqtn, u32 *in,
 			 int inlen);
 void mlx5_core_destroy_rqt(struct mlx5_core_dev *dev, u32 rqtn);
 
+struct mlx5_hairpin_params {
+	u8  log_data_size;
+	u8  log_num_packets;
+	u16 q_counter;
+	int num_channels;
+};
+
+struct mlx5_hairpin {
+	struct mlx5_core_dev *func_mdev;
+	struct mlx5_core_dev *peer_mdev;
+
+	int num_channels;
+
+	u32 *rqn;
+	u32 *sqn;
+};
+
+struct mlx5_hairpin *
+mlx5_core_hairpin_create(struct mlx5_core_dev *func_mdev,
+			 struct mlx5_core_dev *peer_mdev,
+			 struct mlx5_hairpin_params *params);
+
+void mlx5_core_hairpin_destroy(struct mlx5_hairpin *pair);
 #endif /* __TRANSOBJ_H__ */
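Putting the two halves together, a driver would create the pair roughly as follows (a sketch, assuming both mlx5_core_dev instances are already at hand; the sizing values are illustrative):

#include <linux/mlx5/transobj.h>

static struct mlx5_hairpin *my_bind_queues(struct mlx5_core_dev *func,
					   struct mlx5_core_dev *peer)
{
	struct mlx5_hairpin_params params = {
		.log_data_size   = 16,	/* 64KB of packet buffer */
		.log_num_packets = 7,	/* up to 128 queued packets */
		.q_counter       = 0,
		.num_channels    = 1,
	};

	/* Returns the pair, or an ERR_PTR() on failure; torn down
	 * later with mlx5_core_hairpin_destroy(). */
	return mlx5_core_hairpin_create(func, peer, &params);
}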
diff --git a/include/linux/module.h b/include/linux/module.h
index 1d8f245967be..70245f1a3590 100644
--- a/include/linux/module.h
+++ b/include/linux/module.h
@@ -19,6 +19,7 @@
 #include <linux/jump_label.h>
 #include <linux/export.h>
 #include <linux/rbtree_latch.h>
+#include <linux/error-injection.h>
 
 #include <linux/percpu.h>
 #include <asm/module.h>
@@ -475,6 +476,11 @@ struct module {
 	ctor_fn_t *ctors;
 	unsigned int num_ctors;
 #endif
+
+#ifdef CONFIG_FUNCTION_ERROR_INJECTION
+	struct error_injection_entry *ei_funcs;
+	unsigned int num_ei_funcs;
+#endif
 } ____cacheline_aligned __randomize_layout;
 #ifndef MODULE_ARCH_INIT
 #define MODULE_ARCH_INIT {}
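With CONFIG_FUNCTION_ERROR_INJECTION, each module carries a table (ei_funcs/num_ei_funcs) of functions whose return value may be overridden at runtime, e.g. from a BPF program. Functions opt in with the ALLOW_ERROR_INJECTION() helper from the newly included error-injection.h; a hedged sketch (the function name is illustrative):

#include <linux/errno.h>
#include <linux/error-injection.h>

/* Opt this function in to error injection; an injected return value
 * is interpreted as an errno. */
static noinline int my_try_claim(int id)
{
	return id >= 0 ? 0 : -EINVAL;
}
ALLOW_ERROR_INJECTION(my_try_claim, ERRNO);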
diff --git a/include/linux/net.h b/include/linux/net.h
index c2d468cb9821..91216b16feb7 100644
--- a/include/linux/net.h
+++ b/include/linux/net.h
@@ -306,7 +306,6 @@ int kernel_sendpage(struct socket *sock, struct page *page, int offset,
 		    size_t size, int flags);
 int kernel_sendpage_locked(struct sock *sk, struct page *page, int offset,
 			   size_t size, int flags);
-int kernel_sock_ioctl(struct socket *sock, int cmd, unsigned long arg);
 int kernel_sock_shutdown(struct socket *sock, enum sock_shutdown_cmd how);
 
 /* Routine returns the IP overhead imposed by a (caller-protected) socket. */
diff --git a/include/linux/net_dim.h b/include/linux/net_dim.h
new file mode 100644
index 000000000000..bebeaad897cc
--- /dev/null
+++ b/include/linux/net_dim.h
@@ -0,0 +1,380 @@
+/*
+ * Copyright (c) 2016, Mellanox Technologies. All rights reserved.
+ * Copyright (c) 2017-2018, Broadcom Limited. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses.  You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ *     Redistribution and use in source and binary forms, with or
+ *     without modification, are permitted provided that the following
+ *     conditions are met:
+ *
+ *      - Redistributions of source code must retain the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer.
+ *
+ *      - Redistributions in binary form must reproduce the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer in the documentation and/or other materials
+ *        provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#ifndef NET_DIM_H
+#define NET_DIM_H
+
+#include <linux/module.h>
+
+struct net_dim_cq_moder {
+	u16 usec;
+	u16 pkts;
+	u8 cq_period_mode;
+};
+
+struct net_dim_sample {
+	ktime_t time;
+	u32     pkt_ctr;
+	u32     byte_ctr;
+	u16     event_ctr;
+};
+
+struct net_dim_stats {
+	int ppms; /* packets per msec */
+	int bpms; /* bytes per msec */
+	int epms; /* events per msec */
+};
+
+struct net_dim { /* Adaptive Moderation */
+	u8                                      state;
+	struct net_dim_stats                    prev_stats;
+	struct net_dim_sample                   start_sample;
+	struct work_struct                      work;
+	u8                                      profile_ix;
+	u8                                      mode;
+	u8                                      tune_state;
+	u8                                      steps_right;
+	u8                                      steps_left;
+	u8                                      tired;
+};
+
+enum {
+	NET_DIM_CQ_PERIOD_MODE_START_FROM_EQE = 0x0,
+	NET_DIM_CQ_PERIOD_MODE_START_FROM_CQE = 0x1,
+	NET_DIM_CQ_PERIOD_NUM_MODES
+};
+
+/* Adaptive moderation logic */
+enum {
+	NET_DIM_START_MEASURE,
+	NET_DIM_MEASURE_IN_PROGRESS,
+	NET_DIM_APPLY_NEW_PROFILE,
+};
+
+enum {
+	NET_DIM_PARKING_ON_TOP,
+	NET_DIM_PARKING_TIRED,
+	NET_DIM_GOING_RIGHT,
+	NET_DIM_GOING_LEFT,
+};
+
+enum {
+	NET_DIM_STATS_WORSE,
+	NET_DIM_STATS_SAME,
+	NET_DIM_STATS_BETTER,
+};
+
+enum {
+	NET_DIM_STEPPED,
+	NET_DIM_TOO_TIRED,
+	NET_DIM_ON_EDGE,
+};
+
+#define NET_DIM_PARAMS_NUM_PROFILES 5
+/* Adaptive moderation profiles */
+#define NET_DIM_DEFAULT_RX_CQ_MODERATION_PKTS_FROM_EQE 256
+#define NET_DIM_DEF_PROFILE_CQE 1
+#define NET_DIM_DEF_PROFILE_EQE 1
+
+/* All profile sizes must be NET_DIM_PARAMS_NUM_PROFILES */
+#define NET_DIM_EQE_PROFILES { \
+	{1,   NET_DIM_DEFAULT_RX_CQ_MODERATION_PKTS_FROM_EQE}, \
+	{8,   NET_DIM_DEFAULT_RX_CQ_MODERATION_PKTS_FROM_EQE}, \
+	{64,  NET_DIM_DEFAULT_RX_CQ_MODERATION_PKTS_FROM_EQE}, \
+	{128, NET_DIM_DEFAULT_RX_CQ_MODERATION_PKTS_FROM_EQE}, \
+	{256, NET_DIM_DEFAULT_RX_CQ_MODERATION_PKTS_FROM_EQE}, \
+}
+
+#define NET_DIM_CQE_PROFILES { \
+	{2,  256},             \
+	{8,  128},             \
+	{16, 64},              \
+	{32, 64},              \
+	{64, 64}               \
+}
+
+static const struct net_dim_cq_moder
+profile[NET_DIM_CQ_PERIOD_NUM_MODES][NET_DIM_PARAMS_NUM_PROFILES] = {
+	NET_DIM_EQE_PROFILES,
+	NET_DIM_CQE_PROFILES,
+};
+
+static inline struct net_dim_cq_moder net_dim_get_profile(u8 cq_period_mode,
+							  int ix)
+{
+	struct net_dim_cq_moder cq_moder;
+
+	cq_moder = profile[cq_period_mode][ix];
+	cq_moder.cq_period_mode = cq_period_mode;
+	return cq_moder;
+}
+
+static inline struct net_dim_cq_moder net_dim_get_def_profile(u8 rx_cq_period_mode)
+{
+	int default_profile_ix;
+
+	if (rx_cq_period_mode == NET_DIM_CQ_PERIOD_MODE_START_FROM_CQE)
+		default_profile_ix = NET_DIM_DEF_PROFILE_CQE;
+	else /* NET_DIM_CQ_PERIOD_MODE_START_FROM_EQE */
+		default_profile_ix = NET_DIM_DEF_PROFILE_EQE;
+
+	return net_dim_get_profile(rx_cq_period_mode, default_profile_ix);
+}
+
+static inline bool net_dim_on_top(struct net_dim *dim)
+{
+	switch (dim->tune_state) {
+	case NET_DIM_PARKING_ON_TOP:
+	case NET_DIM_PARKING_TIRED:
+		return true;
+	case NET_DIM_GOING_RIGHT:
+		return (dim->steps_left > 1) && (dim->steps_right == 1);
+	default: /* NET_DIM_GOING_LEFT */
+		return (dim->steps_right > 1) && (dim->steps_left == 1);
+	}
+}
+
+static inline void net_dim_turn(struct net_dim *dim)
+{
+	switch (dim->tune_state) {
+	case NET_DIM_PARKING_ON_TOP:
+	case NET_DIM_PARKING_TIRED:
+		break;
+	case NET_DIM_GOING_RIGHT:
+		dim->tune_state = NET_DIM_GOING_LEFT;
+		dim->steps_left = 0;
+		break;
+	case NET_DIM_GOING_LEFT:
+		dim->tune_state = NET_DIM_GOING_RIGHT;
+		dim->steps_right = 0;
+		break;
+	}
+}
+
+static inline int net_dim_step(struct net_dim *dim)
+{
+	if (dim->tired == (NET_DIM_PARAMS_NUM_PROFILES * 2))
+		return NET_DIM_TOO_TIRED;
+
+	switch (dim->tune_state) {
+	case NET_DIM_PARKING_ON_TOP:
+	case NET_DIM_PARKING_TIRED:
+		break;
+	case NET_DIM_GOING_RIGHT:
+		if (dim->profile_ix == (NET_DIM_PARAMS_NUM_PROFILES - 1))
+			return NET_DIM_ON_EDGE;
+		dim->profile_ix++;
+		dim->steps_right++;
+		break;
+	case NET_DIM_GOING_LEFT:
+		if (dim->profile_ix == 0)
+			return NET_DIM_ON_EDGE;
+		dim->profile_ix--;
+		dim->steps_left++;
+		break;
+	}
+
+	dim->tired++;
+	return NET_DIM_STEPPED;
+}
+
+static inline void net_dim_park_on_top(struct net_dim *dim)
+{
+	dim->steps_right  = 0;
+	dim->steps_left   = 0;
+	dim->tired        = 0;
+	dim->tune_state   = NET_DIM_PARKING_ON_TOP;
+}
+
+static inline void net_dim_park_tired(struct net_dim *dim)
+{
+	dim->steps_right  = 0;
+	dim->steps_left   = 0;
+	dim->tune_state   = NET_DIM_PARKING_TIRED;
+}
+
+static inline void net_dim_exit_parking(struct net_dim *dim)
+{
+	dim->tune_state = dim->profile_ix ? NET_DIM_GOING_LEFT :
+					  NET_DIM_GOING_RIGHT;
+	net_dim_step(dim);
+}
+
+#define IS_SIGNIFICANT_DIFF(val, ref) \
+	(((100 * abs((val) - (ref))) / (ref)) > 10) /* more than 10% difference */
+
+static inline int net_dim_stats_compare(struct net_dim_stats *curr,
+					struct net_dim_stats *prev)
+{
+	if (!prev->bpms)
+		return curr->bpms ? NET_DIM_STATS_BETTER :
+				    NET_DIM_STATS_SAME;
+
+	if (IS_SIGNIFICANT_DIFF(curr->bpms, prev->bpms))
+		return (curr->bpms > prev->bpms) ? NET_DIM_STATS_BETTER :
+						   NET_DIM_STATS_WORSE;
+
+	if (!prev->ppms)
+		return curr->ppms ? NET_DIM_STATS_BETTER :
+				    NET_DIM_STATS_SAME;
+
+	if (IS_SIGNIFICANT_DIFF(curr->ppms, prev->ppms))
+		return (curr->ppms > prev->ppms) ? NET_DIM_STATS_BETTER :
+						   NET_DIM_STATS_WORSE;
+
+	if (!prev->epms)
+		return NET_DIM_STATS_SAME;
+
+	if (IS_SIGNIFICANT_DIFF(curr->epms, prev->epms))
+		return (curr->epms < prev->epms) ? NET_DIM_STATS_BETTER :
+						   NET_DIM_STATS_WORSE;
+
+	return NET_DIM_STATS_SAME;
+}
+
+static inline bool net_dim_decision(struct net_dim_stats *curr_stats,
+				    struct net_dim *dim)
+{
+	int prev_state = dim->tune_state;
+	int prev_ix = dim->profile_ix;
+	int stats_res;
+	int step_res;
+
+	switch (dim->tune_state) {
+	case NET_DIM_PARKING_ON_TOP:
+		stats_res = net_dim_stats_compare(curr_stats, &dim->prev_stats);
+		if (stats_res != NET_DIM_STATS_SAME)
+			net_dim_exit_parking(dim);
+		break;
+
+	case NET_DIM_PARKING_TIRED:
+		dim->tired--;
+		if (!dim->tired)
+			net_dim_exit_parking(dim);
+		break;
+
+	case NET_DIM_GOING_RIGHT:
+	case NET_DIM_GOING_LEFT:
+		stats_res = net_dim_stats_compare(curr_stats, &dim->prev_stats);
+		if (stats_res != NET_DIM_STATS_BETTER)
+			net_dim_turn(dim);
+
+		if (net_dim_on_top(dim)) {
+			net_dim_park_on_top(dim);
+			break;
+		}
+
+		step_res = net_dim_step(dim);
+		switch (step_res) {
+		case NET_DIM_ON_EDGE:
+			net_dim_park_on_top(dim);
+			break;
+		case NET_DIM_TOO_TIRED:
+			net_dim_park_tired(dim);
+			break;
+		}
+
+		break;
+	}
+
+	if ((prev_state      != NET_DIM_PARKING_ON_TOP) ||
+	    (dim->tune_state != NET_DIM_PARKING_ON_TOP))
+		dim->prev_stats = *curr_stats;
+
+	return dim->profile_ix != prev_ix;
+}
+
+static inline void net_dim_sample(u16 event_ctr,
+				  u64 packets,
+				  u64 bytes,
+				  struct net_dim_sample *s)
+{
+	s->time	     = ktime_get();
+	s->pkt_ctr   = packets;
+	s->byte_ctr  = bytes;
+	s->event_ctr = event_ctr;
+}
+
+#define NET_DIM_NEVENTS 64
+#define BITS_PER_TYPE(type) (sizeof(type) * BITS_PER_BYTE)
+#define BIT_GAP(bits, end, start) ((((end) - (start)) + BIT_ULL(bits)) & (BIT_ULL(bits) - 1))
+
+static inline void net_dim_calc_stats(struct net_dim_sample *start,
+				      struct net_dim_sample *end,
+				      struct net_dim_stats *curr_stats)
+{
+	/* u32 holds up to 71 minutes, should be enough */
+	u32 delta_us = ktime_us_delta(end->time, start->time);
+	u32 npkts = BIT_GAP(BITS_PER_TYPE(u32), end->pkt_ctr, start->pkt_ctr);
+	u32 nbytes = BIT_GAP(BITS_PER_TYPE(u32), end->byte_ctr,
+			     start->byte_ctr);
+
+	if (!delta_us)
+		return;
+
+	curr_stats->ppms = DIV_ROUND_UP(npkts * USEC_PER_MSEC, delta_us);
+	curr_stats->bpms = DIV_ROUND_UP(nbytes * USEC_PER_MSEC, delta_us);
+	curr_stats->epms = DIV_ROUND_UP(NET_DIM_NEVENTS * USEC_PER_MSEC,
+					delta_us);
+}
+
+static inline void net_dim(struct net_dim *dim,
+			   struct net_dim_sample end_sample)
+{
+	struct net_dim_stats curr_stats;
+	u16 nevents;
+
+	switch (dim->state) {
+	case NET_DIM_MEASURE_IN_PROGRESS:
+		nevents = BIT_GAP(BITS_PER_TYPE(u16),
+				  end_sample.event_ctr,
+				  dim->start_sample.event_ctr);
+		if (nevents < NET_DIM_NEVENTS)
+			break;
+		net_dim_calc_stats(&dim->start_sample, &end_sample,
+				   &curr_stats);
+		if (net_dim_decision(&curr_stats, dim)) {
+			dim->state = NET_DIM_APPLY_NEW_PROFILE;
+			schedule_work(&dim->work);
+			break;
+		}
+		/* fall through */
+	case NET_DIM_START_MEASURE:
+		dim->state = NET_DIM_MEASURE_IN_PROGRESS;
+		break;
+	case NET_DIM_APPLY_NEW_PROFILE:
+		break;
+	}
+}
+
+#endif /* NET_DIM_H */
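A driver consumes this library by taking a sample at the end of each NAPI poll and feeding it to net_dim(); when the algorithm settles on a new profile it schedules dim.work, whose handler (initialized by the driver with INIT_WORK()) reprograms the hardware from profile_ix. A minimal sketch, with struct my_ring and its counters being illustrative:

#include <linux/net_dim.h>

struct my_ring {
	struct net_dim dim;	/* work handler set up at ring init */
	u16 events;		/* cumulative interrupt/event count */
	u64 packets;		/* cumulative packet count */
	u64 bytes;		/* cumulative byte count */
};

static void my_ring_poll_done(struct my_ring *ring)
{
	struct net_dim_sample sample;

	/* Snapshot the cumulative counters and let the algorithm
	 * decide whether the moderation profile should change. */
	net_dim_sample(ring->events, ring->packets, ring->bytes, &sample);
	net_dim(&ring->dim, sample);
}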
diff --git a/include/linux/netdev_features.h b/include/linux/netdev_features.h
index b1b0ca7ccb2b..db84c516bcfb 100644
--- a/include/linux/netdev_features.h
+++ b/include/linux/netdev_features.h
@@ -78,6 +78,8 @@ enum {
 	NETIF_F_HW_ESP_TX_CSUM_BIT,	/* ESP with TX checksum offload */
 	NETIF_F_RX_UDP_TUNNEL_PORT_BIT, /* Offload of RX port for UDP tunnels */
 
+	NETIF_F_GRO_HW_BIT,		/* Hardware Generic receive offload */
+
 	/*
 	 * Add your fresh new feature above and remember to update
 	 * netdev_features_strings[] in net/core/ethtool.c and maybe
@@ -97,6 +99,7 @@ enum {
 #define NETIF_F_FRAGLIST	__NETIF_F(FRAGLIST)
 #define NETIF_F_FSO		__NETIF_F(FSO)
 #define NETIF_F_GRO		__NETIF_F(GRO)
+#define NETIF_F_GRO_HW		__NETIF_F(GRO_HW)
 #define NETIF_F_GSO		__NETIF_F(GSO)
 #define NETIF_F_GSO_ROBUST	__NETIF_F(GSO_ROBUST)
 #define NETIF_F_HIGHDMA		__NETIF_F(HIGHDMA)
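NETIF_F_GRO_HW lets a driver expose hardware-assisted GRO as a feature distinct from software NETIF_F_GRO. Since hardware GRO typically depends on other offloads, drivers are expected to drop it in ndo_fix_features() when a prerequisite is disabled; a hedged sketch (the RXCSUM dependency here is an assumed example, not mandated by this patch):

#include <linux/netdevice.h>

/* Illustrative feature fixup: hardware GRO assumed to require RX
 * checksum offload on this device. */
static netdev_features_t my_fix_features(struct net_device *dev,
					 netdev_features_t features)
{
	if (!(features & NETIF_F_RXCSUM))
		features &= ~NETIF_F_GRO_HW;

	return features;
}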
diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
index ef789e1d679e..4c77f39ebd65 100644
--- a/include/linux/netdevice.h
+++ b/include/linux/netdevice.h
@@ -44,6 +44,7 @@
 #include <net/dcbnl.h>
 #endif
 #include <net/netprio_cgroup.h>
+#include <net/xdp.h>
 
 #include <linux/netdev_features.h>
 #include <linux/neighbour.h>
@@ -686,6 +687,7 @@ struct netdev_rx_queue {
 #endif
 	struct kobject			kobj;
 	struct net_device		*dev;
+	struct xdp_rxq_info		xdp_rxq;
 } ____cacheline_aligned_in_smp;
 
 /*
@@ -778,6 +780,7 @@ enum tc_setup_type {
 	TC_SETUP_BLOCK,
 	TC_SETUP_QDISC_CBS,
 	TC_SETUP_QDISC_RED,
+	TC_SETUP_QDISC_PRIO,
 };
 
 /* These structures hold the attributes of bpf state that are being passed
@@ -802,9 +805,11 @@ enum bpf_netdev_command {
 	BPF_OFFLOAD_VERIFIER_PREP,
 	BPF_OFFLOAD_TRANSLATE,
 	BPF_OFFLOAD_DESTROY,
+	BPF_OFFLOAD_MAP_ALLOC,
+	BPF_OFFLOAD_MAP_FREE,
 };
 
-struct bpf_ext_analyzer_ops;
+struct bpf_prog_offload_ops;
 struct netlink_ext_ack;
 
 struct netdev_bpf {
@@ -820,16 +825,22 @@ struct netdev_bpf {
 		struct {
 			u8 prog_attached;
 			u32 prog_id;
+			/* flags with which program was installed */
+			u32 prog_flags;
 		};
 		/* BPF_OFFLOAD_VERIFIER_PREP */
 		struct {
 			struct bpf_prog *prog;
-			const struct bpf_ext_analyzer_ops *ops; /* callee set */
+			const struct bpf_prog_offload_ops *ops; /* callee set */
 		} verifier;
 		/* BPF_OFFLOAD_TRANSLATE, BPF_OFFLOAD_DESTROY */
 		struct {
 			struct bpf_prog *prog;
 		} offload;
+		/* BPF_OFFLOAD_MAP_ALLOC, BPF_OFFLOAD_MAP_FREE */
+		struct {
+			struct bpf_offloaded_map *offmap;
+		};
 	};
 };
 
@@ -840,6 +851,7 @@ struct xfrmdev_ops {
 	void	(*xdo_dev_state_free) (struct xfrm_state *x);
 	bool	(*xdo_dev_offload_ok) (struct sk_buff *skb,
 				       struct xfrm_state *x);
+	void	(*xdo_dev_state_advance_esn) (struct xfrm_state *x);
 };
 #endif
 
@@ -1458,8 +1470,6 @@ enum netdev_priv_flags {
  *	@base_addr:	Device I/O address
  *	@irq:		Device IRQ number
  *
- *	@carrier_changes:	Stats to monitor carrier on<->off transitions
- *
  *	@state:		Generic network queuing layer state, see netdev_state_t
  *	@dev_list:	The global list of network devices
  *	@napi_list:	List entry used for polling NAPI devices
@@ -1495,6 +1505,8 @@ enum netdev_priv_flags {
  *			do not use this in drivers
  *	@rx_nohandler:	nohandler dropped packets by core network on
  *			inactive devices, do not use this in drivers
+ *	@carrier_up_count:	Number of times the carrier has been up
+ *	@carrier_down_count:	Number of times the carrier has been down
  *
  *	@wireless_handlers:	List of functions to handle Wireless Extensions,
  *				instead of ioctl,
@@ -1669,8 +1681,6 @@ struct net_device {
 	unsigned long		base_addr;
 	int			irq;
 
-	atomic_t		carrier_changes;
-
 	/*
 	 *	Some hardware also needs these fields (state,dev_list,
 	 *	napi_list,unreg_list,close_list) but they are not
@@ -1708,6 +1718,10 @@ struct net_device {
 	atomic_long_t		tx_dropped;
 	atomic_long_t		rx_nohandler;
 
+	/* Stats to monitor link on/off, flapping */
+	atomic_t		carrier_up_count;
+	atomic_t		carrier_down_count;
+
 #ifdef CONFIG_WIRELESS_EXT
 	const struct iw_handler_def *wireless_handlers;
 	struct iw_public_data	*wireless_data;
@@ -1724,7 +1738,7 @@ struct net_device {
 	const struct ndisc_ops *ndisc_ops;
 #endif
 
-#ifdef CONFIG_XFRM
+#ifdef CONFIG_XFRM_OFFLOAD
 	const struct xfrmdev_ops *xfrmdev_ops;
 #endif
 
@@ -1801,12 +1815,9 @@ struct net_device {
 	/* Interface address info used in eth_type_trans() */
 	unsigned char		*dev_addr;
 
-#ifdef CONFIG_SYSFS
 	struct netdev_rx_queue	*_rx;
-
 	unsigned int		num_rx_queues;
 	unsigned int		real_num_rx_queues;
-#endif
 
 	struct bpf_prog __rcu	*xdp_prog;
 	unsigned long		gro_flush_timeout;
@@ -2751,7 +2762,8 @@ static inline bool dev_validate_header(const struct net_device *dev,
 	return false;
 }
 
-typedef int gifconf_func_t(struct net_device * dev, char __user * bufptr, int len);
+typedef int gifconf_func_t(struct net_device * dev, char __user * bufptr,
+			   int len, int size);
 int register_gifconf(unsigned int family, gifconf_func_t *gifconf);
 static inline int unregister_gifconf(unsigned int family)
 {
@@ -2791,7 +2803,9 @@ struct softnet_data {
 	struct Qdisc		*output_queue;
 	struct Qdisc		**output_queue_tailp;
 	struct sk_buff		*completion_queue;
-
+#ifdef CONFIG_XFRM_OFFLOAD
+	struct sk_buff_head	xfrm_backlog;
+#endif
 #ifdef CONFIG_RPS
 	/* input_queue_head should be written by cpu owning this struct,
 	 * and only read by other cpus. Worth using a cache line.
@@ -3302,7 +3316,9 @@ int netdev_rx_handler_register(struct net_device *dev,
 void netdev_rx_handler_unregister(struct net_device *dev);
 
 bool dev_valid_name(const char *name);
-int dev_ioctl(struct net *net, unsigned int cmd, void __user *);
+int dev_ioctl(struct net *net, unsigned int cmd, struct ifreq *ifr,
+		bool *need_copyout);
+int dev_ifconf(struct net *net, struct ifconf *, int);
 int dev_ethtool(struct net *net, struct ifreq *);
 unsigned int dev_get_flags(const struct net_device *);
 int __dev_change_flags(struct net_device *, unsigned int flags);
@@ -3315,6 +3331,7 @@ int dev_get_alias(const struct net_device *, char *, size_t);
 int dev_change_net_namespace(struct net_device *, struct net *, const char *);
 int __dev_set_mtu(struct net_device *, int);
 int dev_set_mtu(struct net_device *, int);
+int dev_change_tx_queue_len(struct net_device *, unsigned long);
 void dev_set_group(struct net_device *, int);
 int dev_set_mac_address(struct net_device *, struct sockaddr *);
 int dev_change_carrier(struct net_device *, bool new_carrier);
@@ -3323,14 +3340,15 @@ int dev_get_phys_port_id(struct net_device *dev,
 int dev_get_phys_port_name(struct net_device *dev,
 			   char *name, size_t len);
 int dev_change_proto_down(struct net_device *dev, bool proto_down);
-struct sk_buff *validate_xmit_skb_list(struct sk_buff *skb, struct net_device *dev);
+struct sk_buff *validate_xmit_skb_list(struct sk_buff *skb, struct net_device *dev, bool *again);
 struct sk_buff *dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev,
 				    struct netdev_queue *txq, int *ret);
 
 typedef int (*bpf_op_t)(struct net_device *dev, struct netdev_bpf *bpf);
 int dev_change_xdp_fd(struct net_device *dev, struct netlink_ext_ack *extack,
 		      int fd, u32 flags);
-u8 __dev_xdp_attached(struct net_device *dev, bpf_op_t xdp_op, u32 *prog_id);
+void __dev_xdp_query(struct net_device *dev, bpf_op_t xdp_op,
+		     struct netdev_bpf *xdp);
 
 int __dev_forward_skb(struct net_device *dev, struct sk_buff *skb);
 int dev_forward_skb(struct net_device *dev, struct sk_buff *skb);
@@ -4399,11 +4417,11 @@ do {								\
  * file/line information and a backtrace.
  */
 #define netdev_WARN(dev, format, args...)			\
-	WARN(1, "netdevice: %s%s\n" format, netdev_name(dev),	\
+	WARN(1, "netdevice: %s%s: " format, netdev_name(dev),	\
 	     netdev_reg_state(dev), ##args)
 
-#define netdev_WARN_ONCE(dev, condition, format, arg...)		\
-	WARN_ONCE(1, "netdevice: %s%s\n" format, netdev_name(dev)	\
+#define netdev_WARN_ONCE(dev, format, args...)				\
+	WARN_ONCE(1, "netdevice: %s%s: " format, netdev_name(dev),	\
 		  netdev_reg_state(dev), ##args)
 
 /* netif printk helpers, similar to netdev_printk */
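The netdev_WARN_ONCE() change is a genuine fix: the old macro took a never-used condition argument and was missing the comma after netdev_name(dev), so any caller would have failed to compile. With the repaired signature, usage looks like this (a sketch; the MTU check is illustrative):

#include <linux/netdevice.h>

static void my_sanity_check(struct net_device *dev, int mtu)
{
	if (mtu > 9000)
		netdev_WARN_ONCE(dev, "unexpected jumbo MTU %d\n", mtu);
}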
diff --git a/include/linux/netfilter.h b/include/linux/netfilter.h
index b24e9b101651..85a1a0b32c66 100644
--- a/include/linux/netfilter.h
+++ b/include/linux/netfilter.h
@@ -67,6 +67,7 @@ struct nf_hook_ops {
 	struct net_device	*dev;
 	void			*priv;
 	u_int8_t		pf;
+	bool			nat_hook;
 	unsigned int		hooknum;
 	/* Hooks are ordered in ascending priority. */
 	int			priority;
@@ -77,17 +78,28 @@ struct nf_hook_entry {
 	void				*priv;
 };
 
+struct nf_hook_entries_rcu_head {
+	struct rcu_head head;
+	void	*allocation;
+};
+
 struct nf_hook_entries {
 	u16				num_hook_entries;
 	/* padding */
 	struct nf_hook_entry		hooks[];
 
-	/* trailer: pointers to original orig_ops of each hook.
-	 *
-	 * This is not part of struct nf_hook_entry since its only
-	 * needed in slow path (hook register/unregister).
+	/* trailer: pointers to original orig_ops of each hook,
+	 * followed by rcu_head and scratch space used for freeing
+	 * the structure via call_rcu.
 	 *
+	 *   This is not part of struct nf_hook_entry since it's only
+	 *   needed in slow path (hook register/unregister):
+	 * const struct nf_hook_ops     *orig_ops[]
+	 *
+	 *   For the same reason, we store this at the end -- it's
+	 *   only needed when a hook is deleted, not during
+	 *   packet path processing:
+	 * struct nf_hook_entries_rcu_head     head
 	 */
 };
 
@@ -184,7 +196,7 @@ static inline int nf_hook(u_int8_t pf, unsigned int hook, struct net *net,
 			  struct net_device *indev, struct net_device *outdev,
 			  int (*okfn)(struct net *, struct sock *, struct sk_buff *))
 {
-	struct nf_hook_entries *hook_head;
+	struct nf_hook_entries *hook_head = NULL;
 	int ret = 1;
 
 #ifdef HAVE_JUMP_LABEL
@@ -195,7 +207,33 @@ static inline int nf_hook(u_int8_t pf, unsigned int hook, struct net *net,
 #endif
 
 	rcu_read_lock();
-	hook_head = rcu_dereference(net->nf.hooks[pf][hook]);
+	switch (pf) {
+	case NFPROTO_IPV4:
+		hook_head = rcu_dereference(net->nf.hooks_ipv4[hook]);
+		break;
+	case NFPROTO_IPV6:
+		hook_head = rcu_dereference(net->nf.hooks_ipv6[hook]);
+		break;
+	case NFPROTO_ARP:
+#ifdef CONFIG_NETFILTER_FAMILY_ARP
+		hook_head = rcu_dereference(net->nf.hooks_arp[hook]);
+#endif
+		break;
+	case NFPROTO_BRIDGE:
+#ifdef CONFIG_NETFILTER_FAMILY_BRIDGE
+		hook_head = rcu_dereference(net->nf.hooks_bridge[hook]);
+#endif
+		break;
+#if IS_ENABLED(CONFIG_DECNET)
+	case NFPROTO_DECNET:
+		hook_head = rcu_dereference(net->nf.hooks_decnet[hook]);
+		break;
+#endif
+	default:
+		WARN_ON_ONCE(1);
+		break;
+	}
+
 	if (hook_head) {
 		struct nf_hook_state state;
 
@@ -271,64 +309,16 @@ int skb_make_writable(struct sk_buff *skb, unsigned int writable_len);
 struct flowi;
 struct nf_queue_entry;
 
-struct nf_afinfo {
-	unsigned short	family;
-	__sum16		(*checksum)(struct sk_buff *skb, unsigned int hook,
-				    unsigned int dataoff, u_int8_t protocol);
-	__sum16		(*checksum_partial)(struct sk_buff *skb,
-					    unsigned int hook,
-					    unsigned int dataoff,
-					    unsigned int len,
-					    u_int8_t protocol);
-	int		(*route)(struct net *net, struct dst_entry **dst,
-				 struct flowi *fl, bool strict);
-	void		(*saveroute)(const struct sk_buff *skb,
-				     struct nf_queue_entry *entry);
-	int		(*reroute)(struct net *net, struct sk_buff *skb,
-				   const struct nf_queue_entry *entry);
-	int		route_key_size;
-};
-
-extern const struct nf_afinfo __rcu *nf_afinfo[NFPROTO_NUMPROTO];
-static inline const struct nf_afinfo *nf_get_afinfo(unsigned short family)
-{
-	return rcu_dereference(nf_afinfo[family]);
-}
-
-static inline __sum16
-nf_checksum(struct sk_buff *skb, unsigned int hook, unsigned int dataoff,
-	    u_int8_t protocol, unsigned short family)
-{
-	const struct nf_afinfo *afinfo;
-	__sum16 csum = 0;
-
-	rcu_read_lock();
-	afinfo = nf_get_afinfo(family);
-	if (afinfo)
-		csum = afinfo->checksum(skb, hook, dataoff, protocol);
-	rcu_read_unlock();
-	return csum;
-}
-
-static inline __sum16
-nf_checksum_partial(struct sk_buff *skb, unsigned int hook,
-		    unsigned int dataoff, unsigned int len,
-		    u_int8_t protocol, unsigned short family)
-{
-	const struct nf_afinfo *afinfo;
-	__sum16 csum = 0;
-
-	rcu_read_lock();
-	afinfo = nf_get_afinfo(family);
-	if (afinfo)
-		csum = afinfo->checksum_partial(skb, hook, dataoff, len,
-						protocol);
-	rcu_read_unlock();
-	return csum;
-}
+__sum16 nf_checksum(struct sk_buff *skb, unsigned int hook,
+		    unsigned int dataoff, u_int8_t protocol,
+		    unsigned short family);
 
-int nf_register_afinfo(const struct nf_afinfo *afinfo);
-void nf_unregister_afinfo(const struct nf_afinfo *afinfo);
+__sum16 nf_checksum_partial(struct sk_buff *skb, unsigned int hook,
+			    unsigned int dataoff, unsigned int len,
+			    u_int8_t protocol, unsigned short family);
+int nf_route(struct net *net, struct dst_entry **dst, struct flowi *fl,
+	     bool strict, unsigned short family);
+int nf_reroute(struct sk_buff *skb, struct nf_queue_entry *entry);
 
 #include <net/flow.h>
 extern void (*nf_nat_decode_session_hook)(struct sk_buff *, struct flowi *);
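The indirect nf_afinfo callbacks become plain exported functions, so hook code now calls them directly instead of dereferencing a per-family ops table under RCU. A hedged sketch of the checksum helper in use from an IPv4 hook (the function name is illustrative):

#include <linux/netfilter.h>
#include <linux/skbuff.h>
#include <linux/ip.h>
#include <net/ip.h>

/* Illustrative: verify the transport checksum of an IPv4 packet. */
static bool my_l4_csum_ok(struct sk_buff *skb, unsigned int hook)
{
	const struct iphdr *iph = ip_hdr(skb);

	return nf_checksum(skb, hook, ip_hdrlen(skb), iph->protocol,
			   NFPROTO_IPV4) == 0;
}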
diff --git a/include/linux/netfilter/ipset/ip_set.h b/include/linux/netfilter/ipset/ip_set.h
index 8e42253e5d4d..34fc80f3eb90 100644
--- a/include/linux/netfilter/ipset/ip_set.h
+++ b/include/linux/netfilter/ipset/ip_set.h
@@ -122,6 +122,8 @@ struct ip_set_ext {
 	u64 bytes;
 	char *comment;
 	u32 timeout;
+	u8 packets_op;
+	u8 bytes_op;
 };
 
 struct ip_set;
@@ -339,6 +341,10 @@ extern int ip_set_get_extensions(struct ip_set *set, struct nlattr *tb[],
 				 struct ip_set_ext *ext);
 extern int ip_set_put_extensions(struct sk_buff *skb, const struct ip_set *set,
 				 const void *e, bool active);
+extern bool ip_set_match_extensions(struct ip_set *set,
+				    const struct ip_set_ext *ext,
+				    struct ip_set_ext *mext,
+				    u32 flags, void *data);
 
 static inline int
 ip_set_get_hostipaddr4(struct nlattr *nla, u32 *ipaddr)
diff --git a/include/linux/netfilter/ipset/ip_set_counter.h b/include/linux/netfilter/ipset/ip_set_counter.h
index bb6fba480118..3d33a2c3f39f 100644
--- a/include/linux/netfilter/ipset/ip_set_counter.h
+++ b/include/linux/netfilter/ipset/ip_set_counter.h
@@ -34,20 +34,33 @@ ip_set_get_packets(const struct ip_set_counter *counter)
 	return (u64)atomic64_read(&(counter)->packets);
 }
 
+static inline bool
+ip_set_match_counter(u64 counter, u64 match, u8 op)
+{
+	switch (op) {
+	case IPSET_COUNTER_NONE:
+		return true;
+	case IPSET_COUNTER_EQ:
+		return counter == match;
+	case IPSET_COUNTER_NE:
+		return counter != match;
+	case IPSET_COUNTER_LT:
+		return counter < match;
+	case IPSET_COUNTER_GT:
+		return counter > match;
+	}
+	return false;
+}
+
 static inline void
 ip_set_update_counter(struct ip_set_counter *counter,
-		      const struct ip_set_ext *ext,
-		      struct ip_set_ext *mext, u32 flags)
+		      const struct ip_set_ext *ext, u32 flags)
 {
 	if (ext->packets != ULLONG_MAX &&
 	    !(flags & IPSET_FLAG_SKIP_COUNTER_UPDATE)) {
 		ip_set_add_bytes(ext->bytes, counter);
 		ip_set_add_packets(ext->packets, counter);
 	}
-	if (flags & IPSET_FLAG_MATCH_COUNTERS) {
-		mext->packets = ip_set_get_packets(counter);
-		mext->bytes = ip_set_get_bytes(counter);
-	}
 }
 
 static inline bool
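ip_set_update_counter() loses its match side effects; matching moves to the explicit ip_set_match_counter() helper, driven by the new packets_op/bytes_op fields in struct ip_set_ext. A sketch of how a set type might use it (the helper name is illustrative):

#include <linux/netfilter/ipset/ip_set.h>
#include <linux/netfilter/ipset/ip_set_counter.h>

/* Illustrative: does this entry's packet counter satisfy a match such
 * as "packets > N" carried in the match extension? */
static bool my_counter_matches(const struct ip_set_counter *counter,
			       const struct ip_set_ext *mext)
{
	return ip_set_match_counter(ip_set_get_packets(counter),
				    mext->packets, mext->packets_op);
}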
diff --git a/include/linux/netfilter/x_tables.h b/include/linux/netfilter/x_tables.h
index 33f7530f96b9..1313b35c3ab7 100644
--- a/include/linux/netfilter/x_tables.h
+++ b/include/linux/netfilter/x_tables.h
@@ -320,6 +320,8 @@ int xt_find_revision(u8 af, const char *name, u8 revision, int target,
 
 struct xt_table *xt_find_table_lock(struct net *net, u_int8_t af,
 				    const char *name);
+struct xt_table *xt_request_find_table_lock(struct net *net, u_int8_t af,
+					    const char *name);
 void xt_table_unlock(struct xt_table *t);
 
 int xt_proto_init(struct net *net, u_int8_t af);
diff --git a/include/linux/netfilter_defs.h b/include/linux/netfilter_defs.h
index dc6111adea06..8dddfb151f00 100644
--- a/include/linux/netfilter_defs.h
+++ b/include/linux/netfilter_defs.h
@@ -4,7 +4,17 @@
 
 #include <uapi/linux/netfilter.h>
 
+/* in/out/forward only */
+#define NF_ARP_NUMHOOKS 3
+
+/* max hook is NF_DN_ROUTE (6), also see uapi/linux/netfilter_decnet.h */
+#define NF_DN_NUMHOOKS 7
+
+#if IS_ENABLED(CONFIG_DECNET)
 /* Largest hook number + 1, see uapi/linux/netfilter_decnet.h */
-#define NF_MAX_HOOKS 8
+#define NF_MAX_HOOKS	NF_DN_NUMHOOKS
+#else
+#define NF_MAX_HOOKS	NF_INET_NUMHOOKS
+#endif
 
 #endif
diff --git a/include/linux/netfilter_ipv4.h b/include/linux/netfilter_ipv4.h
index 98c03b2462b5..b31dabfdb453 100644
--- a/include/linux/netfilter_ipv4.h
+++ b/include/linux/netfilter_ipv4.h
@@ -6,7 +6,53 @@
 
 #include <uapi/linux/netfilter_ipv4.h>
 
+/* Extra routing may be needed on local out, as the QUEUE target never returns
+ * control to the table.
+ */
+struct ip_rt_info {
+	__be32 daddr;
+	__be32 saddr;
+	u_int8_t tos;
+	u_int32_t mark;
+};
+
 int ip_route_me_harder(struct net *net, struct sk_buff *skb, unsigned addr_type);
+
+struct nf_queue_entry;
+
+#ifdef CONFIG_INET
 __sum16 nf_ip_checksum(struct sk_buff *skb, unsigned int hook,
 		       unsigned int dataoff, u_int8_t protocol);
+__sum16 nf_ip_checksum_partial(struct sk_buff *skb, unsigned int hook,
+			       unsigned int dataoff, unsigned int len,
+			       u_int8_t protocol);
+int nf_ip_route(struct net *net, struct dst_entry **dst, struct flowi *fl,
+		bool strict);
+int nf_ip_reroute(struct sk_buff *skb, const struct nf_queue_entry *entry);
+#else
+static inline __sum16 nf_ip_checksum(struct sk_buff *skb, unsigned int hook,
+				     unsigned int dataoff, u_int8_t protocol)
+{
+	return 0;
+}
+static inline __sum16 nf_ip_checksum_partial(struct sk_buff *skb,
+					     unsigned int hook,
+					     unsigned int dataoff,
+					     unsigned int len,
+					     u_int8_t protocol)
+{
+	return 0;
+}
+static inline int nf_ip_route(struct net *net, struct dst_entry **dst,
+			      struct flowi *fl, bool strict)
+{
+	return -EOPNOTSUPP;
+}
+static inline int nf_ip_reroute(struct sk_buff *skb,
+				const struct nf_queue_entry *entry)
+{
+	return -EOPNOTSUPP;
+}
+#endif /* CONFIG_INET */
+
 #endif /*__LINUX_IP_NETFILTER_H*/
diff --git a/include/linux/netfilter_ipv6.h b/include/linux/netfilter_ipv6.h
index 47c6b04c28c0..288c597e75b3 100644
--- a/include/linux/netfilter_ipv6.h
+++ b/include/linux/netfilter_ipv6.h
@@ -9,6 +9,17 @@
 
 #include <uapi/linux/netfilter_ipv6.h>
 
+/* Extra routing may be needed on local out, as the QUEUE target never returns
+ * control to the table.
+ */
+struct ip6_rt_info {
+	struct in6_addr daddr;
+	struct in6_addr saddr;
+	u_int32_t mark;
+};
+
+struct nf_queue_entry;
+
 /*
  * Hook functions for ipv6 to allow xt_* modules to be built-in even
  * if IPv6 is a module.
@@ -19,6 +30,14 @@ struct nf_ipv6_ops {
 	void (*route_input)(struct sk_buff *skb);
 	int (*fragment)(struct net *net, struct sock *sk, struct sk_buff *skb,
 			int (*output)(struct net *, struct sock *, struct sk_buff *));
+	__sum16 (*checksum)(struct sk_buff *skb, unsigned int hook,
+			    unsigned int dataoff, u_int8_t protocol);
+	__sum16 (*checksum_partial)(struct sk_buff *skb, unsigned int hook,
+				    unsigned int dataoff, unsigned int len,
+				    u_int8_t protocol);
+	int (*route)(struct net *net, struct dst_entry **dst, struct flowi *fl,
+		     bool strict);
+	int (*reroute)(struct sk_buff *skb, const struct nf_queue_entry *entry);
 };
 
 #ifdef CONFIG_NETFILTER
diff --git a/include/linux/pci.h b/include/linux/pci.h
index c170c9250c8b..0314e0716c30 100644
--- a/include/linux/pci.h
+++ b/include/linux/pci.h
@@ -1072,6 +1072,7 @@ int pci_set_pcie_reset_state(struct pci_dev *dev, enum pcie_reset_state state);
 int pci_set_cacheline_size(struct pci_dev *dev);
 #define HAVE_PCI_SET_MWI
 int __must_check pci_set_mwi(struct pci_dev *dev);
+int __must_check pcim_set_mwi(struct pci_dev *dev);
 int pci_try_set_mwi(struct pci_dev *dev);
 void pci_clear_mwi(struct pci_dev *dev);
 void pci_intx(struct pci_dev *dev, int enable);
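pcim_set_mwi() is the device-managed counterpart of pci_set_mwi(): the MWI setting is undone automatically on driver detach, so no explicit pci_clear_mwi() is needed. Typical probe usage would be (a sketch):

#include <linux/pci.h>

static int my_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	int rc;

	/* Managed enable pairs naturally with the managed MWI call. */
	rc = pcim_enable_device(pdev);
	if (rc)
		return rc;

	return pcim_set_mwi(pdev);
}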
diff --git a/include/linux/phy.h b/include/linux/phy.h
index dc82a07cb4fd..5a0c3e53e7c2 100644
--- a/include/linux/phy.h
+++ b/include/linux/phy.h
@@ -59,6 +59,7 @@
 
 #define PHY_HAS_INTERRUPT	0x00000001
 #define PHY_IS_INTERNAL		0x00000002
+#define PHY_RST_AFTER_CLK_EN	0x00000004
 #define MDIO_DEVICE_IS_PHY	0x80000000
 
 /* Interface Mode definitions */
@@ -468,7 +469,6 @@ struct phy_device {
 	/* Interrupt and Polling infrastructure */
 	struct work_struct phy_queue;
 	struct delayed_work state_queue;
-	atomic_t irq_disable;
 
 	struct mutex lock;
 
@@ -497,19 +497,19 @@ struct phy_device {
  * flags: A bitfield defining certain other features this PHY
  *   supports (like interrupts)
  *
- * The drivers must implement config_aneg and read_status.  All
- * other functions are optional. Note that none of these
- * functions should be called from interrupt time.  The goal is
- * for the bus read/write functions to be able to block when the
- * bus transaction is happening, and be freed up by an interrupt
- * (The MPC85xx has this ability, though it is not currently
- * supported in the driver).
+ * All functions are optional. If config_aneg or read_status
+ * are not implemented, the phy core uses the genphy versions.
+ * Note that none of these functions should be called from
+ * interrupt time. The goal is for the bus read/write functions
+ * to be able to block when the bus transaction is happening,
+ * and be freed up by an interrupt (The MPC85xx has this ability,
+ * though it is not currently supported in the driver).
  */
 struct phy_driver {
 	struct mdio_driver_common mdiodrv;
 	u32 phy_id;
 	char *name;
-	unsigned int phy_id_mask;
+	u32 phy_id_mask;
 	u32 features;
 	u32 flags;
 	const void *driver_data;
@@ -634,6 +634,9 @@ struct phy_driver {
 	int (*write_mmd)(struct phy_device *dev, int devnum, u16 regnum,
 			 u16 val);
 
+	int (*read_page)(struct phy_device *dev);
+	int (*write_page)(struct phy_device *dev, int page);
+
 	/* Get the size and type of the eeprom contained within a plug-in
 	 * module */
 	int (*module_info)(struct phy_device *dev,
@@ -690,6 +693,8 @@ phy_lookup_setting(int speed, int duplex, const unsigned long *mask,
 size_t phy_speeds(unsigned int *speeds, size_t size,
 		  unsigned long *mask, size_t maxbit);
 
+void phy_resolve_aneg_linkmode(struct phy_device *phydev);
+
 /**
  * phy_read_mmd - Convenience function for reading a register
  * from an MMD on a given PHY.
@@ -716,6 +721,18 @@ static inline int phy_read(struct phy_device *phydev, u32 regnum)
 }
 
 /**
+ * __phy_read - Convenience function for reading a given PHY register
+ * @phydev: the phy_device struct
+ * @regnum: register number to read
+ *
+ * The caller must have taken the MDIO bus lock.
+ */
+static inline int __phy_read(struct phy_device *phydev, u32 regnum)
+{
+	return __mdiobus_read(phydev->mdio.bus, phydev->mdio.addr, regnum);
+}
+
+/**
  * phy_write - Convenience function for writing a given PHY register
  * @phydev: the phy_device struct
  * @regnum: register number to write
@@ -731,6 +748,72 @@ static inline int phy_write(struct phy_device *phydev, u32 regnum, u16 val)
 }
 
 /**
+ * __phy_write - Convenience function for writing a given PHY register
+ * @phydev: the phy_device struct
+ * @regnum: register number to write
+ * @val: value to write to @regnum
+ *
+ * The caller must have taken the MDIO bus lock.
+ */
+static inline int __phy_write(struct phy_device *phydev, u32 regnum, u16 val)
+{
+	return __mdiobus_write(phydev->mdio.bus, phydev->mdio.addr, regnum,
+			       val);
+}
+
+int __phy_modify(struct phy_device *phydev, u32 regnum, u16 mask, u16 set);
+int phy_modify(struct phy_device *phydev, u32 regnum, u16 mask, u16 set);
+
+/**
+ * __phy_set_bits - Convenience function for setting bits in a PHY register
+ * @phydev: the phy_device struct
+ * @regnum: register number to write
+ * @val: bits to set
+ *
+ * The caller must have taken the MDIO bus lock.
+ */
+static inline int __phy_set_bits(struct phy_device *phydev, u32 regnum, u16 val)
+{
+	return __phy_modify(phydev, regnum, 0, val);
+}
+
+/**
+ * __phy_clear_bits - Convenience function for clearing bits in a PHY register
+ * @phydev: the phy_device struct
+ * @regnum: register number to write
+ * @val: bits to clear
+ *
+ * The caller must have taken the MDIO bus lock.
+ */
+static inline int __phy_clear_bits(struct phy_device *phydev, u32 regnum,
+				   u16 val)
+{
+	return __phy_modify(phydev, regnum, val, 0);
+}
+
+/**
+ * phy_set_bits - Convenience function for setting bits in a PHY register
+ * @phydev: the phy_device struct
+ * @regnum: register number to write
+ * @val: bits to set
+ */
+static inline int phy_set_bits(struct phy_device *phydev, u32 regnum, u16 val)
+{
+	return phy_modify(phydev, regnum, 0, val);
+}
+
+/**
+ * phy_clear_bits - Convenience function for clearing bits in a PHY register
+ * @phydev: the phy_device struct
+ * @regnum: register number to write
+ * @val: bits to clear
+ */
+static inline int phy_clear_bits(struct phy_device *phydev, u32 regnum, u16 val)
+{
+	return phy_modify(phydev, regnum, val, 0);
+}
+
+/**
  * phy_interrupt_is_valid - Convenience function for testing a given PHY irq
  * @phydev: the phy_device struct
  *
@@ -763,6 +846,20 @@ static inline bool phy_interface_mode_is_rgmii(phy_interface_t mode)
 };
 
 /**
+ * phy_interface_mode_is_8023z() - does the phy interface mode use 802.3z
+ *   negotiation
+ * @mode: one of &enum phy_interface_t
+ *
+ * Returns true if the phy interface mode uses the 16-bit negotiation
+ * word as defined in 802.3z. (See 802.3-2015 37.2.1 Config_Reg encoding)
+ */
+static inline bool phy_interface_mode_is_8023z(phy_interface_t mode)
+{
+	return mode == PHY_INTERFACE_MODE_1000BASEX ||
+	       mode == PHY_INTERFACE_MODE_2500BASEX;
+}
+
+/**
  * phy_interface_is_rgmii - Convenience function for testing if a PHY interface
  * is RGMII (all variants)
  * @phydev: the phy_device struct
@@ -794,6 +891,14 @@ static inline bool phy_is_pseudo_fixed_link(struct phy_device *phydev)
  */
 int phy_write_mmd(struct phy_device *phydev, int devad, u32 regnum, u16 val);
 
+int phy_save_page(struct phy_device *phydev);
+int phy_select_page(struct phy_device *phydev, int page);
+int phy_restore_page(struct phy_device *phydev, int oldpage, int ret);
+int phy_read_paged(struct phy_device *phydev, int page, u32 regnum);
+int phy_write_paged(struct phy_device *phydev, int page, u32 regnum, u16 val);
+int phy_modify_paged(struct phy_device *phydev, int page, u32 regnum,
+		     u16 mask, u16 set);
+
 struct phy_device *phy_device_create(struct mii_bus *bus, int addr, int phy_id,
 				     bool is_c45,
 				     struct phy_c45_device_ids *c45_ids);
@@ -840,13 +945,11 @@ int phy_aneg_done(struct phy_device *phydev);
 
 int phy_stop_interrupts(struct phy_device *phydev);
 int phy_restart_aneg(struct phy_device *phydev);
+int phy_reset_after_clk_enable(struct phy_device *phydev);
 
-static inline int phy_read_status(struct phy_device *phydev)
+static inline void phy_device_reset(struct phy_device *phydev, int value)
 {
-	if (!phydev->drv)
-		return -EIO;
-
-	return phydev->drv->read_status(phydev);
+	mdio_device_reset(&phydev->mdio, value);
 }
 
 #define phydev_err(_phydev, format, args...)	\
@@ -889,6 +992,18 @@ int genphy_c45_read_lpa(struct phy_device *phydev);
 int genphy_c45_read_pma(struct phy_device *phydev);
 int genphy_c45_pma_setup_forced(struct phy_device *phydev);
 int genphy_c45_an_disable_aneg(struct phy_device *phydev);
+int genphy_c45_read_mdix(struct phy_device *phydev);
+
+static inline int phy_read_status(struct phy_device *phydev)
+{
+	if (!phydev->drv)
+		return -EIO;
+
+	if (phydev->drv->read_status)
+		return phydev->drv->read_status(phydev);
+	else
+		return genphy_read_status(phydev);
+}
 
 void phy_driver_unregister(struct phy_driver *drv);
 void phy_drivers_unregister(struct phy_driver *drv, int n);
@@ -898,7 +1013,7 @@ int phy_drivers_register(struct phy_driver *new_driver, int n,
 void phy_state_machine(struct work_struct *work);
 void phy_change(struct phy_device *phydev);
 void phy_change_work(struct work_struct *work);
-void phy_mac_interrupt(struct phy_device *phydev, int new_link);
+void phy_mac_interrupt(struct phy_device *phydev);
 void phy_start_machine(struct phy_device *phydev);
 void phy_stop_machine(struct phy_device *phydev);
 void phy_trigger_machine(struct phy_device *phydev, bool sync);
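The paged-access helpers pair with the new __phy_read()/__phy_write() accessors: phy_select_page() takes the bus lock and switches pages, and phy_restore_page() restores the old page, drops the lock and propagates the first error. A hedged sketch (the page and register numbers are made up):

#include <linux/phy.h>

/* One-shot paged read. */
static int my_read_vendor_reg(struct phy_device *phydev)
{
	return phy_read_paged(phydev, 2, 27);
}

/* Several accesses amortizing a single page switch; the caller must
 * always finish with phy_restore_page(), even on failure. */
static int my_read_vendor_pair(struct phy_device *phydev, int *a, int *b)
{
	int oldpage, ret = 0;

	oldpage = phy_select_page(phydev, 2);
	if (oldpage >= 0) {
		*a = __phy_read(phydev, 27);
		*b = __phy_read(phydev, 28);
	}

	/* Restores the original page, releases the bus lock and
	 * returns the first error encountered. */
	return phy_restore_page(phydev, oldpage, ret);
}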
diff --git a/include/linux/phy_fixed.h b/include/linux/phy_fixed.h
index cf6392de6eb0..ee54453a40a0 100644
--- a/include/linux/phy_fixed.h
+++ b/include/linux/phy_fixed.h
@@ -24,9 +24,6 @@ extern void fixed_phy_unregister(struct phy_device *phydev);
 extern int fixed_phy_set_link_update(struct phy_device *phydev,
 			int (*link_update)(struct net_device *,
 					   struct fixed_phy_status *));
-extern int fixed_phy_update_state(struct phy_device *phydev,
-			   const struct fixed_phy_status *status,
-			   const struct fixed_phy_status *changed);
 #else
 static inline int fixed_phy_add(unsigned int irq, int phy_id,
 				struct fixed_phy_status *status,
@@ -50,12 +47,6 @@ static inline int fixed_phy_set_link_update(struct phy_device *phydev,
 {
 	return -ENODEV;
 }
-static inline int fixed_phy_update_state(struct phy_device *phydev,
-			   const struct fixed_phy_status *status,
-			   const struct fixed_phy_status *changed)
-{
-	return -ENODEV;
-}
 #endif /* CONFIG_FIXED_PHY */
 
 #endif /* __PHY_FIXED_H */
diff --git a/include/linux/phylink.h b/include/linux/phylink.h
index af67edd4ae38..bd137c273d38 100644
--- a/include/linux/phylink.h
+++ b/include/linux/phylink.h
@@ -7,6 +7,7 @@
 
 struct device_node;
 struct ethtool_cmd;
+struct fwnode_handle;
 struct net_device;
 
 enum {
@@ -20,19 +21,31 @@ enum {
 
 	MLO_AN_PHY = 0,	/* Conventional PHY */
 	MLO_AN_FIXED,	/* Fixed-link mode */
-	MLO_AN_SGMII,	/* Cisco SGMII protocol */
-	MLO_AN_8023Z,	/* 1000base-X protocol */
+	MLO_AN_INBAND,	/* In-band protocol */
 };
 
 static inline bool phylink_autoneg_inband(unsigned int mode)
 {
-	return mode == MLO_AN_SGMII || mode == MLO_AN_8023Z;
+	return mode == MLO_AN_INBAND;
 }
 
+/**
+ * struct phylink_link_state - link state structure
+ * @advertising: ethtool bitmask containing advertised link modes
+ * @lp_advertising: ethtool bitmask containing link partner advertised link
+ *   modes
+ * @interface: link &typedef phy_interface_t mode
+ * @speed: link speed, one of the SPEED_* constants.
+ * @duplex: link duplex mode, one of DUPLEX_* constants.
+ * @pause: link pause state, described by MLO_PAUSE_* constants.
+ * @link: true if the link is up.
+ * @an_enabled: true if autonegotiation is enabled/desired.
+ * @an_complete: true if autonegotiation has completed.
+ */
 struct phylink_link_state {
 	__ETHTOOL_DECLARE_LINK_MODE_MASK(advertising);
 	__ETHTOOL_DECLARE_LINK_MODE_MASK(lp_advertising);
-	phy_interface_t interface;	/* PHY_INTERFACE_xxx */
+	phy_interface_t interface;
 	int speed;
 	int duplex;
 	int pause;
@@ -41,72 +54,145 @@ struct phylink_link_state {
 	unsigned int an_complete:1;
 };
 
+/**
+ * struct phylink_mac_ops - MAC operations structure.
+ * @validate: Validate and update the link configuration.
+ * @mac_link_state: Read the current link state from the hardware.
+ * @mac_config: configure the MAC for the selected mode and state.
+ * @mac_an_restart: restart 802.3z BaseX autonegotiation.
+ * @mac_link_down: take the link down.
+ * @mac_link_up: allow the link to come up.
+ *
+ * The individual methods are described more fully below.
+ */
 struct phylink_mac_ops {
-	/**
-	 * validate: validate and update the link configuration
-	 * @ndev: net_device structure associated with MAC
-	 * @config: configuration to validate
-	 *
-	 * Update the %config->supported and %config->advertised masks
-	 * clearing bits that can not be supported.
-	 *
-	 * Note: the PHY may be able to transform from one connection
-	 * technology to another, so, eg, don't clear 1000BaseX just
-	 * because the MAC is unable to support it.  This is more about
-	 * clearing unsupported speeds and duplex settings.
-	 *
-	 * If the %config->interface mode is %PHY_INTERFACE_MODE_1000BASEX
-	 * or %PHY_INTERFACE_MODE_2500BASEX, select the appropriate mode
-	 * based on %config->advertised and/or %config->speed.
-	 */
 	void (*validate)(struct net_device *ndev, unsigned long *supported,
 			 struct phylink_link_state *state);
-
-	/* Read the current link state from the hardware */
-	int (*mac_link_state)(struct net_device *, struct phylink_link_state *);
-
-	/* Configure the MAC */
-	/**
-	 * mac_config: configure the MAC for the selected mode and state
-	 * @ndev: net_device structure for the MAC
-	 * @mode: one of MLO_AN_FIXED, MLO_AN_PHY, MLO_AN_8023Z, MLO_AN_SGMII
-	 * @state: state structure
-	 *
-	 * The action performed depends on the currently selected mode:
-	 *
-	 * %MLO_AN_FIXED, %MLO_AN_PHY:
-	 *   set the specified speed, duplex, pause mode, and phy interface
-	 *   mode in the provided @state.
-	 * %MLO_AN_8023Z:
-	 *   place the link in 1000base-X mode, advertising the parameters
-	 *   given in advertising in @state.
-	 * %MLO_AN_SGMII:
-	 *   place the link in Cisco SGMII mode - there is no advertisment
-	 *   to make as the PHY communicates the speed and duplex to the
-	 *   MAC over the in-band control word.  Configuration of the pause
-	 *   mode is as per MLO_AN_PHY since this is not included.
-	 */
+	int (*mac_link_state)(struct net_device *ndev,
+			      struct phylink_link_state *state);
 	void (*mac_config)(struct net_device *ndev, unsigned int mode,
 			   const struct phylink_link_state *state);
-
-	/**
-	 * mac_an_restart: restart 802.3z BaseX autonegotiation
-	 * @ndev: net_device structure for the MAC
-	 */
 	void (*mac_an_restart)(struct net_device *ndev);
-
-	void (*mac_link_down)(struct net_device *, unsigned int mode);
-	void (*mac_link_up)(struct net_device *, unsigned int mode,
-			    struct phy_device *);
+	void (*mac_link_down)(struct net_device *ndev, unsigned int mode);
+	void (*mac_link_up)(struct net_device *ndev, unsigned int mode,
+			    struct phy_device *phy);
 };
 
-struct phylink *phylink_create(struct net_device *, struct device_node *,
+#if 0 /* For kernel-doc purposes only. */
+/**
+ * validate - Validate and update the link configuration
+ * @ndev: a pointer to a &struct net_device for the MAC.
+ * @supported: ethtool bitmask for supported link modes.
+ * @state: a pointer to a &struct phylink_link_state.
+ *
+ * Clear bits in the @supported and @state->advertising masks that
+ * are not supportable by the MAC.
+ *
+ * Note that the PHY may be able to transform from one connection
+ * technology to another, so, eg, don't clear 1000BaseX just
+ * because the MAC is unable to support BaseX mode. This is more about
+ * clearing unsupported speeds and duplex settings.
+ *
+ * If the @state->interface mode is %PHY_INTERFACE_MODE_1000BASEX
+ * or %PHY_INTERFACE_MODE_2500BASEX, select the appropriate mode
+ * based on @state->advertising and/or @state->speed and update
+ * @state->interface accordingly.
+ */
+void validate(struct net_device *ndev, unsigned long *supported,
+	      struct phylink_link_state *state);
+
+/**
+ * mac_link_state() - Read the current link state from the hardware
+ * @ndev: a pointer to a &struct net_device for the MAC.
+ * @state: a pointer to a &struct phylink_link_state.
+ *
+ * Read the current link state from the MAC, reporting the current
+ * speed in @state->speed, duplex mode in @state->duplex, pause mode
+ * in @state->pause using the %MLO_PAUSE_RX and %MLO_PAUSE_TX bits,
+ * negotiation completion state in @state->an_complete, and link
+ * up state in @state->link.
+ */
+int mac_link_state(struct net_device *ndev,
+		   struct phylink_link_state *state);
+
+/**
+ * mac_config() - configure the MAC for the selected mode and state
+ * @ndev: a pointer to a &struct net_device for the MAC.
+ * @mode: one of %MLO_AN_FIXED, %MLO_AN_PHY, %MLO_AN_INBAND.
+ * @state: a pointer to a &struct phylink_link_state.
+ *
+ * The action performed depends on the currently selected mode:
+ *
+ * %MLO_AN_FIXED, %MLO_AN_PHY:
+ *   Configure the specified @state->speed, @state->duplex and
+ *   @state->pause (%MLO_PAUSE_TX / %MLO_PAUSE_RX) mode.
+ *
+ * %MLO_AN_INBAND:
+ *   place the link in an inband negotiation mode (such as 802.3z
+ *   1000base-X or Cisco SGMII mode depending on the @state->interface
+ *   mode). In both cases, link state management (whether the link
+ *   is up or not) is performed by the MAC, and reported via the
+ *   mac_link_state() callback. Changes in link state must be made
+ *   by calling phylink_mac_change().
+ *
+ *   If in 802.3z mode, the link speed is fixed, dependent on the
+ *   @state->interface. Duplex is negotiated, and pause is advertised
+ *   according to @state->an_enabled, @state->pause and
+ *   @state->advertising flags. Beware of MACs which only support full
+ *   duplex at gigabit and higher speeds.
+ *
+ *   If in Cisco SGMII mode, the link speed and duplex mode are passed
+ *   in the serial bitstream 16-bit configuration word, and the MAC
+ *   should be configured to read these bits and acknowledge the
+ *   configuration word. Nothing is advertised by the MAC. The MAC is
+ *   responsible for reading the configuration word and configuring
+ *   itself accordingly.
+ */
+void mac_config(struct net_device *ndev, unsigned int mode,
+		const struct phylink_link_state *state);
+
+/**
+ * mac_an_restart() - restart 802.3z BaseX autonegotiation
+ * @ndev: a pointer to a &struct net_device for the MAC.
+ */
+void mac_an_restart(struct net_device *ndev);
+
+/**
+ * mac_link_down() - take the link down
+ * @ndev: a pointer to a &struct net_device for the MAC.
+ * @mode: link autonegotiation mode
+ *
+ * If @mode is not an in-band negotiation mode (as defined by
+ * phylink_autoneg_inband()), force the link down and disable any
+ * Energy Efficient Ethernet MAC configuration.
+ */
+void mac_link_down(struct net_device *ndev, unsigned int mode);
+
+/**
+ * mac_link_up() - allow the link to come up
+ * @ndev: a pointer to a &struct net_device for the MAC.
+ * @mode: link autonegotiation mode
+ * @phy: any attached phy
+ *
+ * If @mode is not an in-band negotiation mode (as defined by
+ * phylink_autoneg_inband()), allow the link to come up. If @phy
+ * is non-%NULL, configure Energy Efficient Ethernet by calling
+ * phy_init_eee() and perform appropriate MAC configuration for EEE.
+ */
+void mac_link_up(struct net_device *ndev, unsigned int mode,
+		 struct phy_device *phy);
+#endif
+
+struct phylink *phylink_create(struct net_device *, struct fwnode_handle *,
 	phy_interface_t iface, const struct phylink_mac_ops *ops);
 void phylink_destroy(struct phylink *);
 
 int phylink_connect_phy(struct phylink *, struct phy_device *);
-int phylink_of_phy_connect(struct phylink *, struct device_node *);
+int phylink_of_phy_connect(struct phylink *, struct device_node *, u32 flags);
 void phylink_disconnect_phy(struct phylink *);
+int phylink_fixed_state_cb(struct phylink *,
+			   void (*cb)(struct net_device *dev,
+				      struct phylink_link_state *));
 
 void phylink_mac_change(struct phylink *, bool up);
 
@@ -128,7 +214,6 @@ int phylink_ethtool_set_pauseparam(struct phylink *,
 int phylink_ethtool_get_module_info(struct phylink *, struct ethtool_modinfo *);
 int phylink_ethtool_get_module_eeprom(struct phylink *,
 				      struct ethtool_eeprom *, u8 *);
-int phylink_init_eee(struct phylink *, bool);
 int phylink_get_eee_err(struct phylink *);
 int phylink_ethtool_get_eee(struct phylink *, struct ethtool_eee *);
 int phylink_ethtool_set_eee(struct phylink *, struct ethtool_eee *);
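Under the reworked API a MAC driver supplies one ops structure and creates its phylink instance from a fwnode rather than a device_node. A skeletal sketch with stubbed-out bodies (all names illustrative):

#include <linux/phylink.h>

static void my_validate(struct net_device *ndev, unsigned long *supported,
			struct phylink_link_state *state)
{
	/* Clear unsupported modes from *supported/state->advertising. */
}

static int my_mac_link_state(struct net_device *ndev,
			     struct phylink_link_state *state)
{
	return -EOPNOTSUPP;	/* fill in state from hardware */
}

static void my_mac_config(struct net_device *ndev, unsigned int mode,
			  const struct phylink_link_state *state)
{
}

static void my_mac_an_restart(struct net_device *ndev)
{
}

static void my_mac_link_down(struct net_device *ndev, unsigned int mode)
{
}

static void my_mac_link_up(struct net_device *ndev, unsigned int mode,
			   struct phy_device *phy)
{
}

static const struct phylink_mac_ops my_mac_ops = {
	.validate	= my_validate,
	.mac_link_state	= my_mac_link_state,
	.mac_config	= my_mac_config,
	.mac_an_restart	= my_mac_an_restart,
	.mac_link_down	= my_mac_link_down,
	.mac_link_up	= my_mac_link_up,
};

Creation would then look like phylink_create(ndev, dev_fwnode(dev), PHY_INTERFACE_MODE_SGMII, &my_mac_ops), with dev_fwnode() bridging from a struct device to the fwnode that this signature now expects.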
diff --git a/include/linux/proc_ns.h b/include/linux/proc_ns.h
index 2ff18c9840a7..d31cb6215905 100644
--- a/include/linux/proc_ns.h
+++ b/include/linux/proc_ns.h
@@ -78,6 +78,9 @@ extern struct file *proc_ns_fget(int fd);
 #define get_proc_ns(inode) ((struct ns_common *)(inode)->i_private)
 extern void *ns_get_path(struct path *path, struct task_struct *task,
 			const struct proc_ns_operations *ns_ops);
+typedef struct ns_common *ns_get_path_helper_t(void *);
+extern void *ns_get_path_cb(struct path *path, ns_get_path_helper_t ns_get_cb,
+			    void *private_data);
 
 extern int ns_get_name(char *buf, size_t size, struct task_struct *task,
 			const struct proc_ns_operations *ns_ops);
diff --git a/include/linux/property.h b/include/linux/property.h
index 6653ed4b99f9..1106bc62dd8c 100644
--- a/include/linux/property.h
+++ b/include/linux/property.h
@@ -83,11 +83,17 @@ struct fwnode_handle *fwnode_get_next_parent(
 	struct fwnode_handle *fwnode);
 struct fwnode_handle *fwnode_get_next_child_node(
 	const struct fwnode_handle *fwnode, struct fwnode_handle *child);
+struct fwnode_handle *fwnode_get_next_available_child_node(
+	const struct fwnode_handle *fwnode, struct fwnode_handle *child);
 
 #define fwnode_for_each_child_node(fwnode, child)			\
 	for (child = fwnode_get_next_child_node(fwnode, NULL); child;	\
 	     child = fwnode_get_next_child_node(fwnode, child))
 
+#define fwnode_for_each_available_child_node(fwnode, child)		       \
+	for (child = fwnode_get_next_available_child_node(fwnode, NULL); child;\
+	     child = fwnode_get_next_available_child_node(fwnode, child))
+
 struct fwnode_handle *device_get_next_child_node(
 	struct device *dev, struct fwnode_handle *child);
 
@@ -103,6 +109,8 @@ struct fwnode_handle *device_get_named_child_node(struct device *dev,
 struct fwnode_handle *fwnode_handle_get(struct fwnode_handle *fwnode);
 void fwnode_handle_put(struct fwnode_handle *fwnode);
 
+int fwnode_irq_get(struct fwnode_handle *fwnode, unsigned int index);
+
 unsigned int device_get_child_node_count(struct device *dev);
 
 static inline bool device_property_read_bool(struct device *dev,
@@ -281,6 +289,9 @@ int device_get_phy_mode(struct device *dev);
 
 void *device_get_mac_address(struct device *dev, char *addr, int alen);
 
+int fwnode_get_phy_mode(struct fwnode_handle *fwnode);
+void *fwnode_get_mac_address(struct fwnode_handle *fwnode,
+			     char *addr, int alen);
 struct fwnode_handle *fwnode_graph_get_next_endpoint(
 	const struct fwnode_handle *fwnode, struct fwnode_handle *prev);
 struct fwnode_handle *
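These fwnode-level helpers mirror existing OF/device-level APIs so consumers such as phylink can work from either DT or ACPI descriptions. A small sketch of the new iterator (the function name is illustrative):

#include <linux/property.h>

/* Illustrative: count the available (status = "okay") children. */
static unsigned int my_count_available(struct fwnode_handle *fwnode)
{
	struct fwnode_handle *child;
	unsigned int n = 0;

	fwnode_for_each_available_child_node(fwnode, child)
		n++;

	return n;
}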
diff --git a/include/linux/ptr_ring.h b/include/linux/ptr_ring.h
index d72b2e7dd500..1883d6137e9b 100644
--- a/include/linux/ptr_ring.h
+++ b/include/linux/ptr_ring.h
@@ -45,9 +45,10 @@ struct ptr_ring {
 };
 
 /* Note: callers invoking this in a loop must use a compiler barrier,
- * for example cpu_relax().  If ring is ever resized, callers must hold
- * producer_lock - see e.g. ptr_ring_full.  Otherwise, if callers don't hold
- * producer_lock, the next call to __ptr_ring_produce may fail.
+ * for example cpu_relax().
+ *
+ * NB: this is unlike __ptr_ring_empty in that callers must hold producer_lock:
+ * see e.g. ptr_ring_full.
  */
 static inline bool __ptr_ring_full(struct ptr_ring *r)
 {
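The rule the reworded comment states — __ptr_ring_full() is only meaningful under producer_lock once resizing is possible — is what the locked wrappers in this header enforce; schematically:

	/* Schematic of the locked wrapper pattern used by ptr_ring_full()
	 * and friends: take producer_lock around the lockless test so a
	 * concurrent resize cannot swap the queue array underneath it.
	 */
	static inline bool ptr_ring_full_locked(struct ptr_ring *r)
	{
		bool ret;

		spin_lock(&r->producer_lock);
		ret = __ptr_ring_full(r);
		spin_unlock(&r->producer_lock);

		return ret;
	}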
@@ -113,7 +114,7 @@ static inline int __ptr_ring_produce(struct ptr_ring *r, void *ptr)
 	/* Pairs with smp_read_barrier_depends in __ptr_ring_consume. */
 	smp_wmb();
 
-	r->queue[r->producer++] = ptr;
+	WRITE_ONCE(r->queue[r->producer++], ptr);
 	if (unlikely(r->producer >= r->size))
 		r->producer = 0;
 	return 0;
@@ -169,32 +170,36 @@ static inline int ptr_ring_produce_bh(struct ptr_ring *r, void *ptr)
 	return ret;
 }
 
-/* Note: callers invoking this in a loop must use a compiler barrier,
- * for example cpu_relax(). Callers must take consumer_lock
- * if they dereference the pointer - see e.g. PTR_RING_PEEK_CALL.
- * If ring is never resized, and if the pointer is merely
- * tested, there's no need to take the lock - see e.g.  __ptr_ring_empty.
- * However, if called outside the lock, and if some other CPU
- * consumes ring entries at the same time, the value returned
- * is not guaranteed to be correct.
- * In this case - to avoid incorrectly detecting the ring
- * as empty - the CPU consuming the ring entries is responsible
- * for either consuming all ring entries until the ring is empty,
- * or synchronizing with some other CPU and causing it to
- * execute __ptr_ring_peek and/or consume the ring enteries
- * after the synchronization point.
- */
 static inline void *__ptr_ring_peek(struct ptr_ring *r)
 {
 	if (likely(r->size))
-		return r->queue[r->consumer_head];
+		return READ_ONCE(r->queue[r->consumer_head]);
 	return NULL;
 }
 
-/* See __ptr_ring_peek above for locking rules. */
+/*
+ * Test ring empty status without taking any locks.
+ *
+ * NB: This is only safe to call if the ring is never resized.
+ *
+ * However, if some other CPU consumes ring entries at the same time, the value
+ * returned is not guaranteed to be correct.
+ *
+ * In this case - to avoid incorrectly detecting the ring
+ * as empty - the CPU consuming the ring entries is responsible
+ * for either consuming all ring entries until the ring is empty,
+ * or synchronizing with some other CPU and causing it to
+ * re-test __ptr_ring_empty and/or consume the ring entries
+ * after the synchronization point.
+ *
+ * Note: callers invoking this in a loop must use a compiler barrier,
+ * for example cpu_relax().
+ */
 static inline bool __ptr_ring_empty(struct ptr_ring *r)
 {
-	return !__ptr_ring_peek(r);
+	if (likely(r->size))
+		return !r->queue[READ_ONCE(r->consumer_head)];
+	return true;
 }
 
 static inline bool ptr_ring_empty(struct ptr_ring *r)
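To make the contract above concrete: a lockless __ptr_ring_empty() may race with a consumer, so the consuming side must either drain the ring completely or synchronize before the tester relies on the answer. A reduced sketch of the draining side (the destroy callback is hypothetical):

	static void foo_ring_drain(struct ptr_ring *r, void (*destroy)(void *))
	{
		void *ptr;

		/* Consume until empty under consumer_lock; once this unlocks,
		 * a lockless __ptr_ring_empty() elsewhere can trust the result,
		 * per the comment above.
		 */
		spin_lock(&r->consumer_lock);
		while ((ptr = __ptr_ring_consume(r)))
			destroy(ptr);
		spin_unlock(&r->consumer_lock);
	}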
@@ -248,22 +253,28 @@ static inline void __ptr_ring_discard_one(struct ptr_ring *r)
 	/* Fundamentally, what we want to do is update consumer
 	 * index and zero out the entry so producer can reuse it.
 	 * Doing it naively at each consume would be as simple as:
-	 *       r->queue[r->consumer++] = NULL;
-	 *       if (unlikely(r->consumer >= r->size))
-	 *               r->consumer = 0;
+	 *       consumer = r->consumer;
+	 *       r->queue[consumer++] = NULL;
+	 *       if (unlikely(consumer >= r->size))
+	 *               consumer = 0;
+	 *       r->consumer = consumer;
 	 * but that is suboptimal when the ring is full as producer is writing
 	 * out new entries in the same cache line.  Defer these updates until a
 	 * batch of entries has been consumed.
 	 */
-	int head = r->consumer_head++;
+	/* Note: we must keep consumer_head valid at all times for __ptr_ring_empty
+	 * to work correctly.
+	 */
+	int consumer_head = r->consumer_head;
+	int head = consumer_head++;
 
 	/* Once we have processed enough entries invalidate them in
 	 * the ring all at once so producer can reuse their space in the ring.
 	 * We also do this when we reach end of the ring - not mandatory
 	 * but helps keep the implementation simple.
 	 */
-	if (unlikely(r->consumer_head - r->consumer_tail >= r->batch ||
-		     r->consumer_head >= r->size)) {
+	if (unlikely(consumer_head - r->consumer_tail >= r->batch ||
+		     consumer_head >= r->size)) {
 		/* Zero out entries in the reverse order: this way we touch the
 		 * cache line that producer might currently be reading the last;
 		 * producer won't make progress and touch other cache lines
@@ -271,12 +282,14 @@ static inline void __ptr_ring_discard_one(struct ptr_ring *r)
 		 */
 		while (likely(head >= r->consumer_tail))
 			r->queue[head--] = NULL;
-		r->consumer_tail = r->consumer_head;
+		r->consumer_tail = consumer_head;
 	}
-	if (unlikely(r->consumer_head >= r->size)) {
-		r->consumer_head = 0;
+	if (unlikely(consumer_head >= r->size)) {
+		consumer_head = 0;
 		r->consumer_tail = 0;
 	}
+	/* matching READ_ONCE in __ptr_ring_empty for lockless tests */
+	WRITE_ONCE(r->consumer_head, consumer_head);
 }
 
 static inline void *__ptr_ring_consume(struct ptr_ring *r)
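The WRITE_ONCE()/READ_ONCE() pairing introduced in this hunk is the usual one-sided publication idiom; a reduced model (not the kernel code itself) of the two sides:

	/* Consumer side, under consumer_lock: NULL the consumed slots first,
	 * then publish the new head, which never leaves [0, size).
	 */
	static void model_publish_head(struct ptr_ring *r, int new_head)
	{
		WRITE_ONCE(r->consumer_head, new_head);
	}

	/* Lockless reader side: sample the index exactly once so the slot
	 * dereference uses a self-consistent, in-bounds value.
	 */
	static bool model_empty(struct ptr_ring *r)
	{
		return !r->queue[READ_ONCE(r->consumer_head)];
	}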
@@ -527,7 +540,9 @@ static inline void ptr_ring_unconsume(struct ptr_ring *r, void **batch, int n,
 			goto done;
 		}
 		r->queue[head] = batch[--n];
-		r->consumer_tail = r->consumer_head = head;
+		r->consumer_tail = head;
+		/* matching READ_ONCE in __ptr_ring_empty for lockless tests */
+		WRITE_ONCE(r->consumer_head, head);
 	}
 
 done:
diff --git a/include/linux/qed/common_hsi.h b/include/linux/qed/common_hsi.h
index 39e2a2ac2471..2b3b350e07b7 100644
--- a/include/linux/qed/common_hsi.h
+++ b/include/linux/qed/common_hsi.h
@@ -32,14 +32,15 @@
 
 #ifndef _COMMON_HSI_H
 #define _COMMON_HSI_H
+
 #include <linux/types.h>
 #include <asm/byteorder.h>
 #include <linux/bitops.h>
 #include <linux/slab.h>
 
 /* dma_addr_t manip */
-#define PTR_LO(x)               ((u32)(((uintptr_t)(x)) & 0xffffffff))
-#define PTR_HI(x)               ((u32)((((uintptr_t)(x)) >> 16) >> 16))
+#define PTR_LO(x)		((u32)(((uintptr_t)(x)) & 0xffffffff))
+#define PTR_HI(x)		((u32)((((uintptr_t)(x)) >> 16) >> 16))
 #define DMA_LO_LE(x)		cpu_to_le32(lower_32_bits(x))
 #define DMA_HI_LE(x)		cpu_to_le32(upper_32_bits(x))
 #define DMA_REGPAIR_LE(x, val)	do { \
@@ -47,39 +48,45 @@
 					(x).lo = DMA_LO_LE((val)); \
 				} while (0)
 
-#define HILO_GEN(hi, lo, type)  ((((type)(hi)) << 32) + (lo))
-#define HILO_64(hi, lo) HILO_GEN((le32_to_cpu(hi)), (le32_to_cpu(lo)), u64)
-#define HILO_64_REGPAIR(regpair)        (HILO_64(regpair.hi, regpair.lo))
+#define HILO_GEN(hi, lo, type)		((((type)(hi)) << 32) + (lo))
+#define HILO_64(hi, lo) \
+	HILO_GEN(le32_to_cpu(hi), le32_to_cpu(lo), u64)
+#define HILO_64_REGPAIR(regpair) ({ \
+	typeof(regpair) __regpair = (regpair); \
+	HILO_64(__regpair.hi, __regpair.lo); })
 #define HILO_DMA_REGPAIR(regpair)	((dma_addr_t)HILO_64_REGPAIR(regpair))
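The statement-expression form of HILO_64_REGPAIR() above is the standard guard against double evaluation: the old body expanded its argument twice, once for .hi and once for .lo, so an argument with side effects misbehaved. A hedged illustration (the cursor helper is hypothetical):

	/* Safe with the new macro: *(*cursor)++ is evaluated exactly once into
	 * the __regpair temporary, even though it has a side effect.  With the
	 * old macro, .hi and .lo would have been read from two different array
	 * elements and the cursor advanced twice.
	 */
	static dma_addr_t foo_next_dma_addr(struct regpair **cursor)
	{
		return HILO_DMA_REGPAIR(*(*cursor)++);
	}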
 
 #ifndef __COMMON_HSI__
 #define __COMMON_HSI__
 
+/********************************/
+/* PROTOCOL COMMON FW CONSTANTS */
+/********************************/
 
-#define X_FINAL_CLEANUP_AGG_INT 1
+#define X_FINAL_CLEANUP_AGG_INT			1
 
-#define EVENT_RING_PAGE_SIZE_BYTES          4096
+#define EVENT_RING_PAGE_SIZE_BYTES		4096
 
-#define NUM_OF_GLOBAL_QUEUES                            128
-#define COMMON_QUEUE_ENTRY_MAX_BYTE_SIZE        64
+#define NUM_OF_GLOBAL_QUEUES			128
+#define COMMON_QUEUE_ENTRY_MAX_BYTE_SIZE	64
 
-#define ISCSI_CDU_TASK_SEG_TYPE       0
-#define FCOE_CDU_TASK_SEG_TYPE        0
-#define RDMA_CDU_TASK_SEG_TYPE        1
+#define ISCSI_CDU_TASK_SEG_TYPE			0
+#define FCOE_CDU_TASK_SEG_TYPE			0
+#define RDMA_CDU_TASK_SEG_TYPE			1
 
-#define FW_ASSERT_GENERAL_ATTN_IDX    32
+#define FW_ASSERT_GENERAL_ATTN_IDX		32
 
-#define MAX_PINNED_CCFC                 32
+#define MAX_PINNED_CCFC				32
 
 /* Queue Zone sizes in bytes */
-#define TSTORM_QZONE_SIZE 8
-#define MSTORM_QZONE_SIZE 16
-#define USTORM_QZONE_SIZE 8
-#define XSTORM_QZONE_SIZE 8
-#define YSTORM_QZONE_SIZE 0
-#define PSTORM_QZONE_SIZE 0
-
-#define MSTORM_VF_ZONE_DEFAULT_SIZE_LOG	7
+#define TSTORM_QZONE_SIZE	8
+#define MSTORM_QZONE_SIZE	16
+#define USTORM_QZONE_SIZE	8
+#define XSTORM_QZONE_SIZE	8
+#define YSTORM_QZONE_SIZE	0
+#define PSTORM_QZONE_SIZE	0
+
+#define MSTORM_VF_ZONE_DEFAULT_SIZE_LOG		7
 #define ETH_MAX_NUM_RX_QUEUES_PER_VF_DEFAULT	16
 #define ETH_MAX_NUM_RX_QUEUES_PER_VF_DOUBLE	48
 #define ETH_MAX_NUM_RX_QUEUES_PER_VF_QUAD	112
@@ -102,8 +109,8 @@
 #define MAX_NUM_LL2_TX_STATS_COUNTERS	48
 
 #define FW_MAJOR_VERSION	8
-#define FW_MINOR_VERSION	20
-#define FW_REVISION_VERSION	0
+#define FW_MINOR_VERSION	33
+#define FW_REVISION_VERSION	1
 #define FW_ENGINEERING_VERSION	0
 
 /***********************/
@@ -115,10 +122,10 @@
 #define MAX_NUM_PORTS_BB	(2)
 #define MAX_NUM_PORTS		(MAX_NUM_PORTS_K2)
 
-#define MAX_NUM_PFS_K2	(16)
-#define MAX_NUM_PFS_BB	(8)
-#define MAX_NUM_PFS	(MAX_NUM_PFS_K2)
-#define MAX_NUM_OF_PFS_IN_CHIP (16) /* On both engines */
+#define MAX_NUM_PFS_K2		(16)
+#define MAX_NUM_PFS_BB		(8)
+#define MAX_NUM_PFS		(MAX_NUM_PFS_K2)
+#define MAX_NUM_OF_PFS_IN_CHIP	(16) /* On both engines */
 
 #define MAX_NUM_VFS_K2	(192)
 #define MAX_NUM_VFS_BB	(120)
@@ -141,29 +148,14 @@
 /* Traffic classes in network-facing blocks (PBF, BTB, NIG, BRB, PRS and QM) */
 #define NUM_PHYS_TCS_4PORT_K2	(4)
 #define NUM_OF_PHYS_TCS		(8)
-
+#define PURE_LB_TC		NUM_OF_PHYS_TCS
 #define NUM_TCS_4PORT_K2	(NUM_PHYS_TCS_4PORT_K2 + 1)
 #define NUM_OF_TCS		(NUM_OF_PHYS_TCS + 1)
 
-#define LB_TC			(NUM_OF_PHYS_TCS)
-
-/* Num of possible traffic priority values */
-#define NUM_OF_PRIO		(8)
-
-#define MAX_NUM_VOQS_K2		(NUM_TCS_4PORT_K2 * MAX_NUM_PORTS_K2)
-#define MAX_NUM_VOQS_BB		(NUM_OF_TCS * MAX_NUM_PORTS_BB)
-#define MAX_NUM_VOQS		(MAX_NUM_VOQS_K2)
-#define MAX_PHYS_VOQS		(NUM_OF_PHYS_TCS * MAX_NUM_PORTS_BB)
-
 /* CIDs */
-#define NUM_OF_CONNECTION_TYPES	(8)
-#define NUM_OF_LCIDS		(320)
-#define NUM_OF_LTIDS		(320)
-
-/* Clock values */
-#define MASTER_CLK_FREQ_E4	(375e6)
-#define STORM_CLK_FREQ_E4	(1000e6)
-#define CLK25M_CLK_FREQ_E4	(25e6)
+#define NUM_OF_CONNECTION_TYPES_E4	(8)
+#define NUM_OF_LCIDS			(320)
+#define NUM_OF_LTIDS			(320)
 
 /* Global PXP windows (GTT) */
 #define NUM_OF_GTT		19
@@ -172,17 +164,17 @@
 #define GTT_DWORD_SIZE		BIT(GTT_DWORD_SIZE_BITS)
 
 /* Tools Version */
-#define TOOLS_VERSION 10
+#define TOOLS_VERSION	10
 
 /*****************/
 /* CDU CONSTANTS */
 /*****************/
 
-#define CDU_SEG_TYPE_OFFSET_REG_TYPE_SHIFT              (17)
-#define CDU_SEG_TYPE_OFFSET_REG_OFFSET_MASK             (0x1ffff)
+#define CDU_SEG_TYPE_OFFSET_REG_TYPE_SHIFT			(17)
+#define CDU_SEG_TYPE_OFFSET_REG_OFFSET_MASK			(0x1ffff)
 
-#define CDU_VF_FL_SEG_TYPE_OFFSET_REG_TYPE_SHIFT	(12)
-#define CDU_VF_FL_SEG_TYPE_OFFSET_REG_OFFSET_MASK	(0xfff)
+#define CDU_VF_FL_SEG_TYPE_OFFSET_REG_TYPE_SHIFT		(12)
+#define CDU_VF_FL_SEG_TYPE_OFFSET_REG_OFFSET_MASK		(0xfff)
 
 #define CDU_CONTEXT_VALIDATION_CFG_ENABLE_SHIFT			(0)
 #define CDU_CONTEXT_VALIDATION_CFG_VALIDATION_TYPE_SHIFT	(1)
@@ -201,45 +193,45 @@
 #define DQ_DEMS_TOE_LOCAL_ADV_WND	4
 #define DQ_DEMS_ROCE_CQ_CONS		7
 
-/* XCM agg val selection */
-#define DQ_XCM_AGG_VAL_SEL_WORD2  0
-#define DQ_XCM_AGG_VAL_SEL_WORD3  1
-#define DQ_XCM_AGG_VAL_SEL_WORD4  2
-#define DQ_XCM_AGG_VAL_SEL_WORD5  3
-#define DQ_XCM_AGG_VAL_SEL_REG3   4
-#define DQ_XCM_AGG_VAL_SEL_REG4   5
-#define DQ_XCM_AGG_VAL_SEL_REG5   6
-#define DQ_XCM_AGG_VAL_SEL_REG6   7
-
-/* XCM agg val selection */
-#define	DQ_XCM_CORE_TX_BD_CONS_CMD	DQ_XCM_AGG_VAL_SEL_WORD3
-#define	DQ_XCM_CORE_TX_BD_PROD_CMD	DQ_XCM_AGG_VAL_SEL_WORD4
-#define	DQ_XCM_CORE_SPQ_PROD_CMD	DQ_XCM_AGG_VAL_SEL_WORD4
-#define	DQ_XCM_ETH_EDPM_NUM_BDS_CMD	DQ_XCM_AGG_VAL_SEL_WORD2
-#define	DQ_XCM_ETH_TX_BD_CONS_CMD	DQ_XCM_AGG_VAL_SEL_WORD3
-#define	DQ_XCM_ETH_TX_BD_PROD_CMD	DQ_XCM_AGG_VAL_SEL_WORD4
-#define	DQ_XCM_ETH_GO_TO_BD_CONS_CMD	DQ_XCM_AGG_VAL_SEL_WORD5
-#define DQ_XCM_FCOE_SQ_CONS_CMD             DQ_XCM_AGG_VAL_SEL_WORD3
-#define DQ_XCM_FCOE_SQ_PROD_CMD             DQ_XCM_AGG_VAL_SEL_WORD4
-#define DQ_XCM_FCOE_X_FERQ_PROD_CMD         DQ_XCM_AGG_VAL_SEL_WORD5
-#define DQ_XCM_ISCSI_SQ_CONS_CMD	DQ_XCM_AGG_VAL_SEL_WORD3
-#define DQ_XCM_ISCSI_SQ_PROD_CMD	DQ_XCM_AGG_VAL_SEL_WORD4
-#define DQ_XCM_ISCSI_MORE_TO_SEND_SEQ_CMD DQ_XCM_AGG_VAL_SEL_REG3
-#define DQ_XCM_ISCSI_EXP_STAT_SN_CMD	DQ_XCM_AGG_VAL_SEL_REG6
-#define DQ_XCM_ROCE_SQ_PROD_CMD	DQ_XCM_AGG_VAL_SEL_WORD4
-#define DQ_XCM_TOE_TX_BD_PROD_CMD	DQ_XCM_AGG_VAL_SEL_WORD4
-#define DQ_XCM_TOE_MORE_TO_SEND_SEQ_CMD	DQ_XCM_AGG_VAL_SEL_REG3
-#define DQ_XCM_TOE_LOCAL_ADV_WND_SEQ_CMD DQ_XCM_AGG_VAL_SEL_REG4
+/* XCM agg val selection (HW) */
+#define DQ_XCM_AGG_VAL_SEL_WORD2	0
+#define DQ_XCM_AGG_VAL_SEL_WORD3	1
+#define DQ_XCM_AGG_VAL_SEL_WORD4	2
+#define DQ_XCM_AGG_VAL_SEL_WORD5	3
+#define DQ_XCM_AGG_VAL_SEL_REG3		4
+#define DQ_XCM_AGG_VAL_SEL_REG4		5
+#define DQ_XCM_AGG_VAL_SEL_REG5		6
+#define DQ_XCM_AGG_VAL_SEL_REG6		7
+
+/* XCM agg val selection (FW) */
+#define	DQ_XCM_CORE_TX_BD_CONS_CMD		DQ_XCM_AGG_VAL_SEL_WORD3
+#define	DQ_XCM_CORE_TX_BD_PROD_CMD		DQ_XCM_AGG_VAL_SEL_WORD4
+#define	DQ_XCM_CORE_SPQ_PROD_CMD		DQ_XCM_AGG_VAL_SEL_WORD4
+#define	DQ_XCM_ETH_EDPM_NUM_BDS_CMD		DQ_XCM_AGG_VAL_SEL_WORD2
+#define	DQ_XCM_ETH_TX_BD_CONS_CMD		DQ_XCM_AGG_VAL_SEL_WORD3
+#define	DQ_XCM_ETH_TX_BD_PROD_CMD		DQ_XCM_AGG_VAL_SEL_WORD4
+#define	DQ_XCM_ETH_GO_TO_BD_CONS_CMD		DQ_XCM_AGG_VAL_SEL_WORD5
+#define DQ_XCM_FCOE_SQ_CONS_CMD			DQ_XCM_AGG_VAL_SEL_WORD3
+#define DQ_XCM_FCOE_SQ_PROD_CMD			DQ_XCM_AGG_VAL_SEL_WORD4
+#define DQ_XCM_FCOE_X_FERQ_PROD_CMD		DQ_XCM_AGG_VAL_SEL_WORD5
+#define DQ_XCM_ISCSI_SQ_CONS_CMD		DQ_XCM_AGG_VAL_SEL_WORD3
+#define DQ_XCM_ISCSI_SQ_PROD_CMD		DQ_XCM_AGG_VAL_SEL_WORD4
+#define DQ_XCM_ISCSI_MORE_TO_SEND_SEQ_CMD	DQ_XCM_AGG_VAL_SEL_REG3
+#define DQ_XCM_ISCSI_EXP_STAT_SN_CMD		DQ_XCM_AGG_VAL_SEL_REG6
+#define DQ_XCM_ROCE_SQ_PROD_CMD			DQ_XCM_AGG_VAL_SEL_WORD4
+#define DQ_XCM_TOE_TX_BD_PROD_CMD		DQ_XCM_AGG_VAL_SEL_WORD4
+#define DQ_XCM_TOE_MORE_TO_SEND_SEQ_CMD		DQ_XCM_AGG_VAL_SEL_REG3
+#define DQ_XCM_TOE_LOCAL_ADV_WND_SEQ_CMD	DQ_XCM_AGG_VAL_SEL_REG4
 
 /* UCM agg val selection (HW) */
 #define	DQ_UCM_AGG_VAL_SEL_WORD0	0
 #define	DQ_UCM_AGG_VAL_SEL_WORD1	1
 #define	DQ_UCM_AGG_VAL_SEL_WORD2	2
 #define	DQ_UCM_AGG_VAL_SEL_WORD3	3
-#define	DQ_UCM_AGG_VAL_SEL_REG0	4
-#define	DQ_UCM_AGG_VAL_SEL_REG1	5
-#define	DQ_UCM_AGG_VAL_SEL_REG2	6
-#define	DQ_UCM_AGG_VAL_SEL_REG3	7
+#define	DQ_UCM_AGG_VAL_SEL_REG0		4
+#define	DQ_UCM_AGG_VAL_SEL_REG1		5
+#define	DQ_UCM_AGG_VAL_SEL_REG2		6
+#define	DQ_UCM_AGG_VAL_SEL_REG3		7
 
 /* UCM agg val selection (FW) */
 #define DQ_UCM_ETH_PMD_TX_CONS_CMD	DQ_UCM_AGG_VAL_SEL_WORD2
@@ -263,7 +255,7 @@
 #define DQ_TCM_ROCE_RQ_PROD_CMD	\
 	DQ_TCM_AGG_VAL_SEL_WORD0
 
-/* XCM agg counter flag selection */
+/* XCM agg counter flag selection (HW) */
 #define	DQ_XCM_AGG_FLG_SHIFT_BIT14	0
 #define	DQ_XCM_AGG_FLG_SHIFT_BIT15	1
 #define	DQ_XCM_AGG_FLG_SHIFT_CF12	2
@@ -273,20 +265,20 @@
 #define	DQ_XCM_AGG_FLG_SHIFT_CF22	6
 #define	DQ_XCM_AGG_FLG_SHIFT_CF23	7
 
-/* XCM agg counter flag selection */
-#define DQ_XCM_CORE_DQ_CF_CMD		BIT(DQ_XCM_AGG_FLG_SHIFT_CF18)
-#define DQ_XCM_CORE_TERMINATE_CMD	BIT(DQ_XCM_AGG_FLG_SHIFT_CF19)
-#define DQ_XCM_CORE_SLOW_PATH_CMD	BIT(DQ_XCM_AGG_FLG_SHIFT_CF22)
-#define DQ_XCM_ETH_DQ_CF_CMD		BIT(DQ_XCM_AGG_FLG_SHIFT_CF18)
-#define DQ_XCM_ETH_TERMINATE_CMD	BIT(DQ_XCM_AGG_FLG_SHIFT_CF19)
-#define DQ_XCM_ETH_SLOW_PATH_CMD	BIT(DQ_XCM_AGG_FLG_SHIFT_CF22)
-#define DQ_XCM_ETH_TPH_EN_CMD		BIT(DQ_XCM_AGG_FLG_SHIFT_CF23)
-#define DQ_XCM_FCOE_SLOW_PATH_CMD           BIT(DQ_XCM_AGG_FLG_SHIFT_CF22)
-#define DQ_XCM_ISCSI_DQ_FLUSH_CMD	BIT(DQ_XCM_AGG_FLG_SHIFT_CF19)
-#define DQ_XCM_ISCSI_SLOW_PATH_CMD	BIT(DQ_XCM_AGG_FLG_SHIFT_CF22)
-#define DQ_XCM_ISCSI_PROC_ONLY_CLEANUP_CMD BIT(DQ_XCM_AGG_FLG_SHIFT_CF23)
-#define DQ_XCM_TOE_DQ_FLUSH_CMD		BIT(DQ_XCM_AGG_FLG_SHIFT_CF19)
-#define DQ_XCM_TOE_SLOW_PATH_CMD	BIT(DQ_XCM_AGG_FLG_SHIFT_CF22)
+/* XCM agg counter flag selection (FW) */
+#define DQ_XCM_CORE_DQ_CF_CMD			BIT(DQ_XCM_AGG_FLG_SHIFT_CF18)
+#define DQ_XCM_CORE_TERMINATE_CMD		BIT(DQ_XCM_AGG_FLG_SHIFT_CF19)
+#define DQ_XCM_CORE_SLOW_PATH_CMD		BIT(DQ_XCM_AGG_FLG_SHIFT_CF22)
+#define DQ_XCM_ETH_DQ_CF_CMD			BIT(DQ_XCM_AGG_FLG_SHIFT_CF18)
+#define DQ_XCM_ETH_TERMINATE_CMD		BIT(DQ_XCM_AGG_FLG_SHIFT_CF19)
+#define DQ_XCM_ETH_SLOW_PATH_CMD		BIT(DQ_XCM_AGG_FLG_SHIFT_CF22)
+#define DQ_XCM_ETH_TPH_EN_CMD			BIT(DQ_XCM_AGG_FLG_SHIFT_CF23)
+#define DQ_XCM_FCOE_SLOW_PATH_CMD		BIT(DQ_XCM_AGG_FLG_SHIFT_CF22)
+#define DQ_XCM_ISCSI_DQ_FLUSH_CMD		BIT(DQ_XCM_AGG_FLG_SHIFT_CF19)
+#define DQ_XCM_ISCSI_SLOW_PATH_CMD		BIT(DQ_XCM_AGG_FLG_SHIFT_CF22)
+#define DQ_XCM_ISCSI_PROC_ONLY_CLEANUP_CMD	BIT(DQ_XCM_AGG_FLG_SHIFT_CF23)
+#define DQ_XCM_TOE_DQ_FLUSH_CMD			BIT(DQ_XCM_AGG_FLG_SHIFT_CF19)
+#define DQ_XCM_TOE_SLOW_PATH_CMD		BIT(DQ_XCM_AGG_FLG_SHIFT_CF22)
 
 /* UCM agg counter flag selection (HW) */
 #define	DQ_UCM_AGG_FLG_SHIFT_CF0	0
@@ -317,9 +309,9 @@
 #define DQ_TCM_AGG_FLG_SHIFT_CF6	6
 #define DQ_TCM_AGG_FLG_SHIFT_CF7	7
 /* TCM agg counter flag selection (FW) */
-#define DQ_TCM_FCOE_FLUSH_Q0_CMD            BIT(DQ_TCM_AGG_FLG_SHIFT_CF1)
-#define DQ_TCM_FCOE_DUMMY_TIMER_CMD         BIT(DQ_TCM_AGG_FLG_SHIFT_CF2)
-#define DQ_TCM_FCOE_TIMER_STOP_ALL_CMD      BIT(DQ_TCM_AGG_FLG_SHIFT_CF3)
+#define DQ_TCM_FCOE_FLUSH_Q0_CMD	BIT(DQ_TCM_AGG_FLG_SHIFT_CF1)
+#define DQ_TCM_FCOE_DUMMY_TIMER_CMD	BIT(DQ_TCM_AGG_FLG_SHIFT_CF2)
+#define DQ_TCM_FCOE_TIMER_STOP_ALL_CMD	BIT(DQ_TCM_AGG_FLG_SHIFT_CF3)
 #define DQ_TCM_ISCSI_FLUSH_Q0_CMD	BIT(DQ_TCM_AGG_FLG_SHIFT_CF1)
 #define DQ_TCM_ISCSI_TIMER_STOP_ALL_CMD	BIT(DQ_TCM_AGG_FLG_SHIFT_CF3)
 #define DQ_TCM_TOE_FLUSH_Q0_CMD		BIT(DQ_TCM_AGG_FLG_SHIFT_CF1)
@@ -327,18 +319,18 @@
 #define DQ_TCM_IWARP_POST_RQ_CF_CMD	BIT(DQ_TCM_AGG_FLG_SHIFT_CF1)
 
 /* PWM address mapping */
-#define DQ_PWM_OFFSET_DPM_BASE	0x0
-#define DQ_PWM_OFFSET_DPM_END	0x27
+#define DQ_PWM_OFFSET_DPM_BASE		0x0
+#define DQ_PWM_OFFSET_DPM_END		0x27
 #define DQ_PWM_OFFSET_XCM16_BASE	0x40
 #define DQ_PWM_OFFSET_XCM32_BASE	0x44
 #define DQ_PWM_OFFSET_UCM16_BASE	0x48
 #define DQ_PWM_OFFSET_UCM32_BASE	0x4C
-#define DQ_PWM_OFFSET_UCM16_4	0x50
+#define DQ_PWM_OFFSET_UCM16_4		0x50
 #define DQ_PWM_OFFSET_TCM16_BASE	0x58
 #define DQ_PWM_OFFSET_TCM32_BASE	0x5C
-#define DQ_PWM_OFFSET_XCM_FLAGS	0x68
-#define DQ_PWM_OFFSET_UCM_FLAGS	0x69
-#define DQ_PWM_OFFSET_TCM_FLAGS	0x6B
+#define DQ_PWM_OFFSET_XCM_FLAGS		0x68
+#define DQ_PWM_OFFSET_UCM_FLAGS		0x69
+#define DQ_PWM_OFFSET_TCM_FLAGS		0x6B
 
 #define DQ_PWM_OFFSET_XCM_RDMA_SQ_PROD		(DQ_PWM_OFFSET_XCM16_BASE + 2)
 #define DQ_PWM_OFFSET_UCM_RDMA_CQ_CONS_32BIT	(DQ_PWM_OFFSET_UCM32_BASE)
@@ -347,10 +339,11 @@
 #define DQ_PWM_OFFSET_UCM_RDMA_ARM_FLAGS	(DQ_PWM_OFFSET_UCM_FLAGS)
 #define DQ_PWM_OFFSET_TCM_ROCE_RQ_PROD		(DQ_PWM_OFFSET_TCM16_BASE + 1)
 #define DQ_PWM_OFFSET_TCM_IWARP_RQ_PROD		(DQ_PWM_OFFSET_TCM16_BASE + 3)
-#define	DQ_REGION_SHIFT	(12)
+
+#define	DQ_REGION_SHIFT			(12)
 
 /* DPM */
-#define	DQ_DPM_WQE_BUFF_SIZE	(320)
+#define	DQ_DPM_WQE_BUFF_SIZE		(320)
 
 /* Conn type ranges */
 #define	DQ_CONN_TYPE_RANGE_SHIFT	(4)
@@ -359,29 +352,30 @@
 /* QM CONSTANTS  */
 /*****************/
 
-/* number of TX queues in the QM */
+/* Number of TX queues in the QM */
 #define MAX_QM_TX_QUEUES_K2	512
 #define MAX_QM_TX_QUEUES_BB	448
 #define MAX_QM_TX_QUEUES	MAX_QM_TX_QUEUES_K2
 
-/* number of Other queues in the QM */
+/* Number of Other queues in the QM */
 #define MAX_QM_OTHER_QUEUES_BB	64
 #define MAX_QM_OTHER_QUEUES_K2	128
 #define MAX_QM_OTHER_QUEUES	MAX_QM_OTHER_QUEUES_K2
 
-/* number of queues in a PF queue group */
+/* Number of queues in a PF queue group */
 #define QM_PF_QUEUE_GROUP_SIZE	8
 
-/* the size of a single queue element in bytes */
-#define QM_PQ_ELEMENT_SIZE                      4
+/* The size of a single queue element in bytes */
+#define QM_PQ_ELEMENT_SIZE	4
 
-/* base number of Tx PQs in the CM PQ representation.
- * should be used when storing PQ IDs in CM PQ registers and context
+/* Base number of Tx PQs in the CM PQ representation.
+ * Should be used when storing PQ IDs in CM PQ registers and context.
  */
-#define CM_TX_PQ_BASE	0x200
+#define CM_TX_PQ_BASE		0x200
 
-/* number of global Vport/QCN rate limiters */
+/* Number of global Vport/QCN rate limiters */
 #define MAX_QM_GLOBAL_RLS	256
+
 /* QM registers data */
 #define QM_LINE_CRD_REG_WIDTH		16
 #define QM_LINE_CRD_REG_SIGN_BIT	BIT((QM_LINE_CRD_REG_WIDTH - 1))
@@ -400,7 +394,7 @@
 #define CAU_FSM_ETH_TX  1
 
 /* Number of Protocol Indices per Status Block */
-#define PIS_PER_SB    12
+#define PIS_PER_SB_E4	12
 
 #define CAU_HC_STOPPED_STATE	3
 #define CAU_HC_DISABLE_STATE	4
@@ -432,8 +426,7 @@
 
 #define IGU_CMD_INT_ACK_BASE		0x0400
 #define IGU_CMD_INT_ACK_UPPER		(IGU_CMD_INT_ACK_BASE +	\
-					 MAX_TOT_SB_PER_PATH -	\
-					 1)
+					 MAX_TOT_SB_PER_PATH - 1)
 #define IGU_CMD_INT_ACK_RESERVED_UPPER	0x05ff
 
 #define IGU_CMD_ATTN_BIT_UPD_UPPER	0x05f0
@@ -447,8 +440,7 @@
 
 #define IGU_CMD_PROD_UPD_BASE			0x0600
 #define IGU_CMD_PROD_UPD_UPPER			(IGU_CMD_PROD_UPD_BASE +\
-						 MAX_TOT_SB_PER_PATH - \
-						 1)
+						 MAX_TOT_SB_PER_PATH - 1)
 #define IGU_CMD_PROD_UPD_RESERVED_UPPER		0x07ff
 
 /*****************/
@@ -514,129 +506,126 @@
 	 PXP_EXTERNAL_BAR_GLOBAL_WINDOW_LENGTH - 1)
 
 /* PF BAR */
-#define PXP_BAR0_START_GRC	0x0000
-#define PXP_BAR0_GRC_LENGTH	0x1C00000
-#define PXP_BAR0_END_GRC	(PXP_BAR0_START_GRC + \
-				 PXP_BAR0_GRC_LENGTH - 1)
-
-#define PXP_BAR0_START_IGU	0x1C00000
-#define PXP_BAR0_IGU_LENGTH	0x10000
-#define PXP_BAR0_END_IGU	(PXP_BAR0_START_IGU + \
-				 PXP_BAR0_IGU_LENGTH - 1)
-
-#define PXP_BAR0_START_TSDM	0x1C80000
-#define PXP_BAR0_SDM_LENGTH	0x40000
+#define PXP_BAR0_START_GRC		0x0000
+#define PXP_BAR0_GRC_LENGTH		0x1C00000
+#define PXP_BAR0_END_GRC		(PXP_BAR0_START_GRC + \
+					 PXP_BAR0_GRC_LENGTH - 1)
+
+#define PXP_BAR0_START_IGU		0x1C00000
+#define PXP_BAR0_IGU_LENGTH		0x10000
+#define PXP_BAR0_END_IGU		(PXP_BAR0_START_IGU + \
+					 PXP_BAR0_IGU_LENGTH - 1)
+
+#define PXP_BAR0_START_TSDM		0x1C80000
+#define PXP_BAR0_SDM_LENGTH		0x40000
 #define PXP_BAR0_SDM_RESERVED_LENGTH	0x40000
-#define PXP_BAR0_END_TSDM	(PXP_BAR0_START_TSDM + \
-				 PXP_BAR0_SDM_LENGTH - 1)
+#define PXP_BAR0_END_TSDM		(PXP_BAR0_START_TSDM + \
+					 PXP_BAR0_SDM_LENGTH - 1)
 
-#define PXP_BAR0_START_MSDM	0x1D00000
-#define PXP_BAR0_END_MSDM	(PXP_BAR0_START_MSDM + \
-				 PXP_BAR0_SDM_LENGTH - 1)
+#define PXP_BAR0_START_MSDM		0x1D00000
+#define PXP_BAR0_END_MSDM		(PXP_BAR0_START_MSDM + \
+					 PXP_BAR0_SDM_LENGTH - 1)
 
-#define PXP_BAR0_START_USDM	0x1D80000
-#define PXP_BAR0_END_USDM	(PXP_BAR0_START_USDM + \
-				 PXP_BAR0_SDM_LENGTH - 1)
+#define PXP_BAR0_START_USDM		0x1D80000
+#define PXP_BAR0_END_USDM		(PXP_BAR0_START_USDM + \
+					 PXP_BAR0_SDM_LENGTH - 1)
 
-#define PXP_BAR0_START_XSDM	0x1E00000
-#define PXP_BAR0_END_XSDM	(PXP_BAR0_START_XSDM + \
-				 PXP_BAR0_SDM_LENGTH - 1)
+#define PXP_BAR0_START_XSDM		0x1E00000
+#define PXP_BAR0_END_XSDM		(PXP_BAR0_START_XSDM + \
+					 PXP_BAR0_SDM_LENGTH - 1)
 
-#define PXP_BAR0_START_YSDM	0x1E80000
-#define PXP_BAR0_END_YSDM	(PXP_BAR0_START_YSDM + \
-				 PXP_BAR0_SDM_LENGTH - 1)
+#define PXP_BAR0_START_YSDM		0x1E80000
+#define PXP_BAR0_END_YSDM		(PXP_BAR0_START_YSDM + \
+					 PXP_BAR0_SDM_LENGTH - 1)
 
-#define PXP_BAR0_START_PSDM	0x1F00000
-#define PXP_BAR0_END_PSDM	(PXP_BAR0_START_PSDM + \
-				 PXP_BAR0_SDM_LENGTH - 1)
+#define PXP_BAR0_START_PSDM		0x1F00000
+#define PXP_BAR0_END_PSDM		(PXP_BAR0_START_PSDM + \
+					 PXP_BAR0_SDM_LENGTH - 1)
 
 #define PXP_BAR0_FIRST_INVALID_ADDRESS	(PXP_BAR0_END_PSDM + 1)
 
 /* VF BAR */
-#define PXP_VF_BAR0	0
-
-#define PXP_VF_BAR0_START_GRC	0x3E00
-#define PXP_VF_BAR0_GRC_LENGTH	0x200
-#define PXP_VF_BAR0_END_GRC	(PXP_VF_BAR0_START_GRC + \
-				 PXP_VF_BAR0_GRC_LENGTH - 1)
-
-#define PXP_VF_BAR0_START_IGU                   0
-#define PXP_VF_BAR0_IGU_LENGTH                  0x3000
-#define PXP_VF_BAR0_END_IGU                     (PXP_VF_BAR0_START_IGU + \
-						 PXP_VF_BAR0_IGU_LENGTH - 1)
-
-#define PXP_VF_BAR0_START_DQ                    0x3000
-#define PXP_VF_BAR0_DQ_LENGTH                   0x200
-#define PXP_VF_BAR0_DQ_OPAQUE_OFFSET            0
-#define PXP_VF_BAR0_ME_OPAQUE_ADDRESS           (PXP_VF_BAR0_START_DQ +	\
-						 PXP_VF_BAR0_DQ_OPAQUE_OFFSET)
-#define PXP_VF_BAR0_ME_CONCRETE_ADDRESS         (PXP_VF_BAR0_ME_OPAQUE_ADDRESS \
-						 + 4)
-#define PXP_VF_BAR0_END_DQ                      (PXP_VF_BAR0_START_DQ +	\
-						 PXP_VF_BAR0_DQ_LENGTH - 1)
-
-#define PXP_VF_BAR0_START_TSDM_ZONE_B           0x3200
-#define PXP_VF_BAR0_SDM_LENGTH_ZONE_B           0x200
-#define PXP_VF_BAR0_END_TSDM_ZONE_B             (PXP_VF_BAR0_START_TSDM_ZONE_B \
-						 +			       \
-						 PXP_VF_BAR0_SDM_LENGTH_ZONE_B \
-						 - 1)
-
-#define PXP_VF_BAR0_START_MSDM_ZONE_B           0x3400
-#define PXP_VF_BAR0_END_MSDM_ZONE_B             (PXP_VF_BAR0_START_MSDM_ZONE_B \
-						 +			       \
-						 PXP_VF_BAR0_SDM_LENGTH_ZONE_B \
-						 - 1)
-
-#define PXP_VF_BAR0_START_USDM_ZONE_B           0x3600
-#define PXP_VF_BAR0_END_USDM_ZONE_B             (PXP_VF_BAR0_START_USDM_ZONE_B \
-						 +			       \
-						 PXP_VF_BAR0_SDM_LENGTH_ZONE_B \
-						 - 1)
-
-#define PXP_VF_BAR0_START_XSDM_ZONE_B           0x3800
-#define PXP_VF_BAR0_END_XSDM_ZONE_B             (PXP_VF_BAR0_START_XSDM_ZONE_B \
-						 +			       \
-						 PXP_VF_BAR0_SDM_LENGTH_ZONE_B \
-						 - 1)
-
-#define PXP_VF_BAR0_START_YSDM_ZONE_B           0x3a00
-#define PXP_VF_BAR0_END_YSDM_ZONE_B             (PXP_VF_BAR0_START_YSDM_ZONE_B \
-						 +			       \
-						 PXP_VF_BAR0_SDM_LENGTH_ZONE_B \
-						 - 1)
-
-#define PXP_VF_BAR0_START_PSDM_ZONE_B           0x3c00
-#define PXP_VF_BAR0_END_PSDM_ZONE_B             (PXP_VF_BAR0_START_PSDM_ZONE_B \
-						 +			       \
-						 PXP_VF_BAR0_SDM_LENGTH_ZONE_B \
-						 - 1)
-
-#define PXP_VF_BAR0_START_SDM_ZONE_A            0x4000
-#define PXP_VF_BAR0_END_SDM_ZONE_A              0x10000
-
-#define PXP_VF_BAR0_GRC_WINDOW_LENGTH           32
-
-#define PXP_ILT_PAGE_SIZE_NUM_BITS_MIN		12
-#define PXP_ILT_BLOCK_FACTOR_MULTIPLIER		1024
+#define PXP_VF_BAR0			0
+
+#define PXP_VF_BAR0_START_IGU		0
+#define PXP_VF_BAR0_IGU_LENGTH		0x3000
+#define PXP_VF_BAR0_END_IGU		(PXP_VF_BAR0_START_IGU + \
+					 PXP_VF_BAR0_IGU_LENGTH - 1)
+
+#define PXP_VF_BAR0_START_DQ		0x3000
+#define PXP_VF_BAR0_DQ_LENGTH		0x200
+#define PXP_VF_BAR0_DQ_OPAQUE_OFFSET	0
+#define PXP_VF_BAR0_ME_OPAQUE_ADDRESS	(PXP_VF_BAR0_START_DQ +	\
+					 PXP_VF_BAR0_DQ_OPAQUE_OFFSET)
+#define PXP_VF_BAR0_ME_CONCRETE_ADDRESS	(PXP_VF_BAR0_ME_OPAQUE_ADDRESS \
+					 + 4)
+#define PXP_VF_BAR0_END_DQ		(PXP_VF_BAR0_START_DQ +	\
+					 PXP_VF_BAR0_DQ_LENGTH - 1)
+
+#define PXP_VF_BAR0_START_TSDM_ZONE_B	0x3200
+#define PXP_VF_BAR0_SDM_LENGTH_ZONE_B	0x200
+#define PXP_VF_BAR0_END_TSDM_ZONE_B	(PXP_VF_BAR0_START_TSDM_ZONE_B + \
+					 PXP_VF_BAR0_SDM_LENGTH_ZONE_B - 1)
+
+#define PXP_VF_BAR0_START_MSDM_ZONE_B	0x3400
+#define PXP_VF_BAR0_END_MSDM_ZONE_B	(PXP_VF_BAR0_START_MSDM_ZONE_B + \
+					 PXP_VF_BAR0_SDM_LENGTH_ZONE_B - 1)
+
+#define PXP_VF_BAR0_START_USDM_ZONE_B	0x3600
+#define PXP_VF_BAR0_END_USDM_ZONE_B	(PXP_VF_BAR0_START_USDM_ZONE_B + \
+					 PXP_VF_BAR0_SDM_LENGTH_ZONE_B - 1)
+
+#define PXP_VF_BAR0_START_XSDM_ZONE_B	0x3800
+#define PXP_VF_BAR0_END_XSDM_ZONE_B	(PXP_VF_BAR0_START_XSDM_ZONE_B + \
+					 PXP_VF_BAR0_SDM_LENGTH_ZONE_B - 1)
+
+#define PXP_VF_BAR0_START_YSDM_ZONE_B	0x3a00
+#define PXP_VF_BAR0_END_YSDM_ZONE_B	(PXP_VF_BAR0_START_YSDM_ZONE_B + \
+					 PXP_VF_BAR0_SDM_LENGTH_ZONE_B - 1)
+
+#define PXP_VF_BAR0_START_PSDM_ZONE_B	0x3c00
+#define PXP_VF_BAR0_END_PSDM_ZONE_B	(PXP_VF_BAR0_START_PSDM_ZONE_B + \
+					 PXP_VF_BAR0_SDM_LENGTH_ZONE_B - 1)
+
+#define PXP_VF_BAR0_START_GRC		0x3E00
+#define PXP_VF_BAR0_GRC_LENGTH		0x200
+#define PXP_VF_BAR0_END_GRC		(PXP_VF_BAR0_START_GRC + \
+					 PXP_VF_BAR0_GRC_LENGTH - 1)
+
+#define PXP_VF_BAR0_START_SDM_ZONE_A	0x4000
+#define PXP_VF_BAR0_END_SDM_ZONE_A	0x10000
+
+#define PXP_VF_BAR0_START_IGU2		0x10000
+#define PXP_VF_BAR0_IGU2_LENGTH		0xD000
+#define PXP_VF_BAR0_END_IGU2		(PXP_VF_BAR0_START_IGU2 + \
+					 PXP_VF_BAR0_IGU2_LENGTH - 1)
+
+#define PXP_VF_BAR0_GRC_WINDOW_LENGTH	32
+
+#define PXP_ILT_PAGE_SIZE_NUM_BITS_MIN	12
+#define PXP_ILT_BLOCK_FACTOR_MULTIPLIER	1024
 
 /* ILT Records */
 #define PXP_NUM_ILT_RECORDS_BB 7600
 #define PXP_NUM_ILT_RECORDS_K2 11000
 #define MAX_NUM_ILT_RECORDS MAX(PXP_NUM_ILT_RECORDS_BB, PXP_NUM_ILT_RECORDS_K2)
-#define PXP_QUEUES_ZONE_MAX_NUM 320
+
+/* Host Interface */
+#define PXP_QUEUES_ZONE_MAX_NUM	320
+
 /*****************/
 /* PRM CONSTANTS */
 /*****************/
-#define PRM_DMA_PAD_BYTES_NUM  2
+#define PRM_DMA_PAD_BYTES_NUM	2
+
 /*****************/
 /* SDMs CONSTANTS  */
 /*****************/
 
-#define SDM_OP_GEN_TRIG_NONE                    0
-#define SDM_OP_GEN_TRIG_WAKE_THREAD             1
-#define SDM_OP_GEN_TRIG_AGG_INT                 2
-#define SDM_OP_GEN_TRIG_LOADER                  4
+#define SDM_OP_GEN_TRIG_NONE		0
+#define SDM_OP_GEN_TRIG_WAKE_THREAD	1
+#define SDM_OP_GEN_TRIG_AGG_INT		2
+#define SDM_OP_GEN_TRIG_LOADER		4
 #define SDM_OP_GEN_TRIG_INDICATE_ERROR  6
 #define SDM_OP_GEN_TRIG_INC_ORDER_CNT   9
 
@@ -644,26 +633,26 @@
 /* Completion types */
 /********************/
 
-#define SDM_COMP_TYPE_NONE              0
-#define SDM_COMP_TYPE_WAKE_THREAD       1
-#define SDM_COMP_TYPE_AGG_INT           2
-#define SDM_COMP_TYPE_CM                3
-#define SDM_COMP_TYPE_LOADER            4
-#define SDM_COMP_TYPE_PXP               5
-#define SDM_COMP_TYPE_INDICATE_ERROR    6
-#define SDM_COMP_TYPE_RELEASE_THREAD    7
-#define SDM_COMP_TYPE_RAM               8
-#define SDM_COMP_TYPE_INC_ORDER_CNT     9
+#define SDM_COMP_TYPE_NONE		0
+#define SDM_COMP_TYPE_WAKE_THREAD	1
+#define SDM_COMP_TYPE_AGG_INT		2
+#define SDM_COMP_TYPE_CM		3
+#define SDM_COMP_TYPE_LOADER		4
+#define SDM_COMP_TYPE_PXP		5
+#define SDM_COMP_TYPE_INDICATE_ERROR	6
+#define SDM_COMP_TYPE_RELEASE_THREAD	7
+#define SDM_COMP_TYPE_RAM		8
+#define SDM_COMP_TYPE_INC_ORDER_CNT	9
 
 /*****************/
-/* PBF Constants */
+/* PBF CONSTANTS */
 /*****************/
 
 /* Number of PBF command queue lines. Each line is 32B. */
-#define PBF_MAX_CMD_LINES 3328
+#define PBF_MAX_CMD_LINES	3328
 
 /* Number of BTB blocks. Each block is 256B. */
-#define BTB_MAX_BLOCKS 1440
+#define BTB_MAX_BLOCKS		1440
 
 /*****************/
 /* PRS CONSTANTS */
@@ -671,14 +660,7 @@
 
 #define PRS_GFT_CAM_LINES_NO_MATCH	31
 
-/* Async data KCQ CQE */
-struct async_data {
-	__le32	cid;
-	__le16	itid;
-	u8	error_code;
-	u8	fw_debug_param;
-};
-
+/* Interrupt coalescing TimeSet */
 struct coalescing_timeset {
 	u8 value;
 #define	COALESCING_TIMESET_TIMESET_MASK		0x7F
@@ -692,23 +674,32 @@ struct common_queue_zone {
 	__le16 reserved;
 };
 
+/* ETH Rx producers data */
 struct eth_rx_prod_data {
 	__le16 bd_prod;
 	__le16 cqe_prod;
 };
 
-struct regpair {
-	__le32	lo;
-	__le32	hi;
+struct tcp_ulp_connect_done_params {
+	__le16 mss;
+	u8 snd_wnd_scale;
+	u8 flags;
+#define TCP_ULP_CONNECT_DONE_PARAMS_TS_EN_MASK		0x1
+#define TCP_ULP_CONNECT_DONE_PARAMS_TS_EN_SHIFT		0
+#define TCP_ULP_CONNECT_DONE_PARAMS_RESERVED_MASK	0x7F
+#define TCP_ULP_CONNECT_DONE_PARAMS_RESERVED_SHIFT	1
 };
 
-struct vf_pf_channel_eqe_data {
-	struct regpair msg_addr;
+struct iscsi_connect_done_results {
+	__le16 icid;
+	__le16 conn_id;
+	struct tcp_ulp_connect_done_params params;
 };
 
 struct iscsi_eqe_data {
-	__le32 cid;
+	__le16 icid;
 	__le16 conn_id;
+	__le16 reserved;
 	u8 error_code;
 	u8 error_pdu_opcode_reserved;
 #define ISCSI_EQE_DATA_ERROR_PDU_OPCODE_MASK		0x3F
@@ -719,52 +710,6 @@ struct iscsi_eqe_data {
 #define ISCSI_EQE_DATA_RESERVED0_SHIFT			7
 };
 
-struct rdma_eqe_destroy_qp {
-	__le32 cid;
-	u8 reserved[4];
-};
-
-union rdma_eqe_data {
-	struct regpair async_handle;
-	struct rdma_eqe_destroy_qp rdma_destroy_qp_data;
-};
-
-struct malicious_vf_eqe_data {
-	u8 vf_id;
-	u8 err_id;
-	__le16 reserved[3];
-};
-
-struct initial_cleanup_eqe_data {
-	u8 vf_id;
-	u8 reserved[7];
-};
-
-/* Event Data Union */
-union event_ring_data {
-	u8 bytes[8];
-	struct vf_pf_channel_eqe_data vf_pf_channel;
-	struct iscsi_eqe_data iscsi_info;
-	union rdma_eqe_data rdma_data;
-	struct malicious_vf_eqe_data malicious_vf;
-	struct initial_cleanup_eqe_data vf_init_cleanup;
-};
-
-/* Event Ring Entry */
-struct event_ring_entry {
-	u8			protocol_id;
-	u8			opcode;
-	__le16			reserved0;
-	__le16			echo;
-	u8			fw_return_code;
-	u8			flags;
-#define EVENT_RING_ENTRY_ASYNC_MASK      0x1
-#define EVENT_RING_ENTRY_ASYNC_SHIFT     0
-#define EVENT_RING_ENTRY_RESERVED1_MASK  0x7F
-#define EVENT_RING_ENTRY_RESERVED1_SHIFT 1
-	union event_ring_data	data;
-};
-
 /* Multi function mode */
 enum mf_mode {
 	ERROR_MODE /* Unsupported mode */,
@@ -781,13 +726,31 @@ enum protocol_type {
 	PROTOCOLID_CORE,
 	PROTOCOLID_ETH,
 	PROTOCOLID_IWARP,
-	PROTOCOLID_RESERVED5,
+	PROTOCOLID_RESERVED0,
 	PROTOCOLID_PREROCE,
 	PROTOCOLID_COMMON,
-	PROTOCOLID_RESERVED6,
+	PROTOCOLID_RESERVED1,
 	MAX_PROTOCOL_TYPE
 };
 
+struct regpair {
+	__le32 lo;
+	__le32 hi;
+};
+
+/* RoCE Destroy Event Data */
+struct rdma_eqe_destroy_qp {
+	__le32 cid;
+	u8 reserved[4];
+};
+
+/* RDMA Event Data Union */
+union rdma_eqe_data {
+	struct regpair async_handle;
+	struct rdma_eqe_destroy_qp rdma_destroy_qp_data;
+};
+
+/* Ustorm Queue Zone */
 struct ustorm_eth_queue_zone {
 	struct coalescing_timeset int_coalescing_timeset;
 	u8 reserved[3];
@@ -798,62 +761,71 @@ struct ustorm_queue_zone {
 	struct common_queue_zone common;
 };
 
-/* status block structure */
+/* Status block structure */
 struct cau_pi_entry {
-	u32 prod;
-#define CAU_PI_ENTRY_PROD_VAL_MASK    0xFFFF
-#define CAU_PI_ENTRY_PROD_VAL_SHIFT   0
-#define CAU_PI_ENTRY_PI_TIMESET_MASK  0x7F
-#define CAU_PI_ENTRY_PI_TIMESET_SHIFT 16
-#define CAU_PI_ENTRY_FSM_SEL_MASK     0x1
-#define CAU_PI_ENTRY_FSM_SEL_SHIFT    23
-#define CAU_PI_ENTRY_RESERVED_MASK    0xFF
-#define CAU_PI_ENTRY_RESERVED_SHIFT   24
+	__le32 prod;
+#define CAU_PI_ENTRY_PROD_VAL_MASK	0xFFFF
+#define CAU_PI_ENTRY_PROD_VAL_SHIFT	0
+#define CAU_PI_ENTRY_PI_TIMESET_MASK	0x7F
+#define CAU_PI_ENTRY_PI_TIMESET_SHIFT	16
+#define CAU_PI_ENTRY_FSM_SEL_MASK	0x1
+#define CAU_PI_ENTRY_FSM_SEL_SHIFT	23
+#define CAU_PI_ENTRY_RESERVED_MASK	0xFF
+#define CAU_PI_ENTRY_RESERVED_SHIFT	24
 };
 
-/* status block structure */
+/* Status block structure */
 struct cau_sb_entry {
-	u32 data;
-#define CAU_SB_ENTRY_SB_PROD_MASK      0xFFFFFF
-#define CAU_SB_ENTRY_SB_PROD_SHIFT     0
-#define CAU_SB_ENTRY_STATE0_MASK       0xF
-#define CAU_SB_ENTRY_STATE0_SHIFT      24
-#define CAU_SB_ENTRY_STATE1_MASK       0xF
-#define CAU_SB_ENTRY_STATE1_SHIFT      28
-	u32 params;
-#define CAU_SB_ENTRY_SB_TIMESET0_MASK  0x7F
-#define CAU_SB_ENTRY_SB_TIMESET0_SHIFT 0
-#define CAU_SB_ENTRY_SB_TIMESET1_MASK  0x7F
-#define CAU_SB_ENTRY_SB_TIMESET1_SHIFT 7
-#define CAU_SB_ENTRY_TIMER_RES0_MASK   0x3
-#define CAU_SB_ENTRY_TIMER_RES0_SHIFT  14
-#define CAU_SB_ENTRY_TIMER_RES1_MASK   0x3
-#define CAU_SB_ENTRY_TIMER_RES1_SHIFT  16
-#define CAU_SB_ENTRY_VF_NUMBER_MASK    0xFF
-#define CAU_SB_ENTRY_VF_NUMBER_SHIFT   18
-#define CAU_SB_ENTRY_VF_VALID_MASK     0x1
-#define CAU_SB_ENTRY_VF_VALID_SHIFT    26
-#define CAU_SB_ENTRY_PF_NUMBER_MASK    0xF
-#define CAU_SB_ENTRY_PF_NUMBER_SHIFT   27
-#define CAU_SB_ENTRY_TPH_MASK          0x1
-#define CAU_SB_ENTRY_TPH_SHIFT         31
+	__le32 data;
+#define CAU_SB_ENTRY_SB_PROD_MASK	0xFFFFFF
+#define CAU_SB_ENTRY_SB_PROD_SHIFT	0
+#define CAU_SB_ENTRY_STATE0_MASK	0xF
+#define CAU_SB_ENTRY_STATE0_SHIFT	24
+#define CAU_SB_ENTRY_STATE1_MASK	0xF
+#define CAU_SB_ENTRY_STATE1_SHIFT	28
+	__le32 params;
+#define CAU_SB_ENTRY_SB_TIMESET0_MASK	0x7F
+#define CAU_SB_ENTRY_SB_TIMESET0_SHIFT	0
+#define CAU_SB_ENTRY_SB_TIMESET1_MASK	0x7F
+#define CAU_SB_ENTRY_SB_TIMESET1_SHIFT	7
+#define CAU_SB_ENTRY_TIMER_RES0_MASK	0x3
+#define CAU_SB_ENTRY_TIMER_RES0_SHIFT	14
+#define CAU_SB_ENTRY_TIMER_RES1_MASK	0x3
+#define CAU_SB_ENTRY_TIMER_RES1_SHIFT	16
+#define CAU_SB_ENTRY_VF_NUMBER_MASK	0xFF
+#define CAU_SB_ENTRY_VF_NUMBER_SHIFT	18
+#define CAU_SB_ENTRY_VF_VALID_MASK	0x1
+#define CAU_SB_ENTRY_VF_VALID_SHIFT	26
+#define CAU_SB_ENTRY_PF_NUMBER_MASK	0xF
+#define CAU_SB_ENTRY_PF_NUMBER_SHIFT	27
+#define CAU_SB_ENTRY_TPH_MASK		0x1
+#define CAU_SB_ENTRY_TPH_SHIFT		31
 };
 
-/* core doorbell data */
+/* IGU cleanup bit values, to distinguish between a cleanup command and a
+ * producer/consumer update.
+ */
+enum command_type_bit {
+	IGU_COMMAND_TYPE_NOP = 0,
+	IGU_COMMAND_TYPE_SET = 1,
+	MAX_COMMAND_TYPE_BIT
+};
+
+/* Core doorbell data */
 struct core_db_data {
 	u8 params;
-#define CORE_DB_DATA_DEST_MASK         0x3
-#define CORE_DB_DATA_DEST_SHIFT        0
-#define CORE_DB_DATA_AGG_CMD_MASK      0x3
-#define CORE_DB_DATA_AGG_CMD_SHIFT     2
-#define CORE_DB_DATA_BYPASS_EN_MASK    0x1
-#define CORE_DB_DATA_BYPASS_EN_SHIFT   4
-#define CORE_DB_DATA_RESERVED_MASK     0x1
-#define CORE_DB_DATA_RESERVED_SHIFT    5
-#define CORE_DB_DATA_AGG_VAL_SEL_MASK  0x3
-#define CORE_DB_DATA_AGG_VAL_SEL_SHIFT 6
-	u8	agg_flags;
-	__le16	spq_prod;
+#define CORE_DB_DATA_DEST_MASK		0x3
+#define CORE_DB_DATA_DEST_SHIFT		0
+#define CORE_DB_DATA_AGG_CMD_MASK	0x3
+#define CORE_DB_DATA_AGG_CMD_SHIFT	2
+#define CORE_DB_DATA_BYPASS_EN_MASK	0x1
+#define CORE_DB_DATA_BYPASS_EN_SHIFT	4
+#define CORE_DB_DATA_RESERVED_MASK	0x1
+#define CORE_DB_DATA_RESERVED_SHIFT	5
+#define CORE_DB_DATA_AGG_VAL_SEL_MASK	0x3
+#define CORE_DB_DATA_AGG_VAL_SEL_SHIFT	6
+	u8 agg_flags;
+	__le16 spq_prod;
 };
 
 /* Enum of doorbell aggregative command selection */
@@ -909,67 +881,69 @@ struct db_l2_dpm_sge {
 	struct regpair addr;
 	__le16 nbytes;
 	__le16 bitfields;
-#define DB_L2_DPM_SGE_TPH_ST_INDEX_MASK	0x1FF
-#define DB_L2_DPM_SGE_TPH_ST_INDEX_SHIFT 0
-#define DB_L2_DPM_SGE_RESERVED0_MASK	0x3
-#define DB_L2_DPM_SGE_RESERVED0_SHIFT	9
-#define DB_L2_DPM_SGE_ST_VALID_MASK	0x1
-#define DB_L2_DPM_SGE_ST_VALID_SHIFT	11
-#define DB_L2_DPM_SGE_RESERVED1_MASK	0xF
-#define DB_L2_DPM_SGE_RESERVED1_SHIFT	12
+#define DB_L2_DPM_SGE_TPH_ST_INDEX_MASK		0x1FF
+#define DB_L2_DPM_SGE_TPH_ST_INDEX_SHIFT	0
+#define DB_L2_DPM_SGE_RESERVED0_MASK		0x3
+#define DB_L2_DPM_SGE_RESERVED0_SHIFT		9
+#define DB_L2_DPM_SGE_ST_VALID_MASK		0x1
+#define DB_L2_DPM_SGE_ST_VALID_SHIFT		11
+#define DB_L2_DPM_SGE_RESERVED1_MASK		0xF
+#define DB_L2_DPM_SGE_RESERVED1_SHIFT		12
 	__le32 reserved2;
 };
 
 /* Structure for doorbell address, in legacy mode */
 struct db_legacy_addr {
 	__le32 addr;
-#define DB_LEGACY_ADDR_RESERVED0_MASK  0x3
-#define DB_LEGACY_ADDR_RESERVED0_SHIFT 0
-#define DB_LEGACY_ADDR_DEMS_MASK       0x7
-#define DB_LEGACY_ADDR_DEMS_SHIFT      2
-#define DB_LEGACY_ADDR_ICID_MASK       0x7FFFFFF
-#define DB_LEGACY_ADDR_ICID_SHIFT      5
+#define DB_LEGACY_ADDR_RESERVED0_MASK	0x3
+#define DB_LEGACY_ADDR_RESERVED0_SHIFT	0
+#define DB_LEGACY_ADDR_DEMS_MASK	0x7
+#define DB_LEGACY_ADDR_DEMS_SHIFT	2
+#define DB_LEGACY_ADDR_ICID_MASK	0x7FFFFFF
+#define DB_LEGACY_ADDR_ICID_SHIFT	5
 };
 
 /* Structure for doorbell address, in PWM mode */
 struct db_pwm_addr {
 	__le32 addr;
 #define DB_PWM_ADDR_RESERVED0_MASK	0x7
-#define DB_PWM_ADDR_RESERVED0_SHIFT 0
-#define DB_PWM_ADDR_OFFSET_MASK	0x7F
+#define DB_PWM_ADDR_RESERVED0_SHIFT	0
+#define DB_PWM_ADDR_OFFSET_MASK		0x7F
 #define DB_PWM_ADDR_OFFSET_SHIFT	3
-#define DB_PWM_ADDR_WID_MASK	0x3
-#define DB_PWM_ADDR_WID_SHIFT	10
-#define DB_PWM_ADDR_DPI_MASK	0xFFFF
-#define DB_PWM_ADDR_DPI_SHIFT	12
+#define DB_PWM_ADDR_WID_MASK		0x3
+#define DB_PWM_ADDR_WID_SHIFT		10
+#define DB_PWM_ADDR_DPI_MASK		0xFFFF
+#define DB_PWM_ADDR_DPI_SHIFT		12
 #define DB_PWM_ADDR_RESERVED1_MASK	0xF
-#define DB_PWM_ADDR_RESERVED1_SHIFT 28
+#define DB_PWM_ADDR_RESERVED1_SHIFT	28
 };
 
-/* Parameters to RoCE firmware, passed in EDPM doorbell */
+/* Parameters to RDMA firmware, passed in EDPM doorbell */
 struct db_rdma_dpm_params {
 	__le32 params;
-#define DB_RDMA_DPM_PARAMS_SIZE_MASK		0x3F
-#define DB_RDMA_DPM_PARAMS_SIZE_SHIFT		0
-#define DB_RDMA_DPM_PARAMS_DPM_TYPE_MASK	0x3
-#define DB_RDMA_DPM_PARAMS_DPM_TYPE_SHIFT	6
-#define DB_RDMA_DPM_PARAMS_OPCODE_MASK		0xFF
-#define DB_RDMA_DPM_PARAMS_OPCODE_SHIFT		8
-#define DB_RDMA_DPM_PARAMS_WQE_SIZE_MASK	0x7FF
-#define DB_RDMA_DPM_PARAMS_WQE_SIZE_SHIFT	16
-#define DB_RDMA_DPM_PARAMS_RESERVED0_MASK	0x1
-#define DB_RDMA_DPM_PARAMS_RESERVED0_SHIFT	27
-#define DB_RDMA_DPM_PARAMS_COMPLETION_FLG_MASK	0x1
-#define DB_RDMA_DPM_PARAMS_COMPLETION_FLG_SHIFT 28
-#define DB_RDMA_DPM_PARAMS_S_FLG_MASK		0x1
-#define DB_RDMA_DPM_PARAMS_S_FLG_SHIFT		29
-#define DB_RDMA_DPM_PARAMS_RESERVED1_MASK	0x1
-#define DB_RDMA_DPM_PARAMS_RESERVED1_SHIFT	30
+#define DB_RDMA_DPM_PARAMS_SIZE_MASK			0x3F
+#define DB_RDMA_DPM_PARAMS_SIZE_SHIFT			0
+#define DB_RDMA_DPM_PARAMS_DPM_TYPE_MASK		0x3
+#define DB_RDMA_DPM_PARAMS_DPM_TYPE_SHIFT		6
+#define DB_RDMA_DPM_PARAMS_OPCODE_MASK			0xFF
+#define DB_RDMA_DPM_PARAMS_OPCODE_SHIFT			8
+#define DB_RDMA_DPM_PARAMS_WQE_SIZE_MASK		0x7FF
+#define DB_RDMA_DPM_PARAMS_WQE_SIZE_SHIFT		16
+#define DB_RDMA_DPM_PARAMS_RESERVED0_MASK		0x1
+#define DB_RDMA_DPM_PARAMS_RESERVED0_SHIFT		27
+#define DB_RDMA_DPM_PARAMS_COMPLETION_FLG_MASK		0x1
+#define DB_RDMA_DPM_PARAMS_COMPLETION_FLG_SHIFT		28
+#define DB_RDMA_DPM_PARAMS_S_FLG_MASK			0x1
+#define DB_RDMA_DPM_PARAMS_S_FLG_SHIFT			29
+#define DB_RDMA_DPM_PARAMS_RESERVED1_MASK		0x1
+#define DB_RDMA_DPM_PARAMS_RESERVED1_SHIFT		30
 #define DB_RDMA_DPM_PARAMS_CONN_TYPE_IS_IWARP_MASK	0x1
 #define DB_RDMA_DPM_PARAMS_CONN_TYPE_IS_IWARP_SHIFT	31
 };
 
-/* Structure for doorbell data, in ROCE DPM mode, for 1st db in a DPM burst */
+/* Structure for doorbell data, in RDMA DPM mode, for the first doorbell in a
+ * DPM burst.
+ */
 struct db_rdma_dpm_data {
 	__le16 icid;
 	__le16 prod_val;
@@ -987,22 +961,22 @@ enum igu_int_cmd {
 
 /* IGU producer or consumer update command */
 struct igu_prod_cons_update {
-	u32 sb_id_and_flags;
-#define IGU_PROD_CONS_UPDATE_SB_INDEX_MASK        0xFFFFFF
-#define IGU_PROD_CONS_UPDATE_SB_INDEX_SHIFT       0
-#define IGU_PROD_CONS_UPDATE_UPDATE_FLAG_MASK     0x1
-#define IGU_PROD_CONS_UPDATE_UPDATE_FLAG_SHIFT    24
-#define IGU_PROD_CONS_UPDATE_ENABLE_INT_MASK      0x3
-#define IGU_PROD_CONS_UPDATE_ENABLE_INT_SHIFT     25
-#define IGU_PROD_CONS_UPDATE_SEGMENT_ACCESS_MASK  0x1
-#define IGU_PROD_CONS_UPDATE_SEGMENT_ACCESS_SHIFT 27
-#define IGU_PROD_CONS_UPDATE_TIMER_MASK_MASK      0x1
-#define IGU_PROD_CONS_UPDATE_TIMER_MASK_SHIFT     28
-#define IGU_PROD_CONS_UPDATE_RESERVED0_MASK       0x3
-#define IGU_PROD_CONS_UPDATE_RESERVED0_SHIFT      29
-#define IGU_PROD_CONS_UPDATE_COMMAND_TYPE_MASK    0x1
-#define IGU_PROD_CONS_UPDATE_COMMAND_TYPE_SHIFT   31
-	u32 reserved1;
+	__le32 sb_id_and_flags;
+#define IGU_PROD_CONS_UPDATE_SB_INDEX_MASK		0xFFFFFF
+#define IGU_PROD_CONS_UPDATE_SB_INDEX_SHIFT		0
+#define IGU_PROD_CONS_UPDATE_UPDATE_FLAG_MASK		0x1
+#define IGU_PROD_CONS_UPDATE_UPDATE_FLAG_SHIFT		24
+#define IGU_PROD_CONS_UPDATE_ENABLE_INT_MASK		0x3
+#define IGU_PROD_CONS_UPDATE_ENABLE_INT_SHIFT		25
+#define IGU_PROD_CONS_UPDATE_SEGMENT_ACCESS_MASK	0x1
+#define IGU_PROD_CONS_UPDATE_SEGMENT_ACCESS_SHIFT	27
+#define IGU_PROD_CONS_UPDATE_TIMER_MASK_MASK		0x1
+#define IGU_PROD_CONS_UPDATE_TIMER_MASK_SHIFT		28
+#define IGU_PROD_CONS_UPDATE_RESERVED0_MASK		0x3
+#define IGU_PROD_CONS_UPDATE_RESERVED0_SHIFT		29
+#define IGU_PROD_CONS_UPDATE_COMMAND_TYPE_MASK		0x1
+#define IGU_PROD_CONS_UPDATE_COMMAND_TYPE_SHIFT		31
+	__le32 reserved1;
 };
 
 /* Igu segments access for default status block only */
@@ -1012,38 +986,63 @@ enum igu_seg_access {
 	MAX_IGU_SEG_ACCESS
 };
 
+/* Enumeration for L3 type field of parsing_and_err_flags.
+ * L3Type: 0 - unknown (not IP), 1 - IPv4, 2 - IPv6.
+ * (This field can be filled according to the last ethertype.)
+ */
+enum l3_type {
+	e_l3_type_unknown,
+	e_l3_type_ipv4,
+	e_l3_type_ipv6,
+	MAX_L3_TYPE
+};
+
+/* Enumeration for l4Protocol field of parsing_and_err_flags.
+ * L4-protocol: 0 - none, 1 - TCP, 2 - UDP.
+ * If the packet is an IPv4 fragment and is not the first fragment, the
+ * protocol type should be set to none.
+ */
+enum l4_protocol {
+	e_l4_protocol_none,
+	e_l4_protocol_tcp,
+	e_l4_protocol_udp,
+	MAX_L4_PROTOCOL
+};
+
+/* Parsing and error flags field */
 struct parsing_and_err_flags {
 	__le16 flags;
-#define PARSING_AND_ERR_FLAGS_L3TYPE_MASK                      0x3
-#define PARSING_AND_ERR_FLAGS_L3TYPE_SHIFT                     0
-#define PARSING_AND_ERR_FLAGS_L4PROTOCOL_MASK                  0x3
-#define PARSING_AND_ERR_FLAGS_L4PROTOCOL_SHIFT                 2
-#define PARSING_AND_ERR_FLAGS_IPV4FRAG_MASK                    0x1
-#define PARSING_AND_ERR_FLAGS_IPV4FRAG_SHIFT                   4
-#define PARSING_AND_ERR_FLAGS_TAG8021QEXIST_MASK               0x1
-#define PARSING_AND_ERR_FLAGS_TAG8021QEXIST_SHIFT              5
-#define PARSING_AND_ERR_FLAGS_L4CHKSMWASCALCULATED_MASK        0x1
-#define PARSING_AND_ERR_FLAGS_L4CHKSMWASCALCULATED_SHIFT       6
-#define PARSING_AND_ERR_FLAGS_TIMESYNCPKT_MASK                 0x1
-#define PARSING_AND_ERR_FLAGS_TIMESYNCPKT_SHIFT                7
-#define PARSING_AND_ERR_FLAGS_TIMESTAMPRECORDED_MASK           0x1
-#define PARSING_AND_ERR_FLAGS_TIMESTAMPRECORDED_SHIFT          8
-#define PARSING_AND_ERR_FLAGS_IPHDRERROR_MASK                  0x1
-#define PARSING_AND_ERR_FLAGS_IPHDRERROR_SHIFT                 9
-#define PARSING_AND_ERR_FLAGS_L4CHKSMERROR_MASK                0x1
-#define PARSING_AND_ERR_FLAGS_L4CHKSMERROR_SHIFT               10
-#define PARSING_AND_ERR_FLAGS_TUNNELEXIST_MASK                 0x1
-#define PARSING_AND_ERR_FLAGS_TUNNELEXIST_SHIFT                11
-#define PARSING_AND_ERR_FLAGS_TUNNEL8021QTAGEXIST_MASK         0x1
-#define PARSING_AND_ERR_FLAGS_TUNNEL8021QTAGEXIST_SHIFT        12
-#define PARSING_AND_ERR_FLAGS_TUNNELIPHDRERROR_MASK            0x1
-#define PARSING_AND_ERR_FLAGS_TUNNELIPHDRERROR_SHIFT           13
-#define PARSING_AND_ERR_FLAGS_TUNNELL4CHKSMWASCALCULATED_MASK  0x1
-#define PARSING_AND_ERR_FLAGS_TUNNELL4CHKSMWASCALCULATED_SHIFT 14
-#define PARSING_AND_ERR_FLAGS_TUNNELL4CHKSMERROR_MASK          0x1
-#define PARSING_AND_ERR_FLAGS_TUNNELL4CHKSMERROR_SHIFT         15
+#define PARSING_AND_ERR_FLAGS_L3TYPE_MASK			0x3
+#define PARSING_AND_ERR_FLAGS_L3TYPE_SHIFT			0
+#define PARSING_AND_ERR_FLAGS_L4PROTOCOL_MASK			0x3
+#define PARSING_AND_ERR_FLAGS_L4PROTOCOL_SHIFT			2
+#define PARSING_AND_ERR_FLAGS_IPV4FRAG_MASK			0x1
+#define PARSING_AND_ERR_FLAGS_IPV4FRAG_SHIFT			4
+#define PARSING_AND_ERR_FLAGS_TAG8021QEXIST_MASK		0x1
+#define PARSING_AND_ERR_FLAGS_TAG8021QEXIST_SHIFT		5
+#define PARSING_AND_ERR_FLAGS_L4CHKSMWASCALCULATED_MASK		0x1
+#define PARSING_AND_ERR_FLAGS_L4CHKSMWASCALCULATED_SHIFT	6
+#define PARSING_AND_ERR_FLAGS_TIMESYNCPKT_MASK			0x1
+#define PARSING_AND_ERR_FLAGS_TIMESYNCPKT_SHIFT			7
+#define PARSING_AND_ERR_FLAGS_TIMESTAMPRECORDED_MASK		0x1
+#define PARSING_AND_ERR_FLAGS_TIMESTAMPRECORDED_SHIFT		8
+#define PARSING_AND_ERR_FLAGS_IPHDRERROR_MASK			0x1
+#define PARSING_AND_ERR_FLAGS_IPHDRERROR_SHIFT			9
+#define PARSING_AND_ERR_FLAGS_L4CHKSMERROR_MASK			0x1
+#define PARSING_AND_ERR_FLAGS_L4CHKSMERROR_SHIFT		10
+#define PARSING_AND_ERR_FLAGS_TUNNELEXIST_MASK			0x1
+#define PARSING_AND_ERR_FLAGS_TUNNELEXIST_SHIFT			11
+#define PARSING_AND_ERR_FLAGS_TUNNEL8021QTAGEXIST_MASK		0x1
+#define PARSING_AND_ERR_FLAGS_TUNNEL8021QTAGEXIST_SHIFT		12
+#define PARSING_AND_ERR_FLAGS_TUNNELIPHDRERROR_MASK		0x1
+#define PARSING_AND_ERR_FLAGS_TUNNELIPHDRERROR_SHIFT		13
+#define PARSING_AND_ERR_FLAGS_TUNNELL4CHKSMWASCALCULATED_MASK	0x1
+#define PARSING_AND_ERR_FLAGS_TUNNELL4CHKSMWASCALCULATED_SHIFT	14
+#define PARSING_AND_ERR_FLAGS_TUNNELL4CHKSMERROR_MASK		0x1
+#define PARSING_AND_ERR_FLAGS_TUNNELL4CHKSMERROR_SHIFT		15
 };
 
+/* Parsing error flags bitmap */
 struct parsing_err_flags {
 	__le16 flags;
 #define PARSING_ERR_FLAGS_MAC_ERROR_MASK				0x1
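For reference, a sketch of how the _MASK/_SHIFT pairs above are consumed, assuming the GET_FIELD() helper defined earlier in this header (the predicate itself is hypothetical):

	/* Assumed helper shape:
	 *	#define GET_FIELD(value, name) \
	 *		(((value) >> (name##_SHIFT)) & name##_MASK)
	 */
	static bool foo_pkt_is_tcp(const struct parsing_and_err_flags *pars)
	{
		u16 flags = le16_to_cpu(pars->flags);

		return GET_FIELD(flags, PARSING_AND_ERR_FLAGS_L4PROTOCOL) ==
		       e_l4_protocol_tcp;
	}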
@@ -1080,266 +1079,260 @@ struct parsing_err_flags {
 #define PARSING_ERR_FLAGS_TUNNEL_L4_CHKSM_ERROR_SHIFT			15
 };
 
+/* Pb context */
 struct pb_context {
 	__le32 crc[4];
 };
 
+/* Concrete Function ID */
 struct pxp_concrete_fid {
 	__le16 fid;
-#define PXP_CONCRETE_FID_PFID_MASK     0xF
-#define PXP_CONCRETE_FID_PFID_SHIFT    0
-#define PXP_CONCRETE_FID_PORT_MASK     0x3
-#define PXP_CONCRETE_FID_PORT_SHIFT    4
-#define PXP_CONCRETE_FID_PATH_MASK     0x1
-#define PXP_CONCRETE_FID_PATH_SHIFT    6
-#define PXP_CONCRETE_FID_VFVALID_MASK  0x1
-#define PXP_CONCRETE_FID_VFVALID_SHIFT 7
-#define PXP_CONCRETE_FID_VFID_MASK     0xFF
-#define PXP_CONCRETE_FID_VFID_SHIFT    8
+#define PXP_CONCRETE_FID_PFID_MASK	0xF
+#define PXP_CONCRETE_FID_PFID_SHIFT	0
+#define PXP_CONCRETE_FID_PORT_MASK	0x3
+#define PXP_CONCRETE_FID_PORT_SHIFT	4
+#define PXP_CONCRETE_FID_PATH_MASK	0x1
+#define PXP_CONCRETE_FID_PATH_SHIFT	6
+#define PXP_CONCRETE_FID_VFVALID_MASK	0x1
+#define PXP_CONCRETE_FID_VFVALID_SHIFT	7
+#define PXP_CONCRETE_FID_VFID_MASK	0xFF
+#define PXP_CONCRETE_FID_VFID_SHIFT	8
 };
 
+/* Concrete Function ID */
 struct pxp_pretend_concrete_fid {
 	__le16 fid;
-#define PXP_PRETEND_CONCRETE_FID_PFID_MASK      0xF
-#define PXP_PRETEND_CONCRETE_FID_PFID_SHIFT     0
-#define PXP_PRETEND_CONCRETE_FID_RESERVED_MASK  0x7
-#define PXP_PRETEND_CONCRETE_FID_RESERVED_SHIFT 4
-#define PXP_PRETEND_CONCRETE_FID_VFVALID_MASK   0x1
-#define PXP_PRETEND_CONCRETE_FID_VFVALID_SHIFT  7
-#define PXP_PRETEND_CONCRETE_FID_VFID_MASK      0xFF
-#define PXP_PRETEND_CONCRETE_FID_VFID_SHIFT     8
+#define PXP_PRETEND_CONCRETE_FID_PFID_MASK	0xF
+#define PXP_PRETEND_CONCRETE_FID_PFID_SHIFT	0
+#define PXP_PRETEND_CONCRETE_FID_RESERVED_MASK	0x7
+#define PXP_PRETEND_CONCRETE_FID_RESERVED_SHIFT	4
+#define PXP_PRETEND_CONCRETE_FID_VFVALID_MASK	0x1
+#define PXP_PRETEND_CONCRETE_FID_VFVALID_SHIFT	7
+#define PXP_PRETEND_CONCRETE_FID_VFID_MASK	0xFF
+#define PXP_PRETEND_CONCRETE_FID_VFID_SHIFT	8
 };
 
+/* Function ID */
 union pxp_pretend_fid {
 	struct pxp_pretend_concrete_fid concrete_fid;
-	__le16				opaque_fid;
+	__le16 opaque_fid;
 };
 
-/* Pxp Pretend Command Register. */
+/* Pxp Pretend Command Register */
 struct pxp_pretend_cmd {
-	union pxp_pretend_fid	fid;
-	__le16			control;
-#define PXP_PRETEND_CMD_PATH_MASK              0x1
-#define PXP_PRETEND_CMD_PATH_SHIFT             0
-#define PXP_PRETEND_CMD_USE_PORT_MASK          0x1
-#define PXP_PRETEND_CMD_USE_PORT_SHIFT         1
-#define PXP_PRETEND_CMD_PORT_MASK              0x3
-#define PXP_PRETEND_CMD_PORT_SHIFT             2
-#define PXP_PRETEND_CMD_RESERVED0_MASK         0xF
-#define PXP_PRETEND_CMD_RESERVED0_SHIFT        4
-#define PXP_PRETEND_CMD_RESERVED1_MASK         0xF
-#define PXP_PRETEND_CMD_RESERVED1_SHIFT        8
-#define PXP_PRETEND_CMD_PRETEND_PATH_MASK      0x1
-#define PXP_PRETEND_CMD_PRETEND_PATH_SHIFT     12
-#define PXP_PRETEND_CMD_PRETEND_PORT_MASK      0x1
-#define PXP_PRETEND_CMD_PRETEND_PORT_SHIFT     13
-#define PXP_PRETEND_CMD_PRETEND_FUNCTION_MASK  0x1
-#define PXP_PRETEND_CMD_PRETEND_FUNCTION_SHIFT 14
-#define PXP_PRETEND_CMD_IS_CONCRETE_MASK       0x1
-#define PXP_PRETEND_CMD_IS_CONCRETE_SHIFT      15
+	union pxp_pretend_fid fid;
+	__le16 control;
+#define PXP_PRETEND_CMD_PATH_MASK		0x1
+#define PXP_PRETEND_CMD_PATH_SHIFT		0
+#define PXP_PRETEND_CMD_USE_PORT_MASK		0x1
+#define PXP_PRETEND_CMD_USE_PORT_SHIFT		1
+#define PXP_PRETEND_CMD_PORT_MASK		0x3
+#define PXP_PRETEND_CMD_PORT_SHIFT		2
+#define PXP_PRETEND_CMD_RESERVED0_MASK		0xF
+#define PXP_PRETEND_CMD_RESERVED0_SHIFT		4
+#define PXP_PRETEND_CMD_RESERVED1_MASK		0xF
+#define PXP_PRETEND_CMD_RESERVED1_SHIFT		8
+#define PXP_PRETEND_CMD_PRETEND_PATH_MASK	0x1
+#define PXP_PRETEND_CMD_PRETEND_PATH_SHIFT	12
+#define PXP_PRETEND_CMD_PRETEND_PORT_MASK	0x1
+#define PXP_PRETEND_CMD_PRETEND_PORT_SHIFT	13
+#define PXP_PRETEND_CMD_PRETEND_FUNCTION_MASK	0x1
+#define PXP_PRETEND_CMD_PRETEND_FUNCTION_SHIFT	14
+#define PXP_PRETEND_CMD_IS_CONCRETE_MASK	0x1
+#define PXP_PRETEND_CMD_IS_CONCRETE_SHIFT	15
 };
 
-/* PTT Record in PXP Admin Window. */
+/* PTT Record in PXP Admin Window */
 struct pxp_ptt_entry {
-	__le32			offset;
-#define PXP_PTT_ENTRY_OFFSET_MASK     0x7FFFFF
-#define PXP_PTT_ENTRY_OFFSET_SHIFT    0
-#define PXP_PTT_ENTRY_RESERVED0_MASK  0x1FF
-#define PXP_PTT_ENTRY_RESERVED0_SHIFT 23
-	struct pxp_pretend_cmd	pretend;
+	__le32 offset;
+#define PXP_PTT_ENTRY_OFFSET_MASK	0x7FFFFF
+#define PXP_PTT_ENTRY_OFFSET_SHIFT	0
+#define PXP_PTT_ENTRY_RESERVED0_MASK	0x1FF
+#define PXP_PTT_ENTRY_RESERVED0_SHIFT	23
+	struct pxp_pretend_cmd pretend;
 };
 
-/* VF Zone A Permission Register. */
+/* VF Zone A Permission Register */
 struct pxp_vf_zone_a_permission {
 	__le32 control;
-#define PXP_VF_ZONE_A_PERMISSION_VFID_MASK	0xFF
-#define PXP_VF_ZONE_A_PERMISSION_VFID_SHIFT	0
-#define PXP_VF_ZONE_A_PERMISSION_VALID_MASK	0x1
-#define PXP_VF_ZONE_A_PERMISSION_VALID_SHIFT	8
-#define PXP_VF_ZONE_A_PERMISSION_RESERVED0_MASK	0x7F
-#define PXP_VF_ZONE_A_PERMISSION_RESERVED0_SHIFT 9
-#define PXP_VF_ZONE_A_PERMISSION_RESERVED1_MASK	0xFFFF
-#define PXP_VF_ZONE_A_PERMISSION_RESERVED1_SHIFT 16
+#define PXP_VF_ZONE_A_PERMISSION_VFID_MASK		0xFF
+#define PXP_VF_ZONE_A_PERMISSION_VFID_SHIFT		0
+#define PXP_VF_ZONE_A_PERMISSION_VALID_MASK		0x1
+#define PXP_VF_ZONE_A_PERMISSION_VALID_SHIFT		8
+#define PXP_VF_ZONE_A_PERMISSION_RESERVED0_MASK		0x7F
+#define PXP_VF_ZONE_A_PERMISSION_RESERVED0_SHIFT	9
+#define PXP_VF_ZONE_A_PERMISSION_RESERVED1_MASK		0xFFFF
+#define PXP_VF_ZONE_A_PERMISSION_RESERVED1_SHIFT	16
 };
 
-/* RSS hash type */
+/* Rdif context */
 struct rdif_task_context {
 	__le32 initial_ref_tag;
 	__le16 app_tag_value;
 	__le16 app_tag_mask;
 	u8 flags0;
-#define RDIF_TASK_CONTEXT_IGNOREAPPTAG_MASK            0x1
-#define RDIF_TASK_CONTEXT_IGNOREAPPTAG_SHIFT           0
-#define RDIF_TASK_CONTEXT_INITIALREFTAGVALID_MASK      0x1
-#define RDIF_TASK_CONTEXT_INITIALREFTAGVALID_SHIFT     1
-#define RDIF_TASK_CONTEXT_HOSTGUARDTYPE_MASK           0x1
-#define RDIF_TASK_CONTEXT_HOSTGUARDTYPE_SHIFT          2
-#define RDIF_TASK_CONTEXT_SETERRORWITHEOP_MASK         0x1
-#define RDIF_TASK_CONTEXT_SETERRORWITHEOP_SHIFT        3
-#define RDIF_TASK_CONTEXT_PROTECTIONTYPE_MASK          0x3
-#define RDIF_TASK_CONTEXT_PROTECTIONTYPE_SHIFT         4
-#define RDIF_TASK_CONTEXT_CRC_SEED_MASK                0x1
-#define RDIF_TASK_CONTEXT_CRC_SEED_SHIFT               6
-#define RDIF_TASK_CONTEXT_KEEPREFTAGCONST_MASK         0x1
-#define RDIF_TASK_CONTEXT_KEEPREFTAGCONST_SHIFT        7
+#define RDIF_TASK_CONTEXT_IGNORE_APP_TAG_MASK		0x1
+#define RDIF_TASK_CONTEXT_IGNORE_APP_TAG_SHIFT		0
+#define RDIF_TASK_CONTEXT_INITIAL_REF_TAG_VALID_MASK	0x1
+#define RDIF_TASK_CONTEXT_INITIAL_REF_TAG_VALID_SHIFT	1
+#define RDIF_TASK_CONTEXT_HOST_GUARD_TYPE_MASK		0x1
+#define RDIF_TASK_CONTEXT_HOST_GUARD_TYPE_SHIFT		2
+#define RDIF_TASK_CONTEXT_SET_ERROR_WITH_EOP_MASK	0x1
+#define RDIF_TASK_CONTEXT_SET_ERROR_WITH_EOP_SHIFT	3
+#define RDIF_TASK_CONTEXT_PROTECTION_TYPE_MASK		0x3
+#define RDIF_TASK_CONTEXT_PROTECTION_TYPE_SHIFT		4
+#define RDIF_TASK_CONTEXT_CRC_SEED_MASK			0x1
+#define RDIF_TASK_CONTEXT_CRC_SEED_SHIFT		6
+#define RDIF_TASK_CONTEXT_KEEP_REF_TAG_CONST_MASK	0x1
+#define RDIF_TASK_CONTEXT_KEEP_REF_TAG_CONST_SHIFT	7
 	u8 partial_dif_data[7];
 	__le16 partial_crc_value;
 	__le16 partial_checksum_value;
 	__le32 offset_in_io;
 	__le16 flags1;
-#define RDIF_TASK_CONTEXT_VALIDATEGUARD_MASK           0x1
-#define RDIF_TASK_CONTEXT_VALIDATEGUARD_SHIFT          0
-#define RDIF_TASK_CONTEXT_VALIDATEAPPTAG_MASK          0x1
-#define RDIF_TASK_CONTEXT_VALIDATEAPPTAG_SHIFT         1
-#define RDIF_TASK_CONTEXT_VALIDATEREFTAG_MASK          0x1
-#define RDIF_TASK_CONTEXT_VALIDATEREFTAG_SHIFT         2
-#define RDIF_TASK_CONTEXT_FORWARDGUARD_MASK            0x1
-#define RDIF_TASK_CONTEXT_FORWARDGUARD_SHIFT           3
-#define RDIF_TASK_CONTEXT_FORWARDAPPTAG_MASK           0x1
-#define RDIF_TASK_CONTEXT_FORWARDAPPTAG_SHIFT          4
-#define RDIF_TASK_CONTEXT_FORWARDREFTAG_MASK           0x1
-#define RDIF_TASK_CONTEXT_FORWARDREFTAG_SHIFT          5
-#define RDIF_TASK_CONTEXT_INTERVALSIZE_MASK            0x7
-#define RDIF_TASK_CONTEXT_INTERVALSIZE_SHIFT           6
-#define RDIF_TASK_CONTEXT_HOSTINTERFACE_MASK           0x3
-#define RDIF_TASK_CONTEXT_HOSTINTERFACE_SHIFT          9
-#define RDIF_TASK_CONTEXT_DIFBEFOREDATA_MASK           0x1
-#define RDIF_TASK_CONTEXT_DIFBEFOREDATA_SHIFT          11
-#define RDIF_TASK_CONTEXT_RESERVED0_MASK               0x1
-#define RDIF_TASK_CONTEXT_RESERVED0_SHIFT              12
-#define RDIF_TASK_CONTEXT_NETWORKINTERFACE_MASK        0x1
-#define RDIF_TASK_CONTEXT_NETWORKINTERFACE_SHIFT       13
-#define RDIF_TASK_CONTEXT_FORWARDAPPTAGWITHMASK_MASK   0x1
-#define RDIF_TASK_CONTEXT_FORWARDAPPTAGWITHMASK_SHIFT  14
-#define RDIF_TASK_CONTEXT_FORWARDREFTAGWITHMASK_MASK   0x1
-#define RDIF_TASK_CONTEXT_FORWARDREFTAGWITHMASK_SHIFT  15
+#define RDIF_TASK_CONTEXT_VALIDATE_GUARD_MASK			0x1
+#define RDIF_TASK_CONTEXT_VALIDATE_GUARD_SHIFT			0
+#define RDIF_TASK_CONTEXT_VALIDATE_APP_TAG_MASK			0x1
+#define RDIF_TASK_CONTEXT_VALIDATE_APP_TAG_SHIFT		1
+#define RDIF_TASK_CONTEXT_VALIDATE_REF_TAG_MASK			0x1
+#define RDIF_TASK_CONTEXT_VALIDATE_REF_TAG_SHIFT		2
+#define RDIF_TASK_CONTEXT_FORWARD_GUARD_MASK			0x1
+#define RDIF_TASK_CONTEXT_FORWARD_GUARD_SHIFT			3
+#define RDIF_TASK_CONTEXT_FORWARD_APP_TAG_MASK			0x1
+#define RDIF_TASK_CONTEXT_FORWARD_APP_TAG_SHIFT			4
+#define RDIF_TASK_CONTEXT_FORWARD_REF_TAG_MASK			0x1
+#define RDIF_TASK_CONTEXT_FORWARD_REF_TAG_SHIFT			5
+#define RDIF_TASK_CONTEXT_INTERVAL_SIZE_MASK			0x7
+#define RDIF_TASK_CONTEXT_INTERVAL_SIZE_SHIFT			6
+#define RDIF_TASK_CONTEXT_HOST_INTERFACE_MASK			0x3
+#define RDIF_TASK_CONTEXT_HOST_INTERFACE_SHIFT			9
+#define RDIF_TASK_CONTEXT_DIF_BEFORE_DATA_MASK			0x1
+#define RDIF_TASK_CONTEXT_DIF_BEFORE_DATA_SHIFT			11
+#define RDIF_TASK_CONTEXT_RESERVED0_MASK			0x1
+#define RDIF_TASK_CONTEXT_RESERVED0_SHIFT			12
+#define RDIF_TASK_CONTEXT_NETWORK_INTERFACE_MASK		0x1
+#define RDIF_TASK_CONTEXT_NETWORK_INTERFACE_SHIFT		13
+#define RDIF_TASK_CONTEXT_FORWARD_APP_TAG_WITH_MASK_MASK	0x1
+#define RDIF_TASK_CONTEXT_FORWARD_APP_TAG_WITH_MASK_SHIFT	14
+#define RDIF_TASK_CONTEXT_FORWARD_REF_TAG_WITH_MASK_MASK	0x1
+#define RDIF_TASK_CONTEXT_FORWARD_REF_TAG_WITH_MASK_SHIFT	15
 	__le16 state;
-#define RDIF_TASK_CONTEXT_RECEIVEDDIFBYTESLEFT_MASK    0xF
-#define RDIF_TASK_CONTEXT_RECEIVEDDIFBYTESLEFT_SHIFT   0
-#define RDIF_TASK_CONTEXT_TRANSMITEDDIFBYTESLEFT_MASK  0xF
-#define RDIF_TASK_CONTEXT_TRANSMITEDDIFBYTESLEFT_SHIFT 4
-#define RDIF_TASK_CONTEXT_ERRORINIO_MASK               0x1
-#define RDIF_TASK_CONTEXT_ERRORINIO_SHIFT              8
-#define RDIF_TASK_CONTEXT_CHECKSUMOVERFLOW_MASK        0x1
-#define RDIF_TASK_CONTEXT_CHECKSUMOVERFLOW_SHIFT       9
-#define RDIF_TASK_CONTEXT_REFTAGMASK_MASK              0xF
-#define RDIF_TASK_CONTEXT_REFTAGMASK_SHIFT             10
-#define RDIF_TASK_CONTEXT_RESERVED1_MASK               0x3
-#define RDIF_TASK_CONTEXT_RESERVED1_SHIFT              14
+#define RDIF_TASK_CONTEXT_RECEIVED_DIF_BYTES_LEFT_MASK		0xF
+#define RDIF_TASK_CONTEXT_RECEIVED_DIF_BYTES_LEFT_SHIFT		0
+#define RDIF_TASK_CONTEXT_TRANSMITED_DIF_BYTES_LEFT_MASK	0xF
+#define RDIF_TASK_CONTEXT_TRANSMITED_DIF_BYTES_LEFT_SHIFT	4
+#define RDIF_TASK_CONTEXT_ERROR_IN_IO_MASK			0x1
+#define RDIF_TASK_CONTEXT_ERROR_IN_IO_SHIFT			8
+#define RDIF_TASK_CONTEXT_CHECKSUM_OVERFLOW_MASK		0x1
+#define RDIF_TASK_CONTEXT_CHECKSUM_OVERFLOW_SHIFT		9
+#define RDIF_TASK_CONTEXT_REF_TAG_MASK_MASK			0xF
+#define RDIF_TASK_CONTEXT_REF_TAG_MASK_SHIFT			10
+#define RDIF_TASK_CONTEXT_RESERVED1_MASK			0x3
+#define RDIF_TASK_CONTEXT_RESERVED1_SHIFT			14
 	__le32 reserved2;
 };
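
Each *_MASK/*_SHIFT pair above describes one sub-field of the preceding flags word. qed's common_hsi.h provides generic GET_FIELD()/SET_FIELD() helpers that token-paste exactly this naming convention (name##_MASK, name##_SHIFT); a minimal sketch of that usage, with the chosen field and the function itself purely illustrative:

	/* Illustrative sketch: pack and read back the 2-bit PROTECTION_TYPE
	 * sub-field of flags0 via the generic common_hsi.h helpers, where
	 * GET_FIELD(v, name) == ((v) >> name##_SHIFT) & name##_MASK.
	 */
	static u8 rdif_prot_type_example(struct rdif_task_context *ctx)
	{
		SET_FIELD(ctx->flags0, RDIF_TASK_CONTEXT_PROTECTION_TYPE, 2);
		return GET_FIELD(ctx->flags0, RDIF_TASK_CONTEXT_PROTECTION_TYPE);
	}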
 
-/* RSS hash type */
-enum rss_hash_type {
-	RSS_HASH_TYPE_DEFAULT	= 0,
-	RSS_HASH_TYPE_IPV4	= 1,
-	RSS_HASH_TYPE_TCP_IPV4	= 2,
-	RSS_HASH_TYPE_IPV6	= 3,
-	RSS_HASH_TYPE_TCP_IPV6	= 4,
-	RSS_HASH_TYPE_UDP_IPV4	= 5,
-	RSS_HASH_TYPE_UDP_IPV6	= 6,
-	MAX_RSS_HASH_TYPE
-};
-
-/* status block structure */
-struct status_block {
-	__le16	pi_array[PIS_PER_SB];
+/* Status block structure */
+struct status_block_e4 {
+	__le16	pi_array[PIS_PER_SB_E4];
 	__le32	sb_num;
-#define STATUS_BLOCK_SB_NUM_MASK      0x1FF
-#define STATUS_BLOCK_SB_NUM_SHIFT     0
-#define STATUS_BLOCK_ZERO_PAD_MASK    0x7F
-#define STATUS_BLOCK_ZERO_PAD_SHIFT   9
-#define STATUS_BLOCK_ZERO_PAD2_MASK   0xFFFF
-#define STATUS_BLOCK_ZERO_PAD2_SHIFT  16
+#define STATUS_BLOCK_E4_SB_NUM_MASK	0x1FF
+#define STATUS_BLOCK_E4_SB_NUM_SHIFT	0
+#define STATUS_BLOCK_E4_ZERO_PAD_MASK	0x7F
+#define STATUS_BLOCK_E4_ZERO_PAD_SHIFT	9
+#define STATUS_BLOCK_E4_ZERO_PAD2_MASK	0xFFFF
+#define STATUS_BLOCK_E4_ZERO_PAD2_SHIFT	16
 	__le32 prod_index;
-#define STATUS_BLOCK_PROD_INDEX_MASK  0xFFFFFF
-#define STATUS_BLOCK_PROD_INDEX_SHIFT 0
-#define STATUS_BLOCK_ZERO_PAD3_MASK   0xFF
-#define STATUS_BLOCK_ZERO_PAD3_SHIFT  24
+#define STATUS_BLOCK_E4_PROD_INDEX_MASK		0xFFFFFF
+#define STATUS_BLOCK_E4_PROD_INDEX_SHIFT	0
+#define STATUS_BLOCK_E4_ZERO_PAD3_MASK		0xFF
+#define STATUS_BLOCK_E4_ZERO_PAD3_SHIFT		24
 };
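
The sb_num and prod_index words each carry one real sub-field plus zero padding, so a consumer typically just masks the 24-bit producer out of prod_index after the endianness conversion. A sketch, with the function name assumed:

	/* Illustrative sketch: extract the 24-bit producer index from an E4
	 * status block; prod_index is little-endian on the wire.
	 */
	static u32 sb_e4_prod_example(struct status_block_e4 *sb)
	{
		return le32_to_cpu(sb->prod_index) &
		       STATUS_BLOCK_E4_PROD_INDEX_MASK;
	}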
 
+/* Tdif context */
 struct tdif_task_context {
 	__le32 initial_ref_tag;
 	__le16 app_tag_value;
 	__le16 app_tag_mask;
-	__le16 partial_crc_valueB;
-	__le16 partial_checksum_valueB;
+	__le16 partial_crc_value_b;
+	__le16 partial_checksum_value_b;
 	__le16 stateB;
-#define TDIF_TASK_CONTEXT_RECEIVEDDIFBYTESLEFTB_MASK    0xF
-#define TDIF_TASK_CONTEXT_RECEIVEDDIFBYTESLEFTB_SHIFT   0
-#define TDIF_TASK_CONTEXT_TRANSMITEDDIFBYTESLEFTB_MASK  0xF
-#define TDIF_TASK_CONTEXT_TRANSMITEDDIFBYTESLEFTB_SHIFT 4
-#define TDIF_TASK_CONTEXT_ERRORINIOB_MASK               0x1
-#define TDIF_TASK_CONTEXT_ERRORINIOB_SHIFT              8
-#define TDIF_TASK_CONTEXT_CHECKSUMOVERFLOW_MASK         0x1
-#define TDIF_TASK_CONTEXT_CHECKSUMOVERFLOW_SHIFT        9
-#define TDIF_TASK_CONTEXT_RESERVED0_MASK                0x3F
-#define TDIF_TASK_CONTEXT_RESERVED0_SHIFT               10
+#define TDIF_TASK_CONTEXT_RECEIVED_DIF_BYTES_LEFT_B_MASK	0xF
+#define TDIF_TASK_CONTEXT_RECEIVED_DIF_BYTES_LEFT_B_SHIFT	0
+#define TDIF_TASK_CONTEXT_TRANSMITED_DIF_BYTES_LEFT_B_MASK	0xF
+#define TDIF_TASK_CONTEXT_TRANSMITED_DIF_BYTES_LEFT_B_SHIFT	4
+#define TDIF_TASK_CONTEXT_ERROR_IN_IO_B_MASK			0x1
+#define TDIF_TASK_CONTEXT_ERROR_IN_IO_B_SHIFT			8
+#define TDIF_TASK_CONTEXT_CHECKSUM_VERFLOW_MASK			0x1
+#define TDIF_TASK_CONTEXT_CHECKSUM_VERFLOW_SHIFT		9
+#define TDIF_TASK_CONTEXT_RESERVED0_MASK			0x3F
+#define TDIF_TASK_CONTEXT_RESERVED0_SHIFT			10
 	u8 reserved1;
 	u8 flags0;
-#define TDIF_TASK_CONTEXT_IGNOREAPPTAG_MASK             0x1
-#define TDIF_TASK_CONTEXT_IGNOREAPPTAG_SHIFT            0
-#define TDIF_TASK_CONTEXT_INITIALREFTAGVALID_MASK       0x1
-#define TDIF_TASK_CONTEXT_INITIALREFTAGVALID_SHIFT      1
-#define TDIF_TASK_CONTEXT_HOSTGUARDTYPE_MASK            0x1
-#define TDIF_TASK_CONTEXT_HOSTGUARDTYPE_SHIFT           2
-#define TDIF_TASK_CONTEXT_SETERRORWITHEOP_MASK          0x1
-#define TDIF_TASK_CONTEXT_SETERRORWITHEOP_SHIFT         3
-#define TDIF_TASK_CONTEXT_PROTECTIONTYPE_MASK           0x3
-#define TDIF_TASK_CONTEXT_PROTECTIONTYPE_SHIFT          4
-#define TDIF_TASK_CONTEXT_CRC_SEED_MASK                 0x1
-#define TDIF_TASK_CONTEXT_CRC_SEED_SHIFT                6
-#define TDIF_TASK_CONTEXT_RESERVED2_MASK                0x1
-#define TDIF_TASK_CONTEXT_RESERVED2_SHIFT               7
+#define TDIF_TASK_CONTEXT_IGNORE_APP_TAG_MASK			0x1
+#define TDIF_TASK_CONTEXT_IGNORE_APP_TAG_SHIFT			0
+#define TDIF_TASK_CONTEXT_INITIAL_REF_TAG_VALID_MASK		0x1
+#define TDIF_TASK_CONTEXT_INITIAL_REF_TAG_VALID_SHIFT		1
+#define TDIF_TASK_CONTEXT_HOST_GUARD_TYPE_MASK			0x1
+#define TDIF_TASK_CONTEXT_HOST_GUARD_TYPE_SHIFT			2
+#define TDIF_TASK_CONTEXT_SET_ERROR_WITH_EOP_MASK		0x1
+#define TDIF_TASK_CONTEXT_SET_ERROR_WITH_EOP_SHIFT		3
+#define TDIF_TASK_CONTEXT_PROTECTION_TYPE_MASK			0x3
+#define TDIF_TASK_CONTEXT_PROTECTION_TYPE_SHIFT			4
+#define TDIF_TASK_CONTEXT_CRC_SEED_MASK				0x1
+#define TDIF_TASK_CONTEXT_CRC_SEED_SHIFT			6
+#define TDIF_TASK_CONTEXT_RESERVED2_MASK			0x1
+#define TDIF_TASK_CONTEXT_RESERVED2_SHIFT			7
 	__le32 flags1;
-#define TDIF_TASK_CONTEXT_VALIDATEGUARD_MASK            0x1
-#define TDIF_TASK_CONTEXT_VALIDATEGUARD_SHIFT           0
-#define TDIF_TASK_CONTEXT_VALIDATEAPPTAG_MASK           0x1
-#define TDIF_TASK_CONTEXT_VALIDATEAPPTAG_SHIFT          1
-#define TDIF_TASK_CONTEXT_VALIDATEREFTAG_MASK           0x1
-#define TDIF_TASK_CONTEXT_VALIDATEREFTAG_SHIFT          2
-#define TDIF_TASK_CONTEXT_FORWARDGUARD_MASK             0x1
-#define TDIF_TASK_CONTEXT_FORWARDGUARD_SHIFT            3
-#define TDIF_TASK_CONTEXT_FORWARDAPPTAG_MASK            0x1
-#define TDIF_TASK_CONTEXT_FORWARDAPPTAG_SHIFT           4
-#define TDIF_TASK_CONTEXT_FORWARDREFTAG_MASK            0x1
-#define TDIF_TASK_CONTEXT_FORWARDREFTAG_SHIFT           5
-#define TDIF_TASK_CONTEXT_INTERVALSIZE_MASK             0x7
-#define TDIF_TASK_CONTEXT_INTERVALSIZE_SHIFT            6
-#define TDIF_TASK_CONTEXT_HOSTINTERFACE_MASK            0x3
-#define TDIF_TASK_CONTEXT_HOSTINTERFACE_SHIFT           9
-#define TDIF_TASK_CONTEXT_DIFBEFOREDATA_MASK            0x1
-#define TDIF_TASK_CONTEXT_DIFBEFOREDATA_SHIFT           11
-#define TDIF_TASK_CONTEXT_RESERVED3_MASK                0x1
-#define TDIF_TASK_CONTEXT_RESERVED3_SHIFT               12
-#define TDIF_TASK_CONTEXT_NETWORKINTERFACE_MASK         0x1
-#define TDIF_TASK_CONTEXT_NETWORKINTERFACE_SHIFT        13
-#define TDIF_TASK_CONTEXT_RECEIVEDDIFBYTESLEFTA_MASK    0xF
-#define TDIF_TASK_CONTEXT_RECEIVEDDIFBYTESLEFTA_SHIFT   14
-#define TDIF_TASK_CONTEXT_TRANSMITEDDIFBYTESLEFTA_MASK  0xF
-#define TDIF_TASK_CONTEXT_TRANSMITEDDIFBYTESLEFTA_SHIFT 18
-#define TDIF_TASK_CONTEXT_ERRORINIOA_MASK               0x1
-#define TDIF_TASK_CONTEXT_ERRORINIOA_SHIFT              22
-#define TDIF_TASK_CONTEXT_CHECKSUMOVERFLOWA_MASK        0x1
-#define TDIF_TASK_CONTEXT_CHECKSUMOVERFLOWA_SHIFT       23
-#define TDIF_TASK_CONTEXT_REFTAGMASK_MASK               0xF
-#define TDIF_TASK_CONTEXT_REFTAGMASK_SHIFT              24
-#define TDIF_TASK_CONTEXT_FORWARDAPPTAGWITHMASK_MASK    0x1
-#define TDIF_TASK_CONTEXT_FORWARDAPPTAGWITHMASK_SHIFT   28
-#define TDIF_TASK_CONTEXT_FORWARDREFTAGWITHMASK_MASK    0x1
-#define TDIF_TASK_CONTEXT_FORWARDREFTAGWITHMASK_SHIFT   29
-#define TDIF_TASK_CONTEXT_KEEPREFTAGCONST_MASK          0x1
-#define TDIF_TASK_CONTEXT_KEEPREFTAGCONST_SHIFT         30
-#define TDIF_TASK_CONTEXT_RESERVED4_MASK                0x1
-#define TDIF_TASK_CONTEXT_RESERVED4_SHIFT               31
-	__le32 offset_in_iob;
+#define TDIF_TASK_CONTEXT_VALIDATE_GUARD_MASK			0x1
+#define TDIF_TASK_CONTEXT_VALIDATE_GUARD_SHIFT			0
+#define TDIF_TASK_CONTEXT_VALIDATE_APP_TAG_MASK			0x1
+#define TDIF_TASK_CONTEXT_VALIDATE_APP_TAG_SHIFT		1
+#define TDIF_TASK_CONTEXT_VALIDATE_REF_TAG_MASK			0x1
+#define TDIF_TASK_CONTEXT_VALIDATE_REF_TAG_SHIFT		2
+#define TDIF_TASK_CONTEXT_FORWARD_GUARD_MASK			0x1
+#define TDIF_TASK_CONTEXT_FORWARD_GUARD_SHIFT			3
+#define TDIF_TASK_CONTEXT_FORWARD_APP_TAG_MASK			0x1
+#define TDIF_TASK_CONTEXT_FORWARD_APP_TAG_SHIFT			4
+#define TDIF_TASK_CONTEXT_FORWARD_REF_TAG_MASK			0x1
+#define TDIF_TASK_CONTEXT_FORWARD_REF_TAG_SHIFT			5
+#define TDIF_TASK_CONTEXT_INTERVAL_SIZE_MASK			0x7
+#define TDIF_TASK_CONTEXT_INTERVAL_SIZE_SHIFT			6
+#define TDIF_TASK_CONTEXT_HOST_INTERFACE_MASK			0x3
+#define TDIF_TASK_CONTEXT_HOST_INTERFACE_SHIFT			9
+#define TDIF_TASK_CONTEXT_DIF_BEFORE_DATA_MASK			0x1
+#define TDIF_TASK_CONTEXT_DIF_BEFORE_DATA_SHIFT			11
+#define TDIF_TASK_CONTEXT_RESERVED3_MASK			0x1
+#define TDIF_TASK_CONTEXT_RESERVED3_SHIFT			12
+#define TDIF_TASK_CONTEXT_NETWORK_INTERFACE_MASK		0x1
+#define TDIF_TASK_CONTEXT_NETWORK_INTERFACE_SHIFT		13
+#define TDIF_TASK_CONTEXT_RECEIVED_DIF_BYTES_LEFT_A_MASK	0xF
+#define TDIF_TASK_CONTEXT_RECEIVED_DIF_BYTES_LEFT_A_SHIFT	14
+#define TDIF_TASK_CONTEXT_TRANSMITED_DIF_BYTES_LEFT_A_MASK	0xF
+#define TDIF_TASK_CONTEXT_TRANSMITED_DIF_BYTES_LEFT_A_SHIFT	18
+#define TDIF_TASK_CONTEXT_ERROR_IN_IO_A_MASK			0x1
+#define TDIF_TASK_CONTEXT_ERROR_IN_IO_A_SHIFT			22
+#define TDIF_TASK_CONTEXT_CHECKSUM_OVERFLOW_A_MASK		0x1
+#define TDIF_TASK_CONTEXT_CHECKSUM_OVERFLOW_A_SHIFT		23
+#define TDIF_TASK_CONTEXT_REF_TAG_MASK_MASK			0xF
+#define TDIF_TASK_CONTEXT_REF_TAG_MASK_SHIFT			24
+#define TDIF_TASK_CONTEXT_FORWARD_APP_TAG_WITH_MASK_MASK	0x1
+#define TDIF_TASK_CONTEXT_FORWARD_APP_TAG_WITH_MASK_SHIFT	28
+#define TDIF_TASK_CONTEXT_FORWARD_REF_TAG_WITH_MASK_MASK	0x1
+#define TDIF_TASK_CONTEXT_FORWARD_REF_TAG_WITH_MASK_SHIFT	29
+#define TDIF_TASK_CONTEXT_KEEP_REF_TAG_CONST_MASK		0x1
+#define TDIF_TASK_CONTEXT_KEEP_REF_TAG_CONST_SHIFT		30
+#define TDIF_TASK_CONTEXT_RESERVED4_MASK			0x1
+#define TDIF_TASK_CONTEXT_RESERVED4_SHIFT			31
+	__le32 offset_in_io_b;
 	__le16 partial_crc_value_a;
-	__le16 partial_checksum_valuea_;
-	__le32 offset_in_ioa;
+	__le16 partial_checksum_value_a;
+	__le32 offset_in_io_a;
 	u8 partial_dif_data_a[8];
 	u8 partial_dif_data_b[8];
 };
 
+/* Timers context */
 struct timers_context {
 	__le32 logical_client_0;
 #define TIMERS_CONTEXT_EXPIRATIONTIMELC0_MASK	0x7FFFFFF
@@ -1385,6 +1378,7 @@ struct timers_context {
 #define TIMERS_CONTEXT_RESERVED7_SHIFT			29
 };
 
+/* Enum for next_protocol field of tunnel_parsing_flags / tunnelTypeDesc */
 enum tunnel_next_protocol {
 	e_unknown = 0,
 	e_l2 = 1,
diff --git a/include/linux/qed/eth_common.h b/include/linux/qed/eth_common.h
index cb06e6e368e1..9db02856623b 100644
--- a/include/linux/qed/eth_common.h
+++ b/include/linux/qed/eth_common.h
@@ -36,150 +36,168 @@
 /********************/
 /* ETH FW CONSTANTS */
 /********************/
-#define ETH_HSI_VER_MAJOR                   3
-#define ETH_HSI_VER_MINOR	10
+
+#define ETH_HSI_VER_MAJOR		3
+#define ETH_HSI_VER_MINOR		10
 
 #define ETH_HSI_VER_NO_PKT_LEN_TUNN	5
 
-#define ETH_CACHE_LINE_SIZE                 64
-#define ETH_RX_CQE_GAP	32
-#define ETH_MAX_RAMROD_PER_CON                          8
-#define ETH_TX_BD_PAGE_SIZE_BYTES                       4096
-#define ETH_RX_BD_PAGE_SIZE_BYTES                       4096
-#define ETH_RX_CQE_PAGE_SIZE_BYTES                      4096
-#define ETH_RX_NUM_NEXT_PAGE_BDS                        2
-
-#define ETH_MAX_TUNN_LSO_INNER_IPV4_OFFSET          253
-#define ETH_MAX_TUNN_LSO_INNER_IPV6_OFFSET          251
-
-#define ETH_TX_MIN_BDS_PER_NON_LSO_PKT                          1
-#define ETH_TX_MAX_BDS_PER_NON_LSO_PACKET                       18
-#define ETH_TX_MAX_BDS_PER_LSO_PACKET	255
-#define ETH_TX_MAX_LSO_HDR_NBD                                          4
-#define ETH_TX_MIN_BDS_PER_LSO_PKT                                      3
-#define ETH_TX_MIN_BDS_PER_TUNN_IPV6_WITH_EXT_PKT       3
-#define ETH_TX_MIN_BDS_PER_IPV6_WITH_EXT_PKT            2
-#define ETH_TX_MIN_BDS_PER_PKT_W_LOOPBACK_MODE          2
-#define ETH_TX_MAX_NON_LSO_PKT_LEN	(9700 - (4 + 4 + 12 + 8))
-#define ETH_TX_MAX_LSO_HDR_BYTES                    510
-#define ETH_TX_LSO_WINDOW_BDS_NUM	(18 - 1)
-#define ETH_TX_LSO_WINDOW_MIN_LEN	9700
-#define ETH_TX_MAX_LSO_PAYLOAD_LEN	0xFE000
-#define ETH_TX_NUM_SAME_AS_LAST_ENTRIES	320
-#define ETH_TX_INACTIVE_SAME_AS_LAST	0xFFFF
-
-#define ETH_NUM_STATISTIC_COUNTERS                      MAX_NUM_VPORTS
+#define ETH_CACHE_LINE_SIZE		64
+#define ETH_RX_CQE_GAP			32
+#define ETH_MAX_RAMROD_PER_CON		8
+#define ETH_TX_BD_PAGE_SIZE_BYTES	4096
+#define ETH_RX_BD_PAGE_SIZE_BYTES	4096
+#define ETH_RX_CQE_PAGE_SIZE_BYTES	4096
+#define ETH_RX_NUM_NEXT_PAGE_BDS	2
+
+#define ETH_MAX_TUNN_LSO_INNER_IPV4_OFFSET	253
+#define ETH_MAX_TUNN_LSO_INNER_IPV6_OFFSET	251
+
+#define ETH_TX_MIN_BDS_PER_NON_LSO_PKT			1
+#define ETH_TX_MAX_BDS_PER_NON_LSO_PACKET		18
+#define ETH_TX_MAX_BDS_PER_LSO_PACKET			255
+#define ETH_TX_MAX_LSO_HDR_NBD				4
+#define ETH_TX_MIN_BDS_PER_LSO_PKT			3
+#define ETH_TX_MIN_BDS_PER_TUNN_IPV6_WITH_EXT_PKT	3
+#define ETH_TX_MIN_BDS_PER_IPV6_WITH_EXT_PKT		2
+#define ETH_TX_MIN_BDS_PER_PKT_W_LOOPBACK_MODE		2
+#define ETH_TX_MAX_NON_LSO_PKT_LEN		(9700 - (4 + 4 + 12 + 8))
+#define ETH_TX_MAX_LSO_HDR_BYTES			510
+#define ETH_TX_LSO_WINDOW_BDS_NUM			(18 - 1)
+#define ETH_TX_LSO_WINDOW_MIN_LEN			9700
+#define ETH_TX_MAX_LSO_PAYLOAD_LEN			0xFE000
+#define ETH_TX_NUM_SAME_AS_LAST_ENTRIES			320
+#define ETH_TX_INACTIVE_SAME_AS_LAST			0xFFFF
+
+#define ETH_NUM_STATISTIC_COUNTERS			MAX_NUM_VPORTS
 #define ETH_NUM_STATISTIC_COUNTERS_DOUBLE_VF_ZONE \
 	(ETH_NUM_STATISTIC_COUNTERS - MAX_NUM_VFS / 2)
 #define ETH_NUM_STATISTIC_COUNTERS_QUAD_VF_ZONE \
 	(ETH_NUM_STATISTIC_COUNTERS - 3 * MAX_NUM_VFS / 4)
 
 /* Maximum number of buffers, used for RX packet placement */
-#define ETH_RX_MAX_BUFF_PER_PKT	5
-#define ETH_RX_BD_THRESHOLD	12
+#define ETH_RX_MAX_BUFF_PER_PKT		5
+#define ETH_RX_BD_THRESHOLD		12
 
-/* num of MAC/VLAN filters */
-#define ETH_NUM_MAC_FILTERS                                     512
-#define ETH_NUM_VLAN_FILTERS                            512
+/* Num of MAC/VLAN filters */
+#define ETH_NUM_MAC_FILTERS		512
+#define ETH_NUM_VLAN_FILTERS		512
 
-/* approx. multicast constants */
-#define ETH_MULTICAST_BIN_FROM_MAC_SEED     0
-#define ETH_MULTICAST_MAC_BINS                          256
-#define ETH_MULTICAST_MAC_BINS_IN_REGS          (ETH_MULTICAST_MAC_BINS / 32)
+/* Approx. multicast constants */
+#define ETH_MULTICAST_BIN_FROM_MAC_SEED	0
+#define ETH_MULTICAST_MAC_BINS		256
+#define ETH_MULTICAST_MAC_BINS_IN_REGS	(ETH_MULTICAST_MAC_BINS / 32)
 
-/*  ethernet vport update constants */
-#define ETH_FILTER_RULES_COUNT                          10
-#define ETH_RSS_IND_TABLE_ENTRIES_NUM           128
-#define ETH_RSS_KEY_SIZE_REGS                       10
-#define ETH_RSS_ENGINE_NUM_K2               207
-#define ETH_RSS_ENGINE_NUM_BB               127
+/* Ethernet vport update constants */
+#define ETH_FILTER_RULES_COUNT		10
+#define ETH_RSS_IND_TABLE_ENTRIES_NUM	128
+#define ETH_RSS_KEY_SIZE_REGS		10
+#define ETH_RSS_ENGINE_NUM_K2		207
+#define ETH_RSS_ENGINE_NUM_BB		127
 
 /* TPA constants */
-#define ETH_TPA_MAX_AGGS_NUM              64
-#define ETH_TPA_CQE_START_LEN_LIST_SIZE   ETH_RX_MAX_BUFF_PER_PKT
-#define ETH_TPA_CQE_CONT_LEN_LIST_SIZE    6
-#define ETH_TPA_CQE_END_LEN_LIST_SIZE     4
+#define ETH_TPA_MAX_AGGS_NUM		64
+#define ETH_TPA_CQE_START_LEN_LIST_SIZE	ETH_RX_MAX_BUFF_PER_PKT
+#define ETH_TPA_CQE_CONT_LEN_LIST_SIZE	6
+#define ETH_TPA_CQE_END_LEN_LIST_SIZE	4
 
 /* Control frame check constants */
 #define ETH_CTL_FRAME_ETH_TYPE_NUM	4
 
+/* GFS constants */
+#define ETH_GFT_TRASH_CAN_VPORT		0x1FF
+
+/* Destination port mode */
+enum dest_port_mode {
+	DEST_PORT_PHY,
+	DEST_PORT_LOOPBACK,
+	DEST_PORT_PHY_LOOPBACK,
+	DEST_PORT_DROP,
+	MAX_DEST_PORT_MODE
+};
+
+/* Ethernet address type */
+enum eth_addr_type {
+	BROADCAST_ADDRESS,
+	MULTICAST_ADDRESS,
+	UNICAST_ADDRESS,
+	UNKNOWN_ADDRESS,
+	MAX_ETH_ADDR_TYPE
+};
+
 struct eth_tx_1st_bd_flags {
 	u8 bitfields;
-#define ETH_TX_1ST_BD_FLAGS_START_BD_MASK         0x1
-#define ETH_TX_1ST_BD_FLAGS_START_BD_SHIFT        0
-#define ETH_TX_1ST_BD_FLAGS_FORCE_VLAN_MODE_MASK  0x1
-#define ETH_TX_1ST_BD_FLAGS_FORCE_VLAN_MODE_SHIFT 1
-#define ETH_TX_1ST_BD_FLAGS_IP_CSUM_MASK          0x1
-#define ETH_TX_1ST_BD_FLAGS_IP_CSUM_SHIFT         2
-#define ETH_TX_1ST_BD_FLAGS_L4_CSUM_MASK          0x1
-#define ETH_TX_1ST_BD_FLAGS_L4_CSUM_SHIFT         3
-#define ETH_TX_1ST_BD_FLAGS_VLAN_INSERTION_MASK   0x1
-#define ETH_TX_1ST_BD_FLAGS_VLAN_INSERTION_SHIFT  4
-#define ETH_TX_1ST_BD_FLAGS_LSO_MASK              0x1
-#define ETH_TX_1ST_BD_FLAGS_LSO_SHIFT             5
-#define ETH_TX_1ST_BD_FLAGS_TUNN_IP_CSUM_MASK     0x1
-#define ETH_TX_1ST_BD_FLAGS_TUNN_IP_CSUM_SHIFT    6
-#define ETH_TX_1ST_BD_FLAGS_TUNN_L4_CSUM_MASK     0x1
-#define ETH_TX_1ST_BD_FLAGS_TUNN_L4_CSUM_SHIFT    7
+#define ETH_TX_1ST_BD_FLAGS_START_BD_MASK		0x1
+#define ETH_TX_1ST_BD_FLAGS_START_BD_SHIFT		0
+#define ETH_TX_1ST_BD_FLAGS_FORCE_VLAN_MODE_MASK	0x1
+#define ETH_TX_1ST_BD_FLAGS_FORCE_VLAN_MODE_SHIFT	1
+#define ETH_TX_1ST_BD_FLAGS_IP_CSUM_MASK		0x1
+#define ETH_TX_1ST_BD_FLAGS_IP_CSUM_SHIFT		2
+#define ETH_TX_1ST_BD_FLAGS_L4_CSUM_MASK		0x1
+#define ETH_TX_1ST_BD_FLAGS_L4_CSUM_SHIFT		3
+#define ETH_TX_1ST_BD_FLAGS_VLAN_INSERTION_MASK		0x1
+#define ETH_TX_1ST_BD_FLAGS_VLAN_INSERTION_SHIFT	4
+#define ETH_TX_1ST_BD_FLAGS_LSO_MASK			0x1
+#define ETH_TX_1ST_BD_FLAGS_LSO_SHIFT			5
+#define ETH_TX_1ST_BD_FLAGS_TUNN_IP_CSUM_MASK		0x1
+#define ETH_TX_1ST_BD_FLAGS_TUNN_IP_CSUM_SHIFT		6
+#define ETH_TX_1ST_BD_FLAGS_TUNN_L4_CSUM_MASK		0x1
+#define ETH_TX_1ST_BD_FLAGS_TUNN_L4_CSUM_SHIFT		7
 };
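
All of these are single-bit flags, so a transmit path can OR them straight into bitfields through the *_SHIFT values rather than going through SET_FIELD(). A hedged sketch of requesting checksum offload on the first BD (the function itself is illustrative):

	/* Illustrative sketch: mark the first BD as needing IP and L4
	 * checksum offload by setting the corresponding single-bit flags.
	 */
	static void tx_1st_bd_csum_example(struct eth_tx_1st_bd_flags *flags)
	{
		flags->bitfields |= 1 << ETH_TX_1ST_BD_FLAGS_IP_CSUM_SHIFT;
		flags->bitfields |= 1 << ETH_TX_1ST_BD_FLAGS_L4_CSUM_SHIFT;
	}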
 
-/* The parsing information data fo rthe first tx bd of a given packet. */
+/* The parsing information data for the first tx bd of a given packet */
 struct eth_tx_data_1st_bd {
 	__le16 vlan;
 	u8 nbds;
 	struct eth_tx_1st_bd_flags bd_flags;
 	__le16 bitfields;
-#define ETH_TX_DATA_1ST_BD_TUNN_FLAG_MASK  0x1
-#define ETH_TX_DATA_1ST_BD_TUNN_FLAG_SHIFT 0
-#define ETH_TX_DATA_1ST_BD_RESERVED0_MASK          0x1
-#define ETH_TX_DATA_1ST_BD_RESERVED0_SHIFT         1
-#define ETH_TX_DATA_1ST_BD_PKT_LEN_MASK    0x3FFF
-#define ETH_TX_DATA_1ST_BD_PKT_LEN_SHIFT   2
+#define ETH_TX_DATA_1ST_BD_TUNN_FLAG_MASK	0x1
+#define ETH_TX_DATA_1ST_BD_TUNN_FLAG_SHIFT	0
+#define ETH_TX_DATA_1ST_BD_RESERVED0_MASK	0x1
+#define ETH_TX_DATA_1ST_BD_RESERVED0_SHIFT	1
+#define ETH_TX_DATA_1ST_BD_PKT_LEN_MASK		0x3FFF
+#define ETH_TX_DATA_1ST_BD_PKT_LEN_SHIFT	2
 };
 
-/* The parsing information data for the second tx bd of a given packet. */
+/* The parsing information data for the second tx bd of a given packet */
 struct eth_tx_data_2nd_bd {
 	__le16 tunn_ip_size;
 	__le16	bitfields1;
-#define ETH_TX_DATA_2ND_BD_TUNN_INNER_L2_HDR_SIZE_W_MASK  0xF
-#define ETH_TX_DATA_2ND_BD_TUNN_INNER_L2_HDR_SIZE_W_SHIFT 0
-#define ETH_TX_DATA_2ND_BD_TUNN_INNER_ETH_TYPE_MASK       0x3
-#define ETH_TX_DATA_2ND_BD_TUNN_INNER_ETH_TYPE_SHIFT      4
-#define ETH_TX_DATA_2ND_BD_DEST_PORT_MODE_MASK            0x3
-#define ETH_TX_DATA_2ND_BD_DEST_PORT_MODE_SHIFT           6
-#define ETH_TX_DATA_2ND_BD_START_BD_MASK                  0x1
-#define ETH_TX_DATA_2ND_BD_START_BD_SHIFT                 8
-#define ETH_TX_DATA_2ND_BD_TUNN_TYPE_MASK                 0x3
-#define ETH_TX_DATA_2ND_BD_TUNN_TYPE_SHIFT                9
-#define ETH_TX_DATA_2ND_BD_TUNN_INNER_IPV6_MASK           0x1
-#define ETH_TX_DATA_2ND_BD_TUNN_INNER_IPV6_SHIFT          11
-#define ETH_TX_DATA_2ND_BD_IPV6_EXT_MASK                  0x1
-#define ETH_TX_DATA_2ND_BD_IPV6_EXT_SHIFT                 12
-#define ETH_TX_DATA_2ND_BD_TUNN_IPV6_EXT_MASK             0x1
-#define ETH_TX_DATA_2ND_BD_TUNN_IPV6_EXT_SHIFT            13
-#define ETH_TX_DATA_2ND_BD_L4_UDP_MASK                    0x1
-#define ETH_TX_DATA_2ND_BD_L4_UDP_SHIFT                   14
-#define ETH_TX_DATA_2ND_BD_L4_PSEUDO_CSUM_MODE_MASK       0x1
-#define ETH_TX_DATA_2ND_BD_L4_PSEUDO_CSUM_MODE_SHIFT      15
+#define ETH_TX_DATA_2ND_BD_TUNN_INNER_L2_HDR_SIZE_W_MASK	0xF
+#define ETH_TX_DATA_2ND_BD_TUNN_INNER_L2_HDR_SIZE_W_SHIFT	0
+#define ETH_TX_DATA_2ND_BD_TUNN_INNER_ETH_TYPE_MASK		0x3
+#define ETH_TX_DATA_2ND_BD_TUNN_INNER_ETH_TYPE_SHIFT		4
+#define ETH_TX_DATA_2ND_BD_DEST_PORT_MODE_MASK			0x3
+#define ETH_TX_DATA_2ND_BD_DEST_PORT_MODE_SHIFT			6
+#define ETH_TX_DATA_2ND_BD_START_BD_MASK			0x1
+#define ETH_TX_DATA_2ND_BD_START_BD_SHIFT			8
+#define ETH_TX_DATA_2ND_BD_TUNN_TYPE_MASK			0x3
+#define ETH_TX_DATA_2ND_BD_TUNN_TYPE_SHIFT			9
+#define ETH_TX_DATA_2ND_BD_TUNN_INNER_IPV6_MASK			0x1
+#define ETH_TX_DATA_2ND_BD_TUNN_INNER_IPV6_SHIFT		11
+#define ETH_TX_DATA_2ND_BD_IPV6_EXT_MASK			0x1
+#define ETH_TX_DATA_2ND_BD_IPV6_EXT_SHIFT			12
+#define ETH_TX_DATA_2ND_BD_TUNN_IPV6_EXT_MASK			0x1
+#define ETH_TX_DATA_2ND_BD_TUNN_IPV6_EXT_SHIFT			13
+#define ETH_TX_DATA_2ND_BD_L4_UDP_MASK				0x1
+#define ETH_TX_DATA_2ND_BD_L4_UDP_SHIFT				14
+#define ETH_TX_DATA_2ND_BD_L4_PSEUDO_CSUM_MODE_MASK		0x1
+#define ETH_TX_DATA_2ND_BD_L4_PSEUDO_CSUM_MODE_SHIFT		15
 	__le16 bitfields2;
-#define ETH_TX_DATA_2ND_BD_L4_HDR_START_OFFSET_W_MASK     0x1FFF
-#define ETH_TX_DATA_2ND_BD_L4_HDR_START_OFFSET_W_SHIFT    0
-#define ETH_TX_DATA_2ND_BD_RESERVED0_MASK                 0x7
-#define ETH_TX_DATA_2ND_BD_RESERVED0_SHIFT                13
+#define ETH_TX_DATA_2ND_BD_L4_HDR_START_OFFSET_W_MASK		0x1FFF
+#define ETH_TX_DATA_2ND_BD_L4_HDR_START_OFFSET_W_SHIFT		0
+#define ETH_TX_DATA_2ND_BD_RESERVED0_MASK			0x7
+#define ETH_TX_DATA_2ND_BD_RESERVED0_SHIFT			13
 };
 
-/* Firmware data for L2-EDPM packet. */
+/* Firmware data for L2-EDPM packet */
 struct eth_edpm_fw_data {
 	struct eth_tx_data_1st_bd data_1st_bd;
 	struct eth_tx_data_2nd_bd data_2nd_bd;
 	__le32 reserved;
 };
 
-struct eth_fast_path_cqe_fw_debug {
-	__le16 reserved2;
-};
-
-/*  tunneling parsing flags */
+/* Tunneling parsing flags */
 struct eth_tunnel_parsing_flags {
 	u8 flags;
 #define	ETH_TUNNEL_PARSING_FLAGS_TYPE_MASK		0x3
@@ -199,24 +217,24 @@ struct eth_tunnel_parsing_flags {
 /* PMD flow control bits */
 struct eth_pmd_flow_flags {
 	u8 flags;
-#define ETH_PMD_FLOW_FLAGS_VALID_MASK	0x1
-#define ETH_PMD_FLOW_FLAGS_VALID_SHIFT	0
-#define ETH_PMD_FLOW_FLAGS_TOGGLE_MASK	0x1
-#define ETH_PMD_FLOW_FLAGS_TOGGLE_SHIFT	1
-#define ETH_PMD_FLOW_FLAGS_RESERVED_MASK 0x3F
-#define ETH_PMD_FLOW_FLAGS_RESERVED_SHIFT 2
+#define ETH_PMD_FLOW_FLAGS_VALID_MASK		0x1
+#define ETH_PMD_FLOW_FLAGS_VALID_SHIFT		0
+#define ETH_PMD_FLOW_FLAGS_TOGGLE_MASK		0x1
+#define ETH_PMD_FLOW_FLAGS_TOGGLE_SHIFT		1
+#define ETH_PMD_FLOW_FLAGS_RESERVED_MASK	0x3F
+#define ETH_PMD_FLOW_FLAGS_RESERVED_SHIFT	2
 };
 
-/* Regular ETH Rx FP CQE. */
+/* Regular ETH Rx FP CQE */
 struct eth_fast_path_rx_reg_cqe {
 	u8 type;
 	u8 bitfields;
-#define ETH_FAST_PATH_RX_REG_CQE_RSS_HASH_TYPE_MASK  0x7
-#define ETH_FAST_PATH_RX_REG_CQE_RSS_HASH_TYPE_SHIFT 0
-#define ETH_FAST_PATH_RX_REG_CQE_TC_MASK             0xF
-#define ETH_FAST_PATH_RX_REG_CQE_TC_SHIFT            3
-#define ETH_FAST_PATH_RX_REG_CQE_RESERVED0_MASK      0x1
-#define ETH_FAST_PATH_RX_REG_CQE_RESERVED0_SHIFT     7
+#define ETH_FAST_PATH_RX_REG_CQE_RSS_HASH_TYPE_MASK	0x7
+#define ETH_FAST_PATH_RX_REG_CQE_RSS_HASH_TYPE_SHIFT	0
+#define ETH_FAST_PATH_RX_REG_CQE_TC_MASK		0xF
+#define ETH_FAST_PATH_RX_REG_CQE_TC_SHIFT		3
+#define ETH_FAST_PATH_RX_REG_CQE_RESERVED0_MASK		0x1
+#define ETH_FAST_PATH_RX_REG_CQE_RESERVED0_SHIFT	7
 	__le16 pkt_len;
 	struct parsing_and_err_flags pars_flags;
 	__le16 vlan_tag;
@@ -225,13 +243,13 @@ struct eth_fast_path_rx_reg_cqe {
 	u8 placement_offset;
 	struct eth_tunnel_parsing_flags tunnel_pars_flags;
 	u8 bd_num;
-	u8 reserved[9];
-	struct eth_fast_path_cqe_fw_debug fw_debug;
-	u8 reserved1[3];
+	u8 reserved;
+	__le16 flow_id;
+	u8 reserved1[11];
 	struct eth_pmd_flow_flags pmd_flags;
 };
 
-/* TPA-continue ETH Rx FP CQE. */
+/* TPA-continue ETH Rx FP CQE */
 struct eth_fast_path_rx_tpa_cont_cqe {
 	u8 type;
 	u8 tpa_agg_index;
@@ -243,7 +261,7 @@ struct eth_fast_path_rx_tpa_cont_cqe {
 	struct eth_pmd_flow_flags pmd_flags;
 };
 
-/* TPA-end ETH Rx FP CQE. */
+/* TPA-end ETH Rx FP CQE */
 struct eth_fast_path_rx_tpa_end_cqe {
 	u8 type;
 	u8 tpa_agg_index;
@@ -259,16 +277,16 @@ struct eth_fast_path_rx_tpa_end_cqe {
 	struct eth_pmd_flow_flags pmd_flags;
 };
 
-/* TPA-start ETH Rx FP CQE. */
+/* TPA-start ETH Rx FP CQE */
 struct eth_fast_path_rx_tpa_start_cqe {
 	u8 type;
 	u8 bitfields;
-#define ETH_FAST_PATH_RX_TPA_START_CQE_RSS_HASH_TYPE_MASK  0x7
-#define ETH_FAST_PATH_RX_TPA_START_CQE_RSS_HASH_TYPE_SHIFT 0
-#define ETH_FAST_PATH_RX_TPA_START_CQE_TC_MASK             0xF
-#define ETH_FAST_PATH_RX_TPA_START_CQE_TC_SHIFT            3
-#define ETH_FAST_PATH_RX_TPA_START_CQE_RESERVED0_MASK      0x1
-#define ETH_FAST_PATH_RX_TPA_START_CQE_RESERVED0_SHIFT     7
+#define ETH_FAST_PATH_RX_TPA_START_CQE_RSS_HASH_TYPE_MASK	0x7
+#define ETH_FAST_PATH_RX_TPA_START_CQE_RSS_HASH_TYPE_SHIFT	0
+#define ETH_FAST_PATH_RX_TPA_START_CQE_TC_MASK			0xF
+#define ETH_FAST_PATH_RX_TPA_START_CQE_TC_SHIFT			3
+#define ETH_FAST_PATH_RX_TPA_START_CQE_RESERVED0_MASK		0x1
+#define ETH_FAST_PATH_RX_TPA_START_CQE_RESERVED0_SHIFT		7
 	__le16 seg_len;
 	struct parsing_and_err_flags pars_flags;
 	__le16 vlan_tag;
@@ -279,7 +297,7 @@ struct eth_fast_path_rx_tpa_start_cqe {
 	u8 tpa_agg_index;
 	u8 header_len;
 	__le16 ext_bd_len_list[ETH_TPA_CQE_START_LEN_LIST_SIZE];
-	struct eth_fast_path_cqe_fw_debug fw_debug;
+	__le16 flow_id;
 	u8 reserved;
 	struct eth_pmd_flow_flags pmd_flags;
 };
@@ -295,24 +313,24 @@ struct eth_rx_bd {
 	struct regpair addr;
 };
 
-/* regular ETH Rx SP CQE */
+/* Regular ETH Rx SP CQE */
 struct eth_slow_path_rx_cqe {
-	u8	type;
-	u8	ramrod_cmd_id;
-	u8	error_flag;
-	u8	reserved[25];
-	__le16	echo;
-	u8	reserved1;
+	u8 type;
+	u8 ramrod_cmd_id;
+	u8 error_flag;
+	u8 reserved[25];
+	__le16 echo;
+	u8 reserved1;
 	struct eth_pmd_flow_flags pmd_flags;
 };
 
-/* union for all ETH Rx CQE types */
+/* Union for all ETH Rx CQE types */
 union eth_rx_cqe {
-	struct eth_fast_path_rx_reg_cqe		fast_path_regular;
-	struct eth_fast_path_rx_tpa_start_cqe	fast_path_tpa_start;
-	struct eth_fast_path_rx_tpa_cont_cqe	fast_path_tpa_cont;
-	struct eth_fast_path_rx_tpa_end_cqe	fast_path_tpa_end;
-	struct eth_slow_path_rx_cqe		slow_path;
+	struct eth_fast_path_rx_reg_cqe fast_path_regular;
+	struct eth_fast_path_rx_tpa_start_cqe fast_path_tpa_start;
+	struct eth_fast_path_rx_tpa_cont_cqe fast_path_tpa_cont;
+	struct eth_fast_path_rx_tpa_end_cqe fast_path_tpa_end;
+	struct eth_slow_path_rx_cqe slow_path;
 };
 
 /* ETH Rx CQE type */
@@ -339,7 +357,7 @@ enum eth_rx_tunn_type {
 	MAX_ETH_RX_TUNN_TYPE
 };
 
-/*  Aggregation end reason. */
+/* Aggregation end reason */
 enum eth_tpa_end_reason {
 	ETH_AGG_END_UNUSED,
 	ETH_AGG_END_SP_UPDATE,
@@ -354,59 +372,59 @@ enum eth_tpa_end_reason {
 
 /* The first tx bd of a given packet */
 struct eth_tx_1st_bd {
-	struct regpair			addr;
-	__le16				nbytes;
-	struct eth_tx_data_1st_bd	data;
+	struct regpair addr;
+	__le16 nbytes;
+	struct eth_tx_data_1st_bd data;
 };
 
 /* The second tx bd of a given packet */
 struct eth_tx_2nd_bd {
-	struct regpair			addr;
-	__le16				nbytes;
-	struct eth_tx_data_2nd_bd	data;
+	struct regpair addr;
+	__le16 nbytes;
+	struct eth_tx_data_2nd_bd data;
 };
 
-/* The parsing information data for the third tx bd of a given packet. */
+/* The parsing information data for the third tx bd of a given packet */
 struct eth_tx_data_3rd_bd {
 	__le16 lso_mss;
 	__le16 bitfields;
-#define ETH_TX_DATA_3RD_BD_TCP_HDR_LEN_DW_MASK  0xF
-#define ETH_TX_DATA_3RD_BD_TCP_HDR_LEN_DW_SHIFT 0
-#define ETH_TX_DATA_3RD_BD_HDR_NBD_MASK         0xF
-#define ETH_TX_DATA_3RD_BD_HDR_NBD_SHIFT        4
-#define ETH_TX_DATA_3RD_BD_START_BD_MASK        0x1
-#define ETH_TX_DATA_3RD_BD_START_BD_SHIFT       8
-#define ETH_TX_DATA_3RD_BD_RESERVED0_MASK       0x7F
-#define ETH_TX_DATA_3RD_BD_RESERVED0_SHIFT      9
+#define ETH_TX_DATA_3RD_BD_TCP_HDR_LEN_DW_MASK	0xF
+#define ETH_TX_DATA_3RD_BD_TCP_HDR_LEN_DW_SHIFT	0
+#define ETH_TX_DATA_3RD_BD_HDR_NBD_MASK		0xF
+#define ETH_TX_DATA_3RD_BD_HDR_NBD_SHIFT	4
+#define ETH_TX_DATA_3RD_BD_START_BD_MASK	0x1
+#define ETH_TX_DATA_3RD_BD_START_BD_SHIFT	8
+#define ETH_TX_DATA_3RD_BD_RESERVED0_MASK	0x7F
+#define ETH_TX_DATA_3RD_BD_RESERVED0_SHIFT	9
 	u8 tunn_l4_hdr_start_offset_w;
 	u8 tunn_hdr_size_w;
 };
 
 /* The third tx bd of a given packet */
 struct eth_tx_3rd_bd {
-	struct regpair			addr;
-	__le16				nbytes;
-	struct eth_tx_data_3rd_bd	data;
+	struct regpair addr;
+	__le16 nbytes;
+	struct eth_tx_data_3rd_bd data;
 };
 
-/* Complementary information for the regular tx bd of a given packet. */
+/* Complementary information for the regular tx bd of a given packet */
 struct eth_tx_data_bd {
-	__le16	reserved0;
-	__le16	bitfields;
-#define ETH_TX_DATA_BD_RESERVED1_MASK  0xFF
-#define ETH_TX_DATA_BD_RESERVED1_SHIFT 0
-#define ETH_TX_DATA_BD_START_BD_MASK   0x1
-#define ETH_TX_DATA_BD_START_BD_SHIFT  8
-#define ETH_TX_DATA_BD_RESERVED2_MASK  0x7F
-#define ETH_TX_DATA_BD_RESERVED2_SHIFT 9
+	__le16 reserved0;
+	__le16 bitfields;
+#define ETH_TX_DATA_BD_RESERVED1_MASK	0xFF
+#define ETH_TX_DATA_BD_RESERVED1_SHIFT	0
+#define ETH_TX_DATA_BD_START_BD_MASK	0x1
+#define ETH_TX_DATA_BD_START_BD_SHIFT	8
+#define ETH_TX_DATA_BD_RESERVED2_MASK	0x7F
+#define ETH_TX_DATA_BD_RESERVED2_SHIFT	9
 	__le16 reserved3;
 };
 
 /* The common non-special TX BD ring element */
 struct eth_tx_bd {
-	struct regpair	addr;
-	__le16		nbytes;
-	struct eth_tx_data_bd	data;
+	struct regpair addr;
+	__le16 nbytes;
+	struct eth_tx_data_bd data;
 };
 
 union eth_tx_bd_types {
@@ -434,18 +452,30 @@ struct xstorm_eth_queue_zone {
 /* ETH doorbell data */
 struct eth_db_data {
 	u8 params;
-#define ETH_DB_DATA_DEST_MASK         0x3
-#define ETH_DB_DATA_DEST_SHIFT        0
-#define ETH_DB_DATA_AGG_CMD_MASK      0x3
-#define ETH_DB_DATA_AGG_CMD_SHIFT     2
-#define ETH_DB_DATA_BYPASS_EN_MASK    0x1
-#define ETH_DB_DATA_BYPASS_EN_SHIFT   4
-#define ETH_DB_DATA_RESERVED_MASK     0x1
-#define ETH_DB_DATA_RESERVED_SHIFT    5
-#define ETH_DB_DATA_AGG_VAL_SEL_MASK  0x3
-#define ETH_DB_DATA_AGG_VAL_SEL_SHIFT 6
+#define ETH_DB_DATA_DEST_MASK		0x3
+#define ETH_DB_DATA_DEST_SHIFT		0
+#define ETH_DB_DATA_AGG_CMD_MASK	0x3
+#define ETH_DB_DATA_AGG_CMD_SHIFT	2
+#define ETH_DB_DATA_BYPASS_EN_MASK	0x1
+#define ETH_DB_DATA_BYPASS_EN_SHIFT	4
+#define ETH_DB_DATA_RESERVED_MASK	0x1
+#define ETH_DB_DATA_RESERVED_SHIFT	5
+#define ETH_DB_DATA_AGG_VAL_SEL_MASK	0x3
+#define ETH_DB_DATA_AGG_VAL_SEL_SHIFT	6
 	u8 agg_flags;
 	__le16 bd_prod;
 };
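
The params byte is composed field by field before the record is written to the doorbell BAR. A sketch in the style of the qed/qede TX path, assuming DB_DEST_XCM, DB_AGG_CMD_SET and DQ_XCM_ETH_TX_BD_PROD_CMD from common_hsi.h are in scope:

	/* Illustrative sketch: build a TX doorbell record asking the XCM
	 * aggregation block to set its BD-producer copy to bd_prod.
	 */
	static void eth_db_compose_example(struct eth_db_data *db, u16 bd_prod)
	{
		db->params = 0;
		SET_FIELD(db->params, ETH_DB_DATA_DEST, DB_DEST_XCM);
		SET_FIELD(db->params, ETH_DB_DATA_AGG_CMD, DB_AGG_CMD_SET);
		SET_FIELD(db->params, ETH_DB_DATA_AGG_VAL_SEL,
			  DQ_XCM_ETH_TX_BD_PROD_CMD);
		db->bd_prod = cpu_to_le16(bd_prod);
	}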
 
+/* RSS hash type */
+enum rss_hash_type {
+	RSS_HASH_TYPE_DEFAULT = 0,
+	RSS_HASH_TYPE_IPV4 = 1,
+	RSS_HASH_TYPE_TCP_IPV4 = 2,
+	RSS_HASH_TYPE_IPV6 = 3,
+	RSS_HASH_TYPE_TCP_IPV6 = 4,
+	RSS_HASH_TYPE_UDP_IPV4 = 5,
+	RSS_HASH_TYPE_UDP_IPV6 = 6,
+	MAX_RSS_HASH_TYPE
+};
+
 #endif /* __ETH_COMMON__ */
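
These enum values correspond to the 3-bit RSS_HASH_TYPE field reported in the regular fast-path Rx CQE above; a sketch of the L3-vs-L4 classification a driver can derive from it (the function name is illustrative):

	/* Illustrative sketch: true when the reported RSS hash covered an
	 * L4 (TCP/UDP) header, false for a pure-L3 hash or no hash at all.
	 */
	static bool rx_cqe_hash_is_l4_example(struct eth_fast_path_rx_reg_cqe *cqe)
	{
		u8 htype = GET_FIELD(cqe->bitfields,
				     ETH_FAST_PATH_RX_REG_CQE_RSS_HASH_TYPE);

		return htype == RSS_HASH_TYPE_TCP_IPV4 ||
		       htype == RSS_HASH_TYPE_TCP_IPV6 ||
		       htype == RSS_HASH_TYPE_UDP_IPV4 ||
		       htype == RSS_HASH_TYPE_UDP_IPV6;
	}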
diff --git a/include/linux/qed/fcoe_common.h b/include/linux/qed/fcoe_common.h
index 12fc9e788eea..22077c586853 100644
--- a/include/linux/qed/fcoe_common.h
+++ b/include/linux/qed/fcoe_common.h
@@ -8,217 +8,78 @@
 
 #ifndef __FCOE_COMMON__
 #define __FCOE_COMMON__
+
 /*********************/
 /* FCOE FW CONSTANTS */
 /*********************/
 
 #define FC_ABTS_REPLY_MAX_PAYLOAD_LEN	12
 
-struct fcoe_abts_pkt {
-	__le32 abts_rsp_fc_payload_lo;
-	__le16 abts_rsp_rx_id;
-	u8 abts_rsp_rctl;
-	u8 reserved2;
-};
-
-/* FCoE additional WQE (Sq/XferQ) information */
-union fcoe_additional_info_union {
-	__le32 previous_tid;
-	__le32 parent_tid;
-	__le32 burst_length;
-	__le32 seq_rec_updated_offset;
-};
-
-struct fcoe_exp_ro {
-	__le32 data_offset;
-	__le32 reserved;
-};
-
-union fcoe_cleanup_addr_exp_ro_union {
-	struct regpair abts_rsp_fc_payload_hi;
-	struct fcoe_exp_ro exp_ro;
-};
-
-/* FCoE Ramrod Command IDs */
-enum fcoe_completion_status {
-	FCOE_COMPLETION_STATUS_SUCCESS,
-	FCOE_COMPLETION_STATUS_FCOE_VER_ERR,
-	FCOE_COMPLETION_STATUS_SRC_MAC_ADD_ARR_ERR,
-	MAX_FCOE_COMPLETION_STATUS
-};
-
-struct fc_addr_nw {
-	u8 addr_lo;
-	u8 addr_mid;
-	u8 addr_hi;
-};
-
-/* FCoE connection offload */
-struct fcoe_conn_offload_ramrod_data {
-	struct regpair sq_pbl_addr;
-	struct regpair sq_curr_page_addr;
-	struct regpair sq_next_page_addr;
-	struct regpair xferq_pbl_addr;
-	struct regpair xferq_curr_page_addr;
-	struct regpair xferq_next_page_addr;
-	struct regpair respq_pbl_addr;
-	struct regpair respq_curr_page_addr;
-	struct regpair respq_next_page_addr;
-	__le16 dst_mac_addr_lo;
-	__le16 dst_mac_addr_mid;
-	__le16 dst_mac_addr_hi;
-	__le16 src_mac_addr_lo;
-	__le16 src_mac_addr_mid;
-	__le16 src_mac_addr_hi;
-	__le16 tx_max_fc_pay_len;
-	__le16 e_d_tov_timer_val;
-	__le16 rx_max_fc_pay_len;
-	__le16 vlan_tag;
-#define FCOE_CONN_OFFLOAD_RAMROD_DATA_VLAN_ID_MASK              0xFFF
-#define FCOE_CONN_OFFLOAD_RAMROD_DATA_VLAN_ID_SHIFT             0
-#define FCOE_CONN_OFFLOAD_RAMROD_DATA_CFI_MASK                  0x1
-#define FCOE_CONN_OFFLOAD_RAMROD_DATA_CFI_SHIFT                 12
-#define FCOE_CONN_OFFLOAD_RAMROD_DATA_PRIORITY_MASK             0x7
-#define FCOE_CONN_OFFLOAD_RAMROD_DATA_PRIORITY_SHIFT            13
-	__le16 physical_q0;
-	__le16 rec_rr_tov_timer_val;
-	struct fc_addr_nw s_id;
-	u8 max_conc_seqs_c3;
-	struct fc_addr_nw d_id;
-	u8 flags;
-#define FCOE_CONN_OFFLOAD_RAMROD_DATA_B_CONT_INCR_SEQ_CNT_MASK  0x1
-#define FCOE_CONN_OFFLOAD_RAMROD_DATA_B_CONT_INCR_SEQ_CNT_SHIFT 0
-#define FCOE_CONN_OFFLOAD_RAMROD_DATA_B_CONF_REQ_MASK           0x1
-#define FCOE_CONN_OFFLOAD_RAMROD_DATA_B_CONF_REQ_SHIFT          1
-#define FCOE_CONN_OFFLOAD_RAMROD_DATA_B_REC_VALID_MASK          0x1
-#define FCOE_CONN_OFFLOAD_RAMROD_DATA_B_REC_VALID_SHIFT         2
-#define FCOE_CONN_OFFLOAD_RAMROD_DATA_B_VLAN_FLAG_MASK          0x1
-#define FCOE_CONN_OFFLOAD_RAMROD_DATA_B_VLAN_FLAG_SHIFT         3
-#define FCOE_CONN_OFFLOAD_RAMROD_DATA_MODE_MASK                 0x3
-#define FCOE_CONN_OFFLOAD_RAMROD_DATA_MODE_SHIFT                4
-#define FCOE_CONN_OFFLOAD_RAMROD_DATA_RESERVED0_MASK            0x3
-#define FCOE_CONN_OFFLOAD_RAMROD_DATA_RESERVED0_SHIFT           6
-	__le16 conn_id;
-	u8 def_q_idx;
-	u8 reserved[5];
-};
-
-/* FCoE terminate connection request */
-struct fcoe_conn_terminate_ramrod_data {
-	struct regpair terminate_params_addr;
-};
-
-struct fcoe_slow_sgl_ctx {
-	struct regpair base_sgl_addr;
-	__le16 curr_sge_off;
-	__le16 remainder_num_sges;
-	__le16 curr_sgl_index;
-	__le16 reserved;
-};
-
-union fcoe_dix_desc_ctx {
-	struct fcoe_slow_sgl_ctx dix_sgl;
-	struct scsi_sge cached_dix_sge;
+/* The fcoe storm task context protection-information of Ystorm */
+struct protection_info_ctx {
+	__le16 flags;
+#define PROTECTION_INFO_CTX_HOST_INTERFACE_MASK		0x3
+#define PROTECTION_INFO_CTX_HOST_INTERFACE_SHIFT	0
+#define PROTECTION_INFO_CTX_DIF_TO_PEER_MASK		0x1
+#define PROTECTION_INFO_CTX_DIF_TO_PEER_SHIFT		2
+#define PROTECTION_INFO_CTX_VALIDATE_DIX_APP_TAG_MASK	0x1
+#define PROTECTION_INFO_CTX_VALIDATE_DIX_APP_TAG_SHIFT	3
+#define PROTECTION_INFO_CTX_INTERVAL_SIZE_LOG_MASK	0xF
+#define PROTECTION_INFO_CTX_INTERVAL_SIZE_LOG_SHIFT	4
+#define PROTECTION_INFO_CTX_VALIDATE_DIX_REF_TAG_MASK	0x1
+#define PROTECTION_INFO_CTX_VALIDATE_DIX_REF_TAG_SHIFT	8
+#define PROTECTION_INFO_CTX_RESERVED0_MASK		0x7F
+#define PROTECTION_INFO_CTX_RESERVED0_SHIFT		9
+	u8 dix_block_size;
+	u8 dst_size;
 };
 
-struct fcoe_fast_sgl_ctx {
-	struct regpair sgl_start_addr;
-	__le32 sgl_byte_offset;
-	__le16 task_reuse_cnt;
-	__le16 init_offset_in_first_sge;
+/* Union of the fcoe storm task context protection-information of Ystorm */
+union protection_info_union_ctx {
+	struct protection_info_ctx info;
+	__le32 value;
 };
 
+/* FCP CMD payload */
 struct fcoe_fcp_cmd_payload {
 	__le32 opaque[8];
 };
 
+/* FCP RSP payload */
 struct fcoe_fcp_rsp_payload {
 	__le32 opaque[6];
 };
 
-struct fcoe_fcp_xfer_payload {
-	__le32 opaque[3];
-};
-
-/* FCoE firmware function init */
-struct fcoe_init_func_ramrod_data {
-	struct scsi_init_func_params func_params;
-	struct scsi_init_func_queues q_params;
-	__le16 mtu;
-	__le16 sq_num_pages_in_pbl;
-	__le32 reserved;
-};
-
-/* FCoE: Mode of the connection: Target or Initiator or both */
-enum fcoe_mode_type {
-	FCOE_INITIATOR_MODE = 0x0,
-	FCOE_TARGET_MODE = 0x1,
-	FCOE_BOTH_OR_NOT_CHOSEN = 0x3,
-	MAX_FCOE_MODE_TYPE
-};
-
-struct fcoe_rx_stat {
-	struct regpair fcoe_rx_byte_cnt;
-	struct regpair fcoe_rx_data_pkt_cnt;
-	struct regpair fcoe_rx_xfer_pkt_cnt;
-	struct regpair fcoe_rx_other_pkt_cnt;
-	__le32 fcoe_silent_drop_pkt_cmdq_full_cnt;
-	__le32 fcoe_silent_drop_pkt_rq_full_cnt;
-	__le32 fcoe_silent_drop_pkt_crc_error_cnt;
-	__le32 fcoe_silent_drop_pkt_task_invalid_cnt;
-	__le32 fcoe_silent_drop_total_pkt_cnt;
-	__le32 rsrv;
-};
-
-struct fcoe_stat_ramrod_data {
-	struct regpair stat_params_addr;
-};
-
-struct protection_info_ctx {
-	__le16 flags;
-#define PROTECTION_INFO_CTX_HOST_INTERFACE_MASK        0x3
-#define PROTECTION_INFO_CTX_HOST_INTERFACE_SHIFT       0
-#define PROTECTION_INFO_CTX_DIF_TO_PEER_MASK           0x1
-#define PROTECTION_INFO_CTX_DIF_TO_PEER_SHIFT          2
-#define PROTECTION_INFO_CTX_VALIDATE_DIX_APP_TAG_MASK  0x1
-#define PROTECTION_INFO_CTX_VALIDATE_DIX_APP_TAG_SHIFT 3
-#define PROTECTION_INFO_CTX_INTERVAL_SIZE_LOG_MASK     0xF
-#define PROTECTION_INFO_CTX_INTERVAL_SIZE_LOG_SHIFT    4
-#define PROTECTION_INFO_CTX_VALIDATE_DIX_REF_TAG_MASK  0x1
-#define PROTECTION_INFO_CTX_VALIDATE_DIX_REF_TAG_SHIFT 8
-#define PROTECTION_INFO_CTX_RESERVED0_MASK             0x7F
-#define PROTECTION_INFO_CTX_RESERVED0_SHIFT            9
-	u8 dix_block_size;
-	u8 dst_size;
-};
-
-union protection_info_union_ctx {
-	struct protection_info_ctx info;
-	__le32 value;
-};
-
+/* FCP RSP payload */
 struct fcp_rsp_payload_padded {
 	struct fcoe_fcp_rsp_payload rsp_payload;
 	__le32 reserved[2];
 };
 
+/* FCP XFER payload */
+struct fcoe_fcp_xfer_payload {
+	__le32 opaque[3];
+};
+
+/* FCP XFER payload */
 struct fcp_xfer_payload_padded {
 	struct fcoe_fcp_xfer_payload xfer_payload;
 	__le32 reserved[5];
 };
 
+/* Task params */
 struct fcoe_tx_data_params {
 	__le32 data_offset;
 	__le32 offset_in_io;
 	u8 flags;
-#define FCOE_TX_DATA_PARAMS_OFFSET_IN_IO_VALID_MASK  0x1
-#define FCOE_TX_DATA_PARAMS_OFFSET_IN_IO_VALID_SHIFT 0
-#define FCOE_TX_DATA_PARAMS_DROP_DATA_MASK           0x1
-#define FCOE_TX_DATA_PARAMS_DROP_DATA_SHIFT          1
-#define FCOE_TX_DATA_PARAMS_AFTER_SEQ_REC_MASK       0x1
-#define FCOE_TX_DATA_PARAMS_AFTER_SEQ_REC_SHIFT      2
-#define FCOE_TX_DATA_PARAMS_RESERVED0_MASK           0x1F
-#define FCOE_TX_DATA_PARAMS_RESERVED0_SHIFT          3
+#define FCOE_TX_DATA_PARAMS_OFFSET_IN_IO_VALID_MASK	0x1
+#define FCOE_TX_DATA_PARAMS_OFFSET_IN_IO_VALID_SHIFT	0
+#define FCOE_TX_DATA_PARAMS_DROP_DATA_MASK		0x1
+#define FCOE_TX_DATA_PARAMS_DROP_DATA_SHIFT		1
+#define FCOE_TX_DATA_PARAMS_AFTER_SEQ_REC_MASK		0x1
+#define FCOE_TX_DATA_PARAMS_AFTER_SEQ_REC_SHIFT		2
+#define FCOE_TX_DATA_PARAMS_RESERVED0_MASK		0x1F
+#define FCOE_TX_DATA_PARAMS_RESERVED0_SHIFT		3
 	u8 dif_residual;
 	__le16 seq_cnt;
 	__le16 single_sge_saved_offset;
@@ -227,6 +88,7 @@ struct fcoe_tx_data_params {
 	__le16 reserved3;
 };
 
+/* Middle path parameters: FC header fields provided by the driver */
 struct fcoe_tx_mid_path_params {
 	__le32 parameter;
 	u8 r_ctl;
@@ -237,11 +99,13 @@ struct fcoe_tx_mid_path_params {
 	__le16 ox_id;
 };
 
+/* Task params */
 struct fcoe_tx_params {
 	struct fcoe_tx_data_params data;
 	struct fcoe_tx_mid_path_params mid_path;
 };
 
+/* Union of FCP CMD payload \ TX params \ ABTS \ Cleanup */
 union fcoe_tx_info_union_ctx {
 	struct fcoe_fcp_cmd_payload fcp_cmd_payload;
 	struct fcp_rsp_payload_padded fcp_rsp_payload;
@@ -249,13 +113,29 @@ union fcoe_tx_info_union_ctx {
 	struct fcoe_tx_params tx_params;
 };
 
+/* Data sgl */
+struct fcoe_slow_sgl_ctx {
+	struct regpair base_sgl_addr;
+	__le16 curr_sge_off;
+	__le16 remainder_num_sges;
+	__le16 curr_sgl_index;
+	__le16 reserved;
+};
+
+/* Union of DIX SGL \ cached DIX sges */
+union fcoe_dix_desc_ctx {
+	struct fcoe_slow_sgl_ctx dix_sgl;
+	struct scsi_sge cached_dix_sge;
+};
+
+/* The fcoe storm task context of Ystorm */
 struct ystorm_fcoe_task_st_ctx {
 	u8 task_type;
 	u8 sgl_mode;
-#define YSTORM_FCOE_TASK_ST_CTX_TX_SGL_MODE_MASK  0x1
-#define YSTORM_FCOE_TASK_ST_CTX_TX_SGL_MODE_SHIFT 0
-#define YSTORM_FCOE_TASK_ST_CTX_RSRV_MASK         0x7F
-#define YSTORM_FCOE_TASK_ST_CTX_RSRV_SHIFT        1
+#define YSTORM_FCOE_TASK_ST_CTX_TX_SGL_MODE_MASK	0x1
+#define YSTORM_FCOE_TASK_ST_CTX_TX_SGL_MODE_SHIFT	0
+#define YSTORM_FCOE_TASK_ST_CTX_RSRV_MASK		0x7F
+#define YSTORM_FCOE_TASK_ST_CTX_RSRV_SHIFT		1
 	u8 cached_dix_sge;
 	u8 expect_first_xfer;
 	__le32 num_pbf_zero_write;
@@ -272,49 +152,49 @@ struct ystorm_fcoe_task_st_ctx {
 	u8 reserved2[8];
 };
 
-struct ystorm_fcoe_task_ag_ctx {
+struct e4_ystorm_fcoe_task_ag_ctx {
 	u8 byte0;
 	u8 byte1;
 	__le16 word0;
 	u8 flags0;
-#define YSTORM_FCOE_TASK_AG_CTX_NIBBLE0_MASK     0xF
-#define YSTORM_FCOE_TASK_AG_CTX_NIBBLE0_SHIFT    0
-#define YSTORM_FCOE_TASK_AG_CTX_BIT0_MASK        0x1
-#define YSTORM_FCOE_TASK_AG_CTX_BIT0_SHIFT       4
-#define YSTORM_FCOE_TASK_AG_CTX_BIT1_MASK        0x1
-#define YSTORM_FCOE_TASK_AG_CTX_BIT1_SHIFT       5
-#define YSTORM_FCOE_TASK_AG_CTX_BIT2_MASK        0x1
-#define YSTORM_FCOE_TASK_AG_CTX_BIT2_SHIFT       6
-#define YSTORM_FCOE_TASK_AG_CTX_BIT3_MASK        0x1
-#define YSTORM_FCOE_TASK_AG_CTX_BIT3_SHIFT       7
+#define E4_YSTORM_FCOE_TASK_AG_CTX_NIBBLE0_MASK		0xF
+#define E4_YSTORM_FCOE_TASK_AG_CTX_NIBBLE0_SHIFT	0
+#define E4_YSTORM_FCOE_TASK_AG_CTX_BIT0_MASK		0x1
+#define E4_YSTORM_FCOE_TASK_AG_CTX_BIT0_SHIFT		4
+#define E4_YSTORM_FCOE_TASK_AG_CTX_BIT1_MASK		0x1
+#define E4_YSTORM_FCOE_TASK_AG_CTX_BIT1_SHIFT		5
+#define E4_YSTORM_FCOE_TASK_AG_CTX_BIT2_MASK		0x1
+#define E4_YSTORM_FCOE_TASK_AG_CTX_BIT2_SHIFT		6
+#define E4_YSTORM_FCOE_TASK_AG_CTX_BIT3_MASK		0x1
+#define E4_YSTORM_FCOE_TASK_AG_CTX_BIT3_SHIFT		7
 	u8 flags1;
-#define YSTORM_FCOE_TASK_AG_CTX_CF0_MASK         0x3
-#define YSTORM_FCOE_TASK_AG_CTX_CF0_SHIFT        0
-#define YSTORM_FCOE_TASK_AG_CTX_CF1_MASK         0x3
-#define YSTORM_FCOE_TASK_AG_CTX_CF1_SHIFT        2
-#define YSTORM_FCOE_TASK_AG_CTX_CF2SPECIAL_MASK  0x3
-#define YSTORM_FCOE_TASK_AG_CTX_CF2SPECIAL_SHIFT 4
-#define YSTORM_FCOE_TASK_AG_CTX_CF0EN_MASK       0x1
-#define YSTORM_FCOE_TASK_AG_CTX_CF0EN_SHIFT      6
-#define YSTORM_FCOE_TASK_AG_CTX_CF1EN_MASK       0x1
-#define YSTORM_FCOE_TASK_AG_CTX_CF1EN_SHIFT      7
+#define E4_YSTORM_FCOE_TASK_AG_CTX_CF0_MASK		0x3
+#define E4_YSTORM_FCOE_TASK_AG_CTX_CF0_SHIFT		0
+#define E4_YSTORM_FCOE_TASK_AG_CTX_CF1_MASK		0x3
+#define E4_YSTORM_FCOE_TASK_AG_CTX_CF1_SHIFT		2
+#define E4_YSTORM_FCOE_TASK_AG_CTX_CF2SPECIAL_MASK	0x3
+#define E4_YSTORM_FCOE_TASK_AG_CTX_CF2SPECIAL_SHIFT	4
+#define E4_YSTORM_FCOE_TASK_AG_CTX_CF0EN_MASK		0x1
+#define E4_YSTORM_FCOE_TASK_AG_CTX_CF0EN_SHIFT		6
+#define E4_YSTORM_FCOE_TASK_AG_CTX_CF1EN_MASK		0x1
+#define E4_YSTORM_FCOE_TASK_AG_CTX_CF1EN_SHIFT		7
 	u8 flags2;
-#define YSTORM_FCOE_TASK_AG_CTX_BIT4_MASK        0x1
-#define YSTORM_FCOE_TASK_AG_CTX_BIT4_SHIFT       0
-#define YSTORM_FCOE_TASK_AG_CTX_RULE0EN_MASK     0x1
-#define YSTORM_FCOE_TASK_AG_CTX_RULE0EN_SHIFT    1
-#define YSTORM_FCOE_TASK_AG_CTX_RULE1EN_MASK     0x1
-#define YSTORM_FCOE_TASK_AG_CTX_RULE1EN_SHIFT    2
-#define YSTORM_FCOE_TASK_AG_CTX_RULE2EN_MASK     0x1
-#define YSTORM_FCOE_TASK_AG_CTX_RULE2EN_SHIFT    3
-#define YSTORM_FCOE_TASK_AG_CTX_RULE3EN_MASK     0x1
-#define YSTORM_FCOE_TASK_AG_CTX_RULE3EN_SHIFT    4
-#define YSTORM_FCOE_TASK_AG_CTX_RULE4EN_MASK     0x1
-#define YSTORM_FCOE_TASK_AG_CTX_RULE4EN_SHIFT    5
-#define YSTORM_FCOE_TASK_AG_CTX_RULE5EN_MASK     0x1
-#define YSTORM_FCOE_TASK_AG_CTX_RULE5EN_SHIFT    6
-#define YSTORM_FCOE_TASK_AG_CTX_RULE6EN_MASK     0x1
-#define YSTORM_FCOE_TASK_AG_CTX_RULE6EN_SHIFT    7
+#define E4_YSTORM_FCOE_TASK_AG_CTX_BIT4_MASK		0x1
+#define E4_YSTORM_FCOE_TASK_AG_CTX_BIT4_SHIFT		0
+#define E4_YSTORM_FCOE_TASK_AG_CTX_RULE0EN_MASK		0x1
+#define E4_YSTORM_FCOE_TASK_AG_CTX_RULE0EN_SHIFT	1
+#define E4_YSTORM_FCOE_TASK_AG_CTX_RULE1EN_MASK		0x1
+#define E4_YSTORM_FCOE_TASK_AG_CTX_RULE1EN_SHIFT	2
+#define E4_YSTORM_FCOE_TASK_AG_CTX_RULE2EN_MASK		0x1
+#define E4_YSTORM_FCOE_TASK_AG_CTX_RULE2EN_SHIFT	3
+#define E4_YSTORM_FCOE_TASK_AG_CTX_RULE3EN_MASK		0x1
+#define E4_YSTORM_FCOE_TASK_AG_CTX_RULE3EN_SHIFT	4
+#define E4_YSTORM_FCOE_TASK_AG_CTX_RULE4EN_MASK		0x1
+#define E4_YSTORM_FCOE_TASK_AG_CTX_RULE4EN_SHIFT	5
+#define E4_YSTORM_FCOE_TASK_AG_CTX_RULE5EN_MASK		0x1
+#define E4_YSTORM_FCOE_TASK_AG_CTX_RULE5EN_SHIFT	6
+#define E4_YSTORM_FCOE_TASK_AG_CTX_RULE6EN_MASK		0x1
+#define E4_YSTORM_FCOE_TASK_AG_CTX_RULE6EN_SHIFT	7
 	u8 byte2;
 	__le32 reg0;
 	u8 byte3;
@@ -328,73 +208,73 @@ struct ystorm_fcoe_task_ag_ctx {
 	__le32 reg2;
 };
 
-struct tstorm_fcoe_task_ag_ctx {
+struct e4_tstorm_fcoe_task_ag_ctx {
 	u8 reserved;
 	u8 byte1;
 	__le16 icid;
 	u8 flags0;
-#define TSTORM_FCOE_TASK_AG_CTX_CONNECTION_TYPE_MASK     0xF
-#define TSTORM_FCOE_TASK_AG_CTX_CONNECTION_TYPE_SHIFT    0
-#define TSTORM_FCOE_TASK_AG_CTX_EXIST_IN_QM0_MASK        0x1
-#define TSTORM_FCOE_TASK_AG_CTX_EXIST_IN_QM0_SHIFT       4
-#define TSTORM_FCOE_TASK_AG_CTX_BIT1_MASK                0x1
-#define TSTORM_FCOE_TASK_AG_CTX_BIT1_SHIFT               5
-#define TSTORM_FCOE_TASK_AG_CTX_WAIT_ABTS_RSP_F_MASK     0x1
-#define TSTORM_FCOE_TASK_AG_CTX_WAIT_ABTS_RSP_F_SHIFT    6
-#define TSTORM_FCOE_TASK_AG_CTX_VALID_MASK               0x1
-#define TSTORM_FCOE_TASK_AG_CTX_VALID_SHIFT              7
+#define E4_TSTORM_FCOE_TASK_AG_CTX_CONNECTION_TYPE_MASK		0xF
+#define E4_TSTORM_FCOE_TASK_AG_CTX_CONNECTION_TYPE_SHIFT	0
+#define E4_TSTORM_FCOE_TASK_AG_CTX_EXIST_IN_QM0_MASK		0x1
+#define E4_TSTORM_FCOE_TASK_AG_CTX_EXIST_IN_QM0_SHIFT		4
+#define E4_TSTORM_FCOE_TASK_AG_CTX_BIT1_MASK			0x1
+#define E4_TSTORM_FCOE_TASK_AG_CTX_BIT1_SHIFT			5
+#define E4_TSTORM_FCOE_TASK_AG_CTX_WAIT_ABTS_RSP_F_MASK		0x1
+#define E4_TSTORM_FCOE_TASK_AG_CTX_WAIT_ABTS_RSP_F_SHIFT	6
+#define E4_TSTORM_FCOE_TASK_AG_CTX_VALID_MASK			0x1
+#define E4_TSTORM_FCOE_TASK_AG_CTX_VALID_SHIFT			7
 	u8 flags1;
-#define TSTORM_FCOE_TASK_AG_CTX_FALSE_RR_TOV_MASK        0x1
-#define TSTORM_FCOE_TASK_AG_CTX_FALSE_RR_TOV_SHIFT       0
-#define TSTORM_FCOE_TASK_AG_CTX_BIT5_MASK                0x1
-#define TSTORM_FCOE_TASK_AG_CTX_BIT5_SHIFT               1
-#define TSTORM_FCOE_TASK_AG_CTX_REC_RR_TOV_CF_MASK       0x3
-#define TSTORM_FCOE_TASK_AG_CTX_REC_RR_TOV_CF_SHIFT      2
-#define TSTORM_FCOE_TASK_AG_CTX_ED_TOV_CF_MASK           0x3
-#define TSTORM_FCOE_TASK_AG_CTX_ED_TOV_CF_SHIFT          4
-#define TSTORM_FCOE_TASK_AG_CTX_CF2_MASK                 0x3
-#define TSTORM_FCOE_TASK_AG_CTX_CF2_SHIFT                6
+#define E4_TSTORM_FCOE_TASK_AG_CTX_FALSE_RR_TOV_MASK	0x1
+#define E4_TSTORM_FCOE_TASK_AG_CTX_FALSE_RR_TOV_SHIFT	0
+#define E4_TSTORM_FCOE_TASK_AG_CTX_BIT5_MASK		0x1
+#define E4_TSTORM_FCOE_TASK_AG_CTX_BIT5_SHIFT		1
+#define E4_TSTORM_FCOE_TASK_AG_CTX_REC_RR_TOV_CF_MASK	0x3
+#define E4_TSTORM_FCOE_TASK_AG_CTX_REC_RR_TOV_CF_SHIFT	2
+#define E4_TSTORM_FCOE_TASK_AG_CTX_ED_TOV_CF_MASK	0x3
+#define E4_TSTORM_FCOE_TASK_AG_CTX_ED_TOV_CF_SHIFT	4
+#define E4_TSTORM_FCOE_TASK_AG_CTX_CF2_MASK		0x3
+#define E4_TSTORM_FCOE_TASK_AG_CTX_CF2_SHIFT		6
 	u8 flags2;
-#define TSTORM_FCOE_TASK_AG_CTX_TIMER_STOP_ALL_MASK      0x3
-#define TSTORM_FCOE_TASK_AG_CTX_TIMER_STOP_ALL_SHIFT     0
-#define TSTORM_FCOE_TASK_AG_CTX_EX_CLEANUP_CF_MASK       0x3
-#define TSTORM_FCOE_TASK_AG_CTX_EX_CLEANUP_CF_SHIFT      2
-#define TSTORM_FCOE_TASK_AG_CTX_SEQ_INIT_CF_MASK         0x3
-#define TSTORM_FCOE_TASK_AG_CTX_SEQ_INIT_CF_SHIFT        4
-#define TSTORM_FCOE_TASK_AG_CTX_SEQ_RECOVERY_CF_MASK     0x3
-#define TSTORM_FCOE_TASK_AG_CTX_SEQ_RECOVERY_CF_SHIFT    6
+#define E4_TSTORM_FCOE_TASK_AG_CTX_TIMER_STOP_ALL_MASK		0x3
+#define E4_TSTORM_FCOE_TASK_AG_CTX_TIMER_STOP_ALL_SHIFT		0
+#define E4_TSTORM_FCOE_TASK_AG_CTX_EX_CLEANUP_CF_MASK		0x3
+#define E4_TSTORM_FCOE_TASK_AG_CTX_EX_CLEANUP_CF_SHIFT		2
+#define E4_TSTORM_FCOE_TASK_AG_CTX_SEQ_INIT_CF_MASK		0x3
+#define E4_TSTORM_FCOE_TASK_AG_CTX_SEQ_INIT_CF_SHIFT		4
+#define E4_TSTORM_FCOE_TASK_AG_CTX_SEQ_RECOVERY_CF_MASK		0x3
+#define E4_TSTORM_FCOE_TASK_AG_CTX_SEQ_RECOVERY_CF_SHIFT	6
 	u8 flags3;
-#define TSTORM_FCOE_TASK_AG_CTX_UNSOL_COMP_CF_MASK       0x3
-#define TSTORM_FCOE_TASK_AG_CTX_UNSOL_COMP_CF_SHIFT      0
-#define TSTORM_FCOE_TASK_AG_CTX_REC_RR_TOV_CF_EN_MASK    0x1
-#define TSTORM_FCOE_TASK_AG_CTX_REC_RR_TOV_CF_EN_SHIFT   2
-#define TSTORM_FCOE_TASK_AG_CTX_ED_TOV_CF_EN_MASK        0x1
-#define TSTORM_FCOE_TASK_AG_CTX_ED_TOV_CF_EN_SHIFT       3
-#define TSTORM_FCOE_TASK_AG_CTX_CF2EN_MASK               0x1
-#define TSTORM_FCOE_TASK_AG_CTX_CF2EN_SHIFT              4
-#define TSTORM_FCOE_TASK_AG_CTX_TIMER_STOP_ALL_EN_MASK   0x1
-#define TSTORM_FCOE_TASK_AG_CTX_TIMER_STOP_ALL_EN_SHIFT  5
-#define TSTORM_FCOE_TASK_AG_CTX_EX_CLEANUP_CF_EN_MASK    0x1
-#define TSTORM_FCOE_TASK_AG_CTX_EX_CLEANUP_CF_EN_SHIFT   6
-#define TSTORM_FCOE_TASK_AG_CTX_SEQ_INIT_CF_EN_MASK      0x1
-#define TSTORM_FCOE_TASK_AG_CTX_SEQ_INIT_CF_EN_SHIFT     7
+#define E4_TSTORM_FCOE_TASK_AG_CTX_UNSOL_COMP_CF_MASK		0x3
+#define E4_TSTORM_FCOE_TASK_AG_CTX_UNSOL_COMP_CF_SHIFT		0
+#define E4_TSTORM_FCOE_TASK_AG_CTX_REC_RR_TOV_CF_EN_MASK	0x1
+#define E4_TSTORM_FCOE_TASK_AG_CTX_REC_RR_TOV_CF_EN_SHIFT	2
+#define E4_TSTORM_FCOE_TASK_AG_CTX_ED_TOV_CF_EN_MASK		0x1
+#define E4_TSTORM_FCOE_TASK_AG_CTX_ED_TOV_CF_EN_SHIFT		3
+#define E4_TSTORM_FCOE_TASK_AG_CTX_CF2EN_MASK			0x1
+#define E4_TSTORM_FCOE_TASK_AG_CTX_CF2EN_SHIFT			4
+#define E4_TSTORM_FCOE_TASK_AG_CTX_TIMER_STOP_ALL_EN_MASK	0x1
+#define E4_TSTORM_FCOE_TASK_AG_CTX_TIMER_STOP_ALL_EN_SHIFT	5
+#define E4_TSTORM_FCOE_TASK_AG_CTX_EX_CLEANUP_CF_EN_MASK	0x1
+#define E4_TSTORM_FCOE_TASK_AG_CTX_EX_CLEANUP_CF_EN_SHIFT	6
+#define E4_TSTORM_FCOE_TASK_AG_CTX_SEQ_INIT_CF_EN_MASK		0x1
+#define E4_TSTORM_FCOE_TASK_AG_CTX_SEQ_INIT_CF_EN_SHIFT		7
 	u8 flags4;
-#define TSTORM_FCOE_TASK_AG_CTX_SEQ_RECOVERY_CF_EN_MASK  0x1
-#define TSTORM_FCOE_TASK_AG_CTX_SEQ_RECOVERY_CF_EN_SHIFT 0
-#define TSTORM_FCOE_TASK_AG_CTX_UNSOL_COMP_CF_EN_MASK    0x1
-#define TSTORM_FCOE_TASK_AG_CTX_UNSOL_COMP_CF_EN_SHIFT   1
-#define TSTORM_FCOE_TASK_AG_CTX_RULE0EN_MASK             0x1
-#define TSTORM_FCOE_TASK_AG_CTX_RULE0EN_SHIFT            2
-#define TSTORM_FCOE_TASK_AG_CTX_RULE1EN_MASK             0x1
-#define TSTORM_FCOE_TASK_AG_CTX_RULE1EN_SHIFT            3
-#define TSTORM_FCOE_TASK_AG_CTX_RULE2EN_MASK             0x1
-#define TSTORM_FCOE_TASK_AG_CTX_RULE2EN_SHIFT            4
-#define TSTORM_FCOE_TASK_AG_CTX_RULE3EN_MASK             0x1
-#define TSTORM_FCOE_TASK_AG_CTX_RULE3EN_SHIFT            5
-#define TSTORM_FCOE_TASK_AG_CTX_RULE4EN_MASK             0x1
-#define TSTORM_FCOE_TASK_AG_CTX_RULE4EN_SHIFT            6
-#define TSTORM_FCOE_TASK_AG_CTX_RULE5EN_MASK             0x1
-#define TSTORM_FCOE_TASK_AG_CTX_RULE5EN_SHIFT            7
+#define E4_TSTORM_FCOE_TASK_AG_CTX_SEQ_RECOVERY_CF_EN_MASK	0x1
+#define E4_TSTORM_FCOE_TASK_AG_CTX_SEQ_RECOVERY_CF_EN_SHIFT	0
+#define E4_TSTORM_FCOE_TASK_AG_CTX_UNSOL_COMP_CF_EN_MASK	0x1
+#define E4_TSTORM_FCOE_TASK_AG_CTX_UNSOL_COMP_CF_EN_SHIFT	1
+#define E4_TSTORM_FCOE_TASK_AG_CTX_RULE0EN_MASK			0x1
+#define E4_TSTORM_FCOE_TASK_AG_CTX_RULE0EN_SHIFT		2
+#define E4_TSTORM_FCOE_TASK_AG_CTX_RULE1EN_MASK			0x1
+#define E4_TSTORM_FCOE_TASK_AG_CTX_RULE1EN_SHIFT		3
+#define E4_TSTORM_FCOE_TASK_AG_CTX_RULE2EN_MASK			0x1
+#define E4_TSTORM_FCOE_TASK_AG_CTX_RULE2EN_SHIFT		4
+#define E4_TSTORM_FCOE_TASK_AG_CTX_RULE3EN_MASK			0x1
+#define E4_TSTORM_FCOE_TASK_AG_CTX_RULE3EN_SHIFT		5
+#define E4_TSTORM_FCOE_TASK_AG_CTX_RULE4EN_MASK			0x1
+#define E4_TSTORM_FCOE_TASK_AG_CTX_RULE4EN_SHIFT		6
+#define E4_TSTORM_FCOE_TASK_AG_CTX_RULE5EN_MASK			0x1
+#define E4_TSTORM_FCOE_TASK_AG_CTX_RULE5EN_SHIFT		7
 	u8 cleanup_state;
 	__le16 last_sent_tid;
 	__le32 rec_rr_tov_exp_timeout;
@@ -407,25 +287,46 @@ struct tstorm_fcoe_task_ag_ctx {
 	__le32 data_offset_next;
 };
 
+/* Expected relative offsets */
+struct fcoe_exp_ro {
+	__le32 data_offset;
+	__le32 reserved;
+};
+
+/* Union of Cleanup address \ expected relative offsets */
+union fcoe_cleanup_addr_exp_ro_union {
+	struct regpair abts_rsp_fc_payload_hi;
+	struct fcoe_exp_ro exp_ro;
+};
+
+/* Fields copied from the ABTS response packet */
+struct fcoe_abts_pkt {
+	__le32 abts_rsp_fc_payload_lo;
+	__le16 abts_rsp_rx_id;
+	u8 abts_rsp_rctl;
+	u8 reserved2;
+};
+
+/* FW read-write (modifiable) part of the fcoe task storm context of Tstorm */
 struct fcoe_tstorm_fcoe_task_st_ctx_read_write {
 	union fcoe_cleanup_addr_exp_ro_union cleanup_addr_exp_ro_union;
 	__le16 flags;
-#define FCOE_TSTORM_FCOE_TASK_ST_CTX_READ_WRITE_RX_SGL_MODE_MASK       0x1
-#define FCOE_TSTORM_FCOE_TASK_ST_CTX_READ_WRITE_RX_SGL_MODE_SHIFT      0
-#define FCOE_TSTORM_FCOE_TASK_ST_CTX_READ_WRITE_EXP_FIRST_FRAME_MASK   0x1
-#define FCOE_TSTORM_FCOE_TASK_ST_CTX_READ_WRITE_EXP_FIRST_FRAME_SHIFT  1
-#define FCOE_TSTORM_FCOE_TASK_ST_CTX_READ_WRITE_SEQ_ACTIVE_MASK        0x1
-#define FCOE_TSTORM_FCOE_TASK_ST_CTX_READ_WRITE_SEQ_ACTIVE_SHIFT       2
-#define FCOE_TSTORM_FCOE_TASK_ST_CTX_READ_WRITE_SEQ_TIMEOUT_MASK       0x1
-#define FCOE_TSTORM_FCOE_TASK_ST_CTX_READ_WRITE_SEQ_TIMEOUT_SHIFT      3
-#define FCOE_TSTORM_FCOE_TASK_ST_CTX_READ_WRITE_SINGLE_PKT_IN_EX_MASK  0x1
-#define FCOE_TSTORM_FCOE_TASK_ST_CTX_READ_WRITE_SINGLE_PKT_IN_EX_SHIFT 4
-#define FCOE_TSTORM_FCOE_TASK_ST_CTX_READ_WRITE_OOO_RX_SEQ_STAT_MASK   0x1
-#define FCOE_TSTORM_FCOE_TASK_ST_CTX_READ_WRITE_OOO_RX_SEQ_STAT_SHIFT  5
-#define FCOE_TSTORM_FCOE_TASK_ST_CTX_READ_WRITE_CQ_ADD_ADV_MASK        0x3
-#define FCOE_TSTORM_FCOE_TASK_ST_CTX_READ_WRITE_CQ_ADD_ADV_SHIFT       6
-#define FCOE_TSTORM_FCOE_TASK_ST_CTX_READ_WRITE_RSRV1_MASK             0xFF
-#define FCOE_TSTORM_FCOE_TASK_ST_CTX_READ_WRITE_RSRV1_SHIFT            8
+#define FCOE_TSTORM_FCOE_TASK_ST_CTX_READ_WRITE_RX_SGL_MODE_MASK	0x1
+#define FCOE_TSTORM_FCOE_TASK_ST_CTX_READ_WRITE_RX_SGL_MODE_SHIFT	0
+#define FCOE_TSTORM_FCOE_TASK_ST_CTX_READ_WRITE_EXP_FIRST_FRAME_MASK	0x1
+#define FCOE_TSTORM_FCOE_TASK_ST_CTX_READ_WRITE_EXP_FIRST_FRAME_SHIFT	1
+#define FCOE_TSTORM_FCOE_TASK_ST_CTX_READ_WRITE_SEQ_ACTIVE_MASK		0x1
+#define FCOE_TSTORM_FCOE_TASK_ST_CTX_READ_WRITE_SEQ_ACTIVE_SHIFT	2
+#define FCOE_TSTORM_FCOE_TASK_ST_CTX_READ_WRITE_SEQ_TIMEOUT_MASK	0x1
+#define FCOE_TSTORM_FCOE_TASK_ST_CTX_READ_WRITE_SEQ_TIMEOUT_SHIFT	3
+#define FCOE_TSTORM_FCOE_TASK_ST_CTX_READ_WRITE_SINGLE_PKT_IN_EX_MASK	0x1
+#define FCOE_TSTORM_FCOE_TASK_ST_CTX_READ_WRITE_SINGLE_PKT_IN_EX_SHIFT	4
+#define FCOE_TSTORM_FCOE_TASK_ST_CTX_READ_WRITE_OOO_RX_SEQ_STAT_MASK	0x1
+#define FCOE_TSTORM_FCOE_TASK_ST_CTX_READ_WRITE_OOO_RX_SEQ_STAT_SHIFT	5
+#define FCOE_TSTORM_FCOE_TASK_ST_CTX_READ_WRITE_CQ_ADD_ADV_MASK		0x3
+#define FCOE_TSTORM_FCOE_TASK_ST_CTX_READ_WRITE_CQ_ADD_ADV_SHIFT	6
+#define FCOE_TSTORM_FCOE_TASK_ST_CTX_READ_WRITE_RSRV1_MASK		0xFF
+#define FCOE_TSTORM_FCOE_TASK_ST_CTX_READ_WRITE_RSRV1_SHIFT		8
 	__le16 seq_cnt;
 	u8 seq_id;
 	u8 ooo_rx_seq_id;
@@ -436,6 +337,7 @@ struct fcoe_tstorm_fcoe_task_st_ctx_read_write {
 	__le16 reserved1;
 };
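
The *_MASK/*_SHIFT pairs above follow the usual qed HSI convention: a field
occupies ((value >> SHIFT) & MASK) of its flags word. A minimal editorial
sketch of generic accessors under that assumption (the helper names below are
illustrative, not the driver's own):

    /* Generic HSI bitfield helpers; names are illustrative only. */
    #define HSI_GET_FIELD(value, name) \
    	(((value) >> name##_SHIFT) & name##_MASK)
    #define HSI_SET_FIELD(value, name, fld) \
    	((value) = ((value) & ~(name##_MASK << name##_SHIFT)) | \
    		   (((fld) & name##_MASK) << name##_SHIFT))

    /* Example: flag a sequence as active in the Tstorm read/write part. */
    static inline void
    fcoe_mark_seq_active(struct fcoe_tstorm_fcoe_task_st_ctx_read_write *ctx)
    {
    	u16 flags = le16_to_cpu(ctx->flags);

    	HSI_SET_FIELD(flags,
    		      FCOE_TSTORM_FCOE_TASK_ST_CTX_READ_WRITE_SEQ_ACTIVE, 1);
    	ctx->flags = cpu_to_le16(flags);
    }

Note the flags word is little-endian on the wire, hence the
le16_to_cpu/cpu_to_le16 round trip.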
 
+/* FW read-only part of the FCoE task storm context of Tstorm */
 struct fcoe_tstorm_fcoe_task_st_ctx_read_only {
 	u8 task_type;
 	u8 dev_type;
@@ -446,54 +348,55 @@ struct fcoe_tstorm_fcoe_task_st_ctx_read_only {
 	__le32 rsrv;
 };
 
+/* The fcoe task storm context of Tstorm */
 struct tstorm_fcoe_task_st_ctx {
 	struct fcoe_tstorm_fcoe_task_st_ctx_read_write read_write;
 	struct fcoe_tstorm_fcoe_task_st_ctx_read_only read_only;
 };
 
-struct mstorm_fcoe_task_ag_ctx {
+struct e4_mstorm_fcoe_task_ag_ctx {
 	u8 byte0;
 	u8 byte1;
 	__le16 icid;
 	u8 flags0;
-#define MSTORM_FCOE_TASK_AG_CTX_CONNECTION_TYPE_MASK    0xF
-#define MSTORM_FCOE_TASK_AG_CTX_CONNECTION_TYPE_SHIFT   0
-#define MSTORM_FCOE_TASK_AG_CTX_EXIST_IN_QM0_MASK       0x1
-#define MSTORM_FCOE_TASK_AG_CTX_EXIST_IN_QM0_SHIFT      4
-#define MSTORM_FCOE_TASK_AG_CTX_CQE_PLACED_MASK         0x1
-#define MSTORM_FCOE_TASK_AG_CTX_CQE_PLACED_SHIFT        5
-#define MSTORM_FCOE_TASK_AG_CTX_BIT2_MASK               0x1
-#define MSTORM_FCOE_TASK_AG_CTX_BIT2_SHIFT              6
-#define MSTORM_FCOE_TASK_AG_CTX_BIT3_MASK               0x1
-#define MSTORM_FCOE_TASK_AG_CTX_BIT3_SHIFT              7
+#define E4_MSTORM_FCOE_TASK_AG_CTX_CONNECTION_TYPE_MASK		0xF
+#define E4_MSTORM_FCOE_TASK_AG_CTX_CONNECTION_TYPE_SHIFT	0
+#define E4_MSTORM_FCOE_TASK_AG_CTX_EXIST_IN_QM0_MASK		0x1
+#define E4_MSTORM_FCOE_TASK_AG_CTX_EXIST_IN_QM0_SHIFT		4
+#define E4_MSTORM_FCOE_TASK_AG_CTX_CQE_PLACED_MASK		0x1
+#define E4_MSTORM_FCOE_TASK_AG_CTX_CQE_PLACED_SHIFT		5
+#define E4_MSTORM_FCOE_TASK_AG_CTX_BIT2_MASK			0x1
+#define E4_MSTORM_FCOE_TASK_AG_CTX_BIT2_SHIFT			6
+#define E4_MSTORM_FCOE_TASK_AG_CTX_BIT3_MASK			0x1
+#define E4_MSTORM_FCOE_TASK_AG_CTX_BIT3_SHIFT			7
 	u8 flags1;
-#define MSTORM_FCOE_TASK_AG_CTX_EX_CLEANUP_CF_MASK      0x3
-#define MSTORM_FCOE_TASK_AG_CTX_EX_CLEANUP_CF_SHIFT     0
-#define MSTORM_FCOE_TASK_AG_CTX_CF1_MASK                0x3
-#define MSTORM_FCOE_TASK_AG_CTX_CF1_SHIFT               2
-#define MSTORM_FCOE_TASK_AG_CTX_CF2_MASK                0x3
-#define MSTORM_FCOE_TASK_AG_CTX_CF2_SHIFT               4
-#define MSTORM_FCOE_TASK_AG_CTX_EX_CLEANUP_CF_EN_MASK   0x1
-#define MSTORM_FCOE_TASK_AG_CTX_EX_CLEANUP_CF_EN_SHIFT  6
-#define MSTORM_FCOE_TASK_AG_CTX_CF1EN_MASK              0x1
-#define MSTORM_FCOE_TASK_AG_CTX_CF1EN_SHIFT             7
+#define E4_MSTORM_FCOE_TASK_AG_CTX_EX_CLEANUP_CF_MASK		0x3
+#define E4_MSTORM_FCOE_TASK_AG_CTX_EX_CLEANUP_CF_SHIFT		0
+#define E4_MSTORM_FCOE_TASK_AG_CTX_CF1_MASK			0x3
+#define E4_MSTORM_FCOE_TASK_AG_CTX_CF1_SHIFT			2
+#define E4_MSTORM_FCOE_TASK_AG_CTX_CF2_MASK			0x3
+#define E4_MSTORM_FCOE_TASK_AG_CTX_CF2_SHIFT			4
+#define E4_MSTORM_FCOE_TASK_AG_CTX_EX_CLEANUP_CF_EN_MASK	0x1
+#define E4_MSTORM_FCOE_TASK_AG_CTX_EX_CLEANUP_CF_EN_SHIFT	6
+#define E4_MSTORM_FCOE_TASK_AG_CTX_CF1EN_MASK			0x1
+#define E4_MSTORM_FCOE_TASK_AG_CTX_CF1EN_SHIFT			7
 	u8 flags2;
-#define MSTORM_FCOE_TASK_AG_CTX_CF2EN_MASK              0x1
-#define MSTORM_FCOE_TASK_AG_CTX_CF2EN_SHIFT             0
-#define MSTORM_FCOE_TASK_AG_CTX_RULE0EN_MASK            0x1
-#define MSTORM_FCOE_TASK_AG_CTX_RULE0EN_SHIFT           1
-#define MSTORM_FCOE_TASK_AG_CTX_RULE1EN_MASK            0x1
-#define MSTORM_FCOE_TASK_AG_CTX_RULE1EN_SHIFT           2
-#define MSTORM_FCOE_TASK_AG_CTX_RULE2EN_MASK            0x1
-#define MSTORM_FCOE_TASK_AG_CTX_RULE2EN_SHIFT           3
-#define MSTORM_FCOE_TASK_AG_CTX_RULE3EN_MASK            0x1
-#define MSTORM_FCOE_TASK_AG_CTX_RULE3EN_SHIFT           4
-#define MSTORM_FCOE_TASK_AG_CTX_RULE4EN_MASK            0x1
-#define MSTORM_FCOE_TASK_AG_CTX_RULE4EN_SHIFT           5
-#define MSTORM_FCOE_TASK_AG_CTX_XFER_PLACEMENT_EN_MASK  0x1
-#define MSTORM_FCOE_TASK_AG_CTX_XFER_PLACEMENT_EN_SHIFT 6
-#define MSTORM_FCOE_TASK_AG_CTX_RULE6EN_MASK            0x1
-#define MSTORM_FCOE_TASK_AG_CTX_RULE6EN_SHIFT           7
+#define E4_MSTORM_FCOE_TASK_AG_CTX_CF2EN_MASK			0x1
+#define E4_MSTORM_FCOE_TASK_AG_CTX_CF2EN_SHIFT			0
+#define E4_MSTORM_FCOE_TASK_AG_CTX_RULE0EN_MASK			0x1
+#define E4_MSTORM_FCOE_TASK_AG_CTX_RULE0EN_SHIFT		1
+#define E4_MSTORM_FCOE_TASK_AG_CTX_RULE1EN_MASK			0x1
+#define E4_MSTORM_FCOE_TASK_AG_CTX_RULE1EN_SHIFT		2
+#define E4_MSTORM_FCOE_TASK_AG_CTX_RULE2EN_MASK			0x1
+#define E4_MSTORM_FCOE_TASK_AG_CTX_RULE2EN_SHIFT		3
+#define E4_MSTORM_FCOE_TASK_AG_CTX_RULE3EN_MASK			0x1
+#define E4_MSTORM_FCOE_TASK_AG_CTX_RULE3EN_SHIFT		4
+#define E4_MSTORM_FCOE_TASK_AG_CTX_RULE4EN_MASK			0x1
+#define E4_MSTORM_FCOE_TASK_AG_CTX_RULE4EN_SHIFT		5
+#define E4_MSTORM_FCOE_TASK_AG_CTX_XFER_PLACEMENT_EN_MASK	0x1
+#define E4_MSTORM_FCOE_TASK_AG_CTX_XFER_PLACEMENT_EN_SHIFT	6
+#define E4_MSTORM_FCOE_TASK_AG_CTX_RULE6EN_MASK			0x1
+#define E4_MSTORM_FCOE_TASK_AG_CTX_RULE6EN_SHIFT		7
 	u8 cleanup_state;
 	__le32 received_bytes;
 	u8 byte3;
@@ -507,6 +410,7 @@ struct mstorm_fcoe_task_ag_ctx {
 	__le32 reg2;
 };
 
+/* The fcoe task storm context of Mstorm */
 struct mstorm_fcoe_task_st_ctx {
 	struct regpair rsp_buf_addr;
 	__le32 rsrv[2];
@@ -515,79 +419,79 @@ struct mstorm_fcoe_task_st_ctx {
 	__le32 data_buffer_offset;
 	__le16 parent_id;
 	__le16 flags;
-#define MSTORM_FCOE_TASK_ST_CTX_INTERVAL_SIZE_LOG_MASK     0xF
-#define MSTORM_FCOE_TASK_ST_CTX_INTERVAL_SIZE_LOG_SHIFT    0
-#define MSTORM_FCOE_TASK_ST_CTX_HOST_INTERFACE_MASK        0x3
-#define MSTORM_FCOE_TASK_ST_CTX_HOST_INTERFACE_SHIFT       4
-#define MSTORM_FCOE_TASK_ST_CTX_DIF_TO_PEER_MASK           0x1
-#define MSTORM_FCOE_TASK_ST_CTX_DIF_TO_PEER_SHIFT          6
-#define MSTORM_FCOE_TASK_ST_CTX_MP_INCLUDE_FC_HEADER_MASK  0x1
-#define MSTORM_FCOE_TASK_ST_CTX_MP_INCLUDE_FC_HEADER_SHIFT 7
-#define MSTORM_FCOE_TASK_ST_CTX_DIX_BLOCK_SIZE_MASK        0x3
-#define MSTORM_FCOE_TASK_ST_CTX_DIX_BLOCK_SIZE_SHIFT       8
-#define MSTORM_FCOE_TASK_ST_CTX_VALIDATE_DIX_REF_TAG_MASK  0x1
-#define MSTORM_FCOE_TASK_ST_CTX_VALIDATE_DIX_REF_TAG_SHIFT 10
-#define MSTORM_FCOE_TASK_ST_CTX_DIX_CACHED_SGE_FLG_MASK    0x1
-#define MSTORM_FCOE_TASK_ST_CTX_DIX_CACHED_SGE_FLG_SHIFT   11
-#define MSTORM_FCOE_TASK_ST_CTX_DIF_SUPPORTED_MASK         0x1
-#define MSTORM_FCOE_TASK_ST_CTX_DIF_SUPPORTED_SHIFT        12
-#define MSTORM_FCOE_TASK_ST_CTX_TX_SGL_MODE_MASK           0x1
-#define MSTORM_FCOE_TASK_ST_CTX_TX_SGL_MODE_SHIFT          13
-#define MSTORM_FCOE_TASK_ST_CTX_RESERVED_MASK              0x3
-#define MSTORM_FCOE_TASK_ST_CTX_RESERVED_SHIFT             14
+#define MSTORM_FCOE_TASK_ST_CTX_INTERVAL_SIZE_LOG_MASK		0xF
+#define MSTORM_FCOE_TASK_ST_CTX_INTERVAL_SIZE_LOG_SHIFT		0
+#define MSTORM_FCOE_TASK_ST_CTX_HOST_INTERFACE_MASK		0x3
+#define MSTORM_FCOE_TASK_ST_CTX_HOST_INTERFACE_SHIFT		4
+#define MSTORM_FCOE_TASK_ST_CTX_DIF_TO_PEER_MASK		0x1
+#define MSTORM_FCOE_TASK_ST_CTX_DIF_TO_PEER_SHIFT		6
+#define MSTORM_FCOE_TASK_ST_CTX_MP_INCLUDE_FC_HEADER_MASK	0x1
+#define MSTORM_FCOE_TASK_ST_CTX_MP_INCLUDE_FC_HEADER_SHIFT	7
+#define MSTORM_FCOE_TASK_ST_CTX_DIX_BLOCK_SIZE_MASK		0x3
+#define MSTORM_FCOE_TASK_ST_CTX_DIX_BLOCK_SIZE_SHIFT		8
+#define MSTORM_FCOE_TASK_ST_CTX_VALIDATE_DIX_REF_TAG_MASK	0x1
+#define MSTORM_FCOE_TASK_ST_CTX_VALIDATE_DIX_REF_TAG_SHIFT	10
+#define MSTORM_FCOE_TASK_ST_CTX_DIX_CACHED_SGE_FLG_MASK		0x1
+#define MSTORM_FCOE_TASK_ST_CTX_DIX_CACHED_SGE_FLG_SHIFT	11
+#define MSTORM_FCOE_TASK_ST_CTX_DIF_SUPPORTED_MASK		0x1
+#define MSTORM_FCOE_TASK_ST_CTX_DIF_SUPPORTED_SHIFT		12
+#define MSTORM_FCOE_TASK_ST_CTX_TX_SGL_MODE_MASK		0x1
+#define MSTORM_FCOE_TASK_ST_CTX_TX_SGL_MODE_SHIFT		13
+#define MSTORM_FCOE_TASK_ST_CTX_RESERVED_MASK			0x3
+#define MSTORM_FCOE_TASK_ST_CTX_RESERVED_SHIFT			14
 	struct scsi_cached_sges data_desc;
 };
 
-struct ustorm_fcoe_task_ag_ctx {
+struct e4_ustorm_fcoe_task_ag_ctx {
 	u8 reserved;
 	u8 byte1;
 	__le16 icid;
 	u8 flags0;
-#define USTORM_FCOE_TASK_AG_CTX_CONNECTION_TYPE_MASK  0xF
-#define USTORM_FCOE_TASK_AG_CTX_CONNECTION_TYPE_SHIFT 0
-#define USTORM_FCOE_TASK_AG_CTX_EXIST_IN_QM0_MASK     0x1
-#define USTORM_FCOE_TASK_AG_CTX_EXIST_IN_QM0_SHIFT    4
-#define USTORM_FCOE_TASK_AG_CTX_BIT1_MASK             0x1
-#define USTORM_FCOE_TASK_AG_CTX_BIT1_SHIFT            5
-#define USTORM_FCOE_TASK_AG_CTX_CF0_MASK              0x3
-#define USTORM_FCOE_TASK_AG_CTX_CF0_SHIFT             6
+#define E4_USTORM_FCOE_TASK_AG_CTX_CONNECTION_TYPE_MASK		0xF
+#define E4_USTORM_FCOE_TASK_AG_CTX_CONNECTION_TYPE_SHIFT	0
+#define E4_USTORM_FCOE_TASK_AG_CTX_EXIST_IN_QM0_MASK		0x1
+#define E4_USTORM_FCOE_TASK_AG_CTX_EXIST_IN_QM0_SHIFT		4
+#define E4_USTORM_FCOE_TASK_AG_CTX_BIT1_MASK			0x1
+#define E4_USTORM_FCOE_TASK_AG_CTX_BIT1_SHIFT			5
+#define E4_USTORM_FCOE_TASK_AG_CTX_CF0_MASK			0x3
+#define E4_USTORM_FCOE_TASK_AG_CTX_CF0_SHIFT			6
 	u8 flags1;
-#define USTORM_FCOE_TASK_AG_CTX_CF1_MASK              0x3
-#define USTORM_FCOE_TASK_AG_CTX_CF1_SHIFT             0
-#define USTORM_FCOE_TASK_AG_CTX_CF2_MASK              0x3
-#define USTORM_FCOE_TASK_AG_CTX_CF2_SHIFT             2
-#define USTORM_FCOE_TASK_AG_CTX_CF3_MASK              0x3
-#define USTORM_FCOE_TASK_AG_CTX_CF3_SHIFT             4
-#define USTORM_FCOE_TASK_AG_CTX_DIF_ERROR_CF_MASK     0x3
-#define USTORM_FCOE_TASK_AG_CTX_DIF_ERROR_CF_SHIFT    6
+#define E4_USTORM_FCOE_TASK_AG_CTX_CF1_MASK		0x3
+#define E4_USTORM_FCOE_TASK_AG_CTX_CF1_SHIFT		0
+#define E4_USTORM_FCOE_TASK_AG_CTX_CF2_MASK		0x3
+#define E4_USTORM_FCOE_TASK_AG_CTX_CF2_SHIFT		2
+#define E4_USTORM_FCOE_TASK_AG_CTX_CF3_MASK		0x3
+#define E4_USTORM_FCOE_TASK_AG_CTX_CF3_SHIFT		4
+#define E4_USTORM_FCOE_TASK_AG_CTX_DIF_ERROR_CF_MASK	0x3
+#define E4_USTORM_FCOE_TASK_AG_CTX_DIF_ERROR_CF_SHIFT	6
 	u8 flags2;
-#define USTORM_FCOE_TASK_AG_CTX_CF0EN_MASK            0x1
-#define USTORM_FCOE_TASK_AG_CTX_CF0EN_SHIFT           0
-#define USTORM_FCOE_TASK_AG_CTX_CF1EN_MASK            0x1
-#define USTORM_FCOE_TASK_AG_CTX_CF1EN_SHIFT           1
-#define USTORM_FCOE_TASK_AG_CTX_CF2EN_MASK            0x1
-#define USTORM_FCOE_TASK_AG_CTX_CF2EN_SHIFT           2
-#define USTORM_FCOE_TASK_AG_CTX_CF3EN_MASK            0x1
-#define USTORM_FCOE_TASK_AG_CTX_CF3EN_SHIFT           3
-#define USTORM_FCOE_TASK_AG_CTX_DIF_ERROR_CF_EN_MASK  0x1
-#define USTORM_FCOE_TASK_AG_CTX_DIF_ERROR_CF_EN_SHIFT 4
-#define USTORM_FCOE_TASK_AG_CTX_RULE0EN_MASK          0x1
-#define USTORM_FCOE_TASK_AG_CTX_RULE0EN_SHIFT         5
-#define USTORM_FCOE_TASK_AG_CTX_RULE1EN_MASK          0x1
-#define USTORM_FCOE_TASK_AG_CTX_RULE1EN_SHIFT         6
-#define USTORM_FCOE_TASK_AG_CTX_RULE2EN_MASK          0x1
-#define USTORM_FCOE_TASK_AG_CTX_RULE2EN_SHIFT         7
+#define E4_USTORM_FCOE_TASK_AG_CTX_CF0EN_MASK			0x1
+#define E4_USTORM_FCOE_TASK_AG_CTX_CF0EN_SHIFT			0
+#define E4_USTORM_FCOE_TASK_AG_CTX_CF1EN_MASK			0x1
+#define E4_USTORM_FCOE_TASK_AG_CTX_CF1EN_SHIFT			1
+#define E4_USTORM_FCOE_TASK_AG_CTX_CF2EN_MASK			0x1
+#define E4_USTORM_FCOE_TASK_AG_CTX_CF2EN_SHIFT			2
+#define E4_USTORM_FCOE_TASK_AG_CTX_CF3EN_MASK			0x1
+#define E4_USTORM_FCOE_TASK_AG_CTX_CF3EN_SHIFT			3
+#define E4_USTORM_FCOE_TASK_AG_CTX_DIF_ERROR_CF_EN_MASK		0x1
+#define E4_USTORM_FCOE_TASK_AG_CTX_DIF_ERROR_CF_EN_SHIFT	4
+#define E4_USTORM_FCOE_TASK_AG_CTX_RULE0EN_MASK			0x1
+#define E4_USTORM_FCOE_TASK_AG_CTX_RULE0EN_SHIFT		5
+#define E4_USTORM_FCOE_TASK_AG_CTX_RULE1EN_MASK			0x1
+#define E4_USTORM_FCOE_TASK_AG_CTX_RULE1EN_SHIFT		6
+#define E4_USTORM_FCOE_TASK_AG_CTX_RULE2EN_MASK			0x1
+#define E4_USTORM_FCOE_TASK_AG_CTX_RULE2EN_SHIFT		7
 	u8 flags3;
-#define USTORM_FCOE_TASK_AG_CTX_RULE3EN_MASK          0x1
-#define USTORM_FCOE_TASK_AG_CTX_RULE3EN_SHIFT         0
-#define USTORM_FCOE_TASK_AG_CTX_RULE4EN_MASK          0x1
-#define USTORM_FCOE_TASK_AG_CTX_RULE4EN_SHIFT         1
-#define USTORM_FCOE_TASK_AG_CTX_RULE5EN_MASK          0x1
-#define USTORM_FCOE_TASK_AG_CTX_RULE5EN_SHIFT         2
-#define USTORM_FCOE_TASK_AG_CTX_RULE6EN_MASK          0x1
-#define USTORM_FCOE_TASK_AG_CTX_RULE6EN_SHIFT         3
-#define USTORM_FCOE_TASK_AG_CTX_DIF_ERROR_TYPE_MASK   0xF
-#define USTORM_FCOE_TASK_AG_CTX_DIF_ERROR_TYPE_SHIFT  4
+#define E4_USTORM_FCOE_TASK_AG_CTX_RULE3EN_MASK		0x1
+#define E4_USTORM_FCOE_TASK_AG_CTX_RULE3EN_SHIFT	0
+#define E4_USTORM_FCOE_TASK_AG_CTX_RULE4EN_MASK		0x1
+#define E4_USTORM_FCOE_TASK_AG_CTX_RULE4EN_SHIFT	1
+#define E4_USTORM_FCOE_TASK_AG_CTX_RULE5EN_MASK		0x1
+#define E4_USTORM_FCOE_TASK_AG_CTX_RULE5EN_SHIFT	2
+#define E4_USTORM_FCOE_TASK_AG_CTX_RULE6EN_MASK		0x1
+#define E4_USTORM_FCOE_TASK_AG_CTX_RULE6EN_SHIFT	3
+#define E4_USTORM_FCOE_TASK_AG_CTX_DIF_ERROR_TYPE_MASK	0xF
+#define E4_USTORM_FCOE_TASK_AG_CTX_DIF_ERROR_TYPE_SHIFT	4
 	__le32 dif_err_intervals;
 	__le32 dif_error_1st_interval;
 	__le32 global_cq_num;
@@ -596,21 +500,189 @@ struct ustorm_fcoe_task_ag_ctx {
 	__le32 reg5;
 };
 
-struct fcoe_task_context {
+/* FCoE task context */
+struct e4_fcoe_task_context {
 	struct ystorm_fcoe_task_st_ctx ystorm_st_context;
 	struct regpair ystorm_st_padding[2];
 	struct tdif_task_context tdif_context;
-	struct ystorm_fcoe_task_ag_ctx ystorm_ag_context;
-	struct tstorm_fcoe_task_ag_ctx tstorm_ag_context;
+	struct e4_ystorm_fcoe_task_ag_ctx ystorm_ag_context;
+	struct e4_tstorm_fcoe_task_ag_ctx tstorm_ag_context;
 	struct timers_context timer_context;
 	struct tstorm_fcoe_task_st_ctx tstorm_st_context;
 	struct regpair tstorm_st_padding[2];
-	struct mstorm_fcoe_task_ag_ctx mstorm_ag_context;
+	struct e4_mstorm_fcoe_task_ag_ctx mstorm_ag_context;
 	struct mstorm_fcoe_task_st_ctx mstorm_st_context;
-	struct ustorm_fcoe_task_ag_ctx ustorm_ag_context;
+	struct e4_ustorm_fcoe_task_ag_ctx ustorm_ag_context;
 	struct rdif_task_context rdif_context;
 };
 
+/* FCoE additional WQE (Sq/XferQ) information */
+union fcoe_additional_info_union {
+	__le32 previous_tid;
+	__le32 parent_tid;
+	__le32 burst_length;
+	__le32 seq_rec_updated_offset;
+};
+
+/* FCoE ramrod completion status */
+enum fcoe_completion_status {
+	FCOE_COMPLETION_STATUS_SUCCESS,
+	FCOE_COMPLETION_STATUS_FCOE_VER_ERR,
+	FCOE_COMPLETION_STATUS_SRC_MAC_ADD_ARR_ERR,
+	MAX_FCOE_COMPLETION_STATUS
+};
+
+/* FC address (SID/DID) network presentation */
+struct fc_addr_nw {
+	u8 addr_lo;
+	u8 addr_mid;
+	u8 addr_hi;
+};
+
+/* FCoE connection offload */
+struct fcoe_conn_offload_ramrod_data {
+	struct regpair sq_pbl_addr;
+	struct regpair sq_curr_page_addr;
+	struct regpair sq_next_page_addr;
+	struct regpair xferq_pbl_addr;
+	struct regpair xferq_curr_page_addr;
+	struct regpair xferq_next_page_addr;
+	struct regpair respq_pbl_addr;
+	struct regpair respq_curr_page_addr;
+	struct regpair respq_next_page_addr;
+	__le16 dst_mac_addr_lo;
+	__le16 dst_mac_addr_mid;
+	__le16 dst_mac_addr_hi;
+	__le16 src_mac_addr_lo;
+	__le16 src_mac_addr_mid;
+	__le16 src_mac_addr_hi;
+	__le16 tx_max_fc_pay_len;
+	__le16 e_d_tov_timer_val;
+	__le16 rx_max_fc_pay_len;
+	__le16 vlan_tag;
+#define FCOE_CONN_OFFLOAD_RAMROD_DATA_VLAN_ID_MASK	0xFFF
+#define FCOE_CONN_OFFLOAD_RAMROD_DATA_VLAN_ID_SHIFT	0
+#define FCOE_CONN_OFFLOAD_RAMROD_DATA_CFI_MASK		0x1
+#define FCOE_CONN_OFFLOAD_RAMROD_DATA_CFI_SHIFT		12
+#define FCOE_CONN_OFFLOAD_RAMROD_DATA_PRIORITY_MASK	0x7
+#define FCOE_CONN_OFFLOAD_RAMROD_DATA_PRIORITY_SHIFT	13
+	__le16 physical_q0;
+	__le16 rec_rr_tov_timer_val;
+	struct fc_addr_nw s_id;
+	u8 max_conc_seqs_c3;
+	struct fc_addr_nw d_id;
+	u8 flags;
+#define FCOE_CONN_OFFLOAD_RAMROD_DATA_B_CONT_INCR_SEQ_CNT_MASK	0x1
+#define FCOE_CONN_OFFLOAD_RAMROD_DATA_B_CONT_INCR_SEQ_CNT_SHIFT	0
+#define FCOE_CONN_OFFLOAD_RAMROD_DATA_B_CONF_REQ_MASK		0x1
+#define FCOE_CONN_OFFLOAD_RAMROD_DATA_B_CONF_REQ_SHIFT		1
+#define FCOE_CONN_OFFLOAD_RAMROD_DATA_B_REC_VALID_MASK		0x1
+#define FCOE_CONN_OFFLOAD_RAMROD_DATA_B_REC_VALID_SHIFT		2
+#define FCOE_CONN_OFFLOAD_RAMROD_DATA_B_VLAN_FLAG_MASK		0x1
+#define FCOE_CONN_OFFLOAD_RAMROD_DATA_B_VLAN_FLAG_SHIFT		3
+#define FCOE_CONN_OFFLOAD_RAMROD_DATA_B_SINGLE_VLAN_MASK	0x1
+#define FCOE_CONN_OFFLOAD_RAMROD_DATA_B_SINGLE_VLAN_SHIFT	4
+#define FCOE_CONN_OFFLOAD_RAMROD_DATA_MODE_MASK			0x3
+#define FCOE_CONN_OFFLOAD_RAMROD_DATA_MODE_SHIFT		5
+#define FCOE_CONN_OFFLOAD_RAMROD_DATA_RESERVED0_MASK		0x1
+#define FCOE_CONN_OFFLOAD_RAMROD_DATA_RESERVED0_SHIFT		7
+	__le16 conn_id;
+	u8 def_q_idx;
+	u8 reserved[5];
+};
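
A short sketch (editorial, not part of the patch) of composing the vlan_tag
word of the offload ramrod from its three subfields, assuming the usual
little-endian conversion at the end:

    static inline __le16 fcoe_build_vlan_tag(u16 vid, u8 cfi, u8 prio)
    {
    	u16 tag = 0;

    	tag |= (vid & FCOE_CONN_OFFLOAD_RAMROD_DATA_VLAN_ID_MASK) <<
    	       FCOE_CONN_OFFLOAD_RAMROD_DATA_VLAN_ID_SHIFT;
    	tag |= (cfi & FCOE_CONN_OFFLOAD_RAMROD_DATA_CFI_MASK) <<
    	       FCOE_CONN_OFFLOAD_RAMROD_DATA_CFI_SHIFT;
    	tag |= (prio & FCOE_CONN_OFFLOAD_RAMROD_DATA_PRIORITY_MASK) <<
    	       FCOE_CONN_OFFLOAD_RAMROD_DATA_PRIORITY_SHIFT;

    	return cpu_to_le16(tag);
    }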
+
+/* FCoE terminate connection request */
+struct fcoe_conn_terminate_ramrod_data {
+	struct regpair terminate_params_addr;
+};
+
+/* FCoE device type */
+enum fcoe_device_type {
+	FCOE_TASK_DEV_TYPE_DISK,
+	FCOE_TASK_DEV_TYPE_TAPE,
+	MAX_FCOE_DEVICE_TYPE
+};
+
+/* Data sgl */
+struct fcoe_fast_sgl_ctx {
+	struct regpair sgl_start_addr;
+	__le32 sgl_byte_offset;
+	__le16 task_reuse_cnt;
+	__le16 init_offset_in_first_sge;
+};
+
+/* FCoE firmware function init */
+struct fcoe_init_func_ramrod_data {
+	struct scsi_init_func_params func_params;
+	struct scsi_init_func_queues q_params;
+	__le16 mtu;
+	__le16 sq_num_pages_in_pbl;
+	__le32 reserved[3];
+};
+
+/* FCoE mode of the connection: target, initiator, or both */
+enum fcoe_mode_type {
+	FCOE_INITIATOR_MODE = 0x0,
+	FCOE_TARGET_MODE = 0x1,
+	FCOE_BOTH_OR_NOT_CHOSEN = 0x3,
+	MAX_FCOE_MODE_TYPE
+};
+
+/* Per PF FCoE receive path statistics - tStorm RAM structure */
+struct fcoe_rx_stat {
+	struct regpair fcoe_rx_byte_cnt;
+	struct regpair fcoe_rx_data_pkt_cnt;
+	struct regpair fcoe_rx_xfer_pkt_cnt;
+	struct regpair fcoe_rx_other_pkt_cnt;
+	__le32 fcoe_silent_drop_pkt_cmdq_full_cnt;
+	__le32 fcoe_silent_drop_pkt_rq_full_cnt;
+	__le32 fcoe_silent_drop_pkt_crc_error_cnt;
+	__le32 fcoe_silent_drop_pkt_task_invalid_cnt;
+	__le32 fcoe_silent_drop_total_pkt_cnt;
+	__le32 rsrv;
+};
+
+/* FCoE SQE request type */
+enum fcoe_sqe_request_type {
+	SEND_FCOE_CMD,
+	SEND_FCOE_MIDPATH,
+	SEND_FCOE_ABTS_REQUEST,
+	FCOE_EXCHANGE_CLEANUP,
+	FCOE_SEQUENCE_RECOVERY,
+	SEND_FCOE_XFER_RDY,
+	SEND_FCOE_RSP,
+	SEND_FCOE_RSP_WITH_SENSE_DATA,
+	SEND_FCOE_TARGET_DATA,
+	SEND_FCOE_INITIATOR_DATA,
+	SEND_FCOE_XFER_CONTINUATION_RDY,
+	SEND_FCOE_TARGET_ABTS_RSP,
+	MAX_FCOE_SQE_REQUEST_TYPE
+};
+
+/* FCoE statistics request */
+struct fcoe_stat_ramrod_data {
+	struct regpair stat_params_addr;
+};
+
+/* FCoE task type */
+enum fcoe_task_type {
+	FCOE_TASK_TYPE_WRITE_INITIATOR,
+	FCOE_TASK_TYPE_READ_INITIATOR,
+	FCOE_TASK_TYPE_MIDPATH,
+	FCOE_TASK_TYPE_UNSOLICITED,
+	FCOE_TASK_TYPE_ABTS,
+	FCOE_TASK_TYPE_EXCHANGE_CLEANUP,
+	FCOE_TASK_TYPE_SEQUENCE_CLEANUP,
+	FCOE_TASK_TYPE_WRITE_TARGET,
+	FCOE_TASK_TYPE_READ_TARGET,
+	FCOE_TASK_TYPE_RSP,
+	FCOE_TASK_TYPE_RSP_SENSE_DATA,
+	FCOE_TASK_TYPE_ABTS_TARGET,
+	FCOE_TASK_TYPE_ENUM_SIZE,
+	MAX_FCOE_TASK_TYPE
+};
+
+/* Per PF FCoE transmit path statistics - pStorm RAM structure */
 struct fcoe_tx_stat {
 	struct regpair fcoe_tx_byte_cnt;
 	struct regpair fcoe_tx_data_pkt_cnt;
@@ -618,51 +690,55 @@ struct fcoe_tx_stat {
 	struct regpair fcoe_tx_other_pkt_cnt;
 };
 
+/* FCoE SQ/XferQ element */
 struct fcoe_wqe {
 	__le16 task_id;
 	__le16 flags;
-#define FCOE_WQE_REQ_TYPE_MASK       0xF
-#define FCOE_WQE_REQ_TYPE_SHIFT      0
-#define FCOE_WQE_SGL_MODE_MASK       0x1
-#define FCOE_WQE_SGL_MODE_SHIFT      4
-#define FCOE_WQE_CONTINUATION_MASK   0x1
-#define FCOE_WQE_CONTINUATION_SHIFT  5
-#define FCOE_WQE_SEND_AUTO_RSP_MASK  0x1
-#define FCOE_WQE_SEND_AUTO_RSP_SHIFT 6
-#define FCOE_WQE_RESERVED_MASK       0x1
-#define FCOE_WQE_RESERVED_SHIFT      7
-#define FCOE_WQE_NUM_SGES_MASK       0xF
-#define FCOE_WQE_NUM_SGES_SHIFT      8
-#define FCOE_WQE_RESERVED1_MASK      0xF
-#define FCOE_WQE_RESERVED1_SHIFT     12
+#define FCOE_WQE_REQ_TYPE_MASK		0xF
+#define FCOE_WQE_REQ_TYPE_SHIFT		0
+#define FCOE_WQE_SGL_MODE_MASK		0x1
+#define FCOE_WQE_SGL_MODE_SHIFT		4
+#define FCOE_WQE_CONTINUATION_MASK	0x1
+#define FCOE_WQE_CONTINUATION_SHIFT	5
+#define FCOE_WQE_SEND_AUTO_RSP_MASK	0x1
+#define FCOE_WQE_SEND_AUTO_RSP_SHIFT	6
+#define FCOE_WQE_RESERVED_MASK		0x1
+#define FCOE_WQE_RESERVED_SHIFT		7
+#define FCOE_WQE_NUM_SGES_MASK		0xF
+#define FCOE_WQE_NUM_SGES_SHIFT		8
+#define FCOE_WQE_RESERVED1_MASK		0xF
+#define FCOE_WQE_RESERVED1_SHIFT	12
 	union fcoe_additional_info_union additional_info_union;
 };
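
A sketch of initializing an SQ WQE for a new command; SEND_FCOE_CMD comes from
the request-type enum above, and the SGL-mode/continuation bits are left at
their zero defaults here:

    static inline void
    fcoe_prep_cmd_wqe(struct fcoe_wqe *wqe, u16 tid, u8 num_sges)
    {
    	u16 flags = 0;

    	flags |= (SEND_FCOE_CMD & FCOE_WQE_REQ_TYPE_MASK) <<
    		 FCOE_WQE_REQ_TYPE_SHIFT;
    	flags |= (num_sges & FCOE_WQE_NUM_SGES_MASK) <<
    		 FCOE_WQE_NUM_SGES_SHIFT;

    	wqe->task_id = cpu_to_le16(tid);
    	wqe->flags = cpu_to_le16(flags);
    }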
 
+/* FCoE XFRQ element protection flags */
 struct xfrqe_prot_flags {
 	u8 flags;
-#define XFRQE_PROT_FLAGS_PROT_INTERVAL_SIZE_LOG_MASK  0xF
-#define XFRQE_PROT_FLAGS_PROT_INTERVAL_SIZE_LOG_SHIFT 0
-#define XFRQE_PROT_FLAGS_DIF_TO_PEER_MASK             0x1
-#define XFRQE_PROT_FLAGS_DIF_TO_PEER_SHIFT            4
-#define XFRQE_PROT_FLAGS_HOST_INTERFACE_MASK          0x3
-#define XFRQE_PROT_FLAGS_HOST_INTERFACE_SHIFT         5
-#define XFRQE_PROT_FLAGS_RESERVED_MASK                0x1
-#define XFRQE_PROT_FLAGS_RESERVED_SHIFT               7
+#define XFRQE_PROT_FLAGS_PROT_INTERVAL_SIZE_LOG_MASK	0xF
+#define XFRQE_PROT_FLAGS_PROT_INTERVAL_SIZE_LOG_SHIFT	0
+#define XFRQE_PROT_FLAGS_DIF_TO_PEER_MASK		0x1
+#define XFRQE_PROT_FLAGS_DIF_TO_PEER_SHIFT		4
+#define XFRQE_PROT_FLAGS_HOST_INTERFACE_MASK		0x3
+#define XFRQE_PROT_FLAGS_HOST_INTERFACE_SHIFT		5
+#define XFRQE_PROT_FLAGS_RESERVED_MASK			0x1
+#define XFRQE_PROT_FLAGS_RESERVED_SHIFT			7
 };
 
+/* FCoE doorbell data */
 struct fcoe_db_data {
 	u8 params;
-#define FCOE_DB_DATA_DEST_MASK         0x3
-#define FCOE_DB_DATA_DEST_SHIFT        0
-#define FCOE_DB_DATA_AGG_CMD_MASK      0x3
-#define FCOE_DB_DATA_AGG_CMD_SHIFT     2
-#define FCOE_DB_DATA_BYPASS_EN_MASK    0x1
-#define FCOE_DB_DATA_BYPASS_EN_SHIFT   4
-#define FCOE_DB_DATA_RESERVED_MASK     0x1
-#define FCOE_DB_DATA_RESERVED_SHIFT    5
-#define FCOE_DB_DATA_AGG_VAL_SEL_MASK  0x3
-#define FCOE_DB_DATA_AGG_VAL_SEL_SHIFT 6
+#define FCOE_DB_DATA_DEST_MASK		0x3
+#define FCOE_DB_DATA_DEST_SHIFT		0
+#define FCOE_DB_DATA_AGG_CMD_MASK	0x3
+#define FCOE_DB_DATA_AGG_CMD_SHIFT	2
+#define FCOE_DB_DATA_BYPASS_EN_MASK	0x1
+#define FCOE_DB_DATA_BYPASS_EN_SHIFT	4
+#define FCOE_DB_DATA_RESERVED_MASK	0x1
+#define FCOE_DB_DATA_RESERVED_SHIFT	5
+#define FCOE_DB_DATA_AGG_VAL_SEL_MASK	0x3
+#define FCOE_DB_DATA_AGG_VAL_SEL_SHIFT	6
 	u8 agg_flags;
 	__le16 sq_prod;
 };
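
A sketch of filling the doorbell record before ringing the SQ doorbell. The
destination and aggregation-command encodings (the 0 and 1 below) are
placeholders; the real values are defined in the common HSI:

    static inline void fcoe_fill_db_data(struct fcoe_db_data *db, u16 sq_prod)
    {
    	u8 params = 0;

    	/* 0 / 1 are illustrative; real encodings live in the common HSI */
    	params |= (0 & FCOE_DB_DATA_DEST_MASK) << FCOE_DB_DATA_DEST_SHIFT;
    	params |= (1 & FCOE_DB_DATA_AGG_CMD_MASK) <<
    		  FCOE_DB_DATA_AGG_CMD_SHIFT;

    	db->params = params;
    	db->agg_flags = 0;
    	db->sq_prod = cpu_to_le16(sq_prod);
    }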
+
 #endif /* __FCOE_COMMON__ */
diff --git a/include/linux/qed/iscsi_common.h b/include/linux/qed/iscsi_common.h
index 85e086cba639..4cc9b37b8d95 100644
--- a/include/linux/qed/iscsi_common.h
+++ b/include/linux/qed/iscsi_common.h
@@ -32,47 +32,48 @@
 
 #ifndef __ISCSI_COMMON__
 #define __ISCSI_COMMON__
+
 /**********************/
 /* ISCSI FW CONSTANTS */
 /**********************/
 
 /* iSCSI HSI constants */
-#define ISCSI_DEFAULT_MTU       (1500)
+#define ISCSI_DEFAULT_MTU	(1500)
 
 /* KWQ (kernel work queue) layer codes */
-#define ISCSI_SLOW_PATH_LAYER_CODE   (6)
+#define ISCSI_SLOW_PATH_LAYER_CODE	(6)
 
 /* iSCSI parameter defaults */
-#define ISCSI_DEFAULT_HEADER_DIGEST         (0)
-#define ISCSI_DEFAULT_DATA_DIGEST           (0)
-#define ISCSI_DEFAULT_INITIAL_R2T           (1)
-#define ISCSI_DEFAULT_IMMEDIATE_DATA        (1)
-#define ISCSI_DEFAULT_MAX_PDU_LENGTH        (0x2000)
-#define ISCSI_DEFAULT_FIRST_BURST_LENGTH    (0x10000)
-#define ISCSI_DEFAULT_MAX_BURST_LENGTH      (0x40000)
-#define ISCSI_DEFAULT_MAX_OUTSTANDING_R2T   (1)
+#define ISCSI_DEFAULT_HEADER_DIGEST		(0)
+#define ISCSI_DEFAULT_DATA_DIGEST		(0)
+#define ISCSI_DEFAULT_INITIAL_R2T		(1)
+#define ISCSI_DEFAULT_IMMEDIATE_DATA		(1)
+#define ISCSI_DEFAULT_MAX_PDU_LENGTH		(0x2000)
+#define ISCSI_DEFAULT_FIRST_BURST_LENGTH	(0x10000)
+#define ISCSI_DEFAULT_MAX_BURST_LENGTH		(0x40000)
+#define ISCSI_DEFAULT_MAX_OUTSTANDING_R2T	(1)
 
 /* iSCSI parameter limits */
-#define ISCSI_MIN_VAL_MAX_PDU_LENGTH        (0x200)
-#define ISCSI_MAX_VAL_MAX_PDU_LENGTH        (0xffffff)
-#define ISCSI_MIN_VAL_BURST_LENGTH          (0x200)
-#define ISCSI_MAX_VAL_BURST_LENGTH          (0xffffff)
-#define ISCSI_MIN_VAL_MAX_OUTSTANDING_R2T   (1)
-#define ISCSI_MAX_VAL_MAX_OUTSTANDING_R2T   (0xff)
+#define ISCSI_MIN_VAL_MAX_PDU_LENGTH		(0x200)
+#define ISCSI_MAX_VAL_MAX_PDU_LENGTH		(0xffffff)
+#define ISCSI_MIN_VAL_BURST_LENGTH		(0x200)
+#define ISCSI_MAX_VAL_BURST_LENGTH		(0xffffff)
+#define ISCSI_MIN_VAL_MAX_OUTSTANDING_R2T	(1)
+#define ISCSI_MAX_VAL_MAX_OUTSTANDING_R2T	(0xff)
 
-#define ISCSI_AHS_CNTL_SIZE 4
+#define ISCSI_AHS_CNTL_SIZE	4
 
-#define ISCSI_WQE_NUM_SGES_SLOWIO           (0xf)
+#define ISCSI_WQE_NUM_SGES_SLOWIO	(0xf)
 
 /* iSCSI reserved params */
 #define ISCSI_ITT_ALL_ONES	(0xffffffff)
 #define ISCSI_TTT_ALL_ONES	(0xffffffff)
 
-#define ISCSI_OPTION_1_OFF_CHIP_TCP 1
-#define ISCSI_OPTION_2_ON_CHIP_TCP 2
+#define ISCSI_OPTION_1_OFF_CHIP_TCP	1
+#define ISCSI_OPTION_2_ON_CHIP_TCP	2
 
-#define ISCSI_INITIATOR_MODE 0
-#define ISCSI_TARGET_MODE 1
+#define ISCSI_INITIATOR_MODE	0
+#define ISCSI_TARGET_MODE	1
 
 /* iSCSI request op codes */
 #define ISCSI_OPCODE_NOP_OUT		(0)
@@ -84,41 +85,48 @@
 #define ISCSI_OPCODE_LOGOUT_REQUEST	(6)
 
 /* iSCSI response/messages op codes */
-#define ISCSI_OPCODE_NOP_IN             (0x20)
-#define ISCSI_OPCODE_SCSI_RESPONSE      (0x21)
-#define ISCSI_OPCODE_TMF_RESPONSE       (0x22)
-#define ISCSI_OPCODE_LOGIN_RESPONSE     (0x23)
-#define ISCSI_OPCODE_TEXT_RESPONSE      (0x24)
-#define ISCSI_OPCODE_DATA_IN            (0x25)
-#define ISCSI_OPCODE_LOGOUT_RESPONSE    (0x26)
-#define ISCSI_OPCODE_R2T                (0x31)
-#define ISCSI_OPCODE_ASYNC_MSG          (0x32)
-#define ISCSI_OPCODE_REJECT             (0x3f)
+#define ISCSI_OPCODE_NOP_IN		(0x20)
+#define ISCSI_OPCODE_SCSI_RESPONSE	(0x21)
+#define ISCSI_OPCODE_TMF_RESPONSE	(0x22)
+#define ISCSI_OPCODE_LOGIN_RESPONSE	(0x23)
+#define ISCSI_OPCODE_TEXT_RESPONSE	(0x24)
+#define ISCSI_OPCODE_DATA_IN		(0x25)
+#define ISCSI_OPCODE_LOGOUT_RESPONSE	(0x26)
+#define ISCSI_OPCODE_R2T		(0x31)
+#define ISCSI_OPCODE_ASYNC_MSG		(0x32)
+#define ISCSI_OPCODE_REJECT		(0x3f)
 
 /* iSCSI stages */
-#define ISCSI_STAGE_SECURITY_NEGOTIATION            (0)
-#define ISCSI_STAGE_LOGIN_OPERATIONAL_NEGOTIATION   (1)
-#define ISCSI_STAGE_FULL_FEATURE_PHASE              (3)
+#define ISCSI_STAGE_SECURITY_NEGOTIATION		(0)
+#define ISCSI_STAGE_LOGIN_OPERATIONAL_NEGOTIATION	(1)
+#define ISCSI_STAGE_FULL_FEATURE_PHASE			(3)
 
 /* iSCSI CQE errors */
-#define CQE_ERROR_BITMAP_DATA_DIGEST          (0x08)
-#define CQE_ERROR_BITMAP_RCV_ON_INVALID_CONN  (0x10)
-#define CQE_ERROR_BITMAP_DATA_TRUNCATED       (0x20)
+#define CQE_ERROR_BITMAP_DATA_DIGEST		(0x08)
+#define CQE_ERROR_BITMAP_RCV_ON_INVALID_CONN	(0x10)
+#define CQE_ERROR_BITMAP_DATA_TRUNCATED		(0x20)
+
+/* Union of data bd_opaque / tq_tid */
+union bd_opaque_tq_union {
+	__le16 bd_opaque;
+	__le16 tq_tid;
+};
 
+/* iSCSI CQE error bitmap */
 struct cqe_error_bitmap {
 	u8 cqe_error_status_bits;
-#define CQE_ERROR_BITMAP_DIF_ERR_BITS_MASK         0x7
-#define CQE_ERROR_BITMAP_DIF_ERR_BITS_SHIFT        0
-#define CQE_ERROR_BITMAP_DATA_DIGEST_ERR_MASK      0x1
-#define CQE_ERROR_BITMAP_DATA_DIGEST_ERR_SHIFT     3
-#define CQE_ERROR_BITMAP_RCV_ON_INVALID_CONN_MASK  0x1
-#define CQE_ERROR_BITMAP_RCV_ON_INVALID_CONN_SHIFT 4
-#define CQE_ERROR_BITMAP_DATA_TRUNCATED_ERR_MASK   0x1
-#define CQE_ERROR_BITMAP_DATA_TRUNCATED_ERR_SHIFT  5
-#define CQE_ERROR_BITMAP_UNDER_RUN_ERR_MASK        0x1
-#define CQE_ERROR_BITMAP_UNDER_RUN_ERR_SHIFT       6
-#define CQE_ERROR_BITMAP_RESERVED2_MASK            0x1
-#define CQE_ERROR_BITMAP_RESERVED2_SHIFT           7
+#define CQE_ERROR_BITMAP_DIF_ERR_BITS_MASK		0x7
+#define CQE_ERROR_BITMAP_DIF_ERR_BITS_SHIFT		0
+#define CQE_ERROR_BITMAP_DATA_DIGEST_ERR_MASK		0x1
+#define CQE_ERROR_BITMAP_DATA_DIGEST_ERR_SHIFT		3
+#define CQE_ERROR_BITMAP_RCV_ON_INVALID_CONN_MASK	0x1
+#define CQE_ERROR_BITMAP_RCV_ON_INVALID_CONN_SHIFT	4
+#define CQE_ERROR_BITMAP_DATA_TRUNCATED_ERR_MASK	0x1
+#define CQE_ERROR_BITMAP_DATA_TRUNCATED_ERR_SHIFT	5
+#define CQE_ERROR_BITMAP_UNDER_RUN_ERR_MASK		0x1
+#define CQE_ERROR_BITMAP_UNDER_RUN_ERR_SHIFT		6
+#define CQE_ERROR_BITMAP_RESERVED2_MASK			0x1
+#define CQE_ERROR_BITMAP_RESERVED2_SHIFT		7
 };
 
 union cqe_error_status {
@@ -126,86 +134,133 @@ union cqe_error_status {
 	struct cqe_error_bitmap error_bits;
 };
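
A sketch of testing one error bit through the bitmap view of the union:

    static inline bool
    iscsi_cqe_data_digest_err(const union cqe_error_status *err)
    {
    	return (err->error_bits.cqe_error_status_bits >>
    		CQE_ERROR_BITMAP_DATA_DIGEST_ERR_SHIFT) &
    	       CQE_ERROR_BITMAP_DATA_DIGEST_ERR_MASK;
    }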
 
+/* iSCSI PDU header data */
 struct data_hdr {
 	__le32 data[12];
 };
 
-struct iscsi_async_msg_hdr {
-	__le16 reserved0;
-	u8 flags_attr;
-#define ISCSI_ASYNC_MSG_HDR_RSRV_MASK           0x7F
-#define ISCSI_ASYNC_MSG_HDR_RSRV_SHIFT          0
-#define ISCSI_ASYNC_MSG_HDR_CONST1_MASK         0x1
-#define ISCSI_ASYNC_MSG_HDR_CONST1_SHIFT        7
-	u8 opcode;
-	__le32 hdr_second_dword;
-#define ISCSI_ASYNC_MSG_HDR_DATA_SEG_LEN_MASK   0xFFFFFF
-#define ISCSI_ASYNC_MSG_HDR_DATA_SEG_LEN_SHIFT  0
-#define ISCSI_ASYNC_MSG_HDR_TOTAL_AHS_LEN_MASK  0xFF
-#define ISCSI_ASYNC_MSG_HDR_TOTAL_AHS_LEN_SHIFT 24
-	struct regpair lun;
-	__le32 all_ones;
-	__le32 reserved1;
-	__le32 stat_sn;
-	__le32 exp_cmd_sn;
-	__le32 max_cmd_sn;
-	__le16 param1_rsrv;
-	u8 async_vcode;
-	u8 async_event;
-	__le16 param3_rsrv;
-	__le16 param2_rsrv;
-	__le32 reserved7;
+struct lun_mapper_addr_reserved {
+	struct regpair lun_mapper_addr;
+	u8 reserved0[8];
+};
+
+/* RDIF context for DIF on immediate */
+struct dif_on_immediate_params {
+	__le32 initial_ref_tag;
+	__le16 application_tag;
+	__le16 application_tag_mask;
+	__le16 flags1;
+#define DIF_ON_IMMEDIATE_PARAMS_VALIDATE_GUARD_MASK		0x1
+#define DIF_ON_IMMEDIATE_PARAMS_VALIDATE_GUARD_SHIFT		0
+#define DIF_ON_IMMEDIATE_PARAMS_VALIDATE_APP_TAG_MASK		0x1
+#define DIF_ON_IMMEDIATE_PARAMS_VALIDATE_APP_TAG_SHIFT		1
+#define DIF_ON_IMMEDIATE_PARAMS_VALIDATE_REF_TAG_MASK		0x1
+#define DIF_ON_IMMEDIATE_PARAMS_VALIDATE_REF_TAG_SHIFT		2
+#define DIF_ON_IMMEDIATE_PARAMS_FORWARD_GUARD_MASK		0x1
+#define DIF_ON_IMMEDIATE_PARAMS_FORWARD_GUARD_SHIFT		3
+#define DIF_ON_IMMEDIATE_PARAMS_FORWARD_APP_TAG_MASK		0x1
+#define DIF_ON_IMMEDIATE_PARAMS_FORWARD_APP_TAG_SHIFT		4
+#define DIF_ON_IMMEDIATE_PARAMS_FORWARD_REF_TAG_MASK		0x1
+#define DIF_ON_IMMEDIATE_PARAMS_FORWARD_REF_TAG_SHIFT		5
+#define DIF_ON_IMMEDIATE_PARAMS_INTERVAL_SIZE_MASK		0x1
+#define DIF_ON_IMMEDIATE_PARAMS_INTERVAL_SIZE_SHIFT		6
+#define DIF_ON_IMMEDIATE_PARAMS_NETWORK_INTERFACE_MASK		0x1
+#define DIF_ON_IMMEDIATE_PARAMS_NETWORK_INTERFACE_SHIFT		7
+#define DIF_ON_IMMEDIATE_PARAMS_HOST_INTERFACE_MASK		0x3
+#define DIF_ON_IMMEDIATE_PARAMS_HOST_INTERFACE_SHIFT		8
+#define DIF_ON_IMMEDIATE_PARAMS_REF_TAG_MASK_MASK		0xF
+#define DIF_ON_IMMEDIATE_PARAMS_REF_TAG_MASK_SHIFT		10
+#define DIF_ON_IMMEDIATE_PARAMS_FORWARD_APP_TAG_WITH_MASK_MASK	0x1
+#define DIF_ON_IMMEDIATE_PARAMS_FORWARD_APP_TAG_WITH_MASK_SHIFT	14
+#define DIF_ON_IMMEDIATE_PARAMS_FORWARD_REF_TAG_WITH_MASK_MASK	0x1
+#define DIF_ON_IMMEDIATE_PARAMS_FORWARD_REF_TAG_WITH_MASK_SHIFT	15
+	u8 flags0;
+#define DIF_ON_IMMEDIATE_PARAMS_RESERVED_MASK			0x1
+#define DIF_ON_IMMEDIATE_PARAMS_RESERVED_SHIFT			0
+#define DIF_ON_IMMEDIATE_PARAMS_IGNORE_APP_TAG_MASK		0x1
+#define DIF_ON_IMMEDIATE_PARAMS_IGNORE_APP_TAG_SHIFT		1
+#define DIF_ON_IMMEDIATE_PARAMS_INITIAL_REF_TAG_IS_VALID_MASK	0x1
+#define DIF_ON_IMMEDIATE_PARAMS_INITIAL_REF_TAG_IS_VALID_SHIFT	2
+#define DIF_ON_IMMEDIATE_PARAMS_HOST_GUARD_TYPE_MASK		0x1
+#define DIF_ON_IMMEDIATE_PARAMS_HOST_GUARD_TYPE_SHIFT		3
+#define DIF_ON_IMMEDIATE_PARAMS_PROTECTION_TYPE_MASK		0x3
+#define DIF_ON_IMMEDIATE_PARAMS_PROTECTION_TYPE_SHIFT		4
+#define DIF_ON_IMMEDIATE_PARAMS_CRC_SEED_MASK			0x1
+#define DIF_ON_IMMEDIATE_PARAMS_CRC_SEED_SHIFT			6
+#define DIF_ON_IMMEDIATE_PARAMS_KEEP_REF_TAG_CONST_MASK		0x1
+#define DIF_ON_IMMEDIATE_PARAMS_KEEP_REF_TAG_CONST_SHIFT	7
+	u8 reserved_zero[5];
+};
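
A sketch of enabling the three validate bits in flags1; note that flags1 is a
little-endian 16-bit word even though flags0 is a plain byte:

    static inline void
    dif_imm_set_validate(struct dif_on_immediate_params *p,
    		     bool guard, bool app_tag, bool ref_tag)
    {
    	u16 flags = le16_to_cpu(p->flags1);

    	flags |= (guard ? 1 : 0) <<
    		 DIF_ON_IMMEDIATE_PARAMS_VALIDATE_GUARD_SHIFT;
    	flags |= (app_tag ? 1 : 0) <<
    		 DIF_ON_IMMEDIATE_PARAMS_VALIDATE_APP_TAG_SHIFT;
    	flags |= (ref_tag ? 1 : 0) <<
    		 DIF_ON_IMMEDIATE_PARAMS_VALIDATE_REF_TAG_SHIFT;
    	p->flags1 = cpu_to_le16(flags);
    }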
+
+/* iSCSI DIF on immediate mode attributes union */
+union dif_configuration_params {
+	struct lun_mapper_addr_reserved lun_mapper_address;
+	struct dif_on_immediate_params def_dif_conf;
+};
+
+/* Union of data/r2t sequence number */
+union iscsi_seq_num {
+	__le16 data_sn;
+	__le16 r2t_sn;
 };
 
-struct iscsi_cmd_hdr {
-	__le16 reserved1;
-	u8 flags_attr;
-#define ISCSI_CMD_HDR_ATTR_MASK		0x7
-#define ISCSI_CMD_HDR_ATTR_SHIFT	0
-#define ISCSI_CMD_HDR_RSRV_MASK		0x3
-#define ISCSI_CMD_HDR_RSRV_SHIFT	3
-#define ISCSI_CMD_HDR_WRITE_MASK	0x1
-#define ISCSI_CMD_HDR_WRITE_SHIFT	5
-#define ISCSI_CMD_HDR_READ_MASK		0x1
-#define ISCSI_CMD_HDR_READ_SHIFT	6
-#define ISCSI_CMD_HDR_FINAL_MASK	0x1
-#define ISCSI_CMD_HDR_FINAL_SHIFT	7
-	u8 hdr_first_byte;
-#define ISCSI_CMD_HDR_OPCODE_MASK	0x3F
-#define ISCSI_CMD_HDR_OPCODE_SHIFT	0
-#define ISCSI_CMD_HDR_IMM_MASK		0x1
-#define ISCSI_CMD_HDR_IMM_SHIFT		6
-#define ISCSI_CMD_HDR_RSRV1_MASK	0x1
-#define ISCSI_CMD_HDR_RSRV1_SHIFT	7
-	__le32 hdr_second_dword;
-#define ISCSI_CMD_HDR_DATA_SEG_LEN_MASK   0xFFFFFF
-#define ISCSI_CMD_HDR_DATA_SEG_LEN_SHIFT  0
-#define ISCSI_CMD_HDR_TOTAL_AHS_LEN_MASK  0xFF
-#define ISCSI_CMD_HDR_TOTAL_AHS_LEN_SHIFT 24
-	struct regpair lun;
-	__le32 itt;
-	__le32 expected_transfer_length;
-	__le32 cmd_sn;
-	__le32 exp_stat_sn;
-	__le32 cdb[4];
+/* iSCSI DIF flags */
+struct iscsi_dif_flags {
+	u8 flags;
+#define ISCSI_DIF_FLAGS_PROT_INTERVAL_SIZE_LOG_MASK	0xF
+#define ISCSI_DIF_FLAGS_PROT_INTERVAL_SIZE_LOG_SHIFT	0
+#define ISCSI_DIF_FLAGS_DIF_TO_PEER_MASK		0x1
+#define ISCSI_DIF_FLAGS_DIF_TO_PEER_SHIFT		4
+#define ISCSI_DIF_FLAGS_HOST_INTERFACE_MASK		0x7
+#define ISCSI_DIF_FLAGS_HOST_INTERFACE_SHIFT		5
 };
 
+/* The iscsi task state of Ystorm */
+struct ystorm_iscsi_task_state {
+	struct scsi_cached_sges data_desc;
+	struct scsi_sgl_params sgl_params;
+	__le32 exp_r2t_sn;
+	__le32 buffer_offset;
+	union iscsi_seq_num seq_num;
+	struct iscsi_dif_flags dif_flags;
+	u8 flags;
+#define YSTORM_ISCSI_TASK_STATE_LOCAL_COMP_MASK		0x1
+#define YSTORM_ISCSI_TASK_STATE_LOCAL_COMP_SHIFT	0
+#define YSTORM_ISCSI_TASK_STATE_SLOW_IO_MASK		0x1
+#define YSTORM_ISCSI_TASK_STATE_SLOW_IO_SHIFT		1
+#define YSTORM_ISCSI_TASK_STATE_SET_DIF_OFFSET_MASK	0x1
+#define YSTORM_ISCSI_TASK_STATE_SET_DIF_OFFSET_SHIFT	2
+#define YSTORM_ISCSI_TASK_STATE_RESERVED0_MASK		0x1F
+#define YSTORM_ISCSI_TASK_STATE_RESERVED0_SHIFT		3
+};
+
+/* The iscsi task retransmit options of Ystorm */
+struct ystorm_iscsi_task_rxmit_opt {
+	__le32 fast_rxmit_sge_offset;
+	__le32 scan_start_buffer_offset;
+	__le32 fast_rxmit_buffer_offset;
+	u8 scan_start_sgl_index;
+	u8 fast_rxmit_sgl_index;
+	__le16 reserved;
+};
+
+/* iSCSI Common PDU header */
 struct iscsi_common_hdr {
 	u8 hdr_status;
 	u8 hdr_response;
 	u8 hdr_flags;
 	u8 hdr_first_byte;
-#define ISCSI_COMMON_HDR_OPCODE_MASK         0x3F
-#define ISCSI_COMMON_HDR_OPCODE_SHIFT        0
-#define ISCSI_COMMON_HDR_IMM_MASK            0x1
-#define ISCSI_COMMON_HDR_IMM_SHIFT           6
-#define ISCSI_COMMON_HDR_RSRV_MASK           0x1
-#define ISCSI_COMMON_HDR_RSRV_SHIFT          7
+#define ISCSI_COMMON_HDR_OPCODE_MASK		0x3F
+#define ISCSI_COMMON_HDR_OPCODE_SHIFT		0
+#define ISCSI_COMMON_HDR_IMM_MASK		0x1
+#define ISCSI_COMMON_HDR_IMM_SHIFT		6
+#define ISCSI_COMMON_HDR_RSRV_MASK		0x1
+#define ISCSI_COMMON_HDR_RSRV_SHIFT		7
 	__le32 hdr_second_dword;
-#define ISCSI_COMMON_HDR_DATA_SEG_LEN_MASK   0xFFFFFF
-#define ISCSI_COMMON_HDR_DATA_SEG_LEN_SHIFT  0
-#define ISCSI_COMMON_HDR_TOTAL_AHS_LEN_MASK  0xFF
-#define ISCSI_COMMON_HDR_TOTAL_AHS_LEN_SHIFT 24
+#define ISCSI_COMMON_HDR_DATA_SEG_LEN_MASK	0xFFFFFF
+#define ISCSI_COMMON_HDR_DATA_SEG_LEN_SHIFT	0
+#define ISCSI_COMMON_HDR_TOTAL_AHS_LEN_MASK	0xFF
+#define ISCSI_COMMON_HDR_TOTAL_AHS_LEN_SHIFT	24
 	struct regpair lun_reserved;
 	__le32 itt;
 	__le32 ttt;
@@ -215,86 +270,60 @@ struct iscsi_common_hdr {
 	__le32 data[3];
 };
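
A sketch of decoding a received common PDU header: the opcode lives in the low
six bits of hdr_first_byte and the data-segment length in the low 24 bits of
hdr_second_dword:

    static inline u8 iscsi_hdr_opcode(const struct iscsi_common_hdr *hdr)
    {
    	return (hdr->hdr_first_byte >> ISCSI_COMMON_HDR_OPCODE_SHIFT) &
    	       ISCSI_COMMON_HDR_OPCODE_MASK;
    }

    static inline u32
    iscsi_hdr_data_seg_len(const struct iscsi_common_hdr *hdr)
    {
    	return (le32_to_cpu(hdr->hdr_second_dword) >>
    		ISCSI_COMMON_HDR_DATA_SEG_LEN_SHIFT) &
    	       ISCSI_COMMON_HDR_DATA_SEG_LEN_MASK;
    }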
 
-struct iscsi_conn_offload_params {
-	struct regpair sq_pbl_addr;
-	struct regpair r2tq_pbl_addr;
-	struct regpair xhq_pbl_addr;
-	struct regpair uhq_pbl_addr;
-	__le32 initial_ack;
-	__le16 physical_q0;
-	__le16 physical_q1;
-	u8 flags;
-#define ISCSI_CONN_OFFLOAD_PARAMS_TCP_ON_CHIP_1B_MASK  0x1
-#define ISCSI_CONN_OFFLOAD_PARAMS_TCP_ON_CHIP_1B_SHIFT 0
-#define ISCSI_CONN_OFFLOAD_PARAMS_TARGET_MODE_MASK     0x1
-#define ISCSI_CONN_OFFLOAD_PARAMS_TARGET_MODE_SHIFT    1
-#define ISCSI_CONN_OFFLOAD_PARAMS_RESTRICTED_MODE_MASK	0x1
-#define ISCSI_CONN_OFFLOAD_PARAMS_RESTRICTED_MODE_SHIFT	2
-#define ISCSI_CONN_OFFLOAD_PARAMS_RESERVED1_MASK	0x1F
-#define ISCSI_CONN_OFFLOAD_PARAMS_RESERVED1_SHIFT	3
-	u8 pbl_page_size_log;
-	u8 pbe_page_size_log;
-	u8 default_cq;
-	__le32 stat_sn;
-};
-
-struct iscsi_slow_path_hdr {
-	u8 op_code;
-	u8 flags;
-#define ISCSI_SLOW_PATH_HDR_RESERVED0_MASK   0xF
-#define ISCSI_SLOW_PATH_HDR_RESERVED0_SHIFT  0
-#define ISCSI_SLOW_PATH_HDR_LAYER_CODE_MASK  0x7
-#define ISCSI_SLOW_PATH_HDR_LAYER_CODE_SHIFT 4
-#define ISCSI_SLOW_PATH_HDR_RESERVED1_MASK   0x1
-#define ISCSI_SLOW_PATH_HDR_RESERVED1_SHIFT  7
-};
-
-struct iscsi_conn_update_ramrod_params {
-	struct iscsi_slow_path_hdr hdr;
-	__le16 conn_id;
-	__le32 fw_cid;
-	u8 flags;
-#define ISCSI_CONN_UPDATE_RAMROD_PARAMS_HD_EN_MASK           0x1
-#define ISCSI_CONN_UPDATE_RAMROD_PARAMS_HD_EN_SHIFT          0
-#define ISCSI_CONN_UPDATE_RAMROD_PARAMS_DD_EN_MASK           0x1
-#define ISCSI_CONN_UPDATE_RAMROD_PARAMS_DD_EN_SHIFT          1
-#define ISCSI_CONN_UPDATE_RAMROD_PARAMS_INITIAL_R2T_MASK     0x1
-#define ISCSI_CONN_UPDATE_RAMROD_PARAMS_INITIAL_R2T_SHIFT    2
-#define ISCSI_CONN_UPDATE_RAMROD_PARAMS_IMMEDIATE_DATA_MASK  0x1
-#define ISCSI_CONN_UPDATE_RAMROD_PARAMS_IMMEDIATE_DATA_SHIFT 3
-#define ISCSI_CONN_UPDATE_RAMROD_PARAMS_DIF_BLOCK_SIZE_MASK  0x1
-#define ISCSI_CONN_UPDATE_RAMROD_PARAMS_DIF_BLOCK_SIZE_SHIFT 4
-#define ISCSI_CONN_UPDATE_RAMROD_PARAMS_DIF_ON_HOST_EN_MASK  0x1
-#define ISCSI_CONN_UPDATE_RAMROD_PARAMS_DIF_ON_HOST_EN_SHIFT 5
-#define ISCSI_CONN_UPDATE_RAMROD_PARAMS_RESERVED1_MASK       0x3
-#define ISCSI_CONN_UPDATE_RAMROD_PARAMS_RESERVED1_SHIFT      6
-	u8 reserved0[3];
-	__le32 max_seq_size;
-	__le32 max_send_pdu_length;
-	__le32 max_recv_pdu_length;
-	__le32 first_seq_length;
+/* iSCSI Command PDU header */
+struct iscsi_cmd_hdr {
+	__le16 reserved1;
+	u8 flags_attr;
+#define ISCSI_CMD_HDR_ATTR_MASK			0x7
+#define ISCSI_CMD_HDR_ATTR_SHIFT		0
+#define ISCSI_CMD_HDR_RSRV_MASK			0x3
+#define ISCSI_CMD_HDR_RSRV_SHIFT		3
+#define ISCSI_CMD_HDR_WRITE_MASK		0x1
+#define ISCSI_CMD_HDR_WRITE_SHIFT		5
+#define ISCSI_CMD_HDR_READ_MASK			0x1
+#define ISCSI_CMD_HDR_READ_SHIFT		6
+#define ISCSI_CMD_HDR_FINAL_MASK		0x1
+#define ISCSI_CMD_HDR_FINAL_SHIFT		7
+	u8 hdr_first_byte;
+#define ISCSI_CMD_HDR_OPCODE_MASK		0x3F
+#define ISCSI_CMD_HDR_OPCODE_SHIFT		0
+#define ISCSI_CMD_HDR_IMM_MASK			0x1
+#define ISCSI_CMD_HDR_IMM_SHIFT			6
+#define ISCSI_CMD_HDR_RSRV1_MASK		0x1
+#define ISCSI_CMD_HDR_RSRV1_SHIFT		7
+	__le32 hdr_second_dword;
+#define ISCSI_CMD_HDR_DATA_SEG_LEN_MASK		0xFFFFFF
+#define ISCSI_CMD_HDR_DATA_SEG_LEN_SHIFT	0
+#define ISCSI_CMD_HDR_TOTAL_AHS_LEN_MASK	0xFF
+#define ISCSI_CMD_HDR_TOTAL_AHS_LEN_SHIFT	24
+	struct regpair lun;
+	__le32 itt;
+	__le32 expected_transfer_length;
+	__le32 cmd_sn;
 	__le32 exp_stat_sn;
+	__le32 cdb[4];
 };
 
+/* iSCSI Command PDU header with Extended CDB (Initiator Mode) */
 struct iscsi_ext_cdb_cmd_hdr {
 	__le16 reserved1;
 	u8 flags_attr;
-#define ISCSI_EXT_CDB_CMD_HDR_ATTR_MASK          0x7
-#define ISCSI_EXT_CDB_CMD_HDR_ATTR_SHIFT         0
-#define ISCSI_EXT_CDB_CMD_HDR_RSRV_MASK          0x3
-#define ISCSI_EXT_CDB_CMD_HDR_RSRV_SHIFT         3
-#define ISCSI_EXT_CDB_CMD_HDR_WRITE_MASK         0x1
-#define ISCSI_EXT_CDB_CMD_HDR_WRITE_SHIFT        5
-#define ISCSI_EXT_CDB_CMD_HDR_READ_MASK          0x1
-#define ISCSI_EXT_CDB_CMD_HDR_READ_SHIFT         6
-#define ISCSI_EXT_CDB_CMD_HDR_FINAL_MASK         0x1
-#define ISCSI_EXT_CDB_CMD_HDR_FINAL_SHIFT        7
+#define ISCSI_EXT_CDB_CMD_HDR_ATTR_MASK		0x7
+#define ISCSI_EXT_CDB_CMD_HDR_ATTR_SHIFT	0
+#define ISCSI_EXT_CDB_CMD_HDR_RSRV_MASK		0x3
+#define ISCSI_EXT_CDB_CMD_HDR_RSRV_SHIFT	3
+#define ISCSI_EXT_CDB_CMD_HDR_WRITE_MASK	0x1
+#define ISCSI_EXT_CDB_CMD_HDR_WRITE_SHIFT	5
+#define ISCSI_EXT_CDB_CMD_HDR_READ_MASK		0x1
+#define ISCSI_EXT_CDB_CMD_HDR_READ_SHIFT	6
+#define ISCSI_EXT_CDB_CMD_HDR_FINAL_MASK	0x1
+#define ISCSI_EXT_CDB_CMD_HDR_FINAL_SHIFT	7
 	u8 opcode;
 	__le32 hdr_second_dword;
-#define ISCSI_EXT_CDB_CMD_HDR_DATA_SEG_LEN_MASK  0xFFFFFF
-#define ISCSI_EXT_CDB_CMD_HDR_DATA_SEG_LEN_SHIFT 0
-#define ISCSI_EXT_CDB_CMD_HDR_CDB_SIZE_MASK      0xFF
-#define ISCSI_EXT_CDB_CMD_HDR_CDB_SIZE_SHIFT     24
+#define ISCSI_EXT_CDB_CMD_HDR_DATA_SEG_LEN_MASK		0xFFFFFF
+#define ISCSI_EXT_CDB_CMD_HDR_DATA_SEG_LEN_SHIFT	0
+#define ISCSI_EXT_CDB_CMD_HDR_CDB_SIZE_MASK		0xFF
+#define ISCSI_EXT_CDB_CMD_HDR_CDB_SIZE_SHIFT		24
 	struct regpair lun;
 	__le32 itt;
 	__le32 expected_transfer_length;
@@ -303,26 +332,27 @@ struct iscsi_ext_cdb_cmd_hdr {
 	struct scsi_sge cdb_sge;
 };
 
+/* iSCSI login request PDU header */
 struct iscsi_login_req_hdr {
 	u8 version_min;
 	u8 version_max;
 	u8 flags_attr;
-#define ISCSI_LOGIN_REQ_HDR_NSG_MASK            0x3
-#define ISCSI_LOGIN_REQ_HDR_NSG_SHIFT           0
-#define ISCSI_LOGIN_REQ_HDR_CSG_MASK            0x3
-#define ISCSI_LOGIN_REQ_HDR_CSG_SHIFT           2
-#define ISCSI_LOGIN_REQ_HDR_RSRV_MASK           0x3
-#define ISCSI_LOGIN_REQ_HDR_RSRV_SHIFT          4
-#define ISCSI_LOGIN_REQ_HDR_C_MASK              0x1
-#define ISCSI_LOGIN_REQ_HDR_C_SHIFT             6
-#define ISCSI_LOGIN_REQ_HDR_T_MASK              0x1
-#define ISCSI_LOGIN_REQ_HDR_T_SHIFT             7
+#define ISCSI_LOGIN_REQ_HDR_NSG_MASK	0x3
+#define ISCSI_LOGIN_REQ_HDR_NSG_SHIFT	0
+#define ISCSI_LOGIN_REQ_HDR_CSG_MASK	0x3
+#define ISCSI_LOGIN_REQ_HDR_CSG_SHIFT	2
+#define ISCSI_LOGIN_REQ_HDR_RSRV_MASK	0x3
+#define ISCSI_LOGIN_REQ_HDR_RSRV_SHIFT	4
+#define ISCSI_LOGIN_REQ_HDR_C_MASK	0x1
+#define ISCSI_LOGIN_REQ_HDR_C_SHIFT	6
+#define ISCSI_LOGIN_REQ_HDR_T_MASK	0x1
+#define ISCSI_LOGIN_REQ_HDR_T_SHIFT	7
 	u8 opcode;
 	__le32 hdr_second_dword;
-#define ISCSI_LOGIN_REQ_HDR_DATA_SEG_LEN_MASK   0xFFFFFF
-#define ISCSI_LOGIN_REQ_HDR_DATA_SEG_LEN_SHIFT  0
-#define ISCSI_LOGIN_REQ_HDR_TOTAL_AHS_LEN_MASK  0xFF
-#define ISCSI_LOGIN_REQ_HDR_TOTAL_AHS_LEN_SHIFT 24
+#define ISCSI_LOGIN_REQ_HDR_DATA_SEG_LEN_MASK	0xFFFFFF
+#define ISCSI_LOGIN_REQ_HDR_DATA_SEG_LEN_SHIFT	0
+#define ISCSI_LOGIN_REQ_HDR_TOTAL_AHS_LEN_MASK	0xFF
+#define ISCSI_LOGIN_REQ_HDR_TOTAL_AHS_LEN_SHIFT	24
 	__le32 isid_tabc;
 	__le16 tsih;
 	__le16 isid_d;
@@ -334,6 +364,7 @@ struct iscsi_login_req_hdr {
 	__le32 reserved2[4];
 };
 
+/* iSCSI logout request PDU header */
 struct iscsi_logout_req_hdr {
 	__le16 reserved0;
 	u8 reason_code;
@@ -348,13 +379,14 @@ struct iscsi_logout_req_hdr {
 	__le32 reserved4[4];
 };
 
+/* iSCSI Data-out PDU header */
 struct iscsi_data_out_hdr {
 	__le16 reserved1;
 	u8 flags_attr;
-#define ISCSI_DATA_OUT_HDR_RSRV_MASK   0x7F
-#define ISCSI_DATA_OUT_HDR_RSRV_SHIFT  0
-#define ISCSI_DATA_OUT_HDR_FINAL_MASK  0x1
-#define ISCSI_DATA_OUT_HDR_FINAL_SHIFT 7
+#define ISCSI_DATA_OUT_HDR_RSRV_MASK	0x7F
+#define ISCSI_DATA_OUT_HDR_RSRV_SHIFT	0
+#define ISCSI_DATA_OUT_HDR_FINAL_MASK	0x1
+#define ISCSI_DATA_OUT_HDR_FINAL_SHIFT	7
 	u8 opcode;
 	__le32 reserved2;
 	struct regpair lun;
@@ -368,22 +400,23 @@ struct iscsi_data_out_hdr {
 	__le32 reserved5;
 };
 
+/* iSCSI Data-in PDU header */
 struct iscsi_data_in_hdr {
 	u8 status_rsvd;
 	u8 reserved1;
 	u8 flags;
-#define ISCSI_DATA_IN_HDR_STATUS_MASK     0x1
-#define ISCSI_DATA_IN_HDR_STATUS_SHIFT    0
-#define ISCSI_DATA_IN_HDR_UNDERFLOW_MASK  0x1
-#define ISCSI_DATA_IN_HDR_UNDERFLOW_SHIFT 1
-#define ISCSI_DATA_IN_HDR_OVERFLOW_MASK   0x1
-#define ISCSI_DATA_IN_HDR_OVERFLOW_SHIFT  2
-#define ISCSI_DATA_IN_HDR_RSRV_MASK       0x7
-#define ISCSI_DATA_IN_HDR_RSRV_SHIFT      3
-#define ISCSI_DATA_IN_HDR_ACK_MASK        0x1
-#define ISCSI_DATA_IN_HDR_ACK_SHIFT       6
-#define ISCSI_DATA_IN_HDR_FINAL_MASK      0x1
-#define ISCSI_DATA_IN_HDR_FINAL_SHIFT     7
+#define ISCSI_DATA_IN_HDR_STATUS_MASK		0x1
+#define ISCSI_DATA_IN_HDR_STATUS_SHIFT		0
+#define ISCSI_DATA_IN_HDR_UNDERFLOW_MASK	0x1
+#define ISCSI_DATA_IN_HDR_UNDERFLOW_SHIFT	1
+#define ISCSI_DATA_IN_HDR_OVERFLOW_MASK		0x1
+#define ISCSI_DATA_IN_HDR_OVERFLOW_SHIFT	2
+#define ISCSI_DATA_IN_HDR_RSRV_MASK		0x7
+#define ISCSI_DATA_IN_HDR_RSRV_SHIFT		3
+#define ISCSI_DATA_IN_HDR_ACK_MASK		0x1
+#define ISCSI_DATA_IN_HDR_ACK_SHIFT		6
+#define ISCSI_DATA_IN_HDR_FINAL_MASK		0x1
+#define ISCSI_DATA_IN_HDR_FINAL_SHIFT		7
 	u8 opcode;
 	__le32 reserved2;
 	struct regpair lun;
@@ -397,6 +430,7 @@ struct iscsi_data_in_hdr {
 	__le32 residual_count;
 };
 
+/* iSCSI R2T PDU header */
 struct iscsi_r2t_hdr {
 	u8 reserved0[3];
 	u8 opcode;
@@ -412,13 +446,14 @@ struct iscsi_r2t_hdr {
 	__le32 desired_data_trns_len;
 };
 
+/* iSCSI NOP-out PDU header */
 struct iscsi_nop_out_hdr {
 	__le16 reserved1;
 	u8 flags_attr;
-#define ISCSI_NOP_OUT_HDR_RSRV_MASK    0x7F
-#define ISCSI_NOP_OUT_HDR_RSRV_SHIFT   0
-#define ISCSI_NOP_OUT_HDR_CONST1_MASK  0x1
-#define ISCSI_NOP_OUT_HDR_CONST1_SHIFT 7
+#define ISCSI_NOP_OUT_HDR_RSRV_MASK	0x7F
+#define ISCSI_NOP_OUT_HDR_RSRV_SHIFT	0
+#define ISCSI_NOP_OUT_HDR_CONST1_MASK	0x1
+#define ISCSI_NOP_OUT_HDR_CONST1_SHIFT	7
 	u8 opcode;
 	__le32 reserved2;
 	struct regpair lun;
@@ -432,19 +467,20 @@ struct iscsi_nop_out_hdr {
 	__le32 reserved6;
 };
 
+/* iSCSI NOP-in PDU header */
 struct iscsi_nop_in_hdr {
 	__le16 reserved0;
 	u8 flags_attr;
-#define ISCSI_NOP_IN_HDR_RSRV_MASK           0x7F
-#define ISCSI_NOP_IN_HDR_RSRV_SHIFT          0
-#define ISCSI_NOP_IN_HDR_CONST1_MASK         0x1
-#define ISCSI_NOP_IN_HDR_CONST1_SHIFT        7
+#define ISCSI_NOP_IN_HDR_RSRV_MASK	0x7F
+#define ISCSI_NOP_IN_HDR_RSRV_SHIFT	0
+#define ISCSI_NOP_IN_HDR_CONST1_MASK	0x1
+#define ISCSI_NOP_IN_HDR_CONST1_SHIFT	7
 	u8 opcode;
 	__le32 hdr_second_dword;
-#define ISCSI_NOP_IN_HDR_DATA_SEG_LEN_MASK   0xFFFFFF
-#define ISCSI_NOP_IN_HDR_DATA_SEG_LEN_SHIFT  0
-#define ISCSI_NOP_IN_HDR_TOTAL_AHS_LEN_MASK  0xFF
-#define ISCSI_NOP_IN_HDR_TOTAL_AHS_LEN_SHIFT 24
+#define ISCSI_NOP_IN_HDR_DATA_SEG_LEN_MASK	0xFFFFFF
+#define ISCSI_NOP_IN_HDR_DATA_SEG_LEN_SHIFT	0
+#define ISCSI_NOP_IN_HDR_TOTAL_AHS_LEN_MASK	0xFF
+#define ISCSI_NOP_IN_HDR_TOTAL_AHS_LEN_SHIFT	24
 	struct regpair lun;
 	__le32 itt;
 	__le32 ttt;
@@ -456,26 +492,27 @@ struct iscsi_nop_in_hdr {
 	__le32 reserved7;
 };
 
+/* iSCSI Login Response PDU header */
 struct iscsi_login_response_hdr {
 	u8 version_active;
 	u8 version_max;
 	u8 flags_attr;
-#define ISCSI_LOGIN_RESPONSE_HDR_NSG_MASK            0x3
-#define ISCSI_LOGIN_RESPONSE_HDR_NSG_SHIFT           0
-#define ISCSI_LOGIN_RESPONSE_HDR_CSG_MASK            0x3
-#define ISCSI_LOGIN_RESPONSE_HDR_CSG_SHIFT           2
-#define ISCSI_LOGIN_RESPONSE_HDR_RSRV_MASK           0x3
-#define ISCSI_LOGIN_RESPONSE_HDR_RSRV_SHIFT          4
-#define ISCSI_LOGIN_RESPONSE_HDR_C_MASK              0x1
-#define ISCSI_LOGIN_RESPONSE_HDR_C_SHIFT             6
-#define ISCSI_LOGIN_RESPONSE_HDR_T_MASK              0x1
-#define ISCSI_LOGIN_RESPONSE_HDR_T_SHIFT             7
+#define ISCSI_LOGIN_RESPONSE_HDR_NSG_MASK	0x3
+#define ISCSI_LOGIN_RESPONSE_HDR_NSG_SHIFT	0
+#define ISCSI_LOGIN_RESPONSE_HDR_CSG_MASK	0x3
+#define ISCSI_LOGIN_RESPONSE_HDR_CSG_SHIFT	2
+#define ISCSI_LOGIN_RESPONSE_HDR_RSRV_MASK	0x3
+#define ISCSI_LOGIN_RESPONSE_HDR_RSRV_SHIFT	4
+#define ISCSI_LOGIN_RESPONSE_HDR_C_MASK		0x1
+#define ISCSI_LOGIN_RESPONSE_HDR_C_SHIFT	6
+#define ISCSI_LOGIN_RESPONSE_HDR_T_MASK		0x1
+#define ISCSI_LOGIN_RESPONSE_HDR_T_SHIFT	7
 	u8 opcode;
 	__le32 hdr_second_dword;
-#define ISCSI_LOGIN_RESPONSE_HDR_DATA_SEG_LEN_MASK   0xFFFFFF
-#define ISCSI_LOGIN_RESPONSE_HDR_DATA_SEG_LEN_SHIFT  0
-#define ISCSI_LOGIN_RESPONSE_HDR_TOTAL_AHS_LEN_MASK  0xFF
-#define ISCSI_LOGIN_RESPONSE_HDR_TOTAL_AHS_LEN_SHIFT 24
+#define ISCSI_LOGIN_RESPONSE_HDR_DATA_SEG_LEN_MASK	0xFFFFFF
+#define ISCSI_LOGIN_RESPONSE_HDR_DATA_SEG_LEN_SHIFT	0
+#define ISCSI_LOGIN_RESPONSE_HDR_TOTAL_AHS_LEN_MASK	0xFF
+#define ISCSI_LOGIN_RESPONSE_HDR_TOTAL_AHS_LEN_SHIFT	24
 	__le32 isid_tabc;
 	__le16 tsih;
 	__le16 isid_d;
@@ -490,16 +527,17 @@ struct iscsi_login_response_hdr {
 	__le32 reserved4[2];
 };
 
+/* iSCSI Logout Response PDU header */
 struct iscsi_logout_response_hdr {
 	u8 reserved1;
 	u8 response;
 	u8 flags;
 	u8 opcode;
 	__le32 hdr_second_dword;
-#define ISCSI_LOGOUT_RESPONSE_HDR_DATA_SEG_LEN_MASK   0xFFFFFF
-#define ISCSI_LOGOUT_RESPONSE_HDR_DATA_SEG_LEN_SHIFT  0
-#define ISCSI_LOGOUT_RESPONSE_HDR_TOTAL_AHS_LEN_MASK  0xFF
-#define ISCSI_LOGOUT_RESPONSE_HDR_TOTAL_AHS_LEN_SHIFT 24
+#define ISCSI_LOGOUT_RESPONSE_HDR_DATA_SEG_LEN_MASK	0xFFFFFF
+#define ISCSI_LOGOUT_RESPONSE_HDR_DATA_SEG_LEN_SHIFT	0
+#define ISCSI_LOGOUT_RESPONSE_HDR_TOTAL_AHS_LEN_MASK	0xFF
+#define ISCSI_LOGOUT_RESPONSE_HDR_TOTAL_AHS_LEN_SHIFT	24
 	__le32 reserved2[2];
 	__le32 itt;
 	__le32 reserved3;
@@ -512,21 +550,22 @@ struct iscsi_logout_response_hdr {
 	__le32 reserved5[1];
 };
 
+/* iSCSI Text Request PDU header */
 struct iscsi_text_request_hdr {
 	__le16 reserved0;
 	u8 flags_attr;
-#define ISCSI_TEXT_REQUEST_HDR_RSRV_MASK           0x3F
-#define ISCSI_TEXT_REQUEST_HDR_RSRV_SHIFT          0
-#define ISCSI_TEXT_REQUEST_HDR_C_MASK              0x1
-#define ISCSI_TEXT_REQUEST_HDR_C_SHIFT             6
-#define ISCSI_TEXT_REQUEST_HDR_F_MASK              0x1
-#define ISCSI_TEXT_REQUEST_HDR_F_SHIFT             7
+#define ISCSI_TEXT_REQUEST_HDR_RSRV_MASK	0x3F
+#define ISCSI_TEXT_REQUEST_HDR_RSRV_SHIFT	0
+#define ISCSI_TEXT_REQUEST_HDR_C_MASK		0x1
+#define ISCSI_TEXT_REQUEST_HDR_C_SHIFT		6
+#define ISCSI_TEXT_REQUEST_HDR_F_MASK		0x1
+#define ISCSI_TEXT_REQUEST_HDR_F_SHIFT		7
 	u8 opcode;
 	__le32 hdr_second_dword;
-#define ISCSI_TEXT_REQUEST_HDR_DATA_SEG_LEN_MASK   0xFFFFFF
-#define ISCSI_TEXT_REQUEST_HDR_DATA_SEG_LEN_SHIFT  0
-#define ISCSI_TEXT_REQUEST_HDR_TOTAL_AHS_LEN_MASK  0xFF
-#define ISCSI_TEXT_REQUEST_HDR_TOTAL_AHS_LEN_SHIFT 24
+#define ISCSI_TEXT_REQUEST_HDR_DATA_SEG_LEN_MASK	0xFFFFFF
+#define ISCSI_TEXT_REQUEST_HDR_DATA_SEG_LEN_SHIFT	0
+#define ISCSI_TEXT_REQUEST_HDR_TOTAL_AHS_LEN_MASK	0xFF
+#define ISCSI_TEXT_REQUEST_HDR_TOTAL_AHS_LEN_SHIFT	24
 	struct regpair lun;
 	__le32 itt;
 	__le32 ttt;
@@ -535,21 +574,22 @@ struct iscsi_text_request_hdr {
 	__le32 reserved4[4];
 };
 
+/* iSCSI Text Response PDU header */
 struct iscsi_text_response_hdr {
 	__le16 reserved1;
 	u8 flags;
-#define ISCSI_TEXT_RESPONSE_HDR_RSRV_MASK           0x3F
-#define ISCSI_TEXT_RESPONSE_HDR_RSRV_SHIFT          0
-#define ISCSI_TEXT_RESPONSE_HDR_C_MASK              0x1
-#define ISCSI_TEXT_RESPONSE_HDR_C_SHIFT             6
-#define ISCSI_TEXT_RESPONSE_HDR_F_MASK              0x1
-#define ISCSI_TEXT_RESPONSE_HDR_F_SHIFT             7
+#define ISCSI_TEXT_RESPONSE_HDR_RSRV_MASK	0x3F
+#define ISCSI_TEXT_RESPONSE_HDR_RSRV_SHIFT	0
+#define ISCSI_TEXT_RESPONSE_HDR_C_MASK		0x1
+#define ISCSI_TEXT_RESPONSE_HDR_C_SHIFT		6
+#define ISCSI_TEXT_RESPONSE_HDR_F_MASK		0x1
+#define ISCSI_TEXT_RESPONSE_HDR_F_SHIFT		7
 	u8 opcode;
 	__le32 hdr_second_dword;
-#define ISCSI_TEXT_RESPONSE_HDR_DATA_SEG_LEN_MASK   0xFFFFFF
-#define ISCSI_TEXT_RESPONSE_HDR_DATA_SEG_LEN_SHIFT  0
-#define ISCSI_TEXT_RESPONSE_HDR_TOTAL_AHS_LEN_MASK  0xFF
-#define ISCSI_TEXT_RESPONSE_HDR_TOTAL_AHS_LEN_SHIFT 24
+#define ISCSI_TEXT_RESPONSE_HDR_DATA_SEG_LEN_MASK	0xFFFFFF
+#define ISCSI_TEXT_RESPONSE_HDR_DATA_SEG_LEN_SHIFT	0
+#define ISCSI_TEXT_RESPONSE_HDR_TOTAL_AHS_LEN_MASK	0xFF
+#define ISCSI_TEXT_RESPONSE_HDR_TOTAL_AHS_LEN_SHIFT	24
 	struct regpair lun;
 	__le32 itt;
 	__le32 ttt;
@@ -559,15 +599,16 @@ struct iscsi_text_response_hdr {
 	__le32 reserved4[3];
 };
 
+/* iSCSI TMF Request PDU header */
 struct iscsi_tmf_request_hdr {
 	__le16 reserved0;
 	u8 function;
 	u8 opcode;
 	__le32 hdr_second_dword;
-#define ISCSI_TMF_REQUEST_HDR_DATA_SEG_LEN_MASK   0xFFFFFF
-#define ISCSI_TMF_REQUEST_HDR_DATA_SEG_LEN_SHIFT  0
-#define ISCSI_TMF_REQUEST_HDR_TOTAL_AHS_LEN_MASK  0xFF
-#define ISCSI_TMF_REQUEST_HDR_TOTAL_AHS_LEN_SHIFT 24
+#define ISCSI_TMF_REQUEST_HDR_DATA_SEG_LEN_MASK		0xFFFFFF
+#define ISCSI_TMF_REQUEST_HDR_DATA_SEG_LEN_SHIFT	0
+#define ISCSI_TMF_REQUEST_HDR_TOTAL_AHS_LEN_MASK	0xFF
+#define ISCSI_TMF_REQUEST_HDR_TOTAL_AHS_LEN_SHIFT	24
 	struct regpair lun;
 	__le32 itt;
 	__le32 rtt;
@@ -584,10 +625,10 @@ struct iscsi_tmf_response_hdr {
 	u8 hdr_flags;
 	u8 opcode;
 	__le32 hdr_second_dword;
-#define ISCSI_TMF_RESPONSE_HDR_DATA_SEG_LEN_MASK   0xFFFFFF
-#define ISCSI_TMF_RESPONSE_HDR_DATA_SEG_LEN_SHIFT  0
-#define ISCSI_TMF_RESPONSE_HDR_TOTAL_AHS_LEN_MASK  0xFF
-#define ISCSI_TMF_RESPONSE_HDR_TOTAL_AHS_LEN_SHIFT 24
+#define ISCSI_TMF_RESPONSE_HDR_DATA_SEG_LEN_MASK	0xFFFFFF
+#define ISCSI_TMF_RESPONSE_HDR_DATA_SEG_LEN_SHIFT	0
+#define ISCSI_TMF_RESPONSE_HDR_TOTAL_AHS_LEN_MASK	0xFF
+#define ISCSI_TMF_RESPONSE_HDR_TOTAL_AHS_LEN_SHIFT	24
 	struct regpair reserved0;
 	__le32 itt;
 	__le32 reserved1;
@@ -597,16 +638,17 @@ struct iscsi_tmf_response_hdr {
 	__le32 reserved4[3];
 };
 
+/* iSCSI Response PDU header */
 struct iscsi_response_hdr {
 	u8 hdr_status;
 	u8 hdr_response;
 	u8 hdr_flags;
 	u8 opcode;
 	__le32 hdr_second_dword;
-#define ISCSI_RESPONSE_HDR_DATA_SEG_LEN_MASK   0xFFFFFF
-#define ISCSI_RESPONSE_HDR_DATA_SEG_LEN_SHIFT  0
-#define ISCSI_RESPONSE_HDR_TOTAL_AHS_LEN_MASK  0xFF
-#define ISCSI_RESPONSE_HDR_TOTAL_AHS_LEN_SHIFT 24
+#define ISCSI_RESPONSE_HDR_DATA_SEG_LEN_MASK	0xFFFFFF
+#define ISCSI_RESPONSE_HDR_DATA_SEG_LEN_SHIFT	0
+#define ISCSI_RESPONSE_HDR_TOTAL_AHS_LEN_MASK	0xFF
+#define ISCSI_RESPONSE_HDR_TOTAL_AHS_LEN_SHIFT	24
 	struct regpair lun;
 	__le32 itt;
 	__le32 snack_tag;
@@ -618,16 +660,17 @@ struct iscsi_response_hdr {
 	__le32 residual_count;
 };
 
+/* iSCSI Reject PDU header */
 struct iscsi_reject_hdr {
 	u8 reserved4;
 	u8 hdr_reason;
 	u8 hdr_flags;
 	u8 opcode;
 	__le32 hdr_second_dword;
-#define ISCSI_REJECT_HDR_DATA_SEG_LEN_MASK   0xFFFFFF
-#define ISCSI_REJECT_HDR_DATA_SEG_LEN_SHIFT  0
-#define ISCSI_REJECT_HDR_TOTAL_AHS_LEN_MASK  0xFF
-#define ISCSI_REJECT_HDR_TOTAL_AHS_LEN_SHIFT 24
+#define ISCSI_REJECT_HDR_DATA_SEG_LEN_MASK	0xFFFFFF
+#define ISCSI_REJECT_HDR_DATA_SEG_LEN_SHIFT	0
+#define ISCSI_REJECT_HDR_TOTAL_AHS_LEN_MASK	0xFF
+#define ISCSI_REJECT_HDR_TOTAL_AHS_LEN_SHIFT	24
 	struct regpair reserved0;
 	__le32 all_ones;
 	__le32 reserved2;
@@ -638,6 +681,35 @@ struct iscsi_reject_hdr {
 	__le32 reserved3[2];
 };
 
+/* iSCSI Asynchronous Message PDU header */
+struct iscsi_async_msg_hdr {
+	__le16 reserved0;
+	u8 flags_attr;
+#define ISCSI_ASYNC_MSG_HDR_RSRV_MASK		0x7F
+#define ISCSI_ASYNC_MSG_HDR_RSRV_SHIFT		0
+#define ISCSI_ASYNC_MSG_HDR_CONST1_MASK		0x1
+#define ISCSI_ASYNC_MSG_HDR_CONST1_SHIFT	7
+	u8 opcode;
+	__le32 hdr_second_dword;
+#define ISCSI_ASYNC_MSG_HDR_DATA_SEG_LEN_MASK	0xFFFFFF
+#define ISCSI_ASYNC_MSG_HDR_DATA_SEG_LEN_SHIFT	0
+#define ISCSI_ASYNC_MSG_HDR_TOTAL_AHS_LEN_MASK	0xFF
+#define ISCSI_ASYNC_MSG_HDR_TOTAL_AHS_LEN_SHIFT	24
+	struct regpair lun;
+	__le32 all_ones;
+	__le32 reserved1;
+	__le32 stat_sn;
+	__le32 exp_cmd_sn;
+	__le32 max_cmd_sn;
+	__le16 param1_rsrv;
+	u8 async_vcode;
+	u8 async_event;
+	__le16 param3_rsrv;
+	__le16 param2_rsrv;
+	__le32 reserved7;
+};
+
+/* PDU header part of Ystorm task context */
 union iscsi_task_hdr {
 	struct iscsi_common_hdr common;
 	struct data_hdr data;
@@ -661,6 +733,348 @@ union iscsi_task_hdr {
 	struct iscsi_async_msg_hdr async_msg;
 };
 
+/* The iscsi storm task context of Ystorm */
+struct ystorm_iscsi_task_st_ctx {
+	struct ystorm_iscsi_task_state state;
+	struct ystorm_iscsi_task_rxmit_opt rxmit_opt;
+	union iscsi_task_hdr pdu_hdr;
+};
+
+struct e4_ystorm_iscsi_task_ag_ctx {
+	u8 reserved;
+	u8 byte1;
+	__le16 word0;
+	u8 flags0;
+#define E4_YSTORM_ISCSI_TASK_AG_CTX_NIBBLE0_MASK	0xF
+#define E4_YSTORM_ISCSI_TASK_AG_CTX_NIBBLE0_SHIFT	0
+#define E4_YSTORM_ISCSI_TASK_AG_CTX_BIT0_MASK		0x1
+#define E4_YSTORM_ISCSI_TASK_AG_CTX_BIT0_SHIFT		4
+#define E4_YSTORM_ISCSI_TASK_AG_CTX_BIT1_MASK		0x1
+#define E4_YSTORM_ISCSI_TASK_AG_CTX_BIT1_SHIFT		5
+#define E4_YSTORM_ISCSI_TASK_AG_CTX_VALID_MASK		0x1
+#define E4_YSTORM_ISCSI_TASK_AG_CTX_VALID_SHIFT		6
+#define E4_YSTORM_ISCSI_TASK_AG_CTX_BIT3_MASK		0x1
+#define E4_YSTORM_ISCSI_TASK_AG_CTX_BIT3_SHIFT		7
+	u8 flags1;
+#define E4_YSTORM_ISCSI_TASK_AG_CTX_CF0_MASK		0x3
+#define E4_YSTORM_ISCSI_TASK_AG_CTX_CF0_SHIFT		0
+#define E4_YSTORM_ISCSI_TASK_AG_CTX_CF1_MASK		0x3
+#define E4_YSTORM_ISCSI_TASK_AG_CTX_CF1_SHIFT		2
+#define E4_YSTORM_ISCSI_TASK_AG_CTX_CF2SPECIAL_MASK	0x3
+#define E4_YSTORM_ISCSI_TASK_AG_CTX_CF2SPECIAL_SHIFT	4
+#define E4_YSTORM_ISCSI_TASK_AG_CTX_CF0EN_MASK		0x1
+#define E4_YSTORM_ISCSI_TASK_AG_CTX_CF0EN_SHIFT		6
+#define E4_YSTORM_ISCSI_TASK_AG_CTX_CF1EN_MASK		0x1
+#define E4_YSTORM_ISCSI_TASK_AG_CTX_CF1EN_SHIFT		7
+	u8 flags2;
+#define E4_YSTORM_ISCSI_TASK_AG_CTX_BIT4_MASK		0x1
+#define E4_YSTORM_ISCSI_TASK_AG_CTX_BIT4_SHIFT		0
+#define E4_YSTORM_ISCSI_TASK_AG_CTX_RULE0EN_MASK	0x1
+#define E4_YSTORM_ISCSI_TASK_AG_CTX_RULE0EN_SHIFT	1
+#define E4_YSTORM_ISCSI_TASK_AG_CTX_RULE1EN_MASK	0x1
+#define E4_YSTORM_ISCSI_TASK_AG_CTX_RULE1EN_SHIFT	2
+#define E4_YSTORM_ISCSI_TASK_AG_CTX_RULE2EN_MASK	0x1
+#define E4_YSTORM_ISCSI_TASK_AG_CTX_RULE2EN_SHIFT	3
+#define E4_YSTORM_ISCSI_TASK_AG_CTX_RULE3EN_MASK	0x1
+#define E4_YSTORM_ISCSI_TASK_AG_CTX_RULE3EN_SHIFT	4
+#define E4_YSTORM_ISCSI_TASK_AG_CTX_RULE4EN_MASK	0x1
+#define E4_YSTORM_ISCSI_TASK_AG_CTX_RULE4EN_SHIFT	5
+#define E4_YSTORM_ISCSI_TASK_AG_CTX_RULE5EN_MASK	0x1
+#define E4_YSTORM_ISCSI_TASK_AG_CTX_RULE5EN_SHIFT	6
+#define E4_YSTORM_ISCSI_TASK_AG_CTX_RULE6EN_MASK	0x1
+#define E4_YSTORM_ISCSI_TASK_AG_CTX_RULE6EN_SHIFT	7
+	u8 byte2;
+	__le32 TTT;
+	u8 byte3;
+	u8 byte4;
+	__le16 word1;
+};
+
+struct e4_mstorm_iscsi_task_ag_ctx {
+	u8 cdu_validation;
+	u8 byte1;
+	__le16 task_cid;
+	u8 flags0;
+#define E4_MSTORM_ISCSI_TASK_AG_CTX_CONNECTION_TYPE_MASK	0xF
+#define E4_MSTORM_ISCSI_TASK_AG_CTX_CONNECTION_TYPE_SHIFT	0
+#define E4_MSTORM_ISCSI_TASK_AG_CTX_EXIST_IN_QM0_MASK		0x1
+#define E4_MSTORM_ISCSI_TASK_AG_CTX_EXIST_IN_QM0_SHIFT		4
+#define E4_MSTORM_ISCSI_TASK_AG_CTX_BIT1_MASK			0x1
+#define E4_MSTORM_ISCSI_TASK_AG_CTX_BIT1_SHIFT			5
+#define E4_MSTORM_ISCSI_TASK_AG_CTX_VALID_MASK			0x1
+#define E4_MSTORM_ISCSI_TASK_AG_CTX_VALID_SHIFT			6
+#define E4_MSTORM_ISCSI_TASK_AG_CTX_TASK_CLEANUP_FLAG_MASK	0x1
+#define E4_MSTORM_ISCSI_TASK_AG_CTX_TASK_CLEANUP_FLAG_SHIFT	7
+	u8 flags1;
+#define E4_MSTORM_ISCSI_TASK_AG_CTX_TASK_CLEANUP_CF_MASK	0x3
+#define E4_MSTORM_ISCSI_TASK_AG_CTX_TASK_CLEANUP_CF_SHIFT	0
+#define E4_MSTORM_ISCSI_TASK_AG_CTX_CF1_MASK			0x3
+#define E4_MSTORM_ISCSI_TASK_AG_CTX_CF1_SHIFT			2
+#define E4_MSTORM_ISCSI_TASK_AG_CTX_CF2_MASK			0x3
+#define E4_MSTORM_ISCSI_TASK_AG_CTX_CF2_SHIFT			4
+#define E4_MSTORM_ISCSI_TASK_AG_CTX_TASK_CLEANUP_CF_EN_MASK	0x1
+#define E4_MSTORM_ISCSI_TASK_AG_CTX_TASK_CLEANUP_CF_EN_SHIFT	6
+#define E4_MSTORM_ISCSI_TASK_AG_CTX_CF1EN_MASK			0x1
+#define E4_MSTORM_ISCSI_TASK_AG_CTX_CF1EN_SHIFT			7
+	u8 flags2;
+#define E4_MSTORM_ISCSI_TASK_AG_CTX_CF2EN_MASK		0x1
+#define E4_MSTORM_ISCSI_TASK_AG_CTX_CF2EN_SHIFT		0
+#define E4_MSTORM_ISCSI_TASK_AG_CTX_RULE0EN_MASK	0x1
+#define E4_MSTORM_ISCSI_TASK_AG_CTX_RULE0EN_SHIFT	1
+#define E4_MSTORM_ISCSI_TASK_AG_CTX_RULE1EN_MASK	0x1
+#define E4_MSTORM_ISCSI_TASK_AG_CTX_RULE1EN_SHIFT	2
+#define E4_MSTORM_ISCSI_TASK_AG_CTX_RULE2EN_MASK	0x1
+#define E4_MSTORM_ISCSI_TASK_AG_CTX_RULE2EN_SHIFT	3
+#define E4_MSTORM_ISCSI_TASK_AG_CTX_RULE3EN_MASK	0x1
+#define E4_MSTORM_ISCSI_TASK_AG_CTX_RULE3EN_SHIFT	4
+#define E4_MSTORM_ISCSI_TASK_AG_CTX_RULE4EN_MASK	0x1
+#define E4_MSTORM_ISCSI_TASK_AG_CTX_RULE4EN_SHIFT	5
+#define E4_MSTORM_ISCSI_TASK_AG_CTX_RULE5EN_MASK	0x1
+#define E4_MSTORM_ISCSI_TASK_AG_CTX_RULE5EN_SHIFT	6
+#define E4_MSTORM_ISCSI_TASK_AG_CTX_RULE6EN_MASK	0x1
+#define E4_MSTORM_ISCSI_TASK_AG_CTX_RULE6EN_SHIFT	7
+	u8 byte2;
+	__le32 reg0;
+	u8 byte3;
+	u8 byte4;
+	__le16 word1;
+};
+
+struct e4_ustorm_iscsi_task_ag_ctx {
+	u8 reserved;
+	u8 state;
+	__le16 icid;
+	u8 flags0;
+#define E4_USTORM_ISCSI_TASK_AG_CTX_CONNECTION_TYPE_MASK	0xF
+#define E4_USTORM_ISCSI_TASK_AG_CTX_CONNECTION_TYPE_SHIFT	0
+#define E4_USTORM_ISCSI_TASK_AG_CTX_EXIST_IN_QM0_MASK		0x1
+#define E4_USTORM_ISCSI_TASK_AG_CTX_EXIST_IN_QM0_SHIFT		4
+#define E4_USTORM_ISCSI_TASK_AG_CTX_BIT1_MASK			0x1
+#define E4_USTORM_ISCSI_TASK_AG_CTX_BIT1_SHIFT			5
+#define E4_USTORM_ISCSI_TASK_AG_CTX_HQ_SCANNED_CF_MASK		0x3
+#define E4_USTORM_ISCSI_TASK_AG_CTX_HQ_SCANNED_CF_SHIFT		6
+	u8 flags1;
+#define E4_USTORM_ISCSI_TASK_AG_CTX_RESERVED1_MASK	0x3
+#define E4_USTORM_ISCSI_TASK_AG_CTX_RESERVED1_SHIFT	0
+#define E4_USTORM_ISCSI_TASK_AG_CTX_R2T2RECV_MASK	0x3
+#define E4_USTORM_ISCSI_TASK_AG_CTX_R2T2RECV_SHIFT	2
+#define E4_USTORM_ISCSI_TASK_AG_CTX_CF3_MASK		0x3
+#define E4_USTORM_ISCSI_TASK_AG_CTX_CF3_SHIFT		4
+#define E4_USTORM_ISCSI_TASK_AG_CTX_DIF_ERROR_CF_MASK	0x3
+#define E4_USTORM_ISCSI_TASK_AG_CTX_DIF_ERROR_CF_SHIFT	6
+	u8 flags2;
+#define E4_USTORM_ISCSI_TASK_AG_CTX_HQ_SCANNED_CF_EN_MASK	0x1
+#define E4_USTORM_ISCSI_TASK_AG_CTX_HQ_SCANNED_CF_EN_SHIFT	0
+#define E4_USTORM_ISCSI_TASK_AG_CTX_DISABLE_DATA_ACKED_MASK	0x1
+#define E4_USTORM_ISCSI_TASK_AG_CTX_DISABLE_DATA_ACKED_SHIFT	1
+#define E4_USTORM_ISCSI_TASK_AG_CTX_R2T2RECV_EN_MASK		0x1
+#define E4_USTORM_ISCSI_TASK_AG_CTX_R2T2RECV_EN_SHIFT		2
+#define E4_USTORM_ISCSI_TASK_AG_CTX_CF3EN_MASK			0x1
+#define E4_USTORM_ISCSI_TASK_AG_CTX_CF3EN_SHIFT			3
+#define E4_USTORM_ISCSI_TASK_AG_CTX_DIF_ERROR_CF_EN_MASK	0x1
+#define E4_USTORM_ISCSI_TASK_AG_CTX_DIF_ERROR_CF_EN_SHIFT	4
+#define E4_USTORM_ISCSI_TASK_AG_CTX_CMP_DATA_TOTAL_EXP_EN_MASK	0x1
+#define E4_USTORM_ISCSI_TASK_AG_CTX_CMP_DATA_TOTAL_EXP_EN_SHIFT	5
+#define E4_USTORM_ISCSI_TASK_AG_CTX_RULE1EN_MASK		0x1
+#define E4_USTORM_ISCSI_TASK_AG_CTX_RULE1EN_SHIFT		6
+#define E4_USTORM_ISCSI_TASK_AG_CTX_CMP_CONT_RCV_EXP_EN_MASK	0x1
+#define E4_USTORM_ISCSI_TASK_AG_CTX_CMP_CONT_RCV_EXP_EN_SHIFT	7
+	u8 flags3;
+#define E4_USTORM_ISCSI_TASK_AG_CTX_RULE3EN_MASK		0x1
+#define E4_USTORM_ISCSI_TASK_AG_CTX_RULE3EN_SHIFT		0
+#define E4_USTORM_ISCSI_TASK_AG_CTX_RULE4EN_MASK		0x1
+#define E4_USTORM_ISCSI_TASK_AG_CTX_RULE4EN_SHIFT		1
+#define E4_USTORM_ISCSI_TASK_AG_CTX_RULE5EN_MASK		0x1
+#define E4_USTORM_ISCSI_TASK_AG_CTX_RULE5EN_SHIFT		2
+#define E4_USTORM_ISCSI_TASK_AG_CTX_RULE6EN_MASK		0x1
+#define E4_USTORM_ISCSI_TASK_AG_CTX_RULE6EN_SHIFT		3
+#define E4_USTORM_ISCSI_TASK_AG_CTX_DIF_ERROR_TYPE_MASK		0xF
+#define E4_USTORM_ISCSI_TASK_AG_CTX_DIF_ERROR_TYPE_SHIFT	4
+	__le32 dif_err_intervals;
+	__le32 dif_error_1st_interval;
+	__le32 rcv_cont_len;
+	__le32 exp_cont_len;
+	__le32 total_data_acked;
+	__le32 exp_data_acked;
+	u8 next_tid_valid;
+	u8 byte3;
+	__le16 word1;
+	__le16 next_tid;
+	__le16 word3;
+	__le32 hdr_residual_count;
+	__le32 exp_r2t_sn;
+};
+
+/* The iscsi storm task context of Mstorm */
+struct mstorm_iscsi_task_st_ctx {
+	struct scsi_cached_sges data_desc;
+	struct scsi_sgl_params sgl_params;
+	__le32 rem_task_size;
+	__le32 data_buffer_offset;
+	u8 task_type;
+	struct iscsi_dif_flags dif_flags;
+	__le16 dif_task_icid;
+	struct regpair sense_db;
+	__le32 expected_itt;
+	__le32 reserved1;
+};
+
+struct iscsi_reg1 {
+	__le32 reg1_map;
+#define ISCSI_REG1_NUM_SGES_MASK	0xF
+#define ISCSI_REG1_NUM_SGES_SHIFT	0
+#define ISCSI_REG1_RESERVED1_MASK	0xFFFFFFF
+#define ISCSI_REG1_RESERVED1_SHIFT	4
+};
+
+struct tqe_opaque {
+	__le16 opaque[2];
+};
+
+/* The iscsi storm task context of Ustorm */
+struct ustorm_iscsi_task_st_ctx {
+	__le32 rem_rcv_len;
+	__le32 exp_data_transfer_len;
+	__le32 exp_data_sn;
+	struct regpair lun;
+	struct iscsi_reg1 reg1;
+	u8 flags2;
+#define USTORM_ISCSI_TASK_ST_CTX_AHS_EXIST_MASK		0x1
+#define USTORM_ISCSI_TASK_ST_CTX_AHS_EXIST_SHIFT	0
+#define USTORM_ISCSI_TASK_ST_CTX_RESERVED1_MASK		0x7F
+#define USTORM_ISCSI_TASK_ST_CTX_RESERVED1_SHIFT	1
+	struct iscsi_dif_flags dif_flags;
+	__le16 reserved3;
+	struct tqe_opaque tqe_opaque_list;
+	__le32 reserved5;
+	__le32 reserved6;
+	__le32 reserved7;
+	u8 task_type;
+	u8 error_flags;
+#define USTORM_ISCSI_TASK_ST_CTX_DATA_DIGEST_ERROR_MASK		0x1
+#define USTORM_ISCSI_TASK_ST_CTX_DATA_DIGEST_ERROR_SHIFT	0
+#define USTORM_ISCSI_TASK_ST_CTX_DATA_TRUNCATED_ERROR_MASK	0x1
+#define USTORM_ISCSI_TASK_ST_CTX_DATA_TRUNCATED_ERROR_SHIFT	1
+#define USTORM_ISCSI_TASK_ST_CTX_UNDER_RUN_ERROR_MASK		0x1
+#define USTORM_ISCSI_TASK_ST_CTX_UNDER_RUN_ERROR_SHIFT		2
+#define USTORM_ISCSI_TASK_ST_CTX_RESERVED8_MASK			0x1F
+#define USTORM_ISCSI_TASK_ST_CTX_RESERVED8_SHIFT		3
+	u8 flags;
+#define USTORM_ISCSI_TASK_ST_CTX_CQE_WRITE_MASK			0x3
+#define USTORM_ISCSI_TASK_ST_CTX_CQE_WRITE_SHIFT		0
+#define USTORM_ISCSI_TASK_ST_CTX_LOCAL_COMP_MASK		0x1
+#define USTORM_ISCSI_TASK_ST_CTX_LOCAL_COMP_SHIFT		2
+#define USTORM_ISCSI_TASK_ST_CTX_Q0_R2TQE_WRITE_MASK		0x1
+#define USTORM_ISCSI_TASK_ST_CTX_Q0_R2TQE_WRITE_SHIFT		3
+#define USTORM_ISCSI_TASK_ST_CTX_TOTAL_DATA_ACKED_DONE_MASK	0x1
+#define USTORM_ISCSI_TASK_ST_CTX_TOTAL_DATA_ACKED_DONE_SHIFT	4
+#define USTORM_ISCSI_TASK_ST_CTX_HQ_SCANNED_DONE_MASK		0x1
+#define USTORM_ISCSI_TASK_ST_CTX_HQ_SCANNED_DONE_SHIFT		5
+#define USTORM_ISCSI_TASK_ST_CTX_R2T2RECV_DONE_MASK		0x1
+#define USTORM_ISCSI_TASK_ST_CTX_R2T2RECV_DONE_SHIFT		6
+#define USTORM_ISCSI_TASK_ST_CTX_RESERVED0_MASK			0x1
+#define USTORM_ISCSI_TASK_ST_CTX_RESERVED0_SHIFT		7
+	u8 cq_rss_number;
+};
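A completion path can test the uStorm error bits with the GET_FIELD helper sketched earlier; the helper name below is hypothetical.

/* Hypothetical helper: did uStorm flag an RX-side error on this task? */
static bool iscsi_task_rx_error(const struct ustorm_iscsi_task_st_ctx *ctx)
{
	u8 err = ctx->error_flags;

	return GET_FIELD(err, USTORM_ISCSI_TASK_ST_CTX_DATA_DIGEST_ERROR) ||
	       GET_FIELD(err, USTORM_ISCSI_TASK_ST_CTX_DATA_TRUNCATED_ERROR) ||
	       GET_FIELD(err, USTORM_ISCSI_TASK_ST_CTX_UNDER_RUN_ERROR);
}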
+
+/* iscsi task context */
+struct e4_iscsi_task_context {
+	struct ystorm_iscsi_task_st_ctx ystorm_st_context;
+	struct e4_ystorm_iscsi_task_ag_ctx ystorm_ag_context;
+	struct regpair ystorm_ag_padding[2];
+	struct tdif_task_context tdif_context;
+	struct e4_mstorm_iscsi_task_ag_ctx mstorm_ag_context;
+	struct regpair mstorm_ag_padding[2];
+	struct e4_ustorm_iscsi_task_ag_ctx ustorm_ag_context;
+	struct mstorm_iscsi_task_st_ctx mstorm_st_context;
+	struct ustorm_iscsi_task_st_ctx ustorm_st_context;
+	struct rdif_task_context rdif_context;
+};
+
+/* iSCSI connection offload params passed by driver to FW in ISCSI offload
+ * ramrod.
+ */
+struct iscsi_conn_offload_params {
+	struct regpair sq_pbl_addr;
+	struct regpair r2tq_pbl_addr;
+	struct regpair xhq_pbl_addr;
+	struct regpair uhq_pbl_addr;
+	__le32 initial_ack;
+	__le16 physical_q0;
+	__le16 physical_q1;
+	u8 flags;
+#define ISCSI_CONN_OFFLOAD_PARAMS_TCP_ON_CHIP_1B_MASK	0x1
+#define ISCSI_CONN_OFFLOAD_PARAMS_TCP_ON_CHIP_1B_SHIFT	0
+#define ISCSI_CONN_OFFLOAD_PARAMS_TARGET_MODE_MASK	0x1
+#define ISCSI_CONN_OFFLOAD_PARAMS_TARGET_MODE_SHIFT	1
+#define ISCSI_CONN_OFFLOAD_PARAMS_RESTRICTED_MODE_MASK	0x1
+#define ISCSI_CONN_OFFLOAD_PARAMS_RESTRICTED_MODE_SHIFT	2
+#define ISCSI_CONN_OFFLOAD_PARAMS_RESERVED1_MASK	0x1F
+#define ISCSI_CONN_OFFLOAD_PARAMS_RESERVED1_SHIFT	3
+	u8 pbl_page_size_log;
+	u8 pbe_page_size_log;
+	u8 default_cq;
+	__le32 stat_sn;
+};
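The two page-size fields are carried as log2 values, as their names suggest; a minimal sketch, assuming host PAGE_SIZE is the page size in use (ilog2() comes from linux/log2.h):

static void iscsi_set_page_sizes(struct iscsi_conn_offload_params *p)
{
	/* PBL/PBE page sizes are passed as log2 of the byte size */
	p->pbl_page_size_log = ilog2(PAGE_SIZE);	/* 12 for 4 KiB pages */
	p->pbe_page_size_log = ilog2(PAGE_SIZE);
}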
+
+/* iSCSI connection statistics */
+struct iscsi_conn_stats_params {
+	struct regpair iscsi_tcp_tx_packets_cnt;
+	struct regpair iscsi_tcp_tx_bytes_cnt;
+	struct regpair iscsi_tcp_tx_rxmit_cnt;
+	struct regpair iscsi_tcp_rx_packets_cnt;
+	struct regpair iscsi_tcp_rx_bytes_cnt;
+	struct regpair iscsi_tcp_rx_dup_ack_cnt;
+	__le32 iscsi_tcp_rx_chksum_err_cnt;
+	__le32 reserved;
+};
+
+/* spe message header */
+struct iscsi_slow_path_hdr {
+	u8 op_code;
+	u8 flags;
+#define ISCSI_SLOW_PATH_HDR_RESERVED0_MASK	0xF
+#define ISCSI_SLOW_PATH_HDR_RESERVED0_SHIFT	0
+#define ISCSI_SLOW_PATH_HDR_LAYER_CODE_MASK	0x7
+#define ISCSI_SLOW_PATH_HDR_LAYER_CODE_SHIFT	4
+#define ISCSI_SLOW_PATH_HDR_RESERVED1_MASK	0x1
+#define ISCSI_SLOW_PATH_HDR_RESERVED1_SHIFT	7
+};
+
+/* iSCSI connection update params passed by driver to FW in ISCSI update
+ * ramrod.
+ */
+struct iscsi_conn_update_ramrod_params {
+	struct iscsi_slow_path_hdr hdr;
+	__le16 conn_id;
+	__le32 fw_cid;
+	u8 flags;
+#define ISCSI_CONN_UPDATE_RAMROD_PARAMS_HD_EN_MASK		0x1
+#define ISCSI_CONN_UPDATE_RAMROD_PARAMS_HD_EN_SHIFT		0
+#define ISCSI_CONN_UPDATE_RAMROD_PARAMS_DD_EN_MASK		0x1
+#define ISCSI_CONN_UPDATE_RAMROD_PARAMS_DD_EN_SHIFT		1
+#define ISCSI_CONN_UPDATE_RAMROD_PARAMS_INITIAL_R2T_MASK	0x1
+#define ISCSI_CONN_UPDATE_RAMROD_PARAMS_INITIAL_R2T_SHIFT	2
+#define ISCSI_CONN_UPDATE_RAMROD_PARAMS_IMMEDIATE_DATA_MASK	0x1
+#define ISCSI_CONN_UPDATE_RAMROD_PARAMS_IMMEDIATE_DATA_SHIFT	3
+#define ISCSI_CONN_UPDATE_RAMROD_PARAMS_DIF_BLOCK_SIZE_MASK	0x1
+#define ISCSI_CONN_UPDATE_RAMROD_PARAMS_DIF_BLOCK_SIZE_SHIFT	4
+#define ISCSI_CONN_UPDATE_RAMROD_PARAMS_DIF_ON_HOST_EN_MASK	0x1
+#define ISCSI_CONN_UPDATE_RAMROD_PARAMS_DIF_ON_HOST_EN_SHIFT	5
+#define ISCSI_CONN_UPDATE_RAMROD_PARAMS_DIF_ON_IMM_EN_MASK	0x1
+#define ISCSI_CONN_UPDATE_RAMROD_PARAMS_DIF_ON_IMM_EN_SHIFT	6
+#define ISCSI_CONN_UPDATE_RAMROD_PARAMS_LUN_MAPPER_EN_MASK	0x1
+#define ISCSI_CONN_UPDATE_RAMROD_PARAMS_LUN_MAPPER_EN_SHIFT	7
+	u8 reserved0[3];
+	__le32 max_seq_size;
+	__le32 max_send_pdu_length;
+	__le32 max_recv_pdu_length;
+	__le32 first_seq_length;
+	__le32 exp_stat_sn;
+	union dif_configuration_params dif_on_imme_params;
+};
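Digest enablement travels in the flags byte of the update ramrod; a hedged sketch using the SET_FIELD helper from earlier (function name hypothetical):

/* Sketch: enable/disable header and data digests on a live connection */
static void iscsi_update_set_digests(struct iscsi_conn_update_ramrod_params *p,
				     bool hd_en, bool dd_en)
{
	SET_FIELD(p->flags, ISCSI_CONN_UPDATE_RAMROD_PARAMS_HD_EN, hd_en);
	SET_FIELD(p->flags, ISCSI_CONN_UPDATE_RAMROD_PARAMS_DD_EN, dd_en);
}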
+
+/* iSCSI CQ element - fields common to all CQE formats */
 struct iscsi_cqe_common {
 	__le16 conn_id;
 	u8 cqe_type;
@@ -669,6 +1083,7 @@ struct iscsi_cqe_common {
 	union iscsi_task_hdr iscsi_hdr;
 };
 
+/* iSCSI CQ element - solicited completion */
 struct iscsi_cqe_solicited {
 	__le16 conn_id;
 	u8 cqe_type;
@@ -678,10 +1093,11 @@ struct iscsi_cqe_solicited {
 	u8 fw_dbg_field;
 	u8 caused_conn_err;
 	u8 reserved0[3];
-	__le32 reserved1[1];
+	__le32 data_truncated_bytes;
 	union iscsi_task_hdr iscsi_hdr;
 };
 
+/* iSCSI CQ element - unsolicited completion */
 struct iscsi_cqe_unsolicited {
 	__le16 conn_id;
 	u8 cqe_type;
@@ -689,16 +1105,19 @@ struct iscsi_cqe_unsolicited {
 	__le16 reserved0;
 	u8 reserved1;
 	u8 unsol_cqe_type;
-	struct regpair rqe_opaque;
+	__le16 rqe_opaque;
+	__le16 reserved2[3];
 	union iscsi_task_hdr iscsi_hdr;
 };
 
+/* iSCSI CQ element */
 union iscsi_cqe {
 	struct iscsi_cqe_common cqe_common;
 	struct iscsi_cqe_solicited cqe_solicited;
 	struct iscsi_cqe_unsolicited cqe_unsolicited;
 };
 
+/* iSCSI CQE type */
 enum iscsi_cqes_type {
 	ISCSI_CQE_TYPE_SOLICITED = 1,
 	ISCSI_CQE_TYPE_UNSOLICITED,
@@ -708,6 +1127,7 @@ enum iscsi_cqes_type {
 	MAX_ISCSI_CQES_TYPE
 };
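With the union above, a poll loop dispatches on the common cqe_type field; a sketch with hypothetical handler names:

/* Sketch: dispatch a reaped CQE by its type field */
static void iscsi_handle_cqe(union iscsi_cqe *cqe)
{
	switch (cqe->cqe_common.cqe_type) {
	case ISCSI_CQE_TYPE_SOLICITED:
		iscsi_handle_solicited(&cqe->cqe_solicited);	/* hypothetical */
		break;
	case ISCSI_CQE_TYPE_UNSOLICITED:
		iscsi_handle_unsolicited(&cqe->cqe_unsolicited); /* hypothetical */
		break;
	default:
		break;
	}
}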
 
+/* iSCSI unsolicited CQE type */
 enum iscsi_cqe_unsolicited_type {
 	ISCSI_CQE_UNSOLICITED_NONE,
 	ISCSI_CQE_UNSOLICITED_SINGLE,
@@ -717,37 +1137,28 @@ enum iscsi_cqe_unsolicited_type {
 	MAX_ISCSI_CQE_UNSOLICITED_TYPE
 };
 
-
+/* iscsi debug modes */
 struct iscsi_debug_modes {
 	u8 flags;
-#define ISCSI_DEBUG_MODES_ASSERT_IF_RX_CONN_ERROR_MASK         0x1
-#define ISCSI_DEBUG_MODES_ASSERT_IF_RX_CONN_ERROR_SHIFT        0
-#define ISCSI_DEBUG_MODES_ASSERT_IF_RECV_RESET_MASK            0x1
-#define ISCSI_DEBUG_MODES_ASSERT_IF_RECV_RESET_SHIFT           1
-#define ISCSI_DEBUG_MODES_ASSERT_IF_RECV_FIN_MASK              0x1
-#define ISCSI_DEBUG_MODES_ASSERT_IF_RECV_FIN_SHIFT             2
-#define ISCSI_DEBUG_MODES_ASSERT_IF_RECV_CLEANUP_MASK          0x1
-#define ISCSI_DEBUG_MODES_ASSERT_IF_RECV_CLEANUP_SHIFT         3
-#define ISCSI_DEBUG_MODES_ASSERT_IF_RECV_REJECT_OR_ASYNC_MASK  0x1
-#define ISCSI_DEBUG_MODES_ASSERT_IF_RECV_REJECT_OR_ASYNC_SHIFT 4
-#define ISCSI_DEBUG_MODES_ASSERT_IF_RECV_NOP_MASK              0x1
-#define ISCSI_DEBUG_MODES_ASSERT_IF_RECV_NOP_SHIFT             5
-#define ISCSI_DEBUG_MODES_ASSERT_IF_DATA_DIGEST_ERROR_MASK     0x1
-#define ISCSI_DEBUG_MODES_ASSERT_IF_DATA_DIGEST_ERROR_SHIFT    6
-#define ISCSI_DEBUG_MODES_ASSERT_IF_DIF_ERROR_MASK             0x1
-#define ISCSI_DEBUG_MODES_ASSERT_IF_DIF_ERROR_SHIFT            7
-};
-
-struct iscsi_dif_flags {
-	u8 flags;
-#define ISCSI_DIF_FLAGS_PROT_INTERVAL_SIZE_LOG_MASK  0xF
-#define ISCSI_DIF_FLAGS_PROT_INTERVAL_SIZE_LOG_SHIFT 0
-#define ISCSI_DIF_FLAGS_DIF_TO_PEER_MASK             0x1
-#define ISCSI_DIF_FLAGS_DIF_TO_PEER_SHIFT            4
-#define ISCSI_DIF_FLAGS_HOST_INTERFACE_MASK          0x7
-#define ISCSI_DIF_FLAGS_HOST_INTERFACE_SHIFT         5
-};
-
+#define ISCSI_DEBUG_MODES_ASSERT_IF_RX_CONN_ERROR_MASK			0x1
+#define ISCSI_DEBUG_MODES_ASSERT_IF_RX_CONN_ERROR_SHIFT			0
+#define ISCSI_DEBUG_MODES_ASSERT_IF_RECV_RESET_MASK			0x1
+#define ISCSI_DEBUG_MODES_ASSERT_IF_RECV_RESET_SHIFT			1
+#define ISCSI_DEBUG_MODES_ASSERT_IF_RECV_FIN_MASK			0x1
+#define ISCSI_DEBUG_MODES_ASSERT_IF_RECV_FIN_SHIFT			2
+#define ISCSI_DEBUG_MODES_ASSERT_IF_RECV_CLEANUP_MASK			0x1
+#define ISCSI_DEBUG_MODES_ASSERT_IF_RECV_CLEANUP_SHIFT			3
+#define ISCSI_DEBUG_MODES_ASSERT_IF_RECV_REJECT_OR_ASYNC_MASK		0x1
+#define ISCSI_DEBUG_MODES_ASSERT_IF_RECV_REJECT_OR_ASYNC_SHIFT		4
+#define ISCSI_DEBUG_MODES_ASSERT_IF_RECV_NOP_MASK			0x1
+#define ISCSI_DEBUG_MODES_ASSERT_IF_RECV_NOP_SHIFT			5
+#define ISCSI_DEBUG_MODES_ASSERT_IF_DIF_OR_DATA_DIGEST_ERROR_MASK	0x1
+#define ISCSI_DEBUG_MODES_ASSERT_IF_DIF_OR_DATA_DIGEST_ERROR_SHIFT	6
+#define ISCSI_DEBUG_MODES_ASSERT_IF_HQ_CORRUPT_MASK			0x1
+#define ISCSI_DEBUG_MODES_ASSERT_IF_HQ_CORRUPT_SHIFT			7
+};
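The two renamed/added debug bits can be armed with SET_FIELD; a sketch (helper name hypothetical):

/* Sketch: ask firmware to assert on DIF/digest errors and HQ corruption */
static void iscsi_arm_debug_asserts(struct iscsi_debug_modes *dbg)
{
	SET_FIELD(dbg->flags,
		  ISCSI_DEBUG_MODES_ASSERT_IF_DIF_OR_DATA_DIGEST_ERROR, 1);
	SET_FIELD(dbg->flags, ISCSI_DEBUG_MODES_ASSERT_IF_HQ_CORRUPT, 1);
}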
+
+/* iSCSI kernel completion queue IDs */
 enum iscsi_eqe_opcode {
 	ISCSI_EVENT_TYPE_INIT_FUNC = 0,
 	ISCSI_EVENT_TYPE_DESTROY_FUNC,
@@ -756,9 +1167,9 @@ enum iscsi_eqe_opcode {
 	ISCSI_EVENT_TYPE_CLEAR_SQ,
 	ISCSI_EVENT_TYPE_TERMINATE_CONN,
 	ISCSI_EVENT_TYPE_MAC_UPDATE_CONN,
+	ISCSI_EVENT_TYPE_COLLECT_STATS_CONN,
 	ISCSI_EVENT_TYPE_ASYN_CONNECT_COMPLETE,
 	ISCSI_EVENT_TYPE_ASYN_TERMINATE_DONE,
-	RESERVED9,
 	ISCSI_EVENT_TYPE_START_OF_ERROR_TYPES = 10,
 	ISCSI_EVENT_TYPE_ASYN_ABORT_RCVD,
 	ISCSI_EVENT_TYPE_ASYN_CLOSE_RCVD,
@@ -772,6 +1183,7 @@ enum iscsi_eqe_opcode {
 	MAX_ISCSI_EQE_OPCODE
 };
 
+/* iSCSI EQE and CQE completion status */
 enum iscsi_error_types {
 	ISCSI_STATUS_NONE = 0,
 	ISCSI_CQE_ERROR_UNSOLICITED_RCV_ON_INVALID_CONN = 1,
@@ -823,7 +1235,7 @@ enum iscsi_error_types {
 	MAX_ISCSI_ERROR_TYPES
 };
 
-
+/* iSCSI Ramrod Command IDs */
 enum iscsi_ramrod_cmd_id {
 	ISCSI_RAMROD_CMD_ID_UNUSED = 0,
 	ISCSI_RAMROD_CMD_ID_INIT_FUNC = 1,
@@ -833,22 +1245,11 @@ enum iscsi_ramrod_cmd_id {
 	ISCSI_RAMROD_CMD_ID_TERMINATION_CONN = 5,
 	ISCSI_RAMROD_CMD_ID_CLEAR_SQ = 6,
 	ISCSI_RAMROD_CMD_ID_MAC_UPDATE = 7,
+	ISCSI_RAMROD_CMD_ID_CONN_STATS = 8,
 	MAX_ISCSI_RAMROD_CMD_ID
 };
 
-struct iscsi_reg1 {
-	__le32 reg1_map;
-#define ISCSI_REG1_NUM_SGES_MASK   0xF
-#define ISCSI_REG1_NUM_SGES_SHIFT  0
-#define ISCSI_REG1_RESERVED1_MASK  0xFFFFFFF
-#define ISCSI_REG1_RESERVED1_SHIFT 4
-};
-
-union iscsi_seq_num {
-	__le16 data_sn;
-	__le16 r2t_sn;
-};
-
+/* iSCSI connection MAC update request */
 struct iscsi_spe_conn_mac_update {
 	struct iscsi_slow_path_hdr hdr;
 	__le16 conn_id;
@@ -859,6 +1260,9 @@ struct iscsi_spe_conn_mac_update {
 	u8 reserved0[2];
 };
 
+/* iSCSI and TCP connection (Option 1) offload params passed by driver to FW in
+ * iSCSI offload ramrod.
+ */
 struct iscsi_spe_conn_offload {
 	struct iscsi_slow_path_hdr hdr;
 	__le16 conn_id;
@@ -867,6 +1271,9 @@ struct iscsi_spe_conn_offload {
 	struct tcp_offload_params tcp;
 };
 
+/* iSCSI and TCP connection (Option 2) offload params passed by driver to FW in
+ * iSCSI offload ramrod.
+ */
 struct iscsi_spe_conn_offload_option2 {
 	struct iscsi_slow_path_hdr hdr;
 	__le16 conn_id;
@@ -875,6 +1282,17 @@ struct iscsi_spe_conn_offload_option2 {
 	struct tcp_offload_params_opt2 tcp;
 };
 
+/* iSCSI collect connection statistics request */
+struct iscsi_spe_conn_statistics {
+	struct iscsi_slow_path_hdr hdr;
+	__le16 conn_id;
+	__le32 fw_cid;
+	u8 reset_stats;
+	u8 reserved0[7];
+	struct regpair stats_cnts_addr;
+};
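The new stats ramrod carries the DMA address of a counters buffer as a regpair; a sketch assuming qed's DMA_REGPAIR_LE() helper, which splits a dma_addr_t into the little-endian hi/lo halves:

/* Sketch: point the ramrod at a DMA-coherent iscsi_conn_stats_params buf */
static void iscsi_fill_stats_req(struct iscsi_spe_conn_statistics *req,
				 dma_addr_t stats_paddr, bool reset)
{
	req->reset_stats = reset ? 1 : 0;
	DMA_REGPAIR_LE(req->stats_cnts_addr, stats_paddr);
}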
+
+/* iSCSI connection termination request */
 struct iscsi_spe_conn_termination {
 	struct iscsi_slow_path_hdr hdr;
 	__le16 conn_id;
@@ -885,12 +1303,14 @@ struct iscsi_spe_conn_termination {
 	struct regpair query_params_addr;
 };
 
+/* iSCSI firmware function destroy parameters */
 struct iscsi_spe_func_dstry {
 	struct iscsi_slow_path_hdr hdr;
 	__le16 reserved0;
 	__le32 reserved1;
 };
 
+/* iSCSI firmware function init parameters */
 struct iscsi_spe_func_init {
 	struct iscsi_slow_path_hdr hdr;
 	__le16 half_way_close_timeout;
@@ -898,283 +1318,19 @@ struct iscsi_spe_func_init {
 	u8 num_r2tq_pages_in_ring;
 	u8 num_uhq_pages_in_ring;
 	u8 ll2_rx_queue_id;
-	u8 ooo_enable;
+	u8 flags;
+#define ISCSI_SPE_FUNC_INIT_COUNTERS_EN_MASK	0x1
+#define ISCSI_SPE_FUNC_INIT_COUNTERS_EN_SHIFT	0
+#define ISCSI_SPE_FUNC_INIT_RESERVED0_MASK	0x7F
+#define ISCSI_SPE_FUNC_INIT_RESERVED0_SHIFT	1
 	struct iscsi_debug_modes debug_mode;
 	__le16 reserved1;
 	__le32 reserved2;
-	__le32 reserved3;
-	__le32 reserved4;
 	struct scsi_init_func_params func_params;
 	struct scsi_init_func_queues q_params;
 };
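The old ooo_enable byte is gone; per-PF statistics collection is now a single bit in the new flags byte. A sketch (helper name hypothetical):

/* Sketch: request per-PF statistics counters at function init */
static void iscsi_func_init_enable_counters(struct iscsi_spe_func_init *init)
{
	SET_FIELD(init->flags, ISCSI_SPE_FUNC_INIT_COUNTERS_EN, 1);
}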
 
-struct ystorm_iscsi_task_state {
-	struct scsi_cached_sges data_desc;
-	struct scsi_sgl_params sgl_params;
-	__le32 exp_r2t_sn;
-	__le32 buffer_offset;
-	union iscsi_seq_num seq_num;
-	struct iscsi_dif_flags dif_flags;
-	u8 flags;
-#define YSTORM_ISCSI_TASK_STATE_LOCAL_COMP_MASK  0x1
-#define YSTORM_ISCSI_TASK_STATE_LOCAL_COMP_SHIFT 0
-#define YSTORM_ISCSI_TASK_STATE_SLOW_IO_MASK     0x1
-#define YSTORM_ISCSI_TASK_STATE_SLOW_IO_SHIFT    1
-#define YSTORM_ISCSI_TASK_STATE_RESERVED0_MASK   0x3F
-#define YSTORM_ISCSI_TASK_STATE_RESERVED0_SHIFT  2
-};
-
-struct ystorm_iscsi_task_rxmit_opt {
-	__le32 fast_rxmit_sge_offset;
-	__le32 scan_start_buffer_offset;
-	__le32 fast_rxmit_buffer_offset;
-	u8 scan_start_sgl_index;
-	u8 fast_rxmit_sgl_index;
-	__le16 reserved;
-};
-
-struct ystorm_iscsi_task_st_ctx {
-	struct ystorm_iscsi_task_state state;
-	struct ystorm_iscsi_task_rxmit_opt rxmit_opt;
-	union iscsi_task_hdr pdu_hdr;
-};
-
-struct ystorm_iscsi_task_ag_ctx {
-	u8 reserved;
-	u8 byte1;
-	__le16 word0;
-	u8 flags0;
-#define YSTORM_ISCSI_TASK_AG_CTX_NIBBLE0_MASK     0xF
-#define YSTORM_ISCSI_TASK_AG_CTX_NIBBLE0_SHIFT    0
-#define YSTORM_ISCSI_TASK_AG_CTX_BIT0_MASK        0x1
-#define YSTORM_ISCSI_TASK_AG_CTX_BIT0_SHIFT       4
-#define YSTORM_ISCSI_TASK_AG_CTX_BIT1_MASK        0x1
-#define YSTORM_ISCSI_TASK_AG_CTX_BIT1_SHIFT       5
-#define YSTORM_ISCSI_TASK_AG_CTX_VALID_MASK       0x1
-#define YSTORM_ISCSI_TASK_AG_CTX_VALID_SHIFT      6
-#define YSTORM_ISCSI_TASK_AG_CTX_BIT3_MASK        0x1
-#define YSTORM_ISCSI_TASK_AG_CTX_BIT3_SHIFT       7
-	u8 flags1;
-#define YSTORM_ISCSI_TASK_AG_CTX_CF0_MASK         0x3
-#define YSTORM_ISCSI_TASK_AG_CTX_CF0_SHIFT        0
-#define YSTORM_ISCSI_TASK_AG_CTX_CF1_MASK         0x3
-#define YSTORM_ISCSI_TASK_AG_CTX_CF1_SHIFT        2
-#define YSTORM_ISCSI_TASK_AG_CTX_CF2SPECIAL_MASK  0x3
-#define YSTORM_ISCSI_TASK_AG_CTX_CF2SPECIAL_SHIFT 4
-#define YSTORM_ISCSI_TASK_AG_CTX_CF0EN_MASK       0x1
-#define YSTORM_ISCSI_TASK_AG_CTX_CF0EN_SHIFT      6
-#define YSTORM_ISCSI_TASK_AG_CTX_CF1EN_MASK       0x1
-#define YSTORM_ISCSI_TASK_AG_CTX_CF1EN_SHIFT      7
-	u8 flags2;
-#define YSTORM_ISCSI_TASK_AG_CTX_BIT4_MASK        0x1
-#define YSTORM_ISCSI_TASK_AG_CTX_BIT4_SHIFT       0
-#define YSTORM_ISCSI_TASK_AG_CTX_RULE0EN_MASK     0x1
-#define YSTORM_ISCSI_TASK_AG_CTX_RULE0EN_SHIFT    1
-#define YSTORM_ISCSI_TASK_AG_CTX_RULE1EN_MASK     0x1
-#define YSTORM_ISCSI_TASK_AG_CTX_RULE1EN_SHIFT    2
-#define YSTORM_ISCSI_TASK_AG_CTX_RULE2EN_MASK     0x1
-#define YSTORM_ISCSI_TASK_AG_CTX_RULE2EN_SHIFT    3
-#define YSTORM_ISCSI_TASK_AG_CTX_RULE3EN_MASK     0x1
-#define YSTORM_ISCSI_TASK_AG_CTX_RULE3EN_SHIFT    4
-#define YSTORM_ISCSI_TASK_AG_CTX_RULE4EN_MASK     0x1
-#define YSTORM_ISCSI_TASK_AG_CTX_RULE4EN_SHIFT    5
-#define YSTORM_ISCSI_TASK_AG_CTX_RULE5EN_MASK     0x1
-#define YSTORM_ISCSI_TASK_AG_CTX_RULE5EN_SHIFT    6
-#define YSTORM_ISCSI_TASK_AG_CTX_RULE6EN_MASK     0x1
-#define YSTORM_ISCSI_TASK_AG_CTX_RULE6EN_SHIFT    7
-	u8 byte2;
-	__le32 TTT;
-	u8 byte3;
-	u8 byte4;
-	__le16 word1;
-};
-
-struct mstorm_iscsi_task_ag_ctx {
-	u8 cdu_validation;
-	u8 byte1;
-	__le16 task_cid;
-	u8 flags0;
-#define MSTORM_ISCSI_TASK_AG_CTX_CONNECTION_TYPE_MASK     0xF
-#define MSTORM_ISCSI_TASK_AG_CTX_CONNECTION_TYPE_SHIFT    0
-#define MSTORM_ISCSI_TASK_AG_CTX_EXIST_IN_QM0_MASK        0x1
-#define MSTORM_ISCSI_TASK_AG_CTX_EXIST_IN_QM0_SHIFT       4
-#define MSTORM_ISCSI_TASK_AG_CTX_BIT1_MASK                0x1
-#define MSTORM_ISCSI_TASK_AG_CTX_BIT1_SHIFT               5
-#define MSTORM_ISCSI_TASK_AG_CTX_VALID_MASK               0x1
-#define MSTORM_ISCSI_TASK_AG_CTX_VALID_SHIFT              6
-#define MSTORM_ISCSI_TASK_AG_CTX_TASK_CLEANUP_FLAG_MASK   0x1
-#define MSTORM_ISCSI_TASK_AG_CTX_TASK_CLEANUP_FLAG_SHIFT  7
-	u8 flags1;
-#define MSTORM_ISCSI_TASK_AG_CTX_TASK_CLEANUP_CF_MASK     0x3
-#define MSTORM_ISCSI_TASK_AG_CTX_TASK_CLEANUP_CF_SHIFT    0
-#define MSTORM_ISCSI_TASK_AG_CTX_CF1_MASK                 0x3
-#define MSTORM_ISCSI_TASK_AG_CTX_CF1_SHIFT                2
-#define MSTORM_ISCSI_TASK_AG_CTX_CF2_MASK                 0x3
-#define MSTORM_ISCSI_TASK_AG_CTX_CF2_SHIFT                4
-#define MSTORM_ISCSI_TASK_AG_CTX_TASK_CLEANUP_CF_EN_MASK  0x1
-#define MSTORM_ISCSI_TASK_AG_CTX_TASK_CLEANUP_CF_EN_SHIFT 6
-#define MSTORM_ISCSI_TASK_AG_CTX_CF1EN_MASK               0x1
-#define MSTORM_ISCSI_TASK_AG_CTX_CF1EN_SHIFT              7
-	u8 flags2;
-#define MSTORM_ISCSI_TASK_AG_CTX_CF2EN_MASK               0x1
-#define MSTORM_ISCSI_TASK_AG_CTX_CF2EN_SHIFT              0
-#define MSTORM_ISCSI_TASK_AG_CTX_RULE0EN_MASK             0x1
-#define MSTORM_ISCSI_TASK_AG_CTX_RULE0EN_SHIFT            1
-#define MSTORM_ISCSI_TASK_AG_CTX_RULE1EN_MASK             0x1
-#define MSTORM_ISCSI_TASK_AG_CTX_RULE1EN_SHIFT            2
-#define MSTORM_ISCSI_TASK_AG_CTX_RULE2EN_MASK             0x1
-#define MSTORM_ISCSI_TASK_AG_CTX_RULE2EN_SHIFT            3
-#define MSTORM_ISCSI_TASK_AG_CTX_RULE3EN_MASK             0x1
-#define MSTORM_ISCSI_TASK_AG_CTX_RULE3EN_SHIFT            4
-#define MSTORM_ISCSI_TASK_AG_CTX_RULE4EN_MASK             0x1
-#define MSTORM_ISCSI_TASK_AG_CTX_RULE4EN_SHIFT            5
-#define MSTORM_ISCSI_TASK_AG_CTX_RULE5EN_MASK             0x1
-#define MSTORM_ISCSI_TASK_AG_CTX_RULE5EN_SHIFT            6
-#define MSTORM_ISCSI_TASK_AG_CTX_RULE6EN_MASK             0x1
-#define MSTORM_ISCSI_TASK_AG_CTX_RULE6EN_SHIFT            7
-	u8 byte2;
-	__le32 reg0;
-	u8 byte3;
-	u8 byte4;
-	__le16 word1;
-};
-
-struct ustorm_iscsi_task_ag_ctx {
-	u8 reserved;
-	u8 state;
-	__le16 icid;
-	u8 flags0;
-#define USTORM_ISCSI_TASK_AG_CTX_CONNECTION_TYPE_MASK        0xF
-#define USTORM_ISCSI_TASK_AG_CTX_CONNECTION_TYPE_SHIFT       0
-#define USTORM_ISCSI_TASK_AG_CTX_EXIST_IN_QM0_MASK           0x1
-#define USTORM_ISCSI_TASK_AG_CTX_EXIST_IN_QM0_SHIFT          4
-#define USTORM_ISCSI_TASK_AG_CTX_BIT1_MASK                   0x1
-#define USTORM_ISCSI_TASK_AG_CTX_BIT1_SHIFT                  5
-#define USTORM_ISCSI_TASK_AG_CTX_HQ_SCANNED_CF_MASK          0x3
-#define USTORM_ISCSI_TASK_AG_CTX_HQ_SCANNED_CF_SHIFT         6
-	u8 flags1;
-#define USTORM_ISCSI_TASK_AG_CTX_RESERVED1_MASK              0x3
-#define USTORM_ISCSI_TASK_AG_CTX_RESERVED1_SHIFT             0
-#define USTORM_ISCSI_TASK_AG_CTX_R2T2RECV_MASK               0x3
-#define USTORM_ISCSI_TASK_AG_CTX_R2T2RECV_SHIFT              2
-#define USTORM_ISCSI_TASK_AG_CTX_CF3_MASK                    0x3
-#define USTORM_ISCSI_TASK_AG_CTX_CF3_SHIFT                   4
-#define USTORM_ISCSI_TASK_AG_CTX_DIF_ERROR_CF_MASK           0x3
-#define USTORM_ISCSI_TASK_AG_CTX_DIF_ERROR_CF_SHIFT          6
-	u8 flags2;
-#define USTORM_ISCSI_TASK_AG_CTX_HQ_SCANNED_CF_EN_MASK       0x1
-#define USTORM_ISCSI_TASK_AG_CTX_HQ_SCANNED_CF_EN_SHIFT      0
-#define USTORM_ISCSI_TASK_AG_CTX_DISABLE_DATA_ACKED_MASK     0x1
-#define USTORM_ISCSI_TASK_AG_CTX_DISABLE_DATA_ACKED_SHIFT    1
-#define USTORM_ISCSI_TASK_AG_CTX_R2T2RECV_EN_MASK            0x1
-#define USTORM_ISCSI_TASK_AG_CTX_R2T2RECV_EN_SHIFT           2
-#define USTORM_ISCSI_TASK_AG_CTX_CF3EN_MASK                  0x1
-#define USTORM_ISCSI_TASK_AG_CTX_CF3EN_SHIFT                 3
-#define USTORM_ISCSI_TASK_AG_CTX_DIF_ERROR_CF_EN_MASK        0x1
-#define USTORM_ISCSI_TASK_AG_CTX_DIF_ERROR_CF_EN_SHIFT       4
-#define USTORM_ISCSI_TASK_AG_CTX_CMP_DATA_TOTAL_EXP_EN_MASK  0x1
-#define USTORM_ISCSI_TASK_AG_CTX_CMP_DATA_TOTAL_EXP_EN_SHIFT 5
-#define USTORM_ISCSI_TASK_AG_CTX_RULE1EN_MASK                0x1
-#define USTORM_ISCSI_TASK_AG_CTX_RULE1EN_SHIFT               6
-#define USTORM_ISCSI_TASK_AG_CTX_CMP_CONT_RCV_EXP_EN_MASK    0x1
-#define USTORM_ISCSI_TASK_AG_CTX_CMP_CONT_RCV_EXP_EN_SHIFT   7
-	u8 flags3;
-#define USTORM_ISCSI_TASK_AG_CTX_RULE3EN_MASK                0x1
-#define USTORM_ISCSI_TASK_AG_CTX_RULE3EN_SHIFT               0
-#define USTORM_ISCSI_TASK_AG_CTX_RULE4EN_MASK                0x1
-#define USTORM_ISCSI_TASK_AG_CTX_RULE4EN_SHIFT               1
-#define USTORM_ISCSI_TASK_AG_CTX_RULE5EN_MASK                0x1
-#define USTORM_ISCSI_TASK_AG_CTX_RULE5EN_SHIFT               2
-#define USTORM_ISCSI_TASK_AG_CTX_RULE6EN_MASK                0x1
-#define USTORM_ISCSI_TASK_AG_CTX_RULE6EN_SHIFT               3
-#define USTORM_ISCSI_TASK_AG_CTX_DIF_ERROR_TYPE_MASK         0xF
-#define USTORM_ISCSI_TASK_AG_CTX_DIF_ERROR_TYPE_SHIFT        4
-	__le32 dif_err_intervals;
-	__le32 dif_error_1st_interval;
-	__le32 rcv_cont_len;
-	__le32 exp_cont_len;
-	__le32 total_data_acked;
-	__le32 exp_data_acked;
-	u8 next_tid_valid;
-	u8 byte3;
-	__le16 word1;
-	__le16 next_tid;
-	__le16 word3;
-	__le32 hdr_residual_count;
-	__le32 exp_r2t_sn;
-};
-
-struct mstorm_iscsi_task_st_ctx {
-	struct scsi_cached_sges data_desc;
-	struct scsi_sgl_params sgl_params;
-	__le32 rem_task_size;
-	__le32 data_buffer_offset;
-	u8 task_type;
-	struct iscsi_dif_flags dif_flags;
-	u8 reserved0[2];
-	struct regpair sense_db;
-	__le32 expected_itt;
-	__le32 reserved1;
-};
-
-struct ustorm_iscsi_task_st_ctx {
-	__le32 rem_rcv_len;
-	__le32 exp_data_transfer_len;
-	__le32 exp_data_sn;
-	struct regpair lun;
-	struct iscsi_reg1 reg1;
-	u8 flags2;
-#define USTORM_ISCSI_TASK_ST_CTX_AHS_EXIST_MASK             0x1
-#define USTORM_ISCSI_TASK_ST_CTX_AHS_EXIST_SHIFT            0
-#define USTORM_ISCSI_TASK_ST_CTX_RESERVED1_MASK             0x7F
-#define USTORM_ISCSI_TASK_ST_CTX_RESERVED1_SHIFT            1
-	struct iscsi_dif_flags dif_flags;
-	__le16 reserved3;
-	__le32 reserved4;
-	__le32 reserved5;
-	__le32 reserved6;
-	__le32 reserved7;
-	u8 task_type;
-	u8 error_flags;
-#define USTORM_ISCSI_TASK_ST_CTX_DATA_DIGEST_ERROR_MASK     0x1
-#define USTORM_ISCSI_TASK_ST_CTX_DATA_DIGEST_ERROR_SHIFT    0
-#define USTORM_ISCSI_TASK_ST_CTX_DATA_TRUNCATED_ERROR_MASK  0x1
-#define USTORM_ISCSI_TASK_ST_CTX_DATA_TRUNCATED_ERROR_SHIFT 1
-#define USTORM_ISCSI_TASK_ST_CTX_UNDER_RUN_ERROR_MASK       0x1
-#define USTORM_ISCSI_TASK_ST_CTX_UNDER_RUN_ERROR_SHIFT      2
-#define USTORM_ISCSI_TASK_ST_CTX_RESERVED8_MASK             0x1F
-#define USTORM_ISCSI_TASK_ST_CTX_RESERVED8_SHIFT            3
-	u8 flags;
-#define USTORM_ISCSI_TASK_ST_CTX_CQE_WRITE_MASK             0x3
-#define USTORM_ISCSI_TASK_ST_CTX_CQE_WRITE_SHIFT            0
-#define USTORM_ISCSI_TASK_ST_CTX_LOCAL_COMP_MASK            0x1
-#define USTORM_ISCSI_TASK_ST_CTX_LOCAL_COMP_SHIFT           2
-#define USTORM_ISCSI_TASK_ST_CTX_Q0_R2TQE_WRITE_MASK        0x1
-#define USTORM_ISCSI_TASK_ST_CTX_Q0_R2TQE_WRITE_SHIFT       3
-#define USTORM_ISCSI_TASK_ST_CTX_TOTAL_DATA_ACKED_DONE_MASK  0x1
-#define USTORM_ISCSI_TASK_ST_CTX_TOTAL_DATA_ACKED_DONE_SHIFT 4
-#define USTORM_ISCSI_TASK_ST_CTX_HQ_SCANNED_DONE_MASK        0x1
-#define USTORM_ISCSI_TASK_ST_CTX_HQ_SCANNED_DONE_SHIFT       5
-#define USTORM_ISCSI_TASK_ST_CTX_R2T2RECV_DONE_MASK         0x1
-#define USTORM_ISCSI_TASK_ST_CTX_R2T2RECV_DONE_SHIFT        6
-#define USTORM_ISCSI_TASK_ST_CTX_RESERVED0_MASK             0x1
-#define USTORM_ISCSI_TASK_ST_CTX_RESERVED0_SHIFT            7
-	u8 cq_rss_number;
-};
-
-struct iscsi_task_context {
-	struct ystorm_iscsi_task_st_ctx ystorm_st_context;
-	struct ystorm_iscsi_task_ag_ctx ystorm_ag_context;
-	struct regpair ystorm_ag_padding[2];
-	struct tdif_task_context tdif_context;
-	struct mstorm_iscsi_task_ag_ctx mstorm_ag_context;
-	struct regpair mstorm_ag_padding[2];
-	struct ustorm_iscsi_task_ag_ctx ustorm_ag_context;
-	struct mstorm_iscsi_task_st_ctx mstorm_st_context;
-	struct ustorm_iscsi_task_st_ctx ustorm_st_context;
-	struct rdif_task_context rdif_context;
-};
-
+/* iSCSI task type */
 enum iscsi_task_type {
 	ISCSI_TASK_TYPE_INITIATOR_WRITE,
 	ISCSI_TASK_TYPE_INITIATOR_READ,
@@ -1186,53 +1342,57 @@ enum iscsi_task_type {
 	ISCSI_TASK_TYPE_TARGET_READ,
 	ISCSI_TASK_TYPE_TARGET_RESPONSE,
 	ISCSI_TASK_TYPE_LOGIN_RESPONSE,
+	ISCSI_TASK_TYPE_TARGET_IMM_W_DIF,
 	MAX_ISCSI_TASK_TYPE
 };
 
+/* iSCSI DesiredDataTransferLength/ttt union */
 union iscsi_ttt_txlen_union {
 	__le32 desired_tx_len;
 	__le32 ttt;
 };
 
+/* iSCSI uHQ element */
 struct iscsi_uhqe {
 	__le32 reg1;
-#define ISCSI_UHQE_PDU_PAYLOAD_LEN_MASK     0xFFFFF
-#define ISCSI_UHQE_PDU_PAYLOAD_LEN_SHIFT    0
-#define ISCSI_UHQE_LOCAL_COMP_MASK          0x1
-#define ISCSI_UHQE_LOCAL_COMP_SHIFT         20
-#define ISCSI_UHQE_TOGGLE_BIT_MASK          0x1
-#define ISCSI_UHQE_TOGGLE_BIT_SHIFT         21
-#define ISCSI_UHQE_PURE_PAYLOAD_MASK        0x1
-#define ISCSI_UHQE_PURE_PAYLOAD_SHIFT       22
-#define ISCSI_UHQE_LOGIN_RESPONSE_PDU_MASK  0x1
-#define ISCSI_UHQE_LOGIN_RESPONSE_PDU_SHIFT 23
-#define ISCSI_UHQE_TASK_ID_HI_MASK          0xFF
-#define ISCSI_UHQE_TASK_ID_HI_SHIFT         24
+#define ISCSI_UHQE_PDU_PAYLOAD_LEN_MASK		0xFFFFF
+#define ISCSI_UHQE_PDU_PAYLOAD_LEN_SHIFT	0
+#define ISCSI_UHQE_LOCAL_COMP_MASK		0x1
+#define ISCSI_UHQE_LOCAL_COMP_SHIFT		20
+#define ISCSI_UHQE_TOGGLE_BIT_MASK		0x1
+#define ISCSI_UHQE_TOGGLE_BIT_SHIFT		21
+#define ISCSI_UHQE_PURE_PAYLOAD_MASK		0x1
+#define ISCSI_UHQE_PURE_PAYLOAD_SHIFT		22
+#define ISCSI_UHQE_LOGIN_RESPONSE_PDU_MASK	0x1
+#define ISCSI_UHQE_LOGIN_RESPONSE_PDU_SHIFT	23
+#define ISCSI_UHQE_TASK_ID_HI_MASK		0xFF
+#define ISCSI_UHQE_TASK_ID_HI_SHIFT		24
 	__le32 reg2;
-#define ISCSI_UHQE_BUFFER_OFFSET_MASK       0xFFFFFF
-#define ISCSI_UHQE_BUFFER_OFFSET_SHIFT      0
-#define ISCSI_UHQE_TASK_ID_LO_MASK          0xFF
-#define ISCSI_UHQE_TASK_ID_LO_SHIFT         24
+#define ISCSI_UHQE_BUFFER_OFFSET_MASK	0xFFFFFF
+#define ISCSI_UHQE_BUFFER_OFFSET_SHIFT	0
+#define ISCSI_UHQE_TASK_ID_LO_MASK	0xFF
+#define ISCSI_UHQE_TASK_ID_LO_SHIFT	24
 };
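Note that the 16-bit task id is split across the two uHQE dwords: its high byte sits in bits 24-31 of reg1 and its low byte in bits 24-31 of reg2. A sketch of the packing, reading the field names literally:

/* Sketch: pack a 16-bit task id into the two uHQE dwords */
static void iscsi_uhqe_set_task_id(struct iscsi_uhqe *uhqe, u16 task_id)
{
	u32 reg1 = le32_to_cpu(uhqe->reg1);
	u32 reg2 = le32_to_cpu(uhqe->reg2);

	SET_FIELD(reg1, ISCSI_UHQE_TASK_ID_HI, (task_id >> 8) & 0xFF);
	SET_FIELD(reg2, ISCSI_UHQE_TASK_ID_LO, task_id & 0xFF);
	uhqe->reg1 = cpu_to_le32(reg1);
	uhqe->reg2 = cpu_to_le32(reg2);
}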
 
-
+/* iSCSI WQ element */
 struct iscsi_wqe {
 	__le16 task_id;
 	u8 flags;
-#define ISCSI_WQE_WQE_TYPE_MASK        0x7
-#define ISCSI_WQE_WQE_TYPE_SHIFT       0
-#define ISCSI_WQE_NUM_SGES_MASK  0xF
-#define ISCSI_WQE_NUM_SGES_SHIFT 3
-#define ISCSI_WQE_RESPONSE_MASK        0x1
-#define ISCSI_WQE_RESPONSE_SHIFT       7
+#define ISCSI_WQE_WQE_TYPE_MASK		0x7
+#define ISCSI_WQE_WQE_TYPE_SHIFT	0
+#define ISCSI_WQE_NUM_SGES_MASK		0xF
+#define ISCSI_WQE_NUM_SGES_SHIFT	3
+#define ISCSI_WQE_RESPONSE_MASK		0x1
+#define ISCSI_WQE_RESPONSE_SHIFT	7
 	struct iscsi_dif_flags prot_flags;
 	__le32 contlen_cdbsize;
-#define ISCSI_WQE_CONT_LEN_MASK  0xFFFFFF
-#define ISCSI_WQE_CONT_LEN_SHIFT 0
-#define ISCSI_WQE_CDB_SIZE_MASK  0xFF
-#define ISCSI_WQE_CDB_SIZE_SHIFT 24
+#define ISCSI_WQE_CONT_LEN_MASK		0xFFFFFF
+#define ISCSI_WQE_CONT_LEN_SHIFT	0
+#define ISCSI_WQE_CDB_SIZE_MASK		0xFF
+#define ISCSI_WQE_CDB_SIZE_SHIFT	24
 };
 
+/* iSCSI wqe type */
 enum iscsi_wqe_type {
 	ISCSI_WQE_TYPE_NORMAL,
 	ISCSI_WQE_TYPE_TASK_CLEANUP,
@@ -1244,6 +1404,7 @@ enum iscsi_wqe_type {
 	MAX_ISCSI_WQE_TYPE
 };
 
+/* iSCSI xHQ element */
 struct iscsi_xhqe {
 	union iscsi_ttt_txlen_union ttt_or_txlen;
 	__le32 exp_stat_sn;
@@ -1251,120 +1412,134 @@ struct iscsi_xhqe {
 	u8 total_ahs_length;
 	u8 opcode;
 	u8 flags;
-#define ISCSI_XHQE_FINAL_MASK       0x1
-#define ISCSI_XHQE_FINAL_SHIFT      0
-#define ISCSI_XHQE_STATUS_BIT_MASK  0x1
-#define ISCSI_XHQE_STATUS_BIT_SHIFT 1
-#define ISCSI_XHQE_NUM_SGES_MASK    0xF
-#define ISCSI_XHQE_NUM_SGES_SHIFT   2
-#define ISCSI_XHQE_RESERVED0_MASK   0x3
-#define ISCSI_XHQE_RESERVED0_SHIFT  6
+#define ISCSI_XHQE_FINAL_MASK		0x1
+#define ISCSI_XHQE_FINAL_SHIFT		0
+#define ISCSI_XHQE_STATUS_BIT_MASK	0x1
+#define ISCSI_XHQE_STATUS_BIT_SHIFT	1
+#define ISCSI_XHQE_NUM_SGES_MASK	0xF
+#define ISCSI_XHQE_NUM_SGES_SHIFT	2
+#define ISCSI_XHQE_RESERVED0_MASK	0x3
+#define ISCSI_XHQE_RESERVED0_SHIFT	6
 	union iscsi_seq_num seq_num;
 	__le16 reserved1;
 };
 
+/* Per PF iSCSI receive path statistics - mStorm RAM structure */
 struct mstorm_iscsi_stats_drv {
 	struct regpair iscsi_rx_dropped_pdus_task_not_valid;
+	struct regpair iscsi_rx_dup_ack_cnt;
 };
 
+/* Per PF iSCSI transmit path statistics - pStorm RAM structure */
 struct pstorm_iscsi_stats_drv {
 	struct regpair iscsi_tx_bytes_cnt;
 	struct regpair iscsi_tx_packet_cnt;
 };
 
+/* Per PF iSCSI receive path statistics - tStorm RAM structure */
 struct tstorm_iscsi_stats_drv {
 	struct regpair iscsi_rx_bytes_cnt;
 	struct regpair iscsi_rx_packet_cnt;
 	struct regpair iscsi_rx_new_ooo_isle_events_cnt;
+	struct regpair iscsi_rx_tcp_payload_bytes_cnt;
+	struct regpair iscsi_rx_tcp_pkt_cnt;
+	struct regpair iscsi_rx_pure_ack_cnt;
 	__le32 iscsi_cmdq_threshold_cnt;
 	__le32 iscsi_rq_threshold_cnt;
 	__le32 iscsi_immq_threshold_cnt;
 };
 
+/* Per PF iSCSI receive path statistics - uStorm RAM structure */
 struct ustorm_iscsi_stats_drv {
 	struct regpair iscsi_rx_data_pdu_cnt;
 	struct regpair iscsi_rx_r2t_pdu_cnt;
 	struct regpair iscsi_rx_total_pdu_cnt;
 };
 
+/* Per PF iSCSI transmit path statistics - xStorm RAM structure */
 struct xstorm_iscsi_stats_drv {
 	struct regpair iscsi_tx_go_to_slow_start_event_cnt;
 	struct regpair iscsi_tx_fast_retransmit_event_cnt;
+	struct regpair iscsi_tx_pure_ack_cnt;
+	struct regpair iscsi_tx_delayed_ack_cnt;
 };
 
+/* Per PF iSCSI transmit path statistics - yStorm RAM structure */
 struct ystorm_iscsi_stats_drv {
 	struct regpair iscsi_tx_data_pdu_cnt;
 	struct regpair iscsi_tx_r2t_pdu_cnt;
 	struct regpair iscsi_tx_total_pdu_cnt;
+	struct regpair iscsi_tx_tcp_payload_bytes_cnt;
+	struct regpair iscsi_tx_tcp_pkt_cnt;
 };
 
-struct tstorm_iscsi_task_ag_ctx {
+struct e4_tstorm_iscsi_task_ag_ctx {
 	u8 byte0;
 	u8 byte1;
 	__le16 word0;
 	u8 flags0;
-#define TSTORM_ISCSI_TASK_AG_CTX_NIBBLE0_MASK  0xF
-#define TSTORM_ISCSI_TASK_AG_CTX_NIBBLE0_SHIFT 0
-#define TSTORM_ISCSI_TASK_AG_CTX_BIT0_MASK     0x1
-#define TSTORM_ISCSI_TASK_AG_CTX_BIT0_SHIFT    4
-#define TSTORM_ISCSI_TASK_AG_CTX_BIT1_MASK     0x1
-#define TSTORM_ISCSI_TASK_AG_CTX_BIT1_SHIFT    5
-#define TSTORM_ISCSI_TASK_AG_CTX_BIT2_MASK     0x1
-#define TSTORM_ISCSI_TASK_AG_CTX_BIT2_SHIFT    6
-#define TSTORM_ISCSI_TASK_AG_CTX_BIT3_MASK     0x1
-#define TSTORM_ISCSI_TASK_AG_CTX_BIT3_SHIFT    7
+#define E4_TSTORM_ISCSI_TASK_AG_CTX_NIBBLE0_MASK	0xF
+#define E4_TSTORM_ISCSI_TASK_AG_CTX_NIBBLE0_SHIFT	0
+#define E4_TSTORM_ISCSI_TASK_AG_CTX_BIT0_MASK		0x1
+#define E4_TSTORM_ISCSI_TASK_AG_CTX_BIT0_SHIFT		4
+#define E4_TSTORM_ISCSI_TASK_AG_CTX_BIT1_MASK		0x1
+#define E4_TSTORM_ISCSI_TASK_AG_CTX_BIT1_SHIFT		5
+#define E4_TSTORM_ISCSI_TASK_AG_CTX_BIT2_MASK		0x1
+#define E4_TSTORM_ISCSI_TASK_AG_CTX_BIT2_SHIFT		6
+#define E4_TSTORM_ISCSI_TASK_AG_CTX_BIT3_MASK		0x1
+#define E4_TSTORM_ISCSI_TASK_AG_CTX_BIT3_SHIFT		7
 	u8 flags1;
-#define TSTORM_ISCSI_TASK_AG_CTX_BIT4_MASK     0x1
-#define TSTORM_ISCSI_TASK_AG_CTX_BIT4_SHIFT    0
-#define TSTORM_ISCSI_TASK_AG_CTX_BIT5_MASK     0x1
-#define TSTORM_ISCSI_TASK_AG_CTX_BIT5_SHIFT    1
-#define TSTORM_ISCSI_TASK_AG_CTX_CF0_MASK      0x3
-#define TSTORM_ISCSI_TASK_AG_CTX_CF0_SHIFT     2
-#define TSTORM_ISCSI_TASK_AG_CTX_CF1_MASK      0x3
-#define TSTORM_ISCSI_TASK_AG_CTX_CF1_SHIFT     4
-#define TSTORM_ISCSI_TASK_AG_CTX_CF2_MASK      0x3
-#define TSTORM_ISCSI_TASK_AG_CTX_CF2_SHIFT     6
+#define E4_TSTORM_ISCSI_TASK_AG_CTX_BIT4_MASK	0x1
+#define E4_TSTORM_ISCSI_TASK_AG_CTX_BIT4_SHIFT	0
+#define E4_TSTORM_ISCSI_TASK_AG_CTX_BIT5_MASK	0x1
+#define E4_TSTORM_ISCSI_TASK_AG_CTX_BIT5_SHIFT	1
+#define E4_TSTORM_ISCSI_TASK_AG_CTX_CF0_MASK	0x3
+#define E4_TSTORM_ISCSI_TASK_AG_CTX_CF0_SHIFT	2
+#define E4_TSTORM_ISCSI_TASK_AG_CTX_CF1_MASK	0x3
+#define E4_TSTORM_ISCSI_TASK_AG_CTX_CF1_SHIFT	4
+#define E4_TSTORM_ISCSI_TASK_AG_CTX_CF2_MASK	0x3
+#define E4_TSTORM_ISCSI_TASK_AG_CTX_CF2_SHIFT	6
 	u8 flags2;
-#define TSTORM_ISCSI_TASK_AG_CTX_CF3_MASK      0x3
-#define TSTORM_ISCSI_TASK_AG_CTX_CF3_SHIFT     0
-#define TSTORM_ISCSI_TASK_AG_CTX_CF4_MASK      0x3
-#define TSTORM_ISCSI_TASK_AG_CTX_CF4_SHIFT     2
-#define TSTORM_ISCSI_TASK_AG_CTX_CF5_MASK      0x3
-#define TSTORM_ISCSI_TASK_AG_CTX_CF5_SHIFT     4
-#define TSTORM_ISCSI_TASK_AG_CTX_CF6_MASK      0x3
-#define TSTORM_ISCSI_TASK_AG_CTX_CF6_SHIFT     6
+#define E4_TSTORM_ISCSI_TASK_AG_CTX_CF3_MASK	0x3
+#define E4_TSTORM_ISCSI_TASK_AG_CTX_CF3_SHIFT	0
+#define E4_TSTORM_ISCSI_TASK_AG_CTX_CF4_MASK	0x3
+#define E4_TSTORM_ISCSI_TASK_AG_CTX_CF4_SHIFT	2
+#define E4_TSTORM_ISCSI_TASK_AG_CTX_CF5_MASK	0x3
+#define E4_TSTORM_ISCSI_TASK_AG_CTX_CF5_SHIFT	4
+#define E4_TSTORM_ISCSI_TASK_AG_CTX_CF6_MASK	0x3
+#define E4_TSTORM_ISCSI_TASK_AG_CTX_CF6_SHIFT	6
 	u8 flags3;
-#define TSTORM_ISCSI_TASK_AG_CTX_CF7_MASK      0x3
-#define TSTORM_ISCSI_TASK_AG_CTX_CF7_SHIFT     0
-#define TSTORM_ISCSI_TASK_AG_CTX_CF0EN_MASK    0x1
-#define TSTORM_ISCSI_TASK_AG_CTX_CF0EN_SHIFT   2
-#define TSTORM_ISCSI_TASK_AG_CTX_CF1EN_MASK    0x1
-#define TSTORM_ISCSI_TASK_AG_CTX_CF1EN_SHIFT   3
-#define TSTORM_ISCSI_TASK_AG_CTX_CF2EN_MASK    0x1
-#define TSTORM_ISCSI_TASK_AG_CTX_CF2EN_SHIFT   4
-#define TSTORM_ISCSI_TASK_AG_CTX_CF3EN_MASK    0x1
-#define TSTORM_ISCSI_TASK_AG_CTX_CF3EN_SHIFT   5
-#define TSTORM_ISCSI_TASK_AG_CTX_CF4EN_MASK    0x1
-#define TSTORM_ISCSI_TASK_AG_CTX_CF4EN_SHIFT   6
-#define TSTORM_ISCSI_TASK_AG_CTX_CF5EN_MASK    0x1
-#define TSTORM_ISCSI_TASK_AG_CTX_CF5EN_SHIFT   7
+#define E4_TSTORM_ISCSI_TASK_AG_CTX_CF7_MASK	0x3
+#define E4_TSTORM_ISCSI_TASK_AG_CTX_CF7_SHIFT	0
+#define E4_TSTORM_ISCSI_TASK_AG_CTX_CF0EN_MASK	0x1
+#define E4_TSTORM_ISCSI_TASK_AG_CTX_CF0EN_SHIFT	2
+#define E4_TSTORM_ISCSI_TASK_AG_CTX_CF1EN_MASK	0x1
+#define E4_TSTORM_ISCSI_TASK_AG_CTX_CF1EN_SHIFT	3
+#define E4_TSTORM_ISCSI_TASK_AG_CTX_CF2EN_MASK	0x1
+#define E4_TSTORM_ISCSI_TASK_AG_CTX_CF2EN_SHIFT	4
+#define E4_TSTORM_ISCSI_TASK_AG_CTX_CF3EN_MASK	0x1
+#define E4_TSTORM_ISCSI_TASK_AG_CTX_CF3EN_SHIFT	5
+#define E4_TSTORM_ISCSI_TASK_AG_CTX_CF4EN_MASK	0x1
+#define E4_TSTORM_ISCSI_TASK_AG_CTX_CF4EN_SHIFT	6
+#define E4_TSTORM_ISCSI_TASK_AG_CTX_CF5EN_MASK	0x1
+#define E4_TSTORM_ISCSI_TASK_AG_CTX_CF5EN_SHIFT	7
 	u8 flags4;
-#define TSTORM_ISCSI_TASK_AG_CTX_CF6EN_MASK    0x1
-#define TSTORM_ISCSI_TASK_AG_CTX_CF6EN_SHIFT   0
-#define TSTORM_ISCSI_TASK_AG_CTX_CF7EN_MASK    0x1
-#define TSTORM_ISCSI_TASK_AG_CTX_CF7EN_SHIFT   1
-#define TSTORM_ISCSI_TASK_AG_CTX_RULE0EN_MASK  0x1
-#define TSTORM_ISCSI_TASK_AG_CTX_RULE0EN_SHIFT 2
-#define TSTORM_ISCSI_TASK_AG_CTX_RULE1EN_MASK  0x1
-#define TSTORM_ISCSI_TASK_AG_CTX_RULE1EN_SHIFT 3
-#define TSTORM_ISCSI_TASK_AG_CTX_RULE2EN_MASK  0x1
-#define TSTORM_ISCSI_TASK_AG_CTX_RULE2EN_SHIFT 4
-#define TSTORM_ISCSI_TASK_AG_CTX_RULE3EN_MASK  0x1
-#define TSTORM_ISCSI_TASK_AG_CTX_RULE3EN_SHIFT 5
-#define TSTORM_ISCSI_TASK_AG_CTX_RULE4EN_MASK  0x1
-#define TSTORM_ISCSI_TASK_AG_CTX_RULE4EN_SHIFT 6
-#define TSTORM_ISCSI_TASK_AG_CTX_RULE5EN_MASK  0x1
-#define TSTORM_ISCSI_TASK_AG_CTX_RULE5EN_SHIFT 7
+#define E4_TSTORM_ISCSI_TASK_AG_CTX_CF6EN_MASK		0x1
+#define E4_TSTORM_ISCSI_TASK_AG_CTX_CF6EN_SHIFT		0
+#define E4_TSTORM_ISCSI_TASK_AG_CTX_CF7EN_MASK		0x1
+#define E4_TSTORM_ISCSI_TASK_AG_CTX_CF7EN_SHIFT		1
+#define E4_TSTORM_ISCSI_TASK_AG_CTX_RULE0EN_MASK	0x1
+#define E4_TSTORM_ISCSI_TASK_AG_CTX_RULE0EN_SHIFT	2
+#define E4_TSTORM_ISCSI_TASK_AG_CTX_RULE1EN_MASK	0x1
+#define E4_TSTORM_ISCSI_TASK_AG_CTX_RULE1EN_SHIFT	3
+#define E4_TSTORM_ISCSI_TASK_AG_CTX_RULE2EN_MASK	0x1
+#define E4_TSTORM_ISCSI_TASK_AG_CTX_RULE2EN_SHIFT	4
+#define E4_TSTORM_ISCSI_TASK_AG_CTX_RULE3EN_MASK	0x1
+#define E4_TSTORM_ISCSI_TASK_AG_CTX_RULE3EN_SHIFT	5
+#define E4_TSTORM_ISCSI_TASK_AG_CTX_RULE4EN_MASK	0x1
+#define E4_TSTORM_ISCSI_TASK_AG_CTX_RULE4EN_SHIFT	6
+#define E4_TSTORM_ISCSI_TASK_AG_CTX_RULE5EN_MASK	0x1
+#define E4_TSTORM_ISCSI_TASK_AG_CTX_RULE5EN_SHIFT	7
 	u8 byte2;
 	__le16 word1;
 	__le32 reg0;
@@ -1376,18 +1551,20 @@ struct tstorm_iscsi_task_ag_ctx {
 	__le32 reg1;
 	__le32 reg2;
 };
+
+/* iSCSI doorbell data */
 struct iscsi_db_data {
 	u8 params;
-#define ISCSI_DB_DATA_DEST_MASK         0x3
-#define ISCSI_DB_DATA_DEST_SHIFT        0
-#define ISCSI_DB_DATA_AGG_CMD_MASK      0x3
-#define ISCSI_DB_DATA_AGG_CMD_SHIFT     2
-#define ISCSI_DB_DATA_BYPASS_EN_MASK    0x1
-#define ISCSI_DB_DATA_BYPASS_EN_SHIFT   4
-#define ISCSI_DB_DATA_RESERVED_MASK     0x1
-#define ISCSI_DB_DATA_RESERVED_SHIFT    5
-#define ISCSI_DB_DATA_AGG_VAL_SEL_MASK  0x3
-#define ISCSI_DB_DATA_AGG_VAL_SEL_SHIFT 6
+#define ISCSI_DB_DATA_DEST_MASK		0x3
+#define ISCSI_DB_DATA_DEST_SHIFT	0
+#define ISCSI_DB_DATA_AGG_CMD_MASK	0x3
+#define ISCSI_DB_DATA_AGG_CMD_SHIFT	2
+#define ISCSI_DB_DATA_BYPASS_EN_MASK	0x1
+#define ISCSI_DB_DATA_BYPASS_EN_SHIFT	4
+#define ISCSI_DB_DATA_RESERVED_MASK	0x1
+#define ISCSI_DB_DATA_RESERVED_SHIFT	5
+#define ISCSI_DB_DATA_AGG_VAL_SEL_MASK	0x3
+#define ISCSI_DB_DATA_AGG_VAL_SEL_SHIFT	6
 	u8 agg_flags;
 	__le16 sq_prod;
 };
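A sketch of filling the doorbell data before ringing the SQ doorbell. DB_DEST_XCM and DB_AGG_CMD_SET are assumed here to be the usual qed doorbell enums from common_hsi.h; treat the exact values as illustrative.

/* Sketch: prepare SQ doorbell data (destination/agg values assumed) */
static void iscsi_fill_db(struct iscsi_db_data *db, u16 sq_prod)
{
	db->params = 0;
	SET_FIELD(db->params, ISCSI_DB_DATA_DEST, DB_DEST_XCM);
	SET_FIELD(db->params, ISCSI_DB_DATA_AGG_CMD, DB_AGG_CMD_SET);
	db->agg_flags = 0;
	db->sq_prod = cpu_to_le16(sq_prod);
}
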
diff --git a/include/linux/qed/iwarp_common.h b/include/linux/qed/iwarp_common.h
index b8b3e1cfae90..c6cfd39cd910 100644
--- a/include/linux/qed/iwarp_common.h
+++ b/include/linux/qed/iwarp_common.h
@@ -29,9 +29,12 @@
  * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
  * SOFTWARE.
  */
+
 #ifndef __IWARP_COMMON__
 #define __IWARP_COMMON__
+
 #include <linux/qed/rdma_common.h>
+
 /************************/
 /* IWARP FW CONSTANTS	*/
 /************************/
@@ -40,14 +43,14 @@
 #define IWARP_PASSIVE_MODE 1
 
 #define IWARP_SHARED_QUEUE_PAGE_SIZE		(0x8000)
-#define IWARP_SHARED_QUEUE_PAGE_RQ_PBL_OFFSET   (0x4000)
-#define IWARP_SHARED_QUEUE_PAGE_RQ_PBL_MAX_SIZE (0x1000)
-#define IWARP_SHARED_QUEUE_PAGE_SQ_PBL_OFFSET   (0x5000)
-#define IWARP_SHARED_QUEUE_PAGE_SQ_PBL_MAX_SIZE (0x3000)
+#define IWARP_SHARED_QUEUE_PAGE_RQ_PBL_OFFSET	(0x4000)
+#define IWARP_SHARED_QUEUE_PAGE_RQ_PBL_MAX_SIZE	(0x1000)
+#define IWARP_SHARED_QUEUE_PAGE_SQ_PBL_OFFSET	(0x5000)
+#define IWARP_SHARED_QUEUE_PAGE_SQ_PBL_MAX_SIZE	(0x3000)
 
-#define IWARP_REQ_MAX_INLINE_DATA_SIZE          (128)
-#define IWARP_REQ_MAX_SINGLE_SQ_WQE_SIZE        (176)
+#define IWARP_REQ_MAX_INLINE_DATA_SIZE		(128)
+#define IWARP_REQ_MAX_SINGLE_SQ_WQE_SIZE	(176)
 
-#define IWARP_MAX_QPS                           (64 * 1024)
+#define IWARP_MAX_QPS				(64 * 1024)
 
 #endif /* __IWARP_COMMON__ */
diff --git a/include/linux/qed/qed_eth_if.h b/include/linux/qed/qed_eth_if.h
index d60de4a39810..147d08ccf813 100644
--- a/include/linux/qed/qed_eth_if.h
+++ b/include/linux/qed/qed_eth_if.h
@@ -61,6 +61,35 @@ struct qed_txq_start_ret_params {
 	void *p_handle;
 };
 
+enum qed_filter_config_mode {
+	QED_FILTER_CONFIG_MODE_DISABLE,
+	QED_FILTER_CONFIG_MODE_5_TUPLE,
+	QED_FILTER_CONFIG_MODE_L4_PORT,
+	QED_FILTER_CONFIG_MODE_IP_DEST,
+};
+
+struct qed_ntuple_filter_params {
+	/* DMA-mapped address of the buffer holding the packet header to
+	 * be used as the filter template.
+	 */
+	dma_addr_t addr;
+
+	/* Length of header in bytes */
+	u16 length;
+
+	/* Relative queue-id to receive classified packet */
+#define QED_RFS_NTUPLE_QID_RSS ((u16)-1)
+	u16 qid;
+
+	/* The filter owner is identified either by vport-id or by vf-id */
+	bool b_is_vf;
+	u8 vport_id;
+	u8 vf_id;
+
+	/* True if this filter is to be added, false if it is to be removed */
+	bool b_is_add;
+};
+
 struct qed_dev_eth_info {
 	struct qed_dev_info common;
 
@@ -316,13 +345,12 @@ struct qed_eth_ops {
 	int (*tunn_config)(struct qed_dev *cdev,
 			   struct qed_tunn_params *params);
 
-	int (*ntuple_filter_config)(struct qed_dev *cdev, void *cookie,
-				    dma_addr_t mapping, u16 length,
-				    u16 vport_id, u16 rx_queue_id,
-				    bool add_filter);
+	int (*ntuple_filter_config)(struct qed_dev *cdev,
+				    void *cookie,
+				    struct qed_ntuple_filter_params *params);
 
 	int (*configure_arfs_searcher)(struct qed_dev *cdev,
-				       bool en_searcher);
+				       enum qed_filter_config_mode mode);
 	int (*get_coalesce)(struct qed_dev *cdev, u16 *coal, void *handle);
 };
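Putting the two reworked callbacks together, an L2 driver's aRFS path now looks roughly like this; locals such as cdev, cookie and hdr_dma are illustrative, and only declarations visible in this patch are used.

/* Sketch: program one 5-tuple steering filter via the reworked ops */
static int sketch_arfs_add(const struct qed_eth_ops *ops,
			   struct qed_dev *cdev, void *cookie,
			   dma_addr_t hdr_dma, u16 hdr_len, u16 rxq_id)
{
	struct qed_ntuple_filter_params params = {
		.addr = hdr_dma,	/* DMA-mapped header template */
		.length = hdr_len,
		.qid = rxq_id,		/* or QED_RFS_NTUPLE_QID_RSS */
		.b_is_vf = false,
		.vport_id = 0,
		.b_is_add = true,
	};
	int rc;

	rc = ops->configure_arfs_searcher(cdev,
					  QED_FILTER_CONFIG_MODE_5_TUPLE);
	if (rc)
		return rc;

	return ops->ntuple_filter_config(cdev, cookie, &params);
}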
 
diff --git a/include/linux/qed/qed_if.h b/include/linux/qed/qed_if.h
index cc646ca97974..15e398c7230e 100644
--- a/include/linux/qed/qed_if.h
+++ b/include/linux/qed/qed_if.h
@@ -244,16 +244,11 @@ struct qed_fcoe_pf_params {
 /* Most of the parameters below are described in the FW iSCSI / TCP HSI */
 struct qed_iscsi_pf_params {
 	u64 glbl_q_params_addr;
-	u64 bdq_pbl_base_addr[2];
-	u32 max_cwnd;
+	u64 bdq_pbl_base_addr[3];
 	u16 cq_num_entries;
 	u16 cmdq_num_entries;
 	u32 two_msl_timer;
-	u16 dup_ack_threshold;
 	u16 tx_sws_timer;
-	u16 min_rto;
-	u16 min_rto_rt;
-	u16 max_rto;
 
 	/* The following parameters are used during HW-init
 	 * and these parameters need to be passed as arguments
@@ -264,8 +259,8 @@ struct qed_iscsi_pf_params {
 
 	/* The following parameters are used during protocol-init */
 	u16 half_way_close_timeout;
-	u16 bdq_xoff_threshold[2];
-	u16 bdq_xon_threshold[2];
+	u16 bdq_xoff_threshold[3];
+	u16 bdq_xon_threshold[3];
 	u16 cmdq_xoff_threshold;
 	u16 cmdq_xon_threshold;
 	u16 rq_buffer_size;
@@ -281,10 +276,11 @@ struct qed_iscsi_pf_params {
 	u8 gl_cmd_pi;
 	u8 debug_mode;
 	u8 ll2_ooo_queue_id;
-	u8 ooo_enable;
 
 	u8 is_target;
-	u8 bdq_pbl_num_entries[2];
+	u8 is_soc_en;
+	u8 soc_num_of_blocks_log;
+	u8 bdq_pbl_num_entries[3];
 };
 
 struct qed_rdma_pf_params {
@@ -316,16 +312,16 @@ enum qed_int_mode {
 };
 
 struct qed_sb_info {
-	struct status_block	*sb_virt;
-	dma_addr_t		sb_phys;
-	u32			sb_ack; /* Last given ack */
-	u16			igu_sb_id;
-	void __iomem		*igu_addr;
-	u8			flags;
-#define QED_SB_INFO_INIT        0x1
-#define QED_SB_INFO_SETUP       0x2
+	struct status_block_e4 *sb_virt;
+	dma_addr_t sb_phys;
+	u32 sb_ack; /* Last given ack */
+	u16 igu_sb_id;
+	void __iomem *igu_addr;
+	u8 flags;
+#define QED_SB_INFO_INIT	0x1
+#define QED_SB_INFO_SETUP	0x2
 
-	struct qed_dev		*cdev;
+	struct qed_dev *cdev;
 };
 
 enum qed_dev_type {
@@ -939,7 +935,7 @@ static inline u16 qed_sb_update_sb_idx(struct qed_sb_info *sb_info)
 	u16 rc = 0;
 
 	prod = le32_to_cpu(sb_info->sb_virt->prod_index) &
-	       STATUS_BLOCK_PROD_INDEX_MASK;
+	       STATUS_BLOCK_E4_PROD_INDEX_MASK;
 	if (sb_info->sb_ack != prod) {
 		sb_info->sb_ack = prod;
 		rc |= QED_SB_IDX;
diff --git a/include/linux/qed/qed_iscsi_if.h b/include/linux/qed/qed_iscsi_if.h
index 111e606a74c8..d0df1bec5357 100644
--- a/include/linux/qed/qed_iscsi_if.h
+++ b/include/linux/qed/qed_iscsi_if.h
@@ -102,7 +102,6 @@ struct qed_iscsi_params_offload {
 	u32 ss_thresh;
 	u16 srtt;
 	u16 rtt_var;
-	u32 ts_time;
 	u32 ts_recent;
 	u32 ts_recent_age;
 	u32 total_rt;
@@ -124,7 +123,6 @@ struct qed_iscsi_params_offload {
 	u16 mss;
 	u8 snd_wnd_scale;
 	u8 rcv_wnd_scale;
-	u32 ts_ticks_per_second;
 	u16 da_timeout_value;
 	u8 ack_frequency;
 };
diff --git a/include/linux/qed/qed_ll2_if.h b/include/linux/qed/qed_ll2_if.h
index e755954d85fd..266c1fb45387 100644
--- a/include/linux/qed/qed_ll2_if.h
+++ b/include/linux/qed/qed_ll2_if.h
@@ -116,7 +116,7 @@ struct qed_ll2_comp_rx_data {
 	u32 opaque_data_1;
 
 	/* GSI only */
-	u32 gid_dst[4];
+	u32 src_qp;
 	u16 qp_id;
 
 	union {
diff --git a/include/linux/qed/rdma_common.h b/include/linux/qed/rdma_common.h
index a9b3050f469c..c1a446ebe362 100644
--- a/include/linux/qed/rdma_common.h
+++ b/include/linux/qed/rdma_common.h
@@ -32,28 +32,29 @@
 
 #ifndef __RDMA_COMMON__
 #define __RDMA_COMMON__
+
 /************************/
 /* RDMA FW CONSTANTS */
 /************************/
 
-#define RDMA_RESERVED_LKEY                      (0)
-#define RDMA_RING_PAGE_SIZE                     (0x1000)
+#define RDMA_RESERVED_LKEY		(0)
+#define RDMA_RING_PAGE_SIZE		(0x1000)
 
-#define RDMA_MAX_SGE_PER_SQ_WQE         (4)
-#define RDMA_MAX_SGE_PER_RQ_WQE         (4)
+#define RDMA_MAX_SGE_PER_SQ_WQE		(4)
+#define RDMA_MAX_SGE_PER_RQ_WQE		(4)
 
 #define RDMA_MAX_DATA_SIZE_IN_WQE	(0x80000000)
 
-#define RDMA_REQ_RD_ATOMIC_ELM_SIZE             (0x50)
-#define RDMA_RESP_RD_ATOMIC_ELM_SIZE    (0x20)
+#define RDMA_REQ_RD_ATOMIC_ELM_SIZE	(0x50)
+#define RDMA_RESP_RD_ATOMIC_ELM_SIZE	(0x20)
 
-#define RDMA_MAX_CQS                            (64 * 1024)
-#define RDMA_MAX_TIDS                           (128 * 1024 - 1)
-#define RDMA_MAX_PDS                            (64 * 1024)
+#define RDMA_MAX_CQS			(64 * 1024)
+#define RDMA_MAX_TIDS			(128 * 1024 - 1)
+#define RDMA_MAX_PDS			(64 * 1024)
 
-#define RDMA_NUM_STATISTIC_COUNTERS                     MAX_NUM_VPORTS
-#define RDMA_NUM_STATISTIC_COUNTERS_K2                  MAX_NUM_VPORTS_K2
-#define RDMA_NUM_STATISTIC_COUNTERS_BB                  MAX_NUM_VPORTS_BB
+#define RDMA_NUM_STATISTIC_COUNTERS	MAX_NUM_VPORTS
+#define RDMA_NUM_STATISTIC_COUNTERS_K2	MAX_NUM_VPORTS_K2
+#define RDMA_NUM_STATISTIC_COUNTERS_BB	MAX_NUM_VPORTS_BB
 
 #define RDMA_TASK_TYPE (PROTOCOLID_ROCE)
 
diff --git a/include/linux/qed/roce_common.h b/include/linux/qed/roce_common.h
index fe6a33e45977..e15e0da71240 100644
--- a/include/linux/qed/roce_common.h
+++ b/include/linux/qed/roce_common.h
@@ -33,13 +33,18 @@
 #ifndef __ROCE_COMMON__
 #define __ROCE_COMMON__
 
-#define ROCE_REQ_MAX_INLINE_DATA_SIZE (256)
-#define ROCE_REQ_MAX_SINGLE_SQ_WQE_SIZE (288)
+/************************/
+/* ROCE FW CONSTANTS */
+/************************/
 
-#define ROCE_MAX_QPS	(32 * 1024)
-#define ROCE_DCQCN_NP_MAX_QPS	(64)
-#define ROCE_DCQCN_RP_MAX_QPS	(64)
+#define ROCE_REQ_MAX_INLINE_DATA_SIZE	(256)
+#define ROCE_REQ_MAX_SINGLE_SQ_WQE_SIZE	(288)
 
+#define ROCE_MAX_QPS			(32 * 1024)
+#define ROCE_DCQCN_NP_MAX_QPS		(64)
+#define ROCE_DCQCN_RP_MAX_QPS		(64)
+
+/* Affiliated asynchronous events / errors enumeration */
 enum roce_async_events_type {
 	ROCE_ASYNC_EVENT_NONE = 0,
 	ROCE_ASYNC_EVENT_COMM_EST = 1,
diff --git a/include/linux/qed/storage_common.h b/include/linux/qed/storage_common.h
index 08df82a096b6..505c0b48a761 100644
--- a/include/linux/qed/storage_common.h
+++ b/include/linux/qed/storage_common.h
@@ -33,43 +33,77 @@
 #ifndef __STORAGE_COMMON__
 #define __STORAGE_COMMON__
 
-#define NUM_OF_CMDQS_CQS (NUM_OF_GLOBAL_QUEUES / 2)
-#define BDQ_NUM_RESOURCES (4)
-
-#define BDQ_ID_RQ                        (0)
-#define BDQ_ID_IMM_DATA          (1)
-#define BDQ_NUM_IDS          (2)
-
-#define SCSI_NUM_SGES_SLOW_SGL_THR      8
+/*********************/
+/* SCSI CONSTANTS */
+/*********************/
+
+#define SCSI_MAX_NUM_OF_CMDQS		(NUM_OF_GLOBAL_QUEUES / 2)
+#define BDQ_NUM_RESOURCES		(4)
+
+#define BDQ_ID_RQ			(0)
+#define BDQ_ID_IMM_DATA			(1)
+#define BDQ_ID_TQ			(2)
+#define BDQ_NUM_IDS			(3)
+
+#define SCSI_NUM_SGES_SLOW_SGL_THR	8
+
+#define BDQ_MAX_EXTERNAL_RING_SIZE	BIT(15)
+
+/* SCSI op codes */
+#define SCSI_OPCODE_COMPARE_AND_WRITE	(0x89)
+#define SCSI_OPCODE_READ_10		(0x28)
+#define SCSI_OPCODE_WRITE_6		(0x0A)
+#define SCSI_OPCODE_WRITE_10		(0x2A)
+#define SCSI_OPCODE_WRITE_12		(0xAA)
+#define SCSI_OPCODE_WRITE_16		(0x8A)
+#define SCSI_OPCODE_WRITE_AND_VERIFY_10	(0x2E)
+#define SCSI_OPCODE_WRITE_AND_VERIFY_12	(0xAE)
+#define SCSI_OPCODE_WRITE_AND_VERIFY_16	(0x8E)
+
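The opcode list above lets firmware-facing code classify CDBs without pulling in the full SCSI stack; a sketch that mirrors exactly the write opcodes defined here:

/* Sketch: classify a CDB opcode as a write by the defines above */
static bool scsi_opcode_is_write(u8 op)
{
	switch (op) {
	case SCSI_OPCODE_WRITE_6:
	case SCSI_OPCODE_WRITE_10:
	case SCSI_OPCODE_WRITE_12:
	case SCSI_OPCODE_WRITE_16:
	case SCSI_OPCODE_WRITE_AND_VERIFY_10:
	case SCSI_OPCODE_WRITE_AND_VERIFY_12:
	case SCSI_OPCODE_WRITE_AND_VERIFY_16:
	case SCSI_OPCODE_COMPARE_AND_WRITE:
		return true;
	default:
		return false;
	}
}
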
+/* iSCSI Drv opaque */
+struct iscsi_drv_opaque {
+	__le16 reserved_zero[3];
+	__le16 opaque;
+};
 
-#define BDQ_MAX_EXTERNAL_RING_SIZE (1 << 15)
+/* Scsi 2B/8B opaque union */
+union scsi_opaque {
+	struct regpair fcoe_opaque;
+	struct iscsi_drv_opaque iscsi_opaque;
+};
 
+/* SCSI buffer descriptor */
 struct scsi_bd {
 	struct regpair address;
-	struct regpair opaque;
+	union scsi_opaque opaque;
 };
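The BD opaque is now a per-protocol union: iSCSI keeps a 16-bit cookie, FCoE the full regpair. A minimal sketch of the iSCSI side:

/* Sketch: stash an iSCSI driver cookie in the new opaque union */
static void scsi_bd_set_iscsi_opaque(struct scsi_bd *bd, u16 cookie)
{
	bd->opaque.iscsi_opaque.opaque = cpu_to_le16(cookie);
}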
 
+/* Scsi Drv BDQ struct */
 struct scsi_bdq_ram_drv_data {
 	__le16 external_producer;
 	__le16 reserved0[3];
 };
 
+/* SCSI SGE entry */
 struct scsi_sge {
 	struct regpair sge_addr;
 	__le32 sge_len;
 	__le32 reserved;
 };
 
+/* Cached SGEs section */
 struct scsi_cached_sges {
 	struct scsi_sge sge[4];
 };
 
+/* Scsi Drv CMDQ struct */
 struct scsi_drv_cmdq {
 	__le16 cmdq_cons;
 	__le16 reserved0;
 	__le32 reserved1;
 };
 
+/* Common SCSI init params passed by driver to FW in function init ramrod */
 struct scsi_init_func_params {
 	__le16 num_tasks;
 	u8 log_page_size;
@@ -77,6 +111,7 @@ struct scsi_init_func_params {
 	u8 reserved2[12];
 };
 
+/* SCSI RQ/CQ/CMDQ firmware function init parameters */
 struct scsi_init_func_queues {
 	struct regpair glbl_q_params_addr;
 	__le16 rq_buffer_size;
@@ -84,39 +119,45 @@ struct scsi_init_func_queues {
 	__le16 cmdq_num_entries;
 	u8 bdq_resource_id;
 	u8 q_validity;
-#define SCSI_INIT_FUNC_QUEUES_RQ_VALID_MASK        0x1
-#define SCSI_INIT_FUNC_QUEUES_RQ_VALID_SHIFT       0
-#define SCSI_INIT_FUNC_QUEUES_IMM_DATA_VALID_MASK  0x1
-#define SCSI_INIT_FUNC_QUEUES_IMM_DATA_VALID_SHIFT 1
-#define SCSI_INIT_FUNC_QUEUES_CMD_VALID_MASK       0x1
-#define SCSI_INIT_FUNC_QUEUES_CMD_VALID_SHIFT      2
-#define SCSI_INIT_FUNC_QUEUES_RESERVED_VALID_MASK  0x1F
-#define SCSI_INIT_FUNC_QUEUES_RESERVED_VALID_SHIFT 3
+#define SCSI_INIT_FUNC_QUEUES_RQ_VALID_MASK			0x1
+#define SCSI_INIT_FUNC_QUEUES_RQ_VALID_SHIFT			0
+#define SCSI_INIT_FUNC_QUEUES_IMM_DATA_VALID_MASK		0x1
+#define SCSI_INIT_FUNC_QUEUES_IMM_DATA_VALID_SHIFT		1
+#define SCSI_INIT_FUNC_QUEUES_CMD_VALID_MASK			0x1
+#define SCSI_INIT_FUNC_QUEUES_CMD_VALID_SHIFT			2
+#define SCSI_INIT_FUNC_QUEUES_TQ_VALID_MASK			0x1
+#define SCSI_INIT_FUNC_QUEUES_TQ_VALID_SHIFT			3
+#define SCSI_INIT_FUNC_QUEUES_SOC_EN_MASK			0x1
+#define SCSI_INIT_FUNC_QUEUES_SOC_EN_SHIFT			4
+#define SCSI_INIT_FUNC_QUEUES_SOC_NUM_OF_BLOCKS_LOG_MASK	0x7
+#define SCSI_INIT_FUNC_QUEUES_SOC_NUM_OF_BLOCKS_LOG_SHIFT	5
+	__le16 cq_cmdq_sb_num_arr[SCSI_MAX_NUM_OF_CMDQS];
 	u8 num_queues;
 	u8 queue_relative_offset;
 	u8 cq_sb_pi;
 	u8 cmdq_sb_pi;
-	__le16 cq_cmdq_sb_num_arr[NUM_OF_CMDQS_CQS];
-	__le16 reserved0;
 	u8 bdq_pbl_num_entries[BDQ_NUM_IDS];
+	u8 reserved1;
 	struct regpair bdq_pbl_base_address[BDQ_NUM_IDS];
 	__le16 bdq_xoff_threshold[BDQ_NUM_IDS];
-	__le16 bdq_xon_threshold[BDQ_NUM_IDS];
 	__le16 cmdq_xoff_threshold;
+	__le16 bdq_xon_threshold[BDQ_NUM_IDS];
 	__le16 cmdq_xon_threshold;
-	__le32 reserved1;
 };
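q_validity gained a TQ bit alongside the new SOC fields; marking all three queue types valid looks like this (sketch, helper name hypothetical):

/* Sketch: declare the RQ, immediate-data BDQ and task queue valid */
static void scsi_mark_queues_valid(struct scsi_init_func_queues *q)
{
	SET_FIELD(q->q_validity, SCSI_INIT_FUNC_QUEUES_RQ_VALID, 1);
	SET_FIELD(q->q_validity, SCSI_INIT_FUNC_QUEUES_IMM_DATA_VALID, 1);
	SET_FIELD(q->q_validity, SCSI_INIT_FUNC_QUEUES_TQ_VALID, 1);
}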
 
+/* SCSI Drv BDQ Data struct (BDQ IDs: 0 - RQ, 1 - Immediate Data, 2 - TQ) */
 struct scsi_ram_per_bdq_resource_drv_data {
 	struct scsi_bdq_ram_drv_data drv_data_per_bdq_id[BDQ_NUM_IDS];
 };
 
+/* SCSI SGL types */
 enum scsi_sgl_mode {
 	SCSI_TX_SLOW_SGL,
 	SCSI_FAST_SGL,
 	MAX_SCSI_SGL_MODE
 };
 
+/* SCSI SGL parameters */
 struct scsi_sgl_params {
 	struct regpair sgl_addr;
 	__le32 sgl_total_length;
@@ -126,10 +167,16 @@ struct scsi_sgl_params {
 	u8 reserved;
 };
 
+/* SCSI terminate connection params */
 struct scsi_terminate_extra_params {
 	__le16 unsolicited_cq_count;
 	__le16 cmdq_count;
 	u8 reserved[4];
 };
 
+/* SCSI Task Queue Element */
+struct scsi_tqe {
+	__le16 itid;
+};
+
 #endif /* __STORAGE_COMMON__ */
diff --git a/include/linux/qed/tcp_common.h b/include/linux/qed/tcp_common.h
index dbf7a43c3e1f..4a4845193539 100644
--- a/include/linux/qed/tcp_common.h
+++ b/include/linux/qed/tcp_common.h
@@ -33,8 +33,13 @@
 #ifndef __TCP_COMMON__
 #define __TCP_COMMON__
 
-#define TCP_INVALID_TIMEOUT_VAL -1
+/********************/
+/* TCP FW CONSTANTS */
+/********************/
 
+#define TCP_INVALID_TIMEOUT_VAL	-1
+
+/* OOO opaque data received from LL2 */
 struct ooo_opaque {
 	__le32 cid;
 	u8 drop_isle;
@@ -43,25 +48,29 @@ struct ooo_opaque {
 	u8 ooo_isle;
 };
 
+/* tcp connect mode enum */
 enum tcp_connect_mode {
 	TCP_CONNECT_ACTIVE,
 	TCP_CONNECT_PASSIVE,
 	MAX_TCP_CONNECT_MODE
 };
 
+/* tcp function init parameters */
 struct tcp_init_params {
 	__le32 two_msl_timer;
 	__le16 tx_sws_timer;
-	u8 maxfinrt;
+	u8 max_fin_rt;
 	u8 reserved[9];
 };
 
+/* tcp IPv4/IPv6 enum */
 enum tcp_ip_version {
 	TCP_IPV4,
 	TCP_IPV6,
 	MAX_TCP_IP_VERSION
 };
 
+/* tcp offload parameters */
 struct tcp_offload_params {
 	__le16 local_mac_addr_lo;
 	__le16 local_mac_addr_mid;
@@ -70,24 +79,29 @@ struct tcp_offload_params {
 	__le16 remote_mac_addr_mid;
 	__le16 remote_mac_addr_hi;
 	__le16 vlan_id;
-	u8 flags;
-#define TCP_OFFLOAD_PARAMS_TS_EN_MASK         0x1
-#define TCP_OFFLOAD_PARAMS_TS_EN_SHIFT        0
-#define TCP_OFFLOAD_PARAMS_DA_EN_MASK         0x1
-#define TCP_OFFLOAD_PARAMS_DA_EN_SHIFT        1
-#define TCP_OFFLOAD_PARAMS_KA_EN_MASK         0x1
-#define TCP_OFFLOAD_PARAMS_KA_EN_SHIFT        2
-#define TCP_OFFLOAD_PARAMS_NAGLE_EN_MASK      0x1
-#define TCP_OFFLOAD_PARAMS_NAGLE_EN_SHIFT     3
-#define TCP_OFFLOAD_PARAMS_DA_CNT_EN_MASK     0x1
-#define TCP_OFFLOAD_PARAMS_DA_CNT_EN_SHIFT    4
-#define TCP_OFFLOAD_PARAMS_FIN_SENT_MASK      0x1
-#define TCP_OFFLOAD_PARAMS_FIN_SENT_SHIFT     5
-#define TCP_OFFLOAD_PARAMS_FIN_RECEIVED_MASK  0x1
-#define TCP_OFFLOAD_PARAMS_FIN_RECEIVED_SHIFT 6
-#define TCP_OFFLOAD_PARAMS_RESERVED0_MASK     0x1
-#define TCP_OFFLOAD_PARAMS_RESERVED0_SHIFT    7
+	__le16 flags;
+#define TCP_OFFLOAD_PARAMS_TS_EN_MASK			0x1
+#define TCP_OFFLOAD_PARAMS_TS_EN_SHIFT			0
+#define TCP_OFFLOAD_PARAMS_DA_EN_MASK			0x1
+#define TCP_OFFLOAD_PARAMS_DA_EN_SHIFT			1
+#define TCP_OFFLOAD_PARAMS_KA_EN_MASK			0x1
+#define TCP_OFFLOAD_PARAMS_KA_EN_SHIFT			2
+#define TCP_OFFLOAD_PARAMS_ECN_SENDER_EN_MASK		0x1
+#define TCP_OFFLOAD_PARAMS_ECN_SENDER_EN_SHIFT		3
+#define TCP_OFFLOAD_PARAMS_ECN_RECEIVER_EN_MASK		0x1
+#define TCP_OFFLOAD_PARAMS_ECN_RECEIVER_EN_SHIFT	4
+#define TCP_OFFLOAD_PARAMS_NAGLE_EN_MASK		0x1
+#define TCP_OFFLOAD_PARAMS_NAGLE_EN_SHIFT		5
+#define TCP_OFFLOAD_PARAMS_DA_CNT_EN_MASK		0x1
+#define TCP_OFFLOAD_PARAMS_DA_CNT_EN_SHIFT		6
+#define TCP_OFFLOAD_PARAMS_FIN_SENT_MASK		0x1
+#define TCP_OFFLOAD_PARAMS_FIN_SENT_SHIFT		7
+#define TCP_OFFLOAD_PARAMS_FIN_RECEIVED_MASK		0x1
+#define TCP_OFFLOAD_PARAMS_FIN_RECEIVED_SHIFT		8
+#define TCP_OFFLOAD_PARAMS_RESERVED_MASK		0x7F
+#define TCP_OFFLOAD_PARAMS_RESERVED_SHIFT		9
 	u8 ip_version;
+	u8 reserved0[3];
 	__le32 remote_ip[4];
 	__le32 local_ip[4];
 	__le32 flow_label;
@@ -99,17 +113,21 @@ struct tcp_offload_params {
 	u8 rcv_wnd_scale;
 	u8 connect_mode;
 	__le16 srtt;
-	__le32 cwnd;
 	__le32 ss_thresh;
-	__le16 reserved1;
+	__le32 rcv_wnd;
+	__le32 cwnd;
 	u8 ka_max_probe_cnt;
 	u8 dup_ack_theshold;
+	__le16 reserved1;
+	__le32 ka_timeout;
+	__le32 ka_interval;
+	__le32 max_rt_time;
+	__le32 initial_rcv_wnd;
 	__le32 rcv_next;
 	__le32 snd_una;
 	__le32 snd_next;
 	__le32 snd_max;
 	__le32 snd_wnd;
-	__le32 rcv_wnd;
 	__le32 snd_wl1;
 	__le32 ts_recent;
 	__le32 ts_recent_age;
@@ -122,16 +140,13 @@ struct tcp_offload_params {
 	u8 rt_cnt;
 	__le16 rtt_var;
 	__le16 fw_internal;
-	__le32 ka_timeout;
-	__le32 ka_interval;
-	__le32 max_rt_time;
-	__le32 initial_rcv_wnd;
 	u8 snd_wnd_scale;
 	u8 ack_frequency;
 	__le16 da_timeout_value;
-	__le32 reserved3[2];
+	__le32 reserved3;
 };
 
+/* tcp offload parameters, option 2 */
 struct tcp_offload_params_opt2 {
 	__le16 local_mac_addr_lo;
 	__le16 local_mac_addr_mid;
@@ -140,16 +155,19 @@ struct tcp_offload_params_opt2 {
 	__le16 remote_mac_addr_mid;
 	__le16 remote_mac_addr_hi;
 	__le16 vlan_id;
-	u8 flags;
-#define TCP_OFFLOAD_PARAMS_OPT2_TS_EN_MASK      0x1
-#define TCP_OFFLOAD_PARAMS_OPT2_TS_EN_SHIFT     0
-#define TCP_OFFLOAD_PARAMS_OPT2_DA_EN_MASK      0x1
-#define TCP_OFFLOAD_PARAMS_OPT2_DA_EN_SHIFT     1
-#define TCP_OFFLOAD_PARAMS_OPT2_KA_EN_MASK      0x1
-#define TCP_OFFLOAD_PARAMS_OPT2_KA_EN_SHIFT     2
-#define TCP_OFFLOAD_PARAMS_OPT2_RESERVED0_MASK  0x1F
-#define TCP_OFFLOAD_PARAMS_OPT2_RESERVED0_SHIFT 3
+	__le16 flags;
+#define TCP_OFFLOAD_PARAMS_OPT2_TS_EN_MASK	0x1
+#define TCP_OFFLOAD_PARAMS_OPT2_TS_EN_SHIFT	0
+#define TCP_OFFLOAD_PARAMS_OPT2_DA_EN_MASK	0x1
+#define TCP_OFFLOAD_PARAMS_OPT2_DA_EN_SHIFT	1
+#define TCP_OFFLOAD_PARAMS_OPT2_KA_EN_MASK	0x1
+#define TCP_OFFLOAD_PARAMS_OPT2_KA_EN_SHIFT	2
+#define TCP_OFFLOAD_PARAMS_OPT2_ECN_EN_MASK	0x1
+#define TCP_OFFLOAD_PARAMS_OPT2_ECN_EN_SHIFT	3
+#define TCP_OFFLOAD_PARAMS_OPT2_RESERVED0_MASK	0xFFF
+#define TCP_OFFLOAD_PARAMS_OPT2_RESERVED0_SHIFT	4
 	u8 ip_version;
+	u8 reserved1[3];
 	__le32 remote_ip[4];
 	__le32 local_ip[4];
 	__le32 flow_label;
@@ -163,9 +181,16 @@ struct tcp_offload_params_opt2 {
 	__le16 syn_ip_payload_length;
 	__le32 syn_phy_addr_lo;
 	__le32 syn_phy_addr_hi;
-	__le32 reserved1[22];
+	__le32 cwnd;
+	u8 ka_max_probe_cnt;
+	u8 reserved2[3];
+	__le32 ka_timeout;
+	__le32 ka_interval;
+	__le32 max_rt_time;
+	__le32 reserved3[16];
 };
 
+/* tcp segment placement event enum */
 enum tcp_seg_placement_event {
 	TCP_EVENT_ADD_PEN,
 	TCP_EVENT_ADD_NEW_ISLE,
@@ -177,40 +202,41 @@ enum tcp_seg_placement_event {
 	MAX_TCP_SEG_PLACEMENT_EVENT
 };
 
+/* tcp update parameters */
 struct tcp_update_params {
 	__le16 flags;
-#define TCP_UPDATE_PARAMS_REMOTE_MAC_ADDR_CHANGED_MASK   0x1
-#define TCP_UPDATE_PARAMS_REMOTE_MAC_ADDR_CHANGED_SHIFT  0
-#define TCP_UPDATE_PARAMS_MSS_CHANGED_MASK               0x1
-#define TCP_UPDATE_PARAMS_MSS_CHANGED_SHIFT              1
-#define TCP_UPDATE_PARAMS_TTL_CHANGED_MASK               0x1
-#define TCP_UPDATE_PARAMS_TTL_CHANGED_SHIFT              2
-#define TCP_UPDATE_PARAMS_TOS_OR_TC_CHANGED_MASK         0x1
-#define TCP_UPDATE_PARAMS_TOS_OR_TC_CHANGED_SHIFT        3
-#define TCP_UPDATE_PARAMS_KA_TIMEOUT_CHANGED_MASK        0x1
-#define TCP_UPDATE_PARAMS_KA_TIMEOUT_CHANGED_SHIFT       4
-#define TCP_UPDATE_PARAMS_KA_INTERVAL_CHANGED_MASK       0x1
-#define TCP_UPDATE_PARAMS_KA_INTERVAL_CHANGED_SHIFT      5
-#define TCP_UPDATE_PARAMS_MAX_RT_TIME_CHANGED_MASK       0x1
-#define TCP_UPDATE_PARAMS_MAX_RT_TIME_CHANGED_SHIFT      6
-#define TCP_UPDATE_PARAMS_FLOW_LABEL_CHANGED_MASK        0x1
-#define TCP_UPDATE_PARAMS_FLOW_LABEL_CHANGED_SHIFT       7
-#define TCP_UPDATE_PARAMS_INITIAL_RCV_WND_CHANGED_MASK   0x1
-#define TCP_UPDATE_PARAMS_INITIAL_RCV_WND_CHANGED_SHIFT  8
-#define TCP_UPDATE_PARAMS_KA_MAX_PROBE_CNT_CHANGED_MASK  0x1
-#define TCP_UPDATE_PARAMS_KA_MAX_PROBE_CNT_CHANGED_SHIFT 9
-#define TCP_UPDATE_PARAMS_KA_EN_CHANGED_MASK             0x1
-#define TCP_UPDATE_PARAMS_KA_EN_CHANGED_SHIFT            10
-#define TCP_UPDATE_PARAMS_NAGLE_EN_CHANGED_MASK          0x1
-#define TCP_UPDATE_PARAMS_NAGLE_EN_CHANGED_SHIFT         11
-#define TCP_UPDATE_PARAMS_KA_EN_MASK                     0x1
-#define TCP_UPDATE_PARAMS_KA_EN_SHIFT                    12
-#define TCP_UPDATE_PARAMS_NAGLE_EN_MASK                  0x1
-#define TCP_UPDATE_PARAMS_NAGLE_EN_SHIFT                 13
-#define TCP_UPDATE_PARAMS_KA_RESTART_MASK                0x1
-#define TCP_UPDATE_PARAMS_KA_RESTART_SHIFT               14
-#define TCP_UPDATE_PARAMS_RETRANSMIT_RESTART_MASK        0x1
-#define TCP_UPDATE_PARAMS_RETRANSMIT_RESTART_SHIFT       15
+#define TCP_UPDATE_PARAMS_REMOTE_MAC_ADDR_CHANGED_MASK		0x1
+#define TCP_UPDATE_PARAMS_REMOTE_MAC_ADDR_CHANGED_SHIFT		0
+#define TCP_UPDATE_PARAMS_MSS_CHANGED_MASK			0x1
+#define TCP_UPDATE_PARAMS_MSS_CHANGED_SHIFT			1
+#define TCP_UPDATE_PARAMS_TTL_CHANGED_MASK			0x1
+#define TCP_UPDATE_PARAMS_TTL_CHANGED_SHIFT			2
+#define TCP_UPDATE_PARAMS_TOS_OR_TC_CHANGED_MASK		0x1
+#define TCP_UPDATE_PARAMS_TOS_OR_TC_CHANGED_SHIFT		3
+#define TCP_UPDATE_PARAMS_KA_TIMEOUT_CHANGED_MASK		0x1
+#define TCP_UPDATE_PARAMS_KA_TIMEOUT_CHANGED_SHIFT		4
+#define TCP_UPDATE_PARAMS_KA_INTERVAL_CHANGED_MASK		0x1
+#define TCP_UPDATE_PARAMS_KA_INTERVAL_CHANGED_SHIFT		5
+#define TCP_UPDATE_PARAMS_MAX_RT_TIME_CHANGED_MASK		0x1
+#define TCP_UPDATE_PARAMS_MAX_RT_TIME_CHANGED_SHIFT		6
+#define TCP_UPDATE_PARAMS_FLOW_LABEL_CHANGED_MASK		0x1
+#define TCP_UPDATE_PARAMS_FLOW_LABEL_CHANGED_SHIFT		7
+#define TCP_UPDATE_PARAMS_INITIAL_RCV_WND_CHANGED_MASK		0x1
+#define TCP_UPDATE_PARAMS_INITIAL_RCV_WND_CHANGED_SHIFT		8
+#define TCP_UPDATE_PARAMS_KA_MAX_PROBE_CNT_CHANGED_MASK		0x1
+#define TCP_UPDATE_PARAMS_KA_MAX_PROBE_CNT_CHANGED_SHIFT	9
+#define TCP_UPDATE_PARAMS_KA_EN_CHANGED_MASK			0x1
+#define TCP_UPDATE_PARAMS_KA_EN_CHANGED_SHIFT			10
+#define TCP_UPDATE_PARAMS_NAGLE_EN_CHANGED_MASK			0x1
+#define TCP_UPDATE_PARAMS_NAGLE_EN_CHANGED_SHIFT		11
+#define TCP_UPDATE_PARAMS_KA_EN_MASK				0x1
+#define TCP_UPDATE_PARAMS_KA_EN_SHIFT				12
+#define TCP_UPDATE_PARAMS_NAGLE_EN_MASK				0x1
+#define TCP_UPDATE_PARAMS_NAGLE_EN_SHIFT			13
+#define TCP_UPDATE_PARAMS_KA_RESTART_MASK			0x1
+#define TCP_UPDATE_PARAMS_KA_RESTART_SHIFT			14
+#define TCP_UPDATE_PARAMS_RETRANSMIT_RESTART_MASK		0x1
+#define TCP_UPDATE_PARAMS_RETRANSMIT_RESTART_SHIFT		15
 	__le16 remote_mac_addr_lo;
 	__le16 remote_mac_addr_mid;
 	__le16 remote_mac_addr_hi;
@@ -226,6 +252,7 @@ struct tcp_update_params {
 	u8 reserved1[7];
 };
 
+/* toe upload parameters */
 struct tcp_upload_params {
 	__le32 rcv_next;
 	__le32 snd_una;
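The flags word in tcp_offload_params grows from u8 to __le16 above to make room for the new ECN sender/receiver bits. A hedged sketch of filling it, reusing the SET_FIELD() sketch shown earlier (params is a hypothetical struct tcp_offload_params *):

	u16 flags = 0;

	SET_FIELD(flags, TCP_OFFLOAD_PARAMS_TS_EN, 1);
	SET_FIELD(flags, TCP_OFFLOAD_PARAMS_ECN_SENDER_EN, 1);
	SET_FIELD(flags, TCP_OFFLOAD_PARAMS_ECN_RECEIVER_EN, 1);

	params->flags = cpu_to_le16(flags);	/* field is __le16 on the wire */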
diff --git a/include/linux/rhashtable.h b/include/linux/rhashtable.h
index 361c08e35dbc..c9df2527e0cd 100644
--- a/include/linux/rhashtable.h
+++ b/include/linux/rhashtable.h
@@ -207,6 +207,7 @@ struct rhashtable_iter {
 	struct rhashtable_walker walker;
 	unsigned int slot;
 	unsigned int skip;
+	bool end_of_table;
 };
 
 static inline unsigned long rht_marker(const struct rhashtable *ht, u32 hash)
@@ -239,34 +240,42 @@ static inline unsigned int rht_bucket_index(const struct bucket_table *tbl,
 	return (hash >> RHT_HASH_RESERVED_SPACE) & (tbl->size - 1);
 }
 
-static inline unsigned int rht_key_hashfn(
-	struct rhashtable *ht, const struct bucket_table *tbl,
-	const void *key, const struct rhashtable_params params)
+static inline unsigned int rht_key_get_hash(struct rhashtable *ht,
+	const void *key, const struct rhashtable_params params,
+	unsigned int hash_rnd)
 {
 	unsigned int hash;
 
 	/* params must be equal to ht->p if it isn't constant. */
 	if (!__builtin_constant_p(params.key_len))
-		hash = ht->p.hashfn(key, ht->key_len, tbl->hash_rnd);
+		hash = ht->p.hashfn(key, ht->key_len, hash_rnd);
 	else if (params.key_len) {
 		unsigned int key_len = params.key_len;
 
 		if (params.hashfn)
-			hash = params.hashfn(key, key_len, tbl->hash_rnd);
+			hash = params.hashfn(key, key_len, hash_rnd);
 		else if (key_len & (sizeof(u32) - 1))
-			hash = jhash(key, key_len, tbl->hash_rnd);
+			hash = jhash(key, key_len, hash_rnd);
 		else
-			hash = jhash2(key, key_len / sizeof(u32),
-				      tbl->hash_rnd);
+			hash = jhash2(key, key_len / sizeof(u32), hash_rnd);
 	} else {
 		unsigned int key_len = ht->p.key_len;
 
 		if (params.hashfn)
-			hash = params.hashfn(key, key_len, tbl->hash_rnd);
+			hash = params.hashfn(key, key_len, hash_rnd);
 		else
-			hash = jhash(key, key_len, tbl->hash_rnd);
+			hash = jhash(key, key_len, hash_rnd);
 	}
 
+	return hash;
+}
+
+static inline unsigned int rht_key_hashfn(
+	struct rhashtable *ht, const struct bucket_table *tbl,
+	const void *key, const struct rhashtable_params params)
+{
+	unsigned int hash = rht_key_get_hash(ht, key, params, tbl->hash_rnd);
+
 	return rht_bucket_index(tbl, hash);
 }
 
@@ -378,8 +387,15 @@ void *rhashtable_insert_slow(struct rhashtable *ht, const void *key,
 void rhashtable_walk_enter(struct rhashtable *ht,
 			   struct rhashtable_iter *iter);
 void rhashtable_walk_exit(struct rhashtable_iter *iter);
-int rhashtable_walk_start(struct rhashtable_iter *iter) __acquires(RCU);
+int rhashtable_walk_start_check(struct rhashtable_iter *iter) __acquires(RCU);
+
+static inline void rhashtable_walk_start(struct rhashtable_iter *iter)
+{
+	(void)rhashtable_walk_start_check(iter);
+}
+
 void *rhashtable_walk_next(struct rhashtable_iter *iter);
+void *rhashtable_walk_peek(struct rhashtable_iter *iter);
 void rhashtable_walk_stop(struct rhashtable_iter *iter) __releases(RCU);
 
 void rhashtable_free_and_destroy(struct rhashtable *ht,
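The walk API change above splits rhashtable_walk_start() into a void wrapper around the new rhashtable_walk_start_check(), and adds rhashtable_walk_peek() plus iterator end-of-table tracking. A minimal iteration sketch under the new API (my_entry is a hypothetical hashed object):

struct my_entry {
	struct rhash_head node;
	int value;
};

static void walk_table(struct rhashtable *ht)
{
	struct rhashtable_iter iter;
	struct my_entry *e;

	rhashtable_walk_enter(ht, &iter);
	rhashtable_walk_start(&iter);	/* void now; _check() keeps the int */

	while ((e = rhashtable_walk_next(&iter)) != NULL) {
		if (IS_ERR(e))
			continue;	/* -EAGAIN: concurrent resize, some
					 * entries may be seen twice */
		/* e is only stable under RCU, between start and stop */
	}

	rhashtable_walk_stop(&iter);
	rhashtable_walk_exit(&iter);
}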
diff --git a/include/linux/rtnetlink.h b/include/linux/rtnetlink.h
index 1eadec3fc228..1fdcde96eb65 100644
--- a/include/linux/rtnetlink.h
+++ b/include/linux/rtnetlink.h
@@ -19,10 +19,11 @@ extern int rtnl_put_cacheinfo(struct sk_buff *skb, struct dst_entry *dst,
 
 void rtmsg_ifinfo(int type, struct net_device *dev, unsigned change, gfp_t flags);
 void rtmsg_ifinfo_newnet(int type, struct net_device *dev, unsigned int change,
-			 gfp_t flags, int *new_nsid);
+			 gfp_t flags, int *new_nsid, int new_ifindex);
 struct sk_buff *rtmsg_ifinfo_build_skb(int type, struct net_device *dev,
 				       unsigned change, u32 event,
-				       gfp_t flags, int *new_nsid);
+				       gfp_t flags, int *new_nsid,
+				       int new_ifindex);
 void rtmsg_ifinfo_send(struct sk_buff *skb, struct net_device *dev,
 		       gfp_t flags);
 
@@ -96,13 +97,9 @@ void rtnetlink_init(void);
 void __rtnl_unlock(void);
 void rtnl_kfree_skbs(struct sk_buff *head, struct sk_buff *tail);
 
-#define ASSERT_RTNL() do { \
-	if (unlikely(!rtnl_is_locked())) { \
-		printk(KERN_ERR "RTNL: assertion failed at %s (%d)\n", \
-		       __FILE__,  __LINE__); \
-		dump_stack(); \
-	} \
-} while(0)
+#define ASSERT_RTNL() \
+	WARN_ONCE(!rtnl_is_locked(), \
+		  "RTNL: assertion failed at %s (%d)\n", __FILE__,  __LINE__)
 
 extern int ndo_dflt_fdb_dump(struct sk_buff *skb,
 			     struct netlink_callback *cb,
diff --git a/include/linux/sctp.h b/include/linux/sctp.h
index da803dfc7a39..b36c76635f18 100644
--- a/include/linux/sctp.h
+++ b/include/linux/sctp.h
@@ -102,11 +102,15 @@ enum sctp_cid {
 	/* AUTH Extension Section 4.1 */
 	SCTP_CID_AUTH			= 0x0F,
 
+	/* sctp ndata 5.1. I-DATA */
+	SCTP_CID_I_DATA			= 0x40,
+
 	/* PR-SCTP Sec 3.2 */
 	SCTP_CID_FWD_TSN		= 0xC0,
 
 	/* Use hex, as defined in ADDIP sec. 3.1 */
 	SCTP_CID_ASCONF			= 0xC1,
+	SCTP_CID_I_FWD_TSN		= 0xC2,
 	SCTP_CID_ASCONF_ACK		= 0x80,
 	SCTP_CID_RECONF			= 0x82,
 }; /* enum */
@@ -240,6 +244,23 @@ struct sctp_data_chunk {
 	struct sctp_datahdr data_hdr;
 };
 
+struct sctp_idatahdr {
+	__be32 tsn;
+	__be16 stream;
+	__be16 reserved;
+	__be32 mid;
+	union {
+		__u32 ppid;
+		__be32 fsn;
+	};
+	__u8 payload[0];
+};
+
+struct sctp_idata_chunk {
+	struct sctp_chunkhdr chunk_hdr;
+	struct sctp_idatahdr data_hdr;
+};
+
 /* DATA Chuck Specific Flags */
 enum {
 	SCTP_DATA_MIDDLE_FRAG	= 0x00,
@@ -596,6 +617,22 @@ struct sctp_fwdtsn_chunk {
 	struct sctp_fwdtsn_hdr fwdtsn_hdr;
 };
 
+struct sctp_ifwdtsn_skip {
+	__be16 stream;
+	__u8 reserved;
+	__u8 flags;
+	__be32 mid;
+};
+
+struct sctp_ifwdtsn_hdr {
+	__be32 new_cum_tsn;
+	struct sctp_ifwdtsn_skip skip[0];
+};
+
+struct sctp_ifwdtsn_chunk {
+	struct sctp_chunkhdr chunk_hdr;
+	struct sctp_ifwdtsn_hdr fwdtsn_hdr;
+};
 
 /* ADDIP
  * Section 3.1.1 Address Configuration Change Chunk (ASCONF)
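The new I-DATA chunk (RFC 8260) replaces the stream sequence number with a 32-bit MID and overlays PPID/FSN in a union: the first fragment of a user message carries the PPID, later fragments carry the FSN. A hedged reader sketch of that union discipline:

static bool example_idata_is_first_frag(const struct sctp_idata_chunk *c)
{
	return c->chunk_hdr.flags & SCTP_DATA_FIRST_FRAG;
}

static u32 example_idata_fsn(const struct sctp_idata_chunk *c)
{
	/* which union member is live depends on fragment position */
	return example_idata_is_first_frag(c) ? 0 : ntohl(c->data_hdr.fsn);
}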
diff --git a/include/linux/serdev.h b/include/linux/serdev.h
index d4bb46a26dc3..531031a15cdd 100644
--- a/include/linux/serdev.h
+++ b/include/linux/serdev.h
@@ -76,6 +76,12 @@ static inline struct serdev_device_driver *to_serdev_device_driver(struct device
 	return container_of(d, struct serdev_device_driver, driver);
 }
 
+enum serdev_parity {
+	SERDEV_PARITY_NONE,
+	SERDEV_PARITY_EVEN,
+	SERDEV_PARITY_ODD,
+};
+
 /*
  * serdev controller structures
  */
@@ -86,6 +92,7 @@ struct serdev_controller_ops {
 	int (*open)(struct serdev_controller *);
 	void (*close)(struct serdev_controller *);
 	void (*set_flow_control)(struct serdev_controller *, bool);
+	int (*set_parity)(struct serdev_controller *, enum serdev_parity);
 	unsigned int (*set_baudrate)(struct serdev_controller *, unsigned int);
 	void (*wait_until_sent)(struct serdev_controller *, long);
 	int (*get_tiocm)(struct serdev_controller *);
@@ -299,6 +306,9 @@ static inline int serdev_device_set_rts(struct serdev_device *serdev, bool enabl
 		return serdev_device_set_tiocm(serdev, 0, TIOCM_RTS);
 }
 
+int serdev_device_set_parity(struct serdev_device *serdev,
+			     enum serdev_parity parity);
+
 /*
  * serdev hooks into TTY core
  */
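A sketch of how a client driver might use the new parity hook during probe; the foo_ names are hypothetical, and serdev_device_set_parity() presumably errors out when the underlying controller does not implement set_parity:

static int foo_probe(struct serdev_device *serdev)
{
	int ret;

	ret = serdev_device_set_parity(serdev, SERDEV_PARITY_EVEN);
	if (ret)
		return ret;	/* controller lacks the hook, or HW error */

	serdev_device_set_baudrate(serdev, 115200);
	serdev_device_set_flow_control(serdev, false);
	return 0;
}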
diff --git a/include/linux/sfp.h b/include/linux/sfp.h
index 4a906f560817..e724d5a3dd80 100644
--- a/include/linux/sfp.h
+++ b/include/linux/sfp.h
@@ -3,7 +3,7 @@
 
 #include <linux/phy.h>
 
-struct __packed sfp_eeprom_base {
+struct sfp_eeprom_base {
 	u8 phys_id;
 	u8 phys_ext_id;
 	u8 connector;
@@ -165,13 +165,47 @@ struct __packed sfp_eeprom_base {
 	char vendor_rev[4];
 	union {
 		__be16 optical_wavelength;
-		u8 cable_spec;
-	};
+		__be16 cable_compliance;
+		struct {
+#if defined __BIG_ENDIAN_BITFIELD
+			u8 reserved60_2:6;
+			u8 fc_pi_4_app_h:1;
+			u8 sff8431_app_e:1;
+			u8 reserved61:8;
+#elif defined __LITTLE_ENDIAN_BITFIELD
+			u8 sff8431_app_e:1;
+			u8 fc_pi_4_app_h:1;
+			u8 reserved60_2:6;
+			u8 reserved61:8;
+#else
+#error Unknown Endian
+#endif
+		} __packed passive;
+		struct {
+#if defined __BIG_ENDIAN_BITFIELD
+			u8 reserved60_4:4;
+			u8 fc_pi_4_lim:1;
+			u8 sff8431_lim:1;
+			u8 fc_pi_4_app_h:1;
+			u8 sff8431_app_e:1;
+			u8 reserved61:8;
+#elif defined __LITTLE_ENDIAN_BITFIELD
+			u8 sff8431_app_e:1;
+			u8 fc_pi_4_app_h:1;
+			u8 sff8431_lim:1;
+			u8 fc_pi_4_lim:1;
+			u8 reserved60_4:4;
+			u8 reserved61:8;
+#else
+#error Unknown Endian
+#endif
+		} __packed active;
+	} __packed;
 	u8 reserved62;
 	u8 cc_base;
-};
+} __packed;
 
-struct __packed sfp_eeprom_ext {
+struct sfp_eeprom_ext {
 	__be16 options;
 	u8 br_max;
 	u8 br_min;
@@ -181,12 +215,21 @@ struct __packed sfp_eeprom_ext {
 	u8 enhopts;
 	u8 sff8472_compliance;
 	u8 cc_ext;
-};
-
-struct __packed sfp_eeprom_id {
+} __packed;
+
+/**
+ * struct sfp_eeprom_id - raw SFP module identification information
+ * @base: base SFP module identification structure
+ * @ext: extended SFP module identification structure
+ *
+ * See the SFF-8472 specification and related documents for the definition
+ * of these structure members. This can be obtained from
+ * ftp://ftp.seagate.com/sff
+ */
+struct sfp_eeprom_id {
 	struct sfp_eeprom_base base;
 	struct sfp_eeprom_ext ext;
-};
+} __packed;
 
 /* SFP EEPROM registers */
 enum {
@@ -222,6 +265,7 @@ enum {
 	SFP_SFF8472_COMPLIANCE		= 0x5e,
 	SFP_CC_EXT			= 0x5f,
 
+	SFP_PHYS_ID_SFF			= 0x02,
 	SFP_PHYS_ID_SFP			= 0x03,
 	SFP_PHYS_EXT_ID_SFP		= 0x04,
 	SFP_CONNECTOR_UNSPEC		= 0x00,
@@ -347,19 +391,32 @@ enum {
 	SFP_PAGE			= 0x7f,
 };
 
-struct device_node;
+struct fwnode_handle;
 struct ethtool_eeprom;
 struct ethtool_modinfo;
 struct net_device;
 struct sfp_bus;
 
+/**
+ * struct sfp_upstream_ops - upstream operations structure
+ * @module_insert: called after a module has been detected to determine
+ *   whether the module is supported for the upstream device.
+ * @module_remove: called after the module has been removed.
+ * @link_down: called when the link is non-operational for whatever
+ *   reason.
+ * @link_up: called when the link is operational.
+ * @connect_phy: called when an I2C accessible PHY has been detected
+ *   on the module.
+ * @disconnect_phy: called when a module with an I2C accessible PHY has
+ *   been removed.
+ */
 struct sfp_upstream_ops {
-	int (*module_insert)(void *, const struct sfp_eeprom_id *id);
-	void (*module_remove)(void *);
-	void (*link_down)(void *);
-	void (*link_up)(void *);
-	int (*connect_phy)(void *, struct phy_device *);
-	void (*disconnect_phy)(void *);
+	int (*module_insert)(void *priv, const struct sfp_eeprom_id *id);
+	void (*module_remove)(void *priv);
+	void (*link_down)(void *priv);
+	void (*link_up)(void *priv);
+	int (*connect_phy)(void *priv, struct phy_device *);
+	void (*disconnect_phy)(void *priv);
 };
 
 #if IS_ENABLED(CONFIG_SFP)
@@ -375,7 +432,7 @@ int sfp_get_module_eeprom(struct sfp_bus *bus, struct ethtool_eeprom *ee,
 			  u8 *data);
 void sfp_upstream_start(struct sfp_bus *bus);
 void sfp_upstream_stop(struct sfp_bus *bus);
-struct sfp_bus *sfp_register_upstream(struct device_node *np,
+struct sfp_bus *sfp_register_upstream(struct fwnode_handle *fwnode,
 				      struct net_device *ndev, void *upstream,
 				      const struct sfp_upstream_ops *ops);
 void sfp_unregister_upstream(struct sfp_bus *bus);
@@ -419,7 +476,8 @@ static inline void sfp_upstream_stop(struct sfp_bus *bus)
 {
 }
 
-static inline struct sfp_bus *sfp_register_upstream(struct device_node *np,
+static inline struct sfp_bus *sfp_register_upstream(
+	struct fwnode_handle *fwnode,
 	struct net_device *ndev, void *upstream,
 	const struct sfp_upstream_ops *ops)
 {
diff --git a/include/linux/sh_eth.h b/include/linux/sh_eth.h
index 94081e9a5010..6dfda97a6c1a 100644
--- a/include/linux/sh_eth.h
+++ b/include/linux/sh_eth.h
@@ -5,12 +5,9 @@
 #include <linux/phy.h>
 #include <linux/if_ether.h>
 
-enum {EDMAC_LITTLE_ENDIAN};
-
 struct sh_eth_plat_data {
 	int phy;
 	int phy_irq;
-	int edmac_endian;
 	phy_interface_t phy_interface;
 	void (*set_mdio_gate)(void *addr);
 
diff --git a/include/linux/skb_array.h b/include/linux/skb_array.h
index 8621ffdeecbf..a6b6e8bb3d7b 100644
--- a/include/linux/skb_array.h
+++ b/include/linux/skb_array.h
@@ -69,7 +69,12 @@ static inline int skb_array_produce_any(struct skb_array *a, struct sk_buff *skb
  */
 static inline bool __skb_array_empty(struct skb_array *a)
 {
-	return !__ptr_ring_peek(&a->ring);
+	return __ptr_ring_empty(&a->ring);
+}
+
+static inline struct sk_buff *__skb_array_peek(struct skb_array *a)
+{
+	return __ptr_ring_peek(&a->ring);
 }
 
 static inline bool skb_array_empty(struct skb_array *a)
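__skb_array_empty() now reuses __ptr_ring_empty(), and the new __skb_array_peek() exposes the head of the ring without consuming it. A sketch of the resulting peek-then-consume pattern (double-underscore variants are unlocked, so the caller serializes consumers; can_accept() is hypothetical):

	struct sk_buff *skb = __skb_array_peek(a);

	if (skb && can_accept(skb))
		skb = __skb_array_consume(a);	/* actually dequeue it */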
diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
index a87e43d16f44..ac89a93b7c83 100644
--- a/include/linux/skbuff.h
+++ b/include/linux/skbuff.h
@@ -1211,6 +1211,11 @@ static inline bool skb_flow_dissect_flow_keys_buf(struct flow_keys *flow,
 				  data, proto, nhoff, hlen, flags);
 }
 
+void
+skb_flow_dissect_tunnel_info(const struct sk_buff *skb,
+			     struct flow_dissector *flow_dissector,
+			     void *target_container);
+
 static inline __u32 skb_get_hash(struct sk_buff *skb)
 {
 	if (!skb->l4_hash && !skb->sw_hash)
diff --git a/include/linux/spinlock.h b/include/linux/spinlock.h
index 3bf273538840..4894d322d258 100644
--- a/include/linux/spinlock.h
+++ b/include/linux/spinlock.h
@@ -409,4 +409,10 @@ extern int _atomic_dec_and_lock(atomic_t *atomic, spinlock_t *lock);
 #define atomic_dec_and_lock(atomic, lock) \
 		__cond_lock(lock, _atomic_dec_and_lock(atomic, lock))
 
+int alloc_bucket_spinlocks(spinlock_t **locks, unsigned int *lock_mask,
+			   size_t max_size, unsigned int cpu_mult,
+			   gfp_t gfp);
+
+void free_bucket_spinlocks(spinlock_t *locks);
+
 #endif /* __LINUX_SPINLOCK_H */
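The new helpers allocate a hashed array of spinlocks, sized by max_size and scaled by cpu_mult locks per possible CPU, and hand back a mask for indexing. A minimal usage sketch:

static spinlock_t *bucket_locks;
static unsigned int bucket_mask;

static int example_init(void)
{
	/* at most 1024 locks, scaled as 4 per possible CPU */
	return alloc_bucket_spinlocks(&bucket_locks, &bucket_mask,
				      1024, 4, GFP_KERNEL);
}

static void example_update(u32 hash)
{
	spin_lock(&bucket_locks[hash & bucket_mask]);
	/* ... touch the hashed bucket ... */
	spin_unlock(&bucket_locks[hash & bucket_mask]);
}

static void example_exit(void)
{
	free_bucket_spinlocks(bucket_locks);
}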
diff --git a/include/linux/tcp.h b/include/linux/tcp.h
index ca4a6361389b..8f4c54986f97 100644
--- a/include/linux/tcp.h
+++ b/include/linux/tcp.h
@@ -335,6 +335,17 @@ struct tcp_sock {
 
 	int			linger2;
 
+
+/* Sock_ops bpf program related variables */
+#ifdef CONFIG_BPF
+	u8	bpf_sock_ops_cb_flags;  /* Control calling BPF programs
+					 * values defined in uapi/linux/tcp.h
+					 */
+#define BPF_SOCK_OPS_TEST_FLAG(TP, ARG) (TP->bpf_sock_ops_cb_flags & ARG)
+#else
+#define BPF_SOCK_OPS_TEST_FLAG(TP, ARG) 0
+#endif
+
 /* Receiver side RTT estimation */
 	struct {
 		u32	rtt_us;
@@ -344,7 +355,7 @@ struct tcp_sock {
 
 /* Receiver queue space */
 	struct {
-		int	space;
+		u32	space;
 		u32	seq;
 		u64	time;
 	} rcvq_space;
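bpf_sock_ops_cb_flags gates which TCP events call out to an attached sock_ops BPF program, with the flag values defined in uapi/linux/tcp.h elsewhere in this series. A hedged sketch of the intended call-site shape (BPF_SOCK_OPS_RTO_CB and its _FLAG come from that same series; tcp_call_bpf() lives in net/tcp.h):

	struct tcp_sock *tp = tcp_sk(sk);

	if (BPF_SOCK_OPS_TEST_FLAG(tp, BPF_SOCK_OPS_RTO_CB_FLAG))
		tcp_call_bpf(sk, BPF_SOCK_OPS_RTO_CB);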
diff --git a/include/linux/trace_events.h b/include/linux/trace_events.h
index af44e7c2d577..8a1442c4e513 100644
--- a/include/linux/trace_events.h
+++ b/include/linux/trace_events.h
@@ -467,6 +467,7 @@ trace_trigger_soft_disabled(struct trace_event_file *file)
 unsigned int trace_call_bpf(struct trace_event_call *call, void *ctx);
 int perf_event_attach_bpf_prog(struct perf_event *event, struct bpf_prog *prog);
 void perf_event_detach_bpf_prog(struct perf_event *event);
+int perf_event_query_prog_array(struct perf_event *event, void __user *info);
 #else
 static inline unsigned int trace_call_bpf(struct trace_event_call *call, void *ctx)
 {
@@ -481,6 +482,11 @@ perf_event_attach_bpf_prog(struct perf_event *event, struct bpf_prog *prog)
 
 static inline void perf_event_detach_bpf_prog(struct perf_event *event) { }
 
+static inline int
+perf_event_query_prog_array(struct perf_event *event, void __user *info)
+{
+	return -EOPNOTSUPP;
+}
 #endif
 
 enum {
@@ -528,6 +534,7 @@ do {									\
 struct perf_event;
 
 DECLARE_PER_CPU(struct pt_regs, perf_trace_regs);
+DECLARE_PER_CPU(int, bpf_kprobe_override);
 
 extern int  perf_trace_init(struct perf_event *event);
 extern void perf_trace_destroy(struct perf_event *event);
diff --git a/include/net/act_api.h b/include/net/act_api.h
index fd08df74c466..6ed9692f20bd 100644
--- a/include/net/act_api.h
+++ b/include/net/act_api.h
@@ -86,7 +86,7 @@ struct tc_action_ops {
 	int     (*act)(struct sk_buff *, const struct tc_action *,
 		       struct tcf_result *);
 	int     (*dump)(struct sk_buff *, struct tc_action *, int, int);
-	void	(*cleanup)(struct tc_action *, int bind);
+	void	(*cleanup)(struct tc_action *);
 	int     (*lookup)(struct net *, struct tc_action **, u32);
 	int     (*init)(struct net *net, struct nlattr *nla,
 			struct nlattr *est, struct tc_action **act, int ovr,
@@ -120,12 +120,19 @@ int tc_action_net_init(struct tc_action_net *tn,
 void tcf_idrinfo_destroy(const struct tc_action_ops *ops,
 			 struct tcf_idrinfo *idrinfo);
 
-static inline void tc_action_net_exit(struct tc_action_net *tn)
+static inline void tc_action_net_exit(struct list_head *net_list,
+				      unsigned int id)
 {
+	struct net *net;
+
 	rtnl_lock();
-	tcf_idrinfo_destroy(tn->ops, tn->idrinfo);
+	list_for_each_entry(net, net_list, exit_list) {
+		struct tc_action_net *tn = net_generic(net, id);
+
+		tcf_idrinfo_destroy(tn->ops, tn->idrinfo);
+		kfree(tn->idrinfo);
+	}
 	rtnl_unlock();
-	kfree(tn->idrinfo);
 }
 
 int tcf_generic_walker(struct tc_action_net *tn, struct sk_buff *skb,
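tc_action_net_exit() becomes batch-aware: instead of tearing down a single tc_action_net, it walks the whole pernet exit list under one rtnl_lock. Action modules are expected to hook it up through .exit_batch, roughly as below (the foo_ names are hypothetical):

static unsigned int foo_net_id;

static void __net_exit foo_exit_net(struct list_head *net_list)
{
	tc_action_net_exit(net_list, foo_net_id);
}

static struct pernet_operations foo_net_ops = {
	.init		= foo_init_net,		/* hypothetical per-net init */
	.exit_batch	= foo_exit_net,
	.id		= &foo_net_id,
	.size		= sizeof(struct tc_action_net),
};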
diff --git a/include/net/addrconf.h b/include/net/addrconf.h
index b623b65a79d1..c4185a7b0e90 100644
--- a/include/net/addrconf.h
+++ b/include/net/addrconf.h
@@ -180,7 +180,7 @@ static inline int addrconf_finite_timeout(unsigned long timeout)
  */
 int ipv6_addr_label_init(void);
 void ipv6_addr_label_cleanup(void);
-void ipv6_addr_label_rtnl_register(void);
+int ipv6_addr_label_rtnl_register(void);
 u32 ipv6_addr_label(struct net *net, const struct in6_addr *addr,
 		    int type, int ifindex);
 
diff --git a/include/net/caif/cfpkt.h b/include/net/caif/cfpkt.h
index fe328c52c46b..801489bb14c3 100644
--- a/include/net/caif/cfpkt.h
+++ b/include/net/caif/cfpkt.h
@@ -32,6 +32,33 @@ void cfpkt_destroy(struct cfpkt *pkt);
  */
 int cfpkt_extr_head(struct cfpkt *pkt, void *data, u16 len);
 
+static inline u8 cfpkt_extr_head_u8(struct cfpkt *pkt)
+{
+	u8 tmp;
+
+	cfpkt_extr_head(pkt, &tmp, 1);
+
+	return tmp;
+}
+
+static inline u16 cfpkt_extr_head_u16(struct cfpkt *pkt)
+{
+	__le16 tmp;
+
+	cfpkt_extr_head(pkt, &tmp, 2);
+
+	return le16_to_cpu(tmp);
+}
+
+static inline u32 cfpkt_extr_head_u32(struct cfpkt *pkt)
+{
+	__le32 tmp;
+
+	cfpkt_extr_head(pkt, &tmp, 4);
+
+	return le32_to_cpu(tmp);
+}
+
 /*
  * Peek header from packet.
  * Reads data from packet without changing packet.
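The new helpers wrap cfpkt_extr_head() for the common fixed-width little-endian cases, so call sites lose their temporary variables and open-coded endian conversions. Illustrative use on a CAIF control payload:

	u8  cmd     = cfpkt_extr_head_u8(pkt);
	u16 channel = cfpkt_extr_head_u16(pkt);	/* le16 on the wire -> cpu order */
	u32 conn_id = cfpkt_extr_head_u32(pkt);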
diff --git a/include/net/cfg80211.h b/include/net/cfg80211.h
index fb94a8bd8ab5..81174f9b8d14 100644
--- a/include/net/cfg80211.h
+++ b/include/net/cfg80211.h
@@ -1775,6 +1775,8 @@ enum cfg80211_signal_type {
  *	by %parent_bssid.
  * @parent_bssid: the BSS according to which %parent_tsf is set. This is set to
  *	the BSS that requested the scan in which the beacon/probe was received.
+ * @chains: bitmask for filled values in @chain_signal.
+ * @chain_signal: per-chain signal strength of last received BSS in dBm.
  */
 struct cfg80211_inform_bss {
 	struct ieee80211_channel *chan;
@@ -1783,6 +1785,8 @@ struct cfg80211_inform_bss {
 	u64 boottime_ns;
 	u64 parent_tsf;
 	u8 parent_bssid[ETH_ALEN] __aligned(2);
+	u8 chains;
+	s8 chain_signal[IEEE80211_MAX_CHAINS];
 };
 
 /**
@@ -1826,6 +1830,8 @@ struct cfg80211_bss_ies {
  *	that holds the beacon data. @beacon_ies is still valid, of course, and
  *	points to the same data as hidden_beacon_bss->beacon_ies in that case.
  * @signal: signal strength value (type depends on the wiphy's signal_type)
+ * @chains: bitmask for filled values in @chain_signal.
+ * @chain_signal: per-chain signal strength of last received BSS in dBm.
  * @priv: private area for driver use, has at least wiphy->bss_priv_size bytes
  */
 struct cfg80211_bss {
@@ -1844,6 +1850,8 @@ struct cfg80211_bss {
 	u16 capability;
 
 	u8 bssid[ETH_ALEN];
+	u8 chains;
+	s8 chain_signal[IEEE80211_MAX_CHAINS];
 
 	u8 priv[0] __aligned(sizeof(void *));
 };
@@ -2023,6 +2031,9 @@ struct cfg80211_disassoc_request {
  * @ht_capa:  HT Capabilities over-rides.  Values set in ht_capa_mask
  *	will be used in ht_capa.  Un-supported values will be ignored.
  * @ht_capa_mask:  The bits of ht_capa which are to be used.
+ * @wep_keys: static WEP keys, if not NULL points to an array of
+ * 	CFG80211_MAX_WEP_KEYS WEP keys
+ * @wep_tx_key: key index (0..3) of the default TX static WEP key
  */
 struct cfg80211_ibss_params {
 	const u8 *ssid;
@@ -2039,6 +2050,8 @@ struct cfg80211_ibss_params {
 	int mcast_rate[NUM_NL80211_BANDS];
 	struct ieee80211_ht_cap ht_capa;
 	struct ieee80211_ht_cap ht_capa_mask;
+	struct key_params *wep_keys;
+	int wep_tx_key;
 };
 
 /**
@@ -5577,7 +5590,7 @@ void cfg80211_conn_failed(struct net_device *dev, const u8 *mac_addr,
  * cfg80211_rx_mgmt - notification of received, unprocessed management frame
  * @wdev: wireless device receiving the frame
  * @freq: Frequency on which the frame was received in MHz
- * @sig_dbm: signal strength in mBm, or 0 if unknown
+ * @sig_dbm: signal strength in dBm, or 0 if unknown
  * @buf: Management frame (header + body)
  * @len: length of the frame data
  * @flags: flags, as defined in enum nl80211_rxmgmt_flags
@@ -5756,7 +5769,7 @@ void cfg80211_probe_status(struct net_device *dev, const u8 *addr,
  * @frame: the frame
  * @len: length of the frame
  * @freq: frequency the frame was received on
- * @sig_dbm: signal strength in mBm, or 0 if unknown
+ * @sig_dbm: signal strength in dBm, or 0 if unknown
  *
  * Use this function to report to userspace when a beacon was
  * received. It is not useful to call this when there is no
diff --git a/include/net/devlink.h b/include/net/devlink.h
index b9654e133599..6545b03e97f7 100644
--- a/include/net/devlink.h
+++ b/include/net/devlink.h
@@ -26,10 +26,12 @@ struct devlink {
 	struct list_head port_list;
 	struct list_head sb_list;
 	struct list_head dpipe_table_list;
+	struct list_head resource_list;
 	struct devlink_dpipe_headers *dpipe_headers;
 	const struct devlink_ops *ops;
 	struct device *dev;
 	possible_net_t _net;
+	struct mutex lock;
 	char priv[0] __aligned(NETDEV_ALIGN);
 };
 
@@ -181,6 +183,9 @@ struct devlink_dpipe_table_ops;
  * @counters_enabled: indicates if counters are active
  * @counter_control_extern: indicates if counter control is in dpipe or
  *			    external tool
+ * @resource_valid: indicates that the resource id is valid
+ * @resource_id: id of the resource this table is related to
+ * @resource_units: number of resource units consumed per table entry
  * @table_ops: table operations
  * @rcu: rcu
  */
@@ -190,6 +195,9 @@ struct devlink_dpipe_table {
 	const char *name;
 	bool counters_enabled;
 	bool counter_control_extern;
+	bool resource_valid;
+	u64 resource_id;
+	u64 resource_units;
 	struct devlink_dpipe_table_ops *table_ops;
 	struct rcu_head rcu;
 };
@@ -223,7 +231,63 @@ struct devlink_dpipe_headers {
 	unsigned int headers_count;
 };
 
+/**
+ * struct devlink_resource_ops - resource ops
+ * @occ_get: get the occupied size
+ * @size_validate: validate the size of the resource before update; a
+ *                 reload is needed for changes to take place
+ */
+struct devlink_resource_ops {
+	u64 (*occ_get)(struct devlink *devlink);
+	int (*size_validate)(struct devlink *devlink, u64 size,
+			     struct netlink_ext_ack *extack);
+};
+
+/**
+ * struct devlink_resource_size_params - resource's size parameters
+ * @size_min: minimum size which can be set
+ * @size_max: maximum size which can be set
+ * @size_granularity: size granularity
+ * @unit: resource's basic unit
+ */
+struct devlink_resource_size_params {
+	u64 size_min;
+	u64 size_max;
+	u64 size_granularity;
+	enum devlink_resource_unit unit;
+};
+
+/**
+ * struct devlink_resource - devlink resource
+ * @name: name of the resource
+ * @id: id, per devlink instance
+ * @size: size of the resource
+ * @size_new: updated size of the resource; takes effect after reload
+ * @size_valid: valid in case the total size of the resource is valid
+ *              including its children
+ * @parent: parent resource
+ * @size_params: size parameters
+ * @list: parent list
+ * @resource_list: list of child resources
+ * @resource_ops: resource ops
+ */
+struct devlink_resource {
+	const char *name;
+	u64 id;
+	u64 size;
+	u64 size_new;
+	bool size_valid;
+	struct devlink_resource *parent;
+	struct devlink_resource_size_params *size_params;
+	struct list_head list;
+	struct list_head resource_list;
+	const struct devlink_resource_ops *resource_ops;
+};
+
+#define DEVLINK_RESOURCE_ID_PARENT_TOP 0
+
 struct devlink_ops {
+	int (*reload)(struct devlink *devlink);
 	int (*port_type_set)(struct devlink_port *devlink_port,
 			     enum devlink_port_type port_type);
 	int (*port_split)(struct devlink *devlink, unsigned int port_index,
@@ -332,6 +396,23 @@ extern struct devlink_dpipe_header devlink_dpipe_header_ethernet;
 extern struct devlink_dpipe_header devlink_dpipe_header_ipv4;
 extern struct devlink_dpipe_header devlink_dpipe_header_ipv6;
 
+int devlink_resource_register(struct devlink *devlink,
+			      const char *resource_name,
+			      bool top_hierarchy,
+			      u64 resource_size,
+			      u64 resource_id,
+			      u64 parent_resource_id,
+			      struct devlink_resource_size_params *size_params,
+			      const struct devlink_resource_ops *resource_ops);
+void devlink_resources_unregister(struct devlink *devlink,
+				  struct devlink_resource *resource);
+int devlink_resource_size_get(struct devlink *devlink,
+			      u64 resource_id,
+			      u64 *p_resource_size);
+int devlink_dpipe_table_resource_set(struct devlink *devlink,
+				     const char *table_name, u64 resource_id,
+				     u64 resource_units);
+
 #else
 
 static inline struct devlink *devlink_alloc(const struct devlink_ops *ops,
@@ -468,6 +549,40 @@ devlink_dpipe_match_put(struct sk_buff *skb,
 	return 0;
 }
 
+static inline int
+devlink_resource_register(struct devlink *devlink,
+			  const char *resource_name,
+			  bool top_hierarchy,
+			  u64 resource_size,
+			  u64 resource_id,
+			  u64 parent_resource_id,
+			  struct devlink_resource_size_params *size_params,
+			  const struct devlink_resource_ops *resource_ops)
+{
+	return 0;
+}
+
+static inline void
+devlink_resources_unregister(struct devlink *devlink,
+			     struct devlink_resource *resource)
+{
+}
+
+static inline int
+devlink_resource_size_get(struct devlink *devlink, u64 resource_id,
+			  u64 *p_resource_size)
+{
+	return -EOPNOTSUPP;
+}
+
+static inline int
+devlink_dpipe_table_resource_set(struct devlink *devlink,
+				 const char *table_name, u64 resource_id,
+				 u64 resource_units)
+{
+	return -EOPNOTSUPP;
+}
+
 #endif
 
 #endif /* _NET_DEVLINK_H_ */
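Driver-side flow for the new resource API, as a hedged sketch: register a top-level resource with its size envelope, optionally bind a dpipe table to it, and tear everything down with devlink_resources_unregister(). All foo_/FOO_ names are hypothetical:

static const struct devlink_resource_ops foo_res_ops = {
	.occ_get = foo_occ_get,		/* returns current occupancy */
};

static struct devlink_resource_size_params foo_size_params = {
	.size_min		= 64,
	.size_max		= 4096,
	.size_granularity	= 64,
	.unit			= DEVLINK_RESOURCE_UNIT_ENTRY,
};

	err = devlink_resource_register(devlink, "foo_table", true, 1024,
					FOO_RESOURCE_ID,
					DEVLINK_RESOURCE_ID_PARENT_TOP,
					&foo_size_params, &foo_res_ops);
	if (!err)
		err = devlink_dpipe_table_resource_set(devlink, "foo_table",
						       FOO_RESOURCE_ID, 1);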
diff --git a/include/net/dn_route.h b/include/net/dn_route.h
index 55df9939bca2..342d2503cba5 100644
--- a/include/net/dn_route.h
+++ b/include/net/dn_route.h
@@ -69,6 +69,7 @@ int dn_route_rcv(struct sk_buff *skb, struct net_device *dev,
  */
 struct dn_route {
 	struct dst_entry dst;
+	struct dn_route __rcu *dn_next;
 
 	struct neighbour *n;
 
diff --git a/include/net/dsa.h b/include/net/dsa.h
index 2a05738570d8..6cb602dd970c 100644
--- a/include/net/dsa.h
+++ b/include/net/dsa.h
@@ -296,31 +296,39 @@ static inline u32 dsa_user_ports(struct dsa_switch *ds)
 	return mask;
 }
 
-static inline u8 dsa_upstream_port(struct dsa_switch *ds)
+/* Return the local port used to reach an arbitrary switch port */
+static inline unsigned int dsa_towards_port(struct dsa_switch *ds, int device,
+					    int port)
 {
-	struct dsa_switch_tree *dst = ds->dst;
-
-	/*
-	 * If this is the root switch (i.e. the switch that connects
-	 * to the CPU), return the cpu port number on this switch.
-	 * Else return the (DSA) port number that connects to the
-	 * switch that is one hop closer to the cpu.
-	 */
-	if (dst->cpu_dp->ds == ds)
-		return dst->cpu_dp->index;
+	if (device == ds->index)
+		return port;
 	else
-		return ds->rtable[dst->cpu_dp->ds->index];
+		return ds->rtable[device];
+}
+
+/* Return the local port used to reach the dedicated CPU port */
+static inline unsigned int dsa_upstream_port(struct dsa_switch *ds, int port)
+{
+	const struct dsa_port *dp = dsa_to_port(ds, port);
+	const struct dsa_port *cpu_dp = dp->cpu_dp;
+
+	if (!cpu_dp)
+		return port;
+
+	return dsa_towards_port(ds, cpu_dp->ds->index, cpu_dp->index);
 }
 
 typedef int dsa_fdb_dump_cb_t(const unsigned char *addr, u16 vid,
 			      bool is_static, void *data);
 struct dsa_switch_ops {
+#if IS_ENABLED(CONFIG_NET_DSA_LEGACY)
 	/*
 	 * Legacy probing.
 	 */
 	const char	*(*probe)(struct device *dsa_dev,
 				  struct device *host_dev, int sw_addr,
 				  void **priv);
+#endif
 
 	enum dsa_tag_protocol (*get_tag_protocol)(struct dsa_switch *ds,
 						  int port);
@@ -412,12 +420,10 @@ struct dsa_switch_ops {
 	 */
 	int	(*port_vlan_filtering)(struct dsa_switch *ds, int port,
 				       bool vlan_filtering);
-	int	(*port_vlan_prepare)(struct dsa_switch *ds, int port,
-				     const struct switchdev_obj_port_vlan *vlan,
-				     struct switchdev_trans *trans);
-	void	(*port_vlan_add)(struct dsa_switch *ds, int port,
-				 const struct switchdev_obj_port_vlan *vlan,
-				 struct switchdev_trans *trans);
+	int (*port_vlan_prepare)(struct dsa_switch *ds, int port,
+				 const struct switchdev_obj_port_vlan *vlan);
+	void (*port_vlan_add)(struct dsa_switch *ds, int port,
+			      const struct switchdev_obj_port_vlan *vlan);
 	int	(*port_vlan_del)(struct dsa_switch *ds, int port,
 				 const struct switchdev_obj_port_vlan *vlan);
 	/*
@@ -433,12 +439,10 @@ struct dsa_switch_ops {
 	/*
 	 * Multicast database
 	 */
-	int	(*port_mdb_prepare)(struct dsa_switch *ds, int port,
-				    const struct switchdev_obj_port_mdb *mdb,
-				    struct switchdev_trans *trans);
-	void	(*port_mdb_add)(struct dsa_switch *ds, int port,
-				const struct switchdev_obj_port_mdb *mdb,
-				struct switchdev_trans *trans);
+	int (*port_mdb_prepare)(struct dsa_switch *ds, int port,
+				const struct switchdev_obj_port_mdb *mdb);
+	void (*port_mdb_add)(struct dsa_switch *ds, int port,
+			     const struct switchdev_obj_port_mdb *mdb);
 	int	(*port_mdb_del)(struct dsa_switch *ds, int port,
 				const struct switchdev_obj_port_mdb *mdb);
 	/*
@@ -472,11 +476,20 @@ struct dsa_switch_driver {
 	const struct dsa_switch_ops *ops;
 };
 
+#if IS_ENABLED(CONFIG_NET_DSA_LEGACY)
 /* Legacy driver registration */
 void register_switch_driver(struct dsa_switch_driver *type);
 void unregister_switch_driver(struct dsa_switch_driver *type);
 struct mii_bus *dsa_host_dev_to_mii_bus(struct device *dev);
 
+#else
+static inline void register_switch_driver(struct dsa_switch_driver *type) { }
+static inline void unregister_switch_driver(struct dsa_switch_driver *type) { }
+static inline struct mii_bus *dsa_host_dev_to_mii_bus(struct device *dev)
+{
+	return NULL;
+}
+#endif
 struct net_device *dsa_dev_to_net_device(struct device *dev);
 
 /* Keep inline for faster access in hot path */
diff --git a/include/net/dst.h b/include/net/dst.h
index d49d607dd2b3..c63d2c37f6e9 100644
--- a/include/net/dst.h
+++ b/include/net/dst.h
@@ -34,13 +34,9 @@ struct sk_buff;
 
 struct dst_entry {
 	struct net_device       *dev;
-	struct rcu_head		rcu_head;
-	struct dst_entry	*child;
 	struct  dst_ops	        *ops;
 	unsigned long		_metrics;
 	unsigned long           expires;
-	struct dst_entry	*path;
-	struct dst_entry	*from;
 #ifdef CONFIG_XFRM
 	struct xfrm_state	*xfrm;
 #else
@@ -59,8 +55,6 @@ struct dst_entry {
 #define DST_XFRM_QUEUE		0x0040
 #define DST_METADATA		0x0080
 
-	short			error;
-
 	/* A non-zero value of dst->obsolete forces by-hand validation
 	 * of the route entry.  Positive values are set by the generic
 	 * dst layer to indicate that the entry has been forcefully
@@ -76,35 +70,24 @@ struct dst_entry {
 #define DST_OBSOLETE_KILL	-2
 	unsigned short		header_len;	/* more space at head required */
 	unsigned short		trailer_len;	/* space to reserve at tail */
-	unsigned short		__pad3;
 
-#ifdef CONFIG_IP_ROUTE_CLASSID
-	__u32			tclassid;
-#else
-	__u32			__pad2;
-#endif
-
-#ifdef CONFIG_64BIT
-	/*
-	 * Align __refcnt to a 64 bytes alignment
-	 * (L1_CACHE_SIZE would be too much)
-	 */
-	long			__pad_to_align_refcnt[2];
-#endif
 	/*
 	 * __refcnt wants to be on a different cache line from
 	 * input/output/ops or performance tanks badly
 	 */
-	atomic_t		__refcnt;	/* client references	*/
+#ifdef CONFIG_64BIT
+	atomic_t		__refcnt;	/* 64-bit offset 64 */
+#endif
 	int			__use;
 	unsigned long		lastuse;
 	struct lwtunnel_state   *lwtstate;
-	union {
-		struct dst_entry	*next;
-		struct rtable __rcu	*rt_next;
-		struct rt6_info __rcu	*rt6_next;
-		struct dn_route __rcu	*dn_next;
-	};
+	struct rcu_head		rcu_head;
+	short			error;
+	short			__pad;
+	__u32			tclassid;
+#ifndef CONFIG_64BIT
+	atomic_t		__refcnt;	/* 32-bit offset 64 */
+#endif
 };
 
 struct dst_metrics {
@@ -250,7 +233,7 @@ static inline void dst_hold(struct dst_entry *dst)
 {
 	/*
 	 * If your kernel compilation stops here, please check
-	 * __pad_to_align_refcnt declaration in struct dst_entry
+	 * the placement of __refcnt in struct dst_entry
 	 */
 	BUILD_BUG_ON(offsetof(struct dst_entry, __refcnt) & 63);
 	WARN_ON(atomic_inc_not_zero(&dst->__refcnt) == 0);
diff --git a/include/net/erspan.h b/include/net/erspan.h
index ca94fc86865e..5daa4866412b 100644
--- a/include/net/erspan.h
+++ b/include/net/erspan.h
@@ -15,7 +15,7 @@
  *  s, Recur, Flags, Version fields only S (bit 03) is set to 1. The
  *  other fields are set to zero, so only a sequence number follows.
  *
- *  ERSPAN Type II header (8 octets [42:49])
+ *  ERSPAN Version 1 (Type II) header (8 octets [42:49])
  *  0                   1                   2                   3
  *  0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
  * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
@@ -24,11 +24,31 @@
  * |      Reserved         |                  Index                |
  * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
  *
+ *
+ *  ERSPAN Version 2 (Type III) header (12 octets [42:49])
+ *  0                   1                   2                   3
+ *  0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * |  Ver  |          VLAN         | COS |BSO|T|     Session ID    |
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * |                          Timestamp                            |
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * |             SGT               |P|    FT   |   Hw ID   |D|Gra|O|
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ *
+ *      Platform Specific SubHeader (8 octets, optional)
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * |  Platf ID |               Platform Specific Info              |
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * |                  Platform Specific Info                       |
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ *
  * GRE proto ERSPAN type II = 0x88BE, type III = 0x22EB
  */
 
-#define ERSPAN_VERSION	0x1
+#include <uapi/linux/erspan.h>
 
+#define ERSPAN_VERSION	0x1	/* ERSPAN type II */
 #define VER_MASK	0xf000
 #define VLAN_MASK	0x0fff
 #define COS_MASK	0xe000
@@ -37,6 +57,19 @@
 #define ID_MASK		0x03ff
 #define INDEX_MASK	0xfffff
 
+#define ERSPAN_VERSION2	0x2	/* ERSPAN type III */
+#define BSO_MASK	EN_MASK
+#define SGT_MASK	0xffff0000
+#define P_MASK		0x8000
+#define FT_MASK		0x7c00
+#define HWID_MASK	0x03f0
+#define DIR_MASK	0x0008
+#define GRA_MASK	0x0006
+#define O_MASK		0x0001
+
+#define HWID_OFFSET    4
+#define DIR_OFFSET     3
+
 enum erspan_encap_type {
 	ERSPAN_ENCAP_NOVLAN = 0x0,	/* originally without VLAN tag */
 	ERSPAN_ENCAP_ISL = 0x1,		/* originally ISL encapsulated */
@@ -44,18 +77,199 @@ enum erspan_encap_type {
 	ERSPAN_ENCAP_INFRAME = 0x3,	/* VLAN tag perserved in frame */
 };
 
-struct erspan_metadata {
-	__be32 index;   /* type II */
-};
+#define ERSPAN_V1_MDSIZE	4
+#define ERSPAN_V2_MDSIZE	8
 
-struct erspanhdr {
-	__be16 ver_vlan;
-#define VER_OFFSET  12
-	__be16 session_id;
-#define COS_OFFSET  13
-#define EN_OFFSET   11
-#define T_OFFSET    10
-	struct erspan_metadata md;
+struct erspan_base_hdr {
+#if defined(__LITTLE_ENDIAN_BITFIELD)
+	__u8	vlan_upper:4,
+		ver:4;
+	__u8	vlan:8;
+	__u8	session_id_upper:2,
+		t:1,
+		en:2,
+		cos:3;
+	__u8	session_id:8;
+#elif defined(__BIG_ENDIAN_BITFIELD)
+	__u8	ver: 4,
+		vlan_upper:4;
+	__u8	vlan:8;
+	__u8	cos:3,
+		en:2,
+		t:1,
+		session_id_upper:2;
+	__u8	session_id:8;
+#else
+#error "Please fix <asm/byteorder.h>"
+#endif
 };
 
+static inline void set_session_id(struct erspan_base_hdr *ershdr, u16 id)
+{
+	ershdr->session_id = id & 0xff;
+	ershdr->session_id_upper = (id >> 8) & 0x3;
+}
+
+static inline u16 get_session_id(const struct erspan_base_hdr *ershdr)
+{
+	return (ershdr->session_id_upper << 8) + ershdr->session_id;
+}
+
+static inline void set_vlan(struct erspan_base_hdr *ershdr, u16 vlan)
+{
+	ershdr->vlan = vlan & 0xff;
+	ershdr->vlan_upper = (vlan >> 8) & 0xf;
+}
+
+static inline u16 get_vlan(const struct erspan_base_hdr *ershdr)
+{
+	return (ershdr->vlan_upper << 8) + ershdr->vlan;
+}
+
+static inline void set_hwid(struct erspan_md2 *md2, u8 hwid)
+{
+	md2->hwid = hwid & 0xf;
+	md2->hwid_upper = (hwid >> 4) & 0x3;
+}
+
+static inline u8 get_hwid(const struct erspan_md2 *md2)
+{
+	return (md2->hwid_upper << 4) + md2->hwid;
+}
+
+static inline int erspan_hdr_len(int version)
+{
+	return sizeof(struct erspan_base_hdr) +
+	       (version == 1 ? ERSPAN_V1_MDSIZE : ERSPAN_V2_MDSIZE);
+}
+
+static inline u8 tos_to_cos(u8 tos)
+{
+	u8 dscp, cos;
+
+	dscp = tos >> 2;
+	cos = dscp >> 3;
+	return cos;
+}
+
+static inline void erspan_build_header(struct sk_buff *skb,
+				u32 id, u32 index,
+				bool truncate, bool is_ipv4)
+{
+	struct ethhdr *eth = (struct ethhdr *)skb->data;
+	enum erspan_encap_type enc_type;
+	struct erspan_base_hdr *ershdr;
+	struct erspan_metadata *ersmd;
+	struct qtag_prefix {
+		__be16 eth_type;
+		__be16 tci;
+	} *qp;
+	u16 vlan_tci = 0;
+	u8 tos;
+
+	tos = is_ipv4 ? ip_hdr(skb)->tos :
+			(ipv6_hdr(skb)->priority << 4) +
+			(ipv6_hdr(skb)->flow_lbl[0] >> 4);
+
+	enc_type = ERSPAN_ENCAP_NOVLAN;
+
+	/* If the mirrored packet has a vlan tag, extract the tci and
+	 * preserve the vlan header in the mirrored frame.
+	 */
+	if (eth->h_proto == htons(ETH_P_8021Q)) {
+		qp = (struct qtag_prefix *)(skb->data + 2 * ETH_ALEN);
+		vlan_tci = ntohs(qp->tci);
+		enc_type = ERSPAN_ENCAP_INFRAME;
+	}
+
+	skb_push(skb, sizeof(*ershdr) + ERSPAN_V1_MDSIZE);
+	ershdr = (struct erspan_base_hdr *)skb->data;
+	memset(ershdr, 0, sizeof(*ershdr) + ERSPAN_V1_MDSIZE);
+
+	/* Build base header */
+	ershdr->ver = ERSPAN_VERSION;
+	ershdr->cos = tos_to_cos(tos);
+	ershdr->en = enc_type;
+	ershdr->t = truncate;
+	set_vlan(ershdr, vlan_tci);
+	set_session_id(ershdr, id);
+
+	/* Build metadata */
+	ersmd = (struct erspan_metadata *)(ershdr + 1);
+	ersmd->u.index = htonl(index & INDEX_MASK);
+}
+
+/* ERSPAN GRA: timestamp granularity
+ *   00b --> granularity = 100 microseconds
+ *   01b --> granularity = 100 nanoseconds
+ *   10b --> granularity = IEEE 1588
+ * Here we only support 100 microseconds.
+ */
+static inline __be32 erspan_get_timestamp(void)
+{
+	u64 h_usecs;
+	ktime_t kt;
+
+	kt = ktime_get_real();
+	h_usecs = ktime_divns(kt, 100 * NSEC_PER_USEC);
+
+	/* The ERSPAN base header timestamp is only 32 bits,
+	 * so it wraps around after roughly 4 days.
+	 */
+	return htonl((u32)h_usecs);
+}
+
+static inline void erspan_build_header_v2(struct sk_buff *skb,
+					  u32 id, u8 direction, u16 hwid,
+					  bool truncate, bool is_ipv4)
+{
+	struct ethhdr *eth = (struct ethhdr *)skb->data;
+	struct erspan_base_hdr *ershdr;
+	struct erspan_metadata *md;
+	struct qtag_prefix {
+		__be16 eth_type;
+		__be16 tci;
+	} *qp;
+	u16 vlan_tci = 0;
+	u8 gra = 0; /* 100 usec */
+	u8 bso = 0; /* Bad/Short/Oversized */
+	u8 sgt = 0;
+	u8 tos;
+
+	tos = is_ipv4 ? ip_hdr(skb)->tos :
+			(ipv6_hdr(skb)->priority << 4) +
+			(ipv6_hdr(skb)->flow_lbl[0] >> 4);
+
+	/* Unlike v1, v2 does not have En field,
+	 * so only extract vlan tci field.
+	 */
+	if (eth->h_proto == htons(ETH_P_8021Q)) {
+		qp = (struct qtag_prefix *)(skb->data + 2 * ETH_ALEN);
+		vlan_tci = ntohs(qp->tci);
+	}
+
+	skb_push(skb, sizeof(*ershdr) + ERSPAN_V2_MDSIZE);
+	ershdr = (struct erspan_base_hdr *)skb->data;
+	memset(ershdr, 0, sizeof(*ershdr) + ERSPAN_V2_MDSIZE);
+
+	/* Build base header */
+	ershdr->ver = ERSPAN_VERSION2;
+	ershdr->cos = tos_to_cos(tos);
+	ershdr->en = bso;
+	ershdr->t = truncate;
+	set_vlan(ershdr, vlan_tci);
+	set_session_id(ershdr, id);
+
+	/* Build metadata */
+	md = (struct erspan_metadata *)(ershdr + 1);
+	md->u.md2.timestamp = erspan_get_timestamp();
+	md->u.md2.sgt = htons(sgt);
+	md->u.md2.p = 1;
+	md->u.md2.ft = 0;
+	md->u.md2.dir = direction;
+	md->u.md2.gra = gra;
+	md->u.md2.o = 0;
+	set_hwid(&md->u.md2, hwid);
+}
+
 #endif
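The 10-bit session id and 12-bit vlan straddle byte boundaries in the base header, hence the split bitfields plus the accessor pairs above. A quick round-trip sketch of those accessors:

	struct erspan_base_hdr h = {};

	set_session_id(&h, 0x2ab);		/* 10-bit id */
	set_vlan(&h, 0xabc);			/* 12-bit vlan */

	WARN_ON(get_session_id(&h) != 0x2ab);
	WARN_ON(get_vlan(&h) != 0xabc);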
diff --git a/include/net/gen_stats.h b/include/net/gen_stats.h
index 304f7aa9cc01..0304ba2ae353 100644
--- a/include/net/gen_stats.h
+++ b/include/net/gen_stats.h
@@ -49,6 +49,9 @@ int gnet_stats_copy_rate_est(struct gnet_dump *d,
 int gnet_stats_copy_queue(struct gnet_dump *d,
 			  struct gnet_stats_queue __percpu *cpu_q,
 			  struct gnet_stats_queue *q, __u32 qlen);
+void __gnet_stats_copy_queue(struct gnet_stats_queue *qstats,
+			     const struct gnet_stats_queue __percpu *cpu_q,
+			     const struct gnet_stats_queue *q, __u32 qlen);
 int gnet_stats_copy_app(struct gnet_dump *d, void *st, int len);
 
 int gnet_stats_finish_copy(struct gnet_dump *d);
diff --git a/include/net/inet_connection_sock.h b/include/net/inet_connection_sock.h
index ec72cdb5bc39..6692d67e9245 100644
--- a/include/net/inet_connection_sock.h
+++ b/include/net/inet_connection_sock.h
@@ -77,6 +77,7 @@ struct inet_connection_sock_af_ops {
  * @icsk_af_ops		   Operations which are AF_INET{4,6} specific
  * @icsk_ulp_ops	   Pluggable ULP control hook
  * @icsk_ulp_data	   ULP private data
+ * @icsk_listen_portaddr_node	hash node in the portaddr listener hashtable
  * @icsk_ca_state:	   Congestion control state
  * @icsk_retransmits:	   Number of unrecovered [RTO] timeouts
  * @icsk_pending:	   Scheduled timer event
@@ -101,6 +102,7 @@ struct inet_connection_sock {
 	const struct inet_connection_sock_af_ops *icsk_af_ops;
 	const struct tcp_ulp_ops  *icsk_ulp_ops;
 	void			  *icsk_ulp_data;
+	struct hlist_node         icsk_listen_portaddr_node;
 	unsigned int		  (*icsk_sync_mss)(struct sock *sk, u32 pmtu);
 	__u8			  icsk_ca_state:6,
 				  icsk_ca_setsockopt:1,
diff --git a/include/net/inet_hashtables.h b/include/net/inet_hashtables.h
index 2dbbbff5e1e3..9141e95529e7 100644
--- a/include/net/inet_hashtables.h
+++ b/include/net/inet_hashtables.h
@@ -111,6 +111,7 @@ struct inet_bind_hashbucket {
  */
 struct inet_listen_hashbucket {
 	spinlock_t		lock;
+	unsigned int		count;
 	struct hlist_head	head;
 };
 
@@ -132,12 +133,13 @@ struct inet_hashinfo {
 	/* Ok, let's try this, I give up, we do need a local binding
 	 * TCP hash as well as the others for fast bind/connect.
 	 */
+	struct kmem_cache		*bind_bucket_cachep;
 	struct inet_bind_hashbucket	*bhash;
-
 	unsigned int			bhash_size;
-	/* 4 bytes hole on 64 bit */
 
-	struct kmem_cache		*bind_bucket_cachep;
+	/* The 2nd listener table hashed by local port and address */
+	unsigned int			lhash2_mask;
+	struct inet_listen_hashbucket	*lhash2;
 
 	/* All the above members are written once at bootup and
 	 * never written again _or_ are predominantly read-access.
@@ -145,14 +147,25 @@ struct inet_hashinfo {
 	 * Now align to a new cache line as all the following members
 	 * might be often dirty.
 	 */
-	/* All sockets in TCP_LISTEN state will be in here.  This is the only
-	 * table where wildcard'd TCP sockets can exist.  Hash function here
-	 * is just local port number.
+	/* All sockets in TCP_LISTEN state will be in listening_hash.
+	 * This is the only table where wildcard'd TCP sockets can
+	 * exist.  listening_hash is only hashed by local port number.
+	 * If lhash2 is initialized, the same socket will also be hashed
+	 * to lhash2 by port and address.
 	 */
 	struct inet_listen_hashbucket	listening_hash[INET_LHTABLE_SIZE]
 					____cacheline_aligned_in_smp;
 };
 
+#define inet_lhash2_for_each_icsk_rcu(__icsk, list) \
+	hlist_for_each_entry_rcu(__icsk, list, icsk_listen_portaddr_node)
+
+static inline struct inet_listen_hashbucket *
+inet_lhash2_bucket(struct inet_hashinfo *h, u32 hash)
+{
+	return &h->lhash2[hash & h->lhash2_mask];
+}
+
 static inline struct inet_ehash_bucket *inet_ehash_bucket(
 	struct inet_hashinfo *hashinfo,
 	unsigned int hash)
@@ -208,6 +221,10 @@ int __inet_inherit_port(const struct sock *sk, struct sock *child);
 void inet_put_port(struct sock *sk);
 
 void inet_hashinfo_init(struct inet_hashinfo *h);
+void inet_hashinfo2_init(struct inet_hashinfo *h, const char *name,
+			 unsigned long numentries, int scale,
+			 unsigned long low_limit,
+			 unsigned long high_limit);
 
 bool inet_ehash_insert(struct sock *sk, struct sock *osk);
 bool inet_ehash_nolisten(struct sock *sk, struct sock *osk);
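lhash2 hashes listeners by (port, address) rather than port alone, shrinking the candidate list scanned on connection setup. A hedged lookup sketch, using the ipv4_portaddr_hash() helper this series adds in net/ip.h below:

static struct sock *example_lhash2_scan(struct net *net,
					struct inet_hashinfo *h,
					__be32 daddr, unsigned int hnum)
{
	u32 hash = ipv4_portaddr_hash(net, daddr, hnum);
	struct inet_listen_hashbucket *ilb2 = inet_lhash2_bucket(h, hash);
	struct inet_connection_sock *icsk;

	inet_lhash2_for_each_icsk_rcu(icsk, &ilb2->head) {
		struct sock *sk = (struct sock *)icsk;

		/* score sk against daddr/hnum/bound device here */
	}
	return NULL;
}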
diff --git a/include/net/inet_sock.h b/include/net/inet_sock.h
index 39efb968b7a4..0a671c32d6b9 100644
--- a/include/net/inet_sock.h
+++ b/include/net/inet_sock.h
@@ -291,6 +291,31 @@ static inline void inet_sk_copy_descendant(struct sock *sk_to,
 
 int inet_sk_rebuild_header(struct sock *sk);
 
+/**
+ * inet_sk_state_load - read sk->sk_state for lockless contexts
+ * @sk: socket pointer
+ *
+ * Paired with inet_sk_state_store(). Used in places where we don't hold the socket lock:
+ * tcp_diag_get_info(), tcp_get_info(), tcp_poll(), get_tcp4_sock() ...
+ */
+static inline int inet_sk_state_load(const struct sock *sk)
+{
+	/* state change might impact lockless readers. */
+	return smp_load_acquire(&sk->sk_state);
+}
+
+/**
+ * inet_sk_state_store - update sk->sk_state
+ * @sk: socket pointer
+ * @newstate: new state
+ *
+ * Paired with inet_sk_state_load(). Should be used in contexts where
+ * state change might impact lockless readers.
+ */
+void inet_sk_state_store(struct sock *sk, int newstate);
+
+void inet_sk_set_state(struct sock *sk, int state);
+
 static inline unsigned int __inet_ehashfn(const __be32 laddr,
 					  const __u16 lport,
 					  const __be32 faddr,
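inet_sk_state_load()/inet_sk_state_store() turn the sk_state transition into an acquire/release pair so diag and proc readers can run without the socket lock. A sketch of the pairing:

	/* writer side, socket lock held */
	inet_sk_state_store(sk, TCP_CLOSE_WAIT);

	/* lockless reader, e.g. tcp_diag_get_info() */
	int state = inet_sk_state_load(sk);

	if (state == TCP_LISTEN) {
		/* listener-only fields can be read safely here */
	}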
diff --git a/include/net/inet_timewait_sock.h b/include/net/inet_timewait_sock.h
index 1356fa6a7566..899495589a7e 100644
--- a/include/net/inet_timewait_sock.h
+++ b/include/net/inet_timewait_sock.h
@@ -93,8 +93,8 @@ struct inet_timewait_sock *inet_twsk_alloc(const struct sock *sk,
 					   struct inet_timewait_death_row *dr,
 					   const int state);
 
-void __inet_twsk_hashdance(struct inet_timewait_sock *tw, struct sock *sk,
-			   struct inet_hashinfo *hashinfo);
+void inet_twsk_hashdance(struct inet_timewait_sock *tw, struct sock *sk,
+			 struct inet_hashinfo *hashinfo);
 
 void __inet_twsk_schedule(struct inet_timewait_sock *tw, int timeo,
 			  bool rearm);
diff --git a/include/net/ip.h b/include/net/ip.h
index af8addbaa3c1..746abff9ce51 100644
--- a/include/net/ip.h
+++ b/include/net/ip.h
@@ -26,12 +26,14 @@
 #include <linux/ip.h>
 #include <linux/in.h>
 #include <linux/skbuff.h>
+#include <linux/jhash.h>
 
 #include <net/inet_sock.h>
 #include <net/route.h>
 #include <net/snmp.h>
 #include <net/flow.h>
 #include <net/flow_dissector.h>
+#include <net/netns/hash.h>
 
 #define IPV4_MAX_PMTU		65535U		/* RFC 2675, Section 5.1 */
 #define IPV4_MIN_MTU		68			/* RFC 791 */
@@ -522,6 +524,13 @@ static inline unsigned int ipv4_addr_hash(__be32 ip)
 	return (__force unsigned int) ip;
 }
 
+static inline u32 ipv4_portaddr_hash(const struct net *net,
+				     __be32 saddr,
+				     unsigned int port)
+{
+	return jhash_1word((__force u32)saddr, net_hash_mix(net)) ^ port;
+}
+
 bool ip_call_ra_chain(struct sk_buff *skb);
 
 /*
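A hypothetical caller, to show the intended use of the new helper with a bound socket's local address and host-order port:

static u32 sketch_sk_portaddr_hash(const struct net *net,
				   const struct inet_sock *inet)
{
	/* inet_num is the local port in host byte order */
	return ipv4_portaddr_hash(net, inet->inet_rcv_saddr, inet->inet_num);
}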
diff --git a/include/net/ip6_fib.h b/include/net/ip6_fib.h
index 10c913816032..34ec321d6a03 100644
--- a/include/net/ip6_fib.h
+++ b/include/net/ip6_fib.h
@@ -129,6 +129,8 @@ struct rt6_exception {
 
 struct rt6_info {
 	struct dst_entry		dst;
+	struct rt6_info __rcu		*rt6_next;
+	struct rt6_info			*from;
 
 	/*
 	 * Tail elements of dst_entry (__refcnt etc.)
@@ -147,6 +149,7 @@ struct rt6_info {
 	 */
 	struct list_head		rt6i_siblings;
 	unsigned int			rt6i_nsiblings;
+	atomic_t			rt6i_nh_upper_bound;
 
 	atomic_t			rt6i_ref;
 
@@ -168,19 +171,21 @@ struct rt6_info {
 	u32				rt6i_metric;
 	u32				rt6i_pmtu;
 	/* more non-fragment space at head required */
+	int				rt6i_nh_weight;
 	unsigned short			rt6i_nfheader_len;
 	u8				rt6i_protocol;
 	u8				exception_bucket_flushed:1,
-					unused:7;
+					should_flush:1,
+					unused:6;
 };
 
 #define for_each_fib6_node_rt_rcu(fn)					\
 	for (rt = rcu_dereference((fn)->leaf); rt;			\
-	     rt = rcu_dereference(rt->dst.rt6_next))
+	     rt = rcu_dereference(rt->rt6_next))
 
 #define for_each_fib6_walker_rt(w)					\
 	for (rt = (w)->leaf; rt;					\
-	     rt = rcu_dereference_protected(rt->dst.rt6_next, 1))
+	     rt = rcu_dereference_protected(rt->rt6_next, 1))
 
 static inline struct inet6_dev *ip6_dst_idev(struct dst_entry *dst)
 {
@@ -203,11 +208,9 @@ static inline void rt6_update_expires(struct rt6_info *rt0, int timeout)
 {
 	struct rt6_info *rt;
 
-	for (rt = rt0; rt && !(rt->rt6i_flags & RTF_EXPIRES);
-	     rt = (struct rt6_info *)rt->dst.from);
+	for (rt = rt0; rt && !(rt->rt6i_flags & RTF_EXPIRES); rt = rt->from);
 	if (rt && rt != rt0)
 		rt0->dst.expires = rt->dst.expires;
-
 	dst_set_expires(&rt0->dst, timeout);
 	rt0->rt6i_flags |= RTF_EXPIRES;
 }
@@ -242,8 +245,8 @@ static inline u32 rt6_get_cookie(const struct rt6_info *rt)
 	u32 cookie = 0;
 
 	if (rt->rt6i_flags & RTF_PCPU ||
-	    (unlikely(!list_empty(&rt->rt6i_uncached)) && rt->dst.from))
-		rt = (struct rt6_info *)(rt->dst.from);
+	    (unlikely(!list_empty(&rt->rt6i_uncached)) && rt->from))
+		rt = rt->from;
 
 	rt6_get_cookie_safe(rt, &cookie);
 
@@ -404,6 +407,7 @@ unsigned int fib6_tables_seq_read(struct net *net);
 int fib6_tables_dump(struct net *net, struct notifier_block *nb);
 
 void fib6_update_sernum(struct rt6_info *rt);
+void fib6_update_sernum_upto_root(struct net *net, struct rt6_info *rt);
 
 #ifdef CONFIG_IPV6_MULTIPLE_TABLES
 int fib6_rules_init(void);
diff --git a/include/net/ip6_route.h b/include/net/ip6_route.h
index 18e442ea93d8..27d23a65f3cd 100644
--- a/include/net/ip6_route.h
+++ b/include/net/ip6_route.h
@@ -66,6 +66,12 @@ static inline bool rt6_need_strict(const struct in6_addr *daddr)
 		(IPV6_ADDR_MULTICAST | IPV6_ADDR_LINKLOCAL | IPV6_ADDR_LOOPBACK);
 }
 
+static inline bool rt6_qualify_for_ecmp(const struct rt6_info *rt)
+{
+	return (rt->rt6i_flags & (RTF_GATEWAY|RTF_ADDRCONF|RTF_DYNAMIC)) ==
+	       RTF_GATEWAY;
+}
+
 void ip6_route_input(struct sk_buff *skb);
 struct dst_entry *ip6_route_input_lookup(struct net *net,
 					 struct net_device *dev,
@@ -165,10 +171,13 @@ struct rt6_rtnl_dump_arg {
 };
 
 int rt6_dump_route(struct rt6_info *rt, void *p_arg);
-void rt6_ifdown(struct net *net, struct net_device *dev);
 void rt6_mtu_change(struct net_device *dev, unsigned int mtu);
 void rt6_remove_prefsrc(struct inet6_ifaddr *ifp);
 void rt6_clean_tohost(struct net *net, struct in6_addr *gateway);
+void rt6_sync_up(struct net_device *dev, unsigned int nh_flags);
+void rt6_disable_ip(struct net_device *dev, unsigned long event);
+void rt6_sync_down_dev(struct net_device *dev, unsigned long event);
+void rt6_multipath_rebalance(struct rt6_info *rt);
 
 static inline const struct rt6_info *skb_rt6_info(const struct sk_buff *skb)
 {
diff --git a/include/net/ip6_tunnel.h b/include/net/ip6_tunnel.h
index d66f70f63734..236e40ba06bf 100644
--- a/include/net/ip6_tunnel.h
+++ b/include/net/ip6_tunnel.h
@@ -36,6 +36,10 @@ struct __ip6_tnl_parm {
 	__be32			o_key;
 
 	__u32			fwmark;
+	__u32			index;	/* ERSPAN type II index */
+	__u8			erspan_ver;	/* ERSPAN version */
+	__u8			dir;	/* direction */
+	__u16			hwid;	/* hwid */
 };
 
 /* IPv6 tunnel */
diff --git a/include/net/ip_tunnels.h b/include/net/ip_tunnels.h
index 24628f6b09bf..1f16773cfd76 100644
--- a/include/net/ip_tunnels.h
+++ b/include/net/ip_tunnels.h
@@ -116,8 +116,11 @@ struct ip_tunnel {
 	u32		o_seqno;	/* The last output seqno */
 	int		tun_hlen;	/* Precalculated header length */
 
-	/* This field used only by ERSPAN */
+	/* These four fields used only by ERSPAN */
 	u32		index;		/* ERSPAN type II index */
+	u8		erspan_ver;	/* ERSPAN version */
+	u8		dir;		/* ERSPAN direction */
+	u16		hwid;		/* ERSPAN hardware ID */
 
 	struct dst_cache dst_cache;
 
diff --git a/include/net/ip_vs.h b/include/net/ip_vs.h
index ff68cf288f9b..eb0bec043c96 100644
--- a/include/net/ip_vs.h
+++ b/include/net/ip_vs.h
@@ -69,8 +69,7 @@ struct ip_vs_iphdr {
 };
 
 static inline void *frag_safe_skb_hp(const struct sk_buff *skb, int offset,
-				      int len, void *buffer,
-				      const struct ip_vs_iphdr *ipvsh)
+				      int len, void *buffer)
 {
 	return skb_header_pointer(skb, offset, len, buffer);
 }
diff --git a/include/net/ipv6.h b/include/net/ipv6.h
index 221238254eb7..8606c9113d3f 100644
--- a/include/net/ipv6.h
+++ b/include/net/ipv6.h
@@ -22,6 +22,7 @@
 #include <net/flow.h>
 #include <net/flow_dissector.h>
 #include <net/snmp.h>
+#include <net/netns/hash.h>
 
 #define SIN6_LEN_RFC2133	24
 
@@ -674,6 +675,22 @@ static inline bool ipv6_addr_v4mapped(const struct in6_addr *a)
 					cpu_to_be32(0x0000ffff))) == 0UL;
 }
 
+static inline u32 ipv6_portaddr_hash(const struct net *net,
+				     const struct in6_addr *addr6,
+				     unsigned int port)
+{
+	unsigned int hash, mix = net_hash_mix(net);
+
+	if (ipv6_addr_any(addr6))
+		hash = jhash_1word(0, mix);
+	else if (ipv6_addr_v4mapped(addr6))
+		hash = jhash_1word((__force u32)addr6->s6_addr32[3], mix);
+	else
+		hash = jhash2((__force u32 *)addr6->s6_addr32, 4, mix);
+
+	return hash ^ port;
+}
+
 /*
  * Check for a RFC 4843 ORCHID address
  * (Overlay Routable Cryptographic Hash Identifiers)
@@ -953,6 +970,8 @@ static inline struct sk_buff *ip6_finish_skb(struct sock *sk)
 			      &inet6_sk(sk)->cork);
 }
 
+unsigned int ip6_dst_mtu_forward(const struct dst_entry *dst);
+
 int ip6_dst_lookup(struct net *net, struct sock *sk, struct dst_entry **dst,
 		   struct flowi6 *fl6);
 struct dst_entry *ip6_dst_lookup_flow(const struct sock *sk, struct flowi6 *fl6,
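Note the v4-mapped branch hashes only s6_addr32[3], so ::ffff:a.b.c.d lands in the same bucket that ipv4_portaddr_hash() picks for a.b.c.d. A minimal sketch (hypothetical helper):

static u32 sketch_v6_wildcard_hash(const struct net *net, unsigned int port)
{
	/* in6addr_any takes the jhash_1word(0, mix) branch above */
	return ipv6_portaddr_hash(net, &in6addr_any, port);
}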
diff --git a/include/net/mac80211.h b/include/net/mac80211.h
index eec143cca1c0..906e90223066 100644
--- a/include/net/mac80211.h
+++ b/include/net/mac80211.h
@@ -1552,6 +1552,9 @@ struct wireless_dev *ieee80211_vif_to_wdev(struct ieee80211_vif *vif);
  * @IEEE80211_KEY_FLAG_RESERVE_TAILROOM: This flag should be set by the
  *	driver for a key to indicate that sufficient tailroom must always
  *	be reserved for ICV or MIC, even when HW encryption is enabled.
+ * @IEEE80211_KEY_FLAG_PUT_MIC_SPACE: This flag should be set by the driver for
+ *	a TKIP key if it only requires MIC space. Do not set together with
+ *	@IEEE80211_KEY_FLAG_GENERATE_MMIC on the same key.
  */
 enum ieee80211_key_flags {
 	IEEE80211_KEY_FLAG_GENERATE_IV_MGMT	= BIT(0),
@@ -1562,6 +1565,7 @@ enum ieee80211_key_flags {
 	IEEE80211_KEY_FLAG_PUT_IV_SPACE		= BIT(5),
 	IEEE80211_KEY_FLAG_RX_MGMT		= BIT(6),
 	IEEE80211_KEY_FLAG_RESERVE_TAILROOM	= BIT(7),
+	IEEE80211_KEY_FLAG_PUT_MIC_SPACE	= BIT(8),
 };
 
 /**
@@ -1593,8 +1597,8 @@ struct ieee80211_key_conf {
 	u8 icv_len;
 	u8 iv_len;
 	u8 hw_key_idx;
-	u8 flags;
 	s8 keyidx;
+	u16 flags;
 	u8 keylen;
 	u8 key[0];
 };
@@ -2056,6 +2060,9 @@ struct ieee80211_txq {
  *	The stack will not do fragmentation.
  *	The callback for @set_frag_threshold should be set as well.
  *
+ * @IEEE80211_HW_SUPPORTS_TDLS_BUFFER_STA: Hardware supports buffer STA on
+ *	TDLS links.
+ *
  * @NUM_IEEE80211_HW_FLAGS: number of hardware flags, used for sizing arrays
  */
 enum ieee80211_hw_flags {
@@ -2098,6 +2105,7 @@ enum ieee80211_hw_flags {
 	IEEE80211_HW_TX_FRAG_LIST,
 	IEEE80211_HW_REPORTS_LOW_ACK,
 	IEEE80211_HW_SUPPORTS_TX_FRAG,
+	IEEE80211_HW_SUPPORTS_TDLS_BUFFER_STA,
 
 	/* keep last, obviously */
 	NUM_IEEE80211_HW_FLAGS
diff --git a/include/net/net_namespace.h b/include/net/net_namespace.h
index 049008493faf..f306b2aa15a4 100644
--- a/include/net/net_namespace.h
+++ b/include/net/net_namespace.h
@@ -51,7 +51,7 @@ struct net {
 	refcount_t		passive;	/* To decide when the network
 						 * namespace should be freed.
 						 */
-	atomic_t		count;		/* To decided when the network
+	refcount_t		count;		/* To decide when the network
 						 *  namespace should be shut down.
 						 */
 	spinlock_t		rules_mod_lock;
@@ -195,7 +195,7 @@ void __put_net(struct net *net);
 
 static inline struct net *get_net(struct net *net)
 {
-	atomic_inc(&net->count);
+	refcount_inc(&net->count);
 	return net;
 }
 
@@ -206,14 +206,14 @@ static inline struct net *maybe_get_net(struct net *net)
 	 * exists.  If the reference count is zero this
 	 * function fails and returns NULL.
 	 */
-	if (!atomic_inc_not_zero(&net->count))
+	if (!refcount_inc_not_zero(&net->count))
 		net = NULL;
 	return net;
 }
 
 static inline void put_net(struct net *net)
 {
-	if (atomic_dec_and_test(&net->count))
+	if (refcount_dec_and_test(&net->count))
 		__put_net(net);
 }
 
@@ -225,7 +225,7 @@ int net_eq(const struct net *net1, const struct net *net2)
 
 static inline int check_net(const struct net *net)
 {
-	return atomic_read(&net->count) != 0;
+	return refcount_read(&net->count) != 0;
 }
 
 void net_drop_ns(void *);
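Caller patterns are unchanged by the refcount_t conversion; the gain is that refcount_t saturates and warns on over/underflow instead of silently wrapping. A hypothetical user:

static void sketch_use_net(struct net *net)
{
	struct net *held = maybe_get_net(net);	/* NULL once count hit zero */

	if (held)
		put_net(held);			/* last put calls __put_net() */
}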
diff --git a/include/net/netfilter/ipv4/nf_conntrack_ipv4.h b/include/net/netfilter/ipv4/nf_conntrack_ipv4.h
index 4ed1040bbe4a..73f825732326 100644
--- a/include/net/netfilter/ipv4/nf_conntrack_ipv4.h
+++ b/include/net/netfilter/ipv4/nf_conntrack_ipv4.h
@@ -13,17 +13,17 @@
 
 const extern struct nf_conntrack_l3proto nf_conntrack_l3proto_ipv4;
 
-extern struct nf_conntrack_l4proto nf_conntrack_l4proto_tcp4;
-extern struct nf_conntrack_l4proto nf_conntrack_l4proto_udp4;
-extern struct nf_conntrack_l4proto nf_conntrack_l4proto_icmp;
+extern const struct nf_conntrack_l4proto nf_conntrack_l4proto_tcp4;
+extern const struct nf_conntrack_l4proto nf_conntrack_l4proto_udp4;
+extern const struct nf_conntrack_l4proto nf_conntrack_l4proto_icmp;
 #ifdef CONFIG_NF_CT_PROTO_DCCP
-extern struct nf_conntrack_l4proto nf_conntrack_l4proto_dccp4;
+extern const struct nf_conntrack_l4proto nf_conntrack_l4proto_dccp4;
 #endif
 #ifdef CONFIG_NF_CT_PROTO_SCTP
-extern struct nf_conntrack_l4proto nf_conntrack_l4proto_sctp4;
+extern const struct nf_conntrack_l4proto nf_conntrack_l4proto_sctp4;
 #endif
 #ifdef CONFIG_NF_CT_PROTO_UDPLITE
-extern struct nf_conntrack_l4proto nf_conntrack_l4proto_udplite4;
+extern const struct nf_conntrack_l4proto nf_conntrack_l4proto_udplite4;
 #endif
 
 int nf_conntrack_ipv4_compat_init(void);
diff --git a/include/net/netfilter/ipv6/nf_conntrack_ipv6.h b/include/net/netfilter/ipv6/nf_conntrack_ipv6.h
index 9cd55be95853..effa8dfba68c 100644
--- a/include/net/netfilter/ipv6/nf_conntrack_ipv6.h
+++ b/include/net/netfilter/ipv6/nf_conntrack_ipv6.h
@@ -4,17 +4,17 @@
 
 extern const struct nf_conntrack_l3proto nf_conntrack_l3proto_ipv6;
 
-extern struct nf_conntrack_l4proto nf_conntrack_l4proto_tcp6;
-extern struct nf_conntrack_l4proto nf_conntrack_l4proto_udp6;
-extern struct nf_conntrack_l4proto nf_conntrack_l4proto_icmpv6;
+extern const struct nf_conntrack_l4proto nf_conntrack_l4proto_tcp6;
+extern const struct nf_conntrack_l4proto nf_conntrack_l4proto_udp6;
+extern const struct nf_conntrack_l4proto nf_conntrack_l4proto_icmpv6;
 #ifdef CONFIG_NF_CT_PROTO_DCCP
-extern struct nf_conntrack_l4proto nf_conntrack_l4proto_dccp6;
+extern const struct nf_conntrack_l4proto nf_conntrack_l4proto_dccp6;
 #endif
 #ifdef CONFIG_NF_CT_PROTO_SCTP
-extern struct nf_conntrack_l4proto nf_conntrack_l4proto_sctp6;
+extern const struct nf_conntrack_l4proto nf_conntrack_l4proto_sctp6;
 #endif
 #ifdef CONFIG_NF_CT_PROTO_UDPLITE
-extern struct nf_conntrack_l4proto nf_conntrack_l4proto_udplite6;
+extern const struct nf_conntrack_l4proto nf_conntrack_l4proto_udplite6;
 #endif
 
 #include <linux/sysctl.h>
diff --git a/include/net/netfilter/nf_conntrack_count.h b/include/net/netfilter/nf_conntrack_count.h
new file mode 100644
index 000000000000..adf8db44cf86
--- /dev/null
+++ b/include/net/netfilter/nf_conntrack_count.h
@@ -0,0 +1,17 @@
+#ifndef _NF_CONNTRACK_COUNT_H
+#define _NF_CONNTRACK_COUNT_H
+
+struct nf_conncount_data;
+
+struct nf_conncount_data *nf_conncount_init(struct net *net, unsigned int family,
+					    unsigned int keylen);
+void nf_conncount_destroy(struct net *net, unsigned int family,
+			  struct nf_conncount_data *data);
+
+unsigned int nf_conncount_count(struct net *net,
+				struct nf_conncount_data *data,
+				const u32 *key,
+				unsigned int family,
+				const struct nf_conntrack_tuple *tuple,
+				const struct nf_conntrack_zone *zone);
+#endif
diff --git a/include/net/netfilter/nf_conntrack_l4proto.h b/include/net/netfilter/nf_conntrack_l4proto.h
index 7ef56c13698a..a7220eef9aee 100644
--- a/include/net/netfilter/nf_conntrack_l4proto.h
+++ b/include/net/netfilter/nf_conntrack_l4proto.h
@@ -27,6 +27,9 @@ struct nf_conntrack_l4proto {
 	/* Resolve clashes on insertion races. */
 	bool allow_clash;
 
+	/* protoinfo nlattr size, closes a hole */
+	u16 nlattr_size;
+
 	/* Try to fill in the third arg: dataoff is offset past network protocol
            hdr.  Return true if possible. */
 	bool (*pkt_to_tuple)(const struct sk_buff *skb, unsigned int dataoff,
@@ -66,8 +69,6 @@ struct nf_conntrack_l4proto {
 	/* convert protoinfo to nfnetink attributes */
 	int (*to_nlattr)(struct sk_buff *skb, struct nlattr *nla,
 			 struct nf_conn *ct);
-	/* Calculate protoinfo nlattr size */
-	int (*nlattr_size)(void);
 
 	/* convert nfnetlink attributes to protoinfo */
 	int (*from_nlattr)(struct nlattr *tb[], struct nf_conn *ct);
@@ -80,8 +81,6 @@ struct nf_conntrack_l4proto {
 			       struct nf_conntrack_tuple *t);
 	const struct nla_policy *nla_policy;
 
-	size_t nla_size;
-
 #if IS_ENABLED(CONFIG_NF_CT_NETLINK_TIMEOUT)
 	struct {
 		int (*nlattr_to_obj)(struct nlattr *tb[],
@@ -109,7 +108,7 @@ struct nf_conntrack_l4proto {
 };
 
 /* Existing built-in generic protocol */
-extern struct nf_conntrack_l4proto nf_conntrack_l4proto_generic;
+extern const struct nf_conntrack_l4proto nf_conntrack_l4proto_generic;
 
 #define MAX_NF_CT_PROTO 256
 
@@ -126,18 +125,18 @@ int nf_ct_l4proto_pernet_register_one(struct net *net,
 void nf_ct_l4proto_pernet_unregister_one(struct net *net,
 				const struct nf_conntrack_l4proto *proto);
 int nf_ct_l4proto_pernet_register(struct net *net,
-				  struct nf_conntrack_l4proto *const proto[],
+				  const struct nf_conntrack_l4proto *const proto[],
 				  unsigned int num_proto);
 void nf_ct_l4proto_pernet_unregister(struct net *net,
-				struct nf_conntrack_l4proto *const proto[],
+				const struct nf_conntrack_l4proto *const proto[],
 				unsigned int num_proto);
 
 /* Protocol global registration. */
-int nf_ct_l4proto_register_one(struct nf_conntrack_l4proto *proto);
+int nf_ct_l4proto_register_one(const struct nf_conntrack_l4proto *proto);
 void nf_ct_l4proto_unregister_one(const struct nf_conntrack_l4proto *proto);
-int nf_ct_l4proto_register(struct nf_conntrack_l4proto *proto[],
+int nf_ct_l4proto_register(const struct nf_conntrack_l4proto * const proto[],
 			   unsigned int num_proto);
-void nf_ct_l4proto_unregister(struct nf_conntrack_l4proto *proto[],
+void nf_ct_l4proto_unregister(const struct nf_conntrack_l4proto * const proto[],
 			      unsigned int num_proto);
 
 /* Generic netlink helpers */
diff --git a/include/net/netfilter/nf_flow_table.h b/include/net/netfilter/nf_flow_table.h
new file mode 100644
index 000000000000..b22b22082733
--- /dev/null
+++ b/include/net/netfilter/nf_flow_table.h
@@ -0,0 +1,122 @@
+#ifndef _NF_FLOW_TABLE_H
+#define _NF_FLOW_TABLE_H
+
+#include <linux/in.h>
+#include <linux/in6.h>
+#include <linux/netdevice.h>
+#include <linux/rhashtable.h>
+#include <linux/rcupdate.h>
+#include <net/dst.h>
+
+struct nf_flowtable;
+
+struct nf_flowtable_type {
+	struct list_head		list;
+	int				family;
+	void				(*gc)(struct work_struct *work);
+	const struct rhashtable_params	*params;
+	nf_hookfn			*hook;
+	struct module			*owner;
+};
+
+struct nf_flowtable {
+	struct rhashtable		rhashtable;
+	const struct nf_flowtable_type	*type;
+	struct delayed_work		gc_work;
+};
+
+enum flow_offload_tuple_dir {
+	FLOW_OFFLOAD_DIR_ORIGINAL,
+	FLOW_OFFLOAD_DIR_REPLY,
+	__FLOW_OFFLOAD_DIR_MAX		= FLOW_OFFLOAD_DIR_REPLY,
+};
+#define FLOW_OFFLOAD_DIR_MAX	(__FLOW_OFFLOAD_DIR_MAX + 1)
+
+struct flow_offload_tuple {
+	union {
+		struct in_addr		src_v4;
+		struct in6_addr		src_v6;
+	};
+	union {
+		struct in_addr		dst_v4;
+		struct in6_addr		dst_v6;
+	};
+	struct {
+		__be16			src_port;
+		__be16			dst_port;
+	};
+
+	int				iifidx;
+
+	u8				l3proto;
+	u8				l4proto;
+	u8				dir;
+
+	int				oifidx;
+
+	struct dst_entry		*dst_cache;
+};
+
+struct flow_offload_tuple_rhash {
+	struct rhash_head		node;
+	struct flow_offload_tuple	tuple;
+};
+
+#define FLOW_OFFLOAD_SNAT	0x1
+#define FLOW_OFFLOAD_DNAT	0x2
+#define FLOW_OFFLOAD_DYING	0x4
+
+struct flow_offload {
+	struct flow_offload_tuple_rhash		tuplehash[FLOW_OFFLOAD_DIR_MAX];
+	u32					flags;
+	union {
+		/* Your private driver data here. */
+		u32		timeout;
+	};
+};
+
+#define NF_FLOW_TIMEOUT (30 * HZ)
+
+struct nf_flow_route {
+	struct {
+		struct dst_entry	*dst;
+		int			ifindex;
+	} tuple[FLOW_OFFLOAD_DIR_MAX];
+};
+
+struct flow_offload *flow_offload_alloc(struct nf_conn *ct,
+					struct nf_flow_route *route);
+void flow_offload_free(struct flow_offload *flow);
+
+int flow_offload_add(struct nf_flowtable *flow_table, struct flow_offload *flow);
+void flow_offload_del(struct nf_flowtable *flow_table, struct flow_offload *flow);
+struct flow_offload_tuple_rhash *flow_offload_lookup(struct nf_flowtable *flow_table,
+						     struct flow_offload_tuple *tuple);
+int nf_flow_table_iterate(struct nf_flowtable *flow_table,
+			  void (*iter)(struct flow_offload *flow, void *data),
+			  void *data);
+void nf_flow_offload_work_gc(struct work_struct *work);
+extern const struct rhashtable_params nf_flow_offload_rhash_params;
+
+void flow_offload_dead(struct flow_offload *flow);
+
+int nf_flow_snat_port(const struct flow_offload *flow,
+		      struct sk_buff *skb, unsigned int thoff,
+		      u8 protocol, enum flow_offload_tuple_dir dir);
+int nf_flow_dnat_port(const struct flow_offload *flow,
+		      struct sk_buff *skb, unsigned int thoff,
+		      u8 protocol, enum flow_offload_tuple_dir dir);
+
+struct flow_ports {
+	__be16 source, dest;
+};
+
+unsigned int nf_flow_offload_ip_hook(void *priv, struct sk_buff *skb,
+				     const struct nf_hook_state *state);
+unsigned int nf_flow_offload_ipv6_hook(void *priv, struct sk_buff *skb,
+				       const struct nf_hook_state *state);
+
+#define MODULE_ALIAS_NF_FLOWTABLE(family)	\
+	MODULE_ALIAS("nf-flowtable-" __stringify(family))
+
+#endif /* _NF_FLOW_TABLE_H */
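A sketch of the intended fast path around flow_offload_lookup() (hypothetical, not part of this header):

static bool sketch_flow_fastpath(struct nf_flowtable *ft,
				 struct flow_offload_tuple *tuple)
{
	struct flow_offload_tuple_rhash *th;

	th = flow_offload_lookup(ft, tuple);
	if (!th)
		return false;	/* miss: fall back to the regular ruleset */

	/* th->tuple.dir gives the matching direction; NAT and header
	 * fixups would be applied here before transmitting.
	 */
	return true;
}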
diff --git a/include/net/netfilter/nf_queue.h b/include/net/netfilter/nf_queue.h
index 814058d0f167..a50a69f5334c 100644
--- a/include/net/netfilter/nf_queue.h
+++ b/include/net/netfilter/nf_queue.h
@@ -25,7 +25,7 @@ struct nf_queue_entry {
 struct nf_queue_handler {
 	int		(*outfn)(struct nf_queue_entry *entry,
 				 unsigned int queuenum);
-	unsigned int	(*nf_hook_drop)(struct net *net);
+	void		(*nf_hook_drop)(struct net *net);
 };
 
 void nf_register_queue_handler(struct net *net, const struct nf_queue_handler *qh);
diff --git a/include/net/netfilter/nf_tables.h b/include/net/netfilter/nf_tables.h
index fecc6112c768..663b015dace5 100644
--- a/include/net/netfilter/nf_tables.h
+++ b/include/net/netfilter/nf_tables.h
@@ -9,6 +9,7 @@
 #include <linux/netfilter/x_tables.h>
 #include <linux/netfilter/nf_tables.h>
 #include <linux/u64_stats_sync.h>
+#include <net/netfilter/nf_flow_table.h>
 #include <net/netlink.h>
 
 #define NFT_JUMP_STACK_SIZE	16
@@ -54,8 +55,8 @@ static inline void nft_set_pktinfo(struct nft_pktinfo *pkt,
 	pkt->xt.state = state;
 }
 
-static inline void nft_set_pktinfo_proto_unspec(struct nft_pktinfo *pkt,
-						struct sk_buff *skb)
+static inline void nft_set_pktinfo_unspec(struct nft_pktinfo *pkt,
+					  struct sk_buff *skb)
 {
 	pkt->tprot_set = false;
 	pkt->tprot = 0;
@@ -63,14 +64,6 @@ static inline void nft_set_pktinfo_proto_unspec(struct nft_pktinfo *pkt,
 	pkt->xt.fragoff = 0;
 }
 
-static inline void nft_set_pktinfo_unspec(struct nft_pktinfo *pkt,
-					  struct sk_buff *skb,
-					  const struct nf_hook_state *state)
-{
-	nft_set_pktinfo(pkt, skb, state);
-	nft_set_pktinfo_proto_unspec(pkt, skb);
-}
-
 /**
  * 	struct nft_verdict - nf_tables verdict
  *
@@ -150,22 +143,22 @@ static inline void nft_data_debug(const struct nft_data *data)
  *	struct nft_ctx - nf_tables rule/set context
  *
  *	@net: net namespace
- * 	@afi: address family info
  * 	@table: the table the chain is contained in
  * 	@chain: the chain the rule is contained in
  *	@nla: netlink attributes
  *	@portid: netlink portID of the original message
  *	@seq: netlink sequence number
+ *	@family: protocol family
  *	@report: notify via unicast netlink message
  */
 struct nft_ctx {
 	struct net			*net;
-	struct nft_af_info		*afi;
 	struct nft_table		*table;
 	struct nft_chain		*chain;
 	const struct nlattr * const 	*nla;
 	u32				portid;
 	u32				seq;
+	u8				family;
 	bool				report;
 };
 
@@ -381,6 +374,7 @@ void nft_unregister_set(struct nft_set_type *type);
  *	@list: table set list node
  *	@bindings: list of set bindings
  * 	@name: name of the set
+ *	@handle: unique handle of the set
  * 	@ktype: key type (numeric type defined by userspace, not used in the kernel)
  * 	@dtype: data type (verdict or numeric type defined by userspace)
  * 	@objtype: object type (see NFT_OBJECT_* definitions)
@@ -403,6 +397,7 @@ struct nft_set {
 	struct list_head		list;
 	struct list_head		bindings;
 	char				*name;
+	u64				handle;
 	u32				ktype;
 	u32				dtype;
 	u32				objtype;
@@ -424,6 +419,11 @@ struct nft_set {
 		__attribute__((aligned(__alignof__(u64))));
 };
 
+static inline bool nft_set_is_anonymous(const struct nft_set *set)
+{
+	return set->flags & NFT_SET_ANONYMOUS;
+}
+
 static inline void *nft_set_priv(const struct nft_set *set)
 {
 	return (void *)set->data;
@@ -883,7 +883,7 @@ enum nft_chain_type {
  * 	@family: address family
  * 	@owner: module owner
  * 	@hook_mask: mask of valid hooks
- * 	@hooks: hookfn overrides
+ * 	@hooks: array of hook functions
  */
 struct nf_chain_type {
 	const char			*name;
@@ -905,8 +905,6 @@ struct nft_stats {
 	struct u64_stats_sync	syncp;
 };
 
-#define NFT_HOOK_OPS_MAX		2
-
 /**
  *	struct nft_base_chain - nf_tables base chain
  *
@@ -918,7 +916,7 @@ struct nft_stats {
  *	@dev_name: device name that this base chain is attached to (if any)
  */
 struct nft_base_chain {
-	struct nf_hook_ops		ops[NFT_HOOK_OPS_MAX];
+	struct nf_hook_ops		ops;
 	const struct nf_chain_type	*type;
 	u8				policy;
 	u8				flags;
@@ -948,10 +946,13 @@ unsigned int nft_do_chain(struct nft_pktinfo *pkt, void *priv);
  *	@chains: chains in the table
  *	@sets: sets in the table
  *	@objects: stateful objects in the table
+ *	@flowtables: flow tables in the table
  *	@hgenerator: handle generator state
+ *	@handle: table handle
  *	@use: number of chain references to this table
  *	@flags: table flag (see enum nft_table_flags)
  *	@genmask: generation mask
+ *	@family: address family
  *	@name: name of the table
  */
 struct nft_table {
@@ -959,46 +960,16 @@ struct nft_table {
 	struct list_head		chains;
 	struct list_head		sets;
 	struct list_head		objects;
+	struct list_head		flowtables;
 	u64				hgenerator;
+	u64				handle;
 	u32				use;
-	u16				flags:14,
+	u16				family:6,
+					flags:8,
 					genmask:2;
 	char				*name;
 };
 
-enum nft_af_flags {
-	NFT_AF_NEEDS_DEV	= (1 << 0),
-};
-
-/**
- *	struct nft_af_info - nf_tables address family info
- *
- *	@list: used internally
- *	@family: address family
- *	@nhooks: number of hooks in this family
- *	@owner: module owner
- *	@tables: used internally
- *	@flags: family flags
- *	@nops: number of hook ops in this family
- *	@hook_ops_init: initialization function for chain hook ops
- *	@hooks: hookfn overrides for packet validation
- */
-struct nft_af_info {
-	struct list_head		list;
-	int				family;
-	unsigned int			nhooks;
-	struct module			*owner;
-	struct list_head		tables;
-	u32				flags;
-	unsigned int			nops;
-	void				(*hook_ops_init)(struct nf_hook_ops *,
-							 unsigned int);
-	nf_hookfn			*hooks[NF_MAX_HOOKS];
-};
-
-int nft_register_afinfo(struct net *, struct nft_af_info *);
-void nft_unregister_afinfo(struct net *, struct nft_af_info *);
-
 int nft_register_chain_type(const struct nf_chain_type *);
 void nft_unregister_chain_type(const struct nf_chain_type *);
 
@@ -1016,9 +987,9 @@ int nft_verdict_dump(struct sk_buff *skb, int type,
  *	@name: name of this stateful object
  *	@genmask: generation mask
  *	@use: number of references to this stateful object
- * 	@data: object data, layout depends on type
+ *	@handle: unique object handle
  *	@ops: object operations
- *	@data: pointer to object data
+ * 	@data: object data, layout depends on type
  */
 struct nft_object {
 	struct list_head		list;
@@ -1026,6 +997,7 @@ struct nft_object {
 	struct nft_table		*table;
 	u32				genmask:2,
 					use:30;
+	u64				handle;
 	/* runtime data below here */
 	const struct nft_object_ops	*ops ____cacheline_aligned;
 	unsigned char			data[]
@@ -1097,6 +1069,46 @@ int nft_register_obj(struct nft_object_type *obj_type);
 void nft_unregister_obj(struct nft_object_type *obj_type);
 
 /**
+ *	struct nft_flowtable - nf_tables flow table
+ *
+ *	@list: flow table list node in table list
+ * 	@table: the table the flow table is contained in
+ *	@name: name of this flow table
+ *	@hooknum: hook number
+ *	@priority: hook priority
+ *	@ops_len: number of hooks in array
+ *	@genmask: generation mask
+ *	@use: number of references to this flow table
+ * 	@handle: unique object handle
+ *	@data: rhashtable and garbage collector
+ * 	@ops: array of hooks
+ */
+struct nft_flowtable {
+	struct list_head		list;
+	struct nft_table		*table;
+	char				*name;
+	int				hooknum;
+	int				priority;
+	int				ops_len;
+	u32				genmask:2,
+					use:30;
+	u64				handle;
+	/* runtime data below here */
+	struct nf_hook_ops		*ops ____cacheline_aligned;
+	struct nf_flowtable		data;
+};
+
+struct nft_flowtable *nf_tables_flowtable_lookup(const struct nft_table *table,
+						 const struct nlattr *nla,
+						 u8 genmask);
+void nft_flow_table_iterate(struct net *net,
+			    void (*iter)(struct nf_flowtable *flowtable, void *data),
+			    void *data);
+
+void nft_register_flowtable_type(struct nf_flowtable_type *type);
+void nft_unregister_flowtable_type(struct nf_flowtable_type *type);
+
+/**
  *	struct nft_traceinfo - nft tracing information and state
  *
  *	@pkt: pktinfo currently processed
@@ -1125,12 +1137,6 @@ void nft_trace_init(struct nft_traceinfo *info, const struct nft_pktinfo *pkt,
 
 void nft_trace_notify(struct nft_traceinfo *info);
 
-#define nft_dereference(p)					\
-	nfnl_dereference(p, NFNL_SUBSYS_NFTABLES)
-
-#define MODULE_ALIAS_NFT_FAMILY(family)	\
-	MODULE_ALIAS("nft-afinfo-" __stringify(family))
-
 #define MODULE_ALIAS_NFT_CHAIN(family, name) \
 	MODULE_ALIAS("nft-chain-" __stringify(family) "-" name)
 
@@ -1332,4 +1338,11 @@ struct nft_trans_obj {
 #define nft_trans_obj(trans)	\
 	(((struct nft_trans_obj *)trans->data)->obj)
 
+struct nft_trans_flowtable {
+	struct nft_flowtable		*flowtable;
+};
+
+#define nft_trans_flowtable(trans)	\
+	(((struct nft_trans_flowtable *)trans->data)->flowtable)
+
 #endif /* _NET_NF_TABLES_H */
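With struct nft_af_info gone, a sketch of how call sites change (hypothetical helper): family checks read ctx->family, or table->family, directly instead of going through ctx->afi->family.

static bool sketch_ctx_is_inet(const struct nft_ctx *ctx)
{
	return ctx->family == NFPROTO_INET;
}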
diff --git a/include/net/netfilter/nf_tables_ipv4.h b/include/net/netfilter/nf_tables_ipv4.h
index f0896ba456c4..ed7b511f0a59 100644
--- a/include/net/netfilter/nf_tables_ipv4.h
+++ b/include/net/netfilter/nf_tables_ipv4.h
@@ -5,15 +5,11 @@
 #include <net/netfilter/nf_tables.h>
 #include <net/ip.h>
 
-static inline void
-nft_set_pktinfo_ipv4(struct nft_pktinfo *pkt,
-		     struct sk_buff *skb,
-		     const struct nf_hook_state *state)
+static inline void nft_set_pktinfo_ipv4(struct nft_pktinfo *pkt,
+					struct sk_buff *skb)
 {
 	struct iphdr *ip;
 
-	nft_set_pktinfo(pkt, skb, state);
-
 	ip = ip_hdr(pkt->skb);
 	pkt->tprot_set = true;
 	pkt->tprot = ip->protocol;
@@ -21,10 +17,8 @@ nft_set_pktinfo_ipv4(struct nft_pktinfo *pkt,
 	pkt->xt.fragoff = ntohs(ip->frag_off) & IP_OFFSET;
 }
 
-static inline int
-__nft_set_pktinfo_ipv4_validate(struct nft_pktinfo *pkt,
-				struct sk_buff *skb,
-				const struct nf_hook_state *state)
+static inline int __nft_set_pktinfo_ipv4_validate(struct nft_pktinfo *pkt,
+						  struct sk_buff *skb)
 {
 	struct iphdr *iph, _iph;
 	u32 len, thoff;
@@ -52,16 +46,11 @@ __nft_set_pktinfo_ipv4_validate(struct nft_pktinfo *pkt,
 	return 0;
 }
 
-static inline void
-nft_set_pktinfo_ipv4_validate(struct nft_pktinfo *pkt,
-			      struct sk_buff *skb,
-			      const struct nf_hook_state *state)
+static inline void nft_set_pktinfo_ipv4_validate(struct nft_pktinfo *pkt,
+						 struct sk_buff *skb)
 {
-	nft_set_pktinfo(pkt, skb, state);
-	if (__nft_set_pktinfo_ipv4_validate(pkt, skb, state) < 0)
-		nft_set_pktinfo_proto_unspec(pkt, skb);
+	if (__nft_set_pktinfo_ipv4_validate(pkt, skb) < 0)
+		nft_set_pktinfo_unspec(pkt, skb);
 }
 
-extern struct nft_af_info nft_af_ipv4;
-
 #endif
diff --git a/include/net/netfilter/nf_tables_ipv6.h b/include/net/netfilter/nf_tables_ipv6.h
index b8065b72f56e..dabe6fdb553a 100644
--- a/include/net/netfilter/nf_tables_ipv6.h
+++ b/include/net/netfilter/nf_tables_ipv6.h
@@ -5,20 +5,16 @@
 #include <linux/netfilter_ipv6/ip6_tables.h>
 #include <net/ipv6.h>
 
-static inline void
-nft_set_pktinfo_ipv6(struct nft_pktinfo *pkt,
-		     struct sk_buff *skb,
-		     const struct nf_hook_state *state)
+static inline void nft_set_pktinfo_ipv6(struct nft_pktinfo *pkt,
+					struct sk_buff *skb)
 {
 	unsigned int flags = IP6_FH_F_AUTH;
 	int protohdr, thoff = 0;
 	unsigned short frag_off;
 
-	nft_set_pktinfo(pkt, skb, state);
-
 	protohdr = ipv6_find_hdr(pkt->skb, &thoff, -1, &frag_off, &flags);
 	if (protohdr < 0) {
-		nft_set_pktinfo_proto_unspec(pkt, skb);
+		nft_set_pktinfo_unspec(pkt, skb);
 		return;
 	}
 
@@ -28,10 +24,8 @@ nft_set_pktinfo_ipv6(struct nft_pktinfo *pkt,
 	pkt->xt.fragoff = frag_off;
 }
 
-static inline int
-__nft_set_pktinfo_ipv6_validate(struct nft_pktinfo *pkt,
-				struct sk_buff *skb,
-				const struct nf_hook_state *state)
+static inline int __nft_set_pktinfo_ipv6_validate(struct nft_pktinfo *pkt,
+						  struct sk_buff *skb)
 {
 #if IS_ENABLED(CONFIG_IPV6)
 	unsigned int flags = IP6_FH_F_AUTH;
@@ -68,16 +62,11 @@ __nft_set_pktinfo_ipv6_validate(struct nft_pktinfo *pkt,
 #endif
 }
 
-static inline void
-nft_set_pktinfo_ipv6_validate(struct nft_pktinfo *pkt,
-			      struct sk_buff *skb,
-			      const struct nf_hook_state *state)
+static inline void nft_set_pktinfo_ipv6_validate(struct nft_pktinfo *pkt,
+						 struct sk_buff *skb)
 {
-	nft_set_pktinfo(pkt, skb, state);
-	if (__nft_set_pktinfo_ipv6_validate(pkt, skb, state) < 0)
-		nft_set_pktinfo_proto_unspec(pkt, skb);
+	if (__nft_set_pktinfo_ipv6_validate(pkt, skb) < 0)
+		nft_set_pktinfo_unspec(pkt, skb);
 }
 
-extern struct nft_af_info nft_af_ipv6;
-
 #endif
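The new calling convention, as a hypothetical hook body: nft_set_pktinfo() consumes the hook state once, then the l3-specific helper runs without it.

static unsigned int sketch_ip6_hook(void *priv, struct sk_buff *skb,
				    const struct nf_hook_state *state)
{
	struct nft_pktinfo pkt;

	nft_set_pktinfo(&pkt, skb, state);
	nft_set_pktinfo_ipv6(&pkt, skb);

	return nft_do_chain(&pkt, priv);
}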
diff --git a/include/net/netns/can.h b/include/net/netns/can.h
index ecf238b8862c..ca9bd9fba5b5 100644
--- a/include/net/netns/can.h
+++ b/include/net/netns/can.h
@@ -8,7 +8,7 @@
 
 #include <linux/spinlock.h>
 
-struct dev_rcv_lists;
+struct can_dev_rcv_lists;
 struct s_stats;
 struct s_pstats;
 
@@ -28,7 +28,7 @@ struct netns_can {
 #endif
 
 	/* receive filters subscribed for 'all' CAN devices */
-	struct dev_rcv_lists *can_rx_alldev_list;
+	struct can_dev_rcv_lists *can_rx_alldev_list;
 	spinlock_t can_rcvlists_lock;
 	struct timer_list can_stattimer;/* timer for statistics update */
 	struct s_stats *can_stats;	/* packet statistics */
diff --git a/include/net/netns/core.h b/include/net/netns/core.h
index 0ad4d0c71228..36c2d998a43c 100644
--- a/include/net/netns/core.h
+++ b/include/net/netns/core.h
@@ -11,7 +11,10 @@ struct netns_core {
 
 	int	sysctl_somaxconn;
 
-	struct prot_inuse __percpu *inuse;
+#ifdef CONFIG_PROC_FS
+	int __percpu *sock_inuse;
+	struct prot_inuse __percpu *prot_inuse;
+#endif
 };
 
 #endif
diff --git a/include/net/netns/netfilter.h b/include/net/netns/netfilter.h
index cc00af2ac2d7..ca043342c0eb 100644
--- a/include/net/netns/netfilter.h
+++ b/include/net/netns/netfilter.h
@@ -17,7 +17,17 @@ struct netns_nf {
 #ifdef CONFIG_SYSCTL
 	struct ctl_table_header *nf_log_dir_header;
 #endif
-	struct nf_hook_entries __rcu *hooks[NFPROTO_NUMPROTO][NF_MAX_HOOKS];
+	struct nf_hook_entries __rcu *hooks_ipv4[NF_INET_NUMHOOKS];
+	struct nf_hook_entries __rcu *hooks_ipv6[NF_INET_NUMHOOKS];
+#ifdef CONFIG_NETFILTER_FAMILY_ARP
+	struct nf_hook_entries __rcu *hooks_arp[NF_ARP_NUMHOOKS];
+#endif
+#ifdef CONFIG_NETFILTER_FAMILY_BRIDGE
+	struct nf_hook_entries __rcu *hooks_bridge[NF_INET_NUMHOOKS];
+#endif
+#if IS_ENABLED(CONFIG_DECNET)
+	struct nf_hook_entries __rcu *hooks_decnet[NF_DN_NUMHOOKS];
+#endif
 #if IS_ENABLED(CONFIG_NF_DEFRAG_IPV4)
 	bool			defrag_ipv4;
 #endif
diff --git a/include/net/netns/nftables.h b/include/net/netns/nftables.h
index 4109b5f3010f..48134353411d 100644
--- a/include/net/netns/nftables.h
+++ b/include/net/netns/nftables.h
@@ -7,14 +7,8 @@
 struct nft_af_info;
 
 struct netns_nftables {
-	struct list_head	af_info;
+	struct list_head	tables;
 	struct list_head	commit_list;
-	struct nft_af_info	*ipv4;
-	struct nft_af_info	*ipv6;
-	struct nft_af_info	*inet;
-	struct nft_af_info	*arp;
-	struct nft_af_info	*bridge;
-	struct nft_af_info	*netdev;
 	unsigned int		base_seq;
 	u8			gencursor;
 };
diff --git a/include/net/netns/sctp.h b/include/net/netns/sctp.h
index ebc813277662..0db7fb3e4e15 100644
--- a/include/net/netns/sctp.h
+++ b/include/net/netns/sctp.h
@@ -122,9 +122,12 @@ struct netns_sctp {
 	/* Flag to indicate if PR-CONFIG is enabled. */
 	int reconf_enable;
 
-	/* Flag to idicate if SCTP-AUTH is enabled */
+	/* Flag to indicate if SCTP-AUTH is enabled */
 	int auth_enable;
 
+	/* Flag to indicate if stream interleave is enabled */
+	int intl_enable;
+
 	/*
 	 * Policy to control SCTP IPv4 address scoping
 	 * 0   - Disable IPv4 address scoping
diff --git a/include/net/pkt_cls.h b/include/net/pkt_cls.h
index 753ac9361154..87406252f0a3 100644
--- a/include/net/pkt_cls.h
+++ b/include/net/pkt_cls.h
@@ -29,6 +29,7 @@ struct tcf_block_ext_info {
 	enum tcf_block_binder_type binder_type;
 	tcf_chain_head_change_t *chain_head_change;
 	void *chain_head_change_priv;
+	u32 block_index;
 };
 
 struct tcf_block_cb;
@@ -38,16 +39,25 @@ bool tcf_queue_work(struct work_struct *work);
 struct tcf_chain *tcf_chain_get(struct tcf_block *block, u32 chain_index,
 				bool create);
 void tcf_chain_put(struct tcf_chain *chain);
+void tcf_block_netif_keep_dst(struct tcf_block *block);
 int tcf_block_get(struct tcf_block **p_block,
-		  struct tcf_proto __rcu **p_filter_chain, struct Qdisc *q);
+		  struct tcf_proto __rcu **p_filter_chain, struct Qdisc *q,
+		  struct netlink_ext_ack *extack);
 int tcf_block_get_ext(struct tcf_block **p_block, struct Qdisc *q,
-		      struct tcf_block_ext_info *ei);
+		      struct tcf_block_ext_info *ei,
+		      struct netlink_ext_ack *extack);
 void tcf_block_put(struct tcf_block *block);
 void tcf_block_put_ext(struct tcf_block *block, struct Qdisc *q,
 		       struct tcf_block_ext_info *ei);
 
+static inline bool tcf_block_shared(struct tcf_block *block)
+{
+	return block->index;
+}
+
 static inline struct Qdisc *tcf_block_q(struct tcf_block *block)
 {
+	WARN_ON(tcf_block_shared(block));
 	return block->q;
 }
 
@@ -77,14 +87,16 @@ int tcf_classify(struct sk_buff *skb, const struct tcf_proto *tp,
 #else
 static inline
 int tcf_block_get(struct tcf_block **p_block,
-		  struct tcf_proto __rcu **p_filter_chain, struct Qdisc *q)
+		  struct tcf_proto __rcu **p_filter_chain, struct Qdisc *q,
+		  struct netlink_ext_ack *extack)
 {
 	return 0;
 }
 
 static inline
 int tcf_block_get_ext(struct tcf_block **p_block, struct Qdisc *q,
-		      struct tcf_block_ext_info *ei)
+		      struct tcf_block_ext_info *ei,
+		      struct netlink_ext_ack *extack)
 {
 	return 0;
 }
@@ -364,7 +376,8 @@ tcf_exts_exec(struct sk_buff *skb, struct tcf_exts *exts,
 
 int tcf_exts_validate(struct net *net, struct tcf_proto *tp,
 		      struct nlattr **tb, struct nlattr *rate_tlv,
-		      struct tcf_exts *exts, bool ovr);
+		      struct tcf_exts *exts, bool ovr,
+		      struct netlink_ext_ack *extack);
 void tcf_exts_destroy(struct tcf_exts *exts);
 void tcf_exts_change(struct tcf_exts *dst, struct tcf_exts *src);
 int tcf_exts_dump(struct sk_buff *skb, struct tcf_exts *exts);
@@ -544,13 +557,16 @@ static inline int tcf_valid_offset(const struct sk_buff *skb,
 #include <net/net_namespace.h>
 
 static inline int
-tcf_change_indev(struct net *net, struct nlattr *indev_tlv)
+tcf_change_indev(struct net *net, struct nlattr *indev_tlv,
+		 struct netlink_ext_ack *extack)
 {
 	char indev[IFNAMSIZ];
 	struct net_device *dev;
 
-	if (nla_strlcpy(indev, indev_tlv, IFNAMSIZ) >= IFNAMSIZ)
+	if (nla_strlcpy(indev, indev_tlv, IFNAMSIZ) >= IFNAMSIZ) {
+		NL_SET_ERR_MSG(extack, "Interface name too long");
 		return -EINVAL;
+	}
 	dev = __dev_get_by_name(net, indev);
 	if (!dev)
 		return -ENODEV;
@@ -586,17 +602,9 @@ struct tc_cls_common_offload {
 	u32 chain_index;
 	__be16 protocol;
 	u32 prio;
+	struct netlink_ext_ack *extack;
 };
 
-static inline void
-tc_cls_common_offload_init(struct tc_cls_common_offload *cls_common,
-			   const struct tcf_proto *tp)
-{
-	cls_common->chain_index = tp->chain->index;
-	cls_common->protocol = tp->protocol;
-	cls_common->prio = tp->prio;
-}
-
 struct tc_cls_u32_knode {
 	struct tcf_exts *exts;
 	struct tc_u32_sel *sel;
@@ -637,6 +645,31 @@ static inline bool tc_can_offload(const struct net_device *dev)
 	return dev->features & NETIF_F_HW_TC;
 }
 
+static inline bool tc_can_offload_extack(const struct net_device *dev,
+					 struct netlink_ext_ack *extack)
+{
+	bool can = tc_can_offload(dev);
+
+	if (!can)
+		NL_SET_ERR_MSG(extack, "TC offload is disabled on net device");
+
+	return can;
+}
+
+static inline bool
+tc_cls_can_offload_and_chain0(const struct net_device *dev,
+			      struct tc_cls_common_offload *common)
+{
+	if (!tc_can_offload_extack(dev, common->extack))
+		return false;
+	if (common->chain_index) {
+		NL_SET_ERR_MSG(common->extack,
+			       "Driver supports only offload of chain 0");
+		return false;
+	}
+	return true;
+}
+
 static inline bool tc_skip_hw(u32 flags)
 {
 	return (flags & TCA_CLS_FLAGS_SKIP_HW) ? true : false;
@@ -664,6 +697,18 @@ static inline bool tc_in_hw(u32 flags)
 	return (flags & TCA_CLS_FLAGS_IN_HW) ? true : false;
 }
 
+static inline void
+tc_cls_common_offload_init(struct tc_cls_common_offload *cls_common,
+			   const struct tcf_proto *tp, u32 flags,
+			   struct netlink_ext_ack *extack)
+{
+	cls_common->chain_index = tp->chain->index;
+	cls_common->protocol = tp->protocol;
+	cls_common->prio = tp->prio;
+	if (tc_skip_sw(flags))
+		cls_common->extack = extack;
+}
+
 enum tc_fl_command {
 	TC_CLSFLOWER_REPLACE,
 	TC_CLSFLOWER_DESTROY,
@@ -706,7 +751,6 @@ struct tc_cls_bpf_offload {
 	struct bpf_prog *oldprog;
 	const char *name;
 	bool exts_integrated;
-	u32 gen_flags;
 };
 
 struct tc_mqprio_qopt_offload {
@@ -727,6 +771,11 @@ struct tc_cookie {
 	u32 len;
 };
 
+struct tc_qopt_offload_stats {
+	struct gnet_stats_basic_packed *bstats;
+	struct gnet_stats_queue *qstats;
+};
+
 enum tc_red_command {
 	TC_RED_REPLACE,
 	TC_RED_DESTROY,
@@ -739,9 +788,6 @@ struct tc_red_qopt_offload_params {
 	u32 max;
 	u32 probability;
 	bool is_ecn;
-};
-struct tc_red_qopt_offload_stats {
-	struct gnet_stats_basic_packed *bstats;
 	struct gnet_stats_queue *qstats;
 };
 
@@ -751,9 +797,34 @@ struct tc_red_qopt_offload {
 	u32 parent;
 	union {
 		struct tc_red_qopt_offload_params set;
-		struct tc_red_qopt_offload_stats stats;
+		struct tc_qopt_offload_stats stats;
 		struct red_stats *xstats;
 	};
 };
 
+enum tc_prio_command {
+	TC_PRIO_REPLACE,
+	TC_PRIO_DESTROY,
+	TC_PRIO_STATS,
+};
+
+struct tc_prio_qopt_offload_params {
+	int bands;
+	u8 priomap[TC_PRIO_MAX + 1];
+	/* If an offloaded prio qdisc is later changed to a config that
+	 * cannot be offloaded, the backlog & qlen values need to be
+	 * updated to negate the HW backlog & qlen values (and only those).
+	 */
+	struct gnet_stats_queue *qstats;
+};
+
+struct tc_prio_qopt_offload {
+	enum tc_prio_command command;
+	u32 handle;
+	u32 parent;
+	union {
+		struct tc_prio_qopt_offload_params replace_params;
+		struct tc_qopt_offload_stats stats;
+	};
+};
 #endif
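A hypothetical driver callback showing the intended use: one call rejects both disabled offload and chains other than 0, reporting the reason through extack for skip_sw filters (assumes a classifier offload struct that embeds tc_cls_common_offload as 'common'):

static int sketch_setup_cls(struct net_device *dev,
			    struct tc_cls_flower_offload *f)
{
	if (!tc_cls_can_offload_and_chain0(dev, &f->common))
		return -EOPNOTSUPP;

	/* translate and program the flow into hardware here */
	return 0;
}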
diff --git a/include/net/pkt_sched.h b/include/net/pkt_sched.h
index d1f413f06c72..815b92a23936 100644
--- a/include/net/pkt_sched.h
+++ b/include/net/pkt_sched.h
@@ -89,7 +89,8 @@ extern struct Qdisc_ops pfifo_head_drop_qdisc_ops;
 
 int fifo_set_limit(struct Qdisc *q, unsigned int limit);
 struct Qdisc *fifo_create_dflt(struct Qdisc *sch, struct Qdisc_ops *ops,
-			       unsigned int limit);
+			       unsigned int limit,
+			       struct netlink_ext_ack *extack);
 
 int register_qdisc(struct Qdisc_ops *qops);
 int unregister_qdisc(struct Qdisc_ops *qops);
@@ -99,22 +100,24 @@ int qdisc_set_default(const char *id);
 void qdisc_hash_add(struct Qdisc *q, bool invisible);
 void qdisc_hash_del(struct Qdisc *q);
 struct Qdisc *qdisc_lookup(struct net_device *dev, u32 handle);
-struct Qdisc *qdisc_lookup_class(struct net_device *dev, u32 handle);
 struct qdisc_rate_table *qdisc_get_rtab(struct tc_ratespec *r,
-					struct nlattr *tab);
+					struct nlattr *tab,
+					struct netlink_ext_ack *extack);
 void qdisc_put_rtab(struct qdisc_rate_table *tab);
 void qdisc_put_stab(struct qdisc_size_table *tab);
 void qdisc_warn_nonwc(const char *txt, struct Qdisc *qdisc);
-int sch_direct_xmit(struct sk_buff *skb, struct Qdisc *q,
-		    struct net_device *dev, struct netdev_queue *txq,
-		    spinlock_t *root_lock, bool validate);
+bool sch_direct_xmit(struct sk_buff *skb, struct Qdisc *q,
+		     struct net_device *dev, struct netdev_queue *txq,
+		     spinlock_t *root_lock, bool validate);
 
 void __qdisc_run(struct Qdisc *q);
 
 static inline void qdisc_run(struct Qdisc *q)
 {
-	if (qdisc_run_begin(q))
+	if (qdisc_run_begin(q)) {
 		__qdisc_run(q);
+		qdisc_run_end(q);
+	}
 }
 
 static inline __be16 tc_skb_protocol(const struct sk_buff *skb)
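After this change qdisc_run() owns the full begin/run/end sequence, so a call site reduces to (hypothetical):

static void sketch_kick_qdisc(struct Qdisc *q)
{
	qdisc_run(q);	/* begin, __qdisc_run(), end in one balanced call */
}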
diff --git a/include/net/route.h b/include/net/route.h
index d538e6db1afe..1eb9ce470e25 100644
--- a/include/net/route.h
+++ b/include/net/route.h
@@ -217,7 +217,7 @@ unsigned int inet_addr_type_dev_table(struct net *net,
 				      const struct net_device *dev,
 				      __be32 addr);
 void ip_rt_multicast_event(struct in_device *);
-int ip_rt_ioctl(struct net *, unsigned int cmd, void __user *arg);
+int ip_rt_ioctl(struct net *, unsigned int cmd, struct rtentry *rt);
 void ip_rt_get_source(u8 *src, struct sk_buff *skb, struct rtable *rt);
 struct rtable *rt_dst_alloc(struct net_device *dev,
 			     unsigned int flags, u16 type,
diff --git a/include/net/rtnetlink.h b/include/net/rtnetlink.h
index ead018744ff5..14b6b3af8918 100644
--- a/include/net/rtnetlink.h
+++ b/include/net/rtnetlink.h
@@ -13,10 +13,10 @@ enum rtnl_link_flags {
 	RTNL_FLAG_DOIT_UNLOCKED = 1,
 };
 
-int __rtnl_register(int protocol, int msgtype,
-		    rtnl_doit_func, rtnl_dumpit_func, unsigned int flags);
 void rtnl_register(int protocol, int msgtype,
 		   rtnl_doit_func, rtnl_dumpit_func, unsigned int flags);
+int rtnl_register_module(struct module *owner, int protocol, int msgtype,
+			 rtnl_doit_func, rtnl_dumpit_func, unsigned int flags);
 int rtnl_unregister(int protocol, int msgtype);
 void rtnl_unregister_all(int protocol);
 
diff --git a/include/net/sch_generic.h b/include/net/sch_generic.h
index becf86aa4ac6..e2ab13687fb9 100644
--- a/include/net/sch_generic.h
+++ b/include/net/sch_generic.h
@@ -71,6 +71,7 @@ struct Qdisc {
 				      * qdisc_tree_decrease_qlen() should stop.
 				      */
 #define TCQ_F_INVISIBLE		0x80 /* invisible by default in dump */
+#define TCQ_F_NOLOCK		0x100 /* qdisc does not require locking */
 #define TCQ_F_OFFLOADED		0x200 /* qdisc is offloaded to HW */
 	u32			limit;
 	const struct Qdisc_ops	*ops;
@@ -88,14 +89,14 @@ struct Qdisc {
 	/*
 	 * For performance sake on SMP, we put highly modified fields at the end
 	 */
-	struct sk_buff		*gso_skb ____cacheline_aligned_in_smp;
+	struct sk_buff_head	gso_skb ____cacheline_aligned_in_smp;
 	struct qdisc_skb_head	q;
 	struct gnet_stats_basic_packed bstats;
 	seqcount_t		running;
 	struct gnet_stats_queue	qstats;
 	unsigned long		state;
 	struct Qdisc            *next_sched;
-	struct sk_buff		*skb_bad_txq;
+	struct sk_buff_head	skb_bad_txq;
 	int			padded;
 	refcount_t		refcnt;
 
@@ -150,19 +151,23 @@ struct Qdisc_class_ops {
 	/* Child qdisc manipulation */
 	struct netdev_queue *	(*select_queue)(struct Qdisc *, struct tcmsg *);
 	int			(*graft)(struct Qdisc *, unsigned long cl,
-					struct Qdisc *, struct Qdisc **);
+					struct Qdisc *, struct Qdisc **,
+					struct netlink_ext_ack *extack);
 	struct Qdisc *		(*leaf)(struct Qdisc *, unsigned long cl);
 	void			(*qlen_notify)(struct Qdisc *, unsigned long);
 
 	/* Class manipulation routines */
 	unsigned long		(*find)(struct Qdisc *, u32 classid);
 	int			(*change)(struct Qdisc *, u32, u32,
-					struct nlattr **, unsigned long *);
+					struct nlattr **, unsigned long *,
+					struct netlink_ext_ack *);
 	int			(*delete)(struct Qdisc *, unsigned long);
 	void			(*walk)(struct Qdisc *, struct qdisc_walker * arg);
 
 	/* Filter manipulation */
-	struct tcf_block *	(*tcf_block)(struct Qdisc *, unsigned long);
+	struct tcf_block *	(*tcf_block)(struct Qdisc *sch,
+					     unsigned long arg,
+					     struct netlink_ext_ack *extack);
 	unsigned long		(*bind_tcf)(struct Qdisc *, unsigned long,
 					u32 classid);
 	void			(*unbind_tcf)(struct Qdisc *, unsigned long);
@@ -187,15 +192,26 @@ struct Qdisc_ops {
 	struct sk_buff *	(*dequeue)(struct Qdisc *);
 	struct sk_buff *	(*peek)(struct Qdisc *);
 
-	int			(*init)(struct Qdisc *, struct nlattr *arg);
+	int			(*init)(struct Qdisc *sch, struct nlattr *arg,
+					struct netlink_ext_ack *extack);
 	void			(*reset)(struct Qdisc *);
 	void			(*destroy)(struct Qdisc *);
-	int			(*change)(struct Qdisc *, struct nlattr *arg);
-	void			(*attach)(struct Qdisc *);
+	int			(*change)(struct Qdisc *sch,
+					  struct nlattr *arg,
+					  struct netlink_ext_ack *extack);
+	void			(*attach)(struct Qdisc *sch);
+	int			(*change_tx_queue_len)(struct Qdisc *, unsigned int);
 
 	int			(*dump)(struct Qdisc *, struct sk_buff *);
 	int			(*dump_stats)(struct Qdisc *, struct gnet_dump *);
 
+	void			(*ingress_block_set)(struct Qdisc *sch,
+						     u32 block_index);
+	void			(*egress_block_set)(struct Qdisc *sch,
+						    u32 block_index);
+	u32			(*ingress_block_get)(struct Qdisc *sch);
+	u32			(*egress_block_get)(struct Qdisc *sch);
+
 	struct module		*owner;
 };
 
@@ -218,14 +234,18 @@ struct tcf_proto_ops {
 					    const struct tcf_proto *,
 					    struct tcf_result *);
 	int			(*init)(struct tcf_proto*);
-	void			(*destroy)(struct tcf_proto*);
+	void			(*destroy)(struct tcf_proto *tp,
+					   struct netlink_ext_ack *extack);
 
 	void*			(*get)(struct tcf_proto*, u32 handle);
 	int			(*change)(struct net *net, struct sk_buff *,
 					struct tcf_proto*, unsigned long,
 					u32 handle, struct nlattr **,
-					void **, bool);
-	int			(*delete)(struct tcf_proto*, void *, bool*);
+					void **, bool,
+					struct netlink_ext_ack *);
+	int			(*delete)(struct tcf_proto *tp, void *arg,
+					  bool *last,
+					  struct netlink_ext_ack *);
 	void			(*walk)(struct tcf_proto*, struct tcf_walker *arg);
 	void			(*bind_class)(void *, u32, unsigned long);
 
@@ -247,8 +267,6 @@ struct tcf_proto {
 
 	/* All the rest */
 	u32			prio;
-	u32			classid;
-	struct Qdisc		*q;
 	void			*data;
 	const struct tcf_proto_ops	*ops;
 	struct tcf_chain	*chain;
@@ -267,8 +285,7 @@ typedef void tcf_chain_head_change_t(struct tcf_proto *tp_head, void *priv);
 
 struct tcf_chain {
 	struct tcf_proto __rcu *filter_chain;
-	tcf_chain_head_change_t *chain_head_change;
-	void *chain_head_change_priv;
+	struct list_head filter_chain_list;
 	struct list_head list;
 	struct tcf_block *block;
 	u32 index; /* chain index */
@@ -277,12 +294,33 @@ struct tcf_chain {
 
 struct tcf_block {
 	struct list_head chain_list;
+	u32 index; /* block index for shared blocks */
+	unsigned int refcnt;
 	struct net *net;
 	struct Qdisc *q;
 	struct list_head cb_list;
-	struct work_struct work;
+	struct list_head owner_list;
+	bool keep_dst;
+	unsigned int offloadcnt; /* Number of offloaded filters */
+	unsigned int nooffloaddevcnt; /* Number of devs unable to do offload */
 };
 
+static inline void tcf_block_offload_inc(struct tcf_block *block, u32 *flags)
+{
+	if (*flags & TCA_CLS_FLAGS_IN_HW)
+		return;
+	*flags |= TCA_CLS_FLAGS_IN_HW;
+	block->offloadcnt++;
+}
+
+static inline void tcf_block_offload_dec(struct tcf_block *block, u32 *flags)
+{
+	if (!(*flags & TCA_CLS_FLAGS_IN_HW))
+		return;
+	*flags &= ~TCA_CLS_FLAGS_IN_HW;
+	block->offloadcnt--;
+}
+
 static inline void qdisc_cb_private_validate(const struct sk_buff *skb, int sz)
 {
 	struct qdisc_skb_cb *qcb;
@@ -291,11 +329,31 @@ static inline void qdisc_cb_private_validate(const struct sk_buff *skb, int sz)
 	BUILD_BUG_ON(sizeof(qcb->data) < sz);
 }
 
+static inline int qdisc_qlen_cpu(const struct Qdisc *q)
+{
+	return this_cpu_ptr(q->cpu_qstats)->qlen;
+}
+
 static inline int qdisc_qlen(const struct Qdisc *q)
 {
 	return q->q.qlen;
 }
 
+static inline int qdisc_qlen_sum(const struct Qdisc *q)
+{
+	__u32 qlen = 0;
+	int i;
+
+	if (q->flags & TCQ_F_NOLOCK) {
+		for_each_possible_cpu(i)
+			qlen += per_cpu_ptr(q->cpu_qstats, i)->qlen;
+	} else {
+		qlen = q->q.qlen;
+	}
+
+	return qlen;
+}
+
 static inline struct qdisc_skb_cb *qdisc_skb_cb(const struct sk_buff *skb)
 {
 	return (struct qdisc_skb_cb *)skb->cb;
@@ -432,6 +490,7 @@ void qdisc_class_hash_remove(struct Qdisc_class_hash *,
 void qdisc_class_hash_grow(struct Qdisc *, struct Qdisc_class_hash *);
 void qdisc_class_hash_destroy(struct Qdisc_class_hash *);
 
+int dev_qdisc_change_tx_queue_len(struct net_device *dev);
 void dev_init_scheduler(struct net_device *dev);
 void dev_shutdown(struct net_device *dev);
 void dev_activate(struct net_device *dev);
@@ -444,10 +503,12 @@ void qdisc_destroy(struct Qdisc *qdisc);
 void qdisc_tree_reduce_backlog(struct Qdisc *qdisc, unsigned int n,
 			       unsigned int len);
 struct Qdisc *qdisc_alloc(struct netdev_queue *dev_queue,
-			  const struct Qdisc_ops *ops);
+			  const struct Qdisc_ops *ops,
+			  struct netlink_ext_ack *extack);
 void qdisc_free(struct Qdisc *qdisc);
 struct Qdisc *qdisc_create_dflt(struct netdev_queue *dev_queue,
-				const struct Qdisc_ops *ops, u32 parentid);
+				const struct Qdisc_ops *ops, u32 parentid,
+				struct netlink_ext_ack *extack);
 void __qdisc_calculate_pkt_len(struct sk_buff *skb,
 			       const struct qdisc_size_table *stab);
 int skb_do_redirect(struct sk_buff *);
@@ -633,12 +694,39 @@ static inline void qdisc_qstats_backlog_dec(struct Qdisc *sch,
 	sch->qstats.backlog -= qdisc_pkt_len(skb);
 }
 
+static inline void qdisc_qstats_cpu_backlog_dec(struct Qdisc *sch,
+						const struct sk_buff *skb)
+{
+	this_cpu_sub(sch->cpu_qstats->backlog, qdisc_pkt_len(skb));
+}
+
 static inline void qdisc_qstats_backlog_inc(struct Qdisc *sch,
 					    const struct sk_buff *skb)
 {
 	sch->qstats.backlog += qdisc_pkt_len(skb);
 }
 
+static inline void qdisc_qstats_cpu_backlog_inc(struct Qdisc *sch,
+						const struct sk_buff *skb)
+{
+	this_cpu_add(sch->cpu_qstats->backlog, qdisc_pkt_len(skb));
+}
+
+static inline void qdisc_qstats_cpu_qlen_inc(struct Qdisc *sch)
+{
+	this_cpu_inc(sch->cpu_qstats->qlen);
+}
+
+static inline void qdisc_qstats_cpu_qlen_dec(struct Qdisc *sch)
+{
+	this_cpu_dec(sch->cpu_qstats->qlen);
+}
+
+static inline void qdisc_qstats_cpu_requeues_inc(struct Qdisc *sch)
+{
+	this_cpu_inc(sch->cpu_qstats->requeues);
+}
+
 static inline void __qdisc_qstats_drop(struct Qdisc *sch, int count)
 {
 	sch->qstats.drops += count;
@@ -769,26 +857,30 @@ static inline struct sk_buff *qdisc_peek_head(struct Qdisc *sch)
 /* generic pseudo peek method for non-work-conserving qdisc */
 static inline struct sk_buff *qdisc_peek_dequeued(struct Qdisc *sch)
 {
+	struct sk_buff *skb = skb_peek(&sch->gso_skb);
+
 	/* we can reuse ->gso_skb because peek isn't called for root qdiscs */
-	if (!sch->gso_skb) {
-		sch->gso_skb = sch->dequeue(sch);
-		if (sch->gso_skb) {
+	if (!skb) {
+		skb = sch->dequeue(sch);
+
+		if (skb) {
+			__skb_queue_head(&sch->gso_skb, skb);
 			/* it's still part of the queue */
-			qdisc_qstats_backlog_inc(sch, sch->gso_skb);
+			qdisc_qstats_backlog_inc(sch, skb);
 			sch->q.qlen++;
 		}
 	}
 
-	return sch->gso_skb;
+	return skb;
 }
 
 /* use instead of qdisc->dequeue() for all qdiscs queried with ->peek() */
 static inline struct sk_buff *qdisc_dequeue_peeked(struct Qdisc *sch)
 {
-	struct sk_buff *skb = sch->gso_skb;
+	struct sk_buff *skb = skb_peek(&sch->gso_skb);
 
 	if (skb) {
-		sch->gso_skb = NULL;
+		skb = __skb_dequeue(&sch->gso_skb);
 		qdisc_qstats_backlog_dec(sch, skb);
 		sch->q.qlen--;
 	} else {
@@ -846,6 +938,14 @@ static inline void rtnl_qdisc_drop(struct sk_buff *skb, struct Qdisc *sch)
 	qdisc_qstats_drop(sch);
 }
 
+static inline int qdisc_drop_cpu(struct sk_buff *skb, struct Qdisc *sch,
+				 struct sk_buff **to_free)
+{
+	__qdisc_drop(skb, to_free);
+	qdisc_qstats_cpu_drop(sch);
+
+	return NET_XMIT_DROP;
+}
 
 static inline int qdisc_drop(struct sk_buff *skb, struct Qdisc *sch,
 			     struct sk_buff **to_free)
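A hypothetical dump-path use of the new per-cpu-aware helper: for TCQ_F_NOLOCK qdiscs the queue length lives in per-cpu qstats, so totals must be summed rather than read from q.qlen.

static void sketch_report_qlen(const struct Qdisc *q, struct tc_stats *st)
{
	st->qlen = qdisc_qlen_sum(q);
}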
diff --git a/include/net/sctp/constants.h b/include/net/sctp/constants.h
index deaafa9b09cb..20ff237c5eb2 100644
--- a/include/net/sctp/constants.h
+++ b/include/net/sctp/constants.h
@@ -145,12 +145,13 @@ SCTP_SUBTYPE_CONSTRUCTOR(OTHER,		enum sctp_event_other,	other)
 SCTP_SUBTYPE_CONSTRUCTOR(PRIMITIVE,	enum sctp_event_primitive, primitive)
 
 
-#define sctp_chunk_is_data(a) (a->chunk_hdr->type == SCTP_CID_DATA)
+#define sctp_chunk_is_data(a) (a->chunk_hdr->type == SCTP_CID_DATA || \
+			       a->chunk_hdr->type == SCTP_CID_I_DATA)
 
 /* Calculate the actual data size in a data chunk */
-#define SCTP_DATA_SNDSIZE(c) ((int)((unsigned long)(c->chunk_end)\
-		       		- (unsigned long)(c->chunk_hdr)\
-				- sizeof(struct sctp_data_chunk)))
+#define SCTP_DATA_SNDSIZE(c) ((int)((unsigned long)(c->chunk_end) - \
+				    (unsigned long)(c->chunk_hdr) - \
+				    sctp_datachk_len(&c->asoc->stream)))
 
 /* Internal error codes */
 enum sctp_ierror {
diff --git a/include/net/sctp/sctp.h b/include/net/sctp/sctp.h
index 608d123ef25f..f7ae6b0a21d0 100644
--- a/include/net/sctp/sctp.h
+++ b/include/net/sctp/sctp.h
@@ -116,7 +116,7 @@ extern struct percpu_counter sctp_sockets_allocated;
 int sctp_asconf_mgmt(struct sctp_sock *, struct sctp_sockaddr_entry *);
 struct sk_buff *sctp_skb_recv_datagram(struct sock *, int, int, int *);
 
-int sctp_transport_walk_start(struct rhashtable_iter *iter);
+void sctp_transport_walk_start(struct rhashtable_iter *iter);
 void sctp_transport_walk_stop(struct rhashtable_iter *iter);
 struct sctp_transport *sctp_transport_get_next(struct net *net,
 			struct rhashtable_iter *iter);
@@ -444,13 +444,13 @@ static inline int sctp_frag_point(const struct sctp_association *asoc, int pmtu)
 	int frag = pmtu;
 
 	frag -= sp->pf->af->net_header_len;
-	frag -= sizeof(struct sctphdr) + sizeof(struct sctp_data_chunk);
+	frag -= sizeof(struct sctphdr) + sctp_datachk_len(&asoc->stream);
 
 	if (asoc->user_frag)
 		frag = min_t(int, frag, asoc->user_frag);
 
 	frag = SCTP_TRUNC4(min_t(int, frag, SCTP_MAX_CHUNK_LEN -
-					    sizeof(struct sctp_data_chunk)));
+					    sctp_datachk_len(&asoc->stream)));
 
 	return frag;
 }
diff --git a/include/net/sctp/sm.h b/include/net/sctp/sm.h
index 70fb397f65b0..2883c43c5258 100644
--- a/include/net/sctp/sm.h
+++ b/include/net/sctp/sm.h
@@ -197,10 +197,14 @@ struct sctp_chunk *sctp_make_cookie_ack(const struct sctp_association *asoc,
 struct sctp_chunk *sctp_make_cwr(const struct sctp_association *asoc,
 				 const __u32 lowest_tsn,
 				 const struct sctp_chunk *chunk);
-struct sctp_chunk *sctp_make_datafrag_empty(struct sctp_association *asoc,
+struct sctp_chunk *sctp_make_idata(const struct sctp_association *asoc,
+				   __u8 flags, int paylen, gfp_t gfp);
+struct sctp_chunk *sctp_make_ifwdtsn(const struct sctp_association *asoc,
+				     __u32 new_cum_tsn, size_t nstreams,
+				     struct sctp_ifwdtsn_skip *skiplist);
+struct sctp_chunk *sctp_make_datafrag_empty(const struct sctp_association *asoc,
 					    const struct sctp_sndrcvinfo *sinfo,
-					    int len, const __u8 flags,
-					    __u16 ssn, gfp_t gfp);
+					    int len, __u8 flags, gfp_t gfp);
 struct sctp_chunk *sctp_make_ecne(const struct sctp_association *asoc,
 				  const __u32 lowest_tsn);
 struct sctp_chunk *sctp_make_sack(const struct sctp_association *asoc);
@@ -342,7 +346,7 @@ static inline __u16 sctp_data_size(struct sctp_chunk *chunk)
 	__u16 size;
 
 	size = ntohs(chunk->chunk_hdr->length);
-	size -= sizeof(struct sctp_data_chunk);
+	size -= sctp_datahdr_len(&chunk->asoc->stream);
 
 	return size;
 }
@@ -358,6 +362,12 @@ static inline __u16 sctp_data_size(struct sctp_chunk *chunk)
 	 typecheck(__u32, b) && \
 	 ((__s32)((a) - (b)) <= 0))
 
+/* Compare two MIDs */
+#define MID_lt(a, b)	\
+	(typecheck(__u32, a) && \
+	 typecheck(__u32, b) && \
+	 ((__s32)((a) - (b)) < 0))
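
MID_lt() relies on serial-number arithmetic, exactly like the TSN/SSN
comparison macros around it, so ordering stays correct when the 32-bit
MID counter wraps. A tiny illustration (not part of the patch):

static bool example_mid_wrap(void)
{
	__u32 a = 0xffffffffU, b = 0;

	/* (__s32)(a - b) == -1, so the pre-wrap MID sorts first */
	return MID_lt(a, b);	/* true */
}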
+
 /* Compare two SSNs */
 #define SSN_lt(a,b)		\
 	(typecheck(__u16, a) && \
diff --git a/include/net/sctp/stream_interleave.h b/include/net/sctp/stream_interleave.h
new file mode 100644
index 000000000000..6657711c8bc4
--- /dev/null
+++ b/include/net/sctp/stream_interleave.h
@@ -0,0 +1,61 @@
+/* SCTP kernel implementation
+ * (C) Copyright Red Hat Inc. 2017
+ *
+ * These are definitions used for stream interleaving, defined in RFC
+ * draft ndata (https://tools.ietf.org/html/draft-ietf-tsvwg-sctp-ndata-11)
+ *
+ * This SCTP implementation is free software;
+ * you can redistribute it and/or modify it under the terms of
+ * the GNU General Public License as published by
+ * the Free Software Foundation; either version 2, or (at your option)
+ * any later version.
+ *
+ * This SCTP implementation  is distributed in the hope that it
+ * will be useful, but WITHOUT ANY WARRANTY; without even the implied
+ *                 ************************
+ * warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
+ * See the GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with GNU CC; see the file COPYING.  If not, see
+ * <http://www.gnu.org/licenses/>.
+ *
+ * Please send any bug reports or fixes you make to the
+ * email addresses:
+ *    lksctp developers <linux-sctp@vger.kernel.org>
+ *
+ * Written or modified by:
+ *   Xin Long <lucien.xin@gmail.com>
+ */
+
+#ifndef __sctp_stream_interleave_h__
+#define __sctp_stream_interleave_h__
+
+struct sctp_stream_interleave {
+	__u16	data_chunk_len;
+	__u16	ftsn_chunk_len;
+	/* (I-)DATA process */
+	struct sctp_chunk *(*make_datafrag)(const struct sctp_association *asoc,
+					    const struct sctp_sndrcvinfo *sinfo,
+					    int len, __u8 flags, gfp_t gfp);
+	void	(*assign_number)(struct sctp_chunk *chunk);
+	bool	(*validate_data)(struct sctp_chunk *chunk);
+	int	(*ulpevent_data)(struct sctp_ulpq *ulpq,
+				 struct sctp_chunk *chunk, gfp_t gfp);
+	int	(*enqueue_event)(struct sctp_ulpq *ulpq,
+				 struct sctp_ulpevent *event);
+	void	(*renege_events)(struct sctp_ulpq *ulpq,
+				 struct sctp_chunk *chunk, gfp_t gfp);
+	void	(*start_pd)(struct sctp_ulpq *ulpq, gfp_t gfp);
+	void	(*abort_pd)(struct sctp_ulpq *ulpq, gfp_t gfp);
+	/* (I-)FORWARD-TSN process */
+	void	(*generate_ftsn)(struct sctp_outq *q, __u32 ctsn);
+	bool	(*validate_ftsn)(struct sctp_chunk *chunk);
+	void	(*report_ftsn)(struct sctp_ulpq *ulpq, __u32 ftsn);
+	void	(*handle_ftsn)(struct sctp_ulpq *ulpq,
+			       struct sctp_chunk *chunk);
+};
+
+void sctp_stream_interleave_init(struct sctp_stream *stream);
+
+#endif /* __sctp_stream_interleave_h__ */
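
The ops table above lets DATA and I-DATA processing share call sites: a
caller dispatches through the per-stream pointer rather than testing
which chunk type is in use. An illustrative dispatch site (hypothetical
helper, not part of this patch):

static bool example_validate(struct sctp_chunk *chunk)
{
	struct sctp_stream *stream = &chunk->asoc->stream;

	/* runs whichever validator sctp_stream_interleave_init()
	 * installed for this association's stream
	 */
	return stream->si->validate_data(chunk);
}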
diff --git a/include/net/sctp/structs.h b/include/net/sctp/structs.h
index 9a5ccf03a59b..02369e379d35 100644
--- a/include/net/sctp/structs.h
+++ b/include/net/sctp/structs.h
@@ -89,6 +89,7 @@ struct sctp_stream;
 #include <net/sctp/tsnmap.h>
 #include <net/sctp/ulpevent.h>
 #include <net/sctp/ulpqueue.h>
+#include <net/sctp/stream_interleave.h>
 
 /* Structures useful for managing bind/connect. */
 
@@ -217,6 +218,7 @@ struct sctp_sock {
 		disable_fragments:1,
 		v4mapped:1,
 		frag_interleave:1,
+		strm_interleave:1,
 		recvrcvinfo:1,
 		recvnxtinfo:1,
 		data_ready_signalled:1;
@@ -397,6 +399,28 @@ void sctp_stream_update(struct sctp_stream *stream, struct sctp_stream *new);
 #define sctp_ssn_skip(stream, type, sid, ssn) \
 	((stream)->type[sid].ssn = ssn + 1)
 
+/* What is the current MID number for this stream? */
+#define sctp_mid_peek(stream, type, sid) \
+	((stream)->type[sid].mid)
+
+/* Return the next MID number for this stream.  */
+#define sctp_mid_next(stream, type, sid) \
+	((stream)->type[sid].mid++)
+
+/* Skip over this mid and all below. */
+#define sctp_mid_skip(stream, type, sid, mid) \
+	((stream)->type[sid].mid = mid + 1)
+
+#define sctp_stream_in(asoc, sid) (&(asoc)->stream.in[sid])
+
+/* What is the current MID_uo number for this stream? */
+#define sctp_mid_uo_peek(stream, type, sid) \
+	((stream)->type[sid].mid_uo)
+
+/* Return the next MID_uo number for this stream.  */
+#define sctp_mid_uo_next(stream, type, sid) \
+	((stream)->type[sid].mid_uo++)
+
 /*
  * Pointers to address related SCTP functions.
  * (i.e. things that depend on the address family.)
@@ -574,6 +598,8 @@ struct sctp_chunk {
 		struct sctp_addiphdr *addip_hdr;
 		struct sctp_fwdtsn_hdr *fwdtsn_hdr;
 		struct sctp_authhdr *auth_hdr;
+		struct sctp_idatahdr *idata_hdr;
+		struct sctp_ifwdtsn_hdr *ifwdtsn_hdr;
 	} subh;
 
 	__u8 *chunk_end;
@@ -620,6 +646,7 @@ struct sctp_chunk {
 	__u16	rtt_in_progress:1,	/* This chunk used for RTT calc? */
 		has_tsn:1,		/* Does this chunk have a TSN yet? */
 		has_ssn:1,		/* Does this chunk have a SSN yet? */
+#define has_mid has_ssn
 		singleton:1,		/* Only chunk in the packet? */
 		end_of_packet:1,	/* Last chunk in the packet? */
 		ecn_ce_done:1,		/* Have we processed the ECN CE bit? */
@@ -1073,6 +1100,7 @@ void sctp_retransmit_mark(struct sctp_outq *, struct sctp_transport *, __u8);
 void sctp_outq_uncork(struct sctp_outq *, gfp_t gfp);
 void sctp_prsctp_prune(struct sctp_association *asoc,
 		       struct sctp_sndrcvinfo *sinfo, int msg_len);
+void sctp_generate_fwdtsn(struct sctp_outq *q, __u32 sack_ctsn);
 /* Uncork and flush an outqueue.  */
 static inline void sctp_outq_cork(struct sctp_outq *q)
 {
@@ -1357,13 +1385,25 @@ struct sctp_stream_out_ext {
 };
 
 struct sctp_stream_out {
-	__u16	ssn;
-	__u8	state;
+	union {
+		__u32 mid;
+		__u16 ssn;
+	};
+	__u32 mid_uo;
 	struct sctp_stream_out_ext *ext;
+	__u8 state;
 };
 
 struct sctp_stream_in {
-	__u16	ssn;
+	union {
+		__u32 mid;
+		__u16 ssn;
+	};
+	__u32 mid_uo;
+	__u32 fsn;
+	__u32 fsn_uo;
+	char pd_mode;
+	char pd_mode_uo;
 };
 
 struct sctp_stream {
@@ -1387,11 +1427,32 @@ struct sctp_stream {
 			struct sctp_stream_out_ext *rr_next;
 		};
 	};
+	struct sctp_stream_interleave *si;
 };
 
 #define SCTP_STREAM_CLOSED		0x00
 #define SCTP_STREAM_OPEN		0x01
 
+static inline __u16 sctp_datachk_len(const struct sctp_stream *stream)
+{
+	return stream->si->data_chunk_len;
+}
+
+static inline __u16 sctp_datahdr_len(const struct sctp_stream *stream)
+{
+	return stream->si->data_chunk_len - sizeof(struct sctp_chunkhdr);
+}
+
+static inline __u16 sctp_ftsnchk_len(const struct sctp_stream *stream)
+{
+	return stream->si->ftsn_chunk_len;
+}
+
+static inline __u16 sctp_ftsnhdr_len(const struct sctp_stream *stream)
+{
+	return stream->si->ftsn_chunk_len - sizeof(struct sctp_chunkhdr);
+}
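
A hedged worked example of the arithmetic (the chunk-header sizes are
assumed from the DATA and I-DATA wire formats, not restated in this
hunk): a plain DATA chunk header is 16 bytes, an I-DATA chunk header is
20 bytes, and sizeof(struct sctp_chunkhdr) is 4, so:

	sctp_datachk_len(stream);	/* 16 for DATA, 20 for I-DATA */
	sctp_datahdr_len(stream);	/* 12 for DATA, 16 for I-DATA */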
+
 /* SCTP_GET_ASSOC_STATS counters */
 struct sctp_priv_assoc_stats {
 	/* Maximum observed rto in the association during subsequent
@@ -1940,6 +2001,7 @@ struct sctp_association {
 	__u8 need_ecne:1,	/* Need to send an ECNE Chunk? */
 	     temp:1,		/* Is it a temporary association? */
 	     force_delay:1,
+	     intl_enable:1,
 	     prsctp_enable:1,
 	     reconf_enable:1;
 
diff --git a/include/net/sctp/ulpevent.h b/include/net/sctp/ulpevent.h
index 231dc42f1da6..51b4e0626c34 100644
--- a/include/net/sctp/ulpevent.h
+++ b/include/net/sctp/ulpevent.h
@@ -45,19 +45,29 @@
 /* A structure to carry information to the ULP (e.g. Sockets API) */
 /* Warning: This sits inside an skb.cb[] area.  Be very careful of
  * growing this structure as it is at the maximum limit now.
+ *
+ * sctp_ulpevent is saved in skb->cb (48 bytes), whose last 4 bytes
+ * are taken by sock_skb_cb, so 'packed' has to be used here to make
+ * sctp_ulpevent fit into the remaining 44 bytes.
  */
 struct sctp_ulpevent {
 	struct sctp_association *asoc;
 	struct sctp_chunk *chunk;
 	unsigned int rmem_len;
-	__u32 ppid;
+	union {
+		__u32 mid;
+		__u16 ssn;
+	};
+	union {
+		__u32 ppid;
+		__u32 fsn;
+	};
 	__u32 tsn;
 	__u32 cumtsn;
 	__u16 stream;
-	__u16 ssn;
 	__u16 flags;
 	__u16 msg_flags;
-};
+} __packed;
 
 /* Retrieve the skb this event sits inside of. */
 static inline struct sk_buff *sctp_event2skb(const struct sctp_ulpevent *ev)
@@ -112,7 +122,8 @@ struct sctp_ulpevent *sctp_ulpevent_make_shutdown_event(
 
 struct sctp_ulpevent *sctp_ulpevent_make_pdapi(
 	const struct sctp_association *asoc,
-	__u32 indication, gfp_t gfp);
+	__u32 indication, __u32 sid, __u32 seq,
+	__u32 flags, gfp_t gfp);
 
 struct sctp_ulpevent *sctp_ulpevent_make_adaptation_indication(
 	const struct sctp_association *asoc, gfp_t gfp);
@@ -140,6 +151,10 @@ struct sctp_ulpevent *sctp_ulpevent_make_stream_change_event(
 	const struct sctp_association *asoc, __u16 flags,
 	__u32 strchange_instrms, __u32 strchange_outstrms, gfp_t gfp);
 
+struct sctp_ulpevent *sctp_make_reassembled_event(
+	struct net *net, struct sk_buff_head *queue,
+	struct sk_buff *f_frag, struct sk_buff *l_frag);
+
 void sctp_ulpevent_read_sndrcvinfo(const struct sctp_ulpevent *event,
 				   struct msghdr *);
 void sctp_ulpevent_read_rcvinfo(const struct sctp_ulpevent *event,
diff --git a/include/net/sctp/ulpqueue.h b/include/net/sctp/ulpqueue.h
index e0dce07b8794..bb0ecba3db2b 100644
--- a/include/net/sctp/ulpqueue.h
+++ b/include/net/sctp/ulpqueue.h
@@ -45,6 +45,7 @@ struct sctp_ulpq {
 	char pd_mode;
 	struct sctp_association *asoc;
 	struct sk_buff_head reasm;
+	struct sk_buff_head reasm_uo;
 	struct sk_buff_head lobby;
 };
 
@@ -76,11 +77,8 @@ int sctp_clear_pd(struct sock *sk, struct sctp_association *asoc);
 void sctp_ulpq_skip(struct sctp_ulpq *ulpq, __u16 sid, __u16 ssn);
 
 void sctp_ulpq_reasm_flushtsn(struct sctp_ulpq *, __u32);
-#endif /* __sctp_ulpqueue_h__ */
-
-
-
-
-
 
+__u16 sctp_ulpq_renege_list(struct sctp_ulpq *ulpq,
+			    struct sk_buff_head *list, __u16 needed);
 
+#endif /* __sctp_ulpqueue_h__ */
diff --git a/include/net/sock.h b/include/net/sock.h
index c47c635c9d19..63731289186a 100644
--- a/include/net/sock.h
+++ b/include/net/sock.h
@@ -72,6 +72,7 @@
 #include <net/tcp_states.h>
 #include <linux/net_tstamp.h>
 #include <net/smc.h>
+#include <net/l3mdev.h>
 
 /*
  * This structure really needs to be cleaned up.
@@ -1262,6 +1263,7 @@ proto_memory_pressure(struct proto *prot)
 /* Called with local bh disabled */
 void sock_prot_inuse_add(struct net *net, struct proto *prot, int inc);
 int sock_prot_inuse_get(struct net *net, struct proto *proto);
+int sock_inuse_get(struct net *net);
 #else
 static inline void sock_prot_inuse_add(struct net *net, struct proto *prot,
 		int inc)
@@ -2335,31 +2337,6 @@ static inline bool sk_listener(const struct sock *sk)
 	return (1 << sk->sk_state) & (TCPF_LISTEN | TCPF_NEW_SYN_RECV);
 }
 
-/**
- * sk_state_load - read sk->sk_state for lockless contexts
- * @sk: socket pointer
- *
- * Paired with sk_state_store(). Used in places we do not hold socket lock :
- * tcp_diag_get_info(), tcp_get_info(), tcp_poll(), get_tcp4_sock() ...
- */
-static inline int sk_state_load(const struct sock *sk)
-{
-	return smp_load_acquire(&sk->sk_state);
-}
-
-/**
- * sk_state_store - update sk->sk_state
- * @sk: socket pointer
- * @newstate: new state
- *
- * Paired with sk_state_load(). Should be used in contexts where
- * state change might impact lockless readers.
- */
-static inline void sk_state_store(struct sock *sk, int newstate)
-{
-	smp_store_release(&sk->sk_state, newstate);
-}
-
 void sock_enable_timestamp(struct sock *sk, int flag);
 int sock_get_timestamp(struct sock *, struct timeval __user *);
 int sock_get_timestampns(struct sock *, struct timespec __user *);
@@ -2410,4 +2387,34 @@ static inline int sk_get_rmem0(const struct sock *sk, const struct proto *proto)
 	return *proto->sysctl_rmem;
 }
 
+/* Default TCP Small queue budget is ~1 ms of data (1sec >> 10)
+ * Some wifi drivers need to tweak it to get more chunks.
+ * They can use this helper from their ndo_start_xmit()
+ */
+static inline void sk_pacing_shift_update(struct sock *sk, int val)
+{
+	if (!sk || !sk_fullsock(sk) || sk->sk_pacing_shift == val)
+		return;
+	sk->sk_pacing_shift = val;
+}
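
An illustrative caller (hypothetical driver, not part of this patch): a
wifi driver that wants roughly 8 ms of queued data rather than the
default ~1 ms would pass a shift of 7 (1 sec >> 7 is ~7.8 ms) from its
ndo_start_xmit():

static netdev_tx_t example_start_xmit(struct sk_buff *skb,
				      struct net_device *dev)
{
	/* safe even for skb->sk == NULL; the helper checks it */
	sk_pacing_shift_update(skb->sk, 7);

	/* ... hand the frame to the hardware queues ... */
	return NETDEV_TX_OK;
}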
+
+/* if a socket is bound to a device, check that the given device
+ * index is either the same or that the socket is bound to an L3
+ * master device and the given device index is also enslaved to
+ * that L3 master
+ */
+static inline bool sk_dev_equal_l3scope(struct sock *sk, int dif)
+{
+	int mdif;
+
+	if (!sk->sk_bound_dev_if || sk->sk_bound_dev_if == dif)
+		return true;
+
+	mdif = l3mdev_master_ifindex_by_index(sock_net(sk), dif);
+	if (mdif && mdif == sk->sk_bound_dev_if)
+		return true;
+
+	return false;
+}
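
An illustrative receive-path check (caller shape assumed, not part of
this patch): a socket lookup that must honour VRF binding compares the
inbound ifindex through the helper instead of against sk_bound_dev_if
directly:

static bool example_rcv_dev_ok(struct sock *sk, int skb_iif)
{
	/* true for the bound device itself, or for any device
	 * enslaved to the bound L3 master (VRF)
	 */
	return sk_dev_equal_l3scope(sk, skb_iif);
}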
+
 #endif	/* _SOCK_H */
diff --git a/include/net/tc_act/tc_csum.h b/include/net/tc_act/tc_csum.h
index 781f3433a0be..9470fd7e4350 100644
--- a/include/net/tc_act/tc_csum.h
+++ b/include/net/tc_act/tc_csum.h
@@ -6,10 +6,16 @@
 #include <net/act_api.h>
 #include <linux/tc_act/tc_csum.h>
 
+struct tcf_csum_params {
+	int action;
+	u32 update_flags;
+	struct rcu_head rcu;
+};
+
 struct tcf_csum {
 	struct tc_action common;
 
-	u32 update_flags;
+	struct tcf_csum_params __rcu *params;
 };
 #define to_tcf_csum(a) ((struct tcf_csum *)a)
 
@@ -24,7 +30,13 @@ static inline bool is_tcf_csum(const struct tc_action *a)
 
 static inline u32 tcf_csum_update_flags(const struct tc_action *a)
 {
-	return to_tcf_csum(a)->update_flags;
+	u32 update_flags;
+
+	rcu_read_lock();
+	update_flags = rcu_dereference(to_tcf_csum(a)->params)->update_flags;
+	rcu_read_unlock();
+
+	return update_flags;
 }
 
 #endif /* __NET_TC_CSUM_H */
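
Since the params block is now RCU-managed, the reader above runs
lockless; a sketch of the matching writer side (assumes RTNL is held,
helper name hypothetical):

static int example_csum_set_flags(struct tcf_csum *p, u32 new_flags)
{
	struct tcf_csum_params *new, *old;

	new = kzalloc(sizeof(*new), GFP_KERNEL);
	if (!new)
		return -ENOMEM;

	new->update_flags = new_flags;
	old = rtnl_dereference(p->params);
	rcu_assign_pointer(p->params, new);
	if (old)
		kfree_rcu(old, rcu);

	return 0;
}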
diff --git a/include/net/tc_act/tc_mirred.h b/include/net/tc_act/tc_mirred.h
index 21d253c9a8c6..a2e9cbca5c9e 100644
--- a/include/net/tc_act/tc_mirred.h
+++ b/include/net/tc_act/tc_mirred.h
@@ -8,10 +8,8 @@
 struct tcf_mirred {
 	struct tc_action	common;
 	int			tcfm_eaction;
-	int			tcfm_ifindex;
 	bool			tcfm_mac_header_xmit;
 	struct net_device __rcu	*tcfm_dev;
-	struct net		*net;
 	struct list_head	tcfm_list;
 };
 #define to_mirred(a) ((struct tcf_mirred *)a)
@@ -34,9 +32,9 @@ static inline bool is_tcf_mirred_egress_mirror(const struct tc_action *a)
 	return false;
 }
 
-static inline int tcf_mirred_ifindex(const struct tc_action *a)
+static inline struct net_device *tcf_mirred_dev(const struct tc_action *a)
 {
-	return to_mirred(a)->tcfm_ifindex;
+	return rtnl_dereference(to_mirred(a)->tcfm_dev);
 }
 
 #endif /* __NET_TC_MIR_H */
diff --git a/include/net/tcp.h b/include/net/tcp.h
index 50b21a49d870..58278669cc55 100644
--- a/include/net/tcp.h
+++ b/include/net/tcp.h
@@ -953,6 +953,7 @@ struct rate_sample {
 	u32  prior_in_flight;	/* in flight before this ACK */
 	bool is_app_limited;	/* is sample from packet with bubble in pipe? */
 	bool is_retrans;	/* is sample from retransmission? */
+	bool is_ack_delayed;	/* is this (likely) a delayed ACK? */
 };
 
 struct tcp_congestion_ops {
@@ -1507,8 +1508,7 @@ int tcp_md5_hash_key(struct tcp_md5sig_pool *hp,
 
 /* From tcp_fastopen.c */
 void tcp_fastopen_cache_get(struct sock *sk, u16 *mss,
-			    struct tcp_fastopen_cookie *cookie, int *syn_loss,
-			    unsigned long *last_syn_loss);
+			    struct tcp_fastopen_cookie *cookie);
 void tcp_fastopen_cache_set(struct sock *sk, u16 mss,
 			    struct tcp_fastopen_cookie *cookie, bool syn_lost,
 			    u16 try_exp);
@@ -1546,7 +1546,7 @@ extern unsigned int sysctl_tcp_fastopen_blackhole_timeout;
 void tcp_fastopen_active_disable(struct sock *sk);
 bool tcp_fastopen_active_should_disable(struct sock *sk);
 void tcp_fastopen_active_disable_ofo_check(struct sock *sk);
-void tcp_fastopen_active_timeout_reset(void);
+void tcp_fastopen_active_detect_blackhole(struct sock *sk, bool expired);
 
 /* Latencies incurred by various limits for a sender. They are
  * chronograph-like stats that are mutually exclusive.
@@ -2006,17 +2006,21 @@ void tcp_cleanup_ulp(struct sock *sk);
  * program loaded).
  */
 #ifdef CONFIG_BPF
-static inline int tcp_call_bpf(struct sock *sk, int op)
+static inline int tcp_call_bpf(struct sock *sk, int op, u32 nargs, u32 *args)
 {
 	struct bpf_sock_ops_kern sock_ops;
 	int ret;
 
-	if (sk_fullsock(sk))
+	memset(&sock_ops, 0, offsetof(struct bpf_sock_ops_kern, temp));
+	if (sk_fullsock(sk)) {
+		sock_ops.is_fullsock = 1;
 		sock_owned_by_me(sk);
+	}
 
-	memset(&sock_ops, 0, sizeof(sock_ops));
 	sock_ops.sk = sk;
 	sock_ops.op = op;
+	if (nargs > 0)
+		memcpy(sock_ops.args, args, nargs * sizeof(*args));
 
 	ret = BPF_CGROUP_RUN_PROG_SOCK_OPS(&sock_ops);
 	if (ret == 0)
@@ -2025,18 +2029,46 @@ static inline int tcp_call_bpf(struct sock *sk, int op)
 		ret = -1;
 	return ret;
 }
+
+static inline int tcp_call_bpf_2arg(struct sock *sk, int op, u32 arg1, u32 arg2)
+{
+	u32 args[2] = {arg1, arg2};
+
+	return tcp_call_bpf(sk, op, 2, args);
+}
+
+static inline int tcp_call_bpf_3arg(struct sock *sk, int op, u32 arg1, u32 arg2,
+				    u32 arg3)
+{
+	u32 args[3] = {arg1, arg2, arg3};
+
+	return tcp_call_bpf(sk, op, 3, args);
+}
+
 #else
-static inline int tcp_call_bpf(struct sock *sk, int op)
+static inline int tcp_call_bpf(struct sock *sk, int op, u32 nargs, u32 *args)
 {
 	return -EPERM;
 }
+
+static inline int tcp_call_bpf_2arg(struct sock *sk, int op, u32 arg1, u32 arg2)
+{
+	return -EPERM;
+}
+
+static inline int tcp_call_bpf_3arg(struct sock *sk, int op, u32 arg1, u32 arg2,
+				    u32 arg3)
+{
+	return -EPERM;
+}
+
 #endif
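
A sketch of how the new argument-carrying wrappers are meant to be
called (the caller is hypothetical; the argument list mirrors the
BPF_SOCK_OPS_RTO_CB documentation in uapi/linux/bpf.h):

static void example_report_rto(struct sock *sk, bool expired)
{
	struct inet_connection_sock *icsk = inet_csk(sk);

	/* Arg1: retransmit count, Arg2: current RTO, Arg3: expired? */
	tcp_call_bpf_3arg(sk, BPF_SOCK_OPS_RTO_CB,
			  icsk->icsk_retransmits, icsk->icsk_rto,
			  expired);
}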
 
 static inline u32 tcp_timeout_init(struct sock *sk)
 {
 	int timeout;
 
-	timeout = tcp_call_bpf(sk, BPF_SOCK_OPS_TIMEOUT_INIT);
+	timeout = tcp_call_bpf(sk, BPF_SOCK_OPS_TIMEOUT_INIT, 0, NULL);
 
 	if (timeout <= 0)
 		timeout = TCP_TIMEOUT_INIT;
@@ -2047,7 +2079,7 @@ static inline u32 tcp_rwnd_init_bpf(struct sock *sk)
 {
 	int rwnd;
 
-	rwnd = tcp_call_bpf(sk, BPF_SOCK_OPS_RWND_INIT);
+	rwnd = tcp_call_bpf(sk, BPF_SOCK_OPS_RWND_INIT, 0, NULL);
 
 	if (rwnd < 0)
 		rwnd = 0;
@@ -2056,7 +2088,7 @@ static inline u32 tcp_rwnd_init_bpf(struct sock *sk)
 
 static inline bool tcp_bpf_ca_needs_ecn(struct sock *sk)
 {
-	return (tcp_call_bpf(sk, BPF_SOCK_OPS_NEEDS_ECN) == 1);
+	return (tcp_call_bpf(sk, BPF_SOCK_OPS_NEEDS_ECN, 0, NULL) == 1);
 }
 
 #if IS_ENABLED(CONFIG_SMC)
diff --git a/include/net/tls.h b/include/net/tls.h
index 9185e53a743c..4913430ab807 100644
--- a/include/net/tls.h
+++ b/include/net/tls.h
@@ -36,6 +36,7 @@
 
 #include <linux/types.h>
 #include <asm/byteorder.h>
+#include <linux/crypto.h>
 #include <linux/socket.h>
 #include <linux/tcp.h>
 #include <net/tcp.h>
@@ -57,6 +58,7 @@
 
 struct tls_sw_context {
 	struct crypto_aead *aead_send;
+	struct crypto_wait async_wait;
 
 	/* Sending context */
 	char aad_space[TLS_AAD_SPACE_SIZE];
diff --git a/include/net/vxlan.h b/include/net/vxlan.h
index f96391e84a8a..ad73d8b3fcc2 100644
--- a/include/net/vxlan.h
+++ b/include/net/vxlan.h
@@ -301,7 +301,7 @@ static inline netdev_features_t vxlan_features_check(struct sk_buff *skb,
 		l4_hdr = ipv6_hdr(skb)->nexthdr;
 		break;
 	default:
-		return features;;
+		return features;
 	}
 
 	if ((l4_hdr == IPPROTO_UDP) &&
diff --git a/include/net/wext.h b/include/net/wext.h
index e51f067fdb3a..aa192a670304 100644
--- a/include/net/wext.h
+++ b/include/net/wext.h
@@ -7,7 +7,7 @@
 struct net;
 
 #ifdef CONFIG_WEXT_CORE
-int wext_handle_ioctl(struct net *net, struct iwreq *iwr, unsigned int cmd,
+int wext_handle_ioctl(struct net *net, unsigned int cmd,
 		      void __user *arg);
 int compat_wext_handle_ioctl(struct net *net, unsigned int cmd,
 			     unsigned long arg);
@@ -15,7 +15,7 @@ int compat_wext_handle_ioctl(struct net *net, unsigned int cmd,
 struct iw_statistics *get_wireless_stats(struct net_device *dev);
 int call_commit_handler(struct net_device *dev);
 #else
-static inline int wext_handle_ioctl(struct net *net, struct iwreq *iwr, unsigned int cmd,
+static inline int wext_handle_ioctl(struct net *net, unsigned int cmd,
 				    void __user *arg)
 {
 	return -EINVAL;
diff --git a/include/net/xdp.h b/include/net/xdp.h
new file mode 100644
index 000000000000..b2362ddfa694
--- /dev/null
+++ b/include/net/xdp.h
@@ -0,0 +1,48 @@
+/* include/net/xdp.h
+ *
+ * Copyright (c) 2017 Jesper Dangaard Brouer, Red Hat Inc.
+ * Released under terms in GPL version 2.  See COPYING.
+ */
+#ifndef __LINUX_NET_XDP_H__
+#define __LINUX_NET_XDP_H__
+
+/**
+ * DOC: XDP RX-queue information
+ *
+ * The XDP RX-queue info (xdp_rxq_info) is associated with the driver
+ * level RX-ring queues.  It is information that is specific to how
+ * the driver has configured a given RX-ring queue.
+ *
+ * Each xdp_buff frame received in the driver carries a (pointer)
+ * reference to this xdp_rxq_info structure.  This provides the XDP
+ * data-path read-access to RX-info for both the kernel and bpf-side
+ * (limited subset).
+ *
+ * For now, direct access is only safe while running in NAPI/softirq
+ * context.  The contents are read-mostly and must not be updated during
+ * driver NAPI/softirq poll.
+ *
+ * The driver usage API is a register and unregister API.
+ *
+ * The struct is not directly tied to the XDP prog.  A new XDP prog
+ * can be attached as long as it doesn't change the underlying
+ * RX-ring.  If the RX-ring does change significantly, the NIC driver
+ * naturally needs to stop the RX-ring before purging and reallocating
+ * memory.  In that process the driver MUST call unregister (which
+ * also applies to driver shutdown and unload).  The register API is
+ * also mandatory during RX-ring setup.
+ */
+
+struct xdp_rxq_info {
+	struct net_device *dev;
+	u32 queue_index;
+	u32 reg_state;
+} ____cacheline_aligned; /* perf critical, avoid false-sharing */
+
+int xdp_rxq_info_reg(struct xdp_rxq_info *xdp_rxq,
+		     struct net_device *dev, u32 queue_index);
+void xdp_rxq_info_unreg(struct xdp_rxq_info *xdp_rxq);
+void xdp_rxq_info_unused(struct xdp_rxq_info *xdp_rxq);
+bool xdp_rxq_info_is_reg(struct xdp_rxq_info *xdp_rxq);
+
+#endif /* __LINUX_NET_XDP_H__ */
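
A minimal driver-side sketch of the register/unregister contract from
the DOC block above (struct example_ring and both helpers are
hypothetical):

struct example_ring {
	struct xdp_rxq_info xdp_rxq;
	/* ... descriptor ring state ... */
};

static int example_ring_setup(struct example_ring *ring,
			      struct net_device *dev, u32 qidx)
{
	/* mandatory before the first xdp_buff from this ring */
	return xdp_rxq_info_reg(&ring->xdp_rxq, dev, qidx);
}

static void example_ring_teardown(struct example_ring *ring)
{
	/* mandatory before purging/reallocating the RX-ring memory */
	xdp_rxq_info_unreg(&ring->xdp_rxq);
}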
diff --git a/include/net/xfrm.h b/include/net/xfrm.h
index ae35991b5877..7d2077665c0b 100644
--- a/include/net/xfrm.h
+++ b/include/net/xfrm.h
@@ -968,7 +968,7 @@ static inline bool xfrm_sec_ctx_match(struct xfrm_sec_ctx *s1, struct xfrm_sec_c
 
 /* A struct encoding bundle of transformations to apply to some set of flow.
  *
- * dst->child points to the next element of bundle.
+ * xdst->child points to the next element of bundle.
 * dst->xfrm  points to an instance of a transformer.
  *
  * Due to unfortunate limitations of current routing cache, which we
@@ -984,6 +984,8 @@ struct xfrm_dst {
 		struct rt6_info		rt6;
 	} u;
 	struct dst_entry *route;
+	struct dst_entry *child;
+	struct dst_entry *path;
 	struct xfrm_policy *pols[XFRM_POLICY_TYPE_MAX];
 	int num_pols, num_xfrms;
 	u32 xfrm_genid;
@@ -994,7 +996,35 @@ struct xfrm_dst {
 	u32 path_cookie;
 };
 
+static inline struct dst_entry *xfrm_dst_path(const struct dst_entry *dst)
+{
+#ifdef CONFIG_XFRM
+	if (dst->xfrm) {
+		const struct xfrm_dst *xdst = (const struct xfrm_dst *) dst;
+
+		return xdst->path;
+	}
+#endif
+	return (struct dst_entry *) dst;
+}
+
+static inline struct dst_entry *xfrm_dst_child(const struct dst_entry *dst)
+{
+#ifdef CONFIG_XFRM
+	if (dst->xfrm) {
+		struct xfrm_dst *xdst = (struct xfrm_dst *) dst;
+		return xdst->child;
+	}
+#endif
+	return NULL;
+}
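
With the child pointer moved into struct xfrm_dst, a bundle walk looks
like this (illustrative loop, not part of the patch): xfrm_dst_child()
returns NULL once it reaches a dst without dst->xfrm set, so the plain
route is the last element visited:

static void example_walk_bundle(struct dst_entry *dst)
{
	for (; dst; dst = xfrm_dst_child(dst)) {
		/* inspect dst->xfrm, dst->dev, ... */
	}
}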
+
 #ifdef CONFIG_XFRM
+static inline void xfrm_dst_set_child(struct xfrm_dst *xdst, struct dst_entry *child)
+{
+	xdst->child = child;
+}
+
 static inline void xfrm_dst_destroy(struct xfrm_dst *xdst)
 {
 	xfrm_pols_put(xdst->pols, xdst->num_pols);
@@ -1021,6 +1051,7 @@ struct xfrm_offload {
 #define	XFRM_GSO_SEGMENT	16
 #define	XFRM_GRO		32
 #define	XFRM_ESP_NO_TRAILER	64
+#define	XFRM_DEV_RESUME		128
 
 	__u32			status;
 #define CRYPTO_SUCCESS				1
@@ -1847,34 +1878,53 @@ static inline struct xfrm_state *xfrm_input_state(struct sk_buff *skb)
 {
 	return skb->sp->xvec[skb->sp->len - 1];
 }
+#endif
+
 static inline struct xfrm_offload *xfrm_offload(struct sk_buff *skb)
 {
+#ifdef CONFIG_XFRM
 	struct sec_path *sp = skb->sp;
 
 	if (!sp || !sp->olen || sp->len != sp->olen)
 		return NULL;
 
 	return &sp->ovec[sp->olen - 1];
-}
+#else
+	return NULL;
 #endif
+}
 
 void __net_init xfrm_dev_init(void);
 
 #ifdef CONFIG_XFRM_OFFLOAD
-int validate_xmit_xfrm(struct sk_buff *skb, netdev_features_t features);
+void xfrm_dev_resume(struct sk_buff *skb);
+void xfrm_dev_backlog(struct softnet_data *sd);
+struct sk_buff *validate_xmit_xfrm(struct sk_buff *skb, netdev_features_t features, bool *again);
 int xfrm_dev_state_add(struct net *net, struct xfrm_state *x,
 		       struct xfrm_user_offload *xuo);
 bool xfrm_dev_offload_ok(struct sk_buff *skb, struct xfrm_state *x);
 
+static inline void xfrm_dev_state_advance_esn(struct xfrm_state *x)
+{
+	struct xfrm_state_offload *xso = &x->xso;
+
+	if (xso->dev && xso->dev->xfrmdev_ops->xdo_dev_state_advance_esn)
+		xso->dev->xfrmdev_ops->xdo_dev_state_advance_esn(x);
+}
+
 static inline bool xfrm_dst_offload_ok(struct dst_entry *dst)
 {
 	struct xfrm_state *x = dst->xfrm;
+	struct xfrm_dst *xdst;
 
 	if (!x || !x->type_offload)
 		return false;
 
-	if (x->xso.offload_handle && (x->xso.dev == dst->path->dev) &&
-	    !dst->child->xfrm)
+	xdst = (struct xfrm_dst *) dst;
+	if (!x->xso.offload_handle && !xdst->child->xfrm)
+		return true;
+	if (x->xso.offload_handle && (x->xso.dev == xfrm_dst_path(dst)->dev) &&
+	    !xdst->child->xfrm)
 		return true;
 
 	return false;
@@ -1894,15 +1944,24 @@ static inline void xfrm_dev_state_free(struct xfrm_state *x)
 	 struct net_device *dev = xso->dev;
 
 	if (dev && dev->xfrmdev_ops) {
-		dev->xfrmdev_ops->xdo_dev_state_free(x);
+		if (dev->xfrmdev_ops->xdo_dev_state_free)
+			dev->xfrmdev_ops->xdo_dev_state_free(x);
 		xso->dev = NULL;
 		dev_put(dev);
 	}
 }
 #else
-static inline int validate_xmit_xfrm(struct sk_buff *skb, netdev_features_t features)
+static inline void xfrm_dev_resume(struct sk_buff *skb)
 {
-	return 0;
+}
+
+static inline void xfrm_dev_backlog(struct softnet_data *sd)
+{
+}
+
+static inline struct sk_buff *validate_xmit_xfrm(struct sk_buff *skb, netdev_features_t features, bool *again)
+{
+	return skb;
 }
 
 static inline int xfrm_dev_state_add(struct net *net, struct xfrm_state *x, struct xfrm_user_offload *xuo)
@@ -1923,6 +1982,10 @@ static inline bool xfrm_dev_offload_ok(struct sk_buff *skb, struct xfrm_state *x
 	return false;
 }
 
+static inline void xfrm_dev_state_advance_esn(struct xfrm_state *x)
+{
+}
+
 static inline bool xfrm_dst_offload_ok(struct dst_entry *dst)
 {
 	return false;
diff --git a/include/trace/events/bridge.h b/include/trace/events/bridge.h
index 1bee3e7fdf32..8ea966448b58 100644
--- a/include/trace/events/bridge.h
+++ b/include/trace/events/bridge.h
@@ -82,8 +82,8 @@ TRACE_EVENT(fdb_delete,
 	TP_fast_assign(
 		__assign_str(br_dev, br->dev->name);
 		__assign_str(dev, f->dst ? f->dst->dev->name : "null");
-		memcpy(__entry->addr, f->addr.addr, ETH_ALEN);
-		__entry->vid = f->vlan_id;
+		memcpy(__entry->addr, f->key.addr.addr, ETH_ALEN);
+		__entry->vid = f->key.vlan_id;
 	),
 
 	TP_printk("br_dev %s dev %s addr %02x:%02x:%02x:%02x:%02x:%02x vid %u",
diff --git a/include/trace/events/net_probe_common.h b/include/trace/events/net_probe_common.h
new file mode 100644
index 000000000000..3930119cab08
--- /dev/null
+++ b/include/trace/events/net_probe_common.h
@@ -0,0 +1,44 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#if !defined(_TRACE_NET_PROBE_COMMON_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _TRACE_NET_PROBE_COMMON_H
+
+#define TP_STORE_ADDR_PORTS_V4(__entry, inet, sk)			\
+	do {								\
+		struct sockaddr_in *v4 = (void *)__entry->saddr;	\
+									\
+		v4->sin_family = AF_INET;				\
+		v4->sin_port = inet->inet_sport;			\
+		v4->sin_addr.s_addr = inet->inet_saddr;			\
+		v4 = (void *)__entry->daddr;				\
+		v4->sin_family = AF_INET;				\
+		v4->sin_port = inet->inet_dport;			\
+		v4->sin_addr.s_addr = inet->inet_daddr;			\
+	} while (0)
+
+#if IS_ENABLED(CONFIG_IPV6)
+
+#define TP_STORE_ADDR_PORTS(__entry, inet, sk)				\
+	do {								\
+		if (sk->sk_family == AF_INET6) {			\
+			struct sockaddr_in6 *v6 = (void *)__entry->saddr; \
+									\
+			v6->sin6_family = AF_INET6;			\
+			v6->sin6_port = inet->inet_sport;		\
+			v6->sin6_addr = inet6_sk(sk)->saddr;		\
+			v6 = (void *)__entry->daddr;			\
+			v6->sin6_family = AF_INET6;			\
+			v6->sin6_port = inet->inet_dport;		\
+			v6->sin6_addr = sk->sk_v6_daddr;		\
+		} else							\
+			TP_STORE_ADDR_PORTS_V4(__entry, inet, sk);	\
+	} while (0)
+
+#else
+
+#define TP_STORE_ADDR_PORTS(__entry, inet, sk)		\
+	TP_STORE_ADDR_PORTS_V4(__entry, inet, sk)
+
+#endif
+
+#endif
diff --git a/include/trace/events/sctp.h b/include/trace/events/sctp.h
new file mode 100644
index 000000000000..7475c7be165a
--- /dev/null
+++ b/include/trace/events/sctp.h
@@ -0,0 +1,99 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM sctp
+
+#if !defined(_TRACE_SCTP_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _TRACE_SCTP_H
+
+#include <net/sctp/structs.h>
+#include <linux/tracepoint.h>
+
+TRACE_EVENT(sctp_probe_path,
+
+	TP_PROTO(struct sctp_transport *sp,
+		 const struct sctp_association *asoc),
+
+	TP_ARGS(sp, asoc),
+
+	TP_STRUCT__entry(
+		__field(__u64, asoc)
+		__field(__u32, primary)
+		__array(__u8, ipaddr, sizeof(union sctp_addr))
+		__field(__u32, state)
+		__field(__u32, cwnd)
+		__field(__u32, ssthresh)
+		__field(__u32, flight_size)
+		__field(__u32, partial_bytes_acked)
+		__field(__u32, pathmtu)
+	),
+
+	TP_fast_assign(
+		__entry->asoc = (unsigned long)asoc;
+		__entry->primary = (sp == asoc->peer.primary_path);
+		memcpy(__entry->ipaddr, &sp->ipaddr, sizeof(union sctp_addr));
+		__entry->state = sp->state;
+		__entry->cwnd = sp->cwnd;
+		__entry->ssthresh = sp->ssthresh;
+		__entry->flight_size = sp->flight_size;
+		__entry->partial_bytes_acked = sp->partial_bytes_acked;
+		__entry->pathmtu = sp->pathmtu;
+	),
+
+	TP_printk("asoc=%#llx%s ipaddr=%pISpc state=%u cwnd=%u ssthresh=%u "
+		  "flight_size=%u partial_bytes_acked=%u pathmtu=%u",
+		  __entry->asoc, __entry->primary ? "(*)" : "",
+		  __entry->ipaddr, __entry->state, __entry->cwnd,
+		  __entry->ssthresh, __entry->flight_size,
+		  __entry->partial_bytes_acked, __entry->pathmtu)
+);
+
+TRACE_EVENT(sctp_probe,
+
+	TP_PROTO(const struct sctp_endpoint *ep,
+		 const struct sctp_association *asoc,
+		 struct sctp_chunk *chunk),
+
+	TP_ARGS(ep, asoc, chunk),
+
+	TP_STRUCT__entry(
+		__field(__u64, asoc)
+		__field(__u32, mark)
+		__field(__u16, bind_port)
+		__field(__u16, peer_port)
+		__field(__u32, pathmtu)
+		__field(__u32, rwnd)
+		__field(__u16, unack_data)
+	),
+
+	TP_fast_assign(
+		struct sk_buff *skb = chunk->skb;
+
+		__entry->asoc = (unsigned long)asoc;
+		__entry->mark = skb->mark;
+		__entry->bind_port = ep->base.bind_addr.port;
+		__entry->peer_port = asoc->peer.port;
+		__entry->pathmtu = asoc->pathmtu;
+		__entry->rwnd = asoc->peer.rwnd;
+		__entry->unack_data = asoc->unack_data;
+
+		if (trace_sctp_probe_path_enabled()) {
+			struct sctp_transport *sp;
+
+			list_for_each_entry(sp, &asoc->peer.transport_addr_list,
+					    transports) {
+				trace_sctp_probe_path(sp, asoc);
+			}
+		}
+	),
+
+	TP_printk("asoc=%#llx mark=%#x bind_port=%d peer_port=%d pathmtu=%d "
+		  "rwnd=%u unack_data=%d",
+		  __entry->asoc, __entry->mark, __entry->bind_port,
+		  __entry->peer_port, __entry->pathmtu, __entry->rwnd,
+		  __entry->unack_data)
+);
+
+#endif /* _TRACE_SCTP_H */
+
+/* This part must be outside protection */
+#include <trace/define_trace.h>
diff --git a/include/trace/events/sock.h b/include/trace/events/sock.h
index ec4dade24466..3176a3931107 100644
--- a/include/trace/events/sock.h
+++ b/include/trace/events/sock.h
@@ -6,7 +6,58 @@
 #define _TRACE_SOCK_H
 
 #include <net/sock.h>
+#include <net/ipv6.h>
 #include <linux/tracepoint.h>
+#include <linux/ipv6.h>
+#include <linux/tcp.h>
+
+#define family_names			\
+		EM(AF_INET)				\
+		EMe(AF_INET6)
+
+/* The protocol traced by inet_sock_set_state */
+#define inet_protocol_names		\
+		EM(IPPROTO_TCP)			\
+		EM(IPPROTO_DCCP)		\
+		EMe(IPPROTO_SCTP)
+
+#define tcp_state_names			\
+		EM(TCP_ESTABLISHED)		\
+		EM(TCP_SYN_SENT)		\
+		EM(TCP_SYN_RECV)		\
+		EM(TCP_FIN_WAIT1)		\
+		EM(TCP_FIN_WAIT2)		\
+		EM(TCP_TIME_WAIT)		\
+		EM(TCP_CLOSE)			\
+		EM(TCP_CLOSE_WAIT)		\
+		EM(TCP_LAST_ACK)		\
+		EM(TCP_LISTEN)			\
+		EM(TCP_CLOSING)			\
+		EMe(TCP_NEW_SYN_RECV)
+
+/* enums need to be exported to user space */
+#undef EM
+#undef EMe
+#define EM(a)       TRACE_DEFINE_ENUM(a);
+#define EMe(a)      TRACE_DEFINE_ENUM(a);
+
+family_names
+inet_protocol_names
+tcp_state_names
+
+#undef EM
+#undef EMe
+#define EM(a)       { a, #a },
+#define EMe(a)      { a, #a }
+
+#define show_family_name(val)			\
+	__print_symbolic(val, family_names)
+
+#define show_inet_protocol_name(val)    \
+	__print_symbolic(val, inet_protocol_names)
+
+#define show_tcp_state_name(val)        \
+	__print_symbolic(val, tcp_state_names)
 
 TRACE_EVENT(sock_rcvqueue_full,
 
@@ -63,6 +114,72 @@ TRACE_EVENT(sock_exceed_buf_limit,
 		__entry->rmem_alloc)
 );
 
+TRACE_EVENT(inet_sock_set_state,
+
+	TP_PROTO(const struct sock *sk, const int oldstate, const int newstate),
+
+	TP_ARGS(sk, oldstate, newstate),
+
+	TP_STRUCT__entry(
+		__field(const void *, skaddr)
+		__field(int, oldstate)
+		__field(int, newstate)
+		__field(__u16, sport)
+		__field(__u16, dport)
+		__field(__u16, family)
+		__field(__u8, protocol)
+		__array(__u8, saddr, 4)
+		__array(__u8, daddr, 4)
+		__array(__u8, saddr_v6, 16)
+		__array(__u8, daddr_v6, 16)
+	),
+
+	TP_fast_assign(
+		struct inet_sock *inet = inet_sk(sk);
+		struct in6_addr *pin6;
+		__be32 *p32;
+
+		__entry->skaddr = sk;
+		__entry->oldstate = oldstate;
+		__entry->newstate = newstate;
+
+		__entry->family = sk->sk_family;
+		__entry->protocol = sk->sk_protocol;
+		__entry->sport = ntohs(inet->inet_sport);
+		__entry->dport = ntohs(inet->inet_dport);
+
+		p32 = (__be32 *) __entry->saddr;
+		*p32 = inet->inet_saddr;
+
+		p32 = (__be32 *) __entry->daddr;
+		*p32 =  inet->inet_daddr;
+
+#if IS_ENABLED(CONFIG_IPV6)
+		if (sk->sk_family == AF_INET6) {
+			pin6 = (struct in6_addr *)__entry->saddr_v6;
+			*pin6 = sk->sk_v6_rcv_saddr;
+			pin6 = (struct in6_addr *)__entry->daddr_v6;
+			*pin6 = sk->sk_v6_daddr;
+		} else
+#endif
+		{
+			pin6 = (struct in6_addr *)__entry->saddr_v6;
+			ipv6_addr_set_v4mapped(inet->inet_saddr, pin6);
+			pin6 = (struct in6_addr *)__entry->daddr_v6;
+			ipv6_addr_set_v4mapped(inet->inet_daddr, pin6);
+		}
+	),
+
+	TP_printk("family=%s protocol=%s sport=%hu dport=%hu saddr=%pI4 daddr=%pI4 saddrv6=%pI6c daddrv6=%pI6c oldstate=%s newstate=%s",
+			show_family_name(__entry->family),
+			show_inet_protocol_name(__entry->protocol),
+			__entry->sport, __entry->dport,
+			__entry->saddr, __entry->daddr,
+			__entry->saddr_v6, __entry->daddr_v6,
+			show_tcp_state_name(__entry->oldstate),
+			show_tcp_state_name(__entry->newstate))
+);
+
 #endif /* _TRACE_SOCK_H */
 
 /* This part must be outside protection */
diff --git a/include/trace/events/tcp.h b/include/trace/events/tcp.h
index ab34c561f26b..878b2be7ce77 100644
--- a/include/trace/events/tcp.h
+++ b/include/trace/events/tcp.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
 #undef TRACE_SYSTEM
 #define TRACE_SYSTEM tcp
 
@@ -8,22 +9,7 @@
 #include <linux/tcp.h>
 #include <linux/tracepoint.h>
 #include <net/ipv6.h>
-
-#define tcp_state_name(state)	{ state, #state }
-#define show_tcp_state_name(val)			\
-	__print_symbolic(val,				\
-		tcp_state_name(TCP_ESTABLISHED),	\
-		tcp_state_name(TCP_SYN_SENT),		\
-		tcp_state_name(TCP_SYN_RECV),		\
-		tcp_state_name(TCP_FIN_WAIT1),		\
-		tcp_state_name(TCP_FIN_WAIT2),		\
-		tcp_state_name(TCP_TIME_WAIT),		\
-		tcp_state_name(TCP_CLOSE),		\
-		tcp_state_name(TCP_CLOSE_WAIT),		\
-		tcp_state_name(TCP_LAST_ACK),		\
-		tcp_state_name(TCP_LISTEN),		\
-		tcp_state_name(TCP_CLOSING),		\
-		tcp_state_name(TCP_NEW_SYN_RECV))
+#include <net/tcp.h>
 
 #define TP_STORE_V4MAPPED(__entry, saddr, daddr)		\
 	do {							\
@@ -270,6 +256,64 @@ TRACE_EVENT(tcp_retransmit_synack,
 		  __entry->saddr_v6, __entry->daddr_v6)
 );
 
+#include <trace/events/net_probe_common.h>
+
+TRACE_EVENT(tcp_probe,
+
+	TP_PROTO(struct sock *sk, struct sk_buff *skb),
+
+	TP_ARGS(sk, skb),
+
+	TP_STRUCT__entry(
+		/* sockaddr_in6 is always bigger than sockaddr_in */
+		__array(__u8, saddr, sizeof(struct sockaddr_in6))
+		__array(__u8, daddr, sizeof(struct sockaddr_in6))
+		__field(__u16, sport)
+		__field(__u16, dport)
+		__field(__u32, mark)
+		__field(__u16, length)
+		__field(__u32, snd_nxt)
+		__field(__u32, snd_una)
+		__field(__u32, snd_cwnd)
+		__field(__u32, ssthresh)
+		__field(__u32, snd_wnd)
+		__field(__u32, srtt)
+		__field(__u32, rcv_wnd)
+	),
+
+	TP_fast_assign(
+		const struct tcp_sock *tp = tcp_sk(sk);
+		const struct inet_sock *inet = inet_sk(sk);
+
+		memset(__entry->saddr, 0, sizeof(struct sockaddr_in6));
+		memset(__entry->daddr, 0, sizeof(struct sockaddr_in6));
+
+		TP_STORE_ADDR_PORTS(__entry, inet, sk);
+
+		/* For filtering use */
+		__entry->sport = ntohs(inet->inet_sport);
+		__entry->dport = ntohs(inet->inet_dport);
+		__entry->mark = skb->mark;
+
+		__entry->length = skb->len;
+		__entry->snd_nxt = tp->snd_nxt;
+		__entry->snd_una = tp->snd_una;
+		__entry->snd_cwnd = tp->snd_cwnd;
+		__entry->snd_wnd = tp->snd_wnd;
+		__entry->rcv_wnd = tp->rcv_wnd;
+		__entry->ssthresh = tcp_current_ssthresh(sk);
+		__entry->srtt = tp->srtt_us >> 3;
+	),
+
+	TP_printk("src=%pISpc dest=%pISpc mark=%#x length=%d snd_nxt=%#x "
+		  "snd_una=%#x snd_cwnd=%u ssthresh=%u snd_wnd=%u srtt=%u "
+		  "rcv_wnd=%u",
+		  __entry->saddr, __entry->daddr, __entry->mark,
+		  __entry->length, __entry->snd_nxt, __entry->snd_una,
+		  __entry->snd_cwnd, __entry->ssthresh, __entry->snd_wnd,
+		  __entry->srtt, __entry->rcv_wnd)
+);
+
 #endif /* _TRACE_TCP_H */
 
 /* This part must be outside protection */
diff --git a/net/batman-adv/packet.h b/include/uapi/linux/batadv_packet.h
index 8e8a5db197cb..5cb360be2a11 100644
--- a/net/batman-adv/packet.h
+++ b/include/uapi/linux/batadv_packet.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: (GPL-2.0 WITH Linux-syscall-note) */
 /* Copyright (C) 2007-2017  B.A.T.M.A.N. contributors:
  *
  * Marek Lindner, Simon Wunderlich
@@ -15,13 +16,20 @@
  * along with this program; if not, see <http://www.gnu.org/licenses/>.
  */
 
-#ifndef _NET_BATMAN_ADV_PACKET_H_
-#define _NET_BATMAN_ADV_PACKET_H_
+#ifndef _UAPI_LINUX_BATADV_PACKET_H_
+#define _UAPI_LINUX_BATADV_PACKET_H_
 
 #include <asm/byteorder.h>
+#include <linux/if_ether.h>
 #include <linux/types.h>
 
-#define batadv_tp_is_error(n) ((u8)(n) > 127 ? 1 : 0)
+/**
+ * batadv_tp_is_error() - Check throughput meter return code for error
+ * @n: throughput meter return code
+ *
+ * Return: 0 when no error was detected, != 0 otherwise
+ */
+#define batadv_tp_is_error(n) ((__u8)(n) > 127 ? 1 : 0)
 
 /**
  * enum batadv_packettype - types for batman-adv encapsulated packets
@@ -83,12 +91,20 @@ enum batadv_subtype {
  *     one hop neighbor on the interface where it was originally received.
  */
 enum batadv_iv_flags {
-	BATADV_NOT_BEST_NEXT_HOP   = BIT(0),
-	BATADV_PRIMARIES_FIRST_HOP = BIT(1),
-	BATADV_DIRECTLINK          = BIT(2),
+	BATADV_NOT_BEST_NEXT_HOP   = 1UL << 0,
+	BATADV_PRIMARIES_FIRST_HOP = 1UL << 1,
+	BATADV_DIRECTLINK          = 1UL << 2,
 };
 
-/* ICMP message types */
+/**
+ * enum batadv_icmp_packettype - ICMP message types
+ * @BATADV_ECHO_REPLY: success reply to BATADV_ECHO_REQUEST
+ * @BATADV_DESTINATION_UNREACHABLE: failure when route to destination not found
+ * @BATADV_ECHO_REQUEST: request BATADV_ECHO_REPLY from destination
+ * @BATADV_TTL_EXCEEDED: error after BATADV_ECHO_REQUEST traversed too many hops
+ * @BATADV_PARAMETER_PROBLEM: return code for malformed messages
+ * @BATADV_TP: throughput meter packet
+ */
 enum batadv_icmp_packettype {
 	BATADV_ECHO_REPLY	       = 0,
 	BATADV_DESTINATION_UNREACHABLE = 3,
@@ -106,9 +122,9 @@ enum batadv_icmp_packettype {
  * @BATADV_MCAST_WANT_ALL_IPV6: we want all IPv6 multicast packets
  */
 enum batadv_mcast_flags {
-	BATADV_MCAST_WANT_ALL_UNSNOOPABLES	= BIT(0),
-	BATADV_MCAST_WANT_ALL_IPV4		= BIT(1),
-	BATADV_MCAST_WANT_ALL_IPV6		= BIT(2),
+	BATADV_MCAST_WANT_ALL_UNSNOOPABLES	= 1UL << 0,
+	BATADV_MCAST_WANT_ALL_IPV4		= 1UL << 1,
+	BATADV_MCAST_WANT_ALL_IPV6		= 1UL << 2,
 };
 
 /* tt data subtypes */
@@ -122,10 +138,10 @@ enum batadv_mcast_flags {
  * @BATADV_TT_FULL_TABLE: contains full table to replace existing table
  */
 enum batadv_tt_data_flags {
-	BATADV_TT_OGM_DIFF   = BIT(0),
-	BATADV_TT_REQUEST    = BIT(1),
-	BATADV_TT_RESPONSE   = BIT(2),
-	BATADV_TT_FULL_TABLE = BIT(4),
+	BATADV_TT_OGM_DIFF   = 1UL << 0,
+	BATADV_TT_REQUEST    = 1UL << 1,
+	BATADV_TT_RESPONSE   = 1UL << 2,
+	BATADV_TT_FULL_TABLE = 1UL << 4,
 };
 
 /**
@@ -133,10 +149,17 @@ enum batadv_tt_data_flags {
  * @BATADV_VLAN_HAS_TAG: whether the field contains a valid vlan tag or not
  */
 enum batadv_vlan_flags {
-	BATADV_VLAN_HAS_TAG	= BIT(15),
+	BATADV_VLAN_HAS_TAG	= 1UL << 15,
 };
 
-/* claim frame types for the bridge loop avoidance */
+/**
+ * enum batadv_bla_claimframe - claim frame types for the bridge loop avoidance
+ * @BATADV_CLAIM_TYPE_CLAIM: claim of a client mac address
+ * @BATADV_CLAIM_TYPE_UNCLAIM: unclaim of a client mac address
+ * @BATADV_CLAIM_TYPE_ANNOUNCE: announcement of backbone with current crc
+ * @BATADV_CLAIM_TYPE_REQUEST: request of full claim table
+ * @BATADV_CLAIM_TYPE_LOOPDETECT: mesh-traversing loop detect packet
+ */
 enum batadv_bla_claimframe {
 	BATADV_CLAIM_TYPE_CLAIM		= 0x00,
 	BATADV_CLAIM_TYPE_UNCLAIM	= 0x01,
@@ -168,8 +191,8 @@ enum batadv_tvlv_type {
  * transport the claim type and the group id
  */
 struct batadv_bla_claim_dst {
-	u8     magic[3];	/* FF:43:05 */
-	u8     type;		/* bla_claimframe */
+	__u8   magic[3];	/* FF:43:05 */
+	__u8   type;		/* bla_claimframe */
 	__be16 group;		/* group id */
 };
 
@@ -189,15 +212,15 @@ struct batadv_bla_claim_dst {
  * @tvlv_len: length of tvlv data following the ogm header
  */
 struct batadv_ogm_packet {
-	u8     packet_type;
-	u8     version;
-	u8     ttl;
-	u8     flags;
+	__u8   packet_type;
+	__u8   version;
+	__u8   ttl;
+	__u8   flags;
 	__be32 seqno;
-	u8     orig[ETH_ALEN];
-	u8     prev_sender[ETH_ALEN];
-	u8     reserved;
-	u8     tq;
+	__u8   orig[ETH_ALEN];
+	__u8   prev_sender[ETH_ALEN];
+	__u8   reserved;
+	__u8   tq;
 	__be16 tvlv_len;
 	/* __packed is not needed as the struct size is divisible by 4,
 	 * and the largest data type in this struct has a size of 4.
@@ -218,12 +241,12 @@ struct batadv_ogm_packet {
  * @throughput: the currently flooded path throughput
  */
 struct batadv_ogm2_packet {
-	u8     packet_type;
-	u8     version;
-	u8     ttl;
-	u8     flags;
+	__u8   packet_type;
+	__u8   version;
+	__u8   ttl;
+	__u8   flags;
 	__be32 seqno;
-	u8     orig[ETH_ALEN];
+	__u8   orig[ETH_ALEN];
 	__be16 tvlv_len;
 	__be32 throughput;
 	/* __packed is not needed as the struct size is divisible by 4,
@@ -242,9 +265,9 @@ struct batadv_ogm2_packet {
  * @elp_interval: currently used ELP sending interval in ms
  */
 struct batadv_elp_packet {
-	u8     packet_type;
-	u8     version;
-	u8     orig[ETH_ALEN];
+	__u8   packet_type;
+	__u8   version;
+	__u8   orig[ETH_ALEN];
 	__be32 seqno;
 	__be32 elp_interval;
 };
@@ -267,14 +290,14 @@ struct batadv_elp_packet {
  * members are padded the same way as they are in real packets.
  */
 struct batadv_icmp_header {
-	u8 packet_type;
-	u8 version;
-	u8 ttl;
-	u8 msg_type; /* see ICMP message types above */
-	u8 dst[ETH_ALEN];
-	u8 orig[ETH_ALEN];
-	u8 uid;
-	u8 align[3];
+	__u8 packet_type;
+	__u8 version;
+	__u8 ttl;
+	__u8 msg_type; /* see ICMP message types above */
+	__u8 dst[ETH_ALEN];
+	__u8 orig[ETH_ALEN];
+	__u8 uid;
+	__u8 align[3];
 };
 
 /**
@@ -290,14 +313,14 @@ struct batadv_icmp_header {
  * @seqno: ICMP sequence number
  */
 struct batadv_icmp_packet {
-	u8     packet_type;
-	u8     version;
-	u8     ttl;
-	u8     msg_type; /* see ICMP message types above */
-	u8     dst[ETH_ALEN];
-	u8     orig[ETH_ALEN];
-	u8     uid;
-	u8     reserved;
+	__u8   packet_type;
+	__u8   version;
+	__u8   ttl;
+	__u8   msg_type; /* see ICMP message types above */
+	__u8   dst[ETH_ALEN];
+	__u8   orig[ETH_ALEN];
+	__u8   uid;
+	__u8   reserved;
 	__be16 seqno;
 };
 
@@ -319,15 +342,15 @@ struct batadv_icmp_packet {
  *  store it using network order
  */
 struct batadv_icmp_tp_packet {
-	u8  packet_type;
-	u8  version;
-	u8  ttl;
-	u8  msg_type; /* see ICMP message types above */
-	u8  dst[ETH_ALEN];
-	u8  orig[ETH_ALEN];
-	u8  uid;
-	u8  subtype;
-	u8  session[2];
+	__u8   packet_type;
+	__u8   version;
+	__u8   ttl;
+	__u8   msg_type; /* see ICMP message types above */
+	__u8   dst[ETH_ALEN];
+	__u8   orig[ETH_ALEN];
+	__u8   uid;
+	__u8   subtype;
+	__u8   session[2];
 	__be32 seqno;
 	__be32 timestamp;
 };
@@ -358,16 +381,16 @@ enum batadv_icmp_tp_subtype {
  * @rr: route record array
  */
 struct batadv_icmp_packet_rr {
-	u8     packet_type;
-	u8     version;
-	u8     ttl;
-	u8     msg_type; /* see ICMP message types above */
-	u8     dst[ETH_ALEN];
-	u8     orig[ETH_ALEN];
-	u8     uid;
-	u8     rr_cur;
+	__u8   packet_type;
+	__u8   version;
+	__u8   ttl;
+	__u8   msg_type; /* see ICMP message types above */
+	__u8   dst[ETH_ALEN];
+	__u8   orig[ETH_ALEN];
+	__u8   uid;
+	__u8   rr_cur;
 	__be16 seqno;
-	u8     rr[BATADV_RR_LEN][ETH_ALEN];
+	__u8   rr[BATADV_RR_LEN][ETH_ALEN];
 };
 
 #define BATADV_ICMP_MAX_PACKET_SIZE	sizeof(struct batadv_icmp_packet_rr)
@@ -393,11 +416,11 @@ struct batadv_icmp_packet_rr {
  * @dest: originator destination of the unicast packet
  */
 struct batadv_unicast_packet {
-	u8 packet_type;
-	u8 version;
-	u8 ttl;
-	u8 ttvn; /* destination translation table version number */
-	u8 dest[ETH_ALEN];
+	__u8 packet_type;
+	__u8 version;
+	__u8 ttl;
+	__u8 ttvn; /* destination translation table version number */
+	__u8 dest[ETH_ALEN];
 	/* "4 bytes boundary + 2 bytes" long to make the payload after the
 	 * following ethernet header again 4 bytes boundary aligned
 	 */
@@ -412,9 +435,9 @@ struct batadv_unicast_packet {
  */
 struct batadv_unicast_4addr_packet {
 	struct batadv_unicast_packet u;
-	u8 src[ETH_ALEN];
-	u8 subtype;
-	u8 reserved;
+	__u8 src[ETH_ALEN];
+	__u8 subtype;
+	__u8 reserved;
 	/* "4 bytes boundary + 2 bytes" long to make the payload after the
 	 * following ethernet header again 4 bytes boundary aligned
 	 */
@@ -434,22 +457,22 @@ struct batadv_unicast_4addr_packet {
  * @total_size: size of the merged packet
  */
 struct batadv_frag_packet {
-	u8     packet_type;
-	u8     version;  /* batman version field */
-	u8     ttl;
+	__u8   packet_type;
+	__u8   version;  /* batman version field */
+	__u8   ttl;
 #if defined(__BIG_ENDIAN_BITFIELD)
-	u8     no:4;
-	u8     priority:3;
-	u8     reserved:1;
+	__u8   no:4;
+	__u8   priority:3;
+	__u8   reserved:1;
 #elif defined(__LITTLE_ENDIAN_BITFIELD)
-	u8     reserved:1;
-	u8     priority:3;
-	u8     no:4;
+	__u8   reserved:1;
+	__u8   priority:3;
+	__u8   no:4;
 #else
 #error "unknown bitfield endianness"
 #endif
-	u8     dest[ETH_ALEN];
-	u8     orig[ETH_ALEN];
+	__u8   dest[ETH_ALEN];
+	__u8   orig[ETH_ALEN];
 	__be16 seqno;
 	__be16 total_size;
 };
@@ -464,12 +487,12 @@ struct batadv_frag_packet {
  * @orig: originator of the broadcast packet
  */
 struct batadv_bcast_packet {
-	u8     packet_type;
-	u8     version;  /* batman version field */
-	u8     ttl;
-	u8     reserved;
+	__u8   packet_type;
+	__u8   version;  /* batman version field */
+	__u8   ttl;
+	__u8   reserved;
 	__be32 seqno;
-	u8     orig[ETH_ALEN];
+	__u8   orig[ETH_ALEN];
 	/* "4 bytes boundary + 2 bytes" long to make the payload after the
 	 * following ethernet header again 4 bytes boundary aligned
 	 */
@@ -493,19 +516,19 @@ struct batadv_bcast_packet {
  * @coded_len: length of network coded part of the payload
  */
 struct batadv_coded_packet {
-	u8     packet_type;
-	u8     version;  /* batman version field */
-	u8     ttl;
-	u8     first_ttvn;
-	/* u8  first_dest[ETH_ALEN]; - saved in mac header destination */
-	u8     first_source[ETH_ALEN];
-	u8     first_orig_dest[ETH_ALEN];
+	__u8   packet_type;
+	__u8   version;  /* batman version field */
+	__u8   ttl;
+	__u8   first_ttvn;
+	/* __u8 first_dest[ETH_ALEN]; - saved in mac header destination */
+	__u8   first_source[ETH_ALEN];
+	__u8   first_orig_dest[ETH_ALEN];
 	__be32 first_crc;
-	u8     second_ttl;
-	u8     second_ttvn;
-	u8     second_dest[ETH_ALEN];
-	u8     second_source[ETH_ALEN];
-	u8     second_orig_dest[ETH_ALEN];
+	__u8   second_ttl;
+	__u8   second_ttvn;
+	__u8   second_dest[ETH_ALEN];
+	__u8   second_source[ETH_ALEN];
+	__u8   second_orig_dest[ETH_ALEN];
 	__be32 second_crc;
 	__be16 coded_len;
 };
@@ -524,14 +547,14 @@ struct batadv_coded_packet {
  * @align: 2 bytes to align the header to a 4 byte boundary
  */
 struct batadv_unicast_tvlv_packet {
-	u8     packet_type;
-	u8     version;  /* batman version field */
-	u8     ttl;
-	u8     reserved;
-	u8     dst[ETH_ALEN];
-	u8     src[ETH_ALEN];
+	__u8   packet_type;
+	__u8   version;  /* batman version field */
+	__u8   ttl;
+	__u8   reserved;
+	__u8   dst[ETH_ALEN];
+	__u8   src[ETH_ALEN];
 	__be16 tvlv_len;
-	u16    align;
+	__u16  align;
 };
 
 /**
@@ -541,8 +564,8 @@ struct batadv_unicast_tvlv_packet {
  * @len: tvlv container length
  */
 struct batadv_tvlv_hdr {
-	u8     type;
-	u8     version;
+	__u8   type;
+	__u8   version;
 	__be16 len;
 };
 
@@ -565,8 +588,8 @@ struct batadv_tvlv_gateway_data {
  *  one batadv_tvlv_tt_vlan_data object per announced vlan
  */
 struct batadv_tvlv_tt_data {
-	u8     flags;
-	u8     ttvn;
+	__u8   flags;
+	__u8   ttvn;
 	__be16 num_vlan;
 };
 
@@ -580,7 +603,7 @@ struct batadv_tvlv_tt_data {
 struct batadv_tvlv_tt_vlan_data {
 	__be32 crc;
 	__be16 vid;
-	u16    reserved;
+	__u16  reserved;
 };
 
 /**
@@ -592,9 +615,9 @@ struct batadv_tvlv_tt_vlan_data {
  * @vid: VLAN identifier
  */
 struct batadv_tvlv_tt_change {
-	u8     flags;
-	u8     reserved[3];
-	u8     addr[ETH_ALEN];
+	__u8   flags;
+	__u8   reserved[3];
+	__u8   addr[ETH_ALEN];
 	__be16 vid;
 };
 
@@ -604,7 +627,7 @@ struct batadv_tvlv_tt_change {
  * @vid: VLAN identifier
  */
 struct batadv_tvlv_roam_adv {
-	u8     client[ETH_ALEN];
+	__u8   client[ETH_ALEN];
 	__be16 vid;
 };
 
@@ -614,8 +637,8 @@ struct batadv_tvlv_roam_adv {
  * @reserved: reserved field
  */
 struct batadv_tvlv_mcast_data {
-	u8 flags;
-	u8 reserved[3];
+	__u8 flags;
+	__u8 reserved[3];
 };
 
-#endif /* _NET_BATMAN_ADV_PACKET_H_ */
+#endif /* _UAPI_LINUX_BATADV_PACKET_H_ */
diff --git a/include/uapi/linux/batman_adv.h b/include/uapi/linux/batman_adv.h
index efd641c8a5d6..ae00c99cbed0 100644
--- a/include/uapi/linux/batman_adv.h
+++ b/include/uapi/linux/batman_adv.h
@@ -1,18 +1,25 @@
+/* SPDX-License-Identifier: MIT */
 /* Copyright (C) 2016-2017  B.A.T.M.A.N. contributors:
  *
  * Matthias Schiffer
  *
- * Permission to use, copy, modify, and/or distribute this software for any
- * purpose with or without fee is hereby granted, provided that the above
- * copyright notice and this permission notice appear in all copies.
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
  *
- * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
- * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
- * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
- * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
- * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
- * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
- * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
  */
 
 #ifndef _UAPI_LINUX_BATMAN_ADV_H_
diff --git a/include/uapi/linux/bpf.h b/include/uapi/linux/bpf.h
index 4c223ab30293..db6bdc375126 100644
--- a/include/uapi/linux/bpf.h
+++ b/include/uapi/linux/bpf.h
@@ -17,7 +17,7 @@
 #define BPF_ALU64	0x07	/* alu mode in double word width */
 
 /* ld/ldx fields */
-#define BPF_DW		0x18	/* double word */
+#define BPF_DW		0x18	/* double word (64-bit) */
 #define BPF_XADD	0xc0	/* exclusive add */
 
 /* alu/jmp fields */
@@ -197,8 +197,14 @@ enum bpf_attach_type {
  */
 #define BPF_F_STRICT_ALIGNMENT	(1U << 0)
 
+/* when bpf_ldimm64->src_reg == BPF_PSEUDO_MAP_FD, bpf_ldimm64->imm == fd */
 #define BPF_PSEUDO_MAP_FD	1
 
+/* when bpf_call->src_reg == BPF_PSEUDO_CALL, bpf_call->imm == pc-relative
+ * offset to another bpf function
+ */
+#define BPF_PSEUDO_CALL		1
+
 /* flags for BPF_MAP_UPDATE_ELEM command */
 #define BPF_ANY		0 /* create new element or update existing */
 #define BPF_NOEXIST	1 /* create new element if it didn't exist */
@@ -239,6 +245,7 @@ union bpf_attr {
 					 * BPF_F_NUMA_NODE is set).
 					 */
 		char	map_name[BPF_OBJ_NAME_LEN];
+		__u32	map_ifindex;	/* ifindex of netdev to create on */
 	};
 
 	struct { /* anonymous struct used by BPF_MAP_*_ELEM commands */
@@ -635,6 +642,14 @@ union bpf_attr {
  *     @optlen: length of optval in bytes
  *     Return: 0 or negative error
  *
+ * int bpf_sock_ops_cb_flags_set(bpf_sock_ops, flags)
+ *     Set callback flags for sock_ops
+ *     @bpf_sock_ops: pointer to bpf_sock_ops_kern struct
+ *     @flags: flags value
+ *     Return: 0 for no error
+ *             -EINVAL if there is no full tcp socket
+ *             otherwise, the bits in flags that are not supported by
+ *             the current kernel
+ *
  * int bpf_skb_adjust_room(skb, len_diff, mode, flags)
  *     Grow or shrink room in sk_buff.
  *     @skb: pointer to skb
@@ -677,6 +692,10 @@ union bpf_attr {
  *     @buf: buf to fill
  *     @buf_size: size of the buf
  *     Return : 0 on success or negative error code
+ *
+ * int bpf_override_return(pt_regs, rc)
+ *	@pt_regs: pointer to struct pt_regs
+ *	@rc: the return value to set
  */
 #define __BPF_FUNC_MAPPER(FN)		\
 	FN(unspec),			\
@@ -736,7 +755,9 @@ union bpf_attr {
 	FN(xdp_adjust_meta),		\
 	FN(perf_event_read_value),	\
 	FN(perf_prog_read_value),	\
-	FN(getsockopt),
+	FN(getsockopt),			\
+	FN(override_return),		\
+	FN(sock_ops_cb_flags_set),
 
 /* integer value in 'imm' field of BPF_CALL instruction selects which helper
  * function eBPF program intends to call
@@ -888,6 +909,9 @@ struct xdp_md {
 	__u32 data;
 	__u32 data_end;
 	__u32 data_meta;
+	/* Accesses below go through struct xdp_rxq_info */
+	__u32 ingress_ifindex; /* rxq->dev->ifindex */
+	__u32 rx_queue_index;  /* rxq->queue_index  */
 };
 
 enum sk_action {
@@ -910,6 +934,9 @@ struct bpf_prog_info {
 	__u32 nr_map_ids;
 	__aligned_u64 map_ids;
 	char name[BPF_OBJ_NAME_LEN];
+	__u32 ifindex;
+	__u64 netns_dev;
+	__u64 netns_ino;
 } __attribute__((aligned(8)));
 
 struct bpf_map_info {
@@ -920,6 +947,9 @@ struct bpf_map_info {
 	__u32 max_entries;
 	__u32 map_flags;
 	char  name[BPF_OBJ_NAME_LEN];
+	__u32 ifindex;
+	__u64 netns_dev;
+	__u64 netns_ino;
 } __attribute__((aligned(8)));
 
 /* User bpf_sock_ops struct to access socket values and specify request ops
@@ -931,8 +961,9 @@ struct bpf_map_info {
 struct bpf_sock_ops {
 	__u32 op;
 	union {
-		__u32 reply;
-		__u32 replylong[4];
+		__u32 args[4];		/* Optionally passed to bpf program */
+		__u32 reply;		/* Returned by bpf program	    */
+		__u32 replylong[4];	/* Optionally returned by bpf prog  */
 	};
 	__u32 family;
 	__u32 remote_ip4;	/* Stored in network byte order */
@@ -941,8 +972,45 @@ struct bpf_sock_ops {
 	__u32 local_ip6[4];	/* Stored in network byte order */
 	__u32 remote_port;	/* Stored in network byte order */
 	__u32 local_port;	/* stored in host byte order */
+	__u32 is_fullsock;	/* Some TCP fields are only valid if
+				 * there is a full socket. If not, the
+				 * fields read as zero.
+				 */
+	__u32 snd_cwnd;
+	__u32 srtt_us;		/* Averaged RTT << 3 in usecs */
+	__u32 bpf_sock_ops_cb_flags; /* flags defined in uapi/linux/tcp.h */
+	__u32 state;
+	__u32 rtt_min;
+	__u32 snd_ssthresh;
+	__u32 rcv_nxt;
+	__u32 snd_nxt;
+	__u32 snd_una;
+	__u32 mss_cache;
+	__u32 ecn_flags;
+	__u32 rate_delivered;
+	__u32 rate_interval_us;
+	__u32 packets_out;
+	__u32 retrans_out;
+	__u32 total_retrans;
+	__u32 segs_in;
+	__u32 data_segs_in;
+	__u32 segs_out;
+	__u32 data_segs_out;
+	__u32 lost_out;
+	__u32 sacked_out;
+	__u32 sk_txhash;
+	__u64 bytes_received;
+	__u64 bytes_acked;
 };
 
+/* Definitions for bpf_sock_ops_cb_flags */
+#define BPF_SOCK_OPS_RTO_CB_FLAG	(1<<0)
+#define BPF_SOCK_OPS_RETRANS_CB_FLAG	(1<<1)
+#define BPF_SOCK_OPS_STATE_CB_FLAG	(1<<2)
+#define BPF_SOCK_OPS_ALL_CB_FLAGS       0x7		/* Mask of all currently
+							 * supported cb flags
+							 */
+
 /* List of known BPF sock_ops operators.
  * New entries can only be added at the end
  */
@@ -976,6 +1044,43 @@ enum {
 					 * a congestion threshold. RTTs above
 					 * this indicate congestion
 					 */
+	BPF_SOCK_OPS_RTO_CB,		/* Called when an RTO has triggered.
+					 * Arg1: value of icsk_retransmits
+					 * Arg2: value of icsk_rto
+					 * Arg3: whether RTO has expired
+					 */
+	BPF_SOCK_OPS_RETRANS_CB,	/* Called when skb is retransmitted.
+					 * Arg1: sequence number of 1st byte
+					 * Arg2: # segments
+					 * Arg3: return value of
+					 *       tcp_transmit_skb (0 => success)
+					 */
+	BPF_SOCK_OPS_STATE_CB,		/* Called when TCP changes state.
+					 * Arg1: old_state
+					 * Arg2: new_state
+					 */
+};
+
+/* List of TCP states. There is a build check in net/ipv4/tcp.c to detect
+ * changes between the TCP and BPF versions. Ideally this should never happen.
+ * If it does, we need to add code to convert them before calling
+ * the BPF sock_ops function.
+ */
+enum {
+	BPF_TCP_ESTABLISHED = 1,
+	BPF_TCP_SYN_SENT,
+	BPF_TCP_SYN_RECV,
+	BPF_TCP_FIN_WAIT1,
+	BPF_TCP_FIN_WAIT2,
+	BPF_TCP_TIME_WAIT,
+	BPF_TCP_CLOSE,
+	BPF_TCP_CLOSE_WAIT,
+	BPF_TCP_LAST_ACK,
+	BPF_TCP_LISTEN,
+	BPF_TCP_CLOSING,	/* Now a valid state */
+	BPF_TCP_NEW_SYN_RECV,
+
+	BPF_TCP_MAX_STATES	/* Leave at the end! */
 };
 
 #define TCP_BPF_IW		1001	/* Set TCP initial congestion window */
@@ -995,7 +1100,8 @@ struct bpf_perf_event_value {
 #define BPF_DEVCG_DEV_CHAR	(1ULL << 1)
 
 struct bpf_cgroup_dev_ctx {
-	__u32 access_type; /* (access << 16) | type */
+	/* access_type encoded as (BPF_DEVCG_ACC_* << 16) | BPF_DEVCG_DEV_* */
+	__u32 access_type;
 	__u32 major;
 	__u32 minor;
 };
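
The bpf.h changes above are easiest to see from the program side. Below is a
minimal, hypothetical BPF-C sketch (not part of the patch) of a sockops
program that uses the new bpf_sock_ops_cb_flags_set() helper and the new
args[] fields; SEC() and the helper stub are spelled out by hand so the
sketch stays self-contained rather than relying on libbpf headers.

    #include <linux/bpf.h>

    #define SEC(name) __attribute__((section(name), used))

    /* Manually declared helper stub; the id comes from __BPF_FUNC_MAPPER. */
    static int (*bpf_sock_ops_cb_flags_set)(struct bpf_sock_ops *ops, int flags) =
            (void *) BPF_FUNC_sock_ops_cb_flags_set;

    SEC("sockops")
    int watch_tcp(struct bpf_sock_ops *ops)
    {
            switch (ops->op) {
            case BPF_SOCK_OPS_TCP_CONNECT_CB:
                    /* Opt in to state-change callbacks for this socket. */
                    bpf_sock_ops_cb_flags_set(ops, BPF_SOCK_OPS_STATE_CB_FLAG);
                    break;
            case BPF_SOCK_OPS_STATE_CB:
                    /* Per the enum comment: args[0] = old state,
                     * args[1] = new state; a real program might bump a
                     * per-state counter map here.
                     */
                    break;
            }
            return 1;
    }

    char _license[] SEC("license") = "GPL";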
diff --git a/include/uapi/linux/bpf_common.h b/include/uapi/linux/bpf_common.h
index 18be90725ab0..ee97668bdadb 100644
--- a/include/uapi/linux/bpf_common.h
+++ b/include/uapi/linux/bpf_common.h
@@ -15,9 +15,10 @@
 
 /* ld/ldx fields */
 #define BPF_SIZE(code)  ((code) & 0x18)
-#define		BPF_W		0x00
-#define		BPF_H		0x08
-#define		BPF_B		0x10
+#define		BPF_W		0x00 /* 32-bit */
+#define		BPF_H		0x08 /* 16-bit */
+#define		BPF_B		0x10 /*  8-bit */
+/* eBPF		BPF_DW		0x18    64-bit */
 #define BPF_MODE(code)  ((code) & 0xe0)
 #define		BPF_IMM		0x00
 #define		BPF_ABS		0x20
diff --git a/include/uapi/linux/can/netlink.h b/include/uapi/linux/can/netlink.h
index 96710e76d5ce..9f56fad4785b 100644
--- a/include/uapi/linux/can/netlink.h
+++ b/include/uapi/linux/can/netlink.h
@@ -132,6 +132,7 @@ enum {
 	IFLA_CAN_TERMINATION_CONST,
 	IFLA_CAN_BITRATE_CONST,
 	IFLA_CAN_DATA_BITRATE_CONST,
+	IFLA_CAN_BITRATE_MAX,
 	__IFLA_CAN_MAX
 };
 
diff --git a/include/uapi/linux/devlink.h b/include/uapi/linux/devlink.h
index 6665df69e26a..1df65a4c2044 100644
--- a/include/uapi/linux/devlink.h
+++ b/include/uapi/linux/devlink.h
@@ -70,6 +70,13 @@ enum devlink_command {
 	DEVLINK_CMD_DPIPE_ENTRIES_GET,
 	DEVLINK_CMD_DPIPE_HEADERS_GET,
 	DEVLINK_CMD_DPIPE_TABLE_COUNTERS_SET,
+	DEVLINK_CMD_RESOURCE_SET,
+	DEVLINK_CMD_RESOURCE_DUMP,
+
+	/* Hot driver reload, makes configuration changes take place. The
+	 * devlink instance is not released during the process.
+	 */
+	DEVLINK_CMD_RELOAD,
 
 	/* add new commands above here */
 	__DEVLINK_CMD_MAX,
@@ -202,6 +209,20 @@ enum devlink_attr {
 	DEVLINK_ATTR_PAD,
 
 	DEVLINK_ATTR_ESWITCH_ENCAP_MODE,	/* u8 */
+	DEVLINK_ATTR_RESOURCE_LIST,		/* nested */
+	DEVLINK_ATTR_RESOURCE,			/* nested */
+	DEVLINK_ATTR_RESOURCE_NAME,		/* string */
+	DEVLINK_ATTR_RESOURCE_ID,		/* u64 */
+	DEVLINK_ATTR_RESOURCE_SIZE,		/* u64 */
+	DEVLINK_ATTR_RESOURCE_SIZE_NEW,		/* u64 */
+	DEVLINK_ATTR_RESOURCE_SIZE_VALID,	/* u8 */
+	DEVLINK_ATTR_RESOURCE_SIZE_MIN,		/* u64 */
+	DEVLINK_ATTR_RESOURCE_SIZE_MAX,		/* u64 */
+	DEVLINK_ATTR_RESOURCE_SIZE_GRAN,        /* u64 */
+	DEVLINK_ATTR_RESOURCE_UNIT,		/* u8 */
+	DEVLINK_ATTR_RESOURCE_OCC,		/* u64 */
+	DEVLINK_ATTR_DPIPE_TABLE_RESOURCE_ID,	/* u64 */
+	DEVLINK_ATTR_DPIPE_TABLE_RESOURCE_UNITS,/* u64 */
 
 	/* add new attributes above here, update the policy in devlink.c */
 
@@ -245,4 +266,8 @@ enum devlink_dpipe_header_id {
 	DEVLINK_DPIPE_HEADER_IPV6,
 };
 
+enum devlink_resource_unit {
+	DEVLINK_RESOURCE_UNIT_ENTRY,
+};
+
 #endif /* _UAPI_LINUX_DEVLINK_H_ */
diff --git a/include/uapi/linux/erspan.h b/include/uapi/linux/erspan.h
new file mode 100644
index 000000000000..841573019ae1
--- /dev/null
+++ b/include/uapi/linux/erspan.h
@@ -0,0 +1,52 @@
+/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
+/*
+ * ERSPAN Tunnel Metadata
+ *
+ * Copyright (c) 2018 VMware
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation.
+ *
+ * Userspace API for metadata mode ERSPAN tunnel
+ */
+#ifndef _UAPI_ERSPAN_H
+#define _UAPI_ERSPAN_H
+
+#include <linux/types.h>	/* For __beXX in userspace */
+#include <asm/byteorder.h>
+
+/* ERSPAN version 2 metadata header */
+struct erspan_md2 {
+	__be32 timestamp;
+	__be16 sgt;	/* security group tag */
+#if defined(__LITTLE_ENDIAN_BITFIELD)
+	__u8	hwid_upper:2,
+		ft:5,
+		p:1;
+	__u8	o:1,
+		gra:2,
+		dir:1,
+		hwid:4;
+#elif defined(__BIG_ENDIAN_BITFIELD)
+	__u8	p:1,
+		ft:5,
+		hwid_upper:2;
+	__u8	hwid:4,
+		dir:1,
+		gra:2,
+		o:1;
+#else
+#error "Please fix <asm/byteorder.h>"
+#endif
+};
+
+struct erspan_metadata {
+	int version;
+	union {
+		__be32 index;		/* Version 1 (type II)*/
+		struct erspan_md2 md2;	/* Version 2 (type III) */
+	} u;
+};
+
+#endif /* _UAPI_ERSPAN_H */
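
As a quick illustration of the new header, here is a hedged userspace sketch
(field values are arbitrary examples) that fills in version-2 metadata, e.g.
before handing it to a metadata-mode tunnel:

    #include <string.h>
    #include <arpa/inet.h>
    #include <linux/erspan.h>

    static void fill_md2(struct erspan_metadata *md)
    {
            unsigned int hwid = 0x13;           /* 6-bit hardware id */

            memset(md, 0, sizeof(*md));
            md->version = 2;                    /* type III */
            md->u.md2.sgt = htons(100);         /* security group tag */
            md->u.md2.dir = 1;                  /* egress mirrored port */
            md->u.md2.hwid = hwid & 0xf;        /* low 4 bits */
            md->u.md2.hwid_upper = hwid >> 4;   /* top 2 bits */
    }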
diff --git a/include/uapi/linux/ethtool.h b/include/uapi/linux/ethtool.h
index ac71559314e7..44a0b675a6bc 100644
--- a/include/uapi/linux/ethtool.h
+++ b/include/uapi/linux/ethtool.h
@@ -1686,6 +1686,7 @@ enum ethtool_reset_flags {
 	ETH_RESET_PHY		= 1 << 6,	/* Transceiver/PHY */
 	ETH_RESET_RAM		= 1 << 7,	/* RAM shared between
 						 * multiple components */
+	ETH_RESET_AP		= 1 << 8,	/* Application processor */
 
 	ETH_RESET_DEDICATED	= 0x0000ffff,	/* All components dedicated to
 						 * this interface */
diff --git a/include/uapi/linux/if_ether.h b/include/uapi/linux/if_ether.h
index 144de4d2f385..f8cb5760ea4f 100644
--- a/include/uapi/linux/if_ether.h
+++ b/include/uapi/linux/if_ether.h
@@ -48,6 +48,7 @@
 #define ETH_P_PUP	0x0200		/* Xerox PUP packet		*/
 #define ETH_P_PUPAT	0x0201		/* Xerox PUP Addr Trans packet	*/
 #define ETH_P_TSN	0x22F0		/* TSN (IEEE 1722) packet	*/
+#define ETH_P_ERSPAN2	0x22EB		/* ERSPAN version 2 (type III)	*/
 #define ETH_P_IP	0x0800		/* Internet Protocol packet	*/
 #define ETH_P_X25	0x0805		/* CCITT X.25			*/
 #define ETH_P_ARP	0x0806		/* Address Resolution packet	*/
diff --git a/include/uapi/linux/if_link.h b/include/uapi/linux/if_link.h
index 19fc02660e0c..6d9447700e18 100644
--- a/include/uapi/linux/if_link.h
+++ b/include/uapi/linux/if_link.h
@@ -161,6 +161,9 @@ enum {
 	IFLA_EVENT,
 	IFLA_NEW_NETNSID,
 	IFLA_IF_NETNSID,
+	IFLA_CARRIER_UP_COUNT,
+	IFLA_CARRIER_DOWN_COUNT,
+	IFLA_NEW_IFINDEX,
 	__IFLA_MAX
 };
 
@@ -732,6 +735,8 @@ enum {
 	IFLA_VF_STATS_BROADCAST,
 	IFLA_VF_STATS_MULTICAST,
 	IFLA_VF_STATS_PAD,
+	IFLA_VF_STATS_RX_DROPPED,
+	IFLA_VF_STATS_TX_DROPPED,
 	__IFLA_VF_STATS_MAX,
 };
 
diff --git a/include/uapi/linux/if_macsec.h b/include/uapi/linux/if_macsec.h
index 719d243471f4..98e4d5d7c45c 100644
--- a/include/uapi/linux/if_macsec.h
+++ b/include/uapi/linux/if_macsec.h
@@ -22,8 +22,13 @@
 
 #define MACSEC_KEYID_LEN 16
 
-#define MACSEC_DEFAULT_CIPHER_ID   0x0080020001000001ULL
-#define MACSEC_DEFAULT_CIPHER_ALT  0x0080C20001000001ULL
+/* cipher IDs as per IEEE 802.1AEbn-2011 */
+#define MACSEC_CIPHER_ID_GCM_AES_128 0x0080C20001000001ULL
+#define MACSEC_CIPHER_ID_GCM_AES_256 0x0080C20001000002ULL
+
+/* deprecated cipher ID for GCM-AES-128 */
+#define MACSEC_DEFAULT_CIPHER_ID     0x0080020001000001ULL
+#define MACSEC_DEFAULT_CIPHER_ALT    MACSEC_CIPHER_ID_GCM_AES_128
 
 #define MACSEC_MIN_ICV_LEN 8
 #define MACSEC_MAX_ICV_LEN 32
diff --git a/include/uapi/linux/if_tun.h b/include/uapi/linux/if_tun.h
index 030d3e6d6029..ee432cd3018c 100644
--- a/include/uapi/linux/if_tun.h
+++ b/include/uapi/linux/if_tun.h
@@ -57,6 +57,8 @@
  */
 #define TUNSETVNETBE _IOW('T', 222, int)
 #define TUNGETVNETBE _IOR('T', 223, int)
+#define TUNSETSTEERINGEBPF _IOR('T', 224, int)
+#define TUNSETFILTEREBPF _IOR('T', 225, int)
 
 /* TUNSETIFF ifr flags */
 #define IFF_TUN		0x0001
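
For context, both new tun ioctls take a pointer to a BPF program fd. A
minimal usage sketch, assuming tun_fd is an open, configured /dev/net/tun
descriptor and prog_fd refers to an already-loaded
BPF_PROG_TYPE_SOCKET_FILTER program whose return value selects the tx queue:

    #include <sys/ioctl.h>
    #include <linux/if_tun.h>

    static int attach_steering(int tun_fd, int prog_fd)
    {
            /* TUNSETFILTEREBPF works the same way for the filter program. */
            return ioctl(tun_fd, TUNSETSTEERINGEBPF, &prog_fd);
    }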
diff --git a/include/uapi/linux/if_tunnel.h b/include/uapi/linux/if_tunnel.h
index e68dadbd6d45..1b3d148c4560 100644
--- a/include/uapi/linux/if_tunnel.h
+++ b/include/uapi/linux/if_tunnel.h
@@ -137,6 +137,9 @@ enum {
 	IFLA_GRE_IGNORE_DF,
 	IFLA_GRE_FWMARK,
 	IFLA_GRE_ERSPAN_INDEX,
+	IFLA_GRE_ERSPAN_VER,
+	IFLA_GRE_ERSPAN_DIR,
+	IFLA_GRE_ERSPAN_HWID,
 	__IFLA_GRE_MAX,
 };
 
diff --git a/include/uapi/linux/inet_diag.h b/include/uapi/linux/inet_diag.h
index 817d807e9481..14565d703291 100644
--- a/include/uapi/linux/inet_diag.h
+++ b/include/uapi/linux/inet_diag.h
@@ -92,6 +92,8 @@ enum {
 	INET_DIAG_BC_D_COND,
 	INET_DIAG_BC_DEV_COND,   /* u32 ifindex */
 	INET_DIAG_BC_MARK_COND,
+	INET_DIAG_BC_S_EQ,
+	INET_DIAG_BC_D_EQ,
 };
 
 struct inet_diag_hostcond {
diff --git a/include/uapi/linux/l2tp.h b/include/uapi/linux/l2tp.h
index d84ce5c1c9aa..7d570c7bd117 100644
--- a/include/uapi/linux/l2tp.h
+++ b/include/uapi/linux/l2tp.h
@@ -65,7 +65,7 @@ struct sockaddr_l2tpip6 {
  * TUNNEL_MODIFY	- CONN_ID, udpcsum
  * TUNNEL_GETSTATS	- CONN_ID, (stats)
  * TUNNEL_GET		- CONN_ID, (...)
- * SESSION_CREATE	- SESSION_ID, PW_TYPE, offset, data_seq, cookie, peer_cookie, offset, l2spec
+ * SESSION_CREATE	- SESSION_ID, PW_TYPE, data_seq, cookie, peer_cookie, l2spec
  * SESSION_DELETE	- SESSION_ID
  * SESSION_MODIFY	- SESSION_ID, data_seq
  * SESSION_GET		- SESSION_ID, (...)
@@ -94,10 +94,10 @@ enum {
 	L2TP_ATTR_NONE,			/* no data */
 	L2TP_ATTR_PW_TYPE,		/* u16, enum l2tp_pwtype */
 	L2TP_ATTR_ENCAP_TYPE,		/* u16, enum l2tp_encap_type */
-	L2TP_ATTR_OFFSET,		/* u16 */
+	L2TP_ATTR_OFFSET,		/* u16 (not used) */
 	L2TP_ATTR_DATA_SEQ,		/* u16 */
 	L2TP_ATTR_L2SPEC_TYPE,		/* u8, enum l2tp_l2spec_type */
-	L2TP_ATTR_L2SPEC_LEN,		/* u8, enum l2tp_l2spec_type */
+	L2TP_ATTR_L2SPEC_LEN,		/* u8 (not used) */
 	L2TP_ATTR_PROTO_VERSION,	/* u8 */
 	L2TP_ATTR_IFNAME,		/* string */
 	L2TP_ATTR_CONN_ID,		/* u32 */
diff --git a/include/uapi/linux/netfilter/nf_conntrack_common.h b/include/uapi/linux/netfilter/nf_conntrack_common.h
index 57ccfb32e87f..9574bd40870b 100644
--- a/include/uapi/linux/netfilter/nf_conntrack_common.h
+++ b/include/uapi/linux/netfilter/nf_conntrack_common.h
@@ -101,12 +101,16 @@ enum ip_conntrack_status {
 	IPS_HELPER_BIT = 13,
 	IPS_HELPER = (1 << IPS_HELPER_BIT),
 
+	/* Conntrack has been offloaded to flow table. */
+	IPS_OFFLOAD_BIT = 14,
+	IPS_OFFLOAD = (1 << IPS_OFFLOAD_BIT),
+
 	/* Be careful here, modifying these bits can make things messy,
 	 * so don't let users modify them directly.
 	 */
 	IPS_UNCHANGEABLE_MASK = (IPS_NAT_DONE_MASK | IPS_NAT_MASK |
 				 IPS_EXPECTED | IPS_CONFIRMED | IPS_DYING |
-				 IPS_SEQ_ADJUST | IPS_TEMPLATE),
+				 IPS_SEQ_ADJUST | IPS_TEMPLATE | IPS_OFFLOAD),
 
 	__IPS_MAX_BIT = 14,
 };
diff --git a/include/uapi/linux/netfilter/nf_tables.h b/include/uapi/linux/netfilter/nf_tables.h
index a3ee277b17a1..66dceee0ae30 100644
--- a/include/uapi/linux/netfilter/nf_tables.h
+++ b/include/uapi/linux/netfilter/nf_tables.h
@@ -92,6 +92,9 @@ enum nft_verdicts {
  * @NFT_MSG_GETOBJ: get a stateful object (enum nft_obj_attributes)
  * @NFT_MSG_DELOBJ: delete a stateful object (enum nft_obj_attributes)
  * @NFT_MSG_GETOBJ_RESET: get and reset a stateful object (enum nft_obj_attributes)
+ * @NFT_MSG_NEWFLOWTABLE: add new flow table (enum nft_flowtable_attributes)
+ * @NFT_MSG_GETFLOWTABLE: get flow table (enum nft_flowtable_attributes)
+ * @NFT_MSG_DELFLOWTABLE: delete flow table (enum nft_flowtable_attributes)
  */
 enum nf_tables_msg_types {
 	NFT_MSG_NEWTABLE,
@@ -116,6 +119,9 @@ enum nf_tables_msg_types {
 	NFT_MSG_GETOBJ,
 	NFT_MSG_DELOBJ,
 	NFT_MSG_GETOBJ_RESET,
+	NFT_MSG_NEWFLOWTABLE,
+	NFT_MSG_GETFLOWTABLE,
+	NFT_MSG_DELFLOWTABLE,
 	NFT_MSG_MAX,
 };
 
@@ -168,6 +174,8 @@ enum nft_table_attributes {
 	NFTA_TABLE_NAME,
 	NFTA_TABLE_FLAGS,
 	NFTA_TABLE_USE,
+	NFTA_TABLE_HANDLE,
+	NFTA_TABLE_PAD,
 	__NFTA_TABLE_MAX
 };
 #define NFTA_TABLE_MAX		(__NFTA_TABLE_MAX - 1)
@@ -311,6 +319,7 @@ enum nft_set_desc_attributes {
  * @NFTA_SET_GC_INTERVAL: garbage collection interval (NLA_U32)
  * @NFTA_SET_USERDATA: user data (NLA_BINARY)
  * @NFTA_SET_OBJ_TYPE: stateful object type (NLA_U32: NFT_OBJECT_*)
+ * @NFTA_SET_HANDLE: set handle (NLA_U64)
  */
 enum nft_set_attributes {
 	NFTA_SET_UNSPEC,
@@ -329,6 +338,7 @@ enum nft_set_attributes {
 	NFTA_SET_USERDATA,
 	NFTA_SET_PAD,
 	NFTA_SET_OBJ_TYPE,
+	NFTA_SET_HANDLE,
 	__NFTA_SET_MAX
 };
 #define NFTA_SET_MAX		(__NFTA_SET_MAX - 1)
@@ -777,6 +787,7 @@ enum nft_exthdr_attributes {
  * @NFT_META_OIFGROUP: packet output interface group
  * @NFT_META_CGROUP: socket control group (skb->sk->sk_classid)
  * @NFT_META_PRANDOM: a 32bit pseudo-random number
+ * @NFT_META_SECPATH: boolean, secpath_exists (!!skb->sp)
  */
 enum nft_meta_keys {
 	NFT_META_LEN,
@@ -804,6 +815,7 @@ enum nft_meta_keys {
 	NFT_META_OIFGROUP,
 	NFT_META_CGROUP,
 	NFT_META_PRANDOM,
+	NFT_META_SECPATH,
 };
 
 /**
@@ -949,6 +961,17 @@ enum nft_ct_attributes {
 };
 #define NFTA_CT_MAX		(__NFTA_CT_MAX - 1)
 
+/**
+ * enum nft_flow_attributes - ct offload expression attributes
+ * @NFTA_FLOW_TABLE_NAME: flow table name (NLA_STRING)
+ */
+enum nft_offload_attributes {
+	NFTA_FLOW_UNSPEC,
+	NFTA_FLOW_TABLE_NAME,
+	__NFTA_FLOW_MAX,
+};
+#define NFTA_FLOW_MAX		(__NFTA_FLOW_MAX - 1)
+
 enum nft_limit_type {
 	NFT_LIMIT_PKTS,
 	NFT_LIMIT_PKT_BYTES
@@ -1295,6 +1318,7 @@ enum nft_ct_helper_attributes {
  * @NFTA_OBJ_TYPE: stateful object type (NLA_U32)
  * @NFTA_OBJ_DATA: stateful object data (NLA_NESTED)
  * @NFTA_OBJ_USE: number of references to this expression (NLA_U32)
+ * @NFTA_OBJ_HANDLE: object handle (NLA_U64)
  */
 enum nft_object_attributes {
 	NFTA_OBJ_UNSPEC,
@@ -1303,11 +1327,63 @@ enum nft_object_attributes {
 	NFTA_OBJ_TYPE,
 	NFTA_OBJ_DATA,
 	NFTA_OBJ_USE,
+	NFTA_OBJ_HANDLE,
+	NFTA_OBJ_PAD,
 	__NFTA_OBJ_MAX
 };
 #define NFTA_OBJ_MAX		(__NFTA_OBJ_MAX - 1)
 
 /**
+ * enum nft_flowtable_attributes - nf_tables flow table netlink attributes
+ *
+ * @NFTA_FLOWTABLE_TABLE: name of the table containing the expression (NLA_STRING)
+ * @NFTA_FLOWTABLE_NAME: name of this flow table (NLA_STRING)
+ * @NFTA_FLOWTABLE_HOOK: netfilter hook configuration (NLA_NESTED)
+ * @NFTA_FLOWTABLE_USE: number of references to this flow table (NLA_U32)
+ * @NFTA_FLOWTABLE_HANDLE: object handle (NLA_U64)
+ */
+enum nft_flowtable_attributes {
+	NFTA_FLOWTABLE_UNSPEC,
+	NFTA_FLOWTABLE_TABLE,
+	NFTA_FLOWTABLE_NAME,
+	NFTA_FLOWTABLE_HOOK,
+	NFTA_FLOWTABLE_USE,
+	NFTA_FLOWTABLE_HANDLE,
+	NFTA_FLOWTABLE_PAD,
+	__NFTA_FLOWTABLE_MAX
+};
+#define NFTA_FLOWTABLE_MAX	(__NFTA_FLOWTABLE_MAX - 1)
+
+/**
+ * enum nft_flowtable_hook_attributes - nf_tables flow table hook netlink attributes
+ *
+ * @NFTA_FLOWTABLE_HOOK_NUM: netfilter hook number (NLA_U32)
+ * @NFTA_FLOWTABLE_HOOK_PRIORITY: netfilter hook priority (NLA_U32)
+ * @NFTA_FLOWTABLE_HOOK_DEVS: input devices this flow table is bound to (NLA_NESTED)
+ */
+enum nft_flowtable_hook_attributes {
+	NFTA_FLOWTABLE_HOOK_UNSPEC,
+	NFTA_FLOWTABLE_HOOK_NUM,
+	NFTA_FLOWTABLE_HOOK_PRIORITY,
+	NFTA_FLOWTABLE_HOOK_DEVS,
+	__NFTA_FLOWTABLE_HOOK_MAX
+};
+#define NFTA_FLOWTABLE_HOOK_MAX	(__NFTA_FLOWTABLE_HOOK_MAX - 1)
+
+/**
+ * enum nft_device_attributes - nf_tables device netlink attributes
+ *
+ * @NFTA_DEVICE_NAME: name of this device (NLA_STRING)
+ */
+enum nft_devices_attributes {
+	NFTA_DEVICE_UNSPEC,
+	NFTA_DEVICE_NAME,
+	__NFTA_DEVICE_MAX
+};
+#define NFTA_DEVICE_MAX		(__NFTA_DEVICE_MAX - 1)
+
+
+/**
  * enum nft_trace_attributes - nf_tables trace netlink attributes
  *
  * @NFTA_TRACE_TABLE: name of the table (NLA_STRING)
diff --git a/include/uapi/linux/netfilter/xt_connlimit.h b/include/uapi/linux/netfilter/xt_connlimit.h
index 07e5e9d47882..d4d1943dcd11 100644
--- a/include/uapi/linux/netfilter/xt_connlimit.h
+++ b/include/uapi/linux/netfilter/xt_connlimit.h
@@ -27,7 +27,7 @@ struct xt_connlimit_info {
 	__u32 flags;
 
 	/* Used internally by the kernel */
-	struct xt_connlimit_data *data __attribute__((aligned(8)));
+	struct nf_conncount_data *data __attribute__((aligned(8)));
 };
 
 #endif /* _XT_CONNLIMIT_H */
diff --git a/include/uapi/linux/netfilter_arp.h b/include/uapi/linux/netfilter_arp.h
index 81b6a4cbcb72..791dfc5ae907 100644
--- a/include/uapi/linux/netfilter_arp.h
+++ b/include/uapi/linux/netfilter_arp.h
@@ -15,6 +15,9 @@
 #define NF_ARP_IN	0
 #define NF_ARP_OUT	1
 #define NF_ARP_FORWARD	2
+
+#ifndef __KERNEL__
 #define NF_ARP_NUMHOOKS	3
+#endif
 
 #endif /* __LINUX_ARP_NETFILTER_H */
diff --git a/include/uapi/linux/netfilter_decnet.h b/include/uapi/linux/netfilter_decnet.h
index 9089c38f6abe..61f1c7dfd033 100644
--- a/include/uapi/linux/netfilter_decnet.h
+++ b/include/uapi/linux/netfilter_decnet.h
@@ -24,6 +24,9 @@
 #define NFC_DN_IF_IN		0x0004
 /* Output device. */
 #define NFC_DN_IF_OUT		0x0008
+
+/* kernel define is in netfilter_defs.h */
+#define NF_DN_NUMHOOKS		7
 #endif /* ! __KERNEL__ */
 
 /* DECnet Hooks */
@@ -41,7 +44,6 @@
 #define NF_DN_HELLO		5
 /* Input Routing Packets */
 #define NF_DN_ROUTE		6
-#define NF_DN_NUMHOOKS		7
 
 enum nf_dn_hook_priorities {
 	NF_DN_PRI_FIRST = INT_MIN,
diff --git a/include/uapi/linux/netfilter_ipv4.h b/include/uapi/linux/netfilter_ipv4.h
index e6b1a84f5dd3..c3b060775e13 100644
--- a/include/uapi/linux/netfilter_ipv4.h
+++ b/include/uapi/linux/netfilter_ipv4.h
@@ -57,6 +57,7 @@
 
 enum nf_ip_hook_priorities {
 	NF_IP_PRI_FIRST = INT_MIN,
+	NF_IP_PRI_RAW_BEFORE_DEFRAG = -450,
 	NF_IP_PRI_CONNTRACK_DEFRAG = -400,
 	NF_IP_PRI_RAW = -300,
 	NF_IP_PRI_SELINUX_FIRST = -225,
diff --git a/include/uapi/linux/netfilter_ipv6.h b/include/uapi/linux/netfilter_ipv6.h
index 2f9724611cc2..dc624fd24d25 100644
--- a/include/uapi/linux/netfilter_ipv6.h
+++ b/include/uapi/linux/netfilter_ipv6.h
@@ -62,6 +62,7 @@
 
 enum nf_ip6_hook_priorities {
 	NF_IP6_PRI_FIRST = INT_MIN,
+	NF_IP6_PRI_RAW_BEFORE_DEFRAG = -450,
 	NF_IP6_PRI_CONNTRACK_DEFRAG = -400,
 	NF_IP6_PRI_RAW = -300,
 	NF_IP6_PRI_SELINUX_FIRST = -225,
diff --git a/include/uapi/linux/netfilter_ipv6/ip6t_srh.h b/include/uapi/linux/netfilter_ipv6/ip6t_srh.h
new file mode 100644
index 000000000000..f3cc0ef514a7
--- /dev/null
+++ b/include/uapi/linux/netfilter_ipv6/ip6t_srh.h
@@ -0,0 +1,57 @@
+/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
+#ifndef _IP6T_SRH_H
+#define _IP6T_SRH_H
+
+#include <linux/types.h>
+#include <linux/netfilter.h>
+
+/* Values for "mt_flags" field in struct ip6t_srh */
+#define IP6T_SRH_NEXTHDR        0x0001
+#define IP6T_SRH_LEN_EQ         0x0002
+#define IP6T_SRH_LEN_GT         0x0004
+#define IP6T_SRH_LEN_LT         0x0008
+#define IP6T_SRH_SEGS_EQ        0x0010
+#define IP6T_SRH_SEGS_GT        0x0020
+#define IP6T_SRH_SEGS_LT        0x0040
+#define IP6T_SRH_LAST_EQ        0x0080
+#define IP6T_SRH_LAST_GT        0x0100
+#define IP6T_SRH_LAST_LT        0x0200
+#define IP6T_SRH_TAG            0x0400
+#define IP6T_SRH_MASK           0x07FF
+
+/* Values for "mt_invflags" field in struct ip6t_srh */
+#define IP6T_SRH_INV_NEXTHDR    0x0001
+#define IP6T_SRH_INV_LEN_EQ     0x0002
+#define IP6T_SRH_INV_LEN_GT     0x0004
+#define IP6T_SRH_INV_LEN_LT     0x0008
+#define IP6T_SRH_INV_SEGS_EQ    0x0010
+#define IP6T_SRH_INV_SEGS_GT    0x0020
+#define IP6T_SRH_INV_SEGS_LT    0x0040
+#define IP6T_SRH_INV_LAST_EQ    0x0080
+#define IP6T_SRH_INV_LAST_GT    0x0100
+#define IP6T_SRH_INV_LAST_LT    0x0200
+#define IP6T_SRH_INV_TAG        0x0400
+#define IP6T_SRH_INV_MASK       0x07FF
+
+/**
+ *      struct ip6t_srh - SRH match options
+ *      @next_hdr: Next header field of SRH
+ *      @hdr_len: Extension header length field of SRH
+ *      @segs_left: Segments left field of SRH
+ *      @last_entry: Last entry field of SRH
+ *      @tag: Tag field of SRH
+ *      @mt_flags: match options
+ *      @mt_invflags: Invert the sense of match options
+ */
+
+struct ip6t_srh {
+	__u8                    next_hdr;
+	__u8                    hdr_len;
+	__u8                    segs_left;
+	__u8                    last_entry;
+	__u16                   tag;
+	__u16                   mt_flags;
+	__u16                   mt_invflags;
+};
+
+#endif /*_IP6T_SRH_H*/
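
To illustrate the flag encoding, here is a hypothetical match option block
(the struct an ip6tables extension would hand to the kernel) that selects
SRH packets carrying IPv6 as the next header with segments-left greater
than zero:

    #include <linux/netfilter_ipv6/ip6t_srh.h>

    static const struct ip6t_srh srh_match = {
            .next_hdr  = 41,        /* IPPROTO_IPV6 */
            .segs_left = 0,         /* threshold for the GT comparison */
            .mt_flags  = IP6T_SRH_NEXTHDR | IP6T_SRH_SEGS_GT,
    };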
diff --git a/include/uapi/linux/nl80211.h b/include/uapi/linux/nl80211.h
index f882fe1f9709..c587a61c32bf 100644
--- a/include/uapi/linux/nl80211.h
+++ b/include/uapi/linux/nl80211.h
@@ -3862,6 +3862,9 @@ enum nl80211_bss_scan_width {
  *	@NL80211_BSS_PARENT_BSSID. (u64).
  * @NL80211_BSS_PARENT_BSSID: the BSS according to which @NL80211_BSS_PARENT_TSF
  *	is set.
+ * @NL80211_BSS_CHAIN_SIGNAL: per-chain signal strength of last BSS update.
+ *	Contains a nested array of signal strength attributes (u8, dBm),
+ *	using the nesting index as the antenna number.
  * @__NL80211_BSS_AFTER_LAST: internal
  * @NL80211_BSS_MAX: highest BSS attribute
  */
@@ -3885,6 +3888,7 @@ enum nl80211_bss {
 	NL80211_BSS_PAD,
 	NL80211_BSS_PARENT_TSF,
 	NL80211_BSS_PARENT_BSSID,
+	NL80211_BSS_CHAIN_SIGNAL,
 
 	/* keep last */
 	__NL80211_BSS_AFTER_LAST,
diff --git a/include/uapi/linux/openvswitch.h b/include/uapi/linux/openvswitch.h
index dcfab5e3b55c..713e56ce681f 100644
--- a/include/uapi/linux/openvswitch.h
+++ b/include/uapi/linux/openvswitch.h
@@ -363,6 +363,7 @@ enum ovs_tunnel_key_attr {
 	OVS_TUNNEL_KEY_ATTR_IPV6_SRC,		/* struct in6_addr src IPv6 address. */
 	OVS_TUNNEL_KEY_ATTR_IPV6_DST,		/* struct in6_addr dst IPv6 address. */
 	OVS_TUNNEL_KEY_ATTR_PAD,
+	OVS_TUNNEL_KEY_ATTR_ERSPAN_OPTS,	/* struct erspan_metadata */
 	__OVS_TUNNEL_KEY_ATTR_MAX
 };
 
diff --git a/include/uapi/linux/perf_event.h b/include/uapi/linux/perf_event.h
index c77c9a2ebbbb..e0739a1aa4b2 100644
--- a/include/uapi/linux/perf_event.h
+++ b/include/uapi/linux/perf_event.h
@@ -418,6 +418,27 @@ struct perf_event_attr {
 	__u16	__reserved_2;	/* align to __u64 */
 };
 
+/*
+ * Structure used by below PERF_EVENT_IOC_QUERY_BPF command
+ * to query bpf programs attached to the same perf tracepoint
+ * as the given perf event.
+ */
+struct perf_event_query_bpf {
+	/*
+	 * The below ids array length
+	 */
+	__u32	ids_len;
+	/*
+	 * Set by the kernel to indicate the number of
+	 * available programs
+	 */
+	__u32	prog_cnt;
+	/*
+	 * User provided buffer to store program ids
+	 */
+	__u32	ids[0];
+};
+
 #define perf_flags(attr)	(*(&(attr)->read_format + 1))
 
 /*
@@ -433,6 +454,7 @@ struct perf_event_attr {
 #define PERF_EVENT_IOC_ID		_IOR('$', 7, __u64 *)
 #define PERF_EVENT_IOC_SET_BPF		_IOW('$', 8, __u32)
 #define PERF_EVENT_IOC_PAUSE_OUTPUT	_IOW('$', 9, __u32)
+#define PERF_EVENT_IOC_QUERY_BPF	_IOWR('$', 10, struct perf_event_query_bpf *)
 
 enum perf_event_ioc_flags {
 	PERF_IOC_FLAG_GROUP		= 1U << 0,
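
A short userspace sketch of the new query ioctl, assuming pfd came from
perf_event_open() on a tracepoint; the flexible ids[] array is sized by the
caller via ids_len, and the kernel reports the total in prog_cnt:

    #include <stdlib.h>
    #include <sys/ioctl.h>
    #include <linux/perf_event.h>

    static int query_bpf_ids(int pfd)
    {
            const unsigned int max_ids = 16;
            struct perf_event_query_bpf *q;
            int err;

            q = calloc(1, sizeof(*q) + max_ids * sizeof(__u32));
            if (!q)
                    return -1;
            q->ids_len = max_ids;
            err = ioctl(pfd, PERF_EVENT_IOC_QUERY_BPF, q);
            /* On success, q->prog_cnt programs are attached and up to
             * ids_len program ids were copied into q->ids[].
             */
            free(q);
            return err;
    }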
diff --git a/include/uapi/linux/rtnetlink.h b/include/uapi/linux/rtnetlink.h
index 843e29aa3cac..9b15005955fa 100644
--- a/include/uapi/linux/rtnetlink.h
+++ b/include/uapi/linux/rtnetlink.h
@@ -541,9 +541,19 @@ struct tcmsg {
 	int		tcm_ifindex;
 	__u32		tcm_handle;
 	__u32		tcm_parent;
+/* tcm_block_index is used instead of tcm_parent
+ * in case tcm_ifindex == TCM_IFINDEX_MAGIC_BLOCK
+ */
+#define tcm_block_index tcm_parent
 	__u32		tcm_info;
 };
 
+/* For manipulation of filters in shared block, tcm_ifindex is set to
+ * TCM_IFINDEX_MAGIC_BLOCK, and tcm_parent is aliased to tcm_block_index
+ * which is the block index.
+ */
+#define TCM_IFINDEX_MAGIC_BLOCK (0xFFFFFFFFU)
+
 enum {
 	TCA_UNSPEC,
 	TCA_KIND,
@@ -558,6 +568,8 @@ enum {
 	TCA_DUMP_INVISIBLE,
 	TCA_CHAIN,
 	TCA_HW_OFFLOAD,
+	TCA_INGRESS_BLOCK,
+	TCA_EGRESS_BLOCK,
 	__TCA_MAX
 };
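
A brief sketch of how a tc netlink request addresses a shared filter block
instead of a device, using the aliasing described in the comment above
(assuming the patched header):

    #include <sys/socket.h>
    #include <linux/rtnetlink.h>

    static void tcmsg_for_block(struct tcmsg *tcm, __u32 block_index)
    {
            tcm->tcm_family = AF_UNSPEC;
            tcm->tcm_ifindex = TCM_IFINDEX_MAGIC_BLOCK;
            tcm->tcm_block_index = block_index;     /* aliases tcm_parent */
    }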
 
diff --git a/include/uapi/linux/sctp.h b/include/uapi/linux/sctp.h
index d9adab32dbee..4c4db14786bd 100644
--- a/include/uapi/linux/sctp.h
+++ b/include/uapi/linux/sctp.h
@@ -125,6 +125,7 @@ typedef __s32 sctp_assoc_t;
 #define SCTP_SOCKOPT_PEELOFF_FLAGS 122
 #define SCTP_STREAM_SCHEDULER	123
 #define SCTP_STREAM_SCHEDULER_VALUE	124
+#define SCTP_INTERLEAVING_SUPPORTED	125
 
 /* PR-SCTP policies */
 #define SCTP_PR_SCTP_NONE	0x0000
@@ -459,6 +460,8 @@ struct sctp_pdapi_event {
 	__u32 pdapi_length;
 	__u32 pdapi_indication;
 	sctp_assoc_t pdapi_assoc_id;
+	__u32 pdapi_stream;
+	__u32 pdapi_seq;
 };
 
 enum { SCTP_PARTIAL_DELIVERY_ABORTED=0, };
diff --git a/include/uapi/linux/tipc.h b/include/uapi/linux/tipc.h
index 35f79d1f8c3a..14bacc7e6cef 100644
--- a/include/uapi/linux/tipc.h
+++ b/include/uapi/linux/tipc.h
@@ -117,10 +117,9 @@ static inline unsigned int tipc_node(__u32 addr)
 /*
  * Publication scopes when binding port names and port name sequences
  */
-
-#define TIPC_ZONE_SCOPE		1
-#define TIPC_CLUSTER_SCOPE	2
-#define TIPC_NODE_SCOPE		3
+#define TIPC_ZONE_SCOPE         1
+#define TIPC_CLUSTER_SCOPE      2
+#define TIPC_NODE_SCOPE         3
 
 /*
  * Limiting values for messages
diff --git a/include/uapi/linux/virtio_net.h b/include/uapi/linux/virtio_net.h
index fc353b518288..5de6ed37695b 100644
--- a/include/uapi/linux/virtio_net.h
+++ b/include/uapi/linux/virtio_net.h
@@ -57,6 +57,8 @@
 					 * Steering */
 #define VIRTIO_NET_F_CTRL_MAC_ADDR 23	/* Set MAC address */
 
+#define VIRTIO_NET_F_SPEED_DUPLEX 63	/* Device set linkspeed and duplex */
+
 #ifndef VIRTIO_NET_NO_LEGACY
 #define VIRTIO_NET_F_GSO	6	/* Host handles pkts w/ any GSO type */
 #endif /* VIRTIO_NET_NO_LEGACY */
@@ -76,6 +78,17 @@ struct virtio_net_config {
 	__u16 max_virtqueue_pairs;
 	/* Default maximum transmit unit advice */
 	__u16 mtu;
+	/*
+	 * speed, in units of 1 Mbit/s. All values 0 to INT_MAX are legal.
+	 * Any other value stands for unknown.
+	 */
+	__u32 speed;
+	/*
+	 * 0x00 - half duplex
+	 * 0x01 - full duplex
+	 * Any other value stands for unknown.
+	 */
+	__u8 duplex;
 } __attribute__((packed));
 
 /*
diff --git a/kernel/Makefile b/kernel/Makefile
index 172d151d429c..f85ae5dfa474 100644
--- a/kernel/Makefile
+++ b/kernel/Makefile
@@ -81,6 +81,7 @@ obj-$(CONFIG_AUDIT_TREE) += audit_tree.o
 obj-$(CONFIG_GCOV_KERNEL) += gcov/
 obj-$(CONFIG_KCOV) += kcov.o
 obj-$(CONFIG_KPROBES) += kprobes.o
+obj-$(CONFIG_FAIL_FUNCTION) += fail_function.o
 obj-$(CONFIG_KGDB) += debug/
 obj-$(CONFIG_DETECT_HUNG_TASK) += hung_task.o
 obj-$(CONFIG_LOCKUP_DETECTOR) += watchdog.o
diff --git a/kernel/bpf/Makefile b/kernel/bpf/Makefile
index e691da0b3bab..a713fd23ec88 100644
--- a/kernel/bpf/Makefile
+++ b/kernel/bpf/Makefile
@@ -9,9 +9,11 @@ obj-$(CONFIG_BPF_SYSCALL) += devmap.o
 obj-$(CONFIG_BPF_SYSCALL) += cpumap.o
 obj-$(CONFIG_BPF_SYSCALL) += offload.o
 ifeq ($(CONFIG_STREAM_PARSER),y)
+ifeq ($(CONFIG_INET),y)
 obj-$(CONFIG_BPF_SYSCALL) += sockmap.o
 endif
 endif
+endif
 ifeq ($(CONFIG_PERF_EVENTS),y)
 obj-$(CONFIG_BPF_SYSCALL) += stackmap.o
 endif
diff --git a/kernel/bpf/arraymap.c b/kernel/bpf/arraymap.c
index ab94d304a634..b1f66480135b 100644
--- a/kernel/bpf/arraymap.c
+++ b/kernel/bpf/arraymap.c
@@ -49,27 +49,35 @@ static int bpf_array_alloc_percpu(struct bpf_array *array)
 }
 
 /* Called from syscall */
-static struct bpf_map *array_map_alloc(union bpf_attr *attr)
+static int array_map_alloc_check(union bpf_attr *attr)
 {
 	bool percpu = attr->map_type == BPF_MAP_TYPE_PERCPU_ARRAY;
 	int numa_node = bpf_map_attr_numa_node(attr);
-	u32 elem_size, index_mask, max_entries;
-	bool unpriv = !capable(CAP_SYS_ADMIN);
-	struct bpf_array *array;
-	u64 array_size, mask64;
 
 	/* check sanity of attributes */
 	if (attr->max_entries == 0 || attr->key_size != 4 ||
 	    attr->value_size == 0 ||
 	    attr->map_flags & ~ARRAY_CREATE_FLAG_MASK ||
 	    (percpu && numa_node != NUMA_NO_NODE))
-		return ERR_PTR(-EINVAL);
+		return -EINVAL;
 
 	if (attr->value_size > KMALLOC_MAX_SIZE)
 		/* if value_size is bigger, the user space won't be able to
 		 * access the elements.
 		 */
-		return ERR_PTR(-E2BIG);
+		return -E2BIG;
+
+	return 0;
+}
+
+static struct bpf_map *array_map_alloc(union bpf_attr *attr)
+{
+	bool percpu = attr->map_type == BPF_MAP_TYPE_PERCPU_ARRAY;
+	int numa_node = bpf_map_attr_numa_node(attr);
+	u32 elem_size, index_mask, max_entries;
+	bool unpriv = !capable(CAP_SYS_ADMIN);
+	struct bpf_array *array;
+	u64 array_size, mask64;
 
 	elem_size = round_up(attr->value_size, 8);
 
@@ -112,12 +120,7 @@ static struct bpf_map *array_map_alloc(union bpf_attr *attr)
 	array->map.unpriv_array = unpriv;
 
 	/* copy mandatory map attributes */
-	array->map.map_type = attr->map_type;
-	array->map.key_size = attr->key_size;
-	array->map.value_size = attr->value_size;
-	array->map.max_entries = attr->max_entries;
-	array->map.map_flags = attr->map_flags;
-	array->map.numa_node = numa_node;
+	bpf_map_init_from_attr(&array->map, attr);
 	array->elem_size = elem_size;
 
 	if (!percpu)
@@ -327,6 +330,7 @@ static void array_map_free(struct bpf_map *map)
 }
 
 const struct bpf_map_ops array_map_ops = {
+	.map_alloc_check = array_map_alloc_check,
 	.map_alloc = array_map_alloc,
 	.map_free = array_map_free,
 	.map_get_next_key = array_map_get_next_key,
@@ -337,6 +341,7 @@ const struct bpf_map_ops array_map_ops = {
 };
 
 const struct bpf_map_ops percpu_array_map_ops = {
+	.map_alloc_check = array_map_alloc_check,
 	.map_alloc = array_map_alloc,
 	.map_free = array_map_free,
 	.map_get_next_key = array_map_get_next_key,
@@ -345,12 +350,12 @@ const struct bpf_map_ops percpu_array_map_ops = {
 	.map_delete_elem = array_map_delete_elem,
 };
 
-static struct bpf_map *fd_array_map_alloc(union bpf_attr *attr)
+static int fd_array_map_alloc_check(union bpf_attr *attr)
 {
 	/* only file descriptors can be stored in this type of map */
 	if (attr->value_size != sizeof(u32))
-		return ERR_PTR(-EINVAL);
-	return array_map_alloc(attr);
+		return -EINVAL;
+	return array_map_alloc_check(attr);
 }
 
 static void fd_array_map_free(struct bpf_map *map)
@@ -474,7 +479,8 @@ void bpf_fd_array_map_clear(struct bpf_map *map)
 }
 
 const struct bpf_map_ops prog_array_map_ops = {
-	.map_alloc = fd_array_map_alloc,
+	.map_alloc_check = fd_array_map_alloc_check,
+	.map_alloc = array_map_alloc,
 	.map_free = fd_array_map_free,
 	.map_get_next_key = array_map_get_next_key,
 	.map_lookup_elem = fd_array_map_lookup_elem,
@@ -561,7 +567,8 @@ static void perf_event_fd_array_release(struct bpf_map *map,
 }
 
 const struct bpf_map_ops perf_event_array_map_ops = {
-	.map_alloc = fd_array_map_alloc,
+	.map_alloc_check = fd_array_map_alloc_check,
+	.map_alloc = array_map_alloc,
 	.map_free = fd_array_map_free,
 	.map_get_next_key = array_map_get_next_key,
 	.map_lookup_elem = fd_array_map_lookup_elem,
@@ -592,7 +599,8 @@ static void cgroup_fd_array_free(struct bpf_map *map)
 }
 
 const struct bpf_map_ops cgroup_array_map_ops = {
-	.map_alloc = fd_array_map_alloc,
+	.map_alloc_check = fd_array_map_alloc_check,
+	.map_alloc = array_map_alloc,
 	.map_free = cgroup_fd_array_free,
 	.map_get_next_key = array_map_get_next_key,
 	.map_lookup_elem = fd_array_map_lookup_elem,
@@ -610,7 +618,7 @@ static struct bpf_map *array_of_map_alloc(union bpf_attr *attr)
 	if (IS_ERR(inner_map_meta))
 		return inner_map_meta;
 
-	map = fd_array_map_alloc(attr);
+	map = array_map_alloc(attr);
 	if (IS_ERR(map)) {
 		bpf_map_meta_free(inner_map_meta);
 		return map;
@@ -673,6 +681,7 @@ static u32 array_of_map_gen_lookup(struct bpf_map *map,
 }
 
 const struct bpf_map_ops array_of_maps_map_ops = {
+	.map_alloc_check = fd_array_map_alloc_check,
 	.map_alloc = array_of_map_alloc,
 	.map_free = array_of_map_free,
 	.map_get_next_key = array_map_get_next_key,
diff --git a/kernel/bpf/cgroup.c b/kernel/bpf/cgroup.c
index b789ab78d28f..c1c0b60d3f2f 100644
--- a/kernel/bpf/cgroup.c
+++ b/kernel/bpf/cgroup.c
@@ -568,6 +568,8 @@ static bool cgroup_dev_is_valid_access(int off, int size,
 				       enum bpf_access_type type,
 				       struct bpf_insn_access_aux *info)
 {
+	const int size_default = sizeof(__u32);
+
 	if (type == BPF_WRITE)
 		return false;
 
@@ -576,8 +578,17 @@ static bool cgroup_dev_is_valid_access(int off, int size,
 	/* The verifier guarantees that size > 0. */
 	if (off % size != 0)
 		return false;
-	if (size != sizeof(__u32))
-		return false;
+
+	switch (off) {
+	case bpf_ctx_range(struct bpf_cgroup_dev_ctx, access_type):
+		bpf_ctx_record_field_size(info, size_default);
+		if (!bpf_ctx_narrow_access_ok(off, size, size_default))
+			return false;
+		break;
+	default:
+		if (size != size_default)
+			return false;
+	}
 
 	return true;
 }
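
A hypothetical device-cgroup program decoding the access_type layout
documented in the bpf.h hunk above; with the narrow-access support added
here, the low half could equally be read as a 2-byte load. Return 1 allows
the access, 0 denies it:

    #include <linux/bpf.h>

    #define SEC(name) __attribute__((section(name), used))

    SEC("cgroup/dev")
    int allow_null_char(struct bpf_cgroup_dev_ctx *ctx)
    {
            unsigned int type = ctx->access_type & 0xffff;  /* BPF_DEVCG_DEV_* */
            /* the high half carries BPF_DEVCG_ACC_* (read/write/mknod) */

            /* allow char device 1:3 (/dev/null), deny everything else */
            if (type == BPF_DEVCG_DEV_CHAR && ctx->major == 1 && ctx->minor == 3)
                    return 1;
            return 0;
    }

    char _license[] SEC("license") = "GPL";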
diff --git a/kernel/bpf/core.c b/kernel/bpf/core.c
index 7949e8b8f94e..5f35f93dcab2 100644
--- a/kernel/bpf/core.c
+++ b/kernel/bpf/core.c
@@ -94,6 +94,7 @@ struct bpf_prog *bpf_prog_alloc(unsigned int size, gfp_t gfp_extra_flags)
 	fp->pages = size / PAGE_SIZE;
 	fp->aux = aux;
 	fp->aux->prog = fp;
+	fp->jit_requested = ebpf_jit_enabled();
 
 	INIT_LIST_HEAD_RCU(&fp->aux->ksym_lnode);
 
@@ -217,30 +218,40 @@ int bpf_prog_calc_tag(struct bpf_prog *fp)
 	return 0;
 }
 
-static bool bpf_is_jmp_and_has_target(const struct bpf_insn *insn)
-{
-	return BPF_CLASS(insn->code) == BPF_JMP  &&
-	       /* Call and Exit are both special jumps with no
-		* target inside the BPF instruction image.
-		*/
-	       BPF_OP(insn->code) != BPF_CALL &&
-	       BPF_OP(insn->code) != BPF_EXIT;
-}
-
 static void bpf_adj_branches(struct bpf_prog *prog, u32 pos, u32 delta)
 {
 	struct bpf_insn *insn = prog->insnsi;
 	u32 i, insn_cnt = prog->len;
+	bool pseudo_call;
+	u8 code;
+	int off;
 
 	for (i = 0; i < insn_cnt; i++, insn++) {
-		if (!bpf_is_jmp_and_has_target(insn))
+		code = insn->code;
+		if (BPF_CLASS(code) != BPF_JMP)
 			continue;
+		if (BPF_OP(code) == BPF_EXIT)
+			continue;
+		if (BPF_OP(code) == BPF_CALL) {
+			if (insn->src_reg == BPF_PSEUDO_CALL)
+				pseudo_call = true;
+			else
+				continue;
+		} else {
+			pseudo_call = false;
+		}
+		off = pseudo_call ? insn->imm : insn->off;
 
 		/* Adjust offset of jmps if we cross boundaries. */
-		if (i < pos && i + insn->off + 1 > pos)
-			insn->off += delta;
-		else if (i > pos + delta && i + insn->off + 1 <= pos + delta)
-			insn->off -= delta;
+		if (i < pos && i + off + 1 > pos)
+			off += delta;
+		else if (i > pos + delta && i + off + 1 <= pos + delta)
+			off -= delta;
+
+		if (pseudo_call)
+			insn->imm = off;
+		else
+			insn->off = off;
 	}
 }
 
@@ -289,6 +300,11 @@ struct bpf_prog *bpf_patch_insn_single(struct bpf_prog *prog, u32 off,
 }
 
 #ifdef CONFIG_BPF_JIT
+/* All BPF JIT sysctl knobs here. */
+int bpf_jit_enable   __read_mostly = IS_BUILTIN(CONFIG_BPF_JIT_ALWAYS_ON);
+int bpf_jit_harden   __read_mostly;
+int bpf_jit_kallsyms __read_mostly;
+
 static __always_inline void
 bpf_get_prog_addr_region(const struct bpf_prog *prog,
 			 unsigned long *symbol_start,
@@ -370,8 +386,6 @@ static DEFINE_SPINLOCK(bpf_lock);
 static LIST_HEAD(bpf_kallsyms);
 static struct latch_tree_root bpf_tree __cacheline_aligned;
 
-int bpf_jit_kallsyms __read_mostly;
-
 static void bpf_prog_ksym_node_add(struct bpf_prog_aux *aux)
 {
 	WARN_ON_ONCE(!list_empty(&aux->ksym_lnode));
@@ -552,8 +566,6 @@ void __weak bpf_jit_free(struct bpf_prog *fp)
 	bpf_prog_unlock_free(fp);
 }
 
-int bpf_jit_harden __read_mostly;
-
 static int bpf_jit_blind_insn(const struct bpf_insn *from,
 			      const struct bpf_insn *aux,
 			      struct bpf_insn *to_buff)
@@ -711,7 +723,7 @@ struct bpf_prog *bpf_jit_blind_constants(struct bpf_prog *prog)
 	struct bpf_insn *insn;
 	int i, rewritten;
 
-	if (!bpf_jit_blinding_enabled())
+	if (!bpf_jit_blinding_enabled(prog) || prog->blinded)
 		return prog;
 
 	clone = bpf_prog_clone_create(prog, GFP_USER);
@@ -753,13 +765,16 @@ struct bpf_prog *bpf_jit_blind_constants(struct bpf_prog *prog)
 		i        += insn_delta;
 	}
 
+	clone->blinded = 1;
 	return clone;
 }
 #endif /* CONFIG_BPF_JIT */
 
 /* Base function for offset calculation. Needs to go into .text section,
  * therefore keeping it non-static as well; will also be used by JITs
- * anyway later on, so do not let the compiler omit it.
+ * anyway later on, so do not let the compiler omit it. This also needs
+ * to go into kallsyms for correlation from e.g. bpftool, so naming
+ * must not change.
  */
 noinline u64 __bpf_call_base(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5)
 {
@@ -767,6 +782,137 @@ noinline u64 __bpf_call_base(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5)
 }
 EXPORT_SYMBOL_GPL(__bpf_call_base);
 
+/* All UAPI available opcodes. */
+#define BPF_INSN_MAP(INSN_2, INSN_3)		\
+	/* 32 bit ALU operations. */		\
+	/*   Register based. */			\
+	INSN_3(ALU, ADD, X),			\
+	INSN_3(ALU, SUB, X),			\
+	INSN_3(ALU, AND, X),			\
+	INSN_3(ALU, OR,  X),			\
+	INSN_3(ALU, LSH, X),			\
+	INSN_3(ALU, RSH, X),			\
+	INSN_3(ALU, XOR, X),			\
+	INSN_3(ALU, MUL, X),			\
+	INSN_3(ALU, MOV, X),			\
+	INSN_3(ALU, DIV, X),			\
+	INSN_3(ALU, MOD, X),			\
+	INSN_2(ALU, NEG),			\
+	INSN_3(ALU, END, TO_BE),		\
+	INSN_3(ALU, END, TO_LE),		\
+	/*   Immediate based. */		\
+	INSN_3(ALU, ADD, K),			\
+	INSN_3(ALU, SUB, K),			\
+	INSN_3(ALU, AND, K),			\
+	INSN_3(ALU, OR,  K),			\
+	INSN_3(ALU, LSH, K),			\
+	INSN_3(ALU, RSH, K),			\
+	INSN_3(ALU, XOR, K),			\
+	INSN_3(ALU, MUL, K),			\
+	INSN_3(ALU, MOV, K),			\
+	INSN_3(ALU, DIV, K),			\
+	INSN_3(ALU, MOD, K),			\
+	/* 64 bit ALU operations. */		\
+	/*   Register based. */			\
+	INSN_3(ALU64, ADD,  X),			\
+	INSN_3(ALU64, SUB,  X),			\
+	INSN_3(ALU64, AND,  X),			\
+	INSN_3(ALU64, OR,   X),			\
+	INSN_3(ALU64, LSH,  X),			\
+	INSN_3(ALU64, RSH,  X),			\
+	INSN_3(ALU64, XOR,  X),			\
+	INSN_3(ALU64, MUL,  X),			\
+	INSN_3(ALU64, MOV,  X),			\
+	INSN_3(ALU64, ARSH, X),			\
+	INSN_3(ALU64, DIV,  X),			\
+	INSN_3(ALU64, MOD,  X),			\
+	INSN_2(ALU64, NEG),			\
+	/*   Immediate based. */		\
+	INSN_3(ALU64, ADD,  K),			\
+	INSN_3(ALU64, SUB,  K),			\
+	INSN_3(ALU64, AND,  K),			\
+	INSN_3(ALU64, OR,   K),			\
+	INSN_3(ALU64, LSH,  K),			\
+	INSN_3(ALU64, RSH,  K),			\
+	INSN_3(ALU64, XOR,  K),			\
+	INSN_3(ALU64, MUL,  K),			\
+	INSN_3(ALU64, MOV,  K),			\
+	INSN_3(ALU64, ARSH, K),			\
+	INSN_3(ALU64, DIV,  K),			\
+	INSN_3(ALU64, MOD,  K),			\
+	/* Call instruction. */			\
+	INSN_2(JMP, CALL),			\
+	/* Exit instruction. */			\
+	INSN_2(JMP, EXIT),			\
+	/* Jump instructions. */		\
+	/*   Register based. */			\
+	INSN_3(JMP, JEQ,  X),			\
+	INSN_3(JMP, JNE,  X),			\
+	INSN_3(JMP, JGT,  X),			\
+	INSN_3(JMP, JLT,  X),			\
+	INSN_3(JMP, JGE,  X),			\
+	INSN_3(JMP, JLE,  X),			\
+	INSN_3(JMP, JSGT, X),			\
+	INSN_3(JMP, JSLT, X),			\
+	INSN_3(JMP, JSGE, X),			\
+	INSN_3(JMP, JSLE, X),			\
+	INSN_3(JMP, JSET, X),			\
+	/*   Immediate based. */		\
+	INSN_3(JMP, JEQ,  K),			\
+	INSN_3(JMP, JNE,  K),			\
+	INSN_3(JMP, JGT,  K),			\
+	INSN_3(JMP, JLT,  K),			\
+	INSN_3(JMP, JGE,  K),			\
+	INSN_3(JMP, JLE,  K),			\
+	INSN_3(JMP, JSGT, K),			\
+	INSN_3(JMP, JSLT, K),			\
+	INSN_3(JMP, JSGE, K),			\
+	INSN_3(JMP, JSLE, K),			\
+	INSN_3(JMP, JSET, K),			\
+	INSN_2(JMP, JA),			\
+	/* Store instructions. */		\
+	/*   Register based. */			\
+	INSN_3(STX, MEM,  B),			\
+	INSN_3(STX, MEM,  H),			\
+	INSN_3(STX, MEM,  W),			\
+	INSN_3(STX, MEM,  DW),			\
+	INSN_3(STX, XADD, W),			\
+	INSN_3(STX, XADD, DW),			\
+	/*   Immediate based. */		\
+	INSN_3(ST, MEM, B),			\
+	INSN_3(ST, MEM, H),			\
+	INSN_3(ST, MEM, W),			\
+	INSN_3(ST, MEM, DW),			\
+	/* Load instructions. */		\
+	/*   Register based. */			\
+	INSN_3(LDX, MEM, B),			\
+	INSN_3(LDX, MEM, H),			\
+	INSN_3(LDX, MEM, W),			\
+	INSN_3(LDX, MEM, DW),			\
+	/*   Immediate based. */		\
+	INSN_3(LD, IMM, DW),			\
+	/*   Misc (old cBPF carry-over). */	\
+	INSN_3(LD, ABS, B),			\
+	INSN_3(LD, ABS, H),			\
+	INSN_3(LD, ABS, W),			\
+	INSN_3(LD, IND, B),			\
+	INSN_3(LD, IND, H),			\
+	INSN_3(LD, IND, W)
+
+bool bpf_opcode_in_insntable(u8 code)
+{
+#define BPF_INSN_2_TBL(x, y)    [BPF_##x | BPF_##y] = true
+#define BPF_INSN_3_TBL(x, y, z) [BPF_##x | BPF_##y | BPF_##z] = true
+	static const bool public_insntable[256] = {
+		[0 ... 255] = false,
+		/* Now overwrite non-defaults ... */
+		BPF_INSN_MAP(BPF_INSN_2_TBL, BPF_INSN_3_TBL),
+	};
+#undef BPF_INSN_3_TBL
+#undef BPF_INSN_2_TBL
+	return public_insntable[code];
+}
+
 #ifndef CONFIG_BPF_JIT_ALWAYS_ON
 /**
  *	__bpf_prog_run - run eBPF program on a given context
@@ -775,118 +921,21 @@ EXPORT_SYMBOL_GPL(__bpf_call_base);
  *
  * Decode and execute eBPF instructions.
  */
-static unsigned int ___bpf_prog_run(u64 *regs, const struct bpf_insn *insn,
-				    u64 *stack)
+static u64 ___bpf_prog_run(u64 *regs, const struct bpf_insn *insn, u64 *stack)
 {
 	u64 tmp;
+#define BPF_INSN_2_LBL(x, y)    [BPF_##x | BPF_##y] = &&x##_##y
+#define BPF_INSN_3_LBL(x, y, z) [BPF_##x | BPF_##y | BPF_##z] = &&x##_##y##_##z
 	static const void *jumptable[256] = {
 		[0 ... 255] = &&default_label,
 		/* Now overwrite non-defaults ... */
-		/* 32 bit ALU operations */
-		[BPF_ALU | BPF_ADD | BPF_X] = &&ALU_ADD_X,
-		[BPF_ALU | BPF_ADD | BPF_K] = &&ALU_ADD_K,
-		[BPF_ALU | BPF_SUB | BPF_X] = &&ALU_SUB_X,
-		[BPF_ALU | BPF_SUB | BPF_K] = &&ALU_SUB_K,
-		[BPF_ALU | BPF_AND | BPF_X] = &&ALU_AND_X,
-		[BPF_ALU | BPF_AND | BPF_K] = &&ALU_AND_K,
-		[BPF_ALU | BPF_OR | BPF_X]  = &&ALU_OR_X,
-		[BPF_ALU | BPF_OR | BPF_K]  = &&ALU_OR_K,
-		[BPF_ALU | BPF_LSH | BPF_X] = &&ALU_LSH_X,
-		[BPF_ALU | BPF_LSH | BPF_K] = &&ALU_LSH_K,
-		[BPF_ALU | BPF_RSH | BPF_X] = &&ALU_RSH_X,
-		[BPF_ALU | BPF_RSH | BPF_K] = &&ALU_RSH_K,
-		[BPF_ALU | BPF_XOR | BPF_X] = &&ALU_XOR_X,
-		[BPF_ALU | BPF_XOR | BPF_K] = &&ALU_XOR_K,
-		[BPF_ALU | BPF_MUL | BPF_X] = &&ALU_MUL_X,
-		[BPF_ALU | BPF_MUL | BPF_K] = &&ALU_MUL_K,
-		[BPF_ALU | BPF_MOV | BPF_X] = &&ALU_MOV_X,
-		[BPF_ALU | BPF_MOV | BPF_K] = &&ALU_MOV_K,
-		[BPF_ALU | BPF_DIV | BPF_X] = &&ALU_DIV_X,
-		[BPF_ALU | BPF_DIV | BPF_K] = &&ALU_DIV_K,
-		[BPF_ALU | BPF_MOD | BPF_X] = &&ALU_MOD_X,
-		[BPF_ALU | BPF_MOD | BPF_K] = &&ALU_MOD_K,
-		[BPF_ALU | BPF_NEG] = &&ALU_NEG,
-		[BPF_ALU | BPF_END | BPF_TO_BE] = &&ALU_END_TO_BE,
-		[BPF_ALU | BPF_END | BPF_TO_LE] = &&ALU_END_TO_LE,
-		/* 64 bit ALU operations */
-		[BPF_ALU64 | BPF_ADD | BPF_X] = &&ALU64_ADD_X,
-		[BPF_ALU64 | BPF_ADD | BPF_K] = &&ALU64_ADD_K,
-		[BPF_ALU64 | BPF_SUB | BPF_X] = &&ALU64_SUB_X,
-		[BPF_ALU64 | BPF_SUB | BPF_K] = &&ALU64_SUB_K,
-		[BPF_ALU64 | BPF_AND | BPF_X] = &&ALU64_AND_X,
-		[BPF_ALU64 | BPF_AND | BPF_K] = &&ALU64_AND_K,
-		[BPF_ALU64 | BPF_OR | BPF_X] = &&ALU64_OR_X,
-		[BPF_ALU64 | BPF_OR | BPF_K] = &&ALU64_OR_K,
-		[BPF_ALU64 | BPF_LSH | BPF_X] = &&ALU64_LSH_X,
-		[BPF_ALU64 | BPF_LSH | BPF_K] = &&ALU64_LSH_K,
-		[BPF_ALU64 | BPF_RSH | BPF_X] = &&ALU64_RSH_X,
-		[BPF_ALU64 | BPF_RSH | BPF_K] = &&ALU64_RSH_K,
-		[BPF_ALU64 | BPF_XOR | BPF_X] = &&ALU64_XOR_X,
-		[BPF_ALU64 | BPF_XOR | BPF_K] = &&ALU64_XOR_K,
-		[BPF_ALU64 | BPF_MUL | BPF_X] = &&ALU64_MUL_X,
-		[BPF_ALU64 | BPF_MUL | BPF_K] = &&ALU64_MUL_K,
-		[BPF_ALU64 | BPF_MOV | BPF_X] = &&ALU64_MOV_X,
-		[BPF_ALU64 | BPF_MOV | BPF_K] = &&ALU64_MOV_K,
-		[BPF_ALU64 | BPF_ARSH | BPF_X] = &&ALU64_ARSH_X,
-		[BPF_ALU64 | BPF_ARSH | BPF_K] = &&ALU64_ARSH_K,
-		[BPF_ALU64 | BPF_DIV | BPF_X] = &&ALU64_DIV_X,
-		[BPF_ALU64 | BPF_DIV | BPF_K] = &&ALU64_DIV_K,
-		[BPF_ALU64 | BPF_MOD | BPF_X] = &&ALU64_MOD_X,
-		[BPF_ALU64 | BPF_MOD | BPF_K] = &&ALU64_MOD_K,
-		[BPF_ALU64 | BPF_NEG] = &&ALU64_NEG,
-		/* Call instruction */
-		[BPF_JMP | BPF_CALL] = &&JMP_CALL,
+		BPF_INSN_MAP(BPF_INSN_2_LBL, BPF_INSN_3_LBL),
+		/* Non-UAPI available opcodes. */
+		[BPF_JMP | BPF_CALL_ARGS] = &&JMP_CALL_ARGS,
 		[BPF_JMP | BPF_TAIL_CALL] = &&JMP_TAIL_CALL,
-		/* Jumps */
-		[BPF_JMP | BPF_JA] = &&JMP_JA,
-		[BPF_JMP | BPF_JEQ | BPF_X] = &&JMP_JEQ_X,
-		[BPF_JMP | BPF_JEQ | BPF_K] = &&JMP_JEQ_K,
-		[BPF_JMP | BPF_JNE | BPF_X] = &&JMP_JNE_X,
-		[BPF_JMP | BPF_JNE | BPF_K] = &&JMP_JNE_K,
-		[BPF_JMP | BPF_JGT | BPF_X] = &&JMP_JGT_X,
-		[BPF_JMP | BPF_JGT | BPF_K] = &&JMP_JGT_K,
-		[BPF_JMP | BPF_JLT | BPF_X] = &&JMP_JLT_X,
-		[BPF_JMP | BPF_JLT | BPF_K] = &&JMP_JLT_K,
-		[BPF_JMP | BPF_JGE | BPF_X] = &&JMP_JGE_X,
-		[BPF_JMP | BPF_JGE | BPF_K] = &&JMP_JGE_K,
-		[BPF_JMP | BPF_JLE | BPF_X] = &&JMP_JLE_X,
-		[BPF_JMP | BPF_JLE | BPF_K] = &&JMP_JLE_K,
-		[BPF_JMP | BPF_JSGT | BPF_X] = &&JMP_JSGT_X,
-		[BPF_JMP | BPF_JSGT | BPF_K] = &&JMP_JSGT_K,
-		[BPF_JMP | BPF_JSLT | BPF_X] = &&JMP_JSLT_X,
-		[BPF_JMP | BPF_JSLT | BPF_K] = &&JMP_JSLT_K,
-		[BPF_JMP | BPF_JSGE | BPF_X] = &&JMP_JSGE_X,
-		[BPF_JMP | BPF_JSGE | BPF_K] = &&JMP_JSGE_K,
-		[BPF_JMP | BPF_JSLE | BPF_X] = &&JMP_JSLE_X,
-		[BPF_JMP | BPF_JSLE | BPF_K] = &&JMP_JSLE_K,
-		[BPF_JMP | BPF_JSET | BPF_X] = &&JMP_JSET_X,
-		[BPF_JMP | BPF_JSET | BPF_K] = &&JMP_JSET_K,
-		/* Program return */
-		[BPF_JMP | BPF_EXIT] = &&JMP_EXIT,
-		/* Store instructions */
-		[BPF_STX | BPF_MEM | BPF_B] = &&STX_MEM_B,
-		[BPF_STX | BPF_MEM | BPF_H] = &&STX_MEM_H,
-		[BPF_STX | BPF_MEM | BPF_W] = &&STX_MEM_W,
-		[BPF_STX | BPF_MEM | BPF_DW] = &&STX_MEM_DW,
-		[BPF_STX | BPF_XADD | BPF_W] = &&STX_XADD_W,
-		[BPF_STX | BPF_XADD | BPF_DW] = &&STX_XADD_DW,
-		[BPF_ST | BPF_MEM | BPF_B] = &&ST_MEM_B,
-		[BPF_ST | BPF_MEM | BPF_H] = &&ST_MEM_H,
-		[BPF_ST | BPF_MEM | BPF_W] = &&ST_MEM_W,
-		[BPF_ST | BPF_MEM | BPF_DW] = &&ST_MEM_DW,
-		/* Load instructions */
-		[BPF_LDX | BPF_MEM | BPF_B] = &&LDX_MEM_B,
-		[BPF_LDX | BPF_MEM | BPF_H] = &&LDX_MEM_H,
-		[BPF_LDX | BPF_MEM | BPF_W] = &&LDX_MEM_W,
-		[BPF_LDX | BPF_MEM | BPF_DW] = &&LDX_MEM_DW,
-		[BPF_LD | BPF_ABS | BPF_W] = &&LD_ABS_W,
-		[BPF_LD | BPF_ABS | BPF_H] = &&LD_ABS_H,
-		[BPF_LD | BPF_ABS | BPF_B] = &&LD_ABS_B,
-		[BPF_LD | BPF_IND | BPF_W] = &&LD_IND_W,
-		[BPF_LD | BPF_IND | BPF_H] = &&LD_IND_H,
-		[BPF_LD | BPF_IND | BPF_B] = &&LD_IND_B,
-		[BPF_LD | BPF_IMM | BPF_DW] = &&LD_IMM_DW,
 	};
+#undef BPF_INSN_3_LBL
+#undef BPF_INSN_2_LBL
 	u32 tail_call_cnt = 0;
 	void *ptr;
 	int off;
@@ -950,14 +999,10 @@ select_insn:
 		(*(s64 *) &DST) >>= IMM;
 		CONT;
 	ALU64_MOD_X:
-		if (unlikely(SRC == 0))
-			return 0;
 		div64_u64_rem(DST, SRC, &tmp);
 		DST = tmp;
 		CONT;
 	ALU_MOD_X:
-		if (unlikely((u32)SRC == 0))
-			return 0;
 		tmp = (u32) DST;
 		DST = do_div(tmp, (u32) SRC);
 		CONT;
@@ -970,13 +1015,9 @@ select_insn:
 		DST = do_div(tmp, (u32) IMM);
 		CONT;
 	ALU64_DIV_X:
-		if (unlikely(SRC == 0))
-			return 0;
 		DST = div64_u64(DST, SRC);
 		CONT;
 	ALU_DIV_X:
-		if (unlikely((u32)SRC == 0))
-			return 0;
 		tmp = (u32) DST;
 		do_div(tmp, (u32) SRC);
 		DST = (u32) tmp;
@@ -1026,6 +1067,13 @@ select_insn:
 						       BPF_R4, BPF_R5);
 		CONT;
 
+	JMP_CALL_ARGS:
+		BPF_R0 = (__bpf_call_base_args + insn->imm)(BPF_R1, BPF_R2,
+							    BPF_R3, BPF_R4,
+							    BPF_R5,
+							    insn + insn->off + 1);
+		CONT;
+
 	JMP_TAIL_CALL: {
 		struct bpf_map *map = (struct bpf_map *) (unsigned long) BPF_R2;
 		struct bpf_array *array = container_of(map, struct bpf_array, map);
@@ -1280,8 +1328,14 @@ load_byte:
 		goto load_byte;
 
 	default_label:
-		/* If we ever reach this, we have a bug somewhere. */
-		WARN_RATELIMIT(1, "unknown opcode %02x\n", insn->code);
+		/* If we ever reach this, we have a bug somewhere. Die hard here
+		 * instead of just returning 0; we could be somewhere in a subprog,
+		 * so execution could otherwise continue, which we do /not/ want.
+		 *
+		 * Note, verifier whitelists all opcodes in bpf_opcode_in_insntable().
+		 */
+		pr_warn("BPF interpreter: unknown opcode %02x\n", insn->code);
+		BUG_ON(1);
 		return 0;
 }
 STACK_FRAME_NON_STANDARD(___bpf_prog_run); /* jump table */
@@ -1298,6 +1352,23 @@ static unsigned int PROG_NAME(stack_size)(const void *ctx, const struct bpf_insn
 	return ___bpf_prog_run(regs, insn, stack); \
 }
 
+#define PROG_NAME_ARGS(stack_size) __bpf_prog_run_args##stack_size
+#define DEFINE_BPF_PROG_RUN_ARGS(stack_size) \
+static u64 PROG_NAME_ARGS(stack_size)(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5, \
+				      const struct bpf_insn *insn) \
+{ \
+	u64 stack[stack_size / sizeof(u64)]; \
+	u64 regs[MAX_BPF_REG]; \
+\
+	FP = (u64) (unsigned long) &stack[ARRAY_SIZE(stack)]; \
+	BPF_R1 = r1; \
+	BPF_R2 = r2; \
+	BPF_R3 = r3; \
+	BPF_R4 = r4; \
+	BPF_R5 = r5; \
+	return ___bpf_prog_run(regs, insn, stack); \
+}
+
 #define EVAL1(FN, X) FN(X)
 #define EVAL2(FN, X, Y...) FN(X) EVAL1(FN, Y)
 #define EVAL3(FN, X, Y...) FN(X) EVAL2(FN, Y)
@@ -1309,6 +1380,10 @@ EVAL6(DEFINE_BPF_PROG_RUN, 32, 64, 96, 128, 160, 192);
 EVAL6(DEFINE_BPF_PROG_RUN, 224, 256, 288, 320, 352, 384);
 EVAL4(DEFINE_BPF_PROG_RUN, 416, 448, 480, 512);
 
+EVAL6(DEFINE_BPF_PROG_RUN_ARGS, 32, 64, 96, 128, 160, 192);
+EVAL6(DEFINE_BPF_PROG_RUN_ARGS, 224, 256, 288, 320, 352, 384);
+EVAL4(DEFINE_BPF_PROG_RUN_ARGS, 416, 448, 480, 512);
+
 #define PROG_NAME_LIST(stack_size) PROG_NAME(stack_size),
 
 static unsigned int (*interpreters[])(const void *ctx,
@@ -1317,11 +1392,33 @@ EVAL6(PROG_NAME_LIST, 32, 64, 96, 128, 160, 192)
 EVAL6(PROG_NAME_LIST, 224, 256, 288, 320, 352, 384)
 EVAL4(PROG_NAME_LIST, 416, 448, 480, 512)
 };
+#undef PROG_NAME_LIST
+#define PROG_NAME_LIST(stack_size) PROG_NAME_ARGS(stack_size),
+static u64 (*interpreters_args[])(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5,
+				  const struct bpf_insn *insn) = {
+EVAL6(PROG_NAME_LIST, 32, 64, 96, 128, 160, 192)
+EVAL6(PROG_NAME_LIST, 224, 256, 288, 320, 352, 384)
+EVAL4(PROG_NAME_LIST, 416, 448, 480, 512)
+};
+#undef PROG_NAME_LIST
+
+void bpf_patch_call_args(struct bpf_insn *insn, u32 stack_depth)
+{
+	stack_depth = max_t(u32, stack_depth, 1);
+	insn->off = (s16) insn->imm;
+	insn->imm = interpreters_args[(round_up(stack_depth, 32) / 32) - 1] -
+		__bpf_call_base_args;
+	insn->code = BPF_JMP | BPF_CALL_ARGS;
+}
 
 #else
-static unsigned int __bpf_prog_ret0(const void *ctx,
-				    const struct bpf_insn *insn)
+static unsigned int __bpf_prog_ret0_warn(const void *ctx,
+					 const struct bpf_insn *insn)
 {
+	/* If this handler ever gets executed, then BPF_JIT_ALWAYS_ON
+	 * is not working properly, so warn about it!
+	 */
+	WARN_ON_ONCE(1);
 	return 0;
 }
 #endif
@@ -1329,6 +1426,9 @@ static unsigned int __bpf_prog_ret0(const void *ctx,
 bool bpf_prog_array_compatible(struct bpf_array *array,
 			       const struct bpf_prog *fp)
 {
+	if (fp->kprobe_override)
+		return false;
+
 	if (!array->owner_prog_type) {
 		/* There's no owner yet where we could check for
 		 * compatibility.
@@ -1378,7 +1478,7 @@ struct bpf_prog *bpf_prog_select_runtime(struct bpf_prog *fp, int *err)
 
 	fp->bpf_func = interpreters[(round_up(stack_depth, 32) / 32) - 1];
 #else
-	fp->bpf_func = __bpf_prog_ret0;
+	fp->bpf_func = __bpf_prog_ret0_warn;
 #endif
 
 	/* eBPF JITs can rewrite the program in case constant
@@ -1481,6 +1581,8 @@ int bpf_prog_array_copy_to_user(struct bpf_prog_array __rcu *progs,
 	rcu_read_lock();
 	prog = rcu_dereference(progs)->progs;
 	for (; *prog; prog++) {
+		if (*prog == &dummy_bpf_prog.prog)
+			continue;
 		id = (*prog)->aux->id;
 		if (copy_to_user(prog_ids + i, &id, sizeof(id))) {
 			rcu_read_unlock();
@@ -1564,14 +1666,41 @@ int bpf_prog_array_copy(struct bpf_prog_array __rcu *old_array,
 	return 0;
 }
 
+int bpf_prog_array_copy_info(struct bpf_prog_array __rcu *array,
+			     __u32 __user *prog_ids, u32 request_cnt,
+			     __u32 __user *prog_cnt)
+{
+	u32 cnt = 0;
+
+	if (array)
+		cnt = bpf_prog_array_length(array);
+
+	if (copy_to_user(prog_cnt, &cnt, sizeof(cnt)))
+		return -EFAULT;
+
+	/* return early if user requested only program count or nothing to copy */
+	if (!request_cnt || !cnt)
+		return 0;
+
+	return bpf_prog_array_copy_to_user(array, prog_ids, request_cnt);
+}
+
 static void bpf_prog_free_deferred(struct work_struct *work)
 {
 	struct bpf_prog_aux *aux;
+	int i;
 
 	aux = container_of(work, struct bpf_prog_aux, work);
 	if (bpf_prog_is_dev_bound(aux))
 		bpf_prog_offload_destroy(aux->prog);
-	bpf_jit_free(aux->prog);
+	for (i = 0; i < aux->func_cnt; i++)
+		bpf_jit_free(aux->func[i]);
+	if (aux->func_cnt) {
+		kfree(aux->func);
+		bpf_prog_unlock_free(aux->prog);
+	} else {
+		bpf_jit_free(aux->prog);
+	}
 }
 
 /* Free internal BPF program */
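
The BPF_INSN_MAP rewrite above leans on a single X-macro list that expands
into both the validity table and the interpreter's computed-goto labels. A
tiny standalone sketch of the same technique, with arbitrary opcode values
(GCC range initializers, as in the patch; the later entries deliberately
override the [0 ... 255] default):

    #include <stdbool.h>
    #include <stdio.h>

    #define OPS(OP)  OP(ADD, 0x04) OP(SUB, 0x14) OP(MOV, 0xb4)

    #define OP_TBL(name, val) [val] = true,

    static const bool valid_op[256] = {
            [0 ... 255] = false,
            OPS(OP_TBL)     /* expands to [0x04] = true, ... */
    };

    int main(void)
    {
            printf("0x04 valid: %d\n", valid_op[0x04]);
            printf("0x05 valid: %d\n", valid_op[0x05]);
            return 0;
    }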
diff --git a/kernel/bpf/cpumap.c b/kernel/bpf/cpumap.c
index ce5b669003b2..fbfdada6caee 100644
--- a/kernel/bpf/cpumap.c
+++ b/kernel/bpf/cpumap.c
@@ -94,13 +94,7 @@ static struct bpf_map *cpu_map_alloc(union bpf_attr *attr)
 	if (!cmap)
 		return ERR_PTR(-ENOMEM);
 
-	/* mandatory map attributes */
-	cmap->map.map_type = attr->map_type;
-	cmap->map.key_size = attr->key_size;
-	cmap->map.value_size = attr->value_size;
-	cmap->map.max_entries = attr->max_entries;
-	cmap->map.map_flags = attr->map_flags;
-	cmap->map.numa_node = bpf_map_attr_numa_node(attr);
+	bpf_map_init_from_attr(&cmap->map, attr);
 
 	/* Pre-limit array size based on NR_CPUS, not final CPU check */
 	if (cmap->map.max_entries > NR_CPUS) {
@@ -143,7 +137,7 @@ free_cmap:
 	return ERR_PTR(err);
 }
 
-void __cpu_map_queue_destructor(void *ptr)
+static void __cpu_map_queue_destructor(void *ptr)
 {
 	/* The tear-down procedure should have made sure that queue is
 	 * empty.  See __cpu_map_entry_replace() and work-queue
@@ -222,8 +216,8 @@ static struct xdp_pkt *convert_to_xdp_pkt(struct xdp_buff *xdp)
 	return xdp_pkt;
 }
 
-struct sk_buff *cpu_map_build_skb(struct bpf_cpu_map_entry *rcpu,
-				  struct xdp_pkt *xdp_pkt)
+static struct sk_buff *cpu_map_build_skb(struct bpf_cpu_map_entry *rcpu,
+					 struct xdp_pkt *xdp_pkt)
 {
 	unsigned int frame_size;
 	void *pkt_data_start;
@@ -337,7 +331,8 @@ static int cpu_map_kthread_run(void *data)
 	return 0;
 }
 
-struct bpf_cpu_map_entry *__cpu_map_entry_alloc(u32 qsize, u32 cpu, int map_id)
+static struct bpf_cpu_map_entry *__cpu_map_entry_alloc(u32 qsize, u32 cpu,
+						       int map_id)
 {
 	gfp_t gfp = GFP_ATOMIC|__GFP_NOWARN;
 	struct bpf_cpu_map_entry *rcpu;
@@ -395,7 +390,7 @@ free_rcu:
 	return NULL;
 }
 
-void __cpu_map_entry_free(struct rcu_head *rcu)
+static void __cpu_map_entry_free(struct rcu_head *rcu)
 {
 	struct bpf_cpu_map_entry *rcpu;
 	int cpu;
@@ -438,8 +433,8 @@ void __cpu_map_entry_free(struct rcu_head *rcu)
 * cpu_map_kthread_stop, which waits for an RCU grace period before
  * stopping kthread, emptying the queue.
  */
-void __cpu_map_entry_replace(struct bpf_cpu_map *cmap,
-			     u32 key_cpu, struct bpf_cpu_map_entry *rcpu)
+static void __cpu_map_entry_replace(struct bpf_cpu_map *cmap,
+				    u32 key_cpu, struct bpf_cpu_map_entry *rcpu)
 {
 	struct bpf_cpu_map_entry *old_rcpu;
 
@@ -451,7 +446,7 @@ void __cpu_map_entry_replace(struct bpf_cpu_map *cmap,
 	}
 }
 
-int cpu_map_delete_elem(struct bpf_map *map, void *key)
+static int cpu_map_delete_elem(struct bpf_map *map, void *key)
 {
 	struct bpf_cpu_map *cmap = container_of(map, struct bpf_cpu_map, map);
 	u32 key_cpu = *(u32 *)key;
@@ -464,8 +459,8 @@ int cpu_map_delete_elem(struct bpf_map *map, void *key)
 	return 0;
 }
 
-int cpu_map_update_elem(struct bpf_map *map, void *key, void *value,
-				u64 map_flags)
+static int cpu_map_update_elem(struct bpf_map *map, void *key, void *value,
+			       u64 map_flags)
 {
 	struct bpf_cpu_map *cmap = container_of(map, struct bpf_cpu_map, map);
 	struct bpf_cpu_map_entry *rcpu;
@@ -502,7 +497,7 @@ int cpu_map_update_elem(struct bpf_map *map, void *key, void *value,
 	return 0;
 }
 
-void cpu_map_free(struct bpf_map *map)
+static void cpu_map_free(struct bpf_map *map)
 {
 	struct bpf_cpu_map *cmap = container_of(map, struct bpf_cpu_map, map);
 	int cpu;
diff --git a/kernel/bpf/devmap.c b/kernel/bpf/devmap.c
index ebdef54bf7df..565f9ece9115 100644
--- a/kernel/bpf/devmap.c
+++ b/kernel/bpf/devmap.c
@@ -93,13 +93,7 @@ static struct bpf_map *dev_map_alloc(union bpf_attr *attr)
 	if (!dtab)
 		return ERR_PTR(-ENOMEM);
 
-	/* mandatory map attributes */
-	dtab->map.map_type = attr->map_type;
-	dtab->map.key_size = attr->key_size;
-	dtab->map.value_size = attr->value_size;
-	dtab->map.max_entries = attr->max_entries;
-	dtab->map.map_flags = attr->map_flags;
-	dtab->map.numa_node = bpf_map_attr_numa_node(attr);
+	bpf_map_init_from_attr(&dtab->map, attr);
 
 	/* make sure page count doesn't overflow */
 	cost = (u64) dtab->map.max_entries * sizeof(struct bpf_dtab_netdev *);
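Both cpumap and devmap (and several map types further down) drop the same six copy-pasted attribute assignments in favour of the shared bpf_map_init_from_attr() helper added in kernel/bpf/syscall.c later in this patch. A userspace sketch of that consolidation pattern, with illustrative types only:

/* One shared init helper replaces the per-map field copies. */
#include <stdio.h>

struct fake_attr { unsigned key_size, value_size, max_entries, flags; };
struct fake_map  { unsigned key_size, value_size, max_entries, flags; };

static void fake_map_init_from_attr(struct fake_map *map,
				    const struct fake_attr *attr)
{
	map->key_size = attr->key_size;
	map->value_size = attr->value_size;
	map->max_entries = attr->max_entries;
	map->flags = attr->flags;
}

int main(void)
{
	struct fake_attr attr = { 4, 8, 128, 0 };
	struct fake_map cmap, dtab;

	/* every map type now calls the same helper instead of
	 * open-coding the mandatory attribute copies
	 */
	fake_map_init_from_attr(&cmap, &attr);
	fake_map_init_from_attr(&dtab, &attr);
	printf("max_entries: %u %u\n", cmap.max_entries, dtab.max_entries);
	return 0;
}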
diff --git a/kernel/bpf/disasm.c b/kernel/bpf/disasm.c
index e682850c9715..8740406df2cd 100644
--- a/kernel/bpf/disasm.c
+++ b/kernel/bpf/disasm.c
@@ -21,10 +21,39 @@ static const char * const func_id_str[] = {
 };
 #undef __BPF_FUNC_STR_FN
 
-const char *func_id_name(int id)
+static const char *__func_get_name(const struct bpf_insn_cbs *cbs,
+				   const struct bpf_insn *insn,
+				   char *buff, size_t len)
 {
 	BUILD_BUG_ON(ARRAY_SIZE(func_id_str) != __BPF_FUNC_MAX_ID);
 
+	if (insn->src_reg != BPF_PSEUDO_CALL &&
+	    insn->imm >= 0 && insn->imm < __BPF_FUNC_MAX_ID &&
+	    func_id_str[insn->imm])
+		return func_id_str[insn->imm];
+
+	if (cbs && cbs->cb_call)
+		return cbs->cb_call(cbs->private_data, insn);
+
+	if (insn->src_reg == BPF_PSEUDO_CALL)
+		snprintf(buff, len, "%+d", insn->imm);
+
+	return buff;
+}
+
+static const char *__func_imm_name(const struct bpf_insn_cbs *cbs,
+				   const struct bpf_insn *insn,
+				   u64 full_imm, char *buff, size_t len)
+{
+	if (cbs && cbs->cb_imm)
+		return cbs->cb_imm(cbs->private_data, insn, full_imm);
+
+	snprintf(buff, len, "0x%llx", (unsigned long long)full_imm);
+	return buff;
+}
+
+const char *func_id_name(int id)
+{
 	if (id >= 0 && id < __BPF_FUNC_MAX_ID && func_id_str[id])
 		return func_id_str[id];
 	else
@@ -83,7 +112,7 @@ static const char *const bpf_jmp_string[16] = {
 	[BPF_EXIT >> 4] = "exit",
 };
 
-static void print_bpf_end_insn(bpf_insn_print_cb verbose,
+static void print_bpf_end_insn(bpf_insn_print_t verbose,
 			       struct bpf_verifier_env *env,
 			       const struct bpf_insn *insn)
 {
@@ -92,9 +121,12 @@ static void print_bpf_end_insn(bpf_insn_print_cb verbose,
 		insn->imm, insn->dst_reg);
 }
 
-void print_bpf_insn(bpf_insn_print_cb verbose, struct bpf_verifier_env *env,
-		    const struct bpf_insn *insn, bool allow_ptr_leaks)
+void print_bpf_insn(const struct bpf_insn_cbs *cbs,
+		    struct bpf_verifier_env *env,
+		    const struct bpf_insn *insn,
+		    bool allow_ptr_leaks)
 {
+	const bpf_insn_print_t verbose = cbs->cb_print;
 	u8 class = BPF_CLASS(insn->code);
 
 	if (class == BPF_ALU || class == BPF_ALU64) {
@@ -175,12 +207,15 @@ void print_bpf_insn(bpf_insn_print_cb verbose, struct bpf_verifier_env *env,
 			 */
 			u64 imm = ((u64)(insn + 1)->imm << 32) | (u32)insn->imm;
 			bool map_ptr = insn->src_reg == BPF_PSEUDO_MAP_FD;
+			char tmp[64];
 
 			if (map_ptr && !allow_ptr_leaks)
 				imm = 0;
 
-			verbose(env, "(%02x) r%d = 0x%llx\n", insn->code,
-				insn->dst_reg, (unsigned long long)imm);
+			verbose(env, "(%02x) r%d = %s\n",
+				insn->code, insn->dst_reg,
+				__func_imm_name(cbs, insn, imm,
+						tmp, sizeof(tmp)));
 		} else {
 			verbose(env, "BUG_ld_%02x\n", insn->code);
 			return;
@@ -189,8 +224,20 @@ void print_bpf_insn(bpf_insn_print_cb verbose, struct bpf_verifier_env *env,
 		u8 opcode = BPF_OP(insn->code);
 
 		if (opcode == BPF_CALL) {
-			verbose(env, "(%02x) call %s#%d\n", insn->code,
-				func_id_name(insn->imm), insn->imm);
+			char tmp[64];
+
+			if (insn->src_reg == BPF_PSEUDO_CALL) {
+				verbose(env, "(%02x) call pc%s\n",
+					insn->code,
+					__func_get_name(cbs, insn,
+							tmp, sizeof(tmp)));
+			} else {
+				strcpy(tmp, "unknown");
+				verbose(env, "(%02x) call %s#%d\n", insn->code,
+					__func_get_name(cbs, insn,
+							tmp, sizeof(tmp)),
+					insn->imm);
+			}
 		} else if (insn->code == (BPF_JMP | BPF_JA)) {
 			verbose(env, "(%02x) goto pc%+d\n",
 				insn->code, insn->off);
diff --git a/kernel/bpf/disasm.h b/kernel/bpf/disasm.h
index 8de977e420b6..266fe8ee542b 100644
--- a/kernel/bpf/disasm.h
+++ b/kernel/bpf/disasm.h
@@ -17,16 +17,35 @@
 #include <linux/bpf.h>
 #include <linux/kernel.h>
 #include <linux/stringify.h>
+#ifndef __KERNEL__
+#include <stdio.h>
+#include <string.h>
+#endif
+
+struct bpf_verifier_env;
 
 extern const char *const bpf_alu_string[16];
 extern const char *const bpf_class_string[8];
 
 const char *func_id_name(int id);
 
-struct bpf_verifier_env;
-typedef void (*bpf_insn_print_cb)(struct bpf_verifier_env *env,
-				  const char *, ...);
-void print_bpf_insn(bpf_insn_print_cb verbose, struct bpf_verifier_env *env,
-		    const struct bpf_insn *insn, bool allow_ptr_leaks);
+typedef __printf(2, 3) void (*bpf_insn_print_t)(struct bpf_verifier_env *env,
+						const char *, ...);
+typedef const char *(*bpf_insn_revmap_call_t)(void *private_data,
+					      const struct bpf_insn *insn);
+typedef const char *(*bpf_insn_print_imm_t)(void *private_data,
+					    const struct bpf_insn *insn,
+					    __u64 full_imm);
+
+struct bpf_insn_cbs {
+	bpf_insn_print_t	cb_print;
+	bpf_insn_revmap_call_t	cb_call;
+	bpf_insn_print_imm_t	cb_imm;
+	void			*private_data;
+};
 
+void print_bpf_insn(const struct bpf_insn_cbs *cbs,
+		    struct bpf_verifier_env *env,
+		    const struct bpf_insn *insn,
+		    bool allow_ptr_leaks);
 #endif
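disasm.c/disasm.h replace the single verbose callback with a struct bpf_insn_cbs bundle, so a caller (for instance a userspace build of this file via the #ifndef __KERNEL__ block) can override how call targets and 64-bit immediates are rendered, falling back to snprintf() into a caller-provided buffer. A compact userspace sketch of that callback-with-fallback pattern, using hypothetical names:

/* Callers may override immediate rendering; otherwise a default
 * snprintf() into the caller's buffer is used. Illustrative only.
 */
#include <stdio.h>

struct insn { int imm; };

typedef const char *(*imm_cb_t)(void *priv, const struct insn *insn,
				unsigned long long full_imm);

struct insn_cbs {
	imm_cb_t cb_imm;
	void *private_data;
};

static const char *imm_name(const struct insn_cbs *cbs,
			    const struct insn *insn,
			    unsigned long long full_imm,
			    char *buff, size_t len)
{
	if (cbs && cbs->cb_imm)
		return cbs->cb_imm(cbs->private_data, insn, full_imm);
	snprintf(buff, len, "0x%llx", full_imm);
	return buff;
}

static const char *my_imm_cb(void *priv, const struct insn *insn,
			     unsigned long long full_imm)
{
	(void)priv; (void)insn; (void)full_imm;
	return "map[id:7]";	/* e.g. resolve a raw pointer to a map id */
}

int main(void)
{
	struct insn insn = { 42 };
	struct insn_cbs cbs = { .cb_imm = my_imm_cb };
	char tmp[64];

	printf("%s\n", imm_name(NULL, &insn, 0x1234, tmp, sizeof(tmp)));
	printf("%s\n", imm_name(&cbs, &insn, 0x1234, tmp, sizeof(tmp)));
	return 0;
}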
diff --git a/kernel/bpf/hashtab.c b/kernel/bpf/hashtab.c
index 3905d4bc5b80..b76828f23b49 100644
--- a/kernel/bpf/hashtab.c
+++ b/kernel/bpf/hashtab.c
@@ -227,7 +227,7 @@ static int alloc_extra_elems(struct bpf_htab *htab)
 }
 
 /* Called from syscall */
-static struct bpf_map *htab_map_alloc(union bpf_attr *attr)
+static int htab_map_alloc_check(union bpf_attr *attr)
 {
 	bool percpu = (attr->map_type == BPF_MAP_TYPE_PERCPU_HASH ||
 		       attr->map_type == BPF_MAP_TYPE_LRU_PERCPU_HASH);
@@ -241,9 +241,6 @@ static struct bpf_map *htab_map_alloc(union bpf_attr *attr)
 	bool percpu_lru = (attr->map_flags & BPF_F_NO_COMMON_LRU);
 	bool prealloc = !(attr->map_flags & BPF_F_NO_PREALLOC);
 	int numa_node = bpf_map_attr_numa_node(attr);
-	struct bpf_htab *htab;
-	int err, i;
-	u64 cost;
 
 	BUILD_BUG_ON(offsetof(struct htab_elem, htab) !=
 		     offsetof(struct htab_elem, hash_node.pprev));
@@ -254,40 +251,68 @@ static struct bpf_map *htab_map_alloc(union bpf_attr *attr)
 		/* LRU implementation is much more complicated than other
 		 * maps.  Hence, limit to CAP_SYS_ADMIN for now.
 		 */
-		return ERR_PTR(-EPERM);
+		return -EPERM;
 
 	if (attr->map_flags & ~HTAB_CREATE_FLAG_MASK)
 		/* reserved bits should not be used */
-		return ERR_PTR(-EINVAL);
+		return -EINVAL;
 
 	if (!lru && percpu_lru)
-		return ERR_PTR(-EINVAL);
+		return -EINVAL;
 
 	if (lru && !prealloc)
-		return ERR_PTR(-ENOTSUPP);
+		return -ENOTSUPP;
 
 	if (numa_node != NUMA_NO_NODE && (percpu || percpu_lru))
-		return ERR_PTR(-EINVAL);
+		return -EINVAL;
+
+	/* check sanity of attributes.
+	 * value_size == 0 may be allowed in the future to use map as a set
+	 */
+	if (attr->max_entries == 0 || attr->key_size == 0 ||
+	    attr->value_size == 0)
+		return -EINVAL;
+
+	if (attr->key_size > MAX_BPF_STACK)
+		/* eBPF programs initialize keys on stack, so they cannot be
+		 * larger than max stack size
+		 */
+		return -E2BIG;
+
+	if (attr->value_size >= KMALLOC_MAX_SIZE -
+	    MAX_BPF_STACK - sizeof(struct htab_elem))
+		/* if value_size is bigger, user space won't be able to
+		 * access the elements via bpf syscall. This check also makes
+		 * sure that the elem_size doesn't overflow and it's
+		 * kmalloc-able later in htab_map_update_elem()
+		 */
+		return -E2BIG;
+
+	return 0;
+}
+
+static struct bpf_map *htab_map_alloc(union bpf_attr *attr)
+{
+	bool percpu = (attr->map_type == BPF_MAP_TYPE_PERCPU_HASH ||
+		       attr->map_type == BPF_MAP_TYPE_LRU_PERCPU_HASH);
+	bool lru = (attr->map_type == BPF_MAP_TYPE_LRU_HASH ||
+		    attr->map_type == BPF_MAP_TYPE_LRU_PERCPU_HASH);
+	/* percpu_lru means each cpu has its own LRU list.
+	 * it is different from BPF_MAP_TYPE_PERCPU_HASH where
+	 * the map's value itself is percpu.  percpu_lru has
+	 * nothing to do with the map's value.
+	 */
+	bool percpu_lru = (attr->map_flags & BPF_F_NO_COMMON_LRU);
+	bool prealloc = !(attr->map_flags & BPF_F_NO_PREALLOC);
+	struct bpf_htab *htab;
+	int err, i;
+	u64 cost;
 
 	htab = kzalloc(sizeof(*htab), GFP_USER);
 	if (!htab)
 		return ERR_PTR(-ENOMEM);
 
-	/* mandatory map attributes */
-	htab->map.map_type = attr->map_type;
-	htab->map.key_size = attr->key_size;
-	htab->map.value_size = attr->value_size;
-	htab->map.max_entries = attr->max_entries;
-	htab->map.map_flags = attr->map_flags;
-	htab->map.numa_node = numa_node;
-
-	/* check sanity of attributes.
-	 * value_size == 0 may be allowed in the future to use map as a set
-	 */
-	err = -EINVAL;
-	if (htab->map.max_entries == 0 || htab->map.key_size == 0 ||
-	    htab->map.value_size == 0)
-		goto free_htab;
+	bpf_map_init_from_attr(&htab->map, attr);
 
 	if (percpu_lru) {
 		/* ensure each CPU's lru list has >=1 elements.
@@ -304,22 +329,6 @@ static struct bpf_map *htab_map_alloc(union bpf_attr *attr)
 	/* hash table size must be power of 2 */
 	htab->n_buckets = roundup_pow_of_two(htab->map.max_entries);
 
-	err = -E2BIG;
-	if (htab->map.key_size > MAX_BPF_STACK)
-		/* eBPF programs initialize keys on stack, so they cannot be
-		 * larger than max stack size
-		 */
-		goto free_htab;
-
-	if (htab->map.value_size >= KMALLOC_MAX_SIZE -
-	    MAX_BPF_STACK - sizeof(struct htab_elem))
-		/* if value_size is bigger, the user space won't be able to
-		 * access the elements via bpf syscall. This check also makes
-		 * sure that the elem_size doesn't overflow and it's
-		 * kmalloc-able later in htab_map_update_elem()
-		 */
-		goto free_htab;
-
 	htab->elem_size = sizeof(struct htab_elem) +
 			  round_up(htab->map.key_size, 8);
 	if (percpu)
@@ -327,6 +336,7 @@ static struct bpf_map *htab_map_alloc(union bpf_attr *attr)
 	else
 		htab->elem_size += round_up(htab->map.value_size, 8);
 
+	err = -E2BIG;
 	/* prevent zero size kmalloc and check for u32 overflow */
 	if (htab->n_buckets == 0 ||
 	    htab->n_buckets > U32_MAX / sizeof(struct bucket))
@@ -1143,6 +1153,7 @@ static void htab_map_free(struct bpf_map *map)
 }
 
 const struct bpf_map_ops htab_map_ops = {
+	.map_alloc_check = htab_map_alloc_check,
 	.map_alloc = htab_map_alloc,
 	.map_free = htab_map_free,
 	.map_get_next_key = htab_map_get_next_key,
@@ -1153,6 +1164,7 @@ const struct bpf_map_ops htab_map_ops = {
 };
 
 const struct bpf_map_ops htab_lru_map_ops = {
+	.map_alloc_check = htab_map_alloc_check,
 	.map_alloc = htab_map_alloc,
 	.map_free = htab_map_free,
 	.map_get_next_key = htab_map_get_next_key,
@@ -1236,6 +1248,7 @@ int bpf_percpu_hash_update(struct bpf_map *map, void *key, void *value,
 }
 
 const struct bpf_map_ops htab_percpu_map_ops = {
+	.map_alloc_check = htab_map_alloc_check,
 	.map_alloc = htab_map_alloc,
 	.map_free = htab_map_free,
 	.map_get_next_key = htab_map_get_next_key,
@@ -1245,6 +1258,7 @@ const struct bpf_map_ops htab_percpu_map_ops = {
 };
 
 const struct bpf_map_ops htab_lru_percpu_map_ops = {
+	.map_alloc_check = htab_map_alloc_check,
 	.map_alloc = htab_map_alloc,
 	.map_free = htab_map_free,
 	.map_get_next_key = htab_map_get_next_key,
@@ -1253,11 +1267,11 @@ const struct bpf_map_ops htab_lru_percpu_map_ops = {
 	.map_delete_elem = htab_lru_map_delete_elem,
 };
 
-static struct bpf_map *fd_htab_map_alloc(union bpf_attr *attr)
+static int fd_htab_map_alloc_check(union bpf_attr *attr)
 {
 	if (attr->value_size != sizeof(u32))
-		return ERR_PTR(-EINVAL);
-	return htab_map_alloc(attr);
+		return -EINVAL;
+	return htab_map_alloc_check(attr);
 }
 
 static void fd_htab_map_free(struct bpf_map *map)
@@ -1328,7 +1342,7 @@ static struct bpf_map *htab_of_map_alloc(union bpf_attr *attr)
 	if (IS_ERR(inner_map_meta))
 		return inner_map_meta;
 
-	map = fd_htab_map_alloc(attr);
+	map = htab_map_alloc(attr);
 	if (IS_ERR(map)) {
 		bpf_map_meta_free(inner_map_meta);
 		return map;
@@ -1372,6 +1386,7 @@ static void htab_of_map_free(struct bpf_map *map)
 }
 
 const struct bpf_map_ops htab_of_maps_map_ops = {
+	.map_alloc_check = fd_htab_map_alloc_check,
 	.map_alloc = htab_of_map_alloc,
 	.map_free = htab_of_map_free,
 	.map_get_next_key = htab_map_get_next_key,
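The hashtab change splits pure attribute validation out of htab_map_alloc() into htab_map_alloc_check(), which every htab ops table now exposes as .map_alloc_check so the syscall layer can validate attributes without allocating anything (used by find_and_alloc_map() below, notably for device-offloaded maps). A small userspace sketch of the check/alloc split, illustrative types only:

/* Validation that needs no allocation lives in a separate check
 * callback; the allocator can then assume sane attributes.
 */
#include <errno.h>
#include <stdio.h>
#include <stdlib.h>

struct fake_attr { unsigned key_size, value_size, max_entries; };

static int htab_alloc_check(const struct fake_attr *attr)
{
	if (!attr->max_entries || !attr->key_size || !attr->value_size)
		return -EINVAL;
	if (attr->key_size > 512)	/* stand-in for MAX_BPF_STACK */
		return -E2BIG;
	return 0;
}

static void *htab_alloc(const struct fake_attr *attr)
{
	/* by the time we get here, the attributes are known to be sane */
	return calloc(attr->max_entries, attr->key_size + attr->value_size);
}

int main(void)
{
	struct fake_attr bad = { 0, 0, 0 }, good = { 4, 8, 16 };

	printf("bad:  %d\n", htab_alloc_check(&bad));
	if (!htab_alloc_check(&good)) {
		void *m = htab_alloc(&good);

		printf("good: %p\n", m);
		free(m);
	}
	return 0;
}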
diff --git a/kernel/bpf/lpm_trie.c b/kernel/bpf/lpm_trie.c
index 885e45479680..7b469d10d0e9 100644
--- a/kernel/bpf/lpm_trie.c
+++ b/kernel/bpf/lpm_trie.c
@@ -522,12 +522,7 @@ static struct bpf_map *trie_alloc(union bpf_attr *attr)
 		return ERR_PTR(-ENOMEM);
 
 	/* copy mandatory map attributes */
-	trie->map.map_type = attr->map_type;
-	trie->map.key_size = attr->key_size;
-	trie->map.value_size = attr->value_size;
-	trie->map.max_entries = attr->max_entries;
-	trie->map.map_flags = attr->map_flags;
-	trie->map.numa_node = bpf_map_attr_numa_node(attr);
+	bpf_map_init_from_attr(&trie->map, attr);
 	trie->data_size = attr->key_size -
 			  offsetof(struct bpf_lpm_trie_key, data);
 	trie->max_prefixlen = trie->data_size * 8;
@@ -596,9 +591,96 @@ unlock:
 	raw_spin_unlock(&trie->lock);
 }
 
-static int trie_get_next_key(struct bpf_map *map, void *key, void *next_key)
+static int trie_get_next_key(struct bpf_map *map, void *_key, void *_next_key)
 {
-	return -ENOTSUPP;
+	struct lpm_trie_node *node, *next_node = NULL, *parent, *search_root;
+	struct lpm_trie *trie = container_of(map, struct lpm_trie, map);
+	struct bpf_lpm_trie_key *key = _key, *next_key = _next_key;
+	struct lpm_trie_node **node_stack = NULL;
+	int err = 0, stack_ptr = -1;
+	unsigned int next_bit;
+	size_t matchlen;
+
+	/* The get_next_key follows postorder. For the 4-node example at
+	 * the top of this file, trie_get_next_key() returns the following
+	 * keys one after another:
+	 *   192.168.0.0/24
+	 *   192.168.1.0/24
+	 *   192.168.128.0/24
+	 *   192.168.0.0/16
+	 *
+	 * The idea is to return more specific keys before less specific ones.
+	 */
+
+	/* Empty trie */
+	search_root = rcu_dereference(trie->root);
+	if (!search_root)
+		return -ENOENT;
+
+	/* For invalid key, find the leftmost node in the trie */
+	if (!key || key->prefixlen > trie->max_prefixlen)
+		goto find_leftmost;
+
+	node_stack = kmalloc(trie->max_prefixlen * sizeof(struct lpm_trie_node *),
+			     GFP_ATOMIC | __GFP_NOWARN);
+	if (!node_stack)
+		return -ENOMEM;
+
+	/* Try to find the exact node for the given key */
+	for (node = search_root; node;) {
+		node_stack[++stack_ptr] = node;
+		matchlen = longest_prefix_match(trie, node, key);
+		if (node->prefixlen != matchlen ||
+		    node->prefixlen == key->prefixlen)
+			break;
+
+		next_bit = extract_bit(key->data, node->prefixlen);
+		node = rcu_dereference(node->child[next_bit]);
+	}
+	if (!node || node->prefixlen != key->prefixlen ||
+	    (node->flags & LPM_TREE_NODE_FLAG_IM))
+		goto find_leftmost;
+
+	/* The node with the exactly-matching key has been found,
+	 * find the first node in postorder after the matched node.
+	 */
+	node = node_stack[stack_ptr];
+	while (stack_ptr > 0) {
+		parent = node_stack[stack_ptr - 1];
+		if (rcu_dereference(parent->child[0]) == node) {
+			search_root = rcu_dereference(parent->child[1]);
+			if (search_root)
+				goto find_leftmost;
+		}
+		if (!(parent->flags & LPM_TREE_NODE_FLAG_IM)) {
+			next_node = parent;
+			goto do_copy;
+		}
+
+		node = parent;
+		stack_ptr--;
+	}
+
+	/* did not find anything */
+	err = -ENOENT;
+	goto free_stack;
+
+find_leftmost:
+	/* Find the leftmost non-intermediate node; all intermediate nodes
+	 * have exactly two children, so this walk always finds one.
+	 */
+	for (node = search_root; node;) {
+		if (!(node->flags & LPM_TREE_NODE_FLAG_IM))
+			next_node = node;
+		node = rcu_dereference(node->child[0]);
+	}
+do_copy:
+	next_key->prefixlen = next_node->prefixlen;
+	memcpy((void *)next_key + offsetof(struct bpf_lpm_trie_key, data),
+	       next_node->data, trie->data_size);
+free_stack:
+	kfree(node_stack);
+	return err;
 }
 
 const struct bpf_map_ops trie_map_ops = {
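trie_get_next_key() now walks the LPM trie in postorder so that more specific prefixes are returned before less specific ones, skipping intermediate nodes that carry no key. The toy program below, not kernel code, reproduces that visiting order on the four prefixes from the comment above (plus one intermediate node):

/* Postorder visit of a tiny binary trie: children (more specific
 * prefixes) are emitted before their ancestors; intermediate nodes
 * carry no key and are skipped. Illustrative only.
 */
#include <stdio.h>

struct node {
	const char *prefix;	/* NULL marks an intermediate node */
	struct node *child[2];
};

static void postorder(struct node *n)
{
	if (!n)
		return;
	postorder(n->child[0]);
	postorder(n->child[1]);
	if (n->prefix)		/* intermediate nodes carry no key */
		printf("%s\n", n->prefix);
}

int main(void)
{
	struct node a = { "192.168.0.0/24", { 0, 0 } };
	struct node b = { "192.168.1.0/24", { 0, 0 } };
	struct node im = { NULL, { &a, &b } };	/* intermediate */
	struct node c = { "192.168.128.0/24", { 0, 0 } };
	struct node root = { "192.168.0.0/16", { &im, &c } };

	postorder(&root);	/* the /24s first, then the /16 */
	return 0;
}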
diff --git a/kernel/bpf/offload.c b/kernel/bpf/offload.c
index 8455b89d1bbf..c9401075b58c 100644
--- a/kernel/bpf/offload.c
+++ b/kernel/bpf/offload.c
@@ -16,18 +16,35 @@
 #include <linux/bpf.h>
 #include <linux/bpf_verifier.h>
 #include <linux/bug.h>
+#include <linux/kdev_t.h>
 #include <linux/list.h>
 #include <linux/netdevice.h>
 #include <linux/printk.h>
+#include <linux/proc_ns.h>
 #include <linux/rtnetlink.h>
+#include <linux/rwsem.h>
 
-/* protected by RTNL */
+/* Protects bpf_prog_offload_devs, bpf_map_offload_devs and offload members
+ * of all progs.
+ * RTNL lock cannot be taken when holding this lock.
+ */
+static DECLARE_RWSEM(bpf_devs_lock);
 static LIST_HEAD(bpf_prog_offload_devs);
+static LIST_HEAD(bpf_map_offload_devs);
+
+static int bpf_dev_offload_check(struct net_device *netdev)
+{
+	if (!netdev)
+		return -EINVAL;
+	if (!netdev->netdev_ops->ndo_bpf)
+		return -EOPNOTSUPP;
+	return 0;
+}
 
 int bpf_prog_offload_init(struct bpf_prog *prog, union bpf_attr *attr)
 {
-	struct net *net = current->nsproxy->net_ns;
-	struct bpf_dev_offload *offload;
+	struct bpf_prog_offload *offload;
+	int err;
 
 	if (attr->prog_type != BPF_PROG_TYPE_SCHED_CLS &&
 	    attr->prog_type != BPF_PROG_TYPE_XDP)
@@ -41,34 +58,44 @@ int bpf_prog_offload_init(struct bpf_prog *prog, union bpf_attr *attr)
 		return -ENOMEM;
 
 	offload->prog = prog;
-	init_waitqueue_head(&offload->verifier_done);
 
-	rtnl_lock();
-	offload->netdev = __dev_get_by_index(net, attr->prog_ifindex);
-	if (!offload->netdev) {
-		rtnl_unlock();
-		kfree(offload);
-		return -EINVAL;
-	}
+	offload->netdev = dev_get_by_index(current->nsproxy->net_ns,
+					   attr->prog_ifindex);
+	err = bpf_dev_offload_check(offload->netdev);
+	if (err)
+		goto err_maybe_put;
 
+	down_write(&bpf_devs_lock);
+	if (offload->netdev->reg_state != NETREG_REGISTERED) {
+		err = -EINVAL;
+		goto err_unlock;
+	}
 	prog->aux->offload = offload;
 	list_add_tail(&offload->offloads, &bpf_prog_offload_devs);
-	rtnl_unlock();
+	dev_put(offload->netdev);
+	up_write(&bpf_devs_lock);
 
 	return 0;
+err_unlock:
+	up_write(&bpf_devs_lock);
+err_maybe_put:
+	if (offload->netdev)
+		dev_put(offload->netdev);
+	kfree(offload);
+	return err;
 }
 
 static int __bpf_offload_ndo(struct bpf_prog *prog, enum bpf_netdev_command cmd,
 			     struct netdev_bpf *data)
 {
-	struct net_device *netdev = prog->aux->offload->netdev;
+	struct bpf_prog_offload *offload = prog->aux->offload;
+	struct net_device *netdev;
 
 	ASSERT_RTNL();
 
-	if (!netdev)
+	if (!offload)
 		return -ENODEV;
-	if (!netdev->netdev_ops->ndo_bpf)
-		return -EOPNOTSUPP;
+	netdev = offload->netdev;
 
 	data->command = cmd;
 
@@ -87,62 +114,63 @@ int bpf_prog_offload_verifier_prep(struct bpf_verifier_env *env)
 	if (err)
 		goto exit_unlock;
 
-	env->dev_ops = data.verifier.ops;
-
+	env->prog->aux->offload->dev_ops = data.verifier.ops;
 	env->prog->aux->offload->dev_state = true;
-	env->prog->aux->offload->verifier_running = true;
 exit_unlock:
 	rtnl_unlock();
 	return err;
 }
 
+int bpf_prog_offload_verify_insn(struct bpf_verifier_env *env,
+				 int insn_idx, int prev_insn_idx)
+{
+	struct bpf_prog_offload *offload;
+	int ret = -ENODEV;
+
+	down_read(&bpf_devs_lock);
+	offload = env->prog->aux->offload;
+	if (offload)
+		ret = offload->dev_ops->insn_hook(env, insn_idx, prev_insn_idx);
+	up_read(&bpf_devs_lock);
+
+	return ret;
+}
+
 static void __bpf_prog_offload_destroy(struct bpf_prog *prog)
 {
-	struct bpf_dev_offload *offload = prog->aux->offload;
+	struct bpf_prog_offload *offload = prog->aux->offload;
 	struct netdev_bpf data = {};
 
-	/* Caution - if netdev is destroyed before the program, this function
-	 * will be called twice.
-	 */
-
 	data.offload.prog = prog;
 
-	if (offload->verifier_running)
-		wait_event(offload->verifier_done, !offload->verifier_running);
-
 	if (offload->dev_state)
 		WARN_ON(__bpf_offload_ndo(prog, BPF_OFFLOAD_DESTROY, &data));
 
-	offload->dev_state = false;
+	/* Make sure BPF_PROG_GET_NEXT_ID can't find this dead program */
+	bpf_prog_free_id(prog, true);
+
 	list_del_init(&offload->offloads);
-	offload->netdev = NULL;
+	kfree(offload);
+	prog->aux->offload = NULL;
 }
 
 void bpf_prog_offload_destroy(struct bpf_prog *prog)
 {
-	struct bpf_dev_offload *offload = prog->aux->offload;
-
-	offload->verifier_running = false;
-	wake_up(&offload->verifier_done);
-
 	rtnl_lock();
-	__bpf_prog_offload_destroy(prog);
+	down_write(&bpf_devs_lock);
+	if (prog->aux->offload)
+		__bpf_prog_offload_destroy(prog);
+	up_write(&bpf_devs_lock);
 	rtnl_unlock();
-
-	kfree(offload);
 }
 
 static int bpf_prog_offload_translate(struct bpf_prog *prog)
 {
-	struct bpf_dev_offload *offload = prog->aux->offload;
 	struct netdev_bpf data = {};
 	int ret;
 
 	data.offload.prog = prog;
 
-	offload->verifier_running = false;
-	wake_up(&offload->verifier_done);
-
 	rtnl_lock();
 	ret = __bpf_offload_ndo(prog, BPF_OFFLOAD_TRANSLATE, &data);
 	rtnl_unlock();
@@ -164,14 +192,323 @@ int bpf_prog_offload_compile(struct bpf_prog *prog)
 	return bpf_prog_offload_translate(prog);
 }
 
+struct ns_get_path_bpf_prog_args {
+	struct bpf_prog *prog;
+	struct bpf_prog_info *info;
+};
+
+static struct ns_common *bpf_prog_offload_info_fill_ns(void *private_data)
+{
+	struct ns_get_path_bpf_prog_args *args = private_data;
+	struct bpf_prog_aux *aux = args->prog->aux;
+	struct ns_common *ns;
+	struct net *net;
+
+	rtnl_lock();
+	down_read(&bpf_devs_lock);
+
+	if (aux->offload) {
+		args->info->ifindex = aux->offload->netdev->ifindex;
+		net = dev_net(aux->offload->netdev);
+		get_net(net);
+		ns = &net->ns;
+	} else {
+		args->info->ifindex = 0;
+		ns = NULL;
+	}
+
+	up_read(&bpf_devs_lock);
+	rtnl_unlock();
+
+	return ns;
+}
+
+int bpf_prog_offload_info_fill(struct bpf_prog_info *info,
+			       struct bpf_prog *prog)
+{
+	struct ns_get_path_bpf_prog_args args = {
+		.prog	= prog,
+		.info	= info,
+	};
+	struct bpf_prog_aux *aux = prog->aux;
+	struct inode *ns_inode;
+	struct path ns_path;
+	char __user *uinsns;
+	void *res;
+	u32 ulen;
+
+	res = ns_get_path_cb(&ns_path, bpf_prog_offload_info_fill_ns, &args);
+	if (IS_ERR(res)) {
+		if (!info->ifindex)
+			return -ENODEV;
+		return PTR_ERR(res);
+	}
+
+	down_read(&bpf_devs_lock);
+
+	if (!aux->offload) {
+		up_read(&bpf_devs_lock);
+		return -ENODEV;
+	}
+
+	ulen = info->jited_prog_len;
+	info->jited_prog_len = aux->offload->jited_len;
+	if (info->jited_prog_len && ulen) {
+		uinsns = u64_to_user_ptr(info->jited_prog_insns);
+		ulen = min_t(u32, info->jited_prog_len, ulen);
+		if (copy_to_user(uinsns, aux->offload->jited_image, ulen)) {
+			up_read(&bpf_devs_lock);
+			return -EFAULT;
+		}
+	}
+
+	up_read(&bpf_devs_lock);
+
+	ns_inode = ns_path.dentry->d_inode;
+	info->netns_dev = new_encode_dev(ns_inode->i_sb->s_dev);
+	info->netns_ino = ns_inode->i_ino;
+	path_put(&ns_path);
+
+	return 0;
+}
+
 const struct bpf_prog_ops bpf_offload_prog_ops = {
 };
 
+static int bpf_map_offload_ndo(struct bpf_offloaded_map *offmap,
+			       enum bpf_netdev_command cmd)
+{
+	struct netdev_bpf data = {};
+	struct net_device *netdev;
+
+	ASSERT_RTNL();
+
+	data.command = cmd;
+	data.offmap = offmap;
+	/* Caller must make sure netdev is valid */
+	netdev = offmap->netdev;
+
+	return netdev->netdev_ops->ndo_bpf(netdev, &data);
+}
+
+struct bpf_map *bpf_map_offload_map_alloc(union bpf_attr *attr)
+{
+	struct net *net = current->nsproxy->net_ns;
+	struct bpf_offloaded_map *offmap;
+	int err;
+
+	if (!capable(CAP_SYS_ADMIN))
+		return ERR_PTR(-EPERM);
+	if (attr->map_type != BPF_MAP_TYPE_ARRAY &&
+	    attr->map_type != BPF_MAP_TYPE_HASH)
+		return ERR_PTR(-EINVAL);
+
+	offmap = kzalloc(sizeof(*offmap), GFP_USER);
+	if (!offmap)
+		return ERR_PTR(-ENOMEM);
+
+	bpf_map_init_from_attr(&offmap->map, attr);
+
+	rtnl_lock();
+	down_write(&bpf_devs_lock);
+	offmap->netdev = __dev_get_by_index(net, attr->map_ifindex);
+	err = bpf_dev_offload_check(offmap->netdev);
+	if (err)
+		goto err_unlock;
+
+	err = bpf_map_offload_ndo(offmap, BPF_OFFLOAD_MAP_ALLOC);
+	if (err)
+		goto err_unlock;
+
+	list_add_tail(&offmap->offloads, &bpf_map_offload_devs);
+	up_write(&bpf_devs_lock);
+	rtnl_unlock();
+
+	return &offmap->map;
+
+err_unlock:
+	up_write(&bpf_devs_lock);
+	rtnl_unlock();
+	kfree(offmap);
+	return ERR_PTR(err);
+}
+
+static void __bpf_map_offload_destroy(struct bpf_offloaded_map *offmap)
+{
+	WARN_ON(bpf_map_offload_ndo(offmap, BPF_OFFLOAD_MAP_FREE));
+	/* Make sure BPF_MAP_GET_NEXT_ID can't find this dead map */
+	bpf_map_free_id(&offmap->map, true);
+	list_del_init(&offmap->offloads);
+	offmap->netdev = NULL;
+}
+
+void bpf_map_offload_map_free(struct bpf_map *map)
+{
+	struct bpf_offloaded_map *offmap = map_to_offmap(map);
+
+	rtnl_lock();
+	down_write(&bpf_devs_lock);
+	if (offmap->netdev)
+		__bpf_map_offload_destroy(offmap);
+	up_write(&bpf_devs_lock);
+	rtnl_unlock();
+
+	kfree(offmap);
+}
+
+int bpf_map_offload_lookup_elem(struct bpf_map *map, void *key, void *value)
+{
+	struct bpf_offloaded_map *offmap = map_to_offmap(map);
+	int ret = -ENODEV;
+
+	down_read(&bpf_devs_lock);
+	if (offmap->netdev)
+		ret = offmap->dev_ops->map_lookup_elem(offmap, key, value);
+	up_read(&bpf_devs_lock);
+
+	return ret;
+}
+
+int bpf_map_offload_update_elem(struct bpf_map *map,
+				void *key, void *value, u64 flags)
+{
+	struct bpf_offloaded_map *offmap = map_to_offmap(map);
+	int ret = -ENODEV;
+
+	if (unlikely(flags > BPF_EXIST))
+		return -EINVAL;
+
+	down_read(&bpf_devs_lock);
+	if (offmap->netdev)
+		ret = offmap->dev_ops->map_update_elem(offmap, key, value,
+						       flags);
+	up_read(&bpf_devs_lock);
+
+	return ret;
+}
+
+int bpf_map_offload_delete_elem(struct bpf_map *map, void *key)
+{
+	struct bpf_offloaded_map *offmap = map_to_offmap(map);
+	int ret = -ENODEV;
+
+	down_read(&bpf_devs_lock);
+	if (offmap->netdev)
+		ret = offmap->dev_ops->map_delete_elem(offmap, key);
+	up_read(&bpf_devs_lock);
+
+	return ret;
+}
+
+int bpf_map_offload_get_next_key(struct bpf_map *map, void *key, void *next_key)
+{
+	struct bpf_offloaded_map *offmap = map_to_offmap(map);
+	int ret = -ENODEV;
+
+	down_read(&bpf_devs_lock);
+	if (offmap->netdev)
+		ret = offmap->dev_ops->map_get_next_key(offmap, key, next_key);
+	up_read(&bpf_devs_lock);
+
+	return ret;
+}
+
+struct ns_get_path_bpf_map_args {
+	struct bpf_offloaded_map *offmap;
+	struct bpf_map_info *info;
+};
+
+static struct ns_common *bpf_map_offload_info_fill_ns(void *private_data)
+{
+	struct ns_get_path_bpf_map_args *args = private_data;
+	struct ns_common *ns;
+	struct net *net;
+
+	rtnl_lock();
+	down_read(&bpf_devs_lock);
+
+	if (args->offmap->netdev) {
+		args->info->ifindex = args->offmap->netdev->ifindex;
+		net = dev_net(args->offmap->netdev);
+		get_net(net);
+		ns = &net->ns;
+	} else {
+		args->info->ifindex = 0;
+		ns = NULL;
+	}
+
+	up_read(&bpf_devs_lock);
+	rtnl_unlock();
+
+	return ns;
+}
+
+int bpf_map_offload_info_fill(struct bpf_map_info *info, struct bpf_map *map)
+{
+	struct ns_get_path_bpf_map_args args = {
+		.offmap	= map_to_offmap(map),
+		.info	= info,
+	};
+	struct inode *ns_inode;
+	struct path ns_path;
+	void *res;
+
+	res = ns_get_path_cb(&ns_path, bpf_map_offload_info_fill_ns, &args);
+	if (IS_ERR(res)) {
+		if (!info->ifindex)
+			return -ENODEV;
+		return PTR_ERR(res);
+	}
+
+	ns_inode = ns_path.dentry->d_inode;
+	info->netns_dev = new_encode_dev(ns_inode->i_sb->s_dev);
+	info->netns_ino = ns_inode->i_ino;
+	path_put(&ns_path);
+
+	return 0;
+}
+
+bool bpf_offload_dev_match(struct bpf_prog *prog, struct bpf_map *map)
+{
+	struct bpf_offloaded_map *offmap;
+	struct bpf_prog_offload *offload;
+	bool ret;
+
+	if (!bpf_prog_is_dev_bound(prog->aux) || !bpf_map_is_dev_bound(map))
+		return false;
+
+	down_read(&bpf_devs_lock);
+	offload = prog->aux->offload;
+	offmap = map_to_offmap(map);
+
+	ret = offload && offload->netdev == offmap->netdev;
+	up_read(&bpf_devs_lock);
+
+	return ret;
+}
+
+static void bpf_offload_orphan_all_progs(struct net_device *netdev)
+{
+	struct bpf_prog_offload *offload, *tmp;
+
+	list_for_each_entry_safe(offload, tmp, &bpf_prog_offload_devs, offloads)
+		if (offload->netdev == netdev)
+			__bpf_prog_offload_destroy(offload->prog);
+}
+
+static void bpf_offload_orphan_all_maps(struct net_device *netdev)
+{
+	struct bpf_offloaded_map *offmap, *tmp;
+
+	list_for_each_entry_safe(offmap, tmp, &bpf_map_offload_devs, offloads)
+		if (offmap->netdev == netdev)
+			__bpf_map_offload_destroy(offmap);
+}
+
 static int bpf_offload_notification(struct notifier_block *notifier,
 				    ulong event, void *ptr)
 {
 	struct net_device *netdev = netdev_notifier_info_to_dev(ptr);
-	struct bpf_dev_offload *offload, *tmp;
 
 	ASSERT_RTNL();
 
@@ -181,11 +518,10 @@ static int bpf_offload_notification(struct notifier_block *notifier,
 		if (netdev->reg_state != NETREG_UNREGISTERING)
 			break;
 
-		list_for_each_entry_safe(offload, tmp, &bpf_prog_offload_devs,
-					 offloads) {
-			if (offload->netdev == netdev)
-				__bpf_prog_offload_destroy(offload->prog);
-		}
+		down_write(&bpf_devs_lock);
+		bpf_offload_orphan_all_progs(netdev);
+		bpf_offload_orphan_all_maps(netdev);
+		up_write(&bpf_devs_lock);
 		break;
 	default:
 		break;
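offload.c replaces the RTNL-only protection with bpf_devs_lock, an rwsem: per-element operations take it for reading and bail out with -ENODEV once the backing netdev is gone, while the NETDEV_UNREGISTER notifier takes it for writing and orphans every program and map bound to the device. A userspace model of that scheme using a pthread rwlock (compile with -pthread; names and the fixed-size table are illustrative):

/* Readers check the binding under a read lock; the unregister path
 * severs all bindings under a write lock.
 */
#include <pthread.h>
#include <stdio.h>

#define MAX_OBJS 8

static pthread_rwlock_t devs_lock = PTHREAD_RWLOCK_INITIALIZER;
static int bound_dev[MAX_OBJS];	/* device id per object, 0 = orphaned */

static int obj_op(int idx)
{
	int ret = -1;		/* stand-in for -ENODEV */

	pthread_rwlock_rdlock(&devs_lock);
	if (bound_dev[idx])
		ret = 0;	/* would call into the device here */
	pthread_rwlock_unlock(&devs_lock);
	return ret;
}

static void orphan_all(int dev)
{
	int i;

	pthread_rwlock_wrlock(&devs_lock);
	for (i = 0; i < MAX_OBJS; i++)
		if (bound_dev[i] == dev)
			bound_dev[i] = 0;	/* ops now fail */
	pthread_rwlock_unlock(&devs_lock);
}

int main(void)
{
	bound_dev[0] = 3;
	printf("before unregister: %d\n", obj_op(0));
	orphan_all(3);
	printf("after unregister:  %d\n", obj_op(0));
	return 0;
}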
diff --git a/kernel/bpf/sockmap.c b/kernel/bpf/sockmap.c
index 1712d319c2d8..0314d1783d77 100644
--- a/kernel/bpf/sockmap.c
+++ b/kernel/bpf/sockmap.c
@@ -96,14 +96,6 @@ static inline struct smap_psock *smap_psock_sk(const struct sock *sk)
 	return rcu_dereference_sk_user_data(sk);
 }
 
-/* compute the linear packet data range [data, data_end) for skb when
- * sk_skb type programs are in use.
- */
-static inline void bpf_compute_data_end_sk_skb(struct sk_buff *skb)
-{
-	TCP_SKB_CB(skb)->bpf.data_end = skb->data + skb_headlen(skb);
-}
-
 enum __sk_action {
 	__SK_DROP = 0,
 	__SK_PASS,
@@ -521,13 +513,7 @@ static struct bpf_map *sock_map_alloc(union bpf_attr *attr)
 	if (!stab)
 		return ERR_PTR(-ENOMEM);
 
-	/* mandatory map attributes */
-	stab->map.map_type = attr->map_type;
-	stab->map.key_size = attr->key_size;
-	stab->map.value_size = attr->value_size;
-	stab->map.max_entries = attr->max_entries;
-	stab->map.map_flags = attr->map_flags;
-	stab->map.numa_node = bpf_map_attr_numa_node(attr);
+	bpf_map_init_from_attr(&stab->map, attr);
 
 	/* make sure page count doesn't overflow */
 	cost = (u64) stab->map.max_entries * sizeof(struct sock *);
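The helper removed from sockmap.c above computed the linear [data, data_end) window that sk_skb programs may access, derived from the skb head length. A trivial userspace sketch of the same pointer arithmetic over a plain buffer, with made-up types:

/* The linear window a program may dereference runs from data to
 * data + headlen. Illustrative types, not kernel API.
 */
#include <stdio.h>

struct fake_skb {
	unsigned char *data;
	unsigned int headlen;	/* length of the linear part */
};

int main(void)
{
	unsigned char buf[64];
	struct fake_skb skb = { buf, 14 };
	unsigned char *data = skb.data;
	unsigned char *data_end = skb.data + skb.headlen;

	/* a program may only dereference pointers in [data, data_end) */
	printf("linear range: %td bytes\n", data_end - data);
	return 0;
}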
diff --git a/kernel/bpf/stackmap.c b/kernel/bpf/stackmap.c
index a15bc636cc98..b0ecf43f5894 100644
--- a/kernel/bpf/stackmap.c
+++ b/kernel/bpf/stackmap.c
@@ -88,14 +88,10 @@ static struct bpf_map *stack_map_alloc(union bpf_attr *attr)
 	if (cost >= U32_MAX - PAGE_SIZE)
 		goto free_smap;
 
-	smap->map.map_type = attr->map_type;
-	smap->map.key_size = attr->key_size;
+	bpf_map_init_from_attr(&smap->map, attr);
 	smap->map.value_size = value_size;
-	smap->map.max_entries = attr->max_entries;
-	smap->map.map_flags = attr->map_flags;
 	smap->n_buckets = n_buckets;
 	smap->map.pages = round_up(cost, PAGE_SIZE) >> PAGE_SHIFT;
-	smap->map.numa_node = bpf_map_attr_numa_node(attr);
 
 	err = bpf_map_precharge_memlock(smap->map.pages);
 	if (err)
@@ -226,9 +222,33 @@ int bpf_stackmap_copy(struct bpf_map *map, void *key, void *value)
 	return 0;
 }
 
-static int stack_map_get_next_key(struct bpf_map *map, void *key, void *next_key)
+static int stack_map_get_next_key(struct bpf_map *map, void *key,
+				  void *next_key)
 {
-	return -EINVAL;
+	struct bpf_stack_map *smap = container_of(map,
+						  struct bpf_stack_map, map);
+	u32 id;
+
+	WARN_ON_ONCE(!rcu_read_lock_held());
+
+	if (!key) {
+		id = 0;
+	} else {
+		id = *(u32 *)key;
+		if (id >= smap->n_buckets || !smap->buckets[id])
+			id = 0;
+		else
+			id++;
+	}
+
+	while (id < smap->n_buckets && !smap->buckets[id])
+		id++;
+
+	if (id >= smap->n_buckets)
+		return -ENOENT;
+
+	*(u32 *)next_key = id;
+	return 0;
 }
 
 static int stack_map_update_elem(struct bpf_map *map, void *key, void *value,
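stack_map_get_next_key() used to reject iteration outright; it now scans the sparse bucket array for the next allocated id, restarting from 0 when the supplied key is missing or invalid. A self-contained userspace model of that scan:

/* Find the next allocated id in a sparse array of buckets, mirroring
 * the restart-or-advance logic above. Illustrative types only.
 */
#include <errno.h>
#include <stdio.h>

#define N_BUCKETS 8

static void *buckets[N_BUCKETS];

static int get_next_key(const unsigned *key, unsigned *next_key)
{
	unsigned id;

	if (!key || *key >= N_BUCKETS || !buckets[*key])
		id = 0;		/* restart from the beginning */
	else
		id = *key + 1;	/* continue after a valid key */

	while (id < N_BUCKETS && !buckets[id])
		id++;
	if (id >= N_BUCKETS)
		return -ENOENT;
	*next_key = id;
	return 0;
}

int main(void)
{
	unsigned cur, next;
	unsigned *key = NULL;	/* NULL key starts the iteration */
	int dummy;

	buckets[2] = buckets[5] = &dummy;
	while (!get_next_key(key, &next)) {
		printf("next id: %u\n", next);	/* prints 2, then 5 */
		cur = next;
		key = &cur;
	}
	return 0;
}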
diff --git a/kernel/bpf/syscall.c b/kernel/bpf/syscall.c
index 5cb783fc8224..e24aa3241387 100644
--- a/kernel/bpf/syscall.c
+++ b/kernel/bpf/syscall.c
@@ -94,18 +94,34 @@ static int check_uarg_tail_zero(void __user *uaddr,
 	return 0;
 }
 
+const struct bpf_map_ops bpf_map_offload_ops = {
+	.map_alloc = bpf_map_offload_map_alloc,
+	.map_free = bpf_map_offload_map_free,
+};
+
 static struct bpf_map *find_and_alloc_map(union bpf_attr *attr)
 {
+	const struct bpf_map_ops *ops;
 	struct bpf_map *map;
+	int err;
 
-	if (attr->map_type >= ARRAY_SIZE(bpf_map_types) ||
-	    !bpf_map_types[attr->map_type])
+	if (attr->map_type >= ARRAY_SIZE(bpf_map_types))
+		return ERR_PTR(-EINVAL);
+	ops = bpf_map_types[attr->map_type];
+	if (!ops)
 		return ERR_PTR(-EINVAL);
 
-	map = bpf_map_types[attr->map_type]->map_alloc(attr);
+	if (ops->map_alloc_check) {
+		err = ops->map_alloc_check(attr);
+		if (err)
+			return ERR_PTR(err);
+	}
+	if (attr->map_ifindex)
+		ops = &bpf_map_offload_ops;
+	map = ops->map_alloc(attr);
 	if (IS_ERR(map))
 		return map;
-	map->ops = bpf_map_types[attr->map_type];
+	map->ops = ops;
 	map->map_type = attr->map_type;
 	return map;
 }
@@ -134,6 +150,16 @@ void bpf_map_area_free(void *area)
 	kvfree(area);
 }
 
+void bpf_map_init_from_attr(struct bpf_map *map, union bpf_attr *attr)
+{
+	map->map_type = attr->map_type;
+	map->key_size = attr->key_size;
+	map->value_size = attr->value_size;
+	map->max_entries = attr->max_entries;
+	map->map_flags = attr->map_flags;
+	map->numa_node = bpf_map_attr_numa_node(attr);
+}
+
 int bpf_map_precharge_memlock(u32 pages)
 {
 	struct user_struct *user = get_current_user();
@@ -189,16 +215,25 @@ static int bpf_map_alloc_id(struct bpf_map *map)
 	return id > 0 ? 0 : id;
 }
 
-static void bpf_map_free_id(struct bpf_map *map, bool do_idr_lock)
+void bpf_map_free_id(struct bpf_map *map, bool do_idr_lock)
 {
 	unsigned long flags;
 
+	/* Offloaded maps are removed from the IDR store when their device
+	 * disappears - even if someone holds an fd to them, they are unusable:
+	 * the memory is gone and all ops will fail; they are simply waiting
+	 * for the refcnt to drop so they can be freed.
+	 */
+	if (!map->id)
+		return;
+
 	if (do_idr_lock)
 		spin_lock_irqsave(&map_idr_lock, flags);
 	else
 		__acquire(&map_idr_lock);
 
 	idr_remove(&map_idr, map->id);
+	map->id = 0;
 
 	if (do_idr_lock)
 		spin_unlock_irqrestore(&map_idr_lock, flags);
@@ -378,7 +413,7 @@ static int bpf_obj_name_cpy(char *dst, const char *src)
 	return 0;
 }
 
-#define BPF_MAP_CREATE_LAST_FIELD map_name
+#define BPF_MAP_CREATE_LAST_FIELD map_ifindex
 /* called via syscall */
 static int map_create(union bpf_attr *attr)
 {
@@ -566,8 +601,10 @@ static int map_lookup_elem(union bpf_attr *attr)
 	if (!value)
 		goto free_key;
 
-	if (map->map_type == BPF_MAP_TYPE_PERCPU_HASH ||
-	    map->map_type == BPF_MAP_TYPE_LRU_PERCPU_HASH) {
+	if (bpf_map_is_dev_bound(map)) {
+		err = bpf_map_offload_lookup_elem(map, key, value);
+	} else if (map->map_type == BPF_MAP_TYPE_PERCPU_HASH ||
+		   map->map_type == BPF_MAP_TYPE_LRU_PERCPU_HASH) {
 		err = bpf_percpu_hash_copy(map, key, value);
 	} else if (map->map_type == BPF_MAP_TYPE_PERCPU_ARRAY) {
 		err = bpf_percpu_array_copy(map, key, value);
@@ -654,7 +691,10 @@ static int map_update_elem(union bpf_attr *attr)
 		goto free_value;
 
 	/* Need to create a kthread, thus must support schedule */
-	if (map->map_type == BPF_MAP_TYPE_CPUMAP) {
+	if (bpf_map_is_dev_bound(map)) {
+		err = bpf_map_offload_update_elem(map, key, value, attr->flags);
+		goto out;
+	} else if (map->map_type == BPF_MAP_TYPE_CPUMAP) {
 		err = map->ops->map_update_elem(map, key, value, attr->flags);
 		goto out;
 	}
@@ -669,10 +709,7 @@ static int map_update_elem(union bpf_attr *attr)
 		err = bpf_percpu_hash_update(map, key, value, attr->flags);
 	} else if (map->map_type == BPF_MAP_TYPE_PERCPU_ARRAY) {
 		err = bpf_percpu_array_update(map, key, value, attr->flags);
-	} else if (map->map_type == BPF_MAP_TYPE_PERF_EVENT_ARRAY ||
-		   map->map_type == BPF_MAP_TYPE_PROG_ARRAY ||
-		   map->map_type == BPF_MAP_TYPE_CGROUP_ARRAY ||
-		   map->map_type == BPF_MAP_TYPE_ARRAY_OF_MAPS) {
+	} else if (IS_FD_ARRAY(map)) {
 		rcu_read_lock();
 		err = bpf_fd_array_map_update_elem(map, f.file, key, value,
 						   attr->flags);
@@ -731,6 +768,11 @@ static int map_delete_elem(union bpf_attr *attr)
 		goto err_put;
 	}
 
+	if (bpf_map_is_dev_bound(map)) {
+		err = bpf_map_offload_delete_elem(map, key);
+		goto out;
+	}
+
 	preempt_disable();
 	__this_cpu_inc(bpf_prog_active);
 	rcu_read_lock();
@@ -738,7 +780,7 @@ static int map_delete_elem(union bpf_attr *attr)
 	rcu_read_unlock();
 	__this_cpu_dec(bpf_prog_active);
 	preempt_enable();
-
+out:
 	if (!err)
 		trace_bpf_map_delete_elem(map, ufd, key);
 	kfree(key);
@@ -788,9 +830,15 @@ static int map_get_next_key(union bpf_attr *attr)
 	if (!next_key)
 		goto free_key;
 
+	if (bpf_map_is_dev_bound(map)) {
+		err = bpf_map_offload_get_next_key(map, key, next_key);
+		goto out;
+	}
+
 	rcu_read_lock();
 	err = map->ops->map_get_next_key(map, key, next_key);
 	rcu_read_unlock();
+out:
 	if (err)
 		goto free_next_key;
 
@@ -905,9 +953,13 @@ static int bpf_prog_alloc_id(struct bpf_prog *prog)
 	return id > 0 ? 0 : id;
 }
 
-static void bpf_prog_free_id(struct bpf_prog *prog, bool do_idr_lock)
+void bpf_prog_free_id(struct bpf_prog *prog, bool do_idr_lock)
 {
-	/* cBPF to eBPF migrations are currently not in the idr store. */
+	/* cBPF to eBPF migrations are currently not in the idr store.
+	 * Offloaded programs are removed from the store when their device
+	 * disappears - even if someone grabs an fd to them, they are unusable,
+	 * simply waiting for the refcnt to drop so they can be freed.
+	 */
 	if (!prog->aux->id)
 		return;
 
@@ -917,6 +969,7 @@ static void bpf_prog_free_id(struct bpf_prog *prog, bool do_idr_lock)
 		__acquire(&prog_idr_lock);
 
 	idr_remove(&prog_idr, prog->aux->id);
+	prog->aux->id = 0;
 
 	if (do_idr_lock)
 		spin_unlock_bh(&prog_idr_lock);
@@ -937,10 +990,16 @@ static void __bpf_prog_put_rcu(struct rcu_head *rcu)
 static void __bpf_prog_put(struct bpf_prog *prog, bool do_idr_lock)
 {
 	if (atomic_dec_and_test(&prog->aux->refcnt)) {
+		int i;
+
 		trace_bpf_prog_put_rcu(prog);
 		/* bpf_prog_free_id() must be called first */
 		bpf_prog_free_id(prog, do_idr_lock);
+
+		for (i = 0; i < prog->aux->func_cnt; i++)
+			bpf_prog_kallsyms_del(prog->aux->func[i]);
 		bpf_prog_kallsyms_del(prog);
+
 		call_rcu(&prog->aux->rcu, __bpf_prog_put_rcu);
 	}
 }
@@ -1151,6 +1210,8 @@ static int bpf_prog_load(union bpf_attr *attr)
 	if (!prog)
 		return -ENOMEM;
 
+	prog->aux->offload_requested = !!attr->prog_ifindex;
+
 	err = security_bpf_prog_alloc(prog->aux);
 	if (err)
 		goto free_prog_nouncharge;
@@ -1172,7 +1233,7 @@ static int bpf_prog_load(union bpf_attr *attr)
 	atomic_set(&prog->aux->refcnt, 1);
 	prog->gpl_compatible = is_gpl ? 1 : 0;
 
-	if (attr->prog_ifindex) {
+	if (bpf_prog_is_dev_bound(prog->aux)) {
 		err = bpf_prog_offload_init(prog, attr);
 		if (err)
 			goto free_prog;
@@ -1194,7 +1255,8 @@ static int bpf_prog_load(union bpf_attr *attr)
 		goto free_used_maps;
 
 	/* eBPF program is ready to be JITed */
-	prog = bpf_prog_select_runtime(prog, &err);
+	if (!prog->bpf_func)
+		prog = bpf_prog_select_runtime(prog, &err);
 	if (err < 0)
 		goto free_used_maps;
 
@@ -1439,6 +1501,8 @@ static int bpf_prog_test_run(const union bpf_attr *attr,
 	struct bpf_prog *prog;
 	int ret = -ENOTSUPP;
 
+	if (!capable(CAP_SYS_ADMIN))
+		return -EPERM;
 	if (CHECK_ATTR(BPF_PROG_TEST_RUN))
 		return -EINVAL;
 
@@ -1551,6 +1615,67 @@ static int bpf_map_get_fd_by_id(const union bpf_attr *attr)
 	return fd;
 }
 
+static const struct bpf_map *bpf_map_from_imm(const struct bpf_prog *prog,
+					      unsigned long addr)
+{
+	int i;
+
+	for (i = 0; i < prog->aux->used_map_cnt; i++)
+		if (prog->aux->used_maps[i] == (void *)addr)
+			return prog->aux->used_maps[i];
+	return NULL;
+}
+
+static struct bpf_insn *bpf_insn_prepare_dump(const struct bpf_prog *prog)
+{
+	const struct bpf_map *map;
+	struct bpf_insn *insns;
+	u64 imm;
+	int i;
+
+	insns = kmemdup(prog->insnsi, bpf_prog_insn_size(prog),
+			GFP_USER);
+	if (!insns)
+		return insns;
+
+	for (i = 0; i < prog->len; i++) {
+		if (insns[i].code == (BPF_JMP | BPF_TAIL_CALL)) {
+			insns[i].code = BPF_JMP | BPF_CALL;
+			insns[i].imm = BPF_FUNC_tail_call;
+			/* fall-through */
+		}
+		if (insns[i].code == (BPF_JMP | BPF_CALL) ||
+		    insns[i].code == (BPF_JMP | BPF_CALL_ARGS)) {
+			if (insns[i].code == (BPF_JMP | BPF_CALL_ARGS))
+				insns[i].code = BPF_JMP | BPF_CALL;
+			if (!bpf_dump_raw_ok())
+				insns[i].imm = 0;
+			continue;
+		}
+
+		if (insns[i].code != (BPF_LD | BPF_IMM | BPF_DW))
+			continue;
+
+		imm = ((u64)insns[i + 1].imm << 32) | (u32)insns[i].imm;
+		map = bpf_map_from_imm(prog, imm);
+		if (map) {
+			insns[i].src_reg = BPF_PSEUDO_MAP_FD;
+			insns[i].imm = map->id;
+			insns[i + 1].imm = 0;
+			continue;
+		}
+
+		if (!bpf_dump_raw_ok() &&
+		    imm == (unsigned long)prog->aux) {
+			insns[i].imm = 0;
+			insns[i + 1].imm = 0;
+			continue;
+		}
+	}
+
+	return insns;
+}
+
 static int bpf_prog_get_info_by_fd(struct bpf_prog *prog,
 				   const union bpf_attr *attr,
 				   union bpf_attr __user *uattr)
@@ -1598,24 +1723,51 @@ static int bpf_prog_get_info_by_fd(struct bpf_prog *prog,
 		goto done;
 	}
 
-	ulen = info.jited_prog_len;
-	info.jited_prog_len = prog->jited_len;
-	if (info.jited_prog_len && ulen) {
-		uinsns = u64_to_user_ptr(info.jited_prog_insns);
-		ulen = min_t(u32, info.jited_prog_len, ulen);
-		if (copy_to_user(uinsns, prog->bpf_func, ulen))
-			return -EFAULT;
-	}
-
 	ulen = info.xlated_prog_len;
 	info.xlated_prog_len = bpf_prog_insn_size(prog);
 	if (info.xlated_prog_len && ulen) {
+		struct bpf_insn *insns_sanitized;
+		bool fault;
+
+		if (prog->blinded && !bpf_dump_raw_ok()) {
+			info.xlated_prog_insns = 0;
+			goto done;
+		}
+		insns_sanitized = bpf_insn_prepare_dump(prog);
+		if (!insns_sanitized)
+			return -ENOMEM;
 		uinsns = u64_to_user_ptr(info.xlated_prog_insns);
 		ulen = min_t(u32, info.xlated_prog_len, ulen);
-		if (copy_to_user(uinsns, prog->insnsi, ulen))
+		fault = copy_to_user(uinsns, insns_sanitized, ulen);
+		kfree(insns_sanitized);
+		if (fault)
 			return -EFAULT;
 	}
 
+	if (bpf_prog_is_dev_bound(prog->aux)) {
+		err = bpf_prog_offload_info_fill(&info, prog);
+		if (err)
+			return err;
+		goto done;
+	}
+
+	/* NOTE: the following code is supposed to be skipped for offload.
+	 * bpf_prog_offload_info_fill() is the place to fill similar fields
+	 * for offload.
+	 */
+	ulen = info.jited_prog_len;
+	info.jited_prog_len = prog->jited_len;
+	if (info.jited_prog_len && ulen) {
+		if (bpf_dump_raw_ok()) {
+			uinsns = u64_to_user_ptr(info.jited_prog_insns);
+			ulen = min_t(u32, info.jited_prog_len, ulen);
+			if (copy_to_user(uinsns, prog->bpf_func, ulen))
+				return -EFAULT;
+		} else {
+			info.jited_prog_insns = 0;
+		}
+	}
+
 done:
 	if (copy_to_user(uinfo, &info, info_len) ||
 	    put_user(info_len, &uattr->info.info_len))
@@ -1646,6 +1798,12 @@ static int bpf_map_get_info_by_fd(struct bpf_map *map,
 	info.map_flags = map->map_flags;
 	memcpy(info.name, map->name, sizeof(map->name));
 
+	if (bpf_map_is_dev_bound(map)) {
+		err = bpf_map_offload_info_fill(&info, map);
+		if (err)
+			return err;
+	}
+
 	if (copy_to_user(uinfo, &info, info_len) ||
 	    put_user(info_len, &uattr->info.info_len))
 		return -EFAULT;
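Among the syscall.c changes, bpf_insn_prepare_dump() sanitizes the instruction stream handed back to user space: ld64 immediates that match a used map's address are rewritten to the map's user-visible id, and remaining raw addresses are zeroed unless bpf_dump_raw_ok(). The sketch below models that rewrite with a toy two-slot ld64 encoding; it is not the kernel's instruction layout:

/* Scan a copied instruction array; where a 64-bit immediate equals a
 * known map pointer, substitute the map id; otherwise hide the raw
 * address for unprivileged dumps. Illustrative only.
 */
#include <stdio.h>

struct insn { unsigned char code; int imm; };	/* toy 2-slot ld64 */
#define LD_IMM64 0x18

struct map { int id; };

static struct map used_map = { 7 };

static void prepare_dump(struct insn *insns, int len, int raw_ok)
{
	int i;

	for (i = 0; i + 1 < len; i++) {
		unsigned long long imm;

		if (insns[i].code != LD_IMM64)
			continue;
		imm = ((unsigned long long)(unsigned)insns[i + 1].imm << 32) |
		      (unsigned)insns[i].imm;
		if (imm == (unsigned long long)(unsigned long)&used_map) {
			insns[i].imm = used_map.id;	/* pointer -> id */
			insns[i + 1].imm = 0;
		} else if (!raw_ok) {
			insns[i].imm = 0;	/* hide raw addresses */
			insns[i + 1].imm = 0;
		}
	}
}

int main(void)
{
	unsigned long long p = (unsigned long)&used_map;
	struct insn prog[2] = {
		{ LD_IMM64, (int)(unsigned)p },
		{ 0,        (int)(p >> 32)  },
	};

	prepare_dump(prog, 2, 0);
	printf("imm after sanitizing: %d\n", prog[0].imm);	/* map id 7 */
	return 0;
}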
diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c
index 13551e623501..5fb69a85d967 100644
--- a/kernel/bpf/verifier.c
+++ b/kernel/bpf/verifier.c
@@ -20,6 +20,8 @@
 #include <linux/file.h>
 #include <linux/vmalloc.h>
 #include <linux/stringify.h>
+#include <linux/bsearch.h>
+#include <linux/sort.h>
 
 #include "disasm.h"
 
@@ -167,11 +169,11 @@ struct bpf_call_arg_meta {
 static DEFINE_MUTEX(bpf_verifier_lock);
 
 /* log_level controls verbosity level of eBPF verifier.
- * verbose() is used to dump the verification trace to the log, so the user
- * can figure out what's wrong with the program
+ * bpf_verifier_log_write() is used to dump the verification trace to the log,
+ * so the user can figure out what's wrong with the program
  */
-static __printf(2, 3) void verbose(struct bpf_verifier_env *env,
-				   const char *fmt, ...)
+__printf(2, 3) void bpf_verifier_log_write(struct bpf_verifier_env *env,
+					   const char *fmt, ...)
 {
 	struct bpf_verifer_log *log = &env->log;
 	unsigned int n;
@@ -195,6 +197,14 @@ static __printf(2, 3) void verbose(struct bpf_verifier_env *env,
 	else
 		log->ubuf = NULL;
 }
+EXPORT_SYMBOL_GPL(bpf_verifier_log_write);
+/* Historically bpf_verifier_log_write was called verbose, but the name was too
+ * generic for symbol export. The function was renamed, but not the calls in
+ * the verifier to avoid complicating backports. Hence the alias below.
+ */
+static __printf(2, 3) void verbose(struct bpf_verifier_env *env,
+				   const char *fmt, ...)
+	__attribute__((alias("bpf_verifier_log_write")));
 
 static bool type_is_pkt_pointer(enum bpf_reg_type type)
 {
@@ -216,23 +226,48 @@ static const char * const reg_type_str[] = {
 	[PTR_TO_PACKET_END]	= "pkt_end",
 };
 
+static void print_liveness(struct bpf_verifier_env *env,
+			   enum bpf_reg_liveness live)
+{
+	if (live & (REG_LIVE_READ | REG_LIVE_WRITTEN))
+		verbose(env, "_");
+	if (live & REG_LIVE_READ)
+		verbose(env, "r");
+	if (live & REG_LIVE_WRITTEN)
+		verbose(env, "w");
+}
+
+static struct bpf_func_state *func(struct bpf_verifier_env *env,
+				   const struct bpf_reg_state *reg)
+{
+	struct bpf_verifier_state *cur = env->cur_state;
+
+	return cur->frame[reg->frameno];
+}
+
 static void print_verifier_state(struct bpf_verifier_env *env,
-				 struct bpf_verifier_state *state)
+				 const struct bpf_func_state *state)
 {
-	struct bpf_reg_state *reg;
+	const struct bpf_reg_state *reg;
 	enum bpf_reg_type t;
 	int i;
 
+	if (state->frameno)
+		verbose(env, " frame%d:", state->frameno);
 	for (i = 0; i < MAX_BPF_REG; i++) {
 		reg = &state->regs[i];
 		t = reg->type;
 		if (t == NOT_INIT)
 			continue;
-		verbose(env, " R%d=%s", i, reg_type_str[t]);
+		verbose(env, " R%d", i);
+		print_liveness(env, reg->live);
+		verbose(env, "=%s", reg_type_str[t]);
 		if ((t == SCALAR_VALUE || t == PTR_TO_STACK) &&
 		    tnum_is_const(reg->var_off)) {
 			/* reg->off should be 0 for SCALAR_VALUE */
 			verbose(env, "%lld", reg->var_off.value + reg->off);
+			if (t == PTR_TO_STACK)
+				verbose(env, ",call_%d", func(env, reg)->callsite);
 		} else {
 			verbose(env, "(id=%d", reg->id);
 			if (t != SCALAR_VALUE)
@@ -277,16 +312,21 @@ static void print_verifier_state(struct bpf_verifier_env *env,
 		}
 	}
 	for (i = 0; i < state->allocated_stack / BPF_REG_SIZE; i++) {
-		if (state->stack[i].slot_type[0] == STACK_SPILL)
-			verbose(env, " fp%d=%s",
-				-MAX_BPF_STACK + i * BPF_REG_SIZE,
+		if (state->stack[i].slot_type[0] == STACK_SPILL) {
+			verbose(env, " fp%d",
+				(-i - 1) * BPF_REG_SIZE);
+			print_liveness(env, state->stack[i].spilled_ptr.live);
+			verbose(env, "=%s",
 				reg_type_str[state->stack[i].spilled_ptr.type]);
+		}
+		if (state->stack[i].slot_type[0] == STACK_ZERO)
+			verbose(env, " fp%d=0", (-i - 1) * BPF_REG_SIZE);
 	}
 	verbose(env, "\n");
 }
 
-static int copy_stack_state(struct bpf_verifier_state *dst,
-			    const struct bpf_verifier_state *src)
+static int copy_stack_state(struct bpf_func_state *dst,
+			    const struct bpf_func_state *src)
 {
 	if (!src->stack)
 		return 0;
@@ -302,13 +342,13 @@ static int copy_stack_state(struct bpf_verifier_state *dst,
 
 /* do_check() starts with a zero-sized stack in struct bpf_verifier_state to
  * make it consume a minimal amount of memory. check_stack_write() access from
- * the program calls into realloc_verifier_state() to grow the stack size.
+ * the program calls into realloc_func_state() to grow the stack size.
  * Note there is a non-zero 'parent' pointer inside bpf_verifier_state
  * which this function copies over. It points to the previous
  * bpf_verifier_state, which is never reallocated.
  */
-static int realloc_verifier_state(struct bpf_verifier_state *state, int size,
-				  bool copy_old)
+static int realloc_func_state(struct bpf_func_state *state, int size,
+			      bool copy_old)
 {
 	u32 old_size = state->allocated_stack;
 	struct bpf_stack_state *new_stack;
@@ -341,10 +381,23 @@ static int realloc_verifier_state(struct bpf_verifier_state *state, int size,
 	return 0;
 }
 
+static void free_func_state(struct bpf_func_state *state)
+{
+	if (!state)
+		return;
+	kfree(state->stack);
+	kfree(state);
+}
+
 static void free_verifier_state(struct bpf_verifier_state *state,
 				bool free_self)
 {
-	kfree(state->stack);
+	int i;
+
+	for (i = 0; i <= state->curframe; i++) {
+		free_func_state(state->frame[i]);
+		state->frame[i] = NULL;
+	}
 	if (free_self)
 		kfree(state);
 }
@@ -352,18 +405,46 @@ static void free_verifier_state(struct bpf_verifier_state *state,
 /* copy verifier state from src to dst growing dst stack space
  * when necessary to accommodate larger src stack
  */
-static int copy_verifier_state(struct bpf_verifier_state *dst,
-			       const struct bpf_verifier_state *src)
+static int copy_func_state(struct bpf_func_state *dst,
+			   const struct bpf_func_state *src)
 {
 	int err;
 
-	err = realloc_verifier_state(dst, src->allocated_stack, false);
+	err = realloc_func_state(dst, src->allocated_stack, false);
 	if (err)
 		return err;
-	memcpy(dst, src, offsetof(struct bpf_verifier_state, allocated_stack));
+	memcpy(dst, src, offsetof(struct bpf_func_state, allocated_stack));
 	return copy_stack_state(dst, src);
 }
 
+static int copy_verifier_state(struct bpf_verifier_state *dst_state,
+			       const struct bpf_verifier_state *src)
+{
+	struct bpf_func_state *dst;
+	int i, err;
+
+	/* if dst has more stack frames than src, free them */
+	for (i = src->curframe + 1; i <= dst_state->curframe; i++) {
+		free_func_state(dst_state->frame[i]);
+		dst_state->frame[i] = NULL;
+	}
+	dst_state->curframe = src->curframe;
+	dst_state->parent = src->parent;
+	for (i = 0; i <= src->curframe; i++) {
+		dst = dst_state->frame[i];
+		if (!dst) {
+			dst = kzalloc(sizeof(*dst), GFP_KERNEL);
+			if (!dst)
+				return -ENOMEM;
+			dst_state->frame[i] = dst;
+		}
+		err = copy_func_state(dst, src->frame[i]);
+		if (err)
+			return err;
+	}
+	return 0;
+}
+
 static int pop_stack(struct bpf_verifier_env *env, int *prev_insn_idx,
 		     int *insn_idx)
 {
@@ -416,6 +497,8 @@ static struct bpf_verifier_state *push_stack(struct bpf_verifier_env *env,
 	}
 	return &elem->st;
 err:
+	free_verifier_state(env->cur_state, true);
+	env->cur_state = NULL;
 	/* pop all elements and return */
 	while (!pop_stack(env, NULL, NULL));
 	return NULL;
@@ -425,6 +508,10 @@ err:
 static const int caller_saved[CALLER_SAVED_REGS] = {
 	BPF_REG_0, BPF_REG_1, BPF_REG_2, BPF_REG_3, BPF_REG_4, BPF_REG_5
 };
+#define CALLEE_SAVED_REGS 4
+static const int callee_saved[CALLEE_SAVED_REGS] = {
+	BPF_REG_6, BPF_REG_7, BPF_REG_8, BPF_REG_9
+};
 
 static void __mark_reg_not_init(struct bpf_reg_state *reg);
 
@@ -449,6 +536,13 @@ static void __mark_reg_known_zero(struct bpf_reg_state *reg)
 	__mark_reg_known(reg, 0);
 }
 
+static void __mark_reg_const_zero(struct bpf_reg_state *reg)
+{
+	__mark_reg_known(reg, 0);
+	reg->off = 0;
+	reg->type = SCALAR_VALUE;
+}
+
 static void mark_reg_known_zero(struct bpf_verifier_env *env,
 				struct bpf_reg_state *regs, u32 regno)
 {
@@ -560,6 +654,7 @@ static void __mark_reg_unknown(struct bpf_reg_state *reg)
 	reg->id = 0;
 	reg->off = 0;
 	reg->var_off = tnum_unknown;
+	reg->frameno = 0;
 	__mark_reg_unbounded(reg);
 }
 
@@ -568,8 +663,8 @@ static void mark_reg_unknown(struct bpf_verifier_env *env,
 {
 	if (WARN_ON(regno >= MAX_BPF_REG)) {
 		verbose(env, "mark_reg_unknown(regs, %u)\n", regno);
-		/* Something bad happened, let's kill all regs */
-		for (regno = 0; regno < MAX_BPF_REG; regno++)
+		/* Something bad happened, let's kill all regs except FP */
+		for (regno = 0; regno < BPF_REG_FP; regno++)
 			__mark_reg_not_init(regs + regno);
 		return;
 	}
@@ -587,8 +682,8 @@ static void mark_reg_not_init(struct bpf_verifier_env *env,
 {
 	if (WARN_ON(regno >= MAX_BPF_REG)) {
 		verbose(env, "mark_reg_not_init(regs, %u)\n", regno);
-		/* Something bad happened, let's kill all regs */
-		for (regno = 0; regno < MAX_BPF_REG; regno++)
+		/* Something bad happened, let's kill all regs except FP */
+		for (regno = 0; regno < BPF_REG_FP; regno++)
 			__mark_reg_not_init(regs + regno);
 		return;
 	}
@@ -596,8 +691,9 @@ static void mark_reg_not_init(struct bpf_verifier_env *env,
 }
 
 static void init_reg_state(struct bpf_verifier_env *env,
-			   struct bpf_reg_state *regs)
+			   struct bpf_func_state *state)
 {
+	struct bpf_reg_state *regs = state->regs;
 	int i;
 
 	for (i = 0; i < MAX_BPF_REG; i++) {
@@ -608,41 +704,218 @@ static void init_reg_state(struct bpf_verifier_env *env,
 	/* frame pointer */
 	regs[BPF_REG_FP].type = PTR_TO_STACK;
 	mark_reg_known_zero(env, regs, BPF_REG_FP);
+	regs[BPF_REG_FP].frameno = state->frameno;
 
 	/* 1st arg to a function */
 	regs[BPF_REG_1].type = PTR_TO_CTX;
 	mark_reg_known_zero(env, regs, BPF_REG_1);
 }
 
+#define BPF_MAIN_FUNC (-1)
+static void init_func_state(struct bpf_verifier_env *env,
+			    struct bpf_func_state *state,
+			    int callsite, int frameno, int subprogno)
+{
+	state->callsite = callsite;
+	state->frameno = frameno;
+	state->subprogno = subprogno;
+	init_reg_state(env, state);
+}
+
 enum reg_arg_type {
 	SRC_OP,		/* register is used as source operand */
 	DST_OP,		/* register is used as destination operand */
 	DST_OP_NO_MARK	/* same as above, check only, don't mark */
 };
 
-static void mark_reg_read(const struct bpf_verifier_state *state, u32 regno)
+static int cmp_subprogs(const void *a, const void *b)
+{
+	return *(int *)a - *(int *)b;
+}
+
+static int find_subprog(struct bpf_verifier_env *env, int off)
 {
-	struct bpf_verifier_state *parent = state->parent;
+	u32 *p;
+
+	p = bsearch(&off, env->subprog_starts, env->subprog_cnt,
+		    sizeof(env->subprog_starts[0]), cmp_subprogs);
+	if (!p)
+		return -ENOENT;
+	return p - env->subprog_starts;
+}
+
+static int add_subprog(struct bpf_verifier_env *env, int off)
+{
+	int insn_cnt = env->prog->len;
+	int ret;
+
+	if (off >= insn_cnt || off < 0) {
+		verbose(env, "call to invalid destination\n");
+		return -EINVAL;
+	}
+	ret = find_subprog(env, off);
+	if (ret >= 0)
+		return 0;
+	if (env->subprog_cnt >= BPF_MAX_SUBPROGS) {
+		verbose(env, "too many subprograms\n");
+		return -E2BIG;
+	}
+	env->subprog_starts[env->subprog_cnt++] = off;
+	sort(env->subprog_starts, env->subprog_cnt,
+	     sizeof(env->subprog_starts[0]), cmp_subprogs, NULL);
+	return 0;
+}
+
+static int check_subprogs(struct bpf_verifier_env *env)
+{
+	int i, ret, subprog_start, subprog_end, off, cur_subprog = 0;
+	struct bpf_insn *insn = env->prog->insnsi;
+	int insn_cnt = env->prog->len;
+
+	/* determine subprog starts. The end is one before the next starts */
+	for (i = 0; i < insn_cnt; i++) {
+		if (insn[i].code != (BPF_JMP | BPF_CALL))
+			continue;
+		if (insn[i].src_reg != BPF_PSEUDO_CALL)
+			continue;
+		if (!env->allow_ptr_leaks) {
+			verbose(env, "function calls to other bpf functions are allowed for root only\n");
+			return -EPERM;
+		}
+		if (bpf_prog_is_dev_bound(env->prog->aux)) {
+			verbose(env, "function calls in offloaded programs are not supported yet\n");
+			return -EINVAL;
+		}
+		ret = add_subprog(env, i + insn[i].imm + 1);
+		if (ret < 0)
+			return ret;
+	}
+
+	if (env->log.level > 1)
+		for (i = 0; i < env->subprog_cnt; i++)
+			verbose(env, "func#%d @%d\n", i, env->subprog_starts[i]);
+
+	/* now check that all jumps are within the same subprog */
+	subprog_start = 0;
+	if (env->subprog_cnt == cur_subprog)
+		subprog_end = insn_cnt;
+	else
+		subprog_end = env->subprog_starts[cur_subprog++];
+	for (i = 0; i < insn_cnt; i++) {
+		u8 code = insn[i].code;
+
+		if (BPF_CLASS(code) != BPF_JMP)
+			goto next;
+		if (BPF_OP(code) == BPF_EXIT || BPF_OP(code) == BPF_CALL)
+			goto next;
+		off = i + insn[i].off + 1;
+		if (off < subprog_start || off >= subprog_end) {
+			verbose(env, "jump out of range from insn %d to %d\n", i, off);
+			return -EINVAL;
+		}
+next:
+		if (i == subprog_end - 1) {
+			/* to avoid fall-through from one subprog into another,
+			 * the last insn of the subprog should be either exit
+			 * or an unconditional jump back
+			 */
+			if (code != (BPF_JMP | BPF_EXIT) &&
+			    code != (BPF_JMP | BPF_JA)) {
+				verbose(env, "last insn is not an exit or jmp\n");
+				return -EINVAL;
+			}
+			subprog_start = subprog_end;
+			if (env->subprog_cnt == cur_subprog)
+				subprog_end = insn_cnt;
+			else
+				subprog_end = env->subprog_starts[cur_subprog++];
+		}
+	}
+	return 0;
+}
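
For orientation, a bpf-to-bpf call is a BPF_JMP | BPF_CALL insn whose src_reg
is BPF_PSEUDO_CALL and whose imm is the pc-relative offset of the callee, so
add_subprog() records i + insn->imm + 1 as a function start. A minimal sketch
with the insn macros from include/linux/filter.h (illustrative layout, not
part of this patch; loads as root only, per the check above):

	struct bpf_insn prog[] = {
		BPF_MOV64_IMM(BPF_REG_1, 41),
		/* pseudo call to insn 3: i(1) + imm(1) + 1 */
		BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, BPF_PSEUDO_CALL, 0, 1),
		BPF_EXIT_INSN(),		/* main must end in exit or ja */
		/* func#0 starts here (insn 3) */
		BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
		BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 1),
		BPF_EXIT_INSN(),
	};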
+
+static
+struct bpf_verifier_state *skip_callee(struct bpf_verifier_env *env,
+				       const struct bpf_verifier_state *state,
+				       struct bpf_verifier_state *parent,
+				       u32 regno)
+{
+	struct bpf_verifier_state *tmp = NULL;
+
+	/* 'parent' could be a state of the caller and
+	 * 'state' could be a state of the callee. In such a case
+	 * parent->curframe < state->curframe
+	 * and it's ok for the r1 - r5 registers
+	 *
+	 * 'parent' could be a callee's state after it bpf_exit-ed.
+	 * In such a case parent->curframe > state->curframe
+	 * and it's ok for r0 only
+	 */
+	if (parent->curframe == state->curframe ||
+	    (parent->curframe < state->curframe &&
+	     regno >= BPF_REG_1 && regno <= BPF_REG_5) ||
+	    (parent->curframe > state->curframe &&
+	       regno == BPF_REG_0))
+		return parent;
+
+	if (parent->curframe > state->curframe &&
+	    regno >= BPF_REG_6) {
+		/* for callee saved regs we have to skip the whole chain
+		 * of states that belong to callee and mark as LIVE_READ
+		 * the registers before the call
+		 */
+		tmp = parent;
+		while (tmp && tmp->curframe != state->curframe) {
+			tmp = tmp->parent;
+		}
+		if (!tmp)
+			goto bug;
+		parent = tmp;
+	} else {
+		goto bug;
+	}
+	return parent;
+bug:
+	verbose(env, "verifier bug regno %d tmp %p\n", regno, tmp);
+	verbose(env, "regno %d parent frame %d current frame %d\n",
+		regno, parent->curframe, state->curframe);
+	return NULL;
+}
+
+static int mark_reg_read(struct bpf_verifier_env *env,
+			 const struct bpf_verifier_state *state,
+			 struct bpf_verifier_state *parent,
+			 u32 regno)
+{
+	bool writes = parent == state->parent; /* Observe write marks */
 
 	if (regno == BPF_REG_FP)
 		/* We don't need to worry about FP liveness because it's read-only */
-		return;
+		return 0;
 
 	while (parent) {
 		/* if read wasn't screened by an earlier write ... */
-		if (state->regs[regno].live & REG_LIVE_WRITTEN)
+		if (writes && state->frame[state->curframe]->regs[regno].live & REG_LIVE_WRITTEN)
 			break;
+		parent = skip_callee(env, state, parent, regno);
+		if (!parent)
+			return -EFAULT;
 		/* ... then we depend on parent's value */
-		parent->regs[regno].live |= REG_LIVE_READ;
+		parent->frame[parent->curframe]->regs[regno].live |= REG_LIVE_READ;
 		state = parent;
 		parent = state->parent;
+		writes = true;
 	}
+	return 0;
 }
 
 static int check_reg_arg(struct bpf_verifier_env *env, u32 regno,
 			 enum reg_arg_type t)
 {
-	struct bpf_reg_state *regs = env->cur_state->regs;
+	struct bpf_verifier_state *vstate = env->cur_state;
+	struct bpf_func_state *state = vstate->frame[vstate->curframe];
+	struct bpf_reg_state *regs = state->regs;
 
 	if (regno >= MAX_BPF_REG) {
 		verbose(env, "R%d is invalid\n", regno);
@@ -655,7 +928,7 @@ static int check_reg_arg(struct bpf_verifier_env *env, u32 regno,
 			verbose(env, "R%d !read_ok\n", regno);
 			return -EACCES;
 		}
-		mark_reg_read(env->cur_state, regno);
+		return mark_reg_read(env, vstate, vstate->parent, regno);
 	} else {
 		/* check whether register used as dest operand can be written to */
 		if (regno == BPF_REG_FP) {
@@ -686,17 +959,25 @@ static bool is_spillable_regtype(enum bpf_reg_type type)
 	}
 }
 
+/* Does this register contain a constant zero? */
+static bool register_is_null(struct bpf_reg_state *reg)
+{
+	return reg->type == SCALAR_VALUE && tnum_equals_const(reg->var_off, 0);
+}
+
 /* check_stack_read/write functions track spill/fill of registers,
  * stack boundary and alignment are checked in check_mem_access()
  */
 static int check_stack_write(struct bpf_verifier_env *env,
-			     struct bpf_verifier_state *state, int off,
-			     int size, int value_regno)
+			     struct bpf_func_state *state, /* func where register points to */
+			     int off, int size, int value_regno)
 {
+	struct bpf_func_state *cur; /* state of the current function */
 	int i, slot = -off - 1, spi = slot / BPF_REG_SIZE, err;
+	enum bpf_reg_type type;
 
-	err = realloc_verifier_state(state, round_up(slot + 1, BPF_REG_SIZE),
-				     true);
+	err = realloc_func_state(state, round_up(slot + 1, BPF_REG_SIZE),
+				 true);
 	if (err)
 		return err;
 	/* caller checked that off % size == 0 and -MAX_BPF_STACK <= off < 0,
@@ -709,8 +990,9 @@ static int check_stack_write(struct bpf_verifier_env *env,
 		return -EACCES;
 	}
 
+	cur = env->cur_state->frame[env->cur_state->curframe];
 	if (value_regno >= 0 &&
-	    is_spillable_regtype(state->regs[value_regno].type)) {
+	    is_spillable_regtype((type = cur->regs[value_regno].type))) {
 
 		/* register containing pointer is being spilled into stack */
 		if (size != BPF_REG_SIZE) {
@@ -718,51 +1000,116 @@ static int check_stack_write(struct bpf_verifier_env *env,
 			return -EACCES;
 		}
 
+		if (state != cur && type == PTR_TO_STACK) {
+			verbose(env, "cannot spill pointers to stack into stack frame of the caller\n");
+			return -EINVAL;
+		}
+
 		/* save register state */
-		state->stack[spi].spilled_ptr = state->regs[value_regno];
+		state->stack[spi].spilled_ptr = cur->regs[value_regno];
 		state->stack[spi].spilled_ptr.live |= REG_LIVE_WRITTEN;
 
 		for (i = 0; i < BPF_REG_SIZE; i++)
 			state->stack[spi].slot_type[i] = STACK_SPILL;
 	} else {
+		u8 type = STACK_MISC;
+
 		/* regular write of data into stack */
 		state->stack[spi].spilled_ptr = (struct bpf_reg_state) {};
 
+		/* only mark the slot as written if all 8 bytes were written;
+		 * otherwise read propagation may incorrectly stop too soon
+		 * when stack slots are partially written.
+		 * This heuristic means that read propagation will be
+		 * conservative, since it will add reg_live_read marks
+		 * to stack slots all the way to the first state when a
+		 * program writes+reads less than 8 bytes
+		 */
+		if (size == BPF_REG_SIZE)
+			state->stack[spi].spilled_ptr.live |= REG_LIVE_WRITTEN;
+
+		/* when we zero initialize stack slots mark them as such */
+		if (value_regno >= 0 &&
+		    register_is_null(&cur->regs[value_regno]))
+			type = STACK_ZERO;
+
 		for (i = 0; i < size; i++)
 			state->stack[spi].slot_type[(slot - i) % BPF_REG_SIZE] =
-				STACK_MISC;
+				type;
 	}
 	return 0;
 }
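
A small illustration of the new STACK_ZERO tracking (sketch; register and
offset choices are mine): a sub-8-byte store of a known-zero register marks
the bytes STACK_ZERO rather than STACK_MISC, so the read-back below can be
recognized as constant zero instead of unknown data:

	struct bpf_insn zero_slot[] = {
		BPF_MOV64_IMM(BPF_REG_2, 0),			/* known zero */
		BPF_STX_MEM(BPF_B, BPF_REG_10, BPF_REG_2, -1),	/* STACK_ZERO */
		BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_10, -1),	/* r0 == 0 */
		BPF_EXIT_INSN(),
	};

Note that the 1-byte store does not set REG_LIVE_WRITTEN, per the heuristic
above, so read marks keep propagating past it.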
 
-static void mark_stack_slot_read(const struct bpf_verifier_state *state, int slot)
+/* registers of every function are unique and mark_reg_read() propagates
+ * the liveness in the following cases:
+ * - from callee into caller for R1 - R5 that were used as arguments
+ * - from caller into callee for R0 that used as result of the call
+ * - from caller to the same caller skipping states of the callee for R6 - R9,
+ *   since R6 - R9 are callee saved by implicit function prologue and
+ *   caller's R6 != callee's R6, so when we propagate liveness up to
+ *   parent states we need to skip callee states for R6 - R9.
+ *
+ * stack slot marking is different, since the stacks of caller and callee are
+ * accessible from both (the caller can pass a pointer to its own stack to the
+ * callee, which can pass it on to another function), hence mark_stack_slot_read()
+ * has to propagate the stack liveness to all parent states at a given frame number.
+ * Consider code:
+ * f1() {
+ *   ptr = fp - 8;
+ *   *ptr = ctx;
+ *   call f2 {
+ *      .. = *ptr;
+ *   }
+ *   .. = *ptr;
+ * }
+ * First *ptr is reading from f1's stack and mark_stack_slot_read() has
+ * to mark liveness at the f1's frame and not f2's frame.
+ * Second *ptr is also reading from f1's stack and mark_stack_slot_read() has
+ * to propagate liveness to f2 states at f1's frame level and further into
+ * f1 states at f1's frame level until write into that stack slot
+ */
+static void mark_stack_slot_read(struct bpf_verifier_env *env,
+				 const struct bpf_verifier_state *state,
+				 struct bpf_verifier_state *parent,
+				 int slot, int frameno)
 {
-	struct bpf_verifier_state *parent = state->parent;
+	bool writes = parent == state->parent; /* Observe write marks */
 
 	while (parent) {
+		if (parent->frame[frameno]->allocated_stack <= slot * BPF_REG_SIZE)
+			/* since the LIVE_WRITTEN mark is only set for full 8-byte
+			 * writes, the read marks are conservative and the parent
+			 * state may not even have the stack allocated. In such a
+			 * case end the propagation, since the loop has reached the
+			 * beginning of the function
+			 */
+			break;
 		/* if read wasn't screened by an earlier write ... */
-		if (state->stack[slot].spilled_ptr.live & REG_LIVE_WRITTEN)
+		if (writes && state->frame[frameno]->stack[slot].spilled_ptr.live & REG_LIVE_WRITTEN)
 			break;
 		/* ... then we depend on parent's value */
-		parent->stack[slot].spilled_ptr.live |= REG_LIVE_READ;
+		parent->frame[frameno]->stack[slot].spilled_ptr.live |= REG_LIVE_READ;
 		state = parent;
 		parent = state->parent;
+		writes = true;
 	}
 }
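
The f1()/f2() example from the comment above, spelled out with insn macros
(an illustrative sketch; register and offset choices are mine):

	struct bpf_insn cross_frame[] = {
		/* f1: spill ctx to fp-8 and pass fp-8 to f2 */
		BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_1, -8),
		BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
		BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
		BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, BPF_PSEUDO_CALL, 0, 3),
		BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_10, -8),	/* .. = *ptr */
		BPF_MOV64_IMM(BPF_REG_0, 0),
		BPF_EXIT_INSN(),
		/* f2: read through f1's stack pointer */
		BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, 0),	/* .. = *ptr */
		BPF_EXIT_INSN(),
	};

Both loads must leave REG_LIVE_READ marks on f1's fp-8 slot at f1's frame
number in every parent state, which is exactly what the frameno argument
threads through.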
 
 static int check_stack_read(struct bpf_verifier_env *env,
-			    struct bpf_verifier_state *state, int off, int size,
-			    int value_regno)
+			    struct bpf_func_state *reg_state /* func where register points to */,
+			    int off, int size, int value_regno)
 {
+	struct bpf_verifier_state *vstate = env->cur_state;
+	struct bpf_func_state *state = vstate->frame[vstate->curframe];
 	int i, slot = -off - 1, spi = slot / BPF_REG_SIZE;
 	u8 *stype;
 
-	if (state->allocated_stack <= slot) {
+	if (reg_state->allocated_stack <= slot) {
 		verbose(env, "invalid read from stack off %d+0 size %d\n",
 			off, size);
 		return -EACCES;
 	}
-	stype = state->stack[spi].slot_type;
+	stype = reg_state->stack[spi].slot_type;
 
 	if (stype[0] == STACK_SPILL) {
 		if (size != BPF_REG_SIZE) {
@@ -778,21 +1125,44 @@ static int check_stack_read(struct bpf_verifier_env *env,
 
 		if (value_regno >= 0) {
 			/* restore register state from stack */
-			state->regs[value_regno] = state->stack[spi].spilled_ptr;
-			mark_stack_slot_read(state, spi);
+			state->regs[value_regno] = reg_state->stack[spi].spilled_ptr;
+			/* mark reg as written since spilled pointer state likely
+			 * has its liveness marks cleared by is_state_visited()
+			 * which resets stack/reg liveness for state transitions
+			 */
+			state->regs[value_regno].live |= REG_LIVE_WRITTEN;
 		}
+		mark_stack_slot_read(env, vstate, vstate->parent, spi,
+				     reg_state->frameno);
 		return 0;
 	} else {
+		int zeros = 0;
+
 		for (i = 0; i < size; i++) {
-			if (stype[(slot - i) % BPF_REG_SIZE] != STACK_MISC) {
-				verbose(env, "invalid read from stack off %d+%d size %d\n",
-					off, i, size);
-				return -EACCES;
+			if (stype[(slot - i) % BPF_REG_SIZE] == STACK_MISC)
+				continue;
+			if (stype[(slot - i) % BPF_REG_SIZE] == STACK_ZERO) {
+				zeros++;
+				continue;
+			}
+			verbose(env, "invalid read from stack off %d+%d size %d\n",
+				off, i, size);
+			return -EACCES;
+		}
+		mark_stack_slot_read(env, vstate, vstate->parent, spi,
+				     reg_state->frameno);
+		if (value_regno >= 0) {
+			if (zeros == size) {
+				/* any size read into register is zero extended,
+				 * so the whole register == const_zero
+				 */
+				__mark_reg_const_zero(&state->regs[value_regno]);
+			} else {
+				/* have read misc data from the stack */
+				mark_reg_unknown(env, state->regs, value_regno);
 			}
+			state->regs[value_regno].live |= REG_LIVE_WRITTEN;
 		}
-		if (value_regno >= 0)
-			/* have read misc data from the stack */
-			mark_reg_unknown(env, state->regs, value_regno);
 		return 0;
 	}
 }
@@ -817,7 +1187,8 @@ static int __check_map_access(struct bpf_verifier_env *env, u32 regno, int off,
 static int check_map_access(struct bpf_verifier_env *env, u32 regno,
 			    int off, int size, bool zero_size_allowed)
 {
-	struct bpf_verifier_state *state = env->cur_state;
+	struct bpf_verifier_state *vstate = env->cur_state;
+	struct bpf_func_state *state = vstate->frame[vstate->curframe];
 	struct bpf_reg_state *reg = &state->regs[regno];
 	int err;
 
@@ -1079,6 +1450,103 @@ static int check_ptr_alignment(struct bpf_verifier_env *env,
 					   strict);
 }
 
+static int update_stack_depth(struct bpf_verifier_env *env,
+			      const struct bpf_func_state *func,
+			      int off)
+{
+	u16 stack = env->subprog_stack_depth[func->subprogno];
+
+	if (stack >= -off)
+		return 0;
+
+	/* update known max for given subprogram */
+	env->subprog_stack_depth[func->subprogno] = -off;
+	return 0;
+}
+
+/* starting from main bpf function walk all instructions of the function
+ * and recursively walk all callees that given function can call.
+ * Ignore jump and exit insns.
+ * Since recursion is prevented by check_cfg() this algorithm
+ * only needs a local stack of MAX_CALL_FRAMES to remember callsites
+ */
+static int check_max_stack_depth(struct bpf_verifier_env *env)
+{
+	int depth = 0, frame = 0, subprog = 0, i = 0, subprog_end;
+	struct bpf_insn *insn = env->prog->insnsi;
+	int insn_cnt = env->prog->len;
+	int ret_insn[MAX_CALL_FRAMES];
+	int ret_prog[MAX_CALL_FRAMES];
+
+process_func:
+	/* round up to 32-bytes, since this is granularity
+	 * of interpreter stack size
+	 */
+	depth += round_up(max_t(u32, env->subprog_stack_depth[subprog], 1), 32);
+	if (depth > MAX_BPF_STACK) {
+		verbose(env, "combined stack size of %d calls is %d. Too large\n",
+			frame + 1, depth);
+		return -EACCES;
+	}
+continue_func:
+	if (env->subprog_cnt == subprog)
+		subprog_end = insn_cnt;
+	else
+		subprog_end = env->subprog_starts[subprog];
+	for (; i < subprog_end; i++) {
+		if (insn[i].code != (BPF_JMP | BPF_CALL))
+			continue;
+		if (insn[i].src_reg != BPF_PSEUDO_CALL)
+			continue;
+		/* remember insn and function to return to */
+		ret_insn[frame] = i + 1;
+		ret_prog[frame] = subprog;
+
+		/* find the callee */
+		i = i + insn[i].imm + 1;
+		subprog = find_subprog(env, i);
+		if (subprog < 0) {
+			WARN_ONCE(1, "verifier bug. No program starts at insn %d\n",
+				  i);
+			return -EFAULT;
+		}
+		subprog++;
+		frame++;
+		if (frame >= MAX_CALL_FRAMES) {
+			WARN_ONCE(1, "verifier bug. Call stack is too deep\n");
+			return -EFAULT;
+		}
+		goto process_func;
+	}
+	/* end of for() loop means the last insn of the 'subprog'
+	 * was reached. Doesn't matter whether it was JA or EXIT
+	 */
+	if (frame == 0)
+		return 0;
+	depth -= round_up(max_t(u32, env->subprog_stack_depth[subprog], 1), 32);
+	frame--;
+	i = ret_insn[frame];
+	subprog = ret_prog[frame];
+	goto continue_func;
+}
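
A worked example of the accounting (numbers are illustrative): if main uses
40 bytes of stack and calls f1 (1 byte), which calls f2 (200 bytes), the walk
accumulates round_up(40, 32) + round_up(1, 32) + round_up(200, 32) =
64 + 32 + 224 = 320, which stays under MAX_BPF_STACK (512). A stand-alone
userspace sketch of the rounding rule:

	#include <stdio.h>

	static unsigned int frame_size(unsigned int depth)
	{
		if (depth < 1)
			depth = 1;		/* at least one byte */
		return (depth + 31) & ~31U;	/* round_up(depth, 32) */
	}

	int main(void)
	{
		/* main(40) -> f1(1) -> f2(200) */
		unsigned int total = frame_size(40) + frame_size(1) +
				     frame_size(200);

		printf("combined depth: %u (limit 512)\n", total); /* 320 */
		return 0;
	}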
+
+#ifndef CONFIG_BPF_JIT_ALWAYS_ON
+static int get_callee_stack_depth(struct bpf_verifier_env *env,
+				  const struct bpf_insn *insn, int idx)
+{
+	int start = idx + insn->imm + 1, subprog;
+
+	subprog = find_subprog(env, start);
+	if (subprog < 0) {
+		WARN_ONCE(1, "verifier bug. No program starts at insn %d\n",
+			  start);
+		return -EFAULT;
+	}
+	subprog++;
+	return env->subprog_stack_depth[subprog];
+}
+#endif
+
 /* truncate register to smaller size (in bytes)
  * must be called with size < BPF_REG_SIZE
  */
@@ -1112,9 +1580,9 @@ static int check_mem_access(struct bpf_verifier_env *env, int insn_idx, u32 regn
 			    int bpf_size, enum bpf_access_type t,
 			    int value_regno)
 {
-	struct bpf_verifier_state *state = env->cur_state;
 	struct bpf_reg_state *regs = cur_regs(env);
 	struct bpf_reg_state *reg = regs + regno;
+	struct bpf_func_state *state;
 	int size, err = 0;
 
 	size = bpf_size_to_bytes(bpf_size);
@@ -1203,8 +1671,10 @@ static int check_mem_access(struct bpf_verifier_env *env, int insn_idx, u32 regn
 			return -EACCES;
 		}
 
-		if (env->prog->aux->stack_depth < -off)
-			env->prog->aux->stack_depth = -off;
+		state = func(env, reg);
+		err = update_stack_depth(env, state, off);
+		if (err)
+			return err;
 
 		if (t == BPF_WRITE)
 			err = check_stack_write(env, state, off, size,
@@ -1282,12 +1752,6 @@ static int check_xadd(struct bpf_verifier_env *env, int insn_idx, struct bpf_ins
 				BPF_SIZE(insn->code), BPF_WRITE, -1);
 }
 
-/* Does this register contain a constant zero? */
-static bool register_is_null(struct bpf_reg_state reg)
-{
-	return reg.type == SCALAR_VALUE && tnum_equals_const(reg.var_off, 0);
-}
-
 /* when register 'regno' is passed into function that will read 'access_size'
  * bytes from that pointer, make sure that it's within stack boundary
  * and all elements of stack are initialized.
@@ -1298,32 +1762,32 @@ static int check_stack_boundary(struct bpf_verifier_env *env, int regno,
 				int access_size, bool zero_size_allowed,
 				struct bpf_call_arg_meta *meta)
 {
-	struct bpf_verifier_state *state = env->cur_state;
-	struct bpf_reg_state *regs = state->regs;
+	struct bpf_reg_state *reg = cur_regs(env) + regno;
+	struct bpf_func_state *state = func(env, reg);
 	int off, i, slot, spi;
 
-	if (regs[regno].type != PTR_TO_STACK) {
+	if (reg->type != PTR_TO_STACK) {
 		/* Allow zero-byte read from NULL, regardless of pointer type */
 		if (zero_size_allowed && access_size == 0 &&
-		    register_is_null(regs[regno]))
+		    register_is_null(reg))
 			return 0;
 
 		verbose(env, "R%d type=%s expected=%s\n", regno,
-			reg_type_str[regs[regno].type],
+			reg_type_str[reg->type],
 			reg_type_str[PTR_TO_STACK]);
 		return -EACCES;
 	}
 
 	/* Only allow fixed-offset stack reads */
-	if (!tnum_is_const(regs[regno].var_off)) {
+	if (!tnum_is_const(reg->var_off)) {
 		char tn_buf[48];
 
-		tnum_strn(tn_buf, sizeof(tn_buf), regs[regno].var_off);
+		tnum_strn(tn_buf, sizeof(tn_buf), reg->var_off);
 		verbose(env, "invalid variable stack read R%d var_off=%s\n",
 			regno, tn_buf);
 		return -EACCES;
 	}
-	off = regs[regno].off + regs[regno].var_off.value;
+	off = reg->off + reg->var_off.value;
 	if (off >= 0 || off < -MAX_BPF_STACK || off + access_size > 0 ||
 	    access_size < 0 || (access_size == 0 && !zero_size_allowed)) {
 		verbose(env, "invalid stack type R%d off=%d access_size=%d\n",
@@ -1331,9 +1795,6 @@ static int check_stack_boundary(struct bpf_verifier_env *env, int regno,
 		return -EACCES;
 	}
 
-	if (env->prog->aux->stack_depth < -off)
-		env->prog->aux->stack_depth = -off;
-
 	if (meta && meta->raw_mode) {
 		meta->access_size = access_size;
 		meta->regno = regno;
@@ -1341,17 +1802,32 @@ static int check_stack_boundary(struct bpf_verifier_env *env, int regno,
 	}
 
 	for (i = 0; i < access_size; i++) {
+		u8 *stype;
+
 		slot = -(off + i) - 1;
 		spi = slot / BPF_REG_SIZE;
-		if (state->allocated_stack <= slot ||
-		    state->stack[spi].slot_type[slot % BPF_REG_SIZE] !=
-			STACK_MISC) {
-			verbose(env, "invalid indirect read from stack off %d+%d size %d\n",
-				off, i, access_size);
-			return -EACCES;
+		if (state->allocated_stack <= slot)
+			goto err;
+		stype = &state->stack[spi].slot_type[slot % BPF_REG_SIZE];
+		if (*stype == STACK_MISC)
+			goto mark;
+		if (*stype == STACK_ZERO) {
+			/* helper can write anything into the stack */
+			*stype = STACK_MISC;
+			goto mark;
 		}
+err:
+		verbose(env, "invalid indirect read from stack off %d+%d size %d\n",
+			off, i, access_size);
+		return -EACCES;
+mark:
+		/* reading any byte out of 8-byte 'spill_slot' will cause
+		 * the whole slot to be marked as 'read'
+		 */
+		mark_stack_slot_read(env, env->cur_state, env->cur_state->parent,
+				     spi, state->frameno);
 	}
-	return 0;
+	return update_stack_depth(env, state, off);
 }
 
 static int check_helper_mem_access(struct bpf_verifier_env *env, int regno,
@@ -1374,6 +1850,19 @@ static int check_helper_mem_access(struct bpf_verifier_env *env, int regno,
 	}
 }
 
+static bool arg_type_is_mem_ptr(enum bpf_arg_type type)
+{
+	return type == ARG_PTR_TO_MEM ||
+	       type == ARG_PTR_TO_MEM_OR_NULL ||
+	       type == ARG_PTR_TO_UNINIT_MEM;
+}
+
+static bool arg_type_is_mem_size(enum bpf_arg_type type)
+{
+	return type == ARG_CONST_SIZE ||
+	       type == ARG_CONST_SIZE_OR_ZERO;
+}
+
 static int check_func_arg(struct bpf_verifier_env *env, u32 regno,
 			  enum bpf_arg_type arg_type,
 			  struct bpf_call_arg_meta *meta)
@@ -1423,15 +1912,13 @@ static int check_func_arg(struct bpf_verifier_env *env, u32 regno,
 		expected_type = PTR_TO_CTX;
 		if (type != expected_type)
 			goto err_type;
-	} else if (arg_type == ARG_PTR_TO_MEM ||
-		   arg_type == ARG_PTR_TO_MEM_OR_NULL ||
-		   arg_type == ARG_PTR_TO_UNINIT_MEM) {
+	} else if (arg_type_is_mem_ptr(arg_type)) {
 		expected_type = PTR_TO_STACK;
 		/* One exception here. In case function allows for NULL to be
 		 * passed in as argument, it's a SCALAR_VALUE type. Final test
 		 * happens during stack boundary checking.
 		 */
-		if (register_is_null(*reg) &&
+		if (register_is_null(reg) &&
 		    arg_type == ARG_PTR_TO_MEM_OR_NULL)
 			/* final test in check_stack_boundary() */;
 		else if (!type_is_pkt_pointer(type) &&
@@ -1486,25 +1973,12 @@ static int check_func_arg(struct bpf_verifier_env *env, u32 regno,
 			err = check_stack_boundary(env, regno,
 						   meta->map_ptr->value_size,
 						   false, NULL);
-	} else if (arg_type == ARG_CONST_SIZE ||
-		   arg_type == ARG_CONST_SIZE_OR_ZERO) {
+	} else if (arg_type_is_mem_size(arg_type)) {
 		bool zero_size_allowed = (arg_type == ARG_CONST_SIZE_OR_ZERO);
 
-		/* bpf_xxx(..., buf, len) call will access 'len' bytes
-		 * from stack pointer 'buf'. Check it
-		 * note: regno == len, regno - 1 == buf
-		 */
-		if (regno == 0) {
-			/* kernel subsystem misconfigured verifier */
-			verbose(env,
-				"ARG_CONST_SIZE cannot be first argument\n");
-			return -EACCES;
-		}
-
 		/* The register is SCALAR_VALUE; the access check
 		 * happens using its boundaries.
 		 */
-
 		if (!tnum_is_const(reg->var_off))
 			/* For unprivileged variable accesses, disable raw
 			 * mode so that the program is required to
@@ -1604,6 +2078,10 @@ static int check_map_func_compatibility(struct bpf_verifier_env *env,
 	case BPF_FUNC_tail_call:
 		if (map->map_type != BPF_MAP_TYPE_PROG_ARRAY)
 			goto error;
+		if (env->subprog_cnt) {
+			verbose(env, "tail_calls are not allowed in programs with bpf-to-bpf calls\n");
+			return -EINVAL;
+		}
 		break;
 	case BPF_FUNC_perf_event_read:
 	case BPF_FUNC_perf_event_output:
@@ -1644,7 +2122,7 @@ error:
 	return -EINVAL;
 }
 
-static int check_raw_mode(const struct bpf_func_proto *fn)
+static bool check_raw_mode_ok(const struct bpf_func_proto *fn)
 {
 	int count = 0;
 
@@ -1659,15 +2137,52 @@ static int check_raw_mode(const struct bpf_func_proto *fn)
 	if (fn->arg5_type == ARG_PTR_TO_UNINIT_MEM)
 		count++;
 
-	return count > 1 ? -EINVAL : 0;
+	/* We only support one arg being in raw mode at the moment,
+	 * which is sufficient for the helper functions we have
+	 * right now.
+	 */
+	return count <= 1;
+}
+
+static bool check_args_pair_invalid(enum bpf_arg_type arg_curr,
+				    enum bpf_arg_type arg_next)
+{
+	return (arg_type_is_mem_ptr(arg_curr) &&
+	        !arg_type_is_mem_size(arg_next)) ||
+	       (!arg_type_is_mem_ptr(arg_curr) &&
+		arg_type_is_mem_size(arg_next));
+}
+
+static bool check_arg_pair_ok(const struct bpf_func_proto *fn)
+{
+	/* A bpf_xxx(..., buf, len) call will access 'len'
+	 * bytes of memory at 'buf'. Both arg types need
+	 * to be paired, so make sure there's no buggy
+	 * helper function specification.
+	 */
+	if (arg_type_is_mem_size(fn->arg1_type) ||
+	    arg_type_is_mem_ptr(fn->arg5_type)  ||
+	    check_args_pair_invalid(fn->arg1_type, fn->arg2_type) ||
+	    check_args_pair_invalid(fn->arg2_type, fn->arg3_type) ||
+	    check_args_pair_invalid(fn->arg3_type, fn->arg4_type) ||
+	    check_args_pair_invalid(fn->arg4_type, fn->arg5_type))
+		return false;
+
+	return true;
+}
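
For reference, a helper proto passes this pairing check when each mem pointer
is immediately followed by its size argument. A hypothetical sketch
(bpf_example_proto is made up for illustration, not a real helper):

	static const struct bpf_func_proto bpf_example_proto = {
		.func		= NULL,	/* would point at the helper body */
		.gpl_only	= false,
		.ret_type	= RET_INTEGER,
		.arg1_type	= ARG_PTR_TO_MEM,		/* buf */
		.arg2_type	= ARG_CONST_SIZE_OR_ZERO,	/* len, pairs with arg1 */
	};

Swapping the two, or following a mem pointer with a non-size argument, is now
caught here as a buggy helper specification rather than at call-check time.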
+
+static int check_func_proto(const struct bpf_func_proto *fn)
+{
+	return check_raw_mode_ok(fn) &&
+	       check_arg_pair_ok(fn) ? 0 : -EINVAL;
 }
 
 /* Packet data might have moved, any old PTR_TO_PACKET[_META,_END]
  * are now invalid, so turn them into unknown SCALAR_VALUE.
  */
-static void clear_all_pkt_pointers(struct bpf_verifier_env *env)
+static void __clear_all_pkt_pointers(struct bpf_verifier_env *env,
+				     struct bpf_func_state *state)
 {
-	struct bpf_verifier_state *state = env->cur_state;
 	struct bpf_reg_state *regs = state->regs, *reg;
 	int i;
 
@@ -1684,7 +2199,121 @@ static void clear_all_pkt_pointers(struct bpf_verifier_env *env)
 	}
 }
 
-static int check_call(struct bpf_verifier_env *env, int func_id, int insn_idx)
+static void clear_all_pkt_pointers(struct bpf_verifier_env *env)
+{
+	struct bpf_verifier_state *vstate = env->cur_state;
+	int i;
+
+	for (i = 0; i <= vstate->curframe; i++)
+		__clear_all_pkt_pointers(env, vstate->frame[i]);
+}
+
+static int check_func_call(struct bpf_verifier_env *env, struct bpf_insn *insn,
+			   int *insn_idx)
+{
+	struct bpf_verifier_state *state = env->cur_state;
+	struct bpf_func_state *caller, *callee;
+	int i, subprog, target_insn;
+
+	if (state->curframe + 1 >= MAX_CALL_FRAMES) {
+		verbose(env, "the call stack of %d frames is too deep\n",
+			state->curframe + 2);
+		return -E2BIG;
+	}
+
+	target_insn = *insn_idx + insn->imm;
+	subprog = find_subprog(env, target_insn + 1);
+	if (subprog < 0) {
+		verbose(env, "verifier bug. No program starts at insn %d\n",
+			target_insn + 1);
+		return -EFAULT;
+	}
+
+	caller = state->frame[state->curframe];
+	if (state->frame[state->curframe + 1]) {
+		verbose(env, "verifier bug. Frame %d already allocated\n",
+			state->curframe + 1);
+		return -EFAULT;
+	}
+
+	callee = kzalloc(sizeof(*callee), GFP_KERNEL);
+	if (!callee)
+		return -ENOMEM;
+	state->frame[state->curframe + 1] = callee;
+
+	/* the callee cannot read r0 and r6 - r9 and has to write
+	 * into its own stack before reading from it.
+	 * the callee can read/write into the caller's stack
+	 */
+	init_func_state(env, callee,
+			/* remember the callsite, it will be used by bpf_exit */
+			*insn_idx /* callsite */,
+			state->curframe + 1 /* frameno within this callchain */,
+			subprog + 1 /* subprog number within this prog */);
+
+	/* copy r1 - r5 args that callee can access */
+	for (i = BPF_REG_1; i <= BPF_REG_5; i++)
+		callee->regs[i] = caller->regs[i];
+
+	/* after the call registers r0 - r5 were scratched */
+	for (i = 0; i < CALLER_SAVED_REGS; i++) {
+		mark_reg_not_init(env, caller->regs, caller_saved[i]);
+		check_reg_arg(env, caller_saved[i], DST_OP_NO_MARK);
+	}
+
+	/* only increment it after check_reg_arg() finished */
+	state->curframe++;
+
+	/* and go analyze first insn of the callee */
+	*insn_idx = target_insn;
+
+	if (env->log.level) {
+		verbose(env, "caller:\n");
+		print_verifier_state(env, caller);
+		verbose(env, "callee:\n");
+		print_verifier_state(env, callee);
+	}
+	return 0;
+}
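
From the program's point of view the convention is: r1-r5 carry arguments
into the callee, r0 carries the result back, r6-r9 are preserved for the
caller. An illustrative sketch (register choices are mine):

	struct bpf_insn callee_saved[] = {
		BPF_MOV64_IMM(BPF_REG_6, 7),	/* survives the call */
		BPF_MOV64_IMM(BPF_REG_1, 0),	/* argument window r1-r5 */
		BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, BPF_PSEUDO_CALL, 0, 2),
		BPF_MOV64_REG(BPF_REG_0, BPF_REG_6),	/* r6 still == 7 */
		BPF_EXIT_INSN(),
		/* subprog: r0 and r6-r9 unreadable until written */
		BPF_MOV64_IMM(BPF_REG_0, 0),
		BPF_EXIT_INSN(),
	};

Reading r2-r5 in the caller after the call would be rejected, since
check_reg_arg() above marked them scratched.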
+
+static int prepare_func_exit(struct bpf_verifier_env *env, int *insn_idx)
+{
+	struct bpf_verifier_state *state = env->cur_state;
+	struct bpf_func_state *caller, *callee;
+	struct bpf_reg_state *r0;
+
+	callee = state->frame[state->curframe];
+	r0 = &callee->regs[BPF_REG_0];
+	if (r0->type == PTR_TO_STACK) {
+		/* technically it's ok to return the caller's stack pointer
+		 * (or the caller's caller's pointer) back to the caller,
+		 * since these pointers are valid. Only the current stack
+		 * pointer will become invalid as soon as the function exits,
+		 * but let's be conservative
+		 */
+		verbose(env, "cannot return stack pointer to the caller\n");
+		return -EINVAL;
+	}
+
+	state->curframe--;
+	caller = state->frame[state->curframe];
+	/* return to the caller whatever r0 had in the callee */
+	caller->regs[BPF_REG_0] = *r0;
+
+	*insn_idx = callee->callsite + 1;
+	if (env->log.level) {
+		verbose(env, "returning from callee:\n");
+		print_verifier_state(env, callee);
+		verbose(env, "to caller at %d:\n", *insn_idx);
+		print_verifier_state(env, caller);
+	}
+	/* clear everything in the callee */
+	free_func_state(callee);
+	state->frame[state->curframe + 1] = NULL;
+	return 0;
+}
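
A subprog that tries to hand a pointer into its own frame back to the caller
now trips the check above. Illustrative fragment of the rejected pattern:

	/* callee body: return fp - 8 */
	BPF_MOV64_REG(BPF_REG_0, BPF_REG_10),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, -8),
	BPF_EXIT_INSN(),	/* "cannot return stack pointer to the caller" */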
+
+static int check_helper_call(struct bpf_verifier_env *env, int func_id, int insn_idx)
 {
 	const struct bpf_func_proto *fn = NULL;
 	struct bpf_reg_state *regs;
@@ -1701,7 +2330,6 @@ static int check_call(struct bpf_verifier_env *env, int func_id, int insn_idx)
 
 	if (env->ops->get_func_proto)
 		fn = env->ops->get_func_proto(func_id);
-
 	if (!fn) {
 		verbose(env, "unknown func %s#%d\n", func_id_name(func_id),
 			func_id);
@@ -1725,10 +2353,7 @@ static int check_call(struct bpf_verifier_env *env, int func_id, int insn_idx)
 	memset(&meta, 0, sizeof(meta));
 	meta.pkt_access = fn->pkt_access;
 
-	/* We only support one arg being in raw mode at the moment, which
-	 * is sufficient for the helper functions we have right now.
-	 */
-	err = check_raw_mode(fn);
+	err = check_func_proto(fn);
 	if (err) {
 		verbose(env, "kernel subsystem misconfigured func %s#%d\n",
 			func_id_name(func_id), func_id);
@@ -1884,7 +2509,9 @@ static int adjust_ptr_min_max_vals(struct bpf_verifier_env *env,
 				   const struct bpf_reg_state *ptr_reg,
 				   const struct bpf_reg_state *off_reg)
 {
-	struct bpf_reg_state *regs = cur_regs(env), *dst_reg;
+	struct bpf_verifier_state *vstate = env->cur_state;
+	struct bpf_func_state *state = vstate->frame[vstate->curframe];
+	struct bpf_reg_state *regs = state->regs, *dst_reg;
 	bool known = tnum_is_const(off_reg->var_off);
 	s64 smin_val = off_reg->smin_value, smax_val = off_reg->smax_value,
 	    smin_ptr = ptr_reg->smin_value, smax_ptr = ptr_reg->smax_value;
@@ -2319,7 +2946,9 @@ static int adjust_scalar_min_max_vals(struct bpf_verifier_env *env,
 static int adjust_reg_min_max_vals(struct bpf_verifier_env *env,
 				   struct bpf_insn *insn)
 {
-	struct bpf_reg_state *regs = cur_regs(env), *dst_reg, *src_reg;
+	struct bpf_verifier_state *vstate = env->cur_state;
+	struct bpf_func_state *state = vstate->frame[vstate->curframe];
+	struct bpf_reg_state *regs = state->regs, *dst_reg, *src_reg;
 	struct bpf_reg_state *ptr_reg = NULL, off_reg = {0};
 	u8 opcode = BPF_OP(insn->code);
 
@@ -2370,12 +2999,12 @@ static int adjust_reg_min_max_vals(struct bpf_verifier_env *env,
 
 	/* Got here implies adding two SCALAR_VALUEs */
 	if (WARN_ON_ONCE(ptr_reg)) {
-		print_verifier_state(env, env->cur_state);
+		print_verifier_state(env, state);
 		verbose(env, "verifier internal error: unexpected ptr_reg\n");
 		return -EINVAL;
 	}
 	if (WARN_ON(!src_reg)) {
-		print_verifier_state(env, env->cur_state);
+		print_verifier_state(env, state);
 		verbose(env, "verifier internal error: no src_reg\n");
 		return -EINVAL;
 	}
@@ -2537,14 +3166,15 @@ static int check_alu_op(struct bpf_verifier_env *env, struct bpf_insn *insn)
 	return 0;
 }
 
-static void find_good_pkt_pointers(struct bpf_verifier_state *state,
+static void find_good_pkt_pointers(struct bpf_verifier_state *vstate,
 				   struct bpf_reg_state *dst_reg,
 				   enum bpf_reg_type type,
 				   bool range_right_open)
 {
+	struct bpf_func_state *state = vstate->frame[vstate->curframe];
 	struct bpf_reg_state *regs = state->regs, *reg;
 	u16 new_range;
-	int i;
+	int i, j;
 
 	if (dst_reg->off < 0 ||
 	    (dst_reg->off == 0 && range_right_open))
@@ -2614,12 +3244,15 @@ static void find_good_pkt_pointers(struct bpf_verifier_state *state,
 			/* keep the maximum range already checked */
 			regs[i].range = max(regs[i].range, new_range);
 
-	for (i = 0; i < state->allocated_stack / BPF_REG_SIZE; i++) {
-		if (state->stack[i].slot_type[0] != STACK_SPILL)
-			continue;
-		reg = &state->stack[i].spilled_ptr;
-		if (reg->type == type && reg->id == dst_reg->id)
-			reg->range = max(reg->range, new_range);
+	for (j = 0; j <= vstate->curframe; j++) {
+		state = vstate->frame[j];
+		for (i = 0; i < state->allocated_stack / BPF_REG_SIZE; i++) {
+			if (state->stack[i].slot_type[0] != STACK_SPILL)
+				continue;
+			reg = &state->stack[i].spilled_ptr;
+			if (reg->type == type && reg->id == dst_reg->id)
+				reg->range = max(reg->range, new_range);
+		}
 	}
 }
 
@@ -2857,20 +3490,24 @@ static void mark_map_reg(struct bpf_reg_state *regs, u32 regno, u32 id,
 /* The logic is similar to find_good_pkt_pointers(), both could eventually
  * be folded together at some point.
  */
-static void mark_map_regs(struct bpf_verifier_state *state, u32 regno,
+static void mark_map_regs(struct bpf_verifier_state *vstate, u32 regno,
 			  bool is_null)
 {
+	struct bpf_func_state *state = vstate->frame[vstate->curframe];
 	struct bpf_reg_state *regs = state->regs;
 	u32 id = regs[regno].id;
-	int i;
+	int i, j;
 
 	for (i = 0; i < MAX_BPF_REG; i++)
 		mark_map_reg(regs, i, id, is_null);
 
-	for (i = 0; i < state->allocated_stack / BPF_REG_SIZE; i++) {
-		if (state->stack[i].slot_type[0] != STACK_SPILL)
-			continue;
-		mark_map_reg(&state->stack[i].spilled_ptr, 0, id, is_null);
+	for (j = 0; j <= vstate->curframe; j++) {
+		state = vstate->frame[j];
+		for (i = 0; i < state->allocated_stack / BPF_REG_SIZE; i++) {
+			if (state->stack[i].slot_type[0] != STACK_SPILL)
+				continue;
+			mark_map_reg(&state->stack[i].spilled_ptr, 0, id, is_null);
+		}
 	}
 }
 
@@ -2970,8 +3607,10 @@ static bool try_match_pkt_pointers(const struct bpf_insn *insn,
 static int check_cond_jmp_op(struct bpf_verifier_env *env,
 			     struct bpf_insn *insn, int *insn_idx)
 {
-	struct bpf_verifier_state *other_branch, *this_branch = env->cur_state;
-	struct bpf_reg_state *regs = this_branch->regs, *dst_reg;
+	struct bpf_verifier_state *this_branch = env->cur_state;
+	struct bpf_verifier_state *other_branch;
+	struct bpf_reg_state *regs = this_branch->frame[this_branch->curframe]->regs;
+	struct bpf_reg_state *dst_reg, *other_branch_regs;
 	u8 opcode = BPF_OP(insn->code);
 	int err;
 
@@ -3014,8 +3653,9 @@ static int check_cond_jmp_op(struct bpf_verifier_env *env,
 	if (BPF_SRC(insn->code) == BPF_K &&
 	    (opcode == BPF_JEQ || opcode == BPF_JNE) &&
 	    dst_reg->type == SCALAR_VALUE &&
-	    tnum_equals_const(dst_reg->var_off, insn->imm)) {
-		if (opcode == BPF_JEQ) {
+	    tnum_is_const(dst_reg->var_off)) {
+		if ((opcode == BPF_JEQ && dst_reg->var_off.value == insn->imm) ||
+		    (opcode == BPF_JNE && dst_reg->var_off.value != insn->imm)) {
 			/* if (imm == imm) goto pc+off;
 			 * only follow the goto, ignore fall-through
 			 */
@@ -3033,6 +3673,7 @@ static int check_cond_jmp_op(struct bpf_verifier_env *env,
 	other_branch = push_stack(env, *insn_idx + insn->off + 1, *insn_idx);
 	if (!other_branch)
 		return -EFAULT;
+	other_branch_regs = other_branch->frame[other_branch->curframe]->regs;
 
 	/* detect if we are comparing against a constant value so we can adjust
 	 * our min/max values for our dst register.
@@ -3045,22 +3686,22 @@ static int check_cond_jmp_op(struct bpf_verifier_env *env,
 		if (dst_reg->type == SCALAR_VALUE &&
 		    regs[insn->src_reg].type == SCALAR_VALUE) {
 			if (tnum_is_const(regs[insn->src_reg].var_off))
-				reg_set_min_max(&other_branch->regs[insn->dst_reg],
+				reg_set_min_max(&other_branch_regs[insn->dst_reg],
 						dst_reg, regs[insn->src_reg].var_off.value,
 						opcode);
 			else if (tnum_is_const(dst_reg->var_off))
-				reg_set_min_max_inv(&other_branch->regs[insn->src_reg],
+				reg_set_min_max_inv(&other_branch_regs[insn->src_reg],
 						    &regs[insn->src_reg],
 						    dst_reg->var_off.value, opcode);
 			else if (opcode == BPF_JEQ || opcode == BPF_JNE)
 				/* Comparing for equality, we can combine knowledge */
-				reg_combine_min_max(&other_branch->regs[insn->src_reg],
-						    &other_branch->regs[insn->dst_reg],
+				reg_combine_min_max(&other_branch_regs[insn->src_reg],
+						    &other_branch_regs[insn->dst_reg],
 						    &regs[insn->src_reg],
 						    &regs[insn->dst_reg], opcode);
 		}
 	} else if (dst_reg->type == SCALAR_VALUE) {
-		reg_set_min_max(&other_branch->regs[insn->dst_reg],
+		reg_set_min_max(&other_branch_regs[insn->dst_reg],
 					dst_reg, insn->imm, opcode);
 	}
 
@@ -3081,7 +3722,7 @@ static int check_cond_jmp_op(struct bpf_verifier_env *env,
 		return -EACCES;
 	}
 	if (env->log.level)
-		print_verifier_state(env, this_branch);
+		print_verifier_state(env, this_branch->frame[this_branch->curframe]);
 	return 0;
 }
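
The widened constant-branch handling above prunes BPF_JNE the same way as
BPF_JEQ. Illustrative sketch: with a known-constant r0, only one side of the
branch is walked and the other side becomes dead code:

	struct bpf_insn known_branch[] = {
		BPF_MOV64_IMM(BPF_REG_0, 3),
		BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 3, 1),	/* 3 != 3: never taken */
		BPF_EXIT_INSN(),		/* the only path explored */
		BPF_MOV64_IMM(BPF_REG_0, 0),	/* dead, later sanitized */
		BPF_EXIT_INSN(),
	};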
 
@@ -3166,6 +3807,18 @@ static int check_ld_abs(struct bpf_verifier_env *env, struct bpf_insn *insn)
 		return -EINVAL;
 	}
 
+	if (env->subprog_cnt) {
+		/* when a program has LD_ABS insns, JITs and the interpreter
+		 * assume that r1 == ctx == skb, which is not the case for
+		 * callees that can have arbitrary arguments. It's problematic
+		 * for the main prog as well, since JITs would need to analyze
+		 * all functions in order to make proper register save/restore
+		 * decisions in the main prog. Hence disallow LD_ABS with calls
+		 */
+		verbose(env, "BPF_LD_[ABS|IND] instructions cannot be mixed with bpf-to-bpf calls\n");
+		return -EINVAL;
+	}
+
 	if (insn->dst_reg != BPF_REG_0 || insn->off != 0 ||
 	    BPF_SIZE(insn->code) == BPF_DW ||
 	    (mode == BPF_ABS && insn->src_reg != BPF_REG_0)) {
@@ -3342,6 +3995,10 @@ static int check_cfg(struct bpf_verifier_env *env)
 	int ret = 0;
 	int i, t;
 
+	ret = check_subprogs(env);
+	if (ret < 0)
+		return ret;
+
 	insn_state = kcalloc(insn_cnt, sizeof(int), GFP_KERNEL);
 	if (!insn_state)
 		return -ENOMEM;
@@ -3374,6 +4031,14 @@ peek_stack:
 				goto err_free;
 			if (t + 1 < insn_cnt)
 				env->explored_states[t + 1] = STATE_LIST_MARK;
+			if (insns[t].src_reg == BPF_PSEUDO_CALL) {
+				env->explored_states[t] = STATE_LIST_MARK;
+				ret = push_insn(t, t + insns[t].imm + 1, BRANCH, env);
+				if (ret == 1)
+					goto peek_stack;
+				else if (ret < 0)
+					goto err_free;
+			}
 		} else if (opcode == BPF_JA) {
 			if (BPF_SRC(insns[t].code) != BPF_K) {
 				ret = -EINVAL;
@@ -3492,11 +4157,21 @@ static bool check_ids(u32 old_id, u32 cur_id, struct idpair *idmap)
 static bool regsafe(struct bpf_reg_state *rold, struct bpf_reg_state *rcur,
 		    struct idpair *idmap)
 {
+	bool equal;
+
 	if (!(rold->live & REG_LIVE_READ))
 		/* explored state didn't use this */
 		return true;
 
-	if (memcmp(rold, rcur, offsetof(struct bpf_reg_state, live)) == 0)
+	equal = memcmp(rold, rcur, offsetof(struct bpf_reg_state, frameno)) == 0;
+
+	if (rold->type == PTR_TO_STACK)
+		/* two stack pointers are equal only if they're pointing to
+		 * the same stack frame, since fp-8 in foo != fp-8 in bar
+		 */
+		return equal && rold->frameno == rcur->frameno;
+
+	if (equal)
 		return true;
 
 	if (rold->type == NOT_INIT)
@@ -3568,7 +4243,6 @@ static bool regsafe(struct bpf_reg_state *rold, struct bpf_reg_state *rcur,
 		       tnum_in(rold->var_off, rcur->var_off);
 	case PTR_TO_CTX:
 	case CONST_PTR_TO_MAP:
-	case PTR_TO_STACK:
 	case PTR_TO_PACKET_END:
 		/* Only valid matches are exact, which memcmp() above
 		 * would have accepted
@@ -3583,8 +4257,8 @@ static bool regsafe(struct bpf_reg_state *rold, struct bpf_reg_state *rcur,
 	return false;
 }
 
-static bool stacksafe(struct bpf_verifier_state *old,
-		      struct bpf_verifier_state *cur,
+static bool stacksafe(struct bpf_func_state *old,
+		      struct bpf_func_state *cur,
 		      struct idpair *idmap)
 {
 	int i, spi;
@@ -3602,8 +4276,19 @@ static bool stacksafe(struct bpf_verifier_state *old,
 	for (i = 0; i < old->allocated_stack; i++) {
 		spi = i / BPF_REG_SIZE;
 
+		if (!(old->stack[spi].spilled_ptr.live & REG_LIVE_READ))
+			/* explored state didn't use this */
+			continue;
+
 		if (old->stack[spi].slot_type[i % BPF_REG_SIZE] == STACK_INVALID)
 			continue;
+		/* if old state was safe with misc data in the stack
+		 * it will be safe with zero-initialized stack.
+		 * The opposite is not true
+		 */
+		if (old->stack[spi].slot_type[i % BPF_REG_SIZE] == STACK_MISC &&
+		    cur->stack[spi].slot_type[i % BPF_REG_SIZE] == STACK_ZERO)
+			continue;
 		if (old->stack[spi].slot_type[i % BPF_REG_SIZE] !=
 		    cur->stack[spi].slot_type[i % BPF_REG_SIZE])
 			/* Ex: old explored (safe) state has STACK_SPILL in
@@ -3660,9 +4345,8 @@ static bool stacksafe(struct bpf_verifier_state *old,
  * whereas register type in current state is meaningful, it means that
  * the current state will reach 'bpf_exit' instruction safely
  */
-static bool states_equal(struct bpf_verifier_env *env,
-			 struct bpf_verifier_state *old,
-			 struct bpf_verifier_state *cur)
+static bool func_states_equal(struct bpf_func_state *old,
+			      struct bpf_func_state *cur)
 {
 	struct idpair *idmap;
 	bool ret = false;
@@ -3686,71 +4370,72 @@ out_free:
 	return ret;
 }
 
+static bool states_equal(struct bpf_verifier_env *env,
+			 struct bpf_verifier_state *old,
+			 struct bpf_verifier_state *cur)
+{
+	int i;
+
+	if (old->curframe != cur->curframe)
+		return false;
+
+	/* for states to be equal callsites have to be the same
+	 * and all frame states need to be equivalent
+	 */
+	for (i = 0; i <= old->curframe; i++) {
+		if (old->frame[i]->callsite != cur->frame[i]->callsite)
+			return false;
+		if (!func_states_equal(old->frame[i], cur->frame[i]))
+			return false;
+	}
+	return true;
+}
+
 /* A write screens off any subsequent reads; but write marks come from the
- * straight-line code between a state and its parent.  When we arrive at a
- * jump target (in the first iteration of the propagate_liveness() loop),
- * we didn't arrive by the straight-line code, so read marks in state must
- * propagate to parent regardless of state's write marks.
+ * straight-line code between a state and its parent.  When we arrive at an
+ * equivalent state (jump target or such) we didn't arrive by the straight-line
+ * code, so read marks in the state must propagate to the parent regardless
+ * of the state's write marks. That's what 'parent == state->parent' comparison
+ * in mark_reg_read() and mark_stack_slot_read() is for.
  */
-static bool do_propagate_liveness(const struct bpf_verifier_state *state,
-				  struct bpf_verifier_state *parent)
+static int propagate_liveness(struct bpf_verifier_env *env,
+			      const struct bpf_verifier_state *vstate,
+			      struct bpf_verifier_state *vparent)
 {
-	bool writes = parent == state->parent; /* Observe write marks */
-	bool touched = false; /* any changes made? */
-	int i;
+	int i, frame, err = 0;
+	struct bpf_func_state *state, *parent;
 
-	if (!parent)
-		return touched;
+	if (vparent->curframe != vstate->curframe) {
+		WARN(1, "propagate_live: parent frame %d current frame %d\n",
+		     vparent->curframe, vstate->curframe);
+		return -EFAULT;
+	}
 	/* Propagate read liveness of registers... */
 	BUILD_BUG_ON(BPF_REG_FP + 1 != MAX_BPF_REG);
 	/* We don't need to worry about FP liveness because it's read-only */
 	for (i = 0; i < BPF_REG_FP; i++) {
-		if (parent->regs[i].live & REG_LIVE_READ)
+		if (vparent->frame[vparent->curframe]->regs[i].live & REG_LIVE_READ)
 			continue;
-		if (writes && (state->regs[i].live & REG_LIVE_WRITTEN))
-			continue;
-		if (state->regs[i].live & REG_LIVE_READ) {
-			parent->regs[i].live |= REG_LIVE_READ;
-			touched = true;
+		if (vstate->frame[vstate->curframe]->regs[i].live & REG_LIVE_READ) {
+			err = mark_reg_read(env, vstate, vparent, i);
+			if (err)
+				return err;
 		}
 	}
+
 	/* ... and stack slots */
-	for (i = 0; i < state->allocated_stack / BPF_REG_SIZE &&
-		    i < parent->allocated_stack / BPF_REG_SIZE; i++) {
-		if (parent->stack[i].slot_type[0] != STACK_SPILL)
-			continue;
-		if (state->stack[i].slot_type[0] != STACK_SPILL)
-			continue;
-		if (parent->stack[i].spilled_ptr.live & REG_LIVE_READ)
-			continue;
-		if (writes &&
-		    (state->stack[i].spilled_ptr.live & REG_LIVE_WRITTEN))
-			continue;
-		if (state->stack[i].spilled_ptr.live & REG_LIVE_READ) {
-			parent->stack[i].spilled_ptr.live |= REG_LIVE_READ;
-			touched = true;
+	for (frame = 0; frame <= vstate->curframe; frame++) {
+		state = vstate->frame[frame];
+		parent = vparent->frame[frame];
+		for (i = 0; i < state->allocated_stack / BPF_REG_SIZE &&
+			    i < parent->allocated_stack / BPF_REG_SIZE; i++) {
+			if (parent->stack[i].spilled_ptr.live & REG_LIVE_READ)
+				continue;
+			if (state->stack[i].spilled_ptr.live & REG_LIVE_READ)
+				mark_stack_slot_read(env, vstate, vparent, i, frame);
 		}
 	}
-	return touched;
-}
-
-/* "parent" is "a state from which we reach the current state", but initially
- * it is not the state->parent (i.e. "the state whose straight-line code leads
- * to the current state"), instead it is the state that happened to arrive at
- * a (prunable) equivalent of the current state.  See comment above
- * do_propagate_liveness() for consequences of this.
- * This function is just a more efficient way of calling mark_reg_read() or
- * mark_stack_slot_read() on each reg in "parent" that is read in "state",
- * though it requires that parent != state->parent in the call arguments.
- */
-static void propagate_liveness(const struct bpf_verifier_state *state,
-			       struct bpf_verifier_state *parent)
-{
-	while (do_propagate_liveness(state, parent)) {
-		/* Something changed, so we need to feed those changes onward */
-		state = parent;
-		parent = state->parent;
-	}
+	return err;
 }
 
 static int is_state_visited(struct bpf_verifier_env *env, int insn_idx)
@@ -3758,7 +4443,7 @@ static int is_state_visited(struct bpf_verifier_env *env, int insn_idx)
 	struct bpf_verifier_state_list *new_sl;
 	struct bpf_verifier_state_list *sl;
 	struct bpf_verifier_state *cur = env->cur_state;
-	int i, err;
+	int i, j, err;
 
 	sl = env->explored_states[insn_idx];
 	if (!sl)
@@ -3779,7 +4464,9 @@ static int is_state_visited(struct bpf_verifier_env *env, int insn_idx)
 			 * they'll be immediately forgotten as we're pruning
 			 * this state and will pop a new one.
 			 */
-			propagate_liveness(&sl->state, cur);
+			err = propagate_liveness(env, &sl->state, cur);
+			if (err)
+				return err;
 			return 1;
 		}
 		sl = sl->next;
@@ -3787,9 +4474,10 @@ static int is_state_visited(struct bpf_verifier_env *env, int insn_idx)
 
 	/* there were no equivalent states, remember current one.
 	 * technically the current state is not proven to be safe yet,
-	 * but it will either reach bpf_exit (which means it's safe) or
-	 * it will be rejected. Since there are no loops, we won't be
-	 * seeing this 'insn_idx' instruction again on the way to bpf_exit
+	 * but it will either reach outer most bpf_exit (which means it's safe)
+	 * or it will be rejected. Since there are no loops, we won't be
+	 * seeing this tuple (frame[0].callsite, frame[1].callsite, .. insn_idx)
+	 * again on the way to bpf_exit
 	 */
 	new_sl = kzalloc(sizeof(struct bpf_verifier_state_list), GFP_KERNEL);
 	if (!new_sl)
@@ -3813,19 +4501,15 @@ static int is_state_visited(struct bpf_verifier_env *env, int insn_idx)
 	 * explored_states can get read marks.)
 	 */
 	for (i = 0; i < BPF_REG_FP; i++)
-		cur->regs[i].live = REG_LIVE_NONE;
-	for (i = 0; i < cur->allocated_stack / BPF_REG_SIZE; i++)
-		if (cur->stack[i].slot_type[0] == STACK_SPILL)
-			cur->stack[i].spilled_ptr.live = REG_LIVE_NONE;
-	return 0;
-}
+		cur->frame[cur->curframe]->regs[i].live = REG_LIVE_NONE;
 
-static int ext_analyzer_insn_hook(struct bpf_verifier_env *env,
-				  int insn_idx, int prev_insn_idx)
-{
-	if (env->dev_ops && env->dev_ops->insn_hook)
-		return env->dev_ops->insn_hook(env, insn_idx, prev_insn_idx);
+	/* all stack frames are accessible from callee, clear them all */
+	for (j = 0; j <= cur->curframe; j++) {
+		struct bpf_func_state *frame = cur->frame[j];
 
+		for (i = 0; i < frame->allocated_stack / BPF_REG_SIZE; i++)
+			frame->stack[i].spilled_ptr.live = REG_LIVE_NONE;
+	}
 	return 0;
 }
 
@@ -3834,7 +4518,7 @@ static int do_check(struct bpf_verifier_env *env)
 	struct bpf_verifier_state *state;
 	struct bpf_insn *insns = env->prog->insnsi;
 	struct bpf_reg_state *regs;
-	int insn_cnt = env->prog->len;
+	int insn_cnt = env->prog->len, i;
 	int insn_idx, prev_insn_idx = 0;
 	int insn_processed = 0;
 	bool do_print_state = false;
@@ -3842,9 +4526,18 @@ static int do_check(struct bpf_verifier_env *env)
 	state = kzalloc(sizeof(struct bpf_verifier_state), GFP_KERNEL);
 	if (!state)
 		return -ENOMEM;
-	env->cur_state = state;
-	init_reg_state(env, state->regs);
+	state->curframe = 0;
 	state->parent = NULL;
+	state->frame[0] = kzalloc(sizeof(struct bpf_func_state), GFP_KERNEL);
+	if (!state->frame[0]) {
+		kfree(state);
+		return -ENOMEM;
+	}
+	env->cur_state = state;
+	init_func_state(env, state->frame[0],
+			BPF_MAIN_FUNC /* callsite */,
+			0 /* frameno */,
+			0 /* subprogno, zero == main subprog */);
 	insn_idx = 0;
 	for (;;) {
 		struct bpf_insn *insn;
@@ -3891,19 +4584,25 @@ static int do_check(struct bpf_verifier_env *env)
 			else
 				verbose(env, "\nfrom %d to %d:",
 					prev_insn_idx, insn_idx);
-			print_verifier_state(env, state);
+			print_verifier_state(env, state->frame[state->curframe]);
 			do_print_state = false;
 		}
 
 		if (env->log.level) {
+			const struct bpf_insn_cbs cbs = {
+				.cb_print	= verbose,
+			};
+
 			verbose(env, "%d: ", insn_idx);
-			print_bpf_insn(verbose, env, insn,
-				       env->allow_ptr_leaks);
+			print_bpf_insn(&cbs, env, insn, env->allow_ptr_leaks);
 		}
 
-		err = ext_analyzer_insn_hook(env, insn_idx, prev_insn_idx);
-		if (err)
-			return err;
+		if (bpf_prog_is_dev_bound(env->prog->aux)) {
+			err = bpf_prog_offload_verify_insn(env, insn_idx,
+							   prev_insn_idx);
+			if (err)
+				return err;
+		}
 
 		regs = cur_regs(env);
 		env->insn_aux_data[insn_idx].seen = true;
@@ -4030,13 +4729,17 @@ static int do_check(struct bpf_verifier_env *env)
 			if (opcode == BPF_CALL) {
 				if (BPF_SRC(insn->code) != BPF_K ||
 				    insn->off != 0 ||
-				    insn->src_reg != BPF_REG_0 ||
+				    (insn->src_reg != BPF_REG_0 &&
+				     insn->src_reg != BPF_PSEUDO_CALL) ||
 				    insn->dst_reg != BPF_REG_0) {
 					verbose(env, "BPF_CALL uses reserved fields\n");
 					return -EINVAL;
 				}
 
-				err = check_call(env, insn->imm, insn_idx);
+				if (insn->src_reg == BPF_PSEUDO_CALL)
+					err = check_func_call(env, insn, &insn_idx);
+				else
+					err = check_helper_call(env, insn->imm, insn_idx);
 				if (err)
 					return err;
 
@@ -4061,6 +4764,16 @@ static int do_check(struct bpf_verifier_env *env)
 					return -EINVAL;
 				}
 
+				if (state->curframe) {
+					/* exit from nested function */
+					prev_insn_idx = insn_idx;
+					err = prepare_func_exit(env, &insn_idx);
+					if (err)
+						return err;
+					do_print_state = true;
+					continue;
+				}
+
 				/* eBPF calling convention is such that R0 is used
 				 * to return the value from eBPF program.
 				 * Make sure that it's readable at this time
@@ -4121,8 +4834,17 @@ process_bpf_exit:
 		insn_idx++;
 	}
 
-	verbose(env, "processed %d insns, stack depth %d\n", insn_processed,
-		env->prog->aux->stack_depth);
+	verbose(env, "processed %d insns (limit %d), stack depth ",
+		insn_processed, BPF_COMPLEXITY_LIMIT_INSNS);
+	for (i = 0; i < env->subprog_cnt + 1; i++) {
+		u32 depth = env->subprog_stack_depth[i];
+
+		verbose(env, "%d", depth);
+		if (i + 1 < env->subprog_cnt + 1)
+			verbose(env, "+");
+	}
+	verbose(env, "\n");
+	env->prog->aux->stack_depth = env->subprog_stack_depth[0];
 	return 0;
 }
 
@@ -4155,6 +4877,13 @@ static int check_map_prog_compatibility(struct bpf_verifier_env *env,
 			return -EINVAL;
 		}
 	}
+
+	if ((bpf_prog_is_dev_bound(prog->aux) || bpf_map_is_dev_bound(map)) &&
+	    !bpf_offload_dev_match(prog, map)) {
+		verbose(env, "offload device mismatch between prog and map\n");
+		return -EINVAL;
+	}
+
 	return 0;
 }
 
@@ -4252,6 +4981,13 @@ static int replace_map_fd_with_map_ptr(struct bpf_verifier_env *env)
 next_insn:
 			insn++;
 			i++;
+			continue;
+		}
+
+		/* Basic sanity check before we invest more work here. */
+		if (!bpf_opcode_in_insntable(insn->code)) {
+			verbose(env, "unknown opcode %02x\n", insn->code);
+			return -EINVAL;
 		}
 	}
 
@@ -4308,6 +5044,19 @@ static int adjust_insn_aux_data(struct bpf_verifier_env *env, u32 prog_len,
 	return 0;
 }
 
+static void adjust_subprog_starts(struct bpf_verifier_env *env, u32 off, u32 len)
+{
+	int i;
+
+	if (len == 1)
+		return;
+	for (i = 0; i < env->subprog_cnt; i++) {
+		if (env->subprog_starts[i] < off)
+			continue;
+		env->subprog_starts[i] += len - 1;
+	}
+}
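
Worked example: patching one insn at offset 10 with a 3-insn sequence
(len == 3) grows the image by len - 1 == 2, so every recorded subprog start
at or beyond offset 10 shifts up by 2, while starts before the patch point
stay put.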
+
 static struct bpf_prog *bpf_patch_insn_data(struct bpf_verifier_env *env, u32 off,
 					    const struct bpf_insn *patch, u32 len)
 {
@@ -4318,17 +5067,25 @@ static struct bpf_prog *bpf_patch_insn_data(struct bpf_verifier_env *env, u32 of
 		return NULL;
 	if (adjust_insn_aux_data(env, new_prog->len, off, len))
 		return NULL;
+	adjust_subprog_starts(env, off, len);
 	return new_prog;
 }
 
-/* The verifier does more data flow analysis than llvm and will not explore
- * branches that are dead at run time. Malicious programs can have dead code
- * too. Therefore replace all dead at-run-time code with nops.
+/* The verifier does more data flow analysis than llvm and will not
+ * explore branches that are dead at run time. Malicious programs can
+ * have dead code too. Therefore replace all dead at-run-time code
+ * with 'ja -1'.
+ *
+ * Plain nops are not optimal: if they sat at the end of the program
+ * and another bug made us jump there, we would execute beyond program
+ * memory. Returning an exception code also wouldn't work, since the
+ * dead code could be located inside subprogs.
  */
 static void sanitize_dead_code(struct bpf_verifier_env *env)
 {
 	struct bpf_insn_aux_data *aux_data = env->insn_aux_data;
-	struct bpf_insn nop = BPF_MOV64_REG(BPF_REG_0, BPF_REG_0);
+	struct bpf_insn trap = BPF_JMP_IMM(BPF_JA, 0, 0, -1);
 	struct bpf_insn *insn = env->prog->insnsi;
 	const int insn_cnt = env->prog->len;
 	int i;
@@ -4336,7 +5093,7 @@ static void sanitize_dead_code(struct bpf_verifier_env *env)
 	for (i = 0; i < insn_cnt; i++) {
 		if (aux_data[i].seen)
 			continue;
-		memcpy(insn + i, &nop, sizeof(nop));
+		memcpy(insn + i, &trap, sizeof(trap));
 	}
 }
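
On why 'ja -1' is a safe filler (my reading of the semantics, not spelled out
in the patch): the pc after a jump is pc + off + 1, so off == -1 lands back
on the same insn. Buggy control flow into sanitized dead code therefore spins
in place instead of executing past the end of the image.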
 
@@ -4452,6 +5209,180 @@ static int convert_ctx_accesses(struct bpf_verifier_env *env)
 	return 0;
 }
 
+static int jit_subprogs(struct bpf_verifier_env *env)
+{
+	struct bpf_prog *prog = env->prog, **func, *tmp;
+	int i, j, subprog_start, subprog_end = 0, len, subprog;
+	struct bpf_insn *insn;
+	void *old_bpf_func;
+	int err = -ENOMEM;
+
+	if (env->subprog_cnt == 0)
+		return 0;
+
+	for (i = 0, insn = prog->insnsi; i < prog->len; i++, insn++) {
+		if (insn->code != (BPF_JMP | BPF_CALL) ||
+		    insn->src_reg != BPF_PSEUDO_CALL)
+			continue;
+		subprog = find_subprog(env, i + insn->imm + 1);
+		if (subprog < 0) {
+			WARN_ONCE(1, "verifier bug. No program starts at insn %d\n",
+				  i + insn->imm + 1);
+			return -EFAULT;
+		}
+		/* temporarily remember subprog id inside insn instead of
+		 * aux_data, since next loop will split up all insns into funcs
+		 */
+		insn->off = subprog + 1;
+		/* remember original imm in case JIT fails and fallback
+		 * to interpreter will be needed
+		 */
+		env->insn_aux_data[i].call_imm = insn->imm;
+		/* point imm to __bpf_call_base+1 from JITs point of view */
+		insn->imm = 1;
+	}
+
+	func = kzalloc(sizeof(prog) * (env->subprog_cnt + 1), GFP_KERNEL);
+	if (!func)
+		return -ENOMEM;
+
+	for (i = 0; i <= env->subprog_cnt; i++) {
+		subprog_start = subprog_end;
+		if (env->subprog_cnt == i)
+			subprog_end = prog->len;
+		else
+			subprog_end = env->subprog_starts[i];
+
+		len = subprog_end - subprog_start;
+		func[i] = bpf_prog_alloc(bpf_prog_size(len), GFP_USER);
+		if (!func[i])
+			goto out_free;
+		memcpy(func[i]->insnsi, &prog->insnsi[subprog_start],
+		       len * sizeof(struct bpf_insn));
+		func[i]->type = prog->type;
+		func[i]->len = len;
+		if (bpf_prog_calc_tag(func[i]))
+			goto out_free;
+		func[i]->is_func = 1;
+		/* Use bpf_prog_F_tag to indicate functions in stack traces.
+		 * Long term this would need debug info to populate names
+		 */
+		func[i]->aux->name[0] = 'F';
+		func[i]->aux->stack_depth = env->subprog_stack_depth[i];
+		func[i]->jit_requested = 1;
+		func[i] = bpf_int_jit_compile(func[i]);
+		if (!func[i]->jited) {
+			err = -ENOTSUPP;
+			goto out_free;
+		}
+		cond_resched();
+	}
+	/* at this point all bpf functions were successfully JITed.
+	 * Now populate all bpf_calls with correct addresses and
+	 * run the last pass of the JIT
+	 */
+	for (i = 0; i <= env->subprog_cnt; i++) {
+		insn = func[i]->insnsi;
+		for (j = 0; j < func[i]->len; j++, insn++) {
+			if (insn->code != (BPF_JMP | BPF_CALL) ||
+			    insn->src_reg != BPF_PSEUDO_CALL)
+				continue;
+			subprog = insn->off;
+			insn->off = 0;
+			insn->imm = (u64 (*)(u64, u64, u64, u64, u64))
+				func[subprog]->bpf_func -
+				__bpf_call_base;
+		}
+	}
+	for (i = 0; i <= env->subprog_cnt; i++) {
+		old_bpf_func = func[i]->bpf_func;
+		tmp = bpf_int_jit_compile(func[i]);
+		if (tmp != func[i] || func[i]->bpf_func != old_bpf_func) {
+			verbose(env, "JIT doesn't support bpf-to-bpf calls\n");
+			err = -EFAULT;
+			goto out_free;
+		}
+		cond_resched();
+	}
+
+	/* finally lock the prog and jit images for all functions and
+	 * populate kallsyms
+	 */
+	for (i = 0; i <= env->subprog_cnt; i++) {
+		bpf_prog_lock_ro(func[i]);
+		bpf_prog_kallsyms_add(func[i]);
+	}
+
+	/* Last step: make the now-unused interpreter insns from the
+	 * main prog consistent for later dump requests, so they look
+	 * the same as if the prog had only ever been interpreted.
+	 */
+	for (i = 0, insn = prog->insnsi; i < prog->len; i++, insn++) {
+		unsigned long addr;
+
+		if (insn->code != (BPF_JMP | BPF_CALL) ||
+		    insn->src_reg != BPF_PSEUDO_CALL)
+			continue;
+		insn->off = env->insn_aux_data[i].call_imm;
+		subprog = find_subprog(env, i + insn->off + 1);
+		addr  = (unsigned long)func[subprog + 1]->bpf_func;
+		addr &= PAGE_MASK;
+		insn->imm = (u64 (*)(u64, u64, u64, u64, u64))
+			    addr - __bpf_call_base;
+	}
+
+	prog->jited = 1;
+	prog->bpf_func = func[0]->bpf_func;
+	prog->aux->func = func;
+	prog->aux->func_cnt = env->subprog_cnt + 1;
+	return 0;
+out_free:
+	for (i = 0; i <= env->subprog_cnt; i++)
+		if (func[i])
+			bpf_jit_free(func[i]);
+	kfree(func);
+	/* cleanup main prog to be interpreted */
+	prog->jit_requested = 0;
+	for (i = 0, insn = prog->insnsi; i < prog->len; i++, insn++) {
+		if (insn->code != (BPF_JMP | BPF_CALL) ||
+		    insn->src_reg != BPF_PSEUDO_CALL)
+			continue;
+		insn->off = 0;
+		insn->imm = env->insn_aux_data[i].call_imm;
+	}
+	return err;
+}
+
+static int fixup_call_args(struct bpf_verifier_env *env)
+{
+#ifndef CONFIG_BPF_JIT_ALWAYS_ON
+	struct bpf_prog *prog = env->prog;
+	struct bpf_insn *insn = prog->insnsi;
+	int i, depth;
+#endif
+	int err;
+
+	err = 0;
+	if (env->prog->jit_requested) {
+		err = jit_subprogs(env);
+		if (err == 0)
+			return 0;
+	}
+#ifndef CONFIG_BPF_JIT_ALWAYS_ON
+	for (i = 0; i < prog->len; i++, insn++) {
+		if (insn->code != (BPF_JMP | BPF_CALL) ||
+		    insn->src_reg != BPF_PSEUDO_CALL)
+			continue;
+		depth = get_callee_stack_depth(env, insn, i);
+		if (depth < 0)
+			return depth;
+		bpf_patch_call_args(insn, depth);
+	}
+	err = 0;
+#endif
+	return err;
+}
+
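For orientation, the kind of image jit_subprogs() and fixup_call_args() operate on can be sketched with the insn macros from include/linux/filter.h; this is an illustrative, hypothetical two-function program, not part of the patch. For a BPF_PSEUDO_CALL the imm field is a relative offset, so the callee starts at call index + imm + 1:

	static const struct bpf_insn two_funcs[] = {
		/* main: r1 = 2; call pc+1; exit */
		BPF_MOV64_IMM(BPF_REG_1, 2),
		BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, BPF_PSEUDO_CALL, 0, 1),
		BPF_EXIT_INSN(),
		/* subprog (insn 3): r0 = r1 + 1; exit */
		BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
		BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 1),
		BPF_EXIT_INSN(),
	};

Here the call at insn 1 has imm == 1, so the subprog starts at insn 3; jit_subprogs() would split this into two bpf_progs of three insns each.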
 /* fixup insn->imm field of bpf_call instructions
  * and inline eligible helpers as explicit sequence of BPF instructions
  *
@@ -4469,15 +5400,37 @@ static int fixup_bpf_calls(struct bpf_verifier_env *env)
 	int i, cnt, delta = 0;
 
 	for (i = 0; i < insn_cnt; i++, insn++) {
-		if (insn->code == (BPF_ALU | BPF_MOD | BPF_X) ||
+		if (insn->code == (BPF_ALU64 | BPF_MOD | BPF_X) ||
+		    insn->code == (BPF_ALU64 | BPF_DIV | BPF_X) ||
+		    insn->code == (BPF_ALU | BPF_MOD | BPF_X) ||
 		    insn->code == (BPF_ALU | BPF_DIV | BPF_X)) {
-			/* due to JIT bugs clear upper 32-bits of src register
-			 * before div/mod operation
-			 */
-			insn_buf[0] = BPF_MOV32_REG(insn->src_reg, insn->src_reg);
-			insn_buf[1] = *insn;
-			cnt = 2;
-			new_prog = bpf_patch_insn_data(env, i + delta, insn_buf, cnt);
+			bool is64 = BPF_CLASS(insn->code) == BPF_ALU64;
+			struct bpf_insn mask_and_div[] = {
+				BPF_MOV32_REG(insn->src_reg, insn->src_reg),
+				/* Rx div 0 -> 0 */
+				BPF_JMP_IMM(BPF_JNE, insn->src_reg, 0, 2),
+				BPF_ALU32_REG(BPF_XOR, insn->dst_reg, insn->dst_reg),
+				BPF_JMP_IMM(BPF_JA, 0, 0, 1),
+				*insn,
+			};
+			struct bpf_insn mask_and_mod[] = {
+				BPF_MOV32_REG(insn->src_reg, insn->src_reg),
+				/* Rx mod 0 -> Rx */
+				BPF_JMP_IMM(BPF_JEQ, insn->src_reg, 0, 1),
+				*insn,
+			};
+			struct bpf_insn *patchlet;
+
+			if (insn->code == (BPF_ALU64 | BPF_DIV | BPF_X) ||
+			    insn->code == (BPF_ALU | BPF_DIV | BPF_X)) {
+				patchlet = mask_and_div + (is64 ? 1 : 0);
+				cnt = ARRAY_SIZE(mask_and_div) - (is64 ? 1 : 0);
+			} else {
+				patchlet = mask_and_mod + (is64 ? 1 : 0);
+				cnt = ARRAY_SIZE(mask_and_mod) - (is64 ? 1 : 0);
+			}
+
+			new_prog = bpf_patch_insn_data(env, i + delta, patchlet, cnt);
 			if (!new_prog)
 				return -ENOMEM;
 
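As a cross-check, the runtime semantics enforced by the patchlets above can be sketched in plain C (an illustrative helper using kernel integer types, not kernel code; the 32-bit truncation of the result on ALU32 ops is elided):

	static u64 div_mod_x_semantics(u64 dst, u64 src, bool is64, bool is_mod)
	{
		if (!is64)
			src = (u32)src;	/* the BPF_MOV32_REG masking insn */
		if (!src)
			return is_mod ? dst : 0; /* Rx mod 0 -> Rx, Rx div 0 -> 0 */
		return is_mod ? dst % src : dst / src;
	}

For the ALU64 forms the leading masking insn is skipped (patchlet + 1), which is exactly the is64 branch above.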
@@ -4489,11 +5442,15 @@ static int fixup_bpf_calls(struct bpf_verifier_env *env)
 
 		if (insn->code != (BPF_JMP | BPF_CALL))
 			continue;
+		if (insn->src_reg == BPF_PSEUDO_CALL)
+			continue;
 
 		if (insn->imm == BPF_FUNC_get_route_realm)
 			prog->dst_needed = 1;
 		if (insn->imm == BPF_FUNC_get_prandom_u32)
 			bpf_user_rnd_init_once();
+		if (insn->imm == BPF_FUNC_override_return)
+			prog->kprobe_override = 1;
 		if (insn->imm == BPF_FUNC_tail_call) {
 			/* If we tail call into other programs, we
 			 * cannot make any assumptions since they can
@@ -4545,7 +5502,7 @@ static int fixup_bpf_calls(struct bpf_verifier_env *env)
 		/* BPF_EMIT_CALL() assumptions in some of the map_gen_lookup
 		 * handlers are currently limited to 64 bit only.
 		 */
-		if (ebpf_jit_enabled() && BITS_PER_LONG == 64 &&
+		if (prog->jit_requested && BITS_PER_LONG == 64 &&
 		    insn->imm == BPF_FUNC_map_lookup_elem) {
 			map_ptr = env->insn_aux_data[i + delta].map_ptr;
 			if (map_ptr == BPF_MAP_PTR_POISON ||
@@ -4680,7 +5637,7 @@ int bpf_check(struct bpf_prog **prog, union bpf_attr *attr)
 	if (!IS_ENABLED(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS))
 		env->strict_alignment = true;
 
-	if (env->prog->aux->offload) {
+	if (bpf_prog_is_dev_bound(env->prog->aux)) {
 		ret = bpf_prog_offload_verifier_prep(env);
 		if (ret)
 			goto err_unlock;
@@ -4697,12 +5654,12 @@ int bpf_check(struct bpf_prog **prog, union bpf_attr *attr)
 	if (!env->explored_states)
 		goto skip_full_check;
 
+	env->allow_ptr_leaks = capable(CAP_SYS_ADMIN);
+
 	ret = check_cfg(env);
 	if (ret < 0)
 		goto skip_full_check;
 
-	env->allow_ptr_leaks = capable(CAP_SYS_ADMIN);
-
 	ret = do_check(env);
 	if (env->cur_state) {
 		free_verifier_state(env->cur_state, true);
@@ -4717,12 +5674,18 @@ skip_full_check:
 		sanitize_dead_code(env);
 
 	if (ret == 0)
+		ret = check_max_stack_depth(env);
+
+	if (ret == 0)
 		/* program is valid, convert *(u32*)(ctx + off) accesses */
 		ret = convert_ctx_accesses(env);
 
 	if (ret == 0)
 		ret = fixup_bpf_calls(env);
 
+	if (ret == 0)
+		ret = fixup_call_args(env);
+
 	if (log->level && bpf_verifier_log_full(log))
 		ret = -ENOSPC;
 	if (log->level && !log->ubuf) {
diff --git a/kernel/events/core.c b/kernel/events/core.c
index d0d9bfb47d2e..f0549e79978b 100644
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -4732,6 +4732,9 @@ static long _perf_ioctl(struct perf_event *event, unsigned int cmd, unsigned lon
 		rcu_read_unlock();
 		return 0;
 	}
+
+	case PERF_EVENT_IOC_QUERY_BPF:
+		return perf_event_query_prog_array(event, (void __user *)arg);
 	default:
 		return -ENOTTY;
 	}
@@ -8100,6 +8103,13 @@ static int perf_event_set_bpf_prog(struct perf_event *event, u32 prog_fd)
 		return -EINVAL;
 	}
 
+	/* Kprobe override only works for kprobes, not uprobes. */
+	if (prog->kprobe_override &&
+	    !(event->tp_event->flags & TRACE_EVENT_FL_KPROBE)) {
+		bpf_prog_put(prog);
+		return -EINVAL;
+	}
+
 	if (is_tracepoint || is_syscall_tp) {
 		int off = trace_event_get_offsets(event->tp_event);
 
diff --git a/kernel/fail_function.c b/kernel/fail_function.c
new file mode 100644
index 000000000000..21b0122cb39c
--- /dev/null
+++ b/kernel/fail_function.c
@@ -0,0 +1,349 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * fail_function.c: Function-based error injection
+ */
+#include <linux/error-injection.h>
+#include <linux/debugfs.h>
+#include <linux/fault-inject.h>
+#include <linux/kallsyms.h>
+#include <linux/kprobes.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/slab.h>
+#include <linux/uaccess.h>
+
+static int fei_kprobe_handler(struct kprobe *kp, struct pt_regs *regs);
+
+struct fei_attr {
+	struct list_head list;
+	struct kprobe kp;
+	unsigned long retval;
+};
+static DEFINE_MUTEX(fei_lock);
+static LIST_HEAD(fei_attr_list);
+static DECLARE_FAULT_ATTR(fei_fault_attr);
+static struct dentry *fei_debugfs_dir;
+
+static unsigned long adjust_error_retval(unsigned long addr, unsigned long retv)
+{
+	switch (get_injectable_error_type(addr)) {
+	case EI_ETYPE_NULL:
+		if (retv != 0)
+			return 0;
+		break;
+	case EI_ETYPE_ERRNO:
+		if (retv < (unsigned long)-MAX_ERRNO)
+			return (unsigned long)-EINVAL;
+		break;
+	case EI_ETYPE_ERRNO_NULL:
+		if (retv != 0 && retv < (unsigned long)-MAX_ERRNO)
+			return (unsigned long)-EINVAL;
+		break;
+	}
+
+	return retv;
+}
+
+static struct fei_attr *fei_attr_new(const char *sym, unsigned long addr)
+{
+	struct fei_attr *attr;
+
+	attr = kzalloc(sizeof(*attr), GFP_KERNEL);
+	if (attr) {
+		attr->kp.symbol_name = kstrdup(sym, GFP_KERNEL);
+		if (!attr->kp.symbol_name) {
+			kfree(attr);
+			return NULL;
+		}
+		attr->kp.pre_handler = fei_kprobe_handler;
+		attr->retval = adjust_error_retval(addr, 0);
+		INIT_LIST_HEAD(&attr->list);
+	}
+	return attr;
+}
+
+static void fei_attr_free(struct fei_attr *attr)
+{
+	if (attr) {
+		kfree(attr->kp.symbol_name);
+		kfree(attr);
+	}
+}
+
+static struct fei_attr *fei_attr_lookup(const char *sym)
+{
+	struct fei_attr *attr;
+
+	list_for_each_entry(attr, &fei_attr_list, list) {
+		if (!strcmp(attr->kp.symbol_name, sym))
+			return attr;
+	}
+
+	return NULL;
+}
+
+static bool fei_attr_is_valid(struct fei_attr *_attr)
+{
+	struct fei_attr *attr;
+
+	list_for_each_entry(attr, &fei_attr_list, list) {
+		if (attr == _attr)
+			return true;
+	}
+
+	return false;
+}
+
+static int fei_retval_set(void *data, u64 val)
+{
+	struct fei_attr *attr = data;
+	unsigned long retv = (unsigned long)val;
+	int err = 0;
+
+	mutex_lock(&fei_lock);
+	/*
+	 * Since this operation can be done after the retval file has been
+	 * removed, it is safer to check that the attr is still valid
+	 * before accessing its members.
+	 */
+	if (!fei_attr_is_valid(attr)) {
+		err = -ENOENT;
+		goto out;
+	}
+
+	if (attr->kp.addr) {
+		if (adjust_error_retval((unsigned long)attr->kp.addr,
+					val) != retv)
+			err = -EINVAL;
+	}
+	if (!err)
+		attr->retval = val;
+out:
+	mutex_unlock(&fei_lock);
+
+	return err;
+}
+
+static int fei_retval_get(void *data, u64 *val)
+{
+	struct fei_attr *attr = data;
+	int err = 0;
+
+	mutex_lock(&fei_lock);
+	/* Here we also validate @attr to ensure it still exists. */
+	if (!fei_attr_is_valid(attr))
+		err = -ENOENT;
+	else
+		*val = attr->retval;
+	mutex_unlock(&fei_lock);
+
+	return err;
+}
+DEFINE_DEBUGFS_ATTRIBUTE(fei_retval_ops, fei_retval_get, fei_retval_set,
+			 "%llx\n");
+
+static int fei_debugfs_add_attr(struct fei_attr *attr)
+{
+	struct dentry *dir;
+
+	dir = debugfs_create_dir(attr->kp.symbol_name, fei_debugfs_dir);
+	if (!dir)
+		return -ENOMEM;
+
+	if (!debugfs_create_file("retval", 0600, dir, attr, &fei_retval_ops)) {
+		debugfs_remove_recursive(dir);
+		return -ENOMEM;
+	}
+
+	return 0;
+}
+
+static void fei_debugfs_remove_attr(struct fei_attr *attr)
+{
+	struct dentry *dir;
+
+	dir = debugfs_lookup(attr->kp.symbol_name, fei_debugfs_dir);
+	if (dir)
+		debugfs_remove_recursive(dir);
+}
+
+static int fei_kprobe_handler(struct kprobe *kp, struct pt_regs *regs)
+{
+	struct fei_attr *attr = container_of(kp, struct fei_attr, kp);
+
+	if (should_fail(&fei_fault_attr, 1)) {
+		regs_set_return_value(regs, attr->retval);
+		override_function_with_return(regs);
+		/* Kprobe specific fixup */
+		reset_current_kprobe();
+		preempt_enable_no_resched();
+		return 1;
+	}
+
+	return 0;
+}
+NOKPROBE_SYMBOL(fei_kprobe_handler);
+
+static void *fei_seq_start(struct seq_file *m, loff_t *pos)
+{
+	mutex_lock(&fei_lock);
+	return seq_list_start(&fei_attr_list, *pos);
+}
+
+static void fei_seq_stop(struct seq_file *m, void *v)
+{
+	mutex_unlock(&fei_lock);
+}
+
+static void *fei_seq_next(struct seq_file *m, void *v, loff_t *pos)
+{
+	return seq_list_next(v, &fei_attr_list, pos);
+}
+
+static int fei_seq_show(struct seq_file *m, void *v)
+{
+	struct fei_attr *attr = list_entry(v, struct fei_attr, list);
+
+	seq_printf(m, "%pf\n", attr->kp.addr);
+	return 0;
+}
+
+static const struct seq_operations fei_seq_ops = {
+	.start	= fei_seq_start,
+	.next	= fei_seq_next,
+	.stop	= fei_seq_stop,
+	.show	= fei_seq_show,
+};
+
+static int fei_open(struct inode *inode, struct file *file)
+{
+	return seq_open(file, &fei_seq_ops);
+}
+
+static void fei_attr_remove(struct fei_attr *attr)
+{
+	fei_debugfs_remove_attr(attr);
+	unregister_kprobe(&attr->kp);
+	list_del(&attr->list);
+	fei_attr_free(attr);
+}
+
+static void fei_attr_remove_all(void)
+{
+	struct fei_attr *attr, *n;
+
+	list_for_each_entry_safe(attr, n, &fei_attr_list, list) {
+		fei_attr_remove(attr);
+	}
+}
+
+static ssize_t fei_write(struct file *file, const char __user *buffer,
+			 size_t count, loff_t *ppos)
+{
+	struct fei_attr *attr;
+	unsigned long addr;
+	char *buf, *sym;
+	int ret;
+
+	/* cut off if it is too long */
+	if (count > KSYM_NAME_LEN)
+		count = KSYM_NAME_LEN;
+	buf = kmalloc(sizeof(char) * (count + 1), GFP_KERNEL);
+	if (!buf)
+		return -ENOMEM;
+
+	if (copy_from_user(buf, buffer, count)) {
+		ret = -EFAULT;
+		goto out;
+	}
+	buf[count] = '\0';
+	sym = strstrip(buf);
+
+	mutex_lock(&fei_lock);
+
+	/* Writing just spaces will remove all injection points */
+	if (sym[0] == '\0') {
+		fei_attr_remove_all();
+		ret = count;
+		goto out;
+	}
+	/* Writing !function will remove one injection point */
+	if (sym[0] == '!') {
+		attr = fei_attr_lookup(sym + 1);
+		if (!attr) {
+			ret = -ENOENT;
+			goto out;
+		}
+		fei_attr_remove(attr);
+		ret = count;
+		goto out;
+	}
+
+	addr = kallsyms_lookup_name(sym);
+	if (!addr) {
+		ret = -EINVAL;
+		goto out;
+	}
+	if (!within_error_injection_list(addr)) {
+		ret = -ERANGE;
+		goto out;
+	}
+	if (fei_attr_lookup(sym)) {
+		ret = -EBUSY;
+		goto out;
+	}
+	attr = fei_attr_new(sym, addr);
+	if (!attr) {
+		ret = -ENOMEM;
+		goto out;
+	}
+
+	ret = register_kprobe(&attr->kp);
+	if (!ret)
+		ret = fei_debugfs_add_attr(attr);
+	if (ret < 0) {
+		fei_attr_remove(attr);
+	} else {
+		list_add_tail(&attr->list, &fei_attr_list);
+		ret = count;
+	}
+out:
+	kfree(buf);
+	mutex_unlock(&fei_lock);
+	return ret;
+}
+
+static const struct file_operations fei_ops = {
+	.open =		fei_open,
+	.read =		seq_read,
+	.write =	fei_write,
+	.llseek =	seq_lseek,
+	.release =	seq_release,
+};
+
+static int __init fei_debugfs_init(void)
+{
+	struct dentry *dir;
+
+	dir = fault_create_debugfs_attr("fail_function", NULL,
+					&fei_fault_attr);
+	if (IS_ERR(dir))
+		return PTR_ERR(dir);
+
+	/* injectable attribute is just a symlink of error_inject/list */
+	if (!debugfs_create_symlink("injectable", dir,
+				    "../error_injection/list"))
+		goto error;
+
+	if (!debugfs_create_file("inject", 0600, dir, NULL, &fei_ops))
+		goto error;
+
+	fei_debugfs_dir = dir;
+
+	return 0;
+error:
+	debugfs_remove_recursive(dir);
+	return -ENOMEM;
+}
+
+late_initcall(fei_debugfs_init);
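To make the interface above concrete, here is a hedged user-space sketch of driving it (paths assume debugfs is mounted at /sys/kernel/debug; "open_ctree" stands in for any whitelisted function, and the probability knob comes from the common fault-injection attributes):

	#include <stdio.h>

	static int write_str(const char *path, const char *val)
	{
		FILE *f = fopen(path, "w");

		if (!f)
			return -1;
		fputs(val, f);
		return fclose(f);
	}

	int main(void)
	{
		/* arm the injection point, then fail it every time with
		 * -ENOMEM (0xfffffffffffffff4 == (u64)-12)
		 */
		write_str("/sys/kernel/debug/fail_function/inject", "open_ctree");
		write_str("/sys/kernel/debug/fail_function/open_ctree/retval",
			  "0xfffffffffffffff4");
		write_str("/sys/kernel/debug/fail_function/probability", "100");
		return 0;
	}

Writing "!open_ctree" to the inject file would remove that injection point again, and writing an empty string removes all of them, as implemented in fei_write() above.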
diff --git a/kernel/module.c b/kernel/module.c
index 09e48eee4d55..1d65b2cc4f80 100644
--- a/kernel/module.c
+++ b/kernel/module.c
@@ -3129,7 +3129,11 @@ static int find_module_sections(struct module *mod, struct load_info *info)
 					     sizeof(*mod->ftrace_callsites),
 					     &mod->num_ftrace_callsites);
 #endif
-
+#ifdef CONFIG_FUNCTION_ERROR_INJECTION
+	mod->ei_funcs = section_objs(info, "_error_injection_whitelist",
+					    sizeof(*mod->ei_funcs),
+					    &mod->num_ei_funcs);
+#endif
 	mod->extable = section_objs(info, "__ex_table",
 				    sizeof(*mod->extable), &mod->num_exentries);
 
diff --git a/kernel/trace/Kconfig b/kernel/trace/Kconfig
index f54dc62b599c..0b249e2f0c3c 100644
--- a/kernel/trace/Kconfig
+++ b/kernel/trace/Kconfig
@@ -530,6 +530,15 @@ config FUNCTION_PROFILER
 
 	  If in doubt, say N.
 
+config BPF_KPROBE_OVERRIDE
+	bool "Enable BPF programs to override a kprobed function"
+	depends on BPF_EVENTS
+	depends on FUNCTION_ERROR_INJECTION
+	default n
+	help
+	  Allows BPF to override the execution of a probed function and
+	  set a different return value. This is used for error injection.
+
 config FTRACE_MCOUNT_RECORD
 	def_bool y
 	depends on DYNAMIC_FTRACE
diff --git a/kernel/trace/bpf_trace.c b/kernel/trace/bpf_trace.c
index 40207c2a4113..fc2838ac8b78 100644
--- a/kernel/trace/bpf_trace.c
+++ b/kernel/trace/bpf_trace.c
@@ -13,6 +13,10 @@
 #include <linux/filter.h>
 #include <linux/uaccess.h>
 #include <linux/ctype.h>
+#include <linux/kprobes.h>
+#include <linux/error-injection.h>
+
+#include "trace_probe.h"
 #include "trace.h"
 
 u64 bpf_get_stackid(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5);
@@ -76,6 +80,23 @@ unsigned int trace_call_bpf(struct trace_event_call *call, void *ctx)
 }
 EXPORT_SYMBOL_GPL(trace_call_bpf);
 
+#ifdef CONFIG_BPF_KPROBE_OVERRIDE
+BPF_CALL_2(bpf_override_return, struct pt_regs *, regs, unsigned long, rc)
+{
+	regs_set_return_value(regs, rc);
+	override_function_with_return(regs);
+	return 0;
+}
+
+static const struct bpf_func_proto bpf_override_return_proto = {
+	.func		= bpf_override_return,
+	.gpl_only	= true,
+	.ret_type	= RET_INTEGER,
+	.arg1_type	= ARG_PTR_TO_CTX,
+	.arg2_type	= ARG_ANYTHING,
+};
+#endif
+
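A minimal kprobe program using the new helper might look like this (a sketch assuming the bpf_helpers.h wrappers from tools/testing/selftests/bpf, built with clang -target bpf; "open_ctree" again stands in for a whitelisted function):

	#include <uapi/linux/bpf.h>
	#include "bpf_helpers.h"

	SEC("kprobe/open_ctree")
	int override_open_ctree(struct pt_regs *ctx)
	{
		unsigned long rc = -12;	/* -ENOMEM */

		/* force the probed function to return rc immediately */
		bpf_override_return(ctx, rc);
		return 0;
	}

	char _license[] SEC("license") = "GPL";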
 BPF_CALL_3(bpf_probe_read, void *, dst, u32, size, const void *, unsafe_ptr)
 {
 	int ret;
@@ -224,7 +245,7 @@ BPF_CALL_5(bpf_trace_printk, char *, fmt, u32, fmt_size, u64, arg1,
  */
 #define __BPF_TP_EMIT()	__BPF_ARG3_TP()
 #define __BPF_TP(...)							\
-	__trace_printk(1 /* Fake ip will not be printed. */,		\
+	__trace_printk(0 /* Fake ip */,					\
 		       fmt, ##__VA_ARGS__)
 
 #define __BPF_ARG1_TP(...)						\
@@ -556,6 +577,10 @@ static const struct bpf_func_proto *kprobe_prog_func_proto(enum bpf_func_id func
 		return &bpf_get_stackid_proto;
 	case BPF_FUNC_perf_event_read_value:
 		return &bpf_perf_event_read_value_proto;
+#ifdef CONFIG_BPF_KPROBE_OVERRIDE
+	case BPF_FUNC_override_return:
+		return &bpf_override_return_proto;
+#endif
 	default:
 		return tracing_func_proto(func_id);
 	}
@@ -773,6 +798,15 @@ int perf_event_attach_bpf_prog(struct perf_event *event,
 	struct bpf_prog_array *new_array;
 	int ret = -EEXIST;
 
+	/*
+	 * Kprobe override only works if the probe is on the function entry,
+	 * and only if the probed function is on the error-injection opt-in
+	 * list.
+	 */
+	if (prog->kprobe_override &&
+	    (!trace_kprobe_on_func_entry(event->tp_event) ||
+	     !trace_kprobe_error_injectable(event->tp_event)))
+		return -EINVAL;
+
 	mutex_lock(&bpf_event_mutex);
 
 	if (event->prog)
@@ -825,3 +859,26 @@ void perf_event_detach_bpf_prog(struct perf_event *event)
 unlock:
 	mutex_unlock(&bpf_event_mutex);
 }
+
+int perf_event_query_prog_array(struct perf_event *event, void __user *info)
+{
+	struct perf_event_query_bpf __user *uquery = info;
+	struct perf_event_query_bpf query = {};
+	int ret;
+
+	if (!capable(CAP_SYS_ADMIN))
+		return -EPERM;
+	if (event->attr.type != PERF_TYPE_TRACEPOINT)
+		return -EINVAL;
+	if (copy_from_user(&query, uquery, sizeof(query)))
+		return -EFAULT;
+
+	mutex_lock(&bpf_event_mutex);
+	ret = bpf_prog_array_copy_info(event->tp_event->prog_array,
+				       uquery->ids,
+				       query.ids_len,
+				       &uquery->prog_cnt);
+	mutex_unlock(&bpf_event_mutex);
+
+	return ret;
+}
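From user space the new ioctl can be exercised roughly as follows (a sketch; perf_fd is assumed to be an fd for a PERF_TYPE_TRACEPOINT event opened elsewhere):

	#include <linux/perf_event.h>
	#include <sys/ioctl.h>
	#include <stdio.h>
	#include <stdlib.h>

	static void query_progs(int perf_fd)
	{
		__u32 max_ids = 16;
		struct perf_event_query_bpf *query;

		/* ids_len tells the kernel how many ids fit after the header */
		query = calloc(1, sizeof(*query) + max_ids * sizeof(__u32));
		if (!query)
			return;
		query->ids_len = max_ids;
		if (ioctl(perf_fd, PERF_EVENT_IOC_QUERY_BPF, query) == 0)
			printf("%u BPF program(s) attached\n", query->prog_cnt);
		free(query);
	}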
diff --git a/kernel/trace/trace_kprobe.c b/kernel/trace/trace_kprobe.c
index 492700c5fb4d..1fad24acd444 100644
--- a/kernel/trace/trace_kprobe.c
+++ b/kernel/trace/trace_kprobe.c
@@ -21,6 +21,7 @@
 #include <linux/module.h>
 #include <linux/uaccess.h>
 #include <linux/rculist.h>
+#include <linux/error-injection.h>
 
 #include "trace_probe.h"
 
@@ -42,7 +43,6 @@ struct trace_kprobe {
 	(offsetof(struct trace_kprobe, tp.args) +	\
 	(sizeof(struct probe_arg) * (n)))
 
-
 static nokprobe_inline bool trace_kprobe_is_return(struct trace_kprobe *tk)
 {
 	return tk->rp.handler != NULL;
@@ -87,6 +87,30 @@ static nokprobe_inline unsigned long trace_kprobe_nhit(struct trace_kprobe *tk)
 	return nhit;
 }
 
+bool trace_kprobe_on_func_entry(struct trace_event_call *call)
+{
+	struct trace_kprobe *tk = (struct trace_kprobe *)call->data;
+
+	return kprobe_on_func_entry(tk->rp.kp.addr,
+			tk->rp.kp.addr ? NULL : tk->rp.kp.symbol_name,
+			tk->rp.kp.addr ? 0 : tk->rp.kp.offset);
+}
+
+bool trace_kprobe_error_injectable(struct trace_event_call *call)
+{
+	struct trace_kprobe *tk = (struct trace_kprobe *)call->data;
+	unsigned long addr;
+
+	if (tk->symbol) {
+		addr = (unsigned long)
+			kallsyms_lookup_name(trace_kprobe_symbol(tk));
+		addr += tk->rp.kp.offset;
+	} else {
+		addr = (unsigned long)tk->rp.kp.addr;
+	}
+	return within_error_injection_list(addr);
+}
+
 static int register_kprobe_event(struct trace_kprobe *tk);
 static int unregister_kprobe_event(struct trace_kprobe *tk);
 
@@ -1170,7 +1194,7 @@ static int kretprobe_event_define_fields(struct trace_event_call *event_call)
 #ifdef CONFIG_PERF_EVENTS
 
 /* Kprobe profile handler */
-static void
+static int
 kprobe_perf_func(struct trace_kprobe *tk, struct pt_regs *regs)
 {
 	struct trace_event_call *call = &tk->tp.call;
@@ -1179,12 +1203,31 @@ kprobe_perf_func(struct trace_kprobe *tk, struct pt_regs *regs)
 	int size, __size, dsize;
 	int rctx;
 
-	if (bpf_prog_array_valid(call) && !trace_call_bpf(call, regs))
-		return;
+	if (bpf_prog_array_valid(call)) {
+		unsigned long orig_ip = instruction_pointer(regs);
+		int ret;
+
+		ret = trace_call_bpf(call, regs);
+
+		/*
+		 * We need to check whether we modified the pc in pt_regs,
+		 * and if so clear the kprobe and return 1 so that we skip
+		 * the single stepping.
+		 * The ftrace kprobe handler leaves it up to us to re-enable
+		 * preemption here before returning if we've modified the ip.
+		 */
+		if (orig_ip != instruction_pointer(regs)) {
+			reset_current_kprobe();
+			preempt_enable_no_resched();
+			return 1;
+		}
+		if (!ret)
+			return 0;
+	}
 
 	head = this_cpu_ptr(call->perf_events);
 	if (hlist_empty(head))
-		return;
+		return 0;
 
 	dsize = __get_data_size(&tk->tp, regs);
 	__size = sizeof(*entry) + tk->tp.size + dsize;
@@ -1193,13 +1236,14 @@ kprobe_perf_func(struct trace_kprobe *tk, struct pt_regs *regs)
 
 	entry = perf_trace_buf_alloc(size, NULL, &rctx);
 	if (!entry)
-		return;
+		return 0;
 
 	entry->ip = (unsigned long)tk->rp.kp.addr;
 	memset(&entry[1], 0, dsize);
 	store_trace_args(sizeof(*entry), &tk->tp, regs, (u8 *)&entry[1], dsize);
 	perf_trace_buf_submit(entry, size, rctx, call->event.type, 1, regs,
 			      head, NULL);
+	return 0;
 }
 NOKPROBE_SYMBOL(kprobe_perf_func);
 
@@ -1275,6 +1319,7 @@ static int kprobe_register(struct trace_event_call *event,
 static int kprobe_dispatcher(struct kprobe *kp, struct pt_regs *regs)
 {
 	struct trace_kprobe *tk = container_of(kp, struct trace_kprobe, rp.kp);
+	int ret = 0;
 
 	raw_cpu_inc(*tk->nhit);
 
@@ -1282,9 +1327,9 @@ static int kprobe_dispatcher(struct kprobe *kp, struct pt_regs *regs)
 		kprobe_trace_func(tk, regs);
 #ifdef CONFIG_PERF_EVENTS
 	if (tk->tp.flags & TP_FLAG_PROFILE)
-		kprobe_perf_func(tk, regs);
+		ret = kprobe_perf_func(tk, regs);
 #endif
-	return 0;	/* We don't tweek kernel, so just return 0 */
+	return ret;
 }
 NOKPROBE_SYMBOL(kprobe_dispatcher);
 
diff --git a/kernel/trace/trace_probe.h b/kernel/trace/trace_probe.h
index fb66e3eaa192..e101c5bb9eda 100644
--- a/kernel/trace/trace_probe.h
+++ b/kernel/trace/trace_probe.h
@@ -252,6 +252,8 @@ struct symbol_cache;
 unsigned long update_symbol_cache(struct symbol_cache *sc);
 void free_symbol_cache(struct symbol_cache *sc);
 struct symbol_cache *alloc_symbol_cache(const char *sym, long offset);
+bool trace_kprobe_on_func_entry(struct trace_event_call *call);
+bool trace_kprobe_error_injectable(struct trace_event_call *call);
 #else
 /* uprobes do not support symbol fetch methods */
 #define fetch_symbol_u8			NULL
@@ -277,6 +279,16 @@ alloc_symbol_cache(const char *sym, long offset)
 {
 	return NULL;
 }
+
+static inline bool trace_kprobe_on_func_entry(struct trace_event_call *call)
+{
+	return false;
+}
+
+static inline bool trace_kprobe_error_injectable(struct trace_event_call *call)
+{
+	return false;
+}
 #endif /* CONFIG_KPROBE_EVENTS */
 
 struct probe_arg {
diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
index 2a1f28c183d3..64d7c19d3167 100644
--- a/lib/Kconfig.debug
+++ b/lib/Kconfig.debug
@@ -1500,6 +1500,10 @@ config FAULT_INJECTION
 	  Provide fault-injection framework.
 	  For more details, see Documentation/fault-injection/.
 
+config FUNCTION_ERROR_INJECTION
+	def_bool y
+	depends on HAVE_FUNCTION_ERROR_INJECTION && KPROBES
+
 config FAILSLAB
 	bool "Fault-injection capability for kmalloc"
 	depends on FAULT_INJECTION
@@ -1547,6 +1551,16 @@ config FAIL_FUTEX
 	help
 	  Provide fault-injection capability for futexes.
 
+config FAIL_FUNCTION
+	bool "Fault-injection capability for functions"
+	depends on FAULT_INJECTION_DEBUG_FS && FUNCTION_ERROR_INJECTION
+	help
+	  Provide function-based fault-injection capability.
+	  This allows you to override a specific function so that it
+	  immediately returns a given value. As a result, the function's
+	  caller sees an error value and has to handle it. This is useful
+	  for testing error handling in various subsystems.
+
 config FAULT_INJECTION_DEBUG_FS
 	bool "Debugfs entries for fault-injection capabilities"
 	depends on FAULT_INJECTION && SYSFS && DEBUG_FS
diff --git a/lib/Makefile b/lib/Makefile
index 749851abe85a..7adb066692b3 100644
--- a/lib/Makefile
+++ b/lib/Makefile
@@ -39,7 +39,7 @@ obj-y += bcd.o div64.o sort.o parser.o debug_locks.o random32.o \
 	 gcd.o lcm.o list_sort.o uuid.o flex_array.o iov_iter.o clz_ctz.o \
 	 bsearch.o find_bit.o llist.o memweight.o kfifo.o \
 	 percpu-refcount.o percpu_ida.o rhashtable.o reciprocal_div.o \
-	 once.o refcount.o usercopy.o errseq.o
+	 once.o refcount.o usercopy.o errseq.o bucket_locks.o
 obj-$(CONFIG_STRING_SELFTEST) += test_string.o
 obj-y += string_helpers.o
 obj-$(CONFIG_TEST_STRING_HELPERS) += test-string_helpers.o
@@ -149,6 +149,7 @@ obj-$(CONFIG_NETDEV_NOTIFIER_ERROR_INJECT) += netdev-notifier-error-inject.o
 obj-$(CONFIG_MEMORY_NOTIFIER_ERROR_INJECT) += memory-notifier-error-inject.o
 obj-$(CONFIG_OF_RECONFIG_NOTIFIER_ERROR_INJECT) += \
 	of-reconfig-notifier-error-inject.o
+obj-$(CONFIG_FUNCTION_ERROR_INJECTION) += error-inject.o
 
 lib-$(CONFIG_GENERIC_BUG) += bug.o
 
diff --git a/lib/bucket_locks.c b/lib/bucket_locks.c
new file mode 100644
index 000000000000..266a97c5708b
--- /dev/null
+++ b/lib/bucket_locks.c
@@ -0,0 +1,54 @@
+#include <linux/export.h>
+#include <linux/kernel.h>
+#include <linux/mm.h>
+#include <linux/slab.h>
+#include <linux/vmalloc.h>
+
+/* Allocate an array of spinlocks to be accessed by a hash. Two arguments
+ * bound the number of elements in the array: max_size gives the maximum
+ * number of locks to allocate, and cpu_mult gives the number of locks
+ * per CPU. Note that the resulting size must come out a power of 2,
+ * since the bucket mask handed back is size - 1.
+ */
+
+int alloc_bucket_spinlocks(spinlock_t **locks, unsigned int *locks_mask,
+			   size_t max_size, unsigned int cpu_mult, gfp_t gfp)
+{
+	spinlock_t *tlocks = NULL;
+	unsigned int i, size;
+#if defined(CONFIG_PROVE_LOCKING)
+	unsigned int nr_pcpus = 2;
+#else
+	unsigned int nr_pcpus = num_possible_cpus();
+#endif
+
+	if (cpu_mult) {
+		nr_pcpus = min_t(unsigned int, nr_pcpus, 64UL);
+		size = min_t(unsigned int, nr_pcpus * cpu_mult, max_size);
+	} else {
+		size = max_size;
+	}
+
+	if (sizeof(spinlock_t) != 0) {
+		if (gfpflags_allow_blocking(gfp))
+			tlocks = kvmalloc(size * sizeof(spinlock_t), gfp);
+		else
+			tlocks = kmalloc_array(size, sizeof(spinlock_t), gfp);
+		if (!tlocks)
+			return -ENOMEM;
+		for (i = 0; i < size; i++)
+			spin_lock_init(&tlocks[i]);
+	}
+
+	*locks = tlocks;
+	*locks_mask = size - 1;
+
+	return 0;
+}
+EXPORT_SYMBOL(alloc_bucket_spinlocks);
+
+void free_bucket_spinlocks(spinlock_t *locks)
+{
+	kvfree(locks);
+}
+EXPORT_SYMBOL(free_bucket_spinlocks);
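A sketch of a hypothetical caller; as noted in the comment above, the computed size is expected to come out a power of 2, since the returned locks_mask is size - 1:

	static spinlock_t *locks;
	static unsigned int locks_mask;

	static int init_table_locks(void)
	{
		/* at most 128 locks, one per CPU; allocation may sleep */
		return alloc_bucket_spinlocks(&locks, &locks_mask, 128, 1,
					      GFP_KERNEL);
	}

	static spinlock_t *bucket_lock(unsigned int hash)
	{
		return &locks[hash & locks_mask];
	}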
diff --git a/lib/error-inject.c b/lib/error-inject.c
new file mode 100644
index 000000000000..c0d4600f4896
--- /dev/null
+++ b/lib/error-inject.c
@@ -0,0 +1,242 @@
+// SPDX-License-Identifier: GPL-2.0
+// error-inject.c: Function-level error injection table
+#include <linux/error-injection.h>
+#include <linux/debugfs.h>
+#include <linux/kallsyms.h>
+#include <linux/kprobes.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/list.h>
+#include <linux/slab.h>
+
+/* Whitelist of symbols that can be overridden for error injection. */
+static LIST_HEAD(error_injection_list);
+static DEFINE_MUTEX(ei_mutex);
+struct ei_entry {
+	struct list_head list;
+	unsigned long start_addr;
+	unsigned long end_addr;
+	int etype;
+	void *priv;
+};
+
+bool within_error_injection_list(unsigned long addr)
+{
+	struct ei_entry *ent;
+	bool ret = false;
+
+	mutex_lock(&ei_mutex);
+	list_for_each_entry(ent, &error_injection_list, list) {
+		if (addr >= ent->start_addr && addr < ent->end_addr) {
+			ret = true;
+			break;
+		}
+	}
+	mutex_unlock(&ei_mutex);
+	return ret;
+}
+
+int get_injectable_error_type(unsigned long addr)
+{
+	struct ei_entry *ent;
+
+	list_for_each_entry(ent, &error_injection_list, list) {
+		if (addr >= ent->start_addr && addr < ent->end_addr)
+			return ent->etype;
+	}
+	return EI_ETYPE_NONE;
+}
+
+/*
+ * Lookup and populate the error_injection_list.
+ *
+ * For safety reasons we only allow certain functions to be overridden with
+ * bpf_error_injection, so we need to populate the list of symbols that
+ * have been marked as safe to override.
+ */
+static void populate_error_injection_list(struct error_injection_entry *start,
+					  struct error_injection_entry *end,
+					  void *priv)
+{
+	struct error_injection_entry *iter;
+	struct ei_entry *ent;
+	unsigned long entry, offset = 0, size = 0;
+
+	mutex_lock(&ei_mutex);
+	for (iter = start; iter < end; iter++) {
+		entry = arch_deref_entry_point((void *)iter->addr);
+
+		if (!kernel_text_address(entry) ||
+		    !kallsyms_lookup_size_offset(entry, &size, &offset)) {
+			pr_err("Failed to find error inject entry at %p\n",
+				(void *)entry);
+			continue;
+		}
+
+		ent = kmalloc(sizeof(*ent), GFP_KERNEL);
+		if (!ent)
+			break;
+		ent->start_addr = entry;
+		ent->end_addr = entry + size;
+		ent->etype = iter->etype;
+		ent->priv = priv;
+		INIT_LIST_HEAD(&ent->list);
+		list_add_tail(&ent->list, &error_injection_list);
+	}
+	mutex_unlock(&ei_mutex);
+}
+
+/* Markers of the _error_inject_whitelist section */
+extern struct error_injection_entry __start_error_injection_whitelist[];
+extern struct error_injection_entry __stop_error_injection_whitelist[];
+
+static void __init populate_kernel_ei_list(void)
+{
+	populate_error_injection_list(__start_error_injection_whitelist,
+				      __stop_error_injection_whitelist,
+				      NULL);
+}
+
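Functions opt in to this whitelist with the ALLOW_ERROR_INJECTION() annotation from <asm-generic/error-injection.h> (pulled in via <linux/error-injection.h>); a sketch for a hypothetical function that may fail with an errno:

	#include <linux/error-injection.h>

	static int example_prepare(void)
	{
		/* real work elided; may legitimately fail with -ENOMEM */
		return 0;
	}
	ALLOW_ERROR_INJECTION(example_prepare, ERRNO);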
+#ifdef CONFIG_MODULES
+static void module_load_ei_list(struct module *mod)
+{
+	if (!mod->num_ei_funcs)
+		return;
+
+	populate_error_injection_list(mod->ei_funcs,
+				      mod->ei_funcs + mod->num_ei_funcs, mod);
+}
+
+static void module_unload_ei_list(struct module *mod)
+{
+	struct ei_entry *ent, *n;
+
+	if (!mod->num_ei_funcs)
+		return;
+
+	mutex_lock(&ei_mutex);
+	list_for_each_entry_safe(ent, n, &error_injection_list, list) {
+		if (ent->priv == mod) {
+			list_del_init(&ent->list);
+			kfree(ent);
+		}
+	}
+	mutex_unlock(&ei_mutex);
+}
+
+/* Module notifier call back, checking error injection table on the module */
+static int ei_module_callback(struct notifier_block *nb,
+			      unsigned long val, void *data)
+{
+	struct module *mod = data;
+
+	if (val == MODULE_STATE_COMING)
+		module_load_ei_list(mod);
+	else if (val == MODULE_STATE_GOING)
+		module_unload_ei_list(mod);
+
+	return NOTIFY_DONE;
+}
+
+static struct notifier_block ei_module_nb = {
+	.notifier_call = ei_module_callback,
+	.priority = 0
+};
+
+static __init int module_ei_init(void)
+{
+	return register_module_notifier(&ei_module_nb);
+}
+#else /* !CONFIG_MODULES */
+#define module_ei_init()	(0)
+#endif
+
+/*
+ * error_injection/whitelist -- shows which functions can be overridden for
+ * error injection.
+ */
+static void *ei_seq_start(struct seq_file *m, loff_t *pos)
+{
+	mutex_lock(&ei_mutex);
+	return seq_list_start(&error_injection_list, *pos);
+}
+
+static void ei_seq_stop(struct seq_file *m, void *v)
+{
+	mutex_unlock(&ei_mutex);
+}
+
+static void *ei_seq_next(struct seq_file *m, void *v, loff_t *pos)
+{
+	return seq_list_next(v, &error_injection_list, pos);
+}
+
+static const char *error_type_string(int etype)
+{
+	switch (etype) {
+	case EI_ETYPE_NULL:
+		return "NULL";
+	case EI_ETYPE_ERRNO:
+		return "ERRNO";
+	case EI_ETYPE_ERRNO_NULL:
+		return "ERRNO_NULL";
+	default:
+		return "(unknown)";
+	}
+}
+
+static int ei_seq_show(struct seq_file *m, void *v)
+{
+	struct ei_entry *ent = list_entry(v, struct ei_entry, list);
+
+	seq_printf(m, "%pf\t%s\n", (void *)ent->start_addr,
+		   error_type_string(ent->etype));
+	return 0;
+}
+
+static const struct seq_operations ei_seq_ops = {
+	.start = ei_seq_start,
+	.next  = ei_seq_next,
+	.stop  = ei_seq_stop,
+	.show  = ei_seq_show,
+};
+
+static int ei_open(struct inode *inode, struct file *filp)
+{
+	return seq_open(filp, &ei_seq_ops);
+}
+
+static const struct file_operations debugfs_ei_ops = {
+	.open           = ei_open,
+	.read           = seq_read,
+	.llseek         = seq_lseek,
+	.release        = seq_release,
+};
+
+static int __init ei_debugfs_init(void)
+{
+	struct dentry *dir, *file;
+
+	dir = debugfs_create_dir("error_injection", NULL);
+	if (!dir)
+		return -ENOMEM;
+
+	file = debugfs_create_file("list", 0444, dir, NULL, &debugfs_ei_ops);
+	if (!file) {
+		debugfs_remove(dir);
+		return -ENOMEM;
+	}
+
+	return 0;
+}
+
+static int __init init_error_injection(void)
+{
+	populate_kernel_ei_list();
+
+	if (!module_ei_init())
+		ei_debugfs_init();
+
+	return 0;
+}
+late_initcall(init_error_injection);
diff --git a/lib/rhashtable.c b/lib/rhashtable.c
index ddd7dde87c3c..3825c30aaa36 100644
--- a/lib/rhashtable.c
+++ b/lib/rhashtable.c
@@ -65,42 +65,6 @@ EXPORT_SYMBOL_GPL(lockdep_rht_bucket_is_held);
 #define ASSERT_RHT_MUTEX(HT)
 #endif
 
-
-static int alloc_bucket_locks(struct rhashtable *ht, struct bucket_table *tbl,
-			      gfp_t gfp)
-{
-	unsigned int i, size;
-#if defined(CONFIG_PROVE_LOCKING)
-	unsigned int nr_pcpus = 2;
-#else
-	unsigned int nr_pcpus = num_possible_cpus();
-#endif
-
-	nr_pcpus = min_t(unsigned int, nr_pcpus, 64UL);
-	size = roundup_pow_of_two(nr_pcpus * ht->p.locks_mul);
-
-	/* Never allocate more than 0.5 locks per bucket */
-	size = min_t(unsigned int, size, tbl->size >> 1);
-
-	if (tbl->nest)
-		size = min(size, 1U << tbl->nest);
-
-	if (sizeof(spinlock_t) != 0) {
-		if (gfpflags_allow_blocking(gfp))
-			tbl->locks = kvmalloc(size * sizeof(spinlock_t), gfp);
-		else
-			tbl->locks = kmalloc_array(size, sizeof(spinlock_t),
-						   gfp);
-		if (!tbl->locks)
-			return -ENOMEM;
-		for (i = 0; i < size; i++)
-			spin_lock_init(&tbl->locks[i]);
-	}
-	tbl->locks_mask = size - 1;
-
-	return 0;
-}
-
 static void nested_table_free(union nested_table *ntbl, unsigned int size)
 {
 	const unsigned int shift = PAGE_SHIFT - ilog2(sizeof(void *));
@@ -140,7 +104,7 @@ static void bucket_table_free(const struct bucket_table *tbl)
 	if (tbl->nest)
 		nested_bucket_table_free(tbl);
 
-	kvfree(tbl->locks);
+	free_bucket_spinlocks(tbl->locks);
 	kvfree(tbl);
 }
 
@@ -207,7 +171,7 @@ static struct bucket_table *bucket_table_alloc(struct rhashtable *ht,
 					       gfp_t gfp)
 {
 	struct bucket_table *tbl = NULL;
-	size_t size;
+	size_t size, max_locks;
 	int i;
 
 	size = sizeof(*tbl) + nbuckets * sizeof(tbl->buckets[0]);
@@ -227,7 +191,12 @@ static struct bucket_table *bucket_table_alloc(struct rhashtable *ht,
 
 	tbl->size = size;
 
-	if (alloc_bucket_locks(ht, tbl, gfp) < 0) {
+	max_locks = size >> 1;
+	if (tbl->nest)
+		max_locks = min_t(size_t, max_locks, 1U << tbl->nest);
+
+	if (alloc_bucket_spinlocks(&tbl->locks, &tbl->locks_mask, max_locks,
+				   ht->p.locks_mul, gfp) < 0) {
 		bucket_table_free(tbl);
 		return NULL;
 	}
@@ -707,6 +676,7 @@ void rhashtable_walk_enter(struct rhashtable *ht, struct rhashtable_iter *iter)
 	iter->p = NULL;
 	iter->slot = 0;
 	iter->skip = 0;
+	iter->end_of_table = 0;
 
 	spin_lock(&ht->lock);
 	iter->walker.tbl =
@@ -732,7 +702,7 @@ void rhashtable_walk_exit(struct rhashtable_iter *iter)
 EXPORT_SYMBOL_GPL(rhashtable_walk_exit);
 
 /**
- * rhashtable_walk_start - Start a hash table walk
+ * rhashtable_walk_start_check - Start a hash table walk
  * @iter:	Hash table iterator
  *
  * Start a hash table walk at the current iterator position.  Note that we take
@@ -744,8 +714,12 @@ EXPORT_SYMBOL_GPL(rhashtable_walk_exit);
  * Returns -EAGAIN if resize event occurred.  Note that the iterator
  * will rewind back to the beginning and you may use it immediately
  * by calling rhashtable_walk_next.
+ *
+ * rhashtable_walk_start is defined as an inline variant that returns
+ * void. This is preferred in cases where the caller would ignore
+ * resize events and always continue.
  */
-int rhashtable_walk_start(struct rhashtable_iter *iter)
+int rhashtable_walk_start_check(struct rhashtable_iter *iter)
 	__acquires(RCU)
 {
 	struct rhashtable *ht = iter->ht;
@@ -757,28 +731,26 @@ int rhashtable_walk_start(struct rhashtable_iter *iter)
 		list_del(&iter->walker.list);
 	spin_unlock(&ht->lock);
 
-	if (!iter->walker.tbl) {
+	if (!iter->walker.tbl && !iter->end_of_table) {
 		iter->walker.tbl = rht_dereference_rcu(ht->tbl, ht);
 		return -EAGAIN;
 	}
 
 	return 0;
 }
-EXPORT_SYMBOL_GPL(rhashtable_walk_start);
+EXPORT_SYMBOL_GPL(rhashtable_walk_start_check);
 
 /**
- * rhashtable_walk_next - Return the next object and advance the iterator
- * @iter:	Hash table iterator
+ * __rhashtable_walk_find_next - Find the next element in a table (or the first
+ * one in case of a new walk).
  *
- * Note that you must call rhashtable_walk_stop when you are finished
- * with the walk.
+ * @iter:	Hash table iterator
  *
- * Returns the next object or NULL when the end of the table is reached.
+ * Returns the found object or NULL when the end of the table is reached.
  *
- * Returns -EAGAIN if resize event occured.  Note that the iterator
- * will rewind back to the beginning and you may continue to use it.
+ * Returns -EAGAIN if resize event occurred.
  */
-void *rhashtable_walk_next(struct rhashtable_iter *iter)
+static void *__rhashtable_walk_find_next(struct rhashtable_iter *iter)
 {
 	struct bucket_table *tbl = iter->walker.tbl;
 	struct rhlist_head *list = iter->list;
@@ -786,13 +758,8 @@ void *rhashtable_walk_next(struct rhashtable_iter *iter)
 	struct rhash_head *p = iter->p;
 	bool rhlist = ht->rhlist;
 
-	if (p) {
-		if (!rhlist || !(list = rcu_dereference(list->next))) {
-			p = rcu_dereference(p->next);
-			list = container_of(p, struct rhlist_head, rhead);
-		}
-		goto next;
-	}
+	if (!tbl)
+		return NULL;
 
 	for (; iter->slot < tbl->size; iter->slot++) {
 		int skip = iter->skip;
@@ -836,13 +803,90 @@ next:
 		iter->slot = 0;
 		iter->skip = 0;
 		return ERR_PTR(-EAGAIN);
+	} else {
+		iter->end_of_table = true;
 	}
 
 	return NULL;
 }
+
+/**
+ * rhashtable_walk_next - Return the next object and advance the iterator
+ * @iter:	Hash table iterator
+ *
+ * Note that you must call rhashtable_walk_stop when you are finished
+ * with the walk.
+ *
+ * Returns the next object or NULL when the end of the table is reached.
+ *
+ * Returns -EAGAIN if resize event occurred.  Note that the iterator
+ * will rewind back to the beginning and you may continue to use it.
+ */
+void *rhashtable_walk_next(struct rhashtable_iter *iter)
+{
+	struct rhlist_head *list = iter->list;
+	struct rhashtable *ht = iter->ht;
+	struct rhash_head *p = iter->p;
+	bool rhlist = ht->rhlist;
+
+	if (p) {
+		if (!rhlist || !(list = rcu_dereference(list->next))) {
+			p = rcu_dereference(p->next);
+			list = container_of(p, struct rhlist_head, rhead);
+		}
+		if (!rht_is_a_nulls(p)) {
+			iter->skip++;
+			iter->p = p;
+			iter->list = list;
+			return rht_obj(ht, rhlist ? &list->rhead : p);
+		}
+
+		/* At the end of this slot, switch to next one and then find
+		 * next entry from that point.
+		 */
+		iter->skip = 0;
+		iter->slot++;
+	}
+
+	return __rhashtable_walk_find_next(iter);
+}
 EXPORT_SYMBOL_GPL(rhashtable_walk_next);
 
 /**
+ * rhashtable_walk_peek - Return the next object but don't advance the iterator
+ * @iter:	Hash table iterator
+ *
+ * Returns the next object or NULL when the end of the table is reached.
+ *
+ * Returns -EAGAIN if resize event occurred.  Note that the iterator
+ * will rewind back to the beginning and you may continue to use it.
+ */
+void *rhashtable_walk_peek(struct rhashtable_iter *iter)
+{
+	struct rhlist_head *list = iter->list;
+	struct rhashtable *ht = iter->ht;
+	struct rhash_head *p = iter->p;
+
+	if (p)
+		return rht_obj(ht, ht->rhlist ? &list->rhead : p);
+
+	/* No object found in current iter, find next one in the table. */
+
+	if (iter->skip) {
+		/* A nonzero skip value points to the next entry in the table
+		 * beyond the last one that was found. Decrement skip so
+		 * we find the current value. __rhashtable_walk_find_next
+		 * will restore the original value of skip assuming that
+		 * the table hasn't changed.
+		 */
+		iter->skip--;
+	}
+
+	return __rhashtable_walk_find_next(iter);
+}
+EXPORT_SYMBOL_GPL(rhashtable_walk_peek);
+
+/**
  * rhashtable_walk_stop - Finish a hash table walk
  * @iter:	Hash table iterator
  *
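Taken together, a typical walk over a table with the reworked API looks roughly like this (a kernel-side sketch assuming a populated struct rhashtable; IS_ERR() comes from <linux/err.h>):

	static void walk_all(struct rhashtable *ht)
	{
		struct rhashtable_iter iter;
		void *obj;

		rhashtable_walk_enter(ht, &iter);
		rhashtable_walk_start(&iter);	/* void variant of _start_check */

		while ((obj = rhashtable_walk_next(&iter)) != NULL) {
			if (IS_ERR(obj))	/* -EAGAIN: table resized, rewound */
				continue;
			/* use obj here */
		}

		rhashtable_walk_stop(&iter);
		rhashtable_walk_exit(&iter);
	}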
diff --git a/lib/test_bpf.c b/lib/test_bpf.c
index f369889e521d..4cd9ea9b3449 100644
--- a/lib/test_bpf.c
+++ b/lib/test_bpf.c
@@ -2003,10 +2003,14 @@ static struct bpf_test tests[] = {
 		{ { 4, 0 }, { 5, 10 } }
 	},
 	{
-		"INT: DIV by zero",
+		/* This one doesn't go through the verifier, but is raw eBPF
+		 * insns, as opposed to the cBPF tests here. Thus the div by 0
+		 * tests are done in test_verifier in the BPF kselftests.
+		 */
+		"INT: DIV by -1",
 		.u.insns_int = {
 			BPF_ALU64_REG(BPF_MOV, R6, R1),
-			BPF_ALU64_IMM(BPF_MOV, R7, 0),
+			BPF_ALU64_IMM(BPF_MOV, R7, -1),
 			BPF_LD_ABS(BPF_B, 3),
 			BPF_ALU32_REG(BPF_DIV, R0, R7),
 			BPF_EXIT_INSN(),
@@ -6109,6 +6113,110 @@ static struct bpf_test tests[] = {
 		{ { ETH_HLEN, 42 } },
 		.fill_helper = bpf_fill_ld_abs_vlan_push_pop2,
 	},
+	/* Checking interpreter vs JIT wrt signed extended imms. */
+	{
+		"JNE signed compare, test 1",
+		.u.insns_int = {
+			BPF_ALU32_IMM(BPF_MOV, R1, 0xfefbbc12),
+			BPF_ALU32_IMM(BPF_MOV, R3, 0xffff0000),
+			BPF_MOV64_REG(R2, R1),
+			BPF_ALU64_REG(BPF_AND, R2, R3),
+			BPF_ALU32_IMM(BPF_MOV, R0, 1),
+			BPF_JMP_IMM(BPF_JNE, R2, -17104896, 1),
+			BPF_ALU32_IMM(BPF_MOV, R0, 2),
+			BPF_EXIT_INSN(),
+		},
+		INTERNAL,
+		{ },
+		{ { 0, 1 } },
+	},
+	{
+		"JNE signed compare, test 2",
+		.u.insns_int = {
+			BPF_ALU32_IMM(BPF_MOV, R1, 0xfefbbc12),
+			BPF_ALU32_IMM(BPF_MOV, R3, 0xffff0000),
+			BPF_MOV64_REG(R2, R1),
+			BPF_ALU64_REG(BPF_AND, R2, R3),
+			BPF_ALU32_IMM(BPF_MOV, R0, 1),
+			BPF_JMP_IMM(BPF_JNE, R2, 0xfefb0000, 1),
+			BPF_ALU32_IMM(BPF_MOV, R0, 2),
+			BPF_EXIT_INSN(),
+		},
+		INTERNAL,
+		{ },
+		{ { 0, 1 } },
+	},
+	{
+		"JNE signed compare, test 3",
+		.u.insns_int = {
+			BPF_ALU32_IMM(BPF_MOV, R1, 0xfefbbc12),
+			BPF_ALU32_IMM(BPF_MOV, R3, 0xffff0000),
+			BPF_ALU32_IMM(BPF_MOV, R4, 0xfefb0000),
+			BPF_MOV64_REG(R2, R1),
+			BPF_ALU64_REG(BPF_AND, R2, R3),
+			BPF_ALU32_IMM(BPF_MOV, R0, 1),
+			BPF_JMP_REG(BPF_JNE, R2, R4, 1),
+			BPF_ALU32_IMM(BPF_MOV, R0, 2),
+			BPF_EXIT_INSN(),
+		},
+		INTERNAL,
+		{ },
+		{ { 0, 2 } },
+	},
+	{
+		"JNE signed compare, test 4",
+		.u.insns_int = {
+			BPF_LD_IMM64(R1, -17104896),
+			BPF_ALU32_IMM(BPF_MOV, R0, 1),
+			BPF_JMP_IMM(BPF_JNE, R1, -17104896, 1),
+			BPF_ALU32_IMM(BPF_MOV, R0, 2),
+			BPF_EXIT_INSN(),
+		},
+		INTERNAL,
+		{ },
+		{ { 0, 2 } },
+	},
+	{
+		"JNE signed compare, test 5",
+		.u.insns_int = {
+			BPF_LD_IMM64(R1, 0xfefb0000),
+			BPF_ALU32_IMM(BPF_MOV, R0, 1),
+			BPF_JMP_IMM(BPF_JNE, R1, 0xfefb0000, 1),
+			BPF_ALU32_IMM(BPF_MOV, R0, 2),
+			BPF_EXIT_INSN(),
+		},
+		INTERNAL,
+		{ },
+		{ { 0, 1 } },
+	},
+	{
+		"JNE signed compare, test 6",
+		.u.insns_int = {
+			BPF_LD_IMM64(R1, 0x7efb0000),
+			BPF_ALU32_IMM(BPF_MOV, R0, 1),
+			BPF_JMP_IMM(BPF_JNE, R1, 0x7efb0000, 1),
+			BPF_ALU32_IMM(BPF_MOV, R0, 2),
+			BPF_EXIT_INSN(),
+		},
+		INTERNAL,
+		{ },
+		{ { 0, 2 } },
+	},
+	{
+		"JNE signed compare, test 7",
+		.u.insns = {
+			BPF_STMT(BPF_LD | BPF_IMM, 0xffff0000),
+			BPF_STMT(BPF_MISC | BPF_TAX, 0),
+			BPF_STMT(BPF_LD | BPF_IMM, 0xfefbbc12),
+			BPF_STMT(BPF_ALU | BPF_AND | BPF_X, 0),
+			BPF_JUMP(BPF_JMP | BPF_JEQ | BPF_K, 0xfefb0000, 1, 0),
+			BPF_STMT(BPF_RET | BPF_K, 1),
+			BPF_STMT(BPF_RET | BPF_K, 2),
+		},
+		CLASSIC | FLAG_NO_DATA,
+		{},
+		{ { 0, 2 } },
+	},
 };
 
 static struct net_device dev;
diff --git a/lib/test_rhashtable.c b/lib/test_rhashtable.c
index 8e83cbdc049c..76d3667fdea2 100644
--- a/lib/test_rhashtable.c
+++ b/lib/test_rhashtable.c
@@ -162,11 +162,7 @@ static void test_bucket_stats(struct rhashtable *ht, unsigned int entries)
 		return;
 	}
 
-	err = rhashtable_walk_start(&hti);
-	if (err && err != -EAGAIN) {
-		pr_warn("Test failed: iterator failed: %d\n", err);
-		return;
-	}
+	rhashtable_walk_start(&hti);
 
 	while ((pos = rhashtable_walk_next(&hti))) {
 		if (PTR_ERR(pos) == -EAGAIN) {
diff --git a/net/8021q/vlanproc.c b/net/8021q/vlanproc.c
index 5f1446c9f098..a662ccc166df 100644
--- a/net/8021q/vlanproc.c
+++ b/net/8021q/vlanproc.c
@@ -80,7 +80,6 @@ static int vlan_seq_open(struct inode *inode, struct file *file)
 }
 
 static const struct file_operations vlan_fops = {
-	.owner	 = THIS_MODULE,
 	.open    = vlan_seq_open,
 	.read    = seq_read,
 	.llseek  = seq_lseek,
@@ -97,7 +96,6 @@ static int vlandev_seq_open(struct inode *inode, struct file *file)
 }
 
 static const struct file_operations vlandev_fops = {
-	.owner = THIS_MODULE,
 	.open    = vlandev_seq_open,
 	.read    = seq_read,
 	.llseek  = seq_lseek,
diff --git a/net/Kconfig b/net/Kconfig
index 9dba2715919d..37ec8e67af57 100644
--- a/net/Kconfig
+++ b/net/Kconfig
@@ -182,6 +182,7 @@ config BRIDGE_NETFILTER
 	depends on BRIDGE
 	depends on NETFILTER && INET
 	depends on NETFILTER_ADVANCED
+	select NETFILTER_FAMILY_BRIDGE
 	default m
 	---help---
 	  Enabling this option will let arptables resp. iptables see bridged
@@ -336,23 +337,6 @@ config NET_PKTGEN
 	  To compile this code as a module, choose M here: the
 	  module will be called pktgen.
 
-config NET_TCPPROBE
-	tristate "TCP connection probing"
-	depends on INET && PROC_FS && KPROBES
-	---help---
-	This module allows for capturing the changes to TCP connection
-	state in response to incoming packets. It is used for debugging
-	TCP congestion avoidance modules. If you don't understand
-	what was just said, you don't need it: say N.
-
-	Documentation on how to use TCP connection probing can be found
-	at:
-	
-	  http://www.linuxfoundation.org/collaborate/workgroups/networking/tcpprobe
-
-	To compile this code as a module, choose M here: the
-	module will be called tcp_probe.
-
 config NET_DROP_MONITOR
 	tristate "Network packet drop alerting service"
 	depends on INET && TRACEPOINTS
diff --git a/net/appletalk/aarp.c b/net/appletalk/aarp.c
index 309d7dbb36e8..d4c1021e74e1 100644
--- a/net/appletalk/aarp.c
+++ b/net/appletalk/aarp.c
@@ -1047,7 +1047,6 @@ static int aarp_seq_open(struct inode *inode, struct file *file)
 }
 
 const struct file_operations atalk_seq_arp_fops = {
-	.owner		= THIS_MODULE,
 	.open           = aarp_seq_open,
 	.read           = seq_read,
 	.llseek         = seq_lseek,
diff --git a/net/appletalk/atalk_proc.c b/net/appletalk/atalk_proc.c
index af46bc49e1e9..a3bf9d519193 100644
--- a/net/appletalk/atalk_proc.c
+++ b/net/appletalk/atalk_proc.c
@@ -226,7 +226,6 @@ static int atalk_seq_socket_open(struct inode *inode, struct file *file)
 }
 
 static const struct file_operations atalk_seq_interface_fops = {
-	.owner		= THIS_MODULE,
 	.open		= atalk_seq_interface_open,
 	.read		= seq_read,
 	.llseek		= seq_lseek,
@@ -234,7 +233,6 @@ static const struct file_operations atalk_seq_interface_fops = {
 };
 
 static const struct file_operations atalk_seq_route_fops = {
-	.owner		= THIS_MODULE,
 	.open		= atalk_seq_route_open,
 	.read		= seq_read,
 	.llseek		= seq_lseek,
@@ -242,7 +240,6 @@ static const struct file_operations atalk_seq_route_fops = {
 };
 
 static const struct file_operations atalk_seq_socket_fops = {
-	.owner		= THIS_MODULE,
 	.open		= atalk_seq_socket_open,
 	.read		= seq_read,
 	.llseek		= seq_lseek,
diff --git a/net/atm/br2684.c b/net/atm/br2684.c
index 4e111196f902..fd94bea36ee8 100644
--- a/net/atm/br2684.c
+++ b/net/atm/br2684.c
@@ -824,7 +824,6 @@ static int br2684_proc_open(struct inode *inode, struct file *file)
 }
 
 static const struct file_operations br2684_proc_ops = {
-	.owner = THIS_MODULE,
 	.open = br2684_proc_open,
 	.read = seq_read,
 	.llseek = seq_lseek,
diff --git a/net/atm/common.c b/net/atm/common.c
index 8f12f1c6fa14..6523f38c4957 100644
--- a/net/atm/common.c
+++ b/net/atm/common.c
@@ -14,7 +14,7 @@
 #include <linux/capability.h>
 #include <linux/mm.h>
 #include <linux/sched/signal.h>
-#include <linux/time.h>		/* struct timeval */
+#include <linux/time64.h>	/* 64-bit time for seconds */
 #include <linux/skbuff.h>
 #include <linux/bitops.h>
 #include <linux/init.h>
diff --git a/net/atm/lec.c b/net/atm/lec.c
index 6676e3433261..09a1f056712a 100644
--- a/net/atm/lec.c
+++ b/net/atm/lec.c
@@ -992,7 +992,6 @@ static int lec_seq_open(struct inode *inode, struct file *file)
 }
 
 static const struct file_operations lec_seq_fops = {
-	.owner = THIS_MODULE,
 	.open = lec_seq_open,
 	.read = seq_read,
 	.llseek = seq_lseek,
diff --git a/net/atm/mpc.c b/net/atm/mpc.c
index 7c6a1cc760a2..31e0dcb970f8 100644
--- a/net/atm/mpc.c
+++ b/net/atm/mpc.c
@@ -1089,7 +1089,7 @@ static void MPOA_trigger_rcvd(struct k_message *msg, struct mpoa_client *mpc)
 		msg->type = SND_MPOA_RES_RQST;
 		msg->content.in_info = entry->ctrl_info;
 		msg_to_mpoad(msg, mpc);
-		do_gettimeofday(&(entry->reply_wait));
+		entry->reply_wait = ktime_get_seconds();
 		mpc->in_ops->put(entry);
 		return;
 	}
@@ -1099,7 +1099,7 @@ static void MPOA_trigger_rcvd(struct k_message *msg, struct mpoa_client *mpc)
 		msg->type = SND_MPOA_RES_RQST;
 		msg->content.in_info = entry->ctrl_info;
 		msg_to_mpoad(msg, mpc);
-		do_gettimeofday(&(entry->reply_wait));
+		entry->reply_wait = ktime_get_seconds();
 		mpc->in_ops->put(entry);
 		return;
 	}
@@ -1175,8 +1175,9 @@ static void MPOA_res_reply_rcvd(struct k_message *msg, struct mpoa_client *mpc)
 	}
 
 	entry->ctrl_info = msg->content.in_info;
-	do_gettimeofday(&(entry->tv));
-	do_gettimeofday(&(entry->reply_wait)); /* Used in refreshing func from now on */
+	entry->time = ktime_get_seconds();
+	/* Used in refreshing func from now on */
+	entry->reply_wait = ktime_get_seconds();
 	entry->refresh_time = 0;
 	ddprintk_cont("entry->shortcut = %p\n", entry->shortcut);
 
diff --git a/net/atm/mpoa_caches.c b/net/atm/mpoa_caches.c
index e01450bb32d6..4bb418313720 100644
--- a/net/atm/mpoa_caches.c
+++ b/net/atm/mpoa_caches.c
@@ -117,7 +117,7 @@ static in_cache_entry *in_cache_add_entry(__be32 dst_ip,
 
 	memcpy(entry->MPS_ctrl_ATM_addr, client->mps_ctrl_addr, ATM_ESA_LEN);
 	entry->ctrl_info.in_dst_ip = dst_ip;
-	do_gettimeofday(&(entry->tv));
+	entry->time = ktime_get_seconds();
 	entry->retry_time = client->parameters.mpc_p4;
 	entry->count = 1;
 	entry->entry_state = INGRESS_INVALID;
@@ -148,7 +148,7 @@ static int cache_hit(in_cache_entry *entry, struct mpoa_client *mpc)
 			if (qos != NULL)
 				msg.qos = qos->qos;
 			msg_to_mpoad(&msg, mpc);
-			do_gettimeofday(&(entry->reply_wait));
+			entry->reply_wait = ktime_get_seconds();
 			entry->entry_state = INGRESS_RESOLVING;
 		}
 		if (entry->shortcut != NULL)
@@ -171,7 +171,7 @@ static int cache_hit(in_cache_entry *entry, struct mpoa_client *mpc)
 		if (qos != NULL)
 			msg.qos = qos->qos;
 		msg_to_mpoad(&msg, mpc);
-		do_gettimeofday(&(entry->reply_wait));
+		entry->reply_wait = ktime_get_seconds();
 	}
 
 	return CLOSED;
@@ -227,17 +227,16 @@ static void in_cache_remove_entry(in_cache_entry *entry,
 static void clear_count_and_expired(struct mpoa_client *client)
 {
 	in_cache_entry *entry, *next_entry;
-	struct timeval now;
+	time64_t now;
 
-	do_gettimeofday(&now);
+	now = ktime_get_seconds();
 
 	write_lock_bh(&client->ingress_lock);
 	entry = client->in_cache;
 	while (entry != NULL) {
 		entry->count = 0;
 		next_entry = entry->next;
-		if ((now.tv_sec - entry->tv.tv_sec)
-		   > entry->ctrl_info.holding_time) {
+		if ((now - entry->time) > entry->ctrl_info.holding_time) {
 			dprintk("holding time expired, ip = %pI4\n",
 				&entry->ctrl_info.in_dst_ip);
 			client->in_ops->remove_entry(entry, client);
@@ -253,35 +252,35 @@ static void check_resolving_entries(struct mpoa_client *client)
 
 	struct atm_mpoa_qos *qos;
 	in_cache_entry *entry;
-	struct timeval now;
+	time64_t now;
 	struct k_message msg;
 
-	do_gettimeofday(&now);
+	now = ktime_get_seconds();
 
 	read_lock_bh(&client->ingress_lock);
 	entry = client->in_cache;
 	while (entry != NULL) {
 		if (entry->entry_state == INGRESS_RESOLVING) {
-			if ((now.tv_sec - entry->hold_down.tv_sec) <
-			    client->parameters.mpc_p6) {
+
+			if ((now - entry->hold_down)
+					< client->parameters.mpc_p6) {
 				entry = entry->next;	/* Entry in hold down */
 				continue;
 			}
-			if ((now.tv_sec - entry->reply_wait.tv_sec) >
-			    entry->retry_time) {
+			if ((now - entry->reply_wait) > entry->retry_time) {
 				entry->retry_time = MPC_C1 * (entry->retry_time);
 				/*
 				 * Retry time maximum exceeded,
 				 * put entry in hold down.
 				 */
 				if (entry->retry_time > client->parameters.mpc_p5) {
-					do_gettimeofday(&(entry->hold_down));
+					entry->hold_down = ktime_get_seconds();
 					entry->retry_time = client->parameters.mpc_p4;
 					entry = entry->next;
 					continue;
 				}
 				/* Ask daemon to send a resolution request. */
-				memset(&(entry->hold_down), 0, sizeof(struct timeval));
+				memset(&entry->hold_down, 0, sizeof(time64_t));
 				msg.type = SND_MPOA_RES_RTRY;
 				memcpy(msg.MPS_ctrl, client->mps_ctrl_addr, ATM_ESA_LEN);
 				msg.content.in_info = entry->ctrl_info;
@@ -289,7 +288,7 @@ static void check_resolving_entries(struct mpoa_client *client)
 				if (qos != NULL)
 					msg.qos = qos->qos;
 				msg_to_mpoad(&msg, client);
-				do_gettimeofday(&(entry->reply_wait));
+				entry->reply_wait = ktime_get_seconds();
 			}
 		}
 		entry = entry->next;
@@ -300,18 +299,18 @@ static void check_resolving_entries(struct mpoa_client *client)
 /* Call this every MPC-p5 seconds. */
 static void refresh_entries(struct mpoa_client *client)
 {
-	struct timeval now;
+	time64_t now;
 	struct in_cache_entry *entry = client->in_cache;
 
 	ddprintk("refresh_entries\n");
-	do_gettimeofday(&now);
+	now = ktime_get_seconds();
 
 	read_lock_bh(&client->ingress_lock);
 	while (entry != NULL) {
 		if (entry->entry_state == INGRESS_RESOLVED) {
 			if (!(entry->refresh_time))
 				entry->refresh_time = (2 * (entry->ctrl_info.holding_time))/3;
-			if ((now.tv_sec - entry->reply_wait.tv_sec) >
+			if ((now - entry->reply_wait) >
 			    entry->refresh_time) {
 				dprintk("refreshing an entry.\n");
 				entry->entry_state = INGRESS_REFRESHING;
@@ -480,7 +479,7 @@ static eg_cache_entry *eg_cache_add_entry(struct k_message *msg,
 
 	memcpy(entry->MPS_ctrl_ATM_addr, client->mps_ctrl_addr, ATM_ESA_LEN);
 	entry->ctrl_info = msg->content.eg_info;
-	do_gettimeofday(&(entry->tv));
+	entry->time = ktime_get_seconds();
 	entry->entry_state = EGRESS_RESOLVED;
 	dprintk("new_eg_cache_entry cache_id %u\n",
 		ntohl(entry->ctrl_info.cache_id));
@@ -495,7 +494,7 @@ static eg_cache_entry *eg_cache_add_entry(struct k_message *msg,
 
 static void update_eg_cache_entry(eg_cache_entry *entry, uint16_t holding_time)
 {
-	do_gettimeofday(&(entry->tv));
+	entry->time = ktime_get_seconds();
 	entry->entry_state = EGRESS_RESOLVED;
 	entry->ctrl_info.holding_time = holding_time;
 }
@@ -503,17 +502,16 @@ static void update_eg_cache_entry(eg_cache_entry *entry, uint16_t holding_time)
 static void clear_expired(struct mpoa_client *client)
 {
 	eg_cache_entry *entry, *next_entry;
-	struct timeval now;
+	time64_t now;
 	struct k_message msg;
 
-	do_gettimeofday(&now);
+	now = ktime_get_seconds();
 
 	write_lock_irq(&client->egress_lock);
 	entry = client->eg_cache;
 	while (entry != NULL) {
 		next_entry = entry->next;
-		if ((now.tv_sec - entry->tv.tv_sec)
-		   > entry->ctrl_info.holding_time) {
+		if ((now - entry->time) > entry->ctrl_info.holding_time) {
 			msg.type = SND_EGRESS_PURGE;
 			msg.content.eg_info = entry->ctrl_info;
 			dprintk("egress_cache: holding time expired, cache_id = %u.\n",
diff --git a/net/atm/mpoa_caches.h b/net/atm/mpoa_caches.h
index 6a266669ebf4..464c4c7f8d1f 100644
--- a/net/atm/mpoa_caches.h
+++ b/net/atm/mpoa_caches.h
@@ -2,6 +2,7 @@
 #ifndef MPOA_CACHES_H
 #define MPOA_CACHES_H
 
+#include <linux/time64.h>
 #include <linux/netdevice.h>
 #include <linux/types.h>
 #include <linux/atm.h>
@@ -16,9 +17,9 @@ void atm_mpoa_init_cache(struct mpoa_client *mpc);
 typedef struct in_cache_entry {
 	struct in_cache_entry *next;
 	struct in_cache_entry *prev;
-	struct timeval  tv;
-	struct timeval  reply_wait;
-	struct timeval  hold_down;
+	time64_t  time;
+	time64_t  reply_wait;
+	time64_t  hold_down;
 	uint32_t  packets_fwded;
 	uint16_t  entry_state;
 	uint32_t retry_time;
@@ -53,7 +54,7 @@ struct in_cache_ops{
 typedef struct eg_cache_entry{
 	struct               eg_cache_entry *next;
 	struct               eg_cache_entry *prev;
-	struct               timeval  tv;
+	time64_t	     time;
 	uint8_t              MPS_ctrl_ATM_addr[ATM_ESA_LEN];
 	struct atm_vcc       *shortcut;
 	uint32_t             packets_rcvd;
diff --git a/net/atm/mpoa_proc.c b/net/atm/mpoa_proc.c
index 8a0c17e1c203..b93cc0f18292 100644
--- a/net/atm/mpoa_proc.c
+++ b/net/atm/mpoa_proc.c
@@ -8,7 +8,7 @@
 #include <linux/mm.h>
 #include <linux/module.h>
 #include <linux/proc_fs.h>
-#include <linux/time.h>
+#include <linux/ktime.h>
 #include <linux/seq_file.h>
 #include <linux/uaccess.h>
 #include <linux/atmmpc.h>
@@ -57,7 +57,6 @@ static int parse_qos(const char *buff);
  *   Define allowed FILE OPERATIONS
  */
 static const struct file_operations mpc_file_operations = {
-	.owner =	THIS_MODULE,
 	.open =		proc_mpc_open,
 	.read =		seq_read,
 	.llseek =	seq_lseek,
@@ -138,7 +137,7 @@ static int mpc_show(struct seq_file *m, void *v)
 	int i;
 	in_cache_entry *in_entry;
 	eg_cache_entry *eg_entry;
-	struct timeval now;
+	time64_t now;
 	unsigned char ip_string[16];
 
 	if (v == SEQ_START_TOKEN) {
@@ -148,15 +147,17 @@ static int mpc_show(struct seq_file *m, void *v)
 
 	seq_printf(m, "\nInterface %d:\n\n", mpc->dev_num);
 	seq_printf(m, "Ingress Entries:\nIP address      State      Holding time  Packets fwded  VPI  VCI\n");
-	do_gettimeofday(&now);
+	now = ktime_get_seconds();
 
 	for (in_entry = mpc->in_cache; in_entry; in_entry = in_entry->next) {
+		unsigned long seconds_delta = now - in_entry->time;
+
 		sprintf(ip_string, "%pI4", &in_entry->ctrl_info.in_dst_ip);
 		seq_printf(m, "%-16s%s%-14lu%-12u",
 			   ip_string,
 			   ingress_state_string(in_entry->entry_state),
 			   in_entry->ctrl_info.holding_time -
-			   (now.tv_sec-in_entry->tv.tv_sec),
+			   seconds_delta,
 			   in_entry->packets_fwded);
 		if (in_entry->shortcut)
 			seq_printf(m, "   %-3d  %-3d",
@@ -169,13 +170,14 @@ static int mpc_show(struct seq_file *m, void *v)
 	seq_printf(m, "Egress Entries:\nIngress MPC ATM addr\nCache-id        State      Holding time  Packets recvd  Latest IP addr   VPI VCI\n");
 	for (eg_entry = mpc->eg_cache; eg_entry; eg_entry = eg_entry->next) {
 		unsigned char *p = eg_entry->ctrl_info.in_MPC_data_ATM_addr;
+		unsigned long seconds_delta = now - eg_entry->time;
+
 		for (i = 0; i < ATM_ESA_LEN; i++)
 			seq_printf(m, "%02x", p[i]);
 		seq_printf(m, "\n%-16lu%s%-14lu%-15u",
 			   (unsigned long)ntohl(eg_entry->ctrl_info.cache_id),
 			   egress_state_string(eg_entry->entry_state),
-			   (eg_entry->ctrl_info.holding_time -
-			    (now.tv_sec-eg_entry->tv.tv_sec)),
+			   (eg_entry->ctrl_info.holding_time - seconds_delta),
 			   eg_entry->packets_rcvd);
 
 		/* latest IP address */
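In the /proc output above, the remaining lifetime of an entry is the configured holding time minus its 64-bit age; the delta is narrowed to unsigned long so it still matches the existing %lu format strings. A hypothetical helper equivalent to what the seq_printf() arguments compute:

#include <linux/ktime.h>
#include <linux/types.h>

/* Hypothetical; mirrors holding_time - (now - stamp) as printed above. */
static unsigned long remaining_holding_time(time64_t now, time64_t stamp,
					    u16 holding_time)
{
	unsigned long seconds_delta = now - stamp;

	return holding_time - seconds_delta;
}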
diff --git a/net/atm/proc.c b/net/atm/proc.c
index 642f9272ab95..edc48edc95c1 100644
--- a/net/atm/proc.c
+++ b/net/atm/proc.c
@@ -37,7 +37,6 @@ static ssize_t proc_dev_atm_read(struct file *file, char __user *buf,
 				 size_t count, loff_t *pos);
 
 static const struct file_operations proc_atm_dev_ops = {
-	.owner =	THIS_MODULE,
 	.read =		proc_dev_atm_read,
 	.llseek =	noop_llseek,
 };
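The .owner = THIS_MODULE removals here and in the ax25 hunks below follow a tree-wide cleanup: the proc core pins an entry while its file operations run, so the extra module reference was never needed (paraphrasing the rationale of that series). A minimal seq_file-backed proc entry in the resulting style, with hypothetical names:

#include <linux/module.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>

static int example_show(struct seq_file *m, void *v)
{
	seq_puts(m, "hello from procfs\n");
	return 0;
}

static int example_open(struct inode *inode, struct file *file)
{
	return single_open(file, example_show, NULL);
}

static const struct file_operations example_fops = {
	/* no .owner: the proc core protects the entry during access */
	.open =		example_open,
	.read =		seq_read,
	.llseek =	seq_lseek,
	.release =	single_release,
};

static int __init example_init(void)
{
	return proc_create("example", 0444, NULL, &example_fops) ? 0 : -ENOMEM;
}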
diff --git a/net/ax25/af_ax25.c b/net/ax25/af_ax25.c
index 06eac1f50c5e..47fdd399626b 100644
--- a/net/ax25/af_ax25.c
+++ b/net/ax25/af_ax25.c
@@ -1931,7 +1931,6 @@ static int ax25_info_open(struct inode *inode, struct file *file)
 }
 
 static const struct file_operations ax25_info_fops = {
-	.owner = THIS_MODULE,
 	.open = ax25_info_open,
 	.read = seq_read,
 	.llseek = seq_lseek,
diff --git a/net/ax25/ax25_route.c b/net/ax25/ax25_route.c
index 0446b892618a..525558972fd9 100644
--- a/net/ax25/ax25_route.c
+++ b/net/ax25/ax25_route.c
@@ -336,7 +336,6 @@ static int ax25_rt_info_open(struct inode *inode, struct file *file)
 }
 
 const struct file_operations ax25_route_fops = {
-	.owner = THIS_MODULE,
 	.open = ax25_rt_info_open,
 	.read = seq_read,
 	.llseek = seq_lseek,
diff --git a/net/ax25/ax25_uid.c b/net/ax25/ax25_uid.c
index 83b035f56202..4ebe91ba317a 100644
--- a/net/ax25/ax25_uid.c
+++ b/net/ax25/ax25_uid.c
@@ -194,7 +194,6 @@ static int ax25_uid_info_open(struct inode *inode, struct file *file)
 }
 
 const struct file_operations ax25_uid_fops = {
-	.owner = THIS_MODULE,
 	.open = ax25_uid_info_open,
 	.read = seq_read,
 	.llseek = seq_lseek,
diff --git a/net/batman-adv/Kconfig b/net/batman-adv/Kconfig
index b73b96a2854b..c44f6515be5e 100644
--- a/net/batman-adv/Kconfig
+++ b/net/batman-adv/Kconfig
@@ -1,3 +1,20 @@
+# SPDX-License-Identifier: GPL-2.0
+# Copyright (C) 2007-2017  B.A.T.M.A.N. contributors:
+#
+# Marek Lindner, Simon Wunderlich
+#
+# This program is free software; you can redistribute it and/or
+# modify it under the terms of version 2 of the GNU General Public
+# License as published by the Free Software Foundation.
+#
+# This program is distributed in the hope that it will be useful, but
+# WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+# General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, see <http://www.gnu.org/licenses/>.
+
 #
 # B.A.T.M.A.N. meshing protocol
 #
diff --git a/net/batman-adv/Makefile b/net/batman-adv/Makefile
index 915987bc6d29..022f6e77307b 100644
--- a/net/batman-adv/Makefile
+++ b/net/batman-adv/Makefile
@@ -1,4 +1,4 @@
-#
+# SPDX-License-Identifier: GPL-2.0
 # Copyright (C) 2007-2017  B.A.T.M.A.N. contributors:
 #
 # Marek Lindner, Simon Wunderlich
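The SPDX identifiers added throughout batman-adv use the comment style the kernel's license rules prescribe per file type, which is why the same tag appears in three shapes in this patch:

// SPDX-License-Identifier: GPL-2.0      first line of .c files
/* SPDX-License-Identifier: GPL-2.0 */   first line of .h (and assembler) files
# SPDX-License-Identifier: GPL-2.0       Makefiles, Kconfig files and scripts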
diff --git a/net/batman-adv/bat_algo.c b/net/batman-adv/bat_algo.c
index 44fd073b7546..80c72c7d3cad 100644
--- a/net/batman-adv/bat_algo.c
+++ b/net/batman-adv/bat_algo.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
 /* Copyright (C) 2007-2017  B.A.T.M.A.N. contributors:
  *
  * Marek Lindner, Simon Wunderlich
@@ -37,7 +38,8 @@ char batadv_routing_algo[20] = "BATMAN_IV";
 static struct hlist_head batadv_algo_list;
 
 /**
- * batadv_algo_init - Initialize batman-adv algorithm management data structures
+ * batadv_algo_init() - Initialize batman-adv algorithm management data
+ *  structures
  */
 void batadv_algo_init(void)
 {
@@ -59,6 +61,12 @@ static struct batadv_algo_ops *batadv_algo_get(char *name)
 	return bat_algo_ops;
 }
 
+/**
+ * batadv_algo_register() - Register callbacks for a mesh algorithm
+ * @bat_algo_ops: mesh algorithm callbacks to add
+ *
+ * Return: 0 on success or negative error number in case of failure
+ */
 int batadv_algo_register(struct batadv_algo_ops *bat_algo_ops)
 {
 	struct batadv_algo_ops *bat_algo_ops_tmp;
@@ -88,6 +96,19 @@ int batadv_algo_register(struct batadv_algo_ops *bat_algo_ops)
 	return 0;
 }
 
+/**
+ * batadv_algo_select() - Select algorithm of soft interface
+ * @bat_priv: the bat priv with all the soft interface information
+ * @name: name of the algorithm to select
+ *
+ * The algorithm callbacks for the soft interface will be set when the
+ * algorithm with the matching name is found. Any previously selected
+ * algorithm will not be deinitialized, and the newly selected algorithm will
+ * not be initialized either. It is therefore only allowed to call
+ * batadv_algo_select() from the creation function of the soft interface.
+ *
+ * Return: 0 on success or negative error number in case of failure
+ */
 int batadv_algo_select(struct batadv_priv *bat_priv, char *name)
 {
 	struct batadv_algo_ops *bat_algo_ops;
@@ -102,6 +123,14 @@ int batadv_algo_select(struct batadv_priv *bat_priv, char *name)
 }
 
 #ifdef CONFIG_BATMAN_ADV_DEBUGFS
+
+/**
+ * batadv_algo_seq_print_text() - Print the supported algorithms in a seq file
+ * @seq: seq file to print on
+ * @offset: not used
+ *
+ * Return: always 0
+ */
 int batadv_algo_seq_print_text(struct seq_file *seq, void *offset)
 {
 	struct batadv_algo_ops *bat_algo_ops;
@@ -148,7 +177,7 @@ module_param_cb(routing_algo, &batadv_param_ops_ra, &batadv_param_string_ra,
 		0644);
 
 /**
- * batadv_algo_dump_entry - fill in information about one supported routing
+ * batadv_algo_dump_entry() - fill in information about one supported routing
  *  algorithm
  * @msg: netlink message to be sent back
  * @portid: Port to reply to
@@ -179,7 +208,7 @@ static int batadv_algo_dump_entry(struct sk_buff *msg, u32 portid, u32 seq,
 }
 
 /**
- * batadv_algo_dump - fill in information about supported routing
+ * batadv_algo_dump() - fill in information about supported routing
  *  algorithms
  * @msg: netlink message to be sent back
  * @cb: Parameters to the netlink request
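Most of the remaining batman-adv hunks only append () to the function name in kernel-doc headers, so scripts/kernel-doc and the Sphinx build reliably recognize them as function documentation. The canonical shape these comments converge on (the name below is a placeholder):

/**
 * batadv_example() - Short summary of what the function does
 * @arg: description of the parameter
 *
 * Optional longer description.
 *
 * Return: 0 on success or negative error number in case of failure
 */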
diff --git a/net/batman-adv/bat_algo.h b/net/batman-adv/bat_algo.h
index 29f6312f9bf1..029221615ba3 100644
--- a/net/batman-adv/bat_algo.h
+++ b/net/batman-adv/bat_algo.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
 /* Copyright (C) 2011-2017  B.A.T.M.A.N. contributors:
  *
  * Marek Lindner, Linus Lüssing
diff --git a/net/batman-adv/bat_iv_ogm.c b/net/batman-adv/bat_iv_ogm.c
index bbe8414b6ee7..79e326383726 100644
--- a/net/batman-adv/bat_iv_ogm.c
+++ b/net/batman-adv/bat_iv_ogm.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
 /* Copyright (C) 2007-2017  B.A.T.M.A.N. contributors:
  *
  * Marek Lindner, Simon Wunderlich
@@ -26,7 +27,7 @@
 #include <linux/cache.h>
 #include <linux/errno.h>
 #include <linux/etherdevice.h>
-#include <linux/fs.h>
+#include <linux/gfp.h>
 #include <linux/if_ether.h>
 #include <linux/init.h>
 #include <linux/jiffies.h>
@@ -51,6 +52,7 @@
 #include <linux/workqueue.h>
 #include <net/genetlink.h>
 #include <net/netlink.h>
+#include <uapi/linux/batadv_packet.h>
 #include <uapi/linux/batman_adv.h>
 
 #include "bat_algo.h"
@@ -62,7 +64,6 @@
 #include "netlink.h"
 #include "network-coding.h"
 #include "originator.h"
-#include "packet.h"
 #include "routing.h"
 #include "send.h"
 #include "translation-table.h"
@@ -72,21 +73,28 @@ static void batadv_iv_send_outstanding_bat_ogm_packet(struct work_struct *work);
 
 /**
  * enum batadv_dup_status - duplicate status
- * @BATADV_NO_DUP: the packet is no duplicate
- * @BATADV_ORIG_DUP: OGM is a duplicate in the originator (but not for the
- *  neighbor)
- * @BATADV_NEIGH_DUP: OGM is a duplicate for the neighbor
- * @BATADV_PROTECTED: originator is currently protected (after reboot)
  */
 enum batadv_dup_status {
+	/** @BATADV_NO_DUP: the packet is not a duplicate */
 	BATADV_NO_DUP = 0,
+
+	/**
+	 * @BATADV_ORIG_DUP: OGM is a duplicate in the originator (but not for
+	 *  the neighbor)
+	 */
 	BATADV_ORIG_DUP,
+
+	/** @BATADV_NEIGH_DUP: OGM is a duplicate for the neighbor */
 	BATADV_NEIGH_DUP,
+
+	/**
+	 * @BATADV_PROTECTED: originator is currently protected (after reboot)
+	 */
 	BATADV_PROTECTED,
 };
 
 /**
- * batadv_ring_buffer_set - update the ring buffer with the given value
+ * batadv_ring_buffer_set() - update the ring buffer with the given value
  * @lq_recv: pointer to the ring buffer
  * @lq_index: index to store the value at
  * @value: value to store in the ring buffer
@@ -98,7 +106,7 @@ static void batadv_ring_buffer_set(u8 lq_recv[], u8 *lq_index, u8 value)
 }
 
 /**
- * batadv_ring_buffer_avg - compute the average of all non-zero values stored
+ * batadv_ring_buffer_avg() - compute the average of all non-zero values stored
  * in the given ring buffer
  * @lq_recv: pointer to the ring buffer
  *
@@ -130,7 +138,7 @@ static u8 batadv_ring_buffer_avg(const u8 lq_recv[])
 }
 
 /**
- * batadv_iv_ogm_orig_free - free the private resources allocated for this
+ * batadv_iv_ogm_orig_free() - free the private resources allocated for this
  *  orig_node
  * @orig_node: the orig_node for which the resources have to be free'd
  */
@@ -141,8 +149,8 @@ static void batadv_iv_ogm_orig_free(struct batadv_orig_node *orig_node)
 }
 
 /**
- * batadv_iv_ogm_orig_add_if - change the private structures of the orig_node to
- *  include the new hard-interface
+ * batadv_iv_ogm_orig_add_if() - change the private structures of the orig_node
+ *  to include the new hard-interface
  * @orig_node: the orig_node that has to be changed
  * @max_if_num: the current amount of interfaces
  *
@@ -186,7 +194,7 @@ unlock:
 }
 
 /**
- * batadv_iv_ogm_drop_bcast_own_entry - drop section of bcast_own
+ * batadv_iv_ogm_drop_bcast_own_entry() - drop section of bcast_own
  * @orig_node: the orig_node that has to be changed
  * @max_if_num: the current amount of interfaces
  * @del_if_num: the index of the interface being removed
@@ -224,7 +232,7 @@ batadv_iv_ogm_drop_bcast_own_entry(struct batadv_orig_node *orig_node,
 }
 
 /**
- * batadv_iv_ogm_drop_bcast_own_sum_entry - drop section of bcast_own_sum
+ * batadv_iv_ogm_drop_bcast_own_sum_entry() - drop section of bcast_own_sum
  * @orig_node: the orig_node that has to be changed
  * @max_if_num: the current amount of interfaces
  * @del_if_num: the index of the interface being removed
@@ -259,8 +267,8 @@ batadv_iv_ogm_drop_bcast_own_sum_entry(struct batadv_orig_node *orig_node,
 }
 
 /**
- * batadv_iv_ogm_orig_del_if - change the private structures of the orig_node to
- *  exclude the removed interface
+ * batadv_iv_ogm_orig_del_if() - change the private structures of the orig_node
+ *  to exclude the removed interface
  * @orig_node: the orig_node that has to be changed
  * @max_if_num: the current amount of interfaces
  * @del_if_num: the index of the interface being removed
@@ -290,7 +298,8 @@ static int batadv_iv_ogm_orig_del_if(struct batadv_orig_node *orig_node,
 }
 
 /**
- * batadv_iv_ogm_orig_get - retrieve or create (if does not exist) an originator
+ * batadv_iv_ogm_orig_get() - retrieve or create (if does not exist) an
+ *  originator
  * @bat_priv: the bat priv with all the soft interface information
  * @addr: mac address of the originator
  *
@@ -447,7 +456,7 @@ static u8 batadv_hop_penalty(u8 tq, const struct batadv_priv *bat_priv)
 }
 
 /**
- * batadv_iv_ogm_aggr_packet - checks if there is another OGM attached
+ * batadv_iv_ogm_aggr_packet() - checks if there is another OGM attached
  * @buff_pos: current position in the skb
  * @packet_len: total length of the skb
  * @tvlv_len: tvlv length of the previously considered OGM
@@ -557,7 +566,7 @@ static void batadv_iv_ogm_emit(struct batadv_forw_packet *forw_packet)
 }
 
 /**
- * batadv_iv_ogm_can_aggregate - find out if an OGM can be aggregated on an
+ * batadv_iv_ogm_can_aggregate() - find out if an OGM can be aggregated on an
  *  existing forward packet
  * @new_bat_ogm_packet: OGM packet to be aggregated
  * @bat_priv: the bat priv with all the soft interface information
@@ -660,7 +669,7 @@ out:
 }
 
 /**
- * batadv_iv_ogm_aggregate_new - create a new aggregated packet and add this
+ * batadv_iv_ogm_aggregate_new() - create a new aggregated packet and add this
  *  packet to it.
  * @packet_buff: pointer to the OGM
  * @packet_len: (total) length of the OGM
@@ -743,7 +752,7 @@ static void batadv_iv_ogm_aggregate(struct batadv_forw_packet *forw_packet_aggr,
 }
 
 /**
- * batadv_iv_ogm_queue_add - queue up an OGM for transmission
+ * batadv_iv_ogm_queue_add() - queue up an OGM for transmission
  * @bat_priv: the bat priv with all the soft interface information
  * @packet_buff: pointer to the OGM
  * @packet_len: (total) length of the OGM
@@ -869,8 +878,8 @@ static void batadv_iv_ogm_forward(struct batadv_orig_node *orig_node,
 }
 
 /**
- * batadv_iv_ogm_slide_own_bcast_window - bitshift own OGM broadcast windows for
- * the given interface
+ * batadv_iv_ogm_slide_own_bcast_window() - bitshift own OGM broadcast windows
+ *  for the given interface
  * @hard_iface: the interface for which the windows have to be shifted
  */
 static void
@@ -987,7 +996,7 @@ out:
 }
 
 /**
- * batadv_iv_ogm_orig_update - use OGM to update corresponding data in an
+ * batadv_iv_ogm_orig_update() - use OGM to update corresponding data in an
  *  originator
  * @bat_priv: the bat priv with all the soft interface information
  * @orig_node: the orig node who originally emitted the ogm packet
@@ -1152,7 +1161,7 @@ out:
 }
 
 /**
- * batadv_iv_ogm_calc_tq - calculate tq for current received ogm packet
+ * batadv_iv_ogm_calc_tq() - calculate tq for current received ogm packet
  * @orig_node: the orig node who originally emitted the ogm packet
  * @orig_neigh_node: the orig node struct of the neighbor who sent the packet
  * @batadv_ogm_packet: the ogm packet
@@ -1298,7 +1307,7 @@ out:
 }
 
 /**
- * batadv_iv_ogm_update_seqnos -  process a batman packet for all interfaces,
+ * batadv_iv_ogm_update_seqnos() - process a batman packet for all interfaces,
  *  adjust the sequence number and find out whether it is a duplicate
  * @ethhdr: ethernet header of the packet
  * @batadv_ogm_packet: OGM packet to be considered
@@ -1401,7 +1410,8 @@ out:
 }
 
 /**
- * batadv_iv_ogm_process_per_outif - process a batman iv OGM for an outgoing if
+ * batadv_iv_ogm_process_per_outif() - process a batman iv OGM for an outgoing
+ *  interface
  * @skb: the skb containing the OGM
  * @ogm_offset: offset from skb->data to start of ogm header
  * @orig_node: the (cached) orig node for the originator of this OGM
@@ -1608,7 +1618,7 @@ out:
 }
 
 /**
- * batadv_iv_ogm_process - process an incoming batman iv OGM
+ * batadv_iv_ogm_process() - process an incoming batman iv OGM
  * @skb: the skb containing the OGM
  * @ogm_offset: offset to the OGM which should be processed (for aggregates)
 * @if_incoming: the interface where this packet was received
@@ -1861,7 +1871,7 @@ free_skb:
 
 #ifdef CONFIG_BATMAN_ADV_DEBUGFS
 /**
- * batadv_iv_ogm_orig_print_neigh - print neighbors for the originator table
+ * batadv_iv_ogm_orig_print_neigh() - print neighbors for the originator table
  * @orig_node: the orig_node for which the neighbors are printed
  * @if_outgoing: outgoing interface for these entries
  * @seq: debugfs table seq_file struct
@@ -1890,7 +1900,7 @@ batadv_iv_ogm_orig_print_neigh(struct batadv_orig_node *orig_node,
 }
 
 /**
- * batadv_iv_ogm_orig_print - print the originator table
+ * batadv_iv_ogm_orig_print() - print the originator table
  * @bat_priv: the bat priv with all the soft interface information
  * @seq: debugfs table seq_file struct
  * @if_outgoing: the outgoing interface for which this should be printed
@@ -1960,7 +1970,7 @@ next:
 #endif
 
 /**
- * batadv_iv_ogm_neigh_get_tq_avg - Get the TQ average for a neighbour on a
+ * batadv_iv_ogm_neigh_get_tq_avg() - Get the TQ average for a neighbour on a
  *  given outgoing interface.
  * @neigh_node: Neighbour of interest
  * @if_outgoing: Outgoing interface of interest
@@ -1986,7 +1996,7 @@ batadv_iv_ogm_neigh_get_tq_avg(struct batadv_neigh_node *neigh_node,
 }
 
 /**
- * batadv_iv_ogm_orig_dump_subentry - Dump an originator subentry into a
+ * batadv_iv_ogm_orig_dump_subentry() - Dump an originator subentry into a
  *  message
  * @msg: Netlink message to dump into
  * @portid: Port making netlink request
@@ -2048,7 +2058,7 @@ batadv_iv_ogm_orig_dump_subentry(struct sk_buff *msg, u32 portid, u32 seq,
 }
 
 /**
- * batadv_iv_ogm_orig_dump_entry - Dump an originator entry into a message
+ * batadv_iv_ogm_orig_dump_entry() - Dump an originator entry into a message
  * @msg: Netlink message to dump into
  * @portid: Port making netlink request
  * @seq: Sequence number of netlink message
@@ -2110,7 +2120,7 @@ batadv_iv_ogm_orig_dump_entry(struct sk_buff *msg, u32 portid, u32 seq,
 }
 
 /**
- * batadv_iv_ogm_orig_dump_bucket - Dump an originator bucket into a
+ * batadv_iv_ogm_orig_dump_bucket() - Dump an originator bucket into a
  *  message
  * @msg: Netlink message to dump into
  * @portid: Port making netlink request
@@ -2153,7 +2163,7 @@ batadv_iv_ogm_orig_dump_bucket(struct sk_buff *msg, u32 portid, u32 seq,
 }
 
 /**
- * batadv_iv_ogm_orig_dump - Dump the originators into a message
+ * batadv_iv_ogm_orig_dump() - Dump the originators into a message
  * @msg: Netlink message to dump into
  * @cb: Control block containing additional options
  * @bat_priv: The bat priv with all the soft interface information
@@ -2190,7 +2200,7 @@ batadv_iv_ogm_orig_dump(struct sk_buff *msg, struct netlink_callback *cb,
 
 #ifdef CONFIG_BATMAN_ADV_DEBUGFS
 /**
- * batadv_iv_hardif_neigh_print - print a single hop neighbour node
+ * batadv_iv_hardif_neigh_print() - print a single hop neighbour node
  * @seq: neighbour table seq_file struct
  * @hardif_neigh: hardif neighbour information
  */
@@ -2209,7 +2219,7 @@ batadv_iv_hardif_neigh_print(struct seq_file *seq,
 }
 
 /**
- * batadv_iv_ogm_neigh_print - print the single hop neighbour list
+ * batadv_iv_ogm_neigh_print() - print the single hop neighbour list
  * @bat_priv: the bat priv with all the soft interface information
  * @seq: neighbour table seq_file struct
  */
@@ -2242,7 +2252,7 @@ static void batadv_iv_neigh_print(struct batadv_priv *bat_priv,
 #endif
 
 /**
- * batadv_iv_ogm_neigh_diff - calculate tq difference of two neighbors
+ * batadv_iv_ogm_neigh_diff() - calculate tq difference of two neighbors
  * @neigh1: the first neighbor object of the comparison
  * @if_outgoing1: outgoing interface for the first neighbor
  * @neigh2: the second neighbor object of the comparison
@@ -2287,7 +2297,7 @@ out:
 }
 
 /**
- * batadv_iv_ogm_neigh_dump_neigh - Dump a neighbour into a netlink message
+ * batadv_iv_ogm_neigh_dump_neigh() - Dump a neighbour into a netlink message
  * @msg: Netlink message to dump into
  * @portid: Port making netlink request
  * @seq: Sequence number of netlink message
@@ -2326,7 +2336,7 @@ batadv_iv_ogm_neigh_dump_neigh(struct sk_buff *msg, u32 portid, u32 seq,
 }
 
 /**
- * batadv_iv_ogm_neigh_dump_hardif - Dump the neighbours of a hard interface
+ * batadv_iv_ogm_neigh_dump_hardif() - Dump the neighbours of a hard interface
  *  into a message
  * @msg: Netlink message to dump into
  * @portid: Port making netlink request
@@ -2365,7 +2375,7 @@ batadv_iv_ogm_neigh_dump_hardif(struct sk_buff *msg, u32 portid, u32 seq,
 }
 
 /**
- * batadv_iv_ogm_neigh_dump - Dump the neighbours into a message
+ * batadv_iv_ogm_neigh_dump() - Dump the neighbours into a message
  * @msg: Netlink message to dump into
  * @cb: Control block containing additional options
  * @bat_priv: The bat priv with all the soft interface information
@@ -2417,7 +2427,7 @@ batadv_iv_ogm_neigh_dump(struct sk_buff *msg, struct netlink_callback *cb,
 }
 
 /**
- * batadv_iv_ogm_neigh_cmp - compare the metrics of two neighbors
+ * batadv_iv_ogm_neigh_cmp() - compare the metrics of two neighbors
  * @neigh1: the first neighbor object of the comparison
  * @if_outgoing1: outgoing interface for the first neighbor
  * @neigh2: the second neighbor object of the comparison
@@ -2443,7 +2453,7 @@ static int batadv_iv_ogm_neigh_cmp(struct batadv_neigh_node *neigh1,
 }
 
 /**
- * batadv_iv_ogm_neigh_is_sob - check if neigh1 is similarly good or better
+ * batadv_iv_ogm_neigh_is_sob() - check if neigh1 is similarly good or better
 *  than neigh2 from the metric perspective
  * @neigh1: the first neighbor object of the comparison
  * @if_outgoing1: outgoing interface for the first neighbor
@@ -2478,7 +2488,7 @@ static void batadv_iv_iface_activate(struct batadv_hard_iface *hard_iface)
 }
 
 /**
- * batadv_iv_init_sel_class - initialize GW selection class
+ * batadv_iv_init_sel_class() - initialize GW selection class
  * @bat_priv: the bat priv with all the soft interface information
  */
 static void batadv_iv_init_sel_class(struct batadv_priv *bat_priv)
@@ -2703,7 +2713,7 @@ static void batadv_iv_gw_print(struct batadv_priv *bat_priv,
 #endif
 
 /**
- * batadv_iv_gw_dump_entry - Dump a gateway into a message
+ * batadv_iv_gw_dump_entry() - Dump a gateway into a message
  * @msg: Netlink message to dump into
  * @portid: Port making netlink request
  * @seq: Sequence number of netlink message
@@ -2774,7 +2784,7 @@ out:
 }
 
 /**
- * batadv_iv_gw_dump - Dump gateways into a message
+ * batadv_iv_gw_dump() - Dump gateways into a message
  * @msg: Netlink message to dump into
  * @cb: Control block containing additional options
  * @bat_priv: The bat priv with all the soft interface information
@@ -2843,6 +2853,11 @@ static struct batadv_algo_ops batadv_batman_iv __read_mostly = {
 	},
 };
 
+/**
+ * batadv_iv_init() - B.A.T.M.A.N. IV initialization function
+ *
+ * Return: 0 on success or negative error number in case of failure
+ */
 int __init batadv_iv_init(void)
 {
 	int ret;
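The batadv_dup_status hunk near the top of this file also converts the enum documentation from one @member list in the header comment to inline member comments, the form kernel-doc prefers because each description stays next to its enumerator. The pattern, with hypothetical names:

/**
 * enum example_state - lifecycle of a hypothetical cache entry
 */
enum example_state {
	/** @EXAMPLE_NEW: the entry was just created */
	EXAMPLE_NEW = 0,

	/** @EXAMPLE_RESOLVED: the entry has been resolved */
	EXAMPLE_RESOLVED,
};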
diff --git a/net/batman-adv/bat_iv_ogm.h b/net/batman-adv/bat_iv_ogm.h
index ae2ab526bdb1..9dc0dd5c83df 100644
--- a/net/batman-adv/bat_iv_ogm.h
+++ b/net/batman-adv/bat_iv_ogm.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
 /* Copyright (C) 2007-2017  B.A.T.M.A.N. contributors:
  *
  * Marek Lindner, Simon Wunderlich
diff --git a/net/batman-adv/bat_v.c b/net/batman-adv/bat_v.c
index e0e2bfcd6b3e..27e165ac9302 100644
--- a/net/batman-adv/bat_v.c
+++ b/net/batman-adv/bat_v.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
 /* Copyright (C) 2013-2017  B.A.T.M.A.N. contributors:
  *
  * Linus Lüssing, Marek Lindner
@@ -36,6 +37,7 @@
 #include <linux/workqueue.h>
 #include <net/genetlink.h>
 #include <net/netlink.h>
+#include <uapi/linux/batadv_packet.h>
 #include <uapi/linux/batman_adv.h>
 
 #include "bat_algo.h"
@@ -48,7 +50,6 @@
 #include "log.h"
 #include "netlink.h"
 #include "originator.h"
-#include "packet.h"
 
 struct sk_buff;
 
@@ -99,7 +100,7 @@ static void batadv_v_primary_iface_set(struct batadv_hard_iface *hard_iface)
 }
 
 /**
- * batadv_v_iface_update_mac - react to hard-interface MAC address change
+ * batadv_v_iface_update_mac() - react to hard-interface MAC address change
  * @hard_iface: the modified interface
  *
  * If the modified interface is the primary one, update the originator
@@ -130,7 +131,7 @@ batadv_v_hardif_neigh_init(struct batadv_hardif_neigh_node *hardif_neigh)
 
 #ifdef CONFIG_BATMAN_ADV_DEBUGFS
 /**
- * batadv_v_orig_print_neigh - print neighbors for the originator table
+ * batadv_v_orig_print_neigh() - print neighbors for the originator table
  * @orig_node: the orig_node for which the neighbors are printed
  * @if_outgoing: outgoing interface for these entries
  * @seq: debugfs table seq_file struct
@@ -160,7 +161,7 @@ batadv_v_orig_print_neigh(struct batadv_orig_node *orig_node,
 }
 
 /**
- * batadv_v_hardif_neigh_print - print a single ELP neighbour node
+ * batadv_v_hardif_neigh_print() - print a single ELP neighbour node
  * @seq: neighbour table seq_file struct
  * @hardif_neigh: hardif neighbour information
  */
@@ -181,7 +182,7 @@ batadv_v_hardif_neigh_print(struct seq_file *seq,
 }
 
 /**
- * batadv_v_neigh_print - print the single hop neighbour list
+ * batadv_v_neigh_print() - print the single hop neighbour list
  * @bat_priv: the bat priv with all the soft interface information
  * @seq: neighbour table seq_file struct
  */
@@ -215,7 +216,7 @@ static void batadv_v_neigh_print(struct batadv_priv *bat_priv,
 #endif
 
 /**
- * batadv_v_neigh_dump_neigh - Dump a neighbour into a message
+ * batadv_v_neigh_dump_neigh() - Dump a neighbour into a message
  * @msg: Netlink message to dump into
  * @portid: Port making netlink request
  * @seq: Sequence number of netlink message
@@ -258,7 +259,7 @@ batadv_v_neigh_dump_neigh(struct sk_buff *msg, u32 portid, u32 seq,
 }
 
 /**
- * batadv_v_neigh_dump_hardif - Dump the  neighbours of a hard interface  into
+ * batadv_v_neigh_dump_hardif() - Dump the neighbours of a hard interface into
  *  a message
  * @msg: Netlink message to dump into
  * @portid: Port making netlink request
@@ -296,7 +297,7 @@ batadv_v_neigh_dump_hardif(struct sk_buff *msg, u32 portid, u32 seq,
 }
 
 /**
- * batadv_v_neigh_dump - Dump the neighbours of a hard interface  into a
+ * batadv_v_neigh_dump() - Dump the neighbours of a hard interface into a
  *  message
  * @msg: Netlink message to dump into
  * @cb: Control block containing additional options
@@ -348,7 +349,7 @@ batadv_v_neigh_dump(struct sk_buff *msg, struct netlink_callback *cb,
 
 #ifdef CONFIG_BATMAN_ADV_DEBUGFS
 /**
- * batadv_v_orig_print - print the originator table
+ * batadv_v_orig_print() - print the originator table
  * @bat_priv: the bat priv with all the soft interface information
  * @seq: debugfs table seq_file struct
  * @if_outgoing: the outgoing interface for which this should be printed
@@ -416,8 +417,7 @@ next:
 #endif
 
 /**
- * batadv_v_orig_dump_subentry - Dump an originator subentry into a
- *  message
+ * batadv_v_orig_dump_subentry() - Dump an originator subentry into a message
  * @msg: Netlink message to dump into
  * @portid: Port making netlink request
  * @seq: Sequence number of netlink message
@@ -483,7 +483,7 @@ batadv_v_orig_dump_subentry(struct sk_buff *msg, u32 portid, u32 seq,
 }
 
 /**
- * batadv_v_orig_dump_entry - Dump an originator entry into a message
+ * batadv_v_orig_dump_entry() - Dump an originator entry into a message
  * @msg: Netlink message to dump into
  * @portid: Port making netlink request
  * @seq: Sequence number of netlink message
@@ -536,8 +536,7 @@ batadv_v_orig_dump_entry(struct sk_buff *msg, u32 portid, u32 seq,
 }
 
 /**
- * batadv_v_orig_dump_bucket - Dump an originator bucket into a
- *  message
+ * batadv_v_orig_dump_bucket() - Dump an originator bucket into a message
  * @msg: Netlink message to dump into
  * @portid: Port making netlink request
  * @seq: Sequence number of netlink message
@@ -578,7 +577,7 @@ batadv_v_orig_dump_bucket(struct sk_buff *msg, u32 portid, u32 seq,
 }
 
 /**
- * batadv_v_orig_dump - Dump the originators into a message
+ * batadv_v_orig_dump() - Dump the originators into a message
  * @msg: Netlink message to dump into
  * @cb: Control block containing additional options
  * @bat_priv: The bat priv with all the soft interface information
@@ -668,7 +667,7 @@ err_ifinfo1:
 }
 
 /**
- * batadv_v_init_sel_class - initialize GW selection class
+ * batadv_v_init_sel_class() - initialize GW selection class
  * @bat_priv: the bat priv with all the soft interface information
  */
 static void batadv_v_init_sel_class(struct batadv_priv *bat_priv)
@@ -704,7 +703,7 @@ static ssize_t batadv_v_show_sel_class(struct batadv_priv *bat_priv, char *buff)
 }
 
 /**
- * batadv_v_gw_throughput_get - retrieve the GW-bandwidth for a given GW
+ * batadv_v_gw_throughput_get() - retrieve the GW-bandwidth for a given GW
  * @gw_node: the GW to retrieve the metric for
  * @bw: the pointer where the metric will be stored. The metric is computed as
  *  the minimum between the GW advertised throughput and the path throughput to
@@ -747,7 +746,7 @@ out:
 }
 
 /**
- * batadv_v_gw_get_best_gw_node - retrieve the best GW node
+ * batadv_v_gw_get_best_gw_node() - retrieve the best GW node
  * @bat_priv: the bat priv with all the soft interface information
  *
  * Return: the GW node having the best GW-metric, NULL if no GW is known
@@ -785,7 +784,7 @@ next:
 }
 
 /**
- * batadv_v_gw_is_eligible - check if a originator would be selected as GW
+ * batadv_v_gw_is_eligible() - check if a originator would be selected as GW
  * @bat_priv: the bat priv with all the soft interface information
  * @curr_gw_orig: originator representing the currently selected GW
  * @orig_node: the originator representing the new candidate
@@ -884,7 +883,7 @@ out:
 }
 
 /**
- * batadv_v_gw_print - print the gateway list
+ * batadv_v_gw_print() - print the gateway list
  * @bat_priv: the bat priv with all the soft interface information
  * @seq: gateway table seq_file struct
  */
@@ -913,7 +912,7 @@ static void batadv_v_gw_print(struct batadv_priv *bat_priv,
 #endif
 
 /**
- * batadv_v_gw_dump_entry - Dump a gateway into a message
+ * batadv_v_gw_dump_entry() - Dump a gateway into a message
  * @msg: Netlink message to dump into
  * @portid: Port making netlink request
  * @seq: Sequence number of netlink message
@@ -1004,7 +1003,7 @@ out:
 }
 
 /**
- * batadv_v_gw_dump - Dump gateways into a message
+ * batadv_v_gw_dump() - Dump gateways into a message
  * @msg: Netlink message to dump into
  * @cb: Control block containing additional options
  * @bat_priv: The bat priv with all the soft interface information
@@ -1074,7 +1073,7 @@ static struct batadv_algo_ops batadv_batman_v __read_mostly = {
 };
 
 /**
- * batadv_v_hardif_init - initialize the algorithm specific fields in the
+ * batadv_v_hardif_init() - initialize the algorithm specific fields in the
  *  hard-interface object
  * @hard_iface: the hard-interface to initialize
  */
@@ -1088,7 +1087,7 @@ void batadv_v_hardif_init(struct batadv_hard_iface *hard_iface)
 }
 
 /**
- * batadv_v_mesh_init - initialize the B.A.T.M.A.N. V private resources for a
+ * batadv_v_mesh_init() - initialize the B.A.T.M.A.N. V private resources for a
  *  mesh
  * @bat_priv: the object representing the mesh interface to initialise
  *
@@ -1106,7 +1105,7 @@ int batadv_v_mesh_init(struct batadv_priv *bat_priv)
 }
 
 /**
- * batadv_v_mesh_free - free the B.A.T.M.A.N. V private resources for a mesh
+ * batadv_v_mesh_free() - free the B.A.T.M.A.N. V private resources for a mesh
  * @bat_priv: the object representing the mesh interface to free
  */
 void batadv_v_mesh_free(struct batadv_priv *bat_priv)
@@ -1115,7 +1114,7 @@ void batadv_v_mesh_free(struct batadv_priv *bat_priv)
 }
 
 /**
- * batadv_v_init - B.A.T.M.A.N. V initialization function
+ * batadv_v_init() - B.A.T.M.A.N. V initialization function
  *
  * Description: Takes care of initializing all the subcomponents.
  * It is invoked upon module load only.
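Throughout these files the private "packet.h" is replaced by <uapi/linux/batadv_packet.h>: the on-wire packet definitions now live in an exported userspace header. A hypothetical consumer, assuming the exported packet-type constant keeps its BATADV_OGM2 name:

#include <linux/types.h>
#include <uapi/linux/batadv_packet.h>

/* Hypothetical check; the point is that wire-format constants are now
 * part of the exported UAPI surface.
 */
static bool is_ogm2(u8 packet_type)
{
	return packet_type == BATADV_OGM2;
}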
diff --git a/net/batman-adv/bat_v.h b/net/batman-adv/bat_v.h
index dd7c4b647e6b..a17ab68bbce8 100644
--- a/net/batman-adv/bat_v.h
+++ b/net/batman-adv/bat_v.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
 /* Copyright (C) 2011-2017  B.A.T.M.A.N. contributors:
  *
  * Marek Lindner, Linus Lüssing
diff --git a/net/batman-adv/bat_v_elp.c b/net/batman-adv/bat_v_elp.c
index 1de992c58b35..a83478c46597 100644
--- a/net/batman-adv/bat_v_elp.c
+++ b/net/batman-adv/bat_v_elp.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
 /* Copyright (C) 2011-2017  B.A.T.M.A.N. contributors:
  *
  * Linus Lüssing, Marek Lindner
@@ -24,7 +25,7 @@
 #include <linux/errno.h>
 #include <linux/etherdevice.h>
 #include <linux/ethtool.h>
-#include <linux/fs.h>
+#include <linux/gfp.h>
 #include <linux/if_ether.h>
 #include <linux/jiffies.h>
 #include <linux/kernel.h>
@@ -41,18 +42,18 @@
 #include <linux/types.h>
 #include <linux/workqueue.h>
 #include <net/cfg80211.h>
+#include <uapi/linux/batadv_packet.h>
 
 #include "bat_algo.h"
 #include "bat_v_ogm.h"
 #include "hard-interface.h"
 #include "log.h"
 #include "originator.h"
-#include "packet.h"
 #include "routing.h"
 #include "send.h"
 
 /**
- * batadv_v_elp_start_timer - restart timer for ELP periodic work
+ * batadv_v_elp_start_timer() - restart timer for ELP periodic work
  * @hard_iface: the interface for which the timer has to be reset
  */
 static void batadv_v_elp_start_timer(struct batadv_hard_iface *hard_iface)
@@ -67,7 +68,7 @@ static void batadv_v_elp_start_timer(struct batadv_hard_iface *hard_iface)
 }
 
 /**
- * batadv_v_elp_get_throughput - get the throughput towards a neighbour
+ * batadv_v_elp_get_throughput() - get the throughput towards a neighbour
  * @neigh: the neighbour for which the throughput has to be obtained
  *
 * Return: The throughput towards the given neighbour in multiples of 100kbps
@@ -153,8 +154,8 @@ default_throughput:
 }
 
 /**
- * batadv_v_elp_throughput_metric_update - worker updating the throughput metric
- *  of a single hop neighbour
+ * batadv_v_elp_throughput_metric_update() - worker updating the throughput
+ *  metric of a single hop neighbour
  * @work: the work queue item
  */
 void batadv_v_elp_throughput_metric_update(struct work_struct *work)
@@ -177,7 +178,7 @@ void batadv_v_elp_throughput_metric_update(struct work_struct *work)
 }
 
 /**
- * batadv_v_elp_wifi_neigh_probe - send link probing packets to a neighbour
+ * batadv_v_elp_wifi_neigh_probe() - send link probing packets to a neighbour
  * @neigh: the neighbour to probe
  *
  * Sends a predefined number of unicast wifi packets to a given neighbour in
@@ -240,7 +241,7 @@ batadv_v_elp_wifi_neigh_probe(struct batadv_hardif_neigh_node *neigh)
 }
 
 /**
- * batadv_v_elp_periodic_work - ELP periodic task per interface
+ * batadv_v_elp_periodic_work() - ELP periodic task per interface
  * @work: work queue item
  *
  * Emits broadcast ELP message in regular intervals.
@@ -327,7 +328,7 @@ out:
 }
 
 /**
- * batadv_v_elp_iface_enable - setup the ELP interface private resources
+ * batadv_v_elp_iface_enable() - setup the ELP interface private resources
  * @hard_iface: interface for which the data has to be prepared
  *
  * Return: 0 on success or a -ENOMEM in case of failure.
@@ -375,7 +376,7 @@ out:
 }
 
 /**
- * batadv_v_elp_iface_disable - release ELP interface private resources
+ * batadv_v_elp_iface_disable() - release ELP interface private resources
  * @hard_iface: interface for which the resources have to be released
  */
 void batadv_v_elp_iface_disable(struct batadv_hard_iface *hard_iface)
@@ -387,7 +388,7 @@ void batadv_v_elp_iface_disable(struct batadv_hard_iface *hard_iface)
 }
 
 /**
- * batadv_v_elp_iface_activate - update the ELP buffer belonging to the given
+ * batadv_v_elp_iface_activate() - update the ELP buffer belonging to the given
  *  hard-interface
  * @primary_iface: the new primary interface
  * @hard_iface: interface holding the to-be-updated buffer
@@ -408,7 +409,7 @@ void batadv_v_elp_iface_activate(struct batadv_hard_iface *primary_iface,
 }
 
 /**
- * batadv_v_elp_primary_iface_set - change internal data to reflect the new
+ * batadv_v_elp_primary_iface_set() - change internal data to reflect the new
  *  primary interface
  * @primary_iface: the new primary interface
  */
@@ -428,7 +429,7 @@ void batadv_v_elp_primary_iface_set(struct batadv_hard_iface *primary_iface)
 }
 
 /**
- * batadv_v_elp_neigh_update - update an ELP neighbour node
+ * batadv_v_elp_neigh_update() - update an ELP neighbour node
  * @bat_priv: the bat priv with all the soft interface information
  * @neigh_addr: the neighbour interface address
  * @if_incoming: the interface the packet was received through
@@ -488,7 +489,7 @@ orig_free:
 }
 
 /**
- * batadv_v_elp_packet_recv - main ELP packet handler
+ * batadv_v_elp_packet_recv() - main ELP packet handler
  * @skb: the received packet
  * @if_incoming: the interface this packet was received through
  *
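Several of these files also swap <linux/fs.h> for <linux/gfp.h>: what they actually use are the GFP_* allocation flags, which had been reaching them only indirectly. A sketch of the direct dependency (the allocation site is hypothetical):

#include <linux/gfp.h>	/* GFP_ATOMIC and friends */
#include <linux/slab.h>

static void *elp_alloc(size_t len)
{
	/* GFP_ATOMIC comes from <linux/gfp.h>, now included directly */
	return kzalloc(len, GFP_ATOMIC);
}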
diff --git a/net/batman-adv/bat_v_elp.h b/net/batman-adv/bat_v_elp.h
index 376ead280ab9..5e39d0588a48 100644
--- a/net/batman-adv/bat_v_elp.h
+++ b/net/batman-adv/bat_v_elp.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
 /* Copyright (C) 2013-2017  B.A.T.M.A.N. contributors:
  *
  * Linus Lüssing, Marek Lindner
diff --git a/net/batman-adv/bat_v_ogm.c b/net/batman-adv/bat_v_ogm.c
index c251445a42a0..ba59b77c605d 100644
--- a/net/batman-adv/bat_v_ogm.c
+++ b/net/batman-adv/bat_v_ogm.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
 /* Copyright (C) 2013-2017  B.A.T.M.A.N. contributors:
  *
  * Antonio Quartulli
@@ -22,7 +23,7 @@
 #include <linux/byteorder/generic.h>
 #include <linux/errno.h>
 #include <linux/etherdevice.h>
-#include <linux/fs.h>
+#include <linux/gfp.h>
 #include <linux/if_ether.h>
 #include <linux/jiffies.h>
 #include <linux/kernel.h>
@@ -38,20 +39,20 @@
 #include <linux/string.h>
 #include <linux/types.h>
 #include <linux/workqueue.h>
+#include <uapi/linux/batadv_packet.h>
 
 #include "bat_algo.h"
 #include "hard-interface.h"
 #include "hash.h"
 #include "log.h"
 #include "originator.h"
-#include "packet.h"
 #include "routing.h"
 #include "send.h"
 #include "translation-table.h"
 #include "tvlv.h"
 
 /**
- * batadv_v_ogm_orig_get - retrieve and possibly create an originator node
+ * batadv_v_ogm_orig_get() - retrieve and possibly create an originator node
  * @bat_priv: the bat priv with all the soft interface information
  * @addr: the address of the originator
  *
@@ -88,7 +89,7 @@ struct batadv_orig_node *batadv_v_ogm_orig_get(struct batadv_priv *bat_priv,
 }
 
 /**
- * batadv_v_ogm_start_timer - restart the OGM sending timer
+ * batadv_v_ogm_start_timer() - restart the OGM sending timer
  * @bat_priv: the bat priv with all the soft interface information
  */
 static void batadv_v_ogm_start_timer(struct batadv_priv *bat_priv)
@@ -107,7 +108,7 @@ static void batadv_v_ogm_start_timer(struct batadv_priv *bat_priv)
 }
 
 /**
- * batadv_v_ogm_send_to_if - send a batman ogm using a given interface
+ * batadv_v_ogm_send_to_if() - send a batman ogm using a given interface
  * @skb: the OGM to send
  * @hard_iface: the interface to use to send the OGM
  */
@@ -127,7 +128,7 @@ static void batadv_v_ogm_send_to_if(struct sk_buff *skb,
 }
 
 /**
- * batadv_v_ogm_send - periodic worker broadcasting the own OGM
+ * batadv_v_ogm_send() - periodic worker broadcasting the own OGM
  * @work: work queue item
  */
 static void batadv_v_ogm_send(struct work_struct *work)
@@ -235,7 +236,7 @@ out:
 }
 
 /**
- * batadv_v_ogm_iface_enable - prepare an interface for B.A.T.M.A.N. V
+ * batadv_v_ogm_iface_enable() - prepare an interface for B.A.T.M.A.N. V
  * @hard_iface: the interface to prepare
  *
  * Takes care of scheduling own OGM sending routine for this interface.
@@ -252,7 +253,7 @@ int batadv_v_ogm_iface_enable(struct batadv_hard_iface *hard_iface)
 }
 
 /**
- * batadv_v_ogm_primary_iface_set - set a new primary interface
+ * batadv_v_ogm_primary_iface_set() - set a new primary interface
  * @primary_iface: the new primary interface
  */
 void batadv_v_ogm_primary_iface_set(struct batadv_hard_iface *primary_iface)
@@ -268,8 +269,8 @@ void batadv_v_ogm_primary_iface_set(struct batadv_hard_iface *primary_iface)
 }
 
 /**
- * batadv_v_forward_penalty - apply a penalty to the throughput metric forwarded
- *  with B.A.T.M.A.N. V OGMs
+ * batadv_v_forward_penalty() - apply a penalty to the throughput metric
+ *  forwarded with B.A.T.M.A.N. V OGMs
  * @bat_priv: the bat priv with all the soft interface information
  * @if_incoming: the interface where the OGM has been received
  * @if_outgoing: the interface where the OGM has to be forwarded to
@@ -314,7 +315,7 @@ static u32 batadv_v_forward_penalty(struct batadv_priv *bat_priv,
 }
 
 /**
- * batadv_v_ogm_forward - check conditions and forward an OGM to the given
+ * batadv_v_ogm_forward() - check conditions and forward an OGM to the given
  *  outgoing interface
  * @bat_priv: the bat priv with all the soft interface information
  * @ogm_received: previously received OGM to be forwarded
@@ -405,7 +406,7 @@ out:
 }
 
 /**
- * batadv_v_ogm_metric_update - update route metric based on OGM
+ * batadv_v_ogm_metric_update() - update route metric based on OGM
  * @bat_priv: the bat priv with all the soft interface information
  * @ogm2: OGM2 structure
  * @orig_node: Originator structure for which the OGM has been received
@@ -490,7 +491,7 @@ out:
 }
 
 /**
- * batadv_v_ogm_route_update - update routes based on OGM
+ * batadv_v_ogm_route_update() - update routes based on OGM
  * @bat_priv: the bat priv with all the soft interface information
  * @ethhdr: the Ethernet header of the OGM2
  * @ogm2: OGM2 structure
@@ -590,7 +591,7 @@ out:
 }
 
 /**
- * batadv_v_ogm_process_per_outif - process a batman v OGM for an outgoing if
+ * batadv_v_ogm_process_per_outif() - process a batman v OGM for an outgoing if
  * @bat_priv: the bat priv with all the soft interface information
  * @ethhdr: the Ethernet header of the OGM2
  * @ogm2: OGM2 structure
@@ -639,7 +640,7 @@ batadv_v_ogm_process_per_outif(struct batadv_priv *bat_priv,
 }
 
 /**
- * batadv_v_ogm_aggr_packet - checks if there is another OGM aggregated
+ * batadv_v_ogm_aggr_packet() - checks if there is another OGM aggregated
  * @buff_pos: current position in the skb
  * @packet_len: total length of the skb
  * @tvlv_len: tvlv length of the previously considered OGM
@@ -659,7 +660,7 @@ static bool batadv_v_ogm_aggr_packet(int buff_pos, int packet_len,
 }
 
 /**
- * batadv_v_ogm_process - process an incoming batman v OGM
+ * batadv_v_ogm_process() - process an incoming batman v OGM
  * @skb: the skb containing the OGM
  * @ogm_offset: offset to the OGM which should be processed (for aggregates)
 * @if_incoming: the interface where this packet was received
@@ -787,7 +788,7 @@ out:
 }
 
 /**
- * batadv_v_ogm_packet_recv - OGM2 receiving handler
+ * batadv_v_ogm_packet_recv() - OGM2 receiving handler
  * @skb: the received OGM
  * @if_incoming: the interface where this OGM has been received
  *
@@ -851,7 +852,7 @@ free_skb:
 }
 
 /**
- * batadv_v_ogm_init - initialise the OGM2 engine
+ * batadv_v_ogm_init() - initialise the OGM2 engine
  * @bat_priv: the bat priv with all the soft interface information
  *
  * Return: 0 on success or a negative error code in case of failure
@@ -884,7 +885,7 @@ int batadv_v_ogm_init(struct batadv_priv *bat_priv)
 }
 
 /**
- * batadv_v_ogm_free - free OGM private resources
+ * batadv_v_ogm_free() - free OGM private resources
  * @bat_priv: the bat priv with all the soft interface information
  */
 void batadv_v_ogm_free(struct batadv_priv *bat_priv)
diff --git a/net/batman-adv/bat_v_ogm.h b/net/batman-adv/bat_v_ogm.h
index 2068770b542d..6a4c14ccc3c6 100644
--- a/net/batman-adv/bat_v_ogm.h
+++ b/net/batman-adv/bat_v_ogm.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
 /* Copyright (C) 2013-2017  B.A.T.M.A.N. contributors:
  *
  * Antonio Quartulli
diff --git a/net/batman-adv/bitarray.c b/net/batman-adv/bitarray.c
index 2b070c7e31da..bdc1ef06e05b 100644
--- a/net/batman-adv/bitarray.c
+++ b/net/batman-adv/bitarray.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
 /* Copyright (C) 2006-2017  B.A.T.M.A.N. contributors:
  *
  * Simon Wunderlich, Marek Lindner
@@ -32,7 +33,7 @@ static void batadv_bitmap_shift_left(unsigned long *seq_bits, s32 n)
 }
 
 /**
- * batadv_bit_get_packet - receive and process one packet within the sequence
+ * batadv_bit_get_packet() - receive and process one packet within the sequence
  *  number window
  * @priv: the bat priv with all the soft interface information
  * @seq_bits: pointer to the sequence number receive packet
diff --git a/net/batman-adv/bitarray.h b/net/batman-adv/bitarray.h
index cc262c9d97e0..ca9d0753dd6b 100644
--- a/net/batman-adv/bitarray.h
+++ b/net/batman-adv/bitarray.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
 /* Copyright (C) 2006-2017  B.A.T.M.A.N. contributors:
  *
  * Simon Wunderlich, Marek Lindner
@@ -26,7 +27,7 @@
 #include <linux/types.h>
 
 /**
- * batadv_test_bit - check if bit is set in the current window
+ * batadv_test_bit() - check if bit is set in the current window
  *
  * @seq_bits: pointer to the sequence number receive packet
  * @last_seqno: latest sequence number in seq_bits
@@ -46,7 +47,12 @@ static inline bool batadv_test_bit(const unsigned long *seq_bits,
 	return test_bit(diff, seq_bits) != 0;
 }
 
-/* turn corresponding bit on, so we can remember that we got the packet */
+/**
+ * batadv_set_bit() - Turn corresponding bit on, so we can remember that we got
+ *  the packet
+ * @seq_bits: bitmap of the packet receive window
+ * @n: relative sequence number of newly received packet
+ */
 static inline void batadv_set_bit(unsigned long *seq_bits, s32 n)
 {
 	/* if too old, just drop it */
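batadv_set_bit() and batadv_test_bit() maintain a sliding receive window over relative sequence numbers: bit n records whether the packet with sequence number last_seqno - n has been seen. A generic sketch of the idea, with hypothetical names and window size:

#include <linux/bitops.h>
#include <linux/types.h>

#define EXAMPLE_WINDOW_SIZE 64	/* hypothetical window width */

static DECLARE_BITMAP(example_window, EXAMPLE_WINDOW_SIZE);

/* Remember that the packet with relative sequence number n arrived;
 * anything outside the window is simply dropped, as above.
 */
static void example_window_mark(s32 n)
{
	if (n >= 0 && n < EXAMPLE_WINDOW_SIZE)
		set_bit(n, example_window);
}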
diff --git a/net/batman-adv/bridge_loop_avoidance.c b/net/batman-adv/bridge_loop_avoidance.c
index cdd8e8e4df0b..fad47853ad3c 100644
--- a/net/batman-adv/bridge_loop_avoidance.c
+++ b/net/batman-adv/bridge_loop_avoidance.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
 /* Copyright (C) 2011-2017  B.A.T.M.A.N. contributors:
  *
  * Simon Wunderlich
@@ -24,7 +25,7 @@
 #include <linux/crc16.h>
 #include <linux/errno.h>
 #include <linux/etherdevice.h>
-#include <linux/fs.h>
+#include <linux/gfp.h>
 #include <linux/if_arp.h>
 #include <linux/if_ether.h>
 #include <linux/if_vlan.h>
@@ -49,6 +50,7 @@
 #include <net/genetlink.h>
 #include <net/netlink.h>
 #include <net/sock.h>
+#include <uapi/linux/batadv_packet.h>
 #include <uapi/linux/batman_adv.h>
 
 #include "hard-interface.h"
@@ -56,7 +58,6 @@
 #include "log.h"
 #include "netlink.h"
 #include "originator.h"
-#include "packet.h"
 #include "soft-interface.h"
 #include "sysfs.h"
 #include "translation-table.h"
@@ -69,7 +70,7 @@ batadv_bla_send_announce(struct batadv_priv *bat_priv,
 			 struct batadv_bla_backbone_gw *backbone_gw);
 
 /**
- * batadv_choose_claim - choose the right bucket for a claim.
+ * batadv_choose_claim() - choose the right bucket for a claim.
  * @data: data to hash
  * @size: size of the hash table
  *
@@ -87,7 +88,7 @@ static inline u32 batadv_choose_claim(const void *data, u32 size)
 }
 
 /**
- * batadv_choose_backbone_gw - choose the right bucket for a backbone gateway.
+ * batadv_choose_backbone_gw() - choose the right bucket for a backbone gateway.
  * @data: data to hash
  * @size: size of the hash table
  *
@@ -105,7 +106,7 @@ static inline u32 batadv_choose_backbone_gw(const void *data, u32 size)
 }
 
 /**
- * batadv_compare_backbone_gw - compare address and vid of two backbone gws
+ * batadv_compare_backbone_gw() - compare address and vid of two backbone gws
  * @node: list node of the first entry to compare
  * @data2: pointer to the second backbone gateway
  *
@@ -129,7 +130,7 @@ static bool batadv_compare_backbone_gw(const struct hlist_node *node,
 }
 
 /**
- * batadv_compare_claim - compare address and vid of two claims
+ * batadv_compare_claim() - compare address and vid of two claims
  * @node: list node of the first entry to compare
  * @data2: pointer to the second claims
  *
@@ -153,7 +154,7 @@ static bool batadv_compare_claim(const struct hlist_node *node,
 }
 
 /**
- * batadv_backbone_gw_release - release backbone gw from lists and queue for
+ * batadv_backbone_gw_release() - release backbone gw from lists and queue for
  *  free after rcu grace period
  * @ref: kref pointer of the backbone gw
  */
@@ -168,7 +169,7 @@ static void batadv_backbone_gw_release(struct kref *ref)
 }
 
 /**
- * batadv_backbone_gw_put - decrement the backbone gw refcounter and possibly
+ * batadv_backbone_gw_put() - decrement the backbone gw refcounter and possibly
  *  release it
  * @backbone_gw: backbone gateway to be free'd
  */
@@ -178,8 +179,8 @@ static void batadv_backbone_gw_put(struct batadv_bla_backbone_gw *backbone_gw)
 }
 
 /**
- * batadv_claim_release - release claim from lists and queue for free after rcu
- *  grace period
+ * batadv_claim_release() - release claim from lists and queue for free after
+ *  rcu grace period
  * @ref: kref pointer of the claim
  */
 static void batadv_claim_release(struct kref *ref)
@@ -204,8 +205,7 @@ static void batadv_claim_release(struct kref *ref)
 }
 
 /**
- * batadv_claim_put - decrement the claim refcounter and possibly
- *  release it
+ * batadv_claim_put() - decrement the claim refcounter and possibly release it
  * @claim: claim to be free'd
  */
 static void batadv_claim_put(struct batadv_bla_claim *claim)
@@ -214,7 +214,7 @@ static void batadv_claim_put(struct batadv_bla_claim *claim)
 }
 
 /**
- * batadv_claim_hash_find - looks for a claim in the claim hash
+ * batadv_claim_hash_find() - looks for a claim in the claim hash
  * @bat_priv: the bat priv with all the soft interface information
  * @data: search data (may be local/static data)
  *
@@ -253,7 +253,7 @@ batadv_claim_hash_find(struct batadv_priv *bat_priv,
 }
 
 /**
- * batadv_backbone_hash_find - looks for a backbone gateway in the hash
+ * batadv_backbone_hash_find() - looks for a backbone gateway in the hash
  * @bat_priv: the bat priv with all the soft interface information
  * @addr: the address of the originator
  * @vid: the VLAN ID
@@ -297,7 +297,7 @@ batadv_backbone_hash_find(struct batadv_priv *bat_priv, u8 *addr,
 }
 
 /**
- * batadv_bla_del_backbone_claims - delete all claims for a backbone
+ * batadv_bla_del_backbone_claims() - delete all claims for a backbone
  * @backbone_gw: backbone gateway where the claims should be removed
  */
 static void
@@ -337,7 +337,7 @@ batadv_bla_del_backbone_claims(struct batadv_bla_backbone_gw *backbone_gw)
 }
 
 /**
- * batadv_bla_send_claim - sends a claim frame according to the provided info
+ * batadv_bla_send_claim() - sends a claim frame according to the provided info
  * @bat_priv: the bat priv with all the soft interface information
  * @mac: the mac address to be announced within the claim
  * @vid: the VLAN ID
@@ -457,7 +457,7 @@ out:
 }
 
 /**
- * batadv_bla_loopdetect_report - worker for reporting the loop
+ * batadv_bla_loopdetect_report() - worker for reporting the loop
  * @work: work queue item
  *
  * Throws an uevent, as the loopdetect check function can't do that itself
@@ -487,7 +487,7 @@ static void batadv_bla_loopdetect_report(struct work_struct *work)
 }
 
 /**
- * batadv_bla_get_backbone_gw - finds or creates a backbone gateway
+ * batadv_bla_get_backbone_gw() - finds or creates a backbone gateway
  * @bat_priv: the bat priv with all the soft interface information
  * @orig: the mac address of the originator
  * @vid: the VLAN ID
@@ -560,7 +560,7 @@ batadv_bla_get_backbone_gw(struct batadv_priv *bat_priv, u8 *orig,
 }
 
 /**
- * batadv_bla_update_own_backbone_gw - updates the own backbone gw for a VLAN
+ * batadv_bla_update_own_backbone_gw() - updates the own backbone gw for a VLAN
  * @bat_priv: the bat priv with all the soft interface information
  * @primary_if: the selected primary interface
  * @vid: VLAN identifier
@@ -586,7 +586,7 @@ batadv_bla_update_own_backbone_gw(struct batadv_priv *bat_priv,
 }
 
 /**
- * batadv_bla_answer_request - answer a bla request by sending own claims
+ * batadv_bla_answer_request() - answer a bla request by sending own claims
  * @bat_priv: the bat priv with all the soft interface information
  * @primary_if: interface where the request came on
  * @vid: the vid where the request came on
@@ -636,7 +636,7 @@ static void batadv_bla_answer_request(struct batadv_priv *bat_priv,
 }
 
 /**
- * batadv_bla_send_request - send a request to repeat claims
+ * batadv_bla_send_request() - send a request to repeat claims
  * @backbone_gw: the backbone gateway from whom we are out of sync
  *
  * When the crc is wrong, ask the backbone gateway for a full table update.
@@ -663,7 +663,7 @@ static void batadv_bla_send_request(struct batadv_bla_backbone_gw *backbone_gw)
 }
 
 /**
- * batadv_bla_send_announce - Send an announcement frame
+ * batadv_bla_send_announce() - Send an announcement frame
  * @bat_priv: the bat priv with all the soft interface information
  * @backbone_gw: our backbone gateway which should be announced
  */
@@ -684,7 +684,7 @@ static void batadv_bla_send_announce(struct batadv_priv *bat_priv,
 }
 
 /**
- * batadv_bla_add_claim - Adds a claim in the claim hash
+ * batadv_bla_add_claim() - Adds a claim in the claim hash
  * @bat_priv: the bat priv with all the soft interface information
  * @mac: the mac address of the claim
  * @vid: the VLAN ID of the frame
@@ -774,7 +774,7 @@ claim_free_ref:
 }
 
 /**
- * batadv_bla_claim_get_backbone_gw - Get valid reference for backbone_gw of
+ * batadv_bla_claim_get_backbone_gw() - Get valid reference for backbone_gw of
  *  claim
  * @claim: claim whose backbone_gw should be returned
  *
@@ -794,7 +794,7 @@ batadv_bla_claim_get_backbone_gw(struct batadv_bla_claim *claim)
 }
 
 /**
- * batadv_bla_del_claim - delete a claim from the claim hash
+ * batadv_bla_del_claim() - delete a claim from the claim hash
  * @bat_priv: the bat priv with all the soft interface information
  * @mac: mac address of the claim to be removed
  * @vid: VLAN id for the claim to be removed
@@ -822,7 +822,7 @@ static void batadv_bla_del_claim(struct batadv_priv *bat_priv,
 }
 
 /**
- * batadv_handle_announce - check for ANNOUNCE frame
+ * batadv_handle_announce() - check for ANNOUNCE frame
  * @bat_priv: the bat priv with all the soft interface information
  * @an_addr: announcement mac address (ARP Sender HW address)
  * @backbone_addr: originator address of the sender (Ethernet source MAC)
@@ -880,7 +880,7 @@ static bool batadv_handle_announce(struct batadv_priv *bat_priv, u8 *an_addr,
 }
 
 /**
- * batadv_handle_request - check for REQUEST frame
+ * batadv_handle_request() - check for REQUEST frame
  * @bat_priv: the bat priv with all the soft interface information
  * @primary_if: the primary hard interface of this batman soft interface
  * @backbone_addr: backbone address to be requested (ARP sender HW MAC)
@@ -913,7 +913,7 @@ static bool batadv_handle_request(struct batadv_priv *bat_priv,
 }
 
 /**
- * batadv_handle_unclaim - check for UNCLAIM frame
+ * batadv_handle_unclaim() - check for UNCLAIM frame
  * @bat_priv: the bat priv with all the soft interface information
  * @primary_if: the primary hard interface of this batman soft interface
  * @backbone_addr: originator address of the backbone (Ethernet source)
@@ -951,7 +951,7 @@ static bool batadv_handle_unclaim(struct batadv_priv *bat_priv,
 }
 
 /**
- * batadv_handle_claim - check for CLAIM frame
+ * batadv_handle_claim() - check for CLAIM frame
  * @bat_priv: the bat priv with all the soft interface information
  * @primary_if: the primary hard interface of this batman soft interface
  * @backbone_addr: originator address of the backbone (Ethernet Source)
@@ -988,7 +988,7 @@ static bool batadv_handle_claim(struct batadv_priv *bat_priv,
 }
 
 /**
- * batadv_check_claim_group - check for claim group membership
+ * batadv_check_claim_group() - check for claim group membership
  * @bat_priv: the bat priv with all the soft interface information
  * @primary_if: the primary interface of this batman interface
  * @hw_src: the Hardware source in the ARP Header
@@ -1063,7 +1063,7 @@ static int batadv_check_claim_group(struct batadv_priv *bat_priv,
 }
 
 /**
- * batadv_bla_process_claim - Check if this is a claim frame, and process it
+ * batadv_bla_process_claim() - Check if this is a claim frame, and process it
  * @bat_priv: the bat priv with all the soft interface information
  * @primary_if: the primary hard interface of this batman soft interface
  * @skb: the frame to be checked
@@ -1205,7 +1205,7 @@ static bool batadv_bla_process_claim(struct batadv_priv *bat_priv,
 }
 
 /**
- * batadv_bla_purge_backbone_gw - Remove backbone gateways after a timeout or
+ * batadv_bla_purge_backbone_gw() - Remove backbone gateways after a timeout or
  *  immediately
  * @bat_priv: the bat priv with all the soft interface information
  * @now: whether the whole hash shall be wiped now
@@ -1258,7 +1258,7 @@ purge_now:
 }
 
 /**
- * batadv_bla_purge_claims - Remove claims after a timeout or immediately
+ * batadv_bla_purge_claims() - Remove claims after a timeout or immediately
  * @bat_priv: the bat priv with all the soft interface information
  * @primary_if: the selected primary interface, may be NULL if now is set
  * @now: whether the whole hash shall be wiped now
@@ -1316,7 +1316,7 @@ skip:
 }
 
 /**
- * batadv_bla_update_orig_address - Update the backbone gateways when the own
+ * batadv_bla_update_orig_address() - Update the backbone gateways when the own
  *  originator address changes
  * @bat_priv: the bat priv with all the soft interface information
  * @primary_if: the new selected primary_if
@@ -1372,7 +1372,7 @@ void batadv_bla_update_orig_address(struct batadv_priv *bat_priv,
 }
 
 /**
- * batadv_bla_send_loopdetect - send a loopdetect frame
+ * batadv_bla_send_loopdetect() - send a loopdetect frame
  * @bat_priv: the bat priv with all the soft interface information
  * @backbone_gw: the backbone gateway for which a loop should be detected
  *
@@ -1392,7 +1392,7 @@ batadv_bla_send_loopdetect(struct batadv_priv *bat_priv,
 }
 
 /**
- * batadv_bla_status_update - purge bla interfaces if necessary
+ * batadv_bla_status_update() - purge bla interfaces if necessary
  * @net_dev: the soft interface net device
  */
 void batadv_bla_status_update(struct net_device *net_dev)
@@ -1412,7 +1412,7 @@ void batadv_bla_status_update(struct net_device *net_dev)
 }
 
 /**
- * batadv_bla_periodic_work - performs periodic bla work
+ * batadv_bla_periodic_work() - performs periodic bla work
  * @work: kernel work struct
  *
  * periodic work to do:
@@ -1517,7 +1517,7 @@ static struct lock_class_key batadv_claim_hash_lock_class_key;
 static struct lock_class_key batadv_backbone_hash_lock_class_key;
 
 /**
- * batadv_bla_init - initialize all bla structures
+ * batadv_bla_init() - initialize all bla structures
  * @bat_priv: the bat priv with all the soft interface information
  *
  * Return: 0 on success, < 0 on error.
@@ -1579,7 +1579,7 @@ int batadv_bla_init(struct batadv_priv *bat_priv)
 }
 
 /**
- * batadv_bla_check_bcast_duplist - Check if a frame is in the broadcast dup.
+ * batadv_bla_check_bcast_duplist() - Check if a frame is in the broadcast dup.
  * @bat_priv: the bat priv with all the soft interface information
  * @skb: contains the bcast_packet to be checked
  *
@@ -1652,7 +1652,7 @@ out:
 }
 
 /**
- * batadv_bla_is_backbone_gw_orig - Check if the originator is a gateway for
+ * batadv_bla_is_backbone_gw_orig() - Check if the originator is a gateway for
  *  the VLAN identified by vid.
  * @bat_priv: the bat priv with all the soft interface information
  * @orig: originator mac address
@@ -1692,7 +1692,7 @@ bool batadv_bla_is_backbone_gw_orig(struct batadv_priv *bat_priv, u8 *orig,
 }
 
 /**
- * batadv_bla_is_backbone_gw - check if originator is a backbone gw for a VLAN.
+ * batadv_bla_is_backbone_gw() - check if originator is a backbone gw for a VLAN
  * @skb: the frame to be checked
  * @orig_node: the orig_node of the frame
  * @hdr_size: maximum length of the frame
@@ -1726,7 +1726,7 @@ bool batadv_bla_is_backbone_gw(struct sk_buff *skb,
 }
 
 /**
- * batadv_bla_free - free all bla structures
+ * batadv_bla_free() - free all bla structures
  * @bat_priv: the bat priv with all the soft interface information
  *
  * for soft interface free or module unload
@@ -1753,7 +1753,7 @@ void batadv_bla_free(struct batadv_priv *bat_priv)
 }
 
 /**
- * batadv_bla_loopdetect_check - check and handle a detected loop
+ * batadv_bla_loopdetect_check() - check and handle a detected loop
  * @bat_priv: the bat priv with all the soft interface information
  * @skb: the packet to check
  * @primary_if: interface where the request came on
@@ -1802,7 +1802,7 @@ batadv_bla_loopdetect_check(struct batadv_priv *bat_priv, struct sk_buff *skb,
 }
 
 /**
- * batadv_bla_rx - check packets coming from the mesh.
+ * batadv_bla_rx() - check packets coming from the mesh.
  * @bat_priv: the bat priv with all the soft interface information
  * @skb: the frame to be checked
  * @vid: the VLAN ID of the frame
@@ -1914,7 +1914,7 @@ out:
 }
 
 /**
- * batadv_bla_tx - check packets going into the mesh
+ * batadv_bla_tx() - check packets going into the mesh
  * @bat_priv: the bat priv with all the soft interface information
  * @skb: the frame to be checked
  * @vid: the VLAN ID of the frame
@@ -2022,7 +2022,7 @@ out:
 
 #ifdef CONFIG_BATMAN_ADV_DEBUGFS
 /**
- * batadv_bla_claim_table_seq_print_text - print the claim table in a seq file
+ * batadv_bla_claim_table_seq_print_text() - print the claim table in a seq file
  * @seq: seq file to print on
  * @offset: not used
  *
@@ -2084,7 +2084,7 @@ out:
 #endif
 
 /**
- * batadv_bla_claim_dump_entry - dump one entry of the claim table
+ * batadv_bla_claim_dump_entry() - dump one entry of the claim table
  * to a netlink socket
  * @msg: buffer for the message
  * @portid: netlink port
@@ -2143,7 +2143,7 @@ out:
 }
 
 /**
- * batadv_bla_claim_dump_bucket - dump one bucket of the claim table
+ * batadv_bla_claim_dump_bucket() - dump one bucket of the claim table
  * to a netlink socket
  * @msg: buffer for the message
  * @portid: netlink port
@@ -2180,7 +2180,7 @@ unlock:
 }
 
 /**
- * batadv_bla_claim_dump - dump claim table to a netlink socket
+ * batadv_bla_claim_dump() - dump claim table to a netlink socket
  * @msg: buffer for the message
  * @cb: callback structure containing arguments
  *
@@ -2247,8 +2247,8 @@ out:
 
 #ifdef CONFIG_BATMAN_ADV_DEBUGFS
 /**
- * batadv_bla_backbone_table_seq_print_text - print the backbone table in a seq
- *  file
+ * batadv_bla_backbone_table_seq_print_text() - print the backbone table in a
+ *  seq file
  * @seq: seq file to print on
  * @offset: not used
  *
@@ -2312,8 +2312,8 @@ out:
 #endif
 
 /**
- * batadv_bla_backbone_dump_entry - dump one entry of the backbone table
- * to a netlink socket
+ * batadv_bla_backbone_dump_entry() - dump one entry of the backbone table to a
+ *  netlink socket
  * @msg: buffer for the message
  * @portid: netlink port
  * @seq: Sequence number of netlink message
@@ -2373,8 +2373,8 @@ out:
 }
 
 /**
- * batadv_bla_backbone_dump_bucket - dump one bucket of the backbone table
- * to a netlink socket
+ * batadv_bla_backbone_dump_bucket() - dump one bucket of the backbone table to
+ *  a netlink socket
  * @msg: buffer for the message
  * @portid: netlink port
  * @seq: Sequence number of netlink message
@@ -2410,7 +2410,7 @@ unlock:
 }
 
 /**
- * batadv_bla_backbone_dump - dump backbone table to a netlink socket
+ * batadv_bla_backbone_dump() - dump backbone table to a netlink socket
  * @msg: buffer for the message
  * @cb: callback structure containing arguments
  *
@@ -2477,7 +2477,7 @@ out:
 
 #ifdef CONFIG_BATMAN_ADV_DAT
 /**
- * batadv_bla_check_claim - check if address is claimed
+ * batadv_bla_check_claim() - check if address is claimed
  *
  * @bat_priv: the bat priv with all the soft interface information
  * @addr: mac address of which the claim status is checked
diff --git a/net/batman-adv/bridge_loop_avoidance.h b/net/batman-adv/bridge_loop_avoidance.h
index 234775748b8e..b27571abcd2f 100644
--- a/net/batman-adv/bridge_loop_avoidance.h
+++ b/net/batman-adv/bridge_loop_avoidance.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
 /* Copyright (C) 2011-2017  B.A.T.M.A.N. contributors:
  *
  * Simon Wunderlich
@@ -30,8 +31,8 @@ struct seq_file;
 struct sk_buff;
 
 /**
- * batadv_bla_is_loopdetect_mac - check if the mac address is from a loop detect
- *  frame sent by bridge loop avoidance
+ * batadv_bla_is_loopdetect_mac() - check if the mac address is from a loop
+ *  detect frame sent by bridge loop avoidance
  * @mac: mac address to check
  *
  * Return: true if it looks like a loop detect frame
diff --git a/net/batman-adv/debugfs.c b/net/batman-adv/debugfs.c
index e32ad47c6efd..21d1189957a7 100644
--- a/net/batman-adv/debugfs.c
+++ b/net/batman-adv/debugfs.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
 /* Copyright (C) 2010-2017  B.A.T.M.A.N. contributors:
  *
  * Marek Lindner
@@ -25,7 +26,6 @@
 #include <linux/fs.h>
 #include <linux/netdevice.h>
 #include <linux/printk.h>
-#include <linux/sched.h> /* for linux/wait.h */
 #include <linux/seq_file.h>
 #include <linux/stat.h>
 #include <linux/stddef.h>
@@ -66,8 +66,8 @@ static int batadv_originators_open(struct inode *inode, struct file *file)
 }
 
 /**
- * batadv_originators_hardif_open - handles debugfs output for the
- *  originator table of an hard interface
+ * batadv_originators_hardif_open() - handles debugfs output for the originator
+ *  table of a hard interface
  * @inode: inode pointer to debugfs file
  * @file: pointer to the seq_file
  *
@@ -117,7 +117,7 @@ static int batadv_bla_backbone_table_open(struct inode *inode,
 
 #ifdef CONFIG_BATMAN_ADV_DAT
 /**
- * batadv_dat_cache_open - Prepare file handler for reads from dat_chache
+ * batadv_dat_cache_open() - Prepare file handler for reads from dat_cache
  * @inode: inode which was opened
  * @file: file handle to be initialized
  *
@@ -154,7 +154,7 @@ static int batadv_nc_nodes_open(struct inode *inode, struct file *file)
 
 #ifdef CONFIG_BATMAN_ADV_MCAST
 /**
- * batadv_mcast_flags_open - prepare file handler for reads from mcast_flags
+ * batadv_mcast_flags_open() - prepare file handler for reads from mcast_flags
  * @inode: inode which was opened
  * @file: file handle to be initialized
  *
@@ -259,6 +259,9 @@ static struct batadv_debuginfo *batadv_hardif_debuginfos[] = {
 	NULL,
 };
 
+/**
+ * batadv_debugfs_init() - Initialize soft interface independent debugfs entries
+ */
 void batadv_debugfs_init(void)
 {
 	struct batadv_debuginfo **bat_debug;
@@ -289,6 +292,9 @@ err:
 	batadv_debugfs = NULL;
 }
 
+/**
+ * batadv_debugfs_destroy() - Remove all debugfs entries
+ */
 void batadv_debugfs_destroy(void)
 {
 	debugfs_remove_recursive(batadv_debugfs);
@@ -296,7 +302,7 @@ void batadv_debugfs_destroy(void)
 }
 
 /**
- * batadv_debugfs_add_hardif - creates the base directory for a hard interface
+ * batadv_debugfs_add_hardif() - creates the base directory for a hard interface
  *  in debugfs.
  * @hard_iface: hard interface which should be added.
  *
@@ -338,7 +344,7 @@ out:
 }
 
 /**
- * batadv_debugfs_del_hardif - delete the base directory for a hard interface
+ * batadv_debugfs_del_hardif() - delete the base directory for a hard interface
  *  in debugfs.
  * @hard_iface: hard interface which is deleted.
  */
@@ -355,6 +361,12 @@ void batadv_debugfs_del_hardif(struct batadv_hard_iface *hard_iface)
 	}
 }
 
+/**
+ * batadv_debugfs_add_meshif() - Initialize interface dependent debugfs entries
+ * @dev: netdev struct of the soft interface
+ *
+ * Return: 0 on success or negative error number in case of failure
+ */
 int batadv_debugfs_add_meshif(struct net_device *dev)
 {
 	struct batadv_priv *bat_priv = netdev_priv(dev);
@@ -401,6 +413,10 @@ out:
 	return -ENOMEM;
 }
 
+/**
+ * batadv_debugfs_del_meshif() - Remove interface dependent debugfs entries
+ * @dev: netdev struct of the soft interface
+ */
 void batadv_debugfs_del_meshif(struct net_device *dev)
 {
 	struct batadv_priv *bat_priv = netdev_priv(dev);
diff --git a/net/batman-adv/debugfs.h b/net/batman-adv/debugfs.h
index 9c5d4a65b98c..90a08d35c501 100644
--- a/net/batman-adv/debugfs.h
+++ b/net/batman-adv/debugfs.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
 /* Copyright (C) 2010-2017  B.A.T.M.A.N. contributors:
  *
  * Marek Lindner
diff --git a/net/batman-adv/distributed-arp-table.c b/net/batman-adv/distributed-arp-table.c
index 760c0de72582..9703c791ffc5 100644
--- a/net/batman-adv/distributed-arp-table.c
+++ b/net/batman-adv/distributed-arp-table.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
 /* Copyright (C) 2011-2017  B.A.T.M.A.N. contributors:
  *
  * Antonio Quartulli
@@ -23,7 +24,7 @@
 #include <linux/byteorder/generic.h>
 #include <linux/errno.h>
 #include <linux/etherdevice.h>
-#include <linux/fs.h>
+#include <linux/gfp.h>
 #include <linux/if_arp.h>
 #include <linux/if_ether.h>
 #include <linux/if_vlan.h>
@@ -55,7 +56,7 @@
 static void batadv_dat_purge(struct work_struct *work);
 
 /**
- * batadv_dat_start_timer - initialise the DAT periodic worker
+ * batadv_dat_start_timer() - initialise the DAT periodic worker
  * @bat_priv: the bat priv with all the soft interface information
  */
 static void batadv_dat_start_timer(struct batadv_priv *bat_priv)
@@ -66,7 +67,7 @@ static void batadv_dat_start_timer(struct batadv_priv *bat_priv)
 }
 
 /**
- * batadv_dat_entry_release - release dat_entry from lists and queue for free
+ * batadv_dat_entry_release() - release dat_entry from lists and queue for free
  *  after rcu grace period
  * @ref: kref pointer of the dat_entry
  */
@@ -80,7 +81,7 @@ static void batadv_dat_entry_release(struct kref *ref)
 }
 
 /**
- * batadv_dat_entry_put - decrement the dat_entry refcounter and possibly
+ * batadv_dat_entry_put() - decrement the dat_entry refcounter and possibly
  *  release it
  * @dat_entry: dat_entry to be free'd
  */
@@ -90,7 +91,7 @@ static void batadv_dat_entry_put(struct batadv_dat_entry *dat_entry)
 }
 
 /**
- * batadv_dat_to_purge - check whether a dat_entry has to be purged or not
+ * batadv_dat_to_purge() - check whether a dat_entry has to be purged or not
  * @dat_entry: the entry to check
  *
  * Return: true if the entry has to be purged now, false otherwise.
@@ -102,7 +103,7 @@ static bool batadv_dat_to_purge(struct batadv_dat_entry *dat_entry)
 }
 
 /**
- * __batadv_dat_purge - delete entries from the DAT local storage
+ * __batadv_dat_purge() - delete entries from the DAT local storage
  * @bat_priv: the bat priv with all the soft interface information
  * @to_purge: function in charge to decide whether an entry has to be purged or
  *	      not. This function takes the dat_entry as argument and has to
@@ -145,8 +146,8 @@ static void __batadv_dat_purge(struct batadv_priv *bat_priv,
 }
 
 /**
- * batadv_dat_purge - periodic task that deletes old entries from the local DAT
- * hash table
+ * batadv_dat_purge() - periodic task that deletes old entries from the local
+ *  DAT hash table
  * @work: kernel work struct
  */
 static void batadv_dat_purge(struct work_struct *work)
@@ -164,7 +165,7 @@ static void batadv_dat_purge(struct work_struct *work)
 }
 
 /**
- * batadv_compare_dat - comparing function used in the local DAT hash table
+ * batadv_compare_dat() - comparing function used in the local DAT hash table
  * @node: node in the local table
  * @data2: second object to compare the node to
  *
@@ -179,7 +180,7 @@ static bool batadv_compare_dat(const struct hlist_node *node, const void *data2)
 }
 
 /**
- * batadv_arp_hw_src - extract the hw_src field from an ARP packet
+ * batadv_arp_hw_src() - extract the hw_src field from an ARP packet
  * @skb: ARP packet
  * @hdr_size: size of the possible header before the ARP packet
  *
@@ -196,7 +197,7 @@ static u8 *batadv_arp_hw_src(struct sk_buff *skb, int hdr_size)
 }
 
 /**
- * batadv_arp_ip_src - extract the ip_src field from an ARP packet
+ * batadv_arp_ip_src() - extract the ip_src field from an ARP packet
  * @skb: ARP packet
  * @hdr_size: size of the possible header before the ARP packet
  *
@@ -208,7 +209,7 @@ static __be32 batadv_arp_ip_src(struct sk_buff *skb, int hdr_size)
 }
 
 /**
- * batadv_arp_hw_dst - extract the hw_dst field from an ARP packet
+ * batadv_arp_hw_dst() - extract the hw_dst field from an ARP packet
  * @skb: ARP packet
  * @hdr_size: size of the possible header before the ARP packet
  *
@@ -220,7 +221,7 @@ static u8 *batadv_arp_hw_dst(struct sk_buff *skb, int hdr_size)
 }
 
 /**
- * batadv_arp_ip_dst - extract the ip_dst field from an ARP packet
+ * batadv_arp_ip_dst() - extract the ip_dst field from an ARP packet
  * @skb: ARP packet
  * @hdr_size: size of the possible header before the ARP packet
  *
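All four helpers index into the same Ethernet-framed ARP layout; hdr_size only skips the encapsulating batman-adv header. A sketch of the offset arithmetic (illustrative, not part of the patch):

    /* fragment, skb and hdr_size assumed in scope:
     * | hdr_size | Ethernet hdr | struct arphdr | hw_src | ip_src | hw_dst | ip_dst |
     */
    u8 *hw_src = skb->data + hdr_size + ETH_HLEN + sizeof(struct arphdr);
    u8 *hw_dst = hw_src + ETH_ALEN + sizeof(__be32);
    __be32 ip_src = *(__be32 *)(hw_src + ETH_ALEN);
    __be32 ip_dst = *(__be32 *)(hw_dst + ETH_ALEN);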
@@ -232,7 +233,7 @@ static __be32 batadv_arp_ip_dst(struct sk_buff *skb, int hdr_size)
 }
 
 /**
- * batadv_hash_dat - compute the hash value for an IP address
+ * batadv_hash_dat() - compute the hash value for an IP address
  * @data: data to hash
  * @size: size of the hash table
  *
@@ -267,7 +268,7 @@ static u32 batadv_hash_dat(const void *data, u32 size)
 }
 
 /**
- * batadv_dat_entry_hash_find - look for a given dat_entry in the local hash
+ * batadv_dat_entry_hash_find() - look for a given dat_entry in the local hash
  * table
  * @bat_priv: the bat priv with all the soft interface information
  * @ip: search key
@@ -310,7 +311,7 @@ batadv_dat_entry_hash_find(struct batadv_priv *bat_priv, __be32 ip,
 }
 
 /**
- * batadv_dat_entry_add - add a new dat entry or update it if already exists
+ * batadv_dat_entry_add() - add a new dat entry or update it if already exists
  * @bat_priv: the bat priv with all the soft interface information
  * @ip: ipv4 to add/edit
  * @mac_addr: mac address to assign to the given ipv4
@@ -367,7 +368,8 @@ out:
 #ifdef CONFIG_BATMAN_ADV_DEBUG
 
 /**
- * batadv_dbg_arp - print a debug message containing all the ARP packet details
+ * batadv_dbg_arp() - print a debug message containing all the ARP packet
+ *  details
  * @bat_priv: the bat priv with all the soft interface information
  * @skb: ARP packet
  * @hdr_size: size of the possible header before the ARP packet
@@ -448,7 +450,7 @@ static void batadv_dbg_arp(struct batadv_priv *bat_priv, struct sk_buff *skb,
 #endif /* CONFIG_BATMAN_ADV_DEBUG */
 
 /**
- * batadv_is_orig_node_eligible - check whether a node can be a DHT candidate
+ * batadv_is_orig_node_eligible() - check whether a node can be a DHT candidate
  * @res: the array with the already selected candidates
  * @select: number of already selected candidates
  * @tmp_max: address of the currently evaluated node
@@ -502,7 +504,7 @@ out:
 }
 
 /**
- * batadv_choose_next_candidate - select the next DHT candidate
+ * batadv_choose_next_candidate() - select the next DHT candidate
  * @bat_priv: the bat priv with all the soft interface information
  * @cands: candidates array
  * @select: number of candidates already present in the array
@@ -566,8 +568,8 @@ static void batadv_choose_next_candidate(struct batadv_priv *bat_priv,
 }
 
 /**
- * batadv_dat_select_candidates - select the nodes which the DHT message has to
- * be sent to
+ * batadv_dat_select_candidates() - select the nodes which the DHT message has
+ *  to be sent to
  * @bat_priv: the bat priv with all the soft interface information
  * @ip_dst: ipv4 to look up in the DHT
  * @vid: VLAN identifier
@@ -612,7 +614,7 @@ batadv_dat_select_candidates(struct batadv_priv *bat_priv, __be32 ip_dst,
 }
 
 /**
- * batadv_dat_send_data - send a payload to the selected candidates
+ * batadv_dat_send_data() - send a payload to the selected candidates
  * @bat_priv: the bat priv with all the soft interface information
  * @skb: payload to send
  * @ip: the DHT key
@@ -688,7 +690,7 @@ out:
 }
 
 /**
- * batadv_dat_tvlv_container_update - update the dat tvlv container after dat
+ * batadv_dat_tvlv_container_update() - update the dat tvlv container after dat
  *  setting change
  * @bat_priv: the bat priv with all the soft interface information
  */
@@ -710,7 +712,7 @@ static void batadv_dat_tvlv_container_update(struct batadv_priv *bat_priv)
 }
 
 /**
- * batadv_dat_status_update - update the dat tvlv container after dat
+ * batadv_dat_status_update() - update the dat tvlv container after dat
  *  setting change
  * @net_dev: the soft interface net device
  */
@@ -722,7 +724,7 @@ void batadv_dat_status_update(struct net_device *net_dev)
 }
 
 /**
- * batadv_dat_tvlv_ogm_handler_v1 - process incoming dat tvlv container
+ * batadv_dat_tvlv_ogm_handler_v1() - process incoming dat tvlv container
  * @bat_priv: the bat priv with all the soft interface information
  * @orig: the orig_node of the ogm
  * @flags: flags indicating the tvlv state (see batadv_tvlv_handler_flags)
@@ -741,7 +743,7 @@ static void batadv_dat_tvlv_ogm_handler_v1(struct batadv_priv *bat_priv,
 }
 
 /**
- * batadv_dat_hash_free - free the local DAT hash table
+ * batadv_dat_hash_free() - free the local DAT hash table
  * @bat_priv: the bat priv with all the soft interface information
  */
 static void batadv_dat_hash_free(struct batadv_priv *bat_priv)
@@ -757,7 +759,7 @@ static void batadv_dat_hash_free(struct batadv_priv *bat_priv)
 }
 
 /**
- * batadv_dat_init - initialise the DAT internals
+ * batadv_dat_init() - initialise the DAT internals
  * @bat_priv: the bat priv with all the soft interface information
  *
  * Return: 0 in case of success, a negative error code otherwise
@@ -782,7 +784,7 @@ int batadv_dat_init(struct batadv_priv *bat_priv)
 }
 
 /**
- * batadv_dat_free - free the DAT internals
+ * batadv_dat_free() - free the DAT internals
  * @bat_priv: the bat priv with all the soft interface information
  */
 void batadv_dat_free(struct batadv_priv *bat_priv)
@@ -797,7 +799,7 @@ void batadv_dat_free(struct batadv_priv *bat_priv)
 
 #ifdef CONFIG_BATMAN_ADV_DEBUGFS
 /**
- * batadv_dat_cache_seq_print_text - print the local DAT hash table
+ * batadv_dat_cache_seq_print_text() - print the local DAT hash table
  * @seq: seq file to print on
  * @offset: not used
  *
@@ -850,7 +852,7 @@ out:
 #endif
 
 /**
- * batadv_arp_get_type - parse an ARP packet and gets the type
+ * batadv_arp_get_type() - parse an ARP packet and get the type
  * @bat_priv: the bat priv with all the soft interface information
  * @skb: packet to analyse
  * @hdr_size: size of the possible header before the ARP packet in the skb
@@ -924,7 +926,7 @@ out:
 }
 
 /**
- * batadv_dat_get_vid - extract the VLAN identifier from skb if any
+ * batadv_dat_get_vid() - extract the VLAN identifier from skb if any
  * @skb: the buffer containing the packet to extract the VID from
  * @hdr_size: the size of the batman-adv header encapsulating the packet
  *
@@ -950,7 +952,7 @@ static unsigned short batadv_dat_get_vid(struct sk_buff *skb, int *hdr_size)
 }
 
 /**
- * batadv_dat_arp_create_reply - create an ARP Reply
+ * batadv_dat_arp_create_reply() - create an ARP Reply
  * @bat_priv: the bat priv with all the soft interface information
  * @ip_src: ARP sender IP
  * @ip_dst: ARP target IP
@@ -985,7 +987,7 @@ batadv_dat_arp_create_reply(struct batadv_priv *bat_priv, __be32 ip_src,
 }
 
 /**
- * batadv_dat_snoop_outgoing_arp_request - snoop the ARP request and try to
+ * batadv_dat_snoop_outgoing_arp_request() - snoop the ARP request and try to
  * answer using DAT
  * @bat_priv: the bat priv with all the soft interface information
  * @skb: packet to check
@@ -1083,7 +1085,7 @@ out:
 }
 
 /**
- * batadv_dat_snoop_incoming_arp_request - snoop the ARP request and try to
+ * batadv_dat_snoop_incoming_arp_request() - snoop the ARP request and try to
  * answer using the local DAT storage
  * @bat_priv: the bat priv with all the soft interface information
  * @skb: packet to check
@@ -1153,7 +1155,7 @@ out:
 }
 
 /**
- * batadv_dat_snoop_outgoing_arp_reply - snoop the ARP reply and fill the DHT
+ * batadv_dat_snoop_outgoing_arp_reply() - snoop the ARP reply and fill the DHT
  * @bat_priv: the bat priv with all the soft interface information
  * @skb: packet to check
  */
@@ -1193,8 +1195,8 @@ void batadv_dat_snoop_outgoing_arp_reply(struct batadv_priv *bat_priv,
 }
 
 /**
- * batadv_dat_snoop_incoming_arp_reply - snoop the ARP reply and fill the local
- * DAT storage only
+ * batadv_dat_snoop_incoming_arp_reply() - snoop the ARP reply and fill the
+ *  local DAT storage only
  * @bat_priv: the bat priv with all the soft interface information
  * @skb: packet to check
  * @hdr_size: size of the encapsulation header
@@ -1282,8 +1284,8 @@ out:
 }
 
 /**
- * batadv_dat_drop_broadcast_packet - check if an ARP request has to be dropped
- * (because the node has already obtained the reply via DAT) or not
+ * batadv_dat_drop_broadcast_packet() - check if an ARP request has to be
+ *  dropped (because the node has already obtained the reply via DAT) or not
  * @bat_priv: the bat priv with all the soft interface information
  * @forw_packet: the broadcast packet
  *
diff --git a/net/batman-adv/distributed-arp-table.h b/net/batman-adv/distributed-arp-table.h
index ec364a3c1c66..12897eb46268 100644
--- a/net/batman-adv/distributed-arp-table.h
+++ b/net/batman-adv/distributed-arp-table.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
 /* Copyright (C) 2011-2017  B.A.T.M.A.N. contributors:
  *
  * Antonio Quartulli
@@ -23,9 +24,9 @@
 #include <linux/compiler.h>
 #include <linux/netdevice.h>
 #include <linux/types.h>
+#include <uapi/linux/batadv_packet.h>
 
 #include "originator.h"
-#include "packet.h"
 
 struct seq_file;
 struct sk_buff;
@@ -48,7 +49,7 @@ bool batadv_dat_drop_broadcast_packet(struct batadv_priv *bat_priv,
 				      struct batadv_forw_packet *forw_packet);
 
 /**
- * batadv_dat_init_orig_node_addr - assign a DAT address to the orig_node
+ * batadv_dat_init_orig_node_addr() - assign a DAT address to the orig_node
  * @orig_node: the node to assign the DAT address to
  */
 static inline void
@@ -61,7 +62,7 @@ batadv_dat_init_orig_node_addr(struct batadv_orig_node *orig_node)
 }
 
 /**
- * batadv_dat_init_own_addr - assign a DAT address to the node itself
+ * batadv_dat_init_own_addr() - assign a DAT address to the node itself
  * @bat_priv: the bat priv with all the soft interface information
  * @primary_if: a pointer to the primary interface
  */
@@ -82,7 +83,7 @@ void batadv_dat_free(struct batadv_priv *bat_priv);
 int batadv_dat_cache_seq_print_text(struct seq_file *seq, void *offset);
 
 /**
- * batadv_dat_inc_counter - increment the correct DAT packet counter
+ * batadv_dat_inc_counter() - increment the correct DAT packet counter
  * @bat_priv: the bat priv with all the soft interface information
  * @subtype: the 4addr subtype of the packet to be counted
  *
diff --git a/net/batman-adv/fragmentation.c b/net/batman-adv/fragmentation.c
index ebe6e38934e4..22dde42fd80e 100644
--- a/net/batman-adv/fragmentation.c
+++ b/net/batman-adv/fragmentation.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
 /* Copyright (C) 2013-2017  B.A.T.M.A.N. contributors:
  *
  * Martin Hundebøll <martin@hundeboll.net>
@@ -22,7 +23,7 @@
 #include <linux/byteorder/generic.h>
 #include <linux/errno.h>
 #include <linux/etherdevice.h>
-#include <linux/fs.h>
+#include <linux/gfp.h>
 #include <linux/if_ether.h>
 #include <linux/jiffies.h>
 #include <linux/kernel.h>
@@ -32,16 +33,16 @@
 #include <linux/slab.h>
 #include <linux/spinlock.h>
 #include <linux/string.h>
+#include <uapi/linux/batadv_packet.h>
 
 #include "hard-interface.h"
 #include "originator.h"
-#include "packet.h"
 #include "routing.h"
 #include "send.h"
 #include "soft-interface.h"
 
 /**
- * batadv_frag_clear_chain - delete entries in the fragment buffer chain
+ * batadv_frag_clear_chain() - delete entries in the fragment buffer chain
  * @head: head of chain with entries.
  * @dropped: whether the chain is cleared because all fragments are dropped
  *
@@ -65,7 +66,7 @@ static void batadv_frag_clear_chain(struct hlist_head *head, bool dropped)
 }
 
 /**
- * batadv_frag_purge_orig - free fragments associated to an orig
+ * batadv_frag_purge_orig() - free fragments associated to an orig
  * @orig_node: originator to free fragments from
  * @check_cb: optional function to tell if an entry should be purged
  */
@@ -89,7 +90,7 @@ void batadv_frag_purge_orig(struct batadv_orig_node *orig_node,
 }
 
 /**
- * batadv_frag_size_limit - maximum possible size of packet to be fragmented
+ * batadv_frag_size_limit() - maximum possible size of packet to be fragmented
  *
  * Return: the maximum size of payload that can be fragmented.
  */
@@ -104,7 +105,7 @@ static int batadv_frag_size_limit(void)
 }
 
 /**
- * batadv_frag_init_chain - check and prepare fragment chain for new fragment
+ * batadv_frag_init_chain() - check and prepare fragment chain for new fragment
  * @chain: chain in fragments table to init
  * @seqno: sequence number of the received fragment
  *
@@ -134,7 +135,7 @@ static bool batadv_frag_init_chain(struct batadv_frag_table_entry *chain,
 }
 
 /**
- * batadv_frag_insert_packet - insert a fragment into a fragment chain
+ * batadv_frag_insert_packet() - insert a fragment into a fragment chain
  * @orig_node: originator that the fragment was received from
  * @skb: skb to insert
  * @chain_out: list head to attach complete chains of fragments to
@@ -248,7 +249,7 @@ err:
 }
 
 /**
- * batadv_frag_merge_packets - merge a chain of fragments
+ * batadv_frag_merge_packets() - merge a chain of fragments
  * @chain: head of chain with fragments
  *
  * Expand the first skb in the chain and copy the content of the remaining
@@ -306,7 +307,7 @@ free:
 }
 
 /**
- * batadv_frag_skb_buffer - buffer fragment for later merge
+ * batadv_frag_skb_buffer() - buffer fragment for later merge
  * @skb: skb to buffer
  * @orig_node_src: originator that the skb is received from
  *
@@ -346,7 +347,7 @@ out_err:
 }
 
 /**
- * batadv_frag_skb_fwd - forward fragments that would exceed MTU when merged
+ * batadv_frag_skb_fwd() - forward fragments that would exceed MTU when merged
  * @skb: skb to forward
  * @recv_if: interface that the skb is received on
  * @orig_node_src: originator that the skb is received from
@@ -400,7 +401,7 @@ out:
 }
 
 /**
- * batadv_frag_create - create a fragment from skb
+ * batadv_frag_create() - create a fragment from skb
  * @skb: skb to create fragment from
  * @frag_head: header to use in new fragment
  * @fragment_size: size of new fragment
@@ -438,7 +439,7 @@ err:
 }
 
 /**
- * batadv_frag_send_packet - create up to 16 fragments from the passed skb
+ * batadv_frag_send_packet() - create up to 16 fragments from the passed skb
  * @skb: skb to create fragments from
  * @orig_node: final destination of the created fragments
  * @neigh_node: next-hop of the created fragments
diff --git a/net/batman-adv/fragmentation.h b/net/batman-adv/fragmentation.h
index 1a2d6c308745..138b22a1836a 100644
--- a/net/batman-adv/fragmentation.h
+++ b/net/batman-adv/fragmentation.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
 /* Copyright (C) 2013-2017  B.A.T.M.A.N. contributors:
  *
  * Martin Hundebøll <martin@hundeboll.net>
@@ -39,7 +40,7 @@ int batadv_frag_send_packet(struct sk_buff *skb,
 			    struct batadv_neigh_node *neigh_node);
 
 /**
- * batadv_frag_check_entry - check if a list of fragments has timed out
+ * batadv_frag_check_entry() - check if a list of fragments has timed out
  * @frags_entry: table entry to check
  *
  * Return: true if the frags entry has timed out, false otherwise.
diff --git a/net/batman-adv/gateway_client.c b/net/batman-adv/gateway_client.c
index 10d521f0b17f..37fe9a644f22 100644
--- a/net/batman-adv/gateway_client.c
+++ b/net/batman-adv/gateway_client.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
 /* Copyright (C) 2009-2017  B.A.T.M.A.N. contributors:
  *
  * Marek Lindner
@@ -22,7 +23,7 @@
 #include <linux/byteorder/generic.h>
 #include <linux/errno.h>
 #include <linux/etherdevice.h>
-#include <linux/fs.h>
+#include <linux/gfp.h>
 #include <linux/if_ether.h>
 #include <linux/if_vlan.h>
 #include <linux/in.h>
@@ -42,6 +43,7 @@
 #include <linux/stddef.h>
 #include <linux/udp.h>
 #include <net/sock.h>
+#include <uapi/linux/batadv_packet.h>
 #include <uapi/linux/batman_adv.h>
 
 #include "gateway_common.h"
@@ -49,7 +51,6 @@
 #include "log.h"
 #include "netlink.h"
 #include "originator.h"
-#include "packet.h"
 #include "routing.h"
 #include "soft-interface.h"
 #include "sysfs.h"
@@ -68,8 +69,8 @@
 #define BATADV_DHCP_CHADDR_OFFSET	28
 
 /**
- * batadv_gw_node_release - release gw_node from lists and queue for free after
- *  rcu grace period
+ * batadv_gw_node_release() - release gw_node from lists and queue for free
+ *  after rcu grace period
  * @ref: kref pointer of the gw_node
  */
 static void batadv_gw_node_release(struct kref *ref)
@@ -83,7 +84,8 @@ static void batadv_gw_node_release(struct kref *ref)
 }
 
 /**
- * batadv_gw_node_put - decrement the gw_node refcounter and possibly release it
+ * batadv_gw_node_put() - decrement the gw_node refcounter and possibly release
+ *  it
  * @gw_node: gateway node to free
  */
 void batadv_gw_node_put(struct batadv_gw_node *gw_node)
@@ -91,6 +93,12 @@ void batadv_gw_node_put(struct batadv_gw_node *gw_node)
 	kref_put(&gw_node->refcount, batadv_gw_node_release);
 }
 
+/**
+ * batadv_gw_get_selected_gw_node() - Get currently selected gateway
+ * @bat_priv: the bat priv with all the soft interface information
+ *
+ * Return: selected gateway (with increased refcnt), NULL on errors
+ */
 struct batadv_gw_node *
 batadv_gw_get_selected_gw_node(struct batadv_priv *bat_priv)
 {
@@ -109,6 +117,12 @@ out:
 	return gw_node;
 }
 
+/**
+ * batadv_gw_get_selected_orig() - Get originator of currently selected gateway
+ * @bat_priv: the bat priv with all the soft interface information
+ *
+ * Return: orig_node of selected gateway (with increased refcnt), NULL on errors
+ */
 struct batadv_orig_node *
 batadv_gw_get_selected_orig(struct batadv_priv *bat_priv)
 {
@@ -155,7 +169,7 @@ static void batadv_gw_select(struct batadv_priv *bat_priv,
 }
 
 /**
- * batadv_gw_reselect - force a gateway reselection
+ * batadv_gw_reselect() - force a gateway reselection
  * @bat_priv: the bat priv with all the soft interface information
  *
  * Set a flag to remind the GW component to perform a new gateway reselection.
@@ -171,7 +185,7 @@ void batadv_gw_reselect(struct batadv_priv *bat_priv)
 }
 
 /**
- * batadv_gw_check_client_stop - check if client mode has been switched off
+ * batadv_gw_check_client_stop() - check if client mode has been switched off
  * @bat_priv: the bat priv with all the soft interface information
  *
  * This function assumes the caller has checked that the gw state *is actually
@@ -202,6 +216,10 @@ void batadv_gw_check_client_stop(struct batadv_priv *bat_priv)
 	batadv_gw_node_put(curr_gw);
 }
 
+/**
+ * batadv_gw_election() - Elect the best gateway
+ * @bat_priv: the bat priv with all the soft interface information
+ */
 void batadv_gw_election(struct batadv_priv *bat_priv)
 {
 	struct batadv_gw_node *curr_gw = NULL;
@@ -290,6 +308,11 @@ out:
 		batadv_neigh_ifinfo_put(router_ifinfo);
 }
 
+/**
+ * batadv_gw_check_election() - Elect orig node as best gateway when eligible
+ * @bat_priv: the bat priv with all the soft interface information
+ * @orig_node: orig node which is to be checked
+ */
 void batadv_gw_check_election(struct batadv_priv *bat_priv,
 			      struct batadv_orig_node *orig_node)
 {
@@ -321,7 +344,7 @@ out:
 }
 
 /**
- * batadv_gw_node_add - add gateway node to list of available gateways
+ * batadv_gw_node_add() - add gateway node to list of available gateways
  * @bat_priv: the bat priv with all the soft interface information
  * @orig_node: originator announcing gateway capabilities
  * @gateway: announced bandwidth information
@@ -364,7 +387,7 @@ static void batadv_gw_node_add(struct batadv_priv *bat_priv,
 }
 
 /**
- * batadv_gw_node_get - retrieve gateway node from list of available gateways
+ * batadv_gw_node_get() - retrieve gateway node from list of available gateways
  * @bat_priv: the bat priv with all the soft interface information
  * @orig_node: originator announcing gateway capabilities
  *
@@ -393,7 +416,7 @@ struct batadv_gw_node *batadv_gw_node_get(struct batadv_priv *bat_priv,
 }
 
 /**
- * batadv_gw_node_update - update list of available gateways with changed
+ * batadv_gw_node_update() - update list of available gateways with changed
  *  bandwidth information
  * @bat_priv: the bat priv with all the soft interface information
  * @orig_node: originator announcing gateway capabilities
@@ -458,6 +481,11 @@ out:
 		batadv_gw_node_put(gw_node);
 }
 
+/**
+ * batadv_gw_node_delete() - Remove orig_node from gateway list
+ * @bat_priv: the bat priv with all the soft interface information
+ * @orig_node: orig node which is currently in process of being removed
+ */
 void batadv_gw_node_delete(struct batadv_priv *bat_priv,
 			   struct batadv_orig_node *orig_node)
 {
@@ -469,6 +497,10 @@ void batadv_gw_node_delete(struct batadv_priv *bat_priv,
 	batadv_gw_node_update(bat_priv, orig_node, &gateway);
 }
 
+/**
+ * batadv_gw_node_free() - Free gateway information from soft interface
+ * @bat_priv: the bat priv with all the soft interface information
+ */
 void batadv_gw_node_free(struct batadv_priv *bat_priv)
 {
 	struct batadv_gw_node *gw_node;
@@ -484,6 +516,14 @@ void batadv_gw_node_free(struct batadv_priv *bat_priv)
 }
 
 #ifdef CONFIG_BATMAN_ADV_DEBUGFS
+
+/**
+ * batadv_gw_client_seq_print_text() - Print the gateway table in a seq file
+ * @seq: seq file to print on
+ * @offset: not used
+ *
+ * Return: always 0
+ */
 int batadv_gw_client_seq_print_text(struct seq_file *seq, void *offset)
 {
 	struct net_device *net_dev = (struct net_device *)seq->private;
@@ -514,7 +554,7 @@ int batadv_gw_client_seq_print_text(struct seq_file *seq, void *offset)
 #endif
 
 /**
- * batadv_gw_dump - Dump gateways into a message
+ * batadv_gw_dump() - Dump gateways into a message
  * @msg: Netlink message to dump into
  * @cb: Control block containing additional options
  *
@@ -567,7 +607,7 @@ out:
 }
 
 /**
- * batadv_gw_dhcp_recipient_get - check if a packet is a DHCP message
+ * batadv_gw_dhcp_recipient_get() - check if a packet is a DHCP message
  * @skb: the packet to check
  * @header_len: a pointer to the batman-adv header size
  * @chaddr: buffer where the client address will be stored. Valid
@@ -686,7 +726,8 @@ batadv_gw_dhcp_recipient_get(struct sk_buff *skb, unsigned int *header_len,
 }
 
 /**
- * batadv_gw_out_of_range - check if the dhcp request destination is the best gw
+ * batadv_gw_out_of_range() - check if the dhcp request destination is the best
+ *  gateway
  * @bat_priv: the bat priv with all the soft interface information
  * @skb: the outgoing packet
  *
diff --git a/net/batman-adv/gateway_client.h b/net/batman-adv/gateway_client.h
index 3baa3d466e5e..981f58421a32 100644
--- a/net/batman-adv/gateway_client.h
+++ b/net/batman-adv/gateway_client.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
 /* Copyright (C) 2009-2017  B.A.T.M.A.N. contributors:
  *
  * Marek Lindner
diff --git a/net/batman-adv/gateway_common.c b/net/batman-adv/gateway_common.c
index 2c26039c23fc..b3e156af2256 100644
--- a/net/batman-adv/gateway_common.c
+++ b/net/batman-adv/gateway_common.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
 /* Copyright (C) 2009-2017  B.A.T.M.A.N. contributors:
  *
  * Marek Lindner
@@ -26,15 +27,15 @@
 #include <linux/netdevice.h>
 #include <linux/stddef.h>
 #include <linux/string.h>
+#include <uapi/linux/batadv_packet.h>
 
 #include "gateway_client.h"
 #include "log.h"
-#include "packet.h"
 #include "tvlv.h"
 
 /**
- * batadv_parse_throughput - parse supplied string buffer to extract throughput
- *  information
+ * batadv_parse_throughput() - parse supplied string buffer to extract
+ *  throughput information
  * @net_dev: the soft interface net device
  * @buff: string buffer to parse
  * @description: text shown when throughput string cannot be parsed
@@ -100,8 +101,8 @@ bool batadv_parse_throughput(struct net_device *net_dev, char *buff,
 }
 
 /**
- * batadv_parse_gw_bandwidth - parse supplied string buffer to extract download
- *  and upload bandwidth information
+ * batadv_parse_gw_bandwidth() - parse supplied string buffer to extract
+ *  download and upload bandwidth information
  * @net_dev: the soft interface net device
  * @buff: string buffer to parse
  * @down: pointer holding the returned download bandwidth information
@@ -136,8 +137,8 @@ static bool batadv_parse_gw_bandwidth(struct net_device *net_dev, char *buff,
 }
 
 /**
- * batadv_gw_tvlv_container_update - update the gw tvlv container after gateway
- *  setting change
+ * batadv_gw_tvlv_container_update() - update the gw tvlv container after
+ *  gateway setting change
  * @bat_priv: the bat priv with all the soft interface information
  */
 void batadv_gw_tvlv_container_update(struct batadv_priv *bat_priv)
@@ -164,6 +165,15 @@ void batadv_gw_tvlv_container_update(struct batadv_priv *bat_priv)
 	}
 }
 
+/**
+ * batadv_gw_bandwidth_set() - Parse and set download/upload gateway bandwidth
+ *  from supplied string buffer
+ * @net_dev: netdev struct of the soft interface
+ * @buff: the buffer containing the user data
+ * @count: number of bytes in the buffer
+ *
+ * Return: 'count' on success or a negative error code in case of failure
+ */
 ssize_t batadv_gw_bandwidth_set(struct net_device *net_dev, char *buff,
 				size_t count)
 {
@@ -207,7 +217,7 @@ ssize_t batadv_gw_bandwidth_set(struct net_device *net_dev, char *buff,
 }
 
 /**
- * batadv_gw_tvlv_ogm_handler_v1 - process incoming gateway tvlv container
+ * batadv_gw_tvlv_ogm_handler_v1() - process incoming gateway tvlv container
  * @bat_priv: the bat priv with all the soft interface information
  * @orig: the orig_node of the ogm
  * @flags: flags indicating the tvlv state (see batadv_tvlv_handler_flags)
@@ -248,7 +258,7 @@ static void batadv_gw_tvlv_ogm_handler_v1(struct batadv_priv *bat_priv,
 }
 
 /**
- * batadv_gw_init - initialise the gateway handling internals
+ * batadv_gw_init() - initialise the gateway handling internals
  * @bat_priv: the bat priv with all the soft interface information
  */
 void batadv_gw_init(struct batadv_priv *bat_priv)
@@ -264,7 +274,7 @@ void batadv_gw_init(struct batadv_priv *bat_priv)
 }
 
 /**
- * batadv_gw_free - free the gateway handling internals
+ * batadv_gw_free() - free the gateway handling internals
  * @bat_priv: the bat priv with all the soft interface information
  */
 void batadv_gw_free(struct batadv_priv *bat_priv)
diff --git a/net/batman-adv/gateway_common.h b/net/batman-adv/gateway_common.h
index 0a6a97d201f2..afebd9c7edf4 100644
--- a/net/batman-adv/gateway_common.h
+++ b/net/batman-adv/gateway_common.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
 /* Copyright (C) 2009-2017  B.A.T.M.A.N. contributors:
  *
  * Marek Lindner
@@ -32,11 +33,12 @@ enum batadv_gw_modes {
 
 /**
  * enum batadv_bandwidth_units - bandwidth unit types
- * @BATADV_BW_UNIT_KBIT: unit type kbit
- * @BATADV_BW_UNIT_MBIT: unit type mbit
  */
 enum batadv_bandwidth_units {
+	/** @BATADV_BW_UNIT_KBIT: unit type kbit */
 	BATADV_BW_UNIT_KBIT,
+
+	/** @BATADV_BW_UNIT_MBIT: unit type mbit */
 	BATADV_BW_UNIT_MBIT,
 };
 
diff --git a/net/batman-adv/hard-interface.c b/net/batman-adv/hard-interface.c
index 4e3d5340ad96..5f186bff284a 100644
--- a/net/batman-adv/hard-interface.c
+++ b/net/batman-adv/hard-interface.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
 /* Copyright (C) 2007-2017  B.A.T.M.A.N. contributors:
  *
  * Marek Lindner, Simon Wunderlich
@@ -22,7 +23,7 @@
 #include <linux/bug.h>
 #include <linux/byteorder/generic.h>
 #include <linux/errno.h>
-#include <linux/fs.h>
+#include <linux/gfp.h>
 #include <linux/if.h>
 #include <linux/if_arp.h>
 #include <linux/if_ether.h>
@@ -37,6 +38,7 @@
 #include <linux/spinlock.h>
 #include <net/net_namespace.h>
 #include <net/rtnetlink.h>
+#include <uapi/linux/batadv_packet.h>
 
 #include "bat_v.h"
 #include "bridge_loop_avoidance.h"
@@ -45,14 +47,13 @@
 #include "gateway_client.h"
 #include "log.h"
 #include "originator.h"
-#include "packet.h"
 #include "send.h"
 #include "soft-interface.h"
 #include "sysfs.h"
 #include "translation-table.h"
 
 /**
- * batadv_hardif_release - release hard interface from lists and queue for
+ * batadv_hardif_release() - release hard interface from lists and queue for
  *  free after rcu grace period
  * @ref: kref pointer of the hard interface
  */
@@ -66,6 +67,12 @@ void batadv_hardif_release(struct kref *ref)
 	kfree_rcu(hard_iface, rcu);
 }
 
+/**
+ * batadv_hardif_get_by_netdev() - Get hard interface object of a net_device
+ * @net_dev: net_device to search for
+ *
+ * Return: batadv_hard_iface of net_dev (with increased refcnt), NULL on errors
+ */
 struct batadv_hard_iface *
 batadv_hardif_get_by_netdev(const struct net_device *net_dev)
 {
@@ -86,7 +93,7 @@ out:
 }
 
 /**
- * batadv_getlink_net - return link net namespace (of use fallback)
+ * batadv_getlink_net() - return link net namespace (of use fallback)
  * @netdev: net_device to check
  * @fallback_net: return in case get_link_net is not available for @netdev
  *
@@ -105,7 +112,7 @@ static struct net *batadv_getlink_net(const struct net_device *netdev,
 }
 
 /**
- * batadv_mutual_parents - check if two devices are each others parent
+ * batadv_mutual_parents() - check if two devices are each other's parent
  * @dev1: 1st net dev
  * @net1: 1st devices netns
  * @dev2: 2nd net dev
@@ -138,7 +145,7 @@ static bool batadv_mutual_parents(const struct net_device *dev1,
 }
 
 /**
- * batadv_is_on_batman_iface - check if a device is a batman iface descendant
+ * batadv_is_on_batman_iface() - check if a device is a batman iface descendant
  * @net_dev: the device to check
  *
  * If the user creates any virtual device on top of a batman-adv interface, it
@@ -202,7 +209,7 @@ static bool batadv_is_valid_iface(const struct net_device *net_dev)
 }
 
 /**
- * batadv_get_real_netdevice - check if the given netdev struct is a virtual
+ * batadv_get_real_netdevice() - check if the given netdev struct is a virtual
  *  interface on top of another 'real' interface
  * @netdev: the device to check
  *
@@ -246,7 +253,7 @@ out:
 }
 
 /**
- * batadv_get_real_netdev - check if the given net_device struct is a virtual
+ * batadv_get_real_netdev() - check if the given net_device struct is a virtual
  *  interface on top of another 'real' interface
  * @net_device: the device to check
  *
@@ -265,7 +272,7 @@ struct net_device *batadv_get_real_netdev(struct net_device *net_device)
 }
 
 /**
- * batadv_is_wext_netdev - check if the given net_device struct is a
+ * batadv_is_wext_netdev() - check if the given net_device struct is a
  *  wext wifi interface
  * @net_device: the device to check
  *
@@ -289,7 +296,7 @@ static bool batadv_is_wext_netdev(struct net_device *net_device)
 }
 
 /**
- * batadv_is_cfg80211_netdev - check if the given net_device struct is a
+ * batadv_is_cfg80211_netdev() - check if the given net_device struct is a
  *  cfg80211 wifi interface
  * @net_device: the device to check
  *
@@ -309,7 +316,7 @@ static bool batadv_is_cfg80211_netdev(struct net_device *net_device)
 }
 
 /**
- * batadv_wifi_flags_evaluate - calculate wifi flags for net_device
+ * batadv_wifi_flags_evaluate() - calculate wifi flags for net_device
  * @net_device: the device to check
  *
  * Return: batadv_hard_iface_wifi_flags flags of the device
@@ -344,7 +351,7 @@ out:
 }
 
 /**
- * batadv_is_cfg80211_hardif - check if the given hardif is a cfg80211 wifi
+ * batadv_is_cfg80211_hardif() - check if the given hardif is a cfg80211 wifi
  *  interface
  * @hard_iface: the device to check
  *
@@ -362,7 +369,7 @@ bool batadv_is_cfg80211_hardif(struct batadv_hard_iface *hard_iface)
 }
 
 /**
- * batadv_is_wifi_hardif - check if the given hardif is a wifi interface
+ * batadv_is_wifi_hardif() - check if the given hardif is a wifi interface
  * @hard_iface: the device to check
  *
  * Return: true if the net device is a 802.11 wireless device, false otherwise.
@@ -376,7 +383,7 @@ bool batadv_is_wifi_hardif(struct batadv_hard_iface *hard_iface)
 }
 
 /**
- * batadv_hardif_no_broadcast - check whether (re)broadcast is necessary
+ * batadv_hardif_no_broadcast() - check whether (re)broadcast is necessary
  * @if_outgoing: the outgoing interface checked and considered for (re)broadcast
  * @orig_addr: the originator of this packet
  * @orig_neigh: originator address of the forwarder we just got the packet from
@@ -560,6 +567,13 @@ static void batadv_hardif_recalc_extra_skbroom(struct net_device *soft_iface)
 	soft_iface->needed_tailroom = lower_tailroom;
 }
 
+/**
+ * batadv_hardif_min_mtu() - Calculate maximum MTU for soft interface
+ * @soft_iface: netdev struct of the soft interface
+ *
+ * Return: MTU for the soft-interface (limited by the minimal MTU of all active
+ *  slave interfaces)
+ */
 int batadv_hardif_min_mtu(struct net_device *soft_iface)
 {
 	struct batadv_priv *bat_priv = netdev_priv(soft_iface);
@@ -606,7 +620,11 @@ out:
 	return min_t(int, min_mtu - batadv_max_header_len(), ETH_DATA_LEN);
 }
 
-/* adjusts the MTU if a new interface with a smaller MTU appeared. */
+/**
+ * batadv_update_min_mtu() - Adjusts the MTU if a new interface with a smaller
+ *  MTU appeared
+ * @soft_iface: netdev struct of the soft interface
+ */
 void batadv_update_min_mtu(struct net_device *soft_iface)
 {
 	soft_iface->mtu = batadv_hardif_min_mtu(soft_iface);
@@ -667,7 +685,7 @@ batadv_hardif_deactivate_interface(struct batadv_hard_iface *hard_iface)
 }
 
 /**
- * batadv_master_del_slave - remove hard_iface from the current master interface
+ * batadv_master_del_slave() - remove hard_iface from the current master iface
  * @slave: the interface enslaved in another master
  * @master: the master from which slave has to be removed
  *
@@ -691,6 +709,14 @@ static int batadv_master_del_slave(struct batadv_hard_iface *slave,
 	return ret;
 }
 
+/**
+ * batadv_hardif_enable_interface() - Enslave hard interface to soft interface
+ * @hard_iface: hard interface to add to soft interface
+ * @net: the applicable net namespace
+ * @iface_name: name of the soft interface
+ *
+ * Return: 0 on success or negative error number in case of failure
+ */
 int batadv_hardif_enable_interface(struct batadv_hard_iface *hard_iface,
 				   struct net *net, const char *iface_name)
 {
@@ -802,6 +828,12 @@ err:
 	return ret;
 }
 
+/**
+ * batadv_hardif_disable_interface() - Remove hard interface from soft interface
+ * @hard_iface: hard interface to be removed
+ * @autodel: whether to delete the soft interface when it doesn't contain any
+ *  other slave interfaces
+ */
 void batadv_hardif_disable_interface(struct batadv_hard_iface *hard_iface,
 				     enum batadv_hard_if_cleanup autodel)
 {
@@ -936,6 +968,9 @@ static void batadv_hardif_remove_interface(struct batadv_hard_iface *hard_iface)
 	batadv_hardif_put(hard_iface);
 }
 
+/**
+ * batadv_hardif_remove_interfaces() - Remove all hard interfaces
+ */
 void batadv_hardif_remove_interfaces(void)
 {
 	struct batadv_hard_iface *hard_iface, *hard_iface_tmp;
diff --git a/net/batman-adv/hard-interface.h b/net/batman-adv/hard-interface.h
index 9f9890ff7a22..de5e9a374ece 100644
--- a/net/batman-adv/hard-interface.h
+++ b/net/batman-adv/hard-interface.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
 /* Copyright (C) 2007-2017  B.A.T.M.A.N. contributors:
  *
  * Marek Lindner, Simon Wunderlich
@@ -30,36 +31,74 @@
 struct net_device;
 struct net;
 
+/**
+ * enum batadv_hard_if_state - State of a hard interface
+ */
 enum batadv_hard_if_state {
+	/**
+	 * @BATADV_IF_NOT_IN_USE: interface is not used as slave interface of a
+	 * batman-adv soft interface
+	 */
 	BATADV_IF_NOT_IN_USE,
+
+	/**
+	 * @BATADV_IF_TO_BE_REMOVED: interface will be removed from soft
+	 * interface
+	 */
 	BATADV_IF_TO_BE_REMOVED,
+
+	/** @BATADV_IF_INACTIVE: interface is deactivated */
 	BATADV_IF_INACTIVE,
+
+	/** @BATADV_IF_ACTIVE: interface is used */
 	BATADV_IF_ACTIVE,
+
+	/** @BATADV_IF_TO_BE_ACTIVATED: interface is getting activated */
 	BATADV_IF_TO_BE_ACTIVATED,
+
+	/**
+	 * @BATADV_IF_I_WANT_YOU: interface is queued up (using sysfs) for being
+	 * added as slave interface of a batman-adv soft interface
+	 */
 	BATADV_IF_I_WANT_YOU,
 };
 
 /**
  * enum batadv_hard_if_bcast - broadcast avoidance options
- * @BATADV_HARDIF_BCAST_OK: Do broadcast on according hard interface
- * @BATADV_HARDIF_BCAST_NORECIPIENT: Broadcast not needed, there is no recipient
- * @BATADV_HARDIF_BCAST_DUPFWD: There is just the neighbor we got it from
- * @BATADV_HARDIF_BCAST_DUPORIG: There is just the originator
  */
 enum batadv_hard_if_bcast {
+	/** @BATADV_HARDIF_BCAST_OK: Do broadcast on according hard interface */
 	BATADV_HARDIF_BCAST_OK = 0,
+
+	/**
+	 * @BATADV_HARDIF_BCAST_NORECIPIENT: Broadcast not needed, there is no
+	 *  recipient
+	 */
 	BATADV_HARDIF_BCAST_NORECIPIENT,
+
+	/**
+	 * @BATADV_HARDIF_BCAST_DUPFWD: There is just the neighbor we got it
+	 *  from
+	 */
 	BATADV_HARDIF_BCAST_DUPFWD,
+
+	/** @BATADV_HARDIF_BCAST_DUPORIG: There is just the originator */
 	BATADV_HARDIF_BCAST_DUPORIG,
 };
 
 /**
  * enum batadv_hard_if_cleanup - Cleanup modi for soft_iface after slave removal
- * @BATADV_IF_CLEANUP_KEEP: Don't automatically delete soft-interface
- * @BATADV_IF_CLEANUP_AUTO: Delete soft-interface after last slave was removed
  */
 enum batadv_hard_if_cleanup {
+	/**
+	 * @BATADV_IF_CLEANUP_KEEP: Don't automatically delete soft-interface
+	 */
 	BATADV_IF_CLEANUP_KEEP,
+
+	/**
+	 * @BATADV_IF_CLEANUP_AUTO: Delete soft-interface after last slave was
+	 *  removed
+	 */
 	BATADV_IF_CLEANUP_AUTO,
 };
 
@@ -82,7 +121,7 @@ int batadv_hardif_no_broadcast(struct batadv_hard_iface *if_outgoing,
 			       u8 *orig_addr, u8 *orig_neigh);
 
 /**
- * batadv_hardif_put - decrement the hard interface refcounter and possibly
+ * batadv_hardif_put() - decrement the hard interface refcounter and possibly
  *  release it
  * @hard_iface: the hard interface to free
  */
@@ -91,6 +130,12 @@ static inline void batadv_hardif_put(struct batadv_hard_iface *hard_iface)
 	kref_put(&hard_iface->refcount, batadv_hardif_release);
 }
 
+/**
+ * batadv_primary_if_get_selected() - Get reference to primary interface
+ * @bat_priv: the bat priv with all the soft interface information
+ *
+ * Return: primary interface (with increased refcnt), otherwise NULL
+ */
 static inline struct batadv_hard_iface *
 batadv_primary_if_get_selected(struct batadv_priv *bat_priv)
 {
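batadv_primary_if_get_selected() and batadv_hardif_put() form the usual kref pair: every successful get must be balanced by exactly one put. A minimal usage sketch (illustrative, not part of the patch; bat_priv assumed in scope):

    struct batadv_hard_iface *primary_if;

    primary_if = batadv_primary_if_get_selected(bat_priv);
    if (!primary_if)
            return; /* no primary interface currently selected */

    /* ... use primary_if->net_dev etc. ... */

    batadv_hardif_put(primary_if); /* drop the reference taken above */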
diff --git a/net/batman-adv/hash.c b/net/batman-adv/hash.c
index b5f7e13918ac..04d964358c98 100644
--- a/net/batman-adv/hash.c
+++ b/net/batman-adv/hash.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
 /* Copyright (C) 2006-2017  B.A.T.M.A.N. contributors:
  *
  * Simon Wunderlich, Marek Lindner
@@ -18,7 +19,7 @@
 #include "hash.h"
 #include "main.h"
 
-#include <linux/fs.h>
+#include <linux/gfp.h>
 #include <linux/lockdep.h>
 #include <linux/slab.h>
 
@@ -33,7 +34,10 @@ static void batadv_hash_init(struct batadv_hashtable *hash)
 	}
 }
 
-/* free only the hashtable and the hash itself. */
+/**
+ * batadv_hash_destroy() - Free only the hashtable and the hash itself
+ * @hash: hash object to destroy
+ */
 void batadv_hash_destroy(struct batadv_hashtable *hash)
 {
 	kfree(hash->list_locks);
@@ -41,7 +45,12 @@ void batadv_hash_destroy(struct batadv_hashtable *hash)
 	kfree(hash);
 }
 
-/* allocates and clears the hash */
+/**
+ * batadv_hash_new() - Allocates and clears the hashtable
+ * @size: number of hash buckets to allocate
+ *
+ * Return: newly allocated hashtable, NULL on errors
+ */
 struct batadv_hashtable *batadv_hash_new(u32 size)
 {
 	struct batadv_hashtable *hash;
@@ -70,6 +79,11 @@ free_hash:
 	return NULL;
 }
 
+/**
+ * batadv_hash_set_lock_class() - Set specific lockdep class for hash spinlocks
+ * @hash: hash object to modify
+ * @key: lockdep class key address
+ */
 void batadv_hash_set_lock_class(struct batadv_hashtable *hash,
 				struct lock_class_key *key)
 {
diff --git a/net/batman-adv/hash.h b/net/batman-adv/hash.h
index 0c905e91c5e2..4ce1b6d3ad5c 100644
--- a/net/batman-adv/hash.h
+++ b/net/batman-adv/hash.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
 /* Copyright (C) 2006-2017  B.A.T.M.A.N. contributors:
  *
  * Simon Wunderlich, Marek Lindner
@@ -45,10 +46,18 @@ typedef bool (*batadv_hashdata_compare_cb)(const struct hlist_node *,
 typedef u32 (*batadv_hashdata_choose_cb)(const void *, u32);
 typedef void (*batadv_hashdata_free_cb)(struct hlist_node *, void *);
 
+/**
+ * struct batadv_hashtable - Wrapper of simple hlist based hashtable
+ */
 struct batadv_hashtable {
-	struct hlist_head *table;   /* the hashtable itself with the buckets */
-	spinlock_t *list_locks;     /* spinlock for each hash list entry */
-	u32 size;		    /* size of hashtable */
+	/** @table: the hashtable itself with the buckets */
+	struct hlist_head *table;
+
+	/** @list_locks: spinlock for each hash list entry */
+	spinlock_t *list_locks;
+
+	/** @size: size of hashtable */
+	u32 size;
 };
 
 /* allocates and clears the hash */
@@ -62,7 +71,7 @@ void batadv_hash_set_lock_class(struct batadv_hashtable *hash,
 void batadv_hash_destroy(struct batadv_hashtable *hash);
 
 /**
- *	batadv_hash_add - adds data to the hashtable
+ *	batadv_hash_add() - adds data to the hashtable
  *	@hash: storage hash table
  *	@compare: callback to determine if 2 hash elements are identical
  *	@choose: callback calculating the hash index
@@ -112,8 +121,15 @@ out:
 	return ret;
 }
 
-/* removes data from hash, if found. data could be the structure you use with
- * just the key filled, we just need the key for comparing.
+/**
+ * batadv_hash_remove() - Removes data from hash, if found
+ * @hash: hash table
+ * @compare: callback to determine if 2 hash elements are identical
+ * @choose: callback calculating the hash index
+ * @data: data passed to the aforementioned callbacks as argument
+ *
+ * Data could be the structure you use with just the key filled; we just need
+ * the key for comparing.
  *
 * Return: returns pointer to data on success, so you can remove the used
  * structure yourself, or NULL on error
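
A hedged sketch of the calling pattern for the hash API documented above;
"compare", "choose" and "entry" (an object embedding an hlist_node named
"node") are assumed names, not part of this patch:

	struct batadv_hashtable *hash;

	hash = batadv_hash_new(64);	/* 64 buckets, one spinlock each */
	if (!hash)
		return -ENOMEM;

	/* 0 on insert, 1 if an equal element already exists, -1 on error */
	batadv_hash_add(hash, compare, choose, entry, &entry->node);

	/* frees only buckets and locks; remove the entries beforehand */
	batadv_hash_destroy(hash);
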
diff --git a/net/batman-adv/icmp_socket.c b/net/batman-adv/icmp_socket.c
index a98e0a986cef..581375d0eed2 100644
--- a/net/batman-adv/icmp_socket.c
+++ b/net/batman-adv/icmp_socket.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
 /* Copyright (C) 2007-2017  B.A.T.M.A.N. contributors:
  *
  * Marek Lindner
@@ -26,6 +27,7 @@
 #include <linux/export.h>
 #include <linux/fcntl.h>
 #include <linux/fs.h>
+#include <linux/gfp.h>
 #include <linux/if_ether.h>
 #include <linux/kernel.h>
 #include <linux/list.h>
@@ -42,11 +44,11 @@
 #include <linux/string.h>
 #include <linux/uaccess.h>
 #include <linux/wait.h>
+#include <uapi/linux/batadv_packet.h>
 
 #include "hard-interface.h"
 #include "log.h"
 #include "originator.h"
-#include "packet.h"
 #include "send.h"
 
 static struct batadv_socket_client *batadv_socket_client_hash[256];
@@ -55,6 +57,9 @@ static void batadv_socket_add_packet(struct batadv_socket_client *socket_client,
 				     struct batadv_icmp_header *icmph,
 				     size_t icmp_len);
 
+/**
+ * batadv_socket_init() - Initialize soft interface independent socket data
+ */
 void batadv_socket_init(void)
 {
 	memset(batadv_socket_client_hash, 0, sizeof(batadv_socket_client_hash));
@@ -314,6 +319,12 @@ static const struct file_operations batadv_fops = {
 	.llseek = no_llseek,
 };
 
+/**
+ * batadv_socket_setup() - Create debugfs "socket" file
+ * @bat_priv: the bat priv with all the soft interface information
+ *
+ * Return: 0 on success or negative error number in case of failure
+ */
 int batadv_socket_setup(struct batadv_priv *bat_priv)
 {
 	struct dentry *d;
@@ -333,7 +344,7 @@ err:
 }
 
 /**
- * batadv_socket_add_packet - schedule an icmp packet to be sent to
+ * batadv_socket_add_packet() - schedule an icmp packet to be sent to
  *  userspace on an icmp socket.
  * @socket_client: the socket this packet belongs to
  * @icmph: pointer to the header of the icmp packet
@@ -390,7 +401,7 @@ static void batadv_socket_add_packet(struct batadv_socket_client *socket_client,
 }
 
 /**
- * batadv_socket_receive_packet - schedule an icmp packet to be received
+ * batadv_socket_receive_packet() - schedule an icmp packet to be received
  *  locally and sent to userspace.
  * @icmph: pointer to the header of the icmp packet
  * @icmp_len: total length of the icmp packet
diff --git a/net/batman-adv/icmp_socket.h b/net/batman-adv/icmp_socket.h
index f3fec40aae86..84cddd01eeab 100644
--- a/net/batman-adv/icmp_socket.h
+++ b/net/batman-adv/icmp_socket.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
 /* Copyright (C) 2007-2017  B.A.T.M.A.N. contributors:
  *
  * Marek Lindner
diff --git a/net/batman-adv/log.c b/net/batman-adv/log.c
index 76451460c98d..9be74a44e99d 100644
--- a/net/batman-adv/log.c
+++ b/net/batman-adv/log.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
 /* Copyright (C) 2010-2017  B.A.T.M.A.N. contributors:
  *
  * Marek Lindner
@@ -24,6 +25,7 @@
 #include <linux/export.h>
 #include <linux/fcntl.h>
 #include <linux/fs.h>
+#include <linux/gfp.h>
 #include <linux/jiffies.h>
 #include <linux/kernel.h>
 #include <linux/module.h>
@@ -86,6 +88,13 @@ static int batadv_fdebug_log(struct batadv_priv_debug_log *debug_log,
 	return 0;
 }
 
+/**
+ * batadv_debug_log() - Add debug log entry
+ * @bat_priv: the bat priv with all the soft interface information
+ * @fmt: format string
+ *
+ * Return: 0 on success or negative error number in case of failure
+ */
 int batadv_debug_log(struct batadv_priv *bat_priv, const char *fmt, ...)
 {
 	va_list args;
@@ -197,6 +206,12 @@ static const struct file_operations batadv_log_fops = {
 	.llseek         = no_llseek,
 };
 
+/**
+ * batadv_debug_log_setup() - Initialize debug log
+ * @bat_priv: the bat priv with all the soft interface information
+ *
+ * Return: 0 on success or negative error number in case of failure
+ */
 int batadv_debug_log_setup(struct batadv_priv *bat_priv)
 {
 	struct dentry *d;
@@ -222,6 +237,10 @@ err:
 	return -ENOMEM;
 }
 
+/**
+ * batadv_debug_log_cleanup() - Destroy debug log
+ * @bat_priv: the bat priv with all the soft interface information
+ */
 void batadv_debug_log_cleanup(struct batadv_priv *bat_priv)
 {
 	kfree(bat_priv->debug_log);
diff --git a/net/batman-adv/log.h b/net/batman-adv/log.h
index 65ce97efa6b5..35e02b2b9e72 100644
--- a/net/batman-adv/log.h
+++ b/net/batman-adv/log.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
 /* Copyright (C) 2007-2017  B.A.T.M.A.N. contributors:
  *
  * Marek Lindner, Simon Wunderlich
@@ -44,25 +45,33 @@ static inline void batadv_debug_log_cleanup(struct batadv_priv *bat_priv)
 
 /**
  * enum batadv_dbg_level - available log levels
- * @BATADV_DBG_BATMAN: OGM and TQ computations related messages
- * @BATADV_DBG_ROUTES: route added / changed / deleted
- * @BATADV_DBG_TT: translation table messages
- * @BATADV_DBG_BLA: bridge loop avoidance messages
- * @BATADV_DBG_DAT: ARP snooping and DAT related messages
- * @BATADV_DBG_NC: network coding related messages
- * @BATADV_DBG_MCAST: multicast related messages
- * @BATADV_DBG_TP_METER: throughput meter messages
- * @BATADV_DBG_ALL: the union of all the above log levels
  */
 enum batadv_dbg_level {
+	/** @BATADV_DBG_BATMAN: OGM and TQ computations related messages */
 	BATADV_DBG_BATMAN	= BIT(0),
+
+	/** @BATADV_DBG_ROUTES: route added / changed / deleted */
 	BATADV_DBG_ROUTES	= BIT(1),
+
+	/** @BATADV_DBG_TT: translation table messages */
 	BATADV_DBG_TT		= BIT(2),
+
+	/** @BATADV_DBG_BLA: bridge loop avoidance messages */
 	BATADV_DBG_BLA		= BIT(3),
+
+	/** @BATADV_DBG_DAT: ARP snooping and DAT related messages */
 	BATADV_DBG_DAT		= BIT(4),
+
+	/** @BATADV_DBG_NC: network coding related messages */
 	BATADV_DBG_NC		= BIT(5),
+
+	/** @BATADV_DBG_MCAST: multicast related messages */
 	BATADV_DBG_MCAST	= BIT(6),
+
+	/** @BATADV_DBG_TP_METER: throughput meter messages */
 	BATADV_DBG_TP_METER	= BIT(7),
+
+	/** @BATADV_DBG_ALL: the union of all the above log levels */
 	BATADV_DBG_ALL		= 255,
 };
 
@@ -70,7 +79,14 @@ enum batadv_dbg_level {
 int batadv_debug_log(struct batadv_priv *bat_priv, const char *fmt, ...)
 __printf(2, 3);
 
-/* possibly ratelimited debug output */
+/**
+ * _batadv_dbg() - Store debug output with(out) ratelimiting
+ * @type: type of debug message
+ * @bat_priv: the bat priv with all the soft interface information
+ * @ratelimited: whether output should be rate limited
+ * @fmt: format string
+ * @arg...: variable arguments
+ */
 #define _batadv_dbg(type, bat_priv, ratelimited, fmt, arg...)		\
 	do {								\
 		struct batadv_priv *__batpriv = (bat_priv);		\
@@ -89,11 +105,30 @@ static inline void _batadv_dbg(int type __always_unused,
 }
 #endif
 
+/**
+ * batadv_dbg() - Store debug output without ratelimiting
+ * @type: type of debug message
+ * @bat_priv: the bat priv with all the soft interface information
+ * @arg...: format string and variable arguments
+ */
 #define batadv_dbg(type, bat_priv, arg...) \
 	_batadv_dbg(type, bat_priv, 0, ## arg)
+
+/**
+ * batadv_dbg_ratelimited() - Store debug output with ratelimiting
+ * @type: type of debug message
+ * @bat_priv: the bat priv with all the soft interface information
+ * @arg...: format string and variable arguments
+ */
 #define batadv_dbg_ratelimited(type, bat_priv, arg...) \
 	_batadv_dbg(type, bat_priv, 1, ## arg)
 
+/**
+ * batadv_info() - Store message in debug buffer and print it to kmsg buffer
+ * @net_dev: the soft interface net device
+ * @fmt: format string
+ * @arg...: variable arguments
+ */
 #define batadv_info(net_dev, fmt, arg...)				\
 	do {								\
 		struct net_device *_netdev = (net_dev);                 \
@@ -101,6 +136,13 @@ static inline void _batadv_dbg(int type __always_unused,
 		batadv_dbg(BATADV_DBG_ALL, _batpriv, fmt, ## arg);	\
 		pr_info("%s: " fmt, _netdev->name, ## arg);		\
 	} while (0)
+
+/**
+ * batadv_err() - Store error in debug buffer and print it to kmsg buffer
+ * @net_dev: the soft interface net device
+ * @fmt: format string
+ * @arg...: variable arguments
+ */
 #define batadv_err(net_dev, fmt, arg...)				\
 	do {								\
 		struct net_device *_netdev = (net_dev);                 \
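
A short usage sketch for the logging helpers documented above; the message
texts and the addr variable are invented for illustration:

	/* stored in the debug log, printed only when BATADV_DBG_TT is set
	 * in the log_level of this mesh interface
	 */
	batadv_dbg(BATADV_DBG_TT, bat_priv, "new client %pM\n", addr);

	/* stored in the debug log and mirrored to kmsg via pr_info() */
	batadv_info(soft_iface, "mesh activated\n");
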
diff --git a/net/batman-adv/main.c b/net/batman-adv/main.c
index 4daed7ad46f2..d31c8266e244 100644
--- a/net/batman-adv/main.c
+++ b/net/batman-adv/main.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
 /* Copyright (C) 2007-2017  B.A.T.M.A.N. contributors:
  *
  * Marek Lindner, Simon Wunderlich
@@ -18,12 +19,12 @@
 #include "main.h"
 
 #include <linux/atomic.h>
-#include <linux/bug.h>
+#include <linux/build_bug.h>
 #include <linux/byteorder/generic.h>
 #include <linux/crc32c.h>
 #include <linux/errno.h>
-#include <linux/fs.h>
 #include <linux/genetlink.h>
+#include <linux/gfp.h>
 #include <linux/if_ether.h>
 #include <linux/if_vlan.h>
 #include <linux/init.h>
@@ -45,6 +46,7 @@
 #include <linux/workqueue.h>
 #include <net/dsfield.h>
 #include <net/rtnetlink.h>
+#include <uapi/linux/batadv_packet.h>
 #include <uapi/linux/batman_adv.h>
 
 #include "bat_algo.h"
@@ -62,7 +64,6 @@
 #include "netlink.h"
 #include "network-coding.h"
 #include "originator.h"
-#include "packet.h"
 #include "routing.h"
 #include "send.h"
 #include "soft-interface.h"
@@ -139,6 +140,12 @@ static void __exit batadv_exit(void)
 	batadv_tt_cache_destroy();
 }
 
+/**
+ * batadv_mesh_init() - Initialize soft interface
+ * @soft_iface: netdev struct of the soft interface
+ *
+ * Return: 0 on success or negative error number in case of failure
+ */
 int batadv_mesh_init(struct net_device *soft_iface)
 {
 	struct batadv_priv *bat_priv = netdev_priv(soft_iface);
@@ -216,6 +223,10 @@ err:
 	return ret;
 }
 
+/**
+ * batadv_mesh_free() - Deinitialize soft interface
+ * @soft_iface: netdev struct of the soft interface
+ */
 void batadv_mesh_free(struct net_device *soft_iface)
 {
 	struct batadv_priv *bat_priv = netdev_priv(soft_iface);
@@ -255,8 +266,8 @@ void batadv_mesh_free(struct net_device *soft_iface)
 }
 
 /**
- * batadv_is_my_mac - check if the given mac address belongs to any of the real
- * interfaces in the current mesh
+ * batadv_is_my_mac() - check if the given mac address belongs to any of the
+ *  real interfaces in the current mesh
  * @bat_priv: the bat priv with all the soft interface information
  * @addr: the address to check
  *
@@ -286,7 +297,7 @@ bool batadv_is_my_mac(struct batadv_priv *bat_priv, const u8 *addr)
 
 #ifdef CONFIG_BATMAN_ADV_DEBUGFS
 /**
- * batadv_seq_print_text_primary_if_get - called from debugfs table printing
+ * batadv_seq_print_text_primary_if_get() - called from debugfs table printing
  *  function that requires the primary interface
  * @seq: debugfs table seq_file struct
  *
@@ -323,7 +334,7 @@ out:
 #endif
 
 /**
- * batadv_max_header_len - calculate maximum encapsulation overhead for a
+ * batadv_max_header_len() - calculate maximum encapsulation overhead for a
  *  payload packet
  *
  * Return: the maximum encapsulation overhead in bytes.
@@ -348,7 +359,7 @@ int batadv_max_header_len(void)
 }
 
 /**
- * batadv_skb_set_priority - sets skb priority according to packet content
+ * batadv_skb_set_priority() - sets skb priority according to packet content
  * @skb: the packet to be sent
  * @offset: offset to the packet content
  *
@@ -412,6 +423,16 @@ static int batadv_recv_unhandled_packet(struct sk_buff *skb,
 /* incoming packets with the batman ethertype received on any active hard
  * interface
  */
+
+/**
+ * batadv_batman_skb_recv() - Handle incoming message from a hard interface
+ * @skb: the received packet
+ * @dev: the net device that the packet was received on
+ * @ptype: packet type of incoming packet (ETH_P_BATMAN)
+ * @orig_dev: the original receive net device (e.g. bonded device)
+ *
+ * Return: NET_RX_SUCCESS on success or NET_RX_DROP in case of failure
+ */
 int batadv_batman_skb_recv(struct sk_buff *skb, struct net_device *dev,
 			   struct packet_type *ptype,
 			   struct net_device *orig_dev)
@@ -535,6 +556,13 @@ static void batadv_recv_handler_init(void)
 	batadv_rx_handler[BATADV_UNICAST_FRAG] = batadv_recv_frag_packet;
 }
 
+/**
+ * batadv_recv_handler_register() - Register handler for batman-adv packet type
+ * @packet_type: batadv_packettype which should be handled
+ * @recv_handler: receive handler for the packet type
+ *
+ * Return: 0 on success or negative error number in case of failure
+ */
 int
 batadv_recv_handler_register(u8 packet_type,
 			     int (*recv_handler)(struct sk_buff *,
@@ -552,13 +580,17 @@ batadv_recv_handler_register(u8 packet_type,
 	return 0;
 }
 
+/**
+ * batadv_recv_handler_unregister() - Unregister handler for packet type
+ * @packet_type: batadv_packettype which should no longer be handled
+ */
 void batadv_recv_handler_unregister(u8 packet_type)
 {
 	batadv_rx_handler[packet_type] = batadv_recv_unhandled_packet;
 }
 
 /**
- * batadv_skb_crc32 - calculate CRC32 of the whole packet and skip bytes in
+ * batadv_skb_crc32() - calculate CRC32 of the whole packet and skip bytes in
  *  the header
  * @skb: skb pointing to fragmented socket buffers
  * @payload_ptr: Pointer to position inside the head buffer of the skb
@@ -591,7 +623,7 @@ __be32 batadv_skb_crc32(struct sk_buff *skb, u8 *payload_ptr)
 }
 
 /**
- * batadv_get_vid - extract the VLAN identifier from skb if any
+ * batadv_get_vid() - extract the VLAN identifier from skb if any
  * @skb: the buffer containing the packet
  * @header_len: length of the batman header preceding the ethernet header
  *
@@ -618,7 +650,7 @@ unsigned short batadv_get_vid(struct sk_buff *skb, size_t header_len)
 }
 
 /**
- * batadv_vlan_ap_isola_get - return the AP isolation status for the given vlan
+ * batadv_vlan_ap_isola_get() - return AP isolation status for the given vlan
  * @bat_priv: the bat priv with all the soft interface information
 * @vid: the VLAN identifier for which the AP isolation attribute has to be
  *  looked up
diff --git a/net/batman-adv/main.h b/net/batman-adv/main.h
index edb2f239d04d..f7ba3f96d8f3 100644
--- a/net/batman-adv/main.h
+++ b/net/batman-adv/main.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
 /* Copyright (C) 2007-2017  B.A.T.M.A.N. contributors:
  *
  * Marek Lindner, Simon Wunderlich
@@ -24,7 +25,7 @@
 #define BATADV_DRIVER_DEVICE "batman-adv"
 
 #ifndef BATADV_SOURCE_VERSION
-#define BATADV_SOURCE_VERSION "2017.4"
+#define BATADV_SOURCE_VERSION "2018.0"
 #endif
 
 /* B.A.T.M.A.N. parameters */
@@ -140,24 +141,56 @@
  */
 #define BATADV_TP_MAX_NUM 5
 
+/**
+ * enum batadv_mesh_state - State of a soft interface
+ */
 enum batadv_mesh_state {
+	/** @BATADV_MESH_INACTIVE: soft interface is not yet running */
 	BATADV_MESH_INACTIVE,
+
+	/** @BATADV_MESH_ACTIVE: interface is up and running */
 	BATADV_MESH_ACTIVE,
+
+	/** @BATADV_MESH_DEACTIVATING: interface is getting shut down */
 	BATADV_MESH_DEACTIVATING,
 };
 
 #define BATADV_BCAST_QUEUE_LEN		256
 #define BATADV_BATMAN_QUEUE_LEN	256
 
+/**
+ * enum batadv_uev_action - action type of uevent
+ */
 enum batadv_uev_action {
+	/** @BATADV_UEV_ADD: gateway was selected (after none was selected) */
 	BATADV_UEV_ADD = 0,
+
+	/**
+	 * @BATADV_UEV_DEL: selected gateway was removed and none is selected
+	 * anymore
+	 */
 	BATADV_UEV_DEL,
+
+	/**
+	 * @BATADV_UEV_CHANGE: a different gateway was selected as best gateway
+	 */
 	BATADV_UEV_CHANGE,
+
+	/**
+	 * @BATADV_UEV_LOOPDETECT: loop was detected which cannot be handled by
+	 * bridge loop avoidance
+	 */
 	BATADV_UEV_LOOPDETECT,
 };
 
+/**
+ * enum batadv_uev_type - Type of uevent
+ */
 enum batadv_uev_type {
+	/** @BATADV_UEV_GW: selected gateway was modified */
 	BATADV_UEV_GW = 0,
+
+	/** @BATADV_UEV_BLA: bridge loop avoidance event */
 	BATADV_UEV_BLA,
 };
 
@@ -184,16 +217,14 @@ enum batadv_uev_type {
 
 /* Kernel headers */
 
-#include <linux/bitops.h> /* for packet.h */
 #include <linux/compiler.h>
 #include <linux/etherdevice.h>
-#include <linux/if_ether.h> /* for packet.h */
 #include <linux/if_vlan.h>
 #include <linux/jiffies.h>
 #include <linux/percpu.h>
 #include <linux/types.h>
+#include <uapi/linux/batadv_packet.h>
 
-#include "packet.h"
 #include "types.h"
 
 struct net_device;
@@ -202,7 +233,7 @@ struct seq_file;
 struct sk_buff;
 
 /**
- * batadv_print_vid - return printable version of vid information
+ * batadv_print_vid() - return printable version of vid information
  * @vid: the VLAN identifier
  *
  * Return: -1 when no VLAN is used, VLAN id otherwise
@@ -238,7 +269,7 @@ void batadv_recv_handler_unregister(u8 packet_type);
 __be32 batadv_skb_crc32(struct sk_buff *skb, u8 *payload_ptr);
 
 /**
- * batadv_compare_eth - Compare two not u16 aligned Ethernet addresses
+ * batadv_compare_eth() - Compare two non-u16-aligned Ethernet addresses
  * @data1: Pointer to a six-byte array containing the Ethernet address
  * @data2: Pointer other six-byte array containing the Ethernet address
  *
@@ -252,7 +283,7 @@ static inline bool batadv_compare_eth(const void *data1, const void *data2)
 }
 
 /**
- * batadv_has_timed_out - compares current time (jiffies) and timestamp +
+ * batadv_has_timed_out() - compares current time (jiffies) and timestamp +
  *  timeout
  * @timestamp:		base value to compare with (in jiffies)
  * @timeout:		added to base value before comparing (in milliseconds)
@@ -265,40 +296,96 @@ static inline bool batadv_has_timed_out(unsigned long timestamp,
 	return time_is_before_jiffies(timestamp + msecs_to_jiffies(timeout));
 }
 
+/**
+ * batadv_atomic_dec_not_zero() - Decrease unless the number is 0
+ * @v: pointer of type atomic_t
+ *
+ * Return: non-zero if v was not 0, and zero otherwise.
+ */
 #define batadv_atomic_dec_not_zero(v)	atomic_add_unless((v), -1, 0)
 
-/* Returns the smallest signed integer in two's complement with the sizeof x */
+/**
+ * batadv_smallest_signed_int() - Returns the smallest signed integer in two's
+ *  complement with the sizeof x
+ * @x: type of integer
+ *
+ * Return: smallest signed integer for a type of the same size as @x
+ */
 #define batadv_smallest_signed_int(x) (1u << (7u + 8u * (sizeof(x) - 1u)))
 
-/* Checks if a sequence number x is a predecessor/successor of y.
- * they handle overflows/underflows and can correctly check for a
- * predecessor/successor unless the variable sequence number has grown by
- * more then 2**(bitwidth(x)-1)-1.
+/**
+ * batadv_seq_before() - Checks if a sequence number x is a predecessor of y
+ * @x: potential predecessor of @y
+ * @y: value to compare @x against
+ *
+ * It handles overflows/underflows and can correctly check for a predecessor
+ * unless the variable sequence number has grown by more than
+ * 2**(bitwidth(x)-1)-1.
+ *
  * This means that for a u8 with the maximum value 255, it would think:
- *  - when adding nothing - it is neither a predecessor nor a successor
- *  - before adding more than 127 to the starting value - it is a predecessor,
- *  - when adding 128 - it is neither a predecessor nor a successor,
- *  - after adding more than 127 to the starting value - it is a successor
+ *
+ * * when adding nothing - it is neither a predecessor nor a successor
+ * * before adding more than 127 to the starting value - it is a predecessor,
+ * * when adding 128 - it is neither a predecessor nor a successor,
+ * * after adding more than 127 to the starting value - it is a successor
+ *
+ * Return: true when x is a predecessor of y, false otherwise
  */
 #define batadv_seq_before(x, y) ({typeof(x)_d1 = (x); \
 				 typeof(y)_d2 = (y); \
 				 typeof(x)_dummy = (_d1 - _d2); \
 				 (void)(&_d1 == &_d2); \
 				 _dummy > batadv_smallest_signed_int(_dummy); })
+
+/**
+ * batadv_seq_after() - Checks if a sequence number x is a successor of y
+ * @x: potential successor of @y
+ * @y: value to compare @x against
+ *
+ * It handles overflows/underflows and can correctly check for a successor
+ * unless the variable sequence number has grown by more than
+ * 2**(bitwidth(x)-1)-1.
+ *
+ * This means that for a u8 with the maximum value 255, it would think:
+ *
+ * * when adding nothing - it is neither a predecessor nor a successor
+ * * before adding more than 127 to the starting value - it is a predecessor,
+ * * when adding 128 - it is neither a predecessor nor a successor,
+ * * after adding more than 127 to the starting value - it is a successor
+ *
+ * Return: true when x is a successor of y, false otherwise
+ */
 #define batadv_seq_after(x, y) batadv_seq_before(y, x)
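
A worked example of the wraparound semantics described above, using u8
sequence numbers (values chosen for illustration):

	u8 old_seq = 250;
	u8 new_seq = 3;	/* 250 + 9, wrapped modulo 256 */

	bool before = batadv_seq_before(old_seq, new_seq);	/* true */
	bool after = batadv_seq_after(new_seq, old_seq);	/* true */

	/* a distance of exactly 128 is ambiguous: neither before nor after */
	bool ambig = batadv_seq_before(old_seq, (u8)(old_seq + 128)); /* false */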
 
-/* Stop preemption on local cpu while incrementing the counter */
+/**
+ * batadv_add_counter() - Add to per cpu statistics counter of soft interface
+ * @bat_priv: the bat priv with all the soft interface information
+ * @idx: counter index which should be modified
+ * @count: value to increase counter by
+ *
+ * Stop preemption on local cpu while incrementing the counter
+ */
 static inline void batadv_add_counter(struct batadv_priv *bat_priv, size_t idx,
 				      size_t count)
 {
 	this_cpu_add(bat_priv->bat_counters[idx], count);
 }
 
+/**
+ * batadv_inc_counter() - Increase per cpu statistics counter of soft interface
+ * @b: the bat priv with all the soft interface information
+ * @i: counter index which should be modified
+ */
 #define batadv_inc_counter(b, i) batadv_add_counter(b, i, 1)
 
-/* Define a macro to reach the control buffer of the skb. The members of the
- * control buffer are defined in struct batadv_skb_cb in types.h.
- * The macro is inspired by the similar macro TCP_SKB_CB() in tcp.h.
+/**
+ * BATADV_SKB_CB() - Get batadv_skb_cb from skb control buffer
+ * @__skb: skb holding the control buffer
+ *
+ * The members of the control buffer are defined in struct batadv_skb_cb in
+ * types.h. The macro is inspired by the similar macro TCP_SKB_CB() in tcp.h.
+ *
+ * Return: pointer to the batadv_skb_cb of the skb
  */
 #define BATADV_SKB_CB(__skb)       ((struct batadv_skb_cb *)&((__skb)->cb[0]))
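
A minimal sketch of the macro in use; skb is assumed to be a socket buffer
owned by batman-adv, and the touched member is just one example from
struct batadv_skb_cb:

	struct batadv_skb_cb *cb = BATADV_SKB_CB(skb);

	cb->decoded = true;	/* see struct batadv_skb_cb in types.h */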
 
diff --git a/net/batman-adv/multicast.c b/net/batman-adv/multicast.c
index e553a8770a89..cbdeb47ec3f6 100644
--- a/net/batman-adv/multicast.c
+++ b/net/batman-adv/multicast.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
 /* Copyright (C) 2014-2017  B.A.T.M.A.N. contributors:
  *
  * Linus Lüssing
@@ -24,7 +25,7 @@
 #include <linux/byteorder/generic.h>
 #include <linux/errno.h>
 #include <linux/etherdevice.h>
-#include <linux/fs.h>
+#include <linux/gfp.h>
 #include <linux/icmpv6.h>
 #include <linux/if_bridge.h>
 #include <linux/if_ether.h>
@@ -54,18 +55,18 @@
 #include <net/if_inet6.h>
 #include <net/ip.h>
 #include <net/ipv6.h>
+#include <uapi/linux/batadv_packet.h>
 
 #include "hard-interface.h"
 #include "hash.h"
 #include "log.h"
-#include "packet.h"
 #include "translation-table.h"
 #include "tvlv.h"
 
 static void batadv_mcast_mla_update(struct work_struct *work);
 
 /**
- * batadv_mcast_start_timer - schedule the multicast periodic worker
+ * batadv_mcast_start_timer() - schedule the multicast periodic worker
  * @bat_priv: the bat priv with all the soft interface information
  */
 static void batadv_mcast_start_timer(struct batadv_priv *bat_priv)
@@ -75,7 +76,7 @@ static void batadv_mcast_start_timer(struct batadv_priv *bat_priv)
 }
 
 /**
- * batadv_mcast_get_bridge - get the bridge on top of the softif if it exists
+ * batadv_mcast_get_bridge() - get the bridge on top of the softif if it exists
  * @soft_iface: netdev struct of the mesh interface
  *
  * If the given soft interface has a bridge on top then the refcount
@@ -101,7 +102,7 @@ static struct net_device *batadv_mcast_get_bridge(struct net_device *soft_iface)
 }
 
 /**
- * batadv_mcast_mla_softif_get - get softif multicast listeners
+ * batadv_mcast_mla_softif_get() - get softif multicast listeners
  * @dev: the device to collect multicast addresses from
  * @mcast_list: a list to put found addresses into
  *
@@ -147,7 +148,7 @@ static int batadv_mcast_mla_softif_get(struct net_device *dev,
 }
 
 /**
- * batadv_mcast_mla_is_duplicate - check whether an address is in a list
+ * batadv_mcast_mla_is_duplicate() - check whether an address is in a list
  * @mcast_addr: the multicast address to check
  * @mcast_list: the list with multicast addresses to search in
  *
@@ -167,7 +168,7 @@ static bool batadv_mcast_mla_is_duplicate(u8 *mcast_addr,
 }
 
 /**
- * batadv_mcast_mla_br_addr_cpy - copy a bridge multicast address
+ * batadv_mcast_mla_br_addr_cpy() - copy a bridge multicast address
  * @dst: destination to write to - a multicast MAC address
  * @src: source to read from - a multicast IP address
  *
@@ -191,7 +192,7 @@ static void batadv_mcast_mla_br_addr_cpy(char *dst, const struct br_ip *src)
 }
 
 /**
- * batadv_mcast_mla_bridge_get - get bridged-in multicast listeners
+ * batadv_mcast_mla_bridge_get() - get bridged-in multicast listeners
  * @dev: a bridge slave whose bridge to collect multicast addresses from
  * @mcast_list: a list to put found addresses into
  *
@@ -244,7 +245,7 @@ out:
 }
 
 /**
- * batadv_mcast_mla_list_free - free a list of multicast addresses
+ * batadv_mcast_mla_list_free() - free a list of multicast addresses
  * @mcast_list: the list to free
  *
  * Removes and frees all items in the given mcast_list.
@@ -261,7 +262,7 @@ static void batadv_mcast_mla_list_free(struct hlist_head *mcast_list)
 }
 
 /**
- * batadv_mcast_mla_tt_retract - clean up multicast listener announcements
+ * batadv_mcast_mla_tt_retract() - clean up multicast listener announcements
  * @bat_priv: the bat priv with all the soft interface information
  * @mcast_list: a list of addresses which should _not_ be removed
  *
@@ -297,7 +298,7 @@ static void batadv_mcast_mla_tt_retract(struct batadv_priv *bat_priv,
 }
 
 /**
- * batadv_mcast_mla_tt_add - add multicast listener announcements
+ * batadv_mcast_mla_tt_add() - add multicast listener announcements
  * @bat_priv: the bat priv with all the soft interface information
  * @mcast_list: a list of addresses which are going to get added
  *
@@ -333,7 +334,7 @@ static void batadv_mcast_mla_tt_add(struct batadv_priv *bat_priv,
 }
 
 /**
- * batadv_mcast_has_bridge - check whether the soft-iface is bridged
+ * batadv_mcast_has_bridge() - check whether the soft-iface is bridged
  * @bat_priv: the bat priv with all the soft interface information
  *
  * Checks whether there is a bridge on top of our soft interface.
@@ -354,7 +355,8 @@ static bool batadv_mcast_has_bridge(struct batadv_priv *bat_priv)
 }
 
 /**
- * batadv_mcast_querier_log - debug output regarding the querier status on link
+ * batadv_mcast_querier_log() - debug output regarding the querier status on
+ *  link
  * @bat_priv: the bat priv with all the soft interface information
  * @str_proto: a string for the querier protocol (e.g. "IGMP" or "MLD")
  * @old_state: the previous querier state on our link
@@ -405,7 +407,8 @@ batadv_mcast_querier_log(struct batadv_priv *bat_priv, char *str_proto,
 }
 
 /**
- * batadv_mcast_bridge_log - debug output for topology changes in bridged setups
+ * batadv_mcast_bridge_log() - debug output for topology changes in bridged
+ *  setups
  * @bat_priv: the bat priv with all the soft interface information
  * @bridged: a flag about whether the soft interface is currently bridged or not
  * @querier_ipv4: (maybe) new status of a potential, selected IGMP querier
@@ -444,7 +447,7 @@ batadv_mcast_bridge_log(struct batadv_priv *bat_priv, bool bridged,
 }
 
 /**
- * batadv_mcast_flags_logs - output debug information about mcast flag changes
+ * batadv_mcast_flags_log() - output debug information about mcast flag changes
  * @bat_priv: the bat priv with all the soft interface information
  * @flags: flags indicating the new multicast state
  *
@@ -470,7 +473,7 @@ static void batadv_mcast_flags_log(struct batadv_priv *bat_priv, u8 flags)
 }
 
 /**
- * batadv_mcast_mla_tvlv_update - update multicast tvlv
+ * batadv_mcast_mla_tvlv_update() - update multicast tvlv
  * @bat_priv: the bat priv with all the soft interface information
  *
  * Updates the own multicast tvlv with our current multicast related settings,
@@ -545,7 +548,7 @@ update:
 }
 
 /**
- * __batadv_mcast_mla_update - update the own MLAs
+ * __batadv_mcast_mla_update() - update the own MLAs
  * @bat_priv: the bat priv with all the soft interface information
  *
  * Updates the own multicast listener announcements in the translation
@@ -582,7 +585,7 @@ out:
 }
 
 /**
- * batadv_mcast_mla_update - update the own MLAs
+ * batadv_mcast_mla_update() - update the own MLAs
  * @work: kernel work struct
  *
  * Updates the own multicast listener announcements in the translation
@@ -605,7 +608,7 @@ static void batadv_mcast_mla_update(struct work_struct *work)
 }
 
 /**
- * batadv_mcast_is_report_ipv4 - check for IGMP reports
+ * batadv_mcast_is_report_ipv4() - check for IGMP reports
  * @skb: the ethernet frame destined for the mesh
  *
  * This call might reallocate skb data.
@@ -630,7 +633,8 @@ static bool batadv_mcast_is_report_ipv4(struct sk_buff *skb)
 }
 
 /**
- * batadv_mcast_forw_mode_check_ipv4 - check for optimized forwarding potential
+ * batadv_mcast_forw_mode_check_ipv4() - check for optimized forwarding
+ *  potential
  * @bat_priv: the bat priv with all the soft interface information
  * @skb: the IPv4 packet to check
  * @is_unsnoopable: stores whether the destination is snoopable
@@ -671,7 +675,7 @@ static int batadv_mcast_forw_mode_check_ipv4(struct batadv_priv *bat_priv,
 }
 
 /**
- * batadv_mcast_is_report_ipv6 - check for MLD reports
+ * batadv_mcast_is_report_ipv6() - check for MLD reports
  * @skb: the ethernet frame destined for the mesh
  *
  * This call might reallocate skb data.
@@ -695,7 +699,8 @@ static bool batadv_mcast_is_report_ipv6(struct sk_buff *skb)
 }
 
 /**
- * batadv_mcast_forw_mode_check_ipv6 - check for optimized forwarding potential
+ * batadv_mcast_forw_mode_check_ipv6() - check for optimized forwarding
+ *  potential
  * @bat_priv: the bat priv with all the soft interface information
  * @skb: the IPv6 packet to check
  * @is_unsnoopable: stores whether the destination is snoopable
@@ -736,7 +741,7 @@ static int batadv_mcast_forw_mode_check_ipv6(struct batadv_priv *bat_priv,
 }
 
 /**
- * batadv_mcast_forw_mode_check - check for optimized forwarding potential
+ * batadv_mcast_forw_mode_check() - check for optimized forwarding potential
  * @bat_priv: the bat priv with all the soft interface information
  * @skb: the multicast frame to check
  * @is_unsnoopable: stores whether the destination is snoopable
@@ -774,7 +779,7 @@ static int batadv_mcast_forw_mode_check(struct batadv_priv *bat_priv,
 }
 
 /**
- * batadv_mcast_forw_want_all_ip_count - count nodes with unspecific mcast
+ * batadv_mcast_forw_want_all_ip_count() - count nodes with unspecific mcast
  *  interest
  * @bat_priv: the bat priv with all the soft interface information
  * @ethhdr: ethernet header of a packet
@@ -798,7 +803,7 @@ static int batadv_mcast_forw_want_all_ip_count(struct batadv_priv *bat_priv,
 }
 
 /**
- * batadv_mcast_forw_tt_node_get - get a multicast tt node
+ * batadv_mcast_forw_tt_node_get() - get a multicast tt node
  * @bat_priv: the bat priv with all the soft interface information
  * @ethhdr: the ether header containing the multicast destination
  *
@@ -814,7 +819,7 @@ batadv_mcast_forw_tt_node_get(struct batadv_priv *bat_priv,
 }
 
 /**
- * batadv_mcast_forw_ipv4_node_get - get a node with an ipv4 flag
+ * batadv_mcast_forw_ipv4_node_get() - get a node with an ipv4 flag
  * @bat_priv: the bat priv with all the soft interface information
  *
  * Return: an orig_node which has the BATADV_MCAST_WANT_ALL_IPV4 flag set and
@@ -841,7 +846,7 @@ batadv_mcast_forw_ipv4_node_get(struct batadv_priv *bat_priv)
 }
 
 /**
- * batadv_mcast_forw_ipv6_node_get - get a node with an ipv6 flag
+ * batadv_mcast_forw_ipv6_node_get() - get a node with an ipv6 flag
  * @bat_priv: the bat priv with all the soft interface information
  *
  * Return: an orig_node which has the BATADV_MCAST_WANT_ALL_IPV6 flag set
@@ -868,7 +873,7 @@ batadv_mcast_forw_ipv6_node_get(struct batadv_priv *bat_priv)
 }
 
 /**
- * batadv_mcast_forw_ip_node_get - get a node with an ipv4/ipv6 flag
+ * batadv_mcast_forw_ip_node_get() - get a node with an ipv4/ipv6 flag
  * @bat_priv: the bat priv with all the soft interface information
  * @ethhdr: an ethernet header to determine the protocol family from
  *
@@ -892,7 +897,7 @@ batadv_mcast_forw_ip_node_get(struct batadv_priv *bat_priv,
 }
 
 /**
- * batadv_mcast_forw_unsnoop_node_get - get a node with an unsnoopable flag
+ * batadv_mcast_forw_unsnoop_node_get() - get a node with an unsnoopable flag
  * @bat_priv: the bat priv with all the soft interface information
  *
  * Return: an orig_node which has the BATADV_MCAST_WANT_ALL_UNSNOOPABLES flag
@@ -919,7 +924,7 @@ batadv_mcast_forw_unsnoop_node_get(struct batadv_priv *bat_priv)
 }
 
 /**
- * batadv_mcast_forw_mode - check on how to forward a multicast packet
+ * batadv_mcast_forw_mode() - check on how to forward a multicast packet
  * @bat_priv: the bat priv with all the soft interface information
  * @skb: The multicast packet to check
  * @orig: an originator to be set to forward the skb to
@@ -973,7 +978,7 @@ batadv_mcast_forw_mode(struct batadv_priv *bat_priv, struct sk_buff *skb,
 }
 
 /**
- * batadv_mcast_want_unsnoop_update - update unsnoop counter and list
+ * batadv_mcast_want_unsnoop_update() - update unsnoop counter and list
  * @bat_priv: the bat priv with all the soft interface information
  * @orig: the orig_node which multicast state might have changed of
  * @mcast_flags: flags indicating the new multicast state
@@ -1018,7 +1023,7 @@ static void batadv_mcast_want_unsnoop_update(struct batadv_priv *bat_priv,
 }
 
 /**
- * batadv_mcast_want_ipv4_update - update want-all-ipv4 counter and list
+ * batadv_mcast_want_ipv4_update() - update want-all-ipv4 counter and list
  * @bat_priv: the bat priv with all the soft interface information
  * @orig: the orig_node which multicast state might have changed of
  * @mcast_flags: flags indicating the new multicast state
@@ -1063,7 +1068,7 @@ static void batadv_mcast_want_ipv4_update(struct batadv_priv *bat_priv,
 }
 
 /**
- * batadv_mcast_want_ipv6_update - update want-all-ipv6 counter and list
+ * batadv_mcast_want_ipv6_update() - update want-all-ipv6 counter and list
  * @bat_priv: the bat priv with all the soft interface information
  * @orig: the orig_node which multicast state might have changed of
  * @mcast_flags: flags indicating the new multicast state
@@ -1108,7 +1113,7 @@ static void batadv_mcast_want_ipv6_update(struct batadv_priv *bat_priv,
 }
 
 /**
- * batadv_mcast_tvlv_ogm_handler - process incoming multicast tvlv container
+ * batadv_mcast_tvlv_ogm_handler() - process incoming multicast tvlv container
  * @bat_priv: the bat priv with all the soft interface information
  * @orig: the orig_node of the ogm
  * @flags: flags indicating the tvlv state (see batadv_tvlv_handler_flags)
@@ -1164,7 +1169,7 @@ static void batadv_mcast_tvlv_ogm_handler(struct batadv_priv *bat_priv,
 }
 
 /**
- * batadv_mcast_init - initialize the multicast optimizations structures
+ * batadv_mcast_init() - initialize the multicast optimizations structures
  * @bat_priv: the bat priv with all the soft interface information
  */
 void batadv_mcast_init(struct batadv_priv *bat_priv)
@@ -1179,7 +1184,7 @@ void batadv_mcast_init(struct batadv_priv *bat_priv)
 
 #ifdef CONFIG_BATMAN_ADV_DEBUGFS
 /**
- * batadv_mcast_flags_print_header - print own mcast flags to debugfs table
+ * batadv_mcast_flags_print_header() - print own mcast flags to debugfs table
  * @bat_priv: the bat priv with all the soft interface information
  * @seq: debugfs table seq_file struct
  *
@@ -1220,7 +1225,7 @@ static void batadv_mcast_flags_print_header(struct batadv_priv *bat_priv,
 }
 
 /**
- * batadv_mcast_flags_seq_print_text - print the mcast flags of other nodes
+ * batadv_mcast_flags_seq_print_text() - print the mcast flags of other nodes
  * @seq: seq file to print on
  * @offset: not used
  *
@@ -1281,7 +1286,7 @@ int batadv_mcast_flags_seq_print_text(struct seq_file *seq, void *offset)
 #endif
 
 /**
- * batadv_mcast_free - free the multicast optimizations structures
+ * batadv_mcast_free() - free the multicast optimizations structures
  * @bat_priv: the bat priv with all the soft interface information
  */
 void batadv_mcast_free(struct batadv_priv *bat_priv)
@@ -1296,7 +1301,7 @@ void batadv_mcast_free(struct batadv_priv *bat_priv)
 }
 
 /**
- * batadv_mcast_purge_orig - reset originator global mcast state modifications
+ * batadv_mcast_purge_orig() - reset originator global mcast state modifications
  * @orig: the originator which is going to get purged
  */
 void batadv_mcast_purge_orig(struct batadv_orig_node *orig)
diff --git a/net/batman-adv/multicast.h b/net/batman-adv/multicast.h
index 2a78cddab0e9..3ac06337ab71 100644
--- a/net/batman-adv/multicast.h
+++ b/net/batman-adv/multicast.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
 /* Copyright (C) 2014-2017  B.A.T.M.A.N. contributors:
  *
  * Linus Lüssing
@@ -25,15 +26,21 @@ struct sk_buff;
 
 /**
  * enum batadv_forw_mode - the way a packet should be forwarded as
- * @BATADV_FORW_ALL: forward the packet to all nodes (currently via classic
- *  flooding)
- * @BATADV_FORW_SINGLE: forward the packet to a single node (currently via the
- *  BATMAN unicast routing protocol)
- * @BATADV_FORW_NONE: don't forward, drop it
  */
 enum batadv_forw_mode {
+	/**
+	 * @BATADV_FORW_ALL: forward the packet to all nodes (currently via
+	 *  classic flooding)
+	 */
 	BATADV_FORW_ALL,
+
+	/**
+	 * @BATADV_FORW_SINGLE: forward the packet to a single node (currently
+	 *  via the BATMAN unicast routing protocol)
+	 */
 	BATADV_FORW_SINGLE,
+
+	/** @BATADV_FORW_NONE: don't forward, drop it */
 	BATADV_FORW_NONE,
 };
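
A hedged sketch of how a transmit path might dispatch on the returned
mode; orig, bat_priv and skb are illustrative local names:

	struct batadv_orig_node *orig = NULL;

	switch (batadv_mcast_forw_mode(bat_priv, skb, &orig)) {
	case BATADV_FORW_ALL:
		/* flood via the classic broadcast path */
		break;
	case BATADV_FORW_SINGLE:
		/* unicast towards the single interested node orig */
		break;
	case BATADV_FORW_NONE:
	default:
		kfree_skb(skb);	/* nobody is interested: drop */
		break;
	}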
 
diff --git a/net/batman-adv/netlink.c b/net/batman-adv/netlink.c
index ab13b4d58733..a823d3899bad 100644
--- a/net/batman-adv/netlink.c
+++ b/net/batman-adv/netlink.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
 /* Copyright (C) 2016-2017  B.A.T.M.A.N. contributors:
  *
  * Matthias Schiffer
@@ -23,8 +24,8 @@
 #include <linux/cache.h>
 #include <linux/errno.h>
 #include <linux/export.h>
-#include <linux/fs.h>
 #include <linux/genetlink.h>
+#include <linux/gfp.h>
 #include <linux/if_ether.h>
 #include <linux/init.h>
 #include <linux/kernel.h>
@@ -39,6 +40,7 @@
 #include <net/genetlink.h>
 #include <net/netlink.h>
 #include <net/sock.h>
+#include <uapi/linux/batadv_packet.h>
 #include <uapi/linux/batman_adv.h>
 
 #include "bat_algo.h"
@@ -46,7 +48,6 @@
 #include "gateway_client.h"
 #include "hard-interface.h"
 #include "originator.h"
-#include "packet.h"
 #include "soft-interface.h"
 #include "tp_meter.h"
 #include "translation-table.h"
@@ -99,7 +100,7 @@ static const struct nla_policy batadv_netlink_policy[NUM_BATADV_ATTR] = {
 };
 
 /**
- * batadv_netlink_get_ifindex - Extract an interface index from a message
+ * batadv_netlink_get_ifindex() - Extract an interface index from a message
  * @nlh: Message header
  * @attrtype: Attribute which holds an interface index
  *
@@ -114,7 +115,7 @@ batadv_netlink_get_ifindex(const struct nlmsghdr *nlh, int attrtype)
 }
 
 /**
- * batadv_netlink_mesh_info_put - fill in generic information about mesh
+ * batadv_netlink_mesh_info_put() - fill in generic information about mesh
  *  interface
  * @msg: netlink message to be sent back
  * @soft_iface: interface for which the data should be taken
@@ -169,7 +170,7 @@ batadv_netlink_mesh_info_put(struct sk_buff *msg, struct net_device *soft_iface)
 }
 
 /**
- * batadv_netlink_get_mesh_info - handle incoming BATADV_CMD_GET_MESH_INFO
+ * batadv_netlink_get_mesh_info() - handle incoming BATADV_CMD_GET_MESH_INFO
  *  netlink request
  * @skb: received netlink message
  * @info: receiver information
@@ -230,7 +231,7 @@ batadv_netlink_get_mesh_info(struct sk_buff *skb, struct genl_info *info)
 }
 
 /**
- * batadv_netlink_tp_meter_put - Fill information of started tp_meter session
+ * batadv_netlink_tp_meter_put() - Fill information of started tp_meter session
  * @msg: netlink message to be sent back
  * @cookie: tp meter session cookie
  *
@@ -246,7 +247,7 @@ batadv_netlink_tp_meter_put(struct sk_buff *msg, u32 cookie)
 }
 
 /**
- * batadv_netlink_tpmeter_notify - send tp_meter result via netlink to client
+ * batadv_netlink_tpmeter_notify() - send tp_meter result via netlink to client
  * @bat_priv: the bat priv with all the soft interface information
  * @dst: destination of tp_meter session
  * @result: reason for tp meter session stop
@@ -309,7 +310,7 @@ err_genlmsg:
 }
 
 /**
- * batadv_netlink_tp_meter_start - Start a new tp_meter session
+ * batadv_netlink_tp_meter_start() - Start a new tp_meter session
  * @skb: received netlink message
  * @info: receiver information
  *
@@ -386,7 +387,7 @@ batadv_netlink_tp_meter_start(struct sk_buff *skb, struct genl_info *info)
 }
 
 /**
- * batadv_netlink_tp_meter_start - Cancel a running tp_meter session
+ * batadv_netlink_tp_meter_cancel() - Cancel a running tp_meter session
  * @skb: received netlink message
  * @info: receiver information
  *
@@ -431,7 +432,7 @@ out:
 }
 
 /**
- * batadv_netlink_dump_hardif_entry - Dump one hard interface into a message
+ * batadv_netlink_dump_hardif_entry() - Dump one hard interface into a message
  * @msg: Netlink message to dump into
  * @portid: Port making netlink request
  * @seq: Sequence number of netlink message
@@ -473,7 +474,7 @@ batadv_netlink_dump_hardif_entry(struct sk_buff *msg, u32 portid, u32 seq,
 }
 
 /**
- * batadv_netlink_dump_hardifs - Dump all hard interface into a messages
+ * batadv_netlink_dump_hardifs() - Dump all hard interfaces into a message
  * @msg: Netlink message to dump into
  * @cb: Parameters from query
  *
@@ -620,7 +621,7 @@ struct genl_family batadv_netlink_family __ro_after_init = {
 };
 
 /**
- * batadv_netlink_register - register batadv genl netlink family
+ * batadv_netlink_register() - register batadv genl netlink family
  */
 void __init batadv_netlink_register(void)
 {
@@ -632,7 +633,7 @@ void __init batadv_netlink_register(void)
 }
 
 /**
- * batadv_netlink_unregister - unregister batadv genl netlink family
+ * batadv_netlink_unregister() - unregister batadv genl netlink family
  */
 void batadv_netlink_unregister(void)
 {
diff --git a/net/batman-adv/netlink.h b/net/batman-adv/netlink.h
index f1cd8c5da966..0e7e57b69b54 100644
--- a/net/batman-adv/netlink.h
+++ b/net/batman-adv/netlink.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
 /* Copyright (C) 2016-2017  B.A.T.M.A.N. contributors:
  *
  * Matthias Schiffer
diff --git a/net/batman-adv/network-coding.c b/net/batman-adv/network-coding.c
index 3604d7899e2c..b48116bb24ef 100644
--- a/net/batman-adv/network-coding.c
+++ b/net/batman-adv/network-coding.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
 /* Copyright (C) 2012-2017  B.A.T.M.A.N. contributors:
  *
  * Martin Hundebøll, Jeppe Ledet-Pedersen
@@ -25,7 +26,7 @@
 #include <linux/debugfs.h>
 #include <linux/errno.h>
 #include <linux/etherdevice.h>
-#include <linux/fs.h>
+#include <linux/gfp.h>
 #include <linux/if_ether.h>
 #include <linux/if_packet.h>
 #include <linux/init.h>
@@ -35,6 +36,7 @@
 #include <linux/kref.h>
 #include <linux/list.h>
 #include <linux/lockdep.h>
+#include <linux/net.h>
 #include <linux/netdevice.h>
 #include <linux/printk.h>
 #include <linux/random.h>
@@ -47,12 +49,12 @@
 #include <linux/stddef.h>
 #include <linux/string.h>
 #include <linux/workqueue.h>
+#include <uapi/linux/batadv_packet.h>
 
 #include "hard-interface.h"
 #include "hash.h"
 #include "log.h"
 #include "originator.h"
-#include "packet.h"
 #include "routing.h"
 #include "send.h"
 #include "tvlv.h"
@@ -65,7 +67,7 @@ static int batadv_nc_recv_coded_packet(struct sk_buff *skb,
 				       struct batadv_hard_iface *recv_if);
 
 /**
- * batadv_nc_init - one-time initialization for network coding
+ * batadv_nc_init() - one-time initialization for network coding
  *
  * Return: 0 on success or negative error number in case of failure
  */
@@ -81,7 +83,7 @@ int __init batadv_nc_init(void)
 }
 
 /**
- * batadv_nc_start_timer - initialise the nc periodic worker
+ * batadv_nc_start_timer() - initialise the nc periodic worker
  * @bat_priv: the bat priv with all the soft interface information
  */
 static void batadv_nc_start_timer(struct batadv_priv *bat_priv)
@@ -91,7 +93,7 @@ static void batadv_nc_start_timer(struct batadv_priv *bat_priv)
 }
 
 /**
- * batadv_nc_tvlv_container_update - update the network coding tvlv container
+ * batadv_nc_tvlv_container_update() - update the network coding tvlv container
  *  after network coding setting change
  * @bat_priv: the bat priv with all the soft interface information
  */
@@ -113,7 +115,7 @@ static void batadv_nc_tvlv_container_update(struct batadv_priv *bat_priv)
 }
 
 /**
- * batadv_nc_status_update - update the network coding tvlv container after
+ * batadv_nc_status_update() - update the network coding tvlv container after
  *  network coding setting change
  * @net_dev: the soft interface net device
  */
@@ -125,7 +127,7 @@ void batadv_nc_status_update(struct net_device *net_dev)
 }
 
 /**
- * batadv_nc_tvlv_ogm_handler_v1 - process incoming nc tvlv container
+ * batadv_nc_tvlv_ogm_handler_v1() - process incoming nc tvlv container
  * @bat_priv: the bat priv with all the soft interface information
  * @orig: the orig_node of the ogm
  * @flags: flags indicating the tvlv state (see batadv_tvlv_handler_flags)
@@ -144,7 +146,7 @@ static void batadv_nc_tvlv_ogm_handler_v1(struct batadv_priv *bat_priv,
 }
 
 /**
- * batadv_nc_mesh_init - initialise coding hash table and start house keeping
+ * batadv_nc_mesh_init() - initialise coding hash table and start house keeping
  * @bat_priv: the bat priv with all the soft interface information
  *
  * Return: 0 on success or negative error number in case of failure
@@ -185,7 +187,7 @@ err:
 }
 
 /**
- * batadv_nc_init_bat_priv - initialise the nc specific bat_priv variables
+ * batadv_nc_init_bat_priv() - initialise the nc specific bat_priv variables
  * @bat_priv: the bat priv with all the soft interface information
  */
 void batadv_nc_init_bat_priv(struct batadv_priv *bat_priv)
@@ -197,7 +199,7 @@ void batadv_nc_init_bat_priv(struct batadv_priv *bat_priv)
 }
 
 /**
- * batadv_nc_init_orig - initialise the nc fields of an orig_node
+ * batadv_nc_init_orig() - initialise the nc fields of an orig_node
  * @orig_node: the orig_node which is going to be initialised
  */
 void batadv_nc_init_orig(struct batadv_orig_node *orig_node)
@@ -209,8 +211,8 @@ void batadv_nc_init_orig(struct batadv_orig_node *orig_node)
 }
 
 /**
- * batadv_nc_node_release - release nc_node from lists and queue for free after
- *  rcu grace period
+ * batadv_nc_node_release() - release nc_node from lists and queue for free
+ *  after rcu grace period
  * @ref: kref pointer of the nc_node
  */
 static void batadv_nc_node_release(struct kref *ref)
@@ -224,7 +226,7 @@ static void batadv_nc_node_release(struct kref *ref)
 }
 
 /**
- * batadv_nc_node_put - decrement the nc_node refcounter and possibly
+ * batadv_nc_node_put() - decrement the nc_node refcounter and possibly
  *  release it
  * @nc_node: nc_node to be free'd
  */
@@ -234,8 +236,8 @@ static void batadv_nc_node_put(struct batadv_nc_node *nc_node)
 }
 
 /**
- * batadv_nc_path_release - release nc_path from lists and queue for free after
- *  rcu grace period
+ * batadv_nc_path_release() - release nc_path from lists and queue for free
+ *  after rcu grace period
  * @ref: kref pointer of the nc_path
  */
 static void batadv_nc_path_release(struct kref *ref)
@@ -248,7 +250,7 @@ static void batadv_nc_path_release(struct kref *ref)
 }
 
 /**
- * batadv_nc_path_put - decrement the nc_path refcounter and possibly
+ * batadv_nc_path_put() - decrement the nc_path refcounter and possibly
  *  release it
  * @nc_path: nc_path to be free'd
  */
@@ -258,7 +260,7 @@ static void batadv_nc_path_put(struct batadv_nc_path *nc_path)
 }
 
 /**
- * batadv_nc_packet_free - frees nc packet
+ * batadv_nc_packet_free() - frees nc packet
  * @nc_packet: the nc packet to free
 * @dropped: whether the packet is freed because it is dropped
  */
@@ -275,7 +277,7 @@ static void batadv_nc_packet_free(struct batadv_nc_packet *nc_packet,
 }
 
 /**
- * batadv_nc_to_purge_nc_node - checks whether an nc node has to be purged
+ * batadv_nc_to_purge_nc_node() - checks whether an nc node has to be purged
  * @bat_priv: the bat priv with all the soft interface information
  * @nc_node: the nc node to check
  *
@@ -291,7 +293,7 @@ static bool batadv_nc_to_purge_nc_node(struct batadv_priv *bat_priv,
 }
 
 /**
- * batadv_nc_to_purge_nc_path_coding - checks whether an nc path has timed out
+ * batadv_nc_to_purge_nc_path_coding() - checks whether an nc path has timed out
  * @bat_priv: the bat priv with all the soft interface information
  * @nc_path: the nc path to check
  *
@@ -311,7 +313,8 @@ static bool batadv_nc_to_purge_nc_path_coding(struct batadv_priv *bat_priv,
 }
 
 /**
- * batadv_nc_to_purge_nc_path_decoding - checks whether an nc path has timed out
+ * batadv_nc_to_purge_nc_path_decoding() - checks whether an nc path has timed
+ *  out
  * @bat_priv: the bat priv with all the soft interface information
  * @nc_path: the nc path to check
  *
@@ -331,7 +334,7 @@ static bool batadv_nc_to_purge_nc_path_decoding(struct batadv_priv *bat_priv,
 }
 
 /**
- * batadv_nc_purge_orig_nc_nodes - go through list of nc nodes and purge stale
+ * batadv_nc_purge_orig_nc_nodes() - go through list of nc nodes and purge stale
  *  entries
  * @bat_priv: the bat priv with all the soft interface information
  * @list: list of nc nodes
@@ -369,7 +372,7 @@ batadv_nc_purge_orig_nc_nodes(struct batadv_priv *bat_priv,
 }
 
 /**
- * batadv_nc_purge_orig - purges all nc node data attached of the given
+ * batadv_nc_purge_orig() - purges all nc node data attached to the given
  *  originator
  * @bat_priv: the bat priv with all the soft interface information
  * @orig_node: orig_node with the nc node entries to be purged
@@ -395,8 +398,8 @@ void batadv_nc_purge_orig(struct batadv_priv *bat_priv,
 }
 
 /**
- * batadv_nc_purge_orig_hash - traverse entire originator hash to check if they
- *  have timed out nc nodes
+ * batadv_nc_purge_orig_hash() - traverse entire originator hash to check if
+ *  they have timed out nc nodes
  * @bat_priv: the bat priv with all the soft interface information
  */
 static void batadv_nc_purge_orig_hash(struct batadv_priv *bat_priv)
@@ -422,7 +425,7 @@ static void batadv_nc_purge_orig_hash(struct batadv_priv *bat_priv)
 }
 
 /**
- * batadv_nc_purge_paths - traverse all nc paths part of the hash and remove
+ * batadv_nc_purge_paths() - traverse all nc paths part of the hash and remove
  *  unused ones
  * @bat_priv: the bat priv with all the soft interface information
  * @hash: hash table containing the nc paths to check
@@ -481,7 +484,7 @@ static void batadv_nc_purge_paths(struct batadv_priv *bat_priv,
 }
 
 /**
- * batadv_nc_hash_key_gen - computes the nc_path hash key
+ * batadv_nc_hash_key_gen() - computes the nc_path hash key
  * @key: buffer to hold the final hash key
  * @src: source ethernet mac address going into the hash key
  * @dst: destination ethernet mac address going into the hash key
@@ -494,7 +497,7 @@ static void batadv_nc_hash_key_gen(struct batadv_nc_path *key, const char *src,
 }
 
 /**
- * batadv_nc_hash_choose - compute the hash value for an nc path
+ * batadv_nc_hash_choose() - compute the hash value for an nc path
  * @data: data to hash
  * @size: size of the hash table
  *
@@ -512,7 +515,7 @@ static u32 batadv_nc_hash_choose(const void *data, u32 size)
 }
 
 /**
- * batadv_nc_hash_compare - comparing function used in the network coding hash
+ * batadv_nc_hash_compare() - comparing function used in the network coding hash
  *  tables
  * @node: node in the local table
  * @data2: second object to compare the node to
@@ -538,7 +541,7 @@ static bool batadv_nc_hash_compare(const struct hlist_node *node,
 }
 
 /**
- * batadv_nc_hash_find - search for an existing nc path and return it
+ * batadv_nc_hash_find() - search for an existing nc path and return it
  * @hash: hash table containing the nc path
  * @data: search key
  *
@@ -575,7 +578,7 @@ batadv_nc_hash_find(struct batadv_hashtable *hash,
 }
 
 /**
- * batadv_nc_send_packet - send non-coded packet and free nc_packet struct
+ * batadv_nc_send_packet() - send non-coded packet and free nc_packet struct
  * @nc_packet: the nc packet to send
  */
 static void batadv_nc_send_packet(struct batadv_nc_packet *nc_packet)
@@ -586,7 +589,7 @@ static void batadv_nc_send_packet(struct batadv_nc_packet *nc_packet)
 }
 
 /**
- * batadv_nc_sniffed_purge - Checks timestamp of given sniffed nc_packet.
+ * batadv_nc_sniffed_purge() - Checks timestamp of given sniffed nc_packet.
  * @bat_priv: the bat priv with all the soft interface information
  * @nc_path: the nc path the packet belongs to
  * @nc_packet: the nc packet to be checked
@@ -625,7 +628,7 @@ out:
 }
 
 /**
- * batadv_nc_fwd_flush - Checks the timestamp of the given nc packet.
+ * batadv_nc_fwd_flush() - Checks the timestamp of the given nc packet.
  * @bat_priv: the bat priv with all the soft interface information
  * @nc_path: the nc path the packet belongs to
  * @nc_packet: the nc packet to be checked
@@ -663,8 +666,8 @@ static bool batadv_nc_fwd_flush(struct batadv_priv *bat_priv,
 }
 
 /**
- * batadv_nc_process_nc_paths - traverse given nc packet pool and free timed out
- *  nc packets
+ * batadv_nc_process_nc_paths() - traverse given nc packet pool and free timed
+ *  out nc packets
  * @bat_priv: the bat priv with all the soft interface information
  * @hash: to be processed hash table
  * @process_fn: Function called to process given nc packet. Should return true
@@ -709,7 +712,8 @@ batadv_nc_process_nc_paths(struct batadv_priv *bat_priv,
 }
 
 /**
- * batadv_nc_worker - periodic task for house keeping related to network coding
+ * batadv_nc_worker() - periodic task for house keeping related to network
+ *  coding
  * @work: kernel work struct
  */
 static void batadv_nc_worker(struct work_struct *work)
@@ -749,8 +753,8 @@ static void batadv_nc_worker(struct work_struct *work)
 }
 
 /**
- * batadv_can_nc_with_orig - checks whether the given orig node is suitable for
- *  coding or not
+ * batadv_can_nc_with_orig() - checks whether the given orig node is suitable
+ *  for coding or not
  * @bat_priv: the bat priv with all the soft interface information
  * @orig_node: neighboring orig node which may be used as nc candidate
  * @ogm_packet: incoming ogm packet also used for the checks
@@ -790,7 +794,7 @@ static bool batadv_can_nc_with_orig(struct batadv_priv *bat_priv,
 }
 
 /**
- * batadv_nc_find_nc_node - search for an existing nc node and return it
+ * batadv_nc_find_nc_node() - search for an existing nc node and return it
  * @orig_node: orig node originating the ogm packet
  * @orig_neigh_node: neighboring orig node from which we received the ogm packet
  *  (can be equal to orig_node)
@@ -830,7 +834,7 @@ batadv_nc_find_nc_node(struct batadv_orig_node *orig_node,
 }
 
 /**
- * batadv_nc_get_nc_node - retrieves an nc node or creates the entry if it was
+ * batadv_nc_get_nc_node() - retrieves an nc node or creates the entry if it was
  *  not found
  * @bat_priv: the bat priv with all the soft interface information
  * @orig_node: orig node originating the ogm packet
@@ -890,7 +894,7 @@ batadv_nc_get_nc_node(struct batadv_priv *bat_priv,
 }
 
 /**
- * batadv_nc_update_nc_node - updates stored incoming and outgoing nc node
+ * batadv_nc_update_nc_node() - updates stored incoming and outgoing nc node
  *  structs (best called on incoming OGMs)
  * @bat_priv: the bat priv with all the soft interface information
  * @orig_node: orig node originating the ogm packet
@@ -945,7 +949,7 @@ out:
 }
 
 /**
- * batadv_nc_get_path - get existing nc_path or allocate a new one
+ * batadv_nc_get_path() - get existing nc_path or allocate a new one
  * @bat_priv: the bat priv with all the soft interface information
  * @hash: hash table containing the nc path
  * @src: ethernet source address - first half of the nc path search key
@@ -1006,7 +1010,7 @@ static struct batadv_nc_path *batadv_nc_get_path(struct batadv_priv *bat_priv,
 }
 
 /**
- * batadv_nc_random_weight_tq - scale the receivers TQ-value to avoid unfair
+ * batadv_nc_random_weight_tq() - scale the receiver's TQ-value to avoid unfair
  *  selection of a receiver with slightly lower TQ than the other
  * @tq: to be weighted tq value
  *
@@ -1029,7 +1033,7 @@ static u8 batadv_nc_random_weight_tq(u8 tq)
 }
 
 /**
- * batadv_nc_memxor - XOR destination with source
+ * batadv_nc_memxor() - XOR destination with source
  * @dst: byte array to XOR into
  * @src: byte array to XOR from
  * @len: length of destination array
@@ -1043,7 +1047,7 @@ static void batadv_nc_memxor(char *dst, const char *src, unsigned int len)
 }
 
 /**
- * batadv_nc_code_packets - code a received unicast_packet with an nc packet
+ * batadv_nc_code_packets() - code a received unicast_packet with an nc packet
  *  into a coded_packet and send it
  * @bat_priv: the bat priv with all the soft interface information
  * @skb: data skb to forward
@@ -1236,7 +1240,7 @@ out:
 }
 
 /**
- * batadv_nc_skb_coding_possible - true if a decoded skb is available at dst.
+ * batadv_nc_skb_coding_possible() - true if a decoded skb is available at dst.
  * @skb: data skb to forward
  * @dst: destination mac address of the other skb to code with
  * @src: source mac address of skb
@@ -1260,7 +1264,7 @@ static bool batadv_nc_skb_coding_possible(struct sk_buff *skb, u8 *dst, u8 *src)
 }
 
 /**
- * batadv_nc_path_search - Find the coding path matching in_nc_node and
+ * batadv_nc_path_search() - Find the coding path matching in_nc_node and
  *  out_nc_node to retrieve a buffered packet that can be used for coding.
  * @bat_priv: the bat priv with all the soft interface information
  * @in_nc_node: pointer to skb next hop's neighbor nc node
@@ -1328,8 +1332,8 @@ batadv_nc_path_search(struct batadv_priv *bat_priv,
 }
 
 /**
- * batadv_nc_skb_src_search - Loops through the list of neighoring nodes of the
- *  skb's sender (may be equal to the originator).
+ * batadv_nc_skb_src_search() - Loops through the list of neighboring nodes of
+ *  the skb's sender (may be equal to the originator).
  * @bat_priv: the bat priv with all the soft interface information
  * @skb: data skb to forward
  * @eth_dst: next hop mac address of skb
@@ -1374,7 +1378,7 @@ batadv_nc_skb_src_search(struct batadv_priv *bat_priv,
 }
 
 /**
- * batadv_nc_skb_store_before_coding - set the ethernet src and dst of the
+ * batadv_nc_skb_store_before_coding() - set the ethernet src and dst of the
  *  unicast skb before it is stored for use in later decoding
  * @bat_priv: the bat priv with all the soft interface information
  * @skb: data skb to store
@@ -1409,7 +1413,7 @@ static void batadv_nc_skb_store_before_coding(struct batadv_priv *bat_priv,
 }
 
 /**
- * batadv_nc_skb_dst_search - Loops through list of neighboring nodes to dst.
+ * batadv_nc_skb_dst_search() - Loops through list of neighboring nodes to dst.
  * @skb: data skb to forward
  * @neigh_node: next hop to forward packet to
  * @ethhdr: pointer to the ethernet header inside the skb
@@ -1467,7 +1471,7 @@ static bool batadv_nc_skb_dst_search(struct sk_buff *skb,
 }
 
 /**
- * batadv_nc_skb_add_to_path - buffer skb for later encoding / decoding
+ * batadv_nc_skb_add_to_path() - buffer skb for later encoding / decoding
  * @skb: skb to add to path
  * @nc_path: path to add skb to
  * @neigh_node: next hop to forward packet to
@@ -1502,7 +1506,7 @@ static bool batadv_nc_skb_add_to_path(struct sk_buff *skb,
 }
 
 /**
- * batadv_nc_skb_forward - try to code a packet or add it to the coding packet
+ * batadv_nc_skb_forward() - try to code a packet or add it to the coding packet
  *  buffer
  * @skb: data skb to forward
  * @neigh_node: next hop to forward packet to
@@ -1559,8 +1563,8 @@ out:
 }
 
 /**
- * batadv_nc_skb_store_for_decoding - save a clone of the skb which can be used
- *  when decoding coded packets
+ * batadv_nc_skb_store_for_decoding() - save a clone of the skb which can be
+ *  used when decoding coded packets
  * @bat_priv: the bat priv with all the soft interface information
  * @skb: data skb to store
  */
@@ -1620,7 +1624,7 @@ out:
 }
 
 /**
- * batadv_nc_skb_store_sniffed_unicast - check if a received unicast packet
+ * batadv_nc_skb_store_sniffed_unicast() - check if a received unicast packet
  *  should be saved in the decoding buffer and, if so, store it there
  * @bat_priv: the bat priv with all the soft interface information
  * @skb: unicast skb to store
@@ -1640,7 +1644,7 @@ void batadv_nc_skb_store_sniffed_unicast(struct batadv_priv *bat_priv,
 }
 
 /**
- * batadv_nc_skb_decode_packet - decode given skb using the decode data stored
+ * batadv_nc_skb_decode_packet() - decode given skb using the decode data stored
  *  in nc_packet
  * @bat_priv: the bat priv with all the soft interface information
  * @skb: unicast skb to decode
@@ -1734,7 +1738,7 @@ batadv_nc_skb_decode_packet(struct batadv_priv *bat_priv, struct sk_buff *skb,
 }
 
 /**
- * batadv_nc_find_decoding_packet - search through buffered decoding data to
+ * batadv_nc_find_decoding_packet() - search through buffered decoding data to
  *  find the data needed to decode the coded packet
  * @bat_priv: the bat priv with all the soft interface information
  * @ethhdr: pointer to the ethernet header inside the coded packet
@@ -1799,7 +1803,7 @@ batadv_nc_find_decoding_packet(struct batadv_priv *bat_priv,
 }
 
 /**
- * batadv_nc_recv_coded_packet - try to decode coded packet and enqueue the
+ * batadv_nc_recv_coded_packet() - try to decode coded packet and enqueue the
  *  resulting unicast packet
  * @skb: incoming coded packet
  * @recv_if: pointer to interface this packet was received on
@@ -1874,7 +1878,7 @@ free_skb:
 }
 
 /**
- * batadv_nc_mesh_free - clean up network coding memory
+ * batadv_nc_mesh_free() - clean up network coding memory
  * @bat_priv: the bat priv with all the soft interface information
  */
 void batadv_nc_mesh_free(struct batadv_priv *bat_priv)
@@ -1891,7 +1895,7 @@ void batadv_nc_mesh_free(struct batadv_priv *bat_priv)
 
 #ifdef CONFIG_BATMAN_ADV_DEBUGFS
 /**
- * batadv_nc_nodes_seq_print_text - print the nc node information
+ * batadv_nc_nodes_seq_print_text() - print the nc node information
  * @seq: seq file to print on
  * @offset: not used
  *
@@ -1954,7 +1958,7 @@ out:
 }
 
 /**
- * batadv_nc_init_debugfs - create nc folder and related files in debugfs
+ * batadv_nc_init_debugfs() - create nc folder and related files in debugfs
  * @bat_priv: the bat priv with all the soft interface information
  *
  * Return: 0 on success or negative error number in case of failure
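
An aside on the nc helpers documented above: batadv_nc_memxor() and the code/decode pair work because XOR is its own inverse, so one coded frame can replace two unicast transmissions and XOR-ing the coded data with either original recovers the other. A minimal userspace sketch of that property (buffer names and the 8-byte length are illustrative, not batman-adv API):

    #include <stdio.h>
    #include <string.h>

    /* XOR @src into @dst, the core of a memxor-style helper */
    static void memxor(char *dst, const char *src, size_t len)
    {
            size_t i;

            for (i = 0; i < len; i++)
                    dst[i] ^= src[i];
    }

    int main(void)
    {
            char a[8] = "packetA";          /* frame buffered for decoding */
            char b[8] = "packetB";          /* frame to code and transmit */
            char coded[8];

            memcpy(coded, a, sizeof(coded));
            memxor(coded, b, sizeof(coded));  /* coded = A ^ B */
            memxor(coded, a, sizeof(coded));  /* (A ^ B) ^ A = B */
            printf("recovered: %s\n", coded); /* prints "packetB" */
            return 0;
    }
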
diff --git a/net/batman-adv/network-coding.h b/net/batman-adv/network-coding.h
index c66efb81d2f4..adaeafa4f71e 100644
--- a/net/batman-adv/network-coding.h
+++ b/net/batman-adv/network-coding.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
 /* Copyright (C) 2012-2017  B.A.T.M.A.N. contributors:
  *
  * Martin Hundebøll, Jeppe Ledet-Pedersen
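
For anyone skimming this series: the bulk of the churn is the kernel-doc convention of suffixing the documented function name with (), which lets the kernel-doc parser reliably recognize the comment as a function description. A minimal example of the target format (widget_count() is a made-up name, not from this tree):

    /**
     * widget_count() - count the widgets attached to a board
     * @board: board whose widgets should be counted
     *
     * Return: number of widgets found on @board
     */
    int widget_count(const struct board *board);
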
diff --git a/net/batman-adv/originator.c b/net/batman-adv/originator.c
index 2967b86c13da..58a7d9274435 100644
--- a/net/batman-adv/originator.c
+++ b/net/batman-adv/originator.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
 /* Copyright (C) 2009-2017  B.A.T.M.A.N. contributors:
  *
  * Marek Lindner, Simon Wunderlich
@@ -21,7 +22,7 @@
 #include <linux/atomic.h>
 #include <linux/errno.h>
 #include <linux/etherdevice.h>
-#include <linux/fs.h>
+#include <linux/gfp.h>
 #include <linux/jiffies.h>
 #include <linux/kernel.h>
 #include <linux/kref.h>
@@ -30,10 +31,12 @@
 #include <linux/netdevice.h>
 #include <linux/netlink.h>
 #include <linux/rculist.h>
+#include <linux/rcupdate.h>
 #include <linux/seq_file.h>
 #include <linux/skbuff.h>
 #include <linux/slab.h>
 #include <linux/spinlock.h>
+#include <linux/stddef.h>
 #include <linux/workqueue.h>
 #include <net/sock.h>
 #include <uapi/linux/batman_adv.h>
@@ -55,10 +58,47 @@
 /* hash class keys */
 static struct lock_class_key batadv_orig_hash_lock_class_key;
 
+/**
+ * batadv_orig_hash_find() - Find and return originator from orig_hash
+ * @bat_priv: the bat priv with all the soft interface information
+ * @data: mac address of the originator
+ *
+ * Return: orig_node (with increased refcnt), NULL on errors
+ */
+struct batadv_orig_node *
+batadv_orig_hash_find(struct batadv_priv *bat_priv, const void *data)
+{
+	struct batadv_hashtable *hash = bat_priv->orig_hash;
+	struct hlist_head *head;
+	struct batadv_orig_node *orig_node, *orig_node_tmp = NULL;
+	int index;
+
+	if (!hash)
+		return NULL;
+
+	index = batadv_choose_orig(data, hash->size);
+	head = &hash->table[index];
+
+	rcu_read_lock();
+	hlist_for_each_entry_rcu(orig_node, head, hash_entry) {
+		if (!batadv_compare_eth(orig_node, data))
+			continue;
+
+		if (!kref_get_unless_zero(&orig_node->refcount))
+			continue;
+
+		orig_node_tmp = orig_node;
+		break;
+	}
+	rcu_read_unlock();
+
+	return orig_node_tmp;
+}
+
 static void batadv_purge_orig(struct work_struct *work);
 
 /**
- * batadv_compare_orig - comparing function used in the originator hash table
+ * batadv_compare_orig() - comparing function used in the originator hash table
  * @node: node in the local table
  * @data2: second object to compare the node to
  *
@@ -73,7 +113,7 @@ bool batadv_compare_orig(const struct hlist_node *node, const void *data2)
 }
 
 /**
- * batadv_orig_node_vlan_get - get an orig_node_vlan object
+ * batadv_orig_node_vlan_get() - get an orig_node_vlan object
  * @orig_node: the originator serving the VLAN
  * @vid: the VLAN identifier
  *
@@ -104,7 +144,7 @@ batadv_orig_node_vlan_get(struct batadv_orig_node *orig_node,
 }
 
 /**
- * batadv_orig_node_vlan_new - search and possibly create an orig_node_vlan
+ * batadv_orig_node_vlan_new() - search and possibly create an orig_node_vlan
  *  object
  * @orig_node: the originator serving the VLAN
  * @vid: the VLAN identifier
@@ -145,7 +185,7 @@ out:
 }
 
 /**
- * batadv_orig_node_vlan_release - release originator-vlan object from lists
+ * batadv_orig_node_vlan_release() - release originator-vlan object from lists
  *  and queue for free after rcu grace period
  * @ref: kref pointer of the originator-vlan object
  */
@@ -159,7 +199,7 @@ static void batadv_orig_node_vlan_release(struct kref *ref)
 }
 
 /**
- * batadv_orig_node_vlan_put - decrement the refcounter and possibly release
+ * batadv_orig_node_vlan_put() - decrement the refcounter and possibly release
  *  the originator-vlan object
  * @orig_vlan: the originator-vlan object to release
  */
@@ -168,6 +208,12 @@ void batadv_orig_node_vlan_put(struct batadv_orig_node_vlan *orig_vlan)
 	kref_put(&orig_vlan->refcount, batadv_orig_node_vlan_release);
 }
 
+/**
+ * batadv_originator_init() - Initialize all originator structures
+ * @bat_priv: the bat priv with all the soft interface information
+ *
+ * Return: 0 on success or negative error number in case of failure
+ */
 int batadv_originator_init(struct batadv_priv *bat_priv)
 {
 	if (bat_priv->orig_hash)
@@ -193,7 +239,7 @@ err:
 }
 
 /**
- * batadv_neigh_ifinfo_release - release neigh_ifinfo from lists and queue for
+ * batadv_neigh_ifinfo_release() - release neigh_ifinfo from lists and queue for
  *  free after rcu grace period
  * @ref: kref pointer of the neigh_ifinfo
  */
@@ -210,7 +256,7 @@ static void batadv_neigh_ifinfo_release(struct kref *ref)
 }
 
 /**
- * batadv_neigh_ifinfo_put - decrement the refcounter and possibly release
+ * batadv_neigh_ifinfo_put() - decrement the refcounter and possibly release
  *  the neigh_ifinfo
  * @neigh_ifinfo: the neigh_ifinfo object to release
  */
@@ -220,7 +266,7 @@ void batadv_neigh_ifinfo_put(struct batadv_neigh_ifinfo *neigh_ifinfo)
 }
 
 /**
- * batadv_hardif_neigh_release - release hardif neigh node from lists and
+ * batadv_hardif_neigh_release() - release hardif neigh node from lists and
  *  queue for free after rcu grace period
  * @ref: kref pointer of the neigh_node
  */
@@ -240,7 +286,7 @@ static void batadv_hardif_neigh_release(struct kref *ref)
 }
 
 /**
- * batadv_hardif_neigh_put - decrement the hardif neighbors refcounter
+ * batadv_hardif_neigh_put() - decrement the hardif neighbor's refcounter
  *  and possibly release it
  * @hardif_neigh: hardif neigh neighbor to free
  */
@@ -250,7 +296,7 @@ void batadv_hardif_neigh_put(struct batadv_hardif_neigh_node *hardif_neigh)
 }
 
 /**
- * batadv_neigh_node_release - release neigh_node from lists and queue for
+ * batadv_neigh_node_release() - release neigh_node from lists and queue for
  *  free after rcu grace period
  * @ref: kref pointer of the neigh_node
  */
@@ -275,7 +321,7 @@ static void batadv_neigh_node_release(struct kref *ref)
 }
 
 /**
- * batadv_neigh_node_put - decrement the neighbors refcounter and possibly
+ * batadv_neigh_node_put() - decrement the neighbor's refcounter and possibly
  *  release it
  * @neigh_node: neigh neighbor to free
  */
@@ -285,7 +331,7 @@ void batadv_neigh_node_put(struct batadv_neigh_node *neigh_node)
 }
 
 /**
- * batadv_orig_router_get - router to the originator depending on iface
+ * batadv_orig_router_get() - router to the originator depending on iface
  * @orig_node: the orig node for the router
  * @if_outgoing: the interface where the payload packet has been received or
  *  the OGM should be sent to
@@ -318,7 +364,7 @@ batadv_orig_router_get(struct batadv_orig_node *orig_node,
 }
 
 /**
- * batadv_orig_ifinfo_get - find the ifinfo from an orig_node
+ * batadv_orig_ifinfo_get() - find the ifinfo from an orig_node
  * @orig_node: the orig node to be queried
  * @if_outgoing: the interface for which the ifinfo should be acquired
  *
@@ -350,7 +396,7 @@ batadv_orig_ifinfo_get(struct batadv_orig_node *orig_node,
 }
 
 /**
- * batadv_orig_ifinfo_new - search and possibly create an orig_ifinfo object
+ * batadv_orig_ifinfo_new() - search and possibly create an orig_ifinfo object
  * @orig_node: the orig node to be queried
  * @if_outgoing: the interface for which the ifinfo should be acquired
  *
@@ -396,7 +442,7 @@ out:
 }
 
 /**
- * batadv_neigh_ifinfo_get - find the ifinfo from an neigh_node
+ * batadv_neigh_ifinfo_get() - find the ifinfo from a neigh_node
  * @neigh: the neigh node to be queried
  * @if_outgoing: the interface for which the ifinfo should be acquired
  *
@@ -429,7 +475,7 @@ batadv_neigh_ifinfo_get(struct batadv_neigh_node *neigh,
 }
 
 /**
- * batadv_neigh_ifinfo_new - search and possibly create an neigh_ifinfo object
+ * batadv_neigh_ifinfo_new() - search and possibly create a neigh_ifinfo object
  * @neigh: the neigh node to be queried
  * @if_outgoing: the interface for which the ifinfo should be acquired
  *
@@ -472,7 +518,7 @@ out:
 }
 
 /**
- * batadv_neigh_node_get - retrieve a neighbour from the list
+ * batadv_neigh_node_get() - retrieve a neighbour from the list
  * @orig_node: originator which the neighbour belongs to
  * @hard_iface: the interface where this neighbour is connected to
  * @addr: the address of the neighbour
@@ -509,7 +555,7 @@ batadv_neigh_node_get(const struct batadv_orig_node *orig_node,
 }
 
 /**
- * batadv_hardif_neigh_create - create a hardif neighbour node
+ * batadv_hardif_neigh_create() - create a hardif neighbour node
  * @hard_iface: the interface this neighbour is connected to
  * @neigh_addr: the interface address of the neighbour to retrieve
  * @orig_node: originator object representing the neighbour
@@ -555,7 +601,7 @@ out:
 }
 
 /**
- * batadv_hardif_neigh_get_or_create - retrieve or create a hardif neighbour
+ * batadv_hardif_neigh_get_or_create() - retrieve or create a hardif neighbour
  *  node
  * @hard_iface: the interface this neighbour is connected to
  * @neigh_addr: the interface address of the neighbour to retrieve
@@ -579,7 +625,7 @@ batadv_hardif_neigh_get_or_create(struct batadv_hard_iface *hard_iface,
 }
 
 /**
- * batadv_hardif_neigh_get - retrieve a hardif neighbour from the list
+ * batadv_hardif_neigh_get() - retrieve a hardif neighbour from the list
  * @hard_iface: the interface where this neighbour is connected to
  * @neigh_addr: the address of the neighbour
  *
@@ -611,7 +657,7 @@ batadv_hardif_neigh_get(const struct batadv_hard_iface *hard_iface,
 }
 
 /**
- * batadv_neigh_node_create - create a neigh node object
+ * batadv_neigh_node_create() - create a neigh node object
  * @orig_node: originator object representing the neighbour
  * @hard_iface: the interface where the neighbour is connected to
  * @neigh_addr: the mac address of the neighbour interface
@@ -676,7 +722,7 @@ out:
 }
 
 /**
- * batadv_neigh_node_get_or_create - retrieve or create a neigh node object
+ * batadv_neigh_node_get_or_create() - retrieve or create a neigh node object
  * @orig_node: originator object representing the neighbour
  * @hard_iface: the interface where the neighbour is connected to
  * @neigh_addr: the mac address of the neighbour interface
@@ -700,7 +746,7 @@ batadv_neigh_node_get_or_create(struct batadv_orig_node *orig_node,
 
 #ifdef CONFIG_BATMAN_ADV_DEBUGFS
 /**
- * batadv_hardif_neigh_seq_print_text - print the single hop neighbour list
+ * batadv_hardif_neigh_seq_print_text() - print the single hop neighbour list
  * @seq: neighbour table seq_file struct
  * @offset: not used
  *
@@ -735,8 +781,8 @@ int batadv_hardif_neigh_seq_print_text(struct seq_file *seq, void *offset)
 #endif
 
 /**
- * batadv_hardif_neigh_dump - Dump to netlink the neighbor infos for a specific
- *  outgoing interface
+ * batadv_hardif_neigh_dump() - Dump to netlink the neighbor infos for a
+ *  specific outgoing interface
  * @msg: message to dump into
  * @cb: parameters for the dump
  *
@@ -812,7 +858,7 @@ int batadv_hardif_neigh_dump(struct sk_buff *msg, struct netlink_callback *cb)
 }
 
 /**
- * batadv_orig_ifinfo_release - release orig_ifinfo from lists and queue for
+ * batadv_orig_ifinfo_release() - release orig_ifinfo from lists and queue for
  *  free after rcu grace period
  * @ref: kref pointer of the orig_ifinfo
  */
@@ -835,7 +881,7 @@ static void batadv_orig_ifinfo_release(struct kref *ref)
 }
 
 /**
- * batadv_orig_ifinfo_put - decrement the refcounter and possibly release
+ * batadv_orig_ifinfo_put() - decrement the refcounter and possibly release
  *  the orig_ifinfo
  * @orig_ifinfo: the orig_ifinfo object to release
  */
@@ -845,7 +891,7 @@ void batadv_orig_ifinfo_put(struct batadv_orig_ifinfo *orig_ifinfo)
 }
 
 /**
- * batadv_orig_node_free_rcu - free the orig_node
+ * batadv_orig_node_free_rcu() - free the orig_node
  * @rcu: rcu pointer of the orig_node
  */
 static void batadv_orig_node_free_rcu(struct rcu_head *rcu)
@@ -866,7 +912,7 @@ static void batadv_orig_node_free_rcu(struct rcu_head *rcu)
 }
 
 /**
- * batadv_orig_node_release - release orig_node from lists and queue for
+ * batadv_orig_node_release() - release orig_node from lists and queue for
  *  free after rcu grace period
  * @ref: kref pointer of the orig_node
  */
@@ -917,7 +963,7 @@ static void batadv_orig_node_release(struct kref *ref)
 }
 
 /**
- * batadv_orig_node_put - decrement the orig node refcounter and possibly
+ * batadv_orig_node_put() - decrement the orig node refcounter and possibly
  *  release it
  * @orig_node: the orig node to free
  */
@@ -926,6 +972,10 @@ void batadv_orig_node_put(struct batadv_orig_node *orig_node)
 	kref_put(&orig_node->refcount, batadv_orig_node_release);
 }
 
+/**
+ * batadv_originator_free() - Free all originator structures
+ * @bat_priv: the bat priv with all the soft interface information
+ */
 void batadv_originator_free(struct batadv_priv *bat_priv)
 {
 	struct batadv_hashtable *hash = bat_priv->orig_hash;
@@ -959,7 +1009,7 @@ void batadv_originator_free(struct batadv_priv *bat_priv)
 }
 
 /**
- * batadv_orig_node_new - creates a new orig_node
+ * batadv_orig_node_new() - creates a new orig_node
  * @bat_priv: the bat priv with all the soft interface information
  * @addr: the mac address of the originator
  *
@@ -1038,7 +1088,7 @@ free_orig_node:
 }
 
 /**
- * batadv_purge_neigh_ifinfo - purge obsolete ifinfo entries from neighbor
+ * batadv_purge_neigh_ifinfo() - purge obsolete ifinfo entries from neighbor
  * @bat_priv: the bat priv with all the soft interface information
  * @neigh: orig node which is to be checked
  */
@@ -1079,7 +1129,7 @@ batadv_purge_neigh_ifinfo(struct batadv_priv *bat_priv,
 }
 
 /**
- * batadv_purge_orig_ifinfo - purge obsolete ifinfo entries from originator
+ * batadv_purge_orig_ifinfo() - purge obsolete ifinfo entries from originator
  * @bat_priv: the bat priv with all the soft interface information
  * @orig_node: orig node which is to be checked
  *
@@ -1131,7 +1181,7 @@ batadv_purge_orig_ifinfo(struct batadv_priv *bat_priv,
 }
 
 /**
- * batadv_purge_orig_neighbors - purges neighbors from originator
+ * batadv_purge_orig_neighbors() - purges neighbors from originator
  * @bat_priv: the bat priv with all the soft interface information
  * @orig_node: orig node which is to be checked
  *
@@ -1189,7 +1239,7 @@ batadv_purge_orig_neighbors(struct batadv_priv *bat_priv,
 }
 
 /**
- * batadv_find_best_neighbor - finds the best neighbor after purging
+ * batadv_find_best_neighbor() - finds the best neighbor after purging
  * @bat_priv: the bat priv with all the soft interface information
  * @orig_node: orig node which is to be checked
  * @if_outgoing: the interface for which the metric should be compared
@@ -1224,7 +1274,7 @@ batadv_find_best_neighbor(struct batadv_priv *bat_priv,
 }
 
 /**
- * batadv_purge_orig_node - purges obsolete information from an orig_node
+ * batadv_purge_orig_node() - purges obsolete information from an orig_node
  * @bat_priv: the bat priv with all the soft interface information
  * @orig_node: orig node which is to be checked
  *
@@ -1341,12 +1391,24 @@ static void batadv_purge_orig(struct work_struct *work)
 			   msecs_to_jiffies(BATADV_ORIG_WORK_PERIOD));
 }
 
+/**
+ * batadv_purge_orig_ref() - Purge all outdated originators
+ * @bat_priv: the bat priv with all the soft interface information
+ */
 void batadv_purge_orig_ref(struct batadv_priv *bat_priv)
 {
 	_batadv_purge_orig(bat_priv);
 }
 
 #ifdef CONFIG_BATMAN_ADV_DEBUGFS
+
+/**
+ * batadv_orig_seq_print_text() - Print the originator table in a seq file
+ * @seq: seq file to print on
+ * @offset: not used
+ *
+ * Return: always 0
+ */
 int batadv_orig_seq_print_text(struct seq_file *seq, void *offset)
 {
 	struct net_device *net_dev = (struct net_device *)seq->private;
@@ -1376,7 +1438,7 @@ int batadv_orig_seq_print_text(struct seq_file *seq, void *offset)
 }
 
 /**
- * batadv_orig_hardif_seq_print_text - writes originator infos for a specific
+ * batadv_orig_hardif_seq_print_text() - writes originator infos for a specific
  *  outgoing interface
  * @seq: debugfs table seq_file struct
  * @offset: not used
@@ -1423,7 +1485,7 @@ out:
 #endif
 
 /**
- * batadv_orig_dump - Dump to netlink the originator infos for a specific
+ * batadv_orig_dump() - Dump to netlink the originator infos for a specific
  *  outgoing interface
  * @msg: message to dump into
  * @cb: parameters for the dump
@@ -1499,6 +1561,13 @@ int batadv_orig_dump(struct sk_buff *msg, struct netlink_callback *cb)
 	return ret;
 }
 
+/**
+ * batadv_orig_hash_add_if() - Add interface to originators in orig_hash
+ * @hard_iface: hard interface to add (already slave of the soft interface)
+ * @max_if_num: new number of interfaces
+ *
+ * Return: 0 on success or negative error number in case of failure
+ */
 int batadv_orig_hash_add_if(struct batadv_hard_iface *hard_iface,
 			    int max_if_num)
 {
@@ -1534,6 +1603,13 @@ err:
 	return -ENOMEM;
 }
 
+/**
+ * batadv_orig_hash_del_if() - Remove interface from originators in orig_hash
+ * @hard_iface: hard interface to remove (still slave of the soft interface)
+ * @max_if_num: new number of interfaces
+ *
+ * Return: 0 on success or negative error number in case of failure
+ */
 int batadv_orig_hash_del_if(struct batadv_hard_iface *hard_iface,
 			    int max_if_num)
 {
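
Easy to miss at the call sites of the newly un-inlined batadv_orig_hash_find(): the lookup takes a reference under rcu_read_lock() via kref_get_unless_zero(), so every successful caller owes a matching put. A hedged caller-side sketch, assuming bat_priv is in scope (the MAC address value is made up):

    struct batadv_orig_node *orig_node;
    static const u8 addr[ETH_ALEN] = { 0x02, 0x00, 0x00, 0x00, 0x00, 0x01 };

    orig_node = batadv_orig_hash_find(bat_priv, addr);
    if (!orig_node)
            return;                         /* not in the hash */

    /* safe to dereference: the held kref keeps orig_node alive */

    batadv_orig_node_put(orig_node);        /* drop the lookup reference */
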
diff --git a/net/batman-adv/originator.h b/net/batman-adv/originator.h
index d94220a6d21a..8e543a3cdc6c 100644
--- a/net/batman-adv/originator.h
+++ b/net/batman-adv/originator.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
 /* Copyright (C) 2007-2017  B.A.T.M.A.N. contributors:
  *
  * Marek Lindner, Simon Wunderlich
@@ -23,14 +24,8 @@
 #include <linux/compiler.h>
 #include <linux/if_ether.h>
 #include <linux/jhash.h>
-#include <linux/kref.h>
-#include <linux/rculist.h>
-#include <linux/rcupdate.h>
-#include <linux/stddef.h>
 #include <linux/types.h>
 
-#include "hash.h"
-
 struct netlink_callback;
 struct seq_file;
 struct sk_buff;
@@ -89,8 +84,13 @@ batadv_orig_node_vlan_get(struct batadv_orig_node *orig_node,
 			  unsigned short vid);
 void batadv_orig_node_vlan_put(struct batadv_orig_node_vlan *orig_vlan);
 
-/* hashfunction to choose an entry in a hash table of given size
- * hash algorithm from http://en.wikipedia.org/wiki/Hash_table
+/**
+ * batadv_choose_orig() - Return the index of the orig entry in the hash table
+ * @data: mac address of the originator node
+ * @size: the size of the hash table
+ *
+ * Return: the hash index where the object represented by @data should be
+ * stored.
  */
 static inline u32 batadv_choose_orig(const void *data, u32 size)
 {
@@ -100,34 +100,7 @@ static inline u32 batadv_choose_orig(const void *data, u32 size)
 	return hash % size;
 }
 
-static inline struct batadv_orig_node *
-batadv_orig_hash_find(struct batadv_priv *bat_priv, const void *data)
-{
-	struct batadv_hashtable *hash = bat_priv->orig_hash;
-	struct hlist_head *head;
-	struct batadv_orig_node *orig_node, *orig_node_tmp = NULL;
-	int index;
-
-	if (!hash)
-		return NULL;
-
-	index = batadv_choose_orig(data, hash->size);
-	head = &hash->table[index];
-
-	rcu_read_lock();
-	hlist_for_each_entry_rcu(orig_node, head, hash_entry) {
-		if (!batadv_compare_eth(orig_node, data))
-			continue;
-
-		if (!kref_get_unless_zero(&orig_node->refcount))
-			continue;
-
-		orig_node_tmp = orig_node;
-		break;
-	}
-	rcu_read_unlock();
-
-	return orig_node_tmp;
-}
+struct batadv_orig_node *
+batadv_orig_hash_find(struct batadv_priv *bat_priv, const void *data);
 
 #endif /* _NET_BATMAN_ADV_ORIGINATOR_H_ */
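
The batadv_choose_orig() kernel-doc added above describes the standard bucket-selection recipe: hash the 6-byte MAC (jhash in the kernel) and fold the result into the table range with a modulo. A self-contained userspace sketch using a stand-in mixing function (djb2 here, purely for illustration):

    #include <stddef.h>
    #include <stdint.h>
    #include <stdio.h>

    /* stand-in for jhash; any reasonable mixing function shows the idea */
    static uint32_t toy_hash(const uint8_t *data, size_t len)
    {
            uint32_t h = 5381;
            size_t i;

            for (i = 0; i < len; i++)
                    h = h * 33 + data[i];
            return h;
    }

    /* same shape as batadv_choose_orig(): hash, then reduce modulo size */
    static uint32_t choose_bucket(const uint8_t *mac, uint32_t size)
    {
            return toy_hash(mac, 6) % size;
    }

    int main(void)
    {
            const uint8_t mac[6] = { 0x02, 0x00, 0x00, 0x00, 0x00, 0x01 };

            printf("bucket %u of 1024\n", choose_bucket(mac, 1024));
            return 0;
    }
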
diff --git a/net/batman-adv/routing.c b/net/batman-adv/routing.c
index 40d9bf3e5bfe..b6891e8b741c 100644
--- a/net/batman-adv/routing.c
+++ b/net/batman-adv/routing.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
 /* Copyright (C) 2007-2017  B.A.T.M.A.N. contributors:
  *
  * Marek Lindner, Simon Wunderlich
@@ -33,6 +34,7 @@
 #include <linux/skbuff.h>
 #include <linux/spinlock.h>
 #include <linux/stddef.h>
+#include <uapi/linux/batadv_packet.h>
 
 #include "bitarray.h"
 #include "bridge_loop_avoidance.h"
@@ -43,7 +45,6 @@
 #include "log.h"
 #include "network-coding.h"
 #include "originator.h"
-#include "packet.h"
 #include "send.h"
 #include "soft-interface.h"
 #include "tp_meter.h"
@@ -54,7 +55,7 @@ static int batadv_route_unicast_packet(struct sk_buff *skb,
 				       struct batadv_hard_iface *recv_if);
 
 /**
- * _batadv_update_route - set the router for this originator
+ * _batadv_update_route() - set the router for this originator
  * @bat_priv: the bat priv with all the soft interface information
  * @orig_node: orig node which is to be configured
  * @recv_if: the receive interface for which this route is set
@@ -118,7 +119,7 @@ static void _batadv_update_route(struct batadv_priv *bat_priv,
 }
 
 /**
- * batadv_update_route - set the router for this originator
+ * batadv_update_route() - set the router for this originator
  * @bat_priv: the bat priv with all the soft interface information
  * @orig_node: orig node which is to be configured
  * @recv_if: the receive interface for which this route is set
@@ -145,7 +146,7 @@ out:
 }
 
 /**
- * batadv_window_protected - checks whether the host restarted and is in the
+ * batadv_window_protected() - checks whether the host restarted and is in the
  *  protection time.
  * @bat_priv: the bat priv with all the soft interface information
  * @seq_num_diff: difference between the current/received sequence number and
@@ -180,6 +181,14 @@ bool batadv_window_protected(struct batadv_priv *bat_priv, s32 seq_num_diff,
 	return false;
 }
 
+/**
+ * batadv_check_management_packet() - Check preconditions for management packets
+ * @skb: incoming packet buffer
+ * @hard_iface: incoming hard interface
+ * @header_len: minimal header length of packet type
+ *
+ * Return: true when management preconditions are met, false otherwise
+ */
 bool batadv_check_management_packet(struct sk_buff *skb,
 				    struct batadv_hard_iface *hard_iface,
 				    int header_len)
@@ -212,7 +221,7 @@ bool batadv_check_management_packet(struct sk_buff *skb,
 }
 
 /**
- * batadv_recv_my_icmp_packet - receive an icmp packet locally
+ * batadv_recv_my_icmp_packet() - receive an icmp packet locally
  * @bat_priv: the bat priv with all the soft interface information
  * @skb: icmp packet to process
  *
@@ -347,6 +356,13 @@ out:
 	return ret;
 }
 
+/**
+ * batadv_recv_icmp_packet() - Process incoming icmp packet
+ * @skb: incoming packet buffer
+ * @recv_if: incoming hard interface
+ *
+ * Return: NET_RX_SUCCESS on success or NET_RX_DROP in case of failure
+ */
 int batadv_recv_icmp_packet(struct sk_buff *skb,
 			    struct batadv_hard_iface *recv_if)
 {
@@ -440,7 +456,7 @@ free_skb:
 }
 
 /**
- * batadv_check_unicast_packet - Check for malformed unicast packets
+ * batadv_check_unicast_packet() - Check for malformed unicast packets
  * @bat_priv: the bat priv with all the soft interface information
  * @skb: packet to check
  * @hdr_size: size of header to pull
@@ -478,7 +494,7 @@ static int batadv_check_unicast_packet(struct batadv_priv *bat_priv,
 }
 
 /**
- * batadv_last_bonding_get - Get last_bonding_candidate of orig_node
+ * batadv_last_bonding_get() - Get last_bonding_candidate of orig_node
  * @orig_node: originator node whose last bonding candidate should be retrieved
  *
  * Return: last bonding candidate of router or NULL if not found
@@ -501,7 +517,7 @@ batadv_last_bonding_get(struct batadv_orig_node *orig_node)
 }
 
 /**
- * batadv_last_bonding_replace - Replace last_bonding_candidate of orig_node
+ * batadv_last_bonding_replace() - Replace last_bonding_candidate of orig_node
  * @orig_node: originator node whose bonding candidates should be replaced
  * @new_candidate: new bonding candidate or NULL
  */
@@ -524,7 +540,7 @@ batadv_last_bonding_replace(struct batadv_orig_node *orig_node,
 }
 
 /**
- * batadv_find_router - find a suitable router for this originator
+ * batadv_find_router() - find a suitable router for this originator
  * @bat_priv: the bat priv with all the soft interface information
  * @orig_node: the destination node
  * @recv_if: pointer to interface this packet was received on
@@ -741,7 +757,7 @@ free_skb:
 }
 
 /**
- * batadv_reroute_unicast_packet - update the unicast header for re-routing
+ * batadv_reroute_unicast_packet() - update the unicast header for re-routing
  * @bat_priv: the bat priv with all the soft interface information
  * @unicast_packet: the unicast header to be updated
  * @dst_addr: the payload destination
@@ -904,7 +920,7 @@ static bool batadv_check_unicast_ttvn(struct batadv_priv *bat_priv,
 }
 
 /**
- * batadv_recv_unhandled_unicast_packet - receive and process packets which
+ * batadv_recv_unhandled_unicast_packet() - receive and process packets which
  *	are in the unicast number space but not yet known to the implementation
  * @skb: unicast tvlv packet to process
  * @recv_if: pointer to interface this packet was received on
@@ -935,6 +951,13 @@ free_skb:
 	return NET_RX_DROP;
 }
 
+/**
+ * batadv_recv_unicast_packet() - Process incoming unicast packet
+ * @skb: incoming packet buffer
+ * @recv_if: incoming hard interface
+ *
+ * Return: NET_RX_SUCCESS on success or NET_RX_DROP in case of failure
+ */
 int batadv_recv_unicast_packet(struct sk_buff *skb,
 			       struct batadv_hard_iface *recv_if)
 {
@@ -1036,7 +1059,7 @@ free_skb:
 }
 
 /**
- * batadv_recv_unicast_tvlv - receive and process unicast tvlv packets
+ * batadv_recv_unicast_tvlv() - receive and process unicast tvlv packets
  * @skb: unicast tvlv packet to process
  * @recv_if: pointer to interface this packet was received on
  *
@@ -1090,7 +1113,7 @@ free_skb:
 }
 
 /**
- * batadv_recv_frag_packet - process received fragment
+ * batadv_recv_frag_packet() - process received fragment
  * @skb: the received fragment
  * @recv_if: interface that the skb is received on
  *
@@ -1155,6 +1178,13 @@ free_skb:
 	return ret;
 }
 
+/**
+ * batadv_recv_bcast_packet() - Process incoming broadcast packet
+ * @skb: incoming packet buffer
+ * @recv_if: incoming hard interface
+ *
+ * Return: NET_RX_SUCCESS on success or NET_RX_DROP in case of failure
+ */
 int batadv_recv_bcast_packet(struct sk_buff *skb,
 			     struct batadv_hard_iface *recv_if)
 {
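
All of the batadv_recv_*_packet() handlers documented in this file share one contract: take ownership of the skb and report NET_RX_SUCCESS or NET_RX_DROP. A hedged skeleton of that shape (demo_recv_packet() is illustrative and not part of this patch):

    static int demo_recv_packet(struct sk_buff *skb,
                                struct batadv_hard_iface *recv_if)
    {
            /* ensure the header we want to read sits in linear skb data */
            if (!pskb_may_pull(skb, sizeof(struct ethhdr)))
                    goto free_skb;

            /* real handlers parse, route or decode the packet here */

            consume_skb(skb);
            return NET_RX_SUCCESS;

    free_skb:
            kfree_skb(skb);
            return NET_RX_DROP;
    }
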
diff --git a/net/batman-adv/routing.h b/net/batman-adv/routing.h
index 5ede16c32f15..a1289bc5f115 100644
--- a/net/batman-adv/routing.h
+++ b/net/batman-adv/routing.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
 /* Copyright (C) 2007-2017  B.A.T.M.A.N. contributors:
  *
  * Marek Lindner, Simon Wunderlich
diff --git a/net/batman-adv/send.c b/net/batman-adv/send.c
index 7895323fd2a7..2a5ab6f1076d 100644
--- a/net/batman-adv/send.c
+++ b/net/batman-adv/send.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
 /* Copyright (C) 2007-2017  B.A.T.M.A.N. contributors:
  *
  * Marek Lindner, Simon Wunderlich
@@ -23,7 +24,7 @@
 #include <linux/byteorder/generic.h>
 #include <linux/errno.h>
 #include <linux/etherdevice.h>
-#include <linux/fs.h>
+#include <linux/gfp.h>
 #include <linux/if.h>
 #include <linux/if_ether.h>
 #include <linux/jiffies.h>
@@ -54,7 +55,7 @@
 static void batadv_send_outstanding_bcast_packet(struct work_struct *work);
 
 /**
- * batadv_send_skb_packet - send an already prepared packet
+ * batadv_send_skb_packet() - send an already prepared packet
  * @skb: the packet to send
  * @hard_iface: the interface to use to send the broadcast packet
  * @dst_addr: the payload destination
@@ -123,12 +124,30 @@ send_skb_err:
 	return NET_XMIT_DROP;
 }
 
+/**
+ * batadv_send_broadcast_skb() - Send broadcast packet via hard interface
+ * @skb: packet to be transmitted (with batadv header and no outer eth header)
+ * @hard_iface: outgoing interface
+ *
+ * Return: A negative errno code is returned on a failure. A success does not
+ * guarantee the frame will be transmitted as it may be dropped due
+ * to congestion or traffic shaping.
+ */
 int batadv_send_broadcast_skb(struct sk_buff *skb,
 			      struct batadv_hard_iface *hard_iface)
 {
 	return batadv_send_skb_packet(skb, hard_iface, batadv_broadcast_addr);
 }
 
+/**
+ * batadv_send_unicast_skb() - Send unicast packet to neighbor
+ * @skb: packet to be transmitted (with batadv header and no outer eth header)
+ * @neigh: neighbor which is used as next hop to destination
+ *
+ * Return: A negative errno code is returned on a failure. A success does not
+ * guarantee the frame will be transmitted as it may be dropped due
+ * to congestion or traffic shaping.
+ */
 int batadv_send_unicast_skb(struct sk_buff *skb,
 			    struct batadv_neigh_node *neigh)
 {
@@ -153,7 +172,7 @@ int batadv_send_unicast_skb(struct sk_buff *skb,
 }
 
 /**
- * batadv_send_skb_to_orig - Lookup next-hop and transmit skb.
+ * batadv_send_skb_to_orig() - Lookup next-hop and transmit skb.
  * @skb: Packet to be transmitted.
  * @orig_node: Final destination of the packet.
  * @recv_if: Interface used when receiving the packet (can be NULL).
@@ -216,7 +235,7 @@ free_skb:
 }
 
 /**
- * batadv_send_skb_push_fill_unicast - extend the buffer and initialize the
+ * batadv_send_skb_push_fill_unicast() - extend the buffer and initialize the
  *  common fields for unicast packets
  * @skb: the skb carrying the unicast header to initialize
  * @hdr_size: amount of bytes to push at the beginning of the skb
@@ -249,7 +268,7 @@ batadv_send_skb_push_fill_unicast(struct sk_buff *skb, int hdr_size,
 }
 
 /**
- * batadv_send_skb_prepare_unicast - encapsulate an skb with a unicast header
+ * batadv_send_skb_prepare_unicast() - encapsulate an skb with a unicast header
  * @skb: the skb containing the payload to encapsulate
  * @orig_node: the destination node
  *
@@ -264,7 +283,7 @@ static bool batadv_send_skb_prepare_unicast(struct sk_buff *skb,
 }
 
 /**
- * batadv_send_skb_prepare_unicast_4addr - encapsulate an skb with a
+ * batadv_send_skb_prepare_unicast_4addr() - encapsulate an skb with a
  *  unicast 4addr header
  * @bat_priv: the bat priv with all the soft interface information
  * @skb: the skb containing the payload to encapsulate
@@ -308,7 +327,7 @@ out:
 }
 
 /**
- * batadv_send_skb_unicast - encapsulate and send an skb via unicast
+ * batadv_send_skb_unicast() - encapsulate and send an skb via unicast
  * @bat_priv: the bat priv with all the soft interface information
  * @skb: payload to send
  * @packet_type: the batman unicast packet type to use
@@ -378,7 +397,7 @@ out:
 }
 
 /**
- * batadv_send_skb_via_tt_generic - send an skb via TT lookup
+ * batadv_send_skb_via_tt_generic() - send an skb via TT lookup
  * @bat_priv: the bat priv with all the soft interface information
  * @skb: payload to send
  * @packet_type: the batman unicast packet type to use
@@ -425,7 +444,7 @@ int batadv_send_skb_via_tt_generic(struct batadv_priv *bat_priv,
 }
 
 /**
- * batadv_send_skb_via_gw - send an skb via gateway lookup
+ * batadv_send_skb_via_gw() - send an skb via gateway lookup
  * @bat_priv: the bat priv with all the soft interface information
  * @skb: payload to send
  * @vid: the vid to be used to search the translation table
@@ -452,7 +471,7 @@ int batadv_send_skb_via_gw(struct batadv_priv *bat_priv, struct sk_buff *skb,
 }
 
 /**
- * batadv_forw_packet_free - free a forwarding packet
+ * batadv_forw_packet_free() - free a forwarding packet
  * @forw_packet: The packet to free
  * @dropped: whether the packet is freed because it is dropped
  *
@@ -477,7 +496,7 @@ void batadv_forw_packet_free(struct batadv_forw_packet *forw_packet,
 }
 
 /**
- * batadv_forw_packet_alloc - allocate a forwarding packet
+ * batadv_forw_packet_alloc() - allocate a forwarding packet
  * @if_incoming: The (optional) if_incoming to be grabbed
  * @if_outgoing: The (optional) if_outgoing to be grabbed
  * @queue_left: The (optional) queue counter to decrease
@@ -543,7 +562,7 @@ err:
 }
 
 /**
- * batadv_forw_packet_was_stolen - check whether someone stole this packet
+ * batadv_forw_packet_was_stolen() - check whether someone stole this packet
  * @forw_packet: the forwarding packet to check
  *
  * This function checks whether the given forwarding packet was claimed by
@@ -558,7 +577,7 @@ batadv_forw_packet_was_stolen(struct batadv_forw_packet *forw_packet)
 }
 
 /**
- * batadv_forw_packet_steal - claim a forw_packet for free()
+ * batadv_forw_packet_steal() - claim a forw_packet for free()
  * @forw_packet: the forwarding packet to steal
  * @lock: a key to the store to steal from (e.g. forw_{bat,bcast}_list_lock)
  *
@@ -589,7 +608,7 @@ bool batadv_forw_packet_steal(struct batadv_forw_packet *forw_packet,
 }
 
 /**
- * batadv_forw_packet_list_steal - claim a list of forward packets for free()
+ * batadv_forw_packet_list_steal() - claim a list of forward packets for free()
  * @forw_list: the to be stolen forward packets
  * @cleanup_list: a backup pointer, to be able to dispose the packet later
  * @hard_iface: the interface to steal forward packets from
@@ -625,7 +644,7 @@ batadv_forw_packet_list_steal(struct hlist_head *forw_list,
 }
 
 /**
- * batadv_forw_packet_list_free - free a list of forward packets
+ * batadv_forw_packet_list_free() - free a list of forward packets
  * @head: a list of to be freed forw_packets
  *
  * This function cancels the scheduling of any packet in the provided list,
@@ -649,7 +668,7 @@ static void batadv_forw_packet_list_free(struct hlist_head *head)
 }
 
 /**
- * batadv_forw_packet_queue - try to queue a forwarding packet
+ * batadv_forw_packet_queue() - try to queue a forwarding packet
  * @forw_packet: the forwarding packet to queue
  * @lock: a key to the store (e.g. forw_{bat,bcast}_list_lock)
  * @head: the shelve to queue it on (e.g. forw_{bat,bcast}_list)
@@ -693,7 +712,7 @@ static void batadv_forw_packet_queue(struct batadv_forw_packet *forw_packet,
 }
 
 /**
- * batadv_forw_packet_bcast_queue - try to queue a broadcast packet
+ * batadv_forw_packet_bcast_queue() - try to queue a broadcast packet
  * @bat_priv: the bat priv with all the soft interface information
  * @forw_packet: the forwarding packet to queue
  * @send_time: timestamp (jiffies) when the packet is to be sent
@@ -712,7 +731,7 @@ batadv_forw_packet_bcast_queue(struct batadv_priv *bat_priv,
 }
 
 /**
- * batadv_forw_packet_ogmv1_queue - try to queue an OGMv1 packet
+ * batadv_forw_packet_ogmv1_queue() - try to queue an OGMv1 packet
  * @bat_priv: the bat priv with all the soft interface information
  * @forw_packet: the forwarding packet to queue
  * @send_time: timestamp (jiffies) when the packet is to be sent
@@ -730,7 +749,7 @@ void batadv_forw_packet_ogmv1_queue(struct batadv_priv *bat_priv,
 }
 
 /**
- * batadv_add_bcast_packet_to_list - queue broadcast packet for multiple sends
+ * batadv_add_bcast_packet_to_list() - queue broadcast packet for multiple sends
  * @bat_priv: the bat priv with all the soft interface information
  * @skb: broadcast packet to add
  * @delay: number of jiffies to wait before sending
@@ -790,7 +809,7 @@ err:
 }
 
 /**
- * batadv_forw_packet_bcasts_left - check if a retransmission is necessary
+ * batadv_forw_packet_bcasts_left() - check if a retransmission is necessary
  * @forw_packet: the forwarding packet to check
  * @hard_iface: the interface to check on
  *
@@ -818,7 +837,8 @@ batadv_forw_packet_bcasts_left(struct batadv_forw_packet *forw_packet,
 }
 
 /**
- * batadv_forw_packet_bcasts_inc - increment retransmission counter of a packet
+ * batadv_forw_packet_bcasts_inc() - increment retransmission counter of a
+ *  packet
  * @forw_packet: the packet to increase the counter for
  */
 static void
@@ -828,7 +848,7 @@ batadv_forw_packet_bcasts_inc(struct batadv_forw_packet *forw_packet)
 }
 
 /**
- * batadv_forw_packet_is_rebroadcast - check packet for previous transmissions
+ * batadv_forw_packet_is_rebroadcast() - check packet for previous transmissions
  * @forw_packet: the packet to check
  *
  * Return: True if this packet was transmitted before, false otherwise.
@@ -953,7 +973,7 @@ out:
 }
 
 /**
- * batadv_purge_outstanding_packets - stop/purge scheduled bcast/OGMv1 packets
+ * batadv_purge_outstanding_packets() - stop/purge scheduled bcast/OGMv1 packets
  * @bat_priv: the bat priv with all the soft interface information
  * @hard_iface: the hard interface to cancel and purge bcast/ogm packets on
  *
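
The Return: wording added for the two send wrappers above mirrors the dev_queue_xmit() contract: only a negative value is a hard failure, and success means the frame was queued, not that it was delivered. A caller-side sketch written against that documented contract, assuming skb and neigh_node are in scope:

    int ret;

    ret = batadv_send_unicast_skb(skb, neigh_node);
    if (ret < 0) {
            pr_debug("demo: unicast send failed: %d\n", ret);
            return;
    }
    /* ret >= 0: handed to the queueing layer; delivery still not assured */
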
diff --git a/net/batman-adv/send.h b/net/batman-adv/send.h
index a16b34f473ef..1e8c79093623 100644
--- a/net/batman-adv/send.h
+++ b/net/batman-adv/send.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
 /* Copyright (C) 2007-2017  B.A.T.M.A.N. contributors:
  *
  * Marek Lindner, Simon Wunderlich
@@ -23,8 +24,7 @@
 #include <linux/compiler.h>
 #include <linux/spinlock.h>
 #include <linux/types.h>
-
-#include "packet.h"
+#include <uapi/linux/batadv_packet.h>
 
 struct sk_buff;
 
@@ -76,7 +76,7 @@ int batadv_send_skb_via_gw(struct batadv_priv *bat_priv, struct sk_buff *skb,
 			   unsigned short vid);
 
 /**
- * batadv_send_skb_via_tt - send an skb via TT lookup
+ * batadv_send_skb_via_tt() - send an skb via TT lookup
  * @bat_priv: the bat priv with all the soft interface information
  * @skb: the payload to send
  * @dst_hint: can be used to override the destination contained in the skb
@@ -97,7 +97,7 @@ static inline int batadv_send_skb_via_tt(struct batadv_priv *bat_priv,
 }
 
 /**
- * batadv_send_skb_via_tt_4addr - send an skb via TT lookup
+ * batadv_send_skb_via_tt_4addr() - send an skb via TT lookup
  * @bat_priv: the bat priv with all the soft interface information
  * @skb: the payload to send
  * @packet_subtype: the unicast 4addr packet subtype to use
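
send.h keeps these TT helpers as static inlines over batadv_send_skb_via_tt_generic() so each call site bakes in its packet type with no runtime indirection. A generic sketch of the idiom (all demo_* names and the type value are hypothetical):

    #define DEMO_PACKET_PLAIN 0x40  /* hypothetical packet type value */

    struct demo_ctx;
    struct sk_buff;

    int demo_send_generic(struct demo_ctx *ctx, struct sk_buff *skb,
                          int packet_type, unsigned short vid);

    /* thin wrapper: fixes the packet type at compile time */
    static inline int demo_send_plain(struct demo_ctx *ctx,
                                      struct sk_buff *skb,
                                      unsigned short vid)
    {
            return demo_send_generic(ctx, skb, DEMO_PACKET_PLAIN, vid);
    }
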
diff --git a/net/batman-adv/soft-interface.c b/net/batman-adv/soft-interface.c
index 9f673cdfecf8..900c5ce21cd4 100644
--- a/net/batman-adv/soft-interface.c
+++ b/net/batman-adv/soft-interface.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
 /* Copyright (C) 2007-2017  B.A.T.M.A.N. contributors:
  *
  * Marek Lindner, Simon Wunderlich
@@ -26,7 +27,7 @@
 #include <linux/errno.h>
 #include <linux/etherdevice.h>
 #include <linux/ethtool.h>
-#include <linux/fs.h>
+#include <linux/gfp.h>
 #include <linux/if_ether.h>
 #include <linux/if_vlan.h>
 #include <linux/jiffies.h>
@@ -48,6 +49,7 @@
 #include <linux/stddef.h>
 #include <linux/string.h>
 #include <linux/types.h>
+#include <uapi/linux/batadv_packet.h>
 
 #include "bat_algo.h"
 #include "bridge_loop_avoidance.h"
@@ -59,11 +61,17 @@
 #include "multicast.h"
 #include "network-coding.h"
 #include "originator.h"
-#include "packet.h"
 #include "send.h"
 #include "sysfs.h"
 #include "translation-table.h"
 
+/**
+ * batadv_skb_head_push() - Increase header size and move (push) head pointer
+ * @skb: packet buffer which should be modified
+ * @len: number of bytes to add
+ *
+ * Return: 0 on success or negative error number in case of failure
+ */
 int batadv_skb_head_push(struct sk_buff *skb, unsigned int len)
 {
 	int result;
@@ -96,7 +104,7 @@ static int batadv_interface_release(struct net_device *dev)
 }
 
 /**
- * batadv_sum_counter - Sum the cpu-local counters for index 'idx'
+ * batadv_sum_counter() - Sum the cpu-local counters for index 'idx'
  * @bat_priv: the bat priv with all the soft interface information
  * @idx: index of counter to sum up
  *
@@ -169,7 +177,7 @@ static int batadv_interface_change_mtu(struct net_device *dev, int new_mtu)
 }
 
 /**
- * batadv_interface_set_rx_mode - set the rx mode of a device
+ * batadv_interface_set_rx_mode() - set the rx mode of a device
  * @dev: registered network device to modify
  *
  * We do not actually need to set any rx filters for the virtual batman
@@ -389,7 +397,7 @@ end:
 }
 
 /**
- * batadv_interface_rx - receive ethernet frame on local batman-adv interface
+ * batadv_interface_rx() - receive ethernet frame on local batman-adv interface
  * @soft_iface: local interface which will receive the ethernet frame
  * @skb: ethernet frame for @soft_iface
  * @hdr_size: size of already parsed batman-adv header
@@ -501,8 +509,8 @@ out:
 }
 
 /**
- * batadv_softif_vlan_release - release vlan from lists and queue for free after
- *  rcu grace period
+ * batadv_softif_vlan_release() - release vlan from lists and queue for free
+ *  after rcu grace period
  * @ref: kref pointer of the vlan object
  */
 static void batadv_softif_vlan_release(struct kref *ref)
@@ -519,7 +527,7 @@ static void batadv_softif_vlan_release(struct kref *ref)
 }
 
 /**
- * batadv_softif_vlan_put - decrease the vlan object refcounter and
+ * batadv_softif_vlan_put() - decrease the vlan object refcounter and
  *  possibly release it
  * @vlan: the vlan object to release
  */
@@ -532,7 +540,7 @@ void batadv_softif_vlan_put(struct batadv_softif_vlan *vlan)
 }
 
 /**
- * batadv_softif_vlan_get - get the vlan object for a specific vid
+ * batadv_softif_vlan_get() - get the vlan object for a specific vid
  * @bat_priv: the bat priv with all the soft interface information
  * @vid: the identifier of the vlan object to retrieve
  *
@@ -561,7 +569,7 @@ struct batadv_softif_vlan *batadv_softif_vlan_get(struct batadv_priv *bat_priv,
 }
 
 /**
- * batadv_softif_create_vlan - allocate the needed resources for a new vlan
+ * batadv_softif_create_vlan() - allocate the needed resources for a new vlan
  * @bat_priv: the bat priv with all the soft interface information
  * @vid: the VLAN identifier
  *
@@ -613,7 +621,7 @@ int batadv_softif_create_vlan(struct batadv_priv *bat_priv, unsigned short vid)
 }
 
 /**
- * batadv_softif_destroy_vlan - remove and destroy a softif_vlan object
+ * batadv_softif_destroy_vlan() - remove and destroy a softif_vlan object
  * @bat_priv: the bat priv with all the soft interface information
  * @vlan: the object to remove
  */
@@ -631,7 +639,7 @@ static void batadv_softif_destroy_vlan(struct batadv_priv *bat_priv,
 }
 
 /**
- * batadv_interface_add_vid - ndo_add_vid API implementation
+ * batadv_interface_add_vid() - ndo_add_vid API implementation
  * @dev: the netdev of the mesh interface
  * @proto: protocol of the vlan id
  * @vid: identifier of the new vlan
@@ -689,7 +697,7 @@ static int batadv_interface_add_vid(struct net_device *dev, __be16 proto,
 }
 
 /**
- * batadv_interface_kill_vid - ndo_kill_vid API implementation
+ * batadv_interface_kill_vid() - ndo_kill_vid API implementation
  * @dev: the netdev of the mesh interface
  * @proto: protocol of the vlan id
  * @vid: identifier of the deleted vlan
@@ -732,7 +740,7 @@ static struct lock_class_key batadv_netdev_xmit_lock_key;
 static struct lock_class_key batadv_netdev_addr_lock_key;
 
 /**
- * batadv_set_lockdep_class_one - Set lockdep class for a single tx queue
+ * batadv_set_lockdep_class_one() - Set lockdep class for a single tx queue
  * @dev: device which owns the tx queue
  * @txq: tx queue to modify
  * @_unused: always NULL
@@ -745,7 +753,7 @@ static void batadv_set_lockdep_class_one(struct net_device *dev,
 }
 
 /**
- * batadv_set_lockdep_class - Set txq and addr_list lockdep class
+ * batadv_set_lockdep_class() - Set txq and addr_list lockdep class
  * @dev: network device to modify
  */
 static void batadv_set_lockdep_class(struct net_device *dev)
@@ -755,7 +763,7 @@ static void batadv_set_lockdep_class(struct net_device *dev)
 }
 
 /**
- * batadv_softif_init_late - late stage initialization of soft interface
+ * batadv_softif_init_late() - late stage initialization of soft interface
  * @dev: registered network device to modify
  *
  * Return: error code on failures
@@ -860,7 +868,7 @@ free_bat_counters:
 }
 
 /**
- * batadv_softif_slave_add - Add a slave interface to a batadv_soft_interface
+ * batadv_softif_slave_add() - Add a slave interface to a batadv_soft_interface
  * @dev: batadv_soft_interface used as master interface
  * @slave_dev: net_device which should become the slave interface
  * @extack: extended ACK report struct
@@ -888,7 +896,7 @@ out:
 }
 
 /**
- * batadv_softif_slave_del - Delete a slave iface from a batadv_soft_interface
+ * batadv_softif_slave_del() - Delete a slave iface from a batadv_soft_interface
  * @dev: batadv_soft_interface used as master interface
  * @slave_dev: net_device which should be removed from the master interface
  *
@@ -1023,7 +1031,7 @@ static const struct ethtool_ops batadv_ethtool_ops = {
 };
 
 /**
- * batadv_softif_free - Deconstructor of batadv_soft_interface
+ * batadv_softif_free() - Destructor of batadv_soft_interface
  * @dev: Device to cleanup and remove
  */
 static void batadv_softif_free(struct net_device *dev)
@@ -1039,7 +1047,7 @@ static void batadv_softif_free(struct net_device *dev)
 }
 
 /**
- * batadv_softif_init_early - early stage initialization of soft interface
+ * batadv_softif_init_early() - early stage initialization of soft interface
  * @dev: registered network device to modify
  */
 static void batadv_softif_init_early(struct net_device *dev)
@@ -1063,6 +1071,13 @@ static void batadv_softif_init_early(struct net_device *dev)
 	dev->ethtool_ops = &batadv_ethtool_ops;
 }
 
+/**
+ * batadv_softif_create() - Create and register soft interface
+ * @net: the applicable net namespace
+ * @name: name of the new soft interface
+ *
+ * Return: newly allocated soft_interface, NULL on errors
+ */
 struct net_device *batadv_softif_create(struct net *net, const char *name)
 {
 	struct net_device *soft_iface;
@@ -1089,7 +1104,7 @@ struct net_device *batadv_softif_create(struct net *net, const char *name)
 }
 
 /**
- * batadv_softif_destroy_sysfs - deletion of batadv_soft_interface via sysfs
+ * batadv_softif_destroy_sysfs() - deletion of batadv_soft_interface via sysfs
  * @soft_iface: the to-be-removed batman-adv interface
  */
 void batadv_softif_destroy_sysfs(struct net_device *soft_iface)
@@ -1111,7 +1126,8 @@ void batadv_softif_destroy_sysfs(struct net_device *soft_iface)
 }
 
 /**
- * batadv_softif_destroy_netlink - deletion of batadv_soft_interface via netlink
+ * batadv_softif_destroy_netlink() - deletion of batadv_soft_interface via
+ *  netlink
  * @soft_iface: the to-be-removed batman-adv interface
  * @head: list pointer
  */
@@ -1139,6 +1155,12 @@ static void batadv_softif_destroy_netlink(struct net_device *soft_iface,
 	unregister_netdevice_queue(soft_iface, head);
 }
 
+/**
+ * batadv_softif_is_valid() - Check whether device is a batadv soft interface
+ * @net_dev: device which should be checked
+ *
+ * Return: true when net_dev is a batman-adv interface, false otherwise
+ */
 bool batadv_softif_is_valid(const struct net_device *net_dev)
 {
 	if (net_dev->netdev_ops->ndo_start_xmit == batadv_interface_tx)
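
batadv_softif_is_valid() above identifies batman-adv devices by comparing the ndo_start_xmit pointer: the ops table a driver installs is unique to that driver, so no extra marker flag is needed. A hedged sketch of the same idiom for a hypothetical driver:

    static netdev_tx_t demo_xmit(struct sk_buff *skb, struct net_device *dev);

    /* true only for net_devices registered by this driver */
    static bool demo_is_my_netdev(const struct net_device *net_dev)
    {
            return net_dev->netdev_ops->ndo_start_xmit == demo_xmit;
    }
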
diff --git a/net/batman-adv/soft-interface.h b/net/batman-adv/soft-interface.h
index 639c3abb214a..075c5b5b2ce1 100644
--- a/net/batman-adv/soft-interface.h
+++ b/net/batman-adv/soft-interface.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
 /* Copyright (C) 2007-2017  B.A.T.M.A.N. contributors:
  *
  * Marek Lindner
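
One convention that holds across every SPDX hunk in this series: C sources take the identifier as a C99 line comment on the first line, headers as a block comment (per the kernel's license-rules documentation; as I understand it, the block form keeps headers usable in contexts where // comments are not accepted). Side by side:

    /* first line of a .c file: */
    // SPDX-License-Identifier: GPL-2.0

    /* first line of a .h file: */
    /* SPDX-License-Identifier: GPL-2.0 */
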
diff --git a/net/batman-adv/sysfs.c b/net/batman-adv/sysfs.c
index aa187fd42475..c1578fa0b952 100644
--- a/net/batman-adv/sysfs.c
+++ b/net/batman-adv/sysfs.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
 /* Copyright (C) 2010-2017  B.A.T.M.A.N. contributors:
  *
  * Marek Lindner
@@ -22,10 +23,11 @@
 #include <linux/compiler.h>
 #include <linux/device.h>
 #include <linux/errno.h>
-#include <linux/fs.h>
+#include <linux/gfp.h>
 #include <linux/if.h>
 #include <linux/if_vlan.h>
 #include <linux/kernel.h>
+#include <linux/kobject.h>
 #include <linux/kref.h>
 #include <linux/netdevice.h>
 #include <linux/printk.h>
@@ -37,6 +39,7 @@
 #include <linux/string.h>
 #include <linux/stringify.h>
 #include <linux/workqueue.h>
+#include <uapi/linux/batadv_packet.h>
 
 #include "bridge_loop_avoidance.h"
 #include "distributed-arp-table.h"
@@ -45,7 +48,6 @@
 #include "hard-interface.h"
 #include "log.h"
 #include "network-coding.h"
-#include "packet.h"
 #include "soft-interface.h"
 
 static struct net_device *batadv_kobj_to_netdev(struct kobject *obj)
@@ -63,7 +65,7 @@ static struct batadv_priv *batadv_kobj_to_batpriv(struct kobject *obj)
 }
 
 /**
- * batadv_vlan_kobj_to_batpriv - convert a vlan kobj in the associated batpriv
+ * batadv_vlan_kobj_to_batpriv() - convert a vlan kobj into the associated batpriv
  * @obj: kobject to convert
  *
  * Return: the associated batadv_priv struct.
@@ -83,7 +85,7 @@ static struct batadv_priv *batadv_vlan_kobj_to_batpriv(struct kobject *obj)
 }
 
 /**
- * batadv_kobj_to_vlan - convert a kobj in the associated softif_vlan struct
+ * batadv_kobj_to_vlan() - convert a kobj in the associated softif_vlan struct
  * @bat_priv: the bat priv with all the soft interface information
  * @obj: kobject to convert
  *
@@ -598,7 +600,7 @@ static ssize_t batadv_store_gw_bwidth(struct kobject *kobj,
 }
 
 /**
- * batadv_show_isolation_mark - print the current isolation mark/mask
+ * batadv_show_isolation_mark() - print the current isolation mark/mask
  * @kobj: kobject representing the private mesh sysfs directory
  * @attr: the batman-adv attribute the user is interacting with
  * @buff: the buffer that will contain the data to send back to the user
@@ -616,8 +618,8 @@ static ssize_t batadv_show_isolation_mark(struct kobject *kobj,
 }
 
 /**
- * batadv_store_isolation_mark - parse and store the isolation mark/mask entered
- *  by the user
+ * batadv_store_isolation_mark() - parse and store the isolation mark/mask
+ *  entered by the user
  * @kobj: kobject representing the private mesh sysfs directory
  * @attr: the batman-adv attribute the user is interacting with
  * @buff: the buffer containing the user data
@@ -733,6 +735,12 @@ static struct batadv_attribute *batadv_vlan_attrs[] = {
 	NULL,
 };
 
+/**
+ * batadv_sysfs_add_meshif() - Add soft interface specific sysfs entries
+ * @dev: netdev struct of the soft interface
+ *
+ * Return: 0 on success or negative error number in case of failure
+ */
 int batadv_sysfs_add_meshif(struct net_device *dev)
 {
 	struct kobject *batif_kobject = &dev->dev.kobj;
@@ -773,6 +781,10 @@ out:
 	return -ENOMEM;
 }
 
+/**
+ * batadv_sysfs_del_meshif() - Remove soft interface specific sysfs entries
+ * @dev: netdev struct of the soft interface
+ */
 void batadv_sysfs_del_meshif(struct net_device *dev)
 {
 	struct batadv_priv *bat_priv = netdev_priv(dev);
@@ -788,7 +800,7 @@ void batadv_sysfs_del_meshif(struct net_device *dev)
 }
 
 /**
- * batadv_sysfs_add_vlan - add all the needed sysfs objects for the new vlan
+ * batadv_sysfs_add_vlan() - add all the needed sysfs objects for the new vlan
  * @dev: netdev of the mesh interface
  * @vlan: private data of the newly added VLAN interface
  *
@@ -849,7 +861,7 @@ out:
 }
 
 /**
- * batadv_sysfs_del_vlan - remove all the sysfs objects for a given VLAN
+ * batadv_sysfs_del_vlan() - remove all the sysfs objects for a given VLAN
  * @bat_priv: the bat priv with all the soft interface information
  * @vlan: the private data of the VLAN to destroy
  */
@@ -894,7 +906,7 @@ static ssize_t batadv_show_mesh_iface(struct kobject *kobj,
 }
 
 /**
- * batadv_store_mesh_iface_finish - store new hardif mesh_iface state
+ * batadv_store_mesh_iface_finish() - store new hardif mesh_iface state
  * @net_dev: netdevice to add/remove to/from batman-adv soft-interface
  * @ifname: name of soft-interface to modify
  *
@@ -947,7 +959,7 @@ out:
 }
 
 /**
- * batadv_store_mesh_iface_work - store new hardif mesh_iface state
+ * batadv_store_mesh_iface_work() - store new hardif mesh_iface state
  * @work: work queue item
  *
  * Changes the parts of the hard+soft interface which can not be modified under
@@ -1043,7 +1055,7 @@ static ssize_t batadv_show_iface_status(struct kobject *kobj,
 #ifdef CONFIG_BATMAN_ADV_BATMAN_V
 
 /**
- * batadv_store_throughput_override - parse and store throughput override
+ * batadv_store_throughput_override() - parse and store throughput override
  *  entered by the user
  * @kobj: kobject representing the private mesh sysfs directory
  * @attr: the batman-adv attribute the user is interacting with
@@ -1130,6 +1142,13 @@ static struct batadv_attribute *batadv_batman_attrs[] = {
 	NULL,
 };
 
+/**
+ * batadv_sysfs_add_hardif() - Add hard interface specific sysfs entries
+ * @hardif_obj: address where to store the pointer to the new sysfs folder
+ * @dev: netdev struct of the hard interface
+ *
+ * Return: 0 on success or negative error number in case of failure
+ */
 int batadv_sysfs_add_hardif(struct kobject **hardif_obj, struct net_device *dev)
 {
 	struct kobject *hardif_kobject = &dev->dev.kobj;
@@ -1164,6 +1183,11 @@ out:
 	return -ENOMEM;
 }
 
+/**
+ * batadv_sysfs_del_hardif() - Remove hard interface specific sysfs entries
+ * @hardif_obj: address of the pointer that stores the batman-adv sysfs folder
+ *  of the hard interface
+ */
 void batadv_sysfs_del_hardif(struct kobject **hardif_obj)
 {
 	kobject_uevent(*hardif_obj, KOBJ_REMOVE);
@@ -1172,6 +1196,16 @@ void batadv_sysfs_del_hardif(struct kobject **hardif_obj)
 	*hardif_obj = NULL;
 }
 
+/**
+ * batadv_throw_uevent() - Send an uevent with batman-adv specific env data
+ * @bat_priv: the bat priv with all the soft interface information
+ * @type: subsystem type of event. Stored in uevent's BATTYPE
+ * @action: action type of event. Stored in uevent's BATACTION
+ * @data: string with additional information about the event (ignored for
+ *  BATADV_UEV_DEL). Stored in uevent's BATDATA
+ *
+ * Return: 0 on success or negative error number in case of failure
+ */
 int batadv_throw_uevent(struct batadv_priv *bat_priv, enum batadv_uev_type type,
 			enum batadv_uev_action action, const char *data)
 {
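[For orientation, the env strings named in the kernel-doc above are handed to the kobject core; a condensed sketch of the mechanism (the BATADV_UEV_* string tables and bat_kobj are assumed from batman-adv's main.c, allocation-failure handling and freeing elided):

	/* Sketch only: build BATTYPE/BATACTION/BATDATA and fire the uevent. */
	char *uevent_env[4] = { NULL, NULL, NULL, NULL };
	int ret;

	uevent_env[0] = kasprintf(GFP_ATOMIC, "BATTYPE=%s",
				  batadv_uev_type_str[type]);
	uevent_env[1] = kasprintf(GFP_ATOMIC, "BATACTION=%s",
				  batadv_uev_action_str[action]);
	if (action != BATADV_UEV_DEL)
		uevent_env[2] = kasprintf(GFP_ATOMIC, "BATDATA=%s", data);

	ret = kobject_uevent_env(bat_kobj, KOBJ_CHANGE, uevent_env);
]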
diff --git a/net/batman-adv/sysfs.h b/net/batman-adv/sysfs.h
index e487412e256b..bbeee61221fa 100644
--- a/net/batman-adv/sysfs.h
+++ b/net/batman-adv/sysfs.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
 /* Copyright (C) 2010-2017  B.A.T.M.A.N. contributors:
  *
  * Marek Lindner
@@ -35,10 +36,23 @@ struct net_device;
  */
 #define BATADV_SYSFS_VLAN_SUBDIR_PREFIX "vlan"
 
+/**
+ * struct batadv_attribute - sysfs export helper for batman-adv attributes
+ */
 struct batadv_attribute {
+	/** @attr: sysfs attribute file */
 	struct attribute attr;
+
+	/**
+	 * @show: function to export the current attribute's content to sysfs
+	 */
 	ssize_t (*show)(struct kobject *kobj, struct attribute *attr,
 			char *buf);
+
+	/**
+	 * @store: function to load new value from character buffer and save it
+	 * in batman-adv attribute
+	 */
 	ssize_t (*store)(struct kobject *kobj, struct attribute *attr,
 			 char *buf, size_t count);
 };
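[The struct mirrors the generic sysfs attribute pattern: a name/mode pair plus show/store callbacks. A hypothetical read-only attribute built on it could look like the following; the name and value are illustrative only:

	static ssize_t batadv_show_example(struct kobject *kobj,
					   struct attribute *attr, char *buf)
	{
		return sprintf(buf, "example\n");
	}

	static struct batadv_attribute batadv_attr_example = {
		.attr = { .name = "example", .mode = 0444 },
		.show = batadv_show_example,
		/* no .store callback: the attribute is read-only */
	};
]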
diff --git a/net/batman-adv/tp_meter.c b/net/batman-adv/tp_meter.c
index ebc4e2241c77..8b576712d0c1 100644
--- a/net/batman-adv/tp_meter.c
+++ b/net/batman-adv/tp_meter.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
 /* Copyright (C) 2012-2017  B.A.T.M.A.N. contributors:
  *
  * Edo Monticelli, Antonio Quartulli
@@ -19,13 +20,13 @@
 #include "main.h"
 
 #include <linux/atomic.h>
-#include <linux/bug.h>
+#include <linux/build_bug.h>
 #include <linux/byteorder/generic.h>
 #include <linux/cache.h>
 #include <linux/compiler.h>
 #include <linux/err.h>
 #include <linux/etherdevice.h>
-#include <linux/fs.h>
+#include <linux/gfp.h>
 #include <linux/if_ether.h>
 #include <linux/init.h>
 #include <linux/jiffies.h>
@@ -48,13 +49,13 @@
 #include <linux/timer.h>
 #include <linux/wait.h>
 #include <linux/workqueue.h>
+#include <uapi/linux/batadv_packet.h>
 #include <uapi/linux/batman_adv.h>
 
 #include "hard-interface.h"
 #include "log.h"
 #include "netlink.h"
 #include "originator.h"
-#include "packet.h"
 #include "send.h"
 
 /**
@@ -97,7 +98,7 @@
 static u8 batadv_tp_prerandom[4096] __read_mostly;
 
 /**
- * batadv_tp_session_cookie - generate session cookie based on session ids
+ * batadv_tp_session_cookie() - generate session cookie based on session ids
  * @session: TP session identifier
  * @icmp_uid: icmp pseudo uid of the tp session
  *
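[The cookie simply packs the two session id bytes and the icmp uid into one u32; a sketch consistent with that description (the exact bit layout is an assumption):

	u32 cookie;

	cookie = icmp_uid << 16;   /* uid in the high bits */
	cookie |= session[0] << 8;
	cookie |= session[1];
]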
@@ -115,7 +116,7 @@ static u32 batadv_tp_session_cookie(const u8 session[2], u8 icmp_uid)
 }
 
 /**
- * batadv_tp_cwnd - compute the new cwnd size
+ * batadv_tp_cwnd() - compute the new cwnd size
  * @base: base cwnd size value
  * @increment: the value to add to base to get the new size
  * @min: minimum cwnd value (usually MSS)
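[In other words the helper returns base + increment, clamped from below by min and guarded against u32 wrap-around. A sketch under those assumptions, not the verbatim implementation:

	static u32 example_tp_cwnd(u32 base, u32 increment, u32 min)
	{
		u32 new_size = base + increment;

		/* saturate instead of wrapping on overflow */
		if (new_size < base)
			new_size = U32_MAX;

		return max(new_size, min);
	}
]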
@@ -140,7 +141,7 @@ static u32 batadv_tp_cwnd(u32 base, u32 increment, u32 min)
 }
 
 /**
- * batadv_tp_updated_cwnd - update the Congestion Windows
+ * batadv_tp_update_cwnd() - update the congestion window
  * @tp_vars: the private data of the current TP meter session
  * @mss: maximum segment size of transmission
  *
@@ -176,7 +177,7 @@ static void batadv_tp_update_cwnd(struct batadv_tp_vars *tp_vars, u32 mss)
 }
 
 /**
- * batadv_tp_update_rto - calculate new retransmission timeout
+ * batadv_tp_update_rto() - calculate new retransmission timeout
  * @tp_vars: the private data of the current TP meter session
  * @new_rtt: new roundtrip time in msec
  */
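[The RTO update follows the classic TCP smoothing scheme (RFC 6298): track a smoothed RTT plus an RTT variance and derive rto = srtt + 4 * rttvar. A sketch assuming srtt/rttvar/rto fields on tp_vars:

	long m = new_rtt - tp_vars->srtt;

	if (tp_vars->srtt == 0) {
		/* first sample: no history to smooth against */
		tp_vars->srtt = new_rtt;
		tp_vars->rttvar = new_rtt / 2;
	} else {
		tp_vars->rttvar += (abs(m) - tp_vars->rttvar) >> 2; /* beta 1/4 */
		tp_vars->srtt += m >> 3;                            /* alpha 1/8 */
	}

	tp_vars->rto = tp_vars->srtt + (tp_vars->rttvar << 2);
]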
@@ -212,7 +213,7 @@ static void batadv_tp_update_rto(struct batadv_tp_vars *tp_vars,
 }
 
 /**
- * batadv_tp_batctl_notify - send client status result to client
+ * batadv_tp_batctl_notify() - send client status result to client
  * @reason: reason for tp meter session stop
  * @dst: destination of tp_meter session
  * @bat_priv: the bat priv with all the soft interface information
@@ -244,7 +245,7 @@ static void batadv_tp_batctl_notify(enum batadv_tp_meter_reason reason,
 }
 
 /**
- * batadv_tp_batctl_error_notify - send client error result to client
+ * batadv_tp_batctl_error_notify() - send client error result to client
  * @reason: reason for tp meter session stop
  * @dst: destination of tp_meter session
  * @bat_priv: the bat priv with all the soft interface information
@@ -259,7 +260,7 @@ static void batadv_tp_batctl_error_notify(enum batadv_tp_meter_reason reason,
 }
 
 /**
- * batadv_tp_list_find - find a tp_vars object in the global list
+ * batadv_tp_list_find() - find a tp_vars object in the global list
  * @bat_priv: the bat priv with all the soft interface information
  * @dst: the other endpoint MAC address to look for
  *
@@ -294,7 +295,8 @@ static struct batadv_tp_vars *batadv_tp_list_find(struct batadv_priv *bat_priv,
 }
 
 /**
- * batadv_tp_list_find_session - find tp_vars session object in the global list
+ * batadv_tp_list_find_session() - find tp_vars session object in the global
+ *  list
  * @bat_priv: the bat priv with all the soft interface information
  * @dst: the other endpoint MAC address to look for
  * @session: session identifier
@@ -335,7 +337,7 @@ batadv_tp_list_find_session(struct batadv_priv *bat_priv, const u8 *dst,
 }
 
 /**
- * batadv_tp_vars_release - release batadv_tp_vars from lists and queue for
+ * batadv_tp_vars_release() - release batadv_tp_vars from lists and queue for
  *  free after rcu grace period
  * @ref: kref pointer of the batadv_tp_vars
  */
@@ -360,7 +362,7 @@ static void batadv_tp_vars_release(struct kref *ref)
 }
 
 /**
- * batadv_tp_vars_put - decrement the batadv_tp_vars refcounter and possibly
+ * batadv_tp_vars_put() - decrement the batadv_tp_vars refcounter and possibly
  *  release it
  * @tp_vars: the private data of the current TP meter session to be free'd
  */
@@ -370,7 +372,7 @@ static void batadv_tp_vars_put(struct batadv_tp_vars *tp_vars)
 }
 
 /**
- * batadv_tp_sender_cleanup - cleanup sender data and drop and timer
+ * batadv_tp_sender_cleanup() - clean up sender data and drop the timer
  * @bat_priv: the bat priv with all the soft interface information
  * @tp_vars: the private data of the current TP meter session to cleanup
  */
@@ -400,7 +402,7 @@ static void batadv_tp_sender_cleanup(struct batadv_priv *bat_priv,
 }
 
 /**
- * batadv_tp_sender_end - print info about ended session and inform client
+ * batadv_tp_sender_end() - print info about ended session and inform client
  * @bat_priv: the bat priv with all the soft interface information
  * @tp_vars: the private data of the current TP meter session
  */
@@ -433,7 +435,7 @@ static void batadv_tp_sender_end(struct batadv_priv *bat_priv,
 }
 
 /**
- * batadv_tp_sender_shutdown - let sender thread/timer stop gracefully
+ * batadv_tp_sender_shutdown() - let sender thread/timer stop gracefully
  * @tp_vars: the private data of the current TP meter session
  * @reason: reason for tp meter session stop
  */
@@ -447,7 +449,7 @@ static void batadv_tp_sender_shutdown(struct batadv_tp_vars *tp_vars,
 }
 
 /**
- * batadv_tp_sender_finish - stop sender session after test_length was reached
+ * batadv_tp_sender_finish() - stop sender session after test_length was reached
  * @work: delayed work reference of the related tp_vars
  */
 static void batadv_tp_sender_finish(struct work_struct *work)
@@ -463,7 +465,7 @@ static void batadv_tp_sender_finish(struct work_struct *work)
 }
 
 /**
- * batadv_tp_reset_sender_timer - reschedule the sender timer
+ * batadv_tp_reset_sender_timer() - reschedule the sender timer
  * @tp_vars: the private TP meter data for this session
  *
  * Reschedule the timer using tp_vars->rto as delay
@@ -481,7 +483,7 @@ static void batadv_tp_reset_sender_timer(struct batadv_tp_vars *tp_vars)
 }
 
 /**
- * batadv_tp_sender_timeout - timer that fires in case of packet loss
+ * batadv_tp_sender_timeout() - timer that fires in case of packet loss
  * @t: address to timer_list inside tp_vars
  *
  * If fired it means that there was packet loss.
@@ -531,7 +533,7 @@ static void batadv_tp_sender_timeout(struct timer_list *t)
 }
 
 /**
- * batadv_tp_fill_prerandom - Fill buffer with prefetched random bytes
+ * batadv_tp_fill_prerandom() - Fill buffer with prefetched random bytes
  * @tp_vars: the private TP meter data for this session
  * @buf: Buffer to fill with bytes
  * @nbytes: amount of pseudorandom bytes
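[Conceptually the helper streams bytes out of the static batadv_tp_prerandom[] pool declared above, advancing a per-session offset under prerandom_lock and wrapping at the pool boundary. A condensed sketch (field names assumed):

	size_t pool = sizeof(batadv_tp_prerandom);
	size_t off;

	spin_lock_bh(&tp_vars->prerandom_lock);
	off = tp_vars->prerandom_offset;
	tp_vars->prerandom_offset = (off + nbytes) % pool;
	spin_unlock_bh(&tp_vars->prerandom_lock);

	while (nbytes) {
		size_t chunk = min(nbytes, pool - off);

		memcpy(buf, &batadv_tp_prerandom[off], chunk);
		buf += chunk;
		nbytes -= chunk;
		off = 0; /* wrap back to the start of the pool */
	}
]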
@@ -563,7 +565,7 @@ static void batadv_tp_fill_prerandom(struct batadv_tp_vars *tp_vars,
 }
 
 /**
- * batadv_tp_send_msg - send a single message
+ * batadv_tp_send_msg() - send a single message
  * @tp_vars: the private TP meter data for this session
  * @src: source mac address
  * @orig_node: the originator of the destination
@@ -623,7 +625,7 @@ static int batadv_tp_send_msg(struct batadv_tp_vars *tp_vars, const u8 *src,
 }
 
 /**
- * batadv_tp_recv_ack - ACK receiving function
+ * batadv_tp_recv_ack() - ACK receiving function
  * @bat_priv: the bat priv with all the soft interface information
  * @skb: the buffer containing the received packet
  *
@@ -765,7 +767,7 @@ out:
 }
 
 /**
- * batadv_tp_avail - check if congestion window is not full
+ * batadv_tp_avail() - check if congestion window is not full
  * @tp_vars: the private data of the current TP meter session
  * @payload_len: size of the payload of a single message
  *
@@ -783,7 +785,7 @@ static bool batadv_tp_avail(struct batadv_tp_vars *tp_vars,
 }
 
 /**
- * batadv_tp_wait_available - wait until congestion window becomes free or
+ * batadv_tp_wait_available() - wait until congestion window becomes free or
  *  timeout is reached
  * @tp_vars: the private data of the current TP meter session
  * @plen: size of the payload of a single message
@@ -805,7 +807,7 @@ static int batadv_tp_wait_available(struct batadv_tp_vars *tp_vars, size_t plen)
 }
 
 /**
- * batadv_tp_send - main sending thread of a tp meter session
+ * batadv_tp_send() - main sending thread of a tp meter session
  * @arg: address of the related tp_vars
  *
  * Return: nothing, this function never returns
@@ -904,7 +906,8 @@ out:
 }
 
 /**
- * batadv_tp_start_kthread - start new thread which manages the tp meter sender
+ * batadv_tp_start_kthread() - start new thread which manages the tp meter
+ *  sender
  * @tp_vars: the private data of the current TP meter session
  */
 static void batadv_tp_start_kthread(struct batadv_tp_vars *tp_vars)
@@ -935,7 +938,7 @@ static void batadv_tp_start_kthread(struct batadv_tp_vars *tp_vars)
 }
 
 /**
- * batadv_tp_start - start a new tp meter session
+ * batadv_tp_start() - start a new tp meter session
  * @bat_priv: the bat priv with all the soft interface information
  * @dst: the receiver MAC address
  * @test_length: test length in milliseconds
@@ -1060,7 +1063,7 @@ void batadv_tp_start(struct batadv_priv *bat_priv, const u8 *dst,
 }
 
 /**
- * batadv_tp_stop - stop currently running tp meter session
+ * batadv_tp_stop() - stop currently running tp meter session
  * @bat_priv: the bat priv with all the soft interface information
  * @dst: the receiver MAC address
  * @return_value: reason for tp meter session stop
@@ -1092,7 +1095,7 @@ out:
 }
 
 /**
- * batadv_tp_reset_receiver_timer - reset the receiver shutdown timer
+ * batadv_tp_reset_receiver_timer() - reset the receiver shutdown timer
  * @tp_vars: the private data of the current TP meter session
  *
  * start the receiver shutdown timer or reset it if already started
@@ -1104,7 +1107,7 @@ static void batadv_tp_reset_receiver_timer(struct batadv_tp_vars *tp_vars)
 }
 
 /**
- * batadv_tp_receiver_shutdown - stop a tp meter receiver when timeout is
+ * batadv_tp_receiver_shutdown() - stop a tp meter receiver when timeout is
  *  reached without received ack
  * @t: address to timer_list inside tp_vars
  */
@@ -1149,7 +1152,7 @@ static void batadv_tp_receiver_shutdown(struct timer_list *t)
 }
 
 /**
- * batadv_tp_send_ack - send an ACK packet
+ * batadv_tp_send_ack() - send an ACK packet
  * @bat_priv: the bat priv with all the soft interface information
  * @dst: the mac address of the destination originator
  * @seq: the sequence number to ACK
@@ -1221,7 +1224,7 @@ out:
 }
 
 /**
- * batadv_tp_handle_out_of_order - store an out of order packet
+ * batadv_tp_handle_out_of_order() - store an out of order packet
  * @tp_vars: the private data of the current TP meter session
  * @skb: the buffer containing the received packet
  *
@@ -1297,7 +1300,7 @@ out:
 }
 
 /**
- * batadv_tp_ack_unordered - update number received bytes in current stream
+ * batadv_tp_ack_unordered() - update number of received bytes in current stream
  *  without gaps
  * @tp_vars: the private data of the current TP meter session
  */
@@ -1330,7 +1333,7 @@ static void batadv_tp_ack_unordered(struct batadv_tp_vars *tp_vars)
 }
 
 /**
- * batadv_tp_init_recv - return matching or create new receiver tp_vars
+ * batadv_tp_init_recv() - return matching or create new receiver tp_vars
  * @bat_priv: the bat priv with all the soft interface information
  * @icmp: received icmp tp msg
  *
@@ -1383,7 +1386,7 @@ out_unlock:
 }
 
 /**
- * batadv_tp_recv_msg - process a single data message
+ * batadv_tp_recv_msg() - process a single data message
  * @bat_priv: the bat priv with all the soft interface information
  * @skb: the buffer containing the received packet
  *
@@ -1468,7 +1471,7 @@ out:
 }
 
 /**
- * batadv_tp_meter_recv - main TP Meter receiving function
+ * batadv_tp_meter_recv() - main TP Meter receiving function
  * @bat_priv: the bat priv with all the soft interface information
  * @skb: the buffer containing the received packet
  */
@@ -1494,7 +1497,7 @@ void batadv_tp_meter_recv(struct batadv_priv *bat_priv, struct sk_buff *skb)
 }
 
 /**
- * batadv_tp_meter_init - initialize global tp_meter structures
+ * batadv_tp_meter_init() - initialize global tp_meter structures
  */
 void __init batadv_tp_meter_init(void)
 {
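[The init step only needs to seed the pseudorandom pool consumed by batadv_tp_fill_prerandom(); presumably little more than:

	get_random_bytes(batadv_tp_prerandom, sizeof(batadv_tp_prerandom));
]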
diff --git a/net/batman-adv/tp_meter.h b/net/batman-adv/tp_meter.h
index a8ada5c123bd..c8b8f2cb2c2b 100644
--- a/net/batman-adv/tp_meter.h
+++ b/net/batman-adv/tp_meter.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
 /* Copyright (C) 2012-2017  B.A.T.M.A.N. contributors:
  *
  * Edo Monticelli, Antonio Quartulli
diff --git a/net/batman-adv/translation-table.c b/net/batman-adv/translation-table.c
index 8a3ce79b1307..7550a9ccd695 100644
--- a/net/batman-adv/translation-table.c
+++ b/net/batman-adv/translation-table.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
 /* Copyright (C) 2007-2017  B.A.T.M.A.N. contributors:
  *
  * Marek Lindner, Simon Wunderlich, Antonio Quartulli
@@ -20,14 +21,14 @@
 
 #include <linux/atomic.h>
 #include <linux/bitops.h>
-#include <linux/bug.h>
+#include <linux/build_bug.h>
 #include <linux/byteorder/generic.h>
 #include <linux/cache.h>
 #include <linux/compiler.h>
 #include <linux/crc32c.h>
 #include <linux/errno.h>
 #include <linux/etherdevice.h>
-#include <linux/fs.h>
+#include <linux/gfp.h>
 #include <linux/if_ether.h>
 #include <linux/init.h>
 #include <linux/jhash.h>
@@ -36,6 +37,7 @@
 #include <linux/kref.h>
 #include <linux/list.h>
 #include <linux/lockdep.h>
+#include <linux/net.h>
 #include <linux/netdevice.h>
 #include <linux/netlink.h>
 #include <linux/rculist.h>
@@ -50,6 +52,7 @@
 #include <net/genetlink.h>
 #include <net/netlink.h>
 #include <net/sock.h>
+#include <uapi/linux/batadv_packet.h>
 #include <uapi/linux/batman_adv.h>
 
 #include "bridge_loop_avoidance.h"
@@ -58,7 +61,6 @@
 #include "log.h"
 #include "netlink.h"
 #include "originator.h"
-#include "packet.h"
 #include "soft-interface.h"
 #include "tvlv.h"
 
@@ -86,7 +88,7 @@ static void batadv_tt_global_del(struct batadv_priv *bat_priv,
 				 bool roaming);
 
 /**
- * batadv_compare_tt - check if two TT entries are the same
+ * batadv_compare_tt() - check if two TT entries are the same
  * @node: the list element pointer of the first TT entry
  * @data2: pointer to the tt_common_entry of the second TT entry
  *
@@ -105,7 +107,7 @@ static bool batadv_compare_tt(const struct hlist_node *node, const void *data2)
 }
 
 /**
- * batadv_choose_tt - return the index of the tt entry in the hash table
+ * batadv_choose_tt() - return the index of the tt entry in the hash table
  * @data: pointer to the tt_common_entry object to map
  * @size: the size of the hash table
  *
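[A TT entry is bucketed by hashing its MAC address and VLAN id together; a sketch of the expected shape (chained jhash over both fields is an assumption):

	const struct batadv_tt_common_entry *tt = data;
	u32 hash = 0;

	hash = jhash(&tt->addr, ETH_ALEN, hash);
	hash = jhash(&tt->vid, sizeof(tt->vid), hash);

	return hash % size;
]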
@@ -125,7 +127,7 @@ static inline u32 batadv_choose_tt(const void *data, u32 size)
 }
 
 /**
- * batadv_tt_hash_find - look for a client in the given hash table
+ * batadv_tt_hash_find() - look for a client in the given hash table
  * @hash: the hash table to search
  * @addr: the mac address of the client to look for
  * @vid: VLAN identifier
@@ -170,7 +172,7 @@ batadv_tt_hash_find(struct batadv_hashtable *hash, const u8 *addr,
 }
 
 /**
- * batadv_tt_local_hash_find - search the local table for a given client
+ * batadv_tt_local_hash_find() - search the local table for a given client
  * @bat_priv: the bat priv with all the soft interface information
  * @addr: the mac address of the client to look for
  * @vid: VLAN identifier
@@ -195,7 +197,7 @@ batadv_tt_local_hash_find(struct batadv_priv *bat_priv, const u8 *addr,
 }
 
 /**
- * batadv_tt_global_hash_find - search the global table for a given client
+ * batadv_tt_global_hash_find() - search the global table for a given client
  * @bat_priv: the bat priv with all the soft interface information
  * @addr: the mac address of the client to look for
  * @vid: VLAN identifier
@@ -220,7 +222,7 @@ batadv_tt_global_hash_find(struct batadv_priv *bat_priv, const u8 *addr,
 }
 
 /**
- * batadv_tt_local_entry_free_rcu - free the tt_local_entry
+ * batadv_tt_local_entry_free_rcu() - free the tt_local_entry
  * @rcu: rcu pointer of the tt_local_entry
  */
 static void batadv_tt_local_entry_free_rcu(struct rcu_head *rcu)
@@ -234,7 +236,7 @@ static void batadv_tt_local_entry_free_rcu(struct rcu_head *rcu)
 }
 
 /**
- * batadv_tt_local_entry_release - release tt_local_entry from lists and queue
+ * batadv_tt_local_entry_release() - release tt_local_entry from lists and queue
  *  for free after rcu grace period
  * @ref: kref pointer of the nc_node
  */
@@ -251,7 +253,7 @@ static void batadv_tt_local_entry_release(struct kref *ref)
 }
 
 /**
- * batadv_tt_local_entry_put - decrement the tt_local_entry refcounter and
+ * batadv_tt_local_entry_put() - decrement the tt_local_entry refcounter and
  *  possibly release it
  * @tt_local_entry: tt_local_entry to be free'd
  */
@@ -263,7 +265,7 @@ batadv_tt_local_entry_put(struct batadv_tt_local_entry *tt_local_entry)
 }
 
 /**
- * batadv_tt_global_entry_free_rcu - free the tt_global_entry
+ * batadv_tt_global_entry_free_rcu() - free the tt_global_entry
  * @rcu: rcu pointer of the tt_global_entry
  */
 static void batadv_tt_global_entry_free_rcu(struct rcu_head *rcu)
@@ -277,8 +279,8 @@ static void batadv_tt_global_entry_free_rcu(struct rcu_head *rcu)
 }
 
 /**
- * batadv_tt_global_entry_release - release tt_global_entry from lists and queue
- *  for free after rcu grace period
+ * batadv_tt_global_entry_release() - release tt_global_entry from lists and
+ *  queue for free after rcu grace period
  * @ref: kref pointer of the nc_node
  */
 static void batadv_tt_global_entry_release(struct kref *ref)
@@ -294,7 +296,7 @@ static void batadv_tt_global_entry_release(struct kref *ref)
 }
 
 /**
- * batadv_tt_global_entry_put - decrement the tt_global_entry refcounter and
+ * batadv_tt_global_entry_put() - decrement the tt_global_entry refcounter and
  *  possibly release it
  * @tt_global_entry: tt_global_entry to be free'd
  */
@@ -306,7 +308,7 @@ batadv_tt_global_entry_put(struct batadv_tt_global_entry *tt_global_entry)
 }
 
 /**
- * batadv_tt_global_hash_count - count the number of orig entries
+ * batadv_tt_global_hash_count() - count the number of orig entries
  * @bat_priv: the bat priv with all the soft interface information
  * @addr: the mac address of the client to count entries for
  * @vid: VLAN identifier
@@ -331,8 +333,8 @@ int batadv_tt_global_hash_count(struct batadv_priv *bat_priv,
 }
 
 /**
- * batadv_tt_local_size_mod - change the size by v of the local table identified
- *  by vid
+ * batadv_tt_local_size_mod() - change the size by v of the local table
+ *  identified by vid
  * @bat_priv: the bat priv with all the soft interface information
  * @vid: the VLAN identifier of the sub-table to change
  * @v: the amount to sum to the local table size
@@ -352,8 +354,8 @@ static void batadv_tt_local_size_mod(struct batadv_priv *bat_priv,
 }
 
 /**
- * batadv_tt_local_size_inc - increase by one the local table size for the given
- *  vid
+ * batadv_tt_local_size_inc() - increase by one the local table size for the
+ *  given vid
  * @bat_priv: the bat priv with all the soft interface information
  * @vid: the VLAN identifier
  */
@@ -364,8 +366,8 @@ static void batadv_tt_local_size_inc(struct batadv_priv *bat_priv,
 }
 
 /**
- * batadv_tt_local_size_dec - decrease by one the local table size for the given
- *  vid
+ * batadv_tt_local_size_dec() - decrease by one the local table size for the
+ *  given vid
  * @bat_priv: the bat priv with all the soft interface information
  * @vid: the VLAN identifier
  */
@@ -376,7 +378,7 @@ static void batadv_tt_local_size_dec(struct batadv_priv *bat_priv,
 }
 
 /**
- * batadv_tt_global_size_mod - change the size by v of the global table
+ * batadv_tt_global_size_mod() - change the size by v of the global table
  *  for orig_node identified by vid
  * @orig_node: the originator for which the table has to be modified
  * @vid: the VLAN identifier
@@ -404,7 +406,7 @@ static void batadv_tt_global_size_mod(struct batadv_orig_node *orig_node,
 }
 
 /**
- * batadv_tt_global_size_inc - increase by one the global table size for the
+ * batadv_tt_global_size_inc() - increase by one the global table size for the
  *  given vid
  * @orig_node: the originator whose global table size has to be increased
  * @vid: the vlan identifier
@@ -416,7 +418,7 @@ static void batadv_tt_global_size_inc(struct batadv_orig_node *orig_node,
 }
 
 /**
- * batadv_tt_global_size_dec - decrease by one the global table size for the
+ * batadv_tt_global_size_dec() - decrease by one the global table size for the
  *  given vid
  * @orig_node: the originator which global table size has to be decreased
  * @vid: the vlan identifier
@@ -428,7 +430,7 @@ static void batadv_tt_global_size_dec(struct batadv_orig_node *orig_node,
 }
 
 /**
- * batadv_tt_orig_list_entry_free_rcu - free the orig_entry
+ * batadv_tt_orig_list_entry_free_rcu() - free the orig_entry
  * @rcu: rcu pointer of the orig_entry
  */
 static void batadv_tt_orig_list_entry_free_rcu(struct rcu_head *rcu)
@@ -441,7 +443,7 @@ static void batadv_tt_orig_list_entry_free_rcu(struct rcu_head *rcu)
 }
 
 /**
- * batadv_tt_orig_list_entry_release - release tt orig entry from lists and
+ * batadv_tt_orig_list_entry_release() - release tt orig entry from lists and
  *  queue for free after rcu grace period
  * @ref: kref pointer of the tt orig entry
  */
@@ -457,7 +459,7 @@ static void batadv_tt_orig_list_entry_release(struct kref *ref)
 }
 
 /**
- * batadv_tt_orig_list_entry_put - decrement the tt orig entry refcounter and
+ * batadv_tt_orig_list_entry_put() - decrement the tt orig entry refcounter and
  *  possibly release it
  * @orig_entry: tt orig entry to be free'd
  */
@@ -468,7 +470,7 @@ batadv_tt_orig_list_entry_put(struct batadv_tt_orig_list_entry *orig_entry)
 }
 
 /**
- * batadv_tt_local_event - store a local TT event (ADD/DEL)
+ * batadv_tt_local_event() - store a local TT event (ADD/DEL)
  * @bat_priv: the bat priv with all the soft interface information
  * @tt_local_entry: the TT entry involved in the event
  * @event_flags: flags to store in the event structure
@@ -543,7 +545,7 @@ unlock:
 }
 
 /**
- * batadv_tt_len - compute length in bytes of given number of tt changes
+ * batadv_tt_len() - compute length in bytes of given number of tt changes
  * @changes_num: number of tt changes
  *
  * Return: computed length in bytes.
@@ -554,7 +556,7 @@ static int batadv_tt_len(int changes_num)
 }
 
 /**
- * batadv_tt_entries - compute the number of entries fitting in tt_len bytes
+ * batadv_tt_entries() - compute the number of entries fitting in tt_len bytes
  * @tt_len: available space
  *
  * Return: the number of entries.
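[The two helpers are inverses of each other around the size of one on-wire TT change record; a sketch assuming struct batadv_tvlv_tt_change is that record:

	static int example_tt_len(int changes_num)
	{
		return changes_num * sizeof(struct batadv_tvlv_tt_change);
	}

	static u16 example_tt_entries(u16 tt_len)
	{
		return tt_len / example_tt_len(1);
	}
]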
@@ -565,8 +567,8 @@ static u16 batadv_tt_entries(u16 tt_len)
 }
 
 /**
- * batadv_tt_local_table_transmit_size - calculates the local translation table
- *  size when transmitted over the air
+ * batadv_tt_local_table_transmit_size() - calculates the local translation
+ *  table size when transmitted over the air
  * @bat_priv: the bat priv with all the soft interface information
  *
  * Return: local translation table size in bytes.
@@ -625,7 +627,7 @@ static void batadv_tt_global_free(struct batadv_priv *bat_priv,
 }
 
 /**
- * batadv_tt_local_add - add a new client to the local table or update an
+ * batadv_tt_local_add() - add a new client to the local table or update an
  *  existing client
  * @soft_iface: netdev struct of the mesh interface
  * @addr: the mac address of the client to add
@@ -830,7 +832,7 @@ out:
 }
 
 /**
- * batadv_tt_prepare_tvlv_global_data - prepare the TVLV TT header to send
+ * batadv_tt_prepare_tvlv_global_data() - prepare the TVLV TT header to send
  *  within a TT Response directed to another node
  * @orig_node: originator for which the TT data has to be prepared
  * @tt_data: uninitialised pointer to the address of the TVLV buffer
@@ -903,8 +905,8 @@ out:
 }
 
 /**
- * batadv_tt_prepare_tvlv_local_data - allocate and prepare the TT TVLV for this
- *  node
+ * batadv_tt_prepare_tvlv_local_data() - allocate and prepare the TT TVLV for
+ *  this node
  * @bat_priv: the bat priv with all the soft interface information
  * @tt_data: uninitialised pointer to the address of the TVLV buffer
  * @tt_change: uninitialised pointer to the address of the area where the TT
@@ -977,8 +979,8 @@ out:
 }
 
 /**
- * batadv_tt_tvlv_container_update - update the translation table tvlv container
- *  after local tt changes have been committed
+ * batadv_tt_tvlv_container_update() - update the translation table tvlv
+ *  container after local tt changes have been committed
  * @bat_priv: the bat priv with all the soft interface information
  */
 static void batadv_tt_tvlv_container_update(struct batadv_priv *bat_priv)
@@ -1053,6 +1055,14 @@ container_register:
 }
 
 #ifdef CONFIG_BATMAN_ADV_DEBUGFS
+
+/**
+ * batadv_tt_local_seq_print_text() - Print the local tt table in a seq file
+ * @seq: seq file to print on
+ * @offset: not used
+ *
+ * Return: always 0
+ */
 int batadv_tt_local_seq_print_text(struct seq_file *seq, void *offset)
 {
 	struct net_device *net_dev = (struct net_device *)seq->private;
@@ -1123,7 +1133,7 @@ out:
 #endif
 
 /**
- * batadv_tt_local_dump_entry - Dump one TT local entry into a message
+ * batadv_tt_local_dump_entry() - Dump one TT local entry into a message
  * @msg: Netlink message to dump into
  * @portid: Port making netlink request
  * @seq: Sequence number of netlink message
@@ -1179,7 +1189,7 @@ batadv_tt_local_dump_entry(struct sk_buff *msg, u32 portid, u32 seq,
 }
 
 /**
- * batadv_tt_local_dump_bucket - Dump one TT local bucket into a message
+ * batadv_tt_local_dump_bucket() - Dump one TT local bucket into a message
  * @msg: Netlink message to dump into
  * @portid: Port making netlink request
  * @seq: Sequence number of netlink message
@@ -1216,7 +1226,7 @@ batadv_tt_local_dump_bucket(struct sk_buff *msg, u32 portid, u32 seq,
 }
 
 /**
- * batadv_tt_local_dump - Dump TT local entries into a message
+ * batadv_tt_local_dump() - Dump TT local entries into a message
  * @msg: Netlink message to dump into
  * @cb: Parameters from query
  *
@@ -1300,7 +1310,7 @@ batadv_tt_local_set_pending(struct batadv_priv *bat_priv,
 }
 
 /**
- * batadv_tt_local_remove - logically remove an entry from the local table
+ * batadv_tt_local_remove() - logically remove an entry from the local table
  * @bat_priv: the bat priv with all the soft interface information
  * @addr: the MAC address of the client to remove
  * @vid: VLAN identifier
@@ -1362,7 +1372,7 @@ out:
 }
 
 /**
- * batadv_tt_local_purge_list - purge inactive tt local entries
+ * batadv_tt_local_purge_list() - purge inactive tt local entries
  * @bat_priv: the bat priv with all the soft interface information
  * @head: pointer to the list containing the local tt entries
  * @timeout: parameter deciding whether a given tt local entry is considered
@@ -1397,7 +1407,7 @@ static void batadv_tt_local_purge_list(struct batadv_priv *bat_priv,
 }
 
 /**
- * batadv_tt_local_purge - purge inactive tt local entries
+ * batadv_tt_local_purge() - purge inactive tt local entries
  * @bat_priv: the bat priv with all the soft interface information
  * @timeout: parameter deciding whether a given tt local entry is considered
  *  inactive or not
@@ -1490,7 +1500,7 @@ static void batadv_tt_changes_list_free(struct batadv_priv *bat_priv)
 }
 
 /**
- * batadv_tt_global_orig_entry_find - find a TT orig_list_entry
+ * batadv_tt_global_orig_entry_find() - find a TT orig_list_entry
  * @entry: the TT global entry where the orig_list_entry has to be
  *  extracted from
  * @orig_node: the originator for which the orig_list_entry has to be found
@@ -1524,8 +1534,8 @@ batadv_tt_global_orig_entry_find(const struct batadv_tt_global_entry *entry,
 }
 
 /**
- * batadv_tt_global_entry_has_orig - check if a TT global entry is also handled
- *  by a given originator
+ * batadv_tt_global_entry_has_orig() - check if a TT global entry is also
+ *  handled by a given originator
  * @entry: the TT global entry to check
  * @orig_node: the originator to search in the list
  *
@@ -1550,7 +1560,7 @@ batadv_tt_global_entry_has_orig(const struct batadv_tt_global_entry *entry,
 }
 
 /**
- * batadv_tt_global_sync_flags - update TT sync flags
+ * batadv_tt_global_sync_flags() - update TT sync flags
  * @tt_global: the TT global entry to update sync flags in
  *
  * Updates the sync flag bits in the tt_global flag attribute with a logical
@@ -1574,7 +1584,7 @@ batadv_tt_global_sync_flags(struct batadv_tt_global_entry *tt_global)
 }
 
 /**
- * batadv_tt_global_orig_entry_add - add or update a TT orig entry
+ * batadv_tt_global_orig_entry_add() - add or update a TT orig entry
  * @tt_global: the TT global entry to add an orig entry in
  * @orig_node: the originator to add an orig entry for
  * @ttvn: translation table version number of this changeset
@@ -1624,7 +1634,7 @@ out:
 }
 
 /**
- * batadv_tt_global_add - add a new TT global entry or update an existing one
+ * batadv_tt_global_add() - add a new TT global entry or update an existing one
  * @bat_priv: the bat priv with all the soft interface information
  * @orig_node: the originator announcing the client
  * @tt_addr: the mac address of the non-mesh client
@@ -1796,7 +1806,7 @@ out:
 }
 
 /**
- * batadv_transtable_best_orig - Get best originator list entry from tt entry
+ * batadv_transtable_best_orig() - Get best originator list entry from tt entry
  * @bat_priv: the bat priv with all the soft interface information
  * @tt_global_entry: global translation table entry to be analyzed
  *
@@ -1842,8 +1852,8 @@ batadv_transtable_best_orig(struct batadv_priv *bat_priv,
 
 #ifdef CONFIG_BATMAN_ADV_DEBUGFS
 /**
- * batadv_tt_global_print_entry - print all orig nodes who announce the address
- *  for this global entry
+ * batadv_tt_global_print_entry() - print all orig nodes that announce the
+ *  address for this global entry
  * @bat_priv: the bat priv with all the soft interface information
  * @tt_global_entry: global translation table entry to be printed
  * @seq: debugfs table seq_file struct
@@ -1925,6 +1935,13 @@ print_list:
 	}
 }
 
+/**
+ * batadv_tt_global_seq_print_text() - Print the global tt table in a seq file
+ * @seq: seq file to print on
+ * @offset: not used
+ *
+ * Return: always 0
+ */
 int batadv_tt_global_seq_print_text(struct seq_file *seq, void *offset)
 {
 	struct net_device *net_dev = (struct net_device *)seq->private;
@@ -1967,7 +1984,7 @@ out:
 #endif
 
 /**
- * batadv_tt_global_dump_subentry - Dump all TT local entries into a message
+ * batadv_tt_global_dump_subentry() - Dump one TT global subentry into a message
  * @msg: Netlink message to dump into
  * @portid: Port making netlink request
  * @seq: Sequence number of netlink message
@@ -2028,7 +2045,7 @@ batadv_tt_global_dump_subentry(struct sk_buff *msg, u32 portid, u32 seq,
 }
 
 /**
- * batadv_tt_global_dump_entry - Dump one TT global entry into a message
+ * batadv_tt_global_dump_entry() - Dump one TT global entry into a message
  * @msg: Netlink message to dump into
  * @portid: Port making netlink request
  * @seq: Sequence number of netlink message
@@ -2073,7 +2090,7 @@ batadv_tt_global_dump_entry(struct sk_buff *msg, u32 portid, u32 seq,
 }
 
 /**
- * batadv_tt_global_dump_bucket - Dump one TT local bucket into a message
+ * batadv_tt_global_dump_bucket() - Dump one TT global bucket into a message
  * @msg: Netlink message to dump into
  * @portid: Port making netlink request
  * @seq: Sequence number of netlink message
@@ -2112,7 +2129,7 @@ batadv_tt_global_dump_bucket(struct sk_buff *msg, u32 portid, u32 seq,
 }
 
 /**
- * batadv_tt_global_dump -  Dump TT global entries into a message
+ * batadv_tt_global_dump() - Dump TT global entries into a message
  * @msg: Netlink message to dump into
  * @cb: Parameters from query
  *
@@ -2180,7 +2197,7 @@ int batadv_tt_global_dump(struct sk_buff *msg, struct netlink_callback *cb)
 }
 
 /**
- * _batadv_tt_global_del_orig_entry - remove and free an orig_entry
+ * _batadv_tt_global_del_orig_entry() - remove and free an orig_entry
  * @tt_global_entry: the global entry to remove the orig_entry from
  * @orig_entry: the orig entry to remove and free
  *
@@ -2222,7 +2239,7 @@ batadv_tt_global_del_orig_list(struct batadv_tt_global_entry *tt_global_entry)
 }
 
 /**
- * batadv_tt_global_del_orig_node - remove orig_node from a global tt entry
+ * batadv_tt_global_del_orig_node() - remove orig_node from a global tt entry
  * @bat_priv: the bat priv with all the soft interface information
  * @tt_global_entry: the global entry to remove the orig_node from
  * @orig_node: the originator announcing the client
@@ -2301,7 +2318,7 @@ batadv_tt_global_del_roaming(struct batadv_priv *bat_priv,
 }
 
 /**
- * batadv_tt_global_del - remove a client from the global table
+ * batadv_tt_global_del() - remove a client from the global table
  * @bat_priv: the bat priv with all the soft interface information
  * @orig_node: an originator serving this client
  * @addr: the mac address of the client
@@ -2367,8 +2384,8 @@ out:
 }
 
 /**
- * batadv_tt_global_del_orig - remove all the TT global entries belonging to the
- *  given originator matching the provided vid
+ * batadv_tt_global_del_orig() - remove all the TT global entries belonging to
+ *  the given originator matching the provided vid
  * @bat_priv: the bat priv with all the soft interface information
  * @orig_node: the originator owning the entries to remove
  * @match_vid: the VLAN identifier to match. If negative all the entries will be
@@ -2539,7 +2556,7 @@ _batadv_is_ap_isolated(struct batadv_tt_local_entry *tt_local_entry,
 }
 
 /**
- * batadv_transtable_search - get the mesh destination for a given client
+ * batadv_transtable_search() - get the mesh destination for a given client
  * @bat_priv: the bat priv with all the soft interface information
  * @src: mac address of the source client
  * @addr: mac address of the destination client
@@ -2599,7 +2616,7 @@ out:
 }
 
 /**
- * batadv_tt_global_crc - calculates the checksum of the local table belonging
+ * batadv_tt_global_crc() - calculates the checksum of the local table belonging
  *  to the given orig_node
  * @bat_priv: the bat priv with all the soft interface information
  * @orig_node: originator for which the CRC should be computed
@@ -2694,7 +2711,7 @@ static u32 batadv_tt_global_crc(struct batadv_priv *bat_priv,
 }
 
 /**
- * batadv_tt_local_crc - calculates the checksum of the local table
+ * batadv_tt_local_crc() - calculates the checksum of the local table
  * @bat_priv: the bat priv with all the soft interface information
  * @vid: VLAN identifier for which the CRC32 has to be computed
  *
@@ -2751,7 +2768,7 @@ static u32 batadv_tt_local_crc(struct batadv_priv *bat_priv,
 }
 
 /**
- * batadv_tt_req_node_release - free tt_req node entry
+ * batadv_tt_req_node_release() - free tt_req node entry
  * @ref: kref pointer of the tt req_node entry
  */
 static void batadv_tt_req_node_release(struct kref *ref)
@@ -2764,7 +2781,7 @@ static void batadv_tt_req_node_release(struct kref *ref)
 }
 
 /**
- * batadv_tt_req_node_put - decrement the tt_req_node refcounter and
+ * batadv_tt_req_node_put() - decrement the tt_req_node refcounter and
  *  possibly release it
  * @tt_req_node: tt_req_node to be free'd
  */
@@ -2826,7 +2843,7 @@ static void batadv_tt_req_purge(struct batadv_priv *bat_priv)
 }
 
 /**
- * batadv_tt_req_node_new - search and possibly create a tt_req_node object
+ * batadv_tt_req_node_new() - search and possibly create a tt_req_node object
  * @bat_priv: the bat priv with all the soft interface information
  * @orig_node: orig node this request is being issued for
  *
@@ -2863,7 +2880,7 @@ unlock:
 }
 
 /**
- * batadv_tt_local_valid - verify that given tt entry is a valid one
+ * batadv_tt_local_valid() - verify that given tt entry is a valid one
  * @entry_ptr: to be checked local tt entry
  * @data_ptr: not used but definition required to satisfy the callback prototype
  *
@@ -2897,7 +2914,7 @@ static bool batadv_tt_global_valid(const void *entry_ptr,
 }
 
 /**
- * batadv_tt_tvlv_generate - fill the tvlv buff with the tt entries from the
+ * batadv_tt_tvlv_generate() - fill the tvlv buff with the tt entries from the
  *  specified tt hash
  * @bat_priv: the bat priv with all the soft interface information
  * @hash: hash table containing the tt entries
@@ -2948,7 +2965,7 @@ static void batadv_tt_tvlv_generate(struct batadv_priv *bat_priv,
 }
 
 /**
- * batadv_tt_global_check_crc - check if all the CRCs are correct
+ * batadv_tt_global_check_crc() - check if all the CRCs are correct
  * @orig_node: originator for which the CRCs have to be checked
  * @tt_vlan: pointer to the first tvlv VLAN entry
  * @num_vlan: number of tvlv VLAN entries
@@ -3005,7 +3022,7 @@ static bool batadv_tt_global_check_crc(struct batadv_orig_node *orig_node,
 }
 
 /**
- * batadv_tt_local_update_crc - update all the local CRCs
+ * batadv_tt_local_update_crc() - update all the local CRCs
  * @bat_priv: the bat priv with all the soft interface information
  */
 static void batadv_tt_local_update_crc(struct batadv_priv *bat_priv)
@@ -3021,7 +3038,7 @@ static void batadv_tt_local_update_crc(struct batadv_priv *bat_priv)
 }
 
 /**
- * batadv_tt_global_update_crc - update all the global CRCs for this orig_node
+ * batadv_tt_global_update_crc() - update all the global CRCs for this orig_node
  * @bat_priv: the bat priv with all the soft interface information
  * @orig_node: the orig_node for which the CRCs have to be updated
  */
@@ -3048,7 +3065,7 @@ static void batadv_tt_global_update_crc(struct batadv_priv *bat_priv,
 }
 
 /**
- * batadv_send_tt_request - send a TT Request message to a given node
+ * batadv_send_tt_request() - send a TT Request message to a given node
  * @bat_priv: the bat priv with all the soft interface information
  * @dst_orig_node: the destination of the message
  * @ttvn: the version number that the source of the message is looking for
@@ -3137,7 +3154,7 @@ out:
 }
 
 /**
- * batadv_send_other_tt_response - send reply to tt request concerning another
+ * batadv_send_other_tt_response() - send reply to tt request concerning another
  *  node's translation table
  * @bat_priv: the bat priv with all the soft interface information
  * @tt_data: tt data containing the tt request information
@@ -3270,8 +3287,8 @@ out:
 }
 
 /**
- * batadv_send_my_tt_response - send reply to tt request concerning this node's
- *  translation table
+ * batadv_send_my_tt_response() - send reply to tt request concerning this
+ *  node's translation table
  * @bat_priv: the bat priv with all the soft interface information
  * @tt_data: tt data containing the tt request information
  * @req_src: mac address of tt request sender
@@ -3388,7 +3405,7 @@ out:
 }
 
 /**
- * batadv_send_tt_response - send reply to tt request
+ * batadv_send_tt_response() - send reply to tt request
  * @bat_priv: the bat priv with all the soft interface information
  * @tt_data: tt data containing the tt request information
  * @req_src: mac address of tt request sender
@@ -3484,7 +3501,7 @@ static void batadv_tt_update_changes(struct batadv_priv *bat_priv,
 }
 
 /**
- * batadv_is_my_client - check if a client is served by the local node
+ * batadv_is_my_client() - check if a client is served by the local node
  * @bat_priv: the bat priv with all the soft interface information
  * @addr: the mac address of the client to check
  * @vid: VLAN identifier
@@ -3514,7 +3531,7 @@ out:
 }
 
 /**
- * batadv_handle_tt_response - process incoming tt reply
+ * batadv_handle_tt_response() - process incoming tt reply
  * @bat_priv: the bat priv with all the soft interface information
  * @tt_data: tt data containing the tt request information
  * @resp_src: mac address of tt reply sender
@@ -3607,7 +3624,7 @@ static void batadv_tt_roam_purge(struct batadv_priv *bat_priv)
 }
 
 /**
- * batadv_tt_check_roam_count - check if a client has roamed too frequently
+ * batadv_tt_check_roam_count() - check if a client has roamed too frequently
  * @bat_priv: the bat priv with all the soft interface information
  * @client: mac address of the roaming client
  *
@@ -3662,7 +3679,7 @@ unlock:
 }
 
 /**
- * batadv_send_roam_adv - send a roaming advertisement message
+ * batadv_send_roam_adv() - send a roaming advertisement message
  * @bat_priv: the bat priv with all the soft interface information
  * @client: mac address of the roaming client
  * @vid: VLAN identifier
@@ -3727,6 +3744,10 @@ static void batadv_tt_purge(struct work_struct *work)
 			   msecs_to_jiffies(BATADV_TT_WORK_PERIOD));
 }
 
+/**
+ * batadv_tt_free() - Free translation table of soft interface
+ * @bat_priv: the bat priv with all the soft interface information
+ */
 void batadv_tt_free(struct batadv_priv *bat_priv)
 {
 	batadv_tvlv_container_unregister(bat_priv, BATADV_TVLV_TT, 1);
@@ -3744,7 +3765,7 @@ void batadv_tt_free(struct batadv_priv *bat_priv)
 }
 
 /**
- * batadv_tt_local_set_flags - set or unset the specified flags on the local
+ * batadv_tt_local_set_flags() - set or unset the specified flags on the local
  *  table and possibly count them in the TT size
  * @bat_priv: the bat priv with all the soft interface information
  * @flags: the flag to switch
@@ -3830,7 +3851,7 @@ static void batadv_tt_local_purge_pending_clients(struct batadv_priv *bat_priv)
 }
 
 /**
- * batadv_tt_local_commit_changes_nolock - commit all pending local tt changes
+ * batadv_tt_local_commit_changes_nolock() - commit all pending local tt changes
  *  which have been queued in the time since the last commit
  * @bat_priv: the bat priv with all the soft interface information
  *
@@ -3863,7 +3884,7 @@ static void batadv_tt_local_commit_changes_nolock(struct batadv_priv *bat_priv)
 }
 
 /**
- * batadv_tt_local_commit_changes - commit all pending local tt changes which
+ * batadv_tt_local_commit_changes() - commit all pending local tt changes which
  *  have been queued in the time since the last commit
  * @bat_priv: the bat priv with all the soft interface information
  */
@@ -3874,6 +3895,15 @@ void batadv_tt_local_commit_changes(struct batadv_priv *bat_priv)
 	spin_unlock_bh(&bat_priv->tt.commit_lock);
 }
 
+/**
+ * batadv_is_ap_isolated() - Check if packet from upper layer should be dropped
+ * @bat_priv: the bat priv with all the soft interface information
+ * @src: source mac address of packet
+ * @dst: destination mac address of packet
+ * @vid: vlan id of packet
+ *
+ * Return: true when src+dst(+vid) pair should be isolated, false otherwise
+ */
 bool batadv_is_ap_isolated(struct batadv_priv *bat_priv, u8 *src, u8 *dst,
 			   unsigned short vid)
 {
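[A typical caller sits in the soft-interface TX path and drops the frame when the pair is isolated; a hedged sketch (surrounding variable names assumed):

	/* Hypothetical TX-path check: honour AP isolation before forwarding. */
	if (batadv_is_ap_isolated(bat_priv, ethhdr->h_source,
				  ethhdr->h_dest, vid))
		goto dropped;
]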
@@ -3909,7 +3939,7 @@ vlan_put:
 }
 
 /**
- * batadv_tt_update_orig - update global translation table with new tt
+ * batadv_tt_update_orig() - update global translation table with new tt
  *  information received via ogms
  * @bat_priv: the bat priv with all the soft interface information
  * @orig_node: the orig_node of the ogm
@@ -3994,7 +4024,7 @@ request_table:
 }
 
 /**
- * batadv_tt_global_client_is_roaming - check if a client is marked as roaming
+ * batadv_tt_global_client_is_roaming() - check if a client is marked as roaming
  * @bat_priv: the bat priv with all the soft interface information
  * @addr: the mac address of the client to check
  * @vid: VLAN identifier
@@ -4020,7 +4050,7 @@ out:
 }
 
 /**
- * batadv_tt_local_client_is_roaming - tells whether the client is roaming
+ * batadv_tt_local_client_is_roaming() - tells whether the client is roaming
  * @bat_priv: the bat priv with all the soft interface information
  * @addr: the mac address of the local client to query
  * @vid: VLAN identifier
@@ -4045,6 +4075,15 @@ out:
 	return ret;
 }
 
+/**
+ * batadv_tt_add_temporary_global_entry() - Add temporary entry to global TT
+ * @bat_priv: the bat priv with all the soft interface information
+ * @orig_node: orig node which the temporary entry should be associated with
+ * @addr: mac address of the client
+ * @vid: VLAN id of the new temporary global translation table
+ *
+ * Return: true when temporary tt entry could be added, false otherwise
+ */
 bool batadv_tt_add_temporary_global_entry(struct batadv_priv *bat_priv,
 					  struct batadv_orig_node *orig_node,
 					  const unsigned char *addr,
@@ -4069,7 +4108,7 @@ bool batadv_tt_add_temporary_global_entry(struct batadv_priv *bat_priv,
 }
 
 /**
- * batadv_tt_local_resize_to_mtu - resize the local translation table fit the
+ * batadv_tt_local_resize_to_mtu() - resize the local translation table to fit the
  *  maximum packet size that can be transported through the mesh
  * @soft_iface: netdev struct of the mesh interface
  *
@@ -4110,7 +4149,7 @@ void batadv_tt_local_resize_to_mtu(struct net_device *soft_iface)
 }
 
 /**
- * batadv_tt_tvlv_ogm_handler_v1 - process incoming tt tvlv container
+ * batadv_tt_tvlv_ogm_handler_v1() - process incoming tt tvlv container
  * @bat_priv: the bat priv with all the soft interface information
  * @orig: the orig_node of the ogm
  * @flags: flags indicating the tvlv state (see batadv_tvlv_handler_flags)
@@ -4149,7 +4188,7 @@ static void batadv_tt_tvlv_ogm_handler_v1(struct batadv_priv *bat_priv,
 }
 
 /**
- * batadv_tt_tvlv_unicast_handler_v1 - process incoming (unicast) tt tvlv
+ * batadv_tt_tvlv_unicast_handler_v1() - process incoming (unicast) tt tvlv
  *  container
  * @bat_priv: the bat priv with all the soft interface information
  * @src: mac address of tt tvlv sender
@@ -4231,7 +4270,8 @@ static int batadv_tt_tvlv_unicast_handler_v1(struct batadv_priv *bat_priv,
 }
 
 /**
- * batadv_roam_tvlv_unicast_handler_v1 - process incoming tt roam tvlv container
+ * batadv_roam_tvlv_unicast_handler_v1() - process incoming tt roam tvlv
+ *  container
  * @bat_priv: the bat priv with all the soft interface information
  * @src: mac address of tt tvlv sender
  * @dst: mac address of tt tvlv recipient
@@ -4281,7 +4321,7 @@ out:
 }
 
 /**
- * batadv_tt_init - initialise the translation table internals
+ * batadv_tt_init() - initialise the translation table internals
  * @bat_priv: the bat priv with all the soft interface information
  *
  * Return: 0 on success or negative error number in case of failure.
@@ -4317,7 +4357,7 @@ int batadv_tt_init(struct batadv_priv *bat_priv)
 }
 
 /**
- * batadv_tt_global_is_isolated - check if a client is marked as isolated
+ * batadv_tt_global_is_isolated() - check if a client is marked as isolated
  * @bat_priv: the bat priv with all the soft interface information
  * @addr: the mac address of the client
  * @vid: the identifier of the VLAN where this client is connected
@@ -4343,7 +4383,7 @@ bool batadv_tt_global_is_isolated(struct batadv_priv *bat_priv,
 }
 
 /**
- * batadv_tt_cache_init - Initialize tt memory object cache
+ * batadv_tt_cache_init() - Initialize tt memory object cache
  *
  * Return: 0 on success or negative error number in case of failure.
  */
@@ -4412,7 +4452,7 @@ err_tt_tl_destroy:
 }
 
 /**
- * batadv_tt_cache_destroy - Destroy tt memory object cache
+ * batadv_tt_cache_destroy() - Destroy tt memory object cache
  */
 void batadv_tt_cache_destroy(void)
 {
diff --git a/net/batman-adv/translation-table.h b/net/batman-adv/translation-table.h
index 411d586191da..8d9e3abec2c8 100644
--- a/net/batman-adv/translation-table.h
+++ b/net/batman-adv/translation-table.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
 /* Copyright (C) 2007-2017  B.A.T.M.A.N. contributors:
  *
  * Marek Lindner, Simon Wunderlich, Antonio Quartulli
diff --git a/net/batman-adv/tvlv.c b/net/batman-adv/tvlv.c
index 1d9e267caec9..5ffcb45ac6ff 100644
--- a/net/batman-adv/tvlv.c
+++ b/net/batman-adv/tvlv.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
 /* Copyright (C) 2007-2017  B.A.T.M.A.N. contributors:
  *
  * Marek Lindner, Simon Wunderlich
@@ -19,7 +20,7 @@
 
 #include <linux/byteorder/generic.h>
 #include <linux/etherdevice.h>
-#include <linux/fs.h>
+#include <linux/gfp.h>
 #include <linux/if_ether.h>
 #include <linux/kernel.h>
 #include <linux/kref.h>
@@ -35,14 +36,14 @@
 #include <linux/stddef.h>
 #include <linux/string.h>
 #include <linux/types.h>
+#include <uapi/linux/batadv_packet.h>
 
 #include "originator.h"
-#include "packet.h"
 #include "send.h"
 #include "tvlv.h"
 
 /**
- * batadv_tvlv_handler_release - release tvlv handler from lists and queue for
+ * batadv_tvlv_handler_release() - release tvlv handler from lists and queue for
  *  free after rcu grace period
  * @ref: kref pointer of the tvlv
  */
@@ -55,7 +56,7 @@ static void batadv_tvlv_handler_release(struct kref *ref)
 }
 
 /**
- * batadv_tvlv_handler_put - decrement the tvlv container refcounter and
+ * batadv_tvlv_handler_put() - decrement the tvlv container refcounter and
  *  possibly release it
  * @tvlv_handler: the tvlv handler to free
  */
@@ -65,7 +66,7 @@ static void batadv_tvlv_handler_put(struct batadv_tvlv_handler *tvlv_handler)
 }
 
 /**
- * batadv_tvlv_handler_get - retrieve tvlv handler from the tvlv handler list
+ * batadv_tvlv_handler_get() - retrieve tvlv handler from the tvlv handler list
  *  based on the provided type and version (both need to match)
  * @bat_priv: the bat priv with all the soft interface information
  * @type: tvlv handler type to look for
@@ -99,7 +100,7 @@ batadv_tvlv_handler_get(struct batadv_priv *bat_priv, u8 type, u8 version)
 }
 
 /**
- * batadv_tvlv_container_release - release tvlv from lists and free
+ * batadv_tvlv_container_release() - release tvlv from lists and free
  * @ref: kref pointer of the tvlv
  */
 static void batadv_tvlv_container_release(struct kref *ref)
@@ -111,7 +112,7 @@ static void batadv_tvlv_container_release(struct kref *ref)
 }
 
 /**
- * batadv_tvlv_container_put - decrement the tvlv container refcounter and
+ * batadv_tvlv_container_put() - decrement the tvlv container refcounter and
  *  possibly release it
  * @tvlv: the tvlv container to free
  */
@@ -121,7 +122,7 @@ static void batadv_tvlv_container_put(struct batadv_tvlv_container *tvlv)
 }
 
 /**
- * batadv_tvlv_container_get - retrieve tvlv container from the tvlv container
+ * batadv_tvlv_container_get() - retrieve tvlv container from the tvlv container
  *  list based on the provided type and version (both need to match)
  * @bat_priv: the bat priv with all the soft interface information
  * @type: tvlv container type to look for
@@ -155,7 +156,7 @@ batadv_tvlv_container_get(struct batadv_priv *bat_priv, u8 type, u8 version)
 }
 
 /**
- * batadv_tvlv_container_list_size - calculate the size of the tvlv container
+ * batadv_tvlv_container_list_size() - calculate the size of the tvlv container
  *  list entries
  * @bat_priv: the bat priv with all the soft interface information
  *
@@ -180,8 +181,8 @@ static u16 batadv_tvlv_container_list_size(struct batadv_priv *bat_priv)
 }
 
 /**
- * batadv_tvlv_container_remove - remove tvlv container from the tvlv container
- *  list
+ * batadv_tvlv_container_remove() - remove tvlv container from the tvlv
+ *  container list
  * @bat_priv: the bat priv with all the soft interface information
  * @tvlv: the to be removed tvlv container
  *
@@ -204,7 +205,7 @@ static void batadv_tvlv_container_remove(struct batadv_priv *bat_priv,
 }
 
 /**
- * batadv_tvlv_container_unregister - unregister tvlv container based on the
+ * batadv_tvlv_container_unregister() - unregister tvlv container based on the
  *  provided type and version (both need to match)
  * @bat_priv: the bat priv with all the soft interface information
  * @type: tvlv container type to unregister
@@ -222,7 +223,7 @@ void batadv_tvlv_container_unregister(struct batadv_priv *bat_priv,
 }
 
 /**
- * batadv_tvlv_container_register - register tvlv type, version and content
+ * batadv_tvlv_container_register() - register tvlv type, version and content
  *  to be propagated with each (primary interface) OGM
  * @bat_priv: the bat priv with all the soft interface information
  * @type: tvlv container type
@@ -267,7 +268,7 @@ void batadv_tvlv_container_register(struct batadv_priv *bat_priv,
 }
 
 /**
- * batadv_tvlv_realloc_packet_buff - reallocate packet buffer to accommodate
+ * batadv_tvlv_realloc_packet_buff() - reallocate packet buffer to accommodate
  *  requested packet size
  * @packet_buff: packet buffer
  * @packet_buff_len: packet buffer size
@@ -300,7 +301,7 @@ static bool batadv_tvlv_realloc_packet_buff(unsigned char **packet_buff,
 }
 
 /**
- * batadv_tvlv_container_ogm_append - append tvlv container content to given
+ * batadv_tvlv_container_ogm_append() - append tvlv container content to given
  *  OGM packet buffer
  * @bat_priv: the bat priv with all the soft interface information
  * @packet_buff: ogm packet buffer
@@ -353,7 +354,7 @@ end:
 }
 
 /**
- * batadv_tvlv_call_handler - parse the given tvlv buffer to call the
+ * batadv_tvlv_call_handler() - parse the given tvlv buffer to call the
  *  appropriate handlers
  * @bat_priv: the bat priv with all the soft interface information
  * @tvlv_handler: tvlv callback function handling the tvlv content
@@ -407,7 +408,7 @@ static int batadv_tvlv_call_handler(struct batadv_priv *bat_priv,
 }
 
 /**
- * batadv_tvlv_containers_process - parse the given tvlv buffer to call the
+ * batadv_tvlv_containers_process() - parse the given tvlv buffer to call the
  *  appropriate handlers
  * @bat_priv: the bat priv with all the soft interface information
  * @ogm_source: flag indicating whether the tvlv is an ogm or a unicast packet
@@ -474,7 +475,7 @@ int batadv_tvlv_containers_process(struct batadv_priv *bat_priv,
 }
 
 /**
- * batadv_tvlv_ogm_receive - process an incoming ogm and call the appropriate
+ * batadv_tvlv_ogm_receive() - process an incoming ogm and call the appropriate
  *  handlers
  * @bat_priv: the bat priv with all the soft interface information
  * @batadv_ogm_packet: ogm packet containing the tvlv containers
@@ -501,7 +502,7 @@ void batadv_tvlv_ogm_receive(struct batadv_priv *bat_priv,
 }
 
 /**
- * batadv_tvlv_handler_register - register tvlv handler based on the provided
+ * batadv_tvlv_handler_register() - register tvlv handler based on the provided
  *  type and version (both need to match) for ogm tvlv payload and/or unicast
  *  payload
  * @bat_priv: the bat priv with all the soft interface information
@@ -556,7 +557,7 @@ void batadv_tvlv_handler_register(struct batadv_priv *bat_priv,
 }
 
 /**
- * batadv_tvlv_handler_unregister - unregister tvlv handler based on the
+ * batadv_tvlv_handler_unregister() - unregister tvlv handler based on the
  *  provided type and version (both need to match)
  * @bat_priv: the bat priv with all the soft interface information
  * @type: tvlv handler type to be unregistered
@@ -579,7 +580,7 @@ void batadv_tvlv_handler_unregister(struct batadv_priv *bat_priv,
 }
 
 /**
- * batadv_tvlv_unicast_send - send a unicast packet with tvlv payload to the
+ * batadv_tvlv_unicast_send() - send a unicast packet with tvlv payload to the
  *  specified host
  * @bat_priv: the bat priv with all the soft interface information
  * @src: source mac address of the unicast packet
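
As a reader aid before the next file: the handler/container API documented in the
hunks above is consumed by protocol code roughly as sketched below. The sketch is
not part of the patch; it assumes the batadv_tvlv_handler_register() and
batadv_tvlv_container_register() signatures declared in this series' tvlv.h, and
the tvlv type 0x42, my_tvlv_ogm_handler() and my_tvlv_setup() are invented for
illustration.

#include <linux/types.h>
#include <uapi/linux/batadv_packet.h>

#include "main.h"
#include "tvlv.h"

/* Invented OGM handler: called for each matching tvlv found in a
 * received OGM; with BATADV_TVLV_HANDLER_OGM_CIFNOTFND it is also
 * called when no matching container was found.
 */
static void my_tvlv_ogm_handler(struct batadv_priv *bat_priv,
				struct batadv_orig_node *orig,
				u8 flags, void *tvlv_value,
				u16 tvlv_value_len)
{
	/* parse tvlv_value / tvlv_value_len here */
}

static void my_tvlv_setup(struct batadv_priv *bat_priv)
{
	u8 announce = 0;

	/* receive side: match tvlv type 0x42, version 1 in OGMs */
	batadv_tvlv_handler_register(bat_priv, my_tvlv_ogm_handler, NULL,
				     0x42, 1,
				     BATADV_TVLV_HANDLER_OGM_CIFNOTFND);

	/* send side: announce one byte of payload with every OGM; the
	 * buffer is copied by the core, so a stack variable is fine
	 */
	batadv_tvlv_container_register(bat_priv, 0x42, 1, &announce,
				       sizeof(announce));
}
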
diff --git a/net/batman-adv/tvlv.h b/net/batman-adv/tvlv.h
index 4d01400ada30..a74df33f446d 100644
--- a/net/batman-adv/tvlv.h
+++ b/net/batman-adv/tvlv.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
 /* Copyright (C) 2007-2017  B.A.T.M.A.N. contributors:
  *
  * Marek Lindner, Simon Wunderlich
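
The types.h diff below applies one mechanical transformation throughout the file:
each consolidated kernel-doc block is split into inline member comments, a form
kernel-doc parses equally and which keeps every description next to the member it
documents. For orientation, the two equivalent styles on an invented struct
foo_stats (not taken from the patch):

#include <linux/types.h>

/**
 * struct foo_stats - before: members documented in the header block
 * @tx: transmitted packet counter
 * @rx: received packet counter
 */
struct foo_stats {
	atomic_t tx;
	atomic_t rx;
};

After the conversion, the same documentation is attached directly to the members:

/**
 * struct foo_stats - after: members documented inline
 */
struct foo_stats {
	/** @tx: transmitted packet counter */
	atomic_t tx;

	/** @rx: received packet counter */
	atomic_t rx;
};
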
diff --git a/net/batman-adv/types.h b/net/batman-adv/types.h
index a62795868794..bb1578410e0c 100644
--- a/net/batman-adv/types.h
+++ b/net/batman-adv/types.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
 /* Copyright (C) 2007-2017  B.A.T.M.A.N. contributors:
  *
  * Marek Lindner, Simon Wunderlich
@@ -34,10 +35,9 @@
 #include <linux/types.h>
 #include <linux/wait.h>
 #include <linux/workqueue.h>
+#include <uapi/linux/batadv_packet.h>
 #include <uapi/linux/batman_adv.h>
 
-#include "packet.h"
-
 struct seq_file;
 
 #ifdef CONFIG_BATMAN_ADV_DAT
@@ -54,13 +54,15 @@ struct seq_file;
 
 /**
  * enum batadv_dhcp_recipient - dhcp destination
- * @BATADV_DHCP_NO: packet is not a dhcp message
- * @BATADV_DHCP_TO_SERVER: dhcp message is directed to a server
- * @BATADV_DHCP_TO_CLIENT: dhcp message is directed to a client
  */
 enum batadv_dhcp_recipient {
+	/** @BATADV_DHCP_NO: packet is not a dhcp message */
 	BATADV_DHCP_NO = 0,
+
+	/** @BATADV_DHCP_TO_SERVER: dhcp message is directed to a server */
 	BATADV_DHCP_TO_SERVER,
+
+	/** @BATADV_DHCP_TO_CLIENT: dhcp message is directed to a client */
 	BATADV_DHCP_TO_CLIENT,
 };
 
@@ -78,196 +80,274 @@ enum batadv_dhcp_recipient {
 
 /**
  * struct batadv_hard_iface_bat_iv - per hard-interface B.A.T.M.A.N. IV data
- * @ogm_buff: buffer holding the OGM packet
- * @ogm_buff_len: length of the OGM packet buffer
- * @ogm_seqno: OGM sequence number - used to identify each OGM
  */
 struct batadv_hard_iface_bat_iv {
+	/** @ogm_buff: buffer holding the OGM packet */
 	unsigned char *ogm_buff;
+
+	/** @ogm_buff_len: length of the OGM packet buffer */
 	int ogm_buff_len;
+
+	/** @ogm_seqno: OGM sequence number - used to identify each OGM */
 	atomic_t ogm_seqno;
 };
 
 /**
  * enum batadv_v_hard_iface_flags - interface flags useful to B.A.T.M.A.N. V
- * @BATADV_FULL_DUPLEX: tells if the connection over this link is full-duplex
- * @BATADV_WARNING_DEFAULT: tells whether we have warned the user that no
- *  throughput data is available for this interface and that default values are
- *  assumed.
  */
 enum batadv_v_hard_iface_flags {
+	/**
+	 * @BATADV_FULL_DUPLEX: tells if the connection over this link is
+	 *  full-duplex
+	 */
 	BATADV_FULL_DUPLEX	= BIT(0),
+
+	/**
+	 * @BATADV_WARNING_DEFAULT: tells whether we have warned the user that
+	 *  no throughput data is available for this interface and that default
+	 *  values are assumed.
+	 */
 	BATADV_WARNING_DEFAULT	= BIT(1),
 };
 
 /**
  * struct batadv_hard_iface_bat_v - per hard-interface B.A.T.M.A.N. V data
- * @elp_interval: time interval between two ELP transmissions
- * @elp_seqno: current ELP sequence number
- * @elp_skb: base skb containing the ELP message to send
- * @elp_wq: workqueue used to schedule ELP transmissions
- * @throughput_override: throughput override to disable link auto-detection
- * @flags: interface specific flags
  */
 struct batadv_hard_iface_bat_v {
+	/** @elp_interval: time interval between two ELP transmissions */
 	atomic_t elp_interval;
+
+	/** @elp_seqno: current ELP sequence number */
 	atomic_t elp_seqno;
+
+	/** @elp_skb: base skb containing the ELP message to send */
 	struct sk_buff *elp_skb;
+
+	/** @elp_wq: workqueue used to schedule ELP transmissions */
 	struct delayed_work elp_wq;
+
+	/**
+	 * @throughput_override: throughput override to disable link
+	 *  auto-detection
+	 */
 	atomic_t throughput_override;
+
+	/** @flags: interface specific flags */
 	u8 flags;
 };
 
 /**
  * enum batadv_hard_iface_wifi_flags - Flags describing the wifi configuration
  *  of a batadv_hard_iface
- * @BATADV_HARDIF_WIFI_WEXT_DIRECT: it is a wext wifi device
- * @BATADV_HARDIF_WIFI_CFG80211_DIRECT: it is a cfg80211 wifi device
- * @BATADV_HARDIF_WIFI_WEXT_INDIRECT: link device is a wext wifi device
- * @BATADV_HARDIF_WIFI_CFG80211_INDIRECT: link device is a cfg80211 wifi device
  */
 enum batadv_hard_iface_wifi_flags {
+	/** @BATADV_HARDIF_WIFI_WEXT_DIRECT: it is a wext wifi device */
 	BATADV_HARDIF_WIFI_WEXT_DIRECT = BIT(0),
+
+	/** @BATADV_HARDIF_WIFI_CFG80211_DIRECT: it is a cfg80211 wifi device */
 	BATADV_HARDIF_WIFI_CFG80211_DIRECT = BIT(1),
+
+	/**
+	 * @BATADV_HARDIF_WIFI_WEXT_INDIRECT: link device is a wext wifi device
+	 */
 	BATADV_HARDIF_WIFI_WEXT_INDIRECT = BIT(2),
+
+	/**
+	 * @BATADV_HARDIF_WIFI_CFG80211_INDIRECT: link device is a cfg80211 wifi
+	 * device
+	 */
 	BATADV_HARDIF_WIFI_CFG80211_INDIRECT = BIT(3),
 };
 
 /**
  * struct batadv_hard_iface - network device known to batman-adv
- * @list: list node for batadv_hardif_list
- * @if_num: identificator of the interface
- * @if_status: status of the interface for batman-adv
- * @num_bcasts: number of payload re-broadcasts on this interface (ARQ)
- * @wifi_flags: flags whether this is (directly or indirectly) a wifi interface
- * @net_dev: pointer to the net_device
- * @hardif_obj: kobject of the per interface sysfs "mesh" directory
- * @refcount: number of contexts the object is used
- * @batman_adv_ptype: packet type describing packets that should be processed by
- *  batman-adv for this interface
- * @soft_iface: the batman-adv interface which uses this network interface
- * @rcu: struct used for freeing in an RCU-safe manner
- * @bat_iv: per hard-interface B.A.T.M.A.N. IV data
- * @bat_v: per hard-interface B.A.T.M.A.N. V data
- * @debug_dir: dentry for nc subdir in batman-adv directory in debugfs
- * @neigh_list: list of unique single hop neighbors via this interface
- * @neigh_list_lock: lock protecting neigh_list
  */
 struct batadv_hard_iface {
+	/** @list: list node for batadv_hardif_list */
 	struct list_head list;
+
+	/** @if_num: identifier of the interface */
 	s16 if_num;
+
+	/** @if_status: status of the interface for batman-adv */
 	char if_status;
+
+	/**
+	 * @num_bcasts: number of payload re-broadcasts on this interface (ARQ)
+	 */
 	u8 num_bcasts;
+
+	/**
+	 * @wifi_flags: flags indicating whether this is (directly or
+	 *  indirectly) a wifi interface
+	 */
 	u32 wifi_flags;
+
+	/** @net_dev: pointer to the net_device */
 	struct net_device *net_dev;
+
+	/** @hardif_obj: kobject of the per interface sysfs "mesh" directory */
 	struct kobject *hardif_obj;
+
+	/** @refcount: number of contexts the object is used in */
 	struct kref refcount;
+
+	/**
+	 * @batman_adv_ptype: packet type describing packets that should be
+	 * processed by batman-adv for this interface
+	 */
 	struct packet_type batman_adv_ptype;
+
+	/**
+	 * @soft_iface: the batman-adv interface which uses this network
+	 *  interface
+	 */
 	struct net_device *soft_iface;
+
+	/** @rcu: struct used for freeing in an RCU-safe manner */
 	struct rcu_head rcu;
+
+	/** @bat_iv: per hard-interface B.A.T.M.A.N. IV data */
 	struct batadv_hard_iface_bat_iv bat_iv;
+
 #ifdef CONFIG_BATMAN_ADV_BATMAN_V
+	/** @bat_v: per hard-interface B.A.T.M.A.N. V data */
 	struct batadv_hard_iface_bat_v bat_v;
 #endif
+
+	/**
+	 * @debug_dir: dentry for nc subdir in batman-adv directory in debugfs
+	 */
 	struct dentry *debug_dir;
+
+	/**
+	 * @neigh_list: list of unique single hop neighbors via this interface
+	 */
 	struct hlist_head neigh_list;
-	/* neigh_list_lock protects: neigh_list */
+
+	/** @neigh_list_lock: lock protecting neigh_list */
 	spinlock_t neigh_list_lock;
 };
 
 /**
  * struct batadv_orig_ifinfo - originator info per outgoing interface
- * @list: list node for orig_node::ifinfo_list
- * @if_outgoing: pointer to outgoing hard-interface
- * @router: router that should be used to reach this originator
- * @last_real_seqno: last and best known sequence number
- * @last_ttl: ttl of last received packet
- * @last_seqno_forwarded: seqno of the OGM which was forwarded last
- * @batman_seqno_reset: time when the batman seqno window was reset
- * @refcount: number of contexts the object is used
- * @rcu: struct used for freeing in an RCU-safe manner
  */
 struct batadv_orig_ifinfo {
+	/** @list: list node for &batadv_orig_node.ifinfo_list */
 	struct hlist_node list;
+
+	/** @if_outgoing: pointer to outgoing hard-interface */
 	struct batadv_hard_iface *if_outgoing;
-	struct batadv_neigh_node __rcu *router; /* rcu protected pointer */
+
+	/** @router: router that should be used to reach this originator */
+	struct batadv_neigh_node __rcu *router;
+
+	/** @last_real_seqno: last and best known sequence number */
 	u32 last_real_seqno;
+
+	/** @last_ttl: ttl of last received packet */
 	u8 last_ttl;
+
+	/** @last_seqno_forwarded: seqno of the OGM which was forwarded last */
 	u32 last_seqno_forwarded;
+
+	/** @batman_seqno_reset: time when the batman seqno window was reset */
 	unsigned long batman_seqno_reset;
+
+	/** @refcount: number of contexts the object is used in */
 	struct kref refcount;
+
+	/** @rcu: struct used for freeing in an RCU-safe manner */
 	struct rcu_head rcu;
 };
 
 /**
  * struct batadv_frag_table_entry - head in the fragment buffer table
- * @fragment_list: head of list with fragments
- * @lock: lock to protect the list of fragments
- * @timestamp: time (jiffie) of last received fragment
- * @seqno: sequence number of the fragments in the list
- * @size: accumulated size of packets in list
- * @total_size: expected size of the assembled packet
  */
 struct batadv_frag_table_entry {
+	/** @fragment_list: head of list with fragments */
 	struct hlist_head fragment_list;
-	spinlock_t lock; /* protects fragment_list */
+
+	/** @lock: lock to protect the list of fragments */
+	spinlock_t lock;
+
+	/** @timestamp: time (jiffies) of last received fragment */
 	unsigned long timestamp;
+
+	/** @seqno: sequence number of the fragments in the list */
 	u16 seqno;
+
+	/** @size: accumulated size of packets in list */
 	u16 size;
+
+	/** @total_size: expected size of the assembled packet */
 	u16 total_size;
 };
 
 /**
  * struct batadv_frag_list_entry - entry in a list of fragments
- * @list: list node information
- * @skb: fragment
- * @no: fragment number in the set
  */
 struct batadv_frag_list_entry {
+	/** @list: list node information */
 	struct hlist_node list;
+
+	/** @skb: fragment */
 	struct sk_buff *skb;
+
+	/** @no: fragment number in the set */
 	u8 no;
 };
 
 /**
  * struct batadv_vlan_tt - VLAN specific TT attributes
- * @crc: CRC32 checksum of the entries belonging to this vlan
- * @num_entries: number of TT entries for this VLAN
  */
 struct batadv_vlan_tt {
+	/** @crc: CRC32 checksum of the entries belonging to this vlan */
 	u32 crc;
+
+	/** @num_entries: number of TT entries for this VLAN */
 	atomic_t num_entries;
 };
 
 /**
  * struct batadv_orig_node_vlan - VLAN specific data per orig_node
- * @vid: the VLAN identifier
- * @tt: VLAN specific TT attributes
- * @list: list node for orig_node::vlan_list
- * @refcount: number of context where this object is currently in use
- * @rcu: struct used for freeing in a RCU-safe manner
  */
 struct batadv_orig_node_vlan {
+	/** @vid: the VLAN identifier */
 	unsigned short vid;
+
+	/** @tt: VLAN specific TT attributes */
 	struct batadv_vlan_tt tt;
+
+	/** @list: list node for &batadv_orig_node.vlan_list */
 	struct hlist_node list;
+
+	/**
+	 * @refcount: number of contexts where this object is currently in use
+	 */
 	struct kref refcount;
+
+	/** @rcu: struct used for freeing in a RCU-safe manner */
 	struct rcu_head rcu;
 };
 
 /**
  * struct batadv_orig_bat_iv - B.A.T.M.A.N. IV private orig_node members
- * @bcast_own: set of bitfields (one per hard-interface) where each one counts
- * the number of our OGMs this orig_node rebroadcasted "back" to us  (relative
- * to last_real_seqno). Every bitfield is BATADV_TQ_LOCAL_WINDOW_SIZE bits long.
- * @bcast_own_sum: sum of bcast_own
- * @ogm_cnt_lock: lock protecting bcast_own, bcast_own_sum,
- *  neigh_node->bat_iv.real_bits & neigh_node->bat_iv.real_packet_count
  */
 struct batadv_orig_bat_iv {
+	/**
+	 * @bcast_own: set of bitfields (one per hard-interface) where each one
+	 * counts the number of our OGMs this orig_node rebroadcasted "back" to
+	 * us (relative to last_real_seqno). Every bitfield is
+	 * BATADV_TQ_LOCAL_WINDOW_SIZE bits long.
+	 */
 	unsigned long *bcast_own;
+
+	/** @bcast_own_sum: sum of bcast_own */
 	u8 *bcast_own_sum;
-	/* ogm_cnt_lock protects: bcast_own, bcast_own_sum,
+
+	/**
+	 * @ogm_cnt_lock: lock protecting bcast_own, bcast_own_sum,
 	 * neigh_node->bat_iv.real_bits & neigh_node->bat_iv.real_packet_count
 	 */
 	spinlock_t ogm_cnt_lock;
@@ -275,130 +355,205 @@ struct batadv_orig_bat_iv {
 
 /**
  * struct batadv_orig_node - structure for orig_list maintaining nodes of mesh
- * @orig: originator ethernet address
- * @ifinfo_list: list for routers per outgoing interface
- * @last_bonding_candidate: pointer to last ifinfo of last used router
- * @dat_addr: address of the orig node in the distributed hash
- * @last_seen: time when last packet from this node was received
- * @bcast_seqno_reset: time when the broadcast seqno window was reset
- * @mcast_handler_lock: synchronizes mcast-capability and -flag changes
- * @mcast_flags: multicast flags announced by the orig node
- * @mcast_want_all_unsnoopables_node: a list node for the
- *  mcast.want_all_unsnoopables list
- * @mcast_want_all_ipv4_node: a list node for the mcast.want_all_ipv4 list
- * @mcast_want_all_ipv6_node: a list node for the mcast.want_all_ipv6 list
- * @capabilities: announced capabilities of this originator
- * @capa_initialized: bitfield to remember whether a capability was initialized
- * @last_ttvn: last seen translation table version number
- * @tt_buff: last tt changeset this node received from the orig node
- * @tt_buff_len: length of the last tt changeset this node received from the
- *  orig node
- * @tt_buff_lock: lock that protects tt_buff and tt_buff_len
- * @tt_lock: prevents from updating the table while reading it. Table update is
- *  made up by two operations (data structure update and metdata -CRC/TTVN-
- *  recalculation) and they have to be executed atomically in order to avoid
- *  another thread to read the table/metadata between those.
- * @bcast_bits: bitfield containing the info which payload broadcast originated
- *  from this orig node this host already has seen (relative to
- *  last_bcast_seqno)
- * @last_bcast_seqno: last broadcast sequence number received by this host
- * @neigh_list: list of potential next hop neighbor towards this orig node
- * @neigh_list_lock: lock protecting neigh_list and router
- * @hash_entry: hlist node for batadv_priv::orig_hash
- * @bat_priv: pointer to soft_iface this orig node belongs to
- * @bcast_seqno_lock: lock protecting bcast_bits & last_bcast_seqno
- * @refcount: number of contexts the object is used
- * @rcu: struct used for freeing in an RCU-safe manner
- * @in_coding_list: list of nodes this orig can hear
- * @out_coding_list: list of nodes that can hear this orig
- * @in_coding_list_lock: protects in_coding_list
- * @out_coding_list_lock: protects out_coding_list
- * @fragments: array with heads for fragment chains
- * @vlan_list: a list of orig_node_vlan structs, one per VLAN served by the
- *  originator represented by this object
- * @vlan_list_lock: lock protecting vlan_list
- * @bat_iv: B.A.T.M.A.N. IV private structure
  */
 struct batadv_orig_node {
+	/** @orig: originator ethernet address */
 	u8 orig[ETH_ALEN];
+
+	/** @ifinfo_list: list for routers per outgoing interface */
 	struct hlist_head ifinfo_list;
+
+	/**
+	 * @last_bonding_candidate: pointer to last ifinfo of last used router
+	 */
 	struct batadv_orig_ifinfo *last_bonding_candidate;
+
 #ifdef CONFIG_BATMAN_ADV_DAT
+	/** @dat_addr: address of the orig node in the distributed hash */
 	batadv_dat_addr_t dat_addr;
 #endif
+
+	/** @last_seen: time when last packet from this node was received */
 	unsigned long last_seen;
+
+	/**
+	 * @bcast_seqno_reset: time when the broadcast seqno window was reset
+	 */
 	unsigned long bcast_seqno_reset;
+
 #ifdef CONFIG_BATMAN_ADV_MCAST
-	/* synchronizes mcast tvlv specific orig changes */
+	/**
+	 * @mcast_handler_lock: synchronizes mcast-capability and -flag changes
+	 */
 	spinlock_t mcast_handler_lock;
+
+	/** @mcast_flags: multicast flags announced by the orig node */
 	u8 mcast_flags;
+
+	/**
+	 * @mcast_want_all_unsnoopables_node: a list node for the
+	 *  mcast.want_all_unsnoopables list
+	 */
 	struct hlist_node mcast_want_all_unsnoopables_node;
+
+	/**
+	 * @mcast_want_all_ipv4_node: a list node for the mcast.want_all_ipv4
+	 *  list
+	 */
 	struct hlist_node mcast_want_all_ipv4_node;
+	/**
+	 * @mcast_want_all_ipv6_node: a list node for the mcast.want_all_ipv6
+	 *  list
+	 */
 	struct hlist_node mcast_want_all_ipv6_node;
 #endif
+
+	/** @capabilities: announced capabilities of this originator */
 	unsigned long capabilities;
+
+	/**
+	 * @capa_initialized: bitfield to remember whether a capability was
+	 *  initialized
+	 */
 	unsigned long capa_initialized;
+
+	/** @last_ttvn: last seen translation table version number */
 	atomic_t last_ttvn;
+
+	/** @tt_buff: last tt changeset this node received from the orig node */
 	unsigned char *tt_buff;
+
+	/**
+	 * @tt_buff_len: length of the last tt changeset this node received
+	 *  from the orig node
+	 */
 	s16 tt_buff_len;
-	spinlock_t tt_buff_lock; /* protects tt_buff & tt_buff_len */
-	/* prevents from changing the table while reading it */
+
+	/** @tt_buff_lock: lock that protects tt_buff and tt_buff_len */
+	spinlock_t tt_buff_lock;
+
+	/**
+	 * @tt_lock: prevents the table from being updated while it is read.
+	 *  A table update is made up of two operations (data structure update
+	 *  and CRC/TTVN metadata recalculation) which have to be executed
+	 *  atomically so that no other thread can read the table/metadata
+	 *  in between.
+	 */
 	spinlock_t tt_lock;
+
+	/**
+	 * @bcast_bits: bitfield containing the info which payload broadcast
+	 *  originated from this orig node this host already has seen (relative
+	 *  to last_bcast_seqno)
+	 */
 	DECLARE_BITMAP(bcast_bits, BATADV_TQ_LOCAL_WINDOW_SIZE);
+
+	/**
+	 * @last_bcast_seqno: last broadcast sequence number received by this
+	 *  host
+	 */
 	u32 last_bcast_seqno;
+
+	/**
+	 * @neigh_list: list of potential next hop neighbor towards this orig
+	 *  node
+	 */
 	struct hlist_head neigh_list;
-	/* neigh_list_lock protects: neigh_list, ifinfo_list,
-	 * last_bonding_candidate and router
+
+	/**
+	 * @neigh_list_lock: lock protecting neigh_list, ifinfo_list,
+	 *  last_bonding_candidate and router
 	 */
 	spinlock_t neigh_list_lock;
+
+	/** @hash_entry: hlist node for &batadv_priv.orig_hash */
 	struct hlist_node hash_entry;
+
+	/** @bat_priv: pointer to soft_iface this orig node belongs to */
 	struct batadv_priv *bat_priv;
-	/* bcast_seqno_lock protects: bcast_bits & last_bcast_seqno */
+
+	/** @bcast_seqno_lock: lock protecting bcast_bits & last_bcast_seqno */
 	spinlock_t bcast_seqno_lock;
+
+	/** @refcount: number of contexts the object is used in */
 	struct kref refcount;
+
+	/** @rcu: struct used for freeing in an RCU-safe manner */
 	struct rcu_head rcu;
+
 #ifdef CONFIG_BATMAN_ADV_NC
+	/** @in_coding_list: list of nodes this orig can hear */
 	struct list_head in_coding_list;
+
+	/** @out_coding_list: list of nodes that can hear this orig */
 	struct list_head out_coding_list;
-	spinlock_t in_coding_list_lock; /* Protects in_coding_list */
-	spinlock_t out_coding_list_lock; /* Protects out_coding_list */
+
+	/** @in_coding_list_lock: protects in_coding_list */
+	spinlock_t in_coding_list_lock;
+
+	/** @out_coding_list_lock: protects out_coding_list */
+	spinlock_t out_coding_list_lock;
 #endif
+
+	/** @fragments: array with heads for fragment chains */
 	struct batadv_frag_table_entry fragments[BATADV_FRAG_BUFFER_COUNT];
+
+	/**
+	 * @vlan_list: a list of orig_node_vlan structs, one per VLAN served by
+	 *  the originator represented by this object
+	 */
 	struct hlist_head vlan_list;
-	spinlock_t vlan_list_lock; /* protects vlan_list */
+
+	/** @vlan_list_lock: lock protecting vlan_list */
+	spinlock_t vlan_list_lock;
+
+	/** @bat_iv: B.A.T.M.A.N. IV private structure */
 	struct batadv_orig_bat_iv bat_iv;
 };
 
 /**
  * enum batadv_orig_capabilities - orig node capabilities
- * @BATADV_ORIG_CAPA_HAS_DAT: orig node has distributed arp table enabled
- * @BATADV_ORIG_CAPA_HAS_NC: orig node has network coding enabled
- * @BATADV_ORIG_CAPA_HAS_TT: orig node has tt capability
- * @BATADV_ORIG_CAPA_HAS_MCAST: orig node has some multicast capability
- *  (= orig node announces a tvlv of type BATADV_TVLV_MCAST)
  */
 enum batadv_orig_capabilities {
+	/**
+	 * @BATADV_ORIG_CAPA_HAS_DAT: orig node has distributed arp table
+	 *  enabled
+	 */
 	BATADV_ORIG_CAPA_HAS_DAT,
+
+	/** @BATADV_ORIG_CAPA_HAS_NC: orig node has network coding enabled */
 	BATADV_ORIG_CAPA_HAS_NC,
+
+	/** @BATADV_ORIG_CAPA_HAS_TT: orig node has tt capability */
 	BATADV_ORIG_CAPA_HAS_TT,
+
+	/**
+	 * @BATADV_ORIG_CAPA_HAS_MCAST: orig node has some multicast capability
+	 *  (= orig node announces a tvlv of type BATADV_TVLV_MCAST)
+	 */
 	BATADV_ORIG_CAPA_HAS_MCAST,
 };
 
 /**
  * struct batadv_gw_node - structure for orig nodes announcing gw capabilities
- * @list: list node for batadv_priv_gw::list
- * @orig_node: pointer to corresponding orig node
- * @bandwidth_down: advertised uplink download bandwidth
- * @bandwidth_up: advertised uplink upload bandwidth
- * @refcount: number of contexts the object is used
- * @rcu: struct used for freeing in an RCU-safe manner
  */
 struct batadv_gw_node {
+	/** @list: list node for &batadv_priv_gw.list */
 	struct hlist_node list;
+
+	/** @orig_node: pointer to corresponding orig node */
 	struct batadv_orig_node *orig_node;
+
+	/** @bandwidth_down: advertised uplink download bandwidth */
 	u32 bandwidth_down;
+
+	/** @bandwidth_up: advertised uplink upload bandwidth */
 	u32 bandwidth_up;
+
+	/** @refcount: number of contexts the object is used in */
 	struct kref refcount;
+
+	/** @rcu: struct used for freeing in an RCU-safe manner */
 	struct rcu_head rcu;
 };
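
The next hunk header carries DECLARE_EWMA(throughput, 10, 8) as context; that
macro from linux/average.h generates the struct ewma_throughput behind the
@throughput member documented further down. A minimal, self-contained usage
sketch of the generated helpers (the smooth() wrapper is invented for
illustration):

#include <linux/average.h>

/* Generates struct ewma_throughput plus ewma_throughput_init(),
 * ewma_throughput_add() and ewma_throughput_read(): 10 fractional
 * bits of precision, each new sample weighted 1/8.
 */
DECLARE_EWMA(throughput, 10, 8)

static unsigned long smooth(const unsigned long *samples, int n)
{
	struct ewma_throughput ewma;
	int i;

	ewma_throughput_init(&ewma);

	for (i = 0; i < n; i++)
		ewma_throughput_add(&ewma, samples[i]);

	return ewma_throughput_read(&ewma);
}
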
 
@@ -407,118 +562,161 @@ DECLARE_EWMA(throughput, 10, 8)
 /**
  * struct batadv_hardif_neigh_node_bat_v - B.A.T.M.A.N. V private neighbor
  *  information
- * @throughput: ewma link throughput towards this neighbor
- * @elp_interval: time interval between two ELP transmissions
- * @elp_latest_seqno: latest and best known ELP sequence number
- * @last_unicast_tx: when the last unicast packet has been sent to this neighbor
- * @metric_work: work queue callback item for metric update
  */
 struct batadv_hardif_neigh_node_bat_v {
+	/** @throughput: ewma link throughput towards this neighbor */
 	struct ewma_throughput throughput;
+
+	/** @elp_interval: time interval between two ELP transmissions */
 	u32 elp_interval;
+
+	/** @elp_latest_seqno: latest and best known ELP sequence number */
 	u32 elp_latest_seqno;
+
+	/**
+	 * @last_unicast_tx: when the last unicast packet has been sent to this
+	 *  neighbor
+	 */
 	unsigned long last_unicast_tx;
+
+	/** @metric_work: work queue callback item for metric update */
 	struct work_struct metric_work;
 };
 
 /**
  * struct batadv_hardif_neigh_node - unique neighbor per hard-interface
- * @list: list node for batadv_hard_iface::neigh_list
- * @addr: the MAC address of the neighboring interface
- * @orig: the address of the originator this neighbor node belongs to
- * @if_incoming: pointer to incoming hard-interface
- * @last_seen: when last packet via this neighbor was received
- * @bat_v: B.A.T.M.A.N. V private data
- * @refcount: number of contexts the object is used
- * @rcu: struct used for freeing in a RCU-safe manner
  */
 struct batadv_hardif_neigh_node {
+	/** @list: list node for &batadv_hard_iface.neigh_list */
 	struct hlist_node list;
+
+	/** @addr: the MAC address of the neighboring interface */
 	u8 addr[ETH_ALEN];
+
+	/**
+	 * @orig: the address of the originator this neighbor node belongs to
+	 */
 	u8 orig[ETH_ALEN];
+
+	/** @if_incoming: pointer to incoming hard-interface */
 	struct batadv_hard_iface *if_incoming;
+
+	/** @last_seen: when last packet via this neighbor was received */
 	unsigned long last_seen;
+
 #ifdef CONFIG_BATMAN_ADV_BATMAN_V
+	/** @bat_v: B.A.T.M.A.N. V private data */
 	struct batadv_hardif_neigh_node_bat_v bat_v;
 #endif
+
+	/** @refcount: number of contexts the object is used in */
 	struct kref refcount;
+
+	/** @rcu: struct used for freeing in a RCU-safe manner */
 	struct rcu_head rcu;
 };
 
 /**
  * struct batadv_neigh_node - structure for single hops neighbors
- * @list: list node for batadv_orig_node::neigh_list
- * @orig_node: pointer to corresponding orig_node
- * @addr: the MAC address of the neighboring interface
- * @ifinfo_list: list for routing metrics per outgoing interface
- * @ifinfo_lock: lock protecting private ifinfo members and list
- * @if_incoming: pointer to incoming hard-interface
- * @last_seen: when last packet via this neighbor was received
- * @hardif_neigh: hardif_neigh of this neighbor
- * @refcount: number of contexts the object is used
- * @rcu: struct used for freeing in an RCU-safe manner
  */
 struct batadv_neigh_node {
+	/** @list: list node for &batadv_orig_node.neigh_list */
 	struct hlist_node list;
+
+	/** @orig_node: pointer to corresponding orig_node */
 	struct batadv_orig_node *orig_node;
+
+	/** @addr: the MAC address of the neighboring interface */
 	u8 addr[ETH_ALEN];
+
+	/** @ifinfo_list: list for routing metrics per outgoing interface */
 	struct hlist_head ifinfo_list;
-	spinlock_t ifinfo_lock;	/* protects ifinfo_list and its members */
+
+	/** @ifinfo_lock: lock protecting ifinfo_list and its members */
+	spinlock_t ifinfo_lock;
+
+	/** @if_incoming: pointer to incoming hard-interface */
 	struct batadv_hard_iface *if_incoming;
+
+	/** @last_seen: when last packet via this neighbor was received */
 	unsigned long last_seen;
+
+	/** @hardif_neigh: hardif_neigh of this neighbor */
 	struct batadv_hardif_neigh_node *hardif_neigh;
+
+	/** @refcount: number of contexts the object is used in */
 	struct kref refcount;
+
+	/** @rcu: struct used for freeing in an RCU-safe manner */
 	struct rcu_head rcu;
 };
 
 /**
  * struct batadv_neigh_ifinfo_bat_iv - neighbor information per outgoing
  *  interface for B.A.T.M.A.N. IV
- * @tq_recv: ring buffer of received TQ values from this neigh node
- * @tq_index: ring buffer index
- * @tq_avg: averaged tq of all tq values in the ring buffer (tq_recv)
- * @real_bits: bitfield containing the number of OGMs received from this neigh
- *  node (relative to orig_node->last_real_seqno)
- * @real_packet_count: counted result of real_bits
  */
 struct batadv_neigh_ifinfo_bat_iv {
+	/** @tq_recv: ring buffer of received TQ values from this neigh node */
 	u8 tq_recv[BATADV_TQ_GLOBAL_WINDOW_SIZE];
+
+	/** @tq_index: ring buffer index */
 	u8 tq_index;
+
+	/**
+	 * @tq_avg: averaged tq of all tq values in the ring buffer (tq_recv)
+	 */
 	u8 tq_avg;
+
+	/**
+	 * @real_bits: bitfield containing the number of OGMs received from this
+	 *  neigh node (relative to orig_node->last_real_seqno)
+	 */
 	DECLARE_BITMAP(real_bits, BATADV_TQ_LOCAL_WINDOW_SIZE);
+
+	/** @real_packet_count: counted result of real_bits */
 	u8 real_packet_count;
 };
 
 /**
  * struct batadv_neigh_ifinfo_bat_v - neighbor information per outgoing
  *  interface for B.A.T.M.A.N. V
- * @throughput: last throughput metric received from originator via this neigh
- * @last_seqno: last sequence number known for this neighbor
  */
 struct batadv_neigh_ifinfo_bat_v {
+	/**
+	 * @throughput: last throughput metric received from originator via this
+	 *  neigh
+	 */
 	u32 throughput;
+
+	/** @last_seqno: last sequence number known for this neighbor */
 	u32 last_seqno;
 };
 
 /**
  * struct batadv_neigh_ifinfo - neighbor information per outgoing interface
- * @list: list node for batadv_neigh_node::ifinfo_list
- * @if_outgoing: pointer to outgoing hard-interface
- * @bat_iv: B.A.T.M.A.N. IV private structure
- * @bat_v: B.A.T.M.A.N. V private data
- * @last_ttl: last received ttl from this neigh node
- * @refcount: number of contexts the object is used
- * @rcu: struct used for freeing in a RCU-safe manner
  */
 struct batadv_neigh_ifinfo {
+	/** @list: list node for &batadv_neigh_node.ifinfo_list */
 	struct hlist_node list;
+
+	/** @if_outgoing: pointer to outgoing hard-interface */
 	struct batadv_hard_iface *if_outgoing;
+
+	/** @bat_iv: B.A.T.M.A.N. IV private structure */
 	struct batadv_neigh_ifinfo_bat_iv bat_iv;
+
 #ifdef CONFIG_BATMAN_ADV_BATMAN_V
+	/** @bat_v: B.A.T.M.A.N. V private data */
 	struct batadv_neigh_ifinfo_bat_v bat_v;
 #endif
+
+	/** @last_ttl: last received ttl from this neigh node */
 	u8 last_ttl;
+
+	/** @refcount: number of contexts the object is used in */
 	struct kref refcount;
+
+	/** @rcu: struct used for freeing in a RCU-safe manner */
 	struct rcu_head rcu;
 };
 
@@ -526,148 +724,278 @@ struct batadv_neigh_ifinfo {
 
 /**
  * struct batadv_bcast_duplist_entry - structure for LAN broadcast suppression
- * @orig: mac address of orig node orginating the broadcast
- * @crc: crc32 checksum of broadcast payload
- * @entrytime: time when the broadcast packet was received
  */
 struct batadv_bcast_duplist_entry {
+	/** @orig: mac address of orig node originating the broadcast */
 	u8 orig[ETH_ALEN];
+
+	/** @crc: crc32 checksum of broadcast payload */
 	__be32 crc;
+
+	/** @entrytime: time when the broadcast packet was received */
 	unsigned long entrytime;
 };
 #endif
 
 /**
  * enum batadv_counters - indices for traffic counters
- * @BATADV_CNT_TX: transmitted payload traffic packet counter
- * @BATADV_CNT_TX_BYTES: transmitted payload traffic bytes counter
- * @BATADV_CNT_TX_DROPPED: dropped transmission payload traffic packet counter
- * @BATADV_CNT_RX: received payload traffic packet counter
- * @BATADV_CNT_RX_BYTES: received payload traffic bytes counter
- * @BATADV_CNT_FORWARD: forwarded payload traffic packet counter
- * @BATADV_CNT_FORWARD_BYTES: forwarded payload traffic bytes counter
- * @BATADV_CNT_MGMT_TX: transmitted routing protocol traffic packet counter
- * @BATADV_CNT_MGMT_TX_BYTES: transmitted routing protocol traffic bytes counter
- * @BATADV_CNT_MGMT_RX: received routing protocol traffic packet counter
- * @BATADV_CNT_MGMT_RX_BYTES: received routing protocol traffic bytes counter
- * @BATADV_CNT_FRAG_TX: transmitted fragment traffic packet counter
- * @BATADV_CNT_FRAG_TX_BYTES: transmitted fragment traffic bytes counter
- * @BATADV_CNT_FRAG_RX: received fragment traffic packet counter
- * @BATADV_CNT_FRAG_RX_BYTES: received fragment traffic bytes counter
- * @BATADV_CNT_FRAG_FWD: forwarded fragment traffic packet counter
- * @BATADV_CNT_FRAG_FWD_BYTES: forwarded fragment traffic bytes counter
- * @BATADV_CNT_TT_REQUEST_TX: transmitted tt req traffic packet counter
- * @BATADV_CNT_TT_REQUEST_RX: received tt req traffic packet counter
- * @BATADV_CNT_TT_RESPONSE_TX: transmitted tt resp traffic packet counter
- * @BATADV_CNT_TT_RESPONSE_RX: received tt resp traffic packet counter
- * @BATADV_CNT_TT_ROAM_ADV_TX: transmitted tt roam traffic packet counter
- * @BATADV_CNT_TT_ROAM_ADV_RX: received tt roam traffic packet counter
- * @BATADV_CNT_DAT_GET_TX: transmitted dht GET traffic packet counter
- * @BATADV_CNT_DAT_GET_RX: received dht GET traffic packet counter
- * @BATADV_CNT_DAT_PUT_TX: transmitted dht PUT traffic packet counter
- * @BATADV_CNT_DAT_PUT_RX: received dht PUT traffic packet counter
- * @BATADV_CNT_DAT_CACHED_REPLY_TX: transmitted dat cache reply traffic packet
- *  counter
- * @BATADV_CNT_NC_CODE: transmitted nc-combined traffic packet counter
- * @BATADV_CNT_NC_CODE_BYTES: transmitted nc-combined traffic bytes counter
- * @BATADV_CNT_NC_RECODE: transmitted nc-recombined traffic packet counter
- * @BATADV_CNT_NC_RECODE_BYTES: transmitted nc-recombined traffic bytes counter
- * @BATADV_CNT_NC_BUFFER: counter for packets buffered for later nc decoding
- * @BATADV_CNT_NC_DECODE: received and nc-decoded traffic packet counter
- * @BATADV_CNT_NC_DECODE_BYTES: received and nc-decoded traffic bytes counter
- * @BATADV_CNT_NC_DECODE_FAILED: received and decode-failed traffic packet
- *  counter
- * @BATADV_CNT_NC_SNIFFED: counter for nc-decoded packets received in promisc
- *  mode.
- * @BATADV_CNT_NUM: number of traffic counters
  */
 enum batadv_counters {
+	/** @BATADV_CNT_TX: transmitted payload traffic packet counter */
 	BATADV_CNT_TX,
+
+	/** @BATADV_CNT_TX_BYTES: transmitted payload traffic bytes counter */
 	BATADV_CNT_TX_BYTES,
+
+	/**
+	 * @BATADV_CNT_TX_DROPPED: dropped transmission payload traffic packet
+	 *  counter
+	 */
 	BATADV_CNT_TX_DROPPED,
+
+	/** @BATADV_CNT_RX: received payload traffic packet counter */
 	BATADV_CNT_RX,
+
+	/** @BATADV_CNT_RX_BYTES: received payload traffic bytes counter */
 	BATADV_CNT_RX_BYTES,
+
+	/** @BATADV_CNT_FORWARD: forwarded payload traffic packet counter */
 	BATADV_CNT_FORWARD,
+
+	/**
+	 * @BATADV_CNT_FORWARD_BYTES: forwarded payload traffic bytes counter
+	 */
 	BATADV_CNT_FORWARD_BYTES,
+
+	/**
+	 * @BATADV_CNT_MGMT_TX: transmitted routing protocol traffic packet
+	 *  counter
+	 */
 	BATADV_CNT_MGMT_TX,
+
+	/**
+	 * @BATADV_CNT_MGMT_TX_BYTES: transmitted routing protocol traffic bytes
+	 *  counter
+	 */
 	BATADV_CNT_MGMT_TX_BYTES,
+
+	/**
+	 * @BATADV_CNT_MGMT_RX: received routing protocol traffic packet counter
+	 */
 	BATADV_CNT_MGMT_RX,
+
+	/**
+	 * @BATADV_CNT_MGMT_RX_BYTES: received routing protocol traffic bytes
+	 *  counter
+	 */
 	BATADV_CNT_MGMT_RX_BYTES,
+
+	/** @BATADV_CNT_FRAG_TX: transmitted fragment traffic packet counter */
 	BATADV_CNT_FRAG_TX,
+
+	/**
+	 * @BATADV_CNT_FRAG_TX_BYTES: transmitted fragment traffic bytes counter
+	 */
 	BATADV_CNT_FRAG_TX_BYTES,
+
+	/** @BATADV_CNT_FRAG_RX: received fragment traffic packet counter */
 	BATADV_CNT_FRAG_RX,
+
+	/**
+	 * @BATADV_CNT_FRAG_RX_BYTES: received fragment traffic bytes counter
+	 */
 	BATADV_CNT_FRAG_RX_BYTES,
+
+	/** @BATADV_CNT_FRAG_FWD: forwarded fragment traffic packet counter */
 	BATADV_CNT_FRAG_FWD,
+
+	/**
+	 * @BATADV_CNT_FRAG_FWD_BYTES: forwarded fragment traffic bytes counter
+	 */
 	BATADV_CNT_FRAG_FWD_BYTES,
+
+	/**
+	 * @BATADV_CNT_TT_REQUEST_TX: transmitted tt req traffic packet counter
+	 */
 	BATADV_CNT_TT_REQUEST_TX,
+
+	/** @BATADV_CNT_TT_REQUEST_RX: received tt req traffic packet counter */
 	BATADV_CNT_TT_REQUEST_RX,
+
+	/**
+	 * @BATADV_CNT_TT_RESPONSE_TX: transmitted tt resp traffic packet
+	 *  counter
+	 */
 	BATADV_CNT_TT_RESPONSE_TX,
+
+	/**
+	 * @BATADV_CNT_TT_RESPONSE_RX: received tt resp traffic packet counter
+	 */
 	BATADV_CNT_TT_RESPONSE_RX,
+
+	/**
+	 * @BATADV_CNT_TT_ROAM_ADV_TX: transmitted tt roam traffic packet
+	 *  counter
+	 */
 	BATADV_CNT_TT_ROAM_ADV_TX,
+
+	/**
+	 * @BATADV_CNT_TT_ROAM_ADV_RX: received tt roam traffic packet counter
+	 */
 	BATADV_CNT_TT_ROAM_ADV_RX,
+
 #ifdef CONFIG_BATMAN_ADV_DAT
+	/**
+	 * @BATADV_CNT_DAT_GET_TX: transmitted dht GET traffic packet counter
+	 */
 	BATADV_CNT_DAT_GET_TX,
+
+	/** @BATADV_CNT_DAT_GET_RX: received dht GET traffic packet counter */
 	BATADV_CNT_DAT_GET_RX,
+
+	/**
+	 * @BATADV_CNT_DAT_PUT_TX: transmitted dht PUT traffic packet counter
+	 */
 	BATADV_CNT_DAT_PUT_TX,
+
+	/** @BATADV_CNT_DAT_PUT_RX: received dht PUT traffic packet counter */
 	BATADV_CNT_DAT_PUT_RX,
+
+	/**
+	 * @BATADV_CNT_DAT_CACHED_REPLY_TX: transmitted dat cache reply traffic
+	 *  packet counter
+	 */
 	BATADV_CNT_DAT_CACHED_REPLY_TX,
 #endif
+
 #ifdef CONFIG_BATMAN_ADV_NC
+	/**
+	 * @BATADV_CNT_NC_CODE: transmitted nc-combined traffic packet counter
+	 */
 	BATADV_CNT_NC_CODE,
+
+	/**
+	 * @BATADV_CNT_NC_CODE_BYTES: transmitted nc-combined traffic bytes
+	 *  counter
+	 */
 	BATADV_CNT_NC_CODE_BYTES,
+
+	/**
+	 * @BATADV_CNT_NC_RECODE: transmitted nc-recombined traffic packet
+	 *  counter
+	 */
 	BATADV_CNT_NC_RECODE,
+
+	/**
+	 * @BATADV_CNT_NC_RECODE_BYTES: transmitted nc-recombined traffic bytes
+	 *  counter
+	 */
 	BATADV_CNT_NC_RECODE_BYTES,
+
+	/**
+	 * @BATADV_CNT_NC_BUFFER: counter for packets buffered for later nc
+	 *  decoding
+	 */
 	BATADV_CNT_NC_BUFFER,
+
+	/**
+	 * @BATADV_CNT_NC_DECODE: received and nc-decoded traffic packet counter
+	 */
 	BATADV_CNT_NC_DECODE,
+
+	/**
+	 * @BATADV_CNT_NC_DECODE_BYTES: received and nc-decoded traffic bytes
+	 *  counter
+	 */
 	BATADV_CNT_NC_DECODE_BYTES,
+
+	/**
+	 * @BATADV_CNT_NC_DECODE_FAILED: received and decode-failed traffic
+	 *  packet counter
+	 */
 	BATADV_CNT_NC_DECODE_FAILED,
+
+	/**
+	 * @BATADV_CNT_NC_SNIFFED: counter for nc-decoded packets received in
+	 *  promisc mode.
+	 */
 	BATADV_CNT_NC_SNIFFED,
 #endif
+
+	/** @BATADV_CNT_NUM: number of traffic counters */
 	BATADV_CNT_NUM,
 };
 
 /**
  * struct batadv_priv_tt - per mesh interface translation table data
- * @vn: translation table version number
- * @ogm_append_cnt: counter of number of OGMs containing the local tt diff
- * @local_changes: changes registered in an originator interval
- * @changes_list: tracks tt local changes within an originator interval
- * @local_hash: local translation table hash table
- * @global_hash: global translation table hash table
- * @req_list: list of pending & unanswered tt_requests
- * @roam_list: list of the last roaming events of each client limiting the
- *  number of roaming events to avoid route flapping
- * @changes_list_lock: lock protecting changes_list
- * @req_list_lock: lock protecting req_list
- * @roam_list_lock: lock protecting roam_list
- * @last_changeset: last tt changeset this host has generated
- * @last_changeset_len: length of last tt changeset this host has generated
- * @last_changeset_lock: lock protecting last_changeset & last_changeset_len
- * @commit_lock: prevents from executing a local TT commit while reading the
- *  local table. The local TT commit is made up by two operations (data
- *  structure update and metdata -CRC/TTVN- recalculation) and they have to be
- *  executed atomically in order to avoid another thread to read the
- *  table/metadata between those.
- * @work: work queue callback item for translation table purging
  */
 struct batadv_priv_tt {
+	/** @vn: translation table version number */
 	atomic_t vn;
+
+	/**
+	 * @ogm_append_cnt: counter of OGMs containing the local tt diff
+	 */
 	atomic_t ogm_append_cnt;
+
+	/** @local_changes: changes registered in an originator interval */
 	atomic_t local_changes;
+
+	/**
+	 * @changes_list: tracks tt local changes within an originator interval
+	 */
 	struct list_head changes_list;
+
+	/** @local_hash: local translation table hash table */
 	struct batadv_hashtable *local_hash;
+
+	/** @global_hash: global translation table hash table */
 	struct batadv_hashtable *global_hash;
+
+	/** @req_list: list of pending & unanswered tt_requests */
 	struct hlist_head req_list;
+
+	/**
+	 * @roam_list: list of the last roaming events of each client limiting
+	 *  the number of roaming events to avoid route flapping
+	 */
 	struct list_head roam_list;
-	spinlock_t changes_list_lock; /* protects changes */
-	spinlock_t req_list_lock; /* protects req_list */
-	spinlock_t roam_list_lock; /* protects roam_list */
+
+	/** @changes_list_lock: lock protecting changes_list */
+	spinlock_t changes_list_lock;
+
+	/** @req_list_lock: lock protecting req_list */
+	spinlock_t req_list_lock;
+
+	/** @roam_list_lock: lock protecting roam_list */
+	spinlock_t roam_list_lock;
+
+	/** @last_changeset: last tt changeset this host has generated */
 	unsigned char *last_changeset;
+
+	/**
+	 * @last_changeset_len: length of last tt changeset this host has
+	 *  generated
+	 */
 	s16 last_changeset_len;
-	/* protects last_changeset & last_changeset_len */
+
+	/**
+	 * @last_changeset_lock: lock protecting last_changeset &
+	 *  last_changeset_len
+	 */
 	spinlock_t last_changeset_lock;
-	/* prevents from executing a commit while reading the table */
+
+	/**
+	 * @commit_lock: prevents a local TT commit from being executed while
+	 *  the local table is read. The local TT commit is made up of two
+	 *  operations (data structure update and CRC/TTVN metadata
+	 *  recalculation) which have to be executed atomically so that no
+	 *  other thread can read the table/metadata in between.
+	 */
 	spinlock_t commit_lock;
+
+	/** @work: work queue callback item for translation table purging */
 	struct delayed_work work;
 };
 
@@ -675,31 +1003,57 @@ struct batadv_priv_tt {
 
 /**
  * struct batadv_priv_bla - per mesh interface bridge loop avoidance data
- * @num_requests: number of bla requests in flight
- * @claim_hash: hash table containing mesh nodes this host has claimed
- * @backbone_hash: hash table containing all detected backbone gateways
- * @loopdetect_addr: MAC address used for own loopdetection frames
- * @loopdetect_lasttime: time when the loopdetection frames were sent
- * @loopdetect_next: how many periods to wait for the next loopdetect process
- * @bcast_duplist: recently received broadcast packets array (for broadcast
- *  duplicate suppression)
- * @bcast_duplist_curr: index of last broadcast packet added to bcast_duplist
- * @bcast_duplist_lock: lock protecting bcast_duplist & bcast_duplist_curr
- * @claim_dest: local claim data (e.g. claim group)
- * @work: work queue callback item for cleanups & bla announcements
  */
 struct batadv_priv_bla {
+	/** @num_requests: number of bla requests in flight */
 	atomic_t num_requests;
+
+	/**
+	 * @claim_hash: hash table containing mesh nodes this host has claimed
+	 */
 	struct batadv_hashtable *claim_hash;
+
+	/**
+	 * @backbone_hash: hash table containing all detected backbone gateways
+	 */
 	struct batadv_hashtable *backbone_hash;
+
+	/** @loopdetect_addr: MAC address used for own loopdetection frames */
 	u8 loopdetect_addr[ETH_ALEN];
+
+	/**
+	 * @loopdetect_lasttime: time when the loopdetection frames were sent
+	 */
 	unsigned long loopdetect_lasttime;
+
+	/**
+	 * @loopdetect_next: how many periods to wait for the next loopdetect
+	 *  process
+	 */
 	atomic_t loopdetect_next;
+
+	/**
+	 * @bcast_duplist: recently received broadcast packets array (for
+	 *  broadcast duplicate suppression)
+	 */
 	struct batadv_bcast_duplist_entry bcast_duplist[BATADV_DUPLIST_SIZE];
+
+	/**
+	 * @bcast_duplist_curr: index of last broadcast packet added to
+	 *  bcast_duplist
+	 */
 	int bcast_duplist_curr;
-	/* protects bcast_duplist & bcast_duplist_curr */
+
+	/**
+	 * @bcast_duplist_lock: lock protecting bcast_duplist &
+	 *  bcast_duplist_curr
+	 */
 	spinlock_t bcast_duplist_lock;
+
+	/** @claim_dest: local claim data (e.g. claim group) */
 	struct batadv_bla_claim_dst claim_dest;
+
+	/** @work: work queue callback item for cleanups & bla announcements */
 	struct delayed_work work;
 };
 #endif
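
Almost every structure in this file pairs a @refcount kref with an @rcu head.
The put/release pattern behind that pairing is the one already visible in the
tvlv.c hunks earlier in this diff (e.g. batadv_tvlv_handler_put()); sketched
here on an invented struct my_obj:

#include <linux/kernel.h>
#include <linux/kref.h>
#include <linux/rcupdate.h>
#include <linux/slab.h>

struct my_obj {
	struct kref refcount;
	struct rcu_head rcu;
};

/* called once the last reference has been dropped */
static void my_obj_release(struct kref *ref)
{
	struct my_obj *obj = container_of(ref, struct my_obj, refcount);

	/* unlink from any lists here, then free after an RCU grace period */
	kfree_rcu(obj, rcu);
}

static void my_obj_put(struct my_obj *obj)
{
	kref_put(&obj->refcount, my_obj_release);
}
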
@@ -708,68 +1062,94 @@ struct batadv_priv_bla {
 
 /**
  * struct batadv_priv_debug_log - debug logging data
- * @log_buff: buffer holding the logs (ring bufer)
- * @log_start: index of next character to read
- * @log_end: index of next character to write
- * @lock: lock protecting log_buff, log_start & log_end
- * @queue_wait: log reader's wait queue
  */
 struct batadv_priv_debug_log {
+	/** @log_buff: buffer holding the logs (ring buffer) */
 	char log_buff[BATADV_LOG_BUF_LEN];
+
+	/** @log_start: index of next character to read */
 	unsigned long log_start;
+
+	/** @log_end: index of next character to write */
 	unsigned long log_end;
-	spinlock_t lock; /* protects log_buff, log_start and log_end */
+
+	/** @lock: lock protecting log_buff, log_start & log_end */
+	spinlock_t lock;
+
+	/** @queue_wait: log reader's wait queue */
 	wait_queue_head_t queue_wait;
 };
 #endif
 
 /**
  * struct batadv_priv_gw - per mesh interface gateway data
- * @gateway_list: list of available gateway nodes
- * @list_lock: lock protecting gateway_list & curr_gw
- * @curr_gw: pointer to currently selected gateway node
- * @mode: gateway operation: off, client or server (see batadv_gw_modes)
- * @sel_class: gateway selection class (applies if gw_mode client)
- * @bandwidth_down: advertised uplink download bandwidth (if gw_mode server)
- * @bandwidth_up: advertised uplink upload bandwidth (if gw_mode server)
- * @reselect: bool indicating a gateway re-selection is in progress
  */
 struct batadv_priv_gw {
+	/** @gateway_list: list of available gateway nodes */
 	struct hlist_head gateway_list;
-	spinlock_t list_lock; /* protects gateway_list & curr_gw */
-	struct batadv_gw_node __rcu *curr_gw;  /* rcu protected pointer */
+
+	/** @list_lock: lock protecting gateway_list & curr_gw */
+	spinlock_t list_lock;
+
+	/** @curr_gw: pointer to currently selected gateway node */
+	struct batadv_gw_node __rcu *curr_gw;
+
+	/**
+	 * @mode: gateway operation: off, client or server (see batadv_gw_modes)
+	 */
 	atomic_t mode;
+
+	/** @sel_class: gateway selection class (applies if gw_mode client) */
 	atomic_t sel_class;
+
+	/**
+	 * @bandwidth_down: advertised uplink download bandwidth (if gw_mode
+	 *  server)
+	 */
 	atomic_t bandwidth_down;
+
+	/**
+	 * @bandwidth_up: advertised uplink upload bandwidth (if gw_mode server)
+	 */
 	atomic_t bandwidth_up;
+
+	/** @reselect: bool indicating a gateway re-selection is in progress */
 	atomic_t reselect;
 };
 
 /**
  * struct batadv_priv_tvlv - per mesh interface tvlv data
- * @container_list: list of registered tvlv containers to be sent with each OGM
- * @handler_list: list of the various tvlv content handlers
- * @container_list_lock: protects tvlv container list access
- * @handler_list_lock: protects handler list access
  */
 struct batadv_priv_tvlv {
+	/**
+	 * @container_list: list of registered tvlv containers to be sent with
+	 *  each OGM
+	 */
 	struct hlist_head container_list;
+
+	/** @handler_list: list of the various tvlv content handlers */
 	struct hlist_head handler_list;
-	spinlock_t container_list_lock; /* protects container_list */
-	spinlock_t handler_list_lock; /* protects handler_list */
+
+	/** @container_list_lock: protects tvlv container list access */
+	spinlock_t container_list_lock;
+
+	/** @handler_list_lock: protects handler list access */
+	spinlock_t handler_list_lock;
 };
 
 #ifdef CONFIG_BATMAN_ADV_DAT
 
 /**
  * struct batadv_priv_dat - per mesh interface DAT private data
- * @addr: node DAT address
- * @hash: hashtable representing the local ARP cache
- * @work: work queue callback item for cache purging
  */
 struct batadv_priv_dat {
+	/** @addr: node DAT address */
 	batadv_dat_addr_t addr;
+
+	/** @hash: hashtable representing the local ARP cache */
 	struct batadv_hashtable *hash;
+
+	/** @work: work queue callback item for cache purging */
 	struct delayed_work work;
 };
 #endif
@@ -777,375 +1157,582 @@ struct batadv_priv_dat {
 #ifdef CONFIG_BATMAN_ADV_MCAST
 /**
  * struct batadv_mcast_querier_state - IGMP/MLD querier state when bridged
- * @exists: whether a querier exists in the mesh
- * @shadowing: if a querier exists, whether it is potentially shadowing
- *  multicast listeners (i.e. querier is behind our own bridge segment)
  */
 struct batadv_mcast_querier_state {
+	/** @exists: whether a querier exists in the mesh */
 	bool exists;
+
+	/**
+	 * @shadowing: if a querier exists, whether it is potentially shadowing
+	 *  multicast listeners (i.e. querier is behind our own bridge segment)
+	 */
 	bool shadowing;
 };
 
 /**
  * struct batadv_priv_mcast - per mesh interface mcast data
- * @mla_list: list of multicast addresses we are currently announcing via TT
- * @want_all_unsnoopables_list: a list of orig_nodes wanting all unsnoopable
- *  multicast traffic
- * @want_all_ipv4_list: a list of orig_nodes wanting all IPv4 multicast traffic
- * @want_all_ipv6_list: a list of orig_nodes wanting all IPv6 multicast traffic
- * @querier_ipv4: the current state of an IGMP querier in the mesh
- * @querier_ipv6: the current state of an MLD querier in the mesh
- * @flags: the flags we have last sent in our mcast tvlv
- * @enabled: whether the multicast tvlv is currently enabled
- * @bridged: whether the soft interface has a bridge on top
- * @num_disabled: number of nodes that have no mcast tvlv
- * @num_want_all_unsnoopables: number of nodes wanting unsnoopable IP traffic
- * @num_want_all_ipv4: counter for items in want_all_ipv4_list
- * @num_want_all_ipv6: counter for items in want_all_ipv6_list
- * @want_lists_lock: lock for protecting modifications to mcast want lists
- *  (traversals are rcu-locked)
- * @work: work queue callback item for multicast TT and TVLV updates
  */
 struct batadv_priv_mcast {
+	/**
+	 * @mla_list: list of multicast addresses we are currently announcing
+	 *  via TT
+	 */
 	struct hlist_head mla_list; /* see __batadv_mcast_mla_update() */
+
+	/**
+	 * @want_all_unsnoopables_list: a list of orig_nodes wanting all
+	 *  unsnoopable multicast traffic
+	 */
 	struct hlist_head want_all_unsnoopables_list;
+
+	/**
+	 * @want_all_ipv4_list: a list of orig_nodes wanting all IPv4 multicast
+	 *  traffic
+	 */
 	struct hlist_head want_all_ipv4_list;
+
+	/**
+	 * @want_all_ipv6_list: a list of orig_nodes wanting all IPv6 multicast
+	 *  traffic
+	 */
 	struct hlist_head want_all_ipv6_list;
+
+	/** @querier_ipv4: the current state of an IGMP querier in the mesh */
 	struct batadv_mcast_querier_state querier_ipv4;
+
+	/** @querier_ipv6: the current state of an MLD querier in the mesh */
 	struct batadv_mcast_querier_state querier_ipv6;
+
+	/** @flags: the flags we have last sent in our mcast tvlv */
 	u8 flags;
+
+	/** @enabled: whether the multicast tvlv is currently enabled */
 	bool enabled;
+
+	/** @bridged: whether the soft interface has a bridge on top */
 	bool bridged;
+
+	/** @num_disabled: number of nodes that have no mcast tvlv */
 	atomic_t num_disabled;
+
+	/**
+	 * @num_want_all_unsnoopables: number of nodes wanting unsnoopable IP
+	 *  traffic
+	 */
 	atomic_t num_want_all_unsnoopables;
+
+	/** @num_want_all_ipv4: counter for items in want_all_ipv4_list */
 	atomic_t num_want_all_ipv4;
+
+	/** @num_want_all_ipv6: counter for items in want_all_ipv6_list */
 	atomic_t num_want_all_ipv6;
-	/* protects want_all_{unsnoopables,ipv4,ipv6}_list */
+
+	/**
+	 * @want_lists_lock: lock for protecting modifications to mcast's
+	 *  want_all_{unsnoopables,ipv4,ipv6}_list (traversals are rcu-locked)
+	 */
 	spinlock_t want_lists_lock;
+
+	/** @work: work queue callback item for multicast TT and TVLV updates */
 	struct delayed_work work;
 };
 #endif
 
 /**
  * struct batadv_priv_nc - per mesh interface network coding private data
- * @work: work queue callback item for cleanup
- * @debug_dir: dentry for nc subdir in batman-adv directory in debugfs
- * @min_tq: only consider neighbors for encoding if neigh_tq > min_tq
- * @max_fwd_delay: maximum packet forward delay to allow coding of packets
- * @max_buffer_time: buffer time for sniffed packets used to decoding
- * @timestamp_fwd_flush: timestamp of last forward packet queue flush
- * @timestamp_sniffed_purge: timestamp of last sniffed packet queue purge
- * @coding_hash: Hash table used to buffer skbs while waiting for another
- *  incoming skb to code it with. Skbs are added to the buffer just before being
- *  forwarded in routing.c
- * @decoding_hash: Hash table used to buffer skbs that might be needed to decode
- *  a received coded skb. The buffer is used for 1) skbs arriving on the
- *  soft-interface; 2) skbs overheard on the hard-interface; and 3) skbs
- *  forwarded by batman-adv.
  */
 struct batadv_priv_nc {
+	/** @work: work queue callback item for cleanup */
 	struct delayed_work work;
+
+	/**
+	 * @debug_dir: dentry for nc subdir in batman-adv directory in debugfs
+	 */
 	struct dentry *debug_dir;
+
+	/**
+	 * @min_tq: only consider neighbors for encoding if neigh_tq > min_tq
+	 */
 	u8 min_tq;
+
+	/**
+	 * @max_fwd_delay: maximum packet forward delay to allow coding of
+	 *  packets
+	 */
 	u32 max_fwd_delay;
+
+	/**
+	 * @max_buffer_time: buffer time for sniffed packets used for decoding
+	 */
 	u32 max_buffer_time;
+
+	/**
+	 * @timestamp_fwd_flush: timestamp of last forward packet queue flush
+	 */
 	unsigned long timestamp_fwd_flush;
+
+	/**
+	 * @timestamp_sniffed_purge: timestamp of last sniffed packet queue
+	 *  purge
+	 */
 	unsigned long timestamp_sniffed_purge;
+
+	/**
+	 * @coding_hash: Hash table used to buffer skbs while waiting for
+	 *  another incoming skb to code it with. Skbs are added to the buffer
+	 *  just before being forwarded in routing.c
+	 */
 	struct batadv_hashtable *coding_hash;
+
+	/**
+	 * @decoding_hash: Hash table used to buffer skbs that might be needed
+	 *  to decode a received coded skb. The buffer is used for 1) skbs
+	 *  arriving on the soft-interface; 2) skbs overheard on the
+	 *  hard-interface; and 3) skbs forwarded by batman-adv.
+	 */
 	struct batadv_hashtable *decoding_hash;
 };
 
 /**
  * struct batadv_tp_unacked - unacked packet meta-information
- * @seqno: seqno of the unacked packet
- * @len: length of the packet
- * @list: list node for batadv_tp_vars::unacked_list
  *
  * This struct is supposed to represent a buffer unacked packet. However, since
  * the purpose of the TP meter is to count the traffic only, there is no need to
  * store the entire sk_buff, the starting offset and the length are enough
  */
 struct batadv_tp_unacked {
+	/** @seqno: seqno of the unacked packet */
 	u32 seqno;
+
+	/** @len: length of the packet */
 	u16 len;
+
+	/** @list: list node for &batadv_tp_vars.unacked_list */
 	struct list_head list;
 };
 
 /**
  * enum batadv_tp_meter_role - Modus in tp meter session
- * @BATADV_TP_RECEIVER: Initialized as receiver
- * @BATADV_TP_SENDER: Initialized as sender
  */
 enum batadv_tp_meter_role {
+	/** @BATADV_TP_RECEIVER: Initialized as receiver */
 	BATADV_TP_RECEIVER,
+
+	/** @BATADV_TP_SENDER: Initialized as sender */
 	BATADV_TP_SENDER
 };
 
 /**
  * struct batadv_tp_vars - tp meter private variables per session
- * @list: list node for bat_priv::tp_list
- * @timer: timer for ack (receiver) and retry (sender)
- * @bat_priv: pointer to the mesh object
- * @start_time: start time in jiffies
- * @other_end: mac address of remote
- * @role: receiver/sender modi
- * @sending: sending binary semaphore: 1 if sending, 0 is not
- * @reason: reason for a stopped session
- * @finish_work: work item for the finishing procedure
- * @test_length: test length in milliseconds
- * @session: TP session identifier
- * @icmp_uid: local ICMP "socket" index
- * @dec_cwnd: decimal part of the cwnd used during linear growth
- * @cwnd: current size of the congestion window
- * @cwnd_lock: lock do protect @cwnd & @dec_cwnd
- * @ss_threshold: Slow Start threshold. Once cwnd exceeds this value the
- *  connection switches to the Congestion Avoidance state
- * @last_acked: last acked byte
- * @last_sent: last sent byte, not yet acked
- * @tot_sent: amount of data sent/ACKed so far
- * @dup_acks: duplicate ACKs counter
- * @fast_recovery: true if in Fast Recovery mode
- * @recover: last sent seqno when entering Fast Recovery
- * @rto: sender timeout
- * @srtt: smoothed RTT scaled by 2^3
- * @rttvar: RTT variation scaled by 2^2
- * @more_bytes: waiting queue anchor when waiting for more ack/retry timeout
- * @prerandom_offset: offset inside the prerandom buffer
- * @prerandom_lock: spinlock protecting access to prerandom_offset
- * @last_recv: last in-order received packet
- * @unacked_list: list of unacked packets (meta-info only)
- * @unacked_lock: protect unacked_list
- * @last_recv_time: time time (jiffies) a msg was received
- * @refcount: number of context where the object is used
- * @rcu: struct used for freeing in an RCU-safe manner
  */
 struct batadv_tp_vars {
+	/** @list: list node for &batadv_priv.tp_list */
 	struct hlist_node list;
+
+	/** @timer: timer for ack (receiver) and retry (sender) */
 	struct timer_list timer;
+
+	/** @bat_priv: pointer to the mesh object */
 	struct batadv_priv *bat_priv;
+
+	/** @start_time: start time in jiffies */
 	unsigned long start_time;
+
+	/** @other_end: mac address of remote */
 	u8 other_end[ETH_ALEN];
+
+	/** @role: receiver/sender mode */
 	enum batadv_tp_meter_role role;
+
+	/** @sending: sending binary semaphore: 1 if sending, 0 if not */
 	atomic_t sending;
+
+	/** @reason: reason for a stopped session */
 	enum batadv_tp_meter_reason reason;
+
+	/** @finish_work: work item for the finishing procedure */
 	struct delayed_work finish_work;
+
+	/** @test_length: test length in milliseconds */
 	u32 test_length;
+
+	/** @session: TP session identifier */
 	u8 session[2];
+
+	/** @icmp_uid: local ICMP "socket" index */
 	u8 icmp_uid;
 
 	/* sender variables */
+
+	/** @dec_cwnd: decimal part of the cwnd used during linear growth */
 	u16 dec_cwnd;
+
+	/** @cwnd: current size of the congestion window */
 	u32 cwnd;
-	spinlock_t cwnd_lock; /* Protects cwnd & dec_cwnd */
+
+	/** @cwnd_lock: lock to protect @cwnd & @dec_cwnd */
+	spinlock_t cwnd_lock;
+
+	/**
+	 * @ss_threshold: Slow Start threshold. Once cwnd exceeds this value the
+	 *  connection switches to the Congestion Avoidance state
+	 */
 	u32 ss_threshold;
+
+	/** @last_acked: last acked byte */
 	atomic_t last_acked;
+
+	/** @last_sent: last sent byte, not yet acked */
 	u32 last_sent;
+
+	/** @tot_sent: amount of data sent/ACKed so far */
 	atomic64_t tot_sent;
+
+	/** @dup_acks: duplicate ACKs counter */
 	atomic_t dup_acks;
+
+	/** @fast_recovery: true if in Fast Recovery mode */
 	bool fast_recovery;
+
+	/** @recover: last sent seqno when entering Fast Recovery */
 	u32 recover;
+
+	/** @rto: sender timeout */
 	u32 rto;
+
+	/** @srtt: smoothed RTT scaled by 2^3 */
 	u32 srtt;
+
+	/** @rttvar: RTT variation scaled by 2^2 */
 	u32 rttvar;
+
+	/**
+	 * @more_bytes: waiting queue anchor when waiting for more ack/retry
+	 *  timeout
+	 */
 	wait_queue_head_t more_bytes;
+
+	/** @prerandom_offset: offset inside the prerandom buffer */
 	u32 prerandom_offset;
-	spinlock_t prerandom_lock; /* Protects prerandom_offset */
+
+	/** @prerandom_lock: spinlock protecting access to prerandom_offset */
+	spinlock_t prerandom_lock;
 
 	/* receiver variables */
+
+	/** @last_recv: last in-order received packet */
 	u32 last_recv;
+
+	/** @unacked_list: list of unacked packets (meta-info only) */
 	struct list_head unacked_list;
-	spinlock_t unacked_lock; /* Protects unacked_list */
+
+	/** @unacked_lock: protect unacked_list */
+	spinlock_t unacked_lock;
+
+	/** @last_recv_time: time (jiffies) the last msg was received */
 	unsigned long last_recv_time;
+
+	/** @refcount: number of contexts where the object is used */
 	struct kref refcount;
+
+	/** @rcu: struct used for freeing in an RCU-safe manner */
 	struct rcu_head rcu;
 };
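
/*
 * Aside: @srtt and @rttvar are fixed-point state for an RFC 6298-style
 * RTT estimator - keeping srtt scaled by 2^3 and rttvar by 2^2 turns
 * the EWMA updates into pure integer shifts. A minimal sketch of such
 * an update (illustration only, assuming u32/s32 from linux/types.h;
 * the real code lives in net/batman-adv/tp_meter.c):
 */
static void tp_rtt_update_sketch(u32 *srtt, u32 *rttvar, u32 *rto, u32 rtt)
{
	if (!*srtt) {
		*srtt = rtt << 3;		/* SRTT = R, scaled by 2^3 */
		*rttvar = rtt << 1;		/* RTTVAR = R/2, scaled by 2^2 */
	} else {
		s32 err = (rtt << 3) - *srtt;	/* 8 * (R - SRTT) */

		*srtt += err >> 3;		/* SRTT += (R - SRTT) / 8 */
		if (err < 0)
			err = -err;
		*rttvar -= *rttvar >> 2;	/* RTTVAR *= 3/4 ... */
		*rttvar += err >> 3;		/* ... then += |R - SRTT| / 4 */
	}
	*rto = (*srtt >> 3) + *rttvar;		/* RTO = SRTT + 4 * RTTVAR */
}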
 
 /**
  * struct batadv_softif_vlan - per VLAN attributes set
- * @bat_priv: pointer to the mesh object
- * @vid: VLAN identifier
- * @kobj: kobject for sysfs vlan subdirectory
- * @ap_isolation: AP isolation state
- * @tt: TT private attributes (VLAN specific)
- * @list: list node for bat_priv::softif_vlan_list
- * @refcount: number of context where this object is currently in use
- * @rcu: struct used for freeing in a RCU-safe manner
  */
 struct batadv_softif_vlan {
+	/** @bat_priv: pointer to the mesh object */
 	struct batadv_priv *bat_priv;
+
+	/** @vid: VLAN identifier */
 	unsigned short vid;
+
+	/** @kobj: kobject for sysfs vlan subdirectory */
 	struct kobject *kobj;
+
+	/** @ap_isolation: AP isolation state */
 	atomic_t ap_isolation;		/* boolean */
+
+	/** @tt: TT private attributes (VLAN specific) */
 	struct batadv_vlan_tt tt;
+
+	/** @list: list node for &batadv_priv.softif_vlan_list */
 	struct hlist_node list;
+
+	/**
+	 * @refcount: number of contexts where this object is currently in use
+	 */
 	struct kref refcount;
+
+	/** @rcu: struct used for freeing in a RCU-safe manner */
 	struct rcu_head rcu;
 };
 
 /**
  * struct batadv_priv_bat_v - B.A.T.M.A.N. V per soft-interface private data
- * @ogm_buff: buffer holding the OGM packet
- * @ogm_buff_len: length of the OGM packet buffer
- * @ogm_seqno: OGM sequence number - used to identify each OGM
- * @ogm_wq: workqueue used to schedule OGM transmissions
  */
 struct batadv_priv_bat_v {
+	/** @ogm_buff: buffer holding the OGM packet */
 	unsigned char *ogm_buff;
+
+	/** @ogm_buff_len: length of the OGM packet buffer */
 	int ogm_buff_len;
+
+	/** @ogm_seqno: OGM sequence number - used to identify each OGM */
 	atomic_t ogm_seqno;
+
+	/** @ogm_wq: workqueue used to schedule OGM transmissions */
 	struct delayed_work ogm_wq;
 };
 
 /**
  * struct batadv_priv - per mesh interface data
- * @mesh_state: current status of the mesh (inactive/active/deactivating)
- * @soft_iface: net device which holds this struct as private data
- * @bat_counters: mesh internal traffic statistic counters (see batadv_counters)
- * @aggregated_ogms: bool indicating whether OGM aggregation is enabled
- * @bonding: bool indicating whether traffic bonding is enabled
- * @fragmentation: bool indicating whether traffic fragmentation is enabled
- * @packet_size_max: max packet size that can be transmitted via
- *  multiple fragmented skbs or a single frame if fragmentation is disabled
- * @frag_seqno: incremental counter to identify chains of egress fragments
- * @bridge_loop_avoidance: bool indicating whether bridge loop avoidance is
- *  enabled
- * @distributed_arp_table: bool indicating whether distributed ARP table is
- *  enabled
- * @multicast_mode: Enable or disable multicast optimizations on this node's
- *  sender/originating side
- * @orig_interval: OGM broadcast interval in milliseconds
- * @hop_penalty: penalty which will be applied to an OGM's tq-field on every hop
- * @log_level: configured log level (see batadv_dbg_level)
- * @isolation_mark: the skb->mark value used to match packets for AP isolation
- * @isolation_mark_mask: bitmask identifying the bits in skb->mark to be used
- *  for the isolation mark
- * @bcast_seqno: last sent broadcast packet sequence number
- * @bcast_queue_left: number of remaining buffered broadcast packet slots
- * @batman_queue_left: number of remaining OGM packet slots
- * @num_ifaces: number of interfaces assigned to this mesh interface
- * @mesh_obj: kobject for sysfs mesh subdirectory
- * @debug_dir: dentry for debugfs batman-adv subdirectory
- * @forw_bat_list: list of aggregated OGMs that will be forwarded
- * @forw_bcast_list: list of broadcast packets that will be rebroadcasted
- * @tp_list: list of tp sessions
- * @tp_num: number of currently active tp sessions
- * @orig_hash: hash table containing mesh participants (orig nodes)
- * @forw_bat_list_lock: lock protecting forw_bat_list
- * @forw_bcast_list_lock: lock protecting forw_bcast_list
- * @tp_list_lock: spinlock protecting @tp_list
- * @orig_work: work queue callback item for orig node purging
- * @primary_if: one of the hard-interfaces assigned to this mesh interface
- *  becomes the primary interface
- * @algo_ops: routing algorithm used by this mesh interface
- * @softif_vlan_list: a list of softif_vlan structs, one per VLAN created on top
- *  of the mesh interface represented by this object
- * @softif_vlan_list_lock: lock protecting softif_vlan_list
- * @bla: bridge loope avoidance data
- * @debug_log: holding debug logging relevant data
- * @gw: gateway data
- * @tt: translation table data
- * @tvlv: type-version-length-value data
- * @dat: distributed arp table data
- * @mcast: multicast data
- * @network_coding: bool indicating whether network coding is enabled
- * @nc: network coding data
- * @bat_v: B.A.T.M.A.N. V per soft-interface private data
  */
 struct batadv_priv {
+	/**
+	 * @mesh_state: current status of the mesh
+	 *  (inactive/active/deactivating)
+	 */
 	atomic_t mesh_state;
+
+	/** @soft_iface: net device which holds this struct as private data */
 	struct net_device *soft_iface;
+
+	/**
+	 * @bat_counters: mesh internal traffic statistic counters (see
+	 *  batadv_counters)
+	 */
 	u64 __percpu *bat_counters; /* Per cpu counters */
+
+	/**
+	 * @aggregated_ogms: bool indicating whether OGM aggregation is enabled
+	 */
 	atomic_t aggregated_ogms;
+
+	/** @bonding: bool indicating whether traffic bonding is enabled */
 	atomic_t bonding;
+
+	/**
+	 * @fragmentation: bool indicating whether traffic fragmentation is
+	 *  enabled
+	 */
 	atomic_t fragmentation;
+
+	/**
+	 * @packet_size_max: max packet size that can be transmitted via
+	 *  multiple fragmented skbs or a single frame if fragmentation is
+	 *  disabled
+	 */
 	atomic_t packet_size_max;
+
+	/**
+	 * @frag_seqno: incremental counter to identify chains of egress
+	 *  fragments
+	 */
 	atomic_t frag_seqno;
+
 #ifdef CONFIG_BATMAN_ADV_BLA
+	/**
+	 * @bridge_loop_avoidance: bool indicating whether bridge loop
+	 *  avoidance is enabled
+	 */
 	atomic_t bridge_loop_avoidance;
 #endif
+
 #ifdef CONFIG_BATMAN_ADV_DAT
+	/**
+	 * @distributed_arp_table: bool indicating whether distributed ARP table
+	 *  is enabled
+	 */
 	atomic_t distributed_arp_table;
 #endif
+
 #ifdef CONFIG_BATMAN_ADV_MCAST
+	/**
+	 * @multicast_mode: Enable or disable multicast optimizations on this
+	 *  node's sender/originating side
+	 */
 	atomic_t multicast_mode;
 #endif
+
+	/** @orig_interval: OGM broadcast interval in milliseconds */
 	atomic_t orig_interval;
+
+	/**
+	 * @hop_penalty: penalty which will be applied to an OGM's tq-field on
+	 *  every hop
+	 */
 	atomic_t hop_penalty;
+
 #ifdef CONFIG_BATMAN_ADV_DEBUG
+	/** @log_level: configured log level (see batadv_dbg_level) */
 	atomic_t log_level;
 #endif
+
+	/**
+	 * @isolation_mark: the skb->mark value used to match packets for AP
+	 *  isolation
+	 */
 	u32 isolation_mark;
+
+	/**
+	 * @isolation_mark_mask: bitmask identifying the bits in skb->mark to be
+	 *  used for the isolation mark
+	 */
 	u32 isolation_mark_mask;
+
+	/** @bcast_seqno: last sent broadcast packet sequence number */
 	atomic_t bcast_seqno;
+
+	/**
+	 * @bcast_queue_left: number of remaining buffered broadcast packet
+	 *  slots
+	 */
 	atomic_t bcast_queue_left;
+
+	/** @batman_queue_left: number of remaining OGM packet slots */
 	atomic_t batman_queue_left;
+
+	/** @num_ifaces: number of interfaces assigned to this mesh interface */
 	char num_ifaces;
+
+	/** @mesh_obj: kobject for sysfs mesh subdirectory */
 	struct kobject *mesh_obj;
+
+	/** @debug_dir: dentry for debugfs batman-adv subdirectory */
 	struct dentry *debug_dir;
+
+	/** @forw_bat_list: list of aggregated OGMs that will be forwarded */
 	struct hlist_head forw_bat_list;
+
+	/**
+	 * @forw_bcast_list: list of broadcast packets that will be
+	 *  rebroadcasted
+	 */
 	struct hlist_head forw_bcast_list;
+
+	/** @tp_list: list of tp sessions */
 	struct hlist_head tp_list;
+
+	/** @orig_hash: hash table containing mesh participants (orig nodes) */
 	struct batadv_hashtable *orig_hash;
-	spinlock_t forw_bat_list_lock; /* protects forw_bat_list */
-	spinlock_t forw_bcast_list_lock; /* protects forw_bcast_list */
-	spinlock_t tp_list_lock; /* protects tp_list */
+
+	/** @forw_bat_list_lock: lock protecting forw_bat_list */
+	spinlock_t forw_bat_list_lock;
+
+	/** @forw_bcast_list_lock: lock protecting forw_bcast_list */
+	spinlock_t forw_bcast_list_lock;
+
+	/** @tp_list_lock: spinlock protecting @tp_list */
+	spinlock_t tp_list_lock;
+
+	/** @tp_num: number of currently active tp sessions */
 	atomic_t tp_num;
+
+	/** @orig_work: work queue callback item for orig node purging */
 	struct delayed_work orig_work;
+
+	/**
+	 * @primary_if: one of the hard-interfaces assigned to this mesh
+	 *  interface becomes the primary interface
+	 */
 	struct batadv_hard_iface __rcu *primary_if;  /* rcu protected pointer */
+
+	/** @algo_ops: routing algorithm used by this mesh interface */
 	struct batadv_algo_ops *algo_ops;
+
+	/**
+	 * @softif_vlan_list: a list of softif_vlan structs, one per VLAN
+	 *  created on top of the mesh interface represented by this object
+	 */
 	struct hlist_head softif_vlan_list;
-	spinlock_t softif_vlan_list_lock; /* protects softif_vlan_list */
+
+	/** @softif_vlan_list_lock: lock protecting softif_vlan_list */
+	spinlock_t softif_vlan_list_lock;
+
 #ifdef CONFIG_BATMAN_ADV_BLA
+	/** @bla: bridge loop avoidance data */
 	struct batadv_priv_bla bla;
 #endif
+
 #ifdef CONFIG_BATMAN_ADV_DEBUG
+	/** @debug_log: holding debug logging relevant data */
 	struct batadv_priv_debug_log *debug_log;
 #endif
+
+	/** @gw: gateway data */
 	struct batadv_priv_gw gw;
+
+	/** @tt: translation table data */
 	struct batadv_priv_tt tt;
+
+	/** @tvlv: type-version-length-value data */
 	struct batadv_priv_tvlv tvlv;
+
 #ifdef CONFIG_BATMAN_ADV_DAT
+	/** @dat: distributed arp table data */
 	struct batadv_priv_dat dat;
 #endif
+
 #ifdef CONFIG_BATMAN_ADV_MCAST
+	/** @mcast: multicast data */
 	struct batadv_priv_mcast mcast;
 #endif
+
 #ifdef CONFIG_BATMAN_ADV_NC
+	/**
+	 * @network_coding: bool indicating whether network coding is enabled
+	 */
 	atomic_t network_coding;
+
+	/** @nc: network coding data */
 	struct batadv_priv_nc nc;
 #endif /* CONFIG_BATMAN_ADV_NC */
+
 #ifdef CONFIG_BATMAN_ADV_BATMAN_V
+	/** @bat_v: B.A.T.M.A.N. V per soft-interface private data */
 	struct batadv_priv_bat_v bat_v;
 #endif
 };
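
/*
 * Aside: @bat_counters holds one u64 slot per counter index on every
 * CPU, so the forwarding hot path can bump statistics without locks or
 * atomics. batman-adv wraps this pattern in batadv_add_counter() and
 * batadv_sum_counter() (main.h); sketched here for illustration:
 */
static inline void add_counter_sketch(struct batadv_priv *bat_priv,
				      size_t idx, size_t count)
{
	/* only the local CPU's copy is touched */
	this_cpu_add(bat_priv->bat_counters[idx], count);
}

static u64 sum_counter_sketch(struct batadv_priv *bat_priv, size_t idx)
{
	u64 sum = 0;
	int cpu;

	/* readers sum all per-CPU copies; a little skew is fine for
	 * best-effort statistics
	 */
	for_each_possible_cpu(cpu)
		sum += per_cpu_ptr(bat_priv->bat_counters, cpu)[idx];

	return sum;
}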
 
 /**
  * struct batadv_socket_client - layer2 icmp socket client data
- * @queue_list: packet queue for packets destined for this socket client
- * @queue_len: number of packets in the packet queue (queue_list)
- * @index: socket client's index in the batadv_socket_client_hash
- * @lock: lock protecting queue_list, queue_len & index
- * @queue_wait: socket client's wait queue
- * @bat_priv: pointer to soft_iface this client belongs to
  */
 struct batadv_socket_client {
+	/**
+	 * @queue_list: packet queue for packets destined for this socket client
+	 */
 	struct list_head queue_list;
+
+	/** @queue_len: number of packets in the packet queue (queue_list) */
 	unsigned int queue_len;
+
+	/** @index: socket client's index in the batadv_socket_client_hash */
 	unsigned char index;
-	spinlock_t lock; /* protects queue_list, queue_len & index */
+
+	/** @lock: lock protecting queue_list, queue_len & index */
+	spinlock_t lock;
+
+	/** @queue_wait: socket client's wait queue */
 	wait_queue_head_t queue_wait;
+
+	/** @bat_priv: pointer to soft_iface this client belongs to */
 	struct batadv_priv *bat_priv;
 };
 
 /**
  * struct batadv_socket_packet - layer2 icmp packet for socket client
- * @list: list node for batadv_socket_client::queue_list
- * @icmp_len: size of the layer2 icmp packet
- * @icmp_packet: layer2 icmp packet
  */
 struct batadv_socket_packet {
+	/** @list: list node for &batadv_socket_client.queue_list */
 	struct list_head list;
+
+	/** @icmp_len: size of the layer2 icmp packet */
 	size_t icmp_len;
+
+	/** @icmp_packet: layer2 icmp packet */
 	u8 icmp_packet[BATADV_ICMP_MAX_PACKET_SIZE];
 };
 
@@ -1153,312 +1740,432 @@ struct batadv_socket_packet {
 
 /**
  * struct batadv_bla_backbone_gw - batman-adv gateway bridged into the LAN
- * @orig: originator address of backbone node (mac address of primary iface)
- * @vid: vlan id this gateway was detected on
- * @hash_entry: hlist node for batadv_priv_bla::backbone_hash
- * @bat_priv: pointer to soft_iface this backbone gateway belongs to
- * @lasttime: last time we heard of this backbone gw
- * @wait_periods: grace time for bridge forward delays and bla group forming at
- *  bootup phase - no bcast traffic is formwared until it has elapsed
- * @request_sent: if this bool is set to true we are out of sync with this
- *  backbone gateway - no bcast traffic is formwared until the situation was
- *  resolved
- * @crc: crc16 checksum over all claims
- * @crc_lock: lock protecting crc
- * @report_work: work struct for reporting detected loops
- * @refcount: number of contexts the object is used
- * @rcu: struct used for freeing in an RCU-safe manner
  */
 struct batadv_bla_backbone_gw {
+	/**
+	 * @orig: originator address of backbone node (mac address of primary
+	 *  iface)
+	 */
 	u8 orig[ETH_ALEN];
+
+	/** @vid: vlan id this gateway was detected on */
 	unsigned short vid;
+
+	/** @hash_entry: hlist node for &batadv_priv_bla.backbone_hash */
 	struct hlist_node hash_entry;
+
+	/** @bat_priv: pointer to soft_iface this backbone gateway belongs to */
 	struct batadv_priv *bat_priv;
+
+	/** @lasttime: last time we heard of this backbone gw */
 	unsigned long lasttime;
+
+	/**
+	 * @wait_periods: grace time for bridge forward delays and bla group
+	 *  forming at bootup phase - no bcast traffic is forwarded until it has
+	 *  elapsed
+	 */
 	atomic_t wait_periods;
+
+	/**
+	 * @request_sent: if this bool is set to true we are out of sync with
+	 *  this backbone gateway - no bcast traffic is forwarded until the
+	 *  situation was resolved
+	 */
 	atomic_t request_sent;
+
+	/** @crc: crc16 checksum over all claims */
 	u16 crc;
-	spinlock_t crc_lock; /* protects crc */
+
+	/** @crc_lock: lock protecting crc */
+	spinlock_t crc_lock;
+
+	/** @report_work: work struct for reporting detected loops */
 	struct work_struct report_work;
+
+	/** @refcount: number of contexts the object is used */
 	struct kref refcount;
+
+	/** @rcu: struct used for freeing in an RCU-safe manner */
 	struct rcu_head rcu;
 };
 
 /**
  * struct batadv_bla_claim - claimed non-mesh client structure
- * @addr: mac address of claimed non-mesh client
- * @vid: vlan id this client was detected on
- * @backbone_gw: pointer to backbone gw claiming this client
- * @backbone_lock: lock protecting backbone_gw pointer
- * @lasttime: last time we heard of claim (locals only)
- * @hash_entry: hlist node for batadv_priv_bla::claim_hash
- * @refcount: number of contexts the object is used
- * @rcu: struct used for freeing in an RCU-safe manner
  */
 struct batadv_bla_claim {
+	/** @addr: mac address of claimed non-mesh client */
 	u8 addr[ETH_ALEN];
+
+	/** @vid: vlan id this client was detected on */
 	unsigned short vid;
+
+	/** @backbone_gw: pointer to backbone gw claiming this client */
 	struct batadv_bla_backbone_gw *backbone_gw;
-	spinlock_t backbone_lock; /* protects backbone_gw */
+
+	/** @backbone_lock: lock protecting backbone_gw pointer */
+	spinlock_t backbone_lock;
+
+	/** @lasttime: last time we heard of claim (locals only) */
 	unsigned long lasttime;
+
+	/** @hash_entry: hlist node for &batadv_priv_bla.claim_hash */
 	struct hlist_node hash_entry;
+
+	/** @rcu: struct used for freeing in an RCU-safe manner */
 	struct rcu_head rcu;
+
+	/** @refcount: number of contexts the object is used */
 	struct kref refcount;
 };
 #endif
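
/*
 * Aside: nearly every object in this file pairs a @refcount kref with
 * an @rcu head. The last kref_put() invokes a release function that
 * defers the actual kfree() past an RCU grace period, so lockless
 * readers traversing the hash lists never touch freed memory. Sketch
 * with hypothetical helper names (the real per-object variants, such
 * as batadv_claim_release(), may drop further references first):
 */
static void claim_release_sketch(struct kref *ref)
{
	struct batadv_bla_claim *claim;

	claim = container_of(ref, struct batadv_bla_claim, refcount);
	kfree_rcu(claim, rcu);	/* freed only after the grace period */
}

static void claim_put_sketch(struct batadv_bla_claim *claim)
{
	kref_put(&claim->refcount, claim_release_sketch);
}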
 
 /**
  * struct batadv_tt_common_entry - tt local & tt global common data
- * @addr: mac address of non-mesh client
- * @vid: VLAN identifier
- * @hash_entry: hlist node for batadv_priv_tt::local_hash or for
- *  batadv_priv_tt::global_hash
- * @flags: various state handling flags (see batadv_tt_client_flags)
- * @added_at: timestamp used for purging stale tt common entries
- * @refcount: number of contexts the object is used
- * @rcu: struct used for freeing in an RCU-safe manner
  */
 struct batadv_tt_common_entry {
+	/** @addr: mac address of non-mesh client */
 	u8 addr[ETH_ALEN];
+
+	/** @vid: VLAN identifier */
 	unsigned short vid;
+
+	/**
+	 * @hash_entry: hlist node for &batadv_priv_tt.local_hash or for
+	 *  &batadv_priv_tt.global_hash
+	 */
 	struct hlist_node hash_entry;
+
+	/** @flags: various state handling flags (see batadv_tt_client_flags) */
 	u16 flags;
+
+	/** @added_at: timestamp used for purging stale tt common entries */
 	unsigned long added_at;
+
+	/** @refcount: number of contexts the object is used */
 	struct kref refcount;
+
+	/** @rcu: struct used for freeing in an RCU-safe manner */
 	struct rcu_head rcu;
 };
 
 /**
  * struct batadv_tt_local_entry - translation table local entry data
- * @common: general translation table data
- * @last_seen: timestamp used for purging stale tt local entries
- * @vlan: soft-interface vlan of the entry
  */
 struct batadv_tt_local_entry {
+	/** @common: general translation table data */
 	struct batadv_tt_common_entry common;
+
+	/** @last_seen: timestamp used for purging stale tt local entries */
 	unsigned long last_seen;
+
+	/** @vlan: soft-interface vlan of the entry */
 	struct batadv_softif_vlan *vlan;
 };
 
 /**
  * struct batadv_tt_global_entry - translation table global entry data
- * @common: general translation table data
- * @orig_list: list of orig nodes announcing this non-mesh client
- * @orig_list_count: number of items in the orig_list
- * @list_lock: lock protecting orig_list
- * @roam_at: time at which TT_GLOBAL_ROAM was set
  */
 struct batadv_tt_global_entry {
+	/** @common: general translation table data */
 	struct batadv_tt_common_entry common;
+
+	/** @orig_list: list of orig nodes announcing this non-mesh client */
 	struct hlist_head orig_list;
+
+	/** @orig_list_count: number of items in the orig_list */
 	atomic_t orig_list_count;
-	spinlock_t list_lock;	/* protects orig_list */
+
+	/** @list_lock: lock protecting orig_list */
+	spinlock_t list_lock;
+
+	/** @roam_at: time at which TT_GLOBAL_ROAM was set */
 	unsigned long roam_at;
 };
 
 /**
  * struct batadv_tt_orig_list_entry - orig node announcing a non-mesh client
- * @orig_node: pointer to orig node announcing this non-mesh client
- * @ttvn: translation table version number which added the non-mesh client
- * @flags: per orig entry TT sync flags
- * @list: list node for batadv_tt_global_entry::orig_list
- * @refcount: number of contexts the object is used
- * @rcu: struct used for freeing in an RCU-safe manner
  */
 struct batadv_tt_orig_list_entry {
+	/** @orig_node: pointer to orig node announcing this non-mesh client */
 	struct batadv_orig_node *orig_node;
+
+	/**
+	 * @ttvn: translation table version number which added the non-mesh
+	 *  client
+	 */
 	u8 ttvn;
+
+	/** @flags: per orig entry TT sync flags */
 	u8 flags;
+
+	/** @list: list node for &batadv_tt_global_entry.orig_list */
 	struct hlist_node list;
+
+	/** @refcount: number of contexts the object is used */
 	struct kref refcount;
+
+	/** @rcu: struct used for freeing in an RCU-safe manner */
 	struct rcu_head rcu;
 };
 
 /**
  * struct batadv_tt_change_node - structure for tt changes occurred
- * @list: list node for batadv_priv_tt::changes_list
- * @change: holds the actual translation table diff data
  */
 struct batadv_tt_change_node {
+	/** @list: list node for &batadv_priv_tt.changes_list */
 	struct list_head list;
+
+	/** @change: holds the actual translation table diff data */
 	struct batadv_tvlv_tt_change change;
 };
 
 /**
  * struct batadv_tt_req_node - data to keep track of the tt requests in flight
- * @addr: mac address address of the originator this request was sent to
- * @issued_at: timestamp used for purging stale tt requests
- * @refcount: number of contexts the object is used by
- * @list: list node for batadv_priv_tt::req_list
  */
 struct batadv_tt_req_node {
+	/**
+	 * @addr: mac address of the originator this request was sent to
+	 */
 	u8 addr[ETH_ALEN];
+
+	/** @issued_at: timestamp used for purging stale tt requests */
 	unsigned long issued_at;
+
+	/** @refcount: number of contexts the object is used by */
 	struct kref refcount;
+
+	/** @list: list node for &batadv_priv_tt.req_list */
 	struct hlist_node list;
 };
 
 /**
  * struct batadv_tt_roam_node - roaming client data
- * @addr: mac address of the client in the roaming phase
- * @counter: number of allowed roaming events per client within a single
- *  OGM interval (changes are committed with each OGM)
- * @first_time: timestamp used for purging stale roaming node entries
- * @list: list node for batadv_priv_tt::roam_list
  */
 struct batadv_tt_roam_node {
+	/** @addr: mac address of the client in the roaming phase */
 	u8 addr[ETH_ALEN];
+
+	/**
+	 * @counter: number of allowed roaming events per client within a single
+	 * OGM interval (changes are committed with each OGM)
+	 */
 	atomic_t counter;
+
+	/**
+	 * @first_time: timestamp used for purging stale roaming node entries
+	 */
 	unsigned long first_time;
+
+	/** @list: list node for &batadv_priv_tt.roam_list */
 	struct list_head list;
 };
 
 /**
  * struct batadv_nc_node - network coding node
- * @list: next and prev pointer for the list handling
- * @addr: the node's mac address
- * @refcount: number of contexts the object is used by
- * @rcu: struct used for freeing in an RCU-safe manner
- * @orig_node: pointer to corresponding orig node struct
- * @last_seen: timestamp of last ogm received from this node
  */
 struct batadv_nc_node {
+	/** @list: next and prev pointer for the list handling */
 	struct list_head list;
+
+	/** @addr: the node's mac address */
 	u8 addr[ETH_ALEN];
+
+	/** @refcount: number of contexts the object is used by */
 	struct kref refcount;
+
+	/** @rcu: struct used for freeing in an RCU-safe manner */
 	struct rcu_head rcu;
+
+	/** @orig_node: pointer to corresponding orig node struct */
 	struct batadv_orig_node *orig_node;
+
+	/** @last_seen: timestamp of last ogm received from this node */
 	unsigned long last_seen;
 };
 
 /**
  * struct batadv_nc_path - network coding path
- * @hash_entry: next and prev pointer for the list handling
- * @rcu: struct used for freeing in an RCU-safe manner
- * @refcount: number of contexts the object is used by
- * @packet_list: list of buffered packets for this path
- * @packet_list_lock: access lock for packet list
- * @next_hop: next hop (destination) of path
- * @prev_hop: previous hop (source) of path
- * @last_valid: timestamp for last validation of path
  */
 struct batadv_nc_path {
+	/** @hash_entry: next and prev pointer for the list handling */
 	struct hlist_node hash_entry;
+
+	/** @rcu: struct used for freeing in an RCU-safe manner */
 	struct rcu_head rcu;
+
+	/** @refcount: number of contexts the object is used by */
 	struct kref refcount;
+
+	/** @packet_list: list of buffered packets for this path */
 	struct list_head packet_list;
-	spinlock_t packet_list_lock; /* Protects packet_list */
+
+	/** @packet_list_lock: access lock for packet list */
+	spinlock_t packet_list_lock;
+
+	/** @next_hop: next hop (destination) of path */
 	u8 next_hop[ETH_ALEN];
+
+	/** @prev_hop: previous hop (source) of path */
 	u8 prev_hop[ETH_ALEN];
+
+	/** @last_valid: timestamp for last validation of path */
 	unsigned long last_valid;
 };
 
 /**
  * struct batadv_nc_packet - network coding packet used when coding and
  *  decoding packets
- * @list: next and prev pointer for the list handling
- * @packet_id: crc32 checksum of skb data
- * @timestamp: field containing the info when the packet was added to path
- * @neigh_node: pointer to original next hop neighbor of skb
- * @skb: skb which can be encoded or used for decoding
- * @nc_path: pointer to path this nc packet is attached to
  */
 struct batadv_nc_packet {
+	/** @list: next and prev pointer for the list handling */
 	struct list_head list;
+
+	/** @packet_id: crc32 checksum of skb data */
 	__be32 packet_id;
+
+	/**
+	 * @timestamp: field containing the info when the packet was added to
+	 *  path
+	 */
 	unsigned long timestamp;
+
+	/** @neigh_node: pointer to original next hop neighbor of skb */
 	struct batadv_neigh_node *neigh_node;
+
+	/** @skb: skb which can be encoded or used for decoding */
 	struct sk_buff *skb;
+
+	/** @nc_path: pointer to path this nc packet is attached to */
 	struct batadv_nc_path *nc_path;
 };
 
 /**
  * struct batadv_skb_cb - control buffer structure used to store private data
  *  relevant to batman-adv in the skb->cb buffer in skbs.
- * @decoded: Marks a skb as decoded, which is checked when searching for coding
- *  opportunities in network-coding.c
- * @num_bcasts: Counter for broadcast packet retransmissions
  */
 struct batadv_skb_cb {
+	/**
+	 * @decoded: Marks a skb as decoded, which is checked when searching for
+	 *  coding opportunities in network-coding.c
+	 */
 	bool decoded;
+
+	/** @num_bcasts: Counter for broadcast packet retransmissions */
 	unsigned int num_bcasts;
 };
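
/*
 * Aside: struct batadv_skb_cb is overlaid on the 48-byte skb->cb
 * scratch area via the usual kernel cast idiom, roughly (hypothetical
 * helper name; the struct must never outgrow sizeof(skb->cb)):
 */
#define BATADV_SKB_CB_SKETCH(__skb) \
	((struct batadv_skb_cb *)&((__skb)->cb[0]))

/* usage: BATADV_SKB_CB_SKETCH(skb)->decoded = true; */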
 
 /**
  * struct batadv_forw_packet - structure for bcast packets to be sent/forwarded
- * @list: list node for batadv_priv::forw_{bat,bcast}_list
- * @cleanup_list: list node for purging functions
- * @send_time: execution time for delayed_work (packet sending)
- * @own: bool for locally generated packets (local OGMs are re-scheduled after
- *  sending)
- * @skb: bcast packet's skb buffer
- * @packet_len: size of aggregated OGM packet inside the skb buffer
- * @direct_link_flags: direct link flags for aggregated OGM packets
- * @num_packets: counter for aggregated OGMv1 packets
- * @delayed_work: work queue callback item for packet sending
- * @if_incoming: pointer to incoming hard-iface or primary iface if
- *  locally generated packet
- * @if_outgoing: packet where the packet should be sent to, or NULL if
- *  unspecified
- * @queue_left: The queue (counter) this packet was applied to
  */
 struct batadv_forw_packet {
+	/**
+	 * @list: list node for &batadv_priv.forw_bat_list and
+	 *  &batadv_priv.forw_bcast_list
+	 */
 	struct hlist_node list;
+
+	/** @cleanup_list: list node for purging functions */
 	struct hlist_node cleanup_list;
+
+	/** @send_time: execution time for delayed_work (packet sending) */
 	unsigned long send_time;
+
+	/**
+	 * @own: bool for locally generated packets (local OGMs are re-scheduled
+	 * after sending)
+	 */
 	u8 own;
+
+	/** @skb: bcast packet's skb buffer */
 	struct sk_buff *skb;
+
+	/** @packet_len: size of aggregated OGM packet inside the skb buffer */
 	u16 packet_len;
+
+	/** @direct_link_flags: direct link flags for aggregated OGM packets */
 	u32 direct_link_flags;
+
+	/** @num_packets: counter for aggregated OGMv1 packets */
 	u8 num_packets;
+
+	/** @delayed_work: work queue callback item for packet sending */
 	struct delayed_work delayed_work;
+
+	/**
+	 * @if_incoming: pointer to incoming hard-iface or primary iface if
+	 *  locally generated packet
+	 */
 	struct batadv_hard_iface *if_incoming;
+
+	/**
+	 * @if_outgoing: interface where the packet should be sent to, or NULL
+	 *  if unspecified
+	 */
 	struct batadv_hard_iface *if_outgoing;
+
+	/** @queue_left: The queue (counter) this packet was applied to */
 	atomic_t *queue_left;
 };
 
 /**
  * struct batadv_algo_iface_ops - mesh algorithm callbacks (interface specific)
- * @activate: start routing mechanisms when hard-interface is brought up
- *  (optional)
- * @enable: init routing info when hard-interface is enabled
- * @disable: de-init routing info when hard-interface is disabled
- * @update_mac: (re-)init mac addresses of the protocol information
- *  belonging to this hard-interface
- * @primary_set: called when primary interface is selected / changed
  */
 struct batadv_algo_iface_ops {
+	/**
+	 * @activate: start routing mechanisms when hard-interface is brought up
+	 *  (optional)
+	 */
 	void (*activate)(struct batadv_hard_iface *hard_iface);
+
+	/** @enable: init routing info when hard-interface is enabled */
 	int (*enable)(struct batadv_hard_iface *hard_iface);
+
+	/** @disable: de-init routing info when hard-interface is disabled */
 	void (*disable)(struct batadv_hard_iface *hard_iface);
+
+	/**
+	 * @update_mac: (re-)init mac addresses of the protocol information
+	 *  belonging to this hard-interface
+	 */
 	void (*update_mac)(struct batadv_hard_iface *hard_iface);
+
+	/** @primary_set: called when primary interface is selected / changed */
 	void (*primary_set)(struct batadv_hard_iface *hard_iface);
 };
 
 /**
  * struct batadv_algo_neigh_ops - mesh algorithm callbacks (neighbour specific)
- * @hardif_init: called on creation of single hop entry
- *  (optional)
- * @cmp: compare the metrics of two neighbors for their respective outgoing
- *  interfaces
- * @is_similar_or_better: check if neigh1 is equally similar or better than
- *  neigh2 for their respective outgoing interface from the metric prospective
- * @print: print the single hop neighbor list (optional)
- * @dump: dump neighbors to a netlink socket (optional)
  */
 struct batadv_algo_neigh_ops {
+	/** @hardif_init: called on creation of single hop entry (optional) */
 	void (*hardif_init)(struct batadv_hardif_neigh_node *neigh);
+
+	/**
+	 * @cmp: compare the metrics of two neighbors for their respective
+	 *  outgoing interfaces
+	 */
 	int (*cmp)(struct batadv_neigh_node *neigh1,
 		   struct batadv_hard_iface *if_outgoing1,
 		   struct batadv_neigh_node *neigh2,
 		   struct batadv_hard_iface *if_outgoing2);
+
+	/**
+	 * @is_similar_or_better: check if neigh1 is equally similar or better
+	 *  than neigh2 for their respective outgoing interface from the metric
+	 *  perspective
+	 */
 	bool (*is_similar_or_better)(struct batadv_neigh_node *neigh1,
 				     struct batadv_hard_iface *if_outgoing1,
 				     struct batadv_neigh_node *neigh2,
 				     struct batadv_hard_iface *if_outgoing2);
+
 #ifdef CONFIG_BATMAN_ADV_DEBUGFS
+	/** @print: print the single hop neighbor list (optional) */
 	void (*print)(struct batadv_priv *priv, struct seq_file *seq);
 #endif
+
+	/** @dump: dump neighbors to a netlink socket (optional) */
 	void (*dump)(struct sk_buff *msg, struct netlink_callback *cb,
 		     struct batadv_priv *priv,
 		     struct batadv_hard_iface *hard_iface);
@@ -1466,24 +2173,36 @@ struct batadv_algo_neigh_ops {
 
 /**
  * struct batadv_algo_orig_ops - mesh algorithm callbacks (originator specific)
- * @free: free the resources allocated by the routing algorithm for an orig_node
- *  object (optional)
- * @add_if: ask the routing algorithm to apply the needed changes to the
- *  orig_node due to a new hard-interface being added into the mesh (optional)
- * @del_if: ask the routing algorithm to apply the needed changes to the
- *  orig_node due to an hard-interface being removed from the mesh (optional)
- * @print: print the originator table (optional)
- * @dump: dump originators to a netlink socket (optional)
  */
 struct batadv_algo_orig_ops {
+	/**
+	 * @free: free the resources allocated by the routing algorithm for an
+	 *  orig_node object (optional)
+	 */
 	void (*free)(struct batadv_orig_node *orig_node);
+
+	/**
+	 * @add_if: ask the routing algorithm to apply the needed changes to the
+	 *  orig_node due to a new hard-interface being added into the mesh
+	 *  (optional)
+	 */
 	int (*add_if)(struct batadv_orig_node *orig_node, int max_if_num);
+
+	/**
+	 * @del_if: ask the routing algorithm to apply the needed changes to the
+	 *  orig_node due to a hard-interface being removed from the mesh
+	 *  (optional)
+	 */
 	int (*del_if)(struct batadv_orig_node *orig_node, int max_if_num,
 		      int del_if_num);
+
 #ifdef CONFIG_BATMAN_ADV_DEBUGFS
+	/** @print: print the originator table (optional) */
 	void (*print)(struct batadv_priv *priv, struct seq_file *seq,
 		      struct batadv_hard_iface *hard_iface);
 #endif
+
+	/** @dump: dump originators to a netlink socket (optional) */
 	void (*dump)(struct sk_buff *msg, struct netlink_callback *cb,
 		     struct batadv_priv *priv,
 		     struct batadv_hard_iface *hard_iface);
@@ -1491,158 +2210,213 @@ struct batadv_algo_orig_ops {
 
 /**
  * struct batadv_algo_gw_ops - mesh algorithm callbacks (GW specific)
- * @init_sel_class: initialize GW selection class (optional)
- * @store_sel_class: parse and stores a new GW selection class (optional)
- * @show_sel_class: prints the current GW selection class (optional)
- * @get_best_gw_node: select the best GW from the list of available nodes
- *  (optional)
- * @is_eligible: check if a newly discovered GW is a potential candidate for
- *  the election as best GW (optional)
- * @print: print the gateway table (optional)
- * @dump: dump gateways to a netlink socket (optional)
  */
 struct batadv_algo_gw_ops {
+	/** @init_sel_class: initialize GW selection class (optional) */
 	void (*init_sel_class)(struct batadv_priv *bat_priv);
+
+	/**
+	 * @store_sel_class: parses and stores a new GW selection class
+	 *  (optional)
+	 */
 	ssize_t (*store_sel_class)(struct batadv_priv *bat_priv, char *buff,
 				   size_t count);
+
+	/** @show_sel_class: prints the current GW selection class (optional) */
 	ssize_t (*show_sel_class)(struct batadv_priv *bat_priv, char *buff);
+
+	/**
+	 * @get_best_gw_node: select the best GW from the list of available
+	 *  nodes (optional)
+	 */
 	struct batadv_gw_node *(*get_best_gw_node)
 		(struct batadv_priv *bat_priv);
+
+	/**
+	 * @is_eligible: check if a newly discovered GW is a potential candidate
+	 *  for the election as best GW (optional)
+	 */
 	bool (*is_eligible)(struct batadv_priv *bat_priv,
 			    struct batadv_orig_node *curr_gw_orig,
 			    struct batadv_orig_node *orig_node);
+
 #ifdef CONFIG_BATMAN_ADV_DEBUGFS
+	/** @print: print the gateway table (optional) */
 	void (*print)(struct batadv_priv *bat_priv, struct seq_file *seq);
 #endif
+
+	/** @dump: dump gateways to a netlink socket (optional) */
 	void (*dump)(struct sk_buff *msg, struct netlink_callback *cb,
 		     struct batadv_priv *priv);
 };
 
 /**
  * struct batadv_algo_ops - mesh algorithm callbacks
- * @list: list node for the batadv_algo_list
- * @name: name of the algorithm
- * @iface: callbacks related to interface handling
- * @neigh: callbacks related to neighbors handling
- * @orig: callbacks related to originators handling
- * @gw: callbacks related to GW mode
  */
 struct batadv_algo_ops {
+	/** @list: list node for the batadv_algo_list */
 	struct hlist_node list;
+
+	/** @name: name of the algorithm */
 	char *name;
+
+	/** @iface: callbacks related to interface handling */
 	struct batadv_algo_iface_ops iface;
+
+	/** @neigh: callbacks related to neighbors handling */
 	struct batadv_algo_neigh_ops neigh;
+
+	/** @orig: callbacks related to originators handling */
 	struct batadv_algo_orig_ops orig;
+
+	/** @gw: callbacks related to GW mode */
 	struct batadv_algo_gw_ops gw;
 };
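
/*
 * Aside: a routing algorithm plugs in by filling one static
 * batadv_algo_ops and registering it at module init; only the
 * callbacks marked optional above may stay NULL. Hedged sketch -
 * batadv_algo_register() is the real entry point, everything prefixed
 * sketch_ is a placeholder assumed to be defined elsewhere:
 */
static struct batadv_algo_ops sketch_algo_ops = {
	.name = "BATMAN_SKETCH",
	.iface = {
		.enable = sketch_iface_enable,
		.disable = sketch_iface_disable,
		.update_mac = sketch_iface_update_mac,
		.primary_set = sketch_primary_set,
	},
	.neigh = {
		.cmp = sketch_neigh_cmp,
		.is_similar_or_better = sketch_neigh_is_similar_or_better,
	},
};

static int __init sketch_algo_init(void)
{
	return batadv_algo_register(&sketch_algo_ops);
}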
 
 /**
  * struct batadv_dat_entry - a single entry of the batman-adv ARP backend. It
  * is used to store ARP entries needed for the global DAT cache
- * @ip: the IPv4 corresponding to this DAT/ARP entry
- * @mac_addr: the MAC address associated to the stored IPv4
- * @vid: the vlan ID associated to this entry
- * @last_update: time in jiffies when this entry was refreshed last time
- * @hash_entry: hlist node for batadv_priv_dat::hash
- * @refcount: number of contexts the object is used
- * @rcu: struct used for freeing in an RCU-safe manner
  */
 struct batadv_dat_entry {
+	/** @ip: the IPv4 corresponding to this DAT/ARP entry */
 	__be32 ip;
+
+	/** @mac_addr: the MAC address associated to the stored IPv4 */
 	u8 mac_addr[ETH_ALEN];
+
+	/** @vid: the vlan ID associated to this entry */
 	unsigned short vid;
+
+	/**
+	 * @last_update: time in jiffies when this entry was refreshed last time
+	 */
 	unsigned long last_update;
+
+	/** @hash_entry: hlist node for &batadv_priv_dat.hash */
 	struct hlist_node hash_entry;
+
+	/** @refcount: number of contexts the object is used */
 	struct kref refcount;
+
+	/** @rcu: struct used for freeing in an RCU-safe manner */
 	struct rcu_head rcu;
 };
 
 /**
  * struct batadv_hw_addr - a list entry for a MAC address
- * @list: list node for the linking of entries
- * @addr: the MAC address of this list entry
  */
 struct batadv_hw_addr {
+	/** @list: list node for the linking of entries */
 	struct hlist_node list;
+
+	/** @addr: the MAC address of this list entry */
 	unsigned char addr[ETH_ALEN];
 };
 
 /**
  * struct batadv_dat_candidate - candidate destination for DAT operations
- * @type: the type of the selected candidate. It can one of the following:
- *	  - BATADV_DAT_CANDIDATE_NOT_FOUND
- *	  - BATADV_DAT_CANDIDATE_ORIG
- * @orig_node: if type is BATADV_DAT_CANDIDATE_ORIG this field points to the
- *	       corresponding originator node structure
  */
 struct batadv_dat_candidate {
+	/**
+	 * @type: the type of the selected candidate. It can be one of the
+	 *  following:
+	 *	  - BATADV_DAT_CANDIDATE_NOT_FOUND
+	 *	  - BATADV_DAT_CANDIDATE_ORIG
+	 */
 	int type;
+
+	/**
+	 * @orig_node: if type is BATADV_DAT_CANDIDATE_ORIG this field points to
+	 * the corresponding originator node structure
+	 */
 	struct batadv_orig_node *orig_node;
 };
 
 /**
  * struct batadv_tvlv_container - container for tvlv appended to OGMs
- * @list: hlist node for batadv_priv_tvlv::container_list
- * @tvlv_hdr: tvlv header information needed to construct the tvlv
- * @refcount: number of contexts the object is used
  */
 struct batadv_tvlv_container {
+	/** @list: hlist node for &batadv_priv_tvlv.container_list */
 	struct hlist_node list;
+
+	/** @tvlv_hdr: tvlv header information needed to construct the tvlv */
 	struct batadv_tvlv_hdr tvlv_hdr;
+
+	/** @refcount: number of contexts the object is used */
 	struct kref refcount;
 };
 
 /**
  * struct batadv_tvlv_handler - handler for specific tvlv type and version
- * @list: hlist node for batadv_priv_tvlv::handler_list
- * @ogm_handler: handler callback which is given the tvlv payload to process on
- *  incoming OGM packets
- * @unicast_handler: handler callback which is given the tvlv payload to process
- *  on incoming unicast tvlv packets
- * @type: tvlv type this handler feels responsible for
- * @version: tvlv version this handler feels responsible for
- * @flags: tvlv handler flags
- * @refcount: number of contexts the object is used
- * @rcu: struct used for freeing in an RCU-safe manner
  */
 struct batadv_tvlv_handler {
+	/** @list: hlist node for &batadv_priv_tvlv.handler_list */
 	struct hlist_node list;
+
+	/**
+	 * @ogm_handler: handler callback which is given the tvlv payload to
+	 *  process on incoming OGM packets
+	 */
 	void (*ogm_handler)(struct batadv_priv *bat_priv,
 			    struct batadv_orig_node *orig,
 			    u8 flags, void *tvlv_value, u16 tvlv_value_len);
+
+	/**
+	 * @unicast_handler: handler callback which is given the tvlv payload to
+	 *  process on incoming unicast tvlv packets
+	 */
 	int (*unicast_handler)(struct batadv_priv *bat_priv,
 			       u8 *src, u8 *dst,
 			       void *tvlv_value, u16 tvlv_value_len);
+
+	/** @type: tvlv type this handler feels responsible for */
 	u8 type;
+
+	/** @version: tvlv version this handler feels responsible for */
 	u8 version;
+
+	/** @flags: tvlv handler flags */
 	u8 flags;
+
+	/** @refcount: number of contexts the object is used */
 	struct kref refcount;
+
+	/** @rcu: struct used for freeing in an RCU-safe manner */
 	struct rcu_head rcu;
 };
 
 /**
  * enum batadv_tvlv_handler_flags - tvlv handler flags definitions
- * @BATADV_TVLV_HANDLER_OGM_CIFNOTFND: tvlv ogm processing function will call
- *  this handler even if its type was not found (with no data)
- * @BATADV_TVLV_HANDLER_OGM_CALLED: interval tvlv handling flag - the API marks
- *  a handler as being called, so it won't be called if the
- *  BATADV_TVLV_HANDLER_OGM_CIFNOTFND flag was set
  */
 enum batadv_tvlv_handler_flags {
+	/**
+	 * @BATADV_TVLV_HANDLER_OGM_CIFNOTFND: tvlv ogm processing function
+	 *  will call this handler even if its type was not found (with no data)
+	 */
 	BATADV_TVLV_HANDLER_OGM_CIFNOTFND = BIT(1),
+
+	/**
+	 * @BATADV_TVLV_HANDLER_OGM_CALLED: internal tvlv handling flag - the
+	 *  API marks a handler as being called, so it won't be called if the
+	 *  BATADV_TVLV_HANDLER_OGM_CIFNOTFND flag was set
+	 */
 	BATADV_TVLV_HANDLER_OGM_CALLED = BIT(2),
 };
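
/*
 * Aside: the two flags cooperate during OGM parsing. A handler opts in
 * with CIFNOTFND at registration time; the parser sets CALLED on each
 * handler it has already invoked for the current OGM, then makes one
 * post-parse pass for the types that were absent. Simplified sketch
 * (the real loop is in batadv_tvlv_containers_process(), tvlv.c):
 */
static void tvlv_call_cifnotfnd_sketch(struct batadv_priv *bat_priv,
				       struct batadv_orig_node *orig)
{
	struct batadv_tvlv_handler *handler;

	rcu_read_lock();
	hlist_for_each_entry_rcu(handler, &bat_priv->tvlv.handler_list,
				 list) {
		if (!handler->ogm_handler)
			continue;
		if (!(handler->flags & BATADV_TVLV_HANDLER_OGM_CIFNOTFND))
			continue;
		if (handler->flags & BATADV_TVLV_HANDLER_OGM_CALLED)
			continue;
		/* type was absent from this OGM: call once, no payload */
		handler->ogm_handler(bat_priv, orig, handler->flags,
				     NULL, 0);
	}
	rcu_read_unlock();
}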
 
 /**
  * struct batadv_store_mesh_work - Work queue item to detach add/del interface
  *  from sysfs locks
- * @net_dev: netdevice to add/remove to/from batman-adv soft-interface
- * @soft_iface_name: name of soft-interface to modify
- * @work: work queue item
  */
 struct batadv_store_mesh_work {
+	/**
+	 * @net_dev: netdevice to add/remove to/from batman-adv soft-interface
+	 */
 	struct net_device *net_dev;
+
+	/** @soft_iface_name: name of soft-interface to modify */
 	char soft_iface_name[IFNAMSIZ];
+
+	/** @work: work queue item */
 	struct work_struct work;
 };
 
diff --git a/net/bluetooth/af_bluetooth.c b/net/bluetooth/af_bluetooth.c
index 671b907ba678..f897681780db 100644
--- a/net/bluetooth/af_bluetooth.c
+++ b/net/bluetooth/af_bluetooth.c
@@ -766,43 +766,39 @@ static int __init bt_init(void)
 		return err;
 
 	err = sock_register(&bt_sock_family_ops);
-	if (err < 0) {
-		bt_sysfs_cleanup();
-		return err;
-	}
+	if (err)
+		goto cleanup_sysfs;
 
 	BT_INFO("HCI device and connection manager initialized");
 
 	err = hci_sock_init();
-	if (err < 0)
-		goto error;
+	if (err)
+		goto unregister_socket;
 
 	err = l2cap_init();
-	if (err < 0)
-		goto sock_err;
+	if (err)
+		goto cleanup_socket;
 
 	err = sco_init();
-	if (err < 0) {
-		l2cap_exit();
-		goto sock_err;
-	}
+	if (err)
+		goto cleanup_cap;
 
 	err = mgmt_init();
-	if (err < 0) {
-		sco_exit();
-		l2cap_exit();
-		goto sock_err;
-	}
+	if (err)
+		goto cleanup_sco;
 
 	return 0;
 
-sock_err:
+cleanup_sco:
+	sco_exit();
+cleanup_cap:
+	l2cap_exit();
+cleanup_socket:
 	hci_sock_cleanup();
-
-error:
+unregister_socket:
 	sock_unregister(PF_BLUETOOTH);
+cleanup_sysfs:
 	bt_sysfs_cleanup();
-
 	return err;
 }
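
The rewrite of bt_init() above is the kernel's usual inverted-goto
cleanup ladder: every init step that fails jumps to the label that
undoes everything initialized before it, and the labels fall through in
reverse order. Schematically (step_a/step_b/step_c are placeholders,
not real Bluetooth calls):

static int __init example_init(void)
{
	int err;

	err = step_a_init();
	if (err)
		return err;		/* nothing to unwind yet */

	err = step_b_init();
	if (err)
		goto cleanup_a;		/* undo step_a only */

	err = step_c_init();
	if (err)
		goto cleanup_b;		/* undo step_b, then step_a */

	return 0;

cleanup_b:
	step_b_exit();
cleanup_a:
	step_a_exit();
	return err;
}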
 
diff --git a/net/bluetooth/cmtp/capi.c b/net/bluetooth/cmtp/capi.c
index bb308224099c..426a92f02db4 100644
--- a/net/bluetooth/cmtp/capi.c
+++ b/net/bluetooth/cmtp/capi.c
@@ -527,7 +527,6 @@ static int cmtp_proc_open(struct inode *inode, struct file *file)
 }
 
 static const struct file_operations cmtp_proc_fops = {
-	.owner		= THIS_MODULE,
 	.open		= cmtp_proc_open,
 	.read		= seq_read,
 	.llseek		= seq_lseek,
diff --git a/net/bluetooth/hci_debugfs.c b/net/bluetooth/hci_debugfs.c
index 63df63ebfb24..57403bd567d0 100644
--- a/net/bluetooth/hci_debugfs.c
+++ b/net/bluetooth/hci_debugfs.c
@@ -88,6 +88,9 @@ static int __name ## _show(struct seq_file *f, void *ptr)		      \
 	return 0;							      \
 }									      \
 									      \
+DEFINE_SHOW_ATTRIBUTE(__name)
+
+#define DEFINE_SHOW_ATTRIBUTE(__name)					      \
 static int __name ## _open(struct inode *inode, struct file *file)	      \
 {									      \
 	return single_open(file, __name ## _show, inode->i_private);	      \
@@ -106,37 +109,16 @@ static int features_show(struct seq_file *f, void *ptr)
 	u8 p;
 
 	hci_dev_lock(hdev);
-	for (p = 0; p < HCI_MAX_PAGES && p <= hdev->max_page; p++) {
-		seq_printf(f, "%2u: 0x%2.2x 0x%2.2x 0x%2.2x 0x%2.2x "
-			   "0x%2.2x 0x%2.2x 0x%2.2x 0x%2.2x\n", p,
-			   hdev->features[p][0], hdev->features[p][1],
-			   hdev->features[p][2], hdev->features[p][3],
-			   hdev->features[p][4], hdev->features[p][5],
-			   hdev->features[p][6], hdev->features[p][7]);
-	}
+	for (p = 0; p < HCI_MAX_PAGES && p <= hdev->max_page; p++)
+		seq_printf(f, "%2u: %8ph\n", p, hdev->features[p]);
 	if (lmp_le_capable(hdev))
-		seq_printf(f, "LE: 0x%2.2x 0x%2.2x 0x%2.2x 0x%2.2x "
-			   "0x%2.2x 0x%2.2x 0x%2.2x 0x%2.2x\n",
-			   hdev->le_features[0], hdev->le_features[1],
-			   hdev->le_features[2], hdev->le_features[3],
-			   hdev->le_features[4], hdev->le_features[5],
-			   hdev->le_features[6], hdev->le_features[7]);
+		seq_printf(f, "LE: %8ph\n", hdev->le_features);
 	hci_dev_unlock(hdev);
 
 	return 0;
 }
 
-static int features_open(struct inode *inode, struct file *file)
-{
-	return single_open(file, features_show, inode->i_private);
-}
-
-static const struct file_operations features_fops = {
-	.open		= features_open,
-	.read		= seq_read,
-	.llseek		= seq_lseek,
-	.release	= single_release,
-};
+DEFINE_SHOW_ATTRIBUTE(features);
 
 static int device_id_show(struct seq_file *f, void *ptr)
 {
@@ -150,17 +132,7 @@ static int device_id_show(struct seq_file *f, void *ptr)
 	return 0;
 }
 
-static int device_id_open(struct inode *inode, struct file *file)
-{
-	return single_open(file, device_id_show, inode->i_private);
-}
-
-static const struct file_operations device_id_fops = {
-	.open		= device_id_open,
-	.read		= seq_read,
-	.llseek		= seq_lseek,
-	.release	= single_release,
-};
+DEFINE_SHOW_ATTRIBUTE(device_id);
 
 static int device_list_show(struct seq_file *f, void *ptr)
 {
@@ -180,17 +152,7 @@ static int device_list_show(struct seq_file *f, void *ptr)
 	return 0;
 }
 
-static int device_list_open(struct inode *inode, struct file *file)
-{
-	return single_open(file, device_list_show, inode->i_private);
-}
-
-static const struct file_operations device_list_fops = {
-	.open		= device_list_open,
-	.read		= seq_read,
-	.llseek		= seq_lseek,
-	.release	= single_release,
-};
+DEFINE_SHOW_ATTRIBUTE(device_list);
 
 static int blacklist_show(struct seq_file *f, void *p)
 {
@@ -205,17 +167,7 @@ static int blacklist_show(struct seq_file *f, void *p)
 	return 0;
 }
 
-static int blacklist_open(struct inode *inode, struct file *file)
-{
-	return single_open(file, blacklist_show, inode->i_private);
-}
-
-static const struct file_operations blacklist_fops = {
-	.open		= blacklist_open,
-	.read		= seq_read,
-	.llseek		= seq_lseek,
-	.release	= single_release,
-};
+DEFINE_SHOW_ATTRIBUTE(blacklist);
 
 static int uuids_show(struct seq_file *f, void *p)
 {
@@ -240,17 +192,7 @@ static int uuids_show(struct seq_file *f, void *p)
        return 0;
 }
 
-static int uuids_open(struct inode *inode, struct file *file)
-{
-	return single_open(file, uuids_show, inode->i_private);
-}
-
-static const struct file_operations uuids_fops = {
-	.open		= uuids_open,
-	.read		= seq_read,
-	.llseek		= seq_lseek,
-	.release	= single_release,
-};
+DEFINE_SHOW_ATTRIBUTE(uuids);
 
 static int remote_oob_show(struct seq_file *f, void *ptr)
 {
@@ -269,17 +211,7 @@ static int remote_oob_show(struct seq_file *f, void *ptr)
 	return 0;
 }
 
-static int remote_oob_open(struct inode *inode, struct file *file)
-{
-	return single_open(file, remote_oob_show, inode->i_private);
-}
-
-static const struct file_operations remote_oob_fops = {
-	.open		= remote_oob_open,
-	.read		= seq_read,
-	.llseek		= seq_lseek,
-	.release	= single_release,
-};
+DEFINE_SHOW_ATTRIBUTE(remote_oob);
 
 static int conn_info_min_age_set(void *data, u64 val)
 {
@@ -443,17 +375,7 @@ static int inquiry_cache_show(struct seq_file *f, void *p)
 	return 0;
 }
 
-static int inquiry_cache_open(struct inode *inode, struct file *file)
-{
-	return single_open(file, inquiry_cache_show, inode->i_private);
-}
-
-static const struct file_operations inquiry_cache_fops = {
-	.open		= inquiry_cache_open,
-	.read		= seq_read,
-	.llseek		= seq_lseek,
-	.release	= single_release,
-};
+DEFINE_SHOW_ATTRIBUTE(inquiry_cache);
 
 static int link_keys_show(struct seq_file *f, void *ptr)
 {
@@ -469,17 +391,7 @@ static int link_keys_show(struct seq_file *f, void *ptr)
 	return 0;
 }
 
-static int link_keys_open(struct inode *inode, struct file *file)
-{
-	return single_open(file, link_keys_show, inode->i_private);
-}
-
-static const struct file_operations link_keys_fops = {
-	.open		= link_keys_open,
-	.read		= seq_read,
-	.llseek		= seq_lseek,
-	.release	= single_release,
-};
+DEFINE_SHOW_ATTRIBUTE(link_keys);
 
 static int dev_class_show(struct seq_file *f, void *ptr)
 {
@@ -493,17 +405,7 @@ static int dev_class_show(struct seq_file *f, void *ptr)
 	return 0;
 }
 
-static int dev_class_open(struct inode *inode, struct file *file)
-{
-	return single_open(file, dev_class_show, inode->i_private);
-}
-
-static const struct file_operations dev_class_fops = {
-	.open		= dev_class_open,
-	.read		= seq_read,
-	.llseek		= seq_lseek,
-	.release	= single_release,
-};
+DEFINE_SHOW_ATTRIBUTE(dev_class);
 
 static int voice_setting_get(void *data, u64 *val)
 {
@@ -692,17 +594,7 @@ static int identity_show(struct seq_file *f, void *p)
 	return 0;
 }
 
-static int identity_open(struct inode *inode, struct file *file)
-{
-	return single_open(file, identity_show, inode->i_private);
-}
-
-static const struct file_operations identity_fops = {
-	.open		= identity_open,
-	.read		= seq_read,
-	.llseek		= seq_lseek,
-	.release	= single_release,
-};
+DEFINE_SHOW_ATTRIBUTE(identity);
 
 static int rpa_timeout_set(void *data, u64 val)
 {
@@ -746,17 +638,7 @@ static int random_address_show(struct seq_file *f, void *p)
 	return 0;
 }
 
-static int random_address_open(struct inode *inode, struct file *file)
-{
-	return single_open(file, random_address_show, inode->i_private);
-}
-
-static const struct file_operations random_address_fops = {
-	.open		= random_address_open,
-	.read		= seq_read,
-	.llseek		= seq_lseek,
-	.release	= single_release,
-};
+DEFINE_SHOW_ATTRIBUTE(random_address);
 
 static int static_address_show(struct seq_file *f, void *p)
 {
@@ -769,17 +651,7 @@ static int static_address_show(struct seq_file *f, void *p)
 	return 0;
 }
 
-static int static_address_open(struct inode *inode, struct file *file)
-{
-	return single_open(file, static_address_show, inode->i_private);
-}
-
-static const struct file_operations static_address_fops = {
-	.open		= static_address_open,
-	.read		= seq_read,
-	.llseek		= seq_lseek,
-	.release	= single_release,
-};
+DEFINE_SHOW_ATTRIBUTE(static_address);
 
 static ssize_t force_static_address_read(struct file *file,
 					 char __user *user_buf,
@@ -841,17 +713,7 @@ static int white_list_show(struct seq_file *f, void *ptr)
 	return 0;
 }
 
-static int white_list_open(struct inode *inode, struct file *file)
-{
-	return single_open(file, white_list_show, inode->i_private);
-}
-
-static const struct file_operations white_list_fops = {
-	.open		= white_list_open,
-	.read		= seq_read,
-	.llseek		= seq_lseek,
-	.release	= single_release,
-};
+DEFINE_SHOW_ATTRIBUTE(white_list);
 
 static int identity_resolving_keys_show(struct seq_file *f, void *ptr)
 {
@@ -869,18 +731,7 @@ static int identity_resolving_keys_show(struct seq_file *f, void *ptr)
 	return 0;
 }
 
-static int identity_resolving_keys_open(struct inode *inode, struct file *file)
-{
-	return single_open(file, identity_resolving_keys_show,
-			   inode->i_private);
-}
-
-static const struct file_operations identity_resolving_keys_fops = {
-	.open		= identity_resolving_keys_open,
-	.read		= seq_read,
-	.llseek		= seq_lseek,
-	.release	= single_release,
-};
+DEFINE_SHOW_ATTRIBUTE(identity_resolving_keys);
 
 static int long_term_keys_show(struct seq_file *f, void *ptr)
 {
@@ -898,17 +749,7 @@ static int long_term_keys_show(struct seq_file *f, void *ptr)
 	return 0;
 }
 
-static int long_term_keys_open(struct inode *inode, struct file *file)
-{
-	return single_open(file, long_term_keys_show, inode->i_private);
-}
-
-static const struct file_operations long_term_keys_fops = {
-	.open		= long_term_keys_open,
-	.read		= seq_read,
-	.llseek		= seq_lseek,
-	.release	= single_release,
-};
+DEFINE_SHOW_ATTRIBUTE(long_term_keys);
 
 static int conn_min_interval_set(void *data, u64 val)
 {
diff --git a/net/bluetooth/hci_request.c b/net/bluetooth/hci_request.c
index abc0f3224dd1..3394e6791673 100644
--- a/net/bluetooth/hci_request.c
+++ b/net/bluetooth/hci_request.c
@@ -919,6 +919,43 @@ static bool adv_use_rpa(struct hci_dev *hdev, uint32_t flags)
 	return true;
 }
 
+static bool is_advertising_allowed(struct hci_dev *hdev, bool connectable)
+{
+	/* If there is no connection we are OK to advertise. */
+	if (hci_conn_num(hdev, LE_LINK) == 0)
+		return true;
+
+	/* Check le_states if there is any connection in slave role. */
+	if (hdev->conn_hash.le_num_slave > 0) {
+		/* Slave connection state and non connectable mode bit 20. */
+		if (!connectable && !(hdev->le_states[2] & 0x10))
+			return false;
+
+		/* Slave connection state and connectable mode bit 38
+		 * and scannable bit 21.
+		 */
+		if (connectable && (!(hdev->le_states[4] & 0x40) ||
+				    !(hdev->le_states[2] & 0x20)))
+			return false;
+	}
+
+	/* Check le_states if there is any connection in master role. */
+	if (hci_conn_num(hdev, LE_LINK) != hdev->conn_hash.le_num_slave) {
+		/* Master connection state and non connectable mode bit 18. */
+		if (!connectable && !(hdev->le_states[2] & 0x02))
+			return false;
+
+		/* Master connection state and connectable mode bit 35 and
+		 * scannable bit 19.
+		 */
+		if (connectable && (!(hdev->le_states[4] & 0x08) ||
+				    !(hdev->le_states[2] & 0x08)))
+			return false;
+	}
+
+	return true;
+}
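The masks above index the 8-byte LE Supported States bitmap (hdev->le_states) reported by the controller: combination-state bit n is tested as le_states[n / 8] & (1 << (n % 8)), so bit 20 becomes le_states[2] & 0x10 and bit 35 becomes le_states[4] & 0x08. A hypothetical helper (not part of this patch) making that mapping explicit:

	static inline bool le_state_supported(const __u8 *le_states, unsigned int n)
	{
		/* bit n of the little-endian LE Supported States bitmap */
		return le_states[n / 8] & (1 << (n % 8));
	}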
+
 void __hci_req_enable_advertising(struct hci_request *req)
 {
 	struct hci_dev *hdev = req->hdev;
@@ -927,7 +964,15 @@ void __hci_req_enable_advertising(struct hci_request *req)
 	bool connectable;
 	u32 flags;
 
-	if (hci_conn_num(hdev, LE_LINK) > 0)
+	flags = get_adv_instance_flags(hdev, hdev->cur_adv_instance);
+
+	/* If the "connectable" instance flag was not set, then choose between
+	 * ADV_IND and ADV_NONCONN_IND based on the global connectable setting.
+	 */
+	connectable = (flags & MGMT_ADV_FLAG_CONNECTABLE) ||
+		      mgmt_get_connectable(hdev);
+
+	if (!is_advertising_allowed(hdev, connectable))
 		return;
 
 	if (hci_dev_test_flag(hdev, HCI_LE_ADV))
@@ -940,14 +985,6 @@ void __hci_req_enable_advertising(struct hci_request *req)
 	 */
 	hci_dev_clear_flag(hdev, HCI_LE_ADV);
 
-	flags = get_adv_instance_flags(hdev, hdev->cur_adv_instance);
-
-	/* If the "connectable" instance flag was not set, then choose between
-	 * ADV_IND and ADV_NONCONN_IND based on the global connectable setting.
-	 */
-	connectable = (flags & MGMT_ADV_FLAG_CONNECTABLE) ||
-		      mgmt_get_connectable(hdev);
-
 	/* Set require_privacy to true only when non-connectable
 	 * advertising is used. In that case it is fine to use a
 	 * non-resolvable private address.
@@ -1985,13 +2022,6 @@ unlock:
 	hci_dev_unlock(hdev);
 }
 
-static void disable_advertising(struct hci_request *req)
-{
-	u8 enable = 0x00;
-
-	hci_req_add(req, HCI_OP_LE_SET_ADV_ENABLE, sizeof(enable), &enable);
-}
-
 static int active_scan(struct hci_request *req, unsigned long opt)
 {
 	uint16_t interval = opt;
@@ -2017,7 +2047,7 @@ static int active_scan(struct hci_request *req, unsigned long opt)
 		cancel_adv_timeout(hdev);
 		hci_dev_unlock(hdev);
 
-		disable_advertising(req);
+		__hci_req_disable_advertising(req);
 	}
 
 	/* If controller is scanning, it means the background scanning is
diff --git a/net/bridge/br_device.c b/net/bridge/br_device.c
index af5b8c87f590..1285ca30ab0a 100644
--- a/net/bridge/br_device.c
+++ b/net/bridge/br_device.c
@@ -125,9 +125,16 @@ static int br_dev_init(struct net_device *dev)
 	if (!br->stats)
 		return -ENOMEM;
 
+	err = br_fdb_hash_init(br);
+	if (err) {
+		free_percpu(br->stats);
+		return err;
+	}
+
 	err = br_vlan_init(br);
 	if (err) {
 		free_percpu(br->stats);
+		br_fdb_hash_fini(br);
 		return err;
 	}
 
@@ -135,6 +142,7 @@ static int br_dev_init(struct net_device *dev)
 	if (err) {
 		free_percpu(br->stats);
 		br_vlan_flush(br);
+		br_fdb_hash_fini(br);
 	}
 	br_set_lockdep_class(dev);
 
@@ -148,6 +156,7 @@ static void br_dev_uninit(struct net_device *dev)
 	br_multicast_dev_del(br);
 	br_multicast_uninit_stats(br);
 	br_vlan_flush(br);
+	br_fdb_hash_fini(br);
 	free_percpu(br->stats);
 }
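br_dev_init() now has to unwind two earlier steps on failure, and each error path repeats the cleanup calls inline. The same logic expressed as a conventional goto ladder, as a sketch only (hypothetical labels; the later init steps are elided here):

	err = br_fdb_hash_init(br);
	if (err)
		goto free_stats;

	err = br_vlan_init(br);
	if (err)
		goto fdb_fini;

	/* ... remaining init steps ... */
	return 0;

fdb_fini:
	br_fdb_hash_fini(br);
free_stats:
	free_percpu(br->stats);
	return err;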
 
@@ -416,6 +425,7 @@ void br_dev_setup(struct net_device *dev)
 	br->dev = dev;
 	spin_lock_init(&br->lock);
 	INIT_LIST_HEAD(&br->port_list);
+	INIT_HLIST_HEAD(&br->fdb_list);
 	spin_lock_init(&br->hash_lock);
 
 	br->bridge_id.prio[0] = 0x80;
diff --git a/net/bridge/br_fdb.c b/net/bridge/br_fdb.c
index 4ea5c8bbe286..dc87fbc9a23b 100644
--- a/net/bridge/br_fdb.c
+++ b/net/bridge/br_fdb.c
@@ -28,14 +28,20 @@
 #include <trace/events/bridge.h>
 #include "br_private.h"
 
+static const struct rhashtable_params br_fdb_rht_params = {
+	.head_offset = offsetof(struct net_bridge_fdb_entry, rhnode),
+	.key_offset = offsetof(struct net_bridge_fdb_entry, key),
+	.key_len = sizeof(struct net_bridge_fdb_key),
+	.automatic_shrinking = true,
+	.locks_mul = 1,
+};
+
 static struct kmem_cache *br_fdb_cache __read_mostly;
 static int fdb_insert(struct net_bridge *br, struct net_bridge_port *source,
 		      const unsigned char *addr, u16 vid);
 static void fdb_notify(struct net_bridge *br,
 		       const struct net_bridge_fdb_entry *, int);
 
-static u32 fdb_salt __read_mostly;
-
 int __init br_fdb_init(void)
 {
 	br_fdb_cache = kmem_cache_create("bridge_fdb_cache",
@@ -45,7 +51,6 @@ int __init br_fdb_init(void)
 	if (!br_fdb_cache)
 		return -ENOMEM;
 
-	get_random_bytes(&fdb_salt, sizeof(fdb_salt));
 	return 0;
 }
 
@@ -54,6 +59,15 @@ void br_fdb_fini(void)
 	kmem_cache_destroy(br_fdb_cache);
 }
 
+int br_fdb_hash_init(struct net_bridge *br)
+{
+	return rhashtable_init(&br->fdb_hash_tbl, &br_fdb_rht_params);
+}
+
+void br_fdb_hash_fini(struct net_bridge *br)
+{
+	rhashtable_destroy(&br->fdb_hash_tbl);
+}
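Since rhashtable hashes and compares the key as raw bytes (key_len = sizeof(struct net_bridge_fdb_key)), the key struct must contain no uninitialized padding. A 6-byte mac_addr followed by a u16 packs naturally into 8 bytes, and fdb_find_rcu() fills both members before the lookup. A build-time guard for that layout assumption could read (hypothetical, not in the patch):

	BUILD_BUG_ON(sizeof(struct net_bridge_fdb_key) != ETH_ALEN + sizeof(u16));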
 
 /* if topology_changing then use forward_delay (default 15 sec)
  * otherwise keep longer (default 5 minutes)
@@ -70,13 +84,6 @@ static inline int has_expired(const struct net_bridge *br,
 		time_before_eq(fdb->updated + hold_time(br), jiffies);
 }
 
-static inline int br_mac_hash(const unsigned char *mac, __u16 vid)
-{
-	/* use 1 byte of OUI and 3 bytes of NIC */
-	u32 key = get_unaligned((u32 *)(mac + 2));
-	return jhash_2words(key, vid, fdb_salt) & (BR_HASH_SIZE - 1);
-}
-
 static void fdb_rcu_free(struct rcu_head *head)
 {
 	struct net_bridge_fdb_entry *ent
@@ -84,19 +91,18 @@ static void fdb_rcu_free(struct rcu_head *head)
 	kmem_cache_free(br_fdb_cache, ent);
 }
 
-static struct net_bridge_fdb_entry *fdb_find_rcu(struct hlist_head *head,
+static struct net_bridge_fdb_entry *fdb_find_rcu(struct rhashtable *tbl,
 						 const unsigned char *addr,
 						 __u16 vid)
 {
-	struct net_bridge_fdb_entry *f;
+	struct net_bridge_fdb_key key;
 
 	WARN_ON_ONCE(!rcu_read_lock_held());
 
-	hlist_for_each_entry_rcu(f, head, hlist)
-		if (ether_addr_equal(f->addr.addr, addr) && f->vlan_id == vid)
-			break;
+	key.vlan_id = vid;
+	memcpy(key.addr.addr, addr, sizeof(key.addr.addr));
 
-	return f;
+	return rhashtable_lookup(tbl, &key, br_fdb_rht_params);
 }
 
 /* requires bridge hash_lock */
@@ -104,13 +110,12 @@ static struct net_bridge_fdb_entry *br_fdb_find(struct net_bridge *br,
 						const unsigned char *addr,
 						__u16 vid)
 {
-	struct hlist_head *head = &br->hash[br_mac_hash(addr, vid)];
 	struct net_bridge_fdb_entry *fdb;
 
 	lockdep_assert_held_once(&br->hash_lock);
 
 	rcu_read_lock();
-	fdb = fdb_find_rcu(head, addr, vid);
+	fdb = fdb_find_rcu(&br->fdb_hash_tbl, addr, vid);
 	rcu_read_unlock();
 
 	return fdb;
@@ -120,9 +125,7 @@ struct net_bridge_fdb_entry *br_fdb_find_rcu(struct net_bridge *br,
 					     const unsigned char *addr,
 					     __u16 vid)
 {
-	struct hlist_head *head = &br->hash[br_mac_hash(addr, vid)];
-
-	return fdb_find_rcu(head, addr, vid);
+	return fdb_find_rcu(&br->fdb_hash_tbl, addr, vid);
 }
 
 /* When a static FDB entry is added, the mac address from the entry is
@@ -175,9 +178,11 @@ static void fdb_delete(struct net_bridge *br, struct net_bridge_fdb_entry *f)
 	trace_fdb_delete(br, f);
 
 	if (f->is_static)
-		fdb_del_hw_addr(br, f->addr.addr);
+		fdb_del_hw_addr(br, f->key.addr.addr);
 
-	hlist_del_init_rcu(&f->hlist);
+	hlist_del_init_rcu(&f->fdb_node);
+	rhashtable_remove_fast(&br->fdb_hash_tbl, &f->rhnode,
+			       br_fdb_rht_params);
 	fdb_notify(br, f, RTM_DELNEIGH);
 	call_rcu(&f->rcu, fdb_rcu_free);
 }
@@ -187,11 +192,11 @@ static void fdb_delete_local(struct net_bridge *br,
 			     const struct net_bridge_port *p,
 			     struct net_bridge_fdb_entry *f)
 {
-	const unsigned char *addr = f->addr.addr;
+	const unsigned char *addr = f->key.addr.addr;
 	struct net_bridge_vlan_group *vg;
 	const struct net_bridge_vlan *v;
 	struct net_bridge_port *op;
-	u16 vid = f->vlan_id;
+	u16 vid = f->key.vlan_id;
 
 	/* Maybe another port has same hw addr? */
 	list_for_each_entry(op, &br->port_list, list) {
@@ -233,31 +238,23 @@ void br_fdb_find_delete_local(struct net_bridge *br,
 void br_fdb_changeaddr(struct net_bridge_port *p, const unsigned char *newaddr)
 {
 	struct net_bridge_vlan_group *vg;
+	struct net_bridge_fdb_entry *f;
 	struct net_bridge *br = p->br;
 	struct net_bridge_vlan *v;
-	int i;
 
 	spin_lock_bh(&br->hash_lock);
-
 	vg = nbp_vlan_group(p);
-	/* Search all chains since old address/hash is unknown */
-	for (i = 0; i < BR_HASH_SIZE; i++) {
-		struct hlist_node *h;
-		hlist_for_each(h, &br->hash[i]) {
-			struct net_bridge_fdb_entry *f;
-
-			f = hlist_entry(h, struct net_bridge_fdb_entry, hlist);
-			if (f->dst == p && f->is_local && !f->added_by_user) {
-				/* delete old one */
-				fdb_delete_local(br, p, f);
-
-				/* if this port has no vlan information
-				 * configured, we can safely be done at
-				 * this point.
-				 */
-				if (!vg || !vg->num_vlans)
-					goto insert;
-			}
+	hlist_for_each_entry(f, &br->fdb_list, fdb_node) {
+		if (f->dst == p && f->is_local && !f->added_by_user) {
+			/* delete old one */
+			fdb_delete_local(br, p, f);
+
+			/* if this port has no vlan information
+			 * configured, we can safely be done at
+			 * this point.
+			 */
+			if (!vg || !vg->num_vlans)
+				goto insert;
 		}
 	}
 
@@ -316,35 +313,32 @@ void br_fdb_cleanup(struct work_struct *work)
 {
 	struct net_bridge *br = container_of(work, struct net_bridge,
 					     gc_work.work);
+	struct net_bridge_fdb_entry *f = NULL;
 	unsigned long delay = hold_time(br);
 	unsigned long work_delay = delay;
 	unsigned long now = jiffies;
-	int i;
 
-	for (i = 0; i < BR_HASH_SIZE; i++) {
-		struct net_bridge_fdb_entry *f;
-		struct hlist_node *n;
+	/* This part is tricky: in order to avoid blocking learning and,
+	 * consequently, forwarding, we rely on RCU to delete objects with
+	 * delayed freeing, which allows us to keep traversing.
+	 */
+	rcu_read_lock();
+	hlist_for_each_entry_rcu(f, &br->fdb_list, fdb_node) {
+		unsigned long this_timer;
 
-		if (!br->hash[i].first)
+		if (f->is_static || f->added_by_external_learn)
 			continue;
-
-		spin_lock_bh(&br->hash_lock);
-		hlist_for_each_entry_safe(f, n, &br->hash[i], hlist) {
-			unsigned long this_timer;
-
-			if (f->is_static)
-				continue;
-			if (f->added_by_external_learn)
-				continue;
-			this_timer = f->updated + delay;
-			if (time_after(this_timer, now))
-				work_delay = min(work_delay, this_timer - now);
-			else
+		this_timer = f->updated + delay;
+		if (time_after(this_timer, now)) {
+			work_delay = min(work_delay, this_timer - now);
+		} else {
+			spin_lock_bh(&br->hash_lock);
+			if (!hlist_unhashed(&f->fdb_node))
 				fdb_delete(br, f);
+			spin_unlock_bh(&br->hash_lock);
 		}
-		spin_unlock_bh(&br->hash_lock);
-		cond_resched();
 	}
+	rcu_read_unlock();
 
 	/* Cleanup minimum 10 milliseconds apart */
 	work_delay = max_t(unsigned long, work_delay, msecs_to_jiffies(10));
@@ -354,16 +348,13 @@ void br_fdb_cleanup(struct work_struct *work)
 /* Completely flush all dynamic entries in forwarding database.*/
 void br_fdb_flush(struct net_bridge *br)
 {
-	int i;
+	struct net_bridge_fdb_entry *f;
+	struct hlist_node *tmp;
 
 	spin_lock_bh(&br->hash_lock);
-	for (i = 0; i < BR_HASH_SIZE; i++) {
-		struct net_bridge_fdb_entry *f;
-		struct hlist_node *n;
-		hlist_for_each_entry_safe(f, n, &br->hash[i], hlist) {
-			if (!f->is_static)
-				fdb_delete(br, f);
-		}
+	hlist_for_each_entry_safe(f, tmp, &br->fdb_list, fdb_node) {
+		if (!f->is_static)
+			fdb_delete(br, f);
 	}
 	spin_unlock_bh(&br->hash_lock);
 }
@@ -377,27 +368,22 @@ void br_fdb_delete_by_port(struct net_bridge *br,
 			   u16 vid,
 			   int do_all)
 {
-	int i;
+	struct net_bridge_fdb_entry *f;
+	struct hlist_node *tmp;
 
 	spin_lock_bh(&br->hash_lock);
-	for (i = 0; i < BR_HASH_SIZE; i++) {
-		struct hlist_node *h, *g;
+	hlist_for_each_entry_safe(f, tmp, &br->fdb_list, fdb_node) {
+		if (f->dst != p)
+			continue;
 
-		hlist_for_each_safe(h, g, &br->hash[i]) {
-			struct net_bridge_fdb_entry *f
-				= hlist_entry(h, struct net_bridge_fdb_entry, hlist);
-			if (f->dst != p)
+		if (!do_all)
+			if (f->is_static || (vid && f->key.vlan_id != vid))
 				continue;
 
-			if (!do_all)
-				if (f->is_static || (vid && f->vlan_id != vid))
-					continue;
-
-			if (f->is_local)
-				fdb_delete_local(br, p, f);
-			else
-				fdb_delete(br, f);
-		}
+		if (f->is_local)
+			fdb_delete_local(br, p, f);
+		else
+			fdb_delete(br, f);
 	}
 	spin_unlock_bh(&br->hash_lock);
 }
@@ -433,52 +419,48 @@ int br_fdb_test_addr(struct net_device *dev, unsigned char *addr)
 int br_fdb_fillbuf(struct net_bridge *br, void *buf,
 		   unsigned long maxnum, unsigned long skip)
 {
-	struct __fdb_entry *fe = buf;
-	int i, num = 0;
 	struct net_bridge_fdb_entry *f;
+	struct __fdb_entry *fe = buf;
+	int num = 0;
 
 	memset(buf, 0, maxnum*sizeof(struct __fdb_entry));
 
 	rcu_read_lock();
-	for (i = 0; i < BR_HASH_SIZE; i++) {
-		hlist_for_each_entry_rcu(f, &br->hash[i], hlist) {
-			if (num >= maxnum)
-				goto out;
+	hlist_for_each_entry_rcu(f, &br->fdb_list, fdb_node) {
+		if (num >= maxnum)
+			break;
 
-			if (has_expired(br, f))
-				continue;
+		if (has_expired(br, f))
+			continue;
 
-			/* ignore pseudo entry for local MAC address */
-			if (!f->dst)
-				continue;
+		/* ignore pseudo entry for local MAC address */
+		if (!f->dst)
+			continue;
 
-			if (skip) {
-				--skip;
-				continue;
-			}
+		if (skip) {
+			--skip;
+			continue;
+		}
 
-			/* convert from internal format to API */
-			memcpy(fe->mac_addr, f->addr.addr, ETH_ALEN);
+		/* convert from internal format to API */
+		memcpy(fe->mac_addr, f->key.addr.addr, ETH_ALEN);
 
-			/* due to ABI compat need to split into hi/lo */
-			fe->port_no = f->dst->port_no;
-			fe->port_hi = f->dst->port_no >> 8;
+		/* due to ABI compat need to split into hi/lo */
+		fe->port_no = f->dst->port_no;
+		fe->port_hi = f->dst->port_no >> 8;
 
-			fe->is_local = f->is_local;
-			if (!f->is_static)
-				fe->ageing_timer_value = jiffies_delta_to_clock_t(jiffies - f->updated);
-			++fe;
-			++num;
-		}
+		fe->is_local = f->is_local;
+		if (!f->is_static)
+			fe->ageing_timer_value = jiffies_delta_to_clock_t(jiffies - f->updated);
+		++fe;
+		++num;
 	}
-
- out:
 	rcu_read_unlock();
 
 	return num;
 }
 
-static struct net_bridge_fdb_entry *fdb_create(struct hlist_head *head,
+static struct net_bridge_fdb_entry *fdb_create(struct net_bridge *br,
 					       struct net_bridge_port *source,
 					       const unsigned char *addr,
 					       __u16 vid,
@@ -489,16 +471,23 @@ static struct net_bridge_fdb_entry *fdb_create(struct hlist_head *head,
 
 	fdb = kmem_cache_alloc(br_fdb_cache, GFP_ATOMIC);
 	if (fdb) {
-		memcpy(fdb->addr.addr, addr, ETH_ALEN);
+		memcpy(fdb->key.addr.addr, addr, ETH_ALEN);
 		fdb->dst = source;
-		fdb->vlan_id = vid;
+		fdb->key.vlan_id = vid;
 		fdb->is_local = is_local;
 		fdb->is_static = is_static;
 		fdb->added_by_user = 0;
 		fdb->added_by_external_learn = 0;
 		fdb->offloaded = 0;
 		fdb->updated = fdb->used = jiffies;
-		hlist_add_head_rcu(&fdb->hlist, head);
+		if (rhashtable_lookup_insert_fast(&br->fdb_hash_tbl,
+						  &fdb->rhnode,
+						  br_fdb_rht_params)) {
+			kmem_cache_free(br_fdb_cache, fdb);
+			fdb = NULL;
+		} else {
+			hlist_add_head_rcu(&fdb->fdb_node, &br->fdb_list);
+		}
 	}
 	return fdb;
 }
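fdb_create() now performs the duplicate check and the insert in a single step: rhashtable_lookup_insert_fast() fails (returns a nonzero error such as -EEXIST) when an entry with the same key already exists, so it cannot create duplicates even if two contexts race. That is why br_fdb_update() below drops its second fdb_find_rcu() under hash_lock; the caller pattern becomes, roughly:

	spin_lock(&br->hash_lock);
	fdb = fdb_create(br, source, addr, vid, 0, 0); /* NULL on ENOMEM or lost race */
	if (fdb)
		fdb_notify(br, fdb, RTM_NEWNEIGH);
	spin_unlock(&br->hash_lock);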
@@ -506,7 +495,6 @@ static struct net_bridge_fdb_entry *fdb_create(struct hlist_head *head,
 static int fdb_insert(struct net_bridge *br, struct net_bridge_port *source,
 		  const unsigned char *addr, u16 vid)
 {
-	struct hlist_head *head = &br->hash[br_mac_hash(addr, vid)];
 	struct net_bridge_fdb_entry *fdb;
 
 	if (!is_valid_ether_addr(addr))
@@ -524,7 +512,7 @@ static int fdb_insert(struct net_bridge *br, struct net_bridge_port *source,
 		fdb_delete(br, fdb);
 	}
 
-	fdb = fdb_create(head, source, addr, vid, 1, 1);
+	fdb = fdb_create(br, source, addr, vid, 1, 1);
 	if (!fdb)
 		return -ENOMEM;
 
@@ -548,7 +536,6 @@ int br_fdb_insert(struct net_bridge *br, struct net_bridge_port *source,
 void br_fdb_update(struct net_bridge *br, struct net_bridge_port *source,
 		   const unsigned char *addr, u16 vid, bool added_by_user)
 {
-	struct hlist_head *head = &br->hash[br_mac_hash(addr, vid)];
 	struct net_bridge_fdb_entry *fdb;
 	bool fdb_modified = false;
 
@@ -561,7 +548,7 @@ void br_fdb_update(struct net_bridge *br, struct net_bridge_port *source,
 	      source->state == BR_STATE_FORWARDING))
 		return;
 
-	fdb = fdb_find_rcu(head, addr, vid);
+	fdb = fdb_find_rcu(&br->fdb_hash_tbl, addr, vid);
 	if (likely(fdb)) {
 		/* attempt to update an entry for a local interface */
 		if (unlikely(fdb->is_local)) {
@@ -590,14 +577,13 @@ void br_fdb_update(struct net_bridge *br, struct net_bridge_port *source,
 		}
 	} else {
 		spin_lock(&br->hash_lock);
-		if (likely(!fdb_find_rcu(head, addr, vid))) {
-			fdb = fdb_create(head, source, addr, vid, 0, 0);
-			if (fdb) {
-				if (unlikely(added_by_user))
-					fdb->added_by_user = 1;
-				trace_br_fdb_update(br, source, addr, vid, added_by_user);
-				fdb_notify(br, fdb, RTM_NEWNEIGH);
-			}
+		fdb = fdb_create(br, source, addr, vid, 0, 0);
+		if (fdb) {
+			if (unlikely(added_by_user))
+				fdb->added_by_user = 1;
+			trace_br_fdb_update(br, source, addr, vid,
+					    added_by_user);
+			fdb_notify(br, fdb, RTM_NEWNEIGH);
 		}
 		/* else we lost the race and someone else inserted
 		 * it first; don't bother updating
@@ -646,7 +632,7 @@ static int fdb_fill_info(struct sk_buff *skb, const struct net_bridge *br,
 	if (fdb->added_by_external_learn)
 		ndm->ndm_flags |= NTF_EXT_LEARNED;
 
-	if (nla_put(skb, NDA_LLADDR, ETH_ALEN, &fdb->addr))
+	if (nla_put(skb, NDA_LLADDR, ETH_ALEN, &fdb->key.addr))
 		goto nla_put_failure;
 	if (nla_put_u32(skb, NDA_MASTER, br->dev->ifindex))
 		goto nla_put_failure;
@@ -657,7 +643,8 @@ static int fdb_fill_info(struct sk_buff *skb, const struct net_bridge *br,
 	if (nla_put(skb, NDA_CACHEINFO, sizeof(ci), &ci))
 		goto nla_put_failure;
 
-	if (fdb->vlan_id && nla_put(skb, NDA_VLAN, sizeof(u16), &fdb->vlan_id))
+	if (fdb->key.vlan_id && nla_put(skb, NDA_VLAN, sizeof(u16),
+					&fdb->key.vlan_id))
 		goto nla_put_failure;
 
 	nlmsg_end(skb, nlh);
@@ -711,54 +698,48 @@ int br_fdb_dump(struct sk_buff *skb,
 		int *idx)
 {
 	struct net_bridge *br = netdev_priv(dev);
+	struct net_bridge_fdb_entry *f;
 	int err = 0;
-	int i;
 
 	if (!(dev->priv_flags & IFF_EBRIDGE))
-		goto out;
+		return err;
 
 	if (!filter_dev) {
 		err = ndo_dflt_fdb_dump(skb, cb, dev, NULL, idx);
 		if (err < 0)
-			goto out;
+			return err;
 	}
 
-	for (i = 0; i < BR_HASH_SIZE; i++) {
-		struct net_bridge_fdb_entry *f;
-
-		hlist_for_each_entry_rcu(f, &br->hash[i], hlist) {
-
-			if (*idx < cb->args[2])
+	rcu_read_lock();
+	hlist_for_each_entry_rcu(f, &br->fdb_list, fdb_node) {
+		if (*idx < cb->args[2])
+			goto skip;
+		if (filter_dev && (!f->dst || f->dst->dev != filter_dev)) {
+			if (filter_dev != dev)
 				goto skip;
-
-			if (filter_dev &&
-			    (!f->dst || f->dst->dev != filter_dev)) {
-				if (filter_dev != dev)
-					goto skip;
-				/* !f->dst is a special case for bridge
-				 * It means the MAC belongs to the bridge
-				 * Therefore need a little more filtering
-				 * we only want to dump the !f->dst case
-				 */
-				if (f->dst)
-					goto skip;
-			}
-			if (!filter_dev && f->dst)
+			/* !f->dst is a special case for bridge
+			 * It means the MAC belongs to the bridge
+			 * Therefore need a little more filtering
+			 * we only want to dump the !f->dst case
+			 */
+			if (f->dst)
 				goto skip;
-
-			err = fdb_fill_info(skb, br, f,
-					    NETLINK_CB(cb->skb).portid,
-					    cb->nlh->nlmsg_seq,
-					    RTM_NEWNEIGH,
-					    NLM_F_MULTI);
-			if (err < 0)
-				goto out;
-skip:
-			*idx += 1;
 		}
+		if (!filter_dev && f->dst)
+			goto skip;
+
+		err = fdb_fill_info(skb, br, f,
+				    NETLINK_CB(cb->skb).portid,
+				    cb->nlh->nlmsg_seq,
+				    RTM_NEWNEIGH,
+				    NLM_F_MULTI);
+		if (err < 0)
+			break;
+skip:
+		*idx += 1;
 	}
+	rcu_read_unlock();
 
-out:
 	return err;
 }
 
@@ -766,7 +747,6 @@ out:
 static int fdb_add_entry(struct net_bridge *br, struct net_bridge_port *source,
 			 const __u8 *addr, __u16 state, __u16 flags, __u16 vid)
 {
-	struct hlist_head *head = &br->hash[br_mac_hash(addr, vid)];
 	struct net_bridge_fdb_entry *fdb;
 	bool modified = false;
 
@@ -787,7 +767,7 @@ static int fdb_add_entry(struct net_bridge *br, struct net_bridge_port *source,
 		if (!(flags & NLM_F_CREATE))
 			return -ENOENT;
 
-		fdb = fdb_create(head, source, addr, vid, 0, 0);
+		fdb = fdb_create(br, source, addr, vid, 0, 0);
 		if (!fdb)
 			return -ENOMEM;
 
@@ -1012,65 +992,60 @@ int br_fdb_delete(struct ndmsg *ndm, struct nlattr *tb[],
 
 int br_fdb_sync_static(struct net_bridge *br, struct net_bridge_port *p)
 {
-	struct net_bridge_fdb_entry *fdb, *tmp;
-	int i;
+	struct net_bridge_fdb_entry *f, *tmp;
 	int err;
 
 	ASSERT_RTNL();
 
-	for (i = 0; i < BR_HASH_SIZE; i++) {
-		hlist_for_each_entry(fdb, &br->hash[i], hlist) {
-			/* We only care for static entries */
-			if (!fdb->is_static)
-				continue;
-
-			err = dev_uc_add(p->dev, fdb->addr.addr);
-			if (err)
-				goto rollback;
-		}
+	/* the key here is that static entries change only under rtnl */
+	rcu_read_lock();
+	hlist_for_each_entry_rcu(f, &br->fdb_list, fdb_node) {
+		/* We only care for static entries */
+		if (!f->is_static)
+			continue;
+		err = dev_uc_add(p->dev, f->key.addr.addr);
+		if (err)
+			goto rollback;
 	}
-	return 0;
+done:
+	rcu_read_unlock();
 
-rollback:
-	for (i = 0; i < BR_HASH_SIZE; i++) {
-		hlist_for_each_entry(tmp, &br->hash[i], hlist) {
-			/* If we reached the fdb that failed, we can stop */
-			if (tmp == fdb)
-				break;
-
-			/* We only care for static entries */
-			if (!tmp->is_static)
-				continue;
+	return err;
 
-			dev_uc_del(p->dev, tmp->addr.addr);
-		}
+rollback:
+	hlist_for_each_entry_rcu(tmp, &br->fdb_list, fdb_node) {
+		/* We only care for static entries */
+		if (!tmp->is_static)
+			continue;
+		if (tmp == f)
+			break;
+		dev_uc_del(p->dev, tmp->key.addr.addr);
 	}
-	return err;
+
+	goto done;
 }
 
 void br_fdb_unsync_static(struct net_bridge *br, struct net_bridge_port *p)
 {
-	struct net_bridge_fdb_entry *fdb;
-	int i;
+	struct net_bridge_fdb_entry *f;
 
 	ASSERT_RTNL();
 
-	for (i = 0; i < BR_HASH_SIZE; i++) {
-		hlist_for_each_entry_rcu(fdb, &br->hash[i], hlist) {
-			/* We only care for static entries */
-			if (!fdb->is_static)
-				continue;
+	rcu_read_lock();
+	hlist_for_each_entry_rcu(f, &br->fdb_list, fdb_node) {
+		/* We only care for static entries */
+		if (!f->is_static)
+			continue;
 
-			dev_uc_del(p->dev, fdb->addr.addr);
-		}
+		dev_uc_del(p->dev, f->key.addr.addr);
 	}
+	rcu_read_unlock();
 }
 
 int br_fdb_external_learn_add(struct net_bridge *br, struct net_bridge_port *p,
 			      const unsigned char *addr, u16 vid)
 {
 	struct net_bridge_fdb_entry *fdb;
-	struct hlist_head *head;
 	bool modified = false;
 	int err = 0;
 
@@ -1078,10 +1053,9 @@ int br_fdb_external_learn_add(struct net_bridge *br, struct net_bridge_port *p,
 
 	spin_lock_bh(&br->hash_lock);
 
-	head = &br->hash[br_mac_hash(addr, vid)];
 	fdb = br_fdb_find(br, addr, vid);
 	if (!fdb) {
-		fdb = fdb_create(head, p, addr, vid, 0, 0);
+		fdb = fdb_create(br, p, addr, vid, 0, 0);
 		if (!fdb) {
 			err = -ENOMEM;
 			goto err_unlock;
diff --git a/net/bridge/br_mdb.c b/net/bridge/br_mdb.c
index b0f4c734900b..6d9f48bd374a 100644
--- a/net/bridge/br_mdb.c
+++ b/net/bridge/br_mdb.c
@@ -760,9 +760,9 @@ static int br_mdb_del(struct sk_buff *skb, struct nlmsghdr *nlh,
 
 void br_mdb_init(void)
 {
-	rtnl_register(PF_BRIDGE, RTM_GETMDB, NULL, br_mdb_dump, 0);
-	rtnl_register(PF_BRIDGE, RTM_NEWMDB, br_mdb_add, NULL, 0);
-	rtnl_register(PF_BRIDGE, RTM_DELMDB, br_mdb_del, NULL, 0);
+	rtnl_register_module(THIS_MODULE, PF_BRIDGE, RTM_GETMDB, NULL, br_mdb_dump, 0);
+	rtnl_register_module(THIS_MODULE, PF_BRIDGE, RTM_NEWMDB, br_mdb_add, NULL, 0);
+	rtnl_register_module(THIS_MODULE, PF_BRIDGE, RTM_DELMDB, br_mdb_del, NULL, 0);
 }
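rtnl_register_module() differs from plain rtnl_register() in two ways: it records the owning module so rtnetlink can take a module reference while a handler runs, and it returns an error the caller can check (rtnl_register() used to panic on failure). br_mdb_init() still ignores the return values; a caller that wants to propagate failure follows the cgw_module_init() pattern further down in this patch, roughly:

	err = rtnl_register_module(THIS_MODULE, PF_BRIDGE, RTM_GETMDB,
				   NULL, br_mdb_dump, 0);
	if (err)
		return err; /* hypothetical: br_mdb_init() is void today */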
 
 void br_mdb_uninit(void)
diff --git a/net/bridge/br_netfilter_hooks.c b/net/bridge/br_netfilter_hooks.c
index c2eea1b8737a..27f1d4f2114a 100644
--- a/net/bridge/br_netfilter_hooks.c
+++ b/net/bridge/br_netfilter_hooks.c
@@ -991,7 +991,7 @@ int br_nf_hook_thresh(unsigned int hook, struct net *net,
 	unsigned int i;
 	int ret;
 
-	e = rcu_dereference(net->nf.hooks[NFPROTO_BRIDGE][hook]);
+	e = rcu_dereference(net->nf.hooks_bridge[hook]);
 	if (!e)
 		return okfn(net, sk, skb);
 
diff --git a/net/bridge/br_nf_core.c b/net/bridge/br_nf_core.c
index 20cbb727df4d..8e2d7cfa4e16 100644
--- a/net/bridge/br_nf_core.c
+++ b/net/bridge/br_nf_core.c
@@ -78,7 +78,6 @@ void br_netfilter_rtable_init(struct net_bridge *br)
 
 	atomic_set(&rt->dst.__refcnt, 1);
 	rt->dst.dev = br->dev;
-	rt->dst.path = &rt->dst;
 	dst_init_metrics(&rt->dst, br_dst_default_metrics, true);
 	rt->dst.flags	= DST_NOXFRM | DST_FAKE_RTABLE;
 	rt->dst.ops = &fake_dst_ops;
diff --git a/net/bridge/br_private.h b/net/bridge/br_private.h
index 1312b8d20ec3..8e13a64d8c99 100644
--- a/net/bridge/br_private.h
+++ b/net/bridge/br_private.h
@@ -168,12 +168,17 @@ struct net_bridge_vlan_group {
 	u16				pvid;
 };
 
+struct net_bridge_fdb_key {
+	mac_addr addr;
+	u16 vlan_id;
+};
+
 struct net_bridge_fdb_entry {
-	struct hlist_node		hlist;
+	struct rhash_head		rhnode;
 	struct net_bridge_port		*dst;
 
-	mac_addr			addr;
-	__u16				vlan_id;
+	struct net_bridge_fdb_key	key;
+	struct hlist_node		fdb_node;
 	unsigned char			is_local:1,
 					is_static:1,
 					added_by_user:1,
@@ -315,7 +320,7 @@ struct net_bridge {
 	struct net_bridge_vlan_group	__rcu *vlgrp;
 #endif
 
-	struct hlist_head		hash[BR_HASH_SIZE];
+	struct rhashtable		fdb_hash_tbl;
 #if IS_ENABLED(CONFIG_BRIDGE_NETFILTER)
 	union {
 		struct rtable		fake_rtable;
@@ -405,6 +410,7 @@ struct net_bridge {
 	int offload_fwd_mark;
 #endif
 	bool				neigh_suppress_enabled;
+	struct hlist_head		fdb_list;
 };
 
 struct br_input_skb_cb {
@@ -515,6 +521,8 @@ static inline void br_netpoll_disable(struct net_bridge_port *p)
 /* br_fdb.c */
 int br_fdb_init(void);
 void br_fdb_fini(void);
+int br_fdb_hash_init(struct net_bridge *br);
+void br_fdb_hash_fini(struct net_bridge *br);
 void br_fdb_flush(struct net_bridge *br);
 void br_fdb_find_delete_local(struct net_bridge *br,
 			      const struct net_bridge_port *p,
@@ -752,7 +760,7 @@ static inline void br_multicast_flood(struct net_bridge_mdb_entry *mdst,
 
 static inline bool br_multicast_is_router(struct net_bridge *br)
 {
-	return 0;
+	return false;
 }
 
 static inline bool br_multicast_querier_exists(struct net_bridge *br,
diff --git a/net/bridge/br_switchdev.c b/net/bridge/br_switchdev.c
index 9700e0f3307b..ee775f4ff76c 100644
--- a/net/bridge/br_switchdev.c
+++ b/net/bridge/br_switchdev.c
@@ -121,13 +121,13 @@ br_switchdev_fdb_notify(const struct net_bridge_fdb_entry *fdb, int type)
 
 	switch (type) {
 	case RTM_DELNEIGH:
-		br_switchdev_fdb_call_notifiers(false, fdb->addr.addr,
-						fdb->vlan_id,
+		br_switchdev_fdb_call_notifiers(false, fdb->key.addr.addr,
+						fdb->key.vlan_id,
 						fdb->dst->dev);
 		break;
 	case RTM_NEWNEIGH:
-		br_switchdev_fdb_call_notifiers(true, fdb->addr.addr,
-						fdb->vlan_id,
+		br_switchdev_fdb_call_notifiers(true, fdb->key.addr.addr,
+						fdb->key.vlan_id,
 						fdb->dst->dev);
 		break;
 	}
diff --git a/net/bridge/br_sysfs_br.c b/net/bridge/br_sysfs_br.c
index 723f25eed8ea..b1be0dcfba6b 100644
--- a/net/bridge/br_sysfs_br.c
+++ b/net/bridge/br_sysfs_br.c
@@ -272,10 +272,7 @@ static ssize_t group_addr_show(struct device *d,
 			       struct device_attribute *attr, char *buf)
 {
 	struct net_bridge *br = to_bridge(d);
-	return sprintf(buf, "%x:%x:%x:%x:%x:%x\n",
-		       br->group_addr[0], br->group_addr[1],
-		       br->group_addr[2], br->group_addr[3],
-		       br->group_addr[4], br->group_addr[5]);
+	return sprintf(buf, "%pM\n", br->group_addr);
 }
 
 static ssize_t group_addr_store(struct device *d,
@@ -284,14 +281,11 @@ static ssize_t group_addr_store(struct device *d,
 {
 	struct net_bridge *br = to_bridge(d);
 	u8 new_addr[6];
-	int i;
 
 	if (!ns_capable(dev_net(br->dev)->user_ns, CAP_NET_ADMIN))
 		return -EPERM;
 
-	if (sscanf(buf, "%hhx:%hhx:%hhx:%hhx:%hhx:%hhx",
-		   &new_addr[0], &new_addr[1], &new_addr[2],
-		   &new_addr[3], &new_addr[4], &new_addr[5]) != 6)
+	if (!mac_pton(buf, new_addr))
 		return -EINVAL;
 
 	if (!is_link_local_ether_addr(new_addr))
@@ -306,8 +300,7 @@ static ssize_t group_addr_store(struct device *d,
 		return restart_syscall();
 
 	spin_lock_bh(&br->lock);
-	for (i = 0; i < 6; i++)
-		br->group_addr[i] = new_addr[i];
+	ether_addr_copy(br->group_addr, new_addr);
 	spin_unlock_bh(&br->lock);
 
 	br->group_addr_set = true;
diff --git a/net/bridge/netfilter/Kconfig b/net/bridge/netfilter/Kconfig
index e7ef1a1ef3a6..225d1668dfdd 100644
--- a/net/bridge/netfilter/Kconfig
+++ b/net/bridge/netfilter/Kconfig
@@ -4,6 +4,7 @@
 #
 menuconfig NF_TABLES_BRIDGE
 	depends on BRIDGE && NETFILTER && NF_TABLES
+	select NETFILTER_FAMILY_BRIDGE
 	tristate "Ethernet Bridge nf_tables support"
 
 if NF_TABLES_BRIDGE
@@ -29,6 +30,7 @@ endif # NF_TABLES_BRIDGE
 menuconfig BRIDGE_NF_EBTABLES
 	tristate "Ethernet Bridge tables (ebtables) support"
 	depends on BRIDGE && NETFILTER && NETFILTER_XTABLES
+	select NETFILTER_FAMILY_BRIDGE
 	help
 	  ebtables is a general, extensible frame/packet identification
 	  framework. Say 'Y' or 'M' here if you want to do Ethernet
diff --git a/net/bridge/netfilter/ebtables.c b/net/bridge/netfilter/ebtables.c
index 37817d25b63d..02c4b409d317 100644
--- a/net/bridge/netfilter/ebtables.c
+++ b/net/bridge/netfilter/ebtables.c
@@ -2445,7 +2445,6 @@ static int __init ebtables_init(void)
 		return ret;
 	}
 
-	printk(KERN_INFO "Ebtables v2.0 registered\n");
 	return 0;
 }
 
@@ -2453,7 +2452,6 @@ static void __exit ebtables_fini(void)
 {
 	nf_unregister_sockopt(&ebt_sockopts);
 	xt_unregister_target(&ebt_standard_target);
-	printk(KERN_INFO "Ebtables v2.0 unregistered\n");
 }
 
 EXPORT_SYMBOL(ebt_register_table);
diff --git a/net/bridge/netfilter/nf_tables_bridge.c b/net/bridge/netfilter/nf_tables_bridge.c
index 97afdc0744e6..5160cf614176 100644
--- a/net/bridge/netfilter/nf_tables_bridge.c
+++ b/net/bridge/netfilter/nf_tables_bridge.c
@@ -25,63 +25,23 @@ nft_do_chain_bridge(void *priv,
 {
 	struct nft_pktinfo pkt;
 
+	nft_set_pktinfo(&pkt, skb, state);
+
 	switch (eth_hdr(skb)->h_proto) {
 	case htons(ETH_P_IP):
-		nft_set_pktinfo_ipv4_validate(&pkt, skb, state);
+		nft_set_pktinfo_ipv4_validate(&pkt, skb);
 		break;
 	case htons(ETH_P_IPV6):
-		nft_set_pktinfo_ipv6_validate(&pkt, skb, state);
+		nft_set_pktinfo_ipv6_validate(&pkt, skb);
 		break;
 	default:
-		nft_set_pktinfo_unspec(&pkt, skb, state);
+		nft_set_pktinfo_unspec(&pkt, skb);
 		break;
 	}
 
 	return nft_do_chain(&pkt, priv);
 }
 
-static struct nft_af_info nft_af_bridge __read_mostly = {
-	.family		= NFPROTO_BRIDGE,
-	.nhooks		= NF_BR_NUMHOOKS,
-	.owner		= THIS_MODULE,
-	.nops		= 1,
-	.hooks		= {
-		[NF_BR_PRE_ROUTING]	= nft_do_chain_bridge,
-		[NF_BR_LOCAL_IN]	= nft_do_chain_bridge,
-		[NF_BR_FORWARD]		= nft_do_chain_bridge,
-		[NF_BR_LOCAL_OUT]	= nft_do_chain_bridge,
-		[NF_BR_POST_ROUTING]	= nft_do_chain_bridge,
-	},
-};
-
-static int nf_tables_bridge_init_net(struct net *net)
-{
-	net->nft.bridge = kmalloc(sizeof(struct nft_af_info), GFP_KERNEL);
-	if (net->nft.bridge == NULL)
-		return -ENOMEM;
-
-	memcpy(net->nft.bridge, &nft_af_bridge, sizeof(nft_af_bridge));
-
-	if (nft_register_afinfo(net, net->nft.bridge) < 0)
-		goto err;
-
-	return 0;
-err:
-	kfree(net->nft.bridge);
-	return -ENOMEM;
-}
-
-static void nf_tables_bridge_exit_net(struct net *net)
-{
-	nft_unregister_afinfo(net, net->nft.bridge);
-	kfree(net->nft.bridge);
-}
-
-static struct pernet_operations nf_tables_bridge_net_ops = {
-	.init	= nf_tables_bridge_init_net,
-	.exit	= nf_tables_bridge_exit_net,
-};
-
 static const struct nf_chain_type filter_bridge = {
 	.name		= "filter",
 	.type		= NFT_CHAIN_T_DEFAULT,
@@ -92,75 +52,23 @@ static const struct nf_chain_type filter_bridge = {
 			  (1 << NF_BR_FORWARD) |
 			  (1 << NF_BR_LOCAL_OUT) |
 			  (1 << NF_BR_POST_ROUTING),
-};
-
-static void nf_br_saveroute(const struct sk_buff *skb,
-			    struct nf_queue_entry *entry)
-{
-}
-
-static int nf_br_reroute(struct net *net, struct sk_buff *skb,
-			 const struct nf_queue_entry *entry)
-{
-	return 0;
-}
-
-static __sum16 nf_br_checksum(struct sk_buff *skb, unsigned int hook,
-			      unsigned int dataoff, u_int8_t protocol)
-{
-	return 0;
-}
-
-static __sum16 nf_br_checksum_partial(struct sk_buff *skb, unsigned int hook,
-				      unsigned int dataoff, unsigned int len,
-				      u_int8_t protocol)
-{
-	return 0;
-}
-
-static int nf_br_route(struct net *net, struct dst_entry **dst,
-		       struct flowi *fl, bool strict __always_unused)
-{
-	return 0;
-}
-
-static const struct nf_afinfo nf_br_afinfo = {
-	.family                 = AF_BRIDGE,
-	.checksum               = nf_br_checksum,
-	.checksum_partial       = nf_br_checksum_partial,
-	.route                  = nf_br_route,
-	.saveroute              = nf_br_saveroute,
-	.reroute                = nf_br_reroute,
-	.route_key_size         = 0,
+	.hooks		= {
+		[NF_BR_PRE_ROUTING]	= nft_do_chain_bridge,
+		[NF_BR_LOCAL_IN]	= nft_do_chain_bridge,
+		[NF_BR_FORWARD]		= nft_do_chain_bridge,
+		[NF_BR_LOCAL_OUT]	= nft_do_chain_bridge,
+		[NF_BR_POST_ROUTING]	= nft_do_chain_bridge,
+	},
 };
 
 static int __init nf_tables_bridge_init(void)
 {
-	int ret;
-
-	nf_register_afinfo(&nf_br_afinfo);
-	ret = nft_register_chain_type(&filter_bridge);
-	if (ret < 0)
-		goto err1;
-
-	ret = register_pernet_subsys(&nf_tables_bridge_net_ops);
-	if (ret < 0)
-		goto err2;
-
-	return ret;
-
-err2:
-	nft_unregister_chain_type(&filter_bridge);
-err1:
-	nf_unregister_afinfo(&nf_br_afinfo);
-	return ret;
+	return nft_register_chain_type(&filter_bridge);
 }
 
 static void __exit nf_tables_bridge_exit(void)
 {
-	unregister_pernet_subsys(&nf_tables_bridge_net_ops);
 	nft_unregister_chain_type(&filter_bridge);
-	nf_unregister_afinfo(&nf_br_afinfo);
 }
 
 module_init(nf_tables_bridge_init);
@@ -168,4 +76,4 @@ module_exit(nf_tables_bridge_exit);
 
 MODULE_LICENSE("GPL");
 MODULE_AUTHOR("Patrick McHardy <kaber@trash.net>");
-MODULE_ALIAS_NFT_FAMILY(AF_BRIDGE);
+MODULE_ALIAS_NFT_CHAIN(AF_BRIDGE, "filter");
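With the per-family afinfo boilerplate gone, the module is auto-loaded per chain type rather than per family: MODULE_ALIAS_NFT_CHAIN(AF_BRIDGE, "filter") provides the "nft-chain-<family>-<name>" alias that the nf_tables core request_module()s when a chain of that type is first created. Paraphrasing the macro from nf_tables.h:

	#define MODULE_ALIAS_NFT_CHAIN(family, name) \
		MODULE_ALIAS("nft-chain-" __stringify(family) "-" name)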
diff --git a/net/caif/cfctrl.c b/net/caif/cfctrl.c
index 655ed7032150..a1e85f032108 100644
--- a/net/caif/cfctrl.c
+++ b/net/caif/cfctrl.c
@@ -352,15 +352,14 @@ static int cfctrl_recv(struct cflayer *layer, struct cfpkt *pkt)
 	u8 cmdrsp;
 	u8 cmd;
 	int ret = -1;
-	u16 tmp16;
 	u8 len;
 	u8 param[255];
-	u8 linkid;
+	u8 linkid = 0;
 	struct cfctrl *cfctrl = container_obj(layer);
 	struct cfctrl_request_info rsp, *req;
 
 
-	cfpkt_extr_head(pkt, &cmdrsp, 1);
+	cmdrsp = cfpkt_extr_head_u8(pkt);
 	cmd = cmdrsp & CFCTRL_CMD_MASK;
 	if (cmd != CFCTRL_CMD_LINK_ERR
 	    && CFCTRL_RSP_BIT != (CFCTRL_RSP_BIT & cmdrsp)
@@ -378,13 +377,12 @@ static int cfctrl_recv(struct cflayer *layer, struct cfpkt *pkt)
 			u8 physlinkid;
 			u8 prio;
 			u8 tmp;
-			u32 tmp32;
 			u8 *cp;
 			int i;
 			struct cfctrl_link_param linkparam;
 			memset(&linkparam, 0, sizeof(linkparam));
 
-			cfpkt_extr_head(pkt, &tmp, 1);
+			tmp = cfpkt_extr_head_u8(pkt);
 
 			serv = tmp & CFCTRL_SRV_MASK;
 			linkparam.linktype = serv;
@@ -392,13 +390,13 @@ static int cfctrl_recv(struct cflayer *layer, struct cfpkt *pkt)
 			servtype = tmp >> 4;
 			linkparam.chtype = servtype;
 
-			cfpkt_extr_head(pkt, &tmp, 1);
+			tmp = cfpkt_extr_head_u8(pkt);
 			physlinkid = tmp & 0x07;
 			prio = tmp >> 3;
 
 			linkparam.priority = prio;
 			linkparam.phyid = physlinkid;
-			cfpkt_extr_head(pkt, &endpoint, 1);
+			endpoint = cfpkt_extr_head_u8(pkt);
 			linkparam.endpoint = endpoint & 0x03;
 
 			switch (serv) {
@@ -407,45 +405,43 @@ static int cfctrl_recv(struct cflayer *layer, struct cfpkt *pkt)
 				if (CFCTRL_ERR_BIT & cmdrsp)
 					break;
 				/* Link ID */
-				cfpkt_extr_head(pkt, &linkid, 1);
+				linkid = cfpkt_extr_head_u8(pkt);
 				break;
 			case CFCTRL_SRV_VIDEO:
-				cfpkt_extr_head(pkt, &tmp, 1);
+				tmp = cfpkt_extr_head_u8(pkt);
 				linkparam.u.video.connid = tmp;
 				if (CFCTRL_ERR_BIT & cmdrsp)
 					break;
 				/* Link ID */
-				cfpkt_extr_head(pkt, &linkid, 1);
+				linkid = cfpkt_extr_head_u8(pkt);
 				break;
 
 			case CFCTRL_SRV_DATAGRAM:
-				cfpkt_extr_head(pkt, &tmp32, 4);
 				linkparam.u.datagram.connid =
-				    le32_to_cpu(tmp32);
+				    cfpkt_extr_head_u32(pkt);
 				if (CFCTRL_ERR_BIT & cmdrsp)
 					break;
 				/* Link ID */
-				cfpkt_extr_head(pkt, &linkid, 1);
+				linkid = cfpkt_extr_head_u8(pkt);
 				break;
 			case CFCTRL_SRV_RFM:
 				/* Construct a frame, convert
 				 * DatagramConnectionID
 				 * to network format long and copy it out...
 				 */
-				cfpkt_extr_head(pkt, &tmp32, 4);
 				linkparam.u.rfm.connid =
-				  le32_to_cpu(tmp32);
+				    cfpkt_extr_head_u32(pkt);
 				cp = (u8 *) linkparam.u.rfm.volume;
-				for (cfpkt_extr_head(pkt, &tmp, 1);
+				for (tmp = cfpkt_extr_head_u8(pkt);
 				     cfpkt_more(pkt) && tmp != '\0';
-				     cfpkt_extr_head(pkt, &tmp, 1))
+				     tmp = cfpkt_extr_head_u8(pkt))
 					*cp++ = tmp;
 				*cp = '\0';
 
 				if (CFCTRL_ERR_BIT & cmdrsp)
 					break;
 				/* Link ID */
-				cfpkt_extr_head(pkt, &linkid, 1);
+				linkid = cfpkt_extr_head_u8(pkt);
 
 				break;
 			case CFCTRL_SRV_UTIL:
@@ -454,13 +450,11 @@ static int cfctrl_recv(struct cflayer *layer, struct cfpkt *pkt)
 				 * to network format long and copy it out...
 				 */
 				/* Fifosize KB */
-				cfpkt_extr_head(pkt, &tmp16, 2);
 				linkparam.u.utility.fifosize_kb =
-				    le16_to_cpu(tmp16);
+				    cfpkt_extr_head_u16(pkt);
 				/* Fifosize bufs */
-				cfpkt_extr_head(pkt, &tmp16, 2);
 				linkparam.u.utility.fifosize_bufs =
-				    le16_to_cpu(tmp16);
+				    cfpkt_extr_head_u16(pkt);
 				/* name */
 				cp = (u8 *) linkparam.u.utility.name;
 				caif_assert(sizeof(linkparam.u.utility.name)
@@ -468,24 +462,24 @@ static int cfctrl_recv(struct cflayer *layer, struct cfpkt *pkt)
 				for (i = 0;
 				     i < UTILITY_NAME_LENGTH
 				     && cfpkt_more(pkt); i++) {
-					cfpkt_extr_head(pkt, &tmp, 1);
+					tmp = cfpkt_extr_head_u8(pkt);
 					*cp++ = tmp;
 				}
 				/* Length */
-				cfpkt_extr_head(pkt, &len, 1);
+				len = cfpkt_extr_head_u8(pkt);
 				linkparam.u.utility.paramlen = len;
 				/* Param Data */
 				cp = linkparam.u.utility.params;
 				while (cfpkt_more(pkt) && len--) {
-					cfpkt_extr_head(pkt, &tmp, 1);
+					tmp = cfpkt_extr_head_u8(pkt);
 					*cp++ = tmp;
 				}
 				if (CFCTRL_ERR_BIT & cmdrsp)
 					break;
 				/* Link ID */
-				cfpkt_extr_head(pkt, &linkid, 1);
+				linkid = cfpkt_extr_head_u8(pkt);
 				/* Length */
-				cfpkt_extr_head(pkt, &len, 1);
+				len = cfpkt_extr_head_u8(pkt);
 				/* Param Data */
 				cfpkt_extr_head(pkt, &param, len);
 				break;
@@ -522,7 +516,7 @@ static int cfctrl_recv(struct cflayer *layer, struct cfpkt *pkt)
 		}
 		break;
 	case CFCTRL_CMD_LINK_DESTROY:
-		cfpkt_extr_head(pkt, &linkid, 1);
+		linkid = cfpkt_extr_head_u8(pkt);
 		cfctrl->res.linkdestroy_rsp(cfctrl->serv.layer.up, linkid);
 		break;
 	case CFCTRL_CMD_LINK_ERR:
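The new cfpkt_extr_head_u8/u16/u32() helpers return the extracted value directly and fold in the little-endian conversion that callers previously did by hand with a temporary and le16_to_cpu()/le32_to_cpu(). A sketch of the presumed u16 variant (names as used above; body assumed, the real definition ships with cfpkt):

	static inline u16 cfpkt_extr_head_u16(struct cfpkt *pkt)
	{
		__le16 tmp;

		cfpkt_extr_head(pkt, &tmp, 2);
		return le16_to_cpu(tmp);
	}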
diff --git a/net/caif/cfpkt_skbuff.c b/net/caif/cfpkt_skbuff.c
index 71b6ab240dea..38c2b7a890dd 100644
--- a/net/caif/cfpkt_skbuff.c
+++ b/net/caif/cfpkt_skbuff.c
@@ -8,7 +8,6 @@
 
 #include <linux/string.h>
 #include <linux/skbuff.h>
-#include <linux/hardirq.h>
 #include <linux/export.h>
 #include <net/caif/cfpkt.h>
 
diff --git a/net/caif/chnl_net.c b/net/caif/chnl_net.c
index 922ac1d605b3..53ecda10b790 100644
--- a/net/caif/chnl_net.c
+++ b/net/caif/chnl_net.c
@@ -8,7 +8,6 @@
 #define pr_fmt(fmt) KBUILD_MODNAME ":%s(): " fmt, __func__
 
 #include <linux/fs.h>
-#include <linux/hardirq.h>
 #include <linux/init.h>
 #include <linux/module.h>
 #include <linux/netdevice.h>
diff --git a/net/can/Kconfig b/net/can/Kconfig
index a15c0e0d1fc7..a4399be54ff4 100644
--- a/net/can/Kconfig
+++ b/net/can/Kconfig
@@ -11,7 +11,7 @@ menuconfig CAN
 	  1991, mainly for automotive, but now widely used in marine
 	  (NMEA2000), industrial, and medical applications.
 	  More information on the CAN network protocol family PF_CAN
-	  is contained in <Documentation/networking/can.txt>.
+	  is contained in <Documentation/networking/can.rst>.
 
 	  If you want CAN support you should say Y here and also to the
 	  specific driver for your controller(s) below.
diff --git a/net/can/af_can.c b/net/can/af_can.c
index 4d7f988a3130..6da324550eec 100644
--- a/net/can/af_can.c
+++ b/net/can/af_can.c
@@ -321,13 +321,13 @@ EXPORT_SYMBOL(can_send);
  * af_can rx path
  */
 
-static struct dev_rcv_lists *find_dev_rcv_lists(struct net *net,
+static struct can_dev_rcv_lists *find_dev_rcv_lists(struct net *net,
 						struct net_device *dev)
 {
 	if (!dev)
 		return net->can.can_rx_alldev_list;
 	else
-		return (struct dev_rcv_lists *)dev->ml_priv;
+		return (struct can_dev_rcv_lists *)dev->ml_priv;
 }
 
 /**
@@ -381,7 +381,7 @@ static unsigned int effhash(canid_t can_id)
  *  Reduced can_id to have a preprocessed filter compare value.
  */
 static struct hlist_head *find_rcv_list(canid_t *can_id, canid_t *mask,
-					struct dev_rcv_lists *d)
+					struct can_dev_rcv_lists *d)
 {
 	canid_t inv = *can_id & CAN_INV_FILTER; /* save flag before masking */
 
@@ -464,7 +464,7 @@ int can_rx_register(struct net *net, struct net_device *dev, canid_t can_id,
 {
 	struct receiver *r;
 	struct hlist_head *rl;
-	struct dev_rcv_lists *d;
+	struct can_dev_rcv_lists *d;
 	struct s_pstats *can_pstats = net->can.can_pstats;
 	int err = 0;
 
@@ -542,7 +542,7 @@ void can_rx_unregister(struct net *net, struct net_device *dev, canid_t can_id,
 	struct receiver *r = NULL;
 	struct hlist_head *rl;
 	struct s_pstats *can_pstats = net->can.can_pstats;
-	struct dev_rcv_lists *d;
+	struct can_dev_rcv_lists *d;
 
 	if (dev && dev->type != ARPHRD_CAN)
 		return;
@@ -615,7 +615,7 @@ static inline void deliver(struct sk_buff *skb, struct receiver *r)
 	r->matches++;
 }
 
-static int can_rcv_filter(struct dev_rcv_lists *d, struct sk_buff *skb)
+static int can_rcv_filter(struct can_dev_rcv_lists *d, struct sk_buff *skb)
 {
 	struct receiver *r;
 	int matches = 0;
@@ -682,7 +682,7 @@ static int can_rcv_filter(struct dev_rcv_lists *d, struct sk_buff *skb)
 
 static void can_receive(struct sk_buff *skb, struct net_device *dev)
 {
-	struct dev_rcv_lists *d;
+	struct can_dev_rcv_lists *d;
 	struct net *net = dev_net(dev);
 	struct s_stats *can_stats = net->can.can_stats;
 	int matches;
@@ -821,7 +821,7 @@ static int can_notifier(struct notifier_block *nb, unsigned long msg,
 			void *ptr)
 {
 	struct net_device *dev = netdev_notifier_info_to_dev(ptr);
-	struct dev_rcv_lists *d;
+	struct can_dev_rcv_lists *d;
 
 	if (dev->type != ARPHRD_CAN)
 		return NOTIFY_DONE;
@@ -866,7 +866,7 @@ static int can_pernet_init(struct net *net)
 {
 	spin_lock_init(&net->can.can_rcvlists_lock);
 	net->can.can_rx_alldev_list =
-		kzalloc(sizeof(struct dev_rcv_lists), GFP_KERNEL);
+		kzalloc(sizeof(struct can_dev_rcv_lists), GFP_KERNEL);
 	if (!net->can.can_rx_alldev_list)
 		goto out;
 	net->can.can_stats = kzalloc(sizeof(struct s_stats), GFP_KERNEL);
@@ -912,7 +912,7 @@ static void can_pernet_exit(struct net *net)
 	rcu_read_lock();
 	for_each_netdev_rcu(net, dev) {
 		if (dev->type == ARPHRD_CAN && dev->ml_priv) {
-			struct dev_rcv_lists *d = dev->ml_priv;
+			struct can_dev_rcv_lists *d = dev->ml_priv;
 
 			BUG_ON(d->entries);
 			kfree(d);
diff --git a/net/can/af_can.h b/net/can/af_can.h
index eca6463c6213..9cb3719632bd 100644
--- a/net/can/af_can.h
+++ b/net/can/af_can.h
@@ -67,7 +67,7 @@ struct receiver {
 enum { RX_ERR, RX_ALL, RX_FIL, RX_INV, RX_MAX };
 
 /* per device receive filters linked at dev->ml_priv */
-struct dev_rcv_lists {
+struct can_dev_rcv_lists {
 	struct hlist_head rx[RX_MAX];
 	struct hlist_head rx_sff[CAN_SFF_RCV_ARRAY_SZ];
 	struct hlist_head rx_eff[CAN_EFF_RCV_ARRAY_SZ];
diff --git a/net/can/bcm.c b/net/can/bcm.c
index 13690334efa3..ac5e5e34fee3 100644
--- a/net/can/bcm.c
+++ b/net/can/bcm.c
@@ -246,7 +246,6 @@ static int bcm_proc_open(struct inode *inode, struct file *file)
 }
 
 static const struct file_operations bcm_proc_fops = {
-	.owner		= THIS_MODULE,
 	.open		= bcm_proc_open,
 	.read		= seq_read,
 	.llseek		= seq_lseek,
diff --git a/net/can/gw.c b/net/can/gw.c
index 73a02af4b5d7..398dd0395ad9 100644
--- a/net/can/gw.c
+++ b/net/can/gw.c
@@ -1014,6 +1014,8 @@ static struct pernet_operations cangw_pernet_ops = {
 
 static __init int cgw_module_init(void)
 {
+	int ret;
+
 	/* sanitize given module parameter */
 	max_hops = clamp_t(unsigned int, max_hops, CGW_MIN_HOPS, CGW_MAX_HOPS);
 
@@ -1031,15 +1033,19 @@ static __init int cgw_module_init(void)
 	notifier.notifier_call = cgw_notifier;
 	register_netdevice_notifier(&notifier);
 
-	if (__rtnl_register(PF_CAN, RTM_GETROUTE, NULL, cgw_dump_jobs, 0)) {
+	ret = rtnl_register_module(THIS_MODULE, PF_CAN, RTM_GETROUTE,
+				   NULL, cgw_dump_jobs, 0);
+	if (ret) {
 		unregister_netdevice_notifier(&notifier);
 		kmem_cache_destroy(cgw_cache);
 		return -ENOBUFS;
 	}
 
-	/* Only the first call to __rtnl_register can fail */
-	__rtnl_register(PF_CAN, RTM_NEWROUTE, cgw_create_job, NULL, 0);
-	__rtnl_register(PF_CAN, RTM_DELROUTE, cgw_remove_job, NULL, 0);
+	/* Only the first call to rtnl_register_module can fail */
+	rtnl_register_module(THIS_MODULE, PF_CAN, RTM_NEWROUTE,
+			     cgw_create_job, NULL, 0);
+	rtnl_register_module(THIS_MODULE, PF_CAN, RTM_DELROUTE,
+			     cgw_remove_job, NULL, 0);
 
 	return 0;
 }
diff --git a/net/can/proc.c b/net/can/proc.c
index 0c59f876fe6f..fdf704e9bb8c 100644
--- a/net/can/proc.c
+++ b/net/can/proc.c
@@ -276,7 +276,6 @@ static int can_stats_proc_open(struct inode *inode, struct file *file)
 }
 
 static const struct file_operations can_stats_proc_fops = {
-	.owner		= THIS_MODULE,
 	.open		= can_stats_proc_open,
 	.read		= seq_read,
 	.llseek		= seq_lseek,
@@ -310,7 +309,6 @@ static int can_reset_stats_proc_open(struct inode *inode, struct file *file)
 }
 
 static const struct file_operations can_reset_stats_proc_fops = {
-	.owner		= THIS_MODULE,
 	.open		= can_reset_stats_proc_open,
 	.read		= seq_read,
 	.llseek		= seq_lseek,
@@ -329,7 +327,6 @@ static int can_version_proc_open(struct inode *inode, struct file *file)
 }
 
 static const struct file_operations can_version_proc_fops = {
-	.owner		= THIS_MODULE,
 	.open		= can_version_proc_open,
 	.read		= seq_read,
 	.llseek		= seq_lseek,
@@ -338,7 +335,7 @@ static const struct file_operations can_version_proc_fops = {
 
 static inline void can_rcvlist_proc_show_one(struct seq_file *m, int idx,
 					     struct net_device *dev,
-					     struct dev_rcv_lists *d)
+					     struct can_dev_rcv_lists *d)
 {
 	if (!hlist_empty(&d->rx[idx])) {
 		can_print_recv_banner(m);
@@ -353,7 +350,7 @@ static int can_rcvlist_proc_show(struct seq_file *m, void *v)
 	/* double cast to prevent GCC warning */
 	int idx = (int)(long)PDE_DATA(m->file->f_inode);
 	struct net_device *dev;
-	struct dev_rcv_lists *d;
+	struct can_dev_rcv_lists *d;
 	struct net *net = m->private;
 
 	seq_printf(m, "\nreceive list '%s':\n", rx_list_name[idx]);
@@ -382,7 +379,6 @@ static int can_rcvlist_proc_open(struct inode *inode, struct file *file)
 }
 
 static const struct file_operations can_rcvlist_proc_fops = {
-	.owner		= THIS_MODULE,
 	.open		= can_rcvlist_proc_open,
 	.read		= seq_read,
 	.llseek		= seq_lseek,
@@ -417,7 +413,7 @@ static inline void can_rcvlist_proc_show_array(struct seq_file *m,
 static int can_rcvlist_sff_proc_show(struct seq_file *m, void *v)
 {
 	struct net_device *dev;
-	struct dev_rcv_lists *d;
+	struct can_dev_rcv_lists *d;
 	struct net *net = m->private;
 
 	/* RX_SFF */
@@ -450,7 +446,6 @@ static int can_rcvlist_sff_proc_open(struct inode *inode, struct file *file)
 }
 
 static const struct file_operations can_rcvlist_sff_proc_fops = {
-	.owner		= THIS_MODULE,
 	.open		= can_rcvlist_sff_proc_open,
 	.read		= seq_read,
 	.llseek		= seq_lseek,
@@ -461,7 +456,7 @@ static const struct file_operations can_rcvlist_sff_proc_fops = {
 static int can_rcvlist_eff_proc_show(struct seq_file *m, void *v)
 {
 	struct net_device *dev;
-	struct dev_rcv_lists *d;
+	struct can_dev_rcv_lists *d;
 	struct net *net = m->private;
 
 	/* RX_EFF */
@@ -494,7 +489,6 @@ static int can_rcvlist_eff_proc_open(struct inode *inode, struct file *file)
 }
 
 static const struct file_operations can_rcvlist_eff_proc_fops = {
-	.owner		= THIS_MODULE,
 	.open		= can_rcvlist_eff_proc_open,
 	.read		= seq_read,
 	.llseek		= seq_lseek,
diff --git a/net/can/raw.c b/net/can/raw.c
index 864c80dbdb72..f2ecc43376a1 100644
--- a/net/can/raw.c
+++ b/net/can/raw.c
@@ -401,6 +401,8 @@ static int raw_bind(struct socket *sock, struct sockaddr *uaddr, int len)
 
 	if (len < sizeof(*addr))
 		return -EINVAL;
+	if (addr->can_family != AF_CAN)
+		return -EINVAL;
 
 	lock_sock(sk);
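The added check makes raw_bind() reject a sockaddr whose family is not AF_CAN instead of silently accepting it. For reference, a well-formed userspace bind looks like this (standard CAN_RAW usage, not part of the patch; needs <linux/can.h>, <linux/can/raw.h>, <net/if.h>):

	int s = socket(PF_CAN, SOCK_RAW, CAN_RAW);
	struct sockaddr_can addr = {
		.can_family  = AF_CAN,                 /* now enforced by the kernel */
		.can_ifindex = if_nametoindex("can0"), /* 0 would bind to all interfaces */
	};

	bind(s, (struct sockaddr *)&addr, sizeof(addr));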
 
diff --git a/net/core/Makefile b/net/core/Makefile
index 1fd0a9c88b1b..6dbbba8c57ae 100644
--- a/net/core/Makefile
+++ b/net/core/Makefile
@@ -11,7 +11,7 @@ obj-$(CONFIG_SYSCTL) += sysctl_net_core.o
 obj-y		     += dev.o ethtool.o dev_addr_lists.o dst.o netevent.o \
 			neighbour.o rtnetlink.o utils.o link_watch.o filter.o \
 			sock_diag.o dev_ioctl.o tso.o sock_reuseport.o \
-			fib_notifier.o
+			fib_notifier.o xdp.o
 
 obj-y += net-sysfs.o
 obj-$(CONFIG_PROC_FS) += net-procfs.o
diff --git a/net/core/dev.c b/net/core/dev.c
index 613fb4066be7..dda9d7b9a840 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -1554,6 +1554,23 @@ void dev_disable_lro(struct net_device *dev)
 }
 EXPORT_SYMBOL(dev_disable_lro);
 
+/**
+ *	dev_disable_gro_hw - disable HW Generic Receive Offload on a device
+ *	@dev: device
+ *
+ *	Disable HW Generic Receive Offload (GRO_HW) on a net device.  Must be
+ *	called under RTNL.  This is needed if Generic XDP is installed on
+ *	the device.
+ */
+static void dev_disable_gro_hw(struct net_device *dev)
+{
+	dev->wanted_features &= ~NETIF_F_GRO_HW;
+	netdev_update_features(dev);
+
+	if (unlikely(dev->features & NETIF_F_GRO_HW))
+		netdev_WARN(dev, "failed to disable GRO_HW!\n");
+}
+
 static int call_netdevice_notifier(struct notifier_block *nb, unsigned long val,
 				   struct net_device *dev)
 {
@@ -1677,7 +1694,6 @@ EXPORT_SYMBOL(unregister_netdevice_notifier);
 /**
  *	call_netdevice_notifiers_info - call all network notifier blocks
  *	@val: value passed unmodified to notifier function
- *	@dev: net_device pointer passed unmodified to notifier function
  *	@info: notifier information data
  *
  *	Call all network notifier blocks.  Parameters and return value
@@ -2815,7 +2831,7 @@ struct sk_buff *__skb_gso_segment(struct sk_buff *skb,
 
 	segs = skb_mac_gso_segment(skb, features);
 
-	if (unlikely(skb_needs_check(skb, tx_path)))
+	if (unlikely(skb_needs_check(skb, tx_path) && !IS_ERR(segs)))
 		skb_warn_bad_offload(skb);
 
 	return segs;
@@ -3054,7 +3070,7 @@ int skb_csum_hwoffload_help(struct sk_buff *skb,
 }
 EXPORT_SYMBOL(skb_csum_hwoffload_help);
 
-static struct sk_buff *validate_xmit_skb(struct sk_buff *skb, struct net_device *dev)
+static struct sk_buff *validate_xmit_skb(struct sk_buff *skb, struct net_device *dev, bool *again)
 {
 	netdev_features_t features;
 
@@ -3078,9 +3094,6 @@ static struct sk_buff *validate_xmit_skb(struct sk_buff *skb, struct net_device
 		    __skb_linearize(skb))
 			goto out_kfree_skb;
 
-		if (validate_xmit_xfrm(skb, features))
-			goto out_kfree_skb;
-
 		/* If packet is not checksummed and device does not
 		 * support checksumming for this protocol, complete
 		 * checksumming here.
@@ -3097,6 +3110,8 @@ static struct sk_buff *validate_xmit_skb(struct sk_buff *skb, struct net_device
 		}
 	}
 
+	skb = validate_xmit_xfrm(skb, features, again);
+
 	return skb;
 
 out_kfree_skb:
@@ -3106,7 +3121,7 @@ out_null:
 	return NULL;
 }
 
-struct sk_buff *validate_xmit_skb_list(struct sk_buff *skb, struct net_device *dev)
+struct sk_buff *validate_xmit_skb_list(struct sk_buff *skb, struct net_device *dev, bool *again)
 {
 	struct sk_buff *next, *head = NULL, *tail;
 
@@ -3117,7 +3132,7 @@ struct sk_buff *validate_xmit_skb_list(struct sk_buff *skb, struct net_device *d
 		/* in case skb wont be segmented, point to itself */
 		skb->prev = skb;
 
-		skb = validate_xmit_skb(skb, dev);
+		skb = validate_xmit_skb(skb, dev, again);
 		if (!skb)
 			continue;
 
@@ -3185,6 +3200,21 @@ static inline int __dev_xmit_skb(struct sk_buff *skb, struct Qdisc *q,
 	int rc;
 
 	qdisc_calculate_pkt_len(skb, q);
+
+	if (q->flags & TCQ_F_NOLOCK) {
+		if (unlikely(test_bit(__QDISC_STATE_DEACTIVATED, &q->state))) {
+			__qdisc_drop(skb, &to_free);
+			rc = NET_XMIT_DROP;
+		} else {
+			rc = q->enqueue(skb, q, &to_free) & NET_XMIT_MASK;
+			__qdisc_run(q);
+		}
+
+		if (unlikely(to_free))
+			kfree_skb_list(to_free);
+		return rc;
+	}
+
 	/*
 	 * Heuristic to force contended enqueues to serialize on a
 	 * separate lock before trying to get qdisc main lock.
@@ -3215,9 +3245,9 @@ static inline int __dev_xmit_skb(struct sk_buff *skb, struct Qdisc *q,
 				contended = false;
 			}
 			__qdisc_run(q);
-		} else
-			qdisc_run_end(q);
+		}
 
+		qdisc_run_end(q);
 		rc = NET_XMIT_SUCCESS;
 	} else {
 		rc = q->enqueue(skb, q, &to_free) & NET_XMIT_MASK;
@@ -3227,6 +3257,7 @@ static inline int __dev_xmit_skb(struct sk_buff *skb, struct Qdisc *q,
 				contended = false;
 			}
 			__qdisc_run(q);
+			qdisc_run_end(q);
 		}
 	}
 	spin_unlock(root_lock);
@@ -3399,8 +3430,7 @@ struct netdev_queue *netdev_pick_tx(struct net_device *dev,
 		else
 			queue_index = __netdev_pick_tx(dev, skb);
 
-		if (!accel_priv)
-			queue_index = netdev_cap_txqueue(dev, queue_index);
+		queue_index = netdev_cap_txqueue(dev, queue_index);
 	}
 
 	skb_set_queue_mapping(skb, queue_index);
@@ -3439,6 +3469,7 @@ static int __dev_queue_xmit(struct sk_buff *skb, void *accel_priv)
 	struct netdev_queue *txq;
 	struct Qdisc *q;
 	int rc = -ENOMEM;
+	bool again = false;
 
 	skb_reset_mac_header(skb);
 
@@ -3500,7 +3531,7 @@ static int __dev_queue_xmit(struct sk_buff *skb, void *accel_priv)
 				     XMIT_RECURSION_LIMIT))
 				goto recursion_alert;
 
-			skb = validate_xmit_skb(skb, dev);
+			skb = validate_xmit_skb(skb, dev, &again);
 			if (!skb)
 				goto out;
 
@@ -3896,9 +3927,33 @@ drop:
 	return NET_RX_DROP;
 }
 
+static struct netdev_rx_queue *netif_get_rxqueue(struct sk_buff *skb)
+{
+	struct net_device *dev = skb->dev;
+	struct netdev_rx_queue *rxqueue;
+
+	rxqueue = dev->_rx;
+
+	if (skb_rx_queue_recorded(skb)) {
+		u16 index = skb_get_rx_queue(skb);
+
+		if (unlikely(index >= dev->real_num_rx_queues)) {
+			WARN_ONCE(dev->real_num_rx_queues > 1,
+				  "%s received packet on queue %u, but number "
+				  "of RX queues is %u\n",
+				  dev->name, index, dev->real_num_rx_queues);
+
+			return rxqueue; /* Return first rxqueue */
+		}
+		rxqueue += index;
+	}
+	return rxqueue;
+}
+
 static u32 netif_receive_generic_xdp(struct sk_buff *skb,
 				     struct bpf_prog *xdp_prog)
 {
+	struct netdev_rx_queue *rxqueue;
 	u32 metalen, act = XDP_DROP;
 	struct xdp_buff xdp;
 	void *orig_data;
@@ -3942,6 +3997,9 @@ static u32 netif_receive_generic_xdp(struct sk_buff *skb,
 	xdp.data_hard_start = skb->data - skb_headroom(skb);
 	orig_data = xdp.data;
 
+	rxqueue = netif_get_rxqueue(skb);
+	xdp.rxq = &rxqueue->xdp_rxq;
+
 	act = bpf_prog_run_xdp(xdp_prog, &xdp);
 
 	off = xdp.data - orig_data;
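
netif_get_rxqueue() has to tolerate stale queue mappings: if the skb records a queue that no longer exists, it warns and falls back to queue 0 rather than indexing past the array. The clamp in isolation, as a standalone program (types and sizes invented for the demo):

#include <stdio.h>

struct rxq { unsigned int id; };

static struct rxq *get_rxqueue(struct rxq *rx, unsigned int nqueues,
			       int recorded, unsigned int index)
{
	if (recorded) {
		if (index >= nqueues) {
			fprintf(stderr,
				"packet on queue %u but only %u RX queues\n",
				index, nqueues);
			return &rx[0];	/* first queue as safe fallback */
		}
		return &rx[index];
	}
	return &rx[0];
}

int main(void)
{
	struct rxq rx[4] = { {0}, {1}, {2}, {3} };

	printf("queue %u\n", get_rxqueue(rx, 4, 1, 7)->id);	/* clamped: 0 */
	printf("queue %u\n", get_rxqueue(rx, 4, 1, 2)->id);	/* valid: 2 */
	return 0;
}
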
@@ -4166,21 +4224,26 @@ static __latent_entropy void net_tx_action(struct softirq_action *h)
 
 		while (head) {
 			struct Qdisc *q = head;
-			spinlock_t *root_lock;
+			spinlock_t *root_lock = NULL;
 
 			head = head->next_sched;
 
-			root_lock = qdisc_lock(q);
-			spin_lock(root_lock);
+			if (!(q->flags & TCQ_F_NOLOCK)) {
+				root_lock = qdisc_lock(q);
+				spin_lock(root_lock);
+			}
 			/* We need to make sure head->next_sched is read
 			 * before clearing __QDISC_STATE_SCHED
 			 */
 			smp_mb__before_atomic();
 			clear_bit(__QDISC_STATE_SCHED, &q->state);
 			qdisc_run(q);
-			spin_unlock(root_lock);
+			if (root_lock)
+				spin_unlock(root_lock);
 		}
 	}
+
+	xfrm_dev_backlog(sd);
 }
 
 #if IS_ENABLED(CONFIG_BRIDGE) && IS_ENABLED(CONFIG_ATM_LANE)
@@ -4568,6 +4631,7 @@ static int generic_xdp_install(struct net_device *dev, struct netdev_bpf *xdp)
 		} else if (new && !old) {
 			static_key_slow_inc(&generic_xdp_needed);
 			dev_disable_lro(dev);
+			dev_disable_gro_hw(dev);
 		}
 		break;
 
@@ -6371,6 +6435,7 @@ rollback:
  * netdev_upper_dev_link - Add a link to the upper device
  * @dev: device
  * @upper_dev: new upper device
+ * @extack: netlink extended ack
  *
  * Adds a link to device which is upper to this one. The caller must hold
  * the RTNL lock. On a failure a negative errno code is returned.
@@ -6392,6 +6457,7 @@ EXPORT_SYMBOL(netdev_upper_dev_link);
  * @upper_dev: new upper device
  * @upper_priv: upper device private
  * @upper_info: upper info to be passed down via notifier
+ * @extack: netlink extended ack
  *
  * Adds a link to device which is upper to this one. In this case, only
  * one master upper device can be linked, although other non-master devices
@@ -6982,6 +7048,35 @@ int dev_set_mtu(struct net_device *dev, int new_mtu)
 EXPORT_SYMBOL(dev_set_mtu);
 
 /**
+ *	dev_change_tx_queue_len - Change TX queue length of a netdevice
+ *	@dev: device
+ *	@new_len: new tx queue length
+ */
+int dev_change_tx_queue_len(struct net_device *dev, unsigned long new_len)
+{
+	unsigned int orig_len = dev->tx_queue_len;
+	int res;
+
+	if (new_len != (unsigned int)new_len)
+		return -ERANGE;
+
+	if (new_len != orig_len) {
+		dev->tx_queue_len = new_len;
+		res = call_netdevice_notifiers(NETDEV_CHANGE_TX_QUEUE_LEN, dev);
+		res = notifier_to_errno(res);
+		if (res) {
+			netdev_err(dev,
+				   "refused to change device tx_queue_len\n");
+			dev->tx_queue_len = orig_len;
+			return res;
+		}
+		return dev_qdisc_change_tx_queue_len(dev);
+	}
+
+	return 0;
+}
+
+/**
  *	dev_set_group - Change group this device belongs to
  *	@dev: device
  *	@new_group: group this device should belong to
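
Two details in dev_change_tx_queue_len() above are easy to miss: the new_len != (unsigned int)new_len comparison rejects values that would be truncated when stored in the device's unsigned int field, and a notifier veto rolls the field back to its old value before the error is returned. A minimal sketch of that set/notify/rollback shape, with the notifier chain reduced to a stub:

#include <stdio.h>

#define ERANGE_ERR 34			/* stand-in for ERANGE */

static unsigned int tx_queue_len = 1000;

static int notify_change(unsigned int len)
{
	/* stand-in for call_netdevice_notifiers(); a listener may veto */
	return len > 100000 ? -ERANGE_ERR : 0;
}

static int change_tx_queue_len(unsigned long new_len)
{
	unsigned int orig = tx_queue_len;
	int res;

	if (new_len != (unsigned int)new_len)	/* would truncate on store */
		return -ERANGE_ERR;
	if (new_len == orig)
		return 0;

	tx_queue_len = (unsigned int)new_len;
	res = notify_change(tx_queue_len);
	if (res) {				/* vetoed: roll back */
		tx_queue_len = orig;
		return res;
	}
	return 0;
}

int main(void)
{
	printf("%d\n", change_tx_queue_len(5000));	/* 0 */
	printf("%d\n", change_tx_queue_len(1UL << 40));	/* -34 on LP64 */
	return 0;
}
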
@@ -7096,17 +7191,21 @@ int dev_change_proto_down(struct net_device *dev, bool proto_down)
 }
 EXPORT_SYMBOL(dev_change_proto_down);
 
-u8 __dev_xdp_attached(struct net_device *dev, bpf_op_t bpf_op, u32 *prog_id)
+void __dev_xdp_query(struct net_device *dev, bpf_op_t bpf_op,
+		     struct netdev_bpf *xdp)
 {
-	struct netdev_bpf xdp;
-
-	memset(&xdp, 0, sizeof(xdp));
-	xdp.command = XDP_QUERY_PROG;
+	memset(xdp, 0, sizeof(*xdp));
+	xdp->command = XDP_QUERY_PROG;
 
 	/* Query must always succeed. */
-	WARN_ON(bpf_op(dev, &xdp) < 0);
-	if (prog_id)
-		*prog_id = xdp.prog_id;
+	WARN_ON(bpf_op(dev, xdp) < 0);
+}
+
+static u8 __dev_xdp_attached(struct net_device *dev, bpf_op_t bpf_op)
+{
+	struct netdev_bpf xdp;
+
+	__dev_xdp_query(dev, bpf_op, &xdp);
 
 	return xdp.prog_attached;
 }
@@ -7129,6 +7228,27 @@ static int dev_xdp_install(struct net_device *dev, bpf_op_t bpf_op,
 	return bpf_op(dev, &xdp);
 }
 
+static void dev_xdp_uninstall(struct net_device *dev)
+{
+	struct netdev_bpf xdp;
+	bpf_op_t ndo_bpf;
+
+	/* Remove generic XDP */
+	WARN_ON(dev_xdp_install(dev, generic_xdp_install, NULL, 0, NULL));
+
+	/* Remove from the driver */
+	ndo_bpf = dev->netdev_ops->ndo_bpf;
+	if (!ndo_bpf)
+		return;
+
+	__dev_xdp_query(dev, ndo_bpf, &xdp);
+	if (xdp.prog_attached == XDP_ATTACHED_NONE)
+		return;
+
+	/* Program removal should always succeed */
+	WARN_ON(dev_xdp_install(dev, ndo_bpf, NULL, xdp.prog_flags, NULL));
+}
+
 /**
  *	dev_change_xdp_fd - set or clear a bpf program for a device rx path
  *	@dev: device
@@ -7157,10 +7277,10 @@ int dev_change_xdp_fd(struct net_device *dev, struct netlink_ext_ack *extack,
 		bpf_chk = generic_xdp_install;
 
 	if (fd >= 0) {
-		if (bpf_chk && __dev_xdp_attached(dev, bpf_chk, NULL))
+		if (bpf_chk && __dev_xdp_attached(dev, bpf_chk))
 			return -EEXIST;
 		if ((flags & XDP_FLAGS_UPDATE_IF_NOEXIST) &&
-		    __dev_xdp_attached(dev, bpf_op, NULL))
+		    __dev_xdp_attached(dev, bpf_op))
 			return -EBUSY;
 
 		prog = bpf_prog_get_type_dev(fd, BPF_PROG_TYPE_XDP,
@@ -7259,6 +7379,7 @@ static void rollback_registered_many(struct list_head *head)
 		/* Shutdown queueing discipline. */
 		dev_shutdown(dev);
 
+		dev_xdp_uninstall(dev);
 
 		/* Notify protocols, that we are about to destroy
 		 * this device. They should clean all the things.
@@ -7268,7 +7389,7 @@ static void rollback_registered_many(struct list_head *head)
 		if (!dev->rtnl_link_ops ||
 		    dev->rtnl_link_state == RTNL_LINK_INITIALIZED)
 			skb = rtmsg_ifinfo_build_skb(RTM_DELLINK, dev, ~0U, 0,
-						     GFP_KERNEL, NULL);
+						     GFP_KERNEL, NULL, 0);
 
 		/*
 		 *	Flush the unicast and multicast chains
@@ -7402,6 +7523,18 @@ static netdev_features_t netdev_fix_features(struct net_device *dev,
 		features &= ~dev->gso_partial_features;
 	}
 
+	if (!(features & NETIF_F_RXCSUM)) {
+		/* NETIF_F_GRO_HW implies doing RXCSUM since every packet
+		 * successfully merged by hardware must also have the
+		 * checksum verified by hardware.  If the user does not
+		 * want to enable RXCSUM, logically, we should disable GRO_HW.
+		 */
+		if (features & NETIF_F_GRO_HW) {
+			netdev_dbg(dev, "Dropping NETIF_F_GRO_HW since no RXCSUM feature.\n");
+			features &= ~NETIF_F_GRO_HW;
+		}
+	}
+
 	return features;
 }
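
The new fixup expresses a dependency between feature bits: NETIF_F_GRO_HW implies hardware-verified checksums, so it cannot survive RXCSUM being cleared. The same rule as a pure function over a bitmask; the bit values here are invented for the demo, not the kernel's:

#include <stdio.h>

#define F_RXCSUM (1u << 0)
#define F_GRO_HW (1u << 1)

static unsigned int fix_features(unsigned int features)
{
	/* GRO_HW requires RXCSUM: merged packets need verified checksums */
	if (!(features & F_RXCSUM) && (features & F_GRO_HW)) {
		fprintf(stderr, "dropping GRO_HW: no RXCSUM\n");
		features &= ~F_GRO_HW;
	}
	return features;
}

int main(void)
{
	printf("%#x\n", fix_features(F_GRO_HW));		/* 0x0 */
	printf("%#x\n", fix_features(F_GRO_HW | F_RXCSUM));	/* 0x3 */
	return 0;
}
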
 
@@ -7535,12 +7668,12 @@ void netif_stacked_transfer_operstate(const struct net_device *rootdev,
 }
 EXPORT_SYMBOL(netif_stacked_transfer_operstate);
 
-#ifdef CONFIG_SYSFS
 static int netif_alloc_rx_queues(struct net_device *dev)
 {
 	unsigned int i, count = dev->num_rx_queues;
 	struct netdev_rx_queue *rx;
 	size_t sz = count * sizeof(*rx);
+	int err = 0;
 
 	BUG_ON(count < 1);
 
@@ -7550,11 +7683,38 @@ static int netif_alloc_rx_queues(struct net_device *dev)
 
 	dev->_rx = rx;
 
-	for (i = 0; i < count; i++)
+	for (i = 0; i < count; i++) {
 		rx[i].dev = dev;
+
+		/* XDP RX-queue setup */
+		err = xdp_rxq_info_reg(&rx[i].xdp_rxq, dev, i);
+		if (err < 0)
+			goto err_rxq_info;
+	}
 	return 0;
+
+err_rxq_info:
+	/* Roll back successful registrations and free other resources */
+	while (i--)
+		xdp_rxq_info_unreg(&rx[i].xdp_rxq);
+	kvfree(dev->_rx);
+	dev->_rx = NULL;
+	return err;
+}
+
+static void netif_free_rx_queues(struct net_device *dev)
+{
+	unsigned int i, count = dev->num_rx_queues;
+
+	/* netif_alloc_rx_queues() failed; its error path already unregistered */
+	if (!dev->_rx)
+		return;
+
+	for (i = 0; i < count; i++)
+		xdp_rxq_info_unreg(&dev->_rx[i].xdp_rxq);
+
+	kvfree(dev->_rx);
 }
-#endif
 
 static void netdev_init_one_queue(struct net_device *dev,
 				  struct netdev_queue *queue, void *_unused)
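
The error path added to netif_alloc_rx_queues() is the classic partial-initialization unwind: when registration fails at index i, while (i--) unregisters exactly the entries i-1 down to 0 that did succeed, then the array is freed. The idiom in a self-contained form (reg/unreg are stubs):

#include <stdio.h>

static int reg(unsigned int i)    { return i == 3 ? -12 /* -ENOMEM */ : 0; }
static void unreg(unsigned int i) { printf("unreg %u\n", i); }

static int alloc_all(unsigned int count)
{
	unsigned int i;
	int err;

	for (i = 0; i < count; i++) {
		err = reg(i);
		if (err < 0)
			goto rollback;
	}
	return 0;

rollback:
	/* index i failed, so unwind i-1 .. 0; while (i--) does exactly that */
	while (i--)
		unreg(i);
	return err;
}

int main(void)
{
	return alloc_all(5) ? 1 : 0;	/* prints unreg 2, unreg 1, unreg 0 */
}
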
@@ -8115,12 +8275,10 @@ struct net_device *alloc_netdev_mqs(int sizeof_priv, const char *name,
 		return NULL;
 	}
 
-#ifdef CONFIG_SYSFS
 	if (rxqs < 1) {
 		pr_err("alloc_netdev: Unable to allocate device with zero RX queues\n");
 		return NULL;
 	}
-#endif
 
 	alloc_size = sizeof(struct net_device);
 	if (sizeof_priv) {
@@ -8177,12 +8335,10 @@ struct net_device *alloc_netdev_mqs(int sizeof_priv, const char *name,
 	if (netif_alloc_netdev_queues(dev))
 		goto free_all;
 
-#ifdef CONFIG_SYSFS
 	dev->num_rx_queues = rxqs;
 	dev->real_num_rx_queues = rxqs;
 	if (netif_alloc_rx_queues(dev))
 		goto free_all;
-#endif
 
 	strcpy(dev->name, name);
 	dev->name_assign_type = name_assign_type;
@@ -8218,13 +8374,10 @@ EXPORT_SYMBOL(alloc_netdev_mqs);
 void free_netdev(struct net_device *dev)
 {
 	struct napi_struct *p, *n;
-	struct bpf_prog *prog;
 
 	might_sleep();
 	netif_free_tx_queues(dev);
-#ifdef CONFIG_SYSFS
-	kvfree(dev->_rx);
-#endif
+	netif_free_rx_queues(dev);
 
 	kfree(rcu_dereference_protected(dev->ingress_queue, 1));
 
@@ -8237,12 +8390,6 @@ void free_netdev(struct net_device *dev)
 	free_percpu(dev->pcpu_refcnt);
 	dev->pcpu_refcnt = NULL;
 
-	prog = rcu_dereference_protected(dev->xdp_prog, 1);
-	if (prog) {
-		bpf_prog_put(prog);
-		static_key_slow_dec(&generic_xdp_needed);
-	}
-
 	/*  Compatibility with error handling in drivers */
 	if (dev->reg_state == NETREG_UNINITIALIZED) {
 		netdev_freemem(dev);
@@ -8355,7 +8502,7 @@ EXPORT_SYMBOL(unregister_netdev);
 
 int dev_change_net_namespace(struct net_device *dev, struct net *net, const char *pat)
 {
-	int err, new_nsid;
+	int err, new_nsid, new_ifindex;
 
 	ASSERT_RTNL();
 
@@ -8411,11 +8558,16 @@ int dev_change_net_namespace(struct net_device *dev, struct net *net, const char
 	call_netdevice_notifiers(NETDEV_UNREGISTER, dev);
 	rcu_barrier();
 	call_netdevice_notifiers(NETDEV_UNREGISTER_FINAL, dev);
-	if (dev->rtnl_link_ops && dev->rtnl_link_ops->get_link_net)
-		new_nsid = peernet2id_alloc(dev_net(dev), net);
+
+	new_nsid = peernet2id_alloc(dev_net(dev), net);
+	/* If there is an ifindex conflict assign a new one */
+	if (__dev_get_by_index(net, dev->ifindex))
+		new_ifindex = dev_new_index(net);
 	else
-		new_nsid = peernet2id(dev_net(dev), net);
-	rtmsg_ifinfo_newnet(RTM_DELLINK, dev, ~0U, GFP_KERNEL, &new_nsid);
+		new_ifindex = dev->ifindex;
+
+	rtmsg_ifinfo_newnet(RTM_DELLINK, dev, ~0U, GFP_KERNEL, &new_nsid,
+			    new_ifindex);
 
 	/*
 	 *	Flush the unicast and multicast chains
@@ -8429,10 +8581,7 @@ int dev_change_net_namespace(struct net_device *dev, struct net *net, const char
 
 	/* Actually switch the network namespace */
 	dev_net_set(dev, net);
-
-	/* If there is an ifindex conflict assign a new one */
-	if (__dev_get_by_index(net, dev->ifindex))
-		dev->ifindex = dev_new_index(net);
+	dev->ifindex = new_ifindex;
 
 	/* Send a netdev-add uevent to the new namespace */
 	kobject_uevent(&dev->dev.kobj, KOBJ_ADD);
@@ -8830,6 +8979,9 @@ static int __init net_dev_init(void)
 
 		skb_queue_head_init(&sd->input_pkt_queue);
 		skb_queue_head_init(&sd->process_queue);
+#ifdef CONFIG_XFRM_OFFLOAD
+		skb_queue_head_init(&sd->xfrm_backlog);
+#endif
 		INIT_LIST_HEAD(&sd->poll_list);
 		sd->output_queue_tailp = &sd->output_queue;
 #ifdef CONFIG_RPS
diff --git a/net/core/dev_ioctl.c b/net/core/dev_ioctl.c
index 7e690d0ccd05..0ab1af04296c 100644
--- a/net/core/dev_ioctl.c
+++ b/net/core/dev_ioctl.c
@@ -18,26 +18,10 @@
  *	match.  --pb
  */
 
-static int dev_ifname(struct net *net, struct ifreq __user *arg)
+static int dev_ifname(struct net *net, struct ifreq *ifr)
 {
-	struct ifreq ifr;
-	int error;
-
-	/*
-	 *	Fetch the caller's info block.
-	 */
-
-	if (copy_from_user(&ifr, arg, sizeof(struct ifreq)))
-		return -EFAULT;
-	ifr.ifr_name[IFNAMSIZ-1] = 0;
-
-	error = netdev_get_name(net, ifr.ifr_name, ifr.ifr_ifindex);
-	if (error)
-		return error;
-
-	if (copy_to_user(arg, &ifr, sizeof(struct ifreq)))
-		return -EFAULT;
-	return 0;
+	ifr->ifr_name[IFNAMSIZ-1] = 0;
+	return netdev_get_name(net, ifr->ifr_name, ifr->ifr_ifindex);
 }
 
 static gifconf_func_t *gifconf_list[NPROTO];
@@ -66,9 +50,8 @@ EXPORT_SYMBOL(register_gifconf);
  *	Thus we will need a 'compatibility mode'.
  */
 
-static int dev_ifconf(struct net *net, char __user *arg)
+int dev_ifconf(struct net *net, struct ifconf *ifc, int size)
 {
-	struct ifconf ifc;
 	struct net_device *dev;
 	char __user *pos;
 	int len;
@@ -79,11 +62,8 @@ static int dev_ifconf(struct net *net, char __user *arg)
 	 *	Fetch the caller's info block.
 	 */
 
-	if (copy_from_user(&ifc, arg, sizeof(struct ifconf)))
-		return -EFAULT;
-
-	pos = ifc.ifc_buf;
-	len = ifc.ifc_len;
+	pos = ifc->ifc_buf;
+	len = ifc->ifc_len;
 
 	/*
 	 *	Loop over the interfaces, and write an info block for each.
@@ -95,10 +75,10 @@ static int dev_ifconf(struct net *net, char __user *arg)
 			if (gifconf_list[i]) {
 				int done;
 				if (!pos)
-					done = gifconf_list[i](dev, NULL, 0);
+					done = gifconf_list[i](dev, NULL, 0, size);
 				else
 					done = gifconf_list[i](dev, pos + total,
-							       len - total);
+							       len - total, size);
 				if (done < 0)
 					return -EFAULT;
 				total += done;
@@ -109,12 +89,12 @@ static int dev_ifconf(struct net *net, char __user *arg)
 	/*
 	 *	All done.  Write the updated control block back to the caller.
 	 */
-	ifc.ifc_len = total;
+	ifc->ifc_len = total;
 
 	/*
 	 * 	Both BSD and Solaris return 0 here, so we do too.
 	 */
-	return copy_to_user(arg, &ifc, sizeof(struct ifconf)) ? -EFAULT : 0;
+	return 0;
 }
 
 /*
@@ -406,53 +386,24 @@ EXPORT_SYMBOL(dev_load);
  *	positive or a negative errno code on error.
  */
 
-int dev_ioctl(struct net *net, unsigned int cmd, void __user *arg)
+int dev_ioctl(struct net *net, unsigned int cmd, struct ifreq *ifr, bool *need_copyout)
 {
-	struct ifreq ifr;
 	int ret;
 	char *colon;
 
-	/* One special case: SIOCGIFCONF takes ifconf argument
-	   and requires shared lock, because it sleeps writing
-	   to user space.
-	 */
-
-	if (cmd == SIOCGIFCONF) {
-		rtnl_lock();
-		ret = dev_ifconf(net, (char __user *) arg);
-		rtnl_unlock();
-		return ret;
-	}
+	if (need_copyout)
+		*need_copyout = true;
 	if (cmd == SIOCGIFNAME)
-		return dev_ifname(net, (struct ifreq __user *)arg);
-
-	/*
-	 * Take care of Wireless Extensions. Unfortunately struct iwreq
-	 * isn't a proper subset of struct ifreq (it's 8 byte shorter)
-	 * so we need to treat it specially, otherwise applications may
-	 * fault if the struct they're passing happens to land at the
-	 * end of a mapped page.
-	 */
-	if (cmd >= SIOCIWFIRST && cmd <= SIOCIWLAST) {
-		struct iwreq iwr;
-
-		if (copy_from_user(&iwr, arg, sizeof(iwr)))
-			return -EFAULT;
-
-		iwr.ifr_name[sizeof(iwr.ifr_name) - 1] = 0;
+		return dev_ifname(net, ifr);
 
-		return wext_handle_ioctl(net, &iwr, cmd, arg);
-	}
-
-	if (copy_from_user(&ifr, arg, sizeof(struct ifreq)))
-		return -EFAULT;
-
-	ifr.ifr_name[IFNAMSIZ-1] = 0;
+	ifr->ifr_name[IFNAMSIZ-1] = 0;
 
-	colon = strchr(ifr.ifr_name, ':');
+	colon = strchr(ifr->ifr_name, ':');
 	if (colon)
 		*colon = 0;
 
+	dev_load(net, ifr->ifr_name);
+
 	/*
 	 *	See which interface the caller is talking about.
 	 */
@@ -472,31 +423,19 @@ int dev_ioctl(struct net *net, unsigned int cmd, void __user *arg)
 	case SIOCGIFMAP:
 	case SIOCGIFINDEX:
 	case SIOCGIFTXQLEN:
-		dev_load(net, ifr.ifr_name);
 		rcu_read_lock();
-		ret = dev_ifsioc_locked(net, &ifr, cmd);
+		ret = dev_ifsioc_locked(net, ifr, cmd);
 		rcu_read_unlock();
-		if (!ret) {
-			if (colon)
-				*colon = ':';
-			if (copy_to_user(arg, &ifr,
-					 sizeof(struct ifreq)))
-				ret = -EFAULT;
-		}
+		if (colon)
+			*colon = ':';
 		return ret;
 
 	case SIOCETHTOOL:
-		dev_load(net, ifr.ifr_name);
 		rtnl_lock();
-		ret = dev_ethtool(net, &ifr);
+		ret = dev_ethtool(net, ifr);
 		rtnl_unlock();
-		if (!ret) {
-			if (colon)
-				*colon = ':';
-			if (copy_to_user(arg, &ifr,
-					 sizeof(struct ifreq)))
-				ret = -EFAULT;
-		}
+		if (colon)
+			*colon = ':';
 		return ret;
 
 	/*
@@ -510,17 +449,11 @@ int dev_ioctl(struct net *net, unsigned int cmd, void __user *arg)
 	case SIOCSIFNAME:
 		if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
 			return -EPERM;
-		dev_load(net, ifr.ifr_name);
 		rtnl_lock();
-		ret = dev_ifsioc(net, &ifr, cmd);
+		ret = dev_ifsioc(net, ifr, cmd);
 		rtnl_unlock();
-		if (!ret) {
-			if (colon)
-				*colon = ':';
-			if (copy_to_user(arg, &ifr,
-					 sizeof(struct ifreq)))
-				ret = -EFAULT;
-		}
+		if (colon)
+			*colon = ':';
 		return ret;
 
 	/*
@@ -561,10 +494,11 @@ int dev_ioctl(struct net *net, unsigned int cmd, void __user *arg)
 		/* fall through */
 	case SIOCBONDSLAVEINFOQUERY:
 	case SIOCBONDINFOQUERY:
-		dev_load(net, ifr.ifr_name);
 		rtnl_lock();
-		ret = dev_ifsioc(net, &ifr, cmd);
+		ret = dev_ifsioc(net, ifr, cmd);
 		rtnl_unlock();
+		if (need_copyout)
+			*need_copyout = false;
 		return ret;
 
 	case SIOCGIFMEM:
@@ -584,13 +518,9 @@ int dev_ioctl(struct net *net, unsigned int cmd, void __user *arg)
 		    cmd == SIOCGHWTSTAMP ||
 		    (cmd >= SIOCDEVPRIVATE &&
 		     cmd <= SIOCDEVPRIVATE + 15)) {
-			dev_load(net, ifr.ifr_name);
 			rtnl_lock();
-			ret = dev_ifsioc(net, &ifr, cmd);
+			ret = dev_ifsioc(net, ifr, cmd);
 			rtnl_unlock();
-			if (!ret && copy_to_user(arg, &ifr,
-						 sizeof(struct ifreq)))
-				ret = -EFAULT;
 			return ret;
 		}
 		return -ENOTTY;
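
All copy_from_user()/copy_to_user() handling for these ioctls now lives in the socket-layer caller, which hands kernel-side struct ifreq/struct ifconf into dev_ioctl() and dev_ifconf(); the user-visible ABI is unchanged. For reference, a userspace program exercising the SIOCGIFCONF path end to end (standard API usage, nothing specific to this patch):

#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <net/if.h>

int main(void)
{
	struct ifreq ifr[16];
	struct ifconf ifc;
	int fd, i, n;

	fd = socket(AF_INET, SOCK_DGRAM, 0);
	if (fd < 0) { perror("socket"); return 1; }

	memset(&ifc, 0, sizeof(ifc));
	ifc.ifc_len = sizeof(ifr);
	ifc.ifc_req = ifr;
	if (ioctl(fd, SIOCGIFCONF, &ifc) < 0) {
		perror("SIOCGIFCONF");
		return 1;
	}

	n = ifc.ifc_len / sizeof(struct ifreq);
	for (i = 0; i < n; i++)
		printf("%s\n", ifr[i].ifr_name);

	close(fd);
	return 0;
}
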
diff --git a/net/core/devlink.c b/net/core/devlink.c
index 7d430c1d9c3e..18d385ed8237 100644
--- a/net/core/devlink.c
+++ b/net/core/devlink.c
@@ -92,12 +92,6 @@ static LIST_HEAD(devlink_list);
  */
 static DEFINE_MUTEX(devlink_mutex);
 
-/* devlink_port_mutex
- *
- * Shared lock to guard lists of ports in all devlink devices.
- */
-static DEFINE_MUTEX(devlink_port_mutex);
-
 static struct net *devlink_net(const struct devlink *devlink)
 {
 	return read_pnet(&devlink->_net);
@@ -335,15 +329,18 @@ devlink_sb_tc_index_get_from_info(struct devlink_sb *devlink_sb,
 #define DEVLINK_NL_FLAG_NEED_DEVLINK	BIT(0)
 #define DEVLINK_NL_FLAG_NEED_PORT	BIT(1)
 #define DEVLINK_NL_FLAG_NEED_SB		BIT(2)
-#define DEVLINK_NL_FLAG_LOCK_PORTS	BIT(3)
-	/* port is not needed but we need to ensure they don't
-	 * change in the middle of command
-	 */
+
+/* The per devlink instance lock is taken by default in the pre-doit
+ * operation, yet several commands do not require it. The global
+ * devlink lock is still taken and protects against disruption by
+ * concurrent user calls.
+ */
+#define DEVLINK_NL_FLAG_NO_LOCK		BIT(3)
 
 static int devlink_nl_pre_doit(const struct genl_ops *ops,
 			       struct sk_buff *skb, struct genl_info *info)
 {
 	struct devlink *devlink;
+	int err;
 
 	mutex_lock(&devlink_mutex);
 	devlink = devlink_get_from_info(info);
@@ -351,44 +348,47 @@ static int devlink_nl_pre_doit(const struct genl_ops *ops,
 		mutex_unlock(&devlink_mutex);
 		return PTR_ERR(devlink);
 	}
+	if (~ops->internal_flags & DEVLINK_NL_FLAG_NO_LOCK)
+		mutex_lock(&devlink->lock);
 	if (ops->internal_flags & DEVLINK_NL_FLAG_NEED_DEVLINK) {
 		info->user_ptr[0] = devlink;
 	} else if (ops->internal_flags & DEVLINK_NL_FLAG_NEED_PORT) {
 		struct devlink_port *devlink_port;
 
-		mutex_lock(&devlink_port_mutex);
 		devlink_port = devlink_port_get_from_info(devlink, info);
 		if (IS_ERR(devlink_port)) {
-			mutex_unlock(&devlink_port_mutex);
-			mutex_unlock(&devlink_mutex);
-			return PTR_ERR(devlink_port);
+			err = PTR_ERR(devlink_port);
+			goto unlock;
 		}
 		info->user_ptr[0] = devlink_port;
 	}
-	if (ops->internal_flags & DEVLINK_NL_FLAG_LOCK_PORTS) {
-		mutex_lock(&devlink_port_mutex);
-	}
 	if (ops->internal_flags & DEVLINK_NL_FLAG_NEED_SB) {
 		struct devlink_sb *devlink_sb;
 
 		devlink_sb = devlink_sb_get_from_info(devlink, info);
 		if (IS_ERR(devlink_sb)) {
-			if (ops->internal_flags & DEVLINK_NL_FLAG_NEED_PORT)
-				mutex_unlock(&devlink_port_mutex);
-			mutex_unlock(&devlink_mutex);
-			return PTR_ERR(devlink_sb);
+			err = PTR_ERR(devlink_sb);
+			goto unlock;
 		}
 		info->user_ptr[1] = devlink_sb;
 	}
 	return 0;
+
+unlock:
+	if (~ops->internal_flags & DEVLINK_NL_FLAG_NO_LOCK)
+		mutex_unlock(&devlink->lock);
+	mutex_unlock(&devlink_mutex);
+	return err;
 }
 
 static void devlink_nl_post_doit(const struct genl_ops *ops,
 				 struct sk_buff *skb, struct genl_info *info)
 {
-	if (ops->internal_flags & DEVLINK_NL_FLAG_NEED_PORT ||
-	    ops->internal_flags & DEVLINK_NL_FLAG_LOCK_PORTS)
-		mutex_unlock(&devlink_port_mutex);
+	struct devlink *devlink;
+
+	devlink = devlink_get_from_info(info);
+	if (~ops->internal_flags & DEVLINK_NL_FLAG_NO_LOCK)
+		mutex_unlock(&devlink->lock);
 	mutex_unlock(&devlink_mutex);
 }
 
@@ -614,10 +614,10 @@ static int devlink_nl_cmd_port_get_dumpit(struct sk_buff *msg,
 	int err;
 
 	mutex_lock(&devlink_mutex);
-	mutex_lock(&devlink_port_mutex);
 	list_for_each_entry(devlink, &devlink_list, list) {
 		if (!net_eq(devlink_net(devlink), sock_net(msg->sk)))
 			continue;
+		mutex_lock(&devlink->lock);
 		list_for_each_entry(devlink_port, &devlink->port_list, list) {
 			if (idx < start) {
 				idx++;
@@ -628,13 +628,15 @@ static int devlink_nl_cmd_port_get_dumpit(struct sk_buff *msg,
 						   NETLINK_CB(cb->skb).portid,
 						   cb->nlh->nlmsg_seq,
 						   NLM_F_MULTI);
-			if (err)
+			if (err) {
+				mutex_unlock(&devlink->lock);
 				goto out;
+			}
 			idx++;
 		}
+		mutex_unlock(&devlink->lock);
 	}
 out:
-	mutex_unlock(&devlink_port_mutex);
 	mutex_unlock(&devlink_mutex);
 
 	cb->args[0] = idx;
@@ -801,6 +803,7 @@ static int devlink_nl_cmd_sb_get_dumpit(struct sk_buff *msg,
 	list_for_each_entry(devlink, &devlink_list, list) {
 		if (!net_eq(devlink_net(devlink), sock_net(msg->sk)))
 			continue;
+		mutex_lock(&devlink->lock);
 		list_for_each_entry(devlink_sb, &devlink->sb_list, list) {
 			if (idx < start) {
 				idx++;
@@ -811,10 +814,13 @@ static int devlink_nl_cmd_sb_get_dumpit(struct sk_buff *msg,
 						 NETLINK_CB(cb->skb).portid,
 						 cb->nlh->nlmsg_seq,
 						 NLM_F_MULTI);
-			if (err)
+			if (err) {
+				mutex_unlock(&devlink->lock);
 				goto out;
+			}
 			idx++;
 		}
+		mutex_unlock(&devlink->lock);
 	}
 out:
 	mutex_unlock(&devlink_mutex);
@@ -935,14 +941,18 @@ static int devlink_nl_cmd_sb_pool_get_dumpit(struct sk_buff *msg,
 		if (!net_eq(devlink_net(devlink), sock_net(msg->sk)) ||
 		    !devlink->ops || !devlink->ops->sb_pool_get)
 			continue;
+		mutex_lock(&devlink->lock);
 		list_for_each_entry(devlink_sb, &devlink->sb_list, list) {
 			err = __sb_pool_get_dumpit(msg, start, &idx, devlink,
 						   devlink_sb,
 						   NETLINK_CB(cb->skb).portid,
 						   cb->nlh->nlmsg_seq);
-			if (err && err != -EOPNOTSUPP)
+			if (err && err != -EOPNOTSUPP) {
+				mutex_unlock(&devlink->lock);
 				goto out;
+			}
 		}
+		mutex_unlock(&devlink->lock);
 	}
 out:
 	mutex_unlock(&devlink_mutex);
@@ -1123,22 +1133,24 @@ static int devlink_nl_cmd_sb_port_pool_get_dumpit(struct sk_buff *msg,
 	int err;
 
 	mutex_lock(&devlink_mutex);
-	mutex_lock(&devlink_port_mutex);
 	list_for_each_entry(devlink, &devlink_list, list) {
 		if (!net_eq(devlink_net(devlink), sock_net(msg->sk)) ||
 		    !devlink->ops || !devlink->ops->sb_port_pool_get)
 			continue;
+		mutex_lock(&devlink->lock);
 		list_for_each_entry(devlink_sb, &devlink->sb_list, list) {
 			err = __sb_port_pool_get_dumpit(msg, start, &idx,
 							devlink, devlink_sb,
 							NETLINK_CB(cb->skb).portid,
 							cb->nlh->nlmsg_seq);
-			if (err && err != -EOPNOTSUPP)
+			if (err && err != -EOPNOTSUPP) {
+				mutex_unlock(&devlink->lock);
 				goto out;
+			}
 		}
+		mutex_unlock(&devlink->lock);
 	}
 out:
-	mutex_unlock(&devlink_port_mutex);
 	mutex_unlock(&devlink_mutex);
 
 	cb->args[0] = idx;
@@ -1347,23 +1359,26 @@ devlink_nl_cmd_sb_tc_pool_bind_get_dumpit(struct sk_buff *msg,
 	int err;
 
 	mutex_lock(&devlink_mutex);
-	mutex_lock(&devlink_port_mutex);
 	list_for_each_entry(devlink, &devlink_list, list) {
 		if (!net_eq(devlink_net(devlink), sock_net(msg->sk)) ||
 		    !devlink->ops || !devlink->ops->sb_tc_pool_bind_get)
 			continue;
+
+		mutex_lock(&devlink->lock);
 		list_for_each_entry(devlink_sb, &devlink->sb_list, list) {
 			err = __sb_tc_pool_bind_get_dumpit(msg, start, &idx,
 							   devlink,
 							   devlink_sb,
 							   NETLINK_CB(cb->skb).portid,
 							   cb->nlh->nlmsg_seq);
-			if (err && err != -EOPNOTSUPP)
+			if (err && err != -EOPNOTSUPP) {
+				mutex_unlock(&devlink->lock);
 				goto out;
+			}
 		}
+		mutex_unlock(&devlink->lock);
 	}
 out:
-	mutex_unlock(&devlink_port_mutex);
 	mutex_unlock(&devlink_mutex);
 
 	cb->args[0] = idx;
@@ -1679,6 +1694,12 @@ static int devlink_dpipe_table_put(struct sk_buff *skb,
 		       table->counters_enabled))
 		goto nla_put_failure;
 
+	if (table->resource_valid) {
+		nla_put_u64_64bit(skb, DEVLINK_ATTR_DPIPE_TABLE_RESOURCE_ID,
+				  table->resource_id, DEVLINK_ATTR_PAD);
+		nla_put_u64_64bit(skb, DEVLINK_ATTR_DPIPE_TABLE_RESOURCE_UNITS,
+				  table->resource_units, DEVLINK_ATTR_PAD);
+	}
 	if (devlink_dpipe_matches_put(table, skb))
 		goto nla_put_failure;
 
@@ -2273,6 +2294,273 @@ static int devlink_nl_cmd_dpipe_table_counters_set(struct sk_buff *skb,
 						counters_enable);
 }
 
+static struct devlink_resource *
+devlink_resource_find(struct devlink *devlink,
+		      struct devlink_resource *resource, u64 resource_id)
+{
+	struct list_head *resource_list;
+
+	if (resource)
+		resource_list = &resource->resource_list;
+	else
+		resource_list = &devlink->resource_list;
+
+	list_for_each_entry(resource, resource_list, list) {
+		struct devlink_resource *child_resource;
+
+		if (resource->id == resource_id)
+			return resource;
+
+		child_resource = devlink_resource_find(devlink, resource,
+						       resource_id);
+		if (child_resource)
+			return child_resource;
+	}
+	return NULL;
+}
+
+static void
+devlink_resource_validate_children(struct devlink_resource *resource)
+{
+	struct devlink_resource *child_resource;
+	bool size_valid = true;
+	u64 parts_size = 0;
+
+	if (list_empty(&resource->resource_list))
+		goto out;
+
+	list_for_each_entry(child_resource, &resource->resource_list, list)
+		parts_size += child_resource->size_new;
+
+	if (parts_size > resource->size)
+		size_valid = false;
+out:
+	resource->size_valid = size_valid;
+}
+
+static int devlink_nl_cmd_resource_set(struct sk_buff *skb,
+				       struct genl_info *info)
+{
+	struct devlink *devlink = info->user_ptr[0];
+	struct devlink_resource *resource;
+	u64 resource_id;
+	u64 size;
+	int err;
+
+	if (!info->attrs[DEVLINK_ATTR_RESOURCE_ID] ||
+	    !info->attrs[DEVLINK_ATTR_RESOURCE_SIZE])
+		return -EINVAL;
+	resource_id = nla_get_u64(info->attrs[DEVLINK_ATTR_RESOURCE_ID]);
+
+	resource = devlink_resource_find(devlink, NULL, resource_id);
+	if (!resource)
+		return -EINVAL;
+
+	if (!resource->resource_ops->size_validate)
+		return -EINVAL;
+
+	size = nla_get_u64(info->attrs[DEVLINK_ATTR_RESOURCE_SIZE]);
+	err = resource->resource_ops->size_validate(devlink, size,
+						    info->extack);
+	if (err)
+		return err;
+
+	resource->size_new = size;
+	devlink_resource_validate_children(resource);
+	if (resource->parent)
+		devlink_resource_validate_children(resource->parent);
+	return 0;
+}
+
+static void
+devlink_resource_size_params_put(struct devlink_resource *resource,
+				 struct sk_buff *skb)
+{
+	struct devlink_resource_size_params *size_params;
+
+	size_params = resource->size_params;
+	nla_put_u64_64bit(skb, DEVLINK_ATTR_RESOURCE_SIZE_GRAN,
+			  size_params->size_granularity, DEVLINK_ATTR_PAD);
+	nla_put_u64_64bit(skb, DEVLINK_ATTR_RESOURCE_SIZE_MAX,
+			  size_params->size_max, DEVLINK_ATTR_PAD);
+	nla_put_u64_64bit(skb, DEVLINK_ATTR_RESOURCE_SIZE_MIN,
+			  size_params->size_min, DEVLINK_ATTR_PAD);
+	nla_put_u8(skb, DEVLINK_ATTR_RESOURCE_UNIT, size_params->unit);
+}
+
+static int devlink_resource_put(struct devlink *devlink, struct sk_buff *skb,
+				struct devlink_resource *resource)
+{
+	struct devlink_resource *child_resource;
+	struct nlattr *child_resource_attr;
+	struct nlattr *resource_attr;
+
+	resource_attr = nla_nest_start(skb, DEVLINK_ATTR_RESOURCE);
+	if (!resource_attr)
+		return -EMSGSIZE;
+
+	if (nla_put_string(skb, DEVLINK_ATTR_RESOURCE_NAME, resource->name) ||
+	    nla_put_u64_64bit(skb, DEVLINK_ATTR_RESOURCE_SIZE, resource->size,
+			      DEVLINK_ATTR_PAD) ||
+	    nla_put_u64_64bit(skb, DEVLINK_ATTR_RESOURCE_ID, resource->id,
+			      DEVLINK_ATTR_PAD))
+		goto nla_put_failure;
+	if (resource->size != resource->size_new)
+		nla_put_u64_64bit(skb, DEVLINK_ATTR_RESOURCE_SIZE_NEW,
+				  resource->size_new, DEVLINK_ATTR_PAD);
+	if (resource->resource_ops && resource->resource_ops->occ_get)
+		nla_put_u64_64bit(skb, DEVLINK_ATTR_RESOURCE_OCC,
+				  resource->resource_ops->occ_get(devlink),
+				  DEVLINK_ATTR_PAD);
+	devlink_resource_size_params_put(resource, skb);
+	if (list_empty(&resource->resource_list))
+		goto out;
+
+	if (nla_put_u8(skb, DEVLINK_ATTR_RESOURCE_SIZE_VALID,
+		       resource->size_valid))
+		goto nla_put_failure;
+
+	child_resource_attr = nla_nest_start(skb, DEVLINK_ATTR_RESOURCE_LIST);
+	if (!child_resource_attr)
+		goto nla_put_failure;
+
+	list_for_each_entry(child_resource, &resource->resource_list, list) {
+		if (devlink_resource_put(devlink, skb, child_resource))
+			goto resource_put_failure;
+	}
+
+	nla_nest_end(skb, child_resource_attr);
+out:
+	nla_nest_end(skb, resource_attr);
+	return 0;
+
+resource_put_failure:
+	nla_nest_cancel(skb, child_resource_attr);
+nla_put_failure:
+	nla_nest_cancel(skb, resource_attr);
+	return -EMSGSIZE;
+}
+
+static int devlink_resource_fill(struct genl_info *info,
+				 enum devlink_command cmd, int flags)
+{
+	struct devlink *devlink = info->user_ptr[0];
+	struct devlink_resource *resource;
+	struct nlattr *resources_attr;
+	struct sk_buff *skb = NULL;
+	struct nlmsghdr *nlh;
+	bool incomplete;
+	void *hdr;
+	int i;
+	int err;
+
+	resource = list_first_entry(&devlink->resource_list,
+				    struct devlink_resource, list);
+start_again:
+	err = devlink_dpipe_send_and_alloc_skb(&skb, info);
+	if (err)
+		return err;
+
+	hdr = genlmsg_put(skb, info->snd_portid, info->snd_seq,
+			  &devlink_nl_family, NLM_F_MULTI, cmd);
+	if (!hdr) {
+		nlmsg_free(skb);
+		return -EMSGSIZE;
+	}
+
+	if (devlink_nl_put_handle(skb, devlink))
+		goto nla_put_failure;
+
+	resources_attr = nla_nest_start(skb, DEVLINK_ATTR_RESOURCE_LIST);
+	if (!resources_attr)
+		goto nla_put_failure;
+
+	incomplete = false;
+	i = 0;
+	list_for_each_entry_from(resource, &devlink->resource_list, list) {
+		err = devlink_resource_put(devlink, skb, resource);
+		if (err) {
+			if (!i)
+				goto err_resource_put;
+			incomplete = true;
+			break;
+		}
+		i++;
+	}
+	nla_nest_end(skb, resources_attr);
+	genlmsg_end(skb, hdr);
+	if (incomplete)
+		goto start_again;
+send_done:
+	nlh = nlmsg_put(skb, info->snd_portid, info->snd_seq,
+			NLMSG_DONE, 0, flags | NLM_F_MULTI);
+	if (!nlh) {
+		err = devlink_dpipe_send_and_alloc_skb(&skb, info);
+		if (err)
+			goto err_skb_send_alloc;
+		goto send_done;
+	}
+	return genlmsg_reply(skb, info);
+
+nla_put_failure:
+	err = -EMSGSIZE;
+err_resource_put:
+err_skb_send_alloc:
+	genlmsg_cancel(skb, hdr);
+	nlmsg_free(skb);
+	return err;
+}
+
+static int devlink_nl_cmd_resource_dump(struct sk_buff *skb,
+					struct genl_info *info)
+{
+	struct devlink *devlink = info->user_ptr[0];
+
+	if (list_empty(&devlink->resource_list))
+		return -EOPNOTSUPP;
+
+	return devlink_resource_fill(info, DEVLINK_CMD_RESOURCE_DUMP, 0);
+}
+
+static int
+devlink_resources_validate(struct devlink *devlink,
+			   struct devlink_resource *resource,
+			   struct genl_info *info)
+{
+	struct list_head *resource_list;
+	int err = 0;
+
+	if (resource)
+		resource_list = &resource->resource_list;
+	else
+		resource_list = &devlink->resource_list;
+
+	list_for_each_entry(resource, resource_list, list) {
+		if (!resource->size_valid)
+			return -EINVAL;
+		err = devlink_resources_validate(devlink, resource, info);
+		if (err)
+			return err;
+	}
+	return err;
+}
+
+static int devlink_nl_cmd_reload(struct sk_buff *skb, struct genl_info *info)
+{
+	struct devlink *devlink = info->user_ptr[0];
+	int err;
+
+	if (!devlink->ops->reload)
+		return -EOPNOTSUPP;
+
+	err = devlink_resources_validate(devlink, NULL, info);
+	if (err) {
+		NL_SET_ERR_MSG_MOD(info->extack, "resources size validation failed");
+		return err;
+	}
+	return devlink->ops->reload(devlink);
+}
+
 static const struct nla_policy devlink_nl_policy[DEVLINK_ATTR_MAX + 1] = {
 	[DEVLINK_ATTR_BUS_NAME] = { .type = NLA_NUL_STRING },
 	[DEVLINK_ATTR_DEV_NAME] = { .type = NLA_NUL_STRING },
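
The resource objects introduced above form a tree: devlink_resource_find() recurses through nested resource_list heads, and devlink_resource_validate_children() marks a node invalid when its children's requested sizes exceed the parent's size. A compact userspace model of those two walks; field names follow the patch but the list handling is simplified to child-pointer arrays:

#include <stdbool.h>
#include <stdio.h>

struct res {
	unsigned long long id, size, size_new;
	bool size_valid;
	struct res *child[4];
	int nchild;
};

static struct res *find(struct res **list, int n, unsigned long long id)
{
	for (int i = 0; i < n; i++) {
		if (list[i]->id == id)
			return list[i];
		struct res *r = find(list[i]->child, list[i]->nchild, id);
		if (r)
			return r;
	}
	return NULL;
}

static void validate_children(struct res *r)
{
	unsigned long long parts = 0;

	for (int i = 0; i < r->nchild; i++)
		parts += r->child[i]->size_new;
	/* leaves stay valid; a parent must cover its children's requests */
	r->size_valid = r->nchild == 0 || parts <= r->size;
}

int main(void)
{
	struct res a = { .id = 1, .size = 100, .size_new = 60 };
	struct res b = { .id = 2, .size = 100, .size_new = 60 };
	struct res top = { .id = 0, .size = 100,
			   .child = { &a, &b }, .nchild = 2 };
	struct res *roots[] = { &top };

	validate_children(&top);	/* 60 + 60 > 100, so invalid */
	printf("found id 2: %s\n", find(roots, 1, 2) ? "yes" : "no");
	printf("top valid: %d\n", top.size_valid);
	return 0;
}
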
@@ -2291,6 +2579,8 @@ static const struct nla_policy devlink_nl_policy[DEVLINK_ATTR_MAX + 1] = {
 	[DEVLINK_ATTR_ESWITCH_ENCAP_MODE] = { .type = NLA_U8 },
 	[DEVLINK_ATTR_DPIPE_TABLE_NAME] = { .type = NLA_NUL_STRING },
 	[DEVLINK_ATTR_DPIPE_TABLE_COUNTERS_ENABLED] = { .type = NLA_U8 },
+	[DEVLINK_ATTR_RESOURCE_ID] = { .type = NLA_U64},
+	[DEVLINK_ATTR_RESOURCE_SIZE] = { .type = NLA_U64},
 };
 
 static const struct genl_ops devlink_nl_ops[] = {
@@ -2322,14 +2612,16 @@ static const struct genl_ops devlink_nl_ops[] = {
 		.doit = devlink_nl_cmd_port_split_doit,
 		.policy = devlink_nl_policy,
 		.flags = GENL_ADMIN_PERM,
-		.internal_flags = DEVLINK_NL_FLAG_NEED_DEVLINK,
+		.internal_flags = DEVLINK_NL_FLAG_NEED_DEVLINK |
+				  DEVLINK_NL_FLAG_NO_LOCK,
 	},
 	{
 		.cmd = DEVLINK_CMD_PORT_UNSPLIT,
 		.doit = devlink_nl_cmd_port_unsplit_doit,
 		.policy = devlink_nl_policy,
 		.flags = GENL_ADMIN_PERM,
-		.internal_flags = DEVLINK_NL_FLAG_NEED_DEVLINK,
+		.internal_flags = DEVLINK_NL_FLAG_NEED_DEVLINK |
+				  DEVLINK_NL_FLAG_NO_LOCK,
 	},
 	{
 		.cmd = DEVLINK_CMD_SB_GET,
@@ -2397,8 +2689,7 @@ static const struct genl_ops devlink_nl_ops[] = {
 		.policy = devlink_nl_policy,
 		.flags = GENL_ADMIN_PERM,
 		.internal_flags = DEVLINK_NL_FLAG_NEED_DEVLINK |
-				  DEVLINK_NL_FLAG_NEED_SB |
-				  DEVLINK_NL_FLAG_LOCK_PORTS,
+				  DEVLINK_NL_FLAG_NEED_SB,
 	},
 	{
 		.cmd = DEVLINK_CMD_SB_OCC_MAX_CLEAR,
@@ -2406,8 +2697,7 @@ static const struct genl_ops devlink_nl_ops[] = {
 		.policy = devlink_nl_policy,
 		.flags = GENL_ADMIN_PERM,
 		.internal_flags = DEVLINK_NL_FLAG_NEED_DEVLINK |
-				  DEVLINK_NL_FLAG_NEED_SB |
-				  DEVLINK_NL_FLAG_LOCK_PORTS,
+				  DEVLINK_NL_FLAG_NEED_SB,
 	},
 	{
 		.cmd = DEVLINK_CMD_ESWITCH_GET,
@@ -2451,6 +2741,28 @@ static const struct genl_ops devlink_nl_ops[] = {
 		.flags = GENL_ADMIN_PERM,
 		.internal_flags = DEVLINK_NL_FLAG_NEED_DEVLINK,
 	},
+	{
+		.cmd = DEVLINK_CMD_RESOURCE_SET,
+		.doit = devlink_nl_cmd_resource_set,
+		.policy = devlink_nl_policy,
+		.flags = GENL_ADMIN_PERM,
+		.internal_flags = DEVLINK_NL_FLAG_NEED_DEVLINK,
+	},
+	{
+		.cmd = DEVLINK_CMD_RESOURCE_DUMP,
+		.doit = devlink_nl_cmd_resource_dump,
+		.policy = devlink_nl_policy,
+		.flags = GENL_ADMIN_PERM,
+		.internal_flags = DEVLINK_NL_FLAG_NEED_DEVLINK,
+	},
+	{
+		.cmd = DEVLINK_CMD_RELOAD,
+		.doit = devlink_nl_cmd_reload,
+		.policy = devlink_nl_policy,
+		.flags = GENL_ADMIN_PERM,
+		.internal_flags = DEVLINK_NL_FLAG_NEED_DEVLINK |
+				  DEVLINK_NL_FLAG_NO_LOCK,
+	},
 };
 
 static struct genl_family devlink_nl_family __ro_after_init = {
@@ -2488,6 +2800,8 @@ struct devlink *devlink_alloc(const struct devlink_ops *ops, size_t priv_size)
 	INIT_LIST_HEAD(&devlink->port_list);
 	INIT_LIST_HEAD(&devlink->sb_list);
 	INIT_LIST_HEAD_RCU(&devlink->dpipe_table_list);
+	INIT_LIST_HEAD(&devlink->resource_list);
+	mutex_init(&devlink->lock);
 	return devlink;
 }
 EXPORT_SYMBOL_GPL(devlink_alloc);
@@ -2550,16 +2864,16 @@ int devlink_port_register(struct devlink *devlink,
 			  struct devlink_port *devlink_port,
 			  unsigned int port_index)
 {
-	mutex_lock(&devlink_port_mutex);
+	mutex_lock(&devlink->lock);
 	if (devlink_port_index_exists(devlink, port_index)) {
-		mutex_unlock(&devlink_port_mutex);
+		mutex_unlock(&devlink->lock);
 		return -EEXIST;
 	}
 	devlink_port->devlink = devlink;
 	devlink_port->index = port_index;
 	devlink_port->registered = true;
 	list_add_tail(&devlink_port->list, &devlink->port_list);
-	mutex_unlock(&devlink_port_mutex);
+	mutex_unlock(&devlink->lock);
 	devlink_port_notify(devlink_port, DEVLINK_CMD_PORT_NEW);
 	return 0;
 }
@@ -2572,10 +2886,12 @@ EXPORT_SYMBOL_GPL(devlink_port_register);
  */
 void devlink_port_unregister(struct devlink_port *devlink_port)
 {
+	struct devlink *devlink = devlink_port->devlink;
+
 	devlink_port_notify(devlink_port, DEVLINK_CMD_PORT_DEL);
-	mutex_lock(&devlink_port_mutex);
+	mutex_lock(&devlink->lock);
 	list_del(&devlink_port->list);
-	mutex_unlock(&devlink_port_mutex);
+	mutex_unlock(&devlink->lock);
 }
 EXPORT_SYMBOL_GPL(devlink_port_unregister);
 
@@ -2651,7 +2967,7 @@ int devlink_sb_register(struct devlink *devlink, unsigned int sb_index,
 	struct devlink_sb *devlink_sb;
 	int err = 0;
 
-	mutex_lock(&devlink_mutex);
+	mutex_lock(&devlink->lock);
 	if (devlink_sb_index_exists(devlink, sb_index)) {
 		err = -EEXIST;
 		goto unlock;
@@ -2670,7 +2986,7 @@ int devlink_sb_register(struct devlink *devlink, unsigned int sb_index,
 	devlink_sb->egress_tc_count = egress_tc_count;
 	list_add_tail(&devlink_sb->list, &devlink->sb_list);
 unlock:
-	mutex_unlock(&devlink_mutex);
+	mutex_unlock(&devlink->lock);
 	return err;
 }
 EXPORT_SYMBOL_GPL(devlink_sb_register);
@@ -2679,11 +2995,11 @@ void devlink_sb_unregister(struct devlink *devlink, unsigned int sb_index)
 {
 	struct devlink_sb *devlink_sb;
 
-	mutex_lock(&devlink_mutex);
+	mutex_lock(&devlink->lock);
 	devlink_sb = devlink_sb_get_by_index(devlink, sb_index);
 	WARN_ON(!devlink_sb);
 	list_del(&devlink_sb->list);
-	mutex_unlock(&devlink_mutex);
+	mutex_unlock(&devlink->lock);
 	kfree(devlink_sb);
 }
 EXPORT_SYMBOL_GPL(devlink_sb_unregister);
@@ -2699,9 +3015,9 @@ EXPORT_SYMBOL_GPL(devlink_sb_unregister);
 int devlink_dpipe_headers_register(struct devlink *devlink,
 				   struct devlink_dpipe_headers *dpipe_headers)
 {
-	mutex_lock(&devlink_mutex);
+	mutex_lock(&devlink->lock);
 	devlink->dpipe_headers = dpipe_headers;
-	mutex_unlock(&devlink_mutex);
+	mutex_unlock(&devlink->lock);
 	return 0;
 }
 EXPORT_SYMBOL_GPL(devlink_dpipe_headers_register);
@@ -2715,9 +3031,9 @@ EXPORT_SYMBOL_GPL(devlink_dpipe_headers_register);
  */
 void devlink_dpipe_headers_unregister(struct devlink *devlink)
 {
-	mutex_lock(&devlink_mutex);
+	mutex_lock(&devlink->lock);
 	devlink->dpipe_headers = NULL;
-	mutex_unlock(&devlink_mutex);
+	mutex_unlock(&devlink->lock);
 }
 EXPORT_SYMBOL_GPL(devlink_dpipe_headers_unregister);
 
@@ -2783,9 +3099,9 @@ int devlink_dpipe_table_register(struct devlink *devlink,
 	table->priv = priv;
 	table->counter_control_extern = counter_control_extern;
 
-	mutex_lock(&devlink_mutex);
+	mutex_lock(&devlink->lock);
 	list_add_tail_rcu(&table->list, &devlink->dpipe_table_list);
-	mutex_unlock(&devlink_mutex);
+	mutex_unlock(&devlink->lock);
 	return 0;
 }
 EXPORT_SYMBOL_GPL(devlink_dpipe_table_register);
@@ -2801,20 +3117,182 @@ void devlink_dpipe_table_unregister(struct devlink *devlink,
 {
 	struct devlink_dpipe_table *table;
 
-	mutex_lock(&devlink_mutex);
+	mutex_lock(&devlink->lock);
 	table = devlink_dpipe_table_find(&devlink->dpipe_table_list,
 					 table_name);
 	if (!table)
 		goto unlock;
 	list_del_rcu(&table->list);
-	mutex_unlock(&devlink_mutex);
+	mutex_unlock(&devlink->lock);
 	kfree_rcu(table, rcu);
 	return;
 unlock:
-	mutex_unlock(&devlink_mutex);
+	mutex_unlock(&devlink->lock);
 }
 EXPORT_SYMBOL_GPL(devlink_dpipe_table_unregister);
 
+/**
+ *	devlink_resource_register - devlink resource register
+ *
+ *	@devlink: devlink
+ *	@resource_name: resource's name
+ *	@top_hierarchy: resource sits at the top of the hierarchy
+ *	@reload_required: reload is required for new configuration to
+ *			  apply
+ *	@resource_size: resource's size
+ *	@resource_id: resource's id
+ *	@parent_resource_id: resource's parent id
+ *	@size_params: size parameters
+ *	@resource_ops: resource ops
+ */
+int devlink_resource_register(struct devlink *devlink,
+			      const char *resource_name,
+			      bool top_hierarchy,
+			      u64 resource_size,
+			      u64 resource_id,
+			      u64 parent_resource_id,
+			      struct devlink_resource_size_params *size_params,
+			      const struct devlink_resource_ops *resource_ops)
+{
+	struct devlink_resource *resource;
+	struct list_head *resource_list;
+	int err = 0;
+
+	mutex_lock(&devlink->lock);
+	resource = devlink_resource_find(devlink, NULL, resource_id);
+	if (resource) {
+		err = -EINVAL;
+		goto out;
+	}
+
+	resource = kzalloc(sizeof(*resource), GFP_KERNEL);
+	if (!resource) {
+		err = -ENOMEM;
+		goto out;
+	}
+
+	if (top_hierarchy) {
+		resource_list = &devlink->resource_list;
+	} else {
+		struct devlink_resource *parent_resource;
+
+		parent_resource = devlink_resource_find(devlink, NULL,
+							parent_resource_id);
+		if (parent_resource) {
+			resource_list = &parent_resource->resource_list;
+			resource->parent = parent_resource;
+		} else {
+			kfree(resource);
+			err = -EINVAL;
+			goto out;
+		}
+	}
+
+	resource->name = resource_name;
+	resource->size = resource_size;
+	resource->size_new = resource_size;
+	resource->id = resource_id;
+	resource->resource_ops = resource_ops;
+	resource->size_valid = true;
+	resource->size_params = size_params;
+	INIT_LIST_HEAD(&resource->resource_list);
+	list_add_tail(&resource->list, resource_list);
+out:
+	mutex_unlock(&devlink->lock);
+	return err;
+}
+EXPORT_SYMBOL_GPL(devlink_resource_register);
+
+/**
+ *	devlink_resources_unregister - free all resources
+ *
+ *	@devlink: devlink
+ *	@resource: resource
+ */
+void devlink_resources_unregister(struct devlink *devlink,
+				  struct devlink_resource *resource)
+{
+	struct devlink_resource *tmp, *child_resource;
+	struct list_head *resource_list;
+
+	if (resource)
+		resource_list = &resource->resource_list;
+	else
+		resource_list = &devlink->resource_list;
+
+	if (!resource)
+		mutex_lock(&devlink->lock);
+
+	list_for_each_entry_safe(child_resource, tmp, resource_list, list) {
+		devlink_resources_unregister(devlink, child_resource);
+		list_del(&child_resource->list);
+		kfree(child_resource);
+	}
+
+	if (!resource)
+		mutex_unlock(&devlink->lock);
+}
+EXPORT_SYMBOL_GPL(devlink_resources_unregister);
+
+/**
+ *	devlink_resource_size_get - get and update size
+ *
+ *	@devlink: devlink
+ *	@resource_id: the requested resource id
+ *	@p_resource_size: ptr to update
+ */
+int devlink_resource_size_get(struct devlink *devlink,
+			      u64 resource_id,
+			      u64 *p_resource_size)
+{
+	struct devlink_resource *resource;
+	int err = 0;
+
+	mutex_lock(&devlink->lock);
+	resource = devlink_resource_find(devlink, NULL, resource_id);
+	if (!resource) {
+		err = -EINVAL;
+		goto out;
+	}
+	*p_resource_size = resource->size_new;
+	resource->size = resource->size_new;
+out:
+	mutex_unlock(&devlink->lock);
+	return err;
+}
+EXPORT_SYMBOL_GPL(devlink_resource_size_get);
+
+/**
+ *	devlink_dpipe_table_resource_set - set the resource id
+ *
+ *	@devlink: devlink
+ *	@table_name: table name
+ *	@resource_id: resource id
+ *	@resource_units: number of resource's units consumed per table's entry
+ */
+int devlink_dpipe_table_resource_set(struct devlink *devlink,
+				     const char *table_name, u64 resource_id,
+				     u64 resource_units)
+{
+	struct devlink_dpipe_table *table;
+	int err = 0;
+
+	mutex_lock(&devlink->lock);
+	table = devlink_dpipe_table_find(&devlink->dpipe_table_list,
+					 table_name);
+	if (!table) {
+		err = -EINVAL;
+		goto out;
+	}
+	table->resource_id = resource_id;
+	table->resource_units = resource_units;
+	table->resource_valid = true;
+out:
+	mutex_unlock(&devlink->lock);
+	return err;
+}
+EXPORT_SYMBOL_GPL(devlink_dpipe_table_resource_set);
+
 static int __init devlink_module_init(void)
 {
 	return genl_register_family(&devlink_nl_family);
diff --git a/net/core/dst.c b/net/core/dst.c
index 662a2d4a3d19..007aa0b08291 100644
--- a/net/core/dst.c
+++ b/net/core/dst.c
@@ -21,6 +21,7 @@
 #include <linux/sched.h>
 #include <linux/prefetch.h>
 #include <net/lwtunnel.h>
+#include <net/xfrm.h>
 
 #include <net/dst.h>
 #include <net/dst_metadata.h>
@@ -62,15 +63,12 @@ void dst_init(struct dst_entry *dst, struct dst_ops *ops,
 	      struct net_device *dev, int initial_ref, int initial_obsolete,
 	      unsigned short flags)
 {
-	dst->child = NULL;
 	dst->dev = dev;
 	if (dev)
 		dev_hold(dev);
 	dst->ops = ops;
 	dst_init_metrics(dst, dst_default_metrics.metrics, true);
 	dst->expires = 0UL;
-	dst->path = dst;
-	dst->from = NULL;
 #ifdef CONFIG_XFRM
 	dst->xfrm = NULL;
 #endif
@@ -88,7 +86,6 @@ void dst_init(struct dst_entry *dst, struct dst_ops *ops,
 	dst->__use = 0;
 	dst->lastuse = jiffies;
 	dst->flags = flags;
-	dst->next = NULL;
 	if (!(flags & DST_NOCOUNT))
 		dst_entries_add(ops, 1);
 }
@@ -116,12 +113,17 @@ EXPORT_SYMBOL(dst_alloc);
 
 struct dst_entry *dst_destroy(struct dst_entry * dst)
 {
-	struct dst_entry *child;
+	struct dst_entry *child = NULL;
 
 	smp_rmb();
 
-	child = dst->child;
+#ifdef CONFIG_XFRM
+	if (dst->xfrm) {
+		struct xfrm_dst *xdst = (struct xfrm_dst *) dst;
 
+		child = xdst->child;
+	}
+#endif
 	if (!(dst->flags & DST_NOCOUNT))
 		dst_entries_add(dst->ops, -1);
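
With child/path/from gone from struct dst_entry, only xfrm bundles still chain dst entries, so dst_destroy() recovers the child by downcasting to struct xfrm_dst. That cast is only sound because the dst_entry sits at offset zero of the containing structure; a standalone model of the layout trick (the real xfrm_dst wraps the dst in a union, simplified away here):

#include <stdio.h>

struct dst_entry { int xfrm; };		/* flag stands in for dst->xfrm */

struct xfrm_dst {
	struct dst_entry dst;		/* must stay the first member */
	struct dst_entry *child;
};

static struct dst_entry *get_child(struct dst_entry *dst)
{
	if (dst->xfrm) {
		struct xfrm_dst *xdst = (struct xfrm_dst *)dst;

		return xdst->child;
	}
	return NULL;			/* plain dsts no longer chain */
}

int main(void)
{
	struct dst_entry leaf = { 0 };
	struct xfrm_dst bundle = { .dst = { .xfrm = 1 }, .child = &leaf };

	printf("bundle child %p\n", (void *)get_child(&bundle.dst));
	printf("leaf child   %p\n", (void *)get_child(&leaf));
	return 0;
}
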
 
diff --git a/net/core/ethtool.c b/net/core/ethtool.c
index 8225416911ae..107b122c8969 100644
--- a/net/core/ethtool.c
+++ b/net/core/ethtool.c
@@ -73,6 +73,7 @@ static const char netdev_features_strings[NETDEV_FEATURE_COUNT][ETH_GSTRING_LEN]
 	[NETIF_F_LLTX_BIT] =             "tx-lockless",
 	[NETIF_F_NETNS_LOCAL_BIT] =      "netns-local",
 	[NETIF_F_GRO_BIT] =              "rx-gro",
+	[NETIF_F_GRO_HW_BIT] =           "rx-gro-hw",
 	[NETIF_F_LRO_BIT] =              "rx-lro",
 
 	[NETIF_F_TSO_BIT] =              "tx-tcp-segmentation",
@@ -1692,14 +1693,23 @@ static int ethtool_get_ringparam(struct net_device *dev, void __user *useraddr)
 
 static int ethtool_set_ringparam(struct net_device *dev, void __user *useraddr)
 {
-	struct ethtool_ringparam ringparam;
+	struct ethtool_ringparam ringparam, max = { .cmd = ETHTOOL_GRINGPARAM };
 
-	if (!dev->ethtool_ops->set_ringparam)
+	if (!dev->ethtool_ops->set_ringparam || !dev->ethtool_ops->get_ringparam)
 		return -EOPNOTSUPP;
 
 	if (copy_from_user(&ringparam, useraddr, sizeof(ringparam)))
 		return -EFAULT;
 
+	dev->ethtool_ops->get_ringparam(dev, &max);
+
+	/* ensure new ring parameters are within the maximums */
+	if (ringparam.rx_pending > max.rx_max_pending ||
+	    ringparam.rx_mini_pending > max.rx_mini_max_pending ||
+	    ringparam.rx_jumbo_pending > max.rx_jumbo_max_pending ||
+	    ringparam.tx_pending > max.tx_max_pending)
+		return -EINVAL;
+
 	return dev->ethtool_ops->set_ringparam(dev, &ringparam);
 }
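
ethtool_set_ringparam() now asks the driver for its maxima via get_ringparam() and rejects out-of-range requests itself, instead of trusting every driver to validate. The same bounds check as a standalone function, with the struct trimmed to the fields used:

#include <stdio.h>

struct ringparam {
	unsigned int rx_pending, rx_mini_pending, rx_jumbo_pending, tx_pending;
	unsigned int rx_max_pending, rx_mini_max_pending,
		     rx_jumbo_max_pending, tx_max_pending;
};

static int check_ringparam(const struct ringparam *req,
			   const struct ringparam *max)
{
	if (req->rx_pending > max->rx_max_pending ||
	    req->rx_mini_pending > max->rx_mini_max_pending ||
	    req->rx_jumbo_pending > max->rx_jumbo_max_pending ||
	    req->tx_pending > max->tx_max_pending)
		return -22;	/* stand-in for -EINVAL */
	return 0;
}

int main(void)
{
	struct ringparam max = { .rx_max_pending = 4096,
				 .tx_max_pending = 4096 };
	struct ringparam req = { .rx_pending = 8192 };

	printf("%d\n", check_ringparam(&req, &max));	/* -22 */
	return 0;
}
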
 
diff --git a/net/core/filter.c b/net/core/filter.c
index 1c0eb436671f..08ab4c65a998 100644
--- a/net/core/filter.c
+++ b/net/core/filter.c
@@ -401,8 +401,8 @@ do_pass:
 		/* Classic BPF expects A and X to be reset first. These need
 		 * to be guaranteed to be the first two instructions.
 		 */
-		*new_insn++ = BPF_ALU64_REG(BPF_XOR, BPF_REG_A, BPF_REG_A);
-		*new_insn++ = BPF_ALU64_REG(BPF_XOR, BPF_REG_X, BPF_REG_X);
+		*new_insn++ = BPF_ALU32_REG(BPF_XOR, BPF_REG_A, BPF_REG_A);
+		*new_insn++ = BPF_ALU32_REG(BPF_XOR, BPF_REG_X, BPF_REG_X);
 
 		/* All programs must keep CTX in callee saved BPF_REG_CTX.
 		 * In eBPF case it's done by the compiler, here we need to
@@ -459,8 +459,15 @@ do_pass:
 				break;
 
 			if (fp->code == (BPF_ALU | BPF_DIV | BPF_X) ||
-			    fp->code == (BPF_ALU | BPF_MOD | BPF_X))
+			    fp->code == (BPF_ALU | BPF_MOD | BPF_X)) {
 				*insn++ = BPF_MOV32_REG(BPF_REG_X, BPF_REG_X);
+				/* Emit an explicit error path for div/mod by 0:
+				 * cBPF programs always returned 0 in that case.
+				 */
+				*insn++ = BPF_JMP_IMM(BPF_JNE, BPF_REG_X, 0, 2);
+				*insn++ = BPF_ALU32_REG(BPF_XOR, BPF_REG_A, BPF_REG_A);
+				*insn++ = BPF_EXIT_INSN();
+			}
 
 			*insn = BPF_RAW_INSN(fp->code, BPF_REG_A, BPF_REG_X, 0, fp->k);
 			break;
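
The three instructions emitted for BPF_DIV|BPF_X and BPF_MOD|BPF_X preserve classic-BPF semantics in the eBPF translation: a zero divisor makes the whole program return 0 instead of faulting (BPF_REG_A maps to R0, so A ^= A followed by exit returns 0). The generated sequence behaves like this C model of the semantics, not of the encoding:

#include <stdio.h>

static unsigned int prog(unsigned int A, unsigned int X)
{
	/* JNE X, 0, +2 ; A ^= A ; EXIT  --  then the real div/mod */
	if (X == 0)
		return 0;	/* whole program exits with 0 */
	return A / X;
}

int main(void)
{
	printf("%u\n", prog(10, 2));	/* 5 */
	printf("%u\n", prog(10, 0));	/* 0, no fault */
	return 0;
}
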
@@ -2686,8 +2693,9 @@ static int __xdp_generic_ok_fwd_dev(struct sk_buff *skb, struct net_device *fwd)
 	return 0;
 }
 
-int xdp_do_generic_redirect_map(struct net_device *dev, struct sk_buff *skb,
-				struct bpf_prog *xdp_prog)
+static int xdp_do_generic_redirect_map(struct net_device *dev,
+				       struct sk_buff *skb,
+				       struct bpf_prog *xdp_prog)
 {
 	struct redirect_info *ri = this_cpu_ptr(&redirect_info);
 	unsigned long map_owner = ri->map_owner;
@@ -2864,7 +2872,7 @@ static const struct bpf_func_proto bpf_skb_event_output_proto = {
 	.arg2_type	= ARG_CONST_MAP_PTR,
 	.arg3_type	= ARG_ANYTHING,
 	.arg4_type	= ARG_PTR_TO_MEM,
-	.arg5_type	= ARG_CONST_SIZE,
+	.arg5_type	= ARG_CONST_SIZE_OR_ZERO,
 };
 
 static unsigned short bpf_tunnel_key_af(u64 flags)
@@ -3015,6 +3023,8 @@ BPF_CALL_4(bpf_skb_set_tunnel_key, struct sk_buff *, skb,
 	info->key.tun_flags = TUNNEL_KEY | TUNNEL_CSUM | TUNNEL_NOCACHE;
 	if (flags & BPF_F_DONT_FRAGMENT)
 		info->key.tun_flags |= TUNNEL_DONT_FRAGMENT;
+	if (flags & BPF_F_ZERO_CSUM_TX)
+		info->key.tun_flags &= ~TUNNEL_CSUM;
 
 	info->key.tun_id = cpu_to_be64(from->tunnel_id);
 	info->key.tos = from->tunnel_tos;
@@ -3028,8 +3038,6 @@ BPF_CALL_4(bpf_skb_set_tunnel_key, struct sk_buff *, skb,
 				  IPV6_FLOWLABEL_MASK;
 	} else {
 		info->key.u.ipv4.dst = cpu_to_be32(from->remote_ipv4);
-		if (flags & BPF_F_ZERO_CSUM_TX)
-			info->key.tun_flags &= ~TUNNEL_CSUM;
 	}
 
 	return 0;
@@ -3153,7 +3161,7 @@ static const struct bpf_func_proto bpf_xdp_event_output_proto = {
 	.arg2_type	= ARG_CONST_MAP_PTR,
 	.arg3_type	= ARG_ANYTHING,
 	.arg4_type	= ARG_PTR_TO_MEM,
-	.arg5_type	= ARG_CONST_SIZE,
+	.arg5_type	= ARG_CONST_SIZE_OR_ZERO,
 };
 
 BPF_CALL_1(bpf_get_socket_cookie, struct sk_buff *, skb)
@@ -3231,6 +3239,29 @@ BPF_CALL_5(bpf_setsockopt, struct bpf_sock_ops_kern *, bpf_sock,
 			ret = -EINVAL;
 		}
 #ifdef CONFIG_INET
+#if IS_ENABLED(CONFIG_IPV6)
+	} else if (level == SOL_IPV6) {
+		if (optlen != sizeof(int) || sk->sk_family != AF_INET6)
+			return -EINVAL;
+
+		val = *((int *)optval);
+		/* Only some options are supported */
+		switch (optname) {
+		case IPV6_TCLASS:
+			if (val < -1 || val > 0xff) {
+				ret = -EINVAL;
+			} else {
+				struct ipv6_pinfo *np = inet6_sk(sk);
+
+				if (val == -1)
+					val = 0;
+				np->tclass = val;
+			}
+			break;
+		default:
+			ret = -EINVAL;
+		}
+#endif
 	} else if (level == SOL_TCP &&
 		   sk->sk_prot->setsockopt == tcp_setsockopt) {
 		if (optname == TCP_CONGESTION) {
@@ -3240,7 +3271,8 @@ BPF_CALL_5(bpf_setsockopt, struct bpf_sock_ops_kern *, bpf_sock,
 			strncpy(name, optval, min_t(long, optlen,
 						    TCP_CA_NAME_MAX-1));
 			name[TCP_CA_NAME_MAX-1] = 0;
-			ret = tcp_set_congestion_control(sk, name, false, reinit);
+			ret = tcp_set_congestion_control(sk, name, false,
+							 reinit);
 		} else {
 			struct tcp_sock *tp = tcp_sk(sk);
 
@@ -3306,6 +3338,22 @@ BPF_CALL_5(bpf_getsockopt, struct bpf_sock_ops_kern *, bpf_sock,
 		} else {
 			goto err_clear;
 		}
+#if IS_ENABLED(CONFIG_IPV6)
+	} else if (level == SOL_IPV6) {
+		struct ipv6_pinfo *np = inet6_sk(sk);
+
+		if (optlen != sizeof(int) || sk->sk_family != AF_INET6)
+			goto err_clear;
+
+		/* Only some options are supported */
+		switch (optname) {
+		case IPV6_TCLASS:
+			*((int *)optval) = (int)np->tclass;
+			break;
+		default:
+			goto err_clear;
+		}
+#endif
 	} else {
 		goto err_clear;
 	}
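
With SOL_IPV6/IPV6_TCLASS wired into bpf_setsockopt() and bpf_getsockopt(), a sockops program can manage the IPv6 traffic class directly. A sketch of such a program; it assumes libbpf-style headers, and SOL_IPV6/IPV6_TCLASS are defined locally in case the build environment lacks them:

#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

#ifndef SOL_IPV6
#define SOL_IPV6	41
#endif
#ifndef IPV6_TCLASS
#define IPV6_TCLASS	67
#endif

SEC("sockops")
int set_tclass(struct bpf_sock_ops *skops)
{
	int tclass = 0x10;	/* demo value; must be in -1..0xff */

	if (skops->op == BPF_SOCK_OPS_TCP_CONNECT_CB)
		bpf_setsockopt(skops, SOL_IPV6, IPV6_TCLASS,
			       &tclass, sizeof(tclass));
	return 1;
}

char _license[] SEC("license") = "GPL";
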
@@ -3327,6 +3375,33 @@ static const struct bpf_func_proto bpf_getsockopt_proto = {
 	.arg5_type	= ARG_CONST_SIZE,
 };
 
+BPF_CALL_2(bpf_sock_ops_cb_flags_set, struct bpf_sock_ops_kern *, bpf_sock,
+	   int, argval)
+{
+	struct sock *sk = bpf_sock->sk;
+	int val = argval & BPF_SOCK_OPS_ALL_CB_FLAGS;
+
+	if (!sk_fullsock(sk))
+		return -EINVAL;
+
+#ifdef CONFIG_INET
+	if (val)
+		tcp_sk(sk)->bpf_sock_ops_cb_flags = val;
+
+	return argval & (~BPF_SOCK_OPS_ALL_CB_FLAGS);
+#else
+	return -EINVAL;
+#endif
+}
+
+static const struct bpf_func_proto bpf_sock_ops_cb_flags_set_proto = {
+	.func		= bpf_sock_ops_cb_flags_set,
+	.gpl_only	= false,
+	.ret_type	= RET_INTEGER,
+	.arg1_type	= ARG_PTR_TO_CTX,
+	.arg2_type	= ARG_ANYTHING,
+};
+
 static const struct bpf_func_proto *
 bpf_base_func_proto(enum bpf_func_id func_id)
 {
@@ -3459,6 +3534,8 @@ xdp_func_proto(enum bpf_func_id func_id)
 		return &bpf_xdp_event_output_proto;
 	case BPF_FUNC_get_smp_processor_id:
 		return &bpf_get_smp_processor_id_proto;
+	case BPF_FUNC_csum_diff:
+		return &bpf_csum_diff_proto;
 	case BPF_FUNC_xdp_adjust_head:
 		return &bpf_xdp_adjust_head_proto;
 	case BPF_FUNC_xdp_adjust_meta:
@@ -3507,6 +3584,8 @@ static const struct bpf_func_proto *
 		return &bpf_setsockopt_proto;
 	case BPF_FUNC_getsockopt:
 		return &bpf_getsockopt_proto;
+	case BPF_FUNC_sock_ops_cb_flags_set:
+		return &bpf_sock_ops_cb_flags_set_proto;
 	case BPF_FUNC_sock_map_update:
 		return &bpf_sock_map_update_proto;
 	default:
@@ -3823,34 +3902,44 @@ void bpf_warn_invalid_xdp_action(u32 act)
 }
 EXPORT_SYMBOL_GPL(bpf_warn_invalid_xdp_action);
 
-static bool __is_valid_sock_ops_access(int off, int size)
+static bool sock_ops_is_valid_access(int off, int size,
+				     enum bpf_access_type type,
+				     struct bpf_insn_access_aux *info)
 {
+	const int size_default = sizeof(__u32);
+
 	if (off < 0 || off >= sizeof(struct bpf_sock_ops))
 		return false;
+
 	/* The verifier guarantees that size > 0. */
 	if (off % size != 0)
 		return false;
-	if (size != sizeof(__u32))
-		return false;
 
-	return true;
-}
-
-static bool sock_ops_is_valid_access(int off, int size,
-				     enum bpf_access_type type,
-				     struct bpf_insn_access_aux *info)
-{
 	if (type == BPF_WRITE) {
 		switch (off) {
-		case offsetof(struct bpf_sock_ops, op) ...
-		     offsetof(struct bpf_sock_ops, replylong[3]):
+		case offsetof(struct bpf_sock_ops, reply):
+		case offsetof(struct bpf_sock_ops, sk_txhash):
+			if (size != size_default)
+				return false;
 			break;
 		default:
 			return false;
 		}
+	} else {
+		switch (off) {
+		case bpf_ctx_range_till(struct bpf_sock_ops, bytes_received,
+					bytes_acked):
+			if (size != sizeof(__u64))
+				return false;
+			break;
+		default:
+			if (size != size_default)
+				return false;
+			break;
+		}
 	}
 
-	return __is_valid_sock_ops_access(off, size);
+	return true;
 }
 
 static int sk_skb_prologue(struct bpf_insn *insn_buf, bool direct_write,
@@ -4305,6 +4394,24 @@ static u32 xdp_convert_ctx_access(enum bpf_access_type type,
 				      si->dst_reg, si->src_reg,
 				      offsetof(struct xdp_buff, data_end));
 		break;
+	case offsetof(struct xdp_md, ingress_ifindex):
+		*insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct xdp_buff, rxq),
+				      si->dst_reg, si->src_reg,
+				      offsetof(struct xdp_buff, rxq));
+		*insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct xdp_rxq_info, dev),
+				      si->dst_reg, si->dst_reg,
+				      offsetof(struct xdp_rxq_info, dev));
+		*insn++ = BPF_LDX_MEM(BPF_W, si->dst_reg, si->dst_reg,
+				      offsetof(struct net_device, ifindex));
+		break;
+	case offsetof(struct xdp_md, rx_queue_index):
+		*insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct xdp_buff, rxq),
+				      si->dst_reg, si->src_reg,
+				      offsetof(struct xdp_buff, rxq));
+		*insn++ = BPF_LDX_MEM(BPF_W, si->dst_reg, si->dst_reg,
+				      offsetof(struct xdp_rxq_info,
+					       queue_index));
+		break;
 	}
 
 	return insn - insn_buf;
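
The two new xdp_md fields are rewritten at verification time into direct loads through xdp_buff->rxq, so a program can branch on the receiving device and queue without any helper call. A minimal XDP program using both fields, assuming libbpf-style headers:

#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

SEC("xdp")
int queue_filter(struct xdp_md *ctx)
{
	/* both loads compile to reads through xdp_rxq_info, no helper */
	if (ctx->rx_queue_index == 0)
		return XDP_DROP;	/* e.g. shed load from queue 0 */

	bpf_printk("ifindex %u queue %u",
		   ctx->ingress_ifindex, ctx->rx_queue_index);
	return XDP_PASS;
}

char _license[] SEC("license") = "GPL";
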
@@ -4439,6 +4546,211 @@ static u32 sock_ops_convert_ctx_access(enum bpf_access_type type,
 		*insn++ = BPF_LDX_MEM(BPF_H, si->dst_reg, si->dst_reg,
 				      offsetof(struct sock_common, skc_num));
 		break;
+
+	case offsetof(struct bpf_sock_ops, is_fullsock):
+		*insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(
+						struct bpf_sock_ops_kern,
+						is_fullsock),
+				      si->dst_reg, si->src_reg,
+				      offsetof(struct bpf_sock_ops_kern,
+					       is_fullsock));
+		break;
+
+	case offsetof(struct bpf_sock_ops, state):
+		BUILD_BUG_ON(FIELD_SIZEOF(struct sock_common, skc_state) != 1);
+
+		*insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(
+						struct bpf_sock_ops_kern, sk),
+				      si->dst_reg, si->src_reg,
+				      offsetof(struct bpf_sock_ops_kern, sk));
+		*insn++ = BPF_LDX_MEM(BPF_B, si->dst_reg, si->dst_reg,
+				      offsetof(struct sock_common, skc_state));
+		break;
+
+	case offsetof(struct bpf_sock_ops, rtt_min):
+		BUILD_BUG_ON(FIELD_SIZEOF(struct tcp_sock, rtt_min) !=
+			     sizeof(struct minmax));
+		BUILD_BUG_ON(sizeof(struct minmax) <
+			     sizeof(struct minmax_sample));
+
+		*insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(
+						struct bpf_sock_ops_kern, sk),
+				      si->dst_reg, si->src_reg,
+				      offsetof(struct bpf_sock_ops_kern, sk));
+		*insn++ = BPF_LDX_MEM(BPF_W, si->dst_reg, si->dst_reg,
+				      offsetof(struct tcp_sock, rtt_min) +
+				      FIELD_SIZEOF(struct minmax_sample, t));
+		break;
+
+/* Helper macro for adding read access to tcp_sock or sock fields. */
+#define SOCK_OPS_GET_FIELD(BPF_FIELD, OBJ_FIELD, OBJ)			      \
+	do {								      \
+		BUILD_BUG_ON(FIELD_SIZEOF(OBJ, OBJ_FIELD) >		      \
+			     FIELD_SIZEOF(struct bpf_sock_ops, BPF_FIELD));   \
+		*insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(			      \
+						struct bpf_sock_ops_kern,     \
+						is_fullsock),		      \
+				      si->dst_reg, si->src_reg,		      \
+				      offsetof(struct bpf_sock_ops_kern,      \
+					       is_fullsock));		      \
+		*insn++ = BPF_JMP_IMM(BPF_JEQ, si->dst_reg, 0, 2);	      \
+		*insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(			      \
+						struct bpf_sock_ops_kern, sk),\
+				      si->dst_reg, si->src_reg,		      \
+				      offsetof(struct bpf_sock_ops_kern, sk));\
+		*insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(OBJ,		      \
+						       OBJ_FIELD),	      \
+				      si->dst_reg, si->dst_reg,		      \
+				      offsetof(OBJ, OBJ_FIELD));	      \
+	} while (0)
+
+/* Helper macro for adding write access to tcp_sock or sock fields.
+ * The macro is called with two registers, dst_reg which contains a pointer
+ * to ctx (context) and src_reg which contains the value that should be
+ * stored. However, we need an additional register since we cannot overwrite
+ * dst_reg because it may be used later in the program.
+ * Instead we "borrow" one of the other register. We first save its value
+ * into a new (temp) field in bpf_sock_ops_kern, use it, and then restore
+ * it at the end of the macro.
+ */
+#define SOCK_OPS_SET_FIELD(BPF_FIELD, OBJ_FIELD, OBJ)			      \
+	do {								      \
+		int reg = BPF_REG_9;					      \
+		BUILD_BUG_ON(FIELD_SIZEOF(OBJ, OBJ_FIELD) >		      \
+			     FIELD_SIZEOF(struct bpf_sock_ops, BPF_FIELD));   \
+		if (si->dst_reg == reg || si->src_reg == reg)		      \
+			reg--;						      \
+		if (si->dst_reg == reg || si->src_reg == reg)		      \
+			reg--;						      \
+		*insn++ = BPF_STX_MEM(BPF_DW, si->dst_reg, reg,		      \
+				      offsetof(struct bpf_sock_ops_kern,      \
+					       temp));			      \
+		*insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(			      \
+						struct bpf_sock_ops_kern,     \
+						is_fullsock),		      \
+				      reg, si->dst_reg,			      \
+				      offsetof(struct bpf_sock_ops_kern,      \
+					       is_fullsock));		      \
+		*insn++ = BPF_JMP_IMM(BPF_JEQ, reg, 0, 2);		      \
+		*insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(			      \
+						struct bpf_sock_ops_kern, sk),\
+				      reg, si->dst_reg,			      \
+				      offsetof(struct bpf_sock_ops_kern, sk));\
+		*insn++ = BPF_STX_MEM(BPF_FIELD_SIZEOF(OBJ, OBJ_FIELD),	      \
+				      reg, si->src_reg,			      \
+				      offsetof(OBJ, OBJ_FIELD));	      \
+		*insn++ = BPF_LDX_MEM(BPF_DW, reg, si->dst_reg,		      \
+				      offsetof(struct bpf_sock_ops_kern,      \
+					       temp));			      \
+	} while (0)
+
+#define SOCK_OPS_GET_OR_SET_FIELD(BPF_FIELD, OBJ_FIELD, OBJ, TYPE)	      \
+	do {								      \
+		if (TYPE == BPF_WRITE)					      \
+			SOCK_OPS_SET_FIELD(BPF_FIELD, OBJ_FIELD, OBJ);	      \
+		else							      \
+			SOCK_OPS_GET_FIELD(BPF_FIELD, OBJ_FIELD, OBJ);	      \
+	} while (0)
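/* Editor's illustration, not part of this patch: for a read,
 * SOCK_OPS_GET_FIELD(snd_cwnd, snd_cwnd, struct tcp_sock) emits roughly
 * this guarded load sequence:
 *
 *	dst_reg = ctx->is_fullsock;
 *	if (dst_reg == 0)
 *		goto out;	// non-full socket: dst_reg stays 0
 *	dst_reg = ctx->sk;
 *	dst_reg = ((struct tcp_sock *)dst_reg)->snd_cwnd;
 * out:
 */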
+
+	case offsetof(struct bpf_sock_ops, snd_cwnd):
+		SOCK_OPS_GET_FIELD(snd_cwnd, snd_cwnd, struct tcp_sock);
+		break;
+
+	case offsetof(struct bpf_sock_ops, srtt_us):
+		SOCK_OPS_GET_FIELD(srtt_us, srtt_us, struct tcp_sock);
+		break;
+
+	case offsetof(struct bpf_sock_ops, bpf_sock_ops_cb_flags):
+		SOCK_OPS_GET_FIELD(bpf_sock_ops_cb_flags, bpf_sock_ops_cb_flags,
+				   struct tcp_sock);
+		break;
+
+	case offsetof(struct bpf_sock_ops, snd_ssthresh):
+		SOCK_OPS_GET_FIELD(snd_ssthresh, snd_ssthresh, struct tcp_sock);
+		break;
+
+	case offsetof(struct bpf_sock_ops, rcv_nxt):
+		SOCK_OPS_GET_FIELD(rcv_nxt, rcv_nxt, struct tcp_sock);
+		break;
+
+	case offsetof(struct bpf_sock_ops, snd_nxt):
+		SOCK_OPS_GET_FIELD(snd_nxt, snd_nxt, struct tcp_sock);
+		break;
+
+	case offsetof(struct bpf_sock_ops, snd_una):
+		SOCK_OPS_GET_FIELD(snd_una, snd_una, struct tcp_sock);
+		break;
+
+	case offsetof(struct bpf_sock_ops, mss_cache):
+		SOCK_OPS_GET_FIELD(mss_cache, mss_cache, struct tcp_sock);
+		break;
+
+	case offsetof(struct bpf_sock_ops, ecn_flags):
+		SOCK_OPS_GET_FIELD(ecn_flags, ecn_flags, struct tcp_sock);
+		break;
+
+	case offsetof(struct bpf_sock_ops, rate_delivered):
+		SOCK_OPS_GET_FIELD(rate_delivered, rate_delivered,
+				   struct tcp_sock);
+		break;
+
+	case offsetof(struct bpf_sock_ops, rate_interval_us):
+		SOCK_OPS_GET_FIELD(rate_interval_us, rate_interval_us,
+				   struct tcp_sock);
+		break;
+
+	case offsetof(struct bpf_sock_ops, packets_out):
+		SOCK_OPS_GET_FIELD(packets_out, packets_out, struct tcp_sock);
+		break;
+
+	case offsetof(struct bpf_sock_ops, retrans_out):
+		SOCK_OPS_GET_FIELD(retrans_out, retrans_out, struct tcp_sock);
+		break;
+
+	case offsetof(struct bpf_sock_ops, total_retrans):
+		SOCK_OPS_GET_FIELD(total_retrans, total_retrans,
+				   struct tcp_sock);
+		break;
+
+	case offsetof(struct bpf_sock_ops, segs_in):
+		SOCK_OPS_GET_FIELD(segs_in, segs_in, struct tcp_sock);
+		break;
+
+	case offsetof(struct bpf_sock_ops, data_segs_in):
+		SOCK_OPS_GET_FIELD(data_segs_in, data_segs_in, struct tcp_sock);
+		break;
+
+	case offsetof(struct bpf_sock_ops, segs_out):
+		SOCK_OPS_GET_FIELD(segs_out, segs_out, struct tcp_sock);
+		break;
+
+	case offsetof(struct bpf_sock_ops, data_segs_out):
+		SOCK_OPS_GET_FIELD(data_segs_out, data_segs_out,
+				   struct tcp_sock);
+		break;
+
+	case offsetof(struct bpf_sock_ops, lost_out):
+		SOCK_OPS_GET_FIELD(lost_out, lost_out, struct tcp_sock);
+		break;
+
+	case offsetof(struct bpf_sock_ops, sacked_out):
+		SOCK_OPS_GET_FIELD(sacked_out, sacked_out, struct tcp_sock);
+		break;
+
+	case offsetof(struct bpf_sock_ops, sk_txhash):
+		SOCK_OPS_GET_OR_SET_FIELD(sk_txhash, sk_txhash,
+					  struct sock, type);
+		break;
+
+	case offsetof(struct bpf_sock_ops, bytes_received):
+		SOCK_OPS_GET_FIELD(bytes_received, bytes_received,
+				   struct tcp_sock);
+		break;
+
+	case offsetof(struct bpf_sock_ops, bytes_acked):
+		SOCK_OPS_GET_FIELD(bytes_acked, bytes_acked, struct tcp_sock);
+		break;
+
 	}
 	return insn - insn_buf;
 }
@@ -4475,6 +4787,7 @@ const struct bpf_verifier_ops sk_filter_verifier_ops = {
 };
 
 const struct bpf_prog_ops sk_filter_prog_ops = {
+	.test_run		= bpf_prog_test_run_skb,
 };
 
 const struct bpf_verifier_ops tc_cls_act_verifier_ops = {
diff --git a/net/core/flow_dissector.c b/net/core/flow_dissector.c
index 544bddf08e13..559db9ea8d86 100644
--- a/net/core/flow_dissector.c
+++ b/net/core/flow_dissector.c
@@ -24,6 +24,7 @@
 #include <linux/tcp.h>
 #include <net/flow_dissector.h>
 #include <scsi/fc/fc_fcoe.h>
+#include <uapi/linux/batadv_packet.h>
 
 static void dissector_set_key(struct flow_dissector *flow_dissector,
 			      enum flow_dissector_key_id key_id)
@@ -133,10 +134,10 @@ skb_flow_dissect_set_enc_addr_type(enum flow_dissector_key_id type,
 	ctrl->addr_type = type;
 }
 
-static void
-__skb_flow_dissect_tunnel_info(const struct sk_buff *skb,
-			       struct flow_dissector *flow_dissector,
-			       void *target_container)
+void
+skb_flow_dissect_tunnel_info(const struct sk_buff *skb,
+			     struct flow_dissector *flow_dissector,
+			     void *target_container)
 {
 	struct ip_tunnel_info *info;
 	struct ip_tunnel_key *key;
@@ -212,6 +213,7 @@ __skb_flow_dissect_tunnel_info(const struct sk_buff *skb,
 		tp->dst = key->tp_dst;
 	}
 }
+EXPORT_SYMBOL(skb_flow_dissect_tunnel_info);
 
 static enum flow_dissect_ret
 __skb_flow_dissect_mpls(const struct sk_buff *skb,
@@ -436,6 +438,57 @@ __skb_flow_dissect_gre(const struct sk_buff *skb,
 	return FLOW_DISSECT_RET_PROTO_AGAIN;
 }
 
+/**
+ * __skb_flow_dissect_batadv() - dissect batman-adv header
+ * @skb: sk_buff to with the batman-adv header
+ * @key_control: flow dissectors control key
+ * @data: raw buffer pointer to the packet, if NULL use skb->data
+ * @p_proto: pointer used to update the protocol to process next
+ * @p_nhoff: pointer used to update inner network header offset
+ * @hlen: packet header length
+ * @flags: any combination of FLOW_DISSECTOR_F_*
+ *
+ * An attempt is made to dissect ETH_P_BATMAN packets. Only
+ * &struct batadv_unicast packets are actually processed, because they contain
+ * an inner ethernet header and are usually followed by an actual network
+ * header. This allows the flow dissector to continue processing the packet.
+ *
+ * Return: FLOW_DISSECT_RET_PROTO_AGAIN when &struct batadv_unicast was found,
+ *  FLOW_DISSECT_RET_OUT_GOOD when dissector should stop after encapsulation,
+ *  otherwise FLOW_DISSECT_RET_OUT_BAD
+ */
+static enum flow_dissect_ret
+__skb_flow_dissect_batadv(const struct sk_buff *skb,
+			  struct flow_dissector_key_control *key_control,
+			  void *data, __be16 *p_proto, int *p_nhoff, int hlen,
+			  unsigned int flags)
+{
+	struct {
+		struct batadv_unicast_packet batadv_unicast;
+		struct ethhdr eth;
+	} *hdr, _hdr;
+
+	hdr = __skb_header_pointer(skb, *p_nhoff, sizeof(_hdr), data, hlen,
+				   &_hdr);
+	if (!hdr)
+		return FLOW_DISSECT_RET_OUT_BAD;
+
+	if (hdr->batadv_unicast.version != BATADV_COMPAT_VERSION)
+		return FLOW_DISSECT_RET_OUT_BAD;
+
+	if (hdr->batadv_unicast.packet_type != BATADV_UNICAST)
+		return FLOW_DISSECT_RET_OUT_BAD;
+
+	*p_proto = hdr->eth.h_proto;
+	*p_nhoff += sizeof(*hdr);
+
+	key_control->flags |= FLOW_DIS_ENCAPSULATION;
+	if (flags & FLOW_DISSECTOR_F_STOP_AT_ENCAP)
+		return FLOW_DISSECT_RET_OUT_GOOD;
+
+	return FLOW_DISSECT_RET_PROTO_AGAIN;
+}
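/* Editor's note, illustrative: the on-wire layout assumed by the struct
 * above is
 *
 *	| batadv_unicast_packet | inner ethhdr | inner network header ... |
 *
 * so once version and packet_type check out, dissection simply resumes at
 * the inner h_proto with FLOW_DIS_ENCAPSULATION set.
 */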
+
 static void
 __skb_flow_dissect_tcp(const struct sk_buff *skb,
 		       struct flow_dissector *flow_dissector,
@@ -576,9 +629,6 @@ bool __skb_flow_dissect(const struct sk_buff *skb,
 					      FLOW_DISSECTOR_KEY_BASIC,
 					      target_container);
 
-	__skb_flow_dissect_tunnel_info(skb, flow_dissector,
-				       target_container);
-
 	if (dissector_uses_key(flow_dissector,
 			       FLOW_DISSECTOR_KEY_ETH_ADDRS)) {
 		struct ethhdr *eth = eth_hdr(skb);
@@ -817,6 +867,11 @@ proto_again:
 					       nhoff, hlen);
 		break;
 
+	case htons(ETH_P_BATMAN):
+		fdret = __skb_flow_dissect_batadv(skb, key_control, data,
+						  &proto, &nhoff, hlen, flags);
+		break;
+
 	default:
 		fdret = FLOW_DISSECT_RET_OUT_BAD;
 		break;
diff --git a/net/core/gen_estimator.c b/net/core/gen_estimator.c
index 9834cfa21b21..0a3f88f08727 100644
--- a/net/core/gen_estimator.c
+++ b/net/core/gen_estimator.c
@@ -159,7 +159,11 @@ int gen_new_estimator(struct gnet_stats_basic_packed *bstats,
 	est->intvl_log = intvl_log;
 	est->cpu_bstats = cpu_bstats;
 
+	if (stats_lock)
+		local_bh_disable();
 	est_fetch_counters(est, &b);
+	if (stats_lock)
+		local_bh_enable();
 	est->last_bytes = b.bytes;
 	est->last_packets = b.packets;
 	old = rcu_dereference_protected(*rate_est, 1);
diff --git a/net/core/gen_stats.c b/net/core/gen_stats.c
index 87f28557b329..b2b2323bdc84 100644
--- a/net/core/gen_stats.c
+++ b/net/core/gen_stats.c
@@ -252,10 +252,10 @@ __gnet_stats_copy_queue_cpu(struct gnet_stats_queue *qstats,
 	}
 }
 
-static void __gnet_stats_copy_queue(struct gnet_stats_queue *qstats,
-				    const struct gnet_stats_queue __percpu *cpu,
-				    const struct gnet_stats_queue *q,
-				    __u32 qlen)
+void __gnet_stats_copy_queue(struct gnet_stats_queue *qstats,
+			     const struct gnet_stats_queue __percpu *cpu,
+			     const struct gnet_stats_queue *q,
+			     __u32 qlen)
 {
 	if (cpu) {
 		__gnet_stats_copy_queue_cpu(qstats, cpu);
@@ -269,6 +269,7 @@ static void __gnet_stats_copy_queue(struct gnet_stats_queue *qstats,
 
 	qstats->qlen = qlen;
 }
+EXPORT_SYMBOL(__gnet_stats_copy_queue);
 
 /**
  * gnet_stats_copy_queue - copy queue statistics into statistics TLV
diff --git a/net/core/link_watch.c b/net/core/link_watch.c
index 982861607f88..e38e641e98d5 100644
--- a/net/core/link_watch.c
+++ b/net/core/link_watch.c
@@ -92,7 +92,7 @@ static bool linkwatch_urgent_event(struct net_device *dev)
 	if (dev->ifindex != dev_get_iflink(dev))
 		return true;
 
-	if (dev->priv_flags & IFF_TEAM_PORT)
+	if (netif_is_lag_port(dev) || netif_is_lag_master(dev))
 		return true;
 
 	return netif_carrier_ok(dev) &&	qdisc_tx_changing(dev);
diff --git a/net/core/neighbour.c b/net/core/neighbour.c
index 7f831711b6e0..7b7a14abba28 100644
--- a/net/core/neighbour.c
+++ b/net/core/neighbour.c
@@ -2862,7 +2862,6 @@ static int neigh_stat_seq_open(struct inode *inode, struct file *file)
 };
 
 static const struct file_operations neigh_stat_seq_fops = {
-	.owner	 = THIS_MODULE,
 	.open 	 = neigh_stat_seq_open,
 	.read	 = seq_read,
 	.llseek	 = seq_lseek,
diff --git a/net/core/net-procfs.c b/net/core/net-procfs.c
index 615ccab55f38..e010bb800d7b 100644
--- a/net/core/net-procfs.c
+++ b/net/core/net-procfs.c
@@ -182,7 +182,6 @@ static int dev_seq_open(struct inode *inode, struct file *file)
 }
 
 static const struct file_operations dev_seq_fops = {
-	.owner	 = THIS_MODULE,
 	.open    = dev_seq_open,
 	.read    = seq_read,
 	.llseek  = seq_lseek,
@@ -202,7 +201,6 @@ static int softnet_seq_open(struct inode *inode, struct file *file)
 }
 
 static const struct file_operations softnet_seq_fops = {
-	.owner	 = THIS_MODULE,
 	.open    = softnet_seq_open,
 	.read    = seq_read,
 	.llseek  = seq_lseek,
@@ -306,7 +304,6 @@ static int ptype_seq_open(struct inode *inode, struct file *file)
 }
 
 static const struct file_operations ptype_seq_fops = {
-	.owner	 = THIS_MODULE,
 	.open    = ptype_seq_open,
 	.read    = seq_read,
 	.llseek  = seq_lseek,
@@ -387,7 +384,6 @@ static int dev_mc_seq_open(struct inode *inode, struct file *file)
 }
 
 static const struct file_operations dev_mc_seq_fops = {
-	.owner	 = THIS_MODULE,
 	.open    = dev_mc_seq_open,
 	.read    = seq_read,
 	.llseek  = seq_lseek,
diff --git a/net/core/net-sysfs.c b/net/core/net-sysfs.c
index 799b75268291..60a5ad2c33ee 100644
--- a/net/core/net-sysfs.c
+++ b/net/core/net-sysfs.c
@@ -295,10 +295,31 @@ static ssize_t carrier_changes_show(struct device *dev,
 	struct net_device *netdev = to_net_dev(dev);
 
 	return sprintf(buf, fmt_dec,
-		       atomic_read(&netdev->carrier_changes));
+		       atomic_read(&netdev->carrier_up_count) +
+		       atomic_read(&netdev->carrier_down_count));
 }
 static DEVICE_ATTR_RO(carrier_changes);
 
+static ssize_t carrier_up_count_show(struct device *dev,
+				     struct device_attribute *attr,
+				     char *buf)
+{
+	struct net_device *netdev = to_net_dev(dev);
+
+	return sprintf(buf, fmt_dec, atomic_read(&netdev->carrier_up_count));
+}
+static DEVICE_ATTR_RO(carrier_up_count);
+
+static ssize_t carrier_down_count_show(struct device *dev,
+				       struct device_attribute *attr,
+				       char *buf)
+{
+	struct net_device *netdev = to_net_dev(dev);
+
+	return sprintf(buf, fmt_dec, atomic_read(&netdev->carrier_down_count));
+}
+static DEVICE_ATTR_RO(carrier_down_count);
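/* Editor's note, illustrative: once added to net_class_attrs below, the
 * split counters appear as /sys/class/net/<iface>/carrier_up_count and
 * .../carrier_down_count, while the legacy carrier_changes attribute
 * keeps reporting their sum for compatibility.
 */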
+
 /* read-write attributes */
 
 static int change_mtu(struct net_device *dev, unsigned long new_mtu)
@@ -325,29 +346,6 @@ static ssize_t flags_store(struct device *dev, struct device_attribute *attr,
 }
 NETDEVICE_SHOW_RW(flags, fmt_hex);
 
-static int change_tx_queue_len(struct net_device *dev, unsigned long new_len)
-{
-	unsigned int orig_len = dev->tx_queue_len;
-	int res;
-
-	if (new_len != (unsigned int)new_len)
-		return -ERANGE;
-
-	if (new_len != orig_len) {
-		dev->tx_queue_len = new_len;
-		res = call_netdevice_notifiers(NETDEV_CHANGE_TX_QUEUE_LEN, dev);
-		res = notifier_to_errno(res);
-		if (res) {
-			netdev_err(dev,
-				   "refused to change device tx_queue_len\n");
-			dev->tx_queue_len = orig_len;
-			return -EFAULT;
-		}
-	}
-
-	return 0;
-}
-
 static ssize_t tx_queue_len_store(struct device *dev,
 				  struct device_attribute *attr,
 				  const char *buf, size_t len)
@@ -355,7 +353,7 @@ static ssize_t tx_queue_len_store(struct device *dev,
 	if (!capable(CAP_NET_ADMIN))
 		return -EPERM;
 
-	return netdev_store(dev, attr, buf, len, change_tx_queue_len);
+	return netdev_store(dev, attr, buf, len, dev_change_tx_queue_len);
 }
 NETDEVICE_SHOW_RW(tx_queue_len, fmt_dec);
 
@@ -547,6 +545,8 @@ static struct attribute *net_class_attrs[] __ro_after_init = {
 	&dev_attr_phys_port_name.attr,
 	&dev_attr_phys_switch_id.attr,
 	&dev_attr_proto_down.attr,
+	&dev_attr_carrier_up_count.attr,
+	&dev_attr_carrier_down_count.attr,
 	NULL,
 };
 ATTRIBUTE_GROUPS(net_class);
@@ -961,7 +961,7 @@ net_rx_queue_update_kobjects(struct net_device *dev, int old_num, int new_num)
 	while (--i >= new_num) {
 		struct kobject *kobj = &dev->_rx[i].kobj;
 
-		if (!atomic_read(&dev_net(dev)->count))
+		if (!refcount_read(&dev_net(dev)->count))
 			kobj->uevent_suppress = 1;
 		if (dev->sysfs_rx_queue_group)
 			sysfs_remove_group(kobj, dev->sysfs_rx_queue_group);
@@ -1367,7 +1367,7 @@ netdev_queue_update_kobjects(struct net_device *dev, int old_num, int new_num)
 	while (--i >= new_num) {
 		struct netdev_queue *queue = dev->_tx + i;
 
-		if (!atomic_read(&dev_net(dev)->count))
+		if (!refcount_read(&dev_net(dev)->count))
 			queue->kobj.uevent_suppress = 1;
 #ifdef CONFIG_BQL
 		sysfs_remove_group(&queue->kobj, &dql_group);
@@ -1558,7 +1558,7 @@ void netdev_unregister_kobject(struct net_device *ndev)
 {
 	struct device *dev = &ndev->dev;
 
-	if (!atomic_read(&dev_net(ndev)->count))
+	if (!refcount_read(&dev_net(ndev)->count))
 		dev_set_uevent_suppress(dev, 1);
 
 	kobject_get(&dev->kobj);
diff --git a/net/core/net_namespace.c b/net/core/net_namespace.c
index 60a71be75aea..3cad5f51afd3 100644
--- a/net/core/net_namespace.c
+++ b/net/core/net_namespace.c
@@ -35,7 +35,7 @@ LIST_HEAD(net_namespace_list);
 EXPORT_SYMBOL_GPL(net_namespace_list);
 
 struct net init_net = {
-	.count		= ATOMIC_INIT(1),
+	.count		= REFCOUNT_INIT(1),
 	.dev_base_head	= LIST_HEAD_INIT(init_net.dev_base_head),
 };
 EXPORT_SYMBOL(init_net);
@@ -221,17 +221,26 @@ static void rtnl_net_notifyid(struct net *net, int cmd, int id);
  */
 int peernet2id_alloc(struct net *net, struct net *peer)
 {
-	bool alloc;
+	bool alloc = false, alive = false;
 	int id;
 
-	if (atomic_read(&net->count) == 0)
+	if (refcount_read(&net->count) == 0)
 		return NETNSA_NSID_NOT_ASSIGNED;
 	spin_lock_bh(&net->nsid_lock);
-	alloc = atomic_read(&peer->count) == 0 ? false : true;
+	/*
+	 * When peer is obtained from RCU lists, we may race with
+	 * its cleanup. Checking whether it is still alive guarantees
+	 * we never hash a peer back into net->netns_ids after it has
+	 * just been idr_remove()'d from there in cleanup_net().
+	 */
+	if (maybe_get_net(peer))
+		alive = alloc = true;
 	id = __peernet2id_alloc(net, peer, &alloc);
 	spin_unlock_bh(&net->nsid_lock);
 	if (alloc && id >= 0)
 		rtnl_net_notifyid(net, RTM_NEWNSID, id);
+	if (alive)
+		put_net(peer);
 	return id;
 }
 EXPORT_SYMBOL_GPL(peernet2id_alloc);
@@ -264,11 +273,9 @@ struct net *get_net_ns_by_id(struct net *net, int id)
 		return NULL;
 
 	rcu_read_lock();
-	spin_lock_bh(&net->nsid_lock);
 	peer = idr_find(&net->netns_ids, id);
 	if (peer)
 		peer = maybe_get_net(peer);
-	spin_unlock_bh(&net->nsid_lock);
 	rcu_read_unlock();
 
 	return peer;
@@ -284,7 +291,7 @@ static __net_init int setup_net(struct net *net, struct user_namespace *user_ns)
 	int error = 0;
 	LIST_HEAD(net_exit_list);
 
-	atomic_set(&net->count, 1);
+	refcount_set(&net->count, 1);
 	refcount_set(&net->passive, 1);
 	net->dev_base_seq = 1;
 	net->user_ns = user_ns;
@@ -432,13 +439,40 @@ struct net *copy_net_ns(unsigned long flags,
 	return net;
 }
 
+static void unhash_nsid(struct net *net, struct net *last)
+{
+	struct net *tmp;
+	/* This function is only called from the cleanup_net() work,
+	 * and that work is the only process that may delete a net
+	 * from net_namespace_list. So, while the code below is
+	 * executing, the list may only grow. Thus, we do not need
+	 * for_each_net_rcu() or rtnl_lock().
+	 */
+	for_each_net(tmp) {
+		int id;
+
+		spin_lock_bh(&tmp->nsid_lock);
+		id = __peernet2id(tmp, net);
+		if (id >= 0)
+			idr_remove(&tmp->netns_ids, id);
+		spin_unlock_bh(&tmp->nsid_lock);
+		if (id >= 0)
+			rtnl_net_notifyid(tmp, RTM_DELNSID, id);
+		if (tmp == last)
+			break;
+	}
+	spin_lock_bh(&net->nsid_lock);
+	idr_destroy(&net->netns_ids);
+	spin_unlock_bh(&net->nsid_lock);
+}
+
 static DEFINE_SPINLOCK(cleanup_list_lock);
 static LIST_HEAD(cleanup_list);  /* Must hold cleanup_list_lock to touch */
 
 static void cleanup_net(struct work_struct *work)
 {
 	const struct pernet_operations *ops;
-	struct net *net, *tmp;
+	struct net *net, *tmp, *last;
 	struct list_head net_kill_list;
 	LIST_HEAD(net_exit_list);
 
@@ -451,26 +485,25 @@ static void cleanup_net(struct work_struct *work)
 
 	/* Don't let anyone else find us. */
 	rtnl_lock();
-	list_for_each_entry(net, &net_kill_list, cleanup_list) {
+	list_for_each_entry(net, &net_kill_list, cleanup_list)
 		list_del_rcu(&net->list);
-		list_add_tail(&net->exit_list, &net_exit_list);
-		for_each_net(tmp) {
-			int id;
-
-			spin_lock_bh(&tmp->nsid_lock);
-			id = __peernet2id(tmp, net);
-			if (id >= 0)
-				idr_remove(&tmp->netns_ids, id);
-			spin_unlock_bh(&tmp->nsid_lock);
-			if (id >= 0)
-				rtnl_net_notifyid(tmp, RTM_DELNSID, id);
-		}
-		spin_lock_bh(&net->nsid_lock);
-		idr_destroy(&net->netns_ids);
-		spin_unlock_bh(&net->nsid_lock);
+	/* Cache the last net. After we unlock rtnl, no new net
+	 * added to net_namespace_list can assign an nsid to a net
+	 * from net_kill_list (see peernet2id_alloc()), so we skip
+	 * those in unhash_nsid().
+	 *
+	 * Note that unhash_nsid() does not delete nsid links
+	 * between net_kill_list's nets, as they have already been
+	 * deleted from net_namespace_list. But this would be
+	 * useless anyway, as their netns_ids are destroyed there.
+	 */
+	last = list_last_entry(&net_namespace_list, struct net, list);
+	rtnl_unlock();
 
+	list_for_each_entry(net, &net_kill_list, cleanup_list) {
+		unhash_nsid(net, last);
+		list_add_tail(&net->exit_list, &net_exit_list);
 	}
-	rtnl_unlock();
 
 	/*
 	 * Another CPU might be rcu-iterating the list, wait for it.
diff --git a/net/core/pktgen.c b/net/core/pktgen.c
index f95a15086225..b8ab5c829511 100644
--- a/net/core/pktgen.c
+++ b/net/core/pktgen.c
@@ -184,25 +184,44 @@
 
 #define func_enter() pr_debug("entering %s\n", __func__);
 
+#define PKT_FLAGS							\
+	pf(IPV6)		/* Interface in IPV6 Mode */		\
+	pf(IPSRC_RND)		/* IP-Src Random  */			\
+	pf(IPDST_RND)		/* IP-Dst Random  */			\
+	pf(TXSIZE_RND)		/* Transmit size is random */		\
+	pf(UDPSRC_RND)		/* UDP-Src Random */			\
+	pf(UDPDST_RND)		/* UDP-Dst Random */			\
+	pf(UDPCSUM)		/* Include UDP checksum */		\
+	pf(NO_TIMESTAMP)	/* Don't timestamp packets (default TS) */ \
+	pf(MPLS_RND)		/* Random MPLS labels */		\
+	pf(QUEUE_MAP_RND)	/* queue map Random */			\
+	pf(QUEUE_MAP_CPU)	/* queue map mirrors smp_processor_id() */ \
+	pf(FLOW_SEQ)		/* Sequential flows */			\
+	pf(IPSEC)		/* ipsec on for flows */		\
+	pf(MACSRC_RND)		/* MAC-Src Random */			\
+	pf(MACDST_RND)		/* MAC-Dst Random */			\
+	pf(VID_RND)		/* Random VLAN ID */			\
+	pf(SVID_RND)		/* Random SVLAN ID */			\
+	pf(NODE)		/* Node memory alloc */			\
+
+#define pf(flag)		flag##_SHIFT,
+enum pkt_flags {
+	PKT_FLAGS
+};
+#undef pf
+
 /* Device flag bits */
-#define F_IPSRC_RND   (1<<0)	/* IP-Src Random  */
-#define F_IPDST_RND   (1<<1)	/* IP-Dst Random  */
-#define F_UDPSRC_RND  (1<<2)	/* UDP-Src Random */
-#define F_UDPDST_RND  (1<<3)	/* UDP-Dst Random */
-#define F_MACSRC_RND  (1<<4)	/* MAC-Src Random */
-#define F_MACDST_RND  (1<<5)	/* MAC-Dst Random */
-#define F_TXSIZE_RND  (1<<6)	/* Transmit size is random */
-#define F_IPV6        (1<<7)	/* Interface in IPV6 Mode */
-#define F_MPLS_RND    (1<<8)	/* Random MPLS labels */
-#define F_VID_RND     (1<<9)	/* Random VLAN ID */
-#define F_SVID_RND    (1<<10)	/* Random SVLAN ID */
-#define F_FLOW_SEQ    (1<<11)	/* Sequential flows */
-#define F_IPSEC_ON    (1<<12)	/* ipsec on for flows */
-#define F_QUEUE_MAP_RND (1<<13)	/* queue map Random */
-#define F_QUEUE_MAP_CPU (1<<14)	/* queue map mirrors smp_processor_id() */
-#define F_NODE          (1<<15)	/* Node memory alloc*/
-#define F_UDPCSUM       (1<<16)	/* Include UDP checksum */
-#define F_NO_TIMESTAMP  (1<<17)	/* Don't timestamp packets (default TS) */
+#define pf(flag)		static const __u32 F_##flag = (1<<flag##_SHIFT);
+PKT_FLAGS
+#undef pf
+
+#define pf(flag)		__stringify(flag),
+static char *pkt_flag_names[] = {
+	PKT_FLAGS
+};
+#undef pf
+
+#define NR_PKT_FLAGS		ARRAY_SIZE(pkt_flag_names)
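/* Editor's illustration, not part of this patch: expanding the X-macro
 * for one entry, e.g. pf(IPV6), yields
 *
 *	enum pkt_flags { IPV6_SHIFT, ... };
 *	static const __u32 F_IPV6 = (1 << IPV6_SHIFT);
 *	static char *pkt_flag_names[] = { "IPV6", ... };
 *
 * so each flag's bit position, F_* constant and name string come from the
 * single PKT_FLAGS list and cannot drift apart.
 */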
 
 /* Thread control flag bits */
 #define T_STOP        (1<<0)	/* Stop run */
@@ -399,7 +418,7 @@ struct pktgen_dev {
 	__u8	ipsmode;		/* IPSEC mode (config) */
 	__u8	ipsproto;		/* IPSEC type (config) */
 	__u32	spi;
-	struct dst_entry dst;
+	struct xfrm_dst xdst;
 	struct dst_ops dstops;
 #endif
 	char result[512];
@@ -523,7 +542,6 @@ static int pgctrl_open(struct inode *inode, struct file *file)
 }
 
 static const struct file_operations pktgen_fops = {
-	.owner   = THIS_MODULE,
 	.open    = pgctrl_open,
 	.read    = seq_read,
 	.llseek  = seq_lseek,
@@ -535,6 +553,7 @@ static int pktgen_if_show(struct seq_file *seq, void *v)
 {
 	const struct pktgen_dev *pkt_dev = seq->private;
 	ktime_t stopped;
+	unsigned int i;
 	u64 idle;
 
 	seq_printf(seq,
@@ -596,7 +615,6 @@ static int pktgen_if_show(struct seq_file *seq, void *v)
 		   pkt_dev->src_mac_count, pkt_dev->dst_mac_count);
 
 	if (pkt_dev->nr_labels) {
-		unsigned int i;
 		seq_puts(seq, "     mpls: ");
 		for (i = 0; i < pkt_dev->nr_labels; i++)
 			seq_printf(seq, "%08x%s", ntohl(pkt_dev->labels[i]),
@@ -632,68 +650,21 @@ static int pktgen_if_show(struct seq_file *seq, void *v)
 
 	seq_puts(seq, "     Flags: ");
 
-	if (pkt_dev->flags & F_IPV6)
-		seq_puts(seq, "IPV6  ");
-
-	if (pkt_dev->flags & F_IPSRC_RND)
-		seq_puts(seq, "IPSRC_RND  ");
-
-	if (pkt_dev->flags & F_IPDST_RND)
-		seq_puts(seq, "IPDST_RND  ");
-
-	if (pkt_dev->flags & F_TXSIZE_RND)
-		seq_puts(seq, "TXSIZE_RND  ");
-
-	if (pkt_dev->flags & F_UDPSRC_RND)
-		seq_puts(seq, "UDPSRC_RND  ");
-
-	if (pkt_dev->flags & F_UDPDST_RND)
-		seq_puts(seq, "UDPDST_RND  ");
-
-	if (pkt_dev->flags & F_UDPCSUM)
-		seq_puts(seq, "UDPCSUM  ");
-
-	if (pkt_dev->flags & F_NO_TIMESTAMP)
-		seq_puts(seq, "NO_TIMESTAMP  ");
-
-	if (pkt_dev->flags & F_MPLS_RND)
-		seq_puts(seq,  "MPLS_RND  ");
-
-	if (pkt_dev->flags & F_QUEUE_MAP_RND)
-		seq_puts(seq,  "QUEUE_MAP_RND  ");
+	for (i = 0; i < NR_PKT_FLAGS; i++) {
+		if (i == F_FLOW_SEQ)
+			if (!pkt_dev->cflows)
+				continue;
 
-	if (pkt_dev->flags & F_QUEUE_MAP_CPU)
-		seq_puts(seq,  "QUEUE_MAP_CPU  ");
-
-	if (pkt_dev->cflows) {
-		if (pkt_dev->flags & F_FLOW_SEQ)
-			seq_puts(seq,  "FLOW_SEQ  "); /*in sequence flows*/
-		else
-			seq_puts(seq,  "FLOW_RND  ");
-	}
+		if (pkt_dev->flags & (1 << i))
+			seq_printf(seq, "%s  ", pkt_flag_names[i]);
+		else if (i == F_FLOW_SEQ)
+			seq_puts(seq, "FLOW_RND  ");
 
 #ifdef CONFIG_XFRM
-	if (pkt_dev->flags & F_IPSEC_ON) {
-		seq_puts(seq,  "IPSEC  ");
-		if (pkt_dev->spi)
+		if (i == F_IPSEC && pkt_dev->spi)
 			seq_printf(seq, "spi:%u", pkt_dev->spi);
-	}
 #endif
-
-	if (pkt_dev->flags & F_MACSRC_RND)
-		seq_puts(seq, "MACSRC_RND  ");
-
-	if (pkt_dev->flags & F_MACDST_RND)
-		seq_puts(seq, "MACDST_RND  ");
-
-	if (pkt_dev->flags & F_VID_RND)
-		seq_puts(seq, "VID_RND  ");
-
-	if (pkt_dev->flags & F_SVID_RND)
-		seq_puts(seq, "SVID_RND  ");
-
-	if (pkt_dev->flags & F_NODE)
-		seq_puts(seq, "NODE_ALLOC  ");
+	}
 
 	seq_puts(seq, "\n");
 
@@ -859,6 +830,35 @@ static ssize_t get_labels(const char __user *buffer, struct pktgen_dev *pkt_dev)
 	return i;
 }
 
+static __u32 pktgen_read_flag(const char *f, bool *disable)
+{
+	__u32 i;
+
+	if (f[0] == '!') {
+		*disable = true;
+		f++;
+	}
+
+	for (i = 0; i < NR_PKT_FLAGS; i++) {
+		if (!IS_ENABLED(CONFIG_XFRM) && i == IPSEC_SHIFT)
+			continue;
+
+		/* allow only disabling ipv6 flag */
+		if (!*disable && i == IPV6_SHIFT)
+			continue;
+
+		if (strcmp(f, pkt_flag_names[i]) == 0)
+			return 1 << i;
+	}
+
+	if (strcmp(f, "FLOW_RND") == 0) {
+		*disable = !*disable;
+		return F_FLOW_SEQ;
+	}
+
+	return 0;
+}
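/* Editor's illustration, not part of this patch: the legacy FLOW_RND
 * spelling survives as an inverted alias of FLOW_SEQ, e.g.
 *
 *	bool disable = false;
 *	__u32 flag = pktgen_read_flag("FLOW_RND", &disable);
 *	// flag == F_FLOW_SEQ, disable == true  => F_FLOW_SEQ is cleared
 */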
+
 static ssize_t pktgen_if_write(struct file *file,
 			       const char __user * user_buffer, size_t count,
 			       loff_t * offset)
@@ -1216,7 +1216,10 @@ static ssize_t pktgen_if_write(struct file *file,
 		return count;
 	}
 	if (!strcmp(name, "flag")) {
+		__u32 flag;
 		char f[32];
+		bool disable = false;
+
 		memset(f, 0, 32);
 		len = strn_len(&user_buffer[i], sizeof(f) - 1);
 		if (len < 0)
@@ -1225,107 +1228,15 @@ static ssize_t pktgen_if_write(struct file *file,
 		if (copy_from_user(f, &user_buffer[i], len))
 			return -EFAULT;
 		i += len;
-		if (strcmp(f, "IPSRC_RND") == 0)
-			pkt_dev->flags |= F_IPSRC_RND;
-
-		else if (strcmp(f, "!IPSRC_RND") == 0)
-			pkt_dev->flags &= ~F_IPSRC_RND;
-
-		else if (strcmp(f, "TXSIZE_RND") == 0)
-			pkt_dev->flags |= F_TXSIZE_RND;
-
-		else if (strcmp(f, "!TXSIZE_RND") == 0)
-			pkt_dev->flags &= ~F_TXSIZE_RND;
-
-		else if (strcmp(f, "IPDST_RND") == 0)
-			pkt_dev->flags |= F_IPDST_RND;
-
-		else if (strcmp(f, "!IPDST_RND") == 0)
-			pkt_dev->flags &= ~F_IPDST_RND;
-
-		else if (strcmp(f, "UDPSRC_RND") == 0)
-			pkt_dev->flags |= F_UDPSRC_RND;
-
-		else if (strcmp(f, "!UDPSRC_RND") == 0)
-			pkt_dev->flags &= ~F_UDPSRC_RND;
-
-		else if (strcmp(f, "UDPDST_RND") == 0)
-			pkt_dev->flags |= F_UDPDST_RND;
-
-		else if (strcmp(f, "!UDPDST_RND") == 0)
-			pkt_dev->flags &= ~F_UDPDST_RND;
-
-		else if (strcmp(f, "MACSRC_RND") == 0)
-			pkt_dev->flags |= F_MACSRC_RND;
-
-		else if (strcmp(f, "!MACSRC_RND") == 0)
-			pkt_dev->flags &= ~F_MACSRC_RND;
-
-		else if (strcmp(f, "MACDST_RND") == 0)
-			pkt_dev->flags |= F_MACDST_RND;
-
-		else if (strcmp(f, "!MACDST_RND") == 0)
-			pkt_dev->flags &= ~F_MACDST_RND;
-
-		else if (strcmp(f, "MPLS_RND") == 0)
-			pkt_dev->flags |= F_MPLS_RND;
-
-		else if (strcmp(f, "!MPLS_RND") == 0)
-			pkt_dev->flags &= ~F_MPLS_RND;
 
-		else if (strcmp(f, "VID_RND") == 0)
-			pkt_dev->flags |= F_VID_RND;
+		flag = pktgen_read_flag(f, &disable);
 
-		else if (strcmp(f, "!VID_RND") == 0)
-			pkt_dev->flags &= ~F_VID_RND;
-
-		else if (strcmp(f, "SVID_RND") == 0)
-			pkt_dev->flags |= F_SVID_RND;
-
-		else if (strcmp(f, "!SVID_RND") == 0)
-			pkt_dev->flags &= ~F_SVID_RND;
-
-		else if (strcmp(f, "FLOW_SEQ") == 0)
-			pkt_dev->flags |= F_FLOW_SEQ;
-
-		else if (strcmp(f, "QUEUE_MAP_RND") == 0)
-			pkt_dev->flags |= F_QUEUE_MAP_RND;
-
-		else if (strcmp(f, "!QUEUE_MAP_RND") == 0)
-			pkt_dev->flags &= ~F_QUEUE_MAP_RND;
-
-		else if (strcmp(f, "QUEUE_MAP_CPU") == 0)
-			pkt_dev->flags |= F_QUEUE_MAP_CPU;
-
-		else if (strcmp(f, "!QUEUE_MAP_CPU") == 0)
-			pkt_dev->flags &= ~F_QUEUE_MAP_CPU;
-#ifdef CONFIG_XFRM
-		else if (strcmp(f, "IPSEC") == 0)
-			pkt_dev->flags |= F_IPSEC_ON;
-#endif
-
-		else if (strcmp(f, "!IPV6") == 0)
-			pkt_dev->flags &= ~F_IPV6;
-
-		else if (strcmp(f, "NODE_ALLOC") == 0)
-			pkt_dev->flags |= F_NODE;
-
-		else if (strcmp(f, "!NODE_ALLOC") == 0)
-			pkt_dev->flags &= ~F_NODE;
-
-		else if (strcmp(f, "UDPCSUM") == 0)
-			pkt_dev->flags |= F_UDPCSUM;
-
-		else if (strcmp(f, "!UDPCSUM") == 0)
-			pkt_dev->flags &= ~F_UDPCSUM;
-
-		else if (strcmp(f, "NO_TIMESTAMP") == 0)
-			pkt_dev->flags |= F_NO_TIMESTAMP;
-
-		else if (strcmp(f, "!NO_TIMESTAMP") == 0)
-			pkt_dev->flags &= ~F_NO_TIMESTAMP;
-
-		else {
+		if (flag) {
+			if (disable)
+				pkt_dev->flags &= ~flag;
+			else
+				pkt_dev->flags |= flag;
+		} else {
 			sprintf(pg_result,
 				"Flag -:%s:- unknown\nAvailable flags, (prepend ! to un-set flag):\n%s",
 				f,
@@ -1804,7 +1715,6 @@ static int pktgen_if_open(struct inode *inode, struct file *file)
 }
 
 static const struct file_operations pktgen_if_fops = {
-	.owner   = THIS_MODULE,
 	.open    = pktgen_if_open,
 	.read    = seq_read,
 	.llseek  = seq_lseek,
@@ -1942,7 +1852,6 @@ static int pktgen_thread_open(struct inode *inode, struct file *file)
 }
 
 static const struct file_operations pktgen_thread_fops = {
-	.owner   = THIS_MODULE,
 	.open    = pktgen_thread_open,
 	.read    = seq_read,
 	.llseek  = seq_lseek,
@@ -2544,7 +2453,7 @@ static void mod_cur_headers(struct pktgen_dev *pkt_dev)
 				pkt_dev->flows[flow].cur_daddr =
 				    pkt_dev->cur_daddr;
 #ifdef CONFIG_XFRM
-				if (pkt_dev->flags & F_IPSEC_ON)
+				if (pkt_dev->flags & F_IPSEC)
 					get_ipsec_sa(pkt_dev, flow);
 #endif
 				pkt_dev->nflows++;
@@ -2609,7 +2518,7 @@ static int pktgen_output_ipsec(struct sk_buff *skb, struct pktgen_dev *pkt_dev)
 	 * supports both transport/tunnel mode + ESP/AH type.
 	 */
 	if ((x->props.mode == XFRM_MODE_TUNNEL) && (pkt_dev->spi != 0))
-		skb->_skb_refdst = (unsigned long)&pkt_dev->dst | SKB_DST_NOREF;
+		skb->_skb_refdst = (unsigned long)&pkt_dev->xdst.u.dst | SKB_DST_NOREF;
 
 	rcu_read_lock_bh();
 	err = x->outer_mode->output(x, skb);
@@ -2649,7 +2558,7 @@ static void free_SAs(struct pktgen_dev *pkt_dev)
 static int process_ipsec(struct pktgen_dev *pkt_dev,
 			      struct sk_buff *skb, __be16 protocol)
 {
-	if (pkt_dev->flags & F_IPSEC_ON) {
+	if (pkt_dev->flags & F_IPSEC) {
 		struct xfrm_state *x = pkt_dev->flows[pkt_dev->curfl].x;
 		int nhead = 0;
 		if (x) {
@@ -3742,10 +3651,10 @@ static int pktgen_add_device(struct pktgen_thread *t, const char *ifname)
 	 * performance under such circumstance.
 	 */
 	pkt_dev->dstops.family = AF_INET;
-	pkt_dev->dst.dev = pkt_dev->odev;
-	dst_init_metrics(&pkt_dev->dst, pktgen_dst_metrics, false);
-	pkt_dev->dst.child = &pkt_dev->dst;
-	pkt_dev->dst.ops = &pkt_dev->dstops;
+	pkt_dev->xdst.u.dst.dev = pkt_dev->odev;
+	dst_init_metrics(&pkt_dev->xdst.u.dst, pktgen_dst_metrics, false);
+	pkt_dev->xdst.child = &pkt_dev->xdst.u.dst;
+	pkt_dev->xdst.u.dst.ops = &pkt_dev->dstops;
 #endif
 
 	return add_dev_to_thread(t, pkt_dev);
diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c
index 778d7f03404a..204297dffd2a 100644
--- a/net/core/rtnetlink.c
+++ b/net/core/rtnetlink.c
@@ -62,7 +62,9 @@
 struct rtnl_link {
 	rtnl_doit_func		doit;
 	rtnl_dumpit_func	dumpit;
+	struct module		*owner;
 	unsigned int		flags;
+	struct rcu_head		rcu;
 };
 
 static DEFINE_MUTEX(rtnl_mutex);
@@ -127,8 +129,7 @@ bool lockdep_rtnl_is_held(void)
 EXPORT_SYMBOL(lockdep_rtnl_is_held);
 #endif /* #ifdef CONFIG_PROVE_LOCKING */
 
-static struct rtnl_link __rcu *rtnl_msg_handlers[RTNL_FAMILY_MAX + 1];
-static refcount_t rtnl_msg_handlers_ref[RTNL_FAMILY_MAX + 1];
+static struct rtnl_link *__rcu *rtnl_msg_handlers[RTNL_FAMILY_MAX + 1];
 
 static inline int rtm_msgindex(int msgtype)
 {
@@ -144,72 +145,127 @@ static inline int rtm_msgindex(int msgtype)
 	return msgindex;
 }
 
-/**
- * __rtnl_register - Register a rtnetlink message type
- * @protocol: Protocol family or PF_UNSPEC
- * @msgtype: rtnetlink message type
- * @doit: Function pointer called for each request message
- * @dumpit: Function pointer called for each dump request (NLM_F_DUMP) message
- * @flags: rtnl_link_flags to modifiy behaviour of doit/dumpit functions
- *
- * Registers the specified function pointers (at least one of them has
- * to be non-NULL) to be called whenever a request message for the
- * specified protocol family and message type is received.
- *
- * The special protocol family PF_UNSPEC may be used to define fallback
- * function pointers for the case when no entry for the specific protocol
- * family exists.
- *
- * Returns 0 on success or a negative error code.
- */
-int __rtnl_register(int protocol, int msgtype,
-		    rtnl_doit_func doit, rtnl_dumpit_func dumpit,
-		    unsigned int flags)
+static struct rtnl_link *rtnl_get_link(int protocol, int msgtype)
+{
+	struct rtnl_link **tab;
+
+	if (protocol >= ARRAY_SIZE(rtnl_msg_handlers))
+		protocol = PF_UNSPEC;
+
+	tab = rcu_dereference_rtnl(rtnl_msg_handlers[protocol]);
+	if (!tab)
+		tab = rcu_dereference_rtnl(rtnl_msg_handlers[PF_UNSPEC]);
+
+	return tab[msgtype];
+}
+
+static int rtnl_register_internal(struct module *owner,
+				  int protocol, int msgtype,
+				  rtnl_doit_func doit, rtnl_dumpit_func dumpit,
+				  unsigned int flags)
 {
-	struct rtnl_link *tab;
+	struct rtnl_link *link, *old;
+	struct rtnl_link __rcu **tab;
 	int msgindex;
+	int ret = -ENOBUFS;
 
 	BUG_ON(protocol < 0 || protocol > RTNL_FAMILY_MAX);
 	msgindex = rtm_msgindex(msgtype);
 
-	tab = rcu_dereference_raw(rtnl_msg_handlers[protocol]);
+	rtnl_lock();
+	tab = rtnl_msg_handlers[protocol];
 	if (tab == NULL) {
-		tab = kcalloc(RTM_NR_MSGTYPES, sizeof(*tab), GFP_KERNEL);
-		if (tab == NULL)
-			return -ENOBUFS;
+		tab = kcalloc(RTM_NR_MSGTYPES, sizeof(void *), GFP_KERNEL);
+		if (!tab)
+			goto unlock;
 
+		/* ensure readers see the zero-initialized entries */
 		rcu_assign_pointer(rtnl_msg_handlers[protocol], tab);
 	}
 
+	old = rtnl_dereference(tab[msgindex]);
+	if (old) {
+		link = kmemdup(old, sizeof(*old), GFP_KERNEL);
+		if (!link)
+			goto unlock;
+	} else {
+		link = kzalloc(sizeof(*link), GFP_KERNEL);
+		if (!link)
+			goto unlock;
+	}
+
+	WARN_ON(link->owner && link->owner != owner);
+	link->owner = owner;
+
+	WARN_ON(doit && link->doit && link->doit != doit);
 	if (doit)
-		tab[msgindex].doit = doit;
+		link->doit = doit;
+	WARN_ON(dumpit && link->dumpit && link->dumpit != dumpit);
 	if (dumpit)
-		tab[msgindex].dumpit = dumpit;
-	tab[msgindex].flags |= flags;
+		link->dumpit = dumpit;
 
-	return 0;
+	link->flags |= flags;
+
+	/* publish protocol:msgtype */
+	rcu_assign_pointer(tab[msgindex], link);
+	ret = 0;
+	if (old)
+		kfree_rcu(old, rcu);
+unlock:
+	rtnl_unlock();
+	return ret;
 }
-EXPORT_SYMBOL_GPL(__rtnl_register);
+
+/**
+ * rtnl_register_module - Register a rtnetlink message type
+ *
+ * @owner: module registering the hook (THIS_MODULE)
+ * @protocol: Protocol family or PF_UNSPEC
+ * @msgtype: rtnetlink message type
+ * @doit: Function pointer called for each request message
+ * @dumpit: Function pointer called for each dump request (NLM_F_DUMP) message
+ * @flags: rtnl_link_flags to modify behaviour of doit/dumpit functions
+ *
+ * Like rtnl_register, but for use by removable modules.
+ */
+int rtnl_register_module(struct module *owner,
+			 int protocol, int msgtype,
+			 rtnl_doit_func doit, rtnl_dumpit_func dumpit,
+			 unsigned int flags)
+{
+	return rtnl_register_internal(owner, protocol, msgtype,
+				      doit, dumpit, flags);
+}
+EXPORT_SYMBOL_GPL(rtnl_register_module);
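/* Editor's sketch with hypothetical handlers, not part of this patch: a
 * removable module pairs the new helper with rtnl_unregister():
 *
 *	err = rtnl_register_module(THIS_MODULE, PF_BRIDGE, RTM_GETMDB,
 *				   NULL, my_mdb_dumpit, 0);
 *	if (err)
 *		return err;
 *	...
 *	rtnl_unregister(PF_BRIDGE, RTM_GETMDB);
 */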
 
 /**
  * rtnl_register - Register a rtnetlink message type
+ * @protocol: Protocol family or PF_UNSPEC
+ * @msgtype: rtnetlink message type
+ * @doit: Function pointer called for each request message
+ * @dumpit: Function pointer called for each dump request (NLM_F_DUMP) message
+ * @flags: rtnl_link_flags to modify behaviour of doit/dumpit functions
+ *
+ * Registers the specified function pointers (at least one of them has
+ * to be non-NULL) to be called whenever a request message for the
+ * specified protocol family and message type is received.
  *
- * Identical to __rtnl_register() but panics on failure. This is useful
- * as failure of this function is very unlikely, it can only happen due
- * to lack of memory when allocating the chain to store all message
- * handlers for a protocol. Meant for use in init functions where lack
- * of memory implies no sense in continuing.
+ * The special protocol family PF_UNSPEC may be used to define fallback
+ * function pointers for the case when no entry for the specific protocol
+ * family exists.
  */
 void rtnl_register(int protocol, int msgtype,
 		   rtnl_doit_func doit, rtnl_dumpit_func dumpit,
 		   unsigned int flags)
 {
-	if (__rtnl_register(protocol, msgtype, doit, dumpit, flags) < 0)
-		panic("Unable to register rtnetlink message handler, "
-		      "protocol = %d, message type = %d\n",
-		      protocol, msgtype);
+	int err;
+
+	err = rtnl_register_internal(NULL, protocol, msgtype, doit, dumpit,
+				     flags);
+	if (err)
+		pr_err("Unable to register rtnetlink message handler, "
+		       "protocol = %d, message type = %d\n", protocol, msgtype);
 }
-EXPORT_SYMBOL_GPL(rtnl_register);
 
 /**
  * rtnl_unregister - Unregister a rtnetlink message type
@@ -220,24 +276,25 @@ EXPORT_SYMBOL_GPL(rtnl_register);
  */
 int rtnl_unregister(int protocol, int msgtype)
 {
-	struct rtnl_link *handlers;
+	struct rtnl_link **tab, *link;
 	int msgindex;
 
 	BUG_ON(protocol < 0 || protocol > RTNL_FAMILY_MAX);
 	msgindex = rtm_msgindex(msgtype);
 
 	rtnl_lock();
-	handlers = rtnl_dereference(rtnl_msg_handlers[protocol]);
-	if (!handlers) {
+	tab = rtnl_dereference(rtnl_msg_handlers[protocol]);
+	if (!tab) {
 		rtnl_unlock();
 		return -ENOENT;
 	}
 
-	handlers[msgindex].doit = NULL;
-	handlers[msgindex].dumpit = NULL;
-	handlers[msgindex].flags = 0;
+	link = tab[msgindex];
+	rcu_assign_pointer(tab[msgindex], NULL);
 	rtnl_unlock();
 
+	kfree_rcu(link, rcu);
+
 	return 0;
 }
 EXPORT_SYMBOL_GPL(rtnl_unregister);
@@ -251,20 +308,27 @@ EXPORT_SYMBOL_GPL(rtnl_unregister);
  */
 void rtnl_unregister_all(int protocol)
 {
-	struct rtnl_link *handlers;
+	struct rtnl_link **tab, *link;
+	int msgindex;
 
 	BUG_ON(protocol < 0 || protocol > RTNL_FAMILY_MAX);
 
 	rtnl_lock();
-	handlers = rtnl_dereference(rtnl_msg_handlers[protocol]);
+	tab = rtnl_msg_handlers[protocol];
 	RCU_INIT_POINTER(rtnl_msg_handlers[protocol], NULL);
+	for (msgindex = 0; msgindex < RTM_NR_MSGTYPES; msgindex++) {
+		link = tab[msgindex];
+		if (!link)
+			continue;
+
+		rcu_assign_pointer(tab[msgindex], NULL);
+		kfree_rcu(link, rcu);
+	}
 	rtnl_unlock();
 
 	synchronize_net();
 
-	while (refcount_read(&rtnl_msg_handlers_ref[protocol]) > 1)
-		schedule();
-	kfree(handlers);
+	kfree(tab);
 }
 EXPORT_SYMBOL_GPL(rtnl_unregister_all);
 
@@ -840,6 +904,10 @@ static inline int rtnl_vfinfo_size(const struct net_device *dev,
 			 nla_total_size_64bit(sizeof(__u64)) +
 			 /* IFLA_VF_STATS_MULTICAST */
 			 nla_total_size_64bit(sizeof(__u64)) +
+			 /* IFLA_VF_STATS_RX_DROPPED */
+			 nla_total_size_64bit(sizeof(__u64)) +
+			 /* IFLA_VF_STATS_TX_DROPPED */
+			 nla_total_size_64bit(sizeof(__u64)) +
 			 nla_total_size(sizeof(struct ifla_vf_trust)));
 		return size;
 	} else
@@ -920,8 +988,11 @@ static noinline size_t if_nlmsg_size(const struct net_device *dev,
 	       + rtnl_xdp_size() /* IFLA_XDP */
 	       + nla_total_size(4)  /* IFLA_EVENT */
 	       + nla_total_size(4)  /* IFLA_NEW_NETNSID */
+	       + nla_total_size(4)  /* IFLA_NEW_IFINDEX */
 	       + nla_total_size(1)  /* IFLA_PROTO_DOWN */
 	       + nla_total_size(4)  /* IFLA_IF_NETNSID */
+	       + nla_total_size(4)  /* IFLA_CARRIER_UP_COUNT */
+	       + nla_total_size(4)  /* IFLA_CARRIER_DOWN_COUNT */
 	       + 0;
 }
 
@@ -1194,7 +1265,11 @@ static noinline_for_stack int rtnl_fill_vfinfo(struct sk_buff *skb,
 	    nla_put_u64_64bit(skb, IFLA_VF_STATS_BROADCAST,
 			      vf_stats.broadcast, IFLA_VF_STATS_PAD) ||
 	    nla_put_u64_64bit(skb, IFLA_VF_STATS_MULTICAST,
-			      vf_stats.multicast, IFLA_VF_STATS_PAD)) {
+			      vf_stats.multicast, IFLA_VF_STATS_PAD) ||
+	    nla_put_u64_64bit(skb, IFLA_VF_STATS_RX_DROPPED,
+			      vf_stats.rx_dropped, IFLA_VF_STATS_PAD) ||
+	    nla_put_u64_64bit(skb, IFLA_VF_STATS_TX_DROPPED,
+			      vf_stats.tx_dropped, IFLA_VF_STATS_PAD)) {
 		nla_nest_cancel(skb, vfstats);
 		goto nla_put_vf_failure;
 	}
@@ -1261,6 +1336,7 @@ static u8 rtnl_xdp_attached_mode(struct net_device *dev, u32 *prog_id)
 {
 	const struct net_device_ops *ops = dev->netdev_ops;
 	const struct bpf_prog *generic_xdp_prog;
+	struct netdev_bpf xdp;
 
 	ASSERT_RTNL();
 
@@ -1273,7 +1349,10 @@ static u8 rtnl_xdp_attached_mode(struct net_device *dev, u32 *prog_id)
 	if (!ops->ndo_bpf)
 		return XDP_ATTACHED_NONE;
 
-	return __dev_xdp_attached(dev, ops->ndo_bpf, prog_id);
+	__dev_xdp_query(dev, ops->ndo_bpf, &xdp);
+	*prog_id = xdp.prog_id;
+
+	return xdp.prog_attached;
 }
 
 static int rtnl_xdp_fill(struct sk_buff *skb, struct net_device *dev)
@@ -1433,7 +1512,8 @@ static int rtnl_fill_ifinfo(struct sk_buff *skb,
 			    struct net_device *dev, struct net *src_net,
 			    int type, u32 pid, u32 seq, u32 change,
 			    unsigned int flags, u32 ext_filter_mask,
-			    u32 event, int *new_nsid, int tgt_netnsid)
+			    u32 event, int *new_nsid, int new_ifindex,
+			    int tgt_netnsid)
 {
 	struct ifinfomsg *ifm;
 	struct nlmsghdr *nlh;
@@ -1475,8 +1555,13 @@ static int rtnl_fill_ifinfo(struct sk_buff *skb,
 	     nla_put_string(skb, IFLA_QDISC, dev->qdisc->ops->id)) ||
 	    nla_put_ifalias(skb, dev) ||
 	    nla_put_u32(skb, IFLA_CARRIER_CHANGES,
-			atomic_read(&dev->carrier_changes)) ||
-	    nla_put_u8(skb, IFLA_PROTO_DOWN, dev->proto_down))
+			atomic_read(&dev->carrier_up_count) +
+			atomic_read(&dev->carrier_down_count)) ||
+	    nla_put_u8(skb, IFLA_PROTO_DOWN, dev->proto_down) ||
+	    nla_put_u32(skb, IFLA_CARRIER_UP_COUNT,
+			atomic_read(&dev->carrier_up_count)) ||
+	    nla_put_u32(skb, IFLA_CARRIER_DOWN_COUNT,
+			atomic_read(&dev->carrier_down_count)))
 		goto nla_put_failure;
 
 	if (event != IFLA_EVENT_NONE) {
@@ -1525,6 +1610,10 @@ static int rtnl_fill_ifinfo(struct sk_buff *skb,
 	if (new_nsid &&
 	    nla_put_s32(skb, IFLA_NEW_NETNSID, *new_nsid) < 0)
 		goto nla_put_failure;
+	if (new_ifindex &&
+	    nla_put_s32(skb, IFLA_NEW_IFINDEX, new_ifindex) < 0)
+		goto nla_put_failure;
+
 
 	rcu_read_lock();
 	if (rtnl_fill_link_af(skb, dev, ext_filter_mask))
@@ -1569,6 +1658,8 @@ static const struct nla_policy ifla_policy[IFLA_MAX+1] = {
 	[IFLA_PROMISCUITY]	= { .type = NLA_U32 },
 	[IFLA_NUM_TX_QUEUES]	= { .type = NLA_U32 },
 	[IFLA_NUM_RX_QUEUES]	= { .type = NLA_U32 },
+	[IFLA_GSO_MAX_SEGS]	= { .type = NLA_U32 },
+	[IFLA_GSO_MAX_SIZE]	= { .type = NLA_U32 },
 	[IFLA_PHYS_PORT_ID]	= { .type = NLA_BINARY, .len = MAX_PHYS_ITEM_ID_LEN },
 	[IFLA_CARRIER_CHANGES]	= { .type = NLA_U32 },  /* ignored */
 	[IFLA_PHYS_SWITCH_ID]	= { .type = NLA_BINARY, .len = MAX_PHYS_ITEM_ID_LEN },
@@ -1578,6 +1669,8 @@ static const struct nla_policy ifla_policy[IFLA_MAX+1] = {
 	[IFLA_EVENT]		= { .type = NLA_U32 },
 	[IFLA_GROUP]		= { .type = NLA_U32 },
 	[IFLA_IF_NETNSID]	= { .type = NLA_S32 },
+	[IFLA_CARRIER_UP_COUNT]	= { .type = NLA_U32 },
+	[IFLA_CARRIER_DOWN_COUNT] = { .type = NLA_U32 },
 };
 
 static const struct nla_policy ifla_info_policy[IFLA_INFO_MAX+1] = {
@@ -1766,7 +1859,7 @@ static int rtnl_dump_ifinfo(struct sk_buff *skb, struct netlink_callback *cb)
 					       NETLINK_CB(cb->skb).portid,
 					       cb->nlh->nlmsg_seq, 0,
 					       flags,
-					       ext_filter_mask, 0, NULL,
+					       ext_filter_mask, 0, NULL, 0,
 					       netnsid);
 
 			if (err < 0) {
@@ -1815,6 +1908,49 @@ struct net *rtnl_link_get_net(struct net *src_net, struct nlattr *tb[])
 }
 EXPORT_SYMBOL(rtnl_link_get_net);
 
+/* Figure out which network namespace we are talking about by
+ * examining the link attributes in the following order:
+ *
+ * 1. IFLA_NET_NS_PID
+ * 2. IFLA_NET_NS_FD
+ * 3. IFLA_IF_NETNSID
+ */
+static struct net *rtnl_link_get_net_by_nlattr(struct net *src_net,
+					       struct nlattr *tb[])
+{
+	struct net *net;
+
+	if (tb[IFLA_NET_NS_PID] || tb[IFLA_NET_NS_FD])
+		return rtnl_link_get_net(src_net, tb);
+
+	if (!tb[IFLA_IF_NETNSID])
+		return get_net(src_net);
+
+	net = get_net_ns_by_id(src_net, nla_get_u32(tb[IFLA_IF_NETNSID]));
+	if (!net)
+		return ERR_PTR(-EINVAL);
+
+	return net;
+}
+
+static struct net *rtnl_link_get_net_capable(const struct sk_buff *skb,
+					     struct net *src_net,
+					     struct nlattr *tb[], int cap)
+{
+	struct net *net;
+
+	net = rtnl_link_get_net_by_nlattr(src_net, tb);
+	if (IS_ERR(net))
+		return net;
+
+	if (!netlink_ns_capable(skb, net->user_ns, cap)) {
+		put_net(net);
+		return ERR_PTR(-EPERM);
+	}
+
+	return net;
+}
+
 static int validate_linkmsg(struct net_device *dev, struct nlattr *tb[])
 {
 	if (dev) {
@@ -2077,17 +2213,14 @@ static int do_setlink(const struct sk_buff *skb,
 	const struct net_device_ops *ops = dev->netdev_ops;
 	int err;
 
-	if (tb[IFLA_NET_NS_PID] || tb[IFLA_NET_NS_FD]) {
-		struct net *net = rtnl_link_get_net(dev_net(dev), tb);
+	if (tb[IFLA_NET_NS_PID] || tb[IFLA_NET_NS_FD] || tb[IFLA_IF_NETNSID]) {
+		struct net *net = rtnl_link_get_net_capable(skb, dev_net(dev),
+							    tb, CAP_NET_ADMIN);
 		if (IS_ERR(net)) {
 			err = PTR_ERR(net);
 			goto errout;
 		}
-		if (!netlink_ns_capable(skb, net->user_ns, CAP_NET_ADMIN)) {
-			put_net(net);
-			err = -EPERM;
-			goto errout;
-		}
+
 		err = dev_change_net_namespace(dev, net, ifname);
 		put_net(net);
 		if (err)
@@ -2204,17 +2337,37 @@ static int do_setlink(const struct sk_buff *skb,
 
 	if (tb[IFLA_TXQLEN]) {
 		unsigned int value = nla_get_u32(tb[IFLA_TXQLEN]);
-		unsigned int orig_len = dev->tx_queue_len;
-
-		if (dev->tx_queue_len ^ value) {
-			dev->tx_queue_len = value;
-			err = call_netdevice_notifiers(
-			      NETDEV_CHANGE_TX_QUEUE_LEN, dev);
-			err = notifier_to_errno(err);
-			if (err) {
-				dev->tx_queue_len = orig_len;
-				goto errout;
-			}
+
+		err = dev_change_tx_queue_len(dev, value);
+		if (err)
+			goto errout;
+		status |= DO_SETLINK_MODIFIED;
+	}
+
+	if (tb[IFLA_GSO_MAX_SIZE]) {
+		u32 max_size = nla_get_u32(tb[IFLA_GSO_MAX_SIZE]);
+
+		if (max_size > GSO_MAX_SIZE) {
+			err = -EINVAL;
+			goto errout;
+		}
+
+		if (dev->gso_max_size ^ max_size) {
+			netif_set_gso_max_size(dev, max_size);
+			status |= DO_SETLINK_MODIFIED;
+		}
+	}
+
+	if (tb[IFLA_GSO_MAX_SEGS]) {
+		u32 max_segs = nla_get_u32(tb[IFLA_GSO_MAX_SEGS]);
+
+		if (max_segs > GSO_MAX_SEGS) {
+			err = -EINVAL;
+			goto errout;
+		}
+
+		if (dev->gso_max_segs ^ max_segs) {
+			dev->gso_max_segs = max_segs;
 			status |= DO_SETLINK_MODIFIED;
 		}
 	}
@@ -2400,9 +2553,6 @@ static int rtnl_setlink(struct sk_buff *skb, struct nlmsghdr *nlh,
 	if (err < 0)
 		goto errout;
 
-	if (tb[IFLA_IF_NETNSID])
-		return -EOPNOTSUPP;
-
 	if (tb[IFLA_IFNAME])
 		nla_strlcpy(ifname, tb[IFLA_IFNAME], IFNAMSIZ);
 	else
@@ -2487,36 +2637,53 @@ static int rtnl_dellink(struct sk_buff *skb, struct nlmsghdr *nlh,
 			struct netlink_ext_ack *extack)
 {
 	struct net *net = sock_net(skb->sk);
-	struct net_device *dev;
+	struct net *tgt_net = net;
+	struct net_device *dev = NULL;
 	struct ifinfomsg *ifm;
 	char ifname[IFNAMSIZ];
 	struct nlattr *tb[IFLA_MAX+1];
 	int err;
+	int netnsid = -1;
 
 	err = nlmsg_parse(nlh, sizeof(*ifm), tb, IFLA_MAX, ifla_policy, extack);
 	if (err < 0)
 		return err;
 
-	if (tb[IFLA_IF_NETNSID])
-		return -EOPNOTSUPP;
-
 	if (tb[IFLA_IFNAME])
 		nla_strlcpy(ifname, tb[IFLA_IFNAME], IFNAMSIZ);
 
+	if (tb[IFLA_IF_NETNSID]) {
+		netnsid = nla_get_s32(tb[IFLA_IF_NETNSID]);
+		tgt_net = get_target_net(NETLINK_CB(skb).sk, netnsid);
+		if (IS_ERR(tgt_net))
+			return PTR_ERR(tgt_net);
+	}
+
+	err = -EINVAL;
 	ifm = nlmsg_data(nlh);
 	if (ifm->ifi_index > 0)
-		dev = __dev_get_by_index(net, ifm->ifi_index);
+		dev = __dev_get_by_index(tgt_net, ifm->ifi_index);
 	else if (tb[IFLA_IFNAME])
-		dev = __dev_get_by_name(net, ifname);
+		dev = __dev_get_by_name(tgt_net, ifname);
 	else if (tb[IFLA_GROUP])
-		return rtnl_group_dellink(net, nla_get_u32(tb[IFLA_GROUP]));
+		err = rtnl_group_dellink(tgt_net, nla_get_u32(tb[IFLA_GROUP]));
 	else
-		return -EINVAL;
+		goto out;
 
-	if (!dev)
-		return -ENODEV;
+	if (!dev) {
+		if (tb[IFLA_IFNAME] || ifm->ifi_index > 0)
+			err = -ENODEV;
 
-	return rtnl_delete_link(dev);
+		goto out;
+	}
+
+	err = rtnl_delete_link(dev);
+
+out:
+	if (netnsid >= 0)
+		put_net(tgt_net);
+
+	return err;
 }
 
 int rtnl_configure_link(struct net_device *dev, const struct ifinfomsg *ifm)
@@ -2583,6 +2750,10 @@ struct net_device *rtnl_create_link(struct net *net,
 		dev->link_mode = nla_get_u8(tb[IFLA_LINKMODE]);
 	if (tb[IFLA_GROUP])
 		dev_set_group(dev, nla_get_u32(tb[IFLA_GROUP]));
+	if (tb[IFLA_GSO_MAX_SIZE])
+		netif_set_gso_max_size(dev, nla_get_u32(tb[IFLA_GSO_MAX_SIZE]));
+	if (tb[IFLA_GSO_MAX_SEGS])
+		dev->gso_max_segs = nla_get_u32(tb[IFLA_GSO_MAX_SEGS]);
 
 	return dev;
 }
@@ -2781,14 +2952,10 @@ replay:
 			name_assign_type = NET_NAME_ENUM;
 		}
 
-		dest_net = rtnl_link_get_net(net, tb);
+		dest_net = rtnl_link_get_net_capable(skb, net, tb, CAP_NET_ADMIN);
 		if (IS_ERR(dest_net))
 			return PTR_ERR(dest_net);
 
-		err = -EPERM;
-		if (!netlink_ns_capable(skb, dest_net->user_ns, CAP_NET_ADMIN))
-			goto out;
-
 		if (tb[IFLA_LINK_NETNSID]) {
 			int id = nla_get_s32(tb[IFLA_LINK_NETNSID]);
 
@@ -2915,7 +3082,7 @@ static int rtnl_getlink(struct sk_buff *skb, struct nlmsghdr *nlh,
 	err = rtnl_fill_ifinfo(nskb, dev, net,
 			       RTM_NEWLINK, NETLINK_CB(skb).portid,
 			       nlh->nlmsg_seq, 0, 0, ext_filter_mask,
-			       0, NULL, netnsid);
+			       0, NULL, 0, netnsid);
 	if (err < 0) {
 		/* -EMSGSIZE implies BUG in if_nlmsg_size */
 		WARN_ON(err == -EMSGSIZE);
@@ -2973,18 +3140,26 @@ static int rtnl_dump_all(struct sk_buff *skb, struct netlink_callback *cb)
 		s_idx = 1;
 
 	for (idx = 1; idx <= RTNL_FAMILY_MAX; idx++) {
+		struct rtnl_link **tab;
 		int type = cb->nlh->nlmsg_type-RTM_BASE;
-		struct rtnl_link *handlers;
+		struct rtnl_link *link;
 		rtnl_dumpit_func dumpit;
 
 		if (idx < s_idx || idx == PF_PACKET)
 			continue;
 
-		handlers = rtnl_dereference(rtnl_msg_handlers[idx]);
-		if (!handlers)
+		if (type < 0 || type >= RTM_NR_MSGTYPES)
 			continue;
 
-		dumpit = READ_ONCE(handlers[type].dumpit);
+		tab = rcu_dereference_rtnl(rtnl_msg_handlers[idx]);
+		if (!tab)
+			continue;
+
+		link = tab[type];
+		if (!link)
+			continue;
+
+		dumpit = link->dumpit;
 		if (!dumpit)
 			continue;
 
@@ -3003,7 +3178,8 @@ static int rtnl_dump_all(struct sk_buff *skb, struct netlink_callback *cb)
 
 struct sk_buff *rtmsg_ifinfo_build_skb(int type, struct net_device *dev,
 				       unsigned int change,
-				       u32 event, gfp_t flags, int *new_nsid)
+				       u32 event, gfp_t flags, int *new_nsid,
+				       int new_ifindex)
 {
 	struct net *net = dev_net(dev);
 	struct sk_buff *skb;
@@ -3016,7 +3192,7 @@ struct sk_buff *rtmsg_ifinfo_build_skb(int type, struct net_device *dev,
 
 	err = rtnl_fill_ifinfo(skb, dev, dev_net(dev),
 			       type, 0, 0, change, 0, 0, event,
-			       new_nsid, -1);
+			       new_nsid, new_ifindex, -1);
 	if (err < 0) {
 		/* -EMSGSIZE implies BUG in if_nlmsg_size() */
 		WARN_ON(err == -EMSGSIZE);
@@ -3039,14 +3215,15 @@ void rtmsg_ifinfo_send(struct sk_buff *skb, struct net_device *dev, gfp_t flags)
 
 static void rtmsg_ifinfo_event(int type, struct net_device *dev,
 			       unsigned int change, u32 event,
-			       gfp_t flags, int *new_nsid)
+			       gfp_t flags, int *new_nsid, int new_ifindex)
 {
 	struct sk_buff *skb;
 
 	if (dev->reg_state != NETREG_REGISTERED)
 		return;
 
-	skb = rtmsg_ifinfo_build_skb(type, dev, change, event, flags, new_nsid);
+	skb = rtmsg_ifinfo_build_skb(type, dev, change, event, flags, new_nsid,
+				     new_ifindex);
 	if (skb)
 		rtmsg_ifinfo_send(skb, dev, flags);
 }
@@ -3054,14 +3231,15 @@ static void rtmsg_ifinfo_event(int type, struct net_device *dev,
 void rtmsg_ifinfo(int type, struct net_device *dev, unsigned int change,
 		  gfp_t flags)
 {
-	rtmsg_ifinfo_event(type, dev, change, rtnl_get_event(0), flags, NULL);
+	rtmsg_ifinfo_event(type, dev, change, rtnl_get_event(0), flags,
+			   NULL, 0);
 }
 
 void rtmsg_ifinfo_newnet(int type, struct net_device *dev, unsigned int change,
-			 gfp_t flags, int *new_nsid)
+			 gfp_t flags, int *new_nsid, int new_ifindex)
 {
 	rtmsg_ifinfo_event(type, dev, change, rtnl_get_event(0), flags,
-			   new_nsid);
+			   new_nsid, new_ifindex);
 }
 
 static int nlmsg_populate_fdb_fill(struct sk_buff *skb,
@@ -4314,7 +4492,8 @@ static int rtnetlink_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh,
 			     struct netlink_ext_ack *extack)
 {
 	struct net *net = sock_net(skb->sk);
-	struct rtnl_link *handlers;
+	struct rtnl_link *link;
+	struct module *owner;
 	int err = -EOPNOTSUPP;
 	rtnl_doit_func doit;
 	unsigned int flags;
@@ -4338,79 +4517,85 @@ static int rtnetlink_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh,
 	if (kind != 2 && !netlink_net_capable(skb, CAP_NET_ADMIN))
 		return -EPERM;
 
-	if (family >= ARRAY_SIZE(rtnl_msg_handlers))
-		family = PF_UNSPEC;
-
 	rcu_read_lock();
-	handlers = rcu_dereference(rtnl_msg_handlers[family]);
-	if (!handlers) {
-		family = PF_UNSPEC;
-		handlers = rcu_dereference(rtnl_msg_handlers[family]);
-	}
-
 	if (kind == 2 && nlh->nlmsg_flags&NLM_F_DUMP) {
 		struct sock *rtnl;
 		rtnl_dumpit_func dumpit;
 		u16 min_dump_alloc = 0;
 
-		dumpit = READ_ONCE(handlers[type].dumpit);
-		if (!dumpit) {
+		link = rtnl_get_link(family, type);
+		if (!link || !link->dumpit) {
 			family = PF_UNSPEC;
-			handlers = rcu_dereference(rtnl_msg_handlers[PF_UNSPEC]);
-			if (!handlers)
-				goto err_unlock;
-
-			dumpit = READ_ONCE(handlers[type].dumpit);
-			if (!dumpit)
+			link = rtnl_get_link(family, type);
+			if (!link || !link->dumpit)
 				goto err_unlock;
 		}
-
-		refcount_inc(&rtnl_msg_handlers_ref[family]);
+		owner = link->owner;
+		dumpit = link->dumpit;
 
 		if (type == RTM_GETLINK - RTM_BASE)
 			min_dump_alloc = rtnl_calcit(skb, nlh);
 
+		err = 0;
+		/* need to do this before rcu_read_unlock() */
+		if (!try_module_get(owner))
+			err = -EPROTONOSUPPORT;
+
 		rcu_read_unlock();
 
 		rtnl = net->rtnl;
-		{
+		if (err == 0) {
 			struct netlink_dump_control c = {
 				.dump		= dumpit,
 				.min_dump_alloc	= min_dump_alloc,
+				.module		= owner,
 			};
 			err = netlink_dump_start(rtnl, skb, nlh, &c);
+			/* netlink_dump_start() will keep a reference on
+			 * the module if the dump is still in progress.
+			 */
+			module_put(owner);
 		}
-		refcount_dec(&rtnl_msg_handlers_ref[family]);
 		return err;
 	}
 
-	doit = READ_ONCE(handlers[type].doit);
-	if (!doit) {
+	link = rtnl_get_link(family, type);
+	if (!link || !link->doit) {
 		family = PF_UNSPEC;
-		handlers = rcu_dereference(rtnl_msg_handlers[family]);
+		link = rtnl_get_link(PF_UNSPEC, type);
+		if (!link || !link->doit)
+			goto out_unlock;
+	}
+
+	owner = link->owner;
+	if (!try_module_get(owner)) {
+		err = -EPROTONOSUPPORT;
+		goto out_unlock;
 	}
 
-	flags = READ_ONCE(handlers[type].flags);
+	flags = link->flags;
 	if (flags & RTNL_FLAG_DOIT_UNLOCKED) {
-		refcount_inc(&rtnl_msg_handlers_ref[family]);
-		doit = READ_ONCE(handlers[type].doit);
+		doit = link->doit;
 		rcu_read_unlock();
 		if (doit)
 			err = doit(skb, nlh, extack);
-		refcount_dec(&rtnl_msg_handlers_ref[family]);
+		module_put(owner);
 		return err;
 	}
-
 	rcu_read_unlock();
 
 	rtnl_lock();
-	handlers = rtnl_dereference(rtnl_msg_handlers[family]);
-	if (handlers) {
-		doit = READ_ONCE(handlers[type].doit);
-		if (doit)
-			err = doit(skb, nlh, extack);
-	}
+	link = rtnl_get_link(family, type);
+	if (link && link->doit)
+		err = link->doit(skb, nlh, extack);
 	rtnl_unlock();
+
+	module_put(owner);
+
+	return err;
+
+out_unlock:
+	rcu_read_unlock();
 	return err;
 
 err_unlock:
@@ -4454,7 +4639,7 @@ static int rtnetlink_event(struct notifier_block *this, unsigned long event, voi
 	case NETDEV_CHANGELOWERSTATE:
 	case NETDEV_CHANGE_TX_QUEUE_LEN:
 		rtmsg_ifinfo_event(RTM_NEWLINK, dev, 0, rtnl_get_event(event),
-				   GFP_KERNEL, NULL);
+				   GFP_KERNEL, NULL, 0);
 		break;
 	default:
 		break;
@@ -4498,11 +4683,6 @@ static struct pernet_operations rtnetlink_net_ops = {
 
 void __init rtnetlink_init(void)
 {
-	int i;
-
-	for (i = 0; i < ARRAY_SIZE(rtnl_msg_handlers_ref); i++)
-		refcount_set(&rtnl_msg_handlers_ref[i], 1);
-
 	if (register_pernet_subsys(&rtnetlink_net_ops))
 		panic("rtnetlink_init: cannot initialize rtnetlink\n");
 
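For orientation: the rewrite above replaces the flat per-family handler arrays (and their refcount_t guards) with per-message rtnl_link entries that carry an owning module, so a dump in flight pins the module via netlink_dump_start(). A minimal userspace sketch of the lookup-and-pin shape, with a plain integer refcount standing in for try_module_get()/module_put() and all table sizes assumed:

#include <stddef.h>

#define NPROTO		46	/* assumed, stands in for the family table width */
#define NR_MSGTYPES	26	/* assumed, stands in for RTM_NR_MSGTYPES */

struct owner { int refcnt; };

struct link {
	int (*doit)(const char *msg);
	struct owner *owner;
};

static struct link *handlers[NPROTO][NR_MSGTYPES];

/* Mirrors rtnl_get_link(): bounds-check, then two indexed loads. */
static struct link *get_link(int family, int type)
{
	if (family < 0 || family >= NPROTO ||
	    type < 0 || type >= NR_MSGTYPES)
		return NULL;
	return handlers[family][type];
}

static int dispatch(int family, int type, const char *msg)
{
	struct link *l = get_link(family, type);
	int err;

	if (!l || !l->doit)
		l = get_link(0 /* PF_UNSPEC fallback */, type);
	if (!l || !l->doit)
		return -95;		/* -EOPNOTSUPP */

	l->owner->refcnt++;		/* stands in for try_module_get() */
	err = l->doit(msg);
	l->owner->refcnt--;		/* stands in for module_put() */
	return err;
}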
diff --git a/net/core/skbuff.c b/net/core/skbuff.c
index 08f574081315..01e8285aea73 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -3656,6 +3656,10 @@ normal:
 		skb_shinfo(nskb)->tx_flags |= skb_shinfo(head_skb)->tx_flags &
 					      SKBTX_SHARED_FRAG;
 
+		if (skb_orphan_frags(frag_skb, GFP_ATOMIC) ||
+		    skb_zerocopy_clone(nskb, frag_skb, GFP_ATOMIC))
+			goto err;
+
 		while (pos < offset + len) {
 			if (i >= nfrags) {
 				BUG_ON(skb_headlen(list_skb));
@@ -3667,6 +3671,11 @@ normal:
 
 				BUG_ON(!nfrags);
 
+				if (skb_orphan_frags(frag_skb, GFP_ATOMIC) ||
+				    skb_zerocopy_clone(nskb, frag_skb,
+						       GFP_ATOMIC))
+					goto err;
+
 				list_skb = list_skb->next;
 			}
 
@@ -3678,11 +3687,6 @@ normal:
 				goto err;
 			}
 
-			if (unlikely(skb_orphan_frags(frag_skb, GFP_ATOMIC)))
-				goto err;
-			if (skb_zerocopy_clone(nskb, frag_skb, GFP_ATOMIC))
-				goto err;
-
 			*nskb_frag = *frag;
 			__skb_frag_ref(nskb_frag);
 			size = skb_frag_size(nskb_frag);
diff --git a/net/core/sock.c b/net/core/sock.c
index 1211159718ad..1033f8ab0547 100644
--- a/net/core/sock.c
+++ b/net/core/sock.c
@@ -145,6 +145,8 @@
 static DEFINE_MUTEX(proto_list_mutex);
 static LIST_HEAD(proto_list);
 
+static void sock_inuse_add(struct net *net, int val);
+
 /**
  * sk_ns_capable - General socket capability test
  * @sk: Socket to use a capability on or through
@@ -1531,8 +1533,11 @@ struct sock *sk_alloc(struct net *net, int family, gfp_t priority,
 		sk->sk_kern_sock = kern;
 		sock_lock_init(sk);
 		sk->sk_net_refcnt = kern ? 0 : 1;
-		if (likely(sk->sk_net_refcnt))
+		if (likely(sk->sk_net_refcnt)) {
 			get_net(net);
+			sock_inuse_add(net, 1);
+		}
+
 		sock_net_set(sk, net);
 		refcount_set(&sk->sk_wmem_alloc, 1);
 
@@ -1595,6 +1600,9 @@ void sk_destruct(struct sock *sk)
 
 static void __sk_free(struct sock *sk)
 {
+	if (likely(sk->sk_net_refcnt))
+		sock_inuse_add(sock_net(sk), -1);
+
 	if (unlikely(sock_diag_has_destroy_listeners(sk) && sk->sk_net_refcnt))
 		sock_diag_broadcast_destroy(sk);
 	else
@@ -1716,6 +1724,8 @@ struct sock *sk_clone_lock(const struct sock *sk, const gfp_t priority)
 		newsk->sk_priority = 0;
 		newsk->sk_incoming_cpu = raw_smp_processor_id();
 		atomic64_set(&newsk->sk_cookie, 0);
+		if (likely(newsk->sk_net_refcnt))
+			sock_inuse_add(sock_net(newsk), 1);
 
 		/*
 		 * Before updating sk_refcnt, we must commit prior changes to memory
@@ -3045,7 +3055,7 @@ static DECLARE_BITMAP(proto_inuse_idx, PROTO_INUSE_NR);
 
 void sock_prot_inuse_add(struct net *net, struct proto *prot, int val)
 {
-	__this_cpu_add(net->core.inuse->val[prot->inuse_idx], val);
+	__this_cpu_add(net->core.prot_inuse->val[prot->inuse_idx], val);
 }
 EXPORT_SYMBOL_GPL(sock_prot_inuse_add);
 
@@ -3055,21 +3065,50 @@ int sock_prot_inuse_get(struct net *net, struct proto *prot)
 	int res = 0;
 
 	for_each_possible_cpu(cpu)
-		res += per_cpu_ptr(net->core.inuse, cpu)->val[idx];
+		res += per_cpu_ptr(net->core.prot_inuse, cpu)->val[idx];
 
 	return res >= 0 ? res : 0;
 }
 EXPORT_SYMBOL_GPL(sock_prot_inuse_get);
 
+static void sock_inuse_add(struct net *net, int val)
+{
+	this_cpu_add(*net->core.sock_inuse, val);
+}
+
+int sock_inuse_get(struct net *net)
+{
+	int cpu, res = 0;
+
+	for_each_possible_cpu(cpu)
+		res += *per_cpu_ptr(net->core.sock_inuse, cpu);
+
+	return res;
+}
+EXPORT_SYMBOL_GPL(sock_inuse_get);
+
 static int __net_init sock_inuse_init_net(struct net *net)
 {
-	net->core.inuse = alloc_percpu(struct prot_inuse);
-	return net->core.inuse ? 0 : -ENOMEM;
+	net->core.prot_inuse = alloc_percpu(struct prot_inuse);
+	if (net->core.prot_inuse == NULL)
+		return -ENOMEM;
+
+	net->core.sock_inuse = alloc_percpu(int);
+	if (net->core.sock_inuse == NULL)
+		goto out;
+
+	return 0;
+
+out:
+	free_percpu(net->core.prot_inuse);
+	return -ENOMEM;
 }
 
 static void __net_exit sock_inuse_exit_net(struct net *net)
 {
-	free_percpu(net->core.inuse);
+	free_percpu(net->core.prot_inuse);
+	free_percpu(net->core.sock_inuse);
 }
 
 static struct pernet_operations net_inuse_ops = {
@@ -3112,6 +3151,10 @@ static inline void assign_proto_idx(struct proto *prot)
 static inline void release_proto_idx(struct proto *prot)
 {
 }
+
+static void sock_inuse_add(struct net *net, int val)
+{
+}
 #endif
 
 static void req_prot_cleanup(struct request_sock_ops *rsk_prot)
@@ -3319,7 +3362,6 @@ static int proto_seq_open(struct inode *inode, struct file *file)
 }
 
 static const struct file_operations proto_seq_fops = {
-	.owner		= THIS_MODULE,
 	.open		= proto_seq_open,
 	.read		= seq_read,
 	.llseek		= seq_lseek,
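The sock_inuse counter introduced above follows the classic per-CPU split: writers touch only their local slot, readers fold all slots. A minimal userspace sketch of the same shape (a flat array stands in for alloc_percpu(int); real per-CPU data also needs preemption protection, elided here):

#include <stdio.h>

#define NR_CPUS 4

static int sock_inuse[NR_CPUS];	/* stands in for net->core.sock_inuse */

/* Writer side: bump only the local CPU's slot, no shared cacheline. */
static void sock_inuse_add(int cpu, int val)
{
	sock_inuse[cpu] += val;
}

/* Reader side: fold all slots; transiently inconsistent but cheap. */
static int sock_inuse_get(void)
{
	int cpu, res = 0;

	for (cpu = 0; cpu < NR_CPUS; cpu++)
		res += sock_inuse[cpu];
	return res;
}

int main(void)
{
	sock_inuse_add(0, 1);
	sock_inuse_add(2, 1);
	sock_inuse_add(0, -1);
	printf("sockets in use: %d\n", sock_inuse_get());	/* 1 */
	return 0;
}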
diff --git a/net/core/sock_reuseport.c b/net/core/sock_reuseport.c
index 5eeb1d20cc38..c5bb52bc73a1 100644
--- a/net/core/sock_reuseport.c
+++ b/net/core/sock_reuseport.c
@@ -235,7 +235,9 @@ struct sock *reuseport_select_sock(struct sock *sk,
 
 		if (prog && skb)
 			sk2 = run_bpf(reuse, socks, prog, skb, hdr_len);
-		else
+
+		/* no BPF program or invalid BPF result: fall back to the hash */
+		if (!sk2)
 			sk2 = reuse->socks[reciprocal_scale(hash, socks)];
 	}
 
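reciprocal_scale(), used in the hash fallback above, maps a 32-bit hash onto [0, socks) without a division. A standalone sketch of the identity it relies on:

#include <stdint.h>
#include <stdio.h>

/* Same trick as the kernel helper: (val * n) >> 32 lands in [0, n)
 * because val < 2^32 implies val * n < n * 2^32.
 */
static uint32_t reciprocal_scale(uint32_t val, uint32_t ep_ro)
{
	return (uint32_t)(((uint64_t)val * ep_ro) >> 32);
}

int main(void)
{
	uint32_t hash = 0xdeadbeef;

	/* Pick one of 7 reuseport sockets from the flow hash. */
	printf("index = %u\n", reciprocal_scale(hash, 7));	/* 6 */
	return 0;
}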
diff --git a/net/core/sysctl_net_core.c b/net/core/sysctl_net_core.c
index a47ad6cd41c0..f2d0462611c3 100644
--- a/net/core/sysctl_net_core.c
+++ b/net/core/sysctl_net_core.c
@@ -25,6 +25,7 @@
 
 static int zero = 0;
 static int one = 1;
+static int two __maybe_unused = 2;
 static int min_sndbuf = SOCK_MIN_SNDBUF;
 static int min_rcvbuf = SOCK_MIN_RCVBUF;
 static int max_skb_frags = MAX_SKB_FRAGS;
@@ -250,6 +251,46 @@ static int proc_do_rss_key(struct ctl_table *table, int write,
 	return proc_dostring(&fake_table, write, buffer, lenp, ppos);
 }
 
+#ifdef CONFIG_BPF_JIT
+static int proc_dointvec_minmax_bpf_enable(struct ctl_table *table, int write,
+					   void __user *buffer, size_t *lenp,
+					   loff_t *ppos)
+{
+	int ret, jit_enable = *(int *)table->data;
+	struct ctl_table tmp = *table;
+
+	if (write && !capable(CAP_SYS_ADMIN))
+		return -EPERM;
+
+	tmp.data = &jit_enable;
+	ret = proc_dointvec_minmax(&tmp, write, buffer, lenp, ppos);
+	if (write && !ret) {
+		if (jit_enable < 2 ||
+		    (jit_enable == 2 && bpf_dump_raw_ok())) {
+			*(int *)table->data = jit_enable;
+			if (jit_enable == 2)
+				pr_warn("bpf_jit_enable = 2 was set! NEVER use this in production, only for JIT debugging!\n");
+		} else {
+			ret = -EPERM;
+		}
+	}
+	return ret;
+}
+
+# ifdef CONFIG_HAVE_EBPF_JIT
+static int
+proc_dointvec_minmax_bpf_restricted(struct ctl_table *table, int write,
+				    void __user *buffer, size_t *lenp,
+				    loff_t *ppos)
+{
+	if (!capable(CAP_SYS_ADMIN))
+		return -EPERM;
+
+	return proc_dointvec_minmax(table, write, buffer, lenp, ppos);
+}
+# endif
+#endif
+
 static struct ctl_table net_core_table[] = {
 #ifdef CONFIG_NET
 	{
@@ -325,13 +366,14 @@ static struct ctl_table net_core_table[] = {
 		.data		= &bpf_jit_enable,
 		.maxlen		= sizeof(int),
 		.mode		= 0644,
-#ifndef CONFIG_BPF_JIT_ALWAYS_ON
-		.proc_handler	= proc_dointvec
-#else
-		.proc_handler	= proc_dointvec_minmax,
+		.proc_handler	= proc_dointvec_minmax_bpf_enable,
+# ifdef CONFIG_BPF_JIT_ALWAYS_ON
 		.extra1		= &one,
 		.extra2		= &one,
-#endif
+# else
+		.extra1		= &zero,
+		.extra2		= &two,
+# endif
 	},
 # ifdef CONFIG_HAVE_EBPF_JIT
 	{
@@ -339,14 +381,18 @@ static struct ctl_table net_core_table[] = {
 		.data		= &bpf_jit_harden,
 		.maxlen		= sizeof(int),
 		.mode		= 0600,
-		.proc_handler	= proc_dointvec,
+		.proc_handler	= proc_dointvec_minmax_bpf_restricted,
+		.extra1		= &zero,
+		.extra2		= &two,
 	},
 	{
 		.procname	= "bpf_jit_kallsyms",
 		.data		= &bpf_jit_kallsyms,
 		.maxlen		= sizeof(int),
 		.mode		= 0600,
-		.proc_handler	= proc_dointvec,
+		.proc_handler	= proc_dointvec_minmax_bpf_restricted,
+		.extra1		= &zero,
+		.extra2		= &one,
 	},
 # endif
 #endif
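proc_dointvec_minmax_bpf_enable above validates into a stack copy and commits only on success, so a rejected write cannot leave the knob half-changed. A userspace sketch of that copy-validate-commit shape (names hypothetical):

#include <stdio.h>

static int jit_enable;		/* the live knob */

/* Returns 0 and commits, or -1 and leaves the knob untouched. */
static int set_jit_enable(int requested, int dump_raw_ok)
{
	int tmp = requested;	/* validate a copy, not the live value */

	if (tmp < 0 || tmp > 2)
		return -1;
	if (tmp == 2 && !dump_raw_ok)
		return -1;	/* mirrors the bpf_dump_raw_ok() gate */

	jit_enable = tmp;	/* commit only after all checks pass */
	if (tmp == 2)
		fprintf(stderr, "jit_enable = 2: debugging only\n");
	return 0;
}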
diff --git a/net/core/xdp.c b/net/core/xdp.c
new file mode 100644
index 000000000000..097a0f74e004
--- /dev/null
+++ b/net/core/xdp.c
@@ -0,0 +1,73 @@
+/* net/core/xdp.c
+ *
+ * Copyright (c) 2017 Jesper Dangaard Brouer, Red Hat Inc.
+ * Released under terms in GPL version 2.  See COPYING.
+ */
+#include <linux/types.h>
+#include <linux/mm.h>
+
+#include <net/xdp.h>
+
+#define REG_STATE_NEW		0x0
+#define REG_STATE_REGISTERED	0x1
+#define REG_STATE_UNREGISTERED	0x2
+#define REG_STATE_UNUSED	0x3
+
+void xdp_rxq_info_unreg(struct xdp_rxq_info *xdp_rxq)
+{
+	/* Simplify driver cleanup code paths, allow unreg "unused" */
+	if (xdp_rxq->reg_state == REG_STATE_UNUSED)
+		return;
+
+	WARN(!(xdp_rxq->reg_state == REG_STATE_REGISTERED), "Driver BUG");
+
+	xdp_rxq->reg_state = REG_STATE_UNREGISTERED;
+	xdp_rxq->dev = NULL;
+}
+EXPORT_SYMBOL_GPL(xdp_rxq_info_unreg);
+
+static void xdp_rxq_info_init(struct xdp_rxq_info *xdp_rxq)
+{
+	memset(xdp_rxq, 0, sizeof(*xdp_rxq));
+}
+
+/* Returns 0 on success, negative on failure */
+int xdp_rxq_info_reg(struct xdp_rxq_info *xdp_rxq,
+		     struct net_device *dev, u32 queue_index)
+{
+	if (xdp_rxq->reg_state == REG_STATE_UNUSED) {
+		WARN(1, "Driver promised not to register this");
+		return -EINVAL;
+	}
+
+	if (xdp_rxq->reg_state == REG_STATE_REGISTERED) {
+		WARN(1, "Missing unregister, handled but fix driver");
+		xdp_rxq_info_unreg(xdp_rxq);
+	}
+
+	if (!dev) {
+		WARN(1, "Missing net_device from driver");
+		return -ENODEV;
+	}
+
+	/* State either UNREGISTERED or NEW */
+	xdp_rxq_info_init(xdp_rxq);
+	xdp_rxq->dev = dev;
+	xdp_rxq->queue_index = queue_index;
+
+	xdp_rxq->reg_state = REG_STATE_REGISTERED;
+	return 0;
+}
+EXPORT_SYMBOL_GPL(xdp_rxq_info_reg);
+
+void xdp_rxq_info_unused(struct xdp_rxq_info *xdp_rxq)
+{
+	xdp_rxq->reg_state = REG_STATE_UNUSED;
+}
+EXPORT_SYMBOL_GPL(xdp_rxq_info_unused);
+
+bool xdp_rxq_info_is_reg(struct xdp_rxq_info *xdp_rxq)
+{
+	return (xdp_rxq->reg_state == REG_STATE_REGISTERED);
+}
+EXPORT_SYMBOL_GPL(xdp_rxq_info_is_reg);
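A driver is expected to walk this state machine once per RX queue: register at setup, unregister at teardown. A hedged sketch of the expected call pattern, using the functions added above (the surrounding ring structure is hypothetical):

#include <linux/netdevice.h>
#include <net/xdp.h>

struct my_rx_ring {			/* hypothetical driver ring */
	struct xdp_rxq_info xdp_rxq;
};

static int my_ring_setup(struct my_rx_ring *ring,
			 struct net_device *dev, u32 qid)
{
	/* NEW or UNREGISTERED -> REGISTERED; WARNs on driver misuse. */
	return xdp_rxq_info_reg(&ring->xdp_rxq, dev, qid);
}

static void my_ring_teardown(struct my_rx_ring *ring)
{
	/* REGISTERED (or UNUSED, which is tolerated) -> UNREGISTERED. */
	xdp_rxq_info_unreg(&ring->xdp_rxq);
}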
diff --git a/net/dccp/Kconfig b/net/dccp/Kconfig
index 8c0ef71bed2f..b270e84d9c13 100644
--- a/net/dccp/Kconfig
+++ b/net/dccp/Kconfig
@@ -39,23 +39,6 @@ config IP_DCCP_DEBUG
 
 	  Just say N.
 
-config NET_DCCPPROBE
-	tristate "DCCP connection probing"
-	depends on PROC_FS && KPROBES
-	---help---
-	This module allows for capturing the changes to DCCP connection
-	state in response to incoming packets. It is used for debugging
-	DCCP congestion avoidance modules. If you don't understand
-	what was just said, you don't need it: say N.
-
-	Documentation on how to use DCCP connection probing can be found
-	at:
-	
-	  http://www.linuxfoundation.org/collaborate/workgroups/networking/dccpprobe
-
-	To compile this code as a module, choose M here: the
-	module will be called dccp_probe.
-
 
 endmenu
 
diff --git a/net/dccp/Makefile b/net/dccp/Makefile
index 2e7b56097bc4..5b4ff37bc806 100644
--- a/net/dccp/Makefile
+++ b/net/dccp/Makefile
@@ -21,9 +21,10 @@ obj-$(subst y,$(CONFIG_IP_DCCP),$(CONFIG_IPV6)) += dccp_ipv6.o
 dccp_ipv6-y := ipv6.o
 
 obj-$(CONFIG_INET_DCCP_DIAG) += dccp_diag.o
-obj-$(CONFIG_NET_DCCPPROBE) += dccp_probe.o
 
 dccp-$(CONFIG_SYSCTL) += sysctl.o
 
 dccp_diag-y := diag.o
-dccp_probe-y := probe.o
+
+# build with local directory for trace.h
+CFLAGS_proto.o := -I$(src)
diff --git a/net/dccp/ackvec.c b/net/dccp/ackvec.c
index 3de0d0362d7f..2a24f7d171a5 100644
--- a/net/dccp/ackvec.c
+++ b/net/dccp/ackvec.c
@@ -228,7 +228,7 @@ static void dccp_ackvec_add_new(struct dccp_ackvec *av, u32 num_packets,
 	}
 
 	if (num_cells + dccp_ackvec_buflen(av) >= DCCPAV_MAX_ACKVEC_LEN) {
-		DCCP_CRIT("Ack Vector buffer overflow: dropping old entries\n");
+		DCCP_CRIT("Ack Vector buffer overflow: dropping old entries");
 		av->av_overflow = true;
 	}
 
diff --git a/net/dccp/minisocks.c b/net/dccp/minisocks.c
index 178bb9833311..37ccbe62eb1a 100644
--- a/net/dccp/minisocks.c
+++ b/net/dccp/minisocks.c
@@ -63,9 +63,10 @@ void dccp_time_wait(struct sock *sk, int state, int timeo)
 		 */
 		local_bh_disable();
 		inet_twsk_schedule(tw, timeo);
-		/* Linkage updates. */
-		__inet_twsk_hashdance(tw, sk, &dccp_hashinfo);
-		inet_twsk_put(tw);
+		/* Linkage updates.
+		 * Note that access to tw after this point is illegal.
+		 */
+		inet_twsk_hashdance(tw, sk, &dccp_hashinfo);
 		local_bh_enable();
 	} else {
 		/* Sorry, if we're out of memory, just CLOSE this
diff --git a/net/dccp/probe.c b/net/dccp/probe.c
deleted file mode 100644
index 3d3fda05b32d..000000000000
--- a/net/dccp/probe.c
+++ /dev/null
@@ -1,203 +0,0 @@
-/*
- * dccp_probe - Observe the DCCP flow with kprobes.
- *
- * The idea for this came from Werner Almesberger's umlsim
- * Copyright (C) 2004, Stephen Hemminger <shemminger@osdl.org>
- *
- * Modified for DCCP from Stephen Hemminger's code
- * Copyright (C) 2006, Ian McDonald <ian.mcdonald@jandi.co.nz>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
- */
-
-#include <linux/kernel.h>
-#include <linux/kprobes.h>
-#include <linux/socket.h>
-#include <linux/dccp.h>
-#include <linux/proc_fs.h>
-#include <linux/module.h>
-#include <linux/kfifo.h>
-#include <linux/vmalloc.h>
-#include <linux/time64.h>
-#include <linux/gfp.h>
-#include <net/net_namespace.h>
-
-#include "dccp.h"
-#include "ccid.h"
-#include "ccids/ccid3.h"
-
-static int port;
-
-static int bufsize = 64 * 1024;
-
-static const char procname[] = "dccpprobe";
-
-static struct {
-	struct kfifo	  fifo;
-	spinlock_t	  lock;
-	wait_queue_head_t wait;
-	struct timespec64 tstart;
-} dccpw;
-
-static void printl(const char *fmt, ...)
-{
-	va_list args;
-	int len;
-	struct timespec64 now;
-	char tbuf[256];
-
-	va_start(args, fmt);
-	getnstimeofday64(&now);
-
-	now = timespec64_sub(now, dccpw.tstart);
-
-	len = sprintf(tbuf, "%lu.%06lu ",
-		      (unsigned long) now.tv_sec,
-		      (unsigned long) now.tv_nsec / NSEC_PER_USEC);
-	len += vscnprintf(tbuf+len, sizeof(tbuf)-len, fmt, args);
-	va_end(args);
-
-	kfifo_in_locked(&dccpw.fifo, tbuf, len, &dccpw.lock);
-	wake_up(&dccpw.wait);
-}
-
-static int jdccp_sendmsg(struct sock *sk, struct msghdr *msg, size_t size)
-{
-	const struct inet_sock *inet = inet_sk(sk);
-	struct ccid3_hc_tx_sock *hc = NULL;
-
-	if (ccid_get_current_tx_ccid(dccp_sk(sk)) == DCCPC_CCID3)
-		hc = ccid3_hc_tx_sk(sk);
-
-	if (port == 0 || ntohs(inet->inet_dport) == port ||
-	    ntohs(inet->inet_sport) == port) {
-		if (hc)
-			printl("%pI4:%u %pI4:%u %d %d %d %d %u %llu %llu %d\n",
-			       &inet->inet_saddr, ntohs(inet->inet_sport),
-			       &inet->inet_daddr, ntohs(inet->inet_dport), size,
-			       hc->tx_s, hc->tx_rtt, hc->tx_p,
-			       hc->tx_x_calc, hc->tx_x_recv >> 6,
-			       hc->tx_x >> 6, hc->tx_t_ipi);
-		else
-			printl("%pI4:%u %pI4:%u %d\n",
-			       &inet->inet_saddr, ntohs(inet->inet_sport),
-			       &inet->inet_daddr, ntohs(inet->inet_dport),
-			       size);
-	}
-
-	jprobe_return();
-	return 0;
-}
-
-static struct jprobe dccp_send_probe = {
-	.kp	= {
-		.symbol_name = "dccp_sendmsg",
-	},
-	.entry	= jdccp_sendmsg,
-};
-
-static int dccpprobe_open(struct inode *inode, struct file *file)
-{
-	kfifo_reset(&dccpw.fifo);
-	getnstimeofday64(&dccpw.tstart);
-	return 0;
-}
-
-static ssize_t dccpprobe_read(struct file *file, char __user *buf,
-			      size_t len, loff_t *ppos)
-{
-	int error = 0, cnt = 0;
-	unsigned char *tbuf;
-
-	if (!buf)
-		return -EINVAL;
-
-	if (len == 0)
-		return 0;
-
-	tbuf = vmalloc(len);
-	if (!tbuf)
-		return -ENOMEM;
-
-	error = wait_event_interruptible(dccpw.wait,
-					 kfifo_len(&dccpw.fifo) != 0);
-	if (error)
-		goto out_free;
-
-	cnt = kfifo_out_locked(&dccpw.fifo, tbuf, len, &dccpw.lock);
-	error = copy_to_user(buf, tbuf, cnt) ? -EFAULT : 0;
-
-out_free:
-	vfree(tbuf);
-
-	return error ? error : cnt;
-}
-
-static const struct file_operations dccpprobe_fops = {
-	.owner	 = THIS_MODULE,
-	.open	 = dccpprobe_open,
-	.read    = dccpprobe_read,
-	.llseek  = noop_llseek,
-};
-
-static __init int dccpprobe_init(void)
-{
-	int ret = -ENOMEM;
-
-	init_waitqueue_head(&dccpw.wait);
-	spin_lock_init(&dccpw.lock);
-	if (kfifo_alloc(&dccpw.fifo, bufsize, GFP_KERNEL))
-		return ret;
-	if (!proc_create(procname, S_IRUSR, init_net.proc_net, &dccpprobe_fops))
-		goto err0;
-
-	ret = register_jprobe(&dccp_send_probe);
-	if (ret) {
-		ret = request_module("dccp");
-		if (!ret)
-			ret = register_jprobe(&dccp_send_probe);
-	}
-
-	if (ret)
-		goto err1;
-
-	pr_info("DCCP watch registered (port=%d)\n", port);
-	return 0;
-err1:
-	remove_proc_entry(procname, init_net.proc_net);
-err0:
-	kfifo_free(&dccpw.fifo);
-	return ret;
-}
-module_init(dccpprobe_init);
-
-static __exit void dccpprobe_exit(void)
-{
-	kfifo_free(&dccpw.fifo);
-	remove_proc_entry(procname, init_net.proc_net);
-	unregister_jprobe(&dccp_send_probe);
-
-}
-module_exit(dccpprobe_exit);
-
-MODULE_PARM_DESC(port, "Port to match (0=all)");
-module_param(port, int, 0);
-
-MODULE_PARM_DESC(bufsize, "Log buffer size (default 64k)");
-module_param(bufsize, int, 0);
-
-MODULE_AUTHOR("Ian McDonald <ian.mcdonald@jandi.co.nz>");
-MODULE_DESCRIPTION("DCCP snooper");
-MODULE_LICENSE("GPL");
diff --git a/net/dccp/proto.c b/net/dccp/proto.c
index 8b8db3d481bd..74685fecfdb9 100644
--- a/net/dccp/proto.c
+++ b/net/dccp/proto.c
@@ -38,6 +38,9 @@
 #include "dccp.h"
 #include "feat.h"
 
+#define CREATE_TRACE_POINTS
+#include "trace.h"
+
 DEFINE_SNMP_STAT(struct dccp_mib, dccp_statistics) __read_mostly;
 
 EXPORT_SYMBOL_GPL(dccp_statistics);
@@ -110,7 +113,7 @@ void dccp_set_state(struct sock *sk, const int state)
 	/* Change state AFTER socket is unhashed to avoid closed
 	 * socket sitting in hash tables.
 	 */
-	sk->sk_state = state;
+	inet_sk_set_state(sk, state);
 }
 
 EXPORT_SYMBOL_GPL(dccp_set_state);
@@ -761,6 +764,8 @@ int dccp_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
 	int rc, size;
 	long timeo;
 
+	trace_dccp_probe(sk, len);
+
 	if (len > dp->dccps_mss_cache)
 		return -EMSGSIZE;
 
diff --git a/net/dccp/trace.h b/net/dccp/trace.h
new file mode 100644
index 000000000000..5062421beee9
--- /dev/null
+++ b/net/dccp/trace.h
@@ -0,0 +1,84 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM dccp
+
+#if !defined(_TRACE_DCCP_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _TRACE_DCCP_H
+
+#include <net/sock.h>
+#include "dccp.h"
+#include "ccids/ccid3.h"
+#include <linux/tracepoint.h>
+#include <trace/events/net_probe_common.h>
+
+TRACE_EVENT(dccp_probe,
+
+	TP_PROTO(struct sock *sk, size_t size),
+
+	TP_ARGS(sk, size),
+
+	TP_STRUCT__entry(
+		/* sockaddr_in6 is always bigger than sockaddr_in */
+		__array(__u8, saddr, sizeof(struct sockaddr_in6))
+		__array(__u8, daddr, sizeof(struct sockaddr_in6))
+		__field(__u16, sport)
+		__field(__u16, dport)
+		__field(__u16, size)
+		__field(__u16, tx_s)
+		__field(__u32, tx_rtt)
+		__field(__u32, tx_p)
+		__field(__u32, tx_x_calc)
+		__field(__u64, tx_x_recv)
+		__field(__u64, tx_x)
+		__field(__u32, tx_t_ipi)
+	),
+
+	TP_fast_assign(
+		const struct inet_sock *inet = inet_sk(sk);
+		struct ccid3_hc_tx_sock *hc = NULL;
+
+		if (ccid_get_current_tx_ccid(dccp_sk(sk)) == DCCPC_CCID3)
+			hc = ccid3_hc_tx_sk(sk);
+
+		memset(__entry->saddr, 0, sizeof(struct sockaddr_in6));
+		memset(__entry->daddr, 0, sizeof(struct sockaddr_in6));
+
+		TP_STORE_ADDR_PORTS(__entry, inet, sk);
+
+		/* For filtering use */
+		__entry->sport = ntohs(inet->inet_sport);
+		__entry->dport = ntohs(inet->inet_dport);
+
+		__entry->size = size;
+		if (hc) {
+			__entry->tx_s = hc->tx_s;
+			__entry->tx_rtt = hc->tx_rtt;
+			__entry->tx_p = hc->tx_p;
+			__entry->tx_x_calc = hc->tx_x_calc;
+			__entry->tx_x_recv = hc->tx_x_recv >> 6;
+			__entry->tx_x = hc->tx_x >> 6;
+			__entry->tx_t_ipi = hc->tx_t_ipi;
+		} else {
+			__entry->tx_s = 0;
+			memset(&__entry->tx_rtt, 0, (void *)&__entry->tx_t_ipi -
+			       (void *)&__entry->tx_rtt +
+			       sizeof(__entry->tx_t_ipi));
+		}
+	),
+
+	TP_printk("src=%pISpc dest=%pISpc size=%d tx_s=%d tx_rtt=%d "
+		  "tx_p=%d tx_x_calc=%u tx_x_recv=%llu tx_x=%llu tx_t_ipi=%d",
+		  __entry->saddr, __entry->daddr, __entry->size,
+		  __entry->tx_s, __entry->tx_rtt, __entry->tx_p,
+		  __entry->tx_x_calc, __entry->tx_x_recv, __entry->tx_x,
+		  __entry->tx_t_ipi)
+);
+
+#endif /* _TRACE_DCCP_H */
+
+/* This part must be outside protection */
+#undef TRACE_INCLUDE_PATH
+#define TRACE_INCLUDE_PATH .
+#undef TRACE_INCLUDE_FILE
+#define TRACE_INCLUDE_FILE trace
+#include <trace/define_trace.h>
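The else-branch in TP_fast_assign() zeroes the whole run of CCID3 fields with a single memset by spanning from the first field to just past the last. That address arithmetic is easy to get wrong, so a standalone sketch of the same span computation (field layout invented for illustration):

#include <stdio.h>
#include <string.h>

struct stats {			/* stand-in for the trace entry */
	unsigned short tx_s;
	unsigned int tx_rtt;	/* first field of the zeroed span */
	unsigned int tx_p;
	unsigned long long tx_x;
	unsigned int tx_t_ipi;	/* last field of the zeroed span */
};

int main(void)
{
	struct stats e = { .tx_s = 1, .tx_rtt = 2, .tx_x = 3, .tx_t_ipi = 4 };

	/* Zero [&tx_rtt, &tx_t_ipi + 1): offset delta plus last field size.
	 * Padding inside the span is zeroed too, which is harmless here.
	 */
	memset(&e.tx_rtt, 0, (char *)&e.tx_t_ipi - (char *)&e.tx_rtt +
	       sizeof(e.tx_t_ipi));

	printf("%u %llu %u\n", e.tx_rtt, e.tx_x, e.tx_t_ipi);	/* 0 0 0 */
	return 0;
}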
diff --git a/net/decnet/af_decnet.c b/net/decnet/af_decnet.c
index 9c2dde819817..cc1b505453a8 100644
--- a/net/decnet/af_decnet.c
+++ b/net/decnet/af_decnet.c
@@ -2320,7 +2320,6 @@ static int dn_socket_seq_open(struct inode *inode, struct file *file)
 }
 
 static const struct file_operations dn_socket_seq_fops = {
-	.owner		= THIS_MODULE,
 	.open		= dn_socket_seq_open,
 	.read		= seq_read,
 	.llseek		= seq_lseek,
diff --git a/net/decnet/dn_dev.c b/net/decnet/dn_dev.c
index 9153247dad28..c9f5e1ebb9c8 100644
--- a/net/decnet/dn_dev.c
+++ b/net/decnet/dn_dev.c
@@ -1389,7 +1389,6 @@ static int dn_dev_seq_open(struct inode *inode, struct file *file)
 }
 
 static const struct file_operations dn_dev_seq_fops = {
-	.owner	 = THIS_MODULE,
 	.open	 = dn_dev_seq_open,
 	.read	 = seq_read,
 	.llseek	 = seq_lseek,
@@ -1418,9 +1417,12 @@ void __init dn_dev_init(void)
 
 	dn_dev_devices_on();
 
-	rtnl_register(PF_DECnet, RTM_NEWADDR, dn_nl_newaddr, NULL, 0);
-	rtnl_register(PF_DECnet, RTM_DELADDR, dn_nl_deladdr, NULL, 0);
-	rtnl_register(PF_DECnet, RTM_GETADDR, NULL, dn_nl_dump_ifaddr, 0);
+	rtnl_register_module(THIS_MODULE, PF_DECnet, RTM_NEWADDR,
+			     dn_nl_newaddr, NULL, 0);
+	rtnl_register_module(THIS_MODULE, PF_DECnet, RTM_DELADDR,
+			     dn_nl_deladdr, NULL, 0);
+	rtnl_register_module(THIS_MODULE, PF_DECnet, RTM_GETADDR,
+			     NULL, dn_nl_dump_ifaddr, 0);
 
 	proc_create("decnet_dev", S_IRUGO, init_net.proc_net, &dn_dev_seq_fops);
 
diff --git a/net/decnet/dn_fib.c b/net/decnet/dn_fib.c
index b37a1b833c77..fce94cbd4378 100644
--- a/net/decnet/dn_fib.c
+++ b/net/decnet/dn_fib.c
@@ -792,8 +792,10 @@ void __init dn_fib_init(void)
 
 	register_dnaddr_notifier(&dn_fib_dnaddr_notifier);
 
-	rtnl_register(PF_DECnet, RTM_NEWROUTE, dn_fib_rtm_newroute, NULL, 0);
-	rtnl_register(PF_DECnet, RTM_DELROUTE, dn_fib_rtm_delroute, NULL, 0);
+	rtnl_register_module(THIS_MODULE, PF_DECnet, RTM_NEWROUTE,
+			     dn_fib_rtm_newroute, NULL, 0);
+	rtnl_register_module(THIS_MODULE, PF_DECnet, RTM_DELROUTE,
+			     dn_fib_rtm_delroute, NULL, 0);
 }
 
 
diff --git a/net/decnet/dn_neigh.c b/net/decnet/dn_neigh.c
index 528119a5618e..6e37d9e6345e 100644
--- a/net/decnet/dn_neigh.c
+++ b/net/decnet/dn_neigh.c
@@ -597,7 +597,6 @@ static int dn_neigh_seq_open(struct inode *inode, struct file *file)
 }
 
 static const struct file_operations dn_neigh_seq_fops = {
-	.owner		= THIS_MODULE,
 	.open		= dn_neigh_seq_open,
 	.read		= seq_read,
 	.llseek		= seq_lseek,
diff --git a/net/decnet/dn_route.c b/net/decnet/dn_route.c
index 324cb9f2f551..ef20b8e31669 100644
--- a/net/decnet/dn_route.c
+++ b/net/decnet/dn_route.c
@@ -199,11 +199,11 @@ static void dn_dst_check_expire(struct timer_list *unused)
 						lockdep_is_held(&dn_rt_hash_table[i].lock))) != NULL) {
 			if (atomic_read(&rt->dst.__refcnt) > 1 ||
 			    (now - rt->dst.lastuse) < expire) {
-				rtp = &rt->dst.dn_next;
+				rtp = &rt->dn_next;
 				continue;
 			}
-			*rtp = rt->dst.dn_next;
-			rt->dst.dn_next = NULL;
+			*rtp = rt->dn_next;
+			rt->dn_next = NULL;
 			dst_dev_put(&rt->dst);
 			dst_release(&rt->dst);
 		}
@@ -233,11 +233,11 @@ static int dn_dst_gc(struct dst_ops *ops)
 						lockdep_is_held(&dn_rt_hash_table[i].lock))) != NULL) {
 			if (atomic_read(&rt->dst.__refcnt) > 1 ||
 			    (now - rt->dst.lastuse) < expire) {
-				rtp = &rt->dst.dn_next;
+				rtp = &rt->dn_next;
 				continue;
 			}
-			*rtp = rt->dst.dn_next;
-			rt->dst.dn_next = NULL;
+			*rtp = rt->dn_next;
+			rt->dn_next = NULL;
 			dst_dev_put(&rt->dst);
 			dst_release(&rt->dst);
 			break;
@@ -333,8 +333,8 @@ static int dn_insert_route(struct dn_route *rt, unsigned int hash, struct dn_rou
 						lockdep_is_held(&dn_rt_hash_table[hash].lock))) != NULL) {
 		if (compare_keys(&rth->fld, &rt->fld)) {
 			/* Put it first */
-			*rthp = rth->dst.dn_next;
-			rcu_assign_pointer(rth->dst.dn_next,
+			*rthp = rth->dn_next;
+			rcu_assign_pointer(rth->dn_next,
 					   dn_rt_hash_table[hash].chain);
 			rcu_assign_pointer(dn_rt_hash_table[hash].chain, rth);
 
@@ -345,10 +345,10 @@ static int dn_insert_route(struct dn_route *rt, unsigned int hash, struct dn_rou
 			*rp = rth;
 			return 0;
 		}
-		rthp = &rth->dst.dn_next;
+		rthp = &rth->dn_next;
 	}
 
-	rcu_assign_pointer(rt->dst.dn_next, dn_rt_hash_table[hash].chain);
+	rcu_assign_pointer(rt->dn_next, dn_rt_hash_table[hash].chain);
 	rcu_assign_pointer(dn_rt_hash_table[hash].chain, rt);
 
 	dst_hold_and_use(&rt->dst, now);
@@ -369,8 +369,8 @@ static void dn_run_flush(struct timer_list *unused)
 			goto nothing_to_declare;
 
 		for(; rt; rt = next) {
-			next = rcu_dereference_raw(rt->dst.dn_next);
-			RCU_INIT_POINTER(rt->dst.dn_next, NULL);
+			next = rcu_dereference_raw(rt->dn_next);
+			RCU_INIT_POINTER(rt->dn_next, NULL);
 			dst_dev_put(&rt->dst);
 			dst_release(&rt->dst);
 		}
@@ -1183,6 +1183,7 @@ make_route:
 	if (rt == NULL)
 		goto e_nobufs;
 
+	rt->dn_next = NULL;
 	memset(&rt->fld, 0, sizeof(rt->fld));
 	rt->fld.saddr        = oldflp->saddr;
 	rt->fld.daddr        = oldflp->daddr;
@@ -1252,7 +1253,7 @@ static int __dn_route_output_key(struct dst_entry **pprt, const struct flowidn *
 	if (!(flags & MSG_TRYHARD)) {
 		rcu_read_lock_bh();
 		for (rt = rcu_dereference_bh(dn_rt_hash_table[hash].chain); rt;
-			rt = rcu_dereference_bh(rt->dst.dn_next)) {
+			rt = rcu_dereference_bh(rt->dn_next)) {
 			if ((flp->daddr == rt->fld.daddr) &&
 			    (flp->saddr == rt->fld.saddr) &&
 			    (flp->flowidn_mark == rt->fld.flowidn_mark) &&
@@ -1448,6 +1449,7 @@ make_route:
 	if (rt == NULL)
 		goto e_nobufs;
 
+	rt->dn_next = NULL;
 	memset(&rt->fld, 0, sizeof(rt->fld));
 	rt->rt_saddr      = fld.saddr;
 	rt->rt_daddr      = fld.daddr;
@@ -1529,7 +1531,7 @@ static int dn_route_input(struct sk_buff *skb)
 
 	rcu_read_lock();
 	for(rt = rcu_dereference(dn_rt_hash_table[hash].chain); rt != NULL;
-	    rt = rcu_dereference(rt->dst.dn_next)) {
+	    rt = rcu_dereference(rt->dn_next)) {
 		if ((rt->fld.saddr == cb->src) &&
 		    (rt->fld.daddr == cb->dst) &&
 		    (rt->fld.flowidn_oif == 0) &&
@@ -1749,7 +1751,7 @@ int dn_cache_dump(struct sk_buff *skb, struct netlink_callback *cb)
 		rcu_read_lock_bh();
 		for(rt = rcu_dereference_bh(dn_rt_hash_table[h].chain), idx = 0;
 			rt;
-			rt = rcu_dereference_bh(rt->dst.dn_next), idx++) {
+			rt = rcu_dereference_bh(rt->dn_next), idx++) {
 			if (idx < s_idx)
 				continue;
 			skb_dst_set(skb, dst_clone(&rt->dst));
@@ -1795,7 +1797,7 @@ static struct dn_route *dn_rt_cache_get_next(struct seq_file *seq, struct dn_rou
 {
 	struct dn_rt_cache_iter_state *s = seq->private;
 
-	rt = rcu_dereference_bh(rt->dst.dn_next);
+	rt = rcu_dereference_bh(rt->dn_next);
 	while (!rt) {
 		rcu_read_unlock_bh();
 		if (--s->bucket < 0)
@@ -1858,7 +1860,6 @@ static int dn_rt_cache_seq_open(struct inode *inode, struct file *file)
 }
 
 static const struct file_operations dn_rt_cache_seq_fops = {
-	.owner	 = THIS_MODULE,
 	.open	 = dn_rt_cache_seq_open,
 	.read	 = seq_read,
 	.llseek	 = seq_lseek,
@@ -1921,11 +1922,11 @@ void __init dn_route_init(void)
 		    &dn_rt_cache_seq_fops);
 
 #ifdef CONFIG_DECNET_ROUTER
-	rtnl_register(PF_DECnet, RTM_GETROUTE, dn_cache_getroute,
-		      dn_fib_dump, 0);
+	rtnl_register_module(THIS_MODULE, PF_DECnet, RTM_GETROUTE,
+			     dn_cache_getroute, dn_fib_dump, 0);
 #else
-	rtnl_register(PF_DECnet, RTM_GETROUTE, dn_cache_getroute,
-		      dn_cache_dump, 0);
+	rtnl_register_module(THIS_MODULE, PF_DECnet, RTM_GETROUTE,
+			     dn_cache_getroute, dn_cache_dump, 0);
 #endif
 }
 
diff --git a/net/dsa/Kconfig b/net/dsa/Kconfig
index 03c3bdf25468..bbf2c82cf7b2 100644
--- a/net/dsa/Kconfig
+++ b/net/dsa/Kconfig
@@ -16,6 +16,15 @@ config NET_DSA
 
 if NET_DSA
 
+config NET_DSA_LEGACY
+	bool "Support for older platform device and Device Tree registration"
+	default y
+	---help---
+	  Say Y if you want to enable support for the older platform device and
+	  deprecated Device Tree binding registration.
+
+	  This feature is scheduled for removal in 4.17.
+
 # tagging formats
 config NET_DSA_TAG_BRCM
 	bool
diff --git a/net/dsa/Makefile b/net/dsa/Makefile
index 0e13c1f95d13..9e4d3536f977 100644
--- a/net/dsa/Makefile
+++ b/net/dsa/Makefile
@@ -1,7 +1,8 @@
 # SPDX-License-Identifier: GPL-2.0
 # the core
 obj-$(CONFIG_NET_DSA) += dsa_core.o
-dsa_core-y += dsa.o dsa2.o legacy.o master.o port.o slave.o switch.o
+dsa_core-y += dsa.o dsa2.o master.o port.o slave.o switch.o
+dsa_core-$(CONFIG_NET_DSA_LEGACY) += legacy.o
 
 # tagging formats
 dsa_core-$(CONFIG_NET_DSA_TAG_BRCM) += tag_brcm.o
diff --git a/net/dsa/dsa2.c b/net/dsa/dsa2.c
index 1e287420ff49..adf50fbc4c13 100644
--- a/net/dsa/dsa2.c
+++ b/net/dsa/dsa2.c
@@ -241,7 +241,7 @@ static int dsa_tree_setup_default_cpu(struct dsa_switch_tree *dst)
 		for (port = 0; port < ds->num_ports; port++) {
 			dp = &ds->ports[port];
 
-			if (dsa_port_is_user(dp))
+			if (dsa_port_is_user(dp) || dsa_port_is_dsa(dp))
 				dp->cpu_dp = dst->cpu_dp;
 		}
 	}
@@ -271,13 +271,12 @@ static int dsa_port_setup(struct dsa_port *dp)
 		break;
 	case DSA_PORT_TYPE_CPU:
 	case DSA_PORT_TYPE_DSA:
-		err = dsa_port_fixed_link_register_of(dp);
+		err = dsa_port_link_register_of(dp);
 		if (err) {
-			dev_err(ds->dev, "failed to register fixed link for port %d.%d\n",
+			dev_err(ds->dev, "failed to setup link for port %d.%d\n",
 				ds->index, dp->index);
 			return err;
 		}
-
 		break;
 	case DSA_PORT_TYPE_USER:
 		err = dsa_slave_create(dp);
@@ -301,7 +300,7 @@ static void dsa_port_teardown(struct dsa_port *dp)
 		break;
 	case DSA_PORT_TYPE_CPU:
 	case DSA_PORT_TYPE_DSA:
-		dsa_port_fixed_link_unregister_of(dp);
+		dsa_port_link_unregister_of(dp);
 		break;
 	case DSA_PORT_TYPE_USER:
 		if (dp->slave) {
diff --git a/net/dsa/dsa_priv.h b/net/dsa/dsa_priv.h
index 7d036696e8c4..70de7895e5b8 100644
--- a/net/dsa/dsa_priv.h
+++ b/net/dsa/dsa_priv.h
@@ -97,8 +97,17 @@ const struct dsa_device_ops *dsa_resolve_tag_protocol(int tag_protocol);
 bool dsa_schedule_work(struct work_struct *work);
 
 /* legacy.c */
+#if IS_ENABLED(CONFIG_NET_DSA_LEGACY)
 int dsa_legacy_register(void);
 void dsa_legacy_unregister(void);
+#else
+static inline int dsa_legacy_register(void)
+{
+	return 0;
+}
+
+static inline void dsa_legacy_unregister(void) { }
+#endif
 int dsa_legacy_fdb_add(struct ndmsg *ndm, struct nlattr *tb[],
 		       struct net_device *dev,
 		       const unsigned char *addr, u16 vid,
@@ -157,8 +166,8 @@ int dsa_port_vlan_add(struct dsa_port *dp,
 		      struct switchdev_trans *trans);
 int dsa_port_vlan_del(struct dsa_port *dp,
 		      const struct switchdev_obj_port_vlan *vlan);
-int dsa_port_fixed_link_register_of(struct dsa_port *dp);
-void dsa_port_fixed_link_unregister_of(struct dsa_port *dp);
+int dsa_port_link_register_of(struct dsa_port *dp);
+void dsa_port_link_unregister_of(struct dsa_port *dp);
 
 /* slave.c */
 extern const struct dsa_device_ops notag_netdev_ops;
diff --git a/net/dsa/legacy.c b/net/dsa/legacy.c
index 84611d7fcfa2..cb54b81d0bd9 100644
--- a/net/dsa/legacy.c
+++ b/net/dsa/legacy.c
@@ -86,7 +86,7 @@ static int dsa_cpu_dsa_setups(struct dsa_switch *ds)
 		if (!(dsa_is_cpu_port(ds, port) || dsa_is_dsa_port(ds, port)))
 			continue;
 
-		ret = dsa_port_fixed_link_register_of(&ds->ports[port]);
+		ret = dsa_port_link_register_of(&ds->ports[port]);
 		if (ret)
 			return ret;
 	}
@@ -275,7 +275,7 @@ static void dsa_switch_destroy(struct dsa_switch *ds)
 	for (port = 0; port < ds->num_ports; port++) {
 		if (!(dsa_is_cpu_port(ds, port) || dsa_is_dsa_port(ds, port)))
 			continue;
-		dsa_port_fixed_link_unregister_of(&ds->ports[port]);
+		dsa_port_link_unregister_of(&ds->ports[port]);
 	}
 
 	if (ds->slave_mii_bus && ds->ops->phy_read)
@@ -718,26 +718,6 @@ static int dsa_resume(struct device *d)
 }
 #endif
 
-/* legacy way, bypassing the bridge *****************************************/
-int dsa_legacy_fdb_add(struct ndmsg *ndm, struct nlattr *tb[],
-		       struct net_device *dev,
-		       const unsigned char *addr, u16 vid,
-		       u16 flags)
-{
-	struct dsa_port *dp = dsa_slave_to_port(dev);
-
-	return dsa_port_fdb_add(dp, addr, vid);
-}
-
-int dsa_legacy_fdb_del(struct ndmsg *ndm, struct nlattr *tb[],
-		       struct net_device *dev,
-		       const unsigned char *addr, u16 vid)
-{
-	struct dsa_port *dp = dsa_slave_to_port(dev);
-
-	return dsa_port_fdb_del(dp, addr, vid);
-}
-
 static SIMPLE_DEV_PM_OPS(dsa_pm_ops, dsa_suspend, dsa_resume);
 
 static const struct of_device_id dsa_of_match_table[] = {
diff --git a/net/dsa/port.c b/net/dsa/port.c
index bb4be2679904..7acc1169d75e 100644
--- a/net/dsa/port.c
+++ b/net/dsa/port.c
@@ -273,7 +273,56 @@ int dsa_port_vlan_del(struct dsa_port *dp,
 	return 0;
 }
 
-int dsa_port_fixed_link_register_of(struct dsa_port *dp)
+static int dsa_port_setup_phy_of(struct dsa_port *dp, bool enable)
+{
+	struct device_node *port_dn = dp->dn;
+	struct device_node *phy_dn;
+	struct dsa_switch *ds = dp->ds;
+	struct phy_device *phydev;
+	int port = dp->index;
+	int err = 0;
+
+	phy_dn = of_parse_phandle(port_dn, "phy-handle", 0);
+	if (!phy_dn)
+		return 0;
+
+	phydev = of_phy_find_device(phy_dn);
+	if (!phydev) {
+		err = -EPROBE_DEFER;
+		goto err_put_of;
+	}
+
+	if (enable) {
+		err = genphy_config_init(phydev);
+		if (err < 0)
+			goto err_put_dev;
+
+		err = genphy_resume(phydev);
+		if (err < 0)
+			goto err_put_dev;
+
+		err = genphy_read_status(phydev);
+		if (err < 0)
+			goto err_put_dev;
+	} else {
+		err = genphy_suspend(phydev);
+		if (err < 0)
+			goto err_put_dev;
+	}
+
+	if (ds->ops->adjust_link)
+		ds->ops->adjust_link(ds, port, phydev);
+
+	dev_dbg(ds->dev, "%s port's phy: %s\n",
+		enable ? "enabled" : "disabled", phydev_name(phydev));
+
+err_put_dev:
+	put_device(&phydev->mdio.dev);
+err_put_of:
+	of_node_put(phy_dn);
+	return err;
+}
+
+static int dsa_port_fixed_link_register_of(struct dsa_port *dp)
 {
 	struct device_node *dn = dp->dn;
 	struct dsa_switch *ds = dp->ds;
@@ -282,38 +331,44 @@ int dsa_port_fixed_link_register_of(struct dsa_port *dp)
 	int mode;
 	int err;
 
-	if (of_phy_is_fixed_link(dn)) {
-		err = of_phy_register_fixed_link(dn);
-		if (err) {
-			dev_err(ds->dev,
-				"failed to register the fixed PHY of port %d\n",
-				port);
-			return err;
-		}
+	err = of_phy_register_fixed_link(dn);
+	if (err) {
+		dev_err(ds->dev,
+			"failed to register the fixed PHY of port %d\n",
+			port);
+		return err;
+	}
 
-		phydev = of_phy_find_device(dn);
+	phydev = of_phy_find_device(dn);
 
-		mode = of_get_phy_mode(dn);
-		if (mode < 0)
-			mode = PHY_INTERFACE_MODE_NA;
-		phydev->interface = mode;
+	mode = of_get_phy_mode(dn);
+	if (mode < 0)
+		mode = PHY_INTERFACE_MODE_NA;
+	phydev->interface = mode;
 
-		genphy_config_init(phydev);
-		genphy_read_status(phydev);
+	genphy_config_init(phydev);
+	genphy_read_status(phydev);
 
-		if (ds->ops->adjust_link)
-			ds->ops->adjust_link(ds, port, phydev);
+	if (ds->ops->adjust_link)
+		ds->ops->adjust_link(ds, port, phydev);
 
-		put_device(&phydev->mdio.dev);
-	}
+	put_device(&phydev->mdio.dev);
 
 	return 0;
 }
 
-void dsa_port_fixed_link_unregister_of(struct dsa_port *dp)
+int dsa_port_link_register_of(struct dsa_port *dp)
 {
-	struct device_node *dn = dp->dn;
+	if (of_phy_is_fixed_link(dp->dn))
+		return dsa_port_fixed_link_register_of(dp);
+	else
+		return dsa_port_setup_phy_of(dp, true);
+}
 
-	if (of_phy_is_fixed_link(dn))
-		of_phy_deregister_fixed_link(dn);
+void dsa_port_link_unregister_of(struct dsa_port *dp)
+{
+	if (of_phy_is_fixed_link(dp->dn))
+		of_phy_deregister_fixed_link(dp->dn);
+	else
+		dsa_port_setup_phy_of(dp, false);
 }
diff --git a/net/dsa/slave.c b/net/dsa/slave.c
index a95a55f79137..f52307296de4 100644
--- a/net/dsa/slave.c
+++ b/net/dsa/slave.c
@@ -708,14 +708,12 @@ static int dsa_slave_add_cls_matchall(struct net_device *dev,
 	struct dsa_slave_priv *p = netdev_priv(dev);
 	struct dsa_mall_tc_entry *mall_tc_entry;
 	__be16 protocol = cls->common.protocol;
-	struct net *net = dev_net(dev);
 	struct dsa_switch *ds = dp->ds;
 	struct net_device *to_dev;
 	const struct tc_action *a;
 	struct dsa_port *to_dp;
 	int err = -EOPNOTSUPP;
 	LIST_HEAD(actions);
-	int ifindex;
 
 	if (!ds->ops->port_mirror_add)
 		return err;
@@ -729,8 +727,7 @@ static int dsa_slave_add_cls_matchall(struct net_device *dev,
 	if (is_tcf_mirred_egress_mirror(a) && protocol == htons(ETH_P_ALL)) {
 		struct dsa_mall_mirror_tc_entry *mirror;
 
-		ifindex = tcf_mirred_ifindex(a);
-		to_dev = __dev_get_by_index(net, ifindex);
+		to_dev = tcf_mirred_dev(a);
 		if (!to_dev)
 			return -EINVAL;
 
@@ -943,6 +940,26 @@ static const struct ethtool_ops dsa_slave_ethtool_ops = {
 	.set_rxnfc		= dsa_slave_set_rxnfc,
 };
 
+/* legacy way, bypassing the bridge *****************************************/
+int dsa_legacy_fdb_add(struct ndmsg *ndm, struct nlattr *tb[],
+		       struct net_device *dev,
+		       const unsigned char *addr, u16 vid,
+		       u16 flags)
+{
+	struct dsa_port *dp = dsa_slave_to_port(dev);
+
+	return dsa_port_fdb_add(dp, addr, vid);
+}
+
+int dsa_legacy_fdb_del(struct ndmsg *ndm, struct nlattr *tb[],
+		       struct net_device *dev,
+		       const unsigned char *addr, u16 vid)
+{
+	struct dsa_port *dp = dsa_slave_to_port(dev);
+
+	return dsa_port_fdb_del(dp, addr, vid);
+}
+
 static const struct net_device_ops dsa_slave_netdev_ops = {
 	.ndo_open	 	= dsa_slave_open,
 	.ndo_stop		= dsa_slave_close,
diff --git a/net/dsa/switch.c b/net/dsa/switch.c
index 29608d087a7c..b93511726069 100644
--- a/net/dsa/switch.c
+++ b/net/dsa/switch.c
@@ -83,29 +83,52 @@ static int dsa_switch_bridge_leave(struct dsa_switch *ds,
 static int dsa_switch_fdb_add(struct dsa_switch *ds,
 			      struct dsa_notifier_fdb_info *info)
 {
-	/* Do not care yet about other switch chips of the fabric */
-	if (ds->index != info->sw_index)
-		return 0;
+	int port = dsa_towards_port(ds, info->sw_index, info->port);
 
 	if (!ds->ops->port_fdb_add)
 		return -EOPNOTSUPP;
 
-	return ds->ops->port_fdb_add(ds, info->port, info->addr,
-				     info->vid);
+	return ds->ops->port_fdb_add(ds, port, info->addr, info->vid);
 }
 
 static int dsa_switch_fdb_del(struct dsa_switch *ds,
 			      struct dsa_notifier_fdb_info *info)
 {
-	/* Do not care yet about other switch chips of the fabric */
-	if (ds->index != info->sw_index)
-		return 0;
+	int port = dsa_towards_port(ds, info->sw_index, info->port);
 
 	if (!ds->ops->port_fdb_del)
 		return -EOPNOTSUPP;
 
-	return ds->ops->port_fdb_del(ds, info->port, info->addr,
-				     info->vid);
+	return ds->ops->port_fdb_del(ds, port, info->addr, info->vid);
+}
+
+static int
+dsa_switch_mdb_prepare_bitmap(struct dsa_switch *ds,
+			      const struct switchdev_obj_port_mdb *mdb,
+			      const unsigned long *bitmap)
+{
+	int port, err;
+
+	if (!ds->ops->port_mdb_prepare || !ds->ops->port_mdb_add)
+		return -EOPNOTSUPP;
+
+	for_each_set_bit(port, bitmap, ds->num_ports) {
+		err = ds->ops->port_mdb_prepare(ds, port, mdb);
+		if (err)
+			return err;
+	}
+
+	return 0;
+}
+
+static void dsa_switch_mdb_add_bitmap(struct dsa_switch *ds,
+				      const struct switchdev_obj_port_mdb *mdb,
+				      const unsigned long *bitmap)
+{
+	int port;
+
+	for_each_set_bit(port, bitmap, ds->num_ports)
+		ds->ops->port_mdb_add(ds, port, mdb);
 }
 
 static int dsa_switch_mdb_add(struct dsa_switch *ds,
@@ -114,7 +137,7 @@ static int dsa_switch_mdb_add(struct dsa_switch *ds,
 	const struct switchdev_obj_port_mdb *mdb = info->mdb;
 	struct switchdev_trans *trans = info->trans;
 	DECLARE_BITMAP(group, ds->num_ports);
-	int port, err;
+	int port;
 
 	/* Build a mask of Multicast group members */
 	bitmap_zero(group, ds->num_ports);
@@ -124,21 +147,10 @@ static int dsa_switch_mdb_add(struct dsa_switch *ds,
 		if (dsa_is_dsa_port(ds, port))
 			set_bit(port, group);
 
-	if (switchdev_trans_ph_prepare(trans)) {
-		if (!ds->ops->port_mdb_prepare || !ds->ops->port_mdb_add)
-			return -EOPNOTSUPP;
-
-		for_each_set_bit(port, group, ds->num_ports) {
-			err = ds->ops->port_mdb_prepare(ds, port, mdb, trans);
-			if (err)
-				return err;
-		}
-
-		return 0;
-	}
+	if (switchdev_trans_ph_prepare(trans))
+		return dsa_switch_mdb_prepare_bitmap(ds, mdb, group);
 
-	for_each_set_bit(port, group, ds->num_ports)
-		ds->ops->port_mdb_add(ds, port, mdb, trans);
+	dsa_switch_mdb_add_bitmap(ds, mdb, group);
 
 	return 0;
 }
@@ -157,13 +169,43 @@ static int dsa_switch_mdb_del(struct dsa_switch *ds,
 	return 0;
 }
 
+static int
+dsa_switch_vlan_prepare_bitmap(struct dsa_switch *ds,
+			       const struct switchdev_obj_port_vlan *vlan,
+			       const unsigned long *bitmap)
+{
+	int port, err;
+
+	if (!ds->ops->port_vlan_prepare || !ds->ops->port_vlan_add)
+		return -EOPNOTSUPP;
+
+	for_each_set_bit(port, bitmap, ds->num_ports) {
+		err = ds->ops->port_vlan_prepare(ds, port, vlan);
+		if (err)
+			return err;
+	}
+
+	return 0;
+}
+
+static void
+dsa_switch_vlan_add_bitmap(struct dsa_switch *ds,
+			   const struct switchdev_obj_port_vlan *vlan,
+			   const unsigned long *bitmap)
+{
+	int port;
+
+	for_each_set_bit(port, bitmap, ds->num_ports)
+		ds->ops->port_vlan_add(ds, port, vlan);
+}
+
 static int dsa_switch_vlan_add(struct dsa_switch *ds,
 			       struct dsa_notifier_vlan_info *info)
 {
 	const struct switchdev_obj_port_vlan *vlan = info->vlan;
 	struct switchdev_trans *trans = info->trans;
 	DECLARE_BITMAP(members, ds->num_ports);
-	int port, err;
+	int port;
 
 	/* Build a mask of VLAN members */
 	bitmap_zero(members, ds->num_ports);
@@ -173,21 +215,10 @@ static int dsa_switch_vlan_add(struct dsa_switch *ds,
 		if (dsa_is_cpu_port(ds, port) || dsa_is_dsa_port(ds, port))
 			set_bit(port, members);
 
-	if (switchdev_trans_ph_prepare(trans)) {
-		if (!ds->ops->port_vlan_prepare || !ds->ops->port_vlan_add)
-			return -EOPNOTSUPP;
-
-		for_each_set_bit(port, members, ds->num_ports) {
-			err = ds->ops->port_vlan_prepare(ds, port, vlan, trans);
-			if (err)
-				return err;
-		}
-
-		return 0;
-	}
+	if (switchdev_trans_ph_prepare(trans))
+		return dsa_switch_vlan_prepare_bitmap(ds, vlan, members);
 
-	for_each_set_bit(port, members, ds->num_ports)
-		ds->ops->port_vlan_add(ds, port, vlan, trans);
+	dsa_switch_vlan_add_bitmap(ds, vlan, members);
 
 	return 0;
 }
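The *_prepare_bitmap()/*_add_bitmap() pairs factored out above keep switchdev's two-phase contract: walk the port bitmap once where every callback may fail, then once where none may. A minimal userspace analogue of that shape (for_each_set_bit() replaced by a plain loop; callbacks hypothetical):

#include <stdio.h>

#define NUM_PORTS 8

static int prepare(int port) { return port == 5 ? -1 : 0; }
static void commit(int port) { printf("programmed port %d\n", port); }

/* Phase 1: validate every member; abort before any state changes. */
static int ports_prepare(unsigned long bitmap)
{
	for (int port = 0; port < NUM_PORTS; port++)
		if (((bitmap >> port) & 1) && prepare(port))
			return -1;
	return 0;
}

/* Phase 2: apply; by contract this can no longer fail. */
static void ports_commit(unsigned long bitmap)
{
	for (int port = 0; port < NUM_PORTS; port++)
		if ((bitmap >> port) & 1)
			commit(port);
}

int main(void)
{
	unsigned long members = (1 << 1) | (1 << 3);

	if (!ports_prepare(members))
		ports_commit(members);
	return 0;
}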
diff --git a/net/dsa/tag_brcm.c b/net/dsa/tag_brcm.c
index e6e0b7b6025c..2b06bb91318b 100644
--- a/net/dsa/tag_brcm.c
+++ b/net/dsa/tag_brcm.c
@@ -70,6 +70,18 @@ static struct sk_buff *brcm_tag_xmit_ll(struct sk_buff *skb,
 	if (skb_cow_head(skb, BRCM_TAG_LEN) < 0)
 		return NULL;
 
+	/* The Ethernet switch we are interfaced with needs packets to be at
+	 * least 64 bytes (including FCS), otherwise they will be discarded when
+	 * they enter the switch port logic. When Broadcom tags are enabled, we
+	 * need to make sure that packets are at least 68 bytes
+	 * (including FCS and tag) because the length verification is done after
+	 * the Broadcom tag is stripped off the ingress packet.
+	 *
+	 * Let dsa_slave_xmit() free the SKB
+	 */
+	if (__skb_put_padto(skb, ETH_ZLEN + BRCM_TAG_LEN, false))
+		return NULL;
+
 	skb_push(skb, BRCM_TAG_LEN);
 
 	if (offset)
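The 68-byte figure in the comment decomposes as the 60-byte minimum frame (ETH_ZLEN, FCS excluded), plus the 4-byte Broadcom tag, plus the 4-byte FCS the MAC appends. A quick check of the arithmetic, with the constants as defined in the kernel headers:

#include <stdio.h>

#define ETH_ZLEN	60	/* min Ethernet frame, FCS excluded */
#define ETH_FCS_LEN	4
#define BRCM_TAG_LEN	4

int main(void)
{
	/* Switch requirement: 64 bytes on the wire including FCS. */
	printf("plain min on wire: %d\n", ETH_ZLEN + ETH_FCS_LEN);	/* 64 */

	/* The tag is stripped before length checking, so pad the skb to
	 * ETH_ZLEN + BRCM_TAG_LEN; the tagged frame is then 68 bytes
	 * including FCS.
	 */
	printf("tagged min on wire: %d\n",
	       ETH_ZLEN + BRCM_TAG_LEN + ETH_FCS_LEN);			/* 68 */
	return 0;
}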
diff --git a/net/dsa/tag_mtk.c b/net/dsa/tag_mtk.c
index 8475434af7d5..11535bc70743 100644
--- a/net/dsa/tag_mtk.c
+++ b/net/dsa/tag_mtk.c
@@ -13,10 +13,13 @@
  */
 
 #include <linux/etherdevice.h>
+#include <linux/if_vlan.h>
 
 #include "dsa_priv.h"
 
 #define MTK_HDR_LEN		4
+#define MTK_HDR_XMIT_UNTAGGED		0
+#define MTK_HDR_XMIT_TAGGED_TPID_8100	1
 #define MTK_HDR_RECV_SOURCE_PORT_MASK	GENMASK(2, 0)
 #define MTK_HDR_XMIT_DP_BIT_MASK	GENMASK(5, 0)
 
@@ -25,20 +28,37 @@ static struct sk_buff *mtk_tag_xmit(struct sk_buff *skb,
 {
 	struct dsa_port *dp = dsa_slave_to_port(dev);
 	u8 *mtk_tag;
+	bool is_vlan_skb = true;
 
-	if (skb_cow_head(skb, MTK_HDR_LEN) < 0)
-		return NULL;
-
-	skb_push(skb, MTK_HDR_LEN);
+	/* Build the special tag after the MAC Source Address. If a VLAN
+	 * header is present, the special tag must be combined with it:
+	 * only then can the switch parse both the special tag and the
+	 * VLAN tag at the same time and look up the VLAN table with the
+	 * VID.
+	 */
+	if (!skb_vlan_tagged(skb)) {
+		if (skb_cow_head(skb, MTK_HDR_LEN) < 0)
+			return NULL;
 
-	memmove(skb->data, skb->data + MTK_HDR_LEN, 2 * ETH_ALEN);
+		skb_push(skb, MTK_HDR_LEN);
+		memmove(skb->data, skb->data + MTK_HDR_LEN, 2 * ETH_ALEN);
+		is_vlan_skb = false;
+	}
 
-	/* Build the tag after the MAC Source Address */
 	mtk_tag = skb->data + 2 * ETH_ALEN;
-	mtk_tag[0] = 0;
+
+	/* Mark the tag attribute on insertion to tell the hardware whether
+	 * the special tag is combined with an 802.1Q header.
+	 */
+	mtk_tag[0] = is_vlan_skb ? MTK_HDR_XMIT_TAGGED_TPID_8100 :
+		     MTK_HDR_XMIT_UNTAGGED;
 	mtk_tag[1] = (1 << dp->index) & MTK_HDR_XMIT_DP_BIT_MASK;
-	mtk_tag[2] = 0;
-	mtk_tag[3] = 0;
+
+	/* Tag control information is kept for 802.1Q */
+	if (!is_vlan_skb) {
+		mtk_tag[2] = 0;
+		mtk_tag[3] = 0;
+	}
 
 	return skb;
 }
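Read together, the two paths above yield the following on-wire layouts; the tagged case reuses the first two bytes of the 802.1Q header as the special tag, which is why the TCI survives untouched (offsets inferred from the code, shown as a comment sketch):

/* Untagged frame: 4-byte special tag inserted after the MAC addresses.
 *
 *   | DA (6) | SA (6) | 0x00 | port bitmap | 0x00 | 0x00 | ethertype | ...
 *
 * VLAN-tagged frame: the special tag overwrites the 802.1Q TPID in
 * place, so byte 12 carries MTK_HDR_XMIT_TAGGED_TPID_8100 and the
 * original tag control information is preserved:
 *
 *   | DA (6) | SA (6) | 0x01 | port bitmap | TCI (2) | ethertype | ...
 */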
diff --git a/net/ipv4/Makefile b/net/ipv4/Makefile
index c6c8ad1d4b6d..47a0a6649a9d 100644
--- a/net/ipv4/Makefile
+++ b/net/ipv4/Makefile
@@ -43,7 +43,6 @@ obj-$(CONFIG_INET_DIAG) += inet_diag.o
 obj-$(CONFIG_INET_TCP_DIAG) += tcp_diag.o
 obj-$(CONFIG_INET_UDP_DIAG) += udp_diag.o
 obj-$(CONFIG_INET_RAW_DIAG) += raw_diag.o
-obj-$(CONFIG_NET_TCPPROBE) += tcp_probe.o
 obj-$(CONFIG_TCP_CONG_BBR) += tcp_bbr.o
 obj-$(CONFIG_TCP_CONG_BIC) += tcp_bic.o
 obj-$(CONFIG_TCP_CONG_CDG) += tcp_cdg.o
diff --git a/net/ipv4/af_inet.c b/net/ipv4/af_inet.c
index f00499a46927..c24008daa3d8 100644
--- a/net/ipv4/af_inet.c
+++ b/net/ipv4/af_inet.c
@@ -121,6 +121,7 @@
 #endif
 #include <net/l3mdev.h>
 
+#include <trace/events/sock.h>
 
 /* The inetsw table contains everything that inet_create needs to
  * build a new socket.
@@ -789,7 +790,8 @@ int inet_recvmsg(struct socket *sock, struct msghdr *msg, size_t size,
 	int addr_len = 0;
 	int err;
 
-	sock_rps_record_flow(sk);
+	if (likely(!(flags & MSG_ERRQUEUE)))
+		sock_rps_record_flow(sk);
 
 	err = sk->sk_prot->recvmsg(sk, msg, size, flags & MSG_DONTWAIT,
 				   flags & ~MSG_DONTWAIT, &addr_len);
@@ -870,6 +872,9 @@ int inet_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
 	struct sock *sk = sock->sk;
 	int err = 0;
 	struct net *net = sock_net(sk);
+	void __user *p = (void __user *)arg;
+	struct ifreq ifr;
+	struct rtentry rt;
 
 	switch (cmd) {
 	case SIOCGSTAMP:
@@ -880,8 +885,12 @@ int inet_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
 		break;
 	case SIOCADDRT:
 	case SIOCDELRT:
+		if (copy_from_user(&rt, p, sizeof(struct rtentry)))
+			return -EFAULT;
+		err = ip_rt_ioctl(net, cmd, &rt);
+		break;
 	case SIOCRTMSG:
-		err = ip_rt_ioctl(net, cmd, (void __user *)arg);
+		err = -EINVAL;
 		break;
 	case SIOCDARP:
 	case SIOCGARP:
@@ -889,17 +898,26 @@ int inet_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
 		err = arp_ioctl(net, cmd, (void __user *)arg);
 		break;
 	case SIOCGIFADDR:
-	case SIOCSIFADDR:
 	case SIOCGIFBRDADDR:
-	case SIOCSIFBRDADDR:
 	case SIOCGIFNETMASK:
-	case SIOCSIFNETMASK:
 	case SIOCGIFDSTADDR:
+	case SIOCGIFPFLAGS:
+		if (copy_from_user(&ifr, p, sizeof(struct ifreq)))
+			return -EFAULT;
+		err = devinet_ioctl(net, cmd, &ifr);
+		if (!err && copy_to_user(p, &ifr, sizeof(struct ifreq)))
+			err = -EFAULT;
+		break;
+
+	case SIOCSIFADDR:
+	case SIOCSIFBRDADDR:
+	case SIOCSIFNETMASK:
 	case SIOCSIFDSTADDR:
 	case SIOCSIFPFLAGS:
-	case SIOCGIFPFLAGS:
 	case SIOCSIFFLAGS:
-		err = devinet_ioctl(net, cmd, (void __user *)arg);
+		if (copy_from_user(&ifr, p, sizeof(struct ifreq)))
+			return -EFAULT;
+		err = devinet_ioctl(net, cmd, &ifr);
 		break;
 	default:
 		if (sk->sk_prot->ioctl)
@@ -1220,6 +1238,19 @@ int inet_sk_rebuild_header(struct sock *sk)
 }
 EXPORT_SYMBOL(inet_sk_rebuild_header);
 
+void inet_sk_set_state(struct sock *sk, int state)
+{
+	trace_inet_sock_set_state(sk, sk->sk_state, state);
+	sk->sk_state = state;
+}
+EXPORT_SYMBOL(inet_sk_set_state);
+
+void inet_sk_state_store(struct sock *sk, int newstate)
+{
+	trace_inet_sock_set_state(sk, sk->sk_state, newstate);
+	smp_store_release(&sk->sk_state, newstate);
+}
+
 struct sk_buff *inet_gso_segment(struct sk_buff *skb,
 				 netdev_features_t features)
 {
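With the user copies hoisted into inet_ioctl(), devinet_ioctl() now operates on a kernel-resident struct ifreq while the userspace contract stays unchanged. For reference, the caller's side of one affected request (standard Linux socket ioctl, nothing new assumed):

#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <net/if.h>
#include <netinet/in.h>
#include <arpa/inet.h>

int main(void)
{
	struct ifreq ifr;
	int fd = socket(AF_INET, SOCK_DGRAM, 0);

	if (fd < 0)
		return 1;

	memset(&ifr, 0, sizeof(ifr));
	strncpy(ifr.ifr_name, "lo", IFNAMSIZ - 1);

	/* Kernel copies the whole struct in, fills it, copies it back. */
	if (ioctl(fd, SIOCGIFADDR, &ifr) == 0) {
		struct sockaddr_in *sin = (struct sockaddr_in *)&ifr.ifr_addr;

		printf("lo: %s\n", inet_ntoa(sin->sin_addr));
	}
	close(fd);
	return 0;
}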
diff --git a/net/ipv4/arp.c b/net/ipv4/arp.c
index 6c231b43974d..f28f06c91ead 100644
--- a/net/ipv4/arp.c
+++ b/net/ipv4/arp.c
@@ -1425,7 +1425,6 @@ static int arp_seq_open(struct inode *inode, struct file *file)
 }
 
 static const struct file_operations arp_seq_fops = {
-	.owner		= THIS_MODULE,
 	.open           = arp_seq_open,
 	.read           = seq_read,
 	.llseek         = seq_lseek,
diff --git a/net/ipv4/devinet.c b/net/ipv4/devinet.c
index 7a93359fbc72..40f001782c1b 100644
--- a/net/ipv4/devinet.c
+++ b/net/ipv4/devinet.c
@@ -946,11 +946,10 @@ static int inet_abc_len(__be32 addr)
 }
 
 
-int devinet_ioctl(struct net *net, unsigned int cmd, void __user *arg)
+int devinet_ioctl(struct net *net, unsigned int cmd, struct ifreq *ifr)
 {
-	struct ifreq ifr;
 	struct sockaddr_in sin_orig;
-	struct sockaddr_in *sin = (struct sockaddr_in *)&ifr.ifr_addr;
+	struct sockaddr_in *sin = (struct sockaddr_in *)&ifr->ifr_addr;
 	struct in_device *in_dev;
 	struct in_ifaddr **ifap = NULL;
 	struct in_ifaddr *ifa = NULL;
@@ -959,22 +958,16 @@ int devinet_ioctl(struct net *net, unsigned int cmd, void __user *arg)
 	int ret = -EFAULT;
 	int tryaddrmatch = 0;
 
-	/*
-	 *	Fetch the caller's info block into kernel space
-	 */
-
-	if (copy_from_user(&ifr, arg, sizeof(struct ifreq)))
-		goto out;
-	ifr.ifr_name[IFNAMSIZ - 1] = 0;
+	ifr->ifr_name[IFNAMSIZ - 1] = 0;
 
 	/* save original address for comparison */
 	memcpy(&sin_orig, sin, sizeof(*sin));
 
-	colon = strchr(ifr.ifr_name, ':');
+	colon = strchr(ifr->ifr_name, ':');
 	if (colon)
 		*colon = 0;
 
-	dev_load(net, ifr.ifr_name);
+	dev_load(net, ifr->ifr_name);
 
 	switch (cmd) {
 	case SIOCGIFADDR:	/* Get interface address */
@@ -1014,7 +1007,7 @@ int devinet_ioctl(struct net *net, unsigned int cmd, void __user *arg)
 	rtnl_lock();
 
 	ret = -ENODEV;
-	dev = __dev_get_by_name(net, ifr.ifr_name);
+	dev = __dev_get_by_name(net, ifr->ifr_name);
 	if (!dev)
 		goto done;
 
@@ -1031,7 +1024,7 @@ int devinet_ioctl(struct net *net, unsigned int cmd, void __user *arg)
 			   This is checked above. */
 			for (ifap = &in_dev->ifa_list; (ifa = *ifap) != NULL;
 			     ifap = &ifa->ifa_next) {
-				if (!strcmp(ifr.ifr_name, ifa->ifa_label) &&
+				if (!strcmp(ifr->ifr_name, ifa->ifa_label) &&
 				    sin_orig.sin_addr.s_addr ==
 							ifa->ifa_local) {
 					break; /* found */
@@ -1044,7 +1037,7 @@ int devinet_ioctl(struct net *net, unsigned int cmd, void __user *arg)
 		if (!ifa) {
 			for (ifap = &in_dev->ifa_list; (ifa = *ifap) != NULL;
 			     ifap = &ifa->ifa_next)
-				if (!strcmp(ifr.ifr_name, ifa->ifa_label))
+				if (!strcmp(ifr->ifr_name, ifa->ifa_label))
 					break;
 		}
 	}
@@ -1055,20 +1048,24 @@ int devinet_ioctl(struct net *net, unsigned int cmd, void __user *arg)
 
 	switch (cmd) {
 	case SIOCGIFADDR:	/* Get interface address */
+		ret = 0;
 		sin->sin_addr.s_addr = ifa->ifa_local;
-		goto rarok;
+		break;
 
 	case SIOCGIFBRDADDR:	/* Get the broadcast address */
+		ret = 0;
 		sin->sin_addr.s_addr = ifa->ifa_broadcast;
-		goto rarok;
+		break;
 
 	case SIOCGIFDSTADDR:	/* Get the destination address */
+		ret = 0;
 		sin->sin_addr.s_addr = ifa->ifa_address;
-		goto rarok;
+		break;
 
 	case SIOCGIFNETMASK:	/* Get the netmask for the interface */
+		ret = 0;
 		sin->sin_addr.s_addr = ifa->ifa_mask;
-		goto rarok;
+		break;
 
 	case SIOCSIFFLAGS:
 		if (colon) {
@@ -1076,11 +1073,11 @@ int devinet_ioctl(struct net *net, unsigned int cmd, void __user *arg)
 			if (!ifa)
 				break;
 			ret = 0;
-			if (!(ifr.ifr_flags & IFF_UP))
+			if (!(ifr->ifr_flags & IFF_UP))
 				inet_del_ifa(in_dev, ifap, 1);
 			break;
 		}
-		ret = dev_change_flags(dev, ifr.ifr_flags);
+		ret = dev_change_flags(dev, ifr->ifr_flags);
 		break;
 
 	case SIOCSIFADDR:	/* Set interface address (and family) */
@@ -1095,7 +1092,7 @@ int devinet_ioctl(struct net *net, unsigned int cmd, void __user *arg)
 				break;
 			INIT_HLIST_NODE(&ifa->hash);
 			if (colon)
-				memcpy(ifa->ifa_label, ifr.ifr_name, IFNAMSIZ);
+				memcpy(ifa->ifa_label, ifr->ifr_name, IFNAMSIZ);
 			else
 				memcpy(ifa->ifa_label, dev->name, IFNAMSIZ);
 		} else {
@@ -1182,28 +1179,27 @@ done:
 	rtnl_unlock();
 out:
 	return ret;
-rarok:
-	rtnl_unlock();
-	ret = copy_to_user(arg, &ifr, sizeof(struct ifreq)) ? -EFAULT : 0;
-	goto out;
 }
 
-static int inet_gifconf(struct net_device *dev, char __user *buf, int len)
+static int inet_gifconf(struct net_device *dev, char __user *buf, int len, int size)
 {
 	struct in_device *in_dev = __in_dev_get_rtnl(dev);
 	struct in_ifaddr *ifa;
 	struct ifreq ifr;
 	int done = 0;
 
+	if (WARN_ON(size > sizeof(struct ifreq)))
+		goto out;
+
 	if (!in_dev)
 		goto out;
 
 	for (ifa = in_dev->ifa_list; ifa; ifa = ifa->ifa_next) {
 		if (!buf) {
-			done += sizeof(ifr);
+			done += size;
 			continue;
 		}
-		if (len < (int) sizeof(ifr))
+		if (len < size)
 			break;
 		memset(&ifr, 0, sizeof(struct ifreq));
 		strcpy(ifr.ifr_name, ifa->ifa_label);
@@ -1212,13 +1208,12 @@ static int inet_gifconf(struct net_device *dev, char __user *buf, int len)
 		(*(struct sockaddr_in *)&ifr.ifr_addr).sin_addr.s_addr =
 								ifa->ifa_local;
 
-		if (copy_to_user(buf, &ifr, sizeof(struct ifreq))) {
+		if (copy_to_user(buf + done, &ifr, size)) {
 			done = -EFAULT;
 			break;
 		}
-		buf  += sizeof(struct ifreq);
-		len  -= sizeof(struct ifreq);
-		done += sizeof(struct ifreq);
+		len  -= size;
+		done += size;
 	}
 out:
 	return done;
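
The devinet_ioctl()/inet_gifconf() rework above moves the user copies out to the caller: the handler now works on a kernel struct ifreq, and gifconf writes each record at buf + done using a caller-supplied record size (native vs. compat). The userspace contract is unchanged; a minimal standalone sketch of the consumer side (plain SIOCGIFCONF usage, nothing patch-specific):

#include <stdio.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <net/if.h>
#include <netinet/in.h>
#include <arpa/inet.h>

int main(void)
{
	struct ifconf ifc;
	struct ifreq *ifr;
	char buf[4096];
	int fd, i, n;

	fd = socket(AF_INET, SOCK_DGRAM, 0);
	if (fd < 0) { perror("socket"); return 1; }

	ifc.ifc_len = sizeof(buf);
	ifc.ifc_buf = buf;
	/* The kernel fills one fixed-size ifreq per IPv4 address. */
	if (ioctl(fd, SIOCGIFCONF, &ifc) < 0) { perror("SIOCGIFCONF"); return 1; }

	ifr = ifc.ifc_req;
	n = ifc.ifc_len / sizeof(struct ifreq);
	for (i = 0; i < n; i++) {
		struct sockaddr_in *sin = (struct sockaddr_in *)&ifr[i].ifr_addr;

		printf("%-16s %s\n", ifr[i].ifr_name, inet_ntoa(sin->sin_addr));
	}
	close(fd);
	return 0;
}
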
diff --git a/net/ipv4/esp4.c b/net/ipv4/esp4.c
index 61fe6e4d23fc..296d0b956bfe 100644
--- a/net/ipv4/esp4.c
+++ b/net/ipv4/esp4.c
@@ -121,14 +121,32 @@ static void esp_ssg_unref(struct xfrm_state *x, void *tmp)
 static void esp_output_done(struct crypto_async_request *base, int err)
 {
 	struct sk_buff *skb = base->data;
+	struct xfrm_offload *xo = xfrm_offload(skb);
 	void *tmp;
-	struct dst_entry *dst = skb_dst(skb);
-	struct xfrm_state *x = dst->xfrm;
+	struct xfrm_state *x;
+
+	if (xo && (xo->flags & XFRM_DEV_RESUME))
+		x = skb->sp->xvec[skb->sp->len - 1];
+	else
+		x = skb_dst(skb)->xfrm;
 
 	tmp = ESP_SKB_CB(skb)->tmp;
 	esp_ssg_unref(x, tmp);
 	kfree(tmp);
-	xfrm_output_resume(skb, err);
+
+	if (xo && (xo->flags & XFRM_DEV_RESUME)) {
+		if (err) {
+			XFRM_INC_STATS(xs_net(x), LINUX_MIB_XFRMOUTSTATEPROTOERROR);
+			kfree_skb(skb);
+			return;
+		}
+
+		skb_push(skb, skb->data - skb_mac_header(skb));
+		secpath_reset(skb);
+		xfrm_dev_resume(skb);
+	} else {
+		xfrm_output_resume(skb, err);
+	}
 }
 
 /* Move ESP header back into place. */
@@ -825,17 +843,13 @@ static int esp_init_aead(struct xfrm_state *x)
 	char aead_name[CRYPTO_MAX_ALG_NAME];
 	struct crypto_aead *aead;
 	int err;
-	u32 mask = 0;
 
 	err = -ENAMETOOLONG;
 	if (snprintf(aead_name, CRYPTO_MAX_ALG_NAME, "%s(%s)",
 		     x->geniv, x->aead->alg_name) >= CRYPTO_MAX_ALG_NAME)
 		goto error;
 
-	if (x->xso.offload_handle)
-		mask |= CRYPTO_ALG_ASYNC;
-
-	aead = crypto_alloc_aead(aead_name, 0, mask);
+	aead = crypto_alloc_aead(aead_name, 0, 0);
 	err = PTR_ERR(aead);
 	if (IS_ERR(aead))
 		goto error;
@@ -865,7 +879,6 @@ static int esp_init_authenc(struct xfrm_state *x)
 	char authenc_name[CRYPTO_MAX_ALG_NAME];
 	unsigned int keylen;
 	int err;
-	u32 mask = 0;
 
 	err = -EINVAL;
 	if (!x->ealg)
@@ -891,10 +904,7 @@ static int esp_init_authenc(struct xfrm_state *x)
 			goto error;
 	}
 
-	if (x->xso.offload_handle)
-		mask |= CRYPTO_ALG_ASYNC;
-
-	aead = crypto_alloc_aead(authenc_name, 0, mask);
+	aead = crypto_alloc_aead(authenc_name, 0, 0);
 	err = PTR_ERR(aead);
 	if (IS_ERR(aead))
 		goto error;
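
With the CRYPTO_ALG_ASYNC mask gone, offloaded states may now get an async algorithm, and esp_output_done() forks into two continuations: the usual xfrm_output_resume(), or a device-resume path for packets whose crypto ran in software after the driver handed them back. A userspace sketch of that completion-dispatch shape only — the struct, flag and handler names are illustrative, not kernel API:

#include <stdio.h>

#define RESUME_TO_DEVICE 0x1	/* stands in for XFRM_DEV_RESUME */

struct pkt { int flags; const char *id; };

/* Hypothetical continuations standing in for xfrm_output_resume()
 * and xfrm_dev_resume(). */
static void resume_stack(struct pkt *p, int err)
{
	printf("%s: continue in the output path, err=%d\n", p->id, err);
}

static void resume_device(struct pkt *p)
{
	printf("%s: requeue to the device for transmission\n", p->id);
}

/* Async-crypto completion: drop on error, otherwise pick the
 * continuation based on how the packet entered the crypto layer. */
static void output_done(struct pkt *p, int err)
{
	if (p->flags & RESUME_TO_DEVICE) {
		if (err) {
			printf("%s: dropped, err=%d\n", p->id, err);
			return;
		}
		resume_device(p);
	} else {
		resume_stack(p, err);
	}
}

int main(void)
{
	struct pkt a = { 0, "plain" }, b = { RESUME_TO_DEVICE, "gso-sw-fallback" };

	output_done(&a, 0);
	output_done(&b, 0);
	output_done(&b, -22);
	return 0;
}
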
diff --git a/net/ipv4/esp4_offload.c b/net/ipv4/esp4_offload.c
index 29b333a62ab0..da5635fc52c2 100644
--- a/net/ipv4/esp4_offload.c
+++ b/net/ipv4/esp4_offload.c
@@ -109,78 +109,39 @@ static void esp4_gso_encap(struct xfrm_state *x, struct sk_buff *skb)
 static struct sk_buff *esp4_gso_segment(struct sk_buff *skb,
 				        netdev_features_t features)
 {
-	__u32 seq;
-	int err = 0;
-	struct sk_buff *skb2;
 	struct xfrm_state *x;
 	struct ip_esp_hdr *esph;
 	struct crypto_aead *aead;
-	struct sk_buff *segs = ERR_PTR(-EINVAL);
 	netdev_features_t esp_features = features;
 	struct xfrm_offload *xo = xfrm_offload(skb);
 
 	if (!xo)
-		goto out;
+		return ERR_PTR(-EINVAL);
 
 	if (!(skb_shinfo(skb)->gso_type & SKB_GSO_ESP))
-		goto out;
-
-	seq = xo->seq.low;
+		return ERR_PTR(-EINVAL);
 
 	x = skb->sp->xvec[skb->sp->len - 1];
 	aead = x->data;
 	esph = ip_esp_hdr(skb);
 
 	if (esph->spi != x->id.spi)
-		goto out;
+		return ERR_PTR(-EINVAL);
 
 	if (!pskb_may_pull(skb, sizeof(*esph) + crypto_aead_ivsize(aead)))
-		goto out;
+		return ERR_PTR(-EINVAL);
 
 	__skb_pull(skb, sizeof(*esph) + crypto_aead_ivsize(aead));
 
 	skb->encap_hdr_csum = 1;
 
-	if (!(features & NETIF_F_HW_ESP))
+	if (!(features & NETIF_F_HW_ESP) || !x->xso.offload_handle ||
+	    (x->xso.dev != skb->dev))
 		esp_features = features & ~(NETIF_F_SG | NETIF_F_CSUM_MASK);
 
-	segs = x->outer_mode->gso_segment(x, skb, esp_features);
-	if (IS_ERR_OR_NULL(segs))
-		goto out;
-
-	__skb_pull(skb, skb->data - skb_mac_header(skb));
-
-	skb2 = segs;
-	do {
-		struct sk_buff *nskb = skb2->next;
-
-		xo = xfrm_offload(skb2);
-		xo->flags |= XFRM_GSO_SEGMENT;
-		xo->seq.low = seq;
-		xo->seq.hi = xfrm_replay_seqhi(x, seq);
-
-		if(!(features & NETIF_F_HW_ESP))
-			xo->flags |= CRYPTO_FALLBACK;
-
-		x->outer_mode->xmit(x, skb2);
+	xo->flags |= XFRM_GSO_SEGMENT;
 
-		err = x->type_offload->xmit(x, skb2, esp_features);
-		if (err) {
-			kfree_skb_list(segs);
-			return ERR_PTR(err);
-		}
-
-		if (!skb_is_gso(skb2))
-			seq++;
-		else
-			seq += skb_shinfo(skb2)->gso_segs;
-
-		skb_push(skb2, skb2->mac_len);
-		skb2 = nskb;
-	} while (skb2);
-
-out:
-	return segs;
+	return x->outer_mode->gso_segment(x, skb, esp_features);
 }
 
 static int esp_input_tail(struct xfrm_state *x, struct sk_buff *skb)
@@ -207,6 +168,7 @@ static int esp_xmit(struct xfrm_state *x, struct sk_buff *skb,  netdev_features_
 	struct crypto_aead *aead;
 	struct esp_info esp;
 	bool hw_offload = true;
+	__u32 seq;
 
 	esp.inplace = true;
 
@@ -245,23 +207,30 @@ static int esp_xmit(struct xfrm_state *x, struct sk_buff *skb,  netdev_features_
 			return esp.nfrags;
 	}
 
+	seq = xo->seq.low;
+
 	esph = esp.esph;
 	esph->spi = x->id.spi;
 
 	skb_push(skb, -skb_network_offset(skb));
 
 	if (xo->flags & XFRM_GSO_SEGMENT) {
-		esph->seq_no = htonl(xo->seq.low);
-	} else {
-		ip_hdr(skb)->tot_len = htons(skb->len);
-		ip_send_check(ip_hdr(skb));
+		esph->seq_no = htonl(seq);
+
+		if (!skb_is_gso(skb))
+			xo->seq.low++;
+		else
+			xo->seq.low += skb_shinfo(skb)->gso_segs;
 	}
 
+	esp.seqno = cpu_to_be64(seq + ((u64)xo->seq.hi << 32));
+
+	ip_hdr(skb)->tot_len = htons(skb->len);
+	ip_send_check(ip_hdr(skb));
+
 	if (hw_offload)
 		return 0;
 
-	esp.seqno = cpu_to_be64(xo->seq.low + ((u64)xo->seq.hi << 32));
-
 	err = esp_output_tail(x, skb, &esp);
 	if (err)
 		return err;
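
The per-segment loop leaves esp4_gso_segment(); instead, esp_xmit() stamps the ESP header with the first sequence number of the super-packet and advances xo->seq.low by the number of segments to be emitted. A small sketch of that bookkeeping with plain integers (the kernel derives the high ESN word via xfrm_replay_seqhi(), elided here):

#include <stdio.h>
#include <stdint.h>

/* Stamp the header with the current sequence number, then advance
 * the counter by one per segment the device/GSO layer will emit. */
static uint64_t stamp_and_advance(uint32_t *seq_lo, uint32_t seq_hi,
				  unsigned int gso_segs)
{
	uint64_t seqno = ((uint64_t)seq_hi << 32) | *seq_lo;

	*seq_lo += gso_segs ? gso_segs : 1;
	return seqno;
}

int main(void)
{
	uint32_t lo = 1000;

	/* A 4-segment GSO packet carries 1000; the next starts at 1004. */
	printf("seqno=%llu\n", (unsigned long long)stamp_and_advance(&lo, 0, 4));
	printf("next=%u\n", lo);
	return 0;
}
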
diff --git a/net/ipv4/fib_frontend.c b/net/ipv4/fib_frontend.c
index 08259d078b1c..f05afaf3235c 100644
--- a/net/ipv4/fib_frontend.c
+++ b/net/ipv4/fib_frontend.c
@@ -587,10 +587,9 @@ static int rtentry_to_fib_config(struct net *net, int cmd, struct rtentry *rt,
  * Handle IP routing ioctl calls.
  * These are used to manipulate the routing tables
  */
-int ip_rt_ioctl(struct net *net, unsigned int cmd, void __user *arg)
+int ip_rt_ioctl(struct net *net, unsigned int cmd, struct rtentry *rt)
 {
 	struct fib_config cfg;
-	struct rtentry rt;
 	int err;
 
 	switch (cmd) {
@@ -599,11 +598,8 @@ int ip_rt_ioctl(struct net *net, unsigned int cmd, void __user *arg)
 		if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
 			return -EPERM;
 
-		if (copy_from_user(&rt, arg, sizeof(rt)))
-			return -EFAULT;
-
 		rtnl_lock();
-		err = rtentry_to_fib_config(net, cmd, &rt, &cfg);
+		err = rtentry_to_fib_config(net, cmd, rt, &cfg);
 		if (err == 0) {
 			struct fib_table *tb;
 
diff --git a/net/ipv4/fib_trie.c b/net/ipv4/fib_trie.c
index 5ddc4aefff12..5530cd6fdbc7 100644
--- a/net/ipv4/fib_trie.c
+++ b/net/ipv4/fib_trie.c
@@ -2334,7 +2334,6 @@ static int fib_triestat_seq_open(struct inode *inode, struct file *file)
 }
 
 static const struct file_operations fib_triestat_fops = {
-	.owner	= THIS_MODULE,
 	.open	= fib_triestat_seq_open,
 	.read	= seq_read,
 	.llseek	= seq_lseek,
@@ -2521,7 +2520,6 @@ static int fib_trie_seq_open(struct inode *inode, struct file *file)
 }
 
 static const struct file_operations fib_trie_fops = {
-	.owner  = THIS_MODULE,
 	.open   = fib_trie_seq_open,
 	.read   = seq_read,
 	.llseek = seq_lseek,
@@ -2715,7 +2713,6 @@ static int fib_route_seq_open(struct inode *inode, struct file *file)
 }
 
 static const struct file_operations fib_route_fops = {
-	.owner  = THIS_MODULE,
 	.open   = fib_route_seq_open,
 	.read   = seq_read,
 	.llseek = seq_lseek,
diff --git a/net/ipv4/igmp.c b/net/ipv4/igmp.c
index 2d49717a7421..10f7f74a0831 100644
--- a/net/ipv4/igmp.c
+++ b/net/ipv4/igmp.c
@@ -2832,7 +2832,6 @@ static int igmp_mc_seq_open(struct inode *inode, struct file *file)
 }
 
 static const struct file_operations igmp_mc_seq_fops = {
-	.owner		=	THIS_MODULE,
 	.open		=	igmp_mc_seq_open,
 	.read		=	seq_read,
 	.llseek		=	seq_lseek,
@@ -2979,7 +2978,6 @@ static int igmp_mcf_seq_open(struct inode *inode, struct file *file)
 }
 
 static const struct file_operations igmp_mcf_seq_fops = {
-	.owner		=	THIS_MODULE,
 	.open		=	igmp_mcf_seq_open,
 	.read		=	seq_read,
 	.llseek		=	seq_lseek,
diff --git a/net/ipv4/inet_connection_sock.c b/net/ipv4/inet_connection_sock.c
index 4ca46dc08e63..12410ec6f7f7 100644
--- a/net/ipv4/inet_connection_sock.c
+++ b/net/ipv4/inet_connection_sock.c
@@ -685,7 +685,7 @@ static void reqsk_timer_handler(struct timer_list *t)
 	int max_retries, thresh;
 	u8 defer_accept;
 
-	if (sk_state_load(sk_listener) != TCP_LISTEN)
+	if (inet_sk_state_load(sk_listener) != TCP_LISTEN)
 		goto drop;
 
 	max_retries = icsk->icsk_syn_retries ? : net->ipv4.sysctl_tcp_synack_retries;
@@ -783,7 +783,7 @@ struct sock *inet_csk_clone_lock(const struct sock *sk,
 	if (newsk) {
 		struct inet_connection_sock *newicsk = inet_csk(newsk);
 
-		newsk->sk_state = TCP_SYN_RECV;
+		inet_sk_set_state(newsk, TCP_SYN_RECV);
 		newicsk->icsk_bind_hash = NULL;
 
 		inet_sk(newsk)->inet_dport = inet_rsk(req)->ir_rmt_port;
@@ -877,7 +877,7 @@ int inet_csk_listen_start(struct sock *sk, int backlog)
 	 * It is OK, because this socket enters to hash table only
 	 * after validation is complete.
 	 */
-	sk_state_store(sk, TCP_LISTEN);
+	inet_sk_state_store(sk, TCP_LISTEN);
 	if (!sk->sk_prot->get_port(sk, inet->inet_num)) {
 		inet->inet_sport = htons(inet->inet_num);
 
@@ -888,7 +888,7 @@ int inet_csk_listen_start(struct sock *sk, int backlog)
 			return 0;
 	}
 
-	sk->sk_state = TCP_CLOSE;
+	inet_sk_set_state(sk, TCP_CLOSE);
 	return err;
 }
 EXPORT_SYMBOL_GPL(inet_csk_listen_start);
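
sk_state_load()/sk_state_store() become inet_sk_state_load()/inet_sk_state_store(), with inet_sk_set_state() for paths that already hold the lock; the acquire/release pairing is preserved while every transition funnels through one helper that can be traced. The same pattern in userspace C11 atomics (the trace hook is a stand-in for the kernel tracepoint):

#include <stdatomic.h>
#include <stdio.h>

enum { TCP_CLOSE, TCP_LISTEN, TCP_SYN_RECV };

static _Atomic int sk_state;

/* Stand-in for a tracepoint: every transition funnels through here. */
static void trace_state(int old, int new)
{
	printf("state %d -> %d\n", old, new);
}

static void state_store(int new)
{
	trace_state(atomic_load_explicit(&sk_state, memory_order_relaxed), new);
	/* Release store pairs with the acquire load in state_load(),
	 * like smp_store_release()/smp_load_acquire() in the kernel. */
	atomic_store_explicit(&sk_state, new, memory_order_release);
}

static int state_load(void)
{
	return atomic_load_explicit(&sk_state, memory_order_acquire);
}

int main(void)
{
	state_store(TCP_LISTEN);
	if (state_load() == TCP_LISTEN)
		state_store(TCP_CLOSE);
	return 0;
}
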
diff --git a/net/ipv4/inet_diag.c b/net/ipv4/inet_diag.c
index c9c35b61a027..a383f299ce24 100644
--- a/net/ipv4/inet_diag.c
+++ b/net/ipv4/inet_diag.c
@@ -564,12 +564,18 @@ static int inet_diag_bc_run(const struct nlattr *_bc,
 		case INET_DIAG_BC_JMP:
 			yes = 0;
 			break;
+		case INET_DIAG_BC_S_EQ:
+			yes = entry->sport == op[1].no;
+			break;
 		case INET_DIAG_BC_S_GE:
 			yes = entry->sport >= op[1].no;
 			break;
 		case INET_DIAG_BC_S_LE:
 			yes = entry->sport <= op[1].no;
 			break;
+		case INET_DIAG_BC_D_EQ:
+			yes = entry->dport == op[1].no;
+			break;
 		case INET_DIAG_BC_D_GE:
 			yes = entry->dport >= op[1].no;
 			break;
@@ -802,8 +808,10 @@ static int inet_diag_bc_audit(const struct nlattr *attr,
 			if (!valid_devcond(bc, len, &min_len))
 				return -EINVAL;
 			break;
+		case INET_DIAG_BC_S_EQ:
 		case INET_DIAG_BC_S_GE:
 		case INET_DIAG_BC_S_LE:
+		case INET_DIAG_BC_D_EQ:
 		case INET_DIAG_BC_D_GE:
 		case INET_DIAG_BC_D_LE:
 			if (!valid_port_comparison(bc, len, &min_len))
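
INET_DIAG_BC_S_EQ/INET_DIAG_BC_D_EQ add exact port matches to the ss-style bytecode next to the existing >=/<= ops, so both the interpreter and the auditor grow the new cases. The dispatch is a plain comparison switch, roughly:

#include <stdio.h>

enum { BC_S_EQ, BC_S_GE, BC_S_LE, BC_D_EQ, BC_D_GE, BC_D_LE };

/* Minimal model of one bytecode step: compare a socket's ports
 * against the operand carried in the instruction. */
static int bc_step(int op, int operand, int sport, int dport)
{
	switch (op) {
	case BC_S_EQ: return sport == operand;
	case BC_S_GE: return sport >= operand;
	case BC_S_LE: return sport <= operand;
	case BC_D_EQ: return dport == operand;
	case BC_D_GE: return dport >= operand;
	case BC_D_LE: return dport <= operand;
	}
	return 0;
}

int main(void)
{
	/* "sport == 22" now takes one op instead of a GE/LE pair. */
	printf("%d\n", bc_step(BC_S_EQ, 22, 22, 40000));
	return 0;
}
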
diff --git a/net/ipv4/inet_hashtables.c b/net/ipv4/inet_hashtables.c
index e7d15fb0d94d..37b7da0b975d 100644
--- a/net/ipv4/inet_hashtables.c
+++ b/net/ipv4/inet_hashtables.c
@@ -19,6 +19,7 @@
 #include <linux/slab.h>
 #include <linux/wait.h>
 #include <linux/vmalloc.h>
+#include <linux/bootmem.h>
 
 #include <net/addrconf.h>
 #include <net/inet_connection_sock.h>
@@ -168,6 +169,60 @@ int __inet_inherit_port(const struct sock *sk, struct sock *child)
 }
 EXPORT_SYMBOL_GPL(__inet_inherit_port);
 
+static struct inet_listen_hashbucket *
+inet_lhash2_bucket_sk(struct inet_hashinfo *h, struct sock *sk)
+{
+	u32 hash;
+
+#if IS_ENABLED(CONFIG_IPV6)
+	if (sk->sk_family == AF_INET6)
+		hash = ipv6_portaddr_hash(sock_net(sk),
+					  &sk->sk_v6_rcv_saddr,
+					  inet_sk(sk)->inet_num);
+	else
+#endif
+		hash = ipv4_portaddr_hash(sock_net(sk),
+					  inet_sk(sk)->inet_rcv_saddr,
+					  inet_sk(sk)->inet_num);
+	return inet_lhash2_bucket(h, hash);
+}
+
+static void inet_hash2(struct inet_hashinfo *h, struct sock *sk)
+{
+	struct inet_listen_hashbucket *ilb2;
+
+	if (!h->lhash2)
+		return;
+
+	ilb2 = inet_lhash2_bucket_sk(h, sk);
+
+	spin_lock(&ilb2->lock);
+	if (sk->sk_reuseport && sk->sk_family == AF_INET6)
+		hlist_add_tail_rcu(&inet_csk(sk)->icsk_listen_portaddr_node,
+				   &ilb2->head);
+	else
+		hlist_add_head_rcu(&inet_csk(sk)->icsk_listen_portaddr_node,
+				   &ilb2->head);
+	ilb2->count++;
+	spin_unlock(&ilb2->lock);
+}
+
+static void inet_unhash2(struct inet_hashinfo *h, struct sock *sk)
+{
+	struct inet_listen_hashbucket *ilb2;
+
+	if (!h->lhash2 ||
+	    WARN_ON_ONCE(hlist_unhashed(&inet_csk(sk)->icsk_listen_portaddr_node)))
+		return;
+
+	ilb2 = inet_lhash2_bucket_sk(h, sk);
+
+	spin_lock(&ilb2->lock);
+	hlist_del_init_rcu(&inet_csk(sk)->icsk_listen_portaddr_node);
+	ilb2->count--;
+	spin_unlock(&ilb2->lock);
+}
+
 static inline int compute_score(struct sock *sk, struct net *net,
 				const unsigned short hnum, const __be32 daddr,
 				const int dif, const int sdif, bool exact_dif)
@@ -207,6 +262,40 @@ static inline int compute_score(struct sock *sk, struct net *net,
  */
 
 /* called with rcu_read_lock() : No refcount taken on the socket */
+static struct sock *inet_lhash2_lookup(struct net *net,
+				struct inet_listen_hashbucket *ilb2,
+				struct sk_buff *skb, int doff,
+				const __be32 saddr, __be16 sport,
+				const __be32 daddr, const unsigned short hnum,
+				const int dif, const int sdif)
+{
+	bool exact_dif = inet_exact_dif_match(net, skb);
+	struct inet_connection_sock *icsk;
+	struct sock *sk, *result = NULL;
+	int score, hiscore = 0;
+	u32 phash = 0;
+
+	inet_lhash2_for_each_icsk_rcu(icsk, &ilb2->head) {
+		sk = (struct sock *)icsk;
+		score = compute_score(sk, net, hnum, daddr,
+				      dif, sdif, exact_dif);
+		if (score > hiscore) {
+			if (sk->sk_reuseport) {
+				phash = inet_ehashfn(net, daddr, hnum,
+						     saddr, sport);
+				result = reuseport_select_sock(sk, phash,
+							       skb, doff);
+				if (result)
+					return result;
+			}
+			result = sk;
+			hiscore = score;
+		}
+	}
+
+	return result;
+}
+
 struct sock *__inet_lookup_listener(struct net *net,
 				    struct inet_hashinfo *hashinfo,
 				    struct sk_buff *skb, int doff,
@@ -216,32 +305,57 @@ struct sock *__inet_lookup_listener(struct net *net,
 {
 	unsigned int hash = inet_lhashfn(net, hnum);
 	struct inet_listen_hashbucket *ilb = &hashinfo->listening_hash[hash];
-	int score, hiscore = 0, matches = 0, reuseport = 0;
 	bool exact_dif = inet_exact_dif_match(net, skb);
+	struct inet_listen_hashbucket *ilb2;
 	struct sock *sk, *result = NULL;
+	int score, hiscore = 0;
+	unsigned int hash2;
 	u32 phash = 0;
 
+	if (ilb->count <= 10 || !hashinfo->lhash2)
+		goto port_lookup;
+
+	/* Too many sockets in the ilb bucket (which is hashed by port alone).
+	 * Try lhash2 (which is hashed by port and address) instead.
+	 */
+
+	hash2 = ipv4_portaddr_hash(net, daddr, hnum);
+	ilb2 = inet_lhash2_bucket(hashinfo, hash2);
+	if (ilb2->count > ilb->count)
+		goto port_lookup;
+
+	result = inet_lhash2_lookup(net, ilb2, skb, doff,
+				    saddr, sport, daddr, hnum,
+				    dif, sdif);
+	if (result)
+		return result;
+
+	/* Lookup lhash2 with INADDR_ANY */
+
+	hash2 = ipv4_portaddr_hash(net, htonl(INADDR_ANY), hnum);
+	ilb2 = inet_lhash2_bucket(hashinfo, hash2);
+	if (ilb2->count > ilb->count)
+		goto port_lookup;
+
+	return inet_lhash2_lookup(net, ilb2, skb, doff,
+				  saddr, sport, daddr, hnum,
+				  dif, sdif);
+
+port_lookup:
 	sk_for_each_rcu(sk, &ilb->head) {
 		score = compute_score(sk, net, hnum, daddr,
 				      dif, sdif, exact_dif);
 		if (score > hiscore) {
-			reuseport = sk->sk_reuseport;
-			if (reuseport) {
+			if (sk->sk_reuseport) {
 				phash = inet_ehashfn(net, daddr, hnum,
 						     saddr, sport);
 				result = reuseport_select_sock(sk, phash,
 							       skb, doff);
 				if (result)
 					return result;
-				matches = 1;
 			}
 			result = sk;
 			hiscore = score;
-		} else if (score == hiscore && reuseport) {
-			matches++;
-			if (reciprocal_scale(phash, matches) == 0)
-				result = sk;
-			phash = next_pseudo_random32(phash);
 		}
 	}
 	return result;
@@ -430,7 +544,7 @@ bool inet_ehash_nolisten(struct sock *sk, struct sock *osk)
 		sock_prot_inuse_add(sock_net(sk), sk->sk_prot, 1);
 	} else {
 		percpu_counter_inc(sk->sk_prot->orphan_count);
-		sk->sk_state = TCP_CLOSE;
+		inet_sk_set_state(sk, TCP_CLOSE);
 		sock_set_flag(sk, SOCK_DEAD);
 		inet_csk_destroy_sock(sk);
 	}
@@ -483,6 +597,8 @@ int __inet_hash(struct sock *sk, struct sock *osk)
 		hlist_add_tail_rcu(&sk->sk_node, &ilb->head);
 	else
 		hlist_add_head_rcu(&sk->sk_node, &ilb->head);
+	inet_hash2(hashinfo, sk);
+	ilb->count++;
 	sock_set_flag(sk, SOCK_RCU_FREE);
 	sock_prot_inuse_add(sock_net(sk), sk->sk_prot, 1);
 unlock:
@@ -509,28 +625,35 @@ EXPORT_SYMBOL_GPL(inet_hash);
 void inet_unhash(struct sock *sk)
 {
 	struct inet_hashinfo *hashinfo = sk->sk_prot->h.hashinfo;
+	struct inet_listen_hashbucket *ilb;
 	spinlock_t *lock;
 	bool listener = false;
-	int done;
 
 	if (sk_unhashed(sk))
 		return;
 
 	if (sk->sk_state == TCP_LISTEN) {
-		lock = &hashinfo->listening_hash[inet_sk_listen_hashfn(sk)].lock;
+		ilb = &hashinfo->listening_hash[inet_sk_listen_hashfn(sk)];
+		lock = &ilb->lock;
 		listener = true;
 	} else {
 		lock = inet_ehash_lockp(hashinfo, sk->sk_hash);
 	}
 	spin_lock_bh(lock);
+	if (sk_unhashed(sk))
+		goto unlock;
+
 	if (rcu_access_pointer(sk->sk_reuseport_cb))
 		reuseport_detach_sock(sk);
-	if (listener)
-		done = __sk_del_node_init(sk);
-	else
-		done = __sk_nulls_del_node_init_rcu(sk);
-	if (done)
-		sock_prot_inuse_add(sock_net(sk), sk->sk_prot, -1);
+	if (listener) {
+		inet_unhash2(hashinfo, sk);
+		__sk_del_node_init(sk);
+		ilb->count--;
+	} else {
+		__sk_nulls_del_node_init_rcu(sk);
+	}
+	sock_prot_inuse_add(sock_net(sk), sk->sk_prot, -1);
+unlock:
 	spin_unlock_bh(lock);
 }
 EXPORT_SYMBOL_GPL(inet_unhash);
@@ -665,10 +788,37 @@ void inet_hashinfo_init(struct inet_hashinfo *h)
 	for (i = 0; i < INET_LHTABLE_SIZE; i++) {
 		spin_lock_init(&h->listening_hash[i].lock);
 		INIT_HLIST_HEAD(&h->listening_hash[i].head);
+		h->listening_hash[i].count = 0;
 	}
+
+	h->lhash2 = NULL;
 }
 EXPORT_SYMBOL_GPL(inet_hashinfo_init);
 
+void __init inet_hashinfo2_init(struct inet_hashinfo *h, const char *name,
+				unsigned long numentries, int scale,
+				unsigned long low_limit,
+				unsigned long high_limit)
+{
+	unsigned int i;
+
+	h->lhash2 = alloc_large_system_hash(name,
+					    sizeof(*h->lhash2),
+					    numentries,
+					    scale,
+					    0,
+					    NULL,
+					    &h->lhash2_mask,
+					    low_limit,
+					    high_limit);
+
+	for (i = 0; i <= h->lhash2_mask; i++) {
+		spin_lock_init(&h->lhash2[i].lock);
+		INIT_HLIST_HEAD(&h->lhash2[i].head);
+		h->lhash2[i].count = 0;
+	}
+}
+
 int inet_ehash_locks_alloc(struct inet_hashinfo *hashinfo)
 {
 	unsigned int locksz = sizeof(spinlock_t);
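
This is the heart of the series: listeners are additionally hashed by (address, port) into lhash2, and __inet_lookup_listener() walks whichever bucket is smaller, retrying with INADDR_ANY before falling back to the old port-only chain. A toy model of just that bucket-selection heuristic — the hash functions, table size and counts below are stand-ins:

#include <stdio.h>
#include <stdint.h>

/* Toy model of the two listener tables: listening_hash is keyed by
 * port only, lhash2 by (addr, port). Only the bucket *counts* matter
 * for the heuristic sketched here. */
#define LHSZ 32

struct bucket { unsigned int count; };

static struct bucket by_port[LHSZ];      /* listening_hash */
static struct bucket by_addrport[LHSZ];  /* lhash2 */

static unsigned int hport(uint16_t port) { return port % LHSZ; }
static unsigned int haddrport(uint32_t addr, uint16_t port)
{
	return (addr ^ port) % LHSZ;	/* stand-in for ipv4_portaddr_hash() */
}

/* Returns which table a lookup for (daddr, port) would scan. */
static const char *pick_table(uint32_t daddr, uint16_t port)
{
	struct bucket *ilb = &by_port[hport(port)];
	struct bucket *ilb2 = &by_addrport[haddrport(daddr, port)];

	if (ilb->count <= 10)		/* small bucket: old path is fine */
		return "listening_hash";
	if (ilb2->count > ilb->count)	/* lhash2 would be no better */
		return "listening_hash";
	return "lhash2";
}

int main(void)
{
	by_port[hport(80)].count = 500;	/* e.g. 500 SO_REUSEPORT listeners */
	by_addrport[haddrport(0x7f000001, 80)].count = 2;
	printf("%s\n", pick_table(0x7f000001, 80));
	return 0;
}
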
diff --git a/net/ipv4/inet_timewait_sock.c b/net/ipv4/inet_timewait_sock.c
index b563e0c46bac..c3ea4906d237 100644
--- a/net/ipv4/inet_timewait_sock.c
+++ b/net/ipv4/inet_timewait_sock.c
@@ -97,7 +97,7 @@ static void inet_twsk_add_bind_node(struct inet_timewait_sock *tw,
  * Essentially we whip up a timewait bucket, copy the relevant info into it
  * from the SK, and mess with hash chains and list linkage.
  */
-void __inet_twsk_hashdance(struct inet_timewait_sock *tw, struct sock *sk,
+void inet_twsk_hashdance(struct inet_timewait_sock *tw, struct sock *sk,
 			   struct inet_hashinfo *hashinfo)
 {
 	const struct inet_sock *inet = inet_sk(sk);
@@ -119,18 +119,6 @@ void __inet_twsk_hashdance(struct inet_timewait_sock *tw, struct sock *sk,
 
 	spin_lock(lock);
 
-	/*
-	 * Step 2: Hash TW into tcp ehash chain.
-	 * Notes :
-	 * - tw_refcnt is set to 4 because :
-	 * - We have one reference from bhash chain.
-	 * - We have one reference from ehash chain.
-	 * - We have one reference from timer.
-	 * - One reference for ourself (our caller will release it).
-	 * We can use atomic_set() because prior spin_lock()/spin_unlock()
-	 * committed into memory all tw fields.
-	 */
-	refcount_set(&tw->tw_refcnt, 4);
 	inet_twsk_add_node_rcu(tw, &ehead->chain);
 
 	/* Step 3: Remove SK from hash chain */
@@ -138,8 +126,19 @@ void __inet_twsk_hashdance(struct inet_timewait_sock *tw, struct sock *sk,
 		sock_prot_inuse_add(sock_net(sk), sk->sk_prot, -1);
 
 	spin_unlock(lock);
+
+	/* tw_refcnt is set to 3 because we have:
+	 * - one reference for the bhash chain.
+	 * - one reference for the ehash chain.
+	 * - one reference for the timer.
+	 * We can use atomic_set() because the prior spin_lock()/spin_unlock()
+	 * pair has committed all tw fields to memory.
+	 * Also note that after this point we have lost our implicit
+	 * reference, so we are not allowed to touch tw anymore.
+	 */
+	refcount_set(&tw->tw_refcnt, 3);
 }
-EXPORT_SYMBOL_GPL(__inet_twsk_hashdance);
+EXPORT_SYMBOL_GPL(inet_twsk_hashdance);
 
 static void tw_timer_handler(struct timer_list *t)
 {
@@ -271,14 +270,14 @@ restart:
 				continue;
 			tw = inet_twsk(sk);
 			if ((tw->tw_family != family) ||
-				atomic_read(&twsk_net(tw)->count))
+				refcount_read(&twsk_net(tw)->count))
 				continue;
 
 			if (unlikely(!refcount_inc_not_zero(&tw->tw_refcnt)))
 				continue;
 
 			if (unlikely((tw->tw_family != family) ||
-				     atomic_read(&twsk_net(tw)->count))) {
+				     refcount_read(&twsk_net(tw)->count))) {
 				inet_twsk_put(tw);
 				goto restart;
 			}
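
Deferring refcount_set() until after the unlock also drops the old fourth, caller-owned reference: the moment the count is armed, the timer or another CPU may free tw, so the publisher must not touch it again. The publish-then-arm shape in userspace C11 (all names illustrative):

#include <stdatomic.h>
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct obj {
	atomic_int refcnt;
	int payload;
};

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static struct obj *published;

/* Hashdance-style publication: fully initialize under the lock,
 * then hand out the references last. After the final store the
 * publisher must not touch *o again. */
static void publish(struct obj *o, int nrefs)
{
	pthread_mutex_lock(&lock);
	o->payload = 42;
	published = o;			/* now visible to lookups */
	pthread_mutex_unlock(&lock);

	atomic_store(&o->refcnt, nrefs);
	/* no further use of o here */
}

static void put(struct obj *o)
{
	if (atomic_fetch_sub(&o->refcnt, 1) == 1)
		free(o);
}

int main(void)
{
	struct obj *o = calloc(1, sizeof(*o));

	publish(o, 3);			/* e.g. bhash + ehash + timer */
	put(published);
	put(published);
	put(published);
	return 0;
}
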
diff --git a/net/ipv4/ip_gre.c b/net/ipv4/ip_gre.c
index 45ffd3d045d2..6ec670fbbbdd 100644
--- a/net/ipv4/ip_gre.c
+++ b/net/ipv4/ip_gre.c
@@ -114,7 +114,8 @@ MODULE_PARM_DESC(log_ecn_error, "Log packets received with corrupted ECN");
 static struct rtnl_link_ops ipgre_link_ops __read_mostly;
 static int ipgre_tunnel_init(struct net_device *dev);
 static void erspan_build_header(struct sk_buff *skb,
-				__be32 id, u32 index, bool truncate);
+				u32 id, u32 index,
+				bool truncate, bool is_ipv4);
 
 static unsigned int ipgre_net_id __read_mostly;
 static unsigned int gre_tap_net_id __read_mostly;
@@ -255,34 +256,43 @@ static int erspan_rcv(struct sk_buff *skb, struct tnl_ptk_info *tpi,
 {
 	struct net *net = dev_net(skb->dev);
 	struct metadata_dst *tun_dst = NULL;
+	struct erspan_base_hdr *ershdr;
+	struct erspan_metadata *pkt_md;
 	struct ip_tunnel_net *itn;
 	struct ip_tunnel *tunnel;
-	struct erspanhdr *ershdr;
 	const struct iphdr *iph;
-	__be32 index;
+	int ver;
 	int len;
 
 	itn = net_generic(net, erspan_net_id);
 	len = gre_hdr_len + sizeof(*ershdr);
 
+	/* Check the base header length first. */
 	if (unlikely(!pskb_may_pull(skb, len)))
 		return PACKET_REJECT;
 
 	iph = ip_hdr(skb);
-	ershdr = (struct erspanhdr *)(skb->data + gre_hdr_len);
+	ershdr = (struct erspan_base_hdr *)(skb->data + gre_hdr_len);
+	ver = ershdr->ver;
 
 	/* The original GRE header does not have a key field,
 	 * so use the ERSPAN 10-bit session ID as the key.
 	 */
-	tpi->key = cpu_to_be32(ntohs(ershdr->session_id) & ID_MASK);
-	index = ershdr->md.index;
+	tpi->key = cpu_to_be32(get_session_id(ershdr));
 	tunnel = ip_tunnel_lookup(itn, skb->dev->ifindex,
 				  tpi->flags | TUNNEL_KEY,
 				  iph->saddr, iph->daddr, tpi->key);
 
 	if (tunnel) {
+		len = gre_hdr_len + erspan_hdr_len(ver);
+		if (unlikely(!pskb_may_pull(skb, len)))
+			return PACKET_REJECT;
+
+		ershdr = (struct erspan_base_hdr *)(skb->data + gre_hdr_len);
+		pkt_md = (struct erspan_metadata *)(ershdr + 1);
+
 		if (__iptunnel_pull_header(skb,
-					   gre_hdr_len + sizeof(*ershdr),
+					   len,
 					   htons(ETH_P_TEB),
 					   false, false) < 0)
 			goto drop;
@@ -303,15 +313,21 @@ static int erspan_rcv(struct sk_buff *skb, struct tnl_ptk_info *tpi,
 				return PACKET_REJECT;
 
 			md = ip_tunnel_info_opts(&tun_dst->u.tun_info);
-			if (!md)
-				return PACKET_REJECT;
+			memcpy(md, pkt_md, sizeof(*md));
+			md->version = ver;
 
-			md->index = index;
 			info = &tun_dst->u.tun_info;
 			info->key.tun_flags |= TUNNEL_ERSPAN_OPT;
 			info->options_len = sizeof(*md);
 		} else {
-			tunnel->index = ntohl(index);
+			tunnel->erspan_ver = ver;
+			if (ver == 1) {
+				tunnel->index = ntohl(pkt_md->u.index);
+			} else {
+				tunnel->dir = pkt_md->u.md2.dir;
+				tunnel->hwid = get_hwid(&pkt_md->u.md2);
+			}
 		}
 
 		skb_reset_mac_header(skb);
@@ -405,14 +421,17 @@ static int gre_rcv(struct sk_buff *skb)
 	if (hdr_len < 0)
 		goto drop;
 
-	if (unlikely(tpi.proto == htons(ETH_P_ERSPAN))) {
+	if (unlikely(tpi.proto == htons(ETH_P_ERSPAN) ||
+		     tpi.proto == htons(ETH_P_ERSPAN2))) {
 		if (erspan_rcv(skb, &tpi, hdr_len) == PACKET_RCVD)
 			return 0;
+		goto out;
 	}
 
 	if (ipgre_rcv(skb, &tpi, hdr_len) == PACKET_RCVD)
 		return 0;
 
+out:
 	icmp_send(skb, ICMP_DEST_UNREACH, ICMP_PORT_UNREACH, 0);
 drop:
 	kfree_skb(skb);
@@ -560,6 +579,7 @@ static void erspan_fb_xmit(struct sk_buff *skb, struct net_device *dev,
 	bool truncate = false;
 	struct flowi4 fl;
 	int tunnel_hlen;
+	int version;
 	__be16 df;
 
 	tun_info = skb_tunnel_info(skb);
@@ -568,9 +588,13 @@ static void erspan_fb_xmit(struct sk_buff *skb, struct net_device *dev,
 		goto err_free_skb;
 
 	key = &tun_info->key;
+	md = ip_tunnel_info_opts(tun_info);
+	if (!md)
+		goto err_free_skb;
 
 	/* ERSPAN has fixed 8 byte GRE header */
-	tunnel_hlen = 8 + sizeof(struct erspanhdr);
+	version = md->version;
+	tunnel_hlen = 8 + erspan_hdr_len(version);
 
 	rt = prepare_fb_xmit(skb, dev, &fl, tunnel_hlen);
 	if (!rt)
@@ -584,12 +608,18 @@ static void erspan_fb_xmit(struct sk_buff *skb, struct net_device *dev,
 		truncate = true;
 	}
 
-	md = ip_tunnel_info_opts(tun_info);
-	if (!md)
+	if (version == 1) {
+		erspan_build_header(skb, ntohl(tunnel_id_to_key32(key->tun_id)),
+				    ntohl(md->u.index), truncate, true);
+	} else if (version == 2) {
+		erspan_build_header_v2(skb,
+				       ntohl(tunnel_id_to_key32(key->tun_id)),
+				       md->u.md2.dir,
+				       get_hwid(&md->u.md2),
+				       truncate, true);
+	} else {
 		goto err_free_rt;
-
-	erspan_build_header(skb, tunnel_id_to_key32(key->tun_id),
-			    ntohl(md->index), truncate);
+	}
 
 	gre_build_header(skb, 8, TUNNEL_SEQ,
 			 htons(ETH_P_ERSPAN), 0, htonl(tunnel->o_seqno++));
@@ -668,52 +698,6 @@ free_skb:
 	return NETDEV_TX_OK;
 }
 
-static inline u8 tos_to_cos(u8 tos)
-{
-	u8 dscp, cos;
-
-	dscp = tos >> 2;
-	cos = dscp >> 3;
-	return cos;
-}
-
-static void erspan_build_header(struct sk_buff *skb,
-				__be32 id, u32 index, bool truncate)
-{
-	struct iphdr *iphdr = ip_hdr(skb);
-	struct ethhdr *eth = eth_hdr(skb);
-	enum erspan_encap_type enc_type;
-	struct erspanhdr *ershdr;
-	struct qtag_prefix {
-		__be16 eth_type;
-		__be16 tci;
-	} *qp;
-	u16 vlan_tci = 0;
-
-	enc_type = ERSPAN_ENCAP_NOVLAN;
-
-	/* If mirrored packet has vlan tag, extract tci and
-	 *  perserve vlan header in the mirrored frame.
-	 */
-	if (eth->h_proto == htons(ETH_P_8021Q)) {
-		qp = (struct qtag_prefix *)(skb->data + 2 * ETH_ALEN);
-		vlan_tci = ntohs(qp->tci);
-		enc_type = ERSPAN_ENCAP_INFRAME;
-	}
-
-	skb_push(skb, sizeof(*ershdr));
-	ershdr = (struct erspanhdr *)skb->data;
-	memset(ershdr, 0, sizeof(*ershdr));
-
-	ershdr->ver_vlan = htons((vlan_tci & VLAN_MASK) |
-				 (ERSPAN_VERSION << VER_OFFSET));
-	ershdr->session_id = htons((u16)(ntohl(id) & ID_MASK) |
-			   ((tos_to_cos(iphdr->tos) << COS_OFFSET) & COS_MASK) |
-			   (enc_type << EN_OFFSET & EN_MASK) |
-			   ((truncate << T_OFFSET) & T_MASK));
-	ershdr->md.index = htonl(index & INDEX_MASK);
-}
-
 static netdev_tx_t erspan_xmit(struct sk_buff *skb,
 			       struct net_device *dev)
 {
@@ -737,7 +721,15 @@ static netdev_tx_t erspan_xmit(struct sk_buff *skb,
 	}
 
 	/* Push ERSPAN header */
-	erspan_build_header(skb, tunnel->parms.o_key, tunnel->index, truncate);
+	if (tunnel->erspan_ver == 1)
+		erspan_build_header(skb, ntohl(tunnel->parms.o_key),
+				    tunnel->index,
+				    truncate, true);
+	else
+		erspan_build_header_v2(skb, ntohl(tunnel->parms.o_key),
+				       tunnel->dir, tunnel->hwid,
+				       truncate, true);
+
 	tunnel->parms.o_flags &= ~TUNNEL_KEY;
 	__gre_xmit(skb, dev, &tunnel->parms.iph, htons(ETH_P_ERSPAN));
 	return NETDEV_TX_OK;
@@ -1209,13 +1201,32 @@ static int ipgre_netlink_parms(struct net_device *dev,
 	if (data[IFLA_GRE_FWMARK])
 		*fwmark = nla_get_u32(data[IFLA_GRE_FWMARK]);
 
-	if (data[IFLA_GRE_ERSPAN_INDEX]) {
-		t->index = nla_get_u32(data[IFLA_GRE_ERSPAN_INDEX]);
+	if (data[IFLA_GRE_ERSPAN_VER]) {
+		t->erspan_ver = nla_get_u8(data[IFLA_GRE_ERSPAN_VER]);
 
-		if (t->index & ~INDEX_MASK)
+		if (t->erspan_ver != 1 && t->erspan_ver != 2)
 			return -EINVAL;
 	}
 
+	if (t->erspan_ver == 1) {
+		if (data[IFLA_GRE_ERSPAN_INDEX]) {
+			t->index = nla_get_u32(data[IFLA_GRE_ERSPAN_INDEX]);
+			if (t->index & ~INDEX_MASK)
+				return -EINVAL;
+		}
+	} else if (t->erspan_ver == 2) {
+		if (data[IFLA_GRE_ERSPAN_DIR]) {
+			t->dir = nla_get_u8(data[IFLA_GRE_ERSPAN_DIR]);
+			if (t->dir & ~(DIR_MASK >> DIR_OFFSET))
+				return -EINVAL;
+		}
+		if (data[IFLA_GRE_ERSPAN_HWID]) {
+			t->hwid = nla_get_u16(data[IFLA_GRE_ERSPAN_HWID]);
+			if (t->hwid & ~(HWID_MASK >> HWID_OFFSET))
+				return -EINVAL;
+		}
+	}
+
 	return 0;
 }
 
@@ -1282,7 +1293,7 @@ static int erspan_tunnel_init(struct net_device *dev)
 	tunnel->tun_hlen = 8;
 	tunnel->parms.iph.protocol = IPPROTO_GRE;
 	tunnel->hlen = tunnel->tun_hlen + tunnel->encap_hlen +
-		       sizeof(struct erspanhdr);
+		       erspan_hdr_len(tunnel->erspan_ver);
 	t_hlen = tunnel->hlen + sizeof(struct iphdr);
 
 	dev->needed_headroom = LL_MAX_HEADER + t_hlen + 4;
@@ -1413,6 +1424,12 @@ static size_t ipgre_get_size(const struct net_device *dev)
 		nla_total_size(4) +
 		/* IFLA_GRE_ERSPAN_INDEX */
 		nla_total_size(4) +
+		/* IFLA_GRE_ERSPAN_VER */
+		nla_total_size(1) +
+		/* IFLA_GRE_ERSPAN_DIR */
+		nla_total_size(1) +
+		/* IFLA_GRE_ERSPAN_HWID */
+		nla_total_size(2) +
 		0;
 }
 
@@ -1455,9 +1472,18 @@ static int ipgre_fill_info(struct sk_buff *skb, const struct net_device *dev)
 			goto nla_put_failure;
 	}
 
-	if (t->index)
+	if (nla_put_u8(skb, IFLA_GRE_ERSPAN_VER, t->erspan_ver))
+		goto nla_put_failure;
+
+	if (t->erspan_ver == 1) {
 		if (nla_put_u32(skb, IFLA_GRE_ERSPAN_INDEX, t->index))
 			goto nla_put_failure;
+	} else if (t->erspan_ver == 2) {
+		if (nla_put_u8(skb, IFLA_GRE_ERSPAN_DIR, t->dir))
+			goto nla_put_failure;
+		if (nla_put_u16(skb, IFLA_GRE_ERSPAN_HWID, t->hwid))
+			goto nla_put_failure;
+	}
 
 	return 0;
 
@@ -1493,6 +1519,9 @@ static const struct nla_policy ipgre_policy[IFLA_GRE_MAX + 1] = {
 	[IFLA_GRE_IGNORE_DF]	= { .type = NLA_U8 },
 	[IFLA_GRE_FWMARK]	= { .type = NLA_U32 },
 	[IFLA_GRE_ERSPAN_INDEX]	= { .type = NLA_U32 },
+	[IFLA_GRE_ERSPAN_VER]	= { .type = NLA_U8 },
+	[IFLA_GRE_ERSPAN_DIR]	= { .type = NLA_U8 },
+	[IFLA_GRE_ERSPAN_HWID]	= { .type = NLA_U16 },
 };
 
 static struct rtnl_link_ops ipgre_link_ops __read_mostly = {
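
ERSPAN v2 splits the old erspanhdr into a common base header plus per-version metadata — a 4-byte index for v1, an 8-byte md2 carrying dir/hwid for v2 — which is why erspan_rcv() re-pulls the packet once it has read the version. A standalone sketch of the field extraction; the bit layout follows the ERSPAN drafts and the helpers are illustrative, not the kernel structs:

#include <stdio.h>
#include <stdint.h>

/* ERSPAN base header, second 16-bit word: cos(3) en(2) t(1) session(10).
 * ERSPAN v2 metadata, last 16-bit word: p(1) ft(5) hwid(6) dir(1) gra(2) o(1).
 * Values below are host-order words, i.e. after ntohs(). */
static unsigned int session_id(uint16_t w) { return w & 0x3ff; }
static unsigned int md2_hwid(uint16_t w)   { return (w >> 4) & 0x3f; }
static unsigned int md2_dir(uint16_t w)    { return (w >> 3) & 0x1; }

/* v1 carries a 4-byte index after the base header, v2 an 8-byte md2,
 * hence the second pskb_may_pull() once the version is known. */
static unsigned int md_len(int ver) { return ver == 1 ? 4 : 8; }

int main(void)
{
	uint16_t w = (0x2a << 4) | (1 << 3);	/* hwid=0x2a, dir=1 */

	printf("hwid=0x%x dir=%u v2 md len=%u\n",
	       md2_hwid(w), md2_dir(w), md_len(2));
	printf("session=%u\n", session_id(0x8123));
	return 0;
}
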
diff --git a/net/ipv4/ip_sockglue.c b/net/ipv4/ip_sockglue.c
index 60fb1eb7d7d8..6cc70fa488cb 100644
--- a/net/ipv4/ip_sockglue.c
+++ b/net/ipv4/ip_sockglue.c
@@ -808,6 +808,7 @@ static int do_ip_setsockopt(struct sock *sk, int level,
 	{
 		struct net_device *dev = NULL;
 		int ifindex;
+		int midx;
 
 		if (optlen != sizeof(int))
 			goto e_inval;
@@ -823,10 +824,13 @@ static int do_ip_setsockopt(struct sock *sk, int level,
 		err = -EADDRNOTAVAIL;
 		if (!dev)
 			break;
+
+		midx = l3mdev_master_ifindex(dev);
 		dev_put(dev);
 
 		err = -EINVAL;
-		if (sk->sk_bound_dev_if)
+		if (sk->sk_bound_dev_if &&
+		    (!midx || midx != sk->sk_bound_dev_if))
 			break;
 
 		inet->uc_index = ifindex;
diff --git a/net/ipv4/ip_tunnel.c b/net/ipv4/ip_tunnel.c
index 6d21068f9b55..d786a8441bce 100644
--- a/net/ipv4/ip_tunnel.c
+++ b/net/ipv4/ip_tunnel.c
@@ -710,9 +710,16 @@ void ip_tunnel_xmit(struct sk_buff *skb, struct net_device *dev,
 		}
 	}
 
-	init_tunnel_flow(&fl4, protocol, dst, tnl_params->saddr,
-			 tunnel->parms.o_key, RT_TOS(tos), tunnel->parms.link,
-			 tunnel->fwmark);
+	if (tunnel->fwmark) {
+		init_tunnel_flow(&fl4, protocol, dst, tnl_params->saddr,
+				 tunnel->parms.o_key, RT_TOS(tos),
+				 tunnel->parms.link, tunnel->fwmark);
+	} else {
+		init_tunnel_flow(&fl4, protocol, dst, tnl_params->saddr,
+				 tunnel->parms.o_key, RT_TOS(tos),
+				 tunnel->parms.link, skb->mark);
+	}
 
 	if (ip_tunnel_encap(skb, tunnel, &protocol, &fl4) < 0)
 		goto tx_error;
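
The two init_tunnel_flow() calls above differ only in the final mark argument: use the tunnel's configured fwmark when set, otherwise inherit the packet's own mark. The selection reduces to this (trivial sketch):

#include <stdio.h>
#include <stdint.h>

/* Mark selection for the tunnel route lookup: a configured tunnel
 * fwmark wins, otherwise the skb mark is inherited. */
static uint32_t flow_mark(uint32_t tunnel_fwmark, uint32_t skb_mark)
{
	return tunnel_fwmark ? tunnel_fwmark : skb_mark;
}

int main(void)
{
	printf("%u %u\n", flow_mark(0, 7), flow_mark(100, 7));	/* 7 100 */
	return 0;
}
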
diff --git a/net/ipv4/ipconfig.c b/net/ipv4/ipconfig.c
index abdebca848c9..f75802ad960f 100644
--- a/net/ipv4/ipconfig.c
+++ b/net/ipv4/ipconfig.c
@@ -329,39 +329,6 @@ set_sockaddr(struct sockaddr_in *sin, __be32 addr, __be16 port)
 	sin->sin_port = port;
 }
 
-static int __init ic_devinet_ioctl(unsigned int cmd, struct ifreq *arg)
-{
-	int res;
-
-	mm_segment_t oldfs = get_fs();
-	set_fs(get_ds());
-	res = devinet_ioctl(&init_net, cmd, (struct ifreq __user *) arg);
-	set_fs(oldfs);
-	return res;
-}
-
-static int __init ic_dev_ioctl(unsigned int cmd, struct ifreq *arg)
-{
-	int res;
-
-	mm_segment_t oldfs = get_fs();
-	set_fs(get_ds());
-	res = dev_ioctl(&init_net, cmd, (struct ifreq __user *) arg);
-	set_fs(oldfs);
-	return res;
-}
-
-static int __init ic_route_ioctl(unsigned int cmd, struct rtentry *arg)
-{
-	int res;
-
-	mm_segment_t oldfs = get_fs();
-	set_fs(get_ds());
-	res = ip_rt_ioctl(&init_net, cmd, (void __user *) arg);
-	set_fs(oldfs);
-	return res;
-}
-
 /*
  *	Set up interface addresses and routes.
  */
@@ -375,19 +342,19 @@ static int __init ic_setup_if(void)
 	memset(&ir, 0, sizeof(ir));
 	strcpy(ir.ifr_ifrn.ifrn_name, ic_dev->dev->name);
 	set_sockaddr(sin, ic_myaddr, 0);
-	if ((err = ic_devinet_ioctl(SIOCSIFADDR, &ir)) < 0) {
+	if ((err = devinet_ioctl(&init_net, SIOCSIFADDR, &ir)) < 0) {
 		pr_err("IP-Config: Unable to set interface address (%d)\n",
 		       err);
 		return -1;
 	}
 	set_sockaddr(sin, ic_netmask, 0);
-	if ((err = ic_devinet_ioctl(SIOCSIFNETMASK, &ir)) < 0) {
+	if ((err = devinet_ioctl(&init_net, SIOCSIFNETMASK, &ir)) < 0) {
 		pr_err("IP-Config: Unable to set interface netmask (%d)\n",
 		       err);
 		return -1;
 	}
 	set_sockaddr(sin, ic_myaddr | ~ic_netmask, 0);
-	if ((err = ic_devinet_ioctl(SIOCSIFBRDADDR, &ir)) < 0) {
+	if ((err = devinet_ioctl(&init_net, SIOCSIFBRDADDR, &ir)) < 0) {
 		pr_err("IP-Config: Unable to set interface broadcast address (%d)\n",
 		       err);
 		return -1;
@@ -397,11 +364,11 @@ static int __init ic_setup_if(void)
 	 * out, we'll try to muddle along.
 	 */
 	if (ic_dev_mtu != 0) {
-		strcpy(ir.ifr_name, ic_dev->dev->name);
-		ir.ifr_mtu = ic_dev_mtu;
-		if ((err = ic_dev_ioctl(SIOCSIFMTU, &ir)) < 0)
+		rtnl_lock();
+		if ((err = dev_set_mtu(ic_dev->dev, ic_dev_mtu)) < 0)
 			pr_err("IP-Config: Unable to set interface mtu to %d (%d)\n",
 			       ic_dev_mtu, err);
+		rtnl_unlock();
 	}
 	return 0;
 }
@@ -423,7 +390,7 @@ static int __init ic_setup_routes(void)
 		set_sockaddr((struct sockaddr_in *) &rm.rt_genmask, 0, 0);
 		set_sockaddr((struct sockaddr_in *) &rm.rt_gateway, ic_gateway, 0);
 		rm.rt_flags = RTF_UP | RTF_GATEWAY;
-		if ((err = ic_route_ioctl(SIOCADDRT, &rm)) < 0) {
+		if ((err = ip_rt_ioctl(&init_net, SIOCADDRT, &rm)) < 0) {
 			pr_err("IP-Config: Cannot add default route (%d)\n",
 			       err);
 			return -1;
@@ -1322,7 +1289,6 @@ static int pnp_seq_open(struct inode *indoe, struct file *file)
 }
 
 static const struct file_operations pnp_seq_fops = {
-	.owner		= THIS_MODULE,
 	.open		= pnp_seq_open,
 	.read		= seq_read,
 	.llseek		= seq_lseek,
diff --git a/net/ipv4/ipmr.c b/net/ipv4/ipmr.c
index fd5f19c988e4..b05689bbba31 100644
--- a/net/ipv4/ipmr.c
+++ b/net/ipv4/ipmr.c
@@ -3022,7 +3022,7 @@ static int ipmr_vif_seq_show(struct seq_file *seq, void *v)
 		const char *name =  vif->dev ? vif->dev->name : "none";
 
 		seq_printf(seq,
-			   "%2zd %-10s %8ld %7ld  %8ld %7ld %05X %08X %08X\n",
+			   "%2td %-10s %8ld %7ld  %8ld %7ld %05X %08X %08X\n",
 			   vif - mrt->vif_table,
 			   name, vif->bytes_in, vif->pkt_in,
 			   vif->bytes_out, vif->pkt_out,
@@ -3045,7 +3045,6 @@ static int ipmr_vif_open(struct inode *inode, struct file *file)
 }
 
 static const struct file_operations ipmr_vif_fops = {
-	.owner	 = THIS_MODULE,
 	.open    = ipmr_vif_open,
 	.read    = seq_read,
 	.llseek  = seq_lseek,
@@ -3198,7 +3197,6 @@ static int ipmr_mfc_open(struct inode *inode, struct file *file)
 }
 
 static const struct file_operations ipmr_mfc_fops = {
-	.owner	 = THIS_MODULE,
 	.open    = ipmr_mfc_open,
 	.read    = seq_read,
 	.llseek  = seq_lseek,
diff --git a/net/ipv4/netfilter.c b/net/ipv4/netfilter.c
index c0cc6aa8cfaa..e6774ccb7731 100644
--- a/net/ipv4/netfilter.c
+++ b/net/ipv4/netfilter.c
@@ -80,35 +80,7 @@ int ip_route_me_harder(struct net *net, struct sk_buff *skb, unsigned int addr_t
 }
 EXPORT_SYMBOL(ip_route_me_harder);
 
-/*
- * Extra routing may needed on local out, as the QUEUE target never
- * returns control to the table.
- */
-
-struct ip_rt_info {
-	__be32 daddr;
-	__be32 saddr;
-	u_int8_t tos;
-	u_int32_t mark;
-};
-
-static void nf_ip_saveroute(const struct sk_buff *skb,
-			    struct nf_queue_entry *entry)
-{
-	struct ip_rt_info *rt_info = nf_queue_entry_reroute(entry);
-
-	if (entry->state.hook == NF_INET_LOCAL_OUT) {
-		const struct iphdr *iph = ip_hdr(skb);
-
-		rt_info->tos = iph->tos;
-		rt_info->daddr = iph->daddr;
-		rt_info->saddr = iph->saddr;
-		rt_info->mark = skb->mark;
-	}
-}
-
-static int nf_ip_reroute(struct net *net, struct sk_buff *skb,
-			 const struct nf_queue_entry *entry)
+int nf_ip_reroute(struct sk_buff *skb, const struct nf_queue_entry *entry)
 {
 	const struct ip_rt_info *rt_info = nf_queue_entry_reroute(entry);
 
@@ -119,10 +91,12 @@ static int nf_ip_reroute(struct net *net, struct sk_buff *skb,
 		      skb->mark == rt_info->mark &&
 		      iph->daddr == rt_info->daddr &&
 		      iph->saddr == rt_info->saddr))
-			return ip_route_me_harder(net, skb, RTN_UNSPEC);
+			return ip_route_me_harder(entry->state.net, skb,
+						  RTN_UNSPEC);
 	}
 	return 0;
 }
+EXPORT_SYMBOL_GPL(nf_ip_reroute);
 
 __sum16 nf_ip_checksum(struct sk_buff *skb, unsigned int hook,
 			    unsigned int dataoff, u_int8_t protocol)
@@ -155,9 +129,9 @@ __sum16 nf_ip_checksum(struct sk_buff *skb, unsigned int hook,
 }
 EXPORT_SYMBOL(nf_ip_checksum);
 
-static __sum16 nf_ip_checksum_partial(struct sk_buff *skb, unsigned int hook,
-				      unsigned int dataoff, unsigned int len,
-				      u_int8_t protocol)
+__sum16 nf_ip_checksum_partial(struct sk_buff *skb, unsigned int hook,
+			       unsigned int dataoff, unsigned int len,
+			       u_int8_t protocol)
 {
 	const struct iphdr *iph = ip_hdr(skb);
 	__sum16 csum = 0;
@@ -175,9 +149,10 @@ static __sum16 nf_ip_checksum_partial(struct sk_buff *skb, unsigned int hook,
 	}
 	return csum;
 }
+EXPORT_SYMBOL_GPL(nf_ip_checksum_partial);
 
-static int nf_ip_route(struct net *net, struct dst_entry **dst,
-		       struct flowi *fl, bool strict __always_unused)
+int nf_ip_route(struct net *net, struct dst_entry **dst, struct flowi *fl,
+		bool strict __always_unused)
 {
 	struct rtable *rt = ip_route_output_key(net, &fl->u.ip4);
 	if (IS_ERR(rt))
@@ -185,19 +160,4 @@ static int nf_ip_route(struct net *net, struct dst_entry **dst,
 	*dst = &rt->dst;
 	return 0;
 }
-
-static const struct nf_afinfo nf_ip_afinfo = {
-	.family			= AF_INET,
-	.checksum		= nf_ip_checksum,
-	.checksum_partial	= nf_ip_checksum_partial,
-	.route			= nf_ip_route,
-	.saveroute		= nf_ip_saveroute,
-	.reroute		= nf_ip_reroute,
-	.route_key_size		= sizeof(struct ip_rt_info),
-};
-
-static int __init ipv4_netfilter_init(void)
-{
-	return nf_register_afinfo(&nf_ip_afinfo);
-}
-subsys_initcall(ipv4_netfilter_init);
+EXPORT_SYMBOL_GPL(nf_ip_route);
diff --git a/net/ipv4/netfilter/Kconfig b/net/ipv4/netfilter/Kconfig
index c11eb1744ab1..5f52236780b4 100644
--- a/net/ipv4/netfilter/Kconfig
+++ b/net/ipv4/netfilter/Kconfig
@@ -72,11 +72,21 @@ endif # NF_TABLES_IPV4
 
 config NF_TABLES_ARP
 	tristate "ARP nf_tables support"
+	select NETFILTER_FAMILY_ARP
 	help
 	  This option enables the ARP support for nf_tables.
 
 endif # NF_TABLES
 
+config NF_FLOW_TABLE_IPV4
+	tristate "Netfilter flow table IPv4 module"
+	depends on NF_CONNTRACK && NF_TABLES
+	select NF_FLOW_TABLE
+	help
+	  This option adds IPv4 support for the flow table infrastructure.
+
+	  To compile it as a module, choose M here.
+
 config NF_DUP_IPV4
 	tristate "Netfilter IPv4 packet duplication to alternate destination"
 	depends on !NF_CONNTRACK || NF_CONNTRACK
@@ -148,6 +158,7 @@ config NF_NAT_SNMP_BASIC
 	depends on NF_CONNTRACK_SNMP
 	depends on NETFILTER_ADVANCED
 	default NF_NAT && NF_CONNTRACK_SNMP
+	select ASN1
 	---help---
 
 	  This module implements an Application Layer Gateway (ALG) for
@@ -333,6 +344,7 @@ config IP_NF_TARGET_CLUSTERIP
 	depends on NF_CONNTRACK_IPV4
 	depends on NETFILTER_ADVANCED
 	select NF_CONNTRACK_MARK
+	select NETFILTER_FAMILY_ARP
 	help
 	  The CLUSTERIP target allows you to build load-balancing clusters of
 	  network servers without having a dedicated load-balancing
@@ -392,6 +404,7 @@ endif # IP_NF_IPTABLES
 config IP_NF_ARPTABLES
 	tristate "ARP tables support"
 	select NETFILTER_XTABLES
+	select NETFILTER_FAMILY_ARP
 	depends on NETFILTER_ADVANCED
 	help
 	  arptables is a general, extensible packet identification framework.
diff --git a/net/ipv4/netfilter/Makefile b/net/ipv4/netfilter/Makefile
index adcdae358365..2dad20eefd26 100644
--- a/net/ipv4/netfilter/Makefile
+++ b/net/ipv4/netfilter/Makefile
@@ -27,9 +27,15 @@ obj-$(CONFIG_NF_REJECT_IPV4) += nf_reject_ipv4.o
 # NAT helpers (nf_conntrack)
 obj-$(CONFIG_NF_NAT_H323) += nf_nat_h323.o
 obj-$(CONFIG_NF_NAT_PPTP) += nf_nat_pptp.o
+
+nf_nat_snmp_basic-y := nf_nat_snmp_basic-asn1.o nf_nat_snmp_basic_main.o
+nf_nat_snmp_basic-y : nf_nat_snmp_basic-asn1.h nf_nat_snmp_basic-asn1.c
 obj-$(CONFIG_NF_NAT_SNMP_BASIC) += nf_nat_snmp_basic.o
+clean-files := nf_nat_snmp_basic-asn1.c nf_nat_snmp_basic-asn1.h
+
 obj-$(CONFIG_NF_NAT_MASQUERADE_IPV4) += nf_nat_masquerade_ipv4.o
 
 # NAT protocols (nf_nat)
 obj-$(CONFIG_NF_NAT_PROTO_GRE) += nf_nat_proto_gre.o
 
@@ -43,6 +49,9 @@ obj-$(CONFIG_NFT_REDIR_IPV4) += nft_redir_ipv4.o
 obj-$(CONFIG_NFT_DUP_IPV4) += nft_dup_ipv4.o
 obj-$(CONFIG_NF_TABLES_ARP) += nf_tables_arp.o
 
+# flow table support
+obj-$(CONFIG_NF_FLOW_TABLE_IPV4) += nf_flow_table_ipv4.o
+
 # generic IP tables 
 obj-$(CONFIG_IP_NF_IPTABLES) += ip_tables.o
 
diff --git a/net/ipv4/netfilter/arp_tables.c b/net/ipv4/netfilter/arp_tables.c
index eb8246c39de0..4ffe302f9b82 100644
--- a/net/ipv4/netfilter/arp_tables.c
+++ b/net/ipv4/netfilter/arp_tables.c
@@ -805,9 +805,8 @@ static int get_info(struct net *net, void __user *user,
 	if (compat)
 		xt_compat_lock(NFPROTO_ARP);
 #endif
-	t = try_then_request_module(xt_find_table_lock(net, NFPROTO_ARP, name),
-				    "arptable_%s", name);
-	if (t) {
+	t = xt_request_find_table_lock(net, NFPROTO_ARP, name);
+	if (!IS_ERR(t)) {
 		struct arpt_getinfo info;
 		const struct xt_table_info *private = t->private;
 #ifdef CONFIG_COMPAT
@@ -836,7 +835,7 @@ static int get_info(struct net *net, void __user *user,
 		xt_table_unlock(t);
 		module_put(t->me);
 	} else
-		ret = -ENOENT;
+		ret = PTR_ERR(t);
 #ifdef CONFIG_COMPAT
 	if (compat)
 		xt_compat_unlock(NFPROTO_ARP);
@@ -861,7 +860,7 @@ static int get_entries(struct net *net, struct arpt_get_entries __user *uptr,
 	get.name[sizeof(get.name) - 1] = '\0';
 
 	t = xt_find_table_lock(net, NFPROTO_ARP, get.name);
-	if (t) {
+	if (!IS_ERR(t)) {
 		const struct xt_table_info *private = t->private;
 
 		if (get.size == private->size)
@@ -873,7 +872,7 @@ static int get_entries(struct net *net, struct arpt_get_entries __user *uptr,
 		module_put(t->me);
 		xt_table_unlock(t);
 	} else
-		ret = -ENOENT;
+		ret = PTR_ERR(t);
 
 	return ret;
 }
@@ -898,10 +897,9 @@ static int __do_replace(struct net *net, const char *name,
 		goto out;
 	}
 
-	t = try_then_request_module(xt_find_table_lock(net, NFPROTO_ARP, name),
-				    "arptable_%s", name);
-	if (!t) {
-		ret = -ENOENT;
+	t = xt_request_find_table_lock(net, NFPROTO_ARP, name);
+	if (IS_ERR(t)) {
+		ret = PTR_ERR(t);
 		goto free_newinfo_counters_untrans;
 	}
 
@@ -1015,8 +1013,8 @@ static int do_add_counters(struct net *net, const void __user *user,
 		return PTR_ERR(paddc);
 
 	t = xt_find_table_lock(net, NFPROTO_ARP, tmp.name);
-	if (!t) {
-		ret = -ENOENT;
+	if (IS_ERR(t)) {
+		ret = PTR_ERR(t);
 		goto free;
 	}
 
@@ -1403,7 +1401,7 @@ static int compat_get_entries(struct net *net,
 
 	xt_compat_lock(NFPROTO_ARP);
 	t = xt_find_table_lock(net, NFPROTO_ARP, get.name);
-	if (t) {
+	if (!IS_ERR(t)) {
 		const struct xt_table_info *private = t->private;
 		struct xt_table_info info;
 
@@ -1418,7 +1416,7 @@ static int compat_get_entries(struct net *net,
 		module_put(t->me);
 		xt_table_unlock(t);
 	} else
-		ret = -ENOENT;
+		ret = PTR_ERR(t);
 
 	xt_compat_unlock(NFPROTO_ARP);
 	return ret;
@@ -1653,7 +1651,6 @@ static int __init arp_tables_init(void)
 	if (ret < 0)
 		goto err4;
 
-	pr_info("arp_tables: (C) 2002 David S. Miller\n");
 	return 0;
 
 err4:
diff --git a/net/ipv4/netfilter/ip_tables.c b/net/ipv4/netfilter/ip_tables.c
index cc984d0e0c69..9a71f3149507 100644
--- a/net/ipv4/netfilter/ip_tables.c
+++ b/net/ipv4/netfilter/ip_tables.c
@@ -968,9 +968,8 @@ static int get_info(struct net *net, void __user *user,
 	if (compat)
 		xt_compat_lock(AF_INET);
 #endif
-	t = try_then_request_module(xt_find_table_lock(net, AF_INET, name),
-				    "iptable_%s", name);
-	if (t) {
+	t = xt_request_find_table_lock(net, AF_INET, name);
+	if (!IS_ERR(t)) {
 		struct ipt_getinfo info;
 		const struct xt_table_info *private = t->private;
 #ifdef CONFIG_COMPAT
@@ -1000,7 +999,7 @@ static int get_info(struct net *net, void __user *user,
 		xt_table_unlock(t);
 		module_put(t->me);
 	} else
-		ret = -ENOENT;
+		ret = PTR_ERR(t);
 #ifdef CONFIG_COMPAT
 	if (compat)
 		xt_compat_unlock(AF_INET);
@@ -1025,7 +1024,7 @@ get_entries(struct net *net, struct ipt_get_entries __user *uptr,
 	get.name[sizeof(get.name) - 1] = '\0';
 
 	t = xt_find_table_lock(net, AF_INET, get.name);
-	if (t) {
+	if (!IS_ERR(t)) {
 		const struct xt_table_info *private = t->private;
 		if (get.size == private->size)
 			ret = copy_entries_to_user(private->size,
@@ -1036,7 +1035,7 @@ get_entries(struct net *net, struct ipt_get_entries __user *uptr,
 		module_put(t->me);
 		xt_table_unlock(t);
 	} else
-		ret = -ENOENT;
+		ret = PTR_ERR(t);
 
 	return ret;
 }
@@ -1059,10 +1058,9 @@ __do_replace(struct net *net, const char *name, unsigned int valid_hooks,
 		goto out;
 	}
 
-	t = try_then_request_module(xt_find_table_lock(net, AF_INET, name),
-				    "iptable_%s", name);
-	if (!t) {
-		ret = -ENOENT;
+	t = xt_request_find_table_lock(net, AF_INET, name);
+	if (IS_ERR(t)) {
+		ret = PTR_ERR(t);
 		goto free_newinfo_counters_untrans;
 	}
 
@@ -1176,8 +1174,8 @@ do_add_counters(struct net *net, const void __user *user,
 		return PTR_ERR(paddc);
 
 	t = xt_find_table_lock(net, AF_INET, tmp.name);
-	if (!t) {
-		ret = -ENOENT;
+	if (IS_ERR(t)) {
+		ret = PTR_ERR(t);
 		goto free;
 	}
 
@@ -1620,7 +1618,7 @@ compat_get_entries(struct net *net, struct compat_ipt_get_entries __user *uptr,
 
 	xt_compat_lock(AF_INET);
 	t = xt_find_table_lock(net, AF_INET, get.name);
-	if (t) {
+	if (!IS_ERR(t)) {
 		const struct xt_table_info *private = t->private;
 		struct xt_table_info info;
 		ret = compat_table_info(private, &info);
@@ -1634,7 +1632,7 @@ compat_get_entries(struct net *net, struct compat_ipt_get_entries __user *uptr,
 		module_put(t->me);
 		xt_table_unlock(t);
 	} else
-		ret = -ENOENT;
+		ret = PTR_ERR(t);
 
 	xt_compat_unlock(AF_INET);
 	return ret;
@@ -1936,7 +1934,6 @@ static int __init ip_tables_init(void)
 	if (ret < 0)
 		goto err5;
 
-	pr_info("(C) 2000-2006 Netfilter Core Team\n");
 	return 0;
 
 err5:
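
xt_request_find_table_lock() folds the find-then-request_module retry into one helper that reports failure as an ERR_PTR, so get_info() and friends propagate the real errno instead of hard-coding -ENOENT. The kernel's pointer-encoded errno convention, modeled in plain userspace C (MAX_ERRNO as in the kernel; the lookup itself is illustrative):

#include <stdio.h>

/* Userspace model of ERR_PTR()/IS_ERR()/PTR_ERR(): errnos are
 * squeezed into the top page of the address space. */
#define MAX_ERRNO 4095

static void *err_ptr(long err) { return (void *)err; }
static long ptr_err(const void *p) { return (long)p; }
static int is_err(const void *p)
{
	return (unsigned long)p >= (unsigned long)-MAX_ERRNO;
}

static void *find_table(const char *name)
{
	if (name[0] == '\0')
		return err_ptr(-2);	/* -ENOENT */
	return "table";			/* any real pointer */
}

int main(void)
{
	void *t = find_table("");

	if (is_err(t))
		printf("ret = %ld\n", ptr_err(t));	/* ret = -2 */
	else
		printf("got %s\n", (char *)t);
	return 0;
}
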
diff --git a/net/ipv4/netfilter/ipt_CLUSTERIP.c b/net/ipv4/netfilter/ipt_CLUSTERIP.c
index 69060e3abe85..c29a6ca6c6d6 100644
--- a/net/ipv4/netfilter/ipt_CLUSTERIP.c
+++ b/net/ipv4/netfilter/ipt_CLUSTERIP.c
@@ -776,7 +776,6 @@ static ssize_t clusterip_proc_write(struct file *file, const char __user *input,
 }
 
 static const struct file_operations clusterip_proc_fops = {
-	.owner	 = THIS_MODULE,
 	.open	 = clusterip_proc_open,
 	.read	 = seq_read,
 	.write	 = clusterip_proc_write,
diff --git a/net/ipv4/netfilter/iptable_filter.c b/net/ipv4/netfilter/iptable_filter.c
index 7667f223d7f8..9ac92ea7b93c 100644
--- a/net/ipv4/netfilter/iptable_filter.c
+++ b/net/ipv4/netfilter/iptable_filter.c
@@ -38,12 +38,6 @@ static unsigned int
 iptable_filter_hook(void *priv, struct sk_buff *skb,
 		    const struct nf_hook_state *state)
 {
-	if (state->hook == NF_INET_LOCAL_OUT &&
-	    (skb->len < sizeof(struct iphdr) ||
-	     ip_hdrlen(skb) < sizeof(struct iphdr)))
-		/* root is playing with raw sockets. */
-		return NF_ACCEPT;
-
 	return ipt_do_table(skb, state, state->net->ipv4.iptable_filter);
 }
 
diff --git a/net/ipv4/netfilter/iptable_mangle.c b/net/ipv4/netfilter/iptable_mangle.c
index aebdb337fd7e..dea138ca8925 100644
--- a/net/ipv4/netfilter/iptable_mangle.c
+++ b/net/ipv4/netfilter/iptable_mangle.c
@@ -49,11 +49,6 @@ ipt_mangle_out(struct sk_buff *skb, const struct nf_hook_state *state)
 	u_int32_t mark;
 	int err;
 
-	/* root is playing with raw sockets. */
-	if (skb->len < sizeof(struct iphdr) ||
-	    ip_hdrlen(skb) < sizeof(struct iphdr))
-		return NF_ACCEPT;
-
 	/* Save things which could affect route */
 	mark = skb->mark;
 	iph = ip_hdr(skb);
diff --git a/net/ipv4/netfilter/iptable_nat.c b/net/ipv4/netfilter/iptable_nat.c
index a1a07b338ccf..0f7255cc65ee 100644
--- a/net/ipv4/netfilter/iptable_nat.c
+++ b/net/ipv4/netfilter/iptable_nat.c
@@ -72,6 +72,7 @@ static const struct nf_hook_ops nf_nat_ipv4_ops[] = {
 	{
 		.hook		= iptable_nat_ipv4_in,
 		.pf		= NFPROTO_IPV4,
+		.nat_hook	= true,
 		.hooknum	= NF_INET_PRE_ROUTING,
 		.priority	= NF_IP_PRI_NAT_DST,
 	},
@@ -79,6 +80,7 @@ static const struct nf_hook_ops nf_nat_ipv4_ops[] = {
 	{
 		.hook		= iptable_nat_ipv4_out,
 		.pf		= NFPROTO_IPV4,
+		.nat_hook	= true,
 		.hooknum	= NF_INET_POST_ROUTING,
 		.priority	= NF_IP_PRI_NAT_SRC,
 	},
@@ -86,6 +88,7 @@ static const struct nf_hook_ops nf_nat_ipv4_ops[] = {
 	{
 		.hook		= iptable_nat_ipv4_local_fn,
 		.pf		= NFPROTO_IPV4,
+		.nat_hook	= true,
 		.hooknum	= NF_INET_LOCAL_OUT,
 		.priority	= NF_IP_PRI_NAT_DST,
 	},
@@ -93,6 +96,7 @@ static const struct nf_hook_ops nf_nat_ipv4_ops[] = {
 	{
 		.hook		= iptable_nat_ipv4_fn,
 		.pf		= NFPROTO_IPV4,
+		.nat_hook	= true,
 		.hooknum	= NF_INET_LOCAL_IN,
 		.priority	= NF_IP_PRI_NAT_SRC,
 	},
diff --git a/net/ipv4/netfilter/iptable_raw.c b/net/ipv4/netfilter/iptable_raw.c
index 2642ecd2645c..960625aabf04 100644
--- a/net/ipv4/netfilter/iptable_raw.c
+++ b/net/ipv4/netfilter/iptable_raw.c
@@ -3,6 +3,7 @@
  *
  * Copyright (C) 2003 Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>
  */
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
 #include <linux/module.h>
 #include <linux/netfilter_ipv4/ip_tables.h>
 #include <linux/slab.h>
@@ -12,6 +13,10 @@
 
 static int __net_init iptable_raw_table_init(struct net *net);
 
+static bool raw_before_defrag __read_mostly;
+MODULE_PARM_DESC(raw_before_defrag, "Enable raw table before defrag");
+module_param(raw_before_defrag, bool, 0000);
+
 static const struct xt_table packet_raw = {
 	.name = "raw",
 	.valid_hooks =  RAW_VALID_HOOKS,
@@ -21,17 +26,20 @@ static const struct xt_table packet_raw = {
 	.table_init = iptable_raw_table_init,
 };
 
+static const struct xt_table packet_raw_before_defrag = {
+	.name = "raw",
+	.valid_hooks =  RAW_VALID_HOOKS,
+	.me = THIS_MODULE,
+	.af = NFPROTO_IPV4,
+	.priority = NF_IP_PRI_RAW_BEFORE_DEFRAG,
+	.table_init = iptable_raw_table_init,
+};
+
 /* The work comes in here from netfilter.c. */
 static unsigned int
 iptable_raw_hook(void *priv, struct sk_buff *skb,
 		 const struct nf_hook_state *state)
 {
-	if (state->hook == NF_INET_LOCAL_OUT &&
-	    (skb->len < sizeof(struct iphdr) ||
-	     ip_hdrlen(skb) < sizeof(struct iphdr)))
-		/* root is playing with raw sockets. */
-		return NF_ACCEPT;
-
 	return ipt_do_table(skb, state, state->net->ipv4.iptable_raw);
 }
 
@@ -40,15 +48,19 @@ static struct nf_hook_ops *rawtable_ops __read_mostly;
 static int __net_init iptable_raw_table_init(struct net *net)
 {
 	struct ipt_replace *repl;
+	const struct xt_table *table = &packet_raw;
 	int ret;
 
+	if (raw_before_defrag)
+		table = &packet_raw_before_defrag;
+
 	if (net->ipv4.iptable_raw)
 		return 0;
 
-	repl = ipt_alloc_initial_table(&packet_raw);
+	repl = ipt_alloc_initial_table(table);
 	if (repl == NULL)
 		return -ENOMEM;
-	ret = ipt_register_table(net, &packet_raw, repl, rawtable_ops,
+	ret = ipt_register_table(net, table, repl, rawtable_ops,
 				 &net->ipv4.iptable_raw);
 	kfree(repl);
 	return ret;
@@ -69,8 +81,15 @@ static struct pernet_operations iptable_raw_net_ops = {
 static int __init iptable_raw_init(void)
 {
 	int ret;
+	const struct xt_table *table = &packet_raw;
+
+	if (raw_before_defrag) {
+		table = &packet_raw_before_defrag;
+
+		pr_info("Enabling raw table before defrag\n");
+	}
 
-	rawtable_ops = xt_hook_ops_alloc(&packet_raw, iptable_raw_hook);
+	rawtable_ops = xt_hook_ops_alloc(table, iptable_raw_hook);
 	if (IS_ERR(rawtable_ops))
 		return PTR_ERR(rawtable_ops);
 
diff --git a/net/ipv4/netfilter/iptable_security.c b/net/ipv4/netfilter/iptable_security.c
index ff226596e4b5..e5379fe57b64 100644
--- a/net/ipv4/netfilter/iptable_security.c
+++ b/net/ipv4/netfilter/iptable_security.c
@@ -43,12 +43,6 @@ static unsigned int
 iptable_security_hook(void *priv, struct sk_buff *skb,
 		      const struct nf_hook_state *state)
 {
-	if (state->hook == NF_INET_LOCAL_OUT &&
-	    (skb->len < sizeof(struct iphdr) ||
-	     ip_hdrlen(skb) < sizeof(struct iphdr)))
-		/* Somebody is playing with raw sockets. */
-		return NF_ACCEPT;
-
 	return ipt_do_table(skb, state, state->net->ipv4.iptable_security);
 }
 
diff --git a/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4.c b/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4.c
index 89af9d88ca21..de213a397ea8 100644
--- a/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4.c
+++ b/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4.c
@@ -154,11 +154,6 @@ static unsigned int ipv4_conntrack_local(void *priv,
 					 struct sk_buff *skb,
 					 const struct nf_hook_state *state)
 {
-	/* root is playing with raw sockets. */
-	if (skb->len < sizeof(struct iphdr) ||
-	    ip_hdrlen(skb) < sizeof(struct iphdr))
-		return NF_ACCEPT;
-
 	if (ip_is_fragment(ip_hdr(skb))) /* IP_NODEFRAG setsockopt set */
 		return NF_ACCEPT;
 
@@ -368,7 +363,7 @@ MODULE_ALIAS("nf_conntrack-" __stringify(AF_INET));
 MODULE_ALIAS("ip_conntrack");
 MODULE_LICENSE("GPL");
 
-static struct nf_conntrack_l4proto *builtin_l4proto4[] = {
+static const struct nf_conntrack_l4proto * const builtin_l4proto4[] = {
 	&nf_conntrack_l4proto_tcp4,
 	&nf_conntrack_l4proto_udp4,
 	&nf_conntrack_l4proto_icmp,
diff --git a/net/ipv4/netfilter/nf_conntrack_proto_icmp.c b/net/ipv4/netfilter/nf_conntrack_proto_icmp.c
index 1849fedd9b81..5c15beafa711 100644
--- a/net/ipv4/netfilter/nf_conntrack_proto_icmp.c
+++ b/net/ipv4/netfilter/nf_conntrack_proto_icmp.c
@@ -22,7 +22,7 @@
 #include <net/netfilter/nf_conntrack_zones.h>
 #include <net/netfilter/nf_log.h>
 
-static unsigned int nf_ct_icmp_timeout __read_mostly = 30*HZ;
+static const unsigned int nf_ct_icmp_timeout = 30*HZ;
 
 static inline struct nf_icmp_net *icmp_pernet(struct net *net)
 {
@@ -351,7 +351,7 @@ static struct nf_proto_net *icmp_get_net_proto(struct net *net)
 	return &net->ct.nf_ct_proto.icmp.pn;
 }
 
-struct nf_conntrack_l4proto nf_conntrack_l4proto_icmp __read_mostly =
+const struct nf_conntrack_l4proto nf_conntrack_l4proto_icmp =
 {
 	.l3proto		= PF_INET,
 	.l4proto		= IPPROTO_ICMP,
diff --git a/net/ipv4/netfilter/nf_defrag_ipv4.c b/net/ipv4/netfilter/nf_defrag_ipv4.c
index 37fe1616ca0b..a0d3ad60a411 100644
--- a/net/ipv4/netfilter/nf_defrag_ipv4.c
+++ b/net/ipv4/netfilter/nf_defrag_ipv4.c
@@ -78,6 +78,8 @@ static unsigned int ipv4_conntrack_defrag(void *priv,
 	if (skb_nfct(skb) && !nf_ct_is_template((struct nf_conn *)skb_nfct(skb)))
 		return NF_ACCEPT;
 #endif
+	if (skb->_nfct == IP_CT_UNTRACKED)
+		return NF_ACCEPT;
 #endif
 	/* Gather fragments. */
 	if (ip_is_fragment(ip_hdr(skb))) {
diff --git a/net/ipv4/netfilter/nf_flow_table_ipv4.c b/net/ipv4/netfilter/nf_flow_table_ipv4.c
new file mode 100644
index 000000000000..b2d01eb25f2c
--- /dev/null
+++ b/net/ipv4/netfilter/nf_flow_table_ipv4.c
@@ -0,0 +1,284 @@
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/netfilter.h>
+#include <linux/rhashtable.h>
+#include <linux/ip.h>
+#include <linux/netdevice.h>
+#include <net/ip.h>
+#include <net/neighbour.h>
+#include <net/netfilter/nf_flow_table.h>
+#include <net/netfilter/nf_tables.h>
+/* For layer 4 checksum field offset. */
+#include <linux/tcp.h>
+#include <linux/udp.h>
+
+static int nf_flow_nat_ip_tcp(struct sk_buff *skb, unsigned int thoff,
+			      __be32 addr, __be32 new_addr)
+{
+	struct tcphdr *tcph;
+
+	if (!pskb_may_pull(skb, thoff + sizeof(*tcph)) ||
+	    skb_try_make_writable(skb, thoff + sizeof(*tcph)))
+		return -1;
+
+	tcph = (void *)(skb_network_header(skb) + thoff);
+	inet_proto_csum_replace4(&tcph->check, skb, addr, new_addr, true);
+
+	return 0;
+}
+
+static int nf_flow_nat_ip_udp(struct sk_buff *skb, unsigned int thoff,
+			      __be32 addr, __be32 new_addr)
+{
+	struct udphdr *udph;
+
+	if (!pskb_may_pull(skb, thoff + sizeof(*udph)) ||
+	    skb_try_make_writable(skb, thoff + sizeof(*udph)))
+		return -1;
+
+	udph = (void *)(skb_network_header(skb) + thoff);
+	if (udph->check || skb->ip_summed == CHECKSUM_PARTIAL) {
+		inet_proto_csum_replace4(&udph->check, skb, addr,
+					 new_addr, true);
+		if (!udph->check)
+			udph->check = CSUM_MANGLED_0;
+	}
+
+	return 0;
+}
+
+static int nf_flow_nat_ip_l4proto(struct sk_buff *skb, struct iphdr *iph,
+				  unsigned int thoff, __be32 addr,
+				  __be32 new_addr)
+{
+	switch (iph->protocol) {
+	case IPPROTO_TCP:
+		if (nf_flow_nat_ip_tcp(skb, thoff, addr, new_addr) < 0)
+			return -1;	/* NF_DROP is 0; callers test for < 0 */
+		break;
+	case IPPROTO_UDP:
+		if (nf_flow_nat_ip_udp(skb, thoff, addr, new_addr) < 0)
+			return -1;	/* NF_DROP is 0; callers test for < 0 */
+		break;
+	}
+
+	return 0;
+}
+
+static int nf_flow_snat_ip(const struct flow_offload *flow, struct sk_buff *skb,
+			   struct iphdr *iph, unsigned int thoff,
+			   enum flow_offload_tuple_dir dir)
+{
+	__be32 addr, new_addr;
+
+	switch (dir) {
+	case FLOW_OFFLOAD_DIR_ORIGINAL:
+		addr = iph->saddr;
+		new_addr = flow->tuplehash[FLOW_OFFLOAD_DIR_REPLY].tuple.dst_v4.s_addr;
+		iph->saddr = new_addr;
+		break;
+	case FLOW_OFFLOAD_DIR_REPLY:
+		addr = iph->daddr;
+		new_addr = flow->tuplehash[FLOW_OFFLOAD_DIR_ORIGINAL].tuple.src_v4.s_addr;
+		iph->daddr = new_addr;
+		break;
+	default:
+		return -1;
+	}
+	csum_replace4(&iph->check, addr, new_addr);
+
+	return nf_flow_nat_ip_l4proto(skb, iph, thoff, addr, new_addr);
+}
+
+static int nf_flow_dnat_ip(const struct flow_offload *flow, struct sk_buff *skb,
+			   struct iphdr *iph, unsigned int thoff,
+			   enum flow_offload_tuple_dir dir)
+{
+	__be32 addr, new_addr;
+
+	switch (dir) {
+	case FLOW_OFFLOAD_DIR_ORIGINAL:
+		addr = iph->daddr;
+		new_addr = flow->tuplehash[FLOW_OFFLOAD_DIR_REPLY].tuple.src_v4.s_addr;
+		iph->daddr = new_addr;
+		break;
+	case FLOW_OFFLOAD_DIR_REPLY:
+		addr = iph->saddr;
+		new_addr = flow->tuplehash[FLOW_OFFLOAD_DIR_ORIGINAL].tuple.dst_v4.s_addr;
+		iph->saddr = new_addr;
+		break;
+	default:
+		return -1;
+	}
+
+	return nf_flow_nat_ip_l4proto(skb, iph, thoff, addr, new_addr);
+}
+
+static int nf_flow_nat_ip(const struct flow_offload *flow, struct sk_buff *skb,
+			  enum flow_offload_tuple_dir dir)
+{
+	struct iphdr *iph = ip_hdr(skb);
+	unsigned int thoff = iph->ihl * 4;
+
+	if (flow->flags & FLOW_OFFLOAD_SNAT &&
+	    (nf_flow_snat_port(flow, skb, thoff, iph->protocol, dir) < 0 ||
+	     nf_flow_snat_ip(flow, skb, iph, thoff, dir) < 0))
+		return -1;
+	if (flow->flags & FLOW_OFFLOAD_DNAT &&
+	    (nf_flow_dnat_port(flow, skb, thoff, iph->protocol, dir) < 0 ||
+	     nf_flow_dnat_ip(flow, skb, iph, thoff, dir) < 0))
+		return -1;
+
+	return 0;
+}
+
+static bool ip_has_options(unsigned int thoff)
+{
+	return thoff != sizeof(struct iphdr);
+}
+
+static int nf_flow_tuple_ip(struct sk_buff *skb, const struct net_device *dev,
+			    struct flow_offload_tuple *tuple)
+{
+	struct flow_ports *ports;
+	unsigned int thoff;
+	struct iphdr *iph;
+
+	if (!pskb_may_pull(skb, sizeof(*iph)))
+		return -1;
+
+	iph = ip_hdr(skb);
+	thoff = iph->ihl * 4;
+
+	if (ip_is_fragment(iph) ||
+	    unlikely(ip_has_options(thoff)))
+		return -1;
+
+	if (iph->protocol != IPPROTO_TCP &&
+	    iph->protocol != IPPROTO_UDP)
+		return -1;
+
+	if (!pskb_may_pull(skb, thoff + sizeof(*ports)))
+		return -1;
+
+	iph = ip_hdr(skb);	/* pskb_may_pull() may have moved the header */
+	ports = (struct flow_ports *)(skb_network_header(skb) + thoff);
+
+	tuple->src_v4.s_addr	= iph->saddr;
+	tuple->dst_v4.s_addr	= iph->daddr;
+	tuple->src_port		= ports->source;
+	tuple->dst_port		= ports->dest;
+	tuple->l3proto		= AF_INET;
+	tuple->l4proto		= iph->protocol;
+	tuple->iifidx		= dev->ifindex;
+
+	return 0;
+}
+
+/* Based on ip_exceeds_mtu(). */
+static bool __nf_flow_exceeds_mtu(const struct sk_buff *skb, unsigned int mtu)
+{
+	if (skb->len <= mtu)
+		return false;
+
+	if ((ip_hdr(skb)->frag_off & htons(IP_DF)) == 0)
+		return false;
+
+	if (skb_is_gso(skb) && skb_gso_validate_mtu(skb, mtu))
+		return false;
+
+	return true;
+}
+
+static bool nf_flow_exceeds_mtu(struct sk_buff *skb, const struct rtable *rt)
+{
+	u32 mtu;
+
+	mtu = ip_dst_mtu_maybe_forward(&rt->dst, true);
+	if (__nf_flow_exceeds_mtu(skb, mtu))
+		return true;
+
+	return false;
+}
+
+unsigned int
+nf_flow_offload_ip_hook(void *priv, struct sk_buff *skb,
+			const struct nf_hook_state *state)
+{
+	struct flow_offload_tuple_rhash *tuplehash;
+	struct nf_flowtable *flow_table = priv;
+	struct flow_offload_tuple tuple = {};
+	enum flow_offload_tuple_dir dir;
+	struct flow_offload *flow;
+	struct net_device *outdev;
+	const struct rtable *rt;
+	struct iphdr *iph;
+	__be32 nexthop;
+
+	if (skb->protocol != htons(ETH_P_IP))
+		return NF_ACCEPT;
+
+	if (nf_flow_tuple_ip(skb, state->in, &tuple) < 0)
+		return NF_ACCEPT;
+
+	tuplehash = flow_offload_lookup(flow_table, &tuple);
+	if (tuplehash == NULL)
+		return NF_ACCEPT;
+
+	outdev = dev_get_by_index_rcu(state->net, tuplehash->tuple.oifidx);
+	if (!outdev)
+		return NF_ACCEPT;
+
+	dir = tuplehash->tuple.dir;
+	flow = container_of(tuplehash, struct flow_offload, tuplehash[dir]);
+
+	rt = (const struct rtable *)flow->tuplehash[dir].tuple.dst_cache;
+	if (unlikely(nf_flow_exceeds_mtu(skb, rt)))
+		return NF_ACCEPT;
+
+	if (skb_try_make_writable(skb, sizeof(*iph)))
+		return NF_DROP;
+
+	if (flow->flags & (FLOW_OFFLOAD_SNAT | FLOW_OFFLOAD_DNAT) &&
+	    nf_flow_nat_ip(flow, skb, dir) < 0)
+		return NF_DROP;
+
+	flow->timeout = (u32)jiffies + NF_FLOW_TIMEOUT;
+	iph = ip_hdr(skb);
+	ip_decrease_ttl(iph);
+
+	skb->dev = outdev;
+	nexthop = rt_nexthop(rt, flow->tuplehash[!dir].tuple.src_v4.s_addr);
+	neigh_xmit(NEIGH_ARP_TABLE, outdev, &nexthop, skb);
+
+	return NF_STOLEN;
+}
+EXPORT_SYMBOL_GPL(nf_flow_offload_ip_hook);
+
+static struct nf_flowtable_type flowtable_ipv4 = {
+	.family		= NFPROTO_IPV4,
+	.params		= &nf_flow_offload_rhash_params,
+	.gc		= nf_flow_offload_work_gc,
+	.hook		= nf_flow_offload_ip_hook,
+	.owner		= THIS_MODULE,
+};
+
+static int __init nf_flow_ipv4_module_init(void)
+{
+	nft_register_flowtable_type(&flowtable_ipv4);
+
+	return 0;
+}
+
+static void __exit nf_flow_ipv4_module_exit(void)
+{
+	nft_unregister_flowtable_type(&flowtable_ipv4);
+}
+
+module_init(nf_flow_ipv4_module_init);
+module_exit(nf_flow_ipv4_module_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Pablo Neira Ayuso <pablo@netfilter.org>");
+MODULE_ALIAS_NF_FLOWTABLE(AF_INET);
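
This new file is the IPv4 software fast path for the flowtable infrastructure added in this merge: the ingress hook looks up the packet's tuple, applies any recorded NAT, decrements the TTL and hands the skb straight to neigh_xmit(), bypassing the regular forwarding path. Flows enter the table from rule context (the nftables "flow offload" statement); a minimal sketch of that side, assuming the flow_offload_alloc()/flow_offload_add() helpers from the companion nf_flow_table core patch, with error handling and route setup elided:

	static int offload_established(struct nf_flowtable *ft,
				       struct nf_conn *ct,
				       struct nf_flow_route *route)
	{
		struct flow_offload *flow;

		flow = flow_offload_alloc(ct, route);	/* snapshots both tuples */
		if (!flow)
			return -ENOMEM;

		return flow_offload_add(ft, flow);	/* now visible to the hook */
	}
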
diff --git a/net/ipv4/netfilter/nf_nat_l3proto_ipv4.c b/net/ipv4/netfilter/nf_nat_l3proto_ipv4.c
index 0443ca4120b0..f7ff6a364d7b 100644
--- a/net/ipv4/netfilter/nf_nat_l3proto_ipv4.c
+++ b/net/ipv4/netfilter/nf_nat_l3proto_ipv4.c
@@ -356,11 +356,6 @@ nf_nat_ipv4_out(void *priv, struct sk_buff *skb,
 #endif
 	unsigned int ret;
 
-	/* root is playing with raw sockets. */
-	if (skb->len < sizeof(struct iphdr) ||
-	    ip_hdrlen(skb) < sizeof(struct iphdr))
-		return NF_ACCEPT;
-
 	ret = nf_nat_ipv4_fn(priv, skb, state, do_chain);
 #ifdef CONFIG_XFRM
 	if (ret != NF_DROP && ret != NF_STOLEN &&
@@ -396,11 +391,6 @@ nf_nat_ipv4_local_fn(void *priv, struct sk_buff *skb,
 	unsigned int ret;
 	int err;
 
-	/* root is playing with raw sockets. */
-	if (skb->len < sizeof(struct iphdr) ||
-	    ip_hdrlen(skb) < sizeof(struct iphdr))
-		return NF_ACCEPT;
-
 	ret = nf_nat_ipv4_fn(priv, skb, state, do_chain);
 	if (ret != NF_DROP && ret != NF_STOLEN &&
 	    (ct = nf_ct_get(skb, &ctinfo)) != NULL) {
diff --git a/net/ipv4/netfilter/nf_nat_snmp_basic.asn1 b/net/ipv4/netfilter/nf_nat_snmp_basic.asn1
new file mode 100644
index 000000000000..24b73268f362
--- /dev/null
+++ b/net/ipv4/netfilter/nf_nat_snmp_basic.asn1
@@ -0,0 +1,177 @@
+Message ::=
+	SEQUENCE {
+		version
+			INTEGER ({snmp_version}),
+
+		community
+			OCTET STRING,
+
+		pdu
+			PDUs
+	}
+
+
+ObjectName ::=
+	OBJECT IDENTIFIER
+
+ObjectSyntax ::=
+	CHOICE {
+		simple
+			SimpleSyntax,
+
+		application-wide
+			ApplicationSyntax
+	}
+
+SimpleSyntax ::=
+	CHOICE {
+		integer-value
+			INTEGER,
+
+		string-value
+			OCTET STRING,
+
+		objectID-value
+			OBJECT IDENTIFIER
+	}
+
+ApplicationSyntax ::=
+	CHOICE {
+		ipAddress-value
+			IpAddress,
+
+		counter-value
+			Counter32,
+
+		timeticks-value
+			TimeTicks,
+
+		arbitrary-value
+			Opaque,
+
+		big-counter-value
+			Counter64,
+
+		unsigned-integer-value
+			Unsigned32
+	}
+
+IpAddress ::=
+	[APPLICATION 0]
+		IMPLICIT OCTET STRING OPTIONAL ({snmp_helper})
+
+Counter32 ::=
+	[APPLICATION 1]
+		IMPLICIT INTEGER OPTIONAL
+
+Unsigned32 ::=
+	[APPLICATION 2]
+		IMPLICIT INTEGER OPTIONAL
+
+Gauge32 ::= Unsigned32 OPTIONAL
+
+TimeTicks ::=
+	[APPLICATION 3]
+		IMPLICIT INTEGER OPTIONAL
+
+Opaque ::=
+	[APPLICATION 4]
+		IMPLICIT OCTET STRING OPTIONAL
+
+Counter64 ::=
+	[APPLICATION 6]
+		IMPLICIT INTEGER OPTIONAL
+
+PDUs ::=
+	CHOICE {
+		get-request
+			GetRequest-PDU,
+
+		get-next-request
+			GetNextRequest-PDU,
+
+		get-bulk-request
+			GetBulkRequest-PDU,
+
+		response
+			Response-PDU,
+
+		set-request
+			SetRequest-PDU,
+
+		inform-request
+			InformRequest-PDU,
+
+		snmpV2-trap
+			SNMPv2-Trap-PDU,
+
+		report
+			Report-PDU
+	}
+
+GetRequest-PDU ::=
+	[0] IMPLICIT PDU OPTIONAL
+
+GetNextRequest-PDU ::=
+	[1] IMPLICIT PDU OPTIONAL
+
+Response-PDU ::=
+	[2] IMPLICIT PDU OPTIONAL
+
+SetRequest-PDU ::=
+	[3] IMPLICIT PDU OPTIONAL
+
+-- [4] is obsolete
+
+GetBulkRequest-PDU ::=
+	[5] IMPLICIT PDU OPTIONAL
+
+InformRequest-PDU ::=
+	[6] IMPLICIT PDU OPTIONAL
+
+SNMPv2-Trap-PDU ::=
+	[7] IMPLICIT PDU OPTIONAL
+
+Report-PDU ::=
+	[8] IMPLICIT PDU OPTIONAL
+
+PDU ::=
+	SEQUENCE {
+		request-id
+			INTEGER,
+
+		error-status
+			INTEGER,
+
+		error-index
+			INTEGER,
+
+		variable-bindings
+			VarBindList
+	}
+
+
+VarBind ::=
+	SEQUENCE {
+		name
+			ObjectName,
+
+	CHOICE {
+		value
+			ObjectSyntax,
+
+		unSpecified
+			NULL,
+
+		noSuchObject
+			[0] IMPLICIT NULL,
+
+		noSuchInstance
+			[1] IMPLICIT NULL,
+
+		endOfMibView
+			[2] IMPLICIT NULL
+	}
+}
+
+VarBindList ::= SEQUENCE OF VarBind
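
This grammar replaces the hand-written BER parser deleted below: at build time scripts/asn1_compiler generates nf_nat_snmp_basic-asn1.{c,h} from it, and the brace tokens ({snmp_version}, {snmp_helper}) bind grammar rules to C callbacks. A hedged sketch of such a callback, following the asn1_action_t signature the generated decoder expects (the name is illustrative; the real callbacks are snmp_version() and snmp_helper() in nf_nat_snmp_basic_main.c below):

	static int my_action(void *context, size_t hdrlen, unsigned char tag,
			     const void *data, size_t datalen)
	{
		/* data/datalen cover the body of the element that matched
		 * the annotated grammar rule; returning a negative errno
		 * aborts the decoder walk. */
		pr_debug("matched tag %u, %zu byte(s)\n", tag, datalen);
		return 1;
	}
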
diff --git a/net/ipv4/netfilter/nf_nat_snmp_basic.c b/net/ipv4/netfilter/nf_nat_snmp_basic.c
deleted file mode 100644
index d5b1e0b3f687..000000000000
--- a/net/ipv4/netfilter/nf_nat_snmp_basic.c
+++ /dev/null
@@ -1,1286 +0,0 @@
-/*
- * nf_nat_snmp_basic.c
- *
- * Basic SNMP Application Layer Gateway
- *
- * This IP NAT module is intended for use with SNMP network
- * discovery and monitoring applications where target networks use
- * conflicting private address realms.
- *
- * Static NAT is used to remap the networks from the view of the network
- * management system at the IP layer, and this module remaps some application
- * layer addresses to match.
- *
- * The simplest form of ALG is performed, where only tagged IP addresses
- * are modified.  The module does not need to be MIB aware and only scans
- * messages at the ASN.1/BER level.
- *
- * Currently, only SNMPv1 and SNMPv2 are supported.
- *
- * More information on ALG and associated issues can be found in
- * RFC 2962
- *
- * The ASN.1/BER parsing code is derived from the gxsnmp package by Gregory
- * McLean & Jochen Friedrich, stripped down for use in the kernel.
- *
- * Copyright (c) 2000 RP Internet (www.rpi.net.au).
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, see <http://www.gnu.org/licenses/>.
- *
- * Author: James Morris <jmorris@intercode.com.au>
- *
- * Copyright (c) 2006-2010 Patrick McHardy <kaber@trash.net>
- */
-#include <linux/module.h>
-#include <linux/moduleparam.h>
-#include <linux/types.h>
-#include <linux/kernel.h>
-#include <linux/slab.h>
-#include <linux/in.h>
-#include <linux/ip.h>
-#include <linux/udp.h>
-#include <net/checksum.h>
-#include <net/udp.h>
-
-#include <net/netfilter/nf_nat.h>
-#include <net/netfilter/nf_conntrack_expect.h>
-#include <net/netfilter/nf_conntrack_helper.h>
-#include <net/netfilter/nf_nat_helper.h>
-#include <linux/netfilter/nf_conntrack_snmp.h>
-
-MODULE_LICENSE("GPL");
-MODULE_AUTHOR("James Morris <jmorris@intercode.com.au>");
-MODULE_DESCRIPTION("Basic SNMP Application Layer Gateway");
-MODULE_ALIAS("ip_nat_snmp_basic");
-
-#define SNMP_PORT 161
-#define SNMP_TRAP_PORT 162
-#define NOCT1(n) (*(u8 *)(n))
-
-static int debug;
-static DEFINE_SPINLOCK(snmp_lock);
-
-/*
- * Application layer address mapping mimics the NAT mapping, but
- * only for the first octet in this case (a more flexible system
- * can be implemented if needed).
- */
-struct oct1_map
-{
-	u_int8_t from;
-	u_int8_t to;
-};
-
-
-/*****************************************************************************
- *
- * Basic ASN.1 decoding routines (gxsnmp author Dirk Wisse)
- *
- *****************************************************************************/
-
-/* Class */
-#define ASN1_UNI	0	/* Universal */
-#define ASN1_APL	1	/* Application */
-#define ASN1_CTX	2	/* Context */
-#define ASN1_PRV	3	/* Private */
-
-/* Tag */
-#define ASN1_EOC	0	/* End Of Contents */
-#define ASN1_BOL	1	/* Boolean */
-#define ASN1_INT	2	/* Integer */
-#define ASN1_BTS	3	/* Bit String */
-#define ASN1_OTS	4	/* Octet String */
-#define ASN1_NUL	5	/* Null */
-#define ASN1_OJI	6	/* Object Identifier  */
-#define ASN1_OJD	7	/* Object Description */
-#define ASN1_EXT	8	/* External */
-#define ASN1_SEQ	16	/* Sequence */
-#define ASN1_SET	17	/* Set */
-#define ASN1_NUMSTR	18	/* Numerical String */
-#define ASN1_PRNSTR	19	/* Printable String */
-#define ASN1_TEXSTR	20	/* Teletext String */
-#define ASN1_VIDSTR	21	/* Video String */
-#define ASN1_IA5STR	22	/* IA5 String */
-#define ASN1_UNITIM	23	/* Universal Time */
-#define ASN1_GENTIM	24	/* General Time */
-#define ASN1_GRASTR	25	/* Graphical String */
-#define ASN1_VISSTR	26	/* Visible String */
-#define ASN1_GENSTR	27	/* General String */
-
-/* Primitive / Constructed methods*/
-#define ASN1_PRI	0	/* Primitive */
-#define ASN1_CON	1	/* Constructed */
-
-/*
- * Error codes.
- */
-#define ASN1_ERR_NOERROR		0
-#define ASN1_ERR_DEC_EMPTY		2
-#define ASN1_ERR_DEC_EOC_MISMATCH	3
-#define ASN1_ERR_DEC_LENGTH_MISMATCH	4
-#define ASN1_ERR_DEC_BADVALUE		5
-
-/*
- * ASN.1 context.
- */
-struct asn1_ctx
-{
-	int error;			/* Error condition */
-	unsigned char *pointer;		/* Octet just to be decoded */
-	unsigned char *begin;		/* First octet */
-	unsigned char *end;		/* Octet after last octet */
-};
-
-/*
- * Octet string (not null terminated)
- */
-struct asn1_octstr
-{
-	unsigned char *data;
-	unsigned int len;
-};
-
-static void asn1_open(struct asn1_ctx *ctx,
-		      unsigned char *buf,
-		      unsigned int len)
-{
-	ctx->begin = buf;
-	ctx->end = buf + len;
-	ctx->pointer = buf;
-	ctx->error = ASN1_ERR_NOERROR;
-}
-
-static unsigned char asn1_octet_decode(struct asn1_ctx *ctx, unsigned char *ch)
-{
-	if (ctx->pointer >= ctx->end) {
-		ctx->error = ASN1_ERR_DEC_EMPTY;
-		return 0;
-	}
-	*ch = *(ctx->pointer)++;
-	return 1;
-}
-
-static unsigned char asn1_tag_decode(struct asn1_ctx *ctx, unsigned int *tag)
-{
-	unsigned char ch;
-
-	*tag = 0;
-
-	do
-	{
-		if (!asn1_octet_decode(ctx, &ch))
-			return 0;
-		*tag <<= 7;
-		*tag |= ch & 0x7F;
-	} while ((ch & 0x80) == 0x80);
-	return 1;
-}
-
-static unsigned char asn1_id_decode(struct asn1_ctx *ctx,
-				    unsigned int *cls,
-				    unsigned int *con,
-				    unsigned int *tag)
-{
-	unsigned char ch;
-
-	if (!asn1_octet_decode(ctx, &ch))
-		return 0;
-
-	*cls = (ch & 0xC0) >> 6;
-	*con = (ch & 0x20) >> 5;
-	*tag = (ch & 0x1F);
-
-	if (*tag == 0x1F) {
-		if (!asn1_tag_decode(ctx, tag))
-			return 0;
-	}
-	return 1;
-}
-
-static unsigned char asn1_length_decode(struct asn1_ctx *ctx,
-					unsigned int *def,
-					unsigned int *len)
-{
-	unsigned char ch, cnt;
-
-	if (!asn1_octet_decode(ctx, &ch))
-		return 0;
-
-	if (ch == 0x80)
-		*def = 0;
-	else {
-		*def = 1;
-
-		if (ch < 0x80)
-			*len = ch;
-		else {
-			cnt = ch & 0x7F;
-			*len = 0;
-
-			while (cnt > 0) {
-				if (!asn1_octet_decode(ctx, &ch))
-					return 0;
-				*len <<= 8;
-				*len |= ch;
-				cnt--;
-			}
-		}
-	}
-
-	/* don't trust len bigger than ctx buffer */
-	if (*len > ctx->end - ctx->pointer)
-		return 0;
-
-	return 1;
-}
-
-static unsigned char asn1_header_decode(struct asn1_ctx *ctx,
-					unsigned char **eoc,
-					unsigned int *cls,
-					unsigned int *con,
-					unsigned int *tag)
-{
-	unsigned int def, len;
-
-	if (!asn1_id_decode(ctx, cls, con, tag))
-		return 0;
-
-	def = len = 0;
-	if (!asn1_length_decode(ctx, &def, &len))
-		return 0;
-
-	/* primitive shall be definite, indefinite shall be constructed */
-	if (*con == ASN1_PRI && !def)
-		return 0;
-
-	if (def)
-		*eoc = ctx->pointer + len;
-	else
-		*eoc = NULL;
-	return 1;
-}
-
-static unsigned char asn1_eoc_decode(struct asn1_ctx *ctx, unsigned char *eoc)
-{
-	unsigned char ch;
-
-	if (eoc == NULL) {
-		if (!asn1_octet_decode(ctx, &ch))
-			return 0;
-
-		if (ch != 0x00) {
-			ctx->error = ASN1_ERR_DEC_EOC_MISMATCH;
-			return 0;
-		}
-
-		if (!asn1_octet_decode(ctx, &ch))
-			return 0;
-
-		if (ch != 0x00) {
-			ctx->error = ASN1_ERR_DEC_EOC_MISMATCH;
-			return 0;
-		}
-		return 1;
-	} else {
-		if (ctx->pointer != eoc) {
-			ctx->error = ASN1_ERR_DEC_LENGTH_MISMATCH;
-			return 0;
-		}
-		return 1;
-	}
-}
-
-static unsigned char asn1_null_decode(struct asn1_ctx *ctx, unsigned char *eoc)
-{
-	ctx->pointer = eoc;
-	return 1;
-}
-
-static unsigned char asn1_long_decode(struct asn1_ctx *ctx,
-				      unsigned char *eoc,
-				      long *integer)
-{
-	unsigned char ch;
-	unsigned int  len;
-
-	if (!asn1_octet_decode(ctx, &ch))
-		return 0;
-
-	*integer = (signed char) ch;
-	len = 1;
-
-	while (ctx->pointer < eoc) {
-		if (++len > sizeof (long)) {
-			ctx->error = ASN1_ERR_DEC_BADVALUE;
-			return 0;
-		}
-
-		if (!asn1_octet_decode(ctx, &ch))
-			return 0;
-
-		*integer <<= 8;
-		*integer |= ch;
-	}
-	return 1;
-}
-
-static unsigned char asn1_uint_decode(struct asn1_ctx *ctx,
-				      unsigned char *eoc,
-				      unsigned int *integer)
-{
-	unsigned char ch;
-	unsigned int  len;
-
-	if (!asn1_octet_decode(ctx, &ch))
-		return 0;
-
-	*integer = ch;
-	if (ch == 0) len = 0;
-	else len = 1;
-
-	while (ctx->pointer < eoc) {
-		if (++len > sizeof (unsigned int)) {
-			ctx->error = ASN1_ERR_DEC_BADVALUE;
-			return 0;
-		}
-
-		if (!asn1_octet_decode(ctx, &ch))
-			return 0;
-
-		*integer <<= 8;
-		*integer |= ch;
-	}
-	return 1;
-}
-
-static unsigned char asn1_ulong_decode(struct asn1_ctx *ctx,
-				       unsigned char *eoc,
-				       unsigned long *integer)
-{
-	unsigned char ch;
-	unsigned int  len;
-
-	if (!asn1_octet_decode(ctx, &ch))
-		return 0;
-
-	*integer = ch;
-	if (ch == 0) len = 0;
-	else len = 1;
-
-	while (ctx->pointer < eoc) {
-		if (++len > sizeof (unsigned long)) {
-			ctx->error = ASN1_ERR_DEC_BADVALUE;
-			return 0;
-		}
-
-		if (!asn1_octet_decode(ctx, &ch))
-			return 0;
-
-		*integer <<= 8;
-		*integer |= ch;
-	}
-	return 1;
-}
-
-static unsigned char asn1_octets_decode(struct asn1_ctx *ctx,
-					unsigned char *eoc,
-					unsigned char **octets,
-					unsigned int *len)
-{
-	unsigned char *ptr;
-
-	*len = 0;
-
-	*octets = kmalloc(eoc - ctx->pointer, GFP_ATOMIC);
-	if (*octets == NULL)
-		return 0;
-
-	ptr = *octets;
-	while (ctx->pointer < eoc) {
-		if (!asn1_octet_decode(ctx, ptr++)) {
-			kfree(*octets);
-			*octets = NULL;
-			return 0;
-		}
-		(*len)++;
-	}
-	return 1;
-}
-
-static unsigned char asn1_subid_decode(struct asn1_ctx *ctx,
-				       unsigned long *subid)
-{
-	unsigned char ch;
-
-	*subid = 0;
-
-	do {
-		if (!asn1_octet_decode(ctx, &ch))
-			return 0;
-
-		*subid <<= 7;
-		*subid |= ch & 0x7F;
-	} while ((ch & 0x80) == 0x80);
-	return 1;
-}
-
-static unsigned char asn1_oid_decode(struct asn1_ctx *ctx,
-				     unsigned char *eoc,
-				     unsigned long **oid,
-				     unsigned int *len)
-{
-	unsigned long subid;
-	unsigned long *optr;
-	size_t size;
-
-	size = eoc - ctx->pointer + 1;
-
-	/* first subid actually encodes first two subids */
-	if (size < 2 || size > ULONG_MAX/sizeof(unsigned long))
-		return 0;
-
-	*oid = kmalloc(size * sizeof(unsigned long), GFP_ATOMIC);
-	if (*oid == NULL)
-		return 0;
-
-	optr = *oid;
-
-	if (!asn1_subid_decode(ctx, &subid)) {
-		kfree(*oid);
-		*oid = NULL;
-		return 0;
-	}
-
-	if (subid < 40) {
-		optr[0] = 0;
-		optr[1] = subid;
-	} else if (subid < 80) {
-		optr[0] = 1;
-		optr[1] = subid - 40;
-	} else {
-		optr[0] = 2;
-		optr[1] = subid - 80;
-	}
-
-	*len = 2;
-	optr += 2;
-
-	while (ctx->pointer < eoc) {
-		if (++(*len) > size) {
-			ctx->error = ASN1_ERR_DEC_BADVALUE;
-			kfree(*oid);
-			*oid = NULL;
-			return 0;
-		}
-
-		if (!asn1_subid_decode(ctx, optr++)) {
-			kfree(*oid);
-			*oid = NULL;
-			return 0;
-		}
-	}
-	return 1;
-}
-
-/*****************************************************************************
- *
- * SNMP decoding routines (gxsnmp author Dirk Wisse)
- *
- *****************************************************************************/
-
-/* SNMP Versions */
-#define SNMP_V1				0
-#define SNMP_V2C			1
-#define SNMP_V2				2
-#define SNMP_V3				3
-
-/* Default Sizes */
-#define SNMP_SIZE_COMM			256
-#define SNMP_SIZE_OBJECTID		128
-#define SNMP_SIZE_BUFCHR		256
-#define SNMP_SIZE_BUFINT		128
-#define SNMP_SIZE_SMALLOBJECTID		16
-
-/* Requests */
-#define SNMP_PDU_GET			0
-#define SNMP_PDU_NEXT			1
-#define SNMP_PDU_RESPONSE		2
-#define SNMP_PDU_SET			3
-#define SNMP_PDU_TRAP1			4
-#define SNMP_PDU_BULK			5
-#define SNMP_PDU_INFORM			6
-#define SNMP_PDU_TRAP2			7
-
-/* Errors */
-#define SNMP_NOERROR			0
-#define SNMP_TOOBIG			1
-#define SNMP_NOSUCHNAME			2
-#define SNMP_BADVALUE			3
-#define SNMP_READONLY			4
-#define SNMP_GENERROR			5
-#define SNMP_NOACCESS			6
-#define SNMP_WRONGTYPE			7
-#define SNMP_WRONGLENGTH		8
-#define SNMP_WRONGENCODING		9
-#define SNMP_WRONGVALUE			10
-#define SNMP_NOCREATION			11
-#define SNMP_INCONSISTENTVALUE		12
-#define SNMP_RESOURCEUNAVAILABLE	13
-#define SNMP_COMMITFAILED		14
-#define SNMP_UNDOFAILED			15
-#define SNMP_AUTHORIZATIONERROR		16
-#define SNMP_NOTWRITABLE		17
-#define SNMP_INCONSISTENTNAME		18
-
-/* General SNMP V1 Traps */
-#define SNMP_TRAP_COLDSTART		0
-#define SNMP_TRAP_WARMSTART		1
-#define SNMP_TRAP_LINKDOWN		2
-#define SNMP_TRAP_LINKUP		3
-#define SNMP_TRAP_AUTFAILURE		4
-#define SNMP_TRAP_EQPNEIGHBORLOSS	5
-#define SNMP_TRAP_ENTSPECIFIC		6
-
-/* SNMPv1 Types */
-#define SNMP_NULL                0
-#define SNMP_INTEGER             1    /* l  */
-#define SNMP_OCTETSTR            2    /* c  */
-#define SNMP_DISPLAYSTR          2    /* c  */
-#define SNMP_OBJECTID            3    /* ul */
-#define SNMP_IPADDR              4    /* uc */
-#define SNMP_COUNTER             5    /* ul */
-#define SNMP_GAUGE               6    /* ul */
-#define SNMP_TIMETICKS           7    /* ul */
-#define SNMP_OPAQUE              8    /* c  */
-
-/* Additional SNMPv2 Types */
-#define SNMP_UINTEGER            5    /* ul */
-#define SNMP_BITSTR              9    /* uc */
-#define SNMP_NSAP               10    /* uc */
-#define SNMP_COUNTER64          11    /* ul */
-#define SNMP_NOSUCHOBJECT       12
-#define SNMP_NOSUCHINSTANCE     13
-#define SNMP_ENDOFMIBVIEW       14
-
-union snmp_syntax
-{
-	unsigned char uc[0];	/* 8 bit unsigned */
-	char c[0];		/* 8 bit signed */
-	unsigned long ul[0];	/* 32 bit unsigned */
-	long l[0];		/* 32 bit signed */
-};
-
-struct snmp_object
-{
-	unsigned long *id;
-	unsigned int id_len;
-	unsigned short type;
-	unsigned int syntax_len;
-	union snmp_syntax syntax;
-};
-
-struct snmp_request
-{
-	unsigned long id;
-	unsigned int error_status;
-	unsigned int error_index;
-};
-
-struct snmp_v1_trap
-{
-	unsigned long *id;
-	unsigned int id_len;
-	unsigned long ip_address;	/* pointer  */
-	unsigned int general;
-	unsigned int specific;
-	unsigned long time;
-};
-
-/* SNMP types */
-#define SNMP_IPA    0
-#define SNMP_CNT    1
-#define SNMP_GGE    2
-#define SNMP_TIT    3
-#define SNMP_OPQ    4
-#define SNMP_C64    6
-
-/* SNMP errors */
-#define SERR_NSO    0
-#define SERR_NSI    1
-#define SERR_EOM    2
-
-static inline void mangle_address(unsigned char *begin,
-				  unsigned char *addr,
-				  const struct oct1_map *map,
-				  __sum16 *check);
-struct snmp_cnv
-{
-	unsigned int class;
-	unsigned int tag;
-	int syntax;
-};
-
-static const struct snmp_cnv snmp_conv[] = {
-	{ASN1_UNI, ASN1_NUL, SNMP_NULL},
-	{ASN1_UNI, ASN1_INT, SNMP_INTEGER},
-	{ASN1_UNI, ASN1_OTS, SNMP_OCTETSTR},
-	{ASN1_UNI, ASN1_OTS, SNMP_DISPLAYSTR},
-	{ASN1_UNI, ASN1_OJI, SNMP_OBJECTID},
-	{ASN1_APL, SNMP_IPA, SNMP_IPADDR},
-	{ASN1_APL, SNMP_CNT, SNMP_COUNTER},	/* Counter32 */
-	{ASN1_APL, SNMP_GGE, SNMP_GAUGE},	/* Gauge32 == Unsigned32  */
-	{ASN1_APL, SNMP_TIT, SNMP_TIMETICKS},
-	{ASN1_APL, SNMP_OPQ, SNMP_OPAQUE},
-
-	/* SNMPv2 data types and errors */
-	{ASN1_UNI, ASN1_BTS, SNMP_BITSTR},
-	{ASN1_APL, SNMP_C64, SNMP_COUNTER64},
-	{ASN1_CTX, SERR_NSO, SNMP_NOSUCHOBJECT},
-	{ASN1_CTX, SERR_NSI, SNMP_NOSUCHINSTANCE},
-	{ASN1_CTX, SERR_EOM, SNMP_ENDOFMIBVIEW},
-	{0,       0,       -1}
-};
-
-static unsigned char snmp_tag_cls2syntax(unsigned int tag,
-					 unsigned int cls,
-					 unsigned short *syntax)
-{
-	const struct snmp_cnv *cnv;
-
-	cnv = snmp_conv;
-
-	while (cnv->syntax != -1) {
-		if (cnv->tag == tag && cnv->class == cls) {
-			*syntax = cnv->syntax;
-			return 1;
-		}
-		cnv++;
-	}
-	return 0;
-}
-
-static unsigned char snmp_object_decode(struct asn1_ctx *ctx,
-					struct snmp_object **obj)
-{
-	unsigned int cls, con, tag, len, idlen;
-	unsigned short type;
-	unsigned char *eoc, *end, *p;
-	unsigned long *lp, *id;
-	unsigned long ul;
-	long l;
-
-	*obj = NULL;
-	id = NULL;
-
-	if (!asn1_header_decode(ctx, &eoc, &cls, &con, &tag))
-		return 0;
-
-	if (cls != ASN1_UNI || con != ASN1_CON || tag != ASN1_SEQ)
-		return 0;
-
-	if (!asn1_header_decode(ctx, &end, &cls, &con, &tag))
-		return 0;
-
-	if (cls != ASN1_UNI || con != ASN1_PRI || tag != ASN1_OJI)
-		return 0;
-
-	if (!asn1_oid_decode(ctx, end, &id, &idlen))
-		return 0;
-
-	if (!asn1_header_decode(ctx, &end, &cls, &con, &tag)) {
-		kfree(id);
-		return 0;
-	}
-
-	if (con != ASN1_PRI) {
-		kfree(id);
-		return 0;
-	}
-
-	type = 0;
-	if (!snmp_tag_cls2syntax(tag, cls, &type)) {
-		kfree(id);
-		return 0;
-	}
-
-	l = 0;
-	switch (type) {
-	case SNMP_INTEGER:
-		len = sizeof(long);
-		if (!asn1_long_decode(ctx, end, &l)) {
-			kfree(id);
-			return 0;
-		}
-		*obj = kmalloc(sizeof(struct snmp_object) + len, GFP_ATOMIC);
-		if (*obj == NULL) {
-			kfree(id);
-			return 0;
-		}
-		(*obj)->syntax.l[0] = l;
-		break;
-	case SNMP_OCTETSTR:
-	case SNMP_OPAQUE:
-		if (!asn1_octets_decode(ctx, end, &p, &len)) {
-			kfree(id);
-			return 0;
-		}
-		*obj = kmalloc(sizeof(struct snmp_object) + len, GFP_ATOMIC);
-		if (*obj == NULL) {
-			kfree(p);
-			kfree(id);
-			return 0;
-		}
-		memcpy((*obj)->syntax.c, p, len);
-		kfree(p);
-		break;
-	case SNMP_NULL:
-	case SNMP_NOSUCHOBJECT:
-	case SNMP_NOSUCHINSTANCE:
-	case SNMP_ENDOFMIBVIEW:
-		len = 0;
-		*obj = kmalloc(sizeof(struct snmp_object), GFP_ATOMIC);
-		if (*obj == NULL) {
-			kfree(id);
-			return 0;
-		}
-		if (!asn1_null_decode(ctx, end)) {
-			kfree(id);
-			kfree(*obj);
-			*obj = NULL;
-			return 0;
-		}
-		break;
-	case SNMP_OBJECTID:
-		if (!asn1_oid_decode(ctx, end, &lp, &len)) {
-			kfree(id);
-			return 0;
-		}
-		len *= sizeof(unsigned long);
-		*obj = kmalloc(sizeof(struct snmp_object) + len, GFP_ATOMIC);
-		if (*obj == NULL) {
-			kfree(lp);
-			kfree(id);
-			return 0;
-		}
-		memcpy((*obj)->syntax.ul, lp, len);
-		kfree(lp);
-		break;
-	case SNMP_IPADDR:
-		if (!asn1_octets_decode(ctx, end, &p, &len)) {
-			kfree(id);
-			return 0;
-		}
-		if (len != 4) {
-			kfree(p);
-			kfree(id);
-			return 0;
-		}
-		*obj = kmalloc(sizeof(struct snmp_object) + len, GFP_ATOMIC);
-		if (*obj == NULL) {
-			kfree(p);
-			kfree(id);
-			return 0;
-		}
-		memcpy((*obj)->syntax.uc, p, len);
-		kfree(p);
-		break;
-	case SNMP_COUNTER:
-	case SNMP_GAUGE:
-	case SNMP_TIMETICKS:
-		len = sizeof(unsigned long);
-		if (!asn1_ulong_decode(ctx, end, &ul)) {
-			kfree(id);
-			return 0;
-		}
-		*obj = kmalloc(sizeof(struct snmp_object) + len, GFP_ATOMIC);
-		if (*obj == NULL) {
-			kfree(id);
-			return 0;
-		}
-		(*obj)->syntax.ul[0] = ul;
-		break;
-	default:
-		kfree(id);
-		return 0;
-	}
-
-	(*obj)->syntax_len = len;
-	(*obj)->type = type;
-	(*obj)->id = id;
-	(*obj)->id_len = idlen;
-
-	if (!asn1_eoc_decode(ctx, eoc)) {
-		kfree(id);
-		kfree(*obj);
-		*obj = NULL;
-		return 0;
-	}
-	return 1;
-}
-
-static unsigned char noinline_for_stack
-snmp_request_decode(struct asn1_ctx *ctx, struct snmp_request *request)
-{
-	unsigned int cls, con, tag;
-	unsigned char *end;
-
-	if (!asn1_header_decode(ctx, &end, &cls, &con, &tag))
-		return 0;
-
-	if (cls != ASN1_UNI || con != ASN1_PRI || tag != ASN1_INT)
-		return 0;
-
-	if (!asn1_ulong_decode(ctx, end, &request->id))
-		return 0;
-
-	if (!asn1_header_decode(ctx, &end, &cls, &con, &tag))
-		return 0;
-
-	if (cls != ASN1_UNI || con != ASN1_PRI || tag != ASN1_INT)
-		return 0;
-
-	if (!asn1_uint_decode(ctx, end, &request->error_status))
-		return 0;
-
-	if (!asn1_header_decode(ctx, &end, &cls, &con, &tag))
-		return 0;
-
-	if (cls != ASN1_UNI || con != ASN1_PRI || tag != ASN1_INT)
-		return 0;
-
-	if (!asn1_uint_decode(ctx, end, &request->error_index))
-		return 0;
-
-	return 1;
-}
-
-/*
- * Fast checksum update for possibly oddly-aligned UDP byte, from the
- * code example in the draft.
- */
-static void fast_csum(__sum16 *csum,
-		      const unsigned char *optr,
-		      const unsigned char *nptr,
-		      int offset)
-{
-	unsigned char s[4];
-
-	if (offset & 1) {
-		s[0] = ~0;
-		s[1] = ~*optr;
-		s[2] = 0;
-		s[3] = *nptr;
-	} else {
-		s[0] = ~*optr;
-		s[1] = ~0;
-		s[2] = *nptr;
-		s[3] = 0;
-	}
-
-	*csum = csum_fold(csum_partial(s, 4, ~csum_unfold(*csum)));
-}
-
-/*
- * Mangle IP address.
- * 	- begin points to the start of the snmp message
- *      - addr points to the start of the address
- */
-static inline void mangle_address(unsigned char *begin,
-				  unsigned char *addr,
-				  const struct oct1_map *map,
-				  __sum16 *check)
-{
-	if (map->from == NOCT1(addr)) {
-		u_int32_t old;
-
-		if (debug)
-			memcpy(&old, addr, sizeof(old));
-
-		*addr = map->to;
-
-		/* Update UDP checksum if being used */
-		if (*check) {
-			fast_csum(check,
-				  &map->from, &map->to, addr - begin);
-
-		}
-
-		if (debug)
-			printk(KERN_DEBUG "bsalg: mapped %pI4 to %pI4\n",
-			       &old, addr);
-	}
-}
-
-static unsigned char noinline_for_stack
-snmp_trap_decode(struct asn1_ctx *ctx, struct snmp_v1_trap *trap,
-		 const struct oct1_map *map,
-		 __sum16 *check)
-{
-	unsigned int cls, con, tag, len;
-	unsigned char *end;
-
-	if (!asn1_header_decode(ctx, &end, &cls, &con, &tag))
-		return 0;
-
-	if (cls != ASN1_UNI || con != ASN1_PRI || tag != ASN1_OJI)
-		return 0;
-
-	if (!asn1_oid_decode(ctx, end, &trap->id, &trap->id_len))
-		return 0;
-
-	if (!asn1_header_decode(ctx, &end, &cls, &con, &tag))
-		goto err_id_free;
-
-	if (!((cls == ASN1_APL && con == ASN1_PRI && tag == SNMP_IPA) ||
-	      (cls == ASN1_UNI && con == ASN1_PRI && tag == ASN1_OTS)))
-		goto err_id_free;
-
-	if (!asn1_octets_decode(ctx, end, (unsigned char **)&trap->ip_address, &len))
-		goto err_id_free;
-
-	/* IPv4 only */
-	if (len != 4)
-		goto err_addr_free;
-
-	mangle_address(ctx->begin, ctx->pointer - 4, map, check);
-
-	if (!asn1_header_decode(ctx, &end, &cls, &con, &tag))
-		goto err_addr_free;
-
-	if (cls != ASN1_UNI || con != ASN1_PRI || tag != ASN1_INT)
-		goto err_addr_free;
-
-	if (!asn1_uint_decode(ctx, end, &trap->general))
-		goto err_addr_free;
-
-	if (!asn1_header_decode(ctx, &end, &cls, &con, &tag))
-		goto err_addr_free;
-
-	if (cls != ASN1_UNI || con != ASN1_PRI || tag != ASN1_INT)
-		goto err_addr_free;
-
-	if (!asn1_uint_decode(ctx, end, &trap->specific))
-		goto err_addr_free;
-
-	if (!asn1_header_decode(ctx, &end, &cls, &con, &tag))
-		goto err_addr_free;
-
-	if (!((cls == ASN1_APL && con == ASN1_PRI && tag == SNMP_TIT) ||
-	      (cls == ASN1_UNI && con == ASN1_PRI && tag == ASN1_INT)))
-		goto err_addr_free;
-
-	if (!asn1_ulong_decode(ctx, end, &trap->time))
-		goto err_addr_free;
-
-	return 1;
-
-err_addr_free:
-	kfree((unsigned long *)trap->ip_address);
-
-err_id_free:
-	kfree(trap->id);
-
-	return 0;
-}
-
-/*****************************************************************************
- *
- * Misc. routines
- *
- *****************************************************************************/
-
-/*
- * Parse and mangle SNMP message according to mapping.
- * (And this is the fucking 'basic' method).
- */
-static int snmp_parse_mangle(unsigned char *msg,
-			     u_int16_t len,
-			     const struct oct1_map *map,
-			     __sum16 *check)
-{
-	unsigned char *eoc, *end;
-	unsigned int cls, con, tag, vers, pdutype;
-	struct asn1_ctx ctx;
-	struct asn1_octstr comm;
-	struct snmp_object *obj;
-
-	if (debug > 1)
-		print_hex_dump(KERN_DEBUG, "", DUMP_PREFIX_NONE, 16, 1,
-			       msg, len, 0);
-
-	asn1_open(&ctx, msg, len);
-
-	/*
-	 * Start of SNMP message.
-	 */
-	if (!asn1_header_decode(&ctx, &eoc, &cls, &con, &tag))
-		return 0;
-	if (cls != ASN1_UNI || con != ASN1_CON || tag != ASN1_SEQ)
-		return 0;
-
-	/*
-	 * Version 1 or 2 handled.
-	 */
-	if (!asn1_header_decode(&ctx, &end, &cls, &con, &tag))
-		return 0;
-	if (cls != ASN1_UNI || con != ASN1_PRI || tag != ASN1_INT)
-		return 0;
-	if (!asn1_uint_decode (&ctx, end, &vers))
-		return 0;
-	if (debug > 1)
-		pr_debug("bsalg: snmp version: %u\n", vers + 1);
-	if (vers > 1)
-		return 1;
-
-	/*
-	 * Community.
-	 */
-	if (!asn1_header_decode (&ctx, &end, &cls, &con, &tag))
-		return 0;
-	if (cls != ASN1_UNI || con != ASN1_PRI || tag != ASN1_OTS)
-		return 0;
-	if (!asn1_octets_decode(&ctx, end, &comm.data, &comm.len))
-		return 0;
-	if (debug > 1) {
-		unsigned int i;
-
-		pr_debug("bsalg: community: ");
-		for (i = 0; i < comm.len; i++)
-			pr_cont("%c", comm.data[i]);
-		pr_cont("\n");
-	}
-	kfree(comm.data);
-
-	/*
-	 * PDU type
-	 */
-	if (!asn1_header_decode(&ctx, &eoc, &cls, &con, &pdutype))
-		return 0;
-	if (cls != ASN1_CTX || con != ASN1_CON)
-		return 0;
-	if (debug > 1) {
-		static const unsigned char *const pdus[] = {
-			[SNMP_PDU_GET] = "get",
-			[SNMP_PDU_NEXT] = "get-next",
-			[SNMP_PDU_RESPONSE] = "response",
-			[SNMP_PDU_SET] = "set",
-			[SNMP_PDU_TRAP1] = "trapv1",
-			[SNMP_PDU_BULK] = "bulk",
-			[SNMP_PDU_INFORM] = "inform",
-			[SNMP_PDU_TRAP2] = "trapv2"
-		};
-
-		if (pdutype > SNMP_PDU_TRAP2)
-			pr_debug("bsalg: bad pdu type %u\n", pdutype);
-		else
-			pr_debug("bsalg: pdu: %s\n", pdus[pdutype]);
-	}
-	if (pdutype != SNMP_PDU_RESPONSE &&
-	    pdutype != SNMP_PDU_TRAP1 && pdutype != SNMP_PDU_TRAP2)
-		return 1;
-
-	/*
-	 * Request header or v1 trap
-	 */
-	if (pdutype == SNMP_PDU_TRAP1) {
-		struct snmp_v1_trap trap;
-		unsigned char ret = snmp_trap_decode(&ctx, &trap, map, check);
-
-		if (ret) {
-			kfree(trap.id);
-			kfree((unsigned long *)trap.ip_address);
-		} else
-			return ret;
-
-	} else {
-		struct snmp_request req;
-
-		if (!snmp_request_decode(&ctx, &req))
-			return 0;
-
-		if (debug > 1)
-			pr_debug("bsalg: request: id=0x%lx error_status=%u "
-			"error_index=%u\n", req.id, req.error_status,
-			req.error_index);
-	}
-
-	/*
-	 * Loop through objects, look for IP addresses to mangle.
-	 */
-	if (!asn1_header_decode(&ctx, &eoc, &cls, &con, &tag))
-		return 0;
-
-	if (cls != ASN1_UNI || con != ASN1_CON || tag != ASN1_SEQ)
-		return 0;
-
-	while (!asn1_eoc_decode(&ctx, eoc)) {
-		unsigned int i;
-
-		if (!snmp_object_decode(&ctx, &obj)) {
-			if (obj) {
-				kfree(obj->id);
-				kfree(obj);
-			}
-			return 0;
-		}
-
-		if (debug > 1) {
-			pr_debug("bsalg: object: ");
-			for (i = 0; i < obj->id_len; i++) {
-				if (i > 0)
-					pr_cont(".");
-				pr_cont("%lu", obj->id[i]);
-			}
-			pr_cont(": type=%u\n", obj->type);
-
-		}
-
-		if (obj->type == SNMP_IPADDR)
-			mangle_address(ctx.begin, ctx.pointer - 4, map, check);
-
-		kfree(obj->id);
-		kfree(obj);
-	}
-
-	if (!asn1_eoc_decode(&ctx, eoc))
-		return 0;
-
-	return 1;
-}
-
-/*****************************************************************************
- *
- * NAT routines.
- *
- *****************************************************************************/
-
-/*
- * SNMP translation routine.
- */
-static int snmp_translate(struct nf_conn *ct,
-			  enum ip_conntrack_info ctinfo,
-			  struct sk_buff *skb)
-{
-	struct iphdr *iph = ip_hdr(skb);
-	struct udphdr *udph = (struct udphdr *)((__be32 *)iph + iph->ihl);
-	u_int16_t udplen = ntohs(udph->len);
-	u_int16_t paylen = udplen - sizeof(struct udphdr);
-	int dir = CTINFO2DIR(ctinfo);
-	struct oct1_map map;
-
-	/*
-	 * Determine mapping for application layer addresses based
-	 * on NAT manipulations for the packet.
-	 */
-	if (dir == IP_CT_DIR_ORIGINAL) {
-		/* SNAT traps */
-		map.from = NOCT1(&ct->tuplehash[dir].tuple.src.u3.ip);
-		map.to = NOCT1(&ct->tuplehash[!dir].tuple.dst.u3.ip);
-	} else {
-		/* DNAT replies */
-		map.from = NOCT1(&ct->tuplehash[!dir].tuple.src.u3.ip);
-		map.to = NOCT1(&ct->tuplehash[dir].tuple.dst.u3.ip);
-	}
-
-	if (map.from == map.to)
-		return NF_ACCEPT;
-
-	if (!snmp_parse_mangle((unsigned char *)udph + sizeof(struct udphdr),
-			       paylen, &map, &udph->check)) {
-		net_warn_ratelimited("bsalg: parser failed\n");
-		return NF_DROP;
-	}
-	return NF_ACCEPT;
-}
-
-/* We don't actually set up expectations, just adjust internal IP
- * addresses if this is being NATted */
-static int help(struct sk_buff *skb, unsigned int protoff,
-		struct nf_conn *ct,
-		enum ip_conntrack_info ctinfo)
-{
-	int dir = CTINFO2DIR(ctinfo);
-	unsigned int ret;
-	const struct iphdr *iph = ip_hdr(skb);
-	const struct udphdr *udph = (struct udphdr *)((__be32 *)iph + iph->ihl);
-
-	/* SNMP replies and originating SNMP traps get mangled */
-	if (udph->source == htons(SNMP_PORT) && dir != IP_CT_DIR_REPLY)
-		return NF_ACCEPT;
-	if (udph->dest == htons(SNMP_TRAP_PORT) && dir != IP_CT_DIR_ORIGINAL)
-		return NF_ACCEPT;
-
-	/* No NAT? */
-	if (!(ct->status & IPS_NAT_MASK))
-		return NF_ACCEPT;
-
-	/*
-	 * Make sure the packet length is ok.  So far, we were only guaranteed
-	 * to have a valid length IP header plus 8 bytes, which means we have
-	 * enough room for a UDP header.  Just verify the UDP length field so we
-	 * can mess around with the payload.
-	 */
-	if (ntohs(udph->len) != skb->len - (iph->ihl << 2)) {
-		net_warn_ratelimited("SNMP: dropping malformed packet src=%pI4 dst=%pI4\n",
-				     &iph->saddr, &iph->daddr);
-		 return NF_DROP;
-	}
-
-	if (!skb_make_writable(skb, skb->len))
-		return NF_DROP;
-
-	spin_lock_bh(&snmp_lock);
-	ret = snmp_translate(ct, ctinfo, skb);
-	spin_unlock_bh(&snmp_lock);
-	return ret;
-}
-
-static const struct nf_conntrack_expect_policy snmp_exp_policy = {
-	.max_expected	= 0,
-	.timeout	= 180,
-};
-
-static struct nf_conntrack_helper snmp_trap_helper __read_mostly = {
-	.me			= THIS_MODULE,
-	.help			= help,
-	.expect_policy		= &snmp_exp_policy,
-	.name			= "snmp_trap",
-	.tuple.src.l3num	= AF_INET,
-	.tuple.src.u.udp.port	= cpu_to_be16(SNMP_TRAP_PORT),
-	.tuple.dst.protonum	= IPPROTO_UDP,
-};
-
-/*****************************************************************************
- *
- * Module stuff.
- *
- *****************************************************************************/
-
-static int __init nf_nat_snmp_basic_init(void)
-{
-	BUG_ON(nf_nat_snmp_hook != NULL);
-	RCU_INIT_POINTER(nf_nat_snmp_hook, help);
-
-	return nf_conntrack_helper_register(&snmp_trap_helper);
-}
-
-static void __exit nf_nat_snmp_basic_fini(void)
-{
-	RCU_INIT_POINTER(nf_nat_snmp_hook, NULL);
-	synchronize_rcu();
-	nf_conntrack_helper_unregister(&snmp_trap_helper);
-}
-
-module_init(nf_nat_snmp_basic_init);
-module_exit(nf_nat_snmp_basic_fini);
-
-module_param(debug, int, 0600);
diff --git a/net/ipv4/netfilter/nf_nat_snmp_basic_main.c b/net/ipv4/netfilter/nf_nat_snmp_basic_main.c
new file mode 100644
index 000000000000..b6e277093e7e
--- /dev/null
+++ b/net/ipv4/netfilter/nf_nat_snmp_basic_main.c
@@ -0,0 +1,235 @@
+/*
+ * nf_nat_snmp_basic.c
+ *
+ * Basic SNMP Application Layer Gateway
+ *
+ * This IP NAT module is intended for use with SNMP network
+ * discovery and monitoring applications where target networks use
+ * conflicting private address realms.
+ *
+ * Static NAT is used to remap the networks from the view of the network
+ * management system at the IP layer, and this module remaps some application
+ * layer addresses to match.
+ *
+ * The simplest form of ALG is performed, where only tagged IP addresses
+ * are modified.  The module does not need to be MIB aware and only scans
+ * messages at the ASN.1/BER level.
+ *
+ * Currently, only SNMPv1 and SNMPv2 are supported.
+ *
+ * More information on ALG and associated issues can be found in
+ * RFC 2962
+ *
+ * The ASN.1/BER parsing code is derived from the gxsnmp package by Gregory
+ * McLean & Jochen Friedrich, stripped down for use in the kernel.
+ *
+ * Copyright (c) 2000 RP Internet (www.rpi.net.au).
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
+ *
+ * Author: James Morris <jmorris@intercode.com.au>
+ *
+ * Copyright (c) 2006-2010 Patrick McHardy <kaber@trash.net>
+ */
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/types.h>
+#include <linux/kernel.h>
+#include <linux/in.h>
+#include <linux/ip.h>
+#include <linux/udp.h>
+#include <net/checksum.h>
+#include <net/udp.h>
+
+#include <net/netfilter/nf_nat.h>
+#include <net/netfilter/nf_conntrack_expect.h>
+#include <net/netfilter/nf_conntrack_helper.h>
+#include <linux/netfilter/nf_conntrack_snmp.h>
+#include "nf_nat_snmp_basic-asn1.h"
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("James Morris <jmorris@intercode.com.au>");
+MODULE_DESCRIPTION("Basic SNMP Application Layer Gateway");
+MODULE_ALIAS("ip_nat_snmp_basic");
+
+#define SNMP_PORT 161
+#define SNMP_TRAP_PORT 162
+
+static DEFINE_SPINLOCK(snmp_lock);
+
+struct snmp_ctx {
+	unsigned char *begin;
+	__sum16 *check;
+	__be32 from;
+	__be32 to;
+};
+
+static void fast_csum(struct snmp_ctx *ctx, unsigned char offset)
+{
+	unsigned char s[12] = {0,};
+	int size;
+
+	if (offset & 1) {
+		memcpy(&s[1], &ctx->from, 4);
+		memcpy(&s[7], &ctx->to, 4);
+		s[0] = ~0;
+		s[1] = ~s[1];
+		s[2] = ~s[2];
+		s[3] = ~s[3];
+		s[4] = ~s[4];
+		s[5] = ~0;
+		size = 12;
+	} else {
+		memcpy(&s[0], &ctx->from, 4);
+		memcpy(&s[4], &ctx->to, 4);
+		s[0] = ~s[0];
+		s[1] = ~s[1];
+		s[2] = ~s[2];
+		s[3] = ~s[3];
+		size = 8;
+	}
+	*ctx->check = csum_fold(csum_partial(s, size,
+					     ~csum_unfold(*ctx->check)));
+}
+
+int snmp_version(void *context, size_t hdrlen, unsigned char tag,
+		 const void *data, size_t datalen)
+{
+	if (*(unsigned char *)data > 1)
+		return -ENOTSUPP;
+	return 1;
+}
+
+int snmp_helper(void *context, size_t hdrlen, unsigned char tag,
+		const void *data, size_t datalen)
+{
+	struct snmp_ctx *ctx = (struct snmp_ctx *)context;
+	__be32 *pdata = (__be32 *)data;
+
+	if (*pdata == ctx->from) {
+		pr_debug("%s: %pI4 to %pI4\n", __func__,
+			 (void *)&ctx->from, (void *)&ctx->to);
+
+		if (*ctx->check)
+			fast_csum(ctx, (unsigned char *)data - ctx->begin);
+		*pdata = ctx->to;
+	}
+
+	return 1;
+}
+
+static int snmp_translate(struct nf_conn *ct, int dir, struct sk_buff *skb)
+{
+	struct iphdr *iph = ip_hdr(skb);
+	struct udphdr *udph = (struct udphdr *)((__be32 *)iph + iph->ihl);
+	u16 datalen = ntohs(udph->len) - sizeof(struct udphdr);
+	char *data = (unsigned char *)udph + sizeof(struct udphdr);
+	struct snmp_ctx ctx;
+	int ret;
+
+	if (dir == IP_CT_DIR_ORIGINAL) {
+		ctx.from = ct->tuplehash[dir].tuple.src.u3.ip;
+		ctx.to = ct->tuplehash[!dir].tuple.dst.u3.ip;
+	} else {
+		ctx.from = ct->tuplehash[!dir].tuple.src.u3.ip;
+		ctx.to = ct->tuplehash[dir].tuple.dst.u3.ip;
+	}
+
+	if (ctx.from == ctx.to)
+		return NF_ACCEPT;
+
+	ctx.begin = (unsigned char *)udph + sizeof(struct udphdr);
+	ctx.check = &udph->check;
+	ret = asn1_ber_decoder(&nf_nat_snmp_basic_decoder, &ctx, data, datalen);
+	if (ret < 0) {
+		nf_ct_helper_log(skb, ct, "parser failed");
+		return NF_DROP;
+	}
+
+	return NF_ACCEPT;
+}
+
+/* We don't actually set up expectations, just adjust internal IP
+ * addresses if this is being NATted
+ */
+static int help(struct sk_buff *skb, unsigned int protoff,
+		struct nf_conn *ct,
+		enum ip_conntrack_info ctinfo)
+{
+	int dir = CTINFO2DIR(ctinfo);
+	unsigned int ret;
+	const struct iphdr *iph = ip_hdr(skb);
+	const struct udphdr *udph = (struct udphdr *)((__be32 *)iph + iph->ihl);
+
+	/* SNMP replies and originating SNMP traps get mangled */
+	if (udph->source == htons(SNMP_PORT) && dir != IP_CT_DIR_REPLY)
+		return NF_ACCEPT;
+	if (udph->dest == htons(SNMP_TRAP_PORT) && dir != IP_CT_DIR_ORIGINAL)
+		return NF_ACCEPT;
+
+	/* No NAT? */
+	if (!(ct->status & IPS_NAT_MASK))
+		return NF_ACCEPT;
+
+	/* Make sure the packet length is ok.  So far, we were only guaranteed
+	 * to have a valid length IP header plus 8 bytes, which means we have
+	 * enough room for a UDP header.  Just verify the UDP length field so we
+	 * can mess around with the payload.
+	 */
+	if (ntohs(udph->len) != skb->len - (iph->ihl << 2)) {
+		nf_ct_helper_log(skb, ct, "dropping malformed packet");
+		return NF_DROP;
+	}
+
+	if (!skb_make_writable(skb, skb->len)) {
+		nf_ct_helper_log(skb, ct, "cannot mangle packet");
+		return NF_DROP;
+	}
+
+	spin_lock_bh(&snmp_lock);
+	ret = snmp_translate(ct, dir, skb);
+	spin_unlock_bh(&snmp_lock);
+	return ret;
+}
+
+static const struct nf_conntrack_expect_policy snmp_exp_policy = {
+	.max_expected	= 0,
+	.timeout	= 180,
+};
+
+static struct nf_conntrack_helper snmp_trap_helper __read_mostly = {
+	.me			= THIS_MODULE,
+	.help			= help,
+	.expect_policy		= &snmp_exp_policy,
+	.name			= "snmp_trap",
+	.tuple.src.l3num	= AF_INET,
+	.tuple.src.u.udp.port	= cpu_to_be16(SNMP_TRAP_PORT),
+	.tuple.dst.protonum	= IPPROTO_UDP,
+};
+
+static int __init nf_nat_snmp_basic_init(void)
+{
+	BUG_ON(nf_nat_snmp_hook != NULL);
+	RCU_INIT_POINTER(nf_nat_snmp_hook, help);
+
+	return nf_conntrack_helper_register(&snmp_trap_helper);
+}
+
+static void __exit nf_nat_snmp_basic_fini(void)
+{
+	RCU_INIT_POINTER(nf_nat_snmp_hook, NULL);
+	synchronize_rcu();
+	nf_conntrack_helper_unregister(&snmp_trap_helper);
+}
+
+module_init(nf_nat_snmp_basic_init);
+module_exit(nf_nat_snmp_basic_fini);
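
fast_csum() above folds the address rewrite into the UDP checksum with one's-complement arithmetic: adding the complement of the old bytes plus the new bytes into the unfolded sum is RFC 1624's HC' = ~(~HC + ~m + m'), and the odd-offset branch shifts the buffer by one byte so the rewritten bytes pair up the way they did in the original stream. A standalone demonstration of the arithmetic (hypothetical user-space code, not a kernel API; any C99 compiler will do):

	#include <stdint.h>
	#include <stdio.h>
	#include <string.h>

	static uint16_t csum16(const uint8_t *buf, size_t len, uint32_t sum)
	{
		size_t i;

		for (i = 0; i + 1 < len; i += 2)
			sum += (uint32_t)buf[i] << 8 | buf[i + 1];
		if (len & 1)
			sum += (uint32_t)buf[len - 1] << 8;
		while (sum >> 16)			/* fold carries */
			sum = (sum & 0xffff) + (sum >> 16);
		return (uint16_t)sum;
	}

	int main(void)
	{
		uint8_t pkt[8] = { 0x0a, 0x00, 0x00, 0x01, 0x11, 0x22, 0x33, 0x44 };
		uint8_t from[4] = { 0x0a, 0x00, 0x00, 0x01 };	/* 10.0.0.1 */
		uint8_t to[4]   = { 0xc0, 0xa8, 0x00, 0x01 };	/* 192.168.0.1 */
		uint16_t old = ~csum16(pkt, sizeof(pkt), 0) & 0xffff;
		uint8_t s[8];
		uint16_t upd;
		int i;

		/* RFC 1624 Eqn. 3: HC' = ~(~HC + ~m + m').  Feed the
		 * complement of the old bytes plus the new bytes back
		 * into the unfolded old sum, as fast_csum() does. */
		for (i = 0; i < 4; i++) {
			s[i] = ~from[i];
			s[4 + i] = to[i];
		}
		upd = ~csum16(s, sizeof(s), (uint16_t)~old) & 0xffff;

		memcpy(pkt, to, 4);	/* rewrite the address, recompute fully */
		printf("full: %04x  incremental: %04x\n",
		       ~csum16(pkt, sizeof(pkt), 0) & 0xffff, upd);
		return 0;
	}

Both computations print 0xfaef for this input, confirming that the incremental update matches a full recomputation.
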
diff --git a/net/ipv4/netfilter/nf_tables_arp.c b/net/ipv4/netfilter/nf_tables_arp.c
index 4bbc273b45e8..036c074736b0 100644
--- a/net/ipv4/netfilter/nf_tables_arp.c
+++ b/net/ipv4/netfilter/nf_tables_arp.c
@@ -21,51 +21,12 @@ nft_do_chain_arp(void *priv,
 {
 	struct nft_pktinfo pkt;
 
-	nft_set_pktinfo_unspec(&pkt, skb, state);
+	nft_set_pktinfo(&pkt, skb, state);
+	nft_set_pktinfo_unspec(&pkt, skb);
 
 	return nft_do_chain(&pkt, priv);
 }
 
-static struct nft_af_info nft_af_arp __read_mostly = {
-	.family		= NFPROTO_ARP,
-	.nhooks		= NF_ARP_NUMHOOKS,
-	.owner		= THIS_MODULE,
-	.nops		= 1,
-	.hooks		= {
-		[NF_ARP_IN]		= nft_do_chain_arp,
-		[NF_ARP_OUT]		= nft_do_chain_arp,
-		[NF_ARP_FORWARD]	= nft_do_chain_arp,
-	},
-};
-
-static int nf_tables_arp_init_net(struct net *net)
-{
-	net->nft.arp = kmalloc(sizeof(struct nft_af_info), GFP_KERNEL);
-	if (net->nft.arp== NULL)
-		return -ENOMEM;
-
-	memcpy(net->nft.arp, &nft_af_arp, sizeof(nft_af_arp));
-
-	if (nft_register_afinfo(net, net->nft.arp) < 0)
-		goto err;
-
-	return 0;
-err:
-	kfree(net->nft.arp);
-	return -ENOMEM;
-}
-
-static void nf_tables_arp_exit_net(struct net *net)
-{
-	nft_unregister_afinfo(net, net->nft.arp);
-	kfree(net->nft.arp);
-}
-
-static struct pernet_operations nf_tables_arp_net_ops = {
-	.init   = nf_tables_arp_init_net,
-	.exit   = nf_tables_arp_exit_net,
-};
-
 static const struct nf_chain_type filter_arp = {
 	.name		= "filter",
 	.type		= NFT_CHAIN_T_DEFAULT,
@@ -73,26 +34,19 @@ static const struct nf_chain_type filter_arp = {
 	.owner		= THIS_MODULE,
 	.hook_mask	= (1 << NF_ARP_IN) |
 			  (1 << NF_ARP_OUT),
+	.hooks		= {
+		[NF_ARP_IN]		= nft_do_chain_arp,
+		[NF_ARP_OUT]		= nft_do_chain_arp,
+	},
 };
 
 static int __init nf_tables_arp_init(void)
 {
-	int ret;
-
-	ret = nft_register_chain_type(&filter_arp);
-	if (ret < 0)
-		return ret;
-
-	ret = register_pernet_subsys(&nf_tables_arp_net_ops);
-	if (ret < 0)
-		nft_unregister_chain_type(&filter_arp);
-
-	return ret;
+	return nft_register_chain_type(&filter_arp);
 }
 
 static void __exit nf_tables_arp_exit(void)
 {
-	unregister_pernet_subsys(&nf_tables_arp_net_ops);
 	nft_unregister_chain_type(&filter_arp);
 }
 
@@ -101,4 +55,4 @@ module_exit(nf_tables_arp_exit);
 
 MODULE_LICENSE("GPL");
 MODULE_AUTHOR("Patrick McHardy <kaber@trash.net>");
-MODULE_ALIAS_NFT_FAMILY(3); /* NFPROTO_ARP */
+MODULE_ALIAS_NFT_CHAIN(3, "filter"); /* NFPROTO_ARP */
diff --git a/net/ipv4/netfilter/nf_tables_ipv4.c b/net/ipv4/netfilter/nf_tables_ipv4.c
index 2840a29b2e04..96f955496d5f 100644
--- a/net/ipv4/netfilter/nf_tables_ipv4.c
+++ b/net/ipv4/netfilter/nf_tables_ipv4.c
@@ -24,69 +24,12 @@ static unsigned int nft_do_chain_ipv4(void *priv,
 {
 	struct nft_pktinfo pkt;
 
-	nft_set_pktinfo_ipv4(&pkt, skb, state);
+	nft_set_pktinfo(&pkt, skb, state);
+	nft_set_pktinfo_ipv4(&pkt, skb);
 
 	return nft_do_chain(&pkt, priv);
 }
 
-static unsigned int nft_ipv4_output(void *priv,
-				    struct sk_buff *skb,
-				    const struct nf_hook_state *state)
-{
-	if (unlikely(skb->len < sizeof(struct iphdr) ||
-		     ip_hdr(skb)->ihl < sizeof(struct iphdr) / 4)) {
-		if (net_ratelimit())
-			pr_info("nf_tables_ipv4: ignoring short SOCK_RAW "
-				"packet\n");
-		return NF_ACCEPT;
-	}
-
-	return nft_do_chain_ipv4(priv, skb, state);
-}
-
-struct nft_af_info nft_af_ipv4 __read_mostly = {
-	.family		= NFPROTO_IPV4,
-	.nhooks		= NF_INET_NUMHOOKS,
-	.owner		= THIS_MODULE,
-	.nops		= 1,
-	.hooks		= {
-		[NF_INET_LOCAL_IN]	= nft_do_chain_ipv4,
-		[NF_INET_LOCAL_OUT]	= nft_ipv4_output,
-		[NF_INET_FORWARD]	= nft_do_chain_ipv4,
-		[NF_INET_PRE_ROUTING]	= nft_do_chain_ipv4,
-		[NF_INET_POST_ROUTING]	= nft_do_chain_ipv4,
-	},
-};
-EXPORT_SYMBOL_GPL(nft_af_ipv4);
-
-static int nf_tables_ipv4_init_net(struct net *net)
-{
-	net->nft.ipv4 = kmalloc(sizeof(struct nft_af_info), GFP_KERNEL);
-	if (net->nft.ipv4 == NULL)
-		return -ENOMEM;
-
-	memcpy(net->nft.ipv4, &nft_af_ipv4, sizeof(nft_af_ipv4));
-
-	if (nft_register_afinfo(net, net->nft.ipv4) < 0)
-		goto err;
-
-	return 0;
-err:
-	kfree(net->nft.ipv4);
-	return -ENOMEM;
-}
-
-static void nf_tables_ipv4_exit_net(struct net *net)
-{
-	nft_unregister_afinfo(net, net->nft.ipv4);
-	kfree(net->nft.ipv4);
-}
-
-static struct pernet_operations nf_tables_ipv4_net_ops = {
-	.init	= nf_tables_ipv4_init_net,
-	.exit	= nf_tables_ipv4_exit_net,
-};
-
 static const struct nf_chain_type filter_ipv4 = {
 	.name		= "filter",
 	.type		= NFT_CHAIN_T_DEFAULT,
@@ -97,26 +40,22 @@ static const struct nf_chain_type filter_ipv4 = {
 			  (1 << NF_INET_FORWARD) |
 			  (1 << NF_INET_PRE_ROUTING) |
 			  (1 << NF_INET_POST_ROUTING),
+	.hooks		= {
+		[NF_INET_LOCAL_IN]	= nft_do_chain_ipv4,
+		[NF_INET_LOCAL_OUT]	= nft_do_chain_ipv4,
+		[NF_INET_FORWARD]	= nft_do_chain_ipv4,
+		[NF_INET_PRE_ROUTING]	= nft_do_chain_ipv4,
+		[NF_INET_POST_ROUTING]	= nft_do_chain_ipv4,
+	},
 };
 
 static int __init nf_tables_ipv4_init(void)
 {
-	int ret;
-
-	ret = nft_register_chain_type(&filter_ipv4);
-	if (ret < 0)
-		return ret;
-
-	ret = register_pernet_subsys(&nf_tables_ipv4_net_ops);
-	if (ret < 0)
-		nft_unregister_chain_type(&filter_ipv4);
-
-	return ret;
+	return nft_register_chain_type(&filter_ipv4);
 }
 
 static void __exit nf_tables_ipv4_exit(void)
 {
-	unregister_pernet_subsys(&nf_tables_ipv4_net_ops);
 	nft_unregister_chain_type(&filter_ipv4);
 }
 
@@ -125,4 +64,4 @@ module_exit(nf_tables_ipv4_exit);
 
 MODULE_LICENSE("GPL");
 MODULE_AUTHOR("Patrick McHardy <kaber@trash.net>");
-MODULE_ALIAS_NFT_FAMILY(AF_INET);
+MODULE_ALIAS_NFT_CHAIN(AF_INET, "filter");
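
With nft_af_info gone, hook bodies now live in the chain type itself, and packet-info setup splits into a family-independent step plus a per-family step. A minimal sketch of an out-of-tree chain type under the reworked API (all names illustrative), registered with nft_register_chain_type() exactly like filter_ipv4 above:

	static unsigned int nft_do_chain_mine(void *priv, struct sk_buff *skb,
					      const struct nf_hook_state *state)
	{
		struct nft_pktinfo pkt;

		nft_set_pktinfo(&pkt, skb, state);	/* family-independent part */
		nft_set_pktinfo_ipv4(&pkt, skb);	/* IPv4 header details */

		return nft_do_chain(&pkt, priv);
	}

	static const struct nf_chain_type chain_mine_ipv4 = {
		.name		= "mychain",
		.type		= NFT_CHAIN_T_DEFAULT,
		.family		= NFPROTO_IPV4,
		.owner		= THIS_MODULE,
		.hook_mask	= 1 << NF_INET_PRE_ROUTING,
		.hooks		= {
			[NF_INET_PRE_ROUTING]	= nft_do_chain_mine,
		},
	};
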
diff --git a/net/ipv4/netfilter/nft_chain_nat_ipv4.c b/net/ipv4/netfilter/nft_chain_nat_ipv4.c
index f5c66a7a4bf2..f2a490981594 100644
--- a/net/ipv4/netfilter/nft_chain_nat_ipv4.c
+++ b/net/ipv4/netfilter/nft_chain_nat_ipv4.c
@@ -33,7 +33,8 @@ static unsigned int nft_nat_do_chain(void *priv,
 {
 	struct nft_pktinfo pkt;
 
-	nft_set_pktinfo_ipv4(&pkt, skb, state);
+	nft_set_pktinfo(&pkt, skb, state);
+	nft_set_pktinfo_ipv4(&pkt, skb);
 
 	return nft_do_chain(&pkt, priv);
 }
diff --git a/net/ipv4/netfilter/nft_chain_route_ipv4.c b/net/ipv4/netfilter/nft_chain_route_ipv4.c
index 30493beb611a..d965c225b9f6 100644
--- a/net/ipv4/netfilter/nft_chain_route_ipv4.c
+++ b/net/ipv4/netfilter/nft_chain_route_ipv4.c
@@ -33,12 +33,8 @@ static unsigned int nf_route_table_hook(void *priv,
 	const struct iphdr *iph;
 	int err;
 
-	/* root is playing with raw sockets. */
-	if (skb->len < sizeof(struct iphdr) ||
-	    ip_hdrlen(skb) < sizeof(struct iphdr))
-		return NF_ACCEPT;
-
-	nft_set_pktinfo_ipv4(&pkt, skb, state);
+	nft_set_pktinfo(&pkt, skb, state);
+	nft_set_pktinfo_ipv4(&pkt, skb);
 
 	mark = skb->mark;
 	iph = ip_hdr(skb);
diff --git a/net/ipv4/proc.c b/net/ipv4/proc.c
index 9f37c4727861..dc5edc8f7564 100644
--- a/net/ipv4/proc.c
+++ b/net/ipv4/proc.c
@@ -83,7 +83,6 @@ static int sockstat_seq_open(struct inode *inode, struct file *file)
 }
 
 static const struct file_operations sockstat_seq_fops = {
-	.owner	 = THIS_MODULE,
 	.open	 = sockstat_seq_open,
 	.read	 = seq_read,
 	.llseek	 = seq_lseek,
@@ -467,7 +466,6 @@ static int snmp_seq_open(struct inode *inode, struct file *file)
 }
 
 static const struct file_operations snmp_seq_fops = {
-	.owner	 = THIS_MODULE,
 	.open	 = snmp_seq_open,
 	.read	 = seq_read,
 	.llseek	 = seq_lseek,
@@ -515,7 +513,6 @@ static int netstat_seq_open(struct inode *inode, struct file *file)
 }
 
 static const struct file_operations netstat_seq_fops = {
-	.owner	 = THIS_MODULE,
 	.open	 = netstat_seq_open,
 	.read	 = seq_read,
 	.llseek	 = seq_lseek,
diff --git a/net/ipv4/raw.c b/net/ipv4/raw.c
index 5e570aa9e43b..7c509697ebc7 100644
--- a/net/ipv4/raw.c
+++ b/net/ipv4/raw.c
@@ -617,8 +617,21 @@ static int raw_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
 			ipc.oif = inet->mc_index;
 		if (!saddr)
 			saddr = inet->mc_addr;
-	} else if (!ipc.oif)
+	} else if (!ipc.oif) {
 		ipc.oif = inet->uc_index;
+	} else if (ipv4_is_lbcast(daddr) && inet->uc_index) {
+	/* oif is set, packet is to local broadcast and
+	 * uc_index is set. oif is most likely set
+		 * by sk_bound_dev_if. If uc_index != oif check if the
+		 * oif is an L3 master and uc_index is an L3 slave.
+		 * If so, we want to allow the send using the uc_index.
+		 */
+		if (ipc.oif != inet->uc_index &&
+		    ipc.oif == l3mdev_master_ifindex_by_index(sock_net(sk),
+							      inet->uc_index)) {
+			ipc.oif = inet->uc_index;
+		}
+	}
 
 	flowi4_init_output(&fl4, ipc.oif, sk->sk_mark, tos,
 			   RT_SCOPE_UNIVERSE,
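
The new branch covers VRF setups: a socket bound to an L3 master device may still select a slave interface for local-broadcast traffic via IP_UNICAST_IF. An illustrative user-space sequence (device names are placeholders, error handling elided; raw sockets require CAP_NET_RAW):

	#include <arpa/inet.h>
	#include <net/if.h>
	#include <netinet/in.h>
	#include <sys/socket.h>

	static int open_vrf_bcast_socket(void)
	{
		int fd = socket(AF_INET, SOCK_RAW, IPPROTO_UDP);
		unsigned int idx = htonl(if_nametoindex("eth1"));	/* L3 slave */
		int one = 1;

		/* Scope the socket to the VRF; this sets sk_bound_dev_if,
		 * which becomes ipc.oif in raw_sendmsg(). */
		setsockopt(fd, SOL_SOCKET, SO_BINDTODEVICE, "vrf0", sizeof("vrf0"));
		/* Pick the slave inside the VRF for egress; IP_UNICAST_IF
		 * takes the ifindex in network byte order. */
		setsockopt(fd, IPPROTO_IP, IP_UNICAST_IF, &idx, sizeof(idx));
		setsockopt(fd, SOL_SOCKET, SO_BROADCAST, &one, sizeof(one));

		return fd;	/* sends to 255.255.255.255 now egress eth1 */
	}
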
@@ -1119,7 +1132,6 @@ static int raw_v4_seq_open(struct inode *inode, struct file *file)
 }
 
 static const struct file_operations raw_seq_fops = {
-	.owner	 = THIS_MODULE,
 	.open	 = raw_v4_seq_open,
 	.read	 = seq_read,
 	.llseek	 = seq_lseek,
diff --git a/net/ipv4/route.c b/net/ipv4/route.c
index 4e153b23bcec..49cc1c1df1ba 100644
--- a/net/ipv4/route.c
+++ b/net/ipv4/route.c
@@ -240,7 +240,6 @@ static int rt_cache_seq_open(struct inode *inode, struct file *file)
 }
 
 static const struct file_operations rt_cache_seq_fops = {
-	.owner	 = THIS_MODULE,
 	.open	 = rt_cache_seq_open,
 	.read	 = seq_read,
 	.llseek	 = seq_lseek,
@@ -331,7 +330,6 @@ static int rt_cpu_seq_open(struct inode *inode, struct file *file)
 }
 
 static const struct file_operations rt_cpu_seq_fops = {
-	.owner	 = THIS_MODULE,
 	.open	 = rt_cpu_seq_open,
 	.read	 = seq_read,
 	.llseek	 = seq_lseek,
@@ -369,7 +367,6 @@ static int rt_acct_proc_open(struct inode *inode, struct file *file)
 }
 
 static const struct file_operations rt_acct_proc_fops = {
-	.owner		= THIS_MODULE,
 	.open		= rt_acct_proc_open,
 	.read		= seq_read,
 	.llseek		= seq_lseek,
@@ -1106,7 +1103,7 @@ void ipv4_sk_update_pmtu(struct sk_buff *skb, struct sock *sk, u32 mtu)
 		new = true;
 	}
 
-	__ip_rt_update_pmtu((struct rtable *) rt->dst.path, &fl4, mtu);
+	__ip_rt_update_pmtu((struct rtable *) xfrm_dst_path(&rt->dst), &fl4, mtu);
 
 	if (!dst_check(&rt->dst, 0)) {
 		if (new)
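
The PMTU hunk above switches from dst.path, which the xfrm flow-cache removal series deleted, to the xfrm_dst_path() accessor; xfrm4_mode_tunnel.c further down makes the matching dst->child to xfrm_dst_child() conversion. A rough sketch of what the accessor does, per the include/net/xfrm.h definition in this series (quoted from memory, so treat the details as approximate):

	static inline struct dst_entry *xfrm_dst_path(const struct dst_entry *dst)
	{
	#ifdef CONFIG_XFRM
		if (dst->xfrm || (dst->flags & DST_XFRM_QUEUE)) {
			const struct xfrm_dst *xdst = (const struct xfrm_dst *)dst;

			return xdst->path;	/* bundle: real routing dst */
		}
	#endif
		return (struct dst_entry *)dst;	/* plain dst is its own path */
	}
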
diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
index 1b38b4282cc9..c059aa7df0a9 100644
--- a/net/ipv4/tcp.c
+++ b/net/ipv4/tcp.c
@@ -283,8 +283,6 @@
 #include <asm/ioctls.h>
 #include <net/busy_poll.h>
 
-#include <trace/events/tcp.h>
-
 struct percpu_counter tcp_orphan_count;
 EXPORT_SYMBOL_GPL(tcp_orphan_count);
 
@@ -465,7 +463,7 @@ void tcp_init_transfer(struct sock *sk, int bpf_op)
 	tcp_mtup_init(sk);
 	icsk->icsk_af_ops->rebuild_header(sk);
 	tcp_init_metrics(sk);
-	tcp_call_bpf(sk, bpf_op);
+	tcp_call_bpf(sk, bpf_op, 0, NULL);
 	tcp_init_congestion_control(sk);
 	tcp_init_buffer_space(sk);
 }
@@ -500,11 +498,9 @@ __poll_t tcp_poll(struct file *file, struct socket *sock, poll_table *wait)
 	const struct tcp_sock *tp = tcp_sk(sk);
 	int state;
 
-	sock_rps_record_flow(sk);
-
 	sock_poll_wait(file, sk_sleep(sk), wait);
 
-	state = sk_state_load(sk);
+	state = inet_sk_state_load(sk);
 	if (state == TCP_LISTEN)
 		return inet_csk_listen_poll(sk);
 
@@ -1106,12 +1102,15 @@ static int linear_payload_sz(bool first_skb)
 	return 0;
 }
 
-static int select_size(const struct sock *sk, bool sg, bool first_skb)
+static int select_size(const struct sock *sk, bool sg, bool first_skb, bool zc)
 {
 	const struct tcp_sock *tp = tcp_sk(sk);
 	int tmp = tp->mss_cache;
 
 	if (sg) {
+		if (zc)
+			return 0;
+
 		if (sk_can_gso(sk)) {
 			tmp = linear_payload_sz(first_skb);
 		} else {
@@ -1188,7 +1187,7 @@ int tcp_sendmsg_locked(struct sock *sk, struct msghdr *msg, size_t size)
 	int flags, err, copied = 0;
 	int mss_now = 0, size_goal, copied_syn = 0;
 	bool process_backlog = false;
-	bool sg;
+	bool sg, zc = false;
 	long timeo;
 
 	flags = msg->msg_flags;
@@ -1206,7 +1205,8 @@ int tcp_sendmsg_locked(struct sock *sk, struct msghdr *msg, size_t size)
 			goto out_err;
 		}
 
-		if (!(sk_check_csum_caps(sk) && sk->sk_route_caps & NETIF_F_SG))
+		zc = sk_check_csum_caps(sk) && sk->sk_route_caps & NETIF_F_SG;
+		if (!zc)
 			uarg->zerocopy = 0;
 	}
 
@@ -1283,6 +1283,7 @@ restart:
 
 		if (copy <= 0 || !tcp_skb_can_collapse_to(skb)) {
 			bool first_skb;
+			int linear;
 
 new_segment:
 			/* Allocate new segment. If the interface is SG,
@@ -1296,9 +1297,8 @@ new_segment:
 				goto restart;
 			}
 			first_skb = tcp_rtx_and_write_queues_empty(sk);
-			skb = sk_stream_alloc_skb(sk,
-						  select_size(sk, sg, first_skb),
-						  sk->sk_allocation,
+			linear = select_size(sk, sg, first_skb, zc);
+			skb = sk_stream_alloc_skb(sk, linear, sk->sk_allocation,
 						  first_skb);
 			if (!skb)
 				goto wait_for_memory;
@@ -1327,13 +1327,13 @@ new_segment:
 			copy = msg_data_left(msg);
 
 		/* Where to copy to? */
-		if (skb_availroom(skb) > 0) {
+		if (skb_availroom(skb) > 0 && !zc) {
 			/* We have some space in skb head. Superb! */
 			copy = min_t(int, copy, skb_availroom(skb));
 			err = skb_add_data_nocache(sk, skb, &msg->msg_iter, copy);
 			if (err)
 				goto do_fault;
-		} else if (!uarg || !uarg->zerocopy) {
+		} else if (!zc) {
 			bool merge = true;
 			int i = skb_shinfo(skb)->nr_frags;
 			struct page_frag *pfrag = sk_page_frag(sk);
@@ -1373,8 +1373,10 @@ new_segment:
 			pfrag->offset += copy;
 		} else {
 			err = skb_zerocopy_iter_stream(sk, skb, msg, copy, uarg);
-			if (err == -EMSGSIZE || err == -EEXIST)
+			if (err == -EMSGSIZE || err == -EEXIST) {
+				tcp_mark_push(tp, skb);
 				goto new_segment;
+			}
 			if (err < 0)
 				goto do_error;
 			copy = err;
@@ -1731,8 +1733,8 @@ static void tcp_update_recv_tstamps(struct sk_buff *skb,
 }
 
 /* Similar to __sock_recv_timestamp, but does not require an skb */
-void tcp_recv_timestamp(struct msghdr *msg, const struct sock *sk,
-			struct scm_timestamping *tss)
+static void tcp_recv_timestamp(struct msghdr *msg, const struct sock *sk,
+			       struct scm_timestamping *tss)
 {
 	struct timeval tv;
 	bool has_timestamping = false;
@@ -2040,7 +2042,29 @@ void tcp_set_state(struct sock *sk, int state)
 {
 	int oldstate = sk->sk_state;
 
-	trace_tcp_set_state(sk, oldstate, state);
+	/* We defined a new enum for TCP states that are exported in BPF
+ * so as not to force the internal TCP states to be frozen. The
+	 * following checks will detect if an internal state value ever
+	 * differs from the BPF value. If this ever happens, then we will
+	 * need to remap the internal value to the BPF value before calling
+	 * tcp_call_bpf_2arg.
+	 */
+	BUILD_BUG_ON((int)BPF_TCP_ESTABLISHED != (int)TCP_ESTABLISHED);
+	BUILD_BUG_ON((int)BPF_TCP_SYN_SENT != (int)TCP_SYN_SENT);
+	BUILD_BUG_ON((int)BPF_TCP_SYN_RECV != (int)TCP_SYN_RECV);
+	BUILD_BUG_ON((int)BPF_TCP_FIN_WAIT1 != (int)TCP_FIN_WAIT1);
+	BUILD_BUG_ON((int)BPF_TCP_FIN_WAIT2 != (int)TCP_FIN_WAIT2);
+	BUILD_BUG_ON((int)BPF_TCP_TIME_WAIT != (int)TCP_TIME_WAIT);
+	BUILD_BUG_ON((int)BPF_TCP_CLOSE != (int)TCP_CLOSE);
+	BUILD_BUG_ON((int)BPF_TCP_CLOSE_WAIT != (int)TCP_CLOSE_WAIT);
+	BUILD_BUG_ON((int)BPF_TCP_LAST_ACK != (int)TCP_LAST_ACK);
+	BUILD_BUG_ON((int)BPF_TCP_LISTEN != (int)TCP_LISTEN);
+	BUILD_BUG_ON((int)BPF_TCP_CLOSING != (int)TCP_CLOSING);
+	BUILD_BUG_ON((int)BPF_TCP_NEW_SYN_RECV != (int)TCP_NEW_SYN_RECV);
+	BUILD_BUG_ON((int)BPF_TCP_MAX_STATES != (int)TCP_MAX_STATES);
+
+	if (BPF_SOCK_OPS_TEST_FLAG(tcp_sk(sk), BPF_SOCK_OPS_STATE_CB_FLAG))
+		tcp_call_bpf_2arg(sk, BPF_SOCK_OPS_STATE_CB, oldstate, state);
 
 	switch (state) {
 	case TCP_ESTABLISHED:
@@ -2065,7 +2089,7 @@ void tcp_set_state(struct sock *sk, int state)
 	/* Change state AFTER socket is unhashed to avoid closed
 	 * socket sitting in hash tables.
 	 */
-	sk_state_store(sk, state);
+	inet_sk_state_store(sk, state);
 
 #ifdef STATE_TRACE
 	SOCK_DEBUG(sk, "TCP sk=%p, State %s -> %s\n", sk, statename[oldstate], statename[state]);
@@ -2434,6 +2458,12 @@ int tcp_disconnect(struct sock *sk, int flags)
 
 	WARN_ON(inet->inet_num && !icsk->icsk_bind_hash);
 
+	if (sk->sk_frag.page) {
+		put_page(sk->sk_frag.page);
+		sk->sk_frag.page = NULL;
+		sk->sk_frag.offset = 0;
+	}
+
 	sk->sk_error_report(sk);
 	return err;
 }
@@ -2923,7 +2953,7 @@ void tcp_get_info(struct sock *sk, struct tcp_info *info)
 	if (sk->sk_type != SOCK_STREAM)
 		return;
 
-	info->tcpi_state = sk_state_load(sk);
+	info->tcpi_state = inet_sk_state_load(sk);
 
 	/* Report meaningful fields for all TCP states, including listeners */
 	rate = READ_ONCE(sk->sk_pacing_rate);
@@ -3581,6 +3611,9 @@ void __init tcp_init(void)
 	percpu_counter_init(&tcp_sockets_allocated, 0, GFP_KERNEL);
 	percpu_counter_init(&tcp_orphan_count, 0, GFP_KERNEL);
 	inet_hashinfo_init(&tcp_hashinfo);
+	inet_hashinfo2_init(&tcp_hashinfo, "tcp_listen_portaddr_hash",
+			    thash_entries, 21,  /* one slot per 2 MB */
+			    0, 64 * 1024);
 	tcp_hashinfo.bind_bucket_cachep =
 		kmem_cache_create("tcp_bind_bucket",
 				  sizeof(struct inet_bind_bucket), 0,
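
Three tcp.c changes above hang together: tcp_call_bpf() grew an argument list, tcp_set_state() now reports transitions to a sockops program when BPF_SOCK_OPS_STATE_CB_FLAG is set, and tcp_init() sets up the new listener hash. A sketch of a BPF program consuming the state callback, plus the retransmit and RTO callbacks added in tcp_output.c and tcp_timer.c below; it assumes the bpf_helpers.h shim used by the kernel samples:

	#include <uapi/linux/bpf.h>
	#include "bpf_helpers.h"

	SEC("sockops")
	int watch_tcp(struct bpf_sock_ops *skops)
	{
		switch (skops->op) {
		case BPF_SOCK_OPS_ACTIVE_ESTABLISHED_CB:
		case BPF_SOCK_OPS_PASSIVE_ESTABLISHED_CB:
			/* opt in per socket; callbacks are off by default */
			bpf_sock_ops_cb_flags_set(skops,
						  BPF_SOCK_OPS_STATE_CB_FLAG |
						  BPF_SOCK_OPS_RETRANS_CB_FLAG |
						  BPF_SOCK_OPS_RTO_CB_FLAG);
			break;
		case BPF_SOCK_OPS_STATE_CB: {
			/* args[0] = old state, args[1] = new state (BPF_TCP_*) */
			char msg[] = "tcp close\n";

			if (skops->args[1] == BPF_TCP_CLOSE)
				bpf_trace_printk(msg, sizeof(msg));
			break;
		}
		case BPF_SOCK_OPS_RETRANS_CB: /* args: seq, segs, err */
		case BPF_SOCK_OPS_RTO_CB:     /* args: retransmits, rto, expired */
			break;
		}
		return 1;
	}

	char _license[] SEC("license") = "GPL";
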
diff --git a/net/ipv4/tcp_bbr.c b/net/ipv4/tcp_bbr.c
index 8322f26e770e..785712be5b0d 100644
--- a/net/ipv4/tcp_bbr.c
+++ b/net/ipv4/tcp_bbr.c
@@ -766,7 +766,8 @@ static void bbr_update_min_rtt(struct sock *sk, const struct rate_sample *rs)
 	filter_expired = after(tcp_jiffies32,
 			       bbr->min_rtt_stamp + bbr_min_rtt_win_sec * HZ);
 	if (rs->rtt_us >= 0 &&
-	    (rs->rtt_us <= bbr->min_rtt_us || filter_expired)) {
+	    (rs->rtt_us <= bbr->min_rtt_us ||
+	     (filter_expired && !rs->is_ack_delayed))) {
 		bbr->min_rtt_us = rs->rtt_us;
 		bbr->min_rtt_stamp = tcp_jiffies32;
 	}
diff --git a/net/ipv4/tcp_diag.c b/net/ipv4/tcp_diag.c
index abbf0edcf6c2..81148f7a2323 100644
--- a/net/ipv4/tcp_diag.c
+++ b/net/ipv4/tcp_diag.c
@@ -24,7 +24,7 @@ static void tcp_diag_get_info(struct sock *sk, struct inet_diag_msg *r,
 {
 	struct tcp_info *info = _info;
 
-	if (sk_state_load(sk) == TCP_LISTEN) {
+	if (inet_sk_state_load(sk) == TCP_LISTEN) {
 		r->idiag_rqueue = sk->sk_ack_backlog;
 		r->idiag_wqueue = sk->sk_max_ack_backlog;
 	} else if (sk->sk_type == SOCK_STREAM) {
diff --git a/net/ipv4/tcp_fastopen.c b/net/ipv4/tcp_fastopen.c
index 78c192ee03a4..018a48477355 100644
--- a/net/ipv4/tcp_fastopen.c
+++ b/net/ipv4/tcp_fastopen.c
@@ -379,18 +379,9 @@ fastopen:
 bool tcp_fastopen_cookie_check(struct sock *sk, u16 *mss,
 			       struct tcp_fastopen_cookie *cookie)
 {
-	unsigned long last_syn_loss = 0;
 	const struct dst_entry *dst;
-	int syn_loss = 0;
 
-	tcp_fastopen_cache_get(sk, mss, cookie, &syn_loss, &last_syn_loss);
-
-	/* Recurring FO SYN losses: no cookie or data in SYN */
-	if (syn_loss > 1 &&
-	    time_before(jiffies, last_syn_loss + (60*HZ << syn_loss))) {
-		cookie->len = -1;
-		return false;
-	}
+	tcp_fastopen_cache_get(sk, mss, cookie);
 
 	/* Firewall blackhole issue check */
 	if (tcp_fastopen_active_should_disable(sk)) {
@@ -448,6 +439,8 @@ EXPORT_SYMBOL(tcp_fastopen_defer_connect);
  * following circumstances:
  *   1. client side TFO socket receives out of order FIN
  *   2. client side TFO socket receives out of order RST
+ *   3. client side TFO socket has timed out three times consecutively during
+ *      or after the handshake
  * We disable active side TFO globally for 1hr at first. Then if it
  * happens again, we disable it for 2h, then 4h, 8h, ...
  * And we reset the timeout back to 1hr when we see a successful active
@@ -524,3 +517,20 @@ void tcp_fastopen_active_disable_ofo_check(struct sock *sk)
 		dst_release(dst);
 	}
 }
+
+void tcp_fastopen_active_detect_blackhole(struct sock *sk, bool expired)
+{
+	u32 timeouts = inet_csk(sk)->icsk_retransmits;
+	struct tcp_sock *tp = tcp_sk(sk);
+
+	/* Broken middle-boxes may black-hole Fast Open connections during or
+	 * even after the handshake. Be extremely conservative and pause
+	 * Fast Open globally after hitting the third consecutive timeout or
+	 * exceeding the configured timeout limit.
+	 */
+	if ((tp->syn_fastopen || tp->syn_data || tp->syn_data_acked) &&
+	    (timeouts == 2 || (timeouts < 2 && expired))) {
+		tcp_fastopen_active_disable(sk);
+		NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPFASTOPENACTIVEFAIL);
+	}
+}
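
For reference, the pause that tcp_fastopen_active_disable() starts grows exponentially with each recurrence; with the default net.ipv4.tcp_fastopen_blackhole_timeout_sec of 3600 that is the 1h/2h/4h schedule from the comment above. A toy calculation (the base value and the fact that the kernel caps the shift are assumptions here):

	/* Illustration only: pause length after the Nth blackhole event. */
	static unsigned int tfo_pause_secs(unsigned int nth)
	{
		return 3600u << (nth - 1);	/* 1 h, 2 h, 4 h, ... */
	}
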
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index 45f750e85714..cfa51cfd2d99 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -97,6 +97,7 @@ int sysctl_tcp_max_orphans __read_mostly = NR_FILE;
 #define FLAG_SACK_RENEGING	0x2000 /* snd_una advanced to a sacked seq */
 #define FLAG_UPDATE_TS_RECENT	0x4000 /* tcp_replace_ts_recent() */
 #define FLAG_NO_CHALLENGE_ACK	0x8000 /* do not call tcp_send_challenge_ack()	*/
+#define FLAG_ACK_MAYBE_DELAYED	0x10000 /* Likely a delayed ACK */
 
 #define FLAG_ACKED		(FLAG_DATA_ACKED|FLAG_SYN_ACKED)
 #define FLAG_NOT_DUP		(FLAG_DATA|FLAG_WIN_UPDATE|FLAG_ACKED)
@@ -578,8 +579,8 @@ static inline void tcp_rcv_rtt_measure_ts(struct sock *sk,
 void tcp_rcv_space_adjust(struct sock *sk)
 {
 	struct tcp_sock *tp = tcp_sk(sk);
+	u32 copied;
 	int time;
-	int copied;
 
 	tcp_mstamp_refresh(tp);
 	time = tcp_stamp_us_delta(tp->tcp_mstamp, tp->rcvq_space.time);
@@ -602,38 +603,31 @@ void tcp_rcv_space_adjust(struct sock *sk)
 
 	if (sock_net(sk)->ipv4.sysctl_tcp_moderate_rcvbuf &&
 	    !(sk->sk_userlocks & SOCK_RCVBUF_LOCK)) {
-		int rcvwin, rcvmem, rcvbuf;
+		int rcvmem, rcvbuf;
+		u64 rcvwin, grow;
 
 		/* minimal window to cope with packet losses, assuming
 		 * steady state. Add some cushion because of small variations.
 		 */
-		rcvwin = (copied << 1) + 16 * tp->advmss;
+		rcvwin = ((u64)copied << 1) + 16 * tp->advmss;
 
-		/* If rate increased by 25%,
-		 *	assume slow start, rcvwin = 3 * copied
-		 * If rate increased by 50%,
-		 *	assume sender can use 2x growth, rcvwin = 4 * copied
-		 */
-		if (copied >=
-		    tp->rcvq_space.space + (tp->rcvq_space.space >> 2)) {
-			if (copied >=
-			    tp->rcvq_space.space + (tp->rcvq_space.space >> 1))
-				rcvwin <<= 1;
-			else
-				rcvwin += (rcvwin >> 1);
-		}
+		/* Accommodate the sender rate increase (e.g. slow start) */
+		grow = rcvwin * (copied - tp->rcvq_space.space);
+		do_div(grow, tp->rcvq_space.space);
+		rcvwin += (grow << 1);
 
 		rcvmem = SKB_TRUESIZE(tp->advmss + MAX_TCP_HEADER);
 		while (tcp_win_from_space(sk, rcvmem) < tp->advmss)
 			rcvmem += 128;
 
-		rcvbuf = min(rcvwin / tp->advmss * rcvmem,
-			     sock_net(sk)->ipv4.sysctl_tcp_rmem[2]);
+		do_div(rcvwin, tp->advmss);
+		rcvbuf = min_t(u64, rcvwin * rcvmem,
+			       sock_net(sk)->ipv4.sysctl_tcp_rmem[2]);
 		if (rcvbuf > sk->sk_rcvbuf) {
 			sk->sk_rcvbuf = rcvbuf;
 
 			/* Make the window clamp follow along.  */
-			tp->window_clamp = rcvwin;
+			tp->window_clamp = tcp_win_from_space(sk, rcvbuf);
 		}
 	}
 	tp->rcvq_space.space = copied;
@@ -2864,11 +2858,18 @@ static void tcp_fastretrans_alert(struct sock *sk, const u32 prior_snd_una,
 	*rexmit = REXMIT_LOST;
 }
 
-static void tcp_update_rtt_min(struct sock *sk, u32 rtt_us)
+static void tcp_update_rtt_min(struct sock *sk, u32 rtt_us, const int flag)
 {
 	u32 wlen = sock_net(sk)->ipv4.sysctl_tcp_min_rtt_wlen * HZ;
 	struct tcp_sock *tp = tcp_sk(sk);
 
+	if ((flag & FLAG_ACK_MAYBE_DELAYED) && rtt_us > tcp_min_rtt(tp)) {
+		/* If the remote keeps returning delayed ACKs, eventually
+		 * the min filter would pick it up and overestimate the
+		 * prop. delay when it expires. Skip suspected delayed ACKs.
+		 */
+		return;
+	}
 	minmax_running_min(&tp->rtt_min, wlen, tcp_jiffies32,
 			   rtt_us ? : jiffies_to_usecs(1));
 }
@@ -2908,7 +2909,7 @@ static bool tcp_ack_update_rtt(struct sock *sk, const int flag,
 	 * always taken together with ACK, SACK, or TS-opts. Any negative
 	 * values will be skipped with the seq_rtt_us < 0 check above.
 	 */
-	tcp_update_rtt_min(sk, ca_rtt_us);
+	tcp_update_rtt_min(sk, ca_rtt_us, flag);
 	tcp_rtt_estimator(sk, seq_rtt_us);
 	tcp_set_rto(sk);
 
@@ -3132,6 +3133,17 @@ static int tcp_clean_rtx_queue(struct sock *sk, u32 prior_fack,
 	if (likely(first_ackt) && !(flag & FLAG_RETRANS_DATA_ACKED)) {
 		seq_rtt_us = tcp_stamp_us_delta(tp->tcp_mstamp, first_ackt);
 		ca_rtt_us = tcp_stamp_us_delta(tp->tcp_mstamp, last_ackt);
+
+		if (pkts_acked == 1 && last_in_flight < tp->mss_cache &&
+		    last_in_flight && !prior_sacked && fully_acked &&
+		    sack->rate->prior_delivered + 1 == tp->delivered &&
+		    !(flag & (FLAG_CA_ALERT | FLAG_SYN_ACKED))) {
+			/* Conservatively mark a delayed ACK. It's typically
+			 * from a lone runt packet over the round trip to
+			 * a receiver w/o out-of-order or CE events.
+			 */
+			flag |= FLAG_ACK_MAYBE_DELAYED;
+		}
 	}
 	if (sack->first_sackt) {
 		sack_rtt_us = tcp_stamp_us_delta(tp->tcp_mstamp, sack->first_sackt);
@@ -3621,6 +3633,7 @@ static int tcp_ack(struct sock *sk, const struct sk_buff *skb, int flag)
 
 	delivered = tp->delivered - delivered;	/* freshly ACKed or SACKed */
 	lost = tp->lost - lost;			/* freshly marked lost */
+	rs.is_ack_delayed = !!(flag & FLAG_ACK_MAYBE_DELAYED);
 	tcp_rate_gen(sk, delivered, lost, is_sack_reneg, sack_state.rate);
 	tcp_cong_control(sk, ack, delivered, flag, sack_state.rate);
 	tcp_xmit_recovery(sk, rexmit);
@@ -5306,6 +5319,9 @@ void tcp_rcv_established(struct sock *sk, struct sk_buff *skb,
 	unsigned int len = skb->len;
 	struct tcp_sock *tp = tcp_sk(sk);
 
+	/* TCP congestion window tracking */
+	trace_tcp_probe(sk, skb);
+
 	tcp_mstamp_refresh(tp);
 	if (unlikely(!sk->sk_rx_dst))
 		inet_csk(sk)->icsk_af_ops->sk_rx_dst_set(sk, skb);
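
The rewritten tcp_rcv_space_adjust() above replaces the two-step 25%/50% heuristic with a proportional term computed in 64-bit. Plugging in round numbers shows it degenerates to the old behaviour at the old break points; for instance, a 50% rate increase still doubles the advertised window. Illustrative values only, with advmss assumed to be 1448 bytes:

	#include <inttypes.h>
	#include <stdio.h>

	int main(void)
	{
		uint64_t space  = 100000;	/* bytes copied last epoch */
		uint64_t copied = 150000;	/* +50% this epoch */
		uint64_t rcvwin = (copied << 1) + 16 * 1448;
		uint64_t grow   = rcvwin * (copied - space) / space;

		rcvwin += grow << 1;	/* grow = rcvwin/2, so rcvwin doubles */
		printf("rcvwin = %" PRIu64 "\n", rcvwin);	/* 646336 */
		return 0;
	}
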
diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
index 94e28350f420..95738aa0d8a6 100644
--- a/net/ipv4/tcp_ipv4.c
+++ b/net/ipv4/tcp_ipv4.c
@@ -1911,7 +1911,7 @@ void tcp_v4_destroy_sock(struct sock *sk)
 	/* Clean up the MD5 key list, if any */
 	if (tp->md5sig_info) {
 		tcp_clear_md5_list(sk);
-		kfree_rcu(tp->md5sig_info, rcu);
+		kfree_rcu(rcu_dereference_protected(tp->md5sig_info, 1), rcu);
 		tp->md5sig_info = NULL;
 	}
 #endif
@@ -2281,7 +2281,7 @@ static void get_tcp4_sock(struct sock *sk, struct seq_file *f, int i)
 		timer_expires = jiffies;
 	}
 
-	state = sk_state_load(sk);
+	state = inet_sk_state_load(sk);
 	if (state == TCP_LISTEN)
 		rx_queue = sk->sk_ack_backlog;
 	else
@@ -2358,7 +2358,6 @@ out:
 }
 
 static const struct file_operations tcp_afinfo_seq_fops = {
-	.owner   = THIS_MODULE,
 	.open    = tcp_seq_open,
 	.read    = seq_read,
 	.llseek  = seq_lseek,
diff --git a/net/ipv4/tcp_metrics.c b/net/ipv4/tcp_metrics.c
index 7097f92d16e5..03b51cdcc731 100644
--- a/net/ipv4/tcp_metrics.c
+++ b/net/ipv4/tcp_metrics.c
@@ -546,8 +546,7 @@ bool tcp_peer_is_proven(struct request_sock *req, struct dst_entry *dst)
 static DEFINE_SEQLOCK(fastopen_seqlock);
 
 void tcp_fastopen_cache_get(struct sock *sk, u16 *mss,
-			    struct tcp_fastopen_cookie *cookie,
-			    int *syn_loss, unsigned long *last_syn_loss)
+			    struct tcp_fastopen_cookie *cookie)
 {
 	struct tcp_metrics_block *tm;
 
@@ -564,8 +563,6 @@ void tcp_fastopen_cache_get(struct sock *sk, u16 *mss,
 			*cookie = tfom->cookie;
 			if (cookie->len <= 0 && tfom->try_exp == 1)
 				cookie->exp = true;
-			*syn_loss = tfom->syn_loss;
-			*last_syn_loss = *syn_loss ? tfom->last_syn_loss : 0;
 		} while (read_seqretry(&fastopen_seqlock, seq));
 	}
 	rcu_read_unlock();
@@ -895,7 +892,7 @@ static void tcp_metrics_flush_all(struct net *net)
 		pp = &hb->chain;
 		for (tm = deref_locked(*pp); tm; tm = deref_locked(*pp)) {
 			match = net ? net_eq(tm_net(tm), net) :
-				!atomic_read(&tm_net(tm)->count);
+				!refcount_read(&tm_net(tm)->count);
 			if (match) {
 				*pp = tm->tcpm_next;
 				kfree_rcu(tm, rcu_head);
diff --git a/net/ipv4/tcp_minisocks.c b/net/ipv4/tcp_minisocks.c
index b079b619b60c..a8384b0c11f8 100644
--- a/net/ipv4/tcp_minisocks.c
+++ b/net/ipv4/tcp_minisocks.c
@@ -316,9 +316,10 @@ void tcp_time_wait(struct sock *sk, int state, int timeo)
 		 */
 		local_bh_disable();
 		inet_twsk_schedule(tw, timeo);
-		/* Linkage updates. */
-		__inet_twsk_hashdance(tw, sk, &tcp_hashinfo);
-		inet_twsk_put(tw);
+		/* Linkage updates.
+		 * Note that access to tw after this point is illegal.
+		 */
+		inet_twsk_hashdance(tw, sk, &tcp_hashinfo);
 		local_bh_enable();
 	} else {
 		/* Sorry, if we're out of memory, just CLOSE this
diff --git a/net/ipv4/tcp_nv.c b/net/ipv4/tcp_nv.c
index 0b5a05bd82e3..764298e52577 100644
--- a/net/ipv4/tcp_nv.c
+++ b/net/ipv4/tcp_nv.c
@@ -146,7 +146,7 @@ static void tcpnv_init(struct sock *sk)
 	 * within a datacenter, where we have reasonable estimates of
 	 * RTTs
 	 */
-	base_rtt = tcp_call_bpf(sk, BPF_SOCK_OPS_BASE_RTT);
+	base_rtt = tcp_call_bpf(sk, BPF_SOCK_OPS_BASE_RTT, 0, NULL);
 	if (base_rtt > 0) {
 		ca->nv_base_rtt = base_rtt;
 		ca->nv_lower_bound_rtt = (base_rtt * 205) >> 8; /* 80% */
@@ -364,7 +364,7 @@ static void tcpnv_acked(struct sock *sk, const struct ack_sample *sample)
 		 */
 		cwnd_by_slope = (u32)
 			div64_u64(((u64)ca->nv_rtt_max_rate) * ca->nv_min_rtt,
-				  (u64)(80000 * tp->mss_cache));
+				  80000ULL * tp->mss_cache);
 		max_win = cwnd_by_slope + nv_pad;
 
 		/* If cwnd > max_win, decrease cwnd
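
tcp_call_bpf() grew two trailing parameters in this series, an argument count and an array that is copied into the args[] the sockops program sees, which is why call sites such as tcpnv_init() above gain ", 0, NULL". The _2arg/_3arg variants used elsewhere in this diff are thin wrappers; roughly (kernel-side fragment for illustration):

	static void report_retrans(struct sock *sk, u32 seq, u32 segs, u32 err)
	{
		u32 args[3] = { seq, segs, err };

		/* same as tcp_call_bpf_3arg(sk, op, seq, segs, err) */
		tcp_call_bpf(sk, BPF_SOCK_OPS_RETRANS_CB, 3, args);
	}
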
diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
index a4d214c7b506..e9f985e42405 100644
--- a/net/ipv4/tcp_output.c
+++ b/net/ipv4/tcp_output.c
@@ -1944,7 +1944,8 @@ static bool tcp_tso_should_defer(struct sock *sk, struct sk_buff *skb,
 
 	in_flight = tcp_packets_in_flight(tp);
 
-	BUG_ON(tcp_skb_pcount(skb) <= 1 || (tp->snd_cwnd <= in_flight));
+	BUG_ON(tcp_skb_pcount(skb) <= 1);
+	BUG_ON(tp->snd_cwnd <= in_flight);
 
 	send_win = tcp_wnd_end(tp) - TCP_SKB_CB(skb)->seq;
 
@@ -2414,15 +2415,12 @@ bool tcp_schedule_loss_probe(struct sock *sk, bool advancing_rto)
 
 	early_retrans = sock_net(sk)->ipv4.sysctl_tcp_early_retrans;
 	/* Schedule a loss probe in 2*RTT for SACK capable connections
-	 * in Open state, that are either limited by cwnd or application.
+	 * not in loss recovery, that are either limited by cwnd or application.
 	 */
 	if ((early_retrans != 3 && early_retrans != 4) ||
 	    !tp->packets_out || !tcp_is_sack(tp) ||
-	    icsk->icsk_ca_state != TCP_CA_Open)
-		return false;
-
-	if ((tp->snd_cwnd > tcp_packets_in_flight(tp)) &&
-	     !tcp_write_queue_empty(sk))
+	    (icsk->icsk_ca_state != TCP_CA_Open &&
+	     icsk->icsk_ca_state != TCP_CA_CWR))
 		return false;
 
 	/* Probe timeout is 2*rtt. Add minimum RTO to account
@@ -2907,6 +2905,10 @@ int __tcp_retransmit_skb(struct sock *sk, struct sk_buff *skb, int segs)
 		err = tcp_transmit_skb(sk, skb, 1, GFP_ATOMIC);
 	}
 
+	if (BPF_SOCK_OPS_TEST_FLAG(tp, BPF_SOCK_OPS_RETRANS_CB_FLAG))
+		tcp_call_bpf_3arg(sk, BPF_SOCK_OPS_RETRANS_CB,
+				  TCP_SKB_CB(skb)->seq, segs, err);
+
 	if (likely(!err)) {
 		TCP_SKB_CB(skb)->sacked |= TCPCB_EVER_RETRANS;
 		trace_tcp_retransmit_skb(sk, skb);
@@ -3471,7 +3473,7 @@ int tcp_connect(struct sock *sk)
 	struct sk_buff *buff;
 	int err;
 
-	tcp_call_bpf(sk, BPF_SOCK_OPS_TCP_CONNECT_CB);
+	tcp_call_bpf(sk, BPF_SOCK_OPS_TCP_CONNECT_CB, 0, NULL);
 
 	if (inet_csk(sk)->icsk_af_ops->rebuild_header(sk))
 		return -EHOSTUNREACH; /* Routing failure or similar. */
diff --git a/net/ipv4/tcp_probe.c b/net/ipv4/tcp_probe.c
deleted file mode 100644
index 697f4c67b2e3..000000000000
--- a/net/ipv4/tcp_probe.c
+++ /dev/null
@@ -1,301 +0,0 @@
-/*
- * tcpprobe - Observe the TCP flow with kprobes.
- *
- * The idea for this came from Werner Almesberger's umlsim
- * Copyright (C) 2004, Stephen Hemminger <shemminger@osdl.org>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
- */
-
-#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
-
-#include <linux/kernel.h>
-#include <linux/kprobes.h>
-#include <linux/socket.h>
-#include <linux/tcp.h>
-#include <linux/slab.h>
-#include <linux/proc_fs.h>
-#include <linux/module.h>
-#include <linux/ktime.h>
-#include <linux/time.h>
-#include <net/net_namespace.h>
-
-#include <net/tcp.h>
-
-MODULE_AUTHOR("Stephen Hemminger <shemminger@linux-foundation.org>");
-MODULE_DESCRIPTION("TCP cwnd snooper");
-MODULE_LICENSE("GPL");
-MODULE_VERSION("1.1");
-
-static int port __read_mostly;
-MODULE_PARM_DESC(port, "Port to match (0=all)");
-module_param(port, int, 0);
-
-static unsigned int bufsize __read_mostly = 4096;
-MODULE_PARM_DESC(bufsize, "Log buffer size in packets (4096)");
-module_param(bufsize, uint, 0);
-
-static unsigned int fwmark __read_mostly;
-MODULE_PARM_DESC(fwmark, "skb mark to match (0=no mark)");
-module_param(fwmark, uint, 0);
-
-static int full __read_mostly;
-MODULE_PARM_DESC(full, "Full log (1=every ack packet received,  0=only cwnd changes)");
-module_param(full, int, 0);
-
-static const char procname[] = "tcpprobe";
-
-struct tcp_log {
-	ktime_t tstamp;
-	union {
-		struct sockaddr		raw;
-		struct sockaddr_in	v4;
-		struct sockaddr_in6	v6;
-	}	src, dst;
-	u16	length;
-	u32	snd_nxt;
-	u32	snd_una;
-	u32	snd_wnd;
-	u32	rcv_wnd;
-	u32	snd_cwnd;
-	u32	ssthresh;
-	u32	srtt;
-};
-
-static struct {
-	spinlock_t	lock;
-	wait_queue_head_t wait;
-	ktime_t		start;
-	u32		lastcwnd;
-
-	unsigned long	head, tail;
-	struct tcp_log	*log;
-} tcp_probe;
-
-static inline int tcp_probe_used(void)
-{
-	return (tcp_probe.head - tcp_probe.tail) & (bufsize - 1);
-}
-
-static inline int tcp_probe_avail(void)
-{
-	return bufsize - tcp_probe_used() - 1;
-}
-
-#define tcp_probe_copy_fl_to_si4(inet, si4, mem)		\
-	do {							\
-		si4.sin_family = AF_INET;			\
-		si4.sin_port = inet->inet_##mem##port;		\
-		si4.sin_addr.s_addr = inet->inet_##mem##addr;	\
-	} while (0)						\
-
-/*
- * Hook inserted to be called before each receive packet.
- * Note: arguments must match tcp_rcv_established()!
- */
-static void jtcp_rcv_established(struct sock *sk, struct sk_buff *skb,
-				 const struct tcphdr *th)
-{
-	unsigned int len = skb->len;
-	const struct tcp_sock *tp = tcp_sk(sk);
-	const struct inet_sock *inet = inet_sk(sk);
-
-	/* Only update if port or skb mark matches */
-	if (((port == 0 && fwmark == 0) ||
-	     ntohs(inet->inet_dport) == port ||
-	     ntohs(inet->inet_sport) == port ||
-	     (fwmark > 0 && skb->mark == fwmark)) &&
-	    (full || tp->snd_cwnd != tcp_probe.lastcwnd)) {
-
-		spin_lock(&tcp_probe.lock);
-		/* If log fills, just silently drop */
-		if (tcp_probe_avail() > 1) {
-			struct tcp_log *p = tcp_probe.log + tcp_probe.head;
-
-			p->tstamp = ktime_get();
-			switch (sk->sk_family) {
-			case AF_INET:
-				tcp_probe_copy_fl_to_si4(inet, p->src.v4, s);
-				tcp_probe_copy_fl_to_si4(inet, p->dst.v4, d);
-				break;
-			case AF_INET6:
-				memset(&p->src.v6, 0, sizeof(p->src.v6));
-				memset(&p->dst.v6, 0, sizeof(p->dst.v6));
-#if IS_ENABLED(CONFIG_IPV6)
-				p->src.v6.sin6_family = AF_INET6;
-				p->src.v6.sin6_port = inet->inet_sport;
-				p->src.v6.sin6_addr = inet6_sk(sk)->saddr;
-
-				p->dst.v6.sin6_family = AF_INET6;
-				p->dst.v6.sin6_port = inet->inet_dport;
-				p->dst.v6.sin6_addr = sk->sk_v6_daddr;
-#endif
-				break;
-			default:
-				BUG();
-			}
-
-			p->length = len;
-			p->snd_nxt = tp->snd_nxt;
-			p->snd_una = tp->snd_una;
-			p->snd_cwnd = tp->snd_cwnd;
-			p->snd_wnd = tp->snd_wnd;
-			p->rcv_wnd = tp->rcv_wnd;
-			p->ssthresh = tcp_current_ssthresh(sk);
-			p->srtt = tp->srtt_us >> 3;
-
-			tcp_probe.head = (tcp_probe.head + 1) & (bufsize - 1);
-		}
-		tcp_probe.lastcwnd = tp->snd_cwnd;
-		spin_unlock(&tcp_probe.lock);
-
-		wake_up(&tcp_probe.wait);
-	}
-
-	jprobe_return();
-}
-
-static struct jprobe tcp_jprobe = {
-	.kp = {
-		.symbol_name	= "tcp_rcv_established",
-	},
-	.entry	= jtcp_rcv_established,
-};
-
-static int tcpprobe_open(struct inode *inode, struct file *file)
-{
-	/* Reset (empty) log */
-	spin_lock_bh(&tcp_probe.lock);
-	tcp_probe.head = tcp_probe.tail = 0;
-	tcp_probe.start = ktime_get();
-	spin_unlock_bh(&tcp_probe.lock);
-
-	return 0;
-}
-
-static int tcpprobe_sprint(char *tbuf, int n)
-{
-	const struct tcp_log *p
-		= tcp_probe.log + tcp_probe.tail;
-	struct timespec64 ts
-		= ktime_to_timespec64(ktime_sub(p->tstamp, tcp_probe.start));
-
-	return scnprintf(tbuf, n,
-			"%lu.%09lu %pISpc %pISpc %d %#x %#x %u %u %u %u %u\n",
-			(unsigned long)ts.tv_sec,
-			(unsigned long)ts.tv_nsec,
-			&p->src, &p->dst, p->length, p->snd_nxt, p->snd_una,
-			p->snd_cwnd, p->ssthresh, p->snd_wnd, p->srtt, p->rcv_wnd);
-}
-
-static ssize_t tcpprobe_read(struct file *file, char __user *buf,
-			     size_t len, loff_t *ppos)
-{
-	int error = 0;
-	size_t cnt = 0;
-
-	if (!buf)
-		return -EINVAL;
-
-	while (cnt < len) {
-		char tbuf[256];
-		int width;
-
-		/* Wait for data in buffer */
-		error = wait_event_interruptible(tcp_probe.wait,
-						 tcp_probe_used() > 0);
-		if (error)
-			break;
-
-		spin_lock_bh(&tcp_probe.lock);
-		if (tcp_probe.head == tcp_probe.tail) {
-			/* multiple readers race? */
-			spin_unlock_bh(&tcp_probe.lock);
-			continue;
-		}
-
-		width = tcpprobe_sprint(tbuf, sizeof(tbuf));
-
-		if (cnt + width < len)
-			tcp_probe.tail = (tcp_probe.tail + 1) & (bufsize - 1);
-
-		spin_unlock_bh(&tcp_probe.lock);
-
-		/* if record greater than space available
-		   return partial buffer (so far) */
-		if (cnt + width >= len)
-			break;
-
-		if (copy_to_user(buf + cnt, tbuf, width))
-			return -EFAULT;
-		cnt += width;
-	}
-
-	return cnt == 0 ? error : cnt;
-}
-
-static const struct file_operations tcpprobe_fops = {
-	.owner	 = THIS_MODULE,
-	.open	 = tcpprobe_open,
-	.read    = tcpprobe_read,
-	.llseek  = noop_llseek,
-};
-
-static __init int tcpprobe_init(void)
-{
-	int ret = -ENOMEM;
-
-	/* Warning: if the function signature of tcp_rcv_established,
-	 * has been changed, you also have to change the signature of
-	 * jtcp_rcv_established, otherwise you end up right here!
-	 */
-	BUILD_BUG_ON(__same_type(tcp_rcv_established,
-				 jtcp_rcv_established) == 0);
-
-	init_waitqueue_head(&tcp_probe.wait);
-	spin_lock_init(&tcp_probe.lock);
-
-	if (bufsize == 0)
-		return -EINVAL;
-
-	bufsize = roundup_pow_of_two(bufsize);
-	tcp_probe.log = kcalloc(bufsize, sizeof(struct tcp_log), GFP_KERNEL);
-	if (!tcp_probe.log)
-		goto err0;
-
-	if (!proc_create(procname, S_IRUSR, init_net.proc_net, &tcpprobe_fops))
-		goto err0;
-
-	ret = register_jprobe(&tcp_jprobe);
-	if (ret)
-		goto err1;
-
-	pr_info("probe registered (port=%d/fwmark=%u) bufsize=%u\n",
-		port, fwmark, bufsize);
-	return 0;
- err1:
-	remove_proc_entry(procname, init_net.proc_net);
- err0:
-	kfree(tcp_probe.log);
-	return ret;
-}
-module_init(tcpprobe_init);
-
-static __exit void tcpprobe_exit(void)
-{
-	remove_proc_entry(procname, init_net.proc_net);
-	unregister_jprobe(&tcp_jprobe);
-	kfree(tcp_probe.log);
-}
-module_exit(tcpprobe_exit);
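
tcp_probe.c goes away because the jprobe mechanism it relied on is being removed kernel-wide; its replacement is the tcp:tcp_probe tracepoint hooked into tcp_rcv_established() (see the tcp_input.c hunk above). A minimal consumer just flips the event on through tracefs; the path below assumes the usual debugfs mount point:

	#include <fcntl.h>
	#include <unistd.h>

	int enable_tcp_probe(void)
	{
		int fd = open("/sys/kernel/debug/tracing/events/tcp/tcp_probe/enable",
			      O_WRONLY);

		if (fd < 0)
			return -1;
		write(fd, "1", 1);
		/* samples then stream from .../tracing/trace_pipe */
		return close(fd);
	}
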
diff --git a/net/ipv4/tcp_timer.c b/net/ipv4/tcp_timer.c
index 388158c9d9f6..71fc60f1b326 100644
--- a/net/ipv4/tcp_timer.c
+++ b/net/ipv4/tcp_timer.c
@@ -198,11 +198,6 @@ static int tcp_write_timeout(struct sock *sk)
 	if ((1 << sk->sk_state) & (TCPF_SYN_SENT | TCPF_SYN_RECV)) {
 		if (icsk->icsk_retransmits) {
 			dst_negative_advice(sk);
-			if (tp->syn_fastopen || tp->syn_data)
-				tcp_fastopen_cache_set(sk, 0, NULL, true, 0);
-			if (tp->syn_data && icsk->icsk_retransmits == 1)
-				NET_INC_STATS(sock_net(sk),
-					      LINUX_MIB_TCPFASTOPENACTIVEFAIL);
 		} else if (!tp->syn_data && !tp->syn_fastopen) {
 			sk_rethink_txhash(sk);
 		}
@@ -210,17 +205,6 @@ static int tcp_write_timeout(struct sock *sk)
 		expired = icsk->icsk_retransmits >= retry_until;
 	} else {
 		if (retransmits_timed_out(sk, net->ipv4.sysctl_tcp_retries1, 0)) {
-			/* Some middle-boxes may black-hole Fast Open _after_
-			 * the handshake. Therefore we conservatively disable
-			 * Fast Open on this path on recurring timeouts after
-			 * successful Fast Open.
-			 */
-			if (tp->syn_data_acked) {
-				tcp_fastopen_cache_set(sk, 0, NULL, true, 0);
-				if (icsk->icsk_retransmits == net->ipv4.sysctl_tcp_retries1)
-					NET_INC_STATS(sock_net(sk),
-						      LINUX_MIB_TCPFASTOPENACTIVEFAIL);
-			}
 			/* Black hole detection */
 			tcp_mtu_probing(icsk, sk);
 
@@ -243,11 +227,19 @@ static int tcp_write_timeout(struct sock *sk)
 		expired = retransmits_timed_out(sk, retry_until,
 						icsk->icsk_user_timeout);
 	}
+	tcp_fastopen_active_detect_blackhole(sk, expired);
+
+	if (BPF_SOCK_OPS_TEST_FLAG(tp, BPF_SOCK_OPS_RTO_CB_FLAG))
+		tcp_call_bpf_3arg(sk, BPF_SOCK_OPS_RTO_CB,
+				  icsk->icsk_retransmits,
+				  icsk->icsk_rto, (int)expired);
+
 	if (expired) {
 		/* Has it gone just too far? */
 		tcp_write_err(sk);
 		return 1;
 	}
+
 	return 0;
 }
 
diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
index ef45adfc0edb..f81f969f9c06 100644
--- a/net/ipv4/udp.c
+++ b/net/ipv4/udp.c
@@ -357,18 +357,12 @@ fail:
 }
 EXPORT_SYMBOL(udp_lib_get_port);
 
-static u32 udp4_portaddr_hash(const struct net *net, __be32 saddr,
-			      unsigned int port)
-{
-	return jhash_1word((__force u32)saddr, net_hash_mix(net)) ^ port;
-}
-
 int udp_v4_get_port(struct sock *sk, unsigned short snum)
 {
 	unsigned int hash2_nulladdr =
-		udp4_portaddr_hash(sock_net(sk), htonl(INADDR_ANY), snum);
+		ipv4_portaddr_hash(sock_net(sk), htonl(INADDR_ANY), snum);
 	unsigned int hash2_partial =
-		udp4_portaddr_hash(sock_net(sk), inet_sk(sk)->inet_rcv_saddr, 0);
+		ipv4_portaddr_hash(sock_net(sk), inet_sk(sk)->inet_rcv_saddr, 0);
 
 	/* precompute partial secondary hash */
 	udp_sk(sk)->udp_portaddr_hash = hash2_partial;
@@ -445,7 +439,7 @@ static struct sock *udp4_lib_lookup2(struct net *net,
 				     struct sk_buff *skb)
 {
 	struct sock *sk, *result;
-	int score, badness, matches = 0, reuseport = 0;
+	int score, badness;
 	u32 hash = 0;
 
 	result = NULL;
@@ -454,23 +448,16 @@ static struct sock *udp4_lib_lookup2(struct net *net,
 		score = compute_score(sk, net, saddr, sport,
 				      daddr, hnum, dif, sdif, exact_dif);
 		if (score > badness) {
-			reuseport = sk->sk_reuseport;
-			if (reuseport) {
+			if (sk->sk_reuseport) {
 				hash = udp_ehashfn(net, daddr, hnum,
 						   saddr, sport);
 				result = reuseport_select_sock(sk, hash, skb,
 							sizeof(struct udphdr));
 				if (result)
 					return result;
-				matches = 1;
 			}
 			badness = score;
 			result = sk;
-		} else if (score == badness && reuseport) {
-			matches++;
-			if (reciprocal_scale(hash, matches) == 0)
-				result = sk;
-			hash = next_pseudo_random32(hash);
 		}
 	}
 	return result;
@@ -488,11 +475,11 @@ struct sock *__udp4_lib_lookup(struct net *net, __be32 saddr,
 	unsigned int hash2, slot2, slot = udp_hashfn(net, hnum, udptable->mask);
 	struct udp_hslot *hslot2, *hslot = &udptable->hash[slot];
 	bool exact_dif = udp_lib_exact_dif_match(net, skb);
-	int score, badness, matches = 0, reuseport = 0;
+	int score, badness;
 	u32 hash = 0;
 
 	if (hslot->count > 10) {
-		hash2 = udp4_portaddr_hash(net, daddr, hnum);
+		hash2 = ipv4_portaddr_hash(net, daddr, hnum);
 		slot2 = hash2 & udptable->mask;
 		hslot2 = &udptable->hash2[slot2];
 		if (hslot->count < hslot2->count)
@@ -503,7 +490,7 @@ struct sock *__udp4_lib_lookup(struct net *net, __be32 saddr,
 					  exact_dif, hslot2, skb);
 		if (!result) {
 			unsigned int old_slot2 = slot2;
-			hash2 = udp4_portaddr_hash(net, htonl(INADDR_ANY), hnum);
+			hash2 = ipv4_portaddr_hash(net, htonl(INADDR_ANY), hnum);
 			slot2 = hash2 & udptable->mask;
 			/* avoid searching the same slot again. */
 			if (unlikely(slot2 == old_slot2))
@@ -526,23 +513,16 @@ begin:
 		score = compute_score(sk, net, saddr, sport,
 				      daddr, hnum, dif, sdif, exact_dif);
 		if (score > badness) {
-			reuseport = sk->sk_reuseport;
-			if (reuseport) {
+			if (sk->sk_reuseport) {
 				hash = udp_ehashfn(net, daddr, hnum,
 						   saddr, sport);
 				result = reuseport_select_sock(sk, hash, skb,
 							sizeof(struct udphdr));
 				if (result)
 					return result;
-				matches = 1;
 			}
 			result = sk;
 			badness = score;
-		} else if (score == badness && reuseport) {
-			matches++;
-			if (reciprocal_scale(hash, matches) == 0)
-				result = sk;
-			hash = next_pseudo_random32(hash);
 		}
 	}
 	return result;
@@ -997,8 +977,21 @@ int udp_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
 		if (!saddr)
 			saddr = inet->mc_addr;
 		connected = 0;
-	} else if (!ipc.oif)
+	} else if (!ipc.oif) {
 		ipc.oif = inet->uc_index;
+	} else if (ipv4_is_lbcast(daddr) && inet->uc_index) {
+		/* oif is set, packet is to local broadcast
+		 * and uc_index is set. oif is most likely set
+		 * by sk_bound_dev_if. If uc_index != oif, check if the
+		 * oif is an L3 master and uc_index is an L3 slave.
+		 * If so, we want to allow the send using the uc_index.
+		 */
+		if (ipc.oif != inet->uc_index &&
+		    ipc.oif == l3mdev_master_ifindex_by_index(sock_net(sk),
+							      inet->uc_index)) {
+			ipc.oif = inet->uc_index;
+		}
+	}
 
 	if (connected)
 		rt = (struct rtable *)sk_dst_check(sk, 0);
@@ -1775,7 +1768,7 @@ EXPORT_SYMBOL(udp_lib_rehash);
 
 static void udp_v4_rehash(struct sock *sk)
 {
-	u16 new_hash = udp4_portaddr_hash(sock_net(sk),
+	u16 new_hash = ipv4_portaddr_hash(sock_net(sk),
 					  inet_sk(sk)->inet_rcv_saddr,
 					  inet_sk(sk)->inet_num);
 	udp_lib_rehash(sk, new_hash);
@@ -1966,9 +1959,9 @@ static int __udp4_lib_mcast_deliver(struct net *net, struct sk_buff *skb,
 	struct sk_buff *nskb;
 
 	if (use_hash2) {
-		hash2_any = udp4_portaddr_hash(net, htonl(INADDR_ANY), hnum) &
+		hash2_any = ipv4_portaddr_hash(net, htonl(INADDR_ANY), hnum) &
 			    udptable->mask;
-		hash2 = udp4_portaddr_hash(net, daddr, hnum) & udptable->mask;
+		hash2 = ipv4_portaddr_hash(net, daddr, hnum) & udptable->mask;
 start_lookup:
 		hslot = &udptable->hash2[hash2];
 		offset = offsetof(typeof(*sk), __sk_common.skc_portaddr_node);
@@ -2200,7 +2193,7 @@ static struct sock *__udp4_lib_demux_lookup(struct net *net,
 					    int dif, int sdif)
 {
 	unsigned short hnum = ntohs(loc_port);
-	unsigned int hash2 = udp4_portaddr_hash(net, loc_addr, hnum);
+	unsigned int hash2 = ipv4_portaddr_hash(net, loc_addr, hnum);
 	unsigned int slot2 = hash2 & udp_table.mask;
 	struct udp_hslot *hslot2 = &udp_table.hash2[slot2];
 	INET_ADDR_COOKIE(acookie, rmt_addr, loc_addr);
@@ -2510,8 +2503,6 @@ __poll_t udp_poll(struct file *file, struct socket *sock, poll_table *wait)
 	if (!skb_queue_empty(&udp_sk(sk)->reader_queue))
 		mask |= POLLIN | POLLRDNORM;
 
-	sock_rps_record_flow(sk);
-
 	/* Check for false positives due to checksum errors */
 	if ((mask & POLLRDNORM) && !(file->f_flags & O_NONBLOCK) &&
 	    !(sk->sk_shutdown & RCV_SHUTDOWN) && first_packet_length(sk) == -1)
@@ -2736,7 +2727,6 @@ int udp4_seq_show(struct seq_file *seq, void *v)
 }
 
 static const struct file_operations udp_afinfo_seq_fops = {
-	.owner    = THIS_MODULE,
 	.open     = udp_seq_open,
 	.read     = seq_read,
 	.llseek   = seq_lseek,
diff --git a/net/ipv4/udplite.c b/net/ipv4/udplite.c
index 59f10fe9782e..f96614e9b9a5 100644
--- a/net/ipv4/udplite.c
+++ b/net/ipv4/udplite.c
@@ -75,7 +75,6 @@ static struct inet_protosw udplite4_protosw = {
 #ifdef CONFIG_PROC_FS
 
 static const struct file_operations udplite_afinfo_seq_fops = {
-	.owner    = THIS_MODULE,
 	.open     = udp_seq_open,
 	.read     = seq_read,
 	.llseek   = seq_lseek,
diff --git a/net/ipv4/xfrm4_mode_tunnel.c b/net/ipv4/xfrm4_mode_tunnel.c
index 20ca486b3cad..63faeee989a9 100644
--- a/net/ipv4/xfrm4_mode_tunnel.c
+++ b/net/ipv4/xfrm4_mode_tunnel.c
@@ -62,7 +62,7 @@ static int xfrm4_mode_tunnel_output(struct xfrm_state *x, struct sk_buff *skb)
 	top_iph->frag_off = (flags & XFRM_STATE_NOPMTUDISC) ?
 		0 : (XFRM_MODE_SKB_CB(skb)->frag_off & htons(IP_DF));
 
-	top_iph->ttl = ip4_dst_hoplimit(dst->child);
+	top_iph->ttl = ip4_dst_hoplimit(xfrm_dst_child(dst));
 
 	top_iph->saddr = x->props.saddr.a4;
 	top_iph->daddr = x->id.daddr.a4;
@@ -106,18 +106,15 @@ static struct sk_buff *xfrm4_mode_tunnel_gso_segment(struct xfrm_state *x,
 {
 	__skb_push(skb, skb->mac_len);
 	return skb_mac_gso_segment(skb, features);
-
 }
 
 static void xfrm4_mode_tunnel_xmit(struct xfrm_state *x, struct sk_buff *skb)
 {
 	struct xfrm_offload *xo = xfrm_offload(skb);
 
-	if (xo->flags & XFRM_GSO_SEGMENT) {
-		skb->network_header = skb->network_header - x->props.header_len;
+	if (xo->flags & XFRM_GSO_SEGMENT)
 		skb->transport_header = skb->network_header +
 					sizeof(struct iphdr);
-	}
 
 	skb_reset_mac_len(skb);
 	pskb_pull(skb, skb->mac_len + x->props.header_len);
diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c
index f49bd7897e95..e1846b97ee69 100644
--- a/net/ipv6/addrconf.c
+++ b/net/ipv6/addrconf.c
@@ -186,7 +186,8 @@ static struct rt6_info *addrconf_get_prefix_route(const struct in6_addr *pfx,
 
 static void addrconf_dad_start(struct inet6_ifaddr *ifp);
 static void addrconf_dad_work(struct work_struct *w);
-static void addrconf_dad_completed(struct inet6_ifaddr *ifp, bool bump_id);
+static void addrconf_dad_completed(struct inet6_ifaddr *ifp, bool bump_id,
+				   bool send_na);
 static void addrconf_dad_run(struct inet6_dev *idev);
 static void addrconf_rs_timer(struct timer_list *t);
 static void __ipv6_ifa_notify(int event, struct inet6_ifaddr *ifa);
@@ -3438,6 +3439,7 @@ static int addrconf_notify(struct notifier_block *this, unsigned long event,
 		} else if (event == NETDEV_CHANGE) {
 			if (!addrconf_link_ready(dev)) {
 				/* device is still not ready. */
+				rt6_sync_down_dev(dev, event);
 				break;
 			}
 
@@ -3449,6 +3451,7 @@ static int addrconf_notify(struct notifier_block *this, unsigned long event,
 					 * multicast snooping switches
 					 */
 					ipv6_mc_up(idev);
+					rt6_sync_up(dev, RTNH_F_LINKDOWN);
 					break;
 				}
 				idev->if_flags |= IF_READY;
@@ -3484,6 +3487,9 @@ static int addrconf_notify(struct notifier_block *this, unsigned long event,
 			if (run_pending)
 				addrconf_dad_run(idev);
 
+			/* Device has an address by now */
+			rt6_sync_up(dev, RTNH_F_DEAD);
+
 			/*
 			 * If the MTU changed during the interface down,
 			 * when the interface up, the changed MTU must be
@@ -3577,6 +3583,7 @@ static bool addr_is_local(const struct in6_addr *addr)
 
 static int addrconf_ifdown(struct net_device *dev, int how)
 {
+	unsigned long event = how ? NETDEV_UNREGISTER : NETDEV_DOWN;
 	struct net *net = dev_net(dev);
 	struct inet6_dev *idev;
 	struct inet6_ifaddr *ifa, *tmp;
@@ -3586,8 +3593,7 @@ static int addrconf_ifdown(struct net_device *dev, int how)
 
 	ASSERT_RTNL();
 
-	rt6_ifdown(net, dev);
-	neigh_ifdown(&nd_tbl, dev);
+	rt6_disable_ip(dev, event);
 
 	idev = __in6_dev_get(dev);
 	if (!idev)
@@ -3833,12 +3839,17 @@ static void addrconf_dad_begin(struct inet6_ifaddr *ifp)
 	     idev->cnf.accept_dad < 1) ||
 	    !(ifp->flags&IFA_F_TENTATIVE) ||
 	    ifp->flags & IFA_F_NODAD) {
+		bool send_na = false;
+
+		if (ifp->flags & IFA_F_TENTATIVE &&
+		    !(ifp->flags & IFA_F_OPTIMISTIC))
+			send_na = true;
 		bump_id = ifp->flags & IFA_F_TENTATIVE;
 		ifp->flags &= ~(IFA_F_TENTATIVE|IFA_F_OPTIMISTIC|IFA_F_DADFAILED);
 		spin_unlock(&ifp->lock);
 		read_unlock_bh(&idev->lock);
 
-		addrconf_dad_completed(ifp, bump_id);
+		addrconf_dad_completed(ifp, bump_id, send_na);
 		return;
 	}
 
@@ -3967,16 +3978,21 @@ static void addrconf_dad_work(struct work_struct *w)
 	}
 
 	if (ifp->dad_probes == 0) {
+		bool send_na = false;
+
 		/*
 		 * DAD was successful
 		 */
 
+		if (ifp->flags & IFA_F_TENTATIVE &&
+		    !(ifp->flags & IFA_F_OPTIMISTIC))
+			send_na = true;
 		bump_id = ifp->flags & IFA_F_TENTATIVE;
 		ifp->flags &= ~(IFA_F_TENTATIVE|IFA_F_OPTIMISTIC|IFA_F_DADFAILED);
 		spin_unlock(&ifp->lock);
 		write_unlock_bh(&idev->lock);
 
-		addrconf_dad_completed(ifp, bump_id);
+		addrconf_dad_completed(ifp, bump_id, send_na);
 
 		goto out;
 	}
@@ -4014,7 +4030,8 @@ static bool ipv6_lonely_lladdr(struct inet6_ifaddr *ifp)
 	return true;
 }
 
-static void addrconf_dad_completed(struct inet6_ifaddr *ifp, bool bump_id)
+static void addrconf_dad_completed(struct inet6_ifaddr *ifp, bool bump_id,
+				   bool send_na)
 {
 	struct net_device *dev = ifp->idev->dev;
 	struct in6_addr lladdr;
@@ -4046,6 +4063,16 @@ static void addrconf_dad_completed(struct inet6_ifaddr *ifp, bool bump_id)
 	if (send_mld)
 		ipv6_mc_dad_complete(ifp->idev);
 
+	/* send unsolicited NA if enabled */
+	if (send_na &&
+	    (ifp->idev->cnf.ndisc_notify ||
+	     dev_net(dev)->ipv6.devconf_all->ndisc_notify)) {
+		ndisc_send_na(dev, &in6addr_linklocal_allnodes, &ifp->addr,
+			      /*router=*/ !!ifp->idev->cnf.forwarding,
+			      /*solicited=*/ false, /*override=*/ true,
+			      /*inc_opt=*/ true);
+	}
+
 	if (send_rs) {
 		/*
 		 *	If a host has already performed a random delay
@@ -4209,7 +4236,6 @@ static int if6_seq_open(struct inode *inode, struct file *file)
 }
 
 static const struct file_operations if6_fops = {
-	.owner		= THIS_MODULE,
 	.open		= if6_seq_open,
 	.read		= seq_read,
 	.llseek		= seq_lseek,
@@ -4352,9 +4378,11 @@ restart:
 						spin_lock(&ifpub->lock);
 						ifpub->regen_count = 0;
 						spin_unlock(&ifpub->lock);
+						rcu_read_unlock_bh();
 						ipv6_create_tempaddr(ifpub, ifp, true);
 						in6_ifa_put(ifpub);
 						in6_ifa_put(ifp);
+						rcu_read_lock_bh();
 						goto restart;
 					}
 				} else if (time_before(ifp->tstamp + ifp->prefered_lft * HZ - regen_advance * HZ, next))
@@ -6595,27 +6623,45 @@ int __init addrconf_init(void)
 
 	rtnl_af_register(&inet6_ops);
 
-	err = __rtnl_register(PF_INET6, RTM_GETLINK, NULL, inet6_dump_ifinfo,
-			      0);
+	err = rtnl_register_module(THIS_MODULE, PF_INET6, RTM_GETLINK,
+				   NULL, inet6_dump_ifinfo, 0);
 	if (err < 0)
 		goto errout;
 
-	/* Only the first call to __rtnl_register can fail */
-	__rtnl_register(PF_INET6, RTM_NEWADDR, inet6_rtm_newaddr, NULL, 0);
-	__rtnl_register(PF_INET6, RTM_DELADDR, inet6_rtm_deladdr, NULL, 0);
-	__rtnl_register(PF_INET6, RTM_GETADDR, inet6_rtm_getaddr,
-			inet6_dump_ifaddr, RTNL_FLAG_DOIT_UNLOCKED);
-	__rtnl_register(PF_INET6, RTM_GETMULTICAST, NULL,
-			inet6_dump_ifmcaddr, 0);
-	__rtnl_register(PF_INET6, RTM_GETANYCAST, NULL,
-			inet6_dump_ifacaddr, 0);
-	__rtnl_register(PF_INET6, RTM_GETNETCONF, inet6_netconf_get_devconf,
-			inet6_netconf_dump_devconf, RTNL_FLAG_DOIT_UNLOCKED);
-
-	ipv6_addr_label_rtnl_register();
+	err = rtnl_register_module(THIS_MODULE, PF_INET6, RTM_NEWADDR,
+				   inet6_rtm_newaddr, NULL, 0);
+	if (err < 0)
+		goto errout;
+	err = rtnl_register_module(THIS_MODULE, PF_INET6, RTM_DELADDR,
+				   inet6_rtm_deladdr, NULL, 0);
+	if (err < 0)
+		goto errout;
+	err = rtnl_register_module(THIS_MODULE, PF_INET6, RTM_GETADDR,
+				   inet6_rtm_getaddr, inet6_dump_ifaddr,
+				   RTNL_FLAG_DOIT_UNLOCKED);
+	if (err < 0)
+		goto errout;
+	err = rtnl_register_module(THIS_MODULE, PF_INET6, RTM_GETMULTICAST,
+				   NULL, inet6_dump_ifmcaddr, 0);
+	if (err < 0)
+		goto errout;
+	err = rtnl_register_module(THIS_MODULE, PF_INET6, RTM_GETANYCAST,
+				   NULL, inet6_dump_ifacaddr, 0);
+	if (err < 0)
+		goto errout;
+	err = rtnl_register_module(THIS_MODULE, PF_INET6, RTM_GETNETCONF,
+				   inet6_netconf_get_devconf,
+				   inet6_netconf_dump_devconf,
+				   RTNL_FLAG_DOIT_UNLOCKED);
+	if (err < 0)
+		goto errout;
+	err = ipv6_addr_label_rtnl_register();
+	if (err < 0)
+		goto errout;
 
 	return 0;
 errout:
+	rtnl_unregister_all(PF_INET6);
 	rtnl_af_unregister(&inet6_ops);
 	unregister_netdevice_notifier(&ipv6_dev_notf);
 errlo:
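
addrconf_init() now uses rtnl_register_module(), which, unlike the old __rtnl_register(), takes an owner so handlers pin the module and can fail cleanly instead of being fire-and-forget. The conversion pattern, sketched for a hypothetical module (foo_doit/foo_dumpit are stubs and the message type is a placeholder):

	#include <linux/module.h>
	#include <net/rtnetlink.h>

	static int foo_doit(struct sk_buff *skb, struct nlmsghdr *nlh,
			    struct netlink_ext_ack *extack)
	{
		return 0;
	}

	static int foo_dumpit(struct sk_buff *skb, struct netlink_callback *cb)
	{
		return 0;
	}

	static int __init foo_init(void)
	{
		/* failure is now propagated instead of silently ignored */
		return rtnl_register_module(THIS_MODULE, PF_UNSPEC, RTM_NEWLINK,
					    foo_doit, foo_dumpit, 0);
	}

	static void __exit foo_exit(void)
	{
		rtnl_unregister(PF_UNSPEC, RTM_NEWLINK);
	}

	module_init(foo_init);
	module_exit(foo_exit);
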
diff --git a/net/ipv6/addrlabel.c b/net/ipv6/addrlabel.c
index 00e1f8ee08f8..1d6ced37ad71 100644
--- a/net/ipv6/addrlabel.c
+++ b/net/ipv6/addrlabel.c
@@ -547,13 +547,22 @@ static int ip6addrlbl_get(struct sk_buff *in_skb, struct nlmsghdr *nlh,
 	return err;
 }
 
-void __init ipv6_addr_label_rtnl_register(void)
+int __init ipv6_addr_label_rtnl_register(void)
 {
-	__rtnl_register(PF_INET6, RTM_NEWADDRLABEL, ip6addrlbl_newdel,
-			NULL, RTNL_FLAG_DOIT_UNLOCKED);
-	__rtnl_register(PF_INET6, RTM_DELADDRLABEL, ip6addrlbl_newdel,
-			NULL, RTNL_FLAG_DOIT_UNLOCKED);
-	__rtnl_register(PF_INET6, RTM_GETADDRLABEL, ip6addrlbl_get,
-			ip6addrlbl_dump, RTNL_FLAG_DOIT_UNLOCKED);
-}
+	int ret;
 
+	ret = rtnl_register_module(THIS_MODULE, PF_INET6, RTM_NEWADDRLABEL,
+				   ip6addrlbl_newdel,
+				   NULL, RTNL_FLAG_DOIT_UNLOCKED);
+	if (ret < 0)
+		return ret;
+	ret = rtnl_register_module(THIS_MODULE, PF_INET6, RTM_DELADDRLABEL,
+				   ip6addrlbl_newdel,
+				   NULL, RTNL_FLAG_DOIT_UNLOCKED);
+	if (ret < 0)
+		return ret;
+	ret = rtnl_register_module(THIS_MODULE, PF_INET6, RTM_GETADDRLABEL,
+				   ip6addrlbl_get,
+				   ip6addrlbl_dump, RTNL_FLAG_DOIT_UNLOCKED);
+	return ret;
+}
diff --git a/net/ipv6/af_inet6.c b/net/ipv6/af_inet6.c
index c9441ca45399..416917719a6f 100644
--- a/net/ipv6/af_inet6.c
+++ b/net/ipv6/af_inet6.c
@@ -284,6 +284,7 @@ int inet6_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
 	struct net *net = sock_net(sk);
 	__be32 v4addr = 0;
 	unsigned short snum;
+	bool saved_ipv6only;
 	int addr_type = 0;
 	int err = 0;
 
@@ -389,19 +390,21 @@ int inet6_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
 	if (!(addr_type & IPV6_ADDR_MULTICAST))
 		np->saddr = addr->sin6_addr;
 
+	saved_ipv6only = sk->sk_ipv6only;
+	if (addr_type != IPV6_ADDR_ANY && addr_type != IPV6_ADDR_MAPPED)
+		sk->sk_ipv6only = 1;
+
 	/* Make sure we are allowed to bind here. */
 	if ((snum || !inet->bind_address_no_port) &&
 	    sk->sk_prot->get_port(sk, snum)) {
+		sk->sk_ipv6only = saved_ipv6only;
 		inet_reset_saddr(sk);
 		err = -EADDRINUSE;
 		goto out;
 	}
 
-	if (addr_type != IPV6_ADDR_ANY) {
+	if (addr_type != IPV6_ADDR_ANY)
 		sk->sk_userlocks |= SOCK_BINDADDR_LOCK;
-		if (addr_type != IPV6_ADDR_MAPPED)
-			sk->sk_ipv6only = 1;
-	}
 	if (snum)
 		sk->sk_userlocks |= SOCK_BINDPORT_LOCK;
 	inet->inet_sport = htons(inet->inet_num);
diff --git a/net/ipv6/anycast.c b/net/ipv6/anycast.c
index 0bbab8a4b5d8..8e085cc05aeb 100644
--- a/net/ipv6/anycast.c
+++ b/net/ipv6/anycast.c
@@ -533,7 +533,6 @@ static int ac6_seq_open(struct inode *inode, struct file *file)
 }
 
 static const struct file_operations ac6_seq_fops = {
-	.owner		=	THIS_MODULE,
 	.open		=	ac6_seq_open,
 	.read		=	seq_read,
 	.llseek		=	seq_lseek,
diff --git a/net/ipv6/datagram.c b/net/ipv6/datagram.c
index a1f918713006..fbf08ce3f5ab 100644
--- a/net/ipv6/datagram.c
+++ b/net/ipv6/datagram.c
@@ -221,8 +221,7 @@ ipv4_connected:
 	if (__ipv6_addr_needs_scope_id(addr_type)) {
 		if (addr_len >= sizeof(struct sockaddr_in6) &&
 		    usin->sin6_scope_id) {
-			if (sk->sk_bound_dev_if &&
-			    sk->sk_bound_dev_if != usin->sin6_scope_id) {
+			if (!sk_dev_equal_l3scope(sk, usin->sin6_scope_id)) {
 				err = -EINVAL;
 				goto out;
 			}
diff --git a/net/ipv6/esp6.c b/net/ipv6/esp6.c
index 1a7f00cd4803..97513f35bcc5 100644
--- a/net/ipv6/esp6.c
+++ b/net/ipv6/esp6.c
@@ -141,14 +141,32 @@ static void esp_ssg_unref(struct xfrm_state *x, void *tmp)
 static void esp_output_done(struct crypto_async_request *base, int err)
 {
 	struct sk_buff *skb = base->data;
+	struct xfrm_offload *xo = xfrm_offload(skb);
 	void *tmp;
-	struct dst_entry *dst = skb_dst(skb);
-	struct xfrm_state *x = dst->xfrm;
+	struct xfrm_state *x;
+
+	if (xo && (xo->flags & XFRM_DEV_RESUME))
+		x = skb->sp->xvec[skb->sp->len - 1];
+	else
+		x = skb_dst(skb)->xfrm;
 
 	tmp = ESP_SKB_CB(skb)->tmp;
 	esp_ssg_unref(x, tmp);
 	kfree(tmp);
-	xfrm_output_resume(skb, err);
+
+	if (xo && (xo->flags & XFRM_DEV_RESUME)) {
+		if (err) {
+			XFRM_INC_STATS(xs_net(x), LINUX_MIB_XFRMOUTSTATEPROTOERROR);
+			kfree_skb(skb);
+			return;
+		}
+
+		skb_push(skb, skb->data - skb_mac_header(skb));
+		secpath_reset(skb);
+		xfrm_dev_resume(skb);
+	} else {
+		xfrm_output_resume(skb, err);
+	}
 }
 
 /* Move ESP header back into place. */
@@ -734,17 +752,13 @@ static int esp_init_aead(struct xfrm_state *x)
 	char aead_name[CRYPTO_MAX_ALG_NAME];
 	struct crypto_aead *aead;
 	int err;
-	u32 mask = 0;
 
 	err = -ENAMETOOLONG;
 	if (snprintf(aead_name, CRYPTO_MAX_ALG_NAME, "%s(%s)",
 		     x->geniv, x->aead->alg_name) >= CRYPTO_MAX_ALG_NAME)
 		goto error;
 
-	if (x->xso.offload_handle)
-		mask |= CRYPTO_ALG_ASYNC;
-
-	aead = crypto_alloc_aead(aead_name, 0, mask);
+	aead = crypto_alloc_aead(aead_name, 0, 0);
 	err = PTR_ERR(aead);
 	if (IS_ERR(aead))
 		goto error;
@@ -774,7 +788,6 @@ static int esp_init_authenc(struct xfrm_state *x)
 	char authenc_name[CRYPTO_MAX_ALG_NAME];
 	unsigned int keylen;
 	int err;
-	u32 mask = 0;
 
 	err = -EINVAL;
 	if (!x->ealg)
@@ -800,10 +813,7 @@ static int esp_init_authenc(struct xfrm_state *x)
 			goto error;
 	}
 
-	if (x->xso.offload_handle)
-		mask |= CRYPTO_ALG_ASYNC;
-
-	aead = crypto_alloc_aead(authenc_name, 0, mask);
+	aead = crypto_alloc_aead(authenc_name, 0, 0);
 	err = PTR_ERR(aead);
 	if (IS_ERR(aead))
 		goto error;
diff --git a/net/ipv6/esp6_offload.c b/net/ipv6/esp6_offload.c
index f52c314d4c97..3fd1ec775dc2 100644
--- a/net/ipv6/esp6_offload.c
+++ b/net/ipv6/esp6_offload.c
@@ -136,78 +136,39 @@ static void esp6_gso_encap(struct xfrm_state *x, struct sk_buff *skb)
 static struct sk_buff *esp6_gso_segment(struct sk_buff *skb,
 				        netdev_features_t features)
 {
-	__u32 seq;
-	int err = 0;
-	struct sk_buff *skb2;
 	struct xfrm_state *x;
 	struct ip_esp_hdr *esph;
 	struct crypto_aead *aead;
-	struct sk_buff *segs = ERR_PTR(-EINVAL);
 	netdev_features_t esp_features = features;
 	struct xfrm_offload *xo = xfrm_offload(skb);
 
 	if (!xo)
-		goto out;
+		return ERR_PTR(-EINVAL);
 
 	if (!(skb_shinfo(skb)->gso_type & SKB_GSO_ESP))
-		goto out;
-
-	seq = xo->seq.low;
+		return ERR_PTR(-EINVAL);
 
 	x = skb->sp->xvec[skb->sp->len - 1];
 	aead = x->data;
 	esph = ip_esp_hdr(skb);
 
 	if (esph->spi != x->id.spi)
-		goto out;
+		return ERR_PTR(-EINVAL);
 
 	if (!pskb_may_pull(skb, sizeof(*esph) + crypto_aead_ivsize(aead)))
-		goto out;
+		return ERR_PTR(-EINVAL);
 
 	__skb_pull(skb, sizeof(*esph) + crypto_aead_ivsize(aead));
 
 	skb->encap_hdr_csum = 1;
 
-	if (!(features & NETIF_F_HW_ESP))
+	if (!(features & NETIF_F_HW_ESP) || !x->xso.offload_handle ||
+	    (x->xso.dev != skb->dev))
 		esp_features = features & ~(NETIF_F_SG | NETIF_F_CSUM_MASK);
 
-	segs = x->outer_mode->gso_segment(x, skb, esp_features);
-	if (IS_ERR_OR_NULL(segs))
-		goto out;
-
-	__skb_pull(skb, skb->data - skb_mac_header(skb));
-
-	skb2 = segs;
-	do {
-		struct sk_buff *nskb = skb2->next;
-
-		xo = xfrm_offload(skb2);
-		xo->flags |= XFRM_GSO_SEGMENT;
-		xo->seq.low = seq;
-		xo->seq.hi = xfrm_replay_seqhi(x, seq);
-
-		if(!(features & NETIF_F_HW_ESP))
-			xo->flags |= CRYPTO_FALLBACK;
-
-		x->outer_mode->xmit(x, skb2);
-
-		err = x->type_offload->xmit(x, skb2, esp_features);
-		if (err) {
-			kfree_skb_list(segs);
-			return ERR_PTR(err);
-		}
+	xo->flags |= XFRM_GSO_SEGMENT;
 
-		if (!skb_is_gso(skb2))
-			seq++;
-		else
-			seq += skb_shinfo(skb2)->gso_segs;
-
-		skb_push(skb2, skb2->mac_len);
-		skb2 = nskb;
-	} while (skb2);
-
-out:
-	return segs;
+	return x->outer_mode->gso_segment(x, skb, esp_features);
 }
 
 static int esp6_input_tail(struct xfrm_state *x, struct sk_buff *skb)
@@ -226,6 +187,7 @@ static int esp6_input_tail(struct xfrm_state *x, struct sk_buff *skb)
 
 static int esp6_xmit(struct xfrm_state *x, struct sk_buff *skb,  netdev_features_t features)
 {
+	int len;
 	int err;
 	int alen;
 	int blksize;
@@ -234,6 +196,7 @@ static int esp6_xmit(struct xfrm_state *x, struct sk_buff *skb,  netdev_features
 	struct crypto_aead *aead;
 	struct esp_info esp;
 	bool hw_offload = true;
+	__u32 seq;
 
 	esp.inplace = true;
 
@@ -269,28 +232,33 @@ static int esp6_xmit(struct xfrm_state *x, struct sk_buff *skb,  netdev_features
 			return esp.nfrags;
 	}
 
+	seq = xo->seq.low;
+
 	esph = ip_esp_hdr(skb);
 	esph->spi = x->id.spi;
 
 	skb_push(skb, -skb_network_offset(skb));
 
 	if (xo->flags & XFRM_GSO_SEGMENT) {
-		esph->seq_no = htonl(xo->seq.low);
-	} else {
-		int len;
-
-		len = skb->len - sizeof(struct ipv6hdr);
-		if (len > IPV6_MAXPLEN)
-			len = 0;
+		esph->seq_no = htonl(seq);
 
-		ipv6_hdr(skb)->payload_len = htons(len);
+		if (!skb_is_gso(skb))
+			xo->seq.low++;
+		else
+			xo->seq.low += skb_shinfo(skb)->gso_segs;
 	}
 
+	esp.seqno = cpu_to_be64(xo->seq.low + ((u64)xo->seq.hi << 32));
+
+	len = skb->len - sizeof(struct ipv6hdr);
+	if (len > IPV6_MAXPLEN)
+		len = 0;
+
+	ipv6_hdr(skb)->payload_len = htons(len);
+
 	if (hw_offload)
 		return 0;
 
-	esp.seqno = cpu_to_be64(xo->seq.low + ((u64)xo->seq.hi << 32));
-
 	err = esp6_output_tail(x, skb, &esp);
 	if (err)
 		return err;
diff --git a/net/ipv6/ila/ila_xlat.c b/net/ipv6/ila/ila_xlat.c
index 6eb5e68f112a..44c39c5f0638 100644
--- a/net/ipv6/ila/ila_xlat.c
+++ b/net/ipv6/ila/ila_xlat.c
@@ -512,9 +512,7 @@ static int ila_nl_dump(struct sk_buff *skb, struct netlink_callback *cb)
 	struct ila_map *ila;
 	int ret;
 
-	ret = rhashtable_walk_start(rhiter);
-	if (ret && ret != -EAGAIN)
-		goto done;
+	rhashtable_walk_start(rhiter);
 
 	for (;;) {
 		ila = rhashtable_walk_next(rhiter);
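
rhashtable_walk_start() was just changed to return void, so the -EAGAIN check above goes away; a concurrent resize is instead reported by rhashtable_walk_next() as ERR_PTR(-EAGAIN). The updated iteration idiom looks roughly like this fragment:

	rhashtable_walk_start(&iter);
	while ((obj = rhashtable_walk_next(&iter)) != NULL) {
		if (IS_ERR(obj)) {
			if (PTR_ERR(obj) == -EAGAIN)
				continue;	/* table resized under us: rescan */
			break;			/* real error */
		}
		/* ... process obj ... */
	}
	rhashtable_walk_stop(&iter);
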
diff --git a/net/ipv6/inet6_hashtables.c b/net/ipv6/inet6_hashtables.c
index b01858f5deb1..2febe26de6a1 100644
--- a/net/ipv6/inet6_hashtables.c
+++ b/net/ipv6/inet6_hashtables.c
@@ -125,6 +125,40 @@ static inline int compute_score(struct sock *sk, struct net *net,
 }
 
 /* called with rcu_read_lock() */
+static struct sock *inet6_lhash2_lookup(struct net *net,
+		struct inet_listen_hashbucket *ilb2,
+		struct sk_buff *skb, int doff,
+		const struct in6_addr *saddr,
+		const __be16 sport, const struct in6_addr *daddr,
+		const unsigned short hnum, const int dif, const int sdif)
+{
+	bool exact_dif = inet6_exact_dif_match(net, skb);
+	struct inet_connection_sock *icsk;
+	struct sock *sk, *result = NULL;
+	int score, hiscore = 0;
+	u32 phash = 0;
+
+	inet_lhash2_for_each_icsk_rcu(icsk, &ilb2->head) {
+		sk = (struct sock *)icsk;
+		score = compute_score(sk, net, hnum, daddr, dif, sdif,
+				      exact_dif);
+		if (score > hiscore) {
+			if (sk->sk_reuseport) {
+				phash = inet6_ehashfn(net, daddr, hnum,
+						      saddr, sport);
+				result = reuseport_select_sock(sk, phash,
+							       skb, doff);
+				if (result)
+					return result;
+			}
+			result = sk;
+			hiscore = score;
+		}
+	}
+
+	return result;
+}
+
 struct sock *inet6_lookup_listener(struct net *net,
 		struct inet_hashinfo *hashinfo,
 		struct sk_buff *skb, int doff,
@@ -134,31 +168,56 @@ struct sock *inet6_lookup_listener(struct net *net,
 {
 	unsigned int hash = inet_lhashfn(net, hnum);
 	struct inet_listen_hashbucket *ilb = &hashinfo->listening_hash[hash];
-	int score, hiscore = 0, matches = 0, reuseport = 0;
 	bool exact_dif = inet6_exact_dif_match(net, skb);
+	struct inet_listen_hashbucket *ilb2;
 	struct sock *sk, *result = NULL;
+	int score, hiscore = 0;
+	unsigned int hash2;
 	u32 phash = 0;
 
+	if (ilb->count <= 10 || !hashinfo->lhash2)
+		goto port_lookup;
+
+	/* Too many sk in the ilb bucket (which is hashed by port alone).
+	 * Try lhash2 (which is hashed by port and addr) instead.
+	 */
+
+	hash2 = ipv6_portaddr_hash(net, daddr, hnum);
+	ilb2 = inet_lhash2_bucket(hashinfo, hash2);
+	if (ilb2->count > ilb->count)
+		goto port_lookup;
+
+	result = inet6_lhash2_lookup(net, ilb2, skb, doff,
+				     saddr, sport, daddr, hnum,
+				     dif, sdif);
+	if (result)
+		return result;
+
+	/* Lookup lhash2 with in6addr_any */
+
+	hash2 = ipv6_portaddr_hash(net, &in6addr_any, hnum);
+	ilb2 = inet_lhash2_bucket(hashinfo, hash2);
+	if (ilb2->count > ilb->count)
+		goto port_lookup;
+
+	return inet6_lhash2_lookup(net, ilb2, skb, doff,
+				   saddr, sport, daddr, hnum,
+				   dif, sdif);
+
+port_lookup:
 	sk_for_each(sk, &ilb->head) {
 		score = compute_score(sk, net, hnum, daddr, dif, sdif, exact_dif);
 		if (score > hiscore) {
-			reuseport = sk->sk_reuseport;
-			if (reuseport) {
+			if (sk->sk_reuseport) {
 				phash = inet6_ehashfn(net, daddr, hnum,
 						      saddr, sport);
 				result = reuseport_select_sock(sk, phash,
 							       skb, doff);
 				if (result)
 					return result;
-				matches = 1;
 			}
 			result = sk;
 			hiscore = score;
-		} else if (score == hiscore && reuseport) {
-			matches++;
-			if (reciprocal_scale(phash, matches) == 0)
-				result = sk;
-			phash = next_pseudo_random32(phash);
 		}
 	}
 	return result;
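
The listener-lookup rework above boils down to one heuristic: scan the plain per-port chain when it is short, otherwise prefer the (port, address)-hashed lhash2 chain unless that chain is even longer; the lhash2 path is tried first with the packet's destination address and then with in6addr_any. A distilled sketch of the bucket choice (illustrative names, not kernel API):

#include <stdbool.h>

/* Distilled from inet6_lookup_listener() above; illustrative only. */
static bool use_portaddr_bucket(unsigned int port_chain_len,
				unsigned int portaddr_chain_len,
				bool lhash2_available)
{
	if (!lhash2_available || port_chain_len <= 10)
		return false;	/* short chain: scanning it is cheap */

	/* lhash2 only helps if its chain is no longer than the
	 * per-port chain */
	return portaddr_chain_len <= port_chain_len;
}
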
diff --git a/net/ipv6/ip6_fib.c b/net/ipv6/ip6_fib.c
index 217683d40f12..92b8d8c75eed 100644
--- a/net/ipv6/ip6_fib.c
+++ b/net/ipv6/ip6_fib.c
@@ -107,16 +107,13 @@ enum {
 
 void fib6_update_sernum(struct rt6_info *rt)
 {
-	struct fib6_table *table = rt->rt6i_table;
 	struct net *net = dev_net(rt->dst.dev);
 	struct fib6_node *fn;
 
-	spin_lock_bh(&table->tb6_lock);
 	fn = rcu_dereference_protected(rt->rt6i_node,
-			lockdep_is_held(&table->tb6_lock));
+			lockdep_is_held(&rt->rt6i_table->tb6_lock));
 	if (fn)
 		fn->fn_sernum = fib6_new_sernum(net);
-	spin_unlock_bh(&table->tb6_lock);
 }
 
 /*
@@ -804,12 +801,6 @@ insert_above:
 	return ln;
 }
 
-static bool rt6_qualify_for_ecmp(struct rt6_info *rt)
-{
-	return (rt->rt6i_flags & (RTF_GATEWAY|RTF_ADDRCONF|RTF_DYNAMIC)) ==
-	       RTF_GATEWAY;
-}
-
 static void fib6_copy_metrics(u32 *mp, const struct mx6_config *mxc)
 {
 	int i;
@@ -898,7 +889,7 @@ static int fib6_add_rt2node(struct fib6_node *fn, struct rt6_info *rt,
 	ins = &fn->leaf;
 
 	for (iter = leaf; iter;
-	     iter = rcu_dereference_protected(iter->dst.rt6_next,
+	     iter = rcu_dereference_protected(iter->rt6_next,
 				lockdep_is_held(&rt->rt6i_table->tb6_lock))) {
 		/*
 		 *	Search for duplicates
@@ -955,7 +946,7 @@ static int fib6_add_rt2node(struct fib6_node *fn, struct rt6_info *rt,
 			break;
 
 next_iter:
-		ins = &iter->dst.rt6_next;
+		ins = &iter->rt6_next;
 	}
 
 	if (fallback_ins && !found) {
@@ -984,7 +975,7 @@ next_iter:
 					      &sibling->rt6i_siblings);
 				break;
 			}
-			sibling = rcu_dereference_protected(sibling->dst.rt6_next,
+			sibling = rcu_dereference_protected(sibling->rt6_next,
 				    lockdep_is_held(&rt->rt6i_table->tb6_lock));
 		}
 		/* For each sibling in the list, increment the counter of
@@ -999,6 +990,7 @@ next_iter:
 			rt6i_nsiblings++;
 		}
 		BUG_ON(rt6i_nsiblings != rt->rt6i_nsiblings);
+		rt6_multipath_rebalance(temp_sibling);
 	}
 
 	/*
@@ -1014,7 +1006,7 @@ add:
 		if (err)
 			return err;
 
-		rcu_assign_pointer(rt->dst.rt6_next, iter);
+		rcu_assign_pointer(rt->rt6_next, iter);
 		atomic_inc(&rt->rt6i_ref);
 		rcu_assign_pointer(rt->rt6i_node, fn);
 		rcu_assign_pointer(*ins, rt);
@@ -1045,7 +1037,7 @@ add:
 
 		atomic_inc(&rt->rt6i_ref);
 		rcu_assign_pointer(rt->rt6i_node, fn);
-		rt->dst.rt6_next = iter->dst.rt6_next;
+		rt->rt6_next = iter->rt6_next;
 		rcu_assign_pointer(*ins, rt);
 		call_fib6_entry_notifiers(info->nl_net, FIB_EVENT_ENTRY_REPLACE,
 					  rt, extack);
@@ -1064,14 +1056,14 @@ add:
 
 		if (nsiblings) {
 			/* Replacing an ECMP route, remove all siblings */
-			ins = &rt->dst.rt6_next;
+			ins = &rt->rt6_next;
 			iter = rcu_dereference_protected(*ins,
 				    lockdep_is_held(&rt->rt6i_table->tb6_lock));
 			while (iter) {
 				if (iter->rt6i_metric > rt->rt6i_metric)
 					break;
 				if (rt6_qualify_for_ecmp(iter)) {
-					*ins = iter->dst.rt6_next;
+					*ins = iter->rt6_next;
 					iter->rt6i_node = NULL;
 					fib6_purge_rt(iter, fn, info->nl_net);
 					if (rcu_access_pointer(fn->rr_ptr) == iter)
@@ -1080,7 +1072,7 @@ add:
 					nsiblings--;
 					info->nl_net->ipv6.rt6_stats->fib_rt_entries--;
 				} else {
-					ins = &iter->dst.rt6_next;
+					ins = &iter->rt6_next;
 				}
 				iter = rcu_dereference_protected(*ins,
 					lockdep_is_held(&rt->rt6i_table->tb6_lock));
@@ -1107,8 +1099,8 @@ void fib6_force_start_gc(struct net *net)
 			  jiffies + net->ipv6.sysctl.ip6_rt_gc_interval);
 }
 
-static void fib6_update_sernum_upto_root(struct rt6_info *rt,
-					 int sernum)
+static void __fib6_update_sernum_upto_root(struct rt6_info *rt,
+					   int sernum)
 {
 	struct fib6_node *fn = rcu_dereference_protected(rt->rt6i_node,
 				lockdep_is_held(&rt->rt6i_table->tb6_lock));
@@ -1122,6 +1114,11 @@ static void fib6_update_sernum_upto_root(struct rt6_info *rt,
 	}
 }
 
+void fib6_update_sernum_upto_root(struct net *net, struct rt6_info *rt)
+{
+	__fib6_update_sernum_upto_root(rt, fib6_new_sernum(net));
+}
+
 /*
  *	Add routing information to the routing tree.
  *	<destination addr>/<source addr>
@@ -1241,7 +1238,7 @@ int fib6_add(struct fib6_node *root, struct rt6_info *rt,
 
 	err = fib6_add_rt2node(fn, rt, info, mxc, extack);
 	if (!err) {
-		fib6_update_sernum_upto_root(rt, sernum);
+		__fib6_update_sernum_upto_root(rt, sernum);
 		fib6_start_gc(info->nl_net, rt);
 	}
 
@@ -1670,7 +1667,7 @@ static void fib6_del_route(struct fib6_table *table, struct fib6_node *fn,
 	WARN_ON_ONCE(rt->rt6i_flags & RTF_CACHE);
 
 	/* Unlink it */
-	*rtp = rt->dst.rt6_next;
+	*rtp = rt->rt6_next;
 	rt->rt6i_node = NULL;
 	net->ipv6.rt6_stats->fib_rt_entries--;
 	net->ipv6.rt6_stats->fib_discarded_routes++;
@@ -1691,6 +1688,7 @@ static void fib6_del_route(struct fib6_table *table, struct fib6_node *fn,
 			sibling->rt6i_nsiblings--;
 		rt->rt6i_nsiblings = 0;
 		list_del_init(&rt->rt6i_siblings);
+		rt6_multipath_rebalance(next_sibling);
 	}
 
 	/* Adjust walkers */
@@ -1698,7 +1696,7 @@ static void fib6_del_route(struct fib6_table *table, struct fib6_node *fn,
 	FOR_WALKERS(net, w) {
 		if (w->state == FWS_C && w->leaf == rt) {
 			RT6_TRACE("walker %p adjusted by delroute\n", w);
-			w->leaf = rcu_dereference_protected(rt->dst.rt6_next,
+			w->leaf = rcu_dereference_protected(rt->rt6_next,
 					    lockdep_is_held(&table->tb6_lock));
 			if (!w->leaf)
 				w->state = FWS_U;
@@ -1762,7 +1760,7 @@ int fib6_del(struct rt6_info *rt, struct nl_info *info)
 			fib6_del_route(table, fn, rtp, info);
 			return 0;
 		}
-		rtp_next = &cur->dst.rt6_next;
+		rtp_next = &cur->rt6_next;
 	}
 	return -ENOENT;
 }
@@ -1918,7 +1916,7 @@ static int fib6_clean_node(struct fib6_walker *w)
 
 	for_each_fib6_walker_rt(w) {
 		res = c->func(rt, c->arg);
-		if (res < 0) {
+		if (res == -1) {
 			w->leaf = rt;
 			res = fib6_del(rt, &info);
 			if (res) {
@@ -1931,6 +1929,12 @@ static int fib6_clean_node(struct fib6_walker *w)
 				continue;
 			}
 			return 0;
+		} else if (res == -2) {
+			if (WARN_ON(!rt->rt6i_nsiblings))
+				continue;
+			rt = list_last_entry(&rt->rt6i_siblings,
+					     struct rt6_info, rt6i_siblings);
+			continue;
 		}
 		WARN_ON(res != 0);
 	}
@@ -1942,7 +1946,8 @@ static int fib6_clean_node(struct fib6_walker *w)
  *	Convenient frontend to tree walker.
  *
  *	func is called on each route.
- *		It may return -1 -> delete this route.
+ *		It may return -2 -> skip multipath route.
+ *			      -1 -> delete this route.
  *		              0  -> continue walking
  */
 
@@ -2134,7 +2139,6 @@ static void fib6_net_exit(struct net *net)
 {
 	unsigned int i;
 
-	rt6_ifdown(net, NULL);
 	del_timer_sync(&net->ipv6.ip6_fib_timer);
 
 	for (i = 0; i < FIB6_TABLE_HASHSZ; i++) {
@@ -2173,8 +2177,8 @@ int __init fib6_init(void)
 	if (ret)
 		goto out_kmem_cache_create;
 
-	ret = __rtnl_register(PF_INET6, RTM_GETROUTE, NULL, inet6_dump_fib,
-			      0);
+	ret = rtnl_register_module(THIS_MODULE, PF_INET6, RTM_GETROUTE, NULL,
+				   inet6_dump_fib, 0);
 	if (ret)
 		goto out_unregister_subsys;
 
@@ -2239,7 +2243,7 @@ static int ipv6_route_yield(struct fib6_walker *w)
 
 	do {
 		iter->w.leaf = rcu_dereference_protected(
-				iter->w.leaf->dst.rt6_next,
+				iter->w.leaf->rt6_next,
 				lockdep_is_held(&iter->tbl->tb6_lock));
 		iter->skip--;
 		if (!iter->skip && iter->w.leaf)
@@ -2305,7 +2309,7 @@ static void *ipv6_route_seq_next(struct seq_file *seq, void *v, loff_t *pos)
 	if (!v)
 		goto iter_table;
 
-	n = rcu_dereference_bh(((struct rt6_info *)v)->dst.rt6_next);
+	n = rcu_dereference_bh(((struct rt6_info *)v)->rt6_next);
 	if (n) {
 		++*pos;
 		return n;
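
A hedged example of a cleaner callback written against the return-value convention documented above (should_drop() is a placeholder predicate, not a kernel helper): -1 asks fib6_clean_node() to delete the route, the new -2 jumps past the remaining members of a multipath group, and 0 keeps walking.

#include <net/ip6_fib.h>

static int my_fib6_cleaner(struct rt6_info *rt, void *arg)
{
	if (should_drop(rt, arg))	/* placeholder predicate */
		return -1;	/* fib6_clean_node() deletes the route */

	if (rt->rt6i_nsiblings)
		return -2;	/* skip the rest of this ECMP group */

	return 0;		/* keep walking */
}
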
diff --git a/net/ipv6/ip6_flowlabel.c b/net/ipv6/ip6_flowlabel.c
index 7f59c8fabeeb..3dab664ff503 100644
--- a/net/ipv6/ip6_flowlabel.c
+++ b/net/ipv6/ip6_flowlabel.c
@@ -836,7 +836,6 @@ static int ip6fl_seq_release(struct inode *inode, struct file *file)
 }
 
 static const struct file_operations ip6fl_seq_fops = {
-	.owner		=	THIS_MODULE,
 	.open		=	ip6fl_seq_open,
 	.read		=	seq_read,
 	.llseek		=	seq_lseek,
diff --git a/net/ipv6/ip6_gre.c b/net/ipv6/ip6_gre.c
index 873549228ccb..05f070e123e4 100644
--- a/net/ipv6/ip6_gre.c
+++ b/net/ipv6/ip6_gre.c
@@ -55,6 +55,8 @@
 #include <net/ip6_route.h>
 #include <net/ip6_tunnel.h>
 #include <net/gre.h>
+#include <net/erspan.h>
+#include <net/dst_metadata.h>
 
 
 static bool log_ecn_error = true;
@@ -68,11 +70,13 @@ static unsigned int ip6gre_net_id __read_mostly;
 struct ip6gre_net {
 	struct ip6_tnl __rcu *tunnels[4][IP6_GRE_HASH_SIZE];
 
+	struct ip6_tnl __rcu *collect_md_tun;
 	struct net_device *fb_tunnel_dev;
 };
 
 static struct rtnl_link_ops ip6gre_link_ops __read_mostly;
 static struct rtnl_link_ops ip6gre_tap_ops __read_mostly;
+static struct rtnl_link_ops ip6erspan_tap_ops __read_mostly;
 static int ip6gre_tunnel_init(struct net_device *dev);
 static void ip6gre_tunnel_setup(struct net_device *dev);
 static void ip6gre_tunnel_link(struct ip6gre_net *ign, struct ip6_tnl *t);
@@ -121,7 +125,8 @@ static struct ip6_tnl *ip6gre_tunnel_lookup(struct net_device *dev,
 	unsigned int h1 = HASH_KEY(key);
 	struct ip6_tnl *t, *cand = NULL;
 	struct ip6gre_net *ign = net_generic(net, ip6gre_net_id);
-	int dev_type = (gre_proto == htons(ETH_P_TEB)) ?
+	int dev_type = (gre_proto == htons(ETH_P_TEB) ||
+			gre_proto == htons(ETH_P_ERSPAN)) ?
 		       ARPHRD_ETHER : ARPHRD_IP6GRE;
 	int score, cand_score = 4;
 
@@ -226,6 +231,10 @@ static struct ip6_tnl *ip6gre_tunnel_lookup(struct net_device *dev,
 	if (cand)
 		return cand;
 
+	t = rcu_dereference(ign->collect_md_tun);
+	if (t && t->dev->flags & IFF_UP)
+		return t;
+
 	dev = ign->fb_tunnel_dev;
 	if (dev->flags & IFF_UP)
 		return netdev_priv(dev);
@@ -261,6 +270,9 @@ static void ip6gre_tunnel_link(struct ip6gre_net *ign, struct ip6_tnl *t)
 {
 	struct ip6_tnl __rcu **tp = ip6gre_bucket(ign, t);
 
+	if (t->parms.collect_md)
+		rcu_assign_pointer(ign->collect_md_tun, t);
+
 	rcu_assign_pointer(t->next, rtnl_dereference(*tp));
 	rcu_assign_pointer(*tp, t);
 }
@@ -270,6 +282,9 @@ static void ip6gre_tunnel_unlink(struct ip6gre_net *ign, struct ip6_tnl *t)
 	struct ip6_tnl __rcu **tp;
 	struct ip6_tnl *iter;
 
+	if (t->parms.collect_md)
+		rcu_assign_pointer(ign->collect_md_tun, NULL);
+
 	for (tp = ip6gre_bucket(ign, t);
 	     (iter = rtnl_dereference(*tp)) != NULL;
 	     tp = &iter->next) {
@@ -461,7 +476,101 @@ static int ip6gre_rcv(struct sk_buff *skb, const struct tnl_ptk_info *tpi)
 				      &ipv6h->saddr, &ipv6h->daddr, tpi->key,
 				      tpi->proto);
 	if (tunnel) {
-		ip6_tnl_rcv(tunnel, skb, tpi, NULL, log_ecn_error);
+		if (tunnel->parms.collect_md) {
+			struct metadata_dst *tun_dst;
+			__be64 tun_id;
+			__be16 flags;
+
+			flags = tpi->flags;
+			tun_id = key32_to_tunnel_id(tpi->key);
+
+			tun_dst = ipv6_tun_rx_dst(skb, flags, tun_id, 0);
+			if (!tun_dst)
+				return PACKET_REJECT;
+
+			ip6_tnl_rcv(tunnel, skb, tpi, tun_dst, log_ecn_error);
+		} else {
+			ip6_tnl_rcv(tunnel, skb, tpi, NULL, log_ecn_error);
+		}
+
+		return PACKET_RCVD;
+	}
+
+	return PACKET_REJECT;
+}
+
+static int ip6erspan_rcv(struct sk_buff *skb, int gre_hdr_len,
+			 struct tnl_ptk_info *tpi)
+{
+	struct erspan_base_hdr *ershdr;
+	struct erspan_metadata *pkt_md;
+	const struct ipv6hdr *ipv6h;
+	struct ip6_tnl *tunnel;
+	u8 ver;
+
+	if (unlikely(!pskb_may_pull(skb, sizeof(*ershdr))))
+		return PACKET_REJECT;
+
+	ipv6h = ipv6_hdr(skb);
+	ershdr = (struct erspan_base_hdr *)skb->data;
+	ver = ershdr->ver;
+	tpi->key = cpu_to_be32(get_session_id(ershdr));
+
+	tunnel = ip6gre_tunnel_lookup(skb->dev,
+				      &ipv6h->saddr, &ipv6h->daddr, tpi->key,
+				      tpi->proto);
+	if (tunnel) {
+		int len = erspan_hdr_len(ver);
+
+		if (unlikely(!pskb_may_pull(skb, len)))
+			return PACKET_REJECT;
+
+		ershdr = (struct erspan_base_hdr *)skb->data;
+		pkt_md = (struct erspan_metadata *)(ershdr + 1);
+
+		if (__iptunnel_pull_header(skb, len,
+					   htons(ETH_P_TEB),
+					   false, false) < 0)
+			return PACKET_REJECT;
+
+		if (tunnel->parms.collect_md) {
+			struct metadata_dst *tun_dst;
+			struct ip_tunnel_info *info;
+			struct erspan_metadata *md;
+			__be64 tun_id;
+			__be16 flags;
+
+			tpi->flags |= TUNNEL_KEY;
+			flags = tpi->flags;
+			tun_id = key32_to_tunnel_id(tpi->key);
+
+			tun_dst = ipv6_tun_rx_dst(skb, flags, tun_id,
+						  sizeof(*md));
+			if (!tun_dst)
+				return PACKET_REJECT;
+
+			info = &tun_dst->u.tun_info;
+			md = ip_tunnel_info_opts(info);
+
+			memcpy(md, pkt_md, sizeof(*md));
+			md->version = ver;
+			info->key.tun_flags |= TUNNEL_ERSPAN_OPT;
+			info->options_len = sizeof(*md);
+
+			ip6_tnl_rcv(tunnel, skb, tpi, tun_dst, log_ecn_error);
+
+		} else {
+			tunnel->parms.erspan_ver = ver;
+
+			if (ver == 1) {
+				tunnel->parms.index = ntohl(pkt_md->u.index);
+			} else {
+				tunnel->parms.dir = pkt_md->u.md2.dir;
+				tunnel->parms.hwid = get_hwid(&pkt_md->u.md2);
+			}
+
+			ip6_tnl_rcv(tunnel, skb, tpi, NULL, log_ecn_error);
+		}
 
 		return PACKET_RCVD;
 	}
@@ -482,9 +591,17 @@ static int gre_rcv(struct sk_buff *skb)
 	if (iptunnel_pull_header(skb, hdr_len, tpi.proto, false))
 		goto drop;
 
+	if (unlikely(tpi.proto == htons(ETH_P_ERSPAN) ||
+		     tpi.proto == htons(ETH_P_ERSPAN2))) {
+		if (ip6erspan_rcv(skb, hdr_len, &tpi) == PACKET_RCVD)
+			return 0;
+		goto out;
+	}
+
 	if (ip6gre_rcv(skb, &tpi) == PACKET_RCVD)
 		return 0;
 
+out:
 	icmpv6_send(skb, ICMPV6_DEST_UNREACH, ICMPV6_PORT_UNREACH, 0);
 drop:
 	kfree_skb(skb);
@@ -497,6 +614,78 @@ static int gre_handle_offloads(struct sk_buff *skb, bool csum)
 					csum ? SKB_GSO_GRE_CSUM : SKB_GSO_GRE);
 }
 
+static void prepare_ip6gre_xmit_ipv4(struct sk_buff *skb,
+				     struct net_device *dev,
+				     struct flowi6 *fl6, __u8 *dsfield,
+				     int *encap_limit)
+{
+	const struct iphdr *iph = ip_hdr(skb);
+	struct ip6_tnl *t = netdev_priv(dev);
+
+	if (!(t->parms.flags & IP6_TNL_F_IGN_ENCAP_LIMIT))
+		*encap_limit = t->parms.encap_limit;
+
+	memcpy(fl6, &t->fl.u.ip6, sizeof(*fl6));
+
+	if (t->parms.flags & IP6_TNL_F_USE_ORIG_TCLASS)
+		*dsfield = ipv4_get_dsfield(iph);
+	else
+		*dsfield = ip6_tclass(t->parms.flowinfo);
+
+	if (t->parms.flags & IP6_TNL_F_USE_ORIG_FWMARK)
+		fl6->flowi6_mark = skb->mark;
+	else
+		fl6->flowi6_mark = t->parms.fwmark;
+
+	fl6->flowi6_uid = sock_net_uid(dev_net(dev), NULL);
+}
+
+static int prepare_ip6gre_xmit_ipv6(struct sk_buff *skb,
+				    struct net_device *dev,
+				    struct flowi6 *fl6, __u8 *dsfield,
+				    int *encap_limit)
+{
+	struct ipv6hdr *ipv6h = ipv6_hdr(skb);
+	struct ip6_tnl *t = netdev_priv(dev);
+	__u16 offset;
+
+	offset = ip6_tnl_parse_tlv_enc_lim(skb, skb_network_header(skb));
+	/* ip6_tnl_parse_tlv_enc_lim() might have reallocated skb->head */
+
+	if (offset > 0) {
+		struct ipv6_tlv_tnl_enc_lim *tel;
+
+		tel = (struct ipv6_tlv_tnl_enc_lim *)&skb_network_header(skb)[offset];
+		if (tel->encap_limit == 0) {
+			icmpv6_send(skb, ICMPV6_PARAMPROB,
+				    ICMPV6_HDR_FIELD, offset + 2);
+			return -1;
+		}
+		*encap_limit = tel->encap_limit - 1;
+	} else if (!(t->parms.flags & IP6_TNL_F_IGN_ENCAP_LIMIT)) {
+		*encap_limit = t->parms.encap_limit;
+	}
+
+	memcpy(fl6, &t->fl.u.ip6, sizeof(*fl6));
+
+	if (t->parms.flags & IP6_TNL_F_USE_ORIG_TCLASS)
+		*dsfield = ipv6_get_dsfield(ipv6h);
+	else
+		*dsfield = ip6_tclass(t->parms.flowinfo);
+
+	if (t->parms.flags & IP6_TNL_F_USE_ORIG_FLOWLABEL)
+		fl6->flowlabel |= ip6_flowlabel(ipv6h);
+
+	if (t->parms.flags & IP6_TNL_F_USE_ORIG_FWMARK)
+		fl6->flowi6_mark = skb->mark;
+	else
+		fl6->flowi6_mark = t->parms.fwmark;
+
+	fl6->flowi6_uid = sock_net_uid(dev_net(dev), NULL);
+
+	return 0;
+}
+
 static netdev_tx_t __gre6_xmit(struct sk_buff *skb,
 			       struct net_device *dev, __u8 dsfield,
 			       struct flowi6 *fl6, int encap_limit,
@@ -518,8 +707,38 @@ static netdev_tx_t __gre6_xmit(struct sk_buff *skb,
 
 	/* Push GRE header. */
 	protocol = (dev->type == ARPHRD_ETHER) ? htons(ETH_P_TEB) : proto;
-	gre_build_header(skb, tunnel->tun_hlen, tunnel->parms.o_flags,
-			 protocol, tunnel->parms.o_key, htonl(tunnel->o_seqno));
+
+	if (tunnel->parms.collect_md) {
+		struct ip_tunnel_info *tun_info;
+		const struct ip_tunnel_key *key;
+		__be16 flags;
+
+		tun_info = skb_tunnel_info(skb);
+		if (unlikely(!tun_info ||
+			     !(tun_info->mode & IP_TUNNEL_INFO_TX) ||
+			     ip_tunnel_info_af(tun_info) != AF_INET6))
+			return -EINVAL;
+
+		key = &tun_info->key;
+		memset(fl6, 0, sizeof(*fl6));
+		fl6->flowi6_proto = IPPROTO_GRE;
+		fl6->daddr = key->u.ipv6.dst;
+		fl6->flowlabel = key->label;
+		fl6->flowi6_uid = sock_net_uid(dev_net(dev), NULL);
+
+		dsfield = key->tos;
+		flags = key->tun_flags & (TUNNEL_CSUM | TUNNEL_KEY);
+		tunnel->tun_hlen = gre_calc_hlen(flags);
+
+		gre_build_header(skb, tunnel->tun_hlen,
+				 flags, protocol,
+				 tunnel_id_to_key32(tun_info->key.tun_id), 0);
+
+	} else {
+		gre_build_header(skb, tunnel->tun_hlen, tunnel->parms.o_flags,
+				 protocol, tunnel->parms.o_key,
+				 htonl(tunnel->o_seqno));
+	}
 
 	return ip6_tnl_xmit(skb, dev, dsfield, fl6, encap_limit, pmtu,
 			    NEXTHDR_GRE);
@@ -528,30 +747,17 @@ static netdev_tx_t __gre6_xmit(struct sk_buff *skb,
 static inline int ip6gre_xmit_ipv4(struct sk_buff *skb, struct net_device *dev)
 {
 	struct ip6_tnl *t = netdev_priv(dev);
-	const struct iphdr  *iph = ip_hdr(skb);
 	int encap_limit = -1;
 	struct flowi6 fl6;
-	__u8 dsfield;
+	__u8 dsfield = 0;
 	__u32 mtu;
 	int err;
 
 	memset(&(IPCB(skb)->opt), 0, sizeof(IPCB(skb)->opt));
 
-	if (!(t->parms.flags & IP6_TNL_F_IGN_ENCAP_LIMIT))
-		encap_limit = t->parms.encap_limit;
-
-	memcpy(&fl6, &t->fl.u.ip6, sizeof(fl6));
-
-	if (t->parms.flags & IP6_TNL_F_USE_ORIG_TCLASS)
-		dsfield = ipv4_get_dsfield(iph);
-	else
-		dsfield = ip6_tclass(t->parms.flowinfo);
-	if (t->parms.flags & IP6_TNL_F_USE_ORIG_FWMARK)
-		fl6.flowi6_mark = skb->mark;
-	else
-		fl6.flowi6_mark = t->parms.fwmark;
-
-	fl6.flowi6_uid = sock_net_uid(dev_net(dev), NULL);
+	if (!t->parms.collect_md)
+		prepare_ip6gre_xmit_ipv4(skb, dev, &fl6,
+					 &dsfield, &encap_limit);
 
 	err = gre_handle_offloads(skb, !!(t->parms.o_flags & TUNNEL_CSUM));
 	if (err)
@@ -575,46 +781,17 @@ static inline int ip6gre_xmit_ipv6(struct sk_buff *skb, struct net_device *dev)
 	struct ip6_tnl *t = netdev_priv(dev);
 	struct ipv6hdr *ipv6h = ipv6_hdr(skb);
 	int encap_limit = -1;
-	__u16 offset;
 	struct flowi6 fl6;
-	__u8 dsfield;
+	__u8 dsfield = 0;
 	__u32 mtu;
 	int err;
 
 	if (ipv6_addr_equal(&t->parms.raddr, &ipv6h->saddr))
 		return -1;
 
-	offset = ip6_tnl_parse_tlv_enc_lim(skb, skb_network_header(skb));
-	/* ip6_tnl_parse_tlv_enc_lim() might have reallocated skb->head */
-	ipv6h = ipv6_hdr(skb);
-
-	if (offset > 0) {
-		struct ipv6_tlv_tnl_enc_lim *tel;
-		tel = (struct ipv6_tlv_tnl_enc_lim *)&skb_network_header(skb)[offset];
-		if (tel->encap_limit == 0) {
-			icmpv6_send(skb, ICMPV6_PARAMPROB,
-				    ICMPV6_HDR_FIELD, offset + 2);
-			return -1;
-		}
-		encap_limit = tel->encap_limit - 1;
-	} else if (!(t->parms.flags & IP6_TNL_F_IGN_ENCAP_LIMIT))
-		encap_limit = t->parms.encap_limit;
-
-	memcpy(&fl6, &t->fl.u.ip6, sizeof(fl6));
-
-	if (t->parms.flags & IP6_TNL_F_USE_ORIG_TCLASS)
-		dsfield = ipv6_get_dsfield(ipv6h);
-	else
-		dsfield = ip6_tclass(t->parms.flowinfo);
-
-	if (t->parms.flags & IP6_TNL_F_USE_ORIG_FLOWLABEL)
-		fl6.flowlabel |= ip6_flowlabel(ipv6h);
-	if (t->parms.flags & IP6_TNL_F_USE_ORIG_FWMARK)
-		fl6.flowi6_mark = skb->mark;
-	else
-		fl6.flowi6_mark = t->parms.fwmark;
-
-	fl6.flowi6_uid = sock_net_uid(dev_net(dev), NULL);
+	if (!t->parms.collect_md &&
+	    prepare_ip6gre_xmit_ipv6(skb, dev, &fl6, &dsfield, &encap_limit))
+		return -1;
 
 	if (gre_handle_offloads(skb, !!(t->parms.o_flags & TUNNEL_CSUM)))
 		return -1;
@@ -661,7 +838,8 @@ static int ip6gre_xmit_other(struct sk_buff *skb, struct net_device *dev)
 	if (!(t->parms.flags & IP6_TNL_F_IGN_ENCAP_LIMIT))
 		encap_limit = t->parms.encap_limit;
 
-	memcpy(&fl6, &t->fl.u.ip6, sizeof(fl6));
+	if (!t->parms.collect_md)
+		memcpy(&fl6, &t->fl.u.ip6, sizeof(fl6));
 
 	err = gre_handle_offloads(skb, !!(t->parms.o_flags & TUNNEL_CSUM));
 	if (err)
@@ -706,6 +884,137 @@ tx_err:
 	return NETDEV_TX_OK;
 }
 
+static netdev_tx_t ip6erspan_tunnel_xmit(struct sk_buff *skb,
+					 struct net_device *dev)
+{
+	struct ipv6hdr *ipv6h = ipv6_hdr(skb);
+	struct ip6_tnl *t = netdev_priv(dev);
+	struct dst_entry *dst = skb_dst(skb);
+	struct net_device_stats *stats;
+	bool truncate = false;
+	int encap_limit = -1;
+	__u8 dsfield = 0;
+	struct flowi6 fl6;
+	int err = -EINVAL;
+	__u32 mtu;
+
+	if (!ip6_tnl_xmit_ctl(t, &t->parms.laddr, &t->parms.raddr))
+		goto tx_err;
+
+	if (gre_handle_offloads(skb, false))
+		goto tx_err;
+
+	if (skb->len > dev->mtu + dev->hard_header_len) {
+		pskb_trim(skb, dev->mtu + dev->hard_header_len);
+		truncate = true;
+	}
+
+	t->parms.o_flags &= ~TUNNEL_KEY;
+	IPCB(skb)->flags = 0;
+
+	/* For collect_md mode, derive fl6 from the tunnel key;
+	 * for native mode, call prepare_ip6gre_xmit_{ipv4,ipv6}.
+	 */
+	if (t->parms.collect_md) {
+		struct ip_tunnel_info *tun_info;
+		const struct ip_tunnel_key *key;
+		struct erspan_metadata *md;
+		__be32 tun_id;
+
+		tun_info = skb_tunnel_info(skb);
+		if (unlikely(!tun_info ||
+			     !(tun_info->mode & IP_TUNNEL_INFO_TX) ||
+			     ip_tunnel_info_af(tun_info) != AF_INET6))
+			return -EINVAL;
+
+		key = &tun_info->key;
+		memset(&fl6, 0, sizeof(fl6));
+		fl6.flowi6_proto = IPPROTO_GRE;
+		fl6.daddr = key->u.ipv6.dst;
+		fl6.flowlabel = key->label;
+		fl6.flowi6_uid = sock_net_uid(dev_net(dev), NULL);
+
+		dsfield = key->tos;
+		md = ip_tunnel_info_opts(tun_info);
+		if (!md)
+			goto tx_err;
+
+		tun_id = tunnel_id_to_key32(key->tun_id);
+		if (md->version == 1) {
+			erspan_build_header(skb,
+					    ntohl(tun_id),
+					    ntohl(md->u.index), truncate,
+					    false);
+		} else if (md->version == 2) {
+			erspan_build_header_v2(skb,
+					       ntohl(tun_id),
+					       md->u.md2.dir,
+					       get_hwid(&md->u.md2),
+					       truncate, false);
+		}
+	} else {
+		switch (skb->protocol) {
+		case htons(ETH_P_IP):
+			memset(&(IPCB(skb)->opt), 0, sizeof(IPCB(skb)->opt));
+			prepare_ip6gre_xmit_ipv4(skb, dev, &fl6,
+						 &dsfield, &encap_limit);
+			break;
+		case htons(ETH_P_IPV6):
+			if (ipv6_addr_equal(&t->parms.raddr, &ipv6h->saddr))
+				goto tx_err;
+			if (prepare_ip6gre_xmit_ipv6(skb, dev, &fl6,
+						     &dsfield, &encap_limit))
+				goto tx_err;
+			break;
+		default:
+			memcpy(&fl6, &t->fl.u.ip6, sizeof(fl6));
+			break;
+		}
+
+		if (t->parms.erspan_ver == 1)
+			erspan_build_header(skb, ntohl(t->parms.o_key),
+					    t->parms.index,
+					    truncate, false);
+		else
+			erspan_build_header_v2(skb, ntohl(t->parms.o_key),
+					       t->parms.dir,
+					       t->parms.hwid,
+					       truncate, false);
+		fl6.daddr = t->parms.raddr;
+	}
+
+	/* Push GRE header. */
+	gre_build_header(skb, 8, TUNNEL_SEQ,
+			 htons(ETH_P_ERSPAN), 0, htonl(t->o_seqno++));
+
+	/* TooBig packet may have updated dst->dev's mtu */
+	if (!t->parms.collect_md && dst && dst_mtu(dst) > dst->dev->mtu)
+		dst->ops->update_pmtu(dst, NULL, skb, dst->dev->mtu);
+
+	err = ip6_tnl_xmit(skb, dev, dsfield, &fl6, encap_limit, &mtu,
+			   NEXTHDR_GRE);
+	if (err != 0) {
+		/* XXX: send ICMP error even if DF is not set. */
+		if (err == -EMSGSIZE) {
+			if (skb->protocol == htons(ETH_P_IP))
+				icmp_send(skb, ICMP_DEST_UNREACH,
+					  ICMP_FRAG_NEEDED, htonl(mtu));
+			else
+				icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu);
+		}
+
+		goto tx_err;
+	}
+	return NETDEV_TX_OK;
+
+tx_err:
+	stats = &t->dev->stats;
+	stats->tx_errors++;
+	stats->tx_dropped++;
+	kfree_skb(skb);
+	return NETDEV_TX_OK;
+}
+
 static void ip6gre_tnl_link_config(struct ip6_tnl *t, int set_mtu)
 {
 	struct net_device *dev = t->dev;
@@ -1079,6 +1388,10 @@ static int ip6gre_tunnel_init_common(struct net_device *dev)
 	if (!(tunnel->parms.flags & IP6_TNL_F_IGN_ENCAP_LIMIT))
 		dev->mtu -= 8;
 
+	if (tunnel->parms.collect_md) {
+		dev->features |= NETIF_F_NETNS_LOCAL;
+		netif_keep_dst(dev);
+	}
 	ip6gre_tnl_init_features(dev);
 
 	return 0;
@@ -1095,6 +1408,9 @@ static int ip6gre_tunnel_init(struct net_device *dev)
 
 	tunnel = netdev_priv(dev);
 
+	if (tunnel->parms.collect_md)
+		return 0;
+
 	memcpy(dev->dev_addr, &tunnel->parms.laddr, sizeof(struct in6_addr));
 	memcpy(dev->broadcast, &tunnel->parms.raddr, sizeof(struct in6_addr));
 
@@ -1117,7 +1433,6 @@ static void ip6gre_fb_tunnel_init(struct net_device *dev)
 	dev_hold(dev);
 }
 
-
 static struct inet6_protocol ip6gre_protocol __read_mostly = {
 	.handler     = gre_rcv,
 	.err_handler = ip6gre_err,
@@ -1132,7 +1447,8 @@ static void ip6gre_destroy_tunnels(struct net *net, struct list_head *head)
 
 	for_each_netdev_safe(net, dev, aux)
 		if (dev->rtnl_link_ops == &ip6gre_link_ops ||
-		    dev->rtnl_link_ops == &ip6gre_tap_ops)
+		    dev->rtnl_link_ops == &ip6gre_tap_ops ||
+		    dev->rtnl_link_ops == &ip6erspan_tap_ops)
 			unregister_netdevice_queue(dev, head);
 
 	for (prio = 0; prio < 4; prio++) {
@@ -1254,6 +1570,70 @@ out:
 	return ip6gre_tunnel_validate(tb, data, extack);
 }
 
+static int ip6erspan_tap_validate(struct nlattr *tb[], struct nlattr *data[],
+				  struct netlink_ext_ack *extack)
+{
+	__be16 flags = 0;
+	int ret, ver = 0;
+
+	if (!data)
+		return 0;
+
+	ret = ip6gre_tap_validate(tb, data, extack);
+	if (ret)
+		return ret;
+
+	/* ERSPAN should only have the GRE sequence and key flags */
+	if (data[IFLA_GRE_OFLAGS])
+		flags |= nla_get_be16(data[IFLA_GRE_OFLAGS]);
+	if (data[IFLA_GRE_IFLAGS])
+		flags |= nla_get_be16(data[IFLA_GRE_IFLAGS]);
+	if (!data[IFLA_GRE_COLLECT_METADATA] &&
+	    flags != (GRE_SEQ | GRE_KEY))
+		return -EINVAL;
+
+	/* The ERSPAN Session ID is only 10 bits wide. Since we reuse
+	 * the 32-bit key field as the ID, check its range.
+	 */
+	if (data[IFLA_GRE_IKEY] &&
+	    (ntohl(nla_get_be32(data[IFLA_GRE_IKEY])) & ~ID_MASK))
+		return -EINVAL;
+
+	if (data[IFLA_GRE_OKEY] &&
+	    (ntohl(nla_get_be32(data[IFLA_GRE_OKEY])) & ~ID_MASK))
+		return -EINVAL;
+
+	if (data[IFLA_GRE_ERSPAN_VER]) {
+		ver = nla_get_u8(data[IFLA_GRE_ERSPAN_VER]);
+		if (ver != 1 && ver != 2)
+			return -EINVAL;
+	}
+
+	if (ver == 1) {
+		if (data[IFLA_GRE_ERSPAN_INDEX]) {
+			u32 index = nla_get_u32(data[IFLA_GRE_ERSPAN_INDEX]);
+
+			if (index & ~INDEX_MASK)
+				return -EINVAL;
+		}
+	} else if (ver == 2) {
+		if (data[IFLA_GRE_ERSPAN_DIR]) {
+			u16 dir = nla_get_u8(data[IFLA_GRE_ERSPAN_DIR]);
+
+			if (dir & ~(DIR_MASK >> DIR_OFFSET))
+				return -EINVAL;
+		}
+
+		if (data[IFLA_GRE_ERSPAN_HWID]) {
+			u16 hwid = nla_get_u16(data[IFLA_GRE_ERSPAN_HWID]);
+
+			if (hwid & ~(HWID_MASK >> HWID_OFFSET))
+				return -EINVAL;
+		}
+	}
+
+	return 0;
+}
 
 static void ip6gre_netlink_parms(struct nlattr *data[],
 				struct __ip6_tnl_parm *parms)
@@ -1300,6 +1680,22 @@ static void ip6gre_netlink_parms(struct nlattr *data[],
 
 	if (data[IFLA_GRE_FWMARK])
 		parms->fwmark = nla_get_u32(data[IFLA_GRE_FWMARK]);
+
+	if (data[IFLA_GRE_COLLECT_METADATA])
+		parms->collect_md = true;
+
+	if (data[IFLA_GRE_ERSPAN_VER])
+		parms->erspan_ver = nla_get_u8(data[IFLA_GRE_ERSPAN_VER]);
+
+	if (parms->erspan_ver == 1) {
+		if (data[IFLA_GRE_ERSPAN_INDEX])
+			parms->index = nla_get_u32(data[IFLA_GRE_ERSPAN_INDEX]);
+	} else if (parms->erspan_ver == 2) {
+		if (data[IFLA_GRE_ERSPAN_DIR])
+			parms->dir = nla_get_u8(data[IFLA_GRE_ERSPAN_DIR]);
+		if (data[IFLA_GRE_ERSPAN_HWID])
+			parms->hwid = nla_get_u16(data[IFLA_GRE_ERSPAN_HWID]);
+	}
 }
 
 static int ip6gre_tap_init(struct net_device *dev)
@@ -1326,6 +1722,59 @@ static const struct net_device_ops ip6gre_tap_netdev_ops = {
 	.ndo_get_iflink = ip6_tnl_get_iflink,
 };
 
+static int ip6erspan_tap_init(struct net_device *dev)
+{
+	struct ip6_tnl *tunnel;
+	int t_hlen;
+	int ret;
+
+	tunnel = netdev_priv(dev);
+
+	tunnel->dev = dev;
+	tunnel->net = dev_net(dev);
+	strcpy(tunnel->parms.name, dev->name);
+
+	dev->tstats = netdev_alloc_pcpu_stats(struct pcpu_sw_netstats);
+	if (!dev->tstats)
+		return -ENOMEM;
+
+	ret = dst_cache_init(&tunnel->dst_cache, GFP_KERNEL);
+	if (ret) {
+		free_percpu(dev->tstats);
+		dev->tstats = NULL;
+		return ret;
+	}
+
+	tunnel->tun_hlen = 8;
+	tunnel->hlen = tunnel->tun_hlen + tunnel->encap_hlen +
+		       erspan_hdr_len(tunnel->parms.erspan_ver);
+	t_hlen = tunnel->hlen + sizeof(struct ipv6hdr);
+
+	dev->hard_header_len = LL_MAX_HEADER + t_hlen;
+	dev->mtu = ETH_DATA_LEN - t_hlen;
+	if (dev->type == ARPHRD_ETHER)
+		dev->mtu -= ETH_HLEN;
+	if (!(tunnel->parms.flags & IP6_TNL_F_IGN_ENCAP_LIMIT))
+		dev->mtu -= 8;
+
+	dev->priv_flags |= IFF_LIVE_ADDR_CHANGE;
+	ip6gre_tnl_link_config(tunnel, 1);
+
+	return 0;
+}
+
+static const struct net_device_ops ip6erspan_netdev_ops = {
+	.ndo_init =		ip6erspan_tap_init,
+	.ndo_uninit =		ip6gre_tunnel_uninit,
+	.ndo_start_xmit =	ip6erspan_tunnel_xmit,
+	.ndo_set_mac_address =	eth_mac_addr,
+	.ndo_validate_addr =	eth_validate_addr,
+	.ndo_change_mtu =	ip6_tnl_change_mtu,
+	.ndo_get_stats64 =	ip_tunnel_get_stats64,
+	.ndo_get_iflink =	ip6_tnl_get_iflink,
+};
+
 static void ip6gre_tap_setup(struct net_device *dev)
 {
 
@@ -1396,8 +1845,13 @@ static int ip6gre_newlink(struct net *src_net, struct net_device *dev,
 
 	ip6gre_netlink_parms(data, &nt->parms);
 
-	if (ip6gre_tunnel_find(net, &nt->parms, dev->type))
-		return -EEXIST;
+	if (nt->parms.collect_md) {
+		if (rtnl_dereference(ign->collect_md_tun))
+			return -EEXIST;
+	} else {
+		if (ip6gre_tunnel_find(net, &nt->parms, dev->type))
+			return -EEXIST;
+	}
 
 	if (dev->type == ARPHRD_ETHER && !tb[IFLA_ADDRESS])
 		eth_hw_addr_random(dev);
@@ -1500,8 +1954,12 @@ static size_t ip6gre_get_size(const struct net_device *dev)
 		nla_total_size(2) +
 		/* IFLA_GRE_ENCAP_DPORT */
 		nla_total_size(2) +
+		/* IFLA_GRE_COLLECT_METADATA */
+		nla_total_size(0) +
 		/* IFLA_GRE_FWMARK */
 		nla_total_size(4) +
+		/* IFLA_GRE_ERSPAN_INDEX */
+		nla_total_size(4) +
 		0;
 }
 
@@ -1523,7 +1981,8 @@ static int ip6gre_fill_info(struct sk_buff *skb, const struct net_device *dev)
 	    nla_put_u8(skb, IFLA_GRE_ENCAP_LIMIT, p->encap_limit) ||
 	    nla_put_be32(skb, IFLA_GRE_FLOWINFO, p->flowinfo) ||
 	    nla_put_u32(skb, IFLA_GRE_FLAGS, p->flags) ||
-	    nla_put_u32(skb, IFLA_GRE_FWMARK, p->fwmark))
+	    nla_put_u32(skb, IFLA_GRE_FWMARK, p->fwmark) ||
+	    nla_put_u32(skb, IFLA_GRE_ERSPAN_INDEX, p->index))
 		goto nla_put_failure;
 
 	if (nla_put_u16(skb, IFLA_GRE_ENCAP_TYPE,
@@ -1536,6 +1995,24 @@ static int ip6gre_fill_info(struct sk_buff *skb, const struct net_device *dev)
 			t->encap.flags))
 		goto nla_put_failure;
 
+	if (p->collect_md) {
+		if (nla_put_flag(skb, IFLA_GRE_COLLECT_METADATA))
+			goto nla_put_failure;
+	}
+
+	if (nla_put_u8(skb, IFLA_GRE_ERSPAN_VER, p->erspan_ver))
+		goto nla_put_failure;
+
+	if (p->erspan_ver == 1) {
+		if (nla_put_u32(skb, IFLA_GRE_ERSPAN_INDEX, p->index))
+			goto nla_put_failure;
+	} else if (p->erspan_ver == 2) {
+		if (nla_put_u8(skb, IFLA_GRE_ERSPAN_DIR, p->dir))
+			goto nla_put_failure;
+		if (nla_put_u16(skb, IFLA_GRE_ERSPAN_HWID, p->hwid))
+			goto nla_put_failure;
+	}
+
 	return 0;
 
 nla_put_failure:
@@ -1558,9 +2035,28 @@ static const struct nla_policy ip6gre_policy[IFLA_GRE_MAX + 1] = {
 	[IFLA_GRE_ENCAP_FLAGS]  = { .type = NLA_U16 },
 	[IFLA_GRE_ENCAP_SPORT]  = { .type = NLA_U16 },
 	[IFLA_GRE_ENCAP_DPORT]  = { .type = NLA_U16 },
+	[IFLA_GRE_COLLECT_METADATA] = { .type = NLA_FLAG },
 	[IFLA_GRE_FWMARK]       = { .type = NLA_U32 },
+	[IFLA_GRE_ERSPAN_INDEX] = { .type = NLA_U32 },
+	[IFLA_GRE_ERSPAN_VER]	= { .type = NLA_U8 },
+	[IFLA_GRE_ERSPAN_DIR]	= { .type = NLA_U8 },
+	[IFLA_GRE_ERSPAN_HWID]	= { .type = NLA_U16 },
 };
 
+static void ip6erspan_tap_setup(struct net_device *dev)
+{
+	ether_setup(dev);
+
+	dev->netdev_ops = &ip6erspan_netdev_ops;
+	dev->needs_free_netdev = true;
+	dev->priv_destructor = ip6gre_dev_free;
+
+	dev->features |= NETIF_F_NETNS_LOCAL;
+	dev->priv_flags &= ~IFF_TX_SKB_SHARING;
+	dev->priv_flags |= IFF_LIVE_ADDR_CHANGE;
+	netif_keep_dst(dev);
+}
+
 static struct rtnl_link_ops ip6gre_link_ops __read_mostly = {
 	.kind		= "ip6gre",
 	.maxtype	= IFLA_GRE_MAX,
@@ -1590,6 +2086,20 @@ static struct rtnl_link_ops ip6gre_tap_ops __read_mostly = {
 	.get_link_net	= ip6_tnl_get_link_net,
 };
 
+static struct rtnl_link_ops ip6erspan_tap_ops __read_mostly = {
+	.kind		= "ip6erspan",
+	.maxtype	= IFLA_GRE_MAX,
+	.policy		= ip6gre_policy,
+	.priv_size	= sizeof(struct ip6_tnl),
+	.setup		= ip6erspan_tap_setup,
+	.validate	= ip6erspan_tap_validate,
+	.newlink	= ip6gre_newlink,
+	.changelink	= ip6gre_changelink,
+	.get_size	= ip6gre_get_size,
+	.fill_info	= ip6gre_fill_info,
+	.get_link_net	= ip6_tnl_get_link_net,
+};
+
 /*
  *	And now the modules code and kernel interface.
  */
@@ -1618,9 +2128,15 @@ static int __init ip6gre_init(void)
 	if (err < 0)
 		goto tap_ops_failed;
 
+	err = rtnl_link_register(&ip6erspan_tap_ops);
+	if (err < 0)
+		goto erspan_link_failed;
+
 out:
 	return err;
 
+erspan_link_failed:
+	rtnl_link_unregister(&ip6gre_tap_ops);
 tap_ops_failed:
 	rtnl_link_unregister(&ip6gre_link_ops);
 rtnl_link_failed:
@@ -1634,6 +2150,7 @@ static void __exit ip6gre_fini(void)
 {
 	rtnl_link_unregister(&ip6gre_tap_ops);
 	rtnl_link_unregister(&ip6gre_link_ops);
+	rtnl_link_unregister(&ip6erspan_tap_ops);
 	inet6_del_protocol(&ip6gre_protocol, IPPROTO_GRE);
 	unregister_pernet_device(&ip6gre_net_ops);
 }
@@ -1645,4 +2162,5 @@ MODULE_AUTHOR("D. Kozlov (xeb@mail.ru)");
 MODULE_DESCRIPTION("GRE over IPv6 tunneling device");
 MODULE_ALIAS_RTNL_LINK("ip6gre");
 MODULE_ALIAS_RTNL_LINK("ip6gretap");
+MODULE_ALIAS_RTNL_LINK("ip6erspan");
 MODULE_ALIAS_NETDEV("ip6gre0");
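
One pattern worth noting in the ip6_gre changes: a namespace holds at most one collect_md (flow-based) tunnel, cached in ign->collect_md_tun, so receive lookup can fall back to it with a single RCU load when no hashed tunnel matches; ip6gre_newlink() enforces the at-most-one rule with -EEXIST. A distilled sketch (the demo_* names are illustrative; the real code lives in ip6gre_tunnel_link()/_unlink() and ip6gre_tunnel_lookup() above):

#include <linux/netdevice.h>
#include <net/ip6_tunnel.h>

struct demo_net {			/* stand-in for struct ip6gre_net */
	struct ip6_tnl __rcu *collect_md_tun;
};

static void demo_link(struct demo_net *n, struct ip6_tnl *t)
{
	if (t->parms.collect_md)
		rcu_assign_pointer(n->collect_md_tun, t);
}

static void demo_unlink(struct demo_net *n, struct ip6_tnl *t)
{
	if (t->parms.collect_md)
		rcu_assign_pointer(n->collect_md_tun, NULL);
}

/* rx fallback; caller holds rcu_read_lock() */
static struct ip6_tnl *demo_rx_fallback(struct demo_net *n)
{
	struct ip6_tnl *t = rcu_dereference(n->collect_md_tun);

	return (t && t->dev->flags & IFF_UP) ? t : NULL;
}
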
diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c
index 3763dc01e374..997c7f19ad62 100644
--- a/net/ipv6/ip6_output.c
+++ b/net/ipv6/ip6_output.c
@@ -138,6 +138,14 @@ static int ip6_finish_output(struct net *net, struct sock *sk, struct sk_buff *s
 		return ret;
 	}
 
+#if defined(CONFIG_NETFILTER) && defined(CONFIG_XFRM)
+	/* Policy lookup after SNAT yielded a new policy */
+	if (skb_dst(skb)->xfrm) {
+		IPCB(skb)->flags |= IPSKB_REROUTED;
+		return dst_output(net, sk, skb);
+	}
+#endif
+
 	if ((skb->len > ip6_skb_dst_mtu(skb) && !skb_is_gso(skb)) ||
 	    dst_allfrag(skb_dst(skb)) ||
 	    (IP6CB(skb)->frag_max_size && skb->len > IP6CB(skb)->frag_max_size))
@@ -370,7 +378,7 @@ static inline int ip6_forward_finish(struct net *net, struct sock *sk,
 	return dst_output(net, sk, skb);
 }
 
-static unsigned int ip6_dst_mtu_forward(const struct dst_entry *dst)
+unsigned int ip6_dst_mtu_forward(const struct dst_entry *dst)
 {
 	unsigned int mtu;
 	struct inet6_dev *idev;
@@ -390,6 +398,7 @@ static unsigned int ip6_dst_mtu_forward(const struct dst_entry *dst)
 
 	return mtu;
 }
+EXPORT_SYMBOL_GPL(ip6_dst_mtu_forward);
 
 static bool ip6_pkt_too_big(const struct sk_buff *skb, unsigned int mtu)
 {
@@ -1209,7 +1218,7 @@ static int ip6_setup_cork(struct sock *sk, struct inet_cork_full *cork,
 		      READ_ONCE(rt->dst.dev->mtu) : dst_mtu(&rt->dst);
 	else
 		mtu = np->pmtudisc >= IPV6_PMTUDISC_PROBE ?
-		      READ_ONCE(rt->dst.dev->mtu) : dst_mtu(rt->dst.path);
+			READ_ONCE(rt->dst.dev->mtu) : dst_mtu(xfrm_dst_path(&rt->dst));
 	if (np->frag_size < mtu) {
 		if (np->frag_size)
 			mtu = np->frag_size;
@@ -1217,7 +1226,7 @@ static int ip6_setup_cork(struct sock *sk, struct inet_cork_full *cork,
 	if (mtu < IPV6_MIN_MTU)
 		return -EINVAL;
 	cork->base.fragsize = mtu;
-	if (dst_allfrag(rt->dst.path))
+	if (dst_allfrag(xfrm_dst_path(&rt->dst)))
 		cork->base.flags |= IPCORK_ALLFRAG;
 	cork->base.length = 0;
 
diff --git a/net/ipv6/ip6_tunnel.c b/net/ipv6/ip6_tunnel.c
index 1ee5584c3555..4b15fe928278 100644
--- a/net/ipv6/ip6_tunnel.c
+++ b/net/ipv6/ip6_tunnel.c
@@ -860,7 +860,7 @@ int ip6_tnl_rcv(struct ip6_tnl *t, struct sk_buff *skb,
 		struct metadata_dst *tun_dst,
 		bool log_ecn_err)
 {
-	return __ip6_tnl_rcv(t, skb, tpi, NULL, ip6ip6_dscp_ecn_decapsulate,
+	return __ip6_tnl_rcv(t, skb, tpi, tun_dst, ip6ip6_dscp_ecn_decapsulate,
 			     log_ecn_err);
 }
 EXPORT_SYMBOL(ip6_tnl_rcv);
@@ -978,6 +978,9 @@ int ip6_tnl_xmit_ctl(struct ip6_tnl *t,
 	int ret = 0;
 	struct net *net = t->net;
 
+	if (t->parms.collect_md)
+		return 1;
+
 	if ((p->flags & IP6_TNL_F_CAP_XMIT) ||
 	    ((p->flags & IP6_TNL_F_CAP_PER_PACKET) &&
 	     (ip6_tnl_get_cap(t, laddr, raddr) & IP6_TNL_F_CAP_XMIT))) {
diff --git a/net/ipv6/ip6_vti.c b/net/ipv6/ip6_vti.c
index 8c184f84f353..fa3ae1cb50d3 100644
--- a/net/ipv6/ip6_vti.c
+++ b/net/ipv6/ip6_vti.c
@@ -626,6 +626,7 @@ static void vti6_link_config(struct ip6_tnl *t)
 {
 	struct net_device *dev = t->dev;
 	struct __ip6_tnl_parm *p = &t->parms;
+	struct net_device *tdev = NULL;
 
 	memcpy(dev->dev_addr, &p->laddr, sizeof(struct in6_addr));
 	memcpy(dev->broadcast, &p->raddr, sizeof(struct in6_addr));
@@ -638,6 +639,25 @@ static void vti6_link_config(struct ip6_tnl *t)
 		dev->flags |= IFF_POINTOPOINT;
 	else
 		dev->flags &= ~IFF_POINTOPOINT;
+
+	if (p->flags & IP6_TNL_F_CAP_XMIT) {
+		int strict = (ipv6_addr_type(&p->raddr) &
+			      (IPV6_ADDR_MULTICAST | IPV6_ADDR_LINKLOCAL));
+		struct rt6_info *rt = rt6_lookup(t->net,
+						 &p->raddr, &p->laddr,
+						 p->link, strict);
+
+		if (rt)
+			tdev = rt->dst.dev;
+		ip6_rt_put(rt);
+	}
+
+	if (!tdev && p->link)
+		tdev = __dev_get_by_index(t->net, p->link);
+
+	if (tdev)
+		dev->mtu = max_t(int, tdev->mtu - dev->hard_header_len,
+				 IPV6_MIN_MTU);
 }
 
 /**
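
The vti6 MTU derivation added above, reduced to its arithmetic (a sketch, not kernel code): the tunnel MTU is the lower device's MTU minus the tunnel's header room, clamped so it never drops below IPV6_MIN_MTU.

/* Examples: 1500 - 40 -> 1460; 1300 - 40 -> 1280 (clamped). */
static int vti6_demo_mtu(int tdev_mtu, int hard_header_len)
{
	int mtu = tdev_mtu - hard_header_len;

	return mtu < 1280 ? 1280 : mtu;	/* IPV6_MIN_MTU */
}
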
diff --git a/net/ipv6/ip6mr.c b/net/ipv6/ip6mr.c
index a2e1a864eb46..9f6cace9c817 100644
--- a/net/ipv6/ip6mr.c
+++ b/net/ipv6/ip6mr.c
@@ -477,7 +477,6 @@ static int ip6mr_vif_open(struct inode *inode, struct file *file)
 }
 
 static const struct file_operations ip6mr_vif_fops = {
-	.owner	 = THIS_MODULE,
 	.open    = ip6mr_vif_open,
 	.read    = seq_read,
 	.llseek  = seq_lseek,
@@ -495,6 +494,7 @@ static void *ipmr_mfc_seq_start(struct seq_file *seq, loff_t *pos)
 		return ERR_PTR(-ENOENT);
 
 	it->mrt = mrt;
+	it->cache = NULL;
 	return *pos ? ipmr_mfc_seq_idx(net, seq->private, *pos - 1)
 		: SEQ_START_TOKEN;
 }
@@ -609,7 +609,6 @@ static int ipmr_mfc_open(struct inode *inode, struct file *file)
 }
 
 static const struct file_operations ip6mr_mfc_fops = {
-	.owner	 = THIS_MODULE,
 	.open    = ipmr_mfc_open,
 	.read    = seq_read,
 	.llseek  = seq_lseek,
@@ -1425,10 +1424,13 @@ int __init ip6_mr_init(void)
 		goto add_proto_fail;
 	}
 #endif
-	rtnl_register(RTNL_FAMILY_IP6MR, RTM_GETROUTE, NULL,
-		      ip6mr_rtm_dumproute, 0);
-	return 0;
+	err = rtnl_register_module(THIS_MODULE, RTNL_FAMILY_IP6MR, RTM_GETROUTE,
+				   NULL, ip6mr_rtm_dumproute, 0);
+	if (err == 0)
+		return 0;
+
 #ifdef CONFIG_IPV6_PIMSM_V2
+	inet6_del_protocol(&pim6_protocol, IPPROTO_PIM);
 add_proto_fail:
 	unregister_netdevice_notifier(&ip6_mr_notifier);
 #endif
diff --git a/net/ipv6/mcast.c b/net/ipv6/mcast.c
index 844642682b83..6a5d0e39bb87 100644
--- a/net/ipv6/mcast.c
+++ b/net/ipv6/mcast.c
@@ -1655,8 +1655,6 @@ static void mld_sendpack(struct sk_buff *skb)
 	if (err)
 		goto err_out;
 
-	payload_len = skb->len;
-
 	err = NF_HOOK(NFPROTO_IPV6, NF_INET_LOCAL_OUT,
 		      net, net->ipv6.igmp_sk, skb, NULL, skb->dev,
 		      dst_output);
@@ -2758,7 +2756,6 @@ static int igmp6_mc_seq_open(struct inode *inode, struct file *file)
 }
 
 static const struct file_operations igmp6_mc_seq_fops = {
-	.owner		=	THIS_MODULE,
 	.open		=	igmp6_mc_seq_open,
 	.read		=	seq_read,
 	.llseek		=	seq_lseek,
@@ -2913,7 +2910,6 @@ static int igmp6_mcf_seq_open(struct inode *inode, struct file *file)
 }
 
 static const struct file_operations igmp6_mcf_seq_fops = {
-	.owner		=	THIS_MODULE,
 	.open		=	igmp6_mcf_seq_open,
 	.read		=	seq_read,
 	.llseek		=	seq_lseek,
diff --git a/net/ipv6/ndisc.c b/net/ipv6/ndisc.c
index b3cea200c85e..f61a5b613b52 100644
--- a/net/ipv6/ndisc.c
+++ b/net/ipv6/ndisc.c
@@ -566,6 +566,11 @@ static void ndisc_send_unsol_na(struct net_device *dev)
 
 	read_lock_bh(&idev->lock);
 	list_for_each_entry(ifa, &idev->addr_list, if_list) {
+		/* skip tentative addresses until DAD completes */
+		if (ifa->flags & IFA_F_TENTATIVE &&
+		    !(ifa->flags & IFA_F_OPTIMISTIC))
+			continue;
+
 		ndisc_send_na(dev, &in6addr_linklocal_allnodes, &ifa->addr,
 			      /*router=*/ !!idev->cnf.forwarding,
 			      /*solicited=*/ false, /*override=*/ true,
diff --git a/net/ipv6/netfilter.c b/net/ipv6/netfilter.c
index 39970e212ad5..d95ceca7ff8f 100644
--- a/net/ipv6/netfilter.c
+++ b/net/ipv6/netfilter.c
@@ -68,32 +68,7 @@ int ip6_route_me_harder(struct net *net, struct sk_buff *skb)
 }
 EXPORT_SYMBOL(ip6_route_me_harder);
 
-/*
- * Extra routing may needed on local out, as the QUEUE target never
- * returns control to the table.
- */
-
-struct ip6_rt_info {
-	struct in6_addr daddr;
-	struct in6_addr saddr;
-	u_int32_t mark;
-};
-
-static void nf_ip6_saveroute(const struct sk_buff *skb,
-			     struct nf_queue_entry *entry)
-{
-	struct ip6_rt_info *rt_info = nf_queue_entry_reroute(entry);
-
-	if (entry->state.hook == NF_INET_LOCAL_OUT) {
-		const struct ipv6hdr *iph = ipv6_hdr(skb);
-
-		rt_info->daddr = iph->daddr;
-		rt_info->saddr = iph->saddr;
-		rt_info->mark = skb->mark;
-	}
-}
-
-static int nf_ip6_reroute(struct net *net, struct sk_buff *skb,
+static int nf_ip6_reroute(struct sk_buff *skb,
 			  const struct nf_queue_entry *entry)
 {
 	struct ip6_rt_info *rt_info = nf_queue_entry_reroute(entry);
@@ -103,7 +78,7 @@ static int nf_ip6_reroute(struct net *net, struct sk_buff *skb,
 		if (!ipv6_addr_equal(&iph->daddr, &rt_info->daddr) ||
 		    !ipv6_addr_equal(&iph->saddr, &rt_info->saddr) ||
 		    skb->mark != rt_info->mark)
-			return ip6_route_me_harder(net, skb);
+			return ip6_route_me_harder(entry->state.net, skb);
 	}
 	return 0;
 }
@@ -190,25 +165,19 @@ static __sum16 nf_ip6_checksum_partial(struct sk_buff *skb, unsigned int hook,
 };
 
 static const struct nf_ipv6_ops ipv6ops = {
-	.chk_addr	= ipv6_chk_addr,
-	.route_input    = ip6_route_input,
-	.fragment	= ip6_fragment
-};
-
-static const struct nf_afinfo nf_ip6_afinfo = {
-	.family			= AF_INET6,
+	.chk_addr		= ipv6_chk_addr,
+	.route_input		= ip6_route_input,
+	.fragment		= ip6_fragment,
 	.checksum		= nf_ip6_checksum,
 	.checksum_partial	= nf_ip6_checksum_partial,
 	.route			= nf_ip6_route,
-	.saveroute		= nf_ip6_saveroute,
 	.reroute		= nf_ip6_reroute,
-	.route_key_size		= sizeof(struct ip6_rt_info),
 };
 
 int __init ipv6_netfilter_init(void)
 {
 	RCU_INIT_POINTER(nf_ipv6_ops, &ipv6ops);
-	return nf_register_afinfo(&nf_ip6_afinfo);
+	return 0;
 }
 
 /* This can be called from inet6_init() on errors, so it cannot
@@ -217,5 +186,4 @@ int __init ipv6_netfilter_init(void)
 void ipv6_netfilter_fini(void)
 {
 	RCU_INIT_POINTER(nf_ipv6_ops, NULL);
-	nf_unregister_afinfo(&nf_ip6_afinfo);
 }
diff --git a/net/ipv6/netfilter/Kconfig b/net/ipv6/netfilter/Kconfig
index 6acb2eecd986..4a634b7a2c80 100644
--- a/net/ipv6/netfilter/Kconfig
+++ b/net/ipv6/netfilter/Kconfig
@@ -71,6 +71,15 @@ config NFT_FIB_IPV6
 endif # NF_TABLES_IPV6
 endif # NF_TABLES
 
+config NF_FLOW_TABLE_IPV6
+	tristate "Netfilter flow table IPv6 module"
+	depends on NF_CONNTRACK && NF_TABLES
+	select NF_FLOW_TABLE
+	help
+	  This option adds flow table support for IPv6.
+
+	  To compile it as a module, choose M here.
+
 config NF_DUP_IPV6
 	tristate "Netfilter IPv6 packet duplication to alternate destination"
 	depends on !NF_CONNTRACK || NF_CONNTRACK
@@ -232,6 +241,15 @@ config IP6_NF_MATCH_RT
 
 	  To compile it as a module, choose M here.  If unsure, say N.
 
+config IP6_NF_MATCH_SRH
+	tristate '"srh" Segment Routing header match support'
+	depends on NETFILTER_ADVANCED
+	help
+	  srh matching allows you to match packets based on the segment
+	  routing header of the packet.
+
+	  To compile it as a module, choose M here.  If unsure, say N.
+
 # The targets
 config IP6_NF_TARGET_HL
 	tristate '"HL" hoplimit target support'
diff --git a/net/ipv6/netfilter/Makefile b/net/ipv6/netfilter/Makefile
index c6ee0cdd0ba9..d984057b8395 100644
--- a/net/ipv6/netfilter/Makefile
+++ b/net/ipv6/netfilter/Makefile
@@ -45,6 +45,9 @@ obj-$(CONFIG_NFT_REDIR_IPV6) += nft_redir_ipv6.o
 obj-$(CONFIG_NFT_DUP_IPV6) += nft_dup_ipv6.o
 obj-$(CONFIG_NFT_FIB_IPV6) += nft_fib_ipv6.o
 
+# flow table support
+obj-$(CONFIG_NF_FLOW_TABLE_IPV6) += nf_flow_table_ipv6.o
+
 # matches
 obj-$(CONFIG_IP6_NF_MATCH_AH) += ip6t_ah.o
 obj-$(CONFIG_IP6_NF_MATCH_EUI64) += ip6t_eui64.o
@@ -54,6 +57,7 @@ obj-$(CONFIG_IP6_NF_MATCH_MH) += ip6t_mh.o
 obj-$(CONFIG_IP6_NF_MATCH_OPTS) += ip6t_hbh.o
 obj-$(CONFIG_IP6_NF_MATCH_RPFILTER) += ip6t_rpfilter.o
 obj-$(CONFIG_IP6_NF_MATCH_RT) += ip6t_rt.o
+obj-$(CONFIG_IP6_NF_MATCH_SRH) += ip6t_srh.o
 
 # targets
 obj-$(CONFIG_IP6_NF_TARGET_MASQUERADE) += ip6t_MASQUERADE.o
diff --git a/net/ipv6/netfilter/ip6_tables.c b/net/ipv6/netfilter/ip6_tables.c
index 66a8c69a3db4..af4c917e0836 100644
--- a/net/ipv6/netfilter/ip6_tables.c
+++ b/net/ipv6/netfilter/ip6_tables.c
@@ -986,9 +986,8 @@ static int get_info(struct net *net, void __user *user,
 	if (compat)
 		xt_compat_lock(AF_INET6);
 #endif
-	t = try_then_request_module(xt_find_table_lock(net, AF_INET6, name),
-				    "ip6table_%s", name);
-	if (t) {
+	t = xt_request_find_table_lock(net, AF_INET6, name);
+	if (!IS_ERR(t)) {
 		struct ip6t_getinfo info;
 		const struct xt_table_info *private = t->private;
 #ifdef CONFIG_COMPAT
@@ -1018,7 +1017,7 @@ static int get_info(struct net *net, void __user *user,
 		xt_table_unlock(t);
 		module_put(t->me);
 	} else
-		ret = -ENOENT;
+		ret = PTR_ERR(t);
 #ifdef CONFIG_COMPAT
 	if (compat)
 		xt_compat_unlock(AF_INET6);
@@ -1044,7 +1043,7 @@ get_entries(struct net *net, struct ip6t_get_entries __user *uptr,
 	get.name[sizeof(get.name) - 1] = '\0';
 
 	t = xt_find_table_lock(net, AF_INET6, get.name);
-	if (t) {
+	if (!IS_ERR(t)) {
 		struct xt_table_info *private = t->private;
 		if (get.size == private->size)
 			ret = copy_entries_to_user(private->size,
@@ -1055,7 +1054,7 @@ get_entries(struct net *net, struct ip6t_get_entries __user *uptr,
 		module_put(t->me);
 		xt_table_unlock(t);
 	} else
-		ret = -ENOENT;
+		ret = PTR_ERR(t);
 
 	return ret;
 }
@@ -1078,10 +1077,9 @@ __do_replace(struct net *net, const char *name, unsigned int valid_hooks,
 		goto out;
 	}
 
-	t = try_then_request_module(xt_find_table_lock(net, AF_INET6, name),
-				    "ip6table_%s", name);
-	if (!t) {
-		ret = -ENOENT;
+	t = xt_request_find_table_lock(net, AF_INET6, name);
+	if (IS_ERR(t)) {
+		ret = PTR_ERR(t);
 		goto free_newinfo_counters_untrans;
 	}
 
@@ -1194,8 +1192,8 @@ do_add_counters(struct net *net, const void __user *user, unsigned int len,
 	if (IS_ERR(paddc))
 		return PTR_ERR(paddc);
 	t = xt_find_table_lock(net, AF_INET6, tmp.name);
-	if (!t) {
-		ret = -ENOENT;
+	if (IS_ERR(t)) {
+		ret = PTR_ERR(t);
 		goto free;
 	}
 
@@ -1631,7 +1629,7 @@ compat_get_entries(struct net *net, struct compat_ip6t_get_entries __user *uptr,
 
 	xt_compat_lock(AF_INET6);
 	t = xt_find_table_lock(net, AF_INET6, get.name);
-	if (t) {
+	if (!IS_ERR(t)) {
 		const struct xt_table_info *private = t->private;
 		struct xt_table_info info;
 		ret = compat_table_info(private, &info);
@@ -1645,7 +1643,7 @@ compat_get_entries(struct net *net, struct compat_ip6t_get_entries __user *uptr,
 		module_put(t->me);
 		xt_table_unlock(t);
 	} else
-		ret = -ENOENT;
+		ret = PTR_ERR(t);
 
 	xt_compat_unlock(AF_INET6);
 	return ret;
@@ -1949,7 +1947,6 @@ static int __init ip6_tables_init(void)
 	if (ret < 0)
 		goto err5;
 
-	pr_info("(C) 2000-2006 Netfilter Core Team\n");
 	return 0;
 
 err5:
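
The ip6_tables conversion is one idiom applied throughout: table lookup now reports failure as ERR_PTR() rather than NULL, so callers forward the precise errno instead of a blanket -ENOENT. A sketch of the caller side (demo_table_op is illustrative; the lookup call and the unlock/module_put pairing are taken from the hunks above):

#include <linux/err.h>
#include <linux/module.h>
#include <linux/netfilter/x_tables.h>

static int demo_table_op(struct net *net, const char *name)
{
	struct xt_table *t;

	t = xt_request_find_table_lock(net, AF_INET6, name);
	if (IS_ERR(t))
		return PTR_ERR(t);	/* precise errno, not a blanket -ENOENT */

	/* ... inspect or update t->private here ... */

	xt_table_unlock(t);
	module_put(t->me);
	return 0;
}
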
diff --git a/net/ipv6/netfilter/ip6t_srh.c b/net/ipv6/netfilter/ip6t_srh.c
new file mode 100644
index 000000000000..9642164107ce
--- /dev/null
+++ b/net/ipv6/netfilter/ip6t_srh.c
@@ -0,0 +1,161 @@
+/* Kernel module to match Segment Routing Header (SRH) parameters. */
+
+/* Author:
+ * Ahmed Abdelsalam <amsalam20@gmail.com>
+ *
+ *  This program is free software; you can redistribute it and/or
+ *	modify it under the terms of the GNU General Public License
+ *	as published by the Free Software Foundation; either version 2
+ *	of the License, or (at your option) any later version.
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+#include <linux/module.h>
+#include <linux/skbuff.h>
+#include <linux/ipv6.h>
+#include <linux/types.h>
+#include <net/ipv6.h>
+#include <net/seg6.h>
+
+#include <linux/netfilter/x_tables.h>
+#include <linux/netfilter_ipv6/ip6t_srh.h>
+#include <linux/netfilter_ipv6/ip6_tables.h>
+
+/* XOR a test result with the matching mt_invflags bit, so a set
+ * inversion flag flips the outcome of that one test */
+#define NF_SRH_INVF(ptr, flag, boolean)	\
+	((boolean) ^ !!((ptr)->mt_invflags & (flag)))
+
+static bool srh_mt6(const struct sk_buff *skb, struct xt_action_param *par)
+{
+	const struct ip6t_srh *srhinfo = par->matchinfo;
+	struct ipv6_sr_hdr *srh;
+	struct ipv6_sr_hdr _srh;
+	int hdrlen, srhoff = 0;
+
+	if (ipv6_find_hdr(skb, &srhoff, IPPROTO_ROUTING, NULL, NULL) < 0)
+		return false;
+	srh = skb_header_pointer(skb, srhoff, sizeof(_srh), &_srh);
+	if (!srh)
+		return false;
+
+	hdrlen = ipv6_optlen(srh);
+	if (skb->len - srhoff < hdrlen)
+		return false;
+
+	if (srh->type != IPV6_SRCRT_TYPE_4)
+		return false;
+
+	if (srh->segments_left > srh->first_segment)
+		return false;
+
+	/* Next Header matching */
+	if (srhinfo->mt_flags & IP6T_SRH_NEXTHDR)
+		if (NF_SRH_INVF(srhinfo, IP6T_SRH_INV_NEXTHDR,
+				!(srh->nexthdr == srhinfo->next_hdr)))
+			return false;
+
+	/* Header Extension Length matching */
+	if (srhinfo->mt_flags & IP6T_SRH_LEN_EQ)
+		if (NF_SRH_INVF(srhinfo, IP6T_SRH_INV_LEN_EQ,
+				!(srh->hdrlen == srhinfo->hdr_len)))
+			return false;
+
+	if (srhinfo->mt_flags & IP6T_SRH_LEN_GT)
+		if (NF_SRH_INVF(srhinfo, IP6T_SRH_INV_LEN_GT,
+				!(srh->hdrlen > srhinfo->hdr_len)))
+			return false;
+
+	if (srhinfo->mt_flags & IP6T_SRH_LEN_LT)
+		if (NF_SRH_INVF(srhinfo, IP6T_SRH_INV_LEN_LT,
+				!(srh->hdrlen < srhinfo->hdr_len)))
+			return false;
+
+	/* Segments Left matching */
+	if (srhinfo->mt_flags & IP6T_SRH_SEGS_EQ)
+		if (NF_SRH_INVF(srhinfo, IP6T_SRH_INV_SEGS_EQ,
+				!(srh->segments_left == srhinfo->segs_left)))
+			return false;
+
+	if (srhinfo->mt_flags & IP6T_SRH_SEGS_GT)
+		if (NF_SRH_INVF(srhinfo, IP6T_SRH_INV_SEGS_GT,
+				!(srh->segments_left > srhinfo->segs_left)))
+			return false;
+
+	if (srhinfo->mt_flags & IP6T_SRH_SEGS_LT)
+		if (NF_SRH_INVF(srhinfo, IP6T_SRH_INV_SEGS_LT,
+				!(srh->segments_left < srhinfo->segs_left)))
+			return false;
+
+	/**
+	 * Last Entry matching
+	 * The Last_Entry field was introduced in revision 6 of the SRH
+	 * draft; it was called First_Segment in previous revisions.
+	 */
+	if (srhinfo->mt_flags & IP6T_SRH_LAST_EQ)
+		if (NF_SRH_INVF(srhinfo, IP6T_SRH_INV_LAST_EQ,
+				!(srh->first_segment == srhinfo->last_entry)))
+			return false;
+
+	if (srhinfo->mt_flags & IP6T_SRH_LAST_GT)
+		if (NF_SRH_INVF(srhinfo, IP6T_SRH_INV_LAST_GT,
+				!(srh->first_segment > srhinfo->last_entry)))
+			return false;
+
+	if (srhinfo->mt_flags & IP6T_SRH_LAST_LT)
+		if (NF_SRH_INVF(srhinfo, IP6T_SRH_INV_LAST_LT,
+				!(srh->first_segment < srhinfo->last_entry)))
+			return false;
+
+	/**
+	 * Tag matching
+	 * The Tag field was introduced in revision 6 of the SRH draft.
+	 */
+	if (srhinfo->mt_flags & IP6T_SRH_TAG)
+		if (NF_SRH_INVF(srhinfo, IP6T_SRH_INV_TAG,
+				!(srh->tag == srhinfo->tag)))
+			return false;
+	return true;
+}
+
+static int srh_mt6_check(const struct xt_mtchk_param *par)
+{
+	const struct ip6t_srh *srhinfo = par->matchinfo;
+
+	if (srhinfo->mt_flags & ~IP6T_SRH_MASK) {
+		pr_err("unknown srh match flags  %X\n", srhinfo->mt_flags);
+		return -EINVAL;
+	}
+
+	if (srhinfo->mt_invflags & ~IP6T_SRH_INV_MASK) {
+		pr_err("unknown srh invflags %X\n", srhinfo->mt_invflags);
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+static struct xt_match srh_mt6_reg __read_mostly = {
+	.name		= "srh",
+	.family		= NFPROTO_IPV6,
+	.match		= srh_mt6,
+	.matchsize	= sizeof(struct ip6t_srh),
+	.checkentry	= srh_mt6_check,
+	.me		= THIS_MODULE,
+};
+
+static int __init srh_mt6_init(void)
+{
+	return xt_register_match(&srh_mt6_reg);
+}
+
+static void __exit srh_mt6_exit(void)
+{
+	xt_unregister_match(&srh_mt6_reg);
+}
+
+module_init(srh_mt6_init);
+module_exit(srh_mt6_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("Xtables: IPv6 Segment Routing Header match");
+MODULE_AUTHOR("Ahmed Abdelsalam <amsalam20@gmail.com>");
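
The NF_SRH_INVF() macro above is compact enough to warrant a worked restatement: each comparison result is XORed with its inversion flag, so a set flag flips that one verdict. A standalone userspace sketch (srh_field_matches is illustrative, not kernel API):

#include <stdbool.h>
#include <stdio.h>

/* Mirrors "if (NF_SRH_INVF(info, INV_X, !cond)) return false;":
 * the overall test passes when cond == !inverted. */
static bool srh_field_matches(bool cond, bool inverted)
{
	return !(!cond ^ inverted);
}

int main(void)
{
	printf("%d %d %d %d\n",
	       srh_field_matches(true,  false),	/* 1: plain match    */
	       srh_field_matches(false, false),	/* 0: plain miss     */
	       srh_field_matches(true,  true),	/* 0: inverted miss  */
	       srh_field_matches(false, true));	/* 1: inverted match */
	return 0;
}
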
diff --git a/net/ipv6/netfilter/ip6table_mangle.c b/net/ipv6/netfilter/ip6table_mangle.c
index 2b1a9dcdbcb3..b0524b18c4fb 100644
--- a/net/ipv6/netfilter/ip6table_mangle.c
+++ b/net/ipv6/netfilter/ip6table_mangle.c
@@ -42,14 +42,6 @@ ip6t_mangle_out(struct sk_buff *skb, const struct nf_hook_state *state)
 	u_int8_t hop_limit;
 	u_int32_t flowlabel, mark;
 	int err;
-#if 0
-	/* root is playing with raw sockets. */
-	if (skb->len < sizeof(struct iphdr) ||
-	    ip_hdrlen(skb) < sizeof(struct iphdr)) {
-		net_warn_ratelimited("ip6t_hook: happy cracking\n");
-		return NF_ACCEPT;
-	}
-#endif
 
 	/* save source/dest address, mark, hoplimit, flowlabel, priority,  */
 	memcpy(&saddr, &ipv6_hdr(skb)->saddr, sizeof(saddr));
diff --git a/net/ipv6/netfilter/ip6table_nat.c b/net/ipv6/netfilter/ip6table_nat.c
index 991512576c8c..47306e45a80a 100644
--- a/net/ipv6/netfilter/ip6table_nat.c
+++ b/net/ipv6/netfilter/ip6table_nat.c
@@ -74,6 +74,7 @@ static const struct nf_hook_ops nf_nat_ipv6_ops[] = {
 	{
 		.hook		= ip6table_nat_in,
 		.pf		= NFPROTO_IPV6,
+		.nat_hook	= true,
 		.hooknum	= NF_INET_PRE_ROUTING,
 		.priority	= NF_IP6_PRI_NAT_DST,
 	},
@@ -81,6 +82,7 @@ static const struct nf_hook_ops nf_nat_ipv6_ops[] = {
 	{
 		.hook		= ip6table_nat_out,
 		.pf		= NFPROTO_IPV6,
+		.nat_hook	= true,
 		.hooknum	= NF_INET_POST_ROUTING,
 		.priority	= NF_IP6_PRI_NAT_SRC,
 	},
@@ -88,12 +90,14 @@ static const struct nf_hook_ops nf_nat_ipv6_ops[] = {
 	{
 		.hook		= ip6table_nat_local_fn,
 		.pf		= NFPROTO_IPV6,
+		.nat_hook	= true,
 		.hooknum	= NF_INET_LOCAL_OUT,
 		.priority	= NF_IP6_PRI_NAT_DST,
 	},
 	/* After packet filtering, change source */
 	{
 		.hook		= ip6table_nat_fn,
+		.nat_hook	= true,
 		.pf		= NFPROTO_IPV6,
 		.hooknum	= NF_INET_LOCAL_IN,
 		.priority	= NF_IP6_PRI_NAT_SRC,
diff --git a/net/ipv6/netfilter/ip6table_raw.c b/net/ipv6/netfilter/ip6table_raw.c
index d4bc56443dc1..710fa0806c37 100644
--- a/net/ipv6/netfilter/ip6table_raw.c
+++ b/net/ipv6/netfilter/ip6table_raw.c
@@ -3,6 +3,7 @@
  *
  * Copyright (C) 2003 Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>
  */
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
 #include <linux/module.h>
 #include <linux/netfilter_ipv6/ip6_tables.h>
 #include <linux/slab.h>
@@ -11,6 +12,10 @@
 
 static int __net_init ip6table_raw_table_init(struct net *net);
 
+static bool raw_before_defrag __read_mostly;
+MODULE_PARM_DESC(raw_before_defrag, "Enable raw table before defrag");
+module_param(raw_before_defrag, bool, 0000);
+
 static const struct xt_table packet_raw = {
 	.name = "raw",
 	.valid_hooks = RAW_VALID_HOOKS,
@@ -20,6 +25,15 @@ static const struct xt_table packet_raw = {
 	.table_init = ip6table_raw_table_init,
 };
 
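+/* Alternative registration of the same table at a priority ahead of
+ * defragmentation (NF_IP6_PRI_RAW_BEFORE_DEFRAG), so raw rules can see
+ * IPv6 fragments before conntrack reassembles them.
+ */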
+static const struct xt_table packet_raw_before_defrag = {
+	.name = "raw",
+	.valid_hooks = RAW_VALID_HOOKS,
+	.me = THIS_MODULE,
+	.af = NFPROTO_IPV6,
+	.priority = NF_IP6_PRI_RAW_BEFORE_DEFRAG,
+	.table_init = ip6table_raw_table_init,
+};
+
 /* The work comes in here from netfilter.c. */
 static unsigned int
 ip6table_raw_hook(void *priv, struct sk_buff *skb,
@@ -33,15 +47,19 @@ static struct nf_hook_ops *rawtable_ops __read_mostly;
 static int __net_init ip6table_raw_table_init(struct net *net)
 {
 	struct ip6t_replace *repl;
+	const struct xt_table *table = &packet_raw;
 	int ret;
 
+	if (raw_before_defrag)
+		table = &packet_raw_before_defrag;
+
 	if (net->ipv6.ip6table_raw)
 		return 0;
 
-	repl = ip6t_alloc_initial_table(&packet_raw);
+	repl = ip6t_alloc_initial_table(table);
 	if (repl == NULL)
 		return -ENOMEM;
-	ret = ip6t_register_table(net, &packet_raw, repl, rawtable_ops,
+	ret = ip6t_register_table(net, table, repl, rawtable_ops,
 				  &net->ipv6.ip6table_raw);
 	kfree(repl);
 	return ret;
@@ -62,9 +80,16 @@ static struct pernet_operations ip6table_raw_net_ops = {
 static int __init ip6table_raw_init(void)
 {
 	int ret;
+	const struct xt_table *table = &packet_raw;
+
+	if (raw_before_defrag) {
+		table = &packet_raw_before_defrag;
+
+		pr_info("Enabling raw table before defrag\n");
+	}
 
 	/* Register hooks */
-	rawtable_ops = xt_hook_ops_alloc(&packet_raw, ip6table_raw_hook);
+	rawtable_ops = xt_hook_ops_alloc(table, ip6table_raw_hook);
 	if (IS_ERR(rawtable_ops))
 		return PTR_ERR(rawtable_ops);
 
diff --git a/net/ipv6/netfilter/nf_conntrack_l3proto_ipv6.c b/net/ipv6/netfilter/nf_conntrack_l3proto_ipv6.c
index 3b80a38f62b8..11a313fd9273 100644
--- a/net/ipv6/netfilter/nf_conntrack_l3proto_ipv6.c
+++ b/net/ipv6/netfilter/nf_conntrack_l3proto_ipv6.c
@@ -176,11 +176,6 @@ static unsigned int ipv6_conntrack_local(void *priv,
 					 struct sk_buff *skb,
 					 const struct nf_hook_state *state)
 {
-	/* root is playing with raw sockets. */
-	if (skb->len < sizeof(struct ipv6hdr)) {
-		net_notice_ratelimited("ipv6_conntrack_local: packet too short\n");
-		return NF_ACCEPT;
-	}
 	return nf_conntrack_in(state->net, PF_INET6, state->hook, skb);
 }
 
@@ -368,7 +363,7 @@ static struct nf_sockopt_ops so_getorigdst6 = {
 	.owner		= THIS_MODULE,
 };
 
-static struct nf_conntrack_l4proto *builtin_l4proto6[] = {
+static const struct nf_conntrack_l4proto * const builtin_l4proto6[] = {
 	&nf_conntrack_l4proto_tcp6,
 	&nf_conntrack_l4proto_udp6,
 	&nf_conntrack_l4proto_icmpv6,
diff --git a/net/ipv6/netfilter/nf_conntrack_proto_icmpv6.c b/net/ipv6/netfilter/nf_conntrack_proto_icmpv6.c
index 3ac0d826afc4..2548e2c8aedd 100644
--- a/net/ipv6/netfilter/nf_conntrack_proto_icmpv6.c
+++ b/net/ipv6/netfilter/nf_conntrack_proto_icmpv6.c
@@ -27,7 +27,7 @@
 #include <net/netfilter/ipv6/nf_conntrack_icmpv6.h>
 #include <net/netfilter/nf_log.h>
 
-static unsigned int nf_ct_icmpv6_timeout __read_mostly = 30*HZ;
+static const unsigned int nf_ct_icmpv6_timeout = 30*HZ;
 
 static inline struct nf_icmp_net *icmpv6_pernet(struct net *net)
 {
@@ -352,7 +352,7 @@ static struct nf_proto_net *icmpv6_get_net_proto(struct net *net)
 	return &net->ct.nf_ct_proto.icmpv6.pn;
 }
 
-struct nf_conntrack_l4proto nf_conntrack_l4proto_icmpv6 __read_mostly =
+const struct nf_conntrack_l4proto nf_conntrack_l4proto_icmpv6 =
 {
 	.l3proto		= PF_INET6,
 	.l4proto		= IPPROTO_ICMPV6,
diff --git a/net/ipv6/netfilter/nf_conntrack_reasm.c b/net/ipv6/netfilter/nf_conntrack_reasm.c
index 977d8900cfd1..ce53dcfda88a 100644
--- a/net/ipv6/netfilter/nf_conntrack_reasm.c
+++ b/net/ipv6/netfilter/nf_conntrack_reasm.c
@@ -231,7 +231,7 @@ static int nf_ct_frag6_queue(struct frag_queue *fq, struct sk_buff *skb,
 
 	if ((unsigned int)end > IPV6_MAXPLEN) {
 		pr_debug("offset is too large.\n");
-		return -1;
+		return -EINVAL;
 	}
 
 	ecn = ip6_frag_ecn(ipv6_hdr(skb));
@@ -264,7 +264,7 @@ static int nf_ct_frag6_queue(struct frag_queue *fq, struct sk_buff *skb,
 			 * this case. -DaveM
 			 */
 			pr_debug("end of fragment not rounded to 8 bytes.\n");
-			return -1;
+			return -EPROTO;
 		}
 		if (end > fq->q.len) {
 			/* Some bits beyond end -> corruption. */
@@ -358,7 +358,7 @@ found:
 discard_fq:
 	inet_frag_kill(&fq->q, &nf_frags);
 err:
-	return -1;
+	return -EINVAL;
 }
 
 /*
@@ -567,6 +567,7 @@ find_prev_fhdr(struct sk_buff *skb, u8 *prevhdrp, int *prevhoff, int *fhoff)
 
 int nf_ct_frag6_gather(struct net *net, struct sk_buff *skb, u32 user)
 {
+	u16 savethdr = skb->transport_header;
 	struct net_device *dev = skb->dev;
 	int fhoff, nhoff, ret;
 	struct frag_hdr *fhdr;
@@ -600,8 +601,12 @@ int nf_ct_frag6_gather(struct net *net, struct sk_buff *skb, u32 user)
 
 	spin_lock_bh(&fq->q.lock);
 
-	if (nf_ct_frag6_queue(fq, skb, fhdr, nhoff) < 0) {
-		ret = -EINVAL;
+	ret = nf_ct_frag6_queue(fq, skb, fhdr, nhoff);
+	if (ret < 0) {
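+		/* -EPROTO: a non-final fragment whose length is not a
+		 * multiple of 8 (see nf_ct_frag6_queue()). Restore the
+		 * saved transport header and accept the packet, so the
+		 * stack can react to the RFC 2460 violation (e.g. with an
+		 * ICMPv6 error) rather than it being silently dropped.
+		 */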
+		if (ret == -EPROTO) {
+			skb->transport_header = savethdr;
+			ret = 0;
+		}
 		goto out_unlock;
 	}
 
diff --git a/net/ipv6/netfilter/nf_defrag_ipv6_hooks.c b/net/ipv6/netfilter/nf_defrag_ipv6_hooks.c
index b326da59257f..c87b48359e8f 100644
--- a/net/ipv6/netfilter/nf_defrag_ipv6_hooks.c
+++ b/net/ipv6/netfilter/nf_defrag_ipv6_hooks.c
@@ -63,6 +63,9 @@ static unsigned int ipv6_defrag(void *priv,
 	/* Previously seen (loopback)?	*/
 	if (skb_nfct(skb) && !nf_ct_is_template((struct nf_conn *)skb_nfct(skb)))
 		return NF_ACCEPT;
+
+	if (skb->_nfct == IP_CT_UNTRACKED)
+		return NF_ACCEPT;
 #endif
 
 	err = nf_ct_frag6_gather(state->net, skb,
diff --git a/net/ipv6/netfilter/nf_flow_table_ipv6.c b/net/ipv6/netfilter/nf_flow_table_ipv6.c
new file mode 100644
index 000000000000..fff21602875a
--- /dev/null
+++ b/net/ipv6/netfilter/nf_flow_table_ipv6.c
@@ -0,0 +1,277 @@
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/netfilter.h>
+#include <linux/rhashtable.h>
+#include <linux/ipv6.h>
+#include <linux/netdevice.h>
+#include <net/ipv6.h>
+#include <net/ip6_route.h>
+#include <net/neighbour.h>
+#include <net/netfilter/nf_flow_table.h>
+#include <net/netfilter/nf_tables.h>
+/* For layer 4 checksum field offset. */
+#include <linux/tcp.h>
+#include <linux/udp.h>
+
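+/* Fix up the L4 checksum after an IPv6 address rewrite. A UDP checksum
+ * of zero (checksum disabled) is left as-is unless the skb is
+ * CHECKSUM_PARTIAL.
+ */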
+static int nf_flow_nat_ipv6_tcp(struct sk_buff *skb, unsigned int thoff,
+				struct in6_addr *addr,
+				struct in6_addr *new_addr)
+{
+	struct tcphdr *tcph;
+
+	if (!pskb_may_pull(skb, thoff + sizeof(*tcph)) ||
+	    skb_try_make_writable(skb, thoff + sizeof(*tcph)))
+		return -1;
+
+	tcph = (void *)(skb_network_header(skb) + thoff);
+	inet_proto_csum_replace16(&tcph->check, skb, addr->s6_addr32,
+				  new_addr->s6_addr32, true);
+
+	return 0;
+}
+
+static int nf_flow_nat_ipv6_udp(struct sk_buff *skb, unsigned int thoff,
+				struct in6_addr *addr,
+				struct in6_addr *new_addr)
+{
+	struct udphdr *udph;
+
+	if (!pskb_may_pull(skb, thoff + sizeof(*udph)) ||
+	    skb_try_make_writable(skb, thoff + sizeof(*udph)))
+		return -1;
+
+	udph = (void *)(skb_network_header(skb) + thoff);
+	if (udph->check || skb->ip_summed == CHECKSUM_PARTIAL) {
+		inet_proto_csum_replace16(&udph->check, skb, addr->s6_addr32,
+					  new_addr->s6_addr32, true);
+		if (!udph->check)
+			udph->check = CSUM_MANGLED_0;
+	}
+
+	return 0;
+}
+
+static int nf_flow_nat_ipv6_l4proto(struct sk_buff *skb, struct ipv6hdr *ip6h,
+				    unsigned int thoff, struct in6_addr *addr,
+				    struct in6_addr *new_addr)
+{
+	switch (ip6h->nexthdr) {
+	case IPPROTO_TCP:
+		if (nf_flow_nat_ipv6_tcp(skb, thoff, addr, new_addr) < 0)
+			return NF_DROP;
+		break;
+	case IPPROTO_UDP:
+		if (nf_flow_nat_ipv6_udp(skb, thoff, addr, new_addr) < 0)
+			return NF_DROP;
+		break;
+	}
+
+	return 0;
+}
+
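+/* Rewrite the source address (SNAT) with the address recorded in the
+ * reply direction's tuple, then fix up the layer 4 checksum;
+ * nf_flow_dnat_ipv6() below does the same for the destination.
+ */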
+static int nf_flow_snat_ipv6(const struct flow_offload *flow,
+			     struct sk_buff *skb, struct ipv6hdr *ip6h,
+			     unsigned int thoff,
+			     enum flow_offload_tuple_dir dir)
+{
+	struct in6_addr addr, new_addr;
+
+	switch (dir) {
+	case FLOW_OFFLOAD_DIR_ORIGINAL:
+		addr = ip6h->saddr;
+		new_addr = flow->tuplehash[FLOW_OFFLOAD_DIR_REPLY].tuple.dst_v6;
+		ip6h->saddr = new_addr;
+		break;
+	case FLOW_OFFLOAD_DIR_REPLY:
+		addr = ip6h->daddr;
+		new_addr = flow->tuplehash[FLOW_OFFLOAD_DIR_ORIGINAL].tuple.src_v6;
+		ip6h->daddr = new_addr;
+		break;
+	default:
+		return -1;
+	}
+
+	return nf_flow_nat_ipv6_l4proto(skb, ip6h, thoff, &addr, &new_addr);
+}
+
+static int nf_flow_dnat_ipv6(const struct flow_offload *flow,
+			     struct sk_buff *skb, struct ipv6hdr *ip6h,
+			     unsigned int thoff,
+			     enum flow_offload_tuple_dir dir)
+{
+	struct in6_addr addr, new_addr;
+
+	switch (dir) {
+	case FLOW_OFFLOAD_DIR_ORIGINAL:
+		addr = ip6h->daddr;
+		new_addr = flow->tuplehash[FLOW_OFFLOAD_DIR_REPLY].tuple.src_v6;
+		ip6h->daddr = new_addr;
+		break;
+	case FLOW_OFFLOAD_DIR_REPLY:
+		addr = ip6h->saddr;
+		new_addr = flow->tuplehash[FLOW_OFFLOAD_DIR_ORIGINAL].tuple.dst_v6;
+		ip6h->saddr = new_addr;
+		break;
+	default:
+		return -1;
+	}
+
+	return nf_flow_nat_ipv6_l4proto(skb, ip6h, thoff, &addr, &new_addr);
+}
+
+static int nf_flow_nat_ipv6(const struct flow_offload *flow,
+			    struct sk_buff *skb,
+			    enum flow_offload_tuple_dir dir)
+{
+	struct ipv6hdr *ip6h = ipv6_hdr(skb);
+	unsigned int thoff = sizeof(*ip6h);
+
+	if (flow->flags & FLOW_OFFLOAD_SNAT &&
+	    (nf_flow_snat_port(flow, skb, thoff, ip6h->nexthdr, dir) < 0 ||
+	     nf_flow_snat_ipv6(flow, skb, ip6h, thoff, dir) < 0))
+		return -1;
+	if (flow->flags & FLOW_OFFLOAD_DNAT &&
+	    (nf_flow_dnat_port(flow, skb, thoff, ip6h->nexthdr, dir) < 0 ||
+	     nf_flow_dnat_ipv6(flow, skb, ip6h, thoff, dir) < 0))
+		return -1;
+
+	return 0;
+}
+
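+/* Extract the lookup key (addresses, ports, protocol, input interface)
+ * from a plain TCP/UDP-over-IPv6 packet; anything else falls back to
+ * the classic forwarding path.
+ */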
+static int nf_flow_tuple_ipv6(struct sk_buff *skb, const struct net_device *dev,
+			      struct flow_offload_tuple *tuple)
+{
+	struct flow_ports *ports;
+	struct ipv6hdr *ip6h;
+	unsigned int thoff;
+
+	if (!pskb_may_pull(skb, sizeof(*ip6h)))
+		return -1;
+
+	ip6h = ipv6_hdr(skb);
+
+	if (ip6h->nexthdr != IPPROTO_TCP &&
+	    ip6h->nexthdr != IPPROTO_UDP)
+		return -1;
+
+	thoff = sizeof(*ip6h);
+	if (!pskb_may_pull(skb, thoff + sizeof(*ports)))
+		return -1;
+
+	ports = (struct flow_ports *)(skb_network_header(skb) + thoff);
+
+	tuple->src_v6		= ip6h->saddr;
+	tuple->dst_v6		= ip6h->daddr;
+	tuple->src_port		= ports->source;
+	tuple->dst_port		= ports->dest;
+	tuple->l3proto		= AF_INET6;
+	tuple->l4proto		= ip6h->nexthdr;
+	tuple->iifidx		= dev->ifindex;
+
+	return 0;
+}
+
+/* Based on ip_exceeds_mtu(). */
+static bool __nf_flow_exceeds_mtu(const struct sk_buff *skb, unsigned int mtu)
+{
+	if (skb->len <= mtu)
+		return false;
+
+	if (skb_is_gso(skb) && skb_gso_validate_mtu(skb, mtu))
+		return false;
+
+	return true;
+}
+
+static bool nf_flow_exceeds_mtu(struct sk_buff *skb, const struct rt6_info *rt)
+{
+	u32 mtu;
+
+	mtu = ip6_dst_mtu_forward(&rt->dst);
+	if (__nf_flow_exceeds_mtu(skb, mtu))
+		return true;
+
+	return false;
+}
+
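+/* Fast path: if the packet belongs to an established, offloaded flow,
+ * apply NAT, refresh the flow timeout, decrement the hop limit and
+ * transmit straight to the next hop, bypassing the rest of the stack.
+ */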
+unsigned int
+nf_flow_offload_ipv6_hook(void *priv, struct sk_buff *skb,
+			  const struct nf_hook_state *state)
+{
+	struct flow_offload_tuple_rhash *tuplehash;
+	struct nf_flowtable *flow_table = priv;
+	struct flow_offload_tuple tuple = {};
+	enum flow_offload_tuple_dir dir;
+	struct flow_offload *flow;
+	struct net_device *outdev;
+	struct in6_addr *nexthop;
+	struct ipv6hdr *ip6h;
+	struct rt6_info *rt;
+
+	if (skb->protocol != htons(ETH_P_IPV6))
+		return NF_ACCEPT;
+
+	if (nf_flow_tuple_ipv6(skb, state->in, &tuple) < 0)
+		return NF_ACCEPT;
+
+	tuplehash = flow_offload_lookup(flow_table, &tuple);
+	if (tuplehash == NULL)
+		return NF_ACCEPT;
+
+	outdev = dev_get_by_index_rcu(state->net, tuplehash->tuple.oifidx);
+	if (!outdev)
+		return NF_ACCEPT;
+
+	dir = tuplehash->tuple.dir;
+	flow = container_of(tuplehash, struct flow_offload, tuplehash[dir]);
+
+	rt = (struct rt6_info *)flow->tuplehash[dir].tuple.dst_cache;
+	if (unlikely(nf_flow_exceeds_mtu(skb, rt)))
+		return NF_ACCEPT;
+
+	if (skb_try_make_writable(skb, sizeof(*ip6h)))
+		return NF_DROP;
+
+	if (flow->flags & (FLOW_OFFLOAD_SNAT | FLOW_OFFLOAD_DNAT) &&
+	    nf_flow_nat_ipv6(flow, skb, dir) < 0)
+		return NF_DROP;
+
+	flow->timeout = (u32)jiffies + NF_FLOW_TIMEOUT;
+	ip6h = ipv6_hdr(skb);
+	ip6h->hop_limit--;
+
+	skb->dev = outdev;
+	nexthop = rt6_nexthop(rt, &flow->tuplehash[!dir].tuple.src_v6);
+	neigh_xmit(NEIGH_ND_TABLE, outdev, nexthop, skb);
+
+	return NF_STOLEN;
+}
+EXPORT_SYMBOL_GPL(nf_flow_offload_ipv6_hook);
+
+static struct nf_flowtable_type flowtable_ipv6 = {
+	.family		= NFPROTO_IPV6,
+	.params		= &nf_flow_offload_rhash_params,
+	.gc		= nf_flow_offload_work_gc,
+	.hook		= nf_flow_offload_ipv6_hook,
+	.owner		= THIS_MODULE,
+};
+
+static int __init nf_flow_ipv6_module_init(void)
+{
+	nft_register_flowtable_type(&flowtable_ipv6);
+
+	return 0;
+}
+
+static void __exit nf_flow_ipv6_module_exit(void)
+{
+	nft_unregister_flowtable_type(&flowtable_ipv6);
+}
+
+module_init(nf_flow_ipv6_module_init);
+module_exit(nf_flow_ipv6_module_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Pablo Neira Ayuso <pablo@netfilter.org>");
+MODULE_ALIAS_NF_FLOWTABLE(AF_INET6);
diff --git a/net/ipv6/netfilter/nf_nat_l3proto_ipv6.c b/net/ipv6/netfilter/nf_nat_l3proto_ipv6.c
index 1d2fb9267d6f..bed57ee65f7b 100644
--- a/net/ipv6/netfilter/nf_nat_l3proto_ipv6.c
+++ b/net/ipv6/netfilter/nf_nat_l3proto_ipv6.c
@@ -369,10 +369,6 @@ nf_nat_ipv6_out(void *priv, struct sk_buff *skb,
 #endif
 	unsigned int ret;
 
-	/* root is playing with raw sockets. */
-	if (skb->len < sizeof(struct ipv6hdr))
-		return NF_ACCEPT;
-
 	ret = nf_nat_ipv6_fn(priv, skb, state, do_chain);
 #ifdef CONFIG_XFRM
 	if (ret != NF_DROP && ret != NF_STOLEN &&
@@ -408,10 +404,6 @@ nf_nat_ipv6_local_fn(void *priv, struct sk_buff *skb,
 	unsigned int ret;
 	int err;
 
-	/* root is playing with raw sockets. */
-	if (skb->len < sizeof(struct ipv6hdr))
-		return NF_ACCEPT;
-
 	ret = nf_nat_ipv6_fn(priv, skb, state, do_chain);
 	if (ret != NF_DROP && ret != NF_STOLEN &&
 	    (ct = nf_ct_get(skb, &ctinfo)) != NULL) {
diff --git a/net/ipv6/netfilter/nf_tables_ipv6.c b/net/ipv6/netfilter/nf_tables_ipv6.c
index d6e4ba5de916..17e03589331c 100644
--- a/net/ipv6/netfilter/nf_tables_ipv6.c
+++ b/net/ipv6/netfilter/nf_tables_ipv6.c
@@ -22,68 +22,12 @@ static unsigned int nft_do_chain_ipv6(void *priv,
 {
 	struct nft_pktinfo pkt;
 
-	nft_set_pktinfo_ipv6(&pkt, skb, state);
+	nft_set_pktinfo(&pkt, skb, state);
+	nft_set_pktinfo_ipv6(&pkt, skb);
 
 	return nft_do_chain(&pkt, priv);
 }
 
-static unsigned int nft_ipv6_output(void *priv,
-				    struct sk_buff *skb,
-				    const struct nf_hook_state *state)
-{
-	if (unlikely(skb->len < sizeof(struct ipv6hdr))) {
-		if (net_ratelimit())
-			pr_info("nf_tables_ipv6: ignoring short SOCK_RAW "
-				"packet\n");
-		return NF_ACCEPT;
-	}
-
-	return nft_do_chain_ipv6(priv, skb, state);
-}
-
-struct nft_af_info nft_af_ipv6 __read_mostly = {
-	.family		= NFPROTO_IPV6,
-	.nhooks		= NF_INET_NUMHOOKS,
-	.owner		= THIS_MODULE,
-	.nops		= 1,
-	.hooks		= {
-		[NF_INET_LOCAL_IN]	= nft_do_chain_ipv6,
-		[NF_INET_LOCAL_OUT]	= nft_ipv6_output,
-		[NF_INET_FORWARD]	= nft_do_chain_ipv6,
-		[NF_INET_PRE_ROUTING]	= nft_do_chain_ipv6,
-		[NF_INET_POST_ROUTING]	= nft_do_chain_ipv6,
-	},
-};
-EXPORT_SYMBOL_GPL(nft_af_ipv6);
-
-static int nf_tables_ipv6_init_net(struct net *net)
-{
-	net->nft.ipv6 = kmalloc(sizeof(struct nft_af_info), GFP_KERNEL);
-	if (net->nft.ipv6 == NULL)
-		return -ENOMEM;
-
-	memcpy(net->nft.ipv6, &nft_af_ipv6, sizeof(nft_af_ipv6));
-
-	if (nft_register_afinfo(net, net->nft.ipv6) < 0)
-		goto err;
-
-	return 0;
-err:
-	kfree(net->nft.ipv6);
-	return -ENOMEM;
-}
-
-static void nf_tables_ipv6_exit_net(struct net *net)
-{
-	nft_unregister_afinfo(net, net->nft.ipv6);
-	kfree(net->nft.ipv6);
-}
-
-static struct pernet_operations nf_tables_ipv6_net_ops = {
-	.init	= nf_tables_ipv6_init_net,
-	.exit	= nf_tables_ipv6_exit_net,
-};
-
 static const struct nf_chain_type filter_ipv6 = {
 	.name		= "filter",
 	.type		= NFT_CHAIN_T_DEFAULT,
@@ -94,26 +38,22 @@ static const struct nf_chain_type filter_ipv6 = {
 			  (1 << NF_INET_FORWARD) |
 			  (1 << NF_INET_PRE_ROUTING) |
 			  (1 << NF_INET_POST_ROUTING),
+	.hooks		= {
+		[NF_INET_LOCAL_IN]	= nft_do_chain_ipv6,
+		[NF_INET_LOCAL_OUT]	= nft_do_chain_ipv6,
+		[NF_INET_FORWARD]	= nft_do_chain_ipv6,
+		[NF_INET_PRE_ROUTING]	= nft_do_chain_ipv6,
+		[NF_INET_POST_ROUTING]	= nft_do_chain_ipv6,
+	},
 };
 
 static int __init nf_tables_ipv6_init(void)
 {
-	int ret;
-
-	ret = nft_register_chain_type(&filter_ipv6);
-	if (ret < 0)
-		return ret;
-
-	ret = register_pernet_subsys(&nf_tables_ipv6_net_ops);
-	if (ret < 0)
-		nft_unregister_chain_type(&filter_ipv6);
-
-	return ret;
+	return nft_register_chain_type(&filter_ipv6);
 }
 
 static void __exit nf_tables_ipv6_exit(void)
 {
-	unregister_pernet_subsys(&nf_tables_ipv6_net_ops);
 	nft_unregister_chain_type(&filter_ipv6);
 }
 
@@ -122,4 +62,4 @@ module_exit(nf_tables_ipv6_exit);
 
 MODULE_LICENSE("GPL");
 MODULE_AUTHOR("Patrick McHardy <kaber@trash.net>");
-MODULE_ALIAS_NFT_FAMILY(AF_INET6);
+MODULE_ALIAS_NFT_CHAIN(AF_INET6, "filter");
diff --git a/net/ipv6/netfilter/nft_chain_nat_ipv6.c b/net/ipv6/netfilter/nft_chain_nat_ipv6.c
index 443cd306c0b0..73fe2bd13fcf 100644
--- a/net/ipv6/netfilter/nft_chain_nat_ipv6.c
+++ b/net/ipv6/netfilter/nft_chain_nat_ipv6.c
@@ -31,7 +31,8 @@ static unsigned int nft_nat_do_chain(void *priv,
 {
 	struct nft_pktinfo pkt;
 
-	nft_set_pktinfo_ipv6(&pkt, skb, state);
+	nft_set_pktinfo(&pkt, skb, state);
+	nft_set_pktinfo_ipv6(&pkt, skb);
 
 	return nft_do_chain(&pkt, priv);
 }
diff --git a/net/ipv6/netfilter/nft_chain_route_ipv6.c b/net/ipv6/netfilter/nft_chain_route_ipv6.c
index f2727475895e..11d3c3b9aa18 100644
--- a/net/ipv6/netfilter/nft_chain_route_ipv6.c
+++ b/net/ipv6/netfilter/nft_chain_route_ipv6.c
@@ -33,7 +33,8 @@ static unsigned int nf_route_table_hook(void *priv,
 	u32 mark, flowlabel;
 	int err;
 
-	nft_set_pktinfo_ipv6(&pkt, skb, state);
+	nft_set_pktinfo(&pkt, skb, state);
+	nft_set_pktinfo_ipv6(&pkt, skb);
 
 	/* save source/dest address, mark, hoplimit, flowlabel, priority */
 	memcpy(&saddr, &ipv6_hdr(skb)->saddr, sizeof(saddr));
diff --git a/net/ipv6/netfilter/nft_fib_ipv6.c b/net/ipv6/netfilter/nft_fib_ipv6.c
index 54b5899543ef..cc5174c7254c 100644
--- a/net/ipv6/netfilter/nft_fib_ipv6.c
+++ b/net/ipv6/netfilter/nft_fib_ipv6.c
@@ -60,7 +60,6 @@ static u32 __nft_fib6_eval_type(const struct nft_fib *priv,
 {
 	const struct net_device *dev = NULL;
 	const struct nf_ipv6_ops *v6ops;
-	const struct nf_afinfo *afinfo;
 	int route_err, addrtype;
 	struct rt6_info *rt;
 	struct flowi6 fl6 = {
@@ -69,8 +68,8 @@ static u32 __nft_fib6_eval_type(const struct nft_fib *priv,
 	};
 	u32 ret = 0;
 
-	afinfo = nf_get_afinfo(NFPROTO_IPV6);
-	if (!afinfo)
+	v6ops = nf_get_ipv6_ops();
+	if (!v6ops)
 		return RTN_UNREACHABLE;
 
 	if (priv->flags & NFTA_FIB_F_IIF)
@@ -80,12 +79,11 @@ static u32 __nft_fib6_eval_type(const struct nft_fib *priv,
 
 	nft_fib6_flowi_init(&fl6, priv, pkt, dev, iph);
 
-	v6ops = nf_get_ipv6_ops();
-	if (dev && v6ops && v6ops->chk_addr(nft_net(pkt), &fl6.daddr, dev, true))
+	if (dev && v6ops->chk_addr(nft_net(pkt), &fl6.daddr, dev, true))
 		ret = RTN_LOCAL;
 
-	route_err = afinfo->route(nft_net(pkt), (struct dst_entry **)&rt,
-				  flowi6_to_flowi(&fl6), false);
+	route_err = v6ops->route(nft_net(pkt), (struct dst_entry **)&rt,
+				 flowi6_to_flowi(&fl6), false);
 	if (route_err)
 		goto err;
 
diff --git a/net/ipv6/proc.c b/net/ipv6/proc.c
index e88bcb8ff0fd..b67814242f78 100644
--- a/net/ipv6/proc.c
+++ b/net/ipv6/proc.c
@@ -58,7 +58,6 @@ static int sockstat6_seq_open(struct inode *inode, struct file *file)
 }
 
 static const struct file_operations sockstat6_seq_fops = {
-	.owner	 = THIS_MODULE,
 	.open	 = sockstat6_seq_open,
 	.read	 = seq_read,
 	.llseek	 = seq_lseek,
@@ -248,7 +247,6 @@ static int snmp6_seq_open(struct inode *inode, struct file *file)
 }
 
 static const struct file_operations snmp6_seq_fops = {
-	.owner	 = THIS_MODULE,
 	.open	 = snmp6_seq_open,
 	.read	 = seq_read,
 	.llseek	 = seq_lseek,
@@ -274,7 +272,6 @@ static int snmp6_dev_seq_open(struct inode *inode, struct file *file)
 }
 
 static const struct file_operations snmp6_dev_seq_fops = {
-	.owner	 = THIS_MODULE,
 	.open	 = snmp6_dev_seq_open,
 	.read	 = seq_read,
 	.llseek	 = seq_lseek,
diff --git a/net/ipv6/raw.c b/net/ipv6/raw.c
index 761a473a07c5..ddda7eb3c623 100644
--- a/net/ipv6/raw.c
+++ b/net/ipv6/raw.c
@@ -1308,7 +1308,6 @@ static int raw6_seq_open(struct inode *inode, struct file *file)
 }
 
 static const struct file_operations raw6_seq_fops = {
-	.owner =	THIS_MODULE,
 	.open =		raw6_seq_open,
 	.read =		seq_read,
 	.llseek =	seq_lseek,
diff --git a/net/ipv6/route.c b/net/ipv6/route.c
index 0458b761f3c5..fb2d251c0500 100644
--- a/net/ipv6/route.c
+++ b/net/ipv6/route.c
@@ -186,7 +186,7 @@ static void rt6_uncached_list_flush_dev(struct net *net, struct net_device *dev)
 
 static u32 *rt6_pcpu_cow_metrics(struct rt6_info *rt)
 {
-	return dst_metrics_write_ptr(rt->dst.from);
+	return dst_metrics_write_ptr(&rt->from->dst);
 }
 
 static u32 *ipv6_cow_metrics(struct dst_entry *dst, unsigned long old)
@@ -391,7 +391,7 @@ static void ip6_dst_destroy(struct dst_entry *dst)
 {
 	struct rt6_info *rt = (struct rt6_info *)dst;
 	struct rt6_exception_bucket *bucket;
-	struct dst_entry *from = dst->from;
+	struct rt6_info *from = rt->from;
 	struct inet6_dev *idev;
 
 	dst_destroy_metrics_generic(dst);
@@ -409,8 +409,8 @@ static void ip6_dst_destroy(struct dst_entry *dst)
 		kfree(bucket);
 	}
 
-	dst->from = NULL;
-	dst_release(from);
+	rt->from = NULL;
+	dst_release(&from->dst);
 }
 
 static void ip6_dst_ifdown(struct dst_entry *dst, struct net_device *dev,
@@ -443,9 +443,9 @@ static bool rt6_check_expired(const struct rt6_info *rt)
 	if (rt->rt6i_flags & RTF_EXPIRES) {
 		if (time_after(jiffies, rt->dst.expires))
 			return true;
-	} else if (rt->dst.from) {
+	} else if (rt->from) {
 		return rt->dst.obsolete != DST_OBSOLETE_FORCE_CHK ||
-		       rt6_check_expired((struct rt6_info *)rt->dst.from);
+			rt6_check_expired(rt->from);
 	}
 	return false;
 }
@@ -455,7 +455,6 @@ static struct rt6_info *rt6_multipath_select(struct rt6_info *match,
 					     int strict)
 {
 	struct rt6_info *sibling, *next_sibling;
-	int route_choosen;
 
 	/* We might have already computed the hash for ICMPv6 errors. In such
 	 * case it will always be non-zero. Otherwise now is the time to do it.
@@ -463,26 +462,19 @@ static struct rt6_info *rt6_multipath_select(struct rt6_info *match,
 	if (!fl6->mp_hash)
 		fl6->mp_hash = rt6_multipath_hash(fl6, NULL);
 
-	route_choosen = fl6->mp_hash % (match->rt6i_nsiblings + 1);
-	/* Don't change the route, if route_choosen == 0
-	 * (siblings does not include ourself)
-	 */
-	if (route_choosen)
-		list_for_each_entry_safe(sibling, next_sibling,
-				&match->rt6i_siblings, rt6i_siblings) {
-			route_choosen--;
-			if (route_choosen == 0) {
-				struct inet6_dev *idev = sibling->rt6i_idev;
-
-				if (!netif_carrier_ok(sibling->dst.dev) &&
-				    idev->cnf.ignore_routes_with_linkdown)
-					break;
-				if (rt6_score_route(sibling, oif, strict) < 0)
-					break;
-				match = sibling;
-				break;
-			}
-		}
+	if (fl6->mp_hash <= atomic_read(&match->rt6i_nh_upper_bound))
+		return match;
+
+	list_for_each_entry_safe(sibling, next_sibling, &match->rt6i_siblings,
+				 rt6i_siblings) {
+		if (fl6->mp_hash > atomic_read(&sibling->rt6i_nh_upper_bound))
+			continue;
+		if (rt6_score_route(sibling, oif, strict) < 0)
+			break;
+		match = sibling;
+		break;
+	}
+
 	return match;
 }
 
@@ -499,12 +491,15 @@ static inline struct rt6_info *rt6_device_match(struct net *net,
 	struct rt6_info *local = NULL;
 	struct rt6_info *sprt;
 
-	if (!oif && ipv6_addr_any(saddr))
-		goto out;
+	if (!oif && ipv6_addr_any(saddr) && !(rt->rt6i_nh_flags & RTNH_F_DEAD))
+		return rt;
 
-	for (sprt = rt; sprt; sprt = rcu_dereference(sprt->dst.rt6_next)) {
+	for (sprt = rt; sprt; sprt = rcu_dereference(sprt->rt6_next)) {
 		struct net_device *dev = sprt->dst.dev;
 
+		if (sprt->rt6i_nh_flags & RTNH_F_DEAD)
+			continue;
+
 		if (oif) {
 			if (dev->ifindex == oif)
 				return sprt;
@@ -533,8 +528,8 @@ static inline struct rt6_info *rt6_device_match(struct net *net,
 		if (flags & RT6_LOOKUP_F_IFACE)
 			return net->ipv6.ip6_null_entry;
 	}
-out:
-	return rt;
+
+	return rt->rt6i_nh_flags & RTNH_F_DEAD ? net->ipv6.ip6_null_entry : rt;
 }
 
 #ifdef CONFIG_IPV6_ROUTER_PREF
@@ -679,10 +674,12 @@ static struct rt6_info *find_match(struct rt6_info *rt, int oif, int strict,
 	int m;
 	bool match_do_rr = false;
 	struct inet6_dev *idev = rt->rt6i_idev;
-	struct net_device *dev = rt->dst.dev;
 
-	if (dev && !netif_carrier_ok(dev) &&
-	    idev->cnf.ignore_routes_with_linkdown &&
+	if (rt->rt6i_nh_flags & RTNH_F_DEAD)
+		goto out;
+
+	if (idev->cnf.ignore_routes_with_linkdown &&
+	    rt->rt6i_nh_flags & RTNH_F_LINKDOWN &&
 	    !(strict & RT6_LOOKUP_F_IGNORE_LINKSTATE))
 		goto out;
 
@@ -721,7 +718,7 @@ static struct rt6_info *find_rr_leaf(struct fib6_node *fn,
 
 	match = NULL;
 	cont = NULL;
-	for (rt = rr_head; rt; rt = rcu_dereference(rt->dst.rt6_next)) {
+	for (rt = rr_head; rt; rt = rcu_dereference(rt->rt6_next)) {
 		if (rt->rt6i_metric != metric) {
 			cont = rt;
 			break;
@@ -731,7 +728,7 @@ static struct rt6_info *find_rr_leaf(struct fib6_node *fn,
 	}
 
 	for (rt = leaf; rt && rt != rr_head;
-	     rt = rcu_dereference(rt->dst.rt6_next)) {
+	     rt = rcu_dereference(rt->rt6_next)) {
 		if (rt->rt6i_metric != metric) {
 			cont = rt;
 			break;
@@ -743,7 +740,7 @@ static struct rt6_info *find_rr_leaf(struct fib6_node *fn,
 	if (match || !cont)
 		return match;
 
-	for (rt = cont; rt; rt = rcu_dereference(rt->dst.rt6_next))
+	for (rt = cont; rt; rt = rcu_dereference(rt->rt6_next))
 		match = find_match(rt, oif, strict, &mpri, match, do_rr);
 
 	return match;
@@ -781,7 +778,7 @@ static struct rt6_info *rt6_select(struct net *net, struct fib6_node *fn,
 			     &do_rr);
 
 	if (do_rr) {
-		struct rt6_info *next = rcu_dereference(rt0->dst.rt6_next);
+		struct rt6_info *next = rcu_dereference(rt0->rt6_next);
 
 		/* no entries matched; do round-robin */
 		if (!next || next->rt6i_metric != rt0->rt6i_metric)
@@ -1054,7 +1051,7 @@ static struct rt6_info *ip6_rt_cache_alloc(struct rt6_info *ort,
 	 */
 
 	if (ort->rt6i_flags & (RTF_CACHE | RTF_PCPU))
-		ort = (struct rt6_info *)ort->dst.from;
+		ort = ort->from;
 
 	rcu_read_lock();
 	dev = ip6_rt_get_dev_rcu(ort);
@@ -1274,7 +1271,7 @@ static int rt6_insert_exception(struct rt6_info *nrt,
 
 	/* ort can't be a cache or pcpu route */
 	if (ort->rt6i_flags & (RTF_CACHE | RTF_PCPU))
-		ort = (struct rt6_info *)ort->dst.from;
+		ort = ort->from;
 	WARN_ON_ONCE(ort->rt6i_flags & (RTF_CACHE | RTF_PCPU));
 
 	spin_lock_bh(&rt6_exception_lock);
@@ -1346,7 +1343,9 @@ out:
 
 	/* Update fn->fn_sernum to invalidate all cached dst */
 	if (!err) {
+		spin_lock_bh(&ort->rt6i_table->tb6_lock);
 		fib6_update_sernum(ort);
+		spin_unlock_bh(&ort->rt6i_table->tb6_lock);
 		fib6_force_start_gc(net);
 	}
 
@@ -1415,8 +1414,8 @@ static struct rt6_info *rt6_find_cached_rt(struct rt6_info *rt,
 /* Remove the passed in cached rt from the hash table that contains it */
 int rt6_remove_exception_rt(struct rt6_info *rt)
 {
-	struct rt6_info *from = (struct rt6_info *)rt->dst.from;
 	struct rt6_exception_bucket *bucket;
+	struct rt6_info *from = rt->from;
 	struct in6_addr *src_key = NULL;
 	struct rt6_exception *rt6_ex;
 	int err;
@@ -1460,8 +1459,8 @@ int rt6_remove_exception_rt(struct rt6_info *rt)
  */
 static void rt6_update_exception_stamp_rt(struct rt6_info *rt)
 {
-	struct rt6_info *from = (struct rt6_info *)rt->dst.from;
 	struct rt6_exception_bucket *bucket;
+	struct rt6_info *from = rt->from;
 	struct in6_addr *src_key = NULL;
 	struct rt6_exception *rt6_ex;
 
@@ -1586,12 +1585,19 @@ static void rt6_age_examine_exception(struct rt6_exception_bucket *bucket,
 	 * EXPIRES exceptions - e.g. pmtu-generated ones are pruned when
 	 * expired, independently from their aging, as per RFC 8201 section 4
 	 */
-	if (!(rt->rt6i_flags & RTF_EXPIRES) &&
-	    time_after_eq(now, rt->dst.lastuse + gc_args->timeout)) {
-		RT6_TRACE("aging clone %p\n", rt);
+	if (!(rt->rt6i_flags & RTF_EXPIRES)) {
+		if (time_after_eq(now, rt->dst.lastuse + gc_args->timeout)) {
+			RT6_TRACE("aging clone %p\n", rt);
+			rt6_remove_exception(bucket, rt6_ex);
+			return;
+		}
+	} else if (time_after(jiffies, rt->dst.expires)) {
+		RT6_TRACE("purging expired route %p\n", rt);
 		rt6_remove_exception(bucket, rt6_ex);
 		return;
-	} else if (rt->rt6i_flags & RTF_GATEWAY) {
+	}
+
+	if (rt->rt6i_flags & RTF_GATEWAY) {
 		struct neighbour *neigh;
 		__u8 neigh_flags = 0;
 
@@ -1606,11 +1612,8 @@ static void rt6_age_examine_exception(struct rt6_exception_bucket *bucket,
 			rt6_remove_exception(bucket, rt6_ex);
 			return;
 		}
-	} else if (__rt6_check_expired(rt)) {
-		RT6_TRACE("purging expired route %p\n", rt);
-		rt6_remove_exception(bucket, rt6_ex);
-		return;
 	}
+
 	gc_args->more++;
 }
 
@@ -1824,10 +1827,10 @@ u32 rt6_multipath_hash(const struct flowi6 *fl6, const struct sk_buff *skb)
 
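+	/* Use only 31 bits of the hash: nexthop upper bounds are signed
+	 * ints, with -1 reserved for unusable nexthops.
+	 */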
 	if (skb) {
 		ip6_multipath_l3_keys(skb, &hash_keys);
-		return flow_hash_from_keys(&hash_keys);
+		return flow_hash_from_keys(&hash_keys) >> 1;
 	}
 
-	return get_hash_from_flowi6(fl6);
+	return get_hash_from_flowi6(fl6) >> 1;
 }
 
 void ip6_route_input(struct sk_buff *skb)
@@ -1929,9 +1932,9 @@ struct dst_entry *ip6_blackhole_route(struct net *net, struct dst_entry *dst_ori
 
 static void rt6_dst_from_metrics_check(struct rt6_info *rt)
 {
-	if (rt->dst.from &&
-	    dst_metrics_ptr(&rt->dst) != dst_metrics_ptr(rt->dst.from))
-		dst_init_metrics(&rt->dst, dst_metrics_ptr(rt->dst.from), true);
+	if (rt->from &&
+	    dst_metrics_ptr(&rt->dst) != dst_metrics_ptr(&rt->from->dst))
+		dst_init_metrics(&rt->dst, dst_metrics_ptr(&rt->from->dst), true);
 }
 
 static struct dst_entry *rt6_check(struct rt6_info *rt, u32 cookie)
@@ -1951,7 +1954,7 @@ static struct dst_entry *rt6_dst_from_check(struct rt6_info *rt, u32 cookie)
 {
 	if (!__rt6_check_expired(rt) &&
 	    rt->dst.obsolete == DST_OBSOLETE_FORCE_CHK &&
-	    rt6_check((struct rt6_info *)(rt->dst.from), cookie))
+	    rt6_check(rt->from, cookie))
 		return &rt->dst;
 	else
 		return NULL;
@@ -1971,7 +1974,7 @@ static struct dst_entry *ip6_dst_check(struct dst_entry *dst, u32 cookie)
 	rt6_dst_from_metrics_check(rt);
 
 	if (rt->rt6i_flags & RTF_PCPU ||
-	    (unlikely(!list_empty(&rt->rt6i_uncached)) && rt->dst.from))
+	    (unlikely(!list_empty(&rt->rt6i_uncached)) && rt->from))
 		return rt6_dst_from_check(rt, cookie);
 	else
 		return rt6_check(rt, cookie);
@@ -2154,6 +2157,8 @@ static struct rt6_info *__ip6_route_redirect(struct net *net,
 	fn = fib6_lookup(&table->tb6_root, &fl6->daddr, &fl6->saddr);
 restart:
 	for_each_fib6_node_rt_rcu(fn) {
+		if (rt->rt6i_nh_flags & RTNH_F_DEAD)
+			continue;
 		if (rt6_check_expired(rt))
 			continue;
 		if (rt->dst.error)
@@ -2344,7 +2349,7 @@ struct dst_entry *icmp6_dst_alloc(struct net_device *dev,
 	rt->rt6i_idev     = idev;
 	dst_metric_set(&rt->dst, RTAX_HOPLIMIT, 0);
 
-	/* Add this dst into uncached_list so that rt6_ifdown() can
+	/* Add this dst into uncached_list so that rt6_disable_ip() can
 	 * do proper release of the net_device
 	 */
 	rt6_uncached_list_add(rt);
@@ -2439,7 +2444,8 @@ static int ip6_convert_metrics(struct mx6_config *mxc,
 
 static struct rt6_info *ip6_nh_lookup_table(struct net *net,
 					    struct fib6_config *cfg,
-					    const struct in6_addr *gw_addr)
+					    const struct in6_addr *gw_addr,
+					    u32 tbid, int flags)
 {
 	struct flowi6 fl6 = {
 		.flowi6_oif = cfg->fc_ifindex,
@@ -2448,15 +2454,15 @@ static struct rt6_info *ip6_nh_lookup_table(struct net *net,
 	};
 	struct fib6_table *table;
 	struct rt6_info *rt;
-	int flags = RT6_LOOKUP_F_IFACE | RT6_LOOKUP_F_IGNORE_LINKSTATE;
 
-	table = fib6_get_table(net, cfg->fc_table);
+	table = fib6_get_table(net, tbid);
 	if (!table)
 		return NULL;
 
 	if (!ipv6_addr_any(&cfg->fc_prefsrc))
 		flags |= RT6_LOOKUP_F_HAS_SADDR;
 
+	flags |= RT6_LOOKUP_F_IGNORE_LINKSTATE;
 	rt = ip6_pol_route(net, table, cfg->fc_ifindex, &fl6, flags);
 
 	/* if table lookup failed, fall back to full lookup */
@@ -2468,6 +2474,82 @@ static struct rt6_info *ip6_nh_lookup_table(struct net *net,
 	return rt;
 }
 
+static int ip6_route_check_nh_onlink(struct net *net,
+				     struct fib6_config *cfg,
+				     struct net_device *dev,
+				     struct netlink_ext_ack *extack)
+{
+	u32 tbid = l3mdev_fib_table(dev) ? : RT_TABLE_LOCAL;
+	const struct in6_addr *gw_addr = &cfg->fc_gateway;
+	u32 flags = RTF_LOCAL | RTF_ANYCAST | RTF_REJECT;
+	struct rt6_info *grt;
+	int err;
+
+	err = 0;
+	grt = ip6_nh_lookup_table(net, cfg, gw_addr, tbid, 0);
+	if (grt) {
+		if (grt->rt6i_flags & flags || dev != grt->dst.dev) {
+			NL_SET_ERR_MSG(extack, "Nexthop has invalid gateway");
+			err = -EINVAL;
+		}
+
+		ip6_rt_put(grt);
+	}
+
+	return err;
+}
+
+static int ip6_route_check_nh(struct net *net,
+			      struct fib6_config *cfg,
+			      struct net_device **_dev,
+			      struct inet6_dev **idev)
+{
+	const struct in6_addr *gw_addr = &cfg->fc_gateway;
+	struct net_device *dev = _dev ? *_dev : NULL;
+	struct rt6_info *grt = NULL;
+	int err = -EHOSTUNREACH;
+
+	if (cfg->fc_table) {
+		int flags = RT6_LOOKUP_F_IFACE;
+
+		grt = ip6_nh_lookup_table(net, cfg, gw_addr,
+					  cfg->fc_table, flags);
+		if (grt) {
+			if (grt->rt6i_flags & RTF_GATEWAY ||
+			    (dev && dev != grt->dst.dev)) {
+				ip6_rt_put(grt);
+				grt = NULL;
+			}
+		}
+	}
+
+	if (!grt)
+		grt = rt6_lookup(net, gw_addr, NULL, cfg->fc_ifindex, 1);
+
+	if (!grt)
+		goto out;
+
+	if (dev) {
+		if (dev != grt->dst.dev) {
+			ip6_rt_put(grt);
+			goto out;
+		}
+	} else {
+		*_dev = dev = grt->dst.dev;
+		*idev = grt->rt6i_idev;
+		dev_hold(dev);
+		in6_dev_hold(grt->rt6i_idev);
+	}
+
+	if (!(grt->rt6i_flags & RTF_GATEWAY))
+		err = 0;
+
+	ip6_rt_put(grt);
+
+out:
+	return err;
+}
+
 static struct rt6_info *ip6_route_info_create(struct fib6_config *cfg,
 					      struct netlink_ext_ack *extack)
 {
@@ -2519,6 +2601,21 @@ static struct rt6_info *ip6_route_info_create(struct fib6_config *cfg,
 	if (cfg->fc_metric == 0)
 		cfg->fc_metric = IP6_RT_PRIO_USER;
 
+	if (cfg->fc_flags & RTNH_F_ONLINK) {
+		if (!dev) {
+			NL_SET_ERR_MSG(extack,
+				       "Nexthop device required for onlink");
+			err = -ENODEV;
+			goto out;
+		}
+
+		if (!(dev->flags & IFF_UP)) {
+			NL_SET_ERR_MSG(extack, "Nexthop device is not up");
+			err = -ENETDOWN;
+			goto out;
+		}
+	}
+
 	err = -ENOBUFS;
 	if (cfg->fc_nlinfo.nlh &&
 	    !(cfg->fc_nlinfo.nlh->nlmsg_flags & NLM_F_CREATE)) {
@@ -2593,6 +2690,7 @@ static struct rt6_info *ip6_route_info_create(struct fib6_config *cfg,
 #endif
 
 	rt->rt6i_metric = cfg->fc_metric;
+	rt->rt6i_nh_weight = 1;
 
 	/* We cannot add true routes via loopback here,
 	   they would result in kernel looping; promote them to reject routes
@@ -2662,8 +2760,6 @@ static struct rt6_info *ip6_route_info_create(struct fib6_config *cfg,
 		rt->rt6i_gateway = *gw_addr;
 
 		if (gwa_type != (IPV6_ADDR_LINKLOCAL|IPV6_ADDR_UNICAST)) {
-			struct rt6_info *grt = NULL;
-
 			/* IPv6 strictly inhibits using non-link-local
 			   addresses as nexthop addresses.
 			   Otherwise, the router will not be able to send redirects.
@@ -2680,40 +2776,12 @@ static struct rt6_info *ip6_route_info_create(struct fib6_config *cfg,
 				goto out;
 			}
 
-			if (cfg->fc_table) {
-				grt = ip6_nh_lookup_table(net, cfg, gw_addr);
-
-				if (grt) {
-					if (grt->rt6i_flags & RTF_GATEWAY ||
-					    (dev && dev != grt->dst.dev)) {
-						ip6_rt_put(grt);
-						grt = NULL;
-					}
-				}
-			}
-
-			if (!grt)
-				grt = rt6_lookup(net, gw_addr, NULL,
-						 cfg->fc_ifindex, 1);
-
-			err = -EHOSTUNREACH;
-			if (!grt)
-				goto out;
-			if (dev) {
-				if (dev != grt->dst.dev) {
-					ip6_rt_put(grt);
-					goto out;
-				}
+			if (cfg->fc_flags & RTNH_F_ONLINK) {
+				err = ip6_route_check_nh_onlink(net, cfg, dev,
+								extack);
 			} else {
-				dev = grt->dst.dev;
-				idev = grt->rt6i_idev;
-				dev_hold(dev);
-				in6_dev_hold(grt->rt6i_idev);
+				err = ip6_route_check_nh(net, cfg, &dev, &idev);
 			}
-			if (!(grt->rt6i_flags & RTF_GATEWAY))
-				err = 0;
-			ip6_rt_put(grt);
-
 			if (err)
 				goto out;
 		}
@@ -2732,6 +2800,12 @@ static struct rt6_info *ip6_route_info_create(struct fib6_config *cfg,
 	if (!dev)
 		goto out;
 
+	if (!(dev->flags & IFF_UP)) {
+		NL_SET_ERR_MSG(extack, "Nexthop device is not up");
+		err = -ENETDOWN;
+		goto out;
+	}
+
 	if (!ipv6_addr_any(&cfg->fc_prefsrc)) {
 		if (!ipv6_chk_addr(net, &cfg->fc_prefsrc, dev, 0)) {
 			NL_SET_ERR_MSG(extack, "Invalid source address");
@@ -2746,6 +2820,10 @@ static struct rt6_info *ip6_route_info_create(struct fib6_config *cfg,
 	rt->rt6i_flags = cfg->fc_flags;
 
 install_route:
+	if (!(rt->rt6i_flags & (RTF_LOCAL | RTF_ANYCAST)) &&
+	    !netif_carrier_ok(dev))
+		rt->rt6i_nh_flags |= RTNH_F_LINKDOWN;
+	rt->rt6i_nh_flags |= (cfg->fc_flags & RTNH_F_ONLINK);
 	rt->dst.dev = dev;
 	rt->rt6i_idev = idev;
 	rt->rt6i_table = table;
@@ -3056,11 +3134,11 @@ out:
 
 static void rt6_set_from(struct rt6_info *rt, struct rt6_info *from)
 {
-	BUG_ON(from->dst.from);
+	BUG_ON(from->from);
 
 	rt->rt6i_flags &= ~RTF_EXPIRES;
 	dst_hold(&from->dst);
-	rt->dst.from = &from->dst;
+	rt->from = from;
 	dst_init_metrics(&rt->dst, dst_metrics_ptr(&from->dst), true);
 }
 
@@ -3459,37 +3537,249 @@ void rt6_clean_tohost(struct net *net, struct in6_addr *gateway)
 	fib6_clean_all(net, fib6_clean_tohost, gateway);
 }
 
-struct arg_dev_net {
-	struct net_device *dev;
-	struct net *net;
+struct arg_netdev_event {
+	const struct net_device *dev;
+	union {
+		unsigned int nh_flags;
+		unsigned long event;
+	};
 };
 
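+/* Starting from the fib6 node's leaf, find the first sibling of rt's
+ * multipath route: same metric and ECMP-qualified.
+ */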
+static struct rt6_info *rt6_multipath_first_sibling(const struct rt6_info *rt)
+{
+	struct rt6_info *iter;
+	struct fib6_node *fn;
+
+	fn = rcu_dereference_protected(rt->rt6i_node,
+			lockdep_is_held(&rt->rt6i_table->tb6_lock));
+	iter = rcu_dereference_protected(fn->leaf,
+			lockdep_is_held(&rt->rt6i_table->tb6_lock));
+	while (iter) {
+		if (iter->rt6i_metric == rt->rt6i_metric &&
+		    rt6_qualify_for_ecmp(iter))
+			return iter;
+		iter = rcu_dereference_protected(iter->rt6_next,
+				lockdep_is_held(&rt->rt6i_table->tb6_lock));
+	}
+
+	return NULL;
+}
+
+static bool rt6_is_dead(const struct rt6_info *rt)
+{
+	if (rt->rt6i_nh_flags & RTNH_F_DEAD ||
+	    (rt->rt6i_nh_flags & RTNH_F_LINKDOWN &&
+	     rt->rt6i_idev->cnf.ignore_routes_with_linkdown))
+		return true;
+
+	return false;
+}
+
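+/* Sum the weights of all usable (not dead) nexthops in the multipath
+ * route.
+ */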
+static int rt6_multipath_total_weight(const struct rt6_info *rt)
+{
+	struct rt6_info *iter;
+	int total = 0;
+
+	if (!rt6_is_dead(rt))
+		total += rt->rt6i_nh_weight;
+
+	list_for_each_entry(iter, &rt->rt6i_siblings, rt6i_siblings) {
+		if (!rt6_is_dead(iter))
+			total += iter->rt6i_nh_weight;
+	}
+
+	return total;
+}
+
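+/* Assign each live nexthop the upper end of its slice of the 31-bit
+ * hash space, proportional to its weight (cumulative weight / total,
+ * scaled to 2^31). Dead nexthops get -1 so no hash can select them.
+ */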
+static void rt6_upper_bound_set(struct rt6_info *rt, int *weight, int total)
+{
+	int upper_bound = -1;
+
+	if (!rt6_is_dead(rt)) {
+		*weight += rt->rt6i_nh_weight;
+		upper_bound = DIV_ROUND_CLOSEST_ULL((u64) (*weight) << 31,
+						    total) - 1;
+	}
+	atomic_set(&rt->rt6i_nh_upper_bound, upper_bound);
+}
+
+static void rt6_multipath_upper_bound_set(struct rt6_info *rt, int total)
+{
+	struct rt6_info *iter;
+	int weight = 0;
+
+	rt6_upper_bound_set(rt, &weight, total);
+
+	list_for_each_entry(iter, &rt->rt6i_siblings, rt6i_siblings)
+		rt6_upper_bound_set(iter, &weight, total);
+}
+
+void rt6_multipath_rebalance(struct rt6_info *rt)
+{
+	struct rt6_info *first;
+	int total;
+
+	/* If the entire multipath route was marked for flushing, there is
+	 * no need to rebalance upon the removal of every sibling route.
+	 */
+	if (!rt->rt6i_nsiblings || rt->should_flush)
+		return;
+
+	/* During lookup routes are evaluated in order, so we need to
+	 * make sure upper bounds are assigned from the first sibling
+	 * onwards.
+	 */
+	first = rt6_multipath_first_sibling(rt);
+	if (WARN_ON_ONCE(!first))
+		return;
+
+	total = rt6_multipath_total_weight(first);
+	rt6_multipath_upper_bound_set(first, total);
+}
+
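+/* fib6_clean_all() callback: when a device comes back up, clear the
+ * requested RTNH_F_* flags on its routes and rebalance any multipath
+ * route they belong to.
+ */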
+static int fib6_ifup(struct rt6_info *rt, void *p_arg)
+{
+	const struct arg_netdev_event *arg = p_arg;
+	const struct net *net = dev_net(arg->dev);
+
+	if (rt != net->ipv6.ip6_null_entry && rt->dst.dev == arg->dev) {
+		rt->rt6i_nh_flags &= ~arg->nh_flags;
+		fib6_update_sernum_upto_root(dev_net(rt->dst.dev), rt);
+		rt6_multipath_rebalance(rt);
+	}
+
+	return 0;
+}
+
+void rt6_sync_up(struct net_device *dev, unsigned int nh_flags)
+{
+	struct arg_netdev_event arg = {
+		.dev = dev,
+		{
+			.nh_flags = nh_flags,
+		},
+	};
+
+	if (nh_flags & RTNH_F_DEAD && netif_carrier_ok(dev))
+		arg.nh_flags |= RTNH_F_LINKDOWN;
+
+	fib6_clean_all(dev_net(dev), fib6_ifup, &arg);
+}
+
+static bool rt6_multipath_uses_dev(const struct rt6_info *rt,
+				   const struct net_device *dev)
+{
+	struct rt6_info *iter;
+
+	if (rt->dst.dev == dev)
+		return true;
+	list_for_each_entry(iter, &rt->rt6i_siblings, rt6i_siblings)
+		if (iter->dst.dev == dev)
+			return true;
+
+	return false;
+}
+
+static void rt6_multipath_flush(struct rt6_info *rt)
+{
+	struct rt6_info *iter;
+
+	rt->should_flush = 1;
+	list_for_each_entry(iter, &rt->rt6i_siblings, rt6i_siblings)
+		iter->should_flush = 1;
+}
+
+static unsigned int rt6_multipath_dead_count(const struct rt6_info *rt,
+					     const struct net_device *down_dev)
+{
+	struct rt6_info *iter;
+	unsigned int dead = 0;
+
+	if (rt->dst.dev == down_dev || rt->rt6i_nh_flags & RTNH_F_DEAD)
+		dead++;
+	list_for_each_entry(iter, &rt->rt6i_siblings, rt6i_siblings)
+		if (iter->dst.dev == down_dev ||
+		    iter->rt6i_nh_flags & RTNH_F_DEAD)
+			dead++;
+
+	return dead;
+}
+
+static void rt6_multipath_nh_flags_set(struct rt6_info *rt,
+				       const struct net_device *dev,
+				       unsigned int nh_flags)
+{
+	struct rt6_info *iter;
+
+	if (rt->dst.dev == dev)
+		rt->rt6i_nh_flags |= nh_flags;
+	list_for_each_entry(iter, &rt->rt6i_siblings, rt6i_siblings)
+		if (iter->dst.dev == dev)
+			iter->rt6i_nh_flags |= nh_flags;
+}
+
 /* called with write lock held for table with rt */
-static int fib6_ifdown(struct rt6_info *rt, void *arg)
+static int fib6_ifdown(struct rt6_info *rt, void *p_arg)
 {
-	const struct arg_dev_net *adn = arg;
-	const struct net_device *dev = adn->dev;
+	const struct arg_netdev_event *arg = p_arg;
+	const struct net_device *dev = arg->dev;
+	const struct net *net = dev_net(dev);
 
-	if ((rt->dst.dev == dev || !dev) &&
-	    rt != adn->net->ipv6.ip6_null_entry &&
-	    (rt->rt6i_nsiblings == 0 ||
-	     (dev && netdev_unregistering(dev)) ||
-	     !rt->rt6i_idev->cnf.ignore_routes_with_linkdown))
-		return -1;
+	if (rt == net->ipv6.ip6_null_entry)
+		return 0;
+
+	switch (arg->event) {
+	case NETDEV_UNREGISTER:
+		return rt->dst.dev == dev ? -1 : 0;
+	case NETDEV_DOWN:
+		if (rt->should_flush)
+			return -1;
+		if (!rt->rt6i_nsiblings)
+			return rt->dst.dev == dev ? -1 : 0;
+		if (rt6_multipath_uses_dev(rt, dev)) {
+			unsigned int count;
+
+			count = rt6_multipath_dead_count(rt, dev);
+			if (rt->rt6i_nsiblings + 1 == count) {
+				rt6_multipath_flush(rt);
+				return -1;
+			}
+			rt6_multipath_nh_flags_set(rt, dev, RTNH_F_DEAD |
+						   RTNH_F_LINKDOWN);
+			fib6_update_sernum(rt);
+			rt6_multipath_rebalance(rt);
+		}
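+		/* -2 lets the tree walker (fib6_clean_node()) skip the
+		 * remaining siblings: the whole multipath route has been
+		 * handled here in one go.
+		 */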
+		return -2;
+	case NETDEV_CHANGE:
+		if (rt->dst.dev != dev ||
+		    rt->rt6i_flags & (RTF_LOCAL | RTF_ANYCAST))
+			break;
+		rt->rt6i_nh_flags |= RTNH_F_LINKDOWN;
+		rt6_multipath_rebalance(rt);
+		break;
+	}
 
 	return 0;
 }
 
-void rt6_ifdown(struct net *net, struct net_device *dev)
+void rt6_sync_down_dev(struct net_device *dev, unsigned long event)
 {
-	struct arg_dev_net adn = {
+	struct arg_netdev_event arg = {
 		.dev = dev,
-		.net = net,
+		{
+			.event = event,
+		},
 	};
 
-	fib6_clean_all(net, fib6_ifdown, &adn);
-	if (dev)
-		rt6_uncached_list_flush_dev(net, dev);
+	fib6_clean_all(dev_net(dev), fib6_ifdown, &arg);
+}
+
+void rt6_disable_ip(struct net_device *dev, unsigned long event)
+{
+	rt6_sync_down_dev(dev, event);
+	rt6_uncached_list_flush_dev(dev_net(dev), dev);
+	neigh_ifdown(&nd_tbl, dev);
 }
 
 struct rt6_mtu_change_arg {
@@ -3603,6 +3893,8 @@ static int rtm_to_fib6_config(struct sk_buff *skb, struct nlmsghdr *nlh,
 	if (rtm->rtm_flags & RTM_F_CLONED)
 		cfg->fc_flags |= RTF_CACHE;
 
+	cfg->fc_flags |= (rtm->rtm_flags & RTNH_F_ONLINK);
+
 	cfg->fc_nlinfo.portid = NETLINK_CB(skb).portid;
 	cfg->fc_nlinfo.nlh = nlh;
 	cfg->fc_nlinfo.nl_net = sock_net(skb->sk);
@@ -3812,6 +4104,8 @@ static int ip6_route_multipath_add(struct fib6_config *cfg,
 			goto cleanup;
 		}
 
+		rt->rt6i_nh_weight = rtnh->rtnh_hops + 1;
+
 		err = ip6_route_info_append(&rt6_nh_list, rt, &r_cfg);
 		if (err) {
 			dst_release_immediate(&rt->dst);
@@ -3992,7 +4286,10 @@ static size_t rt6_nlmsg_size(struct rt6_info *rt)
 static int rt6_nexthop_info(struct sk_buff *skb, struct rt6_info *rt,
 			    unsigned int *flags, bool skip_oif)
 {
-	if (!netif_running(rt->dst.dev) || !netif_carrier_ok(rt->dst.dev)) {
+	if (rt->rt6i_nh_flags & RTNH_F_DEAD)
+		*flags |= RTNH_F_DEAD;
+
+	if (rt->rt6i_nh_flags & RTNH_F_LINKDOWN) {
 		*flags |= RTNH_F_LINKDOWN;
 		if (rt->rt6i_idev->cnf.ignore_routes_with_linkdown)
 			*flags |= RTNH_F_DEAD;
@@ -4003,6 +4300,7 @@ static int rt6_nexthop_info(struct sk_buff *skb, struct rt6_info *rt,
 			goto nla_put_failure;
 	}
 
+	*flags |= (rt->rt6i_nh_flags & RTNH_F_ONLINK);
 	if (rt->rt6i_nh_flags & RTNH_F_OFFLOAD)
 		*flags |= RTNH_F_OFFLOAD;
 
@@ -4031,7 +4329,7 @@ static int rt6_add_nexthop(struct sk_buff *skb, struct rt6_info *rt)
 	if (!rtnh)
 		goto nla_put_failure;
 
-	rtnh->rtnh_hops = 0;
+	rtnh->rtnh_hops = rt->rt6i_nh_weight - 1;
 	rtnh->rtnh_ifindex = rt->dst.dev ? rt->dst.dev->ifindex : 0;
 
 	if (rt6_nexthop_info(skb, rt, &flags, true) < 0)
@@ -4321,9 +4619,8 @@ static int inet6_rtm_getroute(struct sk_buff *in_skb, struct nlmsghdr *nlh,
 		goto errout;
 	}
 
-	if (fibmatch && rt->dst.from) {
-		struct rt6_info *ort = container_of(rt->dst.from,
-						    struct rt6_info, dst);
+	if (fibmatch && rt->from) {
+		struct rt6_info *ort = rt->from;
 
 		dst_hold(&ort->dst);
 		ip6_rt_put(rt);
@@ -4427,7 +4724,6 @@ static int ip6_route_dev_notify(struct notifier_block *this,
 #ifdef CONFIG_PROC_FS
 
 static const struct file_operations ipv6_route_proc_fops = {
-	.owner		= THIS_MODULE,
 	.open		= ipv6_route_open,
 	.read		= seq_read,
 	.llseek		= seq_lseek,
@@ -4455,7 +4751,6 @@ static int rt6_stats_seq_open(struct inode *inode, struct file *file)
 }
 
 static const struct file_operations rt6_stats_seq_fops = {
-	.owner	 = THIS_MODULE,
 	.open	 = rt6_stats_seq_open,
 	.read	 = seq_read,
 	.llseek	 = seq_lseek,
@@ -4600,8 +4895,6 @@ static int __net_init ip6_route_net_init(struct net *net)
 					   GFP_KERNEL);
 	if (!net->ipv6.ip6_null_entry)
 		goto out_ip6_dst_entries;
-	net->ipv6.ip6_null_entry->dst.path =
-		(struct dst_entry *)net->ipv6.ip6_null_entry;
 	net->ipv6.ip6_null_entry->dst.ops = &net->ipv6.ip6_dst_ops;
 	dst_init_metrics(&net->ipv6.ip6_null_entry->dst,
 			 ip6_template_metrics, true);
@@ -4613,8 +4906,6 @@ static int __net_init ip6_route_net_init(struct net *net)
 					       GFP_KERNEL);
 	if (!net->ipv6.ip6_prohibit_entry)
 		goto out_ip6_null_entry;
-	net->ipv6.ip6_prohibit_entry->dst.path =
-		(struct dst_entry *)net->ipv6.ip6_prohibit_entry;
 	net->ipv6.ip6_prohibit_entry->dst.ops = &net->ipv6.ip6_dst_ops;
 	dst_init_metrics(&net->ipv6.ip6_prohibit_entry->dst,
 			 ip6_template_metrics, true);
@@ -4624,8 +4915,6 @@ static int __net_init ip6_route_net_init(struct net *net)
 					       GFP_KERNEL);
 	if (!net->ipv6.ip6_blk_hole_entry)
 		goto out_ip6_prohibit_entry;
-	net->ipv6.ip6_blk_hole_entry->dst.path =
-		(struct dst_entry *)net->ipv6.ip6_blk_hole_entry;
 	net->ipv6.ip6_blk_hole_entry->dst.ops = &net->ipv6.ip6_dst_ops;
 	dst_init_metrics(&net->ipv6.ip6_blk_hole_entry->dst,
 			 ip6_template_metrics, true);
@@ -4782,11 +5071,20 @@ int __init ip6_route_init(void)
 	if (ret)
 		goto fib6_rules_init;
 
-	ret = -ENOBUFS;
-	if (__rtnl_register(PF_INET6, RTM_NEWROUTE, inet6_rtm_newroute, NULL, 0) ||
-	    __rtnl_register(PF_INET6, RTM_DELROUTE, inet6_rtm_delroute, NULL, 0) ||
-	    __rtnl_register(PF_INET6, RTM_GETROUTE, inet6_rtm_getroute, NULL,
-			    RTNL_FLAG_DOIT_UNLOCKED))
+	ret = rtnl_register_module(THIS_MODULE, PF_INET6, RTM_NEWROUTE,
+				   inet6_rtm_newroute, NULL, 0);
+	if (ret < 0)
+		goto out_register_late_subsys;
+
+	ret = rtnl_register_module(THIS_MODULE, PF_INET6, RTM_DELROUTE,
+				   inet6_rtm_delroute, NULL, 0);
+	if (ret < 0)
+		goto out_register_late_subsys;
+
+	ret = rtnl_register_module(THIS_MODULE, PF_INET6, RTM_GETROUTE,
+				   inet6_rtm_getroute, NULL,
+				   RTNL_FLAG_DOIT_UNLOCKED);
+	if (ret < 0)
 		goto out_register_late_subsys;
 
 	ret = register_netdevice_notifier(&ip6_route_dev_notifier);
@@ -4804,6 +5102,7 @@ out:
 	return ret;
 
 out_register_late_subsys:
+	rtnl_unregister_all(PF_INET6);
 	unregister_pernet_subsys(&ip6_route_net_late_ops);
 fib6_rules_init:
 	fib6_rules_cleanup();
diff --git a/net/ipv6/seg6.c b/net/ipv6/seg6.c
index c81407770956..7f5621d09571 100644
--- a/net/ipv6/seg6.c
+++ b/net/ipv6/seg6.c
@@ -306,9 +306,7 @@ static int seg6_genl_dumphmac(struct sk_buff *skb, struct netlink_callback *cb)
 	struct seg6_hmac_info *hinfo;
 	int ret;
 
-	ret = rhashtable_walk_start(iter);
-	if (ret && ret != -EAGAIN)
-		goto done;
+	rhashtable_walk_start(iter);
 
 	for (;;) {
 		hinfo = rhashtable_walk_next(iter);
diff --git a/net/ipv6/seg6_local.c b/net/ipv6/seg6_local.c
index 825b8e01f947..ba3767ef5e93 100644
--- a/net/ipv6/seg6_local.c
+++ b/net/ipv6/seg6_local.c
@@ -501,7 +501,7 @@ static struct seg6_action_desc *__get_action_desc(int action)
 	struct seg6_action_desc *desc;
 	int i, count;
 
-	count = sizeof(seg6_action_table) / sizeof(struct seg6_action_desc);
+	count = ARRAY_SIZE(seg6_action_table);
 	for (i = 0; i < count; i++) {
 		desc = &seg6_action_table[i];
 		if (desc->action == action)
diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c
index 7178476b3d2f..a1ab29e2ab3b 100644
--- a/net/ipv6/tcp_ipv6.c
+++ b/net/ipv6/tcp_ipv6.c
@@ -176,8 +176,7 @@ static int tcp_v6_connect(struct sock *sk, struct sockaddr *uaddr,
 			/* If interface is set while binding, indices
 			 * must coincide.
 			 */
-			if (sk->sk_bound_dev_if &&
-			    sk->sk_bound_dev_if != usin->sin6_scope_id)
+			if (!sk_dev_equal_l3scope(sk, usin->sin6_scope_id))
 				return -EINVAL;
 
 			sk->sk_bound_dev_if = usin->sin6_scope_id;
@@ -1795,7 +1794,7 @@ static void get_tcp6_sock(struct seq_file *seq, struct sock *sp, int i)
 		timer_expires = jiffies;
 	}
 
-	state = sk_state_load(sp);
+	state = inet_sk_state_load(sp);
 	if (state == TCP_LISTEN)
 		rx_queue = sp->sk_ack_backlog;
 	else
@@ -1884,7 +1883,6 @@ out:
 }
 
 static const struct file_operations tcp6_afinfo_seq_fops = {
-	.owner   = THIS_MODULE,
 	.open    = tcp_seq_open,
 	.read    = seq_read,
 	.llseek  = seq_lseek,
diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c
index 3f30fa313bf2..52e3ea0e6f50 100644
--- a/net/ipv6/udp.c
+++ b/net/ipv6/udp.c
@@ -89,28 +89,12 @@ static u32 udp6_ehashfn(const struct net *net,
 			       udp_ipv6_hash_secret + net_hash_mix(net));
 }
 
-static u32 udp6_portaddr_hash(const struct net *net,
-			      const struct in6_addr *addr6,
-			      unsigned int port)
-{
-	unsigned int hash, mix = net_hash_mix(net);
-
-	if (ipv6_addr_any(addr6))
-		hash = jhash_1word(0, mix);
-	else if (ipv6_addr_v4mapped(addr6))
-		hash = jhash_1word((__force u32)addr6->s6_addr32[3], mix);
-	else
-		hash = jhash2((__force u32 *)addr6->s6_addr32, 4, mix);
-
-	return hash ^ port;
-}
-
 int udp_v6_get_port(struct sock *sk, unsigned short snum)
 {
 	unsigned int hash2_nulladdr =
-		udp6_portaddr_hash(sock_net(sk), &in6addr_any, snum);
+		ipv6_portaddr_hash(sock_net(sk), &in6addr_any, snum);
 	unsigned int hash2_partial =
-		udp6_portaddr_hash(sock_net(sk), &sk->sk_v6_rcv_saddr, 0);
+		ipv6_portaddr_hash(sock_net(sk), &sk->sk_v6_rcv_saddr, 0);
 
 	/* precompute partial secondary hash */
 	udp_sk(sk)->udp_portaddr_hash = hash2_partial;
@@ -119,7 +103,7 @@ int udp_v6_get_port(struct sock *sk, unsigned short snum)
 
 static void udp_v6_rehash(struct sock *sk)
 {
-	u16 new_hash = udp6_portaddr_hash(sock_net(sk),
+	u16 new_hash = ipv6_portaddr_hash(sock_net(sk),
 					  &sk->sk_v6_rcv_saddr,
 					  inet_sk(sk)->inet_num);
 
@@ -184,7 +168,7 @@ static struct sock *udp6_lib_lookup2(struct net *net,
 		struct udp_hslot *hslot2, struct sk_buff *skb)
 {
 	struct sock *sk, *result;
-	int score, badness, matches = 0, reuseport = 0;
+	int score, badness;
 	u32 hash = 0;
 
 	result = NULL;
@@ -193,8 +177,7 @@ static struct sock *udp6_lib_lookup2(struct net *net,
 		score = compute_score(sk, net, saddr, sport,
 				      daddr, hnum, dif, sdif, exact_dif);
 		if (score > badness) {
-			reuseport = sk->sk_reuseport;
-			if (reuseport) {
+			if (sk->sk_reuseport) {
 				hash = udp6_ehashfn(net, daddr, hnum,
 						    saddr, sport);
 
@@ -202,15 +185,9 @@ static struct sock *udp6_lib_lookup2(struct net *net,
 							sizeof(struct udphdr));
 				if (result)
 					return result;
-				matches = 1;
 			}
 			result = sk;
 			badness = score;
-		} else if (score == badness && reuseport) {
-			matches++;
-			if (reciprocal_scale(hash, matches) == 0)
-				result = sk;
-			hash = next_pseudo_random32(hash);
 		}
 	}
 	return result;
@@ -228,11 +205,11 @@ struct sock *__udp6_lib_lookup(struct net *net,
 	unsigned int hash2, slot2, slot = udp_hashfn(net, hnum, udptable->mask);
 	struct udp_hslot *hslot2, *hslot = &udptable->hash[slot];
 	bool exact_dif = udp6_lib_exact_dif_match(net, skb);
-	int score, badness, matches = 0, reuseport = 0;
+	int score, badness;
 	u32 hash = 0;
 
 	if (hslot->count > 10) {
-		hash2 = udp6_portaddr_hash(net, daddr, hnum);
+		hash2 = ipv6_portaddr_hash(net, daddr, hnum);
 		slot2 = hash2 & udptable->mask;
 		hslot2 = &udptable->hash2[slot2];
 		if (hslot->count < hslot2->count)
@@ -243,7 +220,7 @@ struct sock *__udp6_lib_lookup(struct net *net,
 					  hslot2, skb);
 		if (!result) {
 			unsigned int old_slot2 = slot2;
-			hash2 = udp6_portaddr_hash(net, &in6addr_any, hnum);
+			hash2 = ipv6_portaddr_hash(net, &in6addr_any, hnum);
 			slot2 = hash2 & udptable->mask;
 			/* avoid searching the same slot again. */
 			if (unlikely(slot2 == old_slot2))
@@ -267,23 +244,16 @@ begin:
 		score = compute_score(sk, net, saddr, sport, daddr, hnum, dif,
 				      sdif, exact_dif);
 		if (score > badness) {
-			reuseport = sk->sk_reuseport;
-			if (reuseport) {
+			if (sk->sk_reuseport) {
 				hash = udp6_ehashfn(net, daddr, hnum,
 						    saddr, sport);
 				result = reuseport_select_sock(sk, hash, skb,
 							sizeof(struct udphdr));
 				if (result)
 					return result;
-				matches = 1;
 			}
 			result = sk;
 			badness = score;
-		} else if (score == badness && reuseport) {
-			matches++;
-			if (reciprocal_scale(hash, matches) == 0)
-				result = sk;
-			hash = next_pseudo_random32(hash);
 		}
 	}
 	return result;
@@ -719,9 +689,9 @@ static int __udp6_lib_mcast_deliver(struct net *net, struct sk_buff *skb,
 	struct sk_buff *nskb;
 
 	if (use_hash2) {
-		hash2_any = udp6_portaddr_hash(net, &in6addr_any, hnum) &
+		hash2_any = ipv6_portaddr_hash(net, &in6addr_any, hnum) &
 			    udptable->mask;
-		hash2 = udp6_portaddr_hash(net, daddr, hnum) & udptable->mask;
+		hash2 = ipv6_portaddr_hash(net, daddr, hnum) & udptable->mask;
 start_lookup:
 		hslot = &udptable->hash2[hash2];
 		offset = offsetof(typeof(*sk), __sk_common.skc_portaddr_node);
@@ -909,7 +879,7 @@ static struct sock *__udp6_lib_demux_lookup(struct net *net,
 			int dif, int sdif)
 {
 	unsigned short hnum = ntohs(loc_port);
-	unsigned int hash2 = udp6_portaddr_hash(net, loc_addr, hnum);
+	unsigned int hash2 = ipv6_portaddr_hash(net, loc_addr, hnum);
 	unsigned int slot2 = hash2 & udp_table.mask;
 	struct udp_hslot *hslot2 = &udp_table.hash2[slot2];
 	const __portpair ports = INET_COMBINED_PORTS(rmt_port, hnum);
@@ -1509,7 +1479,6 @@ int udp6_seq_show(struct seq_file *seq, void *v)
 }
 
 static const struct file_operations udp6_afinfo_seq_fops = {
-	.owner    = THIS_MODULE,
 	.open     = udp_seq_open,
 	.read     = seq_read,
 	.llseek   = seq_lseek,
diff --git a/net/ipv6/udplite.c b/net/ipv6/udplite.c
index 2784cc363f2b..14ae32bb1f3d 100644
--- a/net/ipv6/udplite.c
+++ b/net/ipv6/udplite.c
@@ -94,7 +94,6 @@ void udplitev6_exit(void)
 #ifdef CONFIG_PROC_FS
 
 static const struct file_operations udplite6_afinfo_seq_fops = {
-	.owner    = THIS_MODULE,
 	.open     = udp_seq_open,
 	.read     = seq_read,
 	.llseek   = seq_lseek,
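
The .owner removals here and in the other seq_file fops throughout this series appear to come from the tree-wide proc cleanup: .owner exists so the VFS can pin the implementing module via fops_get() before dispatching into it, but proc entries are guarded by the proc layer itself, which keeps the entry busy and makes remove_proc_entry() wait for in-flight users, so the field was dead weight for these files. A toy userspace model of what .owner buys (types are illustrative stand-ins, not the kernel's):

#include <stdbool.h>
#include <stdio.h>

struct module { int refcnt; };

struct file_operations { struct module *owner; };

static bool try_module_get(struct module *m)
{
	if (m)
		m->refcnt++;	/* NULL owner means built-in: always ok */
	return true;
}

/* mirrors the fops_get() idea: pin the module before using its ops */
static const struct file_operations *fops_get(const struct file_operations *f)
{
	return (f && try_module_get(f->owner)) ? f : NULL;
}

int main(void)
{
	struct module me = { 0 };
	const struct file_operations fops = { .owner = &me };

	if (fops_get(&fops))
		printf("module pinned, refcnt=%d\n", me.refcnt);
	return 0;
}
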
diff --git a/net/ipv6/xfrm6_mode_tunnel.c b/net/ipv6/xfrm6_mode_tunnel.c
index dc93002ff9d1..bb935a3b7fea 100644
--- a/net/ipv6/xfrm6_mode_tunnel.c
+++ b/net/ipv6/xfrm6_mode_tunnel.c
@@ -59,7 +59,7 @@ static int xfrm6_mode_tunnel_output(struct xfrm_state *x, struct sk_buff *skb)
 	if (x->props.flags & XFRM_STATE_NOECN)
 		dsfield &= ~INET_ECN_MASK;
 	ipv6_change_dsfield(top_iph, 0, dsfield);
-	top_iph->hop_limit = ip6_dst_hoplimit(dst->child);
+	top_iph->hop_limit = ip6_dst_hoplimit(xfrm_dst_child(dst));
 	top_iph->saddr = *(struct in6_addr *)&x->props.saddr;
 	top_iph->daddr = *(struct in6_addr *)&x->id.daddr;
 	return 0;
@@ -106,17 +106,14 @@ static struct sk_buff *xfrm6_mode_tunnel_gso_segment(struct xfrm_state *x,
 {
 	__skb_push(skb, skb->mac_len);
 	return skb_mac_gso_segment(skb, features);
-
 }
 
 static void xfrm6_mode_tunnel_xmit(struct xfrm_state *x, struct sk_buff *skb)
 {
 	struct xfrm_offload *xo = xfrm_offload(skb);
 
-	if (xo->flags & XFRM_GSO_SEGMENT) {
-		skb->network_header = skb->network_header - x->props.header_len;
+	if (xo->flags & XFRM_GSO_SEGMENT)
 		skb->transport_header = skb->network_header + sizeof(struct ipv6hdr);
-	}
 
 	skb_reset_mac_len(skb);
 	pskb_pull(skb, skb->mac_len + x->props.header_len);
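
Both xfrm6 hunks swap a bare dst->child dereference for xfrm_dst_child(); this belongs to the series that evicted the child pointer from struct dst_entry and kept it only in the xfrm bundle wrapper. A simplified userspace model of the accessor (the struct shapes are stand-ins for the real include/net/xfrm.h layout):

#include <stdio.h>

struct xfrm_state;

struct dst_entry {
	struct xfrm_state *xfrm;	/* non-NULL only inside a bundle */
};

struct xfrm_dst {
	struct dst_entry u_dst;		/* placed first, as in the kernel */
	struct dst_entry *child;
};

static struct dst_entry *xfrm_dst_child(const struct dst_entry *dst)
{
	if (dst->xfrm)
		return ((const struct xfrm_dst *)dst)->child;
	return NULL;			/* leaf route: nothing below it */
}

int main(void)
{
	struct dst_entry leaf = { 0 };
	struct xfrm_dst bundle = { { (struct xfrm_state *)&leaf }, &leaf };

	printf("bundle child %p, leaf child %p\n",
	       (void *)xfrm_dst_child(&bundle.u_dst),
	       (void *)xfrm_dst_child(&leaf));
	return 0;
}
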
diff --git a/net/ipv6/xfrm6_policy.c b/net/ipv6/xfrm6_policy.c
index 885ade234a49..09fb44ee3b45 100644
--- a/net/ipv6/xfrm6_policy.c
+++ b/net/ipv6/xfrm6_policy.c
@@ -265,7 +265,7 @@ static void xfrm6_dst_ifdown(struct dst_entry *dst, struct net_device *dev,
 			in6_dev_put(xdst->u.rt6.rt6i_idev);
 			xdst->u.rt6.rt6i_idev = loopback_idev;
 			in6_dev_hold(loopback_idev);
-			xdst = (struct xfrm_dst *)xdst->u.dst.child;
+			xdst = (struct xfrm_dst *)xfrm_dst_child(&xdst->u.dst);
 		} while (xdst->u.dst.xfrm);
 
 		__in6_dev_put(loopback_idev);
diff --git a/net/ipx/ipx_proc.c b/net/ipx/ipx_proc.c
index 38a3d51d9ead..b9232e4e2ed4 100644
--- a/net/ipx/ipx_proc.c
+++ b/net/ipx/ipx_proc.c
@@ -260,7 +260,6 @@ static int ipx_seq_socket_open(struct inode *inode, struct file *file)
 }
 
 static const struct file_operations ipx_seq_interface_fops = {
-	.owner		= THIS_MODULE,
 	.open           = ipx_seq_interface_open,
 	.read           = seq_read,
 	.llseek         = seq_lseek,
@@ -268,7 +267,6 @@ static const struct file_operations ipx_seq_interface_fops = {
 };
 
 static const struct file_operations ipx_seq_route_fops = {
-	.owner		= THIS_MODULE,
 	.open           = ipx_seq_route_open,
 	.read           = seq_read,
 	.llseek         = seq_lseek,
@@ -276,7 +274,6 @@ static const struct file_operations ipx_seq_route_fops = {
 };
 
 static const struct file_operations ipx_seq_socket_fops = {
-	.owner		= THIS_MODULE,
 	.open           = ipx_seq_socket_open,
 	.read           = seq_read,
 	.llseek         = seq_lseek,
diff --git a/net/kcm/kcmproc.c b/net/kcm/kcmproc.c
index bd5723315069..9d5649e4e8b7 100644
--- a/net/kcm/kcmproc.c
+++ b/net/kcm/kcmproc.c
@@ -247,7 +247,6 @@ static int kcm_seq_show(struct seq_file *seq, void *v)
 }
 
 static const struct file_operations kcm_seq_fops = {
-	.owner		= THIS_MODULE,
 	.open		= kcm_seq_open,
 	.read		= seq_read,
 	.llseek		= seq_lseek,
@@ -397,7 +396,6 @@ static int kcm_stats_seq_open(struct inode *inode, struct file *file)
 }
 
 static const struct file_operations kcm_stats_seq_fops = {
-	.owner   = THIS_MODULE,
 	.open    = kcm_stats_seq_open,
 	.read    = seq_read,
 	.llseek  = seq_lseek,
diff --git a/net/l2tp/l2tp_core.c b/net/l2tp/l2tp_core.c
index 115918ad8eca..194a7483bb93 100644
--- a/net/l2tp/l2tp_core.c
+++ b/net/l2tp/l2tp_core.c
@@ -662,10 +662,9 @@ discard:
  * |x|S|x|x|x|x|x|x|              Sequence Number                  |
  * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
  *
- * Cookie value, sublayer format and offset (pad) are negotiated with
- * the peer when the session is set up. Unlike L2TPv2, we do not need
- * to parse the packet header to determine if optional fields are
- * present.
+ * Cookie value and sublayer format are negotiated with the peer when
+ * the session is set up. Unlike L2TPv2, we do not need to parse the
+ * packet header to determine if optional fields are present.
  *
  * Caller must already have parsed the frame and determined that it is
  * a data (not control) frame before coming here. Fields up to the
@@ -731,11 +730,9 @@ void l2tp_recv_common(struct l2tp_session *session, struct sk_buff *skb,
 				 "%s: recv data ns=%u, session nr=%u\n",
 				 session->name, ns, session->nr);
 		}
+		ptr += 4;
 	}
 
-	/* Advance past L2-specific header, if present */
-	ptr += session->l2specific_len;
-
 	if (L2TP_SKB_CB(skb)->has_seq) {
 		/* Received a packet with sequence numbers. If we're the LNS,
 		 * check if we are sending sequence numbers and if not,
@@ -780,10 +777,8 @@ void l2tp_recv_common(struct l2tp_session *session, struct sk_buff *skb,
 		}
 	}
 
-	/* Session data offset is handled differently for L2TPv2 and
-	 * L2TPv3. For L2TPv2, there is an optional 16-bit value in
-	 * the header. For L2TPv3, the offset is negotiated using AVPs
-	 * in the session setup control protocol.
+	/* Session data offset is defined only for L2TPv2 and is
+	 * indicated by an optional 16-bit value in the header.
 	 */
 	if (tunnel->version == L2TP_HDR_VER_2) {
 		/* If offset bit set, skip it. */
@@ -791,8 +786,7 @@ void l2tp_recv_common(struct l2tp_session *session, struct sk_buff *skb,
 			offset = ntohs(*(__be16 *)ptr);
 			ptr += 2 + offset;
 		}
-	} else
-		ptr += session->offset;
+	}
 
 	offset = ptr - optr;
 	if (!pskb_may_pull(skb, offset))
@@ -1052,24 +1046,21 @@ static int l2tp_build_l2tpv3_header(struct l2tp_session *session, void *buf)
 		memcpy(bufp, &session->cookie[0], session->cookie_len);
 		bufp += session->cookie_len;
 	}
-	if (session->l2specific_len) {
-		if (session->l2specific_type == L2TP_L2SPECTYPE_DEFAULT) {
-			u32 l2h = 0;
-			if (session->send_seq) {
-				l2h = 0x40000000 | session->ns;
-				session->ns++;
-				session->ns &= 0xffffff;
-				l2tp_dbg(session, L2TP_MSG_SEQ,
-					 "%s: updated ns to %u\n",
-					 session->name, session->ns);
-			}
+	if (session->l2specific_type == L2TP_L2SPECTYPE_DEFAULT) {
+		u32 l2h = 0;
 
-			*((__be32 *) bufp) = htonl(l2h);
+		if (session->send_seq) {
+			l2h = 0x40000000 | session->ns;
+			session->ns++;
+			session->ns &= 0xffffff;
+			l2tp_dbg(session, L2TP_MSG_SEQ,
+				 "%s: updated ns to %u\n",
+				 session->name, session->ns);
 		}
-		bufp += session->l2specific_len;
+
+		*((__be32 *)bufp) = htonl(l2h);
+		bufp += 4;
 	}
-	if (session->offset)
-		bufp += session->offset;
 
 	return bufp - optr;
 }
@@ -1725,7 +1716,7 @@ int l2tp_session_delete(struct l2tp_session *session)
 EXPORT_SYMBOL_GPL(l2tp_session_delete);
 
 /* We come here whenever a session's send_seq, cookie_len or
- * l2specific_len parameters are set.
+ * l2specific_type parameters are set.
  */
 void l2tp_session_set_header_len(struct l2tp_session *session, int version)
 {
@@ -1734,7 +1725,8 @@ void l2tp_session_set_header_len(struct l2tp_session *session, int version)
 		if (session->send_seq)
 			session->hdr_len += 4;
 	} else {
-		session->hdr_len = 4 + session->cookie_len + session->l2specific_len + session->offset;
+		session->hdr_len = 4 + session->cookie_len;
+		session->hdr_len += l2tp_get_l2specific_len(session);
 		if (session->tunnel->encap == L2TP_ENCAPTYPE_UDP)
 			session->hdr_len += 4;
 	}
@@ -1784,9 +1776,7 @@ struct l2tp_session *l2tp_session_create(int priv_size, struct l2tp_tunnel *tunn
 			session->recv_seq = cfg->recv_seq;
 			session->lns_mode = cfg->lns_mode;
 			session->reorder_timeout = cfg->reorder_timeout;
-			session->offset = cfg->offset;
 			session->l2specific_type = cfg->l2specific_type;
-			session->l2specific_len = cfg->l2specific_len;
 			session->cookie_len = cfg->cookie_len;
 			memcpy(&session->cookie[0], &cfg->cookie[0], cfg->cookie_len);
 			session->peer_cookie_len = cfg->peer_cookie_len;
diff --git a/net/l2tp/l2tp_core.h b/net/l2tp/l2tp_core.h
index 9534e16965cc..9bbee90e9963 100644
--- a/net/l2tp/l2tp_core.h
+++ b/net/l2tp/l2tp_core.h
@@ -59,8 +59,6 @@ struct l2tp_session_cfg {
 	int			debug;		/* bitmask of debug message
 						 * categories */
 	u16			vlan_id;	/* VLAN pseudowire only */
-	u16			offset;		/* offset to payload */
-	u16			l2specific_len;	/* Layer 2 specific length */
 	u16			l2specific_type; /* Layer 2 specific type */
 	u8			cookie[8];	/* optional cookie */
 	int			cookie_len;	/* 0, 4 or 8 bytes */
@@ -86,9 +84,6 @@ struct l2tp_session {
 	int			cookie_len;
 	u8			peer_cookie[8];
 	int			peer_cookie_len;
-	u16			offset;		/* offset from end of L2TP header
-						   to beginning of data */
-	u16			l2specific_len;
 	u16			l2specific_type;
 	u16			hdr_len;
 	u32			nr;		/* session NR state (receive) */
@@ -305,6 +300,17 @@ static inline void l2tp_session_dec_refcount(struct l2tp_session *session)
 		l2tp_session_free(session);
 }
 
+static inline int l2tp_get_l2specific_len(struct l2tp_session *session)
+{
+	switch (session->l2specific_type) {
+	case L2TP_L2SPECTYPE_DEFAULT:
+		return 4;
+	case L2TP_L2SPECTYPE_NONE:
+	default:
+		return 0;
+	}
+}
+
 #define l2tp_printk(ptr, type, func, fmt, ...)				\
 do {									\
 	if (((ptr)->debug) & (type))					\
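
The new l2tp_get_l2specific_len() helper can return a constant 4 because the only sublayer the stack implements, L2TP_L2SPECTYPE_DEFAULT, is a fixed 32-bit word: the S flag in bit 30 plus a 24-bit NS counter, exactly what l2tp_build_l2tpv3_header() emits above. A self-contained model of that field:

#include <stdint.h>
#include <stdio.h>

#define L2SPEC_SEQ_FLAG	0x40000000u	/* S bit: sequence number valid */
#define L2SPEC_NS_MASK	0x00ffffffu	/* 24-bit NS counter */

static uint32_t build_default_l2spec(int send_seq, uint32_t ns)
{
	return send_seq ? (L2SPEC_SEQ_FLAG | (ns & L2SPEC_NS_MASK)) : 0;
}

int main(void)
{
	uint32_t l2h = build_default_l2spec(1, 0x123456);

	printf("S=%d ns=0x%06x, always %zu bytes on the wire\n",
	       !!(l2h & L2SPEC_SEQ_FLAG), l2h & L2SPEC_NS_MASK, sizeof(l2h));
	return 0;
}
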
diff --git a/net/l2tp/l2tp_debugfs.c b/net/l2tp/l2tp_debugfs.c
index eb69411bcb47..72e713da4733 100644
--- a/net/l2tp/l2tp_debugfs.c
+++ b/net/l2tp/l2tp_debugfs.c
@@ -180,8 +180,8 @@ static void l2tp_dfs_seq_session_show(struct seq_file *m, void *v)
 		   session->lns_mode ? "LNS" : "LAC",
 		   session->debug,
 		   jiffies_to_msecs(session->reorder_timeout));
-	seq_printf(m, "   offset %hu l2specific %hu/%hu\n",
-		   session->offset, session->l2specific_type, session->l2specific_len);
+	seq_printf(m, "   offset 0 l2specific %hu/%hu\n",
+		   session->l2specific_type, l2tp_get_l2specific_len(session));
 	if (session->cookie_len) {
 		seq_printf(m, "   cookie %02x%02x%02x%02x",
 			   session->cookie[0], session->cookie[1],
diff --git a/net/l2tp/l2tp_netlink.c b/net/l2tp/l2tp_netlink.c
index a1f24fb2be98..e7ea9c4b89ff 100644
--- a/net/l2tp/l2tp_netlink.c
+++ b/net/l2tp/l2tp_netlink.c
@@ -547,19 +547,19 @@ static int l2tp_nl_cmd_session_create(struct sk_buff *skb, struct genl_info *inf
 	}
 
 	if (tunnel->version > 2) {
-		if (info->attrs[L2TP_ATTR_OFFSET])
-			cfg.offset = nla_get_u16(info->attrs[L2TP_ATTR_OFFSET]);
-
 		if (info->attrs[L2TP_ATTR_DATA_SEQ])
 			cfg.data_seq = nla_get_u8(info->attrs[L2TP_ATTR_DATA_SEQ]);
 
-		cfg.l2specific_type = L2TP_L2SPECTYPE_DEFAULT;
-		if (info->attrs[L2TP_ATTR_L2SPEC_TYPE])
+		if (info->attrs[L2TP_ATTR_L2SPEC_TYPE]) {
 			cfg.l2specific_type = nla_get_u8(info->attrs[L2TP_ATTR_L2SPEC_TYPE]);
-
-		cfg.l2specific_len = 4;
-		if (info->attrs[L2TP_ATTR_L2SPEC_LEN])
-			cfg.l2specific_len = nla_get_u8(info->attrs[L2TP_ATTR_L2SPEC_LEN]);
+			if (cfg.l2specific_type != L2TP_L2SPECTYPE_DEFAULT &&
+			    cfg.l2specific_type != L2TP_L2SPECTYPE_NONE) {
+				ret = -EINVAL;
+				goto out_tunnel;
+			}
+		} else {
+			cfg.l2specific_type = L2TP_L2SPECTYPE_DEFAULT;
+		}
 
 		if (info->attrs[L2TP_ATTR_COOKIE]) {
 			u16 len = nla_len(info->attrs[L2TP_ATTR_COOKIE]);
@@ -620,27 +620,6 @@ static int l2tp_nl_cmd_session_create(struct sk_buff *skb, struct genl_info *inf
 		goto out_tunnel;
 	}
 
-	/* Check that pseudowire-specific params are present */
-	switch (cfg.pw_type) {
-	case L2TP_PWTYPE_NONE:
-		break;
-	case L2TP_PWTYPE_ETH_VLAN:
-		if (!info->attrs[L2TP_ATTR_VLAN_ID]) {
-			ret = -EINVAL;
-			goto out_tunnel;
-		}
-		break;
-	case L2TP_PWTYPE_ETH:
-		break;
-	case L2TP_PWTYPE_PPP:
-	case L2TP_PWTYPE_PPP_AC:
-		break;
-	case L2TP_PWTYPE_IP:
-	default:
-		ret = -EPROTONOSUPPORT;
-		break;
-	}
-
 	ret = l2tp_nl_cmd_ops[cfg.pw_type]->session_create(net, tunnel,
 							   session_id,
 							   peer_session_id,
diff --git a/net/l2tp/l2tp_ppp.c b/net/l2tp/l2tp_ppp.c
index b412fc3351dc..59f246d7b290 100644
--- a/net/l2tp/l2tp_ppp.c
+++ b/net/l2tp/l2tp_ppp.c
@@ -1734,7 +1734,6 @@ static int pppol2tp_proc_open(struct inode *inode, struct file *file)
 }
 
 static const struct file_operations pppol2tp_proc_fops = {
-	.owner		= THIS_MODULE,
 	.open		= pppol2tp_proc_open,
 	.read		= seq_read,
 	.llseek		= seq_lseek,
diff --git a/net/llc/llc_proc.c b/net/llc/llc_proc.c
index 29c509c54bb2..66821e8a2b7a 100644
--- a/net/llc/llc_proc.c
+++ b/net/llc/llc_proc.c
@@ -225,7 +225,6 @@ static int llc_seq_core_open(struct inode *inode, struct file *file)
 }
 
 static const struct file_operations llc_seq_socket_fops = {
-	.owner		= THIS_MODULE,
 	.open		= llc_seq_socket_open,
 	.read		= seq_read,
 	.llseek		= seq_lseek,
@@ -233,7 +232,6 @@ static const struct file_operations llc_seq_socket_fops = {
 };
 
 static const struct file_operations llc_seq_core_fops = {
-	.owner		= THIS_MODULE,
 	.open		= llc_seq_core_open,
 	.read		= seq_read,
 	.llseek		= seq_lseek,
diff --git a/net/mac80211/agg-rx.c b/net/mac80211/agg-rx.c
index d444752dbf40..a8b1616cec41 100644
--- a/net/mac80211/agg-rx.c
+++ b/net/mac80211/agg-rx.c
@@ -153,27 +153,16 @@ EXPORT_SYMBOL(ieee80211_stop_rx_ba_session);
  */
 static void sta_rx_agg_session_timer_expired(struct timer_list *t)
 {
-	struct tid_ampdu_rx *tid_rx_timer =
-		from_timer(tid_rx_timer, t, session_timer);
-	struct sta_info *sta = tid_rx_timer->sta;
-	u8 tid = tid_rx_timer->tid;
-	struct tid_ampdu_rx *tid_rx;
+	struct tid_ampdu_rx *tid_rx = from_timer(tid_rx, t, session_timer);
+	struct sta_info *sta = tid_rx->sta;
+	u8 tid = tid_rx->tid;
 	unsigned long timeout;
 
-	rcu_read_lock();
-	tid_rx = rcu_dereference(sta->ampdu_mlme.tid_rx[tid]);
-	if (!tid_rx) {
-		rcu_read_unlock();
-		return;
-	}
-
 	timeout = tid_rx->last_rx + TU_TO_JIFFIES(tid_rx->timeout);
 	if (time_is_after_jiffies(timeout)) {
 		mod_timer(&tid_rx->session_timer, timeout);
-		rcu_read_unlock();
 		return;
 	}
-	rcu_read_unlock();
 
 	ht_dbg(sta->sdata, "RX session timer expired on %pM tid %d\n",
 	       sta->sta.addr, tid);
@@ -415,10 +404,11 @@ end:
 					  timeout);
 }
 
-void __ieee80211_start_rx_ba_session(struct sta_info *sta,
-				     u8 dialog_token, u16 timeout,
-				     u16 start_seq_num, u16 ba_policy, u16 tid,
-				     u16 buf_size, bool tx, bool auto_seq)
+static void __ieee80211_start_rx_ba_session(struct sta_info *sta,
+					    u8 dialog_token, u16 timeout,
+					    u16 start_seq_num, u16 ba_policy,
+					    u16 tid, u16 buf_size, bool tx,
+					    bool auto_seq)
 {
 	mutex_lock(&sta->ampdu_mlme.mtx);
 	___ieee80211_start_rx_ba_session(sta, dialog_token, timeout,
diff --git a/net/mac80211/agg-tx.c b/net/mac80211/agg-tx.c
index 5f8ab5be369f..595c662a61e8 100644
--- a/net/mac80211/agg-tx.c
+++ b/net/mac80211/agg-tx.c
@@ -392,7 +392,8 @@ int ___ieee80211_stop_tx_ba_session(struct sta_info *sta, u16 tid,
 	 * telling the driver. New packets will not go through since
 	 * the aggregation session is no longer OPERATIONAL.
 	 */
-	synchronize_net();
+	if (!local->in_reconfig)
+		synchronize_net();
 
 	tid_tx->stop_initiator = reason == AGG_STOP_PEER_REQUEST ?
 					WLAN_BACK_RECIPIENT :
@@ -429,18 +430,12 @@ int ___ieee80211_stop_tx_ba_session(struct sta_info *sta, u16 tid,
  */
 static void sta_addba_resp_timer_expired(struct timer_list *t)
 {
-	struct tid_ampdu_tx *tid_tx_timer =
-		from_timer(tid_tx_timer, t, addba_resp_timer);
-	struct sta_info *sta = tid_tx_timer->sta;
-	u8 tid = tid_tx_timer->tid;
-	struct tid_ampdu_tx *tid_tx;
+	struct tid_ampdu_tx *tid_tx = from_timer(tid_tx, t, addba_resp_timer);
+	struct sta_info *sta = tid_tx->sta;
+	u8 tid = tid_tx->tid;
 
 	/* check if the TID waits for addBA response */
-	rcu_read_lock();
-	tid_tx = rcu_dereference(sta->ampdu_mlme.tid_tx[tid]);
-	if (!tid_tx ||
-	    test_bit(HT_AGG_STATE_RESPONSE_RECEIVED, &tid_tx->state)) {
-		rcu_read_unlock();
+	if (test_bit(HT_AGG_STATE_RESPONSE_RECEIVED, &tid_tx->state)) {
 		ht_dbg(sta->sdata,
 		       "timer expired on %pM tid %d not expecting addBA response\n",
 		       sta->sta.addr, tid);
@@ -451,7 +446,6 @@ static void sta_addba_resp_timer_expired(struct timer_list *t)
 	       sta->sta.addr, tid);
 
 	ieee80211_stop_tx_ba_session(&sta->sta, tid);
-	rcu_read_unlock();
 }
 
 void ieee80211_tx_ba_session_handle_start(struct sta_info *sta, int tid)
@@ -529,29 +523,21 @@ void ieee80211_tx_ba_session_handle_start(struct sta_info *sta, int tid)
  */
 static void sta_tx_agg_session_timer_expired(struct timer_list *t)
 {
-	struct tid_ampdu_tx *tid_tx_timer =
-		from_timer(tid_tx_timer, t, session_timer);
-	struct sta_info *sta = tid_tx_timer->sta;
-	u8 tid = tid_tx_timer->tid;
-	struct tid_ampdu_tx *tid_tx;
+	struct tid_ampdu_tx *tid_tx = from_timer(tid_tx, t, session_timer);
+	struct sta_info *sta = tid_tx->sta;
+	u8 tid = tid_tx->tid;
 	unsigned long timeout;
 
-	rcu_read_lock();
-	tid_tx = rcu_dereference(sta->ampdu_mlme.tid_tx[tid]);
-	if (!tid_tx || test_bit(HT_AGG_STATE_STOPPING, &tid_tx->state)) {
-		rcu_read_unlock();
+	if (test_bit(HT_AGG_STATE_STOPPING, &tid_tx->state)) {
 		return;
 	}
 
 	timeout = tid_tx->last_tx + TU_TO_JIFFIES(tid_tx->timeout);
 	if (time_is_after_jiffies(timeout)) {
 		mod_timer(&tid_tx->session_timer, timeout);
-		rcu_read_unlock();
 		return;
 	}
 
-	rcu_read_unlock();
-
 	ht_dbg(sta->sdata, "tx session timer expired on %pM tid %d\n",
 	       sta->sta.addr, tid);
 
diff --git a/net/mac80211/cfg.c b/net/mac80211/cfg.c
index fb15d3b97cb2..46028e12e216 100644
--- a/net/mac80211/cfg.c
+++ b/net/mac80211/cfg.c
@@ -573,10 +573,12 @@ static int ieee80211_get_key(struct wiphy *wiphy, struct net_device *dev,
 	case WLAN_CIPHER_SUITE_BIP_CMAC_256:
 		BUILD_BUG_ON(offsetof(typeof(kseq), ccmp) !=
 			     offsetof(typeof(kseq), aes_cmac));
+		/* fall through */
 	case WLAN_CIPHER_SUITE_BIP_GMAC_128:
 	case WLAN_CIPHER_SUITE_BIP_GMAC_256:
 		BUILD_BUG_ON(offsetof(typeof(kseq), ccmp) !=
 			     offsetof(typeof(kseq), aes_gmac));
+		/* fall through */
 	case WLAN_CIPHER_SUITE_GCMP:
 	case WLAN_CIPHER_SUITE_GCMP_256:
 		BUILD_BUG_ON(offsetof(typeof(kseq), ccmp) !=
@@ -2205,6 +2207,7 @@ static int ieee80211_scan(struct wiphy *wiphy,
 		 * for now fall through to allow scanning only when
 		 * beaconing hasn't been configured yet
 		 */
+		/* fall through */
 	case NL80211_IFTYPE_AP:
 		/*
 		 * If the scan has been forced (and the driver supports
@@ -2373,10 +2376,17 @@ static int ieee80211_set_tx_power(struct wiphy *wiphy,
 	struct ieee80211_sub_if_data *sdata;
 	enum nl80211_tx_power_setting txp_type = type;
 	bool update_txp_type = false;
+	bool has_monitor = false;
 
 	if (wdev) {
 		sdata = IEEE80211_WDEV_TO_SUB_IF(wdev);
 
+		if (sdata->vif.type == NL80211_IFTYPE_MONITOR) {
+			sdata = rtnl_dereference(local->monitor_sdata);
+			if (!sdata)
+				return -EOPNOTSUPP;
+		}
+
 		switch (type) {
 		case NL80211_TX_POWER_AUTOMATIC:
 			sdata->user_power_level = IEEE80211_UNSET_POWER_LEVEL;
@@ -2415,15 +2425,34 @@ static int ieee80211_set_tx_power(struct wiphy *wiphy,
 
 	mutex_lock(&local->iflist_mtx);
 	list_for_each_entry(sdata, &local->interfaces, list) {
+		if (sdata->vif.type == NL80211_IFTYPE_MONITOR) {
+			has_monitor = true;
+			continue;
+		}
 		sdata->user_power_level = local->user_power_level;
 		if (txp_type != sdata->vif.bss_conf.txpower_type)
 			update_txp_type = true;
 		sdata->vif.bss_conf.txpower_type = txp_type;
 	}
-	list_for_each_entry(sdata, &local->interfaces, list)
+	list_for_each_entry(sdata, &local->interfaces, list) {
+		if (sdata->vif.type == NL80211_IFTYPE_MONITOR)
+			continue;
 		ieee80211_recalc_txpower(sdata, update_txp_type);
+	}
 	mutex_unlock(&local->iflist_mtx);
 
+	if (has_monitor) {
+		sdata = rtnl_dereference(local->monitor_sdata);
+		if (sdata) {
+			sdata->user_power_level = local->user_power_level;
+			if (txp_type != sdata->vif.bss_conf.txpower_type)
+				update_txp_type = true;
+			sdata->vif.bss_conf.txpower_type = txp_type;
+
+			ieee80211_recalc_txpower(sdata, update_txp_type);
+		}
+	}
+
 	return 0;
 }
 
diff --git a/net/mac80211/debugfs.c b/net/mac80211/debugfs.c
index 5fae001f286c..1f466d12a6bc 100644
--- a/net/mac80211/debugfs.c
+++ b/net/mac80211/debugfs.c
@@ -211,6 +211,7 @@ static const char *hw_flag_names[] = {
 	FLAG(TX_FRAG_LIST),
 	FLAG(REPORTS_LOW_ACK),
 	FLAG(SUPPORTS_TX_FRAG),
+	FLAG(SUPPORTS_TDLS_BUFFER_STA),
 #undef FLAG
 };
 
diff --git a/net/mac80211/debugfs_sta.c b/net/mac80211/debugfs_sta.c
index b15412c21ac9..444ea8d127fe 100644
--- a/net/mac80211/debugfs_sta.c
+++ b/net/mac80211/debugfs_sta.c
@@ -420,7 +420,7 @@ static ssize_t sta_vht_capa_read(struct file *file, char __user *userbuf,
 		default:
 			p += scnprintf(p, sizeof(buf) + buf - p,
 				       "\t\tMAX-MPDU-UNKNOWN\n");
-		};
+		}
 		switch (vhtc->cap & IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_MASK) {
 		case 0:
 			p += scnprintf(p, sizeof(buf) + buf - p,
@@ -438,7 +438,7 @@ static ssize_t sta_vht_capa_read(struct file *file, char __user *userbuf,
 			p += scnprintf(p, sizeof(buf) + buf - p,
 				       "\t\tUNKNOWN-MHZ: 0x%x\n",
 				       (vhtc->cap >> 2) & 0x3);
-		};
+		}
 		PFLAG(RXLDPC, "RXLDPC");
 		PFLAG(SHORT_GI_80, "SHORT-GI-80");
 		PFLAG(SHORT_GI_160, "SHORT-GI-160");
diff --git a/net/mac80211/driver-ops.h b/net/mac80211/driver-ops.h
index c7f93fd9ca7a..4d82fe7d627c 100644
--- a/net/mac80211/driver-ops.h
+++ b/net/mac80211/driver-ops.h
@@ -165,7 +165,8 @@ static inline void drv_bss_info_changed(struct ieee80211_local *local,
 	if (WARN_ON_ONCE(sdata->vif.type == NL80211_IFTYPE_P2P_DEVICE ||
 			 sdata->vif.type == NL80211_IFTYPE_NAN ||
 			 (sdata->vif.type == NL80211_IFTYPE_MONITOR &&
-			  !sdata->vif.mu_mimo_owner)))
+			  !sdata->vif.mu_mimo_owner &&
+			  !(changed & BSS_CHANGED_TXPOWER))))
 		return;
 
 	if (!check_sdata_in_driver(sdata))
diff --git a/net/mac80211/ht.c b/net/mac80211/ht.c
index 1621b6ab17ba..d7523530d3f8 100644
--- a/net/mac80211/ht.c
+++ b/net/mac80211/ht.c
@@ -492,6 +492,7 @@ int ieee80211_send_smps_action(struct ieee80211_sub_if_data *sdata,
 	case IEEE80211_SMPS_AUTOMATIC:
 	case IEEE80211_SMPS_NUM_MODES:
 		WARN_ON(1);
+		/* fall through */
 	case IEEE80211_SMPS_OFF:
 		action_frame->u.action.u.ht_smps.smps_control =
 				WLAN_HT_SMPS_CONTROL_DISABLED;
diff --git a/net/mac80211/ieee80211_i.h b/net/mac80211/ieee80211_i.h
index 885d00b41911..26900025de2f 100644
--- a/net/mac80211/ieee80211_i.h
+++ b/net/mac80211/ieee80211_i.h
@@ -1757,10 +1757,6 @@ void ___ieee80211_stop_rx_ba_session(struct sta_info *sta, u16 tid,
 				     u16 initiator, u16 reason, bool stop);
 void __ieee80211_stop_rx_ba_session(struct sta_info *sta, u16 tid,
 				    u16 initiator, u16 reason, bool stop);
-void __ieee80211_start_rx_ba_session(struct sta_info *sta,
-				     u8 dialog_token, u16 timeout,
-				     u16 start_seq_num, u16 ba_policy, u16 tid,
-				     u16 buf_size, bool tx, bool auto_seq);
 void ___ieee80211_start_rx_ba_session(struct sta_info *sta,
 				      u8 dialog_token, u16 timeout,
 				      u16 start_seq_num, u16 ba_policy, u16 tid,
diff --git a/net/mac80211/iface.c b/net/mac80211/iface.c
index 13b16f90e1cf..5fe01f82df12 100644
--- a/net/mac80211/iface.c
+++ b/net/mac80211/iface.c
@@ -1474,7 +1474,7 @@ static void ieee80211_setup_sdata(struct ieee80211_sub_if_data *sdata,
 		break;
 	case NL80211_IFTYPE_UNSPECIFIED:
 	case NUM_NL80211_IFTYPES:
-		BUG();
+		WARN_ON(1);
 		break;
 	}
 
@@ -1633,7 +1633,7 @@ static void ieee80211_assign_perm_addr(struct ieee80211_local *local,
 				goto out_unlock;
 			}
 		}
-		/* otherwise fall through */
+		/* fall through */
 	default:
 		/* assign a new address if possible -- try n_addresses first */
 		for (i = 0; i < local->hw.wiphy->n_addresses; i++) {
diff --git a/net/mac80211/key.c b/net/mac80211/key.c
index 938049395f90..aee05ec3f7ea 100644
--- a/net/mac80211/key.c
+++ b/net/mac80211/key.c
@@ -178,13 +178,17 @@ static int ieee80211_key_enable_hw_accel(struct ieee80211_key *key)
 	if (!ret) {
 		key->flags |= KEY_FLAG_UPLOADED_TO_HARDWARE;
 
-		if (!((key->conf.flags & IEEE80211_KEY_FLAG_GENERATE_MMIC) ||
+		if (!((key->conf.flags & (IEEE80211_KEY_FLAG_GENERATE_MMIC |
+					   IEEE80211_KEY_FLAG_PUT_MIC_SPACE)) ||
 		      (key->conf.flags & IEEE80211_KEY_FLAG_RESERVE_TAILROOM)))
 			decrease_tailroom_need_count(sdata, 1);
 
 		WARN_ON((key->conf.flags & IEEE80211_KEY_FLAG_PUT_IV_SPACE) &&
 			(key->conf.flags & IEEE80211_KEY_FLAG_GENERATE_IV));
 
+		WARN_ON((key->conf.flags & IEEE80211_KEY_FLAG_PUT_MIC_SPACE) &&
+			(key->conf.flags & IEEE80211_KEY_FLAG_GENERATE_MMIC));
+
 		return 0;
 	}
 
@@ -237,7 +241,8 @@ static void ieee80211_key_disable_hw_accel(struct ieee80211_key *key)
 	sta = key->sta;
 	sdata = key->sdata;
 
-	if (!((key->conf.flags & IEEE80211_KEY_FLAG_GENERATE_MMIC) ||
+	if (!((key->conf.flags & (IEEE80211_KEY_FLAG_GENERATE_MMIC |
+				   IEEE80211_KEY_FLAG_PUT_MIC_SPACE)) ||
 	      (key->conf.flags & IEEE80211_KEY_FLAG_RESERVE_TAILROOM)))
 		increment_tailroom_need_count(sdata);
 
@@ -1104,7 +1109,8 @@ void ieee80211_remove_key(struct ieee80211_key_conf *keyconf)
 	if (key->flags & KEY_FLAG_UPLOADED_TO_HARDWARE) {
 		key->flags &= ~KEY_FLAG_UPLOADED_TO_HARDWARE;
 
-		if (!((key->conf.flags & IEEE80211_KEY_FLAG_GENERATE_MMIC) ||
+		if (!((key->conf.flags & (IEEE80211_KEY_FLAG_GENERATE_MMIC |
+					   IEEE80211_KEY_FLAG_PUT_MIC_SPACE)) ||
 		      (key->conf.flags & IEEE80211_KEY_FLAG_RESERVE_TAILROOM)))
 			increment_tailroom_need_count(key->sdata);
 	}
diff --git a/net/mac80211/main.c b/net/mac80211/main.c
index e054a2fd8d38..0785d04a80bc 100644
--- a/net/mac80211/main.c
+++ b/net/mac80211/main.c
@@ -263,6 +263,9 @@ static void ieee80211_restart_work(struct work_struct *work)
 	flush_delayed_work(&local->roc_work);
 	flush_work(&local->hw_roc_done);
 
+	/* wait for all packet processing to be done */
+	synchronize_net();
+
 	ieee80211_reconfig(local);
 	rtnl_unlock();
 }
diff --git a/net/mac80211/mesh.c b/net/mac80211/mesh.c
index 5e27364e10ac..73ac607beb5d 100644
--- a/net/mac80211/mesh.c
+++ b/net/mac80211/mesh.c
@@ -989,8 +989,10 @@ ieee80211_mesh_process_chnswitch(struct ieee80211_sub_if_data *sdata,
 	switch (sdata->vif.bss_conf.chandef.width) {
 	case NL80211_CHAN_WIDTH_20_NOHT:
 		sta_flags |= IEEE80211_STA_DISABLE_HT;
+		/* fall through */
 	case NL80211_CHAN_WIDTH_20:
 		sta_flags |= IEEE80211_STA_DISABLE_40MHZ;
+		/* fall through */
 	case NL80211_CHAN_WIDTH_40:
 		sta_flags |= IEEE80211_STA_DISABLE_VHT;
 		break;
diff --git a/net/mac80211/mesh_hwmp.c b/net/mac80211/mesh_hwmp.c
index 4394463a0c2e..35ad3983ae4b 100644
--- a/net/mac80211/mesh_hwmp.c
+++ b/net/mac80211/mesh_hwmp.c
@@ -1250,6 +1250,7 @@ void mesh_path_tx_root_frame(struct ieee80211_sub_if_data *sdata)
 		break;
 	case IEEE80211_PROACTIVE_PREQ_WITH_PREP:
 		flags |= IEEE80211_PREQ_PROACTIVE_PREP_FLAG;
+		/* fall through */
 	case IEEE80211_PROACTIVE_PREQ_NO_PREP:
 		interval = ifmsh->mshcfg.dot11MeshHWMPactivePathToRootTimeout;
 		target_flags |= IEEE80211_PREQ_TO_FLAG |
diff --git a/net/mac80211/mesh_pathtbl.c b/net/mac80211/mesh_pathtbl.c
index 86c8dfef56a4..a5125624a76d 100644
--- a/net/mac80211/mesh_pathtbl.c
+++ b/net/mac80211/mesh_pathtbl.c
@@ -257,9 +257,7 @@ __mesh_path_lookup_by_idx(struct mesh_table *tbl, int idx)
 	if (ret)
 		return NULL;
 
-	ret = rhashtable_walk_start(&iter);
-	if (ret && ret != -EAGAIN)
-		goto err;
+	rhashtable_walk_start(&iter);
 
 	while ((mpath = rhashtable_walk_next(&iter))) {
 		if (IS_ERR(mpath) && PTR_ERR(mpath) == -EAGAIN)
@@ -269,7 +267,6 @@ __mesh_path_lookup_by_idx(struct mesh_table *tbl, int idx)
 		if (i++ == idx)
 			break;
 	}
-err:
 	rhashtable_walk_stop(&iter);
 	rhashtable_walk_exit(&iter);
 
@@ -513,9 +510,7 @@ void mesh_plink_broken(struct sta_info *sta)
 	if (ret)
 		return;
 
-	ret = rhashtable_walk_start(&iter);
-	if (ret && ret != -EAGAIN)
-		goto out;
+	rhashtable_walk_start(&iter);
 
 	while ((mpath = rhashtable_walk_next(&iter))) {
 		if (IS_ERR(mpath) && PTR_ERR(mpath) == -EAGAIN)
@@ -535,7 +530,6 @@ void mesh_plink_broken(struct sta_info *sta)
 				WLAN_REASON_MESH_PATH_DEST_UNREACHABLE, bcast);
 		}
 	}
-out:
 	rhashtable_walk_stop(&iter);
 	rhashtable_walk_exit(&iter);
 }
@@ -584,9 +578,7 @@ void mesh_path_flush_by_nexthop(struct sta_info *sta)
 	if (ret)
 		return;
 
-	ret = rhashtable_walk_start(&iter);
-	if (ret && ret != -EAGAIN)
-		goto out;
+	rhashtable_walk_start(&iter);
 
 	while ((mpath = rhashtable_walk_next(&iter))) {
 		if (IS_ERR(mpath) && PTR_ERR(mpath) == -EAGAIN)
@@ -597,7 +589,7 @@ void mesh_path_flush_by_nexthop(struct sta_info *sta)
 		if (rcu_access_pointer(mpath->next_hop) == sta)
 			__mesh_path_del(tbl, mpath);
 	}
-out:
+
 	rhashtable_walk_stop(&iter);
 	rhashtable_walk_exit(&iter);
 }
@@ -614,9 +606,7 @@ static void mpp_flush_by_proxy(struct ieee80211_sub_if_data *sdata,
 	if (ret)
 		return;
 
-	ret = rhashtable_walk_start(&iter);
-	if (ret && ret != -EAGAIN)
-		goto out;
+	rhashtable_walk_start(&iter);
 
 	while ((mpath = rhashtable_walk_next(&iter))) {
 		if (IS_ERR(mpath) && PTR_ERR(mpath) == -EAGAIN)
@@ -627,7 +617,7 @@ static void mpp_flush_by_proxy(struct ieee80211_sub_if_data *sdata,
 		if (ether_addr_equal(mpath->mpp, proxy))
 			__mesh_path_del(tbl, mpath);
 	}
-out:
+
 	rhashtable_walk_stop(&iter);
 	rhashtable_walk_exit(&iter);
 }
@@ -642,9 +632,7 @@ static void table_flush_by_iface(struct mesh_table *tbl)
 	if (ret)
 		return;
 
-	ret = rhashtable_walk_start(&iter);
-	if (ret && ret != -EAGAIN)
-		goto out;
+	rhashtable_walk_start(&iter);
 
 	while ((mpath = rhashtable_walk_next(&iter))) {
 		if (IS_ERR(mpath) && PTR_ERR(mpath) == -EAGAIN)
@@ -653,7 +641,7 @@ static void table_flush_by_iface(struct mesh_table *tbl)
 			break;
 		__mesh_path_del(tbl, mpath);
 	}
-out:
+
 	rhashtable_walk_stop(&iter);
 	rhashtable_walk_exit(&iter);
 }
@@ -873,9 +861,7 @@ void mesh_path_tbl_expire(struct ieee80211_sub_if_data *sdata,
 	if (ret)
 		return;
 
-	ret = rhashtable_walk_start(&iter);
-	if (ret && ret != -EAGAIN)
-		goto out;
+	rhashtable_walk_start(&iter);
 
 	while ((mpath = rhashtable_walk_next(&iter))) {
 		if (IS_ERR(mpath) && PTR_ERR(mpath) == -EAGAIN)
@@ -887,7 +873,7 @@ void mesh_path_tbl_expire(struct ieee80211_sub_if_data *sdata,
 		     time_after(jiffies, mpath->exp_time + MESH_PATH_EXPIRE))
 			__mesh_path_del(tbl, mpath);
 	}
-out:
+
 	rhashtable_walk_stop(&iter);
 	rhashtable_walk_exit(&iter);
 }
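
All six walkers in mesh_pathtbl.c lose their error check for the same reason: rhashtable_walk_start() was changed tree-wide to return void, so a concurrent resize now only surfaces as -EAGAIN from rhashtable_walk_next(). A kernel-style sketch of the canonical loop after that change (not standalone-compilable; the mesh_table field name is assumed rather than checked against the full file):

#include <linux/rhashtable.h>

static void mesh_walk_all(struct mesh_table *tbl,
			  void (*fn)(struct mesh_path *mpath))
{
	struct rhashtable_iter iter;
	struct mesh_path *mpath;

	if (rhashtable_walk_init(&tbl->rhead, &iter, GFP_ATOMIC))
		return;

	rhashtable_walk_start(&iter);	/* now void, cannot fail */

	while ((mpath = rhashtable_walk_next(&iter))) {
		if (IS_ERR(mpath) && PTR_ERR(mpath) == -EAGAIN)
			continue;	/* table resize raced with us */
		if (IS_ERR(mpath))
			break;
		fn(mpath);
	}

	rhashtable_walk_stop(&iter);
	rhashtable_walk_exit(&iter);
}
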
diff --git a/net/mac80211/mesh_plink.c b/net/mac80211/mesh_plink.c
index e2d00cce3c17..0f6c9ca59062 100644
--- a/net/mac80211/mesh_plink.c
+++ b/net/mac80211/mesh_plink.c
@@ -672,7 +672,7 @@ void mesh_plink_timer(struct timer_list *t)
 			break;
 		}
 		reason = WLAN_REASON_MESH_MAX_RETRIES;
-		/* fall through on else */
+		/* fall through */
 	case NL80211_PLINK_CNF_RCVD:
 		/* confirm timer */
 		if (!reason)
diff --git a/net/mac80211/mlme.c b/net/mac80211/mlme.c
index c244691deab9..39b660b9a908 100644
--- a/net/mac80211/mlme.c
+++ b/net/mac80211/mlme.c
@@ -473,6 +473,7 @@ static void ieee80211_add_ht_ie(struct ieee80211_sub_if_data *sdata,
 	case IEEE80211_SMPS_AUTOMATIC:
 	case IEEE80211_SMPS_NUM_MODES:
 		WARN_ON(1);
+		/* fall through */
 	case IEEE80211_SMPS_OFF:
 		cap |= WLAN_HT_CAP_SM_PS_DISABLED <<
 			IEEE80211_HT_CAP_SM_PS_SHIFT;
@@ -2861,10 +2862,11 @@ static bool ieee80211_assoc_success(struct ieee80211_sub_if_data *sdata,
 	aid = le16_to_cpu(mgmt->u.assoc_resp.aid);
 	capab_info = le16_to_cpu(mgmt->u.assoc_resp.capab_info);
 
-	if ((aid & (BIT(15) | BIT(14))) != (BIT(15) | BIT(14)))
-		sdata_info(sdata, "invalid AID value 0x%x; bits 15:14 not set\n",
-			   aid);
-	aid &= ~(BIT(15) | BIT(14));
+	/*
+	 * The 5 MSB of the AID field are reserved
+	 * (802.11-2016 9.4.1.8 AID field)
+	 */
+	aid &= 0x7ff;
 
 	ifmgd->broken_ap = false;
 
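The assoc-response hunk stops warning about APs whose AID field lacks bits 15:14 and simply masks them off: per 802.11-2016 9.4.1.8 only the 11 least significant bits carry the association ID (values 1 to 2007), so 0x7ff keeps everything meaningful regardless of what the AP put in the reserved bits. For instance:

#include <stdio.h>

int main(void)
{
	unsigned int compliant = 0xc00f;	/* bits 15:14 set, AID 15 */
	unsigned int broken = 0x800f;		/* only bit 15 set, AID 15 */

	printf("aid=%u aid=%u\n", compliant & 0x7ff, broken & 0x7ff);
	return 0;
}
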
diff --git a/net/mac80211/offchannel.c b/net/mac80211/offchannel.c
index faf4f6055000..f1d40b6645ff 100644
--- a/net/mac80211/offchannel.c
+++ b/net/mac80211/offchannel.c
@@ -801,14 +801,14 @@ int ieee80211_mgmt_tx(struct wiphy *wiphy, struct wireless_dev *wdev,
 	case NL80211_IFTYPE_ADHOC:
 		if (!sdata->vif.bss_conf.ibss_joined)
 			need_offchan = true;
-		/* fall through */
 #ifdef CONFIG_MAC80211_MESH
+		/* fall through */
 	case NL80211_IFTYPE_MESH_POINT:
 		if (ieee80211_vif_is_mesh(&sdata->vif) &&
 		    !sdata->u.mesh.mesh_id_len)
 			need_offchan = true;
-		/* fall through */
 #endif
+		/* fall through */
 	case NL80211_IFTYPE_AP:
 	case NL80211_IFTYPE_AP_VLAN:
 	case NL80211_IFTYPE_P2P_GO:
diff --git a/net/mac80211/rx.c b/net/mac80211/rx.c
index 4daafb07602f..fd580614085b 100644
--- a/net/mac80211/rx.c
+++ b/net/mac80211/rx.c
@@ -1607,23 +1607,16 @@ ieee80211_rx_h_sta_process(struct ieee80211_rx_data *rx)
 
 	/*
 	 * Change STA power saving mode only at the end of a frame
-	 * exchange sequence.
+	 * exchange sequence, and only for a data or management
+	 * frame as specified in IEEE 802.11-2016 11.2.3.2
 	 */
 	if (!ieee80211_hw_check(&sta->local->hw, AP_LINK_PS) &&
 	    !ieee80211_has_morefrags(hdr->frame_control) &&
-	    !ieee80211_is_back_req(hdr->frame_control) &&
+	    (ieee80211_is_mgmt(hdr->frame_control) ||
+	     ieee80211_is_data(hdr->frame_control)) &&
 	    !(status->rx_flags & IEEE80211_RX_DEFERRED_RELEASE) &&
 	    (rx->sdata->vif.type == NL80211_IFTYPE_AP ||
-	     rx->sdata->vif.type == NL80211_IFTYPE_AP_VLAN) &&
-	    /*
-	     * PM bit is only checked in frames where it isn't reserved,
-	     * in AP mode it's reserved in non-bufferable management frames
-	     * (cf. IEEE 802.11-2012 8.2.4.1.7 Power Management field)
-	     * BAR frames should be ignored as specified in
-	     * IEEE 802.11-2012 10.2.1.2.
-	     */
-	    (!ieee80211_is_mgmt(hdr->frame_control) ||
-	     ieee80211_is_bufferable_mmpdu(hdr->frame_control))) {
+	     rx->sdata->vif.type == NL80211_IFTYPE_AP_VLAN)) {
 		if (test_sta_flag(sta, WLAN_STA_PS_STA)) {
 			if (!ieee80211_has_pm(hdr->frame_control))
 				sta_ps_end(sta);
diff --git a/net/mac80211/tdls.c b/net/mac80211/tdls.c
index 91093d4a2f84..5cd5e6e5834e 100644
--- a/net/mac80211/tdls.c
+++ b/net/mac80211/tdls.c
@@ -47,6 +47,8 @@ static void ieee80211_tdls_add_ext_capab(struct ieee80211_sub_if_data *sdata,
 			   NL80211_FEATURE_TDLS_CHANNEL_SWITCH;
 	bool wider_band = ieee80211_hw_check(&local->hw, TDLS_WIDER_BW) &&
 			  !ifmgd->tdls_wider_bw_prohibited;
+	bool buffer_sta = ieee80211_hw_check(&local->hw,
+					     SUPPORTS_TDLS_BUFFER_STA);
 	struct ieee80211_supported_band *sband = ieee80211_get_sband(sdata);
 	bool vht = sband && sband->vht_cap.vht_supported;
 	u8 *pos = skb_put(skb, 10);
@@ -56,7 +58,8 @@ static void ieee80211_tdls_add_ext_capab(struct ieee80211_sub_if_data *sdata,
 	*pos++ = 0x0;
 	*pos++ = 0x0;
 	*pos++ = 0x0;
-	*pos++ = chan_switch ? WLAN_EXT_CAPA4_TDLS_CHAN_SWITCH : 0;
+	*pos++ = (chan_switch ? WLAN_EXT_CAPA4_TDLS_CHAN_SWITCH : 0) |
+		 (buffer_sta ? WLAN_EXT_CAPA4_TDLS_BUFFER_STA : 0);
 	*pos++ = WLAN_EXT_CAPA5_TDLS_ENABLED;
 	*pos++ = 0;
 	*pos++ = 0;
@@ -236,6 +239,7 @@ static enum ieee80211_ac_numbers ieee80211_ac_from_wmm(int ac)
 	switch (ac) {
 	default:
 		WARN_ON_ONCE(1);
+		/* fall through */
 	case 0:
 		return IEEE80211_AC_BE;
 	case 1:
diff --git a/net/mac80211/tx.c b/net/mac80211/tx.c
index 3160954fc406..25904af38839 100644
--- a/net/mac80211/tx.c
+++ b/net/mac80211/tx.c
@@ -2922,7 +2922,9 @@ void ieee80211_check_fast_xmit(struct sta_info *sta)
 
 		gen_iv = build.key->conf.flags & IEEE80211_KEY_FLAG_GENERATE_IV;
 		iv_spc = build.key->conf.flags & IEEE80211_KEY_FLAG_PUT_IV_SPACE;
-		mmic = build.key->conf.flags & IEEE80211_KEY_FLAG_GENERATE_MMIC;
+		mmic = build.key->conf.flags &
+			(IEEE80211_KEY_FLAG_GENERATE_MMIC |
+			 IEEE80211_KEY_FLAG_PUT_MIC_SPACE);
 
 		/* don't handle software crypto */
 		if (!(build.key->flags & KEY_FLAG_UPLOADED_TO_HARDWARE))
diff --git a/net/mac80211/util.c b/net/mac80211/util.c
index d57e5f6bd8b6..1f82191ce601 100644
--- a/net/mac80211/util.c
+++ b/net/mac80211/util.c
@@ -2110,15 +2110,6 @@ int ieee80211_reconfig(struct ieee80211_local *local)
 		cfg80211_sched_scan_stopped_rtnl(local->hw.wiphy, 0);
 
  wake_up:
-	if (local->in_reconfig) {
-		local->in_reconfig = false;
-		barrier();
-
-		/* Restart deferred ROCs */
-		mutex_lock(&local->mtx);
-		ieee80211_start_next_roc(local);
-		mutex_unlock(&local->mtx);
-	}
 
 	if (local->monitors == local->open_count && local->monitors > 0)
 		ieee80211_add_virtual_monitor(local);
@@ -2146,6 +2137,16 @@ int ieee80211_reconfig(struct ieee80211_local *local)
 		mutex_unlock(&local->sta_mtx);
 	}
 
+	if (local->in_reconfig) {
+		local->in_reconfig = false;
+		barrier();
+
+		/* Restart deferred ROCs */
+		mutex_lock(&local->mtx);
+		ieee80211_start_next_roc(local);
+		mutex_unlock(&local->mtx);
+	}
+
 	ieee80211_wake_queues_by_reason(hw, IEEE80211_MAX_QUEUE_MAP,
 					IEEE80211_QUEUE_STOP_REASON_SUSPEND,
 					false);
diff --git a/net/mac80211/wme.c b/net/mac80211/wme.c
index 3e3d3014e9ab..5f7c96368b11 100644
--- a/net/mac80211/wme.c
+++ b/net/mac80211/wme.c
@@ -165,6 +165,7 @@ u16 ieee80211_select_queue(struct ieee80211_sub_if_data *sdata,
 			qos = sta->sta.wme;
 			break;
 		}
+		/* fall through */
 	case NL80211_IFTYPE_AP:
 		ra = skb->data;
 		break;
diff --git a/net/mac80211/wpa.c b/net/mac80211/wpa.c
index b58722d9de37..785056cb76f6 100644
--- a/net/mac80211/wpa.c
+++ b/net/mac80211/wpa.c
@@ -1,7 +1,7 @@
 /*
  * Copyright 2002-2004, Instant802 Networks, Inc.
  * Copyright 2008, Jouni Malinen <j@w1.fi>
- * Copyright (C) 2016 Intel Deutschland GmbH
+ * Copyright (C) 2016-2017 Intel Deutschland GmbH
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 as
@@ -59,8 +59,9 @@ ieee80211_tx_h_michael_mic_add(struct ieee80211_tx_data *tx)
 	if (info->control.hw_key &&
 	    (info->flags & IEEE80211_TX_CTL_DONTFRAG ||
 	     ieee80211_hw_check(&tx->local->hw, SUPPORTS_TX_FRAG)) &&
-	    !(tx->key->conf.flags & IEEE80211_KEY_FLAG_GENERATE_MMIC)) {
-		/* hwaccel - with no need for SW-generated MMIC */
+	    !(tx->key->conf.flags & (IEEE80211_KEY_FLAG_GENERATE_MMIC |
+				     IEEE80211_KEY_FLAG_PUT_MIC_SPACE))) {
+		/* hwaccel - with no need for SW-generated MMIC or MIC space */
 		return TX_CONTINUE;
 	}
 
@@ -75,8 +76,15 @@ ieee80211_tx_h_michael_mic_add(struct ieee80211_tx_data *tx)
 		 skb_tailroom(skb), tail))
 		return TX_DROP;
 
-	key = &tx->key->conf.key[NL80211_TKIP_DATA_OFFSET_TX_MIC_KEY];
 	mic = skb_put(skb, MICHAEL_MIC_LEN);
+
+	if (tx->key->conf.flags & IEEE80211_KEY_FLAG_PUT_MIC_SPACE) {
+		/* Zeroed MIC can help with debug */
+		memset(mic, 0, MICHAEL_MIC_LEN);
+		return TX_CONTINUE;
+	}
+
+	key = &tx->key->conf.key[NL80211_TKIP_DATA_OFFSET_TX_MIC_KEY];
 	michael_mic(key, hdr, data, data_len, mic);
 	if (unlikely(info->flags & IEEE80211_TX_INTFL_TKIP_MIC_FAILURE))
 		mic[0]++;
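
IEEE80211_KEY_FLAG_PUT_MIC_SPACE, threaded through key.c, tx.c and wpa.c above, is for hardware that computes the TKIP Michael MIC itself but still needs mac80211 to reserve the 8 tail bytes; zero-filling the reserved space keeps stale memory out of the frame, which also helps when reading captures. A trivial model of the reserve-and-zero step:

#include <stdio.h>
#include <string.h>

#define MICHAEL_MIC_LEN 8

int main(void)
{
	unsigned char frame[64 + MICHAEL_MIC_LEN];
	size_t len = 64;

	memset(frame, 0xab, len);		/* pretend payload */
	/* the skb_put(skb, MICHAEL_MIC_LEN) step, zeroed for HW fill */
	memset(frame + len, 0, MICHAEL_MIC_LEN);
	len += MICHAEL_MIC_LEN;

	printf("frame %zu bytes, tail reserved for the device MIC\n", len);
	return 0;
}
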
diff --git a/net/mpls/af_mpls.c b/net/mpls/af_mpls.c
index 8ca9915befc8..5dce8336d33f 100644
--- a/net/mpls/af_mpls.c
+++ b/net/mpls/af_mpls.c
@@ -2510,12 +2510,15 @@ static int __init mpls_init(void)
 
 	rtnl_af_register(&mpls_af_ops);
 
-	rtnl_register(PF_MPLS, RTM_NEWROUTE, mpls_rtm_newroute, NULL, 0);
-	rtnl_register(PF_MPLS, RTM_DELROUTE, mpls_rtm_delroute, NULL, 0);
-	rtnl_register(PF_MPLS, RTM_GETROUTE, mpls_getroute, mpls_dump_routes,
-		      0);
-	rtnl_register(PF_MPLS, RTM_GETNETCONF, mpls_netconf_get_devconf,
-		      mpls_netconf_dump_devconf, 0);
+	rtnl_register_module(THIS_MODULE, PF_MPLS, RTM_NEWROUTE,
+			     mpls_rtm_newroute, NULL, 0);
+	rtnl_register_module(THIS_MODULE, PF_MPLS, RTM_DELROUTE,
+			     mpls_rtm_delroute, NULL, 0);
+	rtnl_register_module(THIS_MODULE, PF_MPLS, RTM_GETROUTE,
+			     mpls_getroute, mpls_dump_routes, 0);
+	rtnl_register_module(THIS_MODULE, PF_MPLS, RTM_GETNETCONF,
+			     mpls_netconf_get_devconf,
+			     mpls_netconf_dump_devconf, 0);
 	err = ipgre_tunnel_encap_add_mpls_ops();
 	if (err)
 		pr_err("Can't add mpls over gre tunnel ops\n");
diff --git a/net/ncsi/ncsi-aen.c b/net/ncsi/ncsi-aen.c
index 67e708e98ccf..e7b05de1e6d1 100644
--- a/net/ncsi/ncsi-aen.c
+++ b/net/ncsi/ncsi-aen.c
@@ -143,43 +143,14 @@ static int ncsi_aen_handler_hncdsc(struct ncsi_dev_priv *ndp,
 	if (!nc)
 		return -ENODEV;
 
-	/* If the channel is active one, we need reconfigure it */
 	spin_lock_irqsave(&nc->lock, flags);
 	ncm = &nc->modes[NCSI_MODE_LINK];
 	hncdsc = (struct ncsi_aen_hncdsc_pkt *)h;
 	ncm->data[3] = ntohl(hncdsc->status);
-	netdev_info(ndp->ndev.dev, "NCSI: HNCDSC AEN - channel %u state %s\n",
-		    nc->id, ncm->data[3] & 0x3 ? "up" : "down");
-	if (!list_empty(&nc->link) ||
-	    nc->state != NCSI_CHANNEL_ACTIVE) {
-		spin_unlock_irqrestore(&nc->lock, flags);
-		return 0;
-	}
-
-	spin_unlock_irqrestore(&nc->lock, flags);
-	if (!(ndp->flags & NCSI_DEV_HWA) && !(ncm->data[3] & 0x1))
-		ndp->flags |= NCSI_DEV_RESHUFFLE;
-
-	/* If this channel is the active one and the link doesn't
-	 * work, we have to choose another channel to be active one.
-	 * The logic here is exactly similar to what we do when link
-	 * is down on the active channel.
-	 *
-	 * On the other hand, we need configure it when host driver
-	 * state on the active channel becomes ready.
-	 */
-	ncsi_stop_channel_monitor(nc);
-
-	spin_lock_irqsave(&nc->lock, flags);
-	nc->state = (ncm->data[3] & 0x1) ? NCSI_CHANNEL_INACTIVE :
-					   NCSI_CHANNEL_ACTIVE;
 	spin_unlock_irqrestore(&nc->lock, flags);
-
-	spin_lock_irqsave(&ndp->lock, flags);
-	list_add_tail_rcu(&nc->link, &ndp->channel_queue);
-	spin_unlock_irqrestore(&ndp->lock, flags);
-
-	ncsi_process_next_channel(ndp);
+	netdev_printk(KERN_DEBUG, ndp->ndev.dev,
+		      "NCSI: host driver %srunning on channel %u\n",
+		      ncm->data[3] & 0x1 ? "" : "not ", nc->id);
 
 	return 0;
 }
diff --git a/net/netfilter/Kconfig b/net/netfilter/Kconfig
index e4a13cc8a2e7..9019fa98003d 100644
--- a/net/netfilter/Kconfig
+++ b/net/netfilter/Kconfig
@@ -12,6 +12,12 @@ config NETFILTER_INGRESS
 config NETFILTER_NETLINK
 	tristate
 
+config NETFILTER_FAMILY_BRIDGE
+	bool
+
+config NETFILTER_FAMILY_ARP
+	bool
+
 config NETFILTER_NETLINK_ACCT
 tristate "Netfilter NFACCT over NFNETLINK interface"
 	depends on NETFILTER_ADVANCED
@@ -62,6 +68,8 @@ config NF_LOG_NETDEV
 	select NF_LOG_COMMON
 
 if NF_CONNTRACK
+config NETFILTER_CONNCOUNT
+	tristate
 
 config NF_CONNTRACK_MARK
 	bool  'Connection mark tracking support'
@@ -497,6 +505,13 @@ config NFT_CT
 	  This option adds the "ct" expression that you can use to match
 	  connection tracking information such as the flow state.
 
+config NFT_FLOW_OFFLOAD
+	depends on NF_CONNTRACK && NF_FLOW_TABLE
+	tristate "Netfilter nf_tables hardware flow offload module"
+	help
+	  This option adds the "flow_offload" expression that you can use to
+	  choose what flows are placed into the hardware.
+
 config NFT_SET_RBTREE
 	tristate "Netfilter nf_tables rbtree set module"
 	help
@@ -649,6 +664,23 @@ endif # NF_TABLES_NETDEV
 
 endif # NF_TABLES
 
+config NF_FLOW_TABLE_INET
+	tristate "Netfilter flow table mixed IPv4/IPv6 module"
+	depends on NF_FLOW_TABLE_IPV4 && NF_FLOW_TABLE_IPV6
+	select NF_FLOW_TABLE
+	help
+	  This option adds the flow table mixed IPv4/IPv6 support.
+
+	  To compile it as a module, choose M here.
+
+config NF_FLOW_TABLE
+	tristate "Netfilter flow table module"
+	depends on NF_CONNTRACK && NF_TABLES
+	help
+	  This option adds the flow table core infrastructure.
+
+	  To compile it as a module, choose M here.
+
 config NETFILTER_XTABLES
 	tristate "Netfilter Xtables support (required for ip_tables)"
 	default m if NETFILTER_ADVANCED=n
@@ -1120,6 +1152,7 @@ config NETFILTER_XT_MATCH_CONNLIMIT
 	tristate '"connlimit" match support'
 	depends on NF_CONNTRACK
 	depends on NETFILTER_ADVANCED
+	select NETFILTER_CONNCOUNT
 	---help---
 	  This match allows you to match against the number of parallel
 	  connections to a server per client IP address (or address block).
diff --git a/net/netfilter/Makefile b/net/netfilter/Makefile
index f78ed2470831..5d9b8b959e58 100644
--- a/net/netfilter/Makefile
+++ b/net/netfilter/Makefile
@@ -1,5 +1,5 @@
 # SPDX-License-Identifier: GPL-2.0
-netfilter-objs := core.o nf_log.o nf_queue.o nf_sockopt.o
+netfilter-objs := core.o nf_log.o nf_queue.o nf_sockopt.o utils.o
 
 nf_conntrack-y	:= nf_conntrack_core.o nf_conntrack_standalone.o nf_conntrack_expect.o nf_conntrack_helper.o nf_conntrack_proto.o nf_conntrack_l3proto_generic.o nf_conntrack_proto_generic.o nf_conntrack_proto_tcp.o nf_conntrack_proto_udp.o nf_conntrack_extend.o nf_conntrack_acct.o nf_conntrack_seqadj.o
 nf_conntrack-$(CONFIG_NF_CONNTRACK_TIMEOUT) += nf_conntrack_timeout.o
@@ -67,6 +67,8 @@ obj-$(CONFIG_NF_NAT_TFTP) += nf_nat_tftp.o
 # SYNPROXY
 obj-$(CONFIG_NETFILTER_SYNPROXY) += nf_synproxy_core.o
 
+obj-$(CONFIG_NETFILTER_CONNCOUNT) += nf_conncount.o
+
 # generic packet duplication from netdev family
 obj-$(CONFIG_NF_DUP_NETDEV)	+= nf_dup_netdev.o
 
@@ -84,6 +86,7 @@ obj-$(CONFIG_NFT_META)		+= nft_meta.o
 obj-$(CONFIG_NFT_RT)		+= nft_rt.o
 obj-$(CONFIG_NFT_NUMGEN)	+= nft_numgen.o
 obj-$(CONFIG_NFT_CT)		+= nft_ct.o
+obj-$(CONFIG_NFT_FLOW_OFFLOAD)	+= nft_flow_offload.o
 obj-$(CONFIG_NFT_LIMIT)		+= nft_limit.o
 obj-$(CONFIG_NFT_NAT)		+= nft_nat.o
 obj-$(CONFIG_NFT_OBJREF)	+= nft_objref.o
@@ -107,6 +110,10 @@ obj-$(CONFIG_NFT_FIB_NETDEV)	+= nft_fib_netdev.o
 obj-$(CONFIG_NFT_DUP_NETDEV)	+= nft_dup_netdev.o
 obj-$(CONFIG_NFT_FWD_NETDEV)	+= nft_fwd_netdev.o
 
+# flow table infrastructure
+obj-$(CONFIG_NF_FLOW_TABLE)	+= nf_flow_table.o
+obj-$(CONFIG_NF_FLOW_TABLE_INET) += nf_flow_table_inet.o
+
 # generic X tables 
 obj-$(CONFIG_NETFILTER_XTABLES) += x_tables.o xt_tcpudp.o
 
diff --git a/net/netfilter/core.c b/net/netfilter/core.c
index 52cd2901a097..0f6b8172fb9a 100644
--- a/net/netfilter/core.c
+++ b/net/netfilter/core.c
@@ -4,8 +4,7 @@
  * Thanks to Rob `CmdrTaco' Malda for not influencing this code in any
  * way.
  *
- * Rusty Russell (C)2000 -- This code is GPL.
- * Patrick McHardy (c) 2006-2012
+ * This code is GPL.
  */
 #include <linux/kernel.h>
 #include <linux/netfilter.h>
@@ -28,34 +27,12 @@
 
 #include "nf_internals.h"
 
-static DEFINE_MUTEX(afinfo_mutex);
-
-const struct nf_afinfo __rcu *nf_afinfo[NFPROTO_NUMPROTO] __read_mostly;
-EXPORT_SYMBOL(nf_afinfo);
 const struct nf_ipv6_ops __rcu *nf_ipv6_ops __read_mostly;
 EXPORT_SYMBOL_GPL(nf_ipv6_ops);
 
 DEFINE_PER_CPU(bool, nf_skb_duplicated);
 EXPORT_SYMBOL_GPL(nf_skb_duplicated);
 
-int nf_register_afinfo(const struct nf_afinfo *afinfo)
-{
-	mutex_lock(&afinfo_mutex);
-	RCU_INIT_POINTER(nf_afinfo[afinfo->family], afinfo);
-	mutex_unlock(&afinfo_mutex);
-	return 0;
-}
-EXPORT_SYMBOL_GPL(nf_register_afinfo);
-
-void nf_unregister_afinfo(const struct nf_afinfo *afinfo)
-{
-	mutex_lock(&afinfo_mutex);
-	RCU_INIT_POINTER(nf_afinfo[afinfo->family], NULL);
-	mutex_unlock(&afinfo_mutex);
-	synchronize_rcu();
-}
-EXPORT_SYMBOL_GPL(nf_unregister_afinfo);
-
 #ifdef HAVE_JUMP_LABEL
 struct static_key nf_hooks_needed[NFPROTO_NUMPROTO][NF_MAX_HOOKS];
 EXPORT_SYMBOL(nf_hooks_needed);
@@ -74,7 +51,8 @@ static struct nf_hook_entries *allocate_hook_entries_size(u16 num)
 	struct nf_hook_entries *e;
 	size_t alloc = sizeof(*e) +
 		       sizeof(struct nf_hook_entry) * num +
-		       sizeof(struct nf_hook_ops *) * num;
+		       sizeof(struct nf_hook_ops *) * num +
+		       sizeof(struct nf_hook_entries_rcu_head);
 
 	if (num == 0)
 		return NULL;
@@ -85,6 +63,30 @@ static struct nf_hook_entries *allocate_hook_entries_size(u16 num)
 	return e;
 }
 
+static void __nf_hook_entries_free(struct rcu_head *h)
+{
+	struct nf_hook_entries_rcu_head *head;
+
+	head = container_of(h, struct nf_hook_entries_rcu_head, head);
+	kvfree(head->allocation);
+}
+
+static void nf_hook_entries_free(struct nf_hook_entries *e)
+{
+	struct nf_hook_entries_rcu_head *head;
+	struct nf_hook_ops **ops;
+	unsigned int num;
+
+	if (!e)
+		return;
+
+	num = e->num_hook_entries;
+	ops = nf_hook_entries_get_hook_ops(e);
+	head = (void *)&ops[num];
+	head->allocation = e;
+	call_rcu(&head->head, __nf_hook_entries_free);
+}
+
 static unsigned int accept_all(void *priv,
 			       struct sk_buff *skb,
 			       const struct nf_hook_state *state)
@@ -135,6 +137,12 @@ nf_hook_entries_grow(const struct nf_hook_entries *old,
 			++i;
 			continue;
 		}
+
+		if (reg->nat_hook && orig_ops[i]->nat_hook) {
+			kvfree(new);
+			return ERR_PTR(-EBUSY);
+		}
+
 		if (inserted || reg->priority > orig_ops[i]->priority) {
 			new_ops[nhooks] = (void *)orig_ops[i];
 			new->hooks[nhooks] = old->hooks[i];
@@ -237,27 +245,61 @@ out_assign:
 	return old;
 }
 
-static struct nf_hook_entries __rcu **nf_hook_entry_head(struct net *net, const struct nf_hook_ops *reg)
+static struct nf_hook_entries __rcu **
+nf_hook_entry_head(struct net *net, int pf, unsigned int hooknum,
+		   struct net_device *dev)
 {
-	if (reg->pf != NFPROTO_NETDEV)
-		return net->nf.hooks[reg->pf]+reg->hooknum;
+	switch (pf) {
+	case NFPROTO_NETDEV:
+		break;
+#ifdef CONFIG_NETFILTER_FAMILY_ARP
+	case NFPROTO_ARP:
+		if (WARN_ON_ONCE(ARRAY_SIZE(net->nf.hooks_arp) <= hooknum))
+			return NULL;
+		return net->nf.hooks_arp + hooknum;
+#endif
+#ifdef CONFIG_NETFILTER_FAMILY_BRIDGE
+	case NFPROTO_BRIDGE:
+		if (WARN_ON_ONCE(ARRAY_SIZE(net->nf.hooks_bridge) <= hooknum))
+			return NULL;
+		return net->nf.hooks_bridge + hooknum;
+#endif
+	case NFPROTO_IPV4:
+		if (WARN_ON_ONCE(ARRAY_SIZE(net->nf.hooks_ipv4) <= hooknum))
+			return NULL;
+		return net->nf.hooks_ipv4 + hooknum;
+	case NFPROTO_IPV6:
+		if (WARN_ON_ONCE(ARRAY_SIZE(net->nf.hooks_ipv6) <= hooknum))
+			return NULL;
+		return net->nf.hooks_ipv6 + hooknum;
+#if IS_ENABLED(CONFIG_DECNET)
+	case NFPROTO_DECNET:
+		if (WARN_ON_ONCE(ARRAY_SIZE(net->nf.hooks_decnet) <= hooknum))
+			return NULL;
+		return net->nf.hooks_decnet + hooknum;
+#endif
+	default:
+		WARN_ON_ONCE(1);
+		return NULL;
+	}
 
 #ifdef CONFIG_NETFILTER_INGRESS
-	if (reg->hooknum == NF_NETDEV_INGRESS) {
-		if (reg->dev && dev_net(reg->dev) == net)
-			return &reg->dev->nf_hooks_ingress;
+	if (hooknum == NF_NETDEV_INGRESS) {
+		if (dev && dev_net(dev) == net)
+			return &dev->nf_hooks_ingress;
 	}
 #endif
 	WARN_ON_ONCE(1);
 	return NULL;
 }
 
-int nf_register_net_hook(struct net *net, const struct nf_hook_ops *reg)
+static int __nf_register_net_hook(struct net *net, int pf,
+				  const struct nf_hook_ops *reg)
 {
 	struct nf_hook_entries *p, *new_hooks;
 	struct nf_hook_entries __rcu **pp;
 
-	if (reg->pf == NFPROTO_NETDEV) {
+	if (pf == NFPROTO_NETDEV) {
 #ifndef CONFIG_NETFILTER_INGRESS
 		if (reg->hooknum == NF_NETDEV_INGRESS)
 			return -EOPNOTSUPP;
@@ -267,7 +309,7 @@ int nf_register_net_hook(struct net *net, const struct nf_hook_ops *reg)
 			return -EINVAL;
 	}
 
-	pp = nf_hook_entry_head(net, reg);
+	pp = nf_hook_entry_head(net, pf, reg->hooknum, reg->dev);
 	if (!pp)
 		return -EINVAL;
 
@@ -285,21 +327,19 @@ int nf_register_net_hook(struct net *net, const struct nf_hook_ops *reg)
 
 	hooks_validate(new_hooks);
 #ifdef CONFIG_NETFILTER_INGRESS
-	if (reg->pf == NFPROTO_NETDEV && reg->hooknum == NF_NETDEV_INGRESS)
+	if (pf == NFPROTO_NETDEV && reg->hooknum == NF_NETDEV_INGRESS)
 		net_inc_ingress_queue();
 #endif
 #ifdef HAVE_JUMP_LABEL
-	static_key_slow_inc(&nf_hooks_needed[reg->pf][reg->hooknum]);
+	static_key_slow_inc(&nf_hooks_needed[pf][reg->hooknum]);
 #endif
-	synchronize_net();
 	BUG_ON(p == new_hooks);
-	kvfree(p);
+	nf_hook_entries_free(p);
 	return 0;
 }
-EXPORT_SYMBOL(nf_register_net_hook);
 
 /*
- * __nf_unregister_net_hook - remove a hook from blob
+ * nf_remove_net_hook - remove a hook from blob
  *
  * @oldp: current address of hook blob
  * @unreg: hook to unregister
@@ -307,8 +347,8 @@ EXPORT_SYMBOL(nf_register_net_hook);
  * This cannot fail, hook unregistration must always succeed.
  * Therefore replace the to-be-removed hook with a dummy hook.
  */
-static void __nf_unregister_net_hook(struct nf_hook_entries *old,
-				     const struct nf_hook_ops *unreg)
+static void nf_remove_net_hook(struct nf_hook_entries *old,
+			       const struct nf_hook_ops *unreg, int pf)
 {
 	struct nf_hook_ops **orig_ops;
 	bool found = false;
@@ -326,24 +366,24 @@ static void __nf_unregister_net_hook(struct nf_hook_entries *old,
 
 	if (found) {
 #ifdef CONFIG_NETFILTER_INGRESS
-		if (unreg->pf == NFPROTO_NETDEV && unreg->hooknum == NF_NETDEV_INGRESS)
+		if (pf == NFPROTO_NETDEV && unreg->hooknum == NF_NETDEV_INGRESS)
 			net_dec_ingress_queue();
 #endif
 #ifdef HAVE_JUMP_LABEL
-		static_key_slow_dec(&nf_hooks_needed[unreg->pf][unreg->hooknum]);
+		static_key_slow_dec(&nf_hooks_needed[pf][unreg->hooknum]);
 #endif
 	} else {
-		WARN_ONCE(1, "hook not found, pf %d num %d", unreg->pf, unreg->hooknum);
+		WARN_ONCE(1, "hook not found, pf %d num %d", pf, unreg->hooknum);
 	}
 }
 
-void nf_unregister_net_hook(struct net *net, const struct nf_hook_ops *reg)
+static void __nf_unregister_net_hook(struct net *net, int pf,
+				     const struct nf_hook_ops *reg)
 {
 	struct nf_hook_entries __rcu **pp;
 	struct nf_hook_entries *p;
-	unsigned int nfq;
 
-	pp = nf_hook_entry_head(net, reg);
+	pp = nf_hook_entry_head(net, pf, reg->hooknum, reg->dev);
 	if (!pp)
 		return;
 
@@ -355,23 +395,52 @@ void nf_unregister_net_hook(struct net *net, const struct nf_hook_ops *reg)
 		return;
 	}
 
-	__nf_unregister_net_hook(p, reg);
+	nf_remove_net_hook(p, reg, pf);
 
 	p = __nf_hook_entries_try_shrink(pp);
 	mutex_unlock(&nf_hook_mutex);
 	if (!p)
 		return;
 
-	synchronize_net();
+	nf_queue_nf_hook_drop(net);
+	nf_hook_entries_free(p);
+}
 
-	/* other cpu might still process nfqueue verdict that used reg */
-	nfq = nf_queue_nf_hook_drop(net);
-	if (nfq)
-		synchronize_net();
-	kvfree(p);
+void nf_unregister_net_hook(struct net *net, const struct nf_hook_ops *reg)
+{
+	if (reg->pf == NFPROTO_INET) {
+		__nf_unregister_net_hook(net, NFPROTO_IPV4, reg);
+		__nf_unregister_net_hook(net, NFPROTO_IPV6, reg);
+	} else {
+		__nf_unregister_net_hook(net, reg->pf, reg);
+	}
 }
 EXPORT_SYMBOL(nf_unregister_net_hook);
 
+int nf_register_net_hook(struct net *net, const struct nf_hook_ops *reg)
+{
+	int err;
+
+	if (reg->pf == NFPROTO_INET) {
+		err = __nf_register_net_hook(net, NFPROTO_IPV4, reg);
+		if (err < 0)
+			return err;
+
+		err = __nf_register_net_hook(net, NFPROTO_IPV6, reg);
+		if (err < 0) {
+			__nf_unregister_net_hook(net, NFPROTO_IPV4, reg);
+			return err;
+		}
+	} else {
+		err = __nf_register_net_hook(net, reg->pf, reg);
+		if (err < 0)
+			return err;
+	}
+
+	return 0;
+}
+EXPORT_SYMBOL(nf_register_net_hook);
+
 int nf_register_net_hooks(struct net *net, const struct nf_hook_ops *reg,
 			  unsigned int n)
 {
@@ -395,63 +464,10 @@ EXPORT_SYMBOL(nf_register_net_hooks);
 void nf_unregister_net_hooks(struct net *net, const struct nf_hook_ops *reg,
 			     unsigned int hookcount)
 {
-	struct nf_hook_entries *to_free[16], *p;
-	struct nf_hook_entries __rcu **pp;
-	unsigned int i, j, n;
-
-	mutex_lock(&nf_hook_mutex);
-	for (i = 0; i < hookcount; i++) {
-		pp = nf_hook_entry_head(net, &reg[i]);
-		if (!pp)
-			continue;
-
-		p = nf_entry_dereference(*pp);
-		if (WARN_ON_ONCE(!p))
-			continue;
-		__nf_unregister_net_hook(p, &reg[i]);
-	}
-	mutex_unlock(&nf_hook_mutex);
-
-	do {
-		n = min_t(unsigned int, hookcount, ARRAY_SIZE(to_free));
-
-		mutex_lock(&nf_hook_mutex);
-
-		for (i = 0, j = 0; i < hookcount && j < n; i++) {
-			pp = nf_hook_entry_head(net, &reg[i]);
-			if (!pp)
-				continue;
-
-			p = nf_entry_dereference(*pp);
-			if (!p)
-				continue;
-
-			to_free[j] = __nf_hook_entries_try_shrink(pp);
-			if (to_free[j])
-				++j;
-		}
-
-		mutex_unlock(&nf_hook_mutex);
-
-		if (j) {
-			unsigned int nfq;
-
-			synchronize_net();
-
-			/* need 2nd synchronize_net() if nfqueue is used, skb
-			 * can get reinjected right before nf_queue_hook_drop()
-			 */
-			nfq = nf_queue_nf_hook_drop(net);
-			if (nfq)
-				synchronize_net();
-
-			for (i = 0; i < j; i++)
-				kvfree(to_free[i]);
-		}
+	unsigned int i;
 
-		reg += n;
-		hookcount -= n;
-	} while (hookcount > 0);
+	for (i = 0; i < hookcount; i++)
+		nf_unregister_net_hook(net, &reg[i]);
 }
 EXPORT_SYMBOL(nf_unregister_net_hooks);
 
@@ -569,14 +585,27 @@ void (*nf_nat_decode_session_hook)(struct sk_buff *, struct flowi *);
 EXPORT_SYMBOL(nf_nat_decode_session_hook);
 #endif
 
-static int __net_init netfilter_net_init(struct net *net)
+static void __net_init __netfilter_net_init(struct nf_hook_entries **e, int max)
 {
-	int i, h;
+	int h;
 
-	for (i = 0; i < ARRAY_SIZE(net->nf.hooks); i++) {
-		for (h = 0; h < NF_MAX_HOOKS; h++)
-			RCU_INIT_POINTER(net->nf.hooks[i][h], NULL);
-	}
+	for (h = 0; h < max; h++)
+		RCU_INIT_POINTER(e[h], NULL);
+}
+
+static int __net_init netfilter_net_init(struct net *net)
+{
+	__netfilter_net_init(net->nf.hooks_ipv4, ARRAY_SIZE(net->nf.hooks_ipv4));
+	__netfilter_net_init(net->nf.hooks_ipv6, ARRAY_SIZE(net->nf.hooks_ipv6));
+#ifdef CONFIG_NETFILTER_FAMILY_ARP
+	__netfilter_net_init(net->nf.hooks_arp, ARRAY_SIZE(net->nf.hooks_arp));
+#endif
+#ifdef CONFIG_NETFILTER_FAMILY_BRIDGE
+	__netfilter_net_init(net->nf.hooks_bridge, ARRAY_SIZE(net->nf.hooks_bridge));
+#endif
+#if IS_ENABLED(CONFIG_DECNET)
+	__netfilter_net_init(net->nf.hooks_decnet, ARRAY_SIZE(net->nf.hooks_decnet));
+#endif
 
 #ifdef CONFIG_PROC_FS
 	net->nf.proc_netfilter = proc_net_mkdir(net, "netfilter",
diff --git a/net/netfilter/ipset/ip_set_bitmap_gen.h b/net/netfilter/ipset/ip_set_bitmap_gen.h
index 5ca18f07683b..257ca393e6f2 100644
--- a/net/netfilter/ipset/ip_set_bitmap_gen.h
+++ b/net/netfilter/ipset/ip_set_bitmap_gen.h
@@ -127,14 +127,7 @@ mtype_test(struct ip_set *set, void *value, const struct ip_set_ext *ext,
 
 	if (ret <= 0)
 		return ret;
-	if (SET_WITH_TIMEOUT(set) &&
-	    ip_set_timeout_expired(ext_timeout(x, set)))
-		return 0;
-	if (SET_WITH_COUNTER(set))
-		ip_set_update_counter(ext_counter(x, set), ext, mext, flags);
-	if (SET_WITH_SKBINFO(set))
-		ip_set_get_skbinfo(ext_skbinfo(x, set), ext, mext, flags);
-	return 1;
+	return ip_set_match_extensions(set, ext, mext, flags, x);
 }
 
 static int
@@ -227,6 +220,7 @@ mtype_list(const struct ip_set *set,
 	rcu_read_lock();
 	for (; cb->args[IPSET_CB_ARG0] < map->elements;
 	     cb->args[IPSET_CB_ARG0]++) {
+		cond_resched_rcu();
 		id = cb->args[IPSET_CB_ARG0];
 		x = get_ext(set, map, id);
 		if (!test_bit(id, map->members) ||
diff --git a/net/netfilter/ipset/ip_set_bitmap_ip.c b/net/netfilter/ipset/ip_set_bitmap_ip.c
index d8975a0b4282..488d6d05c65c 100644
--- a/net/netfilter/ipset/ip_set_bitmap_ip.c
+++ b/net/netfilter/ipset/ip_set_bitmap_ip.c
@@ -263,12 +263,8 @@ bitmap_ip_create(struct net *net, struct ip_set *set, struct nlattr *tb[],
 		ret = ip_set_get_hostipaddr4(tb[IPSET_ATTR_IP_TO], &last_ip);
 		if (ret)
 			return ret;
-		if (first_ip > last_ip) {
-			u32 tmp = first_ip;
-
-			first_ip = last_ip;
-			last_ip = tmp;
-		}
+		if (first_ip > last_ip)
+			swap(first_ip, last_ip);
 	} else if (tb[IPSET_ATTR_CIDR]) {
 		u8 cidr = nla_get_u8(tb[IPSET_ATTR_CIDR]);
 
diff --git a/net/netfilter/ipset/ip_set_bitmap_ipmac.c b/net/netfilter/ipset/ip_set_bitmap_ipmac.c
index 4c279fbd2d5d..c00b6a2e8e3c 100644
--- a/net/netfilter/ipset/ip_set_bitmap_ipmac.c
+++ b/net/netfilter/ipset/ip_set_bitmap_ipmac.c
@@ -337,12 +337,8 @@ bitmap_ipmac_create(struct net *net, struct ip_set *set, struct nlattr *tb[],
 		ret = ip_set_get_hostipaddr4(tb[IPSET_ATTR_IP_TO], &last_ip);
 		if (ret)
 			return ret;
-		if (first_ip > last_ip) {
-			u32 tmp = first_ip;
-
-			first_ip = last_ip;
-			last_ip = tmp;
-		}
+		if (first_ip > last_ip)
+			swap(first_ip, last_ip);
 	} else if (tb[IPSET_ATTR_CIDR]) {
 		u8 cidr = nla_get_u8(tb[IPSET_ATTR_CIDR]);
 
diff --git a/net/netfilter/ipset/ip_set_bitmap_port.c b/net/netfilter/ipset/ip_set_bitmap_port.c
index 7f9bbd7c98b5..b561ca8b3659 100644
--- a/net/netfilter/ipset/ip_set_bitmap_port.c
+++ b/net/netfilter/ipset/ip_set_bitmap_port.c
@@ -238,12 +238,8 @@ bitmap_port_create(struct net *net, struct ip_set *set, struct nlattr *tb[],
 
 	first_port = ip_set_get_h16(tb[IPSET_ATTR_PORT]);
 	last_port = ip_set_get_h16(tb[IPSET_ATTR_PORT_TO]);
-	if (first_port > last_port) {
-		u16 tmp = first_port;
-
-		first_port = last_port;
-		last_port = tmp;
-	}
+	if (first_port > last_port)
+		swap(first_port, last_port);
 
 	elements = last_port - first_port + 1;
 	set->dsize = ip_set_elem_len(set, tb, 0, 0);
diff --git a/net/netfilter/ipset/ip_set_core.c b/net/netfilter/ipset/ip_set_core.c
index cf84f7b37cd9..975a85a48d39 100644
--- a/net/netfilter/ipset/ip_set_core.c
+++ b/net/netfilter/ipset/ip_set_core.c
@@ -57,7 +57,7 @@ MODULE_ALIAS_NFNL_SUBSYS(NFNL_SUBSYS_IPSET);
 
 /* When the nfnl mutex is held: */
 #define ip_set_dereference(p)		\
-	rcu_dereference_protected(p, 1)
+	rcu_dereference_protected(p, lockdep_nfnl_is_held(NFNL_SUBSYS_IPSET))
 #define ip_set(inst, id)		\
 	ip_set_dereference((inst)->ip_set_list)[id]
 
@@ -472,6 +472,31 @@ ip_set_put_extensions(struct sk_buff *skb, const struct ip_set *set,
 }
 EXPORT_SYMBOL_GPL(ip_set_put_extensions);
 
+bool
+ip_set_match_extensions(struct ip_set *set, const struct ip_set_ext *ext,
+			struct ip_set_ext *mext, u32 flags, void *data)
+{
+	if (SET_WITH_TIMEOUT(set) &&
+	    ip_set_timeout_expired(ext_timeout(data, set)))
+		return false;
+	if (SET_WITH_COUNTER(set)) {
+		struct ip_set_counter *counter = ext_counter(data, set);
+
+		if (flags & IPSET_FLAG_MATCH_COUNTERS &&
+		    !(ip_set_match_counter(ip_set_get_packets(counter),
+				mext->packets, mext->packets_op) &&
+		      ip_set_match_counter(ip_set_get_bytes(counter),
+				mext->bytes, mext->bytes_op)))
+			return false;
+		ip_set_update_counter(counter, ext, flags);
+	}
+	if (SET_WITH_SKBINFO(set))
+		ip_set_get_skbinfo(ext_skbinfo(data, set),
+				   ext, mext, flags);
+	return true;
+}
+EXPORT_SYMBOL_GPL(ip_set_match_extensions);
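+
+/* Note: an element matches only if it has not timed out and, when
+ * IPSET_FLAG_MATCH_COUNTERS is set, its packet/byte counters satisfy the
+ * comparison operators in @mext; only then are the counter and skbinfo
+ * extensions updated.
+ */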
+
 /* Creating/destroying/renaming/swapping affect the existence and
  * the properties of a set. All of these can be executed from userspace
  * only and serialized by the nfnl mutex indirectly from nfnetlink.
@@ -1386,11 +1411,9 @@ dump_last:
 				goto next_set;
 			if (set->variant->uref)
 				set->variant->uref(set, cb, true);
-			/* Fall through and add elements */
+			/* fall through */
 		default:
-			rcu_read_lock_bh();
 			ret = set->variant->list(set, skb, cb);
-			rcu_read_unlock_bh();
 			if (!cb->args[IPSET_CB_ARG0])
 				/* Set is done, proceed with next one */
 				goto next_set;
@@ -2055,6 +2078,7 @@ ip_set_net_exit(struct net *net)
 
 	inst->is_deleted = true; /* flag for ip_set_nfnl_put */
 
+	nfnl_lock(NFNL_SUBSYS_IPSET);
 	for (i = 0; i < inst->ip_set_max; i++) {
 		set = ip_set(inst, i);
 		if (set) {
@@ -2062,6 +2086,7 @@ ip_set_net_exit(struct net *net)
 			ip_set_destroy_set(set);
 		}
 	}
+	nfnl_unlock(NFNL_SUBSYS_IPSET);
 	kfree(rcu_dereference_protected(inst->ip_set_list, 1));
 }
 
@@ -2097,7 +2122,6 @@ ip_set_init(void)
 		return ret;
 	}
 
-	pr_info("ip_set: protocol %u\n", IPSET_PROTOCOL);
 	return 0;
 }
 
@@ -2113,3 +2137,5 @@ ip_set_fini(void)
 
 module_init(ip_set_init);
 module_exit(ip_set_fini);
+
+MODULE_DESCRIPTION("ip_set: protocol " __stringify(IPSET_PROTOCOL));
diff --git a/net/netfilter/ipset/ip_set_hash_gen.h b/net/netfilter/ipset/ip_set_hash_gen.h
index efffc8eabafe..bbad940c0137 100644
--- a/net/netfilter/ipset/ip_set_hash_gen.h
+++ b/net/netfilter/ipset/ip_set_hash_gen.h
@@ -917,12 +917,9 @@ static inline int
 mtype_data_match(struct mtype_elem *data, const struct ip_set_ext *ext,
 		 struct ip_set_ext *mext, struct ip_set *set, u32 flags)
 {
-	if (SET_WITH_COUNTER(set))
-		ip_set_update_counter(ext_counter(data, set),
-				      ext, mext, flags);
-	if (SET_WITH_SKBINFO(set))
-		ip_set_get_skbinfo(ext_skbinfo(data, set),
-				   ext, mext, flags);
+	if (!ip_set_match_extensions(set, ext, mext, flags, data))
+		return 0;
+	/* nomatch entries return -ENOTEMPTY */
 	return mtype_do_data_match(data);
 }
 
@@ -941,9 +938,9 @@ mtype_test_cidrs(struct ip_set *set, struct mtype_elem *d,
 	struct mtype_elem *data;
 #if IPSET_NET_COUNT == 2
 	struct mtype_elem orig = *d;
-	int i, j = 0, k;
+	int ret, i, j = 0, k;
 #else
-	int i, j = 0;
+	int ret, i, j = 0;
 #endif
 	u32 key, multi = 0;
 
@@ -969,18 +966,13 @@ mtype_test_cidrs(struct ip_set *set, struct mtype_elem *d,
 			data = ahash_data(n, i, set->dsize);
 			if (!mtype_data_equal(data, d, &multi))
 				continue;
-			if (SET_WITH_TIMEOUT(set)) {
-				if (!ip_set_timeout_expired(
-						ext_timeout(data, set)))
-					return mtype_data_match(data, ext,
-								mext, set,
-								flags);
+			ret = mtype_data_match(data, ext, mext, set, flags);
+			if (ret != 0)
+				return ret;
 #ifdef IP_SET_HASH_WITH_MULTI
-				multi = 0;
+			/* No match, reset multiple match flag */
+			multi = 0;
 #endif
-			} else
-				return mtype_data_match(data, ext,
-							mext, set, flags);
 		}
 #if IPSET_NET_COUNT == 2
 		}
@@ -1027,12 +1019,11 @@ mtype_test(struct ip_set *set, void *value, const struct ip_set_ext *ext,
 		if (!test_bit(i, n->used))
 			continue;
 		data = ahash_data(n, i, set->dsize);
-		if (mtype_data_equal(data, d, &multi) &&
-		    !(SET_WITH_TIMEOUT(set) &&
-		      ip_set_timeout_expired(ext_timeout(data, set)))) {
-			ret = mtype_data_match(data, ext, mext, set, flags);
+		if (!mtype_data_equal(data, d, &multi))
+			continue;
+		ret = mtype_data_match(data, ext, mext, set, flags);
+		if (ret != 0)
 			goto out;
-		}
 	}
 out:
 	return ret;
@@ -1143,6 +1134,7 @@ mtype_list(const struct ip_set *set,
 	rcu_read_lock();
 	for (; cb->args[IPSET_CB_ARG0] < jhash_size(t->htable_bits);
 	     cb->args[IPSET_CB_ARG0]++) {
+		cond_resched_rcu();
 		incomplete = skb_tail_pointer(skb);
 		n = rcu_dereference(hbucket(t, cb->args[IPSET_CB_ARG0]));
 		pr_debug("cb->arg bucket: %lu, t %p n %p\n",
diff --git a/net/netfilter/ipset/ip_set_list_set.c b/net/netfilter/ipset/ip_set_list_set.c
index e864681b8dc5..072a658fde04 100644
--- a/net/netfilter/ipset/ip_set_list_set.c
+++ b/net/netfilter/ipset/ip_set_list_set.c
@@ -55,8 +55,9 @@ list_set_ktest(struct ip_set *set, const struct sk_buff *skb,
 	       struct ip_set_adt_opt *opt, const struct ip_set_ext *ext)
 {
 	struct list_set *map = set->data;
+	struct ip_set_ext *mext = &opt->ext;
 	struct set_elem *e;
-	u32 cmdflags = opt->cmdflags;
+	u32 flags = opt->cmdflags;
 	int ret;
 
 	/* Don't lookup sub-counters at all */
@@ -64,21 +65,11 @@ list_set_ktest(struct ip_set *set, const struct sk_buff *skb,
 	if (opt->cmdflags & IPSET_FLAG_SKIP_SUBCOUNTER_UPDATE)
 		opt->cmdflags &= ~IPSET_FLAG_SKIP_COUNTER_UPDATE;
 	list_for_each_entry_rcu(e, &map->members, list) {
-		if (SET_WITH_TIMEOUT(set) &&
-		    ip_set_timeout_expired(ext_timeout(e, set)))
-			continue;
 		ret = ip_set_test(e->id, skb, par, opt);
-		if (ret > 0) {
-			if (SET_WITH_COUNTER(set))
-				ip_set_update_counter(ext_counter(e, set),
-						      ext, &opt->ext,
-						      cmdflags);
-			if (SET_WITH_SKBINFO(set))
-				ip_set_get_skbinfo(ext_skbinfo(e, set),
-						   ext, &opt->ext,
-						   cmdflags);
-			return ret;
-		}
+		if (ret <= 0)
+			continue;
+		if (ip_set_match_extensions(set, ext, mext, flags, e))
+			return 1;
 	}
 	return 0;
 }
diff --git a/net/netfilter/ipvs/ip_vs_app.c b/net/netfilter/ipvs/ip_vs_app.c
index 299edc6add5a..1c98c907bc63 100644
--- a/net/netfilter/ipvs/ip_vs_app.c
+++ b/net/netfilter/ipvs/ip_vs_app.c
@@ -595,7 +595,6 @@ static int ip_vs_app_open(struct inode *inode, struct file *file)
 }
 
 static const struct file_operations ip_vs_app_fops = {
-	.owner	 = THIS_MODULE,
 	.open	 = ip_vs_app_open,
 	.read	 = seq_read,
 	.llseek  = seq_lseek,
diff --git a/net/netfilter/ipvs/ip_vs_conn.c b/net/netfilter/ipvs/ip_vs_conn.c
index 3e053cb30070..370abbf6f421 100644
--- a/net/netfilter/ipvs/ip_vs_conn.c
+++ b/net/netfilter/ipvs/ip_vs_conn.c
@@ -322,7 +322,7 @@ ip_vs_conn_fill_param_proto(struct netns_ipvs *ipvs,
 {
 	__be16 _ports[2], *pptr;
 
-	pptr = frag_safe_skb_hp(skb, iph->len, sizeof(_ports), _ports, iph);
+	pptr = frag_safe_skb_hp(skb, iph->len, sizeof(_ports), _ports);
 	if (pptr == NULL)
 		return 1;
 
@@ -1143,7 +1143,6 @@ static int ip_vs_conn_open(struct inode *inode, struct file *file)
 }
 
 static const struct file_operations ip_vs_conn_fops = {
-	.owner	 = THIS_MODULE,
 	.open    = ip_vs_conn_open,
 	.read    = seq_read,
 	.llseek  = seq_lseek,
@@ -1221,7 +1220,6 @@ static int ip_vs_conn_sync_open(struct inode *inode, struct file *file)
 }
 
 static const struct file_operations ip_vs_conn_sync_fops = {
-	.owner	 = THIS_MODULE,
 	.open    = ip_vs_conn_sync_open,
 	.read    = seq_read,
 	.llseek  = seq_lseek,
diff --git a/net/netfilter/ipvs/ip_vs_core.c b/net/netfilter/ipvs/ip_vs_core.c
index 5cb7cac9177d..5f6f73cf2174 100644
--- a/net/netfilter/ipvs/ip_vs_core.c
+++ b/net/netfilter/ipvs/ip_vs_core.c
@@ -433,7 +433,7 @@ ip_vs_schedule(struct ip_vs_service *svc, struct sk_buff *skb,
 	/*
 	 * IPv6 frags, only the first hit here.
 	 */
-	pptr = frag_safe_skb_hp(skb, iph->len, sizeof(_ports), _ports, iph);
+	pptr = frag_safe_skb_hp(skb, iph->len, sizeof(_ports), _ports);
 	if (pptr == NULL)
 		return NULL;
 
@@ -566,7 +566,7 @@ int ip_vs_leave(struct ip_vs_service *svc, struct sk_buff *skb,
 	struct netns_ipvs *ipvs = svc->ipvs;
 	struct net *net = ipvs->net;
 
-	pptr = frag_safe_skb_hp(skb, iph->len, sizeof(_ports), _ports, iph);
+	pptr = frag_safe_skb_hp(skb, iph->len, sizeof(_ports), _ports);
 	if (!pptr)
 		return NF_DROP;
 	dport = likely(!ip_vs_iph_inverse(iph)) ? pptr[1] : pptr[0];
@@ -982,7 +982,7 @@ static int ip_vs_out_icmp_v6(struct netns_ipvs *ipvs, struct sk_buff *skb,
 	unsigned int offset;
 
 	*related = 1;
-	ic = frag_safe_skb_hp(skb, ipvsh->len, sizeof(_icmph), &_icmph, ipvsh);
+	ic = frag_safe_skb_hp(skb, ipvsh->len, sizeof(_icmph), &_icmph);
 	if (ic == NULL)
 		return NF_DROP;
 
@@ -1214,7 +1214,7 @@ static struct ip_vs_conn *__ip_vs_rs_conn_out(unsigned int hooknum,
 		return NULL;
 
 	pptr = frag_safe_skb_hp(skb, iph->len,
-				sizeof(_ports), _ports, iph);
+				sizeof(_ports), _ports);
 	if (!pptr)
 		return NULL;
 
@@ -1407,7 +1407,7 @@ ip_vs_out(struct netns_ipvs *ipvs, unsigned int hooknum, struct sk_buff *skb, in
 		__be16 _ports[2], *pptr;
 
 		pptr = frag_safe_skb_hp(skb, iph.len,
-					 sizeof(_ports), _ports, &iph);
+					 sizeof(_ports), _ports);
 		if (pptr == NULL)
 			return NF_ACCEPT;	/* Not for me */
 		if (ip_vs_has_real_service(ipvs, af, iph.protocol, &iph.saddr,
@@ -1741,7 +1741,7 @@ static int ip_vs_in_icmp_v6(struct netns_ipvs *ipvs, struct sk_buff *skb,
 
 	*related = 1;
 
-	ic = frag_safe_skb_hp(skb, iph->len, sizeof(_icmph), &_icmph, iph);
+	ic = frag_safe_skb_hp(skb, iph->len, sizeof(_icmph), &_icmph);
 	if (ic == NULL)
 		return NF_DROP;
 
diff --git a/net/netfilter/ipvs/ip_vs_ctl.c b/net/netfilter/ipvs/ip_vs_ctl.c
index fff213eacf2a..5ebde4b15810 100644
--- a/net/netfilter/ipvs/ip_vs_ctl.c
+++ b/net/netfilter/ipvs/ip_vs_ctl.c
@@ -2116,7 +2116,6 @@ static int ip_vs_info_open(struct inode *inode, struct file *file)
 }
 
 static const struct file_operations ip_vs_info_fops = {
-	.owner	 = THIS_MODULE,
 	.open    = ip_vs_info_open,
 	.read    = seq_read,
 	.llseek  = seq_lseek,
@@ -2161,7 +2160,6 @@ static int ip_vs_stats_seq_open(struct inode *inode, struct file *file)
 }
 
 static const struct file_operations ip_vs_stats_fops = {
-	.owner = THIS_MODULE,
 	.open = ip_vs_stats_seq_open,
 	.read = seq_read,
 	.llseek = seq_lseek,
@@ -2230,7 +2228,6 @@ static int ip_vs_stats_percpu_seq_open(struct inode *inode, struct file *file)
 }
 
 static const struct file_operations ip_vs_stats_percpu_fops = {
-	.owner = THIS_MODULE,
 	.open = ip_vs_stats_percpu_seq_open,
 	.read = seq_read,
 	.llseek = seq_lseek,
diff --git a/net/netfilter/ipvs/ip_vs_proto_tcp.c b/net/netfilter/ipvs/ip_vs_proto_tcp.c
index 121a321b91be..bcd9b7bde4ee 100644
--- a/net/netfilter/ipvs/ip_vs_proto_tcp.c
+++ b/net/netfilter/ipvs/ip_vs_proto_tcp.c
@@ -315,6 +315,7 @@ tcp_csum_check(int af, struct sk_buff *skb, struct ip_vs_protocol *pp)
 	switch (skb->ip_summed) {
 	case CHECKSUM_NONE:
 		skb->csum = skb_checksum(skb, tcphoff, skb->len - tcphoff, 0);
+		/* fall through */
 	case CHECKSUM_COMPLETE:
 #ifdef CONFIG_IP_VS_IPV6
 		if (af == AF_INET6) {
diff --git a/net/netfilter/ipvs/ip_vs_proto_udp.c b/net/netfilter/ipvs/ip_vs_proto_udp.c
index 30e11cd6aa8a..c15ef7c2a1fa 100644
--- a/net/netfilter/ipvs/ip_vs_proto_udp.c
+++ b/net/netfilter/ipvs/ip_vs_proto_udp.c
@@ -319,6 +319,7 @@ udp_csum_check(int af, struct sk_buff *skb, struct ip_vs_protocol *pp)
 		case CHECKSUM_NONE:
 			skb->csum = skb_checksum(skb, udphoff,
 						 skb->len - udphoff, 0);
+			/* fall through */
 		case CHECKSUM_COMPLETE:
 #ifdef CONFIG_IP_VS_IPV6
 			if (af == AF_INET6) {
diff --git a/net/netfilter/nf_conncount.c b/net/netfilter/nf_conncount.c
new file mode 100644
index 000000000000..6d65389e308f
--- /dev/null
+++ b/net/netfilter/nf_conncount.c
@@ -0,0 +1,373 @@
+/*
+ * Count the number of connections matching an arbitrary key.
+ *
+ * (C) 2017 Red Hat GmbH
+ * Author: Florian Westphal <fw@strlen.de>
+ *
+ * split from xt_connlimit.c:
+ *   (c) 2000 Gerd Knorr <kraxel@bytesex.org>
+ *   Nov 2002: Martin Bene <martin.bene@icomedias.com>:
+ *		only ignore TIME_WAIT or gone connections
+ *   (C) CC Computer Consultants GmbH, 2007
+ */
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+#include <linux/in.h>
+#include <linux/in6.h>
+#include <linux/ip.h>
+#include <linux/ipv6.h>
+#include <linux/jhash.h>
+#include <linux/slab.h>
+#include <linux/list.h>
+#include <linux/rbtree.h>
+#include <linux/module.h>
+#include <linux/random.h>
+#include <linux/skbuff.h>
+#include <linux/spinlock.h>
+#include <linux/netfilter/nf_conntrack_tcp.h>
+#include <linux/netfilter/x_tables.h>
+#include <net/netfilter/nf_conntrack.h>
+#include <net/netfilter/nf_conntrack_count.h>
+#include <net/netfilter/nf_conntrack_core.h>
+#include <net/netfilter/nf_conntrack_tuple.h>
+#include <net/netfilter/nf_conntrack_zones.h>
+
+#define CONNCOUNT_SLOTS		256U
+
+#ifdef CONFIG_LOCKDEP
+#define CONNCOUNT_LOCK_SLOTS	8U
+#else
+#define CONNCOUNT_LOCK_SLOTS	256U
+#endif
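+
+/* Fewer lock slots under lockdep, presumably to keep its per-lock
+ * tracking cheap; several hash slots then share one lock
+ * (hash % CONNCOUNT_LOCK_SLOTS in nf_conncount_count() below).
+ */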
+
+#define CONNCOUNT_GC_MAX_NODES	8
+#define MAX_KEYLEN		5
+
+/* we will save the tuples of all connections we care about */
+struct nf_conncount_tuple {
+	struct hlist_node		node;
+	struct nf_conntrack_tuple	tuple;
+};
+
+struct nf_conncount_rb {
+	struct rb_node node;
+	struct hlist_head hhead; /* connections/hosts in same subnet */
+	u32 key[MAX_KEYLEN];
+};
+
+static spinlock_t nf_conncount_locks[CONNCOUNT_LOCK_SLOTS] __cacheline_aligned_in_smp;
+
+struct nf_conncount_data {
+	unsigned int keylen;
+	struct rb_root root[CONNCOUNT_SLOTS];
+};
+
+static u_int32_t conncount_rnd __read_mostly;
+static struct kmem_cache *conncount_rb_cachep __read_mostly;
+static struct kmem_cache *conncount_conn_cachep __read_mostly;
+
+static inline bool already_closed(const struct nf_conn *conn)
+{
+	if (nf_ct_protonum(conn) == IPPROTO_TCP)
+		return conn->proto.tcp.state == TCP_CONNTRACK_TIME_WAIT ||
+		       conn->proto.tcp.state == TCP_CONNTRACK_CLOSE;
+	else
+		return false;
+}
+
+static int key_diff(const u32 *a, const u32 *b, unsigned int klen)
+{
+	return memcmp(a, b, klen * sizeof(u32));
+}
+
+static bool add_hlist(struct hlist_head *head,
+		      const struct nf_conntrack_tuple *tuple)
+{
+	struct nf_conncount_tuple *conn;
+
+	conn = kmem_cache_alloc(conncount_conn_cachep, GFP_ATOMIC);
+	if (conn == NULL)
+		return false;
+	conn->tuple = *tuple;
+	hlist_add_head(&conn->node, head);
+	return true;
+}
+
+static unsigned int check_hlist(struct net *net,
+				struct hlist_head *head,
+				const struct nf_conntrack_tuple *tuple,
+				const struct nf_conntrack_zone *zone,
+				bool *addit)
+{
+	const struct nf_conntrack_tuple_hash *found;
+	struct nf_conncount_tuple *conn;
+	struct hlist_node *n;
+	struct nf_conn *found_ct;
+	unsigned int length = 0;
+
+	*addit = true;
+
+	/* check the saved connections */
+	hlist_for_each_entry_safe(conn, n, head, node) {
+		found = nf_conntrack_find_get(net, zone, &conn->tuple);
+		if (found == NULL) {
+			hlist_del(&conn->node);
+			kmem_cache_free(conncount_conn_cachep, conn);
+			continue;
+		}
+
+		found_ct = nf_ct_tuplehash_to_ctrack(found);
+
+		if (nf_ct_tuple_equal(&conn->tuple, tuple)) {
+			/*
+			 * Just to be sure we have it only once in the list.
+			 * We should not see tuples twice unless someone hooks
+			 * this into a table without "-p tcp --syn".
+			 */
+			*addit = false;
+		} else if (already_closed(found_ct)) {
+			/*
+			 * we do not care about connections which are
+			 * closed already -> ditch it
+			 */
+			nf_ct_put(found_ct);
+			hlist_del(&conn->node);
+			kmem_cache_free(conncount_conn_cachep, conn);
+			continue;
+		}
+
+		nf_ct_put(found_ct);
+		length++;
+	}
+
+	return length;
+}
+
+static void tree_nodes_free(struct rb_root *root,
+			    struct nf_conncount_rb *gc_nodes[],
+			    unsigned int gc_count)
+{
+	struct nf_conncount_rb *rbconn;
+
+	while (gc_count) {
+		rbconn = gc_nodes[--gc_count];
+		rb_erase(&rbconn->node, root);
+		kmem_cache_free(conncount_rb_cachep, rbconn);
+	}
+}
+
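+/* Walk the rbtree slot for this key: tuples sharing a key live on the
+ * hlist of a single rbtree node. While walking, nodes whose hlist has
+ * become empty (up to CONNCOUNT_GC_MAX_NODES per pass) are collected
+ * and freed before a new node is inserted.
+ */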
+static unsigned int
+count_tree(struct net *net, struct rb_root *root,
+	   const u32 *key, u8 keylen,
+	   u8 family,
+	   const struct nf_conntrack_tuple *tuple,
+	   const struct nf_conntrack_zone *zone)
+{
+	struct nf_conncount_rb *gc_nodes[CONNCOUNT_GC_MAX_NODES];
+	struct rb_node **rbnode, *parent;
+	struct nf_conncount_rb *rbconn;
+	struct nf_conncount_tuple *conn;
+	unsigned int gc_count;
+	bool no_gc = false;
+
+ restart:
+	gc_count = 0;
+	parent = NULL;
+	rbnode = &(root->rb_node);
+	while (*rbnode) {
+		int diff;
+		bool addit;
+
+		rbconn = rb_entry(*rbnode, struct nf_conncount_rb, node);
+
+		parent = *rbnode;
+		diff = key_diff(key, rbconn->key, keylen);
+		if (diff < 0) {
+			rbnode = &((*rbnode)->rb_left);
+		} else if (diff > 0) {
+			rbnode = &((*rbnode)->rb_right);
+		} else {
+			/* same source network -> be counted! */
+			unsigned int count;
+			count = check_hlist(net, &rbconn->hhead, tuple, zone, &addit);
+
+			tree_nodes_free(root, gc_nodes, gc_count);
+			if (!addit)
+				return count;
+
+			if (!add_hlist(&rbconn->hhead, tuple))
+				return 0; /* hotdrop */
+
+			return count + 1;
+		}
+
+		if (no_gc || gc_count >= ARRAY_SIZE(gc_nodes))
+			continue;
+
+		/* only used for GC on hhead, retval and 'addit' ignored */
+		check_hlist(net, &rbconn->hhead, tuple, zone, &addit);
+		if (hlist_empty(&rbconn->hhead))
+			gc_nodes[gc_count++] = rbconn;
+	}
+
+	if (gc_count) {
+		no_gc = true;
+		tree_nodes_free(root, gc_nodes, gc_count);
+		/* tree_nodes_free() before the new allocation permits the
+		 * allocator to re-use the newly freed objects.
+		 *
+		 * This is a rare event; in most cases we will find an
+		 * existing node to re-use (or gc_count is 0).
+		 */
+		goto restart;
+	}
+
+	/* no match, need to insert new node */
+	rbconn = kmem_cache_alloc(conncount_rb_cachep, GFP_ATOMIC);
+	if (rbconn == NULL)
+		return 0;
+
+	conn = kmem_cache_alloc(conncount_conn_cachep, GFP_ATOMIC);
+	if (conn == NULL) {
+		kmem_cache_free(conncount_rb_cachep, rbconn);
+		return 0;
+	}
+
+	conn->tuple = *tuple;
+	memcpy(rbconn->key, key, sizeof(u32) * keylen);
+
+	INIT_HLIST_HEAD(&rbconn->hhead);
+	hlist_add_head(&conn->node, &rbconn->hhead);
+
+	rb_link_node(&rbconn->node, parent, rbnode);
+	rb_insert_color(&rbconn->node, root);
+	return 1;
+}
+
+unsigned int nf_conncount_count(struct net *net,
+				struct nf_conncount_data *data,
+				const u32 *key,
+				unsigned int family,
+				const struct nf_conntrack_tuple *tuple,
+				const struct nf_conntrack_zone *zone)
+{
+	struct rb_root *root;
+	int count;
+	u32 hash;
+
+	hash = jhash2(key, data->keylen, conncount_rnd) % CONNCOUNT_SLOTS;
+	root = &data->root[hash];
+
+	spin_lock_bh(&nf_conncount_locks[hash % CONNCOUNT_LOCK_SLOTS]);
+
+	count = count_tree(net, root, key, data->keylen, family, tuple, zone);
+
+	spin_unlock_bh(&nf_conncount_locks[hash % CONNCOUNT_LOCK_SLOTS]);
+
+	return count;
+}
+EXPORT_SYMBOL_GPL(nf_conncount_count);
+
+struct nf_conncount_data *nf_conncount_init(struct net *net, unsigned int family,
+					    unsigned int keylen)
+{
+	struct nf_conncount_data *data;
+	int ret, i;
+
+	if (keylen % sizeof(u32) ||
+	    keylen / sizeof(u32) > MAX_KEYLEN ||
+	    keylen == 0)
+		return ERR_PTR(-EINVAL);
+
+	net_get_random_once(&conncount_rnd, sizeof(conncount_rnd));
+
+	data = kmalloc(sizeof(*data), GFP_KERNEL);
+	if (!data)
+		return ERR_PTR(-ENOMEM);
+
+	ret = nf_ct_netns_get(net, family);
+	if (ret < 0) {
+		kfree(data);
+		return ERR_PTR(ret);
+	}
+
+	for (i = 0; i < ARRAY_SIZE(data->root); ++i)
+		data->root[i] = RB_ROOT;
+
+	data->keylen = keylen / sizeof(u32);
+
+	return data;
+}
+EXPORT_SYMBOL_GPL(nf_conncount_init);
+
+static void destroy_tree(struct rb_root *r)
+{
+	struct nf_conncount_tuple *conn;
+	struct nf_conncount_rb *rbconn;
+	struct hlist_node *n;
+	struct rb_node *node;
+
+	while ((node = rb_first(r)) != NULL) {
+		rbconn = rb_entry(node, struct nf_conncount_rb, node);
+
+		rb_erase(node, r);
+
+		hlist_for_each_entry_safe(conn, n, &rbconn->hhead, node)
+			kmem_cache_free(conncount_conn_cachep, conn);
+
+		kmem_cache_free(conncount_rb_cachep, rbconn);
+	}
+}
+
+void nf_conncount_destroy(struct net *net, unsigned int family,
+			  struct nf_conncount_data *data)
+{
+	unsigned int i;
+
+	nf_ct_netns_put(net, family);
+
+	for (i = 0; i < ARRAY_SIZE(data->root); ++i)
+		destroy_tree(&data->root[i]);
+
+	kfree(data);
+}
+EXPORT_SYMBOL_GPL(nf_conncount_destroy);
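+
+/* Sketch of the intended lifecycle (illustrative only; "my_data", the
+ * key layout and the two-u32 keylen are assumptions of this example):
+ *
+ *	struct nf_conncount_data *my_data;
+ *
+ *	my_data = nf_conncount_init(net, family, 2 * sizeof(u32));
+ *	if (IS_ERR(my_data))
+ *		return PTR_ERR(my_data);
+ *	...
+ *	count = nf_conncount_count(net, my_data, key, family, tuple, zone);
+ *	...
+ *	nf_conncount_destroy(net, family, my_data);
+ */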
+
+static int __init nf_conncount_modinit(void)
+{
+	int i;
+
+	BUILD_BUG_ON(CONNCOUNT_LOCK_SLOTS > CONNCOUNT_SLOTS);
+	BUILD_BUG_ON((CONNCOUNT_SLOTS % CONNCOUNT_LOCK_SLOTS) != 0);
+
+	for (i = 0; i < CONNCOUNT_LOCK_SLOTS; ++i)
+		spin_lock_init(&nf_conncount_locks[i]);
+
+	conncount_conn_cachep = kmem_cache_create("nf_conncount_tuple",
+					   sizeof(struct nf_conncount_tuple),
+					   0, 0, NULL);
+	if (!conncount_conn_cachep)
+		return -ENOMEM;
+
+	conncount_rb_cachep = kmem_cache_create("nf_conncount_rb",
+					   sizeof(struct nf_conncount_rb),
+					   0, 0, NULL);
+	if (!conncount_rb_cachep) {
+		kmem_cache_destroy(conncount_conn_cachep);
+		return -ENOMEM;
+	}
+
+	return 0;
+}
+
+static void __exit nf_conncount_modexit(void)
+{
+	kmem_cache_destroy(conncount_conn_cachep);
+	kmem_cache_destroy(conncount_rb_cachep);
+}
+
+module_init(nf_conncount_modinit);
+module_exit(nf_conncount_modexit);
+MODULE_AUTHOR("Jan Engelhardt <jengelh@medozas.de>");
+MODULE_AUTHOR("Florian Westphal <fw@strlen.de>");
+MODULE_DESCRIPTION("netfilter: count number of connections matching a key");
+MODULE_LICENSE("GPL");
diff --git a/net/netfilter/nf_conntrack_core.c b/net/netfilter/nf_conntrack_core.c
index 4efaa3066c78..705198de671d 100644
--- a/net/netfilter/nf_conntrack_core.c
+++ b/net/netfilter/nf_conntrack_core.c
@@ -58,8 +58,6 @@
 
 #include "nf_internals.h"
 
-#define NF_CONNTRACK_VERSION	"0.5.0"
-
 int (*nfnetlink_parse_nat_setup_hook)(struct nf_conn *ct,
 				      enum nf_nat_manip_type manip,
 				      const struct nlattr *attr) __read_mostly;
@@ -901,6 +899,9 @@ static unsigned int early_drop_list(struct net *net,
 	hlist_nulls_for_each_entry_rcu(h, n, head, hnnode) {
 		tmp = nf_ct_tuplehash_to_ctrack(h);
 
+		if (test_bit(IPS_OFFLOAD_BIT, &tmp->status))
+			continue;
+
 		if (nf_ct_is_expired(tmp)) {
 			nf_ct_gc_expired(tmp);
 			continue;
@@ -975,6 +976,18 @@ static bool gc_worker_can_early_drop(const struct nf_conn *ct)
 	return false;
 }
 
+#define	DAY	(86400 * HZ)
+
+/* Set an arbitrary timeout large enough never to expire; this saves
+ * us a check for the IPS_OFFLOAD_BIT from the packet path via
+ * nf_ct_is_expired().
+ */
+static void nf_ct_offload_timeout(struct nf_conn *ct)
+{
+	if (nf_ct_expires(ct) < DAY / 2)
+		ct->timeout = nfct_time_stamp + DAY;
+}
+
 static void gc_worker(struct work_struct *work)
 {
 	unsigned int min_interval = max(HZ / GC_MAX_BUCKETS_DIV, 1u);
@@ -1011,6 +1024,11 @@ static void gc_worker(struct work_struct *work)
 			tmp = nf_ct_tuplehash_to_ctrack(h);
 
 			scanned++;
+			if (test_bit(IPS_OFFLOAD_BIT, &tmp->status)) {
+				nf_ct_offload_timeout(tmp);
+				continue;
+			}
+
 			if (nf_ct_is_expired(tmp)) {
 				nf_ct_gc_expired(tmp);
 				expired_count++;
@@ -2048,10 +2066,6 @@ int nf_conntrack_init_start(void)
 	if (!nf_conntrack_cachep)
 		goto err_cachep;
 
-	printk(KERN_INFO "nf_conntrack version %s (%u buckets, %d max)\n",
-	       NF_CONNTRACK_VERSION, nf_conntrack_htable_size,
-	       nf_conntrack_max);
-
 	ret = nf_conntrack_expect_init();
 	if (ret < 0)
 		goto err_expect;
diff --git a/net/netfilter/nf_conntrack_expect.c b/net/netfilter/nf_conntrack_expect.c
index d6748a8a79c5..8ef21d9f9a00 100644
--- a/net/netfilter/nf_conntrack_expect.c
+++ b/net/netfilter/nf_conntrack_expect.c
@@ -649,7 +649,6 @@ static int exp_open(struct inode *inode, struct file *file)
 }
 
 static const struct file_operations exp_file_ops = {
-	.owner   = THIS_MODULE,
 	.open    = exp_open,
 	.read    = seq_read,
 	.llseek  = seq_lseek,
diff --git a/net/netfilter/nf_conntrack_h323_asn1.c b/net/netfilter/nf_conntrack_h323_asn1.c
index dc6347342e34..1601275efe2d 100644
--- a/net/netfilter/nf_conntrack_h323_asn1.c
+++ b/net/netfilter/nf_conntrack_h323_asn1.c
@@ -1,4 +1,4 @@
-/****************************************************************************
+/*
  * ip_conntrack_helper_h323_asn1.c - BER and PER decoding library for H.323
  * 			      	     conntrack/NAT module.
  *
@@ -8,7 +8,7 @@
  *
  * See ip_conntrack_helper_h323_asn1.h for details.
  *
- ****************************************************************************/
+ */
 
 #ifdef __KERNEL__
 #include <linux/kernel.h>
@@ -140,14 +140,15 @@ static const decoder_t Decoders[] = {
 	decode_choice,
 };
 
-/****************************************************************************
+/*
  * H.323 Types
- ****************************************************************************/
+ */
 #include "nf_conntrack_h323_types.c"
 
-/****************************************************************************
+/*
  * Functions
- ****************************************************************************/
+ */
+
 /* Assume bs is aligned && v < 16384 */
 static unsigned int get_len(struct bitstr *bs)
 {
@@ -177,7 +178,6 @@ static int nf_h323_error_boundary(struct bitstr *bs, size_t bytes, size_t bits)
 	return 0;
 }
 
-/****************************************************************************/
 static unsigned int get_bit(struct bitstr *bs)
 {
 	unsigned int b = (*bs->cur) & (0x80 >> bs->bit);
@@ -187,7 +187,6 @@ static unsigned int get_bit(struct bitstr *bs)
 	return b;
 }
 
-/****************************************************************************/
 /* Assume b <= 8 */
 static unsigned int get_bits(struct bitstr *bs, unsigned int b)
 {
@@ -213,7 +212,6 @@ static unsigned int get_bits(struct bitstr *bs, unsigned int b)
 	return v;
 }
 
-/****************************************************************************/
 /* Assume b <= 32 */
 static unsigned int get_bitmap(struct bitstr *bs, unsigned int b)
 {
@@ -251,9 +249,9 @@ static unsigned int get_bitmap(struct bitstr *bs, unsigned int b)
 	return v;
 }
 
-/****************************************************************************
+/*
  * Assume bs is aligned and sizeof(unsigned int) == 4
- ****************************************************************************/
+ */
 static unsigned int get_uint(struct bitstr *bs, int b)
 {
 	unsigned int v = 0;
@@ -262,12 +260,15 @@ static unsigned int get_uint(struct bitstr *bs, int b)
 	case 4:
 		v |= *bs->cur++;
 		v <<= 8;
+		/* fall through */
 	case 3:
 		v |= *bs->cur++;
 		v <<= 8;
+		/* fall through */
 	case 2:
 		v |= *bs->cur++;
 		v <<= 8;
+		/* fall through */
 	case 1:
 		v |= *bs->cur++;
 		break;
@@ -275,7 +276,6 @@ static unsigned int get_uint(struct bitstr *bs, int b)
 	return v;
 }
 
-/****************************************************************************/
 static int decode_nul(struct bitstr *bs, const struct field_t *f,
                       char *base, int level)
 {
@@ -284,7 +284,6 @@ static int decode_nul(struct bitstr *bs, const struct field_t *f,
 	return H323_ERROR_NONE;
 }
 
-/****************************************************************************/
 static int decode_bool(struct bitstr *bs, const struct field_t *f,
                        char *base, int level)
 {
@@ -296,7 +295,6 @@ static int decode_bool(struct bitstr *bs, const struct field_t *f,
 	return H323_ERROR_NONE;
 }
 
-/****************************************************************************/
 static int decode_oid(struct bitstr *bs, const struct field_t *f,
                       char *base, int level)
 {
@@ -316,7 +314,6 @@ static int decode_oid(struct bitstr *bs, const struct field_t *f,
 	return H323_ERROR_NONE;
 }
 
-/****************************************************************************/
 static int decode_int(struct bitstr *bs, const struct field_t *f,
                       char *base, int level)
 {
@@ -364,7 +361,6 @@ static int decode_int(struct bitstr *bs, const struct field_t *f,
 	return H323_ERROR_NONE;
 }
 
-/****************************************************************************/
 static int decode_enum(struct bitstr *bs, const struct field_t *f,
                        char *base, int level)
 {
@@ -381,7 +377,6 @@ static int decode_enum(struct bitstr *bs, const struct field_t *f,
 	return H323_ERROR_NONE;
 }
 
-/****************************************************************************/
 static int decode_bitstr(struct bitstr *bs, const struct field_t *f,
                          char *base, int level)
 {
@@ -418,7 +413,6 @@ static int decode_bitstr(struct bitstr *bs, const struct field_t *f,
 	return H323_ERROR_NONE;
 }
 
-/****************************************************************************/
 static int decode_numstr(struct bitstr *bs, const struct field_t *f,
                          char *base, int level)
 {
@@ -439,7 +433,6 @@ static int decode_numstr(struct bitstr *bs, const struct field_t *f,
 	return H323_ERROR_NONE;
 }
 
-/****************************************************************************/
 static int decode_octstr(struct bitstr *bs, const struct field_t *f,
                          char *base, int level)
 {
@@ -493,7 +486,6 @@ static int decode_octstr(struct bitstr *bs, const struct field_t *f,
 	return H323_ERROR_NONE;
 }
 
-/****************************************************************************/
 static int decode_bmpstr(struct bitstr *bs, const struct field_t *f,
                          char *base, int level)
 {
@@ -523,7 +515,6 @@ static int decode_bmpstr(struct bitstr *bs, const struct field_t *f,
 	return H323_ERROR_NONE;
 }
 
-/****************************************************************************/
 static int decode_seq(struct bitstr *bs, const struct field_t *f,
                       char *base, int level)
 {
@@ -653,7 +644,6 @@ static int decode_seq(struct bitstr *bs, const struct field_t *f,
 	return H323_ERROR_NONE;
 }
 
-/****************************************************************************/
 static int decode_seqof(struct bitstr *bs, const struct field_t *f,
                         char *base, int level)
 {
@@ -750,8 +740,6 @@ static int decode_seqof(struct bitstr *bs, const struct field_t *f,
 	return H323_ERROR_NONE;
 }
 
-
-/****************************************************************************/
 static int decode_choice(struct bitstr *bs, const struct field_t *f,
                          char *base, int level)
 {
@@ -833,7 +821,6 @@ static int decode_choice(struct bitstr *bs, const struct field_t *f,
 	return H323_ERROR_NONE;
 }
 
-/****************************************************************************/
 int DecodeRasMessage(unsigned char *buf, size_t sz, RasMessage *ras)
 {
 	static const struct field_t ras_message = {
@@ -849,7 +836,6 @@ int DecodeRasMessage(unsigned char *buf, size_t sz, RasMessage *ras)
 	return decode_choice(&bs, &ras_message, (char *) ras, 0);
 }
 
-/****************************************************************************/
 static int DecodeH323_UserInformation(unsigned char *buf, unsigned char *beg,
 				      size_t sz, H323_UserInformation *uuie)
 {
@@ -867,7 +853,6 @@ static int DecodeH323_UserInformation(unsigned char *buf, unsigned char *beg,
 	return decode_seq(&bs, &h323_userinformation, (char *) uuie, 0);
 }
 
-/****************************************************************************/
 int DecodeMultimediaSystemControlMessage(unsigned char *buf, size_t sz,
 					 MultimediaSystemControlMessage *
 					 mscm)
@@ -886,7 +871,6 @@ int DecodeMultimediaSystemControlMessage(unsigned char *buf, size_t sz,
 			     (char *) mscm, 0);
 }
 
-/****************************************************************************/
 int DecodeQ931(unsigned char *buf, size_t sz, Q931 *q931)
 {
 	unsigned char *p = buf;
diff --git a/net/netfilter/nf_conntrack_h323_main.c b/net/netfilter/nf_conntrack_h323_main.c
index f71f0d2558fd..005589c6d0f6 100644
--- a/net/netfilter/nf_conntrack_h323_main.c
+++ b/net/netfilter/nf_conntrack_h323_main.c
@@ -24,6 +24,7 @@
 #include <linux/skbuff.h>
 #include <net/route.h>
 #include <net/ip6_route.h>
+#include <linux/netfilter_ipv6.h>
 
 #include <net/netfilter/nf_conntrack.h>
 #include <net/netfilter/nf_conntrack_core.h>
@@ -115,7 +116,6 @@ static struct nf_conntrack_helper nf_conntrack_helper_h245;
 static struct nf_conntrack_helper nf_conntrack_helper_q931[];
 static struct nf_conntrack_helper nf_conntrack_helper_ras[];
 
-/****************************************************************************/
 static int get_tpkt_data(struct sk_buff *skb, unsigned int protoff,
 			 struct nf_conn *ct, enum ip_conntrack_info ctinfo,
 			 unsigned char **data, int *datalen, int *dataoff)
@@ -219,7 +219,6 @@ static int get_tpkt_data(struct sk_buff *skb, unsigned int protoff,
 	return 0;
 }
 
-/****************************************************************************/
 static int get_h245_addr(struct nf_conn *ct, const unsigned char *data,
 			 H245_TransportAddress *taddr,
 			 union nf_inet_addr *addr, __be16 *port)
@@ -254,7 +253,6 @@ static int get_h245_addr(struct nf_conn *ct, const unsigned char *data,
 	return 1;
 }
 
-/****************************************************************************/
 static int expect_rtp_rtcp(struct sk_buff *skb, struct nf_conn *ct,
 			   enum ip_conntrack_info ctinfo,
 			   unsigned int protoff,
@@ -328,7 +326,6 @@ static int expect_rtp_rtcp(struct sk_buff *skb, struct nf_conn *ct,
 	return ret;
 }
 
-/****************************************************************************/
 static int expect_t120(struct sk_buff *skb,
 		       struct nf_conn *ct,
 		       enum ip_conntrack_info ctinfo,
@@ -380,7 +377,6 @@ static int expect_t120(struct sk_buff *skb,
 	return ret;
 }
 
-/****************************************************************************/
 static int process_h245_channel(struct sk_buff *skb,
 				struct nf_conn *ct,
 				enum ip_conntrack_info ctinfo,
@@ -410,7 +406,6 @@ static int process_h245_channel(struct sk_buff *skb,
 	return 0;
 }
 
-/****************************************************************************/
 static int process_olc(struct sk_buff *skb, struct nf_conn *ct,
 		       enum ip_conntrack_info ctinfo,
 		       unsigned int protoff,
@@ -472,7 +467,6 @@ static int process_olc(struct sk_buff *skb, struct nf_conn *ct,
 	return 0;
 }
 
-/****************************************************************************/
 static int process_olca(struct sk_buff *skb, struct nf_conn *ct,
 			enum ip_conntrack_info ctinfo,
 			unsigned int protoff, unsigned char **data, int dataoff,
@@ -542,7 +536,6 @@ static int process_olca(struct sk_buff *skb, struct nf_conn *ct,
 	return 0;
 }
 
-/****************************************************************************/
 static int process_h245(struct sk_buff *skb, struct nf_conn *ct,
 			enum ip_conntrack_info ctinfo,
 			unsigned int protoff, unsigned char **data, int dataoff,
@@ -578,7 +571,6 @@ static int process_h245(struct sk_buff *skb, struct nf_conn *ct,
 	return 0;
 }
 
-/****************************************************************************/
 static int h245_help(struct sk_buff *skb, unsigned int protoff,
 		     struct nf_conn *ct, enum ip_conntrack_info ctinfo)
 {
@@ -628,7 +620,6 @@ static int h245_help(struct sk_buff *skb, unsigned int protoff,
 	return NF_DROP;
 }
 
-/****************************************************************************/
 static const struct nf_conntrack_expect_policy h245_exp_policy = {
 	.max_expected	= H323_RTP_CHANNEL_MAX * 4 + 2 /* T.120 */,
 	.timeout	= 240,
@@ -643,7 +634,6 @@ static struct nf_conntrack_helper nf_conntrack_helper_h245 __read_mostly = {
 	.expect_policy		= &h245_exp_policy,
 };
 
-/****************************************************************************/
 int get_h225_addr(struct nf_conn *ct, unsigned char *data,
 		  TransportAddress *taddr,
 		  union nf_inet_addr *addr, __be16 *port)
@@ -675,7 +665,6 @@ int get_h225_addr(struct nf_conn *ct, unsigned char *data,
 	return 1;
 }
 
-/****************************************************************************/
 static int expect_h245(struct sk_buff *skb, struct nf_conn *ct,
 		       enum ip_conntrack_info ctinfo,
 		       unsigned int protoff, unsigned char **data, int dataoff,
@@ -726,20 +715,15 @@ static int expect_h245(struct sk_buff *skb, struct nf_conn *ct,
 }
 
 /* If the calling party is on the same side of the forward-to party,
- * we don't need to track the second call */
+ * we don't need to track the second call
+ */
 static int callforward_do_filter(struct net *net,
 				 const union nf_inet_addr *src,
 				 const union nf_inet_addr *dst,
 				 u_int8_t family)
 {
-	const struct nf_afinfo *afinfo;
 	int ret = 0;
 
-	/* rcu_read_lock()ed by nf_hook_thresh */
-	afinfo = nf_get_afinfo(family);
-	if (!afinfo)
-		return 0;
-
 	switch (family) {
 	case AF_INET: {
 		struct flowi4 fl1, fl2;
@@ -750,10 +734,10 @@ static int callforward_do_filter(struct net *net,
 
 		memset(&fl2, 0, sizeof(fl2));
 		fl2.daddr = dst->ip;
-		if (!afinfo->route(net, (struct dst_entry **)&rt1,
-				   flowi4_to_flowi(&fl1), false)) {
-			if (!afinfo->route(net, (struct dst_entry **)&rt2,
-					   flowi4_to_flowi(&fl2), false)) {
+		if (!nf_ip_route(net, (struct dst_entry **)&rt1,
+				 flowi4_to_flowi(&fl1), false)) {
+			if (!nf_ip_route(net, (struct dst_entry **)&rt2,
+					 flowi4_to_flowi(&fl2), false)) {
 				if (rt_nexthop(rt1, fl1.daddr) ==
 				    rt_nexthop(rt2, fl2.daddr) &&
 				    rt1->dst.dev  == rt2->dst.dev)
@@ -766,18 +750,23 @@ static int callforward_do_filter(struct net *net,
 	}
 #if IS_ENABLED(CONFIG_NF_CONNTRACK_IPV6)
 	case AF_INET6: {
-		struct flowi6 fl1, fl2;
+		const struct nf_ipv6_ops *v6ops;
 		struct rt6_info *rt1, *rt2;
+		struct flowi6 fl1, fl2;
+
+		v6ops = nf_get_ipv6_ops();
+		if (!v6ops)
+			return 0;
 
 		memset(&fl1, 0, sizeof(fl1));
 		fl1.daddr = src->in6;
 
 		memset(&fl2, 0, sizeof(fl2));
 		fl2.daddr = dst->in6;
-		if (!afinfo->route(net, (struct dst_entry **)&rt1,
-				   flowi6_to_flowi(&fl1), false)) {
-			if (!afinfo->route(net, (struct dst_entry **)&rt2,
-					   flowi6_to_flowi(&fl2), false)) {
+		if (!v6ops->route(net, (struct dst_entry **)&rt1,
+				  flowi6_to_flowi(&fl1), false)) {
+			if (!v6ops->route(net, (struct dst_entry **)&rt2,
+					  flowi6_to_flowi(&fl2), false)) {
 				if (ipv6_addr_equal(rt6_nexthop(rt1, &fl1.daddr),
 						    rt6_nexthop(rt2, &fl2.daddr)) &&
 				    rt1->dst.dev == rt2->dst.dev)
@@ -794,7 +783,6 @@ static int callforward_do_filter(struct net *net,
 
 }
 
-/****************************************************************************/
 static int expect_callforwarding(struct sk_buff *skb,
 				 struct nf_conn *ct,
 				 enum ip_conntrack_info ctinfo,
@@ -815,7 +803,8 @@ static int expect_callforwarding(struct sk_buff *skb,
 		return 0;
 
 	/* If the calling party is on the same side of the forward-to party,
-	 * we don't need to track the second call */
+	 * we don't need to track the second call
+	 */
 	if (callforward_filter &&
 	    callforward_do_filter(net, &addr, &ct->tuplehash[!dir].tuple.src.u3,
 				  nf_ct_l3num(ct))) {
@@ -854,7 +843,6 @@ static int expect_callforwarding(struct sk_buff *skb,
 	return ret;
 }
 
-/****************************************************************************/
 static int process_setup(struct sk_buff *skb, struct nf_conn *ct,
 			 enum ip_conntrack_info ctinfo,
 			 unsigned int protoff,
@@ -925,7 +913,6 @@ static int process_setup(struct sk_buff *skb, struct nf_conn *ct,
 	return 0;
 }
 
-/****************************************************************************/
 static int process_callproceeding(struct sk_buff *skb,
 				  struct nf_conn *ct,
 				  enum ip_conntrack_info ctinfo,
@@ -958,7 +945,6 @@ static int process_callproceeding(struct sk_buff *skb,
 	return 0;
 }
 
-/****************************************************************************/
 static int process_connect(struct sk_buff *skb, struct nf_conn *ct,
 			   enum ip_conntrack_info ctinfo,
 			   unsigned int protoff,
@@ -990,7 +976,6 @@ static int process_connect(struct sk_buff *skb, struct nf_conn *ct,
 	return 0;
 }
 
-/****************************************************************************/
 static int process_alerting(struct sk_buff *skb, struct nf_conn *ct,
 			    enum ip_conntrack_info ctinfo,
 			    unsigned int protoff,
@@ -1022,7 +1007,6 @@ static int process_alerting(struct sk_buff *skb, struct nf_conn *ct,
 	return 0;
 }
 
-/****************************************************************************/
 static int process_facility(struct sk_buff *skb, struct nf_conn *ct,
 			    enum ip_conntrack_info ctinfo,
 			    unsigned int protoff,
@@ -1063,7 +1047,6 @@ static int process_facility(struct sk_buff *skb, struct nf_conn *ct,
 	return 0;
 }
 
-/****************************************************************************/
 static int process_progress(struct sk_buff *skb, struct nf_conn *ct,
 			    enum ip_conntrack_info ctinfo,
 			    unsigned int protoff,
@@ -1095,7 +1078,6 @@ static int process_progress(struct sk_buff *skb, struct nf_conn *ct,
 	return 0;
 }
 
-/****************************************************************************/
 static int process_q931(struct sk_buff *skb, struct nf_conn *ct,
 			enum ip_conntrack_info ctinfo,
 			unsigned int protoff, unsigned char **data, int dataoff,
@@ -1154,7 +1136,6 @@ static int process_q931(struct sk_buff *skb, struct nf_conn *ct,
 	return 0;
 }
 
-/****************************************************************************/
 static int q931_help(struct sk_buff *skb, unsigned int protoff,
 		     struct nf_conn *ct, enum ip_conntrack_info ctinfo)
 {
@@ -1203,7 +1184,6 @@ static int q931_help(struct sk_buff *skb, unsigned int protoff,
 	return NF_DROP;
 }
 
-/****************************************************************************/
 static const struct nf_conntrack_expect_policy q931_exp_policy = {
 	/* T.120 and H.245 */
 	.max_expected		= H323_RTP_CHANNEL_MAX * 4 + 4,
@@ -1231,7 +1211,6 @@ static struct nf_conntrack_helper nf_conntrack_helper_q931[] __read_mostly = {
 	},
 };
 
-/****************************************************************************/
 static unsigned char *get_udp_data(struct sk_buff *skb, unsigned int protoff,
 				   int *datalen)
 {
@@ -1249,7 +1228,6 @@ static unsigned char *get_udp_data(struct sk_buff *skb, unsigned int protoff,
 	return skb_header_pointer(skb, dataoff, *datalen, h323_buffer);
 }
 
-/****************************************************************************/
 static struct nf_conntrack_expect *find_expect(struct nf_conn *ct,
 					       union nf_inet_addr *addr,
 					       __be16 port)
@@ -1270,7 +1248,6 @@ static struct nf_conntrack_expect *find_expect(struct nf_conn *ct,
 	return NULL;
 }
 
-/****************************************************************************/
 static int expect_q931(struct sk_buff *skb, struct nf_conn *ct,
 		       enum ip_conntrack_info ctinfo,
 		       unsigned int protoff, unsigned char **data,
@@ -1328,7 +1305,6 @@ static int expect_q931(struct sk_buff *skb, struct nf_conn *ct,
 	return ret;
 }
 
-/****************************************************************************/
 static int process_grq(struct sk_buff *skb, struct nf_conn *ct,
 		       enum ip_conntrack_info ctinfo,
 		       unsigned int protoff,
@@ -1346,7 +1322,6 @@ static int process_grq(struct sk_buff *skb, struct nf_conn *ct,
 	return 0;
 }
 
-/****************************************************************************/
 static int process_gcf(struct sk_buff *skb, struct nf_conn *ct,
 		       enum ip_conntrack_info ctinfo,
 		       unsigned int protoff,
@@ -1391,7 +1366,6 @@ static int process_gcf(struct sk_buff *skb, struct nf_conn *ct,
 	return ret;
 }
 
-/****************************************************************************/
 static int process_rrq(struct sk_buff *skb, struct nf_conn *ct,
 		       enum ip_conntrack_info ctinfo,
 		       unsigned int protoff,
@@ -1428,7 +1402,6 @@ static int process_rrq(struct sk_buff *skb, struct nf_conn *ct,
 	return 0;
 }
 
-/****************************************************************************/
 static int process_rcf(struct sk_buff *skb, struct nf_conn *ct,
 		       enum ip_conntrack_info ctinfo,
 		       unsigned int protoff,
@@ -1480,7 +1453,6 @@ static int process_rcf(struct sk_buff *skb, struct nf_conn *ct,
 	return 0;
 }
 
-/****************************************************************************/
 static int process_urq(struct sk_buff *skb, struct nf_conn *ct,
 		       enum ip_conntrack_info ctinfo,
 		       unsigned int protoff,
@@ -1514,7 +1486,6 @@ static int process_urq(struct sk_buff *skb, struct nf_conn *ct,
 	return 0;
 }
 
-/****************************************************************************/
 static int process_arq(struct sk_buff *skb, struct nf_conn *ct,
 		       enum ip_conntrack_info ctinfo,
 		       unsigned int protoff,
@@ -1559,7 +1530,6 @@ static int process_arq(struct sk_buff *skb, struct nf_conn *ct,
 	return 0;
 }
 
-/****************************************************************************/
 static int process_acf(struct sk_buff *skb, struct nf_conn *ct,
 		       enum ip_conntrack_info ctinfo,
 		       unsigned int protoff,
@@ -1608,7 +1578,6 @@ static int process_acf(struct sk_buff *skb, struct nf_conn *ct,
 	return ret;
 }
 
-/****************************************************************************/
 static int process_lrq(struct sk_buff *skb, struct nf_conn *ct,
 		       enum ip_conntrack_info ctinfo,
 		       unsigned int protoff,
@@ -1626,7 +1595,6 @@ static int process_lrq(struct sk_buff *skb, struct nf_conn *ct,
 	return 0;
 }
 
-/****************************************************************************/
 static int process_lcf(struct sk_buff *skb, struct nf_conn *ct,
 		       enum ip_conntrack_info ctinfo,
 		       unsigned int protoff,
@@ -1666,7 +1634,6 @@ static int process_lcf(struct sk_buff *skb, struct nf_conn *ct,
 	return ret;
 }
 
-/****************************************************************************/
 static int process_irr(struct sk_buff *skb, struct nf_conn *ct,
 		       enum ip_conntrack_info ctinfo,
 		       unsigned int protoff,
@@ -1700,7 +1667,6 @@ static int process_irr(struct sk_buff *skb, struct nf_conn *ct,
 	return 0;
 }
 
-/****************************************************************************/
 static int process_ras(struct sk_buff *skb, struct nf_conn *ct,
 		       enum ip_conntrack_info ctinfo,
 		       unsigned int protoff,
@@ -1745,7 +1711,6 @@ static int process_ras(struct sk_buff *skb, struct nf_conn *ct,
 	return 0;
 }
 
-/****************************************************************************/
 static int ras_help(struct sk_buff *skb, unsigned int protoff,
 		    struct nf_conn *ct, enum ip_conntrack_info ctinfo)
 {
@@ -1788,7 +1753,6 @@ static int ras_help(struct sk_buff *skb, unsigned int protoff,
 	return NF_DROP;
 }
 
-/****************************************************************************/
 static const struct nf_conntrack_expect_policy ras_exp_policy = {
 	.max_expected		= 32,
 	.timeout		= 240,
@@ -1849,7 +1813,6 @@ static void __exit h323_helper_exit(void)
 	nf_conntrack_helper_unregister(&nf_conntrack_helper_h245);
 }
 
-/****************************************************************************/
 static void __exit nf_conntrack_h323_fini(void)
 {
 	h323_helper_exit();
@@ -1857,7 +1820,6 @@ static void __exit nf_conntrack_h323_fini(void)
 	pr_debug("nf_ct_h323: fini\n");
 }
 
-/****************************************************************************/
 static int __init nf_conntrack_h323_init(void)
 {
 	int ret;
@@ -1877,7 +1839,6 @@ err1:
 	return ret;
 }
 
-/****************************************************************************/
 module_init(nf_conntrack_h323_init);
 module_exit(nf_conntrack_h323_fini);
 
diff --git a/net/netfilter/nf_conntrack_netlink.c b/net/netfilter/nf_conntrack_netlink.c
index 382d49792f42..dd177ebee9aa 100644
--- a/net/netfilter/nf_conntrack_netlink.c
+++ b/net/netfilter/nf_conntrack_netlink.c
@@ -57,8 +57,6 @@
 
 MODULE_LICENSE("GPL");
 
-static char __initdata version[] = "0.93";
-
 static int ctnetlink_dump_tuples_proto(struct sk_buff *skb,
 				const struct nf_conntrack_tuple *tuple,
 				const struct nf_conntrack_l4proto *l4proto)
@@ -544,7 +542,7 @@ static size_t ctnetlink_proto_size(const struct nf_conn *ct)
 	len *= 3u; /* ORIG, REPLY, MASTER */
 
 	l4proto = __nf_ct_l4proto_find(nf_ct_l3num(ct), nf_ct_protonum(ct));
-	len += l4proto->nla_size;
+	len += l4proto->nlattr_size;
 	if (l4proto->nlattr_tuple_size) {
 		len4 = l4proto->nlattr_tuple_size();
 		len4 *= 3u; /* ORIG, REPLY, MASTER */
@@ -1110,6 +1108,14 @@ static const struct nla_policy ct_nla_policy[CTA_MAX+1] = {
 				    .len = NF_CT_LABELS_MAX_SIZE },
 };
 
+static int ctnetlink_flush_iterate(struct nf_conn *ct, void *data)
+{
+	if (test_bit(IPS_OFFLOAD_BIT, &ct->status))
+		return 0;
+
+	return ctnetlink_filter_match(ct, data);
+}
+
 static int ctnetlink_flush_conntrack(struct net *net,
 				     const struct nlattr * const cda[],
 				     u32 portid, int report)
@@ -1122,7 +1128,7 @@ static int ctnetlink_flush_conntrack(struct net *net,
 			return PTR_ERR(filter);
 	}
 
-	nf_ct_iterate_cleanup_net(net, ctnetlink_filter_match, filter,
+	nf_ct_iterate_cleanup_net(net, ctnetlink_flush_iterate, filter,
 				  portid, report);
 	kfree(filter);
 
@@ -1168,6 +1174,11 @@ static int ctnetlink_del_conntrack(struct net *net, struct sock *ctnl,
 
 	ct = nf_ct_tuplehash_to_ctrack(h);
 
+	if (test_bit(IPS_OFFLOAD_BIT, &ct->status)) {
+		nf_ct_put(ct);
+		return -EBUSY;
+	}
+
 	if (cda[CTA_ID]) {
 		u_int32_t id = ntohl(nla_get_be32(cda[CTA_ID]));
 		if (id != (u32)(unsigned long)ct) {
@@ -3412,7 +3423,6 @@ static int __init ctnetlink_init(void)
 {
 	int ret;
 
-	pr_info("ctnetlink v%s: registering with nfnetlink.\n", version);
 	ret = nfnetlink_subsys_register(&ctnl_subsys);
 	if (ret < 0) {
 		pr_err("ctnetlink_init: cannot register with nfnetlink.\n");
@@ -3446,8 +3456,6 @@ err_out:
 
 static void __exit ctnetlink_exit(void)
 {
-	pr_info("ctnetlink: unregistering from nfnetlink.\n");
-
 	unregister_pernet_subsys(&ctnetlink_net_ops);
 	nfnetlink_subsys_unregister(&ctnl_exp_subsys);
 	nfnetlink_subsys_unregister(&ctnl_subsys);
diff --git a/net/netfilter/nf_conntrack_proto.c b/net/netfilter/nf_conntrack_proto.c
index c8e9c9503a08..afdeca53e88b 100644
--- a/net/netfilter/nf_conntrack_proto.c
+++ b/net/netfilter/nf_conntrack_proto.c
@@ -385,14 +385,14 @@ void nf_ct_l4proto_unregister_sysctl(struct net *net,
 
 /* FIXME: Allow NULL functions and sub in pointers to generic for
    them. --RR */
-int nf_ct_l4proto_register_one(struct nf_conntrack_l4proto *l4proto)
+int nf_ct_l4proto_register_one(const struct nf_conntrack_l4proto *l4proto)
 {
 	int ret = 0;
 
 	if (l4proto->l3proto >= ARRAY_SIZE(nf_ct_protos))
 		return -EBUSY;
 
-	if ((l4proto->to_nlattr && !l4proto->nlattr_size) ||
+	if ((l4proto->to_nlattr && l4proto->nlattr_size == 0) ||
 	    (l4proto->tuple_to_nlattr && !l4proto->nlattr_tuple_size))
 		return -EINVAL;
 
@@ -428,10 +428,6 @@ int nf_ct_l4proto_register_one(struct nf_conntrack_l4proto *l4proto)
 		goto out_unlock;
 	}
 
-	l4proto->nla_size = 0;
-	if (l4proto->nlattr_size)
-		l4proto->nla_size += l4proto->nlattr_size();
-
 	rcu_assign_pointer(nf_ct_protos[l4proto->l3proto][l4proto->l4proto],
 			   l4proto);
 out_unlock:
@@ -502,7 +498,7 @@ void nf_ct_l4proto_pernet_unregister_one(struct net *net,
 }
 EXPORT_SYMBOL_GPL(nf_ct_l4proto_pernet_unregister_one);
 
-int nf_ct_l4proto_register(struct nf_conntrack_l4proto *l4proto[],
+int nf_ct_l4proto_register(const struct nf_conntrack_l4proto * const l4proto[],
 			   unsigned int num_proto)
 {
 	int ret = -EINVAL, ver;
@@ -524,7 +520,7 @@ int nf_ct_l4proto_register(struct nf_conntrack_l4proto *l4proto[],
 EXPORT_SYMBOL_GPL(nf_ct_l4proto_register);
 
 int nf_ct_l4proto_pernet_register(struct net *net,
-				  struct nf_conntrack_l4proto *const l4proto[],
+				  const struct nf_conntrack_l4proto *const l4proto[],
 				  unsigned int num_proto)
 {
 	int ret = -EINVAL;
@@ -545,7 +541,7 @@ int nf_ct_l4proto_pernet_register(struct net *net,
 }
 EXPORT_SYMBOL_GPL(nf_ct_l4proto_pernet_register);
 
-void nf_ct_l4proto_unregister(struct nf_conntrack_l4proto *l4proto[],
+void nf_ct_l4proto_unregister(const struct nf_conntrack_l4proto * const l4proto[],
 			      unsigned int num_proto)
 {
 	mutex_lock(&nf_ct_proto_mutex);
@@ -555,12 +551,12 @@ void nf_ct_l4proto_unregister(struct nf_conntrack_l4proto *l4proto[],
 
 	synchronize_net();
 	/* Remove all contrack entries for this protocol */
-	nf_ct_iterate_destroy(kill_l4proto, l4proto);
+	nf_ct_iterate_destroy(kill_l4proto, (void *)l4proto);
 }
 EXPORT_SYMBOL_GPL(nf_ct_l4proto_unregister);
 
 void nf_ct_l4proto_pernet_unregister(struct net *net,
-				struct nf_conntrack_l4proto *const l4proto[],
+				const struct nf_conntrack_l4proto *const l4proto[],
 				unsigned int num_proto)
 {
 	while (num_proto-- != 0)
diff --git a/net/netfilter/nf_conntrack_proto_dccp.c b/net/netfilter/nf_conntrack_proto_dccp.c
index 2a446f4a554c..abe647d5b8c6 100644
--- a/net/netfilter/nf_conntrack_proto_dccp.c
+++ b/net/netfilter/nf_conntrack_proto_dccp.c
@@ -654,6 +654,12 @@ static const struct nla_policy dccp_nla_policy[CTA_PROTOINFO_DCCP_MAX + 1] = {
 	[CTA_PROTOINFO_DCCP_PAD]	= { .type = NLA_UNSPEC },
 };
 
+#define DCCP_NLATTR_SIZE ( \
+	NLA_ALIGN(NLA_HDRLEN + 1) + \
+	NLA_ALIGN(NLA_HDRLEN + 1) + \
+	NLA_ALIGN(NLA_HDRLEN + sizeof(u64)) + \
+	NLA_ALIGN(NLA_HDRLEN + 0))
+
 static int nlattr_to_dccp(struct nlattr *cda[], struct nf_conn *ct)
 {
 	struct nlattr *attr = cda[CTA_PROTOINFO_DCCP];
@@ -691,13 +697,6 @@ static int nlattr_to_dccp(struct nlattr *cda[], struct nf_conn *ct)
 	spin_unlock_bh(&ct->lock);
 	return 0;
 }
-
-static int dccp_nlattr_size(void)
-{
-	return nla_total_size(0)	/* CTA_PROTOINFO_DCCP */
-		+ nla_policy_len(dccp_nla_policy, CTA_PROTOINFO_DCCP_MAX + 1);
-}
-
 #endif
 
 #if IS_ENABLED(CONFIG_NF_CT_NETLINK_TIMEOUT)
@@ -862,7 +861,7 @@ static struct nf_proto_net *dccp_get_net_proto(struct net *net)
 	return &net->ct.nf_ct_proto.dccp.pn;
 }
 
-struct nf_conntrack_l4proto nf_conntrack_l4proto_dccp4 __read_mostly = {
+const struct nf_conntrack_l4proto nf_conntrack_l4proto_dccp4 = {
 	.l3proto		= AF_INET,
 	.l4proto		= IPPROTO_DCCP,
 	.pkt_to_tuple		= dccp_pkt_to_tuple,
@@ -876,8 +875,8 @@ struct nf_conntrack_l4proto nf_conntrack_l4proto_dccp4 __read_mostly = {
 	.print_conntrack	= dccp_print_conntrack,
 #endif
 #if IS_ENABLED(CONFIG_NF_CT_NETLINK)
+	.nlattr_size		= DCCP_NLATTR_SIZE,
 	.to_nlattr		= dccp_to_nlattr,
-	.nlattr_size		= dccp_nlattr_size,
 	.from_nlattr		= nlattr_to_dccp,
 	.tuple_to_nlattr	= nf_ct_port_tuple_to_nlattr,
 	.nlattr_tuple_size	= nf_ct_port_nlattr_tuple_size,
@@ -898,7 +897,7 @@ struct nf_conntrack_l4proto nf_conntrack_l4proto_dccp4 __read_mostly = {
 };
 EXPORT_SYMBOL_GPL(nf_conntrack_l4proto_dccp4);
 
-struct nf_conntrack_l4proto nf_conntrack_l4proto_dccp6 __read_mostly = {
+const struct nf_conntrack_l4proto nf_conntrack_l4proto_dccp6 = {
 	.l3proto		= AF_INET6,
 	.l4proto		= IPPROTO_DCCP,
 	.pkt_to_tuple		= dccp_pkt_to_tuple,
@@ -912,8 +911,8 @@ struct nf_conntrack_l4proto nf_conntrack_l4proto_dccp6 __read_mostly = {
 	.print_conntrack	= dccp_print_conntrack,
 #endif
 #if IS_ENABLED(CONFIG_NF_CT_NETLINK)
+	.nlattr_size		= DCCP_NLATTR_SIZE,
 	.to_nlattr		= dccp_to_nlattr,
-	.nlattr_size		= dccp_nlattr_size,
 	.from_nlattr		= nlattr_to_dccp,
 	.tuple_to_nlattr	= nf_ct_port_tuple_to_nlattr,
 	.nlattr_tuple_size	= nf_ct_port_nlattr_tuple_size,
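
Illustrative note (not part of the patch): the DCCP hunks above show the pattern repeated for SCTP and TCP below; the runtime ->nlattr_size() callback is dropped in favour of a compile-time *_NLATTR_SIZE constant, which is what lets the l4proto structs become const. A standalone sketch of the same arithmetic, with struct nlattr and the alignment macros re-declared locally (values as in include/uapi/linux/netlink.h) so it builds outside the kernel tree.

#include <stdio.h>
#include <stdint.h>

struct nlattr {			/* mirrors the uapi layout */
	uint16_t nla_len;
	uint16_t nla_type;
};

#define NLA_ALIGNTO	4
#define NLA_ALIGN(len)	(((len) + NLA_ALIGNTO - 1) & ~(NLA_ALIGNTO - 1))
#define NLA_HDRLEN	((int)NLA_ALIGN(sizeof(struct nlattr)))

/* state (u8) + role (u8) + handshake_seq (u64) + pad, as in DCCP_NLATTR_SIZE above */
#define DCCP_NLATTR_SIZE ( \
	NLA_ALIGN(NLA_HDRLEN + 1) + \
	NLA_ALIGN(NLA_HDRLEN + 1) + \
	NLA_ALIGN(NLA_HDRLEN + sizeof(uint64_t)) + \
	NLA_ALIGN(NLA_HDRLEN + 0))

int main(void)
{
	/* 8 + 8 + 12 + 4 = 32 bytes */
	printf("DCCP_NLATTR_SIZE = %zu\n", (size_t)DCCP_NLATTR_SIZE);
	return 0;
}
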
diff --git a/net/netfilter/nf_conntrack_proto_generic.c b/net/netfilter/nf_conntrack_proto_generic.c
index 1f86ddf6649a..6c6896d21cd7 100644
--- a/net/netfilter/nf_conntrack_proto_generic.c
+++ b/net/netfilter/nf_conntrack_proto_generic.c
@@ -12,7 +12,7 @@
 #include <linux/netfilter.h>
 #include <net/netfilter/nf_conntrack_l4proto.h>
 
-static unsigned int nf_ct_generic_timeout __read_mostly = 600*HZ;
+static const unsigned int nf_ct_generic_timeout = 600*HZ;
 
 static bool nf_generic_should_process(u8 proto)
 {
@@ -163,7 +163,7 @@ static struct nf_proto_net *generic_get_net_proto(struct net *net)
 	return &net->ct.nf_ct_proto.generic.pn;
 }
 
-struct nf_conntrack_l4proto nf_conntrack_l4proto_generic __read_mostly =
+const struct nf_conntrack_l4proto nf_conntrack_l4proto_generic =
 {
 	.l3proto		= PF_UNSPEC,
 	.l4proto		= 255,
diff --git a/net/netfilter/nf_conntrack_proto_gre.c b/net/netfilter/nf_conntrack_proto_gre.c
index a2503005d80b..d049ea5a3770 100644
--- a/net/netfilter/nf_conntrack_proto_gre.c
+++ b/net/netfilter/nf_conntrack_proto_gre.c
@@ -48,7 +48,7 @@ enum grep_conntrack {
 	GRE_CT_MAX
 };
 
-static unsigned int gre_timeouts[GRE_CT_MAX] = {
+static const unsigned int gre_timeouts[GRE_CT_MAX] = {
 	[GRE_CT_UNREPLIED]	= 30*HZ,
 	[GRE_CT_REPLIED]	= 180*HZ,
 };
@@ -352,7 +352,7 @@ static int gre_init_net(struct net *net, u_int16_t proto)
 }
 
 /* protocol helper struct */
-static struct nf_conntrack_l4proto nf_conntrack_l4proto_gre4 __read_mostly = {
+static const struct nf_conntrack_l4proto nf_conntrack_l4proto_gre4 = {
 	.l3proto	 = AF_INET,
 	.l4proto	 = IPPROTO_GRE,
 	.pkt_to_tuple	 = gre_pkt_to_tuple,
diff --git a/net/netfilter/nf_conntrack_proto_sctp.c b/net/netfilter/nf_conntrack_proto_sctp.c
index 80faf04ddf15..fb9a35d16069 100644
--- a/net/netfilter/nf_conntrack_proto_sctp.c
+++ b/net/netfilter/nf_conntrack_proto_sctp.c
@@ -52,7 +52,7 @@ static const char *const sctp_conntrack_names[] = {
 #define HOURS * 60 MINS
 #define DAYS  * 24 HOURS
 
-static unsigned int sctp_timeouts[SCTP_CONNTRACK_MAX] __read_mostly = {
+static const unsigned int sctp_timeouts[SCTP_CONNTRACK_MAX] = {
 	[SCTP_CONNTRACK_CLOSED]			= 10 SECS,
 	[SCTP_CONNTRACK_COOKIE_WAIT]		= 3 SECS,
 	[SCTP_CONNTRACK_COOKIE_ECHOED]		= 3 SECS,
@@ -578,6 +578,11 @@ static const struct nla_policy sctp_nla_policy[CTA_PROTOINFO_SCTP_MAX+1] = {
 	[CTA_PROTOINFO_SCTP_VTAG_REPLY]     = { .type = NLA_U32 },
 };
 
+#define SCTP_NLATTR_SIZE ( \
+		NLA_ALIGN(NLA_HDRLEN + 1) + \
+		NLA_ALIGN(NLA_HDRLEN + 4) + \
+		NLA_ALIGN(NLA_HDRLEN + 4))
+
 static int nlattr_to_sctp(struct nlattr *cda[], struct nf_conn *ct)
 {
 	struct nlattr *attr = cda[CTA_PROTOINFO_SCTP];
@@ -608,12 +613,6 @@ static int nlattr_to_sctp(struct nlattr *cda[], struct nf_conn *ct)
 
 	return 0;
 }
-
-static int sctp_nlattr_size(void)
-{
-	return nla_total_size(0)	/* CTA_PROTOINFO_SCTP */
-		+ nla_policy_len(sctp_nla_policy, CTA_PROTOINFO_SCTP_MAX + 1);
-}
 #endif
 
 #if IS_ENABLED(CONFIG_NF_CT_NETLINK_TIMEOUT)
@@ -778,7 +777,7 @@ static struct nf_proto_net *sctp_get_net_proto(struct net *net)
 	return &net->ct.nf_ct_proto.sctp.pn;
 }
 
-struct nf_conntrack_l4proto nf_conntrack_l4proto_sctp4 __read_mostly = {
+const struct nf_conntrack_l4proto nf_conntrack_l4proto_sctp4 = {
 	.l3proto		= PF_INET,
 	.l4proto 		= IPPROTO_SCTP,
 	.pkt_to_tuple 		= sctp_pkt_to_tuple,
@@ -793,8 +792,8 @@ struct nf_conntrack_l4proto nf_conntrack_l4proto_sctp4 __read_mostly = {
 	.can_early_drop		= sctp_can_early_drop,
 	.me 			= THIS_MODULE,
 #if IS_ENABLED(CONFIG_NF_CT_NETLINK)
+	.nlattr_size		= SCTP_NLATTR_SIZE,
 	.to_nlattr		= sctp_to_nlattr,
-	.nlattr_size		= sctp_nlattr_size,
 	.from_nlattr		= nlattr_to_sctp,
 	.tuple_to_nlattr	= nf_ct_port_tuple_to_nlattr,
 	.nlattr_tuple_size	= nf_ct_port_nlattr_tuple_size,
@@ -815,7 +814,7 @@ struct nf_conntrack_l4proto nf_conntrack_l4proto_sctp4 __read_mostly = {
 };
 EXPORT_SYMBOL_GPL(nf_conntrack_l4proto_sctp4);
 
-struct nf_conntrack_l4proto nf_conntrack_l4proto_sctp6 __read_mostly = {
+const struct nf_conntrack_l4proto nf_conntrack_l4proto_sctp6 = {
 	.l3proto		= PF_INET6,
 	.l4proto 		= IPPROTO_SCTP,
 	.pkt_to_tuple 		= sctp_pkt_to_tuple,
@@ -830,8 +829,8 @@ struct nf_conntrack_l4proto nf_conntrack_l4proto_sctp6 __read_mostly = {
 	.can_early_drop		= sctp_can_early_drop,
 	.me 			= THIS_MODULE,
 #if IS_ENABLED(CONFIG_NF_CT_NETLINK)
+	.nlattr_size		= SCTP_NLATTR_SIZE,
 	.to_nlattr		= sctp_to_nlattr,
-	.nlattr_size		= sctp_nlattr_size,
 	.from_nlattr		= nlattr_to_sctp,
 	.tuple_to_nlattr	= nf_ct_port_tuple_to_nlattr,
 	.nlattr_tuple_size	= nf_ct_port_nlattr_tuple_size,
diff --git a/net/netfilter/nf_conntrack_proto_tcp.c b/net/netfilter/nf_conntrack_proto_tcp.c
index 37ef35b861f2..e97cdc1cf98c 100644
--- a/net/netfilter/nf_conntrack_proto_tcp.c
+++ b/net/netfilter/nf_conntrack_proto_tcp.c
@@ -68,7 +68,7 @@ static const char *const tcp_conntrack_names[] = {
 #define HOURS * 60 MINS
 #define DAYS * 24 HOURS
 
-static unsigned int tcp_timeouts[TCP_CONNTRACK_TIMEOUT_MAX] __read_mostly = {
+static const unsigned int tcp_timeouts[TCP_CONNTRACK_TIMEOUT_MAX] = {
 	[TCP_CONNTRACK_SYN_SENT]	= 2 MINS,
 	[TCP_CONNTRACK_SYN_RECV]	= 60 SECS,
 	[TCP_CONNTRACK_ESTABLISHED]	= 5 DAYS,
@@ -305,6 +305,9 @@ static bool tcp_invert_tuple(struct nf_conntrack_tuple *tuple,
 /* Print out the private part of the conntrack. */
 static void tcp_print_conntrack(struct seq_file *s, struct nf_conn *ct)
 {
+	if (test_bit(IPS_OFFLOAD_BIT, &ct->status))
+		return;
+
 	seq_printf(s, "%s ", tcp_conntrack_names[ct->proto.tcp.state]);
 }
 #endif
@@ -1222,6 +1225,12 @@ static const struct nla_policy tcp_nla_policy[CTA_PROTOINFO_TCP_MAX+1] = {
 	[CTA_PROTOINFO_TCP_FLAGS_REPLY]	    = { .len =  sizeof(struct nf_ct_tcp_flags) },
 };
 
+#define TCP_NLATTR_SIZE	( \
+	NLA_ALIGN(NLA_HDRLEN + 1) + \
+	NLA_ALIGN(NLA_HDRLEN + 1) + \
+	NLA_ALIGN(NLA_HDRLEN + sizeof(struct nf_ct_tcp_flags)) + \
+	NLA_ALIGN(NLA_HDRLEN + sizeof(struct nf_ct_tcp_flags)))
+
 static int nlattr_to_tcp(struct nlattr *cda[], struct nf_conn *ct)
 {
 	struct nlattr *pattr = cda[CTA_PROTOINFO_TCP];
@@ -1274,12 +1283,6 @@ static int nlattr_to_tcp(struct nlattr *cda[], struct nf_conn *ct)
 	return 0;
 }
 
-static int tcp_nlattr_size(void)
-{
-	return nla_total_size(0)	   /* CTA_PROTOINFO_TCP */
-		+ nla_policy_len(tcp_nla_policy, CTA_PROTOINFO_TCP_MAX + 1);
-}
-
 static unsigned int tcp_nlattr_tuple_size(void)
 {
 	static unsigned int size __read_mostly;
@@ -1541,7 +1544,7 @@ static struct nf_proto_net *tcp_get_net_proto(struct net *net)
 	return &net->ct.nf_ct_proto.tcp.pn;
 }
 
-struct nf_conntrack_l4proto nf_conntrack_l4proto_tcp4 __read_mostly =
+const struct nf_conntrack_l4proto nf_conntrack_l4proto_tcp4 =
 {
 	.l3proto		= PF_INET,
 	.l4proto 		= IPPROTO_TCP,
@@ -1557,11 +1560,11 @@ struct nf_conntrack_l4proto nf_conntrack_l4proto_tcp4 __read_mostly =
 	.can_early_drop		= tcp_can_early_drop,
 #if IS_ENABLED(CONFIG_NF_CT_NETLINK)
 	.to_nlattr		= tcp_to_nlattr,
-	.nlattr_size		= tcp_nlattr_size,
 	.from_nlattr		= nlattr_to_tcp,
 	.tuple_to_nlattr	= nf_ct_port_tuple_to_nlattr,
 	.nlattr_to_tuple	= nf_ct_port_nlattr_to_tuple,
 	.nlattr_tuple_size	= tcp_nlattr_tuple_size,
+	.nlattr_size		= TCP_NLATTR_SIZE,
 	.nla_policy		= nf_ct_port_nla_policy,
 #endif
 #if IS_ENABLED(CONFIG_NF_CT_NETLINK_TIMEOUT)
@@ -1579,7 +1582,7 @@ struct nf_conntrack_l4proto nf_conntrack_l4proto_tcp4 __read_mostly =
 };
 EXPORT_SYMBOL_GPL(nf_conntrack_l4proto_tcp4);
 
-struct nf_conntrack_l4proto nf_conntrack_l4proto_tcp6 __read_mostly =
+const struct nf_conntrack_l4proto nf_conntrack_l4proto_tcp6 =
 {
 	.l3proto		= PF_INET6,
 	.l4proto 		= IPPROTO_TCP,
@@ -1594,8 +1597,8 @@ struct nf_conntrack_l4proto nf_conntrack_l4proto_tcp6 __read_mostly =
 	.error			= tcp_error,
 	.can_early_drop		= tcp_can_early_drop,
 #if IS_ENABLED(CONFIG_NF_CT_NETLINK)
+	.nlattr_size		= TCP_NLATTR_SIZE,
 	.to_nlattr		= tcp_to_nlattr,
-	.nlattr_size		= tcp_nlattr_size,
 	.from_nlattr		= nlattr_to_tcp,
 	.tuple_to_nlattr	= nf_ct_port_tuple_to_nlattr,
 	.nlattr_to_tuple	= nf_ct_port_nlattr_to_tuple,
diff --git a/net/netfilter/nf_conntrack_proto_udp.c b/net/netfilter/nf_conntrack_proto_udp.c
index 3a5f727103af..fe7243970aa4 100644
--- a/net/netfilter/nf_conntrack_proto_udp.c
+++ b/net/netfilter/nf_conntrack_proto_udp.c
@@ -26,7 +26,7 @@
 #include <net/netfilter/ipv4/nf_conntrack_ipv4.h>
 #include <net/netfilter/ipv6/nf_conntrack_ipv6.h>
 
-static unsigned int udp_timeouts[UDP_CT_MAX] = {
+static const unsigned int udp_timeouts[UDP_CT_MAX] = {
 	[UDP_CT_UNREPLIED]	= 30*HZ,
 	[UDP_CT_REPLIED]	= 180*HZ,
 };
@@ -296,7 +296,7 @@ static struct nf_proto_net *udp_get_net_proto(struct net *net)
 	return &net->ct.nf_ct_proto.udp.pn;
 }
 
-struct nf_conntrack_l4proto nf_conntrack_l4proto_udp4 __read_mostly =
+const struct nf_conntrack_l4proto nf_conntrack_l4proto_udp4 =
 {
 	.l3proto		= PF_INET,
 	.l4proto		= IPPROTO_UDP,
@@ -328,7 +328,7 @@ struct nf_conntrack_l4proto nf_conntrack_l4proto_udp4 __read_mostly =
 EXPORT_SYMBOL_GPL(nf_conntrack_l4proto_udp4);
 
 #ifdef CONFIG_NF_CT_PROTO_UDPLITE
-struct nf_conntrack_l4proto nf_conntrack_l4proto_udplite4 __read_mostly =
+const struct nf_conntrack_l4proto nf_conntrack_l4proto_udplite4 =
 {
 	.l3proto		= PF_INET,
 	.l4proto		= IPPROTO_UDPLITE,
@@ -360,7 +360,7 @@ struct nf_conntrack_l4proto nf_conntrack_l4proto_udplite4 __read_mostly =
 EXPORT_SYMBOL_GPL(nf_conntrack_l4proto_udplite4);
 #endif
 
-struct nf_conntrack_l4proto nf_conntrack_l4proto_udp6 __read_mostly =
+const struct nf_conntrack_l4proto nf_conntrack_l4proto_udp6 =
 {
 	.l3proto		= PF_INET6,
 	.l4proto		= IPPROTO_UDP,
@@ -392,7 +392,7 @@ struct nf_conntrack_l4proto nf_conntrack_l4proto_udp6 __read_mostly =
 EXPORT_SYMBOL_GPL(nf_conntrack_l4proto_udp6);
 
 #ifdef CONFIG_NF_CT_PROTO_UDPLITE
-struct nf_conntrack_l4proto nf_conntrack_l4proto_udplite6 __read_mostly =
+const struct nf_conntrack_l4proto nf_conntrack_l4proto_udplite6 =
 {
 	.l3proto		= PF_INET6,
 	.l4proto		= IPPROTO_UDPLITE,
diff --git a/net/netfilter/nf_conntrack_standalone.c b/net/netfilter/nf_conntrack_standalone.c
index 5a101caa3e12..9123fdec5e14 100644
--- a/net/netfilter/nf_conntrack_standalone.c
+++ b/net/netfilter/nf_conntrack_standalone.c
@@ -309,10 +309,12 @@ static int ct_seq_show(struct seq_file *s, void *v)
 	WARN_ON(!l4proto);
 
 	ret = -ENOSPC;
-	seq_printf(s, "%-8s %u %-8s %u %ld ",
+	seq_printf(s, "%-8s %u %-8s %u ",
 		   l3proto_name(l3proto->l3proto), nf_ct_l3num(ct),
-		   l4proto_name(l4proto->l4proto), nf_ct_protonum(ct),
-		   nf_ct_expires(ct)  / HZ);
+		   l4proto_name(l4proto->l4proto), nf_ct_protonum(ct));
+
+	if (!test_bit(IPS_OFFLOAD_BIT, &ct->status))
+		seq_printf(s, "%ld ", nf_ct_expires(ct)  / HZ);
 
 	if (l4proto->print_conntrack)
 		l4proto->print_conntrack(s, ct);
@@ -339,7 +341,9 @@ static int ct_seq_show(struct seq_file *s, void *v)
 	if (seq_print_acct(s, ct, IP_CT_DIR_REPLY))
 		goto release;
 
-	if (test_bit(IPS_ASSURED_BIT, &ct->status))
+	if (test_bit(IPS_OFFLOAD_BIT, &ct->status))
+		seq_puts(s, "[OFFLOAD] ");
+	else if (test_bit(IPS_ASSURED_BIT, &ct->status))
 		seq_puts(s, "[ASSURED] ");
 
 	if (seq_has_overflowed(s))
@@ -378,7 +382,6 @@ static int ct_open(struct inode *inode, struct file *file)
 }
 
 static const struct file_operations ct_file_ops = {
-	.owner   = THIS_MODULE,
 	.open    = ct_open,
 	.read    = seq_read,
 	.llseek  = seq_lseek,
@@ -471,7 +474,6 @@ static int ct_cpu_seq_open(struct inode *inode, struct file *file)
 }
 
 static const struct file_operations ct_cpu_seq_fops = {
-	.owner	 = THIS_MODULE,
 	.open	 = ct_cpu_seq_open,
 	.read	 = seq_read,
 	.llseek	 = seq_lseek,
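
Illustrative note (not part of the patch): in the /proc/net/nf_conntrack rendering above, an offloaded entry suppresses the timeout column (its timer is frozen while the flow sits in the flow table) and prints an [OFFLOAD] tag, which takes precedence over [ASSURED]. A sketch of that precedence; the bit positions follow the uapi of the era, the helper itself is hypothetical.

#include <stdio.h>

#define IPS_ASSURED_BIT	2
#define IPS_OFFLOAD_BIT	14
#define test_bit(nr, addr)	((*(addr) >> (nr)) & 1UL)

static void show_status(const unsigned long *status, long expires)
{
	if (!test_bit(IPS_OFFLOAD_BIT, status))
		printf("%ld ", expires);	/* timeout only when not offloaded */

	if (test_bit(IPS_OFFLOAD_BIT, status))
		printf("[OFFLOAD] ");
	else if (test_bit(IPS_ASSURED_BIT, status))
		printf("[ASSURED] ");
	printf("\n");
}

int main(void)
{
	unsigned long off = (1UL << IPS_OFFLOAD_BIT) | (1UL << IPS_ASSURED_BIT);
	unsigned long ass = 1UL << IPS_ASSURED_BIT;

	show_status(&off, 300);	/* -> "[OFFLOAD] " */
	show_status(&ass, 300);	/* -> "300 [ASSURED] " */
	return 0;
}
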
diff --git a/net/netfilter/nf_flow_table.c b/net/netfilter/nf_flow_table.c
new file mode 100644
index 000000000000..2f5099cb85b8
--- /dev/null
+++ b/net/netfilter/nf_flow_table.c
@@ -0,0 +1,429 @@
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/netfilter.h>
+#include <linux/rhashtable.h>
+#include <linux/netdevice.h>
+#include <net/netfilter/nf_flow_table.h>
+#include <net/netfilter/nf_conntrack.h>
+#include <net/netfilter/nf_conntrack_core.h>
+#include <net/netfilter/nf_conntrack_tuple.h>
+
+struct flow_offload_entry {
+	struct flow_offload	flow;
+	struct nf_conn		*ct;
+	struct rcu_head		rcu_head;
+};
+
+struct flow_offload *
+flow_offload_alloc(struct nf_conn *ct, struct nf_flow_route *route)
+{
+	struct flow_offload_entry *entry;
+	struct flow_offload *flow;
+
+	if (unlikely(nf_ct_is_dying(ct) ||
+	    !atomic_inc_not_zero(&ct->ct_general.use)))
+		return NULL;
+
+	entry = kzalloc(sizeof(*entry), GFP_ATOMIC);
+	if (!entry)
+		goto err_ct_refcnt;
+
+	flow = &entry->flow;
+
+	if (!dst_hold_safe(route->tuple[FLOW_OFFLOAD_DIR_ORIGINAL].dst))
+		goto err_dst_cache_original;
+
+	if (!dst_hold_safe(route->tuple[FLOW_OFFLOAD_DIR_REPLY].dst))
+		goto err_dst_cache_reply;
+
+	entry->ct = ct;
+
+	switch (ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple.src.l3num) {
+	case NFPROTO_IPV4:
+		flow->tuplehash[FLOW_OFFLOAD_DIR_ORIGINAL].tuple.src_v4 =
+			ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple.src.u3.in;
+		flow->tuplehash[FLOW_OFFLOAD_DIR_ORIGINAL].tuple.dst_v4 =
+			ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple.dst.u3.in;
+		flow->tuplehash[FLOW_OFFLOAD_DIR_REPLY].tuple.src_v4 =
+			ct->tuplehash[IP_CT_DIR_REPLY].tuple.src.u3.in;
+		flow->tuplehash[FLOW_OFFLOAD_DIR_REPLY].tuple.dst_v4 =
+			ct->tuplehash[IP_CT_DIR_REPLY].tuple.dst.u3.in;
+		break;
+	case NFPROTO_IPV6:
+		flow->tuplehash[FLOW_OFFLOAD_DIR_ORIGINAL].tuple.src_v6 =
+			ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple.src.u3.in6;
+		flow->tuplehash[FLOW_OFFLOAD_DIR_ORIGINAL].tuple.dst_v6 =
+			ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple.dst.u3.in6;
+		flow->tuplehash[FLOW_OFFLOAD_DIR_REPLY].tuple.src_v6 =
+			ct->tuplehash[IP_CT_DIR_REPLY].tuple.src.u3.in6;
+		flow->tuplehash[FLOW_OFFLOAD_DIR_REPLY].tuple.dst_v6 =
+			ct->tuplehash[IP_CT_DIR_REPLY].tuple.dst.u3.in6;
+		break;
+	}
+
+	flow->tuplehash[FLOW_OFFLOAD_DIR_ORIGINAL].tuple.l3proto =
+		ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple.src.l3num;
+	flow->tuplehash[FLOW_OFFLOAD_DIR_ORIGINAL].tuple.l4proto =
+		ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple.dst.protonum;
+	flow->tuplehash[FLOW_OFFLOAD_DIR_REPLY].tuple.l3proto =
+		ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple.src.l3num;
+	flow->tuplehash[FLOW_OFFLOAD_DIR_REPLY].tuple.l4proto =
+		ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple.dst.protonum;
+
+	flow->tuplehash[FLOW_OFFLOAD_DIR_ORIGINAL].tuple.dst_cache =
+		  route->tuple[FLOW_OFFLOAD_DIR_ORIGINAL].dst;
+	flow->tuplehash[FLOW_OFFLOAD_DIR_REPLY].tuple.dst_cache =
+		  route->tuple[FLOW_OFFLOAD_DIR_REPLY].dst;
+
+	flow->tuplehash[FLOW_OFFLOAD_DIR_ORIGINAL].tuple.src_port =
+		ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple.src.u.tcp.port;
+	flow->tuplehash[FLOW_OFFLOAD_DIR_ORIGINAL].tuple.dst_port =
+		ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple.dst.u.tcp.port;
+	flow->tuplehash[FLOW_OFFLOAD_DIR_REPLY].tuple.src_port =
+		ct->tuplehash[IP_CT_DIR_REPLY].tuple.src.u.tcp.port;
+	flow->tuplehash[FLOW_OFFLOAD_DIR_REPLY].tuple.dst_port =
+		ct->tuplehash[IP_CT_DIR_REPLY].tuple.dst.u.tcp.port;
+
+	flow->tuplehash[FLOW_OFFLOAD_DIR_ORIGINAL].tuple.dir =
+						FLOW_OFFLOAD_DIR_ORIGINAL;
+	flow->tuplehash[FLOW_OFFLOAD_DIR_REPLY].tuple.dir =
+						FLOW_OFFLOAD_DIR_REPLY;
+
+	flow->tuplehash[FLOW_OFFLOAD_DIR_ORIGINAL].tuple.iifidx =
+		route->tuple[FLOW_OFFLOAD_DIR_ORIGINAL].ifindex;
+	flow->tuplehash[FLOW_OFFLOAD_DIR_ORIGINAL].tuple.oifidx =
+		route->tuple[FLOW_OFFLOAD_DIR_REPLY].ifindex;
+	flow->tuplehash[FLOW_OFFLOAD_DIR_REPLY].tuple.iifidx =
+		route->tuple[FLOW_OFFLOAD_DIR_REPLY].ifindex;
+	flow->tuplehash[FLOW_OFFLOAD_DIR_REPLY].tuple.oifidx =
+		route->tuple[FLOW_OFFLOAD_DIR_ORIGINAL].ifindex;
+
+	if (ct->status & IPS_SRC_NAT)
+		flow->flags |= FLOW_OFFLOAD_SNAT;
+	else if (ct->status & IPS_DST_NAT)
+		flow->flags |= FLOW_OFFLOAD_DNAT;
+
+	return flow;
+
+err_dst_cache_reply:
+	dst_release(route->tuple[FLOW_OFFLOAD_DIR_ORIGINAL].dst);
+err_dst_cache_original:
+	kfree(entry);
+err_ct_refcnt:
+	nf_ct_put(ct);
+
+	return NULL;
+}
+EXPORT_SYMBOL_GPL(flow_offload_alloc);
+
+void flow_offload_free(struct flow_offload *flow)
+{
+	struct flow_offload_entry *e;
+
+	dst_release(flow->tuplehash[FLOW_OFFLOAD_DIR_ORIGINAL].tuple.dst_cache);
+	dst_release(flow->tuplehash[FLOW_OFFLOAD_DIR_REPLY].tuple.dst_cache);
+	e = container_of(flow, struct flow_offload_entry, flow);
+	kfree(e);
+}
+EXPORT_SYMBOL_GPL(flow_offload_free);
+
+void flow_offload_dead(struct flow_offload *flow)
+{
+	flow->flags |= FLOW_OFFLOAD_DYING;
+}
+EXPORT_SYMBOL_GPL(flow_offload_dead);
+
+int flow_offload_add(struct nf_flowtable *flow_table, struct flow_offload *flow)
+{
+	flow->timeout = (u32)jiffies;
+
+	rhashtable_insert_fast(&flow_table->rhashtable,
+			       &flow->tuplehash[FLOW_OFFLOAD_DIR_ORIGINAL].node,
+			       *flow_table->type->params);
+	rhashtable_insert_fast(&flow_table->rhashtable,
+			       &flow->tuplehash[FLOW_OFFLOAD_DIR_REPLY].node,
+			       *flow_table->type->params);
+	return 0;
+}
+EXPORT_SYMBOL_GPL(flow_offload_add);
+
+void flow_offload_del(struct nf_flowtable *flow_table,
+		      struct flow_offload *flow)
+{
+	struct flow_offload_entry *e;
+
+	rhashtable_remove_fast(&flow_table->rhashtable,
+			       &flow->tuplehash[FLOW_OFFLOAD_DIR_ORIGINAL].node,
+			       *flow_table->type->params);
+	rhashtable_remove_fast(&flow_table->rhashtable,
+			       &flow->tuplehash[FLOW_OFFLOAD_DIR_REPLY].node,
+			       *flow_table->type->params);
+
+	e = container_of(flow, struct flow_offload_entry, flow);
+	kfree_rcu(e, rcu_head);
+}
+EXPORT_SYMBOL_GPL(flow_offload_del);
+
+struct flow_offload_tuple_rhash *
+flow_offload_lookup(struct nf_flowtable *flow_table,
+		    struct flow_offload_tuple *tuple)
+{
+	return rhashtable_lookup_fast(&flow_table->rhashtable, tuple,
+				      *flow_table->type->params);
+}
+EXPORT_SYMBOL_GPL(flow_offload_lookup);
+
+static void nf_flow_release_ct(const struct flow_offload *flow)
+{
+	struct flow_offload_entry *e;
+
+	e = container_of(flow, struct flow_offload_entry, flow);
+	nf_ct_delete(e->ct, 0, 0);
+	nf_ct_put(e->ct);
+}
+
+int nf_flow_table_iterate(struct nf_flowtable *flow_table,
+			  void (*iter)(struct flow_offload *flow, void *data),
+			  void *data)
+{
+	struct flow_offload_tuple_rhash *tuplehash;
+	struct rhashtable_iter hti;
+	struct flow_offload *flow;
+	int err;
+
+	err = rhashtable_walk_init(&flow_table->rhashtable, &hti, GFP_KERNEL);
+	if (err)
+		return err;
+
+	rhashtable_walk_start(&hti);
+
+	while ((tuplehash = rhashtable_walk_next(&hti))) {
+		if (IS_ERR(tuplehash)) {
+			err = PTR_ERR(tuplehash);
+			if (err != -EAGAIN)
+				goto out;
+
+			continue;
+		}
+		if (tuplehash->tuple.dir)
+			continue;
+
+		flow = container_of(tuplehash, struct flow_offload, tuplehash[0]);
+
+		iter(flow, data);
+	}
+out:
+	rhashtable_walk_stop(&hti);
+	rhashtable_walk_exit(&hti);
+
+	return err;
+}
+EXPORT_SYMBOL_GPL(nf_flow_table_iterate);
+
+static inline bool nf_flow_has_expired(const struct flow_offload *flow)
+{
+	return (__s32)(flow->timeout - (u32)jiffies) <= 0;
+}
+
+static inline bool nf_flow_is_dying(const struct flow_offload *flow)
+{
+	return flow->flags & FLOW_OFFLOAD_DYING;
+}
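
Illustrative note (not part of the patch): nf_flow_has_expired() above relies on the classic jiffies idiom; subtract in unsigned arithmetic, reinterpret the difference as signed, and the comparison stays correct across a 32-bit counter wraparound. A minimal demonstration.

#include <stdio.h>
#include <stdint.h>

static int has_expired(uint32_t timeout, uint32_t now)
{
	return (int32_t)(timeout - now) <= 0;	/* wrap-safe */
}

int main(void)
{
	/* timeout set just before the counter wraps, "now" already wrapped */
	printf("%d\n", has_expired(UINT32_MAX - 5, 10));	/* 1: expired */
	printf("%d\n", has_expired(10, UINT32_MAX - 5));	/* 0: 16 ticks left */
	return 0;
}
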
+
+void nf_flow_offload_work_gc(struct work_struct *work)
+{
+	struct flow_offload_tuple_rhash *tuplehash;
+	struct nf_flowtable *flow_table;
+	struct rhashtable_iter hti;
+	struct flow_offload *flow;
+	int err;
+
+	flow_table = container_of(work, struct nf_flowtable, gc_work.work);
+
+	err = rhashtable_walk_init(&flow_table->rhashtable, &hti, GFP_KERNEL);
+	if (err)
+		goto schedule;
+
+	rhashtable_walk_start(&hti);
+
+	while ((tuplehash = rhashtable_walk_next(&hti))) {
+		if (IS_ERR(tuplehash)) {
+			err = PTR_ERR(tuplehash);
+			if (err != -EAGAIN)
+				goto out;
+
+			continue;
+		}
+		if (tuplehash->tuple.dir)
+			continue;
+
+		flow = container_of(tuplehash, struct flow_offload, tuplehash[0]);
+
+		if (nf_flow_has_expired(flow) ||
+		    nf_flow_is_dying(flow)) {
+			flow_offload_del(flow_table, flow);
+			nf_flow_release_ct(flow);
+		}
+	}
+out:
+	rhashtable_walk_stop(&hti);
+	rhashtable_walk_exit(&hti);
+schedule:
+	queue_delayed_work(system_power_efficient_wq, &flow_table->gc_work, HZ);
+}
+EXPORT_SYMBOL_GPL(nf_flow_offload_work_gc);
+
+static u32 flow_offload_hash(const void *data, u32 len, u32 seed)
+{
+	const struct flow_offload_tuple *tuple = data;
+
+	return jhash(tuple, offsetof(struct flow_offload_tuple, dir), seed);
+}
+
+static u32 flow_offload_hash_obj(const void *data, u32 len, u32 seed)
+{
+	const struct flow_offload_tuple_rhash *tuplehash = data;
+
+	return jhash(&tuplehash->tuple, offsetof(struct flow_offload_tuple, dir), seed);
+}
+
+static int flow_offload_hash_cmp(struct rhashtable_compare_arg *arg,
+					const void *ptr)
+{
+	const struct flow_offload_tuple *tuple = arg->key;
+	const struct flow_offload_tuple_rhash *x = ptr;
+
+	if (memcmp(&x->tuple, tuple, offsetof(struct flow_offload_tuple, dir)))
+		return 1;
+
+	return 0;
+}
+
+const struct rhashtable_params nf_flow_offload_rhash_params = {
+	.head_offset		= offsetof(struct flow_offload_tuple_rhash, node),
+	.hashfn			= flow_offload_hash,
+	.obj_hashfn		= flow_offload_hash_obj,
+	.obj_cmpfn		= flow_offload_hash_cmp,
+	.automatic_shrinking	= true,
+};
+EXPORT_SYMBOL_GPL(nf_flow_offload_rhash_params);
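
Illustrative note (not part of the patch): both hash functions above, and the compare callback, deliberately stop at offsetof(struct flow_offload_tuple, dir); the direction and the fields laid out after it are not part of the key, so a lookup tuple built from a packet matches the stored entry without knowing its direction. A toy demonstration with FNV-1a standing in for jhash and a simplified tuple layout.

#include <stdio.h>
#include <stddef.h>
#include <stdint.h>

struct toy_tuple {
	uint32_t src, dst;
	uint16_t sport, dport;
	uint8_t  dir;		/* first field excluded from the key */
};

static uint32_t hash_key(const void *data, size_t len)
{
	const uint8_t *p = data;
	uint32_t h = 2166136261u;	/* FNV-1a, in place of jhash */

	while (len--)
		h = (h ^ *p++) * 16777619u;
	return h;
}

int main(void)
{
	struct toy_tuple stored = { 1, 2, 80, 1024, 1 };	/* reply direction */
	struct toy_tuple lookup = { 1, 2, 80, 1024, 0 };	/* dir unknown */
	size_t keylen = offsetof(struct toy_tuple, dir);

	printf("match: %d\n",
	       hash_key(&stored, keylen) == hash_key(&lookup, keylen));	/* 1 */
	return 0;
}
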
+
+static int nf_flow_nat_port_tcp(struct sk_buff *skb, unsigned int thoff,
+				__be16 port, __be16 new_port)
+{
+	struct tcphdr *tcph;
+
+	if (!pskb_may_pull(skb, thoff + sizeof(*tcph)) ||
+	    skb_try_make_writable(skb, thoff + sizeof(*tcph)))
+		return -1;
+
+	tcph = (void *)(skb_network_header(skb) + thoff);
+	inet_proto_csum_replace2(&tcph->check, skb, port, new_port, true);
+
+	return 0;
+}
+
+static int nf_flow_nat_port_udp(struct sk_buff *skb, unsigned int thoff,
+				__be16 port, __be16 new_port)
+{
+	struct udphdr *udph;
+
+	if (!pskb_may_pull(skb, thoff + sizeof(*udph)) ||
+	    skb_try_make_writable(skb, thoff + sizeof(*udph)))
+		return -1;
+
+	udph = (void *)(skb_network_header(skb) + thoff);
+	if (udph->check || skb->ip_summed == CHECKSUM_PARTIAL) {
+		inet_proto_csum_replace2(&udph->check, skb, port,
+					 new_port, true);
+		if (!udph->check)
+			udph->check = CSUM_MANGLED_0;
+	}
+
+	return 0;
+}
+
+static int nf_flow_nat_port(struct sk_buff *skb, unsigned int thoff,
+			    u8 protocol, __be16 port, __be16 new_port)
+{
+	switch (protocol) {
+	case IPPROTO_TCP:
+		if (nf_flow_nat_port_tcp(skb, thoff, port, new_port) < 0)
+			return NF_DROP;
+		break;
+	case IPPROTO_UDP:
+		if (nf_flow_nat_port_udp(skb, thoff, port, new_port) < 0)
+			return NF_DROP;
+		break;
+	}
+
+	return 0;
+}
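
Illustrative note (not part of the patch): nf_flow_nat_port_udp() above handles the UDP corner case; on the wire a checksum of 0 means "no checksum" (RFC 768), so when the incremental update folds to 0 the kernel substitutes CSUM_MANGLED_0 (0xffff), an equivalent value in one's-complement arithmetic. A sketch of the RFC 1624 update that inet_proto_csum_replace2() performs, plus that fold; the standalone helper is an assumption, not the kernel routine.

#include <stdio.h>
#include <stdint.h>

/* RFC 1624: HC' = ~(~HC + ~m + m') */
static uint16_t csum_update(uint16_t check, uint16_t old_f, uint16_t new_f)
{
	uint32_t sum = (uint16_t)~check;

	sum += (uint16_t)~old_f;
	sum += new_f;
	sum = (sum & 0xffff) + (sum >> 16);	/* fold carries */
	sum = (sum & 0xffff) + (sum >> 16);
	return (uint16_t)~sum;
}

int main(void)
{
	uint16_t check = 0x1234;

	check = csum_update(check, 0x0050 /* :80 */, 0x1f90 /* :8080 */);
	if (!check)
		check = 0xffff;	/* CSUM_MANGLED_0: 0 is "no checksum" on UDP */
	printf("updated check: 0x%04x\n", check);
	return 0;
}
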
+
+int nf_flow_snat_port(const struct flow_offload *flow,
+		      struct sk_buff *skb, unsigned int thoff,
+		      u8 protocol, enum flow_offload_tuple_dir dir)
+{
+	struct flow_ports *hdr;
+	__be16 port, new_port;
+
+	if (!pskb_may_pull(skb, thoff + sizeof(*hdr)) ||
+	    skb_try_make_writable(skb, thoff + sizeof(*hdr)))
+		return -1;
+
+	hdr = (void *)(skb_network_header(skb) + thoff);
+
+	switch (dir) {
+	case FLOW_OFFLOAD_DIR_ORIGINAL:
+		port = hdr->source;
+		new_port = flow->tuplehash[FLOW_OFFLOAD_DIR_REPLY].tuple.dst_port;
+		hdr->source = new_port;
+		break;
+	case FLOW_OFFLOAD_DIR_REPLY:
+		port = hdr->dest;
+		new_port = flow->tuplehash[FLOW_OFFLOAD_DIR_ORIGINAL].tuple.src_port;
+		hdr->dest = new_port;
+		break;
+	default:
+		return -1;
+	}
+
+	return nf_flow_nat_port(skb, thoff, protocol, port, new_port);
+}
+EXPORT_SYMBOL_GPL(nf_flow_snat_port);
+
+int nf_flow_dnat_port(const struct flow_offload *flow,
+		      struct sk_buff *skb, unsigned int thoff,
+		      u8 protocol, enum flow_offload_tuple_dir dir)
+{
+	struct flow_ports *hdr;
+	__be16 port, new_port;
+
+	if (!pskb_may_pull(skb, thoff + sizeof(*hdr)) ||
+	    skb_try_make_writable(skb, thoff + sizeof(*hdr)))
+		return -1;
+
+	hdr = (void *)(skb_network_header(skb) + thoff);
+
+	switch (dir) {
+	case FLOW_OFFLOAD_DIR_ORIGINAL:
+		port = hdr->dest;
+		new_port = flow->tuplehash[FLOW_OFFLOAD_DIR_REPLY].tuple.src_port;
+		hdr->dest = new_port;
+		break;
+	case FLOW_OFFLOAD_DIR_REPLY:
+		port = hdr->source;
+		new_port = flow->tuplehash[FLOW_OFFLOAD_DIR_ORIGINAL].tuple.dst_port;
+		hdr->source = new_port;
+		break;
+	default:
+		return -1;
+	}
+
+	return nf_flow_nat_port(skb, thoff, protocol, port, new_port);
+}
+EXPORT_SYMBOL_GPL(nf_flow_dnat_port);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Pablo Neira Ayuso <pablo@netfilter.org>");
diff --git a/net/netfilter/nf_flow_table_inet.c b/net/netfilter/nf_flow_table_inet.c
new file mode 100644
index 000000000000..281209aeba8f
--- /dev/null
+++ b/net/netfilter/nf_flow_table_inet.c
@@ -0,0 +1,48 @@
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/netfilter.h>
+#include <linux/rhashtable.h>
+#include <net/netfilter/nf_flow_table.h>
+#include <net/netfilter/nf_tables.h>
+
+static unsigned int
+nf_flow_offload_inet_hook(void *priv, struct sk_buff *skb,
+			  const struct nf_hook_state *state)
+{
+	switch (skb->protocol) {
+	case htons(ETH_P_IP):
+		return nf_flow_offload_ip_hook(priv, skb, state);
+	case htons(ETH_P_IPV6):
+		return nf_flow_offload_ipv6_hook(priv, skb, state);
+	}
+
+	return NF_ACCEPT;
+}
+
+static struct nf_flowtable_type flowtable_inet = {
+	.family		= NFPROTO_INET,
+	.params		= &nf_flow_offload_rhash_params,
+	.gc		= nf_flow_offload_work_gc,
+	.hook		= nf_flow_offload_inet_hook,
+	.owner		= THIS_MODULE,
+};
+
+static int __init nf_flow_inet_module_init(void)
+{
+	nft_register_flowtable_type(&flowtable_inet);
+
+	return 0;
+}
+
+static void __exit nf_flow_inet_module_exit(void)
+{
+	nft_unregister_flowtable_type(&flowtable_inet);
+}
+
+module_init(nf_flow_inet_module_init);
+module_exit(nf_flow_inet_module_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Pablo Neira Ayuso <pablo@netfilter.org>");
+MODULE_ALIAS_NF_FLOWTABLE(1); /* NFPROTO_INET */
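
Illustrative note (not part of the patch): the NFPROTO_INET flowtable type above is a thin demultiplexer; it dispatches on skb->protocol to the IPv4 or IPv6 fast-path hook and returns NF_ACCEPT (the classic forwarding path) for anything else. A userspace analogue, with the ETH_P_* EtherTypes from if_ether.h and a byte-swapping htons() stand-in.

#include <stdio.h>
#include <stdint.h>

#define ETH_P_IP	0x0800
#define ETH_P_IPV6	0x86DD

static uint16_t htons_(uint16_t x) { return (uint16_t)((x << 8) | (x >> 8)); }

static const char *inet_hook(uint16_t skb_protocol)
{
	if (skb_protocol == htons_(ETH_P_IP))
		return "nf_flow_offload_ip_hook";
	if (skb_protocol == htons_(ETH_P_IPV6))
		return "nf_flow_offload_ipv6_hook";
	return "NF_ACCEPT";
}

int main(void)
{
	printf("%s\n", inet_hook(htons_(ETH_P_IP)));	/* IPv4 fast path */
	printf("%s\n", inet_hook(htons_(0x0806)));	/* ARP: NF_ACCEPT */
	return 0;
}
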
diff --git a/net/netfilter/nf_internals.h b/net/netfilter/nf_internals.h
index 44284cd2528d..18f6d7ae995b 100644
--- a/net/netfilter/nf_internals.h
+++ b/net/netfilter/nf_internals.h
@@ -10,7 +10,7 @@
 int nf_queue(struct sk_buff *skb, struct nf_hook_state *state,
 	     const struct nf_hook_entries *entries, unsigned int index,
 	     unsigned int verdict);
-unsigned int nf_queue_nf_hook_drop(struct net *net);
+void nf_queue_nf_hook_drop(struct net *net);
 
 /* nf_log.c */
 int __init netfilter_log_init(void);
diff --git a/net/netfilter/nf_log.c b/net/netfilter/nf_log.c
index 8bb152a7cca4..c2c1b16b7538 100644
--- a/net/netfilter/nf_log.c
+++ b/net/netfilter/nf_log.c
@@ -402,7 +402,6 @@ static int nflog_open(struct inode *inode, struct file *file)
 }
 
 static const struct file_operations nflog_file_ops = {
-	.owner	 = THIS_MODULE,
 	.open	 = nflog_open,
 	.read	 = seq_read,
 	.llseek	 = seq_lseek,
diff --git a/net/netfilter/nf_queue.c b/net/netfilter/nf_queue.c
index f7e21953b1de..d67a96a25a68 100644
--- a/net/netfilter/nf_queue.c
+++ b/net/netfilter/nf_queue.c
@@ -10,6 +10,8 @@
 #include <linux/proc_fs.h>
 #include <linux/skbuff.h>
 #include <linux/netfilter.h>
+#include <linux/netfilter_ipv4.h>
+#include <linux/netfilter_ipv6.h>
 #include <linux/netfilter_bridge.h>
 #include <linux/seq_file.h>
 #include <linux/rcupdate.h>
@@ -96,30 +98,56 @@ void nf_queue_entry_get_refs(struct nf_queue_entry *entry)
 }
 EXPORT_SYMBOL_GPL(nf_queue_entry_get_refs);
 
-unsigned int nf_queue_nf_hook_drop(struct net *net)
+void nf_queue_nf_hook_drop(struct net *net)
 {
 	const struct nf_queue_handler *qh;
-	unsigned int count = 0;
 
 	rcu_read_lock();
 	qh = rcu_dereference(net->nf.queue_handler);
 	if (qh)
-		count = qh->nf_hook_drop(net);
+		qh->nf_hook_drop(net);
 	rcu_read_unlock();
-
-	return count;
 }
 EXPORT_SYMBOL_GPL(nf_queue_nf_hook_drop);
 
+static void nf_ip_saveroute(const struct sk_buff *skb,
+			    struct nf_queue_entry *entry)
+{
+	struct ip_rt_info *rt_info = nf_queue_entry_reroute(entry);
+
+	if (entry->state.hook == NF_INET_LOCAL_OUT) {
+		const struct iphdr *iph = ip_hdr(skb);
+
+		rt_info->tos = iph->tos;
+		rt_info->daddr = iph->daddr;
+		rt_info->saddr = iph->saddr;
+		rt_info->mark = skb->mark;
+	}
+}
+
+static void nf_ip6_saveroute(const struct sk_buff *skb,
+			     struct nf_queue_entry *entry)
+{
+	struct ip6_rt_info *rt_info = nf_queue_entry_reroute(entry);
+
+	if (entry->state.hook == NF_INET_LOCAL_OUT) {
+		const struct ipv6hdr *iph = ipv6_hdr(skb);
+
+		rt_info->daddr = iph->daddr;
+		rt_info->saddr = iph->saddr;
+		rt_info->mark = skb->mark;
+	}
+}
+
 static int __nf_queue(struct sk_buff *skb, const struct nf_hook_state *state,
 		      const struct nf_hook_entries *entries,
 		      unsigned int index, unsigned int queuenum)
 {
 	int status = -ENOENT;
 	struct nf_queue_entry *entry = NULL;
-	const struct nf_afinfo *afinfo;
 	const struct nf_queue_handler *qh;
 	struct net *net = state->net;
+	unsigned int route_key_size;
 
 	/* QUEUE == DROP if no one is waiting, to be safe. */
 	qh = rcu_dereference(net->nf.queue_handler);
@@ -128,11 +156,19 @@ static int __nf_queue(struct sk_buff *skb, const struct nf_hook_state *state,
 		goto err;
 	}
 
-	afinfo = nf_get_afinfo(state->pf);
-	if (!afinfo)
-		goto err;
+	switch (state->pf) {
+	case AF_INET:
+		route_key_size = sizeof(struct ip_rt_info);
+		break;
+	case AF_INET6:
+		route_key_size = sizeof(struct ip6_rt_info);
+		break;
+	default:
+		route_key_size = 0;
+		break;
+	}
 
-	entry = kmalloc(sizeof(*entry) + afinfo->route_key_size, GFP_ATOMIC);
+	entry = kmalloc(sizeof(*entry) + route_key_size, GFP_ATOMIC);
 	if (!entry) {
 		status = -ENOMEM;
 		goto err;
@@ -142,12 +178,21 @@ static int __nf_queue(struct sk_buff *skb, const struct nf_hook_state *state,
 		.skb	= skb,
 		.state	= *state,
 		.hook_index = index,
-		.size	= sizeof(*entry) + afinfo->route_key_size,
+		.size	= sizeof(*entry) + route_key_size,
 	};
 
 	nf_queue_entry_get_refs(entry);
 	skb_dst_force(skb);
-	afinfo->saveroute(skb, entry);
+
+	switch (entry->state.pf) {
+	case AF_INET:
+		nf_ip_saveroute(skb, entry);
+		break;
+	case AF_INET6:
+		nf_ip6_saveroute(skb, entry);
+		break;
+	}
+
 	status = qh->outfn(entry, queuenum);
 
 	if (status < 0) {
@@ -204,13 +249,29 @@ repeat:
 	return NF_ACCEPT;
 }
 
+static struct nf_hook_entries *nf_hook_entries_head(const struct net *net, u8 pf, u8 hooknum)
+{
+	switch (pf) {
+#ifdef CONFIG_NETFILTER_FAMILY_BRIDGE
+	case NFPROTO_BRIDGE:
+		return rcu_dereference(net->nf.hooks_bridge[hooknum]);
+#endif
+	case NFPROTO_IPV4:
+		return rcu_dereference(net->nf.hooks_ipv4[hooknum]);
+	case NFPROTO_IPV6:
+		return rcu_dereference(net->nf.hooks_ipv6[hooknum]);
+	default:
+		WARN_ON_ONCE(1);
+		return NULL;
+	}
+}
+
 /* Caller must hold rcu read-side lock */
 void nf_reinject(struct nf_queue_entry *entry, unsigned int verdict)
 {
 	const struct nf_hook_entry *hook_entry;
 	const struct nf_hook_entries *hooks;
 	struct sk_buff *skb = entry->skb;
-	const struct nf_afinfo *afinfo;
 	const struct net *net;
 	unsigned int i;
 	int err;
@@ -219,12 +282,12 @@ void nf_reinject(struct nf_queue_entry *entry, unsigned int verdict)
 	net = entry->state.net;
 	pf = entry->state.pf;
 
-	hooks = rcu_dereference(net->nf.hooks[pf][entry->state.hook]);
+	hooks = nf_hook_entries_head(net, pf, entry->state.hook);
 
 	nf_queue_entry_release_refs(entry);
 
 	i = entry->hook_index;
-	if (WARN_ON_ONCE(i >= hooks->num_hook_entries)) {
+	if (WARN_ON_ONCE(!hooks || i >= hooks->num_hook_entries)) {
 		kfree_skb(skb);
 		kfree(entry);
 		return;
@@ -237,8 +300,7 @@ void nf_reinject(struct nf_queue_entry *entry, unsigned int verdict)
 		verdict = nf_hook_entry_hookfn(hook_entry, skb, &entry->state);
 
 	if (verdict == NF_ACCEPT) {
-		afinfo = nf_get_afinfo(entry->state.pf);
-		if (!afinfo || afinfo->reroute(entry->state.net, skb, entry) < 0)
+		if (nf_reroute(skb, entry) < 0)
 			verdict = NF_DROP;
 	}
 
diff --git a/net/netfilter/nf_synproxy_core.c b/net/netfilter/nf_synproxy_core.c
index 49bd8bb16b18..92139a087260 100644
--- a/net/netfilter/nf_synproxy_core.c
+++ b/net/netfilter/nf_synproxy_core.c
@@ -317,7 +317,6 @@ static int synproxy_cpu_seq_open(struct inode *inode, struct file *file)
 }
 
 static const struct file_operations synproxy_cpu_seq_fops = {
-	.owner		= THIS_MODULE,
 	.open		= synproxy_cpu_seq_open,
 	.read		= seq_read,
 	.llseek		= seq_lseek,
diff --git a/net/netfilter/nf_tables_api.c b/net/netfilter/nf_tables_api.c
index 07bd4138c84e..0791813a1e7d 100644
--- a/net/netfilter/nf_tables_api.c
+++ b/net/netfilter/nf_tables_api.c
@@ -17,6 +17,7 @@
 #include <linux/netfilter.h>
 #include <linux/netfilter/nfnetlink.h>
 #include <linux/netfilter/nf_tables.h>
+#include <net/netfilter/nf_flow_table.h>
 #include <net/netfilter/nf_tables_core.h>
 #include <net/netfilter/nf_tables.h>
 #include <net/net_namespace.h>
@@ -24,86 +25,20 @@
 
 static LIST_HEAD(nf_tables_expressions);
 static LIST_HEAD(nf_tables_objects);
-
-/**
- *	nft_register_afinfo - register nf_tables address family info
- *
- *	@afi: address family info to register
- *
- *	Register the address family for use with nf_tables. Returns zero on
- *	success or a negative errno code otherwise.
- */
-int nft_register_afinfo(struct net *net, struct nft_af_info *afi)
-{
-	INIT_LIST_HEAD(&afi->tables);
-	nfnl_lock(NFNL_SUBSYS_NFTABLES);
-	list_add_tail_rcu(&afi->list, &net->nft.af_info);
-	nfnl_unlock(NFNL_SUBSYS_NFTABLES);
-	return 0;
-}
-EXPORT_SYMBOL_GPL(nft_register_afinfo);
-
-static void __nft_release_afinfo(struct net *net, struct nft_af_info *afi);
-
-/**
- *	nft_unregister_afinfo - unregister nf_tables address family info
- *
- *	@afi: address family info to unregister
- *
- *	Unregister the address family for use with nf_tables.
- */
-void nft_unregister_afinfo(struct net *net, struct nft_af_info *afi)
-{
-	nfnl_lock(NFNL_SUBSYS_NFTABLES);
-	__nft_release_afinfo(net, afi);
-	list_del_rcu(&afi->list);
-	nfnl_unlock(NFNL_SUBSYS_NFTABLES);
-}
-EXPORT_SYMBOL_GPL(nft_unregister_afinfo);
-
-static struct nft_af_info *nft_afinfo_lookup(struct net *net, int family)
-{
-	struct nft_af_info *afi;
-
-	list_for_each_entry(afi, &net->nft.af_info, list) {
-		if (afi->family == family)
-			return afi;
-	}
-	return NULL;
-}
-
-static struct nft_af_info *
-nf_tables_afinfo_lookup(struct net *net, int family, bool autoload)
-{
-	struct nft_af_info *afi;
-
-	afi = nft_afinfo_lookup(net, family);
-	if (afi != NULL)
-		return afi;
-#ifdef CONFIG_MODULES
-	if (autoload) {
-		nfnl_unlock(NFNL_SUBSYS_NFTABLES);
-		request_module("nft-afinfo-%u", family);
-		nfnl_lock(NFNL_SUBSYS_NFTABLES);
-		afi = nft_afinfo_lookup(net, family);
-		if (afi != NULL)
-			return ERR_PTR(-EAGAIN);
-	}
-#endif
-	return ERR_PTR(-EAFNOSUPPORT);
-}
+static LIST_HEAD(nf_tables_flowtables);
+static u64 table_handle;
 
 static void nft_ctx_init(struct nft_ctx *ctx,
 			 struct net *net,
 			 const struct sk_buff *skb,
 			 const struct nlmsghdr *nlh,
-			 struct nft_af_info *afi,
+			 u8 family,
 			 struct nft_table *table,
 			 struct nft_chain *chain,
 			 const struct nlattr * const *nla)
 {
 	ctx->net	= net;
-	ctx->afi	= afi;
+	ctx->family	= family;
 	ctx->table	= table;
 	ctx->chain	= chain;
 	ctx->nla   	= nla;
@@ -139,29 +74,26 @@ static void nft_trans_destroy(struct nft_trans *trans)
 	kfree(trans);
 }
 
-static int nf_tables_register_hooks(struct net *net,
-				    const struct nft_table *table,
-				    struct nft_chain *chain,
-				    unsigned int hook_nops)
+static int nf_tables_register_hook(struct net *net,
+				   const struct nft_table *table,
+				   struct nft_chain *chain)
 {
 	if (table->flags & NFT_TABLE_F_DORMANT ||
 	    !nft_is_base_chain(chain))
 		return 0;
 
-	return nf_register_net_hooks(net, nft_base_chain(chain)->ops,
-				     hook_nops);
+	return nf_register_net_hook(net, &nft_base_chain(chain)->ops);
 }
 
-static void nf_tables_unregister_hooks(struct net *net,
-				       const struct nft_table *table,
-				       struct nft_chain *chain,
-				       unsigned int hook_nops)
+static void nf_tables_unregister_hook(struct net *net,
+				      const struct nft_table *table,
+				      struct nft_chain *chain)
 {
 	if (table->flags & NFT_TABLE_F_DORMANT ||
 	    !nft_is_base_chain(chain))
 		return;
 
-	nf_unregister_net_hooks(net, nft_base_chain(chain)->ops, hook_nops);
+	nf_unregister_net_hook(net, &nft_base_chain(chain)->ops);
 }
 
 static int nft_trans_table_add(struct nft_ctx *ctx, int msg_type)
@@ -348,34 +280,99 @@ static int nft_delobj(struct nft_ctx *ctx, struct nft_object *obj)
 	return err;
 }
 
+static int nft_trans_flowtable_add(struct nft_ctx *ctx, int msg_type,
+				   struct nft_flowtable *flowtable)
+{
+	struct nft_trans *trans;
+
+	trans = nft_trans_alloc(ctx, msg_type,
+				sizeof(struct nft_trans_flowtable));
+	if (trans == NULL)
+		return -ENOMEM;
+
+	if (msg_type == NFT_MSG_NEWFLOWTABLE)
+		nft_activate_next(ctx->net, flowtable);
+
+	nft_trans_flowtable(trans) = flowtable;
+	list_add_tail(&trans->list, &ctx->net->nft.commit_list);
+
+	return 0;
+}
+
+static int nft_delflowtable(struct nft_ctx *ctx,
+			    struct nft_flowtable *flowtable)
+{
+	int err;
+
+	err = nft_trans_flowtable_add(ctx, NFT_MSG_DELFLOWTABLE, flowtable);
+	if (err < 0)
+		return err;
+
+	nft_deactivate_next(ctx->net, flowtable);
+	ctx->table->use--;
+
+	return err;
+}
+
 /*
  * Tables
  */
 
-static struct nft_table *nft_table_lookup(const struct nft_af_info *afi,
+static struct nft_table *nft_table_lookup(const struct net *net,
 					  const struct nlattr *nla,
-					  u8 genmask)
+					  u8 family, u8 genmask)
 {
 	struct nft_table *table;
 
-	list_for_each_entry(table, &afi->tables, list) {
+	list_for_each_entry(table, &net->nft.tables, list) {
 		if (!nla_strcmp(nla, table->name) &&
+		    table->family == family &&
 		    nft_active_genmask(table, genmask))
 			return table;
 	}
 	return NULL;
 }
 
-static struct nft_table *nf_tables_table_lookup(const struct nft_af_info *afi,
+static struct nft_table *nft_table_lookup_byhandle(const struct net *net,
+						   const struct nlattr *nla,
+						   u8 genmask)
+{
+	struct nft_table *table;
+
+	list_for_each_entry(table, &net->nft.tables, list) {
+		if (be64_to_cpu(nla_get_be64(nla)) == table->handle &&
+		    nft_active_genmask(table, genmask))
+			return table;
+	}
+	return NULL;
+}
+
+static struct nft_table *nf_tables_table_lookup(const struct net *net,
 						const struct nlattr *nla,
-						u8 genmask)
+						u8 family, u8 genmask)
+{
+	struct nft_table *table;
+
+	if (nla == NULL)
+		return ERR_PTR(-EINVAL);
+
+	table = nft_table_lookup(net, nla, family, genmask);
+	if (table != NULL)
+		return table;
+
+	return ERR_PTR(-ENOENT);
+}
+
+static struct nft_table *nf_tables_table_lookup_byhandle(const struct net *net,
+							 const struct nlattr *nla,
+							 u8 genmask)
 {
 	struct nft_table *table;
 
 	if (nla == NULL)
 		return ERR_PTR(-EINVAL);
 
-	table = nft_table_lookup(afi, nla, genmask);
+	table = nft_table_lookup_byhandle(net, nla, genmask);
 	if (table != NULL)
 		return table;
 
@@ -390,7 +387,7 @@ static inline u64 nf_tables_alloc_handle(struct nft_table *table)
 static const struct nf_chain_type *chain_type[NFPROTO_NUMPROTO][NFT_CHAIN_T_MAX];
 
 static const struct nf_chain_type *
-__nf_tables_chain_type_lookup(int family, const struct nlattr *nla)
+__nf_tables_chain_type_lookup(const struct nlattr *nla, u8 family)
 {
 	int i;
 
@@ -403,22 +400,20 @@ __nf_tables_chain_type_lookup(int family, const struct nlattr *nla)
 }
 
 static const struct nf_chain_type *
-nf_tables_chain_type_lookup(const struct nft_af_info *afi,
-			    const struct nlattr *nla,
-			    bool autoload)
+nf_tables_chain_type_lookup(const struct nlattr *nla, u8 family, bool autoload)
 {
 	const struct nf_chain_type *type;
 
-	type = __nf_tables_chain_type_lookup(afi->family, nla);
+	type = __nf_tables_chain_type_lookup(nla, family);
 	if (type != NULL)
 		return type;
 #ifdef CONFIG_MODULES
 	if (autoload) {
 		nfnl_unlock(NFNL_SUBSYS_NFTABLES);
-		request_module("nft-chain-%u-%.*s", afi->family,
+		request_module("nft-chain-%u-%.*s", family,
 			       nla_len(nla), (const char *)nla_data(nla));
 		nfnl_lock(NFNL_SUBSYS_NFTABLES);
-		type = __nf_tables_chain_type_lookup(afi->family, nla);
+		type = __nf_tables_chain_type_lookup(nla, family);
 		if (type != NULL)
 			return ERR_PTR(-EAGAIN);
 	}
@@ -430,6 +425,7 @@ static const struct nla_policy nft_table_policy[NFTA_TABLE_MAX + 1] = {
 	[NFTA_TABLE_NAME]	= { .type = NLA_STRING,
 				    .len = NFT_TABLE_MAXNAMELEN - 1 },
 	[NFTA_TABLE_FLAGS]	= { .type = NLA_U32 },
+	[NFTA_TABLE_HANDLE]	= { .type = NLA_U64 },
 };
 
 static int nf_tables_fill_table_info(struct sk_buff *skb, struct net *net,
@@ -451,7 +447,9 @@ static int nf_tables_fill_table_info(struct sk_buff *skb, struct net *net,
 
 	if (nla_put_string(skb, NFTA_TABLE_NAME, table->name) ||
 	    nla_put_be32(skb, NFTA_TABLE_FLAGS, htonl(table->flags)) ||
-	    nla_put_be32(skb, NFTA_TABLE_USE, htonl(table->use)))
+	    nla_put_be32(skb, NFTA_TABLE_USE, htonl(table->use)) ||
+	    nla_put_be64(skb, NFTA_TABLE_HANDLE, cpu_to_be64(table->handle),
+			 NFTA_TABLE_PAD))
 		goto nla_put_failure;
 
 	nlmsg_end(skb, nlh);
@@ -476,7 +474,7 @@ static void nf_tables_table_notify(const struct nft_ctx *ctx, int event)
 		goto err;
 
 	err = nf_tables_fill_table_info(skb, ctx->net, ctx->portid, ctx->seq,
-					event, 0, ctx->afi->family, ctx->table);
+					event, 0, ctx->family, ctx->table);
 	if (err < 0) {
 		kfree_skb(skb);
 		goto err;
@@ -493,7 +491,6 @@ static int nf_tables_dump_tables(struct sk_buff *skb,
 				 struct netlink_callback *cb)
 {
 	const struct nfgenmsg *nfmsg = nlmsg_data(cb->nlh);
-	const struct nft_af_info *afi;
 	const struct nft_table *table;
 	unsigned int idx = 0, s_idx = cb->args[0];
 	struct net *net = sock_net(skb->sk);
@@ -502,30 +499,27 @@ static int nf_tables_dump_tables(struct sk_buff *skb,
 	rcu_read_lock();
 	cb->seq = net->nft.base_seq;
 
-	list_for_each_entry_rcu(afi, &net->nft.af_info, list) {
-		if (family != NFPROTO_UNSPEC && family != afi->family)
+	list_for_each_entry_rcu(table, &net->nft.tables, list) {
+		if (family != NFPROTO_UNSPEC && family != table->family)
 			continue;
 
-		list_for_each_entry_rcu(table, &afi->tables, list) {
-			if (idx < s_idx)
-				goto cont;
-			if (idx > s_idx)
-				memset(&cb->args[1], 0,
-				       sizeof(cb->args) - sizeof(cb->args[0]));
-			if (!nft_is_active(net, table))
-				continue;
-			if (nf_tables_fill_table_info(skb, net,
-						      NETLINK_CB(cb->skb).portid,
-						      cb->nlh->nlmsg_seq,
-						      NFT_MSG_NEWTABLE,
-						      NLM_F_MULTI,
-						      afi->family, table) < 0)
-				goto done;
-
-			nl_dump_check_consistent(cb, nlmsg_hdr(skb));
+		if (idx < s_idx)
+			goto cont;
+		if (idx > s_idx)
+			memset(&cb->args[1], 0,
+			       sizeof(cb->args) - sizeof(cb->args[0]));
+		if (!nft_is_active(net, table))
+			continue;
+		if (nf_tables_fill_table_info(skb, net,
+					      NETLINK_CB(cb->skb).portid,
+					      cb->nlh->nlmsg_seq,
+					      NFT_MSG_NEWTABLE, NLM_F_MULTI,
+					      table->family, table) < 0)
+			goto done;
+
+		nl_dump_check_consistent(cb, nlmsg_hdr(skb));
 cont:
-			idx++;
-		}
+		idx++;
 	}
 done:
 	rcu_read_unlock();
@@ -540,7 +534,6 @@ static int nf_tables_gettable(struct net *net, struct sock *nlsk,
 {
 	const struct nfgenmsg *nfmsg = nlmsg_data(nlh);
 	u8 genmask = nft_genmask_cur(net);
-	const struct nft_af_info *afi;
 	const struct nft_table *table;
 	struct sk_buff *skb2;
 	int family = nfmsg->nfgen_family;
@@ -553,11 +546,8 @@ static int nf_tables_gettable(struct net *net, struct sock *nlsk,
 		return netlink_dump_start(nlsk, skb, nlh, &c);
 	}
 
-	afi = nf_tables_afinfo_lookup(net, family, false);
-	if (IS_ERR(afi))
-		return PTR_ERR(afi);
-
-	table = nf_tables_table_lookup(afi, nla[NFTA_TABLE_NAME], genmask);
+	table = nf_tables_table_lookup(net, nla[NFTA_TABLE_NAME], family,
+				       genmask);
 	if (IS_ERR(table))
 		return PTR_ERR(table);
 
@@ -578,10 +568,7 @@ err:
 	return err;
 }
 
-static void _nf_tables_table_disable(struct net *net,
-				     const struct nft_af_info *afi,
-				     struct nft_table *table,
-				     u32 cnt)
+static void nft_table_disable(struct net *net, struct nft_table *table, u32 cnt)
 {
 	struct nft_chain *chain;
 	u32 i = 0;
@@ -595,14 +582,11 @@ static void _nf_tables_table_disable(struct net *net,
 		if (cnt && i++ == cnt)
 			break;
 
-		nf_unregister_net_hooks(net, nft_base_chain(chain)->ops,
-					afi->nops);
+		nf_unregister_net_hook(net, &nft_base_chain(chain)->ops);
 	}
 }
 
-static int nf_tables_table_enable(struct net *net,
-				  const struct nft_af_info *afi,
-				  struct nft_table *table)
+static int nf_tables_table_enable(struct net *net, struct nft_table *table)
 {
 	struct nft_chain *chain;
 	int err, i = 0;
@@ -613,8 +597,7 @@ static int nf_tables_table_enable(struct net *net,
 		if (!nft_is_base_chain(chain))
 			continue;
 
-		err = nf_register_net_hooks(net, nft_base_chain(chain)->ops,
-					    afi->nops);
+		err = nf_register_net_hook(net, &nft_base_chain(chain)->ops);
 		if (err < 0)
 			goto err;
 
@@ -623,15 +606,13 @@ static int nf_tables_table_enable(struct net *net,
 	return 0;
 err:
 	if (i)
-		_nf_tables_table_disable(net, afi, table, i);
+		nft_table_disable(net, table, i);
 	return err;
 }
 
-static void nf_tables_table_disable(struct net *net,
-				    const struct nft_af_info *afi,
-				    struct nft_table *table)
+static void nf_tables_table_disable(struct net *net, struct nft_table *table)
 {
-	_nf_tables_table_disable(net, afi, table, 0);
+	nft_table_disable(net, table, 0);
 }
 
 static int nf_tables_updtable(struct nft_ctx *ctx)
@@ -660,7 +641,7 @@ static int nf_tables_updtable(struct nft_ctx *ctx)
 		nft_trans_table_enable(trans) = false;
 	} else if (!(flags & NFT_TABLE_F_DORMANT) &&
 		   ctx->table->flags & NFT_TABLE_F_DORMANT) {
-		ret = nf_tables_table_enable(ctx->net, ctx->afi, ctx->table);
+		ret = nf_tables_table_enable(ctx->net, ctx->table);
 		if (ret >= 0) {
 			ctx->table->flags &= ~NFT_TABLE_F_DORMANT;
 			nft_trans_table_enable(trans) = true;
@@ -685,19 +666,14 @@ static int nf_tables_newtable(struct net *net, struct sock *nlsk,
 	const struct nfgenmsg *nfmsg = nlmsg_data(nlh);
 	u8 genmask = nft_genmask_next(net);
 	const struct nlattr *name;
-	struct nft_af_info *afi;
 	struct nft_table *table;
 	int family = nfmsg->nfgen_family;
 	u32 flags = 0;
 	struct nft_ctx ctx;
 	int err;
 
-	afi = nf_tables_afinfo_lookup(net, family, true);
-	if (IS_ERR(afi))
-		return PTR_ERR(afi);
-
 	name = nla[NFTA_TABLE_NAME];
-	table = nf_tables_table_lookup(afi, name, genmask);
+	table = nf_tables_table_lookup(net, name, family, genmask);
 	if (IS_ERR(table)) {
 		if (PTR_ERR(table) != -ENOENT)
 			return PTR_ERR(table);
@@ -707,7 +683,7 @@ static int nf_tables_newtable(struct net *net, struct sock *nlsk,
 		if (nlh->nlmsg_flags & NLM_F_REPLACE)
 			return -EOPNOTSUPP;
 
-		nft_ctx_init(&ctx, net, skb, nlh, afi, table, NULL, nla);
+		nft_ctx_init(&ctx, net, skb, nlh, family, table, NULL, nla);
 		return nf_tables_updtable(&ctx);
 	}
 
@@ -717,47 +693,45 @@ static int nf_tables_newtable(struct net *net, struct sock *nlsk,
 			return -EINVAL;
 	}
 
-	err = -EAFNOSUPPORT;
-	if (!try_module_get(afi->owner))
-		goto err1;
-
 	err = -ENOMEM;
 	table = kzalloc(sizeof(*table), GFP_KERNEL);
 	if (table == NULL)
-		goto err2;
+		goto err_kzalloc;
 
 	table->name = nla_strdup(name, GFP_KERNEL);
 	if (table->name == NULL)
-		goto err3;
+		goto err_strdup;
 
 	INIT_LIST_HEAD(&table->chains);
 	INIT_LIST_HEAD(&table->sets);
 	INIT_LIST_HEAD(&table->objects);
+	INIT_LIST_HEAD(&table->flowtables);
+	table->family = family;
 	table->flags = flags;
+	table->handle = ++table_handle;
 
-	nft_ctx_init(&ctx, net, skb, nlh, afi, table, NULL, nla);
+	nft_ctx_init(&ctx, net, skb, nlh, family, table, NULL, nla);
 	err = nft_trans_table_add(&ctx, NFT_MSG_NEWTABLE);
 	if (err < 0)
-		goto err4;
+		goto err_trans;
 
-	list_add_tail_rcu(&table->list, &afi->tables);
+	list_add_tail_rcu(&table->list, &net->nft.tables);
 	return 0;
-err4:
+err_trans:
 	kfree(table->name);
-err3:
+err_strdup:
 	kfree(table);
-err2:
-	module_put(afi->owner);
-err1:
+err_kzalloc:
 	return err;
 }
 
 static int nft_flush_table(struct nft_ctx *ctx)
 {
-	int err;
+	struct nft_flowtable *flowtable, *nft;
 	struct nft_chain *chain, *nc;
 	struct nft_object *obj, *ne;
 	struct nft_set *set, *ns;
+	int err;
 
 	list_for_each_entry(chain, &ctx->table->chains, list) {
 		if (!nft_is_active_next(ctx->net, chain))
@@ -774,7 +748,7 @@ static int nft_flush_table(struct nft_ctx *ctx)
 		if (!nft_is_active_next(ctx->net, set))
 			continue;
 
-		if (set->flags & NFT_SET_ANONYMOUS &&
+		if (nft_set_is_anonymous(set) &&
 		    !list_empty(&set->bindings))
 			continue;
 
@@ -783,6 +757,12 @@ static int nft_flush_table(struct nft_ctx *ctx)
 			goto out;
 	}
 
+	list_for_each_entry_safe(flowtable, nft, &ctx->table->flowtables, list) {
+		err = nft_delflowtable(ctx, flowtable);
+		if (err < 0)
+			goto out;
+	}
+
 	list_for_each_entry_safe(obj, ne, &ctx->table->objects, list) {
 		err = nft_delobj(ctx, obj);
 		if (err < 0)
@@ -807,30 +787,28 @@ out:
 
 static int nft_flush(struct nft_ctx *ctx, int family)
 {
-	struct nft_af_info *afi;
 	struct nft_table *table, *nt;
 	const struct nlattr * const *nla = ctx->nla;
 	int err = 0;
 
-	list_for_each_entry(afi, &ctx->net->nft.af_info, list) {
-		if (family != AF_UNSPEC && afi->family != family)
+	list_for_each_entry_safe(table, nt, &ctx->net->nft.tables, list) {
+		if (family != AF_UNSPEC && table->family != family)
 			continue;
 
-		ctx->afi = afi;
-		list_for_each_entry_safe(table, nt, &afi->tables, list) {
-			if (!nft_is_active_next(ctx->net, table))
-				continue;
+		ctx->family = table->family;
 
-			if (nla[NFTA_TABLE_NAME] &&
-			    nla_strcmp(nla[NFTA_TABLE_NAME], table->name) != 0)
-				continue;
+		if (!nft_is_active_next(ctx->net, table))
+			continue;
 
-			ctx->table = table;
+		if (nla[NFTA_TABLE_NAME] &&
+		    nla_strcmp(nla[NFTA_TABLE_NAME], table->name) != 0)
+			continue;
 
-			err = nft_flush_table(ctx);
-			if (err < 0)
-				goto out;
-		}
+		ctx->table = table;
+
+		err = nft_flush_table(ctx);
+		if (err < 0)
+			goto out;
 	}
 out:
 	return err;
@@ -843,20 +821,23 @@ static int nf_tables_deltable(struct net *net, struct sock *nlsk,
 {
 	const struct nfgenmsg *nfmsg = nlmsg_data(nlh);
 	u8 genmask = nft_genmask_next(net);
-	struct nft_af_info *afi;
 	struct nft_table *table;
 	int family = nfmsg->nfgen_family;
 	struct nft_ctx ctx;
 
-	nft_ctx_init(&ctx, net, skb, nlh, NULL, NULL, NULL, nla);
-	if (family == AF_UNSPEC || nla[NFTA_TABLE_NAME] == NULL)
+	nft_ctx_init(&ctx, net, skb, nlh, 0, NULL, NULL, nla);
+	if (family == AF_UNSPEC ||
+	    (!nla[NFTA_TABLE_NAME] && !nla[NFTA_TABLE_HANDLE]))
 		return nft_flush(&ctx, family);
 
-	afi = nf_tables_afinfo_lookup(net, family, false);
-	if (IS_ERR(afi))
-		return PTR_ERR(afi);
+	if (nla[NFTA_TABLE_HANDLE])
+		table = nf_tables_table_lookup_byhandle(net,
+							nla[NFTA_TABLE_HANDLE],
+							genmask);
+	else
+		table = nf_tables_table_lookup(net, nla[NFTA_TABLE_NAME],
+					       family, genmask);
 
-	table = nf_tables_table_lookup(afi, nla[NFTA_TABLE_NAME], genmask);
 	if (IS_ERR(table))
 		return PTR_ERR(table);
 
@@ -864,7 +845,7 @@ static int nf_tables_deltable(struct net *net, struct sock *nlsk,
 	    table->use > 0)
 		return -EBUSY;
 
-	ctx.afi = afi;
+	ctx.family = family;
 	ctx.table = table;
 
 	return nft_flush_table(&ctx);
@@ -876,7 +857,6 @@ static void nf_tables_table_destroy(struct nft_ctx *ctx)
 
 	kfree(ctx->table->name);
 	kfree(ctx->table);
-	module_put(ctx->afi->owner);
 }
 
 int nft_register_chain_type(const struct nf_chain_type *ctype)
@@ -1026,7 +1006,7 @@ static int nf_tables_fill_chain_info(struct sk_buff *skb, struct net *net,
 
 	if (nft_is_base_chain(chain)) {
 		const struct nft_base_chain *basechain = nft_base_chain(chain);
-		const struct nf_hook_ops *ops = &basechain->ops[0];
+		const struct nf_hook_ops *ops = &basechain->ops;
 		struct nlattr *nest;
 
 		nest = nla_nest_start(skb, NFTA_CHAIN_HOOK);
@@ -1077,7 +1057,7 @@ static void nf_tables_chain_notify(const struct nft_ctx *ctx, int event)
 		goto err;
 
 	err = nf_tables_fill_chain_info(skb, ctx->net, ctx->portid, ctx->seq,
-					event, 0, ctx->afi->family, ctx->table,
+					event, 0, ctx->family, ctx->table,
 					ctx->chain);
 	if (err < 0) {
 		kfree_skb(skb);
@@ -1095,7 +1075,6 @@ static int nf_tables_dump_chains(struct sk_buff *skb,
 				 struct netlink_callback *cb)
 {
 	const struct nfgenmsg *nfmsg = nlmsg_data(cb->nlh);
-	const struct nft_af_info *afi;
 	const struct nft_table *table;
 	const struct nft_chain *chain;
 	unsigned int idx = 0, s_idx = cb->args[0];
@@ -1105,31 +1084,30 @@ static int nf_tables_dump_chains(struct sk_buff *skb,
 	rcu_read_lock();
 	cb->seq = net->nft.base_seq;
 
-	list_for_each_entry_rcu(afi, &net->nft.af_info, list) {
-		if (family != NFPROTO_UNSPEC && family != afi->family)
+	list_for_each_entry_rcu(table, &net->nft.tables, list) {
+		if (family != NFPROTO_UNSPEC && family != table->family)
 			continue;
 
-		list_for_each_entry_rcu(table, &afi->tables, list) {
-			list_for_each_entry_rcu(chain, &table->chains, list) {
-				if (idx < s_idx)
-					goto cont;
-				if (idx > s_idx)
-					memset(&cb->args[1], 0,
-					       sizeof(cb->args) - sizeof(cb->args[0]));
-				if (!nft_is_active(net, chain))
-					continue;
-				if (nf_tables_fill_chain_info(skb, net,
-							      NETLINK_CB(cb->skb).portid,
-							      cb->nlh->nlmsg_seq,
-							      NFT_MSG_NEWCHAIN,
-							      NLM_F_MULTI,
-							      afi->family, table, chain) < 0)
-					goto done;
+		list_for_each_entry_rcu(chain, &table->chains, list) {
+			if (idx < s_idx)
+				goto cont;
+			if (idx > s_idx)
+				memset(&cb->args[1], 0,
+				       sizeof(cb->args) - sizeof(cb->args[0]));
+			if (!nft_is_active(net, chain))
+				continue;
+			if (nf_tables_fill_chain_info(skb, net,
+						      NETLINK_CB(cb->skb).portid,
+						      cb->nlh->nlmsg_seq,
+						      NFT_MSG_NEWCHAIN,
+						      NLM_F_MULTI,
+						      table->family, table,
+						      chain) < 0)
+				goto done;
 
-				nl_dump_check_consistent(cb, nlmsg_hdr(skb));
+			nl_dump_check_consistent(cb, nlmsg_hdr(skb));
 cont:
-				idx++;
-			}
+			idx++;
 		}
 	}
 done:
@@ -1145,7 +1123,6 @@ static int nf_tables_getchain(struct net *net, struct sock *nlsk,
 {
 	const struct nfgenmsg *nfmsg = nlmsg_data(nlh);
 	u8 genmask = nft_genmask_cur(net);
-	const struct nft_af_info *afi;
 	const struct nft_table *table;
 	const struct nft_chain *chain;
 	struct sk_buff *skb2;
@@ -1159,11 +1136,8 @@ static int nf_tables_getchain(struct net *net, struct sock *nlsk,
 		return netlink_dump_start(nlsk, skb, nlh, &c);
 	}
 
-	afi = nf_tables_afinfo_lookup(net, family, false);
-	if (IS_ERR(afi))
-		return PTR_ERR(afi);
-
-	table = nf_tables_table_lookup(afi, nla[NFTA_CHAIN_TABLE], genmask);
+	table = nf_tables_table_lookup(net, nla[NFTA_CHAIN_TABLE], family,
+				       genmask);
 	if (IS_ERR(table))
 		return PTR_ERR(table);
 
@@ -1227,13 +1201,13 @@ static struct nft_stats __percpu *nft_stats_alloc(const struct nlattr *attr)
 static void nft_chain_stats_replace(struct nft_base_chain *chain,
 				    struct nft_stats __percpu *newstats)
 {
+	struct nft_stats __percpu *oldstats;
+
 	if (newstats == NULL)
 		return;
 
 	if (chain->stats) {
-		struct nft_stats __percpu *oldstats =
-				nft_dereference(chain->stats);
-
+		oldstats = nfnl_dereference(chain->stats, NFNL_SUBSYS_NFTABLES);
 		rcu_assign_pointer(chain->stats, newstats);
 		synchronize_rcu();
 		free_percpu(oldstats);
@@ -1252,8 +1226,8 @@ static void nf_tables_chain_destroy(struct nft_chain *chain)
 		free_percpu(basechain->stats);
 		if (basechain->stats)
 			static_branch_dec(&nft_counters_enabled);
-		if (basechain->ops[0].dev != NULL)
-			dev_put(basechain->ops[0].dev);
+		if (basechain->ops.dev != NULL)
+			dev_put(basechain->ops.dev);
 		kfree(chain->name);
 		kfree(basechain);
 	} else {
@@ -1264,15 +1238,15 @@ static void nf_tables_chain_destroy(struct nft_chain *chain)
 
 struct nft_chain_hook {
 	u32				num;
-	u32				priority;
+	s32				priority;
 	const struct nf_chain_type	*type;
 	struct net_device		*dev;
 };
 
 static int nft_chain_parse_hook(struct net *net,
 				const struct nlattr * const nla[],
-				struct nft_af_info *afi,
-				struct nft_chain_hook *hook, bool create)
+				struct nft_chain_hook *hook, u8 family,
+				bool create)
 {
 	struct nlattr *ha[NFTA_HOOK_MAX + 1];
 	const struct nf_chain_type *type;
@@ -1289,27 +1263,29 @@ static int nft_chain_parse_hook(struct net *net,
 		return -EINVAL;
 
 	hook->num = ntohl(nla_get_be32(ha[NFTA_HOOK_HOOKNUM]));
-	if (hook->num >= afi->nhooks)
-		return -EINVAL;
-
 	hook->priority = ntohl(nla_get_be32(ha[NFTA_HOOK_PRIORITY]));
 
-	type = chain_type[afi->family][NFT_CHAIN_T_DEFAULT];
+	type = chain_type[family][NFT_CHAIN_T_DEFAULT];
 	if (nla[NFTA_CHAIN_TYPE]) {
-		type = nf_tables_chain_type_lookup(afi, nla[NFTA_CHAIN_TYPE],
-						   create);
+		type = nf_tables_chain_type_lookup(nla[NFTA_CHAIN_TYPE],
+						   family, create);
 		if (IS_ERR(type))
 			return PTR_ERR(type);
 	}
 	if (!(type->hook_mask & (1 << hook->num)))
 		return -EOPNOTSUPP;
+
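+	/* NAT basechains only see packets after conntrack has attached its
+	 * template, so refuse hook priorities at or before
+	 * NF_IP_PRI_CONNTRACK.
+	 */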
+	if (type->type == NFT_CHAIN_T_NAT &&
+	    hook->priority <= NF_IP_PRI_CONNTRACK)
+		return -EOPNOTSUPP;
+
 	if (!try_module_get(type->owner))
 		return -ENOENT;
 
 	hook->type = type;
 
 	hook->dev = NULL;
-	if (afi->flags & NFT_AF_NEEDS_DEV) {
+	if (family == NFPROTO_NETDEV) {
 		char ifname[IFNAMSIZ];
 
 		if (!ha[NFTA_HOOK_DEV]) {
@@ -1344,12 +1320,10 @@ static int nf_tables_addchain(struct nft_ctx *ctx, u8 family, u8 genmask,
 {
 	const struct nlattr * const *nla = ctx->nla;
 	struct nft_table *table = ctx->table;
-	struct nft_af_info *afi = ctx->afi;
 	struct nft_base_chain *basechain;
 	struct nft_stats __percpu *stats;
 	struct net *net = ctx->net;
 	struct nft_chain *chain;
-	unsigned int i;
 	int err;
 
 	if (table->use == UINT_MAX)
@@ -1358,9 +1332,8 @@ static int nf_tables_addchain(struct nft_ctx *ctx, u8 family, u8 genmask,
 	if (nla[NFTA_CHAIN_HOOK]) {
 		struct nft_chain_hook hook;
 		struct nf_hook_ops *ops;
-		nf_hookfn *hookfn;
 
-		err = nft_chain_parse_hook(net, nla, afi, &hook, create);
+		err = nft_chain_parse_hook(net, nla, &hook, family, create);
 		if (err < 0)
 			return err;
 
@@ -1384,23 +1357,19 @@ static int nf_tables_addchain(struct nft_ctx *ctx, u8 family, u8 genmask,
 			static_branch_inc(&nft_counters_enabled);
 		}
 
-		hookfn = hook.type->hooks[hook.num];
 		basechain->type = hook.type;
 		chain = &basechain->chain;
 
-		for (i = 0; i < afi->nops; i++) {
-			ops = &basechain->ops[i];
-			ops->pf		= family;
-			ops->hooknum	= hook.num;
-			ops->priority	= hook.priority;
-			ops->priv	= chain;
-			ops->hook	= afi->hooks[ops->hooknum];
-			ops->dev	= hook.dev;
-			if (hookfn)
-				ops->hook = hookfn;
-			if (afi->hook_ops_init)
-				afi->hook_ops_init(ops, i);
-		}
+		ops		= &basechain->ops;
+		ops->pf		= family;
+		ops->hooknum	= hook.num;
+		ops->priority	= hook.priority;
+		ops->priv	= chain;
+		ops->hook	= hook.type->hooks[ops->hooknum];
+		ops->dev	= hook.dev;
+
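+		/* Let the netfilter core know this hook performs NAT. */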
+		if (basechain->type->type == NFT_CHAIN_T_NAT)
+			ops->nat_hook = true;
 
 		chain->flags |= NFT_BASE_CHAIN;
 		basechain->policy = policy;
@@ -1418,7 +1387,7 @@ static int nf_tables_addchain(struct nft_ctx *ctx, u8 family, u8 genmask,
 		goto err1;
 	}
 
-	err = nf_tables_register_hooks(net, table, chain, afi->nops);
+	err = nf_tables_register_hook(net, table, chain);
 	if (err < 0)
 		goto err1;
 
@@ -1432,7 +1401,7 @@ static int nf_tables_addchain(struct nft_ctx *ctx, u8 family, u8 genmask,
 
 	return 0;
 err2:
-	nf_tables_unregister_hooks(net, table, chain, afi->nops);
+	nf_tables_unregister_hook(net, table, chain);
 err1:
 	nf_tables_chain_destroy(chain);
 
@@ -1445,20 +1414,19 @@ static int nf_tables_updchain(struct nft_ctx *ctx, u8 genmask, u8 policy,
 	const struct nlattr * const *nla = ctx->nla;
 	struct nft_table *table = ctx->table;
 	struct nft_chain *chain = ctx->chain;
-	struct nft_af_info *afi = ctx->afi;
 	struct nft_base_chain *basechain;
 	struct nft_stats *stats = NULL;
 	struct nft_chain_hook hook;
 	const struct nlattr *name;
 	struct nf_hook_ops *ops;
 	struct nft_trans *trans;
-	int err, i;
+	int err;
 
 	if (nla[NFTA_CHAIN_HOOK]) {
 		if (!nft_is_base_chain(chain))
 			return -EBUSY;
 
-		err = nft_chain_parse_hook(ctx->net, nla, ctx->afi, &hook,
+		err = nft_chain_parse_hook(ctx->net, nla, &hook, ctx->family,
 					   create);
 		if (err < 0)
 			return err;
@@ -1469,14 +1437,12 @@ static int nf_tables_updchain(struct nft_ctx *ctx, u8 genmask, u8 policy,
 			return -EBUSY;
 		}
 
-		for (i = 0; i < afi->nops; i++) {
-			ops = &basechain->ops[i];
-			if (ops->hooknum != hook.num ||
-			    ops->priority != hook.priority ||
-			    ops->dev != hook.dev) {
-				nft_chain_release_hook(&hook);
-				return -EBUSY;
-			}
+		ops = &basechain->ops;
+		if (ops->hooknum != hook.num ||
+		    ops->priority != hook.priority ||
+		    ops->dev != hook.dev) {
+			nft_chain_release_hook(&hook);
+			return -EBUSY;
 		}
 		nft_chain_release_hook(&hook);
 	}
@@ -1539,7 +1505,6 @@ static int nf_tables_newchain(struct net *net, struct sock *nlsk,
 	const struct nlattr * uninitialized_var(name);
 	u8 genmask = nft_genmask_next(net);
 	int family = nfmsg->nfgen_family;
-	struct nft_af_info *afi;
 	struct nft_table *table;
 	struct nft_chain *chain;
 	u8 policy = NF_ACCEPT;
@@ -1549,11 +1514,8 @@ static int nf_tables_newchain(struct net *net, struct sock *nlsk,
 
 	create = nlh->nlmsg_flags & NLM_F_CREATE ? true : false;
 
-	afi = nf_tables_afinfo_lookup(net, family, true);
-	if (IS_ERR(afi))
-		return PTR_ERR(afi);
-
-	table = nf_tables_table_lookup(afi, nla[NFTA_CHAIN_TABLE], genmask);
+	table = nf_tables_table_lookup(net, nla[NFTA_CHAIN_TABLE], family,
+				       genmask);
 	if (IS_ERR(table))
 		return PTR_ERR(table);
 
@@ -1593,7 +1555,7 @@ static int nf_tables_newchain(struct net *net, struct sock *nlsk,
 		}
 	}
 
-	nft_ctx_init(&ctx, net, skb, nlh, afi, table, chain, nla);
+	nft_ctx_init(&ctx, net, skb, nlh, family, table, chain, nla);
 
 	if (chain != NULL) {
 		if (nlh->nlmsg_flags & NLM_F_EXCL)
@@ -1614,24 +1576,26 @@ static int nf_tables_delchain(struct net *net, struct sock *nlsk,
 {
 	const struct nfgenmsg *nfmsg = nlmsg_data(nlh);
 	u8 genmask = nft_genmask_next(net);
-	struct nft_af_info *afi;
 	struct nft_table *table;
 	struct nft_chain *chain;
 	struct nft_rule *rule;
 	int family = nfmsg->nfgen_family;
 	struct nft_ctx ctx;
+	u64 handle;
 	u32 use;
 	int err;
 
-	afi = nf_tables_afinfo_lookup(net, family, false);
-	if (IS_ERR(afi))
-		return PTR_ERR(afi);
-
-	table = nf_tables_table_lookup(afi, nla[NFTA_CHAIN_TABLE], genmask);
+	table = nf_tables_table_lookup(net, nla[NFTA_CHAIN_TABLE], family,
+				       genmask);
 	if (IS_ERR(table))
 		return PTR_ERR(table);
 
-	chain = nf_tables_chain_lookup(table, nla[NFTA_CHAIN_NAME], genmask);
+	if (nla[NFTA_CHAIN_HANDLE]) {
+		handle = be64_to_cpu(nla_get_be64(nla[NFTA_CHAIN_HANDLE]));
+		chain = nf_tables_chain_lookup_byhandle(table, handle, genmask);
+	} else {
+		chain = nf_tables_chain_lookup(table, nla[NFTA_CHAIN_NAME],
+					       genmask);
+	}
 	if (IS_ERR(chain))
 		return PTR_ERR(chain);
 
@@ -1639,7 +1603,7 @@ static int nf_tables_delchain(struct net *net, struct sock *nlsk,
 	    chain->use > 0)
 		return -EBUSY;
 
-	nft_ctx_init(&ctx, net, skb, nlh, afi, table, chain, nla);
+	nft_ctx_init(&ctx, net, skb, nlh, family, table, chain, nla);
 
 	use = chain->use;
 	list_for_each_entry(rule, &chain->rules, list) {
@@ -1804,7 +1768,7 @@ static int nf_tables_expr_parse(const struct nft_ctx *ctx,
 	if (err < 0)
 		return err;
 
-	type = nft_expr_type_get(ctx->afi->family, tb[NFTA_EXPR_NAME]);
+	type = nft_expr_type_get(ctx->family, tb[NFTA_EXPR_NAME]);
 	if (IS_ERR(type))
 		return PTR_ERR(type);
 
@@ -2027,7 +1991,7 @@ static void nf_tables_rule_notify(const struct nft_ctx *ctx,
 		goto err;
 
 	err = nf_tables_fill_rule_info(skb, ctx->net, ctx->portid, ctx->seq,
-				       event, 0, ctx->afi->family, ctx->table,
+				       event, 0, ctx->family, ctx->table,
 				       ctx->chain, rule);
 	if (err < 0) {
 		kfree_skb(skb);
@@ -2051,7 +2015,6 @@ static int nf_tables_dump_rules(struct sk_buff *skb,
 {
 	const struct nfgenmsg *nfmsg = nlmsg_data(cb->nlh);
 	const struct nft_rule_dump_ctx *ctx = cb->data;
-	const struct nft_af_info *afi;
 	const struct nft_table *table;
 	const struct nft_chain *chain;
 	const struct nft_rule *rule;
@@ -2062,39 +2025,37 @@ static int nf_tables_dump_rules(struct sk_buff *skb,
 	rcu_read_lock();
 	cb->seq = net->nft.base_seq;
 
-	list_for_each_entry_rcu(afi, &net->nft.af_info, list) {
-		if (family != NFPROTO_UNSPEC && family != afi->family)
+	list_for_each_entry_rcu(table, &net->nft.tables, list) {
+		if (family != NFPROTO_UNSPEC && family != table->family)
 			continue;
 
-		list_for_each_entry_rcu(table, &afi->tables, list) {
-			if (ctx && ctx->table &&
-			    strcmp(ctx->table, table->name) != 0)
+		if (ctx && ctx->table && strcmp(ctx->table, table->name) != 0)
+			continue;
+
+		list_for_each_entry_rcu(chain, &table->chains, list) {
+			if (ctx && ctx->chain &&
+			    strcmp(ctx->chain, chain->name) != 0)
 				continue;
 
-			list_for_each_entry_rcu(chain, &table->chains, list) {
-				if (ctx && ctx->chain &&
-				    strcmp(ctx->chain, chain->name) != 0)
-					continue;
-
-				list_for_each_entry_rcu(rule, &chain->rules, list) {
-					if (!nft_is_active(net, rule))
-						goto cont;
-					if (idx < s_idx)
-						goto cont;
-					if (idx > s_idx)
-						memset(&cb->args[1], 0,
-						       sizeof(cb->args) - sizeof(cb->args[0]));
-					if (nf_tables_fill_rule_info(skb, net, NETLINK_CB(cb->skb).portid,
-								      cb->nlh->nlmsg_seq,
-								      NFT_MSG_NEWRULE,
-								      NLM_F_MULTI | NLM_F_APPEND,
-								      afi->family, table, chain, rule) < 0)
-						goto done;
-
-					nl_dump_check_consistent(cb, nlmsg_hdr(skb));
+			list_for_each_entry_rcu(rule, &chain->rules, list) {
+				if (!nft_is_active(net, rule))
+					goto cont;
+				if (idx < s_idx)
+					goto cont;
+				if (idx > s_idx)
+					memset(&cb->args[1], 0,
+					       sizeof(cb->args) - sizeof(cb->args[0]));
+				if (nf_tables_fill_rule_info(skb, net, NETLINK_CB(cb->skb).portid,
+							      cb->nlh->nlmsg_seq,
+							      NFT_MSG_NEWRULE,
+							      NLM_F_MULTI | NLM_F_APPEND,
+							      table->family,
+							      table, chain, rule) < 0)
+					goto done;
+
+				nl_dump_check_consistent(cb, nlmsg_hdr(skb));
 cont:
-					idx++;
-				}
+				idx++;
 			}
 		}
 	}
@@ -2124,7 +2085,6 @@ static int nf_tables_getrule(struct net *net, struct sock *nlsk,
 {
 	const struct nfgenmsg *nfmsg = nlmsg_data(nlh);
 	u8 genmask = nft_genmask_cur(net);
-	const struct nft_af_info *afi;
 	const struct nft_table *table;
 	const struct nft_chain *chain;
 	const struct nft_rule *rule;
@@ -2168,11 +2128,8 @@ static int nf_tables_getrule(struct net *net, struct sock *nlsk,
 		return netlink_dump_start(nlsk, skb, nlh, &c);
 	}
 
-	afi = nf_tables_afinfo_lookup(net, family, false);
-	if (IS_ERR(afi))
-		return PTR_ERR(afi);
-
-	table = nf_tables_table_lookup(afi, nla[NFTA_RULE_TABLE], genmask);
+	table = nf_tables_table_lookup(net, nla[NFTA_RULE_TABLE], family,
+				       genmask);
 	if (IS_ERR(table))
 		return PTR_ERR(table);
 
@@ -2229,7 +2186,7 @@ static int nf_tables_newrule(struct net *net, struct sock *nlsk,
 {
 	const struct nfgenmsg *nfmsg = nlmsg_data(nlh);
 	u8 genmask = nft_genmask_next(net);
-	struct nft_af_info *afi;
+	int family = nfmsg->nfgen_family;
 	struct nft_table *table;
 	struct nft_chain *chain;
 	struct nft_rule *rule, *old_rule = NULL;
@@ -2245,11 +2202,8 @@ static int nf_tables_newrule(struct net *net, struct sock *nlsk,
 
 	create = nlh->nlmsg_flags & NLM_F_CREATE ? true : false;
 
-	afi = nf_tables_afinfo_lookup(net, nfmsg->nfgen_family, create);
-	if (IS_ERR(afi))
-		return PTR_ERR(afi);
-
-	table = nf_tables_table_lookup(afi, nla[NFTA_RULE_TABLE], genmask);
+	table = nf_tables_table_lookup(net, nla[NFTA_RULE_TABLE], family,
+				       genmask);
 	if (IS_ERR(table))
 		return PTR_ERR(table);
 
@@ -2288,7 +2242,7 @@ static int nf_tables_newrule(struct net *net, struct sock *nlsk,
 			return PTR_ERR(old_rule);
 	}
 
-	nft_ctx_init(&ctx, net, skb, nlh, afi, table, chain, nla);
+	nft_ctx_init(&ctx, net, skb, nlh, family, table, chain, nla);
 
 	n = 0;
 	size = 0;
@@ -2412,18 +2366,14 @@ static int nf_tables_delrule(struct net *net, struct sock *nlsk,
 {
 	const struct nfgenmsg *nfmsg = nlmsg_data(nlh);
 	u8 genmask = nft_genmask_next(net);
-	struct nft_af_info *afi;
 	struct nft_table *table;
 	struct nft_chain *chain = NULL;
 	struct nft_rule *rule;
 	int family = nfmsg->nfgen_family, err = 0;
 	struct nft_ctx ctx;
 
-	afi = nf_tables_afinfo_lookup(net, family, false);
-	if (IS_ERR(afi))
-		return PTR_ERR(afi);
-
-	table = nf_tables_table_lookup(afi, nla[NFTA_RULE_TABLE], genmask);
+	table = nf_tables_table_lookup(net, nla[NFTA_RULE_TABLE], family,
+				       genmask);
 	if (IS_ERR(table))
 		return PTR_ERR(table);
 
@@ -2434,7 +2384,7 @@ static int nf_tables_delrule(struct net *net, struct sock *nlsk,
 			return PTR_ERR(chain);
 	}
 
-	nft_ctx_init(&ctx, net, skb, nlh, afi, table, chain, nla);
+	nft_ctx_init(&ctx, net, skb, nlh, family, table, chain, nla);
 
 	if (chain) {
 		if (nla[NFTA_RULE_HANDLE]) {
@@ -2601,6 +2551,7 @@ static const struct nla_policy nft_set_policy[NFTA_SET_MAX + 1] = {
 	[NFTA_SET_USERDATA]		= { .type = NLA_BINARY,
 					    .len  = NFT_USERDATA_MAXLEN },
 	[NFTA_SET_OBJ_TYPE]		= { .type = NLA_U32 },
+	[NFTA_SET_HANDLE]		= { .type = NLA_U64 },
 };
 
 static const struct nla_policy nft_set_desc_policy[NFTA_SET_DESC_MAX + 1] = {
@@ -2614,26 +2565,17 @@ static int nft_ctx_init_from_setattr(struct nft_ctx *ctx, struct net *net,
 				     u8 genmask)
 {
 	const struct nfgenmsg *nfmsg = nlmsg_data(nlh);
-	struct nft_af_info *afi = NULL;
+	int family = nfmsg->nfgen_family;
 	struct nft_table *table = NULL;
 
-	if (nfmsg->nfgen_family != NFPROTO_UNSPEC) {
-		afi = nf_tables_afinfo_lookup(net, nfmsg->nfgen_family, false);
-		if (IS_ERR(afi))
-			return PTR_ERR(afi);
-	}
-
 	if (nla[NFTA_SET_TABLE] != NULL) {
-		if (afi == NULL)
-			return -EAFNOSUPPORT;
-
-		table = nf_tables_table_lookup(afi, nla[NFTA_SET_TABLE],
-					       genmask);
+		table = nf_tables_table_lookup(net, nla[NFTA_SET_TABLE],
+					       family, genmask);
 		if (IS_ERR(table))
 			return PTR_ERR(table);
 	}
 
-	nft_ctx_init(ctx, net, skb, nlh, afi, table, NULL, nla);
+	nft_ctx_init(ctx, net, skb, nlh, family, table, NULL, nla);
 	return 0;
 }
 
@@ -2653,6 +2595,22 @@ static struct nft_set *nf_tables_set_lookup(const struct nft_table *table,
 	return ERR_PTR(-ENOENT);
 }
 
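+/* Like nf_tables_set_lookup(), but keyed by the 64-bit handle, which
+ * uniquely identifies the set within its table.
+ */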
+static struct nft_set *nf_tables_set_lookup_byhandle(const struct nft_table *table,
+						     const struct nlattr *nla, u8 genmask)
+{
+	struct nft_set *set;
+
+	if (nla == NULL)
+		return ERR_PTR(-EINVAL);
+
+	list_for_each_entry(set, &table->sets, list) {
+		if (be64_to_cpu(nla_get_be64(nla)) == set->handle &&
+		    nft_active_genmask(set, genmask))
+			return set;
+	}
+	return ERR_PTR(-ENOENT);
+}
+
 static struct nft_set *nf_tables_set_lookup_byid(const struct net *net,
 						 const struct nlattr *nla,
 						 u8 genmask)
@@ -2760,7 +2718,7 @@ static int nf_tables_fill_set(struct sk_buff *skb, const struct nft_ctx *ctx,
 		goto nla_put_failure;
 
 	nfmsg = nlmsg_data(nlh);
-	nfmsg->nfgen_family	= ctx->afi->family;
+	nfmsg->nfgen_family	= ctx->family;
 	nfmsg->version		= NFNETLINK_V0;
 	nfmsg->res_id		= htons(ctx->net->nft.base_seq & 0xffff);
 
@@ -2768,6 +2726,9 @@ static int nf_tables_fill_set(struct sk_buff *skb, const struct nft_ctx *ctx,
 		goto nla_put_failure;
 	if (nla_put_string(skb, NFTA_SET_NAME, set->name))
 		goto nla_put_failure;
+	if (nla_put_be64(skb, NFTA_SET_HANDLE, cpu_to_be64(set->handle),
+			 NFTA_SET_PAD))
+		goto nla_put_failure;
 	if (set->flags != 0)
 		if (nla_put_be32(skb, NFTA_SET_FLAGS, htonl(set->flags)))
 			goto nla_put_failure;
@@ -2852,10 +2813,8 @@ static int nf_tables_dump_sets(struct sk_buff *skb, struct netlink_callback *cb)
 {
 	const struct nft_set *set;
 	unsigned int idx, s_idx = cb->args[0];
-	struct nft_af_info *afi;
 	struct nft_table *table, *cur_table = (struct nft_table *)cb->args[2];
 	struct net *net = sock_net(skb->sk);
-	int cur_family = cb->args[3];
 	struct nft_ctx *ctx = cb->data, ctx_set;
 
 	if (cb->args[1])
@@ -2864,51 +2823,44 @@ static int nf_tables_dump_sets(struct sk_buff *skb, struct netlink_callback *cb)
 	rcu_read_lock();
 	cb->seq = net->nft.base_seq;
 
-	list_for_each_entry_rcu(afi, &net->nft.af_info, list) {
-		if (ctx->afi && ctx->afi != afi)
+	list_for_each_entry_rcu(table, &net->nft.tables, list) {
+		if (ctx->family != NFPROTO_UNSPEC &&
+		    ctx->family != table->family)
 			continue;
 
-		if (cur_family) {
-			if (afi->family != cur_family)
+		if (ctx->table && ctx->table != table)
+			continue;
+
+		if (cur_table) {
+			if (cur_table != table)
 				continue;
 
-			cur_family = 0;
+			cur_table = NULL;
 		}
-		list_for_each_entry_rcu(table, &afi->tables, list) {
-			if (ctx->table && ctx->table != table)
-				continue;
+		idx = 0;
+		list_for_each_entry_rcu(set, &table->sets, list) {
+			if (idx < s_idx)
+				goto cont;
+			if (!nft_is_active(net, set))
+				goto cont;
 
-			if (cur_table) {
-				if (cur_table != table)
-					continue;
+			ctx_set = *ctx;
+			ctx_set.table = table;
+			ctx_set.family = table->family;
 
-				cur_table = NULL;
+			if (nf_tables_fill_set(skb, &ctx_set, set,
+					       NFT_MSG_NEWSET,
+					       NLM_F_MULTI) < 0) {
+				cb->args[0] = idx;
+				cb->args[2] = (unsigned long) table;
+				goto done;
 			}
-			idx = 0;
-			list_for_each_entry_rcu(set, &table->sets, list) {
-				if (idx < s_idx)
-					goto cont;
-				if (!nft_is_active(net, set))
-					goto cont;
-
-				ctx_set = *ctx;
-				ctx_set.table = table;
-				ctx_set.afi = afi;
-				if (nf_tables_fill_set(skb, &ctx_set, set,
-						       NFT_MSG_NEWSET,
-						       NLM_F_MULTI) < 0) {
-					cb->args[0] = idx;
-					cb->args[2] = (unsigned long) table;
-					cb->args[3] = afi->family;
-					goto done;
-				}
-				nl_dump_check_consistent(cb, nlmsg_hdr(skb));
+			nl_dump_check_consistent(cb, nlmsg_hdr(skb));
 cont:
-				idx++;
-			}
-			if (s_idx)
-				s_idx = 0;
+			idx++;
 		}
+		if (s_idx)
+			s_idx = 0;
 	}
 	cb->args[1] = 1;
 done:
@@ -3006,8 +2958,8 @@ static int nf_tables_newset(struct net *net, struct sock *nlsk,
 {
 	const struct nfgenmsg *nfmsg = nlmsg_data(nlh);
 	u8 genmask = nft_genmask_next(net);
+	int family = nfmsg->nfgen_family;
 	const struct nft_set_ops *ops;
-	struct nft_af_info *afi;
 	struct nft_table *table;
 	struct nft_set *set;
 	struct nft_ctx ctx;
@@ -3114,15 +3066,12 @@ static int nf_tables_newset(struct net *net, struct sock *nlsk,
 
 	create = nlh->nlmsg_flags & NLM_F_CREATE ? true : false;
 
-	afi = nf_tables_afinfo_lookup(net, nfmsg->nfgen_family, create);
-	if (IS_ERR(afi))
-		return PTR_ERR(afi);
-
-	table = nf_tables_table_lookup(afi, nla[NFTA_SET_TABLE], genmask);
+	table = nf_tables_table_lookup(net, nla[NFTA_SET_TABLE], family,
+				       genmask);
 	if (IS_ERR(table))
 		return PTR_ERR(table);
 
-	nft_ctx_init(&ctx, net, skb, nlh, afi, table, NULL, nla);
+	nft_ctx_init(&ctx, net, skb, nlh, family, table, NULL, nla);
 
 	set = nf_tables_set_lookup(table, nla[NFTA_SET_NAME], genmask);
 	if (IS_ERR(set)) {
@@ -3188,6 +3137,7 @@ static int nf_tables_newset(struct net *net, struct sock *nlsk,
 	set->udata  = udata;
 	set->timeout = timeout;
 	set->gc_int = gc_int;
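+	/* Handles come from the per-table counter shared with chains, rules
+	 * and stateful objects, so the set can later be deleted by handle.
+	 */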
+	set->handle = nf_tables_alloc_handle(table);
 
 	err = ops->init(set, &desc, nla);
 	if (err < 0)
@@ -3245,7 +3195,10 @@ static int nf_tables_delset(struct net *net, struct sock *nlsk,
 	if (err < 0)
 		return err;
 
-	set = nf_tables_set_lookup(ctx.table, nla[NFTA_SET_NAME], genmask);
+	if (nla[NFTA_SET_HANDLE])
+		set = nf_tables_set_lookup_byhandle(ctx.table,
+						    nla[NFTA_SET_HANDLE],
+						    genmask);
+	else
+		set = nf_tables_set_lookup(ctx.table, nla[NFTA_SET_NAME],
+					   genmask);
 	if (IS_ERR(set))
 		return PTR_ERR(set);
 
@@ -3277,7 +3230,7 @@ int nf_tables_bind_set(const struct nft_ctx *ctx, struct nft_set *set,
 	struct nft_set_binding *i;
 	struct nft_set_iter iter;
 
-	if (!list_empty(&set->bindings) && set->flags & NFT_SET_ANONYMOUS)
+	if (!list_empty(&set->bindings) && nft_set_is_anonymous(set))
 		return -EBUSY;
 
 	if (binding->flags & NFT_SET_MAP) {
@@ -3312,7 +3265,7 @@ void nf_tables_unbind_set(const struct nft_ctx *ctx, struct nft_set *set,
 {
 	list_del_rcu(&binding->list);
 
-	if (list_empty(&set->bindings) && set->flags & NFT_SET_ANONYMOUS &&
+	if (list_empty(&set->bindings) && nft_set_is_anonymous(set) &&
 	    nft_is_active(ctx->net, set))
 		nf_tables_set_destroy(ctx, set);
 }
@@ -3380,19 +3333,15 @@ static int nft_ctx_init_from_elemattr(struct nft_ctx *ctx, struct net *net,
 				      u8 genmask)
 {
 	const struct nfgenmsg *nfmsg = nlmsg_data(nlh);
-	struct nft_af_info *afi;
+	int family = nfmsg->nfgen_family;
 	struct nft_table *table;
 
-	afi = nf_tables_afinfo_lookup(net, nfmsg->nfgen_family, false);
-	if (IS_ERR(afi))
-		return PTR_ERR(afi);
-
-	table = nf_tables_table_lookup(afi, nla[NFTA_SET_ELEM_LIST_TABLE],
-				       genmask);
+	table = nf_tables_table_lookup(net, nla[NFTA_SET_ELEM_LIST_TABLE],
+				       family, genmask);
 	if (IS_ERR(table))
 		return PTR_ERR(table);
 
-	nft_ctx_init(ctx, net, skb, nlh, afi, table, NULL, nla);
+	nft_ctx_init(ctx, net, skb, nlh, family, table, NULL, nla);
 	return 0;
 }
 
@@ -3497,7 +3446,6 @@ static int nf_tables_dump_set(struct sk_buff *skb, struct netlink_callback *cb)
 {
 	struct nft_set_dump_ctx *dump_ctx = cb->data;
 	struct net *net = sock_net(skb->sk);
-	struct nft_af_info *afi;
 	struct nft_table *table;
 	struct nft_set *set;
 	struct nft_set_dump_args args;
@@ -3509,21 +3457,19 @@ static int nf_tables_dump_set(struct sk_buff *skb, struct netlink_callback *cb)
 	int event;
 
 	rcu_read_lock();
-	list_for_each_entry_rcu(afi, &net->nft.af_info, list) {
-		if (afi != dump_ctx->ctx.afi)
+	list_for_each_entry_rcu(table, &net->nft.tables, list) {
+		if (dump_ctx->ctx.family != NFPROTO_UNSPEC &&
+		    dump_ctx->ctx.family != table->family)
 			continue;
 
-		list_for_each_entry_rcu(table, &afi->tables, list) {
-			if (table != dump_ctx->ctx.table)
-				continue;
+		if (table != dump_ctx->ctx.table)
+			continue;
 
-			list_for_each_entry_rcu(set, &table->sets, list) {
-				if (set == dump_ctx->set) {
-					set_found = true;
-					break;
-				}
+		list_for_each_entry_rcu(set, &table->sets, list) {
+			if (set == dump_ctx->set) {
+				set_found = true;
+				break;
 			}
-			break;
 		}
 		break;
 	}
@@ -3543,7 +3489,7 @@ static int nf_tables_dump_set(struct sk_buff *skb, struct netlink_callback *cb)
 		goto nla_put_failure;
 
 	nfmsg = nlmsg_data(nlh);
-	nfmsg->nfgen_family = afi->family;
+	nfmsg->nfgen_family = table->family;
 	nfmsg->version      = NFNETLINK_V0;
 	nfmsg->res_id	    = htons(net->nft.base_seq & 0xffff);
 
@@ -3606,7 +3552,7 @@ static int nf_tables_fill_setelem_info(struct sk_buff *skb,
 		goto nla_put_failure;
 
 	nfmsg = nlmsg_data(nlh);
-	nfmsg->nfgen_family	= ctx->afi->family;
+	nfmsg->nfgen_family	= ctx->family;
 	nfmsg->version		= NFNETLINK_V0;
 	nfmsg->res_id		= htons(ctx->net->nft.base_seq & 0xffff);
 
@@ -3963,7 +3909,7 @@ static int nft_add_set_elem(struct nft_ctx *ctx, struct nft_set *set,
 		list_for_each_entry(binding, &set->bindings, list) {
 			struct nft_ctx bind_ctx = {
 				.net	= ctx->net,
-				.afi	= ctx->afi,
+				.family	= ctx->family,
 				.table	= ctx->table,
 				.chain	= (struct nft_chain *)binding->chain,
 			};
@@ -4382,6 +4328,21 @@ struct nft_object *nf_tables_obj_lookup(const struct nft_table *table,
 }
 EXPORT_SYMBOL_GPL(nf_tables_obj_lookup);
 
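+/* Handle-based counterpart of nf_tables_obj_lookup(), used when deleting
+ * an object via NFTA_OBJ_HANDLE instead of its name.
+ */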
+struct nft_object *nf_tables_obj_lookup_byhandle(const struct nft_table *table,
+						 const struct nlattr *nla,
+						 u32 objtype, u8 genmask)
+{
+	struct nft_object *obj;
+
+	list_for_each_entry(obj, &table->objects, list) {
+		if (be64_to_cpu(nla_get_be64(nla)) == obj->handle &&
+		    objtype == obj->ops->type->type &&
+		    nft_active_genmask(obj, genmask))
+			return obj;
+	}
+	return ERR_PTR(-ENOENT);
+}
+
 static const struct nla_policy nft_obj_policy[NFTA_OBJ_MAX + 1] = {
 	[NFTA_OBJ_TABLE]	= { .type = NLA_STRING,
 				    .len = NFT_TABLE_MAXNAMELEN - 1 },
@@ -4389,6 +4350,7 @@ static const struct nla_policy nft_obj_policy[NFTA_OBJ_MAX + 1] = {
 				    .len = NFT_OBJ_MAXNAMELEN - 1 },
 	[NFTA_OBJ_TYPE]		= { .type = NLA_U32 },
 	[NFTA_OBJ_DATA]		= { .type = NLA_NESTED },
+	[NFTA_OBJ_HANDLE]	= { .type = NLA_U64 },
 };
 
 static struct nft_object *nft_obj_init(const struct nft_ctx *ctx,
@@ -4494,7 +4456,6 @@ static int nf_tables_newobj(struct net *net, struct sock *nlsk,
 	const struct nft_object_type *type;
 	u8 genmask = nft_genmask_next(net);
 	int family = nfmsg->nfgen_family;
-	struct nft_af_info *afi;
 	struct nft_table *table;
 	struct nft_object *obj;
 	struct nft_ctx ctx;
@@ -4506,11 +4467,8 @@ static int nf_tables_newobj(struct net *net, struct sock *nlsk,
 	    !nla[NFTA_OBJ_DATA])
 		return -EINVAL;
 
-	afi = nf_tables_afinfo_lookup(net, family, true);
-	if (IS_ERR(afi))
-		return PTR_ERR(afi);
-
-	table = nf_tables_table_lookup(afi, nla[NFTA_OBJ_TABLE], genmask);
+	table = nf_tables_table_lookup(net, nla[NFTA_OBJ_TABLE], family,
+				       genmask);
 	if (IS_ERR(table))
 		return PTR_ERR(table);
 
@@ -4528,7 +4486,7 @@ static int nf_tables_newobj(struct net *net, struct sock *nlsk,
 		return 0;
 	}
 
-	nft_ctx_init(&ctx, net, skb, nlh, afi, table, NULL, nla);
+	nft_ctx_init(&ctx, net, skb, nlh, family, table, NULL, nla);
 
 	type = nft_obj_type_get(objtype);
 	if (IS_ERR(type))
@@ -4540,6 +4498,8 @@ static int nf_tables_newobj(struct net *net, struct sock *nlsk,
 		goto err1;
 	}
 	obj->table = table;
+	obj->handle = nf_tables_alloc_handle(table);
+
 	obj->name = nla_strdup(nla[NFTA_OBJ_NAME], GFP_KERNEL);
 	if (!obj->name) {
 		err = -ENOMEM;
@@ -4586,7 +4546,9 @@ static int nf_tables_fill_obj_info(struct sk_buff *skb, struct net *net,
 	    nla_put_string(skb, NFTA_OBJ_NAME, obj->name) ||
 	    nla_put_be32(skb, NFTA_OBJ_TYPE, htonl(obj->ops->type->type)) ||
 	    nla_put_be32(skb, NFTA_OBJ_USE, htonl(obj->use)) ||
-	    nft_object_dump(skb, NFTA_OBJ_DATA, obj, reset))
+	    nft_object_dump(skb, NFTA_OBJ_DATA, obj, reset) ||
+	    nla_put_be64(skb, NFTA_OBJ_HANDLE, cpu_to_be64(obj->handle),
+			 NFTA_OBJ_PAD))
 		goto nla_put_failure;
 
 	nlmsg_end(skb, nlh);
@@ -4605,7 +4567,6 @@ struct nft_obj_filter {
 static int nf_tables_dump_obj(struct sk_buff *skb, struct netlink_callback *cb)
 {
 	const struct nfgenmsg *nfmsg = nlmsg_data(cb->nlh);
-	const struct nft_af_info *afi;
 	const struct nft_table *table;
 	unsigned int idx = 0, s_idx = cb->args[0];
 	struct nft_obj_filter *filter = cb->data;
@@ -4620,38 +4581,37 @@ static int nf_tables_dump_obj(struct sk_buff *skb, struct netlink_callback *cb)
 	rcu_read_lock();
 	cb->seq = net->nft.base_seq;
 
-	list_for_each_entry_rcu(afi, &net->nft.af_info, list) {
-		if (family != NFPROTO_UNSPEC && family != afi->family)
+	list_for_each_entry_rcu(table, &net->nft.tables, list) {
+		if (family != NFPROTO_UNSPEC && family != table->family)
 			continue;
 
-		list_for_each_entry_rcu(table, &afi->tables, list) {
-			list_for_each_entry_rcu(obj, &table->objects, list) {
-				if (!nft_is_active(net, obj))
-					goto cont;
-				if (idx < s_idx)
-					goto cont;
-				if (idx > s_idx)
-					memset(&cb->args[1], 0,
-					       sizeof(cb->args) - sizeof(cb->args[0]));
-				if (filter && filter->table[0] &&
-				    strcmp(filter->table, table->name))
-					goto cont;
-				if (filter &&
-				    filter->type != NFT_OBJECT_UNSPEC &&
-				    obj->ops->type->type != filter->type)
-					goto cont;
+		list_for_each_entry_rcu(obj, &table->objects, list) {
+			if (!nft_is_active(net, obj))
+				goto cont;
+			if (idx < s_idx)
+				goto cont;
+			if (idx > s_idx)
+				memset(&cb->args[1], 0,
+				       sizeof(cb->args) - sizeof(cb->args[0]));
+			if (filter && filter->table[0] &&
+			    strcmp(filter->table, table->name))
+				goto cont;
+			if (filter &&
+			    filter->type != NFT_OBJECT_UNSPEC &&
+			    obj->ops->type->type != filter->type)
+				goto cont;
 
-				if (nf_tables_fill_obj_info(skb, net, NETLINK_CB(cb->skb).portid,
-							    cb->nlh->nlmsg_seq,
-							    NFT_MSG_NEWOBJ,
-							    NLM_F_MULTI | NLM_F_APPEND,
-							    afi->family, table, obj, reset) < 0)
-					goto done;
+			if (nf_tables_fill_obj_info(skb, net, NETLINK_CB(cb->skb).portid,
+						    cb->nlh->nlmsg_seq,
+						    NFT_MSG_NEWOBJ,
+						    NLM_F_MULTI | NLM_F_APPEND,
+						    table->family, table,
+						    obj, reset) < 0)
+				goto done;
 
-				nl_dump_check_consistent(cb, nlmsg_hdr(skb));
+			nl_dump_check_consistent(cb, nlmsg_hdr(skb));
 cont:
-				idx++;
-			}
+			idx++;
 		}
 	}
 done:
@@ -4703,7 +4663,6 @@ static int nf_tables_getobj(struct net *net, struct sock *nlsk,
 	const struct nfgenmsg *nfmsg = nlmsg_data(nlh);
 	u8 genmask = nft_genmask_cur(net);
 	int family = nfmsg->nfgen_family;
-	const struct nft_af_info *afi;
 	const struct nft_table *table;
 	struct nft_object *obj;
 	struct sk_buff *skb2;
@@ -4734,11 +4693,8 @@ static int nf_tables_getobj(struct net *net, struct sock *nlsk,
 	    !nla[NFTA_OBJ_TYPE])
 		return -EINVAL;
 
-	afi = nf_tables_afinfo_lookup(net, family, false);
-	if (IS_ERR(afi))
-		return PTR_ERR(afi);
-
-	table = nf_tables_table_lookup(afi, nla[NFTA_OBJ_TABLE], genmask);
+	table = nf_tables_table_lookup(net, nla[NFTA_OBJ_TABLE], family,
+				       genmask);
 	if (IS_ERR(table))
 		return PTR_ERR(table);
 
@@ -4784,32 +4740,33 @@ static int nf_tables_delobj(struct net *net, struct sock *nlsk,
 	const struct nfgenmsg *nfmsg = nlmsg_data(nlh);
 	u8 genmask = nft_genmask_next(net);
 	int family = nfmsg->nfgen_family;
-	struct nft_af_info *afi;
 	struct nft_table *table;
 	struct nft_object *obj;
 	struct nft_ctx ctx;
 	u32 objtype;
 
 	if (!nla[NFTA_OBJ_TYPE] ||
-	    !nla[NFTA_OBJ_NAME])
+	    (!nla[NFTA_OBJ_NAME] && !nla[NFTA_OBJ_HANDLE]))
 		return -EINVAL;
 
-	afi = nf_tables_afinfo_lookup(net, family, true);
-	if (IS_ERR(afi))
-		return PTR_ERR(afi);
-
-	table = nf_tables_table_lookup(afi, nla[NFTA_OBJ_TABLE], genmask);
+	table = nf_tables_table_lookup(net, nla[NFTA_OBJ_TABLE], family,
+				       genmask);
 	if (IS_ERR(table))
 		return PTR_ERR(table);
 
 	objtype = ntohl(nla_get_be32(nla[NFTA_OBJ_TYPE]));
-	obj = nf_tables_obj_lookup(table, nla[NFTA_OBJ_NAME], objtype, genmask);
+	if (nla[NFTA_OBJ_HANDLE])
+		obj = nf_tables_obj_lookup_byhandle(table, nla[NFTA_OBJ_HANDLE],
+						    objtype, genmask);
+	else
+		obj = nf_tables_obj_lookup(table, nla[NFTA_OBJ_NAME],
+					   objtype, genmask);
 	if (IS_ERR(obj))
 		return PTR_ERR(obj);
 	if (obj->use > 0)
 		return -EBUSY;
 
-	nft_ctx_init(&ctx, net, skb, nlh, afi, table, NULL, nla);
+	nft_ctx_init(&ctx, net, skb, nlh, family, table, NULL, nla);
 
 	return nft_delobj(&ctx, obj);
 }
@@ -4847,7 +4804,613 @@ static void nf_tables_obj_notify(const struct nft_ctx *ctx,
 				 struct nft_object *obj, int event)
 {
 	nft_obj_notify(ctx->net, ctx->table, obj, ctx->portid, ctx->seq, event,
-		       ctx->afi->family, ctx->report, GFP_KERNEL);
+		       ctx->family, ctx->report, GFP_KERNEL);
+}
+
+/*
+ * Flow tables
+ */
+void nft_register_flowtable_type(struct nf_flowtable_type *type)
+{
+	nfnl_lock(NFNL_SUBSYS_NFTABLES);
+	list_add_tail_rcu(&type->list, &nf_tables_flowtables);
+	nfnl_unlock(NFNL_SUBSYS_NFTABLES);
+}
+EXPORT_SYMBOL_GPL(nft_register_flowtable_type);
+
+void nft_unregister_flowtable_type(struct nf_flowtable_type *type)
+{
+	nfnl_lock(NFNL_SUBSYS_NFTABLES);
+	list_del_rcu(&type->list);
+	nfnl_unlock(NFNL_SUBSYS_NFTABLES);
+}
+EXPORT_SYMBOL_GPL(nft_unregister_flowtable_type);
+
+static const struct nla_policy nft_flowtable_policy[NFTA_FLOWTABLE_MAX + 1] = {
+	[NFTA_FLOWTABLE_TABLE]		= { .type = NLA_STRING,
+					    .len = NFT_NAME_MAXLEN - 1 },
+	[NFTA_FLOWTABLE_NAME]		= { .type = NLA_STRING,
+					    .len = NFT_NAME_MAXLEN - 1 },
+	[NFTA_FLOWTABLE_HOOK]		= { .type = NLA_NESTED },
+	[NFTA_FLOWTABLE_HANDLE]		= { .type = NLA_U64 },
+};
+
+struct nft_flowtable *nf_tables_flowtable_lookup(const struct nft_table *table,
+						 const struct nlattr *nla,
+						 u8 genmask)
+{
+	struct nft_flowtable *flowtable;
+
+	list_for_each_entry(flowtable, &table->flowtables, list) {
+		if (!nla_strcmp(nla, flowtable->name) &&
+		    nft_active_genmask(flowtable, genmask))
+			return flowtable;
+	}
+	return ERR_PTR(-ENOENT);
+}
+EXPORT_SYMBOL_GPL(nf_tables_flowtable_lookup);
+
+struct nft_flowtable *
+nf_tables_flowtable_lookup_byhandle(const struct nft_table *table,
+				    const struct nlattr *nla, u8 genmask)
+{
+	struct nft_flowtable *flowtable;
+
+	list_for_each_entry(flowtable, &table->flowtables, list) {
+		if (be64_to_cpu(nla_get_be64(nla)) == flowtable->handle &&
+		    nft_active_genmask(flowtable, genmask))
+			return flowtable;
+	}
+	return ERR_PTR(-ENOENT);
+}
+
+#define NFT_FLOWTABLE_DEVICE_MAX	8
+
+static int nf_tables_parse_devices(const struct nft_ctx *ctx,
+				   const struct nlattr *attr,
+				   struct net_device *dev_array[], int *len)
+{
+	const struct nlattr *tmp;
+	struct net_device *dev;
+	char ifname[IFNAMSIZ];
+	int rem, n = 0, err;
+
+	nla_for_each_nested(tmp, attr, rem) {
+		if (nla_type(tmp) != NFTA_DEVICE_NAME) {
+			err = -EINVAL;
+			goto err1;
+		}
+
+		nla_strlcpy(ifname, tmp, IFNAMSIZ);
+		dev = dev_get_by_name(ctx->net, ifname);
+		if (!dev) {
+			err = -ENOENT;
+			goto err1;
+		}
+
+		dev_array[n++] = dev;
+		if (n == NFT_FLOWTABLE_DEVICE_MAX) {
+			err = -EFBIG;
+			goto err1;
+		}
+	}
+	if (!n)
+		return -EINVAL;
+
+	err = 0;
+err1:
+	*len = n;
+	return err;
+}
+
+static const struct nla_policy nft_flowtable_hook_policy[NFTA_FLOWTABLE_HOOK_MAX + 1] = {
+	[NFTA_FLOWTABLE_HOOK_NUM]	= { .type = NLA_U32 },
+	[NFTA_FLOWTABLE_HOOK_PRIORITY]	= { .type = NLA_U32 },
+	[NFTA_FLOWTABLE_HOOK_DEVS]	= { .type = NLA_NESTED },
+};
+
+static int nf_tables_flowtable_parse_hook(const struct nft_ctx *ctx,
+					  const struct nlattr *attr,
+					  struct nft_flowtable *flowtable)
+{
+	struct net_device *dev_array[NFT_FLOWTABLE_DEVICE_MAX];
+	struct nlattr *tb[NFTA_FLOWTABLE_HOOK_MAX + 1];
+	struct nf_hook_ops *ops;
+	int hooknum, priority;
+	int err, n = 0, i;
+
+	err = nla_parse_nested(tb, NFTA_FLOWTABLE_HOOK_MAX, attr,
+			       nft_flowtable_hook_policy, NULL);
+	if (err < 0)
+		return err;
+
+	if (!tb[NFTA_FLOWTABLE_HOOK_NUM] ||
+	    !tb[NFTA_FLOWTABLE_HOOK_PRIORITY] ||
+	    !tb[NFTA_FLOWTABLE_HOOK_DEVS])
+		return -EINVAL;
+
+	hooknum = ntohl(nla_get_be32(tb[NFTA_FLOWTABLE_HOOK_NUM]));
+	if (hooknum != NF_NETDEV_INGRESS)
+		return -EINVAL;
+
+	priority = ntohl(nla_get_be32(tb[NFTA_FLOWTABLE_HOOK_PRIORITY]));
+
+	err = nf_tables_parse_devices(ctx, tb[NFTA_FLOWTABLE_HOOK_DEVS],
+				      dev_array, &n);
+	if (err < 0)
+		goto err1;
+
+	ops = kcalloc(n, sizeof(struct nf_hook_ops), GFP_KERNEL);
+	if (!ops) {
+		err = -ENOMEM;
+		goto err1;
+	}
+
+	flowtable->hooknum	= hooknum;
+	flowtable->priority	= priority;
+	flowtable->ops		= ops;
+	flowtable->ops_len	= n;
+
+	for (i = 0; i < n; i++) {
+		flowtable->ops[i].pf		= NFPROTO_NETDEV;
+		flowtable->ops[i].hooknum	= hooknum;
+		flowtable->ops[i].priority	= priority;
+		flowtable->ops[i].priv		= &flowtable->data.rhashtable;
+		flowtable->ops[i].hook		= flowtable->data.type->hook;
+		flowtable->ops[i].dev		= dev_array[i];
+	}
+
+	err = 0;
+err1:
+	for (i = 0; i < n; i++)
+		dev_put(dev_array[i]);
+
+	return err;
+}
+
+static const struct nf_flowtable_type *__nft_flowtable_type_get(u8 family)
+{
+	const struct nf_flowtable_type *type;
+
+	list_for_each_entry(type, &nf_tables_flowtables, list) {
+		if (family == type->family)
+			return type;
+	}
+	return NULL;
+}
+
+static const struct nf_flowtable_type *nft_flowtable_type_get(u8 family)
+{
+	const struct nf_flowtable_type *type;
+
+	type = __nft_flowtable_type_get(family);
+	if (type != NULL && try_module_get(type->owner))
+		return type;
+
+#ifdef CONFIG_MODULES
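+	/* Drop the nfnl mutex around request_module() to avoid deadlock;
+	 * -EAGAIN makes nfnetlink replay the batch once the newly loaded
+	 * type has registered itself.
+	 */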
+	if (type == NULL) {
+		nfnl_unlock(NFNL_SUBSYS_NFTABLES);
+		request_module("nf-flowtable-%u", family);
+		nfnl_lock(NFNL_SUBSYS_NFTABLES);
+		if (__nft_flowtable_type_get(family))
+			return ERR_PTR(-EAGAIN);
+	}
+#endif
+	return ERR_PTR(-ENOENT);
+}
+
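+/* Walk every flow table in this netns under RCU protection. */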
+void nft_flow_table_iterate(struct net *net,
+			    void (*iter)(struct nf_flowtable *flowtable, void *data),
+			    void *data)
+{
+	struct nft_flowtable *flowtable;
+	const struct nft_table *table;
+
+	rcu_read_lock();
+	list_for_each_entry_rcu(table, &net->nft.tables, list) {
+		list_for_each_entry_rcu(flowtable, &table->flowtables, list) {
+			iter(&flowtable->data, data);
+		}
+	}
+	rcu_read_unlock();
+}
+EXPORT_SYMBOL_GPL(nft_flow_table_iterate);
+
+static void nft_unregister_flowtable_net_hooks(struct net *net,
+					       struct nft_flowtable *flowtable)
+{
+	int i;
+
+	for (i = 0; i < flowtable->ops_len; i++) {
+		if (!flowtable->ops[i].dev)
+			continue;
+
+		nf_unregister_net_hook(net, &flowtable->ops[i]);
+	}
+}
+
+static int nf_tables_newflowtable(struct net *net, struct sock *nlsk,
+				  struct sk_buff *skb,
+				  const struct nlmsghdr *nlh,
+				  const struct nlattr * const nla[],
+				  struct netlink_ext_ack *extack)
+{
+	const struct nfgenmsg *nfmsg = nlmsg_data(nlh);
+	const struct nf_flowtable_type *type;
+	u8 genmask = nft_genmask_next(net);
+	int family = nfmsg->nfgen_family;
+	struct nft_flowtable *flowtable;
+	struct nft_table *table;
+	struct nft_ctx ctx;
+	int err, i, k;
+
+	if (!nla[NFTA_FLOWTABLE_TABLE] ||
+	    !nla[NFTA_FLOWTABLE_NAME] ||
+	    !nla[NFTA_FLOWTABLE_HOOK])
+		return -EINVAL;
+
+	table = nf_tables_table_lookup(net, nla[NFTA_FLOWTABLE_TABLE],
+				       family, genmask);
+	if (IS_ERR(table))
+		return PTR_ERR(table);
+
+	flowtable = nf_tables_flowtable_lookup(table, nla[NFTA_FLOWTABLE_NAME],
+					       genmask);
+	if (IS_ERR(flowtable)) {
+		err = PTR_ERR(flowtable);
+		if (err != -ENOENT)
+			return err;
+	} else {
+		if (nlh->nlmsg_flags & NLM_F_EXCL)
+			return -EEXIST;
+
+		return 0;
+	}
+
+	nft_ctx_init(&ctx, net, skb, nlh, family, table, NULL, nla);
+
+	flowtable = kzalloc(sizeof(*flowtable), GFP_KERNEL);
+	if (!flowtable)
+		return -ENOMEM;
+
+	flowtable->table = table;
+	flowtable->handle = nf_tables_alloc_handle(table);
+
+	flowtable->name = nla_strdup(nla[NFTA_FLOWTABLE_NAME], GFP_KERNEL);
+	if (!flowtable->name) {
+		err = -ENOMEM;
+		goto err1;
+	}
+
+	type = nft_flowtable_type_get(family);
+	if (IS_ERR(type)) {
+		err = PTR_ERR(type);
+		goto err2;
+	}
+
+	flowtable->data.type = type;
+	err = rhashtable_init(&flowtable->data.rhashtable, type->params);
+	if (err < 0)
+		goto err3;
+
+	err = nf_tables_flowtable_parse_hook(&ctx, nla[NFTA_FLOWTABLE_HOOK],
+					     flowtable);
+	if (err < 0)
+		goto err3;
+
+	for (i = 0; i < flowtable->ops_len; i++) {
+		err = nf_register_net_hook(net, &flowtable->ops[i]);
+		if (err < 0)
+			goto err4;
+	}
+
+	err = nft_trans_flowtable_add(&ctx, NFT_MSG_NEWFLOWTABLE, flowtable);
+	if (err < 0)
+		goto err5;
+
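+	/* Periodically expire idle flows; deferrable work avoids waking an
+	 * otherwise idle system just for garbage collection.
+	 */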
+	INIT_DEFERRABLE_WORK(&flowtable->data.gc_work, type->gc);
+	queue_delayed_work(system_power_efficient_wq,
+			   &flowtable->data.gc_work, HZ);
+
+	list_add_tail_rcu(&flowtable->list, &table->flowtables);
+	table->use++;
+
+	return 0;
+err5:
+	i = flowtable->ops_len;
+err4:
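+	/* Unwind only the hooks that were actually registered. */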
+	for (k = i - 1; k >= 0; k--)
+		nf_unregister_net_hook(net, &flowtable->ops[k]);
+
+	kfree(flowtable->ops);
+err3:
+	module_put(type->owner);
+err2:
+	kfree(flowtable->name);
+err1:
+	kfree(flowtable);
+	return err;
+}
+
+static int nf_tables_delflowtable(struct net *net, struct sock *nlsk,
+				  struct sk_buff *skb,
+				  const struct nlmsghdr *nlh,
+				  const struct nlattr * const nla[],
+				  struct netlink_ext_ack *extack)
+{
+	const struct nfgenmsg *nfmsg = nlmsg_data(nlh);
+	u8 genmask = nft_genmask_next(net);
+	int family = nfmsg->nfgen_family;
+	struct nft_flowtable *flowtable;
+	struct nft_table *table;
+	struct nft_ctx ctx;
+
+	table = nf_tables_table_lookup(net, nla[NFTA_FLOWTABLE_TABLE],
+				       family, genmask);
+	if (IS_ERR(table))
+		return PTR_ERR(table);
+
+	if (nla[NFTA_FLOWTABLE_HANDLE])
+		flowtable = nf_tables_flowtable_lookup_byhandle(table,
+								nla[NFTA_FLOWTABLE_HANDLE],
+								genmask);
+	else
+		flowtable = nf_tables_flowtable_lookup(table,
+						       nla[NFTA_FLOWTABLE_NAME],
+						       genmask);
+	if (IS_ERR(flowtable))
+		return PTR_ERR(flowtable);
+	if (flowtable->use > 0)
+		return -EBUSY;
+
+	nft_ctx_init(&ctx, net, skb, nlh, family, table, NULL, nla);
+
+	return nft_delflowtable(&ctx, flowtable);
+}
+
+static int nf_tables_fill_flowtable_info(struct sk_buff *skb, struct net *net,
+					 u32 portid, u32 seq, int event,
+					 u32 flags, int family,
+					 struct nft_flowtable *flowtable)
+{
+	struct nlattr *nest, *nest_devs;
+	struct nfgenmsg *nfmsg;
+	struct nlmsghdr *nlh;
+	int i;
+
+	event = nfnl_msg_type(NFNL_SUBSYS_NFTABLES, event);
+	nlh = nlmsg_put(skb, portid, seq, event, sizeof(struct nfgenmsg), flags);
+	if (nlh == NULL)
+		goto nla_put_failure;
+
+	nfmsg = nlmsg_data(nlh);
+	nfmsg->nfgen_family	= family;
+	nfmsg->version		= NFNETLINK_V0;
+	nfmsg->res_id		= htons(net->nft.base_seq & 0xffff);
+
+	if (nla_put_string(skb, NFTA_FLOWTABLE_TABLE, flowtable->table->name) ||
+	    nla_put_string(skb, NFTA_FLOWTABLE_NAME, flowtable->name) ||
+	    nla_put_be32(skb, NFTA_FLOWTABLE_USE, htonl(flowtable->use)) ||
+	    nla_put_be64(skb, NFTA_FLOWTABLE_HANDLE, cpu_to_be64(flowtable->handle),
+			 NFTA_FLOWTABLE_PAD))
+		goto nla_put_failure;
+
+	nest = nla_nest_start(skb, NFTA_FLOWTABLE_HOOK);
+	if (nla_put_be32(skb, NFTA_FLOWTABLE_HOOK_NUM, htonl(flowtable->hooknum)) ||
+	    nla_put_be32(skb, NFTA_FLOWTABLE_HOOK_PRIORITY, htonl(flowtable->priority)))
+		goto nla_put_failure;
+
+	nest_devs = nla_nest_start(skb, NFTA_FLOWTABLE_HOOK_DEVS);
+	if (!nest_devs)
+		goto nla_put_failure;
+
+	for (i = 0; i < flowtable->ops_len; i++) {
+		if (flowtable->ops[i].dev &&
+		    nla_put_string(skb, NFTA_DEVICE_NAME,
+				   flowtable->ops[i].dev->name))
+			goto nla_put_failure;
+	}
+	nla_nest_end(skb, nest_devs);
+	nla_nest_end(skb, nest);
+
+	nlmsg_end(skb, nlh);
+	return 0;
+
+nla_put_failure:
+	nlmsg_trim(skb, nlh);
+	return -1;
+}
+
+struct nft_flowtable_filter {
+	char		*table;
+};
+
+static int nf_tables_dump_flowtable(struct sk_buff *skb,
+				    struct netlink_callback *cb)
+{
+	const struct nfgenmsg *nfmsg = nlmsg_data(cb->nlh);
+	struct nft_flowtable_filter *filter = cb->data;
+	unsigned int idx = 0, s_idx = cb->args[0];
+	struct net *net = sock_net(skb->sk);
+	int family = nfmsg->nfgen_family;
+	struct nft_flowtable *flowtable;
+	const struct nft_table *table;
+
+	rcu_read_lock();
+	cb->seq = net->nft.base_seq;
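+	/* cb->args[0] resumes the dump where the previous call left off;
+	 * cb->seq lets nl_dump_check_consistent() detect changes mid-dump.
+	 */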
+
+	list_for_each_entry_rcu(table, &net->nft.tables, list) {
+		if (family != NFPROTO_UNSPEC && family != table->family)
+			continue;
+
+		list_for_each_entry_rcu(flowtable, &table->flowtables, list) {
+			if (!nft_is_active(net, flowtable))
+				goto cont;
+			if (idx < s_idx)
+				goto cont;
+			if (idx > s_idx)
+				memset(&cb->args[1], 0,
+				       sizeof(cb->args) - sizeof(cb->args[0]));
+			if (filter && filter->table[0] &&
+			    strcmp(filter->table, table->name))
+				goto cont;
+
+			if (nf_tables_fill_flowtable_info(skb, net, NETLINK_CB(cb->skb).portid,
+							  cb->nlh->nlmsg_seq,
+							  NFT_MSG_NEWFLOWTABLE,
+							  NLM_F_MULTI | NLM_F_APPEND,
+							  table->family, flowtable) < 0)
+				goto done;
+
+			nl_dump_check_consistent(cb, nlmsg_hdr(skb));
+cont:
+			idx++;
+		}
+	}
+done:
+	rcu_read_unlock();
+
+	cb->args[0] = idx;
+	return skb->len;
+}
+
+static int nf_tables_dump_flowtable_done(struct netlink_callback *cb)
+{
+	struct nft_flowtable_filter *filter = cb->data;
+
+	if (!filter)
+		return 0;
+
+	kfree(filter->table);
+	kfree(filter);
+
+	return 0;
+}
+
+static struct nft_flowtable_filter *
+nft_flowtable_filter_alloc(const struct nlattr * const nla[])
+{
+	struct nft_flowtable_filter *filter;
+
+	filter = kzalloc(sizeof(*filter), GFP_KERNEL);
+	if (!filter)
+		return ERR_PTR(-ENOMEM);
+
+	if (nla[NFTA_FLOWTABLE_TABLE]) {
+		filter->table = nla_strdup(nla[NFTA_FLOWTABLE_TABLE],
+					   GFP_KERNEL);
+		if (!filter->table) {
+			kfree(filter);
+			return ERR_PTR(-ENOMEM);
+		}
+	}
+	return filter;
+}
+
+static int nf_tables_getflowtable(struct net *net, struct sock *nlsk,
+				  struct sk_buff *skb,
+				  const struct nlmsghdr *nlh,
+				  const struct nlattr * const nla[],
+				  struct netlink_ext_ack *extack)
+{
+	const struct nfgenmsg *nfmsg = nlmsg_data(nlh);
+	u8 genmask = nft_genmask_cur(net);
+	int family = nfmsg->nfgen_family;
+	struct nft_flowtable *flowtable;
+	const struct nft_table *table;
+	struct sk_buff *skb2;
+	int err;
+
+	if (nlh->nlmsg_flags & NLM_F_DUMP) {
+		struct netlink_dump_control c = {
+			.dump = nf_tables_dump_flowtable,
+			.done = nf_tables_dump_flowtable_done,
+		};
+
+		if (nla[NFTA_FLOWTABLE_TABLE]) {
+			struct nft_flowtable_filter *filter;
+
+			filter = nft_flowtable_filter_alloc(nla);
+			if (IS_ERR(filter))
+				return PTR_ERR(filter);
+
+			c.data = filter;
+		}
+		return netlink_dump_start(nlsk, skb, nlh, &c);
+	}
+
+	if (!nla[NFTA_FLOWTABLE_NAME])
+		return -EINVAL;
+
+	table = nf_tables_table_lookup(net, nla[NFTA_FLOWTABLE_TABLE],
+				       family, genmask);
+	if (IS_ERR(table))
+		return PTR_ERR(table);
+
+	flowtable = nf_tables_flowtable_lookup(table, nla[NFTA_FLOWTABLE_NAME],
+					       genmask);
+	if (IS_ERR(flowtable))
+		return PTR_ERR(flowtable);
+
+	skb2 = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
+	if (!skb2)
+		return -ENOMEM;
+
+	err = nf_tables_fill_flowtable_info(skb2, net, NETLINK_CB(skb).portid,
+					    nlh->nlmsg_seq,
+					    NFT_MSG_NEWFLOWTABLE, 0, family,
+					    flowtable);
+	if (err < 0)
+		goto err;
+
+	return nlmsg_unicast(nlsk, skb2, NETLINK_CB(skb).portid);
+err:
+	kfree_skb(skb2);
+	return err;
+}
+
+static void nf_tables_flowtable_notify(struct nft_ctx *ctx,
+				       struct nft_flowtable *flowtable,
+				       int event)
+{
+	struct sk_buff *skb;
+	int err;
+
+	if (ctx->report &&
+	    !nfnetlink_has_listeners(ctx->net, NFNLGRP_NFTABLES))
+		return;
+
+	skb = nlmsg_new(NLMSG_GOODSIZE, GFP_KERNEL);
+	if (skb == NULL)
+		goto err;
+
+	err = nf_tables_fill_flowtable_info(skb, ctx->net, ctx->portid,
+					    ctx->seq, event, 0,
+					    ctx->family, flowtable);
+	if (err < 0) {
+		kfree_skb(skb);
+		goto err;
+	}
+
+	nfnetlink_send(skb, ctx->net, ctx->portid, NFNLGRP_NFTABLES,
+		       ctx->report, GFP_KERNEL);
+	return;
+err:
+	nfnetlink_set_err(ctx->net, ctx->portid, NFNLGRP_NFTABLES, -ENOBUFS);
+}
+
+static void nft_flowtable_destroy(void *ptr, void *arg)
+{
+	kfree(ptr);
+}
+
+static void nf_tables_flowtable_destroy(struct nft_flowtable *flowtable)
+{
+	cancel_delayed_work_sync(&flowtable->data.gc_work);
+	kfree(flowtable->name);
+	rhashtable_free_and_destroy(&flowtable->data.rhashtable,
+				    nft_flowtable_destroy, NULL);
+	module_put(flowtable->data.type->owner);
 }
 
 static int nf_tables_fill_gen_info(struct sk_buff *skb, struct net *net,
@@ -4880,6 +5443,46 @@ nla_put_failure:
 	return -EMSGSIZE;
 }
 
+static void nft_flowtable_event(unsigned long event, struct net_device *dev,
+				struct nft_flowtable *flowtable)
+{
+	int i;
+
+	for (i = 0; i < flowtable->ops_len; i++) {
+		if (flowtable->ops[i].dev != dev)
+			continue;
+
+		nf_unregister_net_hook(dev_net(dev), &flowtable->ops[i]);
+		flowtable->ops[i].dev = NULL;
+		break;
+	}
+}
+
+static int nf_tables_flowtable_event(struct notifier_block *this,
+				     unsigned long event, void *ptr)
+{
+	struct net_device *dev = netdev_notifier_info_to_dev(ptr);
+	struct nft_flowtable *flowtable;
+	struct nft_table *table;
+
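+	/* When a device unregisters, drop its ingress hook and clear the
+	 * ops slot so dump and teardown paths skip it.
+	 */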
+	if (event != NETDEV_UNREGISTER)
+		return 0;
+
+	nfnl_lock(NFNL_SUBSYS_NFTABLES);
+	list_for_each_entry(table, &dev_net(dev)->nft.tables, list) {
+		list_for_each_entry(flowtable, &table->flowtables, list) {
+			nft_flowtable_event(event, dev, flowtable);
+		}
+	}
+	nfnl_unlock(NFNL_SUBSYS_NFTABLES);
+
+	return NOTIFY_DONE;
+}
+
+static struct notifier_block nf_tables_flowtable_notifier = {
+	.notifier_call	= nf_tables_flowtable_event,
+};
+
 static void nf_tables_gen_notify(struct net *net, struct sk_buff *skb,
 				 int event)
 {
@@ -5032,6 +5635,21 @@ static const struct nfnl_callback nf_tables_cb[NFT_MSG_MAX] = {
 		.attr_count	= NFTA_OBJ_MAX,
 		.policy		= nft_obj_policy,
 	},
+	[NFT_MSG_NEWFLOWTABLE] = {
+		.call_batch	= nf_tables_newflowtable,
+		.attr_count	= NFTA_FLOWTABLE_MAX,
+		.policy		= nft_flowtable_policy,
+	},
+	[NFT_MSG_GETFLOWTABLE] = {
+		.call		= nf_tables_getflowtable,
+		.attr_count	= NFTA_FLOWTABLE_MAX,
+		.policy		= nft_flowtable_policy,
+	},
+	[NFT_MSG_DELFLOWTABLE] = {
+		.call_batch	= nf_tables_delflowtable,
+		.attr_count	= NFTA_FLOWTABLE_MAX,
+		.policy		= nft_flowtable_policy,
+	},
 };
 
 static void nft_chain_commit_update(struct nft_trans *trans)
@@ -5077,6 +5695,9 @@ static void nf_tables_commit_release(struct nft_trans *trans)
 	case NFT_MSG_DELOBJ:
 		nft_obj_destroy(nft_trans_obj(trans));
 		break;
+	case NFT_MSG_DELFLOWTABLE:
+		nf_tables_flowtable_destroy(nft_trans_flowtable(trans));
+		break;
 	}
 	kfree(trans);
 }
@@ -5103,7 +5724,6 @@ static int nf_tables_commit(struct net *net, struct sk_buff *skb)
 			if (nft_trans_table_update(trans)) {
 				if (!nft_trans_table_enable(trans)) {
 					nf_tables_table_disable(net,
-								trans->ctx.afi,
 								trans->ctx.table);
 					trans->ctx.table->flags |= NFT_TABLE_F_DORMANT;
 				}
@@ -5129,10 +5749,9 @@ static int nf_tables_commit(struct net *net, struct sk_buff *skb)
 		case NFT_MSG_DELCHAIN:
 			list_del_rcu(&trans->ctx.chain->list);
 			nf_tables_chain_notify(&trans->ctx, NFT_MSG_DELCHAIN);
-			nf_tables_unregister_hooks(trans->ctx.net,
-						   trans->ctx.table,
-						   trans->ctx.chain,
-						   trans->ctx.afi->nops);
+			nf_tables_unregister_hook(trans->ctx.net,
+						  trans->ctx.table,
+						  trans->ctx.chain);
 			break;
 		case NFT_MSG_NEWRULE:
 			nft_clear(trans->ctx.net, nft_trans_rule(trans));
@@ -5152,7 +5771,7 @@ static int nf_tables_commit(struct net *net, struct sk_buff *skb)
 			/* This avoids hitting -EBUSY when deleting the table
 			 * from the transaction.
 			 */
-			if (nft_trans_set(trans)->flags & NFT_SET_ANONYMOUS &&
+			if (nft_set_is_anonymous(nft_trans_set(trans)) &&
 			    !list_empty(&nft_trans_set(trans)->bindings))
 				trans->ctx.table->use--;
 
@@ -5195,6 +5814,21 @@ static int nf_tables_commit(struct net *net, struct sk_buff *skb)
 			nf_tables_obj_notify(&trans->ctx, nft_trans_obj(trans),
 					     NFT_MSG_DELOBJ);
 			break;
+		case NFT_MSG_NEWFLOWTABLE:
+			nft_clear(net, nft_trans_flowtable(trans));
+			nf_tables_flowtable_notify(&trans->ctx,
+						   nft_trans_flowtable(trans),
+						   NFT_MSG_NEWFLOWTABLE);
+			nft_trans_destroy(trans);
+			break;
+		case NFT_MSG_DELFLOWTABLE:
+			list_del_rcu(&nft_trans_flowtable(trans)->list);
+			nf_tables_flowtable_notify(&trans->ctx,
+						   nft_trans_flowtable(trans),
+						   NFT_MSG_DELFLOWTABLE);
+			nft_unregister_flowtable_net_hooks(net,
+					nft_trans_flowtable(trans));
+			break;
 		}
 	}
 
@@ -5232,6 +5866,9 @@ static void nf_tables_abort_release(struct nft_trans *trans)
 	case NFT_MSG_NEWOBJ:
 		nft_obj_destroy(nft_trans_obj(trans));
 		break;
+	case NFT_MSG_NEWFLOWTABLE:
+		nf_tables_flowtable_destroy(nft_trans_flowtable(trans));
+		break;
 	}
 	kfree(trans);
 }
@@ -5248,7 +5885,6 @@ static int nf_tables_abort(struct net *net, struct sk_buff *skb)
 			if (nft_trans_table_update(trans)) {
 				if (nft_trans_table_enable(trans)) {
 					nf_tables_table_disable(net,
-								trans->ctx.afi,
 								trans->ctx.table);
 					trans->ctx.table->flags |= NFT_TABLE_F_DORMANT;
 				}
@@ -5269,10 +5905,9 @@ static int nf_tables_abort(struct net *net, struct sk_buff *skb)
 			} else {
 				trans->ctx.table->use--;
 				list_del_rcu(&trans->ctx.chain->list);
-				nf_tables_unregister_hooks(trans->ctx.net,
-							   trans->ctx.table,
-							   trans->ctx.chain,
-							   trans->ctx.afi->nops);
+				nf_tables_unregister_hook(trans->ctx.net,
+							  trans->ctx.table,
+							  trans->ctx.chain);
 			}
 			break;
 		case NFT_MSG_DELCHAIN:
@@ -5322,6 +5957,17 @@ static int nf_tables_abort(struct net *net, struct sk_buff *skb)
 			nft_clear(trans->ctx.net, nft_trans_obj(trans));
 			nft_trans_destroy(trans);
 			break;
+		case NFT_MSG_NEWFLOWTABLE:
+			trans->ctx.table->use--;
+			list_del_rcu(&nft_trans_flowtable(trans)->list);
+			nft_unregister_flowtable_net_hooks(net,
+					nft_trans_flowtable(trans));
+			break;
+		case NFT_MSG_DELFLOWTABLE:
+			trans->ctx.table->use++;
+			nft_clear(trans->ctx.net, nft_trans_flowtable(trans));
+			nft_trans_destroy(trans);
+			break;
 		}
 	}
 
@@ -5373,7 +6019,7 @@ int nft_chain_validate_hooks(const struct nft_chain *chain,
 	if (nft_is_base_chain(chain)) {
 		basechain = nft_base_chain(chain);
 
-		if ((1 << basechain->ops[0].hooknum) & hook_flags)
+		if ((1 << basechain->ops.hooknum) & hook_flags)
 			return 0;
 
 		return -EOPNOTSUPP;
@@ -5841,28 +6487,13 @@ int nft_data_dump(struct sk_buff *skb, int attr, const struct nft_data *data,
 }
 EXPORT_SYMBOL_GPL(nft_data_dump);
 
-static int __net_init nf_tables_init_net(struct net *net)
-{
-	INIT_LIST_HEAD(&net->nft.af_info);
-	INIT_LIST_HEAD(&net->nft.commit_list);
-	net->nft.base_seq = 1;
-	return 0;
-}
-
-static void __net_exit nf_tables_exit_net(struct net *net)
-{
-	WARN_ON_ONCE(!list_empty(&net->nft.af_info));
-	WARN_ON_ONCE(!list_empty(&net->nft.commit_list));
-}
-
 int __nft_release_basechain(struct nft_ctx *ctx)
 {
 	struct nft_rule *rule, *nr;
 
 	BUG_ON(!nft_is_base_chain(ctx->chain));
 
-	nf_tables_unregister_hooks(ctx->net, ctx->chain->table, ctx->chain,
-				   ctx->afi->nops);
+	nf_tables_unregister_hook(ctx->net, ctx->chain->table, ctx->chain);
 	list_for_each_entry_safe(rule, nr, &ctx->chain->rules, list) {
 		list_del(&rule->list);
 		ctx->chain->use--;
@@ -5876,9 +6507,9 @@ int __nft_release_basechain(struct nft_ctx *ctx)
 }
 EXPORT_SYMBOL_GPL(__nft_release_basechain);
 
-/* Called by nft_unregister_afinfo() from __net_exit path, nfnl_lock is held. */
-static void __nft_release_afinfo(struct net *net, struct nft_af_info *afi)
+static void __nft_release_tables(struct net *net)
 {
+	struct nft_flowtable *flowtable, *nf;
 	struct nft_table *table, *nt;
 	struct nft_chain *chain, *nc;
 	struct nft_object *obj, *ne;
@@ -5886,13 +6517,16 @@ static void __nft_release_afinfo(struct net *net, struct nft_af_info *afi)
 	struct nft_set *set, *ns;
 	struct nft_ctx ctx = {
 		.net	= net,
-		.afi	= afi,
 	};
 
-	list_for_each_entry_safe(table, nt, &afi->tables, list) {
+	list_for_each_entry_safe(table, nt, &net->nft.tables, list) {
+		ctx.family = table->family;
+
 		list_for_each_entry(chain, &table->chains, list)
-			nf_tables_unregister_hooks(net, table, chain,
-						   afi->nops);
+			nf_tables_unregister_hook(net, table, chain);
+		list_for_each_entry(flowtable, &table->flowtables, list)
+			nf_unregister_net_hooks(net, flowtable->ops,
+						flowtable->ops_len);
 		/* No packets are walking on these chains anymore. */
 		ctx.table = table;
 		list_for_each_entry(chain, &table->chains, list) {
@@ -5903,6 +6537,11 @@ static void __nft_release_afinfo(struct net *net, struct nft_af_info *afi)
 				nf_tables_rule_destroy(&ctx, rule);
 			}
 		}
+		list_for_each_entry_safe(flowtable, nf, &table->flowtables, list) {
+			list_del(&flowtable->list);
+			table->use--;
+			nf_tables_flowtable_destroy(flowtable);
+		}
 		list_for_each_entry_safe(set, ns, &table->sets, list) {
 			list_del(&set->list);
 			table->use--;
@@ -5923,6 +6562,21 @@ static void __nft_release_afinfo(struct net *net, struct nft_af_info *afi)
 	}
 }
 
+static int __net_init nf_tables_init_net(struct net *net)
+{
+	INIT_LIST_HEAD(&net->nft.tables);
+	INIT_LIST_HEAD(&net->nft.commit_list);
+	net->nft.base_seq = 1;
+	return 0;
+}
+
+static void __net_exit nf_tables_exit_net(struct net *net)
+{
+	__nft_release_tables(net);
+	WARN_ON_ONCE(!list_empty(&net->nft.tables));
+	WARN_ON_ONCE(!list_empty(&net->nft.commit_list));
+}
+
 static struct pernet_operations nf_tables_net_ops = {
 	.init	= nf_tables_init_net,
 	.exit	= nf_tables_exit_net,
@@ -5947,7 +6601,8 @@ static int __init nf_tables_module_init(void)
 	if (err < 0)
 		goto err3;
 
-	pr_info("nf_tables: (c) 2007-2009 Patrick McHardy <kaber@trash.net>\n");
+	register_netdevice_notifier(&nf_tables_flowtable_notifier);
+
 	return register_pernet_subsys(&nf_tables_net_ops);
 err3:
 	nf_tables_core_module_exit();
@@ -5961,6 +6616,7 @@ static void __exit nf_tables_module_exit(void)
 {
 	unregister_pernet_subsys(&nf_tables_net_ops);
 	nfnetlink_subsys_unregister(&nf_tables_subsys);
+	unregister_netdevice_notifier(&nf_tables_flowtable_notifier);
 	rcu_barrier();
 	nf_tables_core_module_exit();
 	kfree(info);
diff --git a/net/netfilter/nf_tables_inet.c b/net/netfilter/nf_tables_inet.c
index f713cc205669..e30c7da09d0d 100644
--- a/net/netfilter/nf_tables_inet.c
+++ b/net/netfilter/nf_tables_inet.c
@@ -9,6 +9,7 @@
 #include <linux/init.h>
 #include <linux/module.h>
 #include <linux/ip.h>
+#include <linux/ipv6.h>
 #include <linux/netfilter_ipv4.h>
 #include <linux/netfilter_ipv6.h>
 #include <net/netfilter/nf_tables.h>
@@ -16,56 +17,27 @@
 #include <net/netfilter/nf_tables_ipv6.h>
 #include <net/ip.h>
 
-static void nft_inet_hook_ops_init(struct nf_hook_ops *ops, unsigned int n)
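+/* Single hook function for the inet family; IPv4/IPv6 pktinfo setup is
+ * selected at runtime from the hook state.
+ */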
+static unsigned int nft_do_chain_inet(void *priv, struct sk_buff *skb,
+				      const struct nf_hook_state *state)
 {
-	struct nft_af_info *afi;
-
-	if (n == 1)
-		afi = &nft_af_ipv4;
-	else
-		afi = &nft_af_ipv6;
-
-	ops->pf = afi->family;
-	if (afi->hooks[ops->hooknum])
-		ops->hook = afi->hooks[ops->hooknum];
-}
-
-static struct nft_af_info nft_af_inet __read_mostly = {
-	.family		= NFPROTO_INET,
-	.nhooks		= NF_INET_NUMHOOKS,
-	.owner		= THIS_MODULE,
-	.nops		= 2,
-	.hook_ops_init	= nft_inet_hook_ops_init,
-};
-
-static int __net_init nf_tables_inet_init_net(struct net *net)
-{
-	net->nft.inet = kmalloc(sizeof(struct nft_af_info), GFP_KERNEL);
-	if (net->nft.inet == NULL)
-		return -ENOMEM;
-	memcpy(net->nft.inet, &nft_af_inet, sizeof(nft_af_inet));
-
-	if (nft_register_afinfo(net, net->nft.inet) < 0)
-		goto err;
-
-	return 0;
-
-err:
-	kfree(net->nft.inet);
-	return -ENOMEM;
-}
-
-static void __net_exit nf_tables_inet_exit_net(struct net *net)
-{
-	nft_unregister_afinfo(net, net->nft.inet);
-	kfree(net->nft.inet);
+	struct nft_pktinfo pkt;
+
+	nft_set_pktinfo(&pkt, skb, state);
+
+	switch (state->pf) {
+	case NFPROTO_IPV4:
+		nft_set_pktinfo_ipv4(&pkt, skb);
+		break;
+	case NFPROTO_IPV6:
+		nft_set_pktinfo_ipv6(&pkt, skb);
+		break;
+	default:
+		break;
+	}
+
+	return nft_do_chain(&pkt, priv);
 }
 
-static struct pernet_operations nf_tables_inet_net_ops = {
-	.init	= nf_tables_inet_init_net,
-	.exit	= nf_tables_inet_exit_net,
-};
-
 static const struct nf_chain_type filter_inet = {
 	.name		= "filter",
 	.type		= NFT_CHAIN_T_DEFAULT,
@@ -76,26 +48,22 @@ static const struct nf_chain_type filter_inet = {
 			  (1 << NF_INET_FORWARD) |
 			  (1 << NF_INET_PRE_ROUTING) |
 			  (1 << NF_INET_POST_ROUTING),
+	.hooks		= {
+		[NF_INET_LOCAL_IN]	= nft_do_chain_inet,
+		[NF_INET_LOCAL_OUT]	= nft_do_chain_inet,
+		[NF_INET_FORWARD]	= nft_do_chain_inet,
+		[NF_INET_PRE_ROUTING]	= nft_do_chain_inet,
+		[NF_INET_POST_ROUTING]	= nft_do_chain_inet,
+	},
 };
 
 static int __init nf_tables_inet_init(void)
 {
-	int ret;
-
-	ret = nft_register_chain_type(&filter_inet);
-	if (ret < 0)
-		return ret;
-
-	ret = register_pernet_subsys(&nf_tables_inet_net_ops);
-	if (ret < 0)
-		nft_unregister_chain_type(&filter_inet);
-
-	return ret;
+	return nft_register_chain_type(&filter_inet);
 }
 
 static void __exit nf_tables_inet_exit(void)
 {
-	unregister_pernet_subsys(&nf_tables_inet_net_ops);
 	nft_unregister_chain_type(&filter_inet);
 }
 
@@ -104,4 +72,4 @@ module_exit(nf_tables_inet_exit);
 
 MODULE_LICENSE("GPL");
 MODULE_AUTHOR("Patrick McHardy <kaber@trash.net>");
-MODULE_ALIAS_NFT_FAMILY(1);
+MODULE_ALIAS_NFT_CHAIN(1, "filter");
diff --git a/net/netfilter/nf_tables_netdev.c b/net/netfilter/nf_tables_netdev.c
index 403432988313..4041fafca934 100644
--- a/net/netfilter/nf_tables_netdev.c
+++ b/net/netfilter/nf_tables_netdev.c
@@ -21,66 +21,32 @@ nft_do_chain_netdev(void *priv, struct sk_buff *skb,
 {
 	struct nft_pktinfo pkt;
 
+	nft_set_pktinfo(&pkt, skb, state);
+
 	switch (skb->protocol) {
 	case htons(ETH_P_IP):
-		nft_set_pktinfo_ipv4_validate(&pkt, skb, state);
+		nft_set_pktinfo_ipv4_validate(&pkt, skb);
 		break;
 	case htons(ETH_P_IPV6):
-		nft_set_pktinfo_ipv6_validate(&pkt, skb, state);
+		nft_set_pktinfo_ipv6_validate(&pkt, skb);
 		break;
 	default:
-		nft_set_pktinfo_unspec(&pkt, skb, state);
+		nft_set_pktinfo_unspec(&pkt, skb);
 		break;
 	}
 
 	return nft_do_chain(&pkt, priv);
 }
 
-static struct nft_af_info nft_af_netdev __read_mostly = {
-	.family		= NFPROTO_NETDEV,
-	.nhooks		= NF_NETDEV_NUMHOOKS,
-	.owner		= THIS_MODULE,
-	.flags		= NFT_AF_NEEDS_DEV,
-	.nops		= 1,
-	.hooks		= {
-		[NF_NETDEV_INGRESS]	= nft_do_chain_netdev,
-	},
-};
-
-static int nf_tables_netdev_init_net(struct net *net)
-{
-	net->nft.netdev = kmalloc(sizeof(struct nft_af_info), GFP_KERNEL);
-	if (net->nft.netdev == NULL)
-		return -ENOMEM;
-
-	memcpy(net->nft.netdev, &nft_af_netdev, sizeof(nft_af_netdev));
-
-	if (nft_register_afinfo(net, net->nft.netdev) < 0)
-		goto err;
-
-	return 0;
-err:
-	kfree(net->nft.netdev);
-	return -ENOMEM;
-}
-
-static void nf_tables_netdev_exit_net(struct net *net)
-{
-	nft_unregister_afinfo(net, net->nft.netdev);
-	kfree(net->nft.netdev);
-}
-
-static struct pernet_operations nf_tables_netdev_net_ops = {
-	.init	= nf_tables_netdev_init_net,
-	.exit	= nf_tables_netdev_exit_net,
-};
-
 static const struct nf_chain_type nft_filter_chain_netdev = {
 	.name		= "filter",
 	.type		= NFT_CHAIN_T_DEFAULT,
 	.family		= NFPROTO_NETDEV,
 	.owner		= THIS_MODULE,
 	.hook_mask	= (1 << NF_NETDEV_INGRESS),
+	.hooks		= {
+		[NF_NETDEV_INGRESS]	= nft_do_chain_netdev,
+	},
 };
 
 static void nft_netdev_event(unsigned long event, struct net_device *dev,
@@ -96,7 +62,7 @@ static void nft_netdev_event(unsigned long event, struct net_device *dev,
 		__nft_release_basechain(ctx);
 		break;
 	case NETDEV_CHANGENAME:
-		if (dev->ifindex != basechain->ops[0].dev->ifindex)
+		if (dev->ifindex != basechain->ops.dev->ifindex)
 			return;
 
 		strncpy(basechain->dev_name, dev->name, IFNAMSIZ);
@@ -108,7 +74,6 @@ static int nf_tables_netdev_event(struct notifier_block *this,
 				  unsigned long event, void *ptr)
 {
 	struct net_device *dev = netdev_notifier_info_to_dev(ptr);
-	struct nft_af_info *afi;
 	struct nft_table *table;
 	struct nft_chain *chain, *nr;
 	struct nft_ctx ctx = {
@@ -120,20 +85,18 @@ static int nf_tables_netdev_event(struct notifier_block *this,
 		return NOTIFY_DONE;
 
 	nfnl_lock(NFNL_SUBSYS_NFTABLES);
-	list_for_each_entry(afi, &dev_net(dev)->nft.af_info, list) {
-		ctx.afi = afi;
-		if (afi->family != NFPROTO_NETDEV)
+	list_for_each_entry(table, &ctx.net->nft.tables, list) {
+		if (table->family != NFPROTO_NETDEV)
 			continue;
 
-		list_for_each_entry(table, &afi->tables, list) {
-			ctx.table = table;
-			list_for_each_entry_safe(chain, nr, &table->chains, list) {
-				if (!nft_is_base_chain(chain))
-					continue;
+		ctx.family = table->family;
+		ctx.table = table;
+		list_for_each_entry_safe(chain, nr, &table->chains, list) {
+			if (!nft_is_base_chain(chain))
+				continue;
 
-				ctx.chain = chain;
-				nft_netdev_event(event, dev, &ctx);
-			}
+			ctx.chain = chain;
+			nft_netdev_event(event, dev, &ctx);
 		}
 	}
 	nfnl_unlock(NFNL_SUBSYS_NFTABLES);
@@ -153,27 +116,21 @@ static int __init nf_tables_netdev_init(void)
 	if (ret)
 		return ret;
 
-	ret = register_pernet_subsys(&nf_tables_netdev_net_ops);
-	if (ret)
-		goto err1;
-
 	ret = register_netdevice_notifier(&nf_tables_netdev_notifier);
 	if (ret)
-		goto err2;
+		goto err_register_netdevice_notifier;
 
 	return 0;
 
-err2:
-	unregister_pernet_subsys(&nf_tables_netdev_net_ops);
-err1:
+err_register_netdevice_notifier:
 	nft_unregister_chain_type(&nft_filter_chain_netdev);
+
 	return ret;
 }
 
 static void __exit nf_tables_netdev_exit(void)
 {
 	unregister_netdevice_notifier(&nf_tables_netdev_notifier);
-	unregister_pernet_subsys(&nf_tables_netdev_net_ops);
 	nft_unregister_chain_type(&nft_filter_chain_netdev);
 }
 
@@ -182,4 +139,4 @@ module_exit(nf_tables_netdev_exit);
 
 MODULE_LICENSE("GPL");
 MODULE_AUTHOR("Pablo Neira Ayuso <pablo@netfilter.org>");
-MODULE_ALIAS_NFT_FAMILY(5); /* NFPROTO_NETDEV */
+MODULE_ALIAS_NFT_CHAIN(5, "filter"); /* NFPROTO_NETDEV */
diff --git a/net/netfilter/nfnetlink.c b/net/netfilter/nfnetlink.c
index 733d3e4a30d8..03ead8a9e90c 100644
--- a/net/netfilter/nfnetlink.c
+++ b/net/netfilter/nfnetlink.c
@@ -37,8 +37,6 @@ MODULE_ALIAS_NET_PF_PROTO(PF_NETLINK, NETLINK_NETFILTER);
 	rcu_dereference_protected(table[(id)].subsys, \
 				  lockdep_nfnl_is_held((id)))
 
-static char __initdata nfversion[] = "0.30";
-
 static struct {
 	struct mutex				mutex;
 	const struct nfnetlink_subsystem __rcu	*subsys;
@@ -580,13 +578,11 @@ static int __init nfnetlink_init(void)
 	for (i=0; i<NFNL_SUBSYS_COUNT; i++)
 		mutex_init(&table[i].mutex);
 
-	pr_info("Netfilter messages via NETLINK v%s.\n", nfversion);
 	return register_pernet_subsys(&nfnetlink_net_ops);
 }
 
 static void __exit nfnetlink_exit(void)
 {
-	pr_info("Removing netfilter NETLINK layer.\n");
 	unregister_pernet_subsys(&nfnetlink_net_ops);
 }
 module_init(nfnetlink_init);
diff --git a/net/netfilter/nfnetlink_acct.c b/net/netfilter/nfnetlink_acct.c
index c45e6d4358ab..88d427f9f9e6 100644
--- a/net/netfilter/nfnetlink_acct.c
+++ b/net/netfilter/nfnetlink_acct.c
@@ -527,7 +527,6 @@ static int __init nfnl_acct_init(void)
 		goto err_out;
 	}
 
-	pr_info("nfnl_acct: registering with nfnetlink.\n");
 	ret = nfnetlink_subsys_register(&nfnl_acct_subsys);
 	if (ret < 0) {
 		pr_err("nfnl_acct_init: cannot register with nfnetlink.\n");
@@ -543,7 +542,6 @@ err_out:
 
 static void __exit nfnl_acct_exit(void)
 {
-	pr_info("nfnl_acct: unregistering from nfnetlink.\n");
 	nfnetlink_subsys_unregister(&nfnl_acct_subsys);
 	unregister_pernet_subsys(&nfnl_acct_ops);
 }
diff --git a/net/netfilter/nfnetlink_cttimeout.c b/net/netfilter/nfnetlink_cttimeout.c
index 32b1c0b44e79..95b04702a655 100644
--- a/net/netfilter/nfnetlink_cttimeout.c
+++ b/net/netfilter/nfnetlink_cttimeout.c
@@ -615,8 +615,6 @@ err_out:
 
 static void __exit cttimeout_exit(void)
 {
-	pr_info("cttimeout: unregistering from nfnetlink.\n");
-
 	nfnetlink_subsys_unregister(&cttimeout_subsys);
 
 	unregister_pernet_subsys(&cttimeout_ops);
diff --git a/net/netfilter/nfnetlink_log.c b/net/netfilter/nfnetlink_log.c
index e955bec0acc6..7b46aa4c478d 100644
--- a/net/netfilter/nfnetlink_log.c
+++ b/net/netfilter/nfnetlink_log.c
@@ -1054,7 +1054,6 @@ static int nful_open(struct inode *inode, struct file *file)
 }
 
 static const struct file_operations nful_file_ops = {
-	.owner	 = THIS_MODULE,
 	.open	 = nful_open,
 	.read	 = seq_read,
 	.llseek	 = seq_lseek,
diff --git a/net/netfilter/nfnetlink_queue.c b/net/netfilter/nfnetlink_queue.c
index c09b36755ed7..8bba23160a68 100644
--- a/net/netfilter/nfnetlink_queue.c
+++ b/net/netfilter/nfnetlink_queue.c
@@ -941,23 +941,18 @@ static struct notifier_block nfqnl_dev_notifier = {
 	.notifier_call	= nfqnl_rcv_dev_event,
 };
 
-static unsigned int nfqnl_nf_hook_drop(struct net *net)
+static void nfqnl_nf_hook_drop(struct net *net)
 {
 	struct nfnl_queue_net *q = nfnl_queue_pernet(net);
-	unsigned int instances = 0;
 	int i;
 
 	for (i = 0; i < INSTANCE_BUCKETS; i++) {
 		struct nfqnl_instance *inst;
 		struct hlist_head *head = &q->instance_table[i];
 
-		hlist_for_each_entry_rcu(inst, head, hlist) {
+		hlist_for_each_entry_rcu(inst, head, hlist)
 			nfqnl_flush(inst, NULL, 0);
-			instances++;
-		}
 	}
-
-	return instances;
 }
 
 static int
@@ -1482,7 +1477,6 @@ static int nfqnl_open(struct inode *inode, struct file *file)
 }
 
 static const struct file_operations nfqnl_file_ops = {
-	.owner	 = THIS_MODULE,
 	.open	 = nfqnl_open,
 	.read	 = seq_read,
 	.llseek	 = seq_lseek,
diff --git a/net/netfilter/nft_cmp.c b/net/netfilter/nft_cmp.c
index c2945eb3397c..fa90a8402845 100644
--- a/net/netfilter/nft_cmp.c
+++ b/net/netfilter/nft_cmp.c
@@ -44,6 +44,7 @@ static void nft_cmp_eval(const struct nft_expr *expr,
 	case NFT_CMP_LT:
 		if (d == 0)
 			goto mismatch;
+		/* fall through */
 	case NFT_CMP_LTE:
 		if (d > 0)
 			goto mismatch;
@@ -51,6 +52,7 @@ static void nft_cmp_eval(const struct nft_expr *expr,
 	case NFT_CMP_GT:
 		if (d == 0)
 			goto mismatch;
+		/* fall through */
 	case NFT_CMP_GTE:
 		if (d < 0)
 			goto mismatch;
diff --git a/net/netfilter/nft_compat.c b/net/netfilter/nft_compat.c
index b89f4f65b2a0..8e23726b9081 100644
--- a/net/netfilter/nft_compat.c
+++ b/net/netfilter/nft_compat.c
@@ -144,7 +144,7 @@ nft_target_set_tgchk_param(struct xt_tgchk_param *par,
 {
 	par->net	= ctx->net;
 	par->table	= ctx->table->name;
-	switch (ctx->afi->family) {
+	switch (ctx->family) {
 	case AF_INET:
 		entry->e4.ip.proto = proto;
 		entry->e4.ip.invflags = inv ? IPT_INV_PROTO : 0;
@@ -169,13 +169,13 @@ nft_target_set_tgchk_param(struct xt_tgchk_param *par,
 	if (nft_is_base_chain(ctx->chain)) {
 		const struct nft_base_chain *basechain =
 						nft_base_chain(ctx->chain);
-		const struct nf_hook_ops *ops = &basechain->ops[0];
+		const struct nf_hook_ops *ops = &basechain->ops;
 
 		par->hook_mask = 1 << ops->hooknum;
 	} else {
 		par->hook_mask = 0;
 	}
-	par->family	= ctx->afi->family;
+	par->family	= ctx->family;
 	par->nft_compat = true;
 }
 
@@ -267,7 +267,7 @@ nft_target_destroy(const struct nft_ctx *ctx, const struct nft_expr *expr)
 	par.net = ctx->net;
 	par.target = target;
 	par.targinfo = info;
-	par.family = ctx->afi->family;
+	par.family = ctx->family;
 	if (par.target->destroy != NULL)
 		par.target->destroy(&par);
 
@@ -302,7 +302,7 @@ static int nft_target_validate(const struct nft_ctx *ctx,
 	if (nft_is_base_chain(ctx->chain)) {
 		const struct nft_base_chain *basechain =
 						nft_base_chain(ctx->chain);
-		const struct nf_hook_ops *ops = &basechain->ops[0];
+		const struct nf_hook_ops *ops = &basechain->ops;
 
 		hook_mask = 1 << ops->hooknum;
 		if (target->hooks && !(hook_mask & target->hooks))
@@ -358,7 +358,7 @@ nft_match_set_mtchk_param(struct xt_mtchk_param *par, const struct nft_ctx *ctx,
 {
 	par->net	= ctx->net;
 	par->table	= ctx->table->name;
-	switch (ctx->afi->family) {
+	switch (ctx->family) {
 	case AF_INET:
 		entry->e4.ip.proto = proto;
 		entry->e4.ip.invflags = inv ? IPT_INV_PROTO : 0;
@@ -383,13 +383,13 @@ nft_match_set_mtchk_param(struct xt_mtchk_param *par, const struct nft_ctx *ctx,
 	if (nft_is_base_chain(ctx->chain)) {
 		const struct nft_base_chain *basechain =
 						nft_base_chain(ctx->chain);
-		const struct nf_hook_ops *ops = &basechain->ops[0];
+		const struct nf_hook_ops *ops = &basechain->ops;
 
 		par->hook_mask = 1 << ops->hooknum;
 	} else {
 		par->hook_mask = 0;
 	}
-	par->family	= ctx->afi->family;
+	par->family	= ctx->family;
 	par->nft_compat = true;
 }
 
@@ -446,7 +446,7 @@ nft_match_destroy(const struct nft_ctx *ctx, const struct nft_expr *expr)
 	par.net = ctx->net;
 	par.match = match;
 	par.matchinfo = info;
-	par.family = ctx->afi->family;
+	par.family = ctx->family;
 	if (par.match->destroy != NULL)
 		par.match->destroy(&par);
 
@@ -481,7 +481,7 @@ static int nft_match_validate(const struct nft_ctx *ctx,
 	if (nft_is_base_chain(ctx->chain)) {
 		const struct nft_base_chain *basechain =
 						nft_base_chain(ctx->chain);
-		const struct nf_hook_ops *ops = &basechain->ops[0];
+		const struct nf_hook_ops *ops = &basechain->ops;
 
 		hook_mask = 1 << ops->hooknum;
 		if (match->hooks && !(hook_mask & match->hooks))
@@ -648,7 +648,7 @@ nft_match_select_ops(const struct nft_ctx *ctx,
 
 	mt_name = nla_data(tb[NFTA_MATCH_NAME]);
 	rev = ntohl(nla_get_be32(tb[NFTA_MATCH_REV]));
-	family = ctx->afi->family;
+	family = ctx->family;
 
 	/* Re-use the existing match if it's already loaded. */
 	list_for_each_entry(nft_match, &nft_match_list, head) {
@@ -733,7 +733,7 @@ nft_target_select_ops(const struct nft_ctx *ctx,
 
 	tg_name = nla_data(tb[NFTA_TARGET_NAME]);
 	rev = ntohl(nla_get_be32(tb[NFTA_TARGET_REV]));
-	family = ctx->afi->family;
+	family = ctx->family;
 
 	/* Re-use the existing target if it's already loaded. */
 	list_for_each_entry(nft_target, &nft_target_list, head) {
@@ -812,8 +812,6 @@ static int __init nft_compat_module_init(void)
 		goto err_target;
 	}
 
-	pr_info("nf_tables_compat: (c) 2012 Pablo Neira Ayuso <pablo@netfilter.org>\n");
-
 	return ret;
 
 err_target:
diff --git a/net/netfilter/nft_ct.c b/net/netfilter/nft_ct.c
index 2647b895f4b0..6ab274b14484 100644
--- a/net/netfilter/nft_ct.c
+++ b/net/netfilter/nft_ct.c
@@ -405,7 +405,7 @@ static int nft_ct_get_init(const struct nft_ctx *ctx,
 		if (tb[NFTA_CT_DIRECTION] == NULL)
 			return -EINVAL;
 
-		switch (ctx->afi->family) {
+		switch (ctx->family) {
 		case NFPROTO_IPV4:
 			len = FIELD_SIZEOF(struct nf_conntrack_tuple,
 					   src.u3.ip);
@@ -456,7 +456,7 @@ static int nft_ct_get_init(const struct nft_ctx *ctx,
 	if (err < 0)
 		return err;
 
-	err = nf_ct_netns_get(ctx->net, ctx->afi->family);
+	err = nf_ct_netns_get(ctx->net, ctx->family);
 	if (err < 0)
 		return err;
 
@@ -550,7 +550,7 @@ static int nft_ct_set_init(const struct nft_ctx *ctx,
 	if (err < 0)
 		goto err1;
 
-	err = nf_ct_netns_get(ctx->net, ctx->afi->family);
+	err = nf_ct_netns_get(ctx->net, ctx->family);
 	if (err < 0)
 		goto err1;
 
@@ -564,7 +564,7 @@ err1:
 static void nft_ct_get_destroy(const struct nft_ctx *ctx,
 			       const struct nft_expr *expr)
 {
-	nf_ct_netns_put(ctx->net, ctx->afi->family);
+	nf_ct_netns_put(ctx->net, ctx->family);
 }
 
 static void nft_ct_set_destroy(const struct nft_ctx *ctx,
@@ -573,7 +573,7 @@ static void nft_ct_set_destroy(const struct nft_ctx *ctx,
 	struct nft_ct *priv = nft_expr_priv(expr);
 
 	__nft_ct_set_destroy(ctx, priv);
-	nf_ct_netns_put(ctx->net, ctx->afi->family);
+	nf_ct_netns_put(ctx->net, ctx->family);
 }
 
 static int nft_ct_get_dump(struct sk_buff *skb, const struct nft_expr *expr)
@@ -734,7 +734,7 @@ static int nft_ct_helper_obj_init(const struct nft_ctx *ctx,
 	struct nft_ct_helper_obj *priv = nft_obj_data(obj);
 	struct nf_conntrack_helper *help4, *help6;
 	char name[NF_CT_HELPER_NAME_LEN];
-	int family = ctx->afi->family;
+	int family = ctx->family;
 
 	if (!tb[NFTA_CT_HELPER_NAME] || !tb[NFTA_CT_HELPER_L4PROTO])
 		return -EINVAL;
@@ -753,14 +753,14 @@ static int nft_ct_helper_obj_init(const struct nft_ctx *ctx,
 
 	switch (family) {
 	case NFPROTO_IPV4:
-		if (ctx->afi->family == NFPROTO_IPV6)
+		if (ctx->family == NFPROTO_IPV6)
 			return -EINVAL;
 
 		help4 = nf_conntrack_helper_try_module_get(name, family,
 							   priv->l4proto);
 		break;
 	case NFPROTO_IPV6:
-		if (ctx->afi->family == NFPROTO_IPV4)
+		if (ctx->family == NFPROTO_IPV4)
 			return -EINVAL;
 
 		help6 = nf_conntrack_helper_try_module_get(name, family,
diff --git a/net/netfilter/nft_dynset.c b/net/netfilter/nft_dynset.c
index 66221ad891a9..fc83e29d6634 100644
--- a/net/netfilter/nft_dynset.c
+++ b/net/netfilter/nft_dynset.c
@@ -164,7 +164,7 @@ static int nft_dynset_init(const struct nft_ctx *ctx,
 	}
 
 	priv->sreg_key = nft_parse_register(tb[NFTA_DYNSET_SREG_KEY]);
-	err = nft_validate_register_load(priv->sreg_key, set->klen);;
+	err = nft_validate_register_load(priv->sreg_key, set->klen);
 	if (err < 0)
 		return err;
 
@@ -184,7 +184,7 @@ static int nft_dynset_init(const struct nft_ctx *ctx,
 	if (tb[NFTA_DYNSET_EXPR] != NULL) {
 		if (!(set->flags & NFT_SET_EVAL))
 			return -EINVAL;
-		if (!(set->flags & NFT_SET_ANONYMOUS))
+		if (!nft_set_is_anonymous(set))
 			return -EOPNOTSUPP;
 
 		priv->expr = nft_expr_init(ctx, tb[NFTA_DYNSET_EXPR]);
diff --git a/net/netfilter/nft_flow_offload.c b/net/netfilter/nft_flow_offload.c
new file mode 100644
index 000000000000..4503b8dcf9c0
--- /dev/null
+++ b/net/netfilter/nft_flow_offload.c
@@ -0,0 +1,264 @@
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/netlink.h>
+#include <linux/netfilter.h>
+#include <linux/workqueue.h>
+#include <linux/spinlock.h>
+#include <linux/netfilter/nf_tables.h>
+#include <net/ip.h> /* for IPv4 options */
+#include <net/netfilter/nf_tables.h>
+#include <net/netfilter/nf_tables_core.h>
+#include <net/netfilter/nf_conntrack_core.h>
+#include <linux/netfilter/nf_conntrack_common.h>
+#include <net/netfilter/nf_flow_table.h>
+
+struct nft_flow_offload {
+	struct nft_flowtable	*flowtable;
+};
+
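+/* Record the packet's own dst for @dir and perform a route lookup for
+ * the reply direction.
+ */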
+static int nft_flow_route(const struct nft_pktinfo *pkt,
+			  const struct nf_conn *ct,
+			  struct nf_flow_route *route,
+			  enum ip_conntrack_dir dir)
+{
+	struct dst_entry *this_dst = skb_dst(pkt->skb);
+	struct dst_entry *other_dst = NULL;
+	struct flowi fl;
+
+	memset(&fl, 0, sizeof(fl));
+	switch (nft_pf(pkt)) {
+	case NFPROTO_IPV4:
+		fl.u.ip4.daddr = ct->tuplehash[!dir].tuple.dst.u3.ip;
+		break;
+	case NFPROTO_IPV6:
+		fl.u.ip6.daddr = ct->tuplehash[!dir].tuple.dst.u3.in6;
+		break;
+	}
+
+	nf_route(nft_net(pkt), &other_dst, &fl, false, nft_pf(pkt));
+	if (!other_dst)
+		return -ENOENT;
+
+	route->tuple[dir].dst		= this_dst;
+	route->tuple[dir].ifindex	= nft_in(pkt)->ifindex;
+	route->tuple[!dir].dst		= other_dst;
+	route->tuple[!dir].ifindex	= nft_out(pkt)->ifindex;
+
+	return 0;
+}
+
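+/* Packets carrying IPv4 options or an IPsec secpath must stay on the
+ * slow path.
+ */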
+static bool nft_flow_offload_skip(struct sk_buff *skb)
+{
+	struct ip_options *opt = &(IPCB(skb)->opt);
+
+	if (unlikely(opt->optlen))
+		return true;
+	if (skb_sec_path(skb))
+		return true;
+
+	return false;
+}
+
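+/* Offload established TCP/UDP conntrack entries that use no helper;
+ * everything else falls back to the standard path via NFT_BREAK.
+ */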
+static void nft_flow_offload_eval(const struct nft_expr *expr,
+				  struct nft_regs *regs,
+				  const struct nft_pktinfo *pkt)
+{
+	struct nft_flow_offload *priv = nft_expr_priv(expr);
+	struct nf_flowtable *flowtable = &priv->flowtable->data;
+	enum ip_conntrack_info ctinfo;
+	struct nf_flow_route route;
+	struct flow_offload *flow;
+	enum ip_conntrack_dir dir;
+	struct nf_conn *ct;
+	int ret;
+
+	if (nft_flow_offload_skip(pkt->skb))
+		goto out;
+
+	ct = nf_ct_get(pkt->skb, &ctinfo);
+	if (!ct)
+		goto out;
+
+	switch (ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple.dst.protonum) {
+	case IPPROTO_TCP:
+	case IPPROTO_UDP:
+		break;
+	default:
+		goto out;
+	}
+
+	if (test_bit(IPS_HELPER_BIT, &ct->status))
+		goto out;
+
+	if (ctinfo == IP_CT_NEW ||
+	    ctinfo == IP_CT_RELATED)
+		goto out;
+
+	if (test_and_set_bit(IPS_OFFLOAD_BIT, &ct->status))
+		goto out;
+
+	dir = CTINFO2DIR(ctinfo);
+	if (nft_flow_route(pkt, ct, &route, dir) < 0)
+		goto err_flow_route;
+
+	flow = flow_offload_alloc(ct, &route);
+	if (!flow)
+		goto err_flow_alloc;
+
+	ret = flow_offload_add(flowtable, flow);
+	if (ret < 0)
+		goto err_flow_add;
+
+	return;
+
+err_flow_add:
+	flow_offload_free(flow);
+err_flow_alloc:
+	dst_release(route.tuple[!dir].dst);
+err_flow_route:
+	clear_bit(IPS_OFFLOAD_BIT, &ct->status);
+out:
+	regs->verdict.code = NFT_BREAK;
+}
+
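+/* The flow_offload expression is only valid from the forward hook. */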
+static int nft_flow_offload_validate(const struct nft_ctx *ctx,
+				     const struct nft_expr *expr,
+				     const struct nft_data **data)
+{
+	unsigned int hook_mask = (1 << NF_INET_FORWARD);
+
+	return nft_chain_validate_hooks(ctx->chain, hook_mask);
+}
+
+static int nft_flow_offload_init(const struct nft_ctx *ctx,
+				 const struct nft_expr *expr,
+				 const struct nlattr * const tb[])
+{
+	struct nft_flow_offload *priv = nft_expr_priv(expr);
+	u8 genmask = nft_genmask_next(ctx->net);
+	struct nft_flowtable *flowtable;
+
+	if (!tb[NFTA_FLOW_TABLE_NAME])
+		return -EINVAL;
+
+	flowtable = nf_tables_flowtable_lookup(ctx->table,
+					       tb[NFTA_FLOW_TABLE_NAME],
+					       genmask);
+	if (IS_ERR(flowtable))
+		return PTR_ERR(flowtable);
+
+	priv->flowtable = flowtable;
+	flowtable->use++;
+
+	return nf_ct_netns_get(ctx->net, ctx->family);
+}
+
+static void nft_flow_offload_destroy(const struct nft_ctx *ctx,
+				     const struct nft_expr *expr)
+{
+	struct nft_flow_offload *priv = nft_expr_priv(expr);
+
+	priv->flowtable->use--;
+	nf_ct_netns_put(ctx->net, ctx->family);
+}
+
+static int nft_flow_offload_dump(struct sk_buff *skb, const struct nft_expr *expr)
+{
+	struct nft_flow_offload *priv = nft_expr_priv(expr);
+
+	if (nla_put_string(skb, NFTA_FLOW_TABLE_NAME, priv->flowtable->name))
+		goto nla_put_failure;
+
+	return 0;
+
+nla_put_failure:
+	return -1;
+}
+
+static struct nft_expr_type nft_flow_offload_type;
+static const struct nft_expr_ops nft_flow_offload_ops = {
+	.type		= &nft_flow_offload_type,
+	.size		= NFT_EXPR_SIZE(sizeof(struct nft_flow_offload)),
+	.eval		= nft_flow_offload_eval,
+	.init		= nft_flow_offload_init,
+	.destroy	= nft_flow_offload_destroy,
+	.validate	= nft_flow_offload_validate,
+	.dump		= nft_flow_offload_dump,
+};
+
+static struct nft_expr_type nft_flow_offload_type __read_mostly = {
+	.name		= "flow_offload",
+	.ops		= &nft_flow_offload_ops,
+	.maxattr	= NFTA_FLOW_MAX,
+	.owner		= THIS_MODULE,
+};
+
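+/* Mark flows bound to a going-down device (or all flows) as dead. */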
+static void flow_offload_iterate_cleanup(struct flow_offload *flow, void *data)
+{
+	struct net_device *dev = data;
+
+	if (dev && flow->tuplehash[0].tuple.iifidx != dev->ifindex)
+		return;
+
+	flow_offload_dead(flow);
+}
+
+static void nft_flow_offload_iterate_cleanup(struct nf_flowtable *flowtable,
+					     void *data)
+{
+	nf_flow_table_iterate(flowtable, flow_offload_iterate_cleanup, data);
+}
+
+static int flow_offload_netdev_event(struct notifier_block *this,
+				     unsigned long event, void *ptr)
+{
+	struct net_device *dev = netdev_notifier_info_to_dev(ptr);
+
+	if (event != NETDEV_DOWN)
+		return NOTIFY_DONE;
+
+	nft_flow_table_iterate(dev_net(dev), nft_flow_offload_iterate_cleanup, dev);
+
+	return NOTIFY_DONE;
+}
+
+static struct notifier_block flow_offload_netdev_notifier = {
+	.notifier_call	= flow_offload_netdev_event,
+};
+
+static int __init nft_flow_offload_module_init(void)
+{
+	int err;
+
+	register_netdevice_notifier(&flow_offload_netdev_notifier);
+
+	err = nft_register_expr(&nft_flow_offload_type);
+	if (err < 0)
+		goto register_expr;
+
+	return 0;
+
+register_expr:
+	unregister_netdevice_notifier(&flow_offload_netdev_notifier);
+	return err;
+}
+
+static void __exit nft_flow_offload_module_exit(void)
+{
+	struct net *net;
+
+	nft_unregister_expr(&nft_flow_offload_type);
+	unregister_netdevice_notifier(&flow_offload_netdev_notifier);
+	rtnl_lock();
+	for_each_net(net)
+		nft_flow_table_iterate(net, nft_flow_offload_iterate_cleanup, NULL);
+	rtnl_unlock();
+}
+
+module_init(nft_flow_offload_module_init);
+module_exit(nft_flow_offload_module_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Pablo Neira Ayuso <pablo@netfilter.org>");
+MODULE_ALIAS_NFT_EXPR("flow_offload");
diff --git a/net/netfilter/nft_log.c b/net/netfilter/nft_log.c
index 6f6e64423643..a27be36dc0af 100644
--- a/net/netfilter/nft_log.c
+++ b/net/netfilter/nft_log.c
@@ -112,7 +112,7 @@ static int nft_log_init(const struct nft_ctx *ctx,
 		break;
 	}
 
-	err = nf_logger_find_get(ctx->afi->family, li->type);
+	err = nf_logger_find_get(ctx->family, li->type);
 	if (err < 0)
 		goto err1;
 
@@ -133,7 +133,7 @@ static void nft_log_destroy(const struct nft_ctx *ctx,
 	if (priv->prefix != nft_log_null_prefix)
 		kfree(priv->prefix);
 
-	nf_logger_put(ctx->afi->family, li->type);
+	nf_logger_put(ctx->family, li->type);
 }
 
 static int nft_log_dump(struct sk_buff *skb, const struct nft_expr *expr)
diff --git a/net/netfilter/nft_masq.c b/net/netfilter/nft_masq.c
index 6ac03d4266c9..9d8655bc1bea 100644
--- a/net/netfilter/nft_masq.c
+++ b/net/netfilter/nft_masq.c
@@ -73,7 +73,7 @@ int nft_masq_init(const struct nft_ctx *ctx,
 		}
 	}
 
-	return nf_ct_netns_get(ctx->net, ctx->afi->family);
+	return nf_ct_netns_get(ctx->net, ctx->family);
 }
 EXPORT_SYMBOL_GPL(nft_masq_init);
 
diff --git a/net/netfilter/nft_meta.c b/net/netfilter/nft_meta.c
index 5a60eb23a7ed..8fb91940e2e7 100644
--- a/net/netfilter/nft_meta.c
+++ b/net/netfilter/nft_meta.c
@@ -210,6 +210,11 @@ void nft_meta_get_eval(const struct nft_expr *expr,
 		*dest = prandom_u32_state(state);
 		break;
 	}
+#ifdef CONFIG_XFRM
+	case NFT_META_SECPATH:
+		nft_reg_store8(dest, !!skb->sp);
+		break;
+#endif
 	default:
 		WARN_ON(1);
 		goto err;
@@ -308,6 +313,11 @@ int nft_meta_get_init(const struct nft_ctx *ctx,
 		prandom_init_once(&nft_prandom_state);
 		len = sizeof(u32);
 		break;
+#ifdef CONFIG_XFRM
+	case NFT_META_SECPATH:
+		len = sizeof(u8);
+		break;
+#endif
 	default:
 		return -EOPNOTSUPP;
 	}
@@ -318,6 +328,38 @@ int nft_meta_get_init(const struct nft_ctx *ctx,
 }
 EXPORT_SYMBOL_GPL(nft_meta_get_init);
 
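+/* NFT_META_SECPATH is restricted to input-side hooks. */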
+static int nft_meta_get_validate(const struct nft_ctx *ctx,
+				 const struct nft_expr *expr,
+				 const struct nft_data **data)
+{
+#ifdef CONFIG_XFRM
+	const struct nft_meta *priv = nft_expr_priv(expr);
+	unsigned int hooks;
+
+	if (priv->key != NFT_META_SECPATH)
+		return 0;
+
+	switch (ctx->family) {
+	case NFPROTO_NETDEV:
+		hooks = 1 << NF_NETDEV_INGRESS;
+		break;
+	case NFPROTO_IPV4:
+	case NFPROTO_IPV6:
+	case NFPROTO_INET:
+		hooks = (1 << NF_INET_PRE_ROUTING) |
+			(1 << NF_INET_LOCAL_IN) |
+			(1 << NF_INET_FORWARD);
+		break;
+	default:
+		return -EOPNOTSUPP;
+	}
+
+	return nft_chain_validate_hooks(ctx->chain, hooks);
+#else
+	return 0;
+#endif
+}
+
 int nft_meta_set_validate(const struct nft_ctx *ctx,
 			  const struct nft_expr *expr,
 			  const struct nft_data **data)
@@ -328,7 +370,7 @@ int nft_meta_set_validate(const struct nft_ctx *ctx,
 	if (priv->key != NFT_META_PKTTYPE)
 		return 0;
 
-	switch (ctx->afi->family) {
+	switch (ctx->family) {
 	case NFPROTO_BRIDGE:
 		hooks = 1 << NF_BR_PRE_ROUTING;
 		break;
@@ -434,6 +476,7 @@ static const struct nft_expr_ops nft_meta_get_ops = {
 	.eval		= nft_meta_get_eval,
 	.init		= nft_meta_get_init,
 	.dump		= nft_meta_get_dump,
+	.validate	= nft_meta_get_validate,
 };
 
 static const struct nft_expr_ops nft_meta_set_ops = {
diff --git a/net/netfilter/nft_nat.c b/net/netfilter/nft_nat.c
index ed548d06b6dd..1f36954c2ba9 100644
--- a/net/netfilter/nft_nat.c
+++ b/net/netfilter/nft_nat.c
@@ -142,7 +142,7 @@ static int nft_nat_init(const struct nft_ctx *ctx, const struct nft_expr *expr,
 		return -EINVAL;
 
 	family = ntohl(nla_get_be32(tb[NFTA_NAT_FAMILY]));
-	if (family != ctx->afi->family)
+	if (family != ctx->family)
 		return -EOPNOTSUPP;
 
 	switch (family) {
diff --git a/net/netfilter/nft_redir.c b/net/netfilter/nft_redir.c
index 1e66538bf0ff..c64cbe78dee7 100644
--- a/net/netfilter/nft_redir.c
+++ b/net/netfilter/nft_redir.c
@@ -75,7 +75,7 @@ int nft_redir_init(const struct nft_ctx *ctx,
 			return -EINVAL;
 	}
 
-	return nf_ct_netns_get(ctx->net, ctx->afi->family);
+	return nf_ct_netns_get(ctx->net, ctx->family);
 }
 EXPORT_SYMBOL_GPL(nft_redir_init);
 
diff --git a/net/netfilter/nft_rt.c b/net/netfilter/nft_rt.c
index a6b7d05aeacf..11a2071b6dd4 100644
--- a/net/netfilter/nft_rt.c
+++ b/net/netfilter/nft_rt.c
@@ -27,7 +27,7 @@ static u16 get_tcpmss(const struct nft_pktinfo *pkt, const struct dst_entry *skb
 {
 	u32 minlen = sizeof(struct ipv6hdr), mtu = dst_mtu(skbdst);
 	const struct sk_buff *skb = pkt->skb;
-	const struct nf_afinfo *ai;
+	struct dst_entry *dst = NULL;
 	struct flowi fl;
 
 	memset(&fl, 0, sizeof(fl));
@@ -43,15 +43,10 @@ static u16 get_tcpmss(const struct nft_pktinfo *pkt, const struct dst_entry *skb
 		break;
 	}
 
-	ai = nf_get_afinfo(nft_pf(pkt));
-	if (ai) {
-		struct dst_entry *dst = NULL;
-
-		ai->route(nft_net(pkt), &dst, &fl, false);
-		if (dst) {
-			mtu = min(mtu, dst_mtu(dst));
-			dst_release(dst);
-		}
+	nf_route(nft_net(pkt), &dst, &fl, false, nft_pf(pkt));
+	if (dst) {
+		mtu = min(mtu, dst_mtu(dst));
+		dst_release(dst);
 	}
 
 	if (mtu <= minlen || mtu > 0xffff)
diff --git a/net/netfilter/nft_set_hash.c b/net/netfilter/nft_set_hash.c
index f8166c1d5430..3f1624ee056f 100644
--- a/net/netfilter/nft_set_hash.c
+++ b/net/netfilter/nft_set_hash.c
@@ -251,11 +251,7 @@ static void nft_rhash_walk(const struct nft_ctx *ctx, struct nft_set *set,
 	if (err)
 		return;
 
-	err = rhashtable_walk_start(&hti);
-	if (err && err != -EAGAIN) {
-		iter->err = err;
-		goto out;
-	}
+	rhashtable_walk_start(&hti);
 
 	while ((he = rhashtable_walk_next(&hti))) {
 		if (IS_ERR(he)) {
@@ -306,9 +302,7 @@ static void nft_rhash_gc(struct work_struct *work)
 	if (err)
 		goto schedule;
 
-	err = rhashtable_walk_start(&hti);
-	if (err && err != -EAGAIN)
-		goto out;
+	rhashtable_walk_start(&hti);
 
 	while ((he = rhashtable_walk_next(&hti))) {
 		if (IS_ERR(he)) {
diff --git a/net/netfilter/utils.c b/net/netfilter/utils.c
new file mode 100644
index 000000000000..0b660c568156
--- /dev/null
+++ b/net/netfilter/utils.c
@@ -0,0 +1,90 @@
+#include <linux/kernel.h>
+#include <linux/netfilter.h>
+#include <linux/netfilter_ipv4.h>
+#include <linux/netfilter_ipv6.h>
+#include <net/netfilter/nf_queue.h>
+
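+/* Protocol family dispatchers: call the IPv4 helpers directly and reach
+ * IPv6 through nf_ipv6_ops, replacing the old nf_afinfo indirection.
+ */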
+__sum16 nf_checksum(struct sk_buff *skb, unsigned int hook,
+		    unsigned int dataoff, u_int8_t protocol,
+		    unsigned short family)
+{
+	const struct nf_ipv6_ops *v6ops;
+	__sum16 csum = 0;
+
+	switch (family) {
+	case AF_INET:
+		csum = nf_ip_checksum(skb, hook, dataoff, protocol);
+		break;
+	case AF_INET6:
+		v6ops = rcu_dereference(nf_ipv6_ops);
+		if (v6ops)
+			csum = v6ops->checksum(skb, hook, dataoff, protocol);
+		break;
+	}
+
+	return csum;
+}
+EXPORT_SYMBOL_GPL(nf_checksum);
+
+__sum16 nf_checksum_partial(struct sk_buff *skb, unsigned int hook,
+			    unsigned int dataoff, unsigned int len,
+			    u_int8_t protocol, unsigned short family)
+{
+	const struct nf_ipv6_ops *v6ops;
+	__sum16 csum = 0;
+
+	switch (family) {
+	case AF_INET:
+		csum = nf_ip_checksum_partial(skb, hook, dataoff, len,
+					      protocol);
+		break;
+	case AF_INET6:
+		v6ops = rcu_dereference(nf_ipv6_ops);
+		if (v6ops)
+			csum = v6ops->checksum_partial(skb, hook, dataoff, len,
+						       protocol);
+		break;
+	}
+
+	return csum;
+}
+EXPORT_SYMBOL_GPL(nf_checksum_partial);
+
+int nf_route(struct net *net, struct dst_entry **dst, struct flowi *fl,
+	     bool strict, unsigned short family)
+{
+	const struct nf_ipv6_ops *v6ops;
+	int ret = 0;
+
+	switch (family) {
+	case AF_INET:
+		ret = nf_ip_route(net, dst, fl, strict);
+		break;
+	case AF_INET6:
+		v6ops = rcu_dereference(nf_ipv6_ops);
+		if (v6ops)
+			ret = v6ops->route(net, dst, fl, strict);
+		break;
+	}
+
+	return ret;
+}
+EXPORT_SYMBOL_GPL(nf_route);
+
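+/* Re-run routing for a packet reinjected from a userspace queue. */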
+int nf_reroute(struct sk_buff *skb, struct nf_queue_entry *entry)
+{
+	const struct nf_ipv6_ops *v6ops;
+	int ret = 0;
+
+	switch (entry->state.pf) {
+	case AF_INET:
+		ret = nf_ip_reroute(skb, entry);
+		break;
+	case AF_INET6:
+		v6ops = rcu_dereference(nf_ipv6_ops);
+		if (v6ops)
+			ret = v6ops->reroute(skb, entry);
+		break;
+	}
+	return ret;
+}
diff --git a/net/netfilter/x_tables.c b/net/netfilter/x_tables.c
index 55802e97f906..0b56bf05c169 100644
--- a/net/netfilter/x_tables.c
+++ b/net/netfilter/x_tables.c
@@ -1027,7 +1027,7 @@ void xt_free_table_info(struct xt_table_info *info)
 }
 EXPORT_SYMBOL(xt_free_table_info);
 
-/* Find table by name, grabs mutex & ref.  Returns NULL on error. */
+/* Find table by name, grabs mutex & ref.  Returns ERR_PTR on error. */
 struct xt_table *xt_find_table_lock(struct net *net, u_int8_t af,
 				    const char *name)
 {
@@ -1043,17 +1043,17 @@ struct xt_table *xt_find_table_lock(struct net *net, u_int8_t af,
 
 	/* Table doesn't exist in this netns, re-try init */
 	list_for_each_entry(t, &init_net.xt.tables[af], list) {
+		int err;
+
 		if (strcmp(t->name, name))
 			continue;
-		if (!try_module_get(t->me)) {
-			mutex_unlock(&xt[af].mutex);
-			return NULL;
-		}
-
+		if (!try_module_get(t->me))
+			goto out;
 		mutex_unlock(&xt[af].mutex);
-		if (t->table_init(net) != 0) {
+		err = t->table_init(net);
+		if (err < 0) {
 			module_put(t->me);
-			return NULL;
+			return ERR_PTR(err);
 		}
 
 		found = t;
@@ -1073,10 +1073,28 @@ struct xt_table *xt_find_table_lock(struct net *net, u_int8_t af,
 	module_put(found->me);
  out:
 	mutex_unlock(&xt[af].mutex);
-	return NULL;
+	return ERR_PTR(-ENOENT);
 }
 EXPORT_SYMBOL_GPL(xt_find_table_lock);
 
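+/* Like xt_find_table_lock(), but request the table module if it is missing. */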
+struct xt_table *xt_request_find_table_lock(struct net *net, u_int8_t af,
+					    const char *name)
+{
+	struct xt_table *t = xt_find_table_lock(net, af, name);
+
+#ifdef CONFIG_MODULES
+	if (IS_ERR(t)) {
+		int err = request_module("%stable_%s", xt_prefix[af], name);
+		if (err < 0)
+			return ERR_PTR(err);
+		t = xt_find_table_lock(net, af, name);
+	}
+#endif
+
+	return t;
+}
+EXPORT_SYMBOL_GPL(xt_request_find_table_lock);
+
 void xt_table_unlock(struct xt_table *table)
 {
 	mutex_unlock(&xt[table->af].mutex);
@@ -1344,7 +1362,6 @@ static int xt_table_open(struct inode *inode, struct file *file)
 }
 
 static const struct file_operations xt_table_ops = {
-	.owner	 = THIS_MODULE,
 	.open	 = xt_table_open,
 	.read	 = seq_read,
 	.llseek	 = seq_lseek,
@@ -1397,7 +1414,7 @@ static void *xt_mttg_seq_next(struct seq_file *seq, void *v, loff_t *ppos,
 		trav->curr = trav->curr->next;
 		if (trav->curr != trav->head)
 			break;
-		/* fallthru, _stop will unlock */
+		/* fall through */
 	default:
 		return NULL;
 	}
@@ -1480,7 +1497,6 @@ static int xt_match_open(struct inode *inode, struct file *file)
 }
 
 static const struct file_operations xt_match_ops = {
-	.owner	 = THIS_MODULE,
 	.open	 = xt_match_open,
 	.read	 = seq_read,
 	.llseek	 = seq_lseek,
@@ -1533,7 +1549,6 @@ static int xt_target_open(struct inode *inode, struct file *file)
 }
 
 static const struct file_operations xt_target_ops = {
-	.owner	 = THIS_MODULE,
 	.open	 = xt_target_open,
 	.read	 = seq_read,
 	.llseek	 = seq_lseek,
diff --git a/net/netfilter/xt_TCPMSS.c b/net/netfilter/xt_TCPMSS.c
index 9dae4d665965..99bb8e410f22 100644
--- a/net/netfilter/xt_TCPMSS.c
+++ b/net/netfilter/xt_TCPMSS.c
@@ -48,7 +48,6 @@ static u_int32_t tcpmss_reverse_mtu(struct net *net,
 				    unsigned int family)
 {
 	struct flowi fl;
-	const struct nf_afinfo *ai;
 	struct rtable *rt = NULL;
 	u_int32_t mtu     = ~0U;
 
@@ -62,10 +61,8 @@ static u_int32_t tcpmss_reverse_mtu(struct net *net,
 		memset(fl6, 0, sizeof(*fl6));
 		fl6->daddr = ipv6_hdr(skb)->saddr;
 	}
-	ai = nf_get_afinfo(family);
-	if (ai != NULL)
-		ai->route(net, (struct dst_entry **)&rt, &fl, false);
 
+	nf_route(net, (struct dst_entry **)&rt, &fl, false, family);
 	if (rt != NULL) {
 		mtu = dst_mtu(&rt->dst);
 		dst_release(&rt->dst);
diff --git a/net/netfilter/xt_addrtype.c b/net/netfilter/xt_addrtype.c
index 3b2be2ae6987..911a7c0da504 100644
--- a/net/netfilter/xt_addrtype.c
+++ b/net/netfilter/xt_addrtype.c
@@ -36,7 +36,7 @@ MODULE_ALIAS("ip6t_addrtype");
 static u32 match_lookup_rt6(struct net *net, const struct net_device *dev,
 			    const struct in6_addr *addr, u16 mask)
 {
-	const struct nf_afinfo *afinfo;
+	const struct nf_ipv6_ops *v6ops;
 	struct flowi6 flow;
 	struct rt6_info *rt;
 	u32 ret = 0;
@@ -47,17 +47,14 @@ static u32 match_lookup_rt6(struct net *net, const struct net_device *dev,
 	if (dev)
 		flow.flowi6_oif = dev->ifindex;
 
-	afinfo = nf_get_afinfo(NFPROTO_IPV6);
-	if (afinfo != NULL) {
-		const struct nf_ipv6_ops *v6ops;
-
+	v6ops = nf_get_ipv6_ops();
+	if (v6ops) {
 		if (dev && (mask & XT_ADDRTYPE_LOCAL)) {
-			v6ops = nf_get_ipv6_ops();
-			if (v6ops && v6ops->chk_addr(net, addr, dev, true))
+			if (v6ops->chk_addr(net, addr, dev, true))
 				ret = XT_ADDRTYPE_LOCAL;
 		}
-		route_err = afinfo->route(net, (struct dst_entry **)&rt,
-					  flowi6_to_flowi(&flow), false);
+		route_err = v6ops->route(net, (struct dst_entry **)&rt,
+					 flowi6_to_flowi(&flow), false);
 	} else {
 		route_err = 1;
 	}
diff --git a/net/netfilter/xt_connlimit.c b/net/netfilter/xt_connlimit.c
index a6214f235333..b1b17b9353e1 100644
--- a/net/netfilter/xt_connlimit.c
+++ b/net/netfilter/xt_connlimit.c
@@ -12,292 +12,30 @@
  * GPL (C) 1999  Rusty Russell (rusty@rustcorp.com.au).
  */
 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
-#include <linux/in.h>
-#include <linux/in6.h>
-#include <linux/ip.h>
-#include <linux/ipv6.h>
-#include <linux/jhash.h>
-#include <linux/slab.h>
-#include <linux/list.h>
-#include <linux/rbtree.h>
+
 #include <linux/module.h>
-#include <linux/random.h>
 #include <linux/skbuff.h>
-#include <linux/spinlock.h>
-#include <linux/netfilter/nf_conntrack_tcp.h>
 #include <linux/netfilter/x_tables.h>
 #include <linux/netfilter/xt_connlimit.h>
+
 #include <net/netfilter/nf_conntrack.h>
 #include <net/netfilter/nf_conntrack_core.h>
 #include <net/netfilter/nf_conntrack_tuple.h>
 #include <net/netfilter/nf_conntrack_zones.h>
-
-#define CONNLIMIT_SLOTS		256U
-
-#ifdef CONFIG_LOCKDEP
-#define CONNLIMIT_LOCK_SLOTS	8U
-#else
-#define CONNLIMIT_LOCK_SLOTS	256U
-#endif
-
-#define CONNLIMIT_GC_MAX_NODES	8
-
-/* we will save the tuples of all connections we care about */
-struct xt_connlimit_conn {
-	struct hlist_node		node;
-	struct nf_conntrack_tuple	tuple;
-};
-
-struct xt_connlimit_rb {
-	struct rb_node node;
-	struct hlist_head hhead; /* connections/hosts in same subnet */
-	union nf_inet_addr addr; /* search key */
-};
-
-static spinlock_t xt_connlimit_locks[CONNLIMIT_LOCK_SLOTS] __cacheline_aligned_in_smp;
-
-struct xt_connlimit_data {
-	struct rb_root climit_root[CONNLIMIT_SLOTS];
-};
-
-static u_int32_t connlimit_rnd __read_mostly;
-static struct kmem_cache *connlimit_rb_cachep __read_mostly;
-static struct kmem_cache *connlimit_conn_cachep __read_mostly;
-
-static inline unsigned int connlimit_iphash(__be32 addr)
-{
-	return jhash_1word((__force __u32)addr,
-			    connlimit_rnd) % CONNLIMIT_SLOTS;
-}
-
-static inline unsigned int
-connlimit_iphash6(const union nf_inet_addr *addr)
-{
-	return jhash2((u32 *)addr->ip6, ARRAY_SIZE(addr->ip6),
-		       connlimit_rnd) % CONNLIMIT_SLOTS;
-}
-
-static inline bool already_closed(const struct nf_conn *conn)
-{
-	if (nf_ct_protonum(conn) == IPPROTO_TCP)
-		return conn->proto.tcp.state == TCP_CONNTRACK_TIME_WAIT ||
-		       conn->proto.tcp.state == TCP_CONNTRACK_CLOSE;
-	else
-		return 0;
-}
-
-static int
-same_source(const union nf_inet_addr *addr,
-	    const union nf_inet_addr *u3, u_int8_t family)
-{
-	if (family == NFPROTO_IPV4)
-		return ntohl(addr->ip) - ntohl(u3->ip);
-
-	return memcmp(addr->ip6, u3->ip6, sizeof(addr->ip6));
-}
-
-static bool add_hlist(struct hlist_head *head,
-		      const struct nf_conntrack_tuple *tuple,
-		      const union nf_inet_addr *addr)
-{
-	struct xt_connlimit_conn *conn;
-
-	conn = kmem_cache_alloc(connlimit_conn_cachep, GFP_ATOMIC);
-	if (conn == NULL)
-		return false;
-	conn->tuple = *tuple;
-	hlist_add_head(&conn->node, head);
-	return true;
-}
-
-static unsigned int check_hlist(struct net *net,
-				struct hlist_head *head,
-				const struct nf_conntrack_tuple *tuple,
-				const struct nf_conntrack_zone *zone,
-				bool *addit)
-{
-	const struct nf_conntrack_tuple_hash *found;
-	struct xt_connlimit_conn *conn;
-	struct hlist_node *n;
-	struct nf_conn *found_ct;
-	unsigned int length = 0;
-
-	*addit = true;
-
-	/* check the saved connections */
-	hlist_for_each_entry_safe(conn, n, head, node) {
-		found = nf_conntrack_find_get(net, zone, &conn->tuple);
-		if (found == NULL) {
-			hlist_del(&conn->node);
-			kmem_cache_free(connlimit_conn_cachep, conn);
-			continue;
-		}
-
-		found_ct = nf_ct_tuplehash_to_ctrack(found);
-
-		if (nf_ct_tuple_equal(&conn->tuple, tuple)) {
-			/*
-			 * Just to be sure we have it only once in the list.
-			 * We should not see tuples twice unless someone hooks
-			 * this into a table without "-p tcp --syn".
-			 */
-			*addit = false;
-		} else if (already_closed(found_ct)) {
-			/*
-			 * we do not care about connections which are
-			 * closed already -> ditch it
-			 */
-			nf_ct_put(found_ct);
-			hlist_del(&conn->node);
-			kmem_cache_free(connlimit_conn_cachep, conn);
-			continue;
-		}
-
-		nf_ct_put(found_ct);
-		length++;
-	}
-
-	return length;
-}
-
-static void tree_nodes_free(struct rb_root *root,
-			    struct xt_connlimit_rb *gc_nodes[],
-			    unsigned int gc_count)
-{
-	struct xt_connlimit_rb *rbconn;
-
-	while (gc_count) {
-		rbconn = gc_nodes[--gc_count];
-		rb_erase(&rbconn->node, root);
-		kmem_cache_free(connlimit_rb_cachep, rbconn);
-	}
-}
-
-static unsigned int
-count_tree(struct net *net, struct rb_root *root,
-	   const struct nf_conntrack_tuple *tuple,
-	   const union nf_inet_addr *addr,
-	   u8 family, const struct nf_conntrack_zone *zone)
-{
-	struct xt_connlimit_rb *gc_nodes[CONNLIMIT_GC_MAX_NODES];
-	struct rb_node **rbnode, *parent;
-	struct xt_connlimit_rb *rbconn;
-	struct xt_connlimit_conn *conn;
-	unsigned int gc_count;
-	bool no_gc = false;
-
- restart:
-	gc_count = 0;
-	parent = NULL;
-	rbnode = &(root->rb_node);
-	while (*rbnode) {
-		int diff;
-		bool addit;
-
-		rbconn = rb_entry(*rbnode, struct xt_connlimit_rb, node);
-
-		parent = *rbnode;
-		diff = same_source(addr, &rbconn->addr, family);
-		if (diff < 0) {
-			rbnode = &((*rbnode)->rb_left);
-		} else if (diff > 0) {
-			rbnode = &((*rbnode)->rb_right);
-		} else {
-			/* same source network -> be counted! */
-			unsigned int count;
-			count = check_hlist(net, &rbconn->hhead, tuple, zone, &addit);
-
-			tree_nodes_free(root, gc_nodes, gc_count);
-			if (!addit)
-				return count;
-
-			if (!add_hlist(&rbconn->hhead, tuple, addr))
-				return 0; /* hotdrop */
-
-			return count + 1;
-		}
-
-		if (no_gc || gc_count >= ARRAY_SIZE(gc_nodes))
-			continue;
-
-		/* only used for GC on hhead, retval and 'addit' ignored */
-		check_hlist(net, &rbconn->hhead, tuple, zone, &addit);
-		if (hlist_empty(&rbconn->hhead))
-			gc_nodes[gc_count++] = rbconn;
-	}
-
-	if (gc_count) {
-		no_gc = true;
-		tree_nodes_free(root, gc_nodes, gc_count);
-		/* tree_node_free before new allocation permits
-		 * allocator to re-use newly free'd object.
-		 *
-		 * This is a rare event; in most cases we will find
-		 * existing node to re-use. (or gc_count is 0).
-		 */
-		goto restart;
-	}
-
-	/* no match, need to insert new node */
-	rbconn = kmem_cache_alloc(connlimit_rb_cachep, GFP_ATOMIC);
-	if (rbconn == NULL)
-		return 0;
-
-	conn = kmem_cache_alloc(connlimit_conn_cachep, GFP_ATOMIC);
-	if (conn == NULL) {
-		kmem_cache_free(connlimit_rb_cachep, rbconn);
-		return 0;
-	}
-
-	conn->tuple = *tuple;
-	rbconn->addr = *addr;
-
-	INIT_HLIST_HEAD(&rbconn->hhead);
-	hlist_add_head(&conn->node, &rbconn->hhead);
-
-	rb_link_node(&rbconn->node, parent, rbnode);
-	rb_insert_color(&rbconn->node, root);
-	return 1;
-}
-
-static int count_them(struct net *net,
-		      struct xt_connlimit_data *data,
-		      const struct nf_conntrack_tuple *tuple,
-		      const union nf_inet_addr *addr,
-		      u_int8_t family,
-		      const struct nf_conntrack_zone *zone)
-{
-	struct rb_root *root;
-	int count;
-	u32 hash;
-
-	if (family == NFPROTO_IPV6)
-		hash = connlimit_iphash6(addr);
-	else
-		hash = connlimit_iphash(addr->ip);
-	root = &data->climit_root[hash];
-
-	spin_lock_bh(&xt_connlimit_locks[hash % CONNLIMIT_LOCK_SLOTS]);
-
-	count = count_tree(net, root, tuple, addr, family, zone);
-
-	spin_unlock_bh(&xt_connlimit_locks[hash % CONNLIMIT_LOCK_SLOTS]);
-
-	return count;
-}
+#include <net/netfilter/nf_conntrack_count.h>
 
 static bool
 connlimit_mt(const struct sk_buff *skb, struct xt_action_param *par)
 {
 	struct net *net = xt_net(par);
 	const struct xt_connlimit_info *info = par->matchinfo;
-	union nf_inet_addr addr;
 	struct nf_conntrack_tuple tuple;
 	const struct nf_conntrack_tuple *tuple_ptr = &tuple;
 	const struct nf_conntrack_zone *zone = &nf_ct_zone_dflt;
 	enum ip_conntrack_info ctinfo;
 	const struct nf_conn *ct;
 	unsigned int connections;
+	u32 key[5];
 
 	ct = nf_ct_get(skb, &ctinfo);
 	if (ct != NULL) {
@@ -310,6 +48,7 @@ connlimit_mt(const struct sk_buff *skb, struct xt_action_param *par)
 
 	if (xt_family(par) == NFPROTO_IPV6) {
 		const struct ipv6hdr *iph = ipv6_hdr(skb);
+		union nf_inet_addr addr;
 		unsigned int i;
 
 		memcpy(&addr.ip6, (info->flags & XT_CONNLIMIT_DADDR) ?
@@ -317,22 +56,24 @@ connlimit_mt(const struct sk_buff *skb, struct xt_action_param *par)
 
 		for (i = 0; i < ARRAY_SIZE(addr.ip6); ++i)
 			addr.ip6[i] &= info->mask.ip6[i];
+		memcpy(key, &addr, sizeof(addr.ip6));
+		key[4] = zone->id;
 	} else {
 		const struct iphdr *iph = ip_hdr(skb);
-		addr.ip = (info->flags & XT_CONNLIMIT_DADDR) ?
+		key[0] = (info->flags & XT_CONNLIMIT_DADDR) ?
 			  iph->daddr : iph->saddr;
 
-		addr.ip &= info->mask.ip;
+		key[0] &= info->mask.ip;
+		key[1] = zone->id;
 	}
 
-	connections = count_them(net, info->data, tuple_ptr, &addr,
-				 xt_family(par), zone);
+	connections = nf_conncount_count(net, info->data, key,
+					 xt_family(par), tuple_ptr, zone);
 	if (connections == 0)
 		/* kmalloc failed, drop it entirely */
 		goto hotdrop;
 
-	return (connections > info->limit) ^
-	       !!(info->flags & XT_CONNLIMIT_INVERT);
+	return (connections > info->limit) ^ !!(info->flags & XT_CONNLIMIT_INVERT);
 
  hotdrop:
 	par->hotdrop = true;
@@ -342,61 +83,27 @@ connlimit_mt(const struct sk_buff *skb, struct xt_action_param *par)
 static int connlimit_mt_check(const struct xt_mtchk_param *par)
 {
 	struct xt_connlimit_info *info = par->matchinfo;
-	unsigned int i;
-	int ret;
+	unsigned int keylen;
 
-	net_get_random_once(&connlimit_rnd, sizeof(connlimit_rnd));
-
-	ret = nf_ct_netns_get(par->net, par->family);
-	if (ret < 0) {
-		pr_info("cannot load conntrack support for "
-			"address family %u\n", par->family);
-		return ret;
-	}
+	keylen = sizeof(u32);
+	if (par->family == NFPROTO_IPV6)
+		keylen += sizeof(struct in6_addr);
+	else
+		keylen += sizeof(struct in_addr);
 
 	/* init private data */
-	info->data = kmalloc(sizeof(struct xt_connlimit_data), GFP_KERNEL);
-	if (info->data == NULL) {
-		nf_ct_netns_put(par->net, par->family);
-		return -ENOMEM;
-	}
-
-	for (i = 0; i < ARRAY_SIZE(info->data->climit_root); ++i)
-		info->data->climit_root[i] = RB_ROOT;
+	info->data = nf_conncount_init(par->net, par->family, keylen);
+	if (IS_ERR(info->data))
+		return PTR_ERR(info->data);
 
 	return 0;
 }
 
-static void destroy_tree(struct rb_root *r)
-{
-	struct xt_connlimit_conn *conn;
-	struct xt_connlimit_rb *rbconn;
-	struct hlist_node *n;
-	struct rb_node *node;
-
-	while ((node = rb_first(r)) != NULL) {
-		rbconn = rb_entry(node, struct xt_connlimit_rb, node);
-
-		rb_erase(node, r);
-
-		hlist_for_each_entry_safe(conn, n, &rbconn->hhead, node)
-			kmem_cache_free(connlimit_conn_cachep, conn);
-
-		kmem_cache_free(connlimit_rb_cachep, rbconn);
-	}
-}
-
 static void connlimit_mt_destroy(const struct xt_mtdtor_param *par)
 {
 	const struct xt_connlimit_info *info = par->matchinfo;
-	unsigned int i;
-
-	nf_ct_netns_put(par->net, par->family);
-
-	for (i = 0; i < ARRAY_SIZE(info->data->climit_root); ++i)
-		destroy_tree(&info->data->climit_root[i]);
 
-	kfree(info->data);
+	nf_conncount_destroy(par->net, par->family, info->data);
 }
 
 static struct xt_match connlimit_mt_reg __read_mostly = {
@@ -413,40 +120,12 @@ static struct xt_match connlimit_mt_reg __read_mostly = {
 
 static int __init connlimit_mt_init(void)
 {
-	int ret, i;
-
-	BUILD_BUG_ON(CONNLIMIT_LOCK_SLOTS > CONNLIMIT_SLOTS);
-	BUILD_BUG_ON((CONNLIMIT_SLOTS % CONNLIMIT_LOCK_SLOTS) != 0);
-
-	for (i = 0; i < CONNLIMIT_LOCK_SLOTS; ++i)
-		spin_lock_init(&xt_connlimit_locks[i]);
-
-	connlimit_conn_cachep = kmem_cache_create("xt_connlimit_conn",
-					   sizeof(struct xt_connlimit_conn),
-					   0, 0, NULL);
-	if (!connlimit_conn_cachep)
-		return -ENOMEM;
-
-	connlimit_rb_cachep = kmem_cache_create("xt_connlimit_rb",
-					   sizeof(struct xt_connlimit_rb),
-					   0, 0, NULL);
-	if (!connlimit_rb_cachep) {
-		kmem_cache_destroy(connlimit_conn_cachep);
-		return -ENOMEM;
-	}
-	ret = xt_register_match(&connlimit_mt_reg);
-	if (ret != 0) {
-		kmem_cache_destroy(connlimit_conn_cachep);
-		kmem_cache_destroy(connlimit_rb_cachep);
-	}
-	return ret;
+	return xt_register_match(&connlimit_mt_reg);
 }
 
 static void __exit connlimit_mt_exit(void)
 {
 	xt_unregister_match(&connlimit_mt_reg);
-	kmem_cache_destroy(connlimit_conn_cachep);
-	kmem_cache_destroy(connlimit_rb_cachep);
 }
 
 module_init(connlimit_mt_init);
diff --git a/net/netfilter/xt_hashlimit.c b/net/netfilter/xt_hashlimit.c
index 5da8746f7b88..ca6847403ca2 100644
--- a/net/netfilter/xt_hashlimit.c
+++ b/net/netfilter/xt_hashlimit.c
@@ -353,7 +353,7 @@ static int htable_create(struct net *net, struct hashlimit_cfg3 *cfg,
 static bool select_all(const struct xt_hashlimit_htable *ht,
 		       const struct dsthash_ent *he)
 {
-	return 1;
+	return true;
 }
 
 static bool select_gc(const struct xt_hashlimit_htable *ht,
@@ -1266,7 +1266,6 @@ static int dl_proc_open(struct inode *inode, struct file *file)
 }
 
 static const struct file_operations dl_file_ops_v2 = {
-	.owner   = THIS_MODULE,
 	.open    = dl_proc_open_v2,
 	.read    = seq_read,
 	.llseek  = seq_lseek,
@@ -1274,7 +1273,6 @@ static const struct file_operations dl_file_ops_v2 = {
 };
 
 static const struct file_operations dl_file_ops_v1 = {
-	.owner   = THIS_MODULE,
 	.open    = dl_proc_open_v1,
 	.read    = seq_read,
 	.llseek  = seq_lseek,
@@ -1282,7 +1280,6 @@ static const struct file_operations dl_file_ops_v1 = {
 };
 
 static const struct file_operations dl_file_ops = {
-	.owner   = THIS_MODULE,
 	.open    = dl_proc_open,
 	.read    = seq_read,
 	.llseek  = seq_lseek,
diff --git a/net/netfilter/xt_ipcomp.c b/net/netfilter/xt_ipcomp.c
index 000e70377f85..7ca64a50db04 100644
--- a/net/netfilter/xt_ipcomp.c
+++ b/net/netfilter/xt_ipcomp.c
@@ -58,7 +58,7 @@ static bool comp_mt(const struct sk_buff *skb, struct xt_action_param *par)
 		 */
 		pr_debug("Dropping evil IPComp tinygram.\n");
 		par->hotdrop = true;
-		return 0;
+		return false;
 	}
 
 	return spi_match(compinfo->spis[0], compinfo->spis[1],
diff --git a/net/netfilter/xt_policy.c b/net/netfilter/xt_policy.c
index 2b4ab189bba7..5639fb03bdd9 100644
--- a/net/netfilter/xt_policy.c
+++ b/net/netfilter/xt_policy.c
@@ -93,7 +93,8 @@ match_policy_out(const struct sk_buff *skb, const struct xt_policy_info *info,
 	if (dst->xfrm == NULL)
 		return -1;
 
-	for (i = 0; dst && dst->xfrm; dst = dst->child, i++) {
+	for (i = 0; dst && dst->xfrm;
+	     dst = ((struct xfrm_dst *)dst)->child, i++) {
 		pos = strict ? i : 0;
 		if (pos >= info->len)
 			return 0;
diff --git a/net/netfilter/xt_set.c b/net/netfilter/xt_set.c
index 64285702afd5..16b6b11ee83f 100644
--- a/net/netfilter/xt_set.c
+++ b/net/netfilter/xt_set.c
@@ -39,13 +39,17 @@ match_set(ip_set_id_t index, const struct sk_buff *skb,
 	return inv;
 }
 
-#define ADT_OPT(n, f, d, fs, cfs, t)	\
-struct ip_set_adt_opt n = {		\
-	.family	= f,			\
-	.dim = d,			\
-	.flags = fs,			\
-	.cmdflags = cfs,		\
-	.ext.timeout = t,		\
+#define ADT_OPT(n, f, d, fs, cfs, t, p, b, po, bo)	\
+struct ip_set_adt_opt n = {				\
+	.family	= f,					\
+	.dim = d,					\
+	.flags = fs,					\
+	.cmdflags = cfs,				\
+	.ext.timeout = t,				\
+	.ext.packets = p,				\
+	.ext.bytes = b,					\
+	.ext.packets_op = po,				\
+	.ext.bytes_op = bo,				\
 }
 
 /* Revision 0 interface: backward compatible with netfilter/iptables */
@@ -56,7 +60,8 @@ set_match_v0(const struct sk_buff *skb, struct xt_action_param *par)
 	const struct xt_set_info_match_v0 *info = par->matchinfo;
 
 	ADT_OPT(opt, xt_family(par), info->match_set.u.compat.dim,
-		info->match_set.u.compat.flags, 0, UINT_MAX);
+		info->match_set.u.compat.flags, 0, UINT_MAX,
+		0, 0, 0, 0);
 
 	return match_set(info->match_set.index, skb, par, &opt,
 			 info->match_set.u.compat.flags & IPSET_INV_MATCH);
@@ -119,7 +124,8 @@ set_match_v1(const struct sk_buff *skb, struct xt_action_param *par)
 	const struct xt_set_info_match_v1 *info = par->matchinfo;
 
 	ADT_OPT(opt, xt_family(par), info->match_set.dim,
-		info->match_set.flags, 0, UINT_MAX);
+		info->match_set.flags, 0, UINT_MAX,
+		0, 0, 0, 0);
 
 	if (opt.flags & IPSET_RETURN_NOMATCH)
 		opt.cmdflags |= IPSET_FLAG_RETURN_NOMATCH;
@@ -161,45 +167,21 @@ set_match_v1_destroy(const struct xt_mtdtor_param *par)
 /* Revision 3 match */
 
 static bool
-match_counter0(u64 counter, const struct ip_set_counter_match0 *info)
-{
-	switch (info->op) {
-	case IPSET_COUNTER_NONE:
-		return true;
-	case IPSET_COUNTER_EQ:
-		return counter == info->value;
-	case IPSET_COUNTER_NE:
-		return counter != info->value;
-	case IPSET_COUNTER_LT:
-		return counter < info->value;
-	case IPSET_COUNTER_GT:
-		return counter > info->value;
-	}
-	return false;
-}
-
-static bool
 set_match_v3(const struct sk_buff *skb, struct xt_action_param *par)
 {
 	const struct xt_set_info_match_v3 *info = par->matchinfo;
-	int ret;
 
 	ADT_OPT(opt, xt_family(par), info->match_set.dim,
-		info->match_set.flags, info->flags, UINT_MAX);
+		info->match_set.flags, info->flags, UINT_MAX,
+		info->packets.value, info->bytes.value,
+		info->packets.op, info->bytes.op);
 
 	if (info->packets.op != IPSET_COUNTER_NONE ||
 	    info->bytes.op != IPSET_COUNTER_NONE)
 		opt.cmdflags |= IPSET_FLAG_MATCH_COUNTERS;
 
-	ret = match_set(info->match_set.index, skb, par, &opt,
-			info->match_set.flags & IPSET_INV_MATCH);
-
-	if (!(ret && opt.cmdflags & IPSET_FLAG_MATCH_COUNTERS))
-		return ret;
-
-	if (!match_counter0(opt.ext.packets, &info->packets))
-		return false;
-	return match_counter0(opt.ext.bytes, &info->bytes);
+	return match_set(info->match_set.index, skb, par, &opt,
+			 info->match_set.flags & IPSET_INV_MATCH);
 }
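
The match_counter0() switch deleted above now effectively runs inside the set lookup: with IPSET_FLAG_MATCH_COUNTERS set, the ip_set core compares ext.packets and ext.bytes using the requested operators. A sketch of that per-entry comparison, assuming the core mirrors the removed logic:

	static bool counter_op_matches(u64 counter, u8 op, u64 value)
	{
		switch (op) {
		case IPSET_COUNTER_NONE:
			return true;		/* no constraint */
		case IPSET_COUNTER_EQ:
			return counter == value;
		case IPSET_COUNTER_NE:
			return counter != value;
		case IPSET_COUNTER_LT:
			return counter < value;
		case IPSET_COUNTER_GT:
			return counter > value;
		}
		return false;
	}
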
 
 #define set_match_v3_checkentry	set_match_v1_checkentry
@@ -208,45 +190,21 @@ set_match_v3(const struct sk_buff *skb, struct xt_action_param *par)
 /* Revision 4 match */
 
 static bool
-match_counter(u64 counter, const struct ip_set_counter_match *info)
-{
-	switch (info->op) {
-	case IPSET_COUNTER_NONE:
-		return true;
-	case IPSET_COUNTER_EQ:
-		return counter == info->value;
-	case IPSET_COUNTER_NE:
-		return counter != info->value;
-	case IPSET_COUNTER_LT:
-		return counter < info->value;
-	case IPSET_COUNTER_GT:
-		return counter > info->value;
-	}
-	return false;
-}
-
-static bool
 set_match_v4(const struct sk_buff *skb, struct xt_action_param *par)
 {
 	const struct xt_set_info_match_v4 *info = par->matchinfo;
-	int ret;
 
 	ADT_OPT(opt, xt_family(par), info->match_set.dim,
-		info->match_set.flags, info->flags, UINT_MAX);
+		info->match_set.flags, info->flags, UINT_MAX,
+		info->packets.value, info->bytes.value,
+		info->packets.op, info->bytes.op);
 
 	if (info->packets.op != IPSET_COUNTER_NONE ||
 	    info->bytes.op != IPSET_COUNTER_NONE)
 		opt.cmdflags |= IPSET_FLAG_MATCH_COUNTERS;
 
-	ret = match_set(info->match_set.index, skb, par, &opt,
-			info->match_set.flags & IPSET_INV_MATCH);
-
-	if (!(ret && opt.cmdflags & IPSET_FLAG_MATCH_COUNTERS))
-		return ret;
-
-	if (!match_counter(opt.ext.packets, &info->packets))
-		return false;
-	return match_counter(opt.ext.bytes, &info->bytes);
+	return match_set(info->match_set.index, skb, par, &opt,
+			 info->match_set.flags & IPSET_INV_MATCH);
 }
 
 #define set_match_v4_checkentry	set_match_v1_checkentry
@@ -260,9 +218,11 @@ set_target_v0(struct sk_buff *skb, const struct xt_action_param *par)
 	const struct xt_set_info_target_v0 *info = par->targinfo;
 
 	ADT_OPT(add_opt, xt_family(par), info->add_set.u.compat.dim,
-		info->add_set.u.compat.flags, 0, UINT_MAX);
+		info->add_set.u.compat.flags, 0, UINT_MAX,
+		0, 0, 0, 0);
 	ADT_OPT(del_opt, xt_family(par), info->del_set.u.compat.dim,
-		info->del_set.u.compat.flags, 0, UINT_MAX);
+		info->del_set.u.compat.flags, 0, UINT_MAX,
+		0, 0, 0, 0);
 
 	if (info->add_set.index != IPSET_INVALID_ID)
 		ip_set_add(info->add_set.index, skb, par, &add_opt);
@@ -333,9 +293,11 @@ set_target_v1(struct sk_buff *skb, const struct xt_action_param *par)
 	const struct xt_set_info_target_v1 *info = par->targinfo;
 
 	ADT_OPT(add_opt, xt_family(par), info->add_set.dim,
-		info->add_set.flags, 0, UINT_MAX);
+		info->add_set.flags, 0, UINT_MAX,
+		0, 0, 0, 0);
 	ADT_OPT(del_opt, xt_family(par), info->del_set.dim,
-		info->del_set.flags, 0, UINT_MAX);
+		info->del_set.flags, 0, UINT_MAX,
+		0, 0, 0, 0);
 
 	if (info->add_set.index != IPSET_INVALID_ID)
 		ip_set_add(info->add_set.index, skb, par, &add_opt);
@@ -402,9 +364,11 @@ set_target_v2(struct sk_buff *skb, const struct xt_action_param *par)
 	const struct xt_set_info_target_v2 *info = par->targinfo;
 
 	ADT_OPT(add_opt, xt_family(par), info->add_set.dim,
-		info->add_set.flags, info->flags, info->timeout);
+		info->add_set.flags, info->flags, info->timeout,
+		0, 0, 0, 0);
 	ADT_OPT(del_opt, xt_family(par), info->del_set.dim,
-		info->del_set.flags, 0, UINT_MAX);
+		info->del_set.flags, 0, UINT_MAX,
+		0, 0, 0, 0);
 
 	/* Normalize to fit into jiffies */
 	if (add_opt.ext.timeout != IPSET_NO_TIMEOUT &&
@@ -432,11 +396,14 @@ set_target_v3(struct sk_buff *skb, const struct xt_action_param *par)
 	int ret;
 
 	ADT_OPT(add_opt, xt_family(par), info->add_set.dim,
-		info->add_set.flags, info->flags, info->timeout);
+		info->add_set.flags, info->flags, info->timeout,
+		0, 0, 0, 0);
 	ADT_OPT(del_opt, xt_family(par), info->del_set.dim,
-		info->del_set.flags, 0, UINT_MAX);
+		info->del_set.flags, 0, UINT_MAX,
+		0, 0, 0, 0);
 	ADT_OPT(map_opt, xt_family(par), info->map_set.dim,
-		info->map_set.flags, 0, UINT_MAX);
+		info->map_set.flags, 0, UINT_MAX,
+		0, 0, 0, 0);
 
 	/* Normalize to fit into jiffies */
 	if (add_opt.ext.timeout != IPSET_NO_TIMEOUT &&
diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c
index 84a4e4c3be4b..2ad445c1d27c 100644
--- a/net/netlink/af_netlink.c
+++ b/net/netlink/af_netlink.c
@@ -65,6 +65,7 @@
 #include <linux/net_namespace.h>
 
 #include <net/net_namespace.h>
+#include <net/netns/generic.h>
 #include <net/sock.h>
 #include <net/scm.h>
 #include <net/netlink.h>
@@ -145,8 +146,6 @@ static atomic_t nl_table_users = ATOMIC_INIT(0);
 
 static BLOCKING_NOTIFIER_HEAD(netlink_chain);
 
-static DEFINE_SPINLOCK(netlink_tap_lock);
-static struct list_head netlink_tap_all __read_mostly;
 
 static const struct rhashtable_params netlink_rhashtable_params;
 
@@ -173,14 +172,24 @@ static struct sk_buff *netlink_to_full_skb(const struct sk_buff *skb,
 	return new;
 }
 
+static unsigned int netlink_tap_net_id;
+
+struct netlink_tap_net {
+	struct list_head netlink_tap_all;
+	struct mutex netlink_tap_lock;
+};
+
 int netlink_add_tap(struct netlink_tap *nt)
 {
+	struct net *net = dev_net(nt->dev);
+	struct netlink_tap_net *nn = net_generic(net, netlink_tap_net_id);
+
 	if (unlikely(nt->dev->type != ARPHRD_NETLINK))
 		return -EINVAL;
 
-	spin_lock(&netlink_tap_lock);
-	list_add_rcu(&nt->list, &netlink_tap_all);
-	spin_unlock(&netlink_tap_lock);
+	mutex_lock(&nn->netlink_tap_lock);
+	list_add_rcu(&nt->list, &nn->netlink_tap_all);
+	mutex_unlock(&nn->netlink_tap_lock);
 
 	__module_get(nt->module);
 
@@ -190,12 +199,14 @@ EXPORT_SYMBOL_GPL(netlink_add_tap);
 
 static int __netlink_remove_tap(struct netlink_tap *nt)
 {
+	struct net *net = dev_net(nt->dev);
+	struct netlink_tap_net *nn = net_generic(net, netlink_tap_net_id);
 	bool found = false;
 	struct netlink_tap *tmp;
 
-	spin_lock(&netlink_tap_lock);
+	mutex_lock(&nn->netlink_tap_lock);
 
-	list_for_each_entry(tmp, &netlink_tap_all, list) {
+	list_for_each_entry(tmp, &nn->netlink_tap_all, list) {
 		if (nt == tmp) {
 			list_del_rcu(&nt->list);
 			found = true;
@@ -205,7 +216,7 @@ static int __netlink_remove_tap(struct netlink_tap *nt)
 
 	pr_warn("__netlink_remove_tap: %p not found\n", nt);
 out:
-	spin_unlock(&netlink_tap_lock);
+	mutex_unlock(&nn->netlink_tap_lock);
 
 	if (found)
 		module_put(nt->module);
@@ -224,6 +235,26 @@ int netlink_remove_tap(struct netlink_tap *nt)
 }
 EXPORT_SYMBOL_GPL(netlink_remove_tap);
 
+static __net_init int netlink_tap_init_net(struct net *net)
+{
+	struct netlink_tap_net *nn = net_generic(net, netlink_tap_net_id);
+
+	INIT_LIST_HEAD(&nn->netlink_tap_all);
+	mutex_init(&nn->netlink_tap_lock);
+	return 0;
+}
+
+static void __net_exit netlink_tap_exit_net(struct net *net)
+{
+}
+
+static struct pernet_operations netlink_tap_net_ops = {
+	.init = netlink_tap_init_net,
+	.exit = netlink_tap_exit_net,
+	.id   = &netlink_tap_net_id,
+	.size = sizeof(struct netlink_tap_net),
+};
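
register_pernet_subsys() with .id and .size makes the core allocate one struct netlink_tap_net per namespace; net_generic(net, netlink_tap_net_id) then returns that namespace's slot, as netlink_add_tap() does above. A minimal usage sketch (the walker function is hypothetical):

	#include <net/net_namespace.h>
	#include <net/netns/generic.h>

	static void example_walk_taps(struct net *net)
	{
		struct netlink_tap_net *nn =
			net_generic(net, netlink_tap_net_id);
		struct netlink_tap *nt;

		mutex_lock(&nn->netlink_tap_lock);
		list_for_each_entry(nt, &nn->netlink_tap_all, list)
			pr_debug("tap dev %s\n", nt->dev->name);
		mutex_unlock(&nn->netlink_tap_lock);
	}
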
+
 static bool netlink_filter_tap(const struct sk_buff *skb)
 {
 	struct sock *sk = skb->sk;
@@ -277,7 +308,7 @@ static int __netlink_deliver_tap_skb(struct sk_buff *skb,
 	return ret;
 }
 
-static void __netlink_deliver_tap(struct sk_buff *skb)
+static void __netlink_deliver_tap(struct sk_buff *skb, struct netlink_tap_net *nn)
 {
 	int ret;
 	struct netlink_tap *tmp;
@@ -285,19 +316,21 @@ static void __netlink_deliver_tap(struct sk_buff *skb)
 	if (!netlink_filter_tap(skb))
 		return;
 
-	list_for_each_entry_rcu(tmp, &netlink_tap_all, list) {
+	list_for_each_entry_rcu(tmp, &nn->netlink_tap_all, list) {
 		ret = __netlink_deliver_tap_skb(skb, tmp->dev);
 		if (unlikely(ret))
 			break;
 	}
 }
 
-static void netlink_deliver_tap(struct sk_buff *skb)
+static void netlink_deliver_tap(struct net *net, struct sk_buff *skb)
 {
+	struct netlink_tap_net *nn = net_generic(net, netlink_tap_net_id);
+
 	rcu_read_lock();
 
-	if (unlikely(!list_empty(&netlink_tap_all)))
-		__netlink_deliver_tap(skb);
+	if (unlikely(!list_empty(&nn->netlink_tap_all)))
+		__netlink_deliver_tap(skb, nn);
 
 	rcu_read_unlock();
 }
@@ -306,7 +339,7 @@ static void netlink_deliver_tap_kernel(struct sock *dst, struct sock *src,
 				       struct sk_buff *skb)
 {
 	if (!(netlink_is_kernel(dst) && netlink_is_kernel(src)))
-		netlink_deliver_tap(skb);
+		netlink_deliver_tap(sock_net(dst), skb);
 }
 
 static void netlink_overrun(struct sock *sk)
@@ -1216,7 +1249,7 @@ static int __netlink_sendskb(struct sock *sk, struct sk_buff *skb)
 {
 	int len = skb->len;
 
-	netlink_deliver_tap(skb);
+	netlink_deliver_tap(sock_net(sk), skb);
 
 	skb_queue_tail(&sk->sk_receive_queue, skb);
 	sk->sk_data_ready(sk);
@@ -2482,8 +2515,9 @@ static int netlink_walk_start(struct nl_seq_iter *iter)
 		return err;
 	}
 
-	err = rhashtable_walk_start(&iter->hti);
-	return err == -EAGAIN ? 0 : err;
+	rhashtable_walk_start(&iter->hti);
+
+	return 0;
 }
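
rhashtable_walk_start() no longer reports -EAGAIN to callers; a concurrent resize instead surfaces as rhashtable_walk_next() returning ERR_PTR(-EAGAIN), which walkers already handle. The resulting loop shape:

	rhashtable_walk_start(&iter->hti);	/* cannot fail for callers */

	while ((sk = rhashtable_walk_next(&iter->hti)) != NULL) {
		if (IS_ERR(sk)) {
			if (PTR_ERR(sk) == -EAGAIN)
				continue;	/* table resized, keep going */
			break;
		}
		/* ... process sk ... */
	}

	rhashtable_walk_stop(&iter->hti);
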
 
 static void netlink_walk_stop(struct nl_seq_iter *iter)
@@ -2604,7 +2638,6 @@ static int netlink_seq_open(struct inode *inode, struct file *file)
 }
 
 static const struct file_operations netlink_seq_fops = {
-	.owner		= THIS_MODULE,
 	.open		= netlink_seq_open,
 	.read		= seq_read,
 	.llseek		= seq_lseek,
@@ -2734,12 +2767,11 @@ static int __init netlink_proto_init(void)
 		}
 	}
 
-	INIT_LIST_HEAD(&netlink_tap_all);
-
 	netlink_add_usersock_entry();
 
 	sock_register(&netlink_family_ops);
 	register_pernet_subsys(&netlink_net_ops);
+	register_pernet_subsys(&netlink_tap_net_ops);
 	/* The netlink device handler may be needed early. */
 	rtnetlink_init();
 out:
diff --git a/net/netlink/diag.c b/net/netlink/diag.c
index 8faa20b4d457..7dda33b9b784 100644
--- a/net/netlink/diag.c
+++ b/net/netlink/diag.c
@@ -115,11 +115,7 @@ static int __netlink_diag_dump(struct sk_buff *skb, struct netlink_callback *cb,
 	if (!s_num)
 		rhashtable_walk_enter(&tbl->hash, hti);
 
-	ret = rhashtable_walk_start(hti);
-	if (ret == -EAGAIN)
-		ret = 0;
-	if (ret)
-		goto stop;
+	rhashtable_walk_start(hti);
 
 	while ((nlsk = rhashtable_walk_next(hti))) {
 		if (IS_ERR(nlsk)) {
@@ -146,8 +142,8 @@ static int __netlink_diag_dump(struct sk_buff *skb, struct netlink_callback *cb,
 		}
 	}
 
-stop:
 	rhashtable_walk_stop(hti);
+
 	if (ret)
 		goto done;
 
diff --git a/net/netrom/af_netrom.c b/net/netrom/af_netrom.c
index 7ed9d4422a73..9ba30c63be3d 100644
--- a/net/netrom/af_netrom.c
+++ b/net/netrom/af_netrom.c
@@ -1344,7 +1344,6 @@ static int nr_info_open(struct inode *inode, struct file *file)
 }
 
 static const struct file_operations nr_info_fops = {
-	.owner = THIS_MODULE,
 	.open = nr_info_open,
 	.read = seq_read,
 	.llseek = seq_lseek,
diff --git a/net/netrom/nr_route.c b/net/netrom/nr_route.c
index 75e6ba970fde..b5a7dcb30991 100644
--- a/net/netrom/nr_route.c
+++ b/net/netrom/nr_route.c
@@ -901,7 +901,6 @@ static int nr_node_info_open(struct inode *inode, struct file *file)
 }
 
 const struct file_operations nr_nodes_fops = {
-	.owner = THIS_MODULE,
 	.open = nr_node_info_open,
 	.read = seq_read,
 	.llseek = seq_lseek,
@@ -968,7 +967,6 @@ static int nr_neigh_info_open(struct inode *inode, struct file *file)
 }
 
 const struct file_operations nr_neigh_fops = {
-	.owner = THIS_MODULE,
 	.open = nr_neigh_info_open,
 	.read = seq_read,
 	.llseek = seq_lseek,
diff --git a/net/openvswitch/conntrack.c b/net/openvswitch/conntrack.c
index b27c5c6d9cab..62f36cc938ca 100644
--- a/net/openvswitch/conntrack.c
+++ b/net/openvswitch/conntrack.c
@@ -1266,14 +1266,14 @@ static int parse_nat(const struct nlattr *attr,
 		/* Do not allow flags if no type is given. */
 		if (info->range.flags) {
 			OVS_NLERR(log,
-				  "NAT flags may be given only when NAT range (SRC or DST) is also specified.\n"
+				  "NAT flags may be given only when NAT range (SRC or DST) is also specified."
 				  );
 			return -EINVAL;
 		}
 		info->nat = OVS_CT_NAT;   /* NAT existing connections. */
 	} else if (!info->commit) {
 		OVS_NLERR(log,
-			  "NAT attributes may be specified only when CT COMMIT flag is also specified.\n"
+			  "NAT attributes may be specified only when CT COMMIT flag is also specified."
 			  );
 		return -EINVAL;
 	}
diff --git a/net/openvswitch/flow.c b/net/openvswitch/flow.c
index f039064ce922..56b8e7167790 100644
--- a/net/openvswitch/flow.c
+++ b/net/openvswitch/flow.c
@@ -56,12 +56,12 @@
 
 u64 ovs_flow_used_time(unsigned long flow_jiffies)
 {
-	struct timespec cur_ts;
+	struct timespec64 cur_ts;
 	u64 cur_ms, idle_ms;
 
-	ktime_get_ts(&cur_ts);
+	ktime_get_ts64(&cur_ts);
 	idle_ms = jiffies_to_msecs(jiffies - flow_jiffies);
-	cur_ms = (u64)cur_ts.tv_sec * MSEC_PER_SEC +
+	cur_ms = (u64)(u32)cur_ts.tv_sec * MSEC_PER_SEC +
 		 cur_ts.tv_nsec / NSEC_PER_MSEC;
 
 	return cur_ms - idle_ms;
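
struct timespec64 keeps tv_sec 64 bits wide on 32-bit systems, so ktime_get_ts64() is the y2038-safe replacement; the (u64)(u32) cast deliberately preserves the old 32-bit wrap of the seconds value so the exported number keeps its previous format. Equivalent standalone sketch:

	#include <linux/timekeeping.h>

	static u64 now_ms_wrapped(void)
	{
		struct timespec64 ts;

		ktime_get_ts64(&ts);		/* y2038-safe monotonic clock */
		return (u64)(u32)ts.tv_sec * MSEC_PER_SEC +
		       ts.tv_nsec / NSEC_PER_MSEC; /* keep old truncation */
	}
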
diff --git a/net/openvswitch/flow_netlink.c b/net/openvswitch/flow_netlink.c
index f143908b651d..7322aa1e382e 100644
--- a/net/openvswitch/flow_netlink.c
+++ b/net/openvswitch/flow_netlink.c
@@ -49,6 +49,7 @@
 #include <net/mpls.h>
 #include <net/vxlan.h>
 #include <net/tun_proto.h>
+#include <net/erspan.h>
 
 #include "flow_netlink.h"
 
@@ -329,7 +330,8 @@ size_t ovs_tun_key_attr_size(void)
 		+ nla_total_size(0)    /* OVS_TUNNEL_KEY_ATTR_CSUM */
 		+ nla_total_size(0)    /* OVS_TUNNEL_KEY_ATTR_OAM */
 		+ nla_total_size(256)  /* OVS_TUNNEL_KEY_ATTR_GENEVE_OPTS */
-		/* OVS_TUNNEL_KEY_ATTR_VXLAN_OPTS is mutually exclusive with
+		/* OVS_TUNNEL_KEY_ATTR_VXLAN_OPTS and
+		 * OVS_TUNNEL_KEY_ATTR_ERSPAN_OPTS are mutually exclusive with
 		 * OVS_TUNNEL_KEY_ATTR_GENEVE_OPTS and covered by it.
 		 */
 		+ nla_total_size(2)    /* OVS_TUNNEL_KEY_ATTR_TP_SRC */
@@ -400,6 +402,7 @@ static const struct ovs_len_tbl ovs_tunnel_key_lens[OVS_TUNNEL_KEY_ATTR_MAX + 1]
 						.next = ovs_vxlan_ext_key_lens },
 	[OVS_TUNNEL_KEY_ATTR_IPV6_SRC]      = { .len = sizeof(struct in6_addr) },
 	[OVS_TUNNEL_KEY_ATTR_IPV6_DST]      = { .len = sizeof(struct in6_addr) },
+	[OVS_TUNNEL_KEY_ATTR_ERSPAN_OPTS]   = { .len = OVS_ATTR_VARIABLE },
 };
 
 static const struct ovs_len_tbl
@@ -631,6 +634,33 @@ static int vxlan_tun_opt_from_nlattr(const struct nlattr *attr,
 	return 0;
 }
 
+static int erspan_tun_opt_from_nlattr(const struct nlattr *a,
+				      struct sw_flow_match *match, bool is_mask,
+				      bool log)
+{
+	unsigned long opt_key_offset;
+
+	BUILD_BUG_ON(sizeof(struct erspan_metadata) >
+		     sizeof(match->key->tun_opts));
+
+	if (nla_len(a) > sizeof(match->key->tun_opts)) {
+		OVS_NLERR(log, "ERSPAN option length err (len %d, max %zu).",
+			  nla_len(a), sizeof(match->key->tun_opts));
+		return -EINVAL;
+	}
+
+	if (!is_mask)
+		SW_FLOW_KEY_PUT(match, tun_opts_len,
+				sizeof(struct erspan_metadata), false);
+	else
+		SW_FLOW_KEY_PUT(match, tun_opts_len, 0xff, true);
+
+	opt_key_offset = TUN_METADATA_OFFSET(nla_len(a));
+	SW_FLOW_KEY_MEMCPY_OFFSET(match, opt_key_offset, nla_data(a),
+				  nla_len(a), is_mask);
+	return 0;
+}
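
erspan_tun_opt_from_nlattr() follows the usual pattern for variable-length netlink blobs: a compile-time guard that the metadata type fits the key, a runtime nla_len() bound check on the attribute, then a copy. Condensed sketch of that validation shape (helper hypothetical):

	static int copy_erspan_opt(const struct nlattr *a,
				   struct sw_flow_key *key)
	{
		/* compile time: metadata must fit the key's option space */
		BUILD_BUG_ON(sizeof(struct erspan_metadata) >
			     sizeof(key->tun_opts));
		/* runtime: attribute length must fit as well */
		if (nla_len(a) > sizeof(key->tun_opts))
			return -EINVAL;
		memcpy(key->tun_opts, nla_data(a), nla_len(a));
		return 0;
	}
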
+
 static int ip_tun_from_nlattr(const struct nlattr *attr,
 			      struct sw_flow_match *match, bool is_mask,
 			      bool log)
@@ -738,6 +768,20 @@ static int ip_tun_from_nlattr(const struct nlattr *attr,
 			break;
 		case OVS_TUNNEL_KEY_ATTR_PAD:
 			break;
+		case OVS_TUNNEL_KEY_ATTR_ERSPAN_OPTS:
+			if (opts_type) {
+				OVS_NLERR(log, "Multiple metadata blocks provided");
+				return -EINVAL;
+			}
+
+			err = erspan_tun_opt_from_nlattr(a, match, is_mask,
+							 log);
+			if (err)
+				return err;
+
+			tun_flags |= TUNNEL_ERSPAN_OPT;
+			opts_type = type;
+			break;
 		default:
 			OVS_NLERR(log, "Unknown IP tunnel attribute %d",
 				  type);
@@ -862,6 +906,10 @@ static int __ip_tun_to_nlattr(struct sk_buff *skb,
 		else if (output->tun_flags & TUNNEL_VXLAN_OPT &&
 			 vxlan_opt_to_nlattr(skb, tun_opts, swkey_tun_opts_len))
 			return -EMSGSIZE;
+		else if (output->tun_flags & TUNNEL_ERSPAN_OPT &&
+			 nla_put(skb, OVS_TUNNEL_KEY_ATTR_ERSPAN_OPTS,
+				 swkey_tun_opts_len, tun_opts))
+			return -EMSGSIZE;
 	}
 
 	return 0;
@@ -2454,7 +2502,7 @@ static int validate_geneve_opts(struct sw_flow_key *key)
 
 		option = (struct geneve_opt *)((u8 *)option + len);
 		opts_len -= len;
-	};
+	}
 
 	key->tun_key.tun_flags |= crit_opt ? TUNNEL_CRIT_OPT : 0;
 
@@ -2486,8 +2534,10 @@ static int validate_and_copy_set_tun(const struct nlattr *attr,
 			break;
 		case OVS_TUNNEL_KEY_ATTR_VXLAN_OPTS:
 			break;
+		case OVS_TUNNEL_KEY_ATTR_ERSPAN_OPTS:
+			break;
 		}
-	};
+	}
 
 	start = add_nested_action_start(sfa, OVS_ACTION_ATTR_SET, log);
 	if (start < 0)
diff --git a/net/openvswitch/meter.c b/net/openvswitch/meter.c
index 3fbfc78991ac..04b94281a30b 100644
--- a/net/openvswitch/meter.c
+++ b/net/openvswitch/meter.c
@@ -488,7 +488,7 @@ bool ovs_meter_execute(struct datapath *dp, struct sk_buff *skb,
 		long long int max_bucket_size;
 
 		band = &meter->bands[i];
-		max_bucket_size = (band->burst_size + band->rate) * 1000;
+		max_bucket_size = (band->burst_size + band->rate) * 1000LL;
 
 		band->bucket += delta_ms * band->rate;
 		if (band->bucket > max_bucket_size)
diff --git a/net/openvswitch/vport-internal_dev.c b/net/openvswitch/vport-internal_dev.c
index 04a3128adcf0..bb95c43aae76 100644
--- a/net/openvswitch/vport-internal_dev.c
+++ b/net/openvswitch/vport-internal_dev.c
@@ -16,7 +16,6 @@
  * 02110-1301, USA
  */
 
-#include <linux/hardirq.h>
 #include <linux/if_vlan.h>
 #include <linux/kernel.h>
 #include <linux/netdevice.h>
@@ -126,18 +125,12 @@ internal_get_stats(struct net_device *dev, struct rtnl_link_stats64 *stats)
 	}
 }
 
-static void internal_set_rx_headroom(struct net_device *dev, int new_hr)
-{
-	dev->needed_headroom = new_hr < 0 ? 0 : new_hr;
-}
-
 static const struct net_device_ops internal_dev_netdev_ops = {
 	.ndo_open = internal_dev_open,
 	.ndo_stop = internal_dev_stop,
 	.ndo_start_xmit = internal_dev_xmit,
 	.ndo_set_mac_address = eth_mac_addr,
 	.ndo_get_stats64 = internal_get_stats,
-	.ndo_set_rx_headroom = internal_set_rx_headroom,
 };
 
 static struct rtnl_link_ops internal_dev_link_ops __read_mostly = {
@@ -154,7 +147,7 @@ static void do_setup(struct net_device *netdev)
 
 	netdev->priv_flags &= ~IFF_TX_SKB_SHARING;
 	netdev->priv_flags |= IFF_LIVE_ADDR_CHANGE | IFF_OPENVSWITCH |
-			      IFF_PHONY_HEADROOM | IFF_NO_QUEUE;
+			      IFF_NO_QUEUE;
 	netdev->needs_free_netdev = true;
 	netdev->priv_destructor = internal_dev_destructor;
 	netdev->ethtool_ops = &internal_dev_ethtool_ops;
@@ -195,7 +188,6 @@ static struct vport *internal_dev_create(const struct vport_parms *parms)
 		err = -ENOMEM;
 		goto error_free_netdev;
 	}
-	vport->dev->needed_headroom = vport->dp->max_headroom;
 
 	dev_net_set(vport->dev, ovs_dp_get_net(vport->dp));
 	internal_dev = internal_dev_priv(vport->dev);
diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c
index 3b4d6a3cf190..1d1483007e46 100644
--- a/net/packet/af_packet.c
+++ b/net/packet/af_packet.c
@@ -247,12 +247,13 @@ static int packet_direct_xmit(struct sk_buff *skb)
 	struct sk_buff *orig_skb = skb;
 	struct netdev_queue *txq;
 	int ret = NETDEV_TX_BUSY;
+	bool again = false;
 
 	if (unlikely(!netif_running(dev) ||
 		     !netif_carrier_ok(dev)))
 		goto drop;
 
-	skb = validate_xmit_skb_list(skb, dev);
+	skb = validate_xmit_skb_list(skb, dev, &again);
 	if (skb != orig_skb)
 		goto drop;
 
@@ -4530,7 +4531,6 @@ static int packet_seq_open(struct inode *inode, struct file *file)
 }
 
 static const struct file_operations packet_seq_fops = {
-	.owner		= THIS_MODULE,
 	.open		= packet_seq_open,
 	.read		= seq_read,
 	.llseek		= seq_lseek,
diff --git a/net/phonet/pn_netlink.c b/net/phonet/pn_netlink.c
index da754fc926e7..871eaf2cb85e 100644
--- a/net/phonet/pn_netlink.c
+++ b/net/phonet/pn_netlink.c
@@ -299,16 +299,21 @@ out:
 
 int __init phonet_netlink_register(void)
 {
-	int err = __rtnl_register(PF_PHONET, RTM_NEWADDR, addr_doit,
-				  NULL, 0);
+	int err = rtnl_register_module(THIS_MODULE, PF_PHONET, RTM_NEWADDR,
+				       addr_doit, NULL, 0);
 	if (err)
 		return err;
 
-	/* Further __rtnl_register() cannot fail */
-	__rtnl_register(PF_PHONET, RTM_DELADDR, addr_doit, NULL, 0);
-	__rtnl_register(PF_PHONET, RTM_GETADDR, NULL, getaddr_dumpit, 0);
-	__rtnl_register(PF_PHONET, RTM_NEWROUTE, route_doit, NULL, 0);
-	__rtnl_register(PF_PHONET, RTM_DELROUTE, route_doit, NULL, 0);
-	__rtnl_register(PF_PHONET, RTM_GETROUTE, NULL, route_dumpit, 0);
+	/* Further rtnl_register_module() cannot fail */
+	rtnl_register_module(THIS_MODULE, PF_PHONET, RTM_DELADDR,
+			     addr_doit, NULL, 0);
+	rtnl_register_module(THIS_MODULE, PF_PHONET, RTM_GETADDR,
+			     NULL, getaddr_dumpit, 0);
+	rtnl_register_module(THIS_MODULE, PF_PHONET, RTM_NEWROUTE,
+			     route_doit, NULL, 0);
+	rtnl_register_module(THIS_MODULE, PF_PHONET, RTM_DELROUTE,
+			     route_doit, NULL, 0);
+	rtnl_register_module(THIS_MODULE, PF_PHONET, RTM_GETROUTE,
+			     NULL, route_dumpit, 0);
 	return 0;
 }
diff --git a/net/phonet/socket.c b/net/phonet/socket.c
index 44417480dab7..08f6751d2030 100644
--- a/net/phonet/socket.c
+++ b/net/phonet/socket.c
@@ -635,7 +635,6 @@ static int pn_sock_open(struct inode *inode, struct file *file)
 }
 
 const struct file_operations pn_sock_seq_fops = {
-	.owner = THIS_MODULE,
 	.open = pn_sock_open,
 	.read = seq_read,
 	.llseek = seq_lseek,
@@ -818,7 +817,6 @@ static int pn_res_open(struct inode *inode, struct file *file)
 }
 
 const struct file_operations pn_res_seq_fops = {
-	.owner = THIS_MODULE,
 	.open = pn_res_open,
 	.read = seq_read,
 	.llseek = seq_lseek,
diff --git a/net/qrtr/qrtr.c b/net/qrtr/qrtr.c
index 77ab05e23001..5fb3929e3d7d 100644
--- a/net/qrtr/qrtr.c
+++ b/net/qrtr/qrtr.c
@@ -1116,9 +1116,13 @@ static int __init qrtr_proto_init(void)
 		return rc;
 	}
 
-	rtnl_register(PF_QIPCRTR, RTM_NEWADDR, qrtr_addr_doit, NULL, 0);
+	rc = rtnl_register_module(THIS_MODULE, PF_QIPCRTR, RTM_NEWADDR, qrtr_addr_doit, NULL, 0);
+	if (rc) {
+		sock_unregister(qrtr_family.family);
+		proto_unregister(&qrtr_proto);
+	}
 
-	return 0;
+	return rc;
 }
 postcore_initcall(qrtr_proto_init);
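
Since rtnl_register_module() can fail, the init path has to unwind whatever it registered earlier, in reverse order. The general shape, with hypothetical identifiers:

	static int __init example_proto_init(void)
	{
		int rc;

		rc = proto_register(&example_proto, 1);
		if (rc)
			return rc;

		rc = sock_register(&example_family_ops);
		if (rc)
			goto err_proto;

		rc = rtnl_register_module(THIS_MODULE, PF_QIPCRTR, RTM_NEWADDR,
					  example_addr_doit, NULL, 0);
		if (rc)
			goto err_sock;
		return 0;

	err_sock:
		sock_unregister(example_family_ops.family);
	err_proto:
		proto_unregister(&example_proto);
		return rc;
	}
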
 
diff --git a/net/rds/bind.c b/net/rds/bind.c
index 75d43dc8e96b..5aa3a64aa4f0 100644
--- a/net/rds/bind.c
+++ b/net/rds/bind.c
@@ -114,6 +114,7 @@ static int rds_add_bound(struct rds_sock *rs, __be32 addr, __be16 *port)
 			  rs, &addr, (int)ntohs(*port));
 			break;
 		} else {
+			rs->rs_bound_addr = 0;
 			rds_sock_put(rs);
 			ret = -ENOMEM;
 			break;
diff --git a/net/rds/cong.c b/net/rds/cong.c
index 8398fee7c866..8d19fd25dce3 100644
--- a/net/rds/cong.c
+++ b/net/rds/cong.c
@@ -219,7 +219,11 @@ void rds_cong_queue_updates(struct rds_cong_map *map)
 	spin_lock_irqsave(&rds_cong_lock, flags);
 
 	list_for_each_entry(conn, &map->m_conn_list, c_map_item) {
-		if (!test_and_set_bit(0, &conn->c_map_queued)) {
+		struct rds_conn_path *cp = &conn->c_path[0];
+
+		rcu_read_lock();
+		if (!test_and_set_bit(0, &conn->c_map_queued) &&
+		    !test_bit(RDS_DESTROY_PENDING, &cp->cp_flags)) {
 			rds_stats_inc(s_cong_update_queued);
 			/* We cannot inline the call to rds_send_xmit() here
 			 * for two reasons (both pertaining to a TCP transport):
@@ -235,9 +239,9 @@ void rds_cong_queue_updates(struct rds_cong_map *map)
 			 *    therefore trigger warnings.
 			 * Defer the xmit to rds_send_worker() instead.
 			 */
-			queue_delayed_work(rds_wq,
-					   &conn->c_path[0].cp_send_w, 0);
+			queue_delayed_work(rds_wq, &cp->cp_send_w, 0);
 		}
+		rcu_read_unlock();
 	}
 
 	spin_unlock_irqrestore(&rds_cong_lock, flags);
diff --git a/net/rds/connection.c b/net/rds/connection.c
index 7ee2d5d68b78..b10c0ef36d8d 100644
--- a/net/rds/connection.c
+++ b/net/rds/connection.c
@@ -230,8 +230,8 @@ static struct rds_connection *__rds_conn_create(struct net *net,
 
 	rdsdebug("allocated conn %p for %pI4 -> %pI4 over %s %s\n",
 	  conn, &laddr, &faddr,
-	  trans->t_name ? trans->t_name : "[unknown]",
-	  is_outgoing ? "(outgoing)" : "");
+	  strnlen(trans->t_name, sizeof(trans->t_name)) ? trans->t_name :
+	  "[unknown]", is_outgoing ? "(outgoing)" : "");
 
 	/*
 	 * Since we ran without holding the conn lock, someone could
@@ -382,10 +382,13 @@ static void rds_conn_path_destroy(struct rds_conn_path *cp)
 {
 	struct rds_message *rm, *rtmp;
 
+	set_bit(RDS_DESTROY_PENDING, &cp->cp_flags);
+
 	if (!cp->cp_transport_data)
 		return;
 
 	/* make sure lingering queued work won't try to ref the conn */
+	synchronize_rcu();
 	cancel_delayed_work_sync(&cp->cp_send_w);
 	cancel_delayed_work_sync(&cp->cp_recv_w);
 
@@ -403,6 +406,11 @@ static void rds_conn_path_destroy(struct rds_conn_path *cp)
 	if (cp->cp_xmit_rm)
 		rds_message_put(cp->cp_xmit_rm);
 
+	WARN_ON(delayed_work_pending(&cp->cp_send_w));
+	WARN_ON(delayed_work_pending(&cp->cp_recv_w));
+	WARN_ON(delayed_work_pending(&cp->cp_conn_w));
+	WARN_ON(work_pending(&cp->cp_down_w));
+
 	cp->cp_conn->c_trans->conn_free(cp->cp_transport_data);
 }
 
@@ -424,7 +432,6 @@ void rds_conn_destroy(struct rds_connection *conn)
 		 "%pI4\n", conn, &conn->c_laddr,
 		 &conn->c_faddr);
 
-	conn->c_destroy_in_prog = 1;
 	/* Ensure conn will not be scheduled for reconnect */
 	spin_lock_irq(&rds_conn_lock);
 	hlist_del_init_rcu(&conn->c_hash_node);
@@ -445,7 +452,6 @@ void rds_conn_destroy(struct rds_connection *conn)
 	 */
 	rds_cong_remove_conn(conn);
 
-	put_net(conn->c_net);
 	kfree(conn->c_path);
 	kmem_cache_free(rds_conn_slab, conn);
 
@@ -684,10 +690,13 @@ void rds_conn_path_drop(struct rds_conn_path *cp, bool destroy)
 {
 	atomic_set(&cp->cp_state, RDS_CONN_ERROR);
 
-	if (!destroy && cp->cp_conn->c_destroy_in_prog)
+	rcu_read_lock();
+	if (!destroy && test_bit(RDS_DESTROY_PENDING, &cp->cp_flags)) {
+		rcu_read_unlock();
 		return;
-
+	}
 	queue_work(rds_wq, &cp->cp_down_w);
+	rcu_read_unlock();
 }
 EXPORT_SYMBOL_GPL(rds_conn_path_drop);
 
@@ -704,9 +713,15 @@ EXPORT_SYMBOL_GPL(rds_conn_drop);
  */
 void rds_conn_path_connect_if_down(struct rds_conn_path *cp)
 {
+	rcu_read_lock();
+	if (test_bit(RDS_DESTROY_PENDING, &cp->cp_flags)) {
+		rcu_read_unlock();
+		return;
+	}
 	if (rds_conn_path_state(cp) == RDS_CONN_DOWN &&
 	    !test_and_set_bit(RDS_RECONNECT_PENDING, &cp->cp_flags))
 		queue_delayed_work(rds_wq, &cp->cp_conn_w, 0);
+	rcu_read_unlock();
 }
 EXPORT_SYMBOL_GPL(rds_conn_path_connect_if_down);
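
This is the pattern repeated across the RDS changes: teardown sets RDS_DESTROY_PENDING and waits out an RCU grace period before cancelling work, while every site that queues work first checks the flag inside an RCU read section. After synchronize_rcu() returns, no queuer can still be past its check, so nothing re-arms the work behind cancel_delayed_work_sync(). Condensed:

	/* teardown side */
	set_bit(RDS_DESTROY_PENDING, &cp->cp_flags);
	synchronize_rcu();			/* flush in-flight queuers */
	cancel_delayed_work_sync(&cp->cp_send_w);

	/* queueing side */
	rcu_read_lock();
	if (!test_bit(RDS_DESTROY_PENDING, &cp->cp_flags))
		queue_delayed_work(rds_wq, &cp->cp_send_w, 0);
	rcu_read_unlock();
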
 
diff --git a/net/rds/rds.h b/net/rds/rds.h
index c349c71babff..374ae83b60d4 100644
--- a/net/rds/rds.h
+++ b/net/rds/rds.h
@@ -88,6 +88,7 @@ enum {
 #define RDS_RECONNECT_PENDING	1
 #define RDS_IN_XMIT		2
 #define RDS_RECV_REFILL		3
+#define	RDS_DESTROY_PENDING	4
 
 /* Max number of multipaths per RDS connection. Must be a power of 2 */
 #define	RDS_MPATH_WORKERS	8
@@ -139,8 +140,7 @@ struct rds_connection {
 	__be32			c_faddr;
 	unsigned int		c_loopback:1,
 				c_ping_triggered:1,
-				c_destroy_in_prog:1,
-				c_pad_to_32:29;
+				c_pad_to_32:30;
 	int			c_npaths;
 	struct rds_connection	*c_passive;
 	struct rds_transport	*c_trans;
@@ -150,7 +150,7 @@ struct rds_connection {
 
 	/* Protocol version */
 	unsigned int		c_version;
-	struct net		*c_net;
+	possible_net_t		c_net;
 
 	struct list_head	c_map_item;
 	unsigned long		c_map_queued;
@@ -165,13 +165,13 @@ struct rds_connection {
 static inline
 struct net *rds_conn_net(struct rds_connection *conn)
 {
-	return conn->c_net;
+	return read_pnet(&conn->c_net);
 }
 
 static inline
 void rds_conn_net_set(struct rds_connection *conn, struct net *net)
 {
-	conn->c_net = get_net(net);
+	write_pnet(&conn->c_net, net);
 }
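
possible_net_t holds a real struct net pointer only when CONFIG_NET_NS is enabled, and read_pnet()/write_pnet() hide the difference; unlike the old get_net()/put_net() pair it takes no reference, which is why the put_net() disappears from rds_conn_destroy(). Accessor sketch with a hypothetical struct:

	#include <net/net_namespace.h>

	struct example {
		possible_net_t	e_net;	/* empty when !CONFIG_NET_NS */
	};

	static void example_set_net(struct example *e, struct net *net)
	{
		write_pnet(&e->e_net, net);	/* no refcount taken */
	}

	static struct net *example_net(const struct example *e)
	{
		return read_pnet(&e->e_net);	/* &init_net if NS off */
	}
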
 
 #define RDS_FLAG_CONG_BITMAP	0x01
diff --git a/net/rds/send.c b/net/rds/send.c
index f72466c63f0c..d3e32d1f3c7d 100644
--- a/net/rds/send.c
+++ b/net/rds/send.c
@@ -162,6 +162,12 @@ restart:
 		goto out;
 	}
 
+	if (test_bit(RDS_DESTROY_PENDING, &cp->cp_flags)) {
+		release_in_xmit(cp);
+		ret = -ENETUNREACH; /* don't requeue send work */
+		goto out;
+	}
+
 	/*
 	 * we record the send generation after doing the xmit acquire.
 	 * if someone else manages to jump in and do some work, we'll use
@@ -437,7 +443,12 @@ over_batch:
 		    !list_empty(&cp->cp_send_queue)) && !raced) {
 			if (batch_count < send_batch_count)
 				goto restart;
-			queue_delayed_work(rds_wq, &cp->cp_send_w, 1);
+			rcu_read_lock();
+			if (test_bit(RDS_DESTROY_PENDING, &cp->cp_flags))
+				ret = -ENETUNREACH;
+			else
+				queue_delayed_work(rds_wq, &cp->cp_send_w, 1);
+			rcu_read_unlock();
 		} else if (raced) {
 			rds_stats_inc(s_send_lock_queue_raced);
 		}
@@ -1151,6 +1162,11 @@ int rds_sendmsg(struct socket *sock, struct msghdr *msg, size_t payload_len)
 	else
 		cpath = &conn->c_path[0];
 
+	if (test_bit(RDS_DESTROY_PENDING, &cpath->cp_flags)) {
+		ret = -EAGAIN;
+		goto out;
+	}
+
 	rds_conn_path_connect_if_down(cpath);
 
 	ret = rds_cong_wait(conn->c_fcong, dport, nonblock, rs);
@@ -1190,9 +1206,17 @@ int rds_sendmsg(struct socket *sock, struct msghdr *msg, size_t payload_len)
 	rds_stats_inc(s_send_queued);
 
 	ret = rds_send_xmit(cpath);
-	if (ret == -ENOMEM || ret == -EAGAIN)
-		queue_delayed_work(rds_wq, &cpath->cp_send_w, 1);
-
+	if (ret == -ENOMEM || ret == -EAGAIN) {
+		ret = 0;
+		rcu_read_lock();
+		if (test_bit(RDS_DESTROY_PENDING, &cpath->cp_flags))
+			ret = -ENETUNREACH;
+		else
+			queue_delayed_work(rds_wq, &cpath->cp_send_w, 1);
+		rcu_read_unlock();
+	}
+	if (ret)
+		goto out;
 	rds_message_put(rm);
 	return payload_len;
 
@@ -1270,7 +1294,10 @@ rds_send_probe(struct rds_conn_path *cp, __be16 sport,
 	rds_stats_inc(s_send_pong);
 
 	/* schedule the send work on rds_wq */
-	queue_delayed_work(rds_wq, &cp->cp_send_w, 1);
+	rcu_read_lock();
+	if (!test_bit(RDS_DESTROY_PENDING, &cp->cp_flags))
+		queue_delayed_work(rds_wq, &cp->cp_send_w, 1);
+	rcu_read_unlock();
 
 	rds_message_put(rm);
 	return 0;
diff --git a/net/rds/tcp.c b/net/rds/tcp.c
index ab7356e0ba83..9920d2f84eff 100644
--- a/net/rds/tcp.c
+++ b/net/rds/tcp.c
@@ -271,16 +271,33 @@ static int rds_tcp_laddr_check(struct net *net, __be32 addr)
 	return -EADDRNOTAVAIL;
 }
 
+static void rds_tcp_conn_free(void *arg)
+{
+	struct rds_tcp_connection *tc = arg;
+	unsigned long flags;
+
+	rdsdebug("freeing tc %p\n", tc);
+
+	spin_lock_irqsave(&rds_tcp_conn_lock, flags);
+	if (!tc->t_tcp_node_detached)
+		list_del(&tc->t_tcp_node);
+	spin_unlock_irqrestore(&rds_tcp_conn_lock, flags);
+
+	kmem_cache_free(rds_tcp_conn_slab, tc);
+}
+
 static int rds_tcp_conn_alloc(struct rds_connection *conn, gfp_t gfp)
 {
 	struct rds_tcp_connection *tc;
-	int i;
+	int i, j;
+	int ret = 0;
 
 	for (i = 0; i < RDS_MPATH_WORKERS; i++) {
 		tc = kmem_cache_alloc(rds_tcp_conn_slab, gfp);
-		if (!tc)
-			return -ENOMEM;
-
+		if (!tc) {
+			ret = -ENOMEM;
+			break;
+		}
 		mutex_init(&tc->t_conn_path_lock);
 		tc->t_sock = NULL;
 		tc->t_tinc = NULL;
@@ -291,26 +308,17 @@ static int rds_tcp_conn_alloc(struct rds_connection *conn, gfp_t gfp)
 		tc->t_cpath = &conn->c_path[i];
 
 		spin_lock_irq(&rds_tcp_conn_lock);
+		tc->t_tcp_node_detached = false;
 		list_add_tail(&tc->t_tcp_node, &rds_tcp_conn_list);
 		spin_unlock_irq(&rds_tcp_conn_lock);
 		rdsdebug("rds_conn_path [%d] tc %p\n", i,
 			 conn->c_path[i].cp_transport_data);
 	}
-
-	return 0;
-}
-
-static void rds_tcp_conn_free(void *arg)
-{
-	struct rds_tcp_connection *tc = arg;
-	unsigned long flags;
-	rdsdebug("freeing tc %p\n", tc);
-
-	spin_lock_irqsave(&rds_tcp_conn_lock, flags);
-	list_del(&tc->t_tcp_node);
-	spin_unlock_irqrestore(&rds_tcp_conn_lock, flags);
-
-	kmem_cache_free(rds_tcp_conn_slab, tc);
+	if (ret) {
+		for (j = 0; j < i; j++)
+			rds_tcp_conn_free(conn->c_path[j].cp_transport_data);
+	}
+	return ret;
 }
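
On a partial allocation failure the loop now frees exactly the paths it managed to create (indices 0 through i - 1) through the same rds_tcp_conn_free() used at normal teardown; the new t_tcp_node_detached flag keeps that free path from unlinking an entry twice. The unwind-what-you-built idiom in isolation (types and helpers hypothetical):

	static int alloc_all(struct thing **objs, int n)
	{
		int i, j, ret = 0;

		for (i = 0; i < n; i++) {
			objs[i] = alloc_one();
			if (!objs[i]) {
				ret = -ENOMEM;
				break;
			}
		}
		if (ret) {
			for (j = 0; j < i; j++)	/* only what was built */
				free_one(objs[j]);
		}
		return ret;
	}
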
 
 static bool list_has_conn(struct list_head *list, struct rds_connection *conn)
@@ -496,27 +504,6 @@ static struct pernet_operations rds_tcp_net_ops = {
 	.size = sizeof(struct rds_tcp_net),
 };
 
-/* explicitly send a RST on each socket, thereby releasing any socket refcnts
- * that may otherwise hold up netns deletion.
- */
-static void rds_tcp_conn_paths_destroy(struct rds_connection *conn)
-{
-	struct rds_conn_path *cp;
-	struct rds_tcp_connection *tc;
-	int i;
-	struct sock *sk;
-
-	for (i = 0; i < RDS_MPATH_WORKERS; i++) {
-		cp = &conn->c_path[i];
-		tc = cp->cp_transport_data;
-		if (!tc->t_sock)
-			continue;
-		sk = tc->t_sock->sk;
-		sk->sk_prot->disconnect(sk, 0);
-		tcp_done(sk);
-	}
-}
-
 static void rds_tcp_kill_sock(struct net *net)
 {
 	struct rds_tcp_connection *tc, *_tc;
@@ -528,18 +515,20 @@ static void rds_tcp_kill_sock(struct net *net)
 	rds_tcp_listen_stop(lsock, &rtn->rds_tcp_accept_w);
 	spin_lock_irq(&rds_tcp_conn_lock);
 	list_for_each_entry_safe(tc, _tc, &rds_tcp_conn_list, t_tcp_node) {
-		struct net *c_net = tc->t_cpath->cp_conn->c_net;
+		struct net *c_net = read_pnet(&tc->t_cpath->cp_conn->c_net);
 
 		if (net != c_net || !tc->t_sock)
 			continue;
-		if (!list_has_conn(&tmp_list, tc->t_cpath->cp_conn))
+		if (!list_has_conn(&tmp_list, tc->t_cpath->cp_conn)) {
 			list_move_tail(&tc->t_tcp_node, &tmp_list);
+		} else {
+			list_del(&tc->t_tcp_node);
+			tc->t_tcp_node_detached = true;
+		}
 	}
 	spin_unlock_irq(&rds_tcp_conn_lock);
-	list_for_each_entry_safe(tc, _tc, &tmp_list, t_tcp_node) {
-		rds_tcp_conn_paths_destroy(tc->t_cpath->cp_conn);
+	list_for_each_entry_safe(tc, _tc, &tmp_list, t_tcp_node)
 		rds_conn_destroy(tc->t_cpath->cp_conn);
-	}
 }
 
 void *rds_tcp_listen_sock_def_readable(struct net *net)
@@ -587,7 +576,7 @@ static void rds_tcp_sysctl_reset(struct net *net)
 
 	spin_lock_irq(&rds_tcp_conn_lock);
 	list_for_each_entry_safe(tc, _tc, &rds_tcp_conn_list, t_tcp_node) {
-		struct net *c_net = tc->t_cpath->cp_conn->c_net;
+		struct net *c_net = read_pnet(&tc->t_cpath->cp_conn->c_net);
 
 		if (net != c_net || !tc->t_sock)
 			continue;
diff --git a/net/rds/tcp.h b/net/rds/tcp.h
index 864ca7d8f019..c6fa080e9b6d 100644
--- a/net/rds/tcp.h
+++ b/net/rds/tcp.h
@@ -12,6 +12,7 @@ struct rds_tcp_incoming {
 struct rds_tcp_connection {
 
 	struct list_head	t_tcp_node;
+	bool			t_tcp_node_detached;
 	struct rds_conn_path	*t_cpath;
 	/* t_conn_path_lock synchronizes the connection establishment between
 	 * rds_tcp_accept_one and rds_tcp_conn_path_connect
diff --git a/net/rds/tcp_connect.c b/net/rds/tcp_connect.c
index 46f74dad0e16..534c67aeb20f 100644
--- a/net/rds/tcp_connect.c
+++ b/net/rds/tcp_connect.c
@@ -170,7 +170,7 @@ void rds_tcp_conn_path_shutdown(struct rds_conn_path *cp)
 		 cp->cp_conn, tc, sock);
 
 	if (sock) {
-		if (cp->cp_conn->c_destroy_in_prog)
+		if (test_bit(RDS_DESTROY_PENDING, &cp->cp_flags))
 			rds_tcp_set_linger(sock);
 		sock->ops->shutdown(sock, RCV_SHUTDOWN | SEND_SHUTDOWN);
 		lock_sock(sock->sk);
diff --git a/net/rds/tcp_recv.c b/net/rds/tcp_recv.c
index e006ef8e6d40..dd707b9e73e5 100644
--- a/net/rds/tcp_recv.c
+++ b/net/rds/tcp_recv.c
@@ -321,8 +321,12 @@ void rds_tcp_data_ready(struct sock *sk)
 	ready = tc->t_orig_data_ready;
 	rds_tcp_stats_inc(s_tcp_data_ready_calls);
 
-	if (rds_tcp_read_sock(cp, GFP_ATOMIC) == -ENOMEM)
-		queue_delayed_work(rds_wq, &cp->cp_recv_w, 0);
+	if (rds_tcp_read_sock(cp, GFP_ATOMIC) == -ENOMEM) {
+		rcu_read_lock();
+		if (!test_bit(RDS_DESTROY_PENDING, &cp->cp_flags))
+			queue_delayed_work(rds_wq, &cp->cp_recv_w, 0);
+		rcu_read_unlock();
+	}
 out:
 	read_unlock_bh(&sk->sk_callback_lock);
 	ready(sk);
diff --git a/net/rds/tcp_send.c b/net/rds/tcp_send.c
index 9b76e0fa1722..16f65744d984 100644
--- a/net/rds/tcp_send.c
+++ b/net/rds/tcp_send.c
@@ -202,8 +202,11 @@ void rds_tcp_write_space(struct sock *sk)
 	tc->t_last_seen_una = rds_tcp_snd_una(tc);
 	rds_send_path_drop_acked(cp, rds_tcp_snd_una(tc), rds_tcp_is_acked);
 
-	if ((refcount_read(&sk->sk_wmem_alloc) << 1) <= sk->sk_sndbuf)
+	rcu_read_lock();
+	if ((refcount_read(&sk->sk_wmem_alloc) << 1) <= sk->sk_sndbuf &&
+	    !test_bit(RDS_DESTROY_PENDING, &cp->cp_flags))
 		queue_delayed_work(rds_wq, &cp->cp_send_w, 0);
+	rcu_read_unlock();
 
 out:
 	read_unlock_bh(&sk->sk_callback_lock);
diff --git a/net/rds/threads.c b/net/rds/threads.c
index f121daa402c8..eb76db1360b0 100644
--- a/net/rds/threads.c
+++ b/net/rds/threads.c
@@ -87,8 +87,12 @@ void rds_connect_path_complete(struct rds_conn_path *cp, int curr)
 
 	cp->cp_reconnect_jiffies = 0;
 	set_bit(0, &cp->cp_conn->c_map_queued);
-	queue_delayed_work(rds_wq, &cp->cp_send_w, 0);
-	queue_delayed_work(rds_wq, &cp->cp_recv_w, 0);
+	rcu_read_lock();
+	if (!test_bit(RDS_DESTROY_PENDING, &cp->cp_flags)) {
+		queue_delayed_work(rds_wq, &cp->cp_send_w, 0);
+		queue_delayed_work(rds_wq, &cp->cp_recv_w, 0);
+	}
+	rcu_read_unlock();
 }
 EXPORT_SYMBOL_GPL(rds_connect_path_complete);
 
@@ -133,7 +137,10 @@ void rds_queue_reconnect(struct rds_conn_path *cp)
 	set_bit(RDS_RECONNECT_PENDING, &cp->cp_flags);
 	if (cp->cp_reconnect_jiffies == 0) {
 		cp->cp_reconnect_jiffies = rds_sysctl_reconnect_min_jiffies;
-		queue_delayed_work(rds_wq, &cp->cp_conn_w, 0);
+		rcu_read_lock();
+		if (!test_bit(RDS_DESTROY_PENDING, &cp->cp_flags))
+			queue_delayed_work(rds_wq, &cp->cp_conn_w, 0);
+		rcu_read_unlock();
 		return;
 	}
 
@@ -141,8 +148,11 @@ void rds_queue_reconnect(struct rds_conn_path *cp)
 	rdsdebug("%lu delay %lu ceil conn %p for %pI4 -> %pI4\n",
 		 rand % cp->cp_reconnect_jiffies, cp->cp_reconnect_jiffies,
 		 conn, &conn->c_laddr, &conn->c_faddr);
-	queue_delayed_work(rds_wq, &cp->cp_conn_w,
-			   rand % cp->cp_reconnect_jiffies);
+	rcu_read_lock();
+	if (!test_bit(RDS_DESTROY_PENDING, &cp->cp_flags))
+		queue_delayed_work(rds_wq, &cp->cp_conn_w,
+				   rand % cp->cp_reconnect_jiffies);
+	rcu_read_unlock();
 
 	cp->cp_reconnect_jiffies = min(cp->cp_reconnect_jiffies * 2,
 					rds_sysctl_reconnect_max_jiffies);
diff --git a/net/rose/af_rose.c b/net/rose/af_rose.c
index 6a5c4992cf61..083bd251406f 100644
--- a/net/rose/af_rose.c
+++ b/net/rose/af_rose.c
@@ -1461,7 +1461,6 @@ static int rose_info_open(struct inode *inode, struct file *file)
 }
 
 static const struct file_operations rose_info_fops = {
-	.owner = THIS_MODULE,
 	.open = rose_info_open,
 	.read = seq_read,
 	.llseek = seq_lseek,
diff --git a/net/rose/rose_route.c b/net/rose/rose_route.c
index 8ca3124df83f..178619ddab68 100644
--- a/net/rose/rose_route.c
+++ b/net/rose/rose_route.c
@@ -1156,7 +1156,6 @@ static int rose_nodes_open(struct inode *inode, struct file *file)
 }
 
 const struct file_operations rose_nodes_fops = {
-	.owner = THIS_MODULE,
 	.open = rose_nodes_open,
 	.read = seq_read,
 	.llseek = seq_lseek,
@@ -1240,7 +1239,6 @@ static int rose_neigh_open(struct inode *inode, struct file *file)
 }
 
 const struct file_operations rose_neigh_fops = {
-	.owner = THIS_MODULE,
 	.open = rose_neigh_open,
 	.read = seq_read,
 	.llseek = seq_lseek,
@@ -1326,7 +1324,6 @@ static int rose_route_open(struct inode *inode, struct file *file)
 }
 
 const struct file_operations rose_routes_fops = {
-	.owner = THIS_MODULE,
 	.open = rose_route_open,
 	.read = seq_read,
 	.llseek = seq_lseek,
diff --git a/net/rxrpc/proc.c b/net/rxrpc/proc.c
index 7421656963a9..f79f260c6ddc 100644
--- a/net/rxrpc/proc.c
+++ b/net/rxrpc/proc.c
@@ -125,7 +125,6 @@ static int rxrpc_call_seq_open(struct inode *inode, struct file *file)
 }
 
 const struct file_operations rxrpc_call_seq_fops = {
-	.owner		= THIS_MODULE,
 	.open		= rxrpc_call_seq_open,
 	.read		= seq_read,
 	.llseek		= seq_lseek,
@@ -217,7 +216,6 @@ static int rxrpc_connection_seq_open(struct inode *inode, struct file *file)
 }
 
 const struct file_operations rxrpc_connection_seq_fops = {
-	.owner		= THIS_MODULE,
 	.open		= rxrpc_connection_seq_open,
 	.read		= seq_read,
 	.llseek		= seq_lseek,
diff --git a/net/sched/Kconfig b/net/sched/Kconfig
index c03d86a7775e..f24a6ae6819a 100644
--- a/net/sched/Kconfig
+++ b/net/sched/Kconfig
@@ -857,17 +857,14 @@ config NET_ACT_TUNNEL_KEY
 config NET_IFE_SKBMARK
         tristate "Support to encoding decoding skb mark on IFE action"
         depends on NET_ACT_IFE
-        ---help---
 
 config NET_IFE_SKBPRIO
         tristate "Support to encoding decoding skb prio on IFE action"
         depends on NET_ACT_IFE
-        ---help---
 
 config NET_IFE_SKBTCINDEX
         tristate "Support to encoding decoding skb tcindex on IFE action"
         depends on NET_ACT_IFE
-        ---help---
 
 config NET_CLS_IND
 	bool "Incoming device classification"
diff --git a/net/sched/act_api.c b/net/sched/act_api.c
index 4d33a50a8a6d..52622a3d2517 100644
--- a/net/sched/act_api.c
+++ b/net/sched/act_api.c
@@ -99,7 +99,7 @@ int __tcf_idr_release(struct tc_action *p, bool bind, bool strict)
 		p->tcfa_refcnt--;
 		if (p->tcfa_bindcnt <= 0 && p->tcfa_refcnt <= 0) {
 			if (p->ops->cleanup)
-				p->ops->cleanup(p, bind);
+				p->ops->cleanup(p);
 			tcf_idr_remove(p->idrinfo, p);
 			ret = ACT_P_DELETED;
 		}
diff --git a/net/sched/act_bpf.c b/net/sched/act_bpf.c
index 5ef8ce8c83d4..b3f2c15affa7 100644
--- a/net/sched/act_bpf.c
+++ b/net/sched/act_bpf.c
@@ -357,7 +357,7 @@ out:
 	return ret;
 }
 
-static void tcf_bpf_cleanup(struct tc_action *act, int bind)
+static void tcf_bpf_cleanup(struct tc_action *act)
 {
 	struct tcf_bpf_cfg tmp;
 
@@ -401,16 +401,14 @@ static __net_init int bpf_init_net(struct net *net)
 	return tc_action_net_init(tn, &act_bpf_ops);
 }
 
-static void __net_exit bpf_exit_net(struct net *net)
+static void __net_exit bpf_exit_net(struct list_head *net_list)
 {
-	struct tc_action_net *tn = net_generic(net, bpf_net_id);
-
-	tc_action_net_exit(tn);
+	tc_action_net_exit(net_list, bpf_net_id);
 }
 
 static struct pernet_operations bpf_net_ops = {
 	.init = bpf_init_net,
-	.exit = bpf_exit_net,
+	.exit_batch = bpf_exit_net,
 	.id   = &bpf_net_id,
 	.size = sizeof(struct tc_action_net),
 };
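
Every tc action module in this series gets the same conversion: the per-namespace .exit hook becomes .exit_batch, which receives a list of all namespaces dying together, and tc_action_net_exit() now takes that list plus the net_id and performs the per-net lookup itself, so the cleanup work is batched. The template each module instantiates:

	static void __net_exit example_exit_net(struct list_head *net_list)
	{
		tc_action_net_exit(net_list, example_net_id);
	}

	static struct pernet_operations example_net_ops = {
		.init	    = example_init_net,
		.exit_batch = example_exit_net,
		.id	    = &example_net_id,
		.size	    = sizeof(struct tc_action_net),
	};
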
diff --git a/net/sched/act_connmark.c b/net/sched/act_connmark.c
index 10b7a8855a6c..2b15ba84e0c8 100644
--- a/net/sched/act_connmark.c
+++ b/net/sched/act_connmark.c
@@ -209,16 +209,14 @@ static __net_init int connmark_init_net(struct net *net)
 	return tc_action_net_init(tn, &act_connmark_ops);
 }
 
-static void __net_exit connmark_exit_net(struct net *net)
+static void __net_exit connmark_exit_net(struct list_head *net_list)
 {
-	struct tc_action_net *tn = net_generic(net, connmark_net_id);
-
-	tc_action_net_exit(tn);
+	tc_action_net_exit(net_list, connmark_net_id);
 }
 
 static struct pernet_operations connmark_net_ops = {
 	.init = connmark_init_net,
-	.exit = connmark_exit_net,
+	.exit_batch = connmark_exit_net,
 	.id   = &connmark_net_id,
 	.size = sizeof(struct tc_action_net),
 };
diff --git a/net/sched/act_csum.c b/net/sched/act_csum.c
index d836f998117b..b7ba9b06b147 100644
--- a/net/sched/act_csum.c
+++ b/net/sched/act_csum.c
@@ -49,6 +49,7 @@ static int tcf_csum_init(struct net *net, struct nlattr *nla,
 			 int bind)
 {
 	struct tc_action_net *tn = net_generic(net, csum_net_id);
+	struct tcf_csum_params *params_old, *params_new;
 	struct nlattr *tb[TCA_CSUM_MAX + 1];
 	struct tc_csum *parm;
 	struct tcf_csum *p;
@@ -67,7 +68,7 @@ static int tcf_csum_init(struct net *net, struct nlattr *nla,
 
 	if (!tcf_idr_check(tn, parm->index, a, bind)) {
 		ret = tcf_idr_create(tn, parm->index, est, a,
-				     &act_csum_ops, bind, false);
+				     &act_csum_ops, bind, true);
 		if (ret)
 			return ret;
 		ret = ACT_P_CREATED;
@@ -80,10 +81,21 @@ static int tcf_csum_init(struct net *net, struct nlattr *nla,
 	}
 
 	p = to_tcf_csum(*a);
-	spin_lock_bh(&p->tcf_lock);
-	p->tcf_action = parm->action;
-	p->update_flags = parm->update_flags;
-	spin_unlock_bh(&p->tcf_lock);
+	ASSERT_RTNL();
+
+	params_new = kzalloc(sizeof(*params_new), GFP_KERNEL);
+	if (unlikely(!params_new)) {
+		if (ret == ACT_P_CREATED)
+			tcf_idr_release(*a, bind);
+		return -ENOMEM;
+	}
+	params_old = rtnl_dereference(p->params);
+
+	params_new->action = parm->action;
+	params_new->update_flags = parm->update_flags;
+	rcu_assign_pointer(p->params, params_new);
+	if (params_old)
+		kfree_rcu(params_old, rcu);
 
 	if (ret == ACT_P_CREATED)
 		tcf_idr_insert(tn, *a);
@@ -539,19 +551,21 @@ static int tcf_csum(struct sk_buff *skb, const struct tc_action *a,
 		    struct tcf_result *res)
 {
 	struct tcf_csum *p = to_tcf_csum(a);
-	int action;
+	struct tcf_csum_params *params;
 	u32 update_flags;
+	int action;
+
+	rcu_read_lock();
+	params = rcu_dereference(p->params);
 
-	spin_lock(&p->tcf_lock);
 	tcf_lastuse_update(&p->tcf_tm);
-	bstats_update(&p->tcf_bstats, skb);
-	action = p->tcf_action;
-	update_flags = p->update_flags;
-	spin_unlock(&p->tcf_lock);
+	bstats_cpu_update(this_cpu_ptr(p->common.cpu_bstats), skb);
 
+	action = params->action;
 	if (unlikely(action == TC_ACT_SHOT))
-		goto drop;
+		goto drop_stats;
 
+	update_flags = params->update_flags;
 	switch (tc_skb_protocol(skb)) {
 	case cpu_to_be16(ETH_P_IP):
 		if (!tcf_csum_ipv4(skb, update_flags))
@@ -563,13 +577,16 @@ static int tcf_csum(struct sk_buff *skb, const struct tc_action *a,
 		break;
 	}
 
+unlock:
+	rcu_read_unlock();
 	return action;
 
 drop:
-	spin_lock(&p->tcf_lock);
-	p->tcf_qstats.drops++;
-	spin_unlock(&p->tcf_lock);
-	return TC_ACT_SHOT;
+	action = TC_ACT_SHOT;
+
+drop_stats:
+	qstats_drop_inc(this_cpu_ptr(p->common.cpu_qstats));
+	goto unlock;
 }
 
 static int tcf_csum_dump(struct sk_buff *skb, struct tc_action *a, int bind,
@@ -577,15 +594,18 @@ static int tcf_csum_dump(struct sk_buff *skb, struct tc_action *a, int bind,
 {
 	unsigned char *b = skb_tail_pointer(skb);
 	struct tcf_csum *p = to_tcf_csum(a);
+	struct tcf_csum_params *params;
 	struct tc_csum opt = {
-		.update_flags = p->update_flags,
 		.index   = p->tcf_index,
-		.action  = p->tcf_action,
 		.refcnt  = p->tcf_refcnt - ref,
 		.bindcnt = p->tcf_bindcnt - bind,
 	};
 	struct tcf_t t;
 
+	params = rtnl_dereference(p->params);
+	opt.action = params->action;
+	opt.update_flags = params->update_flags;
+
 	if (nla_put(skb, TCA_CSUM_PARMS, sizeof(opt), &opt))
 		goto nla_put_failure;
 
@@ -600,6 +620,15 @@ nla_put_failure:
 	return -1;
 }
 
+static void tcf_csum_cleanup(struct tc_action *a)
+{
+	struct tcf_csum *p = to_tcf_csum(a);
+	struct tcf_csum_params *params;
+
+	params = rcu_dereference_protected(p->params, 1);
+	kfree_rcu(params, rcu);
+}
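
act_csum now keeps its parameters in an RCU-managed blob: the fast path reads them under rcu_read_lock(), updates swap the pointer under RTNL, and tcf_csum_cleanup() may use rcu_dereference_protected(..., 1) because no readers can remain at that point. The update step in isolation:

	/* control path, under RTNL */
	new = kzalloc(sizeof(*new), GFP_KERNEL);
	if (!new)
		return -ENOMEM;
	new->action	  = parm->action;
	new->update_flags = parm->update_flags;

	old = rtnl_dereference(p->params);	/* RTNL is the write lock */
	rcu_assign_pointer(p->params, new);	/* publish to readers */
	if (old)
		kfree_rcu(old, rcu);		/* free after grace period */
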
+
 static int tcf_csum_walker(struct net *net, struct sk_buff *skb,
 			   struct netlink_callback *cb, int type,
 			   const struct tc_action_ops *ops)
@@ -623,6 +652,7 @@ static struct tc_action_ops act_csum_ops = {
 	.act		= tcf_csum,
 	.dump		= tcf_csum_dump,
 	.init		= tcf_csum_init,
+	.cleanup	= tcf_csum_cleanup,
 	.walk		= tcf_csum_walker,
 	.lookup		= tcf_csum_search,
 	.size		= sizeof(struct tcf_csum),
@@ -635,16 +665,14 @@ static __net_init int csum_init_net(struct net *net)
 	return tc_action_net_init(tn, &act_csum_ops);
 }
 
-static void __net_exit csum_exit_net(struct net *net)
+static void __net_exit csum_exit_net(struct list_head *net_list)
 {
-	struct tc_action_net *tn = net_generic(net, csum_net_id);
-
-	tc_action_net_exit(tn);
+	tc_action_net_exit(net_list, csum_net_id);
 }
 
 static struct pernet_operations csum_net_ops = {
 	.init = csum_init_net,
-	.exit = csum_exit_net,
+	.exit_batch = csum_exit_net,
 	.id   = &csum_net_id,
 	.size = sizeof(struct tc_action_net),
 };
diff --git a/net/sched/act_gact.c b/net/sched/act_gact.c
index a0ac42b3ed06..b56986d41c87 100644
--- a/net/sched/act_gact.c
+++ b/net/sched/act_gact.c
@@ -235,16 +235,14 @@ static __net_init int gact_init_net(struct net *net)
 	return tc_action_net_init(tn, &act_gact_ops);
 }
 
-static void __net_exit gact_exit_net(struct net *net)
+static void __net_exit gact_exit_net(struct list_head *net_list)
 {
-	struct tc_action_net *tn = net_generic(net, gact_net_id);
-
-	tc_action_net_exit(tn);
+	tc_action_net_exit(net_list, gact_net_id);
 }
 
 static struct pernet_operations gact_net_ops = {
 	.init = gact_init_net,
-	.exit = gact_exit_net,
+	.exit_batch = gact_exit_net,
 	.id   = &gact_net_id,
 	.size = sizeof(struct tc_action_net),
 };
diff --git a/net/sched/act_ife.c b/net/sched/act_ife.c
index 3007cb1310ea..5954e992685a 100644
--- a/net/sched/act_ife.c
+++ b/net/sched/act_ife.c
@@ -387,7 +387,7 @@ out_nlmsg_trim:
 }
 
 /* under ife->tcf_lock */
-static void _tcf_ife_cleanup(struct tc_action *a, int bind)
+static void _tcf_ife_cleanup(struct tc_action *a)
 {
 	struct tcf_ife_info *ife = to_ife(a);
 	struct tcf_meta_info *e, *n;
@@ -405,13 +405,13 @@ static void _tcf_ife_cleanup(struct tc_action *a, int bind)
 	}
 }
 
-static void tcf_ife_cleanup(struct tc_action *a, int bind)
+static void tcf_ife_cleanup(struct tc_action *a)
 {
 	struct tcf_ife_info *ife = to_ife(a);
 	struct tcf_ife_params *p;
 
 	spin_lock_bh(&ife->tcf_lock);
-	_tcf_ife_cleanup(a, bind);
+	_tcf_ife_cleanup(a);
 	spin_unlock_bh(&ife->tcf_lock);
 
 	p = rcu_dereference_protected(ife->params, 1);
@@ -546,7 +546,7 @@ metadata_parse_err:
 			if (exists)
 				tcf_idr_release(*a, bind);
 			if (ret == ACT_P_CREATED)
-				_tcf_ife_cleanup(*a, bind);
+				_tcf_ife_cleanup(*a);
 
 			if (exists)
 				spin_unlock_bh(&ife->tcf_lock);
@@ -567,7 +567,7 @@ metadata_parse_err:
 		err = use_all_metadata(ife);
 		if (err) {
 			if (ret == ACT_P_CREATED)
-				_tcf_ife_cleanup(*a, bind);
+				_tcf_ife_cleanup(*a);
 
 			if (exists)
 				spin_unlock_bh(&ife->tcf_lock);
@@ -858,16 +858,14 @@ static __net_init int ife_init_net(struct net *net)
 	return tc_action_net_init(tn, &act_ife_ops);
 }
 
-static void __net_exit ife_exit_net(struct net *net)
+static void __net_exit ife_exit_net(struct list_head *net_list)
 {
-	struct tc_action_net *tn = net_generic(net, ife_net_id);
-
-	tc_action_net_exit(tn);
+	tc_action_net_exit(net_list, ife_net_id);
 }
 
 static struct pernet_operations ife_net_ops = {
 	.init = ife_init_net,
-	.exit = ife_exit_net,
+	.exit_batch = ife_exit_net,
 	.id   = &ife_net_id,
 	.size = sizeof(struct tc_action_net),
 };
diff --git a/net/sched/act_ipt.c b/net/sched/act_ipt.c
index d9e399a7e3d5..06e380ae0928 100644
--- a/net/sched/act_ipt.c
+++ b/net/sched/act_ipt.c
@@ -77,7 +77,7 @@ static void ipt_destroy_target(struct xt_entry_target *t)
 	module_put(par.target->me);
 }
 
-static void tcf_ipt_release(struct tc_action *a, int bind)
+static void tcf_ipt_release(struct tc_action *a)
 {
 	struct tcf_ipt *ipt = to_ipt(a);
 	ipt_destroy_target(ipt->tcfi_t);
@@ -337,16 +337,14 @@ static __net_init int ipt_init_net(struct net *net)
 	return tc_action_net_init(tn, &act_ipt_ops);
 }
 
-static void __net_exit ipt_exit_net(struct net *net)
+static void __net_exit ipt_exit_net(struct list_head *net_list)
 {
-	struct tc_action_net *tn = net_generic(net, ipt_net_id);
-
-	tc_action_net_exit(tn);
+	tc_action_net_exit(net_list, ipt_net_id);
 }
 
 static struct pernet_operations ipt_net_ops = {
 	.init = ipt_init_net,
-	.exit = ipt_exit_net,
+	.exit_batch = ipt_exit_net,
 	.id   = &ipt_net_id,
 	.size = sizeof(struct tc_action_net),
 };
@@ -387,16 +385,14 @@ static __net_init int xt_init_net(struct net *net)
 	return tc_action_net_init(tn, &act_xt_ops);
 }
 
-static void __net_exit xt_exit_net(struct net *net)
+static void __net_exit xt_exit_net(struct list_head *net_list)
 {
-	struct tc_action_net *tn = net_generic(net, xt_net_id);
-
-	tc_action_net_exit(tn);
+	tc_action_net_exit(net_list, xt_net_id);
 }
 
 static struct pernet_operations xt_net_ops = {
 	.init = xt_init_net,
-	.exit = xt_exit_net,
+	.exit_batch = xt_exit_net,
 	.id   = &xt_net_id,
 	.size = sizeof(struct tc_action_net),
 };
diff --git a/net/sched/act_mirred.c b/net/sched/act_mirred.c
index 08b61849c2a2..e6ff88f72900 100644
--- a/net/sched/act_mirred.c
+++ b/net/sched/act_mirred.c
@@ -29,7 +29,6 @@
 #include <net/tc_act/tc_mirred.h>
 
 static LIST_HEAD(mirred_list);
-static DEFINE_SPINLOCK(mirred_list_lock);
 
 static bool tcf_mirred_is_act_redirect(int action)
 {
@@ -50,18 +49,15 @@ static bool tcf_mirred_act_wants_ingress(int action)
 	}
 }
 
-static void tcf_mirred_release(struct tc_action *a, int bind)
+static void tcf_mirred_release(struct tc_action *a)
 {
 	struct tcf_mirred *m = to_mirred(a);
 	struct net_device *dev;
 
-	/* We could be called either in a RCU callback or with RTNL lock held. */
-	spin_lock_bh(&mirred_list_lock);
 	list_del(&m->tcfm_list);
-	dev = rcu_dereference_protected(m->tcfm_dev, 1);
+	dev = rtnl_dereference(m->tcfm_dev);
 	if (dev)
 		dev_put(dev);
-	spin_unlock_bh(&mirred_list_lock);
 }
 
 static const struct nla_policy mirred_policy[TCA_MIRRED_MAX + 1] = {
@@ -139,8 +135,6 @@ static int tcf_mirred_init(struct net *net, struct nlattr *nla,
 	m->tcf_action = parm->action;
 	m->tcfm_eaction = parm->eaction;
 	if (dev != NULL) {
-		m->tcfm_ifindex = parm->ifindex;
-		m->net = net;
 		if (ret != ACT_P_CREATED)
 			dev_put(rcu_dereference_protected(m->tcfm_dev, 1));
 		dev_hold(dev);
@@ -149,9 +143,7 @@ static int tcf_mirred_init(struct net *net, struct nlattr *nla,
 	}
 
 	if (ret == ACT_P_CREATED) {
-		spin_lock_bh(&mirred_list_lock);
 		list_add(&m->tcfm_list, &mirred_list);
-		spin_unlock_bh(&mirred_list_lock);
 		tcf_idr_insert(tn, *a);
 	}
 
@@ -247,13 +239,14 @@ static int tcf_mirred_dump(struct sk_buff *skb, struct tc_action *a, int bind,
 {
 	unsigned char *b = skb_tail_pointer(skb);
 	struct tcf_mirred *m = to_mirred(a);
+	struct net_device *dev = rtnl_dereference(m->tcfm_dev);
 	struct tc_mirred opt = {
 		.index   = m->tcf_index,
 		.action  = m->tcf_action,
 		.refcnt  = m->tcf_refcnt - ref,
 		.bindcnt = m->tcf_bindcnt - bind,
 		.eaction = m->tcfm_eaction,
-		.ifindex = m->tcfm_ifindex,
+		.ifindex = dev ? dev->ifindex : 0,
 	};
 	struct tcf_t t;
 
@@ -294,7 +287,6 @@ static int mirred_device_event(struct notifier_block *unused,
 
 	ASSERT_RTNL();
 	if (event == NETDEV_UNREGISTER) {
-		spin_lock_bh(&mirred_list_lock);
 		list_for_each_entry(m, &mirred_list, tcfm_list) {
 			if (rcu_access_pointer(m->tcfm_dev) == dev) {
 				dev_put(dev);
@@ -304,7 +296,6 @@ static int mirred_device_event(struct notifier_block *unused,
 				RCU_INIT_POINTER(m->tcfm_dev, NULL);
 			}
 		}
-		spin_unlock_bh(&mirred_list_lock);
 	}
 
 	return NOTIFY_DONE;
@@ -318,7 +309,7 @@ static struct net_device *tcf_mirred_get_dev(const struct tc_action *a)
 {
 	struct tcf_mirred *m = to_mirred(a);
 
-	return __dev_get_by_index(m->net, m->tcfm_ifindex);
+	return rtnl_dereference(m->tcfm_dev);
 }
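
mirred now pins the target device with dev_hold() and stores the pointer directly, so lookups become a plain rtnl_dereference(m->tcfm_dev) rather than an ifindex search, and the old mirred_list spinlock can go because every writer already runs under RTNL. The access pattern:

	ASSERT_RTNL();				/* all writers hold RTNL */

	dev = rtnl_dereference(m->tcfm_dev);	/* no ifindex lookup */
	if (dev)
		pr_debug("mirred target %s (ifindex %d)\n",
			 dev->name, dev->ifindex);
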
 
 static struct tc_action_ops act_mirred_ops = {
@@ -343,16 +334,14 @@ static __net_init int mirred_init_net(struct net *net)
 	return tc_action_net_init(tn, &act_mirred_ops);
 }
 
-static void __net_exit mirred_exit_net(struct net *net)
+static void __net_exit mirred_exit_net(struct list_head *net_list)
 {
-	struct tc_action_net *tn = net_generic(net, mirred_net_id);
-
-	tc_action_net_exit(tn);
+	tc_action_net_exit(net_list, mirred_net_id);
 }
 
 static struct pernet_operations mirred_net_ops = {
 	.init = mirred_init_net,
-	.exit = mirred_exit_net,
+	.exit_batch = mirred_exit_net,
 	.id   = &mirred_net_id,
 	.size = sizeof(struct tc_action_net),
 };
diff --git a/net/sched/act_nat.c b/net/sched/act_nat.c
index c365d01b99c8..98c6a4b2f523 100644
--- a/net/sched/act_nat.c
+++ b/net/sched/act_nat.c
@@ -310,16 +310,14 @@ static __net_init int nat_init_net(struct net *net)
 	return tc_action_net_init(tn, &act_nat_ops);
 }
 
-static void __net_exit nat_exit_net(struct net *net)
+static void __net_exit nat_exit_net(struct list_head *net_list)
 {
-	struct tc_action_net *tn = net_generic(net, nat_net_id);
-
-	tc_action_net_exit(tn);
+	tc_action_net_exit(net_list, nat_net_id);
 }
 
 static struct pernet_operations nat_net_ops = {
 	.init = nat_init_net,
-	.exit = nat_exit_net,
+	.exit_batch = nat_exit_net,
 	.id   = &nat_net_id,
 	.size = sizeof(struct tc_action_net),
 };
diff --git a/net/sched/act_pedit.c b/net/sched/act_pedit.c
index 491fe5deb09e..349beaffb29e 100644
--- a/net/sched/act_pedit.c
+++ b/net/sched/act_pedit.c
@@ -216,7 +216,7 @@ static int tcf_pedit_init(struct net *net, struct nlattr *nla,
 	return ret;
 }
 
-static void tcf_pedit_cleanup(struct tc_action *a, int bind)
+static void tcf_pedit_cleanup(struct tc_action *a)
 {
 	struct tcf_pedit *p = to_pedit(a);
 	struct tc_pedit_key *keys = p->tcfp_keys;
@@ -453,16 +453,14 @@ static __net_init int pedit_init_net(struct net *net)
 	return tc_action_net_init(tn, &act_pedit_ops);
 }
 
-static void __net_exit pedit_exit_net(struct net *net)
+static void __net_exit pedit_exit_net(struct list_head *net_list)
 {
-	struct tc_action_net *tn = net_generic(net, pedit_net_id);
-
-	tc_action_net_exit(tn);
+	tc_action_net_exit(net_list, pedit_net_id);
 }
 
 static struct pernet_operations pedit_net_ops = {
 	.init = pedit_init_net,
-	.exit = pedit_exit_net,
+	.exit_batch = pedit_exit_net,
 	.id   = &pedit_net_id,
 	.size = sizeof(struct tc_action_net),
 };
diff --git a/net/sched/act_police.c b/net/sched/act_police.c
index 3bb2ebf9e9ae..95d3c9097b25 100644
--- a/net/sched/act_police.c
+++ b/net/sched/act_police.c
@@ -118,13 +118,13 @@ static int tcf_act_police_init(struct net *net, struct nlattr *nla,
 	police = to_police(*a);
 	if (parm->rate.rate) {
 		err = -ENOMEM;
-		R_tab = qdisc_get_rtab(&parm->rate, tb[TCA_POLICE_RATE]);
+		R_tab = qdisc_get_rtab(&parm->rate, tb[TCA_POLICE_RATE], NULL);
 		if (R_tab == NULL)
 			goto failure;
 
 		if (parm->peakrate.rate) {
 			P_tab = qdisc_get_rtab(&parm->peakrate,
-					       tb[TCA_POLICE_PEAKRATE]);
+					       tb[TCA_POLICE_PEAKRATE], NULL);
 			if (P_tab == NULL)
 				goto failure;
 		}
@@ -334,16 +334,14 @@ static __net_init int police_init_net(struct net *net)
 	return tc_action_net_init(tn, &act_police_ops);
 }
 
-static void __net_exit police_exit_net(struct net *net)
+static void __net_exit police_exit_net(struct list_head *net_list)
 {
-	struct tc_action_net *tn = net_generic(net, police_net_id);
-
-	tc_action_net_exit(tn);
+	tc_action_net_exit(net_list, police_net_id);
 }
 
 static struct pernet_operations police_net_ops = {
 	.init = police_init_net,
-	.exit = police_exit_net,
+	.exit_batch = police_exit_net,
 	.id   = &police_net_id,
 	.size = sizeof(struct tc_action_net),
 };
diff --git a/net/sched/act_sample.c b/net/sched/act_sample.c
index 9438969290a6..1ba0df238756 100644
--- a/net/sched/act_sample.c
+++ b/net/sched/act_sample.c
@@ -96,7 +96,7 @@ static int tcf_sample_init(struct net *net, struct nlattr *nla,
 	return ret;
 }
 
-static void tcf_sample_cleanup(struct tc_action *a, int bind)
+static void tcf_sample_cleanup(struct tc_action *a)
 {
 	struct tcf_sample *s = to_sample(a);
 	struct psample_group *psample_group;
@@ -236,16 +236,14 @@ static __net_init int sample_init_net(struct net *net)
 	return tc_action_net_init(tn, &act_sample_ops);
 }
 
-static void __net_exit sample_exit_net(struct net *net)
+static void __net_exit sample_exit_net(struct list_head *net_list)
 {
-	struct tc_action_net *tn = net_generic(net, sample_net_id);
-
-	tc_action_net_exit(tn);
+	tc_action_net_exit(net_list, sample_net_id);
 }
 
 static struct pernet_operations sample_net_ops = {
 	.init = sample_init_net,
-	.exit = sample_exit_net,
+	.exit_batch = sample_exit_net,
 	.id   = &sample_net_id,
 	.size = sizeof(struct tc_action_net),
 };
diff --git a/net/sched/act_simple.c b/net/sched/act_simple.c
index e7b57e5071a3..425eac11f6da 100644
--- a/net/sched/act_simple.c
+++ b/net/sched/act_simple.c
@@ -47,7 +47,7 @@ static int tcf_simp(struct sk_buff *skb, const struct tc_action *a,
 	return d->tcf_action;
 }
 
-static void tcf_simp_release(struct tc_action *a, int bind)
+static void tcf_simp_release(struct tc_action *a)
 {
 	struct tcf_defact *d = to_defact(a);
 	kfree(d->tcfd_defdata);
@@ -204,16 +204,14 @@ static __net_init int simp_init_net(struct net *net)
 	return tc_action_net_init(tn, &act_simp_ops);
 }
 
-static void __net_exit simp_exit_net(struct net *net)
+static void __net_exit simp_exit_net(struct list_head *net_list)
 {
-	struct tc_action_net *tn = net_generic(net, simp_net_id);
-
-	tc_action_net_exit(tn);
+	tc_action_net_exit(net_list, simp_net_id);
 }
 
 static struct pernet_operations simp_net_ops = {
 	.init = simp_init_net,
-	.exit = simp_exit_net,
+	.exit_batch = simp_exit_net,
 	.id   = &simp_net_id,
 	.size = sizeof(struct tc_action_net),
 };
diff --git a/net/sched/act_skbedit.c b/net/sched/act_skbedit.c
index 59949d61f20d..5a3f691bb545 100644
--- a/net/sched/act_skbedit.c
+++ b/net/sched/act_skbedit.c
@@ -241,16 +241,14 @@ static __net_init int skbedit_init_net(struct net *net)
 	return tc_action_net_init(tn, &act_skbedit_ops);
 }
 
-static void __net_exit skbedit_exit_net(struct net *net)
+static void __net_exit skbedit_exit_net(struct list_head *net_list)
 {
-	struct tc_action_net *tn = net_generic(net, skbedit_net_id);
-
-	tc_action_net_exit(tn);
+	tc_action_net_exit(net_list, skbedit_net_id);
 }
 
 static struct pernet_operations skbedit_net_ops = {
 	.init = skbedit_init_net,
-	.exit = skbedit_exit_net,
+	.exit_batch = skbedit_exit_net,
 	.id   = &skbedit_net_id,
 	.size = sizeof(struct tc_action_net),
 };
diff --git a/net/sched/act_skbmod.c b/net/sched/act_skbmod.c
index b642ad3d39dd..fa975262dbac 100644
--- a/net/sched/act_skbmod.c
+++ b/net/sched/act_skbmod.c
@@ -184,7 +184,7 @@ static int tcf_skbmod_init(struct net *net, struct nlattr *nla,
 	return ret;
 }
 
-static void tcf_skbmod_cleanup(struct tc_action *a, int bind)
+static void tcf_skbmod_cleanup(struct tc_action *a)
 {
 	struct tcf_skbmod *d = to_skbmod(a);
 	struct tcf_skbmod_params  *p;
@@ -266,16 +266,14 @@ static __net_init int skbmod_init_net(struct net *net)
 	return tc_action_net_init(tn, &act_skbmod_ops);
 }
 
-static void __net_exit skbmod_exit_net(struct net *net)
+static void __net_exit skbmod_exit_net(struct list_head *net_list)
 {
-	struct tc_action_net *tn = net_generic(net, skbmod_net_id);
-
-	tc_action_net_exit(tn);
+	tc_action_net_exit(net_list, skbmod_net_id);
 }
 
 static struct pernet_operations skbmod_net_ops = {
 	.init = skbmod_init_net,
-	.exit = skbmod_exit_net,
+	.exit_batch = skbmod_exit_net,
 	.id   = &skbmod_net_id,
 	.size = sizeof(struct tc_action_net),
 };
diff --git a/net/sched/act_tunnel_key.c b/net/sched/act_tunnel_key.c
index 30c96274c638..0e23aac09ad6 100644
--- a/net/sched/act_tunnel_key.c
+++ b/net/sched/act_tunnel_key.c
@@ -201,7 +201,7 @@ err_out:
 	return ret;
 }
 
-static void tunnel_key_release(struct tc_action *a, int bind)
+static void tunnel_key_release(struct tc_action *a)
 {
 	struct tcf_tunnel_key *t = to_tunnel_key(a);
 	struct tcf_tunnel_key_params *params;
@@ -325,16 +325,14 @@ static __net_init int tunnel_key_init_net(struct net *net)
 	return tc_action_net_init(tn, &act_tunnel_key_ops);
 }
 
-static void __net_exit tunnel_key_exit_net(struct net *net)
+static void __net_exit tunnel_key_exit_net(struct list_head *net_list)
 {
-	struct tc_action_net *tn = net_generic(net, tunnel_key_net_id);
-
-	tc_action_net_exit(tn);
+	tc_action_net_exit(net_list, tunnel_key_net_id);
 }
 
 static struct pernet_operations tunnel_key_net_ops = {
 	.init = tunnel_key_init_net,
-	.exit = tunnel_key_exit_net,
+	.exit_batch = tunnel_key_exit_net,
 	.id   = &tunnel_key_net_id,
 	.size = sizeof(struct tc_action_net),
 };
diff --git a/net/sched/act_vlan.c b/net/sched/act_vlan.c
index 97f717a13ad5..e1a1b3f3983a 100644
--- a/net/sched/act_vlan.c
+++ b/net/sched/act_vlan.c
@@ -219,7 +219,7 @@ static int tcf_vlan_init(struct net *net, struct nlattr *nla,
 	return ret;
 }
 
-static void tcf_vlan_cleanup(struct tc_action *a, int bind)
+static void tcf_vlan_cleanup(struct tc_action *a)
 {
 	struct tcf_vlan *v = to_vlan(a);
 	struct tcf_vlan_params *p;
@@ -301,16 +301,14 @@ static __net_init int vlan_init_net(struct net *net)
 	return tc_action_net_init(tn, &act_vlan_ops);
 }
 
-static void __net_exit vlan_exit_net(struct net *net)
+static void __net_exit vlan_exit_net(struct list_head *net_list)
 {
-	struct tc_action_net *tn = net_generic(net, vlan_net_id);
-
-	tc_action_net_exit(tn);
+	tc_action_net_exit(net_list, vlan_net_id);
 }
 
 static struct pernet_operations vlan_net_ops = {
 	.init = vlan_init_net,
-	.exit = vlan_exit_net,
+	.exit_batch = vlan_exit_net,
 	.id   = &vlan_net_id,
 	.size = sizeof(struct tc_action_net),
 };
diff --git a/net/sched/cls_api.c b/net/sched/cls_api.c
index b9d63d2246e6..bcb4ccb5f894 100644
--- a/net/sched/cls_api.c
+++ b/net/sched/cls_api.c
@@ -24,6 +24,7 @@
 #include <linux/init.h>
 #include <linux/kmod.h>
 #include <linux/slab.h>
+#include <linux/idr.h>
 #include <net/net_namespace.h>
 #include <net/sock.h>
 #include <net/netlink.h>
@@ -121,8 +122,8 @@ static inline u32 tcf_auto_prio(struct tcf_proto *tp)
 }
 
 static struct tcf_proto *tcf_proto_create(const char *kind, u32 protocol,
-					  u32 prio, u32 parent, struct Qdisc *q,
-					  struct tcf_chain *chain)
+					  u32 prio, struct tcf_chain *chain,
+					  struct netlink_ext_ack *extack)
 {
 	struct tcf_proto *tp;
 	int err;
@@ -148,6 +149,7 @@ static struct tcf_proto *tcf_proto_create(const char *kind, u32 protocol,
 			module_put(tp->ops->owner);
 			err = -EAGAIN;
 		} else {
+			NL_SET_ERR_MSG(extack, "TC classifier not found");
 			err = -ENOENT;
 		}
 		goto errout;
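
From this hunk on, the series threads struct netlink_ext_ack through the filter control path so each failure can return a human-readable string to userspace instead of a bare errno. A hedged caller-side sketch of the pattern (the helper, table type, and message are invented for illustration; NL_SET_ERR_MSG() records a pointer to a static string and is a no-op when extack is NULL):

static int example_find(struct example_table *tbl, u32 index,
			struct netlink_ext_ack *extack)
{
	if (!example_lookup(tbl, index)) {
		/* static string only; no allocation on the error path */
		NL_SET_ERR_MSG(extack, "Object with given index was not found");
		return -ENOENT;
	}
	return 0;
}
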
@@ -156,8 +158,6 @@ static struct tcf_proto *tcf_proto_create(const char *kind, u32 protocol,
 	tp->classify = tp->ops->classify;
 	tp->protocol = protocol;
 	tp->prio = prio;
-	tp->classid = parent;
-	tp->q = q;
 	tp->chain = chain;
 
 	err = tp->ops->init(tp);
@@ -172,13 +172,20 @@ errout:
 	return ERR_PTR(err);
 }
 
-static void tcf_proto_destroy(struct tcf_proto *tp)
+static void tcf_proto_destroy(struct tcf_proto *tp,
+			      struct netlink_ext_ack *extack)
 {
-	tp->ops->destroy(tp);
+	tp->ops->destroy(tp, extack);
 	module_put(tp->ops->owner);
 	kfree_rcu(tp, rcu);
 }
 
+struct tcf_filter_chain_list_item {
+	struct list_head list;
+	tcf_chain_head_change_t *chain_head_change;
+	void *chain_head_change_priv;
+};
+
 static struct tcf_chain *tcf_chain_create(struct tcf_block *block,
 					  u32 chain_index)
 {
@@ -187,6 +194,7 @@ static struct tcf_chain *tcf_chain_create(struct tcf_block *block,
 	chain = kzalloc(sizeof(*chain), GFP_KERNEL);
 	if (!chain)
 		return NULL;
+	INIT_LIST_HEAD(&chain->filter_chain_list);
 	list_add_tail(&chain->list, &block->chain_list);
 	chain->block = block;
 	chain->index = chain_index;
@@ -194,12 +202,19 @@ static struct tcf_chain *tcf_chain_create(struct tcf_block *block,
 	return chain;
 }
 
+static void tcf_chain_head_change_item(struct tcf_filter_chain_list_item *item,
+				       struct tcf_proto *tp_head)
+{
+	if (item->chain_head_change)
+		item->chain_head_change(tp_head, item->chain_head_change_priv);
+}
 static void tcf_chain_head_change(struct tcf_chain *chain,
 				  struct tcf_proto *tp_head)
 {
-	if (chain->chain_head_change)
-		chain->chain_head_change(tp_head,
-					 chain->chain_head_change_priv);
+	struct tcf_filter_chain_list_item *item;
+
+	list_for_each_entry(item, &chain->filter_chain_list, list)
+		tcf_chain_head_change_item(item, tp_head);
 }
 
 static void tcf_chain_flush(struct tcf_chain *chain)
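
The per-chain chain_head_change callback becomes a list because a shared block may be bound to several qdiscs, and each binder must learn when the head of the filter chain changes (clsact, for example, repoints its fast-path mini-qdisc there). The per-binder info a block user fills in is assumed to look roughly like this, matching the fields this series touches in struct tcf_block_ext_info:

struct tcf_block_ext_info_sketch {
	enum tcf_block_binder_type binder_type;
	tcf_chain_head_change_t *chain_head_change;	/* may be NULL */
	void *chain_head_change_priv;
	u32 block_index;	/* nonzero requests a shared block */
};
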
@@ -209,7 +224,7 @@ static void tcf_chain_flush(struct tcf_chain *chain)
 	tcf_chain_head_change(chain, NULL);
 	while (tp) {
 		RCU_INIT_POINTER(chain->filter_chain, tp->next);
-		tcf_proto_destroy(tp);
+		tcf_proto_destroy(tp, NULL);
 		tp = rtnl_dereference(chain->filter_chain);
 		tcf_chain_put(chain);
 	}
@@ -217,8 +232,12 @@ static void tcf_chain_flush(struct tcf_chain *chain)
 
 static void tcf_chain_destroy(struct tcf_chain *chain)
 {
+	struct tcf_block *block = chain->block;
+
 	list_del(&chain->list);
 	kfree(chain);
+	if (list_empty(&block->chain_list))
+		kfree(block);
 }
 
 static void tcf_chain_hold(struct tcf_chain *chain)
@@ -249,62 +268,300 @@ void tcf_chain_put(struct tcf_chain *chain)
 }
 EXPORT_SYMBOL(tcf_chain_put);
 
-static void tcf_block_offload_cmd(struct tcf_block *block, struct Qdisc *q,
-				  struct tcf_block_ext_info *ei,
-				  enum tc_block_command command)
+static bool tcf_block_offload_in_use(struct tcf_block *block)
+{
+	return block->offloadcnt;
+}
+
+static int tcf_block_offload_cmd(struct tcf_block *block,
+				 struct net_device *dev,
+				 struct tcf_block_ext_info *ei,
+				 enum tc_block_command command)
 {
-	struct net_device *dev = q->dev_queue->dev;
 	struct tc_block_offload bo = {};
 
-	if (!dev->netdev_ops->ndo_setup_tc)
-		return;
 	bo.command = command;
 	bo.binder_type = ei->binder_type;
 	bo.block = block;
-	dev->netdev_ops->ndo_setup_tc(dev, TC_SETUP_BLOCK, &bo);
+	return dev->netdev_ops->ndo_setup_tc(dev, TC_SETUP_BLOCK, &bo);
 }
 
-static void tcf_block_offload_bind(struct tcf_block *block, struct Qdisc *q,
-				   struct tcf_block_ext_info *ei)
+static int tcf_block_offload_bind(struct tcf_block *block, struct Qdisc *q,
+				  struct tcf_block_ext_info *ei)
 {
-	tcf_block_offload_cmd(block, q, ei, TC_BLOCK_BIND);
+	struct net_device *dev = q->dev_queue->dev;
+	int err;
+
+	if (!dev->netdev_ops->ndo_setup_tc)
+		goto no_offload_dev_inc;
+
+	/* If the tc offload feature is disabled and the block we try to bind
+	 * to already has some offloaded filters, refuse the bind.
+	 */
+	if (!tc_can_offload(dev) && tcf_block_offload_in_use(block))
+		return -EOPNOTSUPP;
+
+	err = tcf_block_offload_cmd(block, dev, ei, TC_BLOCK_BIND);
+	if (err == -EOPNOTSUPP)
+		goto no_offload_dev_inc;
+	return err;
+
+no_offload_dev_inc:
+	if (tcf_block_offload_in_use(block))
+		return -EOPNOTSUPP;
+	block->nooffloaddevcnt++;
+	return 0;
 }
 
 static void tcf_block_offload_unbind(struct tcf_block *block, struct Qdisc *q,
 				     struct tcf_block_ext_info *ei)
 {
-	tcf_block_offload_cmd(block, q, ei, TC_BLOCK_UNBIND);
+	struct net_device *dev = q->dev_queue->dev;
+	int err;
+
+	if (!dev->netdev_ops->ndo_setup_tc)
+		goto no_offload_dev_dec;
+	err = tcf_block_offload_cmd(block, dev, ei, TC_BLOCK_UNBIND);
+	if (err == -EOPNOTSUPP)
+		goto no_offload_dev_dec;
+	return;
+
+no_offload_dev_dec:
+	WARN_ON(block->nooffloaddevcnt-- == 0);
 }
 
-int tcf_block_get_ext(struct tcf_block **p_block, struct Qdisc *q,
-		      struct tcf_block_ext_info *ei)
+static int
+tcf_chain_head_change_cb_add(struct tcf_chain *chain,
+			     struct tcf_block_ext_info *ei,
+			     struct netlink_ext_ack *extack)
+{
+	struct tcf_filter_chain_list_item *item;
+
+	item = kmalloc(sizeof(*item), GFP_KERNEL);
+	if (!item) {
+		NL_SET_ERR_MSG(extack, "Memory allocation for head change callback item failed");
+		return -ENOMEM;
+	}
+	item->chain_head_change = ei->chain_head_change;
+	item->chain_head_change_priv = ei->chain_head_change_priv;
+	if (chain->filter_chain)
+		tcf_chain_head_change_item(item, chain->filter_chain);
+	list_add(&item->list, &chain->filter_chain_list);
+	return 0;
+}
+
+static void
+tcf_chain_head_change_cb_del(struct tcf_chain *chain,
+			     struct tcf_block_ext_info *ei)
+{
+	struct tcf_filter_chain_list_item *item;
+
+	list_for_each_entry(item, &chain->filter_chain_list, list) {
+		if ((!ei->chain_head_change && !ei->chain_head_change_priv) ||
+		    (item->chain_head_change == ei->chain_head_change &&
+		     item->chain_head_change_priv == ei->chain_head_change_priv)) {
+			tcf_chain_head_change_item(item, NULL);
+			list_del(&item->list);
+			kfree(item);
+			return;
+		}
+	}
+	WARN_ON(1);
+}
+
+struct tcf_net {
+	struct idr idr;
+};
+
+static unsigned int tcf_net_id;
+
+static int tcf_block_insert(struct tcf_block *block, struct net *net,
+			    u32 block_index, struct netlink_ext_ack *extack)
+{
+	struct tcf_net *tn = net_generic(net, tcf_net_id);
+	int err;
+
+	err = idr_alloc_ext(&tn->idr, block, NULL, block_index,
+			    block_index + 1, GFP_KERNEL);
+	if (err)
+		return err;
+	block->index = block_index;
+	return 0;
+}
+
+static void tcf_block_remove(struct tcf_block *block, struct net *net)
+{
+	struct tcf_net *tn = net_generic(net, tcf_net_id);
+
+	idr_remove_ext(&tn->idr, block->index);
+}
+
+static struct tcf_block *tcf_block_create(struct net *net, struct Qdisc *q,
+					  struct netlink_ext_ack *extack)
 {
-	struct tcf_block *block = kzalloc(sizeof(*block), GFP_KERNEL);
+	struct tcf_block *block;
 	struct tcf_chain *chain;
 	int err;
 
-	if (!block)
-		return -ENOMEM;
+	block = kzalloc(sizeof(*block), GFP_KERNEL);
+	if (!block) {
+		NL_SET_ERR_MSG(extack, "Memory allocation for block failed");
+		return ERR_PTR(-ENOMEM);
+	}
 	INIT_LIST_HEAD(&block->chain_list);
 	INIT_LIST_HEAD(&block->cb_list);
+	INIT_LIST_HEAD(&block->owner_list);
 
 	/* Create chain 0 by default; it always has to be present. */
 	chain = tcf_chain_create(block, 0);
 	if (!chain) {
+		NL_SET_ERR_MSG(extack, "Failed to create new tcf chain");
 		err = -ENOMEM;
 		goto err_chain_create;
 	}
-	WARN_ON(!ei->chain_head_change);
-	chain->chain_head_change = ei->chain_head_change;
-	chain->chain_head_change_priv = ei->chain_head_change_priv;
 	block->net = qdisc_net(q);
+	block->refcnt = 1;
+	block->net = net;
 	block->q = q;
-	tcf_block_offload_bind(block, q, ei);
-	*p_block = block;
-	return 0;
+	return block;
 
 err_chain_create:
 	kfree(block);
+	return ERR_PTR(err);
+}
+
+static struct tcf_block *tcf_block_lookup(struct net *net, u32 block_index)
+{
+	struct tcf_net *tn = net_generic(net, tcf_net_id);
+
+	return idr_find_ext(&tn->idr, block_index);
+}
+
+static struct tcf_chain *tcf_block_chain_zero(struct tcf_block *block)
+{
+	return list_first_entry(&block->chain_list, struct tcf_chain, list);
+}
+
+struct tcf_block_owner_item {
+	struct list_head list;
+	struct Qdisc *q;
+	enum tcf_block_binder_type binder_type;
+};
+
+static void
+tcf_block_owner_netif_keep_dst(struct tcf_block *block,
+			       struct Qdisc *q,
+			       enum tcf_block_binder_type binder_type)
+{
+	if (block->keep_dst &&
+	    binder_type != TCF_BLOCK_BINDER_TYPE_CLSACT_INGRESS &&
+	    binder_type != TCF_BLOCK_BINDER_TYPE_CLSACT_EGRESS)
+		netif_keep_dst(qdisc_dev(q));
+}
+
+void tcf_block_netif_keep_dst(struct tcf_block *block)
+{
+	struct tcf_block_owner_item *item;
+
+	block->keep_dst = true;
+	list_for_each_entry(item, &block->owner_list, list)
+		tcf_block_owner_netif_keep_dst(block, item->q,
+					       item->binder_type);
+}
+EXPORT_SYMBOL(tcf_block_netif_keep_dst);
+
+static int tcf_block_owner_add(struct tcf_block *block,
+			       struct Qdisc *q,
+			       enum tcf_block_binder_type binder_type)
+{
+	struct tcf_block_owner_item *item;
+
+	item = kmalloc(sizeof(*item), GFP_KERNEL);
+	if (!item)
+		return -ENOMEM;
+	item->q = q;
+	item->binder_type = binder_type;
+	list_add(&item->list, &block->owner_list);
+	return 0;
+}
+
+static void tcf_block_owner_del(struct tcf_block *block,
+				struct Qdisc *q,
+				enum tcf_block_binder_type binder_type)
+{
+	struct tcf_block_owner_item *item;
+
+	list_for_each_entry(item, &block->owner_list, list) {
+		if (item->q == q && item->binder_type == binder_type) {
+			list_del(&item->list);
+			kfree(item);
+			return;
+		}
+	}
+	WARN_ON(1);
+}
+
+int tcf_block_get_ext(struct tcf_block **p_block, struct Qdisc *q,
+		      struct tcf_block_ext_info *ei,
+		      struct netlink_ext_ack *extack)
+{
+	struct net *net = qdisc_net(q);
+	struct tcf_block *block = NULL;
+	bool created = false;
+	int err;
+
+	if (ei->block_index) {
+		/* a nonzero block_index means a shared block is requested */
+		block = tcf_block_lookup(net, ei->block_index);
+		if (block)
+			block->refcnt++;
+	}
+
+	if (!block) {
+		block = tcf_block_create(net, q, extack);
+		if (IS_ERR(block))
+			return PTR_ERR(block);
+		created = true;
+		if (ei->block_index) {
+			err = tcf_block_insert(block, net,
+					       ei->block_index, extack);
+			if (err)
+				goto err_block_insert;
+		}
+	}
+
+	err = tcf_block_owner_add(block, q, ei->binder_type);
+	if (err)
+		goto err_block_owner_add;
+
+	tcf_block_owner_netif_keep_dst(block, q, ei->binder_type);
+
+	err = tcf_chain_head_change_cb_add(tcf_block_chain_zero(block),
+					   ei, extack);
+	if (err)
+		goto err_chain_head_change_cb_add;
+
+	err = tcf_block_offload_bind(block, q, ei);
+	if (err)
+		goto err_block_offload_bind;
+
+	*p_block = block;
+	return 0;
+
+err_block_offload_bind:
+	tcf_chain_head_change_cb_del(tcf_block_chain_zero(block), ei);
+err_chain_head_change_cb_add:
+	tcf_block_owner_del(block, q, ei->binder_type);
+err_block_owner_add:
+	if (created) {
+		if (tcf_block_shared(block))
+			tcf_block_remove(block, net);
+err_block_insert:
+		kfree(tcf_block_chain_zero(block));
+		kfree(block);
+	} else {
+		block->refcnt--;
+	}
 	return err;
 }
 EXPORT_SYMBOL(tcf_block_get_ext);
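
Taken together, tcf_block_get_ext() now either looks up an existing shared block by index and takes a reference, or creates a fresh block, then registers the qdisc as an owner and hooks up its head-change callback before binding offloads, unwinding each step on error. A hedged sketch of how a clsact-style qdisc is expected to call it (my_qdisc_priv, my_chain_head_change, and my_qdisc_init are invented names; the field assignments mirror the binder info used above):

struct my_qdisc_priv {
	struct tcf_block *block;
	struct tcf_block_ext_info block_info;
};

static void my_chain_head_change(struct tcf_proto *tp_head, void *priv)
{
	/* repoint the fast path at the new chain head; the details
	 * are qdisc-specific (clsact swaps a mini-qdisc here)
	 */
}

static int my_qdisc_init(struct Qdisc *sch, struct nlattr *opt,
			 struct netlink_ext_ack *extack)
{
	struct my_qdisc_priv *q = qdisc_priv(sch);

	q->block_info.binder_type = TCF_BLOCK_BINDER_TYPE_CLSACT_INGRESS;
	q->block_info.chain_head_change = my_chain_head_change;
	q->block_info.chain_head_change_priv = q;
	/* a nonzero block_info.block_index, presumably parsed from a
	 * netlink attribute, would request that shared block instead
	 */
	return tcf_block_get_ext(&q->block, sch, &q->block_info, extack);
}

On teardown, the matching tcf_block_put_ext() drops the owner and callback registrations and frees the block once the last reference goes away, as the hunks below show.
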
@@ -317,7 +574,8 @@ static void tcf_chain_head_change_dflt(struct tcf_proto *tp_head, void *priv)
 }
 
 int tcf_block_get(struct tcf_block **p_block,
-		  struct tcf_proto __rcu **p_filter_chain, struct Qdisc *q)
+		  struct tcf_proto __rcu **p_filter_chain, struct Qdisc *q,
+		  struct netlink_ext_ack *extack)
 {
 	struct tcf_block_ext_info ei = {
 		.chain_head_change = tcf_chain_head_change_dflt,
@@ -325,53 +583,47 @@ int tcf_block_get(struct tcf_block **p_block,
 	};
 
 	WARN_ON(!p_filter_chain);
-	return tcf_block_get_ext(p_block, q, &ei);
+	return tcf_block_get_ext(p_block, q, &ei, extack);
 }
 EXPORT_SYMBOL(tcf_block_get);
 
-static void tcf_block_put_final(struct work_struct *work)
-{
-	struct tcf_block *block = container_of(work, struct tcf_block, work);
-	struct tcf_chain *chain, *tmp;
-
-	rtnl_lock();
-
-	/* At this point, all the chains should have refcnt == 1. */
-	list_for_each_entry_safe(chain, tmp, &block->chain_list, list)
-		tcf_chain_put(chain);
-	rtnl_unlock();
-	kfree(block);
-}
-
 /* XXX: Standalone actions are not allowed to jump to any chain, and bound
  * actions should all be removed after flushing.
  */
 void tcf_block_put_ext(struct tcf_block *block, struct Qdisc *q,
 		       struct tcf_block_ext_info *ei)
 {
-	struct tcf_chain *chain;
+	struct tcf_chain *chain, *tmp;
 
 	if (!block)
 		return;
-	/* Hold a refcnt for all chains, except 0, so that they don't disappear
-	 * while we are iterating.
-	 */
-	list_for_each_entry(chain, &block->chain_list, list)
-		if (chain->index)
+	tcf_chain_head_change_cb_del(tcf_block_chain_zero(block), ei);
+	tcf_block_owner_del(block, q, ei->binder_type);
+
+	if (--block->refcnt == 0) {
+		if (tcf_block_shared(block))
+			tcf_block_remove(block, block->net);
+
+		/* Hold a refcnt for all chains, so that they don't disappear
+		 * while we are iterating.
+		 */
+		list_for_each_entry(chain, &block->chain_list, list)
 			tcf_chain_hold(chain);
 
-	list_for_each_entry(chain, &block->chain_list, list)
-		tcf_chain_flush(chain);
+		list_for_each_entry(chain, &block->chain_list, list)
+			tcf_chain_flush(chain);
+	}
 
 	tcf_block_offload_unbind(block, q, ei);
 
-	INIT_WORK(&block->work, tcf_block_put_final);
-	/* Wait for existing RCU callbacks to cool down, make sure their works
-	 * have been queued before this. We can not flush pending works here
-	 * because we are holding the RTNL lock.
-	 */
-	rcu_barrier();
-	tcf_queue_work(&block->work);
+	if (block->refcnt == 0) {
+		/* At this point, all the chains should have refcnt >= 1. */
+		list_for_each_entry_safe(chain, tmp, &block->chain_list, list)
+			tcf_chain_put(chain);
+
+		/* Finally, put chain 0 and allow block to be freed. */
+		tcf_chain_put(tcf_block_chain_zero(block));
+	}
 }
 EXPORT_SYMBOL(tcf_block_put_ext);
 
@@ -429,9 +681,16 @@ struct tcf_block_cb *__tcf_block_cb_register(struct tcf_block *block,
 {
 	struct tcf_block_cb *block_cb;
 
+	/* At this point, playback of previous block cb calls is not supported,
+	 * so forbid registering to a block which already has some offloaded
+	 * filters present.
+	 */
+	if (tcf_block_offload_in_use(block))
+		return ERR_PTR(-EOPNOTSUPP);
+
 	block_cb = kzalloc(sizeof(*block_cb), GFP_KERNEL);
 	if (!block_cb)
-		return NULL;
+		return ERR_PTR(-ENOMEM);
 	block_cb->cb = cb;
 	block_cb->cb_ident = cb_ident;
 	block_cb->cb_priv = cb_priv;
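
Returning ERR_PTR() instead of NULL lets __tcf_block_cb_register() distinguish an allocation failure from the new "block already has offloaded filters" refusal, so callers must now test with IS_ERR(). A hedged driver-side sketch (my_setup_cb and priv are placeholders):

	err = tcf_block_cb_register(block, my_setup_cb, priv, priv);
	if (err) {
		/* -EOPNOTSUPP here means the block already carries
		 * offloaded filters and replay is not supported yet
		 */
		return err;
	}
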
@@ -447,7 +706,7 @@ int tcf_block_cb_register(struct tcf_block *block,
 	struct tcf_block_cb *block_cb;
 
 	block_cb = __tcf_block_cb_register(block, cb, cb_ident, cb_priv);
-	return block_cb ? 0 : -ENOMEM;
+	return IS_ERR(block_cb) ? PTR_ERR(block_cb) : 0;
 }
 EXPORT_SYMBOL(tcf_block_cb_register);
 
@@ -477,6 +736,10 @@ static int tcf_block_cb_call(struct tcf_block *block, enum tc_setup_type type,
 	int ok_count = 0;
 	int err;
 
+	/* Make sure all netdevs sharing this block are offload-capable. */
+	if (block->nooffloaddevcnt && err_stop)
+		return -EOPNOTSUPP;
+
 	list_for_each_entry(block_cb, &block->cb_list, list) {
 		err = block_cb->cb(type, type_data, block_cb->cb_priv);
 		if (err) {
@@ -530,8 +793,9 @@ reclassify:
 #ifdef CONFIG_NET_CLS_ACT
 reset:
 	if (unlikely(limit++ >= max_reclassify_loop)) {
-		net_notice_ratelimited("%s: reclassify loop, rule prio %u, protocol %02x\n",
-				       tp->q->ops->id, tp->prio & 0xffff,
+		net_notice_ratelimited("%u: reclassify loop, rule prio %u, protocol %02x\n",
+				       tp->chain->block->index,
+				       tp->prio & 0xffff,
 				       ntohs(tp->protocol));
 		return TC_ACT_SHOT;
 	}
@@ -604,8 +868,9 @@ static struct tcf_proto *tcf_chain_tp_find(struct tcf_chain *chain,
 }
 
 static int tcf_fill_node(struct net *net, struct sk_buff *skb,
-			 struct tcf_proto *tp, struct Qdisc *q, u32 parent,
-			 void *fh, u32 portid, u32 seq, u16 flags, int event)
+			 struct tcf_proto *tp, struct tcf_block *block,
+			 struct Qdisc *q, u32 parent, void *fh,
+			 u32 portid, u32 seq, u16 flags, int event)
 {
 	struct tcmsg *tcm;
 	struct nlmsghdr  *nlh;
@@ -618,8 +883,13 @@ static int tcf_fill_node(struct net *net, struct sk_buff *skb,
 	tcm->tcm_family = AF_UNSPEC;
 	tcm->tcm__pad1 = 0;
 	tcm->tcm__pad2 = 0;
-	tcm->tcm_ifindex = qdisc_dev(q)->ifindex;
-	tcm->tcm_parent = parent;
+	if (q) {
+		tcm->tcm_ifindex = qdisc_dev(q)->ifindex;
+		tcm->tcm_parent = parent;
+	} else {
+		tcm->tcm_ifindex = TCM_IFINDEX_MAGIC_BLOCK;
+		tcm->tcm_block_index = block->index;
+	}
 	tcm->tcm_info = TC_H_MAKE(tp->prio, tp->protocol);
 	if (nla_put_string(skb, TCA_KIND, tp->ops->kind))
 		goto nla_put_failure;
@@ -642,8 +912,8 @@ nla_put_failure:
 
 static int tfilter_notify(struct net *net, struct sk_buff *oskb,
 			  struct nlmsghdr *n, struct tcf_proto *tp,
-			  struct Qdisc *q, u32 parent,
-			  void *fh, int event, bool unicast)
+			  struct tcf_block *block, struct Qdisc *q,
+			  u32 parent, void *fh, int event, bool unicast)
 {
 	struct sk_buff *skb;
 	u32 portid = oskb ? NETLINK_CB(oskb).portid : 0;
@@ -652,8 +922,8 @@ static int tfilter_notify(struct net *net, struct sk_buff *oskb,
 	if (!skb)
 		return -ENOBUFS;
 
-	if (tcf_fill_node(net, skb, tp, q, parent, fh, portid, n->nlmsg_seq,
-			  n->nlmsg_flags, event) <= 0) {
+	if (tcf_fill_node(net, skb, tp, block, q, parent, fh, portid,
+			  n->nlmsg_seq, n->nlmsg_flags, event) <= 0) {
 		kfree_skb(skb);
 		return -EINVAL;
 	}
@@ -667,8 +937,9 @@ static int tfilter_notify(struct net *net, struct sk_buff *oskb,
 
 static int tfilter_del_notify(struct net *net, struct sk_buff *oskb,
 			      struct nlmsghdr *n, struct tcf_proto *tp,
-			      struct Qdisc *q, u32 parent,
-			      void *fh, bool unicast, bool *last)
+			      struct tcf_block *block, struct Qdisc *q,
+			      u32 parent, void *fh, bool unicast, bool *last,
+			      struct netlink_ext_ack *extack)
 {
 	struct sk_buff *skb;
 	u32 portid = oskb ? NETLINK_CB(oskb).portid : 0;
@@ -678,13 +949,14 @@ static int tfilter_del_notify(struct net *net, struct sk_buff *oskb,
 	if (!skb)
 		return -ENOBUFS;
 
-	if (tcf_fill_node(net, skb, tp, q, parent, fh, portid, n->nlmsg_seq,
-			  n->nlmsg_flags, RTM_DELTFILTER) <= 0) {
+	if (tcf_fill_node(net, skb, tp, block, q, parent, fh, portid,
+			  n->nlmsg_seq, n->nlmsg_flags, RTM_DELTFILTER) <= 0) {
+		NL_SET_ERR_MSG(extack, "Failed to build del event notification");
 		kfree_skb(skb);
 		return -EINVAL;
 	}
 
-	err = tp->ops->delete(tp, fh, last);
+	err = tp->ops->delete(tp, fh, last, extack);
 	if (err) {
 		kfree_skb(skb);
 		return err;
@@ -693,20 +965,24 @@ static int tfilter_del_notify(struct net *net, struct sk_buff *oskb,
 	if (unicast)
 		return netlink_unicast(net->rtnl, skb, portid, MSG_DONTWAIT);
 
-	return rtnetlink_send(skb, net, portid, RTNLGRP_TC,
-			      n->nlmsg_flags & NLM_F_ECHO);
+	err = rtnetlink_send(skb, net, portid, RTNLGRP_TC,
+			     n->nlmsg_flags & NLM_F_ECHO);
+	if (err < 0)
+		NL_SET_ERR_MSG(extack, "Failed to send filter delete notification");
+	return err;
 }
 
 static void tfilter_notify_chain(struct net *net, struct sk_buff *oskb,
-				 struct Qdisc *q, u32 parent,
-				 struct nlmsghdr *n,
+				 struct tcf_block *block, struct Qdisc *q,
+				 u32 parent, struct nlmsghdr *n,
 				 struct tcf_chain *chain, int event)
 {
 	struct tcf_proto *tp;
 
 	for (tp = rtnl_dereference(chain->filter_chain);
 	     tp; tp = rtnl_dereference(tp->next))
-		tfilter_notify(net, oskb, n, tp, q, parent, 0, event, false);
+		tfilter_notify(net, oskb, n, tp, block,
+			       q, parent, 0, event, false);
 }
 
 /* Add/change/delete/get a filter node */
@@ -722,13 +998,11 @@ static int tc_ctl_tfilter(struct sk_buff *skb, struct nlmsghdr *n,
 	bool prio_allocate;
 	u32 parent;
 	u32 chain_index;
-	struct net_device *dev;
-	struct Qdisc  *q;
+	struct Qdisc *q = NULL;
 	struct tcf_chain_info chain_info;
 	struct tcf_chain *chain = NULL;
 	struct tcf_block *block;
 	struct tcf_proto *tp;
-	const struct Qdisc_class_ops *cops;
 	unsigned long cl;
 	void *fh;
 	int err;
@@ -755,8 +1029,10 @@ replay:
 	if (prio == 0) {
 		switch (n->nlmsg_type) {
 		case RTM_DELTFILTER:
-			if (protocol || t->tcm_handle || tca[TCA_KIND])
+			if (protocol || t->tcm_handle || tca[TCA_KIND]) {
+				NL_SET_ERR_MSG(extack, "Cannot flush filters with protocol, handle or kind set");
 				return -ENOENT;
+			}
 			break;
 		case RTM_NEWTFILTER:
 			/* If no priority is provided by the user,
@@ -769,63 +1045,91 @@ replay:
 			}
 			/* fall-through */
 		default:
+			NL_SET_ERR_MSG(extack, "Invalid filter command with priority of zero");
 			return -ENOENT;
 		}
 	}
 
 	/* Find head of filter chain. */
 
-	/* Find link */
-	dev = __dev_get_by_index(net, t->tcm_ifindex);
-	if (dev == NULL)
-		return -ENODEV;
-
-	/* Find qdisc */
-	if (!parent) {
-		q = dev->qdisc;
-		parent = q->handle;
+	if (t->tcm_ifindex == TCM_IFINDEX_MAGIC_BLOCK) {
+		block = tcf_block_lookup(net, t->tcm_block_index);
+		if (!block) {
+			NL_SET_ERR_MSG(extack, "Block of given index was not found");
+			err = -EINVAL;
+			goto errout;
+		}
 	} else {
-		q = qdisc_lookup(dev, TC_H_MAJ(t->tcm_parent));
-		if (q == NULL)
-			return -EINVAL;
-	}
+		const struct Qdisc_class_ops *cops;
+		struct net_device *dev;
 
-	/* Is it classful? */
-	cops = q->ops->cl_ops;
-	if (!cops)
-		return -EINVAL;
+		/* Find link */
+		dev = __dev_get_by_index(net, t->tcm_ifindex);
+		if (!dev)
+			return -ENODEV;
 
-	if (!cops->tcf_block)
-		return -EOPNOTSUPP;
+		/* Find qdisc */
+		if (!parent) {
+			q = dev->qdisc;
+			parent = q->handle;
+		} else {
+			q = qdisc_lookup(dev, TC_H_MAJ(t->tcm_parent));
+			if (!q) {
+				NL_SET_ERR_MSG(extack, "Parent Qdisc doesn't exist");
+				return -EINVAL;
+			}
+		}
 
-	/* Do we search for filter, attached to class? */
-	if (TC_H_MIN(parent)) {
-		cl = cops->find(q, parent);
-		if (cl == 0)
-			return -ENOENT;
-	}
+		/* Is it classful? */
+		cops = q->ops->cl_ops;
+		if (!cops) {
+			NL_SET_ERR_MSG(extack, "Qdisc not classful");
+			return -EINVAL;
+		}
 
-	/* And the last stroke */
-	block = cops->tcf_block(q, cl);
-	if (!block) {
-		err = -EINVAL;
-		goto errout;
+		if (!cops->tcf_block) {
+			NL_SET_ERR_MSG(extack, "Class doesn't support blocks");
+			return -EOPNOTSUPP;
+		}
+
+		/* Do we search for filter, attached to class? */
+		if (TC_H_MIN(parent)) {
+			cl = cops->find(q, parent);
+			if (cl == 0) {
+				NL_SET_ERR_MSG(extack, "Specified class doesn't exist");
+				return -ENOENT;
+			}
+		}
+
+		/* And the last stroke */
+		block = cops->tcf_block(q, cl, extack);
+		if (!block) {
+			err = -EINVAL;
+			goto errout;
+		}
+		if (tcf_block_shared(block)) {
+			NL_SET_ERR_MSG(extack, "This filter block is shared. Please use the block index to manipulate the filters");
+			err = -EOPNOTSUPP;
+			goto errout;
+		}
 	}
 
 	chain_index = tca[TCA_CHAIN] ? nla_get_u32(tca[TCA_CHAIN]) : 0;
 	if (chain_index > TC_ACT_EXT_VAL_MASK) {
+		NL_SET_ERR_MSG(extack, "Specified chain index exceeds upper limit");
 		err = -EINVAL;
 		goto errout;
 	}
 	chain = tcf_chain_get(block, chain_index,
 			      n->nlmsg_type == RTM_NEWTFILTER);
 	if (!chain) {
+		NL_SET_ERR_MSG(extack, "Cannot find specified filter chain");
 		err = n->nlmsg_type == RTM_NEWTFILTER ? -ENOMEM : -EINVAL;
 		goto errout;
 	}
 
 	if (n->nlmsg_type == RTM_DELTFILTER && prio == 0) {
-		tfilter_notify_chain(net, skb, q, parent, n,
+		tfilter_notify_chain(net, skb, block, q, parent, n,
 				     chain, RTM_DELTFILTER);
 		tcf_chain_flush(chain);
 		err = 0;
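
This branch on TCM_IFINDEX_MAGIC_BLOCK is the user-visible half of block sharing: a filter request can now address a block directly by index, with no device or qdisc involved. A sketch of the message header such a request would carry, assuming tcm_block_index aliases tcm_parent in the uapi header, since only one of the two is meaningful per request:

	struct tcmsg t = {
		.tcm_family      = AF_UNSPEC,
		.tcm_ifindex     = TCM_IFINDEX_MAGIC_BLOCK,
		.tcm_block_index = 22,	/* a block index, not an ifindex */
	};

The dump path further down applies the same test, and also forces q to NULL for shared blocks so notifications never report a particular owner.
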
@@ -835,6 +1139,7 @@ replay:
 	tp = tcf_chain_tp_find(chain, &chain_info, protocol,
 			       prio, prio_allocate);
 	if (IS_ERR(tp)) {
+		NL_SET_ERR_MSG(extack, "Filter with specified priority/protocol not found");
 		err = PTR_ERR(tp);
 		goto errout;
 	}
@@ -843,12 +1148,14 @@ replay:
 		/* Proto-tcf does not exist, create new one */
 
 		if (tca[TCA_KIND] == NULL || !protocol) {
+			NL_SET_ERR_MSG(extack, "Filter kind and protocol must be specified");
 			err = -EINVAL;
 			goto errout;
 		}
 
 		if (n->nlmsg_type != RTM_NEWTFILTER ||
 		    !(n->nlmsg_flags & NLM_F_CREATE)) {
+			NL_SET_ERR_MSG(extack, "Need both RTM_NEWTFILTER and NLM_F_CREATE to create a new filter");
 			err = -ENOENT;
 			goto errout;
 		}
@@ -857,13 +1164,14 @@ replay:
 			prio = tcf_auto_prio(tcf_chain_tp_prev(&chain_info));
 
 		tp = tcf_proto_create(nla_data(tca[TCA_KIND]),
-				      protocol, prio, parent, q, chain);
+				      protocol, prio, chain, extack);
 		if (IS_ERR(tp)) {
 			err = PTR_ERR(tp);
 			goto errout;
 		}
 		tp_created = 1;
 	} else if (tca[TCA_KIND] && nla_strcmp(tca[TCA_KIND], tp->ops->kind)) {
+		NL_SET_ERR_MSG(extack, "Specified filter kind does not match existing one");
 		err = -EINVAL;
 		goto errout;
 	}
@@ -873,15 +1181,16 @@ replay:
 	if (!fh) {
 		if (n->nlmsg_type == RTM_DELTFILTER && t->tcm_handle == 0) {
 			tcf_chain_tp_remove(chain, &chain_info, tp);
-			tfilter_notify(net, skb, n, tp, q, parent, fh,
+			tfilter_notify(net, skb, n, tp, block, q, parent, fh,
 				       RTM_DELTFILTER, false);
-			tcf_proto_destroy(tp);
+			tcf_proto_destroy(tp, extack);
 			err = 0;
 			goto errout;
 		}
 
 		if (n->nlmsg_type != RTM_NEWTFILTER ||
 		    !(n->nlmsg_flags & NLM_F_CREATE)) {
+			NL_SET_ERR_MSG(extack, "Need both RTM_NEWTFILTER and NLM_F_CREATE to create a new filter");
 			err = -ENOENT;
 			goto errout;
 		}
@@ -892,41 +1201,47 @@ replay:
 		case RTM_NEWTFILTER:
 			if (n->nlmsg_flags & NLM_F_EXCL) {
 				if (tp_created)
-					tcf_proto_destroy(tp);
+					tcf_proto_destroy(tp, NULL);
+				NL_SET_ERR_MSG(extack, "Filter already exists");
 				err = -EEXIST;
 				goto errout;
 			}
 			break;
 		case RTM_DELTFILTER:
-			err = tfilter_del_notify(net, skb, n, tp, q, parent,
-						 fh, false, &last);
+			err = tfilter_del_notify(net, skb, n, tp, block,
+						 q, parent, fh, false, &last,
+						 extack);
 			if (err)
 				goto errout;
 			if (last) {
 				tcf_chain_tp_remove(chain, &chain_info, tp);
-				tcf_proto_destroy(tp);
+				tcf_proto_destroy(tp, extack);
 			}
 			goto errout;
 		case RTM_GETTFILTER:
-			err = tfilter_notify(net, skb, n, tp, q, parent, fh,
-					     RTM_NEWTFILTER, true);
+			err = tfilter_notify(net, skb, n, tp, block, q, parent,
+					     fh, RTM_NEWTFILTER, true);
+			if (err < 0)
+				NL_SET_ERR_MSG(extack, "Failed to send filter notify message");
 			goto errout;
 		default:
+			NL_SET_ERR_MSG(extack, "Invalid netlink message type");
 			err = -EINVAL;
 			goto errout;
 		}
 	}
 
 	err = tp->ops->change(net, skb, tp, cl, t->tcm_handle, tca, &fh,
-			      n->nlmsg_flags & NLM_F_CREATE ? TCA_ACT_NOREPLACE : TCA_ACT_REPLACE);
+			      n->nlmsg_flags & NLM_F_CREATE ? TCA_ACT_NOREPLACE : TCA_ACT_REPLACE,
+			      extack);
 	if (err == 0) {
 		if (tp_created)
 			tcf_chain_tp_insert(chain, &chain_info, tp);
-		tfilter_notify(net, skb, n, tp, q, parent, fh,
+		tfilter_notify(net, skb, n, tp, block, q, parent, fh,
 			       RTM_NEWTFILTER, false);
 	} else {
 		if (tp_created)
-			tcf_proto_destroy(tp);
+			tcf_proto_destroy(tp, NULL);
 	}
 
 errout:
@@ -942,6 +1257,7 @@ struct tcf_dump_args {
 	struct tcf_walker w;
 	struct sk_buff *skb;
 	struct netlink_callback *cb;
+	struct tcf_block *block;
 	struct Qdisc *q;
 	u32 parent;
 };
@@ -951,7 +1267,7 @@ static int tcf_node_dump(struct tcf_proto *tp, void *n, struct tcf_walker *arg)
 	struct tcf_dump_args *a = (void *)arg;
 	struct net *net = sock_net(a->skb->sk);
 
-	return tcf_fill_node(net, a->skb, tp, a->q, a->parent,
+	return tcf_fill_node(net, a->skb, tp, a->block, a->q, a->parent,
 			     n, NETLINK_CB(a->cb->skb).portid,
 			     a->cb->nlh->nlmsg_seq, NLM_F_MULTI,
 			     RTM_NEWTFILTER);
@@ -962,6 +1278,7 @@ static bool tcf_chain_dump(struct tcf_chain *chain, struct Qdisc *q, u32 parent,
 			   long index_start, long *p_index)
 {
 	struct net *net = sock_net(skb->sk);
+	struct tcf_block *block = chain->block;
 	struct tcmsg *tcm = nlmsg_data(cb->nlh);
 	struct tcf_dump_args arg;
 	struct tcf_proto *tp;
@@ -980,7 +1297,7 @@ static bool tcf_chain_dump(struct tcf_chain *chain, struct Qdisc *q, u32 parent,
 			memset(&cb->args[1], 0,
 			       sizeof(cb->args) - sizeof(cb->args[0]));
 		if (cb->args[1] == 0) {
-			if (tcf_fill_node(net, skb, tp, q, parent, 0,
+			if (tcf_fill_node(net, skb, tp, block, q, parent, 0,
 					  NETLINK_CB(cb->skb).portid,
 					  cb->nlh->nlmsg_seq, NLM_F_MULTI,
 					  RTM_NEWTFILTER) <= 0)
@@ -993,6 +1310,7 @@ static bool tcf_chain_dump(struct tcf_chain *chain, struct Qdisc *q, u32 parent,
 		arg.w.fn = tcf_node_dump;
 		arg.skb = skb;
 		arg.cb = cb;
+		arg.block = block;
 		arg.q = q;
 		arg.parent = parent;
 		arg.w.stop = 0;
@@ -1011,13 +1329,10 @@ static int tc_dump_tfilter(struct sk_buff *skb, struct netlink_callback *cb)
 {
 	struct net *net = sock_net(skb->sk);
 	struct nlattr *tca[TCA_MAX + 1];
-	struct net_device *dev;
-	struct Qdisc *q;
+	struct Qdisc *q = NULL;
 	struct tcf_block *block;
 	struct tcf_chain *chain;
 	struct tcmsg *tcm = nlmsg_data(cb->nlh);
-	unsigned long cl = 0;
-	const struct Qdisc_class_ops *cops;
 	long index_start;
 	long index;
 	u32 parent;
@@ -1030,32 +1345,51 @@ static int tc_dump_tfilter(struct sk_buff *skb, struct netlink_callback *cb)
 	if (err)
 		return err;
 
-	dev = __dev_get_by_index(net, tcm->tcm_ifindex);
-	if (!dev)
-		return skb->len;
-
-	parent = tcm->tcm_parent;
-	if (!parent) {
-		q = dev->qdisc;
-		parent = q->handle;
+	if (tcm->tcm_ifindex == TCM_IFINDEX_MAGIC_BLOCK) {
+		block = tcf_block_lookup(net, tcm->tcm_block_index);
+		if (!block)
+			goto out;
+		/* If we work with a block index, q is NULL and the parent
+		 * value will never be used in the following code; the check
+		 * in tcf_fill_node prevents it. However, the compiler does
+		 * not see that far, so set parent to zero to silence the
+		 * warning about parent being uninitialized.
+		 */
+		parent = 0;
 	} else {
-		q = qdisc_lookup(dev, TC_H_MAJ(tcm->tcm_parent));
-	}
-	if (!q)
-		goto out;
-	cops = q->ops->cl_ops;
-	if (!cops)
-		goto out;
-	if (!cops->tcf_block)
-		goto out;
-	if (TC_H_MIN(tcm->tcm_parent)) {
-		cl = cops->find(q, tcm->tcm_parent);
-		if (cl == 0)
+		const struct Qdisc_class_ops *cops;
+		struct net_device *dev;
+		unsigned long cl = 0;
+
+		dev = __dev_get_by_index(net, tcm->tcm_ifindex);
+		if (!dev)
+			return skb->len;
+
+		parent = tcm->tcm_parent;
+		if (!parent) {
+			q = dev->qdisc;
+			parent = q->handle;
+		} else {
+			q = qdisc_lookup(dev, TC_H_MAJ(tcm->tcm_parent));
+		}
+		if (!q)
+			goto out;
+		cops = q->ops->cl_ops;
+		if (!cops)
+			goto out;
+		if (!cops->tcf_block)
+			goto out;
+		if (TC_H_MIN(tcm->tcm_parent)) {
+			cl = cops->find(q, tcm->tcm_parent);
+			if (cl == 0)
+				goto out;
+		}
+		block = cops->tcf_block(q, cl, NULL);
+		if (!block)
 			goto out;
+		if (tcf_block_shared(block))
+			q = NULL;
 	}
-	block = cops->tcf_block(q, cl);
-	if (!block)
-		goto out;
 
 	index_start = cb->args[0];
 	index = 0;
@@ -1090,7 +1424,8 @@ void tcf_exts_destroy(struct tcf_exts *exts)
 EXPORT_SYMBOL(tcf_exts_destroy);
 
 int tcf_exts_validate(struct net *net, struct tcf_proto *tp, struct nlattr **tb,
-		      struct nlattr *rate_tlv, struct tcf_exts *exts, bool ovr)
+		      struct nlattr *rate_tlv, struct tcf_exts *exts, bool ovr,
+		      struct netlink_ext_ack *extack)
 {
 #ifdef CONFIG_NET_CLS_ACT
 	{
@@ -1123,8 +1458,10 @@ int tcf_exts_validate(struct net *net, struct tcf_proto *tp, struct nlattr **tb,
 	}
 #else
 	if ((exts->action && tb[exts->action]) ||
-	    (exts->police && tb[exts->police]))
+	    (exts->police && tb[exts->police])) {
+		NL_SET_ERR_MSG(extack, "Classifier actions are not supported per compile options (CONFIG_NET_CLS_ACT)");
 		return -EOPNOTSUPP;
+	}
 #endif
 
 	return 0;
@@ -1258,18 +1595,50 @@ int tc_setup_cb_call(struct tcf_block *block, struct tcf_exts *exts,
 }
 EXPORT_SYMBOL(tc_setup_cb_call);
 
+static __net_init int tcf_net_init(struct net *net)
+{
+	struct tcf_net *tn = net_generic(net, tcf_net_id);
+
+	idr_init(&tn->idr);
+	return 0;
+}
+
+static void __net_exit tcf_net_exit(struct net *net)
+{
+	struct tcf_net *tn = net_generic(net, tcf_net_id);
+
+	idr_destroy(&tn->idr);
+}
+
+static struct pernet_operations tcf_net_ops = {
+	.init = tcf_net_init,
+	.exit = tcf_net_exit,
+	.id   = &tcf_net_id,
+	.size = sizeof(struct tcf_net),
+};
+
 static int __init tc_filter_init(void)
 {
+	int err;
+
 	tc_filter_wq = alloc_ordered_workqueue("tc_filter_workqueue", 0);
 	if (!tc_filter_wq)
 		return -ENOMEM;
 
+	err = register_pernet_subsys(&tcf_net_ops);
+	if (err)
+		goto err_register_pernet_subsys;
+
 	rtnl_register(PF_UNSPEC, RTM_NEWTFILTER, tc_ctl_tfilter, NULL, 0);
 	rtnl_register(PF_UNSPEC, RTM_DELTFILTER, tc_ctl_tfilter, NULL, 0);
 	rtnl_register(PF_UNSPEC, RTM_GETTFILTER, tc_ctl_tfilter,
 		      tc_dump_tfilter, 0);
 
 	return 0;
+
+err_register_pernet_subsys:
+	destroy_workqueue(tc_filter_wq);
+	return err;
 }
 
 subsys_initcall(tc_filter_init);
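
Everything in the classifier files below is mechanical fallout of the cls_api rework above: every control-path operation in tcf_proto_ops grows a struct netlink_ext_ack argument, and uses of the removed tp->q go through the chain's block instead. A summary sketch of the callback shapes as this series leaves them (only the hooks touched here):

struct tcf_proto_ops_summary {
	int  (*change)(struct net *net, struct sk_buff *in_skb,
		       struct tcf_proto *tp, unsigned long base,
		       u32 handle, struct nlattr **tca, void **arg,
		       bool ovr, struct netlink_ext_ack *extack);
	int  (*delete)(struct tcf_proto *tp, void *arg, bool *last,
		       struct netlink_ext_ack *extack);
	void (*destroy)(struct tcf_proto *tp,
			struct netlink_ext_ack *extack);
};
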
diff --git a/net/sched/cls_basic.c b/net/sched/cls_basic.c
index 5f169ded347e..d333f5c5101d 100644
--- a/net/sched/cls_basic.c
+++ b/net/sched/cls_basic.c
@@ -112,7 +112,7 @@ static void basic_delete_filter(struct rcu_head *head)
 	tcf_queue_work(&f->work);
 }
 
-static void basic_destroy(struct tcf_proto *tp)
+static void basic_destroy(struct tcf_proto *tp, struct netlink_ext_ack *extack)
 {
 	struct basic_head *head = rtnl_dereference(tp->root);
 	struct basic_filter *f, *n;
@@ -130,7 +130,8 @@ static void basic_destroy(struct tcf_proto *tp)
 	kfree_rcu(head, rcu);
 }
 
-static int basic_delete(struct tcf_proto *tp, void *arg, bool *last)
+static int basic_delete(struct tcf_proto *tp, void *arg, bool *last,
+			struct netlink_ext_ack *extack)
 {
 	struct basic_head *head = rtnl_dereference(tp->root);
 	struct basic_filter *f = arg;
@@ -152,11 +153,12 @@ static const struct nla_policy basic_policy[TCA_BASIC_MAX + 1] = {
 static int basic_set_parms(struct net *net, struct tcf_proto *tp,
 			   struct basic_filter *f, unsigned long base,
 			   struct nlattr **tb,
-			   struct nlattr *est, bool ovr)
+			   struct nlattr *est, bool ovr,
+			   struct netlink_ext_ack *extack)
 {
 	int err;
 
-	err = tcf_exts_validate(net, tp, tb, est, &f->exts, ovr);
+	err = tcf_exts_validate(net, tp, tb, est, &f->exts, ovr, extack);
 	if (err < 0)
 		return err;
 
@@ -175,7 +177,8 @@ static int basic_set_parms(struct net *net, struct tcf_proto *tp,
 
 static int basic_change(struct net *net, struct sk_buff *in_skb,
 			struct tcf_proto *tp, unsigned long base, u32 handle,
-			struct nlattr **tca, void **arg, bool ovr)
+			struct nlattr **tca, void **arg, bool ovr,
+			struct netlink_ext_ack *extack)
 {
 	int err;
 	struct basic_head *head = rtnl_dereference(tp->root);
@@ -221,7 +224,8 @@ static int basic_change(struct net *net, struct sk_buff *in_skb,
 		fnew->handle = idr_index;
 	}
 
-	err = basic_set_parms(net, tp, fnew, base, tb, tca[TCA_RATE], ovr);
+	err = basic_set_parms(net, tp, fnew, base, tb, tca[TCA_RATE], ovr,
+			      extack);
 	if (err < 0) {
 		if (!fold)
 			idr_remove_ext(&head->handle_idr, fnew->handle);
diff --git a/net/sched/cls_bpf.c b/net/sched/cls_bpf.c
index a62586e2dbdb..8e5326bc6440 100644
--- a/net/sched/cls_bpf.c
+++ b/net/sched/cls_bpf.c
@@ -147,7 +147,8 @@ static bool cls_bpf_is_ebpf(const struct cls_bpf_prog *prog)
 }
 
 static int cls_bpf_offload_cmd(struct tcf_proto *tp, struct cls_bpf_prog *prog,
-			       struct cls_bpf_prog *oldprog)
+			       struct cls_bpf_prog *oldprog,
+			       struct netlink_ext_ack *extack)
 {
 	struct tcf_block *block = tp->chain->block;
 	struct tc_cls_bpf_offload cls_bpf = {};
@@ -158,22 +159,25 @@ static int cls_bpf_offload_cmd(struct tcf_proto *tp, struct cls_bpf_prog *prog,
 	skip_sw = prog && tc_skip_sw(prog->gen_flags);
 	obj = prog ?: oldprog;
 
-	tc_cls_common_offload_init(&cls_bpf.common, tp);
+	tc_cls_common_offload_init(&cls_bpf.common, tp, obj->gen_flags,
+				   extack);
 	cls_bpf.command = TC_CLSBPF_OFFLOAD;
 	cls_bpf.exts = &obj->exts;
 	cls_bpf.prog = prog ? prog->filter : NULL;
 	cls_bpf.oldprog = oldprog ? oldprog->filter : NULL;
 	cls_bpf.name = obj->bpf_name;
 	cls_bpf.exts_integrated = obj->exts_integrated;
-	cls_bpf.gen_flags = obj->gen_flags;
+
+	if (oldprog)
+		tcf_block_offload_dec(block, &oldprog->gen_flags);
 
 	err = tc_setup_cb_call(block, NULL, TC_SETUP_CLSBPF, &cls_bpf, skip_sw);
 	if (prog) {
 		if (err < 0) {
-			cls_bpf_offload_cmd(tp, oldprog, prog);
+			cls_bpf_offload_cmd(tp, oldprog, prog, extack);
 			return err;
 		} else if (err > 0) {
-			prog->gen_flags |= TCA_CLS_FLAGS_IN_HW;
+			tcf_block_offload_inc(block, &prog->gen_flags);
 		}
 	}
 
@@ -189,7 +193,8 @@ static u32 cls_bpf_flags(u32 flags)
 }
 
 static int cls_bpf_offload(struct tcf_proto *tp, struct cls_bpf_prog *prog,
-			   struct cls_bpf_prog *oldprog)
+			   struct cls_bpf_prog *oldprog,
+			   struct netlink_ext_ack *extack)
 {
 	if (prog && oldprog &&
 	    cls_bpf_flags(prog->gen_flags) !=
@@ -203,15 +208,16 @@ static int cls_bpf_offload(struct tcf_proto *tp, struct cls_bpf_prog *prog,
 	if (!prog && !oldprog)
 		return 0;
 
-	return cls_bpf_offload_cmd(tp, prog, oldprog);
+	return cls_bpf_offload_cmd(tp, prog, oldprog, extack);
 }
 
 static void cls_bpf_stop_offload(struct tcf_proto *tp,
-				 struct cls_bpf_prog *prog)
+				 struct cls_bpf_prog *prog,
+				 struct netlink_ext_ack *extack)
 {
 	int err;
 
-	err = cls_bpf_offload_cmd(tp, NULL, prog);
+	err = cls_bpf_offload_cmd(tp, NULL, prog, extack);
 	if (err)
 		pr_err("Stopping hardware offload failed: %d\n", err);
 }
@@ -222,13 +228,12 @@ static void cls_bpf_offload_update_stats(struct tcf_proto *tp,
 	struct tcf_block *block = tp->chain->block;
 	struct tc_cls_bpf_offload cls_bpf = {};
 
-	tc_cls_common_offload_init(&cls_bpf.common, tp);
+	tc_cls_common_offload_init(&cls_bpf.common, tp, prog->gen_flags, NULL);
 	cls_bpf.command = TC_CLSBPF_STATS;
 	cls_bpf.exts = &prog->exts;
 	cls_bpf.prog = prog->filter;
 	cls_bpf.name = prog->bpf_name;
 	cls_bpf.exts_integrated = prog->exts_integrated;
-	cls_bpf.gen_flags = prog->gen_flags;
 
 	tc_setup_cb_call(block, NULL, TC_SETUP_CLSBPF, &cls_bpf, false);
 }
@@ -285,12 +290,13 @@ static void cls_bpf_delete_prog_rcu(struct rcu_head *rcu)
 	tcf_queue_work(&prog->work);
 }
 
-static void __cls_bpf_delete(struct tcf_proto *tp, struct cls_bpf_prog *prog)
+static void __cls_bpf_delete(struct tcf_proto *tp, struct cls_bpf_prog *prog,
+			     struct netlink_ext_ack *extack)
 {
 	struct cls_bpf_head *head = rtnl_dereference(tp->root);
 
 	idr_remove_ext(&head->handle_idr, prog->handle);
-	cls_bpf_stop_offload(tp, prog);
+	cls_bpf_stop_offload(tp, prog, extack);
 	list_del_rcu(&prog->link);
 	tcf_unbind_filter(tp, &prog->res);
 	if (tcf_exts_get_net(&prog->exts))
@@ -299,22 +305,24 @@ static void __cls_bpf_delete(struct tcf_proto *tp, struct cls_bpf_prog *prog)
 		__cls_bpf_delete_prog(prog);
 }
 
-static int cls_bpf_delete(struct tcf_proto *tp, void *arg, bool *last)
+static int cls_bpf_delete(struct tcf_proto *tp, void *arg, bool *last,
+			  struct netlink_ext_ack *extack)
 {
 	struct cls_bpf_head *head = rtnl_dereference(tp->root);
 
-	__cls_bpf_delete(tp, arg);
+	__cls_bpf_delete(tp, arg, extack);
 	*last = list_empty(&head->plist);
 	return 0;
 }
 
-static void cls_bpf_destroy(struct tcf_proto *tp)
+static void cls_bpf_destroy(struct tcf_proto *tp,
+			    struct netlink_ext_ack *extack)
 {
 	struct cls_bpf_head *head = rtnl_dereference(tp->root);
 	struct cls_bpf_prog *prog, *tmp;
 
 	list_for_each_entry_safe(prog, tmp, &head->plist, link)
-		__cls_bpf_delete(tp, prog);
+		__cls_bpf_delete(tp, prog, extack);
 
 	idr_destroy(&head->handle_idr);
 	kfree_rcu(head, rcu);
@@ -399,15 +407,16 @@ static int cls_bpf_prog_from_efd(struct nlattr **tb, struct cls_bpf_prog *prog,
 	prog->bpf_name = name;
 	prog->filter = fp;
 
-	if (fp->dst_needed && !(tp->q->flags & TCQ_F_INGRESS))
-		netif_keep_dst(qdisc_dev(tp->q));
+	if (fp->dst_needed)
+		tcf_block_netif_keep_dst(tp->chain->block);
 
 	return 0;
 }
 
 static int cls_bpf_set_parms(struct net *net, struct tcf_proto *tp,
 			     struct cls_bpf_prog *prog, unsigned long base,
-			     struct nlattr **tb, struct nlattr *est, bool ovr)
+			     struct nlattr **tb, struct nlattr *est, bool ovr,
+			     struct netlink_ext_ack *extack)
 {
 	bool is_bpf, is_ebpf, have_exts = false;
 	u32 gen_flags = 0;
@@ -418,7 +427,7 @@ static int cls_bpf_set_parms(struct net *net, struct tcf_proto *tp,
 	if ((!is_bpf && !is_ebpf) || (is_bpf && is_ebpf))
 		return -EINVAL;
 
-	ret = tcf_exts_validate(net, tp, tb, est, &prog->exts, ovr);
+	ret = tcf_exts_validate(net, tp, tb, est, &prog->exts, ovr, extack);
 	if (ret < 0)
 		return ret;
 
@@ -456,7 +465,7 @@ static int cls_bpf_set_parms(struct net *net, struct tcf_proto *tp,
 static int cls_bpf_change(struct net *net, struct sk_buff *in_skb,
 			  struct tcf_proto *tp, unsigned long base,
 			  u32 handle, struct nlattr **tca,
-			  void **arg, bool ovr)
+			  void **arg, bool ovr, struct netlink_ext_ack *extack)
 {
 	struct cls_bpf_head *head = rtnl_dereference(tp->root);
 	struct cls_bpf_prog *oldprog = *arg;
@@ -504,11 +513,12 @@ static int cls_bpf_change(struct net *net, struct sk_buff *in_skb,
 		prog->handle = handle;
 	}
 
-	ret = cls_bpf_set_parms(net, tp, prog, base, tb, tca[TCA_RATE], ovr);
+	ret = cls_bpf_set_parms(net, tp, prog, base, tb, tca[TCA_RATE], ovr,
+				extack);
 	if (ret < 0)
 		goto errout_idr;
 
-	ret = cls_bpf_offload(tp, prog, oldprog);
+	ret = cls_bpf_offload(tp, prog, oldprog, extack);
 	if (ret)
 		goto errout_parms;
 
diff --git a/net/sched/cls_cgroup.c b/net/sched/cls_cgroup.c
index 309d5899265f..762da5c0cf5e 100644
--- a/net/sched/cls_cgroup.c
+++ b/net/sched/cls_cgroup.c
@@ -91,7 +91,8 @@ static void cls_cgroup_destroy_rcu(struct rcu_head *root)
 static int cls_cgroup_change(struct net *net, struct sk_buff *in_skb,
 			     struct tcf_proto *tp, unsigned long base,
 			     u32 handle, struct nlattr **tca,
-			     void **arg, bool ovr)
+			     void **arg, bool ovr,
+			     struct netlink_ext_ack *extack)
 {
 	struct nlattr *tb[TCA_CGROUP_MAX + 1];
 	struct cls_cgroup_head *head = rtnl_dereference(tp->root);
@@ -121,7 +122,8 @@ static int cls_cgroup_change(struct net *net, struct sk_buff *in_skb,
 	if (err < 0)
 		goto errout;
 
-	err = tcf_exts_validate(net, tp, tb, tca[TCA_RATE], &new->exts, ovr);
+	err = tcf_exts_validate(net, tp, tb, tca[TCA_RATE], &new->exts, ovr,
+				extack);
 	if (err < 0)
 		goto errout;
 
@@ -141,7 +143,8 @@ errout:
 	return err;
 }
 
-static void cls_cgroup_destroy(struct tcf_proto *tp)
+static void cls_cgroup_destroy(struct tcf_proto *tp,
+			       struct netlink_ext_ack *extack)
 {
 	struct cls_cgroup_head *head = rtnl_dereference(tp->root);
 
@@ -154,7 +157,8 @@ static void cls_cgroup_destroy(struct tcf_proto *tp)
 	}
 }
 
-static int cls_cgroup_delete(struct tcf_proto *tp, void *arg, bool *last)
+static int cls_cgroup_delete(struct tcf_proto *tp, void *arg, bool *last,
+			     struct netlink_ext_ack *extack)
 {
 	return -EOPNOTSUPP;
 }
diff --git a/net/sched/cls_flow.c b/net/sched/cls_flow.c
index 25c2a888e1f0..cd5fe383afdd 100644
--- a/net/sched/cls_flow.c
+++ b/net/sched/cls_flow.c
@@ -401,7 +401,7 @@ static void flow_destroy_filter(struct rcu_head *head)
 static int flow_change(struct net *net, struct sk_buff *in_skb,
 		       struct tcf_proto *tp, unsigned long base,
 		       u32 handle, struct nlattr **tca,
-		       void **arg, bool ovr)
+		       void **arg, bool ovr, struct netlink_ext_ack *extack)
 {
 	struct flow_head *head = rtnl_dereference(tp->root);
 	struct flow_filter *fold, *fnew;
@@ -454,7 +454,8 @@ static int flow_change(struct net *net, struct sk_buff *in_skb,
 	if (err < 0)
 		goto err2;
 
-	err = tcf_exts_validate(net, tp, tb, tca[TCA_RATE], &fnew->exts, ovr);
+	err = tcf_exts_validate(net, tp, tb, tca[TCA_RATE], &fnew->exts, ovr,
+				extack);
 	if (err < 0)
 		goto err2;
 
@@ -526,7 +527,7 @@ static int flow_change(struct net *net, struct sk_buff *in_skb,
 
 	timer_setup(&fnew->perturb_timer, flow_perturbation, TIMER_DEFERRABLE);
 
-	netif_keep_dst(qdisc_dev(tp->q));
+	tcf_block_netif_keep_dst(tp->chain->block);
 
 	if (tb[TCA_FLOW_KEYS]) {
 		fnew->keymask = keymask;
@@ -574,7 +575,8 @@ err1:
 	return err;
 }
 
-static int flow_delete(struct tcf_proto *tp, void *arg, bool *last)
+static int flow_delete(struct tcf_proto *tp, void *arg, bool *last,
+		       struct netlink_ext_ack *extack)
 {
 	struct flow_head *head = rtnl_dereference(tp->root);
 	struct flow_filter *f = arg;
@@ -598,7 +600,7 @@ static int flow_init(struct tcf_proto *tp)
 	return 0;
 }
 
-static void flow_destroy(struct tcf_proto *tp)
+static void flow_destroy(struct tcf_proto *tp, struct netlink_ext_ack *extack)
 {
 	struct flow_head *head = rtnl_dereference(tp->root);
 	struct flow_filter *f, *next;
diff --git a/net/sched/cls_flower.c b/net/sched/cls_flower.c
index 543a3e875d05..dc9acaafc0a8 100644
--- a/net/sched/cls_flower.c
+++ b/net/sched/cls_flower.c
@@ -166,6 +166,7 @@ static int fl_classify(struct sk_buff *skb, const struct tcf_proto *tp,
 	 * so do it here instead.
 	 */
 	skb_key.basic.n_proto = skb->protocol;
+	skb_flow_dissect_tunnel_info(skb, &head->dissector, &skb_key);
 	skb_flow_dissect(skb, &head->dissector, &skb_key, 0);
 
 	fl_set_masked_key(&skb_mkey, &skb_key, &head->mask);
@@ -217,30 +218,33 @@ static void fl_destroy_filter(struct rcu_head *head)
 	tcf_queue_work(&f->work);
 }
 
-static void fl_hw_destroy_filter(struct tcf_proto *tp, struct cls_fl_filter *f)
+static void fl_hw_destroy_filter(struct tcf_proto *tp, struct cls_fl_filter *f,
+				 struct netlink_ext_ack *extack)
 {
 	struct tc_cls_flower_offload cls_flower = {};
 	struct tcf_block *block = tp->chain->block;
 
-	tc_cls_common_offload_init(&cls_flower.common, tp);
+	tc_cls_common_offload_init(&cls_flower.common, tp, f->flags, extack);
 	cls_flower.command = TC_CLSFLOWER_DESTROY;
 	cls_flower.cookie = (unsigned long) f;
 
 	tc_setup_cb_call(block, &f->exts, TC_SETUP_CLSFLOWER,
 			 &cls_flower, false);
+	tcf_block_offload_dec(block, &f->flags);
 }
 
 static int fl_hw_replace_filter(struct tcf_proto *tp,
 				struct flow_dissector *dissector,
 				struct fl_flow_key *mask,
-				struct cls_fl_filter *f)
+				struct cls_fl_filter *f,
+				struct netlink_ext_ack *extack)
 {
 	struct tc_cls_flower_offload cls_flower = {};
 	struct tcf_block *block = tp->chain->block;
 	bool skip_sw = tc_skip_sw(f->flags);
 	int err;
 
-	tc_cls_common_offload_init(&cls_flower.common, tp);
+	tc_cls_common_offload_init(&cls_flower.common, tp, f->flags, extack);
 	cls_flower.command = TC_CLSFLOWER_REPLACE;
 	cls_flower.cookie = (unsigned long) f;
 	cls_flower.dissector = dissector;
@@ -252,10 +256,10 @@ static int fl_hw_replace_filter(struct tcf_proto *tp,
 	err = tc_setup_cb_call(block, &f->exts, TC_SETUP_CLSFLOWER,
 			       &cls_flower, skip_sw);
 	if (err < 0) {
-		fl_hw_destroy_filter(tp, f);
+		fl_hw_destroy_filter(tp, f, NULL);
 		return err;
 	} else if (err > 0) {
-		f->flags |= TCA_CLS_FLAGS_IN_HW;
+		tcf_block_offload_inc(block, &f->flags);
 	}
 
 	if (skip_sw && !(f->flags & TCA_CLS_FLAGS_IN_HW))
@@ -269,7 +273,7 @@ static void fl_hw_update_stats(struct tcf_proto *tp, struct cls_fl_filter *f)
 	struct tc_cls_flower_offload cls_flower = {};
 	struct tcf_block *block = tp->chain->block;
 
-	tc_cls_common_offload_init(&cls_flower.common, tp);
+	tc_cls_common_offload_init(&cls_flower.common, tp, f->flags, NULL);
 	cls_flower.command = TC_CLSFLOWER_STATS;
 	cls_flower.cookie = (unsigned long) f;
 	cls_flower.exts = &f->exts;
@@ -279,14 +283,15 @@ static void fl_hw_update_stats(struct tcf_proto *tp, struct cls_fl_filter *f)
 			 &cls_flower, false);
 }
 
-static void __fl_delete(struct tcf_proto *tp, struct cls_fl_filter *f)
+static void __fl_delete(struct tcf_proto *tp, struct cls_fl_filter *f,
+			struct netlink_ext_ack *extack)
 {
 	struct cls_fl_head *head = rtnl_dereference(tp->root);
 
 	idr_remove_ext(&head->handle_idr, f->handle);
 	list_del_rcu(&f->list);
 	if (!tc_skip_hw(f->flags))
-		fl_hw_destroy_filter(tp, f);
+		fl_hw_destroy_filter(tp, f, extack);
 	tcf_unbind_filter(tp, &f->res);
 	if (tcf_exts_get_net(&f->exts))
 		call_rcu(&f->rcu, fl_destroy_filter);
@@ -312,13 +317,13 @@ static void fl_destroy_rcu(struct rcu_head *rcu)
 	schedule_work(&head->work);
 }
 
-static void fl_destroy(struct tcf_proto *tp)
+static void fl_destroy(struct tcf_proto *tp, struct netlink_ext_ack *extack)
 {
 	struct cls_fl_head *head = rtnl_dereference(tp->root);
 	struct cls_fl_filter *f, *next;
 
 	list_for_each_entry_safe(f, next, &head->filters, list)
-		__fl_delete(tp, f);
+		__fl_delete(tp, f, extack);
 	idr_destroy(&head->handle_idr);
 
 	__module_get(THIS_MODULE);
@@ -524,13 +529,14 @@ static void fl_set_key_ip(struct nlattr **tb,
 }
 
 static int fl_set_key(struct net *net, struct nlattr **tb,
-		      struct fl_flow_key *key, struct fl_flow_key *mask)
+		      struct fl_flow_key *key, struct fl_flow_key *mask,
+		      struct netlink_ext_ack *extack)
 {
 	__be16 ethertype;
 	int ret = 0;
 #ifdef CONFIG_NET_CLS_IND
 	if (tb[TCA_FLOWER_INDEV]) {
-		int err = tcf_change_indev(net, tb[TCA_FLOWER_INDEV]);
+		int err = tcf_change_indev(net, tb[TCA_FLOWER_INDEV], extack);
 		if (err < 0)
 			return err;
 		key->indev_ifindex = err;
@@ -825,11 +831,12 @@ static int fl_check_assign_mask(struct cls_fl_head *head,
 static int fl_set_parms(struct net *net, struct tcf_proto *tp,
 			struct cls_fl_filter *f, struct fl_flow_mask *mask,
 			unsigned long base, struct nlattr **tb,
-			struct nlattr *est, bool ovr)
+			struct nlattr *est, bool ovr,
+			struct netlink_ext_ack *extack)
 {
 	int err;
 
-	err = tcf_exts_validate(net, tp, tb, est, &f->exts, ovr);
+	err = tcf_exts_validate(net, tp, tb, est, &f->exts, ovr, extack);
 	if (err < 0)
 		return err;
 
@@ -838,7 +845,7 @@ static int fl_set_parms(struct net *net, struct tcf_proto *tp,
 		tcf_bind_filter(tp, &f->res, base);
 	}
 
-	err = fl_set_key(net, tb, &f->key, &mask->key);
+	err = fl_set_key(net, tb, &f->key, &mask->key, extack);
 	if (err)
 		return err;
 
@@ -851,7 +858,7 @@ static int fl_set_parms(struct net *net, struct tcf_proto *tp,
 static int fl_change(struct net *net, struct sk_buff *in_skb,
 		     struct tcf_proto *tp, unsigned long base,
 		     u32 handle, struct nlattr **tca,
-		     void **arg, bool ovr)
+		     void **arg, bool ovr, struct netlink_ext_ack *extack)
 {
 	struct cls_fl_head *head = rtnl_dereference(tp->root);
 	struct cls_fl_filter *fold = *arg;
@@ -914,7 +921,8 @@ static int fl_change(struct net *net, struct sk_buff *in_skb,
 		}
 	}
 
-	err = fl_set_parms(net, tp, fnew, &mask, base, tb, tca[TCA_RATE], ovr);
+	err = fl_set_parms(net, tp, fnew, &mask, base, tb, tca[TCA_RATE], ovr,
+			   extack);
 	if (err)
 		goto errout_idr;
 
@@ -938,7 +946,8 @@ static int fl_change(struct net *net, struct sk_buff *in_skb,
 		err = fl_hw_replace_filter(tp,
 					   &head->dissector,
 					   &mask.key,
-					   fnew);
+					   fnew,
+					   extack);
 		if (err)
 			goto errout_idr;
 	}
@@ -951,7 +960,7 @@ static int fl_change(struct net *net, struct sk_buff *in_skb,
 			rhashtable_remove_fast(&head->ht, &fold->ht_node,
 					       head->ht_params);
 		if (!tc_skip_hw(fold->flags))
-			fl_hw_destroy_filter(tp, fold);
+			fl_hw_destroy_filter(tp, fold, NULL);
 	}
 
 	*arg = fnew;
@@ -981,7 +990,8 @@ errout_tb:
 	return err;
 }
 
-static int fl_delete(struct tcf_proto *tp, void *arg, bool *last)
+static int fl_delete(struct tcf_proto *tp, void *arg, bool *last,
+		     struct netlink_ext_ack *extack)
 {
 	struct cls_fl_head *head = rtnl_dereference(tp->root);
 	struct cls_fl_filter *f = arg;
@@ -989,7 +999,7 @@ static int fl_delete(struct tcf_proto *tp, void *arg, bool *last)
 	if (!tc_skip_sw(f->flags))
 		rhashtable_remove_fast(&head->ht, &f->ht_node,
 				       head->ht_params);
-	__fl_delete(tp, f);
+	__fl_delete(tp, f, extack);
 	*last = list_empty(&head->filters);
 	return 0;
 }
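
Besides the extack plumbing, flower stops setting TCA_CLS_FLAGS_IN_HW directly and goes through tcf_block_offload_inc()/tcf_block_offload_dec(), so the block keeps a running count of offloaded filters alongside the per-filter flag. A sketch of the assumed semantics (not the kernel source): the flag guards the counter, making repeated increments idempotent.

/* Sketch of the assumed inc/dec semantics: a per-block counter kept in
 * step with the per-filter IN_HW flag, so setting it twice counts once. */
#include <assert.h>

#define FLAG_IN_HW 0x1u	/* stands in for TCA_CLS_FLAGS_IN_HW */

struct block {
	unsigned int offloadcnt;
};

static void offload_inc(struct block *b, unsigned int *flags)
{
	if (*flags & FLAG_IN_HW)
		return;			/* already counted */
	*flags |= FLAG_IN_HW;
	b->offloadcnt++;
}

static void offload_dec(struct block *b, unsigned int *flags)
{
	if (!(*flags & FLAG_IN_HW))
		return;
	*flags &= ~FLAG_IN_HW;
	b->offloadcnt--;
}

int main(void)
{
	struct block b = { 0 };
	unsigned int flags = 0;

	offload_inc(&b, &flags);
	offload_inc(&b, &flags);	/* no double count */
	assert(b.offloadcnt == 1);
	offload_dec(&b, &flags);
	assert(b.offloadcnt == 0 && flags == 0);
	return 0;
}
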
diff --git a/net/sched/cls_fw.c b/net/sched/cls_fw.c
index 20f0de1a960a..8b207723fbc2 100644
--- a/net/sched/cls_fw.c
+++ b/net/sched/cls_fw.c
@@ -149,7 +149,7 @@ static void fw_delete_filter(struct rcu_head *head)
 	tcf_queue_work(&f->work);
 }
 
-static void fw_destroy(struct tcf_proto *tp)
+static void fw_destroy(struct tcf_proto *tp, struct netlink_ext_ack *extack)
 {
 	struct fw_head *head = rtnl_dereference(tp->root);
 	struct fw_filter *f;
@@ -172,7 +172,8 @@ static void fw_destroy(struct tcf_proto *tp)
 	kfree_rcu(head, rcu);
 }
 
-static int fw_delete(struct tcf_proto *tp, void *arg, bool *last)
+static int fw_delete(struct tcf_proto *tp, void *arg, bool *last,
+		     struct netlink_ext_ack *extack)
 {
 	struct fw_head *head = rtnl_dereference(tp->root);
 	struct fw_filter *f = arg;
@@ -218,13 +219,15 @@ static const struct nla_policy fw_policy[TCA_FW_MAX + 1] = {
 
 static int fw_set_parms(struct net *net, struct tcf_proto *tp,
 			struct fw_filter *f, struct nlattr **tb,
-			struct nlattr **tca, unsigned long base, bool ovr)
+			struct nlattr **tca, unsigned long base, bool ovr,
+			struct netlink_ext_ack *extack)
 {
 	struct fw_head *head = rtnl_dereference(tp->root);
 	u32 mask;
 	int err;
 
-	err = tcf_exts_validate(net, tp, tb, tca[TCA_RATE], &f->exts, ovr);
+	err = tcf_exts_validate(net, tp, tb, tca[TCA_RATE], &f->exts, ovr,
+				extack);
 	if (err < 0)
 		return err;
 
@@ -236,7 +239,7 @@ static int fw_set_parms(struct net *net, struct tcf_proto *tp,
 #ifdef CONFIG_NET_CLS_IND
 	if (tb[TCA_FW_INDEV]) {
 		int ret;
-		ret = tcf_change_indev(net, tb[TCA_FW_INDEV]);
+		ret = tcf_change_indev(net, tb[TCA_FW_INDEV], extack);
 		if (ret < 0)
 			return ret;
 		f->ifindex = ret;
@@ -257,7 +260,7 @@ static int fw_set_parms(struct net *net, struct tcf_proto *tp,
 static int fw_change(struct net *net, struct sk_buff *in_skb,
 		     struct tcf_proto *tp, unsigned long base,
 		     u32 handle, struct nlattr **tca, void **arg,
-		     bool ovr)
+		     bool ovr, struct netlink_ext_ack *extack)
 {
 	struct fw_head *head = rtnl_dereference(tp->root);
 	struct fw_filter *f = *arg;
@@ -296,7 +299,7 @@ static int fw_change(struct net *net, struct sk_buff *in_skb,
 			return err;
 		}
 
-		err = fw_set_parms(net, tp, fnew, tb, tca, base, ovr);
+		err = fw_set_parms(net, tp, fnew, tb, tca, base, ovr, extack);
 		if (err < 0) {
 			tcf_exts_destroy(&fnew->exts);
 			kfree(fnew);
@@ -345,7 +348,7 @@ static int fw_change(struct net *net, struct sk_buff *in_skb,
 	f->id = handle;
 	f->tp = tp;
 
-	err = fw_set_parms(net, tp, f, tb, tca, base, ovr);
+	err = fw_set_parms(net, tp, f, tb, tca, base, ovr, extack);
 	if (err < 0)
 		goto errout;
 
diff --git a/net/sched/cls_matchall.c b/net/sched/cls_matchall.c
index 66d4e0099158..2ba721a590a7 100644
--- a/net/sched/cls_matchall.c
+++ b/net/sched/cls_matchall.c
@@ -71,28 +71,31 @@ static void mall_destroy_rcu(struct rcu_head *rcu)
 
 static void mall_destroy_hw_filter(struct tcf_proto *tp,
 				   struct cls_mall_head *head,
-				   unsigned long cookie)
+				   unsigned long cookie,
+				   struct netlink_ext_ack *extack)
 {
 	struct tc_cls_matchall_offload cls_mall = {};
 	struct tcf_block *block = tp->chain->block;
 
-	tc_cls_common_offload_init(&cls_mall.common, tp);
+	tc_cls_common_offload_init(&cls_mall.common, tp, head->flags, extack);
 	cls_mall.command = TC_CLSMATCHALL_DESTROY;
 	cls_mall.cookie = cookie;
 
 	tc_setup_cb_call(block, NULL, TC_SETUP_CLSMATCHALL, &cls_mall, false);
+	tcf_block_offload_dec(block, &head->flags);
 }
 
 static int mall_replace_hw_filter(struct tcf_proto *tp,
 				  struct cls_mall_head *head,
-				  unsigned long cookie)
+				  unsigned long cookie,
+				  struct netlink_ext_ack *extack)
 {
 	struct tc_cls_matchall_offload cls_mall = {};
 	struct tcf_block *block = tp->chain->block;
 	bool skip_sw = tc_skip_sw(head->flags);
 	int err;
 
-	tc_cls_common_offload_init(&cls_mall.common, tp);
+	tc_cls_common_offload_init(&cls_mall.common, tp, head->flags, extack);
 	cls_mall.command = TC_CLSMATCHALL_REPLACE;
 	cls_mall.exts = &head->exts;
 	cls_mall.cookie = cookie;
@@ -100,10 +103,10 @@ static int mall_replace_hw_filter(struct tcf_proto *tp,
 	err = tc_setup_cb_call(block, NULL, TC_SETUP_CLSMATCHALL,
 			       &cls_mall, skip_sw);
 	if (err < 0) {
-		mall_destroy_hw_filter(tp, head, cookie);
+		mall_destroy_hw_filter(tp, head, cookie, NULL);
 		return err;
 	} else if (err > 0) {
-		head->flags |= TCA_CLS_FLAGS_IN_HW;
+		tcf_block_offload_inc(block, &head->flags);
 	}
 
 	if (skip_sw && !(head->flags & TCA_CLS_FLAGS_IN_HW))
@@ -112,7 +115,7 @@ static int mall_replace_hw_filter(struct tcf_proto *tp,
 	return 0;
 }
 
-static void mall_destroy(struct tcf_proto *tp)
+static void mall_destroy(struct tcf_proto *tp, struct netlink_ext_ack *extack)
 {
 	struct cls_mall_head *head = rtnl_dereference(tp->root);
 
@@ -120,7 +123,7 @@ static void mall_destroy(struct tcf_proto *tp)
 		return;
 
 	if (!tc_skip_hw(head->flags))
-		mall_destroy_hw_filter(tp, head, (unsigned long) head);
+		mall_destroy_hw_filter(tp, head, (unsigned long) head, extack);
 
 	if (tcf_exts_get_net(&head->exts))
 		call_rcu(&head->rcu, mall_destroy_rcu);
@@ -141,11 +144,12 @@ static const struct nla_policy mall_policy[TCA_MATCHALL_MAX + 1] = {
 static int mall_set_parms(struct net *net, struct tcf_proto *tp,
 			  struct cls_mall_head *head,
 			  unsigned long base, struct nlattr **tb,
-			  struct nlattr *est, bool ovr)
+			  struct nlattr *est, bool ovr,
+			  struct netlink_ext_ack *extack)
 {
 	int err;
 
-	err = tcf_exts_validate(net, tp, tb, est, &head->exts, ovr);
+	err = tcf_exts_validate(net, tp, tb, est, &head->exts, ovr, extack);
 	if (err < 0)
 		return err;
 
@@ -159,7 +163,7 @@ static int mall_set_parms(struct net *net, struct tcf_proto *tp,
 static int mall_change(struct net *net, struct sk_buff *in_skb,
 		       struct tcf_proto *tp, unsigned long base,
 		       u32 handle, struct nlattr **tca,
-		       void **arg, bool ovr)
+		       void **arg, bool ovr, struct netlink_ext_ack *extack)
 {
 	struct cls_mall_head *head = rtnl_dereference(tp->root);
 	struct nlattr *tb[TCA_MATCHALL_MAX + 1];
@@ -197,12 +201,14 @@ static int mall_change(struct net *net, struct sk_buff *in_skb,
 	new->handle = handle;
 	new->flags = flags;
 
-	err = mall_set_parms(net, tp, new, base, tb, tca[TCA_RATE], ovr);
+	err = mall_set_parms(net, tp, new, base, tb, tca[TCA_RATE], ovr,
+			     extack);
 	if (err)
 		goto err_set_parms;
 
 	if (!tc_skip_hw(new->flags)) {
-		err = mall_replace_hw_filter(tp, new, (unsigned long) new);
+		err = mall_replace_hw_filter(tp, new, (unsigned long)new,
+					     extack);
 		if (err)
 			goto err_replace_hw_filter;
 	}
@@ -222,7 +228,8 @@ err_exts_init:
 	return err;
 }
 
-static int mall_delete(struct tcf_proto *tp, void *arg, bool *last)
+static int mall_delete(struct tcf_proto *tp, void *arg, bool *last,
+		       struct netlink_ext_ack *extack)
 {
 	return -EOPNOTSUPP;
 }
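
matchall shows the offload half of the change: tc_cls_common_offload_init() now takes the filter flags and the extack. A plausible reading, sketched below as a hedged user-space model rather than the verbatim helper, is that drivers only get to attach an error message when skip_sw makes hardware offload mandatory; a best-effort offload failure is non-fatal and should not clobber the ack.

/* Hedged user-space model of the helper's assumed behaviour; not the
 * verbatim kernel inline. */
#include <stdio.h>

typedef unsigned int u32;

#define SKIP_SW 0x2u		/* stands in for TCA_CLS_FLAGS_SKIP_SW */

struct ext_ack {
	const char *msg;
};

struct cls_common {
	u32 prio;
	struct ext_ack *extack;
};

static void common_offload_init(struct cls_common *common, u32 prio,
				u32 flags, struct ext_ack *extack)
{
	common->prio = prio;
	/* Assumption: only hand drivers the ack when skip_sw makes the
	 * hardware offload mandatory. */
	common->extack = (flags & SKIP_SW) ? extack : NULL;
}

int main(void)
{
	struct ext_ack ack = { NULL };
	struct cls_common c;

	common_offload_init(&c, 1, SKIP_SW, &ack);
	printf("driver may report: %s\n", c.extack ? "yes" : "no");
	return 0;
}
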
diff --git a/net/sched/cls_route.c b/net/sched/cls_route.c
index ac9a5b8825b9..21a03a8ee029 100644
--- a/net/sched/cls_route.c
+++ b/net/sched/cls_route.c
@@ -281,7 +281,7 @@ static void route4_delete_filter(struct rcu_head *head)
 	tcf_queue_work(&f->work);
 }
 
-static void route4_destroy(struct tcf_proto *tp)
+static void route4_destroy(struct tcf_proto *tp, struct netlink_ext_ack *extack)
 {
 	struct route4_head *head = rtnl_dereference(tp->root);
 	int h1, h2;
@@ -316,7 +316,8 @@ static void route4_destroy(struct tcf_proto *tp)
 	kfree_rcu(head, rcu);
 }
 
-static int route4_delete(struct tcf_proto *tp, void *arg, bool *last)
+static int route4_delete(struct tcf_proto *tp, void *arg, bool *last,
+			 struct netlink_ext_ack *extack)
 {
 	struct route4_head *head = rtnl_dereference(tp->root);
 	struct route4_filter *f = arg;
@@ -389,7 +390,7 @@ static int route4_set_parms(struct net *net, struct tcf_proto *tp,
 			    unsigned long base, struct route4_filter *f,
 			    u32 handle, struct route4_head *head,
 			    struct nlattr **tb, struct nlattr *est, int new,
-			    bool ovr)
+			    bool ovr, struct netlink_ext_ack *extack)
 {
 	u32 id = 0, to = 0, nhandle = 0x8000;
 	struct route4_filter *fp;
@@ -397,7 +398,7 @@ static int route4_set_parms(struct net *net, struct tcf_proto *tp,
 	struct route4_bucket *b;
 	int err;
 
-	err = tcf_exts_validate(net, tp, tb, est, &f->exts, ovr);
+	err = tcf_exts_validate(net, tp, tb, est, &f->exts, ovr, extack);
 	if (err < 0)
 		return err;
 
@@ -471,7 +472,8 @@ static int route4_set_parms(struct net *net, struct tcf_proto *tp,
 
 static int route4_change(struct net *net, struct sk_buff *in_skb,
 			 struct tcf_proto *tp, unsigned long base, u32 handle,
-			 struct nlattr **tca, void **arg, bool ovr)
+			 struct nlattr **tca, void **arg, bool ovr,
+			 struct netlink_ext_ack *extack)
 {
 	struct route4_head *head = rtnl_dereference(tp->root);
 	struct route4_filter __rcu **fp;
@@ -515,7 +517,7 @@ static int route4_change(struct net *net, struct sk_buff *in_skb,
 	}
 
 	err = route4_set_parms(net, tp, base, f, handle, head, tb,
-			       tca[TCA_RATE], new, ovr);
+			       tca[TCA_RATE], new, ovr, extack);
 	if (err < 0)
 		goto errout;
 
@@ -527,7 +529,7 @@ static int route4_change(struct net *net, struct sk_buff *in_skb,
 		if (f->handle < f1->handle)
 			break;
 
-	netif_keep_dst(qdisc_dev(tp->q));
+	tcf_block_netif_keep_dst(tp->chain->block);
 	rcu_assign_pointer(f->next, f1);
 	rcu_assign_pointer(*fp, f);
 
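
cls_route makes the same netif_keep_dst() swap as cls_flow above: with shared blocks a classifier no longer has a single owning qdisc, so keeping dst entries becomes a property of the block, presumably recorded there and replayed onto every device the block is bound to. An illustrative model (the struct layout is an assumption):

/* Illustrative model only; the real tcf_block internals are assumed.
 * A shared block may be bound to several qdiscs/devices, so "keep dst
 * entries" is recorded on the block and applied to each bound device. */
#include <stdbool.h>
#include <stdio.h>

struct device {
	const char *name;
	bool keep_dst;
};

struct block {
	bool keep_dst;			/* remembered for future binds */
	struct device *devs[4];
	int ndev;
};

static void block_netif_keep_dst(struct block *b)
{
	int i;

	b->keep_dst = true;
	for (i = 0; i < b->ndev; i++)
		b->devs[i]->keep_dst = true;	/* apply to current binds */
}

int main(void)
{
	struct device d0 = { "eth0", false }, d1 = { "eth1", false };
	struct block b = { false, { &d0, &d1 }, 2 };

	block_netif_keep_dst(&b);
	printf("%s=%d %s=%d\n", d0.name, d0.keep_dst, d1.name, d1.keep_dst);
	return 0;
}
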
diff --git a/net/sched/cls_rsvp.h b/net/sched/cls_rsvp.h
index cf325625c99d..4f1297657c27 100644
--- a/net/sched/cls_rsvp.h
+++ b/net/sched/cls_rsvp.h
@@ -322,7 +322,7 @@ static void rsvp_delete_filter(struct tcf_proto *tp, struct rsvp_filter *f)
 		__rsvp_delete_filter(f);
 }
 
-static void rsvp_destroy(struct tcf_proto *tp)
+static void rsvp_destroy(struct tcf_proto *tp, struct netlink_ext_ack *extack)
 {
 	struct rsvp_head *data = rtnl_dereference(tp->root);
 	int h1, h2;
@@ -350,7 +350,8 @@ static void rsvp_destroy(struct tcf_proto *tp)
 	kfree_rcu(data, rcu);
 }
 
-static int rsvp_delete(struct tcf_proto *tp, void *arg, bool *last)
+static int rsvp_delete(struct tcf_proto *tp, void *arg, bool *last,
+		       struct netlink_ext_ack *extack)
 {
 	struct rsvp_head *head = rtnl_dereference(tp->root);
 	struct rsvp_filter *nfp, *f = arg;
@@ -486,7 +487,7 @@ static int rsvp_change(struct net *net, struct sk_buff *in_skb,
 		       struct tcf_proto *tp, unsigned long base,
 		       u32 handle,
 		       struct nlattr **tca,
-		       void **arg, bool ovr)
+		       void **arg, bool ovr, struct netlink_ext_ack *extack)
 {
 	struct rsvp_head *data = rtnl_dereference(tp->root);
 	struct rsvp_filter *f, *nfp;
@@ -511,7 +512,7 @@ static int rsvp_change(struct net *net, struct sk_buff *in_skb,
 	err = tcf_exts_init(&e, TCA_RSVP_ACT, TCA_RSVP_POLICE);
 	if (err < 0)
 		return err;
-	err = tcf_exts_validate(net, tp, tb, tca[TCA_RATE], &e, ovr);
+	err = tcf_exts_validate(net, tp, tb, tca[TCA_RATE], &e, ovr, extack);
 	if (err < 0)
 		goto errout2;
 
diff --git a/net/sched/cls_tcindex.c b/net/sched/cls_tcindex.c
index 67467ae24c97..b49cc990a000 100644
--- a/net/sched/cls_tcindex.c
+++ b/net/sched/cls_tcindex.c
@@ -193,7 +193,8 @@ static void tcindex_destroy_fexts(struct rcu_head *head)
 	tcf_queue_work(&f->work);
 }
 
-static int tcindex_delete(struct tcf_proto *tp, void *arg, bool *last)
+static int tcindex_delete(struct tcf_proto *tp, void *arg, bool *last,
+			  struct netlink_ext_ack *extack)
 {
 	struct tcindex_data *p = rtnl_dereference(tp->root);
 	struct tcindex_filter_result *r = arg;
@@ -246,7 +247,7 @@ static int tcindex_destroy_element(struct tcf_proto *tp,
 {
 	bool last;
 
-	return tcindex_delete(tp, arg, &last);
+	return tcindex_delete(tp, arg, &last, NULL);
 }
 
 static void __tcindex_destroy(struct rcu_head *head)
@@ -322,7 +323,7 @@ static int
 tcindex_set_parms(struct net *net, struct tcf_proto *tp, unsigned long base,
 		  u32 handle, struct tcindex_data *p,
 		  struct tcindex_filter_result *r, struct nlattr **tb,
-		  struct nlattr *est, bool ovr)
+		  struct nlattr *est, bool ovr, struct netlink_ext_ack *extack)
 {
 	struct tcindex_filter_result new_filter_result, *old_r = r;
 	struct tcindex_filter_result cr;
@@ -334,7 +335,7 @@ tcindex_set_parms(struct net *net, struct tcf_proto *tp, unsigned long base,
 	err = tcf_exts_init(&e, TCA_TCINDEX_ACT, TCA_TCINDEX_POLICE);
 	if (err < 0)
 		return err;
-	err = tcf_exts_validate(net, tp, tb, est, &e, ovr);
+	err = tcf_exts_validate(net, tp, tb, est, &e, ovr, extack);
 	if (err < 0)
 		goto errout;
 
@@ -520,7 +521,8 @@ errout:
 static int
 tcindex_change(struct net *net, struct sk_buff *in_skb,
 	       struct tcf_proto *tp, unsigned long base, u32 handle,
-	       struct nlattr **tca, void **arg, bool ovr)
+	       struct nlattr **tca, void **arg, bool ovr,
+	       struct netlink_ext_ack *extack)
 {
 	struct nlattr *opt = tca[TCA_OPTIONS];
 	struct nlattr *tb[TCA_TCINDEX_MAX + 1];
@@ -540,7 +542,7 @@ tcindex_change(struct net *net, struct sk_buff *in_skb,
 		return err;
 
 	return tcindex_set_parms(net, tp, base, handle, p, r, tb,
-				 tca[TCA_RATE], ovr);
+				 tca[TCA_RATE], ovr, extack);
 }
 
 static void tcindex_walk(struct tcf_proto *tp, struct tcf_walker *walker)
@@ -579,7 +581,8 @@ static void tcindex_walk(struct tcf_proto *tp, struct tcf_walker *walker)
 	}
 }
 
-static void tcindex_destroy(struct tcf_proto *tp)
+static void tcindex_destroy(struct tcf_proto *tp,
+			    struct netlink_ext_ack *extack)
 {
 	struct tcindex_data *p = rtnl_dereference(tp->root);
 	struct tcf_walker walker;
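
Note how tcindex_destroy_element() passes NULL for the new argument. Internal callers with no netlink request in hand, such as walkers and rollback paths like fl_hw_destroy_filter(tp, fold, NULL) earlier, all do this, so the reporting macros must tolerate a NULL ack. A sketch of that contract, assuming the kernel macro checks for NULL the same way:

/* Sketch of the contract: reporting macros must accept ack == NULL,
 * consistent with the NULL callers above (an assumption about the
 * kernel macro's expansion). */
#include <stdio.h>

struct ext_ack {
	const char *msg;
};

#define SET_ERR_MSG(ack, s)			\
	do {					\
		if (ack)			\
			(ack)->msg = (s);	\
	} while (0)

static void destroy_hw_filter(struct ext_ack *ack)
{
	SET_ERR_MSG(ack, "hardware refused to remove filter");
}

int main(void)
{
	struct ext_ack ack = { NULL };

	destroy_hw_filter(NULL);	/* rollback/walker path: message dropped */
	destroy_hw_filter(&ack);	/* request path: message recorded */
	printf("%s\n", ack.msg);
	return 0;
}
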
diff --git a/net/sched/cls_u32.c b/net/sched/cls_u32.c
index 507859cdd1cb..60c892c36a60 100644
--- a/net/sched/cls_u32.c
+++ b/net/sched/cls_u32.c
@@ -87,6 +87,7 @@ struct tc_u_hnode {
 	unsigned int		divisor;
 	struct idr		handle_idr;
 	struct rcu_head		rcu;
+	u32			flags;
 	/* The 'ht' field MUST be the last field in structure to allow for
 	 * more entries allocated at end of structure.
 	 */
@@ -486,12 +487,13 @@ static int u32_delete_key(struct tcf_proto *tp, struct tc_u_knode *key)
 	return 0;
 }
 
-static void u32_clear_hw_hnode(struct tcf_proto *tp, struct tc_u_hnode *h)
+static void u32_clear_hw_hnode(struct tcf_proto *tp, struct tc_u_hnode *h,
+			       struct netlink_ext_ack *extack)
 {
 	struct tcf_block *block = tp->chain->block;
 	struct tc_cls_u32_offload cls_u32 = {};
 
-	tc_cls_common_offload_init(&cls_u32.common, tp);
+	tc_cls_common_offload_init(&cls_u32.common, tp, h->flags, extack);
 	cls_u32.command = TC_CLSU32_DELETE_HNODE;
 	cls_u32.hnode.divisor = h->divisor;
 	cls_u32.hnode.handle = h->handle;
@@ -501,7 +503,7 @@ static void u32_clear_hw_hnode(struct tcf_proto *tp, struct tc_u_hnode *h)
 }
 
 static int u32_replace_hw_hnode(struct tcf_proto *tp, struct tc_u_hnode *h,
-				u32 flags)
+				u32 flags, struct netlink_ext_ack *extack)
 {
 	struct tcf_block *block = tp->chain->block;
 	struct tc_cls_u32_offload cls_u32 = {};
@@ -509,7 +511,7 @@ static int u32_replace_hw_hnode(struct tcf_proto *tp, struct tc_u_hnode *h,
 	bool offloaded = false;
 	int err;
 
-	tc_cls_common_offload_init(&cls_u32.common, tp);
+	tc_cls_common_offload_init(&cls_u32.common, tp, flags, extack);
 	cls_u32.command = TC_CLSU32_NEW_HNODE;
 	cls_u32.hnode.divisor = h->divisor;
 	cls_u32.hnode.handle = h->handle;
@@ -517,7 +519,7 @@ static int u32_replace_hw_hnode(struct tcf_proto *tp, struct tc_u_hnode *h,
 
 	err = tc_setup_cb_call(block, NULL, TC_SETUP_CLSU32, &cls_u32, skip_sw);
 	if (err < 0) {
-		u32_clear_hw_hnode(tp, h);
+		u32_clear_hw_hnode(tp, h, NULL);
 		return err;
 	} else if (err > 0) {
 		offloaded = true;
@@ -529,27 +531,29 @@ static int u32_replace_hw_hnode(struct tcf_proto *tp, struct tc_u_hnode *h,
 	return 0;
 }
 
-static void u32_remove_hw_knode(struct tcf_proto *tp, u32 handle)
+static void u32_remove_hw_knode(struct tcf_proto *tp, struct tc_u_knode *n,
+				struct netlink_ext_ack *extack)
 {
 	struct tcf_block *block = tp->chain->block;
 	struct tc_cls_u32_offload cls_u32 = {};
 
-	tc_cls_common_offload_init(&cls_u32.common, tp);
+	tc_cls_common_offload_init(&cls_u32.common, tp, n->flags, extack);
 	cls_u32.command = TC_CLSU32_DELETE_KNODE;
-	cls_u32.knode.handle = handle;
+	cls_u32.knode.handle = n->handle;
 
 	tc_setup_cb_call(block, NULL, TC_SETUP_CLSU32, &cls_u32, false);
+	tcf_block_offload_dec(block, &n->flags);
 }
 
 static int u32_replace_hw_knode(struct tcf_proto *tp, struct tc_u_knode *n,
-				u32 flags)
+				u32 flags, struct netlink_ext_ack *extack)
 {
 	struct tcf_block *block = tp->chain->block;
 	struct tc_cls_u32_offload cls_u32 = {};
 	bool skip_sw = tc_skip_sw(flags);
 	int err;
 
-	tc_cls_common_offload_init(&cls_u32.common, tp);
+	tc_cls_common_offload_init(&cls_u32.common, tp, flags, extack);
 	cls_u32.command = TC_CLSU32_REPLACE_KNODE;
 	cls_u32.knode.handle = n->handle;
 	cls_u32.knode.fshift = n->fshift;
@@ -567,10 +571,10 @@ static int u32_replace_hw_knode(struct tcf_proto *tp, struct tc_u_knode *n,
 
 	err = tc_setup_cb_call(block, NULL, TC_SETUP_CLSU32, &cls_u32, skip_sw);
 	if (err < 0) {
-		u32_remove_hw_knode(tp, n->handle);
+		u32_remove_hw_knode(tp, n, NULL);
 		return err;
 	} else if (err > 0) {
-		n->flags |= TCA_CLS_FLAGS_IN_HW;
+		tcf_block_offload_inc(block, &n->flags);
 	}
 
 	if (skip_sw && !(n->flags & TCA_CLS_FLAGS_IN_HW))
@@ -579,7 +583,8 @@ static int u32_replace_hw_knode(struct tcf_proto *tp, struct tc_u_knode *n,
 	return 0;
 }
 
-static void u32_clear_hnode(struct tcf_proto *tp, struct tc_u_hnode *ht)
+static void u32_clear_hnode(struct tcf_proto *tp, struct tc_u_hnode *ht,
+			    struct netlink_ext_ack *extack)
 {
 	struct tc_u_knode *n;
 	unsigned int h;
@@ -589,7 +594,7 @@ static void u32_clear_hnode(struct tcf_proto *tp, struct tc_u_hnode *ht)
 			RCU_INIT_POINTER(ht->ht[h],
 					 rtnl_dereference(n->next));
 			tcf_unbind_filter(tp, &n->res);
-			u32_remove_hw_knode(tp, n->handle);
+			u32_remove_hw_knode(tp, n, extack);
 			idr_remove_ext(&ht->handle_idr, n->handle);
 			if (tcf_exts_get_net(&n->exts))
 				call_rcu(&n->rcu, u32_delete_key_freepf_rcu);
@@ -599,7 +604,8 @@ static void u32_clear_hnode(struct tcf_proto *tp, struct tc_u_hnode *ht)
 	}
 }
 
-static int u32_destroy_hnode(struct tcf_proto *tp, struct tc_u_hnode *ht)
+static int u32_destroy_hnode(struct tcf_proto *tp, struct tc_u_hnode *ht,
+			     struct netlink_ext_ack *extack)
 {
 	struct tc_u_common *tp_c = tp->data;
 	struct tc_u_hnode __rcu **hn;
@@ -607,14 +613,14 @@ static int u32_destroy_hnode(struct tcf_proto *tp, struct tc_u_hnode *ht)
 
 	WARN_ON(ht->refcnt);
 
-	u32_clear_hnode(tp, ht);
+	u32_clear_hnode(tp, ht, extack);
 
 	hn = &tp_c->hlist;
 	for (phn = rtnl_dereference(*hn);
 	     phn;
 	     hn = &phn->next, phn = rtnl_dereference(*hn)) {
 		if (phn == ht) {
-			u32_clear_hw_hnode(tp, ht);
+			u32_clear_hw_hnode(tp, ht, extack);
 			idr_destroy(&ht->handle_idr);
 			idr_remove_ext(&tp_c->handle_idr, ht->handle);
 			RCU_INIT_POINTER(*hn, ht->next);
@@ -637,7 +643,7 @@ static bool ht_empty(struct tc_u_hnode *ht)
 	return true;
 }
 
-static void u32_destroy(struct tcf_proto *tp)
+static void u32_destroy(struct tcf_proto *tp, struct netlink_ext_ack *extack)
 {
 	struct tc_u_common *tp_c = tp->data;
 	struct tc_u_hnode *root_ht = rtnl_dereference(tp->root);
@@ -645,7 +651,7 @@ static void u32_destroy(struct tcf_proto *tp)
 	WARN_ON(root_ht == NULL);
 
 	if (root_ht && --root_ht->refcnt == 0)
-		u32_destroy_hnode(tp, root_ht);
+		u32_destroy_hnode(tp, root_ht, extack);
 
 	if (--tp_c->refcnt == 0) {
 		struct tc_u_hnode *ht;
@@ -656,7 +662,7 @@ static void u32_destroy(struct tcf_proto *tp)
 		     ht;
 		     ht = rtnl_dereference(ht->next)) {
 			ht->refcnt--;
-			u32_clear_hnode(tp, ht);
+			u32_clear_hnode(tp, ht, extack);
 		}
 
 		while ((ht = rtnl_dereference(tp_c->hlist)) != NULL) {
@@ -671,7 +677,8 @@ static void u32_destroy(struct tcf_proto *tp)
 	tp->data = NULL;
 }
 
-static int u32_delete(struct tcf_proto *tp, void *arg, bool *last)
+static int u32_delete(struct tcf_proto *tp, void *arg, bool *last,
+		      struct netlink_ext_ack *extack)
 {
 	struct tc_u_hnode *ht = arg;
 	struct tc_u_hnode *root_ht = rtnl_dereference(tp->root);
@@ -682,18 +689,21 @@ static int u32_delete(struct tcf_proto *tp, void *arg, bool *last)
 		goto out;
 
 	if (TC_U32_KEY(ht->handle)) {
-		u32_remove_hw_knode(tp, ht->handle);
+		u32_remove_hw_knode(tp, (struct tc_u_knode *)ht, extack);
 		ret = u32_delete_key(tp, (struct tc_u_knode *)ht);
 		goto out;
 	}
 
-	if (root_ht == ht)
+	if (root_ht == ht) {
+		NL_SET_ERR_MSG_MOD(extack, "Not allowed to delete root node");
 		return -EINVAL;
+	}
 
 	if (ht->refcnt == 1) {
 		ht->refcnt--;
-		u32_destroy_hnode(tp, ht);
+		u32_destroy_hnode(tp, ht, extack);
 	} else {
+		NL_SET_ERR_MSG_MOD(extack, "Can not delete in-use filter");
 		return -EBUSY;
 	}
 
@@ -764,11 +774,12 @@ static const struct nla_policy u32_policy[TCA_U32_MAX + 1] = {
 static int u32_set_parms(struct net *net, struct tcf_proto *tp,
 			 unsigned long base, struct tc_u_hnode *ht,
 			 struct tc_u_knode *n, struct nlattr **tb,
-			 struct nlattr *est, bool ovr)
+			 struct nlattr *est, bool ovr,
+			 struct netlink_ext_ack *extack)
 {
 	int err;
 
-	err = tcf_exts_validate(net, tp, tb, est, &n->exts, ovr);
+	err = tcf_exts_validate(net, tp, tb, est, &n->exts, ovr, extack);
 	if (err < 0)
 		return err;
 
@@ -776,14 +787,18 @@ static int u32_set_parms(struct net *net, struct tcf_proto *tp,
 		u32 handle = nla_get_u32(tb[TCA_U32_LINK]);
 		struct tc_u_hnode *ht_down = NULL, *ht_old;
 
-		if (TC_U32_KEY(handle))
+		if (TC_U32_KEY(handle)) {
+			NL_SET_ERR_MSG_MOD(extack, "u32 Link handle must be a hash table");
 			return -EINVAL;
+		}
 
 		if (handle) {
 			ht_down = u32_lookup_ht(ht->tp_c, handle);
 
-			if (ht_down == NULL)
+			if (!ht_down) {
+				NL_SET_ERR_MSG_MOD(extack, "Link hash table not found");
 				return -EINVAL;
+			}
 			ht_down->refcnt++;
 		}
 
@@ -801,7 +816,7 @@ static int u32_set_parms(struct net *net, struct tcf_proto *tp,
 #ifdef CONFIG_NET_CLS_IND
 	if (tb[TCA_U32_INDEV]) {
 		int ret;
-		ret = tcf_change_indev(net, tb[TCA_U32_INDEV]);
+		ret = tcf_change_indev(net, tb[TCA_U32_INDEV], extack);
 		if (ret < 0)
 			return -EINVAL;
 		n->ifindex = ret;
@@ -892,7 +907,8 @@ static struct tc_u_knode *u32_init_knode(struct tcf_proto *tp,
 
 static int u32_change(struct net *net, struct sk_buff *in_skb,
 		      struct tcf_proto *tp, unsigned long base, u32 handle,
-		      struct nlattr **tca, void **arg, bool ovr)
+		      struct nlattr **tca, void **arg, bool ovr,
+		      struct netlink_ext_ack *extack)
 {
 	struct tc_u_common *tp_c = tp->data;
 	struct tc_u_hnode *ht;
@@ -906,28 +922,40 @@ static int u32_change(struct net *net, struct sk_buff *in_skb,
 	size_t size;
 #endif
 
-	if (opt == NULL)
-		return handle ? -EINVAL : 0;
+	if (!opt) {
+		if (handle) {
+			NL_SET_ERR_MSG_MOD(extack, "Filter handle requires options");
+			return -EINVAL;
+		} else {
+			return 0;
+		}
+	}
 
-	err = nla_parse_nested(tb, TCA_U32_MAX, opt, u32_policy, NULL);
+	err = nla_parse_nested(tb, TCA_U32_MAX, opt, u32_policy, extack);
 	if (err < 0)
 		return err;
 
 	if (tb[TCA_U32_FLAGS]) {
 		flags = nla_get_u32(tb[TCA_U32_FLAGS]);
-		if (!tc_flags_valid(flags))
+		if (!tc_flags_valid(flags)) {
+			NL_SET_ERR_MSG_MOD(extack, "Invalid filter flags");
 			return -EINVAL;
+		}
 	}
 
 	n = *arg;
 	if (n) {
 		struct tc_u_knode *new;
 
-		if (TC_U32_KEY(n->handle) == 0)
+		if (TC_U32_KEY(n->handle) == 0) {
+			NL_SET_ERR_MSG_MOD(extack, "Key node id cannot be zero");
 			return -EINVAL;
+		}
 
-		if (n->flags != flags)
+		if (n->flags != flags) {
+			NL_SET_ERR_MSG_MOD(extack, "Key node flags do not match passed flags");
 			return -EINVAL;
+		}
 
 		new = u32_init_knode(tp, n);
 		if (!new)
@@ -935,14 +963,14 @@ static int u32_change(struct net *net, struct sk_buff *in_skb,
 
 		err = u32_set_parms(net, tp, base,
 				    rtnl_dereference(n->ht_up), new, tb,
-				    tca[TCA_RATE], ovr);
+				    tca[TCA_RATE], ovr, extack);
 
 		if (err) {
 			u32_destroy_key(tp, new, false);
 			return err;
 		}
 
-		err = u32_replace_hw_knode(tp, new, flags);
+		err = u32_replace_hw_knode(tp, new, flags, extack);
 		if (err) {
 			u32_destroy_key(tp, new, false);
 			return err;
@@ -961,10 +989,14 @@ static int u32_change(struct net *net, struct sk_buff *in_skb,
 	if (tb[TCA_U32_DIVISOR]) {
 		unsigned int divisor = nla_get_u32(tb[TCA_U32_DIVISOR]);
 
-		if (--divisor > 0x100)
+		if (--divisor > 0x100) {
+			NL_SET_ERR_MSG_MOD(extack, "Exceeded maximum 256 hash buckets");
 			return -EINVAL;
-		if (TC_U32_KEY(handle))
+		}
+		if (TC_U32_KEY(handle)) {
+			NL_SET_ERR_MSG_MOD(extack, "Divisor can only be used on a hash table");
 			return -EINVAL;
+		}
 		ht = kzalloc(sizeof(*ht) + divisor*sizeof(void *), GFP_KERNEL);
 		if (ht == NULL)
 			return -ENOBUFS;
@@ -988,8 +1020,9 @@ static int u32_change(struct net *net, struct sk_buff *in_skb,
 		ht->handle = handle;
 		ht->prio = tp->prio;
 		idr_init(&ht->handle_idr);
+		ht->flags = flags;
 
-		err = u32_replace_hw_hnode(tp, ht, flags);
+		err = u32_replace_hw_hnode(tp, ht, flags, extack);
 		if (err) {
 			idr_remove_ext(&tp_c->handle_idr, handle);
 			kfree(ht);
@@ -1010,20 +1043,26 @@ static int u32_change(struct net *net, struct sk_buff *in_skb,
 			htid = ht->handle;
 		} else {
 			ht = u32_lookup_ht(tp->data, TC_U32_HTID(htid));
-			if (ht == NULL)
+			if (!ht) {
+				NL_SET_ERR_MSG_MOD(extack, "Specified hash table not found");
 				return -EINVAL;
+			}
 		}
 	} else {
 		ht = rtnl_dereference(tp->root);
 		htid = ht->handle;
 	}
 
-	if (ht->divisor < TC_U32_HASH(htid))
+	if (ht->divisor < TC_U32_HASH(htid)) {
+		NL_SET_ERR_MSG_MOD(extack, "Specified hash table buckets exceed configured value");
 		return -EINVAL;
+	}
 
 	if (handle) {
-		if (TC_U32_HTID(handle) && TC_U32_HTID(handle^htid))
+		if (TC_U32_HTID(handle) && TC_U32_HTID(handle ^ htid)) {
+			NL_SET_ERR_MSG_MOD(extack, "Handle specified hash table address mismatch");
 			return -EINVAL;
+		}
 		handle = htid | TC_U32_NODE(handle);
 		err = idr_alloc_ext(&ht->handle_idr, NULL, NULL,
 				    handle, handle + 1,
@@ -1034,6 +1073,7 @@ static int u32_change(struct net *net, struct sk_buff *in_skb,
 		handle = gen_new_kid(ht, htid);
 
 	if (tb[TCA_U32_SEL] == NULL) {
+		NL_SET_ERR_MSG_MOD(extack, "Selector not specified");
 		err = -EINVAL;
 		goto erridr;
 	}
@@ -1082,12 +1122,13 @@ static int u32_change(struct net *net, struct sk_buff *in_skb,
 	}
 #endif
 
-	err = u32_set_parms(net, tp, base, ht, n, tb, tca[TCA_RATE], ovr);
+	err = u32_set_parms(net, tp, base, ht, n, tb, tca[TCA_RATE], ovr,
+			    extack);
 	if (err == 0) {
 		struct tc_u_knode __rcu **ins;
 		struct tc_u_knode *pins;
 
-		err = u32_replace_hw_knode(tp, n, flags);
+		err = u32_replace_hw_knode(tp, n, flags, extack);
 		if (err)
 			goto errhw;
 
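
cls_u32 uses NL_SET_ERR_MSG_MOD() where sch_api.c below uses plain NL_SET_ERR_MSG(). The _MOD variant presumably prefixes the message with the module name so users can tell which layer rejected the request; a user-space sketch of the difference, with the expansion being an assumption:

#include <stdio.h>

struct ext_ack {
	const char *msg;
};

#define MODNAME "cls_u32"	/* stands in for KBUILD_MODNAME */

#define SET_ERR_MSG(ack, s)			\
	do {					\
		if (ack)			\
			(ack)->msg = (s);	\
	} while (0)

/* Assumed expansion of the _MOD variant: the module prefix is glued on
 * at compile time by string-literal concatenation. */
#define SET_ERR_MSG_MOD(ack, s) SET_ERR_MSG(ack, MODNAME ": " s)

int main(void)
{
	struct ext_ack ack = { NULL };

	SET_ERR_MSG_MOD(&ack, "Not allowed to delete root node");
	puts(ack.msg);	/* prints "cls_u32: Not allowed to delete root node" */
	return 0;
}
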
diff --git a/net/sched/sch_api.c b/net/sched/sch_api.c
index 52529b7f8d96..d512f49ee83c 100644
--- a/net/sched/sch_api.c
+++ b/net/sched/sch_api.c
@@ -393,13 +393,16 @@ static __u8 __detect_linklayer(struct tc_ratespec *r, __u32 *rtab)
 static struct qdisc_rate_table *qdisc_rtab_list;
 
 struct qdisc_rate_table *qdisc_get_rtab(struct tc_ratespec *r,
-					struct nlattr *tab)
+					struct nlattr *tab,
+					struct netlink_ext_ack *extack)
 {
 	struct qdisc_rate_table *rtab;
 
 	if (tab == NULL || r->rate == 0 || r->cell_log == 0 ||
-	    nla_len(tab) != TC_RTAB_SIZE)
+	    nla_len(tab) != TC_RTAB_SIZE) {
+		NL_SET_ERR_MSG(extack, "Invalid rate table parameters for searching");
 		return NULL;
+	}
 
 	for (rtab = qdisc_rtab_list; rtab; rtab = rtab->next) {
 		if (!memcmp(&rtab->rate, r, sizeof(struct tc_ratespec)) &&
@@ -418,6 +421,8 @@ struct qdisc_rate_table *qdisc_get_rtab(struct tc_ratespec *r,
 			r->linklayer = __detect_linklayer(r, rtab->data);
 		rtab->next = qdisc_rtab_list;
 		qdisc_rtab_list = rtab;
+	} else {
+		NL_SET_ERR_MSG(extack, "Failed to allocate new qdisc rate table");
 	}
 	return rtab;
 }
@@ -449,7 +454,8 @@ static const struct nla_policy stab_policy[TCA_STAB_MAX + 1] = {
 	[TCA_STAB_DATA] = { .type = NLA_BINARY },
 };
 
-static struct qdisc_size_table *qdisc_get_stab(struct nlattr *opt)
+static struct qdisc_size_table *qdisc_get_stab(struct nlattr *opt,
+					       struct netlink_ext_ack *extack)
 {
 	struct nlattr *tb[TCA_STAB_MAX + 1];
 	struct qdisc_size_table *stab;
@@ -458,23 +464,29 @@ static struct qdisc_size_table *qdisc_get_stab(struct nlattr *opt)
 	u16 *tab = NULL;
 	int err;
 
-	err = nla_parse_nested(tb, TCA_STAB_MAX, opt, stab_policy, NULL);
+	err = nla_parse_nested(tb, TCA_STAB_MAX, opt, stab_policy, extack);
 	if (err < 0)
 		return ERR_PTR(err);
-	if (!tb[TCA_STAB_BASE])
+	if (!tb[TCA_STAB_BASE]) {
+		NL_SET_ERR_MSG(extack, "Size table base attribute is missing");
 		return ERR_PTR(-EINVAL);
+	}
 
 	s = nla_data(tb[TCA_STAB_BASE]);
 
 	if (s->tsize > 0) {
-		if (!tb[TCA_STAB_DATA])
+		if (!tb[TCA_STAB_DATA]) {
+			NL_SET_ERR_MSG(extack, "Size table data attribute is missing");
 			return ERR_PTR(-EINVAL);
+		}
 		tab = nla_data(tb[TCA_STAB_DATA]);
 		tsize = nla_len(tb[TCA_STAB_DATA]) / sizeof(u16);
 	}
 
-	if (tsize != s->tsize || (!tab && tsize > 0))
+	if (tsize != s->tsize || (!tab && tsize > 0)) {
+		NL_SET_ERR_MSG(extack, "Invalid size of size table");
 		return ERR_PTR(-EINVAL);
+	}
 
 	list_for_each_entry(stab, &qdisc_stab_list, list) {
 		if (memcmp(&stab->szopts, s, sizeof(*s)))
@@ -669,7 +681,7 @@ int qdisc_class_hash_init(struct Qdisc_class_hash *clhash)
 	unsigned int size = 4;
 
 	clhash->hash = qdisc_class_hash_alloc(size);
-	if (clhash->hash == NULL)
+	if (!clhash->hash)
 		return -ENOMEM;
 	clhash->hashsize  = size;
 	clhash->hashmask  = size - 1;
@@ -779,6 +791,7 @@ static int tc_fill_qdisc(struct sk_buff *skb, struct Qdisc *q, u32 clid,
 	unsigned char *b = skb_tail_pointer(skb);
 	struct gnet_dump d;
 	struct qdisc_size_table *stab;
+	u32 block_index;
 	__u32 qlen;
 
 	cond_resched();
@@ -795,11 +808,23 @@ static int tc_fill_qdisc(struct sk_buff *skb, struct Qdisc *q, u32 clid,
 	tcm->tcm_info = refcount_read(&q->refcnt);
 	if (nla_put_string(skb, TCA_KIND, q->ops->id))
 		goto nla_put_failure;
-	if (nla_put_u8(skb, TCA_HW_OFFLOAD, !!(q->flags & TCQ_F_OFFLOADED)))
-		goto nla_put_failure;
+	if (q->ops->ingress_block_get) {
+		block_index = q->ops->ingress_block_get(q);
+		if (block_index &&
+		    nla_put_u32(skb, TCA_INGRESS_BLOCK, block_index))
+			goto nla_put_failure;
+	}
+	if (q->ops->egress_block_get) {
+		block_index = q->ops->egress_block_get(q);
+		if (block_index &&
+		    nla_put_u32(skb, TCA_EGRESS_BLOCK, block_index))
+			goto nla_put_failure;
+	}
 	if (q->ops->dump && q->ops->dump(q, skb) < 0)
 		goto nla_put_failure;
-	qlen = q->q.qlen;
+	if (nla_put_u8(skb, TCA_HW_OFFLOAD, !!(q->flags & TCQ_F_OFFLOADED)))
+		goto nla_put_failure;
+	qlen = qdisc_qlen_sum(q);
 
 	stab = rtnl_dereference(q->stab);
 	if (stab && qdisc_dump_stab(skb, stab) < 0)
@@ -898,7 +923,8 @@ static void notify_and_destroy(struct net *net, struct sk_buff *skb,
 
 static int qdisc_graft(struct net_device *dev, struct Qdisc *parent,
 		       struct sk_buff *skb, struct nlmsghdr *n, u32 classid,
-		       struct Qdisc *new, struct Qdisc *old)
+		       struct Qdisc *new, struct Qdisc *old,
+		       struct netlink_ext_ack *extack)
 {
 	struct Qdisc *q = old;
 	struct net *net = dev_net(dev);
@@ -913,8 +939,10 @@ static int qdisc_graft(struct net_device *dev, struct Qdisc *parent,
 		    (new && new->flags & TCQ_F_INGRESS)) {
 			num_q = 1;
 			ingress = 1;
-			if (!dev_ingress_queue(dev))
+			if (!dev_ingress_queue(dev)) {
+				NL_SET_ERR_MSG(extack, "Device does not have an ingress queue");
 				return -ENOENT;
+			}
 		}
 
 		if (dev->flags & IFF_UP)
@@ -956,14 +984,22 @@ skip:
 	} else {
 		const struct Qdisc_class_ops *cops = parent->ops->cl_ops;
 
+		/* Only support running class lockless if parent is lockless */
+		if (new && (new->flags & TCQ_F_NOLOCK) &&
+		    parent && !(parent->flags & TCQ_F_NOLOCK))
+			new->flags &= ~TCQ_F_NOLOCK;
+
 		err = -EOPNOTSUPP;
 		if (cops && cops->graft) {
 			unsigned long cl = cops->find(parent, classid);
 
-			if (cl)
-				err = cops->graft(parent, cl, new, &old);
-			else
+			if (cl) {
+				err = cops->graft(parent, cl, new, &old,
+						  extack);
+			} else {
+				NL_SET_ERR_MSG(extack, "Specified class not found");
 				err = -ENOENT;
+			}
 		}
 		if (!err)
 			notify_and_destroy(net, skb, n, classid, old, new);
@@ -971,6 +1007,40 @@ skip:
 	return err;
 }
 
+static int qdisc_block_indexes_set(struct Qdisc *sch, struct nlattr **tca,
+				   struct netlink_ext_ack *extack)
+{
+	u32 block_index;
+
+	if (tca[TCA_INGRESS_BLOCK]) {
+		block_index = nla_get_u32(tca[TCA_INGRESS_BLOCK]);
+
+		if (!block_index) {
+			NL_SET_ERR_MSG(extack, "Ingress block index cannot be 0");
+			return -EINVAL;
+		}
+		if (!sch->ops->ingress_block_set) {
+			NL_SET_ERR_MSG(extack, "Ingress block sharing is not supported");
+			return -EOPNOTSUPP;
+		}
+		sch->ops->ingress_block_set(sch, block_index);
+	}
+	if (tca[TCA_EGRESS_BLOCK]) {
+		block_index = nla_get_u32(tca[TCA_EGRESS_BLOCK]);
+
+		if (!block_index) {
+			NL_SET_ERR_MSG(extack, "Egress block index cannot be 0");
+			return -EINVAL;
+		}
+		if (!sch->ops->egress_block_set) {
+			NL_SET_ERR_MSG(extack, "Egress block sharing is not supported");
+			return -EOPNOTSUPP;
+		}
+		sch->ops->egress_block_set(sch, block_index);
+	}
+	return 0;
+}
+
 /* lockdep annotation is needed for ingress; egress gets it only for name */
 static struct lock_class_key qdisc_tx_lock;
 static struct lock_class_key qdisc_rx_lock;
@@ -984,7 +1054,8 @@ static struct lock_class_key qdisc_rx_lock;
 static struct Qdisc *qdisc_create(struct net_device *dev,
 				  struct netdev_queue *dev_queue,
 				  struct Qdisc *p, u32 parent, u32 handle,
-				  struct nlattr **tca, int *errp)
+				  struct nlattr **tca, int *errp,
+				  struct netlink_ext_ack *extack)
 {
 	int err;
 	struct nlattr *kind = tca[TCA_KIND];
@@ -1022,10 +1093,12 @@ static struct Qdisc *qdisc_create(struct net_device *dev,
 #endif
 
 	err = -ENOENT;
-	if (ops == NULL)
+	if (!ops) {
+		NL_SET_ERR_MSG(extack, "Specified qdisc not found");
 		goto err_out;
+	}
 
-	sch = qdisc_alloc(dev_queue, ops);
+	sch = qdisc_alloc(dev_queue, ops, extack);
 	if (IS_ERR(sch)) {
 		err = PTR_ERR(sch);
 		goto err_out2;
@@ -1062,43 +1135,57 @@ static struct Qdisc *qdisc_create(struct net_device *dev,
 		netdev_info(dev, "Caught tx_queue_len zero misconfig\n");
 	}
 
-	if (!ops->init || (err = ops->init(sch, tca[TCA_OPTIONS])) == 0) {
-		if (tca[TCA_STAB]) {
-			stab = qdisc_get_stab(tca[TCA_STAB]);
-			if (IS_ERR(stab)) {
-				err = PTR_ERR(stab);
-				goto err_out4;
-			}
-			rcu_assign_pointer(sch->stab, stab);
-		}
-		if (tca[TCA_RATE]) {
-			seqcount_t *running;
-
-			err = -EOPNOTSUPP;
-			if (sch->flags & TCQ_F_MQROOT)
-				goto err_out4;
-
-			if ((sch->parent != TC_H_ROOT) &&
-			    !(sch->flags & TCQ_F_INGRESS) &&
-			    (!p || !(p->flags & TCQ_F_MQROOT)))
-				running = qdisc_root_sleeping_running(sch);
-			else
-				running = &sch->running;
-
-			err = gen_new_estimator(&sch->bstats,
-						sch->cpu_bstats,
-						&sch->rate_est,
-						NULL,
-						running,
-						tca[TCA_RATE]);
-			if (err)
-				goto err_out4;
+	err = qdisc_block_indexes_set(sch, tca, extack);
+	if (err)
+		goto err_out3;
+
+	if (ops->init) {
+		err = ops->init(sch, tca[TCA_OPTIONS], extack);
+		if (err != 0)
+			goto err_out5;
+	}
+
+	if (tca[TCA_STAB]) {
+		stab = qdisc_get_stab(tca[TCA_STAB], extack);
+		if (IS_ERR(stab)) {
+			err = PTR_ERR(stab);
+			goto err_out4;
 		}
+		rcu_assign_pointer(sch->stab, stab);
+	}
+	if (tca[TCA_RATE]) {
+		seqcount_t *running;
 
-		qdisc_hash_add(sch, false);
+		err = -EOPNOTSUPP;
+		if (sch->flags & TCQ_F_MQROOT) {
+			NL_SET_ERR_MSG(extack, "Cannot attach rate estimator to a multi-queue root qdisc");
+			goto err_out4;
+		}
 
-		return sch;
+		if (sch->parent != TC_H_ROOT &&
+		    !(sch->flags & TCQ_F_INGRESS) &&
+		    (!p || !(p->flags & TCQ_F_MQROOT)))
+			running = qdisc_root_sleeping_running(sch);
+		else
+			running = &sch->running;
+
+		err = gen_new_estimator(&sch->bstats,
+					sch->cpu_bstats,
+					&sch->rate_est,
+					NULL,
+					running,
+					tca[TCA_RATE]);
+		if (err) {
+			NL_SET_ERR_MSG(extack, "Failed to generate new estimator");
+			goto err_out4;
+		}
 	}
+
+	qdisc_hash_add(sch, false);
+
+	return sch;
+
+err_out5:
 	/* ops->init() failed, we call ->destroy() like qdisc_create_dflt() */
 	if (ops->destroy)
 		ops->destroy(sch);
@@ -1122,21 +1209,28 @@ err_out4:
 	goto err_out3;
 }
 
-static int qdisc_change(struct Qdisc *sch, struct nlattr **tca)
+static int qdisc_change(struct Qdisc *sch, struct nlattr **tca,
+			struct netlink_ext_ack *extack)
 {
 	struct qdisc_size_table *ostab, *stab = NULL;
 	int err = 0;
 
 	if (tca[TCA_OPTIONS]) {
-		if (sch->ops->change == NULL)
+		if (!sch->ops->change) {
+			NL_SET_ERR_MSG(extack, "Change operation not supported by specified qdisc");
 			return -EINVAL;
-		err = sch->ops->change(sch, tca[TCA_OPTIONS]);
+		}
+		if (tca[TCA_INGRESS_BLOCK] || tca[TCA_EGRESS_BLOCK]) {
+			NL_SET_ERR_MSG(extack, "Change of blocks is not supported");
+			return -EOPNOTSUPP;
+		}
+		err = sch->ops->change(sch, tca[TCA_OPTIONS], extack);
 		if (err)
 			return err;
 	}
 
 	if (tca[TCA_STAB]) {
-		stab = qdisc_get_stab(tca[TCA_STAB]);
+		stab = qdisc_get_stab(tca[TCA_STAB], extack);
 		if (IS_ERR(stab))
 			return PTR_ERR(stab);
 	}
@@ -1234,8 +1328,10 @@ static int tc_get_qdisc(struct sk_buff *skb, struct nlmsghdr *n,
 		if (clid != TC_H_ROOT) {
 			if (TC_H_MAJ(clid) != TC_H_MAJ(TC_H_INGRESS)) {
 				p = qdisc_lookup(dev, TC_H_MAJ(clid));
-				if (!p)
+				if (!p) {
+					NL_SET_ERR_MSG(extack, "Failed to find qdisc with specified classid");
 					return -ENOENT;
+				}
 				q = qdisc_leaf(p, clid);
 			} else if (dev_ingress_queue(dev)) {
 				q = dev_ingress_queue(dev)->qdisc_sleeping;
@@ -1243,26 +1339,38 @@ static int tc_get_qdisc(struct sk_buff *skb, struct nlmsghdr *n,
 		} else {
 			q = dev->qdisc;
 		}
-		if (!q)
+		if (!q) {
+			NL_SET_ERR_MSG(extack, "Cannot find specified qdisc on specified device");
 			return -ENOENT;
+		}
 
-		if (tcm->tcm_handle && q->handle != tcm->tcm_handle)
+		if (tcm->tcm_handle && q->handle != tcm->tcm_handle) {
+			NL_SET_ERR_MSG(extack, "Invalid handle");
 			return -EINVAL;
+		}
 	} else {
 		q = qdisc_lookup(dev, tcm->tcm_handle);
-		if (!q)
+		if (!q) {
+			NL_SET_ERR_MSG(extack, "Failed to find qdisc with specified handle");
 			return -ENOENT;
+		}
 	}
 
-	if (tca[TCA_KIND] && nla_strcmp(tca[TCA_KIND], q->ops->id))
+	if (tca[TCA_KIND] && nla_strcmp(tca[TCA_KIND], q->ops->id)) {
+		NL_SET_ERR_MSG(extack, "Invalid qdisc name");
 		return -EINVAL;
+	}
 
 	if (n->nlmsg_type == RTM_DELQDISC) {
-		if (!clid)
+		if (!clid) {
+			NL_SET_ERR_MSG(extack, "Classid cannot be zero");
 			return -EINVAL;
-		if (q->handle == 0)
+		}
+		if (q->handle == 0) {
+			NL_SET_ERR_MSG(extack, "Cannot delete qdisc with handle of zero");
 			return -ENOENT;
-		err = qdisc_graft(dev, p, skb, n, clid, NULL, q);
+		}
+		err = qdisc_graft(dev, p, skb, n, clid, NULL, q, extack);
 		if (err != 0)
 			return err;
 	} else {
@@ -1308,8 +1416,10 @@ replay:
 		if (clid != TC_H_ROOT) {
 			if (clid != TC_H_INGRESS) {
 				p = qdisc_lookup(dev, TC_H_MAJ(clid));
-				if (!p)
+				if (!p) {
+					NL_SET_ERR_MSG(extack, "Failed to find specified qdisc");
 					return -ENOENT;
+				}
 				q = qdisc_leaf(p, clid);
 			} else if (dev_ingress_queue_create(dev)) {
 				q = dev_ingress_queue(dev)->qdisc_sleeping;
@@ -1324,20 +1434,31 @@ replay:
 
 		if (!q || !tcm->tcm_handle || q->handle != tcm->tcm_handle) {
 			if (tcm->tcm_handle) {
-				if (q && !(n->nlmsg_flags & NLM_F_REPLACE))
+				if (q && !(n->nlmsg_flags & NLM_F_REPLACE)) {
+					NL_SET_ERR_MSG(extack, "NLM_F_REPLACE needed to override");
 					return -EEXIST;
-				if (TC_H_MIN(tcm->tcm_handle))
+				}
+				if (TC_H_MIN(tcm->tcm_handle)) {
+					NL_SET_ERR_MSG(extack, "Invalid minor handle");
 					return -EINVAL;
+				}
 				q = qdisc_lookup(dev, tcm->tcm_handle);
 				if (!q)
 					goto create_n_graft;
-				if (n->nlmsg_flags & NLM_F_EXCL)
+				if (n->nlmsg_flags & NLM_F_EXCL) {
+					NL_SET_ERR_MSG(extack, "Exclusivity flag on, cannot override");
 					return -EEXIST;
-				if (tca[TCA_KIND] && nla_strcmp(tca[TCA_KIND], q->ops->id))
+				}
+				if (tca[TCA_KIND] &&
+				    nla_strcmp(tca[TCA_KIND], q->ops->id)) {
+					NL_SET_ERR_MSG(extack, "Invalid qdisc name");
 					return -EINVAL;
+				}
 				if (q == p ||
-				    (p && check_loop(q, p, 0)))
+				    (p && check_loop(q, p, 0))) {
+					NL_SET_ERR_MSG(extack, "Qdisc parent/child loop detected");
 					return -ELOOP;
+				}
 				qdisc_refcount_inc(q);
 				goto graft;
 			} else {
@@ -1372,33 +1493,45 @@ replay:
 			}
 		}
 	} else {
-		if (!tcm->tcm_handle)
+		if (!tcm->tcm_handle) {
+			NL_SET_ERR_MSG(extack, "Handle cannot be zero");
 			return -EINVAL;
+		}
 		q = qdisc_lookup(dev, tcm->tcm_handle);
 	}
 
 	/* Change qdisc parameters */
-	if (q == NULL)
+	if (!q) {
+		NL_SET_ERR_MSG(extack, "Specified qdisc not found");
 		return -ENOENT;
-	if (n->nlmsg_flags & NLM_F_EXCL)
+	}
+	if (n->nlmsg_flags & NLM_F_EXCL) {
+		NL_SET_ERR_MSG(extack, "Exclusivity flag on, cannot modify");
 		return -EEXIST;
-	if (tca[TCA_KIND] && nla_strcmp(tca[TCA_KIND], q->ops->id))
+	}
+	if (tca[TCA_KIND] && nla_strcmp(tca[TCA_KIND], q->ops->id)) {
+		NL_SET_ERR_MSG(extack, "Invalid qdisc name");
 		return -EINVAL;
-	err = qdisc_change(q, tca);
+	}
+	err = qdisc_change(q, tca, extack);
 	if (err == 0)
 		qdisc_notify(net, skb, n, clid, NULL, q);
 	return err;
 
 create_n_graft:
-	if (!(n->nlmsg_flags & NLM_F_CREATE))
+	if (!(n->nlmsg_flags & NLM_F_CREATE)) {
+		NL_SET_ERR_MSG(extack, "Qdisc not found. To create specify NLM_F_CREATE flag");
 		return -ENOENT;
+	}
 	if (clid == TC_H_INGRESS) {
-		if (dev_ingress_queue(dev))
+		if (dev_ingress_queue(dev)) {
 			q = qdisc_create(dev, dev_ingress_queue(dev), p,
 					 tcm->tcm_parent, tcm->tcm_parent,
-					 tca, &err);
-		else
+					 tca, &err, extack);
+		} else {
+			NL_SET_ERR_MSG(extack, "Cannot find ingress queue for specified device");
 			err = -ENOENT;
+		}
 	} else {
 		struct netdev_queue *dev_queue;
 
@@ -1411,7 +1544,7 @@ create_n_graft:
 
 		q = qdisc_create(dev, dev_queue, p,
 				 tcm->tcm_parent, tcm->tcm_handle,
-				 tca, &err);
+				 tca, &err, extack);
 	}
 	if (q == NULL) {
 		if (err == -EAGAIN)
@@ -1420,7 +1553,7 @@ create_n_graft:
 	}
 
 graft:
-	err = qdisc_graft(dev, p, skb, n, clid, q, NULL);
+	err = qdisc_graft(dev, p, skb, n, clid, q, NULL, extack);
 	if (err) {
 		if (q)
 			qdisc_destroy(q);
@@ -1672,7 +1805,7 @@ static void tc_bind_tclass(struct Qdisc *q, u32 portid, u32 clid,
 	cl = cops->find(q, portid);
 	if (!cl)
 		return;
-	block = cops->tcf_block(q, cl);
+	block = cops->tcf_block(q, cl, NULL);
 	if (!block)
 		return;
 	list_for_each_entry(chain, &block->chain_list, list) {
@@ -1816,10 +1949,15 @@ static int tc_ctl_tclass(struct sk_buff *skb, struct nlmsghdr *n,
 		}
 	}
 
+	if (tca[TCA_INGRESS_BLOCK] || tca[TCA_EGRESS_BLOCK]) {
+		NL_SET_ERR_MSG(extack, "Shared blocks are not supported for classes");
+		return -EOPNOTSUPP;
+	}
+
 	new_cl = cl;
 	err = -EOPNOTSUPP;
 	if (cops->change)
-		err = cops->change(q, clid, portid, tca, &new_cl);
+		err = cops->change(q, clid, portid, tca, &new_cl, extack);
 	if (err == 0) {
 		tclass_notify(net, skb, n, q, new_cl, RTM_NEWTCLASS);
 		/* We just create a new class, need to do reverse binding. */
@@ -1955,7 +2093,6 @@ static int psched_open(struct inode *inode, struct file *file)
 }
 
 static const struct file_operations psched_fops = {
-	.owner = THIS_MODULE,
 	.open = psched_open,
 	.read  = seq_read,
 	.llseek = seq_lseek,
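
Beyond extack, sch_api.c wires up shared-block indexes: a qdisc that supports sharing exposes ingress_block_set()/ingress_block_get() (plus the egress pair) in its ops, qdisc_block_indexes_set() invokes them at create time, and tc_fill_qdisc() dumps the index back to user space. A sketch of what a supporting qdisc provides; only the four member names come from the calls above, the rest is scaffolding:

/* Sketch: the new block-sharing hooks on a qdisc's ops. Only the four
 * member names come from the calls above; everything else is demo code. */
#include <stdio.h>

typedef unsigned int u32;

struct Qdisc;

struct Qdisc_ops_excerpt {
	void (*ingress_block_set)(struct Qdisc *sch, u32 block_index);
	u32 (*ingress_block_get)(struct Qdisc *sch);
	void (*egress_block_set)(struct Qdisc *sch, u32 block_index);
	u32 (*egress_block_get)(struct Qdisc *sch);
};

static u32 stored;			/* demo backing store */

static void demo_set(struct Qdisc *sch, u32 idx)
{
	(void)sch;
	stored = idx;
}

static u32 demo_get(struct Qdisc *sch)
{
	(void)sch;
	return stored;
}

int main(void)
{
	struct Qdisc_ops_excerpt ops = { demo_set, demo_get, demo_set, demo_get };

	ops.ingress_block_set(NULL, 22);	/* qdisc_block_indexes_set() path */
	printf("ingress block %u\n", ops.ingress_block_get(NULL));	/* dump path */
	return 0;
}
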
diff --git a/net/sched/sch_atm.c b/net/sched/sch_atm.c
index 2dbd249c0b2f..cd49afca9617 100644
--- a/net/sched/sch_atm.c
+++ b/net/sched/sch_atm.c
@@ -82,7 +82,8 @@ static inline struct atm_flow_data *lookup_flow(struct Qdisc *sch, u32 classid)
 }
 
 static int atm_tc_graft(struct Qdisc *sch, unsigned long arg,
-			struct Qdisc *new, struct Qdisc **old)
+			struct Qdisc *new, struct Qdisc **old,
+			struct netlink_ext_ack *extack)
 {
 	struct atm_qdisc_data *p = qdisc_priv(sch);
 	struct atm_flow_data *flow = (struct atm_flow_data *)arg;
@@ -191,7 +192,8 @@ static const struct nla_policy atm_policy[TCA_ATM_MAX + 1] = {
 };
 
 static int atm_tc_change(struct Qdisc *sch, u32 classid, u32 parent,
-			 struct nlattr **tca, unsigned long *arg)
+			 struct nlattr **tca, unsigned long *arg,
+			 struct netlink_ext_ack *extack)
 {
 	struct atm_qdisc_data *p = qdisc_priv(sch);
 	struct atm_flow_data *flow = (struct atm_flow_data *)*arg;
@@ -281,13 +283,15 @@ static int atm_tc_change(struct Qdisc *sch, u32 classid, u32 parent,
 		goto err_out;
 	}
 
-	error = tcf_block_get(&flow->block, &flow->filter_list, sch);
+	error = tcf_block_get(&flow->block, &flow->filter_list, sch,
+			      extack);
 	if (error) {
 		kfree(flow);
 		goto err_out;
 	}
 
-	flow->q = qdisc_create_dflt(sch->dev_queue, &pfifo_qdisc_ops, classid);
+	flow->q = qdisc_create_dflt(sch->dev_queue, &pfifo_qdisc_ops, classid,
+				    extack);
 	if (!flow->q)
 		flow->q = &noop_qdisc;
 	pr_debug("atm_tc_change: qdisc %p\n", flow->q);
@@ -356,7 +360,8 @@ static void atm_tc_walk(struct Qdisc *sch, struct qdisc_walker *walker)
 	}
 }
 
-static struct tcf_block *atm_tc_tcf_block(struct Qdisc *sch, unsigned long cl)
+static struct tcf_block *atm_tc_tcf_block(struct Qdisc *sch, unsigned long cl,
+					  struct netlink_ext_ack *extack)
 {
 	struct atm_qdisc_data *p = qdisc_priv(sch);
 	struct atm_flow_data *flow = (struct atm_flow_data *)cl;
@@ -531,7 +536,8 @@ static struct sk_buff *atm_tc_peek(struct Qdisc *sch)
 	return p->link.q->ops->peek(p->link.q);
 }
 
-static int atm_tc_init(struct Qdisc *sch, struct nlattr *opt)
+static int atm_tc_init(struct Qdisc *sch, struct nlattr *opt,
+		       struct netlink_ext_ack *extack)
 {
 	struct atm_qdisc_data *p = qdisc_priv(sch);
 	int err;
@@ -541,12 +547,13 @@ static int atm_tc_init(struct Qdisc *sch, struct nlattr *opt)
 	INIT_LIST_HEAD(&p->link.list);
 	list_add(&p->link.list, &p->flows);
 	p->link.q = qdisc_create_dflt(sch->dev_queue,
-				      &pfifo_qdisc_ops, sch->handle);
+				      &pfifo_qdisc_ops, sch->handle, extack);
 	if (!p->link.q)
 		p->link.q = &noop_qdisc;
 	pr_debug("atm_tc_init: link (%p) qdisc %p\n", &p->link, p->link.q);
 
-	err = tcf_block_get(&p->link.block, &p->link.filter_list, sch);
+	err = tcf_block_get(&p->link.block, &p->link.filter_list, sch,
+			    extack);
 	if (err)
 		return err;
 
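
The qdisc side mirrors the classifier side: init(), change(), graft() and the class ops all gain the argument, and helpers such as tcf_block_get() and qdisc_create_dflt() accept it so a failure deep in a class constructor can still name its cause. The class-ops shapes, reconstructed as a sketch from the atm hunks (trimmed, with forward declarations standing in for the real headers):

/* Sketch: Qdisc_class_ops shapes after this series, trimmed and
 * reconstructed from the atm/cbq hunks; not copied from a header. */
#include <stdio.h>

typedef unsigned int u32;

struct Qdisc;
struct nlattr;
struct tcf_block;
struct netlink_ext_ack;

struct Qdisc_class_ops_excerpt {
	int (*graft)(struct Qdisc *sch, unsigned long arg, struct Qdisc *new,
		     struct Qdisc **old, struct netlink_ext_ack *extack);
	int (*change)(struct Qdisc *sch, u32 classid, u32 parentid,
		      struct nlattr **tca, unsigned long *arg,
		      struct netlink_ext_ack *extack);
	struct tcf_block *(*tcf_block)(struct Qdisc *sch, unsigned long arg,
				       struct netlink_ext_ack *extack);
};

int main(void)
{
	printf("%zu bytes of callback pointers\n",
	       sizeof(struct Qdisc_class_ops_excerpt));
	return 0;
}
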
diff --git a/net/sched/sch_cbq.c b/net/sched/sch_cbq.c
index 525eb3a6d625..f42025d53cfe 100644
--- a/net/sched/sch_cbq.c
+++ b/net/sched/sch_cbq.c
@@ -1132,7 +1132,8 @@ static const struct nla_policy cbq_policy[TCA_CBQ_MAX + 1] = {
 	[TCA_CBQ_POLICE]	= { .len = sizeof(struct tc_cbq_police) },
 };
 
-static int cbq_init(struct Qdisc *sch, struct nlattr *opt)
+static int cbq_init(struct Qdisc *sch, struct nlattr *opt,
+		    struct netlink_ext_ack *extack)
 {
 	struct cbq_sched_data *q = qdisc_priv(sch);
 	struct nlattr *tb[TCA_CBQ_MAX + 1];
@@ -1143,22 +1144,27 @@ static int cbq_init(struct Qdisc *sch, struct nlattr *opt)
 	hrtimer_init(&q->delay_timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS_PINNED);
 	q->delay_timer.function = cbq_undelay;
 
-	if (!opt)
+	if (!opt) {
+		NL_SET_ERR_MSG(extack, "CBQ options are required for this operation");
 		return -EINVAL;
+	}
 
-	err = nla_parse_nested(tb, TCA_CBQ_MAX, opt, cbq_policy, NULL);
+	err = nla_parse_nested(tb, TCA_CBQ_MAX, opt, cbq_policy, extack);
 	if (err < 0)
 		return err;
 
-	if (tb[TCA_CBQ_RTAB] == NULL || tb[TCA_CBQ_RATE] == NULL)
+	if (!tb[TCA_CBQ_RTAB] || !tb[TCA_CBQ_RATE]) {
+		NL_SET_ERR_MSG(extack, "Rate specification missing or incomplete");
 		return -EINVAL;
+	}
 
 	r = nla_data(tb[TCA_CBQ_RATE]);
 
-	if ((q->link.R_tab = qdisc_get_rtab(r, tb[TCA_CBQ_RTAB])) == NULL)
+	q->link.R_tab = qdisc_get_rtab(r, tb[TCA_CBQ_RTAB], extack);
+	if (!q->link.R_tab)
 		return -EINVAL;
 
-	err = tcf_block_get(&q->link.block, &q->link.filter_list, sch);
+	err = tcf_block_get(&q->link.block, &q->link.filter_list, sch, extack);
 	if (err)
 		goto put_rtab;
 
@@ -1170,7 +1176,7 @@ static int cbq_init(struct Qdisc *sch, struct nlattr *opt)
 	q->link.common.classid = sch->handle;
 	q->link.qdisc = sch;
 	q->link.q = qdisc_create_dflt(sch->dev_queue, &pfifo_qdisc_ops,
-				      sch->handle);
+				      sch->handle, NULL);
 	if (!q->link.q)
 		q->link.q = &noop_qdisc;
 	else
@@ -1369,13 +1375,13 @@ cbq_dump_class_stats(struct Qdisc *sch, unsigned long arg,
 }
 
 static int cbq_graft(struct Qdisc *sch, unsigned long arg, struct Qdisc *new,
-		     struct Qdisc **old)
+		     struct Qdisc **old, struct netlink_ext_ack *extack)
 {
 	struct cbq_class *cl = (struct cbq_class *)arg;
 
 	if (new == NULL) {
-		new = qdisc_create_dflt(sch->dev_queue,
-					&pfifo_qdisc_ops, cl->common.classid);
+		new = qdisc_create_dflt(sch->dev_queue, &pfifo_qdisc_ops,
+					cl->common.classid, extack);
 		if (new == NULL)
 			return -ENOBUFS;
 	}
@@ -1450,7 +1456,7 @@ static void cbq_destroy(struct Qdisc *sch)
 
 static int
 cbq_change_class(struct Qdisc *sch, u32 classid, u32 parentid, struct nlattr **tca,
-		 unsigned long *arg)
+		 unsigned long *arg, struct netlink_ext_ack *extack)
 {
 	int err;
 	struct cbq_sched_data *q = qdisc_priv(sch);
@@ -1460,29 +1466,37 @@ cbq_change_class(struct Qdisc *sch, u32 classid, u32 parentid, struct nlattr **t
 	struct cbq_class *parent;
 	struct qdisc_rate_table *rtab = NULL;
 
-	if (opt == NULL)
+	if (!opt) {
+		NL_SET_ERR_MSG(extack, "Mandatory qdisc options missing");
 		return -EINVAL;
+	}
 
-	err = nla_parse_nested(tb, TCA_CBQ_MAX, opt, cbq_policy, NULL);
+	err = nla_parse_nested(tb, TCA_CBQ_MAX, opt, cbq_policy, extack);
 	if (err < 0)
 		return err;
 
-	if (tb[TCA_CBQ_OVL_STRATEGY] || tb[TCA_CBQ_POLICE])
+	if (tb[TCA_CBQ_OVL_STRATEGY] || tb[TCA_CBQ_POLICE]) {
+		NL_SET_ERR_MSG(extack, "Neither overlimit strategy nor policing attributes can be used for changing class params");
 		return -EOPNOTSUPP;
+	}
 
 	if (cl) {
 		/* Check parent */
 		if (parentid) {
 			if (cl->tparent &&
-			    cl->tparent->common.classid != parentid)
+			    cl->tparent->common.classid != parentid) {
+				NL_SET_ERR_MSG(extack, "Invalid parent id");
 				return -EINVAL;
-			if (!cl->tparent && parentid != TC_H_ROOT)
+			}
+			if (!cl->tparent && parentid != TC_H_ROOT) {
+				NL_SET_ERR_MSG(extack, "Parent must be root");
 				return -EINVAL;
+			}
 		}
 
 		if (tb[TCA_CBQ_RATE]) {
 			rtab = qdisc_get_rtab(nla_data(tb[TCA_CBQ_RATE]),
-					      tb[TCA_CBQ_RTAB]);
+					      tb[TCA_CBQ_RTAB], extack);
 			if (rtab == NULL)
 				return -EINVAL;
 		}
@@ -1494,6 +1508,7 @@ cbq_change_class(struct Qdisc *sch, u32 classid, u32 parentid, struct nlattr **t
 						    qdisc_root_sleeping_running(sch),
 						    tca[TCA_RATE]);
 			if (err) {
+				NL_SET_ERR_MSG(extack, "Failed to replace specified rate estimator");
 				qdisc_put_rtab(rtab);
 				return err;
 			}
@@ -1532,19 +1547,23 @@ cbq_change_class(struct Qdisc *sch, u32 classid, u32 parentid, struct nlattr **t
 	if (parentid == TC_H_ROOT)
 		return -EINVAL;
 
-	if (tb[TCA_CBQ_WRROPT] == NULL || tb[TCA_CBQ_RATE] == NULL ||
-	    tb[TCA_CBQ_LSSOPT] == NULL)
+	if (!tb[TCA_CBQ_WRROPT] || !tb[TCA_CBQ_RATE] || !tb[TCA_CBQ_LSSOPT]) {
+		NL_SET_ERR_MSG(extack, "One of the following attributes MUST be specified: WRR, rate or link sharing");
 		return -EINVAL;
+	}
 
-	rtab = qdisc_get_rtab(nla_data(tb[TCA_CBQ_RATE]), tb[TCA_CBQ_RTAB]);
+	rtab = qdisc_get_rtab(nla_data(tb[TCA_CBQ_RATE]), tb[TCA_CBQ_RTAB],
+			      extack);
 	if (rtab == NULL)
 		return -EINVAL;
 
 	if (classid) {
 		err = -EINVAL;
 		if (TC_H_MAJ(classid ^ sch->handle) ||
-		    cbq_class_lookup(q, classid))
+		    cbq_class_lookup(q, classid)) {
+			NL_SET_ERR_MSG(extack, "Specified class not found");
 			goto failure;
+		}
 	} else {
 		int i;
 		classid = TC_H_MAKE(sch->handle, 0x8000);
@@ -1556,8 +1575,10 @@ cbq_change_class(struct Qdisc *sch, u32 classid, u32 parentid, struct nlattr **t
 				break;
 		}
 		err = -ENOSR;
-		if (i >= 0x8000)
+		if (i >= 0x8000) {
+			NL_SET_ERR_MSG(extack, "Unable to generate classid");
 			goto failure;
+		}
 		classid = classid|q->hgenerator;
 	}
 
@@ -1565,8 +1586,10 @@ cbq_change_class(struct Qdisc *sch, u32 classid, u32 parentid, struct nlattr **t
 	if (parentid) {
 		parent = cbq_class_lookup(q, parentid);
 		err = -EINVAL;
-		if (parent == NULL)
+		if (!parent) {
+			NL_SET_ERR_MSG(extack, "Failed to find parentid");
 			goto failure;
+		}
 	}
 
 	err = -ENOBUFS;
@@ -1574,7 +1597,7 @@ cbq_change_class(struct Qdisc *sch, u32 classid, u32 parentid, struct nlattr **t
 	if (cl == NULL)
 		goto failure;
 
-	err = tcf_block_get(&cl->block, &cl->filter_list, sch);
+	err = tcf_block_get(&cl->block, &cl->filter_list, sch, extack);
 	if (err) {
 		kfree(cl);
 		return err;
@@ -1586,6 +1609,7 @@ cbq_change_class(struct Qdisc *sch, u32 classid, u32 parentid, struct nlattr **t
 					qdisc_root_sleeping_running(sch),
 					tca[TCA_RATE]);
 		if (err) {
+			NL_SET_ERR_MSG(extack, "Couldn't create new estimator");
 			tcf_block_put(cl->block);
 			kfree(cl);
 			goto failure;
@@ -1594,7 +1618,8 @@ cbq_change_class(struct Qdisc *sch, u32 classid, u32 parentid, struct nlattr **t
 
 	cl->R_tab = rtab;
 	rtab = NULL;
-	cl->q = qdisc_create_dflt(sch->dev_queue, &pfifo_qdisc_ops, classid);
+	cl->q = qdisc_create_dflt(sch->dev_queue, &pfifo_qdisc_ops, classid,
+				  NULL);
 	if (!cl->q)
 		cl->q = &noop_qdisc;
 	else
@@ -1678,7 +1703,8 @@ static int cbq_delete(struct Qdisc *sch, unsigned long arg)
 	return 0;
 }
 
-static struct tcf_block *cbq_tcf_block(struct Qdisc *sch, unsigned long arg)
+static struct tcf_block *cbq_tcf_block(struct Qdisc *sch, unsigned long arg,
+				       struct netlink_ext_ack *extack)
 {
 	struct cbq_sched_data *q = qdisc_priv(sch);
 	struct cbq_class *cl = (struct cbq_class *)arg;
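
The hunks above are representative of the whole series: every validation
failure now attaches a human-readable string to the netlink extended ack
before returning the usual errno, and extack is threaded through init,
change, graft and the tcf_block getters so lower layers can do the same.
A minimal sketch of the pattern in a hypothetical qdisc (the foo_* names
and attribute space are illustrative, not part of this patch):

#include <net/netlink.h>
#include <net/pkt_sched.h>

/* Hypothetical attribute space, for illustration only */
enum {
	TCA_FOO_UNSPEC,
	TCA_FOO_LIMIT,
	__TCA_FOO_MAX,
};
#define TCA_FOO_MAX (__TCA_FOO_MAX - 1)

static const struct nla_policy foo_policy[TCA_FOO_MAX + 1] = {
	[TCA_FOO_LIMIT] = { .type = NLA_U32 },
};

static int foo_init(struct Qdisc *sch, struct nlattr *opt,
		    struct netlink_ext_ack *extack)
{
	struct nlattr *tb[TCA_FOO_MAX + 1];
	int err;

	if (!opt) {
		/* The string rides back to user space in the ack */
		NL_SET_ERR_MSG(extack, "foo qdisc options are mandatory");
		return -EINVAL;
	}

	/* Passing extack down reports attribute-level errors too */
	err = nla_parse_nested(tb, TCA_FOO_MAX, opt, foo_policy, extack);
	if (err < 0)
		return err;

	if (!tb[TCA_FOO_LIMIT]) {
		NL_SET_ERR_MSG(extack, "Missing mandatory limit attribute");
		return -EINVAL;
	}

	return 0;
}

With this in place a new enough tc can print the message next to the
errno instead of a bare "RTNETLINK answers: Invalid argument".
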
diff --git a/net/sched/sch_cbs.c b/net/sched/sch_cbs.c
index 7a72980c1509..cdd96b9a27bc 100644
--- a/net/sched/sch_cbs.c
+++ b/net/sched/sch_cbs.c
@@ -219,14 +219,17 @@ static void cbs_disable_offload(struct net_device *dev,
 }
 
 static int cbs_enable_offload(struct net_device *dev, struct cbs_sched_data *q,
-			      const struct tc_cbs_qopt *opt)
+			      const struct tc_cbs_qopt *opt,
+			      struct netlink_ext_ack *extack)
 {
 	const struct net_device_ops *ops = dev->netdev_ops;
 	struct tc_cbs_qopt_offload cbs = { };
 	int err;
 
-	if (!ops->ndo_setup_tc)
+	if (!ops->ndo_setup_tc) {
+		NL_SET_ERR_MSG(extack, "Specified device does not support cbs offload");
 		return -EOPNOTSUPP;
+	}
 
 	cbs.queue = q->queue;
 
@@ -237,8 +240,10 @@ static int cbs_enable_offload(struct net_device *dev, struct cbs_sched_data *q,
 	cbs.sendslope = opt->sendslope;
 
 	err = ops->ndo_setup_tc(dev, TC_SETUP_QDISC_CBS, &cbs);
-	if (err < 0)
+	if (err < 0) {
+		NL_SET_ERR_MSG(extack, "Specified device failed to setup cbs hardware offload");
 		return err;
+	}
 
 	q->enqueue = cbs_enqueue_offload;
 	q->dequeue = cbs_dequeue_offload;
@@ -246,7 +251,8 @@ static int cbs_enable_offload(struct net_device *dev, struct cbs_sched_data *q,
 	return 0;
 }
 
-static int cbs_change(struct Qdisc *sch, struct nlattr *opt)
+static int cbs_change(struct Qdisc *sch, struct nlattr *opt,
+		      struct netlink_ext_ack *extack)
 {
 	struct cbs_sched_data *q = qdisc_priv(sch);
 	struct net_device *dev = qdisc_dev(sch);
@@ -254,12 +260,14 @@ static int cbs_change(struct Qdisc *sch, struct nlattr *opt)
 	struct tc_cbs_qopt *qopt;
 	int err;
 
-	err = nla_parse_nested(tb, TCA_CBS_MAX, opt, cbs_policy, NULL);
+	err = nla_parse_nested(tb, TCA_CBS_MAX, opt, cbs_policy, extack);
 	if (err < 0)
 		return err;
 
-	if (!tb[TCA_CBS_PARMS])
+	if (!tb[TCA_CBS_PARMS]) {
+		NL_SET_ERR_MSG(extack, "Missing CBS parameter which are mandatory");
 		return -EINVAL;
+	}
 
 	qopt = nla_data(tb[TCA_CBS_PARMS]);
 
@@ -276,7 +284,7 @@ static int cbs_change(struct Qdisc *sch, struct nlattr *opt)
 
 		cbs_disable_offload(dev, q);
 	} else {
-		err = cbs_enable_offload(dev, q, qopt);
+		err = cbs_enable_offload(dev, q, qopt, extack);
 		if (err < 0)
 			return err;
 	}
@@ -291,13 +299,16 @@ static int cbs_change(struct Qdisc *sch, struct nlattr *opt)
 	return 0;
 }
 
-static int cbs_init(struct Qdisc *sch, struct nlattr *opt)
+static int cbs_init(struct Qdisc *sch, struct nlattr *opt,
+		    struct netlink_ext_ack *extack)
 {
 	struct cbs_sched_data *q = qdisc_priv(sch);
 	struct net_device *dev = qdisc_dev(sch);
 
-	if (!opt)
+	if (!opt) {
+		NL_SET_ERR_MSG(extack, "Missing CBS qdisc options  which are mandatory");
 		return -EINVAL;
+	}
 
 	q->queue = sch->dev_queue - netdev_get_tx_queue(dev, 0);
 
@@ -306,7 +317,7 @@ static int cbs_init(struct Qdisc *sch, struct nlattr *opt)
 
 	qdisc_watchdog_init(&q->watchdog, sch);
 
-	return cbs_change(sch, opt);
+	return cbs_change(sch, opt, extack);
 }
 
 static void cbs_destroy(struct Qdisc *sch)
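
For context on the offload path cbs_enable_offload() drives: the qdisc
fills a struct tc_cbs_qopt_offload and hands it to the driver through
ndo_setup_tc(). A driver-side sketch of that hook (demo_setup_tc and its
body are illustrative; a real driver programs its queue shapers here):

#include <linux/netdevice.h>
#include <net/pkt_sched.h>

static int demo_setup_tc(struct net_device *dev, enum tc_setup_type type,
			 void *type_data)
{
	struct tc_cbs_qopt_offload *cbs = type_data;

	if (type != TC_SETUP_QDISC_CBS)
		return -EOPNOTSUPP;

	if (!cbs->enable) {
		/* tear down shaping on cbs->queue */
		return 0;
	}

	/* program idleslope/sendslope/hicredit/locredit for cbs->queue;
	 * any error returned here surfaces the extack message added in
	 * cbs_enable_offload() above
	 */
	return 0;
}
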
diff --git a/net/sched/sch_choke.c b/net/sched/sch_choke.c
index 531250fceb9e..eafc0d17d174 100644
--- a/net/sched/sch_choke.c
+++ b/net/sched/sch_choke.c
@@ -344,7 +344,8 @@ static void choke_free(void *addr)
 	kvfree(addr);
 }
 
-static int choke_change(struct Qdisc *sch, struct nlattr *opt)
+static int choke_change(struct Qdisc *sch, struct nlattr *opt,
+			struct netlink_ext_ack *extack)
 {
 	struct choke_sched_data *q = qdisc_priv(sch);
 	struct nlattr *tb[TCA_CHOKE_MAX + 1];
@@ -431,9 +432,10 @@ static int choke_change(struct Qdisc *sch, struct nlattr *opt)
 	return 0;
 }
 
-static int choke_init(struct Qdisc *sch, struct nlattr *opt)
+static int choke_init(struct Qdisc *sch, struct nlattr *opt,
+		      struct netlink_ext_ack *extack)
 {
-	return choke_change(sch, opt);
+	return choke_change(sch, opt, extack);
 }
 
 static int choke_dump(struct Qdisc *sch, struct sk_buff *skb)
diff --git a/net/sched/sch_codel.c b/net/sched/sch_codel.c
index c518a1efcb9d..17cd81f84b5d 100644
--- a/net/sched/sch_codel.c
+++ b/net/sched/sch_codel.c
@@ -130,7 +130,8 @@ static const struct nla_policy codel_policy[TCA_CODEL_MAX + 1] = {
 	[TCA_CODEL_CE_THRESHOLD]= { .type = NLA_U32 },
 };
 
-static int codel_change(struct Qdisc *sch, struct nlattr *opt)
+static int codel_change(struct Qdisc *sch, struct nlattr *opt,
+			struct netlink_ext_ack *extack)
 {
 	struct codel_sched_data *q = qdisc_priv(sch);
 	struct nlattr *tb[TCA_CODEL_MAX + 1];
@@ -184,7 +185,8 @@ static int codel_change(struct Qdisc *sch, struct nlattr *opt)
 	return 0;
 }
 
-static int codel_init(struct Qdisc *sch, struct nlattr *opt)
+static int codel_init(struct Qdisc *sch, struct nlattr *opt,
+		      struct netlink_ext_ack *extack)
 {
 	struct codel_sched_data *q = qdisc_priv(sch);
 
@@ -196,7 +198,7 @@ static int codel_init(struct Qdisc *sch, struct nlattr *opt)
 	q->params.mtu = psched_mtu(qdisc_dev(sch));
 
 	if (opt) {
-		int err = codel_change(sch, opt);
+		int err = codel_change(sch, opt, extack);
 
 		if (err)
 			return err;
diff --git a/net/sched/sch_drr.c b/net/sched/sch_drr.c
index 5bbcef3dcd8c..e0b0cf8a9939 100644
--- a/net/sched/sch_drr.c
+++ b/net/sched/sch_drr.c
@@ -64,7 +64,8 @@ static const struct nla_policy drr_policy[TCA_DRR_MAX + 1] = {
 };
 
 static int drr_change_class(struct Qdisc *sch, u32 classid, u32 parentid,
-			    struct nlattr **tca, unsigned long *arg)
+			    struct nlattr **tca, unsigned long *arg,
+			    struct netlink_ext_ack *extack)
 {
 	struct drr_sched *q = qdisc_priv(sch);
 	struct drr_class *cl = (struct drr_class *)*arg;
@@ -73,17 +74,21 @@ static int drr_change_class(struct Qdisc *sch, u32 classid, u32 parentid,
 	u32 quantum;
 	int err;
 
-	if (!opt)
+	if (!opt) {
+		NL_SET_ERR_MSG(extack, "DRR options are required for this operation");
 		return -EINVAL;
+	}
 
-	err = nla_parse_nested(tb, TCA_DRR_MAX, opt, drr_policy, NULL);
+	err = nla_parse_nested(tb, TCA_DRR_MAX, opt, drr_policy, extack);
 	if (err < 0)
 		return err;
 
 	if (tb[TCA_DRR_QUANTUM]) {
 		quantum = nla_get_u32(tb[TCA_DRR_QUANTUM]);
-		if (quantum == 0)
+		if (quantum == 0) {
+			NL_SET_ERR_MSG(extack, "Specified DRR quantum cannot be zero");
 			return -EINVAL;
+		}
 	} else
 		quantum = psched_mtu(qdisc_dev(sch));
 
@@ -94,8 +99,10 @@ static int drr_change_class(struct Qdisc *sch, u32 classid, u32 parentid,
 						    NULL,
 						    qdisc_root_sleeping_running(sch),
 						    tca[TCA_RATE]);
-			if (err)
+			if (err) {
+				NL_SET_ERR_MSG(extack, "Failed to replace estimator");
 				return err;
+			}
 		}
 
 		sch_tree_lock(sch);
@@ -113,7 +120,8 @@ static int drr_change_class(struct Qdisc *sch, u32 classid, u32 parentid,
 	cl->common.classid = classid;
 	cl->quantum	   = quantum;
 	cl->qdisc	   = qdisc_create_dflt(sch->dev_queue,
-					       &pfifo_qdisc_ops, classid);
+					       &pfifo_qdisc_ops, classid,
+					       NULL);
 	if (cl->qdisc == NULL)
 		cl->qdisc = &noop_qdisc;
 	else
@@ -125,6 +133,7 @@ static int drr_change_class(struct Qdisc *sch, u32 classid, u32 parentid,
 					    qdisc_root_sleeping_running(sch),
 					    tca[TCA_RATE]);
 		if (err) {
+			NL_SET_ERR_MSG(extack, "Failed to replace estimator");
 			qdisc_destroy(cl->qdisc);
 			kfree(cl);
 			return err;
@@ -172,12 +181,15 @@ static unsigned long drr_search_class(struct Qdisc *sch, u32 classid)
 	return (unsigned long)drr_find_class(sch, classid);
 }
 
-static struct tcf_block *drr_tcf_block(struct Qdisc *sch, unsigned long cl)
+static struct tcf_block *drr_tcf_block(struct Qdisc *sch, unsigned long cl,
+				       struct netlink_ext_ack *extack)
 {
 	struct drr_sched *q = qdisc_priv(sch);
 
-	if (cl)
+	if (cl) {
+		NL_SET_ERR_MSG(extack, "DRR classid must be zero");
 		return NULL;
+	}
 
 	return q->block;
 }
@@ -201,13 +213,14 @@ static void drr_unbind_tcf(struct Qdisc *sch, unsigned long arg)
 }
 
 static int drr_graft_class(struct Qdisc *sch, unsigned long arg,
-			   struct Qdisc *new, struct Qdisc **old)
+			   struct Qdisc *new, struct Qdisc **old,
+			   struct netlink_ext_ack *extack)
 {
 	struct drr_class *cl = (struct drr_class *)arg;
 
 	if (new == NULL) {
-		new = qdisc_create_dflt(sch->dev_queue,
-					&pfifo_qdisc_ops, cl->common.classid);
+		new = qdisc_create_dflt(sch->dev_queue, &pfifo_qdisc_ops,
+					cl->common.classid, NULL);
 		if (new == NULL)
 			new = &noop_qdisc;
 	}
@@ -408,12 +421,13 @@ out:
 	return NULL;
 }
 
-static int drr_init_qdisc(struct Qdisc *sch, struct nlattr *opt)
+static int drr_init_qdisc(struct Qdisc *sch, struct nlattr *opt,
+			  struct netlink_ext_ack *extack)
 {
 	struct drr_sched *q = qdisc_priv(sch);
 	int err;
 
-	err = tcf_block_get(&q->block, &q->filter_list, sch);
+	err = tcf_block_get(&q->block, &q->filter_list, sch, extack);
 	if (err)
 		return err;
 	err = qdisc_class_hash_init(&q->clhash);
diff --git a/net/sched/sch_dsmark.c b/net/sched/sch_dsmark.c
index fb4fb71c68cf..049714c57075 100644
--- a/net/sched/sch_dsmark.c
+++ b/net/sched/sch_dsmark.c
@@ -61,7 +61,8 @@ static inline int dsmark_valid_index(struct dsmark_qdisc_data *p, u16 index)
 /* ------------------------- Class/flow operations ------------------------- */
 
 static int dsmark_graft(struct Qdisc *sch, unsigned long arg,
-			struct Qdisc *new, struct Qdisc **old)
+			struct Qdisc *new, struct Qdisc **old,
+			struct netlink_ext_ack *extack)
 {
 	struct dsmark_qdisc_data *p = qdisc_priv(sch);
 
@@ -70,7 +71,7 @@ static int dsmark_graft(struct Qdisc *sch, unsigned long arg,
 
 	if (new == NULL) {
 		new = qdisc_create_dflt(sch->dev_queue, &pfifo_qdisc_ops,
-					sch->handle);
+					sch->handle, NULL);
 		if (new == NULL)
 			new = &noop_qdisc;
 	}
@@ -112,7 +113,8 @@ static const struct nla_policy dsmark_policy[TCA_DSMARK_MAX + 1] = {
 };
 
 static int dsmark_change(struct Qdisc *sch, u32 classid, u32 parent,
-			 struct nlattr **tca, unsigned long *arg)
+			 struct nlattr **tca, unsigned long *arg,
+			 struct netlink_ext_ack *extack)
 {
 	struct dsmark_qdisc_data *p = qdisc_priv(sch);
 	struct nlattr *opt = tca[TCA_OPTIONS];
@@ -184,7 +186,8 @@ ignore:
 	}
 }
 
-static struct tcf_block *dsmark_tcf_block(struct Qdisc *sch, unsigned long cl)
+static struct tcf_block *dsmark_tcf_block(struct Qdisc *sch, unsigned long cl,
+					  struct netlink_ext_ack *extack)
 {
 	struct dsmark_qdisc_data *p = qdisc_priv(sch);
 
@@ -330,7 +333,8 @@ static struct sk_buff *dsmark_peek(struct Qdisc *sch)
 	return p->q->ops->peek(p->q);
 }
 
-static int dsmark_init(struct Qdisc *sch, struct nlattr *opt)
+static int dsmark_init(struct Qdisc *sch, struct nlattr *opt,
+		       struct netlink_ext_ack *extack)
 {
 	struct dsmark_qdisc_data *p = qdisc_priv(sch);
 	struct nlattr *tb[TCA_DSMARK_MAX + 1];
@@ -344,7 +348,7 @@ static int dsmark_init(struct Qdisc *sch, struct nlattr *opt)
 	if (!opt)
 		goto errout;
 
-	err = tcf_block_get(&p->block, &p->filter_list, sch);
+	err = tcf_block_get(&p->block, &p->filter_list, sch, extack);
 	if (err)
 		return err;
 
@@ -377,7 +381,8 @@ static int dsmark_init(struct Qdisc *sch, struct nlattr *opt)
 	p->default_index = default_index;
 	p->set_tc_index = nla_get_flag(tb[TCA_DSMARK_SET_TC_INDEX]);
 
-	p->q = qdisc_create_dflt(sch->dev_queue, &pfifo_qdisc_ops, sch->handle);
+	p->q = qdisc_create_dflt(sch->dev_queue, &pfifo_qdisc_ops, sch->handle,
+				 NULL);
 	if (p->q == NULL)
 		p->q = &noop_qdisc;
 	else
diff --git a/net/sched/sch_fifo.c b/net/sched/sch_fifo.c
index 1e37247656f8..24893d3b5d22 100644
--- a/net/sched/sch_fifo.c
+++ b/net/sched/sch_fifo.c
@@ -55,7 +55,8 @@ static int pfifo_tail_enqueue(struct sk_buff *skb, struct Qdisc *sch,
 	return NET_XMIT_CN;
 }
 
-static int fifo_init(struct Qdisc *sch, struct nlattr *opt)
+static int fifo_init(struct Qdisc *sch, struct nlattr *opt,
+		     struct netlink_ext_ack *extack)
 {
 	bool bypass;
 	bool is_bfifo = sch->ops == &bfifo_qdisc_ops;
@@ -157,7 +158,7 @@ int fifo_set_limit(struct Qdisc *q, unsigned int limit)
 		nla->nla_len = nla_attr_size(sizeof(struct tc_fifo_qopt));
 		((struct tc_fifo_qopt *)nla_data(nla))->limit = limit;
 
-		ret = q->ops->change(q, nla);
+		ret = q->ops->change(q, nla, NULL);
 		kfree(nla);
 	}
 	return ret;
@@ -165,12 +166,14 @@ int fifo_set_limit(struct Qdisc *q, unsigned int limit)
 EXPORT_SYMBOL(fifo_set_limit);
 
 struct Qdisc *fifo_create_dflt(struct Qdisc *sch, struct Qdisc_ops *ops,
-			       unsigned int limit)
+			       unsigned int limit,
+			       struct netlink_ext_ack *extack)
 {
 	struct Qdisc *q;
 	int err = -ENOMEM;
 
-	q = qdisc_create_dflt(sch->dev_queue, ops, TC_H_MAKE(sch->handle, 1));
+	q = qdisc_create_dflt(sch->dev_queue, ops, TC_H_MAKE(sch->handle, 1),
+			      extack);
 	if (q) {
 		err = fifo_set_limit(q, limit);
 		if (err < 0) {
diff --git a/net/sched/sch_fq.c b/net/sched/sch_fq.c
index 263d16e3219e..a366e4c9413a 100644
--- a/net/sched/sch_fq.c
+++ b/net/sched/sch_fq.c
@@ -685,7 +685,8 @@ static const struct nla_policy fq_policy[TCA_FQ_MAX + 1] = {
 	[TCA_FQ_LOW_RATE_THRESHOLD]	= { .type = NLA_U32 },
 };
 
-static int fq_change(struct Qdisc *sch, struct nlattr *opt)
+static int fq_change(struct Qdisc *sch, struct nlattr *opt,
+		     struct netlink_ext_ack *extack)
 {
 	struct fq_sched_data *q = qdisc_priv(sch);
 	struct nlattr *tb[TCA_FQ_MAX + 1];
@@ -788,7 +789,8 @@ static void fq_destroy(struct Qdisc *sch)
 	qdisc_watchdog_cancel(&q->watchdog);
 }
 
-static int fq_init(struct Qdisc *sch, struct nlattr *opt)
+static int fq_init(struct Qdisc *sch, struct nlattr *opt,
+		   struct netlink_ext_ack *extack)
 {
 	struct fq_sched_data *q = qdisc_priv(sch);
 	int err;
@@ -811,7 +813,7 @@ static int fq_init(struct Qdisc *sch, struct nlattr *opt)
 	qdisc_watchdog_init(&q->watchdog, sch);
 
 	if (opt)
-		err = fq_change(sch, opt);
+		err = fq_change(sch, opt, extack);
 	else
 		err = fq_resize(sch, q->fq_trees_log);
 
diff --git a/net/sched/sch_fq_codel.c b/net/sched/sch_fq_codel.c
index 0305d791ea94..22fa13cf5d8b 100644
--- a/net/sched/sch_fq_codel.c
+++ b/net/sched/sch_fq_codel.c
@@ -377,7 +377,8 @@ static const struct nla_policy fq_codel_policy[TCA_FQ_CODEL_MAX + 1] = {
 	[TCA_FQ_CODEL_MEMORY_LIMIT] = { .type = NLA_U32 },
 };
 
-static int fq_codel_change(struct Qdisc *sch, struct nlattr *opt)
+static int fq_codel_change(struct Qdisc *sch, struct nlattr *opt,
+			   struct netlink_ext_ack *extack)
 {
 	struct fq_codel_sched_data *q = qdisc_priv(sch);
 	struct nlattr *tb[TCA_FQ_CODEL_MAX + 1];
@@ -458,7 +459,8 @@ static void fq_codel_destroy(struct Qdisc *sch)
 	kvfree(q->flows);
 }
 
-static int fq_codel_init(struct Qdisc *sch, struct nlattr *opt)
+static int fq_codel_init(struct Qdisc *sch, struct nlattr *opt,
+			 struct netlink_ext_ack *extack)
 {
 	struct fq_codel_sched_data *q = qdisc_priv(sch);
 	int i;
@@ -477,12 +479,12 @@ static int fq_codel_init(struct Qdisc *sch, struct nlattr *opt)
 	q->cparams.mtu = psched_mtu(qdisc_dev(sch));
 
 	if (opt) {
-		int err = fq_codel_change(sch, opt);
+		int err = fq_codel_change(sch, opt, extack);
 		if (err)
 			return err;
 	}
 
-	err = tcf_block_get(&q->block, &q->filter_list, sch);
+	err = tcf_block_get(&q->block, &q->filter_list, sch, extack);
 	if (err)
 		return err;
 
@@ -595,7 +597,8 @@ static void fq_codel_unbind(struct Qdisc *q, unsigned long cl)
 {
 }
 
-static struct tcf_block *fq_codel_tcf_block(struct Qdisc *sch, unsigned long cl)
+static struct tcf_block *fq_codel_tcf_block(struct Qdisc *sch, unsigned long cl,
+					    struct netlink_ext_ack *extack)
 {
 	struct fq_codel_sched_data *q = qdisc_priv(sch);
 
diff --git a/net/sched/sch_generic.c b/net/sched/sch_generic.c
index cac003fddf3e..190570f21b20 100644
--- a/net/sched/sch_generic.c
+++ b/net/sched/sch_generic.c
@@ -26,11 +26,13 @@
 #include <linux/list.h>
 #include <linux/slab.h>
 #include <linux/if_vlan.h>
+#include <linux/skb_array.h>
 #include <linux/if_macvlan.h>
 #include <net/sch_generic.h>
 #include <net/pkt_sched.h>
 #include <net/dst.h>
 #include <trace/events/qdisc.h>
+#include <net/xfrm.h>
 
 /* Qdisc to use by default */
 const struct Qdisc_ops *default_qdisc_ops = &pfifo_fast_ops;
@@ -47,17 +49,115 @@ EXPORT_SYMBOL(default_qdisc_ops);
  * - updates to tree and tree walking are only done under the rtnl mutex.
  */
 
-static inline int dev_requeue_skb(struct sk_buff *skb, struct Qdisc *q)
+static inline struct sk_buff *__skb_dequeue_bad_txq(struct Qdisc *q)
+{
+	const struct netdev_queue *txq = q->dev_queue;
+	spinlock_t *lock = NULL;
+	struct sk_buff *skb;
+
+	if (q->flags & TCQ_F_NOLOCK) {
+		lock = qdisc_lock(q);
+		spin_lock(lock);
+	}
+
+	skb = skb_peek(&q->skb_bad_txq);
+	if (skb) {
+		/* check the reason of requeuing without tx lock first */
+		txq = skb_get_tx_queue(txq->dev, skb);
+		if (!netif_xmit_frozen_or_stopped(txq)) {
+			skb = __skb_dequeue(&q->skb_bad_txq);
+			if (qdisc_is_percpu_stats(q)) {
+				qdisc_qstats_cpu_backlog_dec(q, skb);
+				qdisc_qstats_cpu_qlen_dec(q);
+			} else {
+				qdisc_qstats_backlog_dec(q, skb);
+				q->q.qlen--;
+			}
+		} else {
+			skb = NULL;
+		}
+	}
+
+	if (lock)
+		spin_unlock(lock);
+
+	return skb;
+}
+
+static inline struct sk_buff *qdisc_dequeue_skb_bad_txq(struct Qdisc *q)
 {
-	q->gso_skb = skb;
-	q->qstats.requeues++;
-	qdisc_qstats_backlog_inc(q, skb);
-	q->q.qlen++;	/* it's still part of the queue */
+	struct sk_buff *skb = skb_peek(&q->skb_bad_txq);
+
+	if (unlikely(skb))
+		skb = __skb_dequeue_bad_txq(q);
+
+	return skb;
+}
+
+static inline void qdisc_enqueue_skb_bad_txq(struct Qdisc *q,
+					     struct sk_buff *skb)
+{
+	spinlock_t *lock = NULL;
+
+	if (q->flags & TCQ_F_NOLOCK) {
+		lock = qdisc_lock(q);
+		spin_lock(lock);
+	}
+
+	__skb_queue_tail(&q->skb_bad_txq, skb);
+
+	if (lock)
+		spin_unlock(lock);
+}
+
+static inline int __dev_requeue_skb(struct sk_buff *skb, struct Qdisc *q)
+{
+	while (skb) {
+		struct sk_buff *next = skb->next;
+
+		__skb_queue_tail(&q->gso_skb, skb);
+		q->qstats.requeues++;
+		qdisc_qstats_backlog_inc(q, skb);
+		q->q.qlen++;	/* it's still part of the queue */
+
+		skb = next;
+	}
+	__netif_schedule(q);
+
+	return 0;
+}
+
+static inline int dev_requeue_skb_locked(struct sk_buff *skb, struct Qdisc *q)
+{
+	spinlock_t *lock = qdisc_lock(q);
+
+	spin_lock(lock);
+	while (skb) {
+		struct sk_buff *next = skb->next;
+
+		__skb_queue_tail(&q->gso_skb, skb);
+
+		qdisc_qstats_cpu_requeues_inc(q);
+		qdisc_qstats_cpu_backlog_inc(q, skb);
+		qdisc_qstats_cpu_qlen_inc(q);
+
+		skb = next;
+	}
+	spin_unlock(lock);
+
 	__netif_schedule(q);
 
 	return 0;
 }
 
+static inline int dev_requeue_skb(struct sk_buff *skb, struct Qdisc *q)
+{
+	if (q->flags & TCQ_F_NOLOCK)
+		return dev_requeue_skb_locked(skb, q);
+	else
+		return __dev_requeue_skb(skb, q);
+}
+
 static void try_bulk_dequeue_skb(struct Qdisc *q,
 				 struct sk_buff *skb,
 				 const struct netdev_queue *txq,
@@ -95,9 +195,15 @@ static void try_bulk_dequeue_skb_slow(struct Qdisc *q,
 		if (!nskb)
 			break;
 		if (unlikely(skb_get_queue_mapping(nskb) != mapping)) {
-			q->skb_bad_txq = nskb;
-			qdisc_qstats_backlog_inc(q, nskb);
-			q->q.qlen++;
+			qdisc_enqueue_skb_bad_txq(q, nskb);
+
+			if (qdisc_is_percpu_stats(q)) {
+				qdisc_qstats_cpu_backlog_inc(q, nskb);
+				qdisc_qstats_cpu_qlen_inc(q);
+			} else {
+				qdisc_qstats_backlog_inc(q, nskb);
+				q->q.qlen++;
+			}
 			break;
 		}
 		skb->next = nskb;
@@ -113,40 +219,62 @@ static void try_bulk_dequeue_skb_slow(struct Qdisc *q,
 static struct sk_buff *dequeue_skb(struct Qdisc *q, bool *validate,
 				   int *packets)
 {
-	struct sk_buff *skb = q->gso_skb;
 	const struct netdev_queue *txq = q->dev_queue;
+	struct sk_buff *skb = NULL;
 
 	*packets = 1;
-	if (unlikely(skb)) {
+	if (unlikely(!skb_queue_empty(&q->gso_skb))) {
+		spinlock_t *lock = NULL;
+
+		if (q->flags & TCQ_F_NOLOCK) {
+			lock = qdisc_lock(q);
+			spin_lock(lock);
+		}
+
+		skb = skb_peek(&q->gso_skb);
+
+		/* skb may be NULL if another CPU pulled it off gso_skb
+		 * between the empty check and taking the lock.
+		 */
+		if (!skb) {
+			if (lock)
+				spin_unlock(lock);
+			goto validate;
+		}
+
 		/* skb in gso_skb were already validated */
 		*validate = false;
+		if (xfrm_offload(skb))
+			*validate = true;
 		/* check the reason of requeuing without tx lock first */
 		txq = skb_get_tx_queue(txq->dev, skb);
 		if (!netif_xmit_frozen_or_stopped(txq)) {
-			q->gso_skb = NULL;
-			qdisc_qstats_backlog_dec(q, skb);
-			q->q.qlen--;
-		} else
+			skb = __skb_dequeue(&q->gso_skb);
+			if (qdisc_is_percpu_stats(q)) {
+				qdisc_qstats_cpu_backlog_dec(q, skb);
+				qdisc_qstats_cpu_qlen_dec(q);
+			} else {
+				qdisc_qstats_backlog_dec(q, skb);
+				q->q.qlen--;
+			}
+		} else {
 			skb = NULL;
-		goto trace;
-	}
-	*validate = true;
-	skb = q->skb_bad_txq;
-	if (unlikely(skb)) {
-		/* check the reason of requeuing without tx lock first */
-		txq = skb_get_tx_queue(txq->dev, skb);
-		if (!netif_xmit_frozen_or_stopped(txq)) {
-			q->skb_bad_txq = NULL;
-			qdisc_qstats_backlog_dec(q, skb);
-			q->q.qlen--;
-			goto bulk;
 		}
-		skb = NULL;
+		if (lock)
+			spin_unlock(lock);
 		goto trace;
 	}
-	if (!(q->flags & TCQ_F_ONETXQUEUE) ||
-	    !netif_xmit_frozen_or_stopped(txq))
-		skb = q->dequeue(q);
+validate:
+	*validate = true;
+
+	if ((q->flags & TCQ_F_ONETXQUEUE) &&
+	    netif_xmit_frozen_or_stopped(txq))
+		return skb;
+
+	skb = qdisc_dequeue_skb_bad_txq(q);
+	if (unlikely(skb))
+		goto bulk;
+	skb = q->dequeue(q);
 	if (skb) {
 bulk:
 		if (qdisc_may_bulk(q))
@@ -165,21 +293,33 @@ trace:
  * only one CPU can execute this function.
  *
  * Returns to the caller:
- *				0  - queue is empty or throttled.
- *				>0 - queue is not empty.
+ *				false  - hardware queue frozen, back off
+ *				true   - feel free to send more pkts
  */
-int sch_direct_xmit(struct sk_buff *skb, struct Qdisc *q,
-		    struct net_device *dev, struct netdev_queue *txq,
-		    spinlock_t *root_lock, bool validate)
+bool sch_direct_xmit(struct sk_buff *skb, struct Qdisc *q,
+		     struct net_device *dev, struct netdev_queue *txq,
+		     spinlock_t *root_lock, bool validate)
 {
 	int ret = NETDEV_TX_BUSY;
+	bool again = false;
 
 	/* And release qdisc */
-	spin_unlock(root_lock);
+	if (root_lock)
+		spin_unlock(root_lock);
 
 	/* Note that we validate skb (GSO, checksum, ...) outside of locks */
 	if (validate)
-		skb = validate_xmit_skb_list(skb, dev);
+		skb = validate_xmit_skb_list(skb, dev, &again);
+
+#ifdef CONFIG_XFRM_OFFLOAD
+	if (unlikely(again)) {
+		if (root_lock)
+			spin_lock(root_lock);
+
+		dev_requeue_skb(skb, q);
+		return false;
+	}
+#endif
 
 	if (likely(skb)) {
 		HARD_TX_LOCK(dev, txq, smp_processor_id());
@@ -188,27 +328,28 @@ int sch_direct_xmit(struct sk_buff *skb, struct Qdisc *q,
 
 		HARD_TX_UNLOCK(dev, txq);
 	} else {
-		spin_lock(root_lock);
-		return qdisc_qlen(q);
+		if (root_lock)
+			spin_lock(root_lock);
+		return true;
 	}
-	spin_lock(root_lock);
 
-	if (dev_xmit_complete(ret)) {
-		/* Driver sent out skb successfully or skb was consumed */
-		ret = qdisc_qlen(q);
-	} else {
+	if (root_lock)
+		spin_lock(root_lock);
+
+	if (!dev_xmit_complete(ret)) {
 		/* Driver returned NETDEV_TX_BUSY - requeue skb */
 		if (unlikely(ret != NETDEV_TX_BUSY))
 			net_warn_ratelimited("BUG %s code %d qlen %d\n",
 					     dev->name, ret, q->q.qlen);
 
-		ret = dev_requeue_skb(skb, q);
+		dev_requeue_skb(skb, q);
+		return false;
 	}
 
 	if (ret && netif_xmit_frozen_or_stopped(txq))
-		ret = 0;
+		return false;
 
-	return ret;
+	return true;
 }
 
 /*
@@ -230,20 +371,22 @@ int sch_direct_xmit(struct sk_buff *skb, struct Qdisc *q,
  *				>0 - queue is not empty.
  *
  */
-static inline int qdisc_restart(struct Qdisc *q, int *packets)
+static inline bool qdisc_restart(struct Qdisc *q, int *packets)
 {
+	spinlock_t *root_lock = NULL;
 	struct netdev_queue *txq;
 	struct net_device *dev;
-	spinlock_t *root_lock;
 	struct sk_buff *skb;
 	bool validate;
 
 	/* Dequeue packet */
 	skb = dequeue_skb(q, &validate, packets);
 	if (unlikely(!skb))
-		return 0;
+		return false;
+
+	if (!(q->flags & TCQ_F_NOLOCK))
+		root_lock = qdisc_lock(q);
 
-	root_lock = qdisc_lock(q);
 	dev = qdisc_dev(q);
 	txq = skb_get_tx_queue(dev, skb);
 
@@ -267,8 +410,6 @@ void __qdisc_run(struct Qdisc *q)
 			break;
 		}
 	}
-
-	qdisc_run_end(q);
 }
 
 unsigned long dev_trans_start(struct net_device *dev)
@@ -369,7 +510,7 @@ void netif_carrier_on(struct net_device *dev)
 	if (test_and_clear_bit(__LINK_STATE_NOCARRIER, &dev->state)) {
 		if (dev->reg_state == NETREG_UNINITIALIZED)
 			return;
-		atomic_inc(&dev->carrier_changes);
+		atomic_inc(&dev->carrier_up_count);
 		linkwatch_fire_event(dev);
 		if (netif_running(dev))
 			__netdev_watchdog_up(dev);
@@ -388,7 +529,7 @@ void netif_carrier_off(struct net_device *dev)
 	if (!test_and_set_bit(__LINK_STATE_NOCARRIER, &dev->state)) {
 		if (dev->reg_state == NETREG_UNINITIALIZED)
 			return;
-		atomic_inc(&dev->carrier_changes);
+		atomic_inc(&dev->carrier_down_count);
 		linkwatch_fire_event(dev);
 	}
 }
@@ -437,7 +578,8 @@ struct Qdisc noop_qdisc = {
 };
 EXPORT_SYMBOL(noop_qdisc);
 
-static int noqueue_init(struct Qdisc *qdisc, struct nlattr *opt)
+static int noqueue_init(struct Qdisc *qdisc, struct nlattr *opt,
+			struct netlink_ext_ack *extack)
 {
 	/* register_qdisc() assigns a default of noop_enqueue if unset,
 	 * but __dev_queue_xmit() treats noqueue only as such
@@ -468,93 +610,99 @@ static const u8 prio2band[TC_PRIO_MAX + 1] = {
 
 /*
  * Private data for a pfifo_fast scheduler containing:
- * 	- queues for the three band
- * 	- bitmap indicating which of the bands contain skbs
+ *	- rings for priority bands
  */
 struct pfifo_fast_priv {
-	u32 bitmap;
-	struct qdisc_skb_head q[PFIFO_FAST_BANDS];
+	struct skb_array q[PFIFO_FAST_BANDS];
 };
 
-/*
- * Convert a bitmap to the first band number where an skb is queued, where:
- * 	bitmap=0 means there are no skbs on any band.
- * 	bitmap=1 means there is an skb on band 0.
- *	bitmap=7 means there are skbs on all 3 bands, etc.
- */
-static const int bitmap2band[] = {-1, 0, 1, 0, 2, 0, 1, 0};
-
-static inline struct qdisc_skb_head *band2list(struct pfifo_fast_priv *priv,
-					     int band)
+static inline struct skb_array *band2list(struct pfifo_fast_priv *priv,
+					  int band)
 {
-	return priv->q + band;
+	return &priv->q[band];
 }
 
 static int pfifo_fast_enqueue(struct sk_buff *skb, struct Qdisc *qdisc,
 			      struct sk_buff **to_free)
 {
-	if (qdisc->q.qlen < qdisc_dev(qdisc)->tx_queue_len) {
-		int band = prio2band[skb->priority & TC_PRIO_MAX];
-		struct pfifo_fast_priv *priv = qdisc_priv(qdisc);
-		struct qdisc_skb_head *list = band2list(priv, band);
-
-		priv->bitmap |= (1 << band);
-		qdisc->q.qlen++;
-		return __qdisc_enqueue_tail(skb, qdisc, list);
-	}
+	int band = prio2band[skb->priority & TC_PRIO_MAX];
+	struct pfifo_fast_priv *priv = qdisc_priv(qdisc);
+	struct skb_array *q = band2list(priv, band);
+	int err;
+
+	err = skb_array_produce(q, skb);
 
-	return qdisc_drop(skb, qdisc, to_free);
+	if (unlikely(err))
+		return qdisc_drop_cpu(skb, qdisc, to_free);
+
+	qdisc_qstats_cpu_qlen_inc(qdisc);
+	qdisc_qstats_cpu_backlog_inc(qdisc, skb);
+	return NET_XMIT_SUCCESS;
 }
 
 static struct sk_buff *pfifo_fast_dequeue(struct Qdisc *qdisc)
 {
 	struct pfifo_fast_priv *priv = qdisc_priv(qdisc);
-	int band = bitmap2band[priv->bitmap];
-
-	if (likely(band >= 0)) {
-		struct qdisc_skb_head *qh = band2list(priv, band);
-		struct sk_buff *skb = __qdisc_dequeue_head(qh);
+	struct sk_buff *skb = NULL;
+	int band;
 
-		if (likely(skb != NULL)) {
-			qdisc_qstats_backlog_dec(qdisc, skb);
-			qdisc_bstats_update(qdisc, skb);
-		}
+	for (band = 0; band < PFIFO_FAST_BANDS && !skb; band++) {
+		struct skb_array *q = band2list(priv, band);
 
-		qdisc->q.qlen--;
-		if (qh->qlen == 0)
-			priv->bitmap &= ~(1 << band);
+		if (__skb_array_empty(q))
+			continue;
 
-		return skb;
+		skb = skb_array_consume_bh(q);
+	}
+	if (likely(skb)) {
+		qdisc_qstats_cpu_backlog_dec(qdisc, skb);
+		qdisc_bstats_cpu_update(qdisc, skb);
+		qdisc_qstats_cpu_qlen_dec(qdisc);
 	}
 
-	return NULL;
+	return skb;
 }
 
 static struct sk_buff *pfifo_fast_peek(struct Qdisc *qdisc)
 {
 	struct pfifo_fast_priv *priv = qdisc_priv(qdisc);
-	int band = bitmap2band[priv->bitmap];
+	struct sk_buff *skb = NULL;
+	int band;
 
-	if (band >= 0) {
-		struct qdisc_skb_head *qh = band2list(priv, band);
+	for (band = 0; band < PFIFO_FAST_BANDS && !skb; band++) {
+		struct skb_array *q = band2list(priv, band);
 
-		return qh->head;
+		skb = __skb_array_peek(q);
 	}
 
-	return NULL;
+	return skb;
 }
 
 static void pfifo_fast_reset(struct Qdisc *qdisc)
 {
-	int prio;
+	int i, band;
 	struct pfifo_fast_priv *priv = qdisc_priv(qdisc);
 
-	for (prio = 0; prio < PFIFO_FAST_BANDS; prio++)
-		__qdisc_reset_queue(band2list(priv, prio));
+	for (band = 0; band < PFIFO_FAST_BANDS; band++) {
+		struct skb_array *q = band2list(priv, band);
+		struct sk_buff *skb;
 
-	priv->bitmap = 0;
-	qdisc->qstats.backlog = 0;
-	qdisc->q.qlen = 0;
+		/* The ring may be NULL if the destroy path runs because
+		 * skb_array_init() failed in pfifo_fast_init().
+		 */
+		if (!q->ring.queue)
+			continue;
+
+		while ((skb = skb_array_consume_bh(q)) != NULL)
+			kfree_skb(skb);
+	}
+
+	for_each_possible_cpu(i) {
+		struct gnet_stats_queue *q = per_cpu_ptr(qdisc->cpu_qstats, i);
+
+		q->backlog = 0;
+		q->qlen = 0;
+	}
 }
 
 static int pfifo_fast_dump(struct Qdisc *qdisc, struct sk_buff *skb)
@@ -570,19 +718,68 @@ nla_put_failure:
 	return -1;
 }
 
-static int pfifo_fast_init(struct Qdisc *qdisc, struct nlattr *opt)
+static int pfifo_fast_init(struct Qdisc *qdisc, struct nlattr *opt,
+			   struct netlink_ext_ack *extack)
 {
-	int prio;
+	unsigned int qlen = qdisc_dev(qdisc)->tx_queue_len;
 	struct pfifo_fast_priv *priv = qdisc_priv(qdisc);
+	int prio;
+
+	/* guard against zero length rings */
+	if (!qlen)
+		return -EINVAL;
 
-	for (prio = 0; prio < PFIFO_FAST_BANDS; prio++)
-		qdisc_skb_head_init(band2list(priv, prio));
+	for (prio = 0; prio < PFIFO_FAST_BANDS; prio++) {
+		struct skb_array *q = band2list(priv, prio);
+		int err;
+
+		err = skb_array_init(q, qlen, GFP_KERNEL);
+		if (err)
+			return -ENOMEM;
+	}
 
 	/* Can by-pass the queue discipline */
 	qdisc->flags |= TCQ_F_CAN_BYPASS;
 	return 0;
 }
 
+static void pfifo_fast_destroy(struct Qdisc *sch)
+{
+	struct pfifo_fast_priv *priv = qdisc_priv(sch);
+	int prio;
+
+	for (prio = 0; prio < PFIFO_FAST_BANDS; prio++) {
+		struct skb_array *q = band2list(priv, prio);
+
+		/* The ring may be NULL if the destroy path runs because
+		 * skb_array_init() failed in pfifo_fast_init().
+		 */
+		if (!q->ring.queue)
+			continue;
+		/* Destroy ring but no need to kfree_skb because a call to
+		 * pfifo_fast_reset() has already done that work.
+		 */
+		ptr_ring_cleanup(&q->ring, NULL);
+	}
+}
+
+static int pfifo_fast_change_tx_queue_len(struct Qdisc *sch,
+					  unsigned int new_len)
+{
+	struct pfifo_fast_priv *priv = qdisc_priv(sch);
+	struct skb_array *bands[PFIFO_FAST_BANDS];
+	int prio;
+
+	for (prio = 0; prio < PFIFO_FAST_BANDS; prio++) {
+		struct skb_array *q = band2list(priv, prio);
+
+		bands[prio] = q;
+	}
+
+	return skb_array_resize_multiple(bands, PFIFO_FAST_BANDS, new_len,
+					 GFP_KERNEL);
+}
+
 struct Qdisc_ops pfifo_fast_ops __read_mostly = {
 	.id		=	"pfifo_fast",
 	.priv_size	=	sizeof(struct pfifo_fast_priv),
@@ -590,9 +787,12 @@ struct Qdisc_ops pfifo_fast_ops __read_mostly = {
 	.dequeue	=	pfifo_fast_dequeue,
 	.peek		=	pfifo_fast_peek,
 	.init		=	pfifo_fast_init,
+	.destroy	=	pfifo_fast_destroy,
 	.reset		=	pfifo_fast_reset,
 	.dump		=	pfifo_fast_dump,
+	.change_tx_queue_len =  pfifo_fast_change_tx_queue_len,
 	.owner		=	THIS_MODULE,
+	.static_flags	=	TCQ_F_NOLOCK | TCQ_F_CPUSTATS,
 };
 EXPORT_SYMBOL(pfifo_fast_ops);
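
The pfifo_fast rework above swaps the three linked-list bands for
skb_array rings so enqueue and dequeue can run concurrently without the
qdisc lock. A condensed sketch of the ring life cycle those callbacks
follow (NBANDS and ring_demo are illustrative):

#include <linux/skb_array.h>

#define NBANDS 3	/* mirrors PFIFO_FAST_BANDS, illustrative */

static int ring_demo(unsigned int qlen, struct sk_buff *skb)
{
	struct skb_array bands[NBANDS];
	struct sk_buff *out = NULL;
	int i, err;

	for (i = 0; i < NBANDS; i++) {
		err = skb_array_init(&bands[i], qlen, GFP_KERNEL);
		if (err)
			return err;	/* real code unwinds earlier bands */
	}

	/* Producer side: a full ring fails the produce and the packet
	 * is dropped, which is what qdisc_drop_cpu() handles above.
	 */
	if (skb_array_produce(&bands[0], skb))
		kfree_skb(skb);

	/* Consumer side: cheap emptiness test first, then the locked
	 * consume, scanning bands in priority order.
	 */
	for (i = 0; i < NBANDS && !out; i++) {
		if (__skb_array_empty(&bands[i]))
			continue;
		out = skb_array_consume_bh(&bands[i]);
	}
	if (out)
		kfree_skb(out);	/* demo only; pfifo_fast hands it up */

	/* Rings are torn down only after all skbs are drained */
	for (i = 0; i < NBANDS; i++)
		ptr_ring_cleanup(&bands[i].ring, NULL);
	return 0;
}
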
 
@@ -600,7 +800,8 @@ static struct lock_class_key qdisc_tx_busylock;
 static struct lock_class_key qdisc_running_key;
 
 struct Qdisc *qdisc_alloc(struct netdev_queue *dev_queue,
-			  const struct Qdisc_ops *ops)
+			  const struct Qdisc_ops *ops,
+			  struct netlink_ext_ack *extack)
 {
 	void *p;
 	struct Qdisc *sch;
@@ -609,6 +810,7 @@ struct Qdisc *qdisc_alloc(struct netdev_queue *dev_queue,
 	struct net_device *dev;
 
 	if (!dev_queue) {
+		NL_SET_ERR_MSG(extack, "No device queue given");
 		err = -EINVAL;
 		goto errout;
 	}
@@ -630,6 +832,8 @@ struct Qdisc *qdisc_alloc(struct netdev_queue *dev_queue,
 		sch = (struct Qdisc *) QDISC_ALIGN((unsigned long) p);
 		sch->padded = (char *) sch - (char *) p;
 	}
+	__skb_queue_head_init(&sch->gso_skb);
+	__skb_queue_head_init(&sch->skb_bad_txq);
 	qdisc_skb_head_init(&sch->q);
 	spin_lock_init(&sch->q.lock);
 
@@ -671,21 +875,24 @@ errout:
 
 struct Qdisc *qdisc_create_dflt(struct netdev_queue *dev_queue,
 				const struct Qdisc_ops *ops,
-				unsigned int parentid)
+				unsigned int parentid,
+				struct netlink_ext_ack *extack)
 {
 	struct Qdisc *sch;
 
-	if (!try_module_get(ops->owner))
+	if (!try_module_get(ops->owner)) {
+		NL_SET_ERR_MSG(extack, "Failed to increase module reference counter");
 		return NULL;
+	}
 
-	sch = qdisc_alloc(dev_queue, ops);
+	sch = qdisc_alloc(dev_queue, ops, extack);
 	if (IS_ERR(sch)) {
 		module_put(ops->owner);
 		return NULL;
 	}
 	sch->parent = parentid;
 
-	if (!ops->init || ops->init(sch, NULL) == 0)
+	if (!ops->init || ops->init(sch, NULL, extack) == 0)
 		return sch;
 
 	qdisc_destroy(sch);
@@ -698,17 +905,21 @@ EXPORT_SYMBOL(qdisc_create_dflt);
 void qdisc_reset(struct Qdisc *qdisc)
 {
 	const struct Qdisc_ops *ops = qdisc->ops;
+	struct sk_buff *skb, *tmp;
 
 	if (ops->reset)
 		ops->reset(qdisc);
 
-	kfree_skb(qdisc->skb_bad_txq);
-	qdisc->skb_bad_txq = NULL;
+	skb_queue_walk_safe(&qdisc->gso_skb, skb, tmp) {
+		__skb_unlink(skb, &qdisc->gso_skb);
+		kfree_skb_list(skb);
+	}
 
-	if (qdisc->gso_skb) {
-		kfree_skb_list(qdisc->gso_skb);
-		qdisc->gso_skb = NULL;
+	skb_queue_walk_safe(&qdisc->skb_bad_txq, skb, tmp) {
+		__skb_unlink(skb, &qdisc->skb_bad_txq);
+		kfree_skb_list(skb);
 	}
+
 	qdisc->q.qlen = 0;
 	qdisc->qstats.backlog = 0;
 }
@@ -727,6 +938,7 @@ void qdisc_free(struct Qdisc *qdisc)
 void qdisc_destroy(struct Qdisc *qdisc)
 {
 	const struct Qdisc_ops  *ops = qdisc->ops;
+	struct sk_buff *skb, *tmp;
 
 	if (qdisc->flags & TCQ_F_BUILTIN ||
 	    !refcount_dec_and_test(&qdisc->refcnt))
@@ -746,8 +958,16 @@ void qdisc_destroy(struct Qdisc *qdisc)
 	module_put(ops->owner);
 	dev_put(qdisc_dev(qdisc));
 
-	kfree_skb_list(qdisc->gso_skb);
-	kfree_skb(qdisc->skb_bad_txq);
+	skb_queue_walk_safe(&qdisc->gso_skb, skb, tmp) {
+		__skb_unlink(skb, &qdisc->gso_skb);
+		kfree_skb_list(skb);
+	}
+
+	skb_queue_walk_safe(&qdisc->skb_bad_txq, skb, tmp) {
+		__skb_unlink(skb, &qdisc->skb_bad_txq);
+		kfree_skb_list(skb);
+	}
+
 	qdisc_free(qdisc);
 }
 EXPORT_SYMBOL(qdisc_destroy);
@@ -762,10 +982,6 @@ struct Qdisc *dev_graft_qdisc(struct netdev_queue *dev_queue,
 	root_lock = qdisc_lock(oqdisc);
 	spin_lock_bh(root_lock);
 
-	/* Prune old scheduler */
-	if (oqdisc && refcount_read(&oqdisc->refcnt) <= 1)
-		qdisc_reset(oqdisc);
-
 	/* ... and graft new one */
 	if (qdisc == NULL)
 		qdisc = &noop_qdisc;
@@ -788,7 +1004,7 @@ static void attach_one_default_qdisc(struct net_device *dev,
 	if (dev->priv_flags & IFF_NO_QUEUE)
 		ops = &noqueue_qdisc_ops;
 
-	qdisc = qdisc_create_dflt(dev_queue, ops, TC_H_ROOT);
+	qdisc = qdisc_create_dflt(dev_queue, ops, TC_H_ROOT, NULL);
 	if (!qdisc) {
 		netdev_info(dev, "activation failed\n");
 		return;
@@ -811,7 +1027,7 @@ static void attach_default_qdiscs(struct net_device *dev)
 		dev->qdisc = txq->qdisc_sleeping;
 		qdisc_refcount_inc(dev->qdisc);
 	} else {
-		qdisc = qdisc_create_dflt(txq, &mq_qdisc_ops, TC_H_ROOT);
+		qdisc = qdisc_create_dflt(txq, &mq_qdisc_ops, TC_H_ROOT, NULL);
 		if (qdisc) {
 			dev->qdisc = qdisc;
 			qdisc->ops->attach(qdisc);
@@ -901,14 +1117,18 @@ static bool some_qdisc_is_busy(struct net_device *dev)
 
 		dev_queue = netdev_get_tx_queue(dev, i);
 		q = dev_queue->qdisc_sleeping;
-		root_lock = qdisc_lock(q);
 
-		spin_lock_bh(root_lock);
+		if (q->flags & TCQ_F_NOLOCK) {
+			val = test_bit(__QDISC_STATE_SCHED, &q->state);
+		} else {
+			root_lock = qdisc_lock(q);
+			spin_lock_bh(root_lock);
 
-		val = (qdisc_is_running(q) ||
-		       test_bit(__QDISC_STATE_SCHED, &q->state));
+			val = (qdisc_is_running(q) ||
+			       test_bit(__QDISC_STATE_SCHED, &q->state));
 
-		spin_unlock_bh(root_lock);
+			spin_unlock_bh(root_lock);
+		}
 
 		if (val)
 			return true;
@@ -916,6 +1136,16 @@ static bool some_qdisc_is_busy(struct net_device *dev)
 	return false;
 }
 
+static void dev_qdisc_reset(struct net_device *dev,
+			    struct netdev_queue *dev_queue,
+			    void *none)
+{
+	struct Qdisc *qdisc = dev_queue->qdisc_sleeping;
+
+	if (qdisc)
+		qdisc_reset(qdisc);
+}
+
 /**
  * 	dev_deactivate_many - deactivate transmissions on several devices
  * 	@head: list of devices to deactivate
@@ -926,7 +1156,6 @@ static bool some_qdisc_is_busy(struct net_device *dev)
 void dev_deactivate_many(struct list_head *head)
 {
 	struct net_device *dev;
-	bool sync_needed = false;
 
 	list_for_each_entry(dev, head, close_list) {
 		netdev_for_each_tx_queue(dev, dev_deactivate_queue,
@@ -936,20 +1165,25 @@ void dev_deactivate_many(struct list_head *head)
 					     &noop_qdisc);
 
 		dev_watchdog_down(dev);
-		sync_needed |= !dev->dismantle;
 	}
 
 	/* Wait for outstanding qdisc-less dev_queue_xmit calls.
 	 * This is avoided if all devices are in dismantle phase :
 	 * Caller will call synchronize_net() for us
 	 */
-	if (sync_needed)
-		synchronize_net();
+	synchronize_net();
 
 	/* Wait for outstanding qdisc_run calls. */
-	list_for_each_entry(dev, head, close_list)
+	list_for_each_entry(dev, head, close_list) {
 		while (some_qdisc_is_busy(dev))
 			yield();
+		/* The new qdisc is assigned at this point so we can safely
+		 * unwind stale skb lists and qdisc statistics
+		 */
+		netdev_for_each_tx_queue(dev, dev_qdisc_reset, NULL);
+		if (dev_ingress_queue(dev))
+			dev_qdisc_reset(dev, dev_ingress_queue(dev), NULL);
+	}
 }
 
 void dev_deactivate(struct net_device *dev)
@@ -962,6 +1196,39 @@ void dev_deactivate(struct net_device *dev)
 }
 EXPORT_SYMBOL(dev_deactivate);
 
+static int qdisc_change_tx_queue_len(struct net_device *dev,
+				     struct netdev_queue *dev_queue)
+{
+	struct Qdisc *qdisc = dev_queue->qdisc_sleeping;
+	const struct Qdisc_ops *ops = qdisc->ops;
+
+	if (ops->change_tx_queue_len)
+		return ops->change_tx_queue_len(qdisc, dev->tx_queue_len);
+	return 0;
+}
+
+int dev_qdisc_change_tx_queue_len(struct net_device *dev)
+{
+	bool up = dev->flags & IFF_UP;
+	unsigned int i;
+	int ret = 0;
+
+	if (up)
+		dev_deactivate(dev);
+
+	for (i = 0; i < dev->num_tx_queues; i++) {
+		ret = qdisc_change_tx_queue_len(dev, &dev->_tx[i]);
+
+		/* TODO: revert changes on a partial failure */
+		if (ret)
+			break;
+	}
+
+	if (up)
+		dev_activate(dev);
+	return ret;
+}
+
 static void dev_init_scheduler_queue(struct net_device *dev,
 				     struct netdev_queue *dev_queue,
 				     void *_qdisc)
@@ -970,6 +1237,8 @@ static void dev_init_scheduler_queue(struct net_device *dev,
 
 	rcu_assign_pointer(dev_queue->qdisc, qdisc);
 	dev_queue->qdisc_sleeping = qdisc;
+	__skb_queue_head_init(&qdisc->gso_skb);
+	__skb_queue_head_init(&qdisc->skb_bad_txq);
 }
 
 void dev_init_scheduler(struct net_device *dev)
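
One idiom recurs throughout the sch_generic.c changes above: even
lockless (TCQ_F_NOLOCK) qdiscs share gso_skb and skb_bad_txq across
CPUs, so those two lists are guarded by taking the qdisc lock only when
the flag is set; locked qdiscs already run under it. Condensed
(demo_requeue is illustrative):

#include <linux/netdevice.h>
#include <net/sch_generic.h>

static void demo_requeue(struct Qdisc *q, struct sk_buff *skb)
{
	spinlock_t *lock = NULL;

	/* Only lockless qdiscs need the lock taken here */
	if (q->flags & TCQ_F_NOLOCK) {
		lock = qdisc_lock(q);
		spin_lock(lock);
	}

	__skb_queue_tail(&q->gso_skb, skb);

	if (lock)
		spin_unlock(lock);

	__netif_schedule(q);
}
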
diff --git a/net/sched/sch_gred.c b/net/sched/sch_gred.c
index bc30f9186ac6..cbe4831f46f4 100644
--- a/net/sched/sch_gred.c
+++ b/net/sched/sch_gred.c
@@ -306,12 +306,13 @@ static inline int gred_change_table_def(struct Qdisc *sch, struct nlattr *dps)
 	struct tc_gred_sopt *sopt;
 	int i;
 
-	if (dps == NULL)
+	if (!dps)
 		return -EINVAL;
 
 	sopt = nla_data(dps);
 
-	if (sopt->DPs > MAX_DPs || sopt->DPs == 0 || sopt->def_DP >= sopt->DPs)
+	if (sopt->DPs > MAX_DPs || sopt->DPs == 0 ||
+	    sopt->def_DP >= sopt->DPs)
 		return -EINVAL;
 
 	sch_tree_lock(sch);
@@ -391,7 +392,8 @@ static const struct nla_policy gred_policy[TCA_GRED_MAX + 1] = {
 	[TCA_GRED_LIMIT]	= { .type = NLA_U32 },
 };
 
-static int gred_change(struct Qdisc *sch, struct nlattr *opt)
+static int gred_change(struct Qdisc *sch, struct nlattr *opt,
+		       struct netlink_ext_ack *extack)
 {
 	struct gred_sched *table = qdisc_priv(sch);
 	struct tc_gred_qopt *ctl;
@@ -465,12 +467,13 @@ errout:
 	return err;
 }
 
-static int gred_init(struct Qdisc *sch, struct nlattr *opt)
+static int gred_init(struct Qdisc *sch, struct nlattr *opt,
+		     struct netlink_ext_ack *extack)
 {
 	struct nlattr *tb[TCA_GRED_MAX + 1];
 	int err;
 
-	if (opt == NULL)
+	if (!opt)
 		return -EINVAL;
 
 	err = nla_parse_nested(tb, TCA_GRED_MAX, opt, gred_policy, NULL);
diff --git a/net/sched/sch_hfsc.c b/net/sched/sch_hfsc.c
index d04068a97d81..3ae9877ea205 100644
--- a/net/sched/sch_hfsc.c
+++ b/net/sched/sch_hfsc.c
@@ -921,7 +921,8 @@ static const struct nla_policy hfsc_policy[TCA_HFSC_MAX + 1] = {
 
 static int
 hfsc_change_class(struct Qdisc *sch, u32 classid, u32 parentid,
-		  struct nlattr **tca, unsigned long *arg)
+		  struct nlattr **tca, unsigned long *arg,
+		  struct netlink_ext_ack *extack)
 {
 	struct hfsc_sched *q = qdisc_priv(sch);
 	struct hfsc_class *cl = (struct hfsc_class *)*arg;
@@ -1033,7 +1034,7 @@ hfsc_change_class(struct Qdisc *sch, u32 classid, u32 parentid,
 	if (cl == NULL)
 		return -ENOBUFS;
 
-	err = tcf_block_get(&cl->block, &cl->filter_list, sch);
+	err = tcf_block_get(&cl->block, &cl->filter_list, sch, extack);
 	if (err) {
 		kfree(cl);
 		return err;
@@ -1061,8 +1062,8 @@ hfsc_change_class(struct Qdisc *sch, u32 classid, u32 parentid,
 	cl->cl_common.classid = classid;
 	cl->sched     = q;
 	cl->cl_parent = parent;
-	cl->qdisc = qdisc_create_dflt(sch->dev_queue,
-				      &pfifo_qdisc_ops, classid);
+	cl->qdisc = qdisc_create_dflt(sch->dev_queue, &pfifo_qdisc_ops,
+				      classid, NULL);
 	if (cl->qdisc == NULL)
 		cl->qdisc = &noop_qdisc;
 	else
@@ -1176,7 +1177,7 @@ hfsc_classify(struct sk_buff *skb, struct Qdisc *sch, int *qerr)
 
 static int
 hfsc_graft_class(struct Qdisc *sch, unsigned long arg, struct Qdisc *new,
-		 struct Qdisc **old)
+		 struct Qdisc **old, struct netlink_ext_ack *extack)
 {
 	struct hfsc_class *cl = (struct hfsc_class *)arg;
 
@@ -1184,7 +1185,7 @@ hfsc_graft_class(struct Qdisc *sch, unsigned long arg, struct Qdisc *new,
 		return -EINVAL;
 	if (new == NULL) {
 		new = qdisc_create_dflt(sch->dev_queue, &pfifo_qdisc_ops,
-					cl->cl_common.classid);
+					cl->cl_common.classid, NULL);
 		if (new == NULL)
 			new = &noop_qdisc;
 	}
@@ -1246,7 +1247,8 @@ hfsc_unbind_tcf(struct Qdisc *sch, unsigned long arg)
 	cl->filter_cnt--;
 }
 
-static struct tcf_block *hfsc_tcf_block(struct Qdisc *sch, unsigned long arg)
+static struct tcf_block *hfsc_tcf_block(struct Qdisc *sch, unsigned long arg,
+					struct netlink_ext_ack *extack)
 {
 	struct hfsc_sched *q = qdisc_priv(sch);
 	struct hfsc_class *cl = (struct hfsc_class *)arg;
@@ -1388,7 +1390,8 @@ hfsc_schedule_watchdog(struct Qdisc *sch)
 }
 
 static int
-hfsc_init_qdisc(struct Qdisc *sch, struct nlattr *opt)
+hfsc_init_qdisc(struct Qdisc *sch, struct nlattr *opt,
+		struct netlink_ext_ack *extack)
 {
 	struct hfsc_sched *q = qdisc_priv(sch);
 	struct tc_hfsc_qopt *qopt;
@@ -1396,7 +1399,7 @@ hfsc_init_qdisc(struct Qdisc *sch, struct nlattr *opt)
 
 	qdisc_watchdog_init(&q->watchdog, sch);
 
-	if (opt == NULL || nla_len(opt) < sizeof(*qopt))
+	if (!opt || nla_len(opt) < sizeof(*qopt))
 		return -EINVAL;
 	qopt = nla_data(opt);
 
@@ -1406,14 +1409,14 @@ hfsc_init_qdisc(struct Qdisc *sch, struct nlattr *opt)
 		return err;
 	q->eligible = RB_ROOT;
 
-	err = tcf_block_get(&q->root.block, &q->root.filter_list, sch);
+	err = tcf_block_get(&q->root.block, &q->root.filter_list, sch, extack);
 	if (err)
 		return err;
 
 	q->root.cl_common.classid = sch->handle;
 	q->root.sched   = q;
 	q->root.qdisc = qdisc_create_dflt(sch->dev_queue, &pfifo_qdisc_ops,
-					  sch->handle);
+					  sch->handle, NULL);
 	if (q->root.qdisc == NULL)
 		q->root.qdisc = &noop_qdisc;
 	else
@@ -1429,7 +1432,8 @@ hfsc_init_qdisc(struct Qdisc *sch, struct nlattr *opt)
 }
 
 static int
-hfsc_change_qdisc(struct Qdisc *sch, struct nlattr *opt)
+hfsc_change_qdisc(struct Qdisc *sch, struct nlattr *opt,
+		  struct netlink_ext_ack *extack)
 {
 	struct hfsc_sched *q = qdisc_priv(sch);
 	struct tc_hfsc_qopt *qopt;
diff --git a/net/sched/sch_hhf.c b/net/sched/sch_hhf.c
index 73a53c08091b..bce2632212d3 100644
--- a/net/sched/sch_hhf.c
+++ b/net/sched/sch_hhf.c
@@ -504,7 +504,8 @@ static const struct nla_policy hhf_policy[TCA_HHF_MAX + 1] = {
 	[TCA_HHF_NON_HH_WEIGHT]	 = { .type = NLA_U32 },
 };
 
-static int hhf_change(struct Qdisc *sch, struct nlattr *opt)
+static int hhf_change(struct Qdisc *sch, struct nlattr *opt,
+		      struct netlink_ext_ack *extack)
 {
 	struct hhf_sched_data *q = qdisc_priv(sch);
 	struct nlattr *tb[TCA_HHF_MAX + 1];
@@ -571,7 +572,8 @@ static int hhf_change(struct Qdisc *sch, struct nlattr *opt)
 	return 0;
 }
 
-static int hhf_init(struct Qdisc *sch, struct nlattr *opt)
+static int hhf_init(struct Qdisc *sch, struct nlattr *opt,
+		    struct netlink_ext_ack *extack)
 {
 	struct hhf_sched_data *q = qdisc_priv(sch);
 	int i;
@@ -589,7 +591,7 @@ static int hhf_init(struct Qdisc *sch, struct nlattr *opt)
 	q->hhf_non_hh_weight = 2;
 
 	if (opt) {
-		int err = hhf_change(sch, opt);
+		int err = hhf_change(sch, opt, extack);
 
 		if (err)
 			return err;
diff --git a/net/sched/sch_htb.c b/net/sched/sch_htb.c
index fa0380730ff0..1ea9846cc6ce 100644
--- a/net/sched/sch_htb.c
+++ b/net/sched/sch_htb.c
@@ -1017,7 +1017,8 @@ static void htb_work_func(struct work_struct *work)
 	rcu_read_unlock();
 }
 
-static int htb_init(struct Qdisc *sch, struct nlattr *opt)
+static int htb_init(struct Qdisc *sch, struct nlattr *opt,
+		    struct netlink_ext_ack *extack)
 {
 	struct htb_sched *q = qdisc_priv(sch);
 	struct nlattr *tb[TCA_HTB_MAX + 1];
@@ -1031,7 +1032,7 @@ static int htb_init(struct Qdisc *sch, struct nlattr *opt)
 	if (!opt)
 		return -EINVAL;
 
-	err = tcf_block_get(&q->block, &q->filter_list, sch);
+	err = tcf_block_get(&q->block, &q->filter_list, sch, extack);
 	if (err)
 		return err;
 
@@ -1171,7 +1172,7 @@ htb_dump_class_stats(struct Qdisc *sch, unsigned long arg, struct gnet_dump *d)
 }
 
 static int htb_graft(struct Qdisc *sch, unsigned long arg, struct Qdisc *new,
-		     struct Qdisc **old)
+		     struct Qdisc **old, struct netlink_ext_ack *extack)
 {
 	struct htb_class *cl = (struct htb_class *)arg;
 
@@ -1179,7 +1180,7 @@ static int htb_graft(struct Qdisc *sch, unsigned long arg, struct Qdisc *new,
 		return -EINVAL;
 	if (new == NULL &&
 	    (new = qdisc_create_dflt(sch->dev_queue, &pfifo_qdisc_ops,
-				     cl->common.classid)) == NULL)
+				     cl->common.classid, extack)) == NULL)
 		return -ENOBUFS;
 
 	*old = qdisc_replace(sch, new, &cl->un.leaf.q);
@@ -1289,7 +1290,8 @@ static int htb_delete(struct Qdisc *sch, unsigned long arg)
 
 	if (!cl->level && htb_parent_last_child(cl)) {
 		new_q = qdisc_create_dflt(sch->dev_queue, &pfifo_qdisc_ops,
-					  cl->parent->common.classid);
+					  cl->parent->common.classid,
+					  NULL);
 		last_child = 1;
 	}
 
@@ -1326,7 +1328,7 @@ static int htb_delete(struct Qdisc *sch, unsigned long arg)
 
 static int htb_change_class(struct Qdisc *sch, u32 classid,
 			    u32 parentid, struct nlattr **tca,
-			    unsigned long *arg)
+			    unsigned long *arg, struct netlink_ext_ack *extack)
 {
 	int err = -EINVAL;
 	struct htb_sched *q = qdisc_priv(sch);
@@ -1356,10 +1358,12 @@ static int htb_change_class(struct Qdisc *sch, u32 classid,
 
 	/* Keeping backward compatible with rate_table based iproute2 tc */
 	if (hopt->rate.linklayer == TC_LINKLAYER_UNAWARE)
-		qdisc_put_rtab(qdisc_get_rtab(&hopt->rate, tb[TCA_HTB_RTAB]));
+		qdisc_put_rtab(qdisc_get_rtab(&hopt->rate, tb[TCA_HTB_RTAB],
+					      NULL));
 
 	if (hopt->ceil.linklayer == TC_LINKLAYER_UNAWARE)
-		qdisc_put_rtab(qdisc_get_rtab(&hopt->ceil, tb[TCA_HTB_CTAB]));
+		qdisc_put_rtab(qdisc_get_rtab(&hopt->ceil, tb[TCA_HTB_CTAB],
+					      NULL));
 
 	if (!cl) {		/* new class */
 		struct Qdisc *new_q;
@@ -1394,7 +1398,7 @@ static int htb_change_class(struct Qdisc *sch, u32 classid,
 		if (!cl)
 			goto failure;
 
-		err = tcf_block_get(&cl->block, &cl->filter_list, sch);
+		err = tcf_block_get(&cl->block, &cl->filter_list, sch, extack);
 		if (err) {
 			kfree(cl);
 			goto failure;
@@ -1423,8 +1427,8 @@ static int htb_change_class(struct Qdisc *sch, u32 classid,
 		 * so that can't be used inside of sch_tree_lock
 		 * -- thanks to Karlis Peisenieks
 		 */
-		new_q = qdisc_create_dflt(sch->dev_queue,
-					  &pfifo_qdisc_ops, classid);
+		new_q = qdisc_create_dflt(sch->dev_queue, &pfifo_qdisc_ops,
+					  classid, NULL);
 		sch_tree_lock(sch);
 		if (parent && !parent->level) {
 			unsigned int qlen = parent->un.leaf.q->q.qlen;
@@ -1524,7 +1528,8 @@ failure:
 	return err;
 }
 
-static struct tcf_block *htb_tcf_block(struct Qdisc *sch, unsigned long arg)
+static struct tcf_block *htb_tcf_block(struct Qdisc *sch, unsigned long arg,
+				       struct netlink_ext_ack *extack)
 {
 	struct htb_sched *q = qdisc_priv(sch);
 	struct htb_class *cl = (struct htb_class *)arg;
diff --git a/net/sched/sch_ingress.c b/net/sched/sch_ingress.c
index 003e1b063447..ce3f55259d0d 100644
--- a/net/sched/sch_ingress.c
+++ b/net/sched/sch_ingress.c
@@ -48,7 +48,8 @@ static void ingress_walk(struct Qdisc *sch, struct qdisc_walker *walker)
 {
 }
 
-static struct tcf_block *ingress_tcf_block(struct Qdisc *sch, unsigned long cl)
+static struct tcf_block *ingress_tcf_block(struct Qdisc *sch, unsigned long cl,
+					   struct netlink_ext_ack *extack)
 {
 	struct ingress_sched_data *q = qdisc_priv(sch);
 
@@ -60,9 +61,24 @@ static void clsact_chain_head_change(struct tcf_proto *tp_head, void *priv)
 	struct mini_Qdisc_pair *miniqp = priv;
 
 	mini_qdisc_pair_swap(miniqp, tp_head);
+}
+
+static void ingress_ingress_block_set(struct Qdisc *sch, u32 block_index)
+{
+	struct ingress_sched_data *q = qdisc_priv(sch);
+
+	q->block_info.block_index = block_index;
 }
 
-static int ingress_init(struct Qdisc *sch, struct nlattr *opt)
+static u32 ingress_ingress_block_get(struct Qdisc *sch)
+{
+	struct ingress_sched_data *q = qdisc_priv(sch);
+
+	return q->block_info.block_index;
+}
+
+static int ingress_init(struct Qdisc *sch, struct nlattr *opt,
+			struct netlink_ext_ack *extack)
 {
 	struct ingress_sched_data *q = qdisc_priv(sch);
 	struct net_device *dev = qdisc_dev(sch);
@@ -75,7 +91,7 @@ static int ingress_init(struct Qdisc *sch, struct nlattr *opt)
 	q->block_info.chain_head_change = clsact_chain_head_change;
 	q->block_info.chain_head_change_priv = &q->miniqp;
 
-	return tcf_block_get_ext(&q->block, sch, &q->block_info);
+	return tcf_block_get_ext(&q->block, sch, &q->block_info, extack);
 }
 
 static void ingress_destroy(struct Qdisc *sch)
@@ -111,14 +127,16 @@ static const struct Qdisc_class_ops ingress_class_ops = {
 };
 
 static struct Qdisc_ops ingress_qdisc_ops __read_mostly = {
-	.cl_ops		=	&ingress_class_ops,
-	.id		=	"ingress",
-	.priv_size	=	sizeof(struct ingress_sched_data),
-	.static_flags	=	TCQ_F_CPUSTATS,
-	.init		=	ingress_init,
-	.destroy	=	ingress_destroy,
-	.dump		=	ingress_dump,
-	.owner		=	THIS_MODULE,
+	.cl_ops			=	&ingress_class_ops,
+	.id			=	"ingress",
+	.priv_size		=	sizeof(struct ingress_sched_data),
+	.static_flags		=	TCQ_F_CPUSTATS,
+	.init			=	ingress_init,
+	.destroy		=	ingress_destroy,
+	.dump			=	ingress_dump,
+	.ingress_block_set	=	ingress_ingress_block_set,
+	.ingress_block_get	=	ingress_ingress_block_get,
+	.owner			=	THIS_MODULE,
 };
 
 struct clsact_sched_data {
@@ -147,7 +165,8 @@ static unsigned long clsact_bind_filter(struct Qdisc *sch,
 	return clsact_find(sch, classid);
 }
 
-static struct tcf_block *clsact_tcf_block(struct Qdisc *sch, unsigned long cl)
+static struct tcf_block *clsact_tcf_block(struct Qdisc *sch, unsigned long cl,
+					  struct netlink_ext_ack *extack)
 {
 	struct clsact_sched_data *q = qdisc_priv(sch);
 
@@ -161,7 +180,36 @@ static struct tcf_block *clsact_tcf_block(struct Qdisc *sch, unsigned long cl)
 	}
 }
 
-static int clsact_init(struct Qdisc *sch, struct nlattr *opt)
+static void clsact_ingress_block_set(struct Qdisc *sch, u32 block_index)
+{
+	struct clsact_sched_data *q = qdisc_priv(sch);
+
+	q->ingress_block_info.block_index = block_index;
+}
+
+static void clsact_egress_block_set(struct Qdisc *sch, u32 block_index)
+{
+	struct clsact_sched_data *q = qdisc_priv(sch);
+
+	q->egress_block_info.block_index = block_index;
+}
+
+static u32 clsact_ingress_block_get(struct Qdisc *sch)
+{
+	struct clsact_sched_data *q = qdisc_priv(sch);
+
+	return q->ingress_block_info.block_index;
+}
+
+static u32 clsact_egress_block_get(struct Qdisc *sch)
+{
+	struct clsact_sched_data *q = qdisc_priv(sch);
+
+	return q->egress_block_info.block_index;
+}
+
+static int clsact_init(struct Qdisc *sch, struct nlattr *opt,
+		       struct netlink_ext_ack *extack)
 {
 	struct clsact_sched_data *q = qdisc_priv(sch);
 	struct net_device *dev = qdisc_dev(sch);
@@ -176,7 +224,8 @@ static int clsact_init(struct Qdisc *sch, struct nlattr *opt)
 	q->ingress_block_info.chain_head_change = clsact_chain_head_change;
 	q->ingress_block_info.chain_head_change_priv = &q->miniqp_ingress;
 
-	err = tcf_block_get_ext(&q->ingress_block, sch, &q->ingress_block_info);
+	err = tcf_block_get_ext(&q->ingress_block, sch, &q->ingress_block_info,
+				extack);
 	if (err)
 		return err;
 
@@ -186,7 +235,7 @@ static int clsact_init(struct Qdisc *sch, struct nlattr *opt)
 	q->egress_block_info.chain_head_change = clsact_chain_head_change;
 	q->egress_block_info.chain_head_change_priv = &q->miniqp_egress;
 
-	return tcf_block_get_ext(&q->egress_block, sch, &q->egress_block_info);
+	return tcf_block_get_ext(&q->egress_block, sch, &q->egress_block_info,
+				 extack);
 }
 
 static void clsact_destroy(struct Qdisc *sch)
@@ -210,14 +259,18 @@ static const struct Qdisc_class_ops clsact_class_ops = {
 };
 
 static struct Qdisc_ops clsact_qdisc_ops __read_mostly = {
-	.cl_ops		=	&clsact_class_ops,
-	.id		=	"clsact",
-	.priv_size	=	sizeof(struct clsact_sched_data),
-	.static_flags	=	TCQ_F_CPUSTATS,
-	.init		=	clsact_init,
-	.destroy	=	clsact_destroy,
-	.dump		=	ingress_dump,
-	.owner		=	THIS_MODULE,
+	.cl_ops			=	&clsact_class_ops,
+	.id			=	"clsact",
+	.priv_size		=	sizeof(struct clsact_sched_data),
+	.static_flags		=	TCQ_F_CPUSTATS,
+	.init			=	clsact_init,
+	.destroy		=	clsact_destroy,
+	.dump			=	ingress_dump,
+	.ingress_block_set	=	clsact_ingress_block_set,
+	.egress_block_set	=	clsact_egress_block_set,
+	.ingress_block_get	=	clsact_ingress_block_get,
+	.egress_block_get	=	clsact_egress_block_get,
+	.owner			=	THIS_MODULE,
 };
 
 static int __init ingress_module_init(void)
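
The ingress_block_get/set callbacks added above have no caller in this
file; they exist so the qdisc core can plumb a user-supplied shared
block index into the qdisc before init runs, and report it back on
dump. Roughly, on the core's side (a sketch, not the verbatim sch_api.c
change, which is outside this excerpt):

#include <linux/rtnetlink.h>
#include <net/netlink.h>
#include <net/sch_generic.h>

static int demo_set_ingress_block(struct Qdisc *sch, struct nlattr **tca,
				  struct netlink_ext_ack *extack)
{
	u32 block_index;

	if (!tca[TCA_INGRESS_BLOCK])
		return 0;

	if (!sch->ops->ingress_block_set) {
		NL_SET_ERR_MSG(extack, "Ingress block sharing is not supported");
		return -EOPNOTSUPP;
	}

	block_index = nla_get_u32(tca[TCA_INGRESS_BLOCK]);
	if (!block_index) {
		NL_SET_ERR_MSG(extack, "Block index cannot be 0");
		return -EINVAL;
	}

	sch->ops->ingress_block_set(sch, block_index);
	return 0;
}
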
diff --git a/net/sched/sch_mq.c b/net/sched/sch_mq.c
index 213b586a06a0..f062a18e9162 100644
--- a/net/sched/sch_mq.c
+++ b/net/sched/sch_mq.c
@@ -17,6 +17,7 @@
 #include <linux/skbuff.h>
 #include <net/netlink.h>
 #include <net/pkt_sched.h>
+#include <net/sch_generic.h>
 
 struct mq_sched {
 	struct Qdisc		**qdiscs;
@@ -35,7 +36,8 @@ static void mq_destroy(struct Qdisc *sch)
 	kfree(priv->qdiscs);
 }
 
-static int mq_init(struct Qdisc *sch, struct nlattr *opt)
+static int mq_init(struct Qdisc *sch, struct nlattr *opt,
+		   struct netlink_ext_ack *extack)
 {
 	struct net_device *dev = qdisc_dev(sch);
 	struct mq_sched *priv = qdisc_priv(sch);
@@ -59,7 +61,8 @@ static int mq_init(struct Qdisc *sch, struct nlattr *opt)
 		dev_queue = netdev_get_tx_queue(dev, ntx);
 		qdisc = qdisc_create_dflt(dev_queue, get_default_qdisc_ops(dev, ntx),
 					  TC_H_MAKE(TC_H_MAJ(sch->handle),
-						    TC_H_MIN(ntx + 1)));
+						    TC_H_MIN(ntx + 1)),
+					  extack);
 		if (!qdisc)
 			return -ENOMEM;
 		priv->qdiscs[ntx] = qdisc;
@@ -97,23 +100,42 @@ static int mq_dump(struct Qdisc *sch, struct sk_buff *skb)
 	struct net_device *dev = qdisc_dev(sch);
 	struct Qdisc *qdisc;
 	unsigned int ntx;
+	__u32 qlen = 0;
 
 	sch->q.qlen = 0;
 	memset(&sch->bstats, 0, sizeof(sch->bstats));
 	memset(&sch->qstats, 0, sizeof(sch->qstats));
 
+	/* MQ supports lockless qdiscs. However, statistics accounting needs
+	 * to account for all, none, or a mix of locked and unlocked child
+	 * qdiscs. Percpu stats are added to counters in-band and locked
+	 * qdisc totals are added at the end.
+	 */
 	for (ntx = 0; ntx < dev->num_tx_queues; ntx++) {
 		qdisc = netdev_get_tx_queue(dev, ntx)->qdisc_sleeping;
 		spin_lock_bh(qdisc_lock(qdisc));
-		sch->q.qlen		+= qdisc->q.qlen;
-		sch->bstats.bytes	+= qdisc->bstats.bytes;
-		sch->bstats.packets	+= qdisc->bstats.packets;
-		sch->qstats.backlog	+= qdisc->qstats.backlog;
-		sch->qstats.drops	+= qdisc->qstats.drops;
-		sch->qstats.requeues	+= qdisc->qstats.requeues;
-		sch->qstats.overlimits	+= qdisc->qstats.overlimits;
+
+		if (qdisc_is_percpu_stats(qdisc)) {
+			qlen = qdisc_qlen_sum(qdisc);
+			__gnet_stats_copy_basic(NULL, &sch->bstats,
+						qdisc->cpu_bstats,
+						&qdisc->bstats);
+			__gnet_stats_copy_queue(&sch->qstats,
+						qdisc->cpu_qstats,
+						&qdisc->qstats, qlen);
+		} else {
+			sch->q.qlen		+= qdisc->q.qlen;
+			sch->bstats.bytes	+= qdisc->bstats.bytes;
+			sch->bstats.packets	+= qdisc->bstats.packets;
+			sch->qstats.backlog	+= qdisc->qstats.backlog;
+			sch->qstats.drops	+= qdisc->qstats.drops;
+			sch->qstats.requeues	+= qdisc->qstats.requeues;
+			sch->qstats.overlimits	+= qdisc->qstats.overlimits;
+		}
+
 		spin_unlock_bh(qdisc_lock(qdisc));
 	}
+
 	return 0;
 }
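
For reference, a sketch of what the qdisc_qlen_sum() helper used above is assumed to look like (this series defines it in sch_generic.h): per-cpu qlen counters only exist for lockless qdiscs, so locked qdiscs keep the plain counter.

static inline int qdisc_qlen_sum(const struct Qdisc *q)
{
	__u32 qlen = 0;
	int i;

	if (q->flags & TCQ_F_NOLOCK) {
		/* NOLOCK qdiscs keep qlen per cpu; sum all of them. */
		for_each_possible_cpu(i)
			qlen += per_cpu_ptr(q->cpu_qstats, i)->qlen;
	} else {
		qlen = q->q.qlen;
	}

	return qlen;
}
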
 
@@ -134,7 +156,7 @@ static struct netdev_queue *mq_select_queue(struct Qdisc *sch,
 }
 
 static int mq_graft(struct Qdisc *sch, unsigned long cl, struct Qdisc *new,
-		    struct Qdisc **old)
+		    struct Qdisc **old, struct netlink_ext_ack *extack)
 {
 	struct netdev_queue *dev_queue = mq_queue_get(sch, cl);
 	struct net_device *dev = qdisc_dev(sch);
diff --git a/net/sched/sch_mqprio.c b/net/sched/sch_mqprio.c
index b85885a9d8a1..0e9d761cdd80 100644
--- a/net/sched/sch_mqprio.c
+++ b/net/sched/sch_mqprio.c
@@ -132,7 +132,8 @@ static int parse_attr(struct nlattr *tb[], int maxtype, struct nlattr *nla,
 	return 0;
 }
 
-static int mqprio_init(struct Qdisc *sch, struct nlattr *opt)
+static int mqprio_init(struct Qdisc *sch, struct nlattr *opt,
+		       struct netlink_ext_ack *extack)
 {
 	struct net_device *dev = qdisc_dev(sch);
 	struct mqprio_sched *priv = qdisc_priv(sch);
@@ -229,7 +230,7 @@ static int mqprio_init(struct Qdisc *sch, struct nlattr *opt)
 		qdisc = qdisc_create_dflt(dev_queue,
 					  get_default_qdisc_ops(dev, i),
 					  TC_H_MAKE(TC_H_MAJ(sch->handle),
-						    TC_H_MIN(i + 1)));
+						    TC_H_MIN(i + 1)), extack);
 		if (!qdisc)
 			return -ENOMEM;
 
@@ -319,7 +320,7 @@ static struct netdev_queue *mqprio_queue_get(struct Qdisc *sch,
 }
 
 static int mqprio_graft(struct Qdisc *sch, unsigned long cl, struct Qdisc *new,
-		    struct Qdisc **old)
+			struct Qdisc **old, struct netlink_ext_ack *extack)
 {
 	struct net_device *dev = qdisc_dev(sch);
 	struct netdev_queue *dev_queue = mqprio_queue_get(sch, cl);
@@ -388,22 +389,40 @@ static int mqprio_dump(struct Qdisc *sch, struct sk_buff *skb)
 	struct nlattr *nla = (struct nlattr *)skb_tail_pointer(skb);
 	struct tc_mqprio_qopt opt = { 0 };
 	struct Qdisc *qdisc;
-	unsigned int i;
+	unsigned int ntx, tc;
 
 	sch->q.qlen = 0;
 	memset(&sch->bstats, 0, sizeof(sch->bstats));
 	memset(&sch->qstats, 0, sizeof(sch->qstats));
 
-	for (i = 0; i < dev->num_tx_queues; i++) {
-		qdisc = rtnl_dereference(netdev_get_tx_queue(dev, i)->qdisc);
+	/* MQ supports lockless qdiscs. However, statistics accounting needs
+	 * to account for all, none, or a mix of locked and unlocked child
+	 * qdiscs. Percpu stats are added to counters in-band and locked
+	 * qdisc totals are added at the end.
+	 */
+	for (ntx = 0; ntx < dev->num_tx_queues; ntx++) {
+		qdisc = netdev_get_tx_queue(dev, ntx)->qdisc_sleeping;
 		spin_lock_bh(qdisc_lock(qdisc));
-		sch->q.qlen		+= qdisc->q.qlen;
-		sch->bstats.bytes	+= qdisc->bstats.bytes;
-		sch->bstats.packets	+= qdisc->bstats.packets;
-		sch->qstats.backlog	+= qdisc->qstats.backlog;
-		sch->qstats.drops	+= qdisc->qstats.drops;
-		sch->qstats.requeues	+= qdisc->qstats.requeues;
-		sch->qstats.overlimits	+= qdisc->qstats.overlimits;
+
+		if (qdisc_is_percpu_stats(qdisc)) {
+			__u32 qlen = qdisc_qlen_sum(qdisc);
+
+			__gnet_stats_copy_basic(NULL, &sch->bstats,
+						qdisc->cpu_bstats,
+						&qdisc->bstats);
+			__gnet_stats_copy_queue(&sch->qstats,
+						qdisc->cpu_qstats,
+						&qdisc->qstats, qlen);
+		} else {
+			sch->q.qlen		+= qdisc->q.qlen;
+			sch->bstats.bytes	+= qdisc->bstats.bytes;
+			sch->bstats.packets	+= qdisc->bstats.packets;
+			sch->qstats.backlog	+= qdisc->qstats.backlog;
+			sch->qstats.drops	+= qdisc->qstats.drops;
+			sch->qstats.requeues	+= qdisc->qstats.requeues;
+			sch->qstats.overlimits	+= qdisc->qstats.overlimits;
+		}
+
 		spin_unlock_bh(qdisc_lock(qdisc));
 	}
 
@@ -411,9 +430,9 @@ static int mqprio_dump(struct Qdisc *sch, struct sk_buff *skb)
 	memcpy(opt.prio_tc_map, dev->prio_tc_map, sizeof(opt.prio_tc_map));
 	opt.hw = priv->hw_offload;
 
-	for (i = 0; i < netdev_get_num_tc(dev); i++) {
-		opt.count[i] = dev->tc_to_txq[i].count;
-		opt.offset[i] = dev->tc_to_txq[i].offset;
+	for (tc = 0; tc < netdev_get_num_tc(dev); tc++) {
+		opt.count[tc] = dev->tc_to_txq[tc].count;
+		opt.offset[tc] = dev->tc_to_txq[tc].offset;
 	}
 
 	if (nla_put(skb, TCA_OPTIONS, NLA_ALIGN(sizeof(opt)), &opt))
@@ -495,7 +514,6 @@ static int mqprio_dump_class_stats(struct Qdisc *sch, unsigned long cl,
 	if (cl >= TC_H_MIN_PRIORITY) {
 		int i;
 		__u32 qlen = 0;
-		struct Qdisc *qdisc;
 		struct gnet_stats_queue qstats = {0};
 		struct gnet_stats_basic_packed bstats = {0};
 		struct net_device *dev = qdisc_dev(sch);
@@ -511,18 +529,26 @@ static int mqprio_dump_class_stats(struct Qdisc *sch, unsigned long cl,
 
 		for (i = tc.offset; i < tc.offset + tc.count; i++) {
 			struct netdev_queue *q = netdev_get_tx_queue(dev, i);
+			struct Qdisc *qdisc = rtnl_dereference(q->qdisc);
+			struct gnet_stats_basic_cpu __percpu *cpu_bstats = NULL;
+			struct gnet_stats_queue __percpu *cpu_qstats = NULL;
 
-			qdisc = rtnl_dereference(q->qdisc);
 			spin_lock_bh(qdisc_lock(qdisc));
-			qlen		  += qdisc->q.qlen;
-			bstats.bytes      += qdisc->bstats.bytes;
-			bstats.packets    += qdisc->bstats.packets;
-			qstats.backlog    += qdisc->qstats.backlog;
-			qstats.drops      += qdisc->qstats.drops;
-			qstats.requeues   += qdisc->qstats.requeues;
-			qstats.overlimits += qdisc->qstats.overlimits;
+			if (qdisc_is_percpu_stats(qdisc)) {
+				cpu_bstats = qdisc->cpu_bstats;
+				cpu_qstats = qdisc->cpu_qstats;
+			}
+
+			qlen = qdisc_qlen_sum(qdisc);
+			__gnet_stats_copy_basic(NULL, &sch->bstats,
+						cpu_bstats, &qdisc->bstats);
+			__gnet_stats_copy_queue(&sch->qstats,
+						cpu_qstats,
+						&qdisc->qstats,
+						qlen);
 			spin_unlock_bh(qdisc_lock(qdisc));
 		}
+
 		/* Reclaim root sleeping lock before completing stats */
 		if (d->lock)
 			spin_lock_bh(d->lock);
diff --git a/net/sched/sch_multiq.c b/net/sched/sch_multiq.c
index 012216386c0b..1da7ea8de0ad 100644
--- a/net/sched/sch_multiq.c
+++ b/net/sched/sch_multiq.c
@@ -180,7 +180,8 @@ multiq_destroy(struct Qdisc *sch)
 	kfree(q->queues);
 }
 
-static int multiq_tune(struct Qdisc *sch, struct nlattr *opt)
+static int multiq_tune(struct Qdisc *sch, struct nlattr *opt,
+		       struct netlink_ext_ack *extack)
 {
 	struct multiq_sched_data *q = qdisc_priv(sch);
 	struct tc_multiq_qopt *qopt;
@@ -215,7 +216,7 @@ static int multiq_tune(struct Qdisc *sch, struct nlattr *opt)
 			child = qdisc_create_dflt(sch->dev_queue,
 						  &pfifo_qdisc_ops,
 						  TC_H_MAKE(sch->handle,
-							    i + 1));
+							    i + 1), extack);
 			if (child) {
 				sch_tree_lock(sch);
 				old = q->queues[i];
@@ -236,17 +237,18 @@ static int multiq_tune(struct Qdisc *sch, struct nlattr *opt)
 	return 0;
 }
 
-static int multiq_init(struct Qdisc *sch, struct nlattr *opt)
+static int multiq_init(struct Qdisc *sch, struct nlattr *opt,
+		       struct netlink_ext_ack *extack)
 {
 	struct multiq_sched_data *q = qdisc_priv(sch);
 	int i, err;
 
 	q->queues = NULL;
 
-	if (opt == NULL)
+	if (!opt)
 		return -EINVAL;
 
-	err = tcf_block_get(&q->block, &q->filter_list, sch);
+	err = tcf_block_get(&q->block, &q->filter_list, sch, extack);
 	if (err)
 		return err;
 
@@ -258,7 +260,7 @@ static int multiq_init(struct Qdisc *sch, struct nlattr *opt)
 	for (i = 0; i < q->max_bands; i++)
 		q->queues[i] = &noop_qdisc;
 
-	return multiq_tune(sch, opt);
+	return multiq_tune(sch, opt, extack);
 }
 
 static int multiq_dump(struct Qdisc *sch, struct sk_buff *skb)
@@ -281,7 +283,7 @@ nla_put_failure:
 }
 
 static int multiq_graft(struct Qdisc *sch, unsigned long arg, struct Qdisc *new,
-		      struct Qdisc **old)
+			struct Qdisc **old, struct netlink_ext_ack *extack)
 {
 	struct multiq_sched_data *q = qdisc_priv(sch);
 	unsigned long band = arg - 1;
@@ -369,7 +371,8 @@ static void multiq_walk(struct Qdisc *sch, struct qdisc_walker *arg)
 	}
 }
 
-static struct tcf_block *multiq_tcf_block(struct Qdisc *sch, unsigned long cl)
+static struct tcf_block *multiq_tcf_block(struct Qdisc *sch, unsigned long cl,
+					  struct netlink_ext_ack *extack)
 {
 	struct multiq_sched_data *q = qdisc_priv(sch);
 
diff --git a/net/sched/sch_netem.c b/net/sched/sch_netem.c
index dd70924cbcdf..7bbc13b8ca47 100644
--- a/net/sched/sch_netem.c
+++ b/net/sched/sch_netem.c
@@ -893,7 +893,8 @@ static int parse_attr(struct nlattr *tb[], int maxtype, struct nlattr *nla,
 }
 
 /* Parse netlink message to set options */
-static int netem_change(struct Qdisc *sch, struct nlattr *opt)
+static int netem_change(struct Qdisc *sch, struct nlattr *opt,
+			struct netlink_ext_ack *extack)
 {
 	struct netem_sched_data *q = qdisc_priv(sch);
 	struct nlattr *tb[TCA_NETEM_MAX + 1];
@@ -984,7 +985,8 @@ static int netem_change(struct Qdisc *sch, struct nlattr *opt)
 	return ret;
 }
 
-static int netem_init(struct Qdisc *sch, struct nlattr *opt)
+static int netem_init(struct Qdisc *sch, struct nlattr *opt,
+		      struct netlink_ext_ack *extack)
 {
 	struct netem_sched_data *q = qdisc_priv(sch);
 	int ret;
@@ -995,7 +997,7 @@ static int netem_init(struct Qdisc *sch, struct nlattr *opt)
 		return -EINVAL;
 
 	q->loss_model = CLG_RANDOM;
-	ret = netem_change(sch, opt);
+	ret = netem_change(sch, opt, extack);
 	if (ret)
 		pr_info("netem: change failed\n");
 	return ret;
@@ -1157,7 +1159,7 @@ static int netem_dump_class(struct Qdisc *sch, unsigned long cl,
 }
 
 static int netem_graft(struct Qdisc *sch, unsigned long arg, struct Qdisc *new,
-		     struct Qdisc **old)
+		     struct Qdisc **old, struct netlink_ext_ack *extack)
 {
 	struct netem_sched_data *q = qdisc_priv(sch);
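
Every init/change/graft signature in this patch grows a struct netlink_ext_ack parameter so that failures can return a human-readable message along with the errno. A hypothetical use (foo_change and the message text are illustrative, not from this patch):

static int foo_change(struct Qdisc *sch, struct nlattr *opt,
		      struct netlink_ext_ack *extack)
{
	if (!opt) {
		/* The message travels back to userspace with -EINVAL. */
		NL_SET_ERR_MSG(extack, "qdisc options are required");
		return -EINVAL;
	}
	return 0;
}
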
 
diff --git a/net/sched/sch_pie.c b/net/sched/sch_pie.c
index 776c694c77c7..18d30bb86881 100644
--- a/net/sched/sch_pie.c
+++ b/net/sched/sch_pie.c
@@ -181,7 +181,8 @@ static const struct nla_policy pie_policy[TCA_PIE_MAX + 1] = {
 	[TCA_PIE_BYTEMODE] = {.type = NLA_U32},
 };
 
-static int pie_change(struct Qdisc *sch, struct nlattr *opt)
+static int pie_change(struct Qdisc *sch, struct nlattr *opt,
+		      struct netlink_ext_ack *extack)
 {
 	struct pie_sched_data *q = qdisc_priv(sch);
 	struct nlattr *tb[TCA_PIE_MAX + 1];
@@ -439,7 +440,8 @@ static void pie_timer(struct timer_list *t)
 
 }
 
-static int pie_init(struct Qdisc *sch, struct nlattr *opt)
+static int pie_init(struct Qdisc *sch, struct nlattr *opt,
+		    struct netlink_ext_ack *extack)
 {
 	struct pie_sched_data *q = qdisc_priv(sch);
 
@@ -451,7 +453,7 @@ static int pie_init(struct Qdisc *sch, struct nlattr *opt)
 	timer_setup(&q->adapt_timer, pie_timer, 0);
 
 	if (opt) {
-		int err = pie_change(sch, opt);
+		int err = pie_change(sch, opt, extack);
 
 		if (err)
 			return err;
diff --git a/net/sched/sch_plug.c b/net/sched/sch_plug.c
index 1c6cbab3e7b9..5619d2eb17b6 100644
--- a/net/sched/sch_plug.c
+++ b/net/sched/sch_plug.c
@@ -123,7 +123,8 @@ static struct sk_buff *plug_dequeue(struct Qdisc *sch)
 	return qdisc_dequeue_head(sch);
 }
 
-static int plug_init(struct Qdisc *sch, struct nlattr *opt)
+static int plug_init(struct Qdisc *sch, struct nlattr *opt,
+		     struct netlink_ext_ack *extack)
 {
 	struct plug_sched_data *q = qdisc_priv(sch);
 
@@ -158,7 +159,8 @@ static int plug_init(struct Qdisc *sch, struct nlattr *opt)
  *   command is received (just act as a pass-thru queue).
  * TCQ_PLUG_LIMIT: Increase/decrease queue size
  */
-static int plug_change(struct Qdisc *sch, struct nlattr *opt)
+static int plug_change(struct Qdisc *sch, struct nlattr *opt,
+		       struct netlink_ext_ack *extack)
 {
 	struct plug_sched_data *q = qdisc_priv(sch);
 	struct tc_plug_qopt *msg;
diff --git a/net/sched/sch_prio.c b/net/sched/sch_prio.c
index 2c79559a0d31..efbf51f35778 100644
--- a/net/sched/sch_prio.c
+++ b/net/sched/sch_prio.c
@@ -142,6 +142,31 @@ prio_reset(struct Qdisc *sch)
 	sch->q.qlen = 0;
 }
 
+static int prio_offload(struct Qdisc *sch, bool enable)
+{
+	struct prio_sched_data *q = qdisc_priv(sch);
+	struct net_device *dev = qdisc_dev(sch);
+	struct tc_prio_qopt_offload opt = {
+		.handle = sch->handle,
+		.parent = sch->parent,
+	};
+
+	if (!tc_can_offload(dev) || !dev->netdev_ops->ndo_setup_tc)
+		return -EOPNOTSUPP;
+
+	if (enable) {
+		opt.command = TC_PRIO_REPLACE;
+		opt.replace_params.bands = q->bands;
+		memcpy(&opt.replace_params.priomap, q->prio2band,
+		       TC_PRIO_MAX + 1);
+		opt.replace_params.qstats = &sch->qstats;
+	} else {
+		opt.command = TC_PRIO_DESTROY;
+	}
+
+	return dev->netdev_ops->ndo_setup_tc(dev, TC_SETUP_QDISC_PRIO, &opt);
+}
+
 static void
 prio_destroy(struct Qdisc *sch)
 {
@@ -149,11 +174,13 @@ prio_destroy(struct Qdisc *sch)
 	struct prio_sched_data *q = qdisc_priv(sch);
 
 	tcf_block_put(q->block);
+	prio_offload(sch, false);
 	for (prio = 0; prio < q->bands; prio++)
 		qdisc_destroy(q->queues[prio]);
 }
 
-static int prio_tune(struct Qdisc *sch, struct nlattr *opt)
+static int prio_tune(struct Qdisc *sch, struct nlattr *opt,
+		     struct netlink_ext_ack *extack)
 {
 	struct prio_sched_data *q = qdisc_priv(sch);
 	struct Qdisc *queues[TCQ_PRIO_BANDS];
@@ -175,7 +202,8 @@ static int prio_tune(struct Qdisc *sch, struct nlattr *opt)
 	/* Before commit, make sure we can allocate all new qdiscs */
 	for (i = oldbands; i < qopt->bands; i++) {
 		queues[i] = qdisc_create_dflt(sch->dev_queue, &pfifo_qdisc_ops,
-					      TC_H_MAKE(sch->handle, i + 1));
+					      TC_H_MAKE(sch->handle, i + 1),
+					      extack);
 		if (!queues[i]) {
 			while (i > oldbands)
 				qdisc_destroy(queues[--i]);
@@ -202,10 +230,12 @@ static int prio_tune(struct Qdisc *sch, struct nlattr *opt)
 	}
 
 	sch_tree_unlock(sch);
+	prio_offload(sch, true);
 	return 0;
 }
 
-static int prio_init(struct Qdisc *sch, struct nlattr *opt)
+static int prio_init(struct Qdisc *sch, struct nlattr *opt,
+		     struct netlink_ext_ack *extack)
 {
 	struct prio_sched_data *q = qdisc_priv(sch);
 	int err;
@@ -213,11 +243,42 @@ static int prio_init(struct Qdisc *sch, struct nlattr *opt)
 	if (!opt)
 		return -EINVAL;
 
-	err = tcf_block_get(&q->block, &q->filter_list, sch);
+	err = tcf_block_get(&q->block, &q->filter_list, sch, extack);
 	if (err)
 		return err;
 
-	return prio_tune(sch, opt);
+	return prio_tune(sch, opt, extack);
+}
+
+static int prio_dump_offload(struct Qdisc *sch)
+{
+	struct net_device *dev = qdisc_dev(sch);
+	struct tc_prio_qopt_offload hw_stats = {
+		.command = TC_PRIO_STATS,
+		.handle = sch->handle,
+		.parent = sch->parent,
+		{
+			.stats = {
+				.bstats = &sch->bstats,
+				.qstats = &sch->qstats,
+			},
+		},
+	};
+	int err;
+
+	sch->flags &= ~TCQ_F_OFFLOADED;
+	if (!tc_can_offload(dev) || !dev->netdev_ops->ndo_setup_tc)
+		return 0;
+
+	err = dev->netdev_ops->ndo_setup_tc(dev, TC_SETUP_QDISC_PRIO,
+					    &hw_stats);
+	if (err == -EOPNOTSUPP)
+		return 0;
+
+	if (!err)
+		sch->flags |= TCQ_F_OFFLOADED;
+
+	return err;
 }
 
 static int prio_dump(struct Qdisc *sch, struct sk_buff *skb)
@@ -225,10 +286,15 @@ static int prio_dump(struct Qdisc *sch, struct sk_buff *skb)
 	struct prio_sched_data *q = qdisc_priv(sch);
 	unsigned char *b = skb_tail_pointer(skb);
 	struct tc_prio_qopt opt;
+	int err;
 
 	opt.bands = q->bands;
 	memcpy(&opt.priomap, q->prio2band, TC_PRIO_MAX + 1);
 
+	err = prio_dump_offload(sch);
+	if (err)
+		goto nla_put_failure;
+
 	if (nla_put(skb, TCA_OPTIONS, sizeof(opt), &opt))
 		goto nla_put_failure;
 
@@ -240,7 +306,7 @@ nla_put_failure:
 }
 
 static int prio_graft(struct Qdisc *sch, unsigned long arg, struct Qdisc *new,
-		      struct Qdisc **old)
+		      struct Qdisc **old, struct netlink_ext_ack *extack)
 {
 	struct prio_sched_data *q = qdisc_priv(sch);
 	unsigned long band = arg - 1;
@@ -327,7 +393,8 @@ static void prio_walk(struct Qdisc *sch, struct qdisc_walker *arg)
 	}
 }
 
-static struct tcf_block *prio_tcf_block(struct Qdisc *sch, unsigned long cl)
+static struct tcf_block *prio_tcf_block(struct Qdisc *sch, unsigned long cl,
+					struct netlink_ext_ack *extack)
 {
 	struct prio_sched_data *q = qdisc_priv(sch);
 
diff --git a/net/sched/sch_qfq.c b/net/sched/sch_qfq.c
index 6962b37a3ad3..bb1a9c11fc54 100644
--- a/net/sched/sch_qfq.c
+++ b/net/sched/sch_qfq.c
@@ -402,7 +402,8 @@ static int qfq_change_agg(struct Qdisc *sch, struct qfq_class *cl, u32 weight,
 }
 
 static int qfq_change_class(struct Qdisc *sch, u32 classid, u32 parentid,
-			    struct nlattr **tca, unsigned long *arg)
+			    struct nlattr **tca, unsigned long *arg,
+			    struct netlink_ext_ack *extack)
 {
 	struct qfq_sched *q = qdisc_priv(sch);
 	struct qfq_class *cl = (struct qfq_class *)*arg;
@@ -479,8 +480,8 @@ static int qfq_change_class(struct Qdisc *sch, u32 classid, u32 parentid,
 	cl->common.classid = classid;
 	cl->deficit = lmax;
 
-	cl->qdisc = qdisc_create_dflt(sch->dev_queue,
-				      &pfifo_qdisc_ops, classid);
+	cl->qdisc = qdisc_create_dflt(sch->dev_queue, &pfifo_qdisc_ops,
+				      classid, NULL);
 	if (cl->qdisc == NULL)
 		cl->qdisc = &noop_qdisc;
 
@@ -564,7 +565,8 @@ static unsigned long qfq_search_class(struct Qdisc *sch, u32 classid)
 	return (unsigned long)qfq_find_class(sch, classid);
 }
 
-static struct tcf_block *qfq_tcf_block(struct Qdisc *sch, unsigned long cl)
+static struct tcf_block *qfq_tcf_block(struct Qdisc *sch, unsigned long cl,
+				       struct netlink_ext_ack *extack)
 {
 	struct qfq_sched *q = qdisc_priv(sch);
 
@@ -593,13 +595,14 @@ static void qfq_unbind_tcf(struct Qdisc *sch, unsigned long arg)
 }
 
 static int qfq_graft_class(struct Qdisc *sch, unsigned long arg,
-			   struct Qdisc *new, struct Qdisc **old)
+			   struct Qdisc *new, struct Qdisc **old,
+			   struct netlink_ext_ack *extack)
 {
 	struct qfq_class *cl = (struct qfq_class *)arg;
 
 	if (new == NULL) {
-		new = qdisc_create_dflt(sch->dev_queue,
-					&pfifo_qdisc_ops, cl->common.classid);
+		new = qdisc_create_dflt(sch->dev_queue, &pfifo_qdisc_ops,
+					cl->common.classid, NULL);
 		if (new == NULL)
 			new = &noop_qdisc;
 	}
@@ -1413,14 +1416,15 @@ static void qfq_qlen_notify(struct Qdisc *sch, unsigned long arg)
 	qfq_deactivate_class(q, cl);
 }
 
-static int qfq_init_qdisc(struct Qdisc *sch, struct nlattr *opt)
+static int qfq_init_qdisc(struct Qdisc *sch, struct nlattr *opt,
+			  struct netlink_ext_ack *extack)
 {
 	struct qfq_sched *q = qdisc_priv(sch);
 	struct qfq_group *grp;
 	int i, j, err;
 	u32 max_cl_shift, maxbudg_shift, max_classes;
 
-	err = tcf_block_get(&q->block, &q->filter_list, sch);
+	err = tcf_block_get(&q->block, &q->filter_list, sch, extack);
 	if (err)
 		return err;
 
diff --git a/net/sched/sch_red.c b/net/sched/sch_red.c
index f0747eb87dc4..16644b3d2362 100644
--- a/net/sched/sch_red.c
+++ b/net/sched/sch_red.c
@@ -157,7 +157,6 @@ static int red_offload(struct Qdisc *sch, bool enable)
 		.handle = sch->handle,
 		.parent = sch->parent,
 	};
-	int err;
 
 	if (!tc_can_offload(dev) || !dev->netdev_ops->ndo_setup_tc)
 		return -EOPNOTSUPP;
@@ -168,18 +167,12 @@ static int red_offload(struct Qdisc *sch, bool enable)
 		opt.set.max = q->parms.qth_max >> q->parms.Wlog;
 		opt.set.probability = q->parms.max_P;
 		opt.set.is_ecn = red_use_ecn(q);
+		opt.set.qstats = &sch->qstats;
 	} else {
 		opt.command = TC_RED_DESTROY;
 	}
 
-	err = dev->netdev_ops->ndo_setup_tc(dev, TC_SETUP_QDISC_RED, &opt);
-
-	if (!err && enable)
-		sch->flags |= TCQ_F_OFFLOADED;
-	else
-		sch->flags &= ~TCQ_F_OFFLOADED;
-
-	return err;
+	return dev->netdev_ops->ndo_setup_tc(dev, TC_SETUP_QDISC_RED, &opt);
 }
 
 static void red_destroy(struct Qdisc *sch)
@@ -197,7 +190,8 @@ static const struct nla_policy red_policy[TCA_RED_MAX + 1] = {
 	[TCA_RED_MAX_P] = { .type = NLA_U32 },
 };
 
-static int red_change(struct Qdisc *sch, struct nlattr *opt)
+static int red_change(struct Qdisc *sch, struct nlattr *opt,
+		      struct netlink_ext_ack *extack)
 {
 	struct red_sched_data *q = qdisc_priv(sch);
 	struct nlattr *tb[TCA_RED_MAX + 1];
@@ -224,7 +218,8 @@ static int red_change(struct Qdisc *sch, struct nlattr *opt)
 		return -EINVAL;
 
 	if (ctl->limit > 0) {
-		child = fifo_create_dflt(sch, &bfifo_qdisc_ops, ctl->limit);
+		child = fifo_create_dflt(sch, &bfifo_qdisc_ops, ctl->limit,
+					 extack);
 		if (IS_ERR(child))
 			return PTR_ERR(child);
 	}
@@ -272,14 +267,15 @@ static inline void red_adaptative_timer(struct timer_list *t)
 	spin_unlock(root_lock);
 }
 
-static int red_init(struct Qdisc *sch, struct nlattr *opt)
+static int red_init(struct Qdisc *sch, struct nlattr *opt,
+		    struct netlink_ext_ack *extack)
 {
 	struct red_sched_data *q = qdisc_priv(sch);
 
 	q->qdisc = &noop_qdisc;
 	q->sch = sch;
 	timer_setup(&q->adapt_timer, red_adaptative_timer, 0);
-	return red_change(sch, opt);
+	return red_change(sch, opt, extack);
 }
 
 static int red_dump_offload_stats(struct Qdisc *sch, struct tc_red_qopt *opt)
@@ -294,12 +290,22 @@ static int red_dump_offload_stats(struct Qdisc *sch, struct tc_red_qopt *opt)
 			.stats.qstats = &sch->qstats,
 		},
 	};
+	int err;
 
-	if (!(sch->flags & TCQ_F_OFFLOADED))
+	sch->flags &= ~TCQ_F_OFFLOADED;
+
+	if (!tc_can_offload(dev) || !dev->netdev_ops->ndo_setup_tc)
 		return 0;
 
-	return dev->netdev_ops->ndo_setup_tc(dev, TC_SETUP_QDISC_RED,
-					     &hw_stats);
+	err = dev->netdev_ops->ndo_setup_tc(dev, TC_SETUP_QDISC_RED,
+					    &hw_stats);
+	if (err == -EOPNOTSUPP)
+		return 0;
+
+	if (!err)
+		sch->flags |= TCQ_F_OFFLOADED;
+
+	return err;
 }
 
 static int red_dump(struct Qdisc *sch, struct sk_buff *skb)
@@ -317,7 +323,6 @@ static int red_dump(struct Qdisc *sch, struct sk_buff *skb)
 	};
 	int err;
 
-	sch->qstats.backlog = q->qdisc->qstats.backlog;
 	err = red_dump_offload_stats(sch, &opt);
 	if (err)
 		goto nla_put_failure;
@@ -339,32 +344,24 @@ static int red_dump_stats(struct Qdisc *sch, struct gnet_dump *d)
 {
 	struct red_sched_data *q = qdisc_priv(sch);
 	struct net_device *dev = qdisc_dev(sch);
-	struct tc_red_xstats st = {
-		.early	= q->stats.prob_drop + q->stats.forced_drop,
-		.pdrop	= q->stats.pdrop,
-		.other	= q->stats.other,
-		.marked	= q->stats.prob_mark + q->stats.forced_mark,
-	};
+	struct tc_red_xstats st = {0};
 
 	if (sch->flags & TCQ_F_OFFLOADED) {
-		struct red_stats hw_stats = {0};
 		struct tc_red_qopt_offload hw_stats_request = {
 			.command = TC_RED_XSTATS,
 			.handle = sch->handle,
 			.parent = sch->parent,
 			{
-				.xstats = &hw_stats,
+				.xstats = &q->stats,
 			},
 		};
-		if (!dev->netdev_ops->ndo_setup_tc(dev,
-						   TC_SETUP_QDISC_RED,
-						   &hw_stats_request)) {
-			st.early += hw_stats.prob_drop + hw_stats.forced_drop;
-			st.pdrop += hw_stats.pdrop;
-			st.other += hw_stats.other;
-			st.marked += hw_stats.prob_mark + hw_stats.forced_mark;
-		}
+		dev->netdev_ops->ndo_setup_tc(dev, TC_SETUP_QDISC_RED,
+					      &hw_stats_request);
 	}
+	st.early = q->stats.prob_drop + q->stats.forced_drop;
+	st.pdrop = q->stats.pdrop;
+	st.other = q->stats.other;
+	st.marked = q->stats.prob_mark + q->stats.forced_mark;
 
 	return gnet_stats_copy_app(d, &st, sizeof(st));
 }
@@ -380,7 +377,7 @@ static int red_dump_class(struct Qdisc *sch, unsigned long cl,
 }
 
 static int red_graft(struct Qdisc *sch, unsigned long arg, struct Qdisc *new,
-		     struct Qdisc **old)
+		     struct Qdisc **old, struct netlink_ext_ack *extack)
 {
 	struct red_sched_data *q = qdisc_priv(sch);
 
diff --git a/net/sched/sch_sfb.c b/net/sched/sch_sfb.c
index 0678debdd856..7cbdad8419b7 100644
--- a/net/sched/sch_sfb.c
+++ b/net/sched/sch_sfb.c
@@ -488,7 +488,8 @@ static const struct tc_sfb_qopt sfb_default_ops = {
 	.penalty_burst = 20,
 };
 
-static int sfb_change(struct Qdisc *sch, struct nlattr *opt)
+static int sfb_change(struct Qdisc *sch, struct nlattr *opt,
+		      struct netlink_ext_ack *extack)
 {
 	struct sfb_sched_data *q = qdisc_priv(sch);
 	struct Qdisc *child;
@@ -512,7 +513,7 @@ static int sfb_change(struct Qdisc *sch, struct nlattr *opt)
 	if (limit == 0)
 		limit = qdisc_dev(sch)->tx_queue_len;
 
-	child = fifo_create_dflt(sch, &pfifo_qdisc_ops, limit);
+	child = fifo_create_dflt(sch, &pfifo_qdisc_ops, limit, extack);
 	if (IS_ERR(child))
 		return PTR_ERR(child);
 
@@ -549,17 +550,18 @@ static int sfb_change(struct Qdisc *sch, struct nlattr *opt)
 	return 0;
 }
 
-static int sfb_init(struct Qdisc *sch, struct nlattr *opt)
+static int sfb_init(struct Qdisc *sch, struct nlattr *opt,
+		    struct netlink_ext_ack *extack)
 {
 	struct sfb_sched_data *q = qdisc_priv(sch);
 	int err;
 
-	err = tcf_block_get(&q->block, &q->filter_list, sch);
+	err = tcf_block_get(&q->block, &q->filter_list, sch, extack);
 	if (err)
 		return err;
 
 	q->qdisc = &noop_qdisc;
-	return sfb_change(sch, opt);
+	return sfb_change(sch, opt, extack);
 }
 
 static int sfb_dump(struct Qdisc *sch, struct sk_buff *skb)
@@ -615,7 +617,7 @@ static int sfb_dump_class(struct Qdisc *sch, unsigned long cl,
 }
 
 static int sfb_graft(struct Qdisc *sch, unsigned long arg, struct Qdisc *new,
-		     struct Qdisc **old)
+		     struct Qdisc **old, struct netlink_ext_ack *extack)
 {
 	struct sfb_sched_data *q = qdisc_priv(sch);
 
@@ -643,7 +645,8 @@ static void sfb_unbind(struct Qdisc *sch, unsigned long arg)
 }
 
 static int sfb_change_class(struct Qdisc *sch, u32 classid, u32 parentid,
-			    struct nlattr **tca, unsigned long *arg)
+			    struct nlattr **tca, unsigned long *arg,
+			    struct netlink_ext_ack *extack)
 {
 	return -ENOSYS;
 }
@@ -665,7 +668,8 @@ static void sfb_walk(struct Qdisc *sch, struct qdisc_walker *walker)
 	}
 }
 
-static struct tcf_block *sfb_tcf_block(struct Qdisc *sch, unsigned long cl)
+static struct tcf_block *sfb_tcf_block(struct Qdisc *sch, unsigned long cl,
+				       struct netlink_ext_ack *extack)
 {
 	struct sfb_sched_data *q = qdisc_priv(sch);
 
diff --git a/net/sched/sch_sfq.c b/net/sched/sch_sfq.c
index 930e5bd26d3d..2f2678197760 100644
--- a/net/sched/sch_sfq.c
+++ b/net/sched/sch_sfq.c
@@ -721,7 +721,8 @@ static void sfq_destroy(struct Qdisc *sch)
 	kfree(q->red_parms);
 }
 
-static int sfq_init(struct Qdisc *sch, struct nlattr *opt)
+static int sfq_init(struct Qdisc *sch, struct nlattr *opt,
+		    struct netlink_ext_ack *extack)
 {
 	struct sfq_sched_data *q = qdisc_priv(sch);
 	int i;
@@ -730,7 +731,7 @@ static int sfq_init(struct Qdisc *sch, struct nlattr *opt)
 	q->sch = sch;
 	timer_setup(&q->perturb_timer, sfq_perturbation, TIMER_DEFERRABLE);
 
-	err = tcf_block_get(&q->block, &q->filter_list, sch);
+	err = tcf_block_get(&q->block, &q->filter_list, sch, extack);
 	if (err)
 		return err;
 
@@ -836,7 +837,8 @@ static void sfq_unbind(struct Qdisc *q, unsigned long cl)
 {
 }
 
-static struct tcf_block *sfq_tcf_block(struct Qdisc *sch, unsigned long cl)
+static struct tcf_block *sfq_tcf_block(struct Qdisc *sch, unsigned long cl,
+				       struct netlink_ext_ack *extack)
 {
 	struct sfq_sched_data *q = qdisc_priv(sch);
 
diff --git a/net/sched/sch_tbf.c b/net/sched/sch_tbf.c
index 120f4f365967..83e76d046993 100644
--- a/net/sched/sch_tbf.c
+++ b/net/sched/sch_tbf.c
@@ -302,7 +302,8 @@ static const struct nla_policy tbf_policy[TCA_TBF_MAX + 1] = {
 	[TCA_TBF_PBURST] = { .type = NLA_U32 },
 };
 
-static int tbf_change(struct Qdisc *sch, struct nlattr *opt)
+static int tbf_change(struct Qdisc *sch, struct nlattr *opt,
+		      struct netlink_ext_ack *extack)
 {
 	int err;
 	struct tbf_sched_data *q = qdisc_priv(sch);
@@ -326,11 +327,13 @@ static int tbf_change(struct Qdisc *sch, struct nlattr *opt)
 	qopt = nla_data(tb[TCA_TBF_PARMS]);
 	if (qopt->rate.linklayer == TC_LINKLAYER_UNAWARE)
 		qdisc_put_rtab(qdisc_get_rtab(&qopt->rate,
-					      tb[TCA_TBF_RTAB]));
+					      tb[TCA_TBF_RTAB],
+					      NULL));
 
 	if (qopt->peakrate.linklayer == TC_LINKLAYER_UNAWARE)
 			qdisc_put_rtab(qdisc_get_rtab(&qopt->peakrate,
-						      tb[TCA_TBF_PTAB]));
+						      tb[TCA_TBF_PTAB],
+						      NULL));
 
 	buffer = min_t(u64, PSCHED_TICKS2NS(qopt->buffer), ~0U);
 	mtu = min_t(u64, PSCHED_TICKS2NS(qopt->mtu), ~0U);
@@ -383,7 +386,8 @@ static int tbf_change(struct Qdisc *sch, struct nlattr *opt)
 		if (err)
 			goto done;
 	} else if (qopt->limit > 0) {
-		child = fifo_create_dflt(sch, &bfifo_qdisc_ops, qopt->limit);
+		child = fifo_create_dflt(sch, &bfifo_qdisc_ops, qopt->limit,
+					 extack);
 		if (IS_ERR(child)) {
 			err = PTR_ERR(child);
 			goto done;
@@ -421,19 +425,20 @@ done:
 	return err;
 }
 
-static int tbf_init(struct Qdisc *sch, struct nlattr *opt)
+static int tbf_init(struct Qdisc *sch, struct nlattr *opt,
+		    struct netlink_ext_ack *extack)
 {
 	struct tbf_sched_data *q = qdisc_priv(sch);
 
 	qdisc_watchdog_init(&q->watchdog, sch);
 	q->qdisc = &noop_qdisc;
 
-	if (opt == NULL)
+	if (!opt)
 		return -EINVAL;
 
 	q->t_c = ktime_get_ns();
 
-	return tbf_change(sch, opt);
+	return tbf_change(sch, opt, extack);
 }
 
 static void tbf_destroy(struct Qdisc *sch)
@@ -494,7 +499,7 @@ static int tbf_dump_class(struct Qdisc *sch, unsigned long cl,
 }
 
 static int tbf_graft(struct Qdisc *sch, unsigned long arg, struct Qdisc *new,
-		     struct Qdisc **old)
+		     struct Qdisc **old, struct netlink_ext_ack *extack)
 {
 	struct tbf_sched_data *q = qdisc_priv(sch);
 
diff --git a/net/sched/sch_teql.c b/net/sched/sch_teql.c
index 9fe6b427afed..93f04cf5cac1 100644
--- a/net/sched/sch_teql.c
+++ b/net/sched/sch_teql.c
@@ -167,7 +167,8 @@ teql_destroy(struct Qdisc *sch)
 	}
 }
 
-static int teql_qdisc_init(struct Qdisc *sch, struct nlattr *opt)
+static int teql_qdisc_init(struct Qdisc *sch, struct nlattr *opt,
+			   struct netlink_ext_ack *extack)
 {
 	struct net_device *dev = qdisc_dev(sch);
 	struct teql_master *m = (struct teql_master *)sch->ops;
diff --git a/net/sctp/Kconfig b/net/sctp/Kconfig
index d9c04dc1b3f3..c740b189d4ba 100644
--- a/net/sctp/Kconfig
+++ b/net/sctp/Kconfig
@@ -37,18 +37,6 @@ menuconfig IP_SCTP
 
 if IP_SCTP
 
-config NET_SCTPPROBE
-	tristate "SCTP: Association probing"
-        depends on PROC_FS && KPROBES
-        ---help---
-        This module allows for capturing the changes to SCTP association
-        state in response to incoming packets. It is used for debugging
-        SCTP congestion control algorithms. If you don't understand
-        what was just said, you don't need it: say N.
-
-        To compile this code as a module, choose M here: the
-        module will be called sctp_probe.
-
 config SCTP_DBG_OBJCNT
 	bool "SCTP: Debug object counts"
 	depends on PROC_FS
diff --git a/net/sctp/Makefile b/net/sctp/Makefile
index 1ca84a288443..6776582ec449 100644
--- a/net/sctp/Makefile
+++ b/net/sctp/Makefile
@@ -4,7 +4,6 @@
 #
 
 obj-$(CONFIG_IP_SCTP) += sctp.o
-obj-$(CONFIG_NET_SCTPPROBE) += sctp_probe.o
 obj-$(CONFIG_INET_SCTP_DIAG) += sctp_diag.o
 
 sctp-y := sm_statetable.o sm_statefuns.o sm_sideeffect.o \
@@ -14,9 +13,7 @@ sctp-y := sm_statetable.o sm_statefuns.o sm_sideeffect.o \
 	  tsnmap.o bind_addr.o socket.o primitive.o \
 	  output.o input.o debug.o stream.o auth.o \
 	  offload.o stream_sched.o stream_sched_prio.o \
-	  stream_sched_rr.o
-
-sctp_probe-y := probe.o
+	  stream_sched_rr.o stream_interleave.o
 
 sctp-$(CONFIG_SCTP_DBG_OBJCNT) += objcnt.o
 sctp-$(CONFIG_PROC_FS) += proc.o
diff --git a/net/sctp/associola.c b/net/sctp/associola.c
index 69394f4d6091..837806dd5799 100644
--- a/net/sctp/associola.c
+++ b/net/sctp/associola.c
@@ -861,7 +861,7 @@ void sctp_assoc_control_transport(struct sctp_association *asoc,
 		event = sctp_ulpevent_make_peer_addr_change(asoc, &addr,
 					0, spc_state, error, GFP_ATOMIC);
 		if (event)
-			sctp_ulpq_tail_event(&asoc->ulpq, event);
+			asoc->stream.si->enqueue_event(&asoc->ulpq, event);
 	}
 
 	/* Select new active and retran paths. */
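
Calls like asoc->stream.si->enqueue_event() here and throughout the SCTP changes below go through a new per-stream operations table, selected once per association, so the DATA and I-DATA (RFC 8260 stream interleaving) code paths need no branch at every call site. A rough sketch of the table's assumed shape (the real definition lives elsewhere in this series):

struct sctp_stream_interleave {
	__u16	data_chunk_len;	/* DATA vs I-DATA chunk header size */
	/* (I-)DATA processing */
	struct sctp_chunk *(*make_datafrag)(const struct sctp_association *asoc,
					    const struct sctp_sndrcvinfo *sinfo,
					    int len, __u8 flags, gfp_t gfp);
	void	(*assign_number)(struct sctp_chunk *chunk);
	int	(*ulpevent_data)(struct sctp_ulpq *ulpq,
				 struct sctp_chunk *chunk, gfp_t gfp);
	int	(*enqueue_event)(struct sctp_ulpq *ulpq,
				 struct sctp_ulpevent *event);
	/* plus renege_events, start_pd, abort_pd and the
	 * (I-)FORWARD-TSN hooks used further down
	 */
};
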
diff --git a/net/sctp/chunk.c b/net/sctp/chunk.c
index 7f8baa48e7c2..991a530c6b31 100644
--- a/net/sctp/chunk.c
+++ b/net/sctp/chunk.c
@@ -124,7 +124,7 @@ static void sctp_datamsg_destroy(struct sctp_datamsg *msg)
 			ev = sctp_ulpevent_make_send_failed(asoc, chunk, sent,
 							    error, GFP_ATOMIC);
 			if (ev)
-				sctp_ulpq_tail_event(&asoc->ulpq, ev);
+				asoc->stream.si->enqueue_event(&asoc->ulpq, ev);
 		}
 
 		sctp_chunk_put(chunk);
@@ -191,7 +191,7 @@ struct sctp_datamsg *sctp_datamsg_from_user(struct sctp_association *asoc,
 	 */
 	max_data = asoc->pathmtu -
 		   sctp_sk(asoc->base.sk)->pf->af->net_header_len -
-		   sizeof(struct sctphdr) - sizeof(struct sctp_data_chunk);
+		   sizeof(struct sctphdr) - sctp_datachk_len(&asoc->stream);
 	max_data = SCTP_TRUNC4(max_data);
 
 	/* If the peer requested that we authenticate DATA chunks
@@ -264,8 +264,8 @@ struct sctp_datamsg *sctp_datamsg_from_user(struct sctp_association *asoc,
 				frag |= SCTP_DATA_SACK_IMM;
 		}
 
-		chunk = sctp_make_datafrag_empty(asoc, sinfo, len, frag,
-						 0, GFP_KERNEL);
+		chunk = asoc->stream.si->make_datafrag(asoc, sinfo, len, frag,
+						       GFP_KERNEL);
 		if (!chunk) {
 			err = -ENOMEM;
 			goto errout;
diff --git a/net/sctp/endpointola.c b/net/sctp/endpointola.c
index ee1e601a0b11..8b3146816519 100644
--- a/net/sctp/endpointola.c
+++ b/net/sctp/endpointola.c
@@ -232,7 +232,7 @@ void sctp_endpoint_free(struct sctp_endpoint *ep)
 {
 	ep->base.dead = true;
 
-	ep->base.sk->sk_state = SCTP_SS_CLOSED;
+	inet_sk_set_state(ep->base.sk, SCTP_SS_CLOSED);
 
 	/* Unlink this endpoint, so we can't find it again! */
 	sctp_unhash_endpoint(ep);
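
inet_sk_set_state() replaces direct stores to sk->sk_state so that every SCTP socket state transition is visible to the inet_sock_set_state tracepoint. Assumed implementation (added elsewhere in this series):

void inet_sk_set_state(struct sock *sk, int state)
{
	/* Report old and new state to tracing before the store. */
	trace_inet_sock_set_state(sk, sk->sk_state, state);
	sk->sk_state = state;
}
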
diff --git a/net/sctp/output.c b/net/sctp/output.c
index 4a865cd06d76..01a26ee051e3 100644
--- a/net/sctp/output.c
+++ b/net/sctp/output.c
@@ -313,6 +313,7 @@ static enum sctp_xmit __sctp_packet_append_chunk(struct sctp_packet *packet,
 	/* We believe that this chunk is OK to add to the packet */
 	switch (chunk->chunk_hdr->type) {
 	case SCTP_CID_DATA:
+	case SCTP_CID_I_DATA:
 		/* Account for the data being in the packet */
 		sctp_packet_append_data(packet, chunk);
 		/* Disallow SACK bundling after DATA. */
@@ -724,7 +725,7 @@ static enum sctp_xmit sctp_packet_can_append_data(struct sctp_packet *packet,
 	 * or delay in hopes of bundling a full sized packet.
 	 */
 	if (chunk->skb->len + q->out_qlen > transport->pathmtu -
-		packet->overhead - sizeof(struct sctp_data_chunk) - 4)
+	    packet->overhead - sctp_datachk_len(&chunk->asoc->stream) - 4)
 		/* Enough data queued to fill a packet */
 		return SCTP_XMIT_OK;
 
@@ -759,7 +760,7 @@ static void sctp_packet_append_data(struct sctp_packet *packet,
 
 	asoc->peer.rwnd = rwnd;
 	sctp_chunk_assign_tsn(chunk);
-	sctp_chunk_assign_ssn(chunk);
+	asoc->stream.si->assign_number(chunk);
 }
 
 static enum sctp_xmit sctp_packet_will_fit(struct sctp_packet *packet,
diff --git a/net/sctp/outqueue.c b/net/sctp/outqueue.c
index c4ec99b20150..f211b3db6a35 100644
--- a/net/sctp/outqueue.c
+++ b/net/sctp/outqueue.c
@@ -67,8 +67,6 @@ static void sctp_mark_missing(struct sctp_outq *q,
 			      __u32 highest_new_tsn,
 			      int count_of_newacks);
 
-static void sctp_generate_fwdtsn(struct sctp_outq *q, __u32 sack_ctsn);
-
 static void sctp_outq_flush(struct sctp_outq *q, int rtx_timeout, gfp_t gfp);
 
 /* Add data to the front of the queue. */
@@ -591,7 +589,7 @@ void sctp_retransmit(struct sctp_outq *q, struct sctp_transport *transport,
 	 * following the procedures outlined in C1 - C5.
 	 */
 	if (reason == SCTP_RTXR_T3_RTX)
-		sctp_generate_fwdtsn(q, q->asoc->ctsn_ack_point);
+		q->asoc->stream.si->generate_ftsn(q, q->asoc->ctsn_ack_point);
 
 	/* Flush the queues only on timeout, since fast_rtx is only
 	 * triggered during sack processing and the queue
@@ -942,6 +940,7 @@ static void sctp_outq_flush(struct sctp_outq *q, int rtx_timeout, gfp_t gfp)
 		case SCTP_CID_ECN_ECNE:
 		case SCTP_CID_ASCONF:
 		case SCTP_CID_FWD_TSN:
+		case SCTP_CID_I_FWD_TSN:
 		case SCTP_CID_RECONF:
 			status = sctp_packet_transmit_chunk(packet, chunk,
 							    one_packet, gfp);
@@ -956,7 +955,8 @@ static void sctp_outq_flush(struct sctp_outq *q, int rtx_timeout, gfp_t gfp)
 			 * sender MUST assure that at least one T3-rtx
 			 * timer is running.
 			 */
-			if (chunk->chunk_hdr->type == SCTP_CID_FWD_TSN) {
+			if (chunk->chunk_hdr->type == SCTP_CID_FWD_TSN ||
+			    chunk->chunk_hdr->type == SCTP_CID_I_FWD_TSN) {
 				sctp_transport_reset_t3_rtx(transport);
 				transport->last_time_sent = jiffies;
 			}
@@ -1372,7 +1372,7 @@ int sctp_outq_sack(struct sctp_outq *q, struct sctp_chunk *chunk)
 
 	asoc->peer.rwnd = sack_a_rwnd;
 
-	sctp_generate_fwdtsn(q, sack_ctsn);
+	asoc->stream.si->generate_ftsn(q, sack_ctsn);
 
 	pr_debug("%s: sack cumulative tsn ack:0x%x\n", __func__, sack_ctsn);
 	pr_debug("%s: cumulative tsn ack of assoc:%p is 0x%x, "
@@ -1795,7 +1795,7 @@ static inline int sctp_get_skip_pos(struct sctp_fwdtsn_skip *skiplist,
 }
 
 /* Create and add a fwdtsn chunk to the outq's control queue if needed. */
-static void sctp_generate_fwdtsn(struct sctp_outq *q, __u32 ctsn)
+void sctp_generate_fwdtsn(struct sctp_outq *q, __u32 ctsn)
 {
 	struct sctp_association *asoc = q->asoc;
 	struct sctp_chunk *ftsn_chunk = NULL;
diff --git a/net/sctp/probe.c b/net/sctp/probe.c
deleted file mode 100644
index 1280f85a598d..000000000000
--- a/net/sctp/probe.c
+++ /dev/null
@@ -1,244 +0,0 @@
-/*
- * sctp_probe - Observe the SCTP flow with kprobes.
- *
- * The idea for this came from Werner Almesberger's umlsim
- * Copyright (C) 2004, Stephen Hemminger <shemminger@osdl.org>
- *
- * Modified for SCTP from Stephen Hemminger's code
- * Copyright (C) 2010, Wei Yongjun <yjwei@cn.fujitsu.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
- */
-
-#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
-
-#include <linux/kernel.h>
-#include <linux/kprobes.h>
-#include <linux/socket.h>
-#include <linux/sctp.h>
-#include <linux/proc_fs.h>
-#include <linux/vmalloc.h>
-#include <linux/module.h>
-#include <linux/kfifo.h>
-#include <linux/time.h>
-#include <net/net_namespace.h>
-
-#include <net/sctp/sctp.h>
-#include <net/sctp/sm.h>
-
-MODULE_SOFTDEP("pre: sctp");
-MODULE_AUTHOR("Wei Yongjun <yjwei@cn.fujitsu.com>");
-MODULE_DESCRIPTION("SCTP snooper");
-MODULE_LICENSE("GPL");
-
-static int port __read_mostly = 0;
-MODULE_PARM_DESC(port, "Port to match (0=all)");
-module_param(port, int, 0);
-
-static unsigned int fwmark __read_mostly = 0;
-MODULE_PARM_DESC(fwmark, "skb mark to match (0=no mark)");
-module_param(fwmark, uint, 0);
-
-static int bufsize __read_mostly = 64 * 1024;
-MODULE_PARM_DESC(bufsize, "Log buffer size (default 64k)");
-module_param(bufsize, int, 0);
-
-static int full __read_mostly = 1;
-MODULE_PARM_DESC(full, "Full log (1=every ack packet received,  0=only cwnd changes)");
-module_param(full, int, 0);
-
-static const char procname[] = "sctpprobe";
-
-static struct {
-	struct kfifo	  fifo;
-	spinlock_t	  lock;
-	wait_queue_head_t wait;
-	struct timespec64 tstart;
-} sctpw;
-
-static __printf(1, 2) void printl(const char *fmt, ...)
-{
-	va_list args;
-	int len;
-	char tbuf[256];
-
-	va_start(args, fmt);
-	len = vscnprintf(tbuf, sizeof(tbuf), fmt, args);
-	va_end(args);
-
-	kfifo_in_locked(&sctpw.fifo, tbuf, len, &sctpw.lock);
-	wake_up(&sctpw.wait);
-}
-
-static int sctpprobe_open(struct inode *inode, struct file *file)
-{
-	kfifo_reset(&sctpw.fifo);
-	ktime_get_ts64(&sctpw.tstart);
-
-	return 0;
-}
-
-static ssize_t sctpprobe_read(struct file *file, char __user *buf,
-			      size_t len, loff_t *ppos)
-{
-	int error = 0, cnt = 0;
-	unsigned char *tbuf;
-
-	if (!buf)
-		return -EINVAL;
-
-	if (len == 0)
-		return 0;
-
-	tbuf = vmalloc(len);
-	if (!tbuf)
-		return -ENOMEM;
-
-	error = wait_event_interruptible(sctpw.wait,
-					 kfifo_len(&sctpw.fifo) != 0);
-	if (error)
-		goto out_free;
-
-	cnt = kfifo_out_locked(&sctpw.fifo, tbuf, len, &sctpw.lock);
-	error = copy_to_user(buf, tbuf, cnt) ? -EFAULT : 0;
-
-out_free:
-	vfree(tbuf);
-
-	return error ? error : cnt;
-}
-
-static const struct file_operations sctpprobe_fops = {
-	.owner	= THIS_MODULE,
-	.open	= sctpprobe_open,
-	.read	= sctpprobe_read,
-	.llseek = noop_llseek,
-};
-
-static enum sctp_disposition jsctp_sf_eat_sack(
-					struct net *net,
-					const struct sctp_endpoint *ep,
-					const struct sctp_association *asoc,
-					const union sctp_subtype type,
-					void *arg,
-					struct sctp_cmd_seq *commands)
-{
-	struct sctp_chunk *chunk = arg;
-	struct sk_buff *skb = chunk->skb;
-	struct sctp_transport *sp;
-	static __u32 lcwnd = 0;
-	struct timespec64 now;
-
-	sp = asoc->peer.primary_path;
-
-	if (((port == 0 && fwmark == 0) ||
-	     asoc->peer.port == port ||
-	     ep->base.bind_addr.port == port ||
-	     (fwmark > 0 && skb->mark == fwmark)) &&
-	    (full || sp->cwnd != lcwnd)) {
-		lcwnd = sp->cwnd;
-
-		ktime_get_ts64(&now);
-		now = timespec64_sub(now, sctpw.tstart);
-
-		printl("%lu.%06lu ", (unsigned long) now.tv_sec,
-		       (unsigned long) now.tv_nsec / NSEC_PER_USEC);
-
-		printl("%p %5d %5d %5d %8d %5d ", asoc,
-		       ep->base.bind_addr.port, asoc->peer.port,
-		       asoc->pathmtu, asoc->peer.rwnd, asoc->unack_data);
-
-		list_for_each_entry(sp, &asoc->peer.transport_addr_list,
-					transports) {
-			if (sp == asoc->peer.primary_path)
-				printl("*");
-
-			printl("%pISc %2u %8u %8u %8u %8u %8u ",
-			       &sp->ipaddr, sp->state, sp->cwnd, sp->ssthresh,
-			       sp->flight_size, sp->partial_bytes_acked,
-			       sp->pathmtu);
-		}
-		printl("\n");
-	}
-
-	jprobe_return();
-	return 0;
-}
-
-static struct jprobe sctp_recv_probe = {
-	.kp	= {
-		.symbol_name = "sctp_sf_eat_sack_6_2",
-	},
-	.entry	= jsctp_sf_eat_sack,
-};
-
-static __init int sctp_setup_jprobe(void)
-{
-	int ret = register_jprobe(&sctp_recv_probe);
-
-	if (ret) {
-		if (request_module("sctp"))
-			goto out;
-		ret = register_jprobe(&sctp_recv_probe);
-	}
-
-out:
-	return ret;
-}
-
-static __init int sctpprobe_init(void)
-{
-	int ret = -ENOMEM;
-
-	/* Warning: if the function signature of sctp_sf_eat_sack_6_2,
-	 * has been changed, you also have to change the signature of
-	 * jsctp_sf_eat_sack, otherwise you end up right here!
-	 */
-	BUILD_BUG_ON(__same_type(sctp_sf_eat_sack_6_2,
-				 jsctp_sf_eat_sack) == 0);
-
-	init_waitqueue_head(&sctpw.wait);
-	spin_lock_init(&sctpw.lock);
-	if (kfifo_alloc(&sctpw.fifo, bufsize, GFP_KERNEL))
-		return ret;
-
-	if (!proc_create(procname, S_IRUSR, init_net.proc_net,
-			 &sctpprobe_fops))
-		goto free_kfifo;
-
-	ret = sctp_setup_jprobe();
-	if (ret)
-		goto remove_proc;
-
-	pr_info("probe registered (port=%d/fwmark=%u) bufsize=%u\n",
-		port, fwmark, bufsize);
-	return 0;
-
-remove_proc:
-	remove_proc_entry(procname, init_net.proc_net);
-free_kfifo:
-	kfifo_free(&sctpw.fifo);
-	return ret;
-}
-
-static __exit void sctpprobe_exit(void)
-{
-	kfifo_free(&sctpw.fifo);
-	remove_proc_entry(procname, init_net.proc_net);
-	unregister_jprobe(&sctp_recv_probe);
-}
-
-module_init(sctpprobe_init);
-module_exit(sctpprobe_exit);
diff --git a/net/sctp/proc.c b/net/sctp/proc.c
index 26b4be6b4172..537545ebcb0e 100644
--- a/net/sctp/proc.c
+++ b/net/sctp/proc.c
@@ -95,7 +95,6 @@ static int sctp_snmp_seq_open(struct inode *inode, struct file *file)
 }
 
 static const struct file_operations sctp_snmp_seq_fops = {
-	.owner	 = THIS_MODULE,
 	.open	 = sctp_snmp_seq_open,
 	.read	 = seq_read,
 	.llseek	 = seq_lseek,
@@ -288,12 +287,8 @@ struct sctp_ht_iter {
 static void *sctp_transport_seq_start(struct seq_file *seq, loff_t *pos)
 {
 	struct sctp_ht_iter *iter = seq->private;
-	int err = sctp_transport_walk_start(&iter->hti);
 
-	if (err) {
-		iter->start_fail = 1;
-		return ERR_PTR(err);
-	}
+	sctp_transport_walk_start(&iter->hti);
 
 	iter->start_fail = 0;
 	return sctp_transport_get_idx(seq_file_net(seq), &iter->hti, *pos);
diff --git a/net/sctp/sm_make_chunk.c b/net/sctp/sm_make_chunk.c
index 9bf575f2e8ed..793b05ec692b 100644
--- a/net/sctp/sm_make_chunk.c
+++ b/net/sctp/sm_make_chunk.c
@@ -228,7 +228,7 @@ struct sctp_chunk *sctp_make_init(const struct sctp_association *asoc,
 	struct sctp_inithdr init;
 	union sctp_params addrs;
 	struct sctp_sock *sp;
-	__u8 extensions[4];
+	__u8 extensions[5];
 	size_t chunksize;
 	__be16 types[2];
 	int num_ext = 0;
@@ -278,6 +278,11 @@ struct sctp_chunk *sctp_make_init(const struct sctp_association *asoc,
 	if (sp->adaptation_ind)
 		chunksize += sizeof(aiparam);
 
+	if (sp->strm_interleave) {
+		extensions[num_ext] = SCTP_CID_I_DATA;
+		num_ext += 1;
+	}
+
 	chunksize += vparam_len;
 
 	/* Account for AUTH related parameters */
@@ -392,7 +397,7 @@ struct sctp_chunk *sctp_make_init_ack(const struct sctp_association *asoc,
 	struct sctp_inithdr initack;
 	union sctp_params addrs;
 	struct sctp_sock *sp;
-	__u8 extensions[4];
+	__u8 extensions[5];
 	size_t chunksize;
 	int num_ext = 0;
 	int cookie_len;
@@ -442,6 +447,11 @@ struct sctp_chunk *sctp_make_init_ack(const struct sctp_association *asoc,
 	if (sp->adaptation_ind)
 		chunksize += sizeof(aiparam);
 
+	if (asoc->intl_enable) {
+		extensions[num_ext] = SCTP_CID_I_DATA;
+		num_ext += 1;
+	}
+
 	if (asoc->peer.auth_capable) {
 		auth_random = (struct sctp_paramhdr *)asoc->c.auth_random;
 		chunksize += ntohs(auth_random->length);
@@ -711,38 +721,31 @@ nodata:
 /* Make a DATA chunk for the given association from the provided
  * parameters.  However, do not populate the data payload.
  */
-struct sctp_chunk *sctp_make_datafrag_empty(struct sctp_association *asoc,
+struct sctp_chunk *sctp_make_datafrag_empty(const struct sctp_association *asoc,
 					    const struct sctp_sndrcvinfo *sinfo,
-					    int data_len, __u8 flags, __u16 ssn,
-					    gfp_t gfp)
+					    int len, __u8 flags, gfp_t gfp)
 {
 	struct sctp_chunk *retval;
 	struct sctp_datahdr dp;
-	int chunk_len;
 
 	/* We assign the TSN as LATE as possible, not here when
 	 * creating the chunk.
 	 */
-	dp.tsn = 0;
+	memset(&dp, 0, sizeof(dp));
+	dp.ppid = sinfo->sinfo_ppid;
 	dp.stream = htons(sinfo->sinfo_stream);
-	dp.ppid   = sinfo->sinfo_ppid;
 
 	/* Set the flags for an unordered send.  */
-	if (sinfo->sinfo_flags & SCTP_UNORDERED) {
+	if (sinfo->sinfo_flags & SCTP_UNORDERED)
 		flags |= SCTP_DATA_UNORDERED;
-		dp.ssn = 0;
-	} else
-		dp.ssn = htons(ssn);
 
-	chunk_len = sizeof(dp) + data_len;
-	retval = sctp_make_data(asoc, flags, chunk_len, gfp);
+	retval = sctp_make_data(asoc, flags, sizeof(dp) + len, gfp);
 	if (!retval)
-		goto nodata;
+		return NULL;
 
 	retval->subh.data_hdr = sctp_addto_chunk(retval, sizeof(dp), &dp);
 	memcpy(&retval->sinfo, sinfo, sizeof(struct sctp_sndrcvinfo));
 
-nodata:
 	return retval;
 }
 
@@ -1273,7 +1276,6 @@ struct sctp_chunk *sctp_make_auth(const struct sctp_association *asoc)
 	struct sctp_authhdr auth_hdr;
 	struct sctp_hmac *hmac_desc;
 	struct sctp_chunk *retval;
-	__u8 *hmac;
 
 	/* Get the first hmac that the peer told us to use */
 	hmac_desc = sctp_auth_asoc_get_hmac(asoc);
@@ -1292,7 +1294,7 @@ struct sctp_chunk *sctp_make_auth(const struct sctp_association *asoc)
 	retval->subh.auth_hdr = sctp_addto_chunk(retval, sizeof(auth_hdr),
 						 &auth_hdr);
 
-	hmac = skb_put_zero(retval->skb, hmac_desc->hmac_len);
+	skb_put_zero(retval->skb, hmac_desc->hmac_len);
 
 	/* Adjust the chunk header to include the empty MAC */
 	retval->chunk_hdr->length =
@@ -1415,6 +1417,12 @@ static struct sctp_chunk *sctp_make_data(const struct sctp_association *asoc,
 	return _sctp_make_chunk(asoc, SCTP_CID_DATA, flags, paylen, gfp);
 }
 
+struct sctp_chunk *sctp_make_idata(const struct sctp_association *asoc,
+				   __u8 flags, int paylen, gfp_t gfp)
+{
+	return _sctp_make_chunk(asoc, SCTP_CID_I_DATA, flags, paylen, gfp);
+}
+
 static struct sctp_chunk *sctp_make_control(const struct sctp_association *asoc,
 					    __u8 type, __u8 flags, int paylen,
 					    gfp_t gfp)
@@ -2032,6 +2040,10 @@ static void sctp_process_ext_param(struct sctp_association *asoc,
 			if (net->sctp.addip_enable)
 				asoc->peer.asconf_capable = 1;
 			break;
+		case SCTP_CID_I_DATA:
+			if (sctp_sk(asoc->base.sk)->strm_interleave)
+				asoc->intl_enable = 1;
+			break;
 		default:
 			break;
 		}
@@ -3523,6 +3535,30 @@ struct sctp_chunk *sctp_make_fwdtsn(const struct sctp_association *asoc,
 	return retval;
 }
 
+struct sctp_chunk *sctp_make_ifwdtsn(const struct sctp_association *asoc,
+				     __u32 new_cum_tsn, size_t nstreams,
+				     struct sctp_ifwdtsn_skip *skiplist)
+{
+	struct sctp_chunk *retval = NULL;
+	struct sctp_ifwdtsn_hdr ftsn_hdr;
+	size_t hint;
+
+	hint = (nstreams + 1) * sizeof(__u32);
+
+	retval = sctp_make_control(asoc, SCTP_CID_I_FWD_TSN, 0, hint,
+				   GFP_ATOMIC);
+	if (!retval)
+		return NULL;
+
+	ftsn_hdr.new_cum_tsn = htonl(new_cum_tsn);
+	retval->subh.ifwdtsn_hdr =
+		sctp_addto_chunk(retval, sizeof(ftsn_hdr), &ftsn_hdr);
+
+	sctp_addto_chunk(retval, nstreams * sizeof(skiplist[0]), skiplist);
+
+	return retval;
+}
+
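
The skip list appended above is an array of fixed-size per-stream entries; the assumed layout of one entry, mirroring the I-FORWARD-TSN wire format, is:

struct sctp_ifwdtsn_skip {
	__be16 stream;		/* stream identifier */
	__u8 reserved;
	__u8 flags;		/* U bit marks unordered message IDs */
	__be32 mid;		/* highest skipped message ID */
};
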
 /* RE-CONFIG 3.1 (RE-CONFIG chunk)
  *   0                   1                   2                   3
  *   0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
diff --git a/net/sctp/sm_sideeffect.c b/net/sctp/sm_sideeffect.c
index df94d77401e7..b71e7fb0a20a 100644
--- a/net/sctp/sm_sideeffect.c
+++ b/net/sctp/sm_sideeffect.c
@@ -632,7 +632,7 @@ static void sctp_cmd_assoc_failed(struct sctp_cmd_seq *commands,
 	struct sctp_chunk *abort;
 
 	/* Cancel any partial delivery in progress. */
-	sctp_ulpq_abort_pd(&asoc->ulpq, GFP_ATOMIC);
+	asoc->stream.si->abort_pd(&asoc->ulpq, GFP_ATOMIC);
 
 	if (event_type == SCTP_EVENT_T_CHUNK && subtype.chunk == SCTP_CID_ABORT)
 		event = sctp_ulpevent_make_assoc_change(asoc, 0, SCTP_COMM_LOST,
@@ -878,12 +878,12 @@ static void sctp_cmd_new_state(struct sctp_cmd_seq *cmds,
 		 * successfully completed a connect() call.
 		 */
 		if (sctp_state(asoc, ESTABLISHED) && sctp_sstate(sk, CLOSED))
-			sk->sk_state = SCTP_SS_ESTABLISHED;
+			inet_sk_set_state(sk, SCTP_SS_ESTABLISHED);
 
 		/* Set the RCV_SHUTDOWN flag when a SHUTDOWN is received. */
 		if (sctp_state(asoc, SHUTDOWN_RECEIVED) &&
 		    sctp_sstate(sk, ESTABLISHED)) {
-			sk->sk_state = SCTP_SS_CLOSING;
+			inet_sk_set_state(sk, SCTP_SS_CLOSING);
 			sk->sk_shutdown |= RCV_SHUTDOWN;
 		}
 	}
@@ -972,7 +972,7 @@ static void sctp_cmd_process_operr(struct sctp_cmd_seq *cmds,
 		if (!ev)
 			return;
 
-		sctp_ulpq_tail_event(&asoc->ulpq, ev);
+		asoc->stream.si->enqueue_event(&asoc->ulpq, ev);
 
 		switch (err_hdr->cause) {
 		case SCTP_ERROR_UNKNOWN_CHUNK:
@@ -1007,18 +1007,6 @@ static void sctp_cmd_process_operr(struct sctp_cmd_seq *cmds,
 	}
 }
 
-/* Process variable FWDTSN chunk information. */
-static void sctp_cmd_process_fwdtsn(struct sctp_ulpq *ulpq,
-				    struct sctp_chunk *chunk)
-{
-	struct sctp_fwdtsn_skip *skip;
-
-	/* Walk through all the skipped SSNs */
-	sctp_walk_fwdtsn(skip, chunk) {
-		sctp_ulpq_skip(ulpq, ntohs(skip->stream), ntohs(skip->ssn));
-	}
-}
-
 /* Helper function to remove the association non-primary peer
  * transports.
  */
@@ -1058,7 +1046,7 @@ static void sctp_cmd_assoc_change(struct sctp_cmd_seq *commands,
 					    asoc->c.sinit_max_instreams,
 					    NULL, GFP_ATOMIC);
 	if (ev)
-		sctp_ulpq_tail_event(&asoc->ulpq, ev);
+		asoc->stream.si->enqueue_event(&asoc->ulpq, ev);
 }
 
 /* Helper function to generate an adaptation indication event */
@@ -1070,7 +1058,7 @@ static void sctp_cmd_adaptation_ind(struct sctp_cmd_seq *commands,
 	ev = sctp_ulpevent_make_adaptation_indication(asoc, GFP_ATOMIC);
 
 	if (ev)
-		sctp_ulpq_tail_event(&asoc->ulpq, ev);
+		asoc->stream.si->enqueue_event(&asoc->ulpq, ev);
 }
 
 
@@ -1368,18 +1356,12 @@ static int sctp_cmd_interpreter(enum sctp_event event_type,
 			break;
 
 		case SCTP_CMD_REPORT_FWDTSN:
-			/* Move the Cumulative TSN Ack ahead. */
-			sctp_tsnmap_skip(&asoc->peer.tsn_map, cmd->obj.u32);
-
-			/* purge the fragmentation queue */
-			sctp_ulpq_reasm_flushtsn(&asoc->ulpq, cmd->obj.u32);
-
-			/* Abort any in progress partial delivery. */
-			sctp_ulpq_abort_pd(&asoc->ulpq, GFP_ATOMIC);
+			asoc->stream.si->report_ftsn(&asoc->ulpq, cmd->obj.u32);
 			break;
 
 		case SCTP_CMD_PROCESS_FWDTSN:
-			sctp_cmd_process_fwdtsn(&asoc->ulpq, cmd->obj.chunk);
+			asoc->stream.si->handle_ftsn(&asoc->ulpq,
+						     cmd->obj.chunk);
 			break;
 
 		case SCTP_CMD_GEN_SACK:
@@ -1483,8 +1465,9 @@ static int sctp_cmd_interpreter(enum sctp_event event_type,
 			pr_debug("%s: sm_sideff: chunk_up:%p, ulpq:%p\n",
 				 __func__, cmd->obj.chunk, &asoc->ulpq);
 
-			sctp_ulpq_tail_data(&asoc->ulpq, cmd->obj.chunk,
-					    GFP_ATOMIC);
+			asoc->stream.si->ulpevent_data(&asoc->ulpq,
+						       cmd->obj.chunk,
+						       GFP_ATOMIC);
 			break;
 
 		case SCTP_CMD_EVENT_ULP:
@@ -1492,7 +1475,8 @@ static int sctp_cmd_interpreter(enum sctp_event event_type,
 			pr_debug("%s: sm_sideff: event_up:%p, ulpq:%p\n",
 				 __func__, cmd->obj.ulpevent, &asoc->ulpq);
 
-			sctp_ulpq_tail_event(&asoc->ulpq, cmd->obj.ulpevent);
+			asoc->stream.si->enqueue_event(&asoc->ulpq,
+						       cmd->obj.ulpevent);
 			break;
 
 		case SCTP_CMD_REPLY:
@@ -1729,12 +1713,13 @@ static int sctp_cmd_interpreter(enum sctp_event event_type,
 			break;
 
 		case SCTP_CMD_PART_DELIVER:
-			sctp_ulpq_partial_delivery(&asoc->ulpq, GFP_ATOMIC);
+			asoc->stream.si->start_pd(&asoc->ulpq, GFP_ATOMIC);
 			break;
 
 		case SCTP_CMD_RENEGE:
-			sctp_ulpq_renege(&asoc->ulpq, cmd->obj.chunk,
-					 GFP_ATOMIC);
+			asoc->stream.si->renege_events(&asoc->ulpq,
+						       cmd->obj.chunk,
+						       GFP_ATOMIC);
 			break;
 
 		case SCTP_CMD_SETUP_T4:
diff --git a/net/sctp/sm_statefuns.c b/net/sctp/sm_statefuns.c
index 8f8ccded13e4..eb7905ffe5f2 100644
--- a/net/sctp/sm_statefuns.c
+++ b/net/sctp/sm_statefuns.c
@@ -59,6 +59,9 @@
 #include <net/sctp/sm.h>
 #include <net/sctp/structs.h>
 
+#define CREATE_TRACE_POINTS
+#include <trace/events/sctp.h>
+
 static struct sctp_packet *sctp_abort_pkt_new(
 					struct net *net,
 					const struct sctp_endpoint *ep,
@@ -3013,7 +3016,7 @@ enum sctp_disposition sctp_sf_eat_data_6_2(struct net *net,
 		return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands);
 	}
 
-	if (!sctp_chunk_length_valid(chunk, sizeof(struct sctp_data_chunk)))
+	if (!sctp_chunk_length_valid(chunk, sctp_datachk_len(&asoc->stream)))
 		return sctp_sf_violation_chunklen(net, ep, asoc, type, arg,
 						  commands);
 
@@ -3034,7 +3037,7 @@ enum sctp_disposition sctp_sf_eat_data_6_2(struct net *net,
 	case SCTP_IERROR_PROTO_VIOLATION:
 		return sctp_sf_abort_violation(net, ep, asoc, chunk, commands,
 					       (u8 *)chunk->subh.data_hdr,
-					       sizeof(struct sctp_datahdr));
+					       sctp_datahdr_len(&asoc->stream));
 	default:
 		BUG();
 	}
@@ -3133,7 +3136,7 @@ enum sctp_disposition sctp_sf_eat_data_fast_4_4(
 		return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands);
 	}
 
-	if (!sctp_chunk_length_valid(chunk, sizeof(struct sctp_data_chunk)))
+	if (!sctp_chunk_length_valid(chunk, sctp_datachk_len(&asoc->stream)))
 		return sctp_sf_violation_chunklen(net, ep, asoc, type, arg,
 						  commands);
 
@@ -3150,7 +3153,7 @@ enum sctp_disposition sctp_sf_eat_data_fast_4_4(
 	case SCTP_IERROR_PROTO_VIOLATION:
 		return sctp_sf_abort_violation(net, ep, asoc, chunk, commands,
 					       (u8 *)chunk->subh.data_hdr,
-					       sizeof(struct sctp_datahdr));
+					       sctp_datahdr_len(&asoc->stream));
 	default:
 		BUG();
 	}
@@ -3219,6 +3222,8 @@ enum sctp_disposition sctp_sf_eat_sack_6_2(struct net *net,
 	struct sctp_sackhdr *sackh;
 	__u32 ctsn;
 
+	trace_sctp_probe(ep, asoc, chunk);
+
 	if (!sctp_vtag_verify(chunk, asoc))
 		return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands);
 
@@ -3957,7 +3962,6 @@ enum sctp_disposition sctp_sf_eat_fwd_tsn(struct net *net,
 {
 	struct sctp_fwdtsn_hdr *fwdtsn_hdr;
 	struct sctp_chunk *chunk = arg;
-	struct sctp_fwdtsn_skip *skip;
 	__u16 len;
 	__u32 tsn;
 
@@ -3971,7 +3975,7 @@ enum sctp_disposition sctp_sf_eat_fwd_tsn(struct net *net,
 		return sctp_sf_unk_chunk(net, ep, asoc, type, arg, commands);
 
 	/* Make sure that the FORWARD_TSN chunk has valid length.  */
-	if (!sctp_chunk_length_valid(chunk, sizeof(struct sctp_fwdtsn_chunk)))
+	if (!sctp_chunk_length_valid(chunk, sctp_ftsnchk_len(&asoc->stream)))
 		return sctp_sf_violation_chunklen(net, ep, asoc, type, arg,
 						  commands);
 
@@ -3990,14 +3994,11 @@ enum sctp_disposition sctp_sf_eat_fwd_tsn(struct net *net,
 	if (sctp_tsnmap_check(&asoc->peer.tsn_map, tsn) < 0)
 		goto discard_noforce;
 
-	/* Silently discard the chunk if stream-id is not valid */
-	sctp_walk_fwdtsn(skip, chunk) {
-		if (ntohs(skip->stream) >= asoc->stream.incnt)
-			goto discard_noforce;
-	}
+	if (!asoc->stream.si->validate_ftsn(chunk))
+		goto discard_noforce;
 
 	sctp_add_cmd_sf(commands, SCTP_CMD_REPORT_FWDTSN, SCTP_U32(tsn));
-	if (len > sizeof(struct sctp_fwdtsn_hdr))
+	if (len > sctp_ftsnhdr_len(&asoc->stream))
 		sctp_add_cmd_sf(commands, SCTP_CMD_PROCESS_FWDTSN,
 				SCTP_CHUNK(chunk));
 
@@ -4028,7 +4029,6 @@ enum sctp_disposition sctp_sf_eat_fwd_tsn_fast(
 {
 	struct sctp_fwdtsn_hdr *fwdtsn_hdr;
 	struct sctp_chunk *chunk = arg;
-	struct sctp_fwdtsn_skip *skip;
 	__u16 len;
 	__u32 tsn;
 
@@ -4042,7 +4042,7 @@ enum sctp_disposition sctp_sf_eat_fwd_tsn_fast(
 		return sctp_sf_unk_chunk(net, ep, asoc, type, arg, commands);
 
 	/* Make sure that the FORWARD_TSN chunk has a valid length.  */
-	if (!sctp_chunk_length_valid(chunk, sizeof(struct sctp_fwdtsn_chunk)))
+	if (!sctp_chunk_length_valid(chunk, sctp_ftsnchk_len(&asoc->stream)))
 		return sctp_sf_violation_chunklen(net, ep, asoc, type, arg,
 						  commands);
 
@@ -4061,14 +4061,11 @@ enum sctp_disposition sctp_sf_eat_fwd_tsn_fast(
 	if (sctp_tsnmap_check(&asoc->peer.tsn_map, tsn) < 0)
 		goto gen_shutdown;
 
-	/* Silently discard the chunk if stream-id is not valid */
-	sctp_walk_fwdtsn(skip, chunk) {
-		if (ntohs(skip->stream) >= asoc->stream.incnt)
-			goto gen_shutdown;
-	}
+	if (!asoc->stream.si->validate_ftsn(chunk))
+		goto gen_shutdown;
 
 	sctp_add_cmd_sf(commands, SCTP_CMD_REPORT_FWDTSN, SCTP_U32(tsn));
-	if (len > sizeof(struct sctp_fwdtsn_hdr))
+	if (len > sctp_ftsnhdr_len(&asoc->stream))
 		sctp_add_cmd_sf(commands, SCTP_CMD_PROCESS_FWDTSN,
 				SCTP_CHUNK(chunk));
 
@@ -6244,14 +6241,12 @@ static int sctp_eat_data(const struct sctp_association *asoc,
 	struct sctp_chunk *err;
 	enum sctp_verb deliver;
 	size_t datalen;
-	u8 ordered = 0;
-	u16 ssn, sid;
 	__u32 tsn;
 	int tmp;
 
 	data_hdr = (struct sctp_datahdr *)chunk->skb->data;
 	chunk->subh.data_hdr = data_hdr;
-	skb_pull(chunk->skb, sizeof(*data_hdr));
+	skb_pull(chunk->skb, sctp_datahdr_len(&asoc->stream));
 
 	tsn = ntohl(data_hdr->tsn);
 	pr_debug("%s: TSN 0x%x\n", __func__, tsn);
@@ -6299,7 +6294,7 @@ static int sctp_eat_data(const struct sctp_association *asoc,
 	 * Actually, allow a little bit of overflow (up to a MTU).
 	 */
 	datalen = ntohs(chunk->chunk_hdr->length);
-	datalen -= sizeof(struct sctp_data_chunk);
+	datalen -= sctp_datachk_len(&asoc->stream);
 
 	deliver = SCTP_CMD_CHUNK_ULP;
 
@@ -6394,7 +6389,6 @@ static int sctp_eat_data(const struct sctp_association *asoc,
 		SCTP_INC_STATS(net, SCTP_MIB_INORDERCHUNKS);
 		if (chunk->asoc)
 			chunk->asoc->stats.iodchunks++;
-		ordered = 1;
 	}
 
 	/* RFC 2960 6.5 Stream Identifier and Stream Sequence Number
@@ -6405,8 +6399,7 @@ static int sctp_eat_data(const struct sctp_association *asoc,
 	 * with cause set to "Invalid Stream Identifier" (See Section 3.3.10)
 	 * and discard the DATA chunk.
 	 */
-	sid = ntohs(data_hdr->stream);
-	if (sid >= asoc->stream.incnt) {
+	if (ntohs(data_hdr->stream) >= asoc->stream.incnt) {
 		/* Mark tsn as received even though we drop it */
 		sctp_add_cmd_sf(commands, SCTP_CMD_REPORT_TSN, SCTP_U32(tsn));
 
@@ -6427,8 +6420,7 @@ static int sctp_eat_data(const struct sctp_association *asoc,
 	 * SSN is smaller then the next expected one.  If it is, it wrapped
 	 * and is invalid.
 	 */
-	ssn = ntohs(data_hdr->ssn);
-	if (ordered && SSN_lt(ssn, sctp_ssn_peek(&asoc->stream, in, sid)))
+	if (!asoc->stream.si->validate_data(chunk))
 		return SCTP_IERROR_PROTO_VIOLATION;
 
 	/* Send the data up to the user.  Note:  Schedule  the
diff --git a/net/sctp/sm_statetable.c b/net/sctp/sm_statetable.c
index 79b6bee5b768..691d9dc620e3 100644
--- a/net/sctp/sm_statetable.c
+++ b/net/sctp/sm_statetable.c
@@ -985,11 +985,14 @@ static const struct sctp_sm_table_entry *sctp_chunk_event_lookup(
 	if (state > SCTP_STATE_MAX)
 		return &bug;
 
+	if (cid == SCTP_CID_I_DATA)
+		cid = SCTP_CID_DATA;
+
 	if (cid <= SCTP_CID_BASE_MAX)
 		return &chunk_event_table[cid][state];
 
 	if (net->sctp.prsctp_enable) {
-		if (cid == SCTP_CID_FWD_TSN)
+		if (cid == SCTP_CID_FWD_TSN || cid == SCTP_CID_I_FWD_TSN)
 			return &prsctp_chunk_event_table[0][state];
 	}
 
diff --git a/net/sctp/socket.c b/net/sctp/socket.c
index 737e551fbf67..356e387f82e7 100644
--- a/net/sctp/socket.c
+++ b/net/sctp/socket.c
@@ -201,6 +201,22 @@ static void sctp_for_each_tx_datachunk(struct sctp_association *asoc,
 		cb(chunk);
 }
 
+static void sctp_for_each_rx_skb(struct sctp_association *asoc, struct sock *sk,
+				 void (*cb)(struct sk_buff *, struct sock *))
+
+{
+	struct sk_buff *skb, *tmp;
+
+	sctp_skb_for_each(skb, &asoc->ulpq.lobby, tmp)
+		cb(skb, sk);
+
+	sctp_skb_for_each(skb, &asoc->ulpq.reasm, tmp)
+		cb(skb, sk);
+
+	sctp_skb_for_each(skb, &asoc->ulpq.reasm_uo, tmp)
+		cb(skb, sk);
+}
+
 /* Verify that this is a valid address. */
 static inline int sctp_verify_addr(struct sock *sk, union sctp_addr *addr,
 				   int len)
@@ -1489,7 +1505,7 @@ static void sctp_close(struct sock *sk, long timeout)
 
 	lock_sock_nested(sk, SINGLE_DEPTH_NESTING);
 	sk->sk_shutdown = SHUTDOWN_MASK;
-	sk->sk_state = SCTP_SS_CLOSING;
+	inet_sk_set_state(sk, SCTP_SS_CLOSING);
 
 	ep = sctp_sk(sk)->ep;
 
@@ -1515,6 +1531,7 @@ static void sctp_close(struct sock *sk, long timeout)
 
 		if (data_was_unread || !skb_queue_empty(&asoc->ulpq.lobby) ||
 		    !skb_queue_empty(&asoc->ulpq.reasm) ||
+		    !skb_queue_empty(&asoc->ulpq.reasm_uo) ||
 		    (sock_flag(sk, SOCK_LINGER) && !sk->sk_lingertime)) {
 			struct sctp_chunk *chunk;
 
@@ -1969,7 +1986,20 @@ static int sctp_sendmsg(struct sock *sk, struct msghdr *msg, size_t msg_len)
 		if (err < 0)
 			goto out_free;
 
-		wait_connect = true;
+		/* If stream interleave is enabled, the connect wait must
+		 * happen before the data is enqueued: the sender builds
+		 * DATA or I-DATA chunks according to asoc->intl_enable,
+		 * which is only settled once the connection is established.
+		 */
+		if (sctp_sk(asoc->base.sk)->strm_interleave) {
+			timeo = sock_sndtimeo(sk, 0);
+			err = sctp_wait_for_connect(asoc, &timeo);
+			if (err)
+				goto out_unlock;
+		} else {
+			wait_connect = true;
+		}
+
 		pr_debug("%s: we associated primitively\n", __func__);
 	}
 
@@ -2248,7 +2278,7 @@ static int sctp_setsockopt_events(struct sock *sk, char __user *optval,
 			if (!event)
 				return -ENOMEM;
 
-			sctp_ulpq_tail_event(&asoc->ulpq, event);
+			asoc->stream.si->enqueue_event(&asoc->ulpq, event);
 		}
 	}
 
@@ -3147,7 +3177,7 @@ static int sctp_setsockopt_maxseg(struct sock *sk, char __user *optval, unsigned
 		if (val == 0) {
 			val = asoc->pathmtu - sp->pf->af->net_header_len;
 			val -= sizeof(struct sctphdr) +
-			       sizeof(struct sctp_data_chunk);
+			       sctp_datachk_len(&asoc->stream);
 		}
 		asoc->user_frag = val;
 		asoc->frag_point = sctp_frag_point(asoc, asoc->pathmtu);
@@ -3317,7 +3347,10 @@ static int sctp_setsockopt_fragment_interleave(struct sock *sk,
 	if (get_user(val, (int __user *)optval))
 		return -EFAULT;
 
-	sctp_sk(sk)->frag_interleave = (val == 0) ? 0 : 1;
+	sctp_sk(sk)->frag_interleave = !!val;
+
+	if (!sctp_sk(sk)->frag_interleave)
+		sctp_sk(sk)->strm_interleave = 0;
 
 	return 0;
 }
@@ -4000,6 +4033,40 @@ out:
 	return retval;
 }
 
+static int sctp_setsockopt_interleaving_supported(struct sock *sk,
+						  char __user *optval,
+						  unsigned int optlen)
+{
+	struct sctp_sock *sp = sctp_sk(sk);
+	struct net *net = sock_net(sk);
+	struct sctp_assoc_value params;
+	int retval = -EINVAL;
+
+	if (optlen < sizeof(params))
+		goto out;
+
+	optlen = sizeof(params);
+	if (copy_from_user(&params, optval, optlen)) {
+		retval = -EFAULT;
+		goto out;
+	}
+
+	if (params.assoc_id)
+		goto out;
+
+	if (!net->sctp.intl_enable || !sp->frag_interleave) {
+		retval = -EPERM;
+		goto out;
+	}
+
+	sp->strm_interleave = !!params.assoc_value;
+
+	retval = 0;
+
+out:
+	return retval;
+}
+
 /* API 6.2 setsockopt(), getsockopt()
  *
  * Applications use setsockopt() and getsockopt() to set or retrieve
@@ -4187,6 +4254,10 @@ static int sctp_setsockopt(struct sock *sk, int level, int optname,
 	case SCTP_STREAM_SCHEDULER_VALUE:
 		retval = sctp_setsockopt_scheduler_value(sk, optval, optlen);
 		break;
+	case SCTP_INTERLEAVING_SUPPORTED:
+		retval = sctp_setsockopt_interleaving_supported(sk, optval,
+								optlen);
+		break;
 	default:
 		retval = -ENOPROTOOPT;
 		break;
@@ -4563,7 +4634,7 @@ static void sctp_shutdown(struct sock *sk, int how)
 	if (how & SEND_SHUTDOWN && !list_empty(&ep->asocs)) {
 		struct sctp_association *asoc;
 
-		sk->sk_state = SCTP_SS_CLOSING;
+		inet_sk_set_state(sk, SCTP_SS_CLOSING);
 		asoc = list_entry(ep->asocs.next,
 				  struct sctp_association, asocs);
 		sctp_primitive_SHUTDOWN(net, asoc, NULL);
@@ -4657,20 +4728,11 @@ int sctp_get_sctp_info(struct sock *sk, struct sctp_association *asoc,
 EXPORT_SYMBOL_GPL(sctp_get_sctp_info);
 
 /* use callback to avoid exporting the core structure */
-int sctp_transport_walk_start(struct rhashtable_iter *iter)
+void sctp_transport_walk_start(struct rhashtable_iter *iter)
 {
-	int err;
-
 	rhltable_walk_enter(&sctp_transport_hashtable, iter);
 
-	err = rhashtable_walk_start(iter);
-	if (err && err != -EAGAIN) {
-		rhashtable_walk_stop(iter);
-		rhashtable_walk_exit(iter);
-		return err;
-	}
-
-	return 0;
+	rhashtable_walk_start(iter);
 }
 
 void sctp_transport_walk_stop(struct rhashtable_iter *iter)
@@ -4764,9 +4826,8 @@ int sctp_for_each_transport(int (*cb)(struct sctp_transport *, void *),
 	int ret;
 
 again:
-	ret = sctp_transport_walk_start(&hti);
-	if (ret)
-		return ret;
+	ret = 0;
+	sctp_transport_walk_start(&hti);
 
 	tsp = sctp_transport_get_idx(net, &hti, *pos + 1);
 	for (; !IS_ERR_OR_NULL(tsp); tsp = sctp_transport_get_next(net, &hti)) {
@@ -6965,6 +7026,47 @@ out:
 	return retval;
 }
 
+static int sctp_getsockopt_interleaving_supported(struct sock *sk, int len,
+						  char __user *optval,
+						  int __user *optlen)
+{
+	struct sctp_assoc_value params;
+	struct sctp_association *asoc;
+	int retval = -EFAULT;
+
+	if (len < sizeof(params)) {
+		retval = -EINVAL;
+		goto out;
+	}
+
+	len = sizeof(params);
+	if (copy_from_user(&params, optval, len))
+		goto out;
+
+	asoc = sctp_id2assoc(sk, params.assoc_id);
+	if (asoc) {
+		params.assoc_value = asoc->intl_enable;
+	} else if (!params.assoc_id) {
+		struct sctp_sock *sp = sctp_sk(sk);
+
+		params.assoc_value = sp->strm_interleave;
+	} else {
+		retval = -EINVAL;
+		goto out;
+	}
+
+	if (put_user(len, optlen))
+		goto out;
+
+	if (copy_to_user(optval, &params, len))
+		goto out;
+
+	retval = 0;
+
+out:
+	return retval;
+}
+
 static int sctp_getsockopt(struct sock *sk, int level, int optname,
 			   char __user *optval, int __user *optlen)
 {
@@ -7155,6 +7257,10 @@ static int sctp_getsockopt(struct sock *sk, int level, int optname,
 		retval = sctp_getsockopt_scheduler_value(sk, len, optval,
 							 optlen);
 		break;
+	case SCTP_INTERLEAVING_SUPPORTED:
+		retval = sctp_getsockopt_interleaving_supported(sk, len, optval,
+								optlen);
+		break;
 	default:
 		retval = -ENOPROTOOPT;
 		break;
@@ -7389,13 +7495,13 @@ static int sctp_listen_start(struct sock *sk, int backlog)
 	 * sockets.
 	 *
 	 */
-	sk->sk_state = SCTP_SS_LISTENING;
+	inet_sk_set_state(sk, SCTP_SS_LISTENING);
 	if (!ep->base.bind_addr.port) {
 		if (sctp_autobind(sk))
 			return -EAGAIN;
 	} else {
 		if (sctp_get_port(sk, inet_sk(sk)->inet_num)) {
-			sk->sk_state = SCTP_SS_CLOSED;
+			inet_sk_set_state(sk, SCTP_SS_CLOSED);
 			return -EADDRINUSE;
 		}
 	}
@@ -8388,11 +8494,7 @@ static void sctp_sock_migrate(struct sock *oldsk, struct sock *newsk,
 
 	}
 
-	sctp_skb_for_each(skb, &assoc->ulpq.reasm, tmp)
-		sctp_skb_set_owner_r_frag(skb, newsk);
-
-	sctp_skb_for_each(skb, &assoc->ulpq.lobby, tmp)
-		sctp_skb_set_owner_r_frag(skb, newsk);
+	sctp_for_each_rx_skb(assoc, newsk, sctp_skb_set_owner_r_frag);
 
 	/* Set the type of socket to indicate that it is peeled off from the
 	 * original UDP-style socket or created with the accept() call on a
@@ -8418,10 +8520,10 @@ static void sctp_sock_migrate(struct sock *oldsk, struct sock *newsk,
 	 * is called, set RCV_SHUTDOWN flag.
 	 */
 	if (sctp_state(assoc, CLOSED) && sctp_style(newsk, TCP)) {
-		newsk->sk_state = SCTP_SS_CLOSED;
+		inet_sk_set_state(newsk, SCTP_SS_CLOSED);
 		newsk->sk_shutdown |= RCV_SHUTDOWN;
 	} else {
-		newsk->sk_state = SCTP_SS_ESTABLISHED;
+		inet_sk_set_state(newsk, SCTP_SS_ESTABLISHED);
 	}
 
 	release_sock(newsk);
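
From userspace the new option is driven with the usual struct
sctp_assoc_value. A hedged sketch, assuming the SCTP_INTERLEAVING_SUPPORTED
constant this series adds to the uapi headers; per the handlers above, the
kernel rejects the option with EPERM unless net.sctp.intl_enable is set and
fragment interleaving was enabled first, and only assoc_id == 0 is accepted:

    #include <string.h>
    #include <sys/socket.h>
    #include <netinet/in.h>
    #include <netinet/sctp.h>    /* lksctp-tools headers assumed */

    static int enable_interleaving(int fd)
    {
        struct sctp_assoc_value av;
        int level = 2;    /* full fragment interleaving */

        if (setsockopt(fd, IPPROTO_SCTP, SCTP_FRAGMENT_INTERLEAVE,
                       &level, sizeof(level)) < 0)
            return -1;

        memset(&av, 0, sizeof(av));
        av.assoc_id = 0;    /* socket-wide; per-assoc ids are refused */
        av.assoc_value = 1;

        return setsockopt(fd, IPPROTO_SCTP, SCTP_INTERLEAVING_SUPPORTED,
                          &av, sizeof(av));
    }
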
diff --git a/net/sctp/stream.c b/net/sctp/stream.c
index 524dfeb94c41..cedf672487f9 100644
--- a/net/sctp/stream.c
+++ b/net/sctp/stream.c
@@ -167,6 +167,7 @@ int sctp_stream_init(struct sctp_stream *stream, __u16 outcnt, __u16 incnt,
 	sched->init(stream);
 
 in:
+	sctp_stream_interleave_init(stream);
 	if (!incnt)
 		goto out;
 
@@ -213,11 +214,13 @@ void sctp_stream_clear(struct sctp_stream *stream)
 {
 	int i;
 
-	for (i = 0; i < stream->outcnt; i++)
-		stream->out[i].ssn = 0;
+	for (i = 0; i < stream->outcnt; i++) {
+		stream->out[i].mid = 0;
+		stream->out[i].mid_uo = 0;
+	}
 
 	for (i = 0; i < stream->incnt; i++)
-		stream->in[i].ssn = 0;
+		stream->in[i].mid = 0;
 }
 
 void sctp_stream_update(struct sctp_stream *stream, struct sctp_stream *new)
@@ -604,10 +607,10 @@ struct sctp_chunk *sctp_process_strreset_outreq(
 		}
 
 		for (i = 0; i < nums; i++)
-			stream->in[ntohs(str_p[i])].ssn = 0;
+			stream->in[ntohs(str_p[i])].mid = 0;
 	} else {
 		for (i = 0; i < stream->incnt; i++)
-			stream->in[i].ssn = 0;
+			stream->in[i].mid = 0;
 	}
 
 	result = SCTP_STRRESET_PERFORMED;
@@ -751,8 +754,7 @@ struct sctp_chunk *sctp_process_strreset_tsnreq(
 	 *     performed.
 	 */
 	max_tsn_seen = sctp_tsnmap_get_max_tsn_seen(&asoc->peer.tsn_map);
-	sctp_ulpq_reasm_flushtsn(&asoc->ulpq, max_tsn_seen);
-	sctp_ulpq_abort_pd(&asoc->ulpq, GFP_ATOMIC);
+	asoc->stream.si->report_ftsn(&asoc->ulpq, max_tsn_seen);
 
 	/* G1: Compute an appropriate value for the Receiver's Next TSN -- the
 	 *     TSN that the peer should use to send the next DATA chunk.  The
@@ -781,10 +783,12 @@ struct sctp_chunk *sctp_process_strreset_tsnreq(
 	/* G5:  The next expected and outgoing SSNs MUST be reset to 0 for all
 	 *      incoming and outgoing streams.
 	 */
-	for (i = 0; i < stream->outcnt; i++)
-		stream->out[i].ssn = 0;
+	for (i = 0; i < stream->outcnt; i++) {
+		stream->out[i].mid = 0;
+		stream->out[i].mid_uo = 0;
+	}
 	for (i = 0; i < stream->incnt; i++)
-		stream->in[i].ssn = 0;
+		stream->in[i].mid = 0;
 
 	result = SCTP_STRRESET_PERFORMED;
 
@@ -974,11 +978,15 @@ struct sctp_chunk *sctp_process_strreset_resp(
 
 		if (result == SCTP_STRRESET_PERFORMED) {
 			if (nums) {
-				for (i = 0; i < nums; i++)
-					stream->out[ntohs(str_p[i])].ssn = 0;
+				for (i = 0; i < nums; i++) {
+					stream->out[ntohs(str_p[i])].mid = 0;
+					stream->out[ntohs(str_p[i])].mid_uo = 0;
+				}
 			} else {
-				for (i = 0; i < stream->outcnt; i++)
-					stream->out[i].ssn = 0;
+				for (i = 0; i < stream->outcnt; i++) {
+					stream->out[i].mid = 0;
+					stream->out[i].mid_uo = 0;
+				}
 			}
 
 			flags = SCTP_STREAM_RESET_OUTGOING_SSN;
@@ -1021,8 +1029,7 @@ struct sctp_chunk *sctp_process_strreset_resp(
 						&asoc->peer.tsn_map);
 			LIST_HEAD(temp);
 
-			sctp_ulpq_reasm_flushtsn(&asoc->ulpq, mtsn);
-			sctp_ulpq_abort_pd(&asoc->ulpq, GFP_ATOMIC);
+			asoc->stream.si->report_ftsn(&asoc->ulpq, mtsn);
 
 			sctp_tsnmap_init(&asoc->peer.tsn_map,
 					 SCTP_TSN_MAP_INITIAL,
@@ -1040,10 +1047,12 @@ struct sctp_chunk *sctp_process_strreset_resp(
 			asoc->ctsn_ack_point = asoc->next_tsn - 1;
 			asoc->adv_peer_ack_point = asoc->ctsn_ack_point;
 
-			for (i = 0; i < stream->outcnt; i++)
-				stream->out[i].ssn = 0;
+			for (i = 0; i < stream->outcnt; i++) {
+				stream->out[i].mid = 0;
+				stream->out[i].mid_uo = 0;
+			}
 			for (i = 0; i < stream->incnt; i++)
-				stream->in[i].ssn = 0;
+				stream->in[i].mid = 0;
 		}
 
 		for (i = 0; i < stream->outcnt; i++)
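
Every reset path above now clears two 32-bit counters per stream, mid for
ordered and mid_uo for unordered messages, where a single 16-bit ssn used to
suffice. A simplified stand-in for the per-stream output state (not the
kernel struct):

    /* Invented stand-in for the fields the hunks above reset. */
    struct stream_out {
        unsigned int mid;       /* next MID for ordered messages */
        unsigned int mid_uo;    /* next MID for unordered messages */
    };

    static void stream_clear(struct stream_out *out, unsigned int outcnt)
    {
        unsigned int i;

        for (i = 0; i < outcnt; i++) {
            out[i].mid = 0;
            out[i].mid_uo = 0;
        }
    }
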
diff --git a/net/sctp/stream_interleave.c b/net/sctp/stream_interleave.c
new file mode 100644
index 000000000000..8c7cf8f08711
--- /dev/null
+++ b/net/sctp/stream_interleave.c
@@ -0,0 +1,1334 @@
+/* SCTP kernel implementation
+ * (C) Copyright Red Hat Inc. 2017
+ *
+ * This file is part of the SCTP kernel implementation
+ *
+ * These functions implement sctp stream message interleaving.
+ *
+ * This SCTP implementation is free software;
+ * you can redistribute it and/or modify it under the terms of
+ * the GNU General Public License as published by
+ * the Free Software Foundation; either version 2, or (at your option)
+ * any later version.
+ *
+ * This SCTP implementation is distributed in the hope that it
+ * will be useful, but WITHOUT ANY WARRANTY; without even the implied
+ *                 ************************
+ * warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
+ * See the GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with GNU CC; see the file COPYING.  If not, see
+ * <http://www.gnu.org/licenses/>.
+ *
+ * Please send any bug reports or fixes you make to the
+ * email address(es):
+ *    lksctp developers <linux-sctp@vger.kernel.org>
+ *
+ * Written or modified by:
+ *    Xin Long <lucien.xin@gmail.com>
+ */
+
+#include <net/busy_poll.h>
+#include <net/sctp/sctp.h>
+#include <net/sctp/sm.h>
+#include <net/sctp/ulpevent.h>
+#include <linux/sctp.h>
+
+static struct sctp_chunk *sctp_make_idatafrag_empty(
+					const struct sctp_association *asoc,
+					const struct sctp_sndrcvinfo *sinfo,
+					int len, __u8 flags, gfp_t gfp)
+{
+	struct sctp_chunk *retval;
+	struct sctp_idatahdr dp;
+
+	memset(&dp, 0, sizeof(dp));
+	dp.stream = htons(sinfo->sinfo_stream);
+
+	if (sinfo->sinfo_flags & SCTP_UNORDERED)
+		flags |= SCTP_DATA_UNORDERED;
+
+	retval = sctp_make_idata(asoc, flags, sizeof(dp) + len, gfp);
+	if (!retval)
+		return NULL;
+
+	retval->subh.idata_hdr = sctp_addto_chunk(retval, sizeof(dp), &dp);
+	memcpy(&retval->sinfo, sinfo, sizeof(struct sctp_sndrcvinfo));
+
+	return retval;
+}
+
+static void sctp_chunk_assign_mid(struct sctp_chunk *chunk)
+{
+	struct sctp_stream *stream;
+	struct sctp_chunk *lchunk;
+	__u32 cfsn = 0;
+	__u16 sid;
+
+	if (chunk->has_mid)
+		return;
+
+	sid = sctp_chunk_stream_no(chunk);
+	stream = &chunk->asoc->stream;
+
+	list_for_each_entry(lchunk, &chunk->msg->chunks, frag_list) {
+		struct sctp_idatahdr *hdr;
+		__u32 mid;
+
+		lchunk->has_mid = 1;
+
+		hdr = lchunk->subh.idata_hdr;
+
+		if (lchunk->chunk_hdr->flags & SCTP_DATA_FIRST_FRAG)
+			hdr->ppid = lchunk->sinfo.sinfo_ppid;
+		else
+			hdr->fsn = htonl(cfsn++);
+
+		if (lchunk->chunk_hdr->flags & SCTP_DATA_UNORDERED) {
+			mid = lchunk->chunk_hdr->flags & SCTP_DATA_LAST_FRAG ?
+				sctp_mid_uo_next(stream, out, sid) :
+				sctp_mid_uo_peek(stream, out, sid);
+		} else {
+			mid = lchunk->chunk_hdr->flags & SCTP_DATA_LAST_FRAG ?
+				sctp_mid_next(stream, out, sid) :
+				sctp_mid_peek(stream, out, sid);
+		}
+		hdr->mid = htonl(mid);
+	}
+}
+
+static bool sctp_validate_data(struct sctp_chunk *chunk)
+{
+	const struct sctp_stream *stream;
+	__u16 sid, ssn;
+
+	if (chunk->chunk_hdr->type != SCTP_CID_DATA)
+		return false;
+
+	if (chunk->chunk_hdr->flags & SCTP_DATA_UNORDERED)
+		return true;
+
+	stream = &chunk->asoc->stream;
+	sid = sctp_chunk_stream_no(chunk);
+	ssn = ntohs(chunk->subh.data_hdr->ssn);
+
+	return !SSN_lt(ssn, sctp_ssn_peek(stream, in, sid));
+}
+
+static bool sctp_validate_idata(struct sctp_chunk *chunk)
+{
+	struct sctp_stream *stream;
+	__u32 mid;
+	__u16 sid;
+
+	if (chunk->chunk_hdr->type != SCTP_CID_I_DATA)
+		return false;
+
+	if (chunk->chunk_hdr->flags & SCTP_DATA_UNORDERED)
+		return true;
+
+	stream = &chunk->asoc->stream;
+	sid = sctp_chunk_stream_no(chunk);
+	mid = ntohl(chunk->subh.idata_hdr->mid);
+
+	return !MID_lt(mid, sctp_mid_peek(stream, in, sid));
+}
+
+static void sctp_intl_store_reasm(struct sctp_ulpq *ulpq,
+				  struct sctp_ulpevent *event)
+{
+	struct sctp_ulpevent *cevent;
+	struct sk_buff *pos;
+
+	pos = skb_peek_tail(&ulpq->reasm);
+	if (!pos) {
+		__skb_queue_tail(&ulpq->reasm, sctp_event2skb(event));
+		return;
+	}
+
+	cevent = sctp_skb2event(pos);
+
+	if (event->stream == cevent->stream &&
+	    event->mid == cevent->mid &&
+	    (cevent->msg_flags & SCTP_DATA_FIRST_FRAG ||
+	     (!(event->msg_flags & SCTP_DATA_FIRST_FRAG) &&
+	      event->fsn > cevent->fsn))) {
+		__skb_queue_tail(&ulpq->reasm, sctp_event2skb(event));
+		return;
+	}
+
+	if ((event->stream == cevent->stream &&
+	     MID_lt(cevent->mid, event->mid)) ||
+	    event->stream > cevent->stream) {
+		__skb_queue_tail(&ulpq->reasm, sctp_event2skb(event));
+		return;
+	}
+
+	skb_queue_walk(&ulpq->reasm, pos) {
+		cevent = sctp_skb2event(pos);
+
+		if (event->stream < cevent->stream ||
+		    (event->stream == cevent->stream &&
+		     MID_lt(event->mid, cevent->mid)))
+			break;
+
+		if (event->stream == cevent->stream &&
+		    event->mid == cevent->mid &&
+		    !(cevent->msg_flags & SCTP_DATA_FIRST_FRAG) &&
+		    (event->msg_flags & SCTP_DATA_FIRST_FRAG ||
+		     event->fsn < cevent->fsn))
+			break;
+	}
+
+	__skb_queue_before(&ulpq->reasm, pos, sctp_event2skb(event));
+}
+
+static struct sctp_ulpevent *sctp_intl_retrieve_partial(
+						struct sctp_ulpq *ulpq,
+						struct sctp_ulpevent *event)
+{
+	struct sk_buff *first_frag = NULL;
+	struct sk_buff *last_frag = NULL;
+	struct sctp_ulpevent *retval;
+	struct sctp_stream_in *sin;
+	struct sk_buff *pos;
+	__u32 next_fsn = 0;
+	int is_last = 0;
+
+	sin = sctp_stream_in(ulpq->asoc, event->stream);
+
+	skb_queue_walk(&ulpq->reasm, pos) {
+		struct sctp_ulpevent *cevent = sctp_skb2event(pos);
+
+		if (cevent->stream < event->stream)
+			continue;
+
+		if (cevent->stream > event->stream ||
+		    cevent->mid != sin->mid)
+			break;
+
+		switch (cevent->msg_flags & SCTP_DATA_FRAG_MASK) {
+		case SCTP_DATA_FIRST_FRAG:
+			goto out;
+		case SCTP_DATA_MIDDLE_FRAG:
+			if (!first_frag) {
+				if (cevent->fsn == sin->fsn) {
+					first_frag = pos;
+					last_frag = pos;
+					next_fsn = cevent->fsn + 1;
+				}
+			} else if (cevent->fsn == next_fsn) {
+				last_frag = pos;
+				next_fsn++;
+			} else {
+				goto out;
+			}
+			break;
+		case SCTP_DATA_LAST_FRAG:
+			if (!first_frag) {
+				if (cevent->fsn == sin->fsn) {
+					first_frag = pos;
+					last_frag = pos;
+					next_fsn = 0;
+					is_last = 1;
+				}
+			} else if (cevent->fsn == next_fsn) {
+				last_frag = pos;
+				next_fsn = 0;
+				is_last = 1;
+			}
+			goto out;
+		default:
+			goto out;
+		}
+	}
+
+out:
+	if (!first_frag)
+		return NULL;
+
+	retval = sctp_make_reassembled_event(sock_net(ulpq->asoc->base.sk),
+					     &ulpq->reasm, first_frag,
+					     last_frag);
+	if (retval) {
+		sin->fsn = next_fsn;
+		if (is_last) {
+			retval->msg_flags |= MSG_EOR;
+			sin->pd_mode = 0;
+		}
+	}
+
+	return retval;
+}
+
+static struct sctp_ulpevent *sctp_intl_retrieve_reassembled(
+						struct sctp_ulpq *ulpq,
+						struct sctp_ulpevent *event)
+{
+	struct sctp_association *asoc = ulpq->asoc;
+	struct sk_buff *pos, *first_frag = NULL;
+	struct sctp_ulpevent *retval = NULL;
+	struct sk_buff *pd_first = NULL;
+	struct sk_buff *pd_last = NULL;
+	struct sctp_stream_in *sin;
+	__u32 next_fsn = 0;
+	__u32 pd_point = 0;
+	__u32 pd_len = 0;
+	__u32 mid = 0;
+
+	sin = sctp_stream_in(ulpq->asoc, event->stream);
+
+	skb_queue_walk(&ulpq->reasm, pos) {
+		struct sctp_ulpevent *cevent = sctp_skb2event(pos);
+
+		if (cevent->stream < event->stream)
+			continue;
+		if (cevent->stream > event->stream)
+			break;
+
+		if (MID_lt(cevent->mid, event->mid))
+			continue;
+		if (MID_lt(event->mid, cevent->mid))
+			break;
+
+		switch (cevent->msg_flags & SCTP_DATA_FRAG_MASK) {
+		case SCTP_DATA_FIRST_FRAG:
+			if (cevent->mid == sin->mid) {
+				pd_first = pos;
+				pd_last = pos;
+				pd_len = pos->len;
+			}
+
+			first_frag = pos;
+			next_fsn = 0;
+			mid = cevent->mid;
+			break;
+
+		case SCTP_DATA_MIDDLE_FRAG:
+			if (first_frag && cevent->mid == mid &&
+			    cevent->fsn == next_fsn) {
+				next_fsn++;
+				if (pd_first) {
+					pd_last = pos;
+					pd_len += pos->len;
+				}
+			} else {
+				first_frag = NULL;
+			}
+			break;
+
+		case SCTP_DATA_LAST_FRAG:
+			if (first_frag && cevent->mid == mid &&
+			    cevent->fsn == next_fsn)
+				goto found;
+			else
+				first_frag = NULL;
+			break;
+		}
+	}
+
+	if (!pd_first)
+		goto out;
+
+	pd_point = sctp_sk(asoc->base.sk)->pd_point;
+	if (pd_point && pd_point <= pd_len) {
+		retval = sctp_make_reassembled_event(sock_net(asoc->base.sk),
+						     &ulpq->reasm,
+						     pd_first, pd_last);
+		if (retval) {
+			sin->fsn = next_fsn;
+			sin->pd_mode = 1;
+		}
+	}
+	goto out;
+
+found:
+	retval = sctp_make_reassembled_event(sock_net(asoc->base.sk),
+					     &ulpq->reasm,
+					     first_frag, pos);
+	if (retval)
+		retval->msg_flags |= MSG_EOR;
+
+out:
+	return retval;
+}
+
+static struct sctp_ulpevent *sctp_intl_reasm(struct sctp_ulpq *ulpq,
+					     struct sctp_ulpevent *event)
+{
+	struct sctp_ulpevent *retval = NULL;
+	struct sctp_stream_in *sin;
+
+	if (SCTP_DATA_NOT_FRAG == (event->msg_flags & SCTP_DATA_FRAG_MASK)) {
+		event->msg_flags |= MSG_EOR;
+		return event;
+	}
+
+	sctp_intl_store_reasm(ulpq, event);
+
+	sin = sctp_stream_in(ulpq->asoc, event->stream);
+	if (sin->pd_mode && event->mid == sin->mid &&
+	    event->fsn == sin->fsn)
+		retval = sctp_intl_retrieve_partial(ulpq, event);
+
+	if (!retval)
+		retval = sctp_intl_retrieve_reassembled(ulpq, event);
+
+	return retval;
+}
+
+static void sctp_intl_store_ordered(struct sctp_ulpq *ulpq,
+				    struct sctp_ulpevent *event)
+{
+	struct sctp_ulpevent *cevent;
+	struct sk_buff *pos;
+
+	pos = skb_peek_tail(&ulpq->lobby);
+	if (!pos) {
+		__skb_queue_tail(&ulpq->lobby, sctp_event2skb(event));
+		return;
+	}
+
+	cevent = (struct sctp_ulpevent *)pos->cb;
+	if (event->stream == cevent->stream &&
+	    MID_lt(cevent->mid, event->mid)) {
+		__skb_queue_tail(&ulpq->lobby, sctp_event2skb(event));
+		return;
+	}
+
+	if (event->stream > cevent->stream) {
+		__skb_queue_tail(&ulpq->lobby, sctp_event2skb(event));
+		return;
+	}
+
+	skb_queue_walk(&ulpq->lobby, pos) {
+		cevent = (struct sctp_ulpevent *)pos->cb;
+
+		if (cevent->stream > event->stream)
+			break;
+
+		if (cevent->stream == event->stream &&
+		    MID_lt(event->mid, cevent->mid))
+			break;
+	}
+
+	__skb_queue_before(&ulpq->lobby, pos, sctp_event2skb(event));
+}
+
+static void sctp_intl_retrieve_ordered(struct sctp_ulpq *ulpq,
+				       struct sctp_ulpevent *event)
+{
+	struct sk_buff_head *event_list;
+	struct sctp_stream *stream;
+	struct sk_buff *pos, *tmp;
+	__u16 sid = event->stream;
+
+	stream  = &ulpq->asoc->stream;
+	event_list = (struct sk_buff_head *)sctp_event2skb(event)->prev;
+
+	sctp_skb_for_each(pos, &ulpq->lobby, tmp) {
+		struct sctp_ulpevent *cevent = (struct sctp_ulpevent *)pos->cb;
+
+		if (cevent->stream > sid)
+			break;
+
+		if (cevent->stream < sid)
+			continue;
+
+		if (cevent->mid != sctp_mid_peek(stream, in, sid))
+			break;
+
+		sctp_mid_next(stream, in, sid);
+
+		__skb_unlink(pos, &ulpq->lobby);
+
+		__skb_queue_tail(event_list, pos);
+	}
+}
+
+static struct sctp_ulpevent *sctp_intl_order(struct sctp_ulpq *ulpq,
+					     struct sctp_ulpevent *event)
+{
+	struct sctp_stream *stream;
+	__u16 sid;
+
+	stream  = &ulpq->asoc->stream;
+	sid = event->stream;
+
+	if (event->mid != sctp_mid_peek(stream, in, sid)) {
+		sctp_intl_store_ordered(ulpq, event);
+		return NULL;
+	}
+
+	sctp_mid_next(stream, in, sid);
+
+	sctp_intl_retrieve_ordered(ulpq, event);
+
+	return event;
+}
+
+static int sctp_enqueue_event(struct sctp_ulpq *ulpq,
+			      struct sctp_ulpevent *event)
+{
+	struct sk_buff *skb = sctp_event2skb(event);
+	struct sock *sk = ulpq->asoc->base.sk;
+	struct sctp_sock *sp = sctp_sk(sk);
+	struct sk_buff_head *skb_list;
+
+	skb_list = (struct sk_buff_head *)skb->prev;
+
+	if (sk->sk_shutdown & RCV_SHUTDOWN &&
+	    (sk->sk_shutdown & SEND_SHUTDOWN ||
+	     !sctp_ulpevent_is_notification(event)))
+		goto out_free;
+
+	if (!sctp_ulpevent_is_notification(event)) {
+		sk_mark_napi_id(sk, skb);
+		sk_incoming_cpu_update(sk);
+	}
+
+	if (!sctp_ulpevent_is_enabled(event, &sp->subscribe))
+		goto out_free;
+
+	if (skb_list)
+		skb_queue_splice_tail_init(skb_list,
+					   &sk->sk_receive_queue);
+	else
+		__skb_queue_tail(&sk->sk_receive_queue, skb);
+
+	if (!sp->data_ready_signalled) {
+		sp->data_ready_signalled = 1;
+		sk->sk_data_ready(sk);
+	}
+
+	return 1;
+
+out_free:
+	if (skb_list)
+		sctp_queue_purge_ulpevents(skb_list);
+	else
+		sctp_ulpevent_free(event);
+
+	return 0;
+}
+
+static void sctp_intl_store_reasm_uo(struct sctp_ulpq *ulpq,
+				     struct sctp_ulpevent *event)
+{
+	struct sctp_ulpevent *cevent;
+	struct sk_buff *pos;
+
+	pos = skb_peek_tail(&ulpq->reasm_uo);
+	if (!pos) {
+		__skb_queue_tail(&ulpq->reasm_uo, sctp_event2skb(event));
+		return;
+	}
+
+	cevent = sctp_skb2event(pos);
+
+	if (event->stream == cevent->stream &&
+	    event->mid == cevent->mid &&
+	    (cevent->msg_flags & SCTP_DATA_FIRST_FRAG ||
+	     (!(event->msg_flags & SCTP_DATA_FIRST_FRAG) &&
+	      event->fsn > cevent->fsn))) {
+		__skb_queue_tail(&ulpq->reasm_uo, sctp_event2skb(event));
+		return;
+	}
+
+	if ((event->stream == cevent->stream &&
+	     MID_lt(cevent->mid, event->mid)) ||
+	    event->stream > cevent->stream) {
+		__skb_queue_tail(&ulpq->reasm_uo, sctp_event2skb(event));
+		return;
+	}
+
+	skb_queue_walk(&ulpq->reasm_uo, pos) {
+		cevent = sctp_skb2event(pos);
+
+		if (event->stream < cevent->stream ||
+		    (event->stream == cevent->stream &&
+		     MID_lt(event->mid, cevent->mid)))
+			break;
+
+		if (event->stream == cevent->stream &&
+		    event->mid == cevent->mid &&
+		    !(cevent->msg_flags & SCTP_DATA_FIRST_FRAG) &&
+		    (event->msg_flags & SCTP_DATA_FIRST_FRAG ||
+		     event->fsn < cevent->fsn))
+			break;
+	}
+
+	__skb_queue_before(&ulpq->reasm_uo, pos, sctp_event2skb(event));
+}
+
+static struct sctp_ulpevent *sctp_intl_retrieve_partial_uo(
+						struct sctp_ulpq *ulpq,
+						struct sctp_ulpevent *event)
+{
+	struct sk_buff *first_frag = NULL;
+	struct sk_buff *last_frag = NULL;
+	struct sctp_ulpevent *retval;
+	struct sctp_stream_in *sin;
+	struct sk_buff *pos;
+	__u32 next_fsn = 0;
+	int is_last = 0;
+
+	sin = sctp_stream_in(ulpq->asoc, event->stream);
+
+	skb_queue_walk(&ulpq->reasm_uo, pos) {
+		struct sctp_ulpevent *cevent = sctp_skb2event(pos);
+
+		if (cevent->stream < event->stream)
+			continue;
+		if (cevent->stream > event->stream)
+			break;
+
+		if (MID_lt(cevent->mid, sin->mid_uo))
+			continue;
+		if (MID_lt(sin->mid_uo, cevent->mid))
+			break;
+
+		switch (cevent->msg_flags & SCTP_DATA_FRAG_MASK) {
+		case SCTP_DATA_FIRST_FRAG:
+			goto out;
+		case SCTP_DATA_MIDDLE_FRAG:
+			if (!first_frag) {
+				if (cevent->fsn == sin->fsn_uo) {
+					first_frag = pos;
+					last_frag = pos;
+					next_fsn = cevent->fsn + 1;
+				}
+			} else if (cevent->fsn == next_fsn) {
+				last_frag = pos;
+				next_fsn++;
+			} else {
+				goto out;
+			}
+			break;
+		case SCTP_DATA_LAST_FRAG:
+			if (!first_frag) {
+				if (cevent->fsn == sin->fsn_uo) {
+					first_frag = pos;
+					last_frag = pos;
+					next_fsn = 0;
+					is_last = 1;
+				}
+			} else if (cevent->fsn == next_fsn) {
+				last_frag = pos;
+				next_fsn = 0;
+				is_last = 1;
+			}
+			goto out;
+		default:
+			goto out;
+		}
+	}
+
+out:
+	if (!first_frag)
+		return NULL;
+
+	retval = sctp_make_reassembled_event(sock_net(ulpq->asoc->base.sk),
+					     &ulpq->reasm_uo, first_frag,
+					     last_frag);
+	if (retval) {
+		sin->fsn_uo = next_fsn;
+		if (is_last) {
+			retval->msg_flags |= MSG_EOR;
+			sin->pd_mode_uo = 0;
+		}
+	}
+
+	return retval;
+}
+
+static struct sctp_ulpevent *sctp_intl_retrieve_reassembled_uo(
+						struct sctp_ulpq *ulpq,
+						struct sctp_ulpevent *event)
+{
+	struct sctp_association *asoc = ulpq->asoc;
+	struct sk_buff *pos, *first_frag = NULL;
+	struct sctp_ulpevent *retval = NULL;
+	struct sk_buff *pd_first = NULL;
+	struct sk_buff *pd_last = NULL;
+	struct sctp_stream_in *sin;
+	__u32 next_fsn = 0;
+	__u32 pd_point = 0;
+	__u32 pd_len = 0;
+	__u32 mid = 0;
+
+	sin = sctp_stream_in(ulpq->asoc, event->stream);
+
+	skb_queue_walk(&ulpq->reasm_uo, pos) {
+		struct sctp_ulpevent *cevent = sctp_skb2event(pos);
+
+		if (cevent->stream < event->stream)
+			continue;
+		if (cevent->stream > event->stream)
+			break;
+
+		if (MID_lt(cevent->mid, event->mid))
+			continue;
+		if (MID_lt(event->mid, cevent->mid))
+			break;
+
+		switch (cevent->msg_flags & SCTP_DATA_FRAG_MASK) {
+		case SCTP_DATA_FIRST_FRAG:
+			if (!sin->pd_mode_uo) {
+				sin->mid_uo = cevent->mid;
+				pd_first = pos;
+				pd_last = pos;
+				pd_len = pos->len;
+			}
+
+			first_frag = pos;
+			next_fsn = 0;
+			mid = cevent->mid;
+			break;
+
+		case SCTP_DATA_MIDDLE_FRAG:
+			if (first_frag && cevent->mid == mid &&
+			    cevent->fsn == next_fsn) {
+				next_fsn++;
+				if (pd_first) {
+					pd_last = pos;
+					pd_len += pos->len;
+				}
+			} else {
+				first_frag = NULL;
+			}
+			break;
+
+		case SCTP_DATA_LAST_FRAG:
+			if (first_frag && cevent->mid == mid &&
+			    cevent->fsn == next_fsn)
+				goto found;
+			else
+				first_frag = NULL;
+			break;
+		}
+	}
+
+	if (!pd_first)
+		goto out;
+
+	pd_point = sctp_sk(asoc->base.sk)->pd_point;
+	if (pd_point && pd_point <= pd_len) {
+		retval = sctp_make_reassembled_event(sock_net(asoc->base.sk),
+						     &ulpq->reasm_uo,
+						     pd_first, pd_last);
+		if (retval) {
+			sin->fsn_uo = next_fsn;
+			sin->pd_mode_uo = 1;
+		}
+	}
+	goto out;
+
+found:
+	retval = sctp_make_reassembled_event(sock_net(asoc->base.sk),
+					     &ulpq->reasm_uo,
+					     first_frag, pos);
+	if (retval)
+		retval->msg_flags |= MSG_EOR;
+
+out:
+	return retval;
+}
+
+static struct sctp_ulpevent *sctp_intl_reasm_uo(struct sctp_ulpq *ulpq,
+						struct sctp_ulpevent *event)
+{
+	struct sctp_ulpevent *retval = NULL;
+	struct sctp_stream_in *sin;
+
+	if (SCTP_DATA_NOT_FRAG == (event->msg_flags & SCTP_DATA_FRAG_MASK)) {
+		event->msg_flags |= MSG_EOR;
+		return event;
+	}
+
+	sctp_intl_store_reasm_uo(ulpq, event);
+
+	sin = sctp_stream_in(ulpq->asoc, event->stream);
+	if (sin->pd_mode_uo && event->mid == sin->mid_uo &&
+	    event->fsn == sin->fsn_uo)
+		retval = sctp_intl_retrieve_partial_uo(ulpq, event);
+
+	if (!retval)
+		retval = sctp_intl_retrieve_reassembled_uo(ulpq, event);
+
+	return retval;
+}
+
+static struct sctp_ulpevent *sctp_intl_retrieve_first_uo(struct sctp_ulpq *ulpq)
+{
+	struct sctp_stream_in *csin, *sin = NULL;
+	struct sk_buff *first_frag = NULL;
+	struct sk_buff *last_frag = NULL;
+	struct sctp_ulpevent *retval;
+	struct sk_buff *pos;
+	__u32 next_fsn = 0;
+	__u16 sid = 0;
+
+	skb_queue_walk(&ulpq->reasm_uo, pos) {
+		struct sctp_ulpevent *cevent = sctp_skb2event(pos);
+
+		csin = sctp_stream_in(ulpq->asoc, cevent->stream);
+		if (csin->pd_mode_uo)
+			continue;
+
+		switch (cevent->msg_flags & SCTP_DATA_FRAG_MASK) {
+		case SCTP_DATA_FIRST_FRAG:
+			if (first_frag)
+				goto out;
+			first_frag = pos;
+			last_frag = pos;
+			next_fsn = 0;
+			sin = csin;
+			sid = cevent->stream;
+			sin->mid_uo = cevent->mid;
+			break;
+		case SCTP_DATA_MIDDLE_FRAG:
+			if (!first_frag)
+				break;
+			if (cevent->stream == sid &&
+			    cevent->mid == sin->mid_uo &&
+			    cevent->fsn == next_fsn) {
+				next_fsn++;
+				last_frag = pos;
+			} else {
+				goto out;
+			}
+			break;
+		case SCTP_DATA_LAST_FRAG:
+			if (first_frag)
+				goto out;
+			break;
+		default:
+			break;
+		}
+	}
+
+	if (!first_frag)
+		return NULL;
+
+out:
+	retval = sctp_make_reassembled_event(sock_net(ulpq->asoc->base.sk),
+					     &ulpq->reasm_uo, first_frag,
+					     last_frag);
+	if (retval) {
+		sin->fsn_uo = next_fsn;
+		sin->pd_mode_uo = 1;
+	}
+
+	return retval;
+}
+
+static int sctp_ulpevent_idata(struct sctp_ulpq *ulpq,
+			       struct sctp_chunk *chunk, gfp_t gfp)
+{
+	struct sctp_ulpevent *event;
+	struct sk_buff_head temp;
+	int event_eor = 0;
+
+	event = sctp_ulpevent_make_rcvmsg(chunk->asoc, chunk, gfp);
+	if (!event)
+		return -ENOMEM;
+
+	event->mid = ntohl(chunk->subh.idata_hdr->mid);
+	if (event->msg_flags & SCTP_DATA_FIRST_FRAG)
+		event->ppid = chunk->subh.idata_hdr->ppid;
+	else
+		event->fsn = ntohl(chunk->subh.idata_hdr->fsn);
+
+	if (!(event->msg_flags & SCTP_DATA_UNORDERED)) {
+		event = sctp_intl_reasm(ulpq, event);
+		if (event && event->msg_flags & MSG_EOR) {
+			skb_queue_head_init(&temp);
+			__skb_queue_tail(&temp, sctp_event2skb(event));
+
+			event = sctp_intl_order(ulpq, event);
+		}
+	} else {
+		event = sctp_intl_reasm_uo(ulpq, event);
+	}
+
+	if (event) {
+		event_eor = (event->msg_flags & MSG_EOR) ? 1 : 0;
+		sctp_enqueue_event(ulpq, event);
+	}
+
+	return event_eor;
+}
+
+static struct sctp_ulpevent *sctp_intl_retrieve_first(struct sctp_ulpq *ulpq)
+{
+	struct sctp_stream_in *csin, *sin = NULL;
+	struct sk_buff *first_frag = NULL;
+	struct sk_buff *last_frag = NULL;
+	struct sctp_ulpevent *retval;
+	struct sk_buff *pos;
+	__u32 next_fsn = 0;
+	__u16 sid = 0;
+
+	skb_queue_walk(&ulpq->reasm, pos) {
+		struct sctp_ulpevent *cevent = sctp_skb2event(pos);
+
+		csin = sctp_stream_in(ulpq->asoc, cevent->stream);
+		if (csin->pd_mode)
+			continue;
+
+		switch (cevent->msg_flags & SCTP_DATA_FRAG_MASK) {
+		case SCTP_DATA_FIRST_FRAG:
+			if (first_frag)
+				goto out;
+			if (cevent->mid == csin->mid) {
+				first_frag = pos;
+				last_frag = pos;
+				next_fsn = 0;
+				sin = csin;
+				sid = cevent->stream;
+			}
+			break;
+		case SCTP_DATA_MIDDLE_FRAG:
+			if (!first_frag)
+				break;
+			if (cevent->stream == sid &&
+			    cevent->mid == sin->mid &&
+			    cevent->fsn == next_fsn) {
+				next_fsn++;
+				last_frag = pos;
+			} else {
+				goto out;
+			}
+			break;
+		case SCTP_DATA_LAST_FRAG:
+			if (first_frag)
+				goto out;
+			break;
+		default:
+			break;
+		}
+	}
+
+	if (!first_frag)
+		return NULL;
+
+out:
+	retval = sctp_make_reassembled_event(sock_net(ulpq->asoc->base.sk),
+					     &ulpq->reasm, first_frag,
+					     last_frag);
+	if (retval) {
+		sin->fsn = next_fsn;
+		sin->pd_mode = 1;
+	}
+
+	return retval;
+}
+
+static void sctp_intl_start_pd(struct sctp_ulpq *ulpq, gfp_t gfp)
+{
+	struct sctp_ulpevent *event;
+
+	if (!skb_queue_empty(&ulpq->reasm)) {
+		do {
+			event = sctp_intl_retrieve_first(ulpq);
+			if (event)
+				sctp_enqueue_event(ulpq, event);
+		} while (event);
+	}
+
+	if (!skb_queue_empty(&ulpq->reasm_uo)) {
+		do {
+			event = sctp_intl_retrieve_first_uo(ulpq);
+			if (event)
+				sctp_enqueue_event(ulpq, event);
+		} while (event);
+	}
+}
+
+static void sctp_renege_events(struct sctp_ulpq *ulpq, struct sctp_chunk *chunk,
+			       gfp_t gfp)
+{
+	struct sctp_association *asoc = ulpq->asoc;
+	__u32 freed = 0;
+	__u16 needed;
+
+	if (chunk) {
+		needed = ntohs(chunk->chunk_hdr->length);
+		needed -= sizeof(struct sctp_idata_chunk);
+	} else {
+		needed = SCTP_DEFAULT_MAXWINDOW;
+	}
+
+	if (skb_queue_empty(&asoc->base.sk->sk_receive_queue)) {
+		freed = sctp_ulpq_renege_list(ulpq, &ulpq->lobby, needed);
+		if (freed < needed)
+			freed += sctp_ulpq_renege_list(ulpq, &ulpq->reasm,
+						       needed);
+		if (freed < needed)
+			freed += sctp_ulpq_renege_list(ulpq, &ulpq->reasm_uo,
+						       needed);
+	}
+
+	if (chunk && freed >= needed)
+		if (sctp_ulpevent_idata(ulpq, chunk, gfp) <= 0)
+			sctp_intl_start_pd(ulpq, gfp);
+
+	sk_mem_reclaim(asoc->base.sk);
+}
+
+static void sctp_intl_stream_abort_pd(struct sctp_ulpq *ulpq, __u16 sid,
+				      __u32 mid, __u16 flags, gfp_t gfp)
+{
+	struct sock *sk = ulpq->asoc->base.sk;
+	struct sctp_ulpevent *ev = NULL;
+
+	if (!sctp_ulpevent_type_enabled(SCTP_PARTIAL_DELIVERY_EVENT,
+					&sctp_sk(sk)->subscribe))
+		return;
+
+	ev = sctp_ulpevent_make_pdapi(ulpq->asoc, SCTP_PARTIAL_DELIVERY_ABORTED,
+				      sid, mid, flags, gfp);
+	if (ev) {
+		__skb_queue_tail(&sk->sk_receive_queue, sctp_event2skb(ev));
+
+		if (!sctp_sk(sk)->data_ready_signalled) {
+			sctp_sk(sk)->data_ready_signalled = 1;
+			sk->sk_data_ready(sk);
+		}
+	}
+}
+
+static void sctp_intl_reap_ordered(struct sctp_ulpq *ulpq, __u16 sid)
+{
+	struct sctp_stream *stream = &ulpq->asoc->stream;
+	struct sctp_ulpevent *cevent, *event = NULL;
+	struct sk_buff_head *lobby = &ulpq->lobby;
+	struct sk_buff *pos, *tmp;
+	struct sk_buff_head temp;
+	__u16 csid;
+	__u32 cmid;
+
+	skb_queue_head_init(&temp);
+	sctp_skb_for_each(pos, lobby, tmp) {
+		cevent = (struct sctp_ulpevent *)pos->cb;
+		csid = cevent->stream;
+		cmid = cevent->mid;
+
+		if (csid > sid)
+			break;
+
+		if (csid < sid)
+			continue;
+
+		if (!MID_lt(cmid, sctp_mid_peek(stream, in, csid)))
+			break;
+
+		__skb_unlink(pos, lobby);
+		if (!event)
+			event = sctp_skb2event(pos);
+
+		__skb_queue_tail(&temp, pos);
+	}
+
+	if (!event && pos != (struct sk_buff *)lobby) {
+		cevent = (struct sctp_ulpevent *)pos->cb;
+		csid = cevent->stream;
+		cmid = cevent->mid;
+
+		if (csid == sid && cmid == sctp_mid_peek(stream, in, csid)) {
+			sctp_mid_next(stream, in, csid);
+			__skb_unlink(pos, lobby);
+			__skb_queue_tail(&temp, pos);
+			event = sctp_skb2event(pos);
+		}
+	}
+
+	if (event) {
+		sctp_intl_retrieve_ordered(ulpq, event);
+		sctp_enqueue_event(ulpq, event);
+	}
+}
+
+static void sctp_intl_abort_pd(struct sctp_ulpq *ulpq, gfp_t gfp)
+{
+	struct sctp_stream *stream = &ulpq->asoc->stream;
+	__u16 sid;
+
+	for (sid = 0; sid < stream->incnt; sid++) {
+		struct sctp_stream_in *sin = &stream->in[sid];
+		__u32 mid;
+
+		if (sin->pd_mode_uo) {
+			sin->pd_mode_uo = 0;
+
+			mid = sin->mid_uo;
+			sctp_intl_stream_abort_pd(ulpq, sid, mid, 0x1, gfp);
+		}
+
+		if (sin->pd_mode) {
+			sin->pd_mode = 0;
+
+			mid = sin->mid;
+			sctp_intl_stream_abort_pd(ulpq, sid, mid, 0, gfp);
+			sctp_mid_skip(stream, in, sid, mid);
+
+			sctp_intl_reap_ordered(ulpq, sid);
+		}
+	}
+
+	/* intl abort pd happens only when all data needs to be cleaned */
+	sctp_ulpq_flush(ulpq);
+}
+
+static inline int sctp_get_skip_pos(struct sctp_ifwdtsn_skip *skiplist,
+				    int nskips, __be16 stream, __u8 flags)
+{
+	int i;
+
+	for (i = 0; i < nskips; i++)
+		if (skiplist[i].stream == stream &&
+		    skiplist[i].flags == flags)
+			return i;
+
+	return i;
+}
+
+#define SCTP_FTSN_U_BIT	0x1
+static void sctp_generate_iftsn(struct sctp_outq *q, __u32 ctsn)
+{
+	struct sctp_ifwdtsn_skip ftsn_skip_arr[10];
+	struct sctp_association *asoc = q->asoc;
+	struct sctp_chunk *ftsn_chunk = NULL;
+	struct list_head *lchunk, *temp;
+	int nskips = 0, skip_pos;
+	struct sctp_chunk *chunk;
+	__u32 tsn;
+
+	if (!asoc->peer.prsctp_capable)
+		return;
+
+	if (TSN_lt(asoc->adv_peer_ack_point, ctsn))
+		asoc->adv_peer_ack_point = ctsn;
+
+	list_for_each_safe(lchunk, temp, &q->abandoned) {
+		chunk = list_entry(lchunk, struct sctp_chunk, transmitted_list);
+		tsn = ntohl(chunk->subh.data_hdr->tsn);
+
+		if (TSN_lte(tsn, ctsn)) {
+			list_del_init(lchunk);
+			sctp_chunk_free(chunk);
+		} else if (TSN_lte(tsn, asoc->adv_peer_ack_point + 1)) {
+			__be16 sid = chunk->subh.idata_hdr->stream;
+			__be32 mid = chunk->subh.idata_hdr->mid;
+			__u8 flags = 0;
+
+			if (chunk->chunk_hdr->flags & SCTP_DATA_UNORDERED)
+				flags |= SCTP_FTSN_U_BIT;
+
+			asoc->adv_peer_ack_point = tsn;
+			skip_pos = sctp_get_skip_pos(&ftsn_skip_arr[0], nskips,
+						     sid, flags);
+			ftsn_skip_arr[skip_pos].stream = sid;
+			ftsn_skip_arr[skip_pos].reserved = 0;
+			ftsn_skip_arr[skip_pos].flags = flags;
+			ftsn_skip_arr[skip_pos].mid = mid;
+			if (skip_pos == nskips)
+				nskips++;
+			if (nskips == 10)
+				break;
+		} else {
+			break;
+		}
+	}
+
+	if (asoc->adv_peer_ack_point > ctsn)
+		ftsn_chunk = sctp_make_ifwdtsn(asoc, asoc->adv_peer_ack_point,
+					       nskips, &ftsn_skip_arr[0]);
+
+	if (ftsn_chunk) {
+		list_add_tail(&ftsn_chunk->list, &q->control_chunk_list);
+		SCTP_INC_STATS(sock_net(asoc->base.sk), SCTP_MIB_OUTCTRLCHUNKS);
+	}
+}
+
+#define _sctp_walk_ifwdtsn(pos, chunk, end) \
+	for (pos = chunk->subh.ifwdtsn_hdr->skip; \
+	     (void *)pos < (void *)chunk->subh.ifwdtsn_hdr->skip + (end); pos++)
+
+#define sctp_walk_ifwdtsn(pos, ch) \
+	_sctp_walk_ifwdtsn((pos), (ch), ntohs((ch)->chunk_hdr->length) - \
+					sizeof(struct sctp_ifwdtsn_chunk))
+
+static bool sctp_validate_fwdtsn(struct sctp_chunk *chunk)
+{
+	struct sctp_fwdtsn_skip *skip;
+	__u16 incnt;
+
+	if (chunk->chunk_hdr->type != SCTP_CID_FWD_TSN)
+		return false;
+
+	incnt = chunk->asoc->stream.incnt;
+	sctp_walk_fwdtsn(skip, chunk)
+		if (ntohs(skip->stream) >= incnt)
+			return false;
+
+	return true;
+}
+
+static bool sctp_validate_iftsn(struct sctp_chunk *chunk)
+{
+	struct sctp_ifwdtsn_skip *skip;
+	__u16 incnt;
+
+	if (chunk->chunk_hdr->type != SCTP_CID_I_FWD_TSN)
+		return false;
+
+	incnt = chunk->asoc->stream.incnt;
+	sctp_walk_ifwdtsn(skip, chunk)
+		if (ntohs(skip->stream) >= incnt)
+			return false;
+
+	return true;
+}
+
+static void sctp_report_fwdtsn(struct sctp_ulpq *ulpq, __u32 ftsn)
+{
+	/* Move the Cumulative TSN Ack ahead. */
+	sctp_tsnmap_skip(&ulpq->asoc->peer.tsn_map, ftsn);
+	/* Purge the fragmentation queue. */
+	sctp_ulpq_reasm_flushtsn(ulpq, ftsn);
+	/* Abort any in-progress partial delivery. */
+	sctp_ulpq_abort_pd(ulpq, GFP_ATOMIC);
+}
+
+static void sctp_intl_reasm_flushtsn(struct sctp_ulpq *ulpq, __u32 ftsn)
+{
+	struct sk_buff *pos, *tmp;
+
+	skb_queue_walk_safe(&ulpq->reasm, pos, tmp) {
+		struct sctp_ulpevent *event = sctp_skb2event(pos);
+		__u32 tsn = event->tsn;
+
+		if (TSN_lte(tsn, ftsn)) {
+			__skb_unlink(pos, &ulpq->reasm);
+			sctp_ulpevent_free(event);
+		}
+	}
+
+	skb_queue_walk_safe(&ulpq->reasm_uo, pos, tmp) {
+		struct sctp_ulpevent *event = sctp_skb2event(pos);
+		__u32 tsn = event->tsn;
+
+		if (TSN_lte(tsn, ftsn)) {
+			__skb_unlink(pos, &ulpq->reasm_uo);
+			sctp_ulpevent_free(event);
+		}
+	}
+}
+
+static void sctp_report_iftsn(struct sctp_ulpq *ulpq, __u32 ftsn)
+{
+	/* Move the Cumulative TSN Ack ahead. */
+	sctp_tsnmap_skip(&ulpq->asoc->peer.tsn_map, ftsn);
+	/* Purge the fragmentation queue. */
+	sctp_intl_reasm_flushtsn(ulpq, ftsn);
+	/* Abort partial delivery only when the skip covers all data. */
+	if (ftsn == sctp_tsnmap_get_max_tsn_seen(&ulpq->asoc->peer.tsn_map))
+		sctp_intl_abort_pd(ulpq, GFP_ATOMIC);
+}
+
+static void sctp_handle_fwdtsn(struct sctp_ulpq *ulpq, struct sctp_chunk *chunk)
+{
+	struct sctp_fwdtsn_skip *skip;
+
+	/* Walk through all the skipped SSNs */
+	sctp_walk_fwdtsn(skip, chunk)
+		sctp_ulpq_skip(ulpq, ntohs(skip->stream), ntohs(skip->ssn));
+}
+
+static void sctp_intl_skip(struct sctp_ulpq *ulpq, __u16 sid, __u32 mid,
+			   __u8 flags)
+{
+	struct sctp_stream_in *sin = sctp_stream_in(ulpq->asoc, sid);
+	struct sctp_stream *stream  = &ulpq->asoc->stream;
+
+	if (flags & SCTP_FTSN_U_BIT) {
+		if (sin->pd_mode_uo && MID_lt(sin->mid_uo, mid)) {
+			sin->pd_mode_uo = 0;
+			sctp_intl_stream_abort_pd(ulpq, sid, mid, 0x1,
+						  GFP_ATOMIC);
+		}
+		return;
+	}
+
+	if (MID_lt(mid, sctp_mid_peek(stream, in, sid)))
+		return;
+
+	if (sin->pd_mode) {
+		sin->pd_mode = 0;
+		sctp_intl_stream_abort_pd(ulpq, sid, mid, 0x0, GFP_ATOMIC);
+	}
+
+	sctp_mid_skip(stream, in, sid, mid);
+
+	sctp_intl_reap_ordered(ulpq, sid);
+}
+
+static void sctp_handle_iftsn(struct sctp_ulpq *ulpq, struct sctp_chunk *chunk)
+{
+	struct sctp_ifwdtsn_skip *skip;
+
+	/* Walk through all the skipped MIDs and abort stream pd if possible */
+	sctp_walk_ifwdtsn(skip, chunk)
+		sctp_intl_skip(ulpq, ntohs(skip->stream),
+			       ntohl(skip->mid), skip->flags);
+}
+
+static struct sctp_stream_interleave sctp_stream_interleave_0 = {
+	.data_chunk_len		= sizeof(struct sctp_data_chunk),
+	.ftsn_chunk_len		= sizeof(struct sctp_fwdtsn_chunk),
+	/* DATA process functions */
+	.make_datafrag		= sctp_make_datafrag_empty,
+	.assign_number		= sctp_chunk_assign_ssn,
+	.validate_data		= sctp_validate_data,
+	.ulpevent_data		= sctp_ulpq_tail_data,
+	.enqueue_event		= sctp_ulpq_tail_event,
+	.renege_events		= sctp_ulpq_renege,
+	.start_pd		= sctp_ulpq_partial_delivery,
+	.abort_pd		= sctp_ulpq_abort_pd,
+	/* FORWARD-TSN process functions */
+	.generate_ftsn		= sctp_generate_fwdtsn,
+	.validate_ftsn		= sctp_validate_fwdtsn,
+	.report_ftsn		= sctp_report_fwdtsn,
+	.handle_ftsn		= sctp_handle_fwdtsn,
+};
+
+static struct sctp_stream_interleave sctp_stream_interleave_1 = {
+	.data_chunk_len		= sizeof(struct sctp_idata_chunk),
+	.ftsn_chunk_len		= sizeof(struct sctp_ifwdtsn_chunk),
+	/* I-DATA process functions */
+	.make_datafrag		= sctp_make_idatafrag_empty,
+	.assign_number		= sctp_chunk_assign_mid,
+	.validate_data		= sctp_validate_idata,
+	.ulpevent_data		= sctp_ulpevent_idata,
+	.enqueue_event		= sctp_enqueue_event,
+	.renege_events		= sctp_renege_events,
+	.start_pd		= sctp_intl_start_pd,
+	.abort_pd		= sctp_intl_abort_pd,
+	/* I-FORWARD-TSN process functions */
+	.generate_ftsn		= sctp_generate_iftsn,
+	.validate_ftsn		= sctp_validate_iftsn,
+	.report_ftsn		= sctp_report_iftsn,
+	.handle_ftsn		= sctp_handle_iftsn,
+};
+
+void sctp_stream_interleave_init(struct sctp_stream *stream)
+{
+	struct sctp_association *asoc;
+
+	asoc = container_of(stream, struct sctp_association, stream);
+	stream->si = asoc->intl_enable ? &sctp_stream_interleave_1
+				       : &sctp_stream_interleave_0;
+}
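
The reassembly and ordering logic in this file compares message identifiers
with MID_lt, which must be wraparound-safe because the 32-bit MID is
free-running. A sketch assuming the usual serial-number arithmetic,
mirroring how SCTP compares TSNs (an assumed shape, not the kernel macro):

    #include <assert.h>

    /* Wraparound-safe "less than" for 32-bit serial numbers. */
    static inline int mid_lt(unsigned int a, unsigned int b)
    {
        return (int)(a - b) < 0;
    }

    int main(void)
    {
        assert(mid_lt(1, 2));
        assert(mid_lt(0xffffffffu, 0));    /* 2^32 - 1 precedes 0 across the wrap */
        assert(!mid_lt(0, 0xffffffffu));
        return 0;
    }
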
diff --git a/net/sctp/stream_sched.c b/net/sctp/stream_sched.c
index d8c162a4089c..f5fcd425232a 100644
--- a/net/sctp/stream_sched.c
+++ b/net/sctp/stream_sched.c
@@ -242,7 +242,8 @@ int sctp_sched_get_value(struct sctp_association *asoc, __u16 sid,
 
 void sctp_sched_dequeue_done(struct sctp_outq *q, struct sctp_chunk *ch)
 {
-	if (!list_is_last(&ch->frag_list, &ch->msg->chunks)) {
+	if (!list_is_last(&ch->frag_list, &ch->msg->chunks) &&
+	    !q->asoc->intl_enable) {
 		struct sctp_stream_out *sout;
 		__u16 sid;
 
diff --git a/net/sctp/sysctl.c b/net/sctp/sysctl.c
index ef7ca44d6e6a..33ca5b73cdb3 100644
--- a/net/sctp/sysctl.c
+++ b/net/sctp/sysctl.c
@@ -289,6 +289,13 @@ static struct ctl_table sctp_net_table[] = {
 		.proc_handler	= proc_sctp_do_auth,
 	},
 	{
+		.procname	= "intl_enable",
+		.data		= &init_net.sctp.intl_enable,
+		.maxlen		= sizeof(int),
+		.mode		= 0644,
+		.proc_handler	= proc_dointvec,
+	},
+	{
 		.procname	= "addr_scope_policy",
 		.data		= &init_net.sctp.scope_policy,
 		.maxlen		= sizeof(int),
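
The new knob gates the feature system-wide and defaults to off. Flipping it
from C is just a procfs write, equivalent to sysctl -w
net.sctp.intl_enable=1 (path derived from the table entry above; requires
root):

    #include <stdio.h>

    static int set_intl_enable(int on)
    {
        FILE *f = fopen("/proc/sys/net/sctp/intl_enable", "w");

        if (!f)
            return -1;
        fprintf(f, "%d\n", on);
        return fclose(f);
    }
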
diff --git a/net/sctp/ulpevent.c b/net/sctp/ulpevent.c
index 5447228bf1a0..84207ad33e8e 100644
--- a/net/sctp/ulpevent.c
+++ b/net/sctp/ulpevent.c
@@ -443,8 +443,8 @@ struct sctp_ulpevent *sctp_ulpevent_make_send_failed(
 		goto fail;
 
 	/* Pull off the common chunk header and DATA header.  */
-	skb_pull(skb, sizeof(struct sctp_data_chunk));
-	len -= sizeof(struct sctp_data_chunk);
+	skb_pull(skb, sctp_datachk_len(&asoc->stream));
+	len -= sctp_datachk_len(&asoc->stream);
 
 	/* Embed the event fields inside the cloned skb.  */
 	event = sctp_skb2event(skb);
@@ -705,8 +705,6 @@ struct sctp_ulpevent *sctp_ulpevent_make_rcvmsg(struct sctp_association *asoc,
 	sctp_ulpevent_receive_data(event, asoc);
 
 	event->stream = ntohs(chunk->subh.data_hdr->stream);
-	event->ssn = ntohs(chunk->subh.data_hdr->ssn);
-	event->ppid = chunk->subh.data_hdr->ppid;
 	if (chunk->chunk_hdr->flags & SCTP_DATA_UNORDERED) {
 		event->flags |= SCTP_UNORDERED;
 		event->cumtsn = sctp_tsnmap_get_ctsn(&asoc->peer.tsn_map);
@@ -732,8 +730,9 @@ fail:
  *   various events.
  */
 struct sctp_ulpevent *sctp_ulpevent_make_pdapi(
-	const struct sctp_association *asoc, __u32 indication,
-	gfp_t gfp)
+					const struct sctp_association *asoc,
+					__u32 indication, __u32 sid, __u32 seq,
+					__u32 flags, gfp_t gfp)
 {
 	struct sctp_ulpevent *event;
 	struct sctp_pdapi_event *pd;
@@ -754,7 +753,9 @@ struct sctp_ulpevent *sctp_ulpevent_make_pdapi(
 	 *   Currently unused.
 	 */
 	pd->pdapi_type = SCTP_PARTIAL_DELIVERY_EVENT;
-	pd->pdapi_flags = 0;
+	pd->pdapi_flags = flags;
+	pd->pdapi_stream = sid;
+	pd->pdapi_seq = seq;
 
 	/* pdapi_length: 32 bits (unsigned integer)
 	 *
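
With the extra arguments, a partial-delivery-aborted notification can now
tell the receiver which stream and message were cut short. A hedged sketch
of consuming it, assuming the pdapi_stream and pdapi_seq members this series
adds to the uapi struct sctp_pdapi_event:

    #include <stdio.h>
    #include <netinet/sctp.h>

    static void handle_notification(const union sctp_notification *n)
    {
        const struct sctp_pdapi_event *pd = &n->sn_pdapi_event;

        if (n->sn_header.sn_type != SCTP_PARTIAL_DELIVERY_EVENT)
            return;

        if (pd->pdapi_indication == SCTP_PARTIAL_DELIVERY_ABORTED)
            fprintf(stderr, "pd aborted: stream %u, msg %u, flags 0x%x\n",
                    pd->pdapi_stream, pd->pdapi_seq, pd->pdapi_flags);
    }
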
diff --git a/net/sctp/ulpqueue.c b/net/sctp/ulpqueue.c
index e36ec5dd64c6..0b427100b0d4 100644
--- a/net/sctp/ulpqueue.c
+++ b/net/sctp/ulpqueue.c
@@ -60,6 +60,7 @@ struct sctp_ulpq *sctp_ulpq_init(struct sctp_ulpq *ulpq,
 
 	ulpq->asoc = asoc;
 	skb_queue_head_init(&ulpq->reasm);
+	skb_queue_head_init(&ulpq->reasm_uo);
 	skb_queue_head_init(&ulpq->lobby);
 	ulpq->pd_mode  = 0;
 
@@ -83,6 +84,10 @@ void sctp_ulpq_flush(struct sctp_ulpq *ulpq)
 		sctp_ulpevent_free(event);
 	}
 
+	while ((skb = __skb_dequeue(&ulpq->reasm_uo)) != NULL) {
+		event = sctp_skb2event(skb);
+		sctp_ulpevent_free(event);
+	}
 }
 
 /* Dispose of a ulpqueue.  */
@@ -104,6 +109,9 @@ int sctp_ulpq_tail_data(struct sctp_ulpq *ulpq, struct sctp_chunk *chunk,
 	if (!event)
 		return -ENOMEM;
 
+	event->ssn = ntohs(chunk->subh.data_hdr->ssn);
+	event->ppid = chunk->subh.data_hdr->ppid;
+
 	/* Do reassembly if needed.  */
 	event = sctp_ulpq_reasm(ulpq, event);
 
@@ -328,9 +336,10 @@ static void sctp_ulpq_store_reasm(struct sctp_ulpq *ulpq,
  * payload was fragmented on the way and ip had to reassemble them.
  * We add the rest of skb's to the first skb's fraglist.
  */
-static struct sctp_ulpevent *sctp_make_reassembled_event(struct net *net,
-	struct sk_buff_head *queue, struct sk_buff *f_frag,
-	struct sk_buff *l_frag)
+struct sctp_ulpevent *sctp_make_reassembled_event(struct net *net,
+						  struct sk_buff_head *queue,
+						  struct sk_buff *f_frag,
+						  struct sk_buff *l_frag)
 {
 	struct sk_buff *pos;
 	struct sk_buff *new = NULL;
@@ -853,7 +862,7 @@ static struct sctp_ulpevent *sctp_ulpq_order(struct sctp_ulpq *ulpq,
 	struct sctp_stream *stream;
 
 	/* Check if this message needs ordering.  */
-	if (SCTP_DATA_UNORDERED & event->msg_flags)
+	if (event->msg_flags & SCTP_DATA_UNORDERED)
 		return event;
 
 	/* Note: The stream ID must be verified before this routine.  */
@@ -974,8 +983,8 @@ void sctp_ulpq_skip(struct sctp_ulpq *ulpq, __u16 sid, __u16 ssn)
 	sctp_ulpq_reap_ordered(ulpq, sid);
 }
 
-static __u16 sctp_ulpq_renege_list(struct sctp_ulpq *ulpq,
-		struct sk_buff_head *list, __u16 needed)
+__u16 sctp_ulpq_renege_list(struct sctp_ulpq *ulpq, struct sk_buff_head *list,
+			    __u16 needed)
 {
 	__u16 freed = 0;
 	__u32 tsn, last_tsn;
@@ -1132,7 +1141,7 @@ void sctp_ulpq_abort_pd(struct sctp_ulpq *ulpq, gfp_t gfp)
 				       &sctp_sk(sk)->subscribe))
 		ev = sctp_ulpevent_make_pdapi(ulpq->asoc,
 					      SCTP_PARTIAL_DELIVERY_ABORTED,
-					      gfp);
+					      0, 0, 0, gfp);
 	if (ev)
 		__skb_queue_tail(&sk->sk_receive_queue, sctp_event2skb(ev));
 
diff --git a/net/smc/af_smc.c b/net/smc/af_smc.c
index 449f62e1e270..3583c8ab1bae 100644
--- a/net/smc/af_smc.c
+++ b/net/smc/af_smc.c
@@ -115,7 +115,6 @@ static int smc_release(struct socket *sock)
 		goto out;
 
 	smc = smc_sk(sk);
-	sock_hold(sk);
 	if (sk->sk_state == SMC_LISTEN)
 		/* smc_close_non_accepted() is called and acquires
 		 * sock lock for child sockets again
@@ -124,10 +123,7 @@ static int smc_release(struct socket *sock)
 	else
 		lock_sock(sk);
 
-	if (smc->use_fallback) {
-		sk->sk_state = SMC_CLOSED;
-		sk->sk_state_change(sk);
-	} else {
+	if (!smc->use_fallback) {
 		rc = smc_close_active(smc);
 		sock_set_flag(sk, SOCK_DEAD);
 		sk->sk_shutdown |= SHUTDOWN_MASK;
@@ -136,20 +132,21 @@ static int smc_release(struct socket *sock)
 		sock_release(smc->clcsock);
 		smc->clcsock = NULL;
 	}
+	if (smc->use_fallback) {
+		sock_put(sk); /* passive closing */
+		sk->sk_state = SMC_CLOSED;
+		sk->sk_state_change(sk);
+	}
 
 	/* detach socket */
 	sock_orphan(sk);
 	sock->sk = NULL;
-	if (smc->use_fallback) {
-		schedule_delayed_work(&smc->sock_put_work, TCP_TIMEWAIT_LEN);
-	} else if (sk->sk_state == SMC_CLOSED) {
+	if (!smc->use_fallback && sk->sk_state == SMC_CLOSED)
 		smc_conn_free(&smc->conn);
-		schedule_delayed_work(&smc->sock_put_work,
-				      SMC_CLOSE_SOCK_PUT_DELAY);
-	}
 	release_sock(sk);
 
-	sock_put(sk);
+	sk->sk_prot->unhash(sk);
+	sock_put(sk); /* final sock_put */
 out:
 	return rc;
 }
@@ -181,7 +178,6 @@ static struct sock *smc_sock_alloc(struct net *net, struct socket *sock)
 	INIT_WORK(&smc->tcp_listen_work, smc_tcp_listen_work);
 	INIT_LIST_HEAD(&smc->accept_q);
 	spin_lock_init(&smc->accept_q_lock);
-	INIT_DELAYED_WORK(&smc->sock_put_work, smc_close_sock_put_work);
 	sk->sk_prot->hash(sk);
 	sk_refcnt_debug_inc(sk);
 
@@ -377,6 +373,15 @@ static void smc_link_save_peer_info(struct smc_link *link,
 	link->peer_mtu = clc->qp_mtu;
 }
 
+static void smc_lgr_forget(struct smc_link_group *lgr)
+{
+	spin_lock_bh(&smc_lgr_list.lock);
+	/* do not use this link group for new connections */
+	if (!list_empty(&lgr->list))
+		list_del_init(&lgr->list);
+	spin_unlock_bh(&smc_lgr_list.lock);
+}
+
 /* setup for RDMA connection of client */
 static int smc_connect_rdma(struct smc_sock *smc)
 {
@@ -390,6 +395,8 @@ static int smc_connect_rdma(struct smc_sock *smc)
 	int rc = 0;
 	u8 ibport;
 
+	sock_hold(&smc->sk); /* sock put in passive closing */
+
 	if (!tcp_sk(smc->clcsock->sk)->syn_smc) {
 		/* peer has not signalled SMC-capability */
 		smc->use_fallback = true;
@@ -513,6 +520,8 @@ out_connected:
 	return rc ? rc : local_contact;
 
 decline_rdma_unlock:
+	if (local_contact == SMC_FIRST_CONTACT)
+		smc_lgr_forget(smc->conn.lgr);
 	mutex_unlock(&smc_create_lgr_pending);
 	smc_conn_free(&smc->conn);
 decline_rdma:
@@ -520,15 +529,19 @@ decline_rdma:
 	smc->use_fallback = true;
 	if (reason_code && (reason_code != SMC_CLC_DECL_REPLY)) {
 		rc = smc_clc_send_decline(smc, reason_code);
-		if (rc < sizeof(struct smc_clc_msg_decline))
+		if (rc < 0)
 			goto out_err;
 	}
 	goto out_connected;
 
 out_err_unlock:
+	if (local_contact == SMC_FIRST_CONTACT)
+		smc_lgr_forget(smc->conn.lgr);
 	mutex_unlock(&smc_create_lgr_pending);
 	smc_conn_free(&smc->conn);
 out_err:
+	if (smc->sk.sk_state == SMC_INIT)
+		sock_put(&smc->sk); /* passive closing */
 	return rc;
 }
 
@@ -581,40 +594,33 @@ out_err:
 
 static int smc_clcsock_accept(struct smc_sock *lsmc, struct smc_sock **new_smc)
 {
-	struct sock *sk = &lsmc->sk;
-	struct socket *new_clcsock;
+	struct socket *new_clcsock = NULL;
+	struct sock *lsk = &lsmc->sk;
 	struct sock *new_sk;
 	int rc;
 
-	release_sock(&lsmc->sk);
-	new_sk = smc_sock_alloc(sock_net(sk), NULL);
+	release_sock(lsk);
+	new_sk = smc_sock_alloc(sock_net(lsk), NULL);
 	if (!new_sk) {
 		rc = -ENOMEM;
-		lsmc->sk.sk_err = ENOMEM;
+		lsk->sk_err = ENOMEM;
 		*new_smc = NULL;
-		lock_sock(&lsmc->sk);
+		lock_sock(lsk);
 		goto out;
 	}
 	*new_smc = smc_sk(new_sk);
 
 	rc = kernel_accept(lsmc->clcsock, &new_clcsock, 0);
-	lock_sock(&lsmc->sk);
-	if  (rc < 0) {
-		lsmc->sk.sk_err = -rc;
-		new_sk->sk_state = SMC_CLOSED;
-		sock_set_flag(new_sk, SOCK_DEAD);
-		sk->sk_prot->unhash(new_sk);
-		sock_put(new_sk);
-		*new_smc = NULL;
-		goto out;
-	}
-	if (lsmc->sk.sk_state == SMC_CLOSED) {
+	lock_sock(lsk);
+	if (rc < 0)
+		lsk->sk_err = -rc;
+	if (rc < 0 || lsk->sk_state == SMC_CLOSED) {
 		if (new_clcsock)
 			sock_release(new_clcsock);
 		new_sk->sk_state = SMC_CLOSED;
 		sock_set_flag(new_sk, SOCK_DEAD);
-		sk->sk_prot->unhash(new_sk);
-		sock_put(new_sk);
+		new_sk->sk_prot->unhash(new_sk);
+		sock_put(new_sk); /* final */
 		*new_smc = NULL;
 		goto out;
 	}
@@ -631,7 +637,7 @@ static void smc_accept_enqueue(struct sock *parent, struct sock *sk)
 {
 	struct smc_sock *par = smc_sk(parent);
 
-	sock_hold(sk);
+	sock_hold(sk); /* sock_put in smc_accept_unlink() */
 	spin_lock(&par->accept_q_lock);
 	list_add_tail(&smc_sk(sk)->accept_q, &par->accept_q);
 	spin_unlock(&par->accept_q_lock);
@@ -647,7 +653,7 @@ static void smc_accept_unlink(struct sock *sk)
 	list_del_init(&smc_sk(sk)->accept_q);
 	spin_unlock(&par->accept_q_lock);
 	sk_acceptq_removed(&smc_sk(sk)->listen_smc->sk);
-	sock_put(sk);
+	sock_put(sk); /* sock_hold in smc_accept_enqueue */
 }
 
 /* remove a sock from the accept queue to bind it to a new socket created
@@ -664,8 +670,12 @@ struct sock *smc_accept_dequeue(struct sock *parent,
 
 		smc_accept_unlink(new_sk);
 		if (new_sk->sk_state == SMC_CLOSED) {
+			if (isk->clcsock) {
+				sock_release(isk->clcsock);
+				isk->clcsock = NULL;
+			}
 			new_sk->sk_prot->unhash(new_sk);
-			sock_put(new_sk);
+			sock_put(new_sk); /* final */
 			continue;
 		}
 		if (new_sock)
@@ -680,14 +690,11 @@ void smc_close_non_accepted(struct sock *sk)
 {
 	struct smc_sock *smc = smc_sk(sk);
 
-	sock_hold(sk);
 	lock_sock(sk);
 	if (!sk->sk_lingertime)
 		/* wait for peer closing */
 		sk->sk_lingertime = SMC_MAX_STREAM_WAIT_TIMEOUT;
-	if (smc->use_fallback) {
-		sk->sk_state = SMC_CLOSED;
-	} else {
+	if (!smc->use_fallback) {
 		smc_close_active(smc);
 		sock_set_flag(sk, SOCK_DEAD);
 		sk->sk_shutdown |= SHUTDOWN_MASK;
@@ -700,14 +707,15 @@ void smc_close_non_accepted(struct sock *sk)
 		sock_release(tcp);
 	}
 	if (smc->use_fallback) {
-		schedule_delayed_work(&smc->sock_put_work, TCP_TIMEWAIT_LEN);
-	} else if (sk->sk_state == SMC_CLOSED) {
-		smc_conn_free(&smc->conn);
-		schedule_delayed_work(&smc->sock_put_work,
-				      SMC_CLOSE_SOCK_PUT_DELAY);
+		sock_put(sk); /* passive closing */
+		sk->sk_state = SMC_CLOSED;
+	} else {
+		if (sk->sk_state == SMC_CLOSED)
+			smc_conn_free(&smc->conn);
 	}
 	release_sock(sk);
-	sock_put(sk);
+	sk->sk_prot->unhash(sk);
+	sock_put(sk); /* final sock_put */
 }
 
 static int smc_serv_conf_first_link(struct smc_sock *smc)
@@ -751,14 +759,16 @@ static void smc_listen_work(struct work_struct *work)
 {
 	struct smc_sock *new_smc = container_of(work, struct smc_sock,
 						smc_listen_work);
+	struct smc_clc_msg_proposal_prefix *pclc_prfx;
 	struct socket *newclcsock = new_smc->clcsock;
 	struct smc_sock *lsmc = new_smc->listen_smc;
 	struct smc_clc_msg_accept_confirm cclc;
 	int local_contact = SMC_REUSE_CONTACT;
 	struct sock *newsmcsk = &new_smc->sk;
-	struct smc_clc_msg_proposal pclc;
+	struct smc_clc_msg_proposal *pclc;
 	struct smc_ib_device *smcibdev;
 	struct sockaddr_in peeraddr;
+	u8 buf[SMC_CLC_MAX_LEN];
 	struct smc_link *link;
 	int reason_code = 0;
 	int rc = 0, len;
@@ -775,7 +785,7 @@ static void smc_listen_work(struct work_struct *work)
 	/* do inband token exchange -
 	 *wait for and receive SMC Proposal CLC message
 	 */
-	reason_code = smc_clc_wait_msg(new_smc, &pclc, sizeof(pclc),
+	reason_code = smc_clc_wait_msg(new_smc, &buf, sizeof(buf),
 				       SMC_CLC_PROPOSAL);
 	if (reason_code < 0)
 		goto out_err;
@@ -804,8 +814,11 @@ static void smc_listen_work(struct work_struct *work)
 		reason_code = SMC_CLC_DECL_CNFERR; /* configuration error */
 		goto decline_rdma;
 	}
-	if ((pclc.outgoing_subnet != subnet) ||
-	    (pclc.prefix_len != prefix_len)) {
+
+	pclc = (struct smc_clc_msg_proposal *)&buf;
+	pclc_prfx = smc_clc_proposal_get_prefix(pclc);
+	if (pclc_prfx->outgoing_subnet != subnet ||
+	    pclc_prfx->prefix_len != prefix_len) {
 		reason_code = SMC_CLC_DECL_CNFERR; /* configuration error */
 		goto decline_rdma;
 	}
@@ -816,7 +829,7 @@ static void smc_listen_work(struct work_struct *work)
 	/* allocate connection / link group */
 	mutex_lock(&smc_create_lgr_pending);
 	local_contact = smc_conn_create(new_smc, peeraddr.sin_addr.s_addr,
-					smcibdev, ibport, &pclc.lcl, 0);
+					smcibdev, ibport, &pclc->lcl, 0);
 	if (local_contact < 0) {
 		rc = local_contact;
 		if (rc == -ENOMEM)
@@ -879,11 +892,9 @@ static void smc_listen_work(struct work_struct *work)
 		}
 		/* QP confirmation over RoCE fabric */
 		reason_code = smc_serv_conf_first_link(new_smc);
-		if (reason_code < 0) {
+		if (reason_code < 0)
 			/* peer is not aware of a problem */
-			rc = reason_code;
 			goto out_err_unlock;
-		}
 		if (reason_code > 0)
 			goto decline_rdma_unlock;
 	}
@@ -910,21 +921,26 @@ enqueue:
 	return;
 
 decline_rdma_unlock:
+	if (local_contact == SMC_FIRST_CONTACT)
+		smc_lgr_forget(new_smc->conn.lgr);
 	mutex_unlock(&smc_create_lgr_pending);
 decline_rdma:
 	/* RDMA setup failed, switch back to TCP */
 	smc_conn_free(&new_smc->conn);
 	new_smc->use_fallback = true;
 	if (reason_code && (reason_code != SMC_CLC_DECL_REPLY)) {
-		rc = smc_clc_send_decline(new_smc, reason_code);
-		if (rc < sizeof(struct smc_clc_msg_decline))
+		if (smc_clc_send_decline(new_smc, reason_code) < 0)
 			goto out_err;
 	}
 	goto out_connected;
 
 out_err_unlock:
+	if (local_contact == SMC_FIRST_CONTACT)
+		smc_lgr_forget(new_smc->conn.lgr);
 	mutex_unlock(&smc_create_lgr_pending);
 out_err:
+	if (newsmcsk->sk_state == SMC_INIT)
+		sock_put(&new_smc->sk); /* passive closing */
 	newsmcsk->sk_state = SMC_CLOSED;
 	smc_conn_free(&new_smc->conn);
 	goto enqueue; /* queue new sock with sk_err set */
@@ -934,11 +950,12 @@ static void smc_tcp_listen_work(struct work_struct *work)
 {
 	struct smc_sock *lsmc = container_of(work, struct smc_sock,
 					     tcp_listen_work);
+	struct sock *lsk = &lsmc->sk;
 	struct smc_sock *new_smc;
 	int rc = 0;
 
-	lock_sock(&lsmc->sk);
-	while (lsmc->sk.sk_state == SMC_LISTEN) {
+	lock_sock(lsk);
+	while (lsk->sk_state == SMC_LISTEN) {
 		rc = smc_clcsock_accept(lsmc, &new_smc);
 		if (rc)
 			goto out;
@@ -947,15 +964,25 @@ static void smc_tcp_listen_work(struct work_struct *work)
 
 		new_smc->listen_smc = lsmc;
 		new_smc->use_fallback = false; /* assume RDMA capability first */
-		sock_hold(&lsmc->sk); /* sock_put in smc_listen_work */
+		sock_hold(lsk); /* sock_put in smc_listen_work */
 		INIT_WORK(&new_smc->smc_listen_work, smc_listen_work);
 		smc_copy_sock_settings_to_smc(new_smc);
-		schedule_work(&new_smc->smc_listen_work);
+		sock_hold(&new_smc->sk); /* sock_put in passive closing */
+		if (!schedule_work(&new_smc->smc_listen_work))
+			sock_put(&new_smc->sk);
 	}
 
 out:
-	release_sock(&lsmc->sk);
-	lsmc->sk.sk_data_ready(&lsmc->sk); /* no more listening, wake accept */
+	if (lsmc->clcsock) {
+		sock_release(lsmc->clcsock);
+		lsmc->clcsock = NULL;
+	}
+	release_sock(lsk);
+	/* no more listening, wake up smc_close_wait_listen_clcsock and
+	 * accept
+	 */
+	lsk->sk_state_change(lsk);
+	sock_put(&lsmc->sk); /* sock_hold in smc_listen */
 }
 
 static int smc_listen(struct socket *sock, int backlog)
@@ -989,7 +1016,9 @@ static int smc_listen(struct socket *sock, int backlog)
 	sk->sk_ack_backlog = 0;
 	sk->sk_state = SMC_LISTEN;
 	INIT_WORK(&smc->tcp_listen_work, smc_tcp_listen_work);
-	schedule_work(&smc->tcp_listen_work);
+	sock_hold(sk); /* sock_put in smc_tcp_listen_work() */
+	if (!schedule_work(&smc->tcp_listen_work))
+		sock_put(sk);
 
 out:
 	release_sock(sk);
@@ -1006,6 +1035,7 @@ static int smc_accept(struct socket *sock, struct socket *new_sock,
 	int rc = 0;
 
 	lsmc = smc_sk(sk);
+	sock_hold(sk); /* sock_put below */
 	lock_sock(sk);
 
 	if (lsmc->sk.sk_state != SMC_LISTEN) {
@@ -1040,6 +1070,7 @@ static int smc_accept(struct socket *sock, struct socket *new_sock,
 
 out:
 	release_sock(sk);
+	sock_put(sk); /* sock_hold above */
 	return rc;
 }
 
@@ -1109,21 +1140,15 @@ out:
 
 static __poll_t smc_accept_poll(struct sock *parent)
 {
-	struct smc_sock *isk;
-	struct sock *sk;
+	struct smc_sock *isk = smc_sk(parent);
+	__poll_t mask = 0;
 
-	lock_sock(parent);
-	list_for_each_entry(isk, &smc_sk(parent)->accept_q, accept_q) {
-		sk = (struct sock *)isk;
+	spin_lock(&isk->accept_q_lock);
+	if (!list_empty(&isk->accept_q))
+		mask = POLLIN | POLLRDNORM;
+	spin_unlock(&isk->accept_q_lock);
 
-		if (sk->sk_state == SMC_ACTIVE) {
-			release_sock(parent);
-			return POLLIN | POLLRDNORM;
-		}
-	}
-	release_sock(parent);
-
-	return 0;
+	return mask;
 }
 
 static __poll_t smc_poll(struct file *file, struct socket *sock,
@@ -1134,9 +1159,15 @@ static __poll_t smc_poll(struct file *file, struct socket *sock,
 	struct smc_sock *smc;
 	int rc;
 
+	if (!sk)
+		return POLLNVAL;
+
 	smc = smc_sk(sock->sk);
+	sock_hold(sk);
+	lock_sock(sk);
 	if ((sk->sk_state == SMC_INIT) || smc->use_fallback) {
 		/* delegate to CLC child sock */
+		release_sock(sk);
 		mask = smc->clcsock->ops->poll(file, smc->clcsock, wait);
 		/* if non-blocking connect finished ... */
 		lock_sock(sk);
@@ -1148,37 +1179,43 @@ static __poll_t smc_poll(struct file *file, struct socket *sock,
 				rc = smc_connect_rdma(smc);
 				if (rc < 0)
 					mask |= POLLERR;
-				else
-					/* success cases including fallback */
-					mask |= POLLOUT | POLLWRNORM;
+				/* success cases including fallback */
+				mask |= POLLOUT | POLLWRNORM;
 			}
 		}
-		release_sock(sk);
 	} else {
-		sock_poll_wait(file, sk_sleep(sk), wait);
-		if (sk->sk_state == SMC_LISTEN)
-			/* woken up by sk_data_ready in smc_listen_work() */
-			mask |= smc_accept_poll(sk);
+		if (sk->sk_state != SMC_CLOSED) {
+			release_sock(sk);
+			sock_poll_wait(file, sk_sleep(sk), wait);
+			lock_sock(sk);
+		}
 		if (sk->sk_err)
 			mask |= POLLERR;
-		if (atomic_read(&smc->conn.sndbuf_space) ||
-		    (sk->sk_shutdown & SEND_SHUTDOWN)) {
-			mask |= POLLOUT | POLLWRNORM;
-		} else {
-			sk_set_bit(SOCKWQ_ASYNC_NOSPACE, sk);
-			set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
-		}
-		if (atomic_read(&smc->conn.bytes_to_rcv))
-			mask |= POLLIN | POLLRDNORM;
 		if ((sk->sk_shutdown == SHUTDOWN_MASK) ||
 		    (sk->sk_state == SMC_CLOSED))
 			mask |= POLLHUP;
-		if (sk->sk_shutdown & RCV_SHUTDOWN)
-			mask |= POLLIN | POLLRDNORM | POLLRDHUP;
-		if (sk->sk_state == SMC_APPCLOSEWAIT1)
-			mask |= POLLIN;
+		if (sk->sk_state == SMC_LISTEN) {
+			/* woken up by sk_data_ready in smc_listen_work() */
+			mask = smc_accept_poll(sk);
+		} else {
+			if (atomic_read(&smc->conn.sndbuf_space) ||
+			    sk->sk_shutdown & SEND_SHUTDOWN) {
+				mask |= POLLOUT | POLLWRNORM;
+			} else {
+				sk_set_bit(SOCKWQ_ASYNC_NOSPACE, sk);
+				set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
+			}
+			if (atomic_read(&smc->conn.bytes_to_rcv))
+				mask |= POLLIN | POLLRDNORM;
+			if (sk->sk_shutdown & RCV_SHUTDOWN)
+				mask |= POLLIN | POLLRDNORM | POLLRDHUP;
+			if (sk->sk_state == SMC_APPCLOSEWAIT1)
+				mask |= POLLIN;
+		}
 
 	}
+	release_sock(sk);
+	sock_put(sk);
 
 	return mask;
 }
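The recurring sock_hold()/schedule_work()/sock_put() sequences above all follow one rule: whoever schedules a work item takes a socket reference for it, and drops that reference immediately when schedule_work() returns false because the work was already queued. A minimal sketch of the pattern; the function names are illustrative, only the hold/put discipline mirrors the patch:

static void conn_close_work_fn(struct work_struct *work)
{
	struct smc_sock *smc = container_of(work, struct smc_sock,
					    conn.close_work);

	/* ... handle the close event under lock_sock() ... */
	sock_put(&smc->sk);	/* matches sock_hold() in the scheduler */
}

static void schedule_close_work(struct smc_sock *smc)
{
	sock_hold(&smc->sk);	/* keep sk alive while the work is pending */
	if (!schedule_work(&smc->conn.close_work))
		sock_put(&smc->sk);	/* already queued: drop the extra ref */
}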
diff --git a/net/smc/smc.h b/net/smc/smc.h
index 0bee9d16cf29..9518986c97b1 100644
--- a/net/smc/smc.h
+++ b/net/smc/smc.h
@@ -178,7 +178,6 @@ struct smc_sock {				/* smc sock container */
 	struct work_struct	smc_listen_work;/* prepare new accept socket */
 	struct list_head	accept_q;	/* sockets to be accepted */
 	spinlock_t		accept_q_lock;	/* protects accept_q */
-	struct delayed_work	sock_put_work;	/* final socket freeing */
 	bool			use_fallback;	/* fallback to tcp */
 	u8			wait_close_tx_prepared : 1;
 						/* shutdown wr or close
@@ -253,12 +252,12 @@ static inline int smc_uncompress_bufsize(u8 compressed)
 static inline bool using_ipsec(struct smc_sock *smc)
 {
 	return (smc->clcsock->sk->sk_policy[0] ||
-		smc->clcsock->sk->sk_policy[1]) ? 1 : 0;
+		smc->clcsock->sk->sk_policy[1]) ? true : false;
 }
 #else
 static inline bool using_ipsec(struct smc_sock *smc)
 {
-	return 0;
+	return false;
 }
 #endif
 
diff --git a/net/smc/smc_cdc.c b/net/smc/smc_cdc.c
index 87f7bede6eab..3cd086e5bd28 100644
--- a/net/smc/smc_cdc.c
+++ b/net/smc/smc_cdc.c
@@ -57,9 +57,6 @@ static void smc_cdc_tx_handler(struct smc_wr_tx_pend_priv *pnd_snd,
 			       cdcpend->conn);
 	}
 	smc_tx_sndbuf_nonfull(smc);
-	if (smc->sk.sk_state != SMC_ACTIVE)
-		/* wake up smc_close_wait_tx_pends() */
-		smc->sk.sk_state_change(&smc->sk);
 	bh_unlock_sock(&smc->sk);
 }
 
@@ -68,9 +65,14 @@ int smc_cdc_get_free_slot(struct smc_connection *conn,
 			  struct smc_cdc_tx_pend **pend)
 {
 	struct smc_link *link = &conn->lgr->lnk[SMC_SINGLE_LINK];
+	int rc;
 
-	return smc_wr_tx_get_free_slot(link, smc_cdc_tx_handler, wr_buf,
-				       (struct smc_wr_tx_pend_priv **)pend);
+	rc = smc_wr_tx_get_free_slot(link, smc_cdc_tx_handler, wr_buf,
+				     (struct smc_wr_tx_pend_priv **)pend);
+	if (!conn->alert_token_local)
+		/* abnormal termination */
+		rc = -EPIPE;
+	return rc;
 }
 
 static inline void smc_cdc_add_pending_send(struct smc_connection *conn,
@@ -155,14 +157,6 @@ void smc_cdc_tx_dismiss_slots(struct smc_connection *conn)
 				(unsigned long)conn);
 }
 
-bool smc_cdc_tx_has_pending(struct smc_connection *conn)
-{
-	struct smc_link *link = &conn->lgr->lnk[SMC_SINGLE_LINK];
-
-	return smc_wr_tx_has_pending(link, SMC_CDC_MSG_TYPE,
-				     smc_cdc_tx_filter, (unsigned long)conn);
-}
-
 /********************************* receive ***********************************/
 
 static inline bool smc_cdc_before(u16 seq1, u16 seq2)
@@ -213,6 +207,17 @@ static void smc_cdc_msg_recv_action(struct smc_sock *smc,
 		/* guarantee 0 <= bytes_to_rcv <= rmbe_size */
 		smp_mb__after_atomic();
 		smc->sk.sk_data_ready(&smc->sk);
+	} else if ((conn->local_rx_ctrl.prod_flags.write_blocked) ||
+		   (conn->local_rx_ctrl.prod_flags.cons_curs_upd_req)) {
+		smc->sk.sk_data_ready(&smc->sk);
+	}
+
+	/* piggybacked tx info */
+	/* trigger sndbuf consumer: RDMA write into peer RMBE and CDC */
+	if (diff_cons && smc_tx_prepared_sends(conn)) {
+		smc_tx_sndbuf_nonempty(conn);
+		/* trigger socket release if connection closed */
+		smc_close_wake_tx_prepared(smc);
 	}
 
 	if (conn->local_rx_ctrl.conn_state_flags.peer_conn_abort) {
@@ -224,25 +229,10 @@ static void smc_cdc_msg_recv_action(struct smc_sock *smc,
 		if (smc->clcsock && smc->clcsock->sk)
 			smc->clcsock->sk->sk_shutdown |= RCV_SHUTDOWN;
 		sock_set_flag(&smc->sk, SOCK_DONE);
-		schedule_work(&conn->close_work);
+		sock_hold(&smc->sk); /* sock_put in close_work */
+		if (!schedule_work(&conn->close_work))
+			sock_put(&smc->sk);
 	}
-
-	/* piggy backed tx info */
-	/* trigger sndbuf consumer: RDMA write into peer RMBE and CDC */
-	if (diff_cons && smc_tx_prepared_sends(conn)) {
-		smc_tx_sndbuf_nonempty(conn);
-		/* trigger socket release if connection closed */
-		smc_close_wake_tx_prepared(smc);
-	}
-
-	/* socket connected but not accepted */
-	if (!smc->sk.sk_socket)
-		return;
-
-	/* data available */
-	if ((conn->local_rx_ctrl.prod_flags.write_blocked) ||
-	    (conn->local_rx_ctrl.prod_flags.cons_curs_upd_req))
-		smc_tx_consumer_update(conn);
 }
 
 /* called under tasklet context */
diff --git a/net/smc/smc_cdc.h b/net/smc/smc_cdc.h
index 149ceda1b088..ab240b37ad11 100644
--- a/net/smc/smc_cdc.h
+++ b/net/smc/smc_cdc.h
@@ -214,7 +214,6 @@ void smc_cdc_tx_dismiss_slots(struct smc_connection *conn);
 int smc_cdc_msg_send(struct smc_connection *conn, struct smc_wr_buf *wr_buf,
 		     struct smc_cdc_tx_pend *pend);
 int smc_cdc_get_slot_and_msg_send(struct smc_connection *conn);
-bool smc_cdc_tx_has_pending(struct smc_connection *conn);
 int smc_cdc_init(void) __init;
 
 #endif /* SMC_CDC_H */
diff --git a/net/smc/smc_clc.c b/net/smc/smc_clc.c
index 511548085d16..8ac51583a063 100644
--- a/net/smc/smc_clc.c
+++ b/net/smc/smc_clc.c
@@ -22,6 +22,54 @@
 #include "smc_clc.h"
 #include "smc_ib.h"
 
+/* check if received message has a correct header length and contains valid
+ * leading and trailing eyecatchers
+ */
+static bool smc_clc_msg_hdr_valid(struct smc_clc_msg_hdr *clcm)
+{
+	struct smc_clc_msg_proposal_prefix *pclc_prfx;
+	struct smc_clc_msg_accept_confirm *clc;
+	struct smc_clc_msg_proposal *pclc;
+	struct smc_clc_msg_decline *dclc;
+	struct smc_clc_msg_trail *trl;
+
+	if (memcmp(clcm->eyecatcher, SMC_EYECATCHER, sizeof(SMC_EYECATCHER)))
+		return false;
+	switch (clcm->type) {
+	case SMC_CLC_PROPOSAL:
+		pclc = (struct smc_clc_msg_proposal *)clcm;
+		pclc_prfx = smc_clc_proposal_get_prefix(pclc);
+		if (ntohs(pclc->hdr.length) !=
+			sizeof(*pclc) + ntohs(pclc->iparea_offset) +
+			sizeof(*pclc_prfx) +
+			pclc_prfx->ipv6_prefixes_cnt *
+				sizeof(struct smc_clc_ipv6_prefix) +
+			sizeof(*trl))
+			return false;
+		trl = (struct smc_clc_msg_trail *)
+			((u8 *)pclc + ntohs(pclc->hdr.length) - sizeof(*trl));
+		break;
+	case SMC_CLC_ACCEPT:
+	case SMC_CLC_CONFIRM:
+		clc = (struct smc_clc_msg_accept_confirm *)clcm;
+		if (ntohs(clc->hdr.length) != sizeof(*clc))
+			return false;
+		trl = &clc->trl;
+		break;
+	case SMC_CLC_DECLINE:
+		dclc = (struct smc_clc_msg_decline *)clcm;
+		if (ntohs(dclc->hdr.length) != sizeof(*dclc))
+			return false;
+		trl = &dclc->trl;
+		break;
+	default:
+		return false;
+	}
+	if (memcmp(trl->eyecatcher, SMC_EYECATCHER, sizeof(SMC_EYECATCHER)))
+		return false;
+	return true;
+}
+
 /* Wait for data on the tcp-socket, analyze received data
  * Returns:
  * 0 if success and it was not a decline that we received.
@@ -75,9 +123,7 @@ int smc_clc_wait_msg(struct smc_sock *smc, void *buf, int buflen,
 	}
 	datlen = ntohs(clcm->length);
 	if ((len < sizeof(struct smc_clc_msg_hdr)) ||
-	    (datlen < sizeof(struct smc_clc_msg_decline)) ||
-	    (datlen > sizeof(struct smc_clc_msg_accept_confirm)) ||
-	    memcmp(clcm->eyecatcher, SMC_EYECATCHER, sizeof(SMC_EYECATCHER)) ||
+	    (datlen > buflen) ||
 	    ((clcm->type != SMC_CLC_DECLINE) &&
 	     (clcm->type != expected_type))) {
 		smc->sk.sk_err = EPROTO;
@@ -91,7 +137,7 @@ int smc_clc_wait_msg(struct smc_sock *smc, void *buf, int buflen,
 	krflags = MSG_WAITALL;
 	smc->clcsock->sk->sk_rcvtimeo = CLC_WAIT_TIME;
 	len = sock_recvmsg(smc->clcsock, &msg, krflags);
-	if (len < datlen) {
+	if (len < datlen || !smc_clc_msg_hdr_valid(clcm)) {
 		smc->sk.sk_err = EPROTO;
 		reason_code = -EPROTO;
 		goto out;
@@ -135,7 +181,7 @@ int smc_clc_send_decline(struct smc_sock *smc, u32 peer_diag_info)
 		smc->sk.sk_err = EPROTO;
 	if (len < 0)
 		smc->sk.sk_err = -len;
-	return len;
+	return sock_error(&smc->sk);
 }
 
 /* send CLC PROPOSAL message across internal TCP socket */
@@ -143,33 +189,43 @@ int smc_clc_send_proposal(struct smc_sock *smc,
 			  struct smc_ib_device *smcibdev,
 			  u8 ibport)
 {
+	struct smc_clc_msg_proposal_prefix pclc_prfx;
 	struct smc_clc_msg_proposal pclc;
+	struct smc_clc_msg_trail trl;
 	int reason_code = 0;
+	struct kvec vec[3];
 	struct msghdr msg;
-	struct kvec vec;
-	int len, rc;
+	int len, plen, rc;
 
 	/* send SMC Proposal CLC message */
+	plen = sizeof(pclc) + sizeof(pclc_prfx) + sizeof(trl);
 	memset(&pclc, 0, sizeof(pclc));
 	memcpy(pclc.hdr.eyecatcher, SMC_EYECATCHER, sizeof(SMC_EYECATCHER));
 	pclc.hdr.type = SMC_CLC_PROPOSAL;
-	pclc.hdr.length = htons(sizeof(pclc));
+	pclc.hdr.length = htons(plen);
 	pclc.hdr.version = SMC_CLC_V1;		/* SMC version */
 	memcpy(pclc.lcl.id_for_peer, local_systemid, sizeof(local_systemid));
 	memcpy(&pclc.lcl.gid, &smcibdev->gid[ibport - 1], SMC_GID_SIZE);
 	memcpy(&pclc.lcl.mac, &smcibdev->mac[ibport - 1], ETH_ALEN);
+	pclc.iparea_offset = htons(0);
 
+	memset(&pclc_prfx, 0, sizeof(pclc_prfx));
 	/* determine subnet and mask from internal TCP socket */
-	rc = smc_netinfo_by_tcpsk(smc->clcsock, &pclc.outgoing_subnet,
-				  &pclc.prefix_len);
+	rc = smc_netinfo_by_tcpsk(smc->clcsock, &pclc_prfx.outgoing_subnet,
+				  &pclc_prfx.prefix_len);
 	if (rc)
 		return SMC_CLC_DECL_CNFERR; /* configuration error */
-	memcpy(pclc.trl.eyecatcher, SMC_EYECATCHER, sizeof(SMC_EYECATCHER));
+	pclc_prfx.ipv6_prefixes_cnt = 0;
+	memcpy(trl.eyecatcher, SMC_EYECATCHER, sizeof(SMC_EYECATCHER));
 	memset(&msg, 0, sizeof(msg));
-	vec.iov_base = &pclc;
-	vec.iov_len = sizeof(pclc);
+	vec[0].iov_base = &pclc;
+	vec[0].iov_len = sizeof(pclc);
+	vec[1].iov_base = &pclc_prfx;
+	vec[1].iov_len = sizeof(pclc_prfx);
+	vec[2].iov_base = &trl;
+	vec[2].iov_len = sizeof(trl);
 	/* due to the few bytes needed for clc-handshake this cannot block */
-	len = kernel_sendmsg(smc->clcsock, &msg, &vec, 1, sizeof(pclc));
+	len = kernel_sendmsg(smc->clcsock, &msg, vec, 3, plen);
 	if (len < sizeof(pclc)) {
 		if (len >= 0) {
 			reason_code = -ENETUNREACH;
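smc_clc_send_proposal() now assembles the variable-length proposal from three separate buffers and hands them to kernel_sendmsg() as one gather list, so no contiguous staging buffer is needed. A reduced sketch of that gather send, assuming the three parts are already filled in by the caller:

#include <linux/net.h>
#include <linux/uio.h>

static int send_three_parts(struct socket *sock,
			    void *hdr, size_t hlen,
			    void *body, size_t blen,
			    void *trl, size_t tlen)
{
	struct msghdr msg = {};
	struct kvec vec[3] = {
		{ .iov_base = hdr,  .iov_len = hlen },
		{ .iov_base = body, .iov_len = blen },
		{ .iov_base = trl,  .iov_len = tlen },
	};

	/* single gather send; returns bytes sent or a negative errno */
	return kernel_sendmsg(sock, &msg, vec, 3, hlen + blen + tlen);
}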
diff --git a/net/smc/smc_clc.h b/net/smc/smc_clc.h
index 12a9af1539a2..c145a0f36a68 100644
--- a/net/smc/smc_clc.h
+++ b/net/smc/smc_clc.h
@@ -44,7 +44,7 @@ struct smc_clc_msg_hdr {	/* header1 of clc messages */
 #if defined(__BIG_ENDIAN_BITFIELD)
 	u8 version : 4,
 	   flag    : 1,
-	   rsvd	   : 3;
+	   rsvd    : 3;
 #elif defined(__LITTLE_ENDIAN_BITFIELD)
 	u8 rsvd    : 3,
 	   flag    : 1,
@@ -62,17 +62,31 @@ struct smc_clc_msg_local {	/* header2 of clc messages */
 	u8 mac[6];		/* mac of ib_device port */
 };
 
-struct smc_clc_msg_proposal {	/* clc proposal message */
-	struct smc_clc_msg_hdr hdr;
-	struct smc_clc_msg_local lcl;
-	__be16 iparea_offset;	/* offset to IP address information area */
+struct smc_clc_ipv6_prefix {
+	u8 prefix[4];
+	u8 prefix_len;
+} __packed;
+
+struct smc_clc_msg_proposal_prefix {	/* prefix part of clc proposal message*/
 	__be32 outgoing_subnet;	/* subnet mask */
 	u8 prefix_len;		/* number of significant bits in mask */
 	u8 reserved[2];
 	u8 ipv6_prefixes_cnt;	/* number of IPv6 prefixes in prefix array */
-	struct smc_clc_msg_trail trl; /* eye catcher "SMCR" EBCDIC */
 } __aligned(4);
 
+struct smc_clc_msg_proposal {	/* clc proposal message sent by Linux */
+	struct smc_clc_msg_hdr hdr;
+	struct smc_clc_msg_local lcl;
+	__be16 iparea_offset;	/* offset to IP address information area */
+} __aligned(4);
+
+#define SMC_CLC_PROPOSAL_MAX_OFFSET	0x28
+#define SMC_CLC_PROPOSAL_MAX_PREFIX	(8 * sizeof(struct smc_clc_ipv6_prefix))
+#define SMC_CLC_MAX_LEN		(sizeof(struct smc_clc_msg_proposal) + \
+				 SMC_CLC_PROPOSAL_MAX_OFFSET + \
+				 SMC_CLC_PROPOSAL_MAX_PREFIX + \
+				 sizeof(struct smc_clc_msg_trail))
+
 struct smc_clc_msg_accept_confirm {	/* clc accept / confirm message */
 	struct smc_clc_msg_hdr hdr;
 	struct smc_clc_msg_local lcl;
@@ -102,6 +116,14 @@ struct smc_clc_msg_decline {	/* clc decline message */
 	struct smc_clc_msg_trail trl; /* eye catcher "SMCR" EBCDIC */
 } __aligned(4);
 
+/* determine start of the prefix area within the proposal message */
+static inline struct smc_clc_msg_proposal_prefix *
+smc_clc_proposal_get_prefix(struct smc_clc_msg_proposal *pclc)
+{
+	return (struct smc_clc_msg_proposal_prefix *)
+	       ((u8 *)pclc + sizeof(*pclc) + ntohs(pclc->iparea_offset));
+}
+
 struct smc_sock;
 struct smc_ib_device;
 
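Since iparea_offset is supplied by the peer, any receiver using smc_clc_proposal_get_prefix() must first ensure the computed prefix area lies inside the received buffer; in this patch that guarantee comes from the header-length validation in smc_clc_msg_hdr_valid(). A hypothetical bounds-checked variant (not in the patch) would look like:

static struct smc_clc_msg_proposal_prefix *
proposal_prefix_checked(struct smc_clc_msg_proposal *pclc, size_t buflen)
{
	size_t off = sizeof(*pclc) + ntohs(pclc->iparea_offset);

	if (off + sizeof(struct smc_clc_msg_proposal_prefix) > buflen)
		return NULL;	/* peer-supplied offset would overrun buf */
	return (struct smc_clc_msg_proposal_prefix *)((u8 *)pclc + off);
}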
diff --git a/net/smc/smc_close.c b/net/smc/smc_close.c
index 48615d2ac4aa..e339c0186dcf 100644
--- a/net/smc/smc_close.c
+++ b/net/smc/smc_close.c
@@ -19,7 +19,7 @@
 #include "smc_cdc.h"
 #include "smc_close.h"
 
-#define SMC_CLOSE_WAIT_TX_PENDS_TIME		(5 * HZ)
+#define SMC_CLOSE_WAIT_LISTEN_CLCSOCK_TIME	(5 * HZ)
 
 static void smc_close_cleanup_listen(struct sock *parent)
 {
@@ -30,23 +30,24 @@ static void smc_close_cleanup_listen(struct sock *parent)
 		smc_close_non_accepted(sk);
 }
 
-static void smc_close_wait_tx_pends(struct smc_sock *smc)
+static void smc_close_wait_listen_clcsock(struct smc_sock *smc)
 {
 	DEFINE_WAIT_FUNC(wait, woken_wake_function);
 	struct sock *sk = &smc->sk;
 	signed long timeout;
 
-	timeout = SMC_CLOSE_WAIT_TX_PENDS_TIME;
+	timeout = SMC_CLOSE_WAIT_LISTEN_CLCSOCK_TIME;
 	add_wait_queue(sk_sleep(sk), &wait);
-	while (!signal_pending(current) && timeout) {
-		int rc;
-
-		rc = sk_wait_event(sk, &timeout,
-				   !smc_cdc_tx_has_pending(&smc->conn),
-				   &wait);
-		if (rc)
+	do {
+		release_sock(sk);
+		if (smc->clcsock)
+			timeout = wait_woken(&wait, TASK_UNINTERRUPTIBLE,
+					     timeout);
+		sched_annotate_sleep();
+		lock_sock(sk);
+		if (!smc->clcsock)
 			break;
-	}
+	} while (timeout);
 	remove_wait_queue(sk_sleep(sk), &wait);
 }
 
@@ -111,58 +112,63 @@ static int smc_close_abort(struct smc_connection *conn)
 }
 
 /* terminate smc socket abnormally - active abort
- * RDMA communication no longer possible
+ * link group is terminated, i.e. RDMA communication no longer possible
  */
-void smc_close_active_abort(struct smc_sock *smc)
+static void smc_close_active_abort(struct smc_sock *smc)
 {
+	struct sock *sk = &smc->sk;
+
 	struct smc_cdc_conn_state_flags *txflags =
 		&smc->conn.local_tx_ctrl.conn_state_flags;
 
-	smc->sk.sk_err = ECONNABORTED;
+	sk->sk_err = ECONNABORTED;
 	if (smc->clcsock && smc->clcsock->sk) {
 		smc->clcsock->sk->sk_err = ECONNABORTED;
 		smc->clcsock->sk->sk_state_change(smc->clcsock->sk);
 	}
-	switch (smc->sk.sk_state) {
+	switch (sk->sk_state) {
 	case SMC_INIT:
 	case SMC_ACTIVE:
-		smc->sk.sk_state = SMC_PEERABORTWAIT;
+		sk->sk_state = SMC_PEERABORTWAIT;
+		release_sock(sk);
+		cancel_delayed_work_sync(&smc->conn.tx_work);
+		lock_sock(sk);
+		sock_put(sk); /* passive closing */
 		break;
 	case SMC_APPCLOSEWAIT1:
 	case SMC_APPCLOSEWAIT2:
-		txflags->peer_conn_abort = 1;
-		sock_release(smc->clcsock);
 		if (!smc_cdc_rxed_any_close(&smc->conn))
-			smc->sk.sk_state = SMC_PEERABORTWAIT;
+			sk->sk_state = SMC_PEERABORTWAIT;
 		else
-			smc->sk.sk_state = SMC_CLOSED;
+			sk->sk_state = SMC_CLOSED;
+		release_sock(sk);
+		cancel_delayed_work_sync(&smc->conn.tx_work);
+		lock_sock(sk);
 		break;
 	case SMC_PEERCLOSEWAIT1:
 	case SMC_PEERCLOSEWAIT2:
 		if (!txflags->peer_conn_closed) {
-			smc->sk.sk_state = SMC_PEERABORTWAIT;
-			txflags->peer_conn_abort = 1;
-			sock_release(smc->clcsock);
+			/* just SHUTDOWN_SEND done */
+			sk->sk_state = SMC_PEERABORTWAIT;
 		} else {
-			smc->sk.sk_state = SMC_CLOSED;
+			sk->sk_state = SMC_CLOSED;
 		}
+		sock_put(sk); /* passive closing */
 		break;
 	case SMC_PROCESSABORT:
 	case SMC_APPFINCLOSEWAIT:
-		if (!txflags->peer_conn_closed) {
-			txflags->peer_conn_abort = 1;
-			sock_release(smc->clcsock);
-		}
-		smc->sk.sk_state = SMC_CLOSED;
+		sk->sk_state = SMC_CLOSED;
 		break;
 	case SMC_PEERFINCLOSEWAIT:
+		sock_put(sk); /* passive closing */
+		break;
 	case SMC_PEERABORTWAIT:
 	case SMC_CLOSED:
 		break;
 	}
 
-	sock_set_flag(&smc->sk, SOCK_DEAD);
-	smc->sk.sk_state_change(&smc->sk);
+	sock_set_flag(sk, SOCK_DEAD);
+	sk->sk_state_change(sk);
 }
 
 static inline bool smc_close_sent_any_close(struct smc_connection *conn)
@@ -185,13 +191,11 @@ int smc_close_active(struct smc_sock *smc)
 		  0 : sock_flag(sk, SOCK_LINGER) ?
 		      sk->sk_lingertime : SMC_MAX_STREAM_WAIT_TIMEOUT;
 
-again:
 	old_state = sk->sk_state;
-	switch (old_state) {
+again:
+	switch (sk->sk_state) {
 	case SMC_INIT:
 		sk->sk_state = SMC_CLOSED;
-		if (smc->smc_listen_work.func)
-			cancel_work_sync(&smc->smc_listen_work);
 		break;
 	case SMC_LISTEN:
 		sk->sk_state = SMC_CLOSED;
@@ -200,11 +204,9 @@ again:
 			rc = kernel_sock_shutdown(smc->clcsock, SHUT_RDWR);
 			/* wake up kernel_accept of smc_tcp_listen_worker */
 			smc->clcsock->sk->sk_data_ready(smc->clcsock->sk);
+			smc_close_wait_listen_clcsock(smc);
 		}
-		release_sock(sk);
 		smc_close_cleanup_listen(sk);
-		cancel_work_sync(&smc->smc_listen_work);
-		lock_sock(sk);
 		break;
 	case SMC_ACTIVE:
 		smc_close_stream_wait(smc, timeout);
@@ -214,6 +216,8 @@ again:
 		if (sk->sk_state == SMC_ACTIVE) {
 			/* send close request */
 			rc = smc_close_final(conn);
+			if (rc)
+				break;
 			sk->sk_state = SMC_PEERCLOSEWAIT1;
 		} else {
 			/* peer event has changed the state */
@@ -226,9 +230,10 @@ again:
 		    !smc_close_sent_any_close(conn)) {
 			/* just shutdown wr done, send close request */
 			rc = smc_close_final(conn);
+			if (rc)
+				break;
 		}
 		sk->sk_state = SMC_CLOSED;
-		smc_close_wait_tx_pends(smc);
 		break;
 	case SMC_APPCLOSEWAIT1:
 	case SMC_APPCLOSEWAIT2:
@@ -237,19 +242,21 @@ again:
 		release_sock(sk);
 		cancel_delayed_work_sync(&conn->tx_work);
 		lock_sock(sk);
-		if (sk->sk_err != ECONNABORTED) {
-			/* confirm close from peer */
-			rc = smc_close_final(conn);
-			if (rc)
-				break;
-		}
-		if (smc_cdc_rxed_any_close(conn))
+		if (sk->sk_state != SMC_APPCLOSEWAIT1 &&
+		    sk->sk_state != SMC_APPCLOSEWAIT2)
+			goto again;
+		/* confirm close from peer */
+		rc = smc_close_final(conn);
+		if (rc)
+			break;
+		if (smc_cdc_rxed_any_close(conn)) {
 			/* peer has closed the socket already */
 			sk->sk_state = SMC_CLOSED;
-		else
+			sock_put(sk); /* postponed passive closing */
+		} else {
 			/* peer has just issued a shutdown write */
 			sk->sk_state = SMC_PEERFINCLOSEWAIT;
-		smc_close_wait_tx_pends(smc);
+		}
 		break;
 	case SMC_PEERCLOSEWAIT1:
 	case SMC_PEERCLOSEWAIT2:
@@ -257,6 +264,8 @@ again:
 		    !smc_close_sent_any_close(conn)) {
 			/* just shutdown wr done, send close request */
 			rc = smc_close_final(conn);
+			if (rc)
+				break;
 		}
 		/* peer sending PeerConnectionClosed will cause transition */
 		break;
@@ -264,12 +273,8 @@ again:
 		/* peer sending PeerConnectionClosed will cause transition */
 		break;
 	case SMC_PROCESSABORT:
-		release_sock(sk);
-		cancel_delayed_work_sync(&conn->tx_work);
-		lock_sock(sk);
 		smc_close_abort(conn);
 		sk->sk_state = SMC_CLOSED;
-		smc_close_wait_tx_pends(smc);
 		break;
 	case SMC_PEERABORTWAIT:
 	case SMC_CLOSED:
@@ -278,7 +283,7 @@ again:
 	}
 
 	if (old_state != sk->sk_state)
-		sk->sk_state_change(&smc->sk);
+		sk->sk_state_change(sk);
 	return rc;
 }
 
@@ -289,37 +294,42 @@ static void smc_close_passive_abort_received(struct smc_sock *smc)
 	struct sock *sk = &smc->sk;
 
 	switch (sk->sk_state) {
+	case SMC_INIT:
 	case SMC_ACTIVE:
-	case SMC_APPFINCLOSEWAIT:
 	case SMC_APPCLOSEWAIT1:
-	case SMC_APPCLOSEWAIT2:
-		smc_close_abort(&smc->conn);
+		sk->sk_state = SMC_PROCESSABORT;
+		sock_put(sk); /* passive closing */
+		break;
+	case SMC_APPFINCLOSEWAIT:
 		sk->sk_state = SMC_PROCESSABORT;
 		break;
 	case SMC_PEERCLOSEWAIT1:
 	case SMC_PEERCLOSEWAIT2:
 		if (txflags->peer_done_writing &&
-		    !smc_close_sent_any_close(&smc->conn)) {
+		    !smc_close_sent_any_close(&smc->conn))
 			/* just shutdown, but not yet closed locally */
-			smc_close_abort(&smc->conn);
 			sk->sk_state = SMC_PROCESSABORT;
-		} else {
+		else
 			sk->sk_state = SMC_CLOSED;
-		}
+		sock_put(sk); /* passive closing */
 		break;
+	case SMC_APPCLOSEWAIT2:
 	case SMC_PEERFINCLOSEWAIT:
+		sk->sk_state = SMC_CLOSED;
+		sock_put(sk); /* passive closing */
+		break;
 	case SMC_PEERABORTWAIT:
 		sk->sk_state = SMC_CLOSED;
 		break;
-	case SMC_INIT:
 	case SMC_PROCESSABORT:
 	/* nothing to do, add tracing in future patch */
 		break;
 	}
 }
 
-/* Some kind of closing has been received: peer_conn_closed, peer_conn_abort,
- * or peer_done_writing.
+/* Either some kind of closing has been received (peer_conn_closed,
+ * peer_conn_abort, or peer_done_writing),
+ * or the link group of the connection terminates abnormally.
  */
 static void smc_close_passive_work(struct work_struct *work)
 {
@@ -331,7 +341,7 @@ static void smc_close_passive_work(struct work_struct *work)
 	struct sock *sk = &smc->sk;
 	int old_state;
 
-	lock_sock(&smc->sk);
+	lock_sock(sk);
 	old_state = sk->sk_state;
 
 	if (!conn->alert_token_local) {
@@ -340,23 +350,32 @@ static void smc_close_passive_work(struct work_struct *work)
 		goto wakeup;
 	}
 
-	rxflags = &smc->conn.local_rx_ctrl.conn_state_flags;
+	rxflags = &conn->local_rx_ctrl.conn_state_flags;
 	if (rxflags->peer_conn_abort) {
+		/* peer has not received all data */
 		smc_close_passive_abort_received(smc);
+		release_sock(&smc->sk);
+		cancel_delayed_work_sync(&conn->tx_work);
+		lock_sock(&smc->sk);
 		goto wakeup;
 	}
 
 	switch (sk->sk_state) {
 	case SMC_INIT:
-		if (atomic_read(&smc->conn.bytes_to_rcv) ||
+		if (atomic_read(&conn->bytes_to_rcv) ||
 		    (rxflags->peer_done_writing &&
-		     !smc_cdc_rxed_any_close(conn)))
+		     !smc_cdc_rxed_any_close(conn))) {
 			sk->sk_state = SMC_APPCLOSEWAIT1;
-		else
+		} else {
 			sk->sk_state = SMC_CLOSED;
+			sock_put(sk); /* passive closing */
+		}
 		break;
 	case SMC_ACTIVE:
 		sk->sk_state = SMC_APPCLOSEWAIT1;
+		/* postpone sock_put() for passive closing to cover
+		 * received SEND_SHUTDOWN as well
+		 */
 		break;
 	case SMC_PEERCLOSEWAIT1:
 		if (rxflags->peer_done_writing)
@@ -364,8 +383,7 @@ static void smc_close_passive_work(struct work_struct *work)
 		/* fall through */
 		/* to check for closing */
 	case SMC_PEERCLOSEWAIT2:
-	case SMC_PEERFINCLOSEWAIT:
-		if (!smc_cdc_rxed_any_close(&smc->conn))
+		if (!smc_cdc_rxed_any_close(conn))
 			break;
 		if (sock_flag(sk, SOCK_DEAD) &&
 		    smc_close_sent_any_close(conn)) {
@@ -375,9 +393,20 @@ static void smc_close_passive_work(struct work_struct *work)
 			/* just shutdown, but not yet closed locally */
 			sk->sk_state = SMC_APPFINCLOSEWAIT;
 		}
+		sock_put(sk); /* passive closing */
+		break;
+	case SMC_PEERFINCLOSEWAIT:
+		if (smc_cdc_rxed_any_close(conn)) {
+			sk->sk_state = SMC_CLOSED;
+			sock_put(sk); /* passive closing */
+		}
 		break;
 	case SMC_APPCLOSEWAIT1:
 	case SMC_APPCLOSEWAIT2:
+		/* postpone sock_put() for passive closing to cover
+		 * received SEND_SHUTDOWN as well
+		 */
+		break;
 	case SMC_APPFINCLOSEWAIT:
 	case SMC_PEERABORTWAIT:
 	case SMC_PROCESSABORT:
@@ -393,23 +422,11 @@ wakeup:
 	if (old_state != sk->sk_state) {
 		sk->sk_state_change(sk);
 		if ((sk->sk_state == SMC_CLOSED) &&
-		    (sock_flag(sk, SOCK_DEAD) || !sk->sk_socket)) {
-			smc_conn_free(&smc->conn);
-			schedule_delayed_work(&smc->sock_put_work,
-					      SMC_CLOSE_SOCK_PUT_DELAY);
-		}
+		    (sock_flag(sk, SOCK_DEAD) || !sk->sk_socket))
+			smc_conn_free(conn);
 	}
-	release_sock(&smc->sk);
-}
-
-void smc_close_sock_put_work(struct work_struct *work)
-{
-	struct smc_sock *smc = container_of(to_delayed_work(work),
-					    struct smc_sock,
-					    sock_put_work);
-
-	smc->sk.sk_prot->unhash(&smc->sk);
-	sock_put(&smc->sk);
+	release_sock(sk);
+	sock_put(sk); /* sock_hold done by schedulers of close_work */
 }
 
 int smc_close_shutdown_write(struct smc_sock *smc)
@@ -424,20 +441,21 @@ int smc_close_shutdown_write(struct smc_sock *smc)
 		  0 : sock_flag(sk, SOCK_LINGER) ?
 		      sk->sk_lingertime : SMC_MAX_STREAM_WAIT_TIMEOUT;
 
-again:
 	old_state = sk->sk_state;
-	switch (old_state) {
+again:
+	switch (sk->sk_state) {
 	case SMC_ACTIVE:
 		smc_close_stream_wait(smc, timeout);
 		release_sock(sk);
 		cancel_delayed_work_sync(&conn->tx_work);
 		lock_sock(sk);
+		if (sk->sk_state != SMC_ACTIVE)
+			goto again;
 		/* send close wr request */
 		rc = smc_close_wr(conn);
-		if (sk->sk_state == SMC_ACTIVE)
-			sk->sk_state = SMC_PEERCLOSEWAIT1;
-		else
-			goto again;
+		if (rc)
+			break;
+		sk->sk_state = SMC_PEERCLOSEWAIT1;
 		break;
 	case SMC_APPCLOSEWAIT1:
 		/* passive close */
@@ -446,8 +464,12 @@ again:
 		release_sock(sk);
 		cancel_delayed_work_sync(&conn->tx_work);
 		lock_sock(sk);
+		if (sk->sk_state != SMC_APPCLOSEWAIT1)
+			goto again;
 		/* confirm close from peer */
 		rc = smc_close_wr(conn);
+		if (rc)
+			break;
 		sk->sk_state = SMC_APPCLOSEWAIT2;
 		break;
 	case SMC_APPCLOSEWAIT2:
@@ -462,7 +484,7 @@ again:
 	}
 
 	if (old_state != sk->sk_state)
-		sk->sk_state_change(&smc->sk);
+		sk->sk_state_change(sk);
 	return rc;
 }
 
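smc_close_wait_listen_clcsock() is built on the wait_woken() idiom: park on the socket's wait queue, drop the sock lock so the listen worker can clear clcsock and issue the wake-up, then re-take the lock and re-test. A generic sketch of that loop, with a hypothetical done() predicate standing in for the clcsock test:

static void smc_wait_until(struct sock *sk, bool (*done)(struct sock *sk))
{
	DEFINE_WAIT_FUNC(wait, woken_wake_function);
	long timeout = 5 * HZ;

	add_wait_queue(sk_sleep(sk), &wait);
	do {
		release_sock(sk);	/* let the waker make progress */
		if (!done(sk))
			timeout = wait_woken(&wait, TASK_UNINTERRUPTIBLE,
					     timeout);
		sched_annotate_sleep();
		lock_sock(sk);
	} while (!done(sk) && timeout);
	remove_wait_queue(sk_sleep(sk), &wait);
}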
diff --git a/net/smc/smc_close.h b/net/smc/smc_close.h
index ed82506b1b0a..19eb6a211c23 100644
--- a/net/smc/smc_close.h
+++ b/net/smc/smc_close.h
@@ -20,9 +20,7 @@
 #define SMC_CLOSE_SOCK_PUT_DELAY		HZ
 
 void smc_close_wake_tx_prepared(struct smc_sock *smc);
-void smc_close_active_abort(struct smc_sock *smc);
 int smc_close_active(struct smc_sock *smc);
-void smc_close_sock_put_work(struct work_struct *work);
 int smc_close_shutdown_write(struct smc_sock *smc);
 void smc_close_init(struct smc_sock *smc);
 
diff --git a/net/smc/smc_core.c b/net/smc/smc_core.c
index 94f21116dac5..2424c7100aaf 100644
--- a/net/smc/smc_core.c
+++ b/net/smc/smc_core.c
@@ -128,6 +128,8 @@ static void smc_lgr_free_work(struct work_struct *work)
 	bool conns;
 
 	spin_lock_bh(&smc_lgr_list.lock);
+	if (list_empty(&lgr->list))
+		goto free;
 	read_lock_bh(&lgr->conns_lock);
 	conns = RB_EMPTY_ROOT(&lgr->conns_all);
 	read_unlock_bh(&lgr->conns_lock);
@@ -136,6 +138,7 @@ static void smc_lgr_free_work(struct work_struct *work)
 		return;
 	}
 	list_del_init(&lgr->list); /* remove from smc_lgr_list */
+free:
 	spin_unlock_bh(&smc_lgr_list.lock);
 	smc_lgr_free(lgr);
 }
@@ -231,9 +234,7 @@ static void smc_buf_unuse(struct smc_connection *conn)
 /* remove a finished connection from its link group */
 void smc_conn_free(struct smc_connection *conn)
 {
-	struct smc_link_group *lgr = conn->lgr;
-
-	if (!lgr)
+	if (!conn->lgr)
 		return;
 	smc_cdc_tx_dismiss_slots(conn);
 	smc_lgr_unregister_conn(conn);
@@ -327,13 +328,17 @@ void smc_lgr_terminate(struct smc_link_group *lgr)
 	while (node) {
 		conn = rb_entry(node, struct smc_connection, alert_node);
 		smc = container_of(conn, struct smc_sock, conn);
-		sock_hold(&smc->sk);
+		sock_hold(&smc->sk); /* sock_put in close work */
+		conn->local_tx_ctrl.conn_state_flags.peer_conn_abort = 1;
 		__smc_lgr_unregister_conn(conn);
-		schedule_work(&conn->close_work);
-		sock_put(&smc->sk);
+		write_unlock_bh(&lgr->conns_lock);
+		if (!schedule_work(&conn->close_work))
+			sock_put(&smc->sk);
+		write_lock_bh(&lgr->conns_lock);
 		node = rb_first(&lgr->conns_all);
 	}
 	write_unlock_bh(&lgr->conns_lock);
+	wake_up(&lgr->lnk[SMC_SINGLE_LINK].wr_reg_wait);
 }
 
 /* Determine vlan of internal TCP socket.
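Both smc_lgr_free_work() above and the new smc_lgr_forget() in af_smc.c lean on a property of list_del_init(): after removal the node points at itself, so list_empty() on the node doubles as an "already unlinked" flag without any extra state. Illustrative only:

#include <linux/list.h>

struct lgr_like {
	struct list_head list;	/* linked into a global list while usable */
};

static void lgr_forget(struct lgr_like *lgr)	/* caller holds list lock */
{
	if (!list_empty(&lgr->list))
		list_del_init(&lgr->list);	/* node now points at itself */
}

static bool lgr_is_forgotten(struct lgr_like *lgr)
{
	return list_empty(&lgr->list);	/* true only after list_del_init() */
}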
diff --git a/net/smc/smc_diag.c b/net/smc/smc_diag.c
index d2d01cf70224..427b91c1c964 100644
--- a/net/smc/smc_diag.c
+++ b/net/smc/smc_diag.c
@@ -86,7 +86,8 @@ static int __smc_diag_dump(struct sock *sk, struct sk_buff *skb,
 	if (smc_diag_msg_attrs_fill(sk, skb, r, user_ns))
 		goto errout;
 
-	if ((req->diag_ext & (1 << (SMC_DIAG_CONNINFO - 1))) && smc->conn.lgr) {
+	if ((req->diag_ext & (1 << (SMC_DIAG_CONNINFO - 1))) &&
+	    smc->conn.alert_token_local) {
 		struct smc_connection *conn = &smc->conn;
 		struct smc_diag_conninfo cinfo = {
 			.token = conn->alert_token_local,
@@ -124,7 +125,8 @@ static int __smc_diag_dump(struct sock *sk, struct sk_buff *skb,
 			goto errout;
 	}
 
-	if ((req->diag_ext & (1 << (SMC_DIAG_LGRINFO - 1))) && smc->conn.lgr) {
+	if ((req->diag_ext & (1 << (SMC_DIAG_LGRINFO - 1))) && smc->conn.lgr &&
+	    !list_empty(&smc->conn.lgr->list)) {
 		struct smc_diag_lgrinfo linfo = {
 			.role = smc->conn.lgr->role,
 			.lnk[0].ibport = smc->conn.lgr->lnk[0].ibport,
diff --git a/net/smc/smc_ib.c b/net/smc/smc_ib.c
index 90f1a7f9085c..2a8957bd6d38 100644
--- a/net/smc/smc_ib.c
+++ b/net/smc/smc_ib.c
@@ -141,6 +141,17 @@ out:
 	return rc;
 }
 
+static void smc_ib_port_terminate(struct smc_ib_device *smcibdev, u8 ibport)
+{
+	struct smc_link_group *lgr, *l;
+
+	list_for_each_entry_safe(lgr, l, &smc_lgr_list.list, list) {
+		if (lgr->lnk[SMC_SINGLE_LINK].smcibdev == smcibdev &&
+		    lgr->lnk[SMC_SINGLE_LINK].ibport == ibport)
+			smc_lgr_terminate(lgr);
+	}
+}
+
 /* process context wrapper for might_sleep smc_ib_remember_port_attr */
 static void smc_ib_port_event_work(struct work_struct *work)
 {
@@ -151,6 +162,8 @@ static void smc_ib_port_event_work(struct work_struct *work)
 	for_each_set_bit(port_idx, &smcibdev->port_event_mask, SMC_MAX_PORTS) {
 		smc_ib_remember_port_attr(smcibdev, port_idx + 1);
 		clear_bit(port_idx, &smcibdev->port_event_mask);
+		if (!smc_ib_port_active(smcibdev, port_idx + 1))
+			smc_ib_port_terminate(smcibdev, port_idx + 1);
 	}
 }
 
@@ -165,15 +178,7 @@ static void smc_ib_global_event_handler(struct ib_event_handler *handler,
 
 	switch (ibevent->event) {
 	case IB_EVENT_PORT_ERR:
-		port_idx = ibevent->element.port_num - 1;
-		set_bit(port_idx, &smcibdev->port_event_mask);
-		schedule_work(&smcibdev->port_event_work);
-		/* fall through */
 	case IB_EVENT_DEVICE_FATAL:
-		/* tbd in follow-on patch:
-		 * abnormal close of corresponding connections
-		 */
-		break;
 	case IB_EVENT_PORT_ACTIVE:
 		port_idx = ibevent->element.port_num - 1;
 		set_bit(port_idx, &smcibdev->port_event_mask);
@@ -186,7 +191,8 @@ static void smc_ib_global_event_handler(struct ib_event_handler *handler,
 
 void smc_ib_dealloc_protection_domain(struct smc_link *lnk)
 {
-	ib_dealloc_pd(lnk->roce_pd);
+	if (lnk->roce_pd)
+		ib_dealloc_pd(lnk->roce_pd);
 	lnk->roce_pd = NULL;
 }
 
@@ -203,14 +209,18 @@ int smc_ib_create_protection_domain(struct smc_link *lnk)
 
 static void smc_ib_qp_event_handler(struct ib_event *ibevent, void *priv)
 {
+	struct smc_ib_device *smcibdev =
+		(struct smc_ib_device *)ibevent->device;
+	u8 port_idx;
+
 	switch (ibevent->event) {
 	case IB_EVENT_DEVICE_FATAL:
 	case IB_EVENT_GID_CHANGE:
 	case IB_EVENT_PORT_ERR:
 	case IB_EVENT_QP_ACCESS_ERR:
-		/* tbd in follow-on patch:
-		 * abnormal close of corresponding connections
-		 */
+		port_idx = ibevent->element.port_num - 1;
+		set_bit(port_idx, &smcibdev->port_event_mask);
+		schedule_work(&smcibdev->port_event_work);
 		break;
 	default:
 		break;
@@ -219,7 +229,8 @@ static void smc_ib_qp_event_handler(struct ib_event *ibevent, void *priv)
 
 void smc_ib_destroy_queue_pair(struct smc_link *lnk)
 {
-	ib_destroy_qp(lnk->roce_qp);
+	if (lnk->roce_qp)
+		ib_destroy_qp(lnk->roce_qp);
 	lnk->roce_qp = NULL;
 }
 
@@ -462,6 +473,7 @@ static void smc_ib_cleanup_per_ibdev(struct smc_ib_device *smcibdev)
 {
 	if (!smcibdev->initialized)
 		return;
+	smcibdev->initialized = 0;
 	smc_wr_remove_dev(smcibdev);
 	ib_unregister_event_handler(&smcibdev->event_handler);
 	ib_destroy_cq(smcibdev->roce_cq_recv);
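The IB event callbacks run in atomic context, so both the global and the QP handler merely set a bit in port_event_mask and schedule port_event_work; the worker then drains the mask in process context, where smc_ib_remember_port_attr() and smc_lgr_terminate() may sleep. A reduced sketch of this defer-by-bitmask pattern (struct and names simplified):

#include <linux/bitops.h>
#include <linux/workqueue.h>

#define MAX_PORTS 2	/* stands in for SMC_MAX_PORTS */

struct ibdev_like {
	unsigned long port_event_mask;
	struct work_struct port_event_work;
};

static void port_event_work_fn(struct work_struct *work)
{
	struct ibdev_like *dev =
		container_of(work, struct ibdev_like, port_event_work);
	int port_idx;

	/* process context: drain every port the atomic handler flagged */
	for_each_set_bit(port_idx, &dev->port_event_mask, MAX_PORTS) {
		clear_bit(port_idx, &dev->port_event_mask);
		/* ... re-read port attrs, terminate dead link groups ... */
	}
}

static void port_event(struct ibdev_like *dev, u8 port_num)
{
	/* atomic context: record the port and punt to the worker */
	set_bit(port_num - 1, &dev->port_event_mask);
	schedule_work(&dev->port_event_work);
}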
diff --git a/net/smc/smc_rx.c b/net/smc/smc_rx.c
index cbf58637ee14..9dc392ca06bf 100644
--- a/net/smc/smc_rx.c
+++ b/net/smc/smc_rx.c
@@ -65,7 +65,6 @@ static int smc_rx_wait_data(struct smc_sock *smc, long *timeo)
 	rc = sk_wait_event(sk, timeo,
 			   sk->sk_err ||
 			   sk->sk_shutdown & RCV_SHUTDOWN ||
-			   sock_flag(sk, SOCK_DONE) ||
 			   atomic_read(&conn->bytes_to_rcv) ||
 			   smc_cdc_rxed_any_close_or_senddone(conn),
 			   &wait);
@@ -116,7 +115,7 @@ int smc_rx_recvmsg(struct smc_sock *smc, struct msghdr *msg, size_t len,
 		if (read_done) {
 			if (sk->sk_err ||
 			    sk->sk_state == SMC_CLOSED ||
-			    (sk->sk_shutdown & RCV_SHUTDOWN) ||
+			    sk->sk_shutdown & RCV_SHUTDOWN ||
 			    !timeo ||
 			    signal_pending(current) ||
 			    smc_cdc_rxed_any_close_or_senddone(conn) ||
@@ -124,8 +123,6 @@ int smc_rx_recvmsg(struct smc_sock *smc, struct msghdr *msg, size_t len,
 			    peer_conn_abort)
 				break;
 		} else {
-			if (sock_flag(sk, SOCK_DONE))
-				break;
 			if (sk->sk_err) {
 				read_done = sock_error(sk);
 				break;
diff --git a/net/smc/smc_tx.c b/net/smc/smc_tx.c
index c48dc2d5fd3a..838bce20c361 100644
--- a/net/smc/smc_tx.c
+++ b/net/smc/smc_tx.c
@@ -86,7 +86,7 @@ static int smc_tx_wait_memory(struct smc_sock *smc, int flags)
 			rc = -EPIPE;
 			break;
 		}
-		if (conn->local_rx_ctrl.conn_state_flags.peer_conn_abort) {
+		if (smc_cdc_rxed_any_close(conn)) {
 			rc = -ECONNRESET;
 			break;
 		}
@@ -104,14 +104,12 @@ static int smc_tx_wait_memory(struct smc_sock *smc, int flags)
 		if (atomic_read(&conn->sndbuf_space))
 			break; /* at least 1 byte of free space available */
 		set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
-		sk->sk_write_pending++;
 		sk_wait_event(sk, &timeo,
 			      sk->sk_err ||
 			      (sk->sk_shutdown & SEND_SHUTDOWN) ||
-			      smc_cdc_rxed_any_close_or_senddone(conn) ||
+			      smc_cdc_rxed_any_close(conn) ||
 			      atomic_read(&conn->sndbuf_space),
 			      &wait);
-		sk->sk_write_pending--;
 	}
 	remove_wait_queue(sk_sleep(sk), &wait);
 	return rc;
@@ -250,8 +248,10 @@ static int smc_tx_rdma_write(struct smc_connection *conn, int peer_rmbe_offset,
 		peer_rmbe_offset;
 	rdma_wr.rkey = lgr->rtokens[conn->rtoken_idx][SMC_SINGLE_LINK].rkey;
 	rc = ib_post_send(link->roce_qp, &rdma_wr.wr, &failed_wr);
-	if (rc)
+	if (rc) {
 		conn->local_tx_ctrl.conn_state_flags.peer_conn_abort = 1;
+		smc_lgr_terminate(lgr);
+	}
 	return rc;
 }
 
@@ -408,8 +408,9 @@ int smc_tx_sndbuf_nonempty(struct smc_connection *conn)
 				goto out_unlock;
 			}
 			rc = 0;
-			schedule_delayed_work(&conn->tx_work,
-					      SMC_TX_WORK_DELAY);
+			if (conn->alert_token_local) /* connection healthy */
+				schedule_delayed_work(&conn->tx_work,
+						      SMC_TX_WORK_DELAY);
 		}
 		goto out_unlock;
 	}
@@ -440,19 +441,24 @@ static void smc_tx_work(struct work_struct *work)
 	int rc;
 
 	lock_sock(&smc->sk);
+	if (smc->sk.sk_err ||
+	    !conn->alert_token_local ||
+	    conn->local_rx_ctrl.conn_state_flags.peer_conn_abort)
+		goto out;
+
 	rc = smc_tx_sndbuf_nonempty(conn);
 	if (!rc && conn->local_rx_ctrl.prod_flags.write_blocked &&
 	    !atomic_read(&conn->bytes_to_rcv))
 		conn->local_rx_ctrl.prod_flags.write_blocked = 0;
+
+out:
 	release_sock(&smc->sk);
 }
 
 void smc_tx_consumer_update(struct smc_connection *conn)
 {
 	union smc_host_cursor cfed, cons;
-	struct smc_cdc_tx_pend *pend;
-	struct smc_wr_buf *wr_buf;
-	int to_confirm, rc;
+	int to_confirm;
 
 	smc_curs_write(&cons,
 		       smc_curs_read(&conn->local_tx_ctrl.cons, conn),
@@ -466,10 +472,8 @@ void smc_tx_consumer_update(struct smc_connection *conn)
 	    ((to_confirm > conn->rmbe_update_limit) &&
 	     ((to_confirm > (conn->rmbe_size / 2)) ||
 	      conn->local_rx_ctrl.prod_flags.write_blocked))) {
-		rc = smc_cdc_get_free_slot(conn, &wr_buf, &pend);
-		if (!rc)
-			rc = smc_cdc_msg_send(conn, wr_buf, pend);
-		if (rc < 0) {
+		if ((smc_cdc_get_slot_and_msg_send(conn) < 0) &&
+		    conn->alert_token_local) { /* connection healthy */
 			schedule_delayed_work(&conn->tx_work,
 					      SMC_TX_WORK_DELAY);
 			return;
diff --git a/net/smc/smc_wr.c b/net/smc/smc_wr.c
index de4537f66832..1b8af23e6e2b 100644
--- a/net/smc/smc_wr.c
+++ b/net/smc/smc_wr.c
@@ -122,6 +122,7 @@ static void smc_wr_tx_tasklet_fn(unsigned long data)
 again:
 	polled++;
 	do {
+		memset(&wc, 0, sizeof(wc));
 		rc = ib_poll_cq(dev->roce_cq_send, SMC_WR_MAX_POLL_CQE, wc);
 		if (polled == 1) {
 			ib_req_notify_cq(dev->roce_cq_send,
@@ -173,9 +174,9 @@ int smc_wr_tx_get_free_slot(struct smc_link *link,
 			    struct smc_wr_tx_pend_priv **wr_pend_priv)
 {
 	struct smc_wr_tx_pend *wr_pend;
+	u32 idx = link->wr_tx_cnt;
 	struct ib_send_wr *wr_ib;
 	u64 wr_id;
-	u32 idx;
 	int rc;
 
 	*wr_buf = NULL;
@@ -185,21 +186,20 @@ int smc_wr_tx_get_free_slot(struct smc_link *link,
 		if (rc)
 			return rc;
 	} else {
-		rc = wait_event_interruptible_timeout(
+		struct smc_link_group *lgr;
+
+		lgr = container_of(link, struct smc_link_group,
+				   lnk[SMC_SINGLE_LINK]);
+		rc = wait_event_timeout(
 			link->wr_tx_wait,
+			list_empty(&lgr->list) || /* lgr terminated */
 			(smc_wr_tx_get_free_slot_index(link, &idx) != -EBUSY),
 			SMC_WR_TX_WAIT_FREE_SLOT_TIME);
 		if (!rc) {
 			/* timeout - terminate connections */
-			struct smc_link_group *lgr;
-
-			lgr = container_of(link, struct smc_link_group,
-					   lnk[SMC_SINGLE_LINK]);
 			smc_lgr_terminate(lgr);
 			return -EPIPE;
 		}
-		if (rc == -ERESTARTSYS)
-			return -EINTR;
 		if (idx == link->wr_tx_cnt)
 			return -EPIPE;
 	}
@@ -249,8 +249,14 @@ int smc_wr_tx_send(struct smc_link *link, struct smc_wr_tx_pend_priv *priv)
 	pend = container_of(priv, struct smc_wr_tx_pend, priv);
 	rc = ib_post_send(link->roce_qp, &link->wr_tx_ibs[pend->idx],
 			  &failed_wr);
-	if (rc)
+	if (rc) {
+		struct smc_link_group *lgr =
+			container_of(link, struct smc_link_group,
+				     lnk[SMC_SINGLE_LINK]);
+
 		smc_wr_tx_put_slot(link, priv);
+		smc_lgr_terminate(lgr);
+	}
 	return rc;
 }
 
@@ -300,18 +306,18 @@ int smc_wr_reg_send(struct smc_link *link, struct ib_mr *mr)
 	return rc;
 }
 
-void smc_wr_tx_dismiss_slots(struct smc_link *link, u8 wr_rx_hdr_type,
+void smc_wr_tx_dismiss_slots(struct smc_link *link, u8 wr_tx_hdr_type,
 			     smc_wr_tx_filter filter,
 			     smc_wr_tx_dismisser dismisser,
 			     unsigned long data)
 {
 	struct smc_wr_tx_pend_priv *tx_pend;
-	struct smc_wr_rx_hdr *wr_rx;
+	struct smc_wr_rx_hdr *wr_tx;
 	int i;
 
 	for_each_set_bit(i, link->wr_tx_mask, link->wr_tx_cnt) {
-		wr_rx = (struct smc_wr_rx_hdr *)&link->wr_rx_bufs[i];
-		if (wr_rx->type != wr_rx_hdr_type)
+		wr_tx = (struct smc_wr_rx_hdr *)&link->wr_tx_bufs[i];
+		if (wr_tx->type != wr_tx_hdr_type)
 			continue;
 		tx_pend = &link->wr_tx_pends[i].priv;
 		if (filter(tx_pend, data))
@@ -319,24 +325,6 @@ void smc_wr_tx_dismiss_slots(struct smc_link *link, u8 wr_rx_hdr_type,
 	}
 }
 
-bool smc_wr_tx_has_pending(struct smc_link *link, u8 wr_rx_hdr_type,
-			   smc_wr_tx_filter filter, unsigned long data)
-{
-	struct smc_wr_tx_pend_priv *tx_pend;
-	struct smc_wr_rx_hdr *wr_rx;
-	int i;
-
-	for_each_set_bit(i, link->wr_tx_mask, link->wr_tx_cnt) {
-		wr_rx = (struct smc_wr_rx_hdr *)&link->wr_rx_bufs[i];
-		if (wr_rx->type != wr_rx_hdr_type)
-			continue;
-		tx_pend = &link->wr_tx_pends[i].priv;
-		if (filter(tx_pend, data))
-			return true;
-	}
-	return false;
-}
-
 /****************************** receive queue ********************************/
 
 int smc_wr_rx_register_handler(struct smc_wr_rx_handler *handler)
diff --git a/net/smc/smc_wr.h b/net/smc/smc_wr.h
index 2acf12b06063..ef0c3494c9cb 100644
--- a/net/smc/smc_wr.h
+++ b/net/smc/smc_wr.h
@@ -93,8 +93,6 @@ int smc_wr_tx_put_slot(struct smc_link *link,
 int smc_wr_tx_send(struct smc_link *link,
 		   struct smc_wr_tx_pend_priv *wr_pend_priv);
 void smc_wr_tx_cq_handler(struct ib_cq *ib_cq, void *cq_context);
-bool smc_wr_tx_has_pending(struct smc_link *link, u8 wr_rx_hdr_type,
-			   smc_wr_tx_filter filter, unsigned long data);
 void smc_wr_tx_dismiss_slots(struct smc_link *lnk, u8 wr_rx_hdr_type,
 			     smc_wr_tx_filter filter,
 			     smc_wr_tx_dismisser dismisser,
diff --git a/net/socket.c b/net/socket.c
index 2f378449bc1b..a93c99b518ca 100644
--- a/net/socket.c
+++ b/net/socket.c
@@ -163,12 +163,6 @@ static DEFINE_SPINLOCK(net_family_lock);
 static const struct net_proto_family __rcu *net_families[NPROTO] __read_mostly;
 
 /*
- *	Statistics counters of the socket lists
- */
-
-static DEFINE_PER_CPU(int, sockets_in_use);
-
-/*
  * Support routines.
  * Move socket addresses back and forth across the kernel/user
  * divide and look after the messy bits.
@@ -580,7 +574,6 @@ struct socket *sock_alloc(void)
 	inode->i_gid = current_fsgid();
 	inode->i_op = &sockfs_inode_ops;
 
-	this_cpu_add(sockets_in_use, 1);
 	return sock;
 }
 EXPORT_SYMBOL(sock_alloc);
@@ -607,7 +600,6 @@ void sock_release(struct socket *sock)
 	if (rcu_dereference_protected(sock->wq, 1)->fasync_list)
 		pr_err("%s: fasync list not empty!\n", __func__);
 
-	this_cpu_sub(sockets_in_use, 1);
 	if (!sock->file) {
 		iput(SOCK_INODE(sock));
 		return;
@@ -969,9 +961,28 @@ static long sock_do_ioctl(struct net *net, struct socket *sock,
 	 * If this ioctl is unknown try to hand it down
 	 * to the NIC driver.
 	 */
-	if (err == -ENOIOCTLCMD)
-		err = dev_ioctl(net, cmd, argp);
+	if (err != -ENOIOCTLCMD)
+		return err;
 
+	if (cmd == SIOCGIFCONF) {
+		struct ifconf ifc;
+		if (copy_from_user(&ifc, argp, sizeof(struct ifconf)))
+			return -EFAULT;
+		rtnl_lock();
+		err = dev_ifconf(net, &ifc, sizeof(struct ifreq));
+		rtnl_unlock();
+		if (!err && copy_to_user(argp, &ifc, sizeof(struct ifconf)))
+			err = -EFAULT;
+	} else {
+		struct ifreq ifr;
+		bool need_copyout;
+		if (copy_from_user(&ifr, argp, sizeof(struct ifreq)))
+			return -EFAULT;
+		err = dev_ioctl(net, cmd, &ifr, &need_copyout);
+		if (!err && need_copyout)
+			if (copy_to_user(argp, &ifr, sizeof(struct ifreq)))
+				return -EFAULT;
+	}
 	return err;
 }
 
@@ -996,12 +1007,19 @@ static long sock_ioctl(struct file *file, unsigned cmd, unsigned long arg)
 	sock = file->private_data;
 	sk = sock->sk;
 	net = sock_net(sk);
-	if (cmd >= SIOCDEVPRIVATE && cmd <= (SIOCDEVPRIVATE + 15)) {
-		err = dev_ioctl(net, cmd, argp);
+	if (unlikely(cmd >= SIOCDEVPRIVATE && cmd <= (SIOCDEVPRIVATE + 15))) {
+		struct ifreq ifr;
+		bool need_copyout;
+		if (copy_from_user(&ifr, argp, sizeof(struct ifreq)))
+			return -EFAULT;
+		err = dev_ioctl(net, cmd, &ifr, &need_copyout);
+		if (!err && need_copyout)
+			if (copy_to_user(argp, &ifr, sizeof(struct ifreq)))
+				return -EFAULT;
 	} else
 #ifdef CONFIG_WEXT_CORE
 	if (cmd >= SIOCIWFIRST && cmd <= SIOCIWLAST) {
-		err = dev_ioctl(net, cmd, argp);
+		err = wext_handle_ioctl(net, cmd, argp);
 	} else
 #endif
 		switch (cmd) {
@@ -2621,29 +2639,11 @@ out_fs:
 
 core_initcall(sock_init);	/* early initcall */
 
-static int __init jit_init(void)
-{
-#ifdef CONFIG_BPF_JIT_ALWAYS_ON
-	bpf_jit_enable = 1;
-#endif
-	return 0;
-}
-pure_initcall(jit_init);
-
 #ifdef CONFIG_PROC_FS
 void socket_seq_show(struct seq_file *seq)
 {
-	int cpu;
-	int counter = 0;
-
-	for_each_possible_cpu(cpu)
-	    counter += per_cpu(sockets_in_use, cpu);
-
-	/* It can be negative, by the way. 8) */
-	if (counter < 0)
-		counter = 0;
-
-	seq_printf(seq, "sockets: used %d\n", counter);
+	seq_printf(seq, "sockets: used %d\n",
+		   sock_inuse_get(seq->private));
 }
 #endif				/* CONFIG_PROC_FS */
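
The removed sockets_in_use was a plain per-CPU integer summed at read time; since a socket can be allocated on one CPU and released on another, individual slots (and a sum taken while updates are in flight) can be negative, which is why the old socket_seq_show() clamped the total at zero. The replacement sock_inuse_get() presumably keeps the count per network namespace instead (its argument here is the seq file's private netns data). A runnable toy model of why the old sum needed the clamp:

	#include <stdio.h>

	#define NR_CPUS 4

	/* One counter slot per CPU, like the removed sockets_in_use.
	 * Allocation and release may run on different CPUs. */
	static int slots[NR_CPUS];

	static void alloc_on(int cpu)   { slots[cpu]++; }
	static void release_on(int cpu) { slots[cpu]--; }

	int main(void)
	{
		alloc_on(0);            /* socket created on CPU 0 */
		release_on(2);          /* ...torn down on CPU 2: slots[2] == -1 */

		int sum = 0;
		for (int cpu = 0; cpu < NR_CPUS; cpu++)
			sum += slots[cpu];
		/* A reader racing with in-flight updates can even see sum < 0 */
		printf("sockets: used %d\n", sum < 0 ? 0 : sum);
		return 0;
	}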
 
@@ -2680,89 +2680,25 @@ static int do_siocgstampns(struct net *net, struct socket *sock,
 	return err;
 }
 
-static int dev_ifname32(struct net *net, struct compat_ifreq __user *uifr32)
-{
-	struct ifreq __user *uifr;
-	int err;
-
-	uifr = compat_alloc_user_space(sizeof(struct ifreq));
-	if (copy_in_user(uifr, uifr32, sizeof(struct compat_ifreq)))
-		return -EFAULT;
-
-	err = dev_ioctl(net, SIOCGIFNAME, uifr);
-	if (err)
-		return err;
-
-	if (copy_in_user(uifr32, uifr, sizeof(struct compat_ifreq)))
-		return -EFAULT;
-
-	return 0;
-}
-
-static int dev_ifconf(struct net *net, struct compat_ifconf __user *uifc32)
+static int compat_dev_ifconf(struct net *net, struct compat_ifconf __user *uifc32)
 {
 	struct compat_ifconf ifc32;
 	struct ifconf ifc;
-	struct ifconf __user *uifc;
-	struct compat_ifreq __user *ifr32;
-	struct ifreq __user *ifr;
-	unsigned int i, j;
 	int err;
 
 	if (copy_from_user(&ifc32, uifc32, sizeof(struct compat_ifconf)))
 		return -EFAULT;
 
-	memset(&ifc, 0, sizeof(ifc));
-	if (ifc32.ifcbuf == 0) {
-		ifc32.ifc_len = 0;
-		ifc.ifc_len = 0;
-		ifc.ifc_req = NULL;
-		uifc = compat_alloc_user_space(sizeof(struct ifconf));
-	} else {
-		size_t len = ((ifc32.ifc_len / sizeof(struct compat_ifreq)) + 1) *
-			sizeof(struct ifreq);
-		uifc = compat_alloc_user_space(sizeof(struct ifconf) + len);
-		ifc.ifc_len = len;
-		ifr = ifc.ifc_req = (void __user *)(uifc + 1);
-		ifr32 = compat_ptr(ifc32.ifcbuf);
-		for (i = 0; i < ifc32.ifc_len; i += sizeof(struct compat_ifreq)) {
-			if (copy_in_user(ifr, ifr32, sizeof(struct compat_ifreq)))
-				return -EFAULT;
-			ifr++;
-			ifr32++;
-		}
-	}
-	if (copy_to_user(uifc, &ifc, sizeof(struct ifconf)))
-		return -EFAULT;
+	ifc.ifc_len = ifc32.ifc_len;
+	ifc.ifc_req = compat_ptr(ifc32.ifcbuf);
 
-	err = dev_ioctl(net, SIOCGIFCONF, uifc);
+	rtnl_lock();
+	err = dev_ifconf(net, &ifc, sizeof(struct compat_ifreq));
+	rtnl_unlock();
 	if (err)
 		return err;
 
-	if (copy_from_user(&ifc, uifc, sizeof(struct ifconf)))
-		return -EFAULT;
-
-	ifr = ifc.ifc_req;
-	ifr32 = compat_ptr(ifc32.ifcbuf);
-	for (i = 0, j = 0;
-	     i + sizeof(struct compat_ifreq) <= ifc32.ifc_len && j < ifc.ifc_len;
-	     i += sizeof(struct compat_ifreq), j += sizeof(struct ifreq)) {
-		if (copy_in_user(ifr32, ifr, sizeof(struct compat_ifreq)))
-			return -EFAULT;
-		ifr32++;
-		ifr++;
-	}
-
-	if (ifc32.ifcbuf == 0) {
-		/* Translate from 64-bit structure multiple to
-		 * a 32-bit one.
-		 */
-		i = ifc.ifc_len;
-		i = ((i / sizeof(struct ifreq)) * sizeof(struct compat_ifreq));
-		ifc32.ifc_len = i;
-	} else {
-		ifc32.ifc_len = i;
-	}
+	ifc32.ifc_len = ifc.ifc_len;
 	if (copy_to_user(uifc32, &ifc32, sizeof(struct compat_ifconf)))
 		return -EFAULT;
 
@@ -2773,9 +2709,9 @@ static int ethtool_ioctl(struct net *net, struct compat_ifreq __user *ifr32)
 {
 	struct compat_ethtool_rxnfc __user *compat_rxnfc;
 	bool convert_in = false, convert_out = false;
-	size_t buf_size = ALIGN(sizeof(struct ifreq), 8);
-	struct ethtool_rxnfc __user *rxnfc;
-	struct ifreq __user *ifr;
+	size_t buf_size = 0;
+	struct ethtool_rxnfc __user *rxnfc = NULL;
+	struct ifreq ifr;
 	u32 rule_cnt = 0, actual_rule_cnt;
 	u32 ethcmd;
 	u32 data;
@@ -2812,18 +2748,14 @@ static int ethtool_ioctl(struct net *net, struct compat_ifreq __user *ifr32)
 	case ETHTOOL_SRXCLSRLDEL:
 		buf_size += sizeof(struct ethtool_rxnfc);
 		convert_in = true;
+		rxnfc = compat_alloc_user_space(buf_size);
 		break;
 	}
 
-	ifr = compat_alloc_user_space(buf_size);
-	rxnfc = (void __user *)ifr + ALIGN(sizeof(struct ifreq), 8);
-
-	if (copy_in_user(&ifr->ifr_name, &ifr32->ifr_name, IFNAMSIZ))
+	if (copy_from_user(&ifr.ifr_name, &ifr32->ifr_name, IFNAMSIZ))
 		return -EFAULT;
 
-	if (put_user(convert_in ? rxnfc : compat_ptr(data),
-		     &ifr->ifr_ifru.ifru_data))
-		return -EFAULT;
+	ifr.ifr_data = convert_in ? rxnfc : (void __user *)compat_rxnfc;
 
 	if (convert_in) {
 		/* We expect there to be holes between fs.m_ext and
@@ -2851,7 +2783,7 @@ static int ethtool_ioctl(struct net *net, struct compat_ifreq __user *ifr32)
 			return -EFAULT;
 	}
 
-	ret = dev_ioctl(net, SIOCETHTOOL, ifr);
+	ret = dev_ioctl(net, SIOCETHTOOL, &ifr, NULL);
 	if (ret)
 		return ret;
 
@@ -2892,113 +2824,43 @@ static int ethtool_ioctl(struct net *net, struct compat_ifreq __user *ifr32)
 
 static int compat_siocwandev(struct net *net, struct compat_ifreq __user *uifr32)
 {
-	void __user *uptr;
 	compat_uptr_t uptr32;
-	struct ifreq __user *uifr;
+	struct ifreq ifr;
+	void __user *saved;
+	int err;
 
-	uifr = compat_alloc_user_space(sizeof(*uifr));
-	if (copy_in_user(uifr, uifr32, sizeof(struct compat_ifreq)))
+	if (copy_from_user(&ifr, uifr32, sizeof(struct compat_ifreq)))
 		return -EFAULT;
 
 	if (get_user(uptr32, &uifr32->ifr_settings.ifs_ifsu))
 		return -EFAULT;
 
-	uptr = compat_ptr(uptr32);
-
-	if (put_user(uptr, &uifr->ifr_settings.ifs_ifsu.raw_hdlc))
-		return -EFAULT;
-
-	return dev_ioctl(net, SIOCWANDEV, uifr);
-}
-
-static int bond_ioctl(struct net *net, unsigned int cmd,
-			 struct compat_ifreq __user *ifr32)
-{
-	struct ifreq kifr;
-	mm_segment_t old_fs;
-	int err;
+	saved = ifr.ifr_settings.ifs_ifsu.raw_hdlc;
+	ifr.ifr_settings.ifs_ifsu.raw_hdlc = compat_ptr(uptr32);
 
-	switch (cmd) {
-	case SIOCBONDENSLAVE:
-	case SIOCBONDRELEASE:
-	case SIOCBONDSETHWADDR:
-	case SIOCBONDCHANGEACTIVE:
-		if (copy_from_user(&kifr, ifr32, sizeof(struct compat_ifreq)))
-			return -EFAULT;
-
-		old_fs = get_fs();
-		set_fs(KERNEL_DS);
-		err = dev_ioctl(net, cmd,
-				(struct ifreq __user __force *) &kifr);
-		set_fs(old_fs);
-
-		return err;
-	default:
-		return -ENOIOCTLCMD;
+	err = dev_ioctl(net, SIOCWANDEV, &ifr, NULL);
+	if (!err) {
+		ifr.ifr_settings.ifs_ifsu.raw_hdlc = saved;
+		if (copy_to_user(uifr32, &ifr, sizeof(struct compat_ifreq)))
+			err = -EFAULT;
 	}
+	return err;
 }
 
 /* Handle ioctls that use ifreq::ifr_data and just need struct ifreq converted */
 static int compat_ifr_data_ioctl(struct net *net, unsigned int cmd,
 				 struct compat_ifreq __user *u_ifreq32)
 {
-	struct ifreq __user *u_ifreq64;
-	char tmp_buf[IFNAMSIZ];
-	void __user *data64;
+	struct ifreq ifreq;
 	u32 data32;
 
-	if (copy_from_user(&tmp_buf[0], &(u_ifreq32->ifr_ifrn.ifrn_name[0]),
-			   IFNAMSIZ))
+	if (copy_from_user(ifreq.ifr_name, u_ifreq32->ifr_name, IFNAMSIZ))
 		return -EFAULT;
-	if (get_user(data32, &u_ifreq32->ifr_ifru.ifru_data))
+	if (get_user(data32, &u_ifreq32->ifr_data))
 		return -EFAULT;
-	data64 = compat_ptr(data32);
+	ifreq.ifr_data = compat_ptr(data32);
 
-	u_ifreq64 = compat_alloc_user_space(sizeof(*u_ifreq64));
-
-	if (copy_to_user(&u_ifreq64->ifr_ifrn.ifrn_name[0], &tmp_buf[0],
-			 IFNAMSIZ))
-		return -EFAULT;
-	if (put_user(data64, &u_ifreq64->ifr_ifru.ifru_data))
-		return -EFAULT;
-
-	return dev_ioctl(net, cmd, u_ifreq64);
-}
-
-static int dev_ifsioc(struct net *net, struct socket *sock,
-			 unsigned int cmd, struct compat_ifreq __user *uifr32)
-{
-	struct ifreq __user *uifr;
-	int err;
-
-	uifr = compat_alloc_user_space(sizeof(*uifr));
-	if (copy_in_user(uifr, uifr32, sizeof(*uifr32)))
-		return -EFAULT;
-
-	err = sock_do_ioctl(net, sock, cmd, (unsigned long)uifr);
-
-	if (!err) {
-		switch (cmd) {
-		case SIOCGIFFLAGS:
-		case SIOCGIFMETRIC:
-		case SIOCGIFMTU:
-		case SIOCGIFMEM:
-		case SIOCGIFHWADDR:
-		case SIOCGIFINDEX:
-		case SIOCGIFADDR:
-		case SIOCGIFBRDADDR:
-		case SIOCGIFDSTADDR:
-		case SIOCGIFNETMASK:
-		case SIOCGIFPFLAGS:
-		case SIOCGIFTXQLEN:
-		case SIOCGMIIPHY:
-		case SIOCGMIIREG:
-			if (copy_in_user(uifr32, uifr, sizeof(*uifr32)))
-				err = -EFAULT;
-			break;
-		}
-	}
-	return err;
+	return dev_ioctl(net, cmd, &ifreq, NULL);
 }
 
 static int compat_sioc_ifmap(struct net *net, unsigned int cmd,
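
The same hunk shows the standard compat recipe that replaces the compat_alloc_user_space()/copy_in_user() staging throughout this file: read the 32-bit layout into a native kernel struct, widen any embedded user pointer with compat_ptr(), then call the native handler. Restated compactly (the wrapper name is ours; the body mirrors compat_ifr_data_ioctl() above):

	static int compat_data_ioctl(struct net *net, unsigned int cmd,
				     struct compat_ifreq __user *u32req)
	{
		struct ifreq ifr;	/* native layout, in kernel memory */
		u32 data32;

		if (copy_from_user(ifr.ifr_name, u32req->ifr_name, IFNAMSIZ))
			return -EFAULT;
		if (get_user(data32, &u32req->ifr_data))
			return -EFAULT;
		/* Widen the 32-bit user pointer to a native __user pointer */
		ifr.ifr_data = compat_ptr(data32);

		return dev_ioctl(net, cmd, &ifr, NULL);
	}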
@@ -3006,7 +2868,6 @@ static int compat_sioc_ifmap(struct net *net, unsigned int cmd,
 {
 	struct ifreq ifr;
 	struct compat_ifmap __user *uifmap32;
-	mm_segment_t old_fs;
 	int err;
 
 	uifmap32 = &uifr32->ifr_ifru.ifru_map;
@@ -3020,10 +2881,7 @@ static int compat_sioc_ifmap(struct net *net, unsigned int cmd,
 	if (err)
 		return -EFAULT;
 
-	old_fs = get_fs();
-	set_fs(KERNEL_DS);
-	err = dev_ioctl(net, cmd, (void  __user __force *)&ifr);
-	set_fs(old_fs);
+	err = dev_ioctl(net, cmd, &ifr, NULL);
 
 	if (cmd == SIOCGIFMAP && !err) {
 		err = copy_to_user(uifr32, &ifr, sizeof(ifr.ifr_name));
@@ -3156,10 +3014,8 @@ static int compat_sock_ioctl_trans(struct file *file, struct socket *sock,
 	case SIOCSIFBR:
 	case SIOCGIFBR:
 		return old_bridge_ioctl(argp);
-	case SIOCGIFNAME:
-		return dev_ifname32(net, argp);
 	case SIOCGIFCONF:
-		return dev_ifconf(net, argp);
+		return compat_dev_ifconf(net, argp);
 	case SIOCETHTOOL:
 		return ethtool_ioctl(net, argp);
 	case SIOCWANDEV:
@@ -3167,11 +3023,6 @@ static int compat_sock_ioctl_trans(struct file *file, struct socket *sock,
 	case SIOCGIFMAP:
 	case SIOCSIFMAP:
 		return compat_sioc_ifmap(net, cmd, argp);
-	case SIOCBONDENSLAVE:
-	case SIOCBONDRELEASE:
-	case SIOCBONDSETHWADDR:
-	case SIOCBONDCHANGEACTIVE:
-		return bond_ioctl(net, cmd, argp);
 	case SIOCADDRT:
 	case SIOCDELRT:
 		return routing_ioctl(net, sock, cmd, argp);
@@ -3231,12 +3082,15 @@ static int compat_sock_ioctl_trans(struct file *file, struct socket *sock,
 	case SIOCGMIIPHY:
 	case SIOCGMIIREG:
 	case SIOCSMIIREG:
-		return dev_ifsioc(net, sock, cmd, argp);
-
 	case SIOCSARP:
 	case SIOCGARP:
 	case SIOCDARP:
 	case SIOCATMARK:
+	case SIOCBONDENSLAVE:
+	case SIOCBONDRELEASE:
+	case SIOCBONDSETHWADDR:
+	case SIOCBONDCHANGEACTIVE:
+	case SIOCGIFNAME:
 		return sock_do_ioctl(net, sock, cmd, arg);
 	}
 
@@ -3391,19 +3245,6 @@ int kernel_sendpage_locked(struct sock *sk, struct page *page, int offset,
 }
 EXPORT_SYMBOL(kernel_sendpage_locked);
 
-int kernel_sock_ioctl(struct socket *sock, int cmd, unsigned long arg)
-{
-	mm_segment_t oldfs = get_fs();
-	int err;
-
-	set_fs(KERNEL_DS);
-	err = sock->ops->ioctl(sock, cmd, arg);
-	set_fs(oldfs);
-
-	return err;
-}
-EXPORT_SYMBOL(kernel_sock_ioctl);
-
 int kernel_sock_shutdown(struct socket *sock, enum sock_shutdown_cmd how)
 {
 	return sock->ops->shutdown(sock, how);
diff --git a/net/tipc/bcast.c b/net/tipc/bcast.c
index 329325bd553e..37892b3909af 100644
--- a/net/tipc/bcast.c
+++ b/net/tipc/bcast.c
@@ -1,7 +1,7 @@
 /*
  * net/tipc/bcast.c: TIPC broadcast code
  *
- * Copyright (c) 2004-2006, 2014-2016, Ericsson AB
+ * Copyright (c) 2004-2006, 2014-2017, Ericsson AB
  * Copyright (c) 2004, Intel Corporation.
  * Copyright (c) 2005, 2010-2011, Wind River Systems
  * All rights reserved.
@@ -42,8 +42,8 @@
 #include "link.h"
 #include "name_table.h"
 
-#define	BCLINK_WIN_DEFAULT	50	/* bcast link window size (default) */
-#define	BCLINK_WIN_MIN	        32	/* bcast minimum link window size */
+#define BCLINK_WIN_DEFAULT  50	/* bcast link window size (default) */
+#define BCLINK_WIN_MIN      32	/* bcast minimum link window size */
 
 const char tipc_bclink_name[] = "broadcast-link";
 
@@ -74,6 +74,10 @@ static struct tipc_bc_base *tipc_bc_base(struct net *net)
 	return tipc_net(net)->bcbase;
 }
 
+/* tipc_bcast_get_mtu(): get the MTU currently used by broadcast link
+ * Note: the MTU is decremented to give room for a tunnel header, in
+ * case the message needs to be sent as replicast
+ */
 int tipc_bcast_get_mtu(struct net *net)
 {
 	return tipc_link_mtu(tipc_bc_sndlink(net)) - INT_H_SIZE;
@@ -515,7 +519,7 @@ int tipc_bcast_init(struct net *net)
 	spin_lock_init(&tipc_net(net)->bclock);
 
 	if (!tipc_link_bc_create(net, 0, 0,
-				 U16_MAX,
+				 FB_MTU,
 				 BCLINK_WIN_DEFAULT,
 				 0,
 				 &bb->inputq,
diff --git a/net/tipc/core.h b/net/tipc/core.h
index 964342689f2c..20b21af2ff14 100644
--- a/net/tipc/core.h
+++ b/net/tipc/core.h
@@ -49,7 +49,6 @@
 #include <linux/uaccess.h>
 #include <linux/interrupt.h>
 #include <linux/atomic.h>
-#include <asm/hardirq.h>
 #include <linux/netdevice.h>
 #include <linux/in.h>
 #include <linux/list.h>
diff --git a/net/tipc/group.c b/net/tipc/group.c
index 5f4ffae807ee..122162a31816 100644
--- a/net/tipc/group.c
+++ b/net/tipc/group.c
@@ -49,8 +49,6 @@
 #define ADV_ACTIVE (ADV_UNIT * 12)
 
 enum mbr_state {
-	MBR_QUARANTINED,
-	MBR_DISCOVERED,
 	MBR_JOINING,
 	MBR_PUBLISHED,
 	MBR_JOINED,
@@ -64,8 +62,7 @@ enum mbr_state {
 struct tipc_member {
 	struct rb_node tree_node;
 	struct list_head list;
-	struct list_head congested;
-	struct sk_buff *event_msg;
+	struct list_head small_win;
 	struct sk_buff_head deferredq;
 	struct tipc_group *group;
 	u32 node;
@@ -77,21 +74,18 @@ struct tipc_member {
 	u16 bc_rcv_nxt;
 	u16 bc_syncpt;
 	u16 bc_acked;
-	bool usr_pending;
 };
 
 struct tipc_group {
 	struct rb_root members;
-	struct list_head congested;
+	struct list_head small_win;
 	struct list_head pending;
 	struct list_head active;
-	struct list_head reclaiming;
 	struct tipc_nlist dests;
 	struct net *net;
 	int subid;
 	u32 type;
 	u32 instance;
-	u32 domain;
 	u32 scope;
 	u32 portid;
 	u16 member_cnt;
@@ -99,6 +93,7 @@ struct tipc_group {
 	u16 max_active;
 	u16 bc_snd_nxt;
 	u16 bc_ackers;
+	bool *open;
 	bool loopback;
 	bool events;
 };
@@ -106,6 +101,16 @@ struct tipc_group {
 static void tipc_group_proto_xmit(struct tipc_group *grp, struct tipc_member *m,
 				  int mtyp, struct sk_buff_head *xmitq);
 
+static void tipc_group_open(struct tipc_member *m, bool *wakeup)
+{
+	*wakeup = false;
+	if (list_empty(&m->small_win))
+		return;
+	list_del_init(&m->small_win);
+	*m->group->open = true;
+	*wakeup = true;
+}
+
 static void tipc_group_decr_active(struct tipc_group *grp,
 				   struct tipc_member *m)
 {
@@ -137,14 +142,14 @@ u16 tipc_group_bc_snd_nxt(struct tipc_group *grp)
 	return grp->bc_snd_nxt;
 }
 
-static bool tipc_group_is_enabled(struct tipc_member *m)
+static bool tipc_group_is_receiver(struct tipc_member *m)
 {
-	return m->state != MBR_QUARANTINED && m->state != MBR_LEAVING;
+	return m && m->state != MBR_JOINING && m->state != MBR_LEAVING;
 }
 
-static bool tipc_group_is_receiver(struct tipc_member *m)
+static bool tipc_group_is_sender(struct tipc_member *m)
 {
-	return m && m->state >= MBR_JOINED;
+	return m && m->state != MBR_JOINING && m->state != MBR_PUBLISHED;
 }
 
 u32 tipc_group_exclude(struct tipc_group *grp)
@@ -160,8 +165,11 @@ int tipc_group_size(struct tipc_group *grp)
 }
 
 struct tipc_group *tipc_group_create(struct net *net, u32 portid,
-				     struct tipc_group_req *mreq)
+				     struct tipc_group_req *mreq,
+				     bool *group_is_open)
 {
+	u32 filter = TIPC_SUB_PORTS | TIPC_SUB_NO_STATUS;
+	bool global = mreq->scope != TIPC_NODE_SCOPE;
 	struct tipc_group *grp;
 	u32 type = mreq->type;
 
@@ -169,25 +177,41 @@ struct tipc_group *tipc_group_create(struct net *net, u32 portid,
 	if (!grp)
 		return NULL;
 	tipc_nlist_init(&grp->dests, tipc_own_addr(net));
-	INIT_LIST_HEAD(&grp->congested);
+	INIT_LIST_HEAD(&grp->small_win);
 	INIT_LIST_HEAD(&grp->active);
 	INIT_LIST_HEAD(&grp->pending);
-	INIT_LIST_HEAD(&grp->reclaiming);
 	grp->members = RB_ROOT;
 	grp->net = net;
 	grp->portid = portid;
-	grp->domain = addr_domain(net, mreq->scope);
 	grp->type = type;
 	grp->instance = mreq->instance;
 	grp->scope = mreq->scope;
 	grp->loopback = mreq->flags & TIPC_GROUP_LOOPBACK;
 	grp->events = mreq->flags & TIPC_GROUP_MEMBER_EVTS;
-	if (tipc_topsrv_kern_subscr(net, portid, type, 0, ~0, &grp->subid))
+	grp->open = group_is_open;
+	filter |= global ? TIPC_SUB_CLUSTER_SCOPE : TIPC_SUB_NODE_SCOPE;
+	if (tipc_topsrv_kern_subscr(net, portid, type, 0, ~0,
+				    filter, &grp->subid))
 		return grp;
 	kfree(grp);
 	return NULL;
 }
 
+void tipc_group_join(struct net *net, struct tipc_group *grp, int *sk_rcvbuf)
+{
+	struct rb_root *tree = &grp->members;
+	struct tipc_member *m, *tmp;
+	struct sk_buff_head xmitq;
+
+	skb_queue_head_init(&xmitq);
+	rbtree_postorder_for_each_entry_safe(m, tmp, tree, tree_node) {
+		tipc_group_proto_xmit(grp, m, GRP_JOIN_MSG, &xmitq);
+		tipc_group_update_member(m, 0);
+	}
+	tipc_node_distr_xmit(net, &xmitq);
+	*sk_rcvbuf = tipc_group_rcvbuf_limit(grp);
+}
+
 void tipc_group_delete(struct net *net, struct tipc_group *grp)
 {
 	struct rb_root *tree = &grp->members;
@@ -233,7 +257,7 @@ static struct tipc_member *tipc_group_find_dest(struct tipc_group *grp,
 	struct tipc_member *m;
 
 	m = tipc_group_find_member(grp, node, port);
-	if (m && tipc_group_is_enabled(m))
+	if (m && tipc_group_is_receiver(m))
 		return m;
 	return NULL;
 }
@@ -278,7 +302,7 @@ static void tipc_group_add_to_tree(struct tipc_group *grp,
 
 static struct tipc_member *tipc_group_create_member(struct tipc_group *grp,
 						    u32 node, u32 port,
-						    int state)
+						    u32 instance, int state)
 {
 	struct tipc_member *m;
 
@@ -286,11 +310,12 @@ static struct tipc_member *tipc_group_create_member(struct tipc_group *grp,
 	if (!m)
 		return NULL;
 	INIT_LIST_HEAD(&m->list);
-	INIT_LIST_HEAD(&m->congested);
+	INIT_LIST_HEAD(&m->small_win);
 	__skb_queue_head_init(&m->deferredq);
 	m->group = grp;
 	m->node = node;
 	m->port = port;
+	m->instance = instance;
 	m->bc_acked = grp->bc_snd_nxt - 1;
 	grp->member_cnt++;
 	tipc_group_add_to_tree(grp, m);
@@ -299,9 +324,10 @@ static struct tipc_member *tipc_group_create_member(struct tipc_group *grp,
 	return m;
 }
 
-void tipc_group_add_member(struct tipc_group *grp, u32 node, u32 port)
+void tipc_group_add_member(struct tipc_group *grp, u32 node,
+			   u32 port, u32 instance)
 {
-	tipc_group_create_member(grp, node, port, MBR_DISCOVERED);
+	tipc_group_create_member(grp, node, port, instance, MBR_PUBLISHED);
 }
 
 static void tipc_group_delete_member(struct tipc_group *grp,
@@ -315,7 +341,7 @@ static void tipc_group_delete_member(struct tipc_group *grp,
 		grp->bc_ackers--;
 
 	list_del_init(&m->list);
-	list_del_init(&m->congested);
+	list_del_init(&m->small_win);
 	tipc_group_decr_active(grp, m);
 
 	/* If last member on a node, remove node from dest list */
@@ -344,7 +370,7 @@ void tipc_group_update_member(struct tipc_member *m, int len)
 	struct tipc_group *grp = m->group;
 	struct tipc_member *_m, *tmp;
 
-	if (!tipc_group_is_enabled(m))
+	if (!tipc_group_is_receiver(m))
 		return;
 
 	m->window -= len;
@@ -352,16 +378,14 @@ void tipc_group_update_member(struct tipc_member *m, int len)
 	if (m->window >= ADV_IDLE)
 		return;
 
-	list_del_init(&m->congested);
+	list_del_init(&m->small_win);
 
-	/* Sort member into congested members' list */
-	list_for_each_entry_safe(_m, tmp, &grp->congested, congested) {
-		if (m->window > _m->window)
-			continue;
-		list_add_tail(&m->congested, &_m->congested);
-		return;
+	/* Sort member into the small-window members' list */
+	list_for_each_entry_safe(_m, tmp, &grp->small_win, small_win) {
+		if (_m->window > m->window)
+			break;
 	}
-	list_add_tail(&m->congested, &grp->congested);
+	list_add_tail(&m->small_win, &_m->small_win);
 }
 
 void tipc_group_update_bc_members(struct tipc_group *grp, int len, bool ack)
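
The sorted insert above leans on a property of the kernel's circular lists: when list_for_each_entry_safe() falls off the end without hitting the break, the cursor's embedded list_head is the list head itself, so the final list_add_tail() appends at the tail; when it breaks, the new member lands just before the first entry with a larger window, keeping the list ascending. A self-contained model with a hand-rolled doubly linked list (plain C, not the kernel headers):

	#include <stdio.h>

	struct node { int window; struct node *prev, *next; };

	/* head is a sentinel; list kept sorted ascending by window */
	static void sorted_insert(struct node *head, struct node *m)
	{
		struct node *pos = head->next;

		while (pos != head && pos->window <= m->window)
			pos = pos->next;   /* stop before first larger entry */
		/* insert m before pos (appends at tail when none is larger) */
		m->prev = pos->prev;
		m->next = pos;
		pos->prev->next = m;
		pos->prev = m;
	}

	int main(void)
	{
		struct node head = { 0, &head, &head };
		struct node a = { .window = 30 }, b = { .window = 10 },
			    c = { .window = 20 };

		sorted_insert(&head, &a);
		sorted_insert(&head, &b);
		sorted_insert(&head, &c);
		for (struct node *n = head.next; n != &head; n = n->next)
			printf("%d\n", n->window);   /* prints 10 20 30 */
		return 0;
	}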
@@ -373,7 +397,7 @@ void tipc_group_update_bc_members(struct tipc_group *grp, int len, bool ack)
 
 	for (n = rb_first(&grp->members); n; n = rb_next(n)) {
 		m = container_of(n, struct tipc_member, tree_node);
-		if (tipc_group_is_enabled(m)) {
+		if (tipc_group_is_receiver(m)) {
 			tipc_group_update_member(m, len);
 			m->bc_acked = prev;
 			ackers++;
@@ -394,20 +418,20 @@ bool tipc_group_cong(struct tipc_group *grp, u32 dnode, u32 dport,
 	int adv, state;
 
 	m = tipc_group_find_dest(grp, dnode, dport);
-	*mbr = m;
-	if (!m)
+	if (!tipc_group_is_receiver(m)) {
+		*mbr = NULL;
 		return false;
-	if (m->usr_pending)
-		return true;
+	}
+	*mbr = m;
+
 	if (m->window >= len)
 		return false;
-	m->usr_pending = true;
+
+	*grp->open = false;
 
 	/* If not fully advertised, do it now to prevent mutual blocking */
 	adv = m->advertised;
 	state = m->state;
-	if (state < MBR_JOINED)
-		return true;
 	if (state == MBR_JOINED && adv == ADV_IDLE)
 		return true;
 	if (state == MBR_ACTIVE && adv == ADV_ACTIVE)
@@ -425,13 +449,14 @@ bool tipc_group_bc_cong(struct tipc_group *grp, int len)
 	struct tipc_member *m = NULL;
 
 	/* If prev bcast was replicast, reject until all receivers have acked */
-	if (grp->bc_ackers)
+	if (grp->bc_ackers) {
+		*grp->open = false;
 		return true;
-
-	if (list_empty(&grp->congested))
+	}
+	if (list_empty(&grp->small_win))
 		return false;
 
-	m = list_first_entry(&grp->congested, struct tipc_member, congested);
+	m = list_first_entry(&grp->small_win, struct tipc_member, small_win);
 	if (m->window >= len)
 		return false;
 
@@ -486,7 +511,7 @@ void tipc_group_filter_msg(struct tipc_group *grp, struct sk_buff_head *inputq,
 		goto drop;
 
 	m = tipc_group_find_member(grp, node, port);
-	if (!tipc_group_is_receiver(m))
+	if (!tipc_group_is_sender(m))
 		goto drop;
 
 	if (less(msg_grp_bc_seqno(hdr), m->bc_rcv_nxt))
@@ -573,24 +598,34 @@ void tipc_group_update_rcv_win(struct tipc_group *grp, int blks, u32 node,
 
 	switch (m->state) {
 	case MBR_JOINED:
-		/* Reclaim advertised space from least active member */
-		if (!list_empty(active) && active_cnt >= reclaim_limit) {
+		/* First, decide if member can go active */
+		if (active_cnt <= max_active) {
+			m->state = MBR_ACTIVE;
+			list_add_tail(&m->list, active);
+			grp->active_cnt++;
+			tipc_group_proto_xmit(grp, m, GRP_ADV_MSG, xmitq);
+		} else {
+			m->state = MBR_PENDING;
+			list_add_tail(&m->list, &grp->pending);
+		}
+
+		if (active_cnt < reclaim_limit)
+			break;
+
+		/* Reclaim from oldest active member, if possible */
+		if (!list_empty(active)) {
 			rm = list_first_entry(active, struct tipc_member, list);
 			rm->state = MBR_RECLAIMING;
-			list_move_tail(&rm->list, &grp->reclaiming);
+			list_del_init(&rm->list);
 			tipc_group_proto_xmit(grp, rm, GRP_RECLAIM_MSG, xmitq);
-		}
-		/* If max active, become pending and wait for reclaimed space */
-		if (active_cnt >= max_active) {
-			m->state = MBR_PENDING;
-			list_add_tail(&m->list, &grp->pending);
 			break;
 		}
-		/* Otherwise become active */
-		m->state = MBR_ACTIVE;
-		list_add_tail(&m->list, &grp->active);
-		grp->active_cnt++;
-		/* Fall through */
+		/* Nobody to reclaim from; revert oldest pending to JOINED */
+		pm = list_first_entry(&grp->pending, struct tipc_member, list);
+		list_del_init(&pm->list);
+		pm->state = MBR_JOINED;
+		tipc_group_proto_xmit(grp, pm, GRP_ADV_MSG, xmitq);
+		break;
 	case MBR_ACTIVE:
 		if (!list_is_last(&m->list, &grp->active))
 			list_move_tail(&m->list, &grp->active);
@@ -602,12 +637,12 @@ void tipc_group_update_rcv_win(struct tipc_group *grp, int blks, u32 node,
 		if (m->advertised > ADV_IDLE)
 			break;
 		m->state = MBR_JOINED;
+		grp->active_cnt--;
 		if (m->advertised < ADV_IDLE) {
 			pr_warn_ratelimited("Rcv unexpected msg after REMIT\n");
 			tipc_group_proto_xmit(grp, m, GRP_ADV_MSG, xmitq);
 		}
-		grp->active_cnt--;
-		list_del_init(&m->list);
+
 		if (list_empty(&grp->pending))
 			return;
 
@@ -619,7 +654,6 @@ void tipc_group_update_rcv_win(struct tipc_group *grp, int blks, u32 node,
 		tipc_group_proto_xmit(grp, pm, GRP_ADV_MSG, xmitq);
 		break;
 	case MBR_RECLAIMING:
-	case MBR_DISCOVERED:
 	case MBR_JOINING:
 	case MBR_LEAVING:
 	default:
@@ -627,6 +661,40 @@ void tipc_group_update_rcv_win(struct tipc_group *grp, int blks, u32 node,
 	}
 }
 
+static void tipc_group_create_event(struct tipc_group *grp,
+				    struct tipc_member *m,
+				    u32 event, u16 seqno,
+				    struct sk_buff_head *inputq)
+{
+	u32 dnode = tipc_own_addr(grp->net);
+	struct tipc_event evt;
+	struct sk_buff *skb;
+	struct tipc_msg *hdr;
+
+	evt.event = event;
+	evt.found_lower = m->instance;
+	evt.found_upper = m->instance;
+	evt.port.ref = m->port;
+	evt.port.node = m->node;
+	evt.s.seq.type = grp->type;
+	evt.s.seq.lower = m->instance;
+	evt.s.seq.upper = m->instance;
+
+	skb = tipc_msg_create(TIPC_CRITICAL_IMPORTANCE, TIPC_GRP_MEMBER_EVT,
+			      GROUP_H_SIZE, sizeof(evt), dnode, m->node,
+			      grp->portid, m->port, 0);
+	if (!skb)
+		return;
+
+	hdr = buf_msg(skb);
+	msg_set_nametype(hdr, grp->type);
+	msg_set_grp_evt(hdr, event);
+	msg_set_dest_droppable(hdr, true);
+	msg_set_grp_bc_seqno(hdr, seqno);
+	memcpy(msg_data(hdr), &evt, sizeof(evt));
+	TIPC_SKB_CB(skb)->orig_member = m->instance;
+	__skb_queue_tail(inputq, skb);
+}
+
 static void tipc_group_proto_xmit(struct tipc_group *grp, struct tipc_member *m,
 				  int mtyp, struct sk_buff_head *xmitq)
 {
@@ -672,83 +740,73 @@ void tipc_group_proto_rcv(struct tipc_group *grp, bool *usr_wakeup,
 	u32 node = msg_orignode(hdr);
 	u32 port = msg_origport(hdr);
 	struct tipc_member *m, *pm;
-	struct tipc_msg *ehdr;
 	u16 remitted, in_flight;
 
 	if (!grp)
 		return;
 
+	if (grp->scope == TIPC_NODE_SCOPE && node != tipc_own_addr(grp->net))
+		return;
+
 	m = tipc_group_find_member(grp, node, port);
 
 	switch (msg_type(hdr)) {
 	case GRP_JOIN_MSG:
 		if (!m)
 			m = tipc_group_create_member(grp, node, port,
-						     MBR_QUARANTINED);
+						     0, MBR_JOINING);
 		if (!m)
 			return;
 		m->bc_syncpt = msg_grp_bc_syncpt(hdr);
 		m->bc_rcv_nxt = m->bc_syncpt;
 		m->window += msg_adv_win(hdr);
 
-		/* Wait until PUBLISH event is received */
-		if (m->state == MBR_DISCOVERED) {
-			m->state = MBR_JOINING;
-		} else if (m->state == MBR_PUBLISHED) {
-			m->state = MBR_JOINED;
-			*usr_wakeup = true;
-			m->usr_pending = false;
-			tipc_group_proto_xmit(grp, m, GRP_ADV_MSG, xmitq);
-			ehdr = buf_msg(m->event_msg);
-			msg_set_grp_bc_seqno(ehdr, m->bc_syncpt);
-			__skb_queue_tail(inputq, m->event_msg);
-		}
-		list_del_init(&m->congested);
+		/* Wait until PUBLISH event is received if necessary */
+		if (m->state != MBR_PUBLISHED)
+			return;
+
+		/* Member can be taken into service */
+		m->state = MBR_JOINED;
+		tipc_group_open(m, usr_wakeup);
 		tipc_group_update_member(m, 0);
+		tipc_group_proto_xmit(grp, m, GRP_ADV_MSG, xmitq);
+		tipc_group_create_event(grp, m, TIPC_PUBLISHED,
+					m->bc_syncpt, inputq);
 		return;
 	case GRP_LEAVE_MSG:
 		if (!m)
 			return;
 		m->bc_syncpt = msg_grp_bc_syncpt(hdr);
 		list_del_init(&m->list);
-		list_del_init(&m->congested);
-		*usr_wakeup = true;
-
-		/* Wait until WITHDRAW event is received */
-		if (m->state != MBR_LEAVING) {
-			tipc_group_decr_active(grp, m);
-			m->state = MBR_LEAVING;
-			return;
-		}
-		/* Otherwise deliver already received WITHDRAW event */
-		ehdr = buf_msg(m->event_msg);
-		msg_set_grp_bc_seqno(ehdr, m->bc_syncpt);
-		__skb_queue_tail(inputq, m->event_msg);
+		tipc_group_open(m, usr_wakeup);
+		tipc_group_decr_active(grp, m);
+		m->state = MBR_LEAVING;
+		tipc_group_create_event(grp, m, TIPC_WITHDRAWN,
+					m->bc_syncpt, inputq);
 		return;
 	case GRP_ADV_MSG:
 		if (!m)
 			return;
 		m->window += msg_adv_win(hdr);
-		*usr_wakeup = m->usr_pending;
-		m->usr_pending = false;
-		list_del_init(&m->congested);
+		tipc_group_open(m, usr_wakeup);
 		return;
 	case GRP_ACK_MSG:
 		if (!m)
 			return;
 		m->bc_acked = msg_grp_bc_acked(hdr);
 		if (--grp->bc_ackers)
-			break;
+			return;
+		list_del_init(&m->small_win);
+		*m->group->open = true;
 		*usr_wakeup = true;
-		m->usr_pending = false;
+		tipc_group_update_member(m, 0);
 		return;
 	case GRP_RECLAIM_MSG:
 		if (!m)
 			return;
-		*usr_wakeup = m->usr_pending;
-		m->usr_pending = false;
 		tipc_group_proto_xmit(grp, m, GRP_REMIT_MSG, xmitq);
 		m->window = ADV_IDLE;
+		tipc_group_open(m, usr_wakeup);
 		return;
 	case GRP_REMIT_MSG:
 		if (!m || m->state != MBR_RECLAIMING)
@@ -763,18 +821,14 @@ void tipc_group_proto_rcv(struct tipc_group *grp, bool *usr_wakeup,
 			m->advertised = ADV_IDLE + in_flight;
 			return;
 		}
-		/* All messages preceding the REMIT have been read */
-		if (m->advertised <= remitted) {
-			m->state = MBR_JOINED;
-			in_flight = 0;
-		}
-		/* ..and the REMIT overtaken by more messages => re-advertise */
+		/* This should never happen */
 		if (m->advertised < remitted)
-			tipc_group_proto_xmit(grp, m, GRP_ADV_MSG, xmitq);
+			pr_warn_ratelimited("Unexpected REMIT msg\n");
 
-		m->advertised = ADV_IDLE + in_flight;
+		/* All messages preceding the REMIT have been read */
+		m->state = MBR_JOINED;
 		grp->active_cnt--;
-		list_del_init(&m->list);
+		m->advertised = ADV_IDLE;
 
 		/* Set oldest pending member to active and advertise */
 		if (list_empty(&grp->pending))
@@ -796,11 +850,10 @@ void tipc_group_proto_rcv(struct tipc_group *grp, bool *usr_wakeup,
 void tipc_group_member_evt(struct tipc_group *grp,
 			   bool *usr_wakeup,
 			   int *sk_rcvbuf,
-			   struct sk_buff *skb,
+			   struct tipc_msg *hdr,
 			   struct sk_buff_head *inputq,
 			   struct sk_buff_head *xmitq)
 {
-	struct tipc_msg *hdr = buf_msg(skb);
 	struct tipc_event *evt = (void *)msg_data(hdr);
 	u32 instance = evt->found_lower;
 	u32 node = evt->port.node;
@@ -808,89 +861,59 @@ void tipc_group_member_evt(struct tipc_group *grp,
 	int event = evt->event;
 	struct tipc_member *m;
 	struct net *net;
-	bool node_up;
 	u32 self;
 
 	if (!grp)
-		goto drop;
+		return;
 
 	net = grp->net;
 	self = tipc_own_addr(net);
 	if (!grp->loopback && node == self && port == grp->portid)
-		goto drop;
-
-	/* Convert message before delivery to user */
-	msg_set_hdr_sz(hdr, GROUP_H_SIZE);
-	msg_set_user(hdr, TIPC_CRITICAL_IMPORTANCE);
-	msg_set_type(hdr, TIPC_GRP_MEMBER_EVT);
-	msg_set_origport(hdr, port);
-	msg_set_orignode(hdr, node);
-	msg_set_nametype(hdr, grp->type);
-	msg_set_grp_evt(hdr, event);
+		return;
 
 	m = tipc_group_find_member(grp, node, port);
 
-	if (event == TIPC_PUBLISHED) {
-		if (!m)
-			m = tipc_group_create_member(grp, node, port,
-						     MBR_DISCOVERED);
-		if (!m)
-			goto drop;
-
-		/* Hold back event if JOIN message not yet received */
-		if (m->state == MBR_DISCOVERED) {
-			m->event_msg = skb;
-			m->state = MBR_PUBLISHED;
-		} else {
-			msg_set_grp_bc_seqno(hdr, m->bc_syncpt);
-			__skb_queue_tail(inputq, skb);
-			m->state = MBR_JOINED;
-			*usr_wakeup = true;
-			m->usr_pending = false;
+	switch (event) {
+	case TIPC_PUBLISHED:
+		/* Send and wait for arrival of JOIN message if necessary */
+		if (!m) {
+			m = tipc_group_create_member(grp, node, port, instance,
+						     MBR_PUBLISHED);
+			if (!m)
+				break;
+			tipc_group_update_member(m, 0);
+			tipc_group_proto_xmit(grp, m, GRP_JOIN_MSG, xmitq);
+			break;
 		}
+
+		if (m->state != MBR_JOINING)
+			break;
+
+		/* Member can be taken into service */
 		m->instance = instance;
-		TIPC_SKB_CB(skb)->orig_member = m->instance;
+		m->state = MBR_JOINED;
+		tipc_group_open(m, usr_wakeup);
+		tipc_group_update_member(m, 0);
 		tipc_group_proto_xmit(grp, m, GRP_JOIN_MSG, xmitq);
-		if (m->window < ADV_IDLE)
-			tipc_group_update_member(m, 0);
-		else
-			list_del_init(&m->congested);
-	} else if (event == TIPC_WITHDRAWN) {
+		tipc_group_create_event(grp, m, TIPC_PUBLISHED,
+					m->bc_syncpt, inputq);
+		break;
+	case TIPC_WITHDRAWN:
 		if (!m)
-			goto drop;
-
-		TIPC_SKB_CB(skb)->orig_member = m->instance;
+			break;
 
-		*usr_wakeup = true;
-		m->usr_pending = false;
-		node_up = tipc_node_is_up(net, node);
-		m->event_msg = NULL;
-
-		if (node_up) {
-			/* Hold back event if a LEAVE msg should be expected */
-			if (m->state != MBR_LEAVING) {
-				m->event_msg = skb;
-				tipc_group_decr_active(grp, m);
-				m->state = MBR_LEAVING;
-			} else {
-				msg_set_grp_bc_seqno(hdr, m->bc_syncpt);
-				__skb_queue_tail(inputq, skb);
-			}
-		} else {
-			if (m->state != MBR_LEAVING) {
-				tipc_group_decr_active(grp, m);
-				m->state = MBR_LEAVING;
-				msg_set_grp_bc_seqno(hdr, m->bc_rcv_nxt);
-			} else {
-				msg_set_grp_bc_seqno(hdr, m->bc_syncpt);
-			}
-			__skb_queue_tail(inputq, skb);
-		}
+		tipc_group_decr_active(grp, m);
+		m->state = MBR_LEAVING;
 		list_del_init(&m->list);
-		list_del_init(&m->congested);
+		tipc_group_open(m, usr_wakeup);
+
+		/* Only send event if no LEAVE message can be expected */
+		if (!tipc_node_is_up(net, node))
+			tipc_group_create_event(grp, m, TIPC_WITHDRAWN,
+						m->bc_rcv_nxt, inputq);
+		break;
+	default:
+		break;
 	}
 	*sk_rcvbuf = tipc_group_rcvbuf_limit(grp);
-	return;
-drop:
-	kfree_skb(skb);
 }
diff --git a/net/tipc/group.h b/net/tipc/group.h
index d525e1cd7de5..5996af6e9f1d 100644
--- a/net/tipc/group.h
+++ b/net/tipc/group.h
@@ -43,9 +43,12 @@ struct tipc_member;
 struct tipc_msg;
 
 struct tipc_group *tipc_group_create(struct net *net, u32 portid,
-				     struct tipc_group_req *mreq);
+				     struct tipc_group_req *mreq,
+				     bool *group_is_open);
+void tipc_group_join(struct net *net, struct tipc_group *grp, int *sk_rcv_buf);
 void tipc_group_delete(struct net *net, struct tipc_group *grp);
-void tipc_group_add_member(struct tipc_group *grp, u32 node, u32 port);
+void tipc_group_add_member(struct tipc_group *grp, u32 node,
+			   u32 port, u32 instance);
 struct tipc_nlist *tipc_group_dests(struct tipc_group *grp);
 void tipc_group_self(struct tipc_group *grp, struct tipc_name_seq *seq,
 		     int *scope);
@@ -54,7 +57,7 @@ void tipc_group_filter_msg(struct tipc_group *grp,
 			   struct sk_buff_head *inputq,
 			   struct sk_buff_head *xmitq);
 void tipc_group_member_evt(struct tipc_group *grp, bool *wakeup,
-			   int *sk_rcvbuf, struct sk_buff *skb,
+			   int *sk_rcvbuf, struct tipc_msg *hdr,
 			   struct sk_buff_head *inputq,
 			   struct sk_buff_head *xmitq);
 void tipc_group_proto_rcv(struct tipc_group *grp, bool *wakeup,
@@ -69,5 +72,4 @@ void tipc_group_update_rcv_win(struct tipc_group *grp, int blks, u32 node,
 			       u32 port, struct sk_buff_head *xmitq);
 u16 tipc_group_bc_snd_nxt(struct tipc_group *grp);
 void tipc_group_update_member(struct tipc_member *m, int len);
-int tipc_group_size(struct tipc_group *grp);
 #endif
diff --git a/net/tipc/link.c b/net/tipc/link.c
index 6bce0b1117bd..2d6b2aed30e0 100644
--- a/net/tipc/link.c
+++ b/net/tipc/link.c
@@ -483,7 +483,7 @@ bool tipc_link_create(struct net *net, char *if_name, int bearer_id,
 /**
  * tipc_link_bc_create - create new link to be used for broadcast
  * @n: pointer to associated node
- * @mtu: mtu to be used
+ * @mtu: mtu to be used initially if no peers
  * @window: send window to be used
  * @inputq: queue to put messages ready for delivery
  * @namedq: queue to put binding table update messages ready for delivery
diff --git a/net/tipc/msg.c b/net/tipc/msg.c
index b0d07b35909d..55d8ba92291d 100644
--- a/net/tipc/msg.c
+++ b/net/tipc/msg.c
@@ -251,20 +251,23 @@ bool tipc_msg_validate(struct sk_buff **_skb)
  * @pktmax: Max packet size that can be used
  * @list: Buffer or chain of buffers to be returned to caller
  *
+ * Note that the recursive call we are making here is safe, since it can
+ * logically go only one further level down.
+ *
  * Returns message data size or errno: -ENOMEM, -EFAULT
  */
-int tipc_msg_build(struct tipc_msg *mhdr, struct msghdr *m,
-		   int offset, int dsz, int pktmax, struct sk_buff_head *list)
+int tipc_msg_build(struct tipc_msg *mhdr, struct msghdr *m, int offset,
+		   int dsz, int pktmax, struct sk_buff_head *list)
 {
 	int mhsz = msg_hdr_sz(mhdr);
+	struct tipc_msg pkthdr;
 	int msz = mhsz + dsz;
-	int pktno = 1;
-	int pktsz;
 	int pktrem = pktmax;
-	int drem = dsz;
-	struct tipc_msg pkthdr;
 	struct sk_buff *skb;
+	int drem = dsz;
+	int pktno = 1;
 	char *pktpos;
+	int pktsz;
 	int rc;
 
 	msg_set_size(mhdr, msz);
@@ -272,8 +275,18 @@ int tipc_msg_build(struct tipc_msg *mhdr, struct msghdr *m,
 	/* No fragmentation needed? */
 	if (likely(msz <= pktmax)) {
 		skb = tipc_buf_acquire(msz, GFP_KERNEL);
-		if (unlikely(!skb))
+
+		/* Fall back to smaller MTU if node local message */
+		if (unlikely(!skb)) {
+			if (pktmax != MAX_MSG_SIZE)
+				return -ENOMEM;
+			rc = tipc_msg_build(mhdr, m, offset, dsz, FB_MTU, list);
+			if (rc != dsz)
+				return rc;
+			if (tipc_msg_assemble(list))
+				return dsz;
 			return -ENOMEM;
+		}
 		skb_orphan(skb);
 		__skb_queue_tail(list, skb);
 		skb_copy_to_linear_data(skb, mhdr, mhsz);
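
The fallback branch above is the pattern worth noting: when the single large linear buffer cannot be allocated, tipc_msg_build() re-invokes itself with pktmax = FB_MTU, producing fragments that tipc_msg_assemble() (added further down) stitches back together. The `pktmax != MAX_MSG_SIZE` guard is what bounds the recursion to one extra level, as the new kernel-doc note says. A runnable toy with the same shape (names and sizes are stand-ins):

	#include <errno.h>
	#include <stdio.h>
	#include <stdlib.h>

	#define BIG_MAX 66000u   /* stand-in for MAX_MSG_SIZE */
	#define CHUNK    3744u   /* stand-in for FB_MTU */

	/* Toy allocator that refuses anything bigger than CHUNK,
	 * simulating a failing high-order allocation. */
	static void *try_alloc(size_t len)
	{
		return len <= CHUNK ? malloc(len) : NULL;
	}

	static int build(size_t len, size_t max)
	{
		void *buf = try_alloc(len);

		if (buf) {
			free(buf);
			return (int)len;     /* "message built" */
		}
		if (max != BIG_MAX)          /* already the fallback: give up */
			return -ENOMEM;

		/* Re-run with the small MTU; each recursive call fits in one
		 * chunk, so the depth is at most two levels. */
		size_t done = 0;
		while (done < len) {
			size_t frag = len - done > CHUNK ? CHUNK : len - done;
			if (build(frag, CHUNK) < 0)
				return -ENOMEM;
			done += frag;
		}
		return (int)len;             /* fragments "reassembled" */
	}

	int main(void)
	{
		printf("%d\n", build(60000, BIG_MAX)); /* succeeds via fragments */
		return 0;
	}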
@@ -589,6 +602,30 @@ bool tipc_msg_lookup_dest(struct net *net, struct sk_buff *skb, int *err)
 	return true;
 }
 
+/* tipc_msg_assemble() - assemble chain of fragments into one message
+ */
+bool tipc_msg_assemble(struct sk_buff_head *list)
+{
+	struct sk_buff *skb, *tmp = NULL;
+
+	if (skb_queue_len(list) == 1)
+		return true;
+
+	while ((skb = __skb_dequeue(list))) {
+		skb->next = NULL;
+		if (tipc_buf_append(&tmp, &skb)) {
+			__skb_queue_tail(list, skb);
+			return true;
+		}
+		if (!tmp)
+			break;
+	}
+	__skb_queue_purge(list);
+	__skb_queue_head_init(list);
+	pr_warn("Failed to assemble buffer\n");
+	return false;
+}
+
 /* tipc_msg_reassemble() - clone a buffer chain of fragments and
  *                         reassemble the clones into one message
  */
diff --git a/net/tipc/msg.h b/net/tipc/msg.h
index 3e4384c222f7..b4ba1b4f9ae7 100644
--- a/net/tipc/msg.h
+++ b/net/tipc/msg.h
@@ -98,7 +98,7 @@ struct plist;
 #define MAX_H_SIZE                60	/* Largest possible TIPC header size */
 
 #define MAX_MSG_SIZE (MAX_H_SIZE + TIPC_MAX_USER_MSG_SIZE)
-
+#define FB_MTU                  3744
 #define TIPC_MEDIA_INFO_OFFSET	5
 
 struct tipc_skb_cb {
@@ -943,6 +943,7 @@ bool tipc_msg_extract(struct sk_buff *skb, struct sk_buff **iskb, int *pos);
 int tipc_msg_build(struct tipc_msg *mhdr, struct msghdr *m,
 		   int offset, int dsz, int mtu, struct sk_buff_head *list);
 bool tipc_msg_lookup_dest(struct net *net, struct sk_buff *skb, int *err);
+bool tipc_msg_assemble(struct sk_buff_head *list);
 bool tipc_msg_reassemble(struct sk_buff_head *list, struct sk_buff_head *rcvq);
 bool tipc_msg_pskb_copy(u32 dst, struct sk_buff_head *msg,
 			struct sk_buff_head *cpy);
diff --git a/net/tipc/name_table.c b/net/tipc/name_table.c
index b3829bcf63c7..ed0457cc99d6 100644
--- a/net/tipc/name_table.c
+++ b/net/tipc/name_table.c
@@ -328,7 +328,8 @@ static struct publication *tipc_nameseq_insert_publ(struct net *net,
 	list_for_each_entry_safe(s, st, &nseq->subscriptions, nameseq_list) {
 		tipc_subscrp_report_overlap(s, publ->lower, publ->upper,
 					    TIPC_PUBLISHED, publ->ref,
-					    publ->node, created_subseq);
+					    publ->node, publ->scope,
+					    created_subseq);
 	}
 	return publ;
 }
@@ -398,19 +399,21 @@ found:
 	list_for_each_entry_safe(s, st, &nseq->subscriptions, nameseq_list) {
 		tipc_subscrp_report_overlap(s, publ->lower, publ->upper,
 					    TIPC_WITHDRAWN, publ->ref,
-					    publ->node, removed_subseq);
+					    publ->node, publ->scope,
+					    removed_subseq);
 	}
 
 	return publ;
 }
 
 /**
- * tipc_nameseq_subscribe - attach a subscription, and issue
- * the prescribed number of events if there is any sub-
+ * tipc_nameseq_subscribe - attach a subscription, and optionally
+ * issue the prescribed number of events if there is any sub-
  * sequence overlapping with the requested sequence
  */
 static void tipc_nameseq_subscribe(struct name_seq *nseq,
-				   struct tipc_subscription *s)
+				   struct tipc_subscription *s,
+				   bool status)
 {
 	struct sub_seq *sseq = nseq->sseqs;
 	struct tipc_name_seq ns;
@@ -420,7 +423,7 @@ static void tipc_nameseq_subscribe(struct name_seq *nseq,
 	tipc_subscrp_get(s);
 	list_add(&s->nameseq_list, &nseq->subscriptions);
 
-	if (!sseq)
+	if (!status || !sseq)
 		return;
 
 	while (sseq != &nseq->sseqs[nseq->first_free]) {
@@ -434,6 +437,7 @@ static void tipc_nameseq_subscribe(struct name_seq *nseq,
 							    sseq->upper,
 							    TIPC_PUBLISHED,
 							    crs->ref, crs->node,
+							    crs->scope,
 							    must_report);
 				must_report = 0;
 			}
@@ -597,7 +601,7 @@ not_found:
 	return ref;
 }
 
-bool tipc_nametbl_lookup(struct net *net, u32 type, u32 instance, u32 domain,
+bool tipc_nametbl_lookup(struct net *net, u32 type, u32 instance, u32 scope,
 			 struct list_head *dsts, int *dstcnt, u32 exclude,
 			 bool all)
 {
@@ -607,9 +611,6 @@ bool tipc_nametbl_lookup(struct net *net, u32 type, u32 instance, u32 domain,
 	struct name_seq *seq;
 	struct sub_seq *sseq;
 
-	if (!tipc_in_scope(domain, self))
-		return false;
-
 	*dstcnt = 0;
 	rcu_read_lock();
 	seq = nametbl_find_seq(net, type);
@@ -620,7 +621,7 @@ bool tipc_nametbl_lookup(struct net *net, u32 type, u32 instance, u32 domain,
 	if (likely(sseq)) {
 		info = sseq->info;
 		list_for_each_entry(publ, &info->zone_list, zone_list) {
-			if (!tipc_in_scope(domain, publ->node))
+			if (publ->scope != scope)
 				continue;
 			if (publ->ref == exclude && publ->node == self)
 				continue;
@@ -638,13 +639,14 @@ exit:
 	return !list_empty(dsts);
 }
 
-int tipc_nametbl_mc_translate(struct net *net, u32 type, u32 lower, u32 upper,
-			      u32 limit, struct list_head *dports)
+int tipc_nametbl_mc_lookup(struct net *net, u32 type, u32 lower, u32 upper,
+			   u32 scope, bool exact, struct list_head *dports)
 {
-	struct name_seq *seq;
-	struct sub_seq *sseq;
 	struct sub_seq *sseq_stop;
 	struct name_info *info;
+	struct publication *p;
+	struct name_seq *seq;
+	struct sub_seq *sseq;
 	int res = 0;
 
 	rcu_read_lock();
@@ -656,15 +658,12 @@ int tipc_nametbl_mc_translate(struct net *net, u32 type, u32 lower, u32 upper,
 	sseq = seq->sseqs + nameseq_locate_subseq(seq, lower);
 	sseq_stop = seq->sseqs + seq->first_free;
 	for (; sseq != sseq_stop; sseq++) {
-		struct publication *publ;
-
 		if (sseq->lower > upper)
 			break;
-
 		info = sseq->info;
-		list_for_each_entry(publ, &info->node_list, node_list) {
-			if (publ->scope <= limit)
-				tipc_dest_push(dports, 0, publ->ref);
+		list_for_each_entry(p, &info->node_list, node_list) {
+			if (p->scope == scope || (!exact && p->scope < scope))
+				tipc_dest_push(dports, 0, p->ref);
 		}
 
 		if (info->cluster_list_size != info->node_list_size)
@@ -681,8 +680,7 @@ exit:
  * - Determines if any node local ports overlap
  */
 void tipc_nametbl_lookup_dst_nodes(struct net *net, u32 type, u32 lower,
-				   u32 upper, u32 domain,
-				   struct tipc_nlist *nodes)
+				   u32 upper, struct tipc_nlist *nodes)
 {
 	struct sub_seq *sseq, *stop;
 	struct publication *publ;
@@ -700,8 +698,7 @@ void tipc_nametbl_lookup_dst_nodes(struct net *net, u32 type, u32 lower,
 	for (; sseq != stop && sseq->lower <= upper; sseq++) {
 		info = sseq->info;
 		list_for_each_entry(publ, &info->zone_list, zone_list) {
-			if (tipc_in_scope(domain, publ->node))
-				tipc_nlist_add(nodes, publ->node);
+			tipc_nlist_add(nodes, publ->node);
 		}
 	}
 	spin_unlock_bh(&seq->lock);
@@ -712,7 +709,7 @@ exit:
 /* tipc_nametbl_build_group - build list of communication group members
  */
 void tipc_nametbl_build_group(struct net *net, struct tipc_group *grp,
-			      u32 type, u32 domain)
+			      u32 type, u32 scope)
 {
 	struct sub_seq *sseq, *stop;
 	struct name_info *info;
@@ -730,9 +727,9 @@ void tipc_nametbl_build_group(struct net *net, struct tipc_group *grp,
 	for (; sseq != stop; sseq++) {
 		info = sseq->info;
 		list_for_each_entry(p, &info->zone_list, zone_list) {
-			if (!tipc_in_scope(domain, p->node))
+			if (p->scope != scope)
 				continue;
-			tipc_group_add_member(grp, p->node, p->ref);
+			tipc_group_add_member(grp, p->node, p->ref, p->lower);
 		}
 	}
 	spin_unlock_bh(&seq->lock);
@@ -811,7 +808,7 @@ int tipc_nametbl_withdraw(struct net *net, u32 type, u32 lower, u32 ref,
 /**
  * tipc_nametbl_subscribe - add a subscription object to the name table
  */
-void tipc_nametbl_subscribe(struct tipc_subscription *s)
+void tipc_nametbl_subscribe(struct tipc_subscription *s, bool status)
 {
 	struct tipc_net *tn = net_generic(s->net, tipc_net_id);
 	u32 type = tipc_subscrp_convert_seq_type(s->evt.s.seq.type, s->swap);
@@ -825,7 +822,7 @@ void tipc_nametbl_subscribe(struct tipc_subscription *s)
 		seq = tipc_nameseq_create(type, &tn->nametbl->seq_hlist[index]);
 	if (seq) {
 		spin_lock_bh(&seq->lock);
-		tipc_nameseq_subscribe(seq, s);
+		tipc_nameseq_subscribe(seq, s, status);
 		spin_unlock_bh(&seq->lock);
 	} else {
 		tipc_subscrp_convert_seq(&s->evt.s.seq, s->swap, &ns);
diff --git a/net/tipc/name_table.h b/net/tipc/name_table.h
index 71926e429446..f56e7cb3d436 100644
--- a/net/tipc/name_table.h
+++ b/net/tipc/name_table.h
@@ -100,13 +100,12 @@ struct name_table {
 int tipc_nl_name_table_dump(struct sk_buff *skb, struct netlink_callback *cb);
 
 u32 tipc_nametbl_translate(struct net *net, u32 type, u32 instance, u32 *node);
-int tipc_nametbl_mc_translate(struct net *net, u32 type, u32 lower, u32 upper,
-			      u32 limit, struct list_head *dports);
+int tipc_nametbl_mc_lookup(struct net *net, u32 type, u32 lower, u32 upper,
+			   u32 scope, bool exact, struct list_head *dports);
 void tipc_nametbl_build_group(struct net *net, struct tipc_group *grp,
 			      u32 type, u32 domain);
 void tipc_nametbl_lookup_dst_nodes(struct net *net, u32 type, u32 lower,
-				   u32 upper, u32 domain,
-				   struct tipc_nlist *nodes);
+				   u32 upper, struct tipc_nlist *nodes);
 bool tipc_nametbl_lookup(struct net *net, u32 type, u32 instance, u32 domain,
 			 struct list_head *dsts, int *dstcnt, u32 exclude,
 			 bool all);
@@ -121,7 +120,7 @@ struct publication *tipc_nametbl_insert_publ(struct net *net, u32 type,
 struct publication *tipc_nametbl_remove_publ(struct net *net, u32 type,
 					     u32 lower, u32 node, u32 ref,
 					     u32 key);
-void tipc_nametbl_subscribe(struct tipc_subscription *s);
+void tipc_nametbl_subscribe(struct tipc_subscription *s, bool status);
 void tipc_nametbl_unsubscribe(struct tipc_subscription *s);
 int tipc_nametbl_init(struct net *net);
 void tipc_nametbl_stop(struct net *net);
diff --git a/net/tipc/server.c b/net/tipc/server.c
index 78a292a84afc..df0c563c90cd 100644
--- a/net/tipc/server.c
+++ b/net/tipc/server.c
@@ -132,10 +132,11 @@ static struct tipc_conn *tipc_conn_lookup(struct tipc_server *s, int conid)
 
 	spin_lock_bh(&s->idr_lock);
 	con = idr_find(&s->conn_idr, conid);
-	if (con && test_bit(CF_CONNECTED, &con->flags))
-		conn_get(con);
-	else
-		con = NULL;
+	if (con) {
+		if (!test_bit(CF_CONNECTED, &con->flags) ||
+		    !kref_get_unless_zero(&con->kref))
+			con = NULL;
+	}
 	spin_unlock_bh(&s->idr_lock);
 	return con;
 }
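
The lookup fix closes a use-after-free window: a plain refcount increment can resurrect an object whose count has already reached zero, so the get must be conditional (kref_get_unless_zero()) and must happen under the same lock that publishes and unpublishes the ID. The idiom with generic names (struct obj, OBJ_LIVE and the fields are illustrative):

	static struct obj *obj_lookup(struct idr *idr, spinlock_t *lock, int id)
	{
		struct obj *o;

		spin_lock_bh(lock);
		o = idr_find(idr, id);
		/* Refuse objects still published but already dying */
		if (o && (!test_bit(OBJ_LIVE, &o->flags) ||
			  !kref_get_unless_zero(&o->kref)))
			o = NULL;
		spin_unlock_bh(lock);
		return o;	/* caller now owns one reference, or NULL */
	}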
@@ -183,35 +184,28 @@ static void tipc_register_callbacks(struct socket *sock, struct tipc_conn *con)
 	write_unlock_bh(&sk->sk_callback_lock);
 }
 
-static void tipc_unregister_callbacks(struct tipc_conn *con)
-{
-	struct sock *sk = con->sock->sk;
-
-	write_lock_bh(&sk->sk_callback_lock);
-	sk->sk_user_data = NULL;
-	write_unlock_bh(&sk->sk_callback_lock);
-}
-
 static void tipc_close_conn(struct tipc_conn *con)
 {
 	struct tipc_server *s = con->server;
+	struct sock *sk = con->sock->sk;
+	bool disconnect = false;
 
-	if (test_and_clear_bit(CF_CONNECTED, &con->flags)) {
-		if (con->sock)
-			tipc_unregister_callbacks(con);
-
+	write_lock_bh(&sk->sk_callback_lock);
+	disconnect = test_and_clear_bit(CF_CONNECTED, &con->flags);
+	if (disconnect) {
+		sk->sk_user_data = NULL;
 		if (con->conid)
 			s->tipc_conn_release(con->conid, con->usr_data);
-
-		/* We shouldn't flush pending works as we may be in the
-		 * thread. In fact the races with pending rx/tx work structs
-		 * are harmless for us here as we have already deleted this
-		 * connection from server connection list.
-		 */
-		if (con->sock)
-			kernel_sock_shutdown(con->sock, SHUT_RDWR);
-		conn_put(con);
 	}
+	write_unlock_bh(&sk->sk_callback_lock);
+
+	/* Handle concurrent calls from sending and receiving threads */
+	if (!disconnect)
+		return;
+
+	/* Don't flush pending works; just let them expire */
+	kernel_sock_shutdown(con->sock, SHUT_RDWR);
+	conn_put(con);
 }
 
 static struct tipc_conn *tipc_alloc_conn(struct tipc_server *s)
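
tipc_close_conn() is now safe against concurrent callers: test_and_clear_bit() is atomic, so of any number of racing threads exactly one sees CF_CONNECTED set and performs the teardown, and taking sk_callback_lock around it additionally orders the clearing of sk_user_data against the socket callbacks. The once-only guarantee in a runnable toy (C11 atomics standing in for the kernel bitops):

	#include <stdatomic.h>
	#include <stdio.h>

	static atomic_flag closed = ATOMIC_FLAG_INIT;

	/* Every racing caller runs this; exactly one does the teardown. */
	static void close_conn(const char *who)
	{
		if (atomic_flag_test_and_set(&closed)) {
			printf("%s: already closed, backing off\n", who);
			return;
		}
		printf("%s: doing the one-time teardown\n", who);
	}

	int main(void)
	{
		close_conn("rx thread");
		close_conn("tx thread");   /* loses the race, returns early */
		return 0;
	}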
@@ -248,9 +242,10 @@ static struct tipc_conn *tipc_alloc_conn(struct tipc_server *s)
 
 static int tipc_receive_from_sock(struct tipc_conn *con)
 {
-	struct msghdr msg = {};
 	struct tipc_server *s = con->server;
+	struct sock *sk = con->sock->sk;
 	struct sockaddr_tipc addr;
+	struct msghdr msg = {};
 	struct kvec iov;
 	void *buf;
 	int ret;
@@ -271,12 +266,15 @@ static int tipc_receive_from_sock(struct tipc_conn *con)
 		goto out_close;
 	}
 
-	s->tipc_conn_recvmsg(sock_net(con->sock->sk), con->conid, &addr,
-			     con->usr_data, buf, ret);
-
+	read_lock_bh(&sk->sk_callback_lock);
+	if (test_bit(CF_CONNECTED, &con->flags))
+		ret = s->tipc_conn_recvmsg(sock_net(con->sock->sk), con->conid,
+					   &addr, con->usr_data, buf, ret);
+	read_unlock_bh(&sk->sk_callback_lock);
 	kmem_cache_free(s->rcvbuf_cache, buf);
-
-	return 0;
+	if (ret < 0)
+		tipc_conn_terminate(s, con->conid);
+	return ret;
 
 out_close:
 	if (ret != -EWOULDBLOCK)
@@ -489,8 +487,8 @@ void tipc_conn_terminate(struct tipc_server *s, int conid)
 	}
 }
 
-bool tipc_topsrv_kern_subscr(struct net *net, u32 port, u32 type,
-			     u32 lower, u32 upper, int *conid)
+bool tipc_topsrv_kern_subscr(struct net *net, u32 port, u32 type, u32 lower,
+			     u32 upper, u32 filter, int *conid)
 {
 	struct tipc_subscriber *scbr;
 	struct tipc_subscr sub;
@@ -501,7 +499,7 @@ bool tipc_topsrv_kern_subscr(struct net *net, u32 port, u32 type,
 	sub.seq.lower = lower;
 	sub.seq.upper = upper;
 	sub.timeout = TIPC_WAIT_FOREVER;
-	sub.filter = TIPC_SUB_PORTS;
+	sub.filter = filter;
 	*(u32 *)&sub.usr_handle = port;
 
 	con = tipc_alloc_conn(tipc_topsrv(net));
@@ -525,11 +523,17 @@ bool tipc_topsrv_kern_subscr(struct net *net, u32 port, u32 type,
 void tipc_topsrv_kern_unsubscr(struct net *net, int conid)
 {
 	struct tipc_conn *con;
+	struct tipc_server *srv;
 
 	con = tipc_conn_lookup(tipc_topsrv(net), conid);
 	if (!con)
 		return;
-	tipc_close_conn(con);
+
+	test_and_clear_bit(CF_CONNECTED, &con->flags);
+	srv = con->server;
+	if (con->conid)
+		srv->tipc_conn_release(con->conid, con->usr_data);
+	conn_put(con);
 	conn_put(con);
 }
 
diff --git a/net/tipc/server.h b/net/tipc/server.h
index 2113c9192633..64df7513cd70 100644
--- a/net/tipc/server.h
+++ b/net/tipc/server.h
@@ -41,6 +41,9 @@
 #include <net/net_namespace.h>
 
 #define TIPC_SERVER_NAME_LEN	32
+#define TIPC_SUB_CLUSTER_SCOPE  0x20
+#define TIPC_SUB_NODE_SCOPE     0x40
+#define TIPC_SUB_NO_STATUS      0x80
 
 /**
  * struct tipc_server - TIPC server structure
@@ -71,9 +74,9 @@ struct tipc_server {
 	int max_rcvbuf_size;
 	void *(*tipc_conn_new)(int conid);
 	void (*tipc_conn_release)(int conid, void *usr_data);
-	void (*tipc_conn_recvmsg)(struct net *net, int conid,
-				  struct sockaddr_tipc *addr, void *usr_data,
-				  void *buf, size_t len);
+	int (*tipc_conn_recvmsg)(struct net *net, int conid,
+				 struct sockaddr_tipc *addr, void *usr_data,
+				 void *buf, size_t len);
 	struct sockaddr_tipc *saddr;
 	char name[TIPC_SERVER_NAME_LEN];
 	int imp;
@@ -83,8 +86,8 @@ struct tipc_server {
 int tipc_conn_sendmsg(struct tipc_server *s, int conid,
 		      struct sockaddr_tipc *addr, void *data, size_t len);
 
-bool tipc_topsrv_kern_subscr(struct net *net, u32 port, u32 type,
-			     u32 lower, u32 upper, int *conid);
+bool tipc_topsrv_kern_subscr(struct net *net, u32 port, u32 type, u32 lower,
+			     u32 upper, u32 filter, int *conid);
 void tipc_topsrv_kern_unsubscr(struct net *net, int conid);
 
 /**
diff --git a/net/tipc/socket.c b/net/tipc/socket.c
index 2aa46e8cd8fe..163f3a547501 100644
--- a/net/tipc/socket.c
+++ b/net/tipc/socket.c
@@ -116,6 +116,7 @@ struct tipc_sock {
 	struct tipc_mc_method mc_method;
 	struct rcu_head rcu;
 	struct tipc_group *group;
+	bool group_is_open;
 };
 
 static int tipc_sk_backlog_rcv(struct sock *sk, struct sk_buff *skb);
@@ -715,7 +716,6 @@ static __poll_t tipc_poll(struct file *file, struct socket *sock,
 {
 	struct sock *sk = sock->sk;
 	struct tipc_sock *tsk = tipc_sk(sk);
-	struct tipc_group *grp = tsk->group;
 	__poll_t revents = 0;
 
 	sock_poll_wait(file, sk_sleep(sk), wait);
@@ -736,9 +736,8 @@ static __poll_t tipc_poll(struct file *file, struct socket *sock,
 			revents |= POLLIN | POLLRDNORM;
 		break;
 	case TIPC_OPEN:
-		if (!grp || tipc_group_size(grp))
-			if (!tsk->cong_link_cnt)
-				revents |= POLLOUT;
+		if (tsk->group_is_open && !tsk->cong_link_cnt)
+			revents |= POLLOUT;
 		if (!tipc_sk_type_connectionless(sk))
 			break;
 		if (skb_queue_empty(&sk->sk_receive_queue))
@@ -772,7 +771,6 @@ static int tipc_sendmcast(struct  socket *sock, struct tipc_name_seq *seq,
 	struct net *net = sock_net(sk);
 	int mtu = tipc_bcast_get_mtu(net);
 	struct tipc_mc_method *method = &tsk->mc_method;
-	u32 domain = addr_domain(net, TIPC_CLUSTER_SCOPE);
 	struct sk_buff_head pkts;
 	struct tipc_nlist dsts;
 	int rc;
@@ -788,7 +786,7 @@ static int tipc_sendmcast(struct  socket *sock, struct tipc_name_seq *seq,
 	/* Lookup destination nodes */
 	tipc_nlist_init(&dsts, tipc_own_addr(net));
 	tipc_nametbl_lookup_dst_nodes(net, seq->type, seq->lower,
-				      seq->upper, domain, &dsts);
+				      seq->upper, &dsts);
 	if (!dsts.local && !dsts.remote)
 		return -EHOSTUNREACH;
 
@@ -928,21 +926,22 @@ static int tipc_send_group_anycast(struct socket *sock, struct msghdr *m,
 	struct list_head *cong_links = &tsk->cong_links;
 	int blks = tsk_blocks(GROUP_H_SIZE + dlen);
 	struct tipc_group *grp = tsk->group;
+	struct tipc_msg *hdr = &tsk->phdr;
 	struct tipc_member *first = NULL;
 	struct tipc_member *mbr = NULL;
 	struct net *net = sock_net(sk);
 	u32 node, port, exclude;
-	u32 type, inst, domain;
 	struct list_head dsts;
+	u32 type, inst, scope;
 	int lookups = 0;
 	int dstcnt, rc;
 	bool cong;
 
 	INIT_LIST_HEAD(&dsts);
 
-	type = dest->addr.name.name.type;
+	type = msg_nametype(hdr);
 	inst = dest->addr.name.name.instance;
-	domain = addr_domain(net, dest->scope);
+	scope = msg_lookup_scope(hdr);
 	exclude = tipc_group_exclude(grp);
 
 	while (++lookups < 4) {
@@ -950,7 +949,7 @@ static int tipc_send_group_anycast(struct socket *sock, struct msghdr *m,
 
 		/* Look for a non-congested destination member, if any */
 		while (1) {
-			if (!tipc_nametbl_lookup(net, type, inst, domain, &dsts,
+			if (!tipc_nametbl_lookup(net, type, inst, scope, &dsts,
 						 &dstcnt, exclude, false))
 				return -EHOSTUNREACH;
 			tipc_dest_pop(&dsts, &node, &port);
@@ -1079,22 +1078,23 @@ static int tipc_send_group_mcast(struct socket *sock, struct msghdr *m,
 {
 	struct sock *sk = sock->sk;
 	DECLARE_SOCKADDR(struct sockaddr_tipc *, dest, m->msg_name);
-	struct tipc_name_seq *seq = &dest->addr.nameseq;
 	struct tipc_sock *tsk = tipc_sk(sk);
 	struct tipc_group *grp = tsk->group;
+	struct tipc_msg *hdr = &tsk->phdr;
 	struct net *net = sock_net(sk);
-	u32 domain, exclude, dstcnt;
+	u32 type, inst, scope, exclude;
 	struct list_head dsts;
+	u32 dstcnt;
 
 	INIT_LIST_HEAD(&dsts);
 
-	if (seq->lower != seq->upper)
-		return -ENOTSUPP;
-
-	domain = addr_domain(net, dest->scope);
+	type = msg_nametype(hdr);
+	inst = dest->addr.name.name.instance;
+	scope = msg_lookup_scope(hdr);
 	exclude = tipc_group_exclude(grp);
-	if (!tipc_nametbl_lookup(net, seq->type, seq->lower, domain,
-				 &dsts, &dstcnt, exclude, true))
+
+	if (!tipc_nametbl_lookup(net, type, inst, scope, &dsts,
+				 &dstcnt, exclude, true))
 		return -EHOSTUNREACH;
 
 	if (dstcnt == 1) {
@@ -1116,24 +1116,29 @@ static int tipc_send_group_mcast(struct socket *sock, struct msghdr *m,
 void tipc_sk_mcast_rcv(struct net *net, struct sk_buff_head *arrvq,
 		       struct sk_buff_head *inputq)
 {
-	u32 scope = TIPC_CLUSTER_SCOPE;
 	u32 self = tipc_own_addr(net);
+	u32 type, lower, upper, scope;
 	struct sk_buff *skb, *_skb;
-	u32 lower = 0, upper = ~0;
-	struct sk_buff_head tmpq;
 	u32 portid, oport, onode;
+	struct sk_buff_head tmpq;
 	struct list_head dports;
-	struct tipc_msg *msg;
-	int user, mtyp, hsz;
+	struct tipc_msg *hdr;
+	int user, mtyp, hlen;
+	bool exact;
 
 	__skb_queue_head_init(&tmpq);
 	INIT_LIST_HEAD(&dports);
 
 	skb = tipc_skb_peek(arrvq, &inputq->lock);
 	for (; skb; skb = tipc_skb_peek(arrvq, &inputq->lock)) {
-		msg = buf_msg(skb);
-		user = msg_user(msg);
-		mtyp = msg_type(msg);
+		hdr = buf_msg(skb);
+		user = msg_user(hdr);
+		mtyp = msg_type(hdr);
+		hlen = skb_headroom(skb) + msg_hdr_sz(hdr);
+		oport = msg_origport(hdr);
+		onode = msg_orignode(hdr);
+		type = msg_nametype(hdr);
+
 		if (mtyp == TIPC_GRP_UCAST_MSG || user == GROUP_PROTOCOL) {
 			spin_lock_bh(&inputq->lock);
 			if (skb_peek(arrvq) == skb) {
@@ -1144,21 +1149,31 @@ void tipc_sk_mcast_rcv(struct net *net, struct sk_buff_head *arrvq,
 			spin_unlock_bh(&inputq->lock);
 			continue;
 		}
-		hsz = skb_headroom(skb) + msg_hdr_sz(msg);
-		oport = msg_origport(msg);
-		onode = msg_orignode(msg);
-		if (onode == self)
-			scope = TIPC_NODE_SCOPE;
-
-		/* Create destination port list and message clones: */
-		if (!msg_in_group(msg)) {
-			lower = msg_namelower(msg);
-			upper = msg_nameupper(msg);
+
+		/* Group messages require exact scope match */
+		if (msg_in_group(hdr)) {
+			lower = 0;
+			upper = ~0;
+			scope = msg_lookup_scope(hdr);
+			exact = true;
+		} else {
+			/* TIPC_NODE_SCOPE means "any scope" in this context */
+			if (onode == self)
+				scope = TIPC_NODE_SCOPE;
+			else
+				scope = TIPC_CLUSTER_SCOPE;
+			exact = false;
+			lower = msg_namelower(hdr);
+			upper = msg_nameupper(hdr);
 		}
-		tipc_nametbl_mc_translate(net, msg_nametype(msg), lower, upper,
-					  scope, &dports);
+
+		/* Create destination port list: */
+		tipc_nametbl_mc_lookup(net, type, lower, upper,
+				       scope, exact, &dports);
+
+		/* Clone message per destination */
 		while (tipc_dest_pop(&dports, NULL, &portid)) {
-			_skb = __pskb_copy(skb, hsz, GFP_ATOMIC);
+			_skb = __pskb_copy(skb, hlen, GFP_ATOMIC);
 			if (_skb) {
 				msg_set_destport(buf_msg(_skb), portid);
 				__skb_queue_tail(&tmpq, _skb);
@@ -1933,8 +1948,7 @@ static void tipc_sk_proto_rcv(struct sock *sk,
 		break;
 	case TOP_SRV:
 		tipc_group_member_evt(tsk->group, &wakeup, &sk->sk_rcvbuf,
-				      skb, inputq, xmitq);
-		skb = NULL;
+				      hdr, inputq, xmitq);
 		break;
 	default:
 		break;
@@ -2640,9 +2654,7 @@ void tipc_sk_reinit(struct net *net)
 	rhashtable_walk_enter(&tn->sk_rht, &iter);
 
 	do {
-		tsk = ERR_PTR(rhashtable_walk_start(&iter));
-		if (IS_ERR(tsk))
-			goto walk_stop;
+		rhashtable_walk_start(&iter);
 
 		while ((tsk = rhashtable_walk_next(&iter)) && !IS_ERR(tsk)) {
 			spin_lock_bh(&tsk->sk.sk_lock.slock);
@@ -2651,7 +2663,7 @@ void tipc_sk_reinit(struct net *net)
 			msg_set_orignode(msg, tn->own_addr);
 			spin_unlock_bh(&tsk->sk.sk_lock.slock);
 		}
-walk_stop:
+
 		rhashtable_walk_stop(&iter);
 	} while (tsk == ERR_PTR(-EAGAIN));
 }
@@ -2734,7 +2746,6 @@ void tipc_sk_rht_destroy(struct net *net)
 static int tipc_sk_join(struct tipc_sock *tsk, struct tipc_group_req *mreq)
 {
 	struct net *net = sock_net(&tsk->sk);
-	u32 domain = addr_domain(net, mreq->scope);
 	struct tipc_group *grp = tsk->group;
 	struct tipc_msg *hdr = &tsk->phdr;
 	struct tipc_name_seq seq;
@@ -2742,9 +2753,11 @@ static int tipc_sk_join(struct tipc_sock *tsk, struct tipc_group_req *mreq)
 
 	if (mreq->type < TIPC_RESERVED_TYPES)
 		return -EACCES;
+	if (mreq->scope > TIPC_NODE_SCOPE)
+		return -EINVAL;
 	if (grp)
 		return -EACCES;
-	grp = tipc_group_create(net, tsk->portid, mreq);
+	grp = tipc_group_create(net, tsk->portid, mreq, &tsk->group_is_open);
 	if (!grp)
 		return -ENOMEM;
 	tsk->group = grp;
@@ -2754,16 +2767,17 @@ static int tipc_sk_join(struct tipc_sock *tsk, struct tipc_group_req *mreq)
 	seq.type = mreq->type;
 	seq.lower = mreq->instance;
 	seq.upper = seq.lower;
-	tipc_nametbl_build_group(net, grp, mreq->type, domain);
+	tipc_nametbl_build_group(net, grp, mreq->type, mreq->scope);
 	rc = tipc_sk_publish(tsk, mreq->scope, &seq);
 	if (rc) {
 		tipc_group_delete(net, grp);
 		tsk->group = NULL;
+		return rc;
 	}
-
-	/* Eliminate any risk that a broadcast overtakes the sent JOIN */
+	/* Eliminate any risk that a broadcast overtakes sent JOINs */
 	tsk->mc_method.rcast = true;
 	tsk->mc_method.mandatory = true;
+	tipc_group_join(net, grp, &tsk->sk.sk_rcvbuf);
 	return rc;
 }
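
Taken together, the net/tipc/socket.c hunks above replace pre-computed domain addresses with a lookup scope carried in the message header: senders read the service type and scope directly from tsk->phdr via msg_nametype() and msg_lookup_scope(), the name-table lookups take that scope plus an exact-match flag for group traffic, and tipc_sk_join() now rejects out-of-range mreq->scope values up front. A minimal sketch of the receive-side scope selection; only the helper name pick_mcast_scope() is invented here, everything else comes from the diff:

static u32 pick_mcast_scope(struct tipc_msg *hdr, u32 onode, u32 self,
			    bool *exact)
{
	/* Group messages must match the publication scope exactly */
	if (msg_in_group(hdr)) {
		*exact = true;
		return msg_lookup_scope(hdr);
	}

	/* For legacy multicast, TIPC_NODE_SCOPE doubles as "any scope"
	 * when the sender is local; otherwise stay cluster-wide.
	 */
	*exact = false;
	return (onode == self) ? TIPC_NODE_SCOPE : TIPC_CLUSTER_SCOPE;
}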
 
diff --git a/net/tipc/subscr.c b/net/tipc/subscr.c
index 251065dfd8df..68e26470c516 100644
--- a/net/tipc/subscr.c
+++ b/net/tipc/subscr.c
@@ -118,15 +118,19 @@ void tipc_subscrp_convert_seq(struct tipc_name_seq *in, int swap,
 
 void tipc_subscrp_report_overlap(struct tipc_subscription *sub, u32 found_lower,
 				 u32 found_upper, u32 event, u32 port_ref,
-				 u32 node, int must)
+				 u32 node, u32 scope, int must)
 {
+	u32 filter = htohl(sub->evt.s.filter, sub->swap);
 	struct tipc_name_seq seq;
 
 	tipc_subscrp_convert_seq(&sub->evt.s.seq, sub->swap, &seq);
 	if (!tipc_subscrp_check_overlap(&seq, found_lower, found_upper))
 		return;
-	if (!must &&
-	    !(htohl(sub->evt.s.filter, sub->swap) & TIPC_SUB_PORTS))
+	if (!must && !(filter & TIPC_SUB_PORTS))
+		return;
+	if (filter & TIPC_SUB_CLUSTER_SCOPE && scope == TIPC_NODE_SCOPE)
+		return;
+	if (filter & TIPC_SUB_NODE_SCOPE && scope != TIPC_NODE_SCOPE)
 		return;
 
 	tipc_subscrp_send_event(sub, found_lower, found_upper, event, port_ref,
@@ -285,21 +289,21 @@ static struct tipc_subscription *tipc_subscrp_create(struct net *net,
 	return sub;
 }
 
-static void tipc_subscrp_subscribe(struct net *net, struct tipc_subscr *s,
-				   struct tipc_subscriber *subscriber, int swap)
+static int tipc_subscrp_subscribe(struct net *net, struct tipc_subscr *s,
+				  struct tipc_subscriber *subscriber, int swap,
+				  bool status)
 {
-	struct tipc_net *tn = net_generic(net, tipc_net_id);
 	struct tipc_subscription *sub = NULL;
 	u32 timeout;
 
 	sub = tipc_subscrp_create(net, s, swap);
 	if (!sub)
-		return tipc_conn_terminate(tn->topsrv, subscriber->conid);
+		return -1;
 
 	spin_lock_bh(&subscriber->lock);
 	list_add(&sub->subscrp_list, &subscriber->subscrp_list);
 	sub->subscriber = subscriber;
-	tipc_nametbl_subscribe(sub);
+	tipc_nametbl_subscribe(sub, status);
 	tipc_subscrb_get(subscriber);
 	spin_unlock_bh(&subscriber->lock);
 
@@ -308,6 +312,7 @@ static void tipc_subscrp_subscribe(struct net *net, struct tipc_subscr *s,
 
 	if (timeout != TIPC_WAIT_FOREVER)
 		mod_timer(&sub->timer, jiffies + msecs_to_jiffies(timeout));
+	return 0;
 }
 
 /* Handle one termination request for the subscriber */
@@ -317,12 +322,13 @@ static void tipc_subscrb_release_cb(int conid, void *usr_data)
 }
 
 /* Handle one request to create a new subscription for the subscriber */
-static void tipc_subscrb_rcv_cb(struct net *net, int conid,
-				struct sockaddr_tipc *addr, void *usr_data,
-				void *buf, size_t len)
+static int tipc_subscrb_rcv_cb(struct net *net, int conid,
+			       struct sockaddr_tipc *addr, void *usr_data,
+			       void *buf, size_t len)
 {
 	struct tipc_subscriber *subscriber = usr_data;
 	struct tipc_subscr *s = (struct tipc_subscr *)buf;
+	bool status;
 	int swap;
 
 	/* Determine subscriber's endianness */
@@ -332,10 +338,11 @@ static void tipc_subscrb_rcv_cb(struct net *net, int conid,
 	/* Detect & process a subscription cancellation request */
 	if (s->filter & htohl(TIPC_SUB_CANCEL, swap)) {
 		s->filter &= ~htohl(TIPC_SUB_CANCEL, swap);
-		return tipc_subscrp_cancel(s, subscriber);
+		tipc_subscrp_cancel(s, subscriber);
+		return 0;
 	}
-
-	tipc_subscrp_subscribe(net, s, subscriber, swap);
+	status = !(s->filter & htohl(TIPC_SUB_NO_STATUS, swap));
+	return tipc_subscrp_subscribe(net, s, subscriber, swap, status);
 }
 
 /* Handle one request to establish a new subscriber */
diff --git a/net/tipc/subscr.h b/net/tipc/subscr.h
index ee52957dc952..f3edca775d9f 100644
--- a/net/tipc/subscr.h
+++ b/net/tipc/subscr.h
@@ -71,7 +71,7 @@ int tipc_subscrp_check_overlap(struct tipc_name_seq *seq, u32 found_lower,
 			       u32 found_upper);
 void tipc_subscrp_report_overlap(struct tipc_subscription *sub,
 				 u32 found_lower, u32 found_upper, u32 event,
-				 u32 port_ref, u32 node, int must);
+				 u32 port_ref, u32 node, u32 scope, int must);
 void tipc_subscrp_convert_seq(struct tipc_name_seq *in, int swap,
 			      struct tipc_name_seq *out);
 u32 tipc_subscrp_convert_seq_type(u32 type, int swap);
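
The subscr.c and subscr.h hunks extend tipc_subscrp_report_overlap() with the publication scope so the new filter bits can suppress events the subscriber did not ask for, and tipc_subscrp_subscribe() now returns an error to its caller instead of terminating the connection itself. The filter predicate in isolation, assuming TIPC_SUB_CLUSTER_SCOPE and TIPC_SUB_NODE_SCOPE are the subscription filter bits used above:

static bool tipc_sub_scope_filtered(u32 filter, u32 scope)
{
	/* Subscriber asked for cluster-scope events only */
	if (filter & TIPC_SUB_CLUSTER_SCOPE && scope == TIPC_NODE_SCOPE)
		return true;
	/* Subscriber asked for node-local events only */
	if (filter & TIPC_SUB_NODE_SCOPE && scope != TIPC_NODE_SCOPE)
		return true;
	return false;
}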
diff --git a/net/tls/tls_sw.c b/net/tls/tls_sw.c
index 0a9b72fbd761..f26376e954ae 100644
--- a/net/tls/tls_sw.c
+++ b/net/tls/tls_sw.c
@@ -214,7 +214,11 @@ static int tls_do_encryption(struct tls_context *tls_ctx,
 	aead_request_set_ad(aead_req, TLS_AAD_SPACE_SIZE);
 	aead_request_set_crypt(aead_req, ctx->sg_aead_in, ctx->sg_aead_out,
 			       data_len, tls_ctx->iv);
-	rc = crypto_aead_encrypt(aead_req);
+
+	aead_request_set_callback(aead_req, CRYPTO_TFM_REQ_MAY_BACKLOG,
+				  crypto_req_done, &ctx->async_wait);
+
+	rc = crypto_wait_req(crypto_aead_encrypt(aead_req), &ctx->async_wait);
 
 	ctx->sg_encrypted_data[0].offset -= tls_ctx->prepend_size;
 	ctx->sg_encrypted_data[0].length += tls_ctx->prepend_size;
@@ -665,6 +669,8 @@ int tls_set_sw_offload(struct sock *sk, struct tls_context *ctx)
 		goto out;
 	}
 
+	crypto_init_wait(&sw_ctx->async_wait);
+
 	ctx->priv_ctx = (struct tls_offload_context *)sw_ctx;
 
 	crypto_info = &ctx->crypto_send;
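
The tls_sw.c change makes software TLS safe to use with asynchronous AEAD implementations: the request gets crypto_req_done() as its completion callback, and crypto_wait_req() converts an -EINPROGRESS or -EBUSY return into a sleep until that callback fires. The pattern in isolation, as a sketch (request setup elided; ctx->async_wait above plays the role of the on-stack wait here):

static int encrypt_and_wait(struct aead_request *req)
{
	DECLARE_CRYPTO_WAIT(wait);	/* initialized struct crypto_wait */

	aead_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
				  crypto_req_done, &wait);

	/* Returns the final status whether the cipher completed
	 * synchronously, was backlogged, or finished asynchronously.
	 */
	return crypto_wait_req(crypto_aead_encrypt(req), &wait);
}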
diff --git a/net/unix/af_unix.c b/net/unix/af_unix.c
index 6b7678df41e5..0214acbd6bff 100644
--- a/net/unix/af_unix.c
+++ b/net/unix/af_unix.c
@@ -2870,7 +2870,6 @@ static int unix_seq_open(struct inode *inode, struct file *file)
 }
 
 static const struct file_operations unix_seq_fops = {
-	.owner		= THIS_MODULE,
 	.open		= unix_seq_open,
 	.read		= seq_read,
 	.llseek		= seq_lseek,
diff --git a/net/wireless/ibss.c b/net/wireless/ibss.c
index 413d4f4e6334..a1d10993d08a 100644
--- a/net/wireless/ibss.c
+++ b/net/wireless/ibss.c
@@ -126,6 +126,11 @@ static int __cfg80211_join_ibss(struct cfg80211_registered_device *rdev,
 	wdev->ibss_fixed = params->channel_fixed;
 	wdev->ibss_dfs_possible = params->userspace_handles_dfs;
 	wdev->chandef = params->chandef;
+	if (connkeys) {
+		params->wep_keys = connkeys->params;
+		params->wep_tx_key = connkeys->def;
+	}
+
 #ifdef CONFIG_CFG80211_WEXT
 	wdev->wext.ibss.chandef = params->chandef;
 #endif
diff --git a/net/wireless/mlme.c b/net/wireless/mlme.c
index e7c64a8dce54..bbb9907bfa86 100644
--- a/net/wireless/mlme.c
+++ b/net/wireless/mlme.c
@@ -692,7 +692,7 @@ int cfg80211_mlme_mgmt_tx(struct cfg80211_registered_device *rdev,
 	return rdev_mgmt_tx(rdev, wdev, params, cookie);
 }
 
-bool cfg80211_rx_mgmt(struct wireless_dev *wdev, int freq, int sig_mbm,
+bool cfg80211_rx_mgmt(struct wireless_dev *wdev, int freq, int sig_dbm,
 		      const u8 *buf, size_t len, u32 flags)
 {
 	struct wiphy *wiphy = wdev->wiphy;
@@ -708,7 +708,7 @@ bool cfg80211_rx_mgmt(struct wireless_dev *wdev, int freq, int sig_mbm,
 		cpu_to_le16(IEEE80211_FCTL_FTYPE | IEEE80211_FCTL_STYPE);
 	u16 stype;
 
-	trace_cfg80211_rx_mgmt(wdev, freq, sig_mbm);
+	trace_cfg80211_rx_mgmt(wdev, freq, sig_dbm);
 	stype = (le16_to_cpu(mgmt->frame_control) & IEEE80211_FCTL_STYPE) >> 4;
 
 	if (!(stypes->rx & BIT(stype))) {
@@ -735,7 +735,7 @@ bool cfg80211_rx_mgmt(struct wireless_dev *wdev, int freq, int sig_mbm,
 
 		/* Indicate the received Action frame to user space */
 		if (nl80211_send_mgmt(rdev, wdev, reg->nlportid,
-				      freq, sig_mbm,
+				      freq, sig_dbm,
 				      buf, len, flags, GFP_ATOMIC))
 			continue;
 
diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c
index 542a4fc0a8d7..ab0c687d0c44 100644
--- a/net/wireless/nl80211.c
+++ b/net/wireless/nl80211.c
@@ -734,11 +734,12 @@ struct key_parse {
 	bool def_uni, def_multi;
 };
 
-static int nl80211_parse_key_new(struct nlattr *key, struct key_parse *k)
+static int nl80211_parse_key_new(struct genl_info *info, struct nlattr *key,
+				 struct key_parse *k)
 {
 	struct nlattr *tb[NL80211_KEY_MAX + 1];
 	int err = nla_parse_nested(tb, NL80211_KEY_MAX, key,
-				   nl80211_key_policy, NULL);
+				   nl80211_key_policy, info->extack);
 	if (err)
 		return err;
 
@@ -771,7 +772,8 @@ static int nl80211_parse_key_new(struct nlattr *key, struct key_parse *k)
 	if (tb[NL80211_KEY_TYPE]) {
 		k->type = nla_get_u32(tb[NL80211_KEY_TYPE]);
 		if (k->type < 0 || k->type >= NUM_NL80211_KEYTYPES)
-			return -EINVAL;
+			return genl_err_attr(info, -EINVAL,
+					     tb[NL80211_KEY_TYPE]);
 	}
 
 	if (tb[NL80211_KEY_DEFAULT_TYPES]) {
@@ -779,7 +781,8 @@ static int nl80211_parse_key_new(struct nlattr *key, struct key_parse *k)
 
 		err = nla_parse_nested(kdt, NUM_NL80211_KEY_DEFAULT_TYPES - 1,
 				       tb[NL80211_KEY_DEFAULT_TYPES],
-				       nl80211_key_default_policy, NULL);
+				       nl80211_key_default_policy,
+				       info->extack);
 		if (err)
 			return err;
 
@@ -820,8 +823,10 @@ static int nl80211_parse_key_old(struct genl_info *info, struct key_parse *k)
 
 	if (info->attrs[NL80211_ATTR_KEY_TYPE]) {
 		k->type = nla_get_u32(info->attrs[NL80211_ATTR_KEY_TYPE]);
-		if (k->type < 0 || k->type >= NUM_NL80211_KEYTYPES)
+		if (k->type < 0 || k->type >= NUM_NL80211_KEYTYPES) {
+			GENL_SET_ERR_MSG(info, "key type out of range");
 			return -EINVAL;
+		}
 	}
 
 	if (info->attrs[NL80211_ATTR_KEY_DEFAULT_TYPES]) {
@@ -850,31 +855,42 @@ static int nl80211_parse_key(struct genl_info *info, struct key_parse *k)
 	k->type = -1;
 
 	if (info->attrs[NL80211_ATTR_KEY])
-		err = nl80211_parse_key_new(info->attrs[NL80211_ATTR_KEY], k);
+		err = nl80211_parse_key_new(info, info->attrs[NL80211_ATTR_KEY], k);
 	else
 		err = nl80211_parse_key_old(info, k);
 
 	if (err)
 		return err;
 
-	if (k->def && k->defmgmt)
+	if (k->def && k->defmgmt) {
+		GENL_SET_ERR_MSG(info, "key with def && defmgmt is invalid");
 		return -EINVAL;
+	}
 
 	if (k->defmgmt) {
-		if (k->def_uni || !k->def_multi)
+		if (k->def_uni || !k->def_multi) {
+			GENL_SET_ERR_MSG(info, "defmgmt key must be mcast");
 			return -EINVAL;
+		}
 	}
 
 	if (k->idx != -1) {
 		if (k->defmgmt) {
-			if (k->idx < 4 || k->idx > 5)
+			if (k->idx < 4 || k->idx > 5) {
+				GENL_SET_ERR_MSG(info,
+						 "defmgmt key idx not 4 or 5");
 				return -EINVAL;
+			}
 		} else if (k->def) {
-			if (k->idx < 0 || k->idx > 3)
+			if (k->idx < 0 || k->idx > 3) {
+				GENL_SET_ERR_MSG(info, "def key idx not 0-3");
 				return -EINVAL;
+			}
 		} else {
-			if (k->idx < 0 || k->idx > 5)
+			if (k->idx < 0 || k->idx > 5) {
+				GENL_SET_ERR_MSG(info, "key idx not 0-5");
 				return -EINVAL;
+			}
 		}
 	}
 
@@ -883,8 +899,9 @@ static int nl80211_parse_key(struct genl_info *info, struct key_parse *k)
 
 static struct cfg80211_cached_keys *
 nl80211_parse_connkeys(struct cfg80211_registered_device *rdev,
-		       struct nlattr *keys, bool *no_ht)
+		       struct genl_info *info, bool *no_ht)
 {
+	struct nlattr *keys = info->attrs[NL80211_ATTR_KEYS];
 	struct key_parse parse;
 	struct nlattr *key;
 	struct cfg80211_cached_keys *result;
@@ -909,17 +926,22 @@ nl80211_parse_connkeys(struct cfg80211_registered_device *rdev,
 		memset(&parse, 0, sizeof(parse));
 		parse.idx = -1;
 
-		err = nl80211_parse_key_new(key, &parse);
+		err = nl80211_parse_key_new(info, key, &parse);
 		if (err)
 			goto error;
 		err = -EINVAL;
 		if (!parse.p.key)
 			goto error;
-		if (parse.idx < 0 || parse.idx > 3)
+		if (parse.idx < 0 || parse.idx > 3) {
+			GENL_SET_ERR_MSG(info, "key index out of range [0-3]");
 			goto error;
+		}
 		if (parse.def) {
-			if (def)
+			if (def) {
+				GENL_SET_ERR_MSG(info,
+						 "only one key can be default");
 				goto error;
+			}
 			def = 1;
 			result->def = parse.idx;
 			if (!parse.def_uni || !parse.def_multi)
@@ -932,6 +954,7 @@ nl80211_parse_connkeys(struct cfg80211_registered_device *rdev,
 			goto error;
 		if (parse.p.cipher != WLAN_CIPHER_SUITE_WEP40 &&
 		    parse.p.cipher != WLAN_CIPHER_SUITE_WEP104) {
+			GENL_SET_ERR_MSG(info, "connect key must be WEP");
 			err = -EINVAL;
 			goto error;
 		}
@@ -947,6 +970,7 @@ nl80211_parse_connkeys(struct cfg80211_registered_device *rdev,
 
 	if (result->def < 0) {
 		err = -EINVAL;
+		GENL_SET_ERR_MSG(info, "need a default/TX key");
 		goto error;
 	}
 
@@ -7820,6 +7844,11 @@ static int nl80211_send_bss(struct sk_buff *msg, struct netlink_callback *cb,
 			      intbss->ts_boottime, NL80211_BSS_PAD))
 		goto nla_put_failure;
 
+	if (!nl80211_put_signal(msg, intbss->pub.chains,
+				intbss->pub.chain_signal,
+				NL80211_BSS_CHAIN_SIGNAL))
+		goto nla_put_failure;
+
 	switch (rdev->wiphy.signal_type) {
 	case CFG80211_SIGNAL_TYPE_MBM:
 		if (nla_put_u32(msg, NL80211_BSS_SIGNAL_MBM, res->signal))
@@ -8616,9 +8645,7 @@ static int nl80211_join_ibss(struct sk_buff *skb, struct genl_info *info)
 	if (ibss.privacy && info->attrs[NL80211_ATTR_KEYS]) {
 		bool no_ht = false;
 
-		connkeys = nl80211_parse_connkeys(rdev,
-					  info->attrs[NL80211_ATTR_KEYS],
-					  &no_ht);
+		connkeys = nl80211_parse_connkeys(rdev, info, &no_ht);
 		if (IS_ERR(connkeys))
 			return PTR_ERR(connkeys);
 
@@ -9022,8 +9049,7 @@ static int nl80211_connect(struct sk_buff *skb, struct genl_info *info)
 	}
 
 	if (connect.privacy && info->attrs[NL80211_ATTR_KEYS]) {
-		connkeys = nl80211_parse_connkeys(rdev,
-					  info->attrs[NL80211_ATTR_KEYS], NULL);
+		connkeys = nl80211_parse_connkeys(rdev, info, NULL);
 		if (IS_ERR(connkeys))
 			return PTR_ERR(connkeys);
 	}
@@ -13948,7 +13974,7 @@ void nl80211_send_disconnected(struct cfg80211_registered_device *rdev,
 
 	if (nla_put_u32(msg, NL80211_ATTR_WIPHY, rdev->wiphy_idx) ||
 	    nla_put_u32(msg, NL80211_ATTR_IFINDEX, netdev->ifindex) ||
-	    (from_ap && reason &&
+	    (reason &&
 	     nla_put_u16(msg, NL80211_ATTR_REASON_CODE, reason)) ||
 	    (from_ap &&
 	     nla_put_flag(msg, NL80211_ATTR_DISCONNECTED_BY_AP)) ||
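
The nl80211 parsing changes thread struct genl_info into the key parsers so that validation failures carry a human-readable reason back to userspace in the extended ACK, either as a bare message (GENL_SET_ERR_MSG()) or pinned to the offending attribute (genl_err_attr()). The idiom as a sketch, with a hypothetical checker name:

static int check_key_type(struct genl_info *info, struct nlattr *attr)
{
	int type = nla_get_u32(attr);

	if (type < 0 || type >= NUM_NL80211_KEYTYPES) {
		/* The bad attribute is recorded in the extack and the
		 * error code is passed through unchanged.
		 */
		return genl_err_attr(info, -EINVAL, attr);
	}
	return 0;
}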
diff --git a/net/wireless/scan.c b/net/wireless/scan.c
index f6c5fe482506..d36c3eb7b931 100644
--- a/net/wireless/scan.c
+++ b/net/wireless/scan.c
@@ -981,6 +981,9 @@ cfg80211_bss_update(struct cfg80211_registered_device *rdev,
 		found->ts = tmp->ts;
 		found->ts_boottime = tmp->ts_boottime;
 		found->parent_tsf = tmp->parent_tsf;
+		found->pub.chains = tmp->pub.chains;
+		memcpy(found->pub.chain_signal, tmp->pub.chain_signal,
+		       IEEE80211_MAX_CHAINS);
 		ether_addr_copy(found->parent_bssid, tmp->parent_bssid);
 	} else {
 		struct cfg80211_internal_bss *new;
@@ -1233,6 +1236,8 @@ cfg80211_inform_bss_frame_data(struct wiphy *wiphy,
 	tmp.pub.capability = le16_to_cpu(mgmt->u.probe_resp.capab_info);
 	tmp.ts_boottime = data->boottime_ns;
 	tmp.parent_tsf = data->parent_tsf;
+	tmp.pub.chains = data->chains;
+	memcpy(tmp.pub.chain_signal, data->chain_signal, IEEE80211_MAX_CHAINS);
 	ether_addr_copy(tmp.parent_bssid, data->parent_bssid);
 
 	signal_valid = abs(data->chan->center_freq - channel->center_freq) <=
diff --git a/net/wireless/trace.h b/net/wireless/trace.h
index f3353fe5b35b..bcfedd39e7a3 100644
--- a/net/wireless/trace.h
+++ b/net/wireless/trace.h
@@ -2544,20 +2544,20 @@ DEFINE_EVENT(cfg80211_netdev_mac_evt, cfg80211_del_sta,
 );
 
 TRACE_EVENT(cfg80211_rx_mgmt,
-	TP_PROTO(struct wireless_dev *wdev, int freq, int sig_mbm),
-	TP_ARGS(wdev, freq, sig_mbm),
+	TP_PROTO(struct wireless_dev *wdev, int freq, int sig_dbm),
+	TP_ARGS(wdev, freq, sig_dbm),
 	TP_STRUCT__entry(
 		WDEV_ENTRY
 		__field(int, freq)
-		__field(int, sig_mbm)
+		__field(int, sig_dbm)
 	),
 	TP_fast_assign(
 		WDEV_ASSIGN;
 		__entry->freq = freq;
-		__entry->sig_mbm = sig_mbm;
+		__entry->sig_dbm = sig_dbm;
 	),
-	TP_printk(WDEV_PR_FMT ", freq: %d, sig mbm: %d",
-		  WDEV_PR_ARG, __entry->freq, __entry->sig_mbm)
+	TP_printk(WDEV_PR_FMT ", freq: %d, sig dbm: %d",
+		  WDEV_PR_ARG, __entry->freq, __entry->sig_dbm)
 );
 
 TRACE_EVENT(cfg80211_mgmt_tx_status,
diff --git a/net/wireless/wext-core.c b/net/wireless/wext-core.c
index 6cdb054484d6..9efbfc753347 100644
--- a/net/wireless/wext-core.c
+++ b/net/wireless/wext-core.c
@@ -1035,18 +1035,23 @@ static int ioctl_standard_call(struct net_device *	dev,
 }
 
 
-int wext_handle_ioctl(struct net *net, struct iwreq *iwr, unsigned int cmd,
-		      void __user *arg)
+int wext_handle_ioctl(struct net *net, unsigned int cmd, void __user *arg)
 {
 	struct iw_request_info info = { .cmd = cmd, .flags = 0 };
+	struct iwreq iwr;
 	int ret;
 
-	ret = wext_ioctl_dispatch(net, iwr, cmd, &info,
+	if (copy_from_user(&iwr, arg, sizeof(iwr)))
+		return -EFAULT;
+
+	iwr.ifr_name[sizeof(iwr.ifr_name) - 1] = 0;
+
+	ret = wext_ioctl_dispatch(net, &iwr, cmd, &info,
 				  ioctl_standard_call,
 				  ioctl_private_call);
 	if (ret >= 0 &&
 	    IW_IS_GET(cmd) &&
-	    copy_to_user(arg, iwr, sizeof(struct iwreq)))
+	    copy_to_user(arg, &iwr, sizeof(struct iwreq)))
 		return -EFAULT;
 
 	return ret;
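
wext_handle_ioctl() now owns the user-space copies itself: the whole struct iwreq is copied in, ifr_name is forcibly NUL-terminated, and results are only copied back for GET-style requests, so the dispatch path always operates on a bounded kernel buffer. The hardening steps in isolation:

	struct iwreq iwr;

	if (copy_from_user(&iwr, arg, sizeof(iwr)))
		return -EFAULT;

	/* Never trust a user-supplied string to be terminated */
	iwr.ifr_name[sizeof(iwr.ifr_name) - 1] = '\0';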
diff --git a/net/wireless/wext-proc.c b/net/wireless/wext-proc.c
index e98a01c1034f..5511f989ef47 100644
--- a/net/wireless/wext-proc.c
+++ b/net/wireless/wext-proc.c
@@ -133,7 +133,6 @@ static int seq_open_wireless(struct inode *inode, struct file *file)
 }
 
 static const struct file_operations wireless_seq_fops = {
-	.owner	 = THIS_MODULE,
 	.open    = seq_open_wireless,
 	.read    = seq_read,
 	.llseek  = seq_lseek,
diff --git a/net/xfrm/xfrm_device.c b/net/xfrm/xfrm_device.c
index ac9477189d1c..8e70291e586a 100644
--- a/net/xfrm/xfrm_device.c
+++ b/net/xfrm/xfrm_device.c
@@ -23,32 +23,114 @@
 #include <linux/notifier.h>
 
 #ifdef CONFIG_XFRM_OFFLOAD
-int validate_xmit_xfrm(struct sk_buff *skb, netdev_features_t features)
+struct sk_buff *validate_xmit_xfrm(struct sk_buff *skb, netdev_features_t features, bool *again)
 {
 	int err;
+	unsigned long flags;
 	struct xfrm_state *x;
+	struct sk_buff *skb2;
+	struct softnet_data *sd;
+	netdev_features_t esp_features = features;
 	struct xfrm_offload *xo = xfrm_offload(skb);
 
-	if (skb_is_gso(skb))
-		return 0;
+	if (!xo)
+		return skb;
 
-	if (xo) {
-		x = skb->sp->xvec[skb->sp->len - 1];
-		if (xo->flags & XFRM_GRO || x->xso.flags & XFRM_OFFLOAD_INBOUND)
-			return 0;
+	if (!(features & NETIF_F_HW_ESP))
+		esp_features = features & ~(NETIF_F_SG | NETIF_F_CSUM_MASK);
+
+	x = skb->sp->xvec[skb->sp->len - 1];
+	if (xo->flags & XFRM_GRO || x->xso.flags & XFRM_OFFLOAD_INBOUND)
+		return skb;
+
+	local_irq_save(flags);
+	sd = this_cpu_ptr(&softnet_data);
+	err = !skb_queue_empty(&sd->xfrm_backlog);
+	local_irq_restore(flags);
+
+	if (err) {
+		*again = true;
+		return skb;
+	}
+
+	if (skb_is_gso(skb)) {
+		struct net_device *dev = skb->dev;
+
+		if (unlikely(!x->xso.offload_handle || (x->xso.dev != dev))) {
+			struct sk_buff *segs;
+
+			/* Packet got rerouted, fixup features and segment it. */
+			esp_features = esp_features & ~(NETIF_F_HW_ESP
+							| NETIF_F_GSO_ESP);
+
+			segs = skb_gso_segment(skb, esp_features);
+			if (IS_ERR(segs)) {
+				kfree_skb(skb);
+				atomic_long_inc(&dev->tx_dropped);
+				return NULL;
+			} else {
+				consume_skb(skb);
+				skb = segs;
+			}
+		}
+	}
 
+	if (!skb->next) {
 		x->outer_mode->xmit(x, skb);
 
-		err = x->type_offload->xmit(x, skb, features);
+		xo->flags |= XFRM_DEV_RESUME;
+
+		err = x->type_offload->xmit(x, skb, esp_features);
 		if (err) {
+			if (err == -EINPROGRESS)
+				return NULL;
+
 			XFRM_INC_STATS(xs_net(x), LINUX_MIB_XFRMOUTSTATEPROTOERROR);
-			return err;
+			kfree_skb(skb);
+			return NULL;
 		}
 
 		skb_push(skb, skb->data - skb_mac_header(skb));
+
+		return skb;
 	}
 
-	return 0;
+	skb2 = skb;
+
+	do {
+		struct sk_buff *nskb = skb2->next;
+		skb2->next = NULL;
+
+		xo = xfrm_offload(skb2);
+		xo->flags |= XFRM_DEV_RESUME;
+
+		x->outer_mode->xmit(x, skb2);
+
+		err = x->type_offload->xmit(x, skb2, esp_features);
+		if (!err) {
+			skb2->next = nskb;
+		} else if (err != -EINPROGRESS) {
+			XFRM_INC_STATS(xs_net(x), LINUX_MIB_XFRMOUTSTATEPROTOERROR);
+			skb2->next = nskb;
+			kfree_skb_list(skb2);
+			return NULL;
+		} else {
+			if (skb == skb2)
+				skb = nskb;
+
+			if (!skb)
+				return NULL;
+
+			goto skip_push;
+		}
+
+		skb_push(skb2, skb2->data - skb_mac_header(skb2));
+
+skip_push:
+		skb2 = nskb;
+	} while (skb2);
+
+	return skb;
 }
 EXPORT_SYMBOL_GPL(validate_xmit_xfrm);
 
@@ -65,9 +147,9 @@ int xfrm_dev_state_add(struct net *net, struct xfrm_state *x,
 	if (!x->type_offload)
 		return -EINVAL;
 
-	/* We don't yet support UDP encapsulation, TFC padding and ESN. */
-	if (x->encap || x->tfcpad || (x->props.flags & XFRM_STATE_ESN))
-		return 0;
+	/* We don't yet support UDP encapsulation and TFC padding. */
+	if (x->encap || x->tfcpad)
+		return -EINVAL;
 
 	dev = dev_get_by_index(net, xuo->ifindex);
 	if (!dev) {
@@ -96,6 +178,13 @@ int xfrm_dev_state_add(struct net *net, struct xfrm_state *x,
 		return 0;
 	}
 
+	if (x->props.flags & XFRM_STATE_ESN &&
+	    !dev->xfrmdev_ops->xdo_dev_state_advance_esn) {
+		xso->dev = NULL;
+		dev_put(dev);
+		return -EINVAL;
+	}
+
 	xso->dev = dev;
 	xso->num_exthdrs = 1;
 	xso->flags = xuo->flags;
@@ -121,8 +210,8 @@ bool xfrm_dev_offload_ok(struct sk_buff *skb, struct xfrm_state *x)
 	if (!x->type_offload || x->encap)
 		return false;
 
-	if ((x->xso.offload_handle && (dev == dst->path->dev)) &&
-	     !dst->child->xfrm && x->type->get_mtu) {
+	if ((!dev || (x->xso.offload_handle && (dev == xfrm_dst_path(dst)->dev))) &&
+	     (!xdst->child->xfrm && x->type->get_mtu)) {
 		mtu = x->type->get_mtu(x, xdst->child_mtu_cached);
 
 		if (skb->len <= mtu)
@@ -141,19 +230,82 @@ ok:
 	return true;
 }
 EXPORT_SYMBOL_GPL(xfrm_dev_offload_ok);
+
+void xfrm_dev_resume(struct sk_buff *skb)
+{
+	struct net_device *dev = skb->dev;
+	int ret = NETDEV_TX_BUSY;
+	struct netdev_queue *txq;
+	struct softnet_data *sd;
+	unsigned long flags;
+
+	rcu_read_lock();
+	txq = netdev_pick_tx(dev, skb, NULL);
+
+	HARD_TX_LOCK(dev, txq, smp_processor_id());
+	if (!netif_xmit_frozen_or_stopped(txq))
+		skb = dev_hard_start_xmit(skb, dev, txq, &ret);
+	HARD_TX_UNLOCK(dev, txq);
+
+	if (!dev_xmit_complete(ret)) {
+		local_irq_save(flags);
+		sd = this_cpu_ptr(&softnet_data);
+		skb_queue_tail(&sd->xfrm_backlog, skb);
+		raise_softirq_irqoff(NET_TX_SOFTIRQ);
+		local_irq_restore(flags);
+	}
+	rcu_read_unlock();
+}
+EXPORT_SYMBOL_GPL(xfrm_dev_resume);
+
+void xfrm_dev_backlog(struct softnet_data *sd)
+{
+	struct sk_buff_head *xfrm_backlog = &sd->xfrm_backlog;
+	struct sk_buff_head list;
+	struct sk_buff *skb;
+
+	if (skb_queue_empty(xfrm_backlog))
+		return;
+
+	__skb_queue_head_init(&list);
+
+	spin_lock(&xfrm_backlog->lock);
+	skb_queue_splice_init(xfrm_backlog, &list);
+	spin_unlock(&xfrm_backlog->lock);
+
+	while (!skb_queue_empty(&list)) {
+		skb = __skb_dequeue(&list);
+		xfrm_dev_resume(skb);
+	}
+
+}
 #endif
 
-static int xfrm_dev_register(struct net_device *dev)
+static int xfrm_api_check(struct net_device *dev)
 {
-	if ((dev->features & NETIF_F_HW_ESP) && !dev->xfrmdev_ops)
-		return NOTIFY_BAD;
+#ifdef CONFIG_XFRM_OFFLOAD
 	if ((dev->features & NETIF_F_HW_ESP_TX_CSUM) &&
 	    !(dev->features & NETIF_F_HW_ESP))
 		return NOTIFY_BAD;
 
+	if ((dev->features & NETIF_F_HW_ESP) &&
+	    (!(dev->xfrmdev_ops &&
+	       dev->xfrmdev_ops->xdo_dev_state_add &&
+	       dev->xfrmdev_ops->xdo_dev_state_delete)))
+		return NOTIFY_BAD;
+#else
+	if (dev->features & (NETIF_F_HW_ESP | NETIF_F_HW_ESP_TX_CSUM))
+		return NOTIFY_BAD;
+#endif
+
 	return NOTIFY_DONE;
 }
 
+static int xfrm_dev_register(struct net_device *dev)
+{
+	return xfrm_api_check(dev);
+}
+
 static int xfrm_dev_unregister(struct net_device *dev)
 {
 	xfrm_policy_cache_flush();
@@ -162,16 +314,7 @@ static int xfrm_dev_unregister(struct net_device *dev)
 
 static int xfrm_dev_feat_change(struct net_device *dev)
 {
-	if ((dev->features & NETIF_F_HW_ESP) && !dev->xfrmdev_ops)
-		return NOTIFY_BAD;
-	else if (!(dev->features & NETIF_F_HW_ESP))
-		dev->xfrmdev_ops = NULL;
-
-	if ((dev->features & NETIF_F_HW_ESP_TX_CSUM) &&
-	    !(dev->features & NETIF_F_HW_ESP))
-		return NOTIFY_BAD;
-
-	return NOTIFY_DONE;
+	return xfrm_api_check(dev);
 }
 
 static int xfrm_dev_down(struct net_device *dev)
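
The xfrm_device.c rework lets ESP hardware offload, GSO and asynchronous crypto coexist: validate_xmit_xfrm() now returns the (possibly re-segmented) skb list instead of an error code, marks each packet XFRM_DEV_RESUME, treats -EINPROGRESS from the offload xmit as "in flight" rather than a failure, and sets *again while the per-CPU xfrm_backlog is non-empty so ordering is preserved. Completed packets re-enter transmission through xfrm_dev_resume(), and the NET_TX softirq drains deferrals via xfrm_dev_backlog(). A sketch of the completion-side contract; the handler name is hypothetical:

static void esp_offload_done(struct sk_buff *skb)
{
	struct xfrm_offload *xo = xfrm_offload(skb);

	if (xo && (xo->flags & XFRM_DEV_RESUME)) {
		/* Re-enter the stack: transmit now if the queue allows,
		 * otherwise park on the per-CPU xfrm_backlog for softirq.
		 */
		xfrm_dev_resume(skb);
	}
}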
diff --git a/net/xfrm/xfrm_input.c b/net/xfrm/xfrm_input.c
index 5b2409746ae0..1472c0857975 100644
--- a/net/xfrm/xfrm_input.c
+++ b/net/xfrm/xfrm_input.c
@@ -257,7 +257,6 @@ int xfrm_input(struct sk_buff *skb, int nexthdr, __be32 spi, int encap_type)
 
 		if (xo && (xo->flags & CRYPTO_DONE)) {
 			crypto_done = true;
-			x = xfrm_input_state(skb);
 			family = XFRM_SPI_SKB_CB(skb)->family;
 
 			if (!(xo->status & CRYPTO_SUCCESS)) {
diff --git a/net/xfrm/xfrm_output.c b/net/xfrm/xfrm_output.c
index 73ad8c8ef344..23468672a767 100644
--- a/net/xfrm/xfrm_output.c
+++ b/net/xfrm/xfrm_output.c
@@ -44,7 +44,7 @@ static int xfrm_skb_check_space(struct sk_buff *skb)
 
 static struct dst_entry *skb_dst_pop(struct sk_buff *skb)
 {
-	struct dst_entry *child = dst_clone(skb_dst(skb)->child);
+	struct dst_entry *child = dst_clone(xfrm_dst_child(skb_dst(skb)));
 
 	skb_dst_drop(skb);
 	return child;
diff --git a/net/xfrm/xfrm_policy.c b/net/xfrm/xfrm_policy.c
index bd6b0e7a0ee4..7a23078132cf 100644
--- a/net/xfrm/xfrm_policy.c
+++ b/net/xfrm/xfrm_policy.c
@@ -54,7 +54,7 @@ static struct xfrm_policy_afinfo const __rcu *xfrm_policy_afinfo[AF_INET6 + 1]
 static struct kmem_cache *xfrm_dst_cache __read_mostly;
 static __read_mostly seqcount_t xfrm_policy_hash_generation;
 
-static void xfrm_init_pmtu(struct dst_entry *dst);
+static void xfrm_init_pmtu(struct xfrm_dst **bundle, int nr);
 static int stale_bundle(struct dst_entry *dst);
 static int xfrm_bundle_ok(struct xfrm_dst *xdst);
 static void xfrm_policy_queue_process(struct timer_list *t);
@@ -1256,7 +1256,7 @@ EXPORT_SYMBOL(xfrm_policy_delete);
 
 int xfrm_sk_policy_insert(struct sock *sk, int dir, struct xfrm_policy *pol)
 {
-	struct net *net = xp_net(pol);
+	struct net *net = sock_net(sk);
 	struct xfrm_policy *old_pol;
 
 #ifdef CONFIG_XFRM_SUB_POLICY
@@ -1543,7 +1543,9 @@ static inline int xfrm_fill_dst(struct xfrm_dst *xdst, struct net_device *dev,
  */
 
 static struct dst_entry *xfrm_bundle_create(struct xfrm_policy *policy,
-					    struct xfrm_state **xfrm, int nx,
+					    struct xfrm_state **xfrm,
+					    struct xfrm_dst **bundle,
+					    int nx,
 					    const struct flowi *fl,
 					    struct dst_entry *dst)
 {
@@ -1551,8 +1553,8 @@ static struct dst_entry *xfrm_bundle_create(struct xfrm_policy *policy,
 	unsigned long now = jiffies;
 	struct net_device *dev;
 	struct xfrm_mode *inner_mode;
-	struct dst_entry *dst_prev = NULL;
-	struct dst_entry *dst0 = NULL;
+	struct xfrm_dst *xdst_prev = NULL;
+	struct xfrm_dst *xdst0 = NULL;
 	int i = 0;
 	int err;
 	int header_len = 0;
@@ -1578,13 +1580,14 @@ static struct dst_entry *xfrm_bundle_create(struct xfrm_policy *policy,
 			goto put_states;
 		}
 
-		if (!dst_prev)
-			dst0 = dst1;
+		bundle[i] = xdst;
+		if (!xdst_prev)
+			xdst0 = xdst;
 		else
 			/* Ref count is taken during xfrm_alloc_dst()
 			 * No need to do dst_clone() on dst1
 			 */
-			dst_prev->child = dst1;
+			xfrm_dst_set_child(xdst_prev, &xdst->u.dst);
 
 		if (xfrm[i]->sel.family == AF_UNSPEC) {
 			inner_mode = xfrm_ip2inner_mode(xfrm[i],
@@ -1621,8 +1624,7 @@ static struct dst_entry *xfrm_bundle_create(struct xfrm_policy *policy,
 		dst1->input = dst_discard;
 		dst1->output = inner_mode->afinfo->output;
 
-		dst1->next = dst_prev;
-		dst_prev = dst1;
+		xdst_prev = xdst;
 
 		header_len += xfrm[i]->props.header_len;
 		if (xfrm[i]->type->flags & XFRM_TYPE_NON_FRAGMENT)
@@ -1630,40 +1632,39 @@ static struct dst_entry *xfrm_bundle_create(struct xfrm_policy *policy,
 		trailer_len += xfrm[i]->props.trailer_len;
 	}
 
-	dst_prev->child = dst;
-	dst0->path = dst;
+	xfrm_dst_set_child(xdst_prev, dst);
+	xdst0->path = dst;
 
 	err = -ENODEV;
 	dev = dst->dev;
 	if (!dev)
 		goto free_dst;
 
-	xfrm_init_path((struct xfrm_dst *)dst0, dst, nfheader_len);
-	xfrm_init_pmtu(dst_prev);
-
-	for (dst_prev = dst0; dst_prev != dst; dst_prev = dst_prev->child) {
-		struct xfrm_dst *xdst = (struct xfrm_dst *)dst_prev;
+	xfrm_init_path(xdst0, dst, nfheader_len);
+	xfrm_init_pmtu(bundle, nx);
 
-		err = xfrm_fill_dst(xdst, dev, fl);
+	for (xdst_prev = xdst0; xdst_prev != (struct xfrm_dst *)dst;
+	     xdst_prev = (struct xfrm_dst *) xfrm_dst_child(&xdst_prev->u.dst)) {
+		err = xfrm_fill_dst(xdst_prev, dev, fl);
 		if (err)
 			goto free_dst;
 
-		dst_prev->header_len = header_len;
-		dst_prev->trailer_len = trailer_len;
-		header_len -= xdst->u.dst.xfrm->props.header_len;
-		trailer_len -= xdst->u.dst.xfrm->props.trailer_len;
+		xdst_prev->u.dst.header_len = header_len;
+		xdst_prev->u.dst.trailer_len = trailer_len;
+		header_len -= xdst_prev->u.dst.xfrm->props.header_len;
+		trailer_len -= xdst_prev->u.dst.xfrm->props.trailer_len;
 	}
 
 out:
-	return dst0;
+	return &xdst0->u.dst;
 
 put_states:
 	for (; i < nx; i++)
 		xfrm_state_put(xfrm[i]);
 free_dst:
-	if (dst0)
-		dst_release_immediate(dst0);
-	dst0 = ERR_PTR(err);
+	if (xdst0)
+		dst_release_immediate(&xdst0->u.dst);
+	xdst0 = ERR_PTR(err);
 	goto out;
 }
 
@@ -1807,7 +1808,7 @@ static bool xfrm_xdst_can_reuse(struct xfrm_dst *xdst,
 	for (i = 0; i < num; i++) {
 		if (!dst || dst->xfrm != xfrm[i])
 			return false;
-		dst = dst->child;
+		dst = xfrm_dst_child(dst);
 	}
 
 	return xfrm_bundle_ok(xdst);
@@ -1820,6 +1821,7 @@ xfrm_resolve_and_create_bundle(struct xfrm_policy **pols, int num_pols,
 {
 	struct net *net = xp_net(pols[0]);
 	struct xfrm_state *xfrm[XFRM_MAX_DEPTH];
+	struct xfrm_dst *bundle[XFRM_MAX_DEPTH];
 	struct xfrm_dst *xdst, *old;
 	struct dst_entry *dst;
 	int err;
@@ -1848,7 +1850,7 @@ xfrm_resolve_and_create_bundle(struct xfrm_policy **pols, int num_pols,
 
 	old = xdst;
 
-	dst = xfrm_bundle_create(pols[0], xfrm, err, fl, dst_orig);
+	dst = xfrm_bundle_create(pols[0], xfrm, bundle, err, fl, dst_orig);
 	if (IS_ERR(dst)) {
 		XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTBUNDLEGENERROR);
 		return ERR_CAST(dst);
@@ -1888,8 +1890,8 @@ static void xfrm_policy_queue_process(struct timer_list *t)
 	xfrm_decode_session(skb, &fl, dst->ops->family);
 	spin_unlock(&pq->hold_queue.lock);
 
-	dst_hold(dst->path);
-	dst = xfrm_lookup(net, dst->path, &fl, sk, 0);
+	dst_hold(xfrm_dst_path(dst));
+	dst = xfrm_lookup(net, xfrm_dst_path(dst), &fl, sk, 0);
 	if (IS_ERR(dst))
 		goto purge_queue;
 
@@ -1918,8 +1920,8 @@ static void xfrm_policy_queue_process(struct timer_list *t)
 		skb = __skb_dequeue(&list);
 
 		xfrm_decode_session(skb, &fl, skb_dst(skb)->ops->family);
-		dst_hold(skb_dst(skb)->path);
-		dst = xfrm_lookup(net, skb_dst(skb)->path, &fl, skb->sk, 0);
+		dst_hold(xfrm_dst_path(skb_dst(skb)));
+		dst = xfrm_lookup(net, xfrm_dst_path(skb_dst(skb)), &fl, skb->sk, 0);
 		if (IS_ERR(dst)) {
 			kfree_skb(skb);
 			continue;
@@ -2020,8 +2022,8 @@ static struct xfrm_dst *xfrm_create_dummy_bundle(struct net *net,
 	dst1->output = xdst_queue_output;
 
 	dst_hold(dst);
-	dst1->child = dst;
-	dst1->path = dst;
+	xfrm_dst_set_child(xdst, dst);
+	xdst->path = dst;
 
 	xfrm_init_path((struct xfrm_dst *)dst1, dst, 0);
 
@@ -2590,7 +2592,7 @@ static int stale_bundle(struct dst_entry *dst)
 
 void xfrm_dst_ifdown(struct dst_entry *dst, struct net_device *dev)
 {
-	while ((dst = dst->child) && dst->xfrm && dst->dev == dev) {
+	while ((dst = xfrm_dst_child(dst)) && dst->xfrm && dst->dev == dev) {
 		dst->dev = dev_net(dev)->loopback_dev;
 		dev_hold(dst->dev);
 		dev_put(dev);
@@ -2614,13 +2616,15 @@ static struct dst_entry *xfrm_negative_advice(struct dst_entry *dst)
 	return dst;
 }
 
-static void xfrm_init_pmtu(struct dst_entry *dst)
+static void xfrm_init_pmtu(struct xfrm_dst **bundle, int nr)
 {
-	do {
-		struct xfrm_dst *xdst = (struct xfrm_dst *)dst;
+	while (nr--) {
+		struct xfrm_dst *xdst = bundle[nr];
 		u32 pmtu, route_mtu_cached;
+		struct dst_entry *dst;
 
-		pmtu = dst_mtu(dst->child);
+		dst = &xdst->u.dst;
+		pmtu = dst_mtu(xfrm_dst_child(dst));
 		xdst->child_mtu_cached = pmtu;
 
 		pmtu = xfrm_state_mtu(dst->xfrm, pmtu);
@@ -2632,7 +2636,7 @@ static void xfrm_init_pmtu(struct dst_entry *dst)
 			pmtu = route_mtu_cached;
 
 		dst_metric_set(dst, RTAX_MTU, pmtu);
-	} while ((dst = dst->next));
+	}
 }
 
 /* Check that the bundle accepts the flow and its components are
@@ -2641,19 +2645,20 @@ static void xfrm_init_pmtu(struct dst_entry *dst)
 
 static int xfrm_bundle_ok(struct xfrm_dst *first)
 {
+	struct xfrm_dst *bundle[XFRM_MAX_DEPTH];
 	struct dst_entry *dst = &first->u.dst;
-	struct xfrm_dst *last;
+	struct xfrm_dst *xdst;
+	int start_from, nr;
 	u32 mtu;
 
-	if (!dst_check(dst->path, ((struct xfrm_dst *)dst)->path_cookie) ||
+	if (!dst_check(xfrm_dst_path(dst), ((struct xfrm_dst *)dst)->path_cookie) ||
 	    (dst->dev && !netif_running(dst->dev)))
 		return 0;
 
 	if (dst->flags & DST_XFRM_QUEUE)
 		return 1;
 
-	last = NULL;
-
+	start_from = nr = 0;
 	do {
 		struct xfrm_dst *xdst = (struct xfrm_dst *)dst;
 
@@ -2665,9 +2670,11 @@ static int xfrm_bundle_ok(struct xfrm_dst *first)
 		    xdst->policy_genid != atomic_read(&xdst->pols[0]->genid))
 			return 0;
 
-		mtu = dst_mtu(dst->child);
+		bundle[nr++] = xdst;
+
+		mtu = dst_mtu(xfrm_dst_child(dst));
 		if (xdst->child_mtu_cached != mtu) {
-			last = xdst;
+			start_from = nr;
 			xdst->child_mtu_cached = mtu;
 		}
 
@@ -2675,30 +2682,30 @@ static int xfrm_bundle_ok(struct xfrm_dst *first)
 			return 0;
 		mtu = dst_mtu(xdst->route);
 		if (xdst->route_mtu_cached != mtu) {
-			last = xdst;
+			start_from = nr;
 			xdst->route_mtu_cached = mtu;
 		}
 
-		dst = dst->child;
+		dst = xfrm_dst_child(dst);
 	} while (dst->xfrm);
 
-	if (likely(!last))
+	if (likely(!start_from))
 		return 1;
 
-	mtu = last->child_mtu_cached;
-	for (;;) {
-		dst = &last->u.dst;
+	xdst = bundle[start_from - 1];
+	mtu = xdst->child_mtu_cached;
+	while (start_from--) {
+		dst = &xdst->u.dst;
 
 		mtu = xfrm_state_mtu(dst->xfrm, mtu);
-		if (mtu > last->route_mtu_cached)
-			mtu = last->route_mtu_cached;
+		if (mtu > xdst->route_mtu_cached)
+			mtu = xdst->route_mtu_cached;
 		dst_metric_set(dst, RTAX_MTU, mtu);
-
-		if (last == first)
+		if (!start_from)
 			break;
 
-		last = (struct xfrm_dst *)last->u.dst.next;
-		last->child_mtu_cached = mtu;
+		xdst = bundle[start_from - 1];
+		xdst->child_mtu_cached = mtu;
 	}
 
 	return 1;
@@ -2706,22 +2713,20 @@ static int xfrm_bundle_ok(struct xfrm_dst *first)
 
 static unsigned int xfrm_default_advmss(const struct dst_entry *dst)
 {
-	return dst_metric_advmss(dst->path);
+	return dst_metric_advmss(xfrm_dst_path(dst));
 }
 
 static unsigned int xfrm_mtu(const struct dst_entry *dst)
 {
 	unsigned int mtu = dst_metric_raw(dst, RTAX_MTU);
 
-	return mtu ? : dst_mtu(dst->path);
+	return mtu ? : dst_mtu(xfrm_dst_path(dst));
 }
 
 static const void *xfrm_get_dst_nexthop(const struct dst_entry *dst,
 					const void *daddr)
 {
-	const struct dst_entry *path = dst->path;
-
-	for (; dst != path; dst = dst->child) {
+	while (dst->xfrm) {
 		const struct xfrm_state *xfrm = dst->xfrm;
 
 		if (xfrm->props.mode == XFRM_MODE_TRANSPORT)
@@ -2730,6 +2735,8 @@ static const void *xfrm_get_dst_nexthop(const struct dst_entry *dst,
 			daddr = xfrm->coaddr;
 		else if (!(xfrm->type->flags & XFRM_TYPE_LOCAL_COADDR))
 			daddr = &xfrm->id.daddr;
+
+		dst = xfrm_dst_child(dst);
 	}
 	return daddr;
 }
@@ -2738,7 +2745,7 @@ static struct neighbour *xfrm_neigh_lookup(const struct dst_entry *dst,
 					   struct sk_buff *skb,
 					   const void *daddr)
 {
-	const struct dst_entry *path = dst->path;
+	const struct dst_entry *path = xfrm_dst_path(dst);
 
 	if (!skb)
 		daddr = xfrm_get_dst_nexthop(dst, daddr);
@@ -2747,7 +2754,7 @@ static struct neighbour *xfrm_neigh_lookup(const struct dst_entry *dst,
 
 static void xfrm_confirm_neigh(const struct dst_entry *dst, const void *daddr)
 {
-	const struct dst_entry *path = dst->path;
+	const struct dst_entry *path = xfrm_dst_path(dst);
 
 	daddr = xfrm_get_dst_nexthop(dst, daddr);
 	path->ops->confirm_neigh(path, daddr);
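
The xfrm_policy.c hunks stop threading bundles through dst->child and dst->next and instead collect the constituent xfrm_dst entries into an on-stack array bounded by XFRM_MAX_DEPTH; the accessors xfrm_dst_child(), xfrm_dst_path() and xfrm_dst_set_child() hide whatever linkage remains, and xfrm_init_pmtu() and xfrm_bundle_ok() walk the array by index. Collecting a bundle with the new accessors, as a sketch (the collector name is invented):

static int xfrm_collect_bundle(struct dst_entry *dst,
			       struct xfrm_dst **bundle)
{
	int nr = 0;

	/* Every inner dst of a bundle carries an xfrm state; the
	 * plain route at the bottom terminates the walk.
	 */
	while (dst->xfrm) {
		bundle[nr++] = (struct xfrm_dst *)dst; /* nr < XFRM_MAX_DEPTH */
		dst = xfrm_dst_child(dst);
	}
	return nr;
}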
diff --git a/net/xfrm/xfrm_proc.c b/net/xfrm/xfrm_proc.c
index ba2b539879bc..6d5f85f4e672 100644
--- a/net/xfrm/xfrm_proc.c
+++ b/net/xfrm/xfrm_proc.c
@@ -71,7 +71,6 @@ static int xfrm_statistics_seq_open(struct inode *inode, struct file *file)
 }
 
 static const struct file_operations xfrm_statistics_seq_fops = {
-	.owner	 = THIS_MODULE,
 	.open	 = xfrm_statistics_seq_open,
 	.read	 = seq_read,
 	.llseek	 = seq_lseek,
diff --git a/net/xfrm/xfrm_replay.c b/net/xfrm/xfrm_replay.c
index 8b23c5bcf8e8..1d38c6acf8af 100644
--- a/net/xfrm/xfrm_replay.c
+++ b/net/xfrm/xfrm_replay.c
@@ -551,6 +551,8 @@ static void xfrm_replay_advance_esn(struct xfrm_state *x, __be32 net_seq)
 			bitnr = replay_esn->replay_window - (diff - pos);
 	}
 
+	xfrm_dev_state_advance_esn(x);
+
 	nr = bitnr >> 5;
 	bitnr = bitnr & 0x1F;
 	replay_esn->bmp[nr] |= (1U << bitnr);
@@ -666,7 +668,7 @@ static int xfrm_replay_overflow_offload_esn(struct xfrm_state *x, struct sk_buff
 		if (unlikely(oseq < replay_esn->oseq)) {
 			XFRM_SKB_CB(skb)->seq.output.hi = ++oseq_hi;
 			xo->seq.hi = oseq_hi;
-
+			replay_esn->oseq_hi = oseq_hi;
 			if (replay_esn->oseq_hi == 0) {
 				replay_esn->oseq--;
 				replay_esn->oseq_hi--;
@@ -678,7 +680,6 @@ static int xfrm_replay_overflow_offload_esn(struct xfrm_state *x, struct sk_buff
 		}
 
 		replay_esn->oseq = oseq;
-		replay_esn->oseq_hi = oseq_hi;
 
 		if (xfrm_aevent_is_on(net))
 			x->repl->notify(x, XFRM_REPLAY_UPDATE);
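
Two ESN fixes sit in xfrm_replay.c: xfrm_replay_advance_esn() now propagates window movement to the offloading device through xfrm_dev_state_advance_esn(), and xfrm_replay_overflow_offload_esn() records the incremented high word at the point where the low 32 bits wrap, instead of unconditionally rewriting it afterwards. The wrap handling, annotated:

	if (unlikely(oseq < replay_esn->oseq)) {
		/* The low 32 bits wrapped: bump and persist the high
		 * word exactly once, here.
		 */
		XFRM_SKB_CB(skb)->seq.output.hi = ++oseq_hi;
		xo->seq.hi = oseq_hi;
		replay_esn->oseq_hi = oseq_hi;
	}
	replay_esn->oseq = oseq;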
diff --git a/net/xfrm/xfrm_state.c b/net/xfrm/xfrm_state.c
index a3785f538018..54e21f19d722 100644
--- a/net/xfrm/xfrm_state.c
+++ b/net/xfrm/xfrm_state.c
@@ -2056,6 +2056,13 @@ int xfrm_user_policy(struct sock *sk, int optname, u8 __user *optval, int optlen
 	struct xfrm_mgr *km;
 	struct xfrm_policy *pol = NULL;
 
+	if (!optval && !optlen) {
+		xfrm_sk_policy_insert(sk, XFRM_POLICY_IN, NULL);
+		xfrm_sk_policy_insert(sk, XFRM_POLICY_OUT, NULL);
+		__sk_dst_reset(sk);
+		return 0;
+	}
+
 	if (optlen <= 0 || optlen > PAGE_SIZE)
 		return -EMSGSIZE;
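
xfrm_user_policy() now interprets a NULL optval with zero optlen as a request to clear both per-socket policies and reset the socket's cached route. From userspace this looks roughly like the sketch below; IP_XFRM_POLICY comes from linux/in.h, and IPV6_XFRM_POLICY is the IPv6 counterpart:

#include <sys/socket.h>
#include <linux/in.h>	/* IP_XFRM_POLICY */

static int clear_sk_xfrm_policy(int fd)
{
	/* NULL/0 now means "detach the per-socket policies" */
	return setsockopt(fd, IPPROTO_IP, IP_XFRM_POLICY, NULL, 0);
}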
 
diff --git a/samples/bpf/Makefile b/samples/bpf/Makefile
index adeaa1302f34..64335bb94f9f 100644
--- a/samples/bpf/Makefile
+++ b/samples/bpf/Makefile
@@ -12,6 +12,7 @@ hostprogs-y += tracex3
 hostprogs-y += tracex4
 hostprogs-y += tracex5
 hostprogs-y += tracex6
+hostprogs-y += tracex7
 hostprogs-y += test_probe_write_user
 hostprogs-y += trace_output
 hostprogs-y += lathist
@@ -40,6 +41,7 @@ hostprogs-y += xdp_redirect
 hostprogs-y += xdp_redirect_map
 hostprogs-y += xdp_redirect_cpu
 hostprogs-y += xdp_monitor
+hostprogs-y += xdp_rxq_info
 hostprogs-y += syscall_tp
 
 # Libbpf dependencies
@@ -58,6 +60,7 @@ tracex3-objs := bpf_load.o $(LIBBPF) tracex3_user.o
 tracex4-objs := bpf_load.o $(LIBBPF) tracex4_user.o
 tracex5-objs := bpf_load.o $(LIBBPF) tracex5_user.o
 tracex6-objs := bpf_load.o $(LIBBPF) tracex6_user.o
+tracex7-objs := bpf_load.o $(LIBBPF) tracex7_user.o
 load_sock_ops-objs := bpf_load.o $(LIBBPF) load_sock_ops.o
 test_probe_write_user-objs := bpf_load.o $(LIBBPF) test_probe_write_user_user.o
 trace_output-objs := bpf_load.o $(LIBBPF) trace_output_user.o
@@ -88,6 +91,7 @@ xdp_redirect-objs := bpf_load.o $(LIBBPF) xdp_redirect_user.o
 xdp_redirect_map-objs := bpf_load.o $(LIBBPF) xdp_redirect_map_user.o
 xdp_redirect_cpu-objs := bpf_load.o $(LIBBPF) xdp_redirect_cpu_user.o
 xdp_monitor-objs := bpf_load.o $(LIBBPF) xdp_monitor_user.o
+xdp_rxq_info-objs := bpf_load.o $(LIBBPF) xdp_rxq_info_user.o
 syscall_tp-objs := bpf_load.o $(LIBBPF) syscall_tp_user.o
 
 # Tell kbuild to always build the programs
@@ -101,6 +105,7 @@ always += tracex3_kern.o
 always += tracex4_kern.o
 always += tracex5_kern.o
 always += tracex6_kern.o
+always += tracex7_kern.o
 always += sock_flags_kern.o
 always += test_probe_write_user_kern.o
 always += trace_output_kern.o
@@ -136,6 +141,8 @@ always += xdp_redirect_kern.o
 always += xdp_redirect_map_kern.o
 always += xdp_redirect_cpu_kern.o
 always += xdp_monitor_kern.o
+always += xdp_rxq_info_kern.o
+always += xdp2skb_meta_kern.o
 always += syscall_tp_kern.o
 
 HOSTCFLAGS += -I$(objtree)/usr/include
@@ -155,6 +162,7 @@ HOSTLOADLIBES_tracex3 += -lelf
 HOSTLOADLIBES_tracex4 += -lelf -lrt
 HOSTLOADLIBES_tracex5 += -lelf
 HOSTLOADLIBES_tracex6 += -lelf
+HOSTLOADLIBES_tracex7 += -lelf
 HOSTLOADLIBES_test_cgrp2_sock2 += -lelf
 HOSTLOADLIBES_load_sock_ops += -lelf
 HOSTLOADLIBES_test_probe_write_user += -lelf
@@ -178,6 +186,7 @@ HOSTLOADLIBES_xdp_redirect += -lelf
 HOSTLOADLIBES_xdp_redirect_map += -lelf
 HOSTLOADLIBES_xdp_redirect_cpu += -lelf
 HOSTLOADLIBES_xdp_monitor += -lelf
+HOSTLOADLIBES_xdp_rxq_info += -lelf
 HOSTLOADLIBES_syscall_tp += -lelf
 
 # Allows pointing LLC/CLANG to a LLVM backend with bpf support, redefine on cmdline:
@@ -192,13 +201,16 @@ CLANG_ARCH_ARGS = -target $(ARCH)
 endif
 
 # Trick to allow make to be run from this directory
-all:
+all: $(LIBBPF)
 	$(MAKE) -C ../../ $(CURDIR)/
 
 clean:
 	$(MAKE) -C ../../ M=$(CURDIR) clean
 	@rm -f *~
 
+$(LIBBPF): FORCE
+	$(MAKE) -C $(dir $@) $(notdir $@)
+
 $(obj)/syscall_nrs.s:	$(src)/syscall_nrs.c
 	$(call if_changed_dep,cc_s_c)
 
diff --git a/samples/bpf/tcbpf2_kern.c b/samples/bpf/tcbpf2_kern.c
index 370b749f5ee6..f6bbf8f50da3 100644
--- a/samples/bpf/tcbpf2_kern.c
+++ b/samples/bpf/tcbpf2_kern.c
@@ -35,12 +35,22 @@ struct geneve_opt {
 	u8	opt_data[8]; /* hard-coded to 8 bytes */
 };
 
+struct erspan_md2 {
+	__be32 timestamp;
+	__be16 sgt;
+	__be16 flags;
+};
+
 struct vxlan_metadata {
 	u32     gbp;
 };
 
 struct erspan_metadata {
-	__be32 index;
+	union {
+		__be32 index;
+		struct erspan_md2 md2;
+	} u;
+	int version;
 };
 
 SEC("gre_set_tunnel")
@@ -81,6 +91,49 @@ int _gre_get_tunnel(struct __sk_buff *skb)
 	return TC_ACT_OK;
 }
 
+SEC("ip6gretap_set_tunnel")
+int _ip6gretap_set_tunnel(struct __sk_buff *skb)
+{
+	struct bpf_tunnel_key key;
+	int ret;
+
+	__builtin_memset(&key, 0x0, sizeof(key));
+	key.remote_ipv6[3] = _htonl(0x11); /* ::11 */
+	key.tunnel_id = 2;
+	key.tunnel_tos = 0;
+	key.tunnel_ttl = 64;
+	key.tunnel_label = 0xabcde;
+
+	ret = bpf_skb_set_tunnel_key(skb, &key, sizeof(key),
+				     BPF_F_TUNINFO_IPV6 | BPF_F_ZERO_CSUM_TX);
+	if (ret < 0) {
+		ERROR(ret);
+		return TC_ACT_SHOT;
+	}
+
+	return TC_ACT_OK;
+}
+
+SEC("ip6gretap_get_tunnel")
+int _ip6gretap_get_tunnel(struct __sk_buff *skb)
+{
+	char fmt[] = "key %d remote ip6 ::%x label %x\n";
+	struct bpf_tunnel_key key;
+	int ret;
+
+	ret = bpf_skb_get_tunnel_key(skb, &key, sizeof(key),
+				     BPF_F_TUNINFO_IPV6);
+	if (ret < 0) {
+		ERROR(ret);
+		return TC_ACT_SHOT;
+	}
+
+	bpf_trace_printk(fmt, sizeof(fmt),
+			 key.tunnel_id, key.remote_ipv6[3], key.tunnel_label);
+
+	return TC_ACT_OK;
+}
+
 SEC("erspan_set_tunnel")
 int _erspan_set_tunnel(struct __sk_buff *skb)
 {
@@ -100,7 +153,18 @@ int _erspan_set_tunnel(struct __sk_buff *skb)
 		return TC_ACT_SHOT;
 	}
 
-	md.index = htonl(123);
+	__builtin_memset(&md, 0, sizeof(md));
+#ifdef ERSPAN_V1
+	md.version = 1;
+	md.u.index = htonl(123);
+#else
+	u8 direction = 1;
+	u16 hwid = 7;
+
+	md.version = 2;
+	md.u.md2.flags = htons((direction << 3) | (hwid << 4));
+#endif
+
 	ret = bpf_skb_set_tunnel_opt(skb, &md, sizeof(md));
 	if (ret < 0) {
 		ERROR(ret);
@@ -113,7 +177,7 @@ int _erspan_set_tunnel(struct __sk_buff *skb)
 SEC("erspan_get_tunnel")
 int _erspan_get_tunnel(struct __sk_buff *skb)
 {
-	char fmt[] = "key %d remote ip 0x%x erspan index 0x%x\n";
+	char fmt[] = "key %d remote ip 0x%x erspan version %d\n";
 	struct bpf_tunnel_key key;
 	struct erspan_metadata md;
 	u32 index;
@@ -131,9 +195,105 @@ int _erspan_get_tunnel(struct __sk_buff *skb)
 		return TC_ACT_SHOT;
 	}
 
-	index = bpf_ntohl(md.index);
 	bpf_trace_printk(fmt, sizeof(fmt),
-			key.tunnel_id, key.remote_ipv4, index);
+			key.tunnel_id, key.remote_ipv4, md.version);
+
+#ifdef ERSPAN_V1
+	char fmt2[] = "\tindex %x\n";
+
+	index = bpf_ntohl(md.u.index);
+	bpf_trace_printk(fmt2, sizeof(fmt2), index);
+#else
+	char fmt2[] = "\tdirection %d hwid %x timestamp %u\n";
+
+	bpf_trace_printk(fmt2, sizeof(fmt2),
+		(ntohs(md.u.md2.flags) >> 3) & 0x1,
+		(ntohs(md.u.md2.flags) >> 4) & 0x3f,
+		bpf_ntohl(md.u.md2.timestamp));
+#endif
+
+	return TC_ACT_OK;
+}
+
+SEC("ip4ip6erspan_set_tunnel")
+int _ip4ip6erspan_set_tunnel(struct __sk_buff *skb)
+{
+	struct bpf_tunnel_key key;
+	struct erspan_metadata md;
+	int ret;
+
+	__builtin_memset(&key, 0x0, sizeof(key));
+	key.remote_ipv6[3] = _htonl(0x11);
+	key.tunnel_id = 2;
+	key.tunnel_tos = 0;
+	key.tunnel_ttl = 64;
+
+	ret = bpf_skb_set_tunnel_key(skb, &key, sizeof(key),
+				     BPF_F_TUNINFO_IPV6);
+	if (ret < 0) {
+		ERROR(ret);
+		return TC_ACT_SHOT;
+	}
+
+	__builtin_memset(&md, 0, sizeof(md));
+
+#ifdef ERSPAN_V1
+	md.u.index = htonl(123);
+	md.version = 1;
+#else
+	u8 direction = 0;
+	u16 hwid = 17;
+
+	md.version = 2;
+	md.u.md2.flags = htons((direction << 3) | (hwid << 4));
+#endif
+
+	ret = bpf_skb_set_tunnel_opt(skb, &md, sizeof(md));
+	if (ret < 0) {
+		ERROR(ret);
+		return TC_ACT_SHOT;
+	}
+
+	return TC_ACT_OK;
+}
+
+SEC("ip4ip6erspan_get_tunnel")
+int _ip4ip6erspan_get_tunnel(struct __sk_buff *skb)
+{
+	char fmt[] = "ip6erspan get key %d remote ip6 ::%x erspan version %d\n";
+	struct bpf_tunnel_key key;
+	struct erspan_metadata md;
+	u32 index;
+	int ret;
+
+	ret = bpf_skb_get_tunnel_key(skb, &key, sizeof(key), BPF_F_TUNINFO_IPV6);
+	if (ret < 0) {
+		ERROR(ret);
+		return TC_ACT_SHOT;
+	}
+
+	ret = bpf_skb_get_tunnel_opt(skb, &md, sizeof(md));
+	if (ret < 0) {
+		ERROR(ret);
+		return TC_ACT_SHOT;
+	}
+
+	bpf_trace_printk(fmt, sizeof(fmt),
+			key.tunnel_id, key.remote_ipv4, md.version);
+
+#ifdef ERSPAN_V1
+	char fmt2[] = "\tindex %x\n";
+
+	index = bpf_ntohl(md.u.index);
+	bpf_trace_printk(fmt2, sizeof(fmt2), index);
+#else
+	char fmt2[] = "\tdirection %d hwid %x timestamp %u\n";
+
+	bpf_trace_printk(fmt2, sizeof(fmt2),
+		(ntohs(md.u.md2.flags) >> 3) & 0x1,
+		(ntohs(md.u.md2.flags) >> 4) & 0x3f,
+		bpf_ntohl(md.u.md2.timestamp));
+#endif
 
 	return TC_ACT_OK;
 }
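
The ERSPAN v2 metadata used above packs a one-bit direction at bit 3 and a six-bit hardware ID at bit 4 of the big-endian flags field; the set and get programs apply the shifts inline. The same layout as reusable helpers, sketched:

static inline __be16 erspan2_flags(u8 direction, u16 hwid)
{
	return htons(((direction & 0x1) << 3) | ((hwid & 0x3f) << 4));
}

static inline u8 erspan2_direction(__be16 flags)
{
	return (ntohs(flags) >> 3) & 0x1;
}

static inline u16 erspan2_hwid(__be16 flags)
{
	return (ntohs(flags) >> 4) & 0x3f;
}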
diff --git a/samples/bpf/test_cgrp2_attach2.c b/samples/bpf/test_cgrp2_attach2.c
index 3e8232cc04a8..1af412ec6007 100644
--- a/samples/bpf/test_cgrp2_attach2.c
+++ b/samples/bpf/test_cgrp2_attach2.c
@@ -78,7 +78,8 @@ static int test_foo_bar(void)
 	if (join_cgroup(FOO))
 		goto err;
 
-	if (bpf_prog_attach(drop_prog, foo, BPF_CGROUP_INET_EGRESS, 1)) {
+	if (bpf_prog_attach(drop_prog, foo, BPF_CGROUP_INET_EGRESS,
+			    BPF_F_ALLOW_OVERRIDE)) {
 		log_err("Attaching prog to /foo");
 		goto err;
 	}
@@ -97,7 +98,8 @@ static int test_foo_bar(void)
 	printf("Attached DROP prog. This ping in cgroup /foo/bar should fail...\n");
 	assert(system(PING_CMD) != 0);
 
-	if (bpf_prog_attach(allow_prog, bar, BPF_CGROUP_INET_EGRESS, 1)) {
+	if (bpf_prog_attach(allow_prog, bar, BPF_CGROUP_INET_EGRESS,
+			    BPF_F_ALLOW_OVERRIDE)) {
 		log_err("Attaching prog to /foo/bar");
 		goto err;
 	}
@@ -114,7 +116,8 @@ static int test_foo_bar(void)
 	       "This ping in cgroup /foo/bar should fail...\n");
 	assert(system(PING_CMD) != 0);
 
-	if (bpf_prog_attach(allow_prog, bar, BPF_CGROUP_INET_EGRESS, 1)) {
+	if (bpf_prog_attach(allow_prog, bar, BPF_CGROUP_INET_EGRESS,
+			    BPF_F_ALLOW_OVERRIDE)) {
 		log_err("Attaching prog to /foo/bar");
 		goto err;
 	}
@@ -128,7 +131,8 @@ static int test_foo_bar(void)
 	       "This ping in cgroup /foo/bar should pass...\n");
 	assert(system(PING_CMD) == 0);
 
-	if (bpf_prog_attach(allow_prog, bar, BPF_CGROUP_INET_EGRESS, 1)) {
+	if (bpf_prog_attach(allow_prog, bar, BPF_CGROUP_INET_EGRESS,
+			    BPF_F_ALLOW_OVERRIDE)) {
 		log_err("Attaching prog to /foo/bar");
 		goto err;
 	}
@@ -161,13 +165,15 @@ static int test_foo_bar(void)
 		goto err;
 	}
 
-	if (!bpf_prog_attach(allow_prog, bar, BPF_CGROUP_INET_EGRESS, 1)) {
+	if (!bpf_prog_attach(allow_prog, bar, BPF_CGROUP_INET_EGRESS,
+			     BPF_F_ALLOW_OVERRIDE)) {
 		errno = 0;
 		log_err("Unexpected success attaching overridable prog to /foo/bar");
 		goto err;
 	}
 
-	if (!bpf_prog_attach(allow_prog, foo, BPF_CGROUP_INET_EGRESS, 1)) {
+	if (!bpf_prog_attach(allow_prog, foo, BPF_CGROUP_INET_EGRESS,
+			     BPF_F_ALLOW_OVERRIDE)) {
 		errno = 0;
 		log_err("Unexpected success attaching overridable prog to /foo");
 		goto err;
@@ -273,27 +279,33 @@ static int test_multiprog(void)
 	if (join_cgroup("/cg1/cg2/cg3/cg4/cg5"))
 		goto err;
 
-	if (bpf_prog_attach(allow_prog[0], cg1, BPF_CGROUP_INET_EGRESS, 2)) {
+	if (bpf_prog_attach(allow_prog[0], cg1, BPF_CGROUP_INET_EGRESS,
+			    BPF_F_ALLOW_MULTI)) {
 		log_err("Attaching prog to cg1");
 		goto err;
 	}
-	if (!bpf_prog_attach(allow_prog[0], cg1, BPF_CGROUP_INET_EGRESS, 2)) {
+	if (!bpf_prog_attach(allow_prog[0], cg1, BPF_CGROUP_INET_EGRESS,
+			     BPF_F_ALLOW_MULTI)) {
 		log_err("Unexpected success attaching the same prog to cg1");
 		goto err;
 	}
-	if (bpf_prog_attach(allow_prog[1], cg1, BPF_CGROUP_INET_EGRESS, 2)) {
+	if (bpf_prog_attach(allow_prog[1], cg1, BPF_CGROUP_INET_EGRESS,
+			    BPF_F_ALLOW_MULTI)) {
 		log_err("Attaching prog2 to cg1");
 		goto err;
 	}
-	if (bpf_prog_attach(allow_prog[2], cg2, BPF_CGROUP_INET_EGRESS, 1)) {
+	if (bpf_prog_attach(allow_prog[2], cg2, BPF_CGROUP_INET_EGRESS,
+			    BPF_F_ALLOW_OVERRIDE)) {
 		log_err("Attaching prog to cg2");
 		goto err;
 	}
-	if (bpf_prog_attach(allow_prog[3], cg3, BPF_CGROUP_INET_EGRESS, 2)) {
+	if (bpf_prog_attach(allow_prog[3], cg3, BPF_CGROUP_INET_EGRESS,
+			    BPF_F_ALLOW_MULTI)) {
 		log_err("Attaching prog to cg3");
 		goto err;
 	}
-	if (bpf_prog_attach(allow_prog[4], cg4, BPF_CGROUP_INET_EGRESS, 1)) {
+	if (bpf_prog_attach(allow_prog[4], cg4, BPF_CGROUP_INET_EGRESS,
+			    BPF_F_ALLOW_OVERRIDE)) {
 		log_err("Attaching prog to cg4");
 		goto err;
 	}
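
test_cgrp2_attach2.c swaps the magic attach-flag constants 1 and 2 for their uapi names: BPF_F_ALLOW_OVERRIDE installs one overridable program that a descendant cgroup may replace, while BPF_F_ALLOW_MULTI lets several programs coexist on a cgroup and run in sequence. The two call shapes side by side (the file descriptors are placeholders):

/* one overridable program; children may install their own */
bpf_prog_attach(prog_fd, cgroup_fd, BPF_CGROUP_INET_EGRESS,
		BPF_F_ALLOW_OVERRIDE);

/* multiple programs on this cgroup, executed in sequence */
bpf_prog_attach(prog_fd, cgroup_fd, BPF_CGROUP_INET_EGRESS,
		BPF_F_ALLOW_MULTI);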
diff --git a/samples/bpf/test_override_return.sh b/samples/bpf/test_override_return.sh
new file mode 100755
index 000000000000..e68b9ee6814b
--- /dev/null
+++ b/samples/bpf/test_override_return.sh
@@ -0,0 +1,15 @@
+#!/bin/bash
+
+rm -f testfile.img
+dd if=/dev/zero of=testfile.img bs=1M seek=1000 count=1
+DEVICE=$(losetup --show -f testfile.img)
+mkfs.btrfs -f $DEVICE
+mkdir tmpmnt
+./tracex7 $DEVICE
+if [ $? -eq 0 ]
+then
+	echo "SUCCESS!"
+else
+	echo "FAILED!"
+fi
+losetup -d $DEVICE
diff --git a/samples/bpf/test_tunnel_bpf.sh b/samples/bpf/test_tunnel_bpf.sh
index 312e1722a39f..ae7f7c38309b 100755
--- a/samples/bpf/test_tunnel_bpf.sh
+++ b/samples/bpf/test_tunnel_bpf.sh
@@ -33,10 +33,43 @@ function add_gre_tunnel {
 	ip addr add dev $DEV 10.1.1.200/24
 }
 
-function add_erspan_tunnel {
+function add_ip6gretap_tunnel {
+
+	# assign ipv6 address
+	ip netns exec at_ns0 ip addr add ::11/96 dev veth0
+	ip netns exec at_ns0 ip link set dev veth0 up
+	ip addr add dev veth1 ::22/96
+	ip link set dev veth1 up
+
 	# in namespace
 	ip netns exec at_ns0 \
-		ip link add dev $DEV_NS type $TYPE seq key 2 local 172.16.1.100 remote 172.16.1.200 erspan 123
+		ip link add dev $DEV_NS type $TYPE flowlabel 0xbcdef key 2 \
+		local ::11 remote ::22
+
+	ip netns exec at_ns0 ip addr add dev $DEV_NS 10.1.1.100/24
+	ip netns exec at_ns0 ip addr add dev $DEV_NS fc80::100/96
+	ip netns exec at_ns0 ip link set dev $DEV_NS up
+
+	# out of namespace
+	ip link add dev $DEV type $TYPE external
+	ip addr add dev $DEV 10.1.1.200/24
+	ip addr add dev $DEV fc80::200/24
+	ip link set dev $DEV up
+}
+
+function add_erspan_tunnel {
+	# in namespace
+	if [ "$1" == "v1" ]; then
+		ip netns exec at_ns0 \
+		ip link add dev $DEV_NS type $TYPE seq key 2 \
+		local 172.16.1.100 remote 172.16.1.200 \
+		erspan_ver 1 erspan 123
+	else
+		ip netns exec at_ns0 \
+		ip link add dev $DEV_NS type $TYPE seq key 2 \
+		local 172.16.1.100 remote 172.16.1.200 \
+		erspan_ver 2 erspan_dir 1 erspan_hwid 3
+	fi
 	ip netns exec at_ns0 ip link set dev $DEV_NS up
 	ip netns exec at_ns0 ip addr add dev $DEV_NS 10.1.1.100/24
 
@@ -46,6 +79,35 @@ function add_erspan_tunnel {
 	ip addr add dev $DEV 10.1.1.200/24
 }
 
+function add_ip6erspan_tunnel {
+
+	# assign ipv6 address
+	ip netns exec at_ns0 ip addr add ::11/96 dev veth0
+	ip netns exec at_ns0 ip link set dev veth0 up
+	ip addr add dev veth1 ::22/96
+	ip link set dev veth1 up
+
+	# in namespace
+	if [ "$1" == "v1" ]; then
+		ip netns exec at_ns0 \
+		ip link add dev $DEV_NS type $TYPE seq key 2 \
+		local ::11 remote ::22 \
+		erspan_ver 1 erspan 123
+	else
+		ip netns exec at_ns0 \
+		ip link add dev $DEV_NS type $TYPE seq key 2 \
+		local ::11 remote ::22 \
+		erspan_ver 2 erspan_dir 1 erspan_hwid 7
+	fi
+	ip netns exec at_ns0 ip addr add dev $DEV_NS 10.1.1.100/24
+	ip netns exec at_ns0 ip link set dev $DEV_NS up
+
+	# out of namespace
+	ip link add dev $DEV type $TYPE external
+	ip addr add dev $DEV 10.1.1.200/24
+	ip link set dev $DEV up
+}
+
 function add_vxlan_tunnel {
 	# Set static ARP entry here because iptables set-mark works
 	# on L3 packet, as a result not applying to ARP packets,
@@ -113,18 +175,65 @@ function test_gre {
 	cleanup
 }
 
+function test_ip6gre {
+	TYPE=ip6gre
+	DEV_NS=ip6gre00
+	DEV=ip6gre11
+	config_device
+	# reuse the ip6gretap function
+	add_ip6gretap_tunnel
+	attach_bpf $DEV ip6gretap_set_tunnel ip6gretap_get_tunnel
+	# underlay
+	ping6 -c 4 ::11
+	# overlay: ipv4 over ipv6
+	ip netns exec at_ns0 ping -c 1 10.1.1.200
+	ping -c 1 10.1.1.100
+	# overlay: ipv6 over ipv6
+	ip netns exec at_ns0 ping6 -c 1 fc80::200
+	cleanup
+}
+
+function test_ip6gretap {
+	TYPE=ip6gretap
+	DEV_NS=ip6gretap00
+	DEV=ip6gretap11
+	config_device
+	add_ip6gretap_tunnel
+	attach_bpf $DEV ip6gretap_set_tunnel ip6gretap_get_tunnel
+	# underlay
+	ping6 -c 4 ::11
+	# overlay: ipv4 over ipv6
+	ip netns exec at_ns0 ping -i .2 -c 1 10.1.1.200
+	ping -c 1 10.1.1.100
+	# overlay: ipv6 over ipv6
+	ip netns exec at_ns0 ping6 -c 1 fc80::200
+	cleanup
+}
+
 function test_erspan {
 	TYPE=erspan
 	DEV_NS=erspan00
 	DEV=erspan11
 	config_device
-	add_erspan_tunnel
+	add_erspan_tunnel $1
 	attach_bpf $DEV erspan_set_tunnel erspan_get_tunnel
 	ping -c 1 10.1.1.100
 	ip netns exec at_ns0 ping -c 1 10.1.1.200
 	cleanup
 }
 
+function test_ip6erspan {
+	TYPE=ip6erspan
+	DEV_NS=ip6erspan00
+	DEV=ip6erspan11
+	config_device
+	add_ip6erspan_tunnel $1
+	attach_bpf $DEV ip4ip6erspan_set_tunnel ip4ip6erspan_get_tunnel
+	ping6 -c 3 ::11
+	ip netns exec at_ns0 ping -c 1 10.1.1.200
+	cleanup
+}
+
 function test_vxlan {
 	TYPE=vxlan
 	DEV_NS=vxlan00
@@ -175,9 +284,12 @@ function cleanup {
 	ip link del veth1
 	ip link del ipip11
 	ip link del gretap11
+	ip link del ip6gre11
+	ip link del ip6gretap11
 	ip link del vxlan11
 	ip link del geneve11
 	ip link del erspan11
+	ip link del ip6erspan11
 	pkill tcpdump
 	pkill cat
 	set -ex
@@ -187,8 +299,16 @@ trap cleanup 0 2 3 6 9
 cleanup
 echo "Testing GRE tunnel..."
 test_gre
+echo "Testing IP6GRE tunnel..."
+test_ip6gre
+echo "Testing IP6GRETAP tunnel..."
+test_ip6gretap
 echo "Testing ERSPAN tunnel..."
-test_erspan
+test_erspan v1
+test_erspan v2
+echo "Testing IP6ERSPAN tunnel..."
+test_ip6erspan v1
+test_ip6erspan v2
 echo "Testing VXLAN tunnel..."
 test_vxlan
 echo "Testing GENEVE tunnel..."
diff --git a/samples/bpf/tracex7_kern.c b/samples/bpf/tracex7_kern.c
new file mode 100644
index 000000000000..1ab308a43e0f
--- /dev/null
+++ b/samples/bpf/tracex7_kern.c
@@ -0,0 +1,16 @@
+#include <uapi/linux/ptrace.h>
+#include <uapi/linux/bpf.h>
+#include <linux/version.h>
+#include "bpf_helpers.h"
+
+SEC("kprobe/open_ctree")
+int bpf_prog1(struct pt_regs *ctx)
+{
+	unsigned long rc = -12; /* -ENOMEM */
+
+	bpf_override_return(ctx, rc);
+	return 0;
+}
+
+char _license[] SEC("license") = "GPL";
+u32 _version SEC("version") = LINUX_VERSION_CODE;
diff --git a/samples/bpf/tracex7_user.c b/samples/bpf/tracex7_user.c
new file mode 100644
index 000000000000..8a52ac492e8b
--- /dev/null
+++ b/samples/bpf/tracex7_user.c
@@ -0,0 +1,28 @@
+#define _GNU_SOURCE
+
+#include <stdio.h>
+#include <linux/bpf.h>
+#include <unistd.h>
+#include "libbpf.h"
+#include "bpf_load.h"
+
+int main(int argc, char **argv)
+{
+	FILE *f;
+	char filename[256];
+	char command[256];
+	int ret;
+
+	snprintf(filename, sizeof(filename), "%s_kern.o", argv[0]);
+
+	if (load_bpf_file(filename)) {
+		printf("%s", bpf_log_buf);
+		return 1;
+	}
+
+	snprintf(command, sizeof(command), "mount %s tmpmnt/", argv[1]);
+	f = popen(command, "r");
+	ret = pclose(f);
+
+	/* The mount is expected to fail: bpf_override_return() injects
+	 * -ENOMEM into open_ctree(), so failure here means success.
+	 */
+	return ret ? 0 : 1;
+}
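+
+/* Usage sketch (illustrative; the image name is an assumption).  The
+ * kprobe in tracex7_kern.c overrides btrfs' open_ctree() with -ENOMEM,
+ * so mounting a btrfs image is expected to fail:
+ *
+ *   dd if=/dev/zero of=testfile.img bs=1M count=200
+ *   mkfs.btrfs -f testfile.img
+ *   mkdir -p tmpmnt
+ *   ./tracex7 testfile.img
+ */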
diff --git a/samples/bpf/xdp2skb_meta.sh b/samples/bpf/xdp2skb_meta.sh
new file mode 100755
index 000000000000..b9c9549c4c27
--- /dev/null
+++ b/samples/bpf/xdp2skb_meta.sh
@@ -0,0 +1,220 @@
+#!/bin/bash
+#
+# SPDX-License-Identifier: GPL-2.0
+# Copyright (c) 2018 Jesper Dangaard Brouer, Red Hat Inc.
+#
+# Bash-shell example of using the iproute2 tools 'tc' and 'ip' to load
+# eBPF programs, both for XDP and clsbpf.  Shell-script function
+# wrappers and even long-options parsing are illustrated, for ease of
+# use.
+#
+# Related to samples/bpf/xdp2skb_meta_kern.c, which contains BPF progs
+# that need to collaborate between the XDP and TC hooks.  Thus, it is
+# convenient that the same tool loads both programs that need to work
+# together.
+#
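+# Example usage (a sketch; the device name eth0 is an assumption):
+#   ./xdp2skb_meta.sh --dev eth0           # attach XDP + TC programs
+#   ./xdp2skb_meta.sh --dev eth0 --list    # show what is attached
+#   ./xdp2skb_meta.sh --dev eth0 --flush   # detach both again
+#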
+BPF_FILE=xdp2skb_meta_kern.o
+DIR=$(dirname $0)
+
+export TC=/usr/sbin/tc
+export IP=/usr/sbin/ip
+
+function usage() {
+    echo ""
+    echo "Usage: $0 [-vfh] --dev ethX"
+    echo "  -d | --dev     :             Network device (required)"
+    echo "  --flush        :             Cleanup flush TC and XDP progs"
+    echo "  --list         : (\$LIST)     List TC and XDP progs"
+    echo "  -v | --verbose : (\$VERBOSE)  Verbose"
+    echo "  --dry-run      : (\$DRYRUN)   Dry-run only (echo commands)"
+    echo ""
+}
+
+## -- General shell logging cmds --
+function err() {
+    local exitcode=$1
+    shift
+    echo "ERROR: $@" >&2
+    exit $exitcode
+}
+
+function info() {
+    if [[ -n "$VERBOSE" ]]; then
+	echo "# $@"
+    fi
+}
+
+## -- Helper function calls --
+
+# Wrapper call for TC and IP
+# - Will display the offending command on failure
+function _call_cmd() {
+    local cmd="$1"
+    local allow_fail="$2"
+    shift 2
+    if [[ -n "$VERBOSE" ]]; then
+	echo "$(basename $cmd) $@"
+    fi
+    if [[ -n "$DRYRUN" ]]; then
+	return
+    fi
+    $cmd "$@"
+    local status=$?
+    if (( $status != 0 )); then
+	if [[ "$allow_fail" == "" ]]; then
+	    err 2 "Exec error($status) occurred cmd: \"$cmd $@\""
+	fi
+    fi
+}
+function call_tc() {
+    _call_cmd "$TC" "" "$@"
+}
+function call_tc_allow_fail() {
+    _call_cmd "$TC" "allow_fail" "$@"
+}
+function call_ip() {
+    _call_cmd "$IP" "" "$@"
+}
+
+##  --- Parse command line arguments / parameters ---
+# Using external program "getopt" to get --long-options
+OPTIONS=$(getopt -o vfhd: \
+    --long verbose,flush,help,list,dev:,dry-run -- "$@")
+if (( $? != 0 )); then
+    err 4 "Error calling getopt"
+fi
+eval set -- "$OPTIONS"
+
+unset DEV
+unset FLUSH
+while true; do
+    case "$1" in
+	-d | --dev ) # device
+	    DEV=$2
+	    info "Device set to: DEV=$DEV" >&2
+	    shift 2
+	    ;;
+	-v | --verbose)
+	    VERBOSE=yes
+	    # info "Verbose mode: VERBOSE=$VERBOSE" >&2
+	    shift
+	    ;;
+	--dry-run )
+	    DRYRUN=yes
+	    VERBOSE=yes
+	    info "Dry-run mode: enable VERBOSE and don't call TC+IP" >&2
+	    shift
+	    ;;
+	-f | --flush )
+	    FLUSH=yes
+	    shift
+	    ;;
+	--list )
+	    LIST=yes
+	    shift
+	    ;;
+	-- )
+	    shift
+	    break
+	    ;;
+	-h | --help )
+	    usage;
+	    exit 0
+	    ;;
+	* )
+	    shift
+	    break
+	    ;;
+    esac
+done
+
+FILE="$DIR/$BPF_FILE"
+if [[ ! -e $FILE ]]; then
+    err 3 "Missing BPF object file ($FILE)"
+fi
+
+if [[ -z $DEV ]]; then
+    usage
+    err 2 "Please specify network device -- required option --dev"
+fi
+
+## -- Function calls --
+
+function list_tc()
+{
+    local device="$1"
+    shift
+    info "Listing current TC ingress rules"
+    call_tc filter show dev $device ingress
+}
+
+function list_xdp()
+{
+    local device="$1"
+    shift
+    info "Listing current XDP device($device) setting"
+    call_ip link show dev $device | grep --color=auto xdp
+}
+
+function flush_tc()
+{
+    local device="$1"
+    shift
+    info "Flush TC on device: $device"
+    call_tc_allow_fail filter del dev $device ingress
+    call_tc_allow_fail qdisc del dev $device clsact
+}
+
+function flush_xdp()
+{
+    local device="$1"
+    shift
+    info "Flush XDP on device: $device"
+    call_ip link set dev $device xdp off
+}
+
+function attach_tc_mark()
+{
+    local device="$1"
+    local file="$2"
+    local prog="tc_mark"
+    shift 2
+
+    # Re-attach clsact to clear/flush any existing rule
+    call_tc_allow_fail qdisc del dev $device clsact 2> /dev/null
+    call_tc            qdisc add dev $device clsact
+
+    # Attach BPF prog
+    call_tc filter add dev $device ingress \
+	    prio 1 handle 1 bpf da obj $file sec $prog
+}
+
+function attach_xdp_mark()
+{
+    local device="$1"
+    local file="$2"
+    local prog="xdp_mark"
+    shift 2
+
+    # Remove XDP prog in case it's already loaded
+    # TODO: Need ip-link option to override/replace existing XDP prog
+    flush_xdp $device
+
+    # Attach XDP/BPF prog
+    call_ip link set dev $device xdp obj $file sec $prog
+}
+
+if [[ -n $FLUSH ]]; then
+    flush_tc  $DEV
+    flush_xdp $DEV
+    exit 0
+fi
+
+if [[ -n $LIST ]]; then
+    list_tc  $DEV
+    list_xdp $DEV
+    exit 0
+fi
+
+attach_tc_mark  $DEV $FILE
+attach_xdp_mark $DEV $FILE
diff --git a/samples/bpf/xdp2skb_meta_kern.c b/samples/bpf/xdp2skb_meta_kern.c
new file mode 100644
index 000000000000..0c12048ac79f
--- /dev/null
+++ b/samples/bpf/xdp2skb_meta_kern.c
@@ -0,0 +1,105 @@
+/* SPDX-License-Identifier: GPL-2.0
+ * Copyright (c) 2018 Jesper Dangaard Brouer, Red Hat Inc.
+ *
+ * Example of how to transfer info from XDP to SKB, e.g. skb->mark
+ * -----------------------------------------------------------
+ * This uses the XDP data_meta infrastructure, and is a cooperation
+ * between two bpf-programs (1) XDP and (2) clsact at TC-ingress hook.
+ *
+ * Notice: This example does not use the BPF C-loader (bpf_load.c),
+ * but instead relies on the iproute2 TC tool for loading BPF objects.
+ */
+#include <uapi/linux/bpf.h>
+#include <uapi/linux/pkt_cls.h>
+
+#include "bpf_helpers.h"
+
+/*
+ * This struct is stored in the XDP 'data_meta' area, which is located
+ * just in front of the raw packet payload data.  The meaning is
+ * specific to these two BPF programs, which use it as a communication
+ * channel.  XDP adjusts/increases the area via a bpf-helper, and TC
+ * uses boundary checks to see if data has been provided.
+ *
+ * The struct must be 4-byte aligned, which here is enforced by
+ * __attribute__((aligned(4))).
+ */
+struct meta_info {
+	__u32 mark;
+} __attribute__((aligned(4)));
+
+SEC("xdp_mark")
+int _xdp_mark(struct xdp_md *ctx)
+{
+	struct meta_info *meta;
+	void *data, *data_end;
+	int ret;
+
+	/* Reserve space in front of the data pointer for our meta info.
+	 * (Notice drivers not supporting data_meta will fail here!)
+	 */
+	ret = bpf_xdp_adjust_meta(ctx, -(int)sizeof(*meta));
+	if (ret < 0)
+		return XDP_ABORTED;
+
+	/* Notice: Kernel-side verifier requires that loading of
+	 * ctx->data MUST happen _after_ helper bpf_xdp_adjust_meta(),
+	 * as pkt-data pointers are invalidated.  Helpers that require
+	 * this are determined/marked by bpf_helper_changes_pkt_data()
+	 */
+	data = (void *)(unsigned long)ctx->data;
+
+	/* Check data_meta has room for the meta_info struct */
+	meta = (void *)(unsigned long)ctx->data_meta;
+	if (meta + 1 > data)
+		return XDP_ABORTED;
+
+	meta->mark = 42;
+
+	return XDP_PASS;
+}
+
+SEC("tc_mark")
+int _tc_mark(struct __sk_buff *ctx)
+{
+	void *data      = (void *)(unsigned long)ctx->data;
+	void *data_end  = (void *)(unsigned long)ctx->data_end;
+	void *data_meta = (void *)(unsigned long)ctx->data_meta;
+	struct meta_info *meta = data_meta;
+
+	/* Check XDP gave us some data_meta */
+	if (meta + 1 > data) {
+		ctx->mark = 41;
+		/* Skip "accept" if no data_meta is available */
+		return TC_ACT_OK;
+	}
+
+	/* Hint: See func tc_cls_act_is_valid_access() for BPF_WRITE access */
+	ctx->mark = meta->mark; /* Transfer XDP-mark to SKB-mark */
+
+	return TC_ACT_OK;
+}
+
+/* Manually attaching these programs:
+export DEV=ixgbe2
+export FILE=xdp2skb_meta_kern.o
+
+# via TC command
+tc qdisc del dev $DEV clsact 2> /dev/null
+tc qdisc add dev $DEV clsact
+tc filter  add dev $DEV ingress prio 1 handle 1 bpf da obj $FILE sec tc_mark
+tc filter show dev $DEV ingress
+
+# XDP via IP command:
+ip link set dev $DEV xdp off
+ip link set dev $DEV xdp obj $FILE sec xdp_mark
+
+# Use iptables to "see" if SKBs are marked
+iptables -I INPUT -p icmp -m mark --mark 41  # == 0x29
+iptables -I INPUT -p icmp -m mark --mark 42  # == 0x2a
+
+# Hint: catch XDP_ABORTED errors via
+perf record -e xdp:*
+perf script
+
+*/
diff --git a/samples/bpf/xdp_monitor_kern.c b/samples/bpf/xdp_monitor_kern.c
index 2fe2f761a0d0..211db8ded0de 100644
--- a/samples/bpf/xdp_monitor_kern.c
+++ b/samples/bpf/xdp_monitor_kern.c
@@ -1,6 +1,7 @@
-/* XDP monitor tool, based on tracepoints
+/* SPDX-License-Identifier: GPL-2.0
+ *  Copyright(c) 2017-2018 Jesper Dangaard Brouer, Red Hat Inc.
  *
- *  Copyright(c) 2017 Jesper Dangaard Brouer, Red Hat Inc.
+ * XDP monitor tool, based on tracepoints
  */
 #include <uapi/linux/bpf.h>
 #include "bpf_helpers.h"
@@ -104,7 +105,7 @@ struct xdp_exception_ctx {
 SEC("tracepoint/xdp/xdp_exception")
 int trace_xdp_exception(struct xdp_exception_ctx *ctx)
 {
-	u64 *cnt;;
+	u64 *cnt;
 	u32 key;
 
 	key = ctx->act;
@@ -118,3 +119,92 @@ int trace_xdp_exception(struct xdp_exception_ctx *ctx)
 
 	return 0;
 }
+
+/* Common stats data record shared with _user.c */
+struct datarec {
+	u64 processed;
+	u64 dropped;
+	u64 info;
+};
+#define MAX_CPUS 64
+
+struct bpf_map_def SEC("maps") cpumap_enqueue_cnt = {
+	.type		= BPF_MAP_TYPE_PERCPU_ARRAY,
+	.key_size	= sizeof(u32),
+	.value_size	= sizeof(struct datarec),
+	.max_entries	= MAX_CPUS,
+};
+
+struct bpf_map_def SEC("maps") cpumap_kthread_cnt = {
+	.type		= BPF_MAP_TYPE_PERCPU_ARRAY,
+	.key_size	= sizeof(u32),
+	.value_size	= sizeof(struct datarec),
+	.max_entries	= 1,
+};
+
+/* Tracepoint: /sys/kernel/debug/tracing/events/xdp/xdp_cpumap_enqueue/format
+ * Code in:         kernel/include/trace/events/xdp.h
+ */
+struct cpumap_enqueue_ctx {
+	u64 __pad;		// First 8 bytes are not accessible by bpf code
+	int map_id;		//	offset:8;  size:4; signed:1;
+	u32 act;		//	offset:12; size:4; signed:0;
+	int cpu;		//	offset:16; size:4; signed:1;
+	unsigned int drops;	//	offset:20; size:4; signed:0;
+	unsigned int processed;	//	offset:24; size:4; signed:0;
+	int to_cpu;		//	offset:28; size:4; signed:1;
+};
+
+SEC("tracepoint/xdp/xdp_cpumap_enqueue")
+int trace_xdp_cpumap_enqueue(struct cpumap_enqueue_ctx *ctx)
+{
+	u32 to_cpu = ctx->to_cpu;
+	struct datarec *rec;
+
+	if (to_cpu >= MAX_CPUS)
+		return 1;
+
+	rec = bpf_map_lookup_elem(&cpumap_enqueue_cnt, &to_cpu);
+	if (!rec)
+		return 0;
+	rec->processed += ctx->processed;
+	rec->dropped   += ctx->drops;
+
+	/* Record bulk events, then userspace can calc average bulk size */
+	if (ctx->processed > 0)
+		rec->info += 1;
+
+	return 0;
+}
+
+/* Tracepoint: /sys/kernel/debug/tracing/events/xdp/xdp_cpumap_kthread/format
+ * Code in:         kernel/include/trace/events/xdp.h
+ */
+struct cpumap_kthread_ctx {
+	u64 __pad;		// First 8 bytes are not accessible by bpf code
+	int map_id;		//	offset:8;  size:4; signed:1;
+	u32 act;		//	offset:12; size:4; signed:0;
+	int cpu;		//	offset:16; size:4; signed:1;
+	unsigned int drops;	//	offset:20; size:4; signed:0;
+	unsigned int processed;	//	offset:24; size:4; signed:0;
+	int sched;		//	offset:28; size:4; signed:1;
+};
+
+SEC("tracepoint/xdp/xdp_cpumap_kthread")
+int trace_xdp_cpumap_kthread(struct cpumap_kthread_ctx *ctx)
+{
+	struct datarec *rec;
+	u32 key = 0;
+
+	rec = bpf_map_lookup_elem(&cpumap_kthread_cnt, &key);
+	if (!rec)
+		return 0;
+	rec->processed += ctx->processed;
+	rec->dropped   += ctx->drops;
+
+	/* Count times kthread yielded CPU via schedule call */
+	if (ctx->sched)
+		rec->info++;
+
+	return 0;
+}
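+
+/* The cpumap counters above are drained by xdp_monitor_user.c; run
+ * the xdp_monitor binary built from this sample while a cpumap
+ * redirect workload is active to see the enqueue and kthread stats.
+ */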
diff --git a/samples/bpf/xdp_monitor_user.c b/samples/bpf/xdp_monitor_user.c
index eaba165b3549..eec14520d513 100644
--- a/samples/bpf/xdp_monitor_user.c
+++ b/samples/bpf/xdp_monitor_user.c
@@ -1,4 +1,5 @@
-/* Copyright(c) 2017 Jesper Dangaard Brouer, Red Hat, Inc.
+/* SPDX-License-Identifier: GPL-2.0
+ * Copyright(c) 2017 Jesper Dangaard Brouer, Red Hat, Inc.
  */
 static const char *__doc__=
  "XDP monitor tool, based on tracepoints\n"
@@ -40,6 +41,9 @@ static const struct option long_options[] = {
 	{0, 0, NULL,  0 }
 };
 
+/* The C standard specifies two constants, EXIT_SUCCESS(0) and EXIT_FAILURE(1) */
+#define EXIT_FAIL_MEM	5
+
 static void usage(char *argv[])
 {
 	int i;
@@ -108,23 +112,93 @@ static const char *action2str(int action)
 	return NULL;
 }
 
+/* Common stats data record shared with _kern.c */
+struct datarec {
+	__u64 processed;
+	__u64 dropped;
+	__u64 info;
+};
+#define MAX_CPUS 64
+
+/* Userspace structs for collection of stats from maps */
 struct record {
-	__u64 counter;
 	__u64 timestamp;
+	struct datarec total;
+	struct datarec *cpu;
+};
+struct u64rec {
+	__u64 processed;
+};
+struct record_u64 {
+	/* record for _kern side __u64 values */
+	__u64 timestamp;
+	struct u64rec total;
+	struct u64rec *cpu;
 };
 
 struct stats_record {
-	struct record xdp_redir[REDIR_RES_MAX];
-	struct record xdp_exception[XDP_ACTION_MAX];
+	struct record_u64 xdp_redirect[REDIR_RES_MAX];
+	struct record_u64 xdp_exception[XDP_ACTION_MAX];
+	struct record xdp_cpumap_kthread;
+	struct record xdp_cpumap_enqueue[MAX_CPUS];
 };
 
-static void stats_print_headers(bool err_only)
+static bool map_collect_record(int fd, __u32 key, struct record *rec)
 {
-	if (err_only)
-		printf("\n%s\n", __doc_err_only__);
+	/* For percpu maps, userspace gets a value per possible CPU */
+	unsigned int nr_cpus = bpf_num_possible_cpus();
+	struct datarec values[nr_cpus];
+	__u64 sum_processed = 0;
+	__u64 sum_dropped = 0;
+	__u64 sum_info = 0;
+	int i;
+
+	if ((bpf_map_lookup_elem(fd, &key, values)) != 0) {
+		fprintf(stderr,
+			"ERR: bpf_map_lookup_elem failed key:0x%X\n", key);
+		return false;
+	}
+	/* Get time as close as possible to reading map contents */
+	rec->timestamp = gettime();
 
-	printf("%-14s %-11s %-10s %-18s %-9s\n",
-	       "ACTION", "result", "pps ", "pps-human-readable", "measure-period");
+	/* Record and sum values from each CPU */
+	for (i = 0; i < nr_cpus; i++) {
+		rec->cpu[i].processed = values[i].processed;
+		sum_processed        += values[i].processed;
+		rec->cpu[i].dropped = values[i].dropped;
+		sum_dropped        += values[i].dropped;
+		rec->cpu[i].info = values[i].info;
+		sum_info        += values[i].info;
+	}
+	rec->total.processed = sum_processed;
+	rec->total.dropped   = sum_dropped;
+	rec->total.info      = sum_info;
+	return true;
+}
+
+static bool map_collect_record_u64(int fd, __u32 key, struct record_u64 *rec)
+{
+	/* For percpu maps, userspace gets a value per possible CPU */
+	unsigned int nr_cpus = bpf_num_possible_cpus();
+	struct u64rec values[nr_cpus];
+	__u64 sum_total = 0;
+	int i;
+
+	if ((bpf_map_lookup_elem(fd, &key, values)) != 0) {
+		fprintf(stderr,
+			"ERR: bpf_map_lookup_elem failed key:0x%X\n", key);
+		return false;
+	}
+	/* Get time as close as possible to reading map contents */
+	rec->timestamp = gettime();
+
+	/* Record and sum values from each CPU */
+	for (i = 0; i < nr_cpus; i++) {
+		rec->cpu[i].processed = values[i].processed;
+		sum_total            += values[i].processed;
+	}
+	rec->total.processed = sum_total;
+	return true;
 }
 
 static double calc_period(struct record *r, struct record *p)
@@ -139,77 +213,203 @@ static double calc_period(struct record *r, struct record *p)
 	return period_;
 }
 
-static double calc_pps(struct record *r, struct record *p, double period)
+static double calc_period_u64(struct record_u64 *r, struct record_u64 *p)
+{
+	double period_ = 0;
+	__u64 period = 0;
+
+	period = r->timestamp - p->timestamp;
+	if (period > 0)
+		period_ = ((double) period / NANOSEC_PER_SEC);
+
+	return period_;
+}
+
+static double calc_pps(struct datarec *r, struct datarec *p, double period)
+{
+	__u64 packets = 0;
+	double pps = 0;
+
+	if (period > 0) {
+		packets = r->processed - p->processed;
+		pps = packets / period;
+	}
+	return pps;
+}
+
+static double calc_pps_u64(struct u64rec *r, struct u64rec *p, double period)
+{
+	__u64 packets = 0;
+	double pps = 0;
+
+	if (period > 0) {
+		packets = r->processed - p->processed;
+		pps = packets / period;
+	}
+	return pps;
+}
+
+static double calc_drop(struct datarec *r, struct datarec *p, double period)
+{
+	__u64 packets = 0;
+	double pps = 0;
+
+	if (period > 0) {
+		packets = r->dropped - p->dropped;
+		pps = packets / period;
+	}
+	return pps;
+}
+
+static double calc_info(struct datarec *r, struct datarec *p, double period)
 {
 	__u64 packets = 0;
 	double pps = 0;
 
 	if (period > 0) {
-		packets = r->counter - p->counter;
+		packets = r->info - p->info;
 		pps = packets / period;
 	}
 	return pps;
 }
 
-static void stats_print(struct stats_record *rec,
-			struct stats_record *prev,
+static void stats_print(struct stats_record *stats_rec,
+			struct stats_record *stats_prev,
 			bool err_only)
 {
-	double period = 0, pps = 0;
-	struct record *r, *p;
-	int i = 0;
+	unsigned int nr_cpus = bpf_num_possible_cpus();
+	int rec_i = 0, i, to_cpu;
+	double t = 0, pps = 0;
 
-	char *fmt = "%-14s %-11s %-10.0f %'-18.0f %f\n";
+	/* Header */
+	printf("%-15s %-7s %-12s %-12s %-9s\n",
+	       "XDP-event", "CPU:to", "pps", "drop-pps", "extra-info");
 
 	/* tracepoint: xdp:xdp_redirect_* */
 	if (err_only)
-		i = REDIR_ERROR;
-
-	for (; i < REDIR_RES_MAX; i++) {
-		r = &rec->xdp_redir[i];
-		p = &prev->xdp_redir[i];
-
-		if (p->timestamp) {
-			period = calc_period(r, p);
-			pps = calc_pps(r, p, period);
+		rec_i = REDIR_ERROR;
+
+	for (; rec_i < REDIR_RES_MAX; rec_i++) {
+		struct record_u64 *rec, *prev;
+		char *fmt1 = "%-15s %-7d %'-12.0f %'-12.0f %s\n";
+		char *fmt2 = "%-15s %-7s %'-12.0f %'-12.0f %s\n";
+
+		rec  =  &stats_rec->xdp_redirect[rec_i];
+		prev = &stats_prev->xdp_redirect[rec_i];
+		t = calc_period_u64(rec, prev);
+
+		for (i = 0; i < nr_cpus; i++) {
+			struct u64rec *r = &rec->cpu[i];
+			struct u64rec *p = &prev->cpu[i];
+
+			pps = calc_pps_u64(r, p, t);
+			if (pps > 0)
+				printf(fmt1, "XDP_REDIRECT", i,
+				       rec_i ? 0.0: pps, rec_i ? pps : 0.0,
+				       err2str(rec_i));
 		}
-		printf(fmt, "XDP_REDIRECT", err2str(i), pps, pps, period);
+		pps = calc_pps_u64(&rec->total, &prev->total, t);
+		printf(fmt2, "XDP_REDIRECT", "total",
+		       rec_i ? 0.0: pps, rec_i ? pps : 0.0, err2str(rec_i));
 	}
 
 	/* tracepoint: xdp:xdp_exception */
-	for (i = 0; i < XDP_ACTION_MAX; i++) {
-		r = &rec->xdp_exception[i];
-		p = &prev->xdp_exception[i];
-		if (p->timestamp) {
-			period = calc_period(r, p);
-			pps = calc_pps(r, p, period);
+	for (rec_i = 0; rec_i < XDP_ACTION_MAX; rec_i++) {
+		struct record_u64 *rec, *prev;
+		char *fmt1 = "%-15s %-7d %'-12.0f %'-12.0f %s\n";
+		char *fmt2 = "%-15s %-7s %'-12.0f %'-12.0f %s\n";
+
+		rec  =  &stats_rec->xdp_exception[rec_i];
+		prev = &stats_prev->xdp_exception[rec_i];
+		t = calc_period_u64(rec, prev);
+
+		for (i = 0; i < nr_cpus; i++) {
+			struct u64rec *r = &rec->cpu[i];
+			struct u64rec *p = &prev->cpu[i];
+
+			pps = calc_pps_u64(r, p, t);
+			if (pps > 0)
+				printf(fmt1, "Exception", i,
+				       0.0, pps, err2str(rec_i));
 		}
+		pps = calc_pps_u64(&rec->total, &prev->total, t);
 		if (pps > 0)
-			printf(fmt, action2str(i), "Exception",
-			       pps, pps, period);
+			printf(fmt2, "Exception", "total",
+			       0.0, pps, action2str(rec_i));
 	}
-	printf("\n");
-}
 
-static __u64 get_key32_value64_percpu(int fd, __u32 key)
-{
-	/* For percpu maps, userspace gets a value per possible CPU */
-	unsigned int nr_cpus = bpf_num_possible_cpus();
-	__u64 values[nr_cpus];
-	__u64 sum = 0;
-	int i;
-
-	if ((bpf_map_lookup_elem(fd, &key, values)) != 0) {
-		fprintf(stderr,
-			"ERR: bpf_map_lookup_elem failed key:0x%X\n", key);
-		return 0;
+	/* cpumap enqueue stats */
+	for (to_cpu = 0; to_cpu < MAX_CPUS; to_cpu++) {
+		char *fmt1 = "%-15s %3d:%-3d %'-12.0f %'-12.0f %'-10.2f %s\n";
+		char *fmt2 = "%-15s %3s:%-3d %'-12.0f %'-12.0f %'-10.2f %s\n";
+		struct record *rec, *prev;
+		char *info_str = "";
+		double drop, info;
+
+		rec  =  &stats_rec->xdp_cpumap_enqueue[to_cpu];
+		prev = &stats_prev->xdp_cpumap_enqueue[to_cpu];
+		t = calc_period(rec, prev);
+		for (i = 0; i < nr_cpus; i++) {
+			struct datarec *r = &rec->cpu[i];
+			struct datarec *p = &prev->cpu[i];
+
+			pps  = calc_pps(r, p, t);
+			drop = calc_drop(r, p, t);
+			info = calc_info(r, p, t);
+			if (info > 0) {
+				info_str = "bulk-average";
+				info = pps / info; /* calc average bulk size */
+			}
+			if (pps > 0)
+				printf(fmt1, "cpumap-enqueue",
+				       i, to_cpu, pps, drop, info, info_str);
+		}
+		pps = calc_pps(&rec->total, &prev->total, t);
+		if (pps > 0) {
+			drop = calc_drop(&rec->total, &prev->total, t);
+			info = calc_info(&rec->total, &prev->total, t);
+			if (info > 0) {
+				info_str = "bulk-average";
+				info = pps / info; /* calc average bulk size */
+			}
+			printf(fmt2, "cpumap-enqueue",
+			       "sum", to_cpu, pps, drop, info, info_str);
+		}
 	}
 
-	/* Sum values from each CPU */
-	for (i = 0; i < nr_cpus; i++) {
-		sum += values[i];
+	/* cpumap kthread stats */
+	{
+		char *fmt1 = "%-15s %-7d %'-12.0f %'-12.0f %'-10.0f %s\n";
+		char *fmt2 = "%-15s %-7s %'-12.0f %'-12.0f %'-10.0f %s\n";
+		struct record *rec, *prev;
+		double drop, info;
+		char *i_str = "";
+
+		rec  =  &stats_rec->xdp_cpumap_kthread;
+		prev = &stats_prev->xdp_cpumap_kthread;
+		t = calc_period(rec, prev);
+		for (i = 0; i < nr_cpus; i++) {
+			struct datarec *r = &rec->cpu[i];
+			struct datarec *p = &prev->cpu[i];
+
+			pps  = calc_pps(r, p, t);
+			drop = calc_drop(r, p, t);
+			info = calc_info(r, p, t);
+			if (info > 0)
+				i_str = "sched";
+			if (pps > 0)
+				printf(fmt1, "cpumap-kthread",
+				       i, pps, drop, info, i_str);
+		}
+		pps = calc_pps(&rec->total, &prev->total, t);
+		drop = calc_drop(&rec->total, &prev->total, t);
+		info = calc_info(&rec->total, &prev->total, t);
+		if (info > 0)
+			i_str = "sched-sum";
+		printf(fmt2, "cpumap-kthread", "total", pps, drop, info, i_str);
 	}
-	return sum;
+
+	printf("\n");
 }
 
 static bool stats_collect(struct stats_record *rec)
@@ -222,25 +422,109 @@ static bool stats_collect(struct stats_record *rec)
 	 */
 
 	fd = map_data[0].fd; /* map0: redirect_err_cnt */
-	for (i = 0; i < REDIR_RES_MAX; i++) {
-		rec->xdp_redir[i].timestamp = gettime();
-		rec->xdp_redir[i].counter = get_key32_value64_percpu(fd, i);
-	}
+	for (i = 0; i < REDIR_RES_MAX; i++)
+		map_collect_record_u64(fd, i, &rec->xdp_redirect[i]);
 
 	fd = map_data[1].fd; /* map1: exception_cnt */
 	for (i = 0; i < XDP_ACTION_MAX; i++) {
-		rec->xdp_exception[i].timestamp = gettime();
-		rec->xdp_exception[i].counter = get_key32_value64_percpu(fd, i);
+		map_collect_record_u64(fd, i, &rec->xdp_exception[i]);
 	}
 
+	fd = map_data[2].fd; /* map2: cpumap_enqueue_cnt */
+	for (i = 0; i < MAX_CPUS; i++)
+		map_collect_record(fd, i, &rec->xdp_cpumap_enqueue[i]);
+
+	fd = map_data[3].fd; /* map3: cpumap_kthread_cnt */
+	map_collect_record(fd, 0, &rec->xdp_cpumap_kthread);
+
 	return true;
 }
 
+static void *alloc_rec_per_cpu(int record_size)
+{
+	unsigned int nr_cpus = bpf_num_possible_cpus();
+	void *array;
+	size_t size;
+
+	size = record_size * nr_cpus;
+	array = malloc(size);
+	if (!array) {
+		fprintf(stderr, "Mem alloc error (nr_cpus:%u)\n", nr_cpus);
+		exit(EXIT_FAIL_MEM);
+	}
+	memset(array, 0, size);
+	return array;
+}
+
+static struct stats_record *alloc_stats_record(void)
+{
+	struct stats_record *rec;
+	int rec_sz;
+	int i;
+
+	/* Alloc main stats_record structure */
+	rec = malloc(sizeof(*rec));
+	if (!rec) {
+		fprintf(stderr, "Mem alloc error\n");
+		exit(EXIT_FAIL_MEM);
+	}
+	memset(rec, 0, sizeof(*rec));
+
+	/* Alloc stats stored per CPU for each record */
+	rec_sz = sizeof(struct u64rec);
+	for (i = 0; i < REDIR_RES_MAX; i++)
+		rec->xdp_redirect[i].cpu = alloc_rec_per_cpu(rec_sz);
+
+	for (i = 0; i < XDP_ACTION_MAX; i++)
+		rec->xdp_exception[i].cpu = alloc_rec_per_cpu(rec_sz);
+
+	rec_sz = sizeof(struct datarec);
+	rec->xdp_cpumap_kthread.cpu = alloc_rec_per_cpu(rec_sz);
+
+	for (i = 0; i < MAX_CPUS; i++)
+		rec->xdp_cpumap_enqueue[i].cpu = alloc_rec_per_cpu(rec_sz);
+
+	return rec;
+}
+
+static void free_stats_record(struct stats_record *r)
+{
+	int i;
+
+	for (i = 0; i < REDIR_RES_MAX; i++)
+		free(r->xdp_redirect[i].cpu);
+
+	for (i = 0; i < XDP_ACTION_MAX; i++)
+		free(r->xdp_exception[i].cpu);
+
+	free(r->xdp_cpumap_kthread.cpu);
+
+	for (i = 0; i < MAX_CPUS; i++)
+		free(r->xdp_cpumap_enqueue[i].cpu);
+
+	free(r);
+}
+
+/* Pointer swap trick */
+static inline void swap(struct stats_record **a, struct stats_record **b)
+{
+	struct stats_record *tmp;
+
+	tmp = *a;
+	*a = *b;
+	*b = tmp;
+}
+
 static void stats_poll(int interval, bool err_only)
 {
-	struct stats_record rec, prev;
+	struct stats_record *rec, *prev;
 
-	memset(&rec, 0, sizeof(rec));
+	rec  = alloc_stats_record();
+	prev = alloc_stats_record();
+	stats_collect(rec);
+
+	if (err_only)
+		printf("\n%s\n", __doc_err_only__);
 
 	/* Trick to pretty-print with thousands separators: use printf's %' flag */
 	setlocale(LC_NUMERIC, "en_US");
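+	/* e.g. printf("%'d", 1234567) then prints "1,234,567" (a sketch;
+	 * requires the en_US locale to be available on the system)
+	 */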
@@ -258,13 +542,15 @@ static void stats_poll(int interval, bool err_only)
 	fflush(stdout);
 
 	while (1) {
-		memcpy(&prev, &rec, sizeof(rec));
-		stats_collect(&rec);
-		stats_print_headers(err_only);
-		stats_print(&rec, &prev, err_only);
+		swap(&prev, &rec);
+		stats_collect(rec);
+		stats_print(rec, prev, err_only);
 		fflush(stdout);
 		sleep(interval);
 	}
+
+	free_stats_record(rec);
+	free_stats_record(prev);
 }
 
 static void print_bpf_prog_info(void)
diff --git a/samples/bpf/xdp_rxq_info_kern.c b/samples/bpf/xdp_rxq_info_kern.c
new file mode 100644
index 000000000000..3fd209291653
--- /dev/null
+++ b/samples/bpf/xdp_rxq_info_kern.c
@@ -0,0 +1,96 @@
+/* SPDX-License-Identifier: GPL-2.0
+ * Copyright (c) 2017 Jesper Dangaard Brouer, Red Hat Inc.
+ *
+ *  Example of how to extract XDP RX-queue info
+ */
+#include <uapi/linux/bpf.h>
+#include "bpf_helpers.h"
+
+/* Config setup from userspace
+ *
+ * Userspace sets up the ifindex in config_map, to verify that
+ * ctx->ingress_ifindex is correct (against the configured ifindex)
+ */
+struct config {
+	__u32 action;
+	int ifindex;
+};
+struct bpf_map_def SEC("maps") config_map = {
+	.type		= BPF_MAP_TYPE_ARRAY,
+	.key_size	= sizeof(int),
+	.value_size	= sizeof(struct config),
+	.max_entries	= 1,
+};
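+
+/* Sketch of the userspace side (see xdp_rxq_info_user.c below for the
+ * real code): before attaching, userspace writes key 0 of config_map:
+ *
+ *   struct config cfg = { .action = XDP_PASS, .ifindex = ifindex };
+ *   __u32 key = 0;
+ *   bpf_map_update_elem(map_fd[0], &key, &cfg, 0);
+ */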
+
+/* Common stats data record (shared with userspace) */
+struct datarec {
+	__u64 processed;
+	__u64 issue;
+};
+
+struct bpf_map_def SEC("maps") stats_global_map = {
+	.type		= BPF_MAP_TYPE_PERCPU_ARRAY,
+	.key_size	= sizeof(u32),
+	.value_size	= sizeof(struct datarec),
+	.max_entries	= 1,
+};
+
+#define MAX_RXQs 64
+
+/* Stats per rx_queue_index (per CPU) */
+struct bpf_map_def SEC("maps") rx_queue_index_map = {
+	.type		= BPF_MAP_TYPE_PERCPU_ARRAY,
+	.key_size	= sizeof(u32),
+	.value_size	= sizeof(struct datarec),
+	.max_entries	= MAX_RXQs + 1,
+};
+
+SEC("xdp_prog0")
+int  xdp_prognum0(struct xdp_md *ctx)
+{
+	void *data_end = (void *)(long)ctx->data_end;
+	void *data     = (void *)(long)ctx->data;
+	struct datarec *rec, *rxq_rec;
+	int ingress_ifindex;
+	struct config *config;
+	u32 key = 0;
+
+	/* Global stats record */
+	rec = bpf_map_lookup_elem(&stats_global_map, &key);
+	if (!rec)
+		return XDP_ABORTED;
+	rec->processed++;
+
+	/* Accessing ctx->ingress_ifindex causes BPF to rewrite the BPF
+	 * instructions inside the kernel to access xdp_rxq->dev->ifindex
+	 */
+	ingress_ifindex = ctx->ingress_ifindex;
+
+	config = bpf_map_lookup_elem(&config_map, &key);
+	if (!config)
+		return XDP_ABORTED;
+
+	/* Simple test: check ctx provided ifindex is as expected */
+	if (ingress_ifindex != config->ifindex) {
+		/* count this error case */
+		rec->issue++;
+		return XDP_ABORTED;
+	}
+
+	/* Update stats per rx_queue_index.  Handle the case where
+	 * rx_queue_index is larger than what the stats map can hold.
+	 */
+	key = ctx->rx_queue_index;
+	if (key >= MAX_RXQs)
+		key = MAX_RXQs;
+	rxq_rec = bpf_map_lookup_elem(&rx_queue_index_map, &key);
+	if (!rxq_rec)
+		return XDP_ABORTED;
+	rxq_rec->processed++;
+	if (key == MAX_RXQs)
+		rxq_rec->issue++;
+
+	return config->action;
+}
+
+char _license[] SEC("license") = "GPL";
diff --git a/samples/bpf/xdp_rxq_info_user.c b/samples/bpf/xdp_rxq_info_user.c
new file mode 100644
index 000000000000..32430e8b3a6a
--- /dev/null
+++ b/samples/bpf/xdp_rxq_info_user.c
@@ -0,0 +1,531 @@
+/* SPDX-License-Identifier: GPL-2.0
+ * Copyright (c) 2017 Jesper Dangaard Brouer, Red Hat Inc.
+ */
+static const char *__doc__ = " XDP RX-queue info extract example\n\n"
+	"Monitor how many packets per sec (pps) are received\n"
+	"per NIC RX queue index and which CPU processed the packet\n"
+	;
+
+#include <errno.h>
+#include <signal.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <stdbool.h>
+#include <string.h>
+#include <unistd.h>
+#include <locale.h>
+#include <sys/resource.h>
+#include <getopt.h>
+#include <net/if.h>
+#include <time.h>
+
+#include <arpa/inet.h>
+#include <linux/if_link.h>
+
+#include "libbpf.h"
+#include "bpf_load.h"
+#include "bpf_util.h"
+
+static int ifindex = -1;
+static char ifname_buf[IF_NAMESIZE];
+static char *ifname;
+
+static __u32 xdp_flags;
+
+/* Exit return codes */
+#define EXIT_OK		0
+#define EXIT_FAIL		1
+#define EXIT_FAIL_OPTION	2
+#define EXIT_FAIL_XDP		3
+#define EXIT_FAIL_BPF		4
+#define EXIT_FAIL_MEM		5
+
+static const struct option long_options[] = {
+	{"help",	no_argument,		NULL, 'h' },
+	{"dev",		required_argument,	NULL, 'd' },
+	{"skb-mode",	no_argument,		NULL, 'S' },
+	{"sec",		required_argument,	NULL, 's' },
+	{"no-separators", no_argument,		NULL, 'z' },
+	{"action",	required_argument,	NULL, 'a' },
+	{0, 0, NULL,  0 }
+};
+
+static void int_exit(int sig)
+{
+	fprintf(stderr,
+		"Interrupted: Removing XDP program on ifindex:%d device:%s\n",
+		ifindex, ifname);
+	if (ifindex > -1)
+		set_link_xdp_fd(ifindex, -1, xdp_flags);
+	exit(EXIT_OK);
+}
+
+struct config {
+	__u32 action;
+	int ifindex;
+};
+#define XDP_ACTION_MAX (XDP_TX + 1)
+#define XDP_ACTION_MAX_STRLEN 11
+static const char *xdp_action_names[XDP_ACTION_MAX] = {
+	[XDP_ABORTED]	= "XDP_ABORTED",
+	[XDP_DROP]	= "XDP_DROP",
+	[XDP_PASS]	= "XDP_PASS",
+	[XDP_TX]	= "XDP_TX",
+};
+
+static const char *action2str(int action)
+{
+	if (action < XDP_ACTION_MAX)
+		return xdp_action_names[action];
+	return NULL;
+}
+
+static int parse_xdp_action(char *action_str)
+{
+	size_t maxlen;
+	__u64 action = -1;
+	int i;
+
+	for (i = 0; i < XDP_ACTION_MAX; i++) {
+		maxlen = XDP_ACTION_MAX_STRLEN;
+		if (strncmp(xdp_action_names[i], action_str, maxlen) == 0) {
+			action = i;
+			break;
+		}
+	}
+	return action;
+}
+
+static void list_xdp_actions(void)
+{
+	int i;
+
+	printf("Available XDP --action <options>\n");
+	for (i = 0; i < XDP_ACTION_MAX; i++)
+		printf("\t%s\n", xdp_action_names[i]);
+	printf("\n");
+}
+
+static void usage(char *argv[])
+{
+	int i;
+
+	printf("\nDOCUMENTATION:\n%s\n", __doc__);
+	printf(" Usage: %s (options-see-below)\n", argv[0]);
+	printf(" Listing options:\n");
+	for (i = 0; long_options[i].name != 0; i++) {
+		printf(" --%-12s", long_options[i].name);
+		if (long_options[i].flag != NULL)
+			printf(" flag (internal value:%d)",
+				*long_options[i].flag);
+		else
+			printf(" short-option: -%c",
+				long_options[i].val);
+		printf("\n");
+	}
+	printf("\n");
+	list_xdp_actions();
+}
+
+#define NANOSEC_PER_SEC 1000000000 /* 10^9 */
+static __u64 gettime(void)
+{
+	struct timespec t;
+	int res;
+
+	res = clock_gettime(CLOCK_MONOTONIC, &t);
+	if (res < 0) {
+		fprintf(stderr, "Error with gettimeofday! (%i)\n", res);
+		exit(EXIT_FAIL);
+	}
+	return (__u64) t.tv_sec * NANOSEC_PER_SEC + t.tv_nsec;
+}
+
+/* Common stats data record shared with _kern.c */
+struct datarec {
+	__u64 processed;
+	__u64 issue;
+};
+struct record {
+	__u64 timestamp;
+	struct datarec total;
+	struct datarec *cpu;
+};
+struct stats_record {
+	struct record stats;
+	struct record *rxq;
+};
+
+static struct datarec *alloc_record_per_cpu(void)
+{
+	unsigned int nr_cpus = bpf_num_possible_cpus();
+	struct datarec *array;
+	size_t size;
+
+	size = sizeof(struct datarec) * nr_cpus;
+	array = malloc(size);
+	if (!array) {
+		fprintf(stderr, "Mem alloc error (nr_cpus:%u)\n", nr_cpus);
+		exit(EXIT_FAIL_MEM);
+	}
+	memset(array, 0, size);
+	return array;
+}
+
+static struct record *alloc_record_per_rxq(void)
+{
+	unsigned int nr_rxqs = map_data[2].def.max_entries;
+	struct record *array;
+	size_t size;
+
+	size = sizeof(struct record) * nr_rxqs;
+	array = malloc(size);
+	if (!array) {
+		fprintf(stderr, "Mem alloc error (nr_rxqs:%u)\n", nr_rxqs);
+		exit(EXIT_FAIL_MEM);
+	}
+	memset(array, 0, size);
+	return array;
+}
+
+static struct stats_record *alloc_stats_record(void)
+{
+	unsigned int nr_rxqs = map_data[2].def.max_entries;
+	struct stats_record *rec;
+	int i;
+
+	rec = malloc(sizeof(*rec));
+	if (!rec) {
+		fprintf(stderr, "Mem alloc error\n");
+		exit(EXIT_FAIL_MEM);
+	}
+	memset(rec, 0, sizeof(*rec));
+	rec->rxq = alloc_record_per_rxq();
+	for (i = 0; i < nr_rxqs; i++)
+		rec->rxq[i].cpu = alloc_record_per_cpu();
+
+	rec->stats.cpu = alloc_record_per_cpu();
+	return rec;
+}
+
+static void free_stats_record(struct stats_record *r)
+{
+	unsigned int nr_rxqs = map_data[2].def.max_entries;
+	int i;
+
+	for (i = 0; i < nr_rxqs; i++)
+		free(r->rxq[i].cpu);
+
+	free(r->rxq);
+	free(r->stats.cpu);
+	free(r);
+}
+
+static bool map_collect_percpu(int fd, __u32 key, struct record *rec)
+{
+	/* For percpu maps, userspace gets a value per possible CPU */
+	unsigned int nr_cpus = bpf_num_possible_cpus();
+	struct datarec values[nr_cpus];
+	__u64 sum_processed = 0;
+	__u64 sum_issue = 0;
+	int i;
+
+	if ((bpf_map_lookup_elem(fd, &key, values)) != 0) {
+		fprintf(stderr,
+			"ERR: bpf_map_lookup_elem failed key:0x%X\n", key);
+		return false;
+	}
+	/* Get time as close as possible to reading map contents */
+	rec->timestamp = gettime();
+
+	/* Record and sum values from each CPU */
+	for (i = 0; i < nr_cpus; i++) {
+		rec->cpu[i].processed = values[i].processed;
+		sum_processed        += values[i].processed;
+		rec->cpu[i].issue = values[i].issue;
+		sum_issue        += values[i].issue;
+	}
+	rec->total.processed = sum_processed;
+	rec->total.issue     = sum_issue;
+	return true;
+}
+
+static void stats_collect(struct stats_record *rec)
+{
+	int fd, i, max_rxqs;
+
+	fd = map_data[1].fd; /* map: stats_global_map */
+	map_collect_percpu(fd, 0, &rec->stats);
+
+	fd = map_data[2].fd; /* map: rx_queue_index_map */
+	max_rxqs = map_data[2].def.max_entries;
+	for (i = 0; i < max_rxqs; i++)
+		map_collect_percpu(fd, i, &rec->rxq[i]);
+}
+
+static double calc_period(struct record *r, struct record *p)
+{
+	double period_ = 0;
+	__u64 period = 0;
+
+	period = r->timestamp - p->timestamp;
+	if (period > 0)
+		period_ = ((double) period / NANOSEC_PER_SEC);
+
+	return period_;
+}
+
+static __u64 calc_pps(struct datarec *r, struct datarec *p, double period_)
+{
+	__u64 packets = 0;
+	__u64 pps = 0;
+
+	if (period_ > 0) {
+		packets = r->processed - p->processed;
+		pps = packets / period_;
+	}
+	return pps;
+}
+
+static __u64 calc_errs_pps(struct datarec *r,
+			    struct datarec *p, double period_)
+{
+	__u64 packets = 0;
+	__u64 pps = 0;
+
+	if (period_ > 0) {
+		packets = r->issue - p->issue;
+		pps = packets / period_;
+	}
+	return pps;
+}
+
+static void stats_print(struct stats_record *stats_rec,
+			struct stats_record *stats_prev,
+			int action)
+{
+	unsigned int nr_cpus = bpf_num_possible_cpus();
+	unsigned int nr_rxqs = map_data[2].def.max_entries;
+	double pps = 0, err = 0;
+	struct record *rec, *prev;
+	double t;
+	int rxq;
+	int i;
+
+	/* Header */
+	printf("\nRunning XDP on dev:%s (ifindex:%d) action:%s\n",
+	       ifname, ifindex, action2str(action));
+
+	/* stats_global_map */
+	{
+		char *fmt_rx = "%-15s %-7d %'-11.0f %'-10.0f %s\n";
+		char *fm2_rx = "%-15s %-7s %'-11.0f\n";
+		char *errstr = "";
+
+		printf("%-15s %-7s %-11s %-11s\n",
+		       "XDP stats", "CPU", "pps", "issue-pps");
+
+		rec  =  &stats_rec->stats;
+		prev = &stats_prev->stats;
+		t = calc_period(rec, prev);
+		for (i = 0; i < nr_cpus; i++) {
+			struct datarec *r = &rec->cpu[i];
+			struct datarec *p = &prev->cpu[i];
+
+			pps = calc_pps     (r, p, t);
+			err = calc_errs_pps(r, p, t);
+			if (err > 0)
+				errstr = "invalid-ifindex";
+			if (pps > 0)
+				printf(fmt_rx, "XDP-RX CPU",
+					i, pps, err, errstr);
+		}
+		pps  = calc_pps     (&rec->total, &prev->total, t);
+		err  = calc_errs_pps(&rec->total, &prev->total, t);
+		printf(fm2_rx, "XDP-RX CPU", "total", pps, err);
+	}
+
+	/* rx_queue_index_map */
+	printf("\n%-15s %-7s %-11s %-11s\n",
+	       "RXQ stats", "RXQ:CPU", "pps", "issue-pps");
+
+	for (rxq = 0; rxq < nr_rxqs; rxq++) {
+		char *fmt_rx = "%-15s %3d:%-3d %'-11.0f %'-10.0f %s\n";
+		char *fm2_rx = "%-15s %3d:%-3s %'-11.0f\n";
+		char *errstr = "";
+		int rxq_ = rxq;
+
+		/* Last RXQ in map catches overflows */
+		if (rxq_ == nr_rxqs - 1)
+			rxq_ = -1;
+
+		rec  =  &stats_rec->rxq[rxq];
+		prev = &stats_prev->rxq[rxq];
+		t = calc_period(rec, prev);
+		for (i = 0; i < nr_cpus; i++) {
+			struct datarec *r = &rec->cpu[i];
+			struct datarec *p = &prev->cpu[i];
+
+			pps = calc_pps     (r, p, t);
+			err = calc_errs_pps(r, p, t);
+			if (err > 0) {
+				if (rxq_ == -1)
+					errstr = "map-overflow-RXQ";
+				else
+					errstr = "err";
+			}
+			if (pps > 0)
+				printf(fmt_rx, "rx_queue_index",
+				       rxq_, i, pps, err, errstr);
+		}
+		pps  = calc_pps     (&rec->total, &prev->total, t);
+		err  = calc_errs_pps(&rec->total, &prev->total, t);
+		if (pps || err)
+			printf(fm2_rx, "rx_queue_index", rxq_, "sum", pps, err);
+	}
+}
+
+
+/* Pointer swap trick */
+static inline void swap(struct stats_record **a, struct stats_record **b)
+{
+	struct stats_record *tmp;
+
+	tmp = *a;
+	*a = *b;
+	*b = tmp;
+}
+
+static void stats_poll(int interval, int action)
+{
+	struct stats_record *record, *prev;
+
+	record = alloc_stats_record();
+	prev   = alloc_stats_record();
+	stats_collect(record);
+
+	while (1) {
+		swap(&prev, &record);
+		stats_collect(record);
+		stats_print(record, prev, action);
+		sleep(interval);
+	}
+
+	free_stats_record(record);
+	free_stats_record(prev);
+}
+
+
+int main(int argc, char **argv)
+{
+	struct rlimit r = {10 * 1024 * 1024, RLIM_INFINITY};
+	bool use_separators = true;
+	struct config cfg = { 0 };
+	char filename[256];
+	int longindex = 0;
+	int interval = 2;
+	__u32 key = 0;
+	int opt, err;
+
+	char action_str_buf[XDP_ACTION_MAX_STRLEN + 1 /* for \0 */] = { 0 };
+	int action = XDP_PASS; /* Default action */
+	char *action_str = NULL;
+
+	snprintf(filename, sizeof(filename), "%s_kern.o", argv[0]);
+
+	if (setrlimit(RLIMIT_MEMLOCK, &r)) {
+		perror("setrlimit(RLIMIT_MEMLOCK)");
+		return 1;
+	}
+
+	if (load_bpf_file(filename)) {
+		fprintf(stderr, "ERR in load_bpf_file(): %s", bpf_log_buf);
+		return EXIT_FAIL;
+	}
+
+	if (!prog_fd[0]) {
+		fprintf(stderr, "ERR: load_bpf_file: %s\n", strerror(errno));
+		return EXIT_FAIL;
+	}
+
+	/* Parse commands line args */
+	while ((opt = getopt_long(argc, argv, "hSd:",
+				  long_options, &longindex)) != -1) {
+		switch (opt) {
+		case 'd':
+			if (strlen(optarg) >= IF_NAMESIZE) {
+				fprintf(stderr, "ERR: --dev name too long\n");
+				goto error;
+			}
+			ifname = (char *)&ifname_buf;
+			strncpy(ifname, optarg, IF_NAMESIZE);
+			ifindex = if_nametoindex(ifname);
+			if (ifindex == 0) {
+				fprintf(stderr,
+					"ERR: --dev name unknown err(%d):%s\n",
+					errno, strerror(errno));
+				goto error;
+			}
+			break;
+		case 's':
+			interval = atoi(optarg);
+			break;
+		case 'S':
+			xdp_flags |= XDP_FLAGS_SKB_MODE;
+			break;
+		case 'z':
+			use_separators = false;
+			break;
+		case 'a':
+			action_str = (char *)&action_str_buf;
+			strncpy(action_str, optarg, XDP_ACTION_MAX_STRLEN);
+			break;
+		case 'h':
+		error:
+		default:
+			usage(argv);
+			return EXIT_FAIL_OPTION;
+		}
+	}
+	/* Required option */
+	if (ifindex == -1) {
+		fprintf(stderr, "ERR: required option --dev missing\n");
+		usage(argv);
+		return EXIT_FAIL_OPTION;
+	}
+	cfg.ifindex = ifindex;
+
+	/* Parse action string */
+	if (action_str) {
+		action = parse_xdp_action(action_str);
+		if (action < 0) {
+			fprintf(stderr, "ERR: Invalid XDP --action: %s\n",
+				action_str);
+			list_xdp_actions();
+			return EXIT_FAIL_OPTION;
+		}
+	}
+	cfg.action = action;
+
+	/* Trick to pretty-print with thousands separators: use printf's %' flag */
+	if (use_separators)
+		setlocale(LC_NUMERIC, "en_US");
+
+	/* User-side setup ifindex in config_map */
+	err = bpf_map_update_elem(map_fd[0], &key, &cfg, 0);
+	if (err) {
+		fprintf(stderr, "Store config failed (err:%d)\n", err);
+		exit(EXIT_FAIL_BPF);
+	}
+
+	/* Remove XDP program when program is interrupted */
+	signal(SIGINT, int_exit);
+
+	if (set_link_xdp_fd(ifindex, prog_fd[0], xdp_flags) < 0) {
+		fprintf(stderr, "link set xdp fd failed\n");
+		return EXIT_FAIL_XDP;
+	}
+
+	stats_poll(interval, action);
+	return EXIT_OK;
+}
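+
+/* Example invocation (a sketch; the device name is an assumption):
+ *   ./xdp_rxq_info --dev eth0 --action XDP_DROP --sec 2
+ */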
diff --git a/samples/sockmap/sockmap_user.c b/samples/sockmap/sockmap_user.c
index 7cc9d228216f..7c25c0c112bc 100644
--- a/samples/sockmap/sockmap_user.c
+++ b/samples/sockmap/sockmap_user.c
@@ -23,8 +23,11 @@
 #include <stdbool.h>
 #include <signal.h>
 #include <fcntl.h>
+#include <sys/wait.h>
+#include <time.h>
 
 #include <sys/time.h>
+#include <sys/resource.h>
 #include <sys/types.h>
 
 #include <linux/netlink.h>
@@ -35,6 +38,8 @@
 #include <assert.h>
 #include <libgen.h>
 
+#include <getopt.h>
+
 #include "../bpf/bpf_load.h"
 #include "../bpf/bpf_util.h"
 #include "../bpf/libbpf.h"
@@ -46,15 +51,42 @@ void running_handler(int a);
 #define S1_PORT 10000
 #define S2_PORT 10001
 
-static int sockmap_test_sockets(int rate, int dot)
+/* global sockets */
+int s1, s2, c1, c2, p1, p2;
+
+static const struct option long_options[] = {
+	{"help",	no_argument,		NULL, 'h' },
+	{"cgroup",	required_argument,	NULL, 'c' },
+	{"rate",	required_argument,	NULL, 'r' },
+	{"verbose",	no_argument,		NULL, 'v' },
+	{"iov_count",	required_argument,	NULL, 'i' },
+	{"length",	required_argument,	NULL, 'l' },
+	{"test",	required_argument,	NULL, 't' },
+	{0, 0, NULL, 0 }
+};
+
+static void usage(char *argv[])
+{
+	int i;
+
+	printf(" Usage: %s --cgroup <cgroup_path>\n", argv[0]);
+	printf(" options:\n");
+	for (i = 0; long_options[i].name != 0; i++) {
+		printf(" --%-12s", long_options[i].name);
+		if (long_options[i].flag != NULL)
+			printf(" flag (internal value:%d)\n",
+				*long_options[i].flag);
+		else
+			printf(" -%c\n", long_options[i].val);
+	}
+	printf("\n");
+}
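+
+/* Example invocations (a sketch; the cgroup path is an assumption):
+ *   ./sockmap --cgroup /mnt/cgroup2/test                # ping/pong
+ *   ./sockmap --cgroup /mnt/cgroup2/test -t sendmsg -i 8 -l 4096 -r 100
+ *   ./sockmap --cgroup /mnt/cgroup2/test -t base        # no-BPF baseline
+ */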
+
+static int sockmap_init_sockets(void)
 {
-	int i, sc, err, max_fd, one = 1;
-	int s1, s2, c1, c2, p1, p2;
+	int i, err, one = 1;
 	struct sockaddr_in addr;
-	struct timeval timeout;
-	char buf[1024] = {0};
 	int *fds[4] = {&s1, &s2, &c1, &c2};
-	fd_set w;
 
 	s1 = s2 = p1 = p2 = c1 = c2 = 0;
 
@@ -63,8 +95,7 @@ static int sockmap_test_sockets(int rate, int dot)
 		*fds[i] = socket(AF_INET, SOCK_STREAM, 0);
 		if (*fds[i] < 0) {
 			perror("socket s1 failed()");
-			err = *fds[i];
-			goto out;
+			return errno;
 		}
 	}
 
@@ -74,16 +105,16 @@ static int sockmap_test_sockets(int rate, int dot)
 				 (char *)&one, sizeof(one));
 		if (err) {
 			perror("setsockopt failed()");
-			goto out;
+			return errno;
 		}
 	}
 
 	/* Non-blocking sockets */
-	for (i = 0; i < 4; i++) {
+	for (i = 0; i < 2; i++) {
 		err = ioctl(*fds[i], FIONBIO, (char *)&one);
 		if (err < 0) {
 			perror("ioctl s1 failed()");
-			goto out;
+			return errno;
 		}
 	}
 
@@ -96,14 +127,14 @@ static int sockmap_test_sockets(int rate, int dot)
 	err = bind(s1, (struct sockaddr *)&addr, sizeof(addr));
 	if (err < 0) {
 		perror("bind s1 failed()\n");
-		goto out;
+		return errno;
 	}
 
 	addr.sin_port = htons(S2_PORT);
 	err = bind(s2, (struct sockaddr *)&addr, sizeof(addr));
 	if (err < 0) {
 		perror("bind s2 failed()\n");
-		goto out;
+		return errno;
 	}
 
 	/* Listen server sockets */
@@ -111,14 +142,14 @@ static int sockmap_test_sockets(int rate, int dot)
 	err = listen(s1, 32);
 	if (err < 0) {
 		perror("listen s1 failed()\n");
-		goto out;
+		return errno;
 	}
 
 	addr.sin_port = htons(S2_PORT);
 	err = listen(s2, 32);
 	if (err < 0) {
 		perror("listen s1 failed()\n");
-		goto out;
+		return errno;
 	}
 
 	/* Initiate Connect */
@@ -126,46 +157,232 @@ static int sockmap_test_sockets(int rate, int dot)
 	err = connect(c1, (struct sockaddr *)&addr, sizeof(addr));
 	if (err < 0 && errno != EINPROGRESS) {
 		perror("connect c1 failed()\n");
-		goto out;
+		return errno;
 	}
 
 	addr.sin_port = htons(S2_PORT);
 	err = connect(c2, (struct sockaddr *)&addr, sizeof(addr));
 	if (err < 0 && errno != EINPROGRESS) {
 		perror("connect c2 failed()\n");
-		goto out;
+		return errno;
+	} else if (err < 0) {
+		err = 0;
 	}
 
 	/* Accept Connections */
 	p1 = accept(s1, NULL, NULL);
 	if (p1 < 0) {
 		perror("accept s1 failed()\n");
-		goto out;
+		return errno;
 	}
 
 	p2 = accept(s2, NULL, NULL);
 	if (p2 < 0) {
 		perror("accept s1 failed()\n");
-		goto out;
+		return errno;
 	}
 
-	max_fd = p2;
-	timeout.tv_sec = 10;
-	timeout.tv_usec = 0;
-
 	printf("connected sockets: c1 <-> p1, c2 <-> p2\n");
 	printf("cgroups binding: c1(%i) <-> s1(%i) - - - c2(%i) <-> s2(%i)\n",
 		c1, s1, c2, s2);
+	return 0;
+}
+
+struct msg_stats {
+	size_t bytes_sent;
+	size_t bytes_recvd;
+	struct timespec start;
+	struct timespec end;
+};
+
+static int msg_loop(int fd, int iov_count, int iov_length, int cnt,
+		    struct msg_stats *s, bool tx)
+{
+	struct msghdr msg = {0};
+	int err, i, flags = MSG_NOSIGNAL;
+	struct iovec *iov;
+
+	iov = calloc(iov_count, sizeof(struct iovec));
+	if (!iov)
+		return errno;
+
+	for (i = 0; i < iov_count; i++) {
+		char *d = calloc(iov_length, sizeof(char));
+
+		if (!d) {
+			fprintf(stderr, "iov_count %i/%i OOM\n", i, iov_count);
+			goto out_errno;
+		}
+		iov[i].iov_base = d;
+		iov[i].iov_len = iov_length;
+	}
+
+	msg.msg_iov = iov;
+	msg.msg_iovlen = iov_count;
+
+	if (tx) {
+		clock_gettime(CLOCK_MONOTONIC, &s->start);
+		for (i = 0; i < cnt; i++) {
+			int sent = sendmsg(fd, &msg, flags);
+
+			if (sent < 0) {
+				perror("send loop error:");
+				goto out_errno;
+			}
+			s->bytes_sent += sent;
+		}
+		clock_gettime(CLOCK_MONOTONIC, &s->end);
+	} else {
+		int slct, recv, max_fd = fd;
+		struct timeval timeout;
+		float total_bytes;
+		fd_set w;
+
+		total_bytes = (float)iov_count * (float)iov_length * (float)cnt;
+		err = clock_gettime(CLOCK_MONOTONIC, &s->start);
+		if (err < 0)
+			perror("recv start time: ");
+		while (s->bytes_recvd < total_bytes) {
+			timeout.tv_sec = 1;
+			timeout.tv_usec = 0;
+
+			/* FD sets */
+			FD_ZERO(&w);
+			FD_SET(fd, &w);
+
+			slct = select(max_fd + 1, &w, NULL, NULL, &timeout);
+			if (slct == -1) {
+				perror("select()");
+				clock_gettime(CLOCK_MONOTONIC, &s->end);
+				goto out_errno;
+			} else if (!slct) {
+				fprintf(stderr, "unexpected timeout\n");
+				errno = EIO;
+				clock_gettime(CLOCK_MONOTONIC, &s->end);
+				goto out_errno;
+			}
+
+			recv = recvmsg(fd, &msg, flags);
+			if (recv < 0) {
+				if (errno != EWOULDBLOCK) {
+					clock_gettime(CLOCK_MONOTONIC, &s->end);
+					perror("recv failed()\n");
+					goto out_errno;
+				}
+				/* EWOULDBLOCK: nothing received, go back
+				 * to select() rather than counting -1
+				 */
+				continue;
+			}
+
+			s->bytes_recvd += recv;
+		}
+		clock_gettime(CLOCK_MONOTONIC, &s->end);
+	}
+
+	for (i = 0; i < iov_count; i++)
+		free(iov[i].iov_base);
+	free(iov);
+	return 0;
+out_errno:
+	for (i = 0; i < iov_count; i++)
+		free(iov[i].iov_base);
+	free(iov);
+	return errno;
+}
+
+static float giga = 1000000000;
+
+static inline float sentBps(struct msg_stats s)
+{
+	return s.bytes_sent / (s.end.tv_sec - s.start.tv_sec);
+}
+
+static inline float recvdBps(struct msg_stats s)
+{
+	return s.bytes_recvd / (s.end.tv_sec - s.start.tv_sec);
+}
+
+static int sendmsg_test(int iov_count, int iov_buf, int cnt,
+			int verbose, bool base)
+{
+	float sent_Bps = 0, recvd_Bps = 0;
+	int rx_fd, txpid, rxpid, err = 0;
+	struct msg_stats s = {0};
+	int status;
+
+	errno = 0;
+
+	if (base)
+		rx_fd = p1;
+	else
+		rx_fd = p2;
+
+	rxpid = fork();
+	if (rxpid == 0) {
+		err = msg_loop(rx_fd, iov_count, iov_buf, cnt, &s, false);
+		if (err)
+			fprintf(stderr,
+				"msg_loop_rx: iov_count %i iov_buf %i cnt %i err %i\n",
+				iov_count, iov_buf, cnt, err);
+		shutdown(p2, SHUT_RDWR);
+		shutdown(p1, SHUT_RDWR);
+		if (s.end.tv_sec - s.start.tv_sec) {
+			sent_Bps = sentBps(s);
+			recvd_Bps = recvdBps(s);
+		}
+		fprintf(stdout,
+			"rx_sendmsg: TX: %zuB %fB/s %fGB/s RX: %zuB %fB/s %fGB/s\n",
+			s.bytes_sent, sent_Bps, sent_Bps/giga,
+			s.bytes_recvd, recvd_Bps, recvd_Bps/giga);
+		exit(1);
+	} else if (rxpid == -1) {
+		perror("msg_loop_rx: ");
+		return errno;
+	}
+
+	txpid = fork();
+	if (txpid == 0) {
+		err = msg_loop(c1, iov_count, iov_buf, cnt, &s, true);
+		if (err)
+			fprintf(stderr,
+				"msg_loop_tx: iov_count %i iov_buf %i cnt %i err %i\n",
+				iov_count, iov_buf, cnt, err);
+		shutdown(c1, SHUT_RDWR);
+		if (s.end.tv_sec - s.start.tv_sec) {
+			sent_Bps = sentBps(s);
+			recvd_Bps = recvdBps(s);
+		}
+		fprintf(stdout,
+			"tx_sendmsg: TX: %zuB %fB/s %f GB/s RX: %zuB %fB/s %fGB/s\n",
+			s.bytes_sent, sent_Bps, sent_Bps/giga,
+			s.bytes_recvd, recvd_Bps, recvd_Bps/giga);
+		exit(1);
+	} else if (txpid == -1) {
+		perror("msg_loop_tx: ");
+		return errno;
+	}
+
+	assert(waitpid(rxpid, &status, 0) == rxpid);
+	assert(waitpid(txpid, &status, 0) == txpid);
+	return err;
+}
+
+static int forever_ping_pong(int rate, int verbose)
+{
+	struct timeval timeout;
+	char buf[1024] = {0};
+	int sc;
+
+	timeout.tv_sec = 10;
+	timeout.tv_usec = 0;
 
 	/* Ping/Pong data from client to server */
 	sc = send(c1, buf, sizeof(buf), 0);
 	if (sc < 0) {
 		perror("send failed()\n");
-		goto out;
+		return sc;
 	}
 
 	do {
-		int s, rc, i;
+		int s, rc, i, max_fd = p2;
+		fd_set w;
 
 		/* FD sets */
 		FD_ZERO(&w);
@@ -193,7 +410,7 @@ static int sockmap_test_sockets(int rate, int dot)
 			if (rc < 0) {
 				if (errno != EWOULDBLOCK) {
 					perror("recv failed()\n");
-					break;
+					return rc;
 				}
 			}
 
@@ -205,35 +422,92 @@ static int sockmap_test_sockets(int rate, int dot)
 			sc = send(i, buf, rc, 0);
 			if (sc < 0) {
 				perror("send failed()\n");
-				break;
+				return sc;
 			}
 		}
-		sleep(rate);
-		if (dot) {
+
+		if (rate)
+			sleep(rate);
+
+		if (verbose) {
 			printf(".");
 			fflush(stdout);
 
 		}
 	} while (running);
 
-out:
-	close(s1);
-	close(s2);
-	close(p1);
-	close(p2);
-	close(c1);
-	close(c2);
-	return err;
+	return 0;
 }
 
+enum {
+	PING_PONG,
+	SENDMSG,
+	BASE,
+};
+
 int main(int argc, char **argv)
 {
-	int rate = 1, dot = 1;
+	int iov_count = 1, length = 1024, rate = 1, verbose = 0;
+	struct rlimit r = {10 * 1024 * 1024, RLIM_INFINITY};
+	int opt, longindex, err, cg_fd = 0;
+	int test = PING_PONG;
 	char filename[256];
-	int err, cg_fd;
-	char *cg_path;
 
-	cg_path = argv[argc - 1];
+	while ((opt = getopt_long(argc, argv, "hvc:r:i:l:t:",
+				  long_options, &longindex)) != -1) {
+		switch (opt) {
+		/* Cgroup configuration */
+		case 'c':
+			cg_fd = open(optarg, O_RDONLY | O_DIRECTORY);
+			if (cg_fd < 0) {
+				fprintf(stderr,
+					"ERROR: (%i) open cg path failed: %s\n",
+					cg_fd, optarg);
+				return cg_fd;
+			}
+			break;
+		case 'r':
+			rate = atoi(optarg);
+			break;
+		case 'v':
+			verbose = 1;
+			break;
+		case 'i':
+			iov_count = atoi(optarg);
+			break;
+		case 'l':
+			length = atoi(optarg);
+			break;
+		case 't':
+			if (strcmp(optarg, "ping") == 0) {
+				test = PING_PONG;
+			} else if (strcmp(optarg, "sendmsg") == 0) {
+				test = SENDMSG;
+			} else if (strcmp(optarg, "base") == 0) {
+				test = BASE;
+			} else {
+				usage(argv);
+				return -1;
+			}
+			break;
+		case 'h':
+		default:
+			usage(argv);
+			return -1;
+		}
+	}
+
+	if (!cg_fd) {
+		fprintf(stderr, "%s requires cgroup option: --cgroup <path>\n",
+			argv[0]);
+		return -1;
+	}
+
+	if (setrlimit(RLIMIT_MEMLOCK, &r)) {
+		perror("setrlimit(RLIMIT_MEMLOCK)");
+		return 1;
+	}
+
 	snprintf(filename, sizeof(filename), "%s_kern.o", argv[0]);
 
 	running = 1;
@@ -241,20 +515,16 @@ int main(int argc, char **argv)
 	/* catch SIGINT */
 	signal(SIGINT, running_handler);
 
+	/* If base test skip BPF setup */
+	if (test == BASE)
+		goto run;
+
 	if (load_bpf_file(filename)) {
 		fprintf(stderr, "load_bpf_file: (%s) %s\n",
 			filename, strerror(errno));
 		return 1;
 	}
 
-	/* Cgroup configuration */
-	cg_fd = open(cg_path, O_DIRECTORY, O_RDONLY);
-	if (cg_fd < 0) {
-		fprintf(stderr, "ERROR: (%i) open cg path failed: %s\n",
-			cg_fd, cg_path);
-		return cg_fd;
-	}
-
 	/* Attach programs to sockmap */
 	err = bpf_prog_attach(prog_fd[0], map_fd[0],
 				BPF_SK_SKB_STREAM_PARSER, 0);
@@ -280,12 +550,30 @@ int main(int argc, char **argv)
 		return err;
 	}
 
-	err = sockmap_test_sockets(rate, dot);
+run:
+	err = sockmap_init_sockets();
 	if (err) {
 		fprintf(stderr, "ERROR: test socket failed: %d\n", err);
-		return err;
+		goto out;
 	}
-	return 0;
+
+	if (test == PING_PONG)
+		err = forever_ping_pong(rate, verbose);
+	else if (test == SENDMSG)
+		err = sendmsg_test(iov_count, length, rate, verbose, false);
+	else if (test == BASE)
+		err = sendmsg_test(iov_count, length, rate, verbose, true);
+	else
+		fprintf(stderr, "unknown test\n");
+out:
+	close(s1);
+	close(s2);
+	close(p1);
+	close(p2);
+	close(c1);
+	close(c2);
+	close(cg_fd);
+	return err;
 }
 
 void running_handler(int a)
diff --git a/security/selinux/xfrm.c b/security/selinux/xfrm.c
index 56e354fcdfc6..928188902901 100644
--- a/security/selinux/xfrm.c
+++ b/security/selinux/xfrm.c
@@ -452,7 +452,7 @@ int selinux_xfrm_postroute_last(u32 sk_sid, struct sk_buff *skb,
 	if (dst) {
 		struct dst_entry *iter;
 
-		for (iter = dst; iter != NULL; iter = iter->child) {
+		for (iter = dst; iter != NULL; iter = xfrm_dst_child(iter)) {
 			struct xfrm_state *x = iter->xfrm;
 
 			if (x && selinux_authorizable_xfrm(x))
diff --git a/tools/bpf/Makefile b/tools/bpf/Makefile
index 07a6697466ef..c8ec0ae16bf0 100644
--- a/tools/bpf/Makefile
+++ b/tools/bpf/Makefile
@@ -9,6 +9,35 @@ MAKE = make
 CFLAGS += -Wall -O2
 CFLAGS += -D__EXPORTED_HEADERS__ -I../../include/uapi -I../../include
 
+ifeq ($(srctree),)
+srctree := $(patsubst %/,%,$(dir $(CURDIR)))
+srctree := $(patsubst %/,%,$(dir $(srctree)))
+endif
+
+FEATURE_USER = .bpf
+FEATURE_TESTS = libbfd disassembler-four-args
+FEATURE_DISPLAY = libbfd disassembler-four-args
+
+check_feat := 1
+NON_CHECK_FEAT_TARGETS := clean bpftool_clean
+ifdef MAKECMDGOALS
+ifeq ($(filter-out $(NON_CHECK_FEAT_TARGETS),$(MAKECMDGOALS)),)
+  check_feat := 0
+endif
+endif
+
+ifeq ($(check_feat),1)
+ifeq ($(FEATURES_DUMP),)
+include $(srctree)/tools/build/Makefile.feature
+else
+include $(FEATURES_DUMP)
+endif
+endif
+
+ifeq ($(feature-disassembler-four-args), 1)
+CFLAGS += -DDISASM_FOUR_ARGS_SIGNATURE
+endif
+
 %.yacc.c: %.y
 	$(YACC) -o $@ -d $<
 
diff --git a/tools/bpf/bpf_jit_disasm.c b/tools/bpf/bpf_jit_disasm.c
index 75bf526a0168..58c2bab4ef6e 100644
--- a/tools/bpf/bpf_jit_disasm.c
+++ b/tools/bpf/bpf_jit_disasm.c
@@ -72,7 +72,14 @@ static void get_asm_insns(uint8_t *image, size_t len, int opcodes)
 
 	disassemble_init_for_target(&info);
 
+#ifdef DISASM_FOUR_ARGS_SIGNATURE
+	disassemble = disassembler(info.arch,
+				   bfd_big_endian(bfdf),
+				   info.mach,
+				   bfdf);
+#else
 	disassemble = disassembler(bfdf);
+#endif
 	assert(disassemble);
 
 	do {
@@ -165,7 +172,8 @@ static uint8_t *get_last_jit_image(char *haystack, size_t hlen,
 {
 	char *ptr, *pptr, *tmp;
 	off_t off = 0;
-	int ret, flen, proglen, pass, ulen = 0;
+	unsigned int proglen;
+	int ret, flen, pass, ulen = 0;
 	regmatch_t pmatch[1];
 	unsigned long base;
 	regex_t regex;
@@ -192,7 +200,7 @@ static uint8_t *get_last_jit_image(char *haystack, size_t hlen,
 	}
 
 	ptr = haystack + off - (pmatch[0].rm_eo - pmatch[0].rm_so);
-	ret = sscanf(ptr, "flen=%d proglen=%d pass=%d image=%lx",
+	ret = sscanf(ptr, "flen=%d proglen=%u pass=%d image=%lx",
 		     &flen, &proglen, &pass, &base);
 	if (ret != 4) {
 		regfree(&regex);
@@ -232,7 +240,7 @@ static uint8_t *get_last_jit_image(char *haystack, size_t hlen,
 	}
 
 	assert(ulen == proglen);
-	printf("%d bytes emitted from JIT compiler (pass:%d, flen:%d)\n",
+	printf("%u bytes emitted from JIT compiler (pass:%d, flen:%d)\n",
 	       proglen, pass, flen);
 	printf("%lx + <x>:\n", base);
 
diff --git a/tools/bpf/bpftool/Documentation/Makefile b/tools/bpf/bpftool/Documentation/Makefile
index 37292bb5ce60..c462a928e03d 100644
--- a/tools/bpf/bpftool/Documentation/Makefile
+++ b/tools/bpf/bpftool/Documentation/Makefile
@@ -3,12 +3,16 @@ include ../../../scripts/utilities.mak
 
 INSTALL ?= install
 RM ?= rm -f
+RMDIR ?= rmdir --ignore-fail-on-non-empty
 
-# Make the path relative to DESTDIR, not prefix
-ifndef DESTDIR
-prefix ?= /usr/local
+ifeq ($(V),1)
+  Q =
+else
+  Q = @
 endif
-mandir ?= $(prefix)/share/man
+
+prefix ?= /usr/local
+mandir ?= $(prefix)/man
 man8dir = $(mandir)/man8
 
 MAN8_RST = $(wildcard *.rst)
@@ -20,15 +24,21 @@ man: man8
 man8: $(DOC_MAN8)
 
 $(OUTPUT)%.8: %.rst
-	rst2man $< > $@
+	$(QUIET_GEN)rst2man $< > $@
 
 clean:
-	$(call QUIET_CLEAN, Documentation) $(RM) $(DOC_MAN8)
+	$(call QUIET_CLEAN, Documentation)
+	$(Q)$(RM) $(DOC_MAN8)
 
 install: man
-	$(call QUIET_INSTALL, Documentation-man) \
-		$(INSTALL) -d -m 755 $(DESTDIR)$(man8dir); \
-		$(INSTALL) -m 644 $(DOC_MAN8) $(DESTDIR)$(man8dir);
+	$(call QUIET_INSTALL, Documentation-man)
+	$(Q)$(INSTALL) -d -m 755 $(DESTDIR)$(man8dir)
+	$(Q)$(INSTALL) -m 644 $(DOC_MAN8) $(DESTDIR)$(man8dir)
+
+uninstall:
+	$(call QUIET_UNINST, Documentation-man)
+	$(Q)$(RM) $(addprefix $(DESTDIR)$(man8dir)/,$(_DOC_MAN8))
+	$(Q)$(RMDIR) $(DESTDIR)$(man8dir)
 
-.PHONY: man man8 clean install
+.PHONY: man man8 clean install uninstall
 .DEFAULT_GOAL := man
diff --git a/tools/bpf/bpftool/Documentation/bpftool-cgroup.rst b/tools/bpf/bpftool/Documentation/bpftool-cgroup.rst
new file mode 100644
index 000000000000..2fe2a1bdbe3e
--- /dev/null
+++ b/tools/bpf/bpftool/Documentation/bpftool-cgroup.rst
@@ -0,0 +1,118 @@
+================
+bpftool-cgroup
+================
+-------------------------------------------------------------------------------
+tool for inspection and simple manipulation of eBPF progs
+-------------------------------------------------------------------------------
+
+:Manual section: 8
+
+SYNOPSIS
+========
+
+	**bpftool** [*OPTIONS*] **cgroup** *COMMAND*
+
+	*OPTIONS* := { { **-j** | **--json** } [{ **-p** | **--pretty** }] | { **-f** | **--bpffs** } }
+
+	*COMMANDS* :=
+	{ **show** | **list** | **attach** | **detach** | **help** }
+
+CGROUP COMMANDS
+===============
+
+|	**bpftool** **cgroup { show | list }** *CGROUP*
+|	**bpftool** **cgroup attach** *CGROUP* *ATTACH_TYPE* *PROG* [*ATTACH_FLAGS*]
+|	**bpftool** **cgroup detach** *CGROUP* *ATTACH_TYPE* *PROG*
+|	**bpftool** **cgroup help**
+|
+|	*PROG* := { **id** *PROG_ID* | **pinned** *FILE* | **tag** *PROG_TAG* }
+|	*ATTACH_TYPE* := { *ingress* | *egress* | *sock_create* | *sock_ops* | *device* }
+|	*ATTACH_FLAGS* := { *multi* | *override* }
+
+DESCRIPTION
+===========
+	**bpftool cgroup { show | list }** *CGROUP*
+		  List all programs attached to the cgroup *CGROUP*.
+
+		  Output will start with program ID followed by attach type,
+		  attach flags and program name.
+
+	**bpftool cgroup attach** *CGROUP* *ATTACH_TYPE* *PROG* [*ATTACH_FLAGS*]
+		  Attach program *PROG* to the cgroup *CGROUP* with attach type
+		  *ATTACH_TYPE* and optional *ATTACH_FLAGS*.
+
+		  *ATTACH_FLAGS* can be one of: **override**, if a sub-cgroup installs
+		  some bpf program, the program in this cgroup yields to the
+		  sub-cgroup program; **multi**, if a sub-cgroup installs some
+		  bpf program, that cgroup program gets run in addition to the
+		  program in this cgroup.
+
+		  Only one program is allowed to be attached to a cgroup with
+		  no attach flags or the **override** flag. Attaching another
+		  program will release the old program and attach the new one.
+
+		  Multiple programs are allowed to be attached to a cgroup with
+		  **multi**. They are executed in FIFO order (those that were
+		  attached first run first).
+
+		  Non-default *ATTACH_FLAGS* are supported by kernel version 4.14
+		  and later.
+
+		  *ATTACH_TYPE* can be one of:
+		  **ingress** ingress path of the inet socket (since 4.10);
+		  **egress** egress path of the inet socket (since 4.10);
+		  **sock_create** opening of an inet socket (since 4.10);
+		  **sock_ops** various socket operations (since 4.12);
+		  **device** device access (since 4.15).
+
+	**bpftool cgroup detach** *CGROUP* *ATTACH_TYPE* *PROG*
+		  Detach *PROG* from the cgroup *CGROUP* and attach type
+		  *ATTACH_TYPE*.
+
+	**bpftool cgroup help**
+		  Print short help message.
+
+OPTIONS
+=======
+	-h, --help
+		  Print short generic help message (similar to **bpftool help**).
+
+	-v, --version
+		  Print version number (similar to **bpftool version**).
+
+	-j, --json
+		  Generate JSON output. For commands that cannot produce JSON, this
+		  option has no effect.
+
+	-p, --pretty
+		  Generate human-readable JSON output. Implies **-j**.
+
+	-f, --bpffs
+		  Show file names of pinned programs.
+
+EXAMPLES
+========
+|
+| **# mount -t bpf none /sys/fs/bpf/**
+| **# mkdir /sys/fs/cgroup/test.slice**
+| **# bpftool prog load ./device_cgroup.o /sys/fs/bpf/prog**
+| **# bpftool cgroup attach /sys/fs/cgroup/test.slice/ device id 1 multi**
+
+**# bpftool cgroup list /sys/fs/cgroup/test.slice/**
+
+::
+
+    ID       AttachType      AttachFlags     Name
+    1        device          multi           bpf_prog1
+
+|
+| **# bpftool cgroup detach /sys/fs/cgroup/test.slice/ device id 1**
+| **# bpftool cgroup list /sys/fs/cgroup/test.slice/**
+
+::
+
+    ID       AttachType      AttachFlags     Name
+
+SEE ALSO
+========
+	**bpftool**\ (8), **bpftool-prog**\ (8), **bpftool-map**\ (8)
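
The override/multi semantics described in this man page map directly onto the
BPF_F_ALLOW_OVERRIDE and BPF_F_ALLOW_MULTI flags of libbpf's bpf_prog_attach(),
which bpftool calls underneath. A minimal sketch, assuming two already-loaded
cgroup-skb program fds (attach_multi() and its error handling are illustrative,
not part of this patch)::

    /* Attach two programs to one cgroup egress hook with multi semantics;
     * they then run in FIFO attach order rather than replacing each other.
     */
    #include <fcntl.h>
    #include <bpf.h>    /* tools/lib/bpf */

    static int attach_multi(const char *cg_path, int prog_fd1, int prog_fd2)
    {
        int cg_fd = open(cg_path, O_DIRECTORY | O_RDONLY);

        if (cg_fd < 0)
            return -1;
        if (bpf_prog_attach(prog_fd1, cg_fd, BPF_CGROUP_INET_EGRESS,
                            BPF_F_ALLOW_MULTI) ||
            bpf_prog_attach(prog_fd2, cg_fd, BPF_CGROUP_INET_EGRESS,
                            BPF_F_ALLOW_MULTI))
            return -1;
        return cg_fd;
    }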
diff --git a/tools/bpf/bpftool/Documentation/bpftool-map.rst b/tools/bpf/bpftool/Documentation/bpftool-map.rst
index 9f51a268eb06..0ab32b312aec 100644
--- a/tools/bpf/bpftool/Documentation/bpftool-map.rst
+++ b/tools/bpf/bpftool/Documentation/bpftool-map.rst
@@ -15,13 +15,13 @@ SYNOPSIS
 	*OPTIONS* := { { **-j** | **--json** } [{ **-p** | **--pretty** }] | { **-f** | **--bpffs** } }
 
 	*COMMANDS* :=
-	{ **show** | **dump** | **update** | **lookup** | **getnext** | **delete**
+	{ **show** | **list** | **dump** | **update** | **lookup** | **getnext** | **delete**
 	| **pin** | **help** }
 
 MAP COMMANDS
 =============
 
-|	**bpftool** **map show**   [*MAP*]
+|	**bpftool** **map { show | list }**   [*MAP*]
 |	**bpftool** **map dump**    *MAP*
 |	**bpftool** **map update**  *MAP*  **key** *BYTES*   **value** *VALUE* [*UPDATE_FLAGS*]
 |	**bpftool** **map lookup**  *MAP*  **key** *BYTES*
@@ -36,7 +36,7 @@ MAP COMMANDS
 
 DESCRIPTION
 ===========
-	**bpftool map show**   [*MAP*]
+	**bpftool map { show | list }**   [*MAP*]
 		  Show information about loaded maps.  If *MAP* is specified
 		  show information only about given map, otherwise list all
 		  maps currently loaded on the system.
@@ -128,4 +128,4 @@ EXAMPLES
 
 SEE ALSO
 ========
-	**bpftool**\ (8), **bpftool-prog**\ (8)
+	**bpftool**\ (8), **bpftool-prog**\ (8), **bpftool-cgroup**\ (8)
diff --git a/tools/bpf/bpftool/Documentation/bpftool-prog.rst b/tools/bpf/bpftool/Documentation/bpftool-prog.rst
index 36e8d1c3c40d..e4ceee7f2dff 100644
--- a/tools/bpf/bpftool/Documentation/bpftool-prog.rst
+++ b/tools/bpf/bpftool/Documentation/bpftool-prog.rst
@@ -15,22 +15,23 @@ SYNOPSIS
 	*OPTIONS* := { { **-j** | **--json** } [{ **-p** | **--pretty** }] | { **-f** | **--bpffs** } }
 
 	*COMMANDS* :=
-	{ **show** | **dump xlated** | **dump jited** | **pin** | **help** }
+	{ **show** | **list** | **dump xlated** | **dump jited** | **pin** | **load** | **help** }
 
 MAP COMMANDS
 =============
 
-|	**bpftool** **prog show** [*PROG*]
+|	**bpftool** **prog { show | list }** [*PROG*]
 |	**bpftool** **prog dump xlated** *PROG* [{**file** *FILE* | **opcodes**}]
 |	**bpftool** **prog dump jited**  *PROG* [{**file** *FILE* | **opcodes**}]
 |	**bpftool** **prog pin** *PROG* *FILE*
+|	**bpftool** **prog load** *OBJ* *FILE*
 |	**bpftool** **prog help**
 |
 |	*PROG* := { **id** *PROG_ID* | **pinned** *FILE* | **tag** *PROG_TAG* }
 
 DESCRIPTION
 ===========
-	**bpftool prog show** [*PROG*]
+	**bpftool prog { show | list }** [*PROG*]
 		  Show information about loaded programs.  If *PROG* is
 		  specified show information only about given program, otherwise
 		  list all programs currently loaded on the system.
@@ -57,6 +58,11 @@ DESCRIPTION
 
 		  Note: *FILE* must be located in *bpffs* mount.
 
+	**bpftool prog load** *OBJ* *FILE*
+		  Load bpf program from binary *OBJ* and pin as *FILE*.
+
+		  Note: *FILE* must be located in *bpffs* mount.
+
 	**bpftool prog help**
 		  Print short help message.
 
@@ -126,8 +132,10 @@ EXAMPLES
 |
 | **# mount -t bpf none /sys/fs/bpf/**
 | **# bpftool prog pin id 10 /sys/fs/bpf/prog**
+| **# bpftool prog load ./my_prog.o /sys/fs/bpf/prog2**
 | **# ls -l /sys/fs/bpf/**
 |   -rw------- 1 root root 0 Jul 22 01:43 prog
+|   -rw------- 1 root root 0 Jul 22 01:44 prog2
 
 **# bpftool prog dump jited pinned /sys/fs/bpf/prog opcodes**
 
@@ -147,4 +155,4 @@ EXAMPLES
 
 SEE ALSO
 ========
-	**bpftool**\ (8), **bpftool-map**\ (8)
+	**bpftool**\ (8), **bpftool-map**\ (8), **bpftool-cgroup**\ (8)
diff --git a/tools/bpf/bpftool/Documentation/bpftool.rst b/tools/bpf/bpftool/Documentation/bpftool.rst
index 926c03d5a8da..20689a321ffe 100644
--- a/tools/bpf/bpftool/Documentation/bpftool.rst
+++ b/tools/bpf/bpftool/Documentation/bpftool.rst
@@ -16,17 +16,19 @@ SYNOPSIS
 
 	**bpftool** **version**
 
-	*OBJECT* := { **map** | **program** }
+	*OBJECT* := { **map** | **program** | **cgroup** }
 
 	*OPTIONS* := { { **-V** | **--version** } | { **-h** | **--help** }
 	| { **-j** | **--json** } [{ **-p** | **--pretty** }] }
 
 	*MAP-COMMANDS* :=
-	{ **show** | **dump** | **update** | **lookup** | **getnext** | **delete**
+	{ **show** | **list** | **dump** | **update** | **lookup** | **getnext** | **delete**
 	| **pin** | **help** }
 
-	*PROG-COMMANDS* := { **show** | **dump jited** | **dump xlated** | **pin**
-	| **help** }
+	*PROG-COMMANDS* := { **show** | **list** | **dump jited** | **dump xlated** | **pin**
+	| **load** | **help** }
+
+	*CGROUP-COMMANDS* := { **show** | **list** | **attach** | **detach** | **help** }
 
 DESCRIPTION
 ===========
@@ -53,4 +55,4 @@ OPTIONS
 
 SEE ALSO
 ========
-	**bpftool-map**\ (8), **bpftool-prog**\ (8)
+	**bpftool-map**\ (8), **bpftool-prog**\ (8), **bpftool-cgroup**\ (8)
diff --git a/tools/bpf/bpftool/Makefile b/tools/bpf/bpftool/Makefile
index ec3052c0b004..26901ec87361 100644
--- a/tools/bpf/bpftool/Makefile
+++ b/tools/bpf/bpftool/Makefile
@@ -1,25 +1,10 @@
 include ../../scripts/Makefile.include
-
 include ../../scripts/utilities.mak
 
 ifeq ($(srctree),)
 srctree := $(patsubst %/,%,$(dir $(CURDIR)))
 srctree := $(patsubst %/,%,$(dir $(srctree)))
 srctree := $(patsubst %/,%,$(dir $(srctree)))
-#$(info Determined 'srctree' to be $(srctree))
-endif
-
-ifneq ($(objtree),)
-#$(info Determined 'objtree' to be $(objtree))
-endif
-
-ifneq ($(OUTPUT),)
-#$(info Determined 'OUTPUT' to be $(OUTPUT))
-# Adding $(OUTPUT) as a directory to look for source files,
-# because use generated output files as sources dependency
-# for flex/bison parsers.
-VPATH += $(OUTPUT)
-export VPATH
 endif
 
 ifeq ($(V),1)
@@ -28,16 +13,18 @@ else
   Q = @
 endif
 
-BPF_DIR	= $(srctree)/tools/lib/bpf/
+BPF_DIR = $(srctree)/tools/lib/bpf/
 
 ifneq ($(OUTPUT),)
-  BPF_PATH=$(OUTPUT)
+  BPF_PATH = $(OUTPUT)
 else
-  BPF_PATH=$(BPF_DIR)
+  BPF_PATH = $(BPF_DIR)
 endif
 
 LIBBPF = $(BPF_PATH)libbpf.a
 
+BPFTOOL_VERSION=$(shell make --no-print-directory -sC ../../.. kernelversion)
+
 $(LIBBPF): FORCE
 	$(Q)$(MAKE) -C $(BPF_DIR) OUTPUT=$(OUTPUT) $(OUTPUT)libbpf.a FEATURES_DUMP=$(FEATURE_DUMP_EXPORT)
 
@@ -45,22 +32,50 @@ $(LIBBPF)-clean:
 	$(call QUIET_CLEAN, libbpf)
 	$(Q)$(MAKE) -C $(BPF_DIR) OUTPUT=$(OUTPUT) clean >/dev/null
 
-prefix = /usr/local
+prefix ?= /usr/local
 bash_compdir ?= /usr/share/bash-completion/completions
 
 CC = gcc
 
 CFLAGS += -O2
 CFLAGS += -W -Wall -Wextra -Wno-unused-parameter -Wshadow
-CFLAGS += -D__EXPORTED_HEADERS__ -I$(srctree)/tools/include/uapi -I$(srctree)/tools/include -I$(srctree)/tools/lib/bpf -I$(srctree)/kernel/bpf/
+CFLAGS += -DPACKAGE='"bpftool"' -D__EXPORTED_HEADERS__ -I$(srctree)/tools/include/uapi -I$(srctree)/tools/include -I$(srctree)/tools/lib/bpf -I$(srctree)/kernel/bpf/
+CFLAGS += -DBPFTOOL_VERSION='"$(BPFTOOL_VERSION)"'
 LIBS = -lelf -lbfd -lopcodes $(LIBBPF)
 
+INSTALL ?= install
+RM ?= rm -f
+
+FEATURE_USER = .bpftool
+FEATURE_TESTS = libbfd disassembler-four-args
+FEATURE_DISPLAY = libbfd disassembler-four-args
+
+check_feat := 1
+NON_CHECK_FEAT_TARGETS := clean uninstall doc doc-clean doc-install doc-uninstall
+ifdef MAKECMDGOALS
+ifeq ($(filter-out $(NON_CHECK_FEAT_TARGETS),$(MAKECMDGOALS)),)
+  check_feat := 0
+endif
+endif
+
+ifeq ($(check_feat),1)
+ifeq ($(FEATURES_DUMP),)
+include $(srctree)/tools/build/Makefile.feature
+else
+include $(FEATURES_DUMP)
+endif
+endif
+
+ifeq ($(feature-disassembler-four-args), 1)
+CFLAGS += -DDISASM_FOUR_ARGS_SIGNATURE
+endif
+
 include $(wildcard *.d)
 
 all: $(OUTPUT)bpftool
 
-SRCS=$(wildcard *.c)
-OBJS=$(patsubst %.c,$(OUTPUT)%.o,$(SRCS)) $(OUTPUT)disasm.o
+SRCS = $(wildcard *.c)
+OBJS = $(patsubst %.c,$(OUTPUT)%.o,$(SRCS)) $(OUTPUT)disasm.o
 
 $(OUTPUT)disasm.o: $(srctree)/kernel/bpf/disasm.c
 	$(QUIET_CC)$(COMPILE.c) -MMD -o $@ $<
@@ -73,21 +88,34 @@ $(OUTPUT)%.o: %.c
 
 clean: $(LIBBPF)-clean
 	$(call QUIET_CLEAN, bpftool)
-	$(Q)rm -rf $(OUTPUT)bpftool $(OUTPUT)*.o $(OUTPUT)*.d
+	$(Q)$(RM) $(OUTPUT)bpftool $(OUTPUT)*.o $(OUTPUT)*.d
+
+install: $(OUTPUT)bpftool
+	$(call QUIET_INSTALL, bpftool)
+	$(Q)$(INSTALL) -m 0755 -d $(DESTDIR)$(prefix)/sbin
+	$(Q)$(INSTALL) $(OUTPUT)bpftool $(DESTDIR)$(prefix)/sbin/bpftool
+	$(Q)$(INSTALL) -m 0755 -d $(DESTDIR)$(bash_compdir)
+	$(Q)$(INSTALL) -m 0644 bash-completion/bpftool $(DESTDIR)$(bash_compdir)
 
-install:
-	install -m 0755 -d $(prefix)/sbin
-	install $(OUTPUT)bpftool $(prefix)/sbin/bpftool
-	install -m 0755 -d $(bash_compdir)
-	install -m 0644 bash-completion/bpftool $(bash_compdir)
+uninstall:
+	$(call QUIET_UNINST, bpftool)
+	$(Q)$(RM) $(DESTDIR)$(prefix)/sbin/bpftool
+	$(Q)$(RM) $(DESTDIR)$(bash_compdir)/bpftool
 
 doc:
-	$(Q)$(MAKE) -C Documentation/
+	$(call descend,Documentation)
+
+doc-clean:
+	$(call descend,Documentation,clean)
 
 doc-install:
-	$(Q)$(MAKE) -C Documentation/ install
+	$(call descend,Documentation,install)
+
+doc-uninstall:
+	$(call descend,Documentation,uninstall)
 
 FORCE:
 
-.PHONY: all clean FORCE install doc doc-install
+.PHONY: all FORCE clean install uninstall
+.PHONY: doc doc-clean doc-install doc-uninstall
 .DEFAULT_GOAL := all
diff --git a/tools/bpf/bpftool/bash-completion/bpftool b/tools/bpf/bpftool/bash-completion/bpftool
index 7febee05c8e7..0137866bb8f6 100644
--- a/tools/bpf/bpftool/bash-completion/bpftool
+++ b/tools/bpf/bpftool/bash-completion/bpftool
@@ -197,7 +197,7 @@ _bpftool()
 
             local PROG_TYPE='id pinned tag'
             case $command in
-                show)
+                show|list)
                     [[ $prev != "$command" ]] && return 0
                     COMPREPLY=( $( compgen -W "$PROG_TYPE" -- "$cur" ) )
                     return 0
@@ -232,7 +232,7 @@ _bpftool()
                     ;;
                 *)
                     [[ $prev == $object ]] && \
-                        COMPREPLY=( $( compgen -W 'dump help pin show' -- \
+                        COMPREPLY=( $( compgen -W 'dump help pin show list' -- \
                             "$cur" ) )
                     ;;
             esac
@@ -240,7 +240,7 @@ _bpftool()
         map)
             local MAP_TYPE='id pinned'
             case $command in
-                show|dump)
+                show|list|dump)
                     case $prev in
                         $command)
                             COMPREPLY=( $( compgen -W "$MAP_TYPE" -- "$cur" ) )
@@ -343,7 +343,7 @@ _bpftool()
                 *)
                     [[ $prev == $object ]] && \
                         COMPREPLY=( $( compgen -W 'delete dump getnext help \
-                            lookup pin show update' -- "$cur" ) )
+                            lookup pin show list update' -- "$cur" ) )
                     ;;
             esac
             ;;
diff --git a/tools/bpf/bpftool/cgroup.c b/tools/bpf/bpftool/cgroup.c
new file mode 100644
index 000000000000..cae32a61cb18
--- /dev/null
+++ b/tools/bpf/bpftool/cgroup.c
@@ -0,0 +1,308 @@
+// SPDX-License-Identifier: GPL-2.0+
+// Copyright (C) 2017 Facebook
+// Author: Roman Gushchin <guro@fb.com>
+
+#include <fcntl.h>
+#include <stdlib.h>
+#include <string.h>
+#include <sys/stat.h>
+#include <sys/types.h>
+#include <unistd.h>
+
+#include <bpf.h>
+
+#include "main.h"
+
+#define HELP_SPEC_ATTACH_FLAGS						\
+	"ATTACH_FLAGS := { multi | override }"
+
+#define HELP_SPEC_ATTACH_TYPES						\
+	"ATTACH_TYPE := { ingress | egress | sock_create | sock_ops | device }"
+
+static const char * const attach_type_strings[] = {
+	[BPF_CGROUP_INET_INGRESS] = "ingress",
+	[BPF_CGROUP_INET_EGRESS] = "egress",
+	[BPF_CGROUP_INET_SOCK_CREATE] = "sock_create",
+	[BPF_CGROUP_SOCK_OPS] = "sock_ops",
+	[BPF_CGROUP_DEVICE] = "device",
+	[__MAX_BPF_ATTACH_TYPE] = NULL,
+};
+
+static enum bpf_attach_type parse_attach_type(const char *str)
+{
+	enum bpf_attach_type type;
+
+	for (type = 0; type < __MAX_BPF_ATTACH_TYPE; type++) {
+		if (attach_type_strings[type] &&
+		    is_prefix(str, attach_type_strings[type]))
+			return type;
+	}
+
+	return __MAX_BPF_ATTACH_TYPE;
+}
+
+static int show_bpf_prog(int id, const char *attach_type_str,
+			 const char *attach_flags_str)
+{
+	struct bpf_prog_info info = {};
+	__u32 info_len = sizeof(info);
+	int prog_fd;
+
+	prog_fd = bpf_prog_get_fd_by_id(id);
+	if (prog_fd < 0)
+		return -1;
+
+	if (bpf_obj_get_info_by_fd(prog_fd, &info, &info_len)) {
+		close(prog_fd);
+		return -1;
+	}
+
+	if (json_output) {
+		jsonw_start_object(json_wtr);
+		jsonw_uint_field(json_wtr, "id", info.id);
+		jsonw_string_field(json_wtr, "attach_type",
+				   attach_type_str);
+		jsonw_string_field(json_wtr, "attach_flags",
+				   attach_flags_str);
+		jsonw_string_field(json_wtr, "name", info.name);
+		jsonw_end_object(json_wtr);
+	} else {
+		printf("%-8u %-15s %-15s %-15s\n", info.id,
+		       attach_type_str,
+		       attach_flags_str,
+		       info.name);
+	}
+
+	close(prog_fd);
+	return 0;
+}
+
+static int show_attached_bpf_progs(int cgroup_fd, enum bpf_attach_type type)
+{
+	__u32 prog_ids[1024] = {0};
+	char *attach_flags_str;
+	__u32 prog_cnt, iter;
+	__u32 attach_flags;
+	char buf[32];
+	int ret;
+
+	prog_cnt = ARRAY_SIZE(prog_ids);
+	ret = bpf_prog_query(cgroup_fd, type, 0, &attach_flags, prog_ids,
+			     &prog_cnt);
+	if (ret)
+		return ret;
+
+	if (prog_cnt == 0)
+		return 0;
+
+	switch (attach_flags) {
+	case BPF_F_ALLOW_MULTI:
+		attach_flags_str = "multi";
+		break;
+	case BPF_F_ALLOW_OVERRIDE:
+		attach_flags_str = "override";
+		break;
+	case 0:
+		attach_flags_str = "";
+		break;
+	default:
+		snprintf(buf, sizeof(buf), "unknown(%x)", attach_flags);
+		attach_flags_str = buf;
+	}
+
+	for (iter = 0; iter < prog_cnt; iter++)
+		show_bpf_prog(prog_ids[iter], attach_type_strings[type],
+			      attach_flags_str);
+
+	return 0;
+}
+
+static int do_show(int argc, char **argv)
+{
+	enum bpf_attach_type type;
+	int cgroup_fd;
+	int ret = -1;
+
+	if (argc < 1) {
+		p_err("too few parameters for cgroup show");
+		goto exit;
+	} else if (argc > 1) {
+		p_err("too many parameters for cgroup show");
+		goto exit;
+	}
+
+	cgroup_fd = open(argv[0], O_RDONLY);
+	if (cgroup_fd < 0) {
+		p_err("can't open cgroup %s", argv[1]);
+		goto exit;
+	}
+
+	if (json_output)
+		jsonw_start_array(json_wtr);
+	else
+		printf("%-8s %-15s %-15s %-15s\n", "ID", "AttachType",
+		       "AttachFlags", "Name");
+
+	for (type = 0; type < __MAX_BPF_ATTACH_TYPE; type++) {
+		/*
+		 * Not all attach types may be supported, so it's expected
+		 * that some requests will fail.
+		 * If we were able to get the listing for at least one
+		 * attach type, return 0.
+		 */
+		if (show_attached_bpf_progs(cgroup_fd, type) == 0)
+			ret = 0;
+	}
+
+	if (json_output)
+		jsonw_end_array(json_wtr);
+
+	close(cgroup_fd);
+exit:
+	return ret;
+}
+
+static int do_attach(int argc, char **argv)
+{
+	enum bpf_attach_type attach_type;
+	int cgroup_fd, prog_fd;
+	int attach_flags = 0;
+	int ret = -1;
+	int i;
+
+	if (argc < 4) {
+		p_err("too few parameters for cgroup attach");
+		goto exit;
+	}
+
+	cgroup_fd = open(argv[0], O_RDONLY);
+	if (cgroup_fd < 0) {
+		p_err("can't open cgroup %s", argv[1]);
+		goto exit;
+	}
+
+	attach_type = parse_attach_type(argv[1]);
+	if (attach_type == __MAX_BPF_ATTACH_TYPE) {
+		p_err("invalid attach type");
+		goto exit_cgroup;
+	}
+
+	argc -= 2;
+	argv = &argv[2];
+	prog_fd = prog_parse_fd(&argc, &argv);
+	if (prog_fd < 0)
+		goto exit_cgroup;
+
+	for (i = 0; i < argc; i++) {
+		if (is_prefix(argv[i], "multi")) {
+			attach_flags |= BPF_F_ALLOW_MULTI;
+		} else if (is_prefix(argv[i], "override")) {
+			attach_flags |= BPF_F_ALLOW_OVERRIDE;
+		} else {
+			p_err("unknown option: %s", argv[i]);
+			goto exit_cgroup;
+		}
+	}
+
+	if (bpf_prog_attach(prog_fd, cgroup_fd, attach_type, attach_flags)) {
+		p_err("failed to attach program");
+		goto exit_prog;
+	}
+
+	if (json_output)
+		jsonw_null(json_wtr);
+
+	ret = 0;
+
+exit_prog:
+	close(prog_fd);
+exit_cgroup:
+	close(cgroup_fd);
+exit:
+	return ret;
+}
+
+static int do_detach(int argc, char **argv)
+{
+	enum bpf_attach_type attach_type;
+	int prog_fd, cgroup_fd;
+	int ret = -1;
+
+	if (argc < 4) {
+		p_err("too few parameters for cgroup detach");
+		goto exit;
+	}
+
+	cgroup_fd = open(argv[0], O_RDONLY);
+	if (cgroup_fd < 0) {
+		p_err("can't open cgroup %s", argv[1]);
+		goto exit;
+	}
+
+	attach_type = parse_attach_type(argv[1]);
+	if (attach_type == __MAX_BPF_ATTACH_TYPE) {
+		p_err("invalid attach type");
+		goto exit_cgroup;
+	}
+
+	argc -= 2;
+	argv = &argv[2];
+	prog_fd = prog_parse_fd(&argc, &argv);
+	if (prog_fd < 0)
+		goto exit_cgroup;
+
+	if (bpf_prog_detach2(prog_fd, cgroup_fd, attach_type)) {
+		p_err("failed to detach program");
+		goto exit_prog;
+	}
+
+	if (json_output)
+		jsonw_null(json_wtr);
+
+	ret = 0;
+
+exit_prog:
+	close(prog_fd);
+exit_cgroup:
+	close(cgroup_fd);
+exit:
+	return ret;
+}
+
+static int do_help(int argc, char **argv)
+{
+	if (json_output) {
+		jsonw_null(json_wtr);
+		return 0;
+	}
+
+	fprintf(stderr,
+		"Usage: %s %s { show | list } CGROUP\n"
+		"       %s %s attach CGROUP ATTACH_TYPE PROG [ATTACH_FLAGS]\n"
+		"       %s %s detach CGROUP ATTACH_TYPE PROG\n"
+		"       %s %s help\n"
+		"\n"
+		"       " HELP_SPEC_ATTACH_TYPES "\n"
+		"       " HELP_SPEC_ATTACH_FLAGS "\n"
+		"       " HELP_SPEC_PROGRAM "\n"
+		"       " HELP_SPEC_OPTIONS "\n"
+		"",
+		bin_name, argv[-2], bin_name, argv[-2],
+		bin_name, argv[-2], bin_name, argv[-2]);
+
+	return 0;
+}
+
+static const struct cmd cmds[] = {
+	{ "show",	do_show },
+	{ "list",	do_show },
+	{ "attach",	do_attach },
+	{ "detach",	do_detach },
+	{ "help",	do_help },
+	{ 0 }
+};
+
+int do_cgroup(int argc, char **argv)
+{
+	return cmd_select(cmds, argc, argv, do_help);
+}
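
For reference, the query side of do_show() above boils down to a single
bpf_prog_query() call per attach type. A hedged, standalone sketch (the
function name and the fixed 64-entry array are illustrative only)::

    #include <stdio.h>
    #include <bpf.h>

    static void list_egress_progs(int cg_fd)
    {
        __u32 prog_ids[64], prog_cnt = 64, attach_flags = 0;
        __u32 i;

        /* prog_cnt is in/out: capacity on entry, count on return */
        if (bpf_prog_query(cg_fd, BPF_CGROUP_INET_EGRESS, 0,
                           &attach_flags, prog_ids, &prog_cnt))
            return; /* the attach type may simply be unsupported */
        for (i = 0; i < prog_cnt; i++)
            printf("prog id %u (flags 0x%x)\n", prog_ids[i], attach_flags);
    }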
diff --git a/tools/bpf/bpftool/common.c b/tools/bpf/bpftool/common.c
index 2bd3b280e6dd..0b482c0070e0 100644
--- a/tools/bpf/bpftool/common.c
+++ b/tools/bpf/bpftool/common.c
@@ -34,6 +34,7 @@
 /* Author: Jakub Kicinski <kubakici@wp.pl> */
 
 #include <errno.h>
+#include <fcntl.h>
 #include <fts.h>
 #include <libgen.h>
 #include <mntent.h>
@@ -44,7 +45,9 @@
 #include <unistd.h>
 #include <linux/limits.h>
 #include <linux/magic.h>
+#include <net/if.h>
 #include <sys/mount.h>
+#include <sys/stat.h>
 #include <sys/types.h>
 #include <sys/vfs.h>
 
@@ -163,13 +166,49 @@ int open_obj_pinned_any(char *path, enum bpf_obj_type exp_type)
 	return fd;
 }
 
-int do_pin_any(int argc, char **argv, int (*get_fd_by_id)(__u32))
+int do_pin_fd(int fd, const char *name)
 {
 	char err_str[ERR_MAX_LEN];
-	unsigned int id;
-	char *endptr;
 	char *file;
 	char *dir;
+	int err = 0;
+
+	err = bpf_obj_pin(fd, name);
+	if (!err)
+		goto out;
+
+	file = malloc(strlen(name) + 1);
+	strcpy(file, name);
+	dir = dirname(file);
+
+	if (errno != EPERM || is_bpffs(dir)) {
+		p_err("can't pin the object (%s): %s", name, strerror(errno));
+		goto out_free;
+	}
+
+	/* Attempt to mount bpffs, then retry pinning. */
+	err = mnt_bpffs(dir, err_str, ERR_MAX_LEN);
+	if (!err) {
+		err = bpf_obj_pin(fd, name);
+		if (err)
+			p_err("can't pin the object (%s): %s", name,
+			      strerror(errno));
+	} else {
+		err_str[ERR_MAX_LEN - 1] = '\0';
+		p_err("can't mount BPF file system to pin the object (%s): %s",
+		      name, err_str);
+	}
+
+out_free:
+	free(file);
+out:
+	return err;
+}
+
+int do_pin_any(int argc, char **argv, int (*get_fd_by_id)(__u32))
+{
+	unsigned int id;
+	char *endptr;
 	int err;
 	int fd;
 
@@ -195,35 +234,8 @@ int do_pin_any(int argc, char **argv, int (*get_fd_by_id)(__u32))
 		return -1;
 	}
 
-	err = bpf_obj_pin(fd, *argv);
-	if (!err)
-		goto out_close;
-
-	file = malloc(strlen(*argv) + 1);
-	strcpy(file, *argv);
-	dir = dirname(file);
-
-	if (errno != EPERM || is_bpffs(dir)) {
-		p_err("can't pin the object (%s): %s", *argv, strerror(errno));
-		goto out_free;
-	}
-
-	/* Attempt to mount bpffs, then retry pinning. */
-	err = mnt_bpffs(dir, err_str, ERR_MAX_LEN);
-	if (!err) {
-		err = bpf_obj_pin(fd, *argv);
-		if (err)
-			p_err("can't pin the object (%s): %s", *argv,
-			      strerror(errno));
-	} else {
-		err_str[ERR_MAX_LEN - 1] = '\0';
-		p_err("can't mount BPF file system to pin the object (%s): %s",
-		      *argv, err_str);
-	}
+	err = do_pin_fd(fd, *argv);
 
-out_free:
-	free(file);
-out_close:
 	close(fd);
 	return err;
 }
@@ -403,3 +415,124 @@ void delete_pinned_obj_table(struct pinned_obj_table *tab)
 		free(obj);
 	}
 }
+
+static char *
+ifindex_to_name_ns(__u32 ifindex, __u32 ns_dev, __u32 ns_ino, char *buf)
+{
+	struct stat st;
+	int err;
+
+	err = stat("/proc/self/ns/net", &st);
+	if (err) {
+		p_err("Can't stat /proc/self: %s", strerror(errno));
+		return NULL;
+	}
+
+	if (st.st_dev != ns_dev || st.st_ino != ns_ino)
+		return NULL;
+
+	return if_indextoname(ifindex, buf);
+}
+
+static int read_sysfs_hex_int(char *path)
+{
+	char vendor_id_buf[8];
+	int len;
+	int fd;
+
+	fd = open(path, O_RDONLY);
+	if (fd < 0) {
+		p_err("Can't open %s: %s", path, strerror(errno));
+		return -1;
+	}
+
+	len = read(fd, vendor_id_buf, sizeof(vendor_id_buf));
+	close(fd);
+	if (len < 0) {
+		p_err("Can't read %s: %s", path, strerror(errno));
+		return -1;
+	}
+	if (len >= (int)sizeof(vendor_id_buf)) {
+		p_err("Value in %s too long", path);
+		return -1;
+	}
+
+	vendor_id_buf[len] = 0;
+
+	return strtol(vendor_id_buf, NULL, 0);
+}
+
+static int read_sysfs_netdev_hex_int(char *devname, const char *entry_name)
+{
+	char full_path[64];
+
+	snprintf(full_path, sizeof(full_path), "/sys/class/net/%s/device/%s",
+		 devname, entry_name);
+
+	return read_sysfs_hex_int(full_path);
+}
+
+const char *ifindex_to_bfd_name_ns(__u32 ifindex, __u64 ns_dev, __u64 ns_ino)
+{
+	char devname[IF_NAMESIZE];
+	int vendor_id;
+	int device_id;
+
+	if (!ifindex_to_name_ns(ifindex, ns_dev, ns_ino, devname)) {
+		p_err("Can't get net device name for ifindex %d: %s", ifindex,
+		      strerror(errno));
+		return NULL;
+	}
+
+	vendor_id = read_sysfs_netdev_hex_int(devname, "vendor");
+	if (vendor_id < 0) {
+		p_err("Can't get device vendor id for %s", devname);
+		return NULL;
+	}
+
+	switch (vendor_id) {
+	case 0x19ee:
+		device_id = read_sysfs_netdev_hex_int(devname, "device");
+		if (device_id != 0x4000 &&
+		    device_id != 0x6000 &&
+		    device_id != 0x6003)
+			p_info("Unknown NFP device ID, assuming it is NFP-6xxx arch");
+		return "NFP-6xxx";
+	default:
+		p_err("Can't get bfd arch name for device vendor id 0x%04x",
+		      vendor_id);
+		return NULL;
+	}
+}
+
+void print_dev_plain(__u32 ifindex, __u64 ns_dev, __u64 ns_inode)
+{
+	char name[IF_NAMESIZE];
+
+	if (!ifindex)
+		return;
+
+	printf(" dev ");
+	if (ifindex_to_name_ns(ifindex, ns_dev, ns_inode, name))
+		printf("%s", name);
+	else
+		printf("ifindex %u ns_dev %llu ns_ino %llu",
+		       ifindex, ns_dev, ns_inode);
+}
+
+void print_dev_json(__u32 ifindex, __u64 ns_dev, __u64 ns_inode)
+{
+	char name[IF_NAMESIZE];
+
+	if (!ifindex)
+		return;
+
+	jsonw_name(json_wtr, "dev");
+	jsonw_start_object(json_wtr);
+	jsonw_uint_field(json_wtr, "ifindex", ifindex);
+	jsonw_uint_field(json_wtr, "ns_dev", ns_dev);
+	jsonw_uint_field(json_wtr, "ns_inode", ns_inode);
+	if (ifindex_to_name_ns(ifindex, ns_dev, ns_inode, name))
+		jsonw_string_field(json_wtr, "ifname", name);
+	jsonw_end_object(json_wtr);
+}
diff --git a/tools/bpf/bpftool/jit_disasm.c b/tools/bpf/bpftool/jit_disasm.c
index 1551d3918d4c..87439320ef70 100644
--- a/tools/bpf/bpftool/jit_disasm.c
+++ b/tools/bpf/bpftool/jit_disasm.c
@@ -76,7 +76,8 @@ static int fprintf_json(void *out, const char *fmt, ...)
 	return 0;
 }
 
-void disasm_print_insn(unsigned char *image, ssize_t len, int opcodes)
+void disasm_print_insn(unsigned char *image, ssize_t len, int opcodes,
+		       const char *arch)
 {
 	disassembler_ftype disassemble;
 	struct disassemble_info info;
@@ -100,6 +101,19 @@ void disasm_print_insn(unsigned char *image, ssize_t len, int opcodes)
 	else
 		init_disassemble_info(&info, stdout,
 				      (fprintf_ftype) fprintf);
+
+	/* Update architecture info for offload. */
+	if (arch) {
+		const bfd_arch_info_type *inf = bfd_scan_arch(arch);
+
+		if (inf) {
+			bfdf->arch_info = inf;
+		} else {
+			p_err("No libfd support for %s", arch);
+			return;
+		}
+	}
+
 	info.arch = bfd_get_arch(bfdf);
 	info.mach = bfd_get_mach(bfdf);
 	info.buffer = image;
@@ -107,7 +121,14 @@ void disasm_print_insn(unsigned char *image, ssize_t len, int opcodes)
 
 	disassemble_init_for_target(&info);
 
+#ifdef DISASM_FOUR_ARGS_SIGNATURE
+	disassemble = disassembler(info.arch,
+				   bfd_big_endian(bfdf),
+				   info.mach,
+				   bfdf);
+#else
 	disassemble = disassembler(bfdf);
+#endif
 	assert(disassemble);
 
 	if (json_output)
diff --git a/tools/bpf/bpftool/main.c b/tools/bpf/bpftool/main.c
index d294bc8168be..3a0396d87c42 100644
--- a/tools/bpf/bpftool/main.c
+++ b/tools/bpf/bpftool/main.c
@@ -38,7 +38,6 @@
 #include <errno.h>
 #include <getopt.h>
 #include <linux/bpf.h>
-#include <linux/version.h>
 #include <stdio.h>
 #include <stdlib.h>
 #include <string.h>
@@ -85,7 +84,7 @@ static int do_help(int argc, char **argv)
 		"       %s batch file FILE\n"
 		"       %s version\n"
 		"\n"
-		"       OBJECT := { prog | map }\n"
+		"       OBJECT := { prog | map | cgroup }\n"
 		"       " HELP_SPEC_OPTIONS "\n"
 		"",
 		bin_name, bin_name, bin_name);
@@ -95,21 +94,13 @@ static int do_help(int argc, char **argv)
 
 static int do_version(int argc, char **argv)
 {
-	unsigned int version[3];
-
-	version[0] = LINUX_VERSION_CODE >> 16;
-	version[1] = LINUX_VERSION_CODE >> 8 & 0xf;
-	version[2] = LINUX_VERSION_CODE & 0xf;
-
 	if (json_output) {
 		jsonw_start_object(json_wtr);
 		jsonw_name(json_wtr, "version");
-		jsonw_printf(json_wtr, "\"%u.%u.%u\"",
-			     version[0], version[1], version[2]);
+		jsonw_printf(json_wtr, "\"%s\"", BPFTOOL_VERSION);
 		jsonw_end_object(json_wtr);
 	} else {
-		printf("%s v%u.%u.%u\n", bin_name,
-		       version[0], version[1], version[2]);
+		printf("%s v%s\n", bin_name, BPFTOOL_VERSION);
 	}
 	return 0;
 }
@@ -173,6 +164,7 @@ static const struct cmd cmds[] = {
 	{ "batch",	do_batch },
 	{ "prog",	do_prog },
 	{ "map",	do_map },
+	{ "cgroup",	do_cgroup },
 	{ "version",	do_version },
 	{ 0 }
 };
diff --git a/tools/bpf/bpftool/main.h b/tools/bpf/bpftool/main.h
index bff330b49791..b8e9584d6246 100644
--- a/tools/bpf/bpftool/main.h
+++ b/tools/bpf/bpftool/main.h
@@ -96,6 +96,8 @@ struct pinned_obj {
 int build_pinned_obj_table(struct pinned_obj_table *table,
 			   enum bpf_obj_type type);
 void delete_pinned_obj_table(struct pinned_obj_table *tab);
+void print_dev_plain(__u32 ifindex, __u64 ns_dev, __u64 ns_inode);
+void print_dev_json(__u32 ifindex, __u64 ns_dev, __u64 ns_inode);
 
 struct cmd {
 	const char *cmd;
@@ -111,13 +113,18 @@ char *get_fdinfo(int fd, const char *key);
 int open_obj_pinned(char *path);
 int open_obj_pinned_any(char *path, enum bpf_obj_type exp_type);
 int do_pin_any(int argc, char **argv, int (*get_fd_by_id)(__u32));
+int do_pin_fd(int fd, const char *name);
 
 int do_prog(int argc, char **arg);
 int do_map(int argc, char **arg);
+int do_cgroup(int argc, char **arg);
 
 int prog_parse_fd(int *argc, char ***argv);
 
-void disasm_print_insn(unsigned char *image, ssize_t len, int opcodes);
+void disasm_print_insn(unsigned char *image, ssize_t len, int opcodes,
+		       const char *arch);
 void print_hex_data_json(uint8_t *data, size_t len);
 
+const char *ifindex_to_bfd_name_ns(__u32 ifindex, __u64 ns_dev, __u64 ns_ino);
+
 #endif
diff --git a/tools/bpf/bpftool/map.c b/tools/bpf/bpftool/map.c
index a8c3a33dd185..f95fa67bb498 100644
--- a/tools/bpf/bpftool/map.c
+++ b/tools/bpf/bpftool/map.c
@@ -66,6 +66,7 @@ static const char * const map_type_name[] = {
 	[BPF_MAP_TYPE_HASH_OF_MAPS]	= "hash_of_maps",
 	[BPF_MAP_TYPE_DEVMAP]		= "devmap",
 	[BPF_MAP_TYPE_SOCKMAP]		= "sockmap",
+	[BPF_MAP_TYPE_CPUMAP]		= "cpumap",
 };
 
 static unsigned int get_possible_cpus(void)
@@ -428,6 +429,9 @@ static int show_map_close_json(int fd, struct bpf_map_info *info)
 
 	jsonw_name(json_wtr, "flags");
 	jsonw_printf(json_wtr, "%#x", info->map_flags);
+
+	print_dev_json(info->ifindex, info->netns_dev, info->netns_ino);
+
 	jsonw_uint_field(json_wtr, "bytes_key", info->key_size);
 	jsonw_uint_field(json_wtr, "bytes_value", info->value_size);
 	jsonw_uint_field(json_wtr, "max_entries", info->max_entries);
@@ -469,7 +473,9 @@ static int show_map_close_plain(int fd, struct bpf_map_info *info)
 	if (*info->name)
 		printf("name %s  ", info->name);
 
-	printf("flags 0x%x\n", info->map_flags);
+	printf("flags 0x%x", info->map_flags);
+	print_dev_plain(info->ifindex, info->netns_dev, info->netns_ino);
+	printf("\n");
 	printf("\tkey %uB  value %uB  max_entries %u",
 	       info->key_size, info->value_size, info->max_entries);
 
@@ -861,7 +867,7 @@ static int do_help(int argc, char **argv)
 	}
 
 	fprintf(stderr,
-		"Usage: %s %s show   [MAP]\n"
+		"Usage: %s %s { show | list }   [MAP]\n"
 		"       %s %s dump    MAP\n"
 		"       %s %s update  MAP  key BYTES value VALUE [UPDATE_FLAGS]\n"
 		"       %s %s lookup  MAP  key BYTES\n"
@@ -885,6 +891,7 @@ static int do_help(int argc, char **argv)
 
 static const struct cmd cmds[] = {
 	{ "show",	do_show },
+	{ "list",	do_show },
 	{ "help",	do_help },
 	{ "dump",	do_dump },
 	{ "update",	do_update },
diff --git a/tools/bpf/bpftool/prog.c b/tools/bpf/bpftool/prog.c
index dded77345bfb..e8e2baaf93c2 100644
--- a/tools/bpf/bpftool/prog.c
+++ b/tools/bpf/bpftool/prog.c
@@ -45,6 +45,7 @@
 #include <sys/stat.h>
 
 #include <bpf.h>
+#include <libbpf.h>
 
 #include "main.h"
 #include "disasm.h"
@@ -65,6 +66,7 @@ static const char * const prog_type_name[] = {
 	[BPF_PROG_TYPE_LWT_XMIT]	= "lwt_xmit",
 	[BPF_PROG_TYPE_SOCK_OPS]	= "sock_ops",
 	[BPF_PROG_TYPE_SK_SKB]		= "sk_skb",
+	[BPF_PROG_TYPE_CGROUP_DEVICE]	= "cgroup_device",
 };
 
 static void print_boot_time(__u64 nsecs, char *buf, unsigned int size)
@@ -229,6 +231,8 @@ static void print_prog_json(struct bpf_prog_info *info, int fd)
 		     info->tag[0], info->tag[1], info->tag[2], info->tag[3],
 		     info->tag[4], info->tag[5], info->tag[6], info->tag[7]);
 
+	print_dev_json(info->ifindex, info->netns_dev, info->netns_ino);
+
 	if (info->load_time) {
 		char buf[32];
 
@@ -286,6 +290,7 @@ static void print_prog_plain(struct bpf_prog_info *info, int fd)
 
 	printf("tag ");
 	fprint_hex(stdout, info->tag, BPF_TAG_SIZE, "");
+	print_dev_plain(info->ifindex, info->netns_dev, info->netns_ino);
 	printf("\n");
 
 	if (info->load_time) {
@@ -402,6 +407,88 @@ static int do_show(int argc, char **argv)
 	return err;
 }
 
+#define SYM_MAX_NAME	256
+
+struct kernel_sym {
+	unsigned long address;
+	char name[SYM_MAX_NAME];
+};
+
+struct dump_data {
+	unsigned long address_call_base;
+	struct kernel_sym *sym_mapping;
+	__u32 sym_count;
+	char scratch_buff[SYM_MAX_NAME];
+};
+
+static int kernel_syms_cmp(const void *sym_a, const void *sym_b)
+{
+	return ((struct kernel_sym *)sym_a)->address -
+	       ((struct kernel_sym *)sym_b)->address;
+}
+
+static void kernel_syms_load(struct dump_data *dd)
+{
+	struct kernel_sym *sym;
+	char buff[256];
+	void *tmp, *address;
+	FILE *fp;
+
+	fp = fopen("/proc/kallsyms", "r");
+	if (!fp)
+		return;
+
+	while (!feof(fp)) {
+		if (!fgets(buff, sizeof(buff), fp))
+			break;
+		tmp = realloc(dd->sym_mapping,
+			      (dd->sym_count + 1) *
+			      sizeof(*dd->sym_mapping));
+		if (!tmp) {
+out:
+			free(dd->sym_mapping);
+			dd->sym_mapping = NULL;
+			fclose(fp);
+			return;
+		}
+		dd->sym_mapping = tmp;
+		sym = &dd->sym_mapping[dd->sym_count];
+		if (sscanf(buff, "%p %*c %s", &address, sym->name) != 2)
+			continue;
+		sym->address = (unsigned long)address;
+		if (!strcmp(sym->name, "__bpf_call_base")) {
+			dd->address_call_base = sym->address;
+			/* sysctl kernel.kptr_restrict was set */
+			if (!sym->address)
+				goto out;
+		}
+		if (sym->address)
+			dd->sym_count++;
+	}
+
+	fclose(fp);
+
+	qsort(dd->sym_mapping, dd->sym_count,
+	      sizeof(*dd->sym_mapping), kernel_syms_cmp);
+}
+
+static void kernel_syms_destroy(struct dump_data *dd)
+{
+	free(dd->sym_mapping);
+}
+
+static struct kernel_sym *kernel_syms_search(struct dump_data *dd,
+					     unsigned long key)
+{
+	struct kernel_sym sym = {
+		.address = key,
+	};
+
+	return dd->sym_mapping ?
+	       bsearch(&sym, dd->sym_mapping, dd->sym_count,
+		       sizeof(*dd->sym_mapping), kernel_syms_cmp) : NULL;
+}
+
 static void print_insn(struct bpf_verifier_env *env, const char *fmt, ...)
 {
 	va_list args;
@@ -411,8 +498,71 @@ static void print_insn(struct bpf_verifier_env *env, const char *fmt, ...)
 	va_end(args);
 }
 
-static void dump_xlated_plain(void *buf, unsigned int len, bool opcodes)
+static const char *print_call_pcrel(struct dump_data *dd,
+				    struct kernel_sym *sym,
+				    unsigned long address,
+				    const struct bpf_insn *insn)
+{
+	if (sym)
+		snprintf(dd->scratch_buff, sizeof(dd->scratch_buff),
+			 "%+d#%s", insn->off, sym->name);
+	else
+		snprintf(dd->scratch_buff, sizeof(dd->scratch_buff),
+			 "%+d#0x%lx", insn->off, address);
+	return dd->scratch_buff;
+}
+
+static const char *print_call_helper(struct dump_data *dd,
+				     struct kernel_sym *sym,
+				     unsigned long address)
+{
+	if (sym)
+		snprintf(dd->scratch_buff, sizeof(dd->scratch_buff),
+			 "%s", sym->name);
+	else
+		snprintf(dd->scratch_buff, sizeof(dd->scratch_buff),
+			 "0x%lx", address);
+	return dd->scratch_buff;
+}
+
+static const char *print_call(void *private_data,
+			      const struct bpf_insn *insn)
+{
+	struct dump_data *dd = private_data;
+	unsigned long address = dd->address_call_base + insn->imm;
+	struct kernel_sym *sym;
+
+	sym = kernel_syms_search(dd, address);
+	if (insn->src_reg == BPF_PSEUDO_CALL)
+		return print_call_pcrel(dd, sym, address, insn);
+	else
+		return print_call_helper(dd, sym, address);
+}
+
+static const char *print_imm(void *private_data,
+			     const struct bpf_insn *insn,
+			     __u64 full_imm)
+{
+	struct dump_data *dd = private_data;
+
+	if (insn->src_reg == BPF_PSEUDO_MAP_FD)
+		snprintf(dd->scratch_buff, sizeof(dd->scratch_buff),
+			 "map[id:%u]", insn->imm);
+	else
+		snprintf(dd->scratch_buff, sizeof(dd->scratch_buff),
+			 "0x%llx", (unsigned long long)full_imm);
+	return dd->scratch_buff;
+}
+
+static void dump_xlated_plain(struct dump_data *dd, void *buf,
+			      unsigned int len, bool opcodes)
 {
+	const struct bpf_insn_cbs cbs = {
+		.cb_print	= print_insn,
+		.cb_call	= print_call,
+		.cb_imm		= print_imm,
+		.private_data	= dd,
+	};
 	struct bpf_insn *insn = buf;
 	bool double_insn = false;
 	unsigned int i;
@@ -426,7 +576,7 @@ static void dump_xlated_plain(void *buf, unsigned int len, bool opcodes)
 		double_insn = insn[i].code == (BPF_LD | BPF_IMM | BPF_DW);
 
 		printf("% 4d: ", i);
-		print_bpf_insn(print_insn, NULL, insn + i, true);
+		print_bpf_insn(&cbs, NULL, insn + i, true);
 
 		if (opcodes) {
 			printf("       ");
@@ -455,8 +605,15 @@ static void print_insn_json(struct bpf_verifier_env *env, const char *fmt, ...)
 	va_end(args);
 }
 
-static void dump_xlated_json(void *buf, unsigned int len, bool opcodes)
+static void dump_xlated_json(struct dump_data *dd, void *buf,
+			     unsigned int len, bool opcodes)
 {
+	const struct bpf_insn_cbs cbs = {
+		.cb_print	= print_insn_json,
+		.cb_call	= print_call,
+		.cb_imm		= print_imm,
+		.private_data	= dd,
+	};
 	struct bpf_insn *insn = buf;
 	bool double_insn = false;
 	unsigned int i;
@@ -471,7 +628,7 @@ static void dump_xlated_json(void *buf, unsigned int len, bool opcodes)
 
 		jsonw_start_object(json_wtr);
 		jsonw_name(json_wtr, "disasm");
-		print_bpf_insn(print_insn_json, NULL, insn + i, true);
+		print_bpf_insn(&cbs, NULL, insn + i, true);
 
 		if (opcodes) {
 			jsonw_name(json_wtr, "opcodes");
@@ -506,6 +663,7 @@ static void dump_xlated_json(void *buf, unsigned int len, bool opcodes)
 static int do_dump(int argc, char **argv)
 {
 	struct bpf_prog_info info = {};
+	struct dump_data dd = {};
 	__u32 len = sizeof(info);
 	unsigned int buf_size;
 	char *filepath = NULL;
@@ -593,6 +751,14 @@ static int do_dump(int argc, char **argv)
 		goto err_free;
 	}
 
+	if ((member_len == &info.jited_prog_len &&
+	     info.jited_prog_insns == 0) ||
+	    (member_len == &info.xlated_prog_len &&
+	     info.xlated_prog_insns == 0)) {
+		p_err("error retrieving insn dump: kernel.kptr_restrict set?");
+		goto err_free;
+	}
+
 	if (filepath) {
 		fd = open(filepath, O_WRONLY | O_CREAT | O_TRUNC, 0600);
 		if (fd < 0) {
@@ -609,17 +775,29 @@ static int do_dump(int argc, char **argv)
 			goto err_free;
 		}
 	} else {
-		if (member_len == &info.jited_prog_len)
-			disasm_print_insn(buf, *member_len, opcodes);
-		else
+		if (member_len == &info.jited_prog_len) {
+			const char *name = NULL;
+
+			if (info.ifindex) {
+				name = ifindex_to_bfd_name_ns(info.ifindex,
+							      info.netns_dev,
+							      info.netns_ino);
+				if (!name)
+					goto err_free;
+			}
+
+			disasm_print_insn(buf, *member_len, opcodes, name);
+		} else {
+			kernel_syms_load(&dd);
 			if (json_output)
-				dump_xlated_json(buf, *member_len, opcodes);
+				dump_xlated_json(&dd, buf, *member_len, opcodes);
 			else
-				dump_xlated_plain(buf, *member_len, opcodes);
+				dump_xlated_plain(&dd, buf, *member_len, opcodes);
+			kernel_syms_destroy(&dd);
+		}
 	}
 
 	free(buf);
-
 	return 0;
 
 err_free:
@@ -637,6 +815,30 @@ static int do_pin(int argc, char **argv)
 	return err;
 }
 
+static int do_load(int argc, char **argv)
+{
+	struct bpf_object *obj;
+	int prog_fd;
+
+	if (argc != 2)
+		usage();
+
+	if (bpf_prog_load(argv[0], BPF_PROG_TYPE_UNSPEC, &obj, &prog_fd)) {
+		p_err("failed to load program");
+		return -1;
+	}
+
+	if (do_pin_fd(prog_fd, argv[1])) {
+		p_err("failed to pin program");
+		return -1;
+	}
+
+	if (json_output)
+		jsonw_null(json_wtr);
+
+	return 0;
+}
+
 static int do_help(int argc, char **argv)
 {
 	if (json_output) {
@@ -645,26 +847,29 @@ static int do_help(int argc, char **argv)
 	}
 
 	fprintf(stderr,
-		"Usage: %s %s show [PROG]\n"
+		"Usage: %s %s { show | list } [PROG]\n"
 		"       %s %s dump xlated PROG [{ file FILE | opcodes }]\n"
 		"       %s %s dump jited  PROG [{ file FILE | opcodes }]\n"
 		"       %s %s pin   PROG FILE\n"
+		"       %s %s load  OBJ  FILE\n"
 		"       %s %s help\n"
 		"\n"
 		"       " HELP_SPEC_PROGRAM "\n"
 		"       " HELP_SPEC_OPTIONS "\n"
 		"",
 		bin_name, argv[-2], bin_name, argv[-2], bin_name, argv[-2],
-		bin_name, argv[-2], bin_name, argv[-2]);
+		bin_name, argv[-2], bin_name, argv[-2], bin_name, argv[-2]);
 
 	return 0;
 }
 
 static const struct cmd cmds[] = {
 	{ "show",	do_show },
+	{ "list",	do_show },
 	{ "help",	do_help },
 	{ "dump",	do_dump },
 	{ "pin",	do_pin },
+	{ "load",	do_load },
 	{ 0 }
 };
 
diff --git a/tools/build/feature/Makefile b/tools/build/feature/Makefile
index 59585fe20221..0a490cb15149 100644
--- a/tools/build/feature/Makefile
+++ b/tools/build/feature/Makefile
@@ -13,6 +13,7 @@ FILES=                                          \
          test-hello.bin                         \
          test-libaudit.bin                      \
          test-libbfd.bin                        \
+         test-disassembler-four-args.bin        \
          test-liberty.bin                       \
          test-liberty-z.bin                     \
          test-cplus-demangle.bin                \
@@ -197,6 +198,9 @@ $(OUTPUT)test-libpython-version.bin:
 $(OUTPUT)test-libbfd.bin:
 	$(BUILD) -DPACKAGE='"perf"' -lbfd -lz -liberty -ldl
 
+$(OUTPUT)test-disassembler-four-args.bin:
+	$(BUILD) -DPACKAGE='"perf"' -lbfd -lopcodes
+
 $(OUTPUT)test-liberty.bin:
 	$(CC) $(CFLAGS) -Wall -Werror -o $@ test-libbfd.c -DPACKAGE='"perf"' $(LDFLAGS) -lbfd -ldl -liberty
 
diff --git a/tools/build/feature/test-disassembler-four-args.c b/tools/build/feature/test-disassembler-four-args.c
new file mode 100644
index 000000000000..45ce65cfddf0
--- /dev/null
+++ b/tools/build/feature/test-disassembler-four-args.c
@@ -0,0 +1,15 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <bfd.h>
+#include <dis-asm.h>
+
+int main(void)
+{
+	bfd *abfd = bfd_openr(NULL, NULL);
+
+	disassembler(bfd_get_arch(abfd),
+		     bfd_big_endian(abfd),
+		     bfd_get_mach(abfd),
+		     abfd);
+
+	return 0;
+}
diff --git a/tools/include/uapi/linux/bpf.h b/tools/include/uapi/linux/bpf.h
index 4c223ab30293..db6bdc375126 100644
--- a/tools/include/uapi/linux/bpf.h
+++ b/tools/include/uapi/linux/bpf.h
@@ -17,7 +17,7 @@
 #define BPF_ALU64	0x07	/* alu mode in double word width */
 
 /* ld/ldx fields */
-#define BPF_DW		0x18	/* double word */
+#define BPF_DW		0x18	/* double word (64-bit) */
 #define BPF_XADD	0xc0	/* exclusive add */
 
 /* alu/jmp fields */
@@ -197,8 +197,14 @@ enum bpf_attach_type {
  */
 #define BPF_F_STRICT_ALIGNMENT	(1U << 0)
 
+/* when bpf_ldimm64->src_reg == BPF_PSEUDO_MAP_FD, bpf_ldimm64->imm == fd */
 #define BPF_PSEUDO_MAP_FD	1
 
+/* when bpf_call->src_reg == BPF_PSEUDO_CALL, bpf_call->imm == pc-relative
+ * offset to another bpf function
+ */
+#define BPF_PSEUDO_CALL		1
+
 /* flags for BPF_MAP_UPDATE_ELEM command */
 #define BPF_ANY		0 /* create new element or update existing */
 #define BPF_NOEXIST	1 /* create new element if it didn't exist */
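
To illustrate the distinction the new BPF_PSEUDO_CALL marker draws: a call
insn with src_reg == 0 treats imm as a helper id, while src_reg ==
BPF_PSEUDO_CALL treats imm as an insn offset relative to the next
instruction. A sketch with made-up offsets, not taken from this patch::

    #include <linux/bpf.h>

    /* Ordinary helper call: imm selects the helper */
    struct bpf_insn helper_call = {
        .code = BPF_JMP | BPF_CALL,
        .imm  = BPF_FUNC_map_lookup_elem,
    };

    /* bpf-to-bpf call: imm is pc-relative, so imm == 2 targets the
     * instruction three slots after this one
     */
    struct bpf_insn func_call = {
        .code    = BPF_JMP | BPF_CALL,
        .src_reg = BPF_PSEUDO_CALL,
        .imm     = 2,
    };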
@@ -239,6 +245,7 @@ union bpf_attr {
 					 * BPF_F_NUMA_NODE is set).
 					 */
 		char	map_name[BPF_OBJ_NAME_LEN];
+		__u32	map_ifindex;	/* ifindex of netdev to create on */
 	};
 
 	struct { /* anonymous struct used by BPF_MAP_*_ELEM commands */
@@ -635,6 +642,14 @@ union bpf_attr {
  *     @optlen: length of optval in bytes
  *     Return: 0 or negative error
  *
+ * int bpf_sock_ops_cb_flags_set(bpf_sock_ops, flags)
+ *     Set callback flags for sock_ops
+ *     @bpf_sock_ops: pointer to bpf_sock_ops_kern struct
+ *     @flags: flags value
+ *     Return: 0 for no error
+ *             -EINVAL if there is no full tcp socket
+ *             otherwise, the bits in flags not supported by the current kernel
+ *
  * int bpf_skb_adjust_room(skb, len_diff, mode, flags)
  *     Grow or shrink room in sk_buff.
  *     @skb: pointer to skb
@@ -677,6 +692,10 @@ union bpf_attr {
  *     @buf: buf to fill
  *     @buf_size: size of the buf
  *     Return : 0 on success or negative error code
+ *
+ * int bpf_override_return(pt_regs, rc)
+ *	@pt_regs: pointer to struct pt_regs
+ *	@rc: the return value to set
  */
 #define __BPF_FUNC_MAPPER(FN)		\
 	FN(unspec),			\
@@ -736,7 +755,9 @@ union bpf_attr {
 	FN(xdp_adjust_meta),		\
 	FN(perf_event_read_value),	\
 	FN(perf_prog_read_value),	\
-	FN(getsockopt),
+	FN(getsockopt),			\
+	FN(override_return),		\
+	FN(sock_ops_cb_flags_set),
 
 /* integer value in 'imm' field of BPF_CALL instruction selects which helper
  * function eBPF program intends to call
@@ -888,6 +909,9 @@ struct xdp_md {
 	__u32 data;
 	__u32 data_end;
 	__u32 data_meta;
+	/* Accesses below go through struct xdp_rxq_info */
+	__u32 ingress_ifindex; /* rxq->dev->ifindex */
+	__u32 rx_queue_index;  /* rxq->queue_index  */
 };
 
 enum sk_action {
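
The two new xdp_md members make the receive queue visible to XDP programs.
A hedged sketch of a program consuming them (the SEC() macro is the usual
samples/bpf helper, assumed here; the queue-0 policy is a toy example)::

    #include <linux/bpf.h>

    #define SEC(NAME) __attribute__((section(NAME), used))

    SEC("xdp")
    int xdp_rxq_demo(struct xdp_md *ctx)
    {
        /* Both fields are backed by struct xdp_rxq_info in the kernel */
        if (ctx->ingress_ifindex == 0 || ctx->rx_queue_index != 0)
            return XDP_DROP;
        return XDP_PASS;
    }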
@@ -910,6 +934,9 @@ struct bpf_prog_info {
 	__u32 nr_map_ids;
 	__aligned_u64 map_ids;
 	char name[BPF_OBJ_NAME_LEN];
+	__u32 ifindex;
+	__u64 netns_dev;
+	__u64 netns_ino;
 } __attribute__((aligned(8)));
 
 struct bpf_map_info {
@@ -920,6 +947,9 @@ struct bpf_map_info {
 	__u32 max_entries;
 	__u32 map_flags;
 	char  name[BPF_OBJ_NAME_LEN];
+	__u32 ifindex;
+	__u64 netns_dev;
+	__u64 netns_ino;
 } __attribute__((aligned(8)));
 
 /* User bpf_sock_ops struct to access socket values and specify request ops
@@ -931,8 +961,9 @@ struct bpf_map_info {
 struct bpf_sock_ops {
 	__u32 op;
 	union {
-		__u32 reply;
-		__u32 replylong[4];
+		__u32 args[4];		/* Optionally passed to bpf program */
+		__u32 reply;		/* Returned by bpf program	    */
+		__u32 replylong[4];	/* Optionally returned by bpf prog  */
 	};
 	__u32 family;
 	__u32 remote_ip4;	/* Stored in network byte order */
@@ -941,8 +972,45 @@ struct bpf_sock_ops {
 	__u32 local_ip6[4];	/* Stored in network byte order */
 	__u32 remote_port;	/* Stored in network byte order */
 	__u32 local_port;	/* stored in host byte order */
+	__u32 is_fullsock;	/* Some TCP fields are only valid if
+				 * there is a full socket. If not, the
+				 * fields read as zero.
+				 */
+	__u32 snd_cwnd;
+	__u32 srtt_us;		/* Averaged RTT << 3 in usecs */
+	__u32 bpf_sock_ops_cb_flags; /* flags defined in uapi/linux/tcp.h */
+	__u32 state;
+	__u32 rtt_min;
+	__u32 snd_ssthresh;
+	__u32 rcv_nxt;
+	__u32 snd_nxt;
+	__u32 snd_una;
+	__u32 mss_cache;
+	__u32 ecn_flags;
+	__u32 rate_delivered;
+	__u32 rate_interval_us;
+	__u32 packets_out;
+	__u32 retrans_out;
+	__u32 total_retrans;
+	__u32 segs_in;
+	__u32 data_segs_in;
+	__u32 segs_out;
+	__u32 data_segs_out;
+	__u32 lost_out;
+	__u32 sacked_out;
+	__u32 sk_txhash;
+	__u64 bytes_received;
+	__u64 bytes_acked;
 };
 
+/* Definitions for bpf_sock_ops_cb_flags */
+#define BPF_SOCK_OPS_RTO_CB_FLAG	(1<<0)
+#define BPF_SOCK_OPS_RETRANS_CB_FLAG	(1<<1)
+#define BPF_SOCK_OPS_STATE_CB_FLAG	(1<<2)
+#define BPF_SOCK_OPS_ALL_CB_FLAGS       0x7		/* Mask of all currently
+							 * supported cb flags
+							 */
+
 /* List of known BPF sock_ops operators.
  * New entries can only be added at the end
  */
@@ -976,6 +1044,43 @@ enum {
 					 * a congestion threshold. RTTs above
 					 * this indicate congestion
 					 */
+	BPF_SOCK_OPS_RTO_CB,		/* Called when an RTO has triggered.
+					 * Arg1: value of icsk_retransmits
+					 * Arg2: value of icsk_rto
+					 * Arg3: whether RTO has expired
+					 */
+	BPF_SOCK_OPS_RETRANS_CB,	/* Called when skb is retransmitted.
+					 * Arg1: sequence number of 1st byte
+					 * Arg2: # segments
+					 * Arg3: return value of
+					 *       tcp_transmit_skb (0 => success)
+					 */
+	BPF_SOCK_OPS_STATE_CB,		/* Called when TCP changes state.
+					 * Arg1: old_state
+					 * Arg2: new_state
+					 */
+};
+
+/* List of TCP states. There is a build check in net/ipv4/tcp.c to detect
+ * changes between the TCP and BPF versions. Ideally this should never happen.
+ * If it does, we need to add code to convert them before calling
+ * the BPF sock_ops function.
+ */
+enum {
+	BPF_TCP_ESTABLISHED = 1,
+	BPF_TCP_SYN_SENT,
+	BPF_TCP_SYN_RECV,
+	BPF_TCP_FIN_WAIT1,
+	BPF_TCP_FIN_WAIT2,
+	BPF_TCP_TIME_WAIT,
+	BPF_TCP_CLOSE,
+	BPF_TCP_CLOSE_WAIT,
+	BPF_TCP_LAST_ACK,
+	BPF_TCP_LISTEN,
+	BPF_TCP_CLOSING,	/* Now a valid state */
+	BPF_TCP_NEW_SYN_RECV,
+
+	BPF_TCP_MAX_STATES	/* Leave at the end! */
 };
 
 #define TCP_BPF_IW		1001	/* Set TCP initial congestion window */
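
A sock_ops program has to opt into the new RTO/retransmit/state callbacks
with bpf_sock_ops_cb_flags_set(); otherwise they are never invoked. A hedged
sketch (helper declaration in the samples/bpf style; the choice of
BPF_SOCK_OPS_TIMEOUT_INIT as the subscription point is arbitrary here)::

    #include <linux/bpf.h>

    #define SEC(NAME) __attribute__((section(NAME), used))

    static int (*bpf_sock_ops_cb_flags_set)(struct bpf_sock_ops *skops,
                                            int flags) =
        (void *) BPF_FUNC_sock_ops_cb_flags_set;

    SEC("sockops")
    int sockops_cb(struct bpf_sock_ops *skops)
    {
        switch (skops->op) {
        case BPF_SOCK_OPS_TIMEOUT_INIT:
            /* Subscribe this socket to RTO and state-change callbacks */
            bpf_sock_ops_cb_flags_set(skops, BPF_SOCK_OPS_RTO_CB_FLAG |
                                             BPF_SOCK_OPS_STATE_CB_FLAG);
            break;
        case BPF_SOCK_OPS_RTO_CB:
            /* args[0]: icsk_retransmits, args[2]: whether RTO expired */
            break;
        case BPF_SOCK_OPS_STATE_CB:
            /* args[0]: old_state, args[1]: new_state (BPF_TCP_*) */
            break;
        }
        return 1;
    }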
@@ -995,7 +1100,8 @@ struct bpf_perf_event_value {
 #define BPF_DEVCG_DEV_CHAR	(1ULL << 1)
 
 struct bpf_cgroup_dev_ctx {
-	__u32 access_type; /* (access << 16) | type */
+	/* access_type encoded as (BPF_DEVCG_ACC_* << 16) | BPF_DEVCG_DEV_* */
+	__u32 access_type;
 	__u32 major;
 	__u32 minor;
 };
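
The clarified access_type encoding is consumed like this in a
BPF_PROG_TYPE_CGROUP_DEVICE program; a hedged sketch (SEC() assumed as in
samples/bpf, and the 1:3 /dev/null policy is a toy example)::

    #include <linux/bpf.h>

    #define SEC(NAME) __attribute__((section(NAME), used))

    SEC("cgroup/dev")
    int dev_cgroup(struct bpf_cgroup_dev_ctx *ctx)
    {
        short type = ctx->access_type & 0xFFFF;  /* BPF_DEVCG_DEV_* */
        short access = ctx->access_type >> 16;   /* BPF_DEVCG_ACC_* */

        /* Allow read access to char device 1:3, deny everything else */
        if (type == BPF_DEVCG_DEV_CHAR && (access & BPF_DEVCG_ACC_READ) &&
            ctx->major == 1 && ctx->minor == 3)
            return 1;
        return 0;
    }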
diff --git a/tools/include/uapi/linux/perf_event.h b/tools/include/uapi/linux/perf_event.h
index c77c9a2ebbbb..e0739a1aa4b2 100644
--- a/tools/include/uapi/linux/perf_event.h
+++ b/tools/include/uapi/linux/perf_event.h
@@ -418,6 +418,27 @@ struct perf_event_attr {
 	__u16	__reserved_2;	/* align to __u64 */
 };
 
+/*
+ * Structure used by the PERF_EVENT_IOC_QUERY_BPF command below
+ * to query bpf programs attached to the same perf tracepoint
+ * as the given perf event.
+ */
+struct perf_event_query_bpf {
+	/*
+	 * Length of the ids array below
+	 */
+	__u32	ids_len;
+	/*
+	 * Set by the kernel to indicate the number of
+	 * available programs
+	 */
+	__u32	prog_cnt;
+	/*
+	 * User provided buffer to store program ids
+	 */
+	__u32	ids[0];
+};
+
 #define perf_flags(attr)	(*(&(attr)->read_format + 1))
 
 /*
@@ -433,6 +454,7 @@ struct perf_event_attr {
 #define PERF_EVENT_IOC_ID		_IOR('$', 7, __u64 *)
 #define PERF_EVENT_IOC_SET_BPF		_IOW('$', 8, __u32)
 #define PERF_EVENT_IOC_PAUSE_OUTPUT	_IOW('$', 9, __u32)
+#define PERF_EVENT_IOC_QUERY_BPF	_IOWR('$', 10, struct perf_event_query_bpf *)
 
 enum perf_event_ioc_flags {
 	PERF_IOC_FLAG_GROUP		= 1U << 0,
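
Userspace consumes the new ioctl by sizing the trailing ids[] array itself.
A hedged sketch, assuming perf_fd came from perf_event_open() on a
tracepoint event with BPF programs attached::

    #include <stdio.h>
    #include <stdlib.h>
    #include <sys/ioctl.h>
    #include <linux/perf_event.h>

    static void query_bpf_progs(int perf_fd)
    {
        __u32 i, ids_len = 16;
        struct perf_event_query_bpf *query;

        query = calloc(1, sizeof(*query) + ids_len * sizeof(__u32));
        if (!query)
            return;
        query->ids_len = ids_len;

        /* On success the kernel fills prog_cnt and up to ids_len ids */
        if (ioctl(perf_fd, PERF_EVENT_IOC_QUERY_BPF, query) == 0)
            for (i = 0; i < query->prog_cnt; i++)
                printf("attached prog id %u\n", query->ids[i]);
        free(query);
    }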
diff --git a/tools/lib/bpf/Makefile b/tools/lib/bpf/Makefile
index 4555304dc18e..83714ca1f22b 100644
--- a/tools/lib/bpf/Makefile
+++ b/tools/lib/bpf/Makefile
@@ -93,7 +93,6 @@ export prefix libdir src obj
 # Shell quotes
 libdir_SQ = $(subst ','\'',$(libdir))
 libdir_relative_SQ = $(subst ','\'',$(libdir_relative))
-plugin_dir_SQ = $(subst ','\'',$(plugin_dir))
 
 LIB_FILE = libbpf.a libbpf.so
 
@@ -150,7 +149,7 @@ CMD_TARGETS = $(LIB_FILE)
 
 TARGETS = $(CMD_TARGETS)
 
-all: fixdep $(VERSION_FILES) all_cmd
+all: fixdep all_cmd
 
 all_cmd: $(CMD_TARGETS)
 
@@ -169,21 +168,11 @@ $(OUTPUT)libbpf.so: $(BPF_IN)
 $(OUTPUT)libbpf.a: $(BPF_IN)
 	$(QUIET_LINK)$(RM) $@; $(AR) rcs $@ $^
 
-define update_dir
-  (echo $1 > $@.tmp;				\
-   if [ -r $@ ] && cmp -s $@ $@.tmp; then	\
-     rm -f $@.tmp;				\
-   else						\
-     echo '  UPDATE                 $@';	\
-     mv -f $@.tmp $@;				\
-   fi);
-endef
-
 define do_install
 	if [ ! -d '$(DESTDIR_SQ)$2' ]; then		\
 		$(INSTALL) -d -m 755 '$(DESTDIR_SQ)$2';	\
 	fi;						\
-	$(INSTALL) $1 '$(DESTDIR_SQ)$2'
+	$(INSTALL) $1 $(if $3,-m $3,) '$(DESTDIR_SQ)$2'
 endef
 
 install_lib: all_cmd
@@ -192,7 +181,8 @@ install_lib: all_cmd
 
 install_headers:
 	$(call QUIET_INSTALL, headers) \
-		$(call do_install,bpf.h,$(prefix)/include/bpf,644)
+		$(call do_install,bpf.h,$(prefix)/include/bpf,644); \
+		$(call do_install,libbpf.h,$(prefix)/include/bpf,644);
 
 install: install_lib
 
@@ -203,7 +193,7 @@ config-clean:
 	$(Q)$(MAKE) -C $(srctree)/tools/build/feature/ clean >/dev/null
 
 clean:
-	$(call QUIET_CLEAN, libbpf) $(RM) *.o *~ $(TARGETS) *.a *.so $(VERSION_FILES) .*.d .*.cmd \
+	$(call QUIET_CLEAN, libbpf) $(RM) *.o *~ $(TARGETS) *.a *.so .*.d .*.cmd \
 		$(RM) LIBBPF-CFLAGS
 	$(call QUIET_CLEAN, core-gen) $(RM) $(OUTPUT)FEATURE-DUMP.libbpf
 
@@ -213,10 +203,10 @@ PHONY += force elfdep bpfdep
 force:
 
 elfdep:
-	@if [ "$(feature-libelf)" != "1" ]; then echo "No libelf found"; exit -1 ; fi
+	@if [ "$(feature-libelf)" != "1" ]; then echo "No libelf found"; exit 1 ; fi
 
 bpfdep:
-	@if [ "$(feature-bpf)" != "1" ]; then echo "BPF API too old"; exit -1 ; fi
+	@if [ "$(feature-bpf)" != "1" ]; then echo "BPF API too old"; exit 1 ; fi
 
 # Declare the contents of the .PHONY variable as phony.  We keep that
 # information in a variable so we can use it in if_changed and friends.
diff --git a/tools/lib/bpf/bpf.h b/tools/lib/bpf/bpf.h
index 6534889e2b2f..9f44c196931e 100644
--- a/tools/lib/bpf/bpf.h
+++ b/tools/lib/bpf/bpf.h
@@ -40,7 +40,7 @@ int bpf_create_map_in_map(enum bpf_map_type map_type, const char *name,
 			  __u32 map_flags);
 
 /* Recommended log buffer size */
-#define BPF_LOG_BUF_SIZE 65536
+#define BPF_LOG_BUF_SIZE (256 * 1024)
 int bpf_load_program_name(enum bpf_prog_type type, const char *name,
 			  const struct bpf_insn *insns,
 			  size_t insns_cnt, const char *license,
diff --git a/tools/lib/bpf/libbpf.c b/tools/lib/bpf/libbpf.c
index 5aa45f89da93..30c776375118 100644
--- a/tools/lib/bpf/libbpf.c
+++ b/tools/lib/bpf/libbpf.c
@@ -174,12 +174,19 @@ struct bpf_program {
 	char *name;
 	char *section_name;
 	struct bpf_insn *insns;
-	size_t insns_cnt;
+	size_t insns_cnt, main_prog_cnt;
 	enum bpf_prog_type type;
 
-	struct {
+	struct reloc_desc {
+		enum {
+			RELO_LD64,
+			RELO_CALL,
+		} type;
 		int insn_idx;
-		int map_idx;
+		union {
+			int map_idx;
+			int text_off;
+		};
 	} *reloc_desc;
 	int nr_reloc;
 
@@ -234,6 +241,7 @@ struct bpf_object {
 		} *reloc;
 		int nr_reloc;
 		int maps_shndx;
+		int text_shndx;
 	} efile;
 	/*
 	 * All loaded bpf_object is linked in a list, which is
@@ -375,9 +383,13 @@ bpf_object__init_prog_names(struct bpf_object *obj)
 	size_t pi, si;
 
 	for (pi = 0; pi < obj->nr_programs; pi++) {
-		char *name = NULL;
+		const char *name = NULL;
 
 		prog = &obj->programs[pi];
+		if (prog->idx == obj->efile.text_shndx) {
+			name = ".text";
+			goto skip_search;
+		}
 
 		for (si = 0; si < symbols->d_size / sizeof(GElf_Sym) && !name;
 		     si++) {
@@ -387,6 +399,8 @@ bpf_object__init_prog_names(struct bpf_object *obj)
 				continue;
 			if (sym.st_shndx != prog->idx)
 				continue;
+			if (GELF_ST_BIND(sym.st_info) != STB_GLOBAL)
+				continue;
 
 			name = elf_strptr(obj->efile.elf,
 					  obj->efile.strtabidx,
@@ -403,7 +417,7 @@ bpf_object__init_prog_names(struct bpf_object *obj)
 				   prog->section_name);
 			return -EINVAL;
 		}
-
+skip_search:
 		prog->name = strdup(name);
 		if (!prog->name) {
 			pr_warning("failed to allocate memory for prog sym %s\n",
@@ -793,6 +807,8 @@ static int bpf_object__elf_collect(struct bpf_object *obj)
 		} else if ((sh.sh_type == SHT_PROGBITS) &&
 			   (sh.sh_flags & SHF_EXECINSTR) &&
 			   (data->d_size > 0)) {
+			if (strcmp(name, ".text") == 0)
+				obj->efile.text_shndx = idx;
 			err = bpf_object__add_program(obj, data->d_buf,
 						      data->d_size, name, idx);
 			if (err) {
@@ -854,11 +870,14 @@ bpf_object__find_prog_by_idx(struct bpf_object *obj, int idx)
 }
 
 static int
-bpf_program__collect_reloc(struct bpf_program *prog,
-			   size_t nr_maps, GElf_Shdr *shdr,
-			   Elf_Data *data, Elf_Data *symbols,
-			   int maps_shndx, struct bpf_map *maps)
+bpf_program__collect_reloc(struct bpf_program *prog, GElf_Shdr *shdr,
+			   Elf_Data *data, struct bpf_object *obj)
 {
+	Elf_Data *symbols = obj->efile.symbols;
+	int text_shndx = obj->efile.text_shndx;
+	int maps_shndx = obj->efile.maps_shndx;
+	struct bpf_map *maps = obj->maps;
+	size_t nr_maps = obj->nr_maps;
 	int i, nrels;
 
 	pr_debug("collecting relocating info for: '%s'\n",
@@ -891,8 +910,11 @@ bpf_program__collect_reloc(struct bpf_program *prog,
 				   GELF_R_SYM(rel.r_info));
 			return -LIBBPF_ERRNO__FORMAT;
 		}
+		pr_debug("relo for %lld value %lld name %d\n",
+			 (long long) (rel.r_info >> 32),
+			 (long long) sym.st_value, sym.st_name);
 
-		if (sym.st_shndx != maps_shndx) {
+		if (sym.st_shndx != maps_shndx && sym.st_shndx != text_shndx) {
 			pr_warning("Program '%s' contains non-map related relo data pointing to section %u\n",
 				   prog->section_name, sym.st_shndx);
 			return -LIBBPF_ERRNO__RELOC;
@@ -901,6 +923,17 @@ bpf_program__collect_reloc(struct bpf_program *prog,
 		insn_idx = rel.r_offset / sizeof(struct bpf_insn);
 		pr_debug("relocation: insn_idx=%u\n", insn_idx);
 
+		if (insns[insn_idx].code == (BPF_JMP | BPF_CALL)) {
+			if (insns[insn_idx].src_reg != BPF_PSEUDO_CALL) {
+				pr_warning("incorrect bpf_call opcode\n");
+				return -LIBBPF_ERRNO__RELOC;
+			}
+			prog->reloc_desc[i].type = RELO_CALL;
+			prog->reloc_desc[i].insn_idx = insn_idx;
+			prog->reloc_desc[i].text_off = sym.st_value;
+			continue;
+		}
+
 		if (insns[insn_idx].code != (BPF_LD | BPF_IMM | BPF_DW)) {
 			pr_warning("bpf: relocation: invalid relo for insns[%d].code 0x%x\n",
 				   insn_idx, insns[insn_idx].code);
@@ -922,6 +955,7 @@ bpf_program__collect_reloc(struct bpf_program *prog,
 			return -LIBBPF_ERRNO__RELOC;
 		}
 
+		prog->reloc_desc[i].type = RELO_LD64;
 		prog->reloc_desc[i].insn_idx = insn_idx;
 		prog->reloc_desc[i].map_idx = map_idx;
 	}
@@ -961,27 +995,76 @@ bpf_object__create_maps(struct bpf_object *obj)
 }
 
 static int
+bpf_program__reloc_text(struct bpf_program *prog, struct bpf_object *obj,
+			struct reloc_desc *relo)
+{
+	struct bpf_insn *insn, *new_insn;
+	struct bpf_program *text;
+	size_t new_cnt;
+
+	if (relo->type != RELO_CALL)
+		return -LIBBPF_ERRNO__RELOC;
+
+	if (prog->idx == obj->efile.text_shndx) {
+		pr_warning("relo in .text insn %d into off %d\n",
+			   relo->insn_idx, relo->text_off);
+		return -LIBBPF_ERRNO__RELOC;
+	}
+
+	if (prog->main_prog_cnt == 0) {
+		text = bpf_object__find_prog_by_idx(obj, obj->efile.text_shndx);
+		if (!text) {
+			pr_warning("no .text section found, yet a relo into .text exists\n");
+			return -LIBBPF_ERRNO__RELOC;
+		}
+		new_cnt = prog->insns_cnt + text->insns_cnt;
+		new_insn = realloc(prog->insns, new_cnt * sizeof(*insn));
+		if (!new_insn) {
+			pr_warning("oom in prog realloc\n");
+			return -ENOMEM;
+		}
+		memcpy(new_insn + prog->insns_cnt, text->insns,
+		       text->insns_cnt * sizeof(*insn));
+		prog->insns = new_insn;
+		prog->main_prog_cnt = prog->insns_cnt;
+		prog->insns_cnt = new_cnt;
+		pr_debug("added %zd insns from %s to prog %s\n",
+			 text->insns_cnt, text->section_name,
+			 prog->section_name);
+	}
+	insn = &prog->insns[relo->insn_idx];
+	insn->imm += prog->main_prog_cnt - relo->insn_idx;
+	return 0;
+}
+
+static int
 bpf_program__relocate(struct bpf_program *prog, struct bpf_object *obj)
 {
-	int i;
+	int i, err;
 
 	if (!prog || !prog->reloc_desc)
 		return 0;
 
 	for (i = 0; i < prog->nr_reloc; i++) {
-		int insn_idx, map_idx;
-		struct bpf_insn *insns = prog->insns;
+		if (prog->reloc_desc[i].type == RELO_LD64) {
+			struct bpf_insn *insns = prog->insns;
+			int insn_idx, map_idx;
 
-		insn_idx = prog->reloc_desc[i].insn_idx;
-		map_idx = prog->reloc_desc[i].map_idx;
+			insn_idx = prog->reloc_desc[i].insn_idx;
+			map_idx = prog->reloc_desc[i].map_idx;
 
-		if (insn_idx >= (int)prog->insns_cnt) {
-			pr_warning("relocation out of range: '%s'\n",
-				   prog->section_name);
-			return -LIBBPF_ERRNO__RELOC;
+			if (insn_idx >= (int)prog->insns_cnt) {
+				pr_warning("relocation out of range: '%s'\n",
+					   prog->section_name);
+				return -LIBBPF_ERRNO__RELOC;
+			}
+			insns[insn_idx].src_reg = BPF_PSEUDO_MAP_FD;
+			insns[insn_idx].imm = obj->maps[map_idx].fd;
+		} else {
+			err = bpf_program__reloc_text(prog, obj,
+						      &prog->reloc_desc[i]);
+			if (err)
+				return err;
 		}
-		insns[insn_idx].src_reg = BPF_PSEUDO_MAP_FD;
-		insns[insn_idx].imm = obj->maps[map_idx].fd;
 	}
 
 	zfree(&prog->reloc_desc);
@@ -1024,7 +1107,6 @@ static int bpf_object__collect_reloc(struct bpf_object *obj)
 		Elf_Data *data = obj->efile.reloc[i].data;
 		int idx = shdr->sh_info;
 		struct bpf_program *prog;
-		size_t nr_maps = obj->nr_maps;
 
 		if (shdr->sh_type != SHT_REL) {
 			pr_warning("internal error at %d\n", __LINE__);
@@ -1038,11 +1120,9 @@ static int bpf_object__collect_reloc(struct bpf_object *obj)
 			return -LIBBPF_ERRNO__RELOC;
 		}
 
-		err = bpf_program__collect_reloc(prog, nr_maps,
+		err = bpf_program__collect_reloc(prog,
 						 shdr, data,
-						 obj->efile.symbols,
-						 obj->efile.maps_shndx,
-						 obj->maps);
+						 obj);
 		if (err)
 			return err;
 	}
@@ -1195,6 +1275,8 @@ bpf_object__load_progs(struct bpf_object *obj)
 	int err;
 
 	for (i = 0; i < obj->nr_programs; i++) {
+		if (obj->programs[i].idx == obj->efile.text_shndx)
+			continue;
 		err = bpf_program__load(&obj->programs[i],
 					obj->license,
 					obj->kern_version);
@@ -1721,6 +1803,45 @@ BPF_PROG_TYPE_FNS(tracepoint, BPF_PROG_TYPE_TRACEPOINT);
 BPF_PROG_TYPE_FNS(xdp, BPF_PROG_TYPE_XDP);
 BPF_PROG_TYPE_FNS(perf_event, BPF_PROG_TYPE_PERF_EVENT);
 
+#define BPF_PROG_SEC(string, type) { string, sizeof(string) - 1, type }
+static const struct {
+	const char *sec;
+	size_t len;
+	enum bpf_prog_type prog_type;
+} section_names[] = {
+	BPF_PROG_SEC("socket",		BPF_PROG_TYPE_SOCKET_FILTER),
+	BPF_PROG_SEC("kprobe/",		BPF_PROG_TYPE_KPROBE),
+	BPF_PROG_SEC("kretprobe/",	BPF_PROG_TYPE_KPROBE),
+	BPF_PROG_SEC("tracepoint/",	BPF_PROG_TYPE_TRACEPOINT),
+	BPF_PROG_SEC("xdp",		BPF_PROG_TYPE_XDP),
+	BPF_PROG_SEC("perf_event",	BPF_PROG_TYPE_PERF_EVENT),
+	BPF_PROG_SEC("cgroup/skb",	BPF_PROG_TYPE_CGROUP_SKB),
+	BPF_PROG_SEC("cgroup/sock",	BPF_PROG_TYPE_CGROUP_SOCK),
+	BPF_PROG_SEC("cgroup/dev",	BPF_PROG_TYPE_CGROUP_DEVICE),
+	BPF_PROG_SEC("sockops",		BPF_PROG_TYPE_SOCK_OPS),
+	BPF_PROG_SEC("sk_skb",		BPF_PROG_TYPE_SK_SKB),
+};
+#undef BPF_PROG_SEC
+
+static enum bpf_prog_type bpf_program__guess_type(struct bpf_program *prog)
+{
+	int i;
+
+	if (!prog->section_name)
+		goto err;
+
+	for (i = 0; i < ARRAY_SIZE(section_names); i++)
+		if (strncmp(prog->section_name, section_names[i].sec,
+			    section_names[i].len) == 0)
+			return section_names[i].prog_type;
+
+err:
+	pr_warning("failed to guess program type based on section name %s\n",
+		   prog->section_name);
+
+	return BPF_PROG_TYPE_UNSPEC;
+}
+
 int bpf_map__fd(struct bpf_map *map)
 {
 	return map ? map->fd : -EINVAL;
@@ -1818,7 +1939,7 @@ long libbpf_get_error(const void *ptr)
 int bpf_prog_load(const char *file, enum bpf_prog_type type,
 		  struct bpf_object **pobj, int *prog_fd)
 {
-	struct bpf_program *prog;
+	struct bpf_program *prog, *first_prog = NULL;
 	struct bpf_object *obj;
 	int err;
 
@@ -1826,13 +1947,30 @@ int bpf_prog_load(const char *file, enum bpf_prog_type type,
 	if (IS_ERR(obj))
 		return -ENOENT;
 
-	prog = bpf_program__next(NULL, obj);
-	if (!prog) {
+	bpf_object__for_each_program(prog, obj) {
+		/*
+		 * If type is not specified, try to guess it based on
+		 * section name.
+		 */
+		if (type == BPF_PROG_TYPE_UNSPEC) {
+			type = bpf_program__guess_type(prog);
+			if (type == BPF_PROG_TYPE_UNSPEC) {
+				bpf_object__close(obj);
+				return -EINVAL;
+			}
+		}
+
+		bpf_program__set_type(prog, type);
+		if (prog->idx != obj->efile.text_shndx && !first_prog)
+			first_prog = prog;
+	}
+
+	if (!first_prog) {
+		pr_warning("object file doesn't contain a bpf program\n");
 		bpf_object__close(obj);
 		return -ENOENT;
 	}
 
-	bpf_program__set_type(prog, type);
 	err = bpf_object__load(obj);
 	if (err) {
 		bpf_object__close(obj);
@@ -1840,6 +1978,6 @@ int bpf_prog_load(const char *file, enum bpf_prog_type type,
 	}
 
 	*pobj = obj;
-	*prog_fd = bpf_program__fd(prog);
+	*prog_fd = bpf_program__fd(first_prog);
 	return 0;
 }
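
For illustration, a hedged sketch of what the rewritten loader flow enables: a
caller may now pass BPF_PROG_TYPE_UNSPEC and have the type guessed from the
ELF section name. The object file, section, and function names here are
hypothetical:

/* in the BPF object file: */
SEC("kprobe/sys_nanosleep")	/* guessed as BPF_PROG_TYPE_KPROBE */
int trace_nanosleep(struct pt_regs *ctx)
{
	return 0;
}

/* in the user space loader: */
struct bpf_object *obj;
int prog_fd;

if (bpf_prog_load("probe.o", BPF_PROG_TYPE_UNSPEC, &obj, &prog_fd))
	return 1;	/* fails if no section name prefix matches */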
diff --git a/tools/scripts/Makefile.include b/tools/scripts/Makefile.include
index 3fab179b1aba..fcb3ed0be5f8 100644
--- a/tools/scripts/Makefile.include
+++ b/tools/scripts/Makefile.include
@@ -99,5 +99,6 @@ ifneq ($(silent),1)
 
 	QUIET_CLEAN    = @printf '  CLEAN    %s\n' $1;
 	QUIET_INSTALL  = @printf '  INSTALL  %s\n' $1;
+	QUIET_UNINST   = @printf '  UNINST   %s\n' $1;
   endif
 endif
diff --git a/tools/testing/selftests/bpf/.gitignore b/tools/testing/selftests/bpf/.gitignore
index 541d9d7fad5a..1e09d77f1948 100644
--- a/tools/testing/selftests/bpf/.gitignore
+++ b/tools/testing/selftests/bpf/.gitignore
@@ -3,3 +3,10 @@ test_maps
 test_lru_map
 test_lpm_map
 test_tag
+FEATURE-DUMP.libbpf
+fixdep
+test_align
+test_dev_cgroup
+test_progs
+test_verifier_log
+feature
diff --git a/tools/testing/selftests/bpf/Makefile b/tools/testing/selftests/bpf/Makefile
index 9316e648a880..bf05bc5e36e5 100644
--- a/tools/testing/selftests/bpf/Makefile
+++ b/tools/testing/selftests/bpf/Makefile
@@ -11,16 +11,19 @@ ifneq ($(wildcard $(GENHDR)),)
 endif
 
 CFLAGS += -Wall -O2 -I$(APIDIR) -I$(LIBDIR) -I$(GENDIR) $(GENFLAGS) -I../../../include
-LDLIBS += -lcap -lelf -lrt
+LDLIBS += -lcap -lelf -lrt -lpthread
 
 TEST_GEN_PROGS = test_verifier test_tag test_maps test_lru_map test_lpm_map test_progs \
-	test_align test_verifier_log test_dev_cgroup
+	test_align test_verifier_log test_dev_cgroup test_tcpbpf_user
 
 TEST_GEN_FILES = test_pkt_access.o test_xdp.o test_l4lb.o test_tcp_estats.o test_obj_id.o \
 	test_pkt_md_access.o test_xdp_redirect.o test_xdp_meta.o sockmap_parse_prog.o     \
-	sockmap_verdict_prog.o dev_cgroup.o
+	sockmap_verdict_prog.o dev_cgroup.o sample_ret0.o test_tracepoint.o \
+	test_l4lb_noinline.o test_xdp_noinline.o test_stacktrace_map.o \
+	sample_map_ret0.o test_tcpbpf_kern.o
 
-TEST_PROGS := test_kmod.sh test_xdp_redirect.sh test_xdp_meta.sh
+TEST_PROGS := test_kmod.sh test_xdp_redirect.sh test_xdp_meta.sh \
+	test_offload.py
 
 include ../lib.mk
 
@@ -49,8 +52,13 @@ else
   CPU ?= generic
 endif
 
+CLANG_FLAGS = -I. -I./include/uapi -I../../../include/uapi \
+	      -Wno-compare-distinct-pointer-types
+
+$(OUTPUT)/test_l4lb_noinline.o: CLANG_FLAGS += -fno-inline
+$(OUTPUT)/test_xdp_noinline.o: CLANG_FLAGS += -fno-inline
+
 %.o: %.c
-	$(CLANG) -I. -I./include/uapi -I../../../include/uapi \
-		 -Wno-compare-distinct-pointer-types          \
+	$(CLANG) $(CLANG_FLAGS) \
 		 -O2 -target bpf -emit-llvm -c $< -o - |      \
 	$(LLC) -march=bpf -mcpu=$(CPU) -filetype=obj -o $@
diff --git a/tools/testing/selftests/bpf/bpf_helpers.h b/tools/testing/selftests/bpf/bpf_helpers.h
index fd9a17fa8a8b..dde2c11d7771 100644
--- a/tools/testing/selftests/bpf/bpf_helpers.h
+++ b/tools/testing/selftests/bpf/bpf_helpers.h
@@ -71,6 +71,8 @@ static int (*bpf_setsockopt)(void *ctx, int level, int optname, void *optval,
 static int (*bpf_getsockopt)(void *ctx, int level, int optname, void *optval,
 			     int optlen) =
 	(void *) BPF_FUNC_getsockopt;
+static int (*bpf_sock_ops_cb_flags_set)(void *ctx, int flags) =
+	(void *) BPF_FUNC_sock_ops_cb_flags_set;
 static int (*bpf_sk_redirect_map)(void *ctx, void *map, int key, int flags) =
 	(void *) BPF_FUNC_sk_redirect_map;
 static int (*bpf_sock_map_update)(void *map, void *key, void *value,
@@ -82,7 +84,8 @@ static int (*bpf_perf_event_read_value)(void *map, unsigned long long flags,
 static int (*bpf_perf_prog_read_value)(void *ctx, void *buf,
 				       unsigned int buf_size) =
 	(void *) BPF_FUNC_perf_prog_read_value;
-
+static int (*bpf_override_return)(void *ctx, unsigned long rc) =
+	(void *) BPF_FUNC_override_return;
 
 /* llvm builtin functions that an eBPF C program may use to
  * emit BPF_LD_ABS and BPF_LD_IND instructions
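
A minimal sockops sketch using the newly declared helper; the callback op and
BPF_SOCK_OPS_RTO_CB_FLAG are assumptions from the same patch series, not part
of this hunk:

SEC("sockops")
int enable_rto_cb(struct bpf_sock_ops *skops)
{
	/* ask the kernel to call back into this program on RTO events */
	if (skops->op == BPF_SOCK_OPS_ACTIVE_ESTABLISHED_CB)
		bpf_sock_ops_cb_flags_set(skops, BPF_SOCK_OPS_RTO_CB_FLAG);
	return 1;
}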
diff --git a/tools/testing/selftests/bpf/config b/tools/testing/selftests/bpf/config
index 52d53ed08769..983dd25d49f4 100644
--- a/tools/testing/selftests/bpf/config
+++ b/tools/testing/selftests/bpf/config
@@ -3,3 +3,5 @@ CONFIG_BPF_SYSCALL=y
 CONFIG_NET_CLS_BPF=m
 CONFIG_BPF_EVENTS=y
 CONFIG_TEST_BPF=m
+CONFIG_CGROUP_BPF=y
+CONFIG_NETDEVSIM=m
diff --git a/tools/testing/selftests/bpf/sample_map_ret0.c b/tools/testing/selftests/bpf/sample_map_ret0.c
new file mode 100644
index 000000000000..0756303676ac
--- /dev/null
+++ b/tools/testing/selftests/bpf/sample_map_ret0.c
@@ -0,0 +1,34 @@
+/* SPDX-License-Identifier: (GPL-2.0 OR BSD-2-Clause) */
+#include <linux/bpf.h>
+#include "bpf_helpers.h"
+
+struct bpf_map_def SEC("maps") htab = {
+	.type = BPF_MAP_TYPE_HASH,
+	.key_size = sizeof(__u32),
+	.value_size = sizeof(long),
+	.max_entries = 2,
+};
+
+struct bpf_map_def SEC("maps") array = {
+	.type = BPF_MAP_TYPE_ARRAY,
+	.key_size = sizeof(__u32),
+	.value_size = sizeof(long),
+	.max_entries = 2,
+};
+
+/* Sample program which should always load for testing control paths. */
+SEC(".text") int func()
+{
+	__u64 key64 = 0;
+	__u32 key = 0;
+	long *value;
+
+	value = bpf_map_lookup_elem(&htab, &key);
+	if (!value)
+		return 1;
+	value = bpf_map_lookup_elem(&array, &key64);
+	if (!value)
+		return 1;
+
+	return 0;
+}
diff --git a/tools/testing/selftests/bpf/sample_ret0.c b/tools/testing/selftests/bpf/sample_ret0.c
new file mode 100644
index 000000000000..fec99750d6ea
--- /dev/null
+++ b/tools/testing/selftests/bpf/sample_ret0.c
@@ -0,0 +1,7 @@
+/* SPDX-License-Identifier: (GPL-2.0 OR BSD-2-Clause) */
+
+/* Sample program which should always load for testing control paths. */
+int func()
+{
+	return 0;
+}
diff --git a/tools/testing/selftests/bpf/tcp_client.py b/tools/testing/selftests/bpf/tcp_client.py
new file mode 100755
index 000000000000..481dccdf140c
--- /dev/null
+++ b/tools/testing/selftests/bpf/tcp_client.py
@@ -0,0 +1,51 @@
+#!/usr/bin/env python2
+#
+# SPDX-License-Identifier: GPL-2.0
+#
+
+import sys, os, os.path, getopt
+import socket, time
+import subprocess
+import select
+
+def read(sock, n):
+    buf = ''
+    while len(buf) < n:
+        rem = n - len(buf)
+        try: s = sock.recv(rem)
+        except (socket.error), e: return ''
+        buf += s
+    return buf
+
+def send(sock, s):
+    total = len(s)
+    count = 0
+    while count < total:
+        try: n = sock.send(s)
+        except (socket.error), e: n = 0
+        if n == 0:
+            return count
+        count += n
+    return count
+
+
+serverPort = int(sys.argv[1])
+HostName = socket.gethostname()
+
+# create active socket
+sock = socket.socket(socket.AF_INET6, socket.SOCK_STREAM)
+try:
+    sock.connect((HostName, serverPort))
+except socket.error as e:
+    sys.exit(1)
+
+buf = ''
+n = 0
+while n < 1000:
+    buf += '+'
+    n += 1
+
+sock.settimeout(1)
+n = send(sock, buf)
+n = read(sock, 500)
+sys.exit(0)
diff --git a/tools/testing/selftests/bpf/tcp_server.py b/tools/testing/selftests/bpf/tcp_server.py
new file mode 100755
index 000000000000..bc454d7d0be2
--- /dev/null
+++ b/tools/testing/selftests/bpf/tcp_server.py
@@ -0,0 +1,83 @@
+#!/usr/bin/env python2
+#
+# SPDX-License-Identifier: GPL-2.0
+#
+
+import sys, os, os.path, getopt
+import socket, time
+import subprocess
+import select
+
+def read(sock, n):
+    buf = ''
+    while len(buf) < n:
+        rem = n - len(buf)
+        try: s = sock.recv(rem)
+        except (socket.error), e: return ''
+        buf += s
+    return buf
+
+def send(sock, s):
+    total = len(s)
+    count = 0
+    while count < total:
+        try: n = sock.send(s)
+        except (socket.error), e: n = 0
+        if n == 0:
+            return count
+        count += n
+    return count
+
+
+SERVER_PORT = 12877
+MAX_PORTS = 2
+
+serverPort = SERVER_PORT
+serverSocket = None
+
+HostName = socket.gethostname()
+
+# create passive socket
+serverSocket = socket.socket(socket.AF_INET6, socket.SOCK_STREAM)
+host = socket.gethostname()
+
+try: serverSocket.bind((host, 0))
+except socket.error as msg:
+    print 'bind fails: ', msg
+
+sn = serverSocket.getsockname()
+serverPort = sn[1]
+
+cmdStr = ("./tcp_client.py %d &") % (serverPort)
+os.system(cmdStr)
+
+buf = ''
+n = 0
+while n < 500:
+    buf += '.'
+    n += 1
+
+serverSocket.listen(MAX_PORTS)
+readList = [serverSocket]
+
+while True:
+    readyRead, readyWrite, inError = \
+        select.select(readList, [], [], 2)
+
+    if len(readyRead) > 0:
+        waitCount = 0
+        for sock in readyRead:
+            if sock == serverSocket:
+                (clientSocket, address) = serverSocket.accept()
+                address = str(address[0])
+                readList.append(clientSocket)
+            else:
+                sock.settimeout(1)
+                s = read(sock, 1000)
+                n = send(sock, buf)
+                sock.close()
+                serverSocket.close()
+                sys.exit(0)
+    else:
+        print 'Select timeout!'
+        sys.exit(1)
diff --git a/tools/testing/selftests/bpf/test_align.c b/tools/testing/selftests/bpf/test_align.c
index 471bbbdb94db..ff8bd7e3e50c 100644
--- a/tools/testing/selftests/bpf/test_align.c
+++ b/tools/testing/selftests/bpf/test_align.c
@@ -64,11 +64,11 @@ static struct bpf_align_test tests[] = {
 		.matches = {
 			{1, "R1=ctx(id=0,off=0,imm=0)"},
 			{1, "R10=fp0"},
-			{1, "R3=inv2"},
-			{2, "R3=inv4"},
-			{3, "R3=inv8"},
-			{4, "R3=inv16"},
-			{5, "R3=inv32"},
+			{1, "R3_w=inv2"},
+			{2, "R3_w=inv4"},
+			{3, "R3_w=inv8"},
+			{4, "R3_w=inv16"},
+			{5, "R3_w=inv32"},
 		},
 	},
 	{
@@ -92,17 +92,17 @@ static struct bpf_align_test tests[] = {
 		.matches = {
 			{1, "R1=ctx(id=0,off=0,imm=0)"},
 			{1, "R10=fp0"},
-			{1, "R3=inv1"},
-			{2, "R3=inv2"},
-			{3, "R3=inv4"},
-			{4, "R3=inv8"},
-			{5, "R3=inv16"},
-			{6, "R3=inv1"},
-			{7, "R4=inv32"},
-			{8, "R4=inv16"},
-			{9, "R4=inv8"},
-			{10, "R4=inv4"},
-			{11, "R4=inv2"},
+			{1, "R3_w=inv1"},
+			{2, "R3_w=inv2"},
+			{3, "R3_w=inv4"},
+			{4, "R3_w=inv8"},
+			{5, "R3_w=inv16"},
+			{6, "R3_w=inv1"},
+			{7, "R4_w=inv32"},
+			{8, "R4_w=inv16"},
+			{9, "R4_w=inv8"},
+			{10, "R4_w=inv4"},
+			{11, "R4_w=inv2"},
 		},
 	},
 	{
@@ -121,12 +121,12 @@ static struct bpf_align_test tests[] = {
 		.matches = {
 			{1, "R1=ctx(id=0,off=0,imm=0)"},
 			{1, "R10=fp0"},
-			{1, "R3=inv4"},
-			{2, "R3=inv8"},
-			{3, "R3=inv10"},
-			{4, "R4=inv8"},
-			{5, "R4=inv12"},
-			{6, "R4=inv14"},
+			{1, "R3_w=inv4"},
+			{2, "R3_w=inv8"},
+			{3, "R3_w=inv10"},
+			{4, "R4_w=inv8"},
+			{5, "R4_w=inv12"},
+			{6, "R4_w=inv14"},
 		},
 	},
 	{
@@ -143,10 +143,10 @@ static struct bpf_align_test tests[] = {
 		.matches = {
 			{1, "R1=ctx(id=0,off=0,imm=0)"},
 			{1, "R10=fp0"},
-			{1, "R3=inv7"},
-			{2, "R3=inv7"},
-			{3, "R3=inv14"},
-			{4, "R3=inv56"},
+			{1, "R3_w=inv7"},
+			{2, "R3_w=inv7"},
+			{3, "R3_w=inv14"},
+			{4, "R3_w=inv56"},
 		},
 	},
 
@@ -185,18 +185,18 @@ static struct bpf_align_test tests[] = {
 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
 		.matches = {
 			{7, "R0=pkt(id=0,off=8,r=8,imm=0)"},
-			{7, "R3=inv(id=0,umax_value=255,var_off=(0x0; 0xff))"},
-			{8, "R3=inv(id=0,umax_value=510,var_off=(0x0; 0x1fe))"},
-			{9, "R3=inv(id=0,umax_value=1020,var_off=(0x0; 0x3fc))"},
-			{10, "R3=inv(id=0,umax_value=2040,var_off=(0x0; 0x7f8))"},
-			{11, "R3=inv(id=0,umax_value=4080,var_off=(0x0; 0xff0))"},
+			{7, "R3_w=inv(id=0,umax_value=255,var_off=(0x0; 0xff))"},
+			{8, "R3_w=inv(id=0,umax_value=510,var_off=(0x0; 0x1fe))"},
+			{9, "R3_w=inv(id=0,umax_value=1020,var_off=(0x0; 0x3fc))"},
+			{10, "R3_w=inv(id=0,umax_value=2040,var_off=(0x0; 0x7f8))"},
+			{11, "R3_w=inv(id=0,umax_value=4080,var_off=(0x0; 0xff0))"},
 			{18, "R3=pkt_end(id=0,off=0,imm=0)"},
-			{18, "R4=inv(id=0,umax_value=255,var_off=(0x0; 0xff))"},
-			{19, "R4=inv(id=0,umax_value=8160,var_off=(0x0; 0x1fe0))"},
-			{20, "R4=inv(id=0,umax_value=4080,var_off=(0x0; 0xff0))"},
-			{21, "R4=inv(id=0,umax_value=2040,var_off=(0x0; 0x7f8))"},
-			{22, "R4=inv(id=0,umax_value=1020,var_off=(0x0; 0x3fc))"},
-			{23, "R4=inv(id=0,umax_value=510,var_off=(0x0; 0x1fe))"},
+			{18, "R4_w=inv(id=0,umax_value=255,var_off=(0x0; 0xff))"},
+			{19, "R4_w=inv(id=0,umax_value=8160,var_off=(0x0; 0x1fe0))"},
+			{20, "R4_w=inv(id=0,umax_value=4080,var_off=(0x0; 0xff0))"},
+			{21, "R4_w=inv(id=0,umax_value=2040,var_off=(0x0; 0x7f8))"},
+			{22, "R4_w=inv(id=0,umax_value=1020,var_off=(0x0; 0x3fc))"},
+			{23, "R4_w=inv(id=0,umax_value=510,var_off=(0x0; 0x1fe))"},
 		},
 	},
 	{
@@ -217,16 +217,16 @@ static struct bpf_align_test tests[] = {
 		},
 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
 		.matches = {
-			{7, "R3=inv(id=0,umax_value=255,var_off=(0x0; 0xff))"},
-			{8, "R4=inv(id=0,umax_value=255,var_off=(0x0; 0xff))"},
-			{9, "R4=inv(id=0,umax_value=255,var_off=(0x0; 0xff))"},
-			{10, "R4=inv(id=0,umax_value=255,var_off=(0x0; 0xff))"},
-			{11, "R4=inv(id=0,umax_value=510,var_off=(0x0; 0x1fe))"},
-			{12, "R4=inv(id=0,umax_value=255,var_off=(0x0; 0xff))"},
-			{13, "R4=inv(id=0,umax_value=1020,var_off=(0x0; 0x3fc))"},
-			{14, "R4=inv(id=0,umax_value=255,var_off=(0x0; 0xff))"},
-			{15, "R4=inv(id=0,umax_value=2040,var_off=(0x0; 0x7f8))"},
-			{16, "R4=inv(id=0,umax_value=4080,var_off=(0x0; 0xff0))"},
+			{7, "R3_w=inv(id=0,umax_value=255,var_off=(0x0; 0xff))"},
+			{8, "R4_w=inv(id=0,umax_value=255,var_off=(0x0; 0xff))"},
+			{9, "R4_w=inv(id=0,umax_value=255,var_off=(0x0; 0xff))"},
+			{10, "R4_w=inv(id=0,umax_value=255,var_off=(0x0; 0xff))"},
+			{11, "R4_w=inv(id=0,umax_value=510,var_off=(0x0; 0x1fe))"},
+			{12, "R4_w=inv(id=0,umax_value=255,var_off=(0x0; 0xff))"},
+			{13, "R4_w=inv(id=0,umax_value=1020,var_off=(0x0; 0x3fc))"},
+			{14, "R4_w=inv(id=0,umax_value=255,var_off=(0x0; 0xff))"},
+			{15, "R4_w=inv(id=0,umax_value=2040,var_off=(0x0; 0x7f8))"},
+			{16, "R4_w=inv(id=0,umax_value=4080,var_off=(0x0; 0xff0))"},
 		},
 	},
 	{
@@ -257,14 +257,14 @@ static struct bpf_align_test tests[] = {
 		},
 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
 		.matches = {
-			{4, "R5=pkt(id=0,off=0,r=0,imm=0)"},
-			{5, "R5=pkt(id=0,off=14,r=0,imm=0)"},
-			{6, "R4=pkt(id=0,off=14,r=0,imm=0)"},
+			{4, "R5_w=pkt(id=0,off=0,r=0,imm=0)"},
+			{5, "R5_w=pkt(id=0,off=14,r=0,imm=0)"},
+			{6, "R4_w=pkt(id=0,off=14,r=0,imm=0)"},
 			{10, "R2=pkt(id=0,off=0,r=18,imm=0)"},
 			{10, "R5=pkt(id=0,off=14,r=18,imm=0)"},
-			{10, "R4=inv(id=0,umax_value=255,var_off=(0x0; 0xff))"},
-			{14, "R4=inv(id=0,umax_value=65535,var_off=(0x0; 0xffff))"},
-			{15, "R4=inv(id=0,umax_value=65535,var_off=(0x0; 0xffff))"},
+			{10, "R4_w=inv(id=0,umax_value=255,var_off=(0x0; 0xff))"},
+			{14, "R4_w=inv(id=0,umax_value=65535,var_off=(0x0; 0xffff))"},
+			{15, "R4_w=inv(id=0,umax_value=65535,var_off=(0x0; 0xffff))"},
 		},
 	},
 	{
@@ -320,11 +320,11 @@ static struct bpf_align_test tests[] = {
 			 * alignment of 4.
 			 */
 			{8, "R2=pkt(id=0,off=0,r=8,imm=0)"},
-			{8, "R6=inv(id=0,umax_value=1020,var_off=(0x0; 0x3fc))"},
+			{8, "R6_w=inv(id=0,umax_value=1020,var_off=(0x0; 0x3fc))"},
 			/* Offset is added to packet pointer R5, resulting in
 			 * known fixed offset, and variable offset from R6.
 			 */
-			{11, "R5=pkt(id=1,off=14,r=0,umax_value=1020,var_off=(0x0; 0x3fc))"},
+			{11, "R5_w=pkt(id=1,off=14,r=0,umax_value=1020,var_off=(0x0; 0x3fc))"},
 			/* At the time the word size load is performed from R5,
			 * its total offset is NET_IP_ALIGN + reg->off (0) +
 			 * reg->aux_off (14) which is 16.  Then the variable
@@ -336,11 +336,11 @@ static struct bpf_align_test tests[] = {
 			/* Variable offset is added to R5 packet pointer,
 			 * resulting in auxiliary alignment of 4.
 			 */
-			{18, "R5=pkt(id=2,off=0,r=0,umax_value=1020,var_off=(0x0; 0x3fc))"},
+			{18, "R5_w=pkt(id=2,off=0,r=0,umax_value=1020,var_off=(0x0; 0x3fc))"},
 			/* Constant offset is added to R5, resulting in
 			 * reg->off of 14.
 			 */
-			{19, "R5=pkt(id=2,off=14,r=0,umax_value=1020,var_off=(0x0; 0x3fc))"},
+			{19, "R5_w=pkt(id=2,off=14,r=0,umax_value=1020,var_off=(0x0; 0x3fc))"},
 			/* At the time the word size load is performed from R5,
 			 * its total fixed offset is NET_IP_ALIGN + reg->off
 			 * (14) which is 16.  Then the variable offset is 4-byte
@@ -352,18 +352,18 @@ static struct bpf_align_test tests[] = {
 			/* Constant offset is added to R5 packet pointer,
 			 * resulting in reg->off value of 14.
 			 */
-			{26, "R5=pkt(id=0,off=14,r=8"},
+			{26, "R5_w=pkt(id=0,off=14,r=8"},
 			/* Variable offset is added to R5, resulting in a
 			 * variable offset of (4n).
 			 */
-			{27, "R5=pkt(id=3,off=14,r=0,umax_value=1020,var_off=(0x0; 0x3fc))"},
+			{27, "R5_w=pkt(id=3,off=14,r=0,umax_value=1020,var_off=(0x0; 0x3fc))"},
 			/* Constant is added to R5 again, setting reg->off to 18. */
-			{28, "R5=pkt(id=3,off=18,r=0,umax_value=1020,var_off=(0x0; 0x3fc))"},
+			{28, "R5_w=pkt(id=3,off=18,r=0,umax_value=1020,var_off=(0x0; 0x3fc))"},
 			/* And once more we add a variable; resulting var_off
 			 * is still (4n), fixed offset is not changed.
 			 * Also, we create a new reg->id.
 			 */
-			{29, "R5=pkt(id=4,off=18,r=0,umax_value=2040,var_off=(0x0; 0x7fc))"},
+			{29, "R5_w=pkt(id=4,off=18,r=0,umax_value=2040,var_off=(0x0; 0x7fc))"},
 			/* At the time the word size load is performed from R5,
 			 * its total fixed offset is NET_IP_ALIGN + reg->off (18)
 			 * which is 20.  Then the variable offset is (4n), so
@@ -410,11 +410,11 @@ static struct bpf_align_test tests[] = {
 			 * alignment of 4.
 			 */
 			{8, "R2=pkt(id=0,off=0,r=8,imm=0)"},
-			{8, "R6=inv(id=0,umax_value=1020,var_off=(0x0; 0x3fc))"},
+			{8, "R6_w=inv(id=0,umax_value=1020,var_off=(0x0; 0x3fc))"},
 			/* Adding 14 makes R6 be (4n+2) */
-			{9, "R6=inv(id=0,umin_value=14,umax_value=1034,var_off=(0x2; 0x7fc))"},
+			{9, "R6_w=inv(id=0,umin_value=14,umax_value=1034,var_off=(0x2; 0x7fc))"},
 			/* Packet pointer has (4n+2) offset */
-			{11, "R5=pkt(id=1,off=0,r=0,umin_value=14,umax_value=1034,var_off=(0x2; 0x7fc))"},
+			{11, "R5_w=pkt(id=1,off=0,r=0,umin_value=14,umax_value=1034,var_off=(0x2; 0x7fc))"},
 			{13, "R4=pkt(id=1,off=4,r=0,umin_value=14,umax_value=1034,var_off=(0x2; 0x7fc))"},
 			/* At the time the word size load is performed from R5,
 			 * its total fixed offset is NET_IP_ALIGN + reg->off (0)
@@ -426,11 +426,11 @@ static struct bpf_align_test tests[] = {
 			/* Newly read value in R6 was shifted left by 2, so has
 			 * known alignment of 4.
 			 */
-			{18, "R6=inv(id=0,umax_value=1020,var_off=(0x0; 0x3fc))"},
+			{18, "R6_w=inv(id=0,umax_value=1020,var_off=(0x0; 0x3fc))"},
 			/* Added (4n) to packet pointer's (4n+2) var_off, giving
 			 * another (4n+2).
 			 */
-			{19, "R5=pkt(id=2,off=0,r=0,umin_value=14,umax_value=2054,var_off=(0x2; 0xffc))"},
+			{19, "R5_w=pkt(id=2,off=0,r=0,umin_value=14,umax_value=2054,var_off=(0x2; 0xffc))"},
 			{21, "R4=pkt(id=2,off=4,r=0,umin_value=14,umax_value=2054,var_off=(0x2; 0xffc))"},
 			/* At the time the word size load is performed from R5,
 			 * its total fixed offset is NET_IP_ALIGN + reg->off (0)
@@ -446,11 +446,9 @@ static struct bpf_align_test tests[] = {
 		.insns = {
 			PREP_PKT_POINTERS,
 			BPF_MOV64_IMM(BPF_REG_0, 0),
-			/* ptr & const => unknown & const */
-			BPF_MOV64_REG(BPF_REG_5, BPF_REG_2),
-			BPF_ALU64_IMM(BPF_AND, BPF_REG_5, 0x40),
-			/* ptr << const => unknown << const */
-			BPF_MOV64_REG(BPF_REG_5, BPF_REG_2),
+			/* (ptr - ptr) << 2 */
+			BPF_MOV64_REG(BPF_REG_5, BPF_REG_3),
+			BPF_ALU64_REG(BPF_SUB, BPF_REG_5, BPF_REG_2),
 			BPF_ALU64_IMM(BPF_LSH, BPF_REG_5, 2),
 			/* We have a (4n) value.  Let's make a packet offset
 			 * out of it.  First add 14, to make it a (4n+2)
@@ -473,8 +471,26 @@ static struct bpf_align_test tests[] = {
 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
 		.result = REJECT,
 		.matches = {
-			{4, "R5=pkt(id=0,off=0,r=0,imm=0)"},
-			/* R5 bitwise operator &= on pointer prohibited */
+			{4, "R5_w=pkt_end(id=0,off=0,imm=0)"},
+			/* (ptr - ptr) << 2 == unknown, (4n) */
+			{6, "R5_w=inv(id=0,smax_value=9223372036854775804,umax_value=18446744073709551612,var_off=(0x0; 0xfffffffffffffffc))"},
+			/* (4n) + 14 == (4n+2).  We blow our bounds, because
+			 * the add could overflow.
+			 */
+			{7, "R5=inv(id=0,var_off=(0x2; 0xfffffffffffffffc))"},
+			/* Checked s>=0 */
+			{9, "R5=inv(id=0,umin_value=2,umax_value=9223372036854775806,var_off=(0x2; 0x7ffffffffffffffc))"},
+			/* packet pointer + nonnegative (4n+2) */
+			{11, "R6_w=pkt(id=1,off=0,r=0,umin_value=2,umax_value=9223372036854775806,var_off=(0x2; 0x7ffffffffffffffc))"},
+			{13, "R4=pkt(id=1,off=4,r=0,umin_value=2,umax_value=9223372036854775806,var_off=(0x2; 0x7ffffffffffffffc))"},
+			/* NET_IP_ALIGN + (4n+2) == (4n), alignment is fine.
+			 * We checked the bounds, but it might have been able
+			 * to overflow if the packet pointer started in the
+			 * upper half of the address space.
+			 * So we did not get a 'range' on R6, and the access
+			 * attempt will fail.
+			 */
+			{15, "R6=pkt(id=1,off=0,r=0,umin_value=2,umax_value=9223372036854775806,var_off=(0x2; 0x7ffffffffffffffc))"},
 		}
 	},
 	{
@@ -510,11 +526,11 @@ static struct bpf_align_test tests[] = {
 			 * alignment of 4.
 			 */
 			{7, "R2=pkt(id=0,off=0,r=8,imm=0)"},
-			{9, "R6=inv(id=0,umax_value=1020,var_off=(0x0; 0x3fc))"},
+			{9, "R6_w=inv(id=0,umax_value=1020,var_off=(0x0; 0x3fc))"},
 			/* Adding 14 makes R6 be (4n+2) */
-			{10, "R6=inv(id=0,umin_value=14,umax_value=1034,var_off=(0x2; 0x7fc))"},
+			{10, "R6_w=inv(id=0,umin_value=14,umax_value=1034,var_off=(0x2; 0x7fc))"},
 			/* New unknown value in R7 is (4n) */
-			{11, "R7=inv(id=0,umax_value=1020,var_off=(0x0; 0x3fc))"},
+			{11, "R7_w=inv(id=0,umax_value=1020,var_off=(0x0; 0x3fc))"},
 			/* Subtracting it from R6 blows our unsigned bounds */
 			{12, "R6=inv(id=0,smin_value=-1006,smax_value=1034,var_off=(0x2; 0xfffffffffffffffc))"},
 			/* Checked s>= 0 */
@@ -563,15 +579,15 @@ static struct bpf_align_test tests[] = {
 			 * alignment of 4.
 			 */
 			{7, "R2=pkt(id=0,off=0,r=8,imm=0)"},
-			{10, "R6=inv(id=0,umax_value=60,var_off=(0x0; 0x3c))"},
+			{10, "R6_w=inv(id=0,umax_value=60,var_off=(0x0; 0x3c))"},
 			/* Adding 14 makes R6 be (4n+2) */
-			{11, "R6=inv(id=0,umin_value=14,umax_value=74,var_off=(0x2; 0x7c))"},
+			{11, "R6_w=inv(id=0,umin_value=14,umax_value=74,var_off=(0x2; 0x7c))"},
 			/* Subtracting from packet pointer overflows ubounds */
-			{13, "R5=pkt(id=1,off=0,r=8,umin_value=18446744073709551542,umax_value=18446744073709551602,var_off=(0xffffffffffffff82; 0x7c))"},
+			{13, "R5_w=pkt(id=1,off=0,r=8,umin_value=18446744073709551542,umax_value=18446744073709551602,var_off=(0xffffffffffffff82; 0x7c))"},
 			/* New unknown value in R7 is (4n), >= 76 */
-			{15, "R7=inv(id=0,umin_value=76,umax_value=1096,var_off=(0x0; 0x7fc))"},
+			{15, "R7_w=inv(id=0,umin_value=76,umax_value=1096,var_off=(0x0; 0x7fc))"},
 			/* Adding it to packet pointer gives nice bounds again */
-			{16, "R5=pkt(id=2,off=0,r=0,umin_value=2,umax_value=1082,var_off=(0x2; 0x7fc))"},
+			{16, "R5_w=pkt(id=2,off=0,r=0,umin_value=2,umax_value=1082,var_off=(0x2; 0x7fc))"},
 			/* At the time the word size load is performed from R5,
 			 * its total fixed offset is NET_IP_ALIGN + reg->off (0)
 			 * which is 2.  Then the variable offset is (4n+2), so
diff --git a/tools/testing/selftests/bpf/test_dev_cgroup.c b/tools/testing/selftests/bpf/test_dev_cgroup.c
index 02c85d6c89b0..3489cc283433 100644
--- a/tools/testing/selftests/bpf/test_dev_cgroup.c
+++ b/tools/testing/selftests/bpf/test_dev_cgroup.c
@@ -10,6 +10,8 @@
 #include <string.h>
 #include <errno.h>
 #include <assert.h>
+#include <sys/time.h>
+#include <sys/resource.h>
 
 #include <linux/bpf.h>
 #include <bpf/bpf.h>
@@ -19,19 +21,23 @@
 
 #define DEV_CGROUP_PROG "./dev_cgroup.o"
 
-#define TEST_CGROUP "test-bpf-based-device-cgroup/"
+#define TEST_CGROUP "/test-bpf-based-device-cgroup/"
 
 int main(int argc, char **argv)
 {
+	struct rlimit limit  = { RLIM_INFINITY, RLIM_INFINITY };
 	struct bpf_object *obj;
 	int error = EXIT_FAILURE;
 	int prog_fd, cgroup_fd;
 	__u32 prog_cnt;
 
+	if (setrlimit(RLIMIT_MEMLOCK, &limit) < 0)
+		perror("Unable to lift memlock rlimit");
+
 	if (bpf_prog_load(DEV_CGROUP_PROG, BPF_PROG_TYPE_CGROUP_DEVICE,
 			  &obj, &prog_fd)) {
 		printf("Failed to load DEV_CGROUP program\n");
-		goto err;
+		goto out;
 	}
 
 	if (setup_cgroup_environment()) {
@@ -89,5 +95,6 @@ int main(int argc, char **argv)
 err:
 	cleanup_cgroup_environment();
 
+out:
 	return error;
 }
diff --git a/tools/testing/selftests/bpf/test_l4lb_noinline.c b/tools/testing/selftests/bpf/test_l4lb_noinline.c
new file mode 100644
index 000000000000..ba44a14e6dc4
--- /dev/null
+++ b/tools/testing/selftests/bpf/test_l4lb_noinline.c
@@ -0,0 +1,473 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (c) 2017 Facebook
+#include <stddef.h>
+#include <stdbool.h>
+#include <string.h>
+#include <linux/pkt_cls.h>
+#include <linux/bpf.h>
+#include <linux/in.h>
+#include <linux/if_ether.h>
+#include <linux/ip.h>
+#include <linux/ipv6.h>
+#include <linux/icmp.h>
+#include <linux/icmpv6.h>
+#include <linux/tcp.h>
+#include <linux/udp.h>
+#include "bpf_helpers.h"
+#include "test_iptunnel_common.h"
+#include "bpf_endian.h"
+
+int _version SEC("version") = 1;
+
+static __u32 rol32(__u32 word, unsigned int shift)
+{
+	return (word << shift) | (word >> ((-shift) & 31));
+}
+
+/* copy/paste of jhash from kernel sources, to make sure llvm
+ * can compile it into a valid sequence of bpf instructions
+ */
+#define __jhash_mix(a, b, c)			\
+{						\
+	a -= c;  a ^= rol32(c, 4);  c += b;	\
+	b -= a;  b ^= rol32(a, 6);  a += c;	\
+	c -= b;  c ^= rol32(b, 8);  b += a;	\
+	a -= c;  a ^= rol32(c, 16); c += b;	\
+	b -= a;  b ^= rol32(a, 19); a += c;	\
+	c -= b;  c ^= rol32(b, 4);  b += a;	\
+}
+
+#define __jhash_final(a, b, c)			\
+{						\
+	c ^= b; c -= rol32(b, 14);		\
+	a ^= c; a -= rol32(c, 11);		\
+	b ^= a; b -= rol32(a, 25);		\
+	c ^= b; c -= rol32(b, 16);		\
+	a ^= c; a -= rol32(c, 4);		\
+	b ^= a; b -= rol32(a, 14);		\
+	c ^= b; c -= rol32(b, 24);		\
+}
+
+#define JHASH_INITVAL		0xdeadbeef
+
+typedef unsigned int u32;
+
+static u32 jhash(const void *key, u32 length, u32 initval)
+{
+	u32 a, b, c;
+	const unsigned char *k = key;
+
+	a = b = c = JHASH_INITVAL + length + initval;
+
+	while (length > 12) {
+		a += *(u32 *)(k);
+		b += *(u32 *)(k + 4);
+		c += *(u32 *)(k + 8);
+		__jhash_mix(a, b, c);
+		length -= 12;
+		k += 12;
+	}
+	switch (length) {
+	case 12: c += (u32)k[11]<<24;
+	case 11: c += (u32)k[10]<<16;
+	case 10: c += (u32)k[9]<<8;
+	case 9:  c += k[8];
+	case 8:  b += (u32)k[7]<<24;
+	case 7:  b += (u32)k[6]<<16;
+	case 6:  b += (u32)k[5]<<8;
+	case 5:  b += k[4];
+	case 4:  a += (u32)k[3]<<24;
+	case 3:  a += (u32)k[2]<<16;
+	case 2:  a += (u32)k[1]<<8;
+	case 1:  a += k[0];
+		 __jhash_final(a, b, c);
+	case 0: /* Nothing left to add */
+		break;
+	}
+
+	return c;
+}
+
+static u32 __jhash_nwords(u32 a, u32 b, u32 c, u32 initval)
+{
+	a += initval;
+	b += initval;
+	c += initval;
+	__jhash_final(a, b, c);
+	return c;
+}
+
+static u32 jhash_2words(u32 a, u32 b, u32 initval)
+{
+	return __jhash_nwords(a, b, 0, initval + JHASH_INITVAL + (2 << 2));
+}
+
+#define PCKT_FRAGMENTED 65343
+#define IPV4_HDR_LEN_NO_OPT 20
+#define IPV4_PLUS_ICMP_HDR 28
+#define IPV6_PLUS_ICMP_HDR 48
+#define RING_SIZE 2
+#define MAX_VIPS 12
+#define MAX_REALS 5
+#define CTL_MAP_SIZE 16
+#define CH_RINGS_SIZE (MAX_VIPS * RING_SIZE)
+#define F_IPV6 (1 << 0)
+#define F_HASH_NO_SRC_PORT (1 << 0)
+#define F_ICMP (1 << 0)
+#define F_SYN_SET (1 << 1)
+
+struct packet_description {
+	union {
+		__be32 src;
+		__be32 srcv6[4];
+	};
+	union {
+		__be32 dst;
+		__be32 dstv6[4];
+	};
+	union {
+		__u32 ports;
+		__u16 port16[2];
+	};
+	__u8 proto;
+	__u8 flags;
+};
+
+struct ctl_value {
+	union {
+		__u64 value;
+		__u32 ifindex;
+		__u8 mac[6];
+	};
+};
+
+struct vip_meta {
+	__u32 flags;
+	__u32 vip_num;
+};
+
+struct real_definition {
+	union {
+		__be32 dst;
+		__be32 dstv6[4];
+	};
+	__u8 flags;
+};
+
+struct vip_stats {
+	__u64 bytes;
+	__u64 pkts;
+};
+
+struct eth_hdr {
+	unsigned char eth_dest[ETH_ALEN];
+	unsigned char eth_source[ETH_ALEN];
+	unsigned short eth_proto;
+};
+
+struct bpf_map_def SEC("maps") vip_map = {
+	.type = BPF_MAP_TYPE_HASH,
+	.key_size = sizeof(struct vip),
+	.value_size = sizeof(struct vip_meta),
+	.max_entries = MAX_VIPS,
+};
+
+struct bpf_map_def SEC("maps") ch_rings = {
+	.type = BPF_MAP_TYPE_ARRAY,
+	.key_size = sizeof(__u32),
+	.value_size = sizeof(__u32),
+	.max_entries = CH_RINGS_SIZE,
+};
+
+struct bpf_map_def SEC("maps") reals = {
+	.type = BPF_MAP_TYPE_ARRAY,
+	.key_size = sizeof(__u32),
+	.value_size = sizeof(struct real_definition),
+	.max_entries = MAX_REALS,
+};
+
+struct bpf_map_def SEC("maps") stats = {
+	.type = BPF_MAP_TYPE_PERCPU_ARRAY,
+	.key_size = sizeof(__u32),
+	.value_size = sizeof(struct vip_stats),
+	.max_entries = MAX_VIPS,
+};
+
+struct bpf_map_def SEC("maps") ctl_array = {
+	.type = BPF_MAP_TYPE_ARRAY,
+	.key_size = sizeof(__u32),
+	.value_size = sizeof(struct ctl_value),
+	.max_entries = CTL_MAP_SIZE,
+};
+
+static __u32 get_packet_hash(struct packet_description *pckt,
+			     bool ipv6)
+{
+	if (ipv6)
+		return jhash_2words(jhash(pckt->srcv6, 16, MAX_VIPS),
+				    pckt->ports, CH_RINGS_SIZE);
+	else
+		return jhash_2words(pckt->src, pckt->ports, CH_RINGS_SIZE);
+}
+
+static bool get_packet_dst(struct real_definition **real,
+			   struct packet_description *pckt,
+			   struct vip_meta *vip_info,
+			   bool is_ipv6)
+{
+	__u32 hash = get_packet_hash(pckt, is_ipv6);
+	__u32 key = RING_SIZE * vip_info->vip_num + hash % RING_SIZE;
+	__u32 *real_pos;
+
+	if (hash != 0x358459b7 /* jhash of ipv4 packet */  &&
+	    hash != 0x2f4bc6bb /* jhash of ipv6 packet */)
+		return false;
+
+	real_pos = bpf_map_lookup_elem(&ch_rings, &key);
+	if (!real_pos)
+		return false;
+	key = *real_pos;
+	*real = bpf_map_lookup_elem(&reals, &key);
+	if (!(*real))
+		return false;
+	return true;
+}
+
+static int parse_icmpv6(void *data, void *data_end, __u64 off,
+			struct packet_description *pckt)
+{
+	struct icmp6hdr *icmp_hdr;
+	struct ipv6hdr *ip6h;
+
+	icmp_hdr = data + off;
+	if (icmp_hdr + 1 > data_end)
+		return TC_ACT_SHOT;
+	if (icmp_hdr->icmp6_type != ICMPV6_PKT_TOOBIG)
+		return TC_ACT_OK;
+	off += sizeof(struct icmp6hdr);
+	ip6h = data + off;
+	if (ip6h + 1 > data_end)
+		return TC_ACT_SHOT;
+	pckt->proto = ip6h->nexthdr;
+	pckt->flags |= F_ICMP;
+	memcpy(pckt->srcv6, ip6h->daddr.s6_addr32, 16);
+	memcpy(pckt->dstv6, ip6h->saddr.s6_addr32, 16);
+	return TC_ACT_UNSPEC;
+}
+
+static int parse_icmp(void *data, void *data_end, __u64 off,
+		      struct packet_description *pckt)
+{
+	struct icmphdr *icmp_hdr;
+	struct iphdr *iph;
+
+	icmp_hdr = data + off;
+	if (icmp_hdr + 1 > data_end)
+		return TC_ACT_SHOT;
+	if (icmp_hdr->type != ICMP_DEST_UNREACH ||
+	    icmp_hdr->code != ICMP_FRAG_NEEDED)
+		return TC_ACT_OK;
+	off += sizeof(struct icmphdr);
+	iph = data + off;
+	if (iph + 1 > data_end)
+		return TC_ACT_SHOT;
+	if (iph->ihl != 5)
+		return TC_ACT_SHOT;
+	pckt->proto = iph->protocol;
+	pckt->flags |= F_ICMP;
+	pckt->src = iph->daddr;
+	pckt->dst = iph->saddr;
+	return TC_ACT_UNSPEC;
+}
+
+static bool parse_udp(void *data, __u64 off, void *data_end,
+		      struct packet_description *pckt)
+{
+	struct udphdr *udp;
+
+	udp = data + off;
+
+	if (udp + 1 > data_end)
+		return false;
+
+	if (!(pckt->flags & F_ICMP)) {
+		pckt->port16[0] = udp->source;
+		pckt->port16[1] = udp->dest;
+	} else {
+		pckt->port16[0] = udp->dest;
+		pckt->port16[1] = udp->source;
+	}
+	return true;
+}
+
+static bool parse_tcp(void *data, __u64 off, void *data_end,
+		      struct packet_description *pckt)
+{
+	struct tcphdr *tcp;
+
+	tcp = data + off;
+	if (tcp + 1 > data_end)
+		return false;
+
+	if (tcp->syn)
+		pckt->flags |= F_SYN_SET;
+
+	if (!(pckt->flags & F_ICMP)) {
+		pckt->port16[0] = tcp->source;
+		pckt->port16[1] = tcp->dest;
+	} else {
+		pckt->port16[0] = tcp->dest;
+		pckt->port16[1] = tcp->source;
+	}
+	return true;
+}
+
+static int process_packet(void *data, __u64 off, void *data_end,
+			  bool is_ipv6, struct __sk_buff *skb)
+{
+	void *pkt_start = (void *)(long)skb->data;
+	struct packet_description pckt = {};
+	struct eth_hdr *eth = pkt_start;
+	struct bpf_tunnel_key tkey = {};
+	struct vip_stats *data_stats;
+	struct real_definition *dst;
+	struct vip_meta *vip_info;
+	struct ctl_value *cval;
+	__u32 v4_intf_pos = 1;
+	__u32 v6_intf_pos = 2;
+	struct ipv6hdr *ip6h;
+	struct vip vip = {};
+	struct iphdr *iph;
+	int tun_flag = 0;
+	__u16 pkt_bytes;
+	__u64 iph_len;
+	__u32 ifindex;
+	__u8 protocol;
+	__u32 vip_num;
+	int action;
+
+	tkey.tunnel_ttl = 64;
+	if (is_ipv6) {
+		ip6h = data + off;
+		if (ip6h + 1 > data_end)
+			return TC_ACT_SHOT;
+
+		iph_len = sizeof(struct ipv6hdr);
+		protocol = ip6h->nexthdr;
+		pckt.proto = protocol;
+		pkt_bytes = bpf_ntohs(ip6h->payload_len);
+		off += iph_len;
+		if (protocol == IPPROTO_FRAGMENT) {
+			return TC_ACT_SHOT;
+		} else if (protocol == IPPROTO_ICMPV6) {
+			action = parse_icmpv6(data, data_end, off, &pckt);
+			if (action >= 0)
+				return action;
+			off += IPV6_PLUS_ICMP_HDR;
+		} else {
+			memcpy(pckt.srcv6, ip6h->saddr.s6_addr32, 16);
+			memcpy(pckt.dstv6, ip6h->daddr.s6_addr32, 16);
+		}
+	} else {
+		iph = data + off;
+		if (iph + 1 > data_end)
+			return TC_ACT_SHOT;
+		if (iph->ihl != 5)
+			return TC_ACT_SHOT;
+
+		protocol = iph->protocol;
+		pckt.proto = protocol;
+		pkt_bytes = bpf_ntohs(iph->tot_len);
+		off += IPV4_HDR_LEN_NO_OPT;
+
+		if (iph->frag_off & PCKT_FRAGMENTED)
+			return TC_ACT_SHOT;
+		if (protocol == IPPROTO_ICMP) {
+			action = parse_icmp(data, data_end, off, &pckt);
+			if (action >= 0)
+				return action;
+			off += IPV4_PLUS_ICMP_HDR;
+		} else {
+			pckt.src = iph->saddr;
+			pckt.dst = iph->daddr;
+		}
+	}
+	protocol = pckt.proto;
+
+	if (protocol == IPPROTO_TCP) {
+		if (!parse_tcp(data, off, data_end, &pckt))
+			return TC_ACT_SHOT;
+	} else if (protocol == IPPROTO_UDP) {
+		if (!parse_udp(data, off, data_end, &pckt))
+			return TC_ACT_SHOT;
+	} else {
+		return TC_ACT_SHOT;
+	}
+
+	if (is_ipv6)
+		memcpy(vip.daddr.v6, pckt.dstv6, 16);
+	else
+		vip.daddr.v4 = pckt.dst;
+
+	vip.dport = pckt.port16[1];
+	vip.protocol = pckt.proto;
+	vip_info = bpf_map_lookup_elem(&vip_map, &vip);
+	if (!vip_info) {
+		vip.dport = 0;
+		vip_info = bpf_map_lookup_elem(&vip_map, &vip);
+		if (!vip_info)
+			return TC_ACT_SHOT;
+		pckt.port16[1] = 0;
+	}
+
+	if (vip_info->flags & F_HASH_NO_SRC_PORT)
+		pckt.port16[0] = 0;
+
+	if (!get_packet_dst(&dst, &pckt, vip_info, is_ipv6))
+		return TC_ACT_SHOT;
+
+	if (dst->flags & F_IPV6) {
+		cval = bpf_map_lookup_elem(&ctl_array, &v6_intf_pos);
+		if (!cval)
+			return TC_ACT_SHOT;
+		ifindex = cval->ifindex;
+		memcpy(tkey.remote_ipv6, dst->dstv6, 16);
+		tun_flag = BPF_F_TUNINFO_IPV6;
+	} else {
+		cval = bpf_map_lookup_elem(&ctl_array, &v4_intf_pos);
+		if (!cval)
+			return TC_ACT_SHOT;
+		ifindex = cval->ifindex;
+		tkey.remote_ipv4 = dst->dst;
+	}
+	vip_num = vip_info->vip_num;
+	data_stats = bpf_map_lookup_elem(&stats, &vip_num);
+	if (!data_stats)
+		return TC_ACT_SHOT;
+	data_stats->pkts++;
+	data_stats->bytes += pkt_bytes;
+	bpf_skb_set_tunnel_key(skb, &tkey, sizeof(tkey), tun_flag);
+	*(u32 *)eth->eth_dest = tkey.remote_ipv4;
+	return bpf_redirect(ifindex, 0);
+}
+
+SEC("l4lb-demo")
+int balancer_ingress(struct __sk_buff *ctx)
+{
+	void *data_end = (void *)(long)ctx->data_end;
+	void *data = (void *)(long)ctx->data;
+	struct eth_hdr *eth = data;
+	__u32 eth_proto;
+	__u32 nh_off;
+
+	nh_off = sizeof(struct eth_hdr);
+	if (data + nh_off > data_end)
+		return TC_ACT_SHOT;
+	eth_proto = eth->eth_proto;
+	if (eth_proto == bpf_htons(ETH_P_IP))
+		return process_packet(data, nh_off, data_end, false, ctx);
+	else if (eth_proto == bpf_htons(ETH_P_IPV6))
+		return process_packet(data, nh_off, data_end, true, ctx);
+	else
+		return TC_ACT_SHOT;
+}
+char _license[] SEC("license") = "GPL";
diff --git a/tools/testing/selftests/bpf/test_lpm_map.c b/tools/testing/selftests/bpf/test_lpm_map.c
index f61480641b6e..2be87e9ee28d 100644
--- a/tools/testing/selftests/bpf/test_lpm_map.c
+++ b/tools/testing/selftests/bpf/test_lpm_map.c
@@ -14,6 +14,7 @@
 #include <errno.h>
 #include <inttypes.h>
 #include <linux/bpf.h>
+#include <pthread.h>
 #include <stdio.h>
 #include <stdlib.h>
 #include <string.h>
@@ -521,6 +522,218 @@ static void test_lpm_delete(void)
 	close(map_fd);
 }
 
+static void test_lpm_get_next_key(void)
+{
+	struct bpf_lpm_trie_key *key_p, *next_key_p;
+	size_t key_size;
+	__u32 value = 0;
+	int map_fd;
+
+	key_size = sizeof(*key_p) + sizeof(__u32);
+	key_p = alloca(key_size);
+	next_key_p = alloca(key_size);
+
+	map_fd = bpf_create_map(BPF_MAP_TYPE_LPM_TRIE, key_size, sizeof(value),
+				100, BPF_F_NO_PREALLOC);
+	assert(map_fd >= 0);
+
+	/* empty tree. get_next_key should return ENOENT */
+	assert(bpf_map_get_next_key(map_fd, NULL, key_p) == -1 &&
+	       errno == ENOENT);
+
+	/* get and verify the first key, get the second one should fail. */
+	key_p->prefixlen = 16;
+	inet_pton(AF_INET, "192.168.0.0", key_p->data);
+	assert(bpf_map_update_elem(map_fd, key_p, &value, 0) == 0);
+
+	memset(key_p, 0, key_size);
+	assert(bpf_map_get_next_key(map_fd, NULL, key_p) == 0);
+	assert(key_p->prefixlen == 16 && key_p->data[0] == 192 &&
+	       key_p->data[1] == 168);
+
+	assert(bpf_map_get_next_key(map_fd, key_p, next_key_p) == -1 &&
+	       errno == ENOENT);
+
+	/* a lookup key with no exact match should get the first one in post order. */
+	key_p->prefixlen = 8;
+	assert(bpf_map_get_next_key(map_fd, NULL, key_p) == 0);
+	assert(key_p->prefixlen == 16 && key_p->data[0] == 192 &&
+	       key_p->data[1] == 168);
+
+	/* add one more element (total two) */
+	key_p->prefixlen = 24;
+	inet_pton(AF_INET, "192.168.0.0", key_p->data);
+	assert(bpf_map_update_elem(map_fd, key_p, &value, 0) == 0);
+
+	memset(key_p, 0, key_size);
+	assert(bpf_map_get_next_key(map_fd, NULL, key_p) == 0);
+	assert(key_p->prefixlen == 24 && key_p->data[0] == 192 &&
+	       key_p->data[1] == 168 && key_p->data[2] == 0);
+
+	memset(next_key_p, 0, key_size);
+	assert(bpf_map_get_next_key(map_fd, key_p, next_key_p) == 0);
+	assert(next_key_p->prefixlen == 16 && next_key_p->data[0] == 192 &&
+	       next_key_p->data[1] == 168);
+
+	memcpy(key_p, next_key_p, key_size);
+	assert(bpf_map_get_next_key(map_fd, key_p, next_key_p) == -1 &&
+	       errno == ENOENT);
+
+	/* Add one more element (total three) */
+	key_p->prefixlen = 24;
+	inet_pton(AF_INET, "192.168.128.0", key_p->data);
+	assert(bpf_map_update_elem(map_fd, key_p, &value, 0) == 0);
+
+	memset(key_p, 0, key_size);
+	assert(bpf_map_get_next_key(map_fd, NULL, key_p) == 0);
+	assert(key_p->prefixlen == 24 && key_p->data[0] == 192 &&
+	       key_p->data[1] == 168 && key_p->data[2] == 0);
+
+	memset(next_key_p, 0, key_size);
+	assert(bpf_map_get_next_key(map_fd, key_p, next_key_p) == 0);
+	assert(next_key_p->prefixlen == 24 && next_key_p->data[0] == 192 &&
+	       next_key_p->data[1] == 168 && next_key_p->data[2] == 128);
+
+	memcpy(key_p, next_key_p, key_size);
+	assert(bpf_map_get_next_key(map_fd, key_p, next_key_p) == 0);
+	assert(next_key_p->prefixlen == 16 && next_key_p->data[0] == 192 &&
+	       next_key_p->data[1] == 168);
+
+	memcpy(key_p, next_key_p, key_size);
+	assert(bpf_map_get_next_key(map_fd, key_p, next_key_p) == -1 &&
+	       errno == ENOENT);
+
+	/* Add one more element (total four) */
+	key_p->prefixlen = 24;
+	inet_pton(AF_INET, "192.168.1.0", key_p->data);
+	assert(bpf_map_update_elem(map_fd, key_p, &value, 0) == 0);
+
+	memset(key_p, 0, key_size);
+	assert(bpf_map_get_next_key(map_fd, NULL, key_p) == 0);
+	assert(key_p->prefixlen == 24 && key_p->data[0] == 192 &&
+	       key_p->data[1] == 168 && key_p->data[2] == 0);
+
+	memset(next_key_p, 0, key_size);
+	assert(bpf_map_get_next_key(map_fd, key_p, next_key_p) == 0);
+	assert(next_key_p->prefixlen == 24 && next_key_p->data[0] == 192 &&
+	       next_key_p->data[1] == 168 && next_key_p->data[2] == 1);
+
+	memcpy(key_p, next_key_p, key_size);
+	assert(bpf_map_get_next_key(map_fd, key_p, next_key_p) == 0);
+	assert(next_key_p->prefixlen == 24 && next_key_p->data[0] == 192 &&
+	       next_key_p->data[1] == 168 && next_key_p->data[2] == 128);
+
+	memcpy(key_p, next_key_p, key_size);
+	assert(bpf_map_get_next_key(map_fd, key_p, next_key_p) == 0);
+	assert(next_key_p->prefixlen == 16 && next_key_p->data[0] == 192 &&
+	       next_key_p->data[1] == 168);
+
+	memcpy(key_p, next_key_p, key_size);
+	assert(bpf_map_get_next_key(map_fd, key_p, next_key_p) == -1 &&
+	       errno == ENOENT);
+
+	/* a lookup key with no exact match should return the first one in post order */
+	key_p->prefixlen = 22;
+	inet_pton(AF_INET, "192.168.1.0", key_p->data);
+	assert(bpf_map_get_next_key(map_fd, key_p, next_key_p) == 0);
+	assert(next_key_p->prefixlen == 24 && next_key_p->data[0] == 192 &&
+	       next_key_p->data[1] == 168 && next_key_p->data[2] == 0);
+
+	close(map_fd);
+}
+
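
The traversal contract the new test exercises, shown as a standalone hedged
sketch (the walker function itself is illustrative): bpf_map_get_next_key()
with a NULL current key returns the first key in post order, and ENOENT ends
the walk:

/* Walk and print every IPv4 key currently in the trie. */
static void walk_lpm(int map_fd, size_t key_size)
{
	struct bpf_lpm_trie_key *cur = NULL;
	struct bpf_lpm_trie_key *next = alloca(key_size);
	__u32 value;

	while (bpf_map_get_next_key(map_fd, cur, next) == 0) {
		if (bpf_map_lookup_elem(map_fd, next, &value) == 0)
			printf("%u.%u.%u.%u/%u -> %u\n",
			       next->data[0], next->data[1], next->data[2],
			       next->data[3], next->prefixlen, value);
		if (!cur)
			cur = alloca(key_size);
		memcpy(cur, next, key_size);
	}
}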
+#define MAX_TEST_KEYS	4
+struct lpm_mt_test_info {
+	int cmd; /* 0: update, 1: delete, 2: lookup, 3: get_next_key */
+	int iter;
+	int map_fd;
+	struct {
+		__u32 prefixlen;
+		__u32 data;
+	} key[MAX_TEST_KEYS];
+};
+
+static void *lpm_test_command(void *arg)
+{
+	int i, j, ret, iter, key_size;
+	struct lpm_mt_test_info *info = arg;
+	struct bpf_lpm_trie_key *key_p;
+
+	key_size = sizeof(struct bpf_lpm_trie_key) + sizeof(__u32);
+	key_p = alloca(key_size);
+	for (iter = 0; iter < info->iter; iter++)
+		for (i = 0; i < MAX_TEST_KEYS; i++) {
+			/* first half of iterations in forward order,
+			 * and second half in backward order.
+			 */
+			j = (iter < (info->iter / 2)) ? i : MAX_TEST_KEYS - i - 1;
+			key_p->prefixlen = info->key[j].prefixlen;
+			memcpy(key_p->data, &info->key[j].data, sizeof(__u32));
+			if (info->cmd == 0) {
+				__u32 value = j;
+				/* update must succeed */
+				assert(bpf_map_update_elem(info->map_fd, key_p, &value, 0) == 0);
+			} else if (info->cmd == 1) {
+				ret = bpf_map_delete_elem(info->map_fd, key_p);
+				assert(ret == 0 || errno == ENOENT);
+			} else if (info->cmd == 2) {
+				__u32 value;
+				ret = bpf_map_lookup_elem(info->map_fd, key_p, &value);
+				assert(ret == 0 || errno == ENOENT);
+			} else {
+				struct bpf_lpm_trie_key *next_key_p = alloca(key_size);
+				ret = bpf_map_get_next_key(info->map_fd, key_p, next_key_p);
+				assert(ret == 0 || errno == ENOENT || errno == ENOMEM);
+			}
+		}
+
+	/* Pass successful exit info back to the main thread */
+	pthread_exit((void *)info);
+}
+
+static void setup_lpm_mt_test_info(struct lpm_mt_test_info *info, int map_fd)
+{
+	info->iter = 2000;
+	info->map_fd = map_fd;
+	info->key[0].prefixlen = 16;
+	inet_pton(AF_INET, "192.168.0.0", &info->key[0].data);
+	info->key[1].prefixlen = 24;
+	inet_pton(AF_INET, "192.168.0.0", &info->key[1].data);
+	info->key[2].prefixlen = 24;
+	inet_pton(AF_INET, "192.168.128.0", &info->key[2].data);
+	info->key[3].prefixlen = 24;
+	inet_pton(AF_INET, "192.168.1.0", &info->key[3].data);
+}
+
+static void test_lpm_multi_thread(void)
+{
+	struct lpm_mt_test_info info[4];
+	size_t key_size, value_size;
+	pthread_t thread_id[4];
+	int i, map_fd;
+	void *ret;
+
+	/* create a trie */
+	value_size = sizeof(__u32);
+	key_size = sizeof(struct bpf_lpm_trie_key) + value_size;
+	map_fd = bpf_create_map(BPF_MAP_TYPE_LPM_TRIE, key_size, value_size,
+				100, BPF_F_NO_PREALLOC);
+
+	/* create 4 threads to test update, delete, lookup and get_next_key */
+	setup_lpm_mt_test_info(&info[0], map_fd);
+	for (i = 0; i < 4; i++) {
+		if (i != 0)
+			memcpy(&info[i], &info[0], sizeof(info[i]));
+		info[i].cmd = i;
+		assert(pthread_create(&thread_id[i], NULL, &lpm_test_command, &info[i]) == 0);
+	}
+
+	for (i = 0; i < 4; i++)
+		assert(pthread_join(thread_id[i], &ret) == 0 && ret == (void *)&info[i]);
+
+	close(map_fd);
+}
+
 int main(void)
 {
 	struct rlimit limit  = { RLIM_INFINITY, RLIM_INFINITY };
@@ -545,6 +758,10 @@ int main(void)
 
 	test_lpm_delete();
 
+	test_lpm_get_next_key();
+
+	test_lpm_multi_thread();
+
 	printf("test_lpm: OK\n");
 	return 0;
 }
diff --git a/tools/testing/selftests/bpf/test_maps.c b/tools/testing/selftests/bpf/test_maps.c
index 040356ecc862..436c4c72414f 100644
--- a/tools/testing/selftests/bpf/test_maps.c
+++ b/tools/testing/selftests/bpf/test_maps.c
@@ -242,7 +242,7 @@ static void test_hashmap_percpu(int task, void *data)
 
 static void test_hashmap_walk(int task, void *data)
 {
-	int fd, i, max_entries = 100000;
+	int fd, i, max_entries = 1000;
 	long long key, value, next_key;
 	bool next_key_valid = true;
 
@@ -463,7 +463,7 @@ static void test_devmap(int task, void *data)
 #define SOCKMAP_VERDICT_PROG "./sockmap_verdict_prog.o"
 static void test_sockmap(int tasks, void *data)
 {
-	int one = 1, map_fd_rx, map_fd_tx, map_fd_break, s, sc, rc;
+	int one = 1, map_fd_rx = 0, map_fd_tx = 0, map_fd_break, s, sc, rc;
 	struct bpf_map *bpf_map_rx, *bpf_map_tx, *bpf_map_break;
 	int ports[] = {50200, 50201, 50202, 50204};
 	int err, i, fd, udp, sfd[6] = {0xdeadbeef};
@@ -868,9 +868,12 @@ static void test_sockmap(int tasks, void *data)
 		goto out_sockmap;
 	}
 
-	/* Test map close sockets */
-	for (i = 0; i < 6; i++)
+	/* Test map close sockets and empty maps */
+	for (i = 0; i < 6; i++) {
+		bpf_map_delete_elem(map_fd_tx, &i);
+		bpf_map_delete_elem(map_fd_rx, &i);
 		close(sfd[i]);
+	}
 	close(fd);
 	close(map_fd_rx);
 	bpf_object__close(obj);
@@ -881,8 +884,13 @@ out:
 	printf("Failed to create sockmap '%i:%s'!\n", i, strerror(errno));
 	exit(1);
 out_sockmap:
-	for (i = 0; i < 6; i++)
+	for (i = 0; i < 6; i++) {
+		if (map_fd_tx)
+			bpf_map_delete_elem(map_fd_tx, &i);
+		if (map_fd_rx)
+			bpf_map_delete_elem(map_fd_rx, &i);
 		close(sfd[i]);
+	}
 	close(fd);
 	exit(1);
 }
@@ -931,8 +939,12 @@ static void test_map_large(void)
 	close(fd);
 }
 
-static void run_parallel(int tasks, void (*fn)(int task, void *data),
-			 void *data)
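+/* Wrap __run_parallel() in a macro so every caller logs the callback's
+ * name (via #FN stringification) before forking the tasks.
+ */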
+#define run_parallel(N, FN, DATA) \
+	printf("Fork %d tasks to '" #FN "'\n", N); \
+	__run_parallel(N, FN, DATA)
+
+static void __run_parallel(int tasks, void (*fn)(int task, void *data),
+			   void *data)
 {
 	pid_t pid[tasks];
 	int i;
@@ -972,7 +984,7 @@ static void test_map_stress(void)
 #define DO_UPDATE 1
 #define DO_DELETE 0
 
-static void do_work(int fn, void *data)
+static void test_update_delete(int fn, void *data)
 {
 	int do_update = ((int *)data)[1];
 	int fd = ((int *)data)[0];
@@ -1012,7 +1024,7 @@ static void test_map_parallel(void)
 	 */
 	data[0] = fd;
 	data[1] = DO_UPDATE;
-	run_parallel(TASKS, do_work, data);
+	run_parallel(TASKS, test_update_delete, data);
 
 	/* Check that key=0 is already there. */
 	assert(bpf_map_update_elem(fd, &key, &value, BPF_NOEXIST) == -1 &&
@@ -1035,7 +1047,7 @@ static void test_map_parallel(void)
 
 	/* Now let's delete all elemenets in parallel. */
 	data[1] = DO_DELETE;
-	run_parallel(TASKS, do_work, data);
+	run_parallel(TASKS, test_update_delete, data);
 
 	/* Nothing should be left. */
 	key = -1;
diff --git a/tools/testing/selftests/bpf/test_offload.py b/tools/testing/selftests/bpf/test_offload.py
new file mode 100755
index 000000000000..e78aad0a68bb
--- /dev/null
+++ b/tools/testing/selftests/bpf/test_offload.py
@@ -0,0 +1,1085 @@
+#!/usr/bin/python3
+
+# Copyright (C) 2017 Netronome Systems, Inc.
+#
+# This software is licensed under the GNU General Public License Version 2,
+# June 1991 as shown in the file COPYING in the top-level directory of this
+# source tree.
+#
+# THE COPYRIGHT HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS"
+# WITHOUT WARRANTY OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING,
+# BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+# FOR A PARTICULAR PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE
+# OF THE PROGRAM IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME
+# THE COST OF ALL NECESSARY SERVICING, REPAIR OR CORRECTION.
+
+from datetime import datetime
+import argparse
+import json
+import os
+import pprint
+import random
+import string
+import struct
+import subprocess
+import time
+
+logfile = None
+log_level = 1
+skip_extack = False
+bpf_test_dir = os.path.dirname(os.path.realpath(__file__))
+pp = pprint.PrettyPrinter()
+devs = [] # devices we created, for cleanup
+files = [] # files to be removed
+netns = [] # net namespaces to be removed
+
+def log_get_sec(level=0):
+    return "*" * (log_level + level)
+
+def log_level_inc(add=1):
+    global log_level
+    log_level += add
+
+def log_level_dec(sub=1):
+    global log_level
+    log_level -= sub
+
+def log_level_set(level):
+    global log_level
+    log_level = level
+
+def log(header, data, level=None):
+    """
+    Output to an optional log.
+    """
+    if logfile is None:
+        return
+    if level is not None:
+        log_level_set(level)
+
+    if not isinstance(data, str):
+        data = pp.pformat(data)
+
+    if len(header):
+        logfile.write("\n" + log_get_sec() + " ")
+        logfile.write(header)
+    if len(header) and len(data.strip()):
+        logfile.write("\n")
+    logfile.write(data)
+
+def skip(cond, msg):
+    if not cond:
+        return
+    print("SKIP: " + msg)
+    log("SKIP: " + msg, "", level=1)
+    os.sys.exit(0)
+
+def fail(cond, msg):
+    if not cond:
+        return
+    print("FAIL: " + msg)
+    log("FAIL: " + msg, "", level=1)
+    os.sys.exit(1)
+
+def start_test(msg):
+    log(msg, "", level=1)
+    log_level_inc()
+    print(msg)
+
+def cmd(cmd, shell=True, include_stderr=False, background=False, fail=True):
+    """
+    Run a command in subprocess and return tuple of (retval, stdout);
+    optionally return stderr as well as third value.
+    """
+    proc = subprocess.Popen(cmd, shell=shell, stdout=subprocess.PIPE,
+                            stderr=subprocess.PIPE)
+    if background:
+        msg = "%s START: %s" % (log_get_sec(1),
+                                datetime.now().strftime("%H:%M:%S.%f"))
+        log("BKG " + proc.args, msg)
+        return proc
+
+    return cmd_result(proc, include_stderr=include_stderr, fail=fail)
+
+def cmd_result(proc, include_stderr=False, fail=False):
+    stdout, stderr = proc.communicate()
+    stdout = stdout.decode("utf-8")
+    stderr = stderr.decode("utf-8")
+    proc.stdout.close()
+    proc.stderr.close()
+
+    stderr = "\n" + stderr
+    if stderr[-1] == "\n":
+        stderr = stderr[:-1]
+
+    sec = log_get_sec(1)
+    log("CMD " + proc.args,
+        "RETCODE: %d\n%s STDOUT:\n%s%s STDERR:%s\n%s END: %s" %
+        (proc.returncode, sec, stdout, sec, stderr,
+         sec, datetime.now().strftime("%H:%M:%S.%f")))
+
+    if proc.returncode != 0 and fail:
+        if len(stderr) > 0 and stderr[-1] == "\n":
+            stderr = stderr[:-1]
+        raise Exception("Command failed: %s\n%s" % (proc.args, stderr))
+
+    if include_stderr:
+        return proc.returncode, stdout, stderr
+    else:
+        return proc.returncode, stdout
+
+def rm(f):
+    cmd("rm -f %s" % (f))
+    if f in files:
+        files.remove(f)
+
+def tool(name, args, flags, JSON=True, ns="", fail=True, include_stderr=False):
+    params = ""
+    if JSON:
+        params += "%s " % (flags["json"])
+
+    if ns != "":
+        ns = "ip netns exec %s " % (ns)
+
+    if include_stderr:
+        ret, stdout, stderr = cmd(ns + name + " " + params + args,
+                                  fail=fail, include_stderr=True)
+    else:
+        ret, stdout = cmd(ns + name + " " + params + args,
+                          fail=fail, include_stderr=False)
+
+    if JSON and len(stdout.strip()) != 0:
+        out = json.loads(stdout)
+    else:
+        out = stdout
+
+    if include_stderr:
+        return ret, out, stderr
+    else:
+        return ret, out
+
+def bpftool(args, JSON=True, ns="", fail=True):
+    return tool("bpftool", args, {"json":"-p"}, JSON=JSON, ns=ns, fail=fail)
+
+def bpftool_prog_list(expected=None, ns=""):
+    _, progs = bpftool("prog show", JSON=True, ns=ns, fail=True)
+    if expected is not None:
+        if len(progs) != expected:
+            fail(True, "%d BPF programs loaded, expected %d" %
+                 (len(progs), expected))
+    return progs
+
+def bpftool_map_list(expected=None, ns=""):
+    _, maps = bpftool("map show", JSON=True, ns=ns, fail=True)
+    if expected is not None:
+        if len(maps) != expected:
+            fail(True, "%d BPF maps loaded, expected %d" %
+                 (len(maps), expected))
+    return maps
+
+def bpftool_prog_list_wait(expected=0, n_retry=20):
+    for i in range(n_retry):
+        nprogs = len(bpftool_prog_list())
+        if nprogs == expected:
+            return
+        time.sleep(0.05)
+    raise Exception("Timed out waiting for program count to stabilize: want %d, have %d" % (expected, nprogs))
+
+def bpftool_map_list_wait(expected=0, n_retry=20):
+    for i in range(n_retry):
+        nmaps = len(bpftool_map_list())
+        if nmaps == expected:
+            return
+        time.sleep(0.05)
+    raise Exception("Timed out waiting for map count to stabilize: want %d, have %d" % (expected, nmaps))
+
+def ip(args, force=False, JSON=True, ns="", fail=True, include_stderr=False):
+    if force:
+        args = "-force " + args
+    return tool("ip", args, {"json":"-j"}, JSON=JSON, ns=ns,
+                fail=fail, include_stderr=include_stderr)
+
+def tc(args, JSON=True, ns="", fail=True, include_stderr=False):
+    return tool("tc", args, {"json":"-p"}, JSON=JSON, ns=ns,
+                fail=fail, include_stderr=include_stderr)
+
+def ethtool(dev, opt, args, fail=True):
+    return cmd("ethtool %s %s %s" % (opt, dev["ifname"], args), fail=fail)
+
+def bpf_obj(name, sec=".text", path=bpf_test_dir):
+    return "obj %s sec %s" % (os.path.join(path, name), sec)
+
+def bpf_pinned(name):
+    return "pinned %s" % (name)
+
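+# tc's classic-BPF "bytecode" syntax is "count,code jt jf k,...": the
+# string used below, "1,6 0 0 4294967295,", is a single BPF_RET|BPF_K
+# instruction returning 0xffffffff (accept).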
+def bpf_bytecode(bytecode):
+    return "bytecode \"%s\"" % (bytecode)
+
+def mknetns(n_retry=10):
+    for i in range(n_retry):
+        name = ''.join([random.choice(string.ascii_letters) for i in range(8)])
+        ret, _ = ip("netns add %s" % (name), fail=False)
+        if ret == 0:
+            netns.append(name)
+            return name
+    return None
+
+def int2str(fmt, val):
+    ret = []
+    for b in struct.pack(fmt, val):
+        ret.append(int(b))
+    return " ".join(map(lambda x: str(x), ret))
+
+def str2int(strtab):
+    inttab = []
+    for i in strtab:
+        inttab.append(int(i, 16))
+    ba = bytearray(inttab)
+    if len(strtab) == 4:
+        fmt = "I"
+    elif len(strtab) == 8:
+        fmt = "Q"
+    else:
+        raise Exception("String array of len %d can't be unpacked to an int" %
+                        (len(strtab)))
+    return struct.unpack(fmt, ba)[0]
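+
+# Illustrative example: on a little-endian host int2str("I", 1) returns
+# "1 0 0 0" (bpftool's byte-list syntax) and
+# str2int(["0x1", "0x0", "0x0", "0x0"]) converts it back to 1.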
+
+class DebugfsDir:
+    """
+    Class for accessing DebugFS directories as a dictionary.
+    """
+
+    def __init__(self, path):
+        self.path = path
+        self._dict = self._debugfs_dir_read(path)
+
+    def __len__(self):
+        return len(self._dict.keys())
+
+    def __getitem__(self, key):
+        if type(key) is int:
+            key = list(self._dict.keys())[key]
+        return self._dict[key]
+
+    def __setitem__(self, key, value):
+        log("DebugFS set %s = %s" % (key, value), "")
+        log_level_inc()
+
+        cmd("echo '%s' > %s/%s" % (value, self.path, key))
+        log_level_dec()
+
+        _, out = cmd('cat %s/%s' % (self.path, key))
+        self._dict[key] = out.strip()
+
+    def _debugfs_dir_read(self, path):
+        dfs = {}
+
+        log("DebugFS state for %s" % (path), "")
+        log_level_inc(add=2)
+
+        _, out = cmd('ls ' + path)
+        for f in out.split():
+            p = os.path.join(path, f)
+            if os.path.isfile(p):
+                _, out = cmd('cat %s/%s' % (path, f))
+                dfs[f] = out.strip()
+            elif os.path.isdir(p):
+                dfs[f] = DebugfsDir(p)
+            else:
+                raise Exception("%s is neither file nor directory" % (p))
+
+        log_level_dec()
+        log("DebugFS state", dfs)
+        log_level_dec()
+
+        return dfs
+
+class NetdevSim:
+    """
+    Class for netdevsim netdevice and its attributes.
+    """
+
+    def __init__(self):
+        self.dev = self._netdevsim_create()
+        devs.append(self)
+
+        self.ns = ""
+
+        self.dfs_dir = '/sys/kernel/debug/netdevsim/%s' % (self.dev['ifname'])
+        self.dfs_refresh()
+
+    def __getitem__(self, key):
+        return self.dev[key]
+
+    def _netdevsim_create(self):
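+        # "ip link add sim%d type netdevsim" lets the kernel pick the
+        # device name, so diff "ip link show" before and after to find
+        # the newly created netdev.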
+        _, old  = ip("link show")
+        ip("link add sim%d type netdevsim")
+        _, new  = ip("link show")
+
+        for dev in new:
+            f = filter(lambda x: x["ifname"] == dev["ifname"], old)
+            if len(list(f)) == 0:
+                return dev
+
+        raise Exception("failed to create netdevsim device")
+
+    def remove(self):
+        devs.remove(self)
+        ip("link del dev %s" % (self.dev["ifname"]), ns=self.ns)
+
+    def dfs_refresh(self):
+        self.dfs = DebugfsDir(self.dfs_dir)
+        return self.dfs
+
+    def dfs_num_bound_progs(self):
+        path = os.path.join(self.dfs_dir, "bpf_bound_progs")
+        _, progs = cmd('ls %s' % (path))
+        return len(progs.split())
+
+    def dfs_get_bound_progs(self, expected):
+        progs = DebugfsDir(os.path.join(self.dfs_dir, "bpf_bound_progs"))
+        if expected is not None:
+            if len(progs) != expected:
+                fail(True, "%d BPF programs bound, expected %d" %
+                     (len(progs), expected))
+        return progs
+
+    def wait_for_flush(self, bound=0, total=0, n_retry=20):
+        for i in range(n_retry):
+            nbound = self.dfs_num_bound_progs()
+            nprogs = len(bpftool_prog_list())
+            if nbound == bound and nprogs == total:
+                return
+            time.sleep(0.05)
+        raise Exception("Timed out waiting for program counts to stabilize: want %d/%d, have %d bound, %d loaded" % (bound, total, nbound, nprogs))
+
+    def set_ns(self, ns):
+        name = "1" if ns == "" else ns
+        ip("link set dev %s netns %s" % (self.dev["ifname"], name), ns=self.ns)
+        self.ns = ns
+
+    def set_mtu(self, mtu, fail=True):
+        return ip("link set dev %s mtu %d" % (self.dev["ifname"], mtu),
+                  fail=fail)
+
+    def set_xdp(self, bpf, mode, force=False, JSON=True, verbose=False,
+                fail=True, include_stderr=False):
+        if verbose:
+            bpf += " verbose"
+        return ip("link set dev %s xdp%s %s" % (self.dev["ifname"], mode, bpf),
+                  force=force, JSON=JSON,
+                  fail=fail, include_stderr=include_stderr)
+
+    def unset_xdp(self, mode, force=False, JSON=True,
+                  fail=True, include_stderr=False):
+        return ip("link set dev %s xdp%s off" % (self.dev["ifname"], mode),
+                  force=force, JSON=JSON,
+                  fail=fail, include_stderr=include_stderr)
+
+    def ip_link_show(self, xdp):
+        _, link = ip("link show dev %s" % (self['ifname']))
+        if len(link) > 1:
+            raise Exception("Multiple objects on ip link show")
+        if len(link) < 1:
+            return {}
+        fail(xdp != ("xdp" in link[0]),
+             "XDP program not reporting in iplink (reported %s, expected %s)" %
+             ("xdp" in link[0], xdp))
+        return link[0]
+
+    def tc_add_ingress(self):
+        tc("qdisc add dev %s ingress" % (self['ifname']))
+
+    def tc_del_ingress(self):
+        tc("qdisc del dev %s ingress" % (self['ifname']))
+
+    def tc_flush_filters(self, bound=0, total=0):
+        self.tc_del_ingress()
+        self.tc_add_ingress()
+        self.wait_for_flush(bound=bound, total=total)
+
+    def tc_show_ingress(self, expected=None):
+        # No JSON support, oh well...
+        flags = ["skip_sw", "skip_hw", "in_hw"]
+        named = ["protocol", "pref", "chain", "handle", "id", "tag"]
+
+        args = "-s filter show dev %s ingress" % (self['ifname'])
+        _, out = tc(args, JSON=False)
+
+        filters = []
+        lines = out.split('\n')
+        for line in lines:
+            words = line.split()
+            if "handle" not in words:
+                continue
+            fltr = {}
+            for flag in flags:
+                fltr[flag] = flag in words
+            for name in named:
+                try:
+                    idx = words.index(name)
+                    fltr[name] = words[idx + 1]
+                except ValueError:
+                    pass
+            filters.append(fltr)
+
+        if expected is not None:
+            fail(len(filters) != expected,
+                 "%d ingress filters loaded, expected %d" %
+                 (len(filters), expected))
+        return filters
+
+    def cls_filter_op(self, op, qdisc="ingress", prio=None, handle=None,
+                      chain=None, cls="", params="",
+                      fail=True, include_stderr=False):
+        spec = ""
+        if prio is not None:
+            spec += " prio %d" % (prio)
+        if handle:
+            spec += " handle %s" % (handle)
+        if chain is not None:
+            spec += " chain %d" % (chain)
+
+        return tc("filter {op} dev {dev} {qdisc} {spec} {cls} {params}"\
+                  .format(op=op, dev=self['ifname'], qdisc=qdisc, spec=spec,
+                          cls=cls, params=params),
+                  fail=fail, include_stderr=include_stderr)
+
+    def cls_bpf_add_filter(self, bpf, op="add", prio=None, handle=None,
+                           chain=None, da=False, verbose=False,
+                           skip_sw=False, skip_hw=False,
+                           fail=True, include_stderr=False):
+        cls = "bpf " + bpf
+
+        params = ""
+        if da:
+            params += " da"
+        if verbose:
+            params += " verbose"
+        if skip_sw:
+            params += " skip_sw"
+        if skip_hw:
+            params += " skip_hw"
+
+        return self.cls_filter_op(op=op, prio=prio, handle=handle, cls=cls,
+                                  chain=chain, params=params,
+                                  fail=fail, include_stderr=include_stderr)
+
+    def set_ethtool_tc_offloads(self, enable, fail=True):
+        args = "hw-tc-offload %s" % ("on" if enable else "off")
+        return ethtool(self, "-K", args, fail=fail)
+
+################################################################################
+def clean_up():
+    global files, netns, devs
+
+    for dev in devs:
+        dev.remove()
+    for f in files:
+        cmd("rm -f %s" % (f))
+    for ns in netns:
+        cmd("ip netns delete %s" % (ns))
+    files = []
+    netns = []
+
+def pin_prog(file_name, idx=0):
+    progs = bpftool_prog_list(expected=(idx + 1))
+    prog = progs[idx]
+    bpftool("prog pin id %d %s" % (prog["id"], file_name))
+    files.append(file_name)
+
+    return file_name, bpf_pinned(file_name)
+
+def pin_map(file_name, idx=0, expected=1):
+    maps = bpftool_map_list(expected=expected)
+    m = maps[idx]
+    bpftool("map pin id %d %s" % (m["id"], file_name))
+    files.append(file_name)
+
+    return file_name, bpf_pinned(file_name)
+
+def check_dev_info_removed(prog_file=None, map_file=None):
+    bpftool_prog_list(expected=0)
+    ret, err = bpftool("prog show pin %s" % (prog_file), fail=False)
+    fail(ret == 0, "Showing prog with removed device did not fail")
+    fail(err["error"].find("No such device") == -1,
+         "Showing prog with removed device expected ENODEV, error is %s" %
+         (err["error"]))
+
+    bpftool_map_list(expected=0)
+    ret, err = bpftool("map show pin %s" % (map_file), fail=False)
+    fail(ret == 0, "Showing map with removed device did not fail")
+    fail(err["error"].find("No such device") == -1,
+         "Showing map with removed device expected ENODEV, error is %s" %
+         (err["error"]))
+
+def check_dev_info(other_ns, ns, prog_file=None, map_file=None, removed=False):
+    progs = bpftool_prog_list(expected=1, ns=ns)
+    prog = progs[0]
+
+    fail("dev" not in prog.keys(), "Device parameters not reported")
+    dev = prog["dev"]
+    fail("ifindex" not in dev.keys(), "Device parameters not reported")
+    fail("ns_dev" not in dev.keys(), "Device parameters not reported")
+    fail("ns_inode" not in dev.keys(), "Device parameters not reported")
+
+    if not other_ns:
+        fail("ifname" not in dev.keys(), "Ifname not reported")
+        fail(dev["ifname"] != sim["ifname"],
+             "Ifname incorrect %s vs %s" % (dev["ifname"], sim["ifname"]))
+    else:
+        fail("ifname" in dev.keys(), "Ifname is reported for other ns")
+
+    maps = bpftool_map_list(expected=2, ns=ns)
+    for m in maps:
+        fail("dev" not in m.keys(), "Device parameters not reported")
+        fail(dev != m["dev"], "Map's device different than program's")
+
+def check_extack(output, reference, args):
+    if skip_extack:
+        return
+    lines = output.split("\n")
+    comp = len(lines) >= 2 and lines[1] == reference
+    fail(not comp, "Missing or incorrect netlink extack message")
+
+def check_extack_nsim(output, reference, args):
+    check_extack(output, "Error: netdevsim: " + reference, args)
+
+def check_no_extack(res, needle):
+    fail((res[1] + res[2]).count(needle) or (res[1] + res[2]).count("Warning:"),
+         "Found '%s' in command output, leaky extack?" % (needle))
+
+def check_verifier_log(output, reference):
+    lines = output.split("\n")
+    for l in reversed(lines):
+        if l == reference:
+            return
+    fail(True, "Missing or incorrect message from netdevsim in verifier log")
+
+def test_spurious_extack(sim, obj, skip_hw, needle):
+    res = sim.cls_bpf_add_filter(obj, prio=1, handle=1, skip_hw=skip_hw,
+                                 include_stderr=True)
+    check_no_extack(res, needle)
+    res = sim.cls_bpf_add_filter(obj, op="replace", prio=1, handle=1,
+                                 skip_hw=skip_hw, include_stderr=True)
+    check_no_extack(res, needle)
+    res = sim.cls_filter_op(op="delete", prio=1, handle=1, cls="bpf",
+                            include_stderr=True)
+    check_no_extack(res, needle)
+
+
+# Parse command line
+parser = argparse.ArgumentParser()
+parser.add_argument("--log", help="output verbose log to given file")
+args = parser.parse_args()
+if args.log:
+    logfile = open(args.log, 'w+')
+    logfile.write("# -*-Org-*-")
+
+log("Prepare...", "", level=1)
+log_level_inc()
+
+# Check permissions
+skip(os.getuid() != 0, "test must be run as root")
+
+# Check tools
+ret, progs = bpftool("prog", fail=False)
+skip(ret != 0, "bpftool not installed")
+# Check no BPF programs are loaded
+skip(len(progs) != 0, "BPF programs already loaded on the system")
+
+# Check netdevsim
+ret, out = cmd("modprobe netdevsim", fail=False)
+skip(ret != 0, "netdevsim module could not be loaded")
+
+# Check debugfs
+_, out = cmd("mount")
+if out.find("/sys/kernel/debug type debugfs") == -1:
+    cmd("mount -t debugfs none /sys/kernel/debug")
+
+# Check samples are compiled
+samples = ["sample_ret0.o", "sample_map_ret0.o"]
+for s in samples:
+    ret, out = cmd("ls %s/%s" % (bpf_test_dir, s), fail=False)
+    skip(ret != 0, "sample %s/%s not found, please compile it" %
+         (bpf_test_dir, s))
+
+# Check if iproute2 is built with libmnl (needed by extack support)
+_, _, err = cmd("tc qdisc delete dev lo handle 0",
+                fail=False, include_stderr=True)
+if err.find("Error: Failed to find qdisc with specified handle.") == -1:
+    print("Warning: no extack message in iproute2 output, libmnl missing?")
+    log("Warning: no extack message in iproute2 output, libmnl missing?", "")
+    skip_extack = True
+
+# Check if net namespaces seem to work
+ns = mknetns()
+skip(ns is None, "Could not create a net namespace")
+cmd("ip netns delete %s" % (ns))
+netns = []
+
+try:
+    obj = bpf_obj("sample_ret0.o")
+    bytecode = bpf_bytecode("1,6 0 0 4294967295,")
+
+    start_test("Test destruction of generic XDP...")
+    sim = NetdevSim()
+    sim.set_xdp(obj, "generic")
+    sim.remove()
+    bpftool_prog_list_wait(expected=0)
+
+    sim = NetdevSim()
+    sim.tc_add_ingress()
+
+    start_test("Test TC non-offloaded...")
+    ret, _ = sim.cls_bpf_add_filter(obj, skip_hw=True, fail=False)
+    fail(ret != 0, "Software TC filter did not load")
+
+    start_test("Test TC non-offloaded isn't getting bound...")
+    ret, _ = sim.cls_bpf_add_filter(obj, fail=False)
+    fail(ret != 0, "Software TC filter did not load")
+    sim.dfs_get_bound_progs(expected=0)
+
+    sim.tc_flush_filters()
+
+    start_test("Test TC offloads are off by default...")
+    ret, _, err = sim.cls_bpf_add_filter(obj, skip_sw=True,
+                                         fail=False, include_stderr=True)
+    fail(ret == 0, "TC filter loaded without enabling TC offloads")
+    check_extack(err, "Error: TC offload is disabled on net device.", args)
+    sim.wait_for_flush()
+
+    sim.set_ethtool_tc_offloads(True)
+    sim.dfs["bpf_tc_non_bound_accept"] = "Y"
+
+    start_test("Test TC offload by default...")
+    ret, _ = sim.cls_bpf_add_filter(obj, fail=False)
+    fail(ret != 0, "Software TC filter did not load")
+    sim.dfs_get_bound_progs(expected=0)
+    ingress = sim.tc_show_ingress(expected=1)
+    fltr = ingress[0]
+    fail(not fltr["in_hw"], "Filter not offloaded by default")
+
+    sim.tc_flush_filters()
+
+    start_test("Test TC cBPF bytecode tries offload by default...")
+    ret, _ = sim.cls_bpf_add_filter(bytecode, fail=False)
+    fail(ret != 0, "Software TC filter did not load")
+    sim.dfs_get_bound_progs(expected=0)
+    ingress = sim.tc_show_ingress(expected=1)
+    fltr = ingress[0]
+    fail(not fltr["in_hw"], "Bytecode not offloaded by default")
+
+    sim.tc_flush_filters()
+    sim.dfs["bpf_tc_non_bound_accept"] = "N"
+
+    start_test("Test TC cBPF unbound bytecode doesn't offload...")
+    ret, _, err = sim.cls_bpf_add_filter(bytecode, skip_sw=True,
+                                         fail=False, include_stderr=True)
+    fail(ret == 0, "TC bytecode loaded for offload")
+    check_extack_nsim(err, "netdevsim configured to reject unbound programs.",
+                      args)
+    sim.wait_for_flush()
+
+    start_test("Test non-0 chain offload...")
+    ret, _, err = sim.cls_bpf_add_filter(obj, chain=1, prio=1, handle=1,
+                                         skip_sw=True,
+                                         fail=False, include_stderr=True)
+    fail(ret == 0, "Offloaded a filter to chain other than 0")
+    check_extack(err, "Error: Driver supports only offload of chain 0.", args)
+    sim.tc_flush_filters()
+
+    start_test("Test TC replace...")
+    sim.cls_bpf_add_filter(obj, prio=1, handle=1)
+    sim.cls_bpf_add_filter(obj, op="replace", prio=1, handle=1)
+    sim.cls_filter_op(op="delete", prio=1, handle=1, cls="bpf")
+
+    sim.cls_bpf_add_filter(obj, prio=1, handle=1, skip_sw=True)
+    sim.cls_bpf_add_filter(obj, op="replace", prio=1, handle=1, skip_sw=True)
+    sim.cls_filter_op(op="delete", prio=1, handle=1, cls="bpf")
+
+    sim.cls_bpf_add_filter(obj, prio=1, handle=1, skip_hw=True)
+    sim.cls_bpf_add_filter(obj, op="replace", prio=1, handle=1, skip_hw=True)
+    sim.cls_filter_op(op="delete", prio=1, handle=1, cls="bpf")
+
+    start_test("Test TC replace bad flags...")
+    for i in range(3):
+        for j in range(3):
+            ret, _ = sim.cls_bpf_add_filter(obj, op="replace", prio=1, handle=1,
+                                            skip_sw=(j == 1), skip_hw=(j == 2),
+                                            fail=False)
+            fail(bool(ret) != bool(j),
+                 "Software TC incorrect load in replace test, iteration %d" %
+                 (j))
+        sim.cls_filter_op(op="delete", prio=1, handle=1, cls="bpf")
+
+    start_test("Test spurious extack from the driver...")
+    test_spurious_extack(sim, obj, False, "netdevsim")
+    test_spurious_extack(sim, obj, True, "netdevsim")
+
+    sim.set_ethtool_tc_offloads(False)
+
+    test_spurious_extack(sim, obj, False, "TC offload is disabled")
+    test_spurious_extack(sim, obj, True, "TC offload is disabled")
+
+    sim.set_ethtool_tc_offloads(True)
+
+    sim.tc_flush_filters()
+
+    start_test("Test TC offloads work...")
+    ret, _, err = sim.cls_bpf_add_filter(obj, verbose=True, skip_sw=True,
+                                         fail=False, include_stderr=True)
+    fail(ret != 0, "TC filter did not load with TC offloads enabled")
+    check_verifier_log(err, "[netdevsim] Hello from netdevsim!")
+
+    start_test("Test TC offload basics...")
+    dfs = sim.dfs_get_bound_progs(expected=1)
+    progs = bpftool_prog_list(expected=1)
+    ingress = sim.tc_show_ingress(expected=1)
+
+    dprog = dfs[0]
+    prog = progs[0]
+    fltr = ingress[0]
+    fail(fltr["skip_hw"], "TC reports 'skip_hw' on offloaded filter")
+    fail(not fltr["in_hw"], "TC does not report 'in_hw' for offloaded filter")
+    fail(not fltr["skip_sw"], "TC does not report 'skip_sw' back")
+
+    start_test("Test TC offload is device-bound...")
+    fail(str(prog["id"]) != fltr["id"], "Program IDs don't match")
+    fail(prog["tag"] != fltr["tag"], "Program tags don't match")
+    fail(fltr["id"] != dprog["id"], "Program IDs don't match")
+    fail(dprog["state"] != "xlated", "Offloaded program state not translated")
+    fail(dprog["loaded"] != "Y", "Offloaded program is not loaded")
+
+    start_test("Test disabling TC offloads is rejected while filters installed...")
+    ret, _ = sim.set_ethtool_tc_offloads(False, fail=False)
+    fail(ret == 0, "Driver should refuse to disable TC offloads with filters installed...")
+
+    start_test("Test qdisc removal frees things...")
+    sim.tc_flush_filters()
+    sim.tc_show_ingress(expected=0)
+
+    start_test("Test disabling TC offloads is OK without filters...")
+    ret, _ = sim.set_ethtool_tc_offloads(False, fail=False)
+    fail(ret != 0,
+         "Driver refused to disable TC offloads without filters installed...")
+
+    sim.set_ethtool_tc_offloads(True)
+
+    start_test("Test destroying device gets rid of TC filters...")
+    sim.cls_bpf_add_filter(obj, skip_sw=True)
+    sim.remove()
+    bpftool_prog_list_wait(expected=0)
+
+    sim = NetdevSim()
+    sim.set_ethtool_tc_offloads(True)
+
+    start_test("Test destroying device gets rid of XDP...")
+    sim.set_xdp(obj, "offload")
+    sim.remove()
+    bpftool_prog_list_wait(expected=0)
+
+    sim = NetdevSim()
+    sim.set_ethtool_tc_offloads(True)
+
+    start_test("Test XDP prog reporting...")
+    sim.set_xdp(obj, "drv")
+    ipl = sim.ip_link_show(xdp=True)
+    progs = bpftool_prog_list(expected=1)
+    fail(ipl["xdp"]["prog"]["id"] != progs[0]["id"],
+         "Loaded program has wrong ID")
+
+    start_test("Test XDP prog replace without force...")
+    ret, _ = sim.set_xdp(obj, "drv", fail=False)
+    fail(ret == 0, "Replaced XDP program without -force")
+    sim.wait_for_flush(total=1)
+
+    start_test("Test XDP prog replace with force...")
+    ret, _ = sim.set_xdp(obj, "drv", force=True, fail=False)
+    fail(ret != 0, "Could not replace XDP program with -force")
+    bpftool_prog_list_wait(expected=1)
+    ipl = sim.ip_link_show(xdp=True)
+    progs = bpftool_prog_list(expected=1)
+    fail(ipl["xdp"]["prog"]["id"] != progs[0]["id"],
+         "Loaded program has wrong ID")
+    fail("dev" in progs[0].keys(),
+         "Device parameters reported for non-offloaded program")
+
+    start_test("Test XDP prog replace with bad flags...")
+    ret, _, err = sim.set_xdp(obj, "offload", force=True,
+                              fail=False, include_stderr=True)
+    fail(ret == 0, "Replaced XDP program with a program in different mode")
+    check_extack_nsim(err, "program loaded with different flags.", args)
+    ret, _, err = sim.set_xdp(obj, "", force=True,
+                              fail=False, include_stderr=True)
+    fail(ret == 0, "Replaced XDP program with a program in different mode")
+    check_extack_nsim(err, "program loaded with different flags.", args)
+
+    start_test("Test XDP prog remove with bad flags...")
+    ret, _, err = sim.unset_xdp("offload", force=True,
+                                fail=False, include_stderr=True)
+    fail(ret == 0, "Removed program with a bad mode")
+    check_extack_nsim(err, "program loaded with different flags.", args)
+    ret, _, err = sim.unset_xdp("", force=True,
+                                fail=False, include_stderr=True)
+    fail(ret == 0, "Removed program with a bad mode")
+    check_extack_nsim(err, "program loaded with different flags.", args)
+
+    start_test("Test MTU restrictions...")
+    ret, _ = sim.set_mtu(9000, fail=False)
+    fail(ret == 0,
+         "Driver should refuse to increase MTU to 9000 with XDP loaded...")
+    sim.unset_xdp("drv")
+    bpftool_prog_list_wait(expected=0)
+    sim.set_mtu(9000)
+    ret, _, err = sim.set_xdp(obj, "drv", fail=False, include_stderr=True)
+    fail(ret == 0, "Driver should refuse to load program with MTU of 9000...")
+    check_extack_nsim(err, "MTU too large w/ XDP enabled.", args)
+    sim.set_mtu(1500)
+
+    sim.wait_for_flush()
+    start_test("Test XDP offload...")
+    _, _, err = sim.set_xdp(obj, "offload", verbose=True, include_stderr=True)
+    ipl = sim.ip_link_show(xdp=True)
+    link_xdp = ipl["xdp"]["prog"]
+    progs = bpftool_prog_list(expected=1)
+    prog = progs[0]
+    fail(link_xdp["id"] != prog["id"], "Loaded program has wrong ID")
+    check_verifier_log(err, "[netdevsim] Hello from netdevsim!")
+
+    start_test("Test XDP offload is device bound...")
+    dfs = sim.dfs_get_bound_progs(expected=1)
+    dprog = dfs[0]
+
+    fail(prog["id"] != link_xdp["id"], "Program IDs don't match")
+    fail(prog["tag"] != link_xdp["tag"], "Program tags don't match")
+    fail(str(link_xdp["id"]) != dprog["id"], "Program IDs don't match")
+    fail(dprog["state"] != "xlated", "Offloaded program state not translated")
+    fail(dprog["loaded"] != "Y", "Offloaded program is not loaded")
+
+    start_test("Test removing XDP program many times...")
+    sim.unset_xdp("offload")
+    sim.unset_xdp("offload")
+    sim.unset_xdp("drv")
+    sim.unset_xdp("drv")
+    sim.unset_xdp("")
+    sim.unset_xdp("")
+    bpftool_prog_list_wait(expected=0)
+
+    start_test("Test attempt to use a program for a wrong device...")
+    sim2 = NetdevSim()
+    sim2.set_xdp(obj, "offload")
+    pin_file, pinned = pin_prog("/sys/fs/bpf/tmp")
+
+    ret, _, err = sim.set_xdp(pinned, "offload",
+                              fail=False, include_stderr=True)
+    fail(ret == 0, "Pinned program loaded for a different device accepted")
+    check_extack_nsim(err, "program bound to different dev.", args)
+    sim2.remove()
+    ret, _, err = sim.set_xdp(pinned, "offload",
+                              fail=False, include_stderr=True)
+    fail(ret == 0, "Pinned program loaded for a removed device accepted")
+    check_extack_nsim(err, "xdpoffload of non-bound program.", args)
+    rm(pin_file)
+    bpftool_prog_list_wait(expected=0)
+
+    start_test("Test mixing of TC and XDP...")
+    sim.tc_add_ingress()
+    sim.set_xdp(obj, "offload")
+    ret, _, err = sim.cls_bpf_add_filter(obj, skip_sw=True,
+                                         fail=False, include_stderr=True)
+    fail(ret == 0, "Loading TC when XDP active should fail")
+    check_extack_nsim(err, "driver and netdev offload states mismatch.", args)
+    sim.unset_xdp("offload")
+    sim.wait_for_flush()
+
+    sim.cls_bpf_add_filter(obj, skip_sw=True)
+    ret, _, err = sim.set_xdp(obj, "offload", fail=False, include_stderr=True)
+    fail(ret == 0, "Loading XDP when TC active should fail")
+    check_extack_nsim(err, "TC program is already loaded.", args)
+
+    start_test("Test binding TC from pinned...")
+    pin_file, pinned = pin_prog("/sys/fs/bpf/tmp")
+    sim.tc_flush_filters(bound=1, total=1)
+    sim.cls_bpf_add_filter(pinned, da=True, skip_sw=True)
+    sim.tc_flush_filters(bound=1, total=1)
+
+    start_test("Test binding XDP from pinned...")
+    sim.set_xdp(obj, "offload")
+    pin_file, pinned = pin_prog("/sys/fs/bpf/tmp2", idx=1)
+
+    sim.set_xdp(pinned, "offload", force=True)
+    sim.unset_xdp("offload")
+    sim.set_xdp(pinned, "offload", force=True)
+    sim.unset_xdp("offload")
+
+    start_test("Test offload of wrong type fails...")
+    ret, _ = sim.cls_bpf_add_filter(pinned, da=True, skip_sw=True, fail=False)
+    fail(ret == 0, "Managed to attach XDP program to TC")
+
+    start_test("Test asking for TC offload of two filters...")
+    sim.cls_bpf_add_filter(obj, da=True, skip_sw=True)
+    ret, _, err = sim.cls_bpf_add_filter(obj, da=True, skip_sw=True,
+                                         fail=False, include_stderr=True)
+    fail(ret == 0, "Managed to offload two TC filters at the same time")
+    check_extack_nsim(err, "driver and netdev offload states mismatch.", args)
+
+    sim.tc_flush_filters(bound=2, total=2)
+
+    start_test("Test if netdev removal waits for translation...")
+    delay_msec = 500
+    sim.dfs["bpf_bind_verifier_delay"] = delay_msec
+    start = time.time()
+    cmd_line = "tc filter add dev %s ingress bpf %s da skip_sw" % \
+               (sim['ifname'], obj)
+    tc_proc = cmd(cmd_line, background=True, fail=False)
+    # Wait for the verifier to start on the third (background) program
+    while sim.dfs_num_bound_progs() <= 2:
+        pass
+    sim.remove()
+    end = time.time()
+    ret, _ = cmd_result(tc_proc, fail=False)
+    time_diff = end - start
+    log("Time", "start:\t%s\nend:\t%s\ndiff:\t%s" % (start, end, time_diff))
+
+    fail(ret == 0, "Managed to load TC filter on a unregistering device")
+    delay_sec = delay_msec * 0.001
+    fail(time_diff < delay_sec, "Removal process took %s, expected %s" %
+         (time_diff, delay_sec))
+
+    # Remove all pinned files and reinstantiate the netdev
+    clean_up()
+    bpftool_prog_list_wait(expected=0)
+
+    sim = NetdevSim()
+    map_obj = bpf_obj("sample_map_ret0.o")
+    start_test("Test loading program with maps...")
+    sim.set_xdp(map_obj, "offload", JSON=False) # map fixup msg breaks JSON
+
+    start_test("Test bpftool bound info reporting (own ns)...")
+    check_dev_info(False, "")
+
+    start_test("Test bpftool bound info reporting (other ns)...")
+    ns = mknetns()
+    sim.set_ns(ns)
+    check_dev_info(True, "")
+
+    start_test("Test bpftool bound info reporting (remote ns)...")
+    check_dev_info(False, ns)
+
+    start_test("Test bpftool bound info reporting (back to own ns)...")
+    sim.set_ns("")
+    check_dev_info(False, "")
+
+    prog_file, _ = pin_prog("/sys/fs/bpf/tmp_prog")
+    map_file, _ = pin_map("/sys/fs/bpf/tmp_map", idx=1, expected=2)
+    sim.remove()
+
+    start_test("Test bpftool bound info reporting (removed dev)...")
+    check_dev_info_removed(prog_file=prog_file, map_file=map_file)
+
+    # Remove all pinned files and reinstantiate the netdev
+    clean_up()
+    bpftool_prog_list_wait(expected=0)
+
+    sim = NetdevSim()
+
+    start_test("Test map update (no flags)...")
+    sim.set_xdp(map_obj, "offload", JSON=False) # map fixup msg breaks JSON
+    maps = bpftool_map_list(expected=2)
+    array = maps[0] if maps[0]["type"] == "array" else maps[1]
+    htab = maps[0] if maps[0]["type"] == "hash" else maps[1]
+    for m in maps:
+        for i in range(2):
+            bpftool("map update id %d key %s value %s" %
+                    (m["id"], int2str("I", i), int2str("Q", i * 3)))
+
+    for m in maps:
+        ret, _ = bpftool("map update id %d key %s value %s" %
+                         (m["id"], int2str("I", 3), int2str("Q", 3 * 3)),
+                         fail=False)
+        fail(ret == 0, "added too many entries")
+
+    start_test("Test map update (exists)...")
+    for m in maps:
+        for i in range(2):
+            bpftool("map update id %d key %s value %s exist" %
+                    (m["id"], int2str("I", i), int2str("Q", i * 3)))
+
+    for m in maps:
+        ret, err = bpftool("map update id %d key %s value %s exist" %
+                           (m["id"], int2str("I", 3), int2str("Q", 3 * 3)),
+                           fail=False)
+        fail(ret == 0, "updated non-existing key")
+        fail(err["error"].find("No such file or directory") == -1,
+             "expected ENOENT, error is '%s'" % (err["error"]))
+
+    start_test("Test map update (noexist)...")
+    for m in maps:
+        for i in range(2):
+            ret, err = bpftool("map update id %d key %s value %s noexist" %
+                               (m["id"], int2str("I", i), int2str("Q", i * 3)),
+                               fail=False)
+        fail(ret == 0, "updated existing key")
+        fail(err["error"].find("File exists") == -1,
+             "expected EEXIST, error is '%s'" % (err["error"]))
+
+    start_test("Test map dump...")
+    for m in maps:
+        _, entries = bpftool("map dump id %d" % (m["id"]))
+        for i in range(2):
+            key = str2int(entries[i]["key"])
+            fail(key != i, "expected key %d, got %d" % (key, i))
+            val = str2int(entries[i]["value"])
+            fail(val != i * 3, "expected value %d, got %d" % (val, i * 3))
+
+    start_test("Test map getnext...")
+    for m in maps:
+        _, entry = bpftool("map getnext id %d" % (m["id"]))
+        key = str2int(entry["next_key"])
+        fail(key != 0, "next key %d, expected %d" % (key, 0))
+        _, entry = bpftool("map getnext id %d key %s" %
+                           (m["id"], int2str("I", 0)))
+        key = str2int(entry["next_key"])
+        fail(key != 1, "next key %d, expected %d" % (key, 1))
+        ret, err = bpftool("map getnext id %d key %s" %
+                           (m["id"], int2str("I", 1)), fail=False)
+        fail(ret == 0, "got next key past the end of map")
+        fail(err["error"].find("No such file or directory") == -1,
+             "expected ENOENT, error is '%s'" % (err["error"]))
+
+    start_test("Test map delete (htab)...")
+    for i in range(2):
+        bpftool("map delete id %d key %s" % (htab["id"], int2str("I", i)))
+
+    start_test("Test map delete (array)...")
+    for i in range(2):
+        ret, err = bpftool("map delete id %d key %s" %
+                           (htab["id"], int2str("I", i)), fail=False)
+        fail(ret == 0, "removed entry from an array")
+        fail(err["error"].find("No such file or directory") == -1,
+             "expected ENOENT, error is '%s'" % (err["error"]))
+
+    start_test("Test map remove...")
+    sim.unset_xdp("offload")
+    bpftool_map_list_wait(expected=0)
+    sim.remove()
+
+    sim = NetdevSim()
+    sim.set_xdp(map_obj, "offload", JSON=False) # map fixup msg breaks JSON
+    sim.remove()
+    bpftool_map_list_wait(expected=0)
+
+    start_test("Test map creation fail path...")
+    sim = NetdevSim()
+    sim.dfs["bpf_map_accept"] = "N"
+    ret, _ = sim.set_xdp(map_obj, "offload", JSON=False, fail=False)
+    fail(ret == 0,
+         "netdevsim didn't refuse to create a map with offload disabled")
+
+    print("%s: OK" % (os.path.basename(__file__)))
+
+finally:
+    log("Clean up...", "", level=1)
+    log_level_inc()
+    clean_up()
diff --git a/tools/testing/selftests/bpf/test_progs.c b/tools/testing/selftests/bpf/test_progs.c
index 6761be18a91f..b549308abd19 100644
--- a/tools/testing/selftests/bpf/test_progs.c
+++ b/tools/testing/selftests/bpf/test_progs.c
@@ -21,8 +21,10 @@ typedef __u16 __sum16;
 #include <linux/ipv6.h>
 #include <linux/tcp.h>
 #include <linux/filter.h>
+#include <linux/perf_event.h>
 #include <linux/unistd.h>
 
+#include <sys/ioctl.h>
 #include <sys/wait.h>
 #include <sys/resource.h>
 #include <sys/types.h>
@@ -167,10 +169,9 @@ out:
 #define NUM_ITER 100000
 #define VIP_NUM 5
 
-static void test_l4lb(void)
+static void test_l4lb(const char *file)
 {
 	unsigned int nr_cpus = bpf_num_possible_cpus();
-	const char *file = "./test_l4lb.o";
 	struct vip key = {.protocol = 6};
 	struct vip_meta {
 		__u32 flags;
@@ -247,6 +248,95 @@ out:
 	bpf_object__close(obj);
 }
 
+static void test_l4lb_all(void)
+{
+	const char *file1 = "./test_l4lb.o";
+	const char *file2 = "./test_l4lb_noinline.o";
+
+	test_l4lb(file1);
+	test_l4lb(file2);
+}
+
+static void test_xdp_noinline(void)
+{
+	const char *file = "./test_xdp_noinline.o";
+	unsigned int nr_cpus = bpf_num_possible_cpus();
+	struct vip key = {.protocol = 6};
+	struct vip_meta {
+		__u32 flags;
+		__u32 vip_num;
+	} value = {.vip_num = VIP_NUM};
+	__u32 stats_key = VIP_NUM;
+	struct vip_stats {
+		__u64 bytes;
+		__u64 pkts;
+	} stats[nr_cpus];
+	struct real_definition {
+		union {
+			__be32 dst;
+			__be32 dstv6[4];
+		};
+		__u8 flags;
+	} real_def = {.dst = MAGIC_VAL};
+	__u32 ch_key = 11, real_num = 3;
+	__u32 duration, retval, size;
+	int err, i, prog_fd, map_fd;
+	__u64 bytes = 0, pkts = 0;
+	struct bpf_object *obj;
+	char buf[128];
+	u32 *magic = (u32 *)buf;
+
+	err = bpf_prog_load(file, BPF_PROG_TYPE_XDP, &obj, &prog_fd);
+	if (err) {
+		error_cnt++;
+		return;
+	}
+
+	map_fd = bpf_find_map(__func__, obj, "vip_map");
+	if (map_fd < 0)
+		goto out;
+	bpf_map_update_elem(map_fd, &key, &value, 0);
+
+	map_fd = bpf_find_map(__func__, obj, "ch_rings");
+	if (map_fd < 0)
+		goto out;
+	bpf_map_update_elem(map_fd, &ch_key, &real_num, 0);
+
+	map_fd = bpf_find_map(__func__, obj, "reals");
+	if (map_fd < 0)
+		goto out;
+	bpf_map_update_elem(map_fd, &real_num, &real_def, 0);
+
+	err = bpf_prog_test_run(prog_fd, NUM_ITER, &pkt_v4, sizeof(pkt_v4),
+				buf, &size, &retval, &duration);
+	CHECK(err || errno || retval != 1 || size != 54 ||
+	      *magic != MAGIC_VAL, "ipv4",
+	      "err %d errno %d retval %d size %d magic %x\n",
+	      err, errno, retval, size, *magic);
+
+	err = bpf_prog_test_run(prog_fd, NUM_ITER, &pkt_v6, sizeof(pkt_v6),
+				buf, &size, &retval, &duration);
+	CHECK(err || errno || retval != 1 || size != 74 ||
+	      *magic != MAGIC_VAL, "ipv6",
+	      "err %d errno %d retval %d size %d magic %x\n",
+	      err, errno, retval, size, *magic);
+
+	map_fd = bpf_find_map(__func__, obj, "stats");
+	if (map_fd < 0)
+		goto out;
+	bpf_map_lookup_elem(map_fd, &stats_key, stats);
+	for (i = 0; i < nr_cpus; i++) {
+		bytes += stats[i].bytes;
+		pkts += stats[i].pkts;
+	}
+	if (bytes != MAGIC_BYTES * NUM_ITER * 2 || pkts != NUM_ITER * 2) {
+		error_cnt++;
+		printf("test_xdp_noinline:FAIL:stats %lld %lld\n", bytes, pkts);
+	}
+out:
+	bpf_object__close(obj);
+}
+
 static void test_tcp_estats(void)
 {
 	const char *file = "./test_tcp_estats.o";
@@ -617,6 +707,262 @@ static void test_obj_name(void)
 	}
 }
 
+static void test_tp_attach_query(void)
+{
+	const int num_progs = 3;
+	int i, j, bytes, efd, err, prog_fd[num_progs], pmu_fd[num_progs];
+	__u32 duration = 0, info_len, saved_prog_ids[num_progs];
+	const char *file = "./test_tracepoint.o";
+	struct perf_event_query_bpf *query;
+	struct perf_event_attr attr = {};
+	struct bpf_object *obj[num_progs];
+	struct bpf_prog_info prog_info;
+	char buf[256];
+
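+	/* read the sched_switch tracepoint id; it becomes attr.config */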
+	snprintf(buf, sizeof(buf),
+		 "/sys/kernel/debug/tracing/events/sched/sched_switch/id");
+	efd = open(buf, O_RDONLY, 0);
+	if (CHECK(efd < 0, "open", "err %d errno %d\n", efd, errno))
+		return;
+	bytes = read(efd, buf, sizeof(buf));
+	close(efd);
+	if (CHECK(bytes <= 0 || bytes >= sizeof(buf),
+		  "read", "bytes %d errno %d\n", bytes, errno))
+		return;
+
+	attr.config = strtol(buf, NULL, 0);
+	attr.type = PERF_TYPE_TRACEPOINT;
+	attr.sample_type = PERF_SAMPLE_RAW | PERF_SAMPLE_CALLCHAIN;
+	attr.sample_period = 1;
+	attr.wakeup_events = 1;
+
+	query = malloc(sizeof(*query) + sizeof(__u32) * num_progs);
+	for (i = 0; i < num_progs; i++) {
+		err = bpf_prog_load(file, BPF_PROG_TYPE_TRACEPOINT, &obj[i],
+				    &prog_fd[i]);
+		if (CHECK(err, "prog_load", "err %d errno %d\n", err, errno))
+			goto cleanup1;
+
+		bzero(&prog_info, sizeof(prog_info));
+		prog_info.jited_prog_len = 0;
+		prog_info.xlated_prog_len = 0;
+		prog_info.nr_map_ids = 0;
+		info_len = sizeof(prog_info);
+		err = bpf_obj_get_info_by_fd(prog_fd[i], &prog_info, &info_len);
+		if (CHECK(err, "bpf_obj_get_info_by_fd", "err %d errno %d\n",
+			  err, errno))
+			goto cleanup1;
+		saved_prog_ids[i] = prog_info.id;
+
+		pmu_fd[i] = syscall(__NR_perf_event_open, &attr, -1 /* pid */,
+				    0 /* cpu 0 */, -1 /* group id */,
+				    0 /* flags */);
+		if (CHECK(pmu_fd[i] < 0, "perf_event_open", "err %d errno %d\n",
+			  pmu_fd[i], errno))
+			goto cleanup2;
+		err = ioctl(pmu_fd[i], PERF_EVENT_IOC_ENABLE, 0);
+		if (CHECK(err, "perf_event_ioc_enable", "err %d errno %d\n",
+			  err, errno))
+			goto cleanup3;
+
+		if (i == 0) {
+			/* check NULL prog array query */
+			query->ids_len = num_progs;
+			err = ioctl(pmu_fd[i], PERF_EVENT_IOC_QUERY_BPF, query);
+			if (CHECK(err || query->prog_cnt != 0,
+				  "perf_event_ioc_query_bpf",
+				  "err %d errno %d query->prog_cnt %u\n",
+				  err, errno, query->prog_cnt))
+				goto cleanup3;
+		}
+
+		err = ioctl(pmu_fd[i], PERF_EVENT_IOC_SET_BPF, prog_fd[i]);
+		if (CHECK(err, "perf_event_ioc_set_bpf", "err %d errno %d\n",
+			  err, errno))
+			goto cleanup3;
+
+		if (i == 1) {
+			/* try to get # of programs only */
+			query->ids_len = 0;
+			err = ioctl(pmu_fd[i], PERF_EVENT_IOC_QUERY_BPF, query);
+			if (CHECK(err || query->prog_cnt != 2,
+				  "perf_event_ioc_query_bpf",
+				  "err %d errno %d query->prog_cnt %u\n",
+				  err, errno, query->prog_cnt))
+				goto cleanup3;
+
+			/* try a few negative tests */
+			/* invalid query pointer */
+			err = ioctl(pmu_fd[i], PERF_EVENT_IOC_QUERY_BPF,
+				    (struct perf_event_query_bpf *)0x1);
+			if (CHECK(!err || errno != EFAULT,
+				  "perf_event_ioc_query_bpf",
+				  "err %d errno %d\n", err, errno))
+				goto cleanup3;
+
+			/* not enough space */
+			query->ids_len = 1;
+			err = ioctl(pmu_fd[i], PERF_EVENT_IOC_QUERY_BPF, query);
+			if (CHECK(!err || errno != ENOSPC || query->prog_cnt != 2,
+				  "perf_event_ioc_query_bpf",
+				  "err %d errno %d query->prog_cnt %u\n",
+				  err, errno, query->prog_cnt))
+				goto cleanup3;
+		}
+
+		query->ids_len = num_progs;
+		err = ioctl(pmu_fd[i], PERF_EVENT_IOC_QUERY_BPF, query);
+		if (CHECK(err || query->prog_cnt != (i + 1),
+			  "perf_event_ioc_query_bpf",
+			  "err %d errno %d query->prog_cnt %u\n",
+			  err, errno, query->prog_cnt))
+			goto cleanup3;
+		for (j = 0; j < i + 1; j++)
+			if (CHECK(saved_prog_ids[j] != query->ids[j],
+				  "perf_event_ioc_query_bpf",
+				  "#%d saved_prog_id %x query prog_id %x\n",
+				  j, saved_prog_ids[j], query->ids[j]))
+				goto cleanup3;
+	}
+
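+	/* The cleanup labels sit inside a descending loop: on success it
+	 * tears down all programs from last to first, while a goto on
+	 * failure enters the loop at the matching depth for the failing
+	 * index and unwinds the remaining iterations.
+	 */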
+	i = num_progs - 1;
+	for (; i >= 0; i--) {
+ cleanup3:
+		ioctl(pmu_fd[i], PERF_EVENT_IOC_DISABLE);
+ cleanup2:
+		close(pmu_fd[i]);
+ cleanup1:
+		bpf_object__close(obj[i]);
+	}
+	free(query);
+}
+
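+/* Walk every key of map1 and verify it can be looked up in map2; only
+ * a walk that ends with ENOENT (end of map) counts as success.
+ */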
+static int compare_map_keys(int map1_fd, int map2_fd)
+{
+	__u32 key, next_key;
+	char val_buf[PERF_MAX_STACK_DEPTH * sizeof(__u64)];
+	int err;
+
+	err = bpf_map_get_next_key(map1_fd, NULL, &key);
+	if (err)
+		return err;
+	err = bpf_map_lookup_elem(map2_fd, &key, val_buf);
+	if (err)
+		return err;
+
+	while (bpf_map_get_next_key(map1_fd, &key, &next_key) == 0) {
+		err = bpf_map_lookup_elem(map2_fd, &next_key, val_buf);
+		if (err)
+			return err;
+
+		key = next_key;
+	}
+	if (errno != ENOENT)
+		return -1;
+
+	return 0;
+}
+
+static void test_stacktrace_map(void)
+{
+	int control_map_fd, stackid_hmap_fd, stackmap_fd;
+	const char *file = "./test_stacktrace_map.o";
+	int bytes, efd, err, pmu_fd, prog_fd;
+	struct perf_event_attr attr = {};
+	__u32 key, val, duration = 0;
+	struct bpf_object *obj;
+	char buf[256];
+
+	err = bpf_prog_load(file, BPF_PROG_TYPE_TRACEPOINT, &obj, &prog_fd);
+	if (CHECK(err, "prog_load", "err %d errno %d\n", err, errno))
+		goto out;
+
+	/* Get the ID for the sched/sched_switch tracepoint */
+	snprintf(buf, sizeof(buf),
+		 "/sys/kernel/debug/tracing/events/sched/sched_switch/id");
+	efd = open(buf, O_RDONLY, 0);
+	if (CHECK(efd < 0, "open", "err %d errno %d\n", efd, errno))
+		goto close_prog;
+
+	bytes = read(efd, buf, sizeof(buf));
+	close(efd);
+	if (CHECK(bytes <= 0 || bytes >= sizeof(buf),
+		  "read", "bytes %d errno %d\n", bytes, errno))
+		goto close_prog;
+
+	/* Open the perf event and attach the bpf program */
+	attr.config = strtol(buf, NULL, 0);
+	attr.type = PERF_TYPE_TRACEPOINT;
+	attr.sample_type = PERF_SAMPLE_RAW | PERF_SAMPLE_CALLCHAIN;
+	attr.sample_period = 1;
+	attr.wakeup_events = 1;
+	pmu_fd = syscall(__NR_perf_event_open, &attr, -1 /* pid */,
+			 0 /* cpu 0 */, -1 /* group id */,
+			 0 /* flags */);
+	if (CHECK(pmu_fd < 0, "perf_event_open", "err %d errno %d\n",
+		  pmu_fd, errno))
+		goto close_prog;
+
+	err = ioctl(pmu_fd, PERF_EVENT_IOC_ENABLE, 0);
+	if (CHECK(err, "perf_event_ioc_enable", "err %d errno %d\n",
+		  err, errno))
+		goto close_pmu;
+
+	err = ioctl(pmu_fd, PERF_EVENT_IOC_SET_BPF, prog_fd);
+	if (CHECK(err, "perf_event_ioc_set_bpf", "err %d errno %d\n",
+		  err, errno))
+		goto disable_pmu;
+
+	/* find map fds */
+	control_map_fd = bpf_find_map(__func__, obj, "control_map");
+	if (CHECK(control_map_fd < 0, "bpf_find_map control_map",
+		  "err %d errno %d\n", err, errno))
+		goto disable_pmu;
+
+	stackid_hmap_fd = bpf_find_map(__func__, obj, "stackid_hmap");
+	if (CHECK(stackid_hmap_fd < 0, "bpf_find_map stackid_hmap",
+		  "err %d errno %d\n", err, errno))
+		goto disable_pmu;
+
+	stackmap_fd = bpf_find_map(__func__, obj, "stackmap");
+	if (CHECK(stackmap_fd < 0, "bpf_find_map stackmap", "err %d errno %d\n",
+		  err, errno))
+		goto disable_pmu;
+
+	/* give the bpf program some time to run */
+	sleep(1);
+
+	/* disable stack trace collection */
+	key = 0;
+	val = 1;
+	bpf_map_update_elem(control_map_fd, &key, &val, 0);
+
+	/* for every element in stackid_hmap, we can find a corresponding one
+	 * in stackmap, and vice versa.
+	 */
+	err = compare_map_keys(stackid_hmap_fd, stackmap_fd);
+	if (CHECK(err, "compare_map_keys stackid_hmap vs. stackmap",
+		  "err %d errno %d\n", err, errno))
+		goto disable_pmu;
+
+	err = compare_map_keys(stackmap_fd, stackid_hmap_fd);
+	if (CHECK(err, "compare_map_keys stackmap vs. stackid_hmap",
+		  "err %d errno %d\n", err, errno))
+		; /* fall through */
+
+disable_pmu:
+	ioctl(pmu_fd, PERF_EVENT_IOC_DISABLE);
+
+close_pmu:
+	close(pmu_fd);
+
+close_prog:
+	bpf_object__close(obj);
+
+out:
+	return;
+}
+
 int main(void)
 {
 	struct rlimit rinf = { RLIM_INFINITY, RLIM_INFINITY };
@@ -625,11 +971,14 @@ int main(void)
 
 	test_pkt_access();
 	test_xdp();
-	test_l4lb();
+	test_l4lb_all();
+	test_xdp_noinline();
 	test_tcp_estats();
 	test_bpf_obj_id();
 	test_pkt_md_access();
 	test_obj_name();
+	test_tp_attach_query();
+	test_stacktrace_map();
 
 	printf("Summary: %d PASSED, %d FAILED\n", pass_cnt, error_cnt);
 	return error_cnt ? EXIT_FAILURE : EXIT_SUCCESS;
diff --git a/tools/testing/selftests/bpf/test_stacktrace_map.c b/tools/testing/selftests/bpf/test_stacktrace_map.c
new file mode 100644
index 000000000000..76d85c5d08bd
--- /dev/null
+++ b/tools/testing/selftests/bpf/test_stacktrace_map.c
@@ -0,0 +1,62 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (c) 2018 Facebook
+
+#include <linux/bpf.h>
+#include "bpf_helpers.h"
+
+#ifndef PERF_MAX_STACK_DEPTH
+#define PERF_MAX_STACK_DEPTH         127
+#endif
+
+struct bpf_map_def SEC("maps") control_map = {
+	.type = BPF_MAP_TYPE_ARRAY,
+	.key_size = sizeof(__u32),
+	.value_size = sizeof(__u32),
+	.max_entries = 1,
+};
+
+struct bpf_map_def SEC("maps") stackid_hmap = {
+	.type = BPF_MAP_TYPE_HASH,
+	.key_size = sizeof(__u32),
+	.value_size = sizeof(__u32),
+	.max_entries = 10000,
+};
+
+struct bpf_map_def SEC("maps") stackmap = {
+	.type = BPF_MAP_TYPE_STACK_TRACE,
+	.key_size = sizeof(__u32),
+	.value_size = sizeof(__u64) * PERF_MAX_STACK_DEPTH,
+	.max_entries = 10000,
+};
+
+/* taken from /sys/kernel/debug/tracing/events/sched/sched_switch/format */
+struct sched_switch_args {
+	unsigned long long pad;
+	char prev_comm[16];
+	int prev_pid;
+	int prev_prio;
+	long long prev_state;
+	char next_comm[16];
+	int next_pid;
+	int next_prio;
+};
+
+SEC("tracepoint/sched/sched_switch")
+int oncpu(struct sched_switch_args *ctx)
+{
+	__u32 key = 0, val = 0, *value_p;
+
+	value_p = bpf_map_lookup_elem(&control_map, &key);
+	if (value_p && *value_p)
+		return 0; /* skip if non-zero *value_p */
+
+	/* The size of stackmap and stackid_hmap should be the same */
+	key = bpf_get_stackid(ctx, &stackmap, 0);
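+	/* bpf_get_stackid() returns a negative error on failure, hence
+	 * the signed cast before using key as a hash-map key
+	 */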
+	if ((int)key >= 0)
+		bpf_map_update_elem(&stackid_hmap, &key, &val, 0);
+
+	return 0;
+}
+
+char _license[] SEC("license") = "GPL";
+__u32 _version SEC("version") = 1; /* ignored by tracepoints, required by libbpf.a */
diff --git a/tools/testing/selftests/bpf/test_tcpbpf.h b/tools/testing/selftests/bpf/test_tcpbpf.h
new file mode 100644
index 000000000000..2fe43289943c
--- /dev/null
+++ b/tools/testing/selftests/bpf/test_tcpbpf.h
@@ -0,0 +1,16 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#ifndef _TEST_TCPBPF_H
+#define _TEST_TCPBPF_H
+
+struct tcpbpf_globals {
+	__u32 event_map;
+	__u32 total_retrans;
+	__u32 data_segs_in;
+	__u32 data_segs_out;
+	__u32 bad_cb_test_rv;
+	__u32 good_cb_test_rv;
+	__u64 bytes_received;
+	__u64 bytes_acked;
+};
+#endif
diff --git a/tools/testing/selftests/bpf/test_tcpbpf_kern.c b/tools/testing/selftests/bpf/test_tcpbpf_kern.c
new file mode 100644
index 000000000000..57119ad57a3f
--- /dev/null
+++ b/tools/testing/selftests/bpf/test_tcpbpf_kern.c
@@ -0,0 +1,115 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <stddef.h>
+#include <string.h>
+#include <linux/bpf.h>
+#include <linux/if_ether.h>
+#include <linux/if_packet.h>
+#include <linux/ip.h>
+#include <linux/in6.h>
+#include <linux/types.h>
+#include <linux/socket.h>
+#include <linux/tcp.h>
+#include <netinet/in.h>
+#include "bpf_helpers.h"
+#include "bpf_endian.h"
+#include "test_tcpbpf.h"
+
+struct bpf_map_def SEC("maps") global_map = {
+	.type = BPF_MAP_TYPE_ARRAY,
+	.key_size = sizeof(__u32),
+	.value_size = sizeof(struct tcpbpf_globals),
+	.max_entries = 2,
+};
+
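+/* Record that @event fired by setting its bit in global_map[0].event_map.
+ * The value is copied out of the map, updated and written back with
+ * BPF_ANY rather than modified in place.
+ */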
+static inline void update_event_map(int event)
+{
+	__u32 key = 0;
+	struct tcpbpf_globals g = {0}, *gp;
+
+	gp = bpf_map_lookup_elem(&global_map, &key);
+	if (gp)
+		g = *gp;
+	g.event_map |= (1 << event);
+	bpf_map_update_elem(&global_map, &key, &g, BPF_ANY);
+}
+
+int _version SEC("version") = 1;
+
+SEC("sockops")
+int bpf_testcb(struct bpf_sock_ops *skops)
+{
+	int rv = -1;
+	int bad_call_rv = 0;
+	int good_call_rv = 0;
+	int op;
+	int v = 0;
+
+	op = (int) skops->op;
+
+	update_event_map(op);
+
+	switch (op) {
+	case BPF_SOCK_OPS_ACTIVE_ESTABLISHED_CB:
+		/* Test failure to set largest cb flag (assumes not defined) */
+		bad_call_rv = bpf_sock_ops_cb_flags_set(skops, 0x80);
+		/* Set callback */
+		good_call_rv = bpf_sock_ops_cb_flags_set(skops,
+						 BPF_SOCK_OPS_STATE_CB_FLAG);
+		/* Update results */
+		{
+			__u32 key = 0;
+			struct tcpbpf_globals g, *gp;
+
+			gp = bpf_map_lookup_elem(&global_map, &key);
+			if (!gp)
+				break;
+			g = *gp;
+			g.bad_cb_test_rv = bad_call_rv;
+			g.good_cb_test_rv = good_call_rv;
+			bpf_map_update_elem(&global_map, &key, &g,
+					    BPF_ANY);
+		}
+		break;
+	case BPF_SOCK_OPS_PASSIVE_ESTABLISHED_CB:
+		skops->sk_txhash = 0x12345f;
+		v = 0xff;
+		rv = bpf_setsockopt(skops, SOL_IPV6, IPV6_TCLASS, &v,
+				    sizeof(v));
+		break;
+	case BPF_SOCK_OPS_RTO_CB:
+		break;
+	case BPF_SOCK_OPS_RETRANS_CB:
+		break;
+	case BPF_SOCK_OPS_STATE_CB:
+		if (skops->args[1] == BPF_TCP_CLOSE) {
+			__u32 key = 0;
+			struct tcpbpf_globals g, *gp;
+
+			gp = bpf_map_lookup_elem(&global_map, &key);
+			if (!gp)
+				break;
+			g = *gp;
+			g.total_retrans = skops->total_retrans;
+			g.data_segs_in = skops->data_segs_in;
+			g.data_segs_out = skops->data_segs_out;
+			g.bytes_received = skops->bytes_received;
+			g.bytes_acked = skops->bytes_acked;
+			bpf_map_update_elem(&global_map, &key, &g,
+					    BPF_ANY);
+		}
+		break;
+	default:
+		rv = -1;
+	}
+	skops->reply = rv;
+	return 1;
+}
+char _license[] SEC("license") = "GPL";
diff --git a/tools/testing/selftests/bpf/test_tcpbpf_user.c b/tools/testing/selftests/bpf/test_tcpbpf_user.c
new file mode 100644
index 000000000000..95a370f3d378
--- /dev/null
+++ b/tools/testing/selftests/bpf/test_tcpbpf_user.c
@@ -0,0 +1,126 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <stdio.h>
+#include <stdlib.h>
+#include <unistd.h>
+#include <errno.h>
+#include <signal.h>
+#include <string.h>
+#include <assert.h>
+#include <linux/perf_event.h>
+#include <linux/ptrace.h>
+#include <linux/bpf.h>
+#include <sys/ioctl.h>
+#include <sys/types.h>
+#include <sys/stat.h>
+#include <fcntl.h>
+#include <bpf/bpf.h>
+#include <bpf/libbpf.h>
+#include "bpf_util.h"
+#include "test_tcpbpf.h"
+
+static int bpf_find_map(const char *test, struct bpf_object *obj,
+			const char *name)
+{
+	struct bpf_map *map;
+
+	map = bpf_object__find_map_by_name(obj, name);
+	if (!map) {
+		printf("%s:FAIL:map '%s' not found\n", test, name);
+		return -1;
+	}
+	return bpf_map__fd(map);
+}
+
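+/* Run a shell command and warn, but do not abort, when it fails */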
+#define SYSTEM(CMD)						\
+	do {							\
+		if (system(CMD)) {				\
+			printf("system(%s) FAILS!\n", CMD);	\
+		}						\
+	} while (0)
+
+int main(int argc, char **argv)
+{
+	const char *file = "test_tcpbpf_kern.o";
+	struct tcpbpf_globals g = {0};
+	int cg_fd, prog_fd, map_fd;
+	bool debug_flag = false;
+	int error = EXIT_FAILURE;
+	struct bpf_object *obj;
+	char cmd[100], *dir;
+	struct stat buffer;
+	__u32 key = 0;
+	int pid;
+	int rv;
+
+	if (argc > 1 && strcmp(argv[1], "-d") == 0)
+		debug_flag = true;
+
+	dir = "/tmp/cgroupv2/foo";
+
+	if (stat(dir, &buffer) != 0) {
+		SYSTEM("mkdir -p /tmp/cgroupv2");
+		SYSTEM("mount -t cgroup2 none /tmp/cgroupv2");
+		SYSTEM("mkdir -p /tmp/cgroupv2/foo");
+	}
+	pid = (int) getpid();
+	sprintf(cmd, "echo %d >> /tmp/cgroupv2/foo/cgroup.procs", pid);
+	SYSTEM(cmd);
+
+	cg_fd = open(dir, O_DIRECTORY | O_RDONLY);
+	if (bpf_prog_load(file, BPF_PROG_TYPE_SOCK_OPS, &obj, &prog_fd)) {
+		printf("FAILED: load_bpf_file failed for: %s\n", file);
+		goto err;
+	}
+
+	rv = bpf_prog_attach(prog_fd, cg_fd, BPF_CGROUP_SOCK_OPS, 0);
+	if (rv) {
+		printf("FAILED: bpf_prog_attach: %d (%s)\n",
+		       rv, strerror(errno));
+		goto err;
+	}
+
+	SYSTEM("./tcp_server.py");
+
+	map_fd = bpf_find_map(__func__, obj, "global_map");
+	if (map_fd < 0)
+		goto err;
+
+	rv = bpf_map_lookup_elem(map_fd, &key, &g);
+	if (rv != 0) {
+		printf("FAILED: bpf_map_lookup_elem returns %d\n", rv);
+		goto err;
+	}
+
+	if (g.bytes_received != 501 || g.bytes_acked != 1002 ||
+	    g.data_segs_in != 1 || g.data_segs_out != 1 ||
+	    (g.event_map ^ 0x47e) != 0 || g.bad_cb_test_rv != 0x80 ||
+	    g.good_cb_test_rv != 0) {
+		printf("FAILED: Wrong stats\n");
+		if (debug_flag) {
+			printf("\n");
+			printf("bytes_received: %d (expecting 501)\n",
+			       (int)g.bytes_received);
+			printf("bytes_acked:    %d (expecting 1002)\n",
+			       (int)g.bytes_acked);
+			printf("data_segs_in:   %d (expecting 1)\n",
+			       g.data_segs_in);
+			printf("data_segs_out:  %d (expecting 1)\n",
+			       g.data_segs_out);
+			printf("event_map:      0x%x (at least 0x47e)\n",
+			       g.event_map);
+			printf("bad_cb_test_rv: 0x%x (expecting 0x80)\n",
+			       g.bad_cb_test_rv);
+			printf("good_cb_test_rv:0x%x (expecting 0)\n",
+			       g.good_cb_test_rv);
+		}
+		goto err;
+	}
+	printf("PASSED!\n");
+	error = 0;
+err:
+	bpf_prog_detach(cg_fd, BPF_CGROUP_SOCK_OPS);
+	return error;
+}
diff --git a/tools/testing/selftests/bpf/test_tracepoint.c b/tools/testing/selftests/bpf/test_tracepoint.c
new file mode 100644
index 000000000000..04bf084517e0
--- /dev/null
+++ b/tools/testing/selftests/bpf/test_tracepoint.c
@@ -0,0 +1,26 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (c) 2017 Facebook
+
+#include <linux/bpf.h>
+#include "bpf_helpers.h"
+
+/* taken from /sys/kernel/debug/tracing/events/sched/sched_switch/format */
+struct sched_switch_args {
+	unsigned long long pad;
+	char prev_comm[16];
+	int prev_pid;
+	int prev_prio;
+	long long prev_state;
+	char next_comm[16];
+	int next_pid;
+	int next_prio;
+};
+
+SEC("tracepoint/sched/sched_switch")
+int oncpu(struct sched_switch_args *ctx)
+{
+	return 0;
+}
+
+char _license[] SEC("license") = "GPL";
+__u32 _version SEC("version") = 1; /* ignored by tracepoints, required by libbpf.a */
diff --git a/tools/testing/selftests/bpf/test_verifier.c b/tools/testing/selftests/bpf/test_verifier.c
index 5ed4175c4ff8..697bd83de295 100644
--- a/tools/testing/selftests/bpf/test_verifier.c
+++ b/tools/testing/selftests/bpf/test_verifier.c
@@ -2,6 +2,7 @@
  * Testsuite for eBPF verifier
  *
  * Copyright (c) 2014 PLUMgrid, http://plumgrid.com
+ * Copyright (c) 2017 Facebook
  *
  * This program is free software; you can redistribute it and/or
  * modify it under the terms of version 2 of the GNU General Public
@@ -20,6 +21,7 @@
 #include <stddef.h>
 #include <stdbool.h>
 #include <sched.h>
+#include <limits.h>
 
 #include <sys/capability.h>
 #include <sys/resource.h>
@@ -28,6 +30,7 @@
 #include <linux/filter.h>
 #include <linux/bpf_perf_event.h>
 #include <linux/bpf.h>
+#include <linux/if_ether.h>
 
 #include <bpf/bpf.h>
 
@@ -48,6 +51,8 @@
 #define MAX_INSNS	512
 #define MAX_FIXUPS	8
 #define MAX_NR_MAPS	4
+#define POINTER_VALUE	0xcafe4all
+#define TEST_DATA_LEN	64
 
 #define F_NEEDS_EFFICIENT_UNALIGNED_ACCESS	(1 << 0)
 #define F_LOAD_WITH_STRICT_ALIGNMENT		(1 << 1)
@@ -61,6 +66,7 @@ struct bpf_test {
 	int fixup_map_in_map[MAX_FIXUPS];
 	const char *errstr;
 	const char *errstr_unpriv;
+	uint32_t retval;
 	enum {
 		UNDEF,
 		ACCEPT,
@@ -94,6 +100,326 @@ static struct bpf_test tests[] = {
 			BPF_EXIT_INSN(),
 		},
 		.result = ACCEPT,
+		.retval = -3,
+	},
+	{
+		"DIV32 by 0, zero check 1",
+		.insns = {
+			BPF_MOV32_IMM(BPF_REG_0, 42),
+			BPF_MOV32_IMM(BPF_REG_1, 0),
+			BPF_MOV32_IMM(BPF_REG_2, 1),
+			BPF_ALU32_REG(BPF_DIV, BPF_REG_2, BPF_REG_1),
+			BPF_EXIT_INSN(),
+		},
+		.result = ACCEPT,
+		.retval = 42,
+	},
+	{
+		"DIV32 by 0, zero check 2",
+		.insns = {
+			BPF_MOV32_IMM(BPF_REG_0, 42),
+			BPF_LD_IMM64(BPF_REG_1, 0xffffffff00000000LL),
+			BPF_MOV32_IMM(BPF_REG_2, 1),
+			BPF_ALU32_REG(BPF_DIV, BPF_REG_2, BPF_REG_1),
+			BPF_EXIT_INSN(),
+		},
+		.result = ACCEPT,
+		.retval = 42,
+	},
+	{
+		"DIV64 by 0, zero check",
+		.insns = {
+			BPF_MOV32_IMM(BPF_REG_0, 42),
+			BPF_MOV32_IMM(BPF_REG_1, 0),
+			BPF_MOV32_IMM(BPF_REG_2, 1),
+			BPF_ALU64_REG(BPF_DIV, BPF_REG_2, BPF_REG_1),
+			BPF_EXIT_INSN(),
+		},
+		.result = ACCEPT,
+		.retval = 42,
+	},
+	{
+		"MOD32 by 0, zero check 1",
+		.insns = {
+			BPF_MOV32_IMM(BPF_REG_0, 42),
+			BPF_MOV32_IMM(BPF_REG_1, 0),
+			BPF_MOV32_IMM(BPF_REG_2, 1),
+			BPF_ALU32_REG(BPF_MOD, BPF_REG_2, BPF_REG_1),
+			BPF_EXIT_INSN(),
+		},
+		.result = ACCEPT,
+		.retval = 42,
+	},
+	{
+		"MOD32 by 0, zero check 2",
+		.insns = {
+			BPF_MOV32_IMM(BPF_REG_0, 42),
+			BPF_LD_IMM64(BPF_REG_1, 0xffffffff00000000LL),
+			BPF_MOV32_IMM(BPF_REG_2, 1),
+			BPF_ALU32_REG(BPF_MOD, BPF_REG_2, BPF_REG_1),
+			BPF_EXIT_INSN(),
+		},
+		.result = ACCEPT,
+		.retval = 42,
+	},
+	{
+		"MOD64 by 0, zero check",
+		.insns = {
+			BPF_MOV32_IMM(BPF_REG_0, 42),
+			BPF_MOV32_IMM(BPF_REG_1, 0),
+			BPF_MOV32_IMM(BPF_REG_2, 1),
+			BPF_ALU64_REG(BPF_MOD, BPF_REG_2, BPF_REG_1),
+			BPF_EXIT_INSN(),
+		},
+		.result = ACCEPT,
+		.retval = 42,
+	},
+	{
+		"DIV32 by 0, zero check ok, cls",
+		.insns = {
+			BPF_MOV32_IMM(BPF_REG_0, 42),
+			BPF_MOV32_IMM(BPF_REG_1, 2),
+			BPF_MOV32_IMM(BPF_REG_2, 16),
+			BPF_ALU32_REG(BPF_DIV, BPF_REG_2, BPF_REG_1),
+			BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
+			BPF_EXIT_INSN(),
+		},
+		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
+		.result = ACCEPT,
+		.retval = 8,
+	},
+	{
+		"DIV32 by 0, zero check 1, cls",
+		.insns = {
+			BPF_MOV32_IMM(BPF_REG_1, 0),
+			BPF_MOV32_IMM(BPF_REG_0, 1),
+			BPF_ALU32_REG(BPF_DIV, BPF_REG_0, BPF_REG_1),
+			BPF_EXIT_INSN(),
+		},
+		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
+		.result = ACCEPT,
+		.retval = 0,
+	},
+	{
+		"DIV32 by 0, zero check 2, cls",
+		.insns = {
+			BPF_LD_IMM64(BPF_REG_1, 0xffffffff00000000LL),
+			BPF_MOV32_IMM(BPF_REG_0, 1),
+			BPF_ALU32_REG(BPF_DIV, BPF_REG_0, BPF_REG_1),
+			BPF_EXIT_INSN(),
+		},
+		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
+		.result = ACCEPT,
+		.retval = 0,
+	},
+	{
+		"DIV64 by 0, zero check, cls",
+		.insns = {
+			BPF_MOV32_IMM(BPF_REG_1, 0),
+			BPF_MOV32_IMM(BPF_REG_0, 1),
+			BPF_ALU64_REG(BPF_DIV, BPF_REG_0, BPF_REG_1),
+			BPF_EXIT_INSN(),
+		},
+		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
+		.result = ACCEPT,
+		.retval = 0,
+	},
+	{
+		"MOD32 by 0, zero check ok, cls",
+		.insns = {
+			BPF_MOV32_IMM(BPF_REG_0, 42),
+			BPF_MOV32_IMM(BPF_REG_1, 3),
+			BPF_MOV32_IMM(BPF_REG_2, 5),
+			BPF_ALU32_REG(BPF_MOD, BPF_REG_2, BPF_REG_1),
+			BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
+			BPF_EXIT_INSN(),
+		},
+		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
+		.result = ACCEPT,
+		.retval = 2,
+	},
+	{
+		"MOD32 by 0, zero check 1, cls",
+		.insns = {
+			BPF_MOV32_IMM(BPF_REG_1, 0),
+			BPF_MOV32_IMM(BPF_REG_0, 1),
+			BPF_ALU32_REG(BPF_MOD, BPF_REG_0, BPF_REG_1),
+			BPF_EXIT_INSN(),
+		},
+		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
+		.result = ACCEPT,
+		.retval = 1,
+	},
+	{
+		"MOD32 by 0, zero check 2, cls",
+		.insns = {
+			BPF_LD_IMM64(BPF_REG_1, 0xffffffff00000000LL),
+			BPF_MOV32_IMM(BPF_REG_0, 1),
+			BPF_ALU32_REG(BPF_MOD, BPF_REG_0, BPF_REG_1),
+			BPF_EXIT_INSN(),
+		},
+		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
+		.result = ACCEPT,
+		.retval = 1,
+	},
+	{
+		"MOD64 by 0, zero check 1, cls",
+		.insns = {
+			BPF_MOV32_IMM(BPF_REG_1, 0),
+			BPF_MOV32_IMM(BPF_REG_0, 2),
+			BPF_ALU64_REG(BPF_MOD, BPF_REG_0, BPF_REG_1),
+			BPF_EXIT_INSN(),
+		},
+		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
+		.result = ACCEPT,
+		.retval = 2,
+	},
+	{
+		"MOD64 by 0, zero check 2, cls",
+		.insns = {
+			BPF_MOV32_IMM(BPF_REG_1, 0),
+			BPF_MOV32_IMM(BPF_REG_0, -1),
+			BPF_ALU64_REG(BPF_MOD, BPF_REG_0, BPF_REG_1),
+			BPF_EXIT_INSN(),
+		},
+		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
+		.result = ACCEPT,
+		.retval = -1,
+	},
+	/* Just make sure that JITs use udiv/umod, as otherwise we would
+	 * get an exception from the INT_MIN/-1 overflow, similar to the
+	 * div-by-zero case.
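+	 * With unsigned division semantics, INT_MIN / -1 yields 0 and
+	 * INT_MIN % -1 yields INT_MIN, which is what the retvals below
+	 * check.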
+	 */
+	{
+		"DIV32 overflow, check 1",
+		.insns = {
+			BPF_MOV32_IMM(BPF_REG_1, -1),
+			BPF_MOV32_IMM(BPF_REG_0, INT_MIN),
+			BPF_ALU32_REG(BPF_DIV, BPF_REG_0, BPF_REG_1),
+			BPF_EXIT_INSN(),
+		},
+		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
+		.result = ACCEPT,
+		.retval = 0,
+	},
+	{
+		"DIV32 overflow, check 2",
+		.insns = {
+			BPF_MOV32_IMM(BPF_REG_0, INT_MIN),
+			BPF_ALU32_IMM(BPF_DIV, BPF_REG_0, -1),
+			BPF_EXIT_INSN(),
+		},
+		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
+		.result = ACCEPT,
+		.retval = 0,
+	},
+	{
+		"DIV64 overflow, check 1",
+		.insns = {
+			BPF_MOV64_IMM(BPF_REG_1, -1),
+			BPF_LD_IMM64(BPF_REG_0, LLONG_MIN),
+			BPF_ALU64_REG(BPF_DIV, BPF_REG_0, BPF_REG_1),
+			BPF_EXIT_INSN(),
+		},
+		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
+		.result = ACCEPT,
+		.retval = 0,
+	},
+	{
+		"DIV64 overflow, check 2",
+		.insns = {
+			BPF_LD_IMM64(BPF_REG_0, LLONG_MIN),
+			BPF_ALU64_IMM(BPF_DIV, BPF_REG_0, -1),
+			BPF_EXIT_INSN(),
+		},
+		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
+		.result = ACCEPT,
+		.retval = 0,
+	},
+	{
+		"MOD32 overflow, check 1",
+		.insns = {
+			BPF_MOV32_IMM(BPF_REG_1, -1),
+			BPF_MOV32_IMM(BPF_REG_0, INT_MIN),
+			BPF_ALU32_REG(BPF_MOD, BPF_REG_0, BPF_REG_1),
+			BPF_EXIT_INSN(),
+		},
+		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
+		.result = ACCEPT,
+		.retval = INT_MIN,
+	},
+	{
+		"MOD32 overflow, check 2",
+		.insns = {
+			BPF_MOV32_IMM(BPF_REG_0, INT_MIN),
+			BPF_ALU32_IMM(BPF_MOD, BPF_REG_0, -1),
+			BPF_EXIT_INSN(),
+		},
+		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
+		.result = ACCEPT,
+		.retval = INT_MIN,
+	},
+	{
+		"MOD64 overflow, check 1",
+		.insns = {
+			BPF_MOV64_IMM(BPF_REG_1, -1),
+			BPF_LD_IMM64(BPF_REG_2, LLONG_MIN),
+			BPF_MOV64_REG(BPF_REG_3, BPF_REG_2),
+			BPF_ALU64_REG(BPF_MOD, BPF_REG_2, BPF_REG_1),
+			BPF_MOV32_IMM(BPF_REG_0, 0),
+			BPF_JMP_REG(BPF_JNE, BPF_REG_3, BPF_REG_2, 1),
+			BPF_MOV32_IMM(BPF_REG_0, 1),
+			BPF_EXIT_INSN(),
+		},
+		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
+		.result = ACCEPT,
+		.retval = 1,
+	},
+	{
+		"MOD64 overflow, check 2",
+		.insns = {
+			BPF_LD_IMM64(BPF_REG_2, LLONG_MIN),
+			BPF_MOV64_REG(BPF_REG_3, BPF_REG_2),
+			BPF_ALU64_IMM(BPF_MOD, BPF_REG_2, -1),
+			BPF_MOV32_IMM(BPF_REG_0, 0),
+			BPF_JMP_REG(BPF_JNE, BPF_REG_3, BPF_REG_2, 1),
+			BPF_MOV32_IMM(BPF_REG_0, 1),
+			BPF_EXIT_INSN(),
+		},
+		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
+		.result = ACCEPT,
+		.retval = 1,
+	},
+	{
+		"xor32 zero extend check",
+		.insns = {
+			BPF_MOV32_IMM(BPF_REG_2, -1),
+			BPF_ALU64_IMM(BPF_LSH, BPF_REG_2, 32),
+			BPF_ALU64_IMM(BPF_OR, BPF_REG_2, 0xffff),
+			BPF_ALU32_REG(BPF_XOR, BPF_REG_2, BPF_REG_2),
+			BPF_MOV32_IMM(BPF_REG_0, 2),
+			BPF_JMP_IMM(BPF_JNE, BPF_REG_2, 0, 1),
+			BPF_MOV32_IMM(BPF_REG_0, 1),
+			BPF_EXIT_INSN(),
+		},
+		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
+		.result = ACCEPT,
+		.retval = 1,
+	},
+	{
+		"empty prog",
+		.insns = {
+		},
+		.errstr = "unknown opcode 00",
+		.result = REJECT,
+	},
+	{
+		"only exit insn",
+		.insns = {
+			BPF_EXIT_INSN(),
+		},
+		.errstr = "R0 !read_ok",
+		.result = REJECT,
 	},
 	{
 		"unreachable",
@@ -209,6 +535,7 @@ static struct bpf_test tests[] = {
 			BPF_EXIT_INSN(),
 		},
 		.result = ACCEPT,
+		.retval = 1,
 	},
 	{
 		"test8 ld_imm64",
@@ -280,7 +607,7 @@ static struct bpf_test tests[] = {
 			BPF_EXIT_INSN(),
 		},
 		.result = REJECT,
-		.errstr = "BPF_ARSH not supported for 32 bit ALU",
+		.errstr = "unknown opcode c4",
 	},
 	{
 		"arsh32 on reg",
@@ -291,7 +618,7 @@ static struct bpf_test tests[] = {
 			BPF_EXIT_INSN(),
 		},
 		.result = REJECT,
-		.errstr = "BPF_ARSH not supported for 32 bit ALU",
+		.errstr = "unknown opcode cc",
 	},
 	{
 		"arsh64 on imm",
@@ -317,7 +644,7 @@ static struct bpf_test tests[] = {
 		.insns = {
 			BPF_ALU64_REG(BPF_MOV, BPF_REG_0, BPF_REG_2),
 		},
-		.errstr = "jump out of range",
+		.errstr = "not an exit",
 		.result = REJECT,
 	},
 	{
@@ -407,7 +734,7 @@ static struct bpf_test tests[] = {
 			BPF_RAW_INSN(BPF_JMP | BPF_CALL | BPF_X, 0, 0, 0, 0),
 			BPF_EXIT_INSN(),
 		},
-		.errstr = "BPF_CALL uses reserved",
+		.errstr = "unknown opcode 8d",
 		.result = REJECT,
 	},
 	{
@@ -516,6 +843,7 @@ static struct bpf_test tests[] = {
 		.errstr_unpriv = "R0 leaks addr",
 		.result = ACCEPT,
 		.result_unpriv = REJECT,
+		.retval = POINTER_VALUE,
 	},
 	{
 		"check valid spill/fill, skb mark",
@@ -596,7 +924,7 @@ static struct bpf_test tests[] = {
 			BPF_RAW_INSN(0, 0, 0, 0, 0),
 			BPF_EXIT_INSN(),
 		},
-		.errstr = "invalid BPF_LD_IMM",
+		.errstr = "unknown opcode 00",
 		.result = REJECT,
 	},
 	{
@@ -614,7 +942,7 @@ static struct bpf_test tests[] = {
 			BPF_RAW_INSN(-1, 0, 0, 0, 0),
 			BPF_EXIT_INSN(),
 		},
-		.errstr = "invalid BPF_ALU opcode f0",
+		.errstr = "unknown opcode ff",
 		.result = REJECT,
 	},
 	{
@@ -623,7 +951,7 @@ static struct bpf_test tests[] = {
 			BPF_RAW_INSN(-1, -1, -1, -1, -1),
 			BPF_EXIT_INSN(),
 		},
-		.errstr = "invalid BPF_ALU opcode f0",
+		.errstr = "unknown opcode ff",
 		.result = REJECT,
 	},
 	{
@@ -802,6 +1130,7 @@ static struct bpf_test tests[] = {
 		.errstr_unpriv = "R1 pointer comparison",
 		.result_unpriv = REJECT,
 		.result = ACCEPT,
+		.retval = -ENOENT,
 	},
 	{
 		"jump test 4",
@@ -1822,6 +2151,7 @@ static struct bpf_test tests[] = {
 			BPF_EXIT_INSN(),
 		},
 		.result = ACCEPT,
+		.retval = 0xfaceb00c,
 	},
 	{
 		"PTR_TO_STACK store/load - bad alignment on off",
@@ -1880,6 +2210,7 @@ static struct bpf_test tests[] = {
 		.result = ACCEPT,
 		.result_unpriv = REJECT,
 		.errstr_unpriv = "R0 leaks addr",
+		.retval = POINTER_VALUE,
 	},
 	{
 		"unpriv: add const to pointer",
@@ -2053,6 +2384,7 @@ static struct bpf_test tests[] = {
 			BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_6, 0),
 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
 				     BPF_FUNC_get_hash_recalc),
+			BPF_MOV64_IMM(BPF_REG_0, 0),
 			BPF_EXIT_INSN(),
 		},
 		.result = ACCEPT,
@@ -2840,6 +3172,7 @@ static struct bpf_test tests[] = {
 		},
 		.result = ACCEPT,
 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
+		.retval = 1,
 	},
 	{
 		"direct packet access: test12 (and, good access)",
@@ -2864,6 +3197,7 @@ static struct bpf_test tests[] = {
 		},
 		.result = ACCEPT,
 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
+		.retval = 1,
 	},
 	{
 		"direct packet access: test13 (branches, good access)",
@@ -2894,6 +3228,7 @@ static struct bpf_test tests[] = {
 		},
 		.result = ACCEPT,
 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
+		.retval = 1,
 	},
 	{
 		"direct packet access: test14 (pkt_ptr += 0, CONST_IMM, good access)",
@@ -2917,6 +3252,7 @@ static struct bpf_test tests[] = {
 		},
 		.result = ACCEPT,
 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
+		.retval = 1,
 	},
 	{
 		"direct packet access: test15 (spill with xadd)",
@@ -3203,6 +3539,7 @@ static struct bpf_test tests[] = {
 		},
 		.result = ACCEPT,
 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
+		.retval = 1,
 	},
 	{
 		"direct packet access: test28 (marking on <=, bad access)",
@@ -5700,7 +6037,7 @@ static struct bpf_test tests[] = {
 		"helper access to variable memory: size > 0 not allowed on NULL (ARG_PTR_TO_MEM_OR_NULL)",
 		.insns = {
 			BPF_MOV64_IMM(BPF_REG_1, 0),
-			BPF_MOV64_IMM(BPF_REG_2, 0),
+			BPF_MOV64_IMM(BPF_REG_2, 1),
 			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_2, -128),
 			BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_10, -128),
 			BPF_ALU64_IMM(BPF_AND, BPF_REG_2, 64),
@@ -5822,6 +6159,7 @@ static struct bpf_test tests[] = {
 		},
 		.result = ACCEPT,
 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
+		.retval = 0 /* csum_diff of 64-byte packet */,
 	},
 	{
 		"helper access to variable memory: size = 0 not allowed on NULL (!ARG_PTR_TO_MEM_OR_NULL)",
@@ -5935,7 +6273,7 @@ static struct bpf_test tests[] = {
 			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -24),
 			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -16),
 			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -8),
-			BPF_MOV64_IMM(BPF_REG_2, 0),
+			BPF_MOV64_IMM(BPF_REG_2, 1),
 			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_2, -128),
 			BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_10, -128),
 			BPF_ALU64_IMM(BPF_AND, BPF_REG_2, 63),
@@ -6190,6 +6528,7 @@ static struct bpf_test tests[] = {
 		},
 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
 		.result = ACCEPT,
+		.retval = 42 /* ultimate return value */,
 	},
 	{
 		"ld_ind: check calling conv, r1",
@@ -6261,6 +6600,7 @@ static struct bpf_test tests[] = {
 			BPF_EXIT_INSN(),
 		},
 		.result = ACCEPT,
+		.retval = 1,
 	},
 	{
 		"check bpf_perf_event_data->sample_period byte load permitted",
@@ -7248,6 +7588,7 @@ static struct bpf_test tests[] = {
 		},
 		.fixup_map1 = { 3 },
 		.result = ACCEPT,
+		.retval = POINTER_VALUE,
 		.result_unpriv = REJECT,
 		.errstr_unpriv = "R0 leaks addr as return value"
 	},
@@ -7268,6 +7609,7 @@ static struct bpf_test tests[] = {
 		},
 		.fixup_map1 = { 3 },
 		.result = ACCEPT,
+		.retval = POINTER_VALUE,
 		.result_unpriv = REJECT,
 		.errstr_unpriv = "R0 leaks addr as return value"
 	},
@@ -7434,7 +7776,7 @@ static struct bpf_test tests[] = {
 			},
 			BPF_EXIT_INSN(),
 		},
-		.errstr = "BPF_END uses reserved fields",
+		.errstr = "unknown opcode d7",
 		.result = REJECT,
 	},
 	{
@@ -7709,6 +8051,7 @@ static struct bpf_test tests[] = {
 			BPF_EXIT_INSN(),
 		},
 		.result = ACCEPT,
+		.retval = TEST_DATA_LEN,
 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
 	},
 	{
@@ -8656,6 +8999,7 @@ static struct bpf_test tests[] = {
 			BPF_EXIT_INSN(),
 		},
 		.result = ACCEPT,
+		.retval = 1,
 	},
 	{
 		"check deducing bounds from const, 3",
@@ -8826,6 +9170,1959 @@ static struct bpf_test tests[] = {
 		.result = REJECT,
 		.prog_type = BPF_PROG_TYPE_CGROUP_SOCK,
 	},
+	{
+		"calls: basic sanity",
+		.insns = {
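+			/* src_reg == 1 (BPF_PSEUDO_CALL) marks a bpf-to-bpf
+			 * call; imm is the callee offset relative to the
+			 * next insn, so imm == 2 targets insn 3 below
+			 */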
+			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
+			BPF_MOV64_IMM(BPF_REG_0, 1),
+			BPF_EXIT_INSN(),
+			BPF_MOV64_IMM(BPF_REG_0, 2),
+			BPF_EXIT_INSN(),
+		},
+		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
+		.result = ACCEPT,
+	},
+	{
+		"calls: not on unpriviledged",
+		.insns = {
+			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
+			BPF_MOV64_IMM(BPF_REG_0, 1),
+			BPF_EXIT_INSN(),
+			BPF_MOV64_IMM(BPF_REG_0, 2),
+			BPF_EXIT_INSN(),
+		},
+		.errstr_unpriv = "function calls to other bpf functions are allowed for root only",
+		.result_unpriv = REJECT,
+		.result = ACCEPT,
+		.retval = 1,
+	},
+	{
+		"calls: div by 0 in subprog",
+		.insns = {
+			BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
+			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 8),
+			BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
+			BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_1,
+				    offsetof(struct __sk_buff, data_end)),
+			BPF_MOV64_REG(BPF_REG_2, BPF_REG_0),
+			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, 8),
+			BPF_JMP_REG(BPF_JGT, BPF_REG_2, BPF_REG_1, 1),
+			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0),
+			BPF_MOV64_IMM(BPF_REG_0, 1),
+			BPF_EXIT_INSN(),
+			BPF_MOV32_IMM(BPF_REG_2, 0),
+			BPF_MOV32_IMM(BPF_REG_3, 1),
+			BPF_ALU32_REG(BPF_DIV, BPF_REG_3, BPF_REG_2),
+			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
+				    offsetof(struct __sk_buff, data)),
+			BPF_EXIT_INSN(),
+		},
+		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
+		.result = ACCEPT,
+		.retval = 1,
+	},
+	{
+		"calls: multiple ret types in subprog 1",
+		.insns = {
+			BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
+			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 8),
+			BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
+			BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_1,
+				    offsetof(struct __sk_buff, data_end)),
+			BPF_MOV64_REG(BPF_REG_2, BPF_REG_0),
+			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, 8),
+			BPF_JMP_REG(BPF_JGT, BPF_REG_2, BPF_REG_1, 1),
+			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0),
+			BPF_MOV64_IMM(BPF_REG_0, 1),
+			BPF_EXIT_INSN(),
+			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
+				    offsetof(struct __sk_buff, data)),
+			BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
+			BPF_MOV32_IMM(BPF_REG_0, 42),
+			BPF_EXIT_INSN(),
+		},
+		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
+		.result = REJECT,
+		.errstr = "R0 invalid mem access 'inv'",
+	},
+	{
+		"calls: multiple ret types in subprog 2",
+		.insns = {
+			BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
+			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 8),
+			BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
+			BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_1,
+				    offsetof(struct __sk_buff, data_end)),
+			BPF_MOV64_REG(BPF_REG_2, BPF_REG_0),
+			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, 8),
+			BPF_JMP_REG(BPF_JGT, BPF_REG_2, BPF_REG_1, 1),
+			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0),
+			BPF_MOV64_IMM(BPF_REG_0, 1),
+			BPF_EXIT_INSN(),
+			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
+				    offsetof(struct __sk_buff, data)),
+			BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
+			BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 9),
+			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+			BPF_LD_MAP_FD(BPF_REG_1, 0),
+			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
+				     BPF_FUNC_map_lookup_elem),
+			BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
+			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_6,
+				    offsetof(struct __sk_buff, data)),
+			BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 64),
+			BPF_EXIT_INSN(),
+		},
+		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
+		.fixup_map1 = { 16 },
+		.result = REJECT,
+		.errstr = "R0 min value is outside of the array range",
+	},
+	{
+		"calls: overlapping caller/callee",
+		.insns = {
+			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 0),
+			BPF_MOV64_IMM(BPF_REG_0, 1),
+			BPF_EXIT_INSN(),
+		},
+		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
+		.errstr = "last insn is not an exit or jmp",
+		.result = REJECT,
+	},
+	{
+		"calls: wrong recursive calls",
+		.insns = {
+			BPF_JMP_IMM(BPF_JA, 0, 0, 4),
+			BPF_JMP_IMM(BPF_JA, 0, 0, 4),
+			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, -2),
+			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, -2),
+			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, -2),
+			BPF_MOV64_IMM(BPF_REG_0, 1),
+			BPF_EXIT_INSN(),
+		},
+		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
+		.errstr = "jump out of range",
+		.result = REJECT,
+	},
+	{
+		"calls: wrong src reg",
+		.insns = {
+			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 2, 0, 0),
+			BPF_MOV64_IMM(BPF_REG_0, 1),
+			BPF_EXIT_INSN(),
+		},
+		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
+		.errstr = "BPF_CALL uses reserved fields",
+		.result = REJECT,
+	},
+	{
+		"calls: wrong off value",
+		.insns = {
+			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, -1, 2),
+			BPF_MOV64_IMM(BPF_REG_0, 1),
+			BPF_EXIT_INSN(),
+			BPF_MOV64_IMM(BPF_REG_0, 2),
+			BPF_EXIT_INSN(),
+		},
+		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
+		.errstr = "BPF_CALL uses reserved fields",
+		.result = REJECT,
+	},
+	{
+		"calls: jump back loop",
+		.insns = {
+			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, -1),
+			BPF_MOV64_IMM(BPF_REG_0, 1),
+			BPF_EXIT_INSN(),
+		},
+		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
+		.errstr = "back-edge from insn 0 to 0",
+		.result = REJECT,
+	},
+	{
+		"calls: conditional call",
+		.insns = {
+			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
+				    offsetof(struct __sk_buff, mark)),
+			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 3),
+			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
+			BPF_MOV64_IMM(BPF_REG_0, 1),
+			BPF_EXIT_INSN(),
+			BPF_MOV64_IMM(BPF_REG_0, 2),
+			BPF_EXIT_INSN(),
+		},
+		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
+		.errstr = "jump out of range",
+		.result = REJECT,
+	},
+	{
+		"calls: conditional call 2",
+		.insns = {
+			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
+				    offsetof(struct __sk_buff, mark)),
+			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 3),
+			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 4),
+			BPF_MOV64_IMM(BPF_REG_0, 1),
+			BPF_EXIT_INSN(),
+			BPF_MOV64_IMM(BPF_REG_0, 2),
+			BPF_EXIT_INSN(),
+			BPF_MOV64_IMM(BPF_REG_0, 3),
+			BPF_EXIT_INSN(),
+		},
+		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
+		.result = ACCEPT,
+	},
+	{
+		"calls: conditional call 3",
+		.insns = {
+			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
+				    offsetof(struct __sk_buff, mark)),
+			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 3),
+			BPF_JMP_IMM(BPF_JA, 0, 0, 4),
+			BPF_MOV64_IMM(BPF_REG_0, 1),
+			BPF_EXIT_INSN(),
+			BPF_MOV64_IMM(BPF_REG_0, 1),
+			BPF_JMP_IMM(BPF_JA, 0, 0, -6),
+			BPF_MOV64_IMM(BPF_REG_0, 3),
+			BPF_JMP_IMM(BPF_JA, 0, 0, -6),
+		},
+		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
+		.errstr = "back-edge from insn",
+		.result = REJECT,
+	},
+	{
+		"calls: conditional call 4",
+		.insns = {
+			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
+				    offsetof(struct __sk_buff, mark)),
+			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 3),
+			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 4),
+			BPF_MOV64_IMM(BPF_REG_0, 1),
+			BPF_EXIT_INSN(),
+			BPF_MOV64_IMM(BPF_REG_0, 1),
+			BPF_JMP_IMM(BPF_JA, 0, 0, -5),
+			BPF_MOV64_IMM(BPF_REG_0, 3),
+			BPF_EXIT_INSN(),
+		},
+		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
+		.result = ACCEPT,
+	},
+	{
+		"calls: conditional call 5",
+		.insns = {
+			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
+				    offsetof(struct __sk_buff, mark)),
+			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 3),
+			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 4),
+			BPF_MOV64_IMM(BPF_REG_0, 1),
+			BPF_EXIT_INSN(),
+			BPF_MOV64_IMM(BPF_REG_0, 1),
+			BPF_JMP_IMM(BPF_JA, 0, 0, -6),
+			BPF_MOV64_IMM(BPF_REG_0, 3),
+			BPF_EXIT_INSN(),
+		},
+		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
+		.errstr = "back-edge from insn",
+		.result = REJECT,
+	},
+	{
+		"calls: conditional call 6",
+		.insns = {
+			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
+			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, -2),
+			BPF_EXIT_INSN(),
+			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
+				    offsetof(struct __sk_buff, mark)),
+			BPF_EXIT_INSN(),
+		},
+		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
+		.errstr = "back-edge from insn",
+		.result = REJECT,
+	},
+	{
+		"calls: using r0 returned by callee",
+		.insns = {
+			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
+			BPF_EXIT_INSN(),
+			BPF_MOV64_IMM(BPF_REG_0, 2),
+			BPF_EXIT_INSN(),
+		},
+		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
+		.result = ACCEPT,
+	},
+	{
+		"calls: using uninit r0 from callee",
+		.insns = {
+			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
+			BPF_EXIT_INSN(),
+			BPF_EXIT_INSN(),
+		},
+		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
+		.errstr = "!read_ok",
+		.result = REJECT,
+	},
+	{
+		"calls: callee is using r1",
+		.insns = {
+			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
+			BPF_EXIT_INSN(),
+			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
+				    offsetof(struct __sk_buff, len)),
+			BPF_EXIT_INSN(),
+		},
+		.prog_type = BPF_PROG_TYPE_SCHED_ACT,
+		.result = ACCEPT,
+		.retval = TEST_DATA_LEN,
+	},
+	{
+		"calls: callee using args1",
+		.insns = {
+			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
+			BPF_EXIT_INSN(),
+			BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
+			BPF_EXIT_INSN(),
+		},
+		.errstr_unpriv = "allowed for root only",
+		.result_unpriv = REJECT,
+		.result = ACCEPT,
+		.retval = POINTER_VALUE,
+	},
+	{
+		"calls: callee using wrong args2",
+		.insns = {
+			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
+			BPF_EXIT_INSN(),
+			BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
+			BPF_EXIT_INSN(),
+		},
+		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
+		.errstr = "R2 !read_ok",
+		.result = REJECT,
+	},
+	{
+		"calls: callee using two args",
+		.insns = {
+			BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
+			BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_6,
+				    offsetof(struct __sk_buff, len)),
+			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_6,
+				    offsetof(struct __sk_buff, len)),
+			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
+			BPF_EXIT_INSN(),
+			BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
+			BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_2),
+			BPF_EXIT_INSN(),
+		},
+		.errstr_unpriv = "allowed for root only",
+		.result_unpriv = REJECT,
+		.result = ACCEPT,
+		.retval = TEST_DATA_LEN + TEST_DATA_LEN - ETH_HLEN - ETH_HLEN,
+	},
+	{
+		"calls: callee changing pkt pointers",
+		.insns = {
+			BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
+				    offsetof(struct xdp_md, data)),
+			BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1,
+				    offsetof(struct xdp_md, data_end)),
+			BPF_MOV64_REG(BPF_REG_8, BPF_REG_6),
+			BPF_ALU64_IMM(BPF_ADD, BPF_REG_8, 8),
+			BPF_JMP_REG(BPF_JGT, BPF_REG_8, BPF_REG_7, 2),
+			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
+			/* clear_all_pkt_pointers() has to walk all frames
+			 * to make sure that pkt pointers in the caller
+			 * are cleared when the callee calls a helper that
+			 * adjusts the packet size
+			 */
+			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
+			BPF_MOV32_IMM(BPF_REG_0, 0),
+			BPF_EXIT_INSN(),
+			BPF_MOV64_IMM(BPF_REG_2, 0),
+			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
+				     BPF_FUNC_xdp_adjust_head),
+			BPF_EXIT_INSN(),
+		},
+		.result = REJECT,
+		.errstr = "R6 invalid mem access 'inv'",
+		.prog_type = BPF_PROG_TYPE_XDP,
+	},
+	{
+		"calls: two calls with args",
+		.insns = {
+			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
+			BPF_EXIT_INSN(),
+			BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
+			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 6),
+			BPF_MOV64_REG(BPF_REG_7, BPF_REG_0),
+			BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
+			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
+			BPF_ALU64_REG(BPF_ADD, BPF_REG_7, BPF_REG_0),
+			BPF_MOV64_REG(BPF_REG_0, BPF_REG_7),
+			BPF_EXIT_INSN(),
+			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
+				    offsetof(struct __sk_buff, len)),
+			BPF_EXIT_INSN(),
+		},
+		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
+		.result = ACCEPT,
+		.retval = TEST_DATA_LEN + TEST_DATA_LEN,
+	},
+	{
+		"calls: calls with stack arith",
+		.insns = {
+			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -64),
+			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
+			BPF_EXIT_INSN(),
+			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -64),
+			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
+			BPF_EXIT_INSN(),
+			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -64),
+			BPF_MOV64_IMM(BPF_REG_0, 42),
+			BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_0, 0),
+			BPF_EXIT_INSN(),
+		},
+		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
+		.result = ACCEPT,
+		.retval = 42,
+	},
+	{
+		"calls: calls with misaligned stack access",
+		.insns = {
+			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -63),
+			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
+			BPF_EXIT_INSN(),
+			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -61),
+			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
+			BPF_EXIT_INSN(),
+			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -63),
+			BPF_MOV64_IMM(BPF_REG_0, 42),
+			BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_0, 0),
+			BPF_EXIT_INSN(),
+		},
+		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
+		.flags = F_LOAD_WITH_STRICT_ALIGNMENT,
+		.errstr = "misaligned stack access",
+		.result = REJECT,
+	},
+	{
+		"calls: calls control flow, jump test",
+		.insns = {
+			BPF_MOV64_IMM(BPF_REG_0, 42),
+			BPF_JMP_IMM(BPF_JA, 0, 0, 2),
+			BPF_MOV64_IMM(BPF_REG_0, 43),
+			BPF_JMP_IMM(BPF_JA, 0, 0, 1),
+			BPF_JMP_IMM(BPF_JA, 0, 0, -3),
+			BPF_EXIT_INSN(),
+		},
+		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
+		.result = ACCEPT,
+		.retval = 43,
+	},
+	{
+		"calls: calls control flow, jump test 2",
+		.insns = {
+			BPF_MOV64_IMM(BPF_REG_0, 42),
+			BPF_JMP_IMM(BPF_JA, 0, 0, 2),
+			BPF_MOV64_IMM(BPF_REG_0, 43),
+			BPF_JMP_IMM(BPF_JA, 0, 0, 1),
+			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, -3),
+			BPF_EXIT_INSN(),
+		},
+		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
+		.errstr = "jump out of range from insn 1 to 4",
+		.result = REJECT,
+	},
+	{
+		"calls: two calls with bad jump",
+		.insns = {
+			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
+			BPF_EXIT_INSN(),
+			BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
+			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 6),
+			BPF_MOV64_REG(BPF_REG_7, BPF_REG_0),
+			BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
+			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
+			BPF_ALU64_REG(BPF_ADD, BPF_REG_7, BPF_REG_0),
+			BPF_MOV64_REG(BPF_REG_0, BPF_REG_7),
+			BPF_EXIT_INSN(),
+			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
+				    offsetof(struct __sk_buff, len)),
+			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, -3),
+			BPF_EXIT_INSN(),
+		},
+		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
+		.errstr = "jump out of range from insn 11 to 9",
+		.result = REJECT,
+	},
+	{
+		"calls: recursive call. test1",
+		.insns = {
+			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
+			BPF_EXIT_INSN(),
+			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, -1),
+			BPF_EXIT_INSN(),
+		},
+		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
+		.errstr = "back-edge",
+		.result = REJECT,
+	},
+	{
+		"calls: recursive call. test2",
+		.insns = {
+			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
+			BPF_EXIT_INSN(),
+			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, -3),
+			BPF_EXIT_INSN(),
+		},
+		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
+		.errstr = "back-edge",
+		.result = REJECT,
+	},
+	{
+		"calls: unreachable code",
+		.insns = {
+			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
+			BPF_EXIT_INSN(),
+			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
+			BPF_EXIT_INSN(),
+			BPF_MOV64_IMM(BPF_REG_0, 0),
+			BPF_EXIT_INSN(),
+			BPF_MOV64_IMM(BPF_REG_0, 0),
+			BPF_EXIT_INSN(),
+		},
+		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
+		.errstr = "unreachable insn 6",
+		.result = REJECT,
+	},
+	{
+		"calls: invalid call",
+		.insns = {
+			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
+			BPF_EXIT_INSN(),
+			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, -4),
+			BPF_EXIT_INSN(),
+		},
+		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
+		.errstr = "invalid destination",
+		.result = REJECT,
+	},
+	{
+		"calls: invalid call 2",
+		.insns = {
+			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
+			BPF_EXIT_INSN(),
+			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 0x7fffffff),
+			BPF_EXIT_INSN(),
+		},
+		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
+		.errstr = "invalid destination",
+		.result = REJECT,
+	},
+	{
+		"calls: jumping across function bodies. test1",
+		.insns = {
+			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
+			BPF_MOV64_IMM(BPF_REG_0, 0),
+			BPF_EXIT_INSN(),
+			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, -3),
+			BPF_EXIT_INSN(),
+		},
+		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
+		.errstr = "jump out of range",
+		.result = REJECT,
+	},
+	{
+		"calls: jumping across function bodies. test2",
+		.insns = {
+			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 3),
+			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
+			BPF_MOV64_IMM(BPF_REG_0, 0),
+			BPF_EXIT_INSN(),
+			BPF_EXIT_INSN(),
+		},
+		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
+		.errstr = "jump out of range",
+		.result = REJECT,
+	},
+	{
+		"calls: call without exit",
+		.insns = {
+			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
+			BPF_EXIT_INSN(),
+			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
+			BPF_EXIT_INSN(),
+			BPF_MOV64_IMM(BPF_REG_0, 0),
+			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, -2),
+		},
+		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
+		.errstr = "not an exit",
+		.result = REJECT,
+	},
+	{
+		"calls: call into middle of ld_imm64",
+		.insns = {
+			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
+			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
+			BPF_MOV64_IMM(BPF_REG_0, 0),
+			BPF_EXIT_INSN(),
+			BPF_LD_IMM64(BPF_REG_0, 0),
+			BPF_EXIT_INSN(),
+		},
+		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
+		.errstr = "last insn",
+		.result = REJECT,
+	},
+	{
+		"calls: call into middle of other call",
+		.insns = {
+			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
+			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
+			BPF_MOV64_IMM(BPF_REG_0, 0),
+			BPF_EXIT_INSN(),
+			BPF_MOV64_IMM(BPF_REG_0, 0),
+			BPF_MOV64_IMM(BPF_REG_0, 0),
+			BPF_EXIT_INSN(),
+		},
+		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
+		.errstr = "last insn",
+		.result = REJECT,
+	},
+	{
+		"calls: ld_abs with changing ctx data in callee",
+		.insns = {
+			BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
+			BPF_LD_ABS(BPF_B, 0),
+			BPF_LD_ABS(BPF_H, 0),
+			BPF_LD_ABS(BPF_W, 0),
+			BPF_MOV64_REG(BPF_REG_7, BPF_REG_6),
+			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 5),
+			BPF_MOV64_REG(BPF_REG_6, BPF_REG_7),
+			BPF_LD_ABS(BPF_B, 0),
+			BPF_LD_ABS(BPF_H, 0),
+			BPF_LD_ABS(BPF_W, 0),
+			BPF_EXIT_INSN(),
+			BPF_MOV64_IMM(BPF_REG_2, 1),
+			BPF_MOV64_IMM(BPF_REG_3, 2),
+			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
+				     BPF_FUNC_skb_vlan_push),
+			BPF_EXIT_INSN(),
+		},
+		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
+		.errstr = "BPF_LD_[ABS|IND] instructions cannot be mixed",
+		.result = REJECT,
+	},
+	{
+		"calls: two calls with bad fallthrough",
+		.insns = {
+			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
+			BPF_EXIT_INSN(),
+			BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
+			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 6),
+			BPF_MOV64_REG(BPF_REG_7, BPF_REG_0),
+			BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
+			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
+			BPF_ALU64_REG(BPF_ADD, BPF_REG_7, BPF_REG_0),
+			BPF_MOV64_REG(BPF_REG_0, BPF_REG_7),
+			BPF_MOV64_REG(BPF_REG_0, BPF_REG_0),
+			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
+				    offsetof(struct __sk_buff, len)),
+			BPF_EXIT_INSN(),
+		},
+		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
+		.errstr = "not an exit",
+		.result = REJECT,
+	},
+	{
+		"calls: two calls with stack read",
+		.insns = {
+			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+			BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
+			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
+			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
+			BPF_EXIT_INSN(),
+			BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
+			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 6),
+			BPF_MOV64_REG(BPF_REG_7, BPF_REG_0),
+			BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
+			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
+			BPF_ALU64_REG(BPF_ADD, BPF_REG_7, BPF_REG_0),
+			BPF_MOV64_REG(BPF_REG_0, BPF_REG_7),
+			BPF_EXIT_INSN(),
+			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, 0),
+			BPF_EXIT_INSN(),
+		},
+		.prog_type = BPF_PROG_TYPE_XDP,
+		.result = ACCEPT,
+	},
+	{
+		"calls: two calls with stack write",
+		.insns = {
+			/* main prog */
+			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+			BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
+			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
+			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -16),
+			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
+			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_10, -16),
+			BPF_EXIT_INSN(),
+
+			/* subprog 1 */
+			BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
+			BPF_MOV64_REG(BPF_REG_7, BPF_REG_2),
+			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 7),
+			BPF_MOV64_REG(BPF_REG_8, BPF_REG_0),
+			BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
+			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 4),
+			BPF_ALU64_REG(BPF_ADD, BPF_REG_8, BPF_REG_0),
+			BPF_MOV64_REG(BPF_REG_0, BPF_REG_8),
+			/* write into stack frame of main prog */
+			BPF_STX_MEM(BPF_DW, BPF_REG_7, BPF_REG_0, 0),
+			BPF_EXIT_INSN(),
+
+			/* subprog 2 */
+			/* read from stack frame of main prog */
+			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, 0),
+			BPF_EXIT_INSN(),
+		},
+		.prog_type = BPF_PROG_TYPE_XDP,
+		.result = ACCEPT,
+	},
+	{
+		"calls: stack overflow using two frames (pre-call access)",
+		.insns = {
+			/* prog 1 */
+			BPF_ST_MEM(BPF_B, BPF_REG_10, -300, 0),
+			BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 1),
+			BPF_EXIT_INSN(),
+
+			/* prog 2 */
+			BPF_ST_MEM(BPF_B, BPF_REG_10, -300, 0),
+			BPF_MOV64_IMM(BPF_REG_0, 0),
+			BPF_EXIT_INSN(),
+		},
+		.prog_type = BPF_PROG_TYPE_XDP,
+		.errstr = "combined stack size",
+		.result = REJECT,
+	},
+	{
+		"calls: stack overflow using two frames (post-call access)",
+		.insns = {
+			/* prog 1 */
+			BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 2),
+			BPF_ST_MEM(BPF_B, BPF_REG_10, -300, 0),
+			BPF_EXIT_INSN(),
+
+			/* prog 2 */
+			BPF_ST_MEM(BPF_B, BPF_REG_10, -300, 0),
+			BPF_MOV64_IMM(BPF_REG_0, 0),
+			BPF_EXIT_INSN(),
+		},
+		.prog_type = BPF_PROG_TYPE_XDP,
+		.errstr = "combined stack size",
+		.result = REJECT,
+	},
+	{
+		"calls: stack depth check using three frames. test1",
+		.insns = {
+			/* main */
+			BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 4), /* call A */
+			BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 5), /* call B */
+			BPF_ST_MEM(BPF_B, BPF_REG_10, -32, 0),
+			BPF_MOV64_IMM(BPF_REG_0, 0),
+			BPF_EXIT_INSN(),
+			/* A */
+			BPF_ST_MEM(BPF_B, BPF_REG_10, -256, 0),
+			BPF_EXIT_INSN(),
+			/* B */
+			BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, -3), /* call A */
+			BPF_ST_MEM(BPF_B, BPF_REG_10, -64, 0),
+			BPF_EXIT_INSN(),
+		},
+		.prog_type = BPF_PROG_TYPE_XDP,
+		/* stack_main=32, stack_A=256, stack_B=64
+		 * and max(main+A, main+A+B) < 512
+		 */
+		.result = ACCEPT,
+	},
+	{
+		"calls: stack depth check using three frames. test2",
+		.insns = {
+			/* main */
+			BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 4), /* call A */
+			BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 5), /* call B */
+			BPF_ST_MEM(BPF_B, BPF_REG_10, -32, 0),
+			BPF_MOV64_IMM(BPF_REG_0, 0),
+			BPF_EXIT_INSN(),
+			/* A */
+			BPF_ST_MEM(BPF_B, BPF_REG_10, -64, 0),
+			BPF_EXIT_INSN(),
+			/* B */
+			BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, -3), /* call A */
+			BPF_ST_MEM(BPF_B, BPF_REG_10, -256, 0),
+			BPF_EXIT_INSN(),
+		},
+		.prog_type = BPF_PROG_TYPE_XDP,
+		/* stack_main=32, stack_A=64, stack_B=256
+		 * and max(main+A, main+A+B) < 512
+		 */
+		.result = ACCEPT,
+	},
+	{
+		"calls: stack depth check using three frames. test3",
+		.insns = {
+			/* main */
+			BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
+			BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 6), /* call A */
+			BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
+			BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 8), /* call B */
+			BPF_JMP_IMM(BPF_JGE, BPF_REG_6, 0, 1),
+			BPF_ST_MEM(BPF_B, BPF_REG_10, -64, 0),
+			BPF_MOV64_IMM(BPF_REG_0, 0),
+			BPF_EXIT_INSN(),
+			/* A */
+			BPF_JMP_IMM(BPF_JLT, BPF_REG_1, 10, 1),
+			BPF_EXIT_INSN(),
+			BPF_ST_MEM(BPF_B, BPF_REG_10, -224, 0),
+			BPF_JMP_IMM(BPF_JA, 0, 0, -3),
+			/* B */
+			BPF_JMP_IMM(BPF_JGT, BPF_REG_1, 2, 1),
+			BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, -6), /* call A */
+			BPF_ST_MEM(BPF_B, BPF_REG_10, -256, 0),
+			BPF_EXIT_INSN(),
+		},
+		.prog_type = BPF_PROG_TYPE_XDP,
+		/* stack_main=64, stack_A=224, stack_B=256
+		 * and max(main+A, main+A+B) > 512
+		 */
+		.errstr = "combined stack",
+		.result = REJECT,
+	},
+	{
+		"calls: stack depth check using three frames. test4",
+		/* void main(void) {
+		 *   func1(0);
+		 *   func1(1);
+		 *   func2(1);
+		 * }
+		 * void func1(int alloc_or_recurse) {
+		 *   if (alloc_or_recurse) {
+		 *     frame_pointer[-300] = 1;
+		 *   } else {
+		 *     func2(alloc_or_recurse);
+		 *   }
+		 * }
+		 * void func2(int alloc_or_recurse) {
+		 *   if (alloc_or_recurse) {
+		 *     frame_pointer[-300] = 1;
+		 *   }
+		 * }
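+		 *
+		 * Both func1 and func2 may touch frame_pointer[-300], so the
+		 * verifier charges ~300 bytes to each frame on the worst-case
+		 * chain main -> func1 -> func2, exceeding the 512 byte limit.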
+		 */
+		.insns = {
+			/* main */
+			BPF_MOV64_IMM(BPF_REG_1, 0),
+			BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 6), /* call A */
+			BPF_MOV64_IMM(BPF_REG_1, 1),
+			BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 4), /* call A */
+			BPF_MOV64_IMM(BPF_REG_1, 1),
+			BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 7), /* call B */
+			BPF_MOV64_IMM(BPF_REG_0, 0),
+			BPF_EXIT_INSN(),
+			/* A */
+			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 2),
+			BPF_ST_MEM(BPF_B, BPF_REG_10, -300, 0),
+			BPF_EXIT_INSN(),
+			BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 1), /* call B */
+			BPF_EXIT_INSN(),
+			/* B */
+			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 1),
+			BPF_ST_MEM(BPF_B, BPF_REG_10, -300, 0),
+			BPF_EXIT_INSN(),
+		},
+		.prog_type = BPF_PROG_TYPE_XDP,
+		.result = REJECT,
+		.errstr = "combined stack",
+	},
+	{
+		"calls: stack depth check using three frames. test5",
+		.insns = {
+			/* main */
+			BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 1), /* call A */
+			BPF_EXIT_INSN(),
+			/* A */
+			BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 1), /* call B */
+			BPF_EXIT_INSN(),
+			/* B */
+			BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 1), /* call C */
+			BPF_EXIT_INSN(),
+			/* C */
+			BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 1), /* call D */
+			BPF_EXIT_INSN(),
+			/* D */
+			BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 1), /* call E */
+			BPF_EXIT_INSN(),
+			/* E */
+			BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 1), /* call F */
+			BPF_EXIT_INSN(),
+			/* F */
+			BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 1), /* call G */
+			BPF_EXIT_INSN(),
+			/* G */
+			BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 1), /* call H */
+			BPF_EXIT_INSN(),
+			/* H */
+			BPF_MOV64_IMM(BPF_REG_0, 0),
+			BPF_EXIT_INSN(),
+		},
+		.prog_type = BPF_PROG_TYPE_XDP,
+		.errstr = "call stack",
+		.result = REJECT,
+	},
+	{
+		"calls: spill into caller stack frame",
+		.insns = {
+			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+			BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
+			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
+			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
+			BPF_EXIT_INSN(),
+			BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_1, 0),
+			BPF_MOV64_IMM(BPF_REG_0, 0),
+			BPF_EXIT_INSN(),
+		},
+		.prog_type = BPF_PROG_TYPE_XDP,
+		.errstr = "cannot spill",
+		.result = REJECT,
+	},
+	{
+		"calls: write into caller stack frame",
+		.insns = {
+			BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
+			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
+			BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
+			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
+			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
+			BPF_EXIT_INSN(),
+			BPF_ST_MEM(BPF_DW, BPF_REG_1, 0, 42),
+			BPF_MOV64_IMM(BPF_REG_0, 0),
+			BPF_EXIT_INSN(),
+		},
+		.prog_type = BPF_PROG_TYPE_XDP,
+		.result = ACCEPT,
+		.retval = 42,
+	},
+	{
+		"calls: write into callee stack frame",
+		.insns = {
+			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
+			BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 42),
+			BPF_EXIT_INSN(),
+			BPF_MOV64_REG(BPF_REG_0, BPF_REG_10),
+			BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, -8),
+			BPF_EXIT_INSN(),
+		},
+		.prog_type = BPF_PROG_TYPE_XDP,
+		.errstr = "cannot return stack pointer",
+		.result = REJECT,
+	},
+	{
+		"calls: two calls with stack write and void return",
+		.insns = {
+			/* main prog */
+			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+			BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
+			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
+			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -16),
+			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
+			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_10, -16),
+			BPF_EXIT_INSN(),
+
+			/* subprog 1 */
+			BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
+			BPF_MOV64_REG(BPF_REG_7, BPF_REG_2),
+			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
+			BPF_MOV64_REG(BPF_REG_1, BPF_REG_7),
+			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
+			BPF_EXIT_INSN(),
+
+			/* subprog 2 */
+			/* write into stack frame of main prog */
+			BPF_ST_MEM(BPF_DW, BPF_REG_1, 0, 0),
+			BPF_EXIT_INSN(), /* void return */
+		},
+		.prog_type = BPF_PROG_TYPE_XDP,
+		.result = ACCEPT,
+	},
+	{
+		"calls: ambiguous return value",
+		.insns = {
+			BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
+			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 5),
+			BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
+			BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
+			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
+			BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
+			BPF_EXIT_INSN(),
+			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 1),
+			BPF_MOV64_IMM(BPF_REG_0, 0),
+			BPF_EXIT_INSN(),
+		},
+		.errstr_unpriv = "allowed for root only",
+		.result_unpriv = REJECT,
+		.errstr = "R0 !read_ok",
+		.result = REJECT,
+	},
+	{
+		"calls: two calls that return map_value",
+		.insns = {
+			/* main prog */
+			/* pass fp-16, fp-8 into a function */
+			BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
+			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
+			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -16),
+			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 8),
+
+			/* fetch map_value_ptr from the stack of this function */
+			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_10, -8),
+			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
+			/* write into map value */
+			BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
+			/* fetch second map_value_ptr from the stack */
+			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_10, -16),
+			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
+			/* write into map value */
+			BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
+			BPF_MOV64_IMM(BPF_REG_0, 0),
+			BPF_EXIT_INSN(),
+
+			/* subprog 1 */
+			/* call 3rd function twice */
+			BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
+			BPF_MOV64_REG(BPF_REG_7, BPF_REG_2),
+			/* first time with fp-8 */
+			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
+			BPF_MOV64_REG(BPF_REG_1, BPF_REG_7),
+			/* second time with fp-16 */
+			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
+			BPF_EXIT_INSN(),
+
+			/* subprog 2 */
+			BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
+			/* lookup from map */
+			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+			BPF_LD_MAP_FD(BPF_REG_1, 0),
+			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
+				     BPF_FUNC_map_lookup_elem),
+			/* write map_value_ptr into stack frame of main prog */
+			BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_0, 0),
+			BPF_MOV64_IMM(BPF_REG_0, 0),
+			BPF_EXIT_INSN(), /* return 0 */
+		},
+		.prog_type = BPF_PROG_TYPE_XDP,
+		.fixup_map1 = { 23 },
+		.result = ACCEPT,
+	},
+	{
+		"calls: two calls that return map_value with bool condition",
+		.insns = {
+			/* main prog */
+			/* pass fp-16, fp-8 into a function */
+			BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
+			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
+			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -16),
+			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
+			BPF_MOV64_IMM(BPF_REG_0, 0),
+			BPF_EXIT_INSN(),
+
+			/* subprog 1 */
+			/* call 3rd function twice */
+			BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
+			BPF_MOV64_REG(BPF_REG_7, BPF_REG_2),
+			/* first time with fp-8 */
+			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 9),
+			BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 1, 2),
+			/* fetch map_value_ptr from the stack of this function */
+			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
+			/* write into map value */
+			BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
+			BPF_MOV64_REG(BPF_REG_1, BPF_REG_7),
+			/* second time with fp-16 */
+			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 4),
+			BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 1, 2),
+			/* fetch second map_value_ptr from the stack */
+			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_7, 0),
+			/* write into map value */
+			BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
+			BPF_EXIT_INSN(),
+
+			/* subprog 2 */
+			BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
+			/* lookup from map */
+			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+			BPF_LD_MAP_FD(BPF_REG_1, 0),
+			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
+				     BPF_FUNC_map_lookup_elem),
+			BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
+			BPF_MOV64_IMM(BPF_REG_0, 0),
+			BPF_EXIT_INSN(), /* return 0 */
+			/* write map_value_ptr into stack frame of main prog */
+			BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_0, 0),
+			BPF_MOV64_IMM(BPF_REG_0, 1),
+			BPF_EXIT_INSN(), /* return 1 */
+		},
+		.prog_type = BPF_PROG_TYPE_XDP,
+		.fixup_map1 = { 23 },
+		.result = ACCEPT,
+	},
+	{
+		"calls: two calls that return map_value with incorrect bool check",
+		.insns = {
+			/* main prog */
+			/* pass fp-16, fp-8 into a function */
+			BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
+			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
+			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -16),
+			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
+			BPF_MOV64_IMM(BPF_REG_0, 0),
+			BPF_EXIT_INSN(),
+
+			/* subprog 1 */
+			/* call 3rd function twice */
+			BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
+			BPF_MOV64_REG(BPF_REG_7, BPF_REG_2),
+			/* first time with fp-8 */
+			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 9),
+			BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 1, 2),
+			/* fetch map_value_ptr from the stack of this function */
+			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
+			/* write into map value */
+			BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
+			BPF_MOV64_REG(BPF_REG_1, BPF_REG_7),
+			/* second time with fp-16 */
+			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 4),
+			BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
+			/* fetch second map_value_ptr from the stack */
+			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_7, 0),
+			/* write into map value */
+			BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
+			BPF_EXIT_INSN(),
+
+			/* subprog 2 */
+			BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
+			/* lookup from map */
+			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+			BPF_LD_MAP_FD(BPF_REG_1, 0),
+			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
+				     BPF_FUNC_map_lookup_elem),
+			BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
+			BPF_MOV64_IMM(BPF_REG_0, 0),
+			BPF_EXIT_INSN(), /* return 0 */
+			/* write map_value_ptr into stack frame of main prog */
+			BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_0, 0),
+			BPF_MOV64_IMM(BPF_REG_0, 1),
+			BPF_EXIT_INSN(), /* return 1 */
+		},
+		.prog_type = BPF_PROG_TYPE_XDP,
+		.fixup_map1 = { 23 },
+		.result = REJECT,
+		.errstr = "invalid read from stack off -16+0 size 8",
+	},
+	{
+		"calls: two calls that receive map_value via arg=ptr_stack_of_caller. test1",
+		.insns = {
+			/* main prog */
+			/* pass fp-16, fp-8 into a function */
+			BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
+			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
+			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -16),
+			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
+			BPF_MOV64_IMM(BPF_REG_0, 0),
+			BPF_EXIT_INSN(),
+
+			/* subprog 1 */
+			BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
+			BPF_MOV64_REG(BPF_REG_7, BPF_REG_2),
+			/* 1st lookup from map */
+			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+			BPF_LD_MAP_FD(BPF_REG_1, 0),
+			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
+				     BPF_FUNC_map_lookup_elem),
+			BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
+			BPF_MOV64_IMM(BPF_REG_8, 0),
+			BPF_JMP_IMM(BPF_JA, 0, 0, 2),
+			/* write map_value_ptr into stack frame of main prog at fp-8 */
+			BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_0, 0),
+			BPF_MOV64_IMM(BPF_REG_8, 1),
+
+			/* 2nd lookup from map */
+			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), /* 20 */
+			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+			BPF_LD_MAP_FD(BPF_REG_1, 0),
+			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, /* 24 */
+				     BPF_FUNC_map_lookup_elem),
+			BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
+			BPF_MOV64_IMM(BPF_REG_9, 0),
+			BPF_JMP_IMM(BPF_JA, 0, 0, 2),
+			/* write map_value_ptr into stack frame of main prog at fp-16 */
+			BPF_STX_MEM(BPF_DW, BPF_REG_7, BPF_REG_0, 0),
+			BPF_MOV64_IMM(BPF_REG_9, 1),
+
+			/* call 3rd func with fp-8, 0|1, fp-16, 0|1 */
+			BPF_MOV64_REG(BPF_REG_1, BPF_REG_6), /* 30 */
+			BPF_MOV64_REG(BPF_REG_2, BPF_REG_8),
+			BPF_MOV64_REG(BPF_REG_3, BPF_REG_7),
+			BPF_MOV64_REG(BPF_REG_4, BPF_REG_9),
+			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),  /* 34 */
+			BPF_EXIT_INSN(),
+
+			/* subprog 2 */
+			/* if arg2 == 1 do *arg1 = 0 */
+			BPF_JMP_IMM(BPF_JNE, BPF_REG_2, 1, 2),
+			/* fetch map_value_ptr from the stack of this function */
+			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, 0),
+			/* write into map value */
+			BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
+
+			/* if arg4 == 1 do *arg3 = 0 */
+			BPF_JMP_IMM(BPF_JNE, BPF_REG_4, 1, 2),
+			/* fetch map_value_ptr from the stack of this function */
+			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_3, 0),
+			/* write into map value */
+			BPF_ST_MEM(BPF_DW, BPF_REG_0, 2, 0),
+			BPF_EXIT_INSN(),
+		},
+		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
+		.fixup_map1 = { 12, 22 },
+		.result = REJECT,
+		.errstr = "invalid access to map value, value_size=8 off=2 size=8",
+	},
+	{
+		"calls: two calls that receive map_value via arg=ptr_stack_of_caller. test2",
+		.insns = {
+			/* main prog */
+			/* pass fp-16, fp-8 into a function */
+			BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
+			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
+			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -16),
+			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
+			BPF_MOV64_IMM(BPF_REG_0, 0),
+			BPF_EXIT_INSN(),
+
+			/* subprog 1 */
+			BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
+			BPF_MOV64_REG(BPF_REG_7, BPF_REG_2),
+			/* 1st lookup from map */
+			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+			BPF_LD_MAP_FD(BPF_REG_1, 0),
+			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
+				     BPF_FUNC_map_lookup_elem),
+			BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
+			BPF_MOV64_IMM(BPF_REG_8, 0),
+			BPF_JMP_IMM(BPF_JA, 0, 0, 2),
+			/* write map_value_ptr into stack frame of main prog at fp-8 */
+			BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_0, 0),
+			BPF_MOV64_IMM(BPF_REG_8, 1),
+
+			/* 2nd lookup from map */
+			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), /* 20 */
+			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+			BPF_LD_MAP_FD(BPF_REG_1, 0),
+			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, /* 24 */
+				     BPF_FUNC_map_lookup_elem),
+			BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
+			BPF_MOV64_IMM(BPF_REG_9, 0),
+			BPF_JMP_IMM(BPF_JA, 0, 0, 2),
+			/* write map_value_ptr into stack frame of main prog at fp-16 */
+			BPF_STX_MEM(BPF_DW, BPF_REG_7, BPF_REG_0, 0),
+			BPF_MOV64_IMM(BPF_REG_9, 1),
+
+			/* call 3rd func with fp-8, 0|1, fp-16, 0|1 */
+			BPF_MOV64_REG(BPF_REG_1, BPF_REG_6), /* 30 */
+			BPF_MOV64_REG(BPF_REG_2, BPF_REG_8),
+			BPF_MOV64_REG(BPF_REG_3, BPF_REG_7),
+			BPF_MOV64_REG(BPF_REG_4, BPF_REG_9),
+			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),  /* 34 */
+			BPF_EXIT_INSN(),
+
+			/* subprog 2 */
+			/* if arg2 == 1 do *arg1 = 0 */
+			BPF_JMP_IMM(BPF_JNE, BPF_REG_2, 1, 2),
+			/* fetch map_value_ptr from the stack of this function */
+			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, 0),
+			/* write into map value */
+			BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
+
+			/* if arg4 == 1 do *arg3 = 0 */
+			BPF_JMP_IMM(BPF_JNE, BPF_REG_4, 1, 2),
+			/* fetch map_value_ptr from the stack of this function */
+			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_3, 0),
+			/* write into map value */
+			BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
+			BPF_EXIT_INSN(),
+		},
+		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
+		.fixup_map1 = { 12, 22 },
+		.result = ACCEPT,
+	},
+	{
+		"calls: two jumps that receive map_value via arg=ptr_stack_of_jumper. test3",
+		.insns = {
+			/* main prog */
+			/* pass fp-16, fp-8 into a function */
+			BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
+			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
+			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -16),
+			BPF_JMP_IMM(BPF_JNE, BPF_REG_1, 0, 2),
+			BPF_MOV64_IMM(BPF_REG_0, 0),
+			BPF_EXIT_INSN(),
+
+			/* subprog 1 */
+			BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
+			BPF_MOV64_REG(BPF_REG_7, BPF_REG_2),
+			/* 1st lookup from map */
+			BPF_ST_MEM(BPF_DW, BPF_REG_10, -24, 0),
+			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -24),
+			BPF_LD_MAP_FD(BPF_REG_1, 0),
+			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
+				     BPF_FUNC_map_lookup_elem),
+			BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
+			BPF_MOV64_IMM(BPF_REG_8, 0),
+			BPF_JMP_IMM(BPF_JA, 0, 0, 2),
+			/* write map_value_ptr into stack frame of main prog at fp-8 */
+			BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_0, 0),
+			BPF_MOV64_IMM(BPF_REG_8, 1),
+
+			/* 2nd lookup from map */
+			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -24),
+			BPF_LD_MAP_FD(BPF_REG_1, 0),
+			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
+				     BPF_FUNC_map_lookup_elem),
+			BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
+			BPF_MOV64_IMM(BPF_REG_9, 0), /* 26 */
+			BPF_JMP_IMM(BPF_JA, 0, 0, 2),
+			/* write map_value_ptr into stack frame of main prog at fp-16 */
+			BPF_STX_MEM(BPF_DW, BPF_REG_7, BPF_REG_0, 0),
+			BPF_MOV64_IMM(BPF_REG_9, 1),
+
+			/* call 3rd func with fp-8, 0|1, fp-16, 0|1 */
+			BPF_MOV64_REG(BPF_REG_1, BPF_REG_6), /* 30 */
+			BPF_MOV64_REG(BPF_REG_2, BPF_REG_8),
+			BPF_MOV64_REG(BPF_REG_3, BPF_REG_7),
+			BPF_MOV64_REG(BPF_REG_4, BPF_REG_9),
+			BPF_JMP_IMM(BPF_JNE, BPF_REG_1, 0, 1), /* 34 */
+			BPF_JMP_IMM(BPF_JA, 0, 0, -30),
+
+			/* subprog 2 */
+			/* if arg2 == 1 do *arg1 = 0 */
+			BPF_JMP_IMM(BPF_JNE, BPF_REG_2, 1, 2),
+			/* fetch map_value_ptr from the stack of this function */
+			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, 0),
+			/* write into map value */
+			BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
+
+			/* if arg4 == 1 do *arg3 = 0 */
+			BPF_JMP_IMM(BPF_JNE, BPF_REG_4, 1, 2),
+			/* fetch map_value_ptr from the stack of this function */
+			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_3, 0),
+			/* write into map value */
+			BPF_ST_MEM(BPF_DW, BPF_REG_0, 2, 0),
+			BPF_JMP_IMM(BPF_JA, 0, 0, -8),
+		},
+		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
+		.fixup_map1 = { 12, 22 },
+		.result = REJECT,
+		.errstr = "invalid access to map value, value_size=8 off=2 size=8",
+	},
+	{
+		"calls: two calls that receive map_value_ptr_or_null via arg. test1",
+		.insns = {
+			/* main prog */
+			/* pass fp-16, fp-8 into a function */
+			BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
+			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
+			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -16),
+			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
+			BPF_MOV64_IMM(BPF_REG_0, 0),
+			BPF_EXIT_INSN(),
+
+			/* subprog 1 */
+			BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
+			BPF_MOV64_REG(BPF_REG_7, BPF_REG_2),
+			/* 1st lookup from map */
+			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+			BPF_LD_MAP_FD(BPF_REG_1, 0),
+			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
+				     BPF_FUNC_map_lookup_elem),
+			/* write map_value_ptr_or_null into stack frame of main prog at fp-8 */
+			BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_0, 0),
+			BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
+			BPF_MOV64_IMM(BPF_REG_8, 0),
+			BPF_JMP_IMM(BPF_JA, 0, 0, 1),
+			BPF_MOV64_IMM(BPF_REG_8, 1),
+
+			/* 2nd lookup from map */
+			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+			BPF_LD_MAP_FD(BPF_REG_1, 0),
+			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
+				     BPF_FUNC_map_lookup_elem),
+			/* write map_value_ptr_or_null into stack frame of main prog at fp-16 */
+			BPF_STX_MEM(BPF_DW, BPF_REG_7, BPF_REG_0, 0),
+			BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
+			BPF_MOV64_IMM(BPF_REG_9, 0),
+			BPF_JMP_IMM(BPF_JA, 0, 0, 1),
+			BPF_MOV64_IMM(BPF_REG_9, 1),
+
+			/* call 3rd func with fp-8, 0|1, fp-16, 0|1 */
+			BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
+			BPF_MOV64_REG(BPF_REG_2, BPF_REG_8),
+			BPF_MOV64_REG(BPF_REG_3, BPF_REG_7),
+			BPF_MOV64_REG(BPF_REG_4, BPF_REG_9),
+			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
+			BPF_EXIT_INSN(),
+
+			/* subprog 2 */
+			/* if arg2 == 1 do *arg1 = 0 */
+			BPF_JMP_IMM(BPF_JNE, BPF_REG_2, 1, 2),
+			/* fetch map_value_ptr from the stack of this function */
+			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, 0),
+			/* write into map value */
+			BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
+
+			/* if arg4 == 1 do *arg3 = 0 */
+			BPF_JMP_IMM(BPF_JNE, BPF_REG_4, 1, 2),
+			/* fetch map_value_ptr from the stack of this function */
+			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_3, 0),
+			/* write into map value */
+			BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
+			BPF_EXIT_INSN(),
+		},
+		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
+		.fixup_map1 = { 12, 22 },
+		.result = ACCEPT,
+	},
+	{
+		"calls: two calls that receive map_value_ptr_or_null via arg. test2",
+		.insns = {
+			/* main prog */
+			/* pass fp-16, fp-8 into a function */
+			BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
+			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
+			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -16),
+			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
+			BPF_MOV64_IMM(BPF_REG_0, 0),
+			BPF_EXIT_INSN(),
+
+			/* subprog 1 */
+			BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
+			BPF_MOV64_REG(BPF_REG_7, BPF_REG_2),
+			/* 1st lookup from map */
+			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+			BPF_LD_MAP_FD(BPF_REG_1, 0),
+			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
+				     BPF_FUNC_map_lookup_elem),
+			/* write map_value_ptr_or_null into stack frame of main prog at fp-8 */
+			BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_0, 0),
+			BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
+			BPF_MOV64_IMM(BPF_REG_8, 0),
+			BPF_JMP_IMM(BPF_JA, 0, 0, 1),
+			BPF_MOV64_IMM(BPF_REG_8, 1),
+
+			/* 2nd lookup from map */
+			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+			BPF_LD_MAP_FD(BPF_REG_1, 0),
+			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
+				     BPF_FUNC_map_lookup_elem),
+			/* write map_value_ptr_or_null into stack frame of main prog at fp-16 */
+			BPF_STX_MEM(BPF_DW, BPF_REG_7, BPF_REG_0, 0),
+			BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
+			BPF_MOV64_IMM(BPF_REG_9, 0),
+			BPF_JMP_IMM(BPF_JA, 0, 0, 1),
+			BPF_MOV64_IMM(BPF_REG_9, 1),
+
+			/* call 3rd func with fp-8, 0|1, fp-16, 0|1 */
+			BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
+			BPF_MOV64_REG(BPF_REG_2, BPF_REG_8),
+			BPF_MOV64_REG(BPF_REG_3, BPF_REG_7),
+			BPF_MOV64_REG(BPF_REG_4, BPF_REG_9),
+			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
+			BPF_EXIT_INSN(),
+
+			/* subprog 2 */
+			/* if arg2 == 1 do *arg1 = 0 */
+			BPF_JMP_IMM(BPF_JNE, BPF_REG_2, 1, 2),
+			/* fetch map_value_ptr from the stack of this function */
+			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, 0),
+			/* write into map value */
+			BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
+
+			/* if arg4 == 0 do *arg3 = 0 */
+			BPF_JMP_IMM(BPF_JNE, BPF_REG_4, 0, 2),
+			/* fetch map_value_ptr from the stack of this function */
+			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_3, 0),
+			/* write into map value */
+			BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
+			BPF_EXIT_INSN(),
+		},
+		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
+		.fixup_map1 = { 12, 22 },
+		.result = REJECT,
+		.errstr = "R0 invalid mem access 'inv'",
+	},
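+	/* the "pkt_ptr spill" tests below exercise how the verifier tracks
+	 * a packet pointer spilled through the caller's stack: the spilled
+	 * slot becomes usable only once the pkt range check is seen, and
+	 * its marking must be kept or invalidated correctly across calls
+	 */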
+	{
+		"calls: pkt_ptr spill into caller stack",
+		.insns = {
+			BPF_MOV64_REG(BPF_REG_4, BPF_REG_10),
+			BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, -8),
+			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
+			BPF_EXIT_INSN(),
+
+			/* subprog 1 */
+			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
+				    offsetof(struct __sk_buff, data)),
+			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
+				    offsetof(struct __sk_buff, data_end)),
+			BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
+			BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
+			/* spill unchecked pkt_ptr into stack of caller */
+			BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_2, 0),
+			BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 2),
+			/* now the pkt range is verified, read pkt_ptr from stack */
+			BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_4, 0),
+			/* write 4 bytes into packet */
+			BPF_ST_MEM(BPF_W, BPF_REG_2, 0, 0),
+			BPF_EXIT_INSN(),
+		},
+		.result = ACCEPT,
+		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
+		.retval = POINTER_VALUE,
+	},
+	{
+		"calls: pkt_ptr spill into caller stack 2",
+		.insns = {
+			BPF_MOV64_REG(BPF_REG_4, BPF_REG_10),
+			BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, -8),
+			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
+			/* Marking is still kept, but it is not safe in all cases. */
+			BPF_LDX_MEM(BPF_DW, BPF_REG_4, BPF_REG_10, -8),
+			BPF_ST_MEM(BPF_W, BPF_REG_4, 0, 0),
+			BPF_EXIT_INSN(),
+
+			/* subprog 1 */
+			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
+				    offsetof(struct __sk_buff, data)),
+			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
+				    offsetof(struct __sk_buff, data_end)),
+			BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
+			BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
+			/* spill unchecked pkt_ptr into stack of caller */
+			BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_2, 0),
+			BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 2),
+			/* now the pkt range is verified, read pkt_ptr from stack */
+			BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_4, 0),
+			/* write 4 bytes into packet */
+			BPF_ST_MEM(BPF_W, BPF_REG_2, 0, 0),
+			BPF_EXIT_INSN(),
+		},
+		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
+		.errstr = "invalid access to packet",
+		.result = REJECT,
+	},
+	{
+		"calls: pkt_ptr spill into caller stack 3",
+		.insns = {
+			BPF_MOV64_REG(BPF_REG_4, BPF_REG_10),
+			BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, -8),
+			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 4),
+			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2),
+			/* Marking is still kept and safe here. */
+			BPF_LDX_MEM(BPF_DW, BPF_REG_4, BPF_REG_10, -8),
+			BPF_ST_MEM(BPF_W, BPF_REG_4, 0, 0),
+			BPF_EXIT_INSN(),
+
+			/* subprog 1 */
+			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
+				    offsetof(struct __sk_buff, data)),
+			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
+				    offsetof(struct __sk_buff, data_end)),
+			BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
+			BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
+			/* spill unchecked pkt_ptr into stack of caller */
+			BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_2, 0),
+			BPF_MOV64_IMM(BPF_REG_5, 0),
+			BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 3),
+			BPF_MOV64_IMM(BPF_REG_5, 1),
+			/* now the pkt range is verified, read pkt_ptr from stack */
+			BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_4, 0),
+			/* write 4 bytes into packet */
+			BPF_ST_MEM(BPF_W, BPF_REG_2, 0, 0),
+			BPF_MOV64_REG(BPF_REG_0, BPF_REG_5),
+			BPF_EXIT_INSN(),
+		},
+		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
+		.result = ACCEPT,
+		.retval = 1,
+	},
+	{
+		"calls: pkt_ptr spill into caller stack 4",
+		.insns = {
+			BPF_MOV64_REG(BPF_REG_4, BPF_REG_10),
+			BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, -8),
+			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 4),
+			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2),
+			/* Check that marking propagated. */
+			BPF_LDX_MEM(BPF_DW, BPF_REG_4, BPF_REG_10, -8),
+			BPF_ST_MEM(BPF_W, BPF_REG_4, 0, 0),
+			BPF_EXIT_INSN(),
+
+			/* subprog 1 */
+			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
+				    offsetof(struct __sk_buff, data)),
+			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
+				    offsetof(struct __sk_buff, data_end)),
+			BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
+			BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
+			/* spill unchecked pkt_ptr into stack of caller */
+			BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_2, 0),
+			BPF_MOV64_IMM(BPF_REG_5, 0),
+			BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 2),
+			BPF_MOV64_IMM(BPF_REG_5, 1),
+			/* don't read back pkt_ptr from stack here */
+			/* write 4 bytes into packet */
+			BPF_ST_MEM(BPF_W, BPF_REG_2, 0, 0),
+			BPF_MOV64_REG(BPF_REG_0, BPF_REG_5),
+			BPF_EXIT_INSN(),
+		},
+		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
+		.result = ACCEPT,
+		.retval = 1,
+	},
+	{
+		"calls: pkt_ptr spill into caller stack 5",
+		.insns = {
+			BPF_MOV64_REG(BPF_REG_4, BPF_REG_10),
+			BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, -8),
+			BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_1, 0),
+			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
+			BPF_LDX_MEM(BPF_DW, BPF_REG_4, BPF_REG_10, -8),
+			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_4, 0),
+			BPF_EXIT_INSN(),
+
+			/* subprog 1 */
+			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
+				    offsetof(struct __sk_buff, data)),
+			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
+				    offsetof(struct __sk_buff, data_end)),
+			BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
+			BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
+			BPF_MOV64_IMM(BPF_REG_5, 0),
+			BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 3),
+			/* spill checked pkt_ptr into stack of caller */
+			BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_2, 0),
+			BPF_MOV64_IMM(BPF_REG_5, 1),
+			/* don't read back pkt_ptr from stack here */
+			/* write 4 bytes into packet */
+			BPF_ST_MEM(BPF_W, BPF_REG_2, 0, 0),
+			BPF_MOV64_REG(BPF_REG_0, BPF_REG_5),
+			BPF_EXIT_INSN(),
+		},
+		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
+		.errstr = "same insn cannot be used with different",
+		.result = REJECT,
+	},
+	{
+		"calls: pkt_ptr spill into caller stack 6",
+		.insns = {
+			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
+				    offsetof(struct __sk_buff, data_end)),
+			BPF_MOV64_REG(BPF_REG_4, BPF_REG_10),
+			BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, -8),
+			BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_2, 0),
+			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
+			BPF_LDX_MEM(BPF_DW, BPF_REG_4, BPF_REG_10, -8),
+			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_4, 0),
+			BPF_EXIT_INSN(),
+
+			/* subprog 1 */
+			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
+				    offsetof(struct __sk_buff, data)),
+			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
+				    offsetof(struct __sk_buff, data_end)),
+			BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
+			BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
+			BPF_MOV64_IMM(BPF_REG_5, 0),
+			BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 3),
+			/* spill checked pkt_ptr into stack of caller */
+			BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_2, 0),
+			BPF_MOV64_IMM(BPF_REG_5, 1),
+			/* don't read back pkt_ptr from stack here */
+			/* write 4 bytes into packet */
+			BPF_ST_MEM(BPF_W, BPF_REG_2, 0, 0),
+			BPF_MOV64_REG(BPF_REG_0, BPF_REG_5),
+			BPF_EXIT_INSN(),
+		},
+		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
+		.errstr = "R4 invalid mem access",
+		.result = REJECT,
+	},
+	{
+		"calls: pkt_ptr spill into caller stack 7",
+		.insns = {
+			BPF_MOV64_IMM(BPF_REG_2, 0),
+			BPF_MOV64_REG(BPF_REG_4, BPF_REG_10),
+			BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, -8),
+			BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_2, 0),
+			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
+			BPF_LDX_MEM(BPF_DW, BPF_REG_4, BPF_REG_10, -8),
+			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_4, 0),
+			BPF_EXIT_INSN(),
+
+			/* subprog 1 */
+			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
+				    offsetof(struct __sk_buff, data)),
+			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
+				    offsetof(struct __sk_buff, data_end)),
+			BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
+			BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
+			BPF_MOV64_IMM(BPF_REG_5, 0),
+			BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 3),
+			/* spill checked pkt_ptr into stack of caller */
+			BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_2, 0),
+			BPF_MOV64_IMM(BPF_REG_5, 1),
+			/* don't read back pkt_ptr from stack here */
+			/* write 4 bytes into packet */
+			BPF_ST_MEM(BPF_W, BPF_REG_2, 0, 0),
+			BPF_MOV64_REG(BPF_REG_0, BPF_REG_5),
+			BPF_EXIT_INSN(),
+		},
+		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
+		.errstr = "R4 invalid mem access",
+		.result = REJECT,
+	},
+	{
+		"calls: pkt_ptr spill into caller stack 8",
+		.insns = {
+			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
+				    offsetof(struct __sk_buff, data)),
+			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
+				    offsetof(struct __sk_buff, data_end)),
+			BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
+			BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
+			BPF_JMP_REG(BPF_JLE, BPF_REG_0, BPF_REG_3, 1),
+			BPF_EXIT_INSN(),
+			BPF_MOV64_REG(BPF_REG_4, BPF_REG_10),
+			BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, -8),
+			BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_2, 0),
+			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
+			BPF_LDX_MEM(BPF_DW, BPF_REG_4, BPF_REG_10, -8),
+			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_4, 0),
+			BPF_EXIT_INSN(),
+
+			/* subprog 1 */
+			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
+				    offsetof(struct __sk_buff, data)),
+			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
+				    offsetof(struct __sk_buff, data_end)),
+			BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
+			BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
+			BPF_MOV64_IMM(BPF_REG_5, 0),
+			BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 3),
+			/* spill checked pkt_ptr into stack of caller */
+			BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_2, 0),
+			BPF_MOV64_IMM(BPF_REG_5, 1),
+			/* don't read back pkt_ptr from stack here */
+			/* write 4 bytes into packet */
+			BPF_ST_MEM(BPF_W, BPF_REG_2, 0, 0),
+			BPF_MOV64_REG(BPF_REG_0, BPF_REG_5),
+			BPF_EXIT_INSN(),
+		},
+		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
+		.result = ACCEPT,
+	},
+	{
+		"calls: pkt_ptr spill into caller stack 9",
+		.insns = {
+			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
+				    offsetof(struct __sk_buff, data)),
+			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
+				    offsetof(struct __sk_buff, data_end)),
+			BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
+			BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
+			BPF_JMP_REG(BPF_JLE, BPF_REG_0, BPF_REG_3, 1),
+			BPF_EXIT_INSN(),
+			BPF_MOV64_REG(BPF_REG_4, BPF_REG_10),
+			BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, -8),
+			BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_2, 0),
+			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
+			BPF_LDX_MEM(BPF_DW, BPF_REG_4, BPF_REG_10, -8),
+			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_4, 0),
+			BPF_EXIT_INSN(),
+
+			/* subprog 1 */
+			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
+				    offsetof(struct __sk_buff, data)),
+			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
+				    offsetof(struct __sk_buff, data_end)),
+			BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
+			BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
+			BPF_MOV64_IMM(BPF_REG_5, 0),
+			/* spill unchecked pkt_ptr into stack of caller */
+			BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_2, 0),
+			BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 2),
+			BPF_MOV64_IMM(BPF_REG_5, 1),
+			/* don't read back pkt_ptr from stack here */
+			/* write 4 bytes into packet */
+			BPF_ST_MEM(BPF_W, BPF_REG_2, 0, 0),
+			BPF_MOV64_REG(BPF_REG_0, BPF_REG_5),
+			BPF_EXIT_INSN(),
+		},
+		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
+		.errstr = "invalid access to packet",
+		.result = REJECT,
+	},
+	{
+		"calls: caller stack init to zero or map_value_or_null",
+		.insns = {
+			BPF_MOV64_IMM(BPF_REG_0, 0),
+			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -8),
+			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 4),
+			/* fetch map_value_or_null or const_zero from stack */
+			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_10, -8),
+			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
+			/* store into map_value */
+			BPF_ST_MEM(BPF_W, BPF_REG_0, 0, 0),
+			BPF_EXIT_INSN(),
+
+			/* subprog 1 */
+			/* if (ctx == 0) return; */
+			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 8),
+			/* else bpf_map_lookup() and *(fp - 8) = r0 */
+			BPF_MOV64_REG(BPF_REG_6, BPF_REG_2),
+			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+			BPF_LD_MAP_FD(BPF_REG_1, 0),
+			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
+				     BPF_FUNC_map_lookup_elem),
+			/* write map_value_ptr_or_null into stack frame of main prog at fp-8 */
+			BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_0, 0),
+			BPF_EXIT_INSN(),
+		},
+		.fixup_map1 = { 13 },
+		.result = ACCEPT,
+		.prog_type = BPF_PROG_TYPE_XDP,
+	},
+	{
+		"calls: stack init to zero and pruning",
+		.insns = {
+			/* first make allocated_stack 16 byte */
+			BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, 0),
+			/* now fork the execution such that the false branch
+			 * of the JGT insn will be verified second and skips the
+			 * zero init of the fp-8 stack slot. If stack liveness
+			 * marking is missing live_read marks from the
+			 * map_lookup call, then pruning will incorrectly assume
+			 * that the fp-8 stack slot was unused in the
+			 * fall-through branch and will accept the program
+			 * incorrectly
+			 */
+			BPF_JMP_IMM(BPF_JGT, BPF_REG_1, 2, 2),
+			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+			BPF_JMP_IMM(BPF_JA, 0, 0, 0),
+			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+			BPF_LD_MAP_FD(BPF_REG_1, 0),
+			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
+				     BPF_FUNC_map_lookup_elem),
+			BPF_EXIT_INSN(),
+		},
+		.fixup_map2 = { 6 },
+		.errstr = "invalid indirect read from stack off -8+0 size 8",
+		.result = REJECT,
+		.prog_type = BPF_PROG_TYPE_XDP,
+	},
+	{
+		"search pruning: all branches should be verified (nop operation)",
+		.insns = {
+			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+			BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
+			BPF_LD_MAP_FD(BPF_REG_1, 0),
+			BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
+			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 11),
+			BPF_LDX_MEM(BPF_DW, BPF_REG_3, BPF_REG_0, 0),
+			BPF_JMP_IMM(BPF_JEQ, BPF_REG_3, 0xbeef, 2),
+			BPF_MOV64_IMM(BPF_REG_4, 0),
+			BPF_JMP_A(1),
+			BPF_MOV64_IMM(BPF_REG_4, 1),
+			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_4, -16),
+			BPF_EMIT_CALL(BPF_FUNC_ktime_get_ns),
+			BPF_LDX_MEM(BPF_DW, BPF_REG_5, BPF_REG_10, -16),
+			BPF_JMP_IMM(BPF_JEQ, BPF_REG_5, 0, 2),
+			BPF_MOV64_IMM(BPF_REG_6, 0),
+			BPF_ST_MEM(BPF_DW, BPF_REG_6, 0, 0xdead),
+			BPF_EXIT_INSN(),
+		},
+		.fixup_map1 = { 3 },
+		.errstr = "R6 invalid mem access 'inv'",
+		.result = REJECT,
+		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
+	},
+	{
+		"search pruning: all branches should be verified (invalid stack access)",
+		.insns = {
+			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+			BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
+			BPF_LD_MAP_FD(BPF_REG_1, 0),
+			BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
+			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 8),
+			BPF_LDX_MEM(BPF_DW, BPF_REG_3, BPF_REG_0, 0),
+			BPF_MOV64_IMM(BPF_REG_4, 0),
+			BPF_JMP_IMM(BPF_JEQ, BPF_REG_3, 0xbeef, 2),
+			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_4, -16),
+			BPF_JMP_A(1),
+			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_4, -24),
+			BPF_EMIT_CALL(BPF_FUNC_ktime_get_ns),
+			BPF_LDX_MEM(BPF_DW, BPF_REG_5, BPF_REG_10, -16),
+			BPF_EXIT_INSN(),
+		},
+		.fixup_map1 = { 3 },
+		.errstr = "invalid read from stack off -16+0 size 8",
+		.result = REJECT,
+		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
+	},
 };
 
 static int probe_filter_length(const struct bpf_insn *fp)
@@ -8937,10 +11234,12 @@ static void do_test_single(struct bpf_test *test, bool unpriv,
 	int fd_prog, expected_ret, reject_from_alignment;
 	struct bpf_insn *prog = test->insns;
 	int prog_len = probe_filter_length(prog);
+	char data_in[TEST_DATA_LEN] = {};
 	int prog_type = test->prog_type;
 	int map_fds[MAX_NR_MAPS];
 	const char *expected_err;
-	int i;
+	uint32_t retval;
+	int i, err;
 
 	for (i = 0; i < MAX_NR_MAPS; i++)
 		map_fds[i] = -1;
@@ -8983,6 +11282,19 @@ static void do_test_single(struct bpf_test *test, bool unpriv,
 		}
 	}
 
+	if (fd_prog >= 0) {
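+		/* run the loaded program once on zeroed input and check the
+		 * return value; ENOTSUPP (524) means this prog type has no
+		 * test_run handler, and EPERM is expected when unprivileged
+		 */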
+		err = bpf_prog_test_run(fd_prog, 1, data_in, sizeof(data_in),
+					NULL, NULL, &retval, NULL);
+		if (err && errno != 524 /* ENOTSUPP */ && errno != EPERM) {
+			printf("Unexpected bpf_prog_test_run error\n");
+			goto fail_log;
+		}
+		if (!err && retval != test->retval &&
+		    test->retval != POINTER_VALUE) {
+			printf("FAIL retval %d != %d\n", retval, test->retval);
+			goto fail_log;
+		}
+	}
 	(*passes)++;
 	printf("OK%s\n", reject_from_alignment ?
 	       " (NOTE: reject due to unknown alignment)" : "");
diff --git a/tools/testing/selftests/bpf/test_xdp_noinline.c b/tools/testing/selftests/bpf/test_xdp_noinline.c
new file mode 100644
index 000000000000..5e4aac74f9d0
--- /dev/null
+++ b/tools/testing/selftests/bpf/test_xdp_noinline.c
@@ -0,0 +1,833 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (c) 2017 Facebook
+#include <stddef.h>
+#include <stdbool.h>
+#include <string.h>
+#include <linux/pkt_cls.h>
+#include <linux/bpf.h>
+#include <linux/in.h>
+#include <linux/if_ether.h>
+#include <linux/ip.h>
+#include <linux/ipv6.h>
+#include <linux/icmp.h>
+#include <linux/icmpv6.h>
+#include <linux/tcp.h>
+#include <linux/udp.h>
+#include "bpf_helpers.h"
+
+#define bpf_printk(fmt, ...)				\
+({							\
+	char ____fmt[] = fmt;				\
+	bpf_trace_printk(____fmt, sizeof(____fmt),	\
+			##__VA_ARGS__);			\
+})
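+/* usage sketch, e.g. bpf_printk("hash 0x%x\n", hash): the macro copies
+ * the format string onto the stack and calls bpf_trace_printk()
+ */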
+
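+/* rotate-left; the (-shift) & 31 form keeps the complementary shift
+ * count in [0, 31], so shift == 0 does not hit an undefined 32-bit shift
+ */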
+static __u32 rol32(__u32 word, unsigned int shift)
+{
+	return (word << shift) | (word >> ((-shift) & 31));
+}
+
+/* copy-paste of jhash from the kernel sources, to make sure llvm
+ * can compile it into a valid sequence of BPF instructions
+ */
+#define __jhash_mix(a, b, c)			\
+{						\
+	a -= c;  a ^= rol32(c, 4);  c += b;	\
+	b -= a;  b ^= rol32(a, 6);  a += c;	\
+	c -= b;  c ^= rol32(b, 8);  b += a;	\
+	a -= c;  a ^= rol32(c, 16); c += b;	\
+	b -= a;  b ^= rol32(a, 19); a += c;	\
+	c -= b;  c ^= rol32(b, 4);  b += a;	\
+}
+
+#define __jhash_final(a, b, c)			\
+{						\
+	c ^= b; c -= rol32(b, 14);		\
+	a ^= c; a -= rol32(c, 11);		\
+	b ^= a; b -= rol32(a, 25);		\
+	c ^= b; c -= rol32(b, 16);		\
+	a ^= c; a -= rol32(c, 4);		\
+	b ^= a; b -= rol32(a, 14);		\
+	c ^= b; c -= rol32(b, 24);		\
+}
+
+#define JHASH_INITVAL		0xdeadbeef
+
+typedef unsigned int u32;
+
+static __attribute__ ((noinline))
+u32 jhash(const void *key, u32 length, u32 initval)
+{
+	u32 a, b, c;
+	const unsigned char *k = key;
+
+	a = b = c = JHASH_INITVAL + length + initval;
+
+	while (length > 12) {
+		a += *(u32 *)(k);
+		b += *(u32 *)(k + 4);
+		c += *(u32 *)(k + 8);
+		__jhash_mix(a, b, c);
+		length -= 12;
+		k += 12;
+	}
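+	/* handle the trailing bytes; each case deliberately falls through */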
+	switch (length) {
+	case 12: c += (u32)k[11]<<24;
+	case 11: c += (u32)k[10]<<16;
+	case 10: c += (u32)k[9]<<8;
+	case 9:  c += k[8];
+	case 8:  b += (u32)k[7]<<24;
+	case 7:  b += (u32)k[6]<<16;
+	case 6:  b += (u32)k[5]<<8;
+	case 5:  b += k[4];
+	case 4:  a += (u32)k[3]<<24;
+	case 3:  a += (u32)k[2]<<16;
+	case 2:  a += (u32)k[1]<<8;
+	case 1:  a += k[0];
+		 __jhash_final(a, b, c);
+	case 0: /* Nothing left to add */
+		break;
+	}
+
+	return c;
+}
+
+static __attribute__ ((noinline))
+u32 __jhash_nwords(u32 a, u32 b, u32 c, u32 initval)
+{
+	a += initval;
+	b += initval;
+	c += initval;
+	__jhash_final(a, b, c);
+	return c;
+}
+
+static __attribute__ ((noinline))
+u32 jhash_2words(u32 a, u32 b, u32 initval)
+{
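+	/* (2 << 2) folds the byte length of two u32 words into the seed,
+	 * matching the kernel's jhash_2words()
+	 */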
+	return __jhash_nwords(a, b, 0, initval + JHASH_INITVAL + (2 << 2));
+}
+
+struct flow_key {
+	union {
+		__be32 src;
+		__be32 srcv6[4];
+	};
+	union {
+		__be32 dst;
+		__be32 dstv6[4];
+	};
+	union {
+		__u32 ports;
+		__u16 port16[2];
+	};
+	__u8 proto;
+};
+
+struct packet_description {
+	struct flow_key flow;
+	__u8 flags;
+};
+
+struct ctl_value {
+	union {
+		__u64 value;
+		__u32 ifindex;
+		__u8 mac[6];
+	};
+};
+
+struct vip_definition {
+	union {
+		__be32 vip;
+		__be32 vipv6[4];
+	};
+	__u16 port;
+	__u16 family;
+	__u8 proto;
+};
+
+struct vip_meta {
+	__u32 flags;
+	__u32 vip_num;
+};
+
+struct real_pos_lru {
+	__u32 pos;
+	__u64 atime;
+};
+
+struct real_definition {
+	union {
+		__be32 dst;
+		__be32 dstv6[4];
+	};
+	__u8 flags;
+};
+
+struct lb_stats {
+	__u64 v2;
+	__u64 v1;
+};
+
+struct bpf_map_def __attribute__ ((section("maps"), used)) vip_map = {
+	.type = BPF_MAP_TYPE_HASH,
+	.key_size = sizeof(struct vip_definition),
+	.value_size = sizeof(struct vip_meta),
+	.max_entries = 512,
+	.map_flags = 0,
+};
+
+struct bpf_map_def __attribute__ ((section("maps"), used)) lru_cache = {
+	.type = BPF_MAP_TYPE_LRU_HASH,
+	.key_size = sizeof(struct flow_key),
+	.value_size = sizeof(struct real_pos_lru),
+	.max_entries = 300,
+	.map_flags = 1U << 1,
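+	/* 1U << 1 is BPF_F_NO_COMMON_LRU: use per-CPU LRU lists */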
+};
+
+struct bpf_map_def __attribute__ ((section("maps"), used)) ch_rings = {
+	.type = BPF_MAP_TYPE_ARRAY,
+	.key_size = sizeof(__u32),
+	.value_size = sizeof(__u32),
+	.max_entries = 12 * 655,
+	.map_flags = 0,
+};
+
+struct bpf_map_def __attribute__ ((section("maps"), used)) reals = {
+	.type = BPF_MAP_TYPE_ARRAY,
+	.key_size = sizeof(__u32),
+	.value_size = sizeof(struct real_definition),
+	.max_entries = 40,
+	.map_flags = 0,
+};
+
+struct bpf_map_def __attribute__ ((section("maps"), used)) stats = {
+	.type = BPF_MAP_TYPE_PERCPU_ARRAY,
+	.key_size = sizeof(__u32),
+	.value_size = sizeof(struct lb_stats),
+	.max_entries = 515,
+	.map_flags = 0,
+};
+
+struct bpf_map_def __attribute__ ((section("maps"), used)) ctl_array = {
+	.type = BPF_MAP_TYPE_ARRAY,
+	.key_size = sizeof(__u32),
+	.value_size = sizeof(struct ctl_value),
+	.max_entries = 16,
+	.map_flags = 0,
+};
+
+struct eth_hdr {
+	unsigned char eth_dest[6];
+	unsigned char eth_source[6];
+	unsigned short eth_proto;
+};
+
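+/* for ICMP error packets the flow is parsed from the inner (original)
+ * header, so the offset skips the ICMP header plus one extra L3 header
+ */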
+static inline __u64 calc_offset(bool is_ipv6, bool is_icmp)
+{
+	__u64 off = sizeof(struct eth_hdr);
+	if (is_ipv6) {
+		off += sizeof(struct ipv6hdr);
+		if (is_icmp)
+			off += sizeof(struct icmp6hdr) + sizeof(struct ipv6hdr);
+	} else {
+		off += sizeof(struct iphdr);
+		if (is_icmp)
+			off += sizeof(struct icmphdr) + sizeof(struct iphdr);
+	}
+	return off;
+}
+
+static __attribute__ ((noinline))
+bool parse_udp(void *data, void *data_end,
+	       bool is_ipv6, struct packet_description *pckt)
+{
+
+	bool is_icmp = !((pckt->flags & (1 << 0)) == 0);
+	__u64 off = calc_offset(is_ipv6, is_icmp);
+	struct udphdr *udp;
+	udp = data + off;
+
+	if (udp + 1 > data_end)
+		return 0;
+	if (!is_icmp) {
+		pckt->flow.port16[0] = udp->source;
+		pckt->flow.port16[1] = udp->dest;
+	} else {
+		pckt->flow.port16[0] = udp->dest;
+		pckt->flow.port16[1] = udp->source;
+	}
+	return 1;
+}
+
+static __attribute__ ((noinline))
+bool parse_tcp(void *data, void *data_end,
+	       bool is_ipv6, struct packet_description *pckt)
+{
+
+	bool is_icmp = !((pckt->flags & (1 << 0)) == 0);
+	__u64 off = calc_offset(is_ipv6, is_icmp);
+	struct tcphdr *tcp;
+
+	tcp = data + off;
+	if (tcp + 1 > data_end)
+		return 0;
+	if (tcp->syn)
+		pckt->flags |= (1 << 1);
+	if (!is_icmp) {
+		pckt->flow.port16[0] = tcp->source;
+		pckt->flow.port16[1] = tcp->dest;
+	} else {
+		pckt->flow.port16[0] = tcp->dest;
+		pckt->flow.port16[1] = tcp->source;
+	}
+	return 1;
+}
+
+static __attribute__ ((noinline))
+bool encap_v6(struct xdp_md *xdp, struct ctl_value *cval,
+	      struct packet_description *pckt,
+	      struct real_definition *dst, __u32 pkt_bytes)
+{
+	struct eth_hdr *new_eth;
+	struct eth_hdr *old_eth;
+	struct ipv6hdr *ip6h;
+	__u32 ip_suffix;
+	void *data_end;
+	void *data;
+
+	if (bpf_xdp_adjust_head(xdp, 0 - (int)sizeof(struct ipv6hdr)))
+		return 0;
+	data = (void *)(long)xdp->data;
+	data_end = (void *)(long)xdp->data_end;
+	new_eth = data;
+	ip6h = data + sizeof(struct eth_hdr);
+	old_eth = data + sizeof(struct ipv6hdr);
+	if (new_eth + 1 > data_end ||
+	    old_eth + 1 > data_end || ip6h + 1 > data_end)
+		return 0;
+	memcpy(new_eth->eth_dest, cval->mac, 6);
+	memcpy(new_eth->eth_source, old_eth->eth_dest, 6);
+	new_eth->eth_proto = 56710;
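+	/* 56710 is ETH_P_IPV6 (0x86DD) byte-swapped for little-endian hosts */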
+	ip6h->version = 6;
+	ip6h->priority = 0;
+	memset(ip6h->flow_lbl, 0, sizeof(ip6h->flow_lbl));
+
+	ip6h->nexthdr = IPPROTO_IPV6;
+	ip_suffix = pckt->flow.srcv6[3] ^ pckt->flow.port16[0];
+	ip6h->payload_len =
+	    __builtin_bswap16(pkt_bytes + sizeof(struct ipv6hdr));
+	ip6h->hop_limit = 4;
+
+	ip6h->saddr.in6_u.u6_addr32[0] = 1;
+	ip6h->saddr.in6_u.u6_addr32[1] = 2;
+	ip6h->saddr.in6_u.u6_addr32[2] = 3;
+	ip6h->saddr.in6_u.u6_addr32[3] = ip_suffix;
+	memcpy(ip6h->daddr.in6_u.u6_addr32, dst->dstv6, 16);
+	return 1;
+}
+
+static __attribute__ ((noinline))
+bool encap_v4(struct xdp_md *xdp, struct ctl_value *cval,
+	      struct packet_description *pckt,
+	      struct real_definition *dst, __u32 pkt_bytes)
+{
+
+	__u32 ip_suffix = __builtin_bswap16(pckt->flow.port16[0]);
+	struct eth_hdr *new_eth;
+	struct eth_hdr *old_eth;
+	__u16 *next_iph_u16;
+	struct iphdr *iph;
+	__u32 csum = 0;
+	void *data_end;
+	void *data;
+
+	ip_suffix <<= 15;
+	ip_suffix ^= pckt->flow.src;
+	if (bpf_xdp_adjust_head(xdp, 0 - (int)sizeof(struct iphdr)))
+		return 0;
+	data = (void *)(long)xdp->data;
+	data_end = (void *)(long)xdp->data_end;
+	new_eth = data;
+	iph = data + sizeof(struct eth_hdr);
+	old_eth = data + sizeof(struct iphdr);
+	if (new_eth + 1 > data_end ||
+	    old_eth + 1 > data_end || iph + 1 > data_end)
+		return 0;
+	memcpy(new_eth->eth_dest, cval->mac, 6);
+	memcpy(new_eth->eth_source, old_eth->eth_dest, 6);
+	new_eth->eth_proto = 8;
+	iph->version = 4;
+	iph->ihl = 5;
+	iph->frag_off = 0;
+	iph->protocol = IPPROTO_IPIP;
+	iph->check = 0;
+	iph->tos = 1;
+	iph->tot_len = __builtin_bswap16(pkt_bytes + sizeof(struct iphdr));
+	/* don't update iph->daddr: it would overwrite the old eth_proto
+	 * and make repeated iterations of bpf_prog_run() fail
+	 */
+
+	iph->saddr = ((0xFFFF0000 & ip_suffix) | 4268) ^ dst->dst;
+	iph->ttl = 4;
+
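+	/* Internet checksum (RFC 1071): sum the header as 16-bit words,
+	 * then fold the carry and invert
+	 */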
+	next_iph_u16 = (__u16 *) iph;
+#pragma clang loop unroll(full)
+	for (int i = 0; i < sizeof(struct iphdr) >> 1; i++)
+		csum += *next_iph_u16++;
+	iph->check = ~((csum & 0xffff) + (csum >> 16));
+	if (bpf_xdp_adjust_head(xdp, (int)sizeof(struct iphdr)))
+		return 0;
+	return 1;
+}
+
+static __attribute__ ((noinline))
+bool decap_v6(struct xdp_md *xdp, void **data, void **data_end, bool inner_v4)
+{
+	struct eth_hdr *new_eth;
+	struct eth_hdr *old_eth;
+
+	old_eth = *data;
+	new_eth = *data + sizeof(struct ipv6hdr);
+	memcpy(new_eth->eth_source, old_eth->eth_source, 6);
+	memcpy(new_eth->eth_dest, old_eth->eth_dest, 6);
+	if (inner_v4)
+		new_eth->eth_proto = 8;
+	else
+		new_eth->eth_proto = 56710;
+	if (bpf_xdp_adjust_head(xdp, (int)sizeof(struct ipv6hdr)))
+		return 0;
+	*data = (void *)(long)xdp->data;
+	*data_end = (void *)(long)xdp->data_end;
+	return 1;
+}
+
+static __attribute__ ((noinline))
+bool decap_v4(struct xdp_md *xdp, void **data, void **data_end)
+{
+	struct eth_hdr *new_eth;
+	struct eth_hdr *old_eth;
+
+	old_eth = *data;
+	new_eth = *data + sizeof(struct iphdr);
+	memcpy(new_eth->eth_source, old_eth->eth_source, 6);
+	memcpy(new_eth->eth_dest, old_eth->eth_dest, 6);
+	new_eth->eth_proto = 8;
+	if (bpf_xdp_adjust_head(xdp, (int)sizeof(struct iphdr)))
+		return 0;
+	*data = (void *)(long)xdp->data;
+	*data_end = (void *)(long)xdp->data_end;
+	return 1;
+}
+
+static __attribute__ ((noinline))
+int swap_mac_and_send(void *data, void *data_end)
+{
+	unsigned char tmp_mac[6];
+	struct eth_hdr *eth;
+
+	eth = data;
+	memcpy(tmp_mac, eth->eth_source, 6);
+	memcpy(eth->eth_source, eth->eth_dest, 6);
+	memcpy(eth->eth_dest, tmp_mac, 6);
+	return XDP_TX;
+}
+
+static __attribute__ ((noinline))
+int send_icmp_reply(void *data, void *data_end)
+{
+	struct icmphdr *icmp_hdr;
+	__u16 *next_iph_u16;
+	__u32 tmp_addr = 0;
+	struct iphdr *iph;
+	__u32 csum1 = 0;
+	__u32 csum = 0;
+	__u64 off = 0;
+
+	if (data + sizeof(struct eth_hdr)
+	     + sizeof(struct iphdr) + sizeof(struct icmphdr) > data_end)
+		return XDP_DROP;
+	off += sizeof(struct eth_hdr);
+	iph = data + off;
+	off += sizeof(struct iphdr);
+	icmp_hdr = data + off;
+	icmp_hdr->type = 0;
+	icmp_hdr->checksum += 0x0007;
+	iph->ttl = 4;
+	tmp_addr = iph->daddr;
+	iph->daddr = iph->saddr;
+	iph->saddr = tmp_addr;
+	iph->check = 0;
+	next_iph_u16 = (__u16 *) iph;
+#pragma clang loop unroll(full)
+	for (int i = 0; i < sizeof(struct iphdr) >> 1; i++)
+		csum += *next_iph_u16++;
+	iph->check = ~((csum & 0xffff) + (csum >> 16));
+	return swap_mac_and_send(data, data_end);
+}
+
+static __attribute__ ((noinline))
+int send_icmp6_reply(void *data, void *data_end)
+{
+	struct icmp6hdr *icmp_hdr;
+	struct ipv6hdr *ip6h;
+	__be32 tmp_addr[4];
+	__u64 off = 0;
+
+	if (data + sizeof(struct eth_hdr)
+	     + sizeof(struct ipv6hdr) + sizeof(struct icmp6hdr) > data_end)
+		return XDP_DROP;
+	off += sizeof(struct eth_hdr);
+	ip6h = data + off;
+	off += sizeof(struct ipv6hdr);
+	icmp_hdr = data + off;
+	icmp_hdr->icmp6_type = 129;
+	icmp_hdr->icmp6_cksum -= 0x0001;
+	ip6h->hop_limit = 4;
+	memcpy(tmp_addr, ip6h->saddr.in6_u.u6_addr32, 16);
+	memcpy(ip6h->saddr.in6_u.u6_addr32, ip6h->daddr.in6_u.u6_addr32, 16);
+	memcpy(ip6h->daddr.in6_u.u6_addr32, tmp_addr, 16);
+	return swap_mac_and_send(data, data_end);
+}
+
+static __attribute__ ((noinline))
+int parse_icmpv6(void *data, void *data_end, __u64 off,
+		 struct packet_description *pckt)
+{
+	struct icmp6hdr *icmp_hdr;
+	struct ipv6hdr *ip6h;
+
+	icmp_hdr = data + off;
+	if (icmp_hdr + 1 > data_end)
+		return XDP_DROP;
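+	/* type 128 is an echo request: answer it with a type 129 reply */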
+	if (icmp_hdr->icmp6_type == 128)
+		return send_icmp6_reply(data, data_end);
+	if (icmp_hdr->icmp6_type != 3)
+		return XDP_PASS;
+	off += sizeof(struct icmp6hdr);
+	ip6h = data + off;
+	if (ip6h + 1 > data_end)
+		return XDP_DROP;
+	pckt->flow.proto = ip6h->nexthdr;
+	pckt->flags |= (1 << 0);
+	memcpy(pckt->flow.srcv6, ip6h->daddr.in6_u.u6_addr32, 16);
+	memcpy(pckt->flow.dstv6, ip6h->saddr.in6_u.u6_addr32, 16);
+	return -1;
+}
+
+static __attribute__ ((noinline))
+int parse_icmp(void *data, void *data_end, __u64 off,
+	       struct packet_description *pckt)
+{
+	struct icmphdr *icmp_hdr;
+	struct iphdr *iph;
+
+	icmp_hdr = data + off;
+	if (icmp_hdr + 1 > data_end)
+		return XDP_DROP;
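+	/* type 8 is an echo request; type 3/code 4 ("fragmentation
+	 * needed") carries the original header, which is parsed below
+	 */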
+	if (icmp_hdr->type == 8)
+		return send_icmp_reply(data, data_end);
+	if ((icmp_hdr->type != 3) || (icmp_hdr->code != 4))
+		return XDP_PASS;
+	off += sizeof(struct icmphdr);
+	iph = data + off;
+	if (iph + 1 > data_end)
+		return XDP_DROP;
+	if (iph->ihl != 5)
+		return XDP_DROP;
+	pckt->flow.proto = iph->protocol;
+	pckt->flags |= (1 << 0);
+	pckt->flow.src = iph->daddr;
+	pckt->flow.dst = iph->saddr;
+	return -1;
+}
+
+static __attribute__ ((noinline))
+__u32 get_packet_hash(struct packet_description *pckt,
+		      bool hash_16bytes)
+{
+	if (hash_16bytes)
+		return jhash_2words(jhash(pckt->flow.srcv6, 16, 12),
+				    pckt->flow.ports, 24);
+	else
+		return jhash_2words(pckt->flow.src, pckt->flow.ports,
+				    24);
+}
+
+__attribute__ ((noinline))
+static bool get_packet_dst(struct real_definition **real,
+			   struct packet_description *pckt,
+			   struct vip_meta *vip_info,
+			   bool is_ipv6, void *lru_map)
+{
+	struct real_pos_lru new_dst_lru = { };
+	bool hash_16bytes = is_ipv6;
+	__u32 *real_pos, hash, key;
+	__u64 cur_time;
+
+	if (vip_info->flags & (1 << 2))
+		hash_16bytes = 1;
+	if (vip_info->flags & (1 << 3)) {
+		pckt->flow.port16[0] = pckt->flow.port16[1];
+		memset(pckt->flow.srcv6, 0, 16);
+	}
+	hash = get_packet_hash(pckt, hash_16bytes);
+	if (hash != 0x358459b7 /* jhash of ipv4 packet */  &&
+	    hash != 0x2f4bc6bb /* jhash of ipv6 packet */)
+		return 0;
+	key = 2 * vip_info->vip_num + hash % 2;
+	real_pos = bpf_map_lookup_elem(&ch_rings, &key);
+	if (!real_pos)
+		return 0;
+	key = *real_pos;
+	*real = bpf_map_lookup_elem(&reals, &key);
+	if (!(*real))
+		return 0;
+	if (!(vip_info->flags & (1 << 1))) {
+		__u32 conn_rate_key = 512 + 2;
+		struct lb_stats *conn_rate_stats =
+		    bpf_map_lookup_elem(&stats, &conn_rate_key);
+
+		if (!conn_rate_stats)
+			return 1;
+		cur_time = bpf_ktime_get_ns();
+		if ((cur_time - conn_rate_stats->v2) >> 32 > 0xffFFFF) {
+			conn_rate_stats->v1 = 1;
+			conn_rate_stats->v2 = cur_time;
+		} else {
+			conn_rate_stats->v1 += 1;
+			if (conn_rate_stats->v1 >= 1)
+				return 1;
+		}
+		if (pckt->flow.proto == IPPROTO_UDP)
+			new_dst_lru.atime = cur_time;
+		new_dst_lru.pos = key;
+		bpf_map_update_elem(lru_map, &pckt->flow, &new_dst_lru, 0);
+	}
+	return 1;
+}
+
+__attribute__ ((noinline))
+static void connection_table_lookup(struct real_definition **real,
+				    struct packet_description *pckt,
+				    void *lru_map)
+{
+
+	struct real_pos_lru *dst_lru;
+	__u64 cur_time;
+	__u32 key;
+
+	dst_lru = bpf_map_lookup_elem(lru_map, &pckt->flow);
+	if (!dst_lru)
+		return;
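+	/* for UDP, trust the cached entry only if hit within the last 300us */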
+	if (pckt->flow.proto == IPPROTO_UDP) {
+		cur_time = bpf_ktime_get_ns();
+		if (cur_time - dst_lru->atime > 300000)
+			return;
+		dst_lru->atime = cur_time;
+	}
+	key = dst_lru->pos;
+	*real = bpf_map_lookup_elem(&reals, &key);
+}
+
+/* don't believe your eyes!
+ * the function below has 6 arguments, whereas bpf and llvm allow a
+ * maximum of 5, but since it's _static_, llvm can optimize one
+ * argument away
+ */
+__attribute__ ((noinline))
+static int process_l3_headers_v6(struct packet_description *pckt,
+				 __u8 *protocol, __u64 off,
+				 __u16 *pkt_bytes, void *data,
+				 void *data_end)
+{
+	struct ipv6hdr *ip6h;
+	__u64 iph_len;
+	int action;
+
+	ip6h = data + off;
+	if (ip6h + 1 > data_end)
+		return XDP_DROP;
+	iph_len = sizeof(struct ipv6hdr);
+	*protocol = ip6h->nexthdr;
+	pckt->flow.proto = *protocol;
+	*pkt_bytes = __builtin_bswap16(ip6h->payload_len);
+	off += iph_len;
+	if (*protocol == 45) {
+		return XDP_DROP;
+	} else if (*protocol == 59) {
+		action = parse_icmpv6(data, data_end, off, pckt);
+		if (action >= 0)
+			return action;
+	} else {
+		memcpy(pckt->flow.srcv6, ip6h->saddr.in6_u.u6_addr32, 16);
+		memcpy(pckt->flow.dstv6, ip6h->daddr.in6_u.u6_addr32, 16);
+	}
+	return -1;
+}
+
+__attribute__ ((noinline))
+static int process_l3_headers_v4(struct packet_description *pckt,
+				 __u8 *protocol, __u64 off,
+				 __u16 *pkt_bytes, void *data,
+				 void *data_end)
+{
+	struct iphdr *iph;
+	__u64 iph_len;
+	int action;
+
+	iph = data + off;
+	if (iph + 1 > data_end)
+		return XDP_DROP;
+	if (iph->ihl != 5)
+		return XDP_DROP;
+	*protocol = iph->protocol;
+	pckt->flow.proto = *protocol;
+	*pkt_bytes = __builtin_bswap16(iph->tot_len);
+	off += 20;
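+	/* 65343 is 0x3FFF byte-swapped: drop fragments (MF or offset set) */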
+	if (iph->frag_off & 65343)
+		return XDP_DROP;
+	if (*protocol == IPPROTO_ICMP) {
+		action = parse_icmp(data, data_end, off, pckt);
+		if (action >= 0)
+			return action;
+	} else {
+		pckt->flow.src = iph->saddr;
+		pckt->flow.dst = iph->daddr;
+	}
+	return -1;
+}
+
+__attribute__ ((noinline))
+static int process_packet(void *data, __u64 off, void *data_end,
+			  bool is_ipv6, struct xdp_md *xdp)
+{
+
+	struct real_definition *dst = NULL;
+	struct packet_description pckt = { };
+	struct vip_definition vip = { };
+	struct lb_stats *data_stats;
+	struct eth_hdr *eth = data;
+	void *lru_map = &lru_cache;
+	struct vip_meta *vip_info;
+	__u32 lru_stats_key = 513;
+	__u32 mac_addr_pos = 0;
+	__u32 stats_key = 512;
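+	/* slots 512-514 of the 515-entry stats map are global counters,
+	 * sitting above the per-VIP slots indexed by vip_num
+	 */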
+	struct ctl_value *cval;
+	__u16 pkt_bytes;
+	__u64 iph_len;
+	__u8 protocol;
+	__u32 vip_num;
+	int action;
+
+	if (is_ipv6)
+		action = process_l3_headers_v6(&pckt, &protocol, off,
+					       &pkt_bytes, data, data_end);
+	else
+		action = process_l3_headers_v4(&pckt, &protocol, off,
+					       &pkt_bytes, data, data_end);
+	if (action >= 0)
+		return action;
+	protocol = pckt.flow.proto;
+	if (protocol == IPPROTO_TCP) {
+		if (!parse_tcp(data, data_end, is_ipv6, &pckt))
+			return XDP_DROP;
+	} else if (protocol == IPPROTO_UDP) {
+		if (!parse_udp(data, data_end, is_ipv6, &pckt))
+			return XDP_DROP;
+	} else {
+		return XDP_TX;
+	}
+
+	if (is_ipv6)
+		memcpy(vip.vipv6, pckt.flow.dstv6, 16);
+	else
+		vip.vip = pckt.flow.dst;
+	vip.port = pckt.flow.port16[1];
+	vip.proto = pckt.flow.proto;
+	vip_info = bpf_map_lookup_elem(&vip_map, &vip);
+	if (!vip_info) {
+		vip.port = 0;
+		vip_info = bpf_map_lookup_elem(&vip_map, &vip);
+		if (!vip_info)
+			return XDP_PASS;
+		if (!(vip_info->flags & (1 << 4)))
+			pckt.flow.port16[1] = 0;
+	}
+	if (data_end - data > 1400)
+		return XDP_DROP;
+	data_stats = bpf_map_lookup_elem(&stats, &stats_key);
+	if (!data_stats)
+		return XDP_DROP;
+	data_stats->v1 += 1;
+	if (!dst) {
+		if (vip_info->flags & (1 << 0))
+			pckt.flow.port16[0] = 0;
+		if (!(pckt.flags & (1 << 1)) && !(vip_info->flags & (1 << 1)))
+			connection_table_lookup(&dst, &pckt, lru_map);
+		if (dst)
+			goto out;
+		if (pckt.flow.proto == IPPROTO_TCP) {
+			struct lb_stats *lru_stats =
+			    bpf_map_lookup_elem(&stats, &lru_stats_key);
+
+			if (!lru_stats)
+				return XDP_DROP;
+			if (pckt.flags & (1 << 1))
+				lru_stats->v1 += 1;
+			else
+				lru_stats->v2 += 1;
+		}
+		if (!get_packet_dst(&dst, &pckt, vip_info, is_ipv6, lru_map))
+			return XDP_DROP;
+		data_stats->v2 += 1;
+	}
+out:
+	cval = bpf_map_lookup_elem(&ctl_array, &mac_addr_pos);
+	if (!cval)
+		return XDP_DROP;
+	if (dst->flags & (1 << 0)) {
+		if (!encap_v6(xdp, cval, &pckt, dst, pkt_bytes))
+			return XDP_DROP;
+	} else {
+		if (!encap_v4(xdp, cval, &pckt, dst, pkt_bytes))
+			return XDP_DROP;
+	}
+	vip_num = vip_info->vip_num;
+	data_stats = bpf_map_lookup_elem(&stats, &vip_num);
+	if (!data_stats)
+		return XDP_DROP;
+	data_stats->v1 += 1;
+	data_stats->v2 += pkt_bytes;
+
+	data = (void *)(long)xdp->data;
+	data_end = (void *)(long)xdp->data_end;
+	if (data + 4 > data_end)
+		return XDP_DROP;
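+	/* expose the chosen real in the packet so a test run can observe
+	 * the pick; the packet is dropped either way
+	 */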
+	*(u32 *)data = dst->dst;
+	return XDP_DROP;
+}
+
+__attribute__ ((section("xdp-test"), used))
+int balancer_ingress(struct xdp_md *ctx)
+{
+	void *data = (void *)(long)ctx->data;
+	void *data_end = (void *)(long)ctx->data_end;
+	struct eth_hdr *eth = data;
+	__u32 eth_proto;
+	__u32 nh_off;
+
+	nh_off = sizeof(struct eth_hdr);
+	if (data + nh_off > data_end)
+		return XDP_DROP;
+	eth_proto = eth->eth_proto;
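+	/* 8 is ETH_P_IP (0x0800) and 56710 is ETH_P_IPV6 (0x86DD),
+	 * byte-swapped for little-endian hosts
+	 */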
+	if (eth_proto == 8)
+		return process_packet(data, nh_off, data_end, 0, ctx);
+	else if (eth_proto == 56710)
+		return process_packet(data, nh_off, data_end, 1, ctx);
+	else
+		return XDP_DROP;
+}
+
+char _license[] __attribute__ ((section("license"), used)) = "GPL";
+int _version __attribute__ ((section("version"), used)) = 1;
diff --git a/tools/testing/selftests/net/Makefile b/tools/testing/selftests/net/Makefile
index 500c74db746c..d7c30d366935 100644
--- a/tools/testing/selftests/net/Makefile
+++ b/tools/testing/selftests/net/Makefile
@@ -5,6 +5,7 @@ CFLAGS =  -Wall -Wl,--no-as-needed -O2 -g
 CFLAGS += -I../../../../usr/include/
 
 TEST_PROGS := run_netsocktests run_afpackettests test_bpf.sh netdevice.sh rtnetlink.sh
+TEST_PROGS += fib_tests.sh
 TEST_GEN_FILES =  socket
 TEST_GEN_FILES += psock_fanout psock_tpacket msg_zerocopy
 TEST_GEN_PROGS = reuseport_bpf reuseport_bpf_cpu reuseport_bpf_numa
diff --git a/tools/testing/selftests/net/fib_tests.sh b/tools/testing/selftests/net/fib_tests.sh
new file mode 100755
index 000000000000..a9154eefb2e2
--- /dev/null
+++ b/tools/testing/selftests/net/fib_tests.sh
@@ -0,0 +1,429 @@
+#!/bin/bash
+# SPDX-License-Identifier: GPL-2.0
+
+# This test is for checking IPv4 and IPv6 FIB behavior in response to
+# different events.
+
+ret=0
+
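+# check_err <rc>: record the first non-zero return code in $ret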
+check_err()
+{
+	if [ $ret -eq 0 ]; then
+		ret=$1
+	fi
+}
+
+check_fail()
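+# check_fail <rc>: expect failure; a return code of 0 marks the test failed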
+{
+	if [ $1 -eq 0 ]; then
+		ret=1
+	fi
+}
+
+netns_create()
+{
+	local testns=$1
+
+	ip netns add $testns
+	ip netns exec $testns ip link set dev lo up
+}
+
+fib_unreg_unicast_test()
+{
+	ret=0
+
+	netns_create "testns"
+
+	ip netns exec testns ip link add dummy0 type dummy
+	ip netns exec testns ip link set dev dummy0 up
+
+	ip netns exec testns ip address add 198.51.100.1/24 dev dummy0
+	ip netns exec testns ip -6 address add 2001:db8:1::1/64 dev dummy0
+
+	ip netns exec testns ip route get fibmatch 198.51.100.2 &> /dev/null
+	check_err $?
+	ip netns exec testns ip -6 route get fibmatch 2001:db8:1::2 &> /dev/null
+	check_err $?
+
+	ip netns exec testns ip link del dev dummy0
+	check_err $?
+
+	ip netns exec testns ip route get fibmatch 198.51.100.2 &> /dev/null
+	check_fail $?
+	ip netns exec testns ip -6 route get fibmatch 2001:db8:1::2 &> /dev/null
+	check_fail $?
+
+	ip netns del testns
+
+	if [ $ret -ne 0 ]; then
+		echo "FAIL: unicast route test"
+		return 1
+	fi
+	echo "PASS: unicast route test"
+}
+
+fib_unreg_multipath_test()
+{
+	ret=0
+
+	netns_create "testns"
+
+	ip netns exec testns ip link add dummy0 type dummy
+	ip netns exec testns ip link set dev dummy0 up
+
+	ip netns exec testns ip link add dummy1 type dummy
+	ip netns exec testns ip link set dev dummy1 up
+
+	ip netns exec testns ip address add 198.51.100.1/24 dev dummy0
+	ip netns exec testns ip -6 address add 2001:db8:1::1/64 dev dummy0
+
+	ip netns exec testns ip address add 192.0.2.1/24 dev dummy1
+	ip netns exec testns ip -6 address add 2001:db8:2::1/64 dev dummy1
+
+	ip netns exec testns ip route add 203.0.113.0/24 \
+		nexthop via 198.51.100.2 dev dummy0 \
+		nexthop via 192.0.2.2 dev dummy1
+	ip netns exec testns ip -6 route add 2001:db8:3::/64 \
+		nexthop via 2001:db8:1::2 dev dummy0 \
+		nexthop via 2001:db8:2::2 dev dummy1
+
+	ip netns exec testns ip route get fibmatch 203.0.113.1 &> /dev/null
+	check_err $?
+	ip netns exec testns ip -6 route get fibmatch 2001:db8:3::1 &> /dev/null
+	check_err $?
+
+	ip netns exec testns ip link del dev dummy0
+	check_err $?
+
+	ip netns exec testns ip route get fibmatch 203.0.113.1 &> /dev/null
+	check_fail $?
+	ip netns exec testns ip -6 route get fibmatch 2001:db8:3::1 &> /dev/null
+	# In IPv6 we do not flush the entire multipath route.
+	check_err $?
+
+	ip netns exec testns ip link del dev dummy1
+
+	ip netns del testns
+
+	if [ $ret -ne 0 ]; then
+		echo "FAIL: multipath route test"
+		return 1
+	fi
+	echo "PASS: multipath route test"
+}
+
+fib_unreg_test()
+{
+	echo "Running netdev unregister tests"
+
+	fib_unreg_unicast_test
+	fib_unreg_multipath_test
+}
+
+fib_down_unicast_test()
+{
+	ret=0
+
+	netns_create "testns"
+
+	ip netns exec testns ip link add dummy0 type dummy
+	ip netns exec testns ip link set dev dummy0 up
+
+	ip netns exec testns ip address add 198.51.100.1/24 dev dummy0
+	ip netns exec testns ip -6 address add 2001:db8:1::1/64 dev dummy0
+
+	ip netns exec testns ip route get fibmatch 198.51.100.2 &> /dev/null
+	check_err $?
+	ip netns exec testns ip -6 route get fibmatch 2001:db8:1::2 &> /dev/null
+	check_err $?
+
+	ip netns exec testns ip link set dev dummy0 down
+	check_err $?
+
+	ip netns exec testns ip route get fibmatch 198.51.100.2 &> /dev/null
+	check_fail $?
+	ip netns exec testns ip -6 route get fibmatch 2001:db8:1::2 &> /dev/null
+	check_fail $?
+
+	ip netns exec testns ip link del dev dummy0
+
+	ip netns del testns
+
+	if [ $ret -ne 0 ]; then
+		echo "FAIL: unicast route test"
+		return 1
+	fi
+	echo "PASS: unicast route test"
+}
+
+fib_down_multipath_test_do()
+{
+	local down_dev=$1
+	local up_dev=$2
+
+	ip netns exec testns ip route get fibmatch 203.0.113.1 \
+		oif $down_dev &> /dev/null
+	check_fail $?
+	ip netns exec testns ip -6 route get fibmatch 2001:db8:3::1 \
+		oif $down_dev &> /dev/null
+	check_fail $?
+
+	ip netns exec testns ip route get fibmatch 203.0.113.1 \
+		oif $up_dev &> /dev/null
+	check_err $?
+	ip netns exec testns ip -6 route get fibmatch 2001:db8:3::1 \
+		oif $up_dev &> /dev/null
+	check_err $?
+
+	ip netns exec testns ip route get fibmatch 203.0.113.1 | \
+		grep $down_dev | grep -q "dead linkdown"
+	check_err $?
+	ip netns exec testns ip -6 route get fibmatch 2001:db8:3::1 | \
+		grep $down_dev | grep -q "dead linkdown"
+	check_err $?
+
+	ip netns exec testns ip route get fibmatch 203.0.113.1 | \
+		grep $up_dev | grep -q "dead linkdown"
+	check_fail $?
+	ip netns exec testns ip -6 route get fibmatch 2001:db8:3::1 | \
+		grep $up_dev | grep -q "dead linkdown"
+	check_fail $?
+}
+
+fib_down_multipath_test()
+{
+	ret=0
+
+	netns_create "testns"
+
+	ip netns exec testns ip link add dummy0 type dummy
+	ip netns exec testns ip link set dev dummy0 up
+
+	ip netns exec testns ip link add dummy1 type dummy
+	ip netns exec testns ip link set dev dummy1 up
+
+	ip netns exec testns ip address add 198.51.100.1/24 dev dummy0
+	ip netns exec testns ip -6 address add 2001:db8:1::1/64 dev dummy0
+
+	ip netns exec testns ip address add 192.0.2.1/24 dev dummy1
+	ip netns exec testns ip -6 address add 2001:db8:2::1/64 dev dummy1
+
+	ip netns exec testns ip route add 203.0.113.0/24 \
+		nexthop via 198.51.100.2 dev dummy0 \
+		nexthop via 192.0.2.2 dev dummy1
+	ip netns exec testns ip -6 route add 2001:db8:3::/64 \
+		nexthop via 2001:db8:1::2 dev dummy0 \
+		nexthop via 2001:db8:2::2 dev dummy1
+
+	ip netns exec testns ip route get fibmatch 203.0.113.1 &> /dev/null
+	check_err $?
+	ip netns exec testns ip -6 route get fibmatch 2001:db8:3::1 &> /dev/null
+	check_err $?
+
+	ip netns exec testns ip link set dev dummy0 down
+	check_err $?
+
+	fib_down_multipath_test_do "dummy0" "dummy1"
+
+	ip netns exec testns ip link set dev dummy0 up
+	check_err $?
+	ip netns exec testns ip link set dev dummy1 down
+	check_err $?
+
+	fib_down_multipath_test_do "dummy1" "dummy0"
+
+	ip netns exec testns ip link set dev dummy0 down
+	check_err $?
+
+	ip netns exec testns ip route get fibmatch 203.0.113.1 &> /dev/null
+	check_fail $?
+	ip netns exec testns ip -6 route get fibmatch 2001:db8:3::1 &> /dev/null
+	check_fail $?
+
+	ip netns exec testns ip link del dev dummy1
+	ip netns exec testns ip link del dev dummy0
+
+	ip netns del testns
+
+	if [ $ret -ne 0 ]; then
+		echo "FAIL: multipath route test"
+		return 1
+	fi
+	echo "PASS: multipath route test"
+}
+
+fib_down_test()
+{
+	echo "Running netdev down tests"
+
+	fib_down_unicast_test
+	fib_down_multipath_test
+}
+
+fib_carrier_local_test()
+{
+	ret=0
+
+	# Local routes should not be affected when carrier changes.
+	netns_create "testns"
+
+	ip netns exec testns ip link add dummy0 type dummy
+	ip netns exec testns ip link set dev dummy0 up
+
+	ip netns exec testns ip link set dev dummy0 carrier on
+
+	ip netns exec testns ip address add 198.51.100.1/24 dev dummy0
+	ip netns exec testns ip -6 address add 2001:db8:1::1/64 dev dummy0
+
+	ip netns exec testns ip route get fibmatch 198.51.100.1 &> /dev/null
+	check_err $?
+	ip netns exec testns ip -6 route get fibmatch 2001:db8:1::1 &> /dev/null
+	check_err $?
+
+	ip netns exec testns ip route get fibmatch 198.51.100.1 | \
+		grep -q "linkdown"
+	check_fail $?
+	ip netns exec testns ip -6 route get fibmatch 2001:db8:1::1 | \
+		grep -q "linkdown"
+	check_fail $?
+
+	ip netns exec testns ip link set dev dummy0 carrier off
+
+	ip netns exec testns ip route get fibmatch 198.51.100.1 &> /dev/null
+	check_err $?
+	ip netns exec testns ip -6 route get fibmatch 2001:db8:1::1 &> /dev/null
+	check_err $?
+
+	ip netns exec testns ip route get fibmatch 198.51.100.1 | \
+		grep -q "linkdown"
+	check_fail $?
+	ip netns exec testns ip -6 route get fibmatch 2001:db8:1::1 | \
+		grep -q "linkdown"
+	check_fail $?
+
+	ip netns exec testns ip address add 192.0.2.1/24 dev dummy0
+	ip netns exec testns ip -6 address add 2001:db8:2::1/64 dev dummy0
+
+	ip netns exec testns ip route get fibmatch 192.0.2.1 &> /dev/null
+	check_err $?
+	ip netns exec testns ip -6 route get fibmatch 2001:db8:2::1 &> /dev/null
+	check_err $?
+
+	ip netns exec testns ip route get fibmatch 192.0.2.1 | \
+		grep -q "linkdown"
+	check_fail $?
+	ip netns exec testns ip -6 route get fibmatch 2001:db8:2::1 | \
+		grep -q "linkdown"
+	check_fail $?
+
+	ip netns exec testns ip link del dev dummy0
+
+	ip netns del testns
+
+	if [ $ret -ne 0 ]; then
+		echo "FAIL: local route carrier test"
+		return 1
+	fi
+	echo "PASS: local route carrier test"
+}
+
+fib_carrier_unicast_test()
+{
+	ret=0
+
+	netns_create "testns"
+
+	ip netns exec testns ip link add dummy0 type dummy
+	ip netns exec testns ip link set dev dummy0 up
+
+	ip netns exec testns ip link set dev dummy0 carrier on
+
+	ip netns exec testns ip address add 198.51.100.1/24 dev dummy0
+	ip netns exec testns ip -6 address add 2001:db8:1::1/64 dev dummy0
+
+	ip netns exec testns ip route get fibmatch 198.51.100.2 &> /dev/null
+	check_err $?
+	ip netns exec testns ip -6 route get fibmatch 2001:db8:1::2 &> /dev/null
+	check_err $?
+
+	ip netns exec testns ip route get fibmatch 198.51.100.2 | \
+		grep -q "linkdown"
+	check_fail $?
+	ip netns exec testns ip -6 route get fibmatch 2001:db8:1::2 | \
+		grep -q "linkdown"
+	check_fail $?
+
+	ip netns exec testns ip link set dev dummy0 carrier off
+
+	ip netns exec testns ip route get fibmatch 198.51.100.2 &> /dev/null
+	check_err $?
+	ip netns exec testns ip -6 route get fibmatch 2001:db8:1::2 &> /dev/null
+	check_err $?
+
+	ip netns exec testns ip route get fibmatch 198.51.100.2 | \
+		grep -q "linkdown"
+	check_err $?
+	ip netns exec testns ip -6 route get fibmatch 2001:db8:1::2 | \
+		grep -q "linkdown"
+	check_err $?
+
+	ip netns exec testns ip address add 192.0.2.1/24 dev dummy0
+	ip netns exec testns ip -6 address add 2001:db8:2::1/64 dev dummy0
+
+	ip netns exec testns ip route get fibmatch 192.0.2.2 &> /dev/null
+	check_err $?
+	ip netns exec testns ip -6 route get fibmatch 2001:db8:2::2 &> /dev/null
+	check_err $?
+
+	ip netns exec testns ip route get fibmatch 192.0.2.2 | \
+		grep -q "linkdown"
+	check_err $?
+	ip netns exec testns ip -6 route get fibmatch 2001:db8:2::2 | \
+		grep -q "linkdown"
+	check_err $?
+
+	ip netns exec testns ip link del dev dummy0
+
+	ip netns del testns
+
+	if [ $ret -ne 0 ]; then
+		echo "FAIL: unicast route carrier test"
+		return 1
+	fi
+	echo "PASS: unicast route carrier test"
+}
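+
+# Carrier loss differs from administrative down: the unicast routes stay in
+# the FIB (the lookups above still succeed) but are flagged "linkdown".
+# Whether flagged routes are also skipped during lookup is a separate
+# per-device sysctl; a hedged sketch:
+#
+#   $ sysctl net.ipv4.conf.dummy0.ignore_routes_with_linkdown=1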
+
+fib_carrier_test()
+{
+	echo "Running netdev carrier change tests"
+
+	fib_carrier_local_test
+	fib_carrier_unicast_test
+}
+
+fib_test()
+{
+	fib_unreg_test
+	fib_down_test
+	fib_carrier_test
+}
+
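+# Usage sketch (script name assumed here; must run as root since the tests
+# create network namespaces and dummy devices):
+#
+#   $ sudo ./fib_tests.sh
+#   Running netdev unregister tests
+#   PASS: unicast route test
+#   PASS: multipath route test
+#   ...
+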
+if [ "$(id -u)" -ne 0 ]; then
+	echo "SKIP: Need root privileges"
+	exit 0
+fi
+
+if [ ! -x "$(command -v ip)" ]; then
+	echo "SKIP: Could not run test without ip tool"
+	exit 0
+fi
+
+ip route help 2>&1 | grep -q fibmatch
+if [ $? -ne 0 ]; then
+	echo "SKIP: iproute2 too old, missing fibmatch"
+	exit 0
+fi
+
+fib_test
+
+exit $ret
diff --git a/tools/testing/selftests/net/msg_zerocopy.c b/tools/testing/selftests/net/msg_zerocopy.c
index 3ab6ec403905..e11fe84de0fd 100644
--- a/tools/testing/selftests/net/msg_zerocopy.c
+++ b/tools/testing/selftests/net/msg_zerocopy.c
@@ -259,22 +259,28 @@ static int setup_ip6h(struct ipv6hdr *ip6h, uint16_t payload_len)
 	return sizeof(*ip6h);
 }
 
-static void setup_sockaddr(int domain, const char *str_addr, void *sockaddr)
+
+static void setup_sockaddr(int domain, const char *str_addr,
+			   struct sockaddr_storage *sockaddr)
 {
 	struct sockaddr_in6 *addr6 = (void *) sockaddr;
 	struct sockaddr_in *addr4 = (void *) sockaddr;
 
 	switch (domain) {
 	case PF_INET:
+		memset(addr4, 0, sizeof(*addr4));
 		addr4->sin_family = AF_INET;
 		addr4->sin_port = htons(cfg_port);
-		if (inet_pton(AF_INET, str_addr, &(addr4->sin_addr)) != 1)
+		if (str_addr &&
+		    inet_pton(AF_INET, str_addr, &(addr4->sin_addr)) != 1)
 			error(1, 0, "ipv4 parse error: %s", str_addr);
 		break;
 	case PF_INET6:
+		memset(addr6, 0, sizeof(*addr6));
 		addr6->sin6_family = AF_INET6;
 		addr6->sin6_port = htons(cfg_port);
-		if (inet_pton(AF_INET6, str_addr, &(addr6->sin6_addr)) != 1)
+		if (str_addr &&
+		    inet_pton(AF_INET6, str_addr, &(addr6->sin6_addr)) != 1)
 			error(1, 0, "ipv6 parse error: %s", str_addr);
 		break;
 	default:
@@ -603,6 +609,7 @@ static void parse_opts(int argc, char **argv)
 				    sizeof(struct tcphdr) -
 				    40 /* max tcp options */;
 	int c;
+	char *daddr = NULL, *saddr = NULL;
 
 	cfg_payload_len = max_payload_len;
 
@@ -627,7 +634,7 @@ static void parse_opts(int argc, char **argv)
 			cfg_cpu = strtol(optarg, NULL, 0);
 			break;
 		case 'D':
-			setup_sockaddr(cfg_family, optarg, &cfg_dst_addr);
+			daddr = optarg;
 			break;
 		case 'i':
 			cfg_ifindex = if_nametoindex(optarg);
@@ -638,7 +645,7 @@ static void parse_opts(int argc, char **argv)
 			cfg_cork_mixed = true;
 			break;
 		case 'p':
-			cfg_port = htons(strtoul(optarg, NULL, 0));
+			cfg_port = strtoul(optarg, NULL, 0);
 			break;
 		case 'r':
 			cfg_rx = true;
@@ -647,7 +654,7 @@ static void parse_opts(int argc, char **argv)
 			cfg_payload_len = strtoul(optarg, NULL, 0);
 			break;
 		case 'S':
-			setup_sockaddr(cfg_family, optarg, &cfg_src_addr);
+			saddr = optarg;
 			break;
 		case 't':
 			cfg_runtime_ms = 200 + strtoul(optarg, NULL, 10) * 1000;
@@ -660,6 +667,8 @@ static void parse_opts(int argc, char **argv)
 			break;
 		}
 	}
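+
+	/*
+	 * Deferred until after the option loop so the result no longer
+	 * depends on argument order (-p may now follow -D/-S), and so that
+	 * htons(cfg_port) is applied exactly once, inside setup_sockaddr(),
+	 * instead of both at parse time and again during address setup.
+	 */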
+	setup_sockaddr(cfg_family, daddr, &cfg_dst_addr);
+	setup_sockaddr(cfg_family, saddr, &cfg_src_addr);
 
 	if (cfg_payload_len > max_payload_len)
 		error(1, 0, "-s: payload exceeds max (%d)", max_payload_len);
diff --git a/tools/testing/selftests/net/rtnetlink.sh b/tools/testing/selftests/net/rtnetlink.sh
index 5215493166c9..a622eeecc3a6 100755
--- a/tools/testing/selftests/net/rtnetlink.sh
+++ b/tools/testing/selftests/net/rtnetlink.sh
@@ -502,6 +502,231 @@ kci_test_macsec()
 	echo "PASS: macsec"
 }
 
+kci_test_gretap()
+{
+	testns="testns"
+	DEV_NS=gretap00
+	ret=0
+
+	ip link help gretap 2>&1 | grep -q "^Usage:"
+	if [ $? -ne 0 ]; then
+		echo "SKIP: gretap: iproute2 too old"
+		return 1
+	fi
+
+	ip netns add "$testns"
+	if [ $? -ne 0 ]; then
+		echo "SKIP gretap tests: cannot add net namespace $testns"
+		return 1
+	fi
+
+	# test native tunnel
+	ip netns exec "$testns" ip link add dev "$DEV_NS" type gretap seq \
+		key 102 local 172.16.1.100 remote 172.16.1.200
+	check_err $?
+
+	ip netns exec "$testns" ip addr add dev "$DEV_NS" 10.1.1.100/24
+	check_err $?
+
+	ip netns exec "$testns" ip link set dev "$DEV_NS" up
+	check_err $?
+
+	ip netns exec "$testns" ip link del "$DEV_NS"
+	check_err $?
+
+	# test external mode
+	ip netns exec "$testns" ip link add dev "$DEV_NS" type gretap external
+	check_err $?
+
+	ip netns exec "$testns" ip link del "$DEV_NS"
+	check_err $?
+
+	ip netns del "$testns"
+
+	if [ $ret -ne 0 ]; then
+		echo "FAIL: gretap"
+		return 1
+	fi
+	echo "PASS: gretap"
+}
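+
+# "external" puts the device in collect-metadata mode: no local/remote
+# addresses live on the tunnel itself; encapsulation parameters are supplied
+# per packet, e.g. by an eBPF program or a tc tunnel_key action.  A hedged
+# pairing sketch (device names illustrative):
+#
+#   tc qdisc add dev eth0 clsact
+#   tc filter add dev eth0 ingress matchall \
+#           action tunnel_key set src_ip 172.16.1.100 dst_ip 172.16.1.200 id 102 \
+#           action mirred egress redirect dev gretap00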
+
+kci_test_ip6gretap()
+{
+	testns="testns"
+	DEV_NS=ip6gretap00
+	ret=0
+
+	ip link help ip6gretap 2>&1 | grep -q "^Usage:"
+	if [ $? -ne 0 ]; then
+		echo "SKIP: ip6gretap: iproute2 too old"
+		return 1
+	fi
+
+	ip netns add "$testns"
+	if [ $? -ne 0 ]; then
+		echo "SKIP ip6gretap tests: cannot add net namespace $testns"
+		return 1
+	fi
+
+	# test native tunnel
+	ip netns exec "$testns" ip link add dev "$DEV_NS" type ip6gretap seq \
+		key 102 local fc00:100::1 remote fc00:100::2
+	check_err $?
+
+	ip netns exec "$testns" ip addr add dev "$DEV_NS" fc00:200::1/96
+	check_err $?
+
+	ip netns exec "$testns" ip link set dev "$DEV_NS" up
+	check_err $?
+
+	ip netns exec "$testns" ip link del "$DEV_NS"
+	check_err $?
+
+	# test external mode
+	ip netns exec "$testns" ip link add dev "$DEV_NS" type ip6gretap external
+	check_err $?
+
+	ip netns exec "$testns" ip link del "$DEV_NS"
+	check_err $?
+
+	ip netns del "$testns"
+
+	if [ $ret -ne 0 ]; then
+		echo "FAIL: ip6gretap"
+		return 1
+	fi
+	echo "PASS: ip6gretap"
+}
+
+kci_test_erspan()
+{
+	testns="testns"
+	DEV_NS=erspan00
+	ret=0
+
+	ip link help erspan 2>&1 | grep -q "^Usage:"
+	if [ $? -ne 0 ]; then
+		echo "SKIP: erspan: iproute2 too old"
+		return 1
+	fi
+
+	ip netns add "$testns"
+	if [ $? -ne 0 ]; then
+		echo "SKIP erspan tests: cannot add net namespace $testns"
+		return 1
+	fi
+
+	# test native tunnel erspan v1
+	ip netns exec "$testns" ip link add dev "$DEV_NS" type erspan seq \
+		key 102 local 172.16.1.100 remote 172.16.1.200 \
+		erspan_ver 1 erspan 488
+	check_err $?
+
+	ip netns exec "$testns" ip addr add dev "$DEV_NS" 10.1.1.100/24
+	check_err $?
+
+	ip netns exec "$testns" ip link set dev "$DEV_NS" up
+	check_err $?
+
+	ip netns exec "$testns" ip link del "$DEV_NS"
+	check_err $?
+
+	# test native tunnel erspan v2
+	ip netns exec "$testns" ip link add dev "$DEV_NS" type erspan seq \
+		key 102 local 172.16.1.100 remote 172.16.1.200 \
+		erspan_ver 2 erspan_dir ingress erspan_hwid 7
+	check_err $?
+
+	ip netns exec "$testns" ip addr add dev "$DEV_NS" 10.1.1.100/24
+	check_err $?
+
+	ip netns exec "$testns" ip link set dev "$DEV_NS" up
+	check_err $?
+
+	ip netns exec "$testns" ip link del "$DEV_NS"
+	check_err $?
+
+	# test external mode
+	ip netns exec "$testns" ip link add dev "$DEV_NS" type erspan external
+	check_err $?
+
+	ip netns exec "$testns" ip link del "$DEV_NS"
+	check_err $?
+
+	ip netns del "$testns"
+
+	if [ $ret -ne 0 ]; then
+		echo "FAIL: erspan"
+		return 1
+	fi
+	echo "PASS: erspan"
+}
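+
+# ERSPAN version 1 encodes a session index ("erspan IDX"); version 2 instead
+# carries a mirroring direction and a hardware ID ("erspan_dir",
+# "erspan_hwid"), which is what the two native-tunnel cases above exercise.
+# The configured values can be read back with a hedged:
+#
+#   ip -d link show erspan00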
+
+kci_test_ip6erspan()
+{
+	testns="testns"
+	DEV_NS=ip6erspan00
+	ret=0
+
+	ip link help ip6erspan 2>&1 | grep -q "^Usage:"
+	if [ $? -ne 0 ]; then
+		echo "SKIP: ip6erspan: iproute2 too old"
+		return 1
+	fi
+
+	ip netns add "$testns"
+	if [ $? -ne 0 ]; then
+		echo "SKIP ip6erspan tests: cannot add net namespace $testns"
+		return 1
+	fi
+
+	# test native tunnel ip6erspan v1
+	ip netns exec "$testns" ip link add dev "$DEV_NS" type ip6erspan seq \
+		key 102 local fc00:100::1 remote fc00:100::2 \
+		erspan_ver 1 erspan 488
+	check_err $?
+
+	ip netns exec "$testns" ip addr add dev "$DEV_NS" 10.1.1.100/24
+	check_err $?
+
+	ip netns exec "$testns" ip link set dev "$DEV_NS" up
+	check_err $?
+
+	ip netns exec "$testns" ip link del "$DEV_NS"
+	check_err $?
+
+	# test native tunnel ip6erspan v2
+	ip netns exec "$testns" ip link add dev "$DEV_NS" type ip6erspan seq \
+		key 102 local fc00:100::1 remote fc00:100::2 \
+		erspan_ver 2 erspan_dir ingress erspan_hwid 7
+	check_err $?
+
+	ip netns exec "$testns" ip addr add dev "$DEV_NS" 10.1.1.100/24
+	check_err $?
+
+	ip netns exec "$testns" ip link set dev "$DEV_NS" up
+	check_err $?
+
+	ip netns exec "$testns" ip link del "$DEV_NS"
+	check_err $?
+
+	# test external mode
+	ip netns exec "$testns" ip link add dev "$DEV_NS" \
+		type ip6erspan external
+	check_err $?
+
+	ip netns exec "$testns" ip link del "$DEV_NS"
+	check_err $?
+
+	ip netns del "$testns"
+
+	if [ $ret -ne 0 ]; then
+		echo "FAIL: ip6erspan"
+		return 1
+	fi
+	echo "PASS: ip6erspan"
+}
+
 kci_test_rtnl()
 {
 	kci_add_dummy
@@ -514,6 +739,10 @@ kci_test_rtnl()
 	kci_test_route_get
 	kci_test_tc
 	kci_test_gre
+	kci_test_gretap
+	kci_test_ip6gretap
+	kci_test_erspan
+	kci_test_ip6erspan
 	kci_test_bridge
 	kci_test_addrlabel
 	kci_test_ifalias
diff --git a/tools/virtio/linux/kernel.h b/tools/virtio/linux/kernel.h
index 395521a7a8d8..fca8381bbe04 100644
--- a/tools/virtio/linux/kernel.h
+++ b/tools/virtio/linux/kernel.h
@@ -118,7 +118,7 @@ static inline void free_page(unsigned long addr)
 #define dev_err(dev, format, ...) fprintf (stderr, format, ## __VA_ARGS__)
 #define dev_warn(dev, format, ...) fprintf (stderr, format, ## __VA_ARGS__)
 
-#define WARN_ON_ONCE(cond) ((cond) && fprintf (stderr, "WARNING\n"))
+#define WARN_ON_ONCE(cond) ((cond) ? fprintf (stderr, "WARNING\n") : 0)
 
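+/*
+ * Unlike the kernel's WARN_ON_ONCE(), this userspace stub warns on every
+ * hit, not only the first.  Either form remains usable as an expression;
+ * a hedged usage sketch:
+ *
+ *	if (WARN_ON_ONCE(len > buf_size))
+ *		return;
+ */
+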
 #define min(x, y) ({				\
 	typeof(x) _min1 = (x);			\
diff --git a/tools/virtio/linux/thread_info.h b/tools/virtio/linux/thread_info.h
new file mode 100644
index 000000000000..e0f610d08006
--- /dev/null
+++ b/tools/virtio/linux/thread_info.h
@@ -0,0 +1 @@
+#define check_copy_size(A, B, C) (1)
diff --git a/tools/virtio/ringtest/main.h b/tools/virtio/ringtest/main.h
index 5706e075adf2..301d59bfcd0a 100644
--- a/tools/virtio/ringtest/main.h
+++ b/tools/virtio/ringtest/main.h
@@ -111,7 +111,7 @@ static inline void busy_wait(void)
 } 
 
 #if defined(__x86_64__) || defined(__i386__)
-#define smp_mb()     asm volatile("lock; addl $0,-128(%%rsp)" ::: "memory", "cc")
+#define smp_mb()     asm volatile("lock; addl $0,-132(%%rsp)" ::: "memory", "cc")
 #else
 /*
  * Not using __ATOMIC_SEQ_CST since gcc docs say they are only synchronized
@@ -134,4 +134,61 @@ static inline void busy_wait(void)
     barrier(); \
 } while (0)
 
+#if defined(__i386__) || defined(__x86_64__) || defined(__s390x__)
+#define smp_wmb() barrier()
+#else
+#define smp_wmb() smp_release()
+#endif
+
+#ifdef __alpha__
+#define smp_read_barrier_depends() smp_acquire()
+#else
+#define smp_read_barrier_depends() do {} while (0)
+#endif
+
+static __always_inline
+void __read_once_size(const volatile void *p, void *res, int size)
+{
+	switch (size) {
+	case 1: *(unsigned char *)res = *(volatile unsigned char *)p; break;
+	case 2: *(unsigned short *)res = *(volatile unsigned short *)p; break;
+	case 4: *(unsigned int *)res = *(volatile unsigned int *)p; break;
+	case 8: *(unsigned long long *)res = *(volatile unsigned long long *)p; break;
+	default:
+		barrier();
+		__builtin_memcpy((void *)res, (const void *)p, size);
+		barrier();
+	}
+}
+
+static __always_inline void __write_once_size(volatile void *p, void *res, int size)
+{
+	switch (size) {
+	case 1: *(volatile unsigned char *)p = *(unsigned char *)res; break;
+	case 2: *(volatile unsigned short *)p = *(unsigned short *)res; break;
+	case 4: *(volatile unsigned int *)p = *(unsigned int *)res; break;
+	case 8: *(volatile unsigned long long *)p = *(unsigned long long *)res; break;
+	default:
+		barrier();
+		__builtin_memcpy((void *)p, (const void *)res, size);
+		barrier();
+	}
+}
+
+#define READ_ONCE(x) \
+({									\
+	union { typeof(x) __val; char __c[1]; } __u;			\
+	__read_once_size(&(x), __u.__c, sizeof(x));		\
+	smp_read_barrier_depends(); /* Enforce dependency ordering from x */ \
+	__u.__val;							\
+})
+
+#define WRITE_ONCE(x, val) \
+({							\
+	union { typeof(x) __val; char __c[1]; } __u =	\
+		{ .__val = (typeof(x)) (val) }; \
+	__write_once_size(&(x), __u.__c, sizeof(x));	\
+	__u.__val;					\
+})
+
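+/*
+ * Hedged usage sketch: like the kernel's READ_ONCE()/WRITE_ONCE(), these
+ * force single, non-torn accesses for machine-word sizes, so the compiler
+ * cannot cache, refetch, or split a load/store shared with another thread:
+ *
+ *	while (!READ_ONCE(flag))
+ *		busy_wait();
+ *	WRITE_ONCE(flag, 0);
+ */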
 #endif
diff --git a/tools/virtio/ringtest/ptr_ring.c b/tools/virtio/ringtest/ptr_ring.c
index e6e81305ef46..477899c12c51 100644
--- a/tools/virtio/ringtest/ptr_ring.c
+++ b/tools/virtio/ringtest/ptr_ring.c
@@ -187,7 +187,7 @@ bool enable_kick()
 
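+/*
+ * __ptr_ring_empty() is equivalent to !__ptr_ring_peek() for this
+ * single-consumer test, but it states the intent directly and keeps the
+ * caller on the documented emptiness-test API; see the locking rules in
+ * linux/ptr_ring.h for which side may safely call each helper.
+ */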
 bool avail_empty()
 {
-	return !__ptr_ring_peek(&array);
+	return __ptr_ring_empty(&array);
 }
 
 bool use_buf(unsigned *lenp, void **bufp)